diff --git a/AMDiS/src/Error.hh b/AMDiS/src/Error.hh
index 74c5b2e54df43fb92ff045b1405773104c973c82..a8062d221a2a0ad66cb9d8433ed5b62a4bd81257 100644
--- a/AMDiS/src/Error.hh
+++ b/AMDiS/src/Error.hh
@@ -347,7 +347,10 @@ namespace AMDiS {
 
     TraverseStack stack;
     ElInfo *elInfo = stack.traverseFirst(fe_space->getMesh(), level, flag);
-		
+
+    double a = 0.0;  // debug: signed sum of per-element errors
+    double b = 0.0;  // debug: sum of absolute per-element errors
+
     while (elInfo) {
       elinfo = elInfo;
      
@@ -364,18 +367,22 @@ namespace AMDiS {
 	int_uh_vec += quadFast->getWeight(i) * uh_vec[i];
       }
 
-      l2_err_el = pow(fabs(det * int_u_vec - det * int_uh_vec), 2.0);      
+      a += det * int_u_vec - det * int_uh_vec;
+      b += fabs(det * int_u_vec - det * int_uh_vec);
+      l2_err_el = pow(fabs(det * int_u_vec - det * int_uh_vec), 2.0);
       l2Err2 += l2_err_el;
       maxErr = std::max(maxErr, l2_err_el);
       
-//       if (writeInLeafData)
-// 	elInfo->getElement()->setEstimation(l2_err_el, component);
+      if (writeInLeafData)
+	elInfo->getElement()->setEstimation(l2_err_el, component);
       
       estMap[elInfo->getElement()->getIndex()] = l2_err_el;
 
       elInfo = stack.traverseNext(elInfo);
     }
 
+    MSG("L2ERR: signed sum %e  abs sum %e  squared sum %e  L2 err %e\n", a, b, l2Err2, sqrt(l2Err2));
+
     delete [] u_vec;
 
     if (max)  
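
// A minimal sketch (illustrative, not AMDiS code) of what the debug additions
// above accumulate: elemErr[i] stands for the per-element quantity
// det * (int_u_vec - int_uh_vec) computed inside the traversal loop.
#include <cmath>
#include <vector>

double accumulateL2(const std::vector<double> &elemErr)
{
  double a = 0.0, b = 0.0, l2Err2 = 0.0;
  for (unsigned int i = 0; i < elemErr.size(); i++) {
    a += elemErr[i];                    // signed sum: cancellation shows sign changes
    b += std::fabs(elemErr[i]);         // sum of absolute values
    l2Err2 += elemErr[i] * elemErr[i];  // squared term, as l2_err_el above
  }
  return std::sqrt(l2Err2);             // the last value printed by the MSG line
}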
diff --git a/AMDiS/src/parallel/ParallelDofMapping.cc b/AMDiS/src/parallel/ParallelDofMapping.cc
index 9b30b20c723f647cd4649ee43eec462c63f17097..c5ce2e0500ea36ae415f4c1d15878634b9162aea 100644
--- a/AMDiS/src/parallel/ParallelDofMapping.cc
+++ b/AMDiS/src/parallel/ParallelDofMapping.cc
@@ -113,6 +113,7 @@ namespace AMDiS {
 
   void ParallelDofMapping::init(MPI::Intracomm *m,
 				vector<const FiniteElemSpace*> &fe,
+				vector<const FiniteElemSpace*> &uniqueFe,
 				bool needGlobalMapping,
 				bool bNonLocalDofs)
   {
@@ -120,17 +121,13 @@ namespace AMDiS {
 
     mpiComm = m;
     feSpaces = fe;
+    feSpacesUnique = uniqueFe;
     hasNonLocalDofs = bNonLocalDofs;
     
-    // === Create a set of unique FE spaces. ===
-
-    for (unsigned int i = 0; i < feSpaces.size(); i++)
-      feSpacesUnique.insert(feSpaces[i]);
-
 
     // === Init the mapping for all different FE spaces. ===
 
-    for (std::set<const FiniteElemSpace*>::iterator it = feSpacesUnique.begin();
+    for (vector<const FiniteElemSpace*>::iterator it = feSpacesUnique.begin();
 	 it != feSpacesUnique.end(); ++it) {
       addFeSpace(*it);
       data[*it].setNeedGlobalMapping(needGlobalMapping);
@@ -143,7 +140,7 @@ namespace AMDiS {
   {
     FUNCNAME("ParallelDofMapping::clear()");
 
-    for (std::set<const FiniteElemSpace*>::iterator it = feSpacesUnique.begin();
+    for (vector<const FiniteElemSpace*>::iterator it = feSpacesUnique.begin();
 	 it != feSpacesUnique.end(); ++it)
       data[*it].clear();
 
@@ -164,7 +161,7 @@ namespace AMDiS {
     recvDofs = &pRecv;
 
     // Add the DOF communicator also to all FE space DOF mappings.
-    for (std::set<const FiniteElemSpace*>::iterator it = feSpacesUnique.begin();
+    for (vector<const FiniteElemSpace*>::iterator it = feSpacesUnique.begin();
 	 it != feSpacesUnique.end(); ++it)
       data[*it].setDofComm(pSend, pRecv);
   }
@@ -244,7 +241,7 @@ namespace AMDiS {
     FUNCNAME("ParallelDofMapping::update()");
 
     // First, update all FE space DOF mappings.
-    for (std::set<const FiniteElemSpace*>::iterator it = feSpacesUnique.begin();
+    for (vector<const FiniteElemSpace*>::iterator it = feSpacesUnique.begin();
 	 it != feSpacesUnique.end(); ++it)
       data[*it].update();
     
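
// A hedged sketch of how a caller can build the uniqueFe argument that init()
// now expects instead of computing the set itself. An order-preserving dedup
// keeps iteration deterministic across ranks; the function name is
// illustrative, not the actual MeshDistributor implementation.
#include <set>
#include <vector>

class FiniteElemSpace;  // pointers only, no definition needed for this sketch

std::vector<const FiniteElemSpace*>
makeUniqueFeSpaces(const std::vector<const FiniteElemSpace*> &feSpaces)
{
  std::set<const FiniteElemSpace*> seen;
  std::vector<const FiniteElemSpace*> unique;
  for (unsigned int i = 0; i < feSpaces.size(); i++)
    if (seen.insert(feSpaces[i]).second)  // true only on first occurrence
      unique.push_back(feSpaces[i]);
  return unique;
}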
diff --git a/AMDiS/src/parallel/ParallelDofMapping.h b/AMDiS/src/parallel/ParallelDofMapping.h
index 9f7fa3fe974c8e287a7eb9b3ae7f9dba4384384d..40aca2c4ec760a629baf056a03af613c11d41262 100644
--- a/AMDiS/src/parallel/ParallelDofMapping.h
+++ b/AMDiS/src/parallel/ParallelDofMapping.h
@@ -282,7 +282,10 @@ namespace AMDiS {
      *
      * \param[in]  m                  MPI communicator.
      * \param[in]  fe                 The FE spaces of all components of the 
-     *                                PDE to be solved. 
+     *                                PDE to be solved.
+     * \param[in]  uniqueFe           Unique list of FE spaces, i.e., all
+     *                                elements of this list are pairwise
+     *                                distinct.
      * \param[in]  needGlobalMapping  If true, the mapping computes also a global
      *                                index for the DOFs.
      * \param[in]  bNonLocalDofs      If true, at least one rank's mapping con-
@@ -290,6 +293,7 @@ namespace AMDiS {
      */
     void init(MPI::Intracomm *m,
 	      vector<const FiniteElemSpace*> &fe,
+	      vector<const FiniteElemSpace*> &uniqueFe,
 	      bool needGlobalMapping,
 	      bool bNonLocalDofs);
     
@@ -404,7 +408,7 @@ namespace AMDiS {
 
-    /// The set of all FE spaces. It uniquly contains all different FE spaces
-    /// from \ref feSpaces.
-    std::set<const FiniteElemSpace*> feSpacesUnique;
+    /// The list of all unique FE spaces, i.e., it contains every FE space
+    /// from \ref feSpaces exactly once.
+    vector<const FiniteElemSpace*> feSpacesUnique;
 
     /// Number of DOFs owned by rank.
     int nRankDofs;
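
// A hedged usage sketch of the extended init() documented above; the argument
// names are illustrative stand-ins for the caller's data (compare the real
// call sites in PetscSolverFeti.cc below).
void initMapping(MPI::Intracomm *mpiComm,
                 vector<const FiniteElemSpace*> &componentSpaces,
                 vector<const FiniteElemSpace*> &uniqueSpaces,
                 ParallelDofMapping &dofMap)
{
  dofMap.init(mpiComm,
              componentSpaces,  // one FE space per PDE component, repeats allowed
              uniqueSpaces,     // every distinct FE space exactly once
              true,             // needGlobalMapping: also compute global indices
              true);            // bNonLocalDofs: mapping may hold non-owned DOFs
}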
diff --git a/AMDiS/src/parallel/PetscSolverFeti.cc b/AMDiS/src/parallel/PetscSolverFeti.cc
index f4d4a4e0c7ab04f1832010490e1a6720aa2f9ec0..8e6b95b4b58cbe3b04f2c8de9cb099d2569beb72 100644
--- a/AMDiS/src/parallel/PetscSolverFeti.cc
+++ b/AMDiS/src/parallel/PetscSolverFeti.cc
@@ -216,9 +216,6 @@ namespace AMDiS {
   {
     FUNCNAME("PetscSolverFeti::updateDofData()");
 
-    TEST_EXIT(meshDistributor->getFeSpace()->getBasisFcts()->getDegree() == 1)
-      ("Works for linear basis functions only!\n");
-
     primalDofMap.clear();
     dualDofMap.clear();
     lagrangeMap.clear();
@@ -227,6 +224,7 @@ namespace AMDiS {
 
     for (unsigned int i = 0; i < meshDistributor->getFeSpaces().size(); i++) {
       const FiniteElemSpace *feSpace = meshDistributor->getFeSpace(i);
+
       createPrimals(feSpace);      
       createDuals(feSpace);
       createLagrange(feSpace);      
@@ -247,14 +245,15 @@ namespace AMDiS {
 
     for (unsigned int i = 0; i < meshDistributor->getFeSpaces().size(); i++) {
       const FiniteElemSpace *feSpace = meshDistributor->getFeSpace(i);
+      MSG("FETI-DP data for %d-th FE space:\n", i);
 
-      MSG("nRankPrimals = %d   nOverallPrimals = %d\n", 
+      MSG("  nRankPrimals = %d  nOverallPrimals = %d\n",
 	  primalDofMap[feSpace].nRankDofs, primalDofMap[feSpace].nOverallDofs);
       
-      MSG("nRankDuals = %d   nOverallDuals = %d\n",
+      MSG("  nRankDuals = %d  nOverallDuals = %d\n",
 	  dualDofMap[feSpace].nRankDofs, dualDofMap[feSpace].nOverallDofs);
 
-      MSG("nRankLagrange = %d  nOverallLagrange = %d\n",
+      MSG("  nRankLagrange = %d  nOverallLagrange = %d\n",
 	  lagrangeMap[feSpace].nRankDofs, lagrangeMap[feSpace].nOverallDofs);
 
       TEST_EXIT_DBG(localDofMap[feSpace].size() + primalDofMap[feSpace].size() == 
@@ -904,12 +903,17 @@ namespace AMDiS {
     
     vector<const FiniteElemSpace*> feSpaces = getFeSpaces(mat);
     
-    primalDofMap.init(mpiComm, feSpaces, true, true);
-    dualDofMap.init(mpiComm, feSpaces, false, false);
-    lagrangeMap.init(mpiComm, feSpaces, true, true);
-    localDofMap.init(mpiComm, feSpaces, false, false);
+    primalDofMap.init(mpiComm, feSpaces, meshDistributor->getFeSpaces(), 
+		      true, true);
+    dualDofMap.init(mpiComm, feSpaces, meshDistributor->getFeSpaces(),
+		    false, false);
+    lagrangeMap.init(mpiComm, feSpaces, meshDistributor->getFeSpaces(),
+		     true, true);
+    localDofMap.init(mpiComm, feSpaces, meshDistributor->getFeSpaces(),
+		     false, false);
     if (fetiPreconditioner == FETI_DIRICHLET)
-      interiorDofMap.init(mpiComm, feSpaces, false, false);
+      interiorDofMap.init(mpiComm, feSpaces, meshDistributor->getFeSpaces(),
+			  false, false);
 
     updateDofData();
     
@@ -920,23 +924,23 @@ namespace AMDiS {
     int nRowsRankPrimal = primalDofMap.getRankDofs();
     int nRowsOverallPrimal = primalDofMap.getOverallDofs();
     
-    MatCreateSeqAIJ(PETSC_COMM_SELF, nRowsRankB, nRowsRankB, 30, PETSC_NULL,
+    MatCreateSeqAIJ(PETSC_COMM_SELF, nRowsRankB, nRowsRankB, 60, PETSC_NULL,
 		    &mat_b_b);
     
     MatCreateMPIAIJ(PETSC_COMM_WORLD,
 		    nRowsRankPrimal, nRowsRankPrimal, 
 		    nRowsOverallPrimal, nRowsOverallPrimal,
-		    30, PETSC_NULL, 30, PETSC_NULL, &mat_primal_primal);
+		    60, PETSC_NULL, 60, PETSC_NULL, &mat_primal_primal);
     
     MatCreateMPIAIJ(PETSC_COMM_WORLD,
 		    nRowsRankB, nRowsRankPrimal, 
 		    nRowsOverallB, nRowsOverallPrimal,
-		    30, PETSC_NULL, 30, PETSC_NULL, &mat_b_primal);
+		    60, PETSC_NULL, 60, PETSC_NULL, &mat_b_primal);
     
     MatCreateMPIAIJ(PETSC_COMM_WORLD,
 		    nRowsRankPrimal, nRowsRankB,
 		    nRowsOverallPrimal, nRowsOverallB,
-		    15, PETSC_NULL, 15, PETSC_NULL, &mat_primal_b);
+		    30, PETSC_NULL, 30, PETSC_NULL, &mat_primal_b);
     
     
     // === Create matrices for FETI-DP preconditioner. ===
@@ -945,22 +949,22 @@ namespace AMDiS {
       int nRowsDual = dualDofMap.getRankDofs();
 
       MatCreateSeqAIJ(PETSC_COMM_SELF,
-		      nRowsDual, nRowsDual, 30, PETSC_NULL,
+		      nRowsDual, nRowsDual, 60, PETSC_NULL,
 		      &mat_duals_duals);
 
       if (fetiPreconditioner == FETI_DIRICHLET) {
 	int nRowsInterior = interiorDofMap.getRankDofs();
 
 	MatCreateSeqAIJ(PETSC_COMM_SELF,
-			nRowsInterior, nRowsInterior, 30, PETSC_NULL,
+			nRowsInterior, nRowsInterior, 60, PETSC_NULL,
 			&mat_interior_interior);
 	
 	MatCreateSeqAIJ(PETSC_COMM_SELF,
-			nRowsInterior, nRowsDual, 30, PETSC_NULL,
+			nRowsInterior, nRowsDual, 60, PETSC_NULL,
 			&mat_interior_duals);
 	
 	MatCreateSeqAIJ(PETSC_COMM_SELF,
-			nRowsDual, nRowsInterior, 30, PETSC_NULL,
+			nRowsDual, nRowsInterior, 60, PETSC_NULL,
 			&mat_duals_interior);
       }
     }
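
// A hedged note on the preallocation changes above: the per-row nonzero
// estimates passed to MatCreateSeqAIJ/MatCreateMPIAIJ are doubled (30 -> 60,
// 15 -> 30), consistent with dropping the linear-basis-only TEST_EXIT in
// updateDofData(): higher-order basis functions couple more DOFs per row.
// Minimal sketch of the sequential case (values illustrative):
#include <petscmat.h>

Mat createLocalBlock(PetscInt nRows)
{
  Mat A;
  // Reserve up to 60 nonzeros per row; passing an array instead of PETSC_NULL
  // would give exact per-row counts and avoid over-allocation.
  MatCreateSeqAIJ(PETSC_COMM_SELF, nRows, nRows, 60, PETSC_NULL, &A);
  return A;
}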