diff --git a/AMDiS/src/parallel/InteriorBoundary.h b/AMDiS/src/parallel/InteriorBoundary.h
index 2bf154f3cdc1fc77d2d85cbbc48f0d264b9fd8dd..eb69baa66e08ed397b3c830ec4be354e6c47b82c 100644
--- a/AMDiS/src/parallel/InteriorBoundary.h
+++ b/AMDiS/src/parallel/InteriorBoundary.h
@@ -109,8 +109,17 @@ namespace AMDiS {
 	  vecIt = mapIt->second.begin();	
       }
 
-      int getRank() 
+      inline int getRank() 
       {
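+	// In the multilevel case, check (in debug mode) that the rank of the
+	// current boundary can be mapped from level - 1 to this level.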
+	if (level > 0) {
+	  int r = levelData->mapRank(mapIt->first, level - 1, level);
+	  TEST_EXIT_DBG(r >= 0)
+	    ("Mapping rank %d from level %d to level %d failed!\n",
+	     mapIt->first, level - 1, level);
+	}
+
 	return mapIt->first;
       }
 
@@ -125,7 +134,9 @@
 	  TEST_EXIT_DBG(levelData)("No mesh level data object defined!\n");
 	  TEST_EXIT_DBG(level == 1)("Only 2-level method supported!\n");
 
-	  while (levelData->rankInSubdomain(mapIt->first, level) ||
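+	  // Skip all neighbour ranks that are not part of the subdomain at
+	  // this level or whose list of boundary objects is empty.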
+	  while (!levelData->rankInSubdomain(mapIt->first, level) ||
 		 mapIt->second.size() == 0) {
 	    ++mapIt;
 	    if (mapIt == bound.boundary.end())
diff --git a/AMDiS/src/parallel/MeshDistributor.cc b/AMDiS/src/parallel/MeshDistributor.cc
index 630c3401bf4861758a4fd4c67d16590bdaa2f8f8..55bf0f53e40ee832dcc4db8250af2bb2255f145f 100644
--- a/AMDiS/src/parallel/MeshDistributor.cc
+++ b/AMDiS/src/parallel/MeshDistributor.cc
@@ -1967,8 +1967,9 @@ namespace AMDiS {
     recvDofs.init(nLevels);
     boundaryDofInfo.resize(nLevels);
 
-    dofMap.init(mpiComm, feSpaces, feSpaces, true, true);
+    dofMap.init(levelData, feSpaces, feSpaces, true, true);
     dofMap.setDofComm(sendDofs, recvDofs);
+    dofMap.clear();
 
     for (unsigned int i = 0; i < feSpaces.size(); i++)
       updateLocalGlobalNumbering(feSpaces[i]);
@@ -1977,11 +1978,16 @@ namespace AMDiS {
 
 #if (DEBUG != 0)
     MSG("------------- Debug information -------------\n");
-    for (unsigned int i = 0; i < feSpaces.size(); i++) {
-      MSG("FE space %d:\n", i);
-      MSG("   nRankDofs = %d\n", dofMap[feSpaces[i]].nRankDofs[0]);
-      MSG("   nOverallDofs = %d\n", dofMap[feSpaces[i]].nOverallDofs[0]);
-      MSG("   rStartDofs = %d\n", dofMap[feSpaces[i]].rStartDofs[0]);
+    MSG("|  number of levels:         %d\n", nLevels);
+    MSG("|  number of FE spaces:      %d\n", feSpaces.size());
+
+    for (int level = 0; level < nLevels; level++) {
+      for (unsigned int i = 0; i < feSpaces.size(); i++) {
+	MSG("|  level = %d   FE space = %d:\n", level, i);
+	MSG("|      nRankDofs    = %d\n", dofMap[feSpaces[i]].nRankDofs[level]);
+	MSG("|      nOverallDofs = %d\n", dofMap[feSpaces[i]].nOverallDofs[level]);
+	MSG("|      rStartDofs   = %d\n", dofMap[feSpaces[i]].rStartDofs[level]);
+      }
     }
 
     stringstream oss;
@@ -2006,8 +2012,6 @@ namespace AMDiS {
 
     mesh->dofCompress();
 
-    dofMap.clear();
-
     // === Get all DOFs in ranks partition. ===
 
     std::set<const DegreeOfFreedom*> rankDofSet;
@@ -2019,27 +2023,29 @@
     // === Traverse interior boundaries and get all DOFs on them. ===
 
     int nLevels = levelData.getLevelNumber();
-    for (int i = 0; i < nLevels; i++)
-      createBoundaryDofs(feSpace, i);
-
-    // All DOFs that must be received are DOFs not owned by rank and have 
-    // therefore to be removed from the set 'rankDofs'.
-    for (DofComm::Iterator it(recvDofs, feSpace); !it.end(); it.nextRank()) {
-      for (; !it.endDofIter(); it.nextDof()) {
-	DofContainer::iterator eraseIt = 
-	  find(rankDofs.begin(), rankDofs.end(), it.getDof());
-	if (eraseIt != rankDofs.end())
-	  rankDofs.erase(eraseIt);
+    for (int level = 0; level < nLevels; level++) {
+      createBoundaryDofs(feSpace, level);
+      
+      // All DOFs that must be received are not owned by this rank. They are
+      // collected first, so they can be skipped when the rank DOFs are added.
+      DofContainerSet nonRankDofs;
+      for (DofComm::Iterator it(recvDofs, level, feSpace); !it.end(); it.nextRank()) {
+	for (; !it.endDofIter(); it.nextDof()) {
+	  nonRankDofs.insert(it.getDof());
+	}
       }
+      
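+      // DOFs that were not received from another rank are owned by the rank.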
+      for (unsigned int i = 0; i < rankDofs.size(); i++)
+	if (nonRankDofs.count(rankDofs[i]) == 0)
+	  dofMap[feSpace].insertRankDof(level, *(rankDofs[i]));
+      
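+      // Received DOFs are part of the subdomain but owned by other ranks.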
+      for (DofComm::Iterator it(recvDofs, level, feSpace); !it.end(); it.nextRank())
+	for (; !it.endDofIter(); it.nextDof())
+	  dofMap[feSpace].insertNonRankDof(level, it.getDofIndex());
     }
-	
-    for (unsigned int i = 0; i < rankDofs.size(); i++)
-      dofMap[feSpace].insertRankDof(*(rankDofs[i]));
 
-    for (DofComm::Iterator it(recvDofs, feSpace); !it.end(); it.nextRank())
-      for (; !it.endDofIter(); it.nextDof())
-	dofMap[feSpace].insert(it.getDofIndex());
-    
     // === Update DOF admins due to new number of DOFs. ===
   
     lastMeshChangeIndex = mesh->getChangeIndex();
diff --git a/AMDiS/src/parallel/ParallelDofMapping.cc b/AMDiS/src/parallel/ParallelDofMapping.cc
index 35a81ec4cf3567c7e7a83e6ee88bfd352477fbd5..87e678d5c74565e8c57254a9f6dc4f714470b6f9 100644
--- a/AMDiS/src/parallel/ParallelDofMapping.cc
+++ b/AMDiS/src/parallel/ParallelDofMapping.cc
@@ -19,6 +19,7 @@ namespace AMDiS {
 
   void FeSpaceDofMap::clear()
   {
+    int nLevel = levelData->getLevelNumber();
     dofMap.clear();
     dofMap.resize(nLevel);
 
@@ -38,6 +39,7 @@ namespace AMDiS {
   {
     FUNCNAME("FeSpaceDofMap::update()");
 
+    int nLevel = levelData->getLevelNumber();
     for (int i = 0; i < nLevel; i++) {
 
       // === Compute local indices for all rank owned DOFs. ===
@@ -50,7 +52,8 @@ namespace AMDiS {
       
       nOverallDofs[i] = 0;
       rStartDofs[i] = 0;
-      mpi::getDofNumbering(mpiComm, nRankDofs[i], rStartDofs[i], nOverallDofs[i]);
+      mpi::getDofNumbering(levelData->getMpiComm(i), 
+			   nRankDofs[i], rStartDofs[i], nOverallDofs[i]);
       
       // === If required, compute also the global indices. ===
       
@@ -83,7 +86,7 @@ namespace AMDiS {
     // === Send all global indices of DOFs that are owned by the rank to all ===
     // === other ranks that also include this DOF.                           ===
 
-    StdMpi<vector<int> > stdMpi(mpiComm);
+    StdMpi<vector<int> > stdMpi(levelData->getMpiComm(level));
     for (DofComm::Iterator it(*sendDofs, level, feSpace); 
 	 !it.end(); it.nextRank())
       for (; !it.endDofIter(); it.nextDof())
@@ -127,7 +130,7 @@ namespace AMDiS {
   }
 
 
-  void ParallelDofMapping::init(MPI::Intracomm m,
+  void ParallelDofMapping::init(MeshLevelData &ldata,
 				vector<const FiniteElemSpace*> &fe,
 				vector<const FiniteElemSpace*> &uniqueFe,
 				bool needGlobalMapping,
@@ -135,7 +138,7 @@ namespace AMDiS {
   {
     FUNCNAME("ParallelDofMapping::init()");
 
-    mpiComm = m;
+    levelData = &ldata;
     feSpaces = fe;
     feSpacesUnique = uniqueFe;
     hasNonLocalDofs = bNonLocalDofs;
@@ -160,6 +163,12 @@ namespace AMDiS {
 	 it != feSpacesUnique.end(); ++it)
       data[*it].clear();
 
+    int nLevel = levelData->getLevelNumber();
+    nRankDofs.resize(nLevel);
+    nLocalDofs.resize(nLevel);
+    nOverallDofs.resize(nLevel);
+    rStartDofs.resize(nLevel);
+
     for (int i = 0; i < nLevel; i++) {
       nRankDofs[i] = -1;
       nLocalDofs[i] = -1;
@@ -183,7 +192,7 @@ namespace AMDiS {
 	 it != feSpacesUnique.end(); ++it)
       data[*it].setDofComm(pSend, pRecv);
   }
-  
+
 
   void ParallelDofMapping::addFeSpace(const FiniteElemSpace* feSpace)
   {
@@ -192,7 +201,7 @@ namespace AMDiS {
     if (data.count(feSpace))
       data.find(feSpace)->second.clear();
     else
-      data.insert(make_pair(feSpace, FeSpaceDofMap(mpiComm)));
+      data.insert(make_pair(feSpace, FeSpaceDofMap(levelData)));
     
     data.find(feSpace)->second.setFeSpace(feSpace);
   }    
@@ -263,6 +272,7 @@ namespace AMDiS {
 	 it != feSpacesUnique.end(); ++it)
       data[*it].update();
     
+    int nLevel = levelData->getLevelNumber();
     for (int i = 0; i < nLevel; i++) {
       // Compute all numbers from this mappings.
 
@@ -338,7 +348,8 @@
       // === Communicate the matrix indices for all DOFs that are on some ===
       // === interior boundaries.                                         ===
 
-      StdMpi<vector<DegreeOfFreedom> > stdMpi(mpiComm);
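+      // The exchange is restricted to the MPI communicator of this mesh level.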
+      StdMpi<vector<DegreeOfFreedom> > stdMpi(levelData->getMpiComm(level));
       for (DofComm::Iterator it(*sendDofs, level, feSpaces[i]); 
 	   !it.end(); it.nextRank()) {
 	vector<DegreeOfFreedom> sendGlobalDofs;
diff --git a/AMDiS/src/parallel/ParallelDofMapping.h b/AMDiS/src/parallel/ParallelDofMapping.h
index e43a835687bae08ef0851a5740f2f3c9d326b56a..1043fe7535f1f1a4404f7600e28a0a4c7fe40a7f 100644
--- a/AMDiS/src/parallel/ParallelDofMapping.h
+++ b/AMDiS/src/parallel/ParallelDofMapping.h
@@ -24,6 +24,7 @@
 #include <map>
 #include <set>
 #include "parallel/DofComm.h"
+#include "parallel/MeshLevelData.h"
 #include "parallel/MpiHelper.h"
 #include "parallel/ParallelTypes.h"
 #include "parallel/StdMpi.h"
@@ -102,12 +103,11 @@ namespace AMDiS {
     }
      
     /// This is the only valid constructor to be used.
-    FeSpaceDofMap(MPI::Intracomm m)
-      : mpiComm(m),
+    FeSpaceDofMap(MeshLevelData* ld)
+      : levelData(ld),
 	sendDofs(NULL),
 	recvDofs(NULL),
 	feSpace(NULL),
-	nLevel(1),
 	dofMap(1),
 	needGlobalMapping(false),
 	hasNonLocalDofs(false),
@@ -133,29 +133,29 @@ namespace AMDiS {
     
     /// Inserts a new DOF into rank's mapping. The DOF is assumed to be owned by
     /// the rank.
-    void insertRankDof(DegreeOfFreedom dof0, DegreeOfFreedom dof1 = -1)
+    void insertRankDof(int level, DegreeOfFreedom dof0, DegreeOfFreedom dof1 = -1)
     {
       FUNCNAME("FeSpaceDofMap::insertRankDof()");
       
-      TEST_EXIT_DBG(dofMap[0].count(dof0) == 0)("Should not happen!\n");
+      TEST_EXIT_DBG(dofMap[level].count(dof0) == 0)("DOF %d already mapped on level %d!\n", dof0, level);
       
-      dofMap[0][dof0].local = dof1;
-      nLocalDofs[0]++;
+      dofMap[level][dof0].local = dof1;
+      nLocalDofs[level]++;
       if (dof1 != -1)
-	nRankDofs[0]++;
+	nRankDofs[level]++;
     }
     
    /// Inserts a new DOF into rank's mapping. The DOF exists in rank's subdomain
     /// but is owned by a different rank, thus it is part of an interior boundary.
-    void insert(DegreeOfFreedom dof0, DegreeOfFreedom dof1 = -1)
+    void insertNonRankDof(int level, DegreeOfFreedom dof0, DegreeOfFreedom dof1 = -1)
     {
-      FUNCNAME("FeSpaceDofMap::insert()");
+      FUNCNAME("FeSpaceDofMap::insertNonRankDof()");
       
-      TEST_EXIT_DBG(dofMap[0].count(dof0) == 0)("Should not happen!\n");
+      TEST_EXIT_DBG(dofMap[level].count(dof0) == 0)("DOF %d already mapped on level %d!\n", dof0, level);
       
-      dofMap[0][dof0].local = dof1;
-      nLocalDofs[0]++;
-      nonRankDofs[0].insert(dof0);
+      dofMap[level][dof0].local = dof1;
+      nLocalDofs[level]++;
+      nonRankDofs[level].insert(dof0);
     }
     
     /// Checks if a given DOF is in the DOF mapping.
@@ -222,8 +222,7 @@ namespace AMDiS {
     void computeNonLocalIndices(int level);
 
   private:
-    /// MPI communicator object;
-    MPI::Intracomm mpiComm;
+    MeshLevelData *levelData;
 
     /// DOF communicators for all DOFs on interior boundaries.
     DofComm *sendDofs, *recvDofs;
@@ -232,9 +231,6 @@ namespace AMDiS {
     /// correct DOF communicator in \ref sendDofs and \ref recvDofs.
     const FiniteElemSpace *feSpace;
 
-    /// Number of mesh levels.
-    int nLevel;
-
     /// Mapping data from DOF indices to local and global indices.
     vector<DofMap> dofMap;
 
@@ -264,12 +260,12 @@ namespace AMDiS {
   {
   public:
     ParallelDofMapping() 
-      : sendDofs(NULL),
+      : levelData(NULL),
+	sendDofs(NULL),
 	recvDofs(NULL),
 	hasNonLocalDofs(false),
 	needMatIndex(false),
 	needMatIndexFromGlobal(false),
-	nLevel(1),
 	feSpaces(0),
 	nRankDofs(1),
 	nLocalDofs(1),
@@ -295,7 +291,7 @@ namespace AMDiS {
      * \param[in]  bNonLocalDofs      If true, at least one rank's mapping con-
     *                                tains DOFs that are not owned by the rank.
      */
-    void init(MPI::Intracomm m,
+    void init(MeshLevelData& mld,
 	      vector<const FiniteElemSpace*> &fe,
 	      vector<const FiniteElemSpace*> &uniqueFe,
 	      bool needGlobalMapping,
@@ -308,6 +304,14 @@
     /// about DOFs that are on interior boundaries.
     void setDofComm(DofComm &pSend, DofComm &pRecv);
 
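+    /// Enables the computation of matrix indices. If \p global is true, they
+    /// are computed from global, otherwise from local DOF indices.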
+    void setComputeMatIndex(bool b, bool global = false)
+    {
+      needMatIndex = b;
+      needMatIndexFromGlobal = global;
+    }
+
     /// Access the DOF mapping for a given FE space.
     inline FeSpaceDofMap& operator[](const FiniteElemSpace* feSpace)
     {
@@ -389,12 +393,6 @@
       ERROR_EXIT("MUST BE IMPLEMENTED!\n");
     }
 
-    void setComputeMatIndex(bool b, bool global = false)
-    {
-      needMatIndex = b;
-      needMatIndexFromGlobal = global;
-    }
-
     /// Compute local and global matrix indices.
     void computeMatIndex(bool globalIndex, int level);
 
@@ -415,8 +413,7 @@
     int computeStartDofs(int level);
 
   private:
-    /// MPI communicator object;
-    MPI::Intracomm mpiComm;
+    MeshLevelData *levelData;
 
     /// DOF communicators for all DOFs on interior boundaries.
     DofComm *sendDofs, *recvDofs;
@@ -436,8 +433,6 @@
     /// on global ones, otherwise on local DOF indices.
     bool needMatIndexFromGlobal;
 
-    int nLevel;
-
     /// Maps from FE space pointers to DOF mappings.
     map<const FiniteElemSpace*, FeSpaceDofMap> data;
 
diff --git a/AMDiS/src/parallel/PetscSolverFeti.cc b/AMDiS/src/parallel/PetscSolverFeti.cc
index 10416ebb171a6e72a2011b04799a10de09c9d875..d1e6df6269d6232364dcadd3f6a180d4e04b3c6e 100644
--- a/AMDiS/src/parallel/PetscSolverFeti.cc
+++ b/AMDiS/src/parallel/PetscSolverFeti.cc
@@ -228,13 +228,13 @@ namespace AMDiS {
     TEST_EXIT_DBG(meshLevel + 1 == meshDistributor->getMeshLevelData().getLevelNumber())
       ("Mesh hierarchy does not contain %d levels!\n", meshLevel + 1);
 
+    MeshLevelData& levelData = meshDistributor->getMeshLevelData();
+
     if (subDomainSolver == NULL) {
       if (meshLevel == 0) {
 	subDomainSolver = 
 	  new SubDomainSolver(meshDistributor, mpiComm, mpiSelfComm);
       } else {
-	MeshLevelData& levelData = meshDistributor->getMeshLevelData();
-
 	subDomainSolver = 
 	  new SubDomainSolver(meshDistributor, 
 			      levelData.getMpiComm(meshLevel - 1),
@@ -243,24 +243,29 @@ namespace AMDiS {
       }
     }
 
-    primalDofMap.init(mpiComm, feSpaces, meshDistributor->getFeSpaces(), 
+    primalDofMap.init(levelData,
+		      feSpaces, meshDistributor->getFeSpaces(), 
 		      true, true);
     primalDofMap.setComputeMatIndex(true);
     
-    dualDofMap.init(mpiComm, feSpaces, meshDistributor->getFeSpaces(),
+    dualDofMap.init(levelData, 
+		    feSpaces, meshDistributor->getFeSpaces(),
 		    false, false);
     dualDofMap.setComputeMatIndex(true);
 
-    lagrangeMap.init(mpiComm, feSpaces, meshDistributor->getFeSpaces(),
+    lagrangeMap.init(levelData, 
+		     feSpaces, meshDistributor->getFeSpaces(),
 		     true, true);
     lagrangeMap.setComputeMatIndex(true);
  
-    localDofMap.init(mpiComm, feSpaces, meshDistributor->getFeSpaces(),
+    localDofMap.init(levelData, 
+		     feSpaces, meshDistributor->getFeSpaces(),
 		     false, false);
     localDofMap.setComputeMatIndex(true);
 
     if (fetiPreconditioner == FETI_DIRICHLET) {
-      interiorDofMap.init(mpiComm, feSpaces,  meshDistributor->getFeSpaces(),
+      interiorDofMap.init(levelData, 
+			  feSpaces,  meshDistributor->getFeSpaces(),
 			  false, false);
       interiorDofMap.setComputeMatIndex(true);
     }
@@ -348,9 +353,10 @@
 
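+    // All FETI-DP specific DOF mappings are created on mesh level 0 only.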
     for (DofIndexSet::iterator it = primals.begin(); it != primals.end(); ++it)
       if (meshDistributor->getDofMap()[feSpace].isRankDof(*it))
-	primalDofMap[feSpace].insertRankDof(*it);
+	primalDofMap[feSpace].insertRankDof(0, *it);
       else
-  	primalDofMap[feSpace].insert(*it);
+  	primalDofMap[feSpace].insertNonRankDof(0, *it);
   }
 
 
@@ -366,7 +372,7 @@
     for (DofContainer::iterator it = allBoundaryDofs.begin();
 	 it != allBoundaryDofs.end(); ++it)
       if (!isPrimal(feSpace, **it))
-	dualDofMap[feSpace].insertRankDof(**it);
+	dualDofMap[feSpace].insertRankDof(0, **it);
   }
 
   
@@ -435,11 +441,11 @@
     for (DofMap::iterator it = dualMap.begin(); it != dualMap.end(); ++it) {
 
       if (meshDistributor->getDofMap()[feSpace].isRankDof(it->first)) {
-	lagrangeMap[feSpace].insertRankDof(it->first, nRankLagrange);
+	lagrangeMap[feSpace].insertRankDof(0, it->first, nRankLagrange);
 	int degree = boundaryDofRanks[feSpace][it->first].size();
 	nRankLagrange += (degree * (degree - 1)) / 2;
       } else {
-	lagrangeMap[feSpace].insert(it->first);
+	lagrangeMap[feSpace].insertNonRankDof(0, it->first);
       }
     }
     lagrangeMap[feSpace].nRankDofs[0] = nRankLagrange;
@@ -461,10 +467,10 @@
       if (admin->isDofFree(i) == false && 
 	  isPrimal(feSpace, i) == false &&
 	  dualDofMap[feSpace].isSet(i) == false) {
-	localDofMap[feSpace].insertRankDof(i, nLocalInterior);
+	localDofMap[feSpace].insertRankDof(0, i, nLocalInterior);
 
 	if (fetiPreconditioner != FETI_NONE)
-	  interiorDofMap[feSpace].insertRankDof(i, nLocalInterior);
+	  interiorDofMap[feSpace].insertRankDof(0, i, nLocalInterior);
 
 	nLocalInterior++;
       }
@@ -474,7 +480,7 @@
 
     for (DofMap::iterator it = dualDofMap[feSpace].getMap(0).begin();
 	 it != dualDofMap[feSpace].getMap(0).end(); ++it)
-      localDofMap[feSpace].insertRankDof(it->first);
+      localDofMap[feSpace].insertRankDof(0, it->first);
   }
 
 
diff --git a/AMDiS/src/parallel/SubDomainSolver.cc b/AMDiS/src/parallel/SubDomainSolver.cc
index 5086b5761f737ee1ccb6d54004424cd2bf7d04aa..c9fa411a379773e801b8be2f3d2ca5dc5abfb04f 100644
--- a/AMDiS/src/parallel/SubDomainSolver.cc
+++ b/AMDiS/src/parallel/SubDomainSolver.cc
@@ -262,6 +262,8 @@ namespace AMDiS {
   {
     FUNCNAME("SubDomainSolver::solveGlobal()");
 
+    ERROR_EXIT("BLUB!\n");
+
     Vec tmp;
     VecCreateSeq(PETSC_COMM_SELF, interiorMap->getRankDofs(level), &tmp);