From 78a18179e159512d645fdfd10026e5be74dd17d6 Mon Sep 17 00:00:00 2001
From: Thomas Witkowski <thomas.witkowski@gmx.de>
Date: Mon, 3 Dec 2012 22:15:20 +0000
Subject: [PATCH] Work on multi-level methods. NEVER, NEVER, NEVER USE THIS
 RELEASE, OTHERWISE YOU WILL BURN IN HELL.

---
 AMDiS/src/Operator.cc                         |   2 +-
 AMDiS/src/est/ResidualEstimator.cc            |   6 +-
 AMDiS/src/parallel/DofComm.cc                 |  91 +--
 AMDiS/src/parallel/DofComm.h                  |  77 +-
 AMDiS/src/parallel/ElementObjectDatabase.cc   |   8 +-
 AMDiS/src/parallel/ElementObjectDatabase.h    |   8 +-
 AMDiS/src/parallel/InteriorBoundary.cc        | 102 ++-
 AMDiS/src/parallel/InteriorBoundary.h         |  36 +-
 AMDiS/src/parallel/MatrixNnzStructure.cc      |   9 +-
 AMDiS/src/parallel/MatrixNnzStructure.h       |   4 +-
 AMDiS/src/parallel/MeshDistributor.cc         | 364 +++++-----
 AMDiS/src/parallel/MeshDistributor.h          | 144 ++--
 AMDiS/src/parallel/MeshLevelData.cc           |  62 +-
 AMDiS/src/parallel/MeshLevelData.h            |  80 +--
 AMDiS/src/parallel/MpiHelper.cc               |   8 +-
 AMDiS/src/parallel/MpiHelper.h                |  14 +-
 .../src/parallel/ParallelCoarseSpaceSolver.cc | 129 ++--
 .../src/parallel/ParallelCoarseSpaceSolver.h  |  22 +-
 AMDiS/src/parallel/ParallelDebug.cc           | 655 ++++++++++--------
 AMDiS/src/parallel/ParallelDebug.h            |   3 -
 AMDiS/src/parallel/ParallelDofMapping.cc      |  88 +--
 AMDiS/src/parallel/ParallelDofMapping.h       |  51 +-
 AMDiS/src/parallel/PetscProblemStat.cc        |   2 +-
 AMDiS/src/parallel/PetscSolver.cc             |  32 +-
 AMDiS/src/parallel/PetscSolver.h              |   5 -
 AMDiS/src/parallel/PetscSolverFeti.cc         | 310 ++++-----
 AMDiS/src/parallel/PetscSolverFeti.h          |   7 +-
 AMDiS/src/parallel/PetscSolverFetiDebug.cc    |  16 +-
 .../parallel/PetscSolverGlobalBlockMatrix.cc  |  10 +-
 AMDiS/src/parallel/PetscSolverGlobalMatrix.cc |  38 +-
 AMDiS/src/parallel/PetscSolverNavierStokes.cc |   2 +-
 AMDiS/src/parallel/PetscSolverSchur.cc        |  40 +-
 AMDiS/src/parallel/StdMpi.cc                  |  13 +-
 33 files changed, 1282 insertions(+), 1156 deletions(-)
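
Note (commentary only, ignored by git am): this patch replaces the fixed pair of
communicator objects (dofComm / dofCommSd) and the level-indexed LevelDataType
containers by per-level objects, MultiLevelDofComm and MultiLevelInteriorBoundary,
indexed by mesh level, with all MPI traffic routed through
MeshLevelData::getMpiComm(level). The sketch below is illustrative only and is not
part of the applied diff; it mirrors the per-level loop of the new
MeshDistributor::synchVector, and the helper name exchangeOnAllLevels as well as
the dummy payload are assumptions made for this note.

  // Illustrative sketch -- NOT part of the applied diff. It only uses
  // interfaces introduced by this patch (MultiLevelDofComm::operator[],
  // MeshLevelData::getNumberOfLevels()/getMpiComm(), DofComm::Iterator, StdMpi).
  void exchangeOnAllLevels(MeshLevelData &levelData,
                           MultiLevelDofComm &dofComm,
                           const FiniteElemSpace *feSpace)
  {
    int nLevels = levelData.getNumberOfLevels();
    for (int level = nLevels - 1; level >= 0; level--) {
      MPI::Intracomm &mpiComm = levelData.getMpiComm(level);
      if (mpiComm == MPI::COMM_SELF)    // purely local level, nothing to exchange
        continue;

      StdMpi<vector<double> > stdMpi(mpiComm);

      // Send one value per DOF on the interior boundaries owned by this rank.
      for (DofComm::Iterator it(dofComm[level].getSendDofs(), feSpace);
           !it.end(); it.nextRank()) {
        vector<double> dofs;
        for (; !it.endDofIter(); it.nextDof())
          dofs.push_back(0.0);          // real code collects DOF values here
        stdMpi.send(it.getRank(), dofs);
      }

      // Receive the corresponding values from the owning ranks.
      for (DofComm::Iterator it(dofComm[level].getRecvDofs(), feSpace);
           !it.end(); it.nextRank())
        stdMpi.recv(it.getRank());

      stdMpi.startCommunication();
    }
  }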

diff --git a/AMDiS/src/Operator.cc b/AMDiS/src/Operator.cc
index 7f192f36..900bbc07 100644
--- a/AMDiS/src/Operator.cc
+++ b/AMDiS/src/Operator.cc
@@ -190,7 +190,7 @@ namespace AMDiS {
 
 
   void Operator::addTerm(FirstOrderTerm *term,
-           FirstOrderType type)
+			 FirstOrderType type)
   {
     addFirstOrderTerm(term, type);
   }
diff --git a/AMDiS/src/est/ResidualEstimator.cc b/AMDiS/src/est/ResidualEstimator.cc
index c1995787..e105c863 100644
--- a/AMDiS/src/est/ResidualEstimator.cc
+++ b/AMDiS/src/est/ResidualEstimator.cc
@@ -172,13 +172,13 @@ namespace AMDiS {
     mesh->getDofIndexCoords(coords);
 
     InteriorBoundary &intBoundary = 
-      MeshDistributor::globalMeshDistributor->getIntBoundary();
+      MeshDistributor::globalMeshDistributor->getIntBoundary(0);
 
     ElInfo *elInfo = mesh->createNewElInfo();
     elInfo->setFillFlag(Mesh::FILL_COORDS);
 
-    StdMpi<vector<double> > stdMpiDet(MeshDistributor::globalMeshDistributor->getMpiComm());
-    StdMpi<vector<vector<WorldVector<double> > > > stdMpiGrdUh(MeshDistributor::globalMeshDistributor->getMpiComm());
+    StdMpi<vector<double> > stdMpiDet(MeshDistributor::globalMeshDistributor->getMpiComm(0));
+    StdMpi<vector<vector<WorldVector<double> > > > stdMpiGrdUh(MeshDistributor::globalMeshDistributor->getMpiComm(0));
 
     RankToBoundMap allBounds = intBoundary.getOther();
     allBounds.insert(intBoundary.getOwn().begin(), intBoundary.getOwn().end());
diff --git a/AMDiS/src/parallel/DofComm.cc b/AMDiS/src/parallel/DofComm.cc
index 7344ca14..ba020791 100644
--- a/AMDiS/src/parallel/DofComm.cc
+++ b/AMDiS/src/parallel/DofComm.cc
@@ -16,32 +16,22 @@
 #include "FiniteElemSpace.h"
 #include "Debug.h"
 #include "ElementDofIterator.h"
+#include "DOFVector.h"
 
 namespace AMDiS {
 
   using namespace std;
 
 
-  void DofComm::init(int level, 
-		     MeshLevelData &ld,
-		     vector<const FiniteElemSpace*> &fe)
+  void DofComm::init(vector<const FiniteElemSpace*> &fe)
   {
     FUNCNAME("DofComm::init()");
     
-    meshLevel = level;
-    levelData = &ld;
     feSpaces = fe;
     
-    nLevel = levelData->getLevelNumber() - meshLevel;
-    TEST_EXIT_DBG(nLevel >= 1)("Should not happen!\n");
-
     sendDofs.clear();
     recvDofs.clear();
-    periodicDofs.clear();
-    
-    sendDofs.resize(nLevel);
-    recvDofs.resize(nLevel);
-    periodicDofs.resize(nLevel);
+    periodicDofs.clear();   
   }
 
 
@@ -55,40 +45,36 @@ namespace AMDiS {
 
 
   void DofComm::createContainer(RankToBoundMap &boundary,
-				LevelDataType &data)
+				DataType &data)
   {
     FUNCNAME("DofComm::createContainer()");
 
     // === Fill data. ===
 
     for (unsigned int i = 0; i < feSpaces.size(); i++)
-      for (int level = 0; level < nLevel; level++)
-		for (InteriorBoundary::iterator it(boundary, level); !it.end(); ++it){
-		  it->rankObj.el->getAllDofs(feSpaces[i], it->rankObj, 
-					     data[level][it.getRank()][feSpaces[i]]);
-		}
+      for (InteriorBoundary::iterator it(boundary); !it.end(); ++it)
+	it->rankObj.el->getAllDofs(feSpaces[i], it->rankObj, 
+				   data[it.getRank()][feSpaces[i]]);
 
 
     // === Remove empty data containers. ===
 
-    for (unsigned int i = 0; i < data.size(); i++) {
-      	DataIter dit = data[i].begin();
-      	while (dit != data[i].end()) {
-			FeMapIter it = dit->second.begin();
-			while (it != dit->second.end()) {
-			  if (it->second.size() == 0) {
-			    const FiniteElemSpace *fe = it->first;
-			    ++it;
-			    dit->second.erase(fe);
-			  } else
-			    ++it;
-			}
-	
-			if (dit->second.size() == 0)
-			  data[i].erase(dit++);
-			else
-			  ++dit;
-    	}
+    DataIter dit = data.begin();
+    while (dit != data.end()) {
+      FeMapIter it = dit->second.begin();
+      while (it != dit->second.end()) {
+	if (it->second.size() == 0) {
+	  const FiniteElemSpace *fe = it->first;
+	  ++it;
+	  dit->second.erase(fe);
+	} else
+	  ++it;
+      }
+      
+      if (dit->second.size() == 0)
+	data.erase(dit++);
+      else
+	++dit;
     }
   }
 
@@ -101,20 +87,16 @@ namespace AMDiS {
   }
 
   
-  int DofComm::getNumberDofs(LevelDataType &data, 
-			     int level, 
+  int DofComm::getNumberDofs(DataType &data, 
 			     const FiniteElemSpace *feSpace,
 			     bool countDouble)
   {
     FUNCNAME("DofComm::getNumberDofs()");
 
-    TEST_EXIT_DBG(level < data.size())("Should not happen!\n");
-
     DofContainerSet dofSet;
     DofContainer dofVec;
 
-    for (DataIter rankIt = data[level].begin(); 
-	 rankIt != data[level].end(); ++rankIt)
+    for (DataIter rankIt = data.begin(); rankIt != data.end(); ++rankIt)
       for (FeMapIter feIt = rankIt->second.begin();
 	   feIt != rankIt->second.end(); ++feIt)
 	if (feIt->first == feSpace)
@@ -133,7 +115,7 @@ namespace AMDiS {
   {
     FUNCNAME("DofComm::Iterator::setNextFeMap()");
 
-    if (dataIter != data[traverseLevel].end()) {
+    if (dataIter != data.end()) {
       TEST_EXIT_DBG(dataIter->second.size())("Should not happen!\n");
 
       feMapIter = dataIter->second.begin();
@@ -158,4 +140,25 @@ namespace AMDiS {
     return true;
   }
 
+
+  void MultiLevelDofComm::init(MeshLevelData &levelData,
+			       vector<const FiniteElemSpace*> &fe)
+  {
+    FUNCNAME("MultiLevelDofComm::init()");
+
+    int nLevel = levelData.getNumberOfLevels();
+    for (int level = 0; level < nLevel; level++)
+      levelDofComm[level].init(fe);
+  }
+
+  
+  void MultiLevelDofComm::create(MultiLevelInteriorBoundary &boundary)
+  {
+    FUNCNAME("MultiLevelDofComm::create()");
+
+    for (map<int, DofComm>::iterator it = levelDofComm.begin();
+	 it != levelDofComm.end(); ++it)
+      it->second.create(boundary[it->first]);
+  }
+
 }
diff --git a/AMDiS/src/parallel/DofComm.h b/AMDiS/src/parallel/DofComm.h
index 5afe98d7..42c98e48 100644
--- a/AMDiS/src/parallel/DofComm.h
+++ b/AMDiS/src/parallel/DofComm.h
@@ -37,38 +37,28 @@ namespace AMDiS {
   {
   public:
     DofComm() 
-      : recvDofs(1),
-	sendDofs(1),
-	periodicDofs(0),
-	meshLevel(-1),
-	nLevel(0),
-	levelData(NULL)
-    {}
+      {}
     
     typedef map<const FiniteElemSpace*, DofContainer> FeMapType;
     typedef FeMapType::iterator FeMapIter;
     typedef map<int, FeMapType> DataType;
     typedef DataType::iterator DataIter;
-    // meshLevel: map[rank -> map[feSpace -> DofContainer]]
-    typedef vector<DataType> LevelDataType;
 
-    void init(int level, 
-	      MeshLevelData &levelData, 
-	      vector<const FiniteElemSpace*> &fe);
+    void init(vector<const FiniteElemSpace*> &fe);
 
     void create(InteriorBoundary &boundary);
 
-    LevelDataType& getSendDofs()
+    DataType& getSendDofs()
     {
       return sendDofs;
     }
 
-    LevelDataType& getRecvDofs()
+    DataType& getRecvDofs()
     {
       return recvDofs;
     }
 
-    LevelDataType& getPeriodicDofs()
+    DataType& getPeriodicDofs()
     {
       return periodicDofs;
     }
@@ -76,35 +66,29 @@ namespace AMDiS {
     // Writes all data of this object to an output stream.
     void serialize(ostream &out);
 
-    int getNumberDofs(LevelDataType &data, 
-		      int level, 
+    int getNumberDofs(DataType &data, 
 		      const FiniteElemSpace *feSpace,
 		      bool countDouble = false);
 
   protected:
-    void createContainer(RankToBoundMap &boundary, LevelDataType &data);
+    void createContainer(RankToBoundMap &boundary, 
+			 DataType &data);
 
   protected:
     /// This map contains for each rank the list of DOFs the current rank must
     /// send to exchange solution DOFs at the interior boundaries.
-    LevelDataType sendDofs;
+    DataType sendDofs;
 
     /// This map contains on each rank the list of DOFs from which the current 
     /// rank will receive DOF values (i.e., these are all DOFs at an interior 
     /// boundary). The DOF indices are given in rank's local numbering.
-    LevelDataType recvDofs;
+    DataType recvDofs;
 
     /// This map contains on each rank a list of DOFs along the interior bound-
     /// aries to communicate with other ranks. The DOF indices are given in rank's
     /// local numbering. Periodic boundaries within one subdomain are not 
     /// considered here. 
-    LevelDataType periodicDofs;
-
-    int meshLevel;
-
-    int nLevel;
-
-    MeshLevelData *levelData;
+    DataType periodicDofs;
 
     vector<const FiniteElemSpace*> feSpaces;
 
@@ -114,24 +98,11 @@ namespace AMDiS {
     class Iterator
     {
     public:
-      Iterator(LevelDataType &d,
-	       const FiniteElemSpace *fe = NULL)
-	: data(d),
-	  dofCounter(-1),
-	  traverseFeSpace(fe),
-	  traverseLevel(0),
-	  removedDof(false)
-      {
-	goFirst();
-      }
-
-      Iterator(LevelDataType &d,
-	       int level,
+      Iterator(DataType &d,
 	       const FiniteElemSpace *fe = NULL)
 	: data(d),
 	  dofCounter(-1),
 	  traverseFeSpace(fe),
-	  traverseLevel(level),
 	  removedDof(false)
       {
 	goFirst();
@@ -139,7 +110,7 @@ namespace AMDiS {
 
       inline bool end()
       {
-	return (dataIter == data[traverseLevel].end());
+	return (dataIter == data.end());
       }
       
       inline void nextRank()
@@ -242,7 +213,7 @@ namespace AMDiS {
     protected:
       void goFirst()
       {
-	dataIter = data[traverseLevel].begin();
+	dataIter = data.begin();
 
 	while (setNextFeMap() == false)
 	  ++dataIter;
@@ -251,7 +222,7 @@ namespace AMDiS {
       bool setNextFeMap();
 
     protected:
-      LevelDataType &data;
+      DataType &data;
       
       DofComm::DataIter dataIter;
       
@@ -263,12 +234,26 @@ namespace AMDiS {
 
       const FiniteElemSpace *traverseFeSpace;
 
-      int traverseLevel;
-
       bool removedDof;
     };
 
+  };
+
+  
+  class MultiLevelDofComm {
+  public:
+    void init(MeshLevelData &levelData,
+	      vector<const FiniteElemSpace*> &fe);
+
+    void create(MultiLevelInteriorBoundary &boundary);
+
+    inline DofComm& operator[](int level) 
+    {
+      return levelDofComm[level];
+    }
 
+  private:
+    map<int, DofComm> levelDofComm;
   };
 
 }
diff --git a/AMDiS/src/parallel/ElementObjectDatabase.cc b/AMDiS/src/parallel/ElementObjectDatabase.cc
index 018ec42f..0a2164a0 100644
--- a/AMDiS/src/parallel/ElementObjectDatabase.cc
+++ b/AMDiS/src/parallel/ElementObjectDatabase.cc
@@ -722,11 +722,13 @@ namespace AMDiS {
   int ElementObjectDatabase::getOwner(vector<ElementObjectData>& objData, 
 				      int level)
   {
+    FUNCNAME("ElementObjectDatabase::getOwner()");
+
     int owner = -1;
 
     std::set<int> &levelRanks = levelData->getLevelRanks(level);
-    bool allRanks = (levelRanks.size() == 1 && *(levelRanks.begin()) == -1);
-    
+    bool allRanks = (level == 0);
+   
     for (vector<ElementObjectData>::iterator it = objData.begin();
 	 it != objData.end(); ++it) {
       int elRank = (*macroElementRankMap)[it->elIndex];
@@ -744,7 +746,7 @@ namespace AMDiS {
   {
     FUNCNAME("ElementObjectDatabase::getIterateMaxLevel()");
 
-    int nLevel = levelData->getLevelNumber();
+    int nLevel = levelData->getNumberOfLevels();
     TEST_EXIT_DBG(nLevel > 0)("Should not happen!\n");
 
     vector<std::set<int> > ranksInLevel;
diff --git a/AMDiS/src/parallel/ElementObjectDatabase.h b/AMDiS/src/parallel/ElementObjectDatabase.h
index 523d78da..302e4f26 100644
--- a/AMDiS/src/parallel/ElementObjectDatabase.h
+++ b/AMDiS/src/parallel/ElementObjectDatabase.h
@@ -143,6 +143,12 @@ namespace AMDiS {
     /// All data from the database is dropped. 
     void clear();
 
+    /// Resets the iteration state.
+    void resetIterator()
+    {
+      iterGeoPos = CENTER;
+    }
+
     /** \brief
      * Iterates over all elements for one geometrical index, i.e., over all
      * vertices, edges or faces in the mesh. The function returns true, if the
@@ -151,7 +157,7 @@ namespace AMDiS {
      * \param[in]  pos   Must be either VERTEX, EDGE or FACE and defines the
      *                   elements that should be traversed.
      */
-    bool iterate(GeoIndex pos)
+    inline bool iterate(GeoIndex pos)
     {
       // CENTER marks the variable "iterGeoPos" to be in an undefined state. I.e.,
       // there is no iteration that is actually running.
diff --git a/AMDiS/src/parallel/InteriorBoundary.cc b/AMDiS/src/parallel/InteriorBoundary.cc
index cdfcbcec..f285a66c 100644
--- a/AMDiS/src/parallel/InteriorBoundary.cc
+++ b/AMDiS/src/parallel/InteriorBoundary.cc
@@ -35,19 +35,24 @@ namespace AMDiS {
 
     Mesh *mesh = elObjDb.getMesh();
     TEST_EXIT_DBG(mesh)("Should not happen!\n");
-    TEST_EXIT_DBG(level < levelData.getLevelNumber())("Should not happen!\n");
+    TEST_EXIT_DBG(level < levelData.getNumberOfLevels())
+      ("Should not happen!\n");
 
     MPI::Intracomm mpiComm = levelData.getMpiComm(level);
+
+    if (mpiComm == MPI::COMM_SELF)
+      return;
+
     int levelMpiRank = mpiComm.Get_rank();
     int globalMpiRank = MPI::COMM_WORLD.Get_rank();
     std::set<int> levelRanks = levelData.getLevelRanks(level);
-    bool allRanks = (levelRanks.size() == 1 && *(levelRanks.begin()) == -1);
 
     // === Create interior boundary data structure. ===
     
     for (int geoPos = 0; geoPos < mesh->getDim(); geoPos++) {
       GeoIndex geoIndex = INDEX_OF_DIM(geoPos, mesh->getDim());
 
+      elObjDb.resetIterator();
       while (elObjDb.iterate(geoIndex)) {
 	flat_map<int, ElementObjectData>& objData = elObjDb.getIterateData();
 
@@ -58,17 +63,22 @@ namespace AMDiS {
 	// Test, if the boundary object defines an interior boundary within the
 	// ranks of the MPI group. If not, go to next element.
 	bool boundaryWithinMpiGroup = false;
-	if (allRanks) {
+	if (levelData.getNumberOfLevels() == 1) {
 	  boundaryWithinMpiGroup = true;
 	} else {
+	  TEST_EXIT_DBG(level + 1 < levelData.getNumberOfLevels())
+	    ("Level = %d    but number of levels = %d\n",
+	     level, levelData.getNumberOfLevels());
+
 	  for (flat_map<int, ElementObjectData>::iterator it = objData.begin();
 	       it != objData.end(); ++it) {
-	    if (it->first != globalMpiRank && levelRanks.count(it->first)) {
+	    if (!levelData.rankInLevel(it->first, level + 1)) {
 	      boundaryWithinMpiGroup = true;
 	      break;
 	    }
 	  }
 	}
+	
 	if (!boundaryWithinMpiGroup)
 	  continue;
 
@@ -100,7 +110,7 @@ namespace AMDiS {
 	    if (it2->first == globalMpiRank)
 	      continue;
 
-	    if (!allRanks && levelRanks.count(it2->first) == 0)
+	    if (!levelData.rankInLevel(it2->first, level))
 	      continue;
 
 	    bound.neighObj.el = elObjDb.getElementPtr(it2->second.elIndex);
@@ -111,7 +121,8 @@ namespace AMDiS {
 	    
 	    bound.type = INTERIOR;
 
-	    AtomicBoundary& b = getNewOwn(it2->first);
+	    int rankInLevel = levelData.mapRank(it2->first, 0, level);
+	    AtomicBoundary& b = getNewOwn(rankInLevel);
 	    b = bound;
 	    if (geoIndex == EDGE)
 	      b.neighObj.reverseMode = 
@@ -134,8 +145,9 @@ namespace AMDiS {
 	  bound.neighObj.ithObj = ownerBoundEl.ithObject;
 	  
 	  bound.type = INTERIOR;
-	  
-	  AtomicBoundary& b = getNewOther(owner);
+
+	  int rankInLevel = levelData.mapRank(owner, 0, level);	  
+	  AtomicBoundary& b = getNewOther(rankInLevel);
 	  b = bound;	    
 	  if (geoIndex == EDGE)
 	    b.rankObj.reverseMode =
@@ -224,23 +236,26 @@ namespace AMDiS {
 	  // interior database.
 
 	  if (owner == globalMpiRank) {
-	    AtomicBoundary& b = getNewOwn(otherElementRank);	  
+	    int rankInLevel = levelData.mapRank(otherElementRank, 0, level);
+	    AtomicBoundary& b = getNewOwn(rankInLevel);	  
 	    b = bound;	  
 	  } else {
-	    ElementObjectData& ownerBoundEl = objData[owner];    
+	    ElementObjectData& ownerBoundEl = objData[owner];
 	    bound.neighObj.el = elObjDb.getElementPtr(ownerBoundEl.elIndex);
 	    bound.neighObj.elIndex = ownerBoundEl.elIndex;
 	    bound.neighObj.elType = -1;
 	    bound.neighObj.ithObj = ownerBoundEl.ithObject;
 
-	    AtomicBoundary& b = getNewOther(owner);
+	    int rankInLevel = levelData.mapRank(owner, 0, level);
+	    AtomicBoundary& b = getNewOther(rankInLevel);
 	    b = bound;
 	  }
 
 	} else {
 	  bound.type = it->second;
 	  
-	  AtomicBoundary& b = getNewPeriodic(otherElementRank);
+	  int rankInLevel = levelData.mapRank(otherElementRank, 0, level);
+	  AtomicBoundary& b = getNewPeriodic(rankInLevel);
 	  b = bound;	  
 	}
       }
@@ -293,10 +308,12 @@ namespace AMDiS {
 	  }
 
 	  if (owner == globalMpiRank) {
-	    AtomicBoundary& b = getNewOwn(otherElementRank); 
+	    int rankInLevel = levelData.mapRank(otherElementRank, 0, level);
+	    AtomicBoundary& b = getNewOwn(rankInLevel); 
 	    b = bound;	  
 	  } else {
-	    AtomicBoundary& b = getNewOther(owner);
+	    int rankInLevel = levelData.mapRank(owner, 0, level);
+	    AtomicBoundary& b = getNewOther(rankInLevel);
 	    b = bound;
 	    
 	    ElementObjectData& ownerBoundEl = objData[owner];    
@@ -309,8 +326,9 @@ namespace AMDiS {
 	  }
 	} else {
 	  bound.type = it->second;
-	  
-	  AtomicBoundary& b = getNewPeriodic(otherElementRank);
+
+	  int rankInLevel = levelData.mapRank(otherElementRank, 0, level);
+	  AtomicBoundary& b = getNewPeriodic(rankInLevel);
 	  b = bound;
 	  
 	  if (globalMpiRank > otherElementRank)
@@ -369,10 +387,12 @@ namespace AMDiS {
 	  ElementObjectData& rankBoundEl = objData[globalMpiRank];
 
 	  if (owner == globalMpiRank) {
-	    AtomicBoundary& b = getNewOwn(otherElementRank);
+	    int rankInLevel = levelData.mapRank(otherElementRank, 0, level);
+	    AtomicBoundary& b = getNewOwn(rankInLevel);
 	    b = bound;	  
 	  } else {
-	    AtomicBoundary& b = getNewOther(owner);
+	    int rankInLevel = levelData.mapRank(owner, 0, level);
+	    AtomicBoundary& b = getNewOther(rankInLevel);
 	    b = bound;
 	    
 	    ElementObjectData& ownerBoundEl = objData[owner];    
@@ -386,7 +406,8 @@ namespace AMDiS {
 	} else {
 	  bound.type = it->second;
 	  
-	  AtomicBoundary& b = getNewPeriodic(otherElementRank);
+	  int rankInLevel = levelData.mapRank(otherElementRank, 0, level);
+	  AtomicBoundary& b = getNewPeriodic(rankInLevel);
 	  b = bound;
 	  
 	  if (globalMpiRank > otherElementRank)
@@ -406,18 +427,8 @@ namespace AMDiS {
     // === share the boundary.                                                ===
 
     StdMpi<vector<AtomicBoundary> > stdMpi(mpiComm);
-    if (level == 0) {
-      stdMpi.send(own);
-      stdMpi.recv(other);
-    } else {
-      for (RankToBoundMap::iterator rankIt = own.begin();	 
-	   rankIt != own.end(); ++rankIt)
-	stdMpi.send(levelData.mapRank(rankIt->first, 0, level), rankIt->second);
-      for (RankToBoundMap::iterator rankIt = other.begin();
-	   rankIt != other.end(); ++rankIt)
-	stdMpi.recv(levelData.mapRank(rankIt->first, 0, level));
-    }
-
+    stdMpi.send(own);
+    stdMpi.recv(other);
     stdMpi.startCommunication();
 
 
@@ -430,8 +441,6 @@ namespace AMDiS {
 	 rankIt != other.end(); ++rankIt) {
 
       int rank = rankIt->first;
-      if (level > 0)
-	rank = levelData.mapRank(rank, 0, level);
 
       // === We have received from "rank" the ordered list of element       ===
       // === indices. Now, we have to sort the corresponding list in this   ===
@@ -819,4 +828,31 @@ namespace AMDiS {
     }
   }
 
+
+  void MultiLevelInteriorBoundary::create(MeshLevelData &levelData,
+					  ElementObjectDatabase &elObjDb)
+  {
+    FUNCNAME(" MultLevelInteriorBoundary::create()");
+
+    levelIntBound.clear();
+
+    int nLevel = levelData.getNumberOfLevels();
+    for (int level = 0; level < nLevel; level++)
+      levelIntBound[level].create(levelData, level, elObjDb);
+  }
+
+
+  void MultiLevelInteriorBoundary::serialize(ostream &out)
+  {
+    FUNCNAME("MultiLevelInteriorBoundary::serialize()");
+    ERROR_EXIT("Not yet implemented!\n");
+  }
+
+
+  void MultiLevelInteriorBoundary::deserialize(istream &in, Mesh *mesh)
+  {
+    FUNCNAME("MultiLevelInteriorBoundary::deserialize()");
+    ERROR_EXIT("Not yet implemented!\n");
+  }
+
 }
diff --git a/AMDiS/src/parallel/InteriorBoundary.h b/AMDiS/src/parallel/InteriorBoundary.h
index d763459c..a06f8e41 100644
--- a/AMDiS/src/parallel/InteriorBoundary.h
+++ b/AMDiS/src/parallel/InteriorBoundary.h
@@ -133,9 +133,9 @@ namespace AMDiS {
     /// Iterator for the interior boundary object.
     class iterator {      
     public:
-      iterator(RankToBoundMap &b, int traverseLevel = 0)
+      iterator(RankToBoundMap &b)
 	: bound(b),
-	  level(traverseLevel)
+	  level(0)
       {
 	reset();
       }
@@ -156,9 +156,13 @@ namespace AMDiS {
       /// Move iterator to the next position.
       void operator++()
       {
+#if 0
 	do {
 	  ++vecIt;
 	} while (vecIt != mapIt->second.end() && vecIt->maxLevel < level);
+#else
+	++vecIt;
+#endif
 
 	if (vecIt == mapIt->second.end()) {
 	  ++mapIt;
@@ -206,9 +210,11 @@ namespace AMDiS {
 	  vecIt = mapIt->second.begin();
 	  
 	  // Search for the next atomic boundary on the mesh level
+#if 0
 	  while (vecIt != mapIt->second.end() && vecIt->maxLevel < level)
 	    ++vecIt;
-	  
+#endif
+  
 	  // If vector iterator is not at the end, we have found one and
 	  // can return.
 	  if (vecIt != mapIt->second.end())
@@ -230,6 +236,30 @@ namespace AMDiS {
       int level;     
     };
   };
+
+  
+  class MultiLevelInteriorBoundary {
+  public:
+    void create(MeshLevelData &levelData,
+		ElementObjectDatabase &elObjDb);
+
+    inline InteriorBoundary& operator[](int level) 
+    {
+      TEST_EXIT_DBG(levelIntBound.count(level))("Should not happen!\n");
+
+      return levelIntBound[level];
+    }
+
+    /// Writes this object to a file.
+    void serialize(ostream &out);
+
+    /// Reads the state of an interior boundary from a file.
+    void deserialize(istream &in, Mesh *mesh);
+
+  private:
+    map<int, InteriorBoundary> levelIntBound;
+  };
+
 }
 
 #endif // AMDIS_INTERIORBOUNDARY_H
diff --git a/AMDiS/src/parallel/MatrixNnzStructure.cc b/AMDiS/src/parallel/MatrixNnzStructure.cc
index 47df0191..4f9e65b7 100644
--- a/AMDiS/src/parallel/MatrixNnzStructure.cc
+++ b/AMDiS/src/parallel/MatrixNnzStructure.cc
@@ -39,7 +39,6 @@ namespace AMDiS {
 
 
   void MatrixNnzStructure::create(Matrix<DOFMatrix*> &mat,
-				  MPI::Intracomm mpiComm,
 				  ParallelDofMapping &rowDofMap,
 				  ParallelDofMapping &colDofMap,
 				  PeriodicMap *perMap,
@@ -272,14 +271,14 @@ namespace AMDiS {
 
     if (localMatrix == false) {
       // === Send and recv the nnz row structure to/from other ranks. ===
-      
-      StdMpi<MatrixNnzEntry> stdMpi(mpiComm, true);
+     
+      StdMpi<MatrixNnzEntry> stdMpi(rowDofMap.getMpiComm());
       stdMpi.send(sendMatrixEntry);
+
       for (std::set<int>::iterator it = recvFromRank.begin(); 
 	   it != recvFromRank.end(); ++it)
 	stdMpi.recv(*it);
-      stdMpi.startCommunication();
-      
+      stdMpi.startCommunication();     
      
       // === Evaluate the nnz structure this rank got from other ranks and add ===
       // === it to the PETSc nnz data structure.                               ===
diff --git a/AMDiS/src/parallel/MatrixNnzStructure.h b/AMDiS/src/parallel/MatrixNnzStructure.h
index cb9441e5..8e03191b 100644
--- a/AMDiS/src/parallel/MatrixNnzStructure.h
+++ b/AMDiS/src/parallel/MatrixNnzStructure.h
@@ -43,7 +43,6 @@ namespace AMDiS {
     void clear();
 
     void create(Matrix<DOFMatrix*> &mat,
-		MPI::Intracomm mpiComm,
 		ParallelDofMapping &rowDofMap,
 		ParallelDofMapping &colDofMap,
 		PeriodicMap *perMap,
@@ -51,13 +50,12 @@ namespace AMDiS {
 		bool localMatrix = false);
 
     void create(Matrix<DOFMatrix*> &mat,
-		MPI::Intracomm mpiComm,
 		ParallelDofMapping &dofMap,
 		PeriodicMap *perMap,
 		const ElementObjectDatabase& elObjDb,
 		bool localMatrix = false)
     {
-      create(mat, mpiComm, dofMap, dofMap, perMap, elObjDb, localMatrix);
+      create(mat, dofMap, dofMap, perMap, elObjDb, localMatrix);
     }
 
   protected:
diff --git a/AMDiS/src/parallel/MeshDistributor.cc b/AMDiS/src/parallel/MeshDistributor.cc
index c82c2884..4f7a2a87 100644
--- a/AMDiS/src/parallel/MeshDistributor.cc
+++ b/AMDiS/src/parallel/MeshDistributor.cc
@@ -94,11 +94,10 @@ namespace AMDiS {
       hasPeriodicBoundary(false),
       printTimings(false)
   {
-    FUNCNAME("MeshDistributor::ParalleDomainBase()");
+    FUNCNAME("MeshDistributor::MeshDistributor()");
 
-    mpiComm = MPI::COMM_WORLD;
+    MPI::Intracomm &mpiComm = MPI::COMM_WORLD;
     mpiRank = mpiComm.Get_rank();
-    mpiSize = mpiComm.Get_size();
 
     Parameters::get(name + "->repartitioning", repartitioningAllowed);
     Parameters::get(name + "->debug output dir", debugOutputDir);
@@ -157,7 +156,7 @@ namespace AMDiS {
     MSG("Initialization phase 1 needed %.5f seconds\n", 
 	first - ParallelProblemStatBase::initTimeStamp);
 
-    TEST_EXIT(mpiSize > 1)
+    TEST_EXIT(MPI::COMM_WORLD.Get_size() > 1)
       ("Parallelization does not work with only one process!\n");
     TEST_EXIT(feSpaces.size() > 0)
       ("No FE space has been defined for the mesh distributor!\n");
@@ -288,6 +287,7 @@ namespace AMDiS {
 	 it != mesh->getPeriodicAssociations().end(); ++it)
       const_cast<DOFAdmin&>(mesh->getDofAdmin(0)).removeDOFContainer(dynamic_cast<DOFContainer*>(it->second));
 
+
     updateLocalGlobalNumbering();
 
 
@@ -606,62 +606,67 @@ namespace AMDiS {
       elInfo = stack.traverseNext(elInfo);
     }
 
-    TEST_EXIT(nMacroElements >= mpiSize)
+    TEST_EXIT(nMacroElements >= MPI::COMM_WORLD.Get_size())
       ("The mesh has less macro elements than number of mpi processes!\n");
   }
 
 
-  void MeshDistributor::synchVector(SystemVector &vec, int level)
+  void MeshDistributor::synchVector(SystemVector &vec)
   {
     FUNCNAME("MeshDistributor::synchVector()");
+   
+    int nLevels = levelData.getNumberOfLevels();
+    for (int level = nLevels - 1; level >= 0; level--) {
+      MPI::Intracomm &mpiComm = levelData.getMpiComm(level);
+      
+      if (mpiComm == MPI::COMM_SELF)
+	continue;
 
-    TEST_EXIT_DBG(level >= 0 && level <= 1)("Wrong level number!\n");
-
-    MPI::Intracomm &levelComm = levelData.getMpiComm(level);
-    DofComm &dc = (level == 0 ? dofComm : dofCommSd);
-
-    StdMpi<vector<double> > stdMpi(levelComm);
-
-    for (DofComm::Iterator it(dc.getSendDofs()); 
-	 !it.end(); it.nextRank()) {
-      vector<double> dofs;
+      StdMpi<vector<double> > stdMpi(mpiComm);
 
-      for (int i = 0; i < vec.getSize(); i++) {
-	DOFVector<double> &dofVec = *(vec.getDOFVector(i));
-	for (it.beginDofIter(vec.getFeSpace(i)); !it.endDofIter(); it.nextDof())
-	  dofs.push_back(dofVec[it.getDofIndex()]);
+      for (DofComm::Iterator it(dofComm[level].getSendDofs()); 
+	   !it.end(); it.nextRank()) {
+	vector<double> dofs;
+	
+	for (int i = 0; i < vec.getSize(); i++) {
+	  DOFVector<double> &dofVec = *(vec.getDOFVector(i));
+	  for (it.beginDofIter(vec.getFeSpace(i)); !it.endDofIter(); it.nextDof())
+	    dofs.push_back(dofVec[it.getDofIndex()]);
+	}
+	
+	int rank = it.getRank();
+	// if (level > 0)
+	//   rank = levelData.mapRank(rank, 0, level);
+	stdMpi.send(rank, dofs);
       }
-
-      int rank = it.getRank();
-      if (level > 0)
-	rank = levelData.mapRank(rank, 0, level);
-      stdMpi.send(rank, dofs);
-    }
-	   
-    for (DofComm::Iterator it(dc.getRecvDofs()); !it.end(); it.nextRank()) {
-      int rank = it.getRank();
-      if (level > 0)
-	rank = levelData.mapRank(rank, 0, level);
-      stdMpi.recv(rank);
-    }
-
-    stdMpi.startCommunication();
-
-    for (DofComm::Iterator it(dc.getRecvDofs()); !it.end(); it.nextRank()) {
-      int rank = it.getRank();
-      if (level > 0)
-	rank = levelData.mapRank(rank, 0, level);
-
-      int counter = 0;
-      vector<double> &recvData =  stdMpi.getRecvData(rank);
-
-      for (int i = 0; i < vec.getSize(); i++) {
-	DOFVector<double> &dofVec = *(vec.getDOFVector(i));
-
-	for (it.beginDofIter(vec.getFeSpace(i)); !it.endDofIter(); it.nextDof()) {
-	  TEST_EXIT_DBG(counter < recvData.size())
-	    ("Recv data from rank %d has only %d entries!\n", rank, recvData.size());
-	  dofVec[it.getDofIndex()] = recvData[counter++];
+      
+      for (DofComm::Iterator it(dofComm[level].getRecvDofs()); 
+	   !it.end(); it.nextRank()) {
+	int rank = it.getRank();
+	// if (level > 0)
+	//   rank = levelData.mapRank(rank, 0, level);
+	stdMpi.recv(rank);
+      }
+      
+      stdMpi.startCommunication();
+      
+      for (DofComm::Iterator it(dofComm[level].getRecvDofs()); 
+	   !it.end(); it.nextRank()) {
+	int rank = it.getRank();
+	// if (level > 0)
+	//   rank = levelData.mapRank(rank, 0, level);
+	
+	int counter = 0;
+	vector<double> &recvData =  stdMpi.getRecvData(rank);
+	
+	for (int i = 0; i < vec.getSize(); i++) {
+	  DOFVector<double> &dofVec = *(vec.getDOFVector(i));
+	  
+	  for (it.beginDofIter(vec.getFeSpace(i)); !it.endDofIter(); it.nextDof()) {
+	    TEST_EXIT_DBG(counter < recvData.size())
+	      ("Recv data from rank %d has only %d entries!\n", rank, recvData.size());
+	    dofVec[it.getDofIndex()] = recvData[counter++];
+	  }
 	}
       }
     }
@@ -830,10 +835,10 @@ namespace AMDiS {
     FUNCNAME("MeshDistributor::getAllBoundaryDofs()");
 
     DofContainerSet dofSet;
-    for (DofComm::Iterator it(dofComm.getSendDofs(), level, feSpace); 
+    for (DofComm::Iterator it(dofComm[level].getSendDofs(), feSpace); 
 	 !it.end(); it.nextRank())
       dofSet.insert(it.getDofs().begin(), it.getDofs().end());
-    for (DofComm::Iterator it(dofComm.getRecvDofs(), level, feSpace); 
+    for (DofComm::Iterator it(dofComm[level].getRecvDofs(), feSpace); 
 	 !it.end(); it.nextRank())
       dofSet.insert(it.getDofs().begin(), it.getDofs().end());
 
@@ -901,78 +906,95 @@ namespace AMDiS {
 
   void MeshDistributor::createMeshLevelStructure()
   {
-    FUNCNAME("MeshDistributor::createMeshLevelStructure()");  
+    FUNCNAME("MeshDistributor::createMeshLevelStructure()");
 
-    if (mpiSize != 16)
+    int levelMode = -1;
+    Parameters::get("parallel->level mode", levelMode);
+
+    TEST_EXIT(levelMode >= 0)("Wrong level mode %d!\n", levelMode);
+
+    if (levelMode == 0) {
+      std::set<int> neighbours;
+      levelData.init(neighbours);
       return;
+    }
 
-    std::set<int> neighbours;
-    switch (mpiRank) {
-    case 0:
-      neighbours.insert(1); neighbours.insert(2); neighbours.insert(3);
-      break;
-    case 1:
-      neighbours.insert(0); neighbours.insert(4); neighbours.insert(2); neighbours.insert(3); neighbours.insert(6);
-      break;
-    case 2:
-      neighbours.insert(0); neighbours.insert(1); neighbours.insert(3); neighbours.insert(8); neighbours.insert(9);
-      break;
-    case 3:
-      neighbours.insert(0); neighbours.insert(1); neighbours.insert(4); 
-      neighbours.insert(2); neighbours.insert(6);
-      neighbours.insert(8); neighbours.insert(9); neighbours.insert(12); 
-      break;
-    case 4:
-      neighbours.insert(1); neighbours.insert(3); neighbours.insert(6); neighbours.insert(7); neighbours.insert(5);
-      break;
-    case 5:
-      neighbours.insert(4); neighbours.insert(6); neighbours.insert(7);
-      break;
-    case 6:
-      neighbours.insert(1); neighbours.insert(4); neighbours.insert(5); 
-      neighbours.insert(3); neighbours.insert(7);
-      neighbours.insert(9); neighbours.insert(12); neighbours.insert(13); 
-      break;
-    case 7:
-      neighbours.insert(4); neighbours.insert(5); neighbours.insert(6);  neighbours.insert(12); neighbours.insert(13);
-      break;
-    case 8:
-      neighbours.insert(2); neighbours.insert(3); neighbours.insert(9);  neighbours.insert(10); neighbours.insert(11);
-      break;
-    case 9:
-      neighbours.insert(2); neighbours.insert(3); neighbours.insert(6); 
-      neighbours.insert(8); neighbours.insert(12);
-      neighbours.insert(10); neighbours.insert(11); neighbours.insert(14); 
-      break;
-    case 10:
-      neighbours.insert(8); neighbours.insert(9); neighbours.insert(11);
-      break;
-    case 11:
-      neighbours.insert(8); neighbours.insert(9); neighbours.insert(12);  neighbours.insert(10); neighbours.insert(14);
-      break;
-    case 12:
-      neighbours.insert(3); neighbours.insert(6); neighbours.insert(7); 
-      neighbours.insert(9); neighbours.insert(13);
-      neighbours.insert(11); neighbours.insert(14); neighbours.insert(15); 
-      break;
-    case 13:
-      neighbours.insert(6); neighbours.insert(7); neighbours.insert(12);  neighbours.insert(14); neighbours.insert(15);
-      break;
-    case 14:
-      neighbours.insert(9); neighbours.insert(12); neighbours.insert(13);  neighbours.insert(11); neighbours.insert(15);
-      break;
-    case 15:
-      neighbours.insert(12); neighbours.insert(13); neighbours.insert(14);
-      break;
-    }    
+    if (levelMode == 1) {
+      std::set<int> neighbours;
+      levelData.init(neighbours);
+      levelData.addLevelMode1();
+      return;
+    }
 
-    TEST_EXIT(neighbours.size() > 0)("Should not happen!\n");
+    TEST_EXIT(MPI::COMM_WORLD.Get_size() == 16)
+      ("Only mpiSize == 16 supported!\n");
 
-    levelData.init(neighbours);
+    if (levelMode == 2) {
 
-    bool multiLevelTest = false;
-    Parameters::get("parallel->multi level test", multiLevelTest);
-    if (multiLevelTest) {
+      std::set<int> neighbours;
+      switch (mpiRank) {
+      case 0:
+	neighbours.insert(1); neighbours.insert(2); neighbours.insert(3);
+	break;
+      case 1:
+	neighbours.insert(0); neighbours.insert(4); neighbours.insert(2); neighbours.insert(3); neighbours.insert(6);
+	break;
+      case 2:
+	neighbours.insert(0); neighbours.insert(1); neighbours.insert(3); neighbours.insert(8); neighbours.insert(9);
+	break;
+      case 3:
+	neighbours.insert(0); neighbours.insert(1); neighbours.insert(4); 
+	neighbours.insert(2); neighbours.insert(6);
+	neighbours.insert(8); neighbours.insert(9); neighbours.insert(12); 
+	break;
+      case 4:
+	neighbours.insert(1); neighbours.insert(3); neighbours.insert(6); neighbours.insert(7); neighbours.insert(5);
+	break;
+      case 5:
+	neighbours.insert(4); neighbours.insert(6); neighbours.insert(7);
+	break;
+      case 6:
+	neighbours.insert(1); neighbours.insert(4); neighbours.insert(5); 
+	neighbours.insert(3); neighbours.insert(7);
+	neighbours.insert(9); neighbours.insert(12); neighbours.insert(13); 
+	break;
+      case 7:
+	neighbours.insert(4); neighbours.insert(5); neighbours.insert(6);  neighbours.insert(12); neighbours.insert(13);
+	break;
+      case 8:
+	neighbours.insert(2); neighbours.insert(3); neighbours.insert(9);  neighbours.insert(10); neighbours.insert(11);
+	break;
+      case 9:
+	neighbours.insert(2); neighbours.insert(3); neighbours.insert(6); 
+	neighbours.insert(8); neighbours.insert(12);
+	neighbours.insert(10); neighbours.insert(11); neighbours.insert(14); 
+	break;
+      case 10:
+	neighbours.insert(8); neighbours.insert(9); neighbours.insert(11);
+	break;
+      case 11:
+	neighbours.insert(8); neighbours.insert(9); neighbours.insert(12);  neighbours.insert(10); neighbours.insert(14);
+	break;
+      case 12:
+	neighbours.insert(3); neighbours.insert(6); neighbours.insert(7); 
+	neighbours.insert(9); neighbours.insert(13);
+	neighbours.insert(11); neighbours.insert(14); neighbours.insert(15); 
+	break;
+      case 13:
+	neighbours.insert(6); neighbours.insert(7); neighbours.insert(12);  neighbours.insert(14); neighbours.insert(15);
+	break;
+      case 14:
+	neighbours.insert(9); neighbours.insert(12); neighbours.insert(13);  neighbours.insert(11); neighbours.insert(15);
+	break;
+      case 15:
+	neighbours.insert(12); neighbours.insert(13); neighbours.insert(14);
+	break;
+      }    
+      
+      TEST_EXIT(neighbours.size() > 0)("Should not happen!\n");
+      
+      levelData.init(neighbours);
+      
       switch (mpiRank) {
       case 0: case 1: case 2: case 3:
 	{
@@ -1003,6 +1025,10 @@ namespace AMDiS {
 	}
 	break;
       }
+
+      levelData.addLevelMode1();
+
+      return;
     }
   }
 
@@ -1011,7 +1037,8 @@ namespace AMDiS {
   {
     FUNCNAME("MeshDistributor::checkMeshChange()");
 
-    MPI::COMM_WORLD.Barrier();
+    MPI::Intracomm &mpiComm = MPI::COMM_WORLD;
+    mpiComm.Barrier();
     double first = MPI::Wtime();
 
     int skip = 0;
@@ -1026,6 +1053,9 @@ namespace AMDiS {
     if (recvAllValues == 0)
       return;
 
+    TEST_EXIT(levelData.getNumberOfLevels() == 1)
+      ("Not yet implemented for multi level!\n");
+
     // === At least one rank mesh has been changed, so the boundaries must be ===
     // === adapted to the new mesh structure.                                 ===
 
@@ -1036,18 +1066,18 @@ namespace AMDiS {
       // important. Therefore, we add all boundaries to one boundary container.
       RankToBoundMap allBound;
       
-      for (InteriorBoundary::iterator it(intBoundary.getOwn()); !it.end(); ++it)
+      for (InteriorBoundary::iterator it(intBoundary[0].getOwn()); !it.end(); ++it)
 	if ((mesh->getDim() == 2 && it->rankObj.subObj == EDGE) || 
 	    (mesh->getDim() == 3 && it->rankObj.subObj == FACE))
  	  allBound[it.getRank()].push_back(*it);
       
-      for (InteriorBoundary::iterator it(intBoundary.getOther()); 
+      for (InteriorBoundary::iterator it(intBoundary[0].getOther()); 
 	   !it.end(); ++it)
 	if ((mesh->getDim() == 2 && it->rankObj.subObj == EDGE) || 
 	    (mesh->getDim() == 3 && it->rankObj.subObj == FACE))
 	  allBound[it.getRank()].push_back(*it);
       
-      for (InteriorBoundary::iterator it(intBoundary.getPeriodic()); 
+      for (InteriorBoundary::iterator it(intBoundary[0].getPeriodic()); 
 	   !it.end(); ++it)
 	if (it.getRank() != mpiRank)
 	  if ((mesh->getDim() == 2 && it->rankObj.subObj == EDGE) || 
@@ -1061,7 +1091,7 @@ namespace AMDiS {
 	MSG_DBG("Run checkAndAdaptBoundary ...\n");
 
 	// Check for periodic boundaries within rank's subdomain.
-	for (InteriorBoundary::iterator it(intBoundary.getPeriodic()); 
+	for (InteriorBoundary::iterator it(intBoundary[0].getPeriodic()); 
 	     !it.end(); ++it) {
 	  if (it.getRank() == mpiRank) {
 	    if ((mesh->getDim() == 2 && it->rankObj.subObj == EDGE) || 
@@ -1091,7 +1121,7 @@ namespace AMDiS {
       MSG("Number of iteration to adapt mesh: %d\n", iterationCounter);
     }
 
-    MPI::COMM_WORLD.Barrier();
+    mpiComm.Barrier();
     MSG("Parallel mesh adaption needed %.5f seconds\n", MPI::Wtime() - first);
 
 
@@ -1138,6 +1168,9 @@ namespace AMDiS {
   {
     FUNCNAME("MeshDistributor::getImbalanceFactor()");
 
+    MPI::Intracomm &mpiComm = MPI::COMM_WORLD;
+    int mpiSize = mpiComm.Get_size();
+
     vector<int> nDofsInRank(mpiSize);
     int nDofs = mesh->getDofAdmin(0).getUsedDofs();
     mpiComm.Gather(&nDofs, 1, MPI_INT, &(nDofsInRank[0]), 1, MPI_INT, 0);
@@ -1205,7 +1238,7 @@ namespace AMDiS {
       }
     }
 
-    StdMpi<MeshCodeVec> stdMpi(mpiComm, true);
+    StdMpi<MeshCodeVec> stdMpi(MPI::COMM_WORLD, true);
     stdMpi.send(sendCodes);
     for (RankToBoundMap::iterator it = allBound.begin(); 
 	 it != allBound.end(); ++it)
@@ -1333,6 +1366,8 @@ namespace AMDiS {
   {
     FUNCNAME("MeshDistributor::repartitionMesh()");
 
+    MPI::Intracomm &mpiComm = MPI::COMM_WORLD;
+
     // === First, check if the load is unbalanced on the ranks. ===
 
     int repartitioning = 0;
@@ -1354,7 +1389,7 @@ namespace AMDiS {
     if (repartitioning == 0)
       return;
 
-    MPI::COMM_WORLD.Barrier();
+    mpiComm.Barrier();
     double timePoint = MPI::Wtime();
 
 #if (DEBUG != 0)
@@ -1404,7 +1439,7 @@ namespace AMDiS {
     bool partitioningSucceed = 
       partitioner->partition(elemWeights, ADAPTIVE_REPART);
     if (!partitioningSucceed) {
-      MPI::COMM_WORLD.Barrier();
+      mpiComm.Barrier();
       repartitioningFailed = repartitioningWaitAfterFail;
       MSG("Mesh partitioner created empty partition!\n");
       MSG("Mesh repartitioning needed %.5f seconds\n", MPI::Wtime() - timePoint); 
@@ -1427,7 +1462,7 @@ namespace AMDiS {
       }
       
       if (!partitioningSucceed || !partitioner->meshChanged()) {
-	MPI::COMM_WORLD.Barrier();
+	mpiComm.Barrier();
 	repartitioningFailed = repartitioningWaitAfterFail;
 	MSG("Mesh repartitioning needed %.5f seconds\n", MPI::Wtime() - timePoint);   
 	return;
@@ -1646,7 +1681,7 @@ namespace AMDiS {
     fix3dMeshRefinement();
 
 
-    MPI::COMM_WORLD.Barrier();
+    mpiComm.Barrier();
     MSG("Mesh repartitioning needed %.5f seconds\n", MPI::Wtime() - timePoint);   
   }
 
@@ -1659,26 +1694,16 @@ namespace AMDiS {
       elObjDb.create(partitionMap, levelData);
     elObjDb.updateRankData();
 
-    // unsigned long memsize = elObjDb.calculateMemoryUsage();
-    // MSG("Memory usage of element object database = %5.f KByte\n", 
-    // 	static_cast<double>(memsize / 1024.0));
-
-    MSG("MAKE INT BOUND!\n");
-    intBoundary.create(levelData, 0, elObjDb);
-    ParallelDebug::printBoundaryInfo(intBoundary);
-
-    MSG("TEST = %d\n", levelData.getLevelNumber());
+    
+    intBoundary.create(levelData, elObjDb);
 
-    if (levelData.getLevelNumber() > 1) {
-      MSG("MAKE INT SD BOUND!\n");
-      intBoundarySd.create(levelData, 1, elObjDb);
-      ParallelDebug::printBoundaryInfo(intBoundarySd, 0, true);
-    }
+    for (int level = 0; level < levelData.getNumberOfLevels(); level++)
+      ParallelDebug::printBoundaryInfo(intBoundary[level]);
 
     if (firstCall) {
-      int tmpSend = static_cast<int>(intBoundary.hasPeriodic());
+      int tmpSend = static_cast<int>(intBoundary[0].hasPeriodic());
       int tmpRecv = 0;
-      mpiComm.Allreduce(&tmpSend, &tmpRecv, 1, MPI_INT, MPI_MAX);
+      MPI::COMM_WORLD.Allreduce(&tmpSend, &tmpRecv, 1, MPI_INT, MPI_MAX);
       hasPeriodicBoundary = static_cast<bool>(tmpRecv);
     }
   }
@@ -1694,19 +1719,15 @@ namespace AMDiS {
 
     // === Create DOF communicator. ===
 
-    dofComm.init(0, levelData, feSpaces);
+    dofComm.init(levelData, feSpaces);
     dofComm.create(intBoundary);
 
-    if (levelData.getLevelNumber() > 1) {
-      dofCommSd.init(0, levelData, feSpaces);
-      dofCommSd.create(intBoundarySd);
-    }
 
     // === If requested, create more information on communication DOFs. ===
     
     if (createBoundaryDofFlag.isSet(BOUNDARY_SUBOBJ_SORTED)) {
 
-      int nLevels = levelData.getLevelNumber();
+      int nLevels = levelData.getNumberOfLevels();
       boundaryDofInfo.resize(nLevels);
       
       for (unsigned int i = 0; i < feSpaces.size(); i++) {
@@ -1720,7 +1741,7 @@ namespace AMDiS {
 	  
 	  // === Create send DOFs. ===
 	  for (int geo = FACE; geo >= VERTEX; geo--) {
-	    for (InteriorBoundary::iterator it(intBoundary.getOwn(), level); 
+	    for (InteriorBoundary::iterator it(intBoundary[level].getOwn()); 
 		 !it.end(); ++it) {
 	      if (it->rankObj.subObj == geo) {
 		DofContainer dofs;
@@ -1735,7 +1756,7 @@ namespace AMDiS {
 	  
 	  // === Create recv DOFs. ===
 	  for (int geo = FACE; geo >= VERTEX; geo--) {
-	    for (InteriorBoundary::iterator it(intBoundary.getOther(), level); 
+	    for (InteriorBoundary::iterator it(intBoundary[level].getOther()); 
 		 !it.end(); ++it) {
 	      if (it->rankObj.subObj == geo) {
 		DofContainer dofs;
@@ -1826,7 +1847,7 @@ namespace AMDiS {
     ParallelDebug::testDofContainerCommunication(*this);
 
     MSG("------------- Debug information -------------\n");
-    MSG("|  number of levels:         %d\n", levelData.getLevelNumber());
+    MSG("|  number of levels:         %d\n", levelData.getNumberOfLevels());
     MSG("|  number of FE spaces:      %d\n", feSpaces.size());
 
 
@@ -1841,12 +1862,13 @@ namespace AMDiS {
 	MSG("|      rStartDofs   = %d\n", (*(dofMaps[i]))[feSpace].rStartDofs);
       }
     }
-	
+
 //     debug::writeElementIndexMesh(mesh, debugOutputDir + "elementIndex-" + 
 // 				 lexical_cast<string>(mpiRank) + ".vtu");
     ParallelDebug::writeDebugFile(feSpaces[feSpaces.size() - 1], 
 				  *(dofMaps[0]), 
 				  debugOutputDir + "mpi-dbg", "dat");
+
     debug::testSortedDofs(mesh, elMap);
 
     int test = 0;
@@ -1931,7 +1953,7 @@ namespace AMDiS {
     // === Traverse interior boundaries and get all DOFs on them. ===
 
     DofContainerSet nonRankDofs;
-    for (DofComm::Iterator it(dcom.getRecvDofs(), 0, feSpace); 
+    for (DofComm::Iterator it(dcom.getRecvDofs(), feSpace); 
 	 !it.end(); it.nextRank())
       for (; !it.endDofIter(); it.nextDof())
 	nonRankDofs.insert(it.getDof());
@@ -1954,10 +1976,10 @@ namespace AMDiS {
     periodicMap.clear();
 
     // If there are no periodic boundaries, return. 
-    if (!intBoundary.hasPeriodic())
+    if (!intBoundary[0].hasPeriodic())
       return;
 
-    TEST_EXIT(levelData.getLevelNumber() == 1)
+    TEST_EXIT(levelData.getNumberOfLevels() == 1)
       ("Periodic DOF map does not support multi level domain decomposition!\n");
 
     //    MPI::COMM_WORLD.Barrier();   [TODO: CHANGE BECAUSE NOT ALL RANKS HAVE PERIODIC MAP!!!]
@@ -1977,9 +1999,9 @@ namespace AMDiS {
 
     TEST_EXIT(dofMaps.size())("No DOF mapping defined!\n");
 
-    DofComm::LevelDataType &periodicDofs = dofComm.getPeriodicDofs();
+    DofComm::DataType &periodicDofs = dofComm[0].getPeriodicDofs();
     ComponentDofMap &dofMap = (*(dofMaps[0]))[feSpace];
-    StdMpi<vector<int> > stdMpi(mpiComm, false);
+    StdMpi<vector<int> > stdMpi(MPI::COMM_WORLD, false);
 
 
     // === Each rank traverse its periodic boundaries and sends the DOF      ===
@@ -1987,8 +2009,8 @@ namespace AMDiS {
 
     map<int, vector<int> > rankToDofType;
 
-    for (RankToBoundMap::iterator it = intBoundary.getPeriodic().begin();
-	 it != intBoundary.getPeriodic().end(); ++it) {
+    for (RankToBoundMap::iterator it = intBoundary[0].getPeriodic().begin();
+	 it != intBoundary[0].getPeriodic().end(); ++it) {
 
       if (it->first == mpiRank) {
 	// Here we have a periodic boundary within rank's subdomain. So we can
@@ -2025,7 +2047,7 @@ namespace AMDiS {
 	// Here we have a periodic boundary between two ranks.
 
 	// Create DOF indices on the boundary. 
-	DofContainer& dofs = periodicDofs[0][it->first][feSpace];
+	DofContainer& dofs = periodicDofs[it->first][feSpace];
 	for (vector<AtomicBoundary>::iterator boundIt = it->second.begin();
 	     boundIt != it->second.end(); ++boundIt) {
 
@@ -2056,9 +2078,9 @@ namespace AMDiS {
     // === DOFs from the other ranks.                                        ===
 
 
-    for (RankToBoundMap::iterator it = intBoundary.getPeriodic().begin();
-	 it != intBoundary.getPeriodic().end(); ++it) {
-      DofContainer& dofs = periodicDofs[0][it->first][feSpace];
+    for (RankToBoundMap::iterator it = intBoundary[0].getPeriodic().begin();
+	 it != intBoundary[0].getPeriodic().end(); ++it) {
+      DofContainer& dofs = periodicDofs[it->first][feSpace];
       vector<int>& types = rankToDofType[it->first];
 
       TEST_EXIT_DBG(dofs.size() == types.size())("Should not happen!\n");
@@ -2077,11 +2099,11 @@ namespace AMDiS {
     }
 
 
-    StdMpi<PeriodicDofMap> stdMpi2(mpiComm);
+    StdMpi<PeriodicDofMap> stdMpi2(MPI::COMM_WORLD);
 
 
-    for (RankToBoundMap::iterator it = intBoundary.getPeriodic().begin();
-	 it != intBoundary.getPeriodic().end(); ++it) {
+    for (RankToBoundMap::iterator it = intBoundary[0].getPeriodic().begin();
+	 it != intBoundary[0].getPeriodic().end(); ++it) {
       if (it->first == mpiRank)
 	continue;
 
@@ -2175,7 +2197,7 @@ namespace AMDiS {
     SerUtil::serialize(out, partitionMap);
 
     elObjDb.serialize(out);
-
+    
     intBoundary.serialize(out);
 
     // === Serialieze FE space dependent data ===
diff --git a/AMDiS/src/parallel/MeshDistributor.h b/AMDiS/src/parallel/MeshDistributor.h
index e9882c9c..4d36f6a4 100644
--- a/AMDiS/src/parallel/MeshDistributor.h
+++ b/AMDiS/src/parallel/MeshDistributor.h
@@ -151,24 +151,14 @@ namespace AMDiS {
       return periodicMap;
     }
 
-    DofComm& getDofComm()
+    DofComm& getDofComm(int level)
     {
-      return dofComm;
+      return dofComm[level];
     }
 
-    DofComm& getDofCommSd()
+    InteriorBoundary& getIntBoundary(int level)
     {
-      return dofCommSd;
-    }
-
-    InteriorBoundary& getIntBoundary()
-    {
-      return intBoundary;
-    }
-
-    InteriorBoundary& getIntBoundarySd()
-    {
-      return intBoundarySd;
+      return intBoundary[level];
     }
 
     inline long getLastMeshChangeIndex()
@@ -181,24 +171,14 @@ namespace AMDiS {
       return mpiRank;
     }
 
-    inline int getMpiSize()
+    inline int getMpiSize(int level)
     {
-      return mpiSize;
+      return levelData.getMpiComm(level).Get_size();
     }
 
-    inline MPI::Intracomm& getMpiComm()
+    inline MPI::Intracomm& getMpiComm(int level)
     {
-      return mpiComm;
-    }
-
-    inline MPI::Intracomm getLocalMpiComm()
-    {
-      if (levelData.getLevelNumber() == 1)
-	return PETSC_COMM_SELF;
-
-      TEST_EXIT(levelData.getLevelNumber() == 2)("Not yet implemented!\n");
-
-      return levelData.getMpiComm(1);
+      return levelData.getMpiComm(level);
     }
 
     inline bool isInitialized()
@@ -225,71 +205,77 @@ namespace AMDiS {
     template<typename T>
     void synchVector(DOFVector<T> &vec) 
     {
-      StdMpi<vector<T> > stdMpi(mpiComm);
-
       const FiniteElemSpace *fe = vec.getFeSpace();
 
-      for (DofComm::Iterator it(dofComm.getSendDofs(), fe); 
-	   !it.end(); it.nextRank()) {
-	vector<T> dofs;
-	dofs.reserve(it.getDofs().size());
+      int nLevels = levelData.getNumberOfLevels();
+      for (int level = nLevels - 1; level >= 0; level--) {
+	StdMpi<vector<T> > stdMpi(levelData.getMpiComm(level));
+
+	for (DofComm::Iterator it(dofComm[level].getSendDofs(), fe); 
+	     !it.end(); it.nextRank()) {
+	  vector<T> dofs;
+	  dofs.reserve(it.getDofs().size());
+	  
+	  for (; !it.endDofIter(); it.nextDof())
+	    dofs.push_back(vec[it.getDofIndex()]);
+	  
+	  stdMpi.send(it.getRank(), dofs);
+	}
+	
+	for (DofComm::Iterator it(dofComm[level].getRecvDofs()); 
+	     !it.end(); it.nextRank())
+	  stdMpi.recv(it.getRank());
 	
-	for (; !it.endDofIter(); it.nextDof())
-	  dofs.push_back(vec[it.getDofIndex()]);
+	stdMpi.startCommunication();
 	
-	stdMpi.send(it.getRank(), dofs);
+	for (DofComm::Iterator it(dofComm[level].getRecvDofs(), fe); 
+	     !it.end(); it.nextRank())
+	  for (; !it.endDofIter(); it.nextDof())
+	    vec[it.getDofIndex()] = 
+	      stdMpi.getRecvData(it.getRank())[it.getDofCounter()];
       }
-	     
-      for (DofComm::Iterator it(dofComm.getRecvDofs()); 
-	   !it.end(); it.nextRank())
-        stdMpi.recv(it.getRank());
-	     
-      stdMpi.startCommunication();
-
-      for (DofComm::Iterator it(dofComm.getRecvDofs(), fe); 
-	   !it.end(); it.nextRank())
-	for (; !it.endDofIter(); it.nextDof())
-	  vec[it.getDofIndex()] = 
-	     stdMpi.getRecvData(it.getRank())[it.getDofCounter()];
     }
    
     /// Works in the same way as the function above defined for DOFVectors. For
     /// performance reasons, this function does not call \ref synchVector for each
     /// DOFVector, but instead sends all values of all DOFVectors at once.
-    void synchVector(SystemVector &vec, int level = 0);
+    void synchVector(SystemVector &vec);
 
     /// Works quite similarly to the function \ref synchVector, but instead the
     /// values of subdomain vectors are added along the boundaries.
     template<typename T>
     void synchAddVector(DOFVector<T> &vec)
     {
-      StdMpi<vector<T> > stdMpi(mpiComm);
-
       const FiniteElemSpace *fe = vec.getFeSpace();
 
-      for (DofComm::Iterator it(dofComm.getRecvDofs(), fe);
-	   !it.end(); it.nextRank()) {
-	vector<T> dofs;
-	dofs.reserve(it.getDofs().size());
+      int nLevels = levelData.getNumberOfLevels();
+      for (int level = nLevels - 1; level >= 0; level--) {
+	StdMpi<vector<T> > stdMpi(levelData.getMpiComm(level));
+
+	for (DofComm::Iterator it(dofComm[level].getRecvDofs(), fe);
+	     !it.end(); it.nextRank()) {
+	  vector<T> dofs;
+	  dofs.reserve(it.getDofs().size());
+	  
+	  for (; !it.endDofIter(); it.nextDof())
+	    dofs.push_back(vec[it.getDofIndex()]);
+	  
+	  stdMpi.send(it.getRank(), dofs);
+	}
 	
-	for (; !it.endDofIter(); it.nextDof())
-	  dofs.push_back(vec[it.getDofIndex()]);
+	for (DofComm::Iterator it(dofComm[level].getSendDofs()); 
+	     !it.end(); it.nextRank())
+	  stdMpi.recv(it.getRank());
 	
-	stdMpi.send(it.getRank(), dofs);
+	stdMpi.startCommunication();
+	
+	for (DofComm::Iterator it(dofComm[level].getSendDofs(), fe); 
+	     !it.end(); it.nextRank())
+	  for (; !it.endDofIter(); it.nextDof())
+	    vec[it.getDofIndex()] += 
+	      stdMpi.getRecvData(it.getRank())[it.getDofCounter()];
       }
 
-      for (DofComm::Iterator it(dofComm.getSendDofs()); 
-	   !it.end(); it.nextRank())
-        stdMpi.recv(it.getRank());
-	     
-      stdMpi.startCommunication();
-
-      for (DofComm::Iterator it(dofComm.getSendDofs(), fe); 
-	   !it.end(); it.nextRank())
-	for (; !it.endDofIter(); it.nextDof())
-	  vec[it.getDofIndex()] += 
-	     stdMpi.getRecvData(it.getRank())[it.getDofCounter()];
-
       synchVector(vec);
     }
 
@@ -498,13 +484,6 @@ namespace AMDiS {
     /// The rank of the current process.
     int mpiRank;
 
-    /// Overall number of processes.
-    int mpiSize;
-
-    /// MPI communicator collected all processes, which should be used for
-    /// calculation. The Debug procces is not included in this communicator.
-    MPI::Intracomm mpiComm;
-
     /// Name of the problem (as used in the init files)
     string name;
 
@@ -539,13 +518,10 @@ namespace AMDiS {
 
     /// Defines the interior boundaries of the domain that result from 
     /// partitioning the whole mesh. 
-    InteriorBoundary intBoundary;
-
-    InteriorBoundary intBoundarySd;
-
-    DofComm dofComm;
+    MultiLevelInteriorBoundary intBoundary;
 
-    DofComm dofCommSd;
+    /// Dof communicator object
+    MultiLevelDofComm dofComm;
 
     PeriodicMap periodicMap;
 
diff --git a/AMDiS/src/parallel/MeshLevelData.cc b/AMDiS/src/parallel/MeshLevelData.cc
index f29fd047..7291dad9 100644
--- a/AMDiS/src/parallel/MeshLevelData.cc
+++ b/AMDiS/src/parallel/MeshLevelData.cc
@@ -29,7 +29,10 @@ namespace AMDiS {
   void MeshLevelData::init(std::set<int> &neighbourRanks)
   {
     levelRanks.resize(1);
-    levelRanks[0].insert(-1);
+    
+    int mpiSize = MPI::COMM_WORLD.Get_size();
+    for (int i = 0; i < mpiSize; i++)
+      levelRanks[0].insert(i);
     nLevel = 1;
     
     levelNeighbours.resize(1);
@@ -48,7 +51,6 @@ namespace AMDiS {
     FUNCNAME("MeshLevelData()::addLevel()");
 
     TEST_EXIT(nLevel >= 1)("Mesh level structure is not initialized()");
-    TEST_EXIT(nLevel == 1)("Only 2 level are supported yet!\n");
 
     levelRanks.push_back(ranksInDomain);
     nLevel++;
@@ -68,6 +70,19 @@ namespace AMDiS {
   }
 
 
+  void MeshLevelData::addLevelMode1()
+  {
+    nLevel++;
+    mpiComms.push_back(MPI::COMM_SELF);
+    mpiGroups.push_back(MPI::COMM_SELF.Get_group());
+
+    levelRanks.resize(nLevel);
+    levelNeighbours.resize(nLevel);
+
+    levelRanks[nLevel - 1].insert(MPI::COMM_WORLD.Get_rank());
+  }
+
+
   void MeshLevelData::serialize(ostream &out)
   {
     ERROR_EXIT("Not yet implemented!\n");
@@ -110,4 +125,47 @@ namespace AMDiS {
 	MSG("  neighbours string to long!\n");
     }
   }
+
+
+  int MeshLevelData::mapRank(int fromRank, int fromLevel, int toLevel)
+  {
+    FUNCNAME("MeshLevelData::mapRank()");
+
+    if (fromLevel == toLevel)
+      return fromRank;
+    
+    int toRank = -1;
+    
+    MPI::Group::Translate_ranks(mpiGroups[fromLevel], 1, &fromRank, 
+				mpiGroups[toLevel], &toRank);
+    
+    TEST_EXIT_DBG(toRank != MPI::UNDEFINED)
+      ("Cannot map rank %d from level %d to level %d\n",
+       fromRank, fromLevel, toLevel);
+    
+    return toRank;			
+  }
+
+
+  bool MeshLevelData::isLevelBoundary(int rank, int level)
+  {
+    FUNCNAME("MeshLevelData::isLevelBoundary()");
+
+    TEST_EXIT_DBG(level < nLevel)("Invalid level number %d (nLevel = %d)!\n", level, nLevel);
+
+    if (nLevel == 1)
+      return true;
+
+    if (level + 1 < nLevel && mpiComms[level + 1] == MPI::COMM_SELF)
+      return true;
+
+    return static_cast<bool>(levelRanks[level].count(rank));
+  }
+
+
+  bool MeshLevelData::rankInLevel(int rank, int level)
+  {
+    return static_cast<bool>(levelRanks[level].count(rank));
+  }
+
 }
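MeshLevelData::mapRank() above is a thin wrapper around MPI group rank translation. A standalone sketch of that mechanism (illustrative; the way the sub-communicator is built here is an assumption, not how AMDiS constructs its level communicators):

#include <mpi.h>
#include <cstdio>

int main(int argc, char **argv)
{
  MPI_Init(&argc, &argv);

  int worldRank, worldSize;
  MPI_Comm_rank(MPI_COMM_WORLD, &worldRank);
  MPI_Comm_size(MPI_COMM_WORLD, &worldSize);

  // Split the processes into two halves as a stand-in for a coarser level.
  MPI_Comm subComm;
  MPI_Comm_split(MPI_COMM_WORLD, worldRank < worldSize / 2 ? 0 : 1, worldRank, &subComm);

  MPI_Group worldGroup, subGroup;
  MPI_Comm_group(MPI_COMM_WORLD, &worldGroup);
  MPI_Comm_group(subComm, &subGroup);

  // Translate this process' world rank into its rank within the sub-group
  // (this is exactly what Translate_ranks does inside mapRank()).
  int subRank = MPI_UNDEFINED;
  MPI_Group_translate_ranks(worldGroup, 1, &worldRank, subGroup, &subRank);
  std::printf("world rank %d -> sub rank %d\n", worldRank, subRank);

  MPI_Group_free(&worldGroup);
  MPI_Group_free(&subGroup);
  MPI_Comm_free(&subComm);
  MPI_Finalize();
  return 0;
}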
diff --git a/AMDiS/src/parallel/MeshLevelData.h b/AMDiS/src/parallel/MeshLevelData.h
index 93a35faf..f73c53f3 100644
--- a/AMDiS/src/parallel/MeshLevelData.h
+++ b/AMDiS/src/parallel/MeshLevelData.h
@@ -43,6 +43,8 @@ namespace AMDiS {
 
     void addLevel(std::set<int> &ranksInDomain, int domainId);
 
+    void addLevelMode1();
+
     // Writes all data of this object to an output stream.
     void serialize(ostream &out);
 
@@ -58,28 +60,28 @@ namespace AMDiS {
       return levelRanks[level];
     }
 
-    std::set<int>& getLevelNeighbours(int level)
-    {
-      TEST_EXIT_DBG(level < nLevel)("Should not happen!\n");
+    /* std::set<int>& getLevelNeighbours(int level) */
+    /* {       */
+    /*   TEST_EXIT_DBG(level < nLevel)("Should not happen!\n"); */
       
-      return levelNeighbours[level];
-    }
-
-   int getLevelNumber()
-   {
-     return nLevel;
-   }
-
-   MPI::Intracomm& getMpiComm(int level)
-   {
-     FUNCNAME("MeshLevelData::getMpiComm()");
-
-     TEST_EXIT_DBG(level < nLevel)
-       ("Asked for level %d, but defined only for %d levels!\n", level, nLevel);
+    /*   return levelNeighbours[level]; */
+    /* } */
 
-     return mpiComms[level];
-   }
+    inline int getNumberOfLevels()
+    {
+      return nLevel;
+    }
 
+    MPI::Intracomm& getMpiComm(int level)
+    {
+      FUNCNAME("MeshLevelData::getMpiComm()");
+      
+      TEST_EXIT_DBG(level < nLevel)
+	("Asked for level %d, but defined only for %d levels!\n", level, nLevel);
+      
+      return mpiComms[level];
+    }
+    
     MPI::Group& getMpiGroup(int level)
     {
       TEST_EXIT_DBG(level < nLevel)("Should not happen!\n");
@@ -89,41 +91,15 @@ namespace AMDiS {
 
     int getLevelId(int level, int rank) 
     {
-      if (level == 0)
-	return rank;
-
-      TEST_EXIT_DBG(level == 1 && rank <= 15)("Should not happen!\n");
-      TEST_EXIT_DBG(MPI::COMM_WORLD.Get_size() == 16)("Should not happen!\n");
+      FUNCNAME("MeshLevelData::getLevelId()");
 
-      if (rank == 0 || rank == 1 || rank == 2 || rank == 3)
-	return 0;
-
-      if (rank == 4 || rank == 5 || rank == 6 || rank == 7)
-	return 1;
-      
-      if (rank == 8 || rank == 9 || rank == 10 || rank == 11)
-	return 2;
+      TEST_EXIT_DBG(level < nLevel)
+	("Wrong level number: %d  nLevel = %d\n", level, nLevel);
 
-      if (rank == 12 || rank == 13 || rank == 14 || rank == 15)
-	return 3;
-
-      ERROR_EXIT("Should not happen!\n");
-
-      return -1;
+      return mpiComms[level].Get_rank();				    
     }
 
-    int mapRank(int fromRank, int fromLevel, int toLevel)
-    {
-      int toRank = -1;
-
-      MPI::Group::Translate_ranks(mpiGroups[fromLevel], 1, &fromRank, 
-				  mpiGroups[toLevel], &toRank);
-
-      TEST_EXIT_DBG(toRank != MPI::UNDEFINED)
-	("Should not happen!\n");
-
-      return toRank;			
-    }
+    int mapRank(int fromRank, int fromLevel, int toLevel);
 
     bool rankInSubdomain(int rank, int level)
     {
@@ -132,6 +108,10 @@ namespace AMDiS {
       return static_cast<bool>(levelRanks[level].count(rank));
     }
 
+    bool isLevelBoundary(int rank, int level);
+
+    bool rankInLevel(int rank, int level);
+
   protected:
     int nLevel;
 
diff --git a/AMDiS/src/parallel/MpiHelper.cc b/AMDiS/src/parallel/MpiHelper.cc
index 008a7c05..a4331550 100644
--- a/AMDiS/src/parallel/MpiHelper.cc
+++ b/AMDiS/src/parallel/MpiHelper.cc
@@ -17,16 +17,16 @@ namespace AMDiS {
 
   namespace mpi {
 
-    void globalAdd(double &value)
+    void globalAdd(MPI::Intracomm &mpiComm, double &value)
     {
       double valCopy = value;
-      MPI::COMM_WORLD.Allreduce(&valCopy, &value, 1, MPI_DOUBLE, MPI_SUM);
+      mpiComm.Allreduce(&valCopy, &value, 1, MPI_DOUBLE, MPI_SUM);
     }
 
-    void globalAdd(int &value)
+    void globalAdd(MPI::Intracomm &mpiComm, int &value)
     {
       int valCopy = value;
-      MPI::COMM_WORLD.Allreduce(&valCopy, &value, 1, MPI_INT, MPI_SUM);
+      mpiComm.Allreduce(&valCopy, &value, 1, MPI_INT, MPI_SUM);
     }
     
     void globalMin(double &value)
diff --git a/AMDiS/src/parallel/MpiHelper.h b/AMDiS/src/parallel/MpiHelper.h
index a83c4af8..d67f2bc7 100644
--- a/AMDiS/src/parallel/MpiHelper.h
+++ b/AMDiS/src/parallel/MpiHelper.h
@@ -31,9 +31,19 @@
 namespace AMDiS {
 
   namespace mpi {
-    void globalAdd(double &value);
+    void globalAdd(MPI::Intracomm &mpiComm, double &value);
 
-    void globalAdd(int &value);
+    inline void globalAdd(double &value)
+    {
+      globalAdd(MPI::COMM_WORLD, value);
+    }
+
+    void globalAdd(MPI::Intracomm &mpiComm, int &value);
+
+    inline void globalAdd(int &value)
+    {
+      globalAdd(MPI::COMM_WORLD, value);
+    }
     
     template<typename T>
     void globalAdd(T &value) 
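A short usage sketch for the new communicator-aware overloads next to the unchanged default (the include path and the origin of subComm are assumptions; any previously obtained level communicator would do):

#include "parallel/MpiHelper.h"

void sumRows(MPI::Intracomm &subComm, int nLocalRows)
{
  int nLevelRows = nLocalRows;
  AMDiS::mpi::globalAdd(subComm, nLevelRows);   // sum only within subComm

  int nWorldRows = nLocalRows;
  AMDiS::mpi::globalAdd(nWorldRows);            // unchanged behaviour: MPI::COMM_WORLD
}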
diff --git a/AMDiS/src/parallel/ParallelCoarseSpaceSolver.cc b/AMDiS/src/parallel/ParallelCoarseSpaceSolver.cc
index ed1d1607..486465d3 100644
--- a/AMDiS/src/parallel/ParallelCoarseSpaceSolver.cc
+++ b/AMDiS/src/parallel/ParallelCoarseSpaceSolver.cc
@@ -25,7 +25,7 @@ namespace AMDiS {
       lastMeshNnz(-1),
       alwaysCreateNnzStructure(false),
       meshDistributor(NULL),
-      subdomainLevel(0),
+      meshLevel(0),
       rStartInterior(0),
       nGlobalOverallInterior(0)
   {
@@ -61,25 +61,26 @@ namespace AMDiS {
   }
 
 
-  void ParallelCoarseSpaceSolver::setMeshDistributor(MeshDistributor *md)
-  {
-    FUNCNAME("ParallelCoarseSpaceSolver::setMeshDistributor()");
-
-    meshDistributor = md;
-    mpiCommGlobal = meshDistributor->getMpiComm();
-    mpiCommLocal = meshDistributor->getLocalMpiComm();
-  }
-
-
   void ParallelCoarseSpaceSolver::setMeshDistributor(MeshDistributor *md,
-						     MPI::Intracomm mpiComm0,
-						     MPI::Intracomm mpiComm1)
+						     int level)
   {
     FUNCNAME("ParallelCoarseSpaceSolver::setMeshDistributor()");
 
+    meshLevel = level;
     meshDistributor = md;
-    mpiCommGlobal = mpiComm0;
-    mpiCommLocal = mpiComm1; 
+
+#if 0
+    mpiCommGlobal = meshDistributor->getMpiComm(meshLevel);
+    if (meshLevel + 1 < 
+	meshDistributor->getMeshLevelData().getNumberOfLevels())
+      mpiCommLocal = meshDistributor->getMpiComm(meshLevel + 1);
+    else 
+      mpiCommLocal = MPI::COMM_SELF;
+#endif
+    domainComm = meshDistributor->getMpiComm(meshLevel);
+
+    if (meshLevel >= 1)
+      coarseSpaceComm = meshDistributor->getMpiComm(meshLevel - 1);
   }
 
 
@@ -135,7 +136,7 @@ namespace AMDiS {
 
     // === If required, recompute non zero structure of the matrix. ===
 
-    bool localMatrix = (coarseSpaceMap.size() && subdomainLevel == 0);
+    bool localMatrix = (domainComm == MPI::COMM_SELF);
 
     if (checkMeshChange()) {
       int nMat = uniqueCoarseMap.size() + 1;
@@ -146,7 +147,7 @@ namespace AMDiS {
 	  nnz[i][j].clear();
       }
 
-      nnz[0][0].create(seqMat, mpiCommGlobal, *interiorMap,
+      nnz[0][0].create(seqMat, *interiorMap,
 		       (coarseSpaceMap.size() == 0 ? &(meshDistributor->getPeriodicMap()) : NULL),
 		       meshDistributor->getElementObjectDb(),
 		       localMatrix);
@@ -161,7 +162,7 @@ namespace AMDiS {
 	  ParallelDofMapping &colMap =
 	    (j == 0 ? *interiorMap : *(uniqueCoarseMap[j - 1]));
 
-	  nnz[i][j].create(seqMat, mpiCommGlobal, rowMap, colMap, NULL,
+	  nnz[i][j].create(seqMat, rowMap, colMap, NULL,
 			   meshDistributor->getElementObjectDb());
 	}
       }
@@ -174,48 +175,43 @@ namespace AMDiS {
     int nRankInteriorRows = interiorMap->getRankDofs();
     int nOverallInteriorRows = interiorMap->getOverallDofs();
 
-    MPI::Intracomm mpiGlobal = meshDistributor->getMeshLevelData().getMpiComm(subdomainLevel);
-    
     if (localMatrix) {
-      MatCreateSeqAIJ(mpiCommLocal, nRankInteriorRows, nRankInteriorRows,
+      MatCreateSeqAIJ(domainComm, nRankInteriorRows, nRankInteriorRows,
 		      0, nnz[0][0].dnnz,
-		      &mat[0][0]);
+		      &mat[0][0]);    
     } else {
-      if (subdomainLevel == 0) {
-	MatCreateAIJ(mpiGlobal, nRankInteriorRows, nRankInteriorRows, 
+      if (meshLevel == 0 || domainComm == MPI::COMM_SELF) {
+    
+	MatCreateAIJ(domainComm, nRankInteriorRows, nRankInteriorRows, 
 		     nOverallInteriorRows, nOverallInteriorRows,
 		     0, nnz[0][0].dnnz, 0, nnz[0][0].onnz,		   
 		     &mat[0][0]);    
       } else {
-	MSG("WARNING: USE MULTILEVEL, MAKE THIS GENERAL!\n");
+	MSG("WARNING: USE MULTILEVEL, MAKE THIS GENERAL: %d of %d\n", domainComm.Get_rank(), domainComm.Get_size());
 	// TODO: MAKE NNZ CALCULATION GENERAL (see also creation of coarse space
 	// matrices some lines below)
 
-	MatCreateAIJ(mpiGlobal, nRankInteriorRows, nRankInteriorRows, 
+	MatCreateAIJ(domainComm, nRankInteriorRows, nRankInteriorRows, 
 		     nOverallInteriorRows, nOverallInteriorRows,
 		     300, PETSC_NULL, 300, PETSC_NULL, &mat[0][0]);    
-
       }
     }
-    MatSetOption(mat[0][0], MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);
 
+    MatSetOption(mat[0][0], MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);
 
-    if (subdomainLevel == 0) {
-      VecCreateMPI(mpiGlobal, nRankInteriorRows, nOverallInteriorRows, 
+    if (meshLevel == 0) {
+      VecCreateMPI(domainComm, nRankInteriorRows, nOverallInteriorRows, 
 		   &vecSol[0]);
-      VecCreateMPI(mpiGlobal, nRankInteriorRows, nOverallInteriorRows, 
+      VecCreateMPI(domainComm, nRankInteriorRows, nOverallInteriorRows, 
 		   &vecRhs[0]);
     } else {
-      MSG("WARNING: USE MULTILEVEL AND THIS IS A VERY BAD HACK, MAKE GENERAL!!\n");
+      MSG("WARNING: USE MULTILEVEL AND THIS IS A VERY BAD HACK 1, MAKE GENERAL!!\n");
 
-      MPI::Intracomm mpiComm = 
-	meshDistributor->getMeshLevelData().getMpiComm(subdomainLevel - 1);
-      
       int nAllRows = nRankInteriorRows;
-      mpi::globalAdd(nAllRows);
-      VecCreateMPI(mpiComm, nRankInteriorRows, nAllRows, 
+      mpi::globalAdd(coarseSpaceComm, nAllRows);
+      VecCreateMPI(coarseSpaceComm, nRankInteriorRows, nAllRows, 
 		   &vecSol[0]);
-      VecCreateMPI(mpiComm, nRankInteriorRows, nAllRows, 
+      VecCreateMPI(coarseSpaceComm, nRankInteriorRows, nAllRows, 
 		   &vecRhs[0]);
     }
 
@@ -227,19 +223,22 @@ namespace AMDiS {
       int nRankCoarseRows = cMap->getRankDofs();
       int nOverallCoarseRows = cMap->getOverallDofs();
       
-      if (subdomainLevel == 0) {
-	MatCreateAIJ(mpiCommGlobal,
+      if (meshLevel == 0) {
+	MatCreateAIJ(domainComm,
+		     nRankCoarseRows, nRankCoarseRows,
+		     nOverallCoarseRows, nOverallCoarseRows,
+		     0, nnz[i + 1][i + 1].dnnz, 0, nnz[i + 1][i + 1].onnz,
+		     &mat[i + 1][i + 1]);
+      } else if (domainComm == MPI::COMM_SELF) {
+	MatCreateAIJ(coarseSpaceComm,
 		     nRankCoarseRows, nRankCoarseRows,
 		     nOverallCoarseRows, nOverallCoarseRows,
 		     0, nnz[i + 1][i + 1].dnnz, 0, nnz[i + 1][i + 1].onnz,
 		     &mat[i + 1][i + 1]);
       } else {
-	MSG("WARNING: USE MULTILEVEL AND THIS IS A VERY BAD HACK, MAKE GENERAL!!\n");
+	MSG("WARNING: USE MULTILEVEL AND THIS IS A VERY BAD HACK 2, MAKE GENERAL!!\n");
 	
-	MPI::Intracomm mpiComm = 
-	  meshDistributor->getMeshLevelData().getMpiComm(subdomainLevel - 1);
-
-	MatCreateAIJ(mpiComm,
+	MatCreateAIJ(coarseSpaceComm,
 		     nRankCoarseRows, nRankCoarseRows,
 		     nOverallCoarseRows, nOverallCoarseRows,
 		     300, PETSC_NULL, 300, PETSC_NULL, 
@@ -250,7 +249,6 @@ namespace AMDiS {
       cMap->createVec(vecRhs[i + 1]);
     }
 
-
     for (int i = 0; i < nCoarseMap + 1; i++) {
       for (int j = 0; j < nCoarseMap + 1; j++) {
 	if (i == j)
@@ -262,36 +260,36 @@ namespace AMDiS {
 	int nColsRankMat = (j == 0 ? nRankInteriorRows : uniqueCoarseMap[j - 1]->getRankDofs());
 	int nColsOverallMat = (j == 0 ? nOverallInteriorRows : uniqueCoarseMap[j - 1]->getOverallDofs());
 
-	if (subdomainLevel == 0) {  
-	  MatCreateAIJ(mpiCommGlobal,
+	if (meshLevel == 0) {
+	  MatCreateAIJ(domainComm,
 		       nRowsRankMat, nColsRankMat,
 		       nRowsOverallMat, nColsOverallMat,
 		       0, nnz[i][j].dnnz, 0, nnz[i][j].onnz,
 		       &mat[i][j]);	  
-	  MatCreateAIJ(mpiCommGlobal,
+	  MatCreateAIJ(domainComm,
 		       nColsRankMat, nRowsRankMat,
 		       nColsOverallMat, nRowsOverallMat,
 		       0, nnz[j][i].dnnz, 0, nnz[j][i].onnz,
 		       &mat[j][i]);
 	} else {
-	  MSG("WARNING: USE MULTILEVEL AND THIS IS A VERY BAD HACK, MAKE GENERAL!!\n");	  
+	  MSG("WARNING: USE MULTILEVEL AND THIS IS A VERY BAD HACK 3, MAKE GENERAL!!\n");	  
 
 	  if (i == 0) {
 	    nRowsOverallMat = nRowsRankMat;
-	    mpi::globalAdd(nRowsOverallMat);
+	    mpi::globalAdd(coarseSpaceComm, nRowsOverallMat);
 	  }
 
 	  if (j == 0) {
 	    nColsOverallMat = nColsRankMat;
-	    mpi::globalAdd(nColsOverallMat);
+	    mpi::globalAdd(coarseSpaceComm, nColsOverallMat);
 	  }
 
-	  MatCreateAIJ(mpiCommGlobal,
+	  MatCreateAIJ(coarseSpaceComm,
 		       nRowsRankMat, nColsRankMat,
 		       nRowsOverallMat, nColsOverallMat,
 		       300, PETSC_NULL, 300, PETSC_NULL,
 		       &mat[i][j]);	  
-	  MatCreateAIJ(mpiCommGlobal,
+	  MatCreateAIJ(coarseSpaceComm,
 		       nColsRankMat, nRowsRankMat,
 		       nColsOverallMat, nRowsOverallMat,
 		       300, PETSC_NULL, 300, PETSC_NULL,
@@ -370,7 +368,7 @@ namespace AMDiS {
     int recvAllValues = 0;
     int sendValue = 
       static_cast<int>(meshDistributor->getLastMeshChangeIndex() != lastMeshNnz);
-    mpiCommGlobal.Allreduce(&sendValue, &recvAllValues, 1, MPI_INT, MPI_SUM);
+    domainComm.Allreduce(&sendValue, &recvAllValues, 1, MPI_INT, MPI_SUM);
 
     if (recvAllValues != 0 || alwaysCreateNnzStructure) {
       lastMeshNnz = meshDistributor->getLastMeshChangeIndex();
@@ -385,23 +383,24 @@ namespace AMDiS {
   {
     FUNCNAME("ParallelCoarseSpaceSolver::updateSubdomainData()");
 
-    if (mpiCommLocal.Get_size() == 1) {
-      rStartInterior = 0;
-      nGlobalOverallInterior = interiorMap->getOverallDofs();
+    if (domainComm == MPI::COMM_WORLD &&
+	domainComm == interiorMap->getMpiComm()) {
+      rStartInterior = 0;
+      nGlobalOverallInterior = interiorMap->getOverallDofs();
     } else {
       int groupRowsInterior = 0;
-      if (mpiCommLocal.Get_rank() == 0)
+      if (domainComm.Get_rank() == 0)
 	groupRowsInterior = interiorMap->getOverallDofs();
 
-      mpi::getDofNumbering(mpiCommGlobal, groupRowsInterior,
+      mpi::getDofNumbering(coarseSpaceComm, groupRowsInterior,
 			   rStartInterior, nGlobalOverallInterior);
-
+      
       int tmp = 0;
-      if (mpiCommLocal.Get_rank() == 0)
+      if (domainComm.Get_rank() == 0)
 	tmp = rStartInterior;
-
-      mpiCommLocal.Allreduce(&tmp, &rStartInterior, 1, MPI_INT, MPI_SUM);
-    }
+      
+      domainComm.Allreduce(&tmp, &rStartInterior, 1, MPI_INT, MPI_SUM);
+    }
   }
 
 }
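The single setMeshDistributor(md, level) entry point now replaces both the old communicator-pair overload and the separate setLevel() call. A sketch of the intended use (the wrapper function and include paths are placeholders, not part of this patch):

#include "parallel/MeshDistributor.h"
#include "parallel/ParallelCoarseSpaceSolver.h"

using namespace AMDiS;

void attachSolver(ParallelCoarseSpaceSolver &solver,
                  MeshDistributor *meshDistributor, int level)
{
  // Internally the solver now selects
  //   domainComm      = meshDistributor->getMpiComm(level)
  //   coarseSpaceComm = meshDistributor->getMpiComm(level - 1)   (only for level >= 1)
  solver.setMeshDistributor(meshDistributor, level);
}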
diff --git a/AMDiS/src/parallel/ParallelCoarseSpaceSolver.h b/AMDiS/src/parallel/ParallelCoarseSpaceSolver.h
index f30724a2..9b1b2ed7 100644
--- a/AMDiS/src/parallel/ParallelCoarseSpaceSolver.h
+++ b/AMDiS/src/parallel/ParallelCoarseSpaceSolver.h
@@ -79,19 +79,7 @@ namespace AMDiS {
 				  int component = -1);
 
     /// Set mesh distributor object
-    void setMeshDistributor(MeshDistributor *m);
-
-    /// Set mesh distributor object
-    void setMeshDistributor(MeshDistributor *md,
-			    MPI::Intracomm mpiComm0,
-			    MPI::Intracomm mpiComm1);
-
-    /// Set level of the interior discretization. Is only used for
-    /// multi-level methods.
-    void setLevel(int l) 
-    {
-      subdomainLevel = l;
-    }
+    void setMeshDistributor(MeshDistributor *m, int level);
 
     /// Creates matrices and vectors with respect to the coarse space.
     void createMatVec(Matrix<DOFMatrix*>& seqMat);
@@ -312,8 +300,9 @@ namespace AMDiS {
 
     /// Level of subdomain/interior discretization. Is used for multi-level
     /// methods only.
-    int subdomainLevel;
+    int meshLevel;
 
+#if 0
     /// MPI communicator on the subdomain level. If no multi-level is used
     /// this is alway MPI_COMM_SELF. In the case of a multi-level method, this
     /// is a subset of MPI_COMM_WORLD.
@@ -322,6 +311,11 @@ namespace AMDiS {
     /// MPI communicator on the coarse space level. If no multi-level method
     /// is used, this is always MPI_COMM_WORLD, otherwise a subset of it.
     MPI::Intracomm mpiCommGlobal;
+#endif
+
+    MPI::Intracomm domainComm;
+
+    MPI::Intracomm coarseSpaceComm;
 
     /// Offset for the interior DOFs of the local interior with respect to the
     /// subdomain. In the case of a one-level method, each local interior
diff --git a/AMDiS/src/parallel/ParallelDebug.cc b/AMDiS/src/parallel/ParallelDebug.cc
index 619133b0..5152477f 100644
--- a/AMDiS/src/parallel/ParallelDebug.cc
+++ b/AMDiS/src/parallel/ParallelDebug.cc
@@ -32,16 +32,20 @@ namespace AMDiS {
 
     vector<int*> sendBuffers, recvBuffers;
 
-    MPI::Request request[pdb.intBoundary.own.size() + 
-			 pdb.intBoundary.other.size() +
-                         pdb.intBoundary.periodic.size() * 2];
+    MSG("WARNING: TEST ONLY FOR LEVEL 0!\n");
+    MPI::Intracomm &mpiComm = pdb.levelData.getMpiComm(0);
+    int mpiRank = mpiComm.Get_rank();
+
+    MPI::Request request[pdb.intBoundary[0].own.size() + 
+			 pdb.intBoundary[0].other.size() +
+                         pdb.intBoundary[0].periodic.size() * 2];
     int requestCounter = 0;
 
 
     // === Send rank's boundary information. ===
 
-    for (RankToBoundMap::iterator rankIt = pdb.intBoundary.own.begin();
-	 rankIt != pdb.intBoundary.own.end(); ++rankIt) {
+    for (RankToBoundMap::iterator rankIt = pdb.intBoundary[0].own.begin();
+	 rankIt != pdb.intBoundary[0].own.end(); ++rankIt) {
 
       int nSendInt = rankIt->second.size();
       int* buffer = new int[nSendInt];
@@ -51,28 +55,28 @@ namespace AMDiS {
       sendBuffers.push_back(buffer);
       
       request[requestCounter++] =
-	pdb.mpiComm.Isend(buffer, nSendInt, MPI_INT, rankIt->first, 0);
+	mpiComm.Isend(buffer, nSendInt, MPI_INT, rankIt->first, 0);
     }
 
 
     // === Receive information from other ranks about the interior boundaries. ====
 
-    for (RankToBoundMap::iterator rankIt = pdb.intBoundary.other.begin();
-	 rankIt != pdb.intBoundary.other.end(); ++rankIt) {
+    for (RankToBoundMap::iterator rankIt = pdb.intBoundary[0].other.begin();
+	 rankIt != pdb.intBoundary[0].other.end(); ++rankIt) {
       int nRecvInt = rankIt->second.size();
       int *buffer = new int[nRecvInt];
       recvBuffers.push_back(buffer);
 
       request[requestCounter++] = 
-	pdb.mpiComm.Irecv(buffer, nRecvInt, MPI_INT, rankIt->first, 0);
+	mpiComm.Irecv(buffer, nRecvInt, MPI_INT, rankIt->first, 0);
     }
 
 
     // === To the last, do the same of periodic boundaries. ===
 
-    for (RankToBoundMap::iterator rankIt = pdb.intBoundary.periodic.begin();       
-	 rankIt != pdb.intBoundary.periodic.end(); ++rankIt) {
-      if (rankIt->first == pdb.mpiRank)
+    for (RankToBoundMap::iterator rankIt = pdb.intBoundary[0].periodic.begin();       
+	 rankIt != pdb.intBoundary[0].periodic.end(); ++rankIt) {
+      if (rankIt->first == mpiRank)
 	continue;
 
       int nValues = rankIt->second.size();
@@ -83,13 +87,13 @@ namespace AMDiS {
       sendBuffers.push_back(sBuffer);
 
       request[requestCounter++] =
-	pdb.mpiComm.Isend(sBuffer, nValues, MPI_INT, rankIt->first, 0);
+	mpiComm.Isend(sBuffer, nValues, MPI_INT, rankIt->first, 0);
 
       int *rBuffer = new int[nValues];
       recvBuffers.push_back(rBuffer);
 
       request[requestCounter++] = 
-	pdb.mpiComm.Irecv(rBuffer, nValues, MPI_INT, rankIt->first, 0);      
+	mpiComm.Irecv(rBuffer, nValues, MPI_INT, rankIt->first, 0);
     }
 
     // === Finish communication and delete all send buffers. ===
@@ -104,16 +108,16 @@ namespace AMDiS {
     // === and after this the periodic ones.                                   ===
 
     int bufCounter = 0;
-    for (RankToBoundMap::iterator rankIt = pdb.intBoundary.other.begin();
-	 rankIt != pdb.intBoundary.other.end(); ++rankIt) {
+    for (RankToBoundMap::iterator rankIt = pdb.intBoundary[0].other.begin();
+	 rankIt != pdb.intBoundary[0].other.end(); ++rankIt) {
 
       TEST_EXIT(rankIt->second.size() == 
-		pdb.intBoundary.other[rankIt->first].size())
+		pdb.intBoundary[0].other[rankIt->first].size())
 	("Boundaries does not fit together!\n");      
 
       for (unsigned int i = 0; i < rankIt->second.size(); i++) {
 	int elIndex1 = recvBuffers[bufCounter][i];
-	int elIndex2 = pdb.intBoundary.other[rankIt->first][i].neighObj.elIndex;
+	int elIndex2 = pdb.intBoundary[0].other[rankIt->first][i].neighObj.elIndex;
 
 	TEST_EXIT(elIndex1 == elIndex2)("Wrong element index at interior boundary!\n");
       }
@@ -122,18 +126,18 @@ namespace AMDiS {
     }
 
 
-    for (RankToBoundMap::iterator rankIt = pdb.intBoundary.periodic.begin();
-	 rankIt != pdb.intBoundary.periodic.end(); ++rankIt) {
-      if (rankIt->first == pdb.mpiRank)
+    for (RankToBoundMap::iterator rankIt = pdb.intBoundary[0].periodic.begin();
+	 rankIt != pdb.intBoundary[0].periodic.end(); ++rankIt) {
+      if (rankIt->first == mpiRank)
 	continue;
 
       for (unsigned int i = 0; i < rankIt->second.size(); i++) {
 	int elIndex1 = recvBuffers[bufCounter][i];
-	int elIndex2 = pdb.intBoundary.periodic[rankIt->first][i].neighObj.elIndex;
+	int elIndex2 = pdb.intBoundary[0].periodic[rankIt->first][i].neighObj.elIndex;
 
 	TEST_EXIT(elIndex1 == elIndex2)
 	  ("Wrong element index at periodic boundary el %d with rank %d: %d %d\n", 
-	   pdb.intBoundary.periodic[rankIt->first][i].rankObj.elIndex,
+	   pdb.intBoundary[0].periodic[rankIt->first][i].rankObj.elIndex,
 	   rankIt->first, elIndex1, elIndex2);
       }
 
@@ -146,7 +150,7 @@ namespace AMDiS {
   {
     FUNCNAME("ParallelDebug::testPeriodicBoundary()");
 
-    MSG(" NO TEST HERE!\n");
+    MSG("WARNING: TEST FOR PERIODIC BOUNDARIES IS DISABLED!\n");
 //     for (unsigned int i = 0; i < pdb.feSpaces.size(); i++)
 //       testPeriodicBoundary(pdb, pdb.feSpaces[i]);
   }
@@ -173,10 +177,14 @@ namespace AMDiS {
     // === 2. check: All periodic DOFs must be symmetric, i.e., if A is mapped ===
     // === to B, then B must be mapped to A.                                   ===
 
-    StdMpi<PeriodicDofMap> stdMpi(pdb.mpiComm, true);
+    MPI::Intracomm &mpiComm = pdb.levelData.getMpiComm(0);
+    int mpiSize = mpiComm.Get_size();
+    int mpiRank = mpiComm.Get_rank();
+
+    StdMpi<PeriodicDofMap> stdMpi(mpiComm, true);
 
-    if (pdb.mpiRank == 0) {
-      for (int i = 1; i < pdb.mpiSize; i++)
+    if (mpiRank == 0) {
+      for (int i = 1; i < mpiSize; i++)
 	stdMpi.recv(i);
     } else {
       stdMpi.send(0, perMap.periodicDofMap[feSpace]);
@@ -188,13 +196,13 @@ namespace AMDiS {
 
     // === The boundary DOFs are checked only on the zero rank. === 
 
-    if (pdb.mpiRank == 0) {
+    if (mpiRank == 0) {
       // Stores to each rank the periodic DOF mappings of this rank.
       map<int, PeriodicDofMap> rankToMaps;
       PeriodicDofMap dofMap = perMap.periodicDofMap[feSpace];
       rankToMaps[0] = dofMap;
 
-      for (int i = 1; i < pdb.mpiSize; i++) {
+      for (int i = 1; i < mpiSize; i++) {
 	PeriodicDofMap &otherMap = stdMpi.getRecvData(i);
 	rankToMaps[i] = otherMap;
 	
@@ -227,7 +235,7 @@ namespace AMDiS {
 		dofIt->first, dofIt->second, 
 		dofIt->second, it->second[dofIt->second]);
 
-	    for (int i = 0; i < pdb.mpiSize; i++) {
+	    for (int i = 0; i < mpiSize; i++) {
 	      if (rankToMaps[i][it->first].count(dofIt->first) == 1) {
 		MSG("[DBG]    %d -> %d in rank %d\n", 
 		    dofIt->first, rankToMaps[i][it->first][dofIt->first], i);
@@ -258,9 +266,9 @@ namespace AMDiS {
     RankToCoords sendCoords;
     map<int, vector<BoundaryType> > rankToDofType;
 
-    for (RankToBoundMap::iterator it = pdb.intBoundary.periodic.begin();
-	 it != pdb.intBoundary.periodic.end(); ++it) {
-      if (it->first == pdb.mpiRank)
+    for (RankToBoundMap::iterator it = pdb.intBoundary[0].periodic.begin();
+	 it != pdb.intBoundary[0].periodic.end(); ++it) {
+      if (it->first == mpiRank)
 	continue;
 
       for (vector<AtomicBoundary>::iterator boundIt = it->second.begin();
@@ -288,7 +296,7 @@ namespace AMDiS {
       recvCoords[it->first].resize(it->second.size());
 
 
-    StdMpi<CoordsVec> stdMpiCoords(pdb.mpiComm, true);
+    StdMpi<CoordsVec> stdMpiCoords(mpiComm, true);
     stdMpiCoords.send(sendCoords);
     stdMpiCoords.recv(recvCoords);   
     stdMpiCoords.startCommunication();
@@ -308,9 +316,9 @@ namespace AMDiS {
 
 	if (nEqual == 0) {
 	  MSG("[DBG]  %d-ith periodic DOF in boundary between ranks %d <-> %d is not correct!\n",
-	      i, pdb.mpiRank, it->first);
+	      i, mpiRank, it->first);
 	  MSG("[DBG]  Coords on rank %d: %f %f %f\n", 
-	      pdb.mpiRank, c0[0], c0[1], (pdb.mesh->getDim() == 3 ? c0[2] : 0.0));
+	      mpiRank, c0[0], c0[1], (pdb.mesh->getDim() == 3 ? c0[2] : 0.0));
 	  MSG("[DBG]  Coords on rank %d: %f %f %f\n", 
 	      it->first, c1[0], c1[1], (pdb.mesh->getDim() == 3 ? c1[2] : 0.0));
 
@@ -339,130 +347,144 @@ namespace AMDiS {
       return;
     }
 
-    /// Defines a mapping type from rank numbers to sets of DOFs.
-    typedef map<int, DofContainer> RankToDofContainer;
-
-    // Maps to each neighbour rank an array of WorldVectors. This array contains the 
-    // coordinates of all DOFs this rank shares on the interior boundary with the 
-    // neighbour rank. A rank sends the coordinates to another rank, if it owns the
-    // boundarys DOFs.
-    RankToCoords sendCoords;
-
-    // A rank receives all boundary DOFs that are at its interior boundaries but are
-    // not owned by the rank. This map stores for each rank the coordinates of DOFs
-    // this rank expectes to receive from.
-    RankToCoords recvCoords;
-
-    DOFVector<WorldVector<double> > coords(feSpace, "dofCorrds");
-    pdb.mesh->getDofIndexCoords(coords);
-
-    for (DofComm::Iterator it(pdb.dofComm.getSendDofs(), feSpace); 
-	 !it.end(); it.nextRank())
-      for (; !it.endDofIter(); it.nextDof())
-	sendCoords[it.getRank()].push_back(coords[it.getDofIndex()]);
-
-    for (DofComm::Iterator it(pdb.dofComm.getRecvDofs(), feSpace); 
-	 !it.end(); it.nextRank())
-      for (; !it.endDofIter(); it.nextDof())
-	recvCoords[it.getRank()].push_back(coords[it.getDofIndex()]);
-
-    vector<int> sendSize(pdb.mpiSize, 0);
-    vector<int> recvSize(pdb.mpiSize, 0);
-    vector<int> recvSizeBuffer(pdb.mpiSize, 0);
-    MPI::Request request[(pdb.mpiSize - 1) * 2];
-    int requestCounter = 0;
-
-    for (RankToCoords::iterator it = sendCoords.begin(); it != sendCoords.end(); ++it)
-      sendSize[it->first] = it->second.size();
-
-    for (RankToCoords::iterator it = recvCoords.begin(); it != recvCoords.end(); ++it)
-      recvSize[it->first] = it->second.size();
-
-    for (int i = 0; i < pdb.mpiSize; i++) {
-      if (i == pdb.mpiRank)
-	continue;
-
-      request[requestCounter++] = 
-	pdb.mpiComm.Isend(&(sendSize[i]), 1, MPI_INT, i, 0);
-    }   
-
-    for (int i = 0; i < pdb.mpiSize; i++) {
-      if (i == pdb.mpiRank)
+    int nLevels = pdb.levelData.getNumberOfLevels();
+    for (int level = 0; level < nLevels; level++) {
+      MPI::Intracomm &mpiComm = pdb.levelData.getMpiComm(level);
+      if (mpiComm == MPI::COMM_SELF)
 	continue;
 
-      request[requestCounter++] = 
-	pdb.mpiComm.Irecv(&(recvSizeBuffer[i]), 1, MPI_INT, i, 0);
-    }
+      int mpiRank = mpiComm.Get_rank();
+      int mpiSize = mpiComm.Get_size();
+      std::set<int> &ranks = pdb.levelData.getLevelRanks(level);
 
-    MPI::Request::Waitall(requestCounter, request);
+      TEST_EXIT(mpiSize == static_cast<int>(ranks.size()))
+	("Wrong MPI sizes:  Get_size() = %d   ranks.size() = %d\n",
+	 mpiSize, static_cast<int>(ranks.size()));
 
+      /// Defines a mapping type from rank numbers to sets of DOFs.
+      typedef map<int, DofContainer> RankToDofContainer;
+      
+      // Maps to each neighbour rank an array of WorldVectors. This array contains the 
+      // coordinates of all DOFs this rank shares on the interior boundary with the 
+      // neighbour rank. A rank sends the coordinates to another rank if it owns the
+      // boundary's DOFs.
+      RankToCoords sendCoords;
+      
+      // A rank receives all boundary DOFs that are at its interior boundaries but are
+      // not owned by the rank. This map stores, for each rank, the coordinates of the
+      // DOFs this rank expects to receive.
+      RankToCoords recvCoords;
+
+      DOFVector<WorldVector<double> > coords(feSpace, "dofCoords");
+      pdb.mesh->getDofIndexCoords(coords);
+
+      for (DofComm::Iterator it(pdb.dofComm[level].getSendDofs(), feSpace); 
+	   !it.end(); it.nextRank())
+	for (; !it.endDofIter(); it.nextDof())
+	  sendCoords[it.getRank()].push_back(coords[it.getDofIndex()]);
+
+      for (DofComm::Iterator it(pdb.dofComm[level].getRecvDofs(), feSpace); 
+	   !it.end(); it.nextRank())
+	for (; !it.endDofIter(); it.nextDof())
+	  recvCoords[it.getRank()].push_back(coords[it.getDofIndex()]);
+      
+      map<int, int> sendSize;
+      map<int, int> recvSize;
+      map<int, int> recvSizeBuffer;
+      MPI::Request request[(mpiSize - 1) * 2];
+      int requestCounter = 0;
 
-    int foundError = 0;
-    for (int i = 0; i < pdb.mpiSize; i++) {
-      if (i == pdb.mpiRank)
-	continue;
+      for (RankToCoords::iterator it = sendCoords.begin(); it != sendCoords.end(); ++it)
+	sendSize[it->first] = it->second.size();
 
-      if (recvSize[i] != recvSizeBuffer[i]) {
-	ERROR("MPI rank %d expectes to receive %d DOFs from rank %d. But this rank sends %d DOFs!\n", 
-	      pdb.mpiRank, recvSize[i], i, recvSizeBuffer[i]);	
-	foundError = 1;
-      }
-    }
-    mpi::globalAdd(foundError);
-    TEST_EXIT(foundError == 0)("Error found on at least on rank!\n");
+      for (RankToCoords::iterator it = recvCoords.begin(); it != recvCoords.end(); ++it)
+	recvSize[it->first] = it->second.size();
 
-    // === Now we know that the number of send and received DOFs fits together. ===
-    // === So we can check if also the coordinates of the communicated DOFs are ===
-    // === the same on both corresponding ranks.                                ===
-
-    StdMpi<CoordsVec> stdMpi(pdb.mpiComm, true);
-    stdMpi.send(sendCoords);
-    stdMpi.recv(recvCoords);   
-    stdMpi.startCommunication();
+      for (int rank = 0; rank < mpiSize; rank++) {
+	if (rank == mpiRank)
+	  continue;
 
-    int dimOfWorld = Global::getGeo(WORLD);
+	request[requestCounter++] = 
+	  mpiComm.Isend(&(sendSize[rank]), 1, MPI_INT, rank, 0);
+      }   
 
-    // === Compare the received with the expected coordinates. ===
+      for (int rank = 0; rank < mpiSize; rank++) {
+	if (rank == mpiRank)
+	  continue;
 
-    for (RankToCoords::iterator it = stdMpi.getRecvData().begin(); 
-	 it != stdMpi.getRecvData().end(); ++it) {
-      for (unsigned int i = 0; i < it->second.size(); i++) {
-	WorldVector<double> tmp = (it->second)[i];
-	tmp -=  recvCoords[it->first][i];
+	request[requestCounter++] = 
+	  mpiComm.Irecv(&(recvSizeBuffer[rank]), 1, MPI_INT, rank, 0);
+      }
+      
+      MPI::Request::Waitall(requestCounter, request);
 
-	if (norm(tmp) > 1e-8) {
-	  // === Print error message if the coordinates are not the same. ===
-	  if (printCoords) {
-	    MSG("[DBG] i = %d\n", i);	  
-	    stringstream oss;
-	    oss.precision(5);
-	    oss << "[DBG] Rank " << pdb.mpiRank << " from rank " << it->first
-		<< " expect coords (";
-	    for (int k = 0; k < dimOfWorld; k++) {
-	      oss << recvCoords[it->first][i][k];
-	      if (k + 1 < dimOfWorld)
-		oss << " / ";
-	    }
-	    oss << ")  received coords (";
-	    for (int k = 0; k < dimOfWorld; k++) {
-	      oss << (it->second)[i][k];
-	      if (k + 1 < dimOfWorld)
-		oss << " / ";
-	    }
-	    oss << ")";
-	    MSG("%s\n", oss.str().c_str());
-	    
-	    debug::printInfoByDof(feSpace, 
-				  *(pdb.dofComm.getRecvDofs()[0][it->first][feSpace][i]));
-	  }
-	  ERROR("Wrong DOFs in rank %d!\n", pdb.mpiRank);
+      int foundError = 0;
+      for (std::set<int>::iterator it = ranks.begin(); it != ranks.end(); ++it) {
+	if (*it == mpiRank)
+	  continue;
+	
+	if (recvSize[*it] != recvSizeBuffer[*it]) {
+	  ERROR("MPI rank %d expectes to receive %d DOFs from rank %d. But this rank sends %d DOFs!\n", 
+		mpiRank, recvSize[*it], *it, recvSizeBuffer[*it]);	
 	  foundError = 1;
-	}	 
+	}
       }
+      mpi::globalAdd(foundError);
+      TEST_EXIT(foundError == 0)("Error found on at least one rank!\n");
+      
+      // === Now we know that the number of send and received DOFs fits together. ===
+      // === So we can check if also the coordinates of the communicated DOFs are ===
+      // === the same on both corresponding ranks.                                ===
+      
+      StdMpi<CoordsVec> stdMpi(mpiComm, true);
+      stdMpi.send(sendCoords);
+      stdMpi.recv(recvCoords);   
+      stdMpi.startCommunication();
+      
+      int dimOfWorld = Global::getGeo(WORLD);
+      
+      // === Compare the received with the expected coordinates. ===
+      
+      for (RankToCoords::iterator it = stdMpi.getRecvData().begin(); 
+	   it != stdMpi.getRecvData().end(); ++it) {
+	for (unsigned int i = 0; i < it->second.size(); i++) {
+	  WorldVector<double> tmp = (it->second)[i];
+	  tmp -= recvCoords[it->first][i];
+	  
+	  if (norm(tmp) > 1e-8) {
+	    // === Print error message if the coordinates are not the same. ===
+	    if (printCoords) {
+	      MSG("[DBG] i = %d\n", i);	  
+	      stringstream oss;
+	      oss.precision(5);
+	      oss << "[DBG] Rank " << mpiRank << " from rank " << it->first
+		  << " expect coords (";
+	      for (int k = 0; k < dimOfWorld; k++) {
+		oss << recvCoords[it->first][i][k];
+		if (k + 1 < dimOfWorld)
+		  oss << " / ";
+	      }
+	      oss << ")  received coords (";
+	      for (int k = 0; k < dimOfWorld; k++) {
+		oss << (it->second)[i][k];
+		if (k + 1 < dimOfWorld)
+		  oss << " / ";
+	      }
+	      oss << ")";
+	      MSG("%s\n", oss.str().c_str());
+	      
+	      debug::printInfoByDof(feSpace, 
+				    *(pdb.dofComm[level].getRecvDofs()[it->first][feSpace][i]));
+	    }
+	    ERROR("Wrong DOFs in rank %d!\n", mpiRank);
+	    foundError = 1;
+	  }	 
+	}
+      }
+      mpi::globalAdd(foundError);
+      TEST_EXIT(foundError == 0)("Error found on at least one rank!\n");
     }
-    mpi::globalAdd(foundError);
-    TEST_EXIT(foundError == 0)("Error found on at least on rank!\n");
-
+      
     MSG("Test common dofs needed %.5f seconds\n", TIME_USED(first, clock()));
   }
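All of these consistency tests finish with the same idiom: each rank sets a local error flag, the flags are summed over the communicator (mpi::globalAdd), and every rank then takes the same pass/fail decision. In plain MPI the core of that idiom is (a generic sketch, not AMDiS code):

#include <mpi.h>

// Returns true on every rank iff no rank reported an error.
bool allRanksOk(int localError, MPI_Comm comm)
{
  int globalError = 0;
  MPI_Allreduce(&localError, &globalError, 1, MPI_INT, MPI_SUM, comm);
  // The result is identical on all ranks, so collective follow-up actions are safe.
  return globalError == 0;
}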
 
@@ -477,55 +499,68 @@ namespace AMDiS {
     DOFVector<WorldVector<double> > coords(feSpace, "tmp");
     pdb.mesh->getDofIndexCoords(coords);
 
-    typedef map<WorldVector<double>, int> CoordsIndexMap;
-    CoordsIndexMap coordsToIndex;
+    int nLevels = pdb.levelData.getNumberOfLevels();
+    for (int level = 0; level < nLevels; level++) {
+      MPI::Intracomm &mpiComm = pdb.levelData.getMpiComm(level);
+      if (mpiComm == MPI::COMM_SELF)
+	continue;
 
-    DOFIterator<WorldVector<double> > it(&coords, USED_DOFS);
-    for (it.reset(); !it.end(); ++it) {
-      coordsToIndex[(*it)] = (*(pdb.dofMaps[0]))[feSpace][it.getDOFIndex()].global;
-      // MSG("   CHECK FOR DOF %d/%d AT COORDS %f %f %f\n",
-      // 	  it.getDOFIndex(),
-      // 	  coordsToIndex[(*it)], 
-      // 	  (*it)[0], (*it)[1], (pdb.mesh->getDim() == 3 ? (*it)[2] : 0.0));
-    }
+      int mpiRank = mpiComm.Get_rank();
+      int mpiSize = mpiComm.Get_size();
 
-    StdMpi<CoordsIndexMap> stdMpi(pdb.mpiComm, true);
-    for (DofComm::Iterator it(pdb.dofComm.getSendDofs(), feSpace); 
-	 !it.end(); it.nextRank())
-      stdMpi.send(it.getRank(), coordsToIndex);
-    for (DofComm::Iterator it(pdb.dofComm.getRecvDofs(), feSpace); 
-	 !it.end(); it.nextRank())
-      stdMpi.recv(it.getRank());
-   
-    stdMpi.startCommunication();
+      typedef map<WorldVector<double>, int> CoordsIndexMap;
+      CoordsIndexMap coordsToIndex;
+      
+      DOFIterator<WorldVector<double> > it(&coords, USED_DOFS);
+      for (it.reset(); !it.end(); ++it) {
+	coordsToIndex[(*it)] = (*(pdb.dofMaps[level]))[feSpace][it.getDOFIndex()].global;
+	// MSG("   CHECK FOR DOF %d/%d AT COORDS %f %f %f\n",
+	//     it.getDOFIndex(),
+	//     coordsToIndex[(*it)], 
+	//     (*it)[0], (*it)[1], (pdb.mesh->getDim() == 3 ? (*it)[2] : 0.0));
+      }
+      
+      StdMpi<CoordsIndexMap> stdMpi(mpiComm, true);
+      for (DofComm::Iterator it(pdb.dofComm[level].getSendDofs(), feSpace); 
+	   !it.end(); it.nextRank())
+	stdMpi.send(it.getRank(), coordsToIndex);
+      for (DofComm::Iterator it(pdb.dofComm[level].getRecvDofs(), feSpace); 
+	   !it.end(); it.nextRank())
+	stdMpi.recv(it.getRank());
+      
+      stdMpi.startCommunication();
+      
+      int foundError = 0;
+      for (DofComm::Iterator it(pdb.dofComm[level].getRecvDofs(), feSpace); 
+	   !it.end(); it.nextRank()) {
+	CoordsIndexMap& otherCoords = stdMpi.getRecvData(it.getRank());
 
-    int foundError = 0;
-    for (DofComm::Iterator it(pdb.dofComm.getRecvDofs(), feSpace); 
-	 !it.end(); it.nextRank()) {
-      CoordsIndexMap& otherCoords = stdMpi.getRecvData(it.getRank());
-
-      for (CoordsIndexMap::iterator coordsIt = otherCoords.begin();
-	   coordsIt != otherCoords.end(); ++coordsIt) {
-	if (coordsToIndex.count(coordsIt->first) == 1 &&
-	    coordsToIndex[coordsIt->first] != coordsIt->second) {
-	  stringstream oss;
-	  oss.precision(5);
-	  oss << "DOF at coords ";
-	  for (int i = 0; i < Global::getGeo(WORLD); i++)
-	    oss << coordsIt->first[i] << " ";
-	  oss << " do not fit together on rank " 
-	      << pdb.getMpiRank() << " (global index: " 
-	      << coordsToIndex[coordsIt->first] << ") and on rank "
-	      << it.getRank() << " (global index: " << coordsIt->second << ")";
-
-	  MSG("[DBG] %s\n", oss.str().c_str());
-	  foundError = 1;
+	for (; !it.endDofIter(); it.nextDof()) {
+	  WorldVector<double> recvCoords = coords[it.getDofIndex()];
+	  
+	  TEST_EXIT_DBG(otherCoords.count(recvCoords))("Should not happen!\n");
+
+	  if (coordsToIndex[recvCoords] != otherCoords[recvCoords]) {
+	    stringstream oss;
+	    oss.precision(5);
+	    oss << "DOF at coords ";
+	    for (int i = 0; i < Global::getGeo(WORLD); i++)
+	      oss << recvCoords[i] << " ";
+	    oss << " do not fit together on rank " 
+		<< mpiComm.Get_rank() << " (global index: " 
+		<< coordsToIndex[recvCoords] << ") and on rank "
+		<< it.getRank() << " (global index: " << otherCoords[recvCoords] 
+		<< ")  - LEVEL " << level;
+	    
+	    MSG("[DBG] %s\n", oss.str().c_str());
+	    foundError = 1;	    
+	  }
 	}
       }
-    }
 
-    mpi::globalAdd(foundError);
-    TEST_EXIT(foundError == 0)("Error found on at least on rank!\n");
+      mpi::globalAdd(foundError);
+      TEST_EXIT(foundError == 0)("Error found on at least one rank!\n");
+    }
   }
 
 
@@ -547,17 +582,20 @@ namespace AMDiS {
       elInfo = stack.traverseNext(elInfo);      
     }
     
+
+    MPI::Intracomm mpiComm = MPI::COMM_WORLD;
+
     int globalMinIndex, globalMaxIndex;
-    pdb.mpiComm.Allreduce(&minElementIndex, &globalMinIndex, 1, MPI_INT, MPI_MIN);
-    pdb.mpiComm.Allreduce(&maxElementIndex, &globalMaxIndex, 1, MPI_INT, MPI_MAX);
+    mpiComm.Allreduce(&minElementIndex, &globalMinIndex, 1, MPI_INT, MPI_MIN);
+    mpiComm.Allreduce(&maxElementIndex, &globalMaxIndex, 1, MPI_INT, MPI_MAX);
 
     TEST_EXIT(globalMinIndex == 0)("No macro element with index 0!\n");
     for (int i = 0; i <= globalMaxIndex; i++) {
       int sendId = macroElements.count(i);
       int recvId = 0;
-      pdb.mpiComm.Allreduce(&sendId, &recvId, 1, MPI_INT, MPI_SUM);
+      mpiComm.Allreduce(&sendId, &recvId, 1, MPI_INT, MPI_SUM);
 
-      if (recvId != 1 && pdb.mpiRank == 0) {
+      if (recvId != 1 && mpiComm.Get_rank() == 0) {
 	if (recvId == 0) {
 	  ERROR_EXIT("Element %d has no member partition!\n", i);
 	}
@@ -574,27 +612,31 @@ namespace AMDiS {
   {
     FUNCNAME("ParallelDebug::testDofContainerCommunication()");    
 
+    MSG("WARNING: ONLY LEVEL 0 TEST!\n");
+
     typedef map<int, map<const FiniteElemSpace*, DofContainer> >::iterator it_type;
 
+    MPI::Intracomm &mpiComm = pdb.levelData.getMpiComm(0);
     map<int, int> sendNumber;
-    for (it_type it = pdb.dofComm.getSendDofs()[0].begin(); 
-	 it != pdb.dofComm.getSendDofs()[0].end(); ++it)
+    for (it_type it = pdb.dofComm[0].getSendDofs().begin(); 
+	 it != pdb.dofComm[0].getSendDofs().end(); ++it)
       for (map<const FiniteElemSpace*, DofContainer>::iterator dcIt = it->second.begin(); 
 	   dcIt != it->second.end(); ++dcIt)
 	sendNumber[it->first] += dcIt->second.size();
        
     map<int, int> recvNumber;
-    for (it_type it = pdb.dofComm.getRecvDofs()[0].begin(); 
-	 it != pdb.dofComm.getRecvDofs()[0].end(); ++it)
+    for (it_type it = pdb.dofComm[0].getRecvDofs().begin(); 
+	 it != pdb.dofComm[0].getRecvDofs().end(); ++it)
       for (map<const FiniteElemSpace*, DofContainer>::iterator dcIt = it->second.begin(); 
 	   dcIt != it->second.end(); ++dcIt)
 	recvNumber[it->first] += dcIt->second.size();
     
-    StdMpi<int> stdMpi(pdb.mpiComm);
+    StdMpi<int> stdMpi(mpiComm);
     stdMpi.send(sendNumber);
-    for (it_type it = pdb.dofComm.getRecvDofs()[0].begin(); 
-	 it != pdb.dofComm.getRecvDofs()[0].end(); ++it)
+    for (it_type it = pdb.dofComm[0].getRecvDofs().begin(); 
+	 it != pdb.dofComm[0].getRecvDofs().end(); ++it)
       stdMpi.recv(it->first);
+
     stdMpi.startCommunication();
 
     int foundError = 0;
@@ -651,7 +693,7 @@ namespace AMDiS {
     ERROR_EXIT("Rewrite this function!\n");
 
 #if 0
-    if (rank == -1 || pdb.mpiRank == rank) {
+    if (rank == -1 || MPI::COMM_WORLD.Get_rank() == rank) {
       const FiniteElemSpace *feSpace = pdb.feSpaces[0];
 
       cout << "====== DOF MAP LOCAL -> GLOBAL ====== " << endl;
@@ -695,7 +737,7 @@ namespace AMDiS {
 				    DofContainer& rankDofs,
 				    DofContainer& rankAllDofs)
   {
-    if (rank == -1 || pdb.mpiRank == rank) {
+    if (rank == -1 || MPI::COMM_WORLD.Get_rank() == rank) {
       cout << "====== RANK DOF INFORMATION ====== " << endl;
 
       cout << "  RANK OWNED DOFS: " << endl;
@@ -720,20 +762,18 @@ namespace AMDiS {
 
 
   void ParallelDebug::printBoundaryInfo(InteriorBoundary &intBoundary,
-					int level, 
 					bool force)
   {
     FUNCNAME("ParallelDebug::printBoundaryInfo()");
 
-    int tmp = 0;
-    Parameters::get("parallel->debug->print boundary info", tmp);
-    if (tmp <= 0 && force == false)
-      return;
+    // int tmp = 0;
+    // Parameters::get("parallel->debug->print boundary info", tmp);
+    // if (tmp <= 0 && force == false)
+    //   return;
 
     MSG("Interior boundary info:\n");
 
-    for (InteriorBoundary::iterator it(intBoundary.own, level); 
-	 !it.end(); ++it) {
+    for (InteriorBoundary::iterator it(intBoundary.own); !it.end(); ++it) {
       MSG("Rank owned boundary with rank %d: \n", it.getRank());
       MSG("  ranks obj-ind: %d  sub-obj: %d   ith-obj: %d\n",
 	  it->rankObj.elIndex, it->rankObj.subObj, it->rankObj.ithObj);
@@ -741,8 +781,7 @@ namespace AMDiS {
 	  it->neighObj.elIndex, it->neighObj.subObj, it->neighObj.ithObj);
     }
 
-    for (InteriorBoundary::iterator it(intBoundary.other, level); 
-	 !it.end(); ++it) {
+    for (InteriorBoundary::iterator it(intBoundary.other); !it.end(); ++it) {
       MSG("Other owned boundary with rank %d: \n", it.getRank());
       MSG("  ranks obj-ind: %d  sub-obj: %d   ith-obj: %d\n",
 	  it->rankObj.elIndex, it->rankObj.subObj, it->rankObj.ithObj);
@@ -750,8 +789,7 @@ namespace AMDiS {
 	  it->neighObj.elIndex, it->neighObj.subObj, it->neighObj.ithObj);
     }
 
-    for (InteriorBoundary::iterator it(intBoundary.periodic, level); 
-	 !it.end(); ++it) {
+    for (InteriorBoundary::iterator it(intBoundary.periodic); !it.end(); ++it) {
       MSG("Periodic boundary (ID %d) with rank %d: \n", 
 	  it->type, it.getRank());
       MSG("  ranks obj-ind: %d  sub-obj: %d   ith-obj: %d\n",
@@ -884,15 +922,17 @@ namespace AMDiS {
   {
     FUNCNAME("ParallelDebug::followBoundary()");
 
-    for (InteriorBoundary::iterator it(pdb.intBoundary.own); !it.end(); ++it)
+    int mpiRank = MPI::COMM_WORLD.Get_rank();
+
+    for (InteriorBoundary::iterator it(pdb.intBoundary[0].own); !it.end(); ++it)
       if (followThisBound(it->rankObj.elIndex, it->neighObj.elIndex))
-	debug::writeLocalElementDofs(pdb.mpiRank, 
+	debug::writeLocalElementDofs(mpiRank, 
 				     it->rankObj.elIndex, 
 				     pdb.feSpaces[0]);
 
-    for (InteriorBoundary::iterator it(pdb.intBoundary.other); !it.end(); ++it)
+    for (InteriorBoundary::iterator it(pdb.intBoundary[0].other); !it.end(); ++it)
       if (followThisBound(it->rankObj.elIndex, it->neighObj.elIndex))
-	debug::writeLocalElementDofs(pdb.mpiRank,
+	debug::writeLocalElementDofs(mpiRank,
 				     it->rankObj.elIndex,
 				     pdb.feSpaces[0]);
   }
@@ -916,89 +956,144 @@ namespace AMDiS {
 	code.toStr().c_str());
   }
 
+
+#if 0
+  //  compare PETSc matrices
+
+    if (MPI::COMM_WORLD.Get_rank() == 0) {
+      Mat matOld, matNew;
+      MatCreate(PETSC_COMM_SELF, &matOld);
+      MatCreate(PETSC_COMM_SELF, &matNew);
+
+      PetscViewer fdOld, fdNew;
+      PetscViewerBinaryOpen(PETSC_COMM_SELF, "coarse_interior_old.mat", FILE_MODE_READ, &fdOld);
+      PetscViewerBinaryOpen(PETSC_COMM_SELF, "coarse_interior_new.mat", FILE_MODE_READ, &fdNew);
+      MatLoad(matOld, fdOld);
+      MatLoad(matNew, fdNew);
+
+      int m, n;
+      MatGetSize(matOld, &m, &n);
+      MSG("MAT OLD SIZE: %d %d\n", m, n);
+
+      MatGetSize(matNew, &m, &n);
+      MSG("MAT NEW SIZE: %d %d\n", m, n);
+
+      for (int i = 0; i < m; i++) {
+	PetscInt nColsOld, nColsNew;
+	const PetscInt *colsOld, *colsNew;
+	const PetscScalar *valsOld, *valsNew;
+
+	MatGetRow(matOld, i, &nColsOld, &colsOld, &valsOld);
+	MatGetRow(matNew, i, &nColsNew, &colsNew, &valsNew);
+
+	MSG("  row: %d with cols %d %d\n", i, nColsOld, nColsNew);
+
+	if (nColsOld != nColsNew) {
+	  MSG("WRONG COLS NUMBER in ROW %d: %d %d\n", i, nColsOld, nColsNew);
+	} else {
+	  for (int j = 0; j < nColsOld; j++) {
+	    if (colsOld[j] != colsNew[j]) {
+	      MSG("WRONG COLS IN ROW %d: %d %d \n", i, colsOld[j], colsNew[j]);
+	    } else {
+	      if (fabs(valsOld[j] - valsNew[j]) > 1e-8) {
+		MSG("WRONG VALUES IN ROW: %d\n", i);
+	      }
+	    }
+	  }
+	}
+
+	MatRestoreRow(matOld, i, &nColsOld, &colsOld, &valsOld);
+	MatRestoreRow(matNew, i, &nColsNew, &colsNew, &valsNew);
+      }
+
+      MSG("FINISHED-TEST\n");
+    }
+#endif
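If only a pass/fail answer is needed, the row-by-row comparison in the disabled block above could presumably be reduced to PETSc's MatEqual (a sketch reusing matOld/matNew from that block; assumes both matrices have the same layout):

PetscBool isEqual = PETSC_FALSE;
MatEqual(matOld, matNew, &isEqual);
if (isEqual == PETSC_FALSE)
  MSG("Matrices coarse_interior_old/new differ!\n");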
+
+
   void ParallelDebug::writeCsvElementMap(const FiniteElemSpace *feSpace,
-			       ParallelDofMapping &dofMap,
-			       string prefix, 
-			       string postfix)
+					 ParallelDofMapping &dofMap,
+					 string prefix, 
+					 string postfix)
   {
-	FUNCNAME("ParallelDebug::writeCsvElementMap()");
+    FUNCNAME("ParallelDebug::writeCsvElementMap()");
 
-	MSG("writing local Element map to CSV File \n");
+    MSG("writing local Element map to CSV File \n");
 
-	Mesh *mesh = feSpace->getMesh();
+    Mesh *mesh = feSpace->getMesh();
 
-	if (mesh->getDim() != 3)
-	  return;
+    if (mesh->getDim() != 3)
+      return;
 
-	stringstream filename;
+    stringstream filename;
     filename << prefix << "-" << MPI::COMM_WORLD.Get_rank() << "." << postfix;
 
-	ofstream file;
+    ofstream file;
     file.open(filename.str().c_str());
 
-	DOFVector<WorldVector<double> > dofCoords(feSpace, "DOF coords");
+    DOFVector<WorldVector<double> > dofCoords(feSpace, "DOF coords");
     mesh->getDofIndexCoords(dofCoords);
 
-	TraverseStack stack;
-	ElInfo *elInfo = stack.traverseFirst(mesh,	
-									-1, 
-									Mesh::CALL_EVERY_EL_POSTORDER);
-
-	while (elInfo) {
-		/*
-		MSG("Start traverse element %d in level %d\n",
-			elInfo->getElement()->getIndex(),
-			elInfo->getLevel());
-		 */
-
-		// check if Element is Leaflevel
-		// because higherOrderDofs(=NON VERTICES DOFS) are not saved in nonleaf Elements
-		if (elInfo->getElement()->isLeaf()) {
-
-			//traverse ALL DOFS
-  			ElementDofIterator elDofIter(feSpace, true);
-			elDofIter.reset(elInfo->getElement());
+    TraverseStack stack;
+    ElInfo *elInfo = stack.traverseFirst(mesh,	
+					 -1, 
+					 Mesh::CALL_EVERY_EL_POSTORDER);
+
+    while (elInfo) {
+      /*
+	MSG("Start traverse element %d in level %d\n",
+	elInfo->getElement()->getIndex(),
+	elInfo->getLevel());
+      */
+
+      // Check whether the element is a leaf element, because higher-order DOFs
+      // (i.e., non-vertex DOFs) are stored only on leaf elements.
+      if (elInfo->getElement()->isLeaf()) {
+
+	//traverse ALL DOFS
+	ElementDofIterator elDofIter(feSpace, true);
+	elDofIter.reset(elInfo->getElement());
 	
-			int locDofNumber = 0;  
+	int locDofNumber = 0;  
 
-			do {
+	do {
    	
-		    	WorldVector<double> c = dofCoords[elDofIter.getDof()];
+	  WorldVector<double> c = dofCoords[elDofIter.getDof()];
 	
-				file << elInfo->getElement()->getIndex() << "\t";	//element number
-				file << elInfo->getLevel() << "\t";					//element Level
-				file << elDofIter.getDof() << "\t";					//dof number
-				file << elDofIter.getCurrentPos()+1 << "\t";		//dof type (+1 to pseudo convert to geoIndex, does not work for CENTER DOFS( geoIndex=0))
-				file << c[0] << "\t";								//dof coords
-				file << c[1] << "\t";	
-				file << c[2] << "\t";
-				file << locDofNumber << "\t";						//local Dof number
-				file << elInfo->getType() << "\n";					//element Type
+	  file << elInfo->getElement()->getIndex() << "\t";	//element number
+	  file << elInfo->getLevel() << "\t";					//element Level
+	  file << elDofIter.getDof() << "\t";					//dof number
+	  file << elDofIter.getCurrentPos()+1 << "\t";		//dof type (+1 to pseudo-convert to geoIndex; does not work for CENTER DOFs (geoIndex = 0))
+	  file << c[0] << "\t";								//dof coords
+	  file << c[1] << "\t";	
+	  file << c[2] << "\t";
+	  file << locDofNumber << "\t";						//local Dof number
+	  file << elInfo->getType() << "\n";					//element Type
 		
-				locDofNumber++;
-		    } while (elDofIter.next());
-		} else {
+	  locDofNumber++;
+	} while (elDofIter.next());
+      } else {
 	
-			// traverse only VERTEX DOFS
-			for (int i = 0; i < mesh->getGeo(VERTEX); i++) {
-				DegreeOfFreedom dof = elInfo->getElement()->getDof(i, 0);	
+	// traverse only VERTEX DOFS
+	for (int i = 0; i < mesh->getGeo(VERTEX); i++) {
+	  DegreeOfFreedom dof = elInfo->getElement()->getDof(i, 0);	
 
-				WorldVector<double> c = dofCoords[dof];
+	  WorldVector<double> c = dofCoords[dof];
 		
-				file << elInfo->getElement()->getIndex() << "\t";	//element number
-				file << elInfo->getLevel() << "\t";					//element Level
-				file << dof << "\t";								//dof number
-				file << VERTEX << "\t";								//dof type
-				file << c[0] << "\t";								//dof coords
-				file << c[1] << "\t";	
-				file << c[2] << "\t";
-				file << i << "\t";									//local Dof number
-				file << elInfo->getType() << "\n";					//element Type
-			}		
-		}
-
-   		elInfo = stack.traverseNext(elInfo);
-	}
+	  file << elInfo->getElement()->getIndex() << "\t";	//element number
+	  file << elInfo->getLevel() << "\t";					//element Level
+	  file << dof << "\t";								//dof number
+	  file << VERTEX << "\t";								//dof type
+	  file << c[0] << "\t";								//dof coords
+	  file << c[1] << "\t";	
+	  file << c[2] << "\t";
+	  file << i << "\t";									//local Dof number
+	  file << elInfo->getType() << "\n";					//element Type
+	}		
+      }
+
+      elInfo = stack.traverseNext(elInfo);
+    }
 
   }
 }
diff --git a/AMDiS/src/parallel/ParallelDebug.h b/AMDiS/src/parallel/ParallelDebug.h
index db44c5d2..8106f523 100644
--- a/AMDiS/src/parallel/ParallelDebug.h
+++ b/AMDiS/src/parallel/ParallelDebug.h
@@ -144,8 +144,6 @@ namespace AMDiS {
      * all ranks.
      *
      * \param[in]  intBoundary   The boundary object to be printed.
-     * \param[in]  level         Mesh level number for which the boundary should
-     *                           be printed.
      * \param[in]  force         If true, the information is always printed to
      *                           screen. Otherwise, this is done only if AMDiS
      *                           is compiled in debug mode or if the init file
@@ -153,7 +151,6 @@ namespace AMDiS {
      *                           is set.
      */
     static void printBoundaryInfo(InteriorBoundary &intBoundary,
-				  int level = 0, 
 				  bool force = false);
 
 
diff --git a/AMDiS/src/parallel/ParallelDofMapping.cc b/AMDiS/src/parallel/ParallelDofMapping.cc
index 4a39b2d0..e69ac719 100644
--- a/AMDiS/src/parallel/ParallelDofMapping.cc
+++ b/AMDiS/src/parallel/ParallelDofMapping.cc
@@ -11,7 +11,6 @@
 
 
 #include "parallel/ParallelDofMapping.h"
-#include "parallel/MeshLevelData.h"
 #include "parallel/StdMpi.h"
 
 namespace AMDiS {
@@ -38,10 +37,8 @@ namespace AMDiS {
   }
 
 
-  ComponentDofMap::ComponentDofMap(MeshLevelData* ld)
-    : levelData(ld),
-      meshLevel(0),
-      dofComm(NULL),
+  ComponentDofMap::ComponentDofMap()
+    : dofComm(NULL),
       feSpace(NULL),
       globalMapping(false)
   {
@@ -109,11 +106,11 @@ namespace AMDiS {
 
     StdMpi<vector<int> > stdMpi(mpiComm);
 
-    for (DofComm::Iterator it(dofComm->getSendDofs(), 0, feSpace); 
+    for (DofComm::Iterator it(dofComm->getSendDofs(), feSpace); 
 	 !it.end(); it.nextRank()) {
       int rank = it.getRank();
-      if (meshLevel > 0)
-	rank = levelData->mapRank(rank, 0, meshLevel);
+      // if (meshLevel > 0)
+      //   rank = levelData->mapRank(rank, 0, meshLevel);
       
       for (; !it.endDofIter(); it.nextDof())
 	if (dofMap.count(it.getDofIndex()) && 
@@ -127,7 +124,7 @@ namespace AMDiS {
 
     // === Check from which ranks this rank must receive some data. ===
 
-    for (DofComm::Iterator it(dofComm->getRecvDofs(), 0, feSpace); 
+    for (DofComm::Iterator it(dofComm->getRecvDofs(), feSpace); 
 	 !it.end(); it.nextRank()) {
       bool recvFromRank = false;
       for (; !it.endDofIter(); it.nextDof()) {
@@ -139,8 +136,8 @@ namespace AMDiS {
 
       if (recvFromRank) {
 	int rank = it.getRank();
-	if (meshLevel > 0)
-	  rank = levelData->mapRank(rank, 0, meshLevel);
+	// if (meshLevel > 0)
+	//   rank = levelData->mapRank(rank, 0, meshLevel);
 
 	stdMpi.recv(rank);
       }
@@ -154,11 +151,11 @@ namespace AMDiS {
 
     // === And set the global indices for all DOFs that are not owned by rank. ===
     
-    for (DofComm::Iterator it(dofComm->getRecvDofs(), 0, feSpace);
+    for (DofComm::Iterator it(dofComm->getRecvDofs(), feSpace);
 	 !it.end(); it.nextRank()) {
       int rank = it.getRank();
-      if (meshLevel > 0)
-	rank = levelData->mapRank(rank, 0, meshLevel);
+      // if (meshLevel > 0)
+      //   rank = levelData->mapRank(rank, 0, meshLevel);
 
       int i = 0;
       for (; !it.endDofIter(); it.nextDof())
@@ -170,8 +167,7 @@ namespace AMDiS {
 
   void FeSpaceData::init(vector<const FiniteElemSpace*> &f0,
 			 vector<const FiniteElemSpace*> &f1,
-			 bool globalMapping,
-			 MeshLevelData &levelData)
+			 bool globalMapping)
   {
     componentSpaces = f0;
     feSpaces = f1;
@@ -180,7 +176,7 @@ namespace AMDiS {
       if (componentData.count(*it))
 	componentData.find(*it)->second.clear();
       else
-	componentData.insert(make_pair(*it, ComponentDofMap(&levelData)));   
+	componentData.insert(make_pair(*it, ComponentDofMap()));   
 
       componentData[*it].setFeSpace(*it);    
       componentData[*it].setGlobalMapping(globalMapping);
@@ -190,8 +186,7 @@ namespace AMDiS {
 
   void ComponentData::init(vector<const FiniteElemSpace*> &f0,
 			   vector<const FiniteElemSpace*> &f1,
-			   bool globalMapping,
-			   MeshLevelData &levelData)
+			   bool globalMapping)
   {
     componentSpaces = f0;
     feSpaces = f1;
@@ -200,7 +195,7 @@ namespace AMDiS {
       if (componentData.count(component)) 
 	componentData.find(component)->second.clear();
       else
-	componentData.insert(make_pair(component, ComponentDofMap(&levelData)));
+	componentData.insert(make_pair(component, ComponentDofMap()));
       
       componentData[component].setFeSpace(componentSpaces[component]);
       componentData[component].setGlobalMapping(globalMapping);
@@ -210,9 +205,7 @@ namespace AMDiS {
 
   ParallelDofMapping::ParallelDofMapping(DofMappingMode mode, 
 					 bool matIndexFromGlobal) 
-    : meshLevel(0),
-      levelData(NULL),
-      dofComm(NULL),
+    : dofComm(NULL),
       globalMapping(true),
       needMatIndexFromGlobal(matIndexFromGlobal),
       nRankDofs(1),
@@ -237,19 +230,17 @@ namespace AMDiS {
   } 
 
 
-  void ParallelDofMapping::init(MeshLevelData &ldata,
-				vector<const FiniteElemSpace*> &fe,
+  void ParallelDofMapping::init(vector<const FiniteElemSpace*> &fe,
 				vector<const FiniteElemSpace*> &uniqueFe,
 				bool b)
   {
     FUNCNAME("ParallelDofMapping::init()");
 
-    levelData = &ldata;
     globalMapping = b;
     componentSpaces = fe;
     feSpaces = uniqueFe;
 
-    data->init(fe, uniqueFe, globalMapping, ldata);
+    data->init(fe, uniqueFe, globalMapping);
   }
 
 
@@ -257,8 +248,6 @@ namespace AMDiS {
   {
     FUNCNAME("ParallelDofMapping::clear()");
 
-    TEST_EXIT_DBG(levelData)("No mesh level data object defined!\n");
-
     for (ComponentIterator &it = data->getIteratorData(); !it.end(); it.next())
       it->clear();
 
@@ -270,13 +259,12 @@ namespace AMDiS {
   }
 
   
-  void ParallelDofMapping::setMpiComm(MPI::Intracomm &m, int l)
+  void ParallelDofMapping::setMpiComm(MPI::Intracomm &m)
   {
     mpiComm = m;
-    meshLevel = l;
 
     for (ComponentIterator &it = data->getIteratorData(); !it.end(); it.next())
-      it->setMpiComm(m, l);
+      it->setMpiComm(m);
   }
 
 
@@ -410,7 +398,7 @@ namespace AMDiS {
       // === interior boundaries.                                         ===
 
       StdMpi<vector<DegreeOfFreedom> > stdMpi(mpiComm);
-      for (DofComm::Iterator it(dofComm->getSendDofs(), 0, componentSpaces[component]); 
+      for (DofComm::Iterator it(dofComm->getSendDofs(), componentSpaces[component]); 
 	   !it.end(); it.nextRank()) {
 	vector<DegreeOfFreedom> sendGlobalDofs;
 	
@@ -423,29 +411,27 @@ namespace AMDiS {
 	      sendGlobalDofs.push_back(dofToMatIndex.get(component, it.getDofIndex()));
 	
 	int rank = it.getRank();
-	if (meshLevel > 0)
-	  rank = levelData->mapRank(rank, 0, meshLevel);
-	
+	// if (meshLevel > 0)
+	//   rank = levelData->mapRank(rank, 0, meshLevel);
 	stdMpi.send(rank, sendGlobalDofs);
       }
       
-      for (DofComm::Iterator it(dofComm->getRecvDofs(), 0, componentSpaces[component]); 
+      for (DofComm::Iterator it(dofComm->getRecvDofs(), componentSpaces[component]); 
 	   !it.end(); it.nextRank()) {
 	int rank = it.getRank();
-	if (meshLevel > 0)
-	  rank = levelData->mapRank(rank, 0, meshLevel);
-
+	// if (meshLevel > 0)
+	//   rank = levelData->mapRank(rank, 0, meshLevel);      
 	stdMpi.recv(rank);
       }
 
       stdMpi.startCommunication();
       
-      for (DofComm::Iterator it(dofComm->getRecvDofs(), 0, componentSpaces[component]); 
+      for (DofComm::Iterator it(dofComm->getRecvDofs(), componentSpaces[component]); 
 	   !it.end(); it.nextRank()) {
 	int rank = it.getRank();
-	if (meshLevel > 0)
-	  rank = levelData->mapRank(rank, 0, meshLevel);
-	
+	// if (meshLevel > 0)
+	//   rank = levelData->mapRank(rank, 0, meshLevel);
+
 	int counter = 0;
 	for (; !it.endDofIter(); it.nextDof()) {
 	  if (dofMap.count(it.getDofIndex())) {
@@ -463,7 +449,7 @@ namespace AMDiS {
 
       stdMpi.clear();
 
-      for (DofComm::Iterator it(dofComm->getPeriodicDofs(), 0, componentSpaces[component]); 
+      for (DofComm::Iterator it(dofComm->getPeriodicDofs(), componentSpaces[component]); 
 	   !it.end(); it.nextRank()) {
 	vector<DegreeOfFreedom> sendGlobalDofs;
 	
@@ -479,21 +465,21 @@ namespace AMDiS {
 	  }
 	
 	int rank = it.getRank();
-	if (meshLevel > 0)
-	  rank = levelData->mapRank(rank, 0, meshLevel);
-	
+	// if (meshLevel > 0)
+	//   rank = levelData->mapRank(rank, 0, meshLevel);
+
 	stdMpi.send(rank, sendGlobalDofs);
 	stdMpi.recv(rank);
       }
 
       stdMpi.startCommunication();
 
-      for (DofComm::Iterator it(dofComm->getPeriodicDofs(), 0, componentSpaces[component]); 
+      for (DofComm::Iterator it(dofComm->getPeriodicDofs(), componentSpaces[component]); 
 	   !it.end(); it.nextRank()) {
 
 	int rank = it.getRank();
-	if (meshLevel > 0)
-	  rank = levelData->mapRank(rank, 0, meshLevel);
+	// if (meshLevel > 0)
+	//   rank = levelData->mapRank(rank, 0, meshLevel);
 
 	int counter = 0;
 
diff --git a/AMDiS/src/parallel/ParallelDofMapping.h b/AMDiS/src/parallel/ParallelDofMapping.h
index f2774941..176c03a6 100644
--- a/AMDiS/src/parallel/ParallelDofMapping.h
+++ b/AMDiS/src/parallel/ParallelDofMapping.h
@@ -118,16 +118,9 @@ namespace AMDiS {
   class ComponentDofMap
   {
   public:
-    /// This constructor exists only to create std::map of this class and make
-    /// use of the operator [] for read access. Should never be called.
-    ComponentDofMap() 
-    {
-      ERROR_EXIT("Should not be called!\n");
-    }
+    /// Constructor.
+    ComponentDofMap();
      
-    /// This is the only valid constructur to be used. 
-    ComponentDofMap(MeshLevelData* ld);
-
     /// Clears all data of the mapping.
     void clear();
 
@@ -182,7 +175,8 @@ namespace AMDiS {
     {
       FUNCNAME("ComponentDofMap::insertNonRankDof()");
       
-      TEST_EXIT_DBG(dofMap.count(dof0) == 0)("Should not happen!\n");
+      TEST_EXIT_DBG(dofMap.count(dof0) == 0)
+	("DOF %d is already in mapping!\n", dof0);
       
       dofMap[dof0].local = dof1;
       nLocalDofs++;
@@ -251,10 +245,9 @@ namespace AMDiS {
       dofComm = &dc;
     }
 
-    void setMpiComm(MPI::Intracomm &m, int l)
+    void setMpiComm(MPI::Intracomm &m)
     {
       mpiComm = m;
-      meshLevel = l;
     }
 
   private:
@@ -266,15 +259,11 @@ namespace AMDiS {
     void computeNonLocalIndices();
 
   private:
-    MeshLevelData *levelData;
-
     /// DOF communicator for all DOFs on interior boundaries.
     DofComm *dofComm;
 
     MPI::Intracomm mpiComm;
 
-    int meshLevel;
-
     /// The FE space this mapping belongs to. This is used only the get the
     /// correct DOF communicator in \ref dofComm.
     const FiniteElemSpace *feSpace;
@@ -342,12 +331,10 @@ namespace AMDiS {
      * \param[in]   feSpaces          Set of all different FE spaces.
      * \param[in]   globalMapping     Mapping is parallel (true) or only 
      *                                local (false).
-     * \param[in]   levelData         Data for multi level method.
      */
     virtual void init(vector<const FiniteElemSpace*> &componentSpaces,
 		      vector<const FiniteElemSpace*> &feSpaces,
-		      bool globalMapping,
-		      MeshLevelData &levelData) = 0;
+		      bool globalMapping) = 0;
 
   protected:
     /// The FE spaces for all components.
@@ -410,8 +397,7 @@ namespace AMDiS {
     /// Initialization
     void init(vector<const FiniteElemSpace*> &f0,
 	      vector<const FiniteElemSpace*> &f1,
-	      bool globalMapping,
-	      MeshLevelData &levelData);
+	      bool globalMapping);
 
   protected:
 
@@ -530,8 +516,7 @@ namespace AMDiS {
     /// Initialization
     void init(vector<const FiniteElemSpace*> &f0,
 	      vector<const FiniteElemSpace*> &f1,
-	      bool globalMapping,
-	      MeshLevelData &levelData);
+	      bool globalMapping);
 
 
   protected:
@@ -656,7 +641,6 @@ namespace AMDiS {
     /** \brief 
      * Initialize the parallel DOF mapping.
      *
-     * \param[in]  m                  MPI communicator.
      * \param[in]  componentSpaces    The FE spaces of all components of the 
      *                                PDE to be solved.
      * \param[in]  feSpaces           Unique list of FE spaces. Thus, two
@@ -665,22 +649,25 @@ namespace AMDiS {
      * \param[in]  globalMapping      If true, at least one rank's mapping
      *                                contains DOFs that are not owned by the rank.
      */
-    void init(MeshLevelData& mld,
-	      vector<const FiniteElemSpace*> &componentSpaces,
+    void init(vector<const FiniteElemSpace*> &componentSpaces,
 	      vector<const FiniteElemSpace*> &feSpaces,
 	      bool globalMapping = true);
 
     /// In the case of having only one FE space, this init function can be used.
-    void init(MeshLevelData& mld,
-	      const FiniteElemSpace *feSpace,
+    void init(const FiniteElemSpace *feSpace,
 	      bool globalMapping = true)
     {
       vector<const FiniteElemSpace*> feSpaces;
       feSpaces.push_back(feSpace);
-      init(mld, feSpaces, feSpaces, globalMapping);
+      init(feSpaces, feSpaces, globalMapping);
     }
 
-    void setMpiComm(MPI::Intracomm &m, int l);
+    void setMpiComm(MPI::Intracomm &m);
+
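+    /// Returns the MPI communicator used by this DOF mapping.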
+    inline MPI::Intracomm& getMpiComm()
+    {
+      return mpiComm;
+    }
     
     /// Clear all data.
     void clear();
@@ -854,10 +841,6 @@ namespace AMDiS {
   private:
     MPI::Intracomm mpiComm;
 
-    int meshLevel;
-
-    MeshLevelData *levelData;
-
     /// DOF communicator for all DOFs on interior boundaries.
     DofComm *dofComm;
 
diff --git a/AMDiS/src/parallel/PetscProblemStat.cc b/AMDiS/src/parallel/PetscProblemStat.cc
index 7faaa078..0a21e9b2 100644
--- a/AMDiS/src/parallel/PetscProblemStat.cc
+++ b/AMDiS/src/parallel/PetscProblemStat.cc
@@ -55,7 +55,7 @@ namespace AMDiS {
     
     meshDistributor->setBoundaryDofRequirement(petscSolver->getBoundaryDofRequirement());
 
-    petscSolver->setMeshDistributor(meshDistributor);
+    petscSolver->setMeshDistributor(meshDistributor, 0);
     petscSolver->init(this->getComponentSpaces(), this->getFeSpaces());
   }
   
diff --git a/AMDiS/src/parallel/PetscSolver.cc b/AMDiS/src/parallel/PetscSolver.cc
index 71bde614..b53ffdc2 100644
--- a/AMDiS/src/parallel/PetscSolver.cc
+++ b/AMDiS/src/parallel/PetscSolver.cc
@@ -47,13 +47,8 @@ namespace AMDiS {
 
   PetscSolver::~PetscSolver()
   {
-    if (parallelDofMappingsRegistered) {
-      int nLevels = meshDistributor->getMeshLevelData().getLevelNumber();
-
+    if (parallelDofMappingsRegistered)
       meshDistributor->removeDofMap(dofMap);
-      if (nLevels > 1)
-	meshDistributor->removeDofMap(dofMapSd);
-    }
   }
 
 
@@ -71,22 +66,23 @@ namespace AMDiS {
     feSpaces = fe1;
 
     MeshLevelData& levelData = meshDistributor->getMeshLevelData();
-    int nLevels = levelData.getLevelNumber();
+    int nLevels = levelData.getNumberOfLevels();
     TEST_EXIT_DBG(nLevels >= 1)("Should not happen!\n");
 
     if (createGlobalMapping) {
       parallelDofMappingsRegistered = true;
 
-      dofMap.init(levelData, componentSpaces, feSpaces);
-      dofMap.setMpiComm(levelData.getMpiComm(0), 0);
-      dofMap.setDofComm(meshDistributor->getDofComm());
+      dofMap.init(componentSpaces, feSpaces);
+      dofMap.setMpiComm(levelData.getMpiComm(0));
+      dofMap.setDofComm(meshDistributor->getDofComm(0));
       dofMap.clear();
-      meshDistributor->registerDofMap(dofMap);
-      
-      if (nLevels > 1) {
-	dofMapSd.init(levelData, componentSpaces, feSpaces);
-	dofMapSd.setMpiComm(levelData.getMpiComm(1), 1);
-	dofMapSd.setDofComm(meshDistributor->getDofCommSd());
+      meshDistributor->registerDofMap(dofMap);     
+
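+      // Create the additional subdomain DOF mapping only if there is a real
+      // subdomain level, i.e., the level-1 communicator contains more than
+      // just this rank.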
+      if (nLevels > 1 && levelData.getMpiComm(1) != MPI::COMM_SELF) {
+	MSG("WARNING: MAKE GENERAL!\n");
+	dofMapSd.init(componentSpaces, feSpaces);
+	dofMapSd.setMpiComm(levelData.getMpiComm(1));
+	dofMapSd.setDofComm(meshDistributor->getDofComm(1));
 	dofMapSd.clear();
 	meshDistributor->registerDofMap(dofMapSd);
       }
@@ -128,13 +124,13 @@ namespace AMDiS {
     FUNCNAME("PetscSolver::copyVec()");
 
     IS originIs, destIs;
-    ISCreateGeneral(mpiCommGlobal, 
+    ISCreateGeneral(domainComm, 
 		    originIndex.size(), 
 		    &(originIndex[0]),
 		    PETSC_USE_POINTER,
 		    &originIs);
 
-    ISCreateGeneral(mpiCommGlobal, 
+    ISCreateGeneral(domainComm, 
 		    destIndex.size(), 
 		    &(destIndex[0]),
 		    PETSC_USE_POINTER,
diff --git a/AMDiS/src/parallel/PetscSolver.h b/AMDiS/src/parallel/PetscSolver.h
index 8c70fb3d..fb0b6cc8 100644
--- a/AMDiS/src/parallel/PetscSolver.h
+++ b/AMDiS/src/parallel/PetscSolver.h
@@ -150,11 +150,6 @@ namespace AMDiS {
       return dofMap;
     }
 
-    ParallelDofMapping& getDofMapSd()
-    {
-      return dofMapSd;
-    }
-
     vector<const FiniteElemSpace*>& getComponentSpaces()
     {
       return componentSpaces;
diff --git a/AMDiS/src/parallel/PetscSolverFeti.cc b/AMDiS/src/parallel/PetscSolverFeti.cc
index 465c8674..97b359ff 100644
--- a/AMDiS/src/parallel/PetscSolverFeti.cc
+++ b/AMDiS/src/parallel/PetscSolverFeti.cc
@@ -36,10 +36,9 @@ namespace AMDiS {
       lagrangeMap(COMPONENT_WISE),
       interiorDofMap(COMPONENT_WISE),
       schurPrimalSolver(0),
-      multiLevelTest(false),
+      subDomainIsLocal(true),
       subdomain(NULL),
       massMatrixSolver(NULL),
-      meshLevel(0),
       rStartInterior(0),
       nGlobalOverallInterior(0),
       printTimings(false),
@@ -89,11 +88,14 @@ namespace AMDiS {
 
     Parameters::get(initFileStr + "->feti->symmetric", isSymmetric);
 
-    Parameters::get("parallel->multi level test", multiLevelTest);
-    if (multiLevelTest)
-      meshLevel = 1;
+    {
+      MSG("WARNING: CHECK THIS BEFORE RUNNING MULTILEVEL FETI-DP!\n");
+      Parameters::get("parallel->level mode", levelMode);
+    }
+
 
     Parameters::get("parallel->print timings", printTimings);
+
   }
 
 
@@ -101,13 +103,16 @@ namespace AMDiS {
   {
     FUNCNAME("PetscSolverFeti::initialize()");
 
-    MSG("INIT WITH MESH-LEVEL: %d\n", meshLevel);
+    MSG_DBG("Init FETI-DP on mesh level %d\n", meshLevel);
 
-    TEST_EXIT_DBG(meshLevel + 1 == meshDistributor->getMeshLevelData().getLevelNumber())
-      ("Mesh hierarchy does not contain %d levels!\n", meshLevel + 1);
+    TEST_EXIT_DBG(meshLevel + 2 <= 
+		  meshDistributor->getMeshLevelData().getNumberOfLevels())
+      ("Mesh hierarchy does not contain at least %d levels!\n", meshLevel + 2);
 
     MeshLevelData& levelData = meshDistributor->getMeshLevelData();
 
+    subDomainIsLocal = (levelData.getMpiComm(meshLevel + 1) == MPI::COMM_SELF);
+
     if (subdomain == NULL) {
       string subSolverInitStr = initFileStr + "->subsolver";
       string solverType = "petsc";
@@ -121,36 +126,25 @@ namespace AMDiS {
       subdomain = dynamic_cast<PetscSolver*>(solverCreator->create());
       subdomain->initParameters();
       subdomain->setSymmetric(isSymmetric);
-      if (dirichletMode == 0)
-	subdomain->setHandleDirichletRows(true);
-      else
-	subdomain->setHandleDirichletRows(false);
-
-      if (meshLevel == 0) {
-	subdomain->setMeshDistributor(meshDistributor);
-      } else {
-	subdomain->setMeshDistributor(meshDistributor, 
-				      levelData.getMpiComm(meshLevel - 1),
-				      levelData.getMpiComm(meshLevel));
-	subdomain->setLevel(meshLevel);
-      }
+      subdomain->setHandleDirichletRows(dirichletMode == 0);
+      subdomain->setMeshDistributor(meshDistributor, meshLevel + 1);
 
       delete solverCreator;
     }
 
-    primalDofMap.init(levelData, componentSpaces, feSpaces);   
-    dualDofMap.init(levelData, componentSpaces, feSpaces, false);
-    localDofMap.init(levelData, componentSpaces, feSpaces, meshLevel != 0);
-    lagrangeMap.init(levelData, componentSpaces, feSpaces);
+    primalDofMap.init(componentSpaces, feSpaces);   
+    dualDofMap.init(componentSpaces, feSpaces, false);
+    localDofMap.init(componentSpaces, feSpaces, !subDomainIsLocal);
+    lagrangeMap.init(componentSpaces, feSpaces);
 
     if (stokesMode)
-      interfaceDofMap.init(levelData, componentSpaces, feSpaces);
+      interfaceDofMap.init(componentSpaces, feSpaces);
 
     if (fetiPreconditioner == FETI_DIRICHLET) {
-      TEST_EXIT(meshLevel == 0)
+      TEST_EXIT(levelMode == 1)
 	("Dirichlet preconditioner not yet implemented for multilevel FETI-DP\n");
 
-      interiorDofMap.init(levelData, componentSpaces, feSpaces, false);
+      interiorDofMap.init(componentSpaces, feSpaces, false);
     }
   }
 
@@ -187,25 +181,22 @@ namespace AMDiS {
     if (fetiPreconditioner == FETI_DIRICHLET)
       interiorDofMap.clear();
 
-    primalDofMap.setDofComm(meshDistributor->getDofComm());
-    lagrangeMap.setDofComm(meshDistributor->getDofComm());
+    primalDofMap.setDofComm(meshDistributor->getDofComm(meshLevel));
+    lagrangeMap.setDofComm(meshDistributor->getDofComm(meshLevel));
 
-    primalDofMap.setMpiComm(levelData.getMpiComm(0), 0);
-    dualDofMap.setMpiComm(levelData.getMpiComm(0), 0);
-    lagrangeMap.setMpiComm(levelData.getMpiComm(0), 0);
-    localDofMap.setMpiComm(levelData.getMpiComm(meshLevel), meshLevel);
+    primalDofMap.setMpiComm(levelData.getMpiComm(meshLevel));
+    dualDofMap.setMpiComm(levelData.getMpiComm(meshLevel));
+    lagrangeMap.setMpiComm(levelData.getMpiComm(meshLevel));
+    localDofMap.setMpiComm(levelData.getMpiComm(meshLevel + 1));
     if (fetiPreconditioner == FETI_DIRICHLET)
-      interiorDofMap.setMpiComm(levelData.getMpiComm(meshLevel), meshLevel);
-
-    if (meshLevel == 0)
-      localDofMap.setDofComm(meshDistributor->getDofComm());
-    else
-      localDofMap.setDofComm(meshDistributor->getDofCommSd());
+      interiorDofMap.setMpiComm(levelData.getMpiComm(meshLevel + 1));
 
+    localDofMap.setDofComm(meshDistributor->getDofComm(meshLevel + 1));
+    
     if (stokesMode) {
       interfaceDofMap.clear();
-      interfaceDofMap.setDofComm(meshDistributor->getDofComm());
-      interfaceDofMap.setMpiComm(levelData.getMpiComm(0), 0);
+      interfaceDofMap.setDofComm(meshDistributor->getDofComm(meshLevel));
+      interfaceDofMap.setMpiComm(levelData.getMpiComm(0));
     }
 
     int nComponents = componentSpaces.size();
@@ -219,6 +210,7 @@ namespace AMDiS {
     primalDofMap.update();
     dualDofMap.update();
     localDofMap.update();
+
     if (fetiPreconditioner == FETI_DIRICHLET)
       interiorDofMap.update();
 
@@ -235,27 +227,35 @@ namespace AMDiS {
 
     // === ===
 
-    if (meshLevel == 0) {
+    if (subDomainIsLocal) {
+      MSG("WARNING: MAKE GENERAL!\n");
+
       rStartInterior = 0;
-      nGlobalOverallInterior = localDofMap.getOverallDofs();
+      int localDofs = localDofMap.getOverallDofs();
+
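+      // Compute this rank's offset and the global number of interior DOFs.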
+      mpi::getDofNumbering(domainComm, localDofs,
+			   rStartInterior, nGlobalOverallInterior);
     } else {
+      MSG("WARNING: MAKE GENERAL!\n");
+
       MeshLevelData& levelData = meshDistributor->getMeshLevelData();
 
       int groupRowsInterior = 0;
       if (levelData.getMpiComm(1).Get_rank() == 0)
 	groupRowsInterior = localDofMap.getOverallDofs();
 
-      mpi::getDofNumbering(mpiCommGlobal, groupRowsInterior,
+      mpi::getDofNumbering(domainComm, groupRowsInterior,
 			   rStartInterior, nGlobalOverallInterior);
 
       int tmp = 0;
       if (levelData.getMpiComm(1).Get_rank() == 0)
 	tmp = rStartInterior;
 
-      levelData.getMpiComm(1).Allreduce(&tmp, &rStartInterior, 1, MPI_INT, MPI_SUM);
+      levelData.getMpiComm(1).Allreduce(&tmp, &rStartInterior, 1, 
+					MPI_INT, MPI_SUM);
     }
 
-    MSG("FETI-DP data created on mesh level %d\n", meshLevel);
+
     for (unsigned int i = 0; i < componentSpaces.size(); i++) {
       const FiniteElemSpace *feSpace = componentSpaces[i];
 
@@ -324,7 +324,7 @@ namespace AMDiS {
       if (dirichletRows[component].count(**it))
 	continue;
 
-      if (meshLevel == 0) {
+      if (subDomainIsLocal) {
 	primals.insert(**it);
       } else {
 	double e = 1e-8;
@@ -377,13 +377,9 @@ namespace AMDiS {
 
       if (isPrimal(component, **it))
 	continue;
-
-      if (meshLevel == 0) {
+      
+      if (subDomainIsLocal || dofMapSd[feSpace].isRankDof(**it))
 	dualDofMap[component].insertRankDof(**it);
-      } else {
-	if (dofMapSd[feSpace].isRankDof(**it))
-	  dualDofMap[component].insertRankDof(**it);
-      }	  
     }
   }
 
@@ -425,42 +421,39 @@ namespace AMDiS {
     // Stores, for all rank-owned communication DOFs, whether the counterpart
     // is a rank-owned DOF in its subdomain. Thus, the following map stores for
     // each rank number all DOFs that fulfill this requirement.
-    map<int, std::set<DegreeOfFreedom> > sdRankDofs;
+    map<int, std::set<DegreeOfFreedom> > subDomainRankDofs;
 
-    if (meshLevel > 0) {
-      StdMpi<vector<int> > stdMpi(mpiCommGlobal);
+    if (not subDomainIsLocal) {
+      StdMpi<vector<int> > stdMpi(domainComm);
 
-      for (DofComm::Iterator it(meshDistributor->getDofComm().getRecvDofs(), 
-				meshLevel, feSpace);
+      for (DofComm::Iterator it(meshDistributor->getDofComm(meshLevel).getRecvDofs(), feSpace);
 	   !it.end(); it.nextRank()) {
 
-	vector<int> subdomainRankDofs;
-	subdomainRankDofs.reserve(it.getDofs().size());
+	vector<int> dofs;
+	dofs.reserve(it.getDofs().size());
 
 	for (; !it.endDofIter(); it.nextDof()) {
 	  if (dofMapSd[feSpace].isRankDof(it.getDofIndex()))
-	    subdomainRankDofs.push_back(1);
+	    dofs.push_back(1);
 	  else
-	    subdomainRankDofs.push_back(0);
+	    dofs.push_back(0);
 	}
 
-	stdMpi.send(it.getRank(), subdomainRankDofs);
+	stdMpi.send(it.getRank(), dofs);
       }	     
 
-      for (DofComm::Iterator it(meshDistributor->getDofComm().getSendDofs(), 
-				meshLevel, feSpace);
+      for (DofComm::Iterator it(meshDistributor->getDofComm(meshLevel).getSendDofs(), feSpace);
 	   !it.end(); it.nextRank())
 	stdMpi.recv(it.getRank());
 
       stdMpi.startCommunication();
 
-      for (DofComm::Iterator it(meshDistributor->getDofComm().getSendDofs(), 
-				meshLevel, feSpace); 
+      for (DofComm::Iterator it(meshDistributor->getDofComm(meshLevel).getSendDofs(), feSpace); 
 	   !it.end(); it.nextRank())
 	for (; !it.endDofIter(); it.nextDof())
-	  if (!isPrimal(component, it.getDofIndex()))
-	    if (stdMpi.getRecvData(it.getRank())[it.getDofCounter()] == 1)
-	      sdRankDofs[it.getRank()].insert(it.getDofIndex());
+	  if (!isPrimal(component, it.getDofIndex()) &&
+	      stdMpi.getRecvData(it.getRank())[it.getDofCounter()] == 1)
+	    subDomainRankDofs[it.getRank()].insert(it.getDofIndex());
     }
 
     if (dualDofMap[component].nLocalDofs == 0)
@@ -470,16 +463,18 @@ namespace AMDiS {
     // === Create for each dual node that is owned by the rank, the set ===
     // === of ranks that contain this node (denoted by W(x_j)).         ===
 
-    int mpiRank = meshDistributor->getMpiRank();
-    for (DofComm::Iterator it(meshDistributor->getDofComm().getSendDofs(), 
-			      meshLevel, feSpace); 
+    int mpiRank = domainComm.Get_rank();
+    for (DofComm::Iterator it(meshDistributor->getDofComm(meshLevel).getSendDofs(), feSpace); 
 	 !it.end(); it.nextRank()) {
       for (; !it.endDofIter(); it.nextDof()) {
 	if (!isPrimal(component, it.getDofIndex())) {
 	  boundaryDofRanks[feSpace][it.getDofIndex()].insert(mpiRank);
 
- 	  if (meshLevel == 0 ||
- 	      (meshLevel > 0 && sdRankDofs[it.getRank()].count(it.getDofIndex())))
+	  // If the subdomain is local, always add the counterpart rank, 
+	  // otherwise check if the other rank is the owner of the DOF in 
+	  // its subdomain.
+ 	  if (subDomainIsLocal || 
+	      subDomainRankDofs[it.getRank()].count(it.getDofIndex()))
 	    boundaryDofRanks[feSpace][it.getDofIndex()].insert(it.getRank());	  
 	}
       }
@@ -489,26 +484,23 @@ namespace AMDiS {
     // === Communicate these sets for all rank owned dual nodes to other ===
     // === ranks that also have this node.                               ===
 
-    StdMpi<vector<std::set<int> > > stdMpi(meshDistributor->getMpiComm());
+    StdMpi<vector<std::set<int> > > stdMpi(meshDistributor->getMpiComm(meshLevel));
 
-    for (DofComm::Iterator it(meshDistributor->getDofComm().getSendDofs(), meshLevel, feSpace);
+    for (DofComm::Iterator it(meshDistributor->getDofComm(meshLevel).getSendDofs(), feSpace);
 	 !it.end(); it.nextRank())
       for (; !it.endDofIter(); it.nextDof())
 	if (!isPrimal(component, it.getDofIndex()))
- 	  if (meshLevel == 0 ||
- 	      (meshLevel > 0 && sdRankDofs[it.getRank()].count(it.getDofIndex())))
+ 	  if (subDomainIsLocal || subDomainRankDofs[it.getRank()].count(it.getDofIndex()))
 	    stdMpi.getSendData(it.getRank()).push_back(boundaryDofRanks[feSpace][it.getDofIndex()]);
 
     stdMpi.updateSendDataSize();
 
-    for (DofComm::Iterator it(meshDistributor->getDofComm().getRecvDofs(), meshLevel, feSpace); 
+    for (DofComm::Iterator it(meshDistributor->getDofComm(meshLevel).getRecvDofs(), feSpace); 
 	 !it.end(); it.nextRank()) {
       bool recvFromRank = false;
       for (; !it.endDofIter(); it.nextDof()) {
 	if (!isPrimal(component, it.getDofIndex())) {
- 	  if (meshLevel == 0 ||
- 	      (meshLevel > 0 && 
-	       dofMapSd[feSpace].isRankDof(it.getDofIndex()))) {
+ 	  if (subDomainIsLocal || dofMapSd[feSpace].isRankDof(it.getDofIndex())) {
 	    recvFromRank = true;
 	    break;
 	  }
@@ -521,18 +513,17 @@ namespace AMDiS {
 
     stdMpi.startCommunication();
 
-    for (DofComm::Iterator it(meshDistributor->getDofComm().getRecvDofs(), meshLevel, feSpace); 
+    for (DofComm::Iterator it(meshDistributor->getDofComm(meshLevel).getRecvDofs(), feSpace); 
 	 !it.end(); it.nextRank()) {
       int i = 0;
       for (; !it.endDofIter(); it.nextDof())
 	if (!isPrimal(component, it.getDofIndex()))
- 	  if (meshLevel == 0 ||
- 	      (meshLevel > 0 && 
-	       dofMapSd[feSpace].isRankDof(it.getDofIndex())))	    
+ 	  if (subDomainIsLocal || dofMapSd[feSpace].isRankDof(it.getDofIndex()))
 	    boundaryDofRanks[feSpace][it.getDofIndex()] = 
 	      stdMpi.getRecvData(it.getRank())[i++];
-	  else
+	  else {
 	    lagrangeMap[component].insertNonRankDof(it.getDofIndex());
+	  }
     }
 
 
@@ -583,7 +574,7 @@ namespace AMDiS {
 	  dirichletRows[component].count(i))
 	continue;      
 
-      if (meshLevel == 0) {
+      if (subDomainIsLocal) {
 	localDofMap[component].insertRankDof(i, nLocalInterior);
 	
 	if (fetiPreconditioner == FETI_DIRICHLET)
@@ -605,7 +596,7 @@ namespace AMDiS {
 
     for (DofMap::iterator it = dualDofMap[component].getMap().begin();
 	 it != dualDofMap[component].getMap().end(); ++it) {
-      if (meshLevel == 0) {
+      if (subDomainIsLocal) {
 	localDofMap[component].insertRankDof(it->first);
       } else {
 	if (dofMapSd[feSpace].isRankDof(it->first))
@@ -626,16 +617,18 @@ namespace AMDiS {
 
     // === Create distributed matrix for Lagrange constraints. ===
 
-    MatCreateAIJ(mpiCommGlobal,
+    MatCreateAIJ(domainComm,
 		 lagrangeMap.getRankDofs(), localDofMap.getRankDofs(),
 		 lagrangeMap.getOverallDofs(), nGlobalOverallInterior,
 		 2, PETSC_NULL, 2, PETSC_NULL,
 		 &mat_lagrange);
     MatSetOption(mat_lagrange, MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);
 
+
     Vec vec_scale_lagrange;
     lagrangeMap.createVec(vec_scale_lagrange);
 
+
     // === Create for all duals the corresponding Lagrange constraints. On ===
     // === each rank we traverse all pairs (n, m) of ranks, with n < m,    ===
     // === that contain this node. If the current rank number is r, and    ===
@@ -694,16 +687,12 @@ namespace AMDiS {
     MatAssemblyEnd(mat_lagrange, MAT_FINAL_ASSEMBLY);
 
 
-    int nZeroRows = PetscSolverFetiDebug::testZeroRows(mat_lagrange);
-    int m,n;
-    MatGetSize(mat_lagrange, &m ,&n);
-    MSG("Lagrange matrix has %d zero rows and global size of %d %d!\n", nZeroRows, m, n);
-
-    PetscViewer petscView;
-    PetscViewerBinaryOpen(PETSC_COMM_WORLD, "lagrange.mat", 
-			  FILE_MODE_WRITE, &petscView);
-    MatView(mat_lagrange, petscView);
-    PetscViewerDestroy(&petscView);
+    {
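+      // Debug output: count the zero rows of the Lagrange constraint matrix.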
+      int nZeroRows = PetscSolverFetiDebug::testZeroRows(mat_lagrange);
+      int m, n;
+      MatGetSize(mat_lagrange, &m, &n);
+      MSG("Lagrange matrix has %d zero rows and a global size of %d x %d!\n", nZeroRows, m, n);
+    }
 
 
     // === If required, create \ref mat_lagrange_scaled ===
@@ -738,7 +727,7 @@ namespace AMDiS {
     if (meshDistributor->getMesh()->getDim() == 2)
       return true;
 
-    if (meshDistributor->getIntBoundary().getDegreeOwn(edge) != 3)
+    if (meshDistributor->getIntBoundary(meshLevel).getDegreeOwn(edge) != 3)
       return false;
 
     return true;
@@ -763,7 +752,7 @@ namespace AMDiS {
   {
     FUNCNAME("PetscSolverFeti::getAugmentedLagrange()");
 
-    InteriorBoundary &intBound = meshDistributor->getIntBoundary();
+    InteriorBoundary &intBound = meshDistributor->getIntBoundary(meshLevel);
     std::set<BoundaryObject> allEdges;
     for (InteriorBoundary::iterator it(intBound.getOwn()); !it.end(); ++it)
       if (it->rankObj.subObj == EDGE && 
@@ -800,7 +789,7 @@ namespace AMDiS {
   {
     FUNCNAME("PetscSolverFeti::getAugmentedLagrange()");
 
-    InteriorBoundary &intBound = meshDistributor->getIntBoundary();
+    InteriorBoundary &intBound = meshDistributor->getIntBoundary(meshLevel);
     map<int, std::set<BoundaryObject> > allFaces;
     for (InteriorBoundary::iterator it(intBound.getOwn()); !it.end(); ++it)
       if (it->rankObj.subObj == FACE)
@@ -884,7 +873,7 @@ namespace AMDiS {
 
     nRankEdges = allEdges.size();
     int rStartEdges = 0;
-    mpi::getDofNumbering(mpiCommGlobal, nRankEdges, rStartEdges, nOverallEdges);
+    mpi::getDofNumbering(domainComm, nRankEdges, rStartEdges, nOverallEdges);
 
     MSG("nRankEdges = %d, nOverallEdges = %d\n", nRankEdges, nOverallEdges);
 
@@ -892,7 +881,7 @@ namespace AMDiS {
     rStartEdges *= componentSpaces.size();
     nOverallEdges *= componentSpaces.size();
 
-    MatCreateAIJ(mpiCommGlobal,
+    MatCreateAIJ(domainComm,
 		 nRankEdges, lagrangeMap.getRankDofs(),
 		 nOverallEdges, lagrangeMap.getOverallDofs(),
 		 2, PETSC_NULL, 2, PETSC_NULL, 
@@ -950,20 +939,15 @@ namespace AMDiS {
 
       if (augmentedLagrange == false) {
 	schurPrimalData.subSolver = subdomain;
-	
-	if (meshLevel == 0) {
-	  localDofMap.createVec(schurPrimalData.tmp_vec_b, nGlobalOverallInterior);
-	} else {
-	  MSG("WARNING: MULTILEVEL SUPER SHIT, MAKE GENERAL!\n");
-	  VecCreateMPI(meshDistributor->getMeshLevelData().getMpiComm(meshLevel - 1),
-		       localDofMap.getRankDofs(),
-		       nGlobalOverallInterior, &(schurPrimalData.tmp_vec_b));
-	}
+		
+	VecCreateMPI(meshDistributor->getMeshLevelData().getMpiComm(meshLevel),
+		     localDofMap.getRankDofs(),
+		     nGlobalOverallInterior, &(schurPrimalData.tmp_vec_b));
 
 	primalDofMap.createVec(schurPrimalData.tmp_vec_primal);
 
 	
-	MatCreateShell(mpiCommGlobal,
+	MatCreateShell(domainComm,
 		       primalDofMap.getRankDofs(), 
 		       primalDofMap.getRankDofs(), 
 		       primalDofMap.getOverallDofs(), 
@@ -984,7 +968,7 @@ namespace AMDiS {
 	schurPrimalAugmentedData.mat_lagrange = &mat_lagrange;
 	schurPrimalAugmentedData.mat_augmented_lagrange = &mat_augmented_lagrange;
 
-	MatCreateShell(mpiCommGlobal,
+	MatCreateShell(domainComm,
 		       primalDofMap.getRankDofs() + nRankEdges, 
 		       primalDofMap.getRankDofs() + nRankEdges, 
 		       primalDofMap.getOverallDofs() + nOverallEdges, 
@@ -995,7 +979,7 @@ namespace AMDiS {
 			     (void(*)(void))petscMultMatSchurPrimalAugmented);
       }
 
-      KSPCreate(mpiCommGlobal, &ksp_schur_primal);
+      KSPCreate(domainComm, &ksp_schur_primal);
       KSPSetOperators(ksp_schur_primal, mat_schur_primal, mat_schur_primal, SAME_NONZERO_PATTERN);
       KSPSetOptionsPrefix(ksp_schur_primal, "schur_primal_");
       KSPSetType(ksp_schur_primal, KSPGMRES);
@@ -1005,7 +989,7 @@ namespace AMDiS {
 
       double wtime = MPI::Wtime();
 
-      TEST_EXIT_DBG(meshLevel == 0)
+      TEST_EXIT_DBG(subDomainIsLocal)
 	("No support for multilevel FETI-DP; check usage of localDofMap.\n");
 
 
@@ -1019,7 +1003,7 @@ namespace AMDiS {
 
       // === Create KSP solver object and set appropriate solver options. ===
 
-      KSPCreate(mpiCommGlobal, &ksp_schur_primal);
+      KSPCreate(domainComm, &ksp_schur_primal);
       KSPSetOperators(ksp_schur_primal, mat_schur_primal, mat_schur_primal,
 		      SAME_NONZERO_PATTERN);
       KSPSetOptionsPrefix(ksp_schur_primal, "schur_primal_");
@@ -1082,7 +1066,7 @@ namespace AMDiS {
       localDofMap.createVec(schurPrimalData.tmp_vec_b, nGlobalOverallInterior);
       primalDofMap.createVec(schurPrimalData.tmp_vec_primal);
       
-      MatCreateShell(mpiCommGlobal,
+      MatCreateShell(domainComm,
 		     primalDofMap.getRankDofs(), 
 		     primalDofMap.getRankDofs(), 
 		     primalDofMap.getOverallDofs(), 
@@ -1183,7 +1167,7 @@ namespace AMDiS {
       schurPrimalAugmentedData.mat_lagrange = &mat_lagrange;
       schurPrimalAugmentedData.mat_augmented_lagrange = &mat_augmented_lagrange;
       
-      MatCreateShell(mpiCommGlobal,
+      MatCreateShell(domainComm,
 		     primalDofMap.getRankDofs() + nRankEdges, 
 		     primalDofMap.getRankDofs() + nRankEdges, 
 		     primalDofMap.getOverallDofs() + nOverallEdges, 
@@ -1244,20 +1228,15 @@ namespace AMDiS {
     fetiData.subSolver = subdomain;
     fetiData.ksp_schur_primal = &ksp_schur_primal;
 
-    if (meshLevel == 0) {
-      localDofMap.createVec(fetiData.tmp_vec_b0, nGlobalOverallInterior);      
-    } else {
-      MSG("WARNING: MULTILEVEL SUPER SHIT, MAKE GENERAL!\n");
-      VecCreateMPI(meshDistributor->getMeshLevelData().getMpiComm(meshLevel - 1),
-		   localDofMap.getRankDofs(),
-		   nGlobalOverallInterior, &(fetiData.tmp_vec_b0));
-    }
+    VecCreateMPI(meshDistributor->getMeshLevelData().getMpiComm(meshLevel),
+		 localDofMap.getRankDofs(),
+		 nGlobalOverallInterior, &(fetiData.tmp_vec_b0));
 
     lagrangeMap.createVec(fetiData.tmp_vec_lagrange);
     primalDofMap.createVec(fetiData.tmp_vec_primal0);
 
     if (stokesMode == false) {      
-      MatCreateShell(mpiCommGlobal,
+      MatCreateShell(domainComm,
 		     lagrangeMap.getRankDofs(), 
 		     lagrangeMap.getRankDofs(),
 		     lagrangeMap.getOverallDofs(), 
@@ -1279,7 +1258,7 @@ namespace AMDiS {
       primalDofMap.createVec(fetiData.tmp_vec_primal1);
       interfaceDofMap.createVec(fetiData.tmp_vec_interface);
 
-      MatCreateShell(mpiCommGlobal,
+      MatCreateShell(domainComm,
 		     interfaceDofMap.getRankDofs() + lagrangeMap.getRankDofs(), 
 		     interfaceDofMap.getRankDofs() + lagrangeMap.getRankDofs(),
 		     interfaceDofMap.getOverallDofs() + lagrangeMap.getOverallDofs(), 
@@ -1289,7 +1268,7 @@ namespace AMDiS {
 			   (void(*)(void))petscMultMatFetiInterface);      
     }
 
-    KSPCreate(mpiCommGlobal, &ksp_feti);
+    KSPCreate(domainComm, &ksp_feti);
     KSPSetOperators(ksp_feti, mat_feti, mat_feti, SAME_NONZERO_PATTERN);
     KSPSetOptionsPrefix(ksp_feti, "feti_");
     KSPSetType(ksp_feti, KSPGMRES);
@@ -1316,7 +1295,7 @@ namespace AMDiS {
 
     switch (fetiPreconditioner) {
     case FETI_DIRICHLET:
-      TEST_EXIT(meshLevel == 0)
+      TEST_EXIT(subDomainIsLocal)
 	("Check for localDofMap.getLocalMatIndex, which should not work for multilevel FETI-DP!\n");
 
       TEST_EXIT(!stokesMode)
@@ -1354,7 +1333,7 @@ namespace AMDiS {
       MatGetVecs(mat_interior_interior, PETSC_NULL, 
 		 &(fetiDirichletPreconData.tmp_vec_interior));
 
-      TEST_EXIT_DBG(meshLevel == 0)
+      TEST_EXIT_DBG(subDomainIsLocal)
 	("Should not happen, check usage of localDofMap!\n");
 
       for (unsigned int component = 0; component < componentSpaces.size(); component++) {
@@ -1426,10 +1405,9 @@ namespace AMDiS {
 	  } else {
 	    massMapping = 
 	      new ParallelDofMapping(COMPONENT_WISE, true);
-	    massMapping->init(meshDistributor->getMeshLevelData(), 
-			      pressureFeSpace, pressureFeSpace);
-	    massMapping->setDofComm(meshDistributor->getDofComm());
-	    massMapping->setMpiComm(meshDistributor->getMeshLevelData().getMpiComm(0), 0);
+	    massMapping->init(pressureFeSpace, pressureFeSpace);
+	    massMapping->setDofComm(meshDistributor->getDofComm(meshLevel));
+	    massMapping->setMpiComm(meshDistributor->getMeshLevelData().getMpiComm(0));
 	  }	   
 	  (*massMapping)[0] = interfaceDofMap[pressureComponent];
 	  massMapping->update();
@@ -1443,9 +1421,7 @@ namespace AMDiS {
 	  if (!massMatrixSolver) {
 	    massMatrixSolver = new PetscSolverGlobalMatrix("");
 	    massMatrixSolver->setKspPrefix("mass_");
-	    massMatrixSolver->setMeshDistributor(meshDistributor,
-						 mpiCommGlobal,
-						 mpiCommLocal);
+	    massMatrixSolver->setMeshDistributor(meshDistributor, meshLevel);
 	    massMatrixSolver->setDofMapping(massMapping);
 	  }
 	   
@@ -1597,14 +1573,14 @@ namespace AMDiS {
     VecScale(vecArray[1], -1.0);
         
     Vec nullSpaceBasis;
-    VecCreateNest(mpiCommGlobal, 2, PETSC_NULL, vecArray, &nullSpaceBasis); 
+    VecCreateNest(domainComm, 2, PETSC_NULL, vecArray, &nullSpaceBasis); 
 
 #if (DEBUG != 0)
     PetscSolverFetiDebug::writeNullSpace(*this, nullSpaceBasis);
 #endif
 
     MatNullSpace matNullSpace;
-    MatNullSpaceCreate(mpiCommGlobal, PETSC_FALSE, 1, &nullSpaceBasis, 
+    MatNullSpaceCreate(domainComm, PETSC_FALSE, 1, &nullSpaceBasis, 
 		       &matNullSpace);
     MatSetNullSpace(mat_feti, matNullSpace);
     KSPSetNullSpace(ksp_feti, matNullSpace);
@@ -1682,17 +1658,18 @@ namespace AMDiS {
 
     int writeInteriorMatrix = -1;
     Parameters::get("parallel->debug->write interior matrix", 
-		    writeInteriorMatrix);
+    		    writeInteriorMatrix);
 
     if (writeInteriorMatrix >= 0 &&
-	writeInteriorMatrix == MPI::COMM_WORLD.Get_rank()) {
+    	writeInteriorMatrix == MPI::COMM_WORLD.Get_rank()) {
       PetscViewer petscView;
       PetscViewerBinaryOpen(PETSC_COMM_SELF, "interior.mat", 
-			    FILE_MODE_WRITE, &petscView);
+    			    FILE_MODE_WRITE, &petscView);
       MatView(subdomain->getMatInterior(), petscView);
       PetscViewerDestroy(&petscView);
     }
 
+
     bool checkInteriorMatrix = false;
     Parameters::get("parallel->debug->check interior matrix",
 		    checkInteriorMatrix);
@@ -1826,7 +1803,7 @@ namespace AMDiS {
       
       for (DofMap::iterator it = localDofMap[component].getMap().begin();
 	   it != localDofMap[component].getMap().end(); ++it) {
-	if (meshLevel == 0) {
+	if (subDomainIsLocal) {
 	  int petscIndex = localDofMap.getLocalMatIndex(component, it->first);
 	  dofVec[it->first] = localSolB[petscIndex];
 	} else {
@@ -1935,8 +1912,6 @@ namespace AMDiS {
   
     subdomain->fillPetscMatrix(mat);
 
-    dbgMatrix(mat);
-
     if (printTimings) {
       MPI::COMM_WORLD.Barrier();
       MSG("FETI-DP timing 02: %.5f seconds (creation of interior matrices)\n",
@@ -1950,19 +1925,18 @@ namespace AMDiS {
       MPI::COMM_WORLD.Barrier();
       MSG("FETI-DP timing 04: %.5f seconds (factorization of subdomain matrices)\n",
 	  MPI::Wtime() - wtime);
-    }
+    }  
    
-    
     // === Create matrices for FETI-DP preconditioner. ===    
     createPreconditionerMatrix(mat);
-   
+
     // === Create and fill PETSc matrix for Lagrange constraints. ===
     createMatLagrange();
 
     // === ===
     createMatAugmentedLagrange();
 
-    // === Create PETSc solver for the Schur complement on primal variables. ===   
+    // === Create PETSc solver for the Schur complement on primal variables. === 
     createSchurPrimalKsp();
 
     // === Create PETSc solver for the FETI-DP operator. ===
@@ -2228,18 +2202,12 @@ namespace AMDiS {
     // === Some temporary vectors. ===
 
     Vec tmp_b0, tmp_b1;
-    if (meshLevel == 0) {
-      localDofMap.createVec(tmp_b0, nGlobalOverallInterior);
-      localDofMap.createVec(tmp_b1, nGlobalOverallInterior);
-    } else {
-      MSG("WARNING: MULTILEVEL SUPER SHIT, MAKE GENERAL!\n");
-      VecCreateMPI(meshDistributor->getMeshLevelData().getMpiComm(meshLevel - 1),
-		   localDofMap.getRankDofs(),
-		   nGlobalOverallInterior, &tmp_b0);
-      VecCreateMPI(meshDistributor->getMeshLevelData().getMpiComm(meshLevel - 1),
-		   localDofMap.getRankDofs(),
-		   nGlobalOverallInterior, &tmp_b1);
-    }
+    VecCreateMPI(meshDistributor->getMeshLevelData().getMpiComm(meshLevel),
+		 localDofMap.getRankDofs(),
+		 nGlobalOverallInterior, &tmp_b0);
+    VecCreateMPI(meshDistributor->getMeshLevelData().getMpiComm(meshLevel),
+		 localDofMap.getRankDofs(),
+		 nGlobalOverallInterior, &tmp_b1);
 
     Vec tmp_primal0, tmp_primal1;
     primalDofMap.createVec(tmp_primal0);
@@ -2268,10 +2236,10 @@ namespace AMDiS {
       interfaceDofMap.createVec(vecSolInterface);
 
       Vec vecRhsArray[2] = {vecRhsInterface, vecRhsLagrange}; 
-      VecCreateNest(mpiCommGlobal, 2, PETSC_NULL, vecRhsArray, &vecRhs);
+      VecCreateNest(domainComm, 2, PETSC_NULL, vecRhsArray, &vecRhs);
 
       Vec vecSolArray[2] = {vecSolInterface, vecSolLagrange}; 
-      VecCreateNest(mpiCommGlobal, 2, PETSC_NULL, vecSolArray, &vecSol);
+      VecCreateNest(domainComm, 2, PETSC_NULL, vecSolArray, &vecSol);
 
       VecAssemblyBegin(vecRhs);
       VecAssemblyEnd(vecRhs);
@@ -2409,7 +2377,7 @@ namespace AMDiS {
       MatMult(subdomain->getMatInteriorCoarse(1), vecSolInterface, tmp_b1);
       VecAXPY(tmp_b0, -1.0, tmp_b1);
     }
-  
+
     subdomain->solveGlobal(tmp_b0, tmp_b1);
     MatMult(subdomain->getMatCoarseInterior(), tmp_b1, tmp_primal0);
     VecAYPX(tmp_primal0, -1.0, subdomain->getVecRhsCoarse());
@@ -2449,14 +2417,12 @@ namespace AMDiS {
     VecAXPY(tmp_b0, -1.0, tmp_b1);
     subdomain->solveGlobal(tmp_b0, tmp_b0);
 
-
     // === And recover AMDiS solution vectors. ===
 
     recoverSolution(tmp_b0, tmp_primal0, vec);
     if (stokesMode)
       recoverInterfaceSolution(vecSolInterface, vec);
     
-    
     // === Print timings and delete data. ===
 
     if (printTimings) {
diff --git a/AMDiS/src/parallel/PetscSolverFeti.h b/AMDiS/src/parallel/PetscSolverFeti.h
index 1d312e6c..a20a771b 100644
--- a/AMDiS/src/parallel/PetscSolverFeti.h
+++ b/AMDiS/src/parallel/PetscSolverFeti.h
@@ -331,14 +331,15 @@ namespace AMDiS {
 
     KSP ksp_interior;
 
-    bool multiLevelTest;
+    int levelMode;
+
+    /// If true, each FETI-DP subdomain is local to a single rank (MPI::COMM_SELF).
+    bool subDomainIsLocal;
 
     PetscSolver *subdomain;
 
     PetscSolver *massMatrixSolver;
 
-    int meshLevel;
-
     int rStartInterior;
 
     int nGlobalOverallInterior;
diff --git a/AMDiS/src/parallel/PetscSolverFetiDebug.cc b/AMDiS/src/parallel/PetscSolverFetiDebug.cc
index 04510815..ca9aebbe 100644
--- a/AMDiS/src/parallel/PetscSolverFetiDebug.cc
+++ b/AMDiS/src/parallel/PetscSolverFetiDebug.cc
@@ -112,7 +112,7 @@ namespace AMDiS {
 
     
     Vec nullSpaceBasis;
-    VecCreateNest(feti.mpiCommGlobal, 4, PETSC_NULL, vecArray, &nullSpaceBasis);
+    VecCreateNest(feti.domainComm, 4, PETSC_NULL, vecArray, &nullSpaceBasis);
 
     Vec vecSol;
     VecDuplicate(nullSpaceBasis, &vecSol);
@@ -219,7 +219,7 @@ namespace AMDiS {
       nestMat[15] = PETSC_NULL;
       
       Mat nestFetiMat;
-      MatCreateNest(feti.mpiCommGlobal, 4, PETSC_NULL, 4, PETSC_NULL,
+      MatCreateNest(feti.domainComm, 4, PETSC_NULL, 4, PETSC_NULL,
 		    &(nestMat[0]), &mat);
     } else {
       vector<Mat> nestMat;
@@ -236,7 +236,7 @@ namespace AMDiS {
       nestMat[8] = PETSC_NULL;
       
       Mat nestFetiMat;
-      MatCreateNest(feti.mpiCommGlobal, 3, PETSC_NULL, 3, PETSC_NULL,
+      MatCreateNest(feti.domainComm, 3, PETSC_NULL, 3, PETSC_NULL,
 		    &(nestMat[0]), &mat);
     }
   }
@@ -256,7 +256,7 @@ namespace AMDiS {
     if (feti.stokesMode == false) {     
       int nRankRows = lagrangeMap.getRankDofs();
       int nOverallRows = lagrangeMap.getOverallDofs();
-      MatCreateAIJ(feti.mpiCommGlobal, 
+      MatCreateAIJ(feti.domainComm, 
 		   nRankRows, nRankRows, nOverallRows, nOverallRows,
 		   nOverallRows, PETSC_NULL, nOverallRows, PETSC_NULL,
 		   &explicitMat);
@@ -305,7 +305,7 @@ namespace AMDiS {
       int nOverallRows = 
 	interfaceDofMap.getOverallDofs() + lagrangeMap.getOverallDofs();
 
-      MatCreateAIJ(feti.mpiCommGlobal, 
+      MatCreateAIJ(feti.domainComm, 
 		   nRankRows, nRankRows, nOverallRows, nOverallRows,
 		   nOverallRows, PETSC_NULL, nOverallRows, PETSC_NULL,
 		   &explicitMat);
@@ -318,8 +318,8 @@ namespace AMDiS {
       lagrangeMap.createVec(resultVector[1]);
 
       Vec unitNestVec, resultNestVec;
-      VecCreateNest(feti.mpiCommGlobal, 2, PETSC_NULL, unitVector, &unitNestVec);
-      VecCreateNest(feti.mpiCommGlobal, 2, PETSC_NULL, resultVector, &resultNestVec);
+      VecCreateNest(feti.domainComm, 2, PETSC_NULL, unitVector, &unitNestVec);
+      VecCreateNest(feti.domainComm, 2, PETSC_NULL, resultVector, &resultNestVec);
 
       PetscInt lowInterface, highInterface;
       VecGetOwnershipRange(unitVector[0], &lowInterface, &highInterface);
@@ -447,7 +447,7 @@ namespace AMDiS {
     VecGetOwnershipRange(v0, &rStart0, PETSC_NULL);
     VecGetOwnershipRange(v1, &rStart1, PETSC_NULL);
     
-    VecCreateMPI(feti.mpiCommGlobal, l0 + l1, s0 + s1, &explicitVec);
+    VecCreateMPI(feti.domainComm, l0 + l1, s0 + s1, &explicitVec);
 
     double *val;
     VecGetArray(v0, &val);
diff --git a/AMDiS/src/parallel/PetscSolverGlobalBlockMatrix.cc b/AMDiS/src/parallel/PetscSolverGlobalBlockMatrix.cc
index 77acf392..6e2019e3 100644
--- a/AMDiS/src/parallel/PetscSolverGlobalBlockMatrix.cc
+++ b/AMDiS/src/parallel/PetscSolverGlobalBlockMatrix.cc
@@ -57,7 +57,7 @@ namespace AMDiS {
 
     for (int i = 0; i < nBlocks; i++)
       for (int j = 0; j < nBlocks; j++)
-	MatCreateAIJ(mpiCommGlobal,
+	MatCreateAIJ(domainComm,
 		     nRankRows * blockSize[i], nRankRows * blockSize[j],
 		     nOverallRows * blockSize[i], nOverallRows * blockSize[j],
 		     30 * blockSize[i], PETSC_NULL, 
@@ -83,7 +83,7 @@ namespace AMDiS {
     }	  
 	
 
-    MatCreateNest(mpiCommGlobal, nBlocks, PETSC_NULL, nBlocks, PETSC_NULL,
+    MatCreateNest(domainComm, nBlocks, PETSC_NULL, nBlocks, PETSC_NULL,
 		  &(nestMat[0]), &getMatInterior());
 
 #if (DEBUG != 0)
@@ -119,7 +119,7 @@ namespace AMDiS {
     nestVec.resize(nComponents);
 
     for (int i = 0; i < nComponents; i++) {
-      VecCreateMPI(mpiCommGlobal, nRankRows, nOverallRows, &(nestVec[i]));
+      VecCreateMPI(domainComm, nRankRows, nOverallRows, &(nestVec[i]));
 
       setDofVector(nestVec[i], vec->getDOFVector(i));
       
@@ -127,7 +127,7 @@ namespace AMDiS {
       VecAssemblyEnd(nestVec[i]);
     }
 
-    VecCreateNest(mpiCommGlobal, nComponents, PETSC_NULL, 
+    VecCreateNest(domainComm, nComponents, PETSC_NULL, 
 		  &(nestVec[0]), &(getVecRhsInterior()));
 
     vecRhsAssembly();
@@ -138,7 +138,7 @@ namespace AMDiS {
   {
     FUNCNAME("PetscSolverGlobalBlockMatrix::initSolver()");
 
-    KSPCreate(mpiCommGlobal, &ksp);
+    KSPCreate(domainComm, &ksp);
     KSPSetOperators(ksp, getMatInterior(), getMatInterior(), 
 		    SAME_NONZERO_PATTERN); 
     KSPSetOptionsPrefix(ksp, kspPrefix.c_str());
diff --git a/AMDiS/src/parallel/PetscSolverGlobalMatrix.cc b/AMDiS/src/parallel/PetscSolverGlobalMatrix.cc
index a5e3f7da..bda315c2 100644
--- a/AMDiS/src/parallel/PetscSolverGlobalMatrix.cc
+++ b/AMDiS/src/parallel/PetscSolverGlobalMatrix.cc
@@ -44,7 +44,7 @@ namespace AMDiS {
       fillPetscMatrixWithCoarseSpace(seqMat);
       return;
     }
-
+    
     // === Create PETSc vector (solution and a temporary vector). ===
 
 #if (DEBUG != 0)
@@ -127,6 +127,9 @@ namespace AMDiS {
     values.reserve(300);
     valuesOther.reserve(300);
 
+    bool localMatrix = 
+      (meshDistributor->getMeshLevelData().getMpiComm(meshLevel) == MPI::COMM_SELF);
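+    // If the interior communicator on this mesh level is MPI::COMM_SELF, the
+    // interior matrix is purely local and local matrix indices are used below.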
+
     // === Traverse all sequentially created matrices and add the values to ===
     // === the global PETSc matrices.                                       ===
 
@@ -193,8 +196,8 @@ namespace AMDiS {
 
 	    int rowIndex = rowCoarseSpace->getMatIndex(rowComponent, *cursor);
 	    MatSetValues(getMatCoarseByComponent(rowComponent, colComponent),
-			 1, &rowIndex, cols.size(),
-			 &(cols[0]), &(values[0]), ADD_VALUES);
+	    		 1, &rowIndex, cols.size(),
+	    		 &(cols[0]), &(values[0]), ADD_VALUES);
 
 	    if (colsOther.size()) {
 	      for (unsigned int i = 0; i < colsOther.size(); i++)
@@ -211,12 +214,12 @@ namespace AMDiS {
 	      continue;
 
 	    int localRowIndex = 
-	      (subdomainLevel == 0 ? 
+	      (localMatrix ? 
 	       interiorMap->getLocalMatIndex(rowComponent, *cursor) :
 	       interiorMap->getMatIndex(rowComponent, *cursor));
 
 	    for (unsigned int i = 0; i < cols.size(); i++) {
-	      if (subdomainLevel == 0)
+	      if (localMatrix)
 		cols[i] = interiorMap->getLocalMatIndex(colComponent, cols[i]);
 	      else
 		cols[i] = interiorMap->getMatIndex(colComponent, cols[i]);
@@ -233,9 +236,9 @@ namespace AMDiS {
 		colsOther[i] = 
 		  colCoarseSpace->getMatIndex(colComponent, colsOther[i]);
 
-  	      MatSetValues(getMatInteriorCoarseByComponent(colComponent), 
-	       		   1, &globalRowIndex, colsOther.size(),
-  	       		   &(colsOther[0]), &(valuesOther[0]), ADD_VALUES);
+	      MatSetValues(getMatInteriorCoarseByComponent(colComponent), 
+			   1, &globalRowIndex, colsOther.size(),
+			   &(colsOther[0]), &(valuesOther[0]), ADD_VALUES);
 	    }
 	  }
 	} 
@@ -244,10 +247,9 @@ namespace AMDiS {
 
     matAssembly();
 
-
     // === Create solver for the non primal (thus local) variables. ===
 
-    KSPCreate(mpiCommLocal, &kspInterior);
+    KSPCreate(domainComm, &kspInterior);
     KSPSetOperators(kspInterior, getMatInterior(), getMatInterior(),
 		    SAME_NONZERO_PATTERN);
     KSPSetOptionsPrefix(kspInterior, "interior_");
@@ -258,7 +260,7 @@ namespace AMDiS {
       PCFactorSetMatSolverPackage(pcInterior, MATSOLVERMUMPS);
     } else {
       PCSetType(pcInterior, PCLU);
-      if (subdomainLevel == 0)
+      if (localMatrix)
 	PCFactorSetMatSolverPackage(pcInterior, MATSOLVERUMFPACK);
       else
 	PCFactorSetMatSolverPackage(pcInterior, MATSOLVERMUMPS);
@@ -346,7 +348,7 @@ namespace AMDiS {
 
 	VecNormalize(nullspaceBasis, PETSC_NULL);
 	
-	MatNullSpaceCreate(mpiCommGlobal, (hasConstantNullspace ? PETSC_TRUE : PETSC_FALSE), 
+	MatNullSpaceCreate(domainComm, (hasConstantNullspace ? PETSC_TRUE : PETSC_FALSE), 
 			   1, &nullspaceBasis, &matNullspace);
 
 	MatMult(getMatInterior(), nullspaceBasis, getVecSolInterior());
@@ -354,7 +356,7 @@ namespace AMDiS {
 	VecNorm(getVecSolInterior(), NORM_2, &n);
 	MSG("NORM IS: %e\n", n);
       } else {
-	MatNullSpaceCreate(mpiCommGlobal, PETSC_TRUE, 0, PETSC_NULL, &matNullspace);
+	MatNullSpaceCreate(domainComm, PETSC_TRUE, 0, PETSC_NULL, &matNullspace);
       }
 
       MSG("NULLSPACE IS NOT REMOVED!\n");
@@ -415,7 +417,7 @@ namespace AMDiS {
     double t0 = 0.0, t1 = 0.0;
 
     Vec tmp;
-    if (mpiCommLocal.Get_size() == 1)
+    if (domainComm.Get_size() == 1)
       interiorMap->createLocalVec(tmp);
     else
       interiorMap->createVec(tmp);
@@ -524,7 +526,7 @@ namespace AMDiS {
   {
     FUNCNAME("PetscSolverGlobalMatrix::initSolver()");
 
-    KSPCreate(mpiCommGlobal, &ksp);
+    KSPCreate(domainComm, &ksp);
     KSPSetOperators(ksp, getMatInterior(), getMatInterior(), 
 		    SAME_NONZERO_PATTERN); 
     KSPSetTolerances(ksp, 0.0, 1e-8, PETSC_DEFAULT, PETSC_DEFAULT);
@@ -825,7 +827,7 @@ namespace AMDiS {
 
     PetscSolver* subSolver = new PetscSolverGlobalMatrix("");
     subSolver->setKspPrefix(kspPrefix);
-    subSolver->setMeshDistributor(meshDistributor);
+    subSolver->setMeshDistributor(meshDistributor, 0);
     subSolver->init(fe, fe);
 
     ParallelDofMapping &subDofMap = subSolver->getDofMap();
@@ -866,7 +868,7 @@ namespace AMDiS {
     
     
     MatNullSpace matNullSpace;
-    MatNullSpaceCreate(mpiCommGlobal, PETSC_FALSE, 1, &nullSpaceBasis, &matNullSpace);
+    MatNullSpaceCreate(domainComm, PETSC_FALSE, 1, &nullSpaceBasis, &matNullSpace);
     KSPSetNullSpace(ksp, matNullSpace);
     MatNullSpaceDestroy(&matNullSpace);
   }
@@ -877,7 +879,7 @@ namespace AMDiS {
     FUNCNAME("PetscSolverGlobalMatrix::setConstantNullSpace()");
 
     MatNullSpace matNullSpace;
-    MatNullSpaceCreate(mpiCommGlobal, PETSC_TRUE, 0, PETSC_NULL, &matNullSpace);
+    MatNullSpaceCreate(domainComm, PETSC_TRUE, 0, PETSC_NULL, &matNullSpace);
     KSPSetNullSpace(ksp, matNullSpace);
     MatNullSpaceDestroy(&matNullSpace);
   }
diff --git a/AMDiS/src/parallel/PetscSolverNavierStokes.cc b/AMDiS/src/parallel/PetscSolverNavierStokes.cc
index 38927ce7..e3a9aa11 100644
--- a/AMDiS/src/parallel/PetscSolverNavierStokes.cc
+++ b/AMDiS/src/parallel/PetscSolverNavierStokes.cc
@@ -104,7 +104,7 @@ namespace AMDiS {
     FUNCNAME("PetscSolverNavierStokes::initSolver()");
 
     // Create FGMRES based outer solver
-    KSPCreate(mpiCommGlobal, &ksp);
+    KSPCreate(domainComm, &ksp);
     KSPSetOperators(ksp, getMatInterior(), getMatInterior(), SAME_NONZERO_PATTERN);
     KSPMonitorSet(ksp, KSPMonitorTrueResidualNorm, PETSC_NULL, PETSC_NULL);
     petsc_helper::setSolver(ksp, "ns_", KSPFGMRES, PCNONE, 1e-6, 1e-8, 10000);
diff --git a/AMDiS/src/parallel/PetscSolverSchur.cc b/AMDiS/src/parallel/PetscSolverSchur.cc
index 1c0db4e6..260179b6 100644
--- a/AMDiS/src/parallel/PetscSolverSchur.cc
+++ b/AMDiS/src/parallel/PetscSolverSchur.cc
@@ -31,7 +31,7 @@ namespace AMDiS {
 
     boundaryDofs.clear();
     std::set<DegreeOfFreedom> boundaryLocalDofs;
-    for (DofComm::Iterator it(meshDistributor->getDofComm().getSendDofs(), feSpace);
+    for (DofComm::Iterator it(meshDistributor->getDofComm(0).getSendDofs(), feSpace);
 	 !it.end(); it.nextRank())
       for (; !it.endDofIter(); it.nextDof()) {
 	boundaryLocalDofs.insert(it.getDofIndex());	  
@@ -40,7 +40,7 @@ namespace AMDiS {
 
       
     nBoundaryDofs = boundaryDofs.size();
-    mpi::getDofNumbering(mpiCommGlobal, nBoundaryDofs, 
+    mpi::getDofNumbering(domainComm, nBoundaryDofs, 
 			 rStartBoundaryDofs, nOverallBoundaryDofs);
 
 
@@ -55,11 +55,11 @@ namespace AMDiS {
       ("Should not happen!\n");
 
     int rStartEdgeDofs, nOverallEdgeDofs;
-    mpi::getDofNumbering(mpiCommGlobal, nEdgeDofs, 
+    mpi::getDofNumbering(domainComm, nEdgeDofs, 
 			 rStartEdgeDofs, nOverallEdgeDofs);
 
     int rStartVertexDofs, nOverallVertexDofs;
-    mpi::getDofNumbering(mpiCommGlobal, nVertexDofs, 
+    mpi::getDofNumbering(domainComm, nVertexDofs, 
 			 rStartVertexDofs, nOverallVertexDofs);
 
     TEST_EXIT_DBG(nOverallEdgeDofs + nOverallVertexDofs == nOverallBoundaryDofs)
@@ -93,7 +93,7 @@ namespace AMDiS {
 
 
     std::set<DegreeOfFreedom> otherBoundaryLocalDofs;
-    for (DofComm::Iterator it(meshDistributor->getDofComm().getRecvDofs(), feSpace);
+    for (DofComm::Iterator it(meshDistributor->getDofComm(0).getRecvDofs(), feSpace);
 	 !it.end(); it.nextRank())
       for (; !it.endDofIter(); it.nextDof())
 	otherBoundaryLocalDofs.insert(it.getDofIndex());
@@ -113,7 +113,7 @@ namespace AMDiS {
 #endif
       
     nInteriorDofs = interiorDofs.size();
-    mpi::getDofNumbering(mpiCommGlobal, nInteriorDofs, 
+    mpi::getDofNumbering(domainComm, nInteriorDofs, 
 			 rStartInteriorDofs, nOverallInteriorDofs);
 
     {
@@ -128,8 +128,8 @@ namespace AMDiS {
     TEST_EXIT_DBG(nInteriorDofs > 0)("Should not happen!\n");
 
 
-    StdMpi<vector<DegreeOfFreedom> > stdMpi(mpiCommGlobal);
-    for (DofComm::Iterator it(meshDistributor->getDofComm().getSendDofs(), feSpace);
+    StdMpi<vector<DegreeOfFreedom> > stdMpi(domainComm);
+    for (DofComm::Iterator it(meshDistributor->getDofComm(0).getSendDofs(), feSpace);
 	 !it.end(); it.nextRank()) {
       stdMpi.getSendData(it.getRank()).resize(0);
       stdMpi.getSendData(it.getRank()).reserve(it.getDofs().size());
@@ -146,13 +146,13 @@ namespace AMDiS {
 
     stdMpi.updateSendDataSize();
 
-    for (DofComm::Iterator it(meshDistributor->getDofComm().getRecvDofs(), feSpace);
+    for (DofComm::Iterator it(meshDistributor->getDofComm(0).getRecvDofs(), feSpace);
 	 !it.end(); it.nextRank())
       stdMpi.recv(it.getRank());
 
     stdMpi.startCommunication();
 
-    for (DofComm::Iterator it(meshDistributor->getDofComm().getRecvDofs(), feSpace);
+    for (DofComm::Iterator it(meshDistributor->getDofComm(0).getRecvDofs(), feSpace);
 	 !it.end(); it.nextRank())
       for (; !it.endDofIter(); it.nextDof()) {
 	int globalRecvDof = (*interiorMap)[feSpace][it.getDofIndex()].global;
@@ -164,12 +164,12 @@ namespace AMDiS {
 
     // === Create PETSc IS structurs for interior and boundary DOFs. ===
 
-    ISCreateStride(mpiCommGlobal, 
+    ISCreateStride(domainComm, 
 		   nInteriorDofs * nComponents,
 		   (rStartInteriorDofs + rStartBoundaryDofs) * nComponents, 
 		   1, &interiorIs);
 
-    ISCreateStride(mpiCommGlobal,
+    ISCreateStride(domainComm,
 		   nBoundaryDofs * nComponents,
 		   (rStartInteriorDofs + rStartBoundaryDofs + nInteriorDofs) * nComponents, 
 		   1, &boundaryIs);
@@ -192,22 +192,22 @@ namespace AMDiS {
     int nOverallBoundaryRows = nOverallBoundaryDofs * nComponents;
 
 
-    MatCreateAIJ(mpiCommGlobal, 
+    MatCreateAIJ(domainComm, 
 		 nInteriorRows, nInteriorRows, 
 		 nOverallInteriorRows, nOverallInteriorRows,
 		 100, PETSC_NULL, 100, PETSC_NULL, &matA11);
 
-    MatCreateAIJ(mpiCommGlobal, 
+    MatCreateAIJ(domainComm, 
 		 nBoundaryRows, nBoundaryRows, 
 		 nOverallBoundaryRows, nOverallBoundaryRows,
 		 100, PETSC_NULL, 100, PETSC_NULL, &matA22);
 
-    MatCreateAIJ(mpiCommGlobal, 
+    MatCreateAIJ(domainComm, 
 		 nInteriorRows, nBoundaryRows, 
 		 nOverallInteriorRows, nOverallBoundaryRows,
 		 100, PETSC_NULL, 100, PETSC_NULL, &matA12);    
 
-    MatCreateAIJ(mpiCommGlobal, 
+    MatCreateAIJ(domainComm, 
 		 nBoundaryRows, nInteriorRows, 
 		 nOverallBoundaryRows, nOverallInteriorRows,
 		 100, PETSC_NULL, 100, PETSC_NULL, &matA21);
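
The four MatCreateAIJ calls set up the 2x2 interior/boundary block splitting (matA11, matA12, matA21, matA22) with a blanket preallocation of 100 nonzeros per row in both the diagonal and off-diagonal parts. A reduced, runnable example of the same call for two of the blocks, using PETSC_DECIDE for the local sizes instead of the explicit row counts from the DOF mapping:

    #include <petscmat.h>

    int main(int argc, char **argv)
    {
      PetscInitialize(&argc, &argv, PETSC_NULL, PETSC_NULL);

      // Made-up global sizes for the interior and boundary blocks.
      PetscInt nOverallInterior = 2000, nOverallBoundary = 400;

      Mat matA11, matA12;
      // Blanket preallocation of 100 nonzeros per row, as in the patch;
      // tighter estimates would normally come from the mesh connectivity.
      MatCreateAIJ(PETSC_COMM_WORLD, PETSC_DECIDE, PETSC_DECIDE,
                   nOverallInterior, nOverallInterior,
                   100, PETSC_NULL, 100, PETSC_NULL, &matA11);
      MatCreateAIJ(PETSC_COMM_WORLD, PETSC_DECIDE, PETSC_DECIDE,
                   nOverallInterior, nOverallBoundary,
                   100, PETSC_NULL, 100, PETSC_NULL, &matA12);

      MatDestroy(&matA11);
      MatDestroy(&matA12);
      PetscFinalize();
      return 0;
    }
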
@@ -240,7 +240,7 @@ namespace AMDiS {
     tmpIS[0] = interiorIs;
     tmpIS[1] = boundaryIs;
 
-    MatCreateNest(mpiCommGlobal, 2, &tmpIS[0], 2, &tmpIS[0], &tmpMat[0][0], 
+    MatCreateNest(domainComm, 2, &tmpIS[0], 2, &tmpIS[0], &tmpMat[0][0], 
 		  &getMatInterior());
     MatNestSetVecType(getMatInterior(), VECNEST);
 
@@ -249,7 +249,7 @@ namespace AMDiS {
     int nRankRows = (*interiorMap)[feSpace].nRankDofs * nComponents;
     int nOverallRows = (*interiorMap)[feSpace].nOverallDofs * nComponents;
 
-    VecCreateMPI(mpiCommGlobal, nRankRows, nOverallRows, &petscSolVec);
+    VecCreateMPI(domainComm, nRankRows, nOverallRows, &petscSolVec);
   }
 
 
@@ -262,7 +262,7 @@ namespace AMDiS {
     int nRankRows = (*interiorMap)[feSpace].nRankDofs * nComponents;
     int nOverallRows = (*interiorMap)[feSpace].nOverallDofs * nComponents;
 
-    VecCreateMPI(mpiCommGlobal, nRankRows, nOverallRows, &(getVecRhsInterior()));
+    VecCreateMPI(domainComm, nRankRows, nOverallRows, &(getVecRhsInterior()));
 
     for (int i = 0; i < nComponents; i++)
       setDofVector(getVecRhsInterior(), vec->getDOFVector(i), nComponents, i);
@@ -279,7 +279,7 @@ namespace AMDiS {
     const FiniteElemSpace *feSpace = componentSpaces[0];
     int nComponents = vec.getSize();
 
-    KSPCreate(mpiCommGlobal, &kspInterior);
+    KSPCreate(domainComm, &kspInterior);
 
     KSPSetOperators(kspInterior, getMatInterior(), getMatInterior(), 
 		    SAME_NONZERO_PATTERN); 
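
Throughout this file the PETSc objects are now created on domainComm instead of mpiCommGlobal, i.e. on the communicator of the (sub)domain the solver is responsible for, presumably set up by the mesh distributor for the current mesh level. The following hypothetical sketch only illustrates the underlying MPI mechanism of splitting a global communicator into per-domain communicators; the group size is made up and this is not how AMDiS derives domainComm:

    #include <mpi.h>
    #include <stdio.h>

    int main(int argc, char **argv)
    {
      MPI_Init(&argc, &argv);

      int rank;
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);

      // Hypothetical grouping: every 'groupSize' consecutive ranks form one
      // subdomain and get their own communicator.
      int groupSize = 4;
      MPI_Comm domainComm;
      MPI_Comm_split(MPI_COMM_WORLD, rank / groupSize, rank, &domainComm);

      int domainRank, domainSize;
      MPI_Comm_rank(domainComm, &domainRank);
      MPI_Comm_size(domainComm, &domainSize);
      printf("global rank %d -> domain rank %d of %d\n",
             rank, domainRank, domainSize);

      MPI_Comm_free(&domainComm);
      MPI_Finalize();
      return 0;
    }
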
diff --git a/AMDiS/src/parallel/StdMpi.cc b/AMDiS/src/parallel/StdMpi.cc
index 664dae74..2bec40b8 100644
--- a/AMDiS/src/parallel/StdMpi.cc
+++ b/AMDiS/src/parallel/StdMpi.cc
@@ -109,7 +109,8 @@ namespace AMDiS {
     return dataSize;
   }
 
-  void StdMpiHelper<vector<std::set<int> > >::createBuffer(vector<std::set<int> > &data, int *buf)
+  void StdMpiHelper<vector<std::set<int> > >::createBuffer(vector<std::set<int> > &data, 
+							   int *buf)
   {
     int counter = 1;
     buf[0] = data.size();
@@ -121,7 +122,9 @@ namespace AMDiS {
     }
   }
 
-  void StdMpiHelper<vector<std::set<int> > >::makeFromBuffer(vector<std::set<int> > &data, int *buf, int bufSize)
+  void StdMpiHelper<vector<std::set<int> > >::makeFromBuffer(vector<std::set<int> > &data, 
+							     int *buf, 
+							     int bufSize)
   {
     FUNCNAME("StdMpiHelper<vector<set<int> > >::makeFromBuffer()");
 
@@ -131,6 +134,9 @@ namespace AMDiS {
     for (int i = 0; i < buf[0]; i++) {
       data[i].clear();
       int setSize = buf[counter++];
+      TEST_EXIT_DBG(setSize <= bufSize - counter)
+	("Corrupted buffer: set size %d exceeds remaining buffer (size %d, position %d)\n", setSize, bufSize, counter);
+
       for (int j = 0; j < setSize; j++)
 	data[i].insert(buf[counter++]);
     }
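
The added check guards makeFromBuffer against reading past the end of the flat buffer. The layout used by this StdMpiHelper specialization is: buf[0] holds the number of sets, followed for each set by its size and then its elements. A standalone sketch of the same pack/unpack scheme, with a plain assert in place of TEST_EXIT_DBG:

    #include <cassert>
    #include <set>
    #include <vector>

    // Flat layout: [ nSets, size_0, elems_0..., size_1, elems_1..., ... ]
    std::vector<int> createBuffer(const std::vector<std::set<int> > &data)
    {
      std::vector<int> buf;
      buf.push_back(static_cast<int>(data.size()));
      for (size_t i = 0; i < data.size(); i++) {
        buf.push_back(static_cast<int>(data[i].size()));
        for (std::set<int>::const_iterator it = data[i].begin();
             it != data[i].end(); ++it)
          buf.push_back(*it);
      }
      return buf;
    }

    void makeFromBuffer(std::vector<std::set<int> > &data,
                        const int *buf, int bufSize)
    {
      int counter = 1;
      data.resize(buf[0]);
      for (int i = 0; i < buf[0]; i++) {
        data[i].clear();
        int setSize = buf[counter++];
        // Same consistency check as in the patch: the announced set size
        // must fit into what is left of the buffer.
        assert(setSize <= bufSize - counter);
        for (int j = 0; j < setSize; j++)
          data[i].insert(buf[counter++]);
      }
    }

    int main()
    {
      std::vector<std::set<int> > in(2), out;
      in[0].insert(3); in[0].insert(7);
      in[1].insert(42);

      std::vector<int> buf = createBuffer(in);
      makeFromBuffer(out, &buf[0], static_cast<int>(buf.size()));
      assert(out == in);
      return 0;
    }
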
@@ -148,7 +154,8 @@ namespace AMDiS {
     return data.size();
   }
 
-  void StdMpiHelper<vector<double> >::createBuffer(vector<double> &data, double *buf)
+  void StdMpiHelper<vector<double> >::createBuffer(vector<double> &data, 
+						   double *buf)
   {
     for (unsigned int i = 0; i < data.size(); i++)
       buf[i] = data[i];
-- 
GitLab