From 3e2d84dc832f6e3fd199393f3657a612349ec4cc Mon Sep 17 00:00:00 2001
From: Thomas Witkowski <thomas.witkowski@gmx.de>
Date: Mon, 23 Apr 2012 11:16:51 +0000
Subject: [PATCH] Setup for multilevel parallel DOF mapping
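
Make FeSpaceDofMap and ParallelDofMapping store their data per mesh
level: the dofMap and nonRankDofs containers and the counters
nRankDofs, nLocalDofs, nOverallDofs and rStartDofs become vectors
indexed by the level, and the compute/getter functions take an
explicit level argument. computeMatIndex() is still restricted to
level 0. SubDomainSolver gets a setLevel() method and chooses between
a sequential and a parallel interior matrix depending on the size of
the interior communicator. The debug helpers
MeshDistributor::mapGlobalToLocal() and
ParallelDebug::printMapLocalGlobal() are removed or disabled until
they are adapted to the new mapping.

For now, all call sites pass the level explicitly, e.g.

    localDofMap.getRankDofs()     ->  localDofMap.getRankDofs(0)
    dofMap[feSpace].nRankDofs     ->  dofMap[feSpace].nRankDofs[0]
    dualDofMap[feSpace].getMap()  ->  dualDofMap[feSpace].getMap(0)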

---
 AMDiS/src/parallel/MeshDistributor.cc         |  22 +--
 AMDiS/src/parallel/MeshDistributor.h          |   7 -
 AMDiS/src/parallel/ParallelDebug.cc           |   8 +-
 AMDiS/src/parallel/ParallelDofMapping.cc      | 143 ++++++++++--------
 AMDiS/src/parallel/ParallelDofMapping.h       | 125 ++++++++-------
 AMDiS/src/parallel/ParallelTypes.h            |   3 -
 AMDiS/src/parallel/PetscSolverFeti.cc         | 100 ++++++------
 AMDiS/src/parallel/PetscSolverFeti.h          |   8 +-
 .../parallel/PetscSolverGlobalBlockMatrix.cc  |  16 +-
 AMDiS/src/parallel/PetscSolverGlobalMatrix.cc |  18 +--
 AMDiS/src/parallel/PetscSolverSchur.cc        |   8 +-
 AMDiS/src/parallel/SubDomainSolver.cc         |  33 ++--
 AMDiS/src/parallel/SubDomainSolver.h          |   8 +
 test/mpi/src/test0002.cc                      |  12 +-
 14 files changed, 275 insertions(+), 236 deletions(-)

diff --git a/AMDiS/src/parallel/MeshDistributor.cc b/AMDiS/src/parallel/MeshDistributor.cc
index 35a0a8f0..630c3401 100644
--- a/AMDiS/src/parallel/MeshDistributor.cc
+++ b/AMDiS/src/parallel/MeshDistributor.cc
@@ -1200,7 +1200,7 @@ namespace AMDiS {
 
     // === Run mesh partitioner to calculate a new mesh partitioning.  ===
 
-    partitioner->setLocalGlobalDofMap(&(dofMap[feSpaces[0]].getMap()));
+    partitioner->setLocalGlobalDofMap(&(dofMap[feSpaces[0]].getMap(0)));
 
     bool partitioningSucceed = 
       partitioner->partition(elemWeights, ADAPTIVE_REPART);
@@ -1979,9 +1979,9 @@ namespace AMDiS {
     MSG("------------- Debug information -------------\n");
     for (unsigned int i = 0; i < feSpaces.size(); i++) {
       MSG("FE space %d:\n", i);
-      MSG("   nRankDofs = %d\n", dofMap[feSpaces[i]].nRankDofs);
-      MSG("   nOverallDofs = %d\n", dofMap[feSpaces[i]].nOverallDofs);
-      MSG("   rStartDofs = %d\n", dofMap[feSpaces[i]].rStartDofs);
+      MSG("   nRankDofs = %d\n", dofMap[feSpaces[i]].nRankDofs[0]);
+      MSG("   nOverallDofs = %d\n", dofMap[feSpaces[i]].nOverallDofs[0]);
+      MSG("   rStartDofs = %d\n", dofMap[feSpaces[i]].rStartDofs[0]);
     }
 
     stringstream oss;
@@ -2259,20 +2259,6 @@ namespace AMDiS {
   }
 
 
-  DegreeOfFreedom MeshDistributor::mapGlobalToLocal(const FiniteElemSpace *feSpace,
-						    DegreeOfFreedom dof)
-  {
-    FUNCNAME("MeshDistributor::mapGlobalToLocal()");
-
-    for (DofMap::iterator it = dofMap[feSpace].getMap().begin();
-	 it != dofMap[feSpace].getMap().end(); ++it) 
-      if (it->second.global == dof)
-	return it->first;
-
-    return -1;
-  }
-
-
   void MeshDistributor::serialize(ostream &out)
   {
     FUNCNAME("MeshDistributor::serialize()");
diff --git a/AMDiS/src/parallel/MeshDistributor.h b/AMDiS/src/parallel/MeshDistributor.h
index cf80fa64..f1b0c2bf 100644
--- a/AMDiS/src/parallel/MeshDistributor.h
+++ b/AMDiS/src/parallel/MeshDistributor.h
@@ -157,13 +157,6 @@ namespace AMDiS {
       return periodicMap;
     }
 
-    /// Returns for a global index the DOF index in rank's subdomain. As there
-    /// is no direct data structure that stores this information, we have to
-    /// search for it in \ref dofMap. This is not very efficient and this 
-    /// function should thus be used for debugging only.
-    DegreeOfFreedom mapGlobalToLocal(const FiniteElemSpace *feSpace,
-				     DegreeOfFreedom dof);
-
     DofComm& getSendDofs()
     {
       return sendDofs;
diff --git a/AMDiS/src/parallel/ParallelDebug.cc b/AMDiS/src/parallel/ParallelDebug.cc
index 174d5566..d9a63eed 100644
--- a/AMDiS/src/parallel/ParallelDebug.cc
+++ b/AMDiS/src/parallel/ParallelDebug.cc
@@ -634,7 +634,12 @@ namespace AMDiS {
 
 
   void ParallelDebug::printMapLocalGlobal(MeshDistributor &pdb, int rank)
-  {    
+  {   
+    FUNCNAME("ParallelDebug::printMapLocalGlobal()");
+
+    ERROR_EXIT("Rewrite this function!\n");
+
+#if 0
     if (rank == -1 || pdb.mpiRank == rank) {
       const FiniteElemSpace *feSpace = pdb.feSpaces[0];
 
@@ -662,6 +667,7 @@ namespace AMDiS {
 	cout << "------" << endl;
       }
     }
+#endif
   }
 
 
diff --git a/AMDiS/src/parallel/ParallelDofMapping.cc b/AMDiS/src/parallel/ParallelDofMapping.cc
index fc651b0f..35a81ec4 100644
--- a/AMDiS/src/parallel/ParallelDofMapping.cc
+++ b/AMDiS/src/parallel/ParallelDofMapping.cc
@@ -20,47 +20,61 @@ namespace AMDiS {
   void FeSpaceDofMap::clear()
   {
     dofMap.clear();
+    dofMap.resize(nLevel);
+
     nonRankDofs.clear();
-    nRankDofs = 0;
-    nLocalDofs = 0;
-    nOverallDofs = 0;
-    rStartDofs = 0;
+    nonRankDofs.resize(nLevel);
+
+    for (int i = 0; i < nLevel; i++) {
+      nRankDofs[i] = 0;
+      nLocalDofs[i] = 0;
+      nOverallDofs[i] = 0;
+      rStartDofs[i] = 0;
+    }
   }
 
 
   void FeSpaceDofMap::update()
   {
-    // === Compute local indices for all rank owned DOFs. ===
+    FUNCNAME("FeSpaceDofMap::update()");
 
-    for (DofMap::iterator it = dofMap.begin(); it != dofMap.end(); ++it)
-      if (it->second.local == -1 && nonRankDofs.count(it->first) == 0)
-	it->second.local = nRankDofs++;
-        
-    // === Compute number of local and global DOFs in the mapping. ===
-
-    nOverallDofs = 0;
-    rStartDofs = 0;
-    mpi::getDofNumbering(mpiComm, nRankDofs, rStartDofs, nOverallDofs);
-    
-    // === If required, compute also the global indices. ===
+    for (int i = 0; i < nLevel; i++) {
 
-    if (needGlobalMapping) {
-      computeGlobalMapping();
+      // === Compute local indices for all rank owned DOFs. ===
       
-      if (hasNonLocalDofs)
-	computeNonLocalIndices();
+      for (DofMap::iterator it = dofMap[i].begin(); it != dofMap[i].end(); ++it)
+	if (it->second.local == -1 && nonRankDofs[i].count(it->first) == 0)
+	  it->second.local = nRankDofs[i]++;
+      
+      // === Compute number of local and global DOFs in the mapping. ===
+      
+      nOverallDofs[i] = 0;
+      rStartDofs[i] = 0;
+      mpi::getDofNumbering(mpiComm, nRankDofs[i], rStartDofs[i], nOverallDofs[i]);
+      
+      // === If required, compute also the global indices. ===
+      
+      if (needGlobalMapping) {
+	computeGlobalMapping(i);
+	
+	if (hasNonLocalDofs)
+	  computeNonLocalIndices(i);
+      }
     }
   }
 
 
-  void FeSpaceDofMap::computeGlobalMapping()
+  void FeSpaceDofMap::computeGlobalMapping(int level)
   {
-    for (DofMap::iterator it = dofMap.begin(); it != dofMap.end(); ++it)
-      it->second.global = it->second.local + rStartDofs;
+    FUNCNAME("FeSpaceDofMap::computeGlobalMapping()");
+
+    for (DofMap::iterator it = dofMap[level].begin(); 
+	 it != dofMap[level].end(); ++it)
+      it->second.global = it->second.local + rStartDofs[level];
   }
 
 
-  void FeSpaceDofMap::computeNonLocalIndices()
+  void FeSpaceDofMap::computeNonLocalIndices(int level)
   {
     FUNCNAME("FeSpaceDofMap::computeNonLocalIndices()");
 
@@ -70,20 +84,22 @@ namespace AMDiS {
     // === other ranks that also include this DOF.                           ===
 
     StdMpi<vector<int> > stdMpi(mpiComm);
-    for (DofComm::Iterator it(*sendDofs, feSpace); !it.end(); it.nextRank())
+    for (DofComm::Iterator it(*sendDofs, level, feSpace); 
+	 !it.end(); it.nextRank())
       for (; !it.endDofIter(); it.nextDof())
-	if (dofMap.count(it.getDofIndex()) && !nonRankDofs.count(it.getDofIndex()))
-	  stdMpi.getSendData(it.getRank()).push_back(dofMap[it.getDofIndex()].global);
+	if (dofMap[level].count(it.getDofIndex()) && !nonRankDofs[level].count(it.getDofIndex()))
+	  stdMpi.getSendData(it.getRank()).push_back(dofMap[level][it.getDofIndex()].global);
 
     stdMpi.updateSendDataSize();
 
 
     // === Check from which ranks this rank must receive some data. ===
 
-    for (DofComm::Iterator it(*recvDofs, feSpace); !it.end(); it.nextRank()) {
+    for (DofComm::Iterator it(*recvDofs, level, feSpace); 
+	 !it.end(); it.nextRank()) {
       bool recvFromRank = false;
       for (; !it.endDofIter(); it.nextDof()) {
-	if (nonRankDofs.count(it.getDofIndex())) {
+	if (nonRankDofs[level].count(it.getDofIndex())) {
 	  recvFromRank = true;
 	  break;
 	}
@@ -101,12 +117,12 @@ namespace AMDiS {
 
     // === And set the global indices for all DOFs that are not owned by rank. ===
     
-    for (DofComm::Iterator it(*recvDofs, feSpace);
+    for (DofComm::Iterator it(*recvDofs, level, feSpace);
 	 !it.end(); it.nextRank()) {
       int i = 0;
       for (; !it.endDofIter(); it.nextDof())
-	if (nonRankDofs.count(it.getDofIndex()))
-	  dofMap[it.getDofIndex()].global = stdMpi.getRecvData(it.getRank())[i++];
+	if (nonRankDofs[level].count(it.getDofIndex()))
+	  dofMap[level][it.getDofIndex()].global = stdMpi.getRecvData(it.getRank())[i++];
     }
   }
 
@@ -144,10 +160,12 @@ namespace AMDiS {
 	 it != feSpacesUnique.end(); ++it)
       data[*it].clear();
 
-    nRankDofs = -1;
-    nLocalDofs = -1;
-    nOverallDofs = -1;
-    rStartDofs = -1;
+    for (int i = 0; i < nLevel; i++) {
+      nRankDofs[i] = -1;
+      nLocalDofs[i] = -1;
+      nOverallDofs[i] = -1;
+      rStartDofs[i] = -1;
+    }
 
     dofToMatIndex.clear();
   }
@@ -180,56 +198,56 @@ namespace AMDiS {
   }    
 
 
-  int ParallelDofMapping::computeRankDofs()
+  int ParallelDofMapping::computeRankDofs(int level)
   {
     FUNCNAME("ParallelDofMapping::computeRankDofs()");
     
     int result = 0;
     for (unsigned int i = 0; i < feSpaces.size(); i++) {
       TEST_EXIT_DBG(data.count(feSpaces[i]))("Should not happen!\n");
-      result += data[feSpaces[i]].nRankDofs;
+      result += data[feSpaces[i]].nRankDofs[level];
     }
     
     return result;
   }
 
 
-  int ParallelDofMapping::computeLocalDofs()
+  int ParallelDofMapping::computeLocalDofs(int level)
   {
     FUNCNAME("ParallelDofMapping::computeLocalDofs()");
     
     int result = 0;
     for (unsigned int i = 0; i < feSpaces.size(); i++) {
       TEST_EXIT_DBG(data.count(feSpaces[i]))("Should not happen!\n");
-      result += data[feSpaces[i]].nLocalDofs;
+      result += data[feSpaces[i]].nLocalDofs[level];
     }
     
     return result;
   }
 
 
-  int ParallelDofMapping::computeOverallDofs()
+  int ParallelDofMapping::computeOverallDofs(int level)
   {
     FUNCNAME("ParallelDofMapping::computeOverallDofs()");
 
     int result = 0;
     for (unsigned int i = 0; i < feSpaces.size(); i++) {
       TEST_EXIT_DBG(data.count(feSpaces[i]))("Should not happen!\n");
-      result += data.find(feSpaces[i])->second.nOverallDofs;
+      result += data.find(feSpaces[i])->second.nOverallDofs[level];
     }
     
     return result;
   }
 
 
-  int ParallelDofMapping::computeStartDofs()
+  int ParallelDofMapping::computeStartDofs(int level)
   {
     FUNCNAME("ParallelDofMapping::computeStartDofs()");
 
     int result = 0;
     for (unsigned int i = 0; i < feSpaces.size(); i++) {
       TEST_EXIT_DBG(data.count(feSpaces[i]))("Should not happen!\n");
-      result += data.find(feSpaces[i])->second.rStartDofs;
+      result += data.find(feSpaces[i])->second.rStartDofs[level];
     }
     
     return result;
@@ -245,15 +263,18 @@ namespace AMDiS {
 	 it != feSpacesUnique.end(); ++it)
       data[*it].update();
     
-    // Compute all numbers from this mappings.
-    nRankDofs = computeRankDofs();
-    nLocalDofs = computeLocalDofs();
-    nOverallDofs = computeOverallDofs();
-    rStartDofs = computeStartDofs();
-    
-    // And finally, compute the matrix indices.
-    if (needMatIndex)
-      computeMatIndex(needMatIndexFromGlobal);
+    for (int i = 0; i < nLevel; i++) {
+      // Compute all numbers from these mappings.
+
+      nRankDofs[i] = computeRankDofs(i);
+      nLocalDofs[i] = computeLocalDofs(i);
+      nOverallDofs[i] = computeOverallDofs(i);
+      rStartDofs[i] = computeStartDofs(i);
+      
+      // And finally, compute the matrix indices.
+      if (needMatIndex)
+	computeMatIndex(needMatIndexFromGlobal, i);
+    }
   }
   
 
@@ -272,16 +293,18 @@ namespace AMDiS {
   }
 
   
-  void ParallelDofMapping::computeMatIndex(bool globalIndex)
+  void ParallelDofMapping::computeMatIndex(bool globalIndex, int level)
   {
     FUNCNAME("ParallelDofMapping::computeMatIndex()");
 
+    TEST_EXIT(level == 0)("Not yet implemented for non-zero level!\n");
+
     dofToMatIndex.clear();
     
     // The offset is always added to the local matrix index. The offset for the
     // DOFs in the first FE spaces is the smalled global index of a DOF that is
     // owned by the rank.
-    int offset = rStartDofs;
+    int offset = rStartDofs[level];
     
 
     // === Create the matrix indices for all component FE spaces. ===
@@ -290,7 +313,7 @@ namespace AMDiS {
 
       // Traverse all DOFs of the FE space and create for all rank owned DOFs
       // a matrix index.
-      DofMap& dofMap = data[feSpaces[i]].getMap();
+      DofMap& dofMap = data[feSpaces[i]].getMap(level);
       for (DofMap::iterator it = dofMap.begin(); it != dofMap.end(); ++it) {
 	if (data[feSpaces[i]].isRankDof(it->first)) {
 	  int globalMatIndex = it->second.local + offset;
@@ -303,7 +326,7 @@ namespace AMDiS {
       
       // Increase the offset for the next FE space by the number of DOFs owned 
       // by the rank in the current FE space.
-      offset += data[feSpaces[i]].nRankDofs;
+      offset += data[feSpaces[i]].nRankDofs[level];
 	
       // If there are no non local DOFs, continue with the next FE space.
       if (!hasNonLocalDofs)
@@ -316,7 +339,7 @@ namespace AMDiS {
       // === interior boundaries.                                         ===
 
       StdMpi<vector<DegreeOfFreedom> > stdMpi(mpiComm);
-      for (DofComm::Iterator it(*sendDofs, feSpaces[i]); 
+      for (DofComm::Iterator it(*sendDofs, level, feSpaces[i]); 
 	   !it.end(); it.nextRank()) {
 	vector<DegreeOfFreedom> sendGlobalDofs;
 	
@@ -330,14 +353,14 @@ namespace AMDiS {
 	stdMpi.send(it.getRank(), sendGlobalDofs);
       }
       
-      for (DofComm::Iterator it(*recvDofs, feSpaces[i]); 
+      for (DofComm::Iterator it(*recvDofs, level, feSpaces[i]); 
 	   !it.end(); it.nextRank())
 	stdMpi.recv(it.getRank());
       
       stdMpi.startCommunication();
       
       {
-	for (DofComm::Iterator it(*recvDofs, feSpaces[i]); 
+	for (DofComm::Iterator it(*recvDofs, level, feSpaces[i]); 
 	     !it.end(); it.nextRank()) {
 	  int counter = 0;
 	  for (; !it.endDofIter(); it.nextDof()) {
diff --git a/AMDiS/src/parallel/ParallelDofMapping.h b/AMDiS/src/parallel/ParallelDofMapping.h
index 2a1ef9e0..e43a8356 100644
--- a/AMDiS/src/parallel/ParallelDofMapping.h
+++ b/AMDiS/src/parallel/ParallelDofMapping.h
@@ -107,13 +107,17 @@ namespace AMDiS {
 	sendDofs(NULL),
 	recvDofs(NULL),
 	feSpace(NULL),
+	nLevel(1),
+	dofMap(1),
 	needGlobalMapping(false),
 	hasNonLocalDofs(false),
-	nRankDofs(0),
-	nLocalDofs(0),
-	nOverallDofs(0),
-	rStartDofs(0)
-    {}
+	nRankDofs(1),
+	nLocalDofs(1),
+	nOverallDofs(1),
+	rStartDofs(1)
+    {
+      clear();
+    }
 
     /// Clears all data of the mapping.
     void clear();
@@ -122,9 +126,9 @@ namespace AMDiS {
     /// global index must not be set.
     MultiIndex& operator[](DegreeOfFreedom d)
     {
-      TEST_EXIT_DBG(dofMap.count(d))("Should not happen!\n");
+      TEST_EXIT_DBG(dofMap[0].count(d))("Should not happen!\n");
 
-      return dofMap[d];
+      return dofMap[0][d];
     }
     
     /// Inserts a new DOF to rank's mapping. The DOF is assumed to be owend by
@@ -133,12 +137,12 @@ namespace AMDiS {
     {
       FUNCNAME("FeSpaceDofMap::insertRankDof()");
       
-      TEST_EXIT_DBG(dofMap.count(dof0) == 0)("Should not happen!\n");
+      TEST_EXIT_DBG(dofMap[0].count(dof0) == 0)("Should not happen!\n");
       
-      dofMap[dof0].local = dof1;
-      nLocalDofs++;
+      dofMap[0][dof0].local = dof1;
+      nLocalDofs[0]++;
       if (dof1 != -1)
-	nRankDofs++;
+	nRankDofs[0]++;
     }
     
     /// Inserts a new DOF to rank's mapping. The DOF exists in rank's subdomain
@@ -147,17 +151,17 @@ namespace AMDiS {
     {
       FUNCNAME("FeSpaceDofMap::insert()");
       
-      TEST_EXIT_DBG(dofMap.count(dof0) == 0)("Should not happen!\n");
+      TEST_EXIT_DBG(dofMap[0].count(dof0) == 0)("Should not happen!\n");
       
-      dofMap[dof0].local = dof1;
-      nLocalDofs++;
-      nonRankDofs.insert(dof0);
+      dofMap[0][dof0].local = dof1;
+      nLocalDofs[0]++;
+      nonRankDofs[0].insert(dof0);
     }
     
     /// Checks if a given DOF is in the DOF mapping.
-    bool isSet(DegreeOfFreedom dof)
+    bool isSet(DegreeOfFreedom dof, int level = 0)
     {
-      return static_cast<bool>(dofMap.count(dof));
+      return static_cast<bool>(dofMap[level].count(dof));
     }
 
     /// Checks if a given DOF is a rank owned DOF of the DOF mapping. The DOF must
@@ -165,19 +169,19 @@ namespace AMDiS {
     /// meaningsless.
     bool isRankDof(DegreeOfFreedom dof)
     {
-      return !(static_cast<bool>(nonRankDofs.count(dof)));
+      return !(static_cast<bool>(nonRankDofs[0].count(dof)));
     }
     
     /// Returns number of DOFs in the mapping.
     unsigned int size()
     {
-      return dofMap.size();
+      return dofMap[0].size();
     }
     
     /// Returns the raw data of the mapping.
-    DofMap& getMap()
+    DofMap& getMap(int level)
     {
-      return dofMap;
+      return dofMap[level];
     }
 
     /// Recomputes the mapping.
@@ -211,11 +215,11 @@ namespace AMDiS {
 
   private:
     /// Computes a global mapping from the local one.
-    void computeGlobalMapping();
+    void computeGlobalMapping(int level);
 
     /// Computes the global indices of all DOFs in the mapping that are not owned
     /// by the rank.
-    void computeNonLocalIndices();
+    void computeNonLocalIndices(int level);
 
   private:
     /// MPI communicator object;
@@ -228,11 +232,14 @@ namespace AMDiS {
     /// correct DOF communicator in \ref sendDofs and \ref recvDofs.
     const FiniteElemSpace *feSpace;
 
+    /// Number of mesh levels.
+    int nLevel;
+
     /// Mapping data from DOF indices to local and global indices.
-    DofMap dofMap;
+    vector<DofMap> dofMap;
 
     /// Set of all DOFs that are in mapping but are not owned by the rank.
-    std::set<DegreeOfFreedom> nonRankDofs;
+    vector<std::set<DegreeOfFreedom> > nonRankDofs;
 
     /// If true, a global index mapping will be computed for all DOFs.
     bool needGlobalMapping;
@@ -241,9 +248,10 @@ namespace AMDiS {
     /// by the rank. If the value is false, each rank contains only DOFs that
     /// are also owned by this rank.
     bool hasNonLocalDofs;
+
   public:
     /// 
-    int nRankDofs, nLocalDofs, nOverallDofs, rStartDofs;
+    vector<int> nRankDofs, nLocalDofs, nOverallDofs, rStartDofs;
   };
   
   
@@ -256,18 +264,23 @@ namespace AMDiS {
   {
   public:
     ParallelDofMapping() 
-      : mpiComm(NULL),
-	sendDofs(NULL),
+      : sendDofs(NULL),
 	recvDofs(NULL),
 	hasNonLocalDofs(false),
 	needMatIndex(false),
 	needMatIndexFromGlobal(false),
+	nLevel(1),
 	feSpaces(0),
-	nRankDofs(-1),
-	nLocalDofs(-1),
-	nOverallDofs(-1),
-	rStartDofs(-1)
-    {} 
+	nRankDofs(1),
+	nLocalDofs(1),
+	nOverallDofs(1),
+	rStartDofs(1)
+    {
+      nRankDofs[0] = -1;
+      nLocalDofs[0] = -1;
+      nOverallDofs[0] = -1;
+      rStartDofs[0] = -1;
+    } 
 
     /** \brief Initialize the parallel DOF mapping.
      *
@@ -307,36 +320,36 @@ namespace AMDiS {
     }
 
     /// Returns \ref nRankDofs, thus the number of DOFs owned by the rank.
-    inline int getRankDofs()
+    inline int getRankDofs(int level)
     {
-      TEST_EXIT_DBG(nRankDofs >= 0)("Should not happen!\n");
+      TEST_EXIT_DBG(nRankDofs[level] >= 0)("Should not happen!\n");
 
-      return nRankDofs;
+      return nRankDofs[level];
     }
 
     /// Returns \ref nLocalDofs, thus the number of DOFs in ranks subdomain.
-    inline int getLocalDofs()
+    inline int getLocalDofs(int level)
     {
-      TEST_EXIT_DBG(nLocalDofs >= 0)("Should not happen!\n");
+      TEST_EXIT_DBG(nLocalDofs[level] >= 0)("Should not happen!\n");
 
-      return nLocalDofs;
+      return nLocalDofs[level];
     }
 
     /// Returns \ref nOverallDofs, thus the number of all DOFs in this mapping.
-    inline int getOverallDofs()
+    inline int getOverallDofs(int level)
     {
-      TEST_EXIT_DBG(nOverallDofs >= 0)("Should not happen!\n");
+      TEST_EXIT_DBG(nOverallDofs[level] >= 0)("Should not happen!\n");
 
-      return nOverallDofs;
+      return nOverallDofs[level];
     }
 
     /// Returns \ref rStartDofs, thus the smallest global index of a DOF that is
     /// owned by the rank.
-    inline int getStartDofs()
+    inline int getStartDofs(int level)
     {
-      TEST_EXIT_DBG(rStartDofs >= 0)("Should not happen!\n");
+      TEST_EXIT_DBG(rStartDofs[level] >= 0)("Should not happen!\n");
 
-      return rStartDofs;
+      return rStartDofs[level];
     }
 
     /// Update the mapping.
@@ -361,7 +374,7 @@ namespace AMDiS {
       TEST_EXIT_DBG(data[feSpaces[ithComponent]].isRankDof(d))
 	("Should not happen!\n");
 
-      return dofToMatIndex.get(ithComponent, d) - rStartDofs;
+      return dofToMatIndex.get(ithComponent, d) - rStartDofs[0];
     }
 
     // Writes all data of this object to an output stream.
@@ -383,23 +396,23 @@ namespace AMDiS {
     }
 
     /// Compute local and global matrix indices.
-    void computeMatIndex(bool globalIndex);
+    void computeMatIndex(bool globalIndex, int level);
 
   protected:
     /// Insert a new FE space DOF mapping for a given FE space.
     void addFeSpace(const FiniteElemSpace* feSpace);
 
     /// Compute \ref nRankDofs.
-    int computeRankDofs();
+    int computeRankDofs(int level);
 
     /// Compute \ref nLocalDofs.
-    int computeLocalDofs();
+    int computeLocalDofs(int level);
 
     /// Compute \ref nOverallDofs.
-    int computeOverallDofs();
+    int computeOverallDofs(int level);
 
     /// Compute \ref rStartDofs.
-    int computeStartDofs();
+    int computeStartDofs(int level);
 
   private:
     /// MPI communicator object;
@@ -423,6 +436,8 @@ namespace AMDiS {
     /// on global ones, otherwise on local DOF indices.
     bool needMatIndexFromGlobal;
 
+    int nLevel;
+
     /// Maps from FE space pointers to DOF mappings.
     map<const FiniteElemSpace*, FeSpaceDofMap> data;
 
@@ -434,16 +449,16 @@ namespace AMDiS {
     vector<const FiniteElemSpace*> feSpacesUnique;
 
     /// Number of DOFs owned by rank.
-    int nRankDofs;
+    vector<int> nRankDofs;
 
     /// Number of DOFs in rank's subdomain.
-    int nLocalDofs;
+    vector<int> nLocalDofs;
 
     /// Number of global DOFs (this value is thus the same on all ranks).
-    int nOverallDofs;
+    vector<int> nOverallDofs;
 
     /// Smallest global index of a DOF owned by the rank.
-    int rStartDofs;
+    vector<int> rStartDofs;
 
     /// Mapping from global DOF indices to global matrix indices under 
     /// consideration of possibly multiple components.
diff --git a/AMDiS/src/parallel/ParallelTypes.h b/AMDiS/src/parallel/ParallelTypes.h
index 2d170bc9..2e580a5b 100644
--- a/AMDiS/src/parallel/ParallelTypes.h
+++ b/AMDiS/src/parallel/ParallelTypes.h
@@ -48,9 +48,6 @@ namespace AMDiS {
   /// Defines a mapping type from rank numbers to sets of DOFs.
   typedef map<int, DofContainer> RankToDofContainer;
   
-  /// Defines a mapping type from DOF indices to DOF indices.
-  //typedef map<DegreeOfFreedom, DegreeOfFreedom> DofMap;
-  
   /// Defines a mapping type from DOFs to boolean values.
   typedef map<const DegreeOfFreedom*, bool> DofToBool;
   
diff --git a/AMDiS/src/parallel/PetscSolverFeti.cc b/AMDiS/src/parallel/PetscSolverFeti.cc
index 8affcba7..10416ebb 100644
--- a/AMDiS/src/parallel/PetscSolverFeti.cc
+++ b/AMDiS/src/parallel/PetscSolverFeti.cc
@@ -239,6 +239,7 @@ namespace AMDiS {
 	  new SubDomainSolver(meshDistributor, 
 			      levelData.getMpiComm(meshLevel - 1),
 			      levelData.getMpiComm(meshLevel));
+	subDomainSolver->setLevel(meshLevel);
       }
     }
 
@@ -306,13 +307,13 @@ namespace AMDiS {
       MSG("FETI-DP data for %d-ith FE space:\n", i);
 
       MSG("  nRankPrimals = %d   nOverallPrimals = %d\n", 
-	  primalDofMap[feSpace].nRankDofs, primalDofMap[feSpace].nOverallDofs);
+	  primalDofMap[feSpace].nRankDofs[0], primalDofMap[feSpace].nOverallDofs[0]);
       
       MSG("  nRankDuals = %d  nOverallDuals = %d\n",
-	  dualDofMap[feSpace].nRankDofs, dualDofMap[feSpace].nOverallDofs);
+	  dualDofMap[feSpace].nRankDofs[0], dualDofMap[feSpace].nOverallDofs[0]);
 
       MSG("  nRankLagrange = %d  nOverallLagrange = %d\n",
-	  lagrangeMap[feSpace].nRankDofs, lagrangeMap[feSpace].nOverallDofs);
+	  lagrangeMap[feSpace].nRankDofs[0], lagrangeMap[feSpace].nOverallDofs[0]);
 
       TEST_EXIT_DBG(localDofMap[feSpace].size() + primalDofMap[feSpace].size() == 
 		    static_cast<unsigned int>(feSpace->getAdmin()->getUsedDofs()))
@@ -430,7 +431,7 @@ namespace AMDiS {
     // === appropriate number of Lagrange constraints.                      ===
 
     int nRankLagrange = 0;
-    DofMap& dualMap = dualDofMap[feSpace].getMap();
+    DofMap& dualMap = dualDofMap[feSpace].getMap(0);
     for (DofMap::iterator it = dualMap.begin(); it != dualMap.end(); ++it) {
 
       if (meshDistributor->getDofMap()[feSpace].isRankDof(it->first)) {
@@ -441,7 +442,7 @@ namespace AMDiS {
 	lagrangeMap[feSpace].insert(it->first);
       }
     }
-    lagrangeMap[feSpace].nRankDofs = nRankLagrange;
+    lagrangeMap[feSpace].nRankDofs[0] = nRankLagrange;
   }
 
 
@@ -471,8 +472,8 @@ namespace AMDiS {
     
     // === And finally, add the global indicies of all dual nodes. ===
 
-    for (DofMap::iterator it = dualDofMap[feSpace].getMap().begin();
-	 it != dualDofMap[feSpace].getMap().end(); ++it)
+    for (DofMap::iterator it = dualDofMap[feSpace].getMap(0).begin();
+	 it != dualDofMap[feSpace].getMap(0).end(); ++it)
       localDofMap[feSpace].insertRankDof(it->first);
   }
 
@@ -484,8 +485,8 @@ namespace AMDiS {
     // === Create distributed matrix for Lagrange constraints. ===
 
     MatCreateMPIAIJ(mpiComm,
-		    lagrangeMap.getRankDofs(), localDofMap.getRankDofs(),
-		    lagrangeMap.getOverallDofs(), localDofMap.getOverallDofs(),	
+		    lagrangeMap.getRankDofs(0), localDofMap.getRankDofs(0),
+		    lagrangeMap.getOverallDofs(0), localDofMap.getOverallDofs(0),
 		    2, PETSC_NULL, 2, PETSC_NULL,
 		    &mat_lagrange);
 
@@ -497,7 +498,7 @@ namespace AMDiS {
     // === constraint.                                                     ===
 
     for (unsigned int k = 0; k < feSpaces.size(); k++) {
-      DofMap &dualMap = dualDofMap[feSpaces[k]].getMap();
+      DofMap &dualMap = dualDofMap[feSpaces[k]].getMap(0);
 
       for (DofMap::iterator it = dualMap.begin(); it != dualMap.end(); ++it) {
 	TEST_EXIT_DBG(boundaryDofRanks[feSpaces[k]].count(it->first))
@@ -540,15 +541,15 @@ namespace AMDiS {
       schurPrimalData.subSolver = subDomainSolver;
       
       VecCreateMPI(mpiComm, 
-		   localDofMap.getRankDofs(), localDofMap.getOverallDofs(),
+		   localDofMap.getRankDofs(0), localDofMap.getOverallDofs(0),
 		   &(schurPrimalData.tmp_vec_b));
       VecCreateMPI(mpiComm, 
-		   primalDofMap.getRankDofs(), primalDofMap.getOverallDofs(),
+		   primalDofMap.getRankDofs(0), primalDofMap.getOverallDofs(0),
 		   &(schurPrimalData.tmp_vec_primal));
 
       MatCreateShell(mpiComm,
-		     primalDofMap.getRankDofs(), primalDofMap.getRankDofs(), 
-		     primalDofMap.getOverallDofs(), primalDofMap.getOverallDofs(),
+		     primalDofMap.getRankDofs(0), primalDofMap.getRankDofs(0), 
+		     primalDofMap.getOverallDofs(0), primalDofMap.getOverallDofs(0),
 		     &schurPrimalData, 
 		     &mat_schur_primal);
       MatShellSetOperation(mat_schur_primal, MATOP_MULT, 
@@ -564,10 +565,10 @@ namespace AMDiS {
 
       double wtime = MPI::Wtime();
 
-      int nRowsRankPrimal = primalDofMap.getRankDofs();
-      int nRowsOverallPrimal = primalDofMap.getOverallDofs();
-      int nRowsRankB = localDofMap.getRankDofs();
-      int nRowsOverallB = localDofMap.getOverallDofs();
+      int nRowsRankPrimal = primalDofMap.getRankDofs(0);
+      int nRowsOverallPrimal = primalDofMap.getOverallDofs(0);
+      int nRowsRankB = localDofMap.getRankDofs(0);
+      int nRowsOverallB = localDofMap.getOverallDofs(0);
 
       Mat matBPi;
       MatCreateMPIAIJ(mpiComm,
@@ -583,7 +584,7 @@ namespace AMDiS {
       map<int, vector<pair<int, double> > > mat_b_primal_cols;
 
       for (int i = 0; i < nRowsRankB; i++) {
-	PetscInt row = localDofMap.getStartDofs() + i;
+	PetscInt row = localDofMap.getStartDofs(0) + i;
 	MatGetRow(subDomainSolver->getMatIntCoarse(), row, &nCols, &cols, &values);
 
 	for (int j = 0; j < nCols; j++)
@@ -594,7 +595,7 @@ namespace AMDiS {
       }
 
       TEST_EXIT(static_cast<int>(mat_b_primal_cols.size()) == 
-		primalDofMap.getLocalDofs())
+		primalDofMap.getLocalDofs(0))
 	("Should not happen!\n");
 
       for (map<int, vector<pair<int, double> > >::iterator it = mat_b_primal_cols.begin();
@@ -615,7 +616,7 @@ namespace AMDiS {
 	VecGetArray(tmpVec, &tmpValues);
 	for (int i  = 0; i < nRowsRankB; i++)
 	  MatSetValue(matBPi, 
-		      localDofMap.getStartDofs() + i,
+		      localDofMap.getStartDofs(0) + i,
 		      it->first,
 		      tmpValues[i],
 		      ADD_VALUES);
@@ -682,18 +683,18 @@ namespace AMDiS {
     fetiData.ksp_schur_primal = &ksp_schur_primal;
 
     VecCreateMPI(mpiComm, 
-		 localDofMap.getRankDofs(), localDofMap.getOverallDofs(),
+		 localDofMap.getRankDofs(0), localDofMap.getOverallDofs(0),
 		 &(fetiData.tmp_vec_b));
     VecCreateMPI(mpiComm,
-		 lagrangeMap.getRankDofs(), lagrangeMap.getOverallDofs(),
+		 lagrangeMap.getRankDofs(0), lagrangeMap.getOverallDofs(0),
 		 &(fetiData.tmp_vec_lagrange));
     VecCreateMPI(mpiComm, 
-		 primalDofMap.getRankDofs(), primalDofMap.getOverallDofs(),
+		 primalDofMap.getRankDofs(0), primalDofMap.getOverallDofs(0),
 		 &(fetiData.tmp_vec_primal));
 
     MatCreateShell(mpiComm,
-		   lagrangeMap.getRankDofs(), lagrangeMap.getRankDofs(),
-		   lagrangeMap.getOverallDofs(), lagrangeMap.getOverallDofs(),
+		   lagrangeMap.getRankDofs(0), lagrangeMap.getRankDofs(0),
+		   lagrangeMap.getOverallDofs(0), lagrangeMap.getOverallDofs(0),
 		   &fetiData, &mat_feti);
     MatShellSetOperation(mat_feti, MATOP_MULT, (void(*)(void))petscMultMatFeti);
 
@@ -734,7 +735,7 @@ namespace AMDiS {
       fetiDirichletPreconData.ksp_interior = &ksp_interior;
       
       VecCreateMPI(mpiComm, 
-		   localDofMap.getRankDofs(),localDofMap.getOverallDofs(),
+		   localDofMap.getRankDofs(0),localDofMap.getOverallDofs(0),
 		   &(fetiDirichletPreconData.tmp_vec_b));      
       MatGetVecs(mat_duals_duals, PETSC_NULL, 
 		 &(fetiDirichletPreconData.tmp_vec_duals0));
@@ -744,7 +745,7 @@ namespace AMDiS {
 		 &(fetiDirichletPreconData.tmp_vec_interior));
 
       for (unsigned int i = 0; i < feSpaces.size(); i++) {
-	DofMap &dualMap = dualDofMap[feSpaces[i]].getMap();
+	DofMap &dualMap = dualDofMap[feSpaces[i]].getMap(0);
 	for (DofMap::iterator it = dualMap.begin(); it != dualMap.end(); ++it) {
 	  DegreeOfFreedom d = it->first;
 	  int matIndexLocal = localDofMap.getLocalMatIndex(i, d);
@@ -765,7 +766,7 @@ namespace AMDiS {
       fetiLumpedPreconData.mat_duals_duals = &mat_duals_duals;
 
       for (unsigned int i = 0; i < feSpaces.size(); i++) {
-	DofMap &dualMap = dualDofMap[feSpaces[i]].getMap();
+	DofMap &dualMap = dualDofMap[feSpaces[i]].getMap(0);
 	for (DofMap::iterator it = dualMap.begin(); it != dualMap.end(); ++it) {
 	  DegreeOfFreedom d = it->first;
 	  int matIndexLocal = localDofMap.getLocalMatIndex(i, d);
@@ -775,8 +776,8 @@ namespace AMDiS {
       }
 
       VecCreateMPI(mpiComm, 
-		   localDofMap.getRankDofs(),
-		   localDofMap.getOverallDofs(),
+		   localDofMap.getRankDofs(0),
+		   localDofMap.getOverallDofs(0),
 		   &(fetiLumpedPreconData.tmp_vec_b));
       MatGetVecs(mat_duals_duals, PETSC_NULL, 
 		 &(fetiLumpedPreconData.tmp_vec_duals0));
@@ -864,20 +865,21 @@ namespace AMDiS {
     vector<const FiniteElemSpace*> feSpaces = getFeSpaces(&vec);
 
     vector<PetscInt> globalIsIndex, localIsIndex;
-    globalIsIndex.reserve(primalDofMap.getLocalDofs());
-    localIsIndex.reserve(primalDofMap.getLocalDofs());
+    globalIsIndex.reserve(primalDofMap.getLocalDofs(0));
+    localIsIndex.reserve(primalDofMap.getLocalDofs(0));
 
     {
       int cnt = 0;
       for (unsigned int i = 0; i < feSpaces.size(); i++) {
-	DofMap& dofMap = primalDofMap[feSpaces[i]].getMap();
+	DofMap& dofMap = primalDofMap[feSpaces[i]].getMap(0);
 	for (DofMap::iterator it = dofMap.begin();it != dofMap.end(); ++it) {
 	  globalIsIndex.push_back(primalDofMap.getMatIndex(i, it->first));
 	  localIsIndex.push_back(cnt++);
 	}
       }
       
-      TEST_EXIT_DBG(cnt == primalDofMap.getLocalDofs())("Should not happen!\n");
+      TEST_EXIT_DBG(cnt == primalDofMap.getLocalDofs(0))
+	("Should not happen!\n");
     }
     
     IS globalIs, localIs;
@@ -916,14 +918,14 @@ namespace AMDiS {
     for (int i = 0; i < vec.getSize(); i++) {
       DOFVector<double>& dofVec = *(vec.getDOFVector(i));
 
-      for (DofMap::iterator it = localDofMap[feSpaces[i]].getMap().begin();
-	   it != localDofMap[feSpaces[i]].getMap().end(); ++it) {
+      for (DofMap::iterator it = localDofMap[feSpaces[i]].getMap(0).begin();
+	   it != localDofMap[feSpaces[i]].getMap(0).end(); ++it) {
 	int petscIndex = localDofMap.getLocalMatIndex(i, it->first);
 	dofVec[it->first] = localSolB[petscIndex];
       }
 
-      for (DofMap::iterator it = primalDofMap[feSpaces[i]].getMap().begin();
-	   it != primalDofMap[feSpaces[i]].getMap().end(); ++it)
+      for (DofMap::iterator it = primalDofMap[feSpaces[i]].getMap(0).begin();
+	   it != primalDofMap[feSpaces[i]].getMap(0).end(); ++it)
 	dofVec[it->first] = localSolPrimal[cnt++];
     }
 
@@ -947,10 +949,10 @@ namespace AMDiS {
     
     // === Create matrices for the FETI-DP method. ===
     
-    int nRowsRankB = localDofMap.getRankDofs();
-    int nRowsOverallB = localDofMap.getOverallDofs();
-    int nRowsRankPrimal = primalDofMap.getRankDofs();
-    int nRowsOverallPrimal = primalDofMap.getOverallDofs();
+    int nRowsRankB = localDofMap.getRankDofs(0);
+    int nRowsOverallB = localDofMap.getOverallDofs(0);
+    int nRowsRankPrimal = primalDofMap.getRankDofs(0);
+    int nRowsOverallPrimal = primalDofMap.getOverallDofs(0);
     
     subDomainSolver->fillPetscMatrix(mat);
    
@@ -958,14 +960,14 @@ namespace AMDiS {
     // === Create matrices for FETI-DP preconditioner. ===
     
     if (fetiPreconditioner != FETI_NONE) {
-      int nRowsDual = dualDofMap.getRankDofs();
+      int nRowsDual = dualDofMap.getRankDofs(0);
 
       MatCreateSeqAIJ(PETSC_COMM_SELF,
 		      nRowsDual, nRowsDual, 60, PETSC_NULL,
 		      &mat_duals_duals);
 
       if (fetiPreconditioner == FETI_DIRICHLET) {
-	int nRowsInterior = interiorDofMap.getRankDofs();
+	int nRowsInterior = interiorDofMap.getRankDofs(0);
 
 	MatCreateSeqAIJ(PETSC_COMM_SELF,
 			nRowsInterior, nRowsInterior, 60, PETSC_NULL,
@@ -1158,13 +1160,13 @@ namespace AMDiS {
     Vec tmp_b0, tmp_b1, tmp_lagrange0, tmp_primal0, tmp_primal1;
 
     VecCreateMPI(mpiComm, 
-		 localDofMap.getRankDofs(), localDofMap.getOverallDofs(), &tmp_b0);
+		 localDofMap.getRankDofs(0), localDofMap.getOverallDofs(0), &tmp_b0);
     VecCreateMPI(mpiComm, 
-		 localDofMap.getRankDofs(), localDofMap.getOverallDofs(), &tmp_b1);
+		 localDofMap.getRankDofs(0), localDofMap.getOverallDofs(0), &tmp_b1);
     VecCreateMPI(mpiComm,
-		 primalDofMap.getRankDofs(), primalDofMap.getOverallDofs(), &tmp_primal0);
+		 primalDofMap.getRankDofs(0), primalDofMap.getOverallDofs(0), &tmp_primal0);
     VecCreateMPI(mpiComm,
-		 primalDofMap.getRankDofs(), primalDofMap.getOverallDofs(), &tmp_primal1);
+		 primalDofMap.getRankDofs(0), primalDofMap.getOverallDofs(0), &tmp_primal1);
 
     MatGetVecs(mat_lagrange, PETSC_NULL, &tmp_lagrange0);
     MatGetVecs(mat_lagrange, PETSC_NULL, &vec_rhs);
diff --git a/AMDiS/src/parallel/PetscSolverFeti.h b/AMDiS/src/parallel/PetscSolverFeti.h
index 4853314d..e640e7d6 100644
--- a/AMDiS/src/parallel/PetscSolverFeti.h
+++ b/AMDiS/src/parallel/PetscSolverFeti.h
@@ -80,22 +80,22 @@ namespace AMDiS {
 
     int getNumberOfPrimals()
     {
-      return primalDofMap.getOverallDofs();
+      return primalDofMap.getOverallDofs(0);
     }
 
     int getNumberOfRankPrimals()
     {
-      return primalDofMap.getRankDofs();
+      return primalDofMap.getRankDofs(0);
     }
 
     int getNumberOfDuals()
     {
-      return dualDofMap.getOverallDofs();
+      return dualDofMap.getOverallDofs(0);
     }
 
     int getNumberOfRankDuals()
     {
-      return dualDofMap.getRankDofs();
+      return dualDofMap.getRankDofs(0);
     }
 
   protected:
diff --git a/AMDiS/src/parallel/PetscSolverGlobalBlockMatrix.cc b/AMDiS/src/parallel/PetscSolverGlobalBlockMatrix.cc
index 70a1b89d..74026bb6 100644
--- a/AMDiS/src/parallel/PetscSolverGlobalBlockMatrix.cc
+++ b/AMDiS/src/parallel/PetscSolverGlobalBlockMatrix.cc
@@ -27,8 +27,8 @@ namespace AMDiS {
     double wtime = MPI::Wtime();
     const FiniteElemSpace *feSpace = meshDistributor->getFeSpace(0);
     nComponents = mat->getNumRows();
-    int nRankRows = (*dofMap)[feSpace].nRankDofs;
-    int nOverallRows = (*dofMap)[feSpace].nOverallDofs;
+    int nRankRows = (*dofMap)[feSpace].nRankDofs[0];
+    int nOverallRows = (*dofMap)[feSpace].nOverallDofs[0];
 
 #if (DEBUG != 0)
     MSG("Fill petsc matrix 1 needed %.5f seconds\n", MPI::Wtime() - wtime);
@@ -108,8 +108,8 @@ namespace AMDiS {
 
     nComponents = vec->getSize();
     const FiniteElemSpace *feSpace = meshDistributor->getFeSpace(0);
-    int nRankRows = (*dofMap)[feSpace].nRankDofs;
-    int nOverallRows = (*dofMap)[feSpace].nOverallDofs;
+    int nRankRows = (*dofMap)[feSpace].nRankDofs[0];
+    int nOverallRows = (*dofMap)[feSpace].nOverallDofs[0];
 
     nestVec.resize(nComponents);
 
@@ -151,11 +151,11 @@ namespace AMDiS {
       Vec tmp;
       VecNestGetSubVec(petscSolVec, i, &tmp);
 
-      int nRankDofs = (*dofMap)[feSpace].nRankDofs;
+      int nRankDofs = (*dofMap)[feSpace].nRankDofs[0];
       PetscScalar *vecPointer;
       VecGetArray(tmp, &vecPointer);
 
-      DofMap& d = (*dofMap)[feSpace].getMap();
+      DofMap& d = (*dofMap)[feSpace].getMap(0);
       for (DofMap::iterator it = d.begin(); it != d.end(); ++it)
 	if (it->second.local != -1)
 	  dofvec[it->first] = vecPointer[it->second.local];
@@ -217,8 +217,8 @@ namespace AMDiS {
     typedef traits::range_generator<row, Matrix>::type cursor_type;
     typedef traits::range_generator<nz, cursor_type>::type icursor_type;
 
-    int dispRowIndex = (*dofMap)[feSpace].nRankDofs * dispRowBlock;
-    int dispColIndex = (*dofMap)[feSpace].nRankDofs * dispColBlock;
+    int dispRowIndex = (*dofMap)[feSpace].nRankDofs[0] * dispRowBlock;
+    int dispColIndex = (*dofMap)[feSpace].nRankDofs[0] * dispColBlock;
 
     vector<int> cols;
     vector<double> values;
diff --git a/AMDiS/src/parallel/PetscSolverGlobalMatrix.cc b/AMDiS/src/parallel/PetscSolverGlobalMatrix.cc
index c2a53b8d..58eaaa65 100644
--- a/AMDiS/src/parallel/PetscSolverGlobalMatrix.cc
+++ b/AMDiS/src/parallel/PetscSolverGlobalMatrix.cc
@@ -55,8 +55,8 @@ namespace AMDiS {
 
     // === Create PETSc vector (solution and a temporary vector). ===
 
-    int nRankRows = dofMap->getRankDofs();
-    int nOverallRows = dofMap->getOverallDofs();
+    int nRankRows = dofMap->getRankDofs(0);
+    int nOverallRows = dofMap->getOverallDofs(0);
 
     VecCreateMPI(mpiComm, nRankRows, nOverallRows, &petscSolVec);
     VecCreateMPI(mpiComm, nRankRows, nOverallRows, &petscTmpVec);
@@ -87,8 +87,8 @@ namespace AMDiS {
 #if (DEBUG != 0)
     int a, b;
     MatGetOwnershipRange(petscMatrix, &a, &b);
-    TEST_EXIT(a == dofMap->getStartDofs())("Wrong matrix ownership range!\n");
-    TEST_EXIT(b == dofMap->getStartDofs() + nRankRows)
+    TEST_EXIT(a == dofMap->getStartDofs(0))("Wrong matrix ownership range!\n");
+    TEST_EXIT(b == dofMap->getStartDofs(0) + nRankRows)
       ("Wrong matrix ownership range!\n");
 #endif
 
@@ -133,8 +133,8 @@ namespace AMDiS {
     TEST_EXIT_DBG(vec)("No DOF vector defined!\n");
     TEST_EXIT_DBG(dofMap)("No parallel DOF map defined!\n");
 
-    int nRankRows = dofMap->getRankDofs();
-    int nOverallRows = dofMap->getOverallDofs();
+    int nRankRows = dofMap->getRankDofs(0);
+    int nOverallRows = dofMap->getOverallDofs(0);
 
     VecCreateMPI(mpiComm, nRankRows, nOverallRows, &petscRhsVec);
 
@@ -193,7 +193,7 @@ namespace AMDiS {
     int c = 0;
     for (int i = 0; i < nComponents; i++) {
       DOFVector<double> &dv = *(vec.getDOFVector(i));
-      DofMap& d = (*dofMap)[dv.getFeSpace()].getMap();
+      DofMap& d = (*dofMap)[dv.getFeSpace()].getMap(0);
       for (DofMap::iterator it = d.begin(); it != d.end(); ++it)
 	if (it->second.local != -1)
 	  dv[it->first] = vecPointer[c++];
@@ -502,8 +502,8 @@ namespace AMDiS {
     TEST_EXIT_DBG(!o_nnz)("There is something wrong!\n");
 
     vector<const FiniteElemSpace*> feSpaces = getFeSpaces(mat);
-    int nRankRows = dofMap->getRankDofs();
-    int rankStartIndex = dofMap->getStartDofs();
+    int nRankRows = dofMap->getRankDofs(0);
+    int rankStartIndex = dofMap->getStartDofs(0);
 
     d_nnz = new int[nRankRows];
     o_nnz = new int[nRankRows];
diff --git a/AMDiS/src/parallel/PetscSolverSchur.cc b/AMDiS/src/parallel/PetscSolverSchur.cc
index b3066b21..1ef550dd 100644
--- a/AMDiS/src/parallel/PetscSolverSchur.cc
+++ b/AMDiS/src/parallel/PetscSolverSchur.cc
@@ -244,8 +244,8 @@ namespace AMDiS {
     MatAssemblyEnd(petscMatrix, MAT_FINAL_ASSEMBLY);
 
 
-    int nRankRows = (*dofMap)[feSpace].nRankDofs * nComponents;
-    int nOverallRows = (*dofMap)[feSpace].nOverallDofs * nComponents;
+    int nRankRows = (*dofMap)[feSpace].nRankDofs[0] * nComponents;
+    int nOverallRows = (*dofMap)[feSpace].nOverallDofs[0] * nComponents;
 
     VecCreateMPI(mpiComm, nRankRows, nOverallRows, &petscSolVec);
     VecCreateMPI(mpiComm, nRankRows, nOverallRows, &petscTmpVec);
@@ -258,8 +258,8 @@ namespace AMDiS {
 
     const FiniteElemSpace *feSpace = meshDistributor->getFeSpace(0);
     int nComponents = vec->getSize();
-    int nRankRows = (*dofMap)[feSpace].nRankDofs * nComponents;
-    int nOverallRows = (*dofMap)[feSpace].nOverallDofs * nComponents;
+    int nRankRows = (*dofMap)[feSpace].nRankDofs[0] * nComponents;
+    int nOverallRows = (*dofMap)[feSpace].nOverallDofs[0] * nComponents;
 
     VecCreateMPI(mpiComm, nRankRows, nOverallRows, &petscRhsVec);
 
diff --git a/AMDiS/src/parallel/SubDomainSolver.cc b/AMDiS/src/parallel/SubDomainSolver.cc
index 07c48872..5086b576 100644
--- a/AMDiS/src/parallel/SubDomainSolver.cc
+++ b/AMDiS/src/parallel/SubDomainSolver.cc
@@ -24,13 +24,20 @@ namespace AMDiS {
 
     vector<const FiniteElemSpace*> feSpaces = getFeSpaces(mat);
 
-    int nRowsRankInterior = interiorMap->getRankDofs();
-    int nRowsOverallInterior = interiorMap->getOverallDofs();
-    int nRowsRankCoarse = coarseSpaceMap->getRankDofs();
-    int nRowsOverallCoarse = coarseSpaceMap->getOverallDofs();
-
-    MatCreateSeqAIJ(mpiCommInterior, nRowsRankInterior, nRowsRankInterior,
-		    60, PETSC_NULL, &matIntInt);
+    int nRowsRankInterior = interiorMap->getRankDofs(level);
+    int nRowsOverallInterior = interiorMap->getOverallDofs(level);
+    int nRowsRankCoarse = coarseSpaceMap->getRankDofs(level);
+    int nRowsOverallCoarse = coarseSpaceMap->getOverallDofs(level);
+
+    if (mpiCommInterior.Get_size() == 1) {
+      MatCreateSeqAIJ(mpiCommInterior, nRowsRankInterior, nRowsRankInterior,
+		      60, PETSC_NULL, &matIntInt);
+    } else {
+      MatCreateMPIAIJ(mpiCommInterior, 
+		      nRowsRankInterior, nRowsRankInterior,
+		      nRowsOverallInterior, nRowsOverallInterior,
+		      60, PETSC_NULL, 60, PETSC_NULL, &matIntInt);
+    }
 
     MatCreateMPIAIJ(mpiCommCoarseSpace,
 		    nRowsRankCoarse, nRowsRankCoarse,
@@ -185,11 +192,13 @@ namespace AMDiS {
     FUNCNAME("SubDomainSolver::fillPetscRhs()");
 
     VecCreateMPI(mpiCommCoarseSpace, 
-		 coarseSpaceMap->getRankDofs(), coarseSpaceMap->getOverallDofs(),
+		 coarseSpaceMap->getRankDofs(level), 
+		 coarseSpaceMap->getOverallDofs(level),
 		 &rhsCoarseSpace);
 
     VecCreateMPI(mpiCommCoarseSpace, 
-		 interiorMap->getRankDofs(), interiorMap->getOverallDofs(),
+		 interiorMap->getRankDofs(level), 
+		 interiorMap->getOverallDofs(level),
 		 &rhsInterior);
 
     for (int i = 0; i < vec->getSize(); i++) {
@@ -254,13 +263,13 @@ namespace AMDiS {
     FUNCNAME("SubDomainSolver::solveGlobal()");
 
     Vec tmp;
-    VecCreateSeq(PETSC_COMM_SELF, interiorMap->getRankDofs(), &tmp);
+    VecCreateSeq(PETSC_COMM_SELF, interiorMap->getRankDofs(level), &tmp);
 
     PetscScalar *tmpValues, *rhsValues;
     VecGetArray(tmp, &tmpValues);
     VecGetArray(rhs, &rhsValues);
 
-    for (int i = 0; i < interiorMap->getRankDofs(); i++)
+    for (int i = 0; i < interiorMap->getRankDofs(level); i++)
       tmpValues[i] = rhsValues[i];
 
     VecRestoreArray(rhs, &rhsValues);
@@ -271,7 +280,7 @@ namespace AMDiS {
     VecGetArray(tmp, &tmpValues);
     VecGetArray(sol, &rhsValues);
 
-    for (int i = 0; i < interiorMap->getRankDofs(); i++) 
+    for (int i = 0; i < interiorMap->getRankDofs(level); i++) 
       rhsValues[i] = tmpValues[i];
 
     VecRestoreArray(sol, &rhsValues);
diff --git a/AMDiS/src/parallel/SubDomainSolver.h b/AMDiS/src/parallel/SubDomainSolver.h
index 84c9ace7..52df9910 100644
--- a/AMDiS/src/parallel/SubDomainSolver.h
+++ b/AMDiS/src/parallel/SubDomainSolver.h
@@ -40,12 +40,18 @@ namespace AMDiS {
 		    MPI::Intracomm mpiComm0,
 		    MPI::Intracomm mpiComm1)
       : meshDistributor(md),
+	level(0),
 	mpiCommCoarseSpace(mpiComm0),
 	mpiCommInterior(mpiComm1),
 	coarseSpaceMap(NULL),
 	interiorMap(NULL)
     {}
 
+    void setLevel(int l) 
+    {
+      level = l;
+    }
+
     void setDofMapping(ParallelDofMapping *coarseDofs,
 		       ParallelDofMapping *interiorDofs)
     {
@@ -113,6 +119,8 @@ namespace AMDiS {
   protected:
     MeshDistributor *meshDistributor;
 
+    int level;
+
     MPI::Intracomm mpiCommCoarseSpace;
 
     MPI::Intracomm mpiCommInterior;
diff --git a/test/mpi/src/test0002.cc b/test/mpi/src/test0002.cc
index 8b6abbf6..2f3c508a 100644
--- a/test/mpi/src/test0002.cc
+++ b/test/mpi/src/test0002.cc
@@ -48,9 +48,9 @@ BOOST_AUTO_TEST_CASE(amdis_mpi_feti)
   vector<double> testData;
   testData.push_back(feti.getNumberOfRankPrimals());
   testData.push_back(feti.getNumberOfRankDuals());
-  testData.push_back(dofMap[feSpace].nRankDofs);
-  testData.push_back(dofMap[feSpace].rStartDofs);
-  testData.push_back(dofMap[feSpace].nOverallDofs);
+  testData.push_back(dofMap[feSpace].nRankDofs[0]);
+  testData.push_back(dofMap[feSpace].rStartDofs[0]);
+  testData.push_back(dofMap[feSpace].nOverallDofs[0]);
 
   BOOST_REQUIRE(data.size() - 1 == testData.size());
   BOOST_REQUIRE(equal(data.begin() + 1, data.end(), testData.begin()));
@@ -66,9 +66,9 @@ BOOST_AUTO_TEST_CASE(amdis_mpi_feti)
   testData.clear();
   testData.push_back(feti.getNumberOfRankPrimals());
   testData.push_back(feti.getNumberOfRankDuals());
-  testData.push_back(dofMap[feSpace].nRankDofs);
-  testData.push_back(dofMap[feSpace].rStartDofs);
-  testData.push_back(dofMap[feSpace].nOverallDofs);
+  testData.push_back(dofMap[feSpace].nRankDofs[0]);
+  testData.push_back(dofMap[feSpace].rStartDofs[0]);
+  testData.push_back(dofMap[feSpace].nOverallDofs[0]);
 
   BOOST_REQUIRE(data.size() - 1 == testData.size());
   BOOST_REQUIRE(equal(data.begin() + 1, data.end(), testData.begin()));
-- 
GitLab