diff --git a/AMDiS/src/parallel/DofComm.cc b/AMDiS/src/parallel/DofComm.cc
index 6507ab23ed00d22e251325c98d9eefc525155ef8..53d241c45b632d495cc64fc45e8933191f52ea26 100644
--- a/AMDiS/src/parallel/DofComm.cc
+++ b/AMDiS/src/parallel/DofComm.cc
@@ -18,26 +18,15 @@ namespace AMDiS {
 
   using namespace std;
 
-  int DofComm::getNumberDofs(int level, const FiniteElemSpace *feSpace)
+  void DofComm::create(InteriorBoundary &boundary)
   {
-    FUNCNAME("DofComm::getNumberDofs()");
-
-    TEST_EXIT_DBG(level < data.size())("Should not happen!\n");
-
-    DofContainerSet dofs;
-
-    for (DataIter rankIt = data[level].begin(); 
-	 rankIt != data[level].end(); ++rankIt)
-      for (FeMapIter feIt = rankIt->second.begin();
-	   feIt != rankIt->second.end(); ++feIt)
-	if (feIt->first == feSpace)
-	  dofs.insert(feIt->second.begin(), feIt->second.end());
-
-    return static_cast<int>(dofs.size());
+    createContainer(boundary.getOwn(), sendDofs);
+    createContainer(boundary.getOther(), recvDofs);
   }
 
 
-  void DofComm::create(RankToBoundMap &boundary)
+  void DofComm::createContainer(RankToBoundMap &boundary,
+				LevelDataType &data)
   {
     // === Fill data. ===
 
@@ -72,11 +61,32 @@ namespace AMDiS {
   }
 
 
+  int DofComm::getNumberDofs(LevelDataType &data, 
+			     int level, 
+			     const FiniteElemSpace *feSpace)
+  {
+    FUNCNAME("DofComm::getNumberDofs()");
+
+    TEST_EXIT_DBG(level < data.size())("Should not happen!\n");
+
+    DofContainerSet dofs;
+
+    for (DataIter rankIt = data[level].begin(); 
+	 rankIt != data[level].end(); ++rankIt)
+      for (FeMapIter feIt = rankIt->second.begin();
+	   feIt != rankIt->second.end(); ++feIt)
+	if (feIt->first == feSpace)
+	  dofs.insert(feIt->second.begin(), feIt->second.end());
+
+    return static_cast<int>(dofs.size());
+  }
+  
+
   bool DofComm::Iterator::setNextFeMap()
   {
     FUNCNAME("DofComm::Iterator::setNextFeMap()");
 
-    if (dataIter != dofComm.data[traverseLevel].end()) {
+    if (dataIter != data[traverseLevel].end()) {
       TEST_EXIT_DBG(dataIter->second.size())("Should not happen!\n");
 
       feMapIter = dataIter->second.begin();
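
A minimal usage sketch of the new getNumberDofs() signature (hypothetical call site; dofComm, level and feSpace are assumed to be in scope):

    // Count the boundary DOFs of one FE space over the send containers.
    // getSendDofs() could equally be getRecvDofs(): the container is now
    // an explicit argument instead of being implied by the object.
    int n = dofComm.getNumberDofs(dofComm.getSendDofs(), level, feSpace);
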
diff --git a/AMDiS/src/parallel/DofComm.h b/AMDiS/src/parallel/DofComm.h
index 89874a4b6480f3efbe56508b9c6c398a10907f61..7e3a7bea4ec2890de2675de770b5b6998054bfe9 100644
--- a/AMDiS/src/parallel/DofComm.h
+++ b/AMDiS/src/parallel/DofComm.h
@@ -36,7 +36,9 @@ namespace AMDiS {
   {
   public:
     DofComm() 
-      : data(1)
+      : recvDofs(1),
+	sendDofs(1),
+	periodicDofs(0)
     {}
     
     typedef map<const FiniteElemSpace*, DofContainer> FeMapType;
@@ -46,33 +48,76 @@ namespace AMDiS {
     // meshLevel: map[rank -> map[feSpace -> DofContainer]]
     typedef vector<DataType> LevelDataType;
 
-    inline DofContainer& getDofContainer(int rank, 
-					 const FiniteElemSpace *feSpace, 
-					 int level = 0)
-    {
-      return data[level][rank][feSpace];
-    }
-
     void init(int n, vector<const FiniteElemSpace*> &fe)
     {
+      FUNCNAME("DofComm::init()");
+
+      TEST_EXIT_DBG(n >= 1)("Should not happen!\n");
+
       nLevel = n;
       feSpaces = fe;
 
-      data.clear();
-      data.resize(nLevel);
+      sendDofs.clear();
+      recvDofs.clear();
+      periodicDofs.clear();
+
+      sendDofs.resize(nLevel);
+      recvDofs.resize(nLevel);
+      periodicDofs.resize(nLevel);
+    }
+
+    void create(InteriorBoundary &boundary);
+
+    LevelDataType& getSendDofs()
+    {
+      return sendDofs;
+    }
+
+    LevelDataType& getRecvDofs()
+    {
+      return recvDofs;
     }
 
-    DataType& getData(int level = 0)
+    LevelDataType& getPeriodicDofs()
     {
-      return data[level];
+      return periodicDofs;
     }
 
-    int getNumberDofs(int level, const FiniteElemSpace *feSpace);
+    /// Writes all data of this object to an output stream.
+    void serialize(ostream &out)
+    {
+      ERROR_EXIT("MUST BE IMPLEMENTED!\n");
+    }
 
-    void create(RankToBoundMap &boundary);
+    /// Reads the object data from an input stream.
+    void deserialize(istream &in, 
+		     map<const FiniteElemSpace*, map<int, const DegreeOfFreedom*> > &dofIndexMap)
+    {
+      ERROR_EXIT("MUST BE IMPLEMENTED!\n");
+    }
+
+    int getNumberDofs(LevelDataType &data, 
+		      int level, 
+		      const FiniteElemSpace *feSpace);
+
+  protected:
+    void createContainer(RankToBoundMap &boundary, LevelDataType &data);
 
   protected:
-    LevelDataType data;
+    /// This map contains for each rank the list of DOFs the current rank must
+    /// send to exchange solution DOFs at the interior boundaries.
+    LevelDataType sendDofs;
+
+    /// This map contains on each rank the list of DOFs from which the current
+    /// rank will receive DOF values (i.e., these are all DOFs at an interior
+    /// boundary). The DOF indices are given in the rank's local numbering.
+    LevelDataType recvDofs;
+
+    /// This map contains on each rank a list of DOFs along the periodic
+    /// interior boundaries that are communicated with other ranks. The DOF
+    /// indices are given in the rank's local numbering. Periodic boundaries
+    /// within one subdomain are not considered here.
+    LevelDataType periodicDofs;
 
     int nLevel;
 
@@ -84,9 +129,9 @@ namespace AMDiS {
     class Iterator
     {
     public:
-      Iterator(DofComm &dc,
+      Iterator(LevelDataType &d,
 	       const FiniteElemSpace *fe = NULL)
-	: dofComm(dc),
+	: data(d),
 	  dofCounter(-1),
 	  traverseFeSpace(fe),
 	  traverseLevel(0)
@@ -94,10 +139,10 @@ namespace AMDiS {
 	goFirst();
       }
 
-      Iterator(DofComm &dc,
+      Iterator(LevelDataType &d,
 	       int level,
 	       const FiniteElemSpace *fe = NULL)
-	: dofComm(dc),
+	: data(d),
 	  dofCounter(-1),
 	  traverseFeSpace(fe),
 	  traverseLevel(level)
@@ -107,7 +152,7 @@ namespace AMDiS {
 
       inline bool end()
       {
-	return (dataIter == dofComm.data[traverseLevel].end());
+	return (dataIter == data[traverseLevel].end());
       }
       
       inline void nextRank()
@@ -200,7 +245,7 @@ namespace AMDiS {
     protected:
       void goFirst()
       {
-	dataIter = dofComm.data[traverseLevel].begin();
+	dataIter = data[traverseLevel].begin();
 
 	while (setNextFeMap() == false)
 	  ++dataIter;
@@ -209,7 +254,7 @@ namespace AMDiS {
       bool setNextFeMap();
 
     protected:
-      DofComm &dofComm;
+      LevelDataType &data;
       
       DofComm::DataIter dataIter;
       
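
Since Iterator now binds to a LevelDataType rather than to a whole DofComm, each traversal names the container it walks. A sketch of the canonical loop, following the call sites updated below (dofComm and feSpace assumed; useDof() is a hypothetical consumer):

    for (DofComm::Iterator it(dofComm.getRecvDofs(), feSpace);
         !it.end(); it.nextRank())
      for (; !it.endDofIter(); it.nextDof())
        useDof(it.getRank(), it.getDofIndex());  // useDof() is hypothetical
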
diff --git a/AMDiS/src/parallel/MeshDistributor.cc b/AMDiS/src/parallel/MeshDistributor.cc
index 679025fdda1b13d64f25aa127d66cfd5463c0ca3..8dc62837e905ea9f82719b362d5665d355d1462f 100644
--- a/AMDiS/src/parallel/MeshDistributor.cc
+++ b/AMDiS/src/parallel/MeshDistributor.cc
@@ -511,7 +511,8 @@ namespace AMDiS {
 
     StdMpi<vector<double> > stdMpi(mpiComm);
 
-    for (DofComm::Iterator it(sendDofs); !it.end(); it.nextRank()) {
+    for (DofComm::Iterator it(dofComm.getSendDofs()); 
+	 !it.end(); it.nextRank()) {
       vector<double> dofs;
 
       for (int i = 0; i < vec.getSize(); i++) {
@@ -524,12 +525,12 @@ namespace AMDiS {
       stdMpi.send(it.getRank(), dofs);
     }
 	   
-    for (DofComm::Iterator it(recvDofs); !it.end(); it.nextRank())
+    for (DofComm::Iterator it(dofComm.getRecvDofs()); !it.end(); it.nextRank())
       stdMpi.recv(it.getRank());
 
     stdMpi.startCommunication();
 
-    for (DofComm::Iterator it(recvDofs); !it.end(); it.nextRank()) {
+    for (DofComm::Iterator it(dofComm.getRecvDofs()); !it.end(); it.nextRank()) {
       int counter = 0;
 
       for (int i = 0; i < vec.getSize(); i++) {
@@ -659,9 +660,11 @@ namespace AMDiS {
     FUNCNAME("MeshDistributor::getAllBoundaryDofs()");
 
     DofContainerSet dofSet;
-    for (DofComm::Iterator it(sendDofs, level, feSpace); !it.end(); it.nextRank())
+    for (DofComm::Iterator it(dofComm.getSendDofs(), level, feSpace); 
+	 !it.end(); it.nextRank())
       dofSet.insert(it.getDofs().begin(), it.getDofs().end());
-    for (DofComm::Iterator it(recvDofs, level, feSpace); !it.end(); it.nextRank())
+    for (DofComm::Iterator it(dofComm.getRecvDofs(), level, feSpace); 
+	 !it.end(); it.nextRank())
       dofSet.insert(it.getDofs().begin(), it.getDofs().end());
 
     dofs.clear();
@@ -1533,14 +1536,8 @@ namespace AMDiS {
   {
     FUNCNAME("MeshDistributor::createBoundaryDofs()");
 
-    int nLevels = levelData.getLevelNumber();
-    TEST_EXIT_DBG(nLevels >= 1)("Should not happen!\n");
-
-    sendDofs.init(nLevels, feSpaces);
-    sendDofs.create(intBoundary.getOwn());
-
-    recvDofs.init(nLevels, feSpaces);
-    recvDofs.create(intBoundary.getOther());
+    dofComm.init(levelData.getLevelNumber(), feSpaces);
+    dofComm.create(intBoundary);
 
     createBoundaryDofInfo();
   }
@@ -1625,7 +1622,7 @@ namespace AMDiS {
     TEST_EXIT_DBG(nLevels >= 1)("Should not happen!\n");
 
     dofMap.init(levelData, feSpaces, feSpaces, true, true);
-    dofMap.setDofComm(sendDofs, recvDofs);
+    dofMap.setDofComm(dofComm);
     dofMap.clear();
 
     createBoundaryDofs();
@@ -1684,7 +1681,8 @@ namespace AMDiS {
     int nLevels = levelData.getLevelNumber();
     for (int level = 0; level < nLevels; level++) {
       DofContainerSet nonRankDofs;
-      for (DofComm::Iterator it(recvDofs, level, feSpace); !it.end(); it.nextRank())
+      for (DofComm::Iterator it(dofComm.getRecvDofs(), level, feSpace); 
+	   !it.end(); it.nextRank())
 	for (; !it.endDofIter(); it.nextDof())
 	  nonRankDofs.insert(it.getDof());
       
@@ -1692,7 +1690,8 @@ namespace AMDiS {
 	if (nonRankDofs.count(rankDofs[i]) == 0)
 	  dofMap[feSpace].insertRankDof(level, *(rankDofs[i]));
       
-      for (DofComm::Iterator it(recvDofs, level, feSpace); !it.end(); it.nextRank())
+      for (DofComm::Iterator it(dofComm.getRecvDofs(), level, feSpace); 
+	   !it.end(); it.nextRank())
 	for (; !it.endDofIter(); it.nextDof())
 	  dofMap[feSpace].insertNonRankDof(level, it.getDofIndex());
     }
@@ -1702,9 +1701,7 @@ namespace AMDiS {
     lastMeshChangeIndex = mesh->getChangeIndex();
 
 #if (DEBUG != 0)
-    ParallelDebug::testDofContainerCommunication(*this, 
-						 sendDofs.getData(), 
-						 recvDofs.getData());
+    ParallelDebug::testDofContainerCommunication(*this);
 #endif
   }
 
@@ -1714,13 +1711,9 @@ namespace AMDiS {
     FUNCNAME("MeshDistributor::createPeriodicMap()");
 
     // Clear all periodic DOF mappings calculated before. We do it from scratch.
-    periodicDofs.init(levelData.getLevelNumber(), feSpaces);
     periodicMap.clear();
 
-    // If there are no periodic boundaries, return. Note that periodicDofs and
-    // periodicMap must be still cleared before: if we do repartitioning and
-    // there were periodic boundaries in subdomain before and after repartitioning
-    // there are no more periodic boundaries.
+    // If there are no periodic boundaries, return. 
     if (!intBoundary.hasPeriodic())
       return;
 
@@ -1736,6 +1729,8 @@ namespace AMDiS {
   {
     FUNCNAME("MeshDistributor::createPeriodicMap()");
 
+    DofComm::LevelDataType &periodicDofs = dofComm.getPeriodicDofs();
+
     StdMpi<vector<int> > stdMpi(mpiComm, false);
 
     // === Each rank traverse its periodic boundaries and sends the DOF      ===
@@ -1781,7 +1776,7 @@ namespace AMDiS {
 	// Here we have a periodic boundary between two ranks.
 
 	// Create DOF indices on the boundary. 
-	DofContainer& dofs = periodicDofs.getDofContainer(it->first, feSpace);
+	DofContainer& dofs = periodicDofs[0][it->first][feSpace];
 	for (vector<AtomicBoundary>::iterator boundIt = it->second.begin();
 	     boundIt != it->second.end(); ++boundIt) {
 
@@ -1814,7 +1809,7 @@ namespace AMDiS {
 
     for (RankToBoundMap::iterator it = intBoundary.getPeriodic().begin();
 	 it != intBoundary.getPeriodic().end(); ++it) {
-      DofContainer& dofs = periodicDofs.getDofContainer(it->first, feSpace);
+      DofContainer& dofs = periodicDofs[0][it->first][feSpace];
       vector<int>& types = rankToDofType[it->first];
 
       TEST_EXIT_DBG(dofs.size() == types.size())("Should not happen!\n");
@@ -1934,8 +1929,7 @@ namespace AMDiS {
 
     intBoundary.serialize(out);
 
-    serialize(out, sendDofs.getData());
-    serialize(out, recvDofs.getData());
+    dofComm.serialize(out);
 
    // === Serialize FE space dependent data ===
 
@@ -1993,8 +1987,7 @@ namespace AMDiS {
    
     intBoundary.deserialize(in, elIndexMap);
 
-    deserialize(in, sendDofs.getData(), dofIndexMap);
-    deserialize(in, recvDofs.getData(), dofIndexMap);
+    dofComm.deserialize(in, dofIndexMap);
 
    // === Deserialize FE space dependent data ===
     
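
With getDofContainer() removed, per-rank periodic containers are reached by indexing the level data directly, as in the two createPeriodicMap() hunks above (level 0 is hard-coded there; it->first is the neighbouring rank):

    DofComm::LevelDataType &periodicDofs = dofComm.getPeriodicDofs();
    DofContainer &dofs = periodicDofs[0][it->first][feSpace];
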
diff --git a/AMDiS/src/parallel/MeshDistributor.h b/AMDiS/src/parallel/MeshDistributor.h
index ba580061c42534233bbd321765b8c3623e70a5ee..4299edf77c4ff976c0240c57e5215ecb1f770429 100644
--- a/AMDiS/src/parallel/MeshDistributor.h
+++ b/AMDiS/src/parallel/MeshDistributor.h
@@ -157,19 +157,9 @@ namespace AMDiS {
       return periodicMap;
     }
 
-    DofComm& getSendDofs()
+    DofComm& getDofComm()
     {
-      return sendDofs;
-    }
-
-    DofComm& getRecvDofs()
-    {
-      return recvDofs;
-    }
-
-    DofComm& getPeriodicDofs()
-    {
-      return periodicDofs;
+      return dofComm;
     }
 
     inline long getLastMeshChangeIndex()
@@ -220,7 +210,8 @@ namespace AMDiS {
 
       const FiniteElemSpace *fe = vec.getFeSpace();
 
-      for (DofComm::Iterator it(sendDofs, fe); !it.end(); it.nextRank()) {
+      for (DofComm::Iterator it(dofComm.getSendDofs(), fe); 
+	   !it.end(); it.nextRank()) {
 	vector<T> dofs;
 	dofs.reserve(it.getDofs().size());
 	
@@ -230,12 +221,14 @@ namespace AMDiS {
 	stdMpi.send(it.getRank(), dofs);
       }
 	     
-      for (DofComm::Iterator it(recvDofs); !it.end(); it.nextRank())
+      for (DofComm::Iterator it(dofComm.getRecvDofs()); 
+	   !it.end(); it.nextRank())
         stdMpi.recv(it.getRank());
 	     
       stdMpi.startCommunication();
 
-      for (DofComm::Iterator it(recvDofs, fe); !it.end(); it.nextRank())
+      for (DofComm::Iterator it(dofComm.getRecvDofs(), fe); 
+	   !it.end(); it.nextRank())
 	for (; !it.endDofIter(); it.nextDof())
 	  vec[it.getDofIndex()] = 
 	     stdMpi.getRecvData(it.getRank())[it.getDofCounter()];
@@ -477,20 +470,7 @@ namespace AMDiS {
     /// partitioning the whole mesh. 
     InteriorBoundary intBoundary;
 
-    /// This map contains for each rank the list of DOFs the current rank must 
-    /// end to exchange solution DOFs at the interior boundaries.
-    DofComm sendDofs;
-
-    /// This map contains on each rank the list of DOFs from which the current 
-    /// rank will receive DOF values (i.e., this are all DOFs at an interior 
-    /// boundary). The DOF indices are given in rank's local numbering.
-    DofComm recvDofs;
-
-    /// This map contains on each rank a list of DOFs along the interior bound-
-    /// aries to communicate with other ranks. The DOF indices are given in rank's
-    /// local numbering. Periodic boundaries within one subdomain are not 
-    /// considered here. 
-    DofComm periodicDofs;
+    DofComm dofComm;
 
     PeriodicMap periodicMap;
 
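
The full DOF-value exchange now reads both halves from the single dofComm member, following the template method above; a condensed sketch, with the gather loop reconstructed from context (vec, fe and mpiComm assumed to be in scope):

    StdMpi<vector<T> > stdMpi(mpiComm);
    // Gather and send the values of all boundary DOFs owned by this rank.
    for (DofComm::Iterator it(dofComm.getSendDofs(), fe); !it.end(); it.nextRank()) {
      vector<T> dofs;
      dofs.reserve(it.getDofs().size());
      for (; !it.endDofIter(); it.nextDof())
        dofs.push_back(vec[it.getDofIndex()]);
      stdMpi.send(it.getRank(), dofs);
    }
    // Post receives from all ranks that own DOFs on our boundary.
    for (DofComm::Iterator it(dofComm.getRecvDofs()); !it.end(); it.nextRank())
      stdMpi.recv(it.getRank());
    stdMpi.startCommunication();
    // Copy the received values into the local vector.
    for (DofComm::Iterator it(dofComm.getRecvDofs(), fe); !it.end(); it.nextRank())
      for (; !it.endDofIter(); it.nextDof())
        vec[it.getDofIndex()] = stdMpi.getRecvData(it.getRank())[it.getDofCounter()];
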
diff --git a/AMDiS/src/parallel/ParallelDebug.cc b/AMDiS/src/parallel/ParallelDebug.cc
index a9cab1df16e144d7714b129a270a53c4178761a2..a8ed8ee6190e3d509db64dd192e05587edc445eb 100644
--- a/AMDiS/src/parallel/ParallelDebug.cc
+++ b/AMDiS/src/parallel/ParallelDebug.cc
@@ -354,11 +354,13 @@ namespace AMDiS {
     DOFVector<WorldVector<double> > coords(feSpace, "dofCorrds");
     pdb.mesh->getDofIndexCoords(feSpace, coords);
 
-    for (DofComm::Iterator it(pdb.sendDofs, feSpace); !it.end(); it.nextRank())
+    for (DofComm::Iterator it(pdb.dofComm.getSendDofs(), feSpace); 
+	 !it.end(); it.nextRank())
       for (; !it.endDofIter(); it.nextDof())
 	sendCoords[it.getRank()].push_back(coords[it.getDofIndex()]);
 
-    for (DofComm::Iterator it(pdb.recvDofs, feSpace); !it.end(); it.nextRank())
+    for (DofComm::Iterator it(pdb.dofComm.getRecvDofs(), feSpace); 
+	 !it.end(); it.nextRank())
       for (; !it.endDofIter(); it.nextDof())
 	recvCoords[it.getRank()].push_back(coords[it.getDofIndex()]);
 
@@ -449,8 +451,7 @@ namespace AMDiS {
 	    MSG("%s\n", oss.str().c_str());
 	    
 	    debug::printInfoByDof(feSpace, 
-				  *(pdb.recvDofs.getDofContainer(it->first, 
-								 feSpace)[i]));
+				  *(pdb.dofComm.getRecvDofs()[0][it->first][feSpace][i]));
 	  }
 	  ERROR("Wrong DOFs in rank %d!\n", pdb.mpiRank);
 	  foundError = 1;
@@ -485,15 +486,18 @@ namespace AMDiS {
     }
 
     StdMpi<CoordsIndexMap> stdMpi(pdb.mpiComm, true);
-    for (DofComm::Iterator it(pdb.sendDofs, feSpace); !it.end(); it.nextRank())
+    for (DofComm::Iterator it(pdb.dofComm.getSendDofs(), feSpace); 
+	 !it.end(); it.nextRank())
       stdMpi.send(it.getRank(), coordsToIndex);
-    for (DofComm::Iterator it(pdb.recvDofs, feSpace); !it.end(); it.nextRank())
+    for (DofComm::Iterator it(pdb.dofComm.getRecvDofs(), feSpace); 
+	 !it.end(); it.nextRank())
       stdMpi.recv(it.getRank());
    
     stdMpi.startCommunication();
 
     int foundError = 0;
-    for (DofComm::Iterator it(pdb.recvDofs, feSpace); !it.end(); it.nextRank()) {
+    for (DofComm::Iterator it(pdb.dofComm.getRecvDofs(), feSpace); 
+	 !it.end(); it.nextRank()) {
       CoordsIndexMap& otherCoords = stdMpi.getRecvData(it.getRank());
 
       for (CoordsIndexMap::iterator coordsIt = otherCoords.begin();
@@ -562,27 +566,30 @@ namespace AMDiS {
   }
 
 
-  void ParallelDebug::testDofContainerCommunication(MeshDistributor &pdb, 
-						    map<int, map<const FiniteElemSpace*, DofContainer> > &sendDofs,
-						    map<int, map<const FiniteElemSpace*, DofContainer> > &recvDofs)
+  void ParallelDebug::testDofContainerCommunication(MeshDistributor &pdb)
   {
-    FUNCNAME("ParallelDebug::testDofContainerCommunication()");
+    FUNCNAME("ParallelDebug::testDofContainerCommunication()");    
 
     typedef map<int, map<const FiniteElemSpace*, DofContainer> >::iterator it_type;
 
     map<int, int> sendNumber;
-    for (it_type it = sendDofs.begin(); it != sendDofs.end(); ++it)
-      for (map<const FiniteElemSpace*, DofContainer>::iterator dcIt = it->second.begin(); dcIt != it->second.end(); ++dcIt)
+    for (it_type it = pdb.dofComm.getSendDofs()[0].begin(); 
+	 it != pdb.dofComm.getSendDofs()[0].end(); ++it)
+      for (map<const FiniteElemSpace*, DofContainer>::iterator dcIt = it->second.begin(); 
+	   dcIt != it->second.end(); ++dcIt)
 	sendNumber[it->first] += dcIt->second.size();
     
     map<int, int> recvNumber;
-    for (it_type it = recvDofs.begin(); it != recvDofs.end(); ++it)
-      for (map<const FiniteElemSpace*, DofContainer>::iterator dcIt = it->second.begin(); dcIt != it->second.end(); ++dcIt)
+    for (it_type it = pdb.dofComm.getRecvDofs()[0].begin(); 
+	 it != pdb.dofComm.getRecvDofs()[0].end(); ++it)
+      for (map<const FiniteElemSpace*, DofContainer>::iterator dcIt = it->second.begin(); 
+	   dcIt != it->second.end(); ++dcIt)
 	recvNumber[it->first] += dcIt->second.size();
     
     StdMpi<int> stdMpi(pdb.mpiComm);
     stdMpi.send(sendNumber);
-    for (it_type it = recvDofs.begin(); it != recvDofs.end(); ++it)
+    for (it_type it = pdb.dofComm.getRecvDofs()[0].begin(); 
+	 it != pdb.dofComm.getRecvDofs()[0].end(); ++it)
       stdMpi.recv(it->first);
     stdMpi.startCommunication();
      
@@ -652,13 +659,13 @@ namespace AMDiS {
 	pdb.mesh->getDofIndexCoords(it->first, feSpace, coords);
 	coords.print();
 
-	for (DofComm::Iterator rit(pdb.sendDofs, feSpace); 
+	for (DofComm::Iterator rit(pdb.dofComm.getSendDofs(), feSpace); 
 	     !rit.end(); rit.nextRank())
 	  for (; !rit.endDofIter(); rit.nextDof())
 	    if (it->first == rit.getDofIndex())
 	      cout << "SEND DOF TO " << rit.getRank() << endl;
 	
-	for (DofComm::Iterator rit(pdb.recvDofs, feSpace); 
+	for (DofComm::Iterator rit(pdb.dofComm.getRecvDofs(), feSpace); 
 	     !rit.end(); rit.nextRank())
 	  for (; !rit.endDofIter(); rit.nextDof())
 	    if (it->first == rit.getDofIndex())
diff --git a/AMDiS/src/parallel/ParallelDebug.h b/AMDiS/src/parallel/ParallelDebug.h
index 28d10f48834834492f71714a7c422f47cbbb8dbe..5536f8024572352a09b1febbe479b376e4d6ecab 100644
--- a/AMDiS/src/parallel/ParallelDebug.h
+++ b/AMDiS/src/parallel/ParallelDebug.h
@@ -95,12 +95,8 @@ namespace AMDiS {
      * DOFs fits together for all communication partners.
      *
      * \param[in]  pdb        Parallel problem definition used for debugging.
-     * \param[in]  sendDofs   The map of all DOFs the rank will send.
-     * \param[in]  recvDofs   The map of all DOFs the rank will receive.
      */
-    static void testDofContainerCommunication(MeshDistributor &pdb, 
-					      map<int, map<const FiniteElemSpace*, DofContainer> > &sendDofs,
-					      map<int, map<const FiniteElemSpace*, DofContainer> > &recvDofs);
+    static void testDofContainerCommunication(MeshDistributor &pdb);
 
     /// Tests if there are multiple DOFs in mesh with the same coords.
     static void testDoubleDofs(Mesh *mesh);
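
Since the containers are now owned by the distributor's DofComm, the debug check needs only the distributor itself; the call reads (as in MeshDistributor.cc above):

    #if (DEBUG != 0)
      ParallelDebug::testDofContainerCommunication(*this);
    #endif
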
diff --git a/AMDiS/src/parallel/ParallelDofMapping.cc b/AMDiS/src/parallel/ParallelDofMapping.cc
index 6690c6044e8d5fc972dc9502a6700722929cbbf9..0566169c2150937e91c19f6e212d35c6239fca34 100644
--- a/AMDiS/src/parallel/ParallelDofMapping.cc
+++ b/AMDiS/src/parallel/ParallelDofMapping.cc
@@ -88,7 +88,7 @@ namespace AMDiS {
 
     StdMpi<vector<int> > stdMpi(levelData->getMpiComm(0));
 
-    for (DofComm::Iterator it(*sendDofs, level, feSpace); 
+    for (DofComm::Iterator it(dofComm->getSendDofs(), level, feSpace); 
 	 !it.end(); it.nextRank())
       for (; !it.endDofIter(); it.nextDof())
 	if (dofMap[level].count(it.getDofIndex()) && !nonRankDofs[level].count(it.getDofIndex()))
@@ -99,7 +99,7 @@ namespace AMDiS {
 
     // === Check from which ranks this rank must receive some data. ===
 
-    for (DofComm::Iterator it(*recvDofs, level, feSpace); 
+    for (DofComm::Iterator it(dofComm->getRecvDofs(), level, feSpace); 
 	 !it.end(); it.nextRank()) {
       bool recvFromRank = false;
       for (; !it.endDofIter(); it.nextDof()) {
@@ -121,7 +121,7 @@ namespace AMDiS {
 
     // === And set the global indices for all DOFs that are not owned by rank. ===
     
-    for (DofComm::Iterator it(*recvDofs, level, feSpace);
+    for (DofComm::Iterator it(dofComm->getRecvDofs(), level, feSpace);
 	 !it.end(); it.nextRank()) {
       int i = 0;
       for (; !it.endDofIter(); it.nextDof())
@@ -183,17 +183,16 @@ namespace AMDiS {
   }
 
 
-  void ParallelDofMapping::setDofComm(DofComm &pSend, DofComm &pRecv)
+  void ParallelDofMapping::setDofComm(DofComm &dc)
   {
     FUNCNAME("ParallelDofMapping::setDofComm()");
 
-    sendDofs = &pSend;
-    recvDofs = &pRecv;
+    dofComm = &dc;
 
     // Add the DOF communicator also to all FE space DOF mappings.
     for (vector<const FiniteElemSpace*>::iterator it = feSpacesUnique.begin();
 	 it != feSpacesUnique.end(); ++it)
-      data[*it].setDofComm(pSend, pRecv);
+      data[*it].setDofComm(dc);
   }
 
 
@@ -343,14 +342,13 @@ namespace AMDiS {
       if (!hasNonLocalDofs)
 	continue;
       
-      TEST_EXIT_DBG(sendDofs != NULL && recvDofs != NULL)
-	("No communicator given!\n");
+      TEST_EXIT_DBG(dofComm != NULL)("No communicator given!\n");
       
       // === Communicate the matrix indices for all DOFs that are on some ===
       // === interior boundaries.                                         ===
 
       StdMpi<vector<DegreeOfFreedom> > stdMpi(levelData->getMpiComm(0));
-      for (DofComm::Iterator it(*sendDofs, level, feSpaces[i]); 
+      for (DofComm::Iterator it(dofComm->getSendDofs(), level, feSpaces[i]); 
 	   !it.end(); it.nextRank()) {
 	vector<DegreeOfFreedom> sendGlobalDofs;
 	
@@ -364,14 +362,14 @@ namespace AMDiS {
 	stdMpi.send(it.getRank(), sendGlobalDofs);
       }
       
-      for (DofComm::Iterator it(*recvDofs, level, feSpaces[i]); 
+      for (DofComm::Iterator it(dofComm->getRecvDofs(), level, feSpaces[i]); 
 	   !it.end(); it.nextRank())
 	stdMpi.recv(it.getRank());
       
       stdMpi.startCommunication();
       
       {
-	for (DofComm::Iterator it(*recvDofs, level, feSpaces[i]); 
+	for (DofComm::Iterator it(dofComm->getRecvDofs(), level, feSpaces[i]); 
 	     !it.end(); it.nextRank()) {
 	  int counter = 0;
 	  for (; !it.endDofIter(); it.nextDof()) {
diff --git a/AMDiS/src/parallel/ParallelDofMapping.h b/AMDiS/src/parallel/ParallelDofMapping.h
index c4e945113e54cbf54f265362ad11971d6c085836..00d2c9e762053087851d0eafee11b05cdbb01262 100644
--- a/AMDiS/src/parallel/ParallelDofMapping.h
+++ b/AMDiS/src/parallel/ParallelDofMapping.h
@@ -105,8 +105,7 @@ namespace AMDiS {
    /// This is the only valid constructor to be used. 
     FeSpaceDofMap(MeshLevelData* ld)
       : levelData(ld),
-	sendDofs(NULL),
-	recvDofs(NULL),
+	dofComm(NULL),
 	feSpace(NULL),
 	dofMap(1),
 	needGlobalMapping(false),
@@ -216,10 +215,9 @@ namespace AMDiS {
     }
 
     /// Sets the DOF communicators.
-    void setDofComm(DofComm &pSend, DofComm &pRecv)
+    void setDofComm(DofComm &dc)
     {
-      sendDofs = &pSend;
-      recvDofs = &pRecv;
+      dofComm = &dc;
     }
 
   private:
@@ -233,11 +231,11 @@ namespace AMDiS {
   private:
     MeshLevelData *levelData;
 
-    /// DOF communicators for all DOFs on interior boundaries.
-    DofComm *sendDofs, *recvDofs;
+    /// DOF communicator for all DOFs on interior boundaries.
+    DofComm *dofComm;
 
     /// The FE space this mapping belongs to. This is used only to get the
-    /// correct DOF communicator in \ref sendDofs and \ref recvDofs.
+    /// correct DOF communicator in \ref dofComm.
     const FiniteElemSpace *feSpace;
 
     /// Mapping data from DOF indices to local and global indices.
@@ -270,8 +268,6 @@ namespace AMDiS {
   public:
     ParallelDofMapping() 
       : levelData(NULL),
-	sendDofs(NULL),
-	recvDofs(NULL),
 	hasNonLocalDofs(false),
 	needMatIndex(false),
 	needMatIndexFromGlobal(false),
@@ -311,7 +307,7 @@ namespace AMDiS {
 
     /// Set the DOF communicator objects that are required to exchange information
     /// about DOFs that are on interior boundaries.
-    void setDofComm(DofComm &pSend, DofComm &pRecv);
+    void setDofComm(DofComm &dofComm);
 
     void setComputeMatIndex(bool b, bool global = false)
     {
@@ -422,8 +418,8 @@ namespace AMDiS {
   private:
     MeshLevelData *levelData;
 
-    /// DOF communicators for all DOFs on interior boundaries.
-    DofComm *sendDofs, *recvDofs;
+    /// DOF communicator for all DOFs on interior boundaries.
+    DofComm *dofComm;
 
     /// Is true if there are DOFs in at least one subdomain that are not owned
     /// by the rank. If the value is false, each rank contains only DOFs that
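
Wiring a mapping to the communicator is now a single call; a sketch of the caller side (meshDistributor assumed, matching the FETI solver hunks below):

    // One DofComm carries the send, receive and periodic containers,
    // so the two coupled setter arguments collapse into one.
    dofMap.setDofComm(meshDistributor->getDofComm());
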
diff --git a/AMDiS/src/parallel/PetscSolverFeti.cc b/AMDiS/src/parallel/PetscSolverFeti.cc
index aad8af990c9976db2200de464b5ce23e656a9bcc..b7163f260e11139302effd260e063664772767fa 100644
--- a/AMDiS/src/parallel/PetscSolverFeti.cc
+++ b/AMDiS/src/parallel/PetscSolverFeti.cc
@@ -287,10 +287,8 @@ namespace AMDiS {
     if (fetiPreconditioner == FETI_DIRICHLET)
       interiorDofMap.clear();
 
-    primalDofMap.setDofComm(meshDistributor->getSendDofs(),
-			    meshDistributor->getRecvDofs());
-    lagrangeMap.setDofComm(meshDistributor->getSendDofs(), 
-			   meshDistributor->getRecvDofs());
+    primalDofMap.setDofComm(meshDistributor->getDofComm());
+    lagrangeMap.setDofComm(meshDistributor->getDofComm());
 
     for (unsigned int i = 0; i < meshDistributor->getFeSpaces().size(); i++) {
       const FiniteElemSpace *feSpace = meshDistributor->getFeSpace(i);
@@ -400,7 +398,7 @@ namespace AMDiS {
     // === Create for each dual node that is owned by the rank, the set ===
     // === of ranks that contain this node (denoted by W(x_j)).         ===
 
-    for (DofComm::Iterator it(meshDistributor->getSendDofs(), meshLevel, feSpace); 
+    for (DofComm::Iterator it(meshDistributor->getDofComm().getSendDofs(), meshLevel, feSpace); 
 	 !it.end(); it.nextRank())
       for (; !it.endDofIter(); it.nextDof()) {
 	if (!isPrimal(feSpace, it.getDofIndex())) {
@@ -415,7 +413,7 @@ namespace AMDiS {
 
     StdMpi<vector<std::set<int> > > stdMpi(meshDistributor->getMpiComm());
 
-    for (DofComm::Iterator it(meshDistributor->getSendDofs(), meshLevel, feSpace);
+    for (DofComm::Iterator it(meshDistributor->getDofComm().getSendDofs(), meshLevel, feSpace);
 	 !it.end(); it.nextRank())
       for (; !it.endDofIter(); it.nextDof())
 	if (!isPrimal(feSpace, it.getDofIndex()))
@@ -423,7 +421,7 @@ namespace AMDiS {
 
     stdMpi.updateSendDataSize();
 
-    for (DofComm::Iterator it(meshDistributor->getRecvDofs(), meshLevel, feSpace); 
+    for (DofComm::Iterator it(meshDistributor->getDofComm().getRecvDofs(), meshLevel, feSpace); 
 	 !it.end(); it.nextRank()) {
       bool recvFromRank = false;
       for (; !it.endDofIter(); it.nextDof()) {
@@ -439,7 +437,7 @@ namespace AMDiS {
 
     stdMpi.startCommunication();
 
-    for (DofComm::Iterator it(meshDistributor->getRecvDofs(), meshLevel, feSpace); 
+    for (DofComm::Iterator it(meshDistributor->getDofComm().getRecvDofs(), meshLevel, feSpace); 
 	 !it.end(); it.nextRank()) {
       int i = 0;
       for (; !it.endDofIter(); it.nextDof())
diff --git a/AMDiS/src/parallel/PetscSolverGlobalMatrix.cc b/AMDiS/src/parallel/PetscSolverGlobalMatrix.cc
index f1dafc82d61dc239522763eca8d25398eae08396..7dfd12283c0a43fff8ff930a73eadf967237ce3d 100644
--- a/AMDiS/src/parallel/PetscSolverGlobalMatrix.cc
+++ b/AMDiS/src/parallel/PetscSolverGlobalMatrix.cc
@@ -532,7 +532,7 @@ namespace AMDiS {
     // First, create for all ranks to which we send data a MatrixNnzEntry 
     // object with 0 entries.
     for (unsigned int i = 0; i < feSpaces.size(); i++) {
-      for (DofComm::Iterator it(meshDistributor->getRecvDofs(), feSpaces[i]);
+      for (DofComm::Iterator it(meshDistributor->getDofComm().getRecvDofs(), feSpaces[i]);
 	   !it.end(); it.nextRank()) {
 	sendMatrixEntry[it.getRank()].resize(0);
 	
@@ -544,7 +544,7 @@ namespace AMDiS {
     // Create the list of ranks from which we receive data.
     std::set<int> recvFromRank;
     for (unsigned int i = 0; i < feSpaces.size(); i++) 
-      for (DofComm::Iterator it(meshDistributor->getSendDofs(), feSpaces[i]);
+      for (DofComm::Iterator it(meshDistributor->getDofComm().getSendDofs(), feSpaces[i]);
 	   !it.end(); it.nextRank())
 	recvFromRank.insert(it.getRank());
 
diff --git a/AMDiS/src/parallel/PetscSolverSchur.cc b/AMDiS/src/parallel/PetscSolverSchur.cc
index 38813de583e526909a9eabb60fbfe753b1f3e493..cbf962b8c213283ec44e64e1a54506ad51d6e393 100644
--- a/AMDiS/src/parallel/PetscSolverSchur.cc
+++ b/AMDiS/src/parallel/PetscSolverSchur.cc
@@ -31,7 +31,7 @@ namespace AMDiS {
 
     boundaryDofs.clear();
     std::set<DegreeOfFreedom> boundaryLocalDofs;
-    for (DofComm::Iterator it(meshDistributor->getSendDofs(), feSpace);
+    for (DofComm::Iterator it(meshDistributor->getDofComm().getSendDofs(), feSpace);
 	 !it.end(); it.nextRank())
       for (; !it.endDofIter(); it.nextDof()) {
 	boundaryLocalDofs.insert(it.getDofIndex());	  
@@ -93,7 +93,7 @@ namespace AMDiS {
 
 
     std::set<DegreeOfFreedom> otherBoundaryLocalDofs;
-    for (DofComm::Iterator it(meshDistributor->getRecvDofs(), feSpace);
+    for (DofComm::Iterator it(meshDistributor->getDofComm().getRecvDofs(), feSpace);
 	 !it.end(); it.nextRank())
       for (; !it.endDofIter(); it.nextDof())
 	otherBoundaryLocalDofs.insert(it.getDofIndex());
@@ -129,7 +129,7 @@ namespace AMDiS {
 
 
     StdMpi<vector<DegreeOfFreedom> > stdMpi(mpiComm);
-    for (DofComm::Iterator it(meshDistributor->getSendDofs(), feSpace);
+    for (DofComm::Iterator it(meshDistributor->getDofComm().getSendDofs(), feSpace);
 	 !it.end(); it.nextRank()) {
       stdMpi.getSendData(it.getRank()).resize(0);
       stdMpi.getSendData(it.getRank()).reserve(it.getDofs().size());
@@ -146,13 +146,13 @@ namespace AMDiS {
 
     stdMpi.updateSendDataSize();
 
-    for (DofComm::Iterator it(meshDistributor->getRecvDofs(), feSpace);
+    for (DofComm::Iterator it(meshDistributor->getDofComm().getRecvDofs(), feSpace);
 	 !it.end(); it.nextRank())
       stdMpi.recv(it.getRank());
 
     stdMpi.startCommunication();
 
-    for (DofComm::Iterator it(meshDistributor->getRecvDofs(), feSpace);
+    for (DofComm::Iterator it(meshDistributor->getDofComm().getRecvDofs(), feSpace);
 	 !it.end(); it.nextRank())
       for (; !it.endDofIter(); it.nextDof()) {
 	int globalRecvDof = (*dofMap)[feSpace][0][it.getDofIndex()].global;