diff --git a/AMDiS/src/parallel/MeshDistributor.cc b/AMDiS/src/parallel/MeshDistributor.cc
index f9b9cedf36ceb7f0c18e19bc352038f0533a63b4..f027859386cf14d7fd3d53b7653b5e2d07804f24 100644
--- a/AMDiS/src/parallel/MeshDistributor.cc
+++ b/AMDiS/src/parallel/MeshDistributor.cc
@@ -96,8 +96,17 @@ namespace AMDiS {
     GET_PARAMETER(0, name + "->log main rank", "%d", &tmp);
     Msg::outputMainRank = (tmp > 0);
 
-    //    partitioner = new ParMetisPartitioner(&mpiComm);
-    partitioner = new ZoltanPartitioner(&mpiComm);
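+    // Choose the mesh partitioner named in the init file ("parmetis" or "zoltan"); ParMetis is the default.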
+    string partStr = "parmetis";
+    GET_PARAMETER(0, name + "->partitioner", &partStr);
+
+    if (partStr == "parmetis") 
+      partitioner = new ParMetisPartitioner(&mpiComm);
+
+    if (partStr == "zoltan")
+      partitioner = new ZoltanPartitioner(&mpiComm);
+
+    TEST_EXIT(partitioner)("Could not create partitioner \"%s\"!\n", partStr.c_str());
   }
 
 
@@ -256,11 +264,13 @@ namespace AMDiS {
     MSG("Debug mode tests finished!\n");
 #endif
 
-
     // === Create periodic DOF mapping, if there are periodic boundaries. ===
 
     createPeriodicMap();
 
+#if (DEBUG != 0)
+    ParallelDebug::testPeriodicBoundary(*this);
+#endif
 
     // === Global refinements. ===
     
@@ -276,6 +286,10 @@ namespace AMDiS {
       // === Update periodic mapping, if there are periodic boundaries. ===     
 
       createPeriodicMap();
+
+#if (DEBUG != 0)
+      ParallelDebug::testPeriodicBoundary(*this);
+#endif
     }
 
 
@@ -700,21 +714,29 @@ namespace AMDiS {
     debug::writeMesh(feSpace, -1, debugOutputDir + "mesh");
 #endif
 
+
     // === Because the mesh has been changed, update the DOF numbering and mappings. ===
 
     updateLocalGlobalNumbering();
 
+
     // === Update periodic mapping, if there are periodic boundaries. ===
+
     createPeriodicMap();
 
-    INFO(info, 8)("Parallel mesh adaption needed %.5f seconds\n", 
-		  MPI::Wtime() - first);
+#if (DEBUG != 0)
+    ParallelDebug::testPeriodicBoundary(*this);
+#endif
 
 
     // === The mesh has changed, so check if it is required to repartition the mesh. ===
 
     nMeshChangesAfterLastRepartitioning++;
 
+
+    INFO(info, 8)("Parallel mesh adaption needed %.5f seconds\n", 
+		  MPI::Wtime() - first);
+
     if (repartitioningAllowed && 
 	nMeshChangesAfterLastRepartitioning >= repartitionIthChange) {
       repartitionMesh();
@@ -1781,8 +1803,8 @@ namespace AMDiS {
     }
 
     stdMpi.updateSendDataSize();
-    stdMpi.startCommunication();
 
+    stdMpi.startCommunication();
 
     // === The rank has received the dofs from the rank on the other side of ===
     // === the boundary. Now it can use them to create the mapping between   ===
@@ -1856,9 +1878,9 @@ namespace AMDiS {
     stdMpi2.startCommunication();
 
     for (std::map<int, PeriodicDofMap>::iterator it = stdMpi2.getRecvData().begin();
-	 it != stdMpi2.getRecvData().end(); ++it)
+	 it != stdMpi2.getRecvData().end(); ++it) {
       for (PeriodicDofMap::iterator perIt = it->second.begin();
-	   perIt != it->second.end(); ++perIt)
+	   perIt != it->second.end(); ++perIt) {
 	for (DofMapping::iterator dofIt = perIt->second.begin();
 	     dofIt != perIt->second.end(); ++dofIt) {
 	  TEST_EXIT_DBG(periodicDof[perIt->first].count(dofIt->second) == 0 ||
@@ -1867,10 +1889,8 @@ namespace AMDiS {
 
 	  periodicDof[perIt->first][dofIt->second] = dofIt->first;
 	}
-
-#if (DEBUG != 0)
-    ParallelDebug::testPeriodicBoundary(*this);
-#endif
+      }
+    }
   }
 
 
diff --git a/AMDiS/src/parallel/ParMetisPartitioner.cc b/AMDiS/src/parallel/ParMetisPartitioner.cc
index 8676a8b106124d69cac83347c97aba0f697c1c3a..80a6ef4bda0a7a85102cd1d0c3c9d35023b92c19 100644
--- a/AMDiS/src/parallel/ParMetisPartitioner.cc
+++ b/AMDiS/src/parallel/ParMetisPartitioner.cc
@@ -28,7 +28,7 @@ namespace AMDiS {
 
   ParMetisMesh::ParMetisMesh(Mesh *mesh, MPI::Intracomm *comm, 
 			     std::map<int, bool>& elementInRank,
-			     std::map<DegreeOfFreedom, DegreeOfFreedom> *mapLocalGlobal)
+			     map<DegreeOfFreedom, DegreeOfFreedom> *mapLocalGlobal)
     : dim(mesh->getDim()),
       nElements(0),
       mpiComm(comm)
@@ -184,7 +184,7 @@ namespace AMDiS {
   {
     FUNCNAME("ParMetisGraph::print()");
 
-    std::stringstream oss;
+    stringstream oss;
     for (int i = 0; i <= MPI::COMM_WORLD.Get_size(); i++)
       oss << parMetisMesh->getElementDist()[i] << " ";
     
@@ -224,7 +224,7 @@ namespace AMDiS {
   }
 
 
-  bool ParMetisPartitioner::partition(std::map<int, double> &elemWeights,
+  bool ParMetisPartitioner::partition(map<int, double> &elemWeights,
 				      PartitionMode mode) 
   {
     FUNCNAME("ParMetisPartitioner::partition()");
@@ -246,8 +246,8 @@ namespace AMDiS {
 
     // === Create weight array ===
 
-    std::vector<int> wgts(nElements);
-    std::vector<float> floatWgts(nElements);
+    vector<int> wgts(nElements);
+    vector<float> floatWgts(nElements);
     unsigned int floatWgtsPos = 0;
     float maxWgt = 0.0;
 
@@ -287,11 +287,11 @@ namespace AMDiS {
     int ncon = 1; // one weight at each vertex!
     int nparts = mpiSize; // number of partitions
 
-    std::vector<float> tpwgts(mpiSize);
+    vector<float> tpwgts(mpiSize);
     float ubvec = 1.05;
     int options[4] = {0, 0, 15, 1}; // default options
     int edgecut = -1;
-    std::vector<int> part(nElements);
+    vector<int> part(nElements);
 
     // set tpwgts
     for (int i = 0; i < mpiSize; i++)
@@ -373,7 +373,7 @@ namespace AMDiS {
       break;
     case ADAPTIVE_REPART:
       {
-	std::vector<int> vsize(nElements);
+	vector<int> vsize(nElements);
 	for (int i = 0; i < nElements; i++)
 	  vsize[i] = static_cast<int>(floatWgts[i]);
 
@@ -425,7 +425,7 @@ namespace AMDiS {
   }
 
 
-  void ParMetisPartitioner::getPartitionMap(std::map<int, int> &partitionMap)
+  void ParMetisPartitioner::getPartitionMap(map<int, int> &partitionMap)
   {
     FUNCNAME("ParMetisPartitioner::getPartitionMap()");
 
@@ -437,7 +437,7 @@ namespace AMDiS {
 
     int mpiRank = mpiComm->Get_rank();
     int mpiSize = mpiComm->Get_size();
-    std::vector<int> nPartitionElements(mpiSize);
+    vector<int> nPartitionElements(mpiSize);
     int *elmdist = parMetisMesh->getElementDist();
 
     for (int i = 0; i < mpiSize; i++)
@@ -448,7 +448,7 @@ namespace AMDiS {
     int localElements = parMetisMesh->getNumElements();
     mpiComm->Allreduce(&localElements, &nElements, 1, MPI_INT, MPI_SUM);
 
-    std::vector<int> partitionElements(nElements);
+    vector<int> partitionElements(nElements);
 
     // distribute partition elements
     mpiComm->Allgatherv(parMetisMesh->getAMDiSIndices(),
@@ -459,7 +459,7 @@ namespace AMDiS {
 			elmdist, 
 			MPI_INT);
 
-    // fill partitionVec
+    // fill partitionMap
     for (int i = 0; i < mpiSize; i++)
       for (int j = 0; j < nPartitionElements[i]; j++)
 	partitionMap[partitionElements[elmdist[i] + j]] = i;
@@ -540,7 +540,7 @@ namespace AMDiS {
 		       MPI_INT);
     
     TEST_EXIT(elementInRank.size() != 0)("Should not happen!\n");
-    for (std::map<int, bool>::iterator it = elementInRank.begin();
+    for (map<int, bool>::iterator it = elementInRank.begin();
 	 it != elementInRank.end(); ++it)
       elementInRank[it->first] = false;
 
diff --git a/AMDiS/src/parallel/ParallelDebug.cc b/AMDiS/src/parallel/ParallelDebug.cc
index c93e00749b26d2b804c9fb0c4169276847e237a3..7219cf1e475371ecce86d63ff0a8e14e39df2ea5 100644
--- a/AMDiS/src/parallel/ParallelDebug.cc
+++ b/AMDiS/src/parallel/ParallelDebug.cc
@@ -146,7 +146,7 @@ namespace AMDiS {
 
     // === 1. check: All periodic DOFs should have at least a correct number ===
     // === of periodic associations.                                         ===
-
+   
     for (map<int, std::set<BoundaryType> >::iterator it = pdb.periodicDofAssociations.begin();
 	 it != pdb.periodicDofAssociations.end(); ++it) {
       WorldVector<double> c;