diff --git a/AMDiS/src/parallel/MeshDistributor.cc b/AMDiS/src/parallel/MeshDistributor.cc
index 5f85e17741a0baa8d84e7781c08892d77d6f0aa6..3471f3c3872d6b8d390ee22e3918da9766f0e174 100644
--- a/AMDiS/src/parallel/MeshDistributor.cc
+++ b/AMDiS/src/parallel/MeshDistributor.cc
@@ -91,9 +91,9 @@ namespace AMDiS {
   {
     FUNCNAME("MeshDistributor::ParalleDomainBase()");
 
-    mpiRank = MPI::COMM_WORLD.Get_rank();
-    mpiSize = MPI::COMM_WORLD.Get_size();
     mpiComm = MPI::COMM_WORLD;
+    mpiRank = mpiComm.Get_rank();
+    mpiSize = mpiComm.Get_size();
 
     Parameters::get(name + "->repartitioning", repartitioningAllowed);
     Parameters::get(name + "->debug output dir", debugOutputDir);
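
The hunk above caches the communicator first and then derives rank and size from it, so the communicator is selected in exactly one place. A minimal sketch of the resulting pattern, assuming mpiComm is an MPI::Intracomm member of MeshDistributor (the member declarations below are illustrative; they are not part of this diff):

    // Assumed members (not shown in this patch):
    //   MPI::Intracomm mpiComm;
    //   int mpiRank, mpiSize;
    mpiComm = MPI::COMM_WORLD;        // the only line that picks the communicator
    mpiRank = mpiComm.Get_rank();     // rank within mpiComm
    mpiSize = mpiComm.Get_size();     // number of ranks in mpiComm

Running the distributor on a sub-communicator would then only require changing the assignment, e.g. mpiComm = MPI::COMM_WORLD.Split(color, key) with the MPI C++ bindings.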
diff --git a/AMDiS/src/parallel/ParallelDebug.cc b/AMDiS/src/parallel/ParallelDebug.cc
index 2e24631771b40ebddb4db8d6e85810049c3c0ac9..ed6bf80d240d4564041d906d3a221b65356f0882 100644
--- a/AMDiS/src/parallel/ParallelDebug.cc
+++ b/AMDiS/src/parallel/ParallelDebug.cc
@@ -874,9 +874,9 @@ namespace AMDiS {
       oss << "-" << counter;
     oss << ".vtu";
 
-    DOFVector<double> tmpa(feSpace, "tmp");
-    tmpa.set(MPI::COMM_WORLD.Get_rank());
-    VtkWriter::writeFile(&tmpa, oss.str());
+    DOFVector<double> tmp(feSpace, "tmp");
+    tmp.set(MPI::COMM_WORLD.Get_rank());
+    VtkWriter::writeFile(&tmp, oss.str());
   }
 
   
diff --git a/AMDiS/src/parallel/PetscSolver.cc b/AMDiS/src/parallel/PetscSolver.cc
index da03fe93fa366cfc367e7496879983b96aaee8df..c05d59637ab851e70b9110d119d2e1f0ce1d9c41 100644
--- a/AMDiS/src/parallel/PetscSolver.cc
+++ b/AMDiS/src/parallel/PetscSolver.cc
@@ -62,13 +62,13 @@ namespace AMDiS {
     FUNCNAME("PetscSolver::copyVec()");
 
     IS originIs, destIs;
-    ISCreateGeneral(PETSC_COMM_WORLD, 
+    ISCreateGeneral(*mpiComm, 
 		    originIndex.size(), 
 		    &(originIndex[0]),
 		    PETSC_USE_POINTER,
 		    &originIs);
 
-    ISCreateGeneral(PETSC_COMM_WORLD, 
+    ISCreateGeneral(*mpiComm, 
 		    destIndex.size(), 
 		    &(destIndex[0]),
 		    PETSC_USE_POINTER,
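
In the solver classes, PETSc objects are now created on *mpiComm instead of the hard-coded PETSC_COMM_WORLD. A minimal sketch of what this relies on, assuming the solver stores a pointer to the distributor's communicator (the member declaration is an assumption; only the call sites appear in this patch):

    // Assumed member in the solver (illustrative, not shown here):
    //   MPI::Intracomm *mpiComm;
    //
    // *mpiComm yields an MPI::Intracomm; the MPI C++ bindings provide an
    // implicit conversion operator MPI_Comm(), so PETSc's C interface accepts
    // it directly wherever a communicator handle is expected:
    KSP solver;
    KSPCreate(*mpiComm, &solver);   // instead of KSPCreate(PETSC_COMM_WORLD, &solver)

The files below apply the same substitution to the KSPCreate, VecCreateMPI, MatCreateMPIAIJ, MatCreateShell, MatCreateNest, VecCreateNest, ISCreateStride, and MatNullSpaceCreate call sites.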
diff --git a/AMDiS/src/parallel/PetscSolverFeti.cc b/AMDiS/src/parallel/PetscSolverFeti.cc
index 8e6b95b4b58cbe3b04f2c8de9cb099d2569beb72..1178e701991d05f4dce6ed99aad013f21457fe54 100644
--- a/AMDiS/src/parallel/PetscSolverFeti.cc
+++ b/AMDiS/src/parallel/PetscSolverFeti.cc
@@ -422,7 +422,7 @@ namespace AMDiS {
 
     // === Create distributed matrix for Lagrange constraints. ===
 
-    MatCreateMPIAIJ(PETSC_COMM_WORLD,
+    MatCreateMPIAIJ(*mpiComm,
 		    lagrangeMap.getRankDofs(), localDofMap.getRankDofs(),
 		    lagrangeMap.getOverallDofs(), localDofMap.getOverallDofs(),	
 		    2, PETSC_NULL, 2, PETSC_NULL,
@@ -483,14 +483,14 @@ namespace AMDiS {
       schurPrimalData.mat_b_primal = &mat_b_primal;
       schurPrimalData.fetiSolver = this;
       
-      VecCreateMPI(PETSC_COMM_WORLD, 
+      VecCreateMPI(*mpiComm, 
 		   localDofMap.getRankDofs(), localDofMap.getOverallDofs(),
 		   &(schurPrimalData.tmp_vec_b));
-      VecCreateMPI(PETSC_COMM_WORLD, 
+      VecCreateMPI(*mpiComm, 
 		   primalDofMap.getRankDofs(), primalDofMap.getOverallDofs(),
 		   &(schurPrimalData.tmp_vec_primal));
 
-      MatCreateShell(PETSC_COMM_WORLD,
+      MatCreateShell(*mpiComm,
 		     primalDofMap.getRankDofs(), primalDofMap.getRankDofs(), 
 		     primalDofMap.getOverallDofs(), primalDofMap.getOverallDofs(),
 		     &schurPrimalData, 
@@ -498,7 +498,7 @@ namespace AMDiS {
       MatShellSetOperation(mat_schur_primal, MATOP_MULT, 
 			   (void(*)(void))petscMultMatSchurPrimal);
       
-      KSPCreate(PETSC_COMM_WORLD, &ksp_schur_primal);
+      KSPCreate(*mpiComm, &ksp_schur_primal);
       KSPSetOperators(ksp_schur_primal, mat_schur_primal, mat_schur_primal, SAME_NONZERO_PATTERN);
       KSPSetOptionsPrefix(ksp_schur_primal, "schur_primal_");
       KSPSetType(ksp_schur_primal, KSPGMRES);
@@ -516,7 +516,7 @@ namespace AMDiS {
       int nRowsOverallB = localDofMap.getOverallDofs();
 
       Mat matBPi;
-      MatCreateMPIAIJ(PETSC_COMM_WORLD,
+      MatCreateMPIAIJ(*mpiComm,
 		      nRowsRankB, nRowsRankPrimal, 
 		      nRowsOverallB, nRowsOverallPrimal,
 		      30, PETSC_NULL, 30, PETSC_NULL, &matBPi);
@@ -539,9 +539,6 @@ namespace AMDiS {
 	MatRestoreRow(mat_b_primal, row, &nCols, &cols, &values);
       }
 
-      int maxLocalPrimal = mat_b_primal_cols.size();
-      mpi::globalMax(maxLocalPrimal);
-
       TEST_EXIT(static_cast<int>(mat_b_primal_cols.size()) == 
 		primalDofMap.getLocalDofs())
 	("Should not happen!\n");
@@ -585,7 +582,7 @@ namespace AMDiS {
       MatGetInfo(mat_primal_primal, MAT_GLOBAL_SUM, &minfo);
       MSG("Schur primal matrix nnz = %f\n", minfo.nz_used);
 
-      KSPCreate(PETSC_COMM_WORLD, &ksp_schur_primal);
+      KSPCreate(*mpiComm, &ksp_schur_primal);
       KSPSetOperators(ksp_schur_primal, mat_primal_primal, 
 		      mat_primal_primal, SAME_NONZERO_PATTERN);
       KSPSetOptionsPrefix(ksp_schur_primal, "schur_primal_");
@@ -635,24 +632,24 @@ namespace AMDiS {
     fetiData.fetiSolver = this;
     fetiData.ksp_schur_primal = &ksp_schur_primal;
 
-    VecCreateMPI(PETSC_COMM_WORLD, 
+    VecCreateMPI(*mpiComm, 
 		 localDofMap.getRankDofs(), localDofMap.getOverallDofs(),
 		 &(fetiData.tmp_vec_b));
-    VecCreateMPI(PETSC_COMM_WORLD,
+    VecCreateMPI(*mpiComm,
 		 lagrangeMap.getRankDofs(), lagrangeMap.getOverallDofs(),
 		 &(fetiData.tmp_vec_lagrange));
-    VecCreateMPI(PETSC_COMM_WORLD, 
+    VecCreateMPI(*mpiComm, 
 		 primalDofMap.getRankDofs(), primalDofMap.getOverallDofs(),
 		 &(fetiData.tmp_vec_primal));
 
-    MatCreateShell(PETSC_COMM_WORLD,
+    MatCreateShell(*mpiComm,
 		   lagrangeMap.getRankDofs(), lagrangeMap.getRankDofs(),
 		   lagrangeMap.getOverallDofs(), lagrangeMap.getOverallDofs(),
 		   &fetiData, &mat_feti);
     MatShellSetOperation(mat_feti, MATOP_MULT, (void(*)(void))petscMultMatFeti);
 
 
-    KSPCreate(PETSC_COMM_WORLD, &ksp_feti);
+    KSPCreate(*mpiComm, &ksp_feti);
     KSPSetOperators(ksp_feti, mat_feti, mat_feti, SAME_NONZERO_PATTERN);
     KSPSetOptionsPrefix(ksp_feti, "feti_");
     KSPSetType(ksp_feti, KSPGMRES);
@@ -687,7 +684,7 @@ namespace AMDiS {
       fetiDirichletPreconData.mat_duals_interior = &mat_duals_interior;
       fetiDirichletPreconData.ksp_interior = &ksp_interior;
       
-      VecCreateMPI(PETSC_COMM_WORLD, 
+      VecCreateMPI(*mpiComm, 
 		   localDofMap.getRankDofs(),localDofMap.getOverallDofs(),
 		   &(fetiDirichletPreconData.tmp_vec_b));      
       MatGetVecs(mat_duals_duals, PETSC_NULL, 
@@ -732,7 +729,7 @@ namespace AMDiS {
 	}
       }
 
-      VecCreateMPI(PETSC_COMM_WORLD, 
+      VecCreateMPI(*mpiComm, 
 		   localDofMap.getRankDofs(),
 		   localDofMap.getOverallDofs(),
 		   &(fetiLumpedPreconData.tmp_vec_b));
@@ -927,17 +924,17 @@ namespace AMDiS {
     MatCreateSeqAIJ(PETSC_COMM_SELF, nRowsRankB, nRowsRankB, 60, PETSC_NULL,
 		    &mat_b_b);
     
-    MatCreateMPIAIJ(PETSC_COMM_WORLD,
+    MatCreateMPIAIJ(*mpiComm,
 		    nRowsRankPrimal, nRowsRankPrimal, 
 		    nRowsOverallPrimal, nRowsOverallPrimal,
 		    60, PETSC_NULL, 60, PETSC_NULL, &mat_primal_primal);
     
-    MatCreateMPIAIJ(PETSC_COMM_WORLD,
+    MatCreateMPIAIJ(*mpiComm,
 		    nRowsRankB, nRowsRankPrimal, 
 		    nRowsOverallB, nRowsOverallPrimal,
 		    60, PETSC_NULL, 60, PETSC_NULL, &mat_b_primal);
     
-    MatCreateMPIAIJ(PETSC_COMM_WORLD,
+    MatCreateMPIAIJ(*mpiComm,
 		    nRowsRankPrimal, nRowsRankB,
 		    nRowsOverallPrimal, nRowsOverallB,
 		    30, PETSC_NULL, 30, PETSC_NULL, &mat_primal_b);
@@ -1222,9 +1219,9 @@ namespace AMDiS {
 
     vector<const FiniteElemSpace*> feSpaces = getFeSpaces(vec);
 
-    VecCreateMPI(PETSC_COMM_WORLD, 
+    VecCreateMPI(*mpiComm, 
 		 localDofMap.getRankDofs(), localDofMap.getOverallDofs(), &f_b);
-    VecCreateMPI(PETSC_COMM_WORLD, 
+    VecCreateMPI(*mpiComm, 
 		 primalDofMap.getRankDofs(), primalDofMap.getOverallDofs(),
 		 &f_primal);
     
diff --git a/AMDiS/src/parallel/PetscSolverGlobalBlockMatrix.cc b/AMDiS/src/parallel/PetscSolverGlobalBlockMatrix.cc
index c124628f3b2e0a4d5f5517e9c48b51dbe33e1d11..a2faa6eee9b3b0f37d4b53f2365ee0e21bdef49a 100644
--- a/AMDiS/src/parallel/PetscSolverGlobalBlockMatrix.cc
+++ b/AMDiS/src/parallel/PetscSolverGlobalBlockMatrix.cc
@@ -53,7 +53,7 @@ namespace AMDiS {
 
     for (int i = 0; i < nBlocks; i++)
       for (int j = 0; j < nBlocks; j++)
-	MatCreateMPIAIJ(PETSC_COMM_WORLD,
+	MatCreateMPIAIJ(*mpiComm,
 			nRankRows * blockSize[i], nRankRows * blockSize[j],
 			nOverallRows * blockSize[i], nOverallRows * blockSize[j],
 			30 * blockSize[i], PETSC_NULL, 
@@ -79,7 +79,7 @@ namespace AMDiS {
     }	  
 	
 
-    MatCreateNest(PETSC_COMM_WORLD, 
+    MatCreateNest(*mpiComm, 
 		  nBlocks, PETSC_NULL, nBlocks, PETSC_NULL, 
 		  &(nestMat[0]), &petscMatrix);
 
@@ -91,7 +91,7 @@ namespace AMDiS {
     MatAssemblyEnd(petscMatrix, MAT_FINAL_ASSEMBLY);
 
     // === Init PETSc solver. ===
-    KSPCreate(PETSC_COMM_WORLD, &solver);
+    KSPCreate(*mpiComm, &solver);
     KSPSetOperators(solver, petscMatrix, petscMatrix, SAME_NONZERO_PATTERN); 
     KSPSetFromOptions(solver);
 
@@ -113,7 +113,7 @@ namespace AMDiS {
     nestVec.resize(nComponents);
 
     for (int i = 0; i < nComponents; i++) {
-      VecCreateMPI(PETSC_COMM_WORLD, nRankRows, nOverallRows, &(nestVec[i]));
+      VecCreateMPI(*mpiComm, nRankRows, nOverallRows, &(nestVec[i]));
 
       setDofVector(nestVec[i], vec->getDOFVector(i));
       
@@ -121,7 +121,7 @@ namespace AMDiS {
       VecAssemblyEnd(nestVec[i]);
     }
 
-    VecCreateNest(PETSC_COMM_WORLD, nComponents, PETSC_NULL, 
+    VecCreateNest(*mpiComm, nComponents, PETSC_NULL, 
 		  &(nestVec[0]), &petscRhsVec);
 
     VecAssemblyBegin(petscRhsVec);
diff --git a/AMDiS/src/parallel/PetscSolverGlobalMatrix.cc b/AMDiS/src/parallel/PetscSolverGlobalMatrix.cc
index 3e30747d17c7579d447c6e56175278d1a02a9ad8..44c7f122903e05c9923a53b516905233421a9f30 100644
--- a/AMDiS/src/parallel/PetscSolverGlobalMatrix.cc
+++ b/AMDiS/src/parallel/PetscSolverGlobalMatrix.cc
@@ -31,8 +31,8 @@ namespace AMDiS {
 
     // === Create PETSc vector (solution and a temporary vector). ===
 
-    VecCreateMPI(PETSC_COMM_WORLD, nRankRows, nOverallRows, &petscSolVec);
-    VecCreateMPI(PETSC_COMM_WORLD, nRankRows, nOverallRows, &petscTmpVec);
+    VecCreateMPI(*mpiComm, nRankRows, nOverallRows, &petscSolVec);
+    VecCreateMPI(*mpiComm, nRankRows, nOverallRows, &petscTmpVec);
 
     int testddd = 1;
     Parameters::get("block size", testddd);
@@ -70,7 +70,7 @@ namespace AMDiS {
 
     // === Create PETSc matrix with the computed nnz data structure. ===
 
-    MatCreateMPIAIJ(PETSC_COMM_WORLD, nRankRows, nRankRows, 
+    MatCreateMPIAIJ(*mpiComm, nRankRows, nRankRows, 
  		    nOverallRows, nOverallRows,
 		    0, d_nnz, 0, o_nnz, &petscMatrix);
 
@@ -109,7 +109,7 @@ namespace AMDiS {
     MatAssemblyEnd(petscMatrix, MAT_FINAL_ASSEMBLY);
 
     // === Init PETSc solver. ===
-    KSPCreate(PETSC_COMM_WORLD, &solver);
+    KSPCreate(*mpiComm, &solver);
     KSPGetPC(solver, &pc);
     KSPSetOperators(solver, petscMatrix, petscMatrix, SAME_NONZERO_PATTERN); 
     KSPSetTolerances(solver, 0.0, 1e-8, PETSC_DEFAULT, PETSC_DEFAULT);
@@ -137,7 +137,7 @@ namespace AMDiS {
     int nRankRows = meshDistributor->getNumberRankDofs(feSpaces);
     int nOverallRows = meshDistributor->getNumberOverallDofs(feSpaces);
 
-    VecCreateMPI(PETSC_COMM_WORLD, nRankRows, nOverallRows, &petscRhsVec);
+    VecCreateMPI(*mpiComm, nRankRows, nOverallRows, &petscRhsVec);
 
     int testddd = 1;
     Parameters::get("block size", testddd);
@@ -155,7 +155,7 @@ namespace AMDiS {
     if (removeRhsNullSpace) {
       MSG("Remove constant null space from the RHS!\n");
       MatNullSpace sp;
-      MatNullSpaceCreate(PETSC_COMM_WORLD, PETSC_TRUE, 0, PETSC_NULL, &sp);
+      MatNullSpaceCreate(*mpiComm, PETSC_TRUE, 0, PETSC_NULL, &sp);
       MatNullSpaceRemove(sp, petscRhsVec, PETSC_NULL);
       MatNullSpaceDestroy(&sp);
     }
diff --git a/AMDiS/src/parallel/PetscSolverSchur.cc b/AMDiS/src/parallel/PetscSolverSchur.cc
index 77bc310852a57ccac90aef4a690f663bf51092e9..c8f972942122b094363f38a248c5f4c5fd60d265 100644
--- a/AMDiS/src/parallel/PetscSolverSchur.cc
+++ b/AMDiS/src/parallel/PetscSolverSchur.cc
@@ -163,12 +163,12 @@ namespace AMDiS {
 
     // === Create PETSc IS structurs for interior and boundary DOFs. ===
 
-    ISCreateStride(PETSC_COMM_WORLD, 
+    ISCreateStride(*mpiComm, 
 		   nInteriorDofs * nComponents,
 		   (rStartInteriorDofs + rStartBoundaryDofs) * nComponents, 
 		   1, &interiorIs);
 
-    ISCreateStride(PETSC_COMM_WORLD,
+    ISCreateStride(*mpiComm,
 		   nBoundaryDofs * nComponents,
 		   (rStartInteriorDofs + rStartBoundaryDofs + nInteriorDofs) * nComponents, 
 		   1, &boundaryIs);
@@ -189,22 +189,22 @@ namespace AMDiS {
     int nOverallBoundaryRows = nOverallBoundaryDofs * nComponents;
 
 
-    MatCreateMPIAIJ(PETSC_COMM_WORLD, 
+    MatCreateMPIAIJ(*mpiComm, 
 		    nInteriorRows, nInteriorRows, 
 		    nOverallInteriorRows, nOverallInteriorRows,
 		    100, PETSC_NULL, 100, PETSC_NULL, &matA11);
 
-    MatCreateMPIAIJ(PETSC_COMM_WORLD, 
+    MatCreateMPIAIJ(*mpiComm, 
 		    nBoundaryRows, nBoundaryRows, 
 		    nOverallBoundaryRows, nOverallBoundaryRows,
 		    100, PETSC_NULL, 100, PETSC_NULL, &matA22);
 
-    MatCreateMPIAIJ(PETSC_COMM_WORLD, 
+    MatCreateMPIAIJ(*mpiComm, 
 		    nInteriorRows, nBoundaryRows, 
 		    nOverallInteriorRows, nOverallBoundaryRows,
 		    100, PETSC_NULL, 100, PETSC_NULL, &matA12);    
 
-    MatCreateMPIAIJ(PETSC_COMM_WORLD, 
+    MatCreateMPIAIJ(*mpiComm, 
 		    nBoundaryRows, nInteriorRows, 
 		    nOverallBoundaryRows, nOverallInteriorRows,
 		    100, PETSC_NULL, 100, PETSC_NULL, &matA21);
@@ -237,7 +237,7 @@ namespace AMDiS {
     tmpIS[0] = interiorIs;
     tmpIS[1] = boundaryIs;
 
-    MatCreateNest(PETSC_COMM_WORLD, 2, &tmpIS[0], 2, &tmpIS[0], &tmpMat[0][0], &petscMatrix);
+    MatCreateNest(*mpiComm, 2, &tmpIS[0], 2, &tmpIS[0], &tmpMat[0][0], &petscMatrix);
     MatNestSetVecType(petscMatrix, VECNEST);
     MatAssemblyBegin(petscMatrix, MAT_FINAL_ASSEMBLY);
     MatAssemblyEnd(petscMatrix, MAT_FINAL_ASSEMBLY);
@@ -246,8 +246,8 @@ namespace AMDiS {
     int nRankRows = meshDistributor->getNumberRankDofs(feSpace) * nComponents;
     int nOverallRows = meshDistributor->getNumberOverallDofs(feSpace) * nComponents;
 
-    VecCreateMPI(PETSC_COMM_WORLD, nRankRows, nOverallRows, &petscSolVec);
-    VecCreateMPI(PETSC_COMM_WORLD, nRankRows, nOverallRows, &petscTmpVec);
+    VecCreateMPI(*mpiComm, nRankRows, nOverallRows, &petscSolVec);
+    VecCreateMPI(*mpiComm, nRankRows, nOverallRows, &petscTmpVec);
   }
 
 
@@ -260,7 +260,7 @@ namespace AMDiS {
     int nRankRows = meshDistributor->getNumberRankDofs(feSpace) * nComponents;
     int nOverallRows = meshDistributor->getNumberOverallDofs(feSpace) * nComponents;
 
-    VecCreateMPI(PETSC_COMM_WORLD, nRankRows, nOverallRows, &petscRhsVec);
+    VecCreateMPI(*mpiComm, nRankRows, nOverallRows, &petscRhsVec);
 
     for (int i = 0; i < nComponents; i++)
       setDofVector(petscRhsVec, vec->getDOFVector(i), nComponents, i);
@@ -278,7 +278,7 @@ namespace AMDiS {
     const FiniteElemSpace *feSpace = meshDistributor->getFeSpace(0);
     int nComponents = vec.getSize();
 
-    KSPCreate(PETSC_COMM_WORLD, &solver);
+    KSPCreate(*mpiComm, &solver);
 
     KSPSetOperators(solver, petscMatrix, petscMatrix, SAME_NONZERO_PATTERN); 
     KSPSetTolerances(solver, 0.0, 1e-8, PETSC_DEFAULT, PETSC_DEFAULT);
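
With all distributed PETSc objects created on *mpiComm, the communicator the solvers run on is decided wherever that pointer is set. A hedged sketch of how the wiring could look, assuming the solver keeps the pointer next to its meshDistributor member (the setter name and the getMpiComm accessor are hypothetical and used only for illustration):

    // Hypothetical wiring, not part of this patch:
    // void PetscSolver::setMeshDistributor(MeshDistributor *md)
    // {
    //   meshDistributor = md;
    //   mpiComm = &(md->getMpiComm());  // all KSP/Mat/Vec objects above then
    //                                   // live on this communicator
    // }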