diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index e2e965b05c5e080eccab10e7f19aed93aca2eda1..b3c945cd28d141a75cdc5ac92ed1c07a1656c562 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -59,7 +59,7 @@ dune:git clang:
   - *before
   script: duneci-standard-test
 
-dune:git dune-parmg dune-vtk dune-curvedgeometry dune-curvedgrid gcc:
+dune:git dune-parmg dune-vtk dune-curvedgeometry dune-curvedgrid dune-foamgrid gcc:
   image: registry.dune-project.org/docker/ci/dune:git-debian-10-gcc-8-17
   before_script:
   - *patch-dune-common
@@ -68,9 +68,10 @@ dune:git dune-parmg dune-vtk dune-curvedgeometry dune-curvedgrid gcc:
   - duneci-install-module https://gitlab-ci-token:${CI_JOB_TOKEN}@gitlab.mn.tu-dresden.de/spraetor/dune-vtk.git
   - duneci-install-module https://gitlab-ci-token:${CI_JOB_TOKEN}@gitlab.mn.tu-dresden.de/spraetor/dune-curvedgeometry.git
   - duneci-install-module https://gitlab-ci-token:${CI_JOB_TOKEN}@gitlab.mn.tu-dresden.de/iwr/dune-curvedgrid.git
+  - duneci-install-module https://gitlab.dune-project.org/extensions/dune-foamgrid.git
   script: duneci-standard-test
 
-dune:git dune-parmg dune-vtk dune-curvedgeometry dune-curvedgrid clang:
+dune:git dune-parmg dune-vtk dune-curvedgeometry dune-curvedgrid dune-foamgrid clang:
   image: registry.dune-project.org/docker/ci/dune:git-ubuntu-20.04-clang-10-20
   before_script:
   - *patch-dune-common
@@ -79,6 +80,7 @@ dune:git dune-parmg dune-vtk dune-curvedgeometry dune-curvedgrid clang:
   - duneci-install-module https://gitlab-ci-token:${CI_JOB_TOKEN}@gitlab.mn.tu-dresden.de/spraetor/dune-vtk.git
   - duneci-install-module https://gitlab-ci-token:${CI_JOB_TOKEN}@gitlab.mn.tu-dresden.de/spraetor/dune-curvedgeometry.git
   - duneci-install-module https://gitlab-ci-token:${CI_JOB_TOKEN}@gitlab.mn.tu-dresden.de/iwr/dune-curvedgrid.git
+  - duneci-install-module https://gitlab.dune-project.org/extensions/dune-foamgrid.git
   script: duneci-standard-test
 
 # Check for spelling mistakes in text
diff --git a/dune/gfe/riemannianpnsolver.cc b/dune/gfe/riemannianpnsolver.cc
index 480c1ae9ee67c000183b4f15934ad96c97d1e408..1085ad501533d53e70d075e3d10a7da3c0fee2e7 100644
--- a/dune/gfe/riemannianpnsolver.cc
+++ b/dune/gfe/riemannianpnsolver.cc
@@ -21,7 +21,8 @@
 #include <dune/solvers/norms/twonorm.hh>
 #include <dune/solvers/norms/h1seminorm.hh>
 
-#if HAVE_MPI
+// The VectorCommunicator and MatrixCommunicator work only for GRID_DIM == WORLD_DIM == 2 or GRID_DIM == WORLD_DIM == 3
+#if HAVE_MPI && (!defined(GRID_DIM) || GRID_DIM < 3) && (!defined(WORLD_DIM) || WORLD_DIM < 3)
 #include <dune/gfe/parallel/matrixcommunicator.hh>
 #include <dune/gfe/parallel/vectorcommunicator.hh>
 #endif
@@ -46,15 +47,13 @@ setup(const GridType& grid,
     instrumented_             = instrumented;
     ignoreNodes_              = &dirichletNodes;
 
+// The VectorCommunicator and MatrixCommunicator work only for GRID_DIM == WORLD_DIM == 2 or GRID_DIM == WORLD_DIM == 3
+#if HAVE_MPI && (!defined(GRID_DIM) || GRID_DIM < 3) && (!defined(WORLD_DIM) || WORLD_DIM < 3)
     //////////////////////////////////////////////////////////////////
     //  Create global numbering for matrix and vector transfer
     //////////////////////////////////////////////////////////////////
 
-#if HAVE_MPI
     globalMapper_ = std::make_unique<GlobalMapper>(grid_->leafGridView());
-#endif
-    
-#if HAVE_MPI
     // Transfer all Dirichlet data to the master processor
     VectorCommunicator<GlobalMapper, typename GridType::LeafGridView::CollectiveCommunication, Dune::BitSetVector<blocksize> > vectorComm(*globalMapper_,
                                                                                                                                      grid_->leafGridView().comm(),
@@ -77,7 +76,8 @@ setup(const GridType& grid,
 
     operatorAssembler.assembleBulk(Dune::Fufem::istlMatrixBackend(localA), laplaceStiffness);
 
-#if HAVE_MPI
+// The VectorCommunicator and MatrixCommunicator work only for GRID_DIM == WORLD_DIM == 2 or GRID_DIM == WORLD_DIM == 3
+#if HAVE_MPI && (!defined(GRID_DIM) || GRID_DIM < 3) && (!defined(WORLD_DIM) || WORLD_DIM < 3)
     LocalMapper localMapper = MapperFactory<Basis>::createLocalMapper(grid_->leafGridView());
 
     MatrixCommunicator<GlobalMapper,
@@ -115,7 +115,8 @@ setup(const GridType& grid,
 
     operatorAssembler.assembleBulk(Dune::Fufem::istlMatrixBackend(localMassMatrix), massStiffness);
 
-#if HAVE_MPI
+// The VectorCommunicator and MatrixCommunicator work only for GRID_DIM == WORLD_DIM == 2 or GRID_DIM == WORLD_DIM == 3
+#if HAVE_MPI && (!defined(GRID_DIM) || GRID_DIM < 3) && (!defined(WORLD_DIM) || WORLD_DIM < 3)
     auto massMatrix = std::make_shared<ScalarMatrixType>(matrixComm.reduceAdd(localMassMatrix));
 #else
     auto massMatrix = std::make_shared<ScalarMatrixType>(localMassMatrix);
@@ -170,7 +171,8 @@ void RiemannianProximalNewtonSolver<Basis,TargetSpace,Assembler>::solve()
     bool recomputeGradientHessian = true;
     CorrectionType rhs, rhs_global;
     MatrixType stiffnessMatrix;
-#if HAVE_MPI
+// The VectorCommunicator and MatrixCommunicator work only for GRID_DIM == WORLD_DIM == 2 or GRID_DIM == WORLD_DIM == 3
+#if HAVE_MPI && (!defined(GRID_DIM) || GRID_DIM < 3) && (!defined(WORLD_DIM) || WORLD_DIM < 3)
     VectorCommunicator<GlobalMapper, typename GridType::LeafGridView::CollectiveCommunication, CorrectionType> vectorComm(*globalMapper_,
                                                                                                                      grid_->leafGridView().comm(),
                                                                                                                      0);
@@ -216,7 +218,8 @@ void RiemannianProximalNewtonSolver<Basis,TargetSpace,Assembler>::solve()
             rhs *= -1;        // The right hand side is the _negative_ gradient
 
             // Transfer vector data
-#if HAVE_MPI
+// The VectorCommunicator and MatrixCommunicator work only for GRID_DIM == WORLD_DIM == 2 or GRID_DIM == WORLD_DIM == 3
+#if HAVE_MPI && (!defined(GRID_DIM) || GRID_DIM < 3) && (!defined(WORLD_DIM) || WORLD_DIM < 3)
             rhs_global = vectorComm.reduceAdd(rhs);
 #else
             rhs_global = rhs;
@@ -234,8 +237,8 @@ void RiemannianProximalNewtonSolver<Basis,TargetSpace,Assembler>::solve()
               std::cout << "Overall assembly took " << gradientTimer.elapsed() << " sec." << std::endl;
             totalAssemblyTime += gradientTimer.elapsed();
 
-            // Transfer matrix data
-#if HAVE_MPI
+// The VectorCommunicator and MatrixCommunicator work only for GRID_DIM == WORLD_DIM == 2 or GRID_DIM == WORLD_DIM == 3
+#if HAVE_MPI && (!defined(GRID_DIM) || GRID_DIM < 3) && (!defined(WORLD_DIM) || WORLD_DIM < 3)
             stiffnessMatrix = matrixComm.reduceAdd(*hessianMatrix_);
 #else
             stiffnessMatrix = *hessianMatrix_;
@@ -280,7 +283,8 @@ void RiemannianProximalNewtonSolver<Basis,TargetSpace,Assembler>::solve()
         if (grid_->comm().size()>1 and rank==0)
             std::cout << "Transfer solution back to root process ..." << std::endl;
 
-#if HAVE_MPI
+// The VectorCommunicator and MatrixCommunicator work only for GRID_DIM == WORLD_DIM == 2 or GRID_DIM == WORLD_DIM == 3
+#if HAVE_MPI && (!defined(GRID_DIM) || GRID_DIM < 3) && (!defined(WORLD_DIM) || WORLD_DIM < 3)
         solved = grid_->comm().min(solved);
         if (solved) {
             corr = vectorComm.scatter(corr_global);
diff --git a/dune/gfe/riemanniantrsolver.cc b/dune/gfe/riemanniantrsolver.cc
index 528f8e2eb10faff4a77764b65033ca62c7302cee..d5bdce74ab503275b860496b21f22fe1c7a8746f 100644
--- a/dune/gfe/riemanniantrsolver.cc
+++ b/dune/gfe/riemanniantrsolver.cc
@@ -22,7 +22,8 @@
 #include <dune/solvers/norms/twonorm.hh>
 #include <dune/solvers/norms/h1seminorm.hh>
 
-#if HAVE_MPI
+// The VectorCommunicator and MatrixCommunicator work only for GRID_DIM == WORLD_DIM == 2 or GRID_DIM == WORLD_DIM == 3
+#if HAVE_MPI && (!defined(GRID_DIM) || GRID_DIM < 3) && (!defined(WORLD_DIM) || WORLD_DIM < 3)
 #include <dune/gfe/parallel/matrixcommunicator.hh>
 #include <dune/gfe/parallel/vectorcommunicator.hh>
 #endif
@@ -60,11 +61,12 @@ setup(const GridType& grid,
 
     int numLevels = grid_->maxLevel()+1;
 
+// The VectorCommunicator and MatrixCommunicator work only for GRID_DIM == WORLD_DIM == 2 or GRID_DIM == WORLD_DIM == 3
+#if HAVE_MPI && (!defined(GRID_DIM) || GRID_DIM < 3) && (!defined(WORLD_DIM) || WORLD_DIM < 3)
     //////////////////////////////////////////////////////////////////
     //  Create global numbering for matrix and vector transfer
     //////////////////////////////////////////////////////////////////
 
-#if HAVE_MPI
     globalMapper_ = std::make_unique<GlobalMapper>(grid_->leafGridView());
 #endif
 
@@ -89,7 +91,8 @@ setup(const GridType& grid,
                                                                             baseNorm,
                                                                             Solver::QUIET);
 #endif
-#if HAVE_MPI
+// The VectorCommunicator and MatrixCommunicator work only for GRID_DIM == WORLD_DIM == 2 or GRID_DIM == WORLD_DIM == 3
+#if HAVE_MPI && (!defined(GRID_DIM) || GRID_DIM < 3) && (!defined(WORLD_DIM) || WORLD_DIM < 3)
     // Transfer all Dirichlet data to the master processor
     VectorCommunicator<GlobalMapper, typename GridType::LeafGridView::CollectiveCommunication, Dune::BitSetVector<blocksize> > vectorComm(*globalMapper_,
                                                                                                                                      grid_->leafGridView().comm(),
@@ -124,7 +127,8 @@ setup(const GridType& grid,
 
     operatorAssembler.assembleBulk(Dune::Fufem::istlMatrixBackend(localA), laplaceStiffness);
 
-#if HAVE_MPI
+// The VectorCommunicator and MatrixCommunicator work only for GRID_DIM == WORLD_DIM == 2 or GRID_DIM == WORLD_DIM == 3
+#if HAVE_MPI && (!defined(GRID_DIM) || GRID_DIM < 3) && (!defined(WORLD_DIM) || WORLD_DIM < 3)
     LocalMapper localMapper = MapperFactory<Basis>::createLocalMapper(grid_->leafGridView());
 
     MatrixCommunicator<GlobalMapper,
@@ -156,7 +160,8 @@ setup(const GridType& grid,
 
     operatorAssembler.assembleBulk(Dune::Fufem::istlMatrixBackend(localMassMatrix), massStiffness);
 
-#if HAVE_MPI
+// The VectorCommunicator and MatrixCommunicator work only for GRID_DIM == WORLD_DIM == 2 or GRID_DIM == WORLD_DIM == 3
+#if HAVE_MPI && (!defined(GRID_DIM) || GRID_DIM < 3) && (!defined(WORLD_DIM) || WORLD_DIM < 3)
     auto massMatrix = std::make_shared<ScalarMatrixType>(matrixComm.reduceAdd(localMassMatrix));
 #else
     auto massMatrix = std::make_shared<ScalarMatrixType>(localMassMatrix);
@@ -205,7 +210,9 @@ setup(const GridType& grid,
 
         TransferOperatorType pkToP1TransferMatrix;
         assembleGlobalBasisTransferMatrix(pkToP1TransferMatrix,p1Basis,basis);
-#if HAVE_MPI
+
+// The VectorCommunicator and MatrixCommunicator work only for GRID_DIM == WORLD_DIM == 2 or GRID_DIM == WORLD_DIM == 3
+#if HAVE_MPI && (!defined(GRID_DIM) || GRID_DIM < 3) && (!defined(WORLD_DIM) || WORLD_DIM < 3)
         // If we are on more than 1 processors, join all local transfer matrices on rank 0,
         // and construct a single global transfer operator there.
         typedef Dune::GlobalP1Mapper<Dune::Functions::LagrangeBasis<typename Basis::GridView,1>> GlobalLeafP1Mapper;
@@ -237,7 +244,8 @@ setup(const GridType& grid,
         // Construct the local multigrid transfer matrix
         auto newTransferOp = std::make_unique<TruncatedCompressedMGTransfer<CorrectionType>>();
         newTransferOp->setup(*grid_,i,i+1);
-#if HAVE_MPI
+// The VectorCommunicator and MatrixCommunicator work only for GRID_DIM == WORLD_DIM == 2 or GRID_DIM == WORLD_DIM == 3
+#if HAVE_MPI && (!defined(GRID_DIM) || GRID_DIM < 3) && (!defined(WORLD_DIM) || WORLD_DIM < 3)
         // If we are on more than 1 processors, join all local transfer matrices on rank 0,
         // and construct a single global transfer operator there.
         typedef Dune::Functions::LagrangeBasis<typename GridType::LevelGridView, 1> FEBasis;
@@ -250,7 +258,8 @@ setup(const GridType& grid,
         LevelLocalMapper coarseLevelLocalMapper(grid_->levelGridView(i), Dune::mcmgVertexLayout());
 #endif
         typedef typename TruncatedCompressedMGTransfer<CorrectionType>::TransferOperatorType TransferOperatorType;
-#if HAVE_MPI
+// The VectorCommunicator and MatrixCommunicator work only for GRID_DIM == WORLD_DIM == 2 or GRID_DIM == WORLD_DIM == 3
+#if HAVE_MPI && (!defined(GRID_DIM) || GRID_DIM < 3) && (!defined(WORLD_DIM) || WORLD_DIM < 3)
         MatrixCommunicator<GlobalLevelP1Mapper,
                            typename GridType::LevelGridView,
                            typename GridType::LevelGridView,
@@ -274,7 +283,8 @@ setup(const GridType& grid,
 
     if (rank==0)
     {
-#if HAVE_MPI
+// The VectorCommunicator and MatrixCommunicator work only for GRID_DIM == WORLD_DIM == 2 or GRID_DIM == WORLD_DIM == 3
+#if HAVE_MPI && (!defined(GRID_DIM) || GRID_DIM < 3) && (!defined(WORLD_DIM) || WORLD_DIM < 3)
         hasObstacle_.resize(globalMapper_->size(), true);
 #else
         hasObstacle_.resize(basis.size(), true);
@@ -298,7 +308,8 @@ void RiemannianTrustRegionSolver<Basis,TargetSpace,Assembler>::solve()
         mgStep = dynamic_cast<MonotoneMGStep<MatrixType,CorrectionType>*>(&loopSolver->getIterationStep());
     }
 
-#if HAVE_MPI
+// The VectorCommunicator and MatrixCommunicator work only for GRID_DIM == WORLD_DIM == 2 or GRID_DIM == WORLD_DIM == 3
+#if HAVE_MPI && (!defined(GRID_DIM) || GRID_DIM < 3) && (!defined(WORLD_DIM) || WORLD_DIM < 3)
     MaxNormTrustRegion<blocksize> trustRegion(globalMapper_->size(), initialTrustRegionRadius_);
 #else
     const Basis& basis = assembler_->getBasis();
@@ -337,7 +348,8 @@ void RiemannianTrustRegionSolver<Basis,TargetSpace,Assembler>::solve()
     CorrectionType rhs;
     MatrixType stiffnessMatrix;
     CorrectionType rhs_global;
-#if HAVE_MPI
+// The VectorCommunicator and MatrixCommunicator work only for GRID_DIM == WORLD_DIM == 2 or GRID_DIM == WORLD_DIM == 3
+#if HAVE_MPI && (!defined(GRID_DIM) || GRID_DIM < 3) && (!defined(WORLD_DIM) || WORLD_DIM < 3)
     VectorCommunicator<GlobalMapper, typename GridType::LeafGridView::CollectiveCommunication, CorrectionType> vectorComm(*globalMapper_,
                                                                                                                      grid_->leafGridView().comm(),
                                                                                                                      0);
@@ -388,7 +400,8 @@ void RiemannianTrustRegionSolver<Basis,TargetSpace,Assembler>::solve()
             rhs *= -1;        // The right hand side is the _negative_ gradient
 
             // Transfer vector data
-#if HAVE_MPI
+// The VectorCommunicator and MatrixCommunicator work only for GRID_DIM == WORLD_DIM == 2 or GRID_DIM == WORLD_DIM == 3
+#if HAVE_MPI && (!defined(GRID_DIM) || GRID_DIM < 3) && (!defined(WORLD_DIM) || WORLD_DIM < 3)
             rhs_global = vectorComm.reduceAdd(rhs);
 #else
             rhs_global = rhs;
@@ -407,8 +420,8 @@ void RiemannianTrustRegionSolver<Basis,TargetSpace,Assembler>::solve()
               std::cout << "Overall assembly took " << gradientTimer.elapsed() << " sec." << std::endl;
             totalAssemblyTime += gradientTimer.elapsed();
 
-            // Transfer matrix data
-#if HAVE_MPI
+// The VectorCommunicator and MatrixCommunicator work only for GRID_DIM == WORLD_DIM == 2 or GRID_DIM == WORLD_DIM == 3
+#if HAVE_MPI && (!defined(GRID_DIM) || GRID_DIM < 3) && (!defined(WORLD_DIM) || WORLD_DIM < 3)
             stiffnessMatrix = matrixComm.reduceAdd(*hessianMatrix_);
 #else
             stiffnessMatrix = *hessianMatrix_;
@@ -456,7 +469,8 @@ void RiemannianTrustRegionSolver<Basis,TargetSpace,Assembler>::solve()
         if (grid_->comm().size()>1 and rank==0)
             std::cout << "Transfer solution back to root process ..." << std::endl;
 
-#if HAVE_MPI
+// The VectorCommunicator and MatrixCommunicator work only for GRID_DIM == WORLD_DIM == 2 or GRID_DIM == WORLD_DIM == 3
+#if HAVE_MPI && (!defined(GRID_DIM) || GRID_DIM < 3) && (!defined(WORLD_DIM) || WORLD_DIM < 3)
         solved = grid_->comm().min(solved);
         if (solved) {
             corr = vectorComm.scatter(corr_global);
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 4962aa5d98b6055ca4856b75545ed8a336fda077..b0b4eeca145e3857724a98f03e3bb142d18598a2 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -1,5 +1,4 @@
 set(programs compute-disc-error
-             cosserat-continuum
              gradient-flow
              harmonicmaps
              simofoxshell)
@@ -13,8 +12,26 @@ endif()
 if (dune-parmg_FOUND AND dune-curvedgeometry_FOUND AND ${DUNE_ELASTICITY_VERSION} VERSION_GREATER_EQUAL 2.8)
 	set(programs film-on-substrate ${programs})
 endif()
+
 foreach(_program ${programs})
   add_executable(${_program} ${_program}.cc)
+endforeach()
+
+add_executable("cosserat-continuum-2d-in-2d" cosserat-continuum.cc)
+set_property(TARGET "cosserat-continuum-2d-in-2d" APPEND PROPERTY COMPILE_DEFINITIONS "GRID_DIM=2; WORLD_DIM=2")
+set(programs cosserat-continuum-2d-in-2d ${programs})
+
+if (dune-foamgrid_FOUND)
+  add_executable("cosserat-continuum-2d-in-3d" cosserat-continuum.cc)
+  set_property(TARGET "cosserat-continuum-2d-in-3d" APPEND PROPERTY COMPILE_DEFINITIONS "GRID_DIM=2; WORLD_DIM=3")
+  set(programs cosserat-continuum-2d-in-3d ${programs})
+endif()
+
+add_executable("cosserat-continuum-3d-in-3d" cosserat-continuum.cc)
+set_property(TARGET "cosserat-continuum-3d-in-3d" APPEND PROPERTY COMPILE_DEFINITIONS "GRID_DIM=3; WORLD_DIM=3")
+set(programs cosserat-continuum-3d-in-3d ${programs})
+
+foreach(_program ${programs})
   target_link_dune_default_libraries(${_program})
   add_dune_pythonlibs_flags(${_program})
   if (${DUNE_COMMON_VERSION} VERSION_GREATER_EQUAL 2.8)
diff --git a/src/cosserat-continuum.cc b/src/cosserat-continuum.cc
index c80e6b58de55032d7c75c4ed704eb344d541a283..607b127670e63cf27082873d30ed0476f6f8f4ce 100644
--- a/src/cosserat-continuum.cc
+++ b/src/cosserat-continuum.cc
@@ -66,8 +66,8 @@
 
 
 // grid dimension
-const int dim = 2;
-const int dimworld = 2;
+const int dim = GRID_DIM;
+const int dimworld = WORLD_DIM;
 
 // Order of the approximation space for the displacement
 const int displacementOrder = 2;