diff --git a/dune/gfe/parallel/globalindex.hh b/dune/gfe/parallel/globalindex.hh
index 3ae58d0c15c44971825e9b6e3d4f4fc44e97eca0..88c93c4a41ef190e847380456233ed58feb86e6f 100644
--- a/dune/gfe/parallel/globalindex.hh
+++ b/dune/gfe/parallel/globalindex.hh
@@ -333,10 +333,6 @@ public:
     return(nLocalEntity_);
   }
 
-  const GridView& getGridView() const {
-    return gridview_;
-  }
-
 protected:
   /** store data members */
  const GridView gridview_;                                                       /** store a copy of the grid view */
diff --git a/dune/gfe/parallel/globalp2mapper.hh b/dune/gfe/parallel/globalp2mapper.hh
index a99ccd28bc501d819495f669b98ef56cbf2d933f..5031de86691a3af98cbc7e9fba08b594f39c6522 100644
--- a/dune/gfe/parallel/globalp2mapper.hh
+++ b/dune/gfe/parallel/globalp2mapper.hh
@@ -30,7 +30,6 @@ namespace Dune {
     typedef std::map<int,int>    IndexMap;
 
     GlobalP2Mapper(const GridView& gridView)
-    : gridView_(gridView)
     {
       static_assert(GridView::dimension==2, "Only implemented for two-dimensional grids");
 
@@ -116,12 +115,6 @@ namespace Dune {
       return nOwnedLocalEntity_;
     }
 
-    const GridView& getGridView() const {
-      return gridView_;
-    }
-
-    const GridView gridView_;
-
     IndexMap localGlobalMap_;
     IndexMap globalLocalMap_;
 
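Note on the removals above: neither the global index class in globalindex.hh nor GlobalP2Mapper exposes its grid view any longer, so the process communicator can no longer be reached through the index map. A minimal sketch of the adjustment needed at a call site (guIndex and gridView are illustrative names, not part of this patch):

    // Previously the communicator was fetched through the index map:
    //   guIndex.getGridView().comm()
    // Now the caller keeps hold of it directly, e.g. via the grid view
    // the index map was built from:
    typedef typename GridView::CollectiveCommunication Communicator;
    const Communicator& comm = gridView.comm();
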
diff --git a/dune/gfe/parallel/matrixcommunicator.hh b/dune/gfe/parallel/matrixcommunicator.hh
index 195dc93d42a2e532efd88e2367371111b3f726d9..07da2e3bbb35f18517dc9f9599f23bc25513745b 100644
--- a/dune/gfe/parallel/matrixcommunicator.hh
+++ b/dune/gfe/parallel/matrixcommunicator.hh
@@ -9,7 +9,7 @@
 #include <dune/gfe/parallel/mpifunctions.hh>
 
 
-template<typename GUIndex, typename MatrixType, typename ColGUIndex=GUIndex>
+template<typename GUIndex, typename Communicator, typename MatrixType, typename ColGUIndex=GUIndex>
 class MatrixCommunicator {
 
   struct TransferMatrixTuple {
@@ -38,22 +38,24 @@ class MatrixCommunicator {
       }
 
     // Get number of matrix entries on each process
-    std::vector<int> localMatrixEntriesSizes(MPIFunctions::shareSizes(guIndex1_.getGridView().comm(), localMatrixEntries.size()));
+    std::vector<int> localMatrixEntriesSizes(MPIFunctions::shareSizes(communicator_, localMatrixEntries.size()));
 
     // Get matrix entries from every process
-    globalMatrixEntries = MPIFunctions::gatherv(guIndex1_.getGridView().comm(), localMatrixEntries, localMatrixEntriesSizes, root_rank);
+    globalMatrixEntries = MPIFunctions::gatherv(communicator_, localMatrixEntries, localMatrixEntriesSizes, root_rank);
   }
 
 public:
-  MatrixCommunicator(const GUIndex& rowIndex, const int& root)
+  MatrixCommunicator(const GUIndex& rowIndex, const Communicator& communicator, const int& root)
   : guIndex1_(rowIndex),
     guIndex2_(rowIndex),
+    communicator_(communicator),
     root_rank(root)
   {}
 
-  MatrixCommunicator(const GUIndex& rowIndex, const ColGUIndex& colIndex, const int& root)
+  MatrixCommunicator(const GUIndex& rowIndex, const ColGUIndex& colIndex, const Communicator& communicator, const int& root)
   : guIndex1_(rowIndex),
     guIndex2_(colIndex),
+    communicator_(communicator),
     root_rank(root)
   {}
 
@@ -110,6 +112,7 @@ public:
 private:
   const GUIndex& guIndex1_;
   const ColGUIndex& guIndex2_;
+  const Communicator& communicator_;
   int root_rank;
 
   std::vector<TransferMatrixTuple> globalMatrixEntries;
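For orientation, a minimal usage sketch of the extended MatrixCommunicator interface, mirroring the call sites in riemanniantrsolver.cc below; GUIndex, ScalarMatrixType, guIndex, gridView and localA are placeholder names assumed for this sketch:

    // The communicator is now an explicit template parameter and constructor
    // argument instead of being obtained from the index map's grid view.
    typedef typename GridView::CollectiveCommunication Communicator;
    MatrixCommunicator<GUIndex, Communicator, ScalarMatrixType>
        matrixComm(guIndex, gridView.comm(), 0);   // last argument: root rank
    ScalarMatrixType globalA = matrixComm.reduceAdd(localA);   // gather and accumulate on the root
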
diff --git a/dune/gfe/parallel/vectorcommunicator.hh b/dune/gfe/parallel/vectorcommunicator.hh
index a89172341ce4be1d3fd459336d12a20b6b4bc7ca..37e344ad5c829377a0374d743ddff63e257570de 100644
--- a/dune/gfe/parallel/vectorcommunicator.hh
+++ b/dune/gfe/parallel/vectorcommunicator.hh
@@ -7,7 +7,7 @@
 #include <dune/gfe/parallel/mpifunctions.hh>
 
 
-template<typename GUIndex, typename VectorType>
+template<typename GUIndex, typename Communicator, typename VectorType>
 class VectorCommunicator {
 
   struct TransferVectorTuple {
@@ -32,18 +32,20 @@ private:
         localVectorEntries.push_back(TransferVectorTuple(guIndex.index(k), localVector[k]));
 
     // Get number of vector entries on each process
-    localVectorEntriesSizes = MPIFunctions::shareSizes(guIndex.getGridView().comm(), localVectorEntries.size());
+    localVectorEntriesSizes = MPIFunctions::shareSizes(communicator_, localVectorEntries.size());
 
     // Get vector entries from every process
-    globalVectorEntries = MPIFunctions::gatherv(guIndex.getGridView().comm(), localVectorEntries, localVectorEntriesSizes, root_rank);
+    globalVectorEntries = MPIFunctions::gatherv(communicator_, localVectorEntries, localVectorEntriesSizes, root_rank);
   }
 
 public:
-  VectorCommunicator(const GUIndex& gi, const int& root)
-  : guIndex(gi), root_rank(root)
+  VectorCommunicator(const GUIndex& gi,
+                     const Communicator& communicator,
+                     const int& root)
+  : guIndex(gi), communicator_(communicator), root_rank(root)
   {
     // Get number of vector entries on each process
-    localVectorEntriesSizes = MPIFunctions::shareSizes(guIndex.getGridView().comm(), guIndex.nOwnedLocalEntity());
+    localVectorEntriesSizes = MPIFunctions::shareSizes(communicator, guIndex.nOwnedLocalEntity());
   }
 
   VectorType reduceAdd(const VectorType& localVector)
@@ -76,12 +78,12 @@ public:
     for (size_t k = 0; k < globalVectorEntries.size(); ++k)
       globalVectorEntries[k].value_ = global[globalVectorEntries[k].globalIndex_];
 
-    const int localSize = localVectorEntriesSizes[guIndex.getGridView().comm().rank()];
+    const int localSize = localVectorEntriesSizes[communicator_.rank()];
 
     // Create vector for transfer data
     std::vector<TransferVectorTuple> localVectorEntries(localSize);
 
-    MPIFunctions::scatterv(guIndex.getGridView().comm(), localVectorEntries, globalVectorEntries, localVectorEntriesSizes, root_rank);
+    MPIFunctions::scatterv(communicator_, localVectorEntries, globalVectorEntries, localVectorEntriesSizes, root_rank);
 
     // Create vector for local solution
     VectorType x(localSize);
@@ -95,6 +97,7 @@ public:
 
 private:
   const GUIndex& guIndex;
+  const Communicator& communicator_;
   int root_rank;
 
   std::vector<int> localVectorEntriesSizes;
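The same pattern applies to VectorCommunicator; a sketch under the same naming assumptions (CorrectionType and localRhs are placeholders):

    typedef typename GridView::CollectiveCommunication Communicator;
    VectorCommunicator<GUIndex, Communicator, CorrectionType>
        vectorComm(guIndex, gridView.comm(), 0);   // last argument: root rank
    CorrectionType globalRhs = vectorComm.reduceAdd(localRhs);
    // reduceCopy is used analogously, e.g. for the Dirichlet bitfield in riemanniantrsolver.cc.
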
diff --git a/dune/gfe/riemanniantrsolver.cc b/dune/gfe/riemanniantrsolver.cc
index 7866299a0752211ff8d2483d48f8a327edc194b6..942102e3350ae18fcae1e7eb2d32ea55dd9161f7 100644
--- a/dune/gfe/riemanniantrsolver.cc
+++ b/dune/gfe/riemanniantrsolver.cc
@@ -91,7 +91,9 @@ setup(const GridType& grid,
 #endif
 
     // Transfer all Dirichlet data to the master processor
-    VectorCommunicator<GUIndex, Dune::BitSetVector<blocksize> > vectorComm(*guIndex_, 0);
+    VectorCommunicator<GUIndex, typename GridType::LeafGridView::CollectiveCommunication, Dune::BitSetVector<blocksize> > vectorComm(*guIndex_,
+                                                                                                                                     grid_->leafGridView().comm(),
+                                                                                                                                     0);
     Dune::BitSetVector<blocksize>* globalDirichletNodes = NULL;
     globalDirichletNodes = new Dune::BitSetVector<blocksize>(vectorComm.reduceCopy(dirichletNodes));
 
@@ -125,7 +127,7 @@ setup(const GridType& grid,
         delete h1SemiNorm_;
 
 
-    MatrixCommunicator<GUIndex, ScalarMatrixType> matrixComm(*guIndex_, 0);
+    MatrixCommunicator<GUIndex, typename GridType::LeafGridView::CollectiveCommunication, ScalarMatrixType> matrixComm(*guIndex_, grid_->leafGridView().comm(), 0);
     ScalarMatrixType* A = new ScalarMatrixType(matrixComm.reduceAdd(localA));
 
     h1SemiNorm_ = new H1SemiNorm<CorrectionType>(*A);
@@ -185,7 +187,10 @@ setup(const GridType& grid,
         LeafP1GUIndex p1Index(grid_->leafGridView());
 
         typedef typename TruncatedCompressedMGTransfer<CorrectionType>::TransferOperatorType TransferOperatorType;
-        MatrixCommunicator<GUIndex, TransferOperatorType, LeafP1GUIndex> matrixComm(*guIndex_, p1Index, 0);
+        MatrixCommunicator<GUIndex,
+                           typename GridType::LeafGridView::CollectiveCommunication,
+                           TransferOperatorType,
+                           LeafP1GUIndex> matrixComm(*guIndex_, p1Index, grid_->leafGridView().comm(), 0);
 
         mmgStep->mgTransfer_.back() = new PKtoP1MGTransfer<CorrectionType>;
         Dune::shared_ptr<TransferOperatorType> topTransferOperator = Dune::make_shared<TransferOperatorType>(matrixComm.reduceCopy(topTransferOp->getMatrix()));
@@ -203,7 +208,9 @@ setup(const GridType& grid,
           LevelGUIndex coarseGUIndex(grid_->levelGridView(i+1));
 
           typedef typename TruncatedCompressedMGTransfer<CorrectionType>::TransferOperatorType TransferOperatorType;
-          MatrixCommunicator<LevelGUIndex, TransferOperatorType> matrixComm(fineGUIndex, coarseGUIndex, 0);
+          MatrixCommunicator<LevelGUIndex,
+                             typename GridType::LevelGridView::CollectiveCommunication,
+                             TransferOperatorType> matrixComm(fineGUIndex, coarseGUIndex, grid_->levelGridView(i+1).comm(), 0);
 
           mmgStep->mgTransfer_[i] = new TruncatedCompressedMGTransfer<CorrectionType>;
           Dune::shared_ptr<TransferOperatorType> transferOperatorMatrix = Dune::make_shared<TransferOperatorType>(matrixComm.reduceCopy(newTransferOp->getMatrix()));
@@ -292,8 +299,12 @@ void RiemannianTrustRegionSolver<GridType,TargetSpace>::solve()
     MatrixType stiffnessMatrix;
     CorrectionType rhs_global;
 
-    VectorCommunicator<GUIndex, CorrectionType> vectorComm(*guIndex_, 0);
-    MatrixCommunicator<GUIndex, MatrixType> matrixComm(*guIndex_, 0);
+    VectorCommunicator<GUIndex, typename GridType::LeafGridView::CollectiveCommunication, CorrectionType> vectorComm(*guIndex_,
+                                                                                                                     grid_->leafGridView().comm(),
+                                                                                                                     0);
+    MatrixCommunicator<GUIndex, typename GridType::LeafGridView::CollectiveCommunication, MatrixType> matrixComm(*guIndex_,
+                                                                                                                 grid_->leafGridView().comm(),
+                                                                                                                 0);
 
     for (int i=0; i<maxTrustRegionSteps_; i++) {
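The spelled-out CollectiveCommunication types make the instantiations in riemanniantrsolver.cc rather long; a local typedef would shorten the call sites. A possible follow-up, shown here only as a readability sketch and not part of this patch:

    typedef typename GridType::LeafGridView::CollectiveCommunication LeafComm;
    VectorCommunicator<GUIndex, LeafComm, CorrectionType> vectorComm(*guIndex_, grid_->leafGridView().comm(), 0);
    MatrixCommunicator<GUIndex, LeafComm, MatrixType>     matrixComm(*guIndex_, grid_->leafGridView().comm(), 0);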