diff --git a/dune/gfe/mixedriemanniantrsolver.cc b/dune/gfe/mixedriemanniantrsolver.cc
index 15def31270de0bec83cdc35286bc26e4f320a14d..1c1a399339a37988533866fed14739cbb75c78bc 100644
--- a/dune/gfe/mixedriemanniantrsolver.cc
+++ b/dune/gfe/mixedriemanniantrsolver.cc
@@ -5,7 +5,6 @@
 #include <dune/istl/io.hh>
 
 #include <dune/functions/functionspacebases/lagrangebasis.hh>
-#include <dune/fufem/functionspacebases/dunefunctionsbasis.hh>
 #include <dune/fufem/assemblers/operatorassembler.hh>
 #include <dune/fufem/assemblers/localassemblers/laplaceassembler.hh>
 #include <dune/fufem/assemblers/localassemblers/massassembler.hh>
@@ -147,11 +146,8 @@ setup(const GridType& grid,
     // //////////////////////////////////////////////////////////////////////////////////////
     //   Assemble a Laplace matrix to create a norm that's equivalent to the H1-norm
     // //////////////////////////////////////////////////////////////////////////////////////
-    typedef DuneFunctionsBasis<Basis0> FufemBasis0;
-    FufemBasis0 basis0(grid.leafGridView());
-
-    typedef DuneFunctionsBasis<Basis1> FufemBasis1;
-    FufemBasis1 basis1(grid.leafGridView());
+    Basis0 basis0(grid.leafGridView());
+    Basis1 basis1(grid.leafGridView());
 
 #if 0
     BasisType basis(grid.leafGridView());
@@ -194,16 +190,18 @@ setup(const GridType& grid,
     mmgStep0->mgTransfer_.resize(numLevels-1);
     mmgStep1->mgTransfer_.resize(numLevels-1);
 
-    if (basis0.getLocalFiniteElement(*grid.leafGridView().template begin<0>()).localBasis().order() > 1)
+    // Bind to some element so we can query the FE space order
+    auto localView0 = basis0.localView();
+    localView0.bind(*grid.leafGridView().template begin<0>());
+
+    if (localView0.tree().finiteElement().localBasis().order() > 1)
     {
       if (numLevels>1) {
         typedef typename TruncatedCompressedMGTransfer<CorrectionType0>::TransferOperatorType TransferOperatorType;
-        DuneFunctionsBasis<Dune::Functions::LagrangeBasis<typename GridType::LeafGridView,1> > p1Basis(grid_->leafGridView());
+        Dune::Functions::LagrangeBasis<typename GridType::LeafGridView,1> p1Basis(grid_->leafGridView());
 
         TransferOperatorType pkToP1TransferMatrix;
-        assembleBasisInterpolationMatrix<TransferOperatorType,
-                                         DuneFunctionsBasis<Dune::Functions::LagrangeBasis<typename GridType::LeafGridView,1> >,
-                                         FufemBasis0>(pkToP1TransferMatrix,p1Basis,*basis0_);
+        assembleGlobalBasisTransferMatrix(pkToP1TransferMatrix,p1Basis,*basis0_);
 
         mmgStep0->mgTransfer_.back() = std::make_shared<TruncatedCompressedMGTransfer<CorrectionType0>>();
         std::shared_ptr<TransferOperatorType> topTransferOperator = std::make_shared<TransferOperatorType>(pkToP1TransferMatrix);
@@ -234,16 +232,17 @@ setup(const GridType& grid,
 
     }
 
-    if (basis1.getLocalFiniteElement(*grid.leafGridView().template begin<0>()).localBasis().order() > 1)
+    auto localView1 = basis1.localView();
+    localView1.bind(*grid.leafGridView().template begin<0>());
+
+    if (localView1.tree().finiteElement().localBasis().order() > 1)
     {
       if (numLevels>1) {
         typedef typename TruncatedCompressedMGTransfer<CorrectionType1>::TransferOperatorType TransferOperatorType;
-        DuneFunctionsBasis<Dune::Functions::LagrangeBasis<typename GridType::LeafGridView,1> > p1Basis(grid_->leafGridView());
+        Dune::Functions::LagrangeBasis<typename GridType::LeafGridView,1> p1Basis(grid_->leafGridView());
 
         TransferOperatorType pkToP1TransferMatrix;
-        assembleBasisInterpolationMatrix<TransferOperatorType,
-                                         DuneFunctionsBasis<Dune::Functions::LagrangeBasis<typename GridType::LeafGridView,1> >,
-                                         FufemBasis1>(pkToP1TransferMatrix,p1Basis,*basis1_);
+        assembleGlobalBasisTransferMatrix(pkToP1TransferMatrix,p1Basis,*basis1_);
 
         mmgStep1->mgTransfer_.back() = std::make_shared<TruncatedCompressedMGTransfer<CorrectionType1>>();
         std::shared_ptr<TransferOperatorType> topTransferOperator = std::make_shared<TransferOperatorType>(pkToP1TransferMatrix);
diff --git a/dune/gfe/riemannianpnsolver.cc b/dune/gfe/riemannianpnsolver.cc
index 64125625d13cd289838b3c87dba233c6740f932c..7eab28c746c8c4292617aab343d0978aa5dc680a 100644
--- a/dune/gfe/riemannianpnsolver.cc
+++ b/dune/gfe/riemannianpnsolver.cc
@@ -5,8 +5,7 @@
 
 #include <dune/grid/common/mcmgmapper.hh>
 
-#include <dune/fufem/functionspacebases/dunefunctionsbasis.hh>
-#include <dune/fufem/assemblers/operatorassembler.hh>
+#include <dune/fufem/assemblers/dunefunctionsoperatorassembler.hh>
 #include <dune/fufem/assemblers/localassemblers/laplaceassembler.hh>
 #include <dune/fufem/assemblers/localassemblers/massassembler.hh>
 #include <dune/fufem/assemblers/basisinterpolationmatrixassembler.hh>
@@ -69,15 +68,14 @@ setup(const GridType& grid,
     //   Assemble a Laplace matrix to create a norm that's equivalent to the H1-norm
     // //////////////////////////////////////////////////////////////////////////////////////
 
-    typedef DuneFunctionsBasis<Basis> FufemBasis;
-    FufemBasis basis(assembler_->getBasis());
-    OperatorAssembler<FufemBasis,FufemBasis> operatorAssembler(basis, basis);
+    const Basis& basis = assembler_->getBasis();
+    Dune::Fufem::DuneFunctionsOperatorAssembler<Basis,Basis> operatorAssembler(basis, basis);
 
-    LaplaceAssembler<GridType, typename FufemBasis::LocalFiniteElement, typename FufemBasis::LocalFiniteElement> laplaceStiffness;
+    Dune::Fufem::LaplaceAssembler laplaceStiffness;
     typedef Dune::BCRSMatrix<Dune::FieldMatrix<double,1,1> > ScalarMatrixType;
     ScalarMatrixType localA;
 
-    operatorAssembler.assemble(laplaceStiffness, localA);
+    operatorAssembler.assembleBulk(Dune::Fufem::istlMatrixBackend(localA), laplaceStiffness);
 
 #if HAVE_MPI
     LocalMapper localMapper = MapperFactory<Basis>::createLocalMapper(grid_->leafGridView());
@@ -112,10 +110,10 @@ setup(const GridType& grid,
     //   This will be used to monitor the gradient
     // //////////////////////////////////////////////////////////////////////////////////////
 
-    MassAssembler<GridType, typename Basis::LocalView::Tree::FiniteElement, typename Basis::LocalView::Tree::FiniteElement> massStiffness;
+    Dune::Fufem::MassAssembler massStiffness;
     ScalarMatrixType localMassMatrix;
 
-    operatorAssembler.assemble(massStiffness, localMassMatrix);
+    operatorAssembler.assembleBulk(Dune::Fufem::istlMatrixBackend(localMassMatrix), massStiffness);
 
 #if HAVE_MPI
     auto massMatrix = std::make_shared<ScalarMatrixType>(matrixComm.reduceAdd(localMassMatrix));
diff --git a/dune/gfe/riemanniantrsolver.cc b/dune/gfe/riemanniantrsolver.cc
index a7169a143e456a80ab322c0ac9122a8d876eb1f6..9e3e9e50f290261b2536c3434b834cd1eeea6cbe 100644
--- a/dune/gfe/riemanniantrsolver.cc
+++ b/dune/gfe/riemanniantrsolver.cc
@@ -5,8 +5,7 @@
 
 #include <dune/grid/common/mcmgmapper.hh>
 
-#include <dune/fufem/functionspacebases/dunefunctionsbasis.hh>
-#include <dune/fufem/assemblers/operatorassembler.hh>
+#include <dune/fufem/assemblers/dunefunctionsoperatorassembler.hh>
 #include <dune/fufem/assemblers/localassemblers/laplaceassembler.hh>
 #include <dune/fufem/assemblers/localassemblers/massassembler.hh>
 #include <dune/fufem/assemblers/basisinterpolationmatrixassembler.hh>
@@ -116,15 +115,14 @@ setup(const GridType& grid,
     //   Assemble a Laplace matrix to create a norm that's equivalent to the H1-norm
     // //////////////////////////////////////////////////////////////////////////////////////
 
-    typedef DuneFunctionsBasis<Basis> FufemBasis;
-    FufemBasis basis(assembler_->getBasis());
-    OperatorAssembler<FufemBasis,FufemBasis> operatorAssembler(basis, basis);
+    const Basis& basis = assembler_->getBasis();
+    Dune::Fufem::DuneFunctionsOperatorAssembler<Basis,Basis> operatorAssembler(basis, basis);
 
-    LaplaceAssembler<GridType, typename FufemBasis::LocalFiniteElement, typename FufemBasis::LocalFiniteElement> laplaceStiffness;
+    Dune::Fufem::LaplaceAssembler laplaceStiffness;
     typedef Dune::BCRSMatrix<Dune::FieldMatrix<double,1,1> > ScalarMatrixType;
     ScalarMatrixType localA;
 
-    operatorAssembler.assemble(laplaceStiffness, localA);
+    operatorAssembler.assembleBulk(Dune::Fufem::istlMatrixBackend(localA), laplaceStiffness);
 
 #if HAVE_MPI
     LocalMapper localMapper = MapperFactory<Basis>::createLocalMapper(grid_->leafGridView());
@@ -153,10 +151,10 @@ setup(const GridType& grid,
     //   This will be used to monitor the gradient
     // //////////////////////////////////////////////////////////////////////////////////////
 
-    MassAssembler<GridType, typename Basis::LocalView::Tree::FiniteElement, typename Basis::LocalView::Tree::FiniteElement> massStiffness;
+    Dune::Fufem::MassAssembler massStiffness;
     ScalarMatrixType localMassMatrix;
 
-    operatorAssembler.assemble(massStiffness, localMassMatrix);
+    operatorAssembler.assembleBulk(Dune::Fufem::istlMatrixBackend(localMassMatrix), massStiffness);
 
 #if HAVE_MPI
     auto massMatrix = std::make_shared<ScalarMatrixType>(matrixComm.reduceAdd(localMassMatrix));
@@ -203,12 +201,10 @@ setup(const GridType& grid,
     if (not isP1Basis)
     {
         typedef typename TruncatedCompressedMGTransfer<CorrectionType>::TransferOperatorType TransferOperatorType;
-        DuneFunctionsBasis<Dune::Functions::LagrangeBasis<typename GridType::LeafGridView,1> > p1Basis(grid_->leafGridView());
+        Dune::Functions::LagrangeBasis<typename GridType::LeafGridView,1> p1Basis(grid_->leafGridView());
 
         TransferOperatorType pkToP1TransferMatrix;
-        assembleBasisInterpolationMatrix<TransferOperatorType,
-                                         DuneFunctionsBasis<Dune::Functions::LagrangeBasis<typename GridType::LeafGridView,1> >,
-                                         FufemBasis>(pkToP1TransferMatrix,p1Basis,basis);
+        assembleGlobalBasisTransferMatrix(pkToP1TransferMatrix,p1Basis,basis);
 #if HAVE_MPI
         // If we are on more than 1 processors, join all local transfer matrices on rank 0,
         // and construct a single global transfer operator there.