diff --git a/AMDiS/src/parallel/MeshLevelData.h b/AMDiS/src/parallel/MeshLevelData.h
index 66f826abbecb05794b18c5fbedbd4f8f9fabfb1b..5b1a0962e982c6a12180f0118bfb2bb832f2b2f9 100644
--- a/AMDiS/src/parallel/MeshLevelData.h
+++ b/AMDiS/src/parallel/MeshLevelData.h
@@ -20,16 +20,15 @@
 
 /** \file MeshLevelData.h */
 
-#ifndef AMDIS_MESH_LEVEL_DATA_H
-#define AMDIS_MESH_LEVEL_DATA_H
-
-
 #include <iostream>
 #include <set>
 #include <vector>
 #include <mpi.h>
 #include "Global.h"
 
+#ifndef AMDIS_MESH_LEVEL_DATA_H
+#define AMDIS_MESH_LEVEL_DATA_H
+
 namespace AMDiS {
 
   using namespace std;
diff --git a/AMDiS/src/parallel/ParMetisPartitioner.cc b/AMDiS/src/parallel/ParMetisPartitioner.cc
index 359ee3ad5487c059a53e513d0454fbe057c4f835..083d7937083f478a94c77d618876560a056e9f30 100644
--- a/AMDiS/src/parallel/ParMetisPartitioner.cc
+++ b/AMDiS/src/parallel/ParMetisPartitioner.cc
@@ -288,8 +288,8 @@ namespace AMDiS {
     int ncon = 1; // one weight at each vertex!
     int nparts = mpiSize; // number of partitions
 
-    vector<float> tpwgts(mpiSize);
-    float ubvec = 1.05;
+    vector<double> tpwgts(mpiSize);
+    double ubvec = 1.05;
     int options[4] = {0, 0, 15, 1}; // default options
     int edgecut = -1;
     vector<int> part(nElements);
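
The arrays above (tpwgts, ubvec, and the itr member changed in ParMetisPartitioner.h below) are handed to the ParMETIS partitioning routines, whose floating-point parameters use the library's real type. Switching them from float to double therefore assumes a ParMETIS installation whose real type is double (e.g. ParMETIS 4.x built with REALTYPEWIDTH 64 in metis.h). A minimal compile-time guard for that assumption, as a sketch and not part of the patch, could be:

    #include <parmetis.h>

    // Fails to compile if ParMETIS' real_t (available since ParMETIS 4.0)
    // does not match the 'double' arrays used by ParMetisPartitioner.
    typedef int AssertParmetisRealIsDouble[sizeof(real_t) == sizeof(double) ? 1 : -1];
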
diff --git a/AMDiS/src/parallel/ParMetisPartitioner.h b/AMDiS/src/parallel/ParMetisPartitioner.h
index 982ca347211d7ca13d5c1b9262342d1ddcbc6ed1..445d1667d248ebef3df6c91ad7d4aa446b5ba2c0 100644
--- a/AMDiS/src/parallel/ParMetisPartitioner.h
+++ b/AMDiS/src/parallel/ParMetisPartitioner.h
@@ -182,7 +182,7 @@ namespace AMDiS {
 
     void createPartitionMap(map<int, int>& partitionMap);
 
-    void setItr(float value)
+    void setItr(double value)
     {
       itr = value;
     }
@@ -195,7 +195,7 @@ namespace AMDiS {
   protected:
     ParMetisMesh *parMetisMesh;
 
-    float itr;
+    double itr;
   };
 }
 
diff --git a/AMDiS/src/parallel/ParallelDofMapping.cc b/AMDiS/src/parallel/ParallelDofMapping.cc
index 9f95414cef5c245ad1e9c06222ecabed6b4e13b3..4639659943f568a8ef80f3007917dc088bc8b67c 100644
--- a/AMDiS/src/parallel/ParallelDofMapping.cc
+++ b/AMDiS/src/parallel/ParallelDofMapping.cc
@@ -475,5 +475,41 @@ namespace AMDiS {
       }
     }
   }
-  
+
+
+  void ParallelDofMapping::createIndexSet(IS &is, 
+					  int firstComponent, 
+					  int nComponents)
+  {
+    FUNCNAME("ParallelDofMapping::createIndexSet()");
+
+    TEST_EXIT_DBG(firstComponent + nComponents <= static_cast<int>(feSpaces.size()))
+      ("Component range exceeds the number of FE spaces!\n");
+
+    TEST_EXIT_DBG(data.count(feSpaces[firstComponent]))
+      ("No data for FE space at address %p!\n", feSpaces[firstComponent]);
+    
+    int firstRankDof = -1;
+    FeSpaceDofMap &feMap = data.find(feSpaces[firstComponent])->second;
+    DofMap &dofMap = feMap.getMap();
+    for (DofMap::iterator it = dofMap.begin(); it != dofMap.end(); ++it) {
+      if (feMap.isRankDof(it->first)) {
+	if (needMatIndexFromGlobal)
+	  firstRankDof = it->second.global;
+	else
+	  firstRankDof = it->first;
+	break;
+      }
+    }
+    
+    TEST_EXIT_DBG(firstRankDof >= 0)("No rank DOF found!\n");    
+    int firstMatIndex = dofToMatIndex.get(firstComponent, firstRankDof);
+
+    int nRankRows = 0;
+    for (int i = firstComponent; i < firstComponent + nComponents; i++)
+      nRankRows += data.find(feSpaces[i])->second.nRankDofs;
+
+    ISCreateStride(mpiComm, nRankRows, firstMatIndex, 1, &is);
+  }
+
 }
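
A minimal usage sketch for the new createIndexSet(), mirroring what PetscSolverGlobalMatrix::createFieldSplit() below does: it builds a stride index set over the matrix indices of a contiguous component range and registers it with a PETSc fieldsplit preconditioner. The component range and the split name are placeholders.

    IS is;
    interiorMap->createIndexSet(is, 1, 2);    // components 1 and 2
    PCFieldSplitSetIS(pc, "myblock", is);     // "myblock" is a made-up name
    ISDestroy(&is);
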
diff --git a/AMDiS/src/parallel/ParallelDofMapping.h b/AMDiS/src/parallel/ParallelDofMapping.h
index f24c605403a987f1d09bedd61c3127eb68b54a19..c4ffdbda54d7cb4b68df46da4153701178f2b932 100644
--- a/AMDiS/src/parallel/ParallelDofMapping.h
+++ b/AMDiS/src/parallel/ParallelDofMapping.h
@@ -29,6 +29,8 @@
 #include "parallel/ParallelTypes.h"
 #include "parallel/StdMpi.h"
 
+#include <petscis.h>
+
 #ifndef AMDIS_FE_SPACE_MAPPING_H
 #define AMDIS_FE_SPACE_MAPPING_H
 
@@ -414,6 +416,10 @@ namespace AMDiS {
     /// Compute local and global matrix indices.
     void computeMatIndex(bool globalIndex);
 
+    void createIndexSet(IS &is, 
+			int firstComponent, 
+			int nComponents);
+
   protected:
     /// Insert a new FE space DOF mapping for a given FE space.
     void addFeSpace(const FiniteElemSpace* feSpace);
diff --git a/AMDiS/src/parallel/PetscSolverFeti.cc b/AMDiS/src/parallel/PetscSolverFeti.cc
index 5db9773fc0f52fb2d58c6f93161ca0dbf5600c85..a95752f9adc4180b8d52ec72ed45d9634effccca 100644
--- a/AMDiS/src/parallel/PetscSolverFeti.cc
+++ b/AMDiS/src/parallel/PetscSolverFeti.cc
@@ -688,11 +688,11 @@ namespace AMDiS {
 
     // === Create distributed matrix for Lagrange constraints. ===
 
-    MatCreateMPIAIJ(mpiCommGlobal,
-		    lagrangeMap.getRankDofs(), localDofMap.getRankDofs(),
-		    lagrangeMap.getOverallDofs(), nGlobalOverallInterior,
-		    2, PETSC_NULL, 2, PETSC_NULL,
-		    &mat_lagrange);
+    MatCreateAIJ(mpiCommGlobal,
+		 lagrangeMap.getRankDofs(), localDofMap.getRankDofs(),
+		 lagrangeMap.getOverallDofs(), nGlobalOverallInterior,
+		 2, PETSC_NULL, 2, PETSC_NULL,
+		 &mat_lagrange);
 
     // === Create for all duals the corresponding Lagrange constraints. On ===
     // === each rank we traverse all pairs (n, m) of ranks, with n < m,    ===
@@ -789,10 +789,10 @@ namespace AMDiS {
       int nRowsRankB = localDofMap.getRankDofs();
 
       Mat matBPi;
-      MatCreateMPIAIJ(mpiCommGlobal,
-		      nRowsRankB, nRowsRankPrimal, 
-		      nGlobalOverallInterior, nRowsOverallPrimal,
-		      30, PETSC_NULL, 30, PETSC_NULL, &matBPi);
+      MatCreateAIJ(mpiCommGlobal,
+		   nRowsRankB, nRowsRankPrimal, 
+		   nGlobalOverallInterior, nRowsOverallPrimal,
+		   30, PETSC_NULL, 30, PETSC_NULL, &matBPi);
       Mat matPrimal;
 
       PetscInt nCols;
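
The renames in this file (and in the solvers below) follow PETSc 3.3, which replaced MatCreateMPIAIJ() with MatCreateAIJ() while keeping the argument list unchanged. If AMDiS still needs to compile against older PETSc releases, one option is a small compatibility guard, sketched here and not part of the patch:

    #include <petscversion.h>

    #if PETSC_VERSION_MAJOR == 3 && PETSC_VERSION_MINOR < 3
      // Pre-3.3 PETSc only provides the MPI-prefixed name.
      #define MatCreateAIJ MatCreateMPIAIJ
    #endif
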
diff --git a/AMDiS/src/parallel/PetscSolverGlobalBlockMatrix.cc b/AMDiS/src/parallel/PetscSolverGlobalBlockMatrix.cc
index 9ab830e7a30a022b8ea3eda9037a94b1cc6ffb43..d7d100b6a855a38245c71941ea8e621b75e94d8a 100644
--- a/AMDiS/src/parallel/PetscSolverGlobalBlockMatrix.cc
+++ b/AMDiS/src/parallel/PetscSolverGlobalBlockMatrix.cc
@@ -54,12 +54,12 @@ namespace AMDiS {
 
     for (int i = 0; i < nBlocks; i++)
       for (int j = 0; j < nBlocks; j++)
-	MatCreateMPIAIJ(mpiCommGlobal,
-			nRankRows * blockSize[i], nRankRows * blockSize[j],
-			nOverallRows * blockSize[i], nOverallRows * blockSize[j],
-			30 * blockSize[i], PETSC_NULL, 
-			30 * blockSize[j], PETSC_NULL,
-			&(nestMat[i * nBlocks + j]));
+	MatCreateAIJ(mpiCommGlobal,
+		     nRankRows * blockSize[i], nRankRows * blockSize[j],
+		     nOverallRows * blockSize[i], nOverallRows * blockSize[j],
+		     30 * blockSize[i], PETSC_NULL, 
+		     30 * blockSize[j], PETSC_NULL,
+		     &(nestMat[i * nBlocks + j]));
 			
     for (int i = 0; i < nComponents; i++)
       for (int j = 0; j < nComponents; j++)
diff --git a/AMDiS/src/parallel/PetscSolverGlobalMatrix.cc b/AMDiS/src/parallel/PetscSolverGlobalMatrix.cc
index 9f67240df7d571cbeadc167658e224646807ce31..7c84020fcf783d0fd60f76b0ee8abdab8e024430 100644
--- a/AMDiS/src/parallel/PetscSolverGlobalMatrix.cc
+++ b/AMDiS/src/parallel/PetscSolverGlobalMatrix.cc
@@ -70,7 +70,7 @@ namespace AMDiS {
 
     // === Create PETSc matrix with the computed nnz data structure. ===
 
-    MatCreateMPIAIJ(mpiCommGlobal, nRankRows, nRankRows, 
+    MatCreateAIJ(mpiCommGlobal, nRankRows, nRankRows, 
  		    nOverallRows, nOverallRows,
 		    0, d_nnz, 0, o_nnz, &matIntInt);
 
@@ -119,6 +119,8 @@ namespace AMDiS {
     KSPSetFromOptions(kspInterior);
     PCSetFromOptions(pcInterior);
 
+    createFieldSplit(pcInterior);
+
     // Do not delete the solution vector, use it for the initial guess.
     if (!zeroStartVector)
       KSPSetInitialGuessNonzero(kspInterior, PETSC_TRUE);
@@ -140,30 +142,30 @@ namespace AMDiS {
       MatCreateSeqAIJ(mpiCommLocal, nRowsRankInterior, nRowsRankInterior,
 		      60, PETSC_NULL, &matIntInt);
     } else {
-      MatCreateMPIAIJ(mpiCommLocal, 
-		      nRowsRankInterior, nRowsRankInterior,
-		      nRowsOverallInterior, nRowsOverallInterior,
-		      60, PETSC_NULL, 60, PETSC_NULL, &matIntInt);
+      MatCreateAIJ(mpiCommLocal, 
+		   nRowsRankInterior, nRowsRankInterior,
+		   nRowsOverallInterior, nRowsOverallInterior,
+		   60, PETSC_NULL, 60, PETSC_NULL, &matIntInt);
     }
 
     if (coarseSpaceMap) {
       int nRowsRankCoarse = coarseSpaceMap->getRankDofs();
       int nRowsOverallCoarse = coarseSpaceMap->getOverallDofs();
 
-      MatCreateMPIAIJ(mpiCommGlobal,
-		      nRowsRankCoarse, nRowsRankCoarse,
-		      nRowsOverallCoarse, nRowsOverallCoarse,
-		      60, PETSC_NULL, 60, PETSC_NULL, &matCoarseCoarse);
+      MatCreateAIJ(mpiCommGlobal,
+		   nRowsRankCoarse, nRowsRankCoarse,
+		   nRowsOverallCoarse, nRowsOverallCoarse,
+		   60, PETSC_NULL, 60, PETSC_NULL, &matCoarseCoarse);
       
-      MatCreateMPIAIJ(mpiCommGlobal,
-		      nRowsRankCoarse, nRowsRankInterior,
-		      nRowsOverallCoarse, nGlobalOverallInterior,
-		      60, PETSC_NULL, 60, PETSC_NULL, &matCoarseInt);
+      MatCreateAIJ(mpiCommGlobal,
+		   nRowsRankCoarse, nRowsRankInterior,
+		   nRowsOverallCoarse, nGlobalOverallInterior,
+		   60, PETSC_NULL, 60, PETSC_NULL, &matCoarseInt);
       
-      MatCreateMPIAIJ(mpiCommGlobal,
-		      nRowsRankInterior, nRowsRankCoarse,
-		      nGlobalOverallInterior, nRowsOverallCoarse,
-		      60, PETSC_NULL, 60, PETSC_NULL, &matIntCoarse);
+      MatCreateAIJ(mpiCommGlobal,
+		   nRowsRankInterior, nRowsRankCoarse,
+		   nGlobalOverallInterior, nRowsOverallCoarse,
+		   60, PETSC_NULL, 60, PETSC_NULL, &matIntCoarse);
     }
 
     // === Prepare traverse of sequentially created matrices. ===
@@ -540,6 +542,42 @@ namespace AMDiS {
   }
 
 
+  void PetscSolverGlobalMatrix::createFieldSplit(PC pc)
+  {
+    FUNCNAME("PetscSolverGlobalMatrix::createFieldSplit()");
+
+    vector<string> isNames;
+    Parameters::get("parallel->solver->is blocks", isNames);
+
+    int nBlocks = isNames.size();
+    if (nBlocks == 0)
+      return;
+
+    for (int i = 0; i < nBlocks; i++) {
+      MSG("Create for block %s\n", isNames[i].c_str());
+
+      vector<int> blockComponents;
+      Parameters::get("parallel->solver->is block " + lexical_cast<string>(i),
+		      blockComponents);
+      int nComponents = static_cast<int>(blockComponents.size());
+
+      TEST_EXIT(nComponents > 0)("No components given for parallel->solver->is block %d!\n", i);
+
+      // Check that the block's components are contiguous.
+      for (int j = 0; j < nComponents; j++) {
+	TEST_EXIT(blockComponents[j] == blockComponents[0] + j)
+	  ("Non-contiguous IS blocks are not yet supported! Block %s\n",
+	   isNames[i].c_str());
+      }
+
+      IS is;
+      interiorMap->createIndexSet(is, blockComponents[0], nComponents);
+      PCFieldSplitSetIS(pc, isNames[i].c_str(), is);
+      ISDestroy(&is);
+    }
+  }
+
+
   void PetscSolverGlobalMatrix::setDofMatrix(DOFMatrix* mat,
 					     int nRowMat, int nColMat)
   {
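
createFieldSplit() only attaches the named index sets; the preconditioner type itself is still chosen through the PETSc options processed in PCSetFromOptions(). A hypothetical set of run-time options for two blocks named "velocity" and "pressure" (the names must match the entries of parallel->solver->is blocks) would be:

    -pc_type fieldsplit
    -pc_fieldsplit_type schur
    -fieldsplit_velocity_ksp_type gmres
    -fieldsplit_pressure_ksp_type preonly
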
diff --git a/AMDiS/src/parallel/PetscSolverGlobalMatrix.h b/AMDiS/src/parallel/PetscSolverGlobalMatrix.h
index ac2631300c9e61b104431099f53ed6448d8b2048..0c9457d405f94aa0be22e409dbc3515d873762d7 100644
--- a/AMDiS/src/parallel/PetscSolverGlobalMatrix.h
+++ b/AMDiS/src/parallel/PetscSolverGlobalMatrix.h
@@ -62,6 +62,8 @@ namespace AMDiS {
 
     void destroyVectorData();
 
+    void createFieldSplit(PC pc);
+
   protected:
     /// Creates a new non zero pattern structure for the PETSc matrix. 
     void createPetscNnzStructure(Matrix<DOFMatrix*> *mat);
diff --git a/AMDiS/src/parallel/PetscSolverSchur.cc b/AMDiS/src/parallel/PetscSolverSchur.cc
index 2811227ad3a88cc73c4f6d41a7267ef3b5f5a4a0..c4a8c9976e78cf6430793dbfe8c0f20d26211b2a 100644
--- a/AMDiS/src/parallel/PetscSolverSchur.cc
+++ b/AMDiS/src/parallel/PetscSolverSchur.cc
@@ -190,25 +190,25 @@ namespace AMDiS {
     int nOverallBoundaryRows = nOverallBoundaryDofs * nComponents;
 
 
-    MatCreateMPIAIJ(mpiCommGlobal, 
-		    nInteriorRows, nInteriorRows, 
-		    nOverallInteriorRows, nOverallInteriorRows,
-		    100, PETSC_NULL, 100, PETSC_NULL, &matA11);
-
-    MatCreateMPIAIJ(mpiCommGlobal, 
-		    nBoundaryRows, nBoundaryRows, 
-		    nOverallBoundaryRows, nOverallBoundaryRows,
-		    100, PETSC_NULL, 100, PETSC_NULL, &matA22);
-
-    MatCreateMPIAIJ(mpiCommGlobal, 
-		    nInteriorRows, nBoundaryRows, 
-		    nOverallInteriorRows, nOverallBoundaryRows,
-		    100, PETSC_NULL, 100, PETSC_NULL, &matA12);    
-
-    MatCreateMPIAIJ(mpiCommGlobal, 
-		    nBoundaryRows, nInteriorRows, 
-		    nOverallBoundaryRows, nOverallInteriorRows,
-		    100, PETSC_NULL, 100, PETSC_NULL, &matA21);
+    MatCreateAIJ(mpiCommGlobal, 
+		 nInteriorRows, nInteriorRows, 
+		 nOverallInteriorRows, nOverallInteriorRows,
+		 100, PETSC_NULL, 100, PETSC_NULL, &matA11);
+
+    MatCreateAIJ(mpiCommGlobal, 
+		 nBoundaryRows, nBoundaryRows, 
+		 nOverallBoundaryRows, nOverallBoundaryRows,
+		 100, PETSC_NULL, 100, PETSC_NULL, &matA22);
+
+    MatCreateAIJ(mpiCommGlobal, 
+		 nInteriorRows, nBoundaryRows, 
+		 nOverallInteriorRows, nOverallBoundaryRows,
+		 100, PETSC_NULL, 100, PETSC_NULL, &matA12);    
+
+    MatCreateAIJ(mpiCommGlobal, 
+		 nBoundaryRows, nInteriorRows, 
+		 nOverallBoundaryRows, nOverallInteriorRows,
+		 100, PETSC_NULL, 100, PETSC_NULL, &matA21);
 
 
     for (int i = 0; i < nComponents; i++)