diff --git a/AMDiS/src/CreatorMap.cc b/AMDiS/src/CreatorMap.cc
index d65332a84a96e071a46d4093fc275c0e538d6527..ff18226c7eacebccab73541238c5f3cba019155c 100644
--- a/AMDiS/src/CreatorMap.cc
+++ b/AMDiS/src/CreatorMap.cc
@@ -92,7 +92,7 @@ namespace AMDiS {
     #ifdef HAVE_BDDC_ML
       addCreator("bddcml", new BddcMlSolver::Creator);
     #endif
-    addCreator("petsc-stokes", new PetscSolverNavierStokes::Creator);
+    addCreator("petsc-navierstokes", new PetscSolverNavierStokes::Creator);
 #endif
   }
 
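Note: the creator key for the Navier-Stokes solver changes from "petsc-stokes" to "petsc-navierstokes", so existing init files that select this solver have to be updated. A minimal sketch of the affected parameter line, assuming a hypothetical problem name "ns" and the usual AMDiS init-file syntax:

    ns->solver: petsc-navierstokes
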
diff --git a/AMDiS/src/Element.cc b/AMDiS/src/Element.cc
index a51f9ed522dd603f679d5fca5cbd720eac6e4971..d4d435a17af78014ad306cf8991cc739e47be26a 100644
--- a/AMDiS/src/Element.cc
+++ b/AMDiS/src/Element.cc
@@ -653,12 +653,8 @@ namespace AMDiS {
   {
     FUNCNAME("Element::getAllDofs()");
 
-    int sb = dofs.size();
-
     getNodeDofs(feSpace, bound, dofs, baseDofPtr);
 
-    //    MSG("    -> getNodeDofs %d\n", dofs.size() - sb);
-
     if (dofGeoIndex != NULL) {
       // In the case dofGeoIndex should be filled, set all node DOFs to be 
       // vertex DOFs.
@@ -667,14 +663,8 @@ namespace AMDiS {
 	(*dofGeoIndex)[i] = VERTEX;
     }
 
-    if (feSpace->getBasisFcts()->getDegree() > 1) {
-
-      sb = dofs.size();
-
+    if (feSpace->getBasisFcts()->getDegree() > 1)
       getHigherOrderDofs(feSpace, bound, dofs, baseDofPtr, dofGeoIndex);
-      
-      //      MSG("     -> getHODofs %d\n", dofs.size() - sb);
-    }
 
     if (dofGeoIndex) {
       TEST_EXIT_DBG(dofs.size() == dofGeoIndex->size())
diff --git a/AMDiS/src/io/ArhReader.cc b/AMDiS/src/io/ArhReader.cc
index 66096239f1dcff8a7552ca1b0cc52ad9c2603961..e125376b0c59aacd8f4176c30d187ac46ad36056 100644
--- a/AMDiS/src/io/ArhReader.cc
+++ b/AMDiS/src/io/ArhReader.cc
@@ -108,7 +108,7 @@ namespace AMDiS {
     string arhPrefix = "";
     map<int, int> elInRank;
     map<int, int> elCodeSize;
-    readMetaData(filename, elInRank, elCodeSize, arhPrefix);
+    int nProc = readMetaData(filename, elInRank, elCodeSize, arhPrefix);
     
 
     // === Check which arh files must be read by current rank. ===
@@ -133,8 +133,12 @@ namespace AMDiS {
 
     for (std::set<int>::iterator it = readArhFiles.begin();
 	 it != readArhFiles.end(); ++it) {
-      string arhFilename = 
-	directory.native() + "/" + arhPrefix + "-p" + boost::lexical_cast<string>(*it) + "-.arh";
+      string arhFilename = directory.native() + "/" + arhPrefix;
+      if (nProc == 1)
+	arhFilename += ".arh";
+      else
+	arhFilename += "-p" + boost::lexical_cast<string>(*it) + "-.arh";
+
       MSG("ARH file read from: %s\n", arhFilename.c_str());
       readFile(arhFilename, mesh, vecs);
     }
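Note: readMetaData() now also reports the number of processes the ARH files were written with, and the filename is assembled accordingly. For an assumed prefix "data", the serial case now opens

    <directory>/data.arh

while files written by, e.g., four processes are still read as

    <directory>/data-p0-.arh ... <directory>/data-p3-.arh
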
diff --git a/AMDiS/src/parallel/MeshDistributor.cc b/AMDiS/src/parallel/MeshDistributor.cc
index e3efae18c37426638889aec4a03c3cf28b9b0959..f63324f876d00586445def8b121b424cb7562680 100644
--- a/AMDiS/src/parallel/MeshDistributor.cc
+++ b/AMDiS/src/parallel/MeshDistributor.cc
@@ -81,9 +81,11 @@ namespace AMDiS {
       writeSerializationFile(false),
       repartitioningAllowed(false),
       repartitionIthChange(20),
+      repartitioningWaitAfterFail(20),
       nMeshChangesAfterLastRepartitioning(0),
       repartitioningCounter(0),
       repartitioningFailed(0),
+
       debugOutputDir(""),
       lastMeshChangeIndex(0),
       createBoundaryDofFlag(0),
@@ -101,6 +103,7 @@ namespace AMDiS {
     Parameters::get(name + "->repartitioning", repartitioningAllowed);
     Parameters::get(name + "->debug output dir", debugOutputDir);
     Parameters::get(name + "->repartition ith change", repartitionIthChange);
+    Parameters::get(name + "->repartition wait after fail", repartitioningWaitAfterFail);
     Parameters::get(name + "->mesh adaptivity", meshAdaptivity);
     
     string partStr = "parmetis";
@@ -366,8 +369,9 @@ namespace AMDiS {
       // and now partition the mesh    
       bool partitioningSucceed = partitioner->partition(elemWeights, INITIAL);
       TEST_EXIT(partitioningSucceed)("Initial partitioning does not work!\n");
-      partitioner->createPartitionMap(partitionMap);
     }
+
+    partitioner->createPartitionMap(partitionMap);
   }
 
 
@@ -1372,7 +1376,7 @@ namespace AMDiS {
       partitioner->partition(elemWeights, ADAPTIVE_REPART);
     if (!partitioningSucceed) {
       MPI::COMM_WORLD.Barrier();
-      repartitioningFailed = 20;
+      repartitioningFailed = repartitioningWaitAfterFail;
       MSG("Mesh partitioner created empty partition!\n");
       MSG("Mesh repartitioning needed %.5f seconds\n", MPI::Wtime() - timePoint); 
       return;
@@ -1395,7 +1399,7 @@ namespace AMDiS {
       
       if (!partitioningSucceed || !partitioner->meshChanged()) {
 	MPI::COMM_WORLD.Barrier();
-	repartitioningFailed = 20;
+	repartitioningFailed = repartitioningWaitAfterFail;
 	MSG("Mesh repartitioning needed %.5f seconds\n", MPI::Wtime() - timePoint);   
 	return;
       }
@@ -1562,7 +1566,9 @@ namespace AMDiS {
     // update the number of elements, vertices, etc. of the mesh.
     mesh->removeMacroElements(deleteMacroElements, feSpaces);
 
+
     // === Remove double DOFs. ===
+
     MeshManipulation meshManipulation(mesh);
     meshManipulation.deleteDoubleDofs(feSpaces, newMacroEl, elObjDb);
 
diff --git a/AMDiS/src/parallel/MeshDistributor.h b/AMDiS/src/parallel/MeshDistributor.h
index cf6a20268f739cdaa6f032337b71cd28d1181d0c..11c134df924268c1f1d001dba5c3e12b4311b1ba 100644
--- a/AMDiS/src/parallel/MeshDistributor.h
+++ b/AMDiS/src/parallel/MeshDistributor.h
@@ -536,6 +536,9 @@ namespace AMDiS {
     /// repartitionings.
     int repartitionIthChange;
 
+    /// Number of mesh changes to wait after a failed repartitioning before retrying.
+    int repartitioningWaitAfterFail;
+
     /// Counts the number of mesh changes after the last mesh repartitioning 
     /// was done.
     int nMeshChangesAfterLastRepartitioning;
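Note: the back-off after a failed repartitioning, previously hard-coded to 20 mesh changes, is now configurable and defaults to the old value. A sketch of the new init-file entry, assuming the mesh distributor is created with the usual name "parallel":

    parallel->repartition wait after fail: 50
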
diff --git a/AMDiS/src/parallel/MeshManipulation.cc b/AMDiS/src/parallel/MeshManipulation.cc
index 053ae75cdc17e9b0a84c2659d3371c821b780d44..a76b52fefcb56c28bde40e400a0ad396ce704d2d 100644
--- a/AMDiS/src/parallel/MeshManipulation.cc
+++ b/AMDiS/src/parallel/MeshManipulation.cc
@@ -58,7 +58,7 @@ namespace AMDiS {
     for (std::set<MacroElement*>::iterator it = newMacroEl.begin();
 	 it != newMacroEl.end(); ++it)
       macroIndexMap[(*it)->getIndex()] = *it;
 
     // === Traverse mesh and put all "old" macro element to macrosProcessed  ===
     // === that stores all macro elements which are really connected to the  ===
     // === overall mesh structure.                                           ===
@@ -83,7 +83,6 @@ namespace AMDiS {
     feSpace->getMesh()->getDofIndexCoords(coords);
 #endif
 
-
     // === Traverse all new elements and connect them to the overall mesh     ===
     // === structure by deleting all double DOFs on macro element's vertices, ===
     // === edges and faces.                                                   ===
@@ -105,8 +104,8 @@ namespace AMDiS {
 	  if (elIt->elIndex == (*it)->getIndex())
 	    continue;
 
-	  if (macrosProcessed.count(elIt->elIndex) == 1) {
-	    TEST_EXIT_DBG(macroIndexMap.count(elIt->elIndex) == 1)
+	  if (macrosProcessed.count(elIt->elIndex)) {
+	    TEST_EXIT_DBG(macroIndexMap.count(elIt->elIndex))
 	      ("Should not happen!\n");
 
 	    Element *el0 = (*it)->getElement();
@@ -129,12 +128,12 @@ namespace AMDiS {
 	vector<ElementObjectData> &edgeEl = 
 	  elObjDb.getElementsEdge((*it)->getIndex(), i);
 
 	for (vector<ElementObjectData>::iterator elIt = edgeEl.begin();
 	     elIt != edgeEl.end(); ++elIt) {
 	  if (elIt->elIndex == (*it)->getIndex())
 	    continue;
 
-	  if (macrosProcessed.count(elIt->elIndex) == 1) {
+	  if (macrosProcessed.count(elIt->elIndex)) {
 	    TEST_EXIT_DBG(macroIndexMap.count(elIt->elIndex))
 	      ("Should not happen!\n");
 	    
@@ -150,9 +149,9 @@ namespace AMDiS {
 	    vector<GeoIndex> dofGeoIndex0, dofGeoIndex1;
 	    el0->getAllDofs(feSpace, b0, dofs0, true, &dofGeoIndex0);
 	    el1->getAllDofs(feSpace, b1, dofs1, true, &dofGeoIndex1);
 
 #if (DEBUG != 0)
-	    if (feSpaces.size() == 1)
+	    if (feSpaces.size())
 	      debug::testDofsByCoords(coords, dofs0, dofs1);
 	    else 
 	      TEST_EXIT_DBG(dofs0.size() == dofs1.size())
@@ -184,7 +183,7 @@ namespace AMDiS {
 	  if (elIt->elIndex == (*it)->getIndex())
 	    continue;
 
-	  if (macrosProcessed.count(elIt->elIndex) == 1) {
+	  if (macrosProcessed.count(elIt->elIndex)) {
 	    TEST_EXIT_DBG(macroIndexMap.count(elIt->elIndex))
 	      ("Should not happen!\n");
 
@@ -202,7 +201,7 @@ namespace AMDiS {
 	    el1->getAllDofs(feSpace, b1, dofs1, true, &dofGeoIndex1);
 
 #if (DEBUG != 0)
-	    if (feSpaces.size() == 1)
+	    if (feSpaces.size())
 	      debug::testDofsByCoords(coords, dofs0, dofs1);
 	    else 
 	      TEST_EXIT_DBG(dofs0.size() == dofs1.size())
@@ -355,7 +354,8 @@ namespace AMDiS {
     // === To the mesh structure code.                                          ===
 
     TraverseStack stack;
-    ElInfo *elInfo = stack.traverseFirst(mesh, -1, traverseFlag);
+    ElInfo *elInfo = 
+      stack.traverseFirstOneMacro(mesh, boundEl.elIndex, -1, traverseFlag);
 
     if (s0 != -1) {
       while (elInfo && elInfo->getElement() != child0)
diff --git a/AMDiS/src/parallel/ParallelDebug.cc b/AMDiS/src/parallel/ParallelDebug.cc
index 818859f466f68b525d8af7efd5a2e027c8dbd100..24b508c5aa75174c0ed1b013823d4d9c269e70c3 100644
--- a/AMDiS/src/parallel/ParallelDebug.cc
+++ b/AMDiS/src/parallel/ParallelDebug.cc
@@ -833,8 +833,7 @@ namespace AMDiS {
 
     map<int, double> vec;    
     TraverseStack stack;
-    ElInfo *elInfo = stack.traverseFirst(pdb.mesh, -1, 
-					 Mesh::CALL_LEAF_EL | Mesh::FILL_COORDS);
+    ElInfo *elInfo = stack.traverseFirst(pdb.mesh, -1, Mesh::CALL_LEAF_EL);
     
     while (elInfo) {		  
       int index = elInfo->getElement()->getIndex();
diff --git a/AMDiS/src/parallel/PetscHelper.cc b/AMDiS/src/parallel/PetscHelper.cc
index eab075af33b4de5872b2b9dd52da6fd09bb14d2a..fb6c433a3e7016f8359c25022a594454fa089df1 100644
--- a/AMDiS/src/parallel/PetscHelper.cc
+++ b/AMDiS/src/parallel/PetscHelper.cc
@@ -250,7 +250,7 @@ namespace AMDiS {
 
 
     void setSolver(KSP ksp, 
-		   string kspPrefix,
+		   const char* kspPrefix,
 		   KSPType kspType, 
 		   PCType pcType, 
 		   PetscReal rtol,
@@ -259,8 +259,9 @@ namespace AMDiS {
     {
       KSPSetType(ksp, kspType);
       KSPSetTolerances(ksp, rtol, atol, PETSC_DEFAULT, maxIt);
-      if (kspPrefix != "")
-	KSPSetOptionsPrefix(ksp, kspPrefix.c_str());
+      // set an options prefix only if one was given
+      if (strcmp(kspPrefix, "") != 0)
+	KSPSetOptionsPrefix(ksp, kspPrefix);
       KSPSetFromOptions(ksp);
 
       PC pc;
diff --git a/AMDiS/src/parallel/PetscHelper.h b/AMDiS/src/parallel/PetscHelper.h
index e6ac70ea8ce6957d70d0139d6e55207080b9eec7..546ca6f8ee153fbd3aae3b25848201b7f67d98b9 100644
--- a/AMDiS/src/parallel/PetscHelper.h
+++ b/AMDiS/src/parallel/PetscHelper.h
@@ -92,7 +92,7 @@ namespace AMDiS {
     void matNestConvert(Mat matNest, Mat &mat);
 
     void setSolver(KSP ksp, 
-		   string kspPrefix,
+		   const char* kspPrefix,
 		   KSPType kspType, 
 		   PCType pcType, 
 		   PetscReal rtol = PETSC_DEFAULT,
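Note: setSolver() now takes the KSP options prefix as a plain C string, so a literal can be passed directly. A minimal sketch of a call, assuming a KSP object "ksp" is at hand, the helper function is visible in the current scope, and the trailing tolerance arguments keep their PETSC_DEFAULT values:

    setSolver(ksp, "laplace_", KSPCG, PCJACOBI);
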
diff --git a/AMDiS/src/parallel/PetscSolverGlobalMatrix.cc b/AMDiS/src/parallel/PetscSolverGlobalMatrix.cc
index 928309b01052d7fd1f4a38388911fd5501b286fc..00d4b309df7eff1c667a8d6749a8f6ae9eacd630 100644
--- a/AMDiS/src/parallel/PetscSolverGlobalMatrix.cc
+++ b/AMDiS/src/parallel/PetscSolverGlobalMatrix.cc
@@ -501,20 +501,20 @@ namespace AMDiS {
 	   isNames[i].c_str());
       }
 
-      createFieldSplit(pc, isNames[i], blockComponents);
+      createFieldSplit(pc, isNames[i].c_str(), blockComponents);
     }
   }
 
 
   void PetscSolverGlobalMatrix::createFieldSplit(PC pc, 
-						 string splitName, 
+						 const char* splitName, 
 						 vector<int> &components)
   {
     FUNCNAME("PetscSolverGlobalMatrix::createFieldSplit()");
 
     IS is;
     interiorMap->createIndexSet(is, components[0], components.size());
-    PCFieldSplitSetIS(pc, splitName.c_str(), is);
+    PCFieldSplitSetIS(pc, splitName, is);
     ISDestroy(&is);
   }
 
@@ -823,7 +823,7 @@ namespace AMDiS {
     fe.push_back(componentSpaces[component]);
 
     PetscSolver* subSolver = new PetscSolverGlobalMatrix("");
-    subSolver->setKspPrefix(kspPrefix.c_str());
+    subSolver->setKspPrefix(kspPrefix);
     subSolver->setMeshDistributor(meshDistributor,
 				  mpiCommGlobal,
 				  mpiCommLocal);
diff --git a/AMDiS/src/parallel/PetscSolverGlobalMatrix.h b/AMDiS/src/parallel/PetscSolverGlobalMatrix.h
index 4ceb56d2d3344e82aef75cd8c8474e15e8400404..040151de5b95b24ea31a996ec9dac912ecb4cf09 100644
--- a/AMDiS/src/parallel/PetscSolverGlobalMatrix.h
+++ b/AMDiS/src/parallel/PetscSolverGlobalMatrix.h
@@ -88,10 +88,10 @@ namespace AMDiS {
      * \param[in] components  System component numbers of the field split. At
      *                        the moment only continuous splits are allowed.
      */
-    void createFieldSplit(PC pc, string splitName, vector<int> &components);
+    void createFieldSplit(PC pc, const char* splitName, vector<int> &components);
 
     /// Wrapper to create field split from only one component.
-    void createFieldSplit(PC pc, string splitName, int component)
+    void createFieldSplit(PC pc, const char* splitName, int component)
     {
       vector<int> components;
       components.push_back(component);
diff --git a/AMDiS/src/parallel/PetscSolverNavierStokes.cc b/AMDiS/src/parallel/PetscSolverNavierStokes.cc
index 14cace273cec027a2489fb85e0d9d464273747c8..f6886a4ac7f6d69e1aa3a5e16c6bafb77037f774 100644
--- a/AMDiS/src/parallel/PetscSolverNavierStokes.cc
+++ b/AMDiS/src/parallel/PetscSolverNavierStokes.cc
@@ -46,6 +46,7 @@ namespace AMDiS {
     : PetscSolverGlobalMatrix(name),
       pressureComponent(-1),
       pressureNullSpace(true),
+      useOldInitialGuess(false),
       massMatrixSolver(NULL),
       laplaceMatrixSolver(NULL),
       nu(NULL),
@@ -61,6 +62,28 @@ namespace AMDiS {
     Parameters::get(initFileStr + "->navierstokes->pressure null space",
 		    pressureNullSpace);
     TEST_EXIT(pressureNullSpace)("This is not yet tested, may be wrong!\n");
+
+    Parameters::get(initFileStr + "->navierstokes->use old initial guess", 
+		    useOldInitialGuess);
+  }
+
+
+  void PetscSolverNavierStokes::solvePetscMatrix(SystemVector &vec, 
+						 AdaptInfo *adaptInfo)
+  {
+    FUNCNAME("PetscSolverNavierStokes::solvePetscMatrix()");
+
+    if (useOldInitialGuess) {
+      VecSet(getVecSolInterior(), 0.0);
+      
+      for (int i = 0; i < solution->getSize(); i++)
+	setDofVector(getVecSolInterior(), solution->getDOFVector(i), i, true);
+      
+      vecSolAssembly();
+      KSPSetInitialGuessNonzero(kspInterior, PETSC_TRUE);
+    }
+
+    PetscSolverGlobalMatrix::solvePetscMatrix(vec, adaptInfo);
   }
 
 
@@ -151,7 +174,7 @@ namespace AMDiS {
     else
       laplaceOp.addTerm(new VecAtQP_SOT(phase, &idFct));
     laplaceMatrix.assembleOperator(laplaceOp);
-    laplaceMatrixSolver = createSubSolver(pressureComponent, "laplace_");
+    laplaceMatrixSolver = createSubSolver(pressureComponent, string("laplace_"));
     laplaceMatrixSolver->fillPetscMatrix(&laplaceMatrix);
 
 
diff --git a/AMDiS/src/parallel/PetscSolverNavierStokes.h b/AMDiS/src/parallel/PetscSolverNavierStokes.h
index fdac59c5976a30863872ed3713d057b17c526844..96bef02ca14c7ef1d4d85bcd42ba314b0c7fe1f7 100644
--- a/AMDiS/src/parallel/PetscSolverNavierStokes.h
+++ b/AMDiS/src/parallel/PetscSolverNavierStokes.h
@@ -120,8 +120,11 @@ namespace AMDiS {
 	return new PetscSolverNavierStokes(this->name); 
       }
     };
+
     PetscSolverNavierStokes(string name);
 
+    void solvePetscMatrix(SystemVector &vec, AdaptInfo *adaptInfo);
+
     void setStokesData(double *nuPtr, double *invTauPtr, SystemVector *vec)
     {
       nu = nuPtr;
@@ -144,6 +147,9 @@ namespace AMDiS {
 
     bool pressureNullSpace;
 
+    /// If true, the old solution is used as the initial guess for the iterative solver.
+    bool useOldInitialGuess;
+
     PetscSolver *massMatrixSolver, *laplaceMatrixSolver, *conDifMatrixSolver;
 
     NavierStokesSchurData matShellContext;
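Note: with "use old initial guess" enabled, solvePetscMatrix() copies the current solution into the interior PETSc vector and marks the KSP initial guess as nonzero before delegating to PetscSolverGlobalMatrix::solvePetscMatrix(). A sketch of the corresponding init-file entry, where <initFileStr> stands for the solver's parameter prefix used in the constructor:

    <initFileStr>->navierstokes->use old initial guess: 1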