From d089b0500ba59c14e9309e889d31bf0912bab01c Mon Sep 17 00:00:00 2001
From: Thomas Witkowski <thomas.witkowski@gmx.de>
Date: Thu, 14 Jun 2012 11:50:32 +0000
Subject: [PATCH] Upgrade to PETSc 3.3, add basic support for the fieldsplit
 preconditioner.
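
Port the PETSc interface to the 3.3 API (MatCreateMPIAIJ has been replaced by
MatCreateAIJ) and add a first, basic fieldsplit preconditioner: the solution
components listed in "parallel->solver->is block <i>" are grouped into named
index sets and registered with PCFieldSplitSetIS().

Minimal usage sketch (the block names "velocity"/"pressure" and the exact
init-file vector syntax are illustrative only, not part of this patch):

    parallel->solver->is blocks:  [velocity, pressure]
    parallel->solver->is block 0: [0, 1]
    parallel->solver->is block 1: [2]

together with the usual PETSc runtime options that activate the splitting,
e.g. -pc_type fieldsplit -pc_fieldsplit_type schur.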

---
 AMDiS/src/parallel/MeshLevelData.h            |  7 +-
 AMDiS/src/parallel/ParMetisPartitioner.cc     |  4 +-
 AMDiS/src/parallel/ParMetisPartitioner.h      |  4 +-
 AMDiS/src/parallel/ParallelDofMapping.cc      | 38 +++++++++-
 AMDiS/src/parallel/ParallelDofMapping.h       |  6 ++
 AMDiS/src/parallel/PetscSolverFeti.cc         | 18 ++---
 .../parallel/PetscSolverGlobalBlockMatrix.cc  | 12 ++--
 AMDiS/src/parallel/PetscSolverGlobalMatrix.cc | 72 ++++++++++++++-----
 AMDiS/src/parallel/PetscSolverGlobalMatrix.h  |  2 +
 AMDiS/src/parallel/PetscSolverSchur.cc        | 38 +++++-----
 10 files changed, 141 insertions(+), 60 deletions(-)

diff --git a/AMDiS/src/parallel/MeshLevelData.h b/AMDiS/src/parallel/MeshLevelData.h
index 66f826ab..5b1a0962 100644
--- a/AMDiS/src/parallel/MeshLevelData.h
+++ b/AMDiS/src/parallel/MeshLevelData.h
@@ -20,16 +20,15 @@
 
 /** \file MeshLevelData.h */
 
-#ifndef AMDIS_MESH_LEVEL_DATA_H
-#define AMDIS_MESH_LEVEL_DATA_H
-
-
 #include <iostream>
 #include <set>
 #include <vector>
 #include <mpi.h>
 #include "Global.h"
 
+#ifndef AMDIS_MESH_LEVEL_DATA_H
+#define AMDIS_MESH_LEVEL_DATA_H
+
 namespace AMDiS {
 
   using namespace std;
diff --git a/AMDiS/src/parallel/ParMetisPartitioner.cc b/AMDiS/src/parallel/ParMetisPartitioner.cc
index 359ee3ad..083d7937 100644
--- a/AMDiS/src/parallel/ParMetisPartitioner.cc
+++ b/AMDiS/src/parallel/ParMetisPartitioner.cc
@@ -288,8 +288,8 @@ namespace AMDiS {
     int ncon = 1; // one weight at each vertex!
     int nparts = mpiSize; // number of partitions
 
-    vector<float> tpwgts(mpiSize);
-    float ubvec = 1.05;
+    vector<double> tpwgts(mpiSize);
+    double ubvec = 1.05;
     int options[4] = {0, 0, 15, 1}; // default options
     int edgecut = -1;
     vector<int> part(nElements);
diff --git a/AMDiS/src/parallel/ParMetisPartitioner.h b/AMDiS/src/parallel/ParMetisPartitioner.h
index 982ca347..445d1667 100644
--- a/AMDiS/src/parallel/ParMetisPartitioner.h
+++ b/AMDiS/src/parallel/ParMetisPartitioner.h
@@ -182,7 +182,7 @@ namespace AMDiS {
 
     void createPartitionMap(map<int, int>& partitionMap);
 
-    void setItr(float value)
+    void setItr(double value)
     {
       itr = value;
     }
@@ -195,7 +195,7 @@ namespace AMDiS {
   protected:
     ParMetisMesh *parMetisMesh;
 
-    float itr;
+    double itr;
   };
 }
 
diff --git a/AMDiS/src/parallel/ParallelDofMapping.cc b/AMDiS/src/parallel/ParallelDofMapping.cc
index 9f95414c..46396599 100644
--- a/AMDiS/src/parallel/ParallelDofMapping.cc
+++ b/AMDiS/src/parallel/ParallelDofMapping.cc
@@ -475,5 +475,41 @@ namespace AMDiS {
       }
     }
   }
-  
+
+
+  void ParallelDofMapping::createIndexSet(IS &is, 
+					  int firstComponent, 
+					  int nComponents)
+  {
+    FUNCNAME("ParallelDofMapping::createIndexSet()");
+
+    TEST_EXIT_DBG(firstComponent + nComponents <= feSpaces.size())
+      ("Should not happen!\n");
+
+    TEST_EXIT_DBG(data.count(feSpaces[firstComponent]))
+      ("No data for FE space at address %p!\n", feSpaces[firstComponent]);
+    
+    int firstRankDof = -1;
+    FeSpaceDofMap &feMap = data.find(feSpaces[firstComponent])->second;
+    DofMap &dofMap = feMap.getMap();
+    for (DofMap::iterator it = dofMap.begin(); it != dofMap.end(); ++it) {
+      if (feMap.isRankDof(it->first)) {
+	if (needMatIndexFromGlobal)
+	  firstRankDof = it->second.global;
+	else
+	  firstRankDof = it->first;
+	break;
+      }
+    }
+    
+    TEST_EXIT_DBG(firstRankDof >= 0)("No rank DOF found!\n");    
+    int firstMatIndex = dofToMatIndex.get(firstComponent, firstRankDof);
+
+    int nRankRows = 0;
+    for (int i = firstComponent; i < firstComponent + nComponents; i++)
+      nRankRows += data.find(feSpaces[i])->second.nRankDofs; // sum rank DOFs of each component in the block
+
+    ISCreateStride(mpiComm, nRankRows, firstMatIndex, 1, &is);
+  }
+
 }
diff --git a/AMDiS/src/parallel/ParallelDofMapping.h b/AMDiS/src/parallel/ParallelDofMapping.h
index f24c6054..c4ffdbda 100644
--- a/AMDiS/src/parallel/ParallelDofMapping.h
+++ b/AMDiS/src/parallel/ParallelDofMapping.h
@@ -29,6 +29,8 @@
 #include "parallel/ParallelTypes.h"
 #include "parallel/StdMpi.h"
 
+#include <petscis.h>
+
 #ifndef AMDIS_FE_SPACE_MAPPING_H
 #define AMDIS_FE_SPACE_MAPPING_H
 
@@ -414,6 +416,10 @@ namespace AMDiS {
     /// Compute local and global matrix indices.
     void computeMatIndex(bool globalIndex);
 
+    void createIndexSet(IS &is, 
+			int firstComponent, 
+			int nComponents);
+
   protected:
     /// Insert a new FE space DOF mapping for a given FE space.
     void addFeSpace(const FiniteElemSpace* feSpace);
diff --git a/AMDiS/src/parallel/PetscSolverFeti.cc b/AMDiS/src/parallel/PetscSolverFeti.cc
index 5db9773f..a95752f9 100644
--- a/AMDiS/src/parallel/PetscSolverFeti.cc
+++ b/AMDiS/src/parallel/PetscSolverFeti.cc
@@ -688,11 +688,11 @@ namespace AMDiS {
 
     // === Create distributed matrix for Lagrange constraints. ===
 
-    MatCreateMPIAIJ(mpiCommGlobal,
-		    lagrangeMap.getRankDofs(), localDofMap.getRankDofs(),
-		    lagrangeMap.getOverallDofs(), nGlobalOverallInterior,
-		    2, PETSC_NULL, 2, PETSC_NULL,
-		    &mat_lagrange);
+    MatCreateAIJ(mpiCommGlobal,
+		 lagrangeMap.getRankDofs(), localDofMap.getRankDofs(),
+		 lagrangeMap.getOverallDofs(), nGlobalOverallInterior,
+		 2, PETSC_NULL, 2, PETSC_NULL,
+		 &mat_lagrange);
 
     // === Create for all duals the corresponding Lagrange constraints. On ===
     // === each rank we traverse all pairs (n, m) of ranks, with n < m,    ===
@@ -789,10 +789,10 @@ namespace AMDiS {
       int nRowsRankB = localDofMap.getRankDofs();
 
       Mat matBPi;
-      MatCreateMPIAIJ(mpiCommGlobal,
-		      nRowsRankB, nRowsRankPrimal, 
-		      nGlobalOverallInterior, nRowsOverallPrimal,
-		      30, PETSC_NULL, 30, PETSC_NULL, &matBPi);
+      MatCreateAIJ(mpiCommGlobal,
+		   nRowsRankB, nRowsRankPrimal, 
+		   nGlobalOverallInterior, nRowsOverallPrimal,
+		   30, PETSC_NULL, 30, PETSC_NULL, &matBPi);
       Mat matPrimal;
 
       PetscInt nCols;
diff --git a/AMDiS/src/parallel/PetscSolverGlobalBlockMatrix.cc b/AMDiS/src/parallel/PetscSolverGlobalBlockMatrix.cc
index 9ab830e7..d7d100b6 100644
--- a/AMDiS/src/parallel/PetscSolverGlobalBlockMatrix.cc
+++ b/AMDiS/src/parallel/PetscSolverGlobalBlockMatrix.cc
@@ -54,12 +54,12 @@ namespace AMDiS {
 
     for (int i = 0; i < nBlocks; i++)
       for (int j = 0; j < nBlocks; j++)
-	MatCreateMPIAIJ(mpiCommGlobal,
-			nRankRows * blockSize[i], nRankRows * blockSize[j],
-			nOverallRows * blockSize[i], nOverallRows * blockSize[j],
-			30 * blockSize[i], PETSC_NULL, 
-			30 * blockSize[j], PETSC_NULL,
-			&(nestMat[i * nBlocks + j]));
+	MatCreateAIJ(mpiCommGlobal,
+		     nRankRows * blockSize[i], nRankRows * blockSize[j],
+		     nOverallRows * blockSize[i], nOverallRows * blockSize[j],
+		     30 * blockSize[i], PETSC_NULL, 
+		     30 * blockSize[j], PETSC_NULL,
+		     &(nestMat[i * nBlocks + j]));
 			
     for (int i = 0; i < nComponents; i++)
       for (int j = 0; j < nComponents; j++)
diff --git a/AMDiS/src/parallel/PetscSolverGlobalMatrix.cc b/AMDiS/src/parallel/PetscSolverGlobalMatrix.cc
index 9f67240d..7c84020f 100644
--- a/AMDiS/src/parallel/PetscSolverGlobalMatrix.cc
+++ b/AMDiS/src/parallel/PetscSolverGlobalMatrix.cc
@@ -70,7 +70,7 @@ namespace AMDiS {
 
     // === Create PETSc matrix with the computed nnz data structure. ===
 
-    MatCreateMPIAIJ(mpiCommGlobal, nRankRows, nRankRows, 
+    MatCreateAIJ(mpiCommGlobal, nRankRows, nRankRows, 
  		    nOverallRows, nOverallRows,
 		    0, d_nnz, 0, o_nnz, &matIntInt);
 
@@ -119,6 +119,8 @@ namespace AMDiS {
     KSPSetFromOptions(kspInterior);
     PCSetFromOptions(pcInterior);
 
+    createFieldSplit(pcInterior);
+
     // Do not delete the solution vector, use it for the initial guess.
     if (!zeroStartVector)
       KSPSetInitialGuessNonzero(kspInterior, PETSC_TRUE);
@@ -140,30 +142,30 @@ namespace AMDiS {
       MatCreateSeqAIJ(mpiCommLocal, nRowsRankInterior, nRowsRankInterior,
 		      60, PETSC_NULL, &matIntInt);
     } else {
-      MatCreateMPIAIJ(mpiCommLocal, 
-		      nRowsRankInterior, nRowsRankInterior,
-		      nRowsOverallInterior, nRowsOverallInterior,
-		      60, PETSC_NULL, 60, PETSC_NULL, &matIntInt);
+      MatCreateAIJ(mpiCommLocal, 
+		   nRowsRankInterior, nRowsRankInterior,
+		   nRowsOverallInterior, nRowsOverallInterior,
+		   60, PETSC_NULL, 60, PETSC_NULL, &matIntInt);
     }
 
     if (coarseSpaceMap) {
       int nRowsRankCoarse = coarseSpaceMap->getRankDofs();
       int nRowsOverallCoarse = coarseSpaceMap->getOverallDofs();
 
-      MatCreateMPIAIJ(mpiCommGlobal,
-		      nRowsRankCoarse, nRowsRankCoarse,
-		      nRowsOverallCoarse, nRowsOverallCoarse,
-		      60, PETSC_NULL, 60, PETSC_NULL, &matCoarseCoarse);
+      MatCreateAIJ(mpiCommGlobal,
+		   nRowsRankCoarse, nRowsRankCoarse,
+		   nRowsOverallCoarse, nRowsOverallCoarse,
+		   60, PETSC_NULL, 60, PETSC_NULL, &matCoarseCoarse);
       
-      MatCreateMPIAIJ(mpiCommGlobal,
-		      nRowsRankCoarse, nRowsRankInterior,
-		      nRowsOverallCoarse, nGlobalOverallInterior,
-		      60, PETSC_NULL, 60, PETSC_NULL, &matCoarseInt);
+      MatCreateAIJ(mpiCommGlobal,
+		   nRowsRankCoarse, nRowsRankInterior,
+		   nRowsOverallCoarse, nGlobalOverallInterior,
+		   60, PETSC_NULL, 60, PETSC_NULL, &matCoarseInt);
       
-      MatCreateMPIAIJ(mpiCommGlobal,
-		      nRowsRankInterior, nRowsRankCoarse,
-		      nGlobalOverallInterior, nRowsOverallCoarse,
-		      60, PETSC_NULL, 60, PETSC_NULL, &matIntCoarse);
+      MatCreateAIJ(mpiCommGlobal,
+		   nRowsRankInterior, nRowsRankCoarse,
+		   nGlobalOverallInterior, nRowsOverallCoarse,
+		   60, PETSC_NULL, 60, PETSC_NULL, &matIntCoarse);
     }
 
     // === Prepare traverse of sequentially created matrices. ===
@@ -540,6 +542,42 @@ namespace AMDiS {
   }
 
 
+  void PetscSolverGlobalMatrix::createFieldSplit(PC pc)
+  {
+    FUNCNAME("PetscSolverGlobalMatrix::createFieldSplit()");
+
+    vector<string> isNames;
+    Parameters::get("parallel->solver->is blocks", isNames);
+
+    int nBlocks = isNames.size();
+    if (nBlocks == 0)
+      return;
+
+    for (int i = 0; i < nBlocks; i++) {
+      MSG("Create for block %s\n", isNames[i].c_str());
+
+      vector<int> blockComponents;
+      Parameters::get("parallel->solver->is block " + lexical_cast<string>(i),
+		      blockComponents);
+      int nComponents = static_cast<int>(blockComponents.size());
+
+      TEST_EXIT(nComponents > 0)("No is block for block %d defined!\n", i);
+
+      // Check that the block components are contiguous
+      for (int j = 0; j < nComponents; j++) {
+	TEST_EXIT(blockComponents[j] == blockComponents[0] + j)
+	  ("Does not yet support not continous IS blocks! Block %s\n", 
+	   isNames[i].c_str());
+      }
+
+      IS is;
+      interiorMap->createIndexSet(is, blockComponents[0], nComponents);
+      PCFieldSplitSetIS(pc, isNames[i].c_str(), is);
+      ISDestroy(&is);
+    }
+  }
+
+
   void PetscSolverGlobalMatrix::setDofMatrix(DOFMatrix* mat,
 					     int nRowMat, int nColMat)
   {
diff --git a/AMDiS/src/parallel/PetscSolverGlobalMatrix.h b/AMDiS/src/parallel/PetscSolverGlobalMatrix.h
index ac263130..0c9457d4 100644
--- a/AMDiS/src/parallel/PetscSolverGlobalMatrix.h
+++ b/AMDiS/src/parallel/PetscSolverGlobalMatrix.h
@@ -62,6 +62,8 @@ namespace AMDiS {
 
     void destroyVectorData();
 
+    void createFieldSplit(PC pc);
+
   protected:
     /// Creates a new non zero pattern structure for the PETSc matrix. 
     void createPetscNnzStructure(Matrix<DOFMatrix*> *mat);
diff --git a/AMDiS/src/parallel/PetscSolverSchur.cc b/AMDiS/src/parallel/PetscSolverSchur.cc
index 2811227a..c4a8c997 100644
--- a/AMDiS/src/parallel/PetscSolverSchur.cc
+++ b/AMDiS/src/parallel/PetscSolverSchur.cc
@@ -190,25 +190,25 @@ namespace AMDiS {
     int nOverallBoundaryRows = nOverallBoundaryDofs * nComponents;
 
 
-    MatCreateMPIAIJ(mpiCommGlobal, 
-		    nInteriorRows, nInteriorRows, 
-		    nOverallInteriorRows, nOverallInteriorRows,
-		    100, PETSC_NULL, 100, PETSC_NULL, &matA11);
-
-    MatCreateMPIAIJ(mpiCommGlobal, 
-		    nBoundaryRows, nBoundaryRows, 
-		    nOverallBoundaryRows, nOverallBoundaryRows,
-		    100, PETSC_NULL, 100, PETSC_NULL, &matA22);
-
-    MatCreateMPIAIJ(mpiCommGlobal, 
-		    nInteriorRows, nBoundaryRows, 
-		    nOverallInteriorRows, nOverallBoundaryRows,
-		    100, PETSC_NULL, 100, PETSC_NULL, &matA12);    
-
-    MatCreateMPIAIJ(mpiCommGlobal, 
-		    nBoundaryRows, nInteriorRows, 
-		    nOverallBoundaryRows, nOverallInteriorRows,
-		    100, PETSC_NULL, 100, PETSC_NULL, &matA21);
+    MatCreateAIJ(mpiCommGlobal, 
+		 nInteriorRows, nInteriorRows, 
+		 nOverallInteriorRows, nOverallInteriorRows,
+		 100, PETSC_NULL, 100, PETSC_NULL, &matA11);
+
+    MatCreateAIJ(mpiCommGlobal, 
+		 nBoundaryRows, nBoundaryRows, 
+		 nOverallBoundaryRows, nOverallBoundaryRows,
+		 100, PETSC_NULL, 100, PETSC_NULL, &matA22);
+
+    MatCreateAIJ(mpiCommGlobal, 
+		 nInteriorRows, nBoundaryRows, 
+		 nOverallInteriorRows, nOverallBoundaryRows,
+		 100, PETSC_NULL, 100, PETSC_NULL, &matA12);    
+
+    MatCreateAIJ(mpiCommGlobal, 
+		 nBoundaryRows, nInteriorRows, 
+		 nOverallBoundaryRows, nOverallInteriorRows,
+		 100, PETSC_NULL, 100, PETSC_NULL, &matA21);
 
 
     for (int i = 0; i < nComponents; i++)
-- 
GitLab