Commit d089b050 authored by Thomas Witkowski

Upgrade to PETSc 3.3, added basic support for fieldsplit preconditioner.

parent 602a62d3
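PETSc 3.3 renames MatCreateMPIAIJ() to MatCreateAIJ() while keeping the argument list, so most hunks below are one-to-one call replacements; the fieldsplit support is added in PetscSolverGlobalMatrix::createFieldSplit(). For reference, a minimal standalone sketch of the renamed call, with made-up matrix sizes and preallocation values:

```cpp
#include <petscmat.h>

int main(int argc, char **argv)
{
  PetscInitialize(&argc, &argv, PETSC_NULL, PETSC_NULL);

  Mat A;
  // PETSc <= 3.2:  MatCreateMPIAIJ(PETSC_COMM_WORLD, ...);
  // PETSc 3.3: same arguments, new name.
  MatCreateAIJ(PETSC_COMM_WORLD,
               PETSC_DECIDE, PETSC_DECIDE,  // local row/column sizes
               1000, 1000,                  // global row/column sizes (example)
               30, PETSC_NULL,              // diagonal block: approx. nonzeros per row
               30, PETSC_NULL,              // off-diagonal block: approx. nonzeros per row
               &A);

  MatDestroy(&A);
  PetscFinalize();
  return 0;
}
```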
......@@ -20,16 +20,15 @@
/** \file MeshLevelData.h */
#ifndef AMDIS_MESH_LEVEL_DATA_H
#define AMDIS_MESH_LEVEL_DATA_H
#include <iostream>
#include <set>
#include <vector>
#include <mpi.h>
#include "Global.h"
#ifndef AMDIS_MESH_LEVEL_DATA_H
#define AMDIS_MESH_LEVEL_DATA_H
namespace AMDiS {
using namespace std;
......
......@@ -288,8 +288,8 @@ namespace AMDiS {
int ncon = 1; // one weight at each vertex!
int nparts = mpiSize; // number of partitions
vector<float> tpwgts(mpiSize);
float ubvec = 1.05;
vector<double> tpwgts(mpiSize);
double ubvec = 1.05;
int options[4] = {0, 0, 15, 1}; // default options
int edgecut = -1;
vector<int> part(nElements);
......
......@@ -182,7 +182,7 @@ namespace AMDiS {
void createPartitionMap(map<int, int>& partitionMap);
void setItr(float value)
void setItr(double value)
{
itr = value;
}
......@@ -195,7 +195,7 @@ namespace AMDiS {
protected:
ParMetisMesh *parMetisMesh;
float itr;
double itr;
};
}
......
......@@ -475,5 +475,41 @@ namespace AMDiS {
}
}
}
void ParallelDofMapping::createIndexSet(IS &is,
int firstComponent,
int nComponents)
{
FUNCNAME("ParallelDofMapping::createIndexSet()");
TEST_EXIT_DBG(firstComponent + nComponents <= static_cast<int>(feSpaces.size()))
("Component range exceeds the number of FE spaces!\n");
TEST_EXIT_DBG(data.count(feSpaces[firstComponent]))
("No data for FE space at address %p!\n", feSpaces[firstComponent]);
int firstRankDof = -1;
FeSpaceDofMap &feMap = data.find(feSpaces[firstComponent])->second;
DofMap &dofMap = feMap.getMap();
for (DofMap::iterator it = dofMap.begin(); it != dofMap.end(); ++it) {
if (feMap.isRankDof(it->first)) {
if (needMatIndexFromGlobal)
firstRankDof = it->second.global;
else
firstRankDof = it->first;
break;
}
}
TEST_EXIT_DBG(firstRankDof >= 0)("No rank DOF found!\n");
int firstMatIndex = dofToMatIndex.get(firstComponent, firstRankDof);
// Sum the rank-owned DOFs of every component in the block.
int nRankRows = 0;
for (int i = firstComponent; i < firstComponent + nComponents; i++)
nRankRows += data.find(feSpaces[i])->second.nRankDofs;
ISCreateStride(mpiComm, nRankRows, firstMatIndex, 1, &is);
}
}
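createIndexSet() exploits the fact that the rank-owned matrix indices of a contiguous component block form one consecutive range: it takes the matrix index of the first rank DOF and the number of rank rows, and builds a strided IS with step 1. A small sketch of such an index set, with made-up values (40 local rows starting at global matrix index 120); the helper name showBlockIS is illustrative only:

```cpp
#include <petscis.h>
#include <petscviewer.h>

void showBlockIS(MPI_Comm comm)
{
  IS is;
  // 40 consecutive global matrix indices starting at 120, step 1 --
  // the same pattern createIndexSet() produces via ISCreateStride().
  ISCreateStride(comm, 40, 120, 1, &is);
  ISView(is, PETSC_VIEWER_STDOUT_(comm));
  ISDestroy(&is);
}
```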
......@@ -29,6 +29,8 @@
#include "parallel/ParallelTypes.h"
#include "parallel/StdMpi.h"
#include <petscis.h>
#ifndef AMDIS_FE_SPACE_MAPPING_H
#define AMDIS_FE_SPACE_MAPPING_H
......@@ -414,6 +416,10 @@ namespace AMDiS {
/// Compute local and global matrix indices.
void computeMatIndex(bool globalIndex);
void createIndexSet(IS &is,
int firstComponent,
int nComponents);
protected:
/// Insert a new FE space DOF mapping for a given FE space.
void addFeSpace(const FiniteElemSpace* feSpace);
......
......@@ -688,11 +688,11 @@ namespace AMDiS {
// === Create distributed matrix for Lagrange constraints. ===
MatCreateMPIAIJ(mpiCommGlobal,
lagrangeMap.getRankDofs(), localDofMap.getRankDofs(),
lagrangeMap.getOverallDofs(), nGlobalOverallInterior,
2, PETSC_NULL, 2, PETSC_NULL,
&mat_lagrange);
MatCreateAIJ(mpiCommGlobal,
lagrangeMap.getRankDofs(), localDofMap.getRankDofs(),
lagrangeMap.getOverallDofs(), nGlobalOverallInterior,
2, PETSC_NULL, 2, PETSC_NULL,
&mat_lagrange);
// === Create the corresponding Lagrange constraints for all duals. On ===
// === each rank we traverse all pairs (n, m) of ranks, with n < m, ===
......@@ -789,10 +789,10 @@ namespace AMDiS {
int nRowsRankB = localDofMap.getRankDofs();
Mat matBPi;
MatCreateMPIAIJ(mpiCommGlobal,
nRowsRankB, nRowsRankPrimal,
nGlobalOverallInterior, nRowsOverallPrimal,
30, PETSC_NULL, 30, PETSC_NULL, &matBPi);
MatCreateAIJ(mpiCommGlobal,
nRowsRankB, nRowsRankPrimal,
nGlobalOverallInterior, nRowsOverallPrimal,
30, PETSC_NULL, 30, PETSC_NULL, &matBPi);
Mat matPrimal;
PetscInt nCols;
......
......@@ -54,12 +54,12 @@ namespace AMDiS {
for (int i = 0; i < nBlocks; i++)
for (int j = 0; j < nBlocks; j++)
MatCreateMPIAIJ(mpiCommGlobal,
nRankRows * blockSize[i], nRankRows * blockSize[j],
nOverallRows * blockSize[i], nOverallRows * blockSize[j],
30 * blockSize[i], PETSC_NULL,
30 * blockSize[j], PETSC_NULL,
&(nestMat[i * nBlocks + j]));
MatCreateAIJ(mpiCommGlobal,
nRankRows * blockSize[i], nRankRows * blockSize[j],
nOverallRows * blockSize[i], nOverallRows * blockSize[j],
30 * blockSize[i], PETSC_NULL,
30 * blockSize[j], PETSC_NULL,
&(nestMat[i * nBlocks + j]));
for (int i = 0; i < nComponents; i++)
for (int j = 0; j < nComponents; j++)
......
......@@ -70,7 +70,7 @@ namespace AMDiS {
// === Create PETSc matrix with the computed nnz data structure. ===
MatCreateMPIAIJ(mpiCommGlobal, nRankRows, nRankRows,
MatCreateAIJ(mpiCommGlobal, nRankRows, nRankRows,
nOverallRows, nOverallRows,
0, d_nnz, 0, o_nnz, &matIntInt);
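In contrast to the fixed per-row estimates (30, 60, 100) used elsewhere in this commit, this call passes exact per-row preallocation arrays. A sketch of that pattern with illustrative nonzero counts; the helper name createNnzMatrix and the values 5 and 2 are assumptions, not taken from the code:

```cpp
#include <petscmat.h>
#include <vector>

// Square MPI AIJ matrix with exact per-row preallocation, mirroring the
// "0, d_nnz, 0, o_nnz" call above (the leading 0 is ignored when the
// per-row arrays are given).
Mat createNnzMatrix(MPI_Comm comm, PetscInt nRankRows, PetscInt nOverallRows)
{
  std::vector<PetscInt> d_nnz(nRankRows, 5);  // nonzeros coupling to rank-local rows
  std::vector<PetscInt> o_nnz(nRankRows, 2);  // nonzeros coupling to remote rows

  Mat A;
  MatCreateAIJ(comm, nRankRows, nRankRows, nOverallRows, nOverallRows,
               0, &(d_nnz[0]), 0, &(o_nnz[0]), &A);
  return A;
}
```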
......@@ -119,6 +119,8 @@ namespace AMDiS {
KSPSetFromOptions(kspInterior);
PCSetFromOptions(pcInterior);
createFieldSplit(pcInterior);
// Do not delete the solution vector; use it as the initial guess.
if (!zeroStartVector)
KSPSetInitialGuessNonzero(kspInterior, PETSC_TRUE);
......@@ -140,30 +142,30 @@ namespace AMDiS {
MatCreateSeqAIJ(mpiCommLocal, nRowsRankInterior, nRowsRankInterior,
60, PETSC_NULL, &matIntInt);
} else {
MatCreateMPIAIJ(mpiCommLocal,
nRowsRankInterior, nRowsRankInterior,
nRowsOverallInterior, nRowsOverallInterior,
60, PETSC_NULL, 60, PETSC_NULL, &matIntInt);
MatCreateAIJ(mpiCommLocal,
nRowsRankInterior, nRowsRankInterior,
nRowsOverallInterior, nRowsOverallInterior,
60, PETSC_NULL, 60, PETSC_NULL, &matIntInt);
}
if (coarseSpaceMap) {
int nRowsRankCoarse = coarseSpaceMap->getRankDofs();
int nRowsOverallCoarse = coarseSpaceMap->getOverallDofs();
MatCreateMPIAIJ(mpiCommGlobal,
nRowsRankCoarse, nRowsRankCoarse,
nRowsOverallCoarse, nRowsOverallCoarse,
60, PETSC_NULL, 60, PETSC_NULL, &matCoarseCoarse);
MatCreateAIJ(mpiCommGlobal,
nRowsRankCoarse, nRowsRankCoarse,
nRowsOverallCoarse, nRowsOverallCoarse,
60, PETSC_NULL, 60, PETSC_NULL, &matCoarseCoarse);
MatCreateMPIAIJ(mpiCommGlobal,
nRowsRankCoarse, nRowsRankInterior,
nRowsOverallCoarse, nGlobalOverallInterior,
60, PETSC_NULL, 60, PETSC_NULL, &matCoarseInt);
MatCreateAIJ(mpiCommGlobal,
nRowsRankCoarse, nRowsRankInterior,
nRowsOverallCoarse, nGlobalOverallInterior,
60, PETSC_NULL, 60, PETSC_NULL, &matCoarseInt);
MatCreateMPIAIJ(mpiCommGlobal,
nRowsRankInterior, nRowsRankCoarse,
nGlobalOverallInterior, nRowsOverallCoarse,
60, PETSC_NULL, 60, PETSC_NULL, &matIntCoarse);
MatCreateAIJ(mpiCommGlobal,
nRowsRankInterior, nRowsRankCoarse,
nGlobalOverallInterior, nRowsOverallCoarse,
60, PETSC_NULL, 60, PETSC_NULL, &matIntCoarse);
}
// === Prepare traverse of sequentially created matrices. ===
......@@ -540,6 +542,42 @@ namespace AMDiS {
}
void PetscSolverGlobalMatrix::createFieldSplit(PC pc)
{
FUNCNAME("PetscSolverGlobalMatrix::createFieldSplit()");
vector<string> isNames;
Parameters::get("parallel->solver->is blocks", isNames);
int nBlocks = isNames.size();
if (nBlocks == 0)
return;
for (int i = 0; i < nBlocks; i++) {
MSG("Create for block %s\n", isNames[i].c_str());
vector<int> blockComponents;
Parameters::get("parallel->solver->is block " + lexical_cast<string>(i),
blockComponents);
int nComponents = static_cast<int>(blockComponents.size());
TEST_EXIT(nComponents > 0)("No components defined for IS block %d!\n", i);
// Check that the block's components are contiguous.
for (int j = 0; j < nComponents; j++) {
TEST_EXIT(blockComponents[j] == blockComponents[0] + j)
("Non-contiguous IS blocks are not yet supported! Block %s\n",
isNames[i].c_str());
}
IS is;
interiorMap->createIndexSet(is, blockComponents[0], nComponents);
PCFieldSplitSetIS(pc, isNames[i].c_str(), is);
ISDestroy(&is);
}
}
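createFieldSplit() connects the index sets from ParallelDofMapping::createIndexSet() to PETSc's fieldsplit preconditioner. A hedged sketch of the PETSc-side setup this amounts to for a two-block split; the block names "velocity" and "pressure", the helper configureFieldSplit, and the index ranges are illustrative assumptions:

```cpp
#include <petscksp.h>

void configureFieldSplit(KSP ksp, PetscInt nVelRows, PetscInt firstVelIndex,
                         PetscInt nPresRows, PetscInt firstPresIndex)
{
  PC pc;
  KSPGetPC(ksp, &pc);
  PCSetType(pc, PCFIELDSPLIT);

  // Each block is a contiguous, strided range of global matrix indices --
  // the same kind of IS that createIndexSet() builds.
  IS isVel, isPres;
  ISCreateStride(PETSC_COMM_WORLD, nVelRows, firstVelIndex, 1, &isVel);
  ISCreateStride(PETSC_COMM_WORLD, nPresRows, firstPresIndex, 1, &isPres);

  PCFieldSplitSetIS(pc, "velocity", isVel);
  PCFieldSplitSetIS(pc, "pressure", isPres);

  ISDestroy(&isVel);
  ISDestroy(&isPres);
}
```

The sub-solver of each named block can then be chosen at run time with PETSc options such as -pc_fieldsplit_type additive, -fieldsplit_velocity_ksp_type cg, or -fieldsplit_pressure_pc_type jacobi.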
void PetscSolverGlobalMatrix::setDofMatrix(DOFMatrix* mat,
int nRowMat, int nColMat)
{
......
......@@ -62,6 +62,8 @@ namespace AMDiS {
void destroyVectorData();
void createFieldSplit(PC pc);
protected:
/// Creates a new nonzero pattern structure for the PETSc matrix.
void createPetscNnzStructure(Matrix<DOFMatrix*> *mat);
......
......@@ -190,25 +190,25 @@ namespace AMDiS {
int nOverallBoundaryRows = nOverallBoundaryDofs * nComponents;
MatCreateMPIAIJ(mpiCommGlobal,
nInteriorRows, nInteriorRows,
nOverallInteriorRows, nOverallInteriorRows,
100, PETSC_NULL, 100, PETSC_NULL, &matA11);
MatCreateMPIAIJ(mpiCommGlobal,
nBoundaryRows, nBoundaryRows,
nOverallBoundaryRows, nOverallBoundaryRows,
100, PETSC_NULL, 100, PETSC_NULL, &matA22);
MatCreateMPIAIJ(mpiCommGlobal,
nInteriorRows, nBoundaryRows,
nOverallInteriorRows, nOverallBoundaryRows,
100, PETSC_NULL, 100, PETSC_NULL, &matA12);
MatCreateMPIAIJ(mpiCommGlobal,
nBoundaryRows, nInteriorRows,
nOverallBoundaryRows, nOverallInteriorRows,
100, PETSC_NULL, 100, PETSC_NULL, &matA21);
MatCreateAIJ(mpiCommGlobal,
nInteriorRows, nInteriorRows,
nOverallInteriorRows, nOverallInteriorRows,
100, PETSC_NULL, 100, PETSC_NULL, &matA11);
MatCreateAIJ(mpiCommGlobal,
nBoundaryRows, nBoundaryRows,
nOverallBoundaryRows, nOverallBoundaryRows,
100, PETSC_NULL, 100, PETSC_NULL, &matA22);
MatCreateAIJ(mpiCommGlobal,
nInteriorRows, nBoundaryRows,
nOverallInteriorRows, nOverallBoundaryRows,
100, PETSC_NULL, 100, PETSC_NULL, &matA12);
MatCreateAIJ(mpiCommGlobal,
nBoundaryRows, nInteriorRows,
nOverallBoundaryRows, nOverallInteriorRows,
100, PETSC_NULL, 100, PETSC_NULL, &matA21);
for (int i = 0; i < nComponents; i++)
......