Commit fc1440fc authored by Thomas Witkowski

Fixed the interior subdomain mapping problem for two-level domain decomposition.

parent 09c650f9
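The core of this change is a two-level numbering of the interior DOFs: each interior group gets a start offset (rStartInterior) within a global interior numbering of size nGlobalOverallInterior. Only the root rank of each group feeds the group's DOF count into the global numbering; a sum reduction inside the group then distributes the resulting offset, because only the root obtains the correct value. The following standalone sketch (not part of this commit; the communicator split, the per-group sizes, and the use of an MPI scan in place of AMDiS's mpi::getDofNumbering are illustrative assumptions) mimics that logic:

#include <mpi.h>
#include <iostream>

int main(int argc, char** argv)
{
  MPI::Init(argc, argv);
  MPI::Intracomm world = MPI::COMM_WORLD;

  // Hypothetical grouping: two consecutive world ranks form one interior group.
  MPI::Intracomm group = world.Split(world.Get_rank() / 2, world.Get_rank());

  // Dummy per-group DOF count, known consistently within each group.
  int nGroupDofs = 100 * (world.Get_rank() / 2 + 1);

  // Only the group root contributes the group size to the global numbering.
  int contrib = (group.Get_rank() == 0 ? nGroupDofs : 0);

  // Exclusive prefix sum over all world ranks: the root of each group now
  // holds the start offset of its group's interior block.
  int rStart = 0;
  world.Scan(&contrib, &rStart, 1, MPI_INT, MPI_SUM);
  rStart -= contrib;

  // Global number of interior DOFs over all groups.
  int nGlobal = 0;
  world.Allreduce(&contrib, &nGlobal, 1, MPI_INT, MPI_SUM);

  // Non-root ranks hold a wrong offset at this point; zero it and sum
  // within the group so every rank receives the root's offset.
  int tmp = (group.Get_rank() == 0 ? rStart : 0);
  group.Allreduce(&tmp, &rStart, 1, MPI_INT, MPI_SUM);

  std::cout << "world rank " << world.Get_rank()
            << ": rStartInterior = " << rStart
            << ", nGlobalOverallInterior = " << nGlobal << "\n";

  group.Free();
  MPI::Finalize();
  return 0;
}

With four processes (two groups of two, with 100 and 200 DOFs) this prints an offset of 0 for the first group, 100 for the second, and a global size of 300; without the in-group reduction the non-root ranks would report wrong offsets.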
@@ -190,7 +190,9 @@ namespace AMDiS {
schurPrimalSolver(0),
multiLevelTest(false),
subDomainSolver(NULL),
meshLevel(0),
rStartInterior(0),
nGlobalOverallInterior(0)
{
FUNCNAME("PetscSolverFeti::PetscSolverFeti()");
@@ -335,6 +337,39 @@ namespace AMDiS {
lagrangeMap.update();
// === Compute global offset and size of the interior DOF space. ===
if (meshLevel == 0) {
rStartInterior = 0;
nGlobalOverallInterior = localDofMap.getOverallDofs();
} else {
MeshLevelData& levelData = meshDistributor->getMeshLevelData();
MSG("RANK %d FROM %d\n",
levelData.getMpiComm(1).Get_rank(),
levelData.getMpiComm(1).Get_size());
int groupRowsInterior = 0;
if (levelData.getMpiComm(1).Get_rank() == 0)
groupRowsInterior = localDofMap.getOverallDofs();
mpi::getDofNumbering(mpiComm, groupRowsInterior,
rStartInterior, nGlobalOverallInterior);
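// At this point only rank 0 of the level-1 group holds the correct start
// offset; all other group ranks contribute 0, so the in-group sum
// reduction below hands the root's offset to every rank of the group.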
int tmp = 0;
if (levelData.getMpiComm(1).Get_rank() == 0)
tmp = rStartInterior;
levelData.getMpiComm(1).Allreduce(&tmp, &rStartInterior, 1, MPI_INT, MPI_SUM);
MSG("COMM TEST FETI-DP: %d %d %d %d %d\n",
levelData.getMpiComm(1).Get_size(),
localDofMap.getRankDofs(),
localDofMap.getOverallDofs(),
nGlobalOverallInterior, rStartInterior);
}
MSG("FETI-DP data created on mesh level %d\n", meshLevel);
for (unsigned int i = 0; i < meshDistributor->getFeSpaces().size(); i++) {
const FiniteElemSpace *feSpace = meshDistributor->getFeSpace(i);
@@ -514,6 +549,9 @@ namespace AMDiS {
localDofMap[feSpace].insertRankDof(i);
else
localDofMap[feSpace].insertNonRankDof(i);
TEST_EXIT_DBG(fetiPreconditioner == FETI_NONE)
("Not yet implemnted!\n");
}
}
}
@@ -543,7 +581,7 @@ namespace AMDiS {
lagrangeMap.getRankDofs(),
localDofMap.getRankDofs(),
lagrangeMap.getOverallDofs(),
nGlobalOverallInterior,
2, PETSC_NULL, 2, PETSC_NULL,
&mat_lagrange);
@@ -574,6 +612,10 @@ namespace AMDiS {
for (int j = i + 1; j < degree; j++) {
if (W[i] == mpiRank || W[j] == mpiRank) {
int colIndex = localDofMap.getMatIndex(k, it->first);
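// In the multilevel case the interior matrix indices are relative to the
// mesh group, so shift them by the group's global start offset.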
if (meshLevel > 0)
colIndex += rStartInterior;
double value = (W[i] == mpiRank ? 1.0 : -1.0);
MatSetValue(mat_lagrange, index, colIndex, value, INSERT_VALUES);
}
@@ -599,7 +641,7 @@ namespace AMDiS {
VecCreateMPI(mpiComm,
localDofMap.getRankDofs(),
nGlobalOverallInterior,
&(schurPrimalData.tmp_vec_b));
VecCreateMPI(mpiComm,
primalDofMap.getRankDofs(),
@@ -626,15 +668,17 @@ namespace AMDiS {
double wtime = MPI::Wtime();
TEST_EXIT_DBG(meshLevel == 0)
("Multilevel is not supported here; check usage of localDofMap.\n");
int nRowsRankPrimal = primalDofMap.getRankDofs();
int nRowsOverallPrimal = primalDofMap.getOverallDofs();
int nRowsRankB = localDofMap.getRankDofs();
int nRowsOverallB = localDofMap.getOverallDofs();
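// On a single mesh level nGlobalOverallInterior equals
// localDofMap.getOverallDofs(); using the global interior size keeps the
// matrix dimensions correct once multilevel support is added here.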
Mat matBPi;
MatCreateMPIAIJ(mpiComm,
nRowsRankB, nRowsRankPrimal,
nGlobalOverallInterior, nRowsOverallPrimal,
30, PETSC_NULL, 30, PETSC_NULL, &matBPi);
Mat matPrimal;
@@ -745,7 +789,7 @@ namespace AMDiS {
VecCreateMPI(mpiComm,
localDofMap.getRankDofs(),
nGlobalOverallInterior,
&(fetiData.tmp_vec_b));
VecCreateMPI(mpiComm,
lagrangeMap.getRankDofs(),
@@ -781,7 +825,10 @@ namespace AMDiS {
}
switch (fetiPreconditioner) {
case FETI_DIRICHLET:
TEST_EXIT_DBG(meshLevel == 0)
("Check localDofMap.getLocalMatIndex, which does not work for multilevel FETI-DP!\n");
KSPCreate(PETSC_COMM_SELF, &ksp_interior);
KSPSetOperators(ksp_interior, mat_interior_interior, mat_interior_interior,
SAME_NONZERO_PATTERN);
@@ -802,7 +849,7 @@ namespace AMDiS {
VecCreateMPI(mpiComm,
localDofMap.getRankDofs(),
nGlobalOverallInterior,
&(fetiDirichletPreconData.tmp_vec_b));
MatGetVecs(mat_duals_duals, PETSC_NULL,
&(fetiDirichletPreconData.tmp_vec_duals0));
@@ -811,10 +858,13 @@ namespace AMDiS {
MatGetVecs(mat_interior_interior, PETSC_NULL,
&(fetiDirichletPreconData.tmp_vec_interior));
TEST_EXIT_DBG(meshLevel == 0)
("This should not happen; check usage of localDofMap!\n");
for (unsigned int i = 0; i < feSpaces.size(); i++) {
DofMap &dualMap = dualDofMap[feSpaces[i]].getMap();
for (DofMap::iterator it = dualMap.begin(); it != dualMap.end(); ++it) {
DegreeOfFreedom d = it->first;
int matIndexLocal = localDofMap.getLocalMatIndex(i, d);
int matIndexDual = dualDofMap.getLocalMatIndex(i, d);
fetiDirichletPreconData.localToDualMap[matIndexLocal] = matIndexDual;
@@ -981,6 +1031,9 @@ namespace AMDiS {
// === And copy from PETSc local vectors to the DOF vectors. ===
TEST_EXIT_DBG(meshLevel == 0)
("Recovering the solution does not work for the multilevel method!\n");
int cnt = 0;
for (int i = 0; i < vec.getSize(); i++) {
DOFVector<double>& dofVec = *(vec.getDOFVector(i));
@@ -1219,10 +1272,12 @@ namespace AMDiS {
VecCreateMPI(mpiComm,
localDofMap.getRankDofs(),
nGlobalOverallInterior,
&tmp_b0);
VecCreateMPI(mpiComm,
localDofMap.getRankDofs(),
nGlobalOverallInterior,
&tmp_b1);
VecCreateMPI(mpiComm,
primalDofMap.getRankDofs(),
primalDofMap.getOverallDofs(), &tmp_primal0);
@@ -257,6 +257,11 @@ namespace AMDiS {
SubDomainSolver *subDomainSolver;
int meshLevel;
int rStartInterior;
int nGlobalOverallInterior;
};
}
@@ -18,6 +18,38 @@ namespace AMDiS {
using namespace std;
void SubDomainSolver::setDofMapping(ParallelDofMapping *coarseDofs,
ParallelDofMapping *interiorDofs)
{
coarseSpaceMap = coarseDofs;
interiorMap = interiorDofs;
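// Determine the offset of this group's interior DOFs within the global
// interior numbering; with a single interior rank no offset is needed.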
if (mpiCommInterior.Get_size() == 1) {
rStartInterior = 0;
nGlobalOverallInterior = interiorMap->getOverallDofs();
} else {
int groupRowsInterior = 0;
if (mpiCommInterior.Get_rank() == 0)
groupRowsInterior = interiorMap->getOverallDofs();
mpi::getDofNumbering(mpiCommCoarseSpace, groupRowsInterior,
rStartInterior, nGlobalOverallInterior);
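// The scan gives the correct offset only on the root of the interior
// communicator; zero it elsewhere and sum within the group to share it.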
int tmp = 0;
if (mpiCommInterior.Get_rank() == 0)
tmp = rStartInterior;
mpiCommInterior.Allreduce(&tmp, &rStartInterior, 1, MPI_INT, MPI_SUM);
MSG("COMM TEST: %d %d %d %d %d\n",
mpiCommInterior.Get_size(),
interiorMap->getRankDofs(),
interiorMap->getOverallDofs(),
nGlobalOverallInterior, rStartInterior);
}
}
void SubDomainSolver::fillPetscMatrix(Matrix<DOFMatrix*> *mat)
{
FUNCNAME("SubDomainSolver::fillPetscMatrix()");
@@ -29,10 +61,15 @@ namespace AMDiS {
int nRowsRankCoarse = coarseSpaceMap->getRankDofs();
int nRowsOverallCoarse = coarseSpaceMap->getOverallDofs();
bool multilevel = false;
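// A single-rank interior communicator allows a purely local (sequential)
// interior matrix; otherwise the interior block is distributed and the
// coupling matrices must use the global interior size.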
if (mpiCommInterior.Get_size() == 1) {
nGlobalOverallInterior = nRowsOverallInterior;
MatCreateSeqAIJ(mpiCommInterior, nRowsRankInterior, nRowsRankInterior,
60, PETSC_NULL, &matIntInt);
} else {
multilevel = true;
MatCreateMPIAIJ(mpiCommInterior,
nRowsRankInterior, nRowsRankInterior,
nRowsOverallInterior, nRowsOverallInterior,
@@ -46,12 +83,12 @@ namespace AMDiS {
MatCreateMPIAIJ(mpiCommCoarseSpace,
nRowsRankCoarse, nRowsRankInterior,
nRowsOverallCoarse, nGlobalOverallInterior,
60, PETSC_NULL, 60, PETSC_NULL, &matCoarseInt);
MatCreateMPIAIJ(mpiCommCoarseSpace,
nRowsRankInterior, nRowsRankCoarse,
nGlobalOverallInterior, nRowsOverallCoarse,
60, PETSC_NULL, 60, PETSC_NULL, &matIntCoarse);
// === Prepare traverse of sequentially created matrices. ===
@@ -130,23 +167,56 @@ namespace AMDiS {
&(cols[0]), &(values[0]), ADD_VALUES);
if (colsOther.size()) {
if (multilevel == false) {
for (unsigned int k = 0; k < colsOther.size(); k++)
colsOther[k] = interiorMap->getMatIndex(j, colsOther[k]);
} else {
for (unsigned int k = 0; k < colsOther.size(); k++) {
colsOther[k] =
interiorMap->getMatIndex(j, colsOther[k]) + rStartInterior;
if (colsOther[k] == 50723) {
MSG("RRTEST A: %d %d %d\n",
nRowsOverallInterior,
nGlobalOverallInterior,
rStartInterior);
}
}
}
MatSetValues(matCoarseInt, 1, &rowIndex, colsOther.size(),
&(colsOther[0]), &(valuesOther[0]), ADD_VALUES);
}
} else {
int localRowIndex =
(multilevel == false ? interiorMap->getLocalMatIndex(i, *cursor) :
interiorMap->getMatIndex(i, *cursor));
for (unsigned int k = 0; k < cols.size(); k++) {
if (multilevel == false)
cols[k] = interiorMap->getLocalMatIndex(j, cols[k]);
else {
cols[k] = interiorMap->getMatIndex(j, cols[k]);
if (cols[k] == 50723) {
MSG("RRTEST B: %d %d %d\n",
nRowsOverallInterior,
nGlobalOverallInterior,
rStartInterior);
}
}
}
MatSetValues(matIntInt, 1, &localRowIndex, cols.size(),
&(cols[0]), &(values[0]), ADD_VALUES);
if (colsOther.size()) {
int globalRowIndex = interiorMap->getMatIndex(i, *cursor);
if (multilevel)
globalRowIndex += rStartInterior;
for (unsigned int k = 0; k < colsOther.size(); k++)
colsOther[k] = coarseSpaceMap->getMatIndex(j, colsOther[k]);
@@ -198,7 +268,7 @@ namespace AMDiS {
VecCreateMPI(mpiCommCoarseSpace,
interiorMap->getRankDofs(),
nGlobalOverallInterior,
&rhsInterior);
for (int i = 0; i < vec->getSize(); i++) {
@@ -210,7 +280,7 @@ namespace AMDiS {
index = coarseSpaceMap->getMatIndex(i, index);
VecSetValue(rhsCoarseSpace, index, *dofIt, ADD_VALUES);
} else {
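// Interior DOF: shift the matrix index into the global interior numbering
// before inserting into the distributed rhs vector.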
index = interiorMap->getMatIndex(i, index) + rStartInterior;
VecSetValue(rhsInterior, index, *dofIt, INSERT_VALUES);
}
}
@@ -53,11 +53,7 @@ namespace AMDiS {
}
void setDofMapping(ParallelDofMapping *coarseDofs,
ParallelDofMapping *interiorDofs);
void fillPetscMatrix(Matrix<DOFMatrix*> *mat);
@@ -122,6 +118,10 @@ namespace AMDiS {
int level;
int rStartInterior;
int nGlobalOverallInterior;
MPI::Intracomm mpiCommCoarseSpace;
MPI::Intracomm mpiCommInterior;