Commit 0a2b884b authored by Thomas Witkowski's avatar Thomas Witkowski

Removed most uses of COMM_WORLD in the parallel code and replaced them with a communicator variable.

parent 0e4b89a6
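
The pattern behind every hunk below: instead of referring to MPI::COMM_WORLD (or PETSC_COMM_WORLD) directly at each call site, the parallel classes keep a communicator member and route all communication and PETSc object creation through it, so a solver could later be moved onto a sub-communicator without touching the call sites. MeshDistributor stores the communicator by value (mpiComm), while the PETSc solver classes dereference a pointer (*mpiComm), presumably relying on the implicit conversion from MPI::Intracomm to MPI_Comm. A minimal sketch of the idea, using the MPI C++ bindings this code already uses; DistributedObject and globalSum are illustrative names, not AMDiS classes:

// Sketch only: a class that owns its communicator instead of calling
// MPI::COMM_WORLD everywhere. Names are placeholders, not AMDiS code.
#include <mpi.h>

class DistributedObject {
public:
  // Defaults to the world communicator; a caller could pass a
  // sub-communicator instead without changing any other code.
  explicit DistributedObject(MPI::Intracomm comm = MPI::COMM_WORLD)
    : mpiComm(comm),
      mpiRank(mpiComm.Get_rank()),
      mpiSize(mpiComm.Get_size())
  {}

  // All collective operations go through the stored communicator.
  int globalSum(int localValue) const
  {
    int result = 0;
    mpiComm.Allreduce(&localValue, &result, 1, MPI::INT, MPI::SUM);
    return result;
  }

private:
  MPI::Intracomm mpiComm;
  int mpiRank;
  int mpiSize;
};

int main(int argc, char **argv)
{
  MPI::Init(argc, argv);
  DistributedObject obj;          // uses MPI::COMM_WORLD by default
  int nRanks = obj.globalSum(1);  // equals the size of obj's communicator
  (void) nRanks;
  MPI::Finalize();
  return 0;
}
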
@@ -91,9 +91,9 @@ namespace AMDiS {
{
FUNCNAME("MeshDistributor::ParalleDomainBase()");
-mpiRank = MPI::COMM_WORLD.Get_rank();
-mpiSize = MPI::COMM_WORLD.Get_size();
+mpiComm = MPI::COMM_WORLD;
+mpiRank = mpiComm.Get_rank();
+mpiSize = mpiComm.Get_size();
Parameters::get(name + "->repartitioning", repartitioningAllowed);
Parameters::get(name + "->debug output dir", debugOutputDir);
......
@@ -874,9 +874,9 @@ namespace AMDiS {
oss << "-" << counter;
oss << ".vtu";
-DOFVector<double> tmpa(feSpace, "tmp");
-tmpa.set(MPI::COMM_WORLD.Get_rank());
-VtkWriter::writeFile(&tmpa, oss.str());
+DOFVector<double> tmp(feSpace, "tmp");
+tmp.set(MPI::COMM_WORLD.Get_rank());
+VtkWriter::writeFile(&tmp, oss.str());
}
......
@@ -62,13 +62,13 @@ namespace AMDiS {
FUNCNAME("PetscSolver::copyVec()");
IS originIs, destIs;
-ISCreateGeneral(PETSC_COMM_WORLD,
+ISCreateGeneral(*mpiComm,
originIndex.size(),
&(originIndex[0]),
PETSC_USE_POINTER,
&originIs);
-ISCreateGeneral(PETSC_COMM_WORLD,
+ISCreateGeneral(*mpiComm,
destIndex.size(),
&(destIndex[0]),
PETSC_USE_POINTER,
......
@@ -422,7 +422,7 @@ namespace AMDiS {
// === Create distributed matrix for Lagrange constraints. ===
-MatCreateMPIAIJ(PETSC_COMM_WORLD,
+MatCreateMPIAIJ(*mpiComm,
lagrangeMap.getRankDofs(), localDofMap.getRankDofs(),
lagrangeMap.getOverallDofs(), localDofMap.getOverallDofs(),
2, PETSC_NULL, 2, PETSC_NULL,
@@ -483,14 +483,14 @@ namespace AMDiS {
schurPrimalData.mat_b_primal = &mat_b_primal;
schurPrimalData.fetiSolver = this;
-VecCreateMPI(PETSC_COMM_WORLD,
+VecCreateMPI(*mpiComm,
localDofMap.getRankDofs(), localDofMap.getOverallDofs(),
&(schurPrimalData.tmp_vec_b));
-VecCreateMPI(PETSC_COMM_WORLD,
+VecCreateMPI(*mpiComm,
primalDofMap.getRankDofs(), primalDofMap.getOverallDofs(),
&(schurPrimalData.tmp_vec_primal));
-MatCreateShell(PETSC_COMM_WORLD,
+MatCreateShell(*mpiComm,
primalDofMap.getRankDofs(), primalDofMap.getRankDofs(),
primalDofMap.getOverallDofs(), primalDofMap.getOverallDofs(),
&schurPrimalData,
@@ -498,7 +498,7 @@ namespace AMDiS {
MatShellSetOperation(mat_schur_primal, MATOP_MULT,
(void(*)(void))petscMultMatSchurPrimal);
-KSPCreate(PETSC_COMM_WORLD, &ksp_schur_primal);
+KSPCreate(*mpiComm, &ksp_schur_primal);
KSPSetOperators(ksp_schur_primal, mat_schur_primal, mat_schur_primal, SAME_NONZERO_PATTERN);
KSPSetOptionsPrefix(ksp_schur_primal, "schur_primal_");
KSPSetType(ksp_schur_primal, KSPGMRES);
@@ -516,7 +516,7 @@ namespace AMDiS {
int nRowsOverallB = localDofMap.getOverallDofs();
Mat matBPi;
-MatCreateMPIAIJ(PETSC_COMM_WORLD,
+MatCreateMPIAIJ(*mpiComm,
nRowsRankB, nRowsRankPrimal,
nRowsOverallB, nRowsOverallPrimal,
30, PETSC_NULL, 30, PETSC_NULL, &matBPi);
@@ -539,9 +539,6 @@ namespace AMDiS {
MatRestoreRow(mat_b_primal, row, &nCols, &cols, &values);
}
-int maxLocalPrimal = mat_b_primal_cols.size();
-mpi::globalMax(maxLocalPrimal);
TEST_EXIT(static_cast<int>(mat_b_primal_cols.size()) ==
primalDofMap.getLocalDofs())
("Should not happen!\n");
@@ -585,7 +582,7 @@ namespace AMDiS {
MatGetInfo(mat_primal_primal, MAT_GLOBAL_SUM, &minfo);
MSG("Schur primal matrix nnz = %f\n", minfo.nz_used);
-KSPCreate(PETSC_COMM_WORLD, &ksp_schur_primal);
+KSPCreate(*mpiComm, &ksp_schur_primal);
KSPSetOperators(ksp_schur_primal, mat_primal_primal,
mat_primal_primal, SAME_NONZERO_PATTERN);
KSPSetOptionsPrefix(ksp_schur_primal, "schur_primal_");
@@ -635,24 +632,24 @@ namespace AMDiS {
fetiData.fetiSolver = this;
fetiData.ksp_schur_primal = &ksp_schur_primal;
-VecCreateMPI(PETSC_COMM_WORLD,
+VecCreateMPI(*mpiComm,
localDofMap.getRankDofs(), localDofMap.getOverallDofs(),
&(fetiData.tmp_vec_b));
-VecCreateMPI(PETSC_COMM_WORLD,
+VecCreateMPI(*mpiComm,
lagrangeMap.getRankDofs(), lagrangeMap.getOverallDofs(),
&(fetiData.tmp_vec_lagrange));
-VecCreateMPI(PETSC_COMM_WORLD,
+VecCreateMPI(*mpiComm,
primalDofMap.getRankDofs(), primalDofMap.getOverallDofs(),
&(fetiData.tmp_vec_primal));
-MatCreateShell(PETSC_COMM_WORLD,
+MatCreateShell(*mpiComm,
lagrangeMap.getRankDofs(), lagrangeMap.getRankDofs(),
lagrangeMap.getOverallDofs(), lagrangeMap.getOverallDofs(),
&fetiData, &mat_feti);
MatShellSetOperation(mat_feti, MATOP_MULT, (void(*)(void))petscMultMatFeti);
-KSPCreate(PETSC_COMM_WORLD, &ksp_feti);
+KSPCreate(*mpiComm, &ksp_feti);
KSPSetOperators(ksp_feti, mat_feti, mat_feti, SAME_NONZERO_PATTERN);
KSPSetOptionsPrefix(ksp_feti, "feti_");
KSPSetType(ksp_feti, KSPGMRES);
@@ -687,7 +684,7 @@ namespace AMDiS {
fetiDirichletPreconData.mat_duals_interior = &mat_duals_interior;
fetiDirichletPreconData.ksp_interior = &ksp_interior;
-VecCreateMPI(PETSC_COMM_WORLD,
+VecCreateMPI(*mpiComm,
localDofMap.getRankDofs(),localDofMap.getOverallDofs(),
&(fetiDirichletPreconData.tmp_vec_b));
MatGetVecs(mat_duals_duals, PETSC_NULL,
@@ -732,7 +729,7 @@ namespace AMDiS {
}
}
-VecCreateMPI(PETSC_COMM_WORLD,
+VecCreateMPI(*mpiComm,
localDofMap.getRankDofs(),
localDofMap.getOverallDofs(),
&(fetiLumpedPreconData.tmp_vec_b));
@@ -927,17 +924,17 @@ namespace AMDiS {
MatCreateSeqAIJ(PETSC_COMM_SELF, nRowsRankB, nRowsRankB, 60, PETSC_NULL,
&mat_b_b);
-MatCreateMPIAIJ(PETSC_COMM_WORLD,
+MatCreateMPIAIJ(*mpiComm,
nRowsRankPrimal, nRowsRankPrimal,
nRowsOverallPrimal, nRowsOverallPrimal,
60, PETSC_NULL, 60, PETSC_NULL, &mat_primal_primal);
-MatCreateMPIAIJ(PETSC_COMM_WORLD,
+MatCreateMPIAIJ(*mpiComm,
nRowsRankB, nRowsRankPrimal,
nRowsOverallB, nRowsOverallPrimal,
60, PETSC_NULL, 60, PETSC_NULL, &mat_b_primal);
-MatCreateMPIAIJ(PETSC_COMM_WORLD,
+MatCreateMPIAIJ(*mpiComm,
nRowsRankPrimal, nRowsRankB,
nRowsOverallPrimal, nRowsOverallB,
30, PETSC_NULL, 30, PETSC_NULL, &mat_primal_b);
@@ -1222,9 +1219,9 @@ namespace AMDiS {
vector<const FiniteElemSpace*> feSpaces = getFeSpaces(vec);
-VecCreateMPI(PETSC_COMM_WORLD,
+VecCreateMPI(*mpiComm,
localDofMap.getRankDofs(), localDofMap.getOverallDofs(), &f_b);
-VecCreateMPI(PETSC_COMM_WORLD,
+VecCreateMPI(*mpiComm,
primalDofMap.getRankDofs(), primalDofMap.getOverallDofs(),
&f_primal);
......
@@ -53,7 +53,7 @@ namespace AMDiS {
for (int i = 0; i < nBlocks; i++)
for (int j = 0; j < nBlocks; j++)
-MatCreateMPIAIJ(PETSC_COMM_WORLD,
+MatCreateMPIAIJ(*mpiComm,
nRankRows * blockSize[i], nRankRows * blockSize[j],
nOverallRows * blockSize[i], nOverallRows * blockSize[j],
30 * blockSize[i], PETSC_NULL,
@@ -79,7 +79,7 @@ namespace AMDiS {
}
-MatCreateNest(PETSC_COMM_WORLD,
+MatCreateNest(*mpiComm,
nBlocks, PETSC_NULL, nBlocks, PETSC_NULL,
&(nestMat[0]), &petscMatrix);
@@ -91,7 +91,7 @@ namespace AMDiS {
MatAssemblyEnd(petscMatrix, MAT_FINAL_ASSEMBLY);
// === Init PETSc solver. ===
-KSPCreate(PETSC_COMM_WORLD, &solver);
+KSPCreate(*mpiComm, &solver);
KSPSetOperators(solver, petscMatrix, petscMatrix, SAME_NONZERO_PATTERN);
KSPSetFromOptions(solver);
@@ -113,7 +113,7 @@ namespace AMDiS {
nestVec.resize(nComponents);
for (int i = 0; i < nComponents; i++) {
-VecCreateMPI(PETSC_COMM_WORLD, nRankRows, nOverallRows, &(nestVec[i]));
+VecCreateMPI(*mpiComm, nRankRows, nOverallRows, &(nestVec[i]));
setDofVector(nestVec[i], vec->getDOFVector(i));
@@ -121,7 +121,7 @@ namespace AMDiS {
VecAssemblyEnd(nestVec[i]);
}
-VecCreateNest(PETSC_COMM_WORLD, nComponents, PETSC_NULL,
+VecCreateNest(*mpiComm, nComponents, PETSC_NULL,
&(nestVec[0]), &petscRhsVec);
VecAssemblyBegin(petscRhsVec);
......
@@ -31,8 +31,8 @@ namespace AMDiS {
// === Create PETSc vector (solution and a temporary vector). ===
-VecCreateMPI(PETSC_COMM_WORLD, nRankRows, nOverallRows, &petscSolVec);
-VecCreateMPI(PETSC_COMM_WORLD, nRankRows, nOverallRows, &petscTmpVec);
+VecCreateMPI(*mpiComm, nRankRows, nOverallRows, &petscSolVec);
+VecCreateMPI(*mpiComm, nRankRows, nOverallRows, &petscTmpVec);
int testddd = 1;
Parameters::get("block size", testddd);
@@ -70,7 +70,7 @@ namespace AMDiS {
// === Create PETSc matrix with the computed nnz data structure. ===
-MatCreateMPIAIJ(PETSC_COMM_WORLD, nRankRows, nRankRows,
+MatCreateMPIAIJ(*mpiComm, nRankRows, nRankRows,
nOverallRows, nOverallRows,
0, d_nnz, 0, o_nnz, &petscMatrix);
@@ -109,7 +109,7 @@ namespace AMDiS {
MatAssemblyEnd(petscMatrix, MAT_FINAL_ASSEMBLY);
// === Init PETSc solver. ===
-KSPCreate(PETSC_COMM_WORLD, &solver);
+KSPCreate(*mpiComm, &solver);
KSPGetPC(solver, &pc);
KSPSetOperators(solver, petscMatrix, petscMatrix, SAME_NONZERO_PATTERN);
KSPSetTolerances(solver, 0.0, 1e-8, PETSC_DEFAULT, PETSC_DEFAULT);
@@ -137,7 +137,7 @@ namespace AMDiS {
int nRankRows = meshDistributor->getNumberRankDofs(feSpaces);
int nOverallRows = meshDistributor->getNumberOverallDofs(feSpaces);
-VecCreateMPI(PETSC_COMM_WORLD, nRankRows, nOverallRows, &petscRhsVec);
+VecCreateMPI(*mpiComm, nRankRows, nOverallRows, &petscRhsVec);
int testddd = 1;
Parameters::get("block size", testddd);
@@ -155,7 +155,7 @@ namespace AMDiS {
if (removeRhsNullSpace) {
MSG("Remove constant null space from the RHS!\n");
MatNullSpace sp;
-MatNullSpaceCreate(PETSC_COMM_WORLD, PETSC_TRUE, 0, PETSC_NULL, &sp);
+MatNullSpaceCreate(*mpiComm, PETSC_TRUE, 0, PETSC_NULL, &sp);
MatNullSpaceRemove(sp, petscRhsVec, PETSC_NULL);
MatNullSpaceDestroy(&sp);
}
......
@@ -163,12 +163,12 @@ namespace AMDiS {
// === Create PETSc IS structurs for interior and boundary DOFs. ===
-ISCreateStride(PETSC_COMM_WORLD,
+ISCreateStride(*mpiComm,
nInteriorDofs * nComponents,
(rStartInteriorDofs + rStartBoundaryDofs) * nComponents,
1, &interiorIs);
-ISCreateStride(PETSC_COMM_WORLD,
+ISCreateStride(*mpiComm,
nBoundaryDofs * nComponents,
(rStartInteriorDofs + rStartBoundaryDofs + nInteriorDofs) * nComponents,
1, &boundaryIs);
@@ -189,22 +189,22 @@ namespace AMDiS {
int nOverallBoundaryRows = nOverallBoundaryDofs * nComponents;
-MatCreateMPIAIJ(PETSC_COMM_WORLD,
+MatCreateMPIAIJ(*mpiComm,
nInteriorRows, nInteriorRows,
nOverallInteriorRows, nOverallInteriorRows,
100, PETSC_NULL, 100, PETSC_NULL, &matA11);
-MatCreateMPIAIJ(PETSC_COMM_WORLD,
+MatCreateMPIAIJ(*mpiComm,
nBoundaryRows, nBoundaryRows,
nOverallBoundaryRows, nOverallBoundaryRows,
100, PETSC_NULL, 100, PETSC_NULL, &matA22);
-MatCreateMPIAIJ(PETSC_COMM_WORLD,
+MatCreateMPIAIJ(*mpiComm,
nInteriorRows, nBoundaryRows,
nOverallInteriorRows, nOverallBoundaryRows,
100, PETSC_NULL, 100, PETSC_NULL, &matA12);
-MatCreateMPIAIJ(PETSC_COMM_WORLD,
+MatCreateMPIAIJ(*mpiComm,
nBoundaryRows, nInteriorRows,
nOverallBoundaryRows, nOverallInteriorRows,
100, PETSC_NULL, 100, PETSC_NULL, &matA21);
@@ -237,7 +237,7 @@ namespace AMDiS {
tmpIS[0] = interiorIs;
tmpIS[1] = boundaryIs;
-MatCreateNest(PETSC_COMM_WORLD, 2, &tmpIS[0], 2, &tmpIS[0], &tmpMat[0][0], &petscMatrix);
+MatCreateNest(*mpiComm, 2, &tmpIS[0], 2, &tmpIS[0], &tmpMat[0][0], &petscMatrix);
MatNestSetVecType(petscMatrix, VECNEST);
MatAssemblyBegin(petscMatrix, MAT_FINAL_ASSEMBLY);
MatAssemblyEnd(petscMatrix, MAT_FINAL_ASSEMBLY);
@@ -246,8 +246,8 @@ namespace AMDiS {
int nRankRows = meshDistributor->getNumberRankDofs(feSpace) * nComponents;
int nOverallRows = meshDistributor->getNumberOverallDofs(feSpace) * nComponents;
-VecCreateMPI(PETSC_COMM_WORLD, nRankRows, nOverallRows, &petscSolVec);
-VecCreateMPI(PETSC_COMM_WORLD, nRankRows, nOverallRows, &petscTmpVec);
+VecCreateMPI(*mpiComm, nRankRows, nOverallRows, &petscSolVec);
+VecCreateMPI(*mpiComm, nRankRows, nOverallRows, &petscTmpVec);
}
@@ -260,7 +260,7 @@ namespace AMDiS {
int nRankRows = meshDistributor->getNumberRankDofs(feSpace) * nComponents;
int nOverallRows = meshDistributor->getNumberOverallDofs(feSpace) * nComponents;
-VecCreateMPI(PETSC_COMM_WORLD, nRankRows, nOverallRows, &petscRhsVec);
+VecCreateMPI(*mpiComm, nRankRows, nOverallRows, &petscRhsVec);
for (int i = 0; i < nComponents; i++)
setDofVector(petscRhsVec, vec->getDOFVector(i), nComponents, i);
@@ -278,7 +278,7 @@ namespace AMDiS {
const FiniteElemSpace *feSpace = meshDistributor->getFeSpace(0);
int nComponents = vec.getSize();
-KSPCreate(PETSC_COMM_WORLD, &solver);
+KSPCreate(*mpiComm, &solver);
KSPSetOperators(solver, petscMatrix, petscMatrix, SAME_NONZERO_PATTERN);
KSPSetTolerances(solver, 0.0, 1e-8, PETSC_DEFAULT, PETSC_DEFAULT);
......