diff --git a/AMDiS/src/parallel/PetscProblemStat.cc b/AMDiS/src/parallel/PetscProblemStat.cc index c8285f0ca0f6aab6fabe5b7ba6349ab7896343da..72569145d4fc7cab201946b88022164504377f13 100644 --- a/AMDiS/src/parallel/PetscProblemStat.cc +++ b/AMDiS/src/parallel/PetscProblemStat.cc @@ -87,7 +87,10 @@ namespace AMDiS { double wtime = MPI::Wtime(); if (createMatrixData) { - petscSolver->setMeshDistributor(meshDistributor); + petscSolver->setMeshDistributor(meshDistributor, + meshDistributor->getMpiComm(), + PETSC_COMM_SELF); + petscSolver->setDofMapping(&(meshDistributor->getDofMap())); petscSolver->fillPetscMatrix(systemMatrix); } diff --git a/AMDiS/src/parallel/PetscSolver.cc b/AMDiS/src/parallel/PetscSolver.cc index a4c1aebc8a081b7beb551acf975c1ac58fdf8b77..e3c37edc94d172708275ffc17d6265ce501ab50f 100644 --- a/AMDiS/src/parallel/PetscSolver.cc +++ b/AMDiS/src/parallel/PetscSolver.cc @@ -10,17 +10,21 @@ // See also license.opensource.txt in the distribution. +#include "AMDiS.h" #include "parallel/PetscSolver.h" #include "parallel/StdMpi.h" #include "parallel/MpiHelper.h" + namespace AMDiS { using namespace std; PetscSolver::PetscSolver() - : meshDistributor(NULL), - dofMap(NULL), + : meshDistributor(NULL), + subdomainLevel(0), + interiorMap(NULL), + coarseSpaceMap(NULL), mpiRank(-1), kspPrefix(""), removeRhsNullSpace(false) @@ -34,42 +38,65 @@ namespace AMDiS { } - void PetscSolver::printSolutionInfo(AdaptInfo *adaptInfo, - bool iterationCounter, - bool residual) + void PetscSolver::setDofMapping(ParallelDofMapping *interiorDofs, + ParallelDofMapping *coarseDofs) { - FUNCNAME("PetscSolver::printSolutionInfo()"); + interiorMap = interiorDofs; + coarseSpaceMap = coarseDofs; + + if (mpiCommLocal.Get_size() == 1) { + rStartInterior = 0; + nGlobalOverallInterior = interiorMap->getOverallDofs(); + } else { + int groupRowsInterior = 0; + if (mpiCommLocal.Get_rank() == 0) + groupRowsInterior = interiorMap->getOverallDofs(); + + mpi::getDofNumbering(mpiCommGlobal, groupRowsInterior, + rStartInterior, nGlobalOverallInterior); - if (iterationCounter) { - int iterations = 0; - KSPGetIterationNumber(solver, &iterations); - MSG(" Number of iterations: %d\n", iterations); - adaptInfo->setSolverIterations(iterations); + int tmp = 0; + if (mpiCommLocal.Get_rank() == 0) + tmp = rStartInterior; + + mpiCommLocal.Allreduce(&tmp, &rStartInterior, 1, MPI_INT, MPI_SUM); } + } + + + void PetscSolver::solve(Vec &rhs, Vec &sol) + { + FUNCNAME("PetscSolver::solve()"); - if (residual) { - double norm = 0.0; - MatMult(petscMatrix, petscSolVec, petscTmpVec); - VecAXPY(petscTmpVec, -1.0, petscRhsVec); - VecNorm(petscTmpVec, NORM_2, &norm); - MSG(" Residual norm: %e\n", norm); + PetscErrorCode solverError = KSPSolve(kspInterior, rhs, sol); + if (solverError != 0) { + AMDiS::finalize(); + exit(-1); } } + void PetscSolver::solveGlobal(Vec &rhs, Vec &sol) + { + FUNCNAME("PetscSolver::solveGlobal()"); + + ERROR_EXIT("Not implemented!\n"); + } + + void PetscSolver::copyVec(Vec& originVec, Vec& destVec, vector& originIndex, vector& destIndex) { FUNCNAME("PetscSolver::copyVec()"); IS originIs, destIs; - ISCreateGeneral(mpiComm, + ISCreateGeneral(mpiCommGlobal, originIndex.size(), &(originIndex[0]), PETSC_USE_POINTER, &originIs); - ISCreateGeneral(mpiComm, + ISCreateGeneral(mpiCommGlobal, destIndex.size(), &(destIndex[0]), PETSC_USE_POINTER, diff --git a/AMDiS/src/parallel/PetscSolver.h b/AMDiS/src/parallel/PetscSolver.h index 56f7c65262ad27fe6421f5e3ebe4ec59e0bff503..a85be2a6eaf77708c1cec37caa4c1479e055e2b6 100644 --- 
a/AMDiS/src/parallel/PetscSolver.h +++ b/AMDiS/src/parallel/PetscSolver.h @@ -50,15 +50,24 @@ namespace AMDiS { virtual ~PetscSolver() {} - void setMeshDistributor(MeshDistributor *m) + void setMeshDistributor(MeshDistributor *m, + MPI::Intracomm mpiComm0, + MPI::Intracomm mpiComm1) { meshDistributor = m; - dofMap = &(meshDistributor->getDofMap()); - mpiRank = meshDistributor->getMpiRank(); - mpiComm = meshDistributor->getMpiComm(); - mpiSelfComm = PETSC_COMM_SELF; + mpiCommGlobal = mpiComm0; + mpiCommLocal = mpiComm1; + mpiRank = mpiCommGlobal.Get_rank(); } + void setLevel(int l) + { + subdomainLevel = l; + } + + void setDofMapping(ParallelDofMapping *interiorDofs, + ParallelDofMapping *coarseDofs = NULL); + /** \brief * Create a PETSc matrix. The given DOF matrices are used to create the nnz * structure of the PETSc matrix and the values are transfered to it. @@ -77,6 +86,10 @@ namespace AMDiS { /// Use PETSc to solve the linear system of equations virtual void solvePetscMatrix(SystemVector &vec, AdaptInfo *adaptInfo) = 0; + virtual void solve(Vec &rhs, Vec &sol); + + virtual void solveGlobal(Vec &rhs, Vec &sol); + /// Destroys all matrix data structures. virtual void destroyMatrixData() = 0; @@ -90,12 +103,12 @@ namespace AMDiS { KSP getSolver() { - return solver; + return kspInterior; } PC getPc() { - return pc; + return pcInterior; } void setKspPrefix(std::string s) @@ -108,11 +121,63 @@ namespace AMDiS { removeRhsNullSpace = b; } - protected: - void printSolutionInfo(AdaptInfo* adaptInfo, - bool iterationCounter = true, - bool residual = true); + inline bool isCoarseSpace(const FiniteElemSpace *feSpace, + DegreeOfFreedom dof) + { + FUNCNAME("SubDomainSolver::isCoarseSpace()"); + TEST_EXIT_DBG(coarseSpaceMap) + ("Subdomain solver does not contain a coarse space!\n"); + + return (*coarseSpaceMap)[feSpace].isSet(dof); + } + + inline Vec& getRhsCoarseSpace() + { + FUNCNAME("SubDomainSolver::getRhsCoarseSpace()"); + TEST_EXIT_DBG(coarseSpaceMap) + ("Subdomain solver does not contain a coarse space!\n"); + + return rhsCoarseSpace; + } + + inline Vec& getRhsInterior() + { + return rhsInterior; + } + + inline Mat& getMatIntInt() + { + return matIntInt; + } + + inline Mat& getMatCoarseCoarse() + { + FUNCNAME("SubDomainSolver::getMatCoarseCoarse()"); + TEST_EXIT_DBG(coarseSpaceMap) + ("Subdomain solver does not contain a coarse space!\n"); + + return matCoarseCoarse; + } + inline Mat& getMatIntCoarse() + { + FUNCNAME("SubDomainSolver::getMatIntCoarse()"); + TEST_EXIT_DBG(coarseSpaceMap) + ("Subdomain solver does not contain a coarse space!\n"); + + return matIntCoarse; + } + + inline Mat& getMatCoarseInt() + { + FUNCNAME("SubDomainSolver::getMatCoarseInt()"); + TEST_EXIT_DBG(coarseSpaceMap) + ("Subdomain solver does not contain a coarse space!\n"); + + return matCoarseInt; + } + + protected: /** \brief * Copies between to PETSc vectors by using different index sets for the * origin and the destination vectors. @@ -140,26 +205,36 @@ namespace AMDiS { protected: MeshDistributor *meshDistributor; - ParallelDofMapping *dofMap; + int subdomainLevel; + + int rStartInterior; + + int nGlobalOverallInterior; + + ParallelDofMapping *interiorMap; + + ParallelDofMapping* coarseSpaceMap; int mpiRank; - MPI::Intracomm mpiComm; + MPI::Intracomm mpiCommGlobal; - MPI::Intracomm mpiSelfComm; + MPI::Intracomm mpiCommLocal; /// Petsc's matrix structure. 
- Mat petscMatrix; + Mat matIntInt, matCoarseCoarse, matIntCoarse, matCoarseInt; /// PETSc's vector structures for the rhs vector, the solution vector and a /// temporary vector for calculating the final residuum. - Vec petscRhsVec, petscSolVec, petscTmpVec; + Vec rhsInterior; + + Vec rhsCoarseSpace; /// PETSc solver object - KSP solver; + KSP kspInterior; /// PETSc preconditioner object - PC pc; + PC pcInterior; /// KSP database prefix string kspPrefix; diff --git a/AMDiS/src/parallel/PetscSolverFeti.cc b/AMDiS/src/parallel/PetscSolverFeti.cc index e25bd276c0cfb15a4ebcc36f467b25bc5f8c01f7..01faf258fa5b00dc3e0a8389b737dbbe3094ef23 100644 --- a/AMDiS/src/parallel/PetscSolverFeti.cc +++ b/AMDiS/src/parallel/PetscSolverFeti.cc @@ -233,14 +233,15 @@ namespace AMDiS { MeshLevelData& levelData = meshDistributor->getMeshLevelData(); if (subDomainSolver == NULL) { + subDomainSolver = new SubDomainSolver(); + if (meshLevel == 0) { - subDomainSolver = - new SubDomainSolver(meshDistributor, mpiComm, mpiSelfComm); + subDomainSolver->setMeshDistributor(meshDistributor, + mpiCommGlobal, mpiCommLocal); } else { - subDomainSolver = - new SubDomainSolver(meshDistributor, - levelData.getMpiComm(meshLevel - 1), - levelData.getMpiComm(meshLevel)); + subDomainSolver->setMeshDistributor(meshDistributor, + levelData.getMpiComm(meshLevel - 1), + levelData.getMpiComm(meshLevel)); subDomainSolver->setLevel(meshLevel); } } @@ -350,7 +351,7 @@ namespace AMDiS { if (levelData.getMpiComm(1).Get_rank() == 0) groupRowsInterior = localDofMap.getOverallDofs(); - mpi::getDofNumbering(mpiComm, groupRowsInterior, + mpi::getDofNumbering(mpiCommGlobal, groupRowsInterior, rStartInterior, nGlobalOverallInterior); int tmp = 0; @@ -383,7 +384,7 @@ namespace AMDiS { } // If multi level test, inform sub domain solver about coarse space. - subDomainSolver->setDofMapping(&primalDofMap, &localDofMap); + subDomainSolver->setDofMapping(&localDofMap, &primalDofMap); } @@ -463,7 +464,7 @@ namespace AMDiS { map > sdRankDofs; if (meshLevel > 0) { - StdMpi > stdMpi(mpiComm); + StdMpi > stdMpi(mpiCommGlobal); for (DofComm::Iterator it(meshDistributor->getDofComm().getRecvDofs(), meshLevel, feSpace); @@ -645,7 +646,7 @@ namespace AMDiS { // === Create distributed matrix for Lagrange constraints. 
=== - MatCreateMPIAIJ(mpiComm, + MatCreateMPIAIJ(mpiCommGlobal, lagrangeMap.getRankDofs(), localDofMap.getRankDofs(), lagrangeMap.getOverallDofs(), nGlobalOverallInterior, 2, PETSC_NULL, 2, PETSC_NULL, @@ -705,16 +706,16 @@ namespace AMDiS { schurPrimalData.subSolver = subDomainSolver; - VecCreateMPI(mpiComm, + VecCreateMPI(mpiCommGlobal, localDofMap.getRankDofs(), nGlobalOverallInterior, &(schurPrimalData.tmp_vec_b)); - VecCreateMPI(mpiComm, + VecCreateMPI(mpiCommGlobal, primalDofMap.getRankDofs(), primalDofMap.getOverallDofs(), &(schurPrimalData.tmp_vec_primal)); - MatCreateShell(mpiComm, + MatCreateShell(mpiCommGlobal, primalDofMap.getRankDofs(), primalDofMap.getRankDofs(), primalDofMap.getOverallDofs(), @@ -724,7 +725,7 @@ namespace AMDiS { MatShellSetOperation(mat_schur_primal, MATOP_MULT, (void(*)(void))petscMultMatSchurPrimal); - KSPCreate(mpiComm, &ksp_schur_primal); + KSPCreate(mpiCommGlobal, &ksp_schur_primal); KSPSetOperators(ksp_schur_primal, mat_schur_primal, mat_schur_primal, SAME_NONZERO_PATTERN); KSPSetOptionsPrefix(ksp_schur_primal, "schur_primal_"); KSPSetType(ksp_schur_primal, KSPGMRES); @@ -742,7 +743,7 @@ namespace AMDiS { int nRowsRankB = localDofMap.getRankDofs(); Mat matBPi; - MatCreateMPIAIJ(mpiComm, + MatCreateMPIAIJ(mpiCommGlobal, nRowsRankB, nRowsRankPrimal, nGlobalOverallInterior, nRowsOverallPrimal, 30, PETSC_NULL, 30, PETSC_NULL, &matBPi); @@ -810,7 +811,7 @@ namespace AMDiS { MatGetInfo(mat_schur_primal, MAT_GLOBAL_SUM, &minfo); MSG("Schur primal matrix nnz = %f\n", minfo.nz_used); - KSPCreate(mpiComm, &ksp_schur_primal); + KSPCreate(mpiCommGlobal, &ksp_schur_primal); KSPSetOperators(ksp_schur_primal, mat_schur_primal, mat_schur_primal, SAME_NONZERO_PATTERN); KSPSetOptionsPrefix(ksp_schur_primal, "schur_primal_"); @@ -853,20 +854,20 @@ namespace AMDiS { fetiData.subSolver = subDomainSolver; fetiData.ksp_schur_primal = &ksp_schur_primal; - VecCreateMPI(mpiComm, + VecCreateMPI(mpiCommGlobal, localDofMap.getRankDofs(), nGlobalOverallInterior, &(fetiData.tmp_vec_b)); - VecCreateMPI(mpiComm, + VecCreateMPI(mpiCommGlobal, lagrangeMap.getRankDofs(), lagrangeMap.getOverallDofs(), &(fetiData.tmp_vec_lagrange)); - VecCreateMPI(mpiComm, + VecCreateMPI(mpiCommGlobal, primalDofMap.getRankDofs(), primalDofMap.getOverallDofs(), &(fetiData.tmp_vec_primal)); - MatCreateShell(mpiComm, + MatCreateShell(mpiCommGlobal, lagrangeMap.getRankDofs(), lagrangeMap.getRankDofs(), lagrangeMap.getOverallDofs(), @@ -875,7 +876,7 @@ namespace AMDiS { MatShellSetOperation(mat_feti, MATOP_MULT, (void(*)(void))petscMultMatFeti); - KSPCreate(mpiComm, &ksp_feti); + KSPCreate(mpiCommGlobal, &ksp_feti); KSPSetOperators(ksp_feti, mat_feti, mat_feti, SAME_NONZERO_PATTERN); KSPSetOptionsPrefix(ksp_feti, "feti_"); KSPSetType(ksp_feti, KSPGMRES); @@ -913,7 +914,7 @@ namespace AMDiS { fetiDirichletPreconData.mat_duals_interior = &mat_duals_interior; fetiDirichletPreconData.ksp_interior = &ksp_interior; - VecCreateMPI(mpiComm, + VecCreateMPI(mpiCommGlobal, localDofMap.getRankDofs(), nGlobalOverallInterior, &(fetiDirichletPreconData.tmp_vec_b)); @@ -958,7 +959,7 @@ namespace AMDiS { } } - VecCreateMPI(mpiComm, + VecCreateMPI(mpiCommGlobal, localDofMap.getRankDofs(), localDofMap.getOverallDofs(), &(fetiLumpedPreconData.tmp_vec_b)); @@ -1343,18 +1344,18 @@ namespace AMDiS { // Some temporary vectors. 
Vec tmp_b0, tmp_b1, tmp_lagrange0, tmp_primal0, tmp_primal1; - VecCreateMPI(mpiComm, + VecCreateMPI(mpiCommGlobal, localDofMap.getRankDofs(), nGlobalOverallInterior, &tmp_b0); - VecCreateMPI(mpiComm, + VecCreateMPI(mpiCommGlobal, localDofMap.getRankDofs(), nGlobalOverallInterior, &tmp_b1); - VecCreateMPI(mpiComm, + VecCreateMPI(mpiCommGlobal, primalDofMap.getRankDofs(), primalDofMap.getOverallDofs(), &tmp_primal0); - VecCreateMPI(mpiComm, + VecCreateMPI(mpiCommGlobal, primalDofMap.getRankDofs(), primalDofMap.getOverallDofs(), &tmp_primal1); diff --git a/AMDiS/src/parallel/PetscSolverGlobalBlockMatrix.cc b/AMDiS/src/parallel/PetscSolverGlobalBlockMatrix.cc index 70a1b89da3e59b5eafe4a51ddc1b5e7fc8485aa6..9ab830e7a30a022b8ea3eda9037a94b1cc6ffb43 100644 --- a/AMDiS/src/parallel/PetscSolverGlobalBlockMatrix.cc +++ b/AMDiS/src/parallel/PetscSolverGlobalBlockMatrix.cc @@ -21,14 +21,14 @@ namespace AMDiS { FUNCNAME("PetscSolverGlobalBlockMatrix::fillPetscMatrix()"); TEST_EXIT_DBG(meshDistributor)("No mesh distributor object defined!\n"); - TEST_EXIT_DBG(dofMap)("No parallel mapping object defined!\n"); + TEST_EXIT_DBG(interiorMap)("No parallel mapping object defined!\n"); TEST_EXIT_DBG(mat)("No DOF matrix defined!\n"); double wtime = MPI::Wtime(); const FiniteElemSpace *feSpace = meshDistributor->getFeSpace(0); nComponents = mat->getNumRows(); - int nRankRows = (*dofMap)[feSpace].nRankDofs; - int nOverallRows = (*dofMap)[feSpace].nOverallDofs; + int nRankRows = (*interiorMap)[feSpace].nRankDofs; + int nOverallRows = (*interiorMap)[feSpace].nOverallDofs; #if (DEBUG != 0) MSG("Fill petsc matrix 1 needed %.5f seconds\n", MPI::Wtime() - wtime); @@ -54,7 +54,7 @@ namespace AMDiS { for (int i = 0; i < nBlocks; i++) for (int j = 0; j < nBlocks; j++) - MatCreateMPIAIJ(mpiComm, + MatCreateMPIAIJ(mpiCommGlobal, nRankRows * blockSize[i], nRankRows * blockSize[j], nOverallRows * blockSize[i], nOverallRows * blockSize[j], 30 * blockSize[i], PETSC_NULL, @@ -80,21 +80,21 @@ namespace AMDiS { } - MatCreateNest(mpiComm, + MatCreateNest(mpiCommGlobal, nBlocks, PETSC_NULL, nBlocks, PETSC_NULL, - &(nestMat[0]), &petscMatrix); + &(nestMat[0]), &matIntInt); #if (DEBUG != 0) MSG("Fill petsc matrix 2 needed %.5f seconds\n", MPI::Wtime() - wtime); #endif - MatAssemblyBegin(petscMatrix, MAT_FINAL_ASSEMBLY); - MatAssemblyEnd(petscMatrix, MAT_FINAL_ASSEMBLY); + MatAssemblyBegin(matIntInt, MAT_FINAL_ASSEMBLY); + MatAssemblyEnd(matIntInt, MAT_FINAL_ASSEMBLY); // === Init PETSc solver. 
=== - KSPCreate(mpiComm, &solver); - KSPSetOperators(solver, petscMatrix, petscMatrix, SAME_NONZERO_PATTERN); - KSPSetFromOptions(solver); + KSPCreate(mpiCommGlobal, &kspInterior); + KSPSetOperators(kspInterior, matIntInt, matIntInt, SAME_NONZERO_PATTERN); + KSPSetFromOptions(kspInterior); MSG("Fill petsc matrix needed %.5f seconds\n", MPI::Wtime() - wtime); } @@ -108,13 +108,13 @@ namespace AMDiS { nComponents = vec->getSize(); const FiniteElemSpace *feSpace = meshDistributor->getFeSpace(0); - int nRankRows = (*dofMap)[feSpace].nRankDofs; - int nOverallRows = (*dofMap)[feSpace].nOverallDofs; + int nRankRows = (*interiorMap)[feSpace].nRankDofs; + int nOverallRows = (*interiorMap)[feSpace].nOverallDofs; nestVec.resize(nComponents); for (int i = 0; i < nComponents; i++) { - VecCreateMPI(mpiComm, nRankRows, nOverallRows, &(nestVec[i])); + VecCreateMPI(mpiCommGlobal, nRankRows, nOverallRows, &(nestVec[i])); setDofVector(nestVec[i], vec->getDOFVector(i)); @@ -122,11 +122,11 @@ namespace AMDiS { VecAssemblyEnd(nestVec[i]); } - VecCreateNest(mpiComm, nComponents, PETSC_NULL, - &(nestVec[0]), &petscRhsVec); + VecCreateNest(mpiCommGlobal, nComponents, PETSC_NULL, + &(nestVec[0]), &rhsInterior); - VecAssemblyBegin(petscRhsVec); - VecAssemblyEnd(petscRhsVec); + VecAssemblyBegin(rhsInterior); + VecAssemblyEnd(rhsInterior); } @@ -135,14 +135,14 @@ namespace AMDiS { { FUNCNAME("PetscSolverGlobalBlockMatrix::solvePetscMatrix()"); - KSPGetPC(solver, &pc); - setBlockPreconditioner(pc); + KSPGetPC(kspInterior, &pcInterior); + setBlockPreconditioner(pcInterior); const FiniteElemSpace *feSpace = meshDistributor->getFeSpace(0); - VecDuplicate(petscRhsVec, &petscSolVec); + VecDuplicate(rhsInterior, &petscSolVec); // PETSc. - KSPSolve(solver, petscRhsVec, petscSolVec); + solve(rhsInterior, petscSolVec); // === Transfere values from PETSc's solution vectors to the DOF vectors. === for (int i = 0; i < nComponents; i++) { @@ -151,11 +151,11 @@ namespace AMDiS { Vec tmp; VecNestGetSubVec(petscSolVec, i, &tmp); - int nRankDofs = (*dofMap)[feSpace].nRankDofs; + int nRankDofs = (*interiorMap)[feSpace].nRankDofs; PetscScalar *vecPointer; VecGetArray(tmp, &vecPointer); - DofMap& d = (*dofMap)[feSpace].getMap(); + DofMap& d = (*interiorMap)[feSpace].getMap(); for (DofMap::iterator it = d.begin(); it != d.end(); ++it) if (it->second.local != -1) dofvec[it->first] = vecPointer[it->second.local]; @@ -178,8 +178,8 @@ namespace AMDiS { if (nestMat[i] != PETSC_NULL) MatDestroy(&(nestMat[i])); - MatDestroy(&petscMatrix); - KSPDestroy(&solver); + MatDestroy(&matIntInt); + KSPDestroy(&kspInterior); } @@ -187,7 +187,7 @@ namespace AMDiS { { FUNCNAME("PetscSolverGlobalBlockMatrix::destroyVectorData()"); - VecDestroy(&petscRhsVec); + VecDestroy(&rhsInterior); for (int i = 0; i < nComponents; i++) VecDestroy(&(nestVec[i])); @@ -217,8 +217,8 @@ namespace AMDiS { typedef traits::range_generator::type cursor_type; typedef traits::range_generator::type icursor_type; - int dispRowIndex = (*dofMap)[feSpace].nRankDofs * dispRowBlock; - int dispColIndex = (*dofMap)[feSpace].nRankDofs * dispColBlock; + int dispRowIndex = (*interiorMap)[feSpace].nRankDofs * dispRowBlock; + int dispColIndex = (*interiorMap)[feSpace].nRankDofs * dispColBlock; vector cols; vector values; @@ -232,7 +232,7 @@ namespace AMDiS { cend = end(mat->getBaseMatrix()); cursor != cend; ++cursor) { // Global index of the current row DOF. 
- int rowIndex = (*dofMap)[feSpace][*cursor].global + dispRowIndex; + int rowIndex = (*interiorMap)[feSpace][*cursor].global + dispRowIndex; cols.clear(); values.clear(); @@ -240,7 +240,7 @@ namespace AMDiS { for (icursor_type icursor = begin(cursor), icend = end(cursor); icursor != icend; ++icursor) { // Global index of the current column index. - int colIndex = (*dofMap)[feSpace][col(*icursor)].global + dispColIndex; + int colIndex = (*interiorMap)[feSpace][col(*icursor)].global + dispColIndex; // Ignore all zero entries, expect it is a diagonal entry. if (value(*icursor) == 0.0 && rowIndex != colIndex) @@ -267,7 +267,7 @@ namespace AMDiS { // Traverse all used DOFs in the dof vector. DOFVector::Iterator dofIt(vec, USED_DOFS); for (dofIt.reset(); !dofIt.end(); ++dofIt) { - int index = (*dofMap)[feSpace][dofIt.getDOFIndex()].global; + int index = (*interiorMap)[feSpace][dofIt.getDOFIndex()].global; double value = *dofIt; VecSetValues(petscVec, 1, &index, &value, ADD_VALUES); diff --git a/AMDiS/src/parallel/PetscSolverGlobalBlockMatrix.h b/AMDiS/src/parallel/PetscSolverGlobalBlockMatrix.h index 32a15f09631661d98f5bca6e90315b36c2021438..ef428934d5a9621c6a5d8387ba916c68bd42769d 100644 --- a/AMDiS/src/parallel/PetscSolverGlobalBlockMatrix.h +++ b/AMDiS/src/parallel/PetscSolverGlobalBlockMatrix.h @@ -69,6 +69,8 @@ namespace AMDiS { vector nestVec; + Vec petscSolVec; + /// Number of components (= number of unknowns in the PDE) int nComponents; diff --git a/AMDiS/src/parallel/PetscSolverGlobalMatrix.cc b/AMDiS/src/parallel/PetscSolverGlobalMatrix.cc index 0c978e55f30dd372a7642c9b5891bb35e5642b75..dd7b0e68877a395bae7c6b06d4bc32d4631db357 100644 --- a/AMDiS/src/parallel/PetscSolverGlobalMatrix.cc +++ b/AMDiS/src/parallel/PetscSolverGlobalMatrix.cc @@ -22,7 +22,7 @@ namespace AMDiS { FUNCNAME("PetscSolverGlobalMatrix::fillPetscMatrix()"); TEST_EXIT_DBG(meshDistributor)("No mesh distributor object defined!\n"); - TEST_EXIT_DBG(dofMap)("No parallel mapping object defined!\n"); + TEST_EXIT_DBG(interiorMap)("No parallel mapping object defined!\n"); TEST_EXIT_DBG(mat)("No DOF matrix defined!\n"); double wtime = MPI::Wtime(); @@ -34,12 +34,12 @@ namespace AMDiS { int recvAllValues = 0; int sendValue = static_cast(meshDistributor->getLastMeshChangeIndex() != lastMeshNnz); - mpiComm.Allreduce(&sendValue, &recvAllValues, 1, MPI_INT, MPI_SUM); + mpiCommGlobal.Allreduce(&sendValue, &recvAllValues, 1, MPI_INT, MPI_SUM); if (!d_nnz || recvAllValues != 0 || alwaysCreateNnzStructure) { vector feSpaces = getFeSpaces(mat); - dofMap->setComputeMatIndex(true, true); - dofMap->update(feSpaces); + interiorMap->setComputeMatIndex(true, true); + interiorMap->update(feSpaces); if (d_nnz) { delete [] d_nnz; @@ -55,28 +55,26 @@ namespace AMDiS { // === Create PETSc vector (solution and a temporary vector). === - int nRankRows = dofMap->getRankDofs(); - int nOverallRows = dofMap->getOverallDofs(); + int nRankRows = interiorMap->getRankDofs(); + int nOverallRows = interiorMap->getOverallDofs(); - VecCreateMPI(mpiComm, nRankRows, nOverallRows, &petscSolVec); - VecCreateMPI(mpiComm, nRankRows, nOverallRows, &petscTmpVec); + VecCreateMPI(mpiCommGlobal, nRankRows, nOverallRows, &petscSolVec); int testddd = 1; Parameters::get("block size", testddd); - if (testddd > 1) { + if (testddd > 1) VecSetBlockSize(petscSolVec, testddd); - VecSetBlockSize(petscTmpVec, testddd); - } + // === Create PETSc matrix with the computed nnz data structure. 
=== - MatCreateMPIAIJ(mpiComm, nRankRows, nRankRows, + MatCreateMPIAIJ(mpiCommGlobal, nRankRows, nRankRows, nOverallRows, nOverallRows, - 0, d_nnz, 0, o_nnz, &petscMatrix); + 0, d_nnz, 0, o_nnz, &matIntInt); if (testddd > 1) { - MatSetBlockSize(petscMatrix, testddd); + MatSetBlockSize(matIntInt, testddd); MSG("MAT SET BLOCK SIZE: %d\n", testddd); } @@ -86,9 +84,9 @@ namespace AMDiS { #if (DEBUG != 0) int a, b; - MatGetOwnershipRange(petscMatrix, &a, &b); - TEST_EXIT(a == dofMap->getStartDofs())("Wrong matrix ownership range!\n"); - TEST_EXIT(b == dofMap->getStartDofs() + nRankRows) + MatGetOwnershipRange(matIntInt, &a, &b); + TEST_EXIT(a == interiorMap->getStartDofs())("Wrong matrix ownership range!\n"); + TEST_EXIT(b == interiorMap->getStartDofs() + nRankRows) ("Wrong matrix ownership range!\n"); #endif @@ -105,22 +103,22 @@ namespace AMDiS { MSG("Fill petsc matrix 2 needed %.5f seconds\n", MPI::Wtime() - wtime); #endif - MatAssemblyBegin(petscMatrix, MAT_FINAL_ASSEMBLY); - MatAssemblyEnd(petscMatrix, MAT_FINAL_ASSEMBLY); + MatAssemblyBegin(matIntInt, MAT_FINAL_ASSEMBLY); + MatAssemblyEnd(matIntInt, MAT_FINAL_ASSEMBLY); // === Init PETSc solver. === - KSPCreate(mpiComm, &solver); - KSPGetPC(solver, &pc); - KSPSetOperators(solver, petscMatrix, petscMatrix, SAME_NONZERO_PATTERN); - KSPSetTolerances(solver, 0.0, 1e-8, PETSC_DEFAULT, PETSC_DEFAULT); - KSPSetType(solver, KSPBCGS); - KSPSetOptionsPrefix(solver, kspPrefix.c_str()); - KSPSetFromOptions(solver); - PCSetFromOptions(pc); + KSPCreate(mpiCommGlobal, &kspInterior); + KSPGetPC(kspInterior, &pcInterior); + KSPSetOperators(kspInterior, matIntInt, matIntInt, SAME_NONZERO_PATTERN); + KSPSetTolerances(kspInterior, 0.0, 1e-8, PETSC_DEFAULT, PETSC_DEFAULT); + KSPSetType(kspInterior, KSPBCGS); + KSPSetOptionsPrefix(kspInterior, kspPrefix.c_str()); + KSPSetFromOptions(kspInterior); + PCSetFromOptions(pcInterior); // Do not delete the solution vector, use it for the initial guess. if (!zeroStartVector) - KSPSetInitialGuessNonzero(solver, PETSC_TRUE); + KSPSetInitialGuessNonzero(kspInterior, PETSC_TRUE); MSG("Fill petsc matrix needed %.5f seconds\n", MPI::Wtime() - wtime); } @@ -131,31 +129,31 @@ namespace AMDiS { FUNCNAME("PetscSolverGlobalMatrix::fillPetscRhs()"); TEST_EXIT_DBG(vec)("No DOF vector defined!\n"); - TEST_EXIT_DBG(dofMap)("No parallel DOF map defined!\n"); + TEST_EXIT_DBG(interiorMap)("No parallel DOF map defined!\n"); - int nRankRows = dofMap->getRankDofs(); - int nOverallRows = dofMap->getOverallDofs(); + int nRankRows = interiorMap->getRankDofs(); + int nOverallRows = interiorMap->getOverallDofs(); - VecCreateMPI(mpiComm, nRankRows, nOverallRows, &petscRhsVec); + VecCreateMPI(mpiCommGlobal, nRankRows, nOverallRows, &rhsInterior); int testddd = 1; Parameters::get("block size", testddd); if (testddd > 1) - VecSetBlockSize(petscRhsVec, testddd); + VecSetBlockSize(rhsInterior, testddd); // === Transfer values from DOF vector to the PETSc vector. 
=== for (int i = 0; i < vec->getSize(); i++) - setDofVector(petscRhsVec, vec->getDOFVector(i), i); + setDofVector(rhsInterior, vec->getDOFVector(i), i); - VecAssemblyBegin(petscRhsVec); - VecAssemblyEnd(petscRhsVec); + VecAssemblyBegin(rhsInterior); + VecAssemblyEnd(rhsInterior); if (removeRhsNullSpace) { MSG("Remove constant null space from the RHS!\n"); MatNullSpace sp; - MatNullSpaceCreate(mpiComm, PETSC_TRUE, 0, PETSC_NULL, &sp); - MatNullSpaceRemove(sp, petscRhsVec, PETSC_NULL); + MatNullSpaceCreate(mpiCommGlobal, PETSC_TRUE, 0, PETSC_NULL, &sp); + MatNullSpaceRemove(sp, rhsInterior, PETSC_NULL); MatNullSpaceDestroy(&sp); } } @@ -180,11 +178,7 @@ namespace AMDiS { } // PETSc. - PetscErrorCode solverError = KSPSolve(solver, petscRhsVec, petscSolVec); - if (solverError != 0) { - AMDiS::finalize(); - exit(-1); - } + solve(rhsInterior, petscSolVec); // === Transfere values from PETSc's solution vectors to the DOF vectors. === PetscScalar *vecPointer; @@ -193,7 +187,7 @@ namespace AMDiS { int c = 0; for (int i = 0; i < nComponents; i++) { DOFVector &dv = *(vec.getDOFVector(i)); - DofMap& d = (*dofMap)[dv.getFeSpace()].getMap(); + DofMap& d = (*interiorMap)[dv.getFeSpace()].getMap(); for (DofMap::iterator it = d.begin(); it != d.end(); ++it) if (it->second.local != -1) dv[it->first] = vecPointer[c++]; @@ -205,10 +199,6 @@ namespace AMDiS { // === Synchronize DOFs at common DOFs, i.e., DOFs that correspond to === // === more than one partition. === meshDistributor->synchVector(vec); - - - // Print iteration counter and residual norm of the solution. - printSolutionInfo(adaptInfo); } @@ -216,10 +206,9 @@ namespace AMDiS { { FUNCNAME("PetscSolverGlobalMatrix::destroyMatrixData()"); - MatDestroy(&petscMatrix); - KSPDestroy(&solver); + MatDestroy(&matIntInt); + KSPDestroy(&kspInterior); VecDestroy(&petscSolVec); - VecDestroy(&petscTmpVec); } @@ -227,7 +216,7 @@ namespace AMDiS { { FUNCNAME("PetscSolverGlobalMatrix::destroyVectorData()"); - VecDestroy(&petscRhsVec); + VecDestroy(&rhsInterior); } @@ -268,7 +257,7 @@ namespace AMDiS { const FiniteElemSpace *colFe = mat->getColFeSpace(); // Global index of the current row DOF. - int globalRowDof = (*dofMap)[rowFe][*cursor].global; + int globalRowDof = (*interiorMap)[rowFe][*cursor].global; // Test if the current row DOF is a periodic DOF. bool periodicRow = perMap.isPeriodic(rowFe, globalRowDof); @@ -277,7 +266,7 @@ namespace AMDiS { // === Row DOF index is not periodic. === // Get PETSc's mat row index. - int rowIndex = dofMap->getMatIndex(nRowMat, globalRowDof); + int rowIndex = interiorMap->getMatIndex(nRowMat, globalRowDof); cols.clear(); values.clear(); @@ -286,11 +275,11 @@ namespace AMDiS { icursor != icend; ++icursor) { // Global index of the current column index. - int globalColDof = (*dofMap)[colFe][col(*icursor)].global; + int globalColDof = (*interiorMap)[colFe][col(*icursor)].global; // Test if the current col dof is a periodic dof. bool periodicCol = perMap.isPeriodic(colFe, globalColDof); // Get PETSc's mat col index. - int colIndex = dofMap->getMatIndex(nColMat, globalColDof); + int colIndex = interiorMap->getMatIndex(nColMat, globalColDof); // Ignore all zero entries, expect it is a diagonal entry. 
if (value(*icursor) == 0.0 && rowIndex != colIndex) @@ -339,13 +328,13 @@ namespace AMDiS { } for (unsigned int i = 0; i < newCols.size(); i++) { - cols.push_back(dofMap->getMatIndex(nColMat, newCols[i])); + cols.push_back(interiorMap->getMatIndex(nColMat, newCols[i])); values.push_back(scaledValue); } } } - MatSetValues(petscMatrix, 1, &rowIndex, cols.size(), + MatSetValues(matIntInt, 1, &rowIndex, cols.size(), &(cols[0]), &(values[0]), ADD_VALUES); } else { // === Row DOF index is periodic. === @@ -361,7 +350,7 @@ namespace AMDiS { for (icursor_type icursor = begin(cursor), icend = end(cursor); icursor != icend; ++icursor) { // Global index of the current column index. - int globalColDof = (*dofMap)[colFe][col(*icursor)].global; + int globalColDof = (*interiorMap)[colFe][col(*icursor)].global; // Ignore all zero entries, expect it is a diagonal entry. if (value(*icursor) == 0.0 && globalRowDof != globalColDof) @@ -426,8 +415,8 @@ namespace AMDiS { // === Translate the matrix entries to PETSc's matrix. for (unsigned int i = 0; i < entry.size(); i++) { - int rowIdx = dofMap->getMatIndex(nRowMat, entry[i].first); - int colIdx = dofMap->getMatIndex(nColMat, entry[i].second); + int rowIdx = interiorMap->getMatIndex(nRowMat, entry[i].first); + int colIdx = interiorMap->getMatIndex(nColMat, entry[i].second); colsMap[rowIdx].push_back(colIdx); valsMap[rowIdx].push_back(scaledValue); @@ -444,7 +433,7 @@ namespace AMDiS { int rowIndex = rowIt->first; - MatSetValues(petscMatrix, 1, &rowIndex, rowIt->second.size(), + MatSetValues(matIntInt, 1, &rowIndex, rowIt->second.size(), &(rowIt->second[0]), &(valsMap[rowIt->first][0]), ADD_VALUES); } } @@ -465,14 +454,14 @@ namespace AMDiS { // Traverse all used DOFs in the dof vector. DOFVector::Iterator dofIt(vec, USED_DOFS); for (dofIt.reset(); !dofIt.end(); ++dofIt) { - if (rankOnly && !(*dofMap)[feSpace].isRankDof(dofIt.getDOFIndex())) + if (rankOnly && !(*interiorMap)[feSpace].isRankDof(dofIt.getDOFIndex())) continue; // Calculate global row index of the DOF. DegreeOfFreedom globalRowDof = - (*dofMap)[feSpace][dofIt.getDOFIndex()].global; + (*interiorMap)[feSpace][dofIt.getDOFIndex()].global; // Get PETSc's mat index of the row DOF. - int index = dofMap->getMatIndex(nRowVec, globalRowDof); + int index = interiorMap->getMatIndex(nRowVec, globalRowDof); if (perMap.isPeriodic(feSpace, globalRowDof)) { std::set& perAsc = perMap.getAssociations(feSpace, globalRowDof); @@ -482,7 +471,7 @@ namespace AMDiS { for (std::set::iterator perIt = perAsc.begin(); perIt != perAsc.end(); ++perIt) { int mappedDof = perMap.map(feSpace, *perIt, globalRowDof); - int mappedIndex = dofMap->getMatIndex(nRowVec, mappedDof); + int mappedIndex = interiorMap->getMatIndex(nRowVec, mappedDof); VecSetValues(petscVec, 1, &mappedIndex, &value, ADD_VALUES); } } else { @@ -502,8 +491,8 @@ namespace AMDiS { TEST_EXIT_DBG(!o_nnz)("There is something wrong!\n"); vector feSpaces = getFeSpaces(mat); - int nRankRows = dofMap->getRankDofs(); - int rankStartIndex = dofMap->getStartDofs(); + int nRankRows = interiorMap->getRankDofs(); + int rankStartIndex = interiorMap->getStartDofs(); d_nnz = new int[nRankRows]; o_nnz = new int[nRankRows]; @@ -572,11 +561,11 @@ namespace AMDiS { for (cursor_type cursor = begin(bmat), cend = end(bmat); cursor != cend; ++cursor) { - int globalRowDof = (*dofMap)[feSpaces[i]][*cursor].global; + int globalRowDof = (*interiorMap)[feSpaces[i]][*cursor].global; // The corresponding global matrix row index of the current row DOF. 
- int petscRowIdx = dofMap->getMatIndex(i, globalRowDof); - if ((*dofMap)[feSpaces[i]].isRankDof(*cursor)) { + int petscRowIdx = interiorMap->getMatIndex(i, globalRowDof); + if ((*interiorMap)[feSpaces[i]].isRankDof(*cursor)) { // === The current row DOF is a rank DOF, so create the === // === corresponding nnz values directly on rank's nnz data. === @@ -587,7 +576,7 @@ namespace AMDiS { TEST_EXIT_DBG(localPetscRowIdx >= 0 && localPetscRowIdx < nRankRows) ("Should not happen! \n Debug info: localRowIdx = %d globalRowIndx = %d petscRowIdx = %d localPetscRowIdx = %d rStart = %d nCompontens = %d nRankRows = %d\n", *cursor, - (*dofMap)[feSpaces[i]][*cursor].global, + (*interiorMap)[feSpaces[i]][*cursor].global, petscRowIdx, localPetscRowIdx, rankStartIndex, @@ -598,8 +587,8 @@ namespace AMDiS { // Traverse all non zero entries in this row. for (icursor_type icursor = begin(cursor), icend = end(cursor); icursor != icend; ++icursor) { - int globalColDof = (*dofMap)[feSpaces[j]][col(*icursor)].global; - int petscColIdx = dofMap->getMatIndex(j, globalColDof); + int globalColDof = (*interiorMap)[feSpaces[j]][col(*icursor)].global; + int petscColIdx = interiorMap->getMatIndex(j, globalColDof); if (value(*icursor) != 0.0 || petscRowIdx == petscColIdx) { // The row DOF is a rank DOF, if also the column is a rank DOF, @@ -626,8 +615,8 @@ namespace AMDiS { icend = end(cursor); icursor != icend; ++icursor) { if (value(*icursor) != 0.0) { int globalColDof = - (*dofMap)[feSpaces[j]][col(*icursor)].global; - int petscColIdx = dofMap->getMatIndex(j, globalColDof); + (*interiorMap)[feSpaces[j]][col(*icursor)].global; + int petscColIdx = interiorMap->getMatIndex(j, globalColDof); sendMatrixEntry[sendToRank]. push_back(make_pair(petscRowIdx, petscColIdx)); @@ -641,7 +630,7 @@ namespace AMDiS { // === Send and recv the nnz row structure to/from other ranks. === - StdMpi stdMpi(mpiComm, true); + StdMpi stdMpi(mpiCommGlobal, true); stdMpi.send(sendMatrixEntry); for (std::set::iterator it = recvFromRank.begin(); it != recvFromRank.end(); ++it) diff --git a/AMDiS/src/parallel/PetscSolverGlobalMatrix.h b/AMDiS/src/parallel/PetscSolverGlobalMatrix.h index b95f68dd02966052c041fd7f14234c1a76aa9f6b..653e94fcc691e909ee25797779f325ca5e77b20e 100644 --- a/AMDiS/src/parallel/PetscSolverGlobalMatrix.h +++ b/AMDiS/src/parallel/PetscSolverGlobalMatrix.h @@ -72,6 +72,8 @@ namespace AMDiS { /// Arrays definig the non zero pattern of Petsc's matrix. int *d_nnz, *o_nnz; + Vec petscSolVec; + /** \brief * Stores the mesh change index of the mesh the nnz structure was created for. 
* Therefore, if the mesh change index is higher than this value, we have to create diff --git a/AMDiS/src/parallel/PetscSolverSchur.cc b/AMDiS/src/parallel/PetscSolverSchur.cc index ecccc62ad78c5e27f14b7588e342f9f7734641dc..2811227ad3a88cc73c4f6d41a7267ef3b5f5a4a0 100644 --- a/AMDiS/src/parallel/PetscSolverSchur.cc +++ b/AMDiS/src/parallel/PetscSolverSchur.cc @@ -23,7 +23,7 @@ namespace AMDiS { FUNCNAME("PetscSolverSchur::updateDofData()"); TEST_EXIT_DBG(meshDistributor)("No mesh distributor object defined!\n"); - TEST_EXIT_DBG(dofMap)("No parallel DOF map defined!\n"); + TEST_EXIT_DBG(interiorMap)("No parallel DOF map defined!\n"); const FiniteElemSpace *feSpace = meshDistributor->getFeSpace(0); typedef map RankToDofContainer; @@ -35,12 +35,12 @@ namespace AMDiS { !it.end(); it.nextRank()) for (; !it.endDofIter(); it.nextDof()) { boundaryLocalDofs.insert(it.getDofIndex()); - boundaryDofs.insert((*dofMap)[feSpace][it.getDofIndex()].global); + boundaryDofs.insert((*interiorMap)[feSpace][it.getDofIndex()].global); } nBoundaryDofs = boundaryDofs.size(); - mpi::getDofNumbering(mpiComm, nBoundaryDofs, + mpi::getDofNumbering(mpiCommGlobal, nBoundaryDofs, rStartBoundaryDofs, nOverallBoundaryDofs); @@ -55,11 +55,11 @@ namespace AMDiS { ("Should not happen!\n"); int rStartEdgeDofs, nOverallEdgeDofs; - mpi::getDofNumbering(mpiComm, nEdgeDofs, + mpi::getDofNumbering(mpiCommGlobal, nEdgeDofs, rStartEdgeDofs, nOverallEdgeDofs); int rStartVertexDofs, nOverallVertexDofs; - mpi::getDofNumbering(mpiComm, nVertexDofs, + mpi::getDofNumbering(mpiCommGlobal, nVertexDofs, rStartVertexDofs, nOverallVertexDofs); TEST_EXIT_DBG(nOverallEdgeDofs + nOverallVertexDofs == nOverallBoundaryDofs) @@ -72,14 +72,14 @@ namespace AMDiS { int counter = rStartEdgeDofs; for (DofContainerSet::iterator it = edgeDofs.begin(); it != edgeDofs.end(); ++it) - mapGlobalBoundaryDof[(*dofMap)[feSpace][**it].global] = + mapGlobalBoundaryDof[(*interiorMap)[feSpace][**it].global] = counter++; } { int counter = nOverallEdgeDofs + rStartVertexDofs; for (DofContainerSet::iterator it = vertexDofs.begin(); it != vertexDofs.end(); ++it) - mapGlobalBoundaryDof[(*dofMap)[feSpace][**it].global] = + mapGlobalBoundaryDof[(*interiorMap)[feSpace][**it].global] = counter++; } #else @@ -113,7 +113,7 @@ namespace AMDiS { #endif nInteriorDofs = interiorDofs.size(); - mpi::getDofNumbering(mpiComm, nInteriorDofs, + mpi::getDofNumbering(mpiCommGlobal, nInteriorDofs, rStartInteriorDofs, nOverallInteriorDofs); { @@ -128,14 +128,14 @@ namespace AMDiS { TEST_EXIT_DBG(nInteriorDofs > 0)("Should not happen!\n"); - StdMpi > stdMpi(mpiComm); + StdMpi > stdMpi(mpiCommGlobal); for (DofComm::Iterator it(meshDistributor->getDofComm().getSendDofs(), feSpace); !it.end(); it.nextRank()) { stdMpi.getSendData(it.getRank()).resize(0); stdMpi.getSendData(it.getRank()).reserve(it.getDofs().size()); for (; !it.endDofIter(); it.nextDof()) { - int globalSendDof = (*dofMap)[feSpace][it.getDofIndex()].global; + int globalSendDof = (*interiorMap)[feSpace][it.getDofIndex()].global; TEST_EXIT_DBG(mapGlobalBoundaryDof.count(globalSendDof)) ("No mapping for boundary DOF %d!\n", globalSendDof); @@ -155,7 +155,7 @@ namespace AMDiS { for (DofComm::Iterator it(meshDistributor->getDofComm().getRecvDofs(), feSpace); !it.end(); it.nextRank()) for (; !it.endDofIter(); it.nextDof()) { - int globalRecvDof = (*dofMap)[feSpace][it.getDofIndex()].global; + int globalRecvDof = (*interiorMap)[feSpace][it.getDofIndex()].global; mapGlobalBoundaryDof[globalRecvDof] = 
stdMpi.getRecvData(it.getRank())[it.getDofCounter()]; boundaryDofs.insert(globalRecvDof); @@ -164,12 +164,12 @@ namespace AMDiS { // === Create PETSc IS structurs for interior and boundary DOFs. === - ISCreateStride(mpiComm, + ISCreateStride(mpiCommGlobal, nInteriorDofs * nComponents, (rStartInteriorDofs + rStartBoundaryDofs) * nComponents, 1, &interiorIs); - ISCreateStride(mpiComm, + ISCreateStride(mpiCommGlobal, nBoundaryDofs * nComponents, (rStartInteriorDofs + rStartBoundaryDofs + nInteriorDofs) * nComponents, 1, &boundaryIs); @@ -190,22 +190,22 @@ namespace AMDiS { int nOverallBoundaryRows = nOverallBoundaryDofs * nComponents; - MatCreateMPIAIJ(mpiComm, + MatCreateMPIAIJ(mpiCommGlobal, nInteriorRows, nInteriorRows, nOverallInteriorRows, nOverallInteriorRows, 100, PETSC_NULL, 100, PETSC_NULL, &matA11); - MatCreateMPIAIJ(mpiComm, + MatCreateMPIAIJ(mpiCommGlobal, nBoundaryRows, nBoundaryRows, nOverallBoundaryRows, nOverallBoundaryRows, 100, PETSC_NULL, 100, PETSC_NULL, &matA22); - MatCreateMPIAIJ(mpiComm, + MatCreateMPIAIJ(mpiCommGlobal, nInteriorRows, nBoundaryRows, nOverallInteriorRows, nOverallBoundaryRows, 100, PETSC_NULL, 100, PETSC_NULL, &matA12); - MatCreateMPIAIJ(mpiComm, + MatCreateMPIAIJ(mpiCommGlobal, nBoundaryRows, nInteriorRows, nOverallBoundaryRows, nOverallInteriorRows, 100, PETSC_NULL, 100, PETSC_NULL, &matA21); @@ -238,17 +238,16 @@ namespace AMDiS { tmpIS[0] = interiorIs; tmpIS[1] = boundaryIs; - MatCreateNest(mpiComm, 2, &tmpIS[0], 2, &tmpIS[0], &tmpMat[0][0], &petscMatrix); - MatNestSetVecType(petscMatrix, VECNEST); - MatAssemblyBegin(petscMatrix, MAT_FINAL_ASSEMBLY); - MatAssemblyEnd(petscMatrix, MAT_FINAL_ASSEMBLY); + MatCreateNest(mpiCommGlobal, 2, &tmpIS[0], 2, &tmpIS[0], &tmpMat[0][0], &matIntInt); + MatNestSetVecType(matIntInt, VECNEST); + MatAssemblyBegin(matIntInt, MAT_FINAL_ASSEMBLY); + MatAssemblyEnd(matIntInt, MAT_FINAL_ASSEMBLY); - int nRankRows = (*dofMap)[feSpace].nRankDofs * nComponents; - int nOverallRows = (*dofMap)[feSpace].nOverallDofs * nComponents; + int nRankRows = (*interiorMap)[feSpace].nRankDofs * nComponents; + int nOverallRows = (*interiorMap)[feSpace].nOverallDofs * nComponents; - VecCreateMPI(mpiComm, nRankRows, nOverallRows, &petscSolVec); - VecCreateMPI(mpiComm, nRankRows, nOverallRows, &petscTmpVec); + VecCreateMPI(mpiCommGlobal, nRankRows, nOverallRows, &petscSolVec); } @@ -258,16 +257,16 @@ namespace AMDiS { const FiniteElemSpace *feSpace = meshDistributor->getFeSpace(0); int nComponents = vec->getSize(); - int nRankRows = (*dofMap)[feSpace].nRankDofs * nComponents; - int nOverallRows = (*dofMap)[feSpace].nOverallDofs * nComponents; + int nRankRows = (*interiorMap)[feSpace].nRankDofs * nComponents; + int nOverallRows = (*interiorMap)[feSpace].nOverallDofs * nComponents; - VecCreateMPI(mpiComm, nRankRows, nOverallRows, &petscRhsVec); + VecCreateMPI(mpiCommGlobal, nRankRows, nOverallRows, &rhsInterior); for (int i = 0; i < nComponents; i++) - setDofVector(petscRhsVec, vec->getDOFVector(i), nComponents, i); + setDofVector(rhsInterior, vec->getDOFVector(i), nComponents, i); - VecAssemblyBegin(petscRhsVec); - VecAssemblyEnd(petscRhsVec); + VecAssemblyBegin(rhsInterior); + VecAssemblyEnd(rhsInterior); } @@ -279,20 +278,20 @@ namespace AMDiS { const FiniteElemSpace *feSpace = meshDistributor->getFeSpace(0); int nComponents = vec.getSize(); - KSPCreate(mpiComm, &solver); + KSPCreate(mpiCommGlobal, &kspInterior); - KSPSetOperators(solver, petscMatrix, petscMatrix, SAME_NONZERO_PATTERN); - KSPSetTolerances(solver, 0.0, 1e-8, PETSC_DEFAULT, 
PETSC_DEFAULT); - KSPSetFromOptions(solver); + KSPSetOperators(kspInterior, matIntInt, matIntInt, SAME_NONZERO_PATTERN); + KSPSetTolerances(kspInterior, 0.0, 1e-8, PETSC_DEFAULT, PETSC_DEFAULT); + KSPSetFromOptions(kspInterior); - KSPGetPC(solver, &pc); - PCSetType(pc, PCFIELDSPLIT); - PCFieldSplitSetIS(pc, "interior", interiorIs); - PCFieldSplitSetIS(pc, "boundary", boundaryIs); - PCSetFromOptions(pc); + KSPGetPC(kspInterior, &pcInterior); + PCSetType(pcInterior, PCFIELDSPLIT); + PCFieldSplitSetIS(pcInterior, "interior", interiorIs); + PCFieldSplitSetIS(pcInterior, "boundary", boundaryIs); + PCSetFromOptions(pcInterior); - KSPSolve(solver, petscRhsVec, petscSolVec); + KSPSolve(kspInterior, rhsInterior, petscSolVec); // === Transfere values from PETSc's solution vectors to AMDiS vectors. === @@ -302,7 +301,7 @@ namespace AMDiS { for (int i = 0; i < nComponents; i++) { DOFVector::Iterator dofIt(vec.getDOFVector(i), USED_DOFS); for (dofIt.reset(); !dofIt.end(); ++dofIt) { - DegreeOfFreedom globalRowDof = (*dofMap)[feSpace][dofIt.getDOFIndex()].global; + DegreeOfFreedom globalRowDof = (*interiorMap)[feSpace][dofIt.getDOFIndex()].global; if (boundaryDofs.count(globalRowDof)) { int index = (mapGlobalBoundaryDof[globalRowDof] - rStartBoundaryDofs + nInteriorDofs) * (i + 1); @@ -323,24 +322,18 @@ namespace AMDiS { meshDistributor->synchVector(vec); - - // === Print iteration counter and residual norm of the solution. === - printSolutionInfo(adaptInfo); - - // === Destroy PETSC's variables. === - VecDestroy(&petscRhsVec); + VecDestroy(&rhsInterior); VecDestroy(&petscSolVec); - VecDestroy(&petscTmpVec); MatDestroy(&matA11); MatDestroy(&matA12); MatDestroy(&matA21); MatDestroy(&matA22); - MatDestroy(&petscMatrix); + MatDestroy(&matIntInt); - KSPDestroy(&solver); + KSPDestroy(&kspInterior); } @@ -373,7 +366,7 @@ namespace AMDiS { cend = end(mat->getBaseMatrix()); cursor != cend; ++cursor) { // Global index of the current row DOF. - int globalRowDof = (*dofMap)[feSpace][*cursor].global; + int globalRowDof = (*interiorMap)[feSpace][*cursor].global; colsBoundary.clear(); colsInterior.clear(); @@ -382,7 +375,7 @@ namespace AMDiS { for (icursor_type icursor = begin(cursor), icend = end(cursor); icursor != icend; ++icursor) { - int globalColDof = (*dofMap)[feSpace][col(*icursor)].global; + int globalColDof = (*interiorMap)[feSpace][col(*icursor)].global; if (boundaryDofs.count(globalColDof)) { TEST_EXIT_DBG(mapGlobalBoundaryDof.count(globalColDof)) @@ -441,12 +434,12 @@ namespace AMDiS { DOFVector::Iterator dofIt(vec, USED_DOFS); for (dofIt.reset(); !dofIt.end(); ++dofIt) { - if (rankOnly && !(*dofMap)[feSpace].isRankDof(dofIt.getDOFIndex())) + if (rankOnly && !(*interiorMap)[feSpace].isRankDof(dofIt.getDOFIndex())) continue; // Calculate global row index of the DOF. 
DegreeOfFreedom globalRowDof = - (*dofMap)[feSpace][dofIt.getDOFIndex()].global; + (*interiorMap)[feSpace][dofIt.getDOFIndex()].global; double value = *dofIt; if (boundaryDofs.count(globalRowDof)) { @@ -457,7 +450,7 @@ namespace AMDiS { (rStartInteriorDofs + nInteriorDofs + mapGlobalBoundaryDof[globalRowDof]) * dispMult + dispAdd; - VecSetValues(petscRhsVec, 1, &index, &value, ADD_VALUES); + VecSetValues(rhsInterior, 1, &index, &value, ADD_VALUES); } else { TEST_EXIT_DBG(mapGlobalInteriorDof.count(globalRowDof)) ("Should not happen!\n"); @@ -465,7 +458,7 @@ namespace AMDiS { int index = (rStartBoundaryDofs + mapGlobalInteriorDof[globalRowDof]) * dispMult + dispAdd; - VecSetValues(petscRhsVec, 1, &index, &value, ADD_VALUES); + VecSetValues(rhsInterior, 1, &index, &value, ADD_VALUES); } } } diff --git a/AMDiS/src/parallel/PetscSolverSchur.h b/AMDiS/src/parallel/PetscSolverSchur.h index c9e8446c316a11640868bf042c60250ea0286a28..598a2943b4aaca923cada3b248007ff8c7fab00c 100644 --- a/AMDiS/src/parallel/PetscSolverSchur.h +++ b/AMDiS/src/parallel/PetscSolverSchur.h @@ -91,6 +91,8 @@ namespace AMDiS { Mat matA11, matA12, matA21, matA22; IS interiorIs, boundaryIs; + + Vec petscSolVec; }; } diff --git a/AMDiS/src/parallel/SubDomainSolver.cc b/AMDiS/src/parallel/SubDomainSolver.cc index 3aeca45f1e8a1313b7938579f33b505312f2efd5..9b13483d6616a9ec09836e15ab4d08b059b83dc7 100644 --- a/AMDiS/src/parallel/SubDomainSolver.cc +++ b/AMDiS/src/parallel/SubDomainSolver.cc @@ -13,34 +13,35 @@ #include "parallel/SubDomainSolver.h" #include "parallel/ParallelDebug.h" #include "SystemVector.h" +#include "AMDiS.h" namespace AMDiS { using namespace std; - void SubDomainSolver::setDofMapping(ParallelDofMapping *coarseDofs, - ParallelDofMapping *interiorDofs) + void SubDomainSolver::setDofMapping(ParallelDofMapping *interiorDofs, + ParallelDofMapping *coarseDofs) { - coarseSpaceMap = coarseDofs; interiorMap = interiorDofs; + coarseSpaceMap = coarseDofs; - if (mpiCommInterior.Get_size() == 1) { + if (mpiCommLocal.Get_size() == 1) { rStartInterior = 0; nGlobalOverallInterior = interiorMap->getOverallDofs(); } else { int groupRowsInterior = 0; - if (mpiCommInterior.Get_rank() == 0) + if (mpiCommLocal.Get_rank() == 0) groupRowsInterior = interiorMap->getOverallDofs(); - mpi::getDofNumbering(mpiCommCoarseSpace, groupRowsInterior, + mpi::getDofNumbering(mpiCommGlobal, groupRowsInterior, rStartInterior, nGlobalOverallInterior); int tmp = 0; - if (mpiCommInterior.Get_rank() == 0) + if (mpiCommLocal.Get_rank() == 0) tmp = rStartInterior; - mpiCommInterior.Allreduce(&tmp, &rStartInterior, 1, MPI_INT, MPI_SUM); + mpiCommLocal.Allreduce(&tmp, &rStartInterior, 1, MPI_INT, MPI_SUM); } } @@ -53,38 +54,38 @@ namespace AMDiS { int nRowsRankInterior = interiorMap->getRankDofs(); int nRowsOverallInterior = interiorMap->getOverallDofs(); - int nRowsRankCoarse = coarseSpaceMap->getRankDofs(); - int nRowsOverallCoarse = coarseSpaceMap->getOverallDofs(); - bool multilevel = false; - if (mpiCommInterior.Get_size() == 1) { + if (subdomainLevel == 0) { nGlobalOverallInterior = nRowsOverallInterior; - MatCreateSeqAIJ(mpiCommInterior, nRowsRankInterior, nRowsRankInterior, + MatCreateSeqAIJ(mpiCommLocal, nRowsRankInterior, nRowsRankInterior, 60, PETSC_NULL, &matIntInt); } else { - multilevel = true; - - MatCreateMPIAIJ(mpiCommInterior, + MatCreateMPIAIJ(mpiCommLocal, nRowsRankInterior, nRowsRankInterior, nRowsOverallInterior, nRowsOverallInterior, 60, PETSC_NULL, 60, PETSC_NULL, &matIntInt); } - MatCreateMPIAIJ(mpiCommCoarseSpace, - 
nRowsRankCoarse, nRowsRankCoarse, - nRowsOverallCoarse, nRowsOverallCoarse, - 60, PETSC_NULL, 60, PETSC_NULL, &matCoarseCoarse); - - MatCreateMPIAIJ(mpiCommCoarseSpace, - nRowsRankCoarse, nRowsRankInterior, - nRowsOverallCoarse, nGlobalOverallInterior, - 60, PETSC_NULL, 60, PETSC_NULL, &matCoarseInt); - - MatCreateMPIAIJ(mpiCommCoarseSpace, - nRowsRankInterior, nRowsRankCoarse, - nGlobalOverallInterior, nRowsOverallCoarse, - 60, PETSC_NULL, 60, PETSC_NULL, &matIntCoarse); + if (coarseSpaceMap) { + int nRowsRankCoarse = coarseSpaceMap->getRankDofs(); + int nRowsOverallCoarse = coarseSpaceMap->getOverallDofs(); + + MatCreateMPIAIJ(mpiCommGlobal, + nRowsRankCoarse, nRowsRankCoarse, + nRowsOverallCoarse, nRowsOverallCoarse, + 60, PETSC_NULL, 60, PETSC_NULL, &matCoarseCoarse); + + MatCreateMPIAIJ(mpiCommGlobal, + nRowsRankCoarse, nRowsRankInterior, + nRowsOverallCoarse, nGlobalOverallInterior, + 60, PETSC_NULL, 60, PETSC_NULL, &matCoarseInt); + + MatCreateMPIAIJ(mpiCommGlobal, + nRowsRankInterior, nRowsRankCoarse, + nGlobalOverallInterior, nRowsOverallCoarse, + 60, PETSC_NULL, 60, PETSC_NULL, &matIntCoarse); + } // === Prepare traverse of sequentially created matrices. === @@ -162,7 +163,7 @@ namespace AMDiS { &(cols[0]), &(values[0]), ADD_VALUES); if (colsOther.size()) { - if (multilevel == false) { + if (subdomainLevel == 0) { for (unsigned int k = 0; k < colsOther.size(); k++) colsOther[k] = interiorMap->getMatIndex(j, colsOther[k]); } else { @@ -176,11 +177,11 @@ namespace AMDiS { } } else { int localRowIndex = - (multilevel == false ? interiorMap->getLocalMatIndex(i, *cursor) : + (subdomainLevel == 0 ? interiorMap->getLocalMatIndex(i, *cursor) : interiorMap->getMatIndex(i, *cursor)); for (unsigned int k = 0; k < cols.size(); k++) { - if (multilevel == false) + if (subdomainLevel == 0) cols[k] = interiorMap->getLocalMatIndex(j, cols[k]); else cols[k] = interiorMap->getMatIndex(j, cols[k]); @@ -192,7 +193,7 @@ namespace AMDiS { if (colsOther.size()) { int globalRowIndex = interiorMap->getMatIndex(i, *cursor); - if (multilevel) + if (subdomainLevel != 0) globalRowIndex += rStartInterior; for (unsigned int k = 0; k < colsOther.size(); k++) @@ -211,55 +212,32 @@ namespace AMDiS { MatAssemblyBegin(matIntInt, MAT_FINAL_ASSEMBLY); MatAssemblyEnd(matIntInt, MAT_FINAL_ASSEMBLY); - MatAssemblyBegin(matCoarseCoarse, MAT_FINAL_ASSEMBLY); - MatAssemblyEnd(matCoarseCoarse, MAT_FINAL_ASSEMBLY); - - MatAssemblyBegin(matIntCoarse, MAT_FINAL_ASSEMBLY); - MatAssemblyEnd(matIntCoarse, MAT_FINAL_ASSEMBLY); - - MatAssemblyBegin(matCoarseInt, MAT_FINAL_ASSEMBLY); - MatAssemblyEnd(matCoarseInt, MAT_FINAL_ASSEMBLY); + if (coarseSpaceMap) { + MatAssemblyBegin(matCoarseCoarse, MAT_FINAL_ASSEMBLY); + MatAssemblyEnd(matCoarseCoarse, MAT_FINAL_ASSEMBLY); + + MatAssemblyBegin(matIntCoarse, MAT_FINAL_ASSEMBLY); + MatAssemblyEnd(matIntCoarse, MAT_FINAL_ASSEMBLY); + + MatAssemblyBegin(matCoarseInt, MAT_FINAL_ASSEMBLY); + MatAssemblyEnd(matCoarseInt, MAT_FINAL_ASSEMBLY); + } // === Create solver for the non primal (thus local) variables. 
=== - KSPCreate(mpiCommInterior, &kspInterior); + KSPCreate(mpiCommLocal, &kspInterior); KSPSetOperators(kspInterior, matIntInt, matIntInt, SAME_NONZERO_PATTERN); KSPSetOptionsPrefix(kspInterior, "interior_"); KSPSetType(kspInterior, KSPPREONLY); PC pcInterior; KSPGetPC(kspInterior, &pcInterior); PCSetType(pcInterior, PCLU); - if (multilevel == false) + if (subdomainLevel == 0) PCFactorSetMatSolverPackage(pcInterior, MATSOLVERUMFPACK); else PCFactorSetMatSolverPackage(pcInterior, MATSOLVERMUMPS); KSPSetFromOptions(kspInterior); - - - if (multilevel == false) { - PetscViewer matview; - PetscViewerBinaryOpen(mpiCommCoarseSpace, "mat_coarse_int.dat", - FILE_MODE_WRITE, &matview); - MatView(matCoarseInt, matview); - PetscViewerDestroy(&matview); - - PetscViewerBinaryOpen(mpiCommCoarseSpace, "mat_int_coarse.dat", - FILE_MODE_WRITE, &matview); - MatView(matIntCoarse, matview); - PetscViewerDestroy(&matview); - } else { - PetscViewer matview; - PetscViewerBinaryOpen(mpiCommCoarseSpace, "mat_coarse_int.dat", - FILE_MODE_WRITE, &matview); - MatView(matCoarseInt, matview); - PetscViewerDestroy(&matview); - - PetscViewerBinaryOpen(mpiCommCoarseSpace, "mat_int_coarse.dat", - FILE_MODE_WRITE, &matview); - MatView(matIntCoarse, matview); - PetscViewerDestroy(&matview); - } } @@ -267,16 +245,18 @@ namespace AMDiS { { FUNCNAME("SubDomainSolver::fillPetscRhs()"); - VecCreateMPI(mpiCommCoarseSpace, - coarseSpaceMap->getRankDofs(), - coarseSpaceMap->getOverallDofs(), - &rhsCoarseSpace); - - VecCreateMPI(mpiCommCoarseSpace, + VecCreateMPI(mpiCommGlobal, interiorMap->getRankDofs(), nGlobalOverallInterior, &rhsInterior); + if (coarseSpaceMap) + VecCreateMPI(mpiCommGlobal, + coarseSpaceMap->getRankDofs(), + coarseSpaceMap->getOverallDofs(), + &rhsCoarseSpace); + + for (int i = 0; i < vec->getSize(); i++) { const FiniteElemSpace *feSpace = vec->getDOFVector(i)->getFeSpace(); DOFVector::Iterator dofIt(vec->getDOFVector(i), USED_DOFS); @@ -292,11 +272,13 @@ namespace AMDiS { } } - VecAssemblyBegin(rhsCoarseSpace); - VecAssemblyEnd(rhsCoarseSpace); - VecAssemblyBegin(rhsInterior); VecAssemblyEnd(rhsInterior); + + if (coarseSpaceMap) { + VecAssemblyBegin(rhsCoarseSpace); + VecAssemblyEnd(rhsCoarseSpace); + } } @@ -330,7 +312,13 @@ namespace AMDiS { void SubDomainSolver::solve(Vec &rhs, Vec &sol) { - KSPSolve(kspInterior, rhs, sol); + FUNCNAME("SubDomainSolver::solve()"); + + PetscErrorCode solverError = KSPSolve(kspInterior, rhs, sol); + if (solverError != 0) { + AMDiS::finalize(); + exit(-1); + } } @@ -338,14 +326,11 @@ namespace AMDiS { { FUNCNAME("SubDomainSolver::solveGlobal()"); - int ml = 0; - Parameters::get("parallel->multi level test", ml); - Vec tmp; - if (ml == 0) - VecCreateSeq(PETSC_COMM_SELF, interiorMap->getRankDofs(), &tmp); + if (mpiCommLocal.Get_size() == 1) + VecCreateSeq(mpiCommLocal, interiorMap->getRankDofs(), &tmp); else - VecCreateMPI(mpiCommInterior, + VecCreateMPI(mpiCommLocal, interiorMap->getRankDofs(), interiorMap->getOverallDofs(), &tmp); diff --git a/AMDiS/src/parallel/SubDomainSolver.h b/AMDiS/src/parallel/SubDomainSolver.h index ff27f90a9aeb8048057276784f4a332d77c98b96..27bdcd8b36823c8b0a60994ba22edfa9377190f6 100644 --- a/AMDiS/src/parallel/SubDomainSolver.h +++ b/AMDiS/src/parallel/SubDomainSolver.h @@ -36,24 +36,29 @@ namespace AMDiS { class SubDomainSolver { public: - SubDomainSolver(MeshDistributor *md, - MPI::Intracomm mpiComm0, - MPI::Intracomm mpiComm1) - : meshDistributor(md), - level(0), - mpiCommCoarseSpace(mpiComm0), - mpiCommInterior(mpiComm1), - coarseSpaceMap(NULL), - 
interiorMap(NULL) + SubDomainSolver() + : meshDistributor(NULL), + subdomainLevel(0), + interiorMap(NULL), + coarseSpaceMap(NULL) {} + void setMeshDistributor(MeshDistributor *m, + MPI::Intracomm mpiComm0, + MPI::Intracomm mpiComm1) + { + meshDistributor = m; + mpiCommGlobal = mpiComm0; + mpiCommLocal = mpiComm1; + } + void setLevel(int l) { - level = l; + subdomainLevel = l; } - void setDofMapping(ParallelDofMapping *coarseDofs, - ParallelDofMapping *interiorDofs); + void setDofMapping(ParallelDofMapping *interiorDofs, + ParallelDofMapping *coarseDofs = NULL); void fillPetscMatrix(Matrix *mat); @@ -72,11 +77,19 @@ namespace AMDiS { inline bool isCoarseSpace(const FiniteElemSpace *feSpace, DegreeOfFreedom dof) { + FUNCNAME("SubDomainSolver::isCoarseSpace()"); + TEST_EXIT_DBG(coarseSpaceMap) + ("Subdomain solver does not contain a coarse space!\n"); + return (*coarseSpaceMap)[feSpace].isSet(dof); } inline Vec& getRhsCoarseSpace() { + FUNCNAME("SubDomainSolver::getRhsCoarseSpace()"); + TEST_EXIT_DBG(coarseSpaceMap) + ("Subdomain solver does not contain a coarse space!\n"); + return rhsCoarseSpace; } @@ -92,16 +105,28 @@ namespace AMDiS { inline Mat& getMatCoarseCoarse() { + FUNCNAME("SubDomainSolver::getMatCoarseCoarse()"); + TEST_EXIT_DBG(coarseSpaceMap) + ("Subdomain solver does not contain a coarse space!\n"); + return matCoarseCoarse; } inline Mat& getMatIntCoarse() { + FUNCNAME("SubDomainSolver::getMatIntCoarse()"); + TEST_EXIT_DBG(coarseSpaceMap) + ("Subdomain solver does not contain a coarse space!\n"); + return matIntCoarse; } inline Mat& getMatCoarseInt() { + FUNCNAME("SubDomainSolver::getMatCoarseInt()"); + TEST_EXIT_DBG(coarseSpaceMap) + ("Subdomain solver does not contain a coarse space!\n"); + return matCoarseInt; } @@ -116,20 +141,20 @@ namespace AMDiS { protected: MeshDistributor *meshDistributor; - int level; + int subdomainLevel; int rStartInterior; int nGlobalOverallInterior; - MPI::Intracomm mpiCommCoarseSpace; - - MPI::Intracomm mpiCommInterior; + MPI::Intracomm mpiCommGlobal; - ParallelDofMapping* coarseSpaceMap; + MPI::Intracomm mpiCommLocal; ParallelDofMapping* interiorMap; + ParallelDofMapping* coarseSpaceMap; + Vec rhsCoarseSpace; Vec rhsInterior;
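
For reference, the wiring implied by the interface changes above, collected into one minimal sketch. All identifiers are taken verbatim from the files modified in this patch (PetscProblemStat.cc, PetscSolverFeti.cc, PetscSolver.h, SubDomainSolver.h); the surrounding objects (petscSolver, subDomainSolver, meshDistributor, systemMatrix, levelData, localDofMap, primalDofMap) are assumed to exist as they do there. This is an illustration only, not part of the patch.

    // A PetscSolver no longer pulls the communicators and the DOF mapping out of
    // the MeshDistributor itself; both are now passed in explicitly.
    petscSolver->setMeshDistributor(meshDistributor,
                                    meshDistributor->getMpiComm(),  // stored as mpiCommGlobal
                                    PETSC_COMM_SELF);               // stored as mpiCommLocal
    petscSolver->setDofMapping(&(meshDistributor->getDofMap()));    // interior map only,
                                                                    // coarseSpaceMap stays NULL
    petscSolver->fillPetscMatrix(systemMatrix);

    // In the FETI / multilevel setting the same interface carries the level
    // communicators and an additional coarse-space mapping:
    subDomainSolver->setMeshDistributor(meshDistributor,
                                        levelData.getMpiComm(meshLevel - 1),
                                        levelData.getMpiComm(meshLevel));
    subDomainSolver->setLevel(meshLevel);
    subDomainSolver->setDofMapping(&localDofMap, &primalDofMap);

When no coarse-space mapping is set, only matIntInt and rhsInterior are assembled and the coarse-space accessors (getMatCoarseCoarse(), getMatIntCoarse(), getMatCoarseInt(), getRhsCoarseSpace()) abort via TEST_EXIT_DBG; with a coarse space, matCoarseCoarse, matIntCoarse, matCoarseInt and rhsCoarseSpace are created and assembled as well.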