diff --git a/AMDiS/src/parallel/InteriorBoundary.h b/AMDiS/src/parallel/InteriorBoundary.h
index fa105823272c090483327ee048157a763c3e1e18..ff081f56ec68ba935276d177d4ba302642d0667c 100644
--- a/AMDiS/src/parallel/InteriorBoundary.h
+++ b/AMDiS/src/parallel/InteriorBoundary.h
@@ -212,6 +212,15 @@ namespace AMDiS {
       if (mapIt == bound.boundary.end())
 	return;
 
+      // 2-level method: translate the neighbour rank to its rank within the
+      if (level > 0) {
+	TEST_EXIT_DBG(levelData)("No mesh level data object defined!\n");
+	TEST_EXIT_DBG(level == 1)("Only 2-level method supported!\n");
+
+	int rankInLevel = levelData->mapRank(mapIt->first, level - 1, level);
+	MSG("rankInLevel %d\n", rankInLevel);
+      }
+
       while (mapIt->second.size() == 0) {
 	++mapIt;
 	if (mapIt == bound.boundary.end())
diff --git a/AMDiS/src/parallel/MeshLevelData.cc b/AMDiS/src/parallel/MeshLevelData.cc
index 889b0543a89188e6b30ad1b3df3f9c553528963e..e0170a9de5616983e5ca8a6f5f0646eb376da735 100644
--- a/AMDiS/src/parallel/MeshLevelData.cc
+++ b/AMDiS/src/parallel/MeshLevelData.cc
@@ -30,6 +30,9 @@ namespace AMDiS {
 
     mpiComms.resize(1);
     mpiComms[0] = MPI::COMM_WORLD;
+
+    mpiGroups.resize(1);
+    mpiGroups[0] = mpiComms[0].Get_group();
   }
 
 
@@ -52,6 +55,9 @@ namespace AMDiS {
 
     mpiComms.resize(2);
     mpiComms[1] = mpiComms[0].Split(domainId, mpiComms[0].Get_rank());
+
+    mpiGroups.resize(2);
+    mpiGroups[1] = mpiComms[1].Get_group();
   }
 
 
diff --git a/AMDiS/src/parallel/MeshLevelData.h b/AMDiS/src/parallel/MeshLevelData.h
index b0a0cf3bb9122819d51bdd770a2573681c393263..d697e5b58ae777d5b1119e30a733526b2e7d32aa 100644
--- a/AMDiS/src/parallel/MeshLevelData.h
+++ b/AMDiS/src/parallel/MeshLevelData.h
@@ -72,6 +72,37 @@ namespace AMDiS {
       return nLevel;
     }
 
+    /// Returns the MPI communicator associated with mesh level 'level'.
+    MPI::Intracomm& getMpiComm(int level)
+    {
+      TEST_EXIT_DBG(level >= 0 && level < nLevel)("Should not happen!\n");
+
+      return mpiComms[level];
+    }
+
+    /// Returns the MPI group associated with mesh level 'level'.
+    MPI::Group& getMpiGroup(int level)
+    {
+      TEST_EXIT_DBG(level >= 0 && level < nLevel)("Should not happen!\n");
+
+      return mpiGroups[level];
+    }
+
+    /// Translates 'fromRank' from the group of level 'fromLevel' to the
+    /// corresponding rank in the group of level 'toLevel'. Returns -1 if
+    /// the rank is not a member of the target level's group.
+    int mapRank(int fromRank, int fromLevel, int toLevel)
+    {
+      int toRank = -1;
+
+      MPI::Group::Translate_ranks(mpiGroups[fromLevel], 1, &fromRank,
+				  mpiGroups[toLevel], &toRank);
+      if (toRank == MPI::UNDEFINED)
+	toRank = -1;
+
+      return toRank;
+    }
+
   protected:
     int nLevel;
 
@@ -80,6 +111,8 @@ namespace AMDiS {
 
     vector<std::set<int> > levelNeighbours;
 
     vector<MPI::Intracomm> mpiComms;
+
+    vector<MPI::Group> mpiGroups;
   };
 }
diff --git a/AMDiS/src/parallel/PetscSolverFeti.cc b/AMDiS/src/parallel/PetscSolverFeti.cc
index 65bcc105279b7bf3cb5466b5360ee7c8462afab0..4d2058fbf1558c50cd750657697173bced9d10cc 100644
--- a/AMDiS/src/parallel/PetscSolverFeti.cc
+++ b/AMDiS/src/parallel/PetscSolverFeti.cc
@@ -314,7 +314,7 @@ namespace AMDiS {
 
     DofIndexSet primals;
     DofContainerSet& vertices = 
-      meshDistributor->getBoundaryDofInfo(feSpace).geoDofs[VERTEX];
+      meshDistributor->getBoundaryDofInfo(feSpace, meshLevel).geoDofs[VERTEX];
     TEST_EXIT_DBG(vertices.size())("No primal vertices on this rank!\n");
     for (DofContainerSet::iterator it = vertices.begin(); it != vertices.end(); ++it)
 