#include <algorithm>
#include <fstream>
#include <map>
#include <set>
#include <vector>
#include <boost/lexical_cast.hpp>
#include <boost/filesystem.hpp>
#include "parallel/MeshDistributor.h"
#include "parallel/ParallelDebug.h"
#include "parallel/StdMpi.h"
#include "ParMetisPartitioner.h"
#include "Mesh.h"
#include "Traverse.h"
#include "ElInfo.h"
#include "Element.h"
#include "MacroElement.h"
#include "PartitionElementData.h"
#include "DOFMatrix.h"
#include "DOFVector.h"
#include "SystemVector.h"
#include "VtkWriter.h"
#include "ElementDofIterator.h"
#include "ProblemStatBase.h"
#include "StandardProblemIteration.h"
#include "ElementFileWriter.h"
#include "VertexVector.h"
#include "MeshStructure.h"
#include "ProblemVec.h"
#include "ProblemInstat.h"
#include "Debug.h"

namespace AMDiS {

  using boost::lexical_cast;
  using namespace boost::filesystem;

  inline bool cmpDofsByValue(const DegreeOfFreedom* dof1, const DegreeOfFreedom* dof2)
  {
    return (*dof1 < *dof2);
  }

  MeshDistributor::MeshDistributor(std::string str)
    : probStat(0),
      name(str),
      feSpace(NULL),
      mesh(NULL),
      refineManager(NULL),
      info(10),
      partitioner(NULL),
      initialPartitionMesh(true),
      nRankDofs(0),
      nOverallDofs(0),
      rstart(0),
      deserialized(false),
      writeSerializationFile(false),
      lastMeshChangeIndex(0),
      macroElementStructureConsisten(false)
  {
    FUNCNAME("MeshDistributor::MeshDistributor()");

    mpiRank = MPI::COMM_WORLD.Get_rank();
    mpiSize = MPI::COMM_WORLD.Get_size();
    mpiComm = MPI::COMM_WORLD;
  }

  void MeshDistributor::initParallelization(AdaptInfo *adaptInfo)
  {
    FUNCNAME("MeshDistributor::initParallelization()");

    TEST_EXIT(mpiSize > 1)
      ("Parallelization does not work with only one process!\n");
    TEST_EXIT(mesh)("No mesh has been defined for mesh distribution!\n");

    // If the problem has already been read from a file, we only need to set
    // isRankDofs on all matrices and rhs vectors and to remove periodic
    // boundary conditions (if there are some).
    if (deserialized) {
      setRankDofs();
      removePeriodicBoundaryConditions();
      return;
    }

    // Test whether the mesh consists of macro elements only. Partitioning of the
    // mesh is supported only for macro meshes, so it will not work yet if the
    // mesh is already refined in some way.
    testForMacroMesh();

    // Create an initial partitioning of the mesh.
    partitioner->createPartitionData();
    // Set the element weights, which are all 1 at the very beginning.
    setElemWeights(adaptInfo);
    // And now partition the mesh.
    partitionMesh(adaptInfo);

#if (DEBUG != 0)
    debug::ElementIdxToDofs elMap;
    debug::createSortedDofs(mesh, elMap);

    if (mpiRank == 0) {
      int writePartMesh = 1;
      GET_PARAMETER(0, "dbg->write part mesh", "%d", &writePartMesh);

      if (writePartMesh > 0)
        writePartitioningMesh("part.vtu");
      else
        MSG("Skip write part mesh!\n");
    }

    ParallelDebug::testAllElements(*this);
#endif

    // === Create interior boundary information. ===

    createInteriorBoundaryInfo();

#if (DEBUG != 0)
    ParallelDebug::printBoundaryInfo(*this);
#endif

    // === Create new global and local DOF numbering. ===

    createLocalGlobalNumbering();

    // === Remove all macro elements that are not part of the rank partition. ===

    removeMacroElements();

    macroElementStructureConsisten = true;

    // === Reset all DOFAdmins of the mesh. ===

    updateDofAdmins();

    // === If in debug mode, run some tests. ===

#if (DEBUG != 0)
    MSG("AMDiS runs in debug mode, so run some tests ...\n");
    debug::testSortedDofs(mesh, elMap);
    ParallelDebug::testInteriorBoundary(*this);
    ParallelDebug::testCommonDofs(*this, true);
    MSG("Debug mode tests finished!\n");

    debug::writeMesh(feSpace, -1, "macro_mesh");
#endif

    // === Create periodic dof mapping, if there are periodic boundaries.
=== createPeriodicMap(); // === Global refinements. === int globalRefinement = 0; GET_PARAMETER(0, mesh->getName() + "->global refinements", "%d", &globalRefinement); if (globalRefinement > 0) { refineManager->globalRefine(mesh, globalRefinement); #if (DEBUG != 0) debug::writeMesh(feSpace, -1, "gr_mesh"); #endif updateLocalGlobalNumbering(); // === Update periodic mapping, if there are periodic boundaries. === createPeriodicMap(); } /// === Set DOF rank information to all matrices and vectors. === setRankDofs(); // === Remove periodic boundary conditions in sequential problem definition. === removePeriodicBoundaryConditions(); } void MeshDistributor::addProblemStat(ProblemVec *probVec) { FUNCNAME("MeshDistributor::addProblemVec()"); if (feSpace != NULL) { std::vector feSpaces = probVec->getFeSpaces(); for (unsigned int i = 0; i < feSpaces.size(); i++) { TEST_EXIT(feSpace == feSpaces[i]) ("Parallelizaton is not supported for multiple FE spaces!\n"); } } else { feSpace = probVec->getFeSpace(0); mesh = feSpace->getMesh(); info = probVec->getInfo(); TEST_EXIT(mesh->getNumberOfDOFAdmin() == 1) ("Only meshes with one DOFAdmin are supported!\n"); TEST_EXIT(mesh->getDOFAdmin(0).getNumberOfPreDOFs(0) == 0) ("Wrong pre dof number for DOFAdmin!\n"); switch (mesh->getDim()) { case 2: refineManager = new RefinementManager2d(); break; case 3: refineManager = new RefinementManager3d(); break; default: ERROR_EXIT("This should not happen for dim = %d!\n", mesh->getDim()); } partitioner = new ParMetisPartitioner(mesh, &mpiComm); } // Create parallel serialization file writer, if needed. int writeSerialization = 0; GET_PARAMETER(0, probVec->getName() + "->output->write serialization", "%d", &writeSerialization); if (writeSerialization && !writeSerializationFile) { std::string filename = ""; GET_PARAMETER(0, name + "->output->serialization filename", &filename); TEST_EXIT(filename != "") ("No filename defined for parallel serialization file!\n"); int tsModulo = -1; GET_PARAMETER(0, probVec->getName() + "->output->write every i-th timestep", "%d", &tsModulo); probVec->getFileWriterList().push_back(new Serializer(this, filename, tsModulo)); writeSerializationFile = true; } int readSerialization = 0; GET_PARAMETER(0, probVec->getName() + "->input->read serialization", "%d", &readSerialization); if (readSerialization) { std::string filename = ""; GET_PARAMETER(0, probVec->getName() + "->input->serialization filename", &filename); filename += ".p" + lexical_cast(mpiRank); MSG("Start deserialization with %s\n", filename.c_str()); std::ifstream in(filename.c_str()); TEST_EXIT(!in.fail())("Could not open deserialization file: %s\n", filename.c_str()); probVec->deserialize(in); in.close(); MSG("Deserialization from file: %s\n", filename.c_str()); filename = ""; GET_PARAMETER(0, name + "->input->serialization filename", &filename); TEST_EXIT(filename != "") ("No filename defined for parallel deserialization file!\n"); std::string rankFilename = filename + ".p" + lexical_cast(mpiRank); in.open(rankFilename.c_str()); TEST_EXIT(!in.fail())("Could not open parallel deserialization file: %s\n", filename.c_str()); deserialize(in); in.close(); MSG("Deserializtion of mesh distributor from file: %s\n", rankFilename.c_str()); deserialized = true; } probStat.push_back(probVec); } void MeshDistributor::exitParallelization(AdaptInfo *adaptInfo) {} void MeshDistributor::updateDofAdmins() { FUNCNAME("MeshDistributor::updateDofAdmins()"); for (int i = 0; i < mesh->getNumberOfDOFAdmin(); i++) { DOFAdmin& admin = 
        const_cast<DOFAdmin&>(mesh->getDOFAdmin(i));

      // There must always be more allocated DOFs than used DOFs in the DOFAdmin.
      // Otherwise it is not possible to define a first DOF hole.
      if (static_cast<unsigned int>(admin.getSize()) == mapLocalGlobalDofs.size())
        admin.enlargeDOFLists();

      for (int j = 0; j < admin.getSize(); j++)
        admin.setDOFFree(j, true);
      for (unsigned int j = 0; j < mapLocalGlobalDofs.size(); j++)
        admin.setDOFFree(j, false);

      admin.setUsedSize(mapLocalGlobalDofs.size());
      admin.setUsedCount(mapLocalGlobalDofs.size());
      admin.setFirstHole(mapLocalGlobalDofs.size());
    }
  }

  void MeshDistributor::testForMacroMesh()
  {
    FUNCNAME("MeshDistributor::testForMacroMesh()");

    int nMacroElements = 0;

    TraverseStack stack;
    ElInfo *elInfo = stack.traverseFirst(mesh, -1, Mesh::CALL_LEAF_EL);
    while (elInfo) {
      TEST_EXIT(elInfo->getLevel() == 0)
        ("Mesh is already refined! This does not work with parallelization!\n");

      nMacroElements++;

      elInfo = stack.traverseNext(elInfo);
    }

    TEST_EXIT(nMacroElements >= mpiSize)
      ("The mesh has fewer macro elements than the number of MPI processes!\n");
  }

  void MeshDistributor::synchVector(DOFVector<double> &vec)
  {
    StdMpi<std::vector<double> > stdMpi(mpiComm);

    for (RankToDofContainer::iterator sendIt = sendDofs.begin();
         sendIt != sendDofs.end(); ++sendIt) {
      std::vector<double> dofs;
      int nSendDofs = sendIt->second.size();
      dofs.reserve(nSendDofs);

      for (int i = 0; i < nSendDofs; i++)
        dofs.push_back(vec[*((sendIt->second)[i])]);

      stdMpi.send(sendIt->first, dofs);
    }

    for (RankToDofContainer::iterator recvIt = recvDofs.begin();
         recvIt != recvDofs.end(); ++recvIt)
      stdMpi.recv(recvIt->first, recvIt->second.size());

    stdMpi.startCommunication(MPI_DOUBLE);

    for (RankToDofContainer::iterator recvIt = recvDofs.begin();
         recvIt != recvDofs.end(); ++recvIt)
      for (unsigned int i = 0; i < recvIt->second.size(); i++)
        vec[*(recvIt->second)[i]] = stdMpi.getRecvData(recvIt->first)[i];
  }

  void MeshDistributor::synchVector(SystemVector &vec)
  {
    int nComponents = vec.getSize();
    StdMpi<std::vector<double> > stdMpi(mpiComm);

    for (RankToDofContainer::iterator sendIt = sendDofs.begin();
         sendIt != sendDofs.end(); ++sendIt) {
      std::vector<double> dofs;
      int nSendDofs = sendIt->second.size();
      dofs.reserve(nComponents * nSendDofs);

      for (int i = 0; i < nComponents; i++) {
        DOFVector<double> *dofvec = vec.getDOFVector(i);
        for (int j = 0; j < nSendDofs; j++)
          dofs.push_back((*dofvec)[*((sendIt->second)[j])]);
      }

      stdMpi.send(sendIt->first, dofs);
    }

    for (RankToDofContainer::iterator recvIt = recvDofs.begin();
         recvIt != recvDofs.end(); ++recvIt)
      stdMpi.recv(recvIt->first, recvIt->second.size() * nComponents);

    stdMpi.startCommunication(MPI_DOUBLE);

    for (RankToDofContainer::iterator recvIt = recvDofs.begin();
         recvIt != recvDofs.end(); ++recvIt) {
      int nRecvDofs = recvIt->second.size();
      int counter = 0;

      for (int i = 0; i < nComponents; i++) {
        DOFVector<double> *dofvec = vec.getDOFVector(i);
        for (int j = 0; j < nRecvDofs; j++)
          (*dofvec)[*(recvIt->second)[j]] =
            stdMpi.getRecvData(recvIt->first)[counter++];
      }
    }
  }

  void MeshDistributor::setRankDofs()
  {
    for (unsigned int i = 0; i < probStat.size(); i++) {
      int nComponents = probStat[i]->getNumComponents();
      for (int j = 0; j < nComponents; j++) {
        for (int k = 0; k < nComponents; k++)
          if (probStat[i]->getSystemMatrix(j, k))
            probStat[i]->getSystemMatrix(j, k)->setRankDofs(isRankDof);

        TEST_EXIT_DBG(probStat[i]->getRhs()->getDOFVector(j))("No RHS vector!\n");
        TEST_EXIT_DBG(probStat[i]->getSolution()->getDOFVector(j))("No solution vector!\n");

        probStat[i]->getRhs()->getDOFVector(j)->setRankDofs(isRankDof);
        probStat[i]->getSolution()->getDOFVector(j)->setRankDofs(isRankDof);
      }
    }
  }
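  // The synchVector() variants above implement the usual owner-to-ghost update:
  // each rank sends the values of the interior-boundary DOFs it owns (sendDofs)
  // and overwrites its non-owned boundary DOFs with the values received from
  // the owning ranks (recvDofs). A minimal usage sketch (the vector name and the
  // surrounding solver code are only illustrative, not part of this file):
  //
  //   DOFVector<double> vel(feSpace, "velocity");
  //   // ... assemble and solve, writing only rank-owned DOFs ...
  //   synchVector(vel);   // afterwards, non-owned DOFs hold the owners' values
  //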
  void MeshDistributor::removePeriodicBoundaryConditions()
  {
    // Remove periodic boundaries in the boundary managers of matrices and vectors.
    for (unsigned int i = 0; i < probStat.size(); i++) {
      int nComponents = probStat[i]->getNumComponents();

      for (int j = 0; j < nComponents; j++) {
        for (int k = 0; k < nComponents; k++) {
          DOFMatrix* mat = probStat[i]->getSystemMatrix(j, k);
          if (mat && mat->getBoundaryManager())
            removePeriodicBoundaryConditions(
              const_cast<BoundaryIndexMap&>(mat->getBoundaryManager()->getBoundaryConditionMap()));
        }

        if (probStat[i]->getSolution()->getDOFVector(j)->getBoundaryManager())
          removePeriodicBoundaryConditions(
            const_cast<BoundaryIndexMap&>(probStat[i]->getSolution()->getDOFVector(j)->getBoundaryManager()->getBoundaryConditionMap()));

        if (probStat[i]->getRhs()->getDOFVector(j)->getBoundaryManager())
          removePeriodicBoundaryConditions(
            const_cast<BoundaryIndexMap&>(probStat[i]->getRhs()->getDOFVector(j)->getBoundaryManager()->getBoundaryConditionMap()));
      }
    }

    // Remove periodic boundaries on elements in the mesh.
    TraverseStack stack;
    ElInfo *elInfo = stack.traverseFirst(mesh, -1, Mesh::CALL_EVERY_EL_PREORDER);
    while (elInfo) {
      elInfo->getElement()->deleteElementData(PERIODIC);
      elInfo = stack.traverseNext(elInfo);
    }
  }

  void MeshDistributor::removePeriodicBoundaryConditions(BoundaryIndexMap& boundaryMap)
  {
    BoundaryIndexMap::iterator it = boundaryMap.begin();
    while (it != boundaryMap.end()) {
      if (it->second->isPeriodic())
        boundaryMap.erase(it++);
      else
        ++it;
    }
  }

  void MeshDistributor::checkMeshChange()
  {
    FUNCNAME("MeshDistributor::checkMeshChange()");

#if (DEBUG != 0)
    debug::writeMesh(feSpace, -1, "before_check_mesh");
#endif

    // === If the mesh has not been changed on any rank, return. ===

    int recvAllValues = 0;
    int sendValue = static_cast<int>(mesh->getChangeIndex() != lastMeshChangeIndex);
    mpiComm.Allreduce(&sendValue, &recvAllValues, 1, MPI_INT, MPI_SUM);

    if (recvAllValues == 0)
      return;

    // === At least one rank's mesh has been changed, so the boundaries must be ===
    // === adapted to the new mesh structure.                                   ===

    clock_t first = clock();

    do {
      // To check the interior boundaries, the ownership of the boundaries is not
      // important. Therefore, we add all boundaries to one boundary container.
      RankToBoundMap allBound;

      for (InteriorBoundary::iterator it(myIntBoundary); !it.end(); ++it)
        if (it->rankObj.subObj == EDGE || it->rankObj.subObj == FACE)
          allBound[it.getRank()].push_back(*it);

      for (InteriorBoundary::iterator it(otherIntBoundary); !it.end(); ++it)
        if (it->rankObj.subObj == EDGE || it->rankObj.subObj == FACE)
          allBound[it.getRank()].push_back(*it);

      for (InteriorBoundary::iterator it(periodicBoundary); !it.end(); ++it)
        if (it->rankObj.subObj == EDGE || it->rankObj.subObj == FACE)
          allBound[it.getRank()].push_back(*it);

      // === Check the boundaries and adapt the mesh if necessary. ===

#if (DEBUG != 0)
      MSG("Run checkAndAdaptBoundary ...\n");
#endif

      bool meshChanged = checkAndAdaptBoundary(allBound);

      // === Check on all ranks if at least one rank's mesh has changed. ===

      int sendValue = static_cast<int>(!meshChanged);
      recvAllValues = 0;
      mpiComm.Allreduce(&sendValue, &recvAllValues, 1, MPI_INT, MPI_SUM);

#if (DEBUG != 0)
      MSG("Mesh changed on %d ranks!\n", recvAllValues);
#endif
    } while (recvAllValues != 0);

#if (DEBUG != 0)
    debug::writeMesh(feSpace, -1, "mesh");
#endif

    INFO(info, 8)("Parallel mesh adaptation needed %.5f seconds\n",
                  TIME_USED(first, clock()));

    // === Because the mesh has been changed, update the DOF numbering and mappings. ===

    mesh->dofCompress();
    updateLocalGlobalNumbering();

    // === Update periodic mapping, if there are periodic boundaries.
=== createPeriodicMap(); } bool MeshDistributor::checkAndAdaptBoundary(RankToBoundMap &allBound) { FUNCNAME("MeshDistributor::checkAndAdaptBoundary()"); // === Create mesh structure codes for all ranks boundary elements. === std::map sendCodes; for (RankToBoundMap::iterator it = allBound.begin(); it != allBound.end(); ++it) { for (std::vector::iterator boundIt = it->second.begin(); boundIt != it->second.end(); ++boundIt) { MeshStructure elCode; elCode.init(boundIt->rankObj); sendCodes[it->first].push_back(elCode); } } StdMpi stdMpi(mpiComm, true); stdMpi.send(sendCodes); stdMpi.recv(allBound); stdMpi.startCommunication(MPI_UNSIGNED_LONG); // === Compare received mesh structure codes. === bool meshFitTogether = true; for (RankToBoundMap::iterator it = allBound.begin(); it != allBound.end(); ++it) { MeshCodeVec &recvCodes = stdMpi.getRecvData()[it->first]; int i = 0; for (std::vector::iterator boundIt = it->second.begin(); boundIt != it->second.end(); ++boundIt) { MeshStructure elCode; elCode.init(boundIt->rankObj); if (elCode.getCode() != recvCodes[i].getCode()) { TEST_EXIT_DBG(refineManager)("Refinement manager is not set correctly!\n"); bool b = fitElementToMeshCode(recvCodes[i], boundIt->rankObj.el, boundIt->rankObj.subObj, boundIt->rankObj.ithObj, boundIt->rankObj.elType, boundIt->rankObj.reverseMode); if (b) meshFitTogether = false; } i++; } } return meshFitTogether; } bool MeshDistributor::fitElementToMeshCode(MeshStructure &code, Element *el, GeoIndex subObj, int ithObj, int elType, bool reverseMode) { FUNCNAME("MeshDistributor::fitElementToMeshCode()"); TEST_EXIT_DBG(el)("No element given!\n"); if (code.empty()) return false; int s1 = el->getSubObjOfChild(0, subObj, ithObj, elType); int s2 = el->getSubObjOfChild(1, subObj, ithObj, elType); TEST_EXIT_DBG(s1 != -1 || s2 != -1)("This should not happen!\n"); bool meshChanged = false; Flag traverseFlag = Mesh::CALL_EVERY_EL_PREORDER | Mesh::FILL_NEIGH | Mesh::FILL_BOUND; if (reverseMode) { std::swap(s1, s2); traverseFlag |= Mesh::CALL_REVERSE_MODE; } if (s1 != -1 && s2 != -1) { TraverseStack stack; ElInfo *elInfo = stack.traverseFirst(el->getMesh(), -1, traverseFlag); while (elInfo && elInfo->getElement() != el) elInfo = stack.traverseNext(elInfo); meshChanged = fitElementToMeshCode2(code, stack, subObj, ithObj, elType, reverseMode); return meshChanged; } if (el->isLeaf()) { if (code.getNumElements() == 1 && code.isLeafElement()) return false; meshChanged = true; TraverseStack stack; ElInfo *elInfo = stack.traverseFirst(el->getMesh(), -1, traverseFlag); while (elInfo && elInfo->getElement() != el) { elInfo = stack.traverseNext(elInfo); } TEST_EXIT_DBG(elInfo)("This should not happen!\n"); el->setMark(1); refineManager->setMesh(el->getMesh()); refineManager->setStack(&stack); refineManager->refineFunction(elInfo); } Element *child0 = el->getFirstChild(); Element *child1 = el->getSecondChild(); if (reverseMode) std::swap(child0, child1); if (s1 != -1) { TraverseStack stack; ElInfo *elInfo = stack.traverseFirst(el->getMesh(), -1, traverseFlag); while (elInfo && elInfo->getElement() != child0) { elInfo = stack.traverseNext(elInfo); } meshChanged |= fitElementToMeshCode2(code, stack, subObj, s1, el->getChildType(elType), reverseMode); } else { TraverseStack stack; ElInfo *elInfo = stack.traverseFirst(el->getMesh(), -1, traverseFlag); while (elInfo && elInfo->getElement() != child1) { elInfo = stack.traverseNext(elInfo); } meshChanged |= fitElementToMeshCode2(code, stack, subObj, s2, el->getChildType(elType), reverseMode); } return 
meshChanged; } bool MeshDistributor::fitElementToMeshCode2(MeshStructure &code, TraverseStack &stack, GeoIndex subObj, int ithObj, int elType, bool reverseMode) { FUNCNAME("MeshDistributor::fitElementToMeshCode2()"); ElInfo *elInfo = stack.getElInfo(); bool value = false; if (!elInfo) return value; Element *el = elInfo->getElement(); if (code.isLeafElement()) { int level = elInfo->getLevel(); do { elInfo = stack.traverseNext(elInfo); } while (elInfo && elInfo->getLevel() > level); return value; } if (!elInfo) return value; if (el->isLeaf()) { el->setMark(1); refineManager->setMesh(el->getMesh()); refineManager->setStack(&stack); refineManager->refineFunction(elInfo); value = true; } int s1 = el->getSubObjOfChild(0, subObj, ithObj, elType); int s2 = el->getSubObjOfChild(1, subObj, ithObj, elType); Element *child0 = el->getFirstChild(); Element *child1 = el->getSecondChild(); if (reverseMode) { std::swap(s1, s2); std::swap(child0, child1); } if (s1 != -1) { stack.traverseNext(elInfo); code.nextElement(); value |= fitElementToMeshCode2(code, stack, subObj, s1, el->getChildType(elType), reverseMode); elInfo = stack.getElInfo(); } else { do { elInfo = stack.traverseNext(elInfo); } while (elInfo && elInfo->getElement() != child1); } TEST_EXIT_DBG(elInfo->getElement() == child1)("This should not happen!\n"); if (s2 != -1) { code.nextElement(); value |= fitElementToMeshCode2(code, stack, subObj, s2, el->getChildType(elType), reverseMode); } else { int level = elInfo->getLevel(); do { elInfo = stack.traverseNext(elInfo); } while (elInfo && elInfo->getLevel() > level); } return value; } void MeshDistributor::serialize(std::ostream &out, DofContainer &data) { int vecSize = data.size(); SerUtil::serialize(out, vecSize); for (int i = 0; i < vecSize; i++) { int dofIndex = (*data[i]); SerUtil::serialize(out, dofIndex); } } void MeshDistributor::deserialize(std::istream &in, DofContainer &data, std::map &dofMap) { FUNCNAME("MeshDistributor::deserialize()"); int vecSize = 0; SerUtil::deserialize(in, vecSize); data.clear(); data.resize(vecSize); for (int i = 0; i < vecSize; i++) { int dofIndex = 0; SerUtil::deserialize(in, dofIndex); TEST_EXIT_DBG(dofMap.count(dofIndex) != 0) ("Dof index could not be deserialized correctly!\n"); data[i] = dofMap[dofIndex]; } } void MeshDistributor::serialize(std::ostream &out, RankToDofContainer &data) { int mapSize = data.size(); SerUtil::serialize(out, mapSize); for (RankToDofContainer::iterator it = data.begin(); it != data.end(); ++it) { int rank = it->first; SerUtil::serialize(out, rank); serialize(out, it->second); } } void MeshDistributor::deserialize(std::istream &in, RankToDofContainer &data, std::map &dofMap) { data.clear(); int mapSize = 0; SerUtil::deserialize(in, mapSize); for (int i = 0; i < mapSize; i++) { int rank = 0; SerUtil::deserialize(in, rank); deserialize(in, data[rank], dofMap); } } double MeshDistributor::setElemWeights(AdaptInfo *adaptInfo) { FUNCNAME("MeshDistributor::setElemWeights()"); double localWeightSum = 0.0; elemWeights.clear(); std::string filename = ""; GET_PARAMETER(0, mesh->getName() + "->macro weights", &filename); if (filename != "") { MSG("Read macro weights from %s\n", filename.c_str()); std::ifstream infile; infile.open(filename.c_str(), std::ifstream::in); while (!infile.eof()) { int elNum, elWeight; infile >> elNum; if (infile.eof()) break; infile >> elWeight; elemWeights[elNum] = elWeight; localWeightSum += elWeight; } infile.close(); } else { TraverseStack stack; ElInfo *elInfo = stack.traverseFirst(mesh, -1, 
                                           Mesh::CALL_LEAF_EL);
      while (elInfo) {
        elemWeights[elInfo->getElement()->getIndex()] = 1.0;
        localWeightSum++;
        elInfo = stack.traverseNext(elInfo);
      }
    }

    return localWeightSum;
  }

  void MeshDistributor::partitionMesh(AdaptInfo *adaptInfo)
  {
    FUNCNAME("MeshDistributor::partitionMesh()");

    if (initialPartitionMesh) {
      initialPartitionMesh = false;
      partitioner->fillCoarsePartitionVec(&oldPartitionVec);
      partitioner->partition(&elemWeights, INITIAL);
    } else {
      oldPartitionVec = partitionVec;
      partitioner->partition(&elemWeights, ADAPTIVE_REPART, 100.0 /*0.000001*/);
    }

    partitioner->fillCoarsePartitionVec(&partitionVec);
  }

  void MeshDistributor::createInteriorBoundaryInfo()
  {
    FUNCNAME("MeshDistributor::createInteriorBoundaryInfo()");

    createBoundaryDataStructure();

    // === Once we have this information, we must take care of the order of the  ===
    // === atomic bounds in the three boundary handling objects. In the end, all ===
    // === boundaries have to be in the same order on both ranks that share the  ===
    // === boundary.                                                             ===

    StdMpi<std::vector<AtomicBoundary> > stdMpi(mpiComm);
    stdMpi.send(myIntBoundary.boundary);
    stdMpi.recv(otherIntBoundary.boundary);
    stdMpi.startCommunication(MPI_INT);

    // === The information about all neighbouring boundaries has been received.  ===
    // === So the rank tests if its own atomic boundaries are in the same order. ===
    // === If not, the atomic boundaries are swapped to the correct order.       ===

    for (RankToBoundMap::iterator rankIt = otherIntBoundary.boundary.begin();
         rankIt != otherIntBoundary.boundary.end(); ++rankIt) {

      // === We have received from rank "rankIt->first" the ordered list of element ===
      // === indices. Now we have to sort the corresponding list in this rank to    ===
      // === get the same order.                                                    ===

      for (unsigned int j = 0; j < rankIt->second.size(); j++) {
        // If the expected object is not in place, search for it.
        BoundaryObject &recvedBound =
          stdMpi.getRecvData()[rankIt->first][j].rankObj;

        if ((rankIt->second)[j].neighObj != recvedBound) {
          unsigned int k = j + 1;
          for (; k < rankIt->second.size(); k++)
            if ((rankIt->second)[k].neighObj == recvedBound)
              break;

          // The element must always be found, because the list is just in another order.
          TEST_EXIT_DBG(k < rankIt->second.size())("Should never happen!\n");

          // Swap the current with the found element.
          AtomicBoundary tmpBound = (rankIt->second)[k];
          (rankIt->second)[k] = (rankIt->second)[j];
          (rankIt->second)[j] = tmpBound;
        }
      }
    }
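    // After the reordering above, the following invariant holds (sketch): if the
    // owning neighbour rank r stores its owned boundary list for this rank as
    //
    //   myIntBoundary.boundary[thisRank] = [b_0, b_1, b_2, ...]
    //
    // then this rank's non-owned list
    //
    //   otherIntBoundary.boundary[r]     = [b_0', b_1', b_2', ...]
    //
    // lists the same atomic boundaries at the same positions, where b_i and b_i'
    // describe the same geometric sub-object seen from the two sides. Later
    // communication, e.g. in checkAndAdaptBoundary() and
    // updateLocalGlobalNumbering(), relies on this positional correspondence
    // instead of exchanging object identifiers again.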
    // === Do the same for the periodic boundaries. ===

    if (periodicBoundary.boundary.size() > 0) {
      stdMpi.clear();

      InteriorBoundary sendBounds, recvBounds;
      for (RankToBoundMap::iterator rankIt = periodicBoundary.boundary.begin();
           rankIt != periodicBoundary.boundary.end(); ++rankIt) {

        TEST_EXIT_DBG(rankIt->first != mpiRank)
          ("It is not possible to have an interior boundary within a rank partition!\n");

        if (rankIt->first < mpiRank)
          sendBounds.boundary[rankIt->first] = rankIt->second;
        else
          recvBounds.boundary[rankIt->first] = rankIt->second;
      }

      stdMpi.send(sendBounds.boundary);
      stdMpi.recv(recvBounds.boundary);
      stdMpi.startCommunication(MPI_INT);

      for (RankToBoundMap::iterator rankIt = periodicBoundary.boundary.begin();
           rankIt != periodicBoundary.boundary.end(); ++rankIt) {
        if (rankIt->first <= mpiRank)
          continue;

        for (unsigned int j = 0; j < rankIt->second.size(); j++) {
          BoundaryObject &recvedBound =
            stdMpi.getRecvData()[rankIt->first][j].rankObj;

          if (periodicBoundary.boundary[rankIt->first][j].neighObj != recvedBound) {
            unsigned int k = j + 1;
            for (; k < rankIt->second.size(); k++)
              if (periodicBoundary.boundary[rankIt->first][k].neighObj == recvedBound)
                break;

            // The element must always be found, because the list is just in
            // another order.
            TEST_EXIT_DBG(k < rankIt->second.size())("Should never happen!\n");

            // Swap the current with the found element.
            AtomicBoundary tmpBound = (rankIt->second)[k];
            (rankIt->second)[k] = (rankIt->second)[j];
            (rankIt->second)[j] = tmpBound;
          }
        }
      }
    } // periodicBoundary.boundary.size() > 0
  }

  void MeshDistributor::createBoundaryDataStructure()
  {
    FUNCNAME("MeshDistributor::createBoundaryDataStructure()");

    // Maps to each vertex dof the set of all elements that include this
    // vertex. An element is denoted by a pair of two ints. The first is the
    // element index, the second int denotes the local vertex index within
    // the element.
std::map > vertexElements; std::map > edgeElements; std::map > faceElements; std::map > vertexInRank; std::map > edgeInRank; std::map > faceInRank; std::map elIndexMap; std::map elIndexTypeMap; std::map vertexOwner; std::map edgeOwner; std::map faceOwner; std::map, BoundaryType> periodicEdges; std::map, BoundaryType> periodicFaces; // === PHASE 1 === TraverseStack stack; ElInfo *elInfo = stack.traverseFirst(mesh, -1, Mesh::CALL_LEAF_EL | Mesh::FILL_NEIGH | Mesh::FILL_BOUND); while (elInfo) { Element *el = elInfo->getElement(); elIndexMap[el->getIndex()] = el; elIndexTypeMap[el->getIndex()] = elInfo->getType(); for (int i = 0; i < el->getGeo(VERTEX); i++) { DegreeOfFreedom vertex = el->getDOF(i, 0); vertexElements[vertex].push_back(ElementNeighbour(el->getIndex(), i)); vertexOwner[vertex] = std::max(vertexOwner[vertex], partitionVec[el->getIndex()]); } for (int i = 0; i < el->getGeo(EDGE); i++) { DofEdge edge = el->getEdge(i); edgeElements[edge].push_back(ElementNeighbour(el->getIndex(), i)); edgeOwner[edge] = std::max(edgeOwner[edge], partitionVec[el->getIndex()]); } for (int i = 0; i < el->getGeo(FACE); i++) { DofFace face = el->getFace(i); faceElements[face].push_back(ElementNeighbour(el->getIndex(), i)); faceOwner[face] = std::max(faceOwner[face], partitionVec[el->getIndex()]); } switch (mesh->getDim()) { case 2: for (int i = 0; i < el->getGeo(EDGE); i++) { if (mesh->isPeriodicAssociation(elInfo->getBoundary(i))) { DofEdge edge1 = el->getEdge(i); DofEdge edge2 = elInfo->getNeighbour(i)->getEdge(elInfo->getOppVertex(i)); periodicEdges[std::make_pair(edge1, edge2)] = elInfo->getBoundary(i); } } break; case 3: for (int i = 0; i < el->getGeo(FACE); i++) { if (mesh->isPeriodicAssociation(elInfo->getBoundary(i))) { DofFace face1 = el->getFace(i); DofFace face2 = elInfo->getNeighbour(i)->getFace(elInfo->getOppVertex(i)); periodicFaces[std::make_pair(face1, face2)] = elInfo->getBoundary(i); } } break; } elInfo = stack.traverseNext(elInfo); } // === PHASE 2 === for (std::map >::iterator it = vertexElements.begin(); it != vertexElements.end(); ++it) { for (std::vector::iterator it2 = it->second.begin(); it2 != it->second.end(); ++it2) { int elOwner = partitionVec[it2->elIndex]; if (it2->elIndex > vertexInRank[it->first][elOwner].elIndex) vertexInRank[it->first][elOwner] = *it2; } } for (std::map >::iterator it = edgeElements.begin(); it != edgeElements.end(); ++it) { for (std::vector::iterator it2 = it->second.begin(); it2 != it->second.end(); ++it2) { int elOwner = partitionVec[it2->elIndex]; if (it2->elIndex > edgeInRank[it->first][elOwner].elIndex) edgeInRank[it->first][elOwner] = *it2; } } for (std::map >::iterator it = faceElements.begin(); it != faceElements.end(); ++it) { for (std::vector::iterator it2 = it->second.begin(); it2 != it->second.end(); ++it2) { int elOwner = partitionVec[it2->elIndex]; if (it2->elIndex > faceInRank[it->first][elOwner].elIndex) faceInRank[it->first][elOwner] = *it2; } } // === PHASE 3 === for (std::map >::iterator it = vertexInRank.begin(); it != vertexInRank.end(); ++it) { if (it->second.count(mpiRank) && it->second.size() > 1) { int owner = vertexOwner[it->first]; ElementNeighbour& rankBoundEl = it->second[mpiRank]; AtomicBoundary bound; bound.rankObj.el = elIndexMap[rankBoundEl.elIndex]; bound.rankObj.elIndex = rankBoundEl.elIndex; bound.rankObj.elType = elIndexTypeMap[rankBoundEl.elIndex]; bound.rankObj.subObj = VERTEX; bound.rankObj.ithObj = rankBoundEl.ith; if (owner == mpiRank) { for (std::map::iterator it2 = it->second.begin(); it2 != it->second.end(); 
++it2) { if (it2->first == mpiRank) continue; bound.neighObj.el = elIndexMap[it2->second.elIndex]; bound.neighObj.elIndex = it2->second.elIndex; bound.neighObj.elType = -1; bound.neighObj.subObj = VERTEX; bound.neighObj.ithObj = it2->second.ith; TEST_EXIT_DBG(rankBoundEl.boundaryIndex == it2->second.boundaryIndex) ("Should not happen!\n"); bound.type = rankBoundEl.boundaryIndex; AtomicBoundary& b = myIntBoundary.getNewAtomic(it2->first); b = bound; b.rankObj.setReverseMode(b.neighObj, feSpace); } } else { TEST_EXIT_DBG(it->second.count(owner) == 1) ("Should not happen!\n"); ElementNeighbour& ownerBoundEl = it->second[owner]; bound.neighObj.el = elIndexMap[ownerBoundEl.elIndex]; bound.neighObj.elIndex = ownerBoundEl.elIndex; bound.neighObj.elType = -1; bound.neighObj.subObj = VERTEX; bound.neighObj.ithObj = ownerBoundEl.ith; TEST_EXIT_DBG(rankBoundEl.boundaryIndex == ownerBoundEl.boundaryIndex) ("Should not happen!\n"); bound.type = rankBoundEl.boundaryIndex; AtomicBoundary& b = otherIntBoundary.getNewAtomic(owner); b = bound; b.neighObj.setReverseMode(b.rankObj, feSpace); } } } for (std::map >::iterator it = edgeInRank.begin(); it != edgeInRank.end(); ++it) { if (it->second.count(mpiRank) && it->second.size() > 1) { int owner = edgeOwner[it->first]; ElementNeighbour& rankBoundEl = it->second[mpiRank]; AtomicBoundary bound; bound.rankObj.el = elIndexMap[rankBoundEl.elIndex]; bound.rankObj.elIndex = rankBoundEl.elIndex; bound.rankObj.elType = elIndexTypeMap[rankBoundEl.elIndex]; bound.rankObj.subObj = EDGE; bound.rankObj.ithObj = rankBoundEl.ith; if (owner == mpiRank) { for (std::map::iterator it2 = it->second.begin(); it2 != it->second.end(); ++it2) { if (it2->first == mpiRank) continue; bound.neighObj.el = elIndexMap[it2->second.elIndex]; bound.neighObj.elIndex = it2->second.elIndex; bound.neighObj.elType = -1; bound.neighObj.subObj = EDGE; bound.neighObj.ithObj = it2->second.ith; TEST_EXIT_DBG(rankBoundEl.boundaryIndex == it2->second.boundaryIndex) ("Should not happen!\n"); bound.type = rankBoundEl.boundaryIndex; AtomicBoundary& b = myIntBoundary.getNewAtomic(it2->first); b = bound; b.rankObj.setReverseMode(b.neighObj, feSpace); } } else { TEST_EXIT_DBG(it->second.count(owner) == 1) ("Should not happen!\n"); ElementNeighbour& ownerBoundEl = it->second[owner]; bound.neighObj.el = elIndexMap[ownerBoundEl.elIndex]; bound.neighObj.elIndex = ownerBoundEl.elIndex; bound.neighObj.elType = -1; bound.neighObj.subObj = EDGE; bound.neighObj.ithObj = ownerBoundEl.ith; TEST_EXIT_DBG(rankBoundEl.boundaryIndex == ownerBoundEl.boundaryIndex) ("Should not happen!\n"); bound.type = rankBoundEl.boundaryIndex; AtomicBoundary& b = otherIntBoundary.getNewAtomic(owner); b = bound; b.neighObj.setReverseMode(b.rankObj, feSpace); } } } for (std::map >::iterator it = faceInRank.begin(); it != faceInRank.end(); ++it) { if (it->second.count(mpiRank) && it->second.size() > 1) { int owner = faceOwner[it->first]; ElementNeighbour& rankBoundEl = it->second[mpiRank]; AtomicBoundary bound; bound.rankObj.el = elIndexMap[rankBoundEl.elIndex]; bound.rankObj.elIndex = rankBoundEl.elIndex; bound.rankObj.elType = elIndexTypeMap[rankBoundEl.elIndex]; bound.rankObj.subObj = EDGE; bound.rankObj.ithObj = rankBoundEl.ith; if (owner == mpiRank) { for (std::map::iterator it2 = it->second.begin(); it2 != it->second.end(); ++it2) { if (it2->first == mpiRank) continue; bound.neighObj.el = elIndexMap[it2->second.elIndex]; bound.neighObj.elIndex = it2->second.elIndex; bound.neighObj.elType = -1; bound.neighObj.subObj = FACE; 
bound.neighObj.ithObj = it2->second.ith; TEST_EXIT_DBG(rankBoundEl.boundaryIndex == it2->second.boundaryIndex) ("Should not happen!\n"); bound.type = rankBoundEl.boundaryIndex; AtomicBoundary& b = myIntBoundary.getNewAtomic(it2->first); b = bound; b.rankObj.setReverseMode(b.neighObj, feSpace); } } else { TEST_EXIT_DBG(it->second.count(owner) == 1) ("Should not happen!\n"); ElementNeighbour& ownerBoundEl = it->second[owner]; bound.neighObj.el = elIndexMap[ownerBoundEl.elIndex]; bound.neighObj.elIndex = ownerBoundEl.elIndex; bound.neighObj.elType = -1; bound.neighObj.subObj = FACE; bound.neighObj.ithObj = ownerBoundEl.ith; TEST_EXIT_DBG(rankBoundEl.boundaryIndex == ownerBoundEl.boundaryIndex) ("Should not happen!\n"); bound.type = rankBoundEl.boundaryIndex; AtomicBoundary& b = otherIntBoundary.getNewAtomic(owner); b = bound; b.neighObj.setReverseMode(b.rankObj, feSpace); } } } // === PHASE 4 === for (std::map, BoundaryType>::iterator it = periodicEdges.begin(); it != periodicEdges.end(); ++it) { int perEdgeOwner0 = edgeOwner[it->first.first]; int perEdgeOwner1 = edgeOwner[it->first.second]; if (perEdgeOwner0 == mpiRank) { TEST_EXIT_DBG(perEdgeOwner0 != perEdgeOwner1)("Should not happen!\n"); TEST_EXIT_DBG(edgeElements[it->first.first].size() == 1)("Should not happen!\n"); TEST_EXIT_DBG(edgeElements[it->first.second].size() == 1)("Should not happen!\n"); int otherElementRank = perEdgeOwner1; ElementNeighbour& perEdgeEl0 = edgeElements[it->first.first][0]; ElementNeighbour& perEdgeEl1 = edgeElements[it->first.second][0]; AtomicBoundary bound; bound.rankObj.el = elIndexMap[perEdgeEl0.elIndex]; bound.rankObj.elIndex = perEdgeEl0.elIndex; bound.rankObj.elType = elIndexTypeMap[perEdgeEl0.elIndex]; bound.rankObj.subObj = EDGE; bound.rankObj.ithObj = perEdgeEl0.ith; bound.neighObj.el = elIndexMap[perEdgeEl1.elIndex]; bound.neighObj.elIndex = perEdgeEl1.elIndex; bound.neighObj.elType = -1; bound.neighObj.subObj = EDGE; bound.neighObj.ithObj = perEdgeEl1.ith; bound.type = it->second; AtomicBoundary& b = periodicBoundary.getNewAtomic(otherElementRank); b = bound; if (mpiRank > otherElementRank) b.rankObj.setReverseMode(b.neighObj, feSpace); else b.neighObj.setReverseMode(b.rankObj, feSpace); for (int i = 0; i < 2; i++) { int ithDofRankObj = bound.rankObj.el->getVertexOfEdge(perEdgeEl0.ith, i); DegreeOfFreedom dof = bound.rankObj.el->getDOF(ithDofRankObj, 0); int ithDofNeighObj = -1; for (int j = 0; j < 3; j++) if (bound.neighObj.el->getDOF(j, 0) == mesh->getPeriodicAssociations(bound.type)[dof]) ithDofNeighObj = j; TEST_EXIT_DBG(ithDofNeighObj > -1)("Should not happen!\n"); bound.rankObj.subObj = VERTEX; bound.rankObj.ithObj = ithDofRankObj; bound.neighObj.subObj = VERTEX; bound.neighObj.ithObj = ithDofNeighObj; AtomicBoundary& b = periodicBoundary.getNewAtomic(otherElementRank); b = bound; } } } } void MeshDistributor::removeMacroElements() { FUNCNAME("MeshDistributor::removeMacroElements()"); std::set macrosToRemove; for (std::deque::iterator it = mesh->firstMacroElement(); it != mesh->endOfMacroElements(); ++it) { PartitionElementData *partitionData = dynamic_cast ((*it)->getElement()->getElementData(PARTITION_ED)); if (partitionData->getPartitionStatus() != IN) macrosToRemove.insert(*it); } mesh->removeMacroElements(macrosToRemove, feSpace); } void MeshDistributor::createLocalGlobalNumbering() { FUNCNAME("MeshDistributor::createLocalGlobalNumbering()"); // === Get rank information about DOFs. === // Stores to each DOF pointer the set of ranks the DOF is part of. 
DofToPartitions partitionDofs; DofContainer rankDofs, rankAllDofs; DofToRank boundaryDofs; createDofMemberInfo(partitionDofs, rankDofs, rankAllDofs, boundaryDofs, vertexDof); nRankDofs = rankDofs.size(); nOverallDofs = partitionDofs.size(); // === Get starting position for global rank dof ordering. ==== rstart = 0; mpiComm.Scan(&nRankDofs, &rstart, 1, MPI_INT, MPI_SUM); rstart -= nRankDofs; // === Create for all dofs in rank new indices. The new index must start at === // === index 0, must be continues and have the same order as the indices === // === had before. === // Do not change the indices now, but create a new indexing and store it here. DofIndexMap rankDofsNewLocalIndex; isRankDof.clear(); int i = 0; for (DofContainer::iterator dofIt = rankAllDofs.begin(); dofIt != rankAllDofs.end(); ++dofIt) { rankDofsNewLocalIndex[*dofIt] = i; // First, we set all dofs in ranks partition to be owend by the rank. Later, // the dofs in ranks partition that are owned by other rank are set to false. isRankDof[i] = true; i++; } // === Create for all rank owned dofs a new global indexing. === // Stores for dofs in rank a new global index. DofIndexMap rankDofsNewGlobalIndex; // Stores for all rank owned dofs a continues local index. DofIndexMap rankOwnedDofsNewLocalIndex; i = 0; for (DofContainer::iterator dofIt = rankDofs.begin(); dofIt != rankDofs.end(); ++dofIt) { rankDofsNewGlobalIndex[*dofIt] = i + rstart; rankOwnedDofsNewLocalIndex[*dofIt] = i; i++; } // === Create information which dof indices must be send and which must === // === be received to/from other ranks. === // Stores to each rank a map from DOF pointers (which are all owned by the rank // and lie on an interior boundary) to new global DOF indices. std::map sendNewDofs; // Maps to each rank the number of new DOF indices this rank will receive from. // All these DOFs are at an interior boundary on this rank, but are owned by // another rank. std::map recvNewDofs; for (DofToRank::iterator it = boundaryDofs.begin(); it != boundaryDofs.end(); ++it) { if (it->second == mpiRank) { // If the boundary dof is a rank dof, it must be send to other ranks. // Search for all ranks that have this dof too. for (std::set::iterator itRanks = partitionDofs[it->first].begin(); itRanks != partitionDofs[it->first].end(); ++itRanks) { if (*itRanks != mpiRank) { TEST_EXIT_DBG(rankDofsNewGlobalIndex.count(it->first) == 1) ("DOF Key not found!\n"); sendNewDofs[*itRanks][it->first] = rankDofsNewGlobalIndex[it->first]; } } } else { // If the boundary dof is not a rank dof, its new dof index (and later // also the dof values) must be received from another rank. if (recvNewDofs.find(it->second) == recvNewDofs.end()) recvNewDofs[it->second] = 1; else recvNewDofs[it->second]++; } } // === Send and receive the dof indices at boundary. === sendDofs.clear(); for (std::map::iterator sendIt = sendNewDofs.begin(); sendIt != sendNewDofs.end(); ++sendIt) for (DofIndexMap::iterator dofIt = sendIt->second.begin(); dofIt != sendIt->second.end(); ++dofIt) sendDofs[sendIt->first].push_back(dofIt->first); typedef std::vector > DofMapVec; StdMpi stdMpi(mpiComm); stdMpi.send(sendNewDofs); for (std::map::iterator recvIt = recvNewDofs.begin(); recvIt != recvNewDofs.end(); ++recvIt, i++) stdMpi.recv(recvIt->first, recvIt->second * 2); stdMpi.startCommunication(MPI_INT); std::map &dofMap = stdMpi.getRecvData(); // === Change global dof indices at boundary from other ranks. === // Within this small data structure we track which dof index was already changed. 
// This is used to avoid the following situation: Assume, there are two dof indices // a and b in boundaryDofs. Then we have to change index a to b and b to c. When // the second rule applies, we have to avoid that not the first b, resulted from // changing a to b, is set to c, but the second one. Therefore, after the first // rule was applied, the dof pointer is set to false in this data structure and // is not allowed to be changed anymore. std::map dofChanged; recvDofs.clear(); for (DofToRank::iterator dofIt = boundaryDofs.begin(); dofIt != boundaryDofs.end(); ++dofIt) dofChanged[dofIt->first] = false; for (std::map::iterator rankIt = dofMap.begin(); rankIt != dofMap.end(); ++rankIt) { for (DofMapVec::iterator recvDofIt = rankIt->second.begin(); recvDofIt != rankIt->second.end(); ++recvDofIt) { DegreeOfFreedom oldDof = recvDofIt->first; DegreeOfFreedom newGlobalDof = recvDofIt->second; bool found = false; // Iterate over all boundary dofs to find the dof which index we have to change. for (DofToRank::iterator dofIt = boundaryDofs.begin(); dofIt != boundaryDofs.end(); ++dofIt) { if (*(dofIt->first) == oldDof && !dofChanged[dofIt->first]) { dofChanged[dofIt->first] = true; recvDofs[rankIt->first].push_back(dofIt->first); rankDofsNewGlobalIndex[dofIt->first] = newGlobalDof; isRankDof[rankDofsNewLocalIndex[dofIt->first]] = false; found = true; break; } } TEST_EXIT_DBG(found)("Should not happen!\n"); } } // === Create now the local to global index and local to dof index mappings. === createLocalMappings(rankDofsNewLocalIndex, rankOwnedDofsNewLocalIndex, rankDofsNewGlobalIndex); lastMeshChangeIndex = mesh->getChangeIndex(); } void MeshDistributor::updateLocalGlobalNumbering() { FUNCNAME("MeshDistributor::updateLocalGlobalNumbering()"); #if (DEBUG != 0) debug::ElementIdxToDofs elMap; debug::createSortedDofs(mesh, elMap); #endif typedef std::set DofSet; // === Get all DOFs in ranks partition. === ElementDofIterator elDofIt(feSpace); DofSet rankDofSet; // The vertexDof list must be recreated from the scratch. Otherwise, it is possible // that it maps dofs, that were removed (this is also possible, if the mesh was // refined, e.g., center dofs of an element are not dofs of the children). DofToBool oldVertexDof = vertexDof; vertexDof.clear(); TraverseStack stack; ElInfo *elInfo = stack.traverseFirst(mesh, -1, Mesh::CALL_LEAF_EL); while (elInfo) { elDofIt.reset(elInfo->getElement()); do { rankDofSet.insert(elDofIt.getDofPtr()); vertexDof[elDofIt.getDofPtr()] = false; } while(elDofIt.next()); elInfo = stack.traverseNext(elInfo); } elInfo = stack.traverseFirst(mesh, 0, Mesh::CALL_EL_LEVEL); while (elInfo) { Element *element = elInfo->getElement(); elDofIt.reset(element); elDofIt.reset(elInfo->getElement()); do { if (elDofIt.getCurrentPos() == 0) vertexDof[elDofIt.getDofPtr()] = true; } while(elDofIt.next()); elInfo = stack.traverseNext(elInfo); } DofContainer rankDofs; for (DofSet::iterator it = rankDofSet.begin(); it != rankDofSet.end(); ++it) rankDofs.push_back(*it); sort(rankDofs.begin(), rankDofs.end(), cmpDofsByValue); int nRankAllDofs = rankDofs.size(); // === Traverse on interior boundaries and move all not ranked owned DOFs from === // === rankDofs to boundaryDofs. 
=== RankToDofContainer oldSendDofs = sendDofs; RankToDofContainer oldRecvDofs = recvDofs; sendDofs.clear(); recvDofs.clear(); for (InteriorBoundary::iterator it(myIntBoundary); !it.end(); ++it) { DofContainer dofs; it->rankObj.el->getVertexDofs(feSpace, it->rankObj, dofs); it->rankObj.el->getNonVertexDofs(feSpace, it->rankObj, dofs); for (unsigned int i = 0; i < dofs.size(); i++) sendDofs[it.getRank()].push_back(dofs[i]); } for (InteriorBoundary::iterator it(otherIntBoundary); !it.end(); ++it) { DofContainer dofs; it->rankObj.el->getVertexDofs(feSpace, it->rankObj, dofs); it->rankObj.el->getNonVertexDofs(feSpace, it->rankObj, dofs); for (unsigned int i = 0; i < dofs.size(); i++) { DofContainer::iterator eraseIt = find(rankDofs.begin(), rankDofs.end(), dofs[i]); if (eraseIt != rankDofs.end()) rankDofs.erase(eraseIt); recvDofs[it.getRank()].push_back(dofs[i]); } } nRankDofs = rankDofs.size(); // === Get starting position for global rank dof ordering. ==== mpiComm.Scan(&nRankDofs, &rstart, 1, MPI_INT, MPI_SUM); rstart -= nRankDofs; // === Calculate number of overall DOFs of all partitions. === nOverallDofs = 0; mpiComm.Allreduce(&nRankDofs, &nOverallDofs, 1, MPI_INT, MPI_SUM); // First, we set all dofs in ranks partition to be owend by the rank. Later, // the dofs in ranks partition that are owned by other rank are set to false. isRankDof.clear(); for (int i = 0; i < nRankAllDofs; i++) isRankDof[i] = true; // Stores for all rank owned dofs a new global index. DofIndexMap rankDofsNewGlobalIndex; for (int i = 0; i < nRankDofs; i++) rankDofsNewGlobalIndex[rankDofs[i]] = i + rstart; // === Send new DOF indices. === #if (DEBUG != 0) ParallelDebug::testDofContainerCommunication(*this, sendDofs, recvDofs); #endif int i = 0; StdMpi > stdMpi(mpiComm, false); for (RankToDofContainer::iterator sendIt = sendDofs.begin(); sendIt != sendDofs.end(); ++sendIt, i++) { stdMpi.getSendData(sendIt->first).resize(0); stdMpi.getSendData(sendIt->first).reserve(sendIt->second.size()); for (DofContainer::iterator dofIt = sendIt->second.begin(); dofIt != sendIt->second.end(); ++dofIt) stdMpi.getSendData(sendIt->first).push_back(rankDofsNewGlobalIndex[*dofIt]); } stdMpi.updateSendDataSize(); stdMpi.recv(recvDofs); stdMpi.startCommunication(MPI_INT); for (RankToDofContainer::iterator recvIt = recvDofs.begin(); recvIt != recvDofs.end(); ++recvIt) { int j = 0; for (DofContainer::iterator dofIt = recvIt->second.begin(); dofIt != recvIt->second.end(); ++dofIt) { rankDofsNewGlobalIndex[*dofIt] = stdMpi.getRecvData(recvIt->first)[j++]; isRankDof[**dofIt] = false; } } // === Create now the local to global index and local to dof index mappings. === mapLocalGlobalDofs.clear(); mapLocalDofIndex.clear(); for (DofIndexMap::iterator dofIt = rankDofsNewGlobalIndex.begin(); dofIt != rankDofsNewGlobalIndex.end(); ++dofIt) mapLocalGlobalDofs[*(dofIt->first)] = dofIt->second; for (int i = 0; i < nRankDofs; i++) mapLocalDofIndex[i] = *(rankDofs[i]); // === Update dof admins due to new number of dofs. 
=== updateDofAdmins(); lastMeshChangeIndex = mesh->getChangeIndex(); #if (DEBUG != 0) debug::testSortedDofs(mesh, elMap); ParallelDebug::testCommonDofs(*this, true); #if 0 MSG("------------- Debug information -------------\n"); MSG("nRankDofs = %d\n", nRankDofs); MSG("nOverallDofs = %d\n", nOverallDofs); MSG("rstart %d\n", rstart); /* for (RankToDofContainer::iterator it = sendDofs.begin(); it != sendDofs.end(); ++it) { MSG("send %d DOFs to rank %d\n", it->second.size(), it->first); for (unsigned int i = 0; i < it->second.size(); i++) MSG("%d send DOF: %d\n", i, *(it->second[i])); } for (RankToDofContainer::iterator it = recvDofs.begin(); it != recvDofs.end(); ++it) { MSG("recv %d DOFs from rank %d\n", it->second.size(), it->first); for (unsigned int i = 0; i < it->second.size(); i++) MSG("%d recv DOF: %d\n", i, *(it->second[i])); } */ #if 0 std::set testDofs; debug::getAllDofs(feSpace, testDofs); for (std::set::iterator it = testDofs.begin(); it != testDofs.end(); ++it) { MSG("DOF %d: mapLocalGlobalDofs = %d vertexDof = %d isRankDof = %d\n", **it, mapLocalGlobalDofs[**it], vertexDof[*it], isRankDof[**it]); } #endif #endif #endif } void MeshDistributor::createLocalMappings(DofIndexMap &rankDofsNewLocalIndex, DofIndexMap &rankOwnedDofsNewLocalIndex, DofIndexMap &rankDofsNewGlobalIndex) { FUNCNAME("MeshDistributor::createLocalMappings()"); #if (DEBUG != 0) if (macroElementStructureConsisten) { std::set allDofs; debug::getAllDofs(feSpace, allDofs); for (std::set::iterator it = allDofs.begin(); it != allDofs.end(); ++it) { TEST_EXIT(rankDofsNewLocalIndex.count(*it) == 1) ("Missing DOF %d in rankDofsNewLocalIndex!\n", **it); TEST_EXIT(rankDofsNewGlobalIndex.count(*it) == 1) ("Missing DOF %d in rankDofsNewGlobalIndex!\n", **it); if (isRankDof[**it]) { TEST_EXIT(rankOwnedDofsNewLocalIndex.count(*it) == 1) ("Missing DOF %d in rankOwnedDofsNewLocalIndex!\n", **it); } } } #endif mapLocalGlobalDofs.clear(); mapLocalDofIndex.clear(); // Iterate over all DOFs in ranks partition. for (DofIndexMap::iterator dofIt = rankDofsNewLocalIndex.begin(); dofIt != rankDofsNewLocalIndex.end(); ++dofIt) { DegreeOfFreedom newLocalIndex = dofIt->second; DegreeOfFreedom newGlobalIndex = rankDofsNewGlobalIndex[dofIt->first]; *const_cast(dofIt->first) = newLocalIndex; mapLocalGlobalDofs[newLocalIndex] = newGlobalIndex; } for (DofIndexMap::iterator dofIt = rankOwnedDofsNewLocalIndex.begin(); dofIt != rankOwnedDofsNewLocalIndex.end(); ++dofIt) mapLocalDofIndex[dofIt->second] = *(dofIt->first); } void MeshDistributor::createDofMemberInfo(DofToPartitions& partitionDofs, DofContainer& rankOwnedDofs, DofContainer& rankAllDofs, DofToRank& boundaryDofs, DofToBool& isVertexDof) { partitionDofs.clear(); rankOwnedDofs.clear(); rankAllDofs.clear(); boundaryDofs.clear(); isVertexDof.clear(); // === Determine to each dof the set of partitions the dof belongs to. === ElementDofIterator elDofIt(feSpace); TraverseStack stack; ElInfo *elInfo = stack.traverseFirst(mesh, -1, Mesh::CALL_LEAF_EL); while (elInfo) { Element *element = elInfo->getElement(); elDofIt.reset(element); do { // Determine to each dof the partition(s) it corresponds to. partitionDofs[elDofIt.getDofPtr()].insert(partitionVec[element->getIndex()]); if (elDofIt.getCurrentPos() == 0) isVertexDof[elDofIt.getDofPtr()] = true; else isVertexDof[elDofIt.getDofPtr()] = false; } while (elDofIt.next()); elInfo = stack.traverseNext(elInfo); } // === Determine the set of ranks dofs and the dofs ownership at the boundary. 
=== // iterate over all DOFs for (DofToPartitions::iterator it = partitionDofs.begin(); it != partitionDofs.end(); ++it) { bool isInRank = false; // iterate over all partition the current DOF is part of. for (std::set::iterator itpart1 = it->second.begin(); itpart1 != it->second.end(); ++itpart1) { if (*itpart1 == mpiRank) { rankAllDofs.push_back(it->first); if (it->second.size() == 1) { rankOwnedDofs.push_back(it->first); } else { // This dof is at the ranks boundary. It is owned by the rank only if // the rank number is the highest of all ranks containing this dof. bool insert = true; int highestRank = mpiRank; for (std::set::iterator itpart2 = it->second.begin(); itpart2 != it->second.end(); ++itpart2) { if (*itpart2 > mpiRank) insert = false; if (*itpart2 > highestRank) highestRank = *itpart2; } if (insert) rankOwnedDofs.push_back(it->first); boundaryDofs[it->first] = highestRank; } isInRank = true; break; } } if (!isInRank) isVertexDof.erase(it->first); } sort(rankAllDofs.begin(), rankAllDofs.end(), cmpDofsByValue); sort(rankOwnedDofs.begin(), rankOwnedDofs.end(), cmpDofsByValue); } void MeshDistributor::createPeriodicMap() { FUNCNAME("MeshDistributor::createPeriodicMap()"); if (periodicBoundary.boundary.size() == 0) return; // Clear all periodic dof mappings calculated before. We do it from scratch. periodicDof.clear(); periodicDofAssociations.clear(); StdMpi > stdMpi(mpiComm, false); // === Each rank traverse its periodic boundaries and sends the dof indices === // === to the rank "on the other side" of the periodic boundary. === RankToDofContainer rankPeriodicDofs; std::map > rankToDofType; for (RankToBoundMap::iterator it = periodicBoundary.boundary.begin(); it != periodicBoundary.boundary.end(); ++it) { TEST_EXIT_DBG(it->first != mpiRank) ("Periodic interior boundary within the rank itself is not possible!\n"); // Create dof indices on the boundary. DofContainer& dofs = rankPeriodicDofs[it->first]; for (std::vector::iterator boundIt = it->second.begin(); boundIt != it->second.end(); ++boundIt) { int nDofs = dofs.size(); boundIt->rankObj.el->getVertexDofs(feSpace, boundIt->rankObj, dofs); boundIt->rankObj.el->getNonVertexDofs(feSpace, boundIt->rankObj, dofs); for (unsigned int i = 0; i < (dofs.size() - nDofs); i++) rankToDofType[it->first].push_back(boundIt->type); } // Send the global indices to the rank on the other side. stdMpi.getSendData(it->first).reserve(dofs.size()); for (unsigned int i = 0; i < dofs.size(); i++) stdMpi.getSendData(it->first).push_back(mapLocalGlobalDofs[*(dofs[i])]); // Receive from this rank the same number of dofs. stdMpi.recv(it->first, dofs.size()); } stdMpi.updateSendDataSize(); stdMpi.startCommunication(MPI_INT); // === The rank has received the dofs from the rank on the other side of === // === the boundary. Now it can use them to create the mapping between === // === the periodic dofs in this rank and the corresponding periodic === // === dofs from the other ranks. === std::map > dofFromRank; for (RankToBoundMap::iterator it = periodicBoundary.boundary.begin(); it != periodicBoundary.boundary.end(); ++it) { DofContainer& dofs = rankPeriodicDofs[it->first]; std::vector& types = rankToDofType[it->first]; TEST_EXIT_DBG(dofs.size() == types.size())("Should not happen!\n"); // Added the received dofs to the mapping. 
for (unsigned int i = 0; i < dofs.size(); i++) { int globalDofIndex = mapLocalGlobalDofs[*(dofs[i])]; int mapGlobalDofIndex = stdMpi.getRecvData(it->first)[i]; BoundaryType type = types[i]; // Check if this global dof with the corresponding boundary type was // not added before by another periodic boundary from other rank. if (periodicDofAssociations[globalDofIndex].count(type) == 0) { periodicDof[type][globalDofIndex] = mapGlobalDofIndex; periodicDofAssociations[globalDofIndex].insert(type); dofFromRank[globalDofIndex].insert(it->first); } } } if (dofFromRank.size() > 0) { TEST_EXIT_DBG(mesh->getDim() == 2) ("Periodic boundary corner problem must be generalized to 3d!\n"); } MPI::Request request[min(static_cast(periodicBoundary.boundary.size() * 2), 4)]; int requestCounter = 0; std::vector sendBuffers, recvBuffers; for (std::map >::iterator it = dofFromRank.begin(); it != dofFromRank.end(); ++it) { if (it->second.size() == 2) { TEST_EXIT_DBG(periodicDofAssociations[it->first].size() == 2) ("DOF %d has only %d periodic associations!\n", it->first, periodicDofAssociations[it->first].size()); int type0 = *(periodicDofAssociations[it->first].begin()); int type1 = *(++(periodicDofAssociations[it->first].begin())); int *sendbuf = new int[2]; sendbuf[0] = periodicDof[type0][it->first]; sendbuf[1] = periodicDof[type1][it->first]; request[requestCounter++] = mpiComm.Isend(sendbuf, 2, MPI_INT, *(it->second.begin()), 0); request[requestCounter++] = mpiComm.Isend(sendbuf, 2, MPI_INT, *(++(it->second.begin())), 0); sendBuffers.push_back(sendbuf); int *recvbuf1 = new int[2]; int *recvbuf2 = new int[2]; request[requestCounter++] = mpiComm.Irecv(recvbuf1, 2, MPI_INT, *(it->second.begin()), 0); request[requestCounter++] = mpiComm.Irecv(recvbuf2, 2, MPI_INT, *(++(it->second.begin())), 0); recvBuffers.push_back(recvbuf1); recvBuffers.push_back(recvbuf2); } } MPI::Request::Waitall(requestCounter, request); int i = 0; for (std::map >::iterator it = dofFromRank.begin(); it != dofFromRank.end(); ++it) { if (it->second.size() == 2) { for (int k = 0; k < 2; k++) for (int j = 0; j < 2; j++) if (recvBuffers[i + k][j] != it->first) { int globalDofIndex = it->first; int mapGlobalDofIndex = recvBuffers[i + k][j]; int type = 3; periodicDof[type][globalDofIndex] = mapGlobalDofIndex; periodicDofAssociations[globalDofIndex].insert(type); } i++; } } for (unsigned int i = 0; i < sendBuffers.size(); i++) delete [] sendBuffers[i]; for (unsigned int i = 0; i < recvBuffers.size(); i++) delete [] recvBuffers[i]; #if (DEBUG != 0) for (std::map >::iterator it = periodicDofAssociations.begin(); it != periodicDofAssociations.end(); ++it) { int nAssoc = it->second.size(); TEST_EXIT_DBG(nAssoc == 1 || nAssoc == 3) ("Should not happen! 
DOF %d has %d periodic associations!\n", it->first, nAssoc); } #endif } void MeshDistributor::serialize(std::ostream &out) { SerUtil::serialize(out, elemWeights); SerUtil::serialize(out, initialPartitionMesh); SerUtil::serialize(out, partitionVec); SerUtil::serialize(out, oldPartitionVec); SerUtil::serialize(out, nRankDofs); SerUtil::serialize(out, nOverallDofs); myIntBoundary.serialize(out); otherIntBoundary.serialize(out); periodicBoundary.serialize(out); serialize(out, sendDofs); serialize(out, recvDofs); SerUtil::serialize(out, mapLocalGlobalDofs); SerUtil::serialize(out, mapLocalDofIndex); SerUtil::serialize(out, isRankDof); serialize(out, vertexDof); serialize(out, periodicDof); serialize(out, periodicDofAssociations); SerUtil::serialize(out, rstart); SerUtil::serialize(out, macroElementStructureConsisten); } void MeshDistributor::deserialize(std::istream &in) { FUNCNAME("MeshDistributor::deserialize()"); SerUtil::deserialize(in, elemWeights); SerUtil::deserialize(in, initialPartitionMesh); SerUtil::deserialize(in, partitionVec); SerUtil::deserialize(in, oldPartitionVec); SerUtil::deserialize(in, nRankDofs); SerUtil::deserialize(in, nOverallDofs); // Create two maps: one from from element indices to the corresponding element // pointers, and one map from Dof indices to the corresponding dof pointers. std::map elIndexMap; std::map dofMap; ElementDofIterator elDofIter(feSpace); TraverseStack stack; ElInfo *elInfo = stack.traverseFirst(mesh, -1, Mesh::CALL_EVERY_EL_PREORDER); while (elInfo) { Element *el = elInfo->getElement(); elIndexMap[el->getIndex()] = el; if (el->isLeaf()) { elDofIter.reset(el); do { dofMap[elDofIter.getDof()] = elDofIter.getDofPtr(); } while (elDofIter.next()); } elInfo = stack.traverseNext(elInfo); } myIntBoundary.deserialize(in, elIndexMap); otherIntBoundary.deserialize(in, elIndexMap); periodicBoundary.deserialize(in, elIndexMap); deserialize(in, sendDofs, dofMap); deserialize(in, recvDofs, dofMap); SerUtil::deserialize(in, mapLocalGlobalDofs); SerUtil::deserialize(in, mapLocalDofIndex); SerUtil::deserialize(in, isRankDof); deserialize(in, vertexDof, dofMap); deserialize(in, periodicDof); deserialize(in, periodicDofAssociations); SerUtil::deserialize(in, rstart); SerUtil::deserialize(in, macroElementStructureConsisten); deserialized = true; } void MeshDistributor::serialize(std::ostream &out, PeriodicDofMap &data) { int mapSize = data.size(); SerUtil::serialize(out, mapSize); for (PeriodicDofMap::iterator it = data.begin(); it != data.end(); ++it) { int type = it->first; DofMapping dofMap = it->second; SerUtil::serialize(out, type); SerUtil::serialize(out, dofMap); } } void MeshDistributor::serialize(std::ostream &out, std::map >& data) { int mapSize = data.size(); SerUtil::serialize(out, mapSize); for (std::map >::iterator it = data.begin(); it != data.end(); ++it) { int dof = it->first; std::set typeSet = it->second; SerUtil::serialize(out, dof); SerUtil::serialize(out, typeSet); } } void MeshDistributor::deserialize(std::istream &in, PeriodicDofMap &data) { data.clear(); int mapSize = 0; SerUtil::deserialize(in, mapSize); for (int i = 0; i < mapSize; i++) { int type; DofMapping dofMap; SerUtil::deserialize(in, type); SerUtil::deserialize(in, dofMap); data[type] = dofMap; } } void MeshDistributor::deserialize(std::istream &in, std::map >& data) { data.clear(); int mapSize = 0; SerUtil::deserialize(in, mapSize); for (int i = 0; i < mapSize; i++) { int dof; std::set typeSet; SerUtil::deserialize(in, dof); SerUtil::deserialize(in, typeSet); data[dof] = typeSet; 
    }
  }

  void MeshDistributor::writePartitioningMesh(std::string filename)
  {
    FUNCNAME("MeshDistributor::writePartitioningMesh()");

    std::map<int, double> vec;
    TraverseStack stack;
    ElInfo *elInfo = stack.traverseFirst(mesh, -1,
                                         Mesh::CALL_LEAF_EL | Mesh::FILL_COORDS);
    while (elInfo) {
      int index = elInfo->getElement()->getIndex();
      vec[index] = partitionVec[index];
      elInfo = stack.traverseNext(elInfo);
    }

    ElementFileWriter::writeFile(vec, mesh, filename);
  }

}
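// Typical driver-side call sequence for this class, as a rough sketch only
// (the problem setup, the adaptation loop and the variable names are
// assumptions for illustration, not part of this file):
//
//   MeshDistributor meshDist("myProblem");
//   meshDist.addProblemStat(&probVec);        // registers FE space, mesh, file writers
//   meshDist.initParallelization(adaptInfo);  // partitioning, boundaries, DOF numbering
//
//   // inside the adaptation loop, after local refinement/coarsening:
//   meshDist.checkMeshChange();                   // repair interior boundaries
//   meshDist.synchVector(*probVec.getSolution()); // update non-owned DOFs
//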