//
// Software License for AMDiS
//
// Copyright (c) 2010 Dresden University of Technology
// All rights reserved.
// Authors: Simon Vey, Thomas Witkowski et al.
//
// This file is part of AMDiS
//
// See also license.opensource.txt in the distribution.

#include <algorithm>
#include <iostream>
#include <fstream>
#include <limits>
#include <map>
#include <set>
#include <boost/lexical_cast.hpp>
#include <boost/filesystem.hpp>
#include "parallel/MeshDistributor.h"
#include "parallel/MeshManipulation.h"
#include "parallel/ParallelDebug.h"
#include "parallel/StdMpi.h"
#include "parallel/ParMetisPartitioner.h"
#include "io/ElementFileWriter.h"
#include "io/MacroInfo.h"
#include "io/VtkWriter.h"
#include "Mesh.h"
#include "Traverse.h"
#include "ElInfo.h"
#include "Element.h"
#include "MacroElement.h"
#include "DOFMatrix.h"
#include "DOFVector.h"
#include "SystemVector.h"
#include "ElementDofIterator.h"
#include "ProblemStatBase.h"
#include "StandardProblemIteration.h"
#include "VertexVector.h"
#include "MeshStructure.h"
#include "ProblemVec.h"
#include "ProblemInstat.h"
#include "Debug.h"

namespace AMDiS {

  using boost::lexical_cast;
  using namespace boost::filesystem;
  using namespace std;

  inline bool cmpDofsByValue(const DegreeOfFreedom* dof1, const DegreeOfFreedom* dof2)
  {
    return (*dof1 < *dof2);
  }

  MeshDistributor::MeshDistributor(string str)
    : probStat(0),
      name(str),
      feSpace(NULL),
      mesh(NULL),
      refineManager(NULL),
      info(10),
      partitioner(NULL),
      nRankDofs(0),
      nOverallDofs(0),
      rstart(0),
      deserialized(false),
      writeSerializationFile(false),
      repartitioningAllowed(false),
      repartitionIthChange(20),
      nTimestepsAfterLastRepartitioning(0),
      repartCounter(0),
      debugOutputDir(""),
      lastMeshChangeIndex(0)
  {
    FUNCNAME("MeshDistributor::MeshDistributor()");

    mpiRank = MPI::COMM_WORLD.Get_rank();
    mpiSize = MPI::COMM_WORLD.Get_size();
    mpiComm = MPI::COMM_WORLD;

    int tmp = 0;
    GET_PARAMETER(0, name + "->repartitioning", "%d", &tmp);
    repartitioningAllowed = (tmp > 0);

    GET_PARAMETER(0, name + "->debug output dir", &debugOutputDir);
    GET_PARAMETER(0, name + "->repartition ith change", "%d", &repartitionIthChange);

    tmp = 0;
    GET_PARAMETER(0, name + "->log main rank", "%d", &tmp);
    Msg::outputMainRank = (tmp > 0);
  }

  void MeshDistributor::initParallelization()
  {
    FUNCNAME("MeshDistributor::initParallelization()");

    TEST_EXIT(mpiSize > 1)
      ("Parallelization does not work with only one process!\n");
    TEST_EXIT(feSpace)("No FE space has been defined for the mesh distributor!\n");
    TEST_EXIT(mesh)("No mesh has been defined for the mesh distributor!\n");

    elObjects.setFeSpace(feSpace);

    // If the problem has already been read from a file, we only need to set
    // isRankDof on all matrices and RHS vectors and to remove periodic
    // boundary conditions (if there are any).
    if (deserialized) {
      updateMacroElementInfo();
      setRankDofs();
      removePeriodicBoundaryConditions();

      macroElIndexMap.clear();
      macroElIndexTypeMap.clear();

      map<int, bool>& elementInRank = partitioner->getElementInRank();
      for (vector<MacroElement*>::iterator it = allMacroElements.begin();
           it != allMacroElements.end(); ++it) {
        elementInRank[(*it)->getIndex()] = false;
        macroElIndexMap[(*it)->getIndex()] = (*it)->getElement();
        macroElIndexTypeMap[(*it)->getIndex()] = (*it)->getElType();
      }

      for (deque<MacroElement*>::iterator it = mesh->getMacroElements().begin();
           it != mesh->getMacroElements().end(); ++it)
        elementInRank[(*it)->getIndex()] = true;

      return;
    }

    // Test if the mesh is the macro mesh only. Partitioning of the mesh is
    // supported only for macro meshes, so it will not work yet if the mesh is
    // already refined in some way.
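    // After this check the actual setup follows: create an initial ParMETIS
    // partitioning of the macro mesh, build the interior (and periodic) boundary
    // information, remove all macro elements that are not part of this rank's
    // partition, create the global DOF numbering, and finally apply the requested
    // global refinements.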
testForMacroMesh(); // For later mesh repartitioning, we need to store some information about the // macro mesh. createMacroElementInfo(); // create an initial partitioning of the mesh partitioner->createPartitionData(); // set the element weights, which are 1 at the very first begin setInitialElementWeights(); // and now partition the mesh partitioner->fillCoarsePartitionVec(&oldPartitionVec); bool partitioningSucceed = partitioner->partition(elemWeights, INITIAL); TEST_EXIT(partitioningSucceed)("Initial partitioning does not work!\n"); partitioner->fillCoarsePartitionVec(&partitionVec); #if (DEBUG != 0) debug::ElementIdxToDofs elMap; debug::createSortedDofs(mesh, elMap); if (mpiRank == 0) { int writePartMesh = 1; GET_PARAMETER(0, "dbg->write part mesh", "%d", &writePartMesh); if (writePartMesh > 0) { debug::writeElementIndexMesh(mesh, debugOutputDir + "elementIndex.vtu"); writePartitioningMesh(debugOutputDir + "part.vtu"); } else { MSG("Skip write part mesh!\n"); } } #endif // === Create interior boundary information. === createInteriorBoundaryInfo(); #if (DEBUG != 0) ParallelDebug::printBoundaryInfo(*this); #endif for (deque::iterator it = mesh->firstMacroElement(); it != mesh->endOfMacroElements(); ++it) { for (int i = 0; i < mesh->getGeo(NEIGH); i++) { if ((*it)->getNeighbour(i) && mesh->isPeriodicAssociation((*it)->getBoundary(i))) { (*it)->getNeighbour(i)->setNeighbour((*it)->getOppVertex(i), NULL); (*it)->setNeighbour(i, NULL); (*it)->setBoundary(i, 0); macroElementNeighbours[(*it)->getIndex()][i] = -1; } } } for (vector::iterator it = allMacroElements.begin(); it != allMacroElements.end(); ++it) { for (int i = 0; i < mesh->getGeo(NEIGH); i++) { if ((*it)->getNeighbour(i) && mesh->isPeriodicAssociation((*it)->getBoundary(i))) { (*it)->getNeighbour(i)->setNeighbour((*it)->getOppVertex(i), NULL); (*it)->setNeighbour(i, NULL); (*it)->setBoundary(i, 0); macroElementNeighbours[(*it)->getIndex()][i] = -1; } } } // === Remove all macro elements that are not part of the rank partition. === removeMacroElements(); // === Create new global and local DOF numbering. === // We have to remove the VertexVectors, which contain periodic assoiciations, // because they are not valid anymore after some macro elements have been removed // and the corresponding DOFs were deleted. for (map::iterator it = mesh->getPeriodicAssociations().begin(); it != mesh->getPeriodicAssociations().end(); ++it) const_cast(mesh->getDofAdmin(0)).removeDOFContainer(dynamic_cast(it->second)); updateLocalGlobalNumbering(); // === If in debug mode, make some tests. === #if (DEBUG != 0) MSG("AMDiS runs in debug mode, so make some test ...\n"); ParallelDebug::testAllElements(*this); debug::testSortedDofs(mesh, elMap); ParallelDebug::testInteriorBoundary(*this); ParallelDebug::testCommonDofs(*this, true); ParallelDebug::testGlobalIndexByCoords(*this); debug::writeMesh(feSpace, -1, debugOutputDir + "macro_mesh"); MSG("Debug mode tests finished!\n"); #endif // === Create periodic dof mapping, if there are periodic boundaries. === createPeriodicMap(); // === Global refinements. === int globalRefinement = 0; GET_PARAMETER(0, mesh->getName() + "->global refinements", "%d", &globalRefinement); if (globalRefinement > 0) { refineManager->globalRefine(mesh, globalRefinement); updateLocalGlobalNumbering(); // === Update periodic mapping, if there are periodic boundaries. === createPeriodicMap(); } /// === Set DOF rank information to all matrices and vectors. 
    setRankDofs();

    // === Remove periodic boundary conditions in sequential problem definition. ===
    removePeriodicBoundaryConditions();
  }

  void MeshDistributor::addProblemStat(ProblemVec *probVec)
  {
    FUNCNAME("MeshDistributor::addProblemStat()");

    if (feSpace != NULL) {
      vector<FiniteElemSpace*> feSpaces = probVec->getFeSpaces();
      for (unsigned int i = 0; i < feSpaces.size(); i++) {
        TEST_EXIT(feSpace == feSpaces[i])
          ("Parallelization is not supported for multiple FE spaces!\n");
      }
    } else {
      feSpace = probVec->getFeSpace(0);
      mesh = feSpace->getMesh();
      info = probVec->getInfo();

      TEST_EXIT(mesh->getNumberOfDOFAdmin() == 1)
        ("Only meshes with one DOFAdmin are supported!\n");
      TEST_EXIT(mesh->getDofAdmin(0).getNumberOfPreDofs(VERTEX) == 0)
        ("Wrong pre dof number for DOFAdmin!\n");

      switch (mesh->getDim()) {
      case 2:
        refineManager = new RefinementManager2d();
        break;
      case 3:
        refineManager = new RefinementManager3d();
        break;
      default:
        ERROR_EXIT("This should not happen for dim = %d!\n", mesh->getDim());
      }

      partitioner = new ParMetisPartitioner(mesh, &mpiComm);
    }

    // Create parallel serialization file writer, if needed.
    int writeSerialization = 0;
    GET_PARAMETER(0, probVec->getName() + "->output->write serialization", "%d",
                  &writeSerialization);
    if (writeSerialization && !writeSerializationFile) {
      string filename = "";
      GET_PARAMETER(0, name + "->output->serialization filename", &filename);
      TEST_EXIT(filename != "")
        ("No filename defined for parallel serialization file!\n");

      int tsModulo = -1;
      GET_PARAMETER(0, probVec->getName() + "->output->write every i-th timestep",
                    "%d", &tsModulo);

      probVec->getFileWriterList().push_back(new Serializer(this, filename, tsModulo));
      writeSerializationFile = true;
    }

    int readSerialization = 0;
    GET_PARAMETER(0, probVec->getName() + "->input->read serialization", "%d",
                  &readSerialization);
    if (readSerialization) {
      string filename = "";
      GET_PARAMETER(0, probVec->getName() + "->input->serialization filename", &filename);
      filename += ".p" + lexical_cast<string>(mpiRank);
      MSG("Start deserialization with %s\n", filename.c_str());

      ifstream in(filename.c_str());
      TEST_EXIT(!in.fail())("Could not open deserialization file: %s\n", filename.c_str());

      probVec->deserialize(in);
      in.close();
      MSG("Deserialization from file: %s\n", filename.c_str());

      filename = "";
      GET_PARAMETER(0, name + "->input->serialization filename", &filename);
      TEST_EXIT(filename != "")
        ("No filename defined for parallel deserialization file!\n");

      string rankFilename = filename + ".p" + lexical_cast<string>(mpiRank);
      in.open(rankFilename.c_str());
      TEST_EXIT(!in.fail())("Could not open parallel deserialization file: %s\n",
                            filename.c_str());
      deserialize(in);
      in.close();

      MSG("Deserialization of mesh distributor from file: %s\n", rankFilename.c_str());

      deserialized = true;
    }

    probStat.push_back(probVec);
  }

  void MeshDistributor::exitParallelization()
  {}

  void MeshDistributor::testForMacroMesh()
  {
    FUNCNAME("MeshDistributor::testForMacroMesh()");

    int nMacroElements = 0;

    TraverseStack stack;
    ElInfo *elInfo = stack.traverseFirst(mesh, -1, Mesh::CALL_LEAF_EL);
    while (elInfo) {
      TEST_EXIT(elInfo->getLevel() == 0)
        ("Mesh is already refined!
This does not work with parallelization!\n"); TEST_EXIT(elInfo->getType() == 0) ("Only macro elements with level 0 are supported!\n"); nMacroElements++; elInfo = stack.traverseNext(elInfo); } TEST_EXIT(nMacroElements >= mpiSize) ("The mesh has less macro elements than number of mpi processes!\n"); } void MeshDistributor::synchVector(DOFVector &vec) { StdMpi > stdMpi(mpiComm); for (RankToDofContainer::iterator sendIt = sendDofs.begin(); sendIt != sendDofs.end(); ++sendIt) { vector dofs; int nSendDofs = sendIt->second.size(); dofs.reserve(nSendDofs); for (int i = 0; i < nSendDofs; i++) dofs.push_back(vec[*((sendIt->second)[i])]); stdMpi.send(sendIt->first, dofs); } for (RankToDofContainer::iterator recvIt = recvDofs.begin(); recvIt != recvDofs.end(); ++recvIt) stdMpi.recv(recvIt->first, recvIt->second.size()); stdMpi.startCommunication(MPI_DOUBLE); for (RankToDofContainer::iterator recvIt = recvDofs.begin(); recvIt != recvDofs.end(); ++recvIt) for (unsigned int i = 0; i < recvIt->second.size(); i++) vec[*(recvIt->second)[i]] = stdMpi.getRecvData(recvIt->first)[i]; } void MeshDistributor::synchVector(SystemVector &vec) { int nComponents = vec.getSize(); StdMpi > stdMpi(mpiComm); for (RankToDofContainer::iterator sendIt = sendDofs.begin(); sendIt != sendDofs.end(); ++sendIt) { vector dofs; int nSendDofs = sendIt->second.size(); dofs.reserve(nComponents * nSendDofs); for (int i = 0; i < nComponents; i++) { DOFVector *dofvec = vec.getDOFVector(i); for (int j = 0; j < nSendDofs; j++) dofs.push_back((*dofvec)[*((sendIt->second)[j])]); } stdMpi.send(sendIt->first, dofs); } for (RankToDofContainer::iterator recvIt = recvDofs.begin(); recvIt != recvDofs.end(); ++recvIt) stdMpi.recv(recvIt->first, recvIt->second.size() * nComponents); stdMpi.startCommunication(MPI_DOUBLE); for (RankToDofContainer::iterator recvIt = recvDofs.begin(); recvIt != recvDofs.end(); ++recvIt) { int nRecvDofs = recvIt->second.size(); int counter = 0; for (int i = 0; i < nComponents; i++) { DOFVector *dofvec = vec.getDOFVector(i); for (int j = 0; j < nRecvDofs; j++) (*dofvec)[*(recvIt->second)[j]] = stdMpi.getRecvData(recvIt->first)[counter++]; } } } void MeshDistributor::setRankDofs() { for (unsigned int i = 0; i < probStat.size(); i++) { int nComponents = probStat[i]->getNumComponents(); for (int j = 0; j < nComponents; j++) { for (int k = 0; k < nComponents; k++) if (probStat[i]->getSystemMatrix(j, k)) probStat[i]->getSystemMatrix(j, k)->setRankDofs(isRankDof); TEST_EXIT_DBG(probStat[i]->getRhs()->getDOFVector(j))("No RHS vector!\n"); TEST_EXIT_DBG(probStat[i]->getSolution()->getDOFVector(j))("No solution vector!\n"); probStat[i]->getRhs()->getDOFVector(j)->setRankDofs(isRankDof); probStat[i]->getSolution()->getDOFVector(j)->setRankDofs(isRankDof); } } } void MeshDistributor::removePeriodicBoundaryConditions() { FUNCNAME("MeshDistributor::removePeriodicBoundaryConditions()"); // Remove periodic boundaries in boundary manager on matrices and vectors. 
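  // For every component of every registered problem the periodic entries are
  // erased from the boundary condition maps of the system matrices, the solution
  // vectors and the RHS vectors; afterwards the periodic element data and the
  // periodic vertex associations are removed from the mesh itself.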
    for (unsigned int i = 0; i < probStat.size(); i++) {
      int nComponents = probStat[i]->getNumComponents();
      for (int j = 0; j < nComponents; j++) {
        for (int k = 0; k < nComponents; k++) {
          DOFMatrix* mat = probStat[i]->getSystemMatrix(j, k);
          if (mat && mat->getBoundaryManager())
            removePeriodicBoundaryConditions(const_cast<BoundaryIndexMap&>(mat->getBoundaryManager()->getBoundaryConditionMap()));
        }

        if (probStat[i]->getSolution()->getDOFVector(j)->getBoundaryManager())
          removePeriodicBoundaryConditions(const_cast<BoundaryIndexMap&>(probStat[i]->getSolution()->getDOFVector(j)->getBoundaryManager()->getBoundaryConditionMap()));

        if (probStat[i]->getRhs()->getDOFVector(j)->getBoundaryManager())
          removePeriodicBoundaryConditions(const_cast<BoundaryIndexMap&>(probStat[i]->getRhs()->getDOFVector(j)->getBoundaryManager()->getBoundaryConditionMap()));
      }
    }

    // Remove periodic boundaries on elements in mesh.
    TraverseStack stack;
    ElInfo *elInfo = stack.traverseFirst(mesh, -1, Mesh::CALL_EVERY_EL_PREORDER);
    while (elInfo) {
      elInfo->getElement()->deleteElementData(PERIODIC);
      elInfo = stack.traverseNext(elInfo);
    }

    // Remove periodic vertex associations.
    mesh->getPeriodicAssociations().clear();
  }

  void MeshDistributor::removePeriodicBoundaryConditions(BoundaryIndexMap& boundaryMap)
  {
    BoundaryIndexMap::iterator it = boundaryMap.begin();
    while (it != boundaryMap.end()) {
      if (it->second->isPeriodic())
        boundaryMap.erase(it++);
      else
        ++it;
    }
  }

  void MeshDistributor::checkMeshChange()
  {
    FUNCNAME("MeshDistributor::checkMeshChange()");

    double first = MPI::Wtime();

    // === If the mesh has not been changed on any rank, return. ===
    int recvAllValues = 0;
    int sendValue = static_cast<int>(mesh->getChangeIndex() != lastMeshChangeIndex);
    mpiComm.Allreduce(&sendValue, &recvAllValues, 1, MPI_INT, MPI_SUM);

    if (recvAllValues == 0)
      return;

    // === At least one rank's mesh has been changed, so the boundaries must be ===
    // === adapted to the new mesh structure.                                   ===
    do {
      // To check the interior boundaries, the ownership of the boundaries is not
      // important. Therefore, we add all boundaries to one boundary container.
      RankToBoundMap allBound;

      for (InteriorBoundary::iterator it(myIntBoundary); !it.end(); ++it)
        if ((mesh->getDim() == 2 && it->rankObj.subObj == EDGE) ||
            (mesh->getDim() == 3 && it->rankObj.subObj == FACE))
          allBound[it.getRank()].push_back(*it);

      for (InteriorBoundary::iterator it(otherIntBoundary); !it.end(); ++it)
        if ((mesh->getDim() == 2 && it->rankObj.subObj == EDGE) ||
            (mesh->getDim() == 3 && it->rankObj.subObj == FACE))
          allBound[it.getRank()].push_back(*it);

      for (InteriorBoundary::iterator it(periodicBoundary); !it.end(); ++it) {
        if (it.getRank() == mpiRank) {
          WARNING("Well, you know!\n");
        } else {
          if ((mesh->getDim() == 2 && it->rankObj.subObj == EDGE) ||
              (mesh->getDim() == 3 && it->rankObj.subObj == FACE))
            allBound[it.getRank()].push_back(*it);
        }
      }

      // === Check the boundaries and adapt the mesh if necessary. ===
#if (DEBUG != 0)
      MSG("Run checkAndAdaptBoundary ...\n");
#endif
      bool meshChanged = checkAndAdaptBoundary(allBound);

      // === Check on all ranks if at least one rank's mesh has changed. ===
      int sendValue = static_cast<int>(!meshChanged);
      recvAllValues = 0;
      mpiComm.Allreduce(&sendValue, &recvAllValues, 1, MPI_INT, MPI_SUM);

#if (DEBUG != 0)
      MSG("Mesh changed on %d ranks!\n", recvAllValues);
#endif
    } while (recvAllValues != 0);

#if (DEBUG != 0)
    debug::writeMesh(feSpace, -1, debugOutputDir + "mesh");
#endif

    // === Because the mesh has been changed, update the DOF numbering and mappings. ===
    updateLocalGlobalNumbering();

    // === Update periodic mapping, if there are periodic boundaries.
=== createPeriodicMap(); INFO(info, 8)("Parallel mesh adaption needed %.5f seconds\n", MPI::Wtime() - first); // === The mesh has changed, so check if it is required to repartition the mesh. === nTimestepsAfterLastRepartitioning++; if (repartitioningAllowed) { if (nTimestepsAfterLastRepartitioning >= repartitionIthChange) { repartitionMesh(); nTimestepsAfterLastRepartitioning = 0; } } } bool MeshDistributor::checkAndAdaptBoundary(RankToBoundMap &allBound) { FUNCNAME("MeshDistributor::checkAndAdaptBoundary()"); // === Create mesh structure codes for all ranks boundary elements. === map sendCodes; for (RankToBoundMap::iterator it = allBound.begin(); it != allBound.end(); ++it) { for (vector::iterator boundIt = it->second.begin(); boundIt != it->second.end(); ++boundIt) { MeshStructure elCode; elCode.init(boundIt->rankObj); sendCodes[it->first].push_back(elCode); } } StdMpi stdMpi(mpiComm, true); stdMpi.send(sendCodes); stdMpi.recv(allBound); stdMpi.startCommunication(MPI_UNSIGNED_LONG); // === Compare received mesh structure codes. === bool meshFitTogether = true; for (RankToBoundMap::iterator it = allBound.begin(); it != allBound.end(); ++it) { MeshCodeVec &recvCodes = stdMpi.getRecvData()[it->first]; int i = 0; for (vector::iterator boundIt = it->second.begin(); boundIt != it->second.end(); ++boundIt) { MeshStructure elCode; elCode.init(boundIt->rankObj); if (elCode.getCode() != recvCodes[i].getCode()) { TEST_EXIT_DBG(refineManager)("Refinement manager is not set correctly!\n"); bool b = startFitElementToMeshCode(recvCodes[i], boundIt->rankObj.el, boundIt->rankObj.subObj, boundIt->rankObj.ithObj, boundIt->rankObj.elType, boundIt->rankObj.reverseMode); if (b) meshFitTogether = false; } i++; } } return meshFitTogether; } bool MeshDistributor::startFitElementToMeshCode(MeshStructure &code, Element *el, GeoIndex subObj, int ithObj, int elType, bool reverseMode) { FUNCNAME("MeshDistributor::startFitElementToMeshCode()"); TEST_EXIT_DBG(el)("No element given!\n"); // If the code is empty, the element does not matter and the function can // return without chaning the mesh. if (code.empty()) return false; // s0 and s1 are the number of the edge/face in both child of the element, // which contain the edge/face the function has to traverse through. If the // edge/face is not contained in one of the children, s0 or s1 is -1. int s0 = el->getSubObjOfChild(0, subObj, ithObj, elType); int s1 = el->getSubObjOfChild(1, subObj, ithObj, elType); TEST_EXIT_DBG(s0 != -1 || s1 != -1)("This should not happen!\n"); bool meshChanged = false; Flag traverseFlag = Mesh::CALL_EVERY_EL_PREORDER | Mesh::FILL_NEIGH | Mesh::FILL_BOUND; // Test for reverse mode, in which the left and right children of elements // are flipped. if (reverseMode) traverseFlag |= Mesh::CALL_REVERSE_MODE; // === If the edge/face is contained in both children. === if (s0 != -1 && s1 != -1) { // Create traverse stack and traverse within the mesh until the element, // which should be fitted to the mesh structure code, is reached. TraverseStack stack; ElInfo *elInfo = stack.traverseFirst(el->getMesh(), -1, traverseFlag); while (elInfo && elInfo->getElement() != el) elInfo = stack.traverseNext(elInfo); TEST_EXIT_DBG(elInfo->getElement() == el)("This should not happen!\n"); meshChanged = fitElementToMeshCode(code, stack, subObj, ithObj, reverseMode); return meshChanged; } // === The edge/face is contained in only on of the both children. === if (el->isLeaf()) { // If element is leaf and code contains only one leaf element, we are finished. 
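    // Otherwise the received structure code describes a refinement of this leaf,
    // so the element is marked and refined below before the recursion descends
    // into its children.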
if (code.getNumElements() == 1 && code.isLeafElement()) return false; // Create traverse stack and traverse the mesh to the element. TraverseStack stack; ElInfo *elInfo = stack.traverseFirst(el->getMesh(), -1, traverseFlag); while (elInfo && elInfo->getElement() != el) elInfo = stack.traverseNext(elInfo); TEST_EXIT_DBG(elInfo)("This should not happen!\n"); // Code is not leaf, therefore refine the element. el->setMark(1); refineManager->setMesh(el->getMesh()); refineManager->setStack(&stack); refineManager->refineFunction(elInfo); meshChanged = true; } Element *child0 = el->getFirstChild(); Element *child1 = el->getSecondChild(); if (reverseMode) { swap(s0, s1); swap(child0, child1); } // === We know that the edge/face is contained in only one of the children. === // === Therefore, traverse the mesh to this children and fit this element === // === To the mesh structure code. === TraverseStack stack; ElInfo *elInfo = stack.traverseFirst(el->getMesh(), -1, traverseFlag); if (s0 != -1) { while (elInfo && elInfo->getElement() != child0) elInfo = stack.traverseNext(elInfo); meshChanged |= fitElementToMeshCode(code, stack, subObj, s0, reverseMode); } else { while (elInfo && elInfo->getElement() != child1) elInfo = stack.traverseNext(elInfo); meshChanged |= fitElementToMeshCode(code, stack, subObj, s1, reverseMode); } return meshChanged; } bool MeshDistributor::fitElementToMeshCode(MeshStructure &code, TraverseStack &stack, GeoIndex subObj, int ithObj, bool reverseMode) { FUNCNAME("MeshDistributor::fitElementToMeshCode()"); // === Test if there are more elements in stack to check with the code. === ElInfo *elInfo = stack.getElInfo(); if (!elInfo) return false; // === Test if code contains a leaf element. If this is the case, the === // === current element is finished. Traverse the mesh to the next === // === coarser element. === if (code.isLeafElement()) { int level = elInfo->getLevel(); do { elInfo = stack.traverseNext(elInfo); } while (elInfo && elInfo->getLevel() > level); return false; } bool meshChanged = false; Element *el = elInfo->getElement(); // === If element is leaf (and the code is not), refine the element. === if (el->isLeaf()) { TEST_EXIT_DBG(elInfo->getLevel() < 255)("This should not happen!\n"); el->setMark(1); refineManager->setMesh(el->getMesh()); refineManager->setStack(&stack); refineManager->refineFunction(elInfo); meshChanged = true; } // === Continue fitting the mesh structure code to the children of the === // === current element. === int s0 = el->getSubObjOfChild(0, subObj, ithObj, elInfo->getType()); int s1 = el->getSubObjOfChild(1, subObj, ithObj, elInfo->getType()); Element *child0 = el->getFirstChild(); Element *child1 = el->getSecondChild(); if (reverseMode) { swap(s0, s1); swap(child0, child1); } // === Traverse left child. === if (s0 != -1) { // The edge/face is contained in the left child, therefore fit this // child to the mesh structure code. stack.traverseNext(elInfo); code.nextElement(); meshChanged |= fitElementToMeshCode(code, stack, subObj, s0, reverseMode); elInfo = stack.getElInfo(); } else { // The edge/face is not contained in the left child. Hence we need // to traverse through all subelements of the left child until we get // the second child of the current element. do { elInfo = stack.traverseNext(elInfo); } while (elInfo && elInfo->getElement() != child1); TEST_EXIT_DBG(elInfo != NULL)("This should not happen!\n"); } TEST_EXIT_DBG(elInfo->getElement() == child1) ("Got wrong child with idx = %d! 
Searched for child idx = %d\n", elInfo->getElement()->getIndex(), child1->getIndex()); // === Traverse right child. === if (s1 != -1) { // The edge/face is contained in the right child, therefore fit this // child to the mesh structure code. code.nextElement(); meshChanged |= fitElementToMeshCode(code, stack, subObj, s1, reverseMode); } else { // The edge/face is not contained in the right child. Hence we need // to traverse through all subelements of the right child until we have // finished traversing the current element with all its subelements. int level = elInfo->getLevel(); do { elInfo = stack.traverseNext(elInfo); } while (elInfo && elInfo->getLevel() > level); } return meshChanged; } void MeshDistributor::serialize(ostream &out, DofContainer &data) { int vecSize = data.size(); SerUtil::serialize(out, vecSize); for (int i = 0; i < vecSize; i++) { int dofIndex = *(data[i]); SerUtil::serialize(out, dofIndex); } } void MeshDistributor::deserialize(istream &in, DofContainer &data, map &dofMap) { FUNCNAME("MeshDistributor::deserialize()"); int vecSize = 0; SerUtil::deserialize(in, vecSize); data.clear(); data.resize(vecSize); for (int i = 0; i < vecSize; i++) { int dofIndex = 0; SerUtil::deserialize(in, dofIndex); TEST_EXIT_DBG(dofMap.count(dofIndex) != 0) ("Dof index could not be deserialized correctly!\n"); data[i] = dofMap[dofIndex]; } } void MeshDistributor::serialize(ostream &out, RankToDofContainer &data) { int mapSize = data.size(); SerUtil::serialize(out, mapSize); for (RankToDofContainer::iterator it = data.begin(); it != data.end(); ++it) { int rank = it->first; SerUtil::serialize(out, rank); serialize(out, it->second); } } void MeshDistributor::deserialize(istream &in, RankToDofContainer &data, map &dofMap) { data.clear(); int mapSize = 0; SerUtil::deserialize(in, mapSize); for (int i = 0; i < mapSize; i++) { int rank = 0; SerUtil::deserialize(in, rank); deserialize(in, data[rank], dofMap); } } void MeshDistributor::setInitialElementWeights() { FUNCNAME("MeshDistributor::setInitialElementWeights()"); elemWeights.clear(); string filename = ""; GET_PARAMETER(0, mesh->getName() + "->macro weights", &filename); if (filename != "") { MSG("Read macro weights from %s\n", filename.c_str()); ifstream infile; infile.open(filename.c_str(), ifstream::in); while (!infile.eof()) { int elNum, elWeight; infile >> elNum; if (infile.eof()) break; infile >> elWeight; elemWeights[elNum] = elWeight; } infile.close(); } else { TraverseStack stack; ElInfo *elInfo = stack.traverseFirst(mesh, -1, Mesh::CALL_LEAF_EL); while (elInfo) { elemWeights[elInfo->getElement()->getIndex()] = 1.0; elInfo = stack.traverseNext(elInfo); } } } void MeshDistributor::repartitionMesh() { FUNCNAME("MeshDistributor::repartitionMesh()"); TEST_EXIT(mesh->getNumberOfDOFAdmin() == 1) ("Only meshes with one DOFAdmin are supported!\n"); int nDofs = mesh->getDofAdmin(0).getUsedDofs(); int repartitioning = 0; vector nDofsInRank(mpiSize); mpiComm.Gather(&nDofs, 1, MPI_INT, &(nDofsInRank[0]), 1, MPI_INT, 0); if (mpiRank == 0) { int nOverallDofs = 0; int minDofs = numeric_limits::max(); int maxDofs = numeric_limits::min(); for (int i = 0; i < mpiSize; i++) { nOverallDofs += nDofsInRank[i]; minDofs = std::min(minDofs, nDofsInRank[i]); maxDofs = std::max(maxDofs, nDofsInRank[i]); } MSG("Overall DOFs: %d Min DOFs: %d Max DOFs: %d\n", nOverallDofs, minDofs, maxDofs); if (static_cast(maxDofs) / static_cast(minDofs)) repartitioning = 1; mpiComm.Bcast(&repartitioning, 1, MPI_INT, 0); } else { mpiComm.Bcast(&repartitioning, 1, MPI_INT, 0); 
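      // Non-root ranks only take part in the broadcast and receive the decision
      // that rank 0 has made from the gathered DOF counts.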
} if (repartitioning == 0) return; double timePoint = MPI::Wtime(); #if (DEBUG != 0) ParallelDebug::testDoubleDofs(mesh); if (repartCounter == 0) { stringstream oss; oss << debugOutputDir << "partitioning-" << repartCounter << ".vtu"; MSG("Write partitioning to %s\n", oss.str().c_str()); DOFVector tmpa(feSpace, "tmp"); tmpa.set(mpiRank); VtkWriter::writeFile(&tmpa, oss.str()); repartCounter++; } #endif elemWeights.clear(); TraverseStack stack; ElInfo *elInfo = stack.traverseFirst(mesh, -1, Mesh::CALL_LEAF_EL); while (elInfo) { elemWeights[elInfo->getMacroElement()->getIndex()]++; elInfo = stack.traverseNext(elInfo); } partitioner->useLocalGlobalDofMap(&mapLocalGlobalDofs); bool partitioningSucceed = partitioner->partition(elemWeights, ADAPTIVE_REPART, 1000.0); if (!partitioningSucceed) { MSG("ParMETIS created empty partition!\n"); return; } oldPartitionVec = partitionVec; // === Create map that maps macro element indices to pointers to the === // === macro elements. === map elIndexMap; for (vector::iterator it = allMacroElements.begin(); it != allMacroElements.end(); ++it) elIndexMap[(*it)->getIndex()] = *it; // === Create set of all new macro elements this rank will receive from === // === other ranks. === std::set newMacroEl; for (map >::iterator it = partitioner->getRecvElements().begin(); it != partitioner->getRecvElements().end(); ++it) { for (vector::iterator elIt = it->second.begin(); elIt != it->second.end(); ++elIt) { TEST_EXIT_DBG(elIndexMap.count(*elIt) == 1) ("Could not find macro element %d\n", *elIt); newMacroEl.insert(elIndexMap[*elIt]); } } std::set allMacroEl; for (std::set::iterator it = newMacroEl.begin(); it != newMacroEl.end(); ++it) allMacroEl.insert(*it); for (deque::iterator it = mesh->firstMacroElement(); it != mesh->endOfMacroElements(); ++it) allMacroEl.insert(*it); // === Add new macro elements to mesh. === for (std::set::iterator it = newMacroEl.begin(); it != newMacroEl.end(); ++it) { MacroElement *mel = *it; // First, reset all neighbour relations. The correct neighbours will be set later. for (int i = 0; i < mesh->getGeo(NEIGH); i++) mel->setNeighbour(i, NULL); // Create new DOFs for the macro element. for (int i = 0; i < mesh->getGeo(VERTEX); i++) mel->getElement()->setDof(i, mesh->getDof(VERTEX)); // The macro element now has valid DOF pointers. mel->getElement()->setDofValid(true); // Push the macro element to all macro elements in mesh. mesh->getMacroElements().push_back(mel); } // === Send and receive mesh structure codes. === map sendCodes; map > > sendValues; for (map >::iterator it = partitioner->getSendElements().begin(); it != partitioner->getSendElements().end(); ++it) { for (vector::iterator elIt = it->second.begin(); elIt != it->second.end(); ++elIt) { MeshStructure elCode; elCode.init(mesh, *elIt); sendCodes[it->first].push_back(elCode); for (unsigned int i = 0; i < testVec.size(); i++) { vector valVec; elCode.getMeshStructureValues(mesh, *elIt, testVec[i], valVec); sendValues[it->first].push_back(valVec); } } } StdMpi stdMpi(mpiComm, true); stdMpi.send(sendCodes); stdMpi.recv(partitioner->getRecvElements()); stdMpi.startCommunication(MPI_UNSIGNED_LONG); StdMpi > > stdMpi2(mpiComm, true); stdMpi2.send(sendValues); stdMpi2.recv(partitioner->getRecvElements()); stdMpi2.startCommunication(MPI_DOUBLE); // === Adapte the new macro elements due to the received mesh structure codes. 
=== for (map >::iterator it = partitioner->getRecvElements().begin(); it != partitioner->getRecvElements().end(); ++it) { MeshCodeVec &recvCodes = stdMpi.getRecvData()[it->first]; vector > &recvValues = stdMpi2.getRecvData()[it->first]; int i = 0; int j = 0; TEST_EXIT_DBG(recvCodes.size() == it->second.size()) ("Should not happen!\n"); for (vector::iterator elIt = it->second.begin(); elIt != it->second.end(); ++elIt) { recvCodes[i].fitMeshToStructure(mesh, refineManager, false, *elIt); for (unsigned int k = 0; k < testVec.size(); k++) recvCodes[i].setMeshStructureValues(mesh, *elIt, testVec[k], recvValues[j++]); i++; } } // === Set correct neighbour information on macro elements. === for (std::set::iterator it = allMacroEl.begin(); it != allMacroEl.end(); ++it) { int elIndex = (*it)->getIndex(); for (int i = 0; i < mesh->getGeo(NEIGH); i++) { TEST_EXIT_DBG(macroElementNeighbours.count(elIndex) == 1) ("Should not happen!\n"); TEST_EXIT_DBG(static_cast(macroElementNeighbours[elIndex].size()) == mesh->getGeo(NEIGH)) ("Should not happen!\n"); int neighIndex = macroElementNeighbours[elIndex][i]; if (neighIndex == -1 || partitioner->getElementInRank()[neighIndex] == false) { (*it)->setNeighbour(i, NULL); } else { TEST_EXIT_DBG(elIndexMap.count(neighIndex) == 1)("Should not happen!\n"); (*it)->setNeighbour(i, elIndexMap[neighIndex]); } } } // === Delete macros from mesh. === std::set deleteMacroElements; for (map >::iterator it = partitioner->getSendElements().begin(); it != partitioner->getSendElements().end(); ++it) { for (vector::iterator elIt = it->second.begin(); elIt != it->second.end(); ++elIt) { TEST_EXIT_DBG(elIndexMap.count(*elIt) == 1) ("Could not find macro element %d\n", *elIt); deleteMacroElements.insert(elIndexMap[*elIt]); } } // Note that also if there are no macros to be deleted, this function will // update the number of elements, vertices, etc. of the mesh. mesh->removeMacroElements(deleteMacroElements, feSpace); // === Remove double DOFs. === MeshManipulation meshManipulation(feSpace); meshManipulation.deleteDoubleDofs(newMacroEl, elObjects); mesh->dofCompress(); #if (DEBUG != 0) stringstream oss; oss << debugOutputDir << "partitioning-" << repartCounter << ".vtu"; DOFVector tmpa(feSpace, "tmp"); tmpa.set(mpiRank); VtkWriter::writeFile(&tmpa, oss.str()); repartCounter++; ParallelDebug::testAllElements(*this); ParallelDebug::testDoubleDofs(mesh); #endif partitioner->fillCoarsePartitionVec(&partitionVec); updateInteriorBoundaryInfo(); #if (DEBUG != 0) ParallelDebug::printBoundaryInfo(*this); #endif updateLocalGlobalNumbering(); #if (DEBUG != 0) MSG("AMDiS runs in debug mode, so make some test ...\n"); ParallelDebug::testAllElements(*this); ParallelDebug::testInteriorBoundary(*this); ParallelDebug::printBoundaryInfo(*this); MSG("Debug mode tests finished!\n"); #endif MSG("Mesh repartitioning needed %.5f seconds\n", MPI::Wtime() - timePoint); } void MeshDistributor::createInteriorBoundaryInfo() { FUNCNAME("MeshDistributor::createInteriorBoundaryInfo()"); createMeshElementData(); createBoundaryData(); } void MeshDistributor::updateInteriorBoundaryInfo() { FUNCNAME("MeshDistributor::updateInteriorBoundaryInfo()"); elObjects.createRankData(partitionVec); createBoundaryData(); } void MeshDistributor::createMeshElementData() { FUNCNAME("MeshDistributor::createMeshElementData()"); // === Fills macro element data structures. 
=== TraverseStack stack; ElInfo *elInfo = stack.traverseFirst(mesh, -1, Mesh::CALL_LEAF_EL | Mesh::FILL_NEIGH | Mesh::FILL_BOUND); while (elInfo) { TEST_EXIT_DBG(elInfo->getLevel() == 0)("Should not happen!\n"); Element *el = elInfo->getElement(); macroElIndexMap[el->getIndex()] = el; macroElIndexTypeMap[el->getIndex()] = elInfo->getType(); // === Add all sub object of the element to the variable elObjects. === elObjects.addElement(elInfo); elInfo = stack.traverseNext(elInfo); } // === Create periodic data, if there are periodic boundary conditions. === elObjects.createPeriodicData(); // === Create mesh element data for this rank. === elObjects.createRankData(partitionVec); } void MeshDistributor::createBoundaryData() { FUNCNAME("MeshDistributor::createBoundaryData()"); // === Clear all relevant data structures. === myIntBoundary.clear(); otherIntBoundary.clear(); periodicBoundary.clear(); // === Create interior boundary data structure. === for (int geoPos = 0; geoPos < mesh->getDim(); geoPos++) { GeoIndex geoIndex = INDEX_OF_DIM(geoPos, mesh->getDim()); while (elObjects.iterate(geoIndex)) { map& objData = elObjects.getIterateData(); if (objData.count(mpiRank) && objData.size() > 1) { int owner = elObjects.getIterateOwner(); ElementObjectData& rankBoundEl = objData[mpiRank]; TEST_EXIT_DBG(macroElIndexMap[rankBoundEl.elIndex])("Should not happen!\n"); AtomicBoundary bound; bound.rankObj.el = macroElIndexMap[rankBoundEl.elIndex]; bound.rankObj.elIndex = rankBoundEl.elIndex; bound.rankObj.elType = macroElIndexTypeMap[rankBoundEl.elIndex]; bound.rankObj.subObj = geoIndex; bound.rankObj.ithObj = rankBoundEl.ithObject; if (geoIndex == FACE) { for (int edgeNo = 0; edgeNo < 3; edgeNo++) { int edgeOfFace = bound.rankObj.el->getEdgeOfFace(bound.rankObj.ithObj, edgeNo); bound.rankObj.excludedSubstructures.push_back(make_pair(EDGE, edgeOfFace)); } } if (owner == mpiRank) { for (map::iterator it2 = objData.begin(); it2 != objData.end(); ++it2) { if (it2->first == mpiRank) continue; bound.neighObj.el = macroElIndexMap[it2->second.elIndex]; bound.neighObj.elIndex = it2->second.elIndex; bound.neighObj.elType = macroElIndexTypeMap[it2->second.elIndex]; bound.neighObj.subObj = geoIndex; bound.neighObj.ithObj = it2->second.ithObject; bound.type = INTERIOR; AtomicBoundary& b = myIntBoundary.getNewAtomic(it2->first); b = bound; b.rankObj.setReverseMode(b.neighObj, feSpace, bound.type); } } else { TEST_EXIT_DBG(objData.count(owner) == 1) ("Should not happen!\n"); ElementObjectData& ownerBoundEl = objData[owner]; bound.neighObj.el = macroElIndexMap[ownerBoundEl.elIndex]; bound.neighObj.elIndex = ownerBoundEl.elIndex; bound.neighObj.elType = -1; bound.neighObj.subObj = geoIndex; bound.neighObj.ithObj = ownerBoundEl.ithObject; bound.type = INTERIOR; AtomicBoundary& b = otherIntBoundary.getNewAtomic(owner); b = bound; b.neighObj.setReverseMode(b.rankObj, feSpace, bound.type); } } } } // === Create periodic boundary data structure. 
=== for (PerBoundMap::iterator it = elObjects.getPeriodicVertices().begin(); it != elObjects.getPeriodicVertices().end(); ++it) { if (elObjects.isInRank(it->first.first, mpiRank) == false) continue; WorldVector c0, c1; mesh->getDofIndexCoords(it->first.first, feSpace, c0); mesh->getDofIndexCoords(it->first.second, feSpace, c1); // MSG("CREATE BOUNDARY FOR DOF MAP: %d (%.3f %.3f %.3f)<-> %d (%.3f %.3f %.3f)\n", it->first.first, // c0[0], c0[1], c0[2], it->first.second, c1[0], c1[1], c1[2]); ElementObjectData& perDofEl0 = elObjects.getElementsInRank(it->first.first)[mpiRank]; // MSG("DATA: %d %d %d\n", perDofEl0.elIndex, VERTEX, perDofEl0.ithObject); for (map::iterator elIt = elObjects.getElementsInRank(it->first.second).begin(); elIt != elObjects.getElementsInRank(it->first.second).end(); ++elIt) { int otherElementRank = elIt->first; ElementObjectData& perDofEl1 = elIt->second; AtomicBoundary bound; bound.rankObj.el = macroElIndexMap[perDofEl0.elIndex]; bound.rankObj.elIndex = perDofEl0.elIndex; bound.rankObj.elType = macroElIndexTypeMap[perDofEl0.elIndex]; bound.rankObj.subObj = VERTEX; bound.rankObj.ithObj = perDofEl0.ithObject; bound.neighObj.el = macroElIndexMap[perDofEl1.elIndex]; bound.neighObj.elIndex = perDofEl1.elIndex; bound.neighObj.elType = macroElIndexTypeMap[perDofEl1.elIndex]; bound.neighObj.subObj = VERTEX; bound.neighObj.ithObj = perDofEl1.ithObject; bound.type = it->second; AtomicBoundary& b = periodicBoundary.getNewAtomic(otherElementRank); b = bound; } } for (PerBoundMap::iterator it = elObjects.getPeriodicEdges().begin(); it != elObjects.getPeriodicEdges().end(); ++it) { if (elObjects.isInRank(it->first.first, mpiRank) == false) continue; ElementObjectData& perEdgeEl0 = elObjects.getElementsInRank(it->first.first)[mpiRank]; for (map::iterator elIt = elObjects.getElementsInRank(it->first.second).begin(); elIt != elObjects.getElementsInRank(it->first.second).end(); ++elIt) { int otherElementRank = elIt->first; ElementObjectData& perEdgeEl1 = elIt->second; AtomicBoundary bound; bound.rankObj.el = macroElIndexMap[perEdgeEl0.elIndex]; bound.rankObj.elIndex = perEdgeEl0.elIndex; bound.rankObj.elType = macroElIndexTypeMap[perEdgeEl0.elIndex]; bound.rankObj.subObj = EDGE; bound.rankObj.ithObj = perEdgeEl0.ithObject; bound.neighObj.el = macroElIndexMap[perEdgeEl1.elIndex]; bound.neighObj.elIndex = perEdgeEl1.elIndex; bound.neighObj.elType = macroElIndexTypeMap[perEdgeEl1.elIndex]; bound.neighObj.subObj = EDGE; bound.neighObj.ithObj = perEdgeEl1.ithObject; bound.type = it->second; AtomicBoundary& b = periodicBoundary.getNewAtomic(otherElementRank); b = bound; if (mpiRank > otherElementRank) b.rankObj.setReverseMode(b.neighObj, feSpace, bound.type); else b.neighObj.setReverseMode(b.rankObj, feSpace, bound.type); } } for (PerBoundMap::iterator it = elObjects.getPeriodicFaces().begin(); it != elObjects.getPeriodicFaces().end(); ++it) { if (elObjects.isInRank(it->first.first, mpiRank) == false) continue; TEST_EXIT_DBG(elObjects.getElements(it->first.first).size() == 1) ("Should not happen!\n"); TEST_EXIT_DBG(elObjects.getElements(it->first.second).size() == 1) ("Should not happen!\n"); ElementObjectData& perEdgeEl0 = elObjects.getElementsInRank(it->first.first)[mpiRank]; for (map::iterator elIt = elObjects.getElementsInRank(it->first.second).begin(); elIt != elObjects.getElementsInRank(it->first.second).end(); ++elIt) { int otherElementRank = elIt->first; ElementObjectData& perEdgeEl1 = elIt->second; AtomicBoundary bound; bound.rankObj.el = macroElIndexMap[perEdgeEl0.elIndex]; 
        bound.rankObj.elIndex = perEdgeEl0.elIndex;
        bound.rankObj.elType = macroElIndexTypeMap[perEdgeEl0.elIndex];
        bound.rankObj.subObj = FACE;
        bound.rankObj.ithObj = perEdgeEl0.ithObject;

        bound.neighObj.el = macroElIndexMap[perEdgeEl1.elIndex];
        bound.neighObj.elIndex = perEdgeEl1.elIndex;
        bound.neighObj.elType = macroElIndexTypeMap[perEdgeEl1.elIndex];
        bound.neighObj.subObj = FACE;
        bound.neighObj.ithObj = perEdgeEl1.ithObject;

        bound.type = it->second;

        AtomicBoundary& b = periodicBoundary.getNewAtomic(otherElementRank);
        b = bound;

        if (mpiRank > otherElementRank)
          b.rankObj.setReverseMode(b.neighObj, feSpace, bound.type);
        else
          b.neighObj.setReverseMode(b.rankObj, feSpace, bound.type);
      }
    }

    // === Once we have this information, we must take care of the order of the  ===
    // === atomic boundaries in the three boundary handling objects. In the end,  ===
    // === all boundaries have to be in the same order on both ranks that share   ===
    // === the boundary.                                                           ===

    StdMpi<vector<AtomicBoundary> > stdMpi(mpiComm);
    stdMpi.send(myIntBoundary.boundary);
    stdMpi.recv(otherIntBoundary.boundary);
    stdMpi.startCommunication(MPI_INT);

    // === The information about all neighbouring boundaries has been received. So ===
    // === the rank tests if its own atomic boundaries are in the same order. If   ===
    // === not, the atomic boundaries are swapped into the correct order.          ===
    for (RankToBoundMap::iterator rankIt = otherIntBoundary.boundary.begin();
         rankIt != otherIntBoundary.boundary.end(); ++rankIt) {

      // === We have received from rank "rankIt->first" the ordered list of element ===
      // === indices. Now, we have to sort the corresponding list in this rank to   ===
      // === get the same order.                                                    ===
      for (unsigned int j = 0; j < rankIt->second.size(); j++) {
        // If the expected object is not in place, search for it.
        BoundaryObject &recvedBound = stdMpi.getRecvData()[rankIt->first][j].rankObj;

        if ((rankIt->second)[j].neighObj != recvedBound) {
          unsigned int k = j + 1;
          for (; k < rankIt->second.size(); k++)
            if ((rankIt->second)[k].neighObj == recvedBound)
              break;

          // The element must always be found, because the list is just in another order.
          TEST_EXIT_DBG(k < rankIt->second.size())("Should never happen!\n");

          // Swap the current with the found element.
          AtomicBoundary tmpBound = (rankIt->second)[k];
          (rankIt->second)[k] = (rankIt->second)[j];
          (rankIt->second)[j] = tmpBound;
        }
      }
    }

    // === Do the same for the periodic boundaries.
=== if (periodicBoundary.boundary.size() > 0) { stdMpi.clear(); InteriorBoundary sendBounds, recvBounds; for (RankToBoundMap::iterator rankIt = periodicBoundary.boundary.begin(); rankIt != periodicBoundary.boundary.end(); ++rankIt) { if (rankIt->first == mpiRank) continue; if (rankIt->first < mpiRank) sendBounds.boundary[rankIt->first] = rankIt->second; else recvBounds.boundary[rankIt->first] = rankIt->second; } stdMpi.send(sendBounds.boundary); stdMpi.recv(recvBounds.boundary); stdMpi.startCommunication(MPI_INT); /* for (RankToBoundMap::iterator rankIt = periodicBoundary.boundary.begin(); rankIt != periodicBoundary.boundary.end(); ++rankIt) { for (unsigned int j = 0; j < rankIt->second.size(); j++) { MSG("%-ith boundary with rank %d\n", j, rankIt->first); MSG(" bound: el = %d subobj = %d ithobj = %d\n", periodicBoundary.boundary[rankIt->first][j].rankObj.elIndex, periodicBoundary.boundary[rankIt->first][j].rankObj.subObj, periodicBoundary.boundary[rankIt->first][j].rankObj.ithObj); MSG(" neigh: el = %d subobj = %d ithobj = %d\n", periodicBoundary.boundary[rankIt->first][j].neighObj.elIndex, periodicBoundary.boundary[rankIt->first][j].neighObj.subObj, periodicBoundary.boundary[rankIt->first][j].neighObj.ithObj); } } */ for (RankToBoundMap::iterator rankIt = periodicBoundary.boundary.begin(); rankIt != periodicBoundary.boundary.end(); ++rankIt) { if (rankIt->first <= mpiRank) continue; for (unsigned int j = 0; j < rankIt->second.size(); j++) { BoundaryObject &recvRankObj = stdMpi.getRecvData()[rankIt->first][j].rankObj; BoundaryObject &recvNeighObj = stdMpi.getRecvData()[rankIt->first][j].neighObj; if (periodicBoundary.boundary[rankIt->first][j].neighObj != recvRankObj || periodicBoundary.boundary[rankIt->first][j].rankObj != recvNeighObj) { unsigned int k = j + 1; for (; k < rankIt->second.size(); k++) if (periodicBoundary.boundary[rankIt->first][k].neighObj == recvRankObj && periodicBoundary.boundary[rankIt->first][k].rankObj == recvNeighObj) break; // The element must always be found, because the list is just in // another order. TEST_EXIT_DBG(k < rankIt->second.size())("Should never happen!\n"); // Swap the current with the found element. AtomicBoundary tmpBound = (rankIt->second)[k]; (rankIt->second)[k] = (rankIt->second)[j]; (rankIt->second)[j] = tmpBound; } } } } // periodicBoundary.boundary.size() > 0 } void MeshDistributor::removeMacroElements() { FUNCNAME("MeshDistributor::removeMacroElements()"); std::set macrosToRemove; for (deque::iterator it = mesh->firstMacroElement(); it != mesh->endOfMacroElements(); ++it) if (partitioner->getElementInRank()[(*it)->getIndex()] == false) macrosToRemove.insert(*it); mesh->removeMacroElements(macrosToRemove, feSpace); } void MeshDistributor::updateLocalGlobalNumbering() { FUNCNAME("MeshDistributor::updateLocalGlobalNumbering()"); mesh->dofCompress(); #if (DEBUG != 0) debug::ElementIdxToDofs elMap; debug::createSortedDofs(mesh, elMap); #endif typedef std::set DofSet; // === Get all DOFs in ranks partition. 
=== ElementDofIterator elDofIt(feSpace); DofSet rankDofSet; TraverseStack stack; ElInfo *elInfo = stack.traverseFirst(mesh, -1, Mesh::CALL_LEAF_EL); while (elInfo) { elDofIt.reset(elInfo->getElement()); do { rankDofSet.insert(elDofIt.getDofPtr()); } while(elDofIt.next()); elInfo = stack.traverseNext(elInfo); } DofContainer rankDofs; for (DofSet::iterator it = rankDofSet.begin(); it != rankDofSet.end(); ++it) rankDofs.push_back(*it); sort(rankDofs.begin(), rankDofs.end(), cmpDofsByValue); int nRankAllDofs = rankDofs.size(); // === Traverse on interior boundaries and move all not ranked owned DOFs from === // === rankDofs to boundaryDofs. === sendDofs.clear(); recvDofs.clear(); for (InteriorBoundary::iterator it(myIntBoundary); !it.end(); ++it) { DofContainer dofs; it->rankObj.el->getVertexDofs(feSpace, it->rankObj, dofs); it->rankObj.el->getNonVertexDofs(feSpace, it->rankObj, dofs); for (unsigned int i = 0; i < dofs.size(); i++) sendDofs[it.getRank()].push_back(dofs[i]); } for (InteriorBoundary::iterator it(otherIntBoundary); !it.end(); ++it) { DofContainer dofs; it->rankObj.el->getVertexDofs(feSpace, it->rankObj, dofs); it->rankObj.el->getNonVertexDofs(feSpace, it->rankObj, dofs); for (unsigned int i = 0; i < dofs.size(); i++) { DofContainer::iterator eraseIt = find(rankDofs.begin(), rankDofs.end(), dofs[i]); if (eraseIt != rankDofs.end()) rankDofs.erase(eraseIt); recvDofs[it.getRank()].push_back(dofs[i]); } } nRankDofs = rankDofs.size(); // === Get starting position for global rank dof ordering. ==== mpiComm.Scan(&nRankDofs, &rstart, 1, MPI_INT, MPI_SUM); rstart -= nRankDofs; // === Calculate number of overall DOFs of all partitions. === nOverallDofs = 0; mpiComm.Allreduce(&nRankDofs, &nOverallDofs, 1, MPI_INT, MPI_SUM); // First, we set all dofs in ranks partition to be owend by the rank. Later, // the dofs in ranks partition that are owned by other rank are set to false. isRankDof.clear(); for (int i = 0; i < nRankAllDofs; i++) isRankDof[i] = true; // Stores for all rank owned dofs a new global index. DofIndexMap rankDofsNewGlobalIndex; for (int i = 0; i < nRankDofs; i++) rankDofsNewGlobalIndex[rankDofs[i]] = i + rstart; // === Send new DOF indices. === #if (DEBUG != 0) ParallelDebug::testDofContainerCommunication(*this, sendDofs, recvDofs); #endif int i = 0; StdMpi > stdMpi(mpiComm, false); for (RankToDofContainer::iterator sendIt = sendDofs.begin(); sendIt != sendDofs.end(); ++sendIt, i++) { stdMpi.getSendData(sendIt->first).resize(0); stdMpi.getSendData(sendIt->first).reserve(sendIt->second.size()); for (DofContainer::iterator dofIt = sendIt->second.begin(); dofIt != sendIt->second.end(); ++dofIt) stdMpi.getSendData(sendIt->first).push_back(rankDofsNewGlobalIndex[*dofIt]); } stdMpi.updateSendDataSize(); stdMpi.recv(recvDofs); stdMpi.startCommunication(MPI_INT); for (RankToDofContainer::iterator recvIt = recvDofs.begin(); recvIt != recvDofs.end(); ++recvIt) { int j = 0; for (DofContainer::iterator dofIt = recvIt->second.begin(); dofIt != recvIt->second.end(); ++dofIt) { rankDofsNewGlobalIndex[*dofIt] = stdMpi.getRecvData(recvIt->first)[j++]; isRankDof[**dofIt] = false; } } // === Create now the local to global index and local to dof index mappings. 
=== mapLocalGlobalDofs.clear(); mapLocalDofIndex.clear(); for (DofIndexMap::iterator dofIt = rankDofsNewGlobalIndex.begin(); dofIt != rankDofsNewGlobalIndex.end(); ++dofIt) mapLocalGlobalDofs[*(dofIt->first)] = dofIt->second; for (int i = 0; i < nRankDofs; i++) mapLocalDofIndex[i] = *(rankDofs[i]); // === Update dof admins due to new number of dofs. === lastMeshChangeIndex = mesh->getChangeIndex(); #if (DEBUG != 0) stringstream oss; oss << debugOutputDir << "elementIndex-" << mpiRank << ".vtu"; debug::writeElementIndexMesh(mesh, oss.str()); debug::testSortedDofs(mesh, elMap); ParallelDebug::testCommonDofs(*this, true); ParallelDebug::testGlobalIndexByCoords(*this); ParallelDebug::writeDebugFile(*this, debugOutputDir + "mpi-dbg", "dat"); MSG("------------- Debug information -------------\n"); MSG("nRankDofs = %d\n", nRankDofs); MSG("nOverallDofs = %d\n", nOverallDofs); MSG("rstart %d\n", rstart); #endif } void MeshDistributor::createPeriodicMap() { FUNCNAME("MeshDistributor::createPeriodicMap()"); if (periodicBoundary.boundary.size() == 0) return; // Clear all periodic dof mappings calculated before. We do it from scratch. periodicDof.clear(); periodicDofAssociations.clear(); StdMpi > stdMpi(mpiComm, false); // === Each rank traverse its periodic boundaries and sends the DOF indices === // === to the rank "on the other side" of the periodic boundary. === RankToDofContainer rankPeriodicDofs; map > rankToDofType; for (RankToBoundMap::iterator it = periodicBoundary.boundary.begin(); it != periodicBoundary.boundary.end(); ++it) { if (it->first == mpiRank) { TEST_EXIT_DBG(it->second.size() % 2 == 0)("Should not happen!\n"); for (unsigned int i = 0; i < it->second.size(); i++) { AtomicBoundary &bound = it->second[i]; DofContainer dofs0, dofs1; bound.rankObj.el->getVertexDofs(feSpace, bound.rankObj, dofs0); bound.rankObj.el->getNonVertexDofs(feSpace, bound.rankObj, dofs0); bound.neighObj.el->getVertexDofs(feSpace, bound.neighObj, dofs1); bound.neighObj.el->getNonVertexDofs(feSpace, bound.neighObj, dofs1); TEST_EXIT_DBG(dofs0.size() == dofs1.size())("Should not happen!\n"); BoundaryType type = bound.type; for (unsigned int j = 0; j < dofs0.size(); j++) { DegreeOfFreedom globalDof0 = mapLocalGlobalDofs[*(dofs0[j])]; DegreeOfFreedom globalDof1 = mapLocalGlobalDofs[*(dofs1[j])]; if (periodicDofAssociations[globalDof0].count(type) == 0) { periodicDof[type][globalDof0] = globalDof1; periodicDofAssociations[globalDof0].insert(type); } } } } else { // Create DOF indices on the boundary. DofContainer& dofs = rankPeriodicDofs[it->first]; for (vector::iterator boundIt = it->second.begin(); boundIt != it->second.end(); ++boundIt) { int nDofs = dofs.size(); boundIt->rankObj.el->getVertexDofs(feSpace, boundIt->rankObj, dofs); boundIt->rankObj.el->getNonVertexDofs(feSpace, boundIt->rankObj, dofs); for (unsigned int i = 0; i < (dofs.size() - nDofs); i++) rankToDofType[it->first].push_back(boundIt->type); } // Send the global indices to the rank on the other side. stdMpi.getSendData(it->first).reserve(dofs.size()); for (unsigned int i = 0; i < dofs.size(); i++) stdMpi.getSendData(it->first).push_back(mapLocalGlobalDofs[*(dofs[i])]); // Receive from this rank the same number of dofs. stdMpi.recv(it->first, dofs.size()); } } stdMpi.updateSendDataSize(); stdMpi.startCommunication(MPI_INT); // === The rank has received the dofs from the rank on the other side of === // === the boundary. 
Now it can use them to create the mapping between === // === the periodic dofs in this rank and the corresponding periodic === // === dofs from the other ranks. === for (RankToBoundMap::iterator it = periodicBoundary.boundary.begin(); it != periodicBoundary.boundary.end(); ++it) { DofContainer& dofs = rankPeriodicDofs[it->first]; vector& types = rankToDofType[it->first]; TEST_EXIT_DBG(dofs.size() == types.size())("Should not happen!\n"); // Added the received dofs to the mapping. for (unsigned int i = 0; i < dofs.size(); i++) { int globalDofIndex = mapLocalGlobalDofs[*(dofs[i])]; int mapGlobalDofIndex = stdMpi.getRecvData(it->first)[i]; BoundaryType type = types[i]; // Check if this global dof with the corresponding boundary type was // not added before by another periodic boundary from other rank. if (periodicDofAssociations[globalDofIndex].count(type) == 0) { periodicDof[type][globalDofIndex] = mapGlobalDofIndex; periodicDofAssociations[globalDofIndex].insert(type); } } } StdMpi stdMpi2(mpiComm); for (RankToBoundMap::iterator it = periodicBoundary.boundary.begin(); it != periodicBoundary.boundary.end(); ++it) { if (it->first == mpiRank) continue; PeriodicDofMap perObjMap; for (vector::iterator boundIt = it->second.begin(); boundIt != it->second.end(); ++boundIt) { if (boundIt->type >= -3) continue; DofContainer dofs; boundIt->rankObj.el->getVertexDofs(feSpace, boundIt->rankObj, dofs); boundIt->rankObj.el->getNonVertexDofs(feSpace, boundIt->rankObj, dofs); for (unsigned int i = 0; i < dofs.size(); i++) { DegreeOfFreedom globalDof = mapLocalGlobalDofs[*dofs[i]]; for (std::set::iterator perAscIt = periodicDofAssociations[globalDof].begin(); perAscIt != periodicDofAssociations[globalDof].end(); ++perAscIt) if (*perAscIt >= -3) { TEST_EXIT_DBG(periodicDof[*perAscIt].count(globalDof) == 1) ("Should not happen!\n"); perObjMap[*perAscIt][globalDof] = periodicDof[*perAscIt][globalDof]; } } } stdMpi2.send(it->first, perObjMap); stdMpi2.recv(it->first); } stdMpi2.startCommunication(MPI_INT); for (std::map::iterator it = stdMpi2.getRecvData().begin(); it != stdMpi2.getRecvData().end(); ++it) for (PeriodicDofMap::iterator perIt = it->second.begin(); perIt != it->second.end(); ++perIt) for (DofMapping::iterator dofIt = perIt->second.begin(); dofIt != perIt->second.end(); ++dofIt) { TEST_EXIT_DBG(periodicDof[perIt->first].count(dofIt->second) == 0 || periodicDof[perIt->first][dofIt->second] == dofIt->first) ("Should not happen!\n"); periodicDof[perIt->first][dofIt->second] = dofIt->first; } #if (DEBUG != 0) ParallelDebug::testPeriodicBoundary(*this); #endif } void MeshDistributor::createMacroElementInfo() { FUNCNAME("MeshDistributor::createMacroElementInfo()"); for (deque::iterator it = mesh->firstMacroElement(); it != mesh->endOfMacroElements(); ++it) { allMacroElements.push_back(*it); macroElementNeighbours[(*it)->getIndex()].resize(mesh->getGeo(NEIGH)); for (int i = 0; i < mesh->getGeo(NEIGH); i++) macroElementNeighbours[(*it)->getIndex()][i] = (*it)->getNeighbour(i) ? 
(*it)->getNeighbour(i)->getIndex() : -1; } } void MeshDistributor::updateMacroElementInfo() { deque& meshMacros = mesh->getMacroElements(); for (unsigned int i = 0; i < allMacroElements.size(); i++) { for (deque::iterator it = meshMacros.begin(); it != meshMacros.end(); ++it) { if ((*it)->getIndex() == allMacroElements[i]->getIndex() && *it != allMacroElements[i]) { delete allMacroElements[i]; allMacroElements[i] = *it; } } } } void MeshDistributor::serialize(ostream &out) { FUNCNAME("MeshDistributor::serialize()"); checkMeshChange(); SerUtil::serialize(out, elemWeights); SerUtil::serialize(out, partitionVec); SerUtil::serialize(out, oldPartitionVec); SerUtil::serialize(out, nRankDofs); SerUtil::serialize(out, nOverallDofs); elObjects.serialize(out); myIntBoundary.serialize(out); otherIntBoundary.serialize(out); periodicBoundary.serialize(out); serialize(out, sendDofs); serialize(out, recvDofs); SerUtil::serialize(out, mapLocalGlobalDofs); SerUtil::serialize(out, mapLocalDofIndex); SerUtil::serialize(out, isRankDof); serialize(out, periodicDof); serialize(out, periodicDofAssociations); SerUtil::serialize(out, rstart); SerUtil::serialize(out, macroElementNeighbours); int nSize = allMacroElements.size(); SerUtil::serialize(out, nSize); for (int i = 0; i < nSize; i++) allMacroElements[i]->serialize(out); SerUtil::serialize(out, nTimestepsAfterLastRepartitioning); SerUtil::serialize(out, repartCounter); } void MeshDistributor::deserialize(istream &in) { FUNCNAME("MeshDistributor::deserialize()"); SerUtil::deserialize(in, elemWeights); SerUtil::deserialize(in, partitionVec); SerUtil::deserialize(in, oldPartitionVec); SerUtil::deserialize(in, nRankDofs); SerUtil::deserialize(in, nOverallDofs); // Create two maps: one from from element indices to the corresponding element // pointers, and one map from Dof indices to the corresponding dof pointers. map elIndexMap; map dofMap; ElementDofIterator elDofIter(feSpace); TraverseStack stack; ElInfo *elInfo = stack.traverseFirst(mesh, -1, Mesh::CALL_EVERY_EL_PREORDER); while (elInfo) { Element *el = elInfo->getElement(); elIndexMap[el->getIndex()] = el; if (el->isLeaf()) { elDofIter.reset(el); do { dofMap[elDofIter.getDof()] = elDofIter.getDofPtr(); } while (elDofIter.next()); } elInfo = stack.traverseNext(elInfo); } elObjects.deserialize(in); myIntBoundary.deserialize(in, elIndexMap); otherIntBoundary.deserialize(in, elIndexMap); periodicBoundary.deserialize(in, elIndexMap); deserialize(in, sendDofs, dofMap); deserialize(in, recvDofs, dofMap); SerUtil::deserialize(in, mapLocalGlobalDofs); SerUtil::deserialize(in, mapLocalDofIndex); SerUtil::deserialize(in, isRankDof); deserialize(in, periodicDof); deserialize(in, periodicDofAssociations); SerUtil::deserialize(in, rstart); SerUtil::deserialize(in, macroElementNeighbours); int nSize = 0; SerUtil::deserialize(in, nSize); allMacroElements.resize(nSize); for (int i = 0; i < nSize; i++) { // We do not need the neighbour indices, but must still read them. 
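      // They are read into a temporary vector that is simply discarded afterwards.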
vector indices; allMacroElements[i] = new MacroElement(mesh->getDim()); allMacroElements[i]->writeNeighboursTo(&indices); allMacroElements[i]->deserialize(in); allMacroElements[i]->getElement()->setMesh(mesh); } SerUtil::deserialize(in, nTimestepsAfterLastRepartitioning); SerUtil::deserialize(in, repartCounter); deserialized = true; } void MeshDistributor::serialize(ostream &out, PeriodicDofMap &data) { int mapSize = data.size(); SerUtil::serialize(out, mapSize); for (PeriodicDofMap::iterator it = data.begin(); it != data.end(); ++it) { int type = it->first; DofMapping dofMap = it->second; SerUtil::serialize(out, type); SerUtil::serialize(out, dofMap); } } void MeshDistributor::serialize(ostream &out, map >& data) { int mapSize = data.size(); SerUtil::serialize(out, mapSize); for (map >::iterator it = data.begin(); it != data.end(); ++it) { int dof = it->first; std::set typeSet = it->second; SerUtil::serialize(out, dof); SerUtil::serialize(out, typeSet); } } void MeshDistributor::deserialize(istream &in, PeriodicDofMap &data) { data.clear(); int mapSize = 0; SerUtil::deserialize(in, mapSize); for (int i = 0; i < mapSize; i++) { int type; DofMapping dofMap; SerUtil::deserialize(in, type); SerUtil::deserialize(in, dofMap); data[type] = dofMap; } } void MeshDistributor::deserialize(istream &in, map >& data) { data.clear(); int mapSize = 0; SerUtil::deserialize(in, mapSize); for (int i = 0; i < mapSize; i++) { int dof; std::set typeSet; SerUtil::deserialize(in, dof); SerUtil::deserialize(in, typeSet); data[dof] = typeSet; } } void MeshDistributor::writePartitioningMesh(string filename) { FUNCNAME("MeshDistributor::writePartitioningMesh()"); map vec; TraverseStack stack; ElInfo *elInfo = stack.traverseFirst(mesh, -1, Mesh::CALL_LEAF_EL | Mesh::FILL_COORDS); while (elInfo) { int index = elInfo->getElement()->getIndex(); vec[index] = partitionVec[index]; elInfo = stack.traverseNext(elInfo); } ElementFileWriter::writeFile(vec, mesh, filename); } }