//
// Software License for AMDiS
//
// Copyright (c) 2010 Dresden University of Technology
// All rights reserved.
// Authors: Simon Vey, Thomas Witkowski et al.
//
// This file is part of AMDiS
//
// See also license.opensource.txt in the distribution.


#include
#include
#include
#include
#include
#include
#include
#include
#include "parallel/MeshDistributor.h"
#include "parallel/MeshManipulation.h"
#include "parallel/ParallelDebug.h"
#include "parallel/StdMpi.h"
#include "parallel/MeshPartitioner.h"
#include "parallel/ParMetisPartitioner.h"
#include "parallel/ZoltanPartitioner.h"
#include "parallel/SimplePartitioner.h"
#include "parallel/CheckerPartitioner.h"
#include "parallel/MpiHelper.h"
#include "parallel/DofComm.h"
#include "io/ElementFileWriter.h"
#include "io/MacroInfo.h"
#include "io/MacroWriter.h"
#include "io/VtkWriter.h"
#include "Mesh.h"
#include "Traverse.h"
#include "ElInfo.h"
#include "Element.h"
#include "MacroElement.h"
#include "DOFMatrix.h"
#include "DOFVector.h"
#include "SystemVector.h"
#include "ElementDofIterator.h"
#include "ProblemStatBase.h"
#include "StandardProblemIteration.h"
#include "VertexVector.h"
#include "MeshStructure.h"
#include "ProblemStat.h"
#include "ProblemInstat.h"
#include "RefinementManager3d.h"
#include "Debug.h"

namespace AMDiS {

  using boost::lexical_cast;
  using namespace boost::filesystem;
  using namespace std;

  MeshDistributor* MeshDistributor::globalMeshDistributor = NULL;

  const Flag MeshDistributor::BOUNDARY_SUBOBJ_SORTED = 0X01L;
  const Flag MeshDistributor::BOUNDARY_FILL_INFO_SEND_DOFS = 0X02L;
  const Flag MeshDistributor::BOUNDARY_FILL_INFO_RECV_DOFS = 0X04L;

  inline bool cmpDofsByValue(const DegreeOfFreedom* dof1, const DegreeOfFreedom* dof2)
  {
    return (*dof1 < *dof2);
  }

  MeshDistributor::MeshDistributor()
    : problemStat(0),
      initialized(false),
      name("parallel"),
      feSpaces(0),
      mesh(NULL),
      refineManager(NULL),
      info(10),
      partitioner(NULL),
      deserialized(false),
      writeSerializationFile(false),
      repartitioningAllowed(false),
      repartitionIthChange(20),
      nMeshChangesAfterLastRepartitioning(0),
      repartitioningCounter(0),
      debugOutputDir(""),
      lastMeshChangeIndex(0),
      createBoundaryDofFlag(0),
      sebastianMode(false),
      boundaryDofInfo(1)
  {
    FUNCNAME("MeshDistributor::MeshDistributor()");

    mpiComm = MPI::COMM_WORLD;
    mpiRank = mpiComm.Get_rank();
    mpiSize = mpiComm.Get_size();

    Parameters::get(name + "->repartitioning", repartitioningAllowed);
    Parameters::get(name + "->debug output dir", debugOutputDir);
    Parameters::get(name + "->repartition ith change", repartitionIthChange);
    Parameters::get(name + "->log main rank", Msg::outputMainRank);

    string partStr = "parmetis";
    Parameters::get(name + "->partitioner", partStr);

    if (partStr == "parmetis")
      partitioner = new ParMetisPartitioner(&mpiComm);

    if (partStr == "zoltan") {
#ifdef HAVE_ZOLTAN
      partitioner = new ZoltanPartitioner(&mpiComm);
#else
      ERROR_EXIT("AMDiS was compiled without Zoltan support. 
Therefore you cannot make use of it!\n"); #endif } if (partStr == "checker") partitioner = new CheckerPartitioner(&mpiComm); if (partStr == "simple") partitioner = new SimplePartitioner(&mpiComm); int tmp = 0; Parameters::get(name + "->box partitioning", tmp); partitioner->setBoxPartitioning(static_cast(tmp)); TEST_EXIT(partitioner)("Could not create partitioner \"%s\"!\n", partStr.c_str()); } void MeshDistributor::initParallelization() { FUNCNAME("MeshDistributor::initParallelization()"); if (initialized) return; TEST_EXIT(mpiSize > 1) ("Parallelization does not work with only one process!\n"); TEST_EXIT(feSpaces.size() > 0) ("No FE space has been defined for the mesh distributor!\n"); TEST_EXIT(mesh)("No mesh has been defined for the mesh distributor!\n"); // === Sort FE spaces with respect to the degree of the basis === // === functions. Use stuiped bubble sort for this. === bool doNext = false; do { doNext = false; for (unsigned int i = 0; i < feSpaces.size() - 1; i++) { if (feSpaces[i]->getBasisFcts()->getDegree() > feSpaces[i + 1]->getBasisFcts()->getDegree()) { const FiniteElemSpace *tmp = feSpaces[i + 1]; feSpaces[i + 1] = feSpaces[i]; feSpaces[i] = tmp; doNext = true; } } } while (doNext); elObjDb.setMesh(feSpaces[0]->getMesh()); // If the problem has been already read from a file, we need only to set // isRankDofs to all matrices and rhs vector and to remove periodic // boundary conditions (if there are some). if (deserialized) { updateMacroElementInfo(); setRankDofs(); removePeriodicBoundaryConditions(); macroElIndexMap.clear(); macroElIndexTypeMap.clear(); for (vector::iterator it = allMacroElements.begin(); it != allMacroElements.end(); ++it) { macroElIndexMap.insert(make_pair((*it)->getIndex(), (*it)->getElement())); macroElIndexTypeMap.insert(make_pair((*it)->getIndex(), (*it)->getElType())); } createBoundaryDofs(); #if (DEBUG != 0) ParallelDebug::writeDebugFile(*this, debugOutputDir + "mpi-dbg", "dat"); #endif initialized = true; return; } // Test, if the mesh is the macro mesh only! Paritioning of the mesh is supported // only for macro meshes, so it will not work yet if the mesh is already refined // in some way. testForMacroMesh(); // For later mesh repartitioning, we need to store some information about the // macro mesh. createMacroElementInfo(); // create an initial partitioning of the mesh partitioner->createInitialPartitioning(); // set the element weights, which are 1 at the very first begin setInitialElementWeights(); // and now partition the mesh bool partitioningSucceed = partitioner->partition(elemWeights, INITIAL); TEST_EXIT(partitioningSucceed)("Initial partitioning does not work!\n"); partitioner->createPartitionMap(partitionMap); #if (DEBUG != 0) debug::ElementIdxToDofs elMap; debug::createSortedDofs(mesh, elMap); #endif if (mpiRank == 0) { #if (DEBUG != 0) int writePartMesh = 1; #else int writePartMesh = 0; #endif Parameters::get("dbg->write part mesh", writePartMesh); if (writePartMesh > 0) { debug::writeElementIndexMesh(mesh, debugOutputDir + "elementIndex.vtu"); ParallelDebug::writePartitioning(*this, debugOutputDir + "part.vtu"); } } // If required, create hierarchical mesh level structure. createMeshLevelStructure(); // Create interior boundary information. createInteriorBoundaryInfo(); #if (DEBUG != 0) ParallelDebug::printBoundaryInfo(*this); #endif // === Remove neighbourhood relations due to periodic bounday conditions. 
=== for (deque::iterator it = mesh->firstMacroElement(); it != mesh->endOfMacroElements(); ++it) { for (int i = 0; i < mesh->getGeo(NEIGH); i++) { if ((*it)->getNeighbour(i) && mesh->isPeriodicAssociation((*it)->getBoundary(i))) { int neighIndex = (*it)->getNeighbour(i)->getIndex(); (*it)->getNeighbour(i)->setNeighbour((*it)->getOppVertex(i), NULL); (*it)->setNeighbour(i, NULL); (*it)->setBoundary(i, 0); macroElementNeighbours[(*it)->getIndex()][i] = -1; macroElementNeighbours[neighIndex][(*it)->getOppVertex(i)] = -1; } } } for (vector::iterator it = allMacroElements.begin(); it != allMacroElements.end(); ++it) { for (int i = 0; i < mesh->getGeo(NEIGH); i++) { if ((*it)->getNeighbour(i) && mesh->isPeriodicAssociation((*it)->getBoundary(i))) { int neighIndex = (*it)->getNeighbour(i)->getIndex(); (*it)->getNeighbour(i)->setNeighbour((*it)->getOppVertex(i), NULL); (*it)->setNeighbour(i, NULL); (*it)->setBoundary(i, 0); macroElementNeighbours[(*it)->getIndex()][i] = -1; macroElementNeighbours[neighIndex][(*it)->getOppVertex(i)] = -1; } } } // === Remove all macro elements that are not part of the rank partition. === removeMacroElements(); // === Create new global and local DOF numbering. === // We have to remove the VertexVectors, which contain periodic assoiciations, // because they are not valid anymore after some macro elements have been // removed and the corresponding DOFs were deleted. for (map::iterator it = mesh->getPeriodicAssociations().begin(); it != mesh->getPeriodicAssociations().end(); ++it) const_cast(mesh->getDofAdmin(0)).removeDOFContainer(dynamic_cast(it->second)); updateLocalGlobalNumbering(); // === In 3D we have to make some test, if the resulting mesh is valid. === // === If it is not valid, there is no possiblity yet to fix this === // === problem, just exit with an error message. === check3dValidMesh(); // === If in debug mode, make some tests. === #if (DEBUG != 0) MSG("AMDiS runs in debug mode, so make some test ...\n"); ParallelDebug::testAllElements(*this); debug::testSortedDofs(mesh, elMap); ParallelDebug::testInteriorBoundary(*this); ParallelDebug::followBoundary(*this); debug::writeMesh(feSpaces[0], -1, debugOutputDir + "macro_mesh"); MSG("Debug mode tests finished!\n"); #endif // Create periodic DOF mapping, if there are periodic boundaries. createPeriodicMap(); // Remove periodic boundary conditions in sequential problem definition. removePeriodicBoundaryConditions(); #if (DEBUG != 0) ParallelDebug::testPeriodicBoundary(*this); #endif // === Global refinements. === int globalRefinement = 0; Parameters::get(mesh->getName() + "->global refinements", globalRefinement); if (globalRefinement > 0) { refineManager->globalRefine(mesh, globalRefinement); updateLocalGlobalNumbering(); // === Update periodic mapping, if there are periodic boundaries. === createPeriodicMap(); #if (DEBUG != 0) ParallelDebug::testPeriodicBoundary(*this); #endif } // Set DOF rank information to all matrices and vectors. setRankDofs(); initialized = true; } void MeshDistributor::addProblemStat(ProblemStatSeq *probStat) { FUNCNAME("MeshDistributor::addProblemStat()"); TEST_EXIT_DBG(probStat->getFeSpaces().size()) ("No FE spaces in stationary problem!\n"); // === Add FE spaces from stationary problem to mesh distributor. 
=== for (unsigned int i = 0; i < probStat->getFeSpaces().size(); i++) { bool foundFeSpace = false; for (unsigned int j = 0; j < feSpaces.size(); j++) { if (feSpaces[j] == probStat->getFeSpaces()[i]) foundFeSpace = true; TEST_EXIT(feSpaces[j]->getMesh() == probStat->getFeSpaces()[i]->getMesh()) ("MeshDistributor does not yet support usage of different meshes!\n"); } if (!foundFeSpace) feSpaces.push_back(probStat->getFeSpaces()[i]); } TEST_EXIT(feSpaces.size() > 0)("Should not happen!\n"); mesh = feSpaces[0]->getMesh(); info = probStat->getInfo(); switch (mesh->getDim()) { case 2: refineManager = new RefinementManager2d(); break; case 3: refineManager = new RefinementManager3d(); break; default: ERROR_EXIT("This should not happen for dim = %d!\n", mesh->getDim()); } partitioner->setMesh(mesh); // Create parallel serialization file writer, if needed. int writeSerialization = 0; Parameters::get(probStat->getName() + "->output->write serialization", writeSerialization); if (writeSerialization && !writeSerializationFile) { string filename = ""; Parameters::get(name + "->output->serialization filename", filename); TEST_EXIT(filename != "") ("No filename defined for parallel serialization file!\n"); int tsModulo = -1; Parameters::get(probStat->getName() + "->output->write every i-th timestep", tsModulo); probStat->getFileWriterList().push_back(new Serializer(this, filename, tsModulo)); writeSerializationFile = true; } int readSerialization = 0; Parameters::get(probStat->getName() + "->input->read serialization", readSerialization); if (readSerialization) { string filename = ""; Parameters::get(probStat->getName() + "->input->serialization filename", filename); filename += ".p" + lexical_cast(mpiRank); MSG("Start deserialization with %s\n", filename.c_str()); ifstream in(filename.c_str()); TEST_EXIT(!in.fail())("Could not open deserialization file: %s\n", filename.c_str()); // Read the revision number of the AMDiS version which was used to create // the serialization file. int revNumber = -1; SerUtil::deserialize(in, revNumber); probStat->deserialize(in); in.close(); MSG("Deserialization from file: %s\n", filename.c_str()); filename = ""; Parameters::get(name + "->input->serialization filename", filename); TEST_EXIT(filename != "") ("No filename defined for parallel deserialization file!\n"); string rankFilename = filename + ".p" + lexical_cast(mpiRank); in.open(rankFilename.c_str()); TEST_EXIT(!in.fail())("Could not open parallel deserialization file: %s\n", filename.c_str()); // Read the revision number of the AMDiS version which was used to create // the serialization file. revNumber = -1; SerUtil::deserialize(in, revNumber); deserialize(in); in.close(); MSG("Deserializtion of mesh distributor from file: %s\n", rankFilename.c_str()); deserialized = true; } problemStat.push_back(probStat); // If the mesh distributor is already initialized, don't forget to set rank // DOFs object to the matrices and vectors of the added stationary problem, // and to remove the periodic boundary conditions on these objects. 
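  // (This is the case where a stationary problem is registered after
  // initParallelization() has already run: the new problem is then attached
  // to the existing parallel DOF mapping and its periodic boundary conditions
  // are removed immediately, instead of waiting for a new initialization.)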
if (initialized) { setRankDofs(probStat); removePeriodicBoundaryConditions(probStat); } } void MeshDistributor::addProblemStatGlobal(ProblemStatSeq *probStat) { FUNCNAME("MeshDistributor::addProblemStatGlobal()"); if (globalMeshDistributor == NULL) globalMeshDistributor = new MeshDistributor(); globalMeshDistributor->addProblemStat(probStat); } void MeshDistributor::exitParallelization() {} void MeshDistributor::testForMacroMesh() { FUNCNAME("MeshDistributor::testForMacroMesh()"); int nMacroElements = 0; TraverseStack stack; ElInfo *elInfo = stack.traverseFirst(mesh, -1, Mesh::CALL_LEAF_EL); while (elInfo) { TEST_EXIT(elInfo->getLevel() == 0) ("Mesh is already refined! This does not work with parallelization!\n"); TEST_EXIT(elInfo->getType() == 0) ("Only macro elements with level 0 are supported!\n"); nMacroElements++; elInfo = stack.traverseNext(elInfo); } TEST_EXIT(nMacroElements >= mpiSize) ("The mesh has less macro elements than number of mpi processes!\n"); } void MeshDistributor::synchVector(SystemVector &vec) { FUNCNAME("MeshDistributor::synchVector()"); StdMpi > stdMpi(mpiComm); for (DofComm::Iterator it(sendDofs); !it.end(); it.nextRank()) { vector dofs; for (int i = 0; i < vec.getSize(); i++) { DOFVector &dofVec = *(vec.getDOFVector(i)); for (it.beginDofIter(vec.getFeSpace(i)); !it.endDofIter(); it.nextDof()) dofs.push_back(dofVec[it.getDofIndex()]); } stdMpi.send(it.getRank(), dofs); } for (DofComm::Iterator it(recvDofs); !it.end(); it.nextRank()) stdMpi.recv(it.getRank()); stdMpi.startCommunication(); for (DofComm::Iterator it(recvDofs); !it.end(); it.nextRank()) { int counter = 0; for (int i = 0; i < vec.getSize(); i++) { DOFVector &dofVec = *(vec.getDOFVector(i)); for (it.beginDofIter(vec.getFeSpace(i)); !it.endDofIter(); it.nextDof()) dofVec[it.getDofIndex()] = stdMpi.getRecvData(it.getRank())[counter++]; } } } void MeshDistributor::check3dValidMesh() { FUNCNAME("MeshDistributor::check3dValidMesh()"); if (mesh->getDim() != 3) return; std::set allEdges; std::set rankMacroEls; TraverseStack stack; ElInfo *elInfo = stack.traverseFirst(mesh, 0, Mesh::CALL_EL_LEVEL); while (elInfo) { for (int i = 0; i < 4; i++) { ElementObjectData elData(elInfo->getElement()->getIndex(), i); allEdges.insert(elObjDb.getEdgeLocalMap(elData)); } rankMacroEls.insert(elInfo->getElement()->getIndex()); elInfo = stack.traverseNext(elInfo); } FixRefinementPatch::connectedEdges.clear(); bool valid3dMesh = true; for (std::set::iterator it = allEdges.begin(); it != allEdges.end(); ++it) { vector& edgeEls = elObjDb.getElements(*it); TEST_EXIT_DBG(edgeEls.size() > 0) ("No edge %d/%d in elObjDB!\n", it->first, it->second); std::set el0, el1; map edgeNoInEl; for (unsigned int i = 0; i < edgeEls.size(); i++) { if (rankMacroEls.count(edgeEls[i].elIndex)) { if (el0.empty()) el0.insert(edgeEls[i].elIndex); else el1.insert(edgeEls[i].elIndex); edgeNoInEl[edgeEls[i].elIndex] = edgeEls[i].ithObject; } } TEST_EXIT_DBG(!el0.empty())("Should not happen!\n"); if (el1.empty()) continue; bool found = false; do { found = false; for (std::set::iterator elIt = el0.begin(); elIt != el0.end(); ++elIt) { for (int i = 0; i < 4; i++) { int neighEl = macroElementNeighbours[*elIt][i]; if (neighEl != -1 && el1.count(neighEl)) { el0.insert(neighEl); el1.erase(neighEl); found = true; } } if (found) break; } } while (found); if (!el1.empty()) { valid3dMesh = false; MSG_DBG("Unvalid 3D mesh with %d (%d - %d) elements on this edge!\n", edgeEls.size(), el0.size(), el1.size()); for (std::set::iterator elIt0 = el0.begin(); elIt0 != el0.end(); 
++elIt0) { for (std::set::iterator elIt1 = el1.begin(); elIt1 != el1.end(); ++elIt1) { pair edge0 = make_pair(macroElIndexMap[*elIt0], edgeNoInEl[*elIt0]); pair edge1 = make_pair(macroElIndexMap[*elIt1], edgeNoInEl[*elIt1]); DofEdge dofEdge0 = edge0.first->getEdge(edge0.second); DofEdge dofEdge1 = edge1.first->getEdge(edge1.second); WorldVector c0, c1; mesh->getDofIndexCoords(dofEdge0.first, feSpaces[0], c0); mesh->getDofIndexCoords(dofEdge0.second, feSpaces[0], c1); /* MSG_DBG("FOUND EDGE: %d %d with coords %f %f %f %f %f %f\n", dofEdge0.first, dofEdge0.second, c0[0], c0[1], c0[2], c1[0], c1[1], c1[2]); */ TEST_EXIT_DBG(dofEdge0 == dofEdge1)("Should noth happen!\n"); FixRefinementPatch::connectedEdges.push_back(make_pair(edge0, edge1)); FixRefinementPatch::connectedEdges.push_back(make_pair(edge1, edge0)); } } } } MSG_DBG("Mesh is 3d valid mesh: %d\n", valid3dMesh); } void MeshDistributor::getAllBoundaryDofs(const FiniteElemSpace *feSpace, int level, DofContainer& dofs) { FUNCNAME("MeshDistributor::getAllBoundaryDofs()"); DofContainerSet dofSet; for (DofComm::Iterator it(sendDofs, level, feSpace); !it.end(); it.nextRank()) dofSet.insert(it.getDofs().begin(), it.getDofs().end()); for (DofComm::Iterator it(recvDofs, level, feSpace); !it.end(); it.nextRank()) dofSet.insert(it.getDofs().begin(), it.getDofs().end()); dofs.clear(); dofs.insert(dofs.begin(), dofSet.begin(), dofSet.end()); } void MeshDistributor::setRankDofs(ProblemStatSeq *probStat) { FUNCNAME("MeshDistributor::setRankDofs()"); int nComponents = probStat->getNumComponents(); for (int i = 0; i < nComponents; i++) { for (int j = 0; j < nComponents; j++) if (probStat->getSystemMatrix(i, j)) { const FiniteElemSpace *rowFeSpace = probStat->getSystemMatrix(i, j)->getRowFeSpace(); TEST_EXIT(find(feSpaces.begin(), feSpaces.end(), rowFeSpace) != feSpaces.end()) ("Should not happen!\n"); probStat->getSystemMatrix(i, j)->setDofMap(dofMap[rowFeSpace]); } TEST_EXIT_DBG(probStat->getRhs()->getDOFVector(i))("No RHS vector!\n"); TEST_EXIT_DBG(probStat->getSolution()->getDOFVector(i)) ("No solution vector!\n"); TEST_EXIT_DBG(probStat->getRhsVector(i)->getFeSpace() == probStat->getSolution(i)->getFeSpace()) ("Should not happen!\n"); const FiniteElemSpace *feSpace = probStat->getRhsVector(i)->getFeSpace(); TEST_EXIT(find(feSpaces.begin(), feSpaces.end(), feSpace) != feSpaces.end()) ("Should not happen!\n"); probStat->getRhsVector(i)->setDofMap(dofMap[feSpace]); probStat->getSolution(i)->setDofMap(dofMap[feSpace]); } } void MeshDistributor::setRankDofs() { for (unsigned int i = 0; i < problemStat.size(); i++) setRankDofs(problemStat[i]); } void MeshDistributor::removePeriodicBoundaryConditions() { FUNCNAME("MeshDistributor::removePeriodicBoundaryConditions()"); // Remove periodic boundaries in boundary manager on matrices and vectors. for (unsigned int i = 0; i < problemStat.size(); i++) removePeriodicBoundaryConditions(problemStat[i]); // Remove periodic boundaries on elements in mesh. 
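  // (From this point on, periodicity is represented only by the periodic DOF
  // mapping built in createPeriodicMap(); the PERIODIC element data and the
  // periodic vertex associations of the sequential problem definition are
  // removed below.)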
TraverseStack stack; ElInfo *elInfo = stack.traverseFirst(mesh, -1, Mesh::CALL_EVERY_EL_PREORDER); while (elInfo) { elInfo->getElement()->deleteElementData(PERIODIC); elInfo = stack.traverseNext(elInfo); } // Remove periodic vertex associations mesh->getPeriodicAssociations().clear(); } void MeshDistributor::removePeriodicBoundaryConditions(ProblemStatSeq *probStat) { FUNCNAME("MeshDistributor::removePeriodicBoundaryConditions()"); int nComponents = probStat->getNumComponents(); for (int j = 0; j < nComponents; j++) { for (int k = 0; k < nComponents; k++) { DOFMatrix* mat = probStat->getSystemMatrix(j, k); if (mat && mat->getBoundaryManager()) removePeriodicBoundaryConditions(const_cast(mat->getBoundaryManager()->getBoundaryConditionMap())); } if (probStat->getSolution()->getDOFVector(j)->getBoundaryManager()) removePeriodicBoundaryConditions(const_cast(probStat->getSolution()->getDOFVector(j)->getBoundaryManager()->getBoundaryConditionMap())); if (probStat->getRhs()->getDOFVector(j)->getBoundaryManager()) removePeriodicBoundaryConditions(const_cast(probStat->getRhs()->getDOFVector(j)->getBoundaryManager()->getBoundaryConditionMap())); } } void MeshDistributor::removePeriodicBoundaryConditions(BoundaryIndexMap& boundaryMap) { FUNCNAME("MeshDistributor::removePeriodicBoundaryConditions()"); BoundaryIndexMap::iterator it = boundaryMap.begin(); while (it != boundaryMap.end()) { if (it->second->isPeriodic()) boundaryMap.erase(it++); else ++it; } } void MeshDistributor::createMeshLevelStructure() { FUNCNAME("MeshDistributor::createMeshLevelStructure()"); if (mpiSize != 16) return; std::set neighbours; switch (mpiRank) { case 0: neighbours.insert(1); neighbours.insert(4); neighbours.insert(5); break; case 1: neighbours.insert(0); neighbours.insert(2); neighbours.insert(4); neighbours.insert(5); neighbours.insert(6); break; case 2: neighbours.insert(1); neighbours.insert(3); neighbours.insert(5); neighbours.insert(6); neighbours.insert(7); break; case 3: neighbours.insert(2); neighbours.insert(6); neighbours.insert(7); break; case 4: neighbours.insert(0); neighbours.insert(1); neighbours.insert(5); neighbours.insert(8); neighbours.insert(9); break; case 5: neighbours.insert(0); neighbours.insert(1); neighbours.insert(2); neighbours.insert(4); neighbours.insert(6); neighbours.insert(8); neighbours.insert(9); neighbours.insert(10); break; case 6: neighbours.insert(1); neighbours.insert(2); neighbours.insert(3); neighbours.insert(5); neighbours.insert(7); neighbours.insert(9); neighbours.insert(10); neighbours.insert(11); break; case 7: neighbours.insert(2); neighbours.insert(3); neighbours.insert(6); neighbours.insert(10); neighbours.insert(11); break; case 8: neighbours.insert(4); neighbours.insert(5); neighbours.insert(9); neighbours.insert(12); neighbours.insert(13); break; case 9: neighbours.insert(4); neighbours.insert(5); neighbours.insert(6); neighbours.insert(8); neighbours.insert(10); neighbours.insert(12); neighbours.insert(13); neighbours.insert(14); break; case 10: neighbours.insert(5); neighbours.insert(6); neighbours.insert(7); neighbours.insert(9); neighbours.insert(11); neighbours.insert(13); neighbours.insert(14); neighbours.insert(15); break; case 11: neighbours.insert(6); neighbours.insert(7); neighbours.insert(10); neighbours.insert(14); neighbours.insert(15); break; case 12: neighbours.insert(8); neighbours.insert(9); neighbours.insert(13); break; case 13: neighbours.insert(8); neighbours.insert(9); neighbours.insert(10); neighbours.insert(12); neighbours.insert(14); break; 
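    // (The neighbour sets in this switch describe a fixed 4 x 4 grid of ranks,
    // with rank r at row r/4 and column r%4 and all horizontally, vertically
    // and diagonally adjacent ranks listed as neighbours; this matches the
    // mpiSize != 16 early return at the top of the function.)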
case 14: neighbours.insert(9); neighbours.insert(10); neighbours.insert(11); neighbours.insert(13); neighbours.insert(15); break; case 15: neighbours.insert(10); neighbours.insert(11); neighbours.insert(14); break; } TEST_EXIT(neighbours.size() > 0)("Should not happen!\n"); levelData.init(neighbours); MSG("INIT MESH LEVEL %d\n", levelData.getLevelNumber()); bool multiLevelTest = false; Parameters::get("parallel->multi level test", multiLevelTest); if (multiLevelTest) { switch (mpiRank) { case 0: case 1: case 4: case 5: { std::set m; m.insert(0); m.insert(1); m.insert(4); m.insert(5); levelData.addLevel(m, 0); } break; case 2: case 3: case 6: case 7: { std::set m; m.insert(2); m.insert(3); m.insert(6); m.insert(7); levelData.addLevel(m, 1); } break; case 8: case 9: case 12: case 13: { std::set m; m.insert(8); m.insert(9); m.insert(12); m.insert(13); levelData.addLevel(m, 2); } break; case 10: case 11: case 14: case 15: { std::set m; m.insert(10); m.insert(11); m.insert(14); m.insert(15); levelData.addLevel(m, 3); } break; } } } void MeshDistributor::checkMeshChange(bool tryRepartition) { FUNCNAME("MeshDistributor::checkMeshChange()"); double first = MPI::Wtime(); int skip = 0; Parameters::get("parallel->debug->skip check mesh change", skip); // === If mesh has not been changed on all ranks, return. === int recvAllValues = 0; int sendValue = static_cast(mesh->getChangeIndex() != lastMeshChangeIndex); mpiComm.Allreduce(&sendValue, &recvAllValues, 1, MPI_INT, MPI_SUM); if (recvAllValues == 0) return; // === At least one rank mesh has been changed, so the boundaries must be === // === adapted to the new mesh structure. === if (skip == 0) do { bool meshChanged = false; // To check the interior boundaries, the ownership of the boundaries is not // important. Therefore, we add all boundaries to one boundary container. RankToBoundMap allBound; for (InteriorBoundary::iterator it(rankIntBoundary); !it.end(); ++it) if ((mesh->getDim() == 2 && it->rankObj.subObj == EDGE) || (mesh->getDim() == 3 && it->rankObj.subObj == FACE)) allBound[it.getRank()].push_back(*it); for (InteriorBoundary::iterator it(otherIntBoundary); !it.end(); ++it) if ((mesh->getDim() == 2 && it->rankObj.subObj == EDGE) || (mesh->getDim() == 3 && it->rankObj.subObj == FACE)) allBound[it.getRank()].push_back(*it); for (InteriorBoundary::iterator it(periodicBoundary); !it.end(); ++it) { if (it.getRank() == mpiRank) { if ((mesh->getDim() == 2 && it->rankObj.subObj == EDGE) || (mesh->getDim() == 3 && it->rankObj.subObj == FACE)) { MeshStructure elCode; elCode.init(it->rankObj); MeshManipulation mm(mesh); meshChanged |= mm.fitElementToMeshCode(elCode, it->neighObj); } } else { if ((mesh->getDim() == 2 && it->rankObj.subObj == EDGE) || (mesh->getDim() == 3 && it->rankObj.subObj == FACE)) allBound[it.getRank()].push_back(*it); } } // === Check the boundaries and adapt mesh if necessary. === MSG_DBG("Run checkAndAdaptBoundary ...\n"); meshChanged |= checkAndAdaptBoundary(allBound); // === Check on all ranks if at least one rank's mesh has changed. === int sendValue = static_cast(meshChanged); recvAllValues = 0; mpiComm.Allreduce(&sendValue, &recvAllValues, 1, MPI_INT, MPI_SUM); MSG("Mesh changed on %d ranks!\n", recvAllValues); } while (recvAllValues != 0); #if (DEBUG != 0) debug::writeMesh(feSpaces[0], -1, debugOutputDir + "mesh"); #endif // Because the mesh has been changed, update the DOF numbering and mappings. updateLocalGlobalNumbering(); // Update periodic mapping, if there are periodic boundaries. 
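  // (updateLocalGlobalNumbering() has just assigned new global DOF indices, so
  // the periodic DOF mapping is rebuilt from scratch against the new numbering.)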
    createPeriodicMap();

#if (DEBUG != 0)
    ParallelDebug::testPeriodicBoundary(*this);
#endif

    // === The mesh has changed, so check if it is required to repartition ===
    // === the mesh.                                                        ===

    nMeshChangesAfterLastRepartitioning++;

    INFO(info, 8)("Parallel mesh adaption needed %.5f seconds\n",
                  MPI::Wtime() - first);

    if (tryRepartition &&
        repartitioningAllowed &&
        nMeshChangesAfterLastRepartitioning >= repartitionIthChange) {
      repartitionMesh();
      nMeshChangesAfterLastRepartitioning = 0;
    } else {
      MSG_DBG("Repartitioning not tried because tryRepartitioning = %d repartitioningAllowed = %d nMeshChange = %d repartitionIthChange = %d\n",
              tryRepartition, repartitioningAllowed,
              nMeshChangesAfterLastRepartitioning, repartitionIthChange);
    }

    // === Print imbalance factor. ===

    printImbalanceFactor();
  }


  void MeshDistributor::printImbalanceFactor()
  {
    FUNCNAME("MeshDistributor::printImbalanceFactor()");

    vector<int> nDofsInRank(mpiSize);
    int nDofs = mesh->getDofAdmin(0).getUsedDofs();
    mpiComm.Gather(&nDofs, 1, MPI_INT, &(nDofsInRank[0]), 1, MPI_INT, 0);

    if (mpiRank == 0) {
      int nOverallDofs = 0;
      int maxDofs = numeric_limits<int>::min();
      int minDofs = numeric_limits<int>::max();
      for (int i = 0; i < mpiSize; i++) {
        nOverallDofs += nDofsInRank[i];
        maxDofs = std::max(maxDofs, nDofsInRank[i]);
        minDofs = std::min(minDofs, nDofsInRank[i]);
      }

      // int avrgDofs = nOverallDofs / mpiSize;
      // double imbalance0 =
      //   (static_cast<double>(maxDofs - avrgDofs) / avrgDofs) * 100.0;
      double imbalance1 = (static_cast<double>(maxDofs) / minDofs - 1.0) * 100.0;

      MSG("Imbalance factor: %.1f\n", imbalance1);
    }
  }


  bool MeshDistributor::checkAndAdaptBoundary(RankToBoundMap &allBound)
  {
    FUNCNAME("MeshDistributor::checkAndAdaptBoundary()");

    // === Create mesh structure codes for all ranks' boundary elements. ===

    map<int, MeshCodeVec> sendCodes;

    for (RankToBoundMap::iterator it = allBound.begin(); it != allBound.end(); ++it) {
      for (vector<AtomicBoundary>::iterator boundIt = it->second.begin();
           boundIt != it->second.end(); ++boundIt) {
        MeshStructure elCode;
        elCode.init(boundIt->rankObj);
        sendCodes[it->first].push_back(elCode);
      }
    }

    StdMpi<MeshCodeVec> stdMpi(mpiComm, true);
    stdMpi.send(sendCodes);
    for (RankToBoundMap::iterator it = allBound.begin(); it != allBound.end(); ++it)
      stdMpi.recv(it->first);
    stdMpi.startCommunication();

    // === Compare received mesh structure codes.
=== bool meshChanged = false; for (RankToBoundMap::iterator it = allBound.begin(); it != allBound.end(); ++it) { MeshCodeVec &recvCodes = stdMpi.getRecvData()[it->first]; int i = 0; for (vector::iterator boundIt = it->second.begin(); boundIt != it->second.end(); ++boundIt, i++) { MeshStructure elCode; elCode.init(boundIt->rankObj); #if (DEBUG != 0) ParallelDebug::followBoundary(mesh, *boundIt, elCode); #endif if (elCode.getCode() != recvCodes[i].getCode()) { // MSG("MACRO EL %d %d %d DOES NOT FIT WITH MACRO EL %d %d %d ON RANK %d\n", // boundIt->rankObj.elIndex, boundIt->rankObj.subObj, boundIt->rankObj.ithObj, // boundIt->neighObj.elIndex, boundIt->neighObj.subObj, boundIt->neighObj.ithObj, // it->first); TEST_EXIT_DBG(refineManager)("Refinement manager is not set correctly!\n"); MeshManipulation mm(mesh); meshChanged |= mm.fitElementToMeshCode(recvCodes[i], boundIt->rankObj); } } } return meshChanged; } void MeshDistributor::serialize(ostream &out, DofContainer &data) { int vecSize = data.size(); SerUtil::serialize(out, vecSize); for (int i = 0; i < vecSize; i++) { int dofIndex = *(data[i]); SerUtil::serialize(out, dofIndex); } } void MeshDistributor::serialize(ostream &out, map > &data) { int mapSize = data.size(); SerUtil::serialize(out, mapSize); for (map >::iterator it = data.begin(); it != data.end(); ++it) { int rank = it->first; SerUtil::serialize(out, rank); for (map::iterator dcIt = it->second.begin(); dcIt != it->second.end(); ++dcIt) serialize(out, dcIt->second); } } void MeshDistributor::deserialize(istream &in, DofContainer &data, map &dofIndexMap) { FUNCNAME("MeshDistributor::deserialize()"); int vecSize = 0; SerUtil::deserialize(in, vecSize); data.clear(); data.resize(vecSize); for (int i = 0; i < vecSize; i++) { int dofIndex = 0; SerUtil::deserialize(in, dofIndex); TEST_EXIT_DBG(dofIndexMap.count(dofIndex) != 0) ("Dof index could not be deserialized correctly!\n"); data[i] = dofIndexMap[dofIndex]; } } void MeshDistributor::deserialize(istream &in, map > &data, map > &dofIndexMap) { data.clear(); int mapSize = 0; SerUtil::deserialize(in, mapSize); for (int i = 0; i < mapSize; i++) { int rank = 0; SerUtil::deserialize(in, rank); for (unsigned int j = 0; j < feSpaces.size(); j++) deserialize(in, data[rank][feSpaces[j]], dofIndexMap[feSpaces[j]]); } } void MeshDistributor::setInitialElementWeights() { FUNCNAME("MeshDistributor::setInitialElementWeights()"); elemWeights.clear(); string filename = ""; Parameters::get(mesh->getName() + "->macro weights", filename); if (filename != "") { MSG("Read macro weights from %s\n", filename.c_str()); ifstream infile; infile.open(filename.c_str(), ifstream::in); while (!infile.eof()) { int elNum, elWeight; infile >> elNum; if (infile.eof()) break; infile >> elWeight; elemWeights[elNum] = elWeight; } infile.close(); } else { TraverseStack stack; ElInfo *elInfo = stack.traverseFirst(mesh, -1, Mesh::CALL_LEAF_EL); while (elInfo) { elemWeights[elInfo->getElement()->getIndex()] = 1.0; elInfo = stack.traverseNext(elInfo); } } } void MeshDistributor::repartitionMesh() { FUNCNAME("MeshDistributor::repartitionMesh()"); // === First we check if the rank with the maximum number of DOFs has at === // === least 20% more DOFs than the rank with the minimum number of DOFs. === // === In this case, the mesh will be repartition. 
=== int repartitioning = 0; vector nDofsInRank(mpiSize); int nDofs = mesh->getDofAdmin(0).getUsedDofs(); mpiComm.Gather(&nDofs, 1, MPI_INT, &(nDofsInRank[0]), 1, MPI_INT, 0); if (mpiRank == 0) { int nOverallDofs = 0; int minDofs = numeric_limits::max(); int maxDofs = numeric_limits::min(); for (int i = 0; i < mpiSize; i++) { nOverallDofs += nDofsInRank[i]; minDofs = std::min(minDofs, nDofsInRank[i]); maxDofs = std::max(maxDofs, nDofsInRank[i]); } MSG("Overall DOFs: %d Min DOFs: %d Max DOFs: %d\n", nOverallDofs, minDofs, maxDofs); if (static_cast(maxDofs) / static_cast(minDofs) > 1.2) repartitioning = 1; mpiComm.Bcast(&repartitioning, 1, MPI_INT, 0); } else { mpiComm.Bcast(&repartitioning, 1, MPI_INT, 0); } if (repartitioning == 0) return; double timePoint = MPI::Wtime(); #if (DEBUG != 0) ParallelDebug::testDoubleDofs(mesh); if (repartitioningCounter == 0) ParallelDebug::writePartitioningFile(debugOutputDir + "partitioning", repartitioningCounter, feSpaces[0]); #endif repartitioningCounter++; // === Create new element weights. === elemWeights.clear(); { TraverseStack stack; ElInfo *elInfo = stack.traverseFirst(mesh, -1, Mesh::CALL_LEAF_EL); while (elInfo) { elemWeights[elInfo->getMacroElement()->getIndex()]++; elInfo = stack.traverseNext(elInfo); } } // === Run mesh partitioner to calculate a new mesh partitioning. === partitioner->setLocalGlobalDofMap(&(dofMap[feSpaces[0]].getMap(0))); bool partitioningSucceed = partitioner->partition(elemWeights, ADAPTIVE_REPART); if (!partitioningSucceed) { MSG("Mesh partitioner created empty partition!\n"); return; } // In the case the partitioner does not create a new mesh partition, return // without and changes. if (!partitioner->meshChanged()) { MSG("Mesh partition does not create a new partition!\n"); return; } // === Create map that maps macro element indices to pointers to the === // === macro elements. === map elIndexMap; for (vector::iterator it = allMacroElements.begin(); it != allMacroElements.end(); ++it) elIndexMap[(*it)->getIndex()] = *it; // === Create set of all new macro elements this rank will receive from === // === other ranks. === std::set newMacroEl; for (map >::iterator it = partitioner->getRecvElements().begin(); it != partitioner->getRecvElements().end(); ++it) { for (vector::iterator elIt = it->second.begin(); elIt != it->second.end(); ++elIt) { TEST_EXIT_DBG(elIndexMap.count(*elIt) == 1) ("Could not find macro element %d\n", *elIt); newMacroEl.insert(elIndexMap[*elIt]); } } std::set allMacroEl; for (std::set::iterator it = newMacroEl.begin(); it != newMacroEl.end(); ++it) allMacroEl.insert(*it); for (deque::iterator it = mesh->firstMacroElement(); it != mesh->endOfMacroElements(); ++it) allMacroEl.insert(*it); // === Add new macro elements to mesh. === TEST_EXIT(mesh->getGeo(FACE) || mesh->getGeo(CENTER)) ("Mh, dass macht dann doch noch etwas Arbeit für den Thomas!\n"); for (std::set::iterator it = newMacroEl.begin(); it != newMacroEl.end(); ++it) { MacroElement *mel = *it; // First, reset all neighbour relations. The correct neighbours will be // set later. for (int i = 0; i < mesh->getGeo(NEIGH); i++) mel->setNeighbour(i, NULL); // Create new DOFs for the macro element. mel->getElement()->createNewDofPtrs(); for (int i = 0; i < mesh->getGeo(VERTEX); i++) mel->getElement()->setDof(i, mesh->getDof(VERTEX)); for (int i = 0; i < mesh->getGeo(EDGE); i++) mel->getElement()->setDof(mesh->getGeo(VERTEX) + i, mesh->getDof(EDGE)); // Push the macro element to all macro elements in mesh. 
mesh->getMacroElements().push_back(mel); } // === Send and receive mesh structure codes. === map sendCodes; map > > sendValues; for (map >::iterator it = partitioner->getSendElements().begin(); it != partitioner->getSendElements().end(); ++it) { for (vector::iterator elIt = it->second.begin(); elIt != it->second.end(); ++elIt) { MeshStructure elCode; elCode.init(mesh, *elIt); sendCodes[it->first].push_back(elCode); for (unsigned int i = 0; i < interchangeVectors.size(); i++) { vector valVec; elCode.getMeshStructureValues(*elIt, interchangeVectors[i], valVec); sendValues[it->first].push_back(valVec); } } } StdMpi stdMpi(mpiComm, true); stdMpi.send(sendCodes); for (map >::iterator it = partitioner->getRecvElements().begin(); it != partitioner->getRecvElements().end(); ++it) stdMpi.recv(it->first); stdMpi.startCommunication(); if (interchangeVectors.size() == 0) WARNING("There are no interchange vectors defined!\n"); StdMpi > > stdMpi2(mpiComm, true); stdMpi2.send(sendValues); for (map >::iterator it = partitioner->getRecvElements().begin(); it != partitioner->getRecvElements().end(); ++it) stdMpi2.recv(it->first); stdMpi2.startCommunication(); // === Adapte the new macro elements due to the received mesh === // === structure codes. === for (map >::iterator it = partitioner->getRecvElements().begin(); it != partitioner->getRecvElements().end(); ++it) { MeshCodeVec &recvCodes = stdMpi.getRecvData()[it->first]; int i = 0; TEST_EXIT_DBG(recvCodes.size() == it->second.size()) ("Should not happen!\n"); for (vector::iterator elIt = it->second.begin(); elIt != it->second.end(); ++elIt) recvCodes[i++].fitMeshToStructure(mesh, refineManager, false, *elIt); } // === Set correct neighbour information on macro elements. === for (std::set::iterator it = allMacroEl.begin(); it != allMacroEl.end(); ++it) { int elIndex = (*it)->getIndex(); for (int i = 0; i < mesh->getGeo(NEIGH); i++) { TEST_EXIT_DBG(macroElementNeighbours.count(elIndex) == 1) ("Should not happen!\n"); TEST_EXIT_DBG(static_cast(macroElementNeighbours[elIndex].size()) == mesh->getGeo(NEIGH)) ("Should not happen!\n"); int neighIndex = macroElementNeighbours[elIndex][i]; if (neighIndex == -1 || partitioner->getElementInRank()[neighIndex] == false) { (*it)->setNeighbour(i, NULL); } else { TEST_EXIT_DBG(elIndexMap.count(neighIndex) == 1) ("Should not happen!\n"); (*it)->setNeighbour(i, elIndexMap[neighIndex]); } } } // === Delete macros from mesh. === std::set deleteMacroElements; for (map >::iterator it = partitioner->getSendElements().begin(); it != partitioner->getSendElements().end(); ++it) { for (vector::iterator elIt = it->second.begin(); elIt != it->second.end(); ++elIt) { TEST_EXIT_DBG(elIndexMap.count(*elIt) == 1) ("Could not find macro element %d\n", *elIt); deleteMacroElements.insert(elIndexMap[*elIt]); } } // Note that also if there are no macros to be deleted, this function will // update the number of elements, vertices, etc. of the mesh. mesh->removeMacroElements(deleteMacroElements, feSpaces); // === Remove double DOFs. === MeshManipulation meshManipulation(mesh); meshManipulation.deleteDoubleDofs(feSpaces, newMacroEl, elObjDb); mesh->dofCompress(); partitioner->createPartitionMap(partitionMap); updateInteriorBoundaryInfo(); updateLocalGlobalNumbering(); // === After mesh adaption, set values of interchange vectors. 
=== for (map >::iterator it = partitioner->getRecvElements().begin(); it != partitioner->getRecvElements().end(); ++it) { MeshCodeVec &recvCodes = stdMpi.getRecvData()[it->first]; vector > &recvValues = stdMpi2.getRecvData()[it->first]; int i = 0, j = 0; for (vector::iterator elIt = it->second.begin(); elIt != it->second.end(); ++elIt) { for (unsigned int k = 0; k < interchangeVectors.size(); k++) recvCodes[i].setMeshStructureValues(*elIt, interchangeVectors[k], recvValues[j++]); i++; } } // === Update periodic mapping, if there are periodic boundaries. === createPeriodicMap(); #if (DEBUG != 0) MSG("AMDiS runs in debug mode, so make some test ...\n"); ParallelDebug::writePartitioningFile(debugOutputDir + "partitioning", repartitioningCounter, feSpaces[0]); ParallelDebug::testAllElements(*this); ParallelDebug::testDoubleDofs(mesh); ParallelDebug::testInteriorBoundary(*this); ParallelDebug::testPeriodicBoundary(*this); ParallelDebug::printBoundaryInfo(*this); MSG("Debug mode tests finished!\n"); #endif // === In 3D we have to make some test, if the resulting mesh is valid. === // === If it is not valid, there is no possiblity yet to fix this === // === problem, just exit with an error message. === check3dValidMesh(); MSG("Mesh repartitioning needed %.5f seconds\n", MPI::Wtime() - timePoint); // === Print DOF information to screen. === nDofs = mesh->getDofAdmin(0).getUsedDofs(); mpiComm.Gather(&nDofs, 1, MPI_INT, &(nDofsInRank[0]), 1, MPI_INT, 0); if (mpiRank == 0) { int nOverallDofs = 0; int minDofs = numeric_limits::max(); int maxDofs = numeric_limits::min(); for (int i = 0; i < mpiSize; i++) { nOverallDofs += nDofsInRank[i]; minDofs = std::min(minDofs, nDofsInRank[i]); maxDofs = std::max(maxDofs, nDofsInRank[i]); } MSG("Overall DOFs: %d Min DOFs: %d Max DOFs: %d\n", nOverallDofs, minDofs, maxDofs); } } void MeshDistributor::createInteriorBoundaryInfo() { FUNCNAME("MeshDistributor::createInteriorBoundaryInfo()"); createMeshElementData(); createBoundaryData(); } void MeshDistributor::updateInteriorBoundaryInfo() { FUNCNAME("MeshDistributor::updateInteriorBoundaryInfo()"); elObjDb.createRankData(partitionMap, levelData); createBoundaryData(); #if (DEBUG != 0) ParallelDebug::printBoundaryInfo(*this); #endif } void MeshDistributor::createMeshElementData() { FUNCNAME("MeshDistributor::createMeshElementData()"); // === Fills macro element data structures. === TraverseStack stack; ElInfo *elInfo = stack.traverseFirst(mesh, -1, Mesh::CALL_LEAF_EL | Mesh::FILL_NEIGH | Mesh::FILL_BOUND); while (elInfo) { TEST_EXIT_DBG(elInfo->getLevel() == 0)("Should not happen!\n"); Element *el = elInfo->getElement(); macroElIndexMap.insert(make_pair(el->getIndex(), el)); macroElIndexTypeMap.insert(make_pair(el->getIndex(), elInfo->getType())); // Add all sub object of the element to the variable elObjDb. elObjDb.addElement(elInfo); elInfo = stack.traverseNext(elInfo); } // Create periodic data, if there are periodic boundary conditions. elObjDb.createPeriodicData(feSpaces[0]); // Create data about the reverse modes of neighbouring elements. elObjDb.createReverseModeData(feSpaces[0], macroElIndexMap, macroElIndexTypeMap); // Create mesh element data for this rank. elObjDb.createRankData(partitionMap, levelData); } void MeshDistributor::createBoundaryData() { FUNCNAME("MeshDistributor::createBoundaryData()"); // === Clear all relevant data structures. === rankIntBoundary.clear(); otherIntBoundary.clear(); periodicBoundary.clear(); // === Create interior boundary data structure. 
=== for (int geoPos = 0; geoPos < mesh->getDim(); geoPos++) { GeoIndex geoIndex = INDEX_OF_DIM(geoPos, mesh->getDim()); while (elObjDb.iterate(geoIndex)) { map& objData = elObjDb.getIterateData(); if (!(objData.count(mpiRank) && objData.size() > 1)) continue; int owner = elObjDb.getIterateOwner(); ElementObjectData& rankBoundEl = objData[mpiRank]; AtomicBoundary bound; bound.maxLevel = elObjDb.getIterateMaxLevel(); bound.rankObj.el = macroElIndexMap[rankBoundEl.elIndex]; bound.rankObj.elIndex = rankBoundEl.elIndex; bound.rankObj.elType = macroElIndexTypeMap[rankBoundEl.elIndex]; bound.rankObj.subObj = geoIndex; bound.rankObj.ithObj = rankBoundEl.ithObject; if (geoIndex == FACE) { for (int edgeNo = 0; edgeNo < 3; edgeNo++) { int edgeOfFace = bound.rankObj.el->getEdgeOfFace(bound.rankObj.ithObj, edgeNo); bound.rankObj.excludedSubstructures.push_back(make_pair(EDGE, edgeOfFace)); } } if (owner == mpiRank) { for (map::iterator it2 = objData.begin(); it2 != objData.end(); ++it2) { if (it2->first == mpiRank) continue; bound.neighObj.el = macroElIndexMap[it2->second.elIndex]; bound.neighObj.elIndex = it2->second.elIndex; bound.neighObj.elType = macroElIndexTypeMap[it2->second.elIndex]; bound.neighObj.subObj = geoIndex; bound.neighObj.ithObj = it2->second.ithObject; bound.type = INTERIOR; AtomicBoundary& b = rankIntBoundary.getNewAtomic(it2->first); b = bound; if (geoIndex == EDGE) b.neighObj.reverseMode = elObjDb.getEdgeReverseMode(rankBoundEl, it2->second); if (geoIndex == FACE) b.neighObj.reverseMode = elObjDb.getFaceReverseMode(rankBoundEl, it2->second); } } else { TEST_EXIT_DBG(objData.count(owner) == 1) ("Should not happen!\n"); ElementObjectData& ownerBoundEl = objData[owner]; bound.neighObj.el = macroElIndexMap[ownerBoundEl.elIndex]; bound.neighObj.elIndex = ownerBoundEl.elIndex; bound.neighObj.elType = -1; bound.neighObj.subObj = geoIndex; bound.neighObj.ithObj = ownerBoundEl.ithObject; bound.type = INTERIOR; AtomicBoundary& b = otherIntBoundary.getNewAtomic(owner); b = bound; if (geoIndex == EDGE) b.rankObj.reverseMode = elObjDb.getEdgeReverseMode(rankBoundEl, ownerBoundEl); if (geoIndex == FACE) b.rankObj.reverseMode = elObjDb.getFaceReverseMode(rankBoundEl, ownerBoundEl); } } } // === Create periodic boundary data structure. 
=== for (PerBoundMap::iterator it = elObjDb.getPeriodicVertices().begin(); it != elObjDb.getPeriodicVertices().end(); ++it) { if (elObjDb.isInRank(it->first.first, mpiRank) == false) continue; ElementObjectData& perDofEl0 = elObjDb.getElementsInRank(it->first.first)[mpiRank]; for (map::iterator elIt = elObjDb.getElementsInRank(it->first.second).begin(); elIt != elObjDb.getElementsInRank(it->first.second).end(); ++elIt) { int otherElementRank = elIt->first; ElementObjectData& perDofEl1 = elIt->second; AtomicBoundary bound; bound.rankObj.el = macroElIndexMap[perDofEl0.elIndex]; bound.rankObj.elIndex = perDofEl0.elIndex; bound.rankObj.elType = macroElIndexTypeMap[perDofEl0.elIndex]; bound.rankObj.subObj = VERTEX; bound.rankObj.ithObj = perDofEl0.ithObject; bound.neighObj.el = macroElIndexMap[perDofEl1.elIndex]; bound.neighObj.elIndex = perDofEl1.elIndex; bound.neighObj.elType = macroElIndexTypeMap[perDofEl1.elIndex]; bound.neighObj.subObj = VERTEX; bound.neighObj.ithObj = perDofEl1.ithObject; bound.type = it->second; if (sebastianMode) { if (bound.rankObj.elIndex == 3 && bound.rankObj.ithObj == 1 || bound.rankObj.elIndex == 78 && bound.rankObj.ithObj == 2) { AtomicBoundary& b = periodicBoundary.getNewAtomic(otherElementRank); b = bound; } } else { AtomicBoundary& b = periodicBoundary.getNewAtomic(otherElementRank); b = bound; } } } for (PerBoundMap::iterator it = elObjDb.getPeriodicEdges().begin(); it != elObjDb.getPeriodicEdges().end(); ++it) { if (elObjDb.isInRank(it->first.first, mpiRank) == false) continue; ElementObjectData& perEdgeEl0 = elObjDb.getElementsInRank(it->first.first)[mpiRank]; for (map::iterator elIt = elObjDb.getElementsInRank(it->first.second).begin(); elIt != elObjDb.getElementsInRank(it->first.second).end(); ++elIt) { int otherElementRank = elIt->first; ElementObjectData& perEdgeEl1 = elIt->second; AtomicBoundary bound; bound.rankObj.el = macroElIndexMap[perEdgeEl0.elIndex]; bound.rankObj.elIndex = perEdgeEl0.elIndex; bound.rankObj.elType = macroElIndexTypeMap[perEdgeEl0.elIndex]; bound.rankObj.subObj = EDGE; bound.rankObj.ithObj = perEdgeEl0.ithObject; bound.neighObj.el = macroElIndexMap[perEdgeEl1.elIndex]; bound.neighObj.elIndex = perEdgeEl1.elIndex; bound.neighObj.elType = macroElIndexTypeMap[perEdgeEl1.elIndex]; bound.neighObj.subObj = EDGE; bound.neighObj.ithObj = perEdgeEl1.ithObject; bound.type = it->second; AtomicBoundary& b = periodicBoundary.getNewAtomic(otherElementRank); b = bound; if (mpiRank > otherElementRank) b.neighObj.reverseMode = elObjDb.getEdgeReverseMode(perEdgeEl0, perEdgeEl1); else b.rankObj.reverseMode = elObjDb.getEdgeReverseMode(perEdgeEl0, perEdgeEl1); } } for (PerBoundMap::iterator it = elObjDb.getPeriodicFaces().begin(); it != elObjDb.getPeriodicFaces().end(); ++it) { if (elObjDb.isInRank(it->first.first, mpiRank) == false) continue; TEST_EXIT_DBG(elObjDb.getElements(it->first.first).size() == 1) ("Should not happen!\n"); TEST_EXIT_DBG(elObjDb.getElements(it->first.second).size() == 1) ("Should not happen!\n"); ElementObjectData& perFaceEl0 = elObjDb.getElementsInRank(it->first.first)[mpiRank]; for (map::iterator elIt = elObjDb.getElementsInRank(it->first.second).begin(); elIt != elObjDb.getElementsInRank(it->first.second).end(); ++elIt) { int otherElementRank = elIt->first; ElementObjectData& perFaceEl1 = elIt->second; AtomicBoundary bound; bound.rankObj.el = macroElIndexMap[perFaceEl0.elIndex]; bound.rankObj.elIndex = perFaceEl0.elIndex; bound.rankObj.elType = macroElIndexTypeMap[perFaceEl0.elIndex]; bound.rankObj.subObj = FACE; 
bound.rankObj.ithObj = perFaceEl0.ithObject; bound.neighObj.el = macroElIndexMap[perFaceEl1.elIndex]; bound.neighObj.elIndex = perFaceEl1.elIndex; bound.neighObj.elType = macroElIndexTypeMap[perFaceEl1.elIndex]; bound.neighObj.subObj = FACE; bound.neighObj.ithObj = perFaceEl1.ithObject; bound.type = it->second; AtomicBoundary& b = periodicBoundary.getNewAtomic(otherElementRank); b = bound; if (mpiRank > otherElementRank) b.neighObj.reverseMode = elObjDb.getFaceReverseMode(perFaceEl0, perFaceEl1); else b.rankObj.reverseMode = elObjDb.getFaceReverseMode(perFaceEl0, perFaceEl1); } } // === Once we have this information, we must care about the order of the === // === atomic bounds in the three boundary handling object. Eventually === // === all the boundaries have to be in the same order on both ranks that === // === share the bounday. === StdMpi > stdMpi(mpiComm); stdMpi.send(rankIntBoundary.boundary); stdMpi.recv(otherIntBoundary.boundary); stdMpi.startCommunication(); // === The information about all neighbouring boundaries has been === // === received. So the rank tests if its own atomic boundaries are in === // === the same order. If not, the atomic boundaries are swaped to the === // === correct order. === for (RankToBoundMap::iterator rankIt = otherIntBoundary.boundary.begin(); rankIt != otherIntBoundary.boundary.end(); ++rankIt) { // === We have received from rank "rankIt->first" the ordered list of === // === element indices. Now, we have to sort the corresponding list in === // === this rank to get the same order. === for (unsigned int j = 0; j < rankIt->second.size(); j++) { // If the expected object is not at place, search for it. BoundaryObject &recvedBound = stdMpi.getRecvData()[rankIt->first][j].rankObj; if ((rankIt->second)[j].neighObj != recvedBound) { unsigned int k = j + 1; for (; k < rankIt->second.size(); k++) if ((rankIt->second)[k].neighObj == recvedBound) break; // The element must always be found, because the list is just in // another order. TEST_EXIT_DBG(k < rankIt->second.size())("Should never happen!\n"); // Swap the current with the found element. AtomicBoundary tmpBound = (rankIt->second)[k]; (rankIt->second)[k] = (rankIt->second)[j]; (rankIt->second)[j] = tmpBound; } } } // === Do the same for the periodic boundaries. 
=== if (periodicBoundary.boundary.size() > 0) { stdMpi.clear(); InteriorBoundary sendBounds, recvBounds; for (RankToBoundMap::iterator rankIt = periodicBoundary.boundary.begin(); rankIt != periodicBoundary.boundary.end(); ++rankIt) { if (rankIt->first == mpiRank) continue; if (rankIt->first < mpiRank) sendBounds.boundary[rankIt->first] = rankIt->second; else recvBounds.boundary[rankIt->first] = rankIt->second; } stdMpi.send(sendBounds.boundary); stdMpi.recv(recvBounds.boundary); stdMpi.startCommunication(); for (RankToBoundMap::iterator rankIt = periodicBoundary.boundary.begin(); rankIt != periodicBoundary.boundary.end(); ++rankIt) { if (rankIt->first <= mpiRank) continue; for (unsigned int j = 0; j < rankIt->second.size(); j++) { BoundaryObject &recvRankObj = stdMpi.getRecvData()[rankIt->first][j].rankObj; BoundaryObject &recvNeighObj = stdMpi.getRecvData()[rankIt->first][j].neighObj; if (periodicBoundary.boundary[rankIt->first][j].neighObj != recvRankObj || periodicBoundary.boundary[rankIt->first][j].rankObj != recvNeighObj) { unsigned int k = j + 1; for (; k < rankIt->second.size(); k++) if (periodicBoundary.boundary[rankIt->first][k].neighObj == recvRankObj && periodicBoundary.boundary[rankIt->first][k].rankObj == recvNeighObj) break; // The element must always be found, because the list is just in // another order. TEST_EXIT_DBG(k < rankIt->second.size())("Should never happen!\n"); // Swap the current with the found element. AtomicBoundary tmpBound = (rankIt->second)[k]; (rankIt->second)[k] = (rankIt->second)[j]; (rankIt->second)[j] = tmpBound; } } } } // periodicBoundary.boundary.size() > 0 } void MeshDistributor::createBoundaryDofs() { FUNCNAME("MeshDistributor::createBoundaryDofs()"); int nLevels = levelData.getLevelNumber(); TEST_EXIT_DBG(nLevels >= 1)("Should not happen!\n"); sendDofs.init(nLevels); recvDofs.init(nLevels); boundaryDofInfo.resize(nLevels); for (unsigned int i = 0; i < feSpaces.size(); i++) for (int j = 0; j < nLevels; j++) createBoundaryDofs(feSpaces[i], j); } void MeshDistributor::createBoundaryDofs(const FiniteElemSpace *feSpace, int level) { FUNCNAME("MeshDistributor::createBoundaryDofs()"); if (createBoundaryDofFlag.isSet(BOUNDARY_SUBOBJ_SORTED)) { TEST_EXIT(level < boundaryDofInfo.size())("Should not happen!\n"); // === Clear data. === for (int geo = FACE; geo >= VERTEX; geo--) boundaryDofInfo[level][feSpace].geoDofs[static_cast(geo)].clear(); // === Create send DOFs. === for (int geo = FACE; geo >= VERTEX; geo--) { for (InteriorBoundary::iterator it(rankIntBoundary, level); !it.end(); ++it) { if (it->rankObj.subObj == geo) { DofContainer dofs; it->rankObj.el->getAllDofs(feSpace, it->rankObj, dofs); DofContainer& tmp = sendDofs.getDofContainer(it.getRank(), feSpace, level); tmp.insert(tmp.end(), dofs.begin(), dofs.end()); if (createBoundaryDofFlag.isSet(BOUNDARY_FILL_INFO_SEND_DOFS)) boundaryDofInfo[level][feSpace].geoDofs[static_cast(geo)].insert(dofs.begin(), dofs.end()); } } } // === Create recv DOFs. 
=== for (int geo = FACE; geo >= VERTEX; geo--) { for (InteriorBoundary::iterator it(otherIntBoundary, level); !it.end(); ++it) { if (it->rankObj.subObj == geo) { DofContainer dofs; it->rankObj.el->getAllDofs(feSpace, it->rankObj, dofs); DofContainer& tmp = recvDofs.getDofContainer(it.getRank(), feSpace, level); tmp.insert(tmp.end(), dofs.begin(), dofs.end()); if (createBoundaryDofFlag.isSet(BOUNDARY_FILL_INFO_RECV_DOFS)) boundaryDofInfo[level][feSpace].geoDofs[static_cast(geo)].insert(dofs.begin(), dofs.end()); } } } } else { for (InteriorBoundary::iterator it(rankIntBoundary, level); !it.end(); ++it) it->rankObj.el->getAllDofs(feSpace, it->rankObj, sendDofs.getDofContainer(it.getRank(), feSpace, level)); for (InteriorBoundary::iterator it(otherIntBoundary, level); !it.end(); ++it) it->rankObj.el->getAllDofs(feSpace, it->rankObj, recvDofs.getDofContainer(it.getRank(), feSpace, level)); } // === Delete all empty DOF send and recv positions === sendDofs.removeEmpty(); recvDofs.removeEmpty(); } void MeshDistributor::removeMacroElements() { FUNCNAME("MeshDistributor::removeMacroElements()"); std::set macrosToRemove; for (deque::iterator it = mesh->firstMacroElement(); it != mesh->endOfMacroElements(); ++it) if (partitioner->getElementInRank()[(*it)->getIndex()] == false) macrosToRemove.insert(*it); mesh->removeMacroElements(macrosToRemove, feSpaces); } void MeshDistributor::updateLocalGlobalNumbering() { FUNCNAME("MeshDistributor::updateLocalGlobalNumbering()"); #if (DEBUG != 0) debug::ElementIdxToDofs elMap; debug::createSortedDofs(mesh, elMap); #endif int nLevels = levelData.getLevelNumber(); TEST_EXIT_DBG(nLevels >= 1)("Should not happen!\n"); dofMap.init(levelData, feSpaces, feSpaces, true, true); dofMap.setDofComm(sendDofs, recvDofs); dofMap.clear(); createBoundaryDofs(); for (unsigned int i = 0; i < feSpaces.size(); i++) updateLocalGlobalNumbering(feSpaces[i]); dofMap.update(); #if (DEBUG != 0) MSG("------------- Debug information -------------\n"); MSG("| number of levels: %d\n", nLevels); MSG("| number of FE spaces: %d\n", feSpaces.size()); for (int level = 0; level < nLevels; level++) { for (unsigned int i = 0; i < feSpaces.size(); i++) { MSG("| level = %d FE space = %d:\n", level, i); MSG("| nRankDofs = %d\n", dofMap[feSpaces[i]].nRankDofs[level]); MSG("| nOverallDofs = %d\n", dofMap[feSpaces[i]].nOverallDofs[level]); MSG("| rStartDofs = %d\n", dofMap[feSpaces[i]].rStartDofs[level]); } } stringstream oss; oss << debugOutputDir << "elementIndex-" << mpiRank << ".vtu"; debug::writeElementIndexMesh(mesh, oss.str()); ParallelDebug::writeDebugFile(*this, debugOutputDir + "mpi-dbg", "dat"); debug::testSortedDofs(mesh, elMap); ParallelDebug::testCommonDofs(*this, true); ParallelDebug::testGlobalIndexByCoords(*this); #else int tmp = 0; Parameters::get(name + "->write parallel debug file", tmp); if (tmp) ParallelDebug::writeDebugFile(*this, debugOutputDir + "mpi-dbg", "dat"); #endif } void MeshDistributor::updateLocalGlobalNumbering(const FiniteElemSpace *feSpace) { FUNCNAME("MeshDistributor::updateLocalGlobalNumbering()"); mesh->dofCompress(); // === Get all DOFs in ranks partition. === std::set rankDofSet; mesh->getAllDofs(feSpace, rankDofSet); DofContainer rankDofs(rankDofSet.begin(), rankDofSet.end()); sort(rankDofs.begin(), rankDofs.end(), cmpDofsByValue); // === Traverse interior boundaries and get all DOFs on them. 
=== int nLevels = levelData.getLevelNumber(); for (int level = 0; level < nLevels; level++) { DofContainerSet nonRankDofs; for (DofComm::Iterator it(recvDofs, level, feSpace); !it.end(); it.nextRank()) for (; !it.endDofIter(); it.nextDof()) nonRankDofs.insert(it.getDof()); for (unsigned int i = 0; i < rankDofs.size(); i++) if (nonRankDofs.count(rankDofs[i]) == 0) dofMap[feSpace].insertRankDof(level, *(rankDofs[i])); for (DofComm::Iterator it(recvDofs, level, feSpace); !it.end(); it.nextRank()) for (; !it.endDofIter(); it.nextDof()) dofMap[feSpace].insertNonRankDof(level, it.getDofIndex()); } // === Update DOF admins due to new number of DOFs. === lastMeshChangeIndex = mesh->getChangeIndex(); #if (DEBUG != 0) ParallelDebug::testDofContainerCommunication(*this, sendDofs.getData(), recvDofs.getData()); #endif } void MeshDistributor::createPeriodicMap() { FUNCNAME("MeshDistributor::createPeriodicMap()"); // Clear all periodic DOF mappings calculated before. We do it from scratch. periodicDofs.init(levelData.getLevelNumber()); periodicMap.clear(); // If there are no periodic boundaries, return. Note that periodicDofs and // periodicMap must be still cleared before: if we do repartitioning and // there were periodic boundaries in subdomain before and after repartitioning // there are no more periodic boundaries. if (periodicBoundary.boundary.size() == 0) return; for (unsigned int i = 0; i < feSpaces.size(); i++) createPeriodicMap(feSpaces[i]); } void MeshDistributor::createPeriodicMap(const FiniteElemSpace *feSpace) { FUNCNAME("MeshDistributor::createPeriodicMap()"); StdMpi > stdMpi(mpiComm, false); // === Each rank traverse its periodic boundaries and sends the DOF === // === indices to the rank "on the other side" of the periodic boundary. === map > rankToDofType; for (RankToBoundMap::iterator it = periodicBoundary.boundary.begin(); it != periodicBoundary.boundary.end(); ++it) { if (it->first == mpiRank) { // Here we have a periodic boundary within rank's subdomain. So we can // compute the periodic mappings without communication. TEST_EXIT_DBG(it->second.size() % 2 == 0)("Should not happen!\n"); for (unsigned int i = 0; i < it->second.size(); i++) { AtomicBoundary &bound = it->second[i]; TEST_EXIT_DBG(bound.rankObj.el)("No rank object!\n"); TEST_EXIT_DBG(bound.neighObj.el)("No neigh object!\n"); DofContainer dofs0, dofs1; bound.rankObj.el->getAllDofs(feSpace, bound.rankObj, dofs0); bound.neighObj.el->getAllDofs(feSpace, bound.neighObj, dofs1); TEST_EXIT_DBG(dofs0.size() == dofs1.size()) ("Number of DOFs does not fit together: %d %d\n", dofs0.size(), dofs1.size()); BoundaryType type = bound.type; for (unsigned int j = 0; j < dofs0.size(); j++) { DegreeOfFreedom globalDof0 = dofMap[feSpace][0][*(dofs0[j])].global; DegreeOfFreedom globalDof1 = dofMap[feSpace][0][*(dofs1[j])].global; if (!periodicMap.isPeriodicOnBound(feSpace, type, globalDof0)) periodicMap.add(feSpace, type, globalDof0, globalDof1); } } } else { // Here we have a periodic boundary between two ranks. // Create DOF indices on the boundary. DofContainer& dofs = periodicDofs.getDofContainer(it->first, feSpace); for (vector::iterator boundIt = it->second.begin(); boundIt != it->second.end(); ++boundIt) { int nDofs = dofs.size(); boundIt->rankObj.el->getAllDofs(feSpace, boundIt->rankObj, dofs); for (unsigned int i = 0; i < (dofs.size() - nDofs); i++) rankToDofType[it->first].push_back(boundIt->type); } // Send the global indices to the rank on the other side. 
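      // (Both ranks collected these DOFs by traversing the shared periodic
      // boundaries in the same order, which createBoundaryData() enforces, so
      // the receiving rank can pair the incoming global indices with its own
      // DOF list purely by position.)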
        // Send the global indices to the rank on the other side.
        stdMpi.getSendData(it->first).reserve(dofs.size());
        for (unsigned int i = 0; i < dofs.size(); i++)
          stdMpi.getSendData(it->first).push_back(dofMap[feSpace][0][*(dofs[i])].global);

        // Receive the same number of DOFs from this rank.
        stdMpi.recv(it->first, dofs.size());
      }
    }

    stdMpi.updateSendDataSize();
    stdMpi.startCommunication();

    // === The rank has received the DOFs from the rank on the other side of ===
    // === the boundary. Now it can use them to create the mapping between   ===
    // === the periodic DOFs in this rank and the corresponding periodic     ===
    // === DOFs from the other ranks.                                        ===

    for (RankToBoundMap::iterator it = periodicBoundary.boundary.begin();
         it != periodicBoundary.boundary.end(); ++it) {
      DofContainer& dofs = periodicDofs.getDofContainer(it->first, feSpace);
      vector<BoundaryType>& types = rankToDofType[it->first];

      TEST_EXIT_DBG(dofs.size() == types.size())("Should not happen!\n");

      // Add the received DOFs to the mapping.
      for (unsigned int i = 0; i < dofs.size(); i++) {
        int globalDofIndex = dofMap[feSpace][0][*(dofs[i])].global;
        int mapGlobalDofIndex = stdMpi.getRecvData(it->first)[i];
        BoundaryType type = types[i];

        // Check if this global DOF with the corresponding boundary type was
        // not added before by another periodic boundary from another rank.
        if (!periodicMap.isPeriodicOnBound(feSpace, type, globalDofIndex))
          periodicMap.add(feSpace, type, globalDofIndex, mapGlobalDofIndex);
      }
    }


    StdMpi<PeriodicDofMap> stdMpi2(mpiComm);

    for (RankToBoundMap::iterator it = periodicBoundary.boundary.begin();
         it != periodicBoundary.boundary.end(); ++it) {
      if (it->first == mpiRank)
        continue;

      PeriodicDofMap perObjMap;

      for (vector<AtomicBoundary>::iterator boundIt = it->second.begin();
           boundIt != it->second.end(); ++boundIt) {
        // If the boundary type is a normal periodic BC, continue. We just
        // operate on indirectly connected periodic boundaries.
        if (elObjDb.isValidPeriodicType(boundIt->type))
          continue;

        DofContainer dofs;
        boundIt->rankObj.el->getAllDofs(feSpace, boundIt->rankObj, dofs);

        for (unsigned int i = 0; i < dofs.size(); i++) {
          DegreeOfFreedom globalDof = dofMap[feSpace][0][*dofs[i]].global;

          std::set<BoundaryType>& assoc =
            periodicMap.getAssociations(feSpace, globalDof);
          TEST_EXIT_DBG(assoc.size() > 0)("Should not happen!\n");

          for (std::set<BoundaryType>::iterator perAscIt = assoc.begin();
               perAscIt != assoc.end(); ++perAscIt)
            if (elObjDb.isValidPeriodicType(*perAscIt))
              perObjMap[*perAscIt][globalDof] =
                periodicMap.map(feSpace, *perAscIt, globalDof);
        }
      }

      stdMpi2.send(it->first, perObjMap);
      stdMpi2.recv(it->first);
    }

    stdMpi2.startCommunication();

    // NOTE: Before changing the data structure of periodic boundary DOFs,
    // at this place no periodicDofAssociations are set for the received
    // DOFs. I'm not sure what is correct here.
    for (map<int, PeriodicDofMap>::iterator it = stdMpi2.getRecvData().begin();
         it != stdMpi2.getRecvData().end(); ++it)
      periodicMap.add(feSpace, it->second);
  }
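  // Bookkeeping of the full macro mesh: for every macro element we keep a
  // pointer to the element and the indices of its neighbours. This data stays
  // available after macro elements have been removed from the rank's local
  // mesh, presumably so that elements can be handed back to this rank during
  // later repartitioning.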
  void MeshDistributor::createMacroElementInfo()
  {
    FUNCNAME("MeshDistributor::createMacroElementInfo()");

    for (deque<MacroElement*>::iterator it = mesh->firstMacroElement();
         it != mesh->endOfMacroElements(); ++it) {
      allMacroElements.push_back(*it);

      int elIndex = (*it)->getIndex();
      macroElementNeighbours[elIndex].resize(mesh->getGeo(NEIGH));
      for (int i = 0; i < mesh->getGeo(NEIGH); i++)
        macroElementNeighbours[elIndex][i] = (*it)->getNeighbour(i) ?
          (*it)->getNeighbour(i)->getIndex() : -1;
    }
  }


  void MeshDistributor::updateMacroElementInfo()
  {
    deque<MacroElement*>& meshMacros = mesh->getMacroElements();

    for (unsigned int i = 0; i < allMacroElements.size(); i++) {
      for (deque<MacroElement*>::iterator it = meshMacros.begin();
           it != meshMacros.end(); ++it) {
        if ((*it)->getIndex() == allMacroElements[i]->getIndex() &&
            *it != allMacroElements[i]) {
          delete allMacroElements[i];
          allMacroElements[i] = *it;
        }
      }
    }
  }


  void MeshDistributor::serialize(ostream &out)
  {
    FUNCNAME("MeshDistributor::serialize()");

    checkMeshChange(false);

    partitioner->serialize(out);

    SerUtil::serialize(out, elemWeights);
    SerUtil::serialize(out, partitionMap);

    elObjDb.serialize(out);

    rankIntBoundary.serialize(out);
    otherIntBoundary.serialize(out);
    periodicBoundary.serialize(out);

    serialize(out, sendDofs.getData());
    serialize(out, recvDofs.getData());

    // === Serialize FE space dependent data. ===

    dofMap.serialize(out);
    periodicMap.serialize(out, feSpaces);

    SerUtil::serialize(out, macroElementNeighbours);

    int nSize = allMacroElements.size();
    SerUtil::serialize(out, nSize);
    for (int i = 0; i < nSize; i++)
      allMacroElements[i]->serialize(out);

    SerUtil::serialize(out, nMeshChangesAfterLastRepartitioning);
    SerUtil::serialize(out, repartitioningCounter);

    levelData.serialize(out);
  }


  void MeshDistributor::deserialize(istream &in)
  {
    FUNCNAME("MeshDistributor::deserialize()");

    partitioner->deserialize(in);

    SerUtil::deserialize(in, elemWeights);
    SerUtil::deserialize(in, partitionMap);

    // Create two maps: one from element indices to the corresponding element
    // pointers, and one from DOF indices to the corresponding DOF pointers.
    map<int, Element*> elIndexMap;
    map<const FiniteElemSpace*, map<int, const DegreeOfFreedom*> > dofIndexMap;

    for (unsigned int i = 0; i < feSpaces.size(); i++) {
      ElementDofIterator elDofIter(feSpaces[i]);
      TraverseStack stack;
      ElInfo *elInfo = stack.traverseFirst(mesh, -1, Mesh::CALL_EVERY_EL_PREORDER);
      while (elInfo) {
        Element *el = elInfo->getElement();
        elIndexMap[el->getIndex()] = el;

        if (el->isLeaf()) {
          elDofIter.reset(el);
          do {
            dofIndexMap[feSpaces[i]][elDofIter.getDof()] = elDofIter.getDofPtr();
          } while (elDofIter.next());
        }

        elInfo = stack.traverseNext(elInfo);
      }
    }

    elObjDb.deserialize(in);

    rankIntBoundary.deserialize(in, elIndexMap);
    otherIntBoundary.deserialize(in, elIndexMap);
    periodicBoundary.deserialize(in, elIndexMap);

    deserialize(in, sendDofs.getData(), dofIndexMap);
    deserialize(in, recvDofs.getData(), dofIndexMap);

    // === Deserialize FE space dependent data. ===

    dofMap.deserialize(in);
    periodicMap.deserialize(in, feSpaces);

    SerUtil::deserialize(in, macroElementNeighbours);

    int nSize = 0;
    SerUtil::deserialize(in, nSize);
    allMacroElements.resize(nSize);
    for (int i = 0; i < nSize; i++) {
      // We do not need the neighbour indices, but must still read them.
      vector<int> indices;
      allMacroElements[i] = new MacroElement(mesh->getDim());
      allMacroElements[i]->writeNeighboursTo(&indices);
      allMacroElements[i]->deserialize(in);
      allMacroElements[i]->getElement()->setMesh(mesh);
    }

    SerUtil::deserialize(in, nMeshChangesAfterLastRepartitioning);
    SerUtil::deserialize(in, repartitioningCounter);

    levelData.deserialize(in);

    deserialized = true;
  }

}