//
// Software License for AMDiS
//
// Copyright (c) 2010 Dresden University of Technology
// All rights reserved.
// Authors: Simon Vey, Thomas Witkowski et al.
//
// This file is part of AMDiS
//
// See also license.opensource.txt in the distribution.


#include "parallel/ParallelDebug.h"
#include "parallel/MeshDistributor.h"
#include "parallel/MpiHelper.h"
#include "ProblemStat.h"
#include "DOFVector.h"
#include "FixVec.h"
#include "StdMpi.h"
#include "Debug.h"
#include "io/VtkWriter.h"

namespace AMDiS {

  using namespace std;

  void ParallelDebug::testInteriorBoundary(MeshDistributor &pdb)
  {
    FUNCNAME("ParallelDebug::testInteriorBoundary()");

    vector<int*> sendBuffers, recvBuffers;

    MPI::Request request[pdb.intBoundary.own.size() +
                         pdb.intBoundary.other.size() +
                         pdb.intBoundary.periodic.size() * 2];
    int requestCounter = 0;


    // === Send rank's boundary information. ===

    for (RankToBoundMap::iterator rankIt = pdb.intBoundary.own.begin();
         rankIt != pdb.intBoundary.own.end(); ++rankIt) {
      int nSendInt = rankIt->second.size();
      int* buffer = new int[nSendInt];
      for (int i = 0; i < nSendInt; i++)
        buffer[i] = (rankIt->second)[i].rankObj.elIndex;

      sendBuffers.push_back(buffer);

      request[requestCounter++] =
        pdb.mpiComm.Isend(buffer, nSendInt, MPI_INT, rankIt->first, 0);
    }


    // === Receive information from other ranks about the interior boundaries. ===

    for (RankToBoundMap::iterator rankIt = pdb.intBoundary.other.begin();
         rankIt != pdb.intBoundary.other.end(); ++rankIt) {
      int nRecvInt = rankIt->second.size();
      int *buffer = new int[nRecvInt];
      recvBuffers.push_back(buffer);

      request[requestCounter++] =
        pdb.mpiComm.Irecv(buffer, nRecvInt, MPI_INT, rankIt->first, 0);
    }


    // === Last, do the same for the periodic boundaries. ===

    for (RankToBoundMap::iterator rankIt = pdb.intBoundary.periodic.begin();
         rankIt != pdb.intBoundary.periodic.end(); ++rankIt) {
      if (rankIt->first == pdb.mpiRank)
        continue;

      int nValues = rankIt->second.size();
      int* sBuffer = new int[nValues];
      for (int i = 0; i < nValues; i++)
        sBuffer[i] = (rankIt->second)[i].rankObj.elIndex;

      sendBuffers.push_back(sBuffer);

      request[requestCounter++] =
        pdb.mpiComm.Isend(sBuffer, nValues, MPI_INT, rankIt->first, 0);

      int *rBuffer = new int[nValues];
      recvBuffers.push_back(rBuffer);

      request[requestCounter++] =
        pdb.mpiComm.Irecv(rBuffer, nValues, MPI_INT, rankIt->first, 0);
    }


    // === Finish communication and delete all send buffers. ===

    MPI::Request::Waitall(requestCounter, request);
    for (int i = 0; i < static_cast<int>(sendBuffers.size()); i++)
      delete [] sendBuffers[i];


    // === Finally, check the results, i.e., whether the element indices at  ===
    // === the boundaries fit together. First check the interior boundaries, ===
    // === and after this the periodic ones.                                 ===
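    // Each received buffer holds, for every atomic boundary shared with a
    // neighbour rank, the element index stored on that rank; it must match the
    // neighbour element index stored locally, entry by entry.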
    int bufCounter = 0;
    for (RankToBoundMap::iterator rankIt = pdb.intBoundary.other.begin();
         rankIt != pdb.intBoundary.other.end(); ++rankIt) {

      TEST_EXIT(rankIt->second.size() ==
                pdb.intBoundary.other[rankIt->first].size())
        ("Boundaries do not fit together!\n");

      for (unsigned int i = 0; i < rankIt->second.size(); i++) {
        int elIndex1 = recvBuffers[bufCounter][i];
        int elIndex2 = pdb.intBoundary.other[rankIt->first][i].neighObj.elIndex;

        TEST_EXIT(elIndex1 == elIndex2)("Wrong element index at interior boundary!\n");
      }

      delete [] recvBuffers[bufCounter++];
    }

    for (RankToBoundMap::iterator rankIt = pdb.intBoundary.periodic.begin();
         rankIt != pdb.intBoundary.periodic.end(); ++rankIt) {
      if (rankIt->first == pdb.mpiRank)
        continue;

      for (unsigned int i = 0; i < rankIt->second.size(); i++) {
        int elIndex1 = recvBuffers[bufCounter][i];
        int elIndex2 = pdb.intBoundary.periodic[rankIt->first][i].neighObj.elIndex;

        TEST_EXIT(elIndex1 == elIndex2)
          ("Wrong element index at periodic boundary el %d with rank %d: %d %d\n",
           pdb.intBoundary.periodic[rankIt->first][i].rankObj.elIndex,
           rankIt->first, elIndex1, elIndex2);
      }

      delete [] recvBuffers[bufCounter++];
    }
  }


  void ParallelDebug::testPeriodicBoundary(MeshDistributor &pdb)
  {
    FUNCNAME("ParallelDebug::testPeriodicBoundary()");

    for (unsigned int i = 0; i < pdb.feSpaces.size(); i++)
      testPeriodicBoundary(pdb, pdb.feSpaces[i]);
  }


  void ParallelDebug::testPeriodicBoundary(MeshDistributor &pdb,
                                           const FiniteElemSpace *feSpace)
  {
    FUNCNAME("ParallelDebug::testPeriodicBoundary()");

    // === 1. check: All periodic DOFs should have the correct number of ===
    // === periodic associations.                                        ===

    PeriodicMap &perMap = pdb.getPeriodicMap();
    for (map<DegreeOfFreedom, std::set<BoundaryType> >::iterator it =
           perMap.periodicDofAssociations[feSpace].begin();
         it != perMap.periodicDofAssociations[feSpace].end(); ++it) {
      WorldVector<double> c;
      pdb.mesh->getDofIndexCoords(it->first, pdb.feSpaces[0], c);
      int nAssoc = it->second.size();
    }


    // === 2. check: All periodic DOFs must be symmetric, i.e., if A is mapped ===
    // === to B, then B must be mapped to A.                                   ===

    StdMpi<PeriodicDofMap> stdMpi(pdb.mpiComm, true);

    if (pdb.mpiRank == 0) {
      for (int i = 1; i < pdb.mpiSize; i++)
        stdMpi.recv(i);
    } else {
      stdMpi.send(0, perMap.periodicDofMap[feSpace]);
    }

    stdMpi.startCommunication();

    int foundError = 0;

    // === The boundary DOFs are checked only on the zero rank. ===

    if (pdb.mpiRank == 0) {
      // Stores to each rank the periodic DOF mappings of this rank.
      map<int, PeriodicDofMap> rankToMaps;
      PeriodicDofMap dofMap = perMap.periodicDofMap[feSpace];
      rankToMaps[0] = dofMap;

      for (int i = 1; i < pdb.mpiSize; i++) {
        PeriodicDofMap &otherMap = stdMpi.getRecvData(i);
        rankToMaps[i] = otherMap;

        for (PeriodicDofMap::iterator it = otherMap.begin();
             it != otherMap.end(); ++it) {
          for (std::map<DegreeOfFreedom, DegreeOfFreedom>::iterator dofIt = it->second.begin();
               dofIt != it->second.end(); ++dofIt) {
            if (dofMap.count(it->first) == 1 &&
                dofMap[it->first].count(dofIt->first) == 1) {
              TEST_EXIT_DBG(dofMap[it->first][dofIt->first] == dofIt->second)
                ("Should not happen!\n");
            } else {
              dofMap[it->first][dofIt->first] = dofIt->second;
            }
          }
        }
      }

      // === Now we test: if global DOF A is mapped to B, then B must be ===
      // === mapped to A for the same boundary type.                     ===
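      // Walk over the merged map: for every entry A -> B the reverse entry
      // B -> A must exist under the same boundary type. On a mismatch, print
      // which ranks contributed the conflicting associations.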
      for (PeriodicDofMap::iterator it = dofMap.begin(); it != dofMap.end(); ++it) {
        for (std::map<DegreeOfFreedom, DegreeOfFreedom>::iterator dofIt = it->second.begin();
             dofIt != it->second.end(); ++dofIt) {
          if (it->second[dofIt->second] != dofIt->first) {
            MSG("[DBG] For boundary type %d: DOF %d -> %d, but %d -> %d!\n",
                it->first,
                dofIt->first, dofIt->second,
                dofIt->second, it->second[dofIt->second]);

            for (int i = 0; i < pdb.mpiSize; i++) {
              if (rankToMaps[i][it->first].count(dofIt->first) == 1) {
                MSG("[DBG]    %d -> %d in rank %d\n",
                    dofIt->first, rankToMaps[i][it->first][dofIt->first], i);
              }

              if (rankToMaps[i][it->first].count(dofIt->second) == 1) {
                MSG("[DBG]    %d -> %d in rank %d\n",
                    dofIt->second, rankToMaps[i][it->first][dofIt->second], i);
              }
            }

            ERROR("Wrong periodic DOFs!\n");
            foundError = 1;
          }
        }
      }
    }

    mpi::globalAdd(foundError);
    TEST_EXIT(foundError == 0)("Error found on at least one rank!\n");


    // === 3. check: For all edge and face periodic DOFs, at least one       ===
    // === component of the coordinates of each periodic DOF pair must be    ===
    // === equal (at least as long as we consider periodic boundaries only   ===
    // === on rectangles and boxes).                                         ===

    RankToCoords sendCoords;
    map<int, vector<BoundaryType> > rankToDofType;

    for (RankToBoundMap::iterator it = pdb.intBoundary.periodic.begin();
         it != pdb.intBoundary.periodic.end(); ++it) {
      if (it->first == pdb.mpiRank)
        continue;

      for (vector<AtomicBoundary>::iterator boundIt = it->second.begin();
           boundIt != it->second.end(); ++boundIt) {
        if (boundIt->rankObj.subObj == VERTEX)
          continue;

        DofContainer dofs;
        boundIt->rankObj.el->getAllDofs(feSpace, boundIt->rankObj, dofs);

        for (unsigned int i = 0; i < dofs.size(); i++) {
          WorldVector<double> c;
          pdb.mesh->getDofIndexCoords(*(dofs[i]), feSpace, c);
          sendCoords[it->first].push_back(c);
          rankToDofType[it->first].push_back(boundIt->type);
        }
      }
    }

    // Each rank must receive exactly the same number of coordinates as it sends
    // to another rank.
    RankToCoords recvCoords;
    for (RankToCoords::iterator it = sendCoords.begin(); it != sendCoords.end(); ++it)
      recvCoords[it->first].resize(it->second.size());

    StdMpi<CoordsVec> stdMpiCoords(pdb.mpiComm, true);
    stdMpiCoords.send(sendCoords);
    stdMpiCoords.recv(recvCoords);
    stdMpiCoords.startCommunication();


    for (RankToCoords::iterator it = sendCoords.begin(); it != sendCoords.end(); ++it) {
      for (unsigned int i = 0; i < it->second.size(); i++) {
        WorldVector<double> &c0 = it->second[i];
        WorldVector<double> &c1 = stdMpiCoords.getRecvData(it->first)[i];

        int nEqual = 0;
        for (int j = 0; j < pdb.mesh->getDim(); j++)
          if (c0[j] == c1[j])
            nEqual++;

        if (nEqual == 0) {
          MSG("[DBG] %d-th periodic DOF in boundary between ranks %d <-> %d is not correct!\n",
              i, pdb.mpiRank, it->first);
          MSG("[DBG] Coords on rank %d: %f %f %f\n",
              pdb.mpiRank, c0[0], c0[1], (pdb.mesh->getDim() == 3 ? c0[2] : 0.0));
          MSG("[DBG] Coords on rank %d: %f %f %f\n",
              it->first, c1[0], c1[1], (pdb.mesh->getDim() == 3 ? c1[2] : 0.0));

          foundError = 1;
        }
      }
    }

    mpi::globalAdd(foundError);
    TEST_EXIT(foundError == 0)("Wrong periodic coordinates found on at least one rank!\n");
  }


  void ParallelDebug::testCommonDofs(MeshDistributor &pdb, bool printCoords)
  {
    FUNCNAME("ParallelDebug::testCommonDofs()");

    clock_t first = clock();

    // Get the FE space with basis functions of the highest degree.
    const FiniteElemSpace *feSpace = pdb.feSpaces[pdb.feSpaces.size() - 1];

    int testCommonDofs = 1;
    Parameters::get("dbg->test common dofs", testCommonDofs);
    if (testCommonDofs == 0) {
      MSG("Skip test common dofs!\n");
      return;
    }
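    // Strategy: collect the coordinates of all DOFs on interior boundaries,
    // exchange them with the neighbouring ranks, and compare the received
    // coordinates entry by entry with the locally expected ones.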
    /// Defines a mapping type from rank numbers to sets of DOFs.
    typedef map<int, DofContainer> RankToDofContainer;

    // Maps to each neighbour rank an array of WorldVectors. This array contains
    // the coordinates of all DOFs this rank shares on the interior boundary with
    // the neighbour rank. A rank sends the coordinates to another rank if it owns
    // the boundary's DOFs.
    RankToCoords sendCoords;

    // A rank receives all boundary DOFs that are at its interior boundaries but
    // are not owned by the rank. This map stores for each rank the coordinates of
    // DOFs this rank expects to receive from it.
    RankToCoords recvCoords;

    DOFVector<WorldVector<double> > coords(feSpace, "dofCoords");
    pdb.mesh->getDofIndexCoords(feSpace, coords);

    for (DofComm::Iterator it(pdb.dofComm.getSendDofs(), feSpace);
         !it.end(); it.nextRank())
      for (; !it.endDofIter(); it.nextDof())
        sendCoords[it.getRank()].push_back(coords[it.getDofIndex()]);

    for (DofComm::Iterator it(pdb.dofComm.getRecvDofs(), feSpace);
         !it.end(); it.nextRank())
      for (; !it.endDofIter(); it.nextDof())
        recvCoords[it.getRank()].push_back(coords[it.getDofIndex()]);

    vector<int> sendSize(pdb.mpiSize, 0);
    vector<int> recvSize(pdb.mpiSize, 0);
    vector<int> recvSizeBuffer(pdb.mpiSize, 0);
    MPI::Request request[(pdb.mpiSize - 1) * 2];
    int requestCounter = 0;

    for (RankToCoords::iterator it = sendCoords.begin(); it != sendCoords.end(); ++it)
      sendSize[it->first] = it->second.size();

    for (RankToCoords::iterator it = recvCoords.begin(); it != recvCoords.end(); ++it)
      recvSize[it->first] = it->second.size();

    for (int i = 0; i < pdb.mpiSize; i++) {
      if (i == pdb.mpiRank)
        continue;

      request[requestCounter++] =
        pdb.mpiComm.Isend(&(sendSize[i]), 1, MPI_INT, i, 0);
    }

    for (int i = 0; i < pdb.mpiSize; i++) {
      if (i == pdb.mpiRank)
        continue;

      request[requestCounter++] =
        pdb.mpiComm.Irecv(&(recvSizeBuffer[i]), 1, MPI_INT, i, 0);
    }

    MPI::Request::Waitall(requestCounter, request);

    int foundError = 0;
    for (int i = 0; i < pdb.mpiSize; i++) {
      if (i == pdb.mpiRank)
        continue;

      if (recvSize[i] != recvSizeBuffer[i]) {
        ERROR("MPI rank %d expects to receive %d DOFs from rank %d, but this rank sends %d DOFs!\n",
              pdb.mpiRank, recvSize[i], i, recvSizeBuffer[i]);
        foundError = 1;
      }
    }
    mpi::globalAdd(foundError);
    TEST_EXIT(foundError == 0)("Error found on at least one rank!\n");

    // === Now we know that the numbers of sent and received DOFs fit together. ===
    // === So we can check whether the coordinates of the communicated DOFs are ===
    // === also the same on both corresponding ranks.                           ===

    StdMpi<CoordsVec> stdMpi(pdb.mpiComm, true);
    stdMpi.send(sendCoords);
    stdMpi.recv(recvCoords);
    stdMpi.startCommunication();

    int dimOfWorld = Global::getGeo(WORLD);

    // === Compare the received with the expected coordinates. ===

    for (RankToCoords::iterator it = stdMpi.getRecvData().begin();
         it != stdMpi.getRecvData().end(); ++it) {
      for (unsigned int i = 0; i < it->second.size(); i++) {
        WorldVector<double> tmp = (it->second)[i];
        tmp -= recvCoords[it->first][i];

        if (norm(tmp) > 1e-8) {
          // === Print error message if the coordinates are not the same. ===
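          // With printCoords enabled, dump both the expected and the received
          // coordinates (plus per-DOF debug info) before flagging the error.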
          if (printCoords) {
            MSG("[DBG] i = %d\n", i);
            stringstream oss;
            oss.precision(5);
            oss << "[DBG] Rank " << pdb.mpiRank << " from rank " << it->first
                << " expect coords (";
            for (int k = 0; k < dimOfWorld; k++) {
              oss << recvCoords[it->first][i][k];
              if (k + 1 < dimOfWorld)
                oss << " / ";
            }
            oss << ") received coords (";
            for (int k = 0; k < dimOfWorld; k++) {
              oss << (it->second)[i][k];
              if (k + 1 < dimOfWorld)
                oss << " / ";
            }
            oss << ")";
            MSG("%s\n", oss.str().c_str());

            debug::printInfoByDof(feSpace,
                                  *(pdb.dofComm.getRecvDofs()[0][it->first][feSpace][i]));
          }
          ERROR("Wrong DOFs in rank %d!\n", pdb.mpiRank);
          foundError = 1;
        }
      }
    }
    mpi::globalAdd(foundError);
    TEST_EXIT(foundError == 0)("Error found on at least one rank!\n");

    INFO(pdb.info, 8)("Test common dofs needed %.5f seconds\n",
                      TIME_USED(first, clock()));
  }


  void ParallelDebug::testGlobalIndexByCoords(MeshDistributor &pdb)
  {
    FUNCNAME("ParallelDebug::testGlobalIndexByCoords()");

    // Get the FE space with basis functions of the highest degree.
    const FiniteElemSpace *feSpace = pdb.feSpaces[pdb.feSpaces.size() - 1];

    DOFVector<WorldVector<double> > coords(feSpace, "tmp");
    pdb.mesh->getDofIndexCoords(feSpace, coords);

    typedef map<WorldVector<double>, int> CoordsIndexMap;
    CoordsIndexMap coordsToIndex;

    DOFIterator<WorldVector<double> > it(&coords, USED_DOFS);
    for (it.reset(); !it.end(); ++it) {
      coordsToIndex[(*it)] = pdb.dofMap[feSpace][0][it.getDOFIndex()].global;
      //  MSG("CHECK FOR DOF %d AT COORDS %f %f %f\n",
      //      coordsToIndex[(*it)], (*it)[0], (*it)[1], (*it)[2]);
    }

    StdMpi<CoordsIndexMap> stdMpi(pdb.mpiComm, true);
    for (DofComm::Iterator it(pdb.dofComm.getSendDofs(), feSpace);
         !it.end(); it.nextRank())
      stdMpi.send(it.getRank(), coordsToIndex);
    for (DofComm::Iterator it(pdb.dofComm.getRecvDofs(), feSpace);
         !it.end(); it.nextRank())
      stdMpi.recv(it.getRank());

    stdMpi.startCommunication();

    int foundError = 0;
    for (DofComm::Iterator it(pdb.dofComm.getRecvDofs(), feSpace);
         !it.end(); it.nextRank()) {
      CoordsIndexMap& otherCoords = stdMpi.getRecvData(it.getRank());

      for (CoordsIndexMap::iterator coordsIt = otherCoords.begin();
           coordsIt != otherCoords.end(); ++coordsIt) {
        if (coordsToIndex.count(coordsIt->first) == 1 &&
            coordsToIndex[coordsIt->first] != coordsIt->second) {
          stringstream oss;
          oss.precision(5);
          oss << "DOF at coords ";
          for (int i = 0; i < Global::getGeo(WORLD); i++)
            oss << coordsIt->first[i] << " ";
          oss << " do not fit together on rank "
              << pdb.getMpiRank() << " (global index: "
              << coordsToIndex[coordsIt->first] << ") and on rank "
              << it.getRank() << " (global index: " << coordsIt->second << ")";

          MSG("[DBG] %s\n", oss.str().c_str());
          foundError = 1;
        }
      }
    }

    mpi::globalAdd(foundError);
    TEST_EXIT(foundError == 0)("Error found on at least one rank!\n");
  }


  void ParallelDebug::testAllElements(MeshDistributor &pdb)
  {
    FUNCNAME("ParallelDebug::testAllElements()");

    std::set<int> macroElements;
    int minElementIndex = numeric_limits<int>::max();
    int maxElementIndex = numeric_limits<int>::min();

    TraverseStack stack;
    ElInfo *elInfo = stack.traverseFirst(pdb.mesh, 0, Mesh::CALL_EL_LEVEL);
    while (elInfo) {
      int elIndex = elInfo->getElement()->getIndex();
      minElementIndex = std::min(minElementIndex, elIndex);
      maxElementIndex = std::max(maxElementIndex, elIndex);
      macroElements.insert(elInfo->getElement()->getIndex());
      elInfo = stack.traverseNext(elInfo);
    }

    int globalMinIndex, globalMaxIndex;
    pdb.mpiComm.Allreduce(&minElementIndex, &globalMinIndex, 1, MPI_INT, MPI_MIN);
    pdb.mpiComm.Allreduce(&maxElementIndex, &globalMaxIndex, 1, MPI_INT, MPI_MAX);

    TEST_EXIT(globalMinIndex == 0)("No macro element with index 0!\n");
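    // Every macro element index from 0 to the global maximum must be owned by
    // exactly one rank: sum the per-rank ownership flags over all ranks.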
    for (int i = 0; i <= globalMaxIndex; i++) {
      int sendId = macroElements.count(i);
      int recvId = 0;
      pdb.mpiComm.Allreduce(&sendId, &recvId, 1, MPI_INT, MPI_SUM);

      if (recvId != 1 && pdb.mpiRank == 0) {
        if (recvId == 0) {
          ERROR_EXIT("Element %d is member of no partition!\n", i);
        }

        if (recvId > 1) {
          ERROR_EXIT("Element %d is member of more than one partition!\n", i);
        }
      }
    }
  }


  void ParallelDebug::testDofContainerCommunication(MeshDistributor &pdb)
  {
    FUNCNAME("ParallelDebug::testDofContainerCommunication()");

    typedef map<int, map<const FiniteElemSpace*, DofContainer> >::iterator it_type;

    map<int, int> sendNumber;
    for (it_type it = pdb.dofComm.getSendDofs()[0].begin();
         it != pdb.dofComm.getSendDofs()[0].end(); ++it)
      for (map<const FiniteElemSpace*, DofContainer>::iterator dcIt = it->second.begin();
           dcIt != it->second.end(); ++dcIt)
        sendNumber[it->first] += dcIt->second.size();

    map<int, int> recvNumber;
    for (it_type it = pdb.dofComm.getRecvDofs()[0].begin();
         it != pdb.dofComm.getRecvDofs()[0].end(); ++it)
      for (map<const FiniteElemSpace*, DofContainer>::iterator dcIt = it->second.begin();
           dcIt != it->second.end(); ++dcIt)
        recvNumber[it->first] += dcIt->second.size();

    StdMpi<int> stdMpi(pdb.mpiComm);
    stdMpi.send(sendNumber);
    for (it_type it = pdb.dofComm.getRecvDofs()[0].begin();
         it != pdb.dofComm.getRecvDofs()[0].end(); ++it)
      stdMpi.recv(it->first);
    stdMpi.startCommunication();

    int foundError = 0;
    for (map<int, int>::iterator it = stdMpi.getRecvData().begin();
         it != stdMpi.getRecvData().end(); ++it) {
      if (it->second != recvNumber[it->first]) {
        ERROR("Rank expects %d DOFs to receive from rank %d, but %d DOFs are received!\n",
              recvNumber[it->first], it->first, it->second);
        foundError = 1;
      }
    }

    mpi::globalAdd(foundError);
    TEST_EXIT(foundError == 0)("Error found on at least one rank!\n");
  }
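  // Traverses the rank's local mesh and checks that no two different DOF
  // indices are stored for identical vertex coordinates, which would indicate
  // duplicated vertex DOFs.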
  void ParallelDebug::testDoubleDofs(Mesh *mesh)
  {
    FUNCNAME("ParallelDebug::testDoubleDofs()");

    map<WorldVector<double>, DegreeOfFreedom> cMap;
    int foundError = 0;

    TraverseStack stack;
    ElInfo *elInfo = stack.traverseFirst(mesh, -1,
                                         Mesh::CALL_LEAF_EL | Mesh::FILL_COORDS);
    while (elInfo) {
      for (int i = 0; i < mesh->getGeo(VERTEX); i++) {
        WorldVector<double> &c = elInfo->getCoord(i);
        if (cMap.count(c) == 0) {
          cMap[c] = elInfo->getElement()->getDof(i, 0);
        } else {
          if (cMap[c] != elInfo->getElement()->getDof(i, 0)) {
            MSG("[DBG] Found two DOFs %d and %d with the same coords %f %f %f!\n",
                cMap[c], elInfo->getElement()->getDof(i, 0),
                c[0], c[1], mesh->getDim() == 3 ? c[2] : 0.0);
            foundError = 1;
          }
        }
      }

      elInfo = stack.traverseNext(elInfo);
    }

    mpi::globalAdd(foundError);
    TEST_EXIT(foundError == 0)("Error found on at least one rank!\n");
  }


  void ParallelDebug::printMapLocalGlobal(MeshDistributor &pdb, int rank)
  {
    FUNCNAME("ParallelDebug::printMapLocalGlobal()");

    ERROR_EXIT("Rewrite this function!\n");

#if 0
    if (rank == -1 || pdb.mpiRank == rank) {
      const FiniteElemSpace *feSpace = pdb.feSpaces[0];

      cout << "====== DOF MAP LOCAL -> GLOBAL ====== " << endl;

      DofMap &dofMap = pdb.dofMap[feSpace].getMap();
      for (DofMap::iterator it = dofMap.begin(); it != dofMap.end(); ++it) {
        cout << "DOF " << it->first << " " << it->second.global << "\n";
        WorldVector<double> coords;
        pdb.mesh->getDofIndexCoords(it->first, feSpace, coords);
        coords.print();

        for (DofComm::Iterator rit(pdb.dofComm.getSendDofs(), feSpace);
             !rit.end(); rit.nextRank())
          for (; !rit.endDofIter(); rit.nextDof())
            if (it->first == rit.getDofIndex())
              cout << "SEND DOF TO " << rit.getRank() << endl;

        for (DofComm::Iterator rit(pdb.dofComm.getRecvDofs(), feSpace);
             !rit.end(); rit.nextRank())
          for (; !rit.endDofIter(); rit.nextDof())
            if (it->first == rit.getDofIndex())
              cout << "RECV DOF FROM " << rit.getRank() << endl;

        cout << "------" << endl;
      }
    }
#endif
  }


  void ParallelDebug::printMapPeriodic(MeshDistributor &pdb, int rank)
  {
    FUNCNAME("ParallelDebug::printMapPeriodic()");

    ERROR_EXIT("Function must be rewritten, check svn for old code!\n");
  }


  void ParallelDebug::printRankDofs(MeshDistributor &pdb,
                                    int rank,
                                    DofContainer& rankDofs,
                                    DofContainer& rankAllDofs)
  {
    if (rank == -1 || pdb.mpiRank == rank) {
      cout << "====== RANK DOF INFORMATION ====== " << endl;

      cout << "  RANK OWNED DOFS: " << endl;
      for (DofContainer::iterator dofit = rankDofs.begin();
           dofit != rankDofs.end(); ++dofit) {
        cout << "    " << **dofit << endl;
        WorldVector<double> coords;
        pdb.mesh->getDofIndexCoords(*dofit, pdb.feSpaces[0], coords);
        coords.print();
      }

      cout << "  RANK ALL DOFS: " << endl;
      for (DofContainer::iterator dofit = rankAllDofs.begin();
           dofit != rankAllDofs.end(); ++dofit) {
        cout << "    " << **dofit << endl;
        WorldVector<double> coords;
        pdb.mesh->getDofIndexCoords(*dofit, pdb.feSpaces[0], coords);
        coords.print();
      }
    }
  }


  void ParallelDebug::printBoundaryInfo(MeshDistributor &pdb)
  {
    FUNCNAME("ParallelDebug::printBoundaryInfo()");

    int tmp = 0;
    Parameters::get("parallel->debug->print boundary info", tmp);
    if (tmp <= 0)
      return;

    for (InteriorBoundary::iterator it(pdb.intBoundary.own); !it.end(); ++it) {
      MSG("Rank owned boundary with rank %d: \n", it.getRank());
      MSG("  ranks obj-ind: %d  sub-obj: %d  ith-obj: %d\n",
          it->rankObj.elIndex, it->rankObj.subObj, it->rankObj.ithObj);
      MSG("  neigh obj-ind: %d  sub-obj: %d  ith-obj: %d\n",
          it->neighObj.elIndex, it->neighObj.subObj, it->neighObj.ithObj);
    }

    for (InteriorBoundary::iterator it(pdb.intBoundary.other); !it.end(); ++it) {
      MSG("Other owned boundary with rank %d: \n", it.getRank());
      MSG("  ranks obj-ind: %d  sub-obj: %d  ith-obj: %d\n",
          it->rankObj.elIndex, it->rankObj.subObj, it->rankObj.ithObj);
      MSG("  neigh obj-ind: %d  sub-obj: %d  ith-obj: %d\n",
          it->neighObj.elIndex, it->neighObj.subObj, it->neighObj.ithObj);
    }

    for (InteriorBoundary::iterator it(pdb.intBoundary.periodic); !it.end(); ++it) {
      MSG("Periodic boundary (ID %d) with rank %d: \n",
          it->type, it.getRank());
      MSG("  ranks obj-ind: %d  sub-obj: %d  ith-obj: %d\n",
          it->rankObj.elIndex, it->rankObj.subObj, it->rankObj.ithObj);
      MSG("  neigh obj-ind: %d  sub-obj: %d  ith-obj: %d\n",
          it->neighObj.elIndex, it->neighObj.subObj, it->neighObj.ithObj);
    }
  }
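  // Writes one plain-text debug file per rank ("<prefix>-<rank>.<postfix>")
  // listing every used DOF (local index, global index, rank-ownership flag,
  // coordinates), followed by the local DOF indices of each leaf element.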
  void ParallelDebug::writeDebugFile(MeshDistributor &pdb,
                                     string prefix,
                                     string postfix)
  {
    FUNCNAME("ParallelDebug::writeDebugFile()");

    const FiniteElemSpace *feSpace = pdb.feSpaces[pdb.feSpaces.size() - 1];

    stringstream filename;
    filename << prefix << "-" << pdb.mpiRank << "." << postfix;

    DOFVector<WorldVector<double> > coords(feSpace, "tmp");
    pdb.mesh->getDofIndexCoords(feSpace, coords);

    typedef map<int, vector<DegreeOfFreedom> > ElDofMap;
    ElDofMap elDofMap;
    TraverseStack stack;
    const BasisFunction *basisFcts = feSpace->getBasisFcts();
    vector<DegreeOfFreedom> localIndices(basisFcts->getNumber());
    ElInfo *elInfo = stack.traverseFirst(pdb.mesh, -1, Mesh::CALL_LEAF_EL);
    while (elInfo) {
      basisFcts->getLocalIndices(elInfo->getElement(),
                                 feSpace->getAdmin(), localIndices);
      elDofMap[elInfo->getElement()->getIndex()] = localIndices;
      elInfo = stack.traverseNext(elInfo);
    }

    // === Write information about all DOFs. ===

    ofstream file;
    file.open(filename.str().c_str());
//     file << "# First line contains the number of DOFs, then each line has the format\n";
//     file << "# Local DOF index   Global DOF index   Is rank DOF   x-coord   y-coord   z-coord\n";
    file << coords.getUsedSize() << "\n";
    DOFIterator<WorldVector<double> > it(&coords, USED_DOFS);
    for (it.reset(); !it.end(); ++it) {
      file << it.getDOFIndex() << " "
           << pdb.dofMap[feSpace][0][it.getDOFIndex()].global << " "
           << pdb.dofMap[feSpace].isRankDof(it.getDOFIndex(), 0);
      for (int i = 0; i < pdb.mesh->getDim(); i++)
        file << " " << (*it)[i];
      file << "\n";
    }

    // === Write, for all elements in the rank's mesh, the included DOFs. ===

//     file << "\n\n";
//     file << "# First line contains the number of elements in the mesh, the second line the number of DOFs per element.\n";
//     file << "# Then, each entry consists of two lines. The first is the element index, the second is a list with the local DOF indices of this element.\n";
    file << elDofMap.size() << "\n";
    file << basisFcts->getNumber() << "\n";
    for (ElDofMap::iterator it = elDofMap.begin(); it != elDofMap.end(); ++it) {
      file << it->first << "\n";
      for (int i = 0; i < basisFcts->getNumber(); i++)
        file << it->second[i] << " ";
      file << "\n";
    }
    file.close();
  }


  void ParallelDebug::writePartitioning(MeshDistributor &pdb, string filename)
  {
    FUNCNAME("ParallelDebug::writePartitioning()");

    map<int, double> vec;
    TraverseStack stack;
    ElInfo *elInfo = stack.traverseFirst(pdb.mesh, -1,
                                         Mesh::CALL_LEAF_EL | Mesh::FILL_COORDS);

    while (elInfo) {
      int index = elInfo->getElement()->getIndex();
      vec[index] = pdb.partitionMap[index];
      elInfo = stack.traverseNext(elInfo);
    }

    ElementFileWriter::writeFile(vec, pdb.mesh, filename);
  }


  void ParallelDebug::writePartitioningFile(string filename,
                                            int counter,
                                            const FiniteElemSpace *feSpace)
  {
    FUNCNAME("ParallelDebug::writePartitioningFile()");

    stringstream oss;
    oss << filename;
    if (counter >= 0)
      oss << "-" << counter;
    oss << ".vtu";

    DOFVector<double> tmp(feSpace, "tmp");
    tmp.set(MPI::COMM_WORLD.Get_rank());
    VtkWriter::writeFile(&tmp, oss.str());
  }
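  // Returns true if the element pair (rankElIndex, neighElIndex) matches the
  // boundary selected via the "parallel->debug->follow boundary" parameter,
  // which is expected to contain exactly two element indices.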
  bool ParallelDebug::followThisBound(int rankElIndex, int neighElIndex)
  {
    FUNCNAME("ParallelDebug::followThisBound()");

    int el0 = std::min(rankElIndex, neighElIndex);
    int el1 = std::max(rankElIndex, neighElIndex);

    vector<int> els;
    Parameters::get("parallel->debug->follow boundary", els);
    if (els.size() != 2)
      return false;

    return (el0 == els[0] && el1 == els[1]);
  }


  void ParallelDebug::followBoundary(MeshDistributor &pdb)
  {
    FUNCNAME("ParallelDebug::followBoundary()");

    for (InteriorBoundary::iterator it(pdb.intBoundary.own); !it.end(); ++it)
      if (followThisBound(it->rankObj.elIndex, it->neighObj.elIndex))
        debug::writeLocalElementDofs(pdb.mpiRank,
                                     it->rankObj.elIndex,
                                     pdb.feSpaces[0]);

    for (InteriorBoundary::iterator it(pdb.intBoundary.other); !it.end(); ++it)
      if (followThisBound(it->rankObj.elIndex, it->neighObj.elIndex))
        debug::writeLocalElementDofs(pdb.mpiRank,
                                     it->rankObj.elIndex,
                                     pdb.feSpaces[0]);
  }


  void ParallelDebug::followBoundary(Mesh *mesh,
                                     AtomicBoundary &bound,
                                     MeshStructure &code)
  {
    FUNCNAME("ParallelDebug::followBoundary()");

    if (mesh->getDim() != bound.rankObj.subObj)
      return;

    if (!followThisBound(bound.rankObj.elIndex, bound.neighObj.elIndex))
      return;

    MSG("Mesh structure code of bound %d/%d <-> %d/%d: %s\n",
        bound.rankObj.elIndex, mesh->getDim(),
        bound.neighObj.elIndex, mesh->getDim(),
        code.toStr().c_str());
  }

}