#include "ParallelDebug.h" #include "MeshDistributor.h" #include "PartitionElementData.h" #include "ProblemVec.h" #include "DOFVector.h" #include "FixVec.h" #include "StdMpi.h" #include "Debug.h" #include "MpiHelper.h" namespace AMDiS { void ParallelDebug::testInteriorBoundary(MeshDistributor &pdb) { FUNCNAME("ParallelDebug::testInteriorBoundary()"); typedef MeshDistributor::RankToBoundMap RankToBoundMap; std::vector sendBuffers, recvBuffers; MPI::Request request[pdb.myIntBoundary.boundary.size() + pdb.otherIntBoundary.boundary.size() + pdb.periodicBoundary.boundary.size() * 2]; int requestCounter = 0; // === Send rank's boundary information. === for (RankToBoundMap::iterator rankIt = pdb.myIntBoundary.boundary.begin(); rankIt != pdb.myIntBoundary.boundary.end(); ++rankIt) { int nSendInt = rankIt->second.size(); int* buffer = new int[nSendInt]; for (int i = 0; i < nSendInt; i++) buffer[i] = (rankIt->second)[i].rankObj.elIndex; sendBuffers.push_back(buffer); request[requestCounter++] = pdb.mpiComm.Isend(buffer, nSendInt, MPI_INT, rankIt->first, 0); } // === Receive information from other ranks about the interior boundaries. ==== for (RankToBoundMap::iterator rankIt = pdb.otherIntBoundary.boundary.begin(); rankIt != pdb.otherIntBoundary.boundary.end(); ++rankIt) { int nRecvInt = rankIt->second.size(); int *buffer = new int[nRecvInt]; recvBuffers.push_back(buffer); request[requestCounter++] = pdb.mpiComm.Irecv(buffer, nRecvInt, MPI_INT, rankIt->first, 0); } // === To the last, do the same of periodic boundaries. === for (RankToBoundMap::iterator rankIt = pdb.periodicBoundary.boundary.begin(); rankIt != pdb.periodicBoundary.boundary.end(); ++rankIt) { int nValues = rankIt->second.size(); int* sBuffer = new int[nValues]; for (int i = 0; i < nValues; i++) sBuffer[i] = (rankIt->second)[i].rankObj.elIndex; sendBuffers.push_back(sBuffer); request[requestCounter++] = pdb.mpiComm.Isend(sBuffer, nValues, MPI_INT, rankIt->first, 0); int *rBuffer = new int[nValues]; recvBuffers.push_back(rBuffer); request[requestCounter++] = pdb.mpiComm.Irecv(rBuffer, nValues, MPI_INT, rankIt->first, 0); } // === Finish communication and delete all send buffers. === MPI::Request::Waitall(requestCounter, request); for (int i = 0; i < static_cast(sendBuffers.size()); i++) delete [] sendBuffers[i]; // === Finally, check the results, i.e., the indices of element at the === // === boundaries, if they fit together. First check the interior bounds, === // === and after this the periodic ones. 
  void ParallelDebug::testCommonDofs(MeshDistributor &pdb, bool printCoords)
  {
    FUNCNAME("ParallelDebug::testCommonDofs()");

    clock_t first = clock();

    int testCommonDofs = 1;
    GET_PARAMETER(0, "dbg->test common dofs", "%d", &testCommonDofs);
    if (testCommonDofs == 0) {
      MSG("Skip test common dofs!\n");
      return;
    }

    /// Defines a mapping type from rank numbers to sets of coordinates.
    typedef std::map<int, std::vector<WorldVector<double> > > RankToCoords;

    /// Defines a mapping type from rank numbers to sets of DOFs.
    typedef std::map<int, DofContainer> RankToDofContainer;

    // Maps to each neighbour rank an array of WorldVectors. This array contains
    // the coordinates of all DOFs this rank shares on the interior boundary with
    // the neighbour rank. A rank sends the coordinates to another rank if it owns
    // the boundary's DOFs.
    RankToCoords sendCoords;

    // A rank receives all boundary DOFs that are at its interior boundaries but
    // are not owned by the rank. This map stores for each rank the coordinates of
    // the DOFs this rank expects to receive from it.
    RankToCoords recvCoords;

    DOFVector<WorldVector<double> > coords(pdb.feSpace, "dofCoords");
    pdb.mesh->getDofIndexCoords(pdb.feSpace, coords);

    for (RankToDofContainer::iterator it = pdb.sendDofs.begin();
         it != pdb.sendDofs.end(); ++it)
      for (DofContainer::iterator dofIt = it->second.begin();
           dofIt != it->second.end(); ++dofIt)
        sendCoords[it->first].push_back(coords[**dofIt]);

    for (RankToDofContainer::iterator it = pdb.recvDofs.begin();
         it != pdb.recvDofs.end(); ++it)
      for (DofContainer::iterator dofIt = it->second.begin();
           dofIt != it->second.end(); ++dofIt)
        recvCoords[it->first].push_back(coords[**dofIt]);

    std::vector<int> sendSize(pdb.mpiSize, 0);
    std::vector<int> recvSize(pdb.mpiSize, 0);
    std::vector<int> recvSizeBuffer(pdb.mpiSize, 0);
    MPI::Request request[(pdb.mpiSize - 1) * 2];
    int requestCounter = 0;

    for (RankToCoords::iterator it = sendCoords.begin(); it != sendCoords.end(); ++it)
      sendSize[it->first] = it->second.size();

    for (RankToCoords::iterator it = recvCoords.begin(); it != recvCoords.end(); ++it)
      recvSize[it->first] = it->second.size();

    for (int i = 0; i < pdb.mpiSize; i++) {
      if (i == pdb.mpiRank)
        continue;

      request[requestCounter++] =
        pdb.mpiComm.Isend(&(sendSize[i]), 1, MPI_INT, i, 0);
    }

    for (int i = 0; i < pdb.mpiSize; i++) {
      if (i == pdb.mpiRank)
        continue;

      request[requestCounter++] =
        pdb.mpiComm.Irecv(&(recvSizeBuffer[i]), 1, MPI_INT, i, 0);
    }

    MPI::Request::Waitall(requestCounter, request);

    int foundError = 0;
    for (int i = 0; i < pdb.mpiSize; i++) {
      if (i == pdb.mpiRank)
        continue;

      if (recvSize[i] != recvSizeBuffer[i]) {
        ERROR("MPI rank %d expects to receive %d DOFs from rank %d, but that rank sends %d DOFs!\n",
              pdb.mpiRank, recvSize[i], i, recvSizeBuffer[i]);
        foundError = 1;
      }
    }
    mpi::globalAdd(foundError);
    TEST_EXIT(foundError == 0)("Error found on at least one rank!\n");


    // === Now we know that the number of sent and received DOFs fits together. ===
    // === So we can check if also the coordinates of the communicated DOFs are ===
    // === the same on both corresponding ranks.                                ===

    typedef std::vector<WorldVector<double> > CoordsVec;
    StdMpi<CoordsVec> stdMpi(pdb.mpiComm, true);
    stdMpi.send(sendCoords);
    stdMpi.recv(recvCoords);
    stdMpi.startCommunication<double>(MPI_DOUBLE);

    int dimOfWorld = Global::getGeo(WORLD);


    // === Compare the received with the expected coordinates. ===

    for (RankToCoords::iterator it = stdMpi.getRecvData().begin();
         it != stdMpi.getRecvData().end(); ++it) {
      for (unsigned int i = 0; i < it->second.size(); i++) {
        WorldVector<double> tmp = (it->second)[i];
        tmp -= recvCoords[it->first][i];

        if (norm(tmp) > 1e-13) {
          // === Print an error message if the coordinates are not the same. ===
          if (printCoords) {
            MSG("[DBG] i = %d\n", i);
            std::stringstream oss;
            oss.precision(5);
            oss << "[DBG] Rank " << pdb.mpiRank << " from rank " << it->first
                << " expect coords (";
            for (int k = 0; k < dimOfWorld; k++) {
              oss << recvCoords[it->first][i][k];
              if (k + 1 < dimOfWorld)
                oss << " / ";
            }
            oss << ") received coords (";
            for (int k = 0; k < dimOfWorld; k++) {
              oss << (it->second)[i][k];
              if (k + 1 < dimOfWorld)
                oss << " / ";
            }
            oss << ")";
            MSG("%s\n", oss.str().c_str());

            debug::printInfoByDof(pdb.feSpace, *(pdb.recvDofs[it->first][i]));
          }
          ERROR("Wrong DOFs in rank %d!\n", pdb.mpiRank);
          foundError = 1;
        }
      }
    }
    mpi::globalAdd(foundError);
    TEST_EXIT(foundError == 0)("Error found on at least one rank!\n");

    INFO(pdb.info, 8)("Test common dofs needed %.5f seconds\n",
                      TIME_USED(first, clock()));
  }
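
  // Checks that every leaf element of the mesh belongs to exactly one rank's
  // partition; the per-element ownership flag is summed over all ranks with an
  // MPI Allreduce.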
  void ParallelDebug::testAllElements(MeshDistributor &pdb)
  {
    FUNCNAME("ParallelDebug::testAllElements()");

    TraverseStack stack;
    ElInfo *elInfo = stack.traverseFirst(pdb.mesh, -1, Mesh::CALL_LEAF_EL);
    while (elInfo) {
      Element *element = elInfo->getElement();
      PartitionElementData *partitionData =
        dynamic_cast<PartitionElementData*>(element->getElementData(PARTITION_ED));

      int sendId = 0;
      if (partitionData->getPartitionStatus() == IN)
        sendId = 1;
      int recvId = 0;
      pdb.mpiComm.Allreduce(&sendId, &recvId, 1, MPI_INT, MPI_SUM);

      if (recvId != 1 && pdb.mpiRank == 0) {
        if (recvId == 0)
          ERROR_EXIT("Element %d is member of no partition!\n", element->getIndex());

        if (recvId > 1)
          ERROR_EXIT("Element %d is member of more than one partition!\n",
                     element->getIndex());
      }

      elInfo = stack.traverseNext(elInfo);
    }
  }


  void ParallelDebug::testDofContainerCommunication(MeshDistributor &pdb,
                                                    RankToDofContainer &sendDofs,
                                                    RankToDofContainer &recvDofs)
  {
    FUNCNAME("ParallelDebug::testDofContainerCommunication()");

    std::map<int, int> sendNumber;
    for (RankToDofContainer::iterator it = sendDofs.begin(); it != sendDofs.end(); ++it)
      sendNumber[it->first] = it->second.size();

    StdMpi<int> stdMpi(pdb.mpiComm);
    stdMpi.send(sendNumber);
    for (RankToDofContainer::iterator it = recvDofs.begin(); it != recvDofs.end(); ++it)
      stdMpi.recv(it->first);
    stdMpi.startCommunication<int>(MPI_INT);

    int foundError = 0;
    for (std::map<int, int>::iterator it = stdMpi.getRecvData().begin();
         it != stdMpi.getRecvData().end(); ++it) {
      if (it->second != static_cast<int>(recvDofs[it->first].size())) {
        ERROR("Rank expects to receive %d DOFs from rank %d, but %d DOFs are received!\n",
              static_cast<int>(recvDofs[it->first].size()), it->first, it->second);
        foundError = 1;
      }
    }

    mpi::globalAdd(foundError);
    TEST_EXIT(foundError == 0)("Error found on at least one rank!\n");
  }
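
  // Debug output: prints, for one rank (or for all ranks if rank == -1), the
  // mapping from local to global DOF indices together with the DOF coordinates
  // and the send/receive relations of each DOF.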
  void ParallelDebug::printMapLocalGlobal(MeshDistributor &pdb, int rank)
  {
    if (rank == -1 || pdb.mpiRank == rank) {
      std::cout << "====== DOF MAP LOCAL -> GLOBAL ====== " << std::endl;

      typedef std::map<DegreeOfFreedom, DegreeOfFreedom> DofMapping;
      typedef std::map<int, DofContainer> RankToDofContainer;

      for (DofMapping::iterator it = pdb.mapLocalGlobalDofs.begin();
           it != pdb.mapLocalGlobalDofs.end(); it++) {
        DegreeOfFreedom localdof = -1;
        if (pdb.mapLocalDofIndex.count(it->first) > 0)
          localdof = pdb.mapLocalDofIndex[it->first];

        std::cout << "DOF " << it->first << " "
                  << it->second << " "
                  << localdof << std::endl;
        WorldVector<double> coords;
        pdb.mesh->getDofIndexCoords(it->first, pdb.feSpace, coords);
        coords.print();

        for (RankToDofContainer::iterator rankit = pdb.sendDofs.begin();
             rankit != pdb.sendDofs.end(); ++rankit) {
          for (DofContainer::iterator dofit = rankit->second.begin();
               dofit != rankit->second.end(); ++dofit)
            if (**dofit == it->first)
              std::cout << "SEND DOF TO " << rankit->first << std::endl;
        }

        for (RankToDofContainer::iterator rankit = pdb.recvDofs.begin();
             rankit != pdb.recvDofs.end(); ++rankit) {
          for (DofContainer::iterator dofit = rankit->second.begin();
               dofit != rankit->second.end(); ++dofit)
            if (**dofit == it->first)
              std::cout << "RECV DOF FROM " << rankit->first << std::endl;
        }

        std::cout << "------" << std::endl;
      }
    }
  }


  void ParallelDebug::printMapPeriodic(MeshDistributor &pdb, int rank)
  {
    FUNCNAME("ParallelDebug::printMapPeriodic()");

    ERROR_EXIT("Function must be rewritten!\n");

#if 0
    typedef std::map<DegreeOfFreedom, DegreeOfFreedom> DofMapping;
    typedef std::map<DegreeOfFreedom, std::set<DegreeOfFreedom> > PeriodicDofMap;

    if (rank == -1 || pdb.mpiRank == rank) {
      std::cout << "====== DOF MAP PERIODIC ====== " << std::endl;

      for (PeriodicDofMap::iterator it = pdb.periodicDof.begin();
           it != pdb.periodicDof.end(); ++it) {
        std::cout << "DOF MAP " << it->first << ": ";
        for (std::set<DegreeOfFreedom>::iterator dofit = it->second.begin();
             dofit != it->second.end(); ++dofit)
          std::cout << *dofit << " ";
        std::cout << std::endl;

        DegreeOfFreedom localdof = -1;
        for (DofMapping::iterator dofIt = pdb.mapLocalGlobalDofs.begin();
             dofIt != pdb.mapLocalGlobalDofs.end(); ++dofIt)
          if (dofIt->second == it->first)
            localdof = dofIt->first;

        TEST_EXIT(localdof != -1)("There is something wrong!\n");

        WorldVector<double> coords;
        pdb.mesh->getDofIndexCoords(localdof, pdb.feSpace, coords);
        coords.print();
      }
    }
#endif
  }


  void ParallelDebug::printRankDofs(MeshDistributor &pdb, int rank,
                                    DofContainer& rankDofs,
                                    DofContainer& rankAllDofs)
  {
    if (rank == -1 || pdb.mpiRank == rank) {
      std::cout << "====== RANK DOF INFORMATION ====== " << std::endl;

      std::cout << "  RANK OWNED DOFS: " << std::endl;
      for (DofContainer::iterator dofit = rankDofs.begin();
           dofit != rankDofs.end(); ++dofit) {
        std::cout << "    " << **dofit << std::endl;
        WorldVector<double> coords;
        pdb.mesh->getDofIndexCoords(*dofit, pdb.feSpace, coords);
        coords.print();
      }

      std::cout << "  RANK ALL DOFS: " << std::endl;
      for (DofContainer::iterator dofit = rankAllDofs.begin();
           dofit != rankAllDofs.end(); ++dofit) {
        std::cout << "    " << **dofit << std::endl;
        WorldVector<double> coords;
        pdb.mesh->getDofIndexCoords(*dofit, pdb.feSpace, coords);
        coords.print();
      }
    }
  }


  void ParallelDebug::printBoundaryInfo(MeshDistributor &pdb)
  {
    FUNCNAME("ParallelDebug::printBoundaryInfo()");

    for (InteriorBoundary::iterator it(pdb.myIntBoundary); !it.end(); ++it) {
      MSG("Rank owned boundary with rank %d: \n", it.getRank());
      MSG("  rank's obj-ind: %d  sub-obj: %d  ith-obj: %d\n",
          it->rankObj.elIndex, it->rankObj.subObj, it->rankObj.ithObj);
      MSG("  neigh obj-ind: %d  sub-obj: %d  ith-obj: %d\n",
          it->neighObj.elIndex, it->neighObj.subObj, it->neighObj.ithObj);
    }

    for (InteriorBoundary::iterator it(pdb.otherIntBoundary); !it.end(); ++it) {
      MSG("Other owned boundary with rank %d: \n", it.getRank());
      MSG("  rank's obj-ind: %d  sub-obj: %d  ith-obj: %d\n",
          it->rankObj.elIndex, it->rankObj.subObj, it->rankObj.ithObj);
      MSG("  neigh obj-ind: %d  sub-obj: %d  ith-obj: %d\n",
          it->neighObj.elIndex, it->neighObj.subObj, it->neighObj.ithObj);
    }

    for (InteriorBoundary::iterator it(pdb.periodicBoundary); !it.end(); ++it) {
      MSG("Periodic boundary with rank %d: \n", it.getRank());
      MSG("  rank's obj-ind: %d  sub-obj: %d  ith-obj: %d\n",
          it->rankObj.elIndex, it->rankObj.subObj, it->rankObj.ithObj);
      MSG("  neigh obj-ind: %d  sub-obj: %d  ith-obj: %d\n",
          it->neighObj.elIndex, it->neighObj.subObj, it->neighObj.ithObj);
    }
  }
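
  // Writes a per-rank text file "<prefix>-<rank>.<postfix>" containing the
  // number of used DOFs followed by one line per DOF with its global index and
  // its world coordinates.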
  void ParallelDebug::writeCoordsFile(MeshDistributor &pdb,
                                      std::string prefix, std::string postfix)
  {
    FUNCNAME("ParallelDebug::writeCoordsFile()");

    std::stringstream filename;
    filename << prefix << "-" << pdb.mpiRank << "." << postfix;

    DOFVector<WorldVector<double> > coords(pdb.feSpace, "tmp");
    pdb.mesh->getDofIndexCoords(pdb.feSpace, coords);

    std::ofstream file;
    file.open(filename.str().c_str());
    file << coords.getUsedSize() << "\n";
    DOFIterator<WorldVector<double> > it(&coords, USED_DOFS);
    for (it.reset(); !it.end(); ++it) {
      file << pdb.mapLocalGlobalDofs[it.getDOFIndex()];
      for (int i = 0; i < pdb.mesh->getDim(); i++)
        file << " " << (*it)[i];
      file << "\n";
    }
    file.close();
  }

}