diff --git a/AMDiS/bin/Makefile.am b/AMDiS/bin/Makefile.am index 5e5ae77862efc1febdda152e8073cd15c78d52e6..2e236da852981fba0b14fe95d8cc46e5e9c42142 100644 --- a/AMDiS/bin/Makefile.am +++ b/AMDiS/bin/Makefile.am @@ -30,6 +30,7 @@ if USE_PARALLEL_DOMAIN_AMDIS PARALLEL_AMDIS_SOURCES += \ $(SOURCE_DIR)/parallel/StdMpi.h $(SOURCE_DIR)/parallel/StdMpi.cc \ $(SOURCE_DIR)/parallel/ParallelDomainBase.h $(SOURCE_DIR)/parallel/ParallelDomainBase.cc \ + $(SOURCE_DIR)/parallel/ParallelDomainDbg.h $(SOURCE_DIR)/parallel/ParallelDomainDbg.cc \ $(SOURCE_DIR)/parallel/GlobalMatrixSolver.h $(SOURCE_DIR)/parallel/GlobalMatrixSolver.cc \ $(SOURCE_DIR)/parallel/MpiHelper.h $(SOURCE_DIR)/parallel/MpiHelper.cc libamdis_la_CXXFLAGS += -DHAVE_PARALLEL_DOMAIN_AMDIS=1 diff --git a/AMDiS/bin/Makefile.in b/AMDiS/bin/Makefile.in index 94777480f03113c35bd995e01349a9af9ef0874f..0a4a66cbe7c70233c1e73e13bcbd9d5515301b6e 100644 --- a/AMDiS/bin/Makefile.in +++ b/AMDiS/bin/Makefile.in @@ -40,6 +40,7 @@ host_triplet = @host@ @USE_PARALLEL_DOMAIN_AMDIS_TRUE@am__append_2 = \ @USE_PARALLEL_DOMAIN_AMDIS_TRUE@ $(SOURCE_DIR)/parallel/StdMpi.h $(SOURCE_DIR)/parallel/StdMpi.cc \ @USE_PARALLEL_DOMAIN_AMDIS_TRUE@ $(SOURCE_DIR)/parallel/ParallelDomainBase.h $(SOURCE_DIR)/parallel/ParallelDomainBase.cc \ +@USE_PARALLEL_DOMAIN_AMDIS_TRUE@ $(SOURCE_DIR)/parallel/ParallelDomainDbg.h $(SOURCE_DIR)/parallel/ParallelDomainDbg.cc \ @USE_PARALLEL_DOMAIN_AMDIS_TRUE@ $(SOURCE_DIR)/parallel/GlobalMatrixSolver.h $(SOURCE_DIR)/parallel/GlobalMatrixSolver.cc \ @USE_PARALLEL_DOMAIN_AMDIS_TRUE@ $(SOURCE_DIR)/parallel/MpiHelper.h $(SOURCE_DIR)/parallel/MpiHelper.cc @@ -78,6 +79,8 @@ am__libamdis_la_SOURCES_DIST = $(SOURCE_DIR)/parallel/StdMpi.h \ $(SOURCE_DIR)/parallel/StdMpi.cc \ $(SOURCE_DIR)/parallel/ParallelDomainBase.h \ $(SOURCE_DIR)/parallel/ParallelDomainBase.cc \ + $(SOURCE_DIR)/parallel/ParallelDomainDbg.h \ + $(SOURCE_DIR)/parallel/ParallelDomainDbg.cc \ $(SOURCE_DIR)/parallel/GlobalMatrixSolver.h \ $(SOURCE_DIR)/parallel/GlobalMatrixSolver.cc \ $(SOURCE_DIR)/parallel/MpiHelper.h \ @@ -235,6 +238,7 @@ am__libamdis_la_SOURCES_DIST = $(SOURCE_DIR)/parallel/StdMpi.h \ $(SOURCE_DIR)/Debug.h $(SOURCE_DIR)/Debug.cc @USE_PARALLEL_DOMAIN_AMDIS_TRUE@am__objects_1 = libamdis_la-StdMpi.lo \ @USE_PARALLEL_DOMAIN_AMDIS_TRUE@ libamdis_la-ParallelDomainBase.lo \ +@USE_PARALLEL_DOMAIN_AMDIS_TRUE@ libamdis_la-ParallelDomainDbg.lo \ @USE_PARALLEL_DOMAIN_AMDIS_TRUE@ libamdis_la-GlobalMatrixSolver.lo \ @USE_PARALLEL_DOMAIN_AMDIS_TRUE@ libamdis_la-MpiHelper.lo @USE_PARALLEL_AMDIS_FALSE@am__objects_2 = $(am__objects_1) @@ -786,6 +790,7 @@ distclean-compile: @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libamdis_la-Operator.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libamdis_la-ParMetisPartitioner.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libamdis_la-ParallelDomainBase.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libamdis_la-ParallelDomainDbg.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libamdis_la-ParallelProblem.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libamdis_la-Parameters.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libamdis_la-Parametric.Plo@am__quote@ @@ -874,6 +879,13 @@ libamdis_la-ParallelDomainBase.lo: $(SOURCE_DIR)/parallel/ParallelDomainBase.cc @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) 
$(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libamdis_la_CXXFLAGS) $(CXXFLAGS) -c -o libamdis_la-ParallelDomainBase.lo `test -f '$(SOURCE_DIR)/parallel/ParallelDomainBase.cc' || echo '$(srcdir)/'`$(SOURCE_DIR)/parallel/ParallelDomainBase.cc +libamdis_la-ParallelDomainDbg.lo: $(SOURCE_DIR)/parallel/ParallelDomainDbg.cc +@am__fastdepCXX_TRUE@ if $(LIBTOOL) --tag=CXX --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libamdis_la_CXXFLAGS) $(CXXFLAGS) -MT libamdis_la-ParallelDomainDbg.lo -MD -MP -MF "$(DEPDIR)/libamdis_la-ParallelDomainDbg.Tpo" -c -o libamdis_la-ParallelDomainDbg.lo `test -f '$(SOURCE_DIR)/parallel/ParallelDomainDbg.cc' || echo '$(srcdir)/'`$(SOURCE_DIR)/parallel/ParallelDomainDbg.cc; \ +@am__fastdepCXX_TRUE@ then mv -f "$(DEPDIR)/libamdis_la-ParallelDomainDbg.Tpo" "$(DEPDIR)/libamdis_la-ParallelDomainDbg.Plo"; else rm -f "$(DEPDIR)/libamdis_la-ParallelDomainDbg.Tpo"; exit 1; fi +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(SOURCE_DIR)/parallel/ParallelDomainDbg.cc' object='libamdis_la-ParallelDomainDbg.lo' libtool=yes @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libamdis_la_CXXFLAGS) $(CXXFLAGS) -c -o libamdis_la-ParallelDomainDbg.lo `test -f '$(SOURCE_DIR)/parallel/ParallelDomainDbg.cc' || echo '$(srcdir)/'`$(SOURCE_DIR)/parallel/ParallelDomainDbg.cc + libamdis_la-GlobalMatrixSolver.lo: $(SOURCE_DIR)/parallel/GlobalMatrixSolver.cc @am__fastdepCXX_TRUE@ if $(LIBTOOL) --tag=CXX --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libamdis_la_CXXFLAGS) $(CXXFLAGS) -MT libamdis_la-GlobalMatrixSolver.lo -MD -MP -MF "$(DEPDIR)/libamdis_la-GlobalMatrixSolver.Tpo" -c -o libamdis_la-GlobalMatrixSolver.lo `test -f '$(SOURCE_DIR)/parallel/GlobalMatrixSolver.cc' || echo '$(srcdir)/'`$(SOURCE_DIR)/parallel/GlobalMatrixSolver.cc; \ @am__fastdepCXX_TRUE@ then mv -f "$(DEPDIR)/libamdis_la-GlobalMatrixSolver.Tpo" "$(DEPDIR)/libamdis_la-GlobalMatrixSolver.Plo"; else rm -f "$(DEPDIR)/libamdis_la-GlobalMatrixSolver.Tpo"; exit 1; fi diff --git a/AMDiS/src/Debug.cc b/AMDiS/src/Debug.cc index 220368ebf32301fa501e44991c2724d1328b6b38..6b95cf81deaba3ac7ac80eb38e63b4bd0d353cd0 100644 --- a/AMDiS/src/Debug.cc +++ b/AMDiS/src/Debug.cc @@ -22,6 +22,7 @@ namespace AMDiS { VtkWriter::writeFile(tmp, "tmp" + lexical_cast<std::string>(elIdx) + ".vtu"); } } + void writeDofMesh(int rank, DegreeOfFreedom dof, FiniteElemSpace *feSpace) { @@ -34,6 +35,7 @@ namespace AMDiS { VtkWriter::writeFile(tmp, "dofmesh" + lexical_cast<std::string>(rank) + ".vtu"); } } + void writeMesh(FiniteElemSpace *feSpace, int rank, std::string filename) { @@ -46,6 +48,7 @@ namespace AMDiS { } } #endif + void colorDofVectorByLocalElementDofs(DOFVector<double>& vec, Element *el) { @@ -62,6 +65,7 @@ namespace AMDiS { for (int i = 0; i < nBasisFcts; i++) vec[localDofs[i]] = static_cast<double>(i); } + bool colorDofVectorByLocalElementDofs(DOFVector<double>& vec, Mesh *mesh, int elIndex) @@ -80,6 +84,7 @@ namespace AMDiS { return false; } + Element* getDofIndexElement(FiniteElemSpace *feSpace, DegreeOfFreedom dof) { @@ -101,6 +106,7 @@ namespace AMDiS { return NULL; } + Element* getLevel0ParentElement(Mesh *mesh, Element *el) { @@ -115,6 +121,7 @@ namespace AMDiS { return NULL; } + void printInfoByDof(FiniteElemSpace *feSpace, DegreeOfFreedom dof) { @@ -137,6 +144,7 @@ 
namespace AMDiS { } } + void printMatValuesStatistics(Matrix<DOFMatrix*> *mat) { std::map<int, int> counter; @@ -178,6 +186,7 @@ namespace AMDiS { << it->second << std::endl; } + void writeElementIndexMesh(FiniteElemSpace *feSpace, std::string filename) { std::map<int, double> vec; @@ -193,6 +202,7 @@ namespace AMDiS { ElementFileWriter::writeFile(vec, feSpace, filename); } + void writeMatlabMatrix(DOFMatrix &mat, std::string filename) { using mtl::tag::major; using mtl::tag::nz; using mtl::begin; using mtl::end; @@ -219,6 +229,7 @@ namespace AMDiS { out.close(); } + void writeMatlabVector(DOFVector<double> &vec, std::string filename) { std::ofstream out; @@ -232,6 +243,106 @@ namespace AMDiS { out.close(); } + + void createSortedDofs(Mesh *mesh, ElementIdxToDofs &elMap) + { + FUNCNAME("debug::createSortedDofs()"); + + int dim = mesh->getDim(); + elMap.clear(); + + TraverseStack stack; + ElInfo *elInfo = stack.traverseFirst(mesh, -1, Mesh::CALL_LEAF_EL); + while (elInfo) { + Element *el = elInfo->getElement(); + switch (dim) { + case 2: + sortDofs(el->getDOF(0), el->getDOF(1), el->getDOF(2), elMap[el->getIndex()]); + break; + case 3: + sortDofs(el->getDOF(0), el->getDOF(1), el->getDOF(2), el->getDOF(3), elMap[el->getIndex()]); + break; + default: + ERROR_EXIT("What is this?\n"); + } + elInfo = stack.traverseNext(elInfo); + } + } + + + void testSortedDofs(Mesh *mesh, ElementIdxToDofs &elMap) + { + FUNCNAME("debug::testSortedDofs()"); + + int dim = mesh->getDim(); + int nVertex = Global::getGeo(VERTEX, dim); + DofContainer vec(nVertex); + + TraverseStack stack; + ElInfo *elInfo = stack.traverseFirst(mesh, -1, Mesh::CALL_LEAF_EL); + while (elInfo) { + Element *el = elInfo->getElement(); + + switch (dim) { + case 2: + sortDofs(el->getDOF(0), el->getDOF(1), el->getDOF(2), vec); + break; + case 3: + sortDofs(el->getDOF(0), el->getDOF(1), el->getDOF(2), el->getDOF(3), vec); + break; + default: + ERROR_EXIT("What is this?\n"); + } + + for (int i = 0; i < nVertex; i++) { + if (elMap[el->getIndex()][i] != vec[i]) { + MSG("[DBG] Wrong new dof numeration in element = %d!\n", el->getIndex()); + std::cout << "[DBG]: Old numeration was: "; + for (int j = 0; j < nVertex; j++) + std::cout << elMap[el->getIndex()][j] << " = " + << *(elMap[el->getIndex()][j]) << " "; + std::cout << std::endl; + std::cout << "[DBG]: New numeration is: "; + for (int j = 0; j < nVertex; j++) + std::cout << vec[j] << " = " << *(vec[j]) << " "; + std::cout << std::endl; + ERROR_EXIT("WRONG NEW DOF NUMERATION!\n"); + } + } + elInfo = stack.traverseNext(elInfo); + } + } + + + void sortDofs(const DegreeOfFreedom* dof0, + const DegreeOfFreedom* dof1, + const DegreeOfFreedom* dof2, + DofContainer &vec) + { + DofPtrSortFct dofPtrSort; + vec.resize(3); + vec[0] = dof0; + vec[1] = dof1; + vec[2] = dof2; + sort(vec.begin(), vec.end(), dofPtrSort); + } + + void sortDofs(const DegreeOfFreedom* dof0, + const DegreeOfFreedom* dof1, + const DegreeOfFreedom* dof2, + const DegreeOfFreedom* dof3, + DofContainer &vec) + { + DofPtrSortFct dofPtrSort; + vec.resize(4); + vec[0] = dof0; + vec[1] = dof1; + vec[2] = dof2; + vec[3] = dof3; + sort(vec.begin(), vec.end(), dofPtrSort); + } + + } // namespace debug } // namespace AMDiS
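A note on the sort order used above: sortDofs() sorts DOF *pointers* by the *values* they point to, so the recorded order is a property of the DOF numbering, not of memory addresses. The following standalone sketch illustrates the comparator idea; it is not part of the patch, and DegreeOfFreedom and DofContainer are modeled here by plain typedefs for illustration only:

    // Standalone sketch: sort DOF pointers by their pointed-to values.
    #include <algorithm>
    #include <iostream>
    #include <vector>

    typedef int DegreeOfFreedom;                        // stand-in for the AMDiS type
    typedef std::vector<const DegreeOfFreedom*> DofContainer;

    struct DofPtrSortFct {
      bool operator() (const DegreeOfFreedom *dof0, const DegreeOfFreedom *dof1)
      {
        return (*dof0 < *dof1);                         // compare values, not addresses
      }
    };

    int main()
    {
      DegreeOfFreedom a = 7, b = 2, c = 5;
      DofContainer vec;
      vec.push_back(&a);
      vec.push_back(&b);
      vec.push_back(&c);
      std::sort(vec.begin(), vec.end(), DofPtrSortFct());
      for (std::size_t i = 0; i < vec.size(); i++)
        std::cout << *vec[i] << " ";                    // prints: 2 5 7
      std::cout << std::endl;
      return 0;
    }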
diff --git a/AMDiS/src/Debug.h b/AMDiS/src/Debug.h index 1d9f8572bb985053633f97d60c0a7719e3064d68..e302eec550ad02eb67f2d8e5b29a79ecb66395b3 100644 --- a/AMDiS/src/Debug.h +++ b/AMDiS/src/Debug.h @@ -28,6 +28,16 @@ namespace AMDiS { namespace debug { + + struct DofPtrSortFct { + bool operator() (const DegreeOfFreedom *dof0, const DegreeOfFreedom *dof1) + { + return (*dof0 < *dof1); + } + }; + + typedef std::map<int, DofContainer> ElementIdxToDofs; + #ifdef HAVE_PARALLEL_DOMAIN_AMDIS void writeLocalElementDofs(int rank, int elIdx, FiniteElemSpace *feSpace); @@ -55,6 +65,47 @@ namespace AMDiS { void writeMatlabMatrix(DOFMatrix &mat, std::string filename); void writeMatlabVector(DOFVector<double> &vec, std::string filename); + + /** \brief + * Traverses a mesh and stores for each element all its vertex DOFs in locally + * sorted order (sorted by DOF values). + * + * \param[in] mesh Mesh to be traversed. + * \param[out] elMap Stores for each element the vertex DOFs in sorted order. + */ + void createSortedDofs(Mesh *mesh, ElementIdxToDofs &elMap); + + /** \brief + * Takes a map from element indices to lists of DOFs and checks if for each element + * in the mesh the vertex value order is still valid. + * + * The element index map must be created by the function \ref createSortedDofs. Using + * both functions it can be checked that a renumbering of DOFs does not change the + * local vertex value order (which AMDiS requires to be the same on each + * element). + * + * If the test fails, the function prints some debug information to the screen and + * terminates the program. + * + * \param[in] mesh Mesh to be traversed. + * \param[in] elMap Map from element indices to lists of DOFs. It is used to check + * the validity as described above. + */ + void testSortedDofs(Mesh *mesh, ElementIdxToDofs &elMap); + + /// Takes three DOFs and returns a list with the DOFs sorted by their values. + void sortDofs(const DegreeOfFreedom* dof0, + const DegreeOfFreedom* dof1, + const DegreeOfFreedom* dof2, + DofContainer &vec); + + /// Takes four DOFs and returns a list with the DOFs sorted by their values. + void sortDofs(const DegreeOfFreedom* dof0, + const DegreeOfFreedom* dof1, + const DegreeOfFreedom* dof2, + const DegreeOfFreedom* dof3, + DofContainer &vec); + } }
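The intended call pattern for the two checks declared above is a bracket around any operation that may renumber DOFs. A hedged usage sketch (it assumes an AMDiS debug build with Debug.h available; checkedRenumbering and the renumbering step in the middle are placeholders for illustration, not AMDiS API):

    #include "Debug.h"

    // Verify that a DOF renumbering keeps the per-element vertex value order.
    void checkedRenumbering(AMDiS::Mesh *mesh)
    {
      AMDiS::debug::ElementIdxToDofs elMap;

      // Record, per element, the vertex DOFs sorted by value.
      AMDiS::debug::createSortedDofs(mesh, elMap);

      // ... perform the DOF renumbering under test here ...

      // Re-sort and compare; prints diagnostics and calls ERROR_EXIT
      // if the relative order changed on any element.
      AMDiS::debug::testSortedDofs(mesh, elMap);
    }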
diff --git a/AMDiS/src/parallel/GlobalMatrixSolver.cc b/AMDiS/src/parallel/GlobalMatrixSolver.cc index 57838d521fab4bc210ce43aad14118465d8f9eb1..a8370cfb7bc549bb32de20b95939b74fddfec047 100644 --- a/AMDiS/src/parallel/GlobalMatrixSolver.cc +++ b/AMDiS/src/parallel/GlobalMatrixSolver.cc @@ -374,8 +374,15 @@ namespace AMDiS { VecSetSizes(petscTmpVec, nRankRows, nOverallRows); VecSetType(petscTmpVec, VECMPI); - if (!d_nnz) + if (!d_nnz || lastMeshChangeIndex != lastMeshNnz) { + if (d_nnz) { + delete [] d_nnz; + delete [] o_nnz; + } + + createPetscNnzStructure(mat); + lastMeshNnz = lastMeshChangeIndex; + } // === Create PETSc matrix with the computed nnz data structure. === diff --git a/AMDiS/src/parallel/GlobalMatrixSolver.h b/AMDiS/src/parallel/GlobalMatrixSolver.h index c5087f46f36111829b05660366b83dc5ae1aac1e..165c905679e84888975f34d984052e7f80f59d07 100644 --- a/AMDiS/src/parallel/GlobalMatrixSolver.h +++ b/AMDiS/src/parallel/GlobalMatrixSolver.h @@ -40,7 +40,8 @@ namespace AMDiS { GlobalMatrixSolver(ProblemVec *problemStat, ProblemInstatVec *problemInstat) : ParallelDomainBase(problemStat, problemInstat), d_nnz(NULL), - o_nnz(NULL) + o_nnz(NULL), + lastMeshNnz(0) {} ~GlobalMatrixSolver() @@ -84,6 +85,14 @@ namespace AMDiS { /// Arrays definig the non zero pattern of Petsc's matrix. int *d_nnz, *o_nnz; + + /** \brief + * Stores the mesh change index of the mesh for which the nnz structure was created. + * If the current mesh change index is higher than this value, a new nnz structure + * must be created for the PETSc matrices, because the mesh, and with it the + * structure of the assembled matrix, has changed. + */ + int lastMeshNnz; }; typedef GlobalMatrixSolver ParallelDomainVec;
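The change above replaces the unconditional reuse of the nnz arrays with a mesh-change check, so the PETSc nonzero structure is rebuilt exactly when the mesh (and hence the matrix sparsity pattern) has changed. A minimal sketch of this cache-invalidation rule, factored out of the PETSc specifics (NnzCache, update() and nRankRows are illustrative names, not AMDiS API):

    #include <cstddef>

    struct NnzCache {
      int *d_nnz;        // nonzeros per row, diagonal block
      int *o_nnz;        // nonzeros per row, off-diagonal block
      long lastMeshNnz;  // mesh change index the structure was built for

      NnzCache() : d_nnz(NULL), o_nnz(NULL), lastMeshNnz(0) {}

      void update(long lastMeshChangeIndex, int nRankRows)
      {
        // Rebuild only if nothing is cached yet or the mesh has changed
        // since the cached structure was computed -- the same test as in
        // the hunk above.
        if (d_nnz && lastMeshChangeIndex == lastMeshNnz)
          return;

        delete [] d_nnz;              // deleting NULL is a no-op
        delete [] o_nnz;
        d_nnz = new int[nRankRows];   // stands in for createPetscNnzStructure()
        o_nnz = new int[nRankRows];
        lastMeshNnz = lastMeshChangeIndex;
      }
    };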
diff --git a/AMDiS/src/parallel/ParallelDomainBase.cc b/AMDiS/src/parallel/ParallelDomainBase.cc index 0f3697fafba27e318a83bab3c7ddf6c741076bd2..2cc0f8b7f8e91683e2812002031e2791b297f529 100644 --- a/AMDiS/src/parallel/ParallelDomainBase.cc +++ b/AMDiS/src/parallel/ParallelDomainBase.cc @@ -3,6 +3,7 @@ #include <fstream> #include <boost/lexical_cast.hpp> #include "parallel/ParallelDomainBase.h" +#include "parallel/ParallelDomainDbg.h" #include "parallel/StdMpi.h" #include "ParMetisPartitioner.h" #include "Mesh.h" @@ -116,8 +117,8 @@ namespace AMDiS { partitionMesh(adaptInfo); #if (DEBUG != 0) - ElementIdxToDofs elMap; - dbgCreateElementMap(elMap); + debug::ElementIdxToDofs elMap; + debug::createSortedDofs(mesh, elMap); if (mpiRank == 0) { int writePartMesh = 1; GET_PARAMETER(0, "dbg->write part mesh", "%d", &writePartMesh); @@ -149,9 +150,9 @@ namespace AMDiS { #if (DEBUG != 0) MSG("AMDiS runs in debug mode, so make some test ...\n"); - dbgTestElementMap(elMap); - dbgTestInteriorBoundary(); - dbgTestCommonDofs(true); + debug::testSortedDofs(mesh, elMap); + ParallelDomainDbg::testInteriorBoundary(*this); + ParallelDomainDbg::testCommonDofs(*this, true); MSG("Debug mode tests finished!\n"); debug::writeMesh(feSpace, -1, "macromesh"); @@ -405,21 +406,6 @@ namespace AMDiS { // === Because the mesh has been changed, update the DOF numbering and mappings. === updateLocalGlobalNumbering(); - - // === If there is a non zero pattern computed for Petsc matrix, delete it. So === - === it will be recomputed after next assembling.
=== - -#if 0 - if (d_nnz) { - delete [] d_nnz; - d_nnz = NULL; - } - - if (o_nnz) { - delete [] o_nnz; - o_nnz = NULL; - } -#endif } @@ -1424,8 +1410,8 @@ namespace AMDiS { FUNCNAME("ParallelDomainBase::updateLocalGlobalNumbering()"); #if (DEBUG != 0) - ElementIdxToDofs elMap; - dbgCreateElementMap(elMap); + debug::ElementIdxToDofs elMap; + debug::createSortedDofs(mesh, elMap); #endif typedef std::set<const DegreeOfFreedom*> DofSet; @@ -1602,8 +1588,8 @@ namespace AMDiS { lastMeshChangeIndex = mesh->getChangeIndex(); #if (DEBUG != 0) - dbgTestElementMap(elMap); - dbgTestCommonDofs(true); + debug::testSortedDofs(mesh, elMap); + ParallelDomainDbg::testCommonDofs(*this, true); #endif } @@ -1861,7 +1847,7 @@ namespace AMDiS { Flag ParallelDomainBase::buildAndAdapt(AdaptInfo *adaptInfo, Flag toDo) { - FUNCNAME("StandardProblemIteration::buildAndAdapt()"); + FUNCNAME("ParallelDomainBase::buildAndAdapt()"); Flag flag = 0, markFlag = 0; ProblemStatBase *problem = iterationIF->getProblem(); @@ -2002,364 +1988,8 @@ namespace AMDiS { data[dof] = dofSet; } } - - void ParallelDomainBase::dbgCreateElementMap(ElementIdxToDofs &elMap) - { - FUNCNAME("ParallelDomainBase::dbgCreateElementMap()"); - - int dim = mesh->getDim(); - elMap.clear(); - - TraverseStack stack; - ElInfo *elInfo = stack.traverseFirst(mesh, -1, Mesh::CALL_LEAF_EL); - while (elInfo) { - Element *el = elInfo->getElement(); - switch (dim) { - case 2: - orderDofs(el->getDOF(0), el->getDOF(1), el->getDOF(2), elMap[el->getIndex()]); - break; - case 3: - orderDofs(el->getDOF(0), el->getDOF(1), el->getDOF(2), el->getDOF(3), elMap[el->getIndex()]); - break; - default: - ERROR_EXIT("What is this?\n"); - } - elInfo = stack.traverseNext(elInfo); - } - } - - void ParallelDomainBase::dbgTestElementMap(ElementIdxToDofs &elMap) - { - FUNCNAME("ParallelDomainbase::dbgTestElementMap()"); - - int dim = mesh->getDim(); - int nVertex = Global::getGeo(VERTEX, dim); - DofContainer vec(nVertex); - - TraverseStack stack; - ElInfo *elInfo = stack.traverseFirst(mesh, -1, Mesh::CALL_LEAF_EL); - while (elInfo) { - Element *el = elInfo->getElement(); - - switch (dim) { - case 2: - orderDofs(el->getDOF(0), el->getDOF(1), el->getDOF(2), vec); - break; - case 3: - orderDofs(el->getDOF(0), el->getDOF(1), el->getDOF(2), el->getDOF(3), vec); - break; - default: - ERROR_EXIT("What is this?\n"); - } - - for (int i = 0; i < nVertex; i++) { - if (elMap[el->getIndex()][i] != vec[i]) { - std::cout << "[DBG " << mpiRank - << "]: Wrong new dof numeration in element = " - << el->getIndex() << std::endl; - std::cout << "[DBG " << mpiRank << "]: Old numeration was: "; - for (int j = 0; j < nVertex; j++) - std::cout << elMap[el->getIndex()][j] << " = " - << *(elMap[el->getIndex()][j]) << " "; - std::cout << std::endl; - std::cout << "[DBG " << mpiRank << "]: New numeration is: "; - for (int j = 0; j < nVertex; j++) - std::cout << vec[j] << " = " << *(vec[j]) << " "; - std::cout << std::endl; - ERROR_EXIT("WRONG NEW DOF NUMERATION!\n"); - } - } - elInfo = stack.traverseNext(elInfo); - } - } - - void ParallelDomainBase::dbgTestInteriorBoundary() - { - FUNCNAME("ParallelDomainBase::dbgTestInteriorBoundary()"); - - std::vector<int*> sendBuffers, recvBuffers; - - MPI::Request request[myIntBoundary.boundary.size() + - otherIntBoundary.boundary.size()]; - int requestCounter = 0; - - for (RankToBoundMap::iterator rankIt = myIntBoundary.boundary.begin(); - rankIt != myIntBoundary.boundary.end(); ++rankIt) { - - int nSendInt = rankIt->second.size(); - int* buffer = new int[nSendInt]; - for
(int i = 0; i < nSendInt; i++) - buffer[i] = (rankIt->second)[i].rankObj.elIndex; - - sendBuffers.push_back(buffer); - - request[requestCounter++] = - mpiComm.Isend(buffer, nSendInt, MPI_INT, rankIt->first, 0); - } - - for (RankToBoundMap::iterator rankIt = otherIntBoundary.boundary.begin(); - rankIt != otherIntBoundary.boundary.end(); ++rankIt) { - int nRecvInt = rankIt->second.size(); - int *buffer = new int[nRecvInt]; - recvBuffers.push_back(buffer); - - request[requestCounter++] = - mpiComm.Irecv(buffer, nRecvInt, MPI_INT, rankIt->first, 0); - } - - MPI::Request::Waitall(requestCounter, request); - - for (int i = 0; i < static_cast<int>(sendBuffers.size()); i++) - delete [] sendBuffers[i]; - - int bufCounter = 0; - for (RankToBoundMap::iterator rankIt = otherIntBoundary.boundary.begin(); - rankIt != otherIntBoundary.boundary.end(); ++rankIt) { - - TEST_EXIT(rankIt->second.size() == otherIntBoundary.boundary[rankIt->first].size()) - ("Boundaries does not fit together!\n"); - - for (int i = 0; i < static_cast<int>(rankIt->second.size()); i++) { - int elIndex1 = recvBuffers[bufCounter][i]; - int elIndex2 = otherIntBoundary.boundary[rankIt->first][i].neighObj.elIndex; - - TEST_EXIT(elIndex1 == elIndex2)("Wrong element index at interior boundary!\n"); - } - - delete [] recvBuffers[bufCounter++]; - } - } - - - void ParallelDomainBase::dbgTestCommonDofs(bool printCoords) - { - FUNCNAME("ParallelDomainBase::dbgTestCommonDofs()"); - - clock_t first = clock(); - - int testCommonDofs = 1; - GET_PARAMETER(0, "dbg->test common dofs", "%d", &testCommonDofs); - if (testCommonDofs == 0) { - MSG("Skip test common dofs!\n"); - return; - } - - // Maps to each neighbour rank an array of WorldVectors. This array contains the - // coordinates of all dofs this rank shares on the interior boundary with the - // neighbour rank. A rank sends the coordinates to another rank, if it owns the - // boundarys dof. - RankToCoords sendCoords; - - // A rank receives all boundary dofs that are at its interior boundaries but are - // not owned by the rank. This map stores for each rank the coordinates of dofs - // this rank expectes to receive from. 
- RankToCoords recvCoords; - - DOFVector<WorldVector<double> > coords(feSpace, "dofCorrds"); - mesh->getDofIndexCoords(feSpace, coords); - - for (RankToDofContainer::iterator it = sendDofs.begin(); - it != sendDofs.end(); ++it) - for (DofContainer::iterator dofIt = it->second.begin(); - dofIt != it->second.end(); ++dofIt) - sendCoords[it->first].push_back(coords[**dofIt]); - - for (RankToDofContainer::iterator it = recvDofs.begin(); - it != recvDofs.end(); ++it) - for (DofContainer::iterator dofIt = it->second.begin(); - dofIt != it->second.end(); ++dofIt) - recvCoords[it->first].push_back(coords[**dofIt]); - - std::vector<int> sendSize(mpiSize, 0); - std::vector<int> recvSize(mpiSize, 0); - std::vector<int> recvSizeBuffer(mpiSize, 0); - MPI::Request request[(mpiSize - 1) * 2]; - int requestCounter = 0; - - for (RankToCoords::iterator it = sendCoords.begin(); it != sendCoords.end(); ++it) - sendSize[it->first] = it->second.size(); - - for (RankToCoords::iterator it = recvCoords.begin(); it != recvCoords.end(); ++it) - recvSize[it->first] = it->second.size(); - - for (int i = 0; i < mpiSize; i++) { - if (i == mpiRank) - continue; - - request[requestCounter++] = mpiComm.Isend(&(sendSize[i]), 1, MPI_INT, i, 0); - } - - for (int i = 0; i < mpiSize; i++) { - if (i == mpiRank) - continue; - - request[requestCounter++] = mpiComm.Irecv(&(recvSizeBuffer[i]), 1, MPI_INT, i, 0); - } - - MPI::Request::Waitall(requestCounter, request); - - - for (int i = 0; i < mpiSize; i++) { - if (i == mpiRank) - continue; - - if (recvSize[i] != recvSizeBuffer[i]) { - std::cout << "Error: MPI rank " << mpiRank << " expectes to receive " - << recvSize[i] << " DOFs from rank " << i << ". But this rank sends " - << recvSizeBuffer[i] << " DOFs!" << std::endl; - - ERROR_EXIT("Should not happen!\n"); - } - } - - // === Now we know that the number of send and received DOFs fits together. === - // === So we can check if also the coordinates of the communicated DOFs are === - // === the same on both corresponding ranks. === - - typedef std::vector<WorldVector<double> > CoordsVec; - StdMpi<CoordsVec> stdMpi(mpiComm, true); - stdMpi.send(sendCoords); - stdMpi.recv(recvCoords); - stdMpi.startCommunication<double>(MPI_DOUBLE); - - double eps = 1e-13; - int dimOfWorld = Global::getGeo(WORLD); - - // === Compare the received with the expected coordinates. === - - for (RankToCoords::iterator it = stdMpi.getRecvData().begin(); - it != stdMpi.getRecvData().end(); ++it) { - for (int i = 0; i < static_cast<int>(it->second.size()); i++) { - for (int j = 0; j < dimOfWorld; j++) { - bool c = fabs((it->second)[i][j] - recvCoords[it->first][i][j]) < eps; - - // === Print error message if the coordinates are not the same. 
=== - - if (printCoords && !c) { - std::cout.precision(5); - std::cout << "[DBG] i = " << i << std::endl; - std::cout << "[DBG] Rank " << mpiRank << " from rank " << it->first - << " expect coords ("; - for (int k = 0; k < dimOfWorld; k++) { - std::cout << recvCoords[it->first][i][k]; - if (k + 1 < dimOfWorld) - std::cout << " / "; - } - std::cout << ") received coords ("; - for (int k = 0; k < dimOfWorld; k++) { - std::cout << (it->second)[i][k]; - if (k + 1 < dimOfWorld) - std::cout << " / "; - } - std::cout << ")" << std::endl; - - debug::printInfoByDof(feSpace, *(recvDofs[it->first][i])); - } - - TEST_EXIT(c)("Wrong DOFs in rank %d!\n", mpiRank); - } - } - } - - INFO(info, 8)("Test common dofs needed %.5f seconds\n", TIME_USED(first, clock())); - } - - - void ParallelDomainBase::printMapLocalGlobal(int rank) - { - if (rank == -1 || mpiRank == rank) { - std::cout << "====== DOF MAP LOCAL -> GLOBAL ====== " << std::endl; - - for (DofMapping::iterator it = mapLocalGlobalDofs.begin(); - it != mapLocalGlobalDofs.end(); it++) { - DegreeOfFreedom localdof = -1; - if (mapLocalToDofIndex.count(it->first) > 0) - localdof = mapLocalToDofIndex[it->first]; - - std::cout << "DOF " << it->first << " " - << it->second << " " - << localdof << std::endl; - WorldVector<double> coords; - mesh->getDofIndexCoords(it->first, feSpace, coords); - coords.print(); - for (RankToDofContainer::iterator rankit = sendDofs.begin(); - rankit != sendDofs.end(); ++rankit) { - for (DofContainer::iterator dofit = rankit->second.begin(); - dofit != rankit->second.end(); ++dofit) - if (**dofit == it->first) - std::cout << "SEND DOF TO " << rankit->first << std::endl; - } - for (RankToDofContainer::iterator rankit = recvDofs.begin(); - rankit != recvDofs.end(); ++rankit) { - for (DofContainer::iterator dofit = rankit->second.begin(); - dofit != rankit->second.end(); ++dofit) - if (**dofit == it->first) - std::cout << "RECV DOF FROM " << rankit->first << std::endl; - } - std::cout << "------" << std::endl; - } - } - } - - - void ParallelDomainBase::printMapPeriodic(int rank) - { - FUNCNAME("ParallelDomainBase::printMapPeriodic()"); - - if (rank == -1 || mpiRank == rank) { - std::cout << "====== DOF MAP PERIODIC ====== " << std::endl; - - for (PeriodicDofMap::iterator it = periodicDof.begin(); - it != periodicDof.end(); ++it) { - std::cout << "DOF MAP " << it->first << ": "; - for (std::set<DegreeOfFreedom>::iterator dofit = it->second.begin(); - dofit != it->second.end(); ++dofit) - std::cout << *dofit << " "; - std::cout << std::endl; - - DegreeOfFreedom localdof = -1; - for (DofMapping::iterator dofIt = mapLocalGlobalDofs.begin(); - dofIt != mapLocalGlobalDofs.end(); ++dofIt) - if (dofIt->second == it->first) - localdof = dofIt->first; - - TEST_EXIT(localdof != -1)("There is something wrong!\n"); - - WorldVector<double> coords; - mesh->getDofIndexCoords(localdof, feSpace, coords); - coords.print(); - } - } - } - - - void ParallelDomainBase::printRankDofs(int rank, DofContainer& rankDofs, - DofContainer& rankAllDofs) - { - if (rank == -1 || mpiRank == rank) { - std::cout << "====== RANK DOF INFORMATION ====== " << std::endl; - - std::cout << " RANK OWNED DOFS: " << std::endl; - for (DofContainer::iterator dofit = rankDofs.begin(); - dofit != rankDofs.end(); ++dofit) { - std::cout << " " << **dofit << std::endl; - WorldVector<double> coords; - mesh->getDofIndexCoords(*dofit, feSpace, coords); - coords.print(); - } - - std::cout << " RANK ALL DOFS: " << std::endl; - for (DofContainer::iterator dofit = rankAllDofs.begin(); - 
dofit != rankAllDofs.end(); ++dofit) { - std::cout << " " << **dofit << std::endl; - WorldVector<double> coords; - mesh->getDofIndexCoords(*dofit, feSpace, coords); - coords.print(); - } - } - } - + void ParallelDomainBase::writePartitioningMesh(std::string filename) { FUNCNAME("ParallelDomainBase::writePartitioningMesh()"); diff --git a/AMDiS/src/parallel/ParallelDomainBase.h b/AMDiS/src/parallel/ParallelDomainBase.h index e8fbd32c67fd110548c5053d311b6f09508d4730..9559a993a985a7b14f67ba4c2430c72aa928db34 100644 --- a/AMDiS/src/parallel/ParallelDomainBase.h +++ b/AMDiS/src/parallel/ParallelDomainBase.h @@ -39,14 +39,7 @@ #include "AMDiS_fwd.h" namespace AMDiS { - - struct DofPtrSortFct { - bool operator() (const DegreeOfFreedom *dof0, const DegreeOfFreedom *dof1) - { - return (*dof0 < *dof1); - } - }; - + class ParMetisPartitioner; class ParallelDomainBase : public ProblemIterationInterface, @@ -71,14 +64,9 @@ namespace AMDiS { /// Defines a mapping type from DOF indices to boolean values. typedef std::map<DegreeOfFreedom, bool> DofIndexToBool; - /// Defines a mapping type from rank numbers to sets of coordinates. - typedef std::map<int, std::vector<WorldVector<double> > > RankToCoords; - /// Forward type (it maps rank numbers to the interior boundary objects). typedef InteriorBoundary::RankToBoundMap RankToBoundMap; - typedef std::map<int, DofContainer> ElementIdxToDofs; - typedef std::map<const DegreeOfFreedom*, DegreeOfFreedom> DofIndexMap; typedef std::map<DegreeOfFreedom, std::set<DegreeOfFreedom> > PeriodicDofMap; @@ -181,6 +169,7 @@ namespace AMDiS { { return NULL; } + // Writes all data of this object to an output stream. virtual void serialize(std::ostream &out); @@ -274,51 +263,7 @@ namespace AMDiS { * should be checked. */ bool checkAndAdaptBoundary(RankToBoundMap &allBound); - - void dbgCreateElementMap(ElementIdxToDofs &elMap); - void dbgTestElementMap(ElementIdxToDofs &elMap); - - void dbgTestInteriorBoundary(); - - /** \brief - * This function is used for debugging only. It traverses all interior boundaries - * and compares the dof indices on them with the dof indices of the boundarys - * neighbours. The function fails, when dof indices on an interior boundary do - * not fit together. - * - * \param printCoords If true, the coords of all common dofs are printed to - * the screen. - */ - void dbgTestCommonDofs(bool printCoords = false); - - /** \brief - * This function is used for debugging only. It prints all information from - * the local to global dof mapping, see \ref mapLocalGlobalDofs. - * - * \param rank If specified, only the information from the given rank is printed. - */ - void printMapLocalGlobal(int rank = -1); - - /** \brief - * This function is used for debugging only. It prints all information about - * the periodic mapping of dofs, that are on periodic boundaries. - * - * \param rank If specified, only the information from the given rank is printed. - */ - void printMapPeriodic(int rank = -1); - - /** \brief - * This function is used for debugging only. It prints information about dofs - * in rank's partition. - * - * \param rank If specified, only the information from the given - * rank is printed. - * \param rankDofs List of all dofs in ranks partition that are owned by rank. - * \param rankAllDofs List of all dofs in ranks partition. 
- */ - void printRankDofs(int rank, DofContainer& rankDofs, DofContainer& rankAllDofs); - /** \brief * This functions create a Paraview file with the macro mesh where the elements * are colored by the partition they are part of. This function can be used for @@ -416,42 +361,6 @@ namespace AMDiS { } } - inline void orderDofs(const DegreeOfFreedom* dof0, - const DegreeOfFreedom* dof1, - const DegreeOfFreedom* dof2, - DofContainer &vec) - { - DofPtrSortFct dofPtrSort; - vec.resize(3); - vec[0] = dof0; - vec[1] = dof1; - vec[2] = dof2; - sort(vec.begin(), vec.end(), dofPtrSort); - } - - inline void orderDofs(const DegreeOfFreedom* dof0, - const DegreeOfFreedom* dof1, - const DegreeOfFreedom* dof2, - const DegreeOfFreedom* dof3, - DofContainer &vec) - { - DofPtrSortFct dofPtrSort; - vec.resize(4); - vec[0] = dof0; - vec[1] = dof1; - vec[2] = dof2; - vec[3] = dof3; - sort(vec.begin(), vec.end(), dofPtrSort); - } - - inline void printColValues(int row, - std::vector<int>& cols, - std::vector<double>& values) - { - for (int i = 0; i < static_cast<int>(cols.size()); i++) - std::cout << "Mat[" << row << "][" << cols[i] << "] = " << values[i] << "\n"; - } - protected: /// ProblemIterationInterface *iterationIF; @@ -606,6 +515,8 @@ namespace AMDiS { * structure (e.g. through refinement or coarsening managers). */ long lastMeshChangeIndex; + + friend class ParallelDomainDbg; }; }
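Taken together, the ParallelDomainBase.h changes move all debug helpers out of the production class and grant the new helper class access via the friend declaration added above. A minimal sketch of this friend-based debug-helper pattern (Core and CoreDbg are illustrative stand-ins; the real ParallelDomainDbg interface lives in parallel/ParallelDomainDbg.h, which is not shown in this diff):

    #include <iostream>

    class CoreDbg;  // forward declaration

    class Core {
    public:
      Core() : internalState(42) {}
    private:
      int internalState;
      // Let the debug helper inspect private members, so test code can be
      // moved out of the production class without widening its public API.
      friend class CoreDbg;
    };

    class CoreDbg {
    public:
      // Static helpers take the object under test explicitly, mirroring
      // calls such as ParallelDomainDbg::testCommonDofs(*this, true).
      static void dumpState(const Core &core)
      {
        std::cout << "state = " << core.internalState << std::endl;
      }
    };

    int main()
    {
      Core core;
      CoreDbg::dumpState(core);
      return 0;
    }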