diff --git a/AMDiS/src/Assembler.cc b/AMDiS/src/Assembler.cc
index 119cfeb4fe65eae534ff6e44b41c7e4649a369af..2d5d44499ff7bb76e39976ca5fb2339f7a144a0a 100644
--- a/AMDiS/src/Assembler.cc
+++ b/AMDiS/src/Assembler.cc
@@ -56,10 +56,9 @@ namespace AMDiS {
     Element *el = elInfo->getElement();
 
-    if ((el != lastMatEl && el != lastVecEl) || !operat->isOptimized())
+    if (el != lastMatEl || !operat->isOptimized()) {
       initElement(elInfo);
 
-    if (el != lastMatEl || !operat->isOptimized()) {
       if (rememberElMat)
 	set_to_zero(elementMatrix);
diff --git a/AMDiS/src/DOFMatrix.cc b/AMDiS/src/DOFMatrix.cc
index c1cbe8abbf7635cfe271f2d2b3d6ff523ded0458..4740216e515175df0793e2c3dc1f1a9ce37b9875 100644
--- a/AMDiS/src/DOFMatrix.cc
+++ b/AMDiS/src/DOFMatrix.cc
@@ -211,7 +211,7 @@ namespace AMDiS {
     using namespace mtl;
 
 #if 0
-    std::cout << "----- PRINT MAT --------" << std::endl;
+    std::cout << "----- PRINT MAT " << rowElInfo->getElement()->getIndex() << "--------" << std::endl;
     std::cout << elMat << std::endl;
     std::cout << "rows: ";
     for (int i = 0; i < rowIndices.size(); i++)
diff --git a/AMDiS/src/ProblemStat.cc b/AMDiS/src/ProblemStat.cc
index 91686c7bf0b4a0779f9e2d00a3e103702e02e565..ec816cb996058c769127594c6d010e58c58eb6c4 100644
--- a/AMDiS/src/ProblemStat.cc
+++ b/AMDiS/src/ProblemStat.cc
@@ -560,6 +560,8 @@ namespace AMDiS {
     adaptInfo->setMaxSolverIterations(solver->getMaxIterations());
     adaptInfo->setSolverTolerance(solver->getTolerance());
     adaptInfo->setSolverResidual(solver->getResidual());
+
+    debug::writeMatlabVector(*solution, "sol.dat");
   }
@@ -828,6 +830,9 @@ namespace AMDiS {
     INFO(info, 8)("buildAfterCoarsen needed %.5f seconds\n", TIME_USED(first, clock()));
 #endif
+
+    debug::writeMatlabMatrix(*systemMatrix, "matrix.dat");
+    debug::writeMatlabVector(*rhs, "rhs.dat");
   }
diff --git a/AMDiS/src/SubAssembler.hh b/AMDiS/src/SubAssembler.hh
index daf144e04610f3a474d25a04b5d2a16f0895230a..eebcf141cd56114d747fd6ddd08f1d03760dbd2e 100644
--- a/AMDiS/src/SubAssembler.hh
+++ b/AMDiS/src/SubAssembler.hh
@@ -28,10 +28,12 @@ namespace AMDiS {
     TEST_EXIT_DBG(elInfo->getMesh() == vec->getFeSpace()->getMesh())
       ("Vector and FE space do not fit together!\n");
 
+    Quadrature *localQuad = quad ? quad : quadrature;
     if (valuesAtQPs[vec] && valuesAtQPs[vec]->valid) {
       vecAtQPs = valuesAtQPs[vec]->values;
+      return;
     }
diff --git a/AMDiS/src/ZeroOrderAssembler.cc b/AMDiS/src/ZeroOrderAssembler.cc
index 28429c09cad7f99701b71e790d55e3b339795c97..42903ce00caa9ad4c62a533796dab6bc5c3be53b 100644
--- a/AMDiS/src/ZeroOrderAssembler.cc
+++ b/AMDiS/src/ZeroOrderAssembler.cc
@@ -74,12 +74,12 @@ namespace AMDiS {
     // create new assembler
     if (!optimized) {
-      newAssembler = new StandardZOA(op, assembler, quad);
+      newAssembler = new StandardZOA(op, assembler, quad);
     } else {
       if (pwConst)
-	newAssembler = new PrecalcZOA(op, assembler, quad);
+	newAssembler = new PrecalcZOA(op, assembler, quad);
       else
-	newAssembler = new FastQuadZOA(op, assembler, quad);
+	newAssembler = new FastQuadZOA(op, assembler, quad);
     }
 
     subAssemblers->push_back(newAssembler);
@@ -103,8 +103,8 @@ namespace AMDiS {
     ElementVector c(nPoints, 0.0);
     std::vector phival(nCol);
 
-    std::vector::iterator termIt;
-    for (termIt = terms.begin(); termIt != terms.end(); ++termIt)
+    for (std::vector::iterator termIt = terms.begin();
+	 termIt != terms.end(); ++termIt)
       (static_cast((*termIt)))->getC(elInfo, nPoints, c);
 
     if (symmetric) {
diff --git a/AMDiS/src/ZeroOrderTerm.cc b/AMDiS/src/ZeroOrderTerm.cc
index 6b49ad447796f7a03bbf399f4f37b90c53a725af..b03b08055df7384bb4b2618e1c51606d63bd9ab9 100644
--- a/AMDiS/src/ZeroOrderTerm.cc
+++ b/AMDiS/src/ZeroOrderTerm.cc
@@ -44,7 +44,7 @@ namespace AMDiS {
   }
 
-  void VecAtQP_ZOT::getC(const ElInfo *, int nPoints, ElementVector& C)
+  void VecAtQP_ZOT::getC(const ElInfo *elInfo, int nPoints, ElementVector& C)
   {
     if (f) {
       for (int iq = 0; iq < nPoints; iq++)
diff --git a/AMDiS/src/parallel/MeshDistributor.cc b/AMDiS/src/parallel/MeshDistributor.cc
index b3aa5f333c51de3bc5fddb4fc74ae0878ffcf4b9..89aaf6825daf8e56cb2d7763ff164f0e09cd3b9f 100644
--- a/AMDiS/src/parallel/MeshDistributor.cc
+++ b/AMDiS/src/parallel/MeshDistributor.cc
@@ -571,7 +571,7 @@ namespace AMDiS {
 	("No edge %d/%d in elObjDB!\n", it->first, it->second);
 
       std::set el0, el1;
-      std::map edgeNoInEl;
+      map edgeNoInEl;
 
       for (unsigned int i = 0; i < edgeEls.size(); i++) {
 	if (rankMacroEls.count(edgeEls[i].elIndex)) {
@@ -1197,7 +1197,7 @@ namespace AMDiS {
     StdMpi stdMpi(mpiComm, true);
     stdMpi.send(sendCodes);
-    for (std::map >::iterator it = partitioner->getRecvElements().begin();
+    for (map >::iterator it = partitioner->getRecvElements().begin();
 	 it != partitioner->getRecvElements().end(); ++it)
       stdMpi.recv(it->first);
     stdMpi.startCommunication();
@@ -1208,7 +1208,7 @@ namespace AMDiS {
     StdMpi > > stdMpi2(mpiComm, true);
     stdMpi2.send(sendValues);
-    for (std::map >::iterator it = partitioner->getRecvElements().begin();
+    for (map >::iterator it = partitioner->getRecvElements().begin();
 	 it != partitioner->getRecvElements().end(); ++it)
       stdMpi2.recv(it->first);
     stdMpi2.startCommunication();
@@ -2044,7 +2044,7 @@ namespace AMDiS {
     stdMpi2.startCommunication();
 
-    for (std::map::iterator it = stdMpi2.getRecvData().begin();
+    for (map::iterator it = stdMpi2.getRecvData().begin();
 	 it != stdMpi2.getRecvData().end(); ++it) {
       for (PeriodicDofMap::iterator perIt = it->second.begin();
 	   perIt != it->second.end(); ++perIt) {
@@ -2070,9 +2070,11 @@ namespace AMDiS {
       allMacroElements.push_back(*it);
 
-      macroElementNeighbours[(*it)->getIndex()].resize(mesh->getGeo(NEIGH));
+      int elIndex = (*it)->getIndex();
+
+      macroElementNeighbours[elIndex].resize(mesh->getGeo(NEIGH));
       for (int i = 0; i < mesh->getGeo(NEIGH); i++)
-	macroElementNeighbours[(*it)->getIndex()][i] =
+	macroElementNeighbours[elIndex][i] =
 	  (*it)->getNeighbour(i) ? (*it)->getNeighbour(i)->getIndex() : -1;
     }
   }
diff --git a/AMDiS/src/parallel/MeshDistributor.h b/AMDiS/src/parallel/MeshDistributor.h
index e2fdd8fb68a375a18fcd9fc9e9efc07d6412d71e..39a3dd8fa3405486eccae858d164a77ed6e670f6 100644
--- a/AMDiS/src/parallel/MeshDistributor.h
+++ b/AMDiS/src/parallel/MeshDistributor.h
@@ -62,8 +62,9 @@ namespace AMDiS {
     void exitParallelization();
 
-    /// Adds a DOFVector to the set of \ref interchangeVecs. Thus, this vector will
-    /// be automatically interchanged between ranks when mesh is repartitioned.
+    /// Adds a DOFVector to the set of \ref interchangeVecs. Thus, this vector
+    /// will be automatically interchanged between ranks when mesh is
+    /// repartitioned.
     void addInterchangeVector(DOFVector *vec)
     {
       interchangeVectors.push_back(vec);
     }
@@ -77,28 +78,30 @@ namespace AMDiS {
     }
 
     /** \brief
-     * This function checks if the mesh has changed on at least on rank. In this case,
-     * the interior boundaries are adapted on all ranks such that they fit together on
-     * all ranks. Furthermore the function \ref updateLocalGlobalNumbering() is called
-     * to update the DOF numberings and mappings on all rank due to the new mesh
-     * structure.
+     * This function checks if the mesh has changed on at least one rank. In
+     * this case, the interior boundaries are adapted on all ranks such that
+     * they fit together on all ranks. Furthermore, the function
+     * \ref updateLocalGlobalNumbering() is called to update the DOF numberings
+     * and mappings on all ranks due to the new mesh structure.
      *
-     * \param[in] tryRepartition If this parameter is true, repartitioning may be
-     *                           done. This depends on several other parameters. If
-     *                           the parameter is false, the mesh is only checked
-     *                           and adapted but never repartitioned.
+     * \param[in] tryRepartition If this parameter is true, repartitioning
+     *                           may be done. This depends on several other
+     *                           parameters. If the parameter is false, the
+     *                           mesh is only checked and adapted but never
+     *                           repartitioned.
      */
     void checkMeshChange(bool tryRepartition = true);
 
     /** \brief
-     * Test, if the mesh consists of macro elements only. The mesh partitioning of
-     * the parallelization works for macro meshes only and would fail, if the mesh
-     * is already refined in some way. Therefore, this function will exit the program
-     * if it finds a non macro element in the mesh.
+     * Test if the mesh consists of macro elements only. The mesh partitioning
+     * of the parallelization works for macro meshes only and would fail if the
+     * mesh is already refined in some way. Therefore, this function will exit
+     * the program if it finds a non macro element in the mesh.
      */
     void testForMacroMesh();
 
-    /// Set for each element on the partitioning level the number of leaf elements.
+    /// Set for each element on the partitioning level the number of
+    /// leaf elements.
     void setInitialElementWeights();
 
     inline virtual string getName()
@@ -154,7 +157,8 @@ namespace AMDiS {
       return periodicDof;
     }
 
-    /// Returns for a global dof index its periodic mapping for a given boundary type.
+    /// Returns for a global dof index its periodic mapping for a given
+    /// boundary type.
     inline int getPeriodicMapping(int globalDofIndex, BoundaryType type)
     {
       FUNCNAME("MeshDistributor::getPeriodicMapping()");
@@ -167,7 +171,8 @@ namespace AMDiS {
     }
 
     /// For a given global DOF index, this function returns the set of periodic
-    /// associations, i.e., the boundary types the DOF is associated to, for this DOF.
+    /// associations, i.e., the boundary types the DOF is associated to, for
+    /// this DOF.
     inline std::set& getPerDofAssociations(int globalDofIndex)
     {
       TEST_EXIT_DBG(periodicDofAssociations.count(globalDofIndex))
@@ -190,8 +195,8 @@ namespace AMDiS {
       return (periodicDof[type].count(globalDofIndex) > 0);
     }
 
-    /// Return true, if the given DOF is owned by the rank. If false, the DOF is in
-    /// rank's partition, but is owned by some other rank.
+    /// Return true, if the given DOF is owned by the rank. If false, the DOF
+    /// is in rank's partition, but is owned by some other rank.
     inline bool getIsRankDof(DegreeOfFreedom dof)
     {
       if (isRankDof.count(dof))
@@ -251,14 +256,14 @@ namespace AMDiS {
     void deserialize(istream &in);
 
     /** \brief
-     * This function must be used if the values of a DOFVector must be synchronised
-     * over all ranks. That means, that each rank sends the values of the DOFs, which
-     * are owned by the rank and lie on an interior bounday, to all other ranks also
-     * having these DOFs.
+     * This function must be used if the values of a DOFVector must be
+     * synchronised over all ranks. That means that each rank sends the
+     * values of the DOFs, which are owned by the rank and lie on an interior
+     * boundary, to all other ranks also having these DOFs.
      *
-     * This function must be used, for example, after the lineary system is solved, or
-     * after the DOFVector is set by some user defined functions, e.g., initial
-     * solution functions.
+     * This function must be used, for example, after the linear system is
+     * solved, or after the DOFVector is set by some user defined functions,
+     * e.g., initial solution functions.
      */
     template void synchVector(DOFVector &vec)
@@ -290,9 +295,9 @@ namespace AMDiS {
     }
 
     /** \brief
-     * Works in the same way as the function above defined for DOFVectors. Due to
-     * performance, this function does not call \ref synchVector for each DOFVector,
-     * but instead sends all values of all DOFVectors all at once.
+     * Works in the same way as the function above defined for DOFVectors. Due
+     * to performance, this function does not call \ref synchVector for each
+     * DOFVector, but instead sends all values of all DOFVectors all at once.
      */
     void synchVector(SystemVector &vec);
@@ -319,10 +324,8 @@ namespace AMDiS {
   protected:
     void addProblemStat(ProblemStatSeq *probStat);
 
-    /** \brief
-     * Determines the interior boundaries, i.e. boundaries between ranks, and stores
-     * all information about them in \ref interiorBoundary.
-     */
+    /// Determines the interior boundaries, i.e. boundaries between ranks, and
+    /// stores all information about them in \ref interiorBoundary.
     void createInteriorBoundaryInfo();
 
     void updateInteriorBoundaryInfo();
@@ -333,36 +336,47 @@ namespace AMDiS {
     void createBoundaryDofs();
 
-    /// Removes all macro elements from the mesh that are not part of ranks partition.
+    /// Removes all macro elements from the mesh that are not part of rank's
+    /// partition.
     void removeMacroElements();
 
-    /// Updates the local and global DOF numbering after the mesh has been changed.
+    /// Updates the local and global DOF numbering after the mesh has been
+    /// changed.
     void updateLocalGlobalNumbering();
 
     /** \brief
-     * Creates to all dofs in rank's partition that are on a periodic boundary the
-     * mapping from dof index to the other periodic dof indices. This information
-     * is stored in \ref periodicDof.
+     * Creates to all dofs in rank's partition that are on a periodic boundary
+     * the mapping from dof index to the other periodic dof indices. This
+     * information is stored in \ref periodicDof.
      */
     void createPeriodicMap();
 
+    /** \brief
+     * This function is called only once during the initialization when the
+     * whole macro mesh is available on all cores. It copies the pointers of all
+     * macro elements to \ref allMacroElements and stores all neighbour
+     * information based on macro element indices (and not pointer based) in
+     * \ref macroElementNeighbours. This information is then used to
+     * reconstruct macro elements during mesh redistribution.
+     */
     void createMacroElementInfo();
 
     void updateMacroElementInfo();
 
     /** \brief
-     * Checks for all given interior boundaries if the elements fit together on both
-     * sides of the boundaries. If this is not the case, the mesh is adapted. Because
-     * refinement of a certain element may forces the refinement of other elements,
-     * it is not guaranteed that all rank's meshes fit together after this function
-     * terminates. Hence, it must be called until a stable mesh refinement is reached.
+     * Checks for all given interior boundaries if the elements fit together on
+     * both sides of the boundaries. If this is not the case, the mesh is
+     * adapted. Because refinement of a certain element may force the
+     * refinement of other elements, it is not guaranteed that all ranks' meshes
+     * fit together after this function terminates. Hence, it must be called
+     * until a stable mesh refinement is reached.
      *
-     * \param[in] allBound Defines a map from rank to interior boundaries which
-     *                     should be checked.
+     * \param[in] allBound Defines a map from rank to interior boundaries
+     *                     which should be checked.
      *
-     * \return If the mesh has been changed by this function, it returns true.
-     *         Otherwise, it returns false, i.e., the given interior boundaries
-     *         fit together on both sides.
+     * \return If the mesh has been changed by this function, it returns
+     *         true. Otherwise, it returns false, i.e., the given interior
+     *         boundaries fit together on both sides.
      */
     bool checkAndAdaptBoundary(RankToBoundMap &allBound);
@@ -377,7 +391,8 @@ namespace AMDiS {
     /// stationary problem.
     void setRankDofs(ProblemStatSeq *probStat);
 
-    /// Sets \ref isRankDof to all matrices and rhs vectors in all stationary problems.
+    /// Sets \ref isRankDof to all matrices and rhs vectors in all
+    /// stationary problems.
     void setRankDofs();
 
     /// Removes all periodic boundary condition information from all matrices and
@@ -450,7 +465,8 @@ namespace AMDiS {
     }
 
   protected:
-    /// List of all stationary problems that are managed by this mesh distributor.
+    /// List of all stationary problems that are managed by this mesh
+    /// distributor.
     vector problemStat;
 
     /// If true, the mesh distributor is already initialized;
@@ -462,11 +478,8 @@ namespace AMDiS {
     /// Overall number of processes.
     int mpiSize;
 
-    /** \brief
-     * MPI communicator collected all processes, which should
-     * be used for calculation. The Debug procces is not included
-     * in this communicator.
-     */
+    /// MPI communicator collecting all processes, which should be used for
+    /// calculation. The Debug process is not included in this communicator.
     MPI::Intracomm mpiComm;
 
     /// Name of the problem (as used in the init files)
@@ -479,19 +492,21 @@ namespace AMDiS {
     Mesh *mesh;
 
     /** \brief
-     * A refinement manager that should be used on the mesh. It is used to refine
-     * elements at interior boundaries in order to fit together with elements on the
-     * other side of the interior boundary.
+     * A refinement manager that should be used on the mesh. It is used to
+     * refine elements at interior boundaries in order to fit together with
+     * elements on the other side of the interior boundary.
      */
     RefinementManager *refineManager;
 
     /// Info level.
     int info;
 
-    /// Pointer to a mesh partitioner that is used to partition the mesh to the ranks.
+    /// Pointer to a mesh partitioner that is used to partition the mesh to
+    /// the ranks.
     MeshPartitioner *partitioner;
 
-    /// Weights for the elements, i.e., the number of leaf elements within this element.
+    /// Weights for the elements, i.e., the number of leaf elements within
+    /// this element.
     map elemWeights;
 
     /** \brief
@@ -506,47 +521,48 @@ namespace AMDiS {
     /// Number of DOFs in the whole domain.
     int nOverallDofs;
 
-    // Data structure to store all sub-objects of all elements of the macro mesh.
+    /// Data structure to store all sub-objects of all elements of the
+    /// macro mesh.
     ElementObjects elObjects;
 
-    // Maps to each macro element index a pointer to the corresponding element.
+    /// Maps to each macro element index a pointer to the corresponding element.
     map macroElIndexMap;
 
-    // Maps to each macro element index the type of this element.
+    /// Maps to each macro element index the type of this element.
     map macroElIndexTypeMap;
 
     /** \brief
-     * Defines the interior boundaries of the domain that result from partitioning
-     * the whole mesh. Contains only the boundaries, which are owned by the rank, i.e.,
-     * the object gives for every neighbour rank i the boundaries this rank owns and
-     * shares with rank i.
+     * Defines the interior boundaries of the domain that result from
+     * partitioning the whole mesh. Contains only the boundaries, which are
+     * owned by the rank, i.e., the object gives for every neighbour rank i
+     * the boundaries this rank owns and shares with rank i.
      */
     InteriorBoundary myIntBoundary;
 
     /** \brief
-     * Defines the interior boundaries of the domain that result from partitioning
-     * the whole mesh. Contains only the boundaries, which are not owned by the rank,
-     * i.e., the object gives for every neighbour rank i the boundaries that are
-     * owned by rank i and are shared with this rank.
+     * Defines the interior boundaries of the domain that result from
+     * partitioning the whole mesh. Contains only the boundaries, which are
+     * not owned by the rank, i.e., the object gives for every neighbour rank
+     * i the boundaries that are owned by rank i and are shared with this rank.
      */
     InteriorBoundary otherIntBoundary;
 
     /** \brief
-     * Defines the periodic boundaries with other ranks. Periodic boundaries have
-     * no owner, as it is the case of interior boundaries.
+     * Defines the periodic boundaries with other ranks. Periodic boundaries
+     * have no owner, as it is the case of interior boundaries.
      */
     InteriorBoundary periodicBoundary;
 
     /** \brief
-     * This map contains for each rank the list of DOFs the current rank must send
-     * to exchange solution DOFs at the interior boundaries.
+     * This map contains for each rank the list of DOFs the current rank must
+     * send to exchange solution DOFs at the interior boundaries.
      */
     RankToDofContainer sendDofs;
 
     /** \brief
-     * This map contains on each rank the list of DOFs from which the current rank
-     * will receive DOF values (i.e., this are all DOFs at an interior boundary). The
-     * DOF indices are given in rank's local numbering.
+     * This map contains on each rank the list of DOFs from which the current
+     * rank will receive DOF values (i.e., these are all DOFs at an interior
+     * boundary). The DOF indices are given in rank's local numbering.
      */
     RankToDofContainer recvDofs;
@@ -557,25 +573,26 @@ namespace AMDiS {
     DofMapping mapLocalDofIndex;
 
     /** \brief
-     * Maps all DOFs in ranks partition to a bool value. If it is true, the DOF is
-     * owned by the rank. Otherwise, its an interior boundary DOF that is owned by
-     * another rank.
+     * Maps all DOFs in rank's partition to a bool value. If it is true, the
+     * DOF is owned by the rank. Otherwise, it's an interior boundary DOF that
+     * is owned by another rank.
      */
     DofIndexToBool isRankDof;
 
     /** \brief
-     * If periodic boundaries are used, this map stores, for each periodic boundary
-     * type, for all DOFs in rank's partition (that are on periodic boundaries), the
-     * corresponding mapped periodic DOFs. The mapping is defined by using global
-     * dof indices.
+     * If periodic boundaries are used, this map stores, for each periodic
+     * boundary type, for all DOFs in rank's partition (that are on periodic
+     * boundaries), the corresponding mapped periodic DOFs. The mapping is
+     * defined by using global DOF indices.
      */
     PeriodicDofMap periodicDof;
 
     /** \brief
-     * If periodic boundaries are used, this map stores to each periodic DOF in rank's
-     * partition the set of periodic boundaries the DOF is associated to. In 2D, most
-     * DOFs are only on one periodic boundary. Only, e.g., in a box with all boundaries
-     * being periodic, the four corners are associated by two different boundaries.
+     * If periodic boundaries are used, this map stores to each periodic DOF in
+     * rank's partition the set of periodic boundaries the DOF is associated to.
+     * In 2D, most DOFs are only on one periodic boundary. Only, e.g., in a box
+     * with all boundaries being periodic, the four corners are associated with
+     * two different boundaries.
      */
     map > periodicDofAssociations;
@@ -584,14 +601,15 @@ namespace AMDiS {
     /// repartitioned.
     vector*> interchangeVectors;
 
-    /// Is the index of the first row of the linear system, which is owned by the rank.
+    /// Is the index of the first row of the linear system, which is owned by
+    /// the rank.
     int rstart;
 
     /** \brief
      * If the problem definition has been read from a serialization file, this
      * variable is true, otherwise it is false. This variable is used to stop the
-     * initialization function, if the problem definition has already been read from
-     * a serialization file.
+     * initialization function, if the problem definition has already been read
+     * from a serialization file.
      */
     bool deserialized;
@@ -601,10 +619,12 @@ namespace AMDiS {
     /// If true, it is possible to repartition the mesh during computations.
     bool repartitioningAllowed;
 
-    /// Stores the number of mesh changes that must lie in between to repartitionings.
+    /// Stores the number of mesh changes that must lie between two
+    /// repartitionings.
     int repartitionIthChange;
 
-    /// Counts the number of mesh changes after the last mesh repartitioning was done.
+    /// Counts the number of mesh changes after the last mesh repartitioning
+    /// was done.
     int nMeshChangesAfterLastRepartitioning;
 
     /// Countes the number of mesh repartitions that were done. Till now, this
@@ -615,15 +635,19 @@ namespace AMDiS {
     string debugOutputDir;
 
     /** \brief
-     * Stores the mesh change index. This is used to recognize changes in the mesh
-     * structure (e.g. through refinement or coarsening managers).
+     * Stores the mesh change index. This is used to recognize changes in the
+     * mesh structure (e.g. through refinement or coarsening managers).
      */
     long lastMeshChangeIndex;
 
+    /// Stores for all macro elements of the original macro mesh the
+    /// neighbourhood information based on element indices. Thus, each macro
+    /// element index is mapped to a vector containing all indices of
+    /// neighbouring macro elements.
     map > macroElementNeighbours;
 
-    /// Store all macro elements of the overall mesh, i.e., before the macro mesh is
-    /// redistributed for the first time.
+    /// Store all macro elements of the overall mesh, i.e., before the
+    /// mesh is redistributed for the first time.
     vector allMacroElements;
 
     Flag createBoundaryDofFlag;
diff --git a/AMDiS/src/parallel/MeshPartitioner.cc b/AMDiS/src/parallel/MeshPartitioner.cc
index 16b81847840f43b39dd0917134d441f06369054f..bbaf14ff2ded977a33509eaba57fa113eef1a1f1 100644
--- a/AMDiS/src/parallel/MeshPartitioner.cc
+++ b/AMDiS/src/parallel/MeshPartitioner.cc
@@ -29,6 +29,8 @@ namespace AMDiS {
     // === Create initial partitioning of the AMDiS mesh. ===
 
     elementInRank.clear();
+
+    // Is used in box partitioning mode only.
     map > vertexElements;
 
     TraverseStack stack;
@@ -39,27 +41,43 @@ namespace AMDiS {
       Element *el = elInfo->getElement();
       int elIndex = el->getIndex();
 
+      // === Store for all macro elements the interior neighbours (thus, no ===
+      // === periodic neighbours) in the map elNeighbours. ===
+
      for (int i = 0; i < mesh->getGeo(NEIGH); i++)
	if (elInfo->getNeighbour(i) && elInfo->getBoundary(i) == INTERIOR)
	  elNeighbours[elIndex].push_back(elInfo->getNeighbour(i)->getIndex());
 
-      if (boxPartitioning) {
-	vertexElements[el->getEdge(0)].insert(elIndex);
-      } else {
+
+      // === Create initial partitioning. ===
+
+      if (!boxPartitioning) {
+	// In standard mode, assign to each macro element an arbitrary but unique
+	// rank number.
+
	int elInRank = std::min(elIndex / elPerRank, mpiSize - 1);
	elementInRank[elIndex] = (elInRank == mpiRank);
	partitionMap[elIndex] = elInRank;
+      } else {
+	// In box partitioning mode, we do the assignment of boxes to ranks later.
+
+	vertexElements[el->getEdge(0)].insert(elIndex);
       }
 
       elInfo = stack.traverseNext(elInfo);
     }
 
+
+    // === Do initial partitioning in box partitioning mode. ===
+
     if (boxPartitioning) {
       TEST_EXIT(mesh->getDim() == 3)("Box partitioning only implemented for 3D!\n");
+
+      // === Create boxes: all elements sharing the same 0-edge are assumed ===
+      // === to be in the same box. ===
+
       int boxCounter = 0;
       for (map >::iterator it = vertexElements.begin();
	   it != vertexElements.end(); ++it) {
@@ -74,6 +92,9 @@ namespace AMDiS {
 
	boxCounter++;
       }
+
+      // === Calculate box neighbourhood relation. ===
+
      for (map::iterator it = elInBox.begin(); it != elInBox.end(); ++it) {
	int elBoxNo = it->second;
@@ -88,6 +109,9 @@ namespace AMDiS {
 
      MSG("Box partitioning with %d boxes enabled!\n", boxCounter);
+
+      // === Create initial partitioning of the boxes. ===
+
      int boxPerRank = boxCounter / mpiSize;
 
      for (map >::iterator it = boxSplitting.begin();
diff --git a/AMDiS/src/parallel/MeshPartitioner.h b/AMDiS/src/parallel/MeshPartitioner.h
index 0b07124d830bdd505674d5d5e5d455d4d0e9ea98..e89ed1a635dd92b657ff4d1088f64e9793a5a58b 100644
--- a/AMDiS/src/parallel/MeshPartitioner.h
+++ b/AMDiS/src/parallel/MeshPartitioner.h
@@ -61,9 +61,30 @@ namespace AMDiS {
     virtual ~MeshPartitioner() {}
 
-    /// Creates an initial paritioning of the AMDiS mesh.
+    /// Creates an initial partitioning of the AMDiS mesh. This partitioning
+    /// can be arbitrary; the only requirement is that each macro element
+    /// must be uniquely assigned to a rank.
     virtual void createInitialPartitioning();
 
+    /** \brief
+     * Function that takes a weighted set of macro elements and returns a
+     * macro mesh partitioning. This function is virtual and must be implemented
+     * for a specific algorithm or an external partitioning library.
+     *
+     * \param[in] elemWeights Maps to each macro element in rank's subdomain
+     *                        a weight, which is usually the number of leaf
+     *                        elements in this macro element.
+     * \param[in] mode        Most external partitioning libraries can
+     *                        distinguish whether we want to create a
+     *                        first partitioning or we have already created
+     *                        one using this library but due to some mesh
+     *                        adaptivity we want to repartition the mesh. In
+     *                        the latter case, the libraries also consider the
+     *                        time for redistribution of the new partitioning.
+     * \return Returns a boolean value indicating whether the partitioning
+     *         algorithm created a correct partitioning. If so, the
+     *         partitioning is stored in \ref elementInRank and \ref partitionMap.
+     */
     virtual bool partition(map &elemWeights, PartitionMode mode = INITIAL) = 0;
@@ -111,25 +132,42 @@ namespace AMDiS {
     }
 
   protected:
+    /// Pointer to the MPI communicator the mesh partitioner should make use of.
     MPI::Intracomm *mpiComm;
 
+    /// Pointer to the AMDiS mesh.
     Mesh *mesh;
 
+    /// The mesh partitioner can be used in two different modes, the standard
+    /// mode and the so-called "box partitioning". The standard mode assigns
+    /// macro elements to ranks. If box partitioning is enabled, which only
+    /// makes sense if the macro mesh results from meshconv's "lego mesher",
+    /// then in 2D boxes (2 macro elements) and in 3D cubes (6 macro
+    /// elements) are assigned as a union to ranks.
     bool boxPartitioning;
 
+    /// In box partitioning mode this map stores for each box number the set
+    /// of macro element indices the box consists of.
     map > boxSplitting;
-
+
+    /// In box partitioning mode this map stores to each box number the set
+    /// of neighbouring boxes.
     map > boxNeighbours;
 
+    /// Is the reverse of the map \ref boxSplitting. Thus, it stores for each
+    /// macro element index the box number it belongs to.
     map elInBox;
 
     map *mapLocalGlobal;
 
     map > elNeighbours;
 
-    /// Maps to each macro element index if it is in rank's partition or not.
+    /// Maps to each macro element index (or box index in box
+    /// partitioning mode) if it is in rank's partition or not.
     map elementInRank;
 
+    /// Maps to each macro element index (or box index in box
+    /// partitioning mode) the rank number the element belongs to.
     map partitionMap;
 
     map > recvElements, sendElements;