Commit 576bb4eb authored by Thomas Witkowski

Oops, for once a code refactoring that doesn't immediately end in 100 bugfixes. So, now the whole parallel fun should also work for coupled problems without any tricks. But once again nobody wants to test it, so I'm off to lunch.
parent ecb93cc3
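
The core of this refactoring: MeshDistributor no longer owns a fixed pair of DOF mappings (dofMap/dofMapSd); each solver now creates its own ParallelDofMapping and hands it to the distributor via the new registerDofMap(). A minimal sketch of the intended solver-side flow, where only registerDofMap() and the FESPACE_WISE constructor argument appear in this diff and the surrounding context (the meshDistributor pointer, the call site) is assumed for illustration:

    // Sketch only: solver-side setup after this commit; the enclosing
    // function and the meshDistributor pointer are assumptions.
    ParallelDofMapping dofMap(FESPACE_WISE);

    // Register the mapping once. After every mesh change the distributor
    // clears, renumbers and updates all registered mappings (see the loop
    // added to updateLocalGlobalNumbering below).
    meshDistributor->registerDofMap(dofMap);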
@@ -328,13 +328,13 @@ namespace AMDiS {
   }
 
   /// Returns \ref feSpaces.
-  inline vector<const FiniteElemSpace*> getFeSpaces()
+  inline vector<const FiniteElemSpace*>& getFeSpaces()
   {
     return feSpaces;
   }
 
   /// Returns \ref componentSpaces;
-  inline vector<const FiniteElemSpace*> getComponentSpaces()
+  inline vector<const FiniteElemSpace*>& getComponentSpaces()
   {
     return componentSpaces;
   }
...
@@ -89,7 +89,6 @@ namespace AMDiS {
     MSG("nelem = %d\n", nelem);
 
     // global number of nodes
-    ParallelDofMapping &dofMap = meshDistributor->getDofMap();
     int nnod = dofMap[feSpace].nOverallDofs;
     MSG("nnod = %d\n", nnod);
@@ -443,8 +442,6 @@ namespace AMDiS {
     typedef traits::range_generator<row, Matrix>::type cursor_type;
     typedef traits::range_generator<nz, cursor_type>::type icursor_type;
 
-    ParallelDofMapping &dofMap = meshDistributor->getDofMap();
-
     for (cursor_type cursor = begin<row>(dmat->getBaseMatrix()),
            cend = end<row>(dmat->getBaseMatrix()); cursor != cend; ++cursor) {
       for (icursor_type icursor = begin<nz>(cursor), icend = end<nz>(cursor);
...
@@ -74,13 +74,9 @@ namespace AMDiS {
     : problemStat(0),
       initialized(false),
       name("parallel"),
-      componentSpaces(0),
       mesh(NULL),
       refineManager(NULL),
-      info(10),
       partitioner(NULL),
-      dofMap(FESPACE_WISE),
-      dofMapSd(FESPACE_WISE),
      deserialized(false),
       writeSerializationFile(false),
       repartitioningAllowed(false),
@@ -149,8 +145,7 @@ namespace AMDiS {
     TEST_EXIT(mpiSize > 1)
       ("Parallelization does not work with only one process!\n");
 
-    TEST_EXIT(feSpaces.size() > 0)
+    TEST_EXIT(componentSpaces.size() > 0)
       ("No FE space has been defined for the mesh distributor!\n");
 
     TEST_EXIT(mesh)("No mesh has been defined for the mesh distributor!\n");
@@ -193,8 +188,9 @@ namespace AMDiS {
     elObjDb.setData(partitionMap, levelData);
 
 #if (DEBUG != 0)
+    TEST_EXIT_DBG(dofMaps.size())("No DOF mapping defined!\n");
     ParallelDebug::writeDebugFile(feSpaces[feSpaces.size() - 1],
-                                  dofMap,
+                                  *(dofMaps[0]),
                                   debugOutputDir + "mpi-dbg", "dat");
 #endif
@@ -366,13 +362,25 @@ namespace AMDiS {
     TEST_EXIT_DBG(probStat->getFeSpaces().size())
       ("No FE spaces in stationary problem!\n");
 
-    TEST_EXIT(componentSpaces.size() == 0)
-      ("Parallelization of coupled problems is deactived at the moment!\n");
-    componentSpaces = probStat->getComponentSpaces();
-    feSpaces = probStat->getFeSpaces();
-    mesh = feSpaces[0]->getMesh();
-    info = probStat->getInfo();
+    // === Add all FE spaces from stationary problem. ===
+
+    vector<const FiniteElemSpace*> newFeSpaces = probStat->getFeSpaces();
+    for (int i = 0; i < static_cast<int>(newFeSpaces.size()); i++)
+      if (find(feSpaces.begin(), feSpaces.end(), newFeSpaces[i]) ==
+          feSpaces.end())
+        feSpaces.push_back(newFeSpaces[i]);
+
+    // === Add mesh of stationary problem and create a corresponding ===
+    // === refinement manager object.                                ===
+
+    if (mesh != NULL) {
+      TEST_EXIT(mesh == probStat->getMesh())
+        ("Does not yet support for different meshes!\n");
+    } else {
+      mesh = probStat->getMesh();
+    }
 
     switch (mesh->getDim()) {
     case 2:
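
This hunk is what actually enables coupled problems: each added stationary problem contributes its FE spaces (deduplicated via find/push_back), and all problems must share a single mesh. A sketch of a coupled setup under these rules, assuming the enclosing method is the distributor's addProblemStat and that two ProblemStat objects were initialized on the same mesh (both names are assumptions, inferred from the context of this hunk, not shown in this diff):

    // Sketch: registering two stationary problems that share one mesh;
    // prob0/prob1 and addProblemStat are assumed names for illustration.
    meshDistributor->addProblemStat(&prob0);
    meshDistributor->addProblemStat(&prob1);  // previously rejected by the
                                              // removed TEST_EXIT; now the
                                              // FE spaces are merged instead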
@@ -388,6 +396,9 @@ namespace AMDiS {
     partitioner->setMesh(mesh);
 
+
+    // === Check whether the stationary problem should be serialized. ===
+
     // Create parallel serialization file writer, if needed.
     int writeSerialization = 0;
     Parameters::get(probStat->getName() + "->output->write serialization",
@@ -407,6 +418,9 @@ namespace AMDiS {
       writeSerializationFile = true;
     }
 
+    // === Check whether the stationary problem should be deserialized. ===
+
     int readSerialization = 0;
     Parameters::get(probStat->getName() + "->input->read serialization",
                     readSerialization);
@@ -455,9 +469,9 @@ namespace AMDiS {
     problemStat.push_back(probStat);
 
-    // If the mesh distributor is already initialized, don't forget to set rank
-    // DOFs object to the matrices and vectors of the added stationary problem,
-    // and to remove the periodic boundary conditions on these objects.
+    // === If the mesh distributor is already initialized, don't forget to ===
+    // === remove the periodic boundary conditions on these objects.       ===
+
     if (initialized)
       removePeriodicBoundaryConditions(probStat);
@@ -478,6 +492,18 @@ namespace AMDiS {
   void MeshDistributor::exitParallelization()
   {}
 
+  void MeshDistributor::registerDofMap(ParallelDofMapping &dofMap)
+  {
+    FUNCNAME("MeshDistributor::registerDofMap()");
+
+    TEST_EXIT(find(dofMaps.begin(), dofMaps.end(), &dofMap) ==
+              dofMaps.end())
+      ("Parallel DOF mapping already registerd in mesh distributor object!\n");
+
+    dofMaps.push_back(&dofMap);
+  }
+
   void MeshDistributor::testForMacroMesh()
   {
@@ -962,8 +988,7 @@ namespace AMDiS {
     }
 
     MPI::COMM_WORLD.Barrier();
-    INFO(info, 8)("Parallel mesh adaption needed %.5f seconds\n",
-                  MPI::Wtime() - first);
+    MSG("Parallel mesh adaption needed %.5f seconds\n", MPI::Wtime() - first);
 
 #if (DEBUG != 0)
     debug::writeMesh(feSpaces[0], -1, debugOutputDir + "mesh");
@@ -1178,6 +1203,10 @@ namespace AMDiS {
       map<int, map<const FiniteElemSpace*, DofContainer> > &data,
      map<const FiniteElemSpace*, map<int, const DegreeOfFreedom*> > &dofIndexMap)
   {
+    FUNCNAME("MeshDistributor::deserialize()");
+
+    ERROR_EXIT("Must be reimplemented!\n");
+#if 0
     data.clear();
 
     int mapSize = 0;
@@ -1191,6 +1220,7 @@ namespace AMDiS {
                       data[rank][componentSpaces[j]],
                       dofIndexMap[componentSpaces[j]]);
     }
+#endif
   }
@@ -1298,6 +1328,8 @@ namespace AMDiS {
     // === Run mesh partitioner to calculate a new mesh partitioning. ===
 
+    TEST_EXIT(dofMaps.size())("No DOF mapping defined!\n");
+    ParallelDofMapping &dofMap = *(dofMaps[0]);
     partitioner->setLocalGlobalDofMap(&(dofMap[feSpaces[0]].getMap()));
     bool partitioningSucceed =
       partitioner->partition(elemWeights, ADAPTIVE_REPART);
@@ -1648,33 +1680,25 @@ namespace AMDiS {
     debug::createSortedDofs(mesh, elMap);
 #endif
 
-    int nLevels = levelData.getLevelNumber();
-    TEST_EXIT_DBG(nLevels >= 1)("Should not happen!\n");
-
-    dofMap.init(levelData, componentSpaces, feSpaces);
-    dofMap.setMpiComm(levelData.getMpiComm(0), 0);
-    dofMap.setDofComm(dofComm);
-    dofMap.clear();
-
-    if (nLevels > 1) {
-      dofMapSd.init(levelData, componentSpaces, feSpaces);
-      dofMapSd.setMpiComm(levelData.getMpiComm(1), 1);
-      dofMapSd.setDofComm(dofCommSd);
-      dofMapSd.clear();
-    }
-
     createBoundaryDofs();
 
-    for (unsigned int i = 0; i < feSpaces.size(); i++)
-      updateLocalGlobalNumbering(dofMap, dofComm, feSpaces[i]);
-    dofMap.update();
+    // === Update all registered DOF mapping objects. ===
 
-    if (nLevels > 1) {
-      for (unsigned int i = 0; i < feSpaces.size(); i++)
-        updateLocalGlobalNumbering(dofMapSd, dofCommSd, feSpaces[i]);
-      dofMapSd.update();
+    TEST_EXIT(dofMaps.size())("No DOF mapping defined!\n");
+
+    for (int i = 0; i < static_cast<int>(dofMaps.size()); i++) {
+      vector<const FiniteElemSpace*>& dofMapSpaces = dofMaps[i]->getFeSpaces();
+
+      dofMaps[i]->clear();
+
+      for (int j = 0; j < static_cast<int>(dofMapSpaces.size()); j++)
+        updateLocalGlobalNumbering(*(dofMaps[i]), dofMapSpaces[j]);
+
+      dofMaps[i]->update();
     }
 
     // === Update DOF admins due to new number of DOFs. ===
 
     lastMeshChangeIndex = mesh->getChangeIndex();
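
Note that the old nLevels > 1 branch is gone: the distributor no longer maintains dofMapSd itself. A solver that still needs the subdomain mapping would now own and register it, roughly like this (a sketch reusing the init/setMpiComm/setDofComm calls from the removed lines; solver-side access to levelData and dofCommSd is an assumption):

    // Sketch: recreating the removed dofMapSd handling on the solver side.
    ParallelDofMapping dofMapSd(FESPACE_WISE);
    dofMapSd.init(levelData, componentSpaces, feSpaces);
    dofMapSd.setMpiComm(levelData.getMpiComm(1), 1);  // second-level communicator
    dofMapSd.setDofComm(dofCommSd);
    meshDistributor->registerDofMap(dofMapSd);  // now updated in the loop above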
@@ -1684,30 +1708,25 @@ namespace AMDiS {
     ParallelDebug::testDofContainerCommunication(*this);
 
     MSG("------------- Debug information -------------\n");
-    MSG("| number of levels: %d\n", nLevels);
+    MSG("| number of levels: %d\n", levelData.getLevelNumber());
     MSG("| number of FE spaces: %d\n", feSpaces.size());
-    for (unsigned int i = 0; i < feSpaces.size(); i++) {
-      MSG("| FE space = %d (pointer adr %p):\n", i, feSpaces[i]);
-      MSG("| nRankDofs = %d\n", dofMap[feSpaces[i]].nRankDofs);
-      MSG("| nOverallDofs = %d\n", dofMap[feSpaces[i]].nOverallDofs);
-      MSG("| rStartDofs = %d\n", dofMap[feSpaces[i]].rStartDofs);
-    }
-
-    if (nLevels > 1) {
-      for (unsigned int i = 0; i < feSpaces.size(); i++) {
-        MSG("| FE space = %d:\n", i);
-        MSG("| nRankDofs = %d\n", dofMapSd[feSpaces[i]].nRankDofs);
-        MSG("| nOverallDofs = %d\n", dofMapSd[feSpaces[i]].nOverallDofs);
-        MSG("| rStartDofs = %d\n", dofMapSd[feSpaces[i]].rStartDofs);
+    for (int i = 0; i < static_cast<int>(dofMaps.size()); i++) {
+      vector<const FiniteElemSpace*>& dofMapSpaces = dofMaps[i]->getFeSpaces();
+
+      for (int j = 0; j < static_cast<int>(dofMapSpaces.size()); j++) {
+        MSG("| FE space = %d (pointer adr %p):\n", j, feSpaces[j]);
+        MSG("| nRankDofs = %d\n", (*(dofMaps[i]))[feSpaces[j]].nRankDofs);
+        MSG("| nOverallDofs = %d\n", (*(dofMaps[i]))[feSpaces[j]].nOverallDofs);
+        MSG("| rStartDofs = %d\n", (*(dofMaps[i]))[feSpaces[j]].rStartDofs);
       }
     }
 
     // debug::writeElementIndexMesh(mesh, debugOutputDir + "elementIndex-" +
     //                              lexical_cast<string>(mpiRank) + ".vtu");
 
     ParallelDebug::writeDebugFile(feSpaces[feSpaces.size() - 1],
-                                  dofMap,
+                                  *(dofMaps[0]),
                                   debugOutputDir + "mpi-dbg", "dat");
 
     debug::testSortedDofs(mesh, elMap);
@@ -1718,16 +1737,20 @@ namespace AMDiS {
       ParallelDebug::testGlobalIndexByCoords(*this);
     }
 #else
-    for (unsigned int i = 0; i < feSpaces.size(); i++)
-      MSG("FE space %d: nRankDofs = %d nOverallDofs = %d\n",
-          i, dofMap[feSpaces[i]].nRankDofs,
-          dofMap[feSpaces[i]].nOverallDofs);
+    for (int i = 0; i < static_cast<int>(dofsMaps.size()); i++) {
+      vector<const FiniteElemSpace*>& dofMapSpaces = dofMaps[i]->getFeSpaces();
+
+      for (int j = 0; j < static_cast<int>(dofMapSpaces.size()); j++)
+        MSG("FE space %d: nRankDofs = %d nOverallDofs = %d\n", j,
+            (*(dofMaps[i]))[feSpaces[j]].nRankDofs,
+            (*(dofMaps[i]))[feSpaces[j]].nOverallDofs);
+    }
 
     int tmp = 0;
     Parameters::get(name + "->write parallel debug file", tmp);
     if (tmp)
       ParallelDebug::writeDebugFile(feSpaces[feSpaces.size() - 1],
-                                    dofMap,
+                                    *(dofMaps[0])
                                     debugOutputDir + "mpi-dbg", "dat");
 #endif
@@ -1736,12 +1759,13 @@ namespace AMDiS {
   }
 
-  void MeshDistributor::updateLocalGlobalNumbering(ParallelDofMapping &dmap,
-                                                   DofComm &dcom,
+  void MeshDistributor::updateLocalGlobalNumbering(ParallelDofMapping &dofMap,
                                                    const FiniteElemSpace *feSpace)
   {
     FUNCNAME("MeshDistributor::updateLocalGlobalNumbering()");
 
+    DofComm &dcom = dofMap.getDofComm();
+
     // === Get all DOFs in ranks partition. ===
 
     std::set<const DegreeOfFreedom*> rankDofSet;
@@ -1750,7 +1774,9 @@ namespace AMDiS {
     DofContainer rankDofs(rankDofSet.begin(), rankDofSet.end());
     sort(rankDofs.begin(), rankDofs.end(), cmpDofsByValue);
 
+
     // === Traverse interior boundaries and get all DOFs on them. ===
+
     DofContainerSet nonRankDofs;
     for (DofComm::Iterator it(dcom.getRecvDofs(), 0, feSpace);
          !it.end(); it.nextRank())
@@ -1759,11 +1785,11 @@ namespace AMDiS {
 
     for (unsigned int i = 0; i < rankDofs.size(); i++)
       if (nonRankDofs.count(rankDofs[i]) == 0)
-        dmap[feSpace].insertRankDof(*(rankDofs[i]));
+        dofMap[feSpace].insertRankDof(*(rankDofs[i]));
 
     for (DofContainerSet::iterator it = nonRankDofs.begin();
          it != nonRankDofs.end(); ++it)
-      dmap[feSpace].insertNonRankDof(**it);
+      dofMap[feSpace].insertNonRankDof(**it);
   }
@@ -1788,8 +1814,7 @@ namespace AMDiS {
       createPeriodicMap(feSpaces[i]);
 
     // MPI::COMM_WORLD.Barrier();
-    INFO(info, 8)("Creation of periodic mapping needed %.5f seconds\n",
-                  MPI::Wtime() - first);
+    MSG("Creation of periodic mapping needed %.5f seconds\n", MPI::Wtime() - first);
   }
@@ -1797,8 +1822,10 @@ namespace AMDiS {
   {
     FUNCNAME("MeshDistributor::createPeriodicMap()");
 
-    DofComm::LevelDataType &periodicDofs = dofComm.getPeriodicDofs();
+    TEST_EXIT(dofMaps.size())("No DOF mapping defined!\n");
 
+    DofComm::LevelDataType &periodicDofs = dofComm.getPeriodicDofs();
+    ComponentDofMap &dofMap = (*(dofMaps[0]))[feSpace];
     StdMpi<vector<int> > stdMpi(mpiComm, false);
 
     // === Each rank traverse its periodic boundaries and sends the DOF ===
@@ -1832,8 +1859,8 @@ namespace AMDiS {
       BoundaryType type = bound.type;
 
       for (unsigned int j = 0; j < dofs0.size(); j++) {
-        DegreeOfFreedom globalDof0 = dofMap[feSpace][*(dofs0[j])].global;
-        DegreeOfFreedom globalDof1 = dofMap[feSpace][*(dofs1[j])].global;
+        DegreeOfFreedom globalDof0 = dofMap[*(dofs0[j])].global;
+        DegreeOfFreedom globalDof1 = dofMap[*(dofs1[j])].global;
 
         if (!periodicMap.isPeriodicOnBound(feSpace, type, globalDof0))
           periodicMap.add(feSpace, type, globalDof0, globalDof1);
@@ -1858,7 +1885,7 @@ namespace AMDiS {
       // Send the global indices to the rank on the other side.
       stdMpi.getSendData(it->first).reserve(dofs.size());
       for (unsigned int i = 0; i < dofs.size(); i++)
-        stdMpi.getSendData(it->first).push_back(dofMap[feSpace][*(dofs[i])].global);
+        stdMpi.getSendData(it->first).push_back(dofMap[*(dofs[i])].global);
 
       // Receive from this rank the same number of dofs.
       stdMpi.recv(it->first, dofs.size());
@@ -1884,7 +1911,7 @@ namespace AMDiS {
     // Added the received DOFs to the mapping.
     for (unsigned int i = 0; i < dofs.size(); i++) {
-      int globalDofIndex = dofMap[feSpace][*(dofs[i])].global;
+      int globalDofIndex = dofMap[*(dofs[i])].global;
       int mapGlobalDofIndex = stdMpi.getRecvData(it->first)[i];
       BoundaryType type = types[i];
@@ -1917,7 +1944,7 @@ namespace AMDiS {
           boundIt->rankObj.el->getAllDofs(feSpace, boundIt->rankObj, dofs);
 
           for (unsigned int i = 0; i < dofs.size(); i++) {
-            DegreeOfFreedom globalDof = dofMap[feSpace][*dofs[i]].global;
+            DegreeOfFreedom globalDof = dofMap[*dofs[i]].global;
 
             std::set<BoundaryType>& assoc =
               periodicMap.getAssociations(feSpace, globalDof);
...
@@ -63,10 +63,20 @@ namespace AMDiS {
   public:
     ~MeshDistributor();
 
+    /// Initialization of mesh distributor.
     void initParallelization();
 
+    /// Clean up procedure for the mesh distributor and attached objects.
     void exitParallelization();
 
+    /** \brief
+     * Register a parallel DOF mapping. This DOF mapping object will than
+     * automatically updated by the mesh distributer after mesh changes.
+     *
+     * \param[in]  dofMap   Parallel DOF mapping object.
+     */
+    void registerDofMap(ParallelDofMapping &dofMap);
+
     /// Adds a DOFVector to the set of \ref interchangeVecs. Thus, this vector
     /// will be automatically interchanged between ranks when mesh is
     /// repartitioned.
@@ -132,49 +142,6 @@ namespace AMDiS {
       return mesh;
     }
 
-    /// Returns an FE space from \ref feSpaces.
-    inline const FiniteElemSpace* getFeSpace(unsigned int i = 0)
-    {
-      FUNCNAME("MeshDistributor::getFeSpace()");
-
-      TEST_EXIT_DBG(i < feSpaces.size())
-        ("Try to access FE space %d, but have only %d FE spaces!\n",
-         i, feSpaces.size());