Commit 576bb4eb authored by Thomas Witkowski's avatar Thomas Witkowski

Oops, for once a code refactoring that doesn't immediately end in 100 bugfixes. So now the whole parallel fun should also work for coupled problems without any tricks. But once again nobody wants to test it, so I'm off to lunch.
parent ecb93cc3
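
In short, this commit replaces the mesh distributor's two hard-wired DOF mappings (dofMap, dofMapSd) with a list of externally registered ParallelDofMapping objects. A minimal sketch of the new usage, assuming only the registerDofMap() interface introduced in the diff below; the setup function name and include paths are illustrative:

#include "parallel/MeshDistributor.h"
#include "parallel/ParallelDofMapping.h"

using namespace AMDiS;

// Hypothetical solver setup: instead of fetching the distributor's
// built-in mapping via the removed getDofMap(), each parallel solver
// now owns its ParallelDofMapping and registers it once.
void setupParallelSolver(MeshDistributor &meshDistributor,
                         ParallelDofMapping &interiorMap)
{
  // After registration, the mesh distributor keeps the local/global
  // numbering of this mapping up to date after every mesh change.
  meshDistributor.registerDofMap(interiorMap);
}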
@@ -328,13 +328,13 @@ namespace AMDiS {
}
/// Returns \ref feSpaces.
inline vector<const FiniteElemSpace*> getFeSpaces()
inline vector<const FiniteElemSpace*>& getFeSpaces()
{
return feSpaces;
}
/// Returns \ref componentSpaces.
inline vector<const FiniteElemSpace*> getComponentSpaces()
inline vector<const FiniteElemSpace*>& getComponentSpaces()
{
return componentSpaces;
}
@@ -89,7 +89,6 @@ namespace AMDiS {
MSG("nelem = %d\n", nelem);
// global number of nodes
ParallelDofMapping &dofMap = meshDistributor->getDofMap();
int nnod = dofMap[feSpace].nOverallDofs;
MSG("nnod = %d\n", nnod);
@@ -443,8 +442,6 @@ namespace AMDiS {
typedef traits::range_generator<row, Matrix>::type cursor_type;
typedef traits::range_generator<nz, cursor_type>::type icursor_type;
ParallelDofMapping &dofMap = meshDistributor->getDofMap();
for (cursor_type cursor = begin<row>(dmat->getBaseMatrix()),
cend = end<row>(dmat->getBaseMatrix()); cursor != cend; ++cursor) {
for (icursor_type icursor = begin<nz>(cursor), icend = end<nz>(cursor);
@@ -74,13 +74,9 @@ namespace AMDiS {
: problemStat(0),
initialized(false),
name("parallel"),
componentSpaces(0),
mesh(NULL),
refineManager(NULL),
info(10),
partitioner(NULL),
dofMap(FESPACE_WISE),
dofMapSd(FESPACE_WISE),
deserialized(false),
writeSerializationFile(false),
repartitioningAllowed(false),
@@ -149,8 +145,7 @@ namespace AMDiS {
TEST_EXIT(mpiSize > 1)
("Parallelization does not work with only one process!\n");
TEST_EXIT(componentSpaces.size() > 0)
TEST_EXIT(feSpaces.size() > 0)
("No FE space has been defined for the mesh distributor!\n");
TEST_EXIT(mesh)("No mesh has been defined for the mesh distributor!\n");
@@ -193,8 +188,9 @@ namespace AMDiS {
elObjDb.setData(partitionMap, levelData);
#if (DEBUG != 0)
TEST_EXIT_DBG(dofMaps.size())("No DOF mapping defined!\n");
ParallelDebug::writeDebugFile(feSpaces[feSpaces.size() - 1],
dofMap,
*(dofMaps[0]),
debugOutputDir + "mpi-dbg", "dat");
#endif
@@ -366,13 +362,25 @@ namespace AMDiS {
TEST_EXIT_DBG(probStat->getFeSpaces().size())
("No FE spaces in stationary problem!\n");
TEST_EXIT(componentSpaces.size() == 0)
("Parallelization of coupled problems is deactived at the moment!\n");
componentSpaces = probStat->getComponentSpaces();
feSpaces = probStat->getFeSpaces();
mesh = feSpaces[0]->getMesh();
info = probStat->getInfo();
// === Add all FE spaces from stationary problem. ===
vector<const FiniteElemSpace*> newFeSpaces = probStat->getFeSpaces();
for (int i = 0; i < static_cast<int>(newFeSpaces.size()); i++)
if (find(feSpaces.begin(), feSpaces.end(), newFeSpaces[i]) ==
feSpaces.end())
feSpaces.push_back(newFeSpaces[i]);
// === Add mesh of stationary problem and create a corresponding ===
// === refinement manager object. ===
if (mesh != NULL) {
TEST_EXIT(mesh == probStat->getMesh())
("Does not yet support for different meshes!\n");
} else {
mesh = probStat->getMesh();
}
switch (mesh->getDim()) {
case 2:
@@ -388,6 +396,9 @@ namespace AMDiS {
partitioner->setMesh(mesh);
// === Check whether the stationary problem should be serialized. ===
// Create parallel serialization file writer, if needed.
int writeSerialization = 0;
Parameters::get(probStat->getName() + "->output->write serialization",
@@ -407,6 +418,9 @@ namespace AMDiS {
writeSerializationFile = true;
}
// === Check whether the stationary problem should be deserialized. ===
int readSerialization = 0;
Parameters::get(probStat->getName() + "->input->read serialization",
readSerialization);
@@ -455,9 +469,9 @@ namespace AMDiS {
problemStat.push_back(probStat);
// If the mesh distributor is already initialized, don't forget to set rank
// DOFs object to the matrices and vectors of the added stationary problem,
// and to remove the periodic boundary conditions on these objects.
// === If the mesh distributor is already initialized, don't forget to ===
// === remove the periodic boundary conditions on these objects. ===
if (initialized)
removePeriodicBoundaryConditions(probStat);
@@ -479,6 +493,18 @@ namespace AMDiS {
{}
void MeshDistributor::registerDofMap(ParallelDofMapping &dofMap)
{
FUNCNAME("MeshDistributor::registerDofMap()");
TEST_EXIT(find(dofMaps.begin(), dofMaps.end(), &dofMap) ==
dofMaps.end())
("Parallel DOF mapping already registerd in mesh distributor object!\n");
dofMaps.push_back(&dofMap);
}
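
Note the guard above: a mapping may be registered only once; a second registerDofMap() call with the same object aborts via TEST_EXIT. A hypothetical misuse, assuming the FESPACE_WISE construction mode seen elsewhere in this diff:

ParallelDofMapping myMap(FESPACE_WISE);
meshDistributor.registerDofMap(myMap);  // ok, appended to dofMaps
meshDistributor.registerDofMap(myMap);  // aborts: already registered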
void MeshDistributor::testForMacroMesh()
{
FUNCNAME("MeshDistributor::testForMacroMesh()");
@@ -962,8 +988,7 @@ namespace AMDiS {
}
MPI::COMM_WORLD.Barrier();
INFO(info, 8)("Parallel mesh adaption needed %.5f seconds\n",
MPI::Wtime() - first);
MSG("Parallel mesh adaption needed %.5f seconds\n", MPI::Wtime() - first);
#if (DEBUG != 0)
debug::writeMesh(feSpaces[0], -1, debugOutputDir + "mesh");
@@ -1178,6 +1203,10 @@ namespace AMDiS {
map<int, map<const FiniteElemSpace*, DofContainer> > &data,
map<const FiniteElemSpace*, map<int, const DegreeOfFreedom*> > &dofIndexMap)
{
FUNCNAME("MeshDistributor::deserialize()");
ERROR_EXIT("Must be reimplemented!\n");
#if 0
data.clear();
int mapSize = 0;
@@ -1191,6 +1220,7 @@ namespace AMDiS {
data[rank][componentSpaces[j]],
dofIndexMap[componentSpaces[j]]);
}
#endif
}
@@ -1298,6 +1328,8 @@ namespace AMDiS {
// === Run mesh partitioner to calculate a new mesh partitioning. ===
TEST_EXIT(dofMaps.size())("No DOF mapping defined!\n");
ParallelDofMapping &dofMap = *(dofMaps[0]);
partitioner->setLocalGlobalDofMap(&(dofMap[feSpaces[0]].getMap()));
bool partitioningSucceed =
partitioner->partition(elemWeights, ADAPTIVE_REPART);
@@ -1648,33 +1680,25 @@ namespace AMDiS {
debug::createSortedDofs(mesh, elMap);
#endif
int nLevels = levelData.getLevelNumber();
TEST_EXIT_DBG(nLevels >= 1)("Should not happen!\n");
createBoundaryDofs();
dofMap.init(levelData, componentSpaces, feSpaces);
dofMap.setMpiComm(levelData.getMpiComm(0), 0);
dofMap.setDofComm(dofComm);
dofMap.clear();
if (nLevels > 1) {
dofMapSd.init(levelData, componentSpaces, feSpaces);
dofMapSd.setMpiComm(levelData.getMpiComm(1), 1);
dofMapSd.setDofComm(dofCommSd);
dofMapSd.clear();
}
// === Update all registered DOF mapping objects. ===
createBoundaryDofs();
TEST_EXIT(dofMaps.size())("No DOF mapping defined!\n");
for (unsigned int i = 0; i < feSpaces.size(); i++)
updateLocalGlobalNumbering(dofMap, dofComm, feSpaces[i]);
dofMap.update();
for (int i = 0; i < static_cast<int>(dofMaps.size()); i++) {
vector<const FiniteElemSpace*>& dofMapSpaces = dofMaps[i]->getFeSpaces();
if (nLevels > 1) {
for (unsigned int i = 0; i < feSpaces.size(); i++)
updateLocalGlobalNumbering(dofMapSd, dofCommSd, feSpaces[i]);
dofMapSd.update();
dofMaps[i]->clear();
for (int j = 0; j < static_cast<int>(dofMapSpaces.size()); j++)
updateLocalGlobalNumbering(*(dofMaps[i]), dofMapSpaces[j]);
dofMaps[i]->update();
}
// === Update DOF admins due to new number of DOFs. ===
lastMeshChangeIndex = mesh->getChangeIndex();
@@ -1684,30 +1708,25 @@ namespace AMDiS {
ParallelDebug::testDofContainerCommunication(*this);
MSG("------------- Debug information -------------\n");
MSG("| number of levels: %d\n", nLevels);
MSG("| number of levels: %d\n", levelData.getLevelNumber());
MSG("| number of FE spaces: %d\n", feSpaces.size());
for (unsigned int i = 0; i < feSpaces.size(); i++) {
MSG("| FE space = %d (pointer adr %p):\n", i, feSpaces[i]);
MSG("| nRankDofs = %d\n", dofMap[feSpaces[i]].nRankDofs);
MSG("| nOverallDofs = %d\n", dofMap[feSpaces[i]].nOverallDofs);
MSG("| rStartDofs = %d\n", dofMap[feSpaces[i]].rStartDofs);
}
for (int i = 0; i < static_cast<int>(dofMaps.size()); i++) {
vector<const FiniteElemSpace*>& dofMapSpaces = dofMaps[i]->getFeSpaces();
if (nLevels > 1) {
for (unsigned int i = 0; i < feSpaces.size(); i++) {
MSG("| FE space = %d:\n", i);
MSG("| nRankDofs = %d\n", dofMapSd[feSpaces[i]].nRankDofs);
MSG("| nOverallDofs = %d\n", dofMapSd[feSpaces[i]].nOverallDofs);
MSG("| rStartDofs = %d\n", dofMapSd[feSpaces[i]].rStartDofs);
for (int j = 0; j < static_cast<int>(dofMapSpaces.size()); j++) {
MSG("| FE space = %d (pointer adr %p):\n", j, feSpaces[j]);
MSG("| nRankDofs = %d\n", (*(dofMaps[i]))[feSpaces[j]].nRankDofs);
MSG("| nOverallDofs = %d\n", (*(dofMaps[i]))[feSpaces[j]].nOverallDofs);
MSG("| rStartDofs = %d\n", (*(dofMaps[i]))[feSpaces[j]].rStartDofs);
}
}
// debug::writeElementIndexMesh(mesh, debugOutputDir + "elementIndex-" +
// lexical_cast<string>(mpiRank) + ".vtu");
ParallelDebug::writeDebugFile(feSpaces[feSpaces.size() - 1],
dofMap,
*(dofMaps[0]),
debugOutputDir + "mpi-dbg", "dat");
debug::testSortedDofs(mesh, elMap);
@@ -1718,16 +1737,20 @@ namespace AMDiS {
ParallelDebug::testGlobalIndexByCoords(*this);
}
#else
for (unsigned int i = 0; i < feSpaces.size(); i++)
MSG("FE space %d: nRankDofs = %d nOverallDofs = %d\n",
i, dofMap[feSpaces[i]].nRankDofs,
dofMap[feSpaces[i]].nOverallDofs);
for (int i = 0; i < static_cast<int>(dofMaps.size()); i++) {
vector<const FiniteElemSpace*>& dofMapSpaces = dofMaps[i]->getFeSpaces();
for (int j = 0; j < static_cast<int>(dofMapSpaces.size()); j++)
MSG("FE space %d: nRankDofs = %d nOverallDofs = %d\n", j,
(*(dofMaps[i]))[feSpaces[j]].nRankDofs,
(*(dofMaps[i]))[feSpaces[j]].nOverallDofs);
}
int tmp = 0;
Parameters::get(name + "->write parallel debug file", tmp);
if (tmp)
ParallelDebug::writeDebugFile(feSpaces[feSpaces.size() - 1],
dofMap,
*(dofMaps[0]),
debugOutputDir + "mpi-dbg", "dat");
#endif
@@ -1736,12 +1759,13 @@ namespace AMDiS {
}
void MeshDistributor::updateLocalGlobalNumbering(ParallelDofMapping &dmap,
DofComm &dcom,
void MeshDistributor::updateLocalGlobalNumbering(ParallelDofMapping &dofMap,
const FiniteElemSpace *feSpace)
{
FUNCNAME("MeshDistributor::updateLocalGlobalNumbering()");
DofComm &dcom = dofMap.getDofComm();
// === Get all DOFs in ranks partition. ===
std::set<const DegreeOfFreedom*> rankDofSet;
@@ -1750,7 +1774,9 @@ namespace AMDiS {
DofContainer rankDofs(rankDofSet.begin(), rankDofSet.end());
sort(rankDofs.begin(), rankDofs.end(), cmpDofsByValue);
// === Traverse interior boundaries and get all DOFs on them. ===
DofContainerSet nonRankDofs;
for (DofComm::Iterator it(dcom.getRecvDofs(), 0, feSpace);
!it.end(); it.nextRank())
@@ -1759,11 +1785,11 @@ namespace AMDiS {
for (unsigned int i = 0; i < rankDofs.size(); i++)
if (nonRankDofs.count(rankDofs[i]) == 0)
dmap[feSpace].insertRankDof(*(rankDofs[i]));
dofMap[feSpace].insertRankDof(*(rankDofs[i]));
for (DofContainerSet::iterator it = nonRankDofs.begin();
it != nonRankDofs.end(); ++it)
dmap[feSpace].insertNonRankDof(**it);
dofMap[feSpace].insertNonRankDof(**it);
}
@@ -1788,8 +1814,7 @@ namespace AMDiS {
createPeriodicMap(feSpaces[i]);
// MPI::COMM_WORLD.Barrier();
INFO(info, 8)("Creation of periodic mapping needed %.5f seconds\n",
MPI::Wtime() - first);
MSG("Creation of periodic mapping needed %.5f seconds\n", MPI::Wtime() - first);
}
@@ -1797,8 +1822,10 @@ namespace AMDiS {
{
FUNCNAME("MeshDistributor::createPeriodicMap()");
DofComm::LevelDataType &periodicDofs = dofComm.getPeriodicDofs();
TEST_EXIT(dofMaps.size())("No DOF mapping defined!\n");
DofComm::LevelDataType &periodicDofs = dofComm.getPeriodicDofs();
ComponentDofMap &dofMap = (*(dofMaps[0]))[feSpace];
StdMpi<vector<int> > stdMpi(mpiComm, false);
// === Each rank traverse its periodic boundaries and sends the DOF ===
@@ -1832,8 +1859,8 @@ namespace AMDiS {
BoundaryType type = bound.type;
for (unsigned int j = 0; j < dofs0.size(); j++) {
DegreeOfFreedom globalDof0 = dofMap[feSpace][*(dofs0[j])].global;
DegreeOfFreedom globalDof1 = dofMap[feSpace][*(dofs1[j])].global;
DegreeOfFreedom globalDof0 = dofMap[*(dofs0[j])].global;
DegreeOfFreedom globalDof1 = dofMap[*(dofs1[j])].global;
if (!periodicMap.isPeriodicOnBound(feSpace, type, globalDof0))
periodicMap.add(feSpace, type, globalDof0, globalDof1);
@@ -1858,7 +1885,7 @@ namespace AMDiS {
// Send the global indices to the rank on the other side.
stdMpi.getSendData(it->first).reserve(dofs.size());
for (unsigned int i = 0; i < dofs.size(); i++)
stdMpi.getSendData(it->first).push_back(dofMap[feSpace][*(dofs[i])].global);
stdMpi.getSendData(it->first).push_back(dofMap[*(dofs[i])].global);
// Receive from this rank the same number of dofs.
stdMpi.recv(it->first, dofs.size());
@@ -1884,7 +1911,7 @@ namespace AMDiS {
// Add the received DOFs to the mapping.
for (unsigned int i = 0; i < dofs.size(); i++) {
int globalDofIndex = dofMap[feSpace][*(dofs[i])].global;
int globalDofIndex = dofMap[*(dofs[i])].global;
int mapGlobalDofIndex = stdMpi.getRecvData(it->first)[i];
BoundaryType type = types[i];
@@ -1917,7 +1944,7 @@ namespace AMDiS {
boundIt->rankObj.el->getAllDofs(feSpace, boundIt->rankObj, dofs);
for (unsigned int i = 0; i < dofs.size(); i++) {
DegreeOfFreedom globalDof = dofMap[feSpace][*dofs[i]].global;
DegreeOfFreedom globalDof = dofMap[*dofs[i]].global;
std::set<BoundaryType>& assoc =
periodicMap.getAssociations(feSpace, globalDof);
@@ -63,10 +63,20 @@ namespace AMDiS {
public:
~MeshDistributor();
/// Initialization of mesh distributor.
void initParallelization();
/// Clean up procedure for the mesh distributor and attached objects.
void exitParallelization();
/** \brief
* Register a parallel DOF mapping. This DOF mapping object will then
* be updated automatically by the mesh distributor after mesh changes.
*
* \param[in] dofMap Parallel DOF mapping object.
*/
void registerDofMap(ParallelDofMapping &dofMap);
/// Adds a DOFVector to the set of \ref interchangeVecs. Thus, this vector
/// will be automatically interchanged between ranks when the mesh is
/// repartitioned.
@@ -132,49 +142,6 @@ namespace AMDiS {
return mesh;
}
/// Returns an FE space from \ref feSpaces.
inline const FiniteElemSpace* getFeSpace(unsigned int i = 0)
{
FUNCNAME("MeshDistributor::getFeSpace()");
TEST_EXIT_DBG(i < feSpaces.size())
("Try to access FE space %d, but have only %d FE spaces!\n",
i, feSpaces.size());
return feSpaces[i];
}
/// Returns all FE spaces, thus \ref feSpaces.
inline vector<const FiniteElemSpace*>& getFeSpaces()
{
return feSpaces;
}
inline const FiniteElemSpace* getComponentSpace(unsigned int i = 0)
{
FUNCNAME("MeshDistributor::getFeSpace()");
TEST_EXIT_DBG(i < componentSpaces.size())("Should not happen!\n");
return componentSpaces[i];
}
inline vector<const FiniteElemSpace*>& getComponentSpaces()
{
return componentSpaces;
}
/// Returns the DOF mapping object, \ref dofMap.
inline ParallelDofMapping& getDofMap()
{
return dofMap;
}
inline ParallelDofMapping& getDofMapSd()
{
return dofMapSd;
}
/// Returns the periodic mapping handler, \ref periodicMap.
inline PeriodicMap& getPeriodicMap()
{
@@ -360,7 +327,6 @@ namespace AMDiS {
/// Updates the local and global DOF numbering after the mesh has been
/// changed.
void updateLocalGlobalNumbering(ParallelDofMapping &dmap,
DofComm &dcom,
const FiniteElemSpace *feSpace);
protected:
@@ -501,14 +467,12 @@ namespace AMDiS {
/// Name of the problem (as used in the init files)
string name;
/// Finite element spaces of the problem.
vector<const FiniteElemSpace*> componentSpaces;
/// Set of all different FE spaces.
vector<const FiniteElemSpace*> feSpaces;
/// Mesh of the problem.
/// Pointer to the only mesh. Note that the multi-mesh method is not yet
/// supported, thus even for coupled problems all subproblems must be
/// defined on the same mesh.
Mesh *mesh;
/// A refinement manager that should be used on the mesh. It is used to
@@ -516,9 +480,6 @@ namespace AMDiS {
/// elements on the other side of the interior boundary.
RefinementManager *refineManager;
/// Info level.
int info;
/// Pointer to a mesh partitioner that is used to partition the mesh to
/// the ranks.
MeshPartitioner *partitioner;
@@ -531,11 +492,6 @@ namespace AMDiS {
/// macro element.
map<int, int> partitionMap;
/// Mapping object to map from local DOF indices to global ones.
ParallelDofMapping dofMap;
ParallelDofMapping dofMapSd;
/// Database to store and query all sub-objects of all elements of the
/// macro mesh.
ElementObjectDatabase elObjDb;
@@ -608,6 +564,8 @@ namespace AMDiS {
/// all boundary DOFs.
vector<map<const FiniteElemSpace*, BoundaryDofInfo> > boundaryDofInfo;
/// Stores information about the hierarchical decomposition of the mesh
/// levels. It is used to specify multilevel solver methods.
MeshLevelData levelData;
/// If there is no mesh adaptivity, the mesh distributor can remove some
@@ -616,6 +574,10 @@ namespace AMDiS {
/// is set to true, and thus no special assumptions are made.
bool meshAdaptivity;
/// Set of all parallel DOF mapping objects that are registered by parallel
/// solver objects and must be updated automatically after mesh changes.
vector<ParallelDofMapping*> dofMaps;
public:
static bool sebastianMode;
@@ -115,8 +115,6 @@ namespace AMDiS {
if (checkMeshChange()) {
// Mesh has been changed, recompute interior DOF mapping.
vector<const FiniteElemSpace*> feSpaces =
meshDistributor->getComponentSpaces();
interiorMap->setComputeMatIndex(!localMatrix);
interiorMap->update();
@@ -462,7 +462,7 @@ namespace AMDiS {
mpi::globalAdd(foundError);
TEST_EXIT(foundError == 0)("Error found on at least one rank!\n");
INFO(pdb.info, 8)("Test common dofs needed %.5f seconds\n", TIME_USED(first, clock()));
MSG("Test common dofs needed %.5f seconds\n", TIME_USED(first, clock()));
}
@@ -481,7 +481,7 @@ namespace AMDiS {
DOFIterator<WorldVector<double> > it(&coords, USED_DOFS);
for (it.reset(); !it.end(); ++it) {
coordsToIndex[(*it)] = pdb.dofMap[feSpace][it.getDOFIndex()].global;
coordsToIndex[(*it)] = (*(pdb.dofMaps[0]))[feSpace][it.getDOFIndex()].global;
// MSG(" CHECK FOR DOF %d AT COORDS %f %f %f\n",
// coordsToIndex[(*it)], (*it)[0], (*it)[1], (*it)[2]);
}
@@ -243,6 +243,7 @@ namespace AMDiS {
levelData = &ldata;
globalMapping = b;
componentSpaces = fe;
feSpaces = uniqueFe;
data->init(fe, uniqueFe, globalMapping, ldata);
}
@@ -151,7 +151,8 @@ namespace AMDiS {
{
FUNCNAME("ComponentDofMap::insertRankDof()");
TEST_EXIT_DBG(dofMap.count(dof0) == 0)("Should not happen!\n");
TEST_EXIT_DBG(dofMap.count(dof0) == 0)
("DOF %d is already defined in mapping!\n", dof0);
dofMap[dof0].local = dof1;
nLocalDofs++;
@@ -637,17 +638,17 @@ namespace AMDiS {
* Initialize the parallel DOF mapping.
*
* \param[in] m MPI communicator.
* \param[in] fe The FE spaces of all components of the
* \param[in] componentSpaces The FE spaces of all components of the
* PDE to be solved.
* \param[in] uniqueFe Unique list of FE spaces. Thus, two
* \param[in] feSpaces Unique list of FE spaces. Thus, two
* arbitrary elements of this list are always
* different.
* \param[in] globalMapping If true, at least one rank's mapping
* contains DOFs that are not owned by the rank.
*/
void init(MeshLevelData& mld,
vector<const FiniteElemSpace*> &fe,
vector<const FiniteElemSpace*> &uniqueFe,
vector<const FiniteElemSpace*> &componentSpaces,
vector<const FiniteElemSpace*> &feSpaces,
bool globalMapping = true);
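
The split between the two lists matters for vector-valued problems: the same FE space appears once per component in componentSpaces, but only once in feSpaces. A hypothetical illustration for a 2D velocity/pressure problem; the spaces v and p and the levelData object are assumed to exist:

vector<const FiniteElemSpace*> componentSpaces;
componentSpaces.push_back(v);  // component 0: x-velocity
componentSpaces.push_back(v);  // component 1: y-velocity
componentSpaces.push_back(p);  // component 2: pressure

vector<const FiniteElemSpace*> feSpaces;  // unique list: v and p only once
feSpaces.push_back(v);
feSpaces.push_back(p);

dofMap.init(levelData, componentSpaces, feSpaces, true);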
/// In the case of having only one FE space, this init function can be used.
@@ -774,6 +775,12 @@ namespace AMDiS {
return dofToMatIndex.get(ithComponent, d) - rStartDofs;
}
/// Returns the set of unique FE spaces.
inline vector<const FiniteElemSpace*>& getFeSpaces()
{
return feSpaces;
}