Commit 37e0d38c authored by Thomas Witkowski

Further changes to the DofCommunicator: MeshDistributor's separate sendDofs, recvDofs and periodicDofs objects are merged into a single DofComm member (dofComm), which now holds its own send, receive and periodic LevelDataType containers; DofComm::Iterator is constructed from a LevelDataType instead of a DofComm.

parent 78d5dc6b
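
To make the interface change concrete, here is a minimal before/after sketch of a typical call site (assembled only from lines visible in the diff below; the surrounding MeshDistributor context is assumed):

    // Before: each communication purpose had its own DofComm object.
    for (DofComm::Iterator it(sendDofs, feSpace); !it.end(); it.nextRank())
      for (; !it.endDofIter(); it.nextDof())
        sendCoords[it.getRank()].push_back(coords[it.getDofIndex()]);

    // After: a single DofComm holds the send, receive and periodic containers,
    // and the Iterator is constructed from one of its LevelDataType members.
    for (DofComm::Iterator it(dofComm.getSendDofs(), feSpace); !it.end(); it.nextRank())
      for (; !it.endDofIter(); it.nextDof())
        sendCoords[it.getRank()].push_back(coords[it.getDofIndex()]);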
@@ -18,26 +18,15 @@ namespace AMDiS {
using namespace std;
int DofComm::getNumberDofs(int level, const FiniteElemSpace *feSpace)
void DofComm::create(InteriorBoundary &boundary)
{
FUNCNAME("DofComm::getNumberDofs()");
TEST_EXIT_DBG(level < data.size())("Should not happen!\n");
DofContainerSet dofs;
for (DataIter rankIt = data[level].begin();
rankIt != data[level].end(); ++rankIt)
for (FeMapIter feIt = rankIt->second.begin();
feIt != rankIt->second.end(); ++feIt)
if (feIt->first == feSpace)
dofs.insert(feIt->second.begin(), feIt->second.end());
return static_cast<int>(dofs.size());
createContainer(boundary.getOwn(), sendDofs);
createContainer(boundary.getOther(), recvDofs);
}
void DofComm::create(RankToBoundMap &boundary)
void DofComm::createContainer(RankToBoundMap &boundary,
LevelDataType &data)
{
// === Fill data. ===
@@ -72,11 +61,32 @@ namespace AMDiS {
}
int DofComm::getNumberDofs(LevelDataType &data,
int level,
const FiniteElemSpace *feSpace)
{
FUNCNAME("DofComm::getNumberDofs()");
TEST_EXIT_DBG(level < data.size())("Should not happen!\n");
DofContainerSet dofs;
for (DataIter rankIt = data[level].begin();
rankIt != data[level].end(); ++rankIt)
for (FeMapIter feIt = rankIt->second.begin();
feIt != rankIt->second.end(); ++feIt)
if (feIt->first == feSpace)
dofs.insert(feIt->second.begin(), feIt->second.end());
return static_cast<int>(dofs.size());
}
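
With the container passed in explicitly, a caller now selects which data set to count DOFs in (a hedged sketch; this exact call site does not appear in the diff):

    // Hypothetical use, assuming a DofComm instance named dofComm:
    // number of distinct DOFs of feSpace that are sent on level 0.
    int nSendDofs = dofComm.getNumberDofs(dofComm.getSendDofs(), 0, feSpace);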
bool DofComm::Iterator::setNextFeMap()
{
FUNCNAME("DofComm::Iterator::setNextFeMap()");
if (dataIter != dofComm.data[traverseLevel].end()) {
if (dataIter != data[traverseLevel].end()) {
TEST_EXIT_DBG(dataIter->second.size())("Should not happen!\n");
feMapIter = dataIter->second.begin();
@@ -36,7 +36,9 @@ namespace AMDiS {
{
public:
DofComm()
: data(1)
: recvDofs(1),
sendDofs(1),
periodicDofs(0)
{}
typedef map<const FiniteElemSpace*, DofContainer> FeMapType;
@@ -46,33 +48,76 @@ namespace AMDiS {
// meshLevel: map[rank -> map[feSpace -> DofContainer]]
typedef vector<DataType> LevelDataType;
inline DofContainer& getDofContainer(int rank,
const FiniteElemSpace *feSpace,
int level = 0)
{
return data[level][rank][feSpace];
}
void init(int n, vector<const FiniteElemSpace*> &fe)
{
FUNCNAME("DofComm::init()");
TEST_EXIT_DBG(n >= 1)("Should not happen!\n");
nLevel = n;
feSpaces = fe;
data.clear();
data.resize(nLevel);
sendDofs.clear();
recvDofs.clear();
periodicDofs.clear();
sendDofs.resize(nLevel);
recvDofs.resize(nLevel);
periodicDofs.resize(nLevel);
}
void create(InteriorBoundary &boundary);
LevelDataType& getSendDofs()
{
return sendDofs;
}
LevelDataType& getRecvDofs()
{
return recvDofs;
}
DataType& getData(int level = 0)
LevelDataType& getPeriodicDofs()
{
return data[level];
return periodicDofs;
}
int getNumberDofs(int level, const FiniteElemSpace *feSpace);
// Writes all data of this object to an output stream.
void serialize(ostream &out)
{
ERROR_EXIT("MUST BE IMPLEMENTED!\n");
}
void create(RankToBoundMap &boundary);
// Reads the object data from an input stream.
void deserialize(istream &in,
map<const FiniteElemSpace*, map<int, const DegreeOfFreedom*> > dofIndexMap)
{
ERROR_EXIT("MUST BE IMPLEMENTED!\n");
}
int getNumberDofs(LevelDataType &data,
int level,
const FiniteElemSpace *feSpace);
protected:
void createContainer(RankToBoundMap &boundary, LevelDataType &data);
protected:
LevelDataType data;
/// This map contains for each rank the list of DOFs the current rank must
/// send to exchange solution DOFs at the interior boundaries.
LevelDataType sendDofs;
/// This map contains on each rank the list of DOFs from which the current
/// rank will receive DOF values (i.e., these are all DOFs at an interior
/// boundary). The DOF indices are given in the rank's local numbering.
LevelDataType recvDofs;
/// This map contains on each rank a list of DOFs along the interior
/// boundaries to communicate with other ranks. The DOF indices are given in
/// the rank's local numbering. Periodic boundaries within one subdomain are
/// not considered here.
LevelDataType periodicDofs;
int nLevel;
@@ -84,9 +129,9 @@ namespace AMDiS {
class Iterator
{
public:
Iterator(DofComm &dc,
Iterator(LevelDataType &d,
const FiniteElemSpace *fe = NULL)
: dofComm(dc),
: data(d),
dofCounter(-1),
traverseFeSpace(fe),
traverseLevel(0)
@@ -94,10 +139,10 @@ namespace AMDiS {
goFirst();
}
Iterator(DofComm &dc,
Iterator(LevelDataType &d,
int level,
const FiniteElemSpace *fe = NULL)
: dofComm(dc),
: data(d),
dofCounter(-1),
traverseFeSpace(fe),
traverseLevel(level)
@@ -107,7 +152,7 @@ namespace AMDiS {
inline bool end()
{
return (dataIter == dofComm.data[traverseLevel].end());
return (dataIter == data[traverseLevel].end());
}
inline void nextRank()
@@ -200,7 +245,7 @@ namespace AMDiS {
protected:
void goFirst()
{
dataIter = dofComm.data[traverseLevel].begin();
dataIter = data[traverseLevel].begin();
while (setNextFeMap() == false)
++dataIter;
@@ -209,7 +254,7 @@ namespace AMDiS {
bool setNextFeMap();
protected:
DofComm &dofComm;
LevelDataType &data;
DofComm::DataIter dataIter;
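
Taken together, the reworked class is used roughly as follows (a sketch assembled only from calls visible in this commit; mesh, boundary and FE space setup are assumed):

    DofComm dofComm;
    dofComm.init(levelData.getLevelNumber(), feSpaces);  // one send/recv/periodic data set per level
    dofComm.create(intBoundary);                         // fills sendDofs from getOwn(), recvDofs from getOther()

    // Traverse the DOFs this rank receives on level 0 for one FE space.
    for (DofComm::Iterator it(dofComm.getRecvDofs(), 0, feSpace); !it.end(); it.nextRank())
      for (; !it.endDofIter(); it.nextDof())
        dofMap[feSpace].insertNonRankDof(0, it.getDofIndex());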
@@ -511,7 +511,8 @@ namespace AMDiS {
StdMpi<vector<double> > stdMpi(mpiComm);
for (DofComm::Iterator it(sendDofs); !it.end(); it.nextRank()) {
for (DofComm::Iterator it(dofComm.getSendDofs());
!it.end(); it.nextRank()) {
vector<double> dofs;
for (int i = 0; i < vec.getSize(); i++) {
@@ -524,12 +525,12 @@ namespace AMDiS {
stdMpi.send(it.getRank(), dofs);
}
for (DofComm::Iterator it(recvDofs); !it.end(); it.nextRank())
for (DofComm::Iterator it(dofComm.getRecvDofs()); !it.end(); it.nextRank())
stdMpi.recv(it.getRank());
stdMpi.startCommunication();
for (DofComm::Iterator it(recvDofs); !it.end(); it.nextRank()) {
for (DofComm::Iterator it(dofComm.getRecvDofs()); !it.end(); it.nextRank()) {
int counter = 0;
for (int i = 0; i < vec.getSize(); i++) {
@@ -659,9 +660,11 @@ namespace AMDiS {
FUNCNAME("MeshDistributor::getAllBoundaryDofs()");
DofContainerSet dofSet;
for (DofComm::Iterator it(sendDofs, level, feSpace); !it.end(); it.nextRank())
for (DofComm::Iterator it(dofComm.getSendDofs(), level, feSpace);
!it.end(); it.nextRank())
dofSet.insert(it.getDofs().begin(), it.getDofs().end());
for (DofComm::Iterator it(recvDofs, level, feSpace); !it.end(); it.nextRank())
for (DofComm::Iterator it(dofComm.getRecvDofs(), level, feSpace);
!it.end(); it.nextRank())
dofSet.insert(it.getDofs().begin(), it.getDofs().end());
dofs.clear();
@@ -1533,14 +1536,8 @@ namespace AMDiS {
{
FUNCNAME("MeshDistributor::createBoundaryDofs()");
int nLevels = levelData.getLevelNumber();
TEST_EXIT_DBG(nLevels >= 1)("Should not happen!\n");
sendDofs.init(nLevels, feSpaces);
sendDofs.create(intBoundary.getOwn());
recvDofs.init(nLevels, feSpaces);
recvDofs.create(intBoundary.getOther());
dofComm.init(levelData.getLevelNumber(), feSpaces);
dofComm.create(intBoundary);
createBoundaryDofInfo();
}
@@ -1625,7 +1622,7 @@ namespace AMDiS {
TEST_EXIT_DBG(nLevels >= 1)("Should not happen!\n");
dofMap.init(levelData, feSpaces, feSpaces, true, true);
dofMap.setDofComm(sendDofs, recvDofs);
dofMap.setDofComm(dofComm);
dofMap.clear();
createBoundaryDofs();
@@ -1684,7 +1681,8 @@ namespace AMDiS {
int nLevels = levelData.getLevelNumber();
for (int level = 0; level < nLevels; level++) {
DofContainerSet nonRankDofs;
for (DofComm::Iterator it(recvDofs, level, feSpace); !it.end(); it.nextRank())
for (DofComm::Iterator it(dofComm.getRecvDofs(), level, feSpace);
!it.end(); it.nextRank())
for (; !it.endDofIter(); it.nextDof())
nonRankDofs.insert(it.getDof());
@@ -1692,7 +1690,8 @@ namespace AMDiS {
if (nonRankDofs.count(rankDofs[i]) == 0)
dofMap[feSpace].insertRankDof(level, *(rankDofs[i]));
for (DofComm::Iterator it(recvDofs, level, feSpace); !it.end(); it.nextRank())
for (DofComm::Iterator it(dofComm.getRecvDofs(), level, feSpace);
!it.end(); it.nextRank())
for (; !it.endDofIter(); it.nextDof())
dofMap[feSpace].insertNonRankDof(level, it.getDofIndex());
}
@@ -1702,9 +1701,7 @@ namespace AMDiS {
lastMeshChangeIndex = mesh->getChangeIndex();
#if (DEBUG != 0)
ParallelDebug::testDofContainerCommunication(*this,
sendDofs.getData(),
recvDofs.getData());
ParallelDebug::testDofContainerCommunication(*this);
#endif
}
@@ -1714,13 +1711,9 @@ namespace AMDiS {
FUNCNAME("MeshDistributor::createPeriodicMap()");
// Clear all periodic DOF mappings calculated before. We do it from scratch.
periodicDofs.init(levelData.getLevelNumber(), feSpaces);
periodicMap.clear();
// If there are no periodic boundaries, return. Note that periodicDofs and
// periodicMap must still be cleared first: after repartitioning, a subdomain
// that had periodic boundaries before may no longer have any.
// If there are no periodic boundaries, return.
if (!intBoundary.hasPeriodic())
return;
@@ -1736,6 +1729,8 @@ namespace AMDiS {
{
FUNCNAME("MeshDistributor::createPeriodicMap()");
DofComm::LevelDataType &periodicDofs = dofComm.getPeriodicDofs();
StdMpi<vector<int> > stdMpi(mpiComm, false);
// === Each rank traverses its periodic boundaries and sends the DOF ===
@@ -1781,7 +1776,7 @@ namespace AMDiS {
// Here we have a periodic boundary between two ranks.
// Create DOF indices on the boundary.
DofContainer& dofs = periodicDofs.getDofContainer(it->first, feSpace);
DofContainer& dofs = periodicDofs[0][it->first][feSpace];
for (vector<AtomicBoundary>::iterator boundIt = it->second.begin();
boundIt != it->second.end(); ++boundIt) {
@@ -1814,7 +1809,7 @@ namespace AMDiS {
for (RankToBoundMap::iterator it = intBoundary.getPeriodic().begin();
it != intBoundary.getPeriodic().end(); ++it) {
DofContainer& dofs = periodicDofs.getDofContainer(it->first, feSpace);
DofContainer& dofs = periodicDofs[0][it->first][feSpace];
vector<int>& types = rankToDofType[it->first];
TEST_EXIT_DBG(dofs.size() == types.size())("Should not happen!\n");
@@ -1934,8 +1929,7 @@ namespace AMDiS {
intBoundary.serialize(out);
serialize(out, sendDofs.getData());
serialize(out, recvDofs.getData());
dofComm.serialize(out);
// === Serialize FE space dependent data ===
@@ -1993,8 +1987,7 @@ namespace AMDiS {
intBoundary.deserialize(in, elIndexMap);
deserialize(in, sendDofs.getData(), dofIndexMap);
deserialize(in, recvDofs.getData(), dofIndexMap);
dofComm.deserialize(in, dofIndexMap);
// === Deserialize FE space dependent data ===
@@ -157,19 +157,9 @@ namespace AMDiS {
return periodicMap;
}
DofComm& getSendDofs()
DofComm& getDofComm()
{
return sendDofs;
}
DofComm& getRecvDofs()
{
return recvDofs;
}
DofComm& getPeriodicDofs()
{
return periodicDofs;
return dofComm;
}
inline long getLastMeshChangeIndex()
@@ -220,7 +210,8 @@ namespace AMDiS {
const FiniteElemSpace *fe = vec.getFeSpace();
for (DofComm::Iterator it(sendDofs, fe); !it.end(); it.nextRank()) {
for (DofComm::Iterator it(dofComm.getSendDofs(), fe);
!it.end(); it.nextRank()) {
vector<T> dofs;
dofs.reserve(it.getDofs().size());
@@ -230,12 +221,14 @@ namespace AMDiS {
stdMpi.send(it.getRank(), dofs);
}
for (DofComm::Iterator it(recvDofs); !it.end(); it.nextRank())
for (DofComm::Iterator it(dofComm.getRecvDofs());
!it.end(); it.nextRank())
stdMpi.recv(it.getRank());
stdMpi.startCommunication();
for (DofComm::Iterator it(recvDofs, fe); !it.end(); it.nextRank())
for (DofComm::Iterator it(dofComm.getRecvDofs(), fe);
!it.end(); it.nextRank())
for (; !it.endDofIter(); it.nextDof())
vec[it.getDofIndex()] =
stdMpi.getRecvData(it.getRank())[it.getDofCounter()];
@@ -477,20 +470,7 @@ namespace AMDiS {
/// partitioning the whole mesh.
InteriorBoundary intBoundary;
/// This map contains for each rank the list of DOFs the current rank must
/// send to exchange solution DOFs at the interior boundaries.
DofComm sendDofs;
/// This map contains on each rank the list of DOFs from which the current
/// rank will receive DOF values (i.e., these are all DOFs at an interior
/// boundary). The DOF indices are given in the rank's local numbering.
DofComm recvDofs;
/// This map contains on each rank a list of DOFs along the interior
/// boundaries to communicate with other ranks. The DOF indices are given in
/// the rank's local numbering. Periodic boundaries within one subdomain are
/// not considered here.
DofComm periodicDofs;
DofComm dofComm;
PeriodicMap periodicMap;
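
Code outside MeshDistributor that previously asked for getSendDofs()/getRecvDofs() now reaches the containers through the single accessor (hedged sketch; meshDistributor and stdMpi are assumed to be set up as in the communication code above):

    DofComm &dc = meshDistributor.getDofComm();
    for (DofComm::Iterator it(dc.getRecvDofs(), feSpace); !it.end(); it.nextRank())
      stdMpi.recv(it.getRank());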
@@ -354,11 +354,13 @@ namespace AMDiS {
DOFVector<WorldVector<double> > coords(feSpace, "dofCoords");
pdb.mesh->getDofIndexCoords(feSpace, coords);
for (DofComm::Iterator it(pdb.sendDofs, feSpace); !it.end(); it.nextRank())
for (DofComm::Iterator it(pdb.dofComm.getSendDofs(), feSpace);
!it.end(); it.nextRank())
for (; !it.endDofIter(); it.nextDof())
sendCoords[it.getRank()].push_back(coords[it.getDofIndex()]);
for (DofComm::Iterator it(pdb.recvDofs, feSpace); !it.end(); it.nextRank())
for (DofComm::Iterator it(pdb.dofComm.getRecvDofs(), feSpace);
!it.end(); it.nextRank())
for (; !it.endDofIter(); it.nextDof())
recvCoords[it.getRank()].push_back(coords[it.getDofIndex()]);
@@ -449,8 +451,7 @@ namespace AMDiS {
MSG("%s\n", oss.str().c_str());
debug::printInfoByDof(feSpace,
*(pdb.recvDofs.getDofContainer(it->first,
feSpace)[i]));
*(pdb.dofComm.getRecvDofs()[0][it->first][feSpace][i]));
}
ERROR("Wrong DOFs in rank %d!\n", pdb.mpiRank);
foundError = 1;
@@ -485,15 +486,18 @@ namespace AMDiS {
}
StdMpi<CoordsIndexMap> stdMpi(pdb.mpiComm, true);
for (DofComm::Iterator it(pdb.sendDofs, feSpace); !it.end(); it.nextRank())
for (DofComm::Iterator it(pdb.dofComm.getSendDofs(), feSpace);
!it.end(); it.nextRank())
stdMpi.send(it.getRank(), coordsToIndex);
for (DofComm::Iterator it(pdb.recvDofs, feSpace); !it.end(); it.nextRank())
for (DofComm::Iterator it(pdb.dofComm.getRecvDofs(), feSpace);
!it.end(); it.nextRank())
stdMpi.recv(it.getRank());
stdMpi.startCommunication();
int foundError = 0;
for (DofComm::Iterator it(pdb.recvDofs, feSpace); !it.end(); it.nextRank()) {
for (DofComm::Iterator it(pdb.dofComm.getRecvDofs(), feSpace);
!it.end(); it.nextRank()) {
CoordsIndexMap& otherCoords = stdMpi.getRecvData(it.getRank());
for (CoordsIndexMap::iterator coordsIt = otherCoords.begin();
@@ -562,27 +566,30 @@ namespace AMDiS {
}
void ParallelDebug::testDofContainerCommunication(MeshDistributor &pdb,
map<int, map<const FiniteElemSpace*, DofContainer> > &sendDofs,
map<int, map<const FiniteElemSpace*, DofContainer> > &recvDofs)
void ParallelDebug::testDofContainerCommunication(MeshDistributor &pdb)
{
FUNCNAME("ParallelDebug::testDofContainerCommunication()");
FUNCNAME("ParallelDebug::testDofContainerCommunication()");
typedef map<int, map<const FiniteElemSpace*, DofContainer> >::iterator it_type;
map<int, int> sendNumber;
for (it_type it = sendDofs.begin(); it != sendDofs.end(); ++it)
for (map<const FiniteElemSpace*, DofContainer>::iterator dcIt = it->second.begin(); dcIt != it->second.end(); ++dcIt)
for (it_type it = pdb.dofComm.getSendDofs()[0].begin();
it != pdb.dofComm.getSendDofs()[0].end(); ++it)
for (map<const FiniteElemSpace*, DofContainer>::iterator dcIt = it->second.begin();
dcIt != it->second.end(); ++dcIt)
sendNumber[it->first] += dcIt->second.size();
map<int, int> recvNumber;
for (it_type it = recvDofs.begin(); it != recvDofs.end(); ++it)
for (map<const FiniteElemSpace*, DofContainer>::iterator dcIt = it->second.begin(); dcIt != it->second.end(); ++dcIt)
for (it_type it = pdb.dofComm.getRecvDofs()[0].begin();
it != pdb.dofComm.getRecvDofs()[0].end(); ++it)
for (map<const FiniteElemSpace*, DofContainer>::iterator dcIt = it->second.begin();
dcIt != it->second.end(); ++dcIt)
recvNumber[it->first] += dcIt->second.size();
StdMpi<int> stdMpi(pdb.mpiComm);
stdMpi.send(sendNumber);
for (it_type it = recvDofs.begin(); it != recvDofs.end(); ++it)
for (it_type it = pdb.dofComm.getRecvDofs()[0].begin();
it != pdb.dofComm.getRecvDofs()[0].end(); ++it)
stdMpi.recv(it->first);
stdMpi.startCommunication();
@@ -652,13 +659,13 @@ namespace AMDiS {
pdb.mesh->getDofIndexCoords(it->first, feSpace, coords);
coords.print();
for (DofComm::Iterator rit(pdb.sendDofs, feSpace);
for (DofComm::Iterator rit(pdb.dofComm.getSendDofs(), feSpace);
!rit.end(); rit.nextRank())
for (; !rit.endDofIter(); rit.nextDof())
if (it->first == rit.getDofIndex())
cout << "SEND DOF TO " << rit.getRank() << endl;
for (DofComm::Iterator rit(pdb.recvDofs, feSpace);
for (DofComm::Iterator rit(pdb.dofComm.getRecvDofs(), feSpace);
!rit.end(); rit.nextRank())
for (; !rit.endDofIter(); rit.nextDof())
if (it->first == rit.getDofIndex())
@@ -95,12 +95,8 @@ namespace AMDiS {
* DOFs fits together for all communication partners.
*
* \param[in] pdb Parallel problem definition used for debugging.
* \param[in] sendDofs The map of all DOFs the rank will send.
* \param[in] recvDofs The map of all DOFs the rank will receive.
*/
static void testDofContainerCommunication(MeshDistributor &pdb,
map<int, map<const FiniteElemSpace*, DofContainer> > &sendDofs,
map<int, map<const FiniteElemSpace*, DofContainer> > &recvDofs);
static void testDofContainerCommunication(MeshDistributor &pdb);
/// Tests if there are multiple DOFs in mesh with the same coords.
static void testDoubleDofs(Mesh *mesh);
@@ -88,7 +88,7 @@ namespace AMDiS {
StdMpi<vector<int> > stdMpi(levelData->getMpiComm(0));
for (DofComm::Iterator it(*sendDofs, level, feSpace);
for (DofComm::Iterator it(dofComm->getSendDofs(), level, feSpace);
!it.end(); it.nextRank())
for (; !it.endDofIter(); it.nextDof())
if (dofMap[level].count(it.getDofIndex()) && !nonRankDofs[level].count(it.getDofIndex()))
@@ -99,7 +99,7 @@ namespace AMDiS {
// === Check from which ranks this rank must receive some data. ===
for (DofComm::Iterator it(*recvDofs, level, feSpace);
for (DofComm::Iterator it(dofComm->getRecvDofs(), level, feSpace);
!it.end(); it.nextRank()) {
bool recvFromRank = false;
for (; !it.endDofIter(); it.nextDof()) {
@@ -121,7 +121,7 @@ namespace AMDiS {
// === And set the global indices for all DOFs that are not owned by rank. ===
for (DofComm::Iterator it(*recvDofs, level, feSpace);
for (DofComm::Iterator it(dofComm->getRecvDofs(), level, feSpace);
!it.end(); it.nextRank()) {
int i = 0;
for (; !it.endDofIter(); it.nextDof())
@@ -183,17 +183,16 @@ namespace AMDiS {
}
void ParallelDofMapping::setDofComm(DofComm &pSend, DofComm &pRecv)
void ParallelDofMapping::setDofComm(DofComm &dc)
{
FUNCNAME("ParallelDofMapping::setDofComm()");
sendDofs = &pSend;
recvDofs = &pRecv;
dofComm = &dc;
// Add the DOF communicator also to all FE space DOF mappings.
for (vector<const FiniteElemSpace*>::iterator it = feSpacesUnique.begin();
it != feSpacesUnique.end(); ++it)
data[*it].setDofComm(pSend, pRecv);
data[*it].setDofComm(dc);
}
@@ -343,14 +342,13 @@ namespace AMDiS {
if (!hasNonLocalDofs)
continue;
TEST_EXIT_DBG(sendDofs != NULL && recvDofs != NULL)
("No communicator given!\n");
TEST_EXIT_DBG(dofComm != NULL)("No communicator given!\n");
// === Communicate the matrix indices for all DOFs that are on some ===
// === interior boundaries. ===
StdMpi<vector<DegreeOfFreedom> > stdMpi(levelData->getMpiComm(0));
for (DofComm::Iterator it(*sendDofs, level, feSpaces[i]);
for (DofComm::Iterator it(dofComm->getSendDofs(), level, feSpaces[i]);
!it.end(); it.nextRank()) {
vector<DegreeOfFreedom> sendGlobalDofs;
@@ -364,14 +362,14 @@ namespace AMDiS {
stdMpi.send(it.getRank(), sendGlobalDofs);
}
for (DofComm::Iterator it(*recvDofs, level, feSpaces[i]);
for (DofComm::Iterator it(dofComm->getRecvDofs(), level, feSpaces[i]);
!it.end(); it.nextRank())
stdMpi.recv(it.getRank());