Commit e0139072 authored by Thomas Witkowski's avatar Thomas Witkowski

Changed sendDofs and recvDofs data structure in parallel code.

parent 1f381921
......@@ -220,7 +220,7 @@ if(ENABLE_PARALLEL_DOMAIN)
list(APPEND COMPILEFLAGS "-DHAVE_PARALLEL_DOMAIN_AMDIS=1")
SET(PARALLEL_DOMAIN_AMDIS_SRC
${SOURCE_DIR}/parallel/ParMetisPartitioner.cc
${SOURCE_DIR}/parallel/DofComm.cc
${SOURCE_DIR}/parallel/CheckerPartitioner.cc
${SOURCE_DIR}/parallel/ElementObjectData.cc
${SOURCE_DIR}/parallel/MeshDistributor.cc
......
//
// Software License for AMDiS
//
// Copyright (c) 2010 Dresden University of Technology
// All rights reserved.
// Authors: Simon Vey, Thomas Witkowski et al.
//
// This file is part of AMDiS
//
// See also license.opensource.txt in the distribution.
#include "DofComm.h"
namespace AMDiS {
using namespace std;
void DofComm::removeEmpty()
{
for (DataIter dit = data.begin(); dit != data.end(); ++dit) {
FeMapIter it = dit->second.begin();
while (it != dit->second.end()) {
if (it->second.size() == 0) {
const FiniteElemSpace *fe = it->first;
++it;
dit->second.erase(fe);
} else
++it;
}
}
}
void DofComm::Iterator::setNextFeMap()
{
  // Positions feMapIter (and dofIter) at the first FE-space entry of the
  // rank that dataIter currently points to; if a specific FE space was
  // requested in the constructor, advances to that entry instead.
  if (dataIter != dofComm.data.end()) {
    feMapIter = dataIter->second.begin();

    if (traverseFeSpace != NULL) {
      TEST_EXIT_DBG(dataIter->second.count(traverseFeSpace))
	("Should not happen!\n");

      // BUGFIX: test for end() BEFORE dereferencing the iterator. The
      // original condition order read feMapIter->first on a possibly
      // past-the-end iterator, which is undefined behavior.
      while (feMapIter != dataIter->second.end() &&
	     feMapIter->first != traverseFeSpace)
	++feMapIter;

      TEST_EXIT_DBG(feMapIter != dataIter->second.end() &&
		    feMapIter->first == traverseFeSpace)
	("Should not happen!\n");
    }

    if (feMapIter != dataIter->second.end())
      dofIter = feMapIter->second.begin();

    dofCounter = 0;
  }
}
}
}
// ============================================================================
// == ==
// == AMDiS - Adaptive multidimensional simulations ==
// == ==
// == http://www.amdis-fem.org ==
// == ==
// ============================================================================
//
// Software License for AMDiS
//
// Copyright (c) 2010 Dresden University of Technology
// All rights reserved.
// Authors: Simon Vey, Thomas Witkowski et al.
//
// This file is part of AMDiS
//
// See also license.opensource.txt in the distribution.
/** \file DofComm.h */
#ifndef AMDIS_DOF_COMM_H
#define AMDIS_DOF_COMM_H
#include <map>
#include "FiniteElemSpace.h"
#include "Global.h"
namespace AMDiS {
using namespace std;
/** \brief
 * Stores, for each communication partner rank and each FE space, the
 * container of DOFs that must be exchanged with that rank. Used for both
 * the send and the receive side of the interior-boundary communication.
 */
class DofComm
{
public:
  DofComm() {}

  /// Maps an FE space to the DOF container communicated for it.
  typedef map<const FiniteElemSpace*, DofContainer> FeMapType;
  typedef FeMapType::iterator FeMapIter;
  /// Maps a rank number to its per-FE-space DOF containers.
  typedef map<int, FeMapType> DataType;
  typedef DataType::iterator DataIter;

  /// Returns (creating it if not yet present) the DOF container for the
  /// given rank and FE space.
  DofContainer& getDofCont(int rank, const FiniteElemSpace *feSpace)
  {
    return data[rank][feSpace];
  }

  /// Removes all FE-space entries whose DOF container is empty.
  void removeEmpty();

  /// Discards all communication data.
  void clear()
  {
    data.clear();
  }

  DataType& getData()
  {
    return data;
  }

protected:
  DataType data;

  friend class Iterator;

public:
  /** \brief
   * Iterates over all (rank, FE space, DOF) entries of a DofComm. If an FE
   * space is given to the constructor, the traversal is restricted to the
   * entries of that space.
   */
  class Iterator
  {
  public:
    Iterator(DofComm &dc,
	     const FiniteElemSpace *fe = NULL)
      : dofComm(dc),
	dofCounter(-1),
	traverseFeSpace(fe)
    {
      FUNCNAME("DofComm::Iterator::Iterator()");

      dataIter = dofComm.data.begin();

      setNextFeMap();
    }

    /// Returns true when all ranks have been traversed.
    inline bool end()
    {
      return (dataIter == dofComm.data.end());
    }

    /// Advances to the next rank and resets the FE-space/DOF iterators.
    inline void nextRank()
    {
      ++dataIter;
      setNextFeMap();
    }

    inline void nextFeSpace()
    {
      ++feMapIter;
    }

    /// (Re)starts the DOF traversal for the current rank; if \p fe is
    /// given, first positions the FE-space iterator on that space.
    inline void beginDofIter(const FiniteElemSpace *fe = NULL)
    {
      FUNCNAME("DofComm::Iterator::beginDofIter()");

      if (fe != NULL) {
	feMapIter = dataIter->second.begin();
	// BUGFIX: test for end() BEFORE dereferencing the iterator. The
	// original condition order read feMapIter->first on a possibly
	// past-the-end iterator, which is undefined behavior.
	while (feMapIter != dataIter->second.end() &&
	       feMapIter->first != fe)
	  ++feMapIter;
      }

      TEST_EXIT_DBG(feMapIter != dataIter->second.end())
	("Should not happen!\n");

      dofIter = feMapIter->second.begin();
      dofCounter = 0;
    }

    inline bool endDofIter()
    {
      return (dofIter == feMapIter->second.end());
    }

    inline void nextDof()
    {
      ++dofIter;
      ++dofCounter;
    }

    inline int getRank()
    {
      return dataIter->first;
    }

    inline const FiniteElemSpace* getFeSpace()
    {
      return feMapIter->first;
    }

    inline DofContainer& getDofs()
    {
      return feMapIter->second;
    }

    inline const DegreeOfFreedom* getDof()
    {
      return *dofIter;
    }

    inline DegreeOfFreedom getDofIndex()
    {
      return **dofIter;
    }

    /// Position of the current DOF within the current container (reset to 0
    /// by beginDofIter()/setNextFeMap(), incremented by nextDof()).
    inline int getDofCounter()
    {
      return dofCounter;
    }

  protected:
    /// Initializes feMapIter/dofIter for the rank dataIter points to.
    void setNextFeMap();

  protected:
    DofComm &dofComm;

    DofComm::DataIter dataIter;

    DofComm::FeMapIter feMapIter;

    DofContainer::iterator dofIter;

    int dofCounter;

    /// If non-NULL, the traversal is restricted to this FE space.
    const FiniteElemSpace *traverseFeSpace;
  };
};
}
#endif // AMDIS_DOF_COMM_H
This diff is collapsed.
......@@ -25,6 +25,7 @@
#include <mpi.h>
#include "parallel/DofComm.h"
#include "parallel/ElementObjectData.h"
#include "parallel/ParallelTypes.h"
#include "parallel/MeshPartitioner.h"
......@@ -74,7 +75,8 @@ namespace AMDiS {
/// Maps local dof indices to real dof indices.
DofMapping mapLocalDofIndex;
};
class MeshDistributor
{
private:
......@@ -275,12 +277,12 @@ namespace AMDiS {
return (periodicDof[type].count(globalDofIndex) > 0);
}
map<int, map<const FiniteElemSpace*, DofContainer> >& getSendDofs()
DofComm& getSendDofs()
{
return sendDofs;
}
map<int, map<const FiniteElemSpace*, DofContainer> >& getRecvDofs()
DofComm& getRecvDofs()
{
return recvDofs;
}
......@@ -348,30 +350,25 @@ namespace AMDiS {
const FiniteElemSpace *fe = vec.getFeSpace();
typedef map<int, map<const FiniteElemSpace*, DofContainer> >::iterator it_type;
for (it_type sendIt = sendDofs.begin();
sendIt != sendDofs.end(); ++sendIt) {
for (DofComm::Iterator it(sendDofs, fe); !it.end(); it.nextRank()) {
vector<T> dofs;
int nSendDofs = sendIt->second[fe].size();
dofs.reserve(nSendDofs);
dofs.reserve(it.getDofs().size());
for (int i = 0; i < nSendDofs; i++)
dofs.push_back(vec[*((sendIt->second[fe])[i])]);
for (; !it.endDofIter(); it.nextDof())
dofs.push_back(vec[it.getDofIndex()]);
stdMpi.send(sendIt->first, dofs);
stdMpi.send(it.getRank(), dofs);
}
for (it_type recvIt = recvDofs.begin();
recvIt != recvDofs.end(); ++recvIt)
stdMpi.recv(recvIt->first, recvIt->second[fe].size());
for (DofComm::Iterator it(recvDofs); !it.end(); it.nextRank())
stdMpi.recv(it.getRank());
stdMpi.startCommunication();
for (it_type recvIt = recvDofs.begin();
recvIt != recvDofs.end(); ++recvIt)
for (unsigned int i = 0; i < recvIt->second.size(); i++)
vec[*(recvIt->second[fe])[i]] = stdMpi.getRecvData(recvIt->first)[i];
for (DofComm::Iterator it(recvDofs, fe); !it.end(); it.nextRank())
for (; !it.endDofIter(); it.nextDof())
vec[it.getDofIndex()] =
stdMpi.getRecvData(it.getRank())[it.getDofCounter()];
}
/** \brief
......@@ -633,16 +630,14 @@ namespace AMDiS {
* This map contains for each rank the list of DOFs the current rank must
* send to exchange solution DOFs at the interior boundaries.
*/
// map<FiniteElemSpace, RankToDofContainer> sendDofs;
map<int, map<const FiniteElemSpace*, DofContainer> > sendDofs;
DofComm sendDofs;
/** \brief
* This map contains on each rank the list of DOFs from which the current
* rank will receive DOF values (i.e., these are all DOFs at an interior
* boundary). The DOF indices are given in rank's local numbering.
*/
// map<FiniteElemSpace, RankToDofContainer> recvDofs;
map<int, map<const FiniteElemSpace*, DofContainer> > recvDofs;
DofComm recvDofs;
/** \brief
* If periodic boundaries are used, this map stores, for each periodic
......
......@@ -349,16 +349,13 @@ namespace AMDiS {
DOFVector<WorldVector<double> > coords(feSpace, "dofCorrds");
pdb.mesh->getDofIndexCoords(feSpace, coords);
typedef map<int, map<const FiniteElemSpace*, DofContainer> >::iterator it_type;
for (it_type it = pdb.sendDofs.begin(); it != pdb.sendDofs.end(); ++it)
for (DofContainer::iterator dofIt = it->second[feSpace].begin();
dofIt != it->second[feSpace].end(); ++dofIt)
sendCoords[it->first].push_back(coords[**dofIt]);
for (DofComm::Iterator it(pdb.sendDofs, feSpace); !it.end(); it.nextRank())
for (; !it.endDofIter(); it.nextDof())
sendCoords[it.getRank()].push_back(coords[it.getDofIndex()]);
for (it_type it = pdb.recvDofs.begin(); it != pdb.recvDofs.end(); ++it)
for (DofContainer::iterator dofIt = it->second[feSpace].begin();
dofIt != it->second[feSpace].end(); ++dofIt)
recvCoords[it->first].push_back(coords[**dofIt]);
for (DofComm::Iterator it(pdb.recvDofs, feSpace); !it.end(); it.nextRank())
for (; !it.endDofIter(); it.nextDof())
recvCoords[it.getRank()].push_back(coords[it.getDofIndex()]);
vector<int> sendSize(pdb.mpiSize, 0);
vector<int> recvSize(pdb.mpiSize, 0);
......@@ -446,7 +443,8 @@ namespace AMDiS {
oss << ")";
MSG("%s\n", oss.str().c_str());
debug::printInfoByDof(feSpace, *(pdb.recvDofs[it->first][feSpace][i]));
debug::printInfoByDof(feSpace,
*(pdb.recvDofs.getDofCont(it->first, feSpace)[i]));
}
ERROR("Wrong DOFs in rank %d!\n", pdb.mpiRank);
foundError = 1;
......@@ -479,17 +477,16 @@ namespace AMDiS {
pdb.dofFeData[feSpace].mapLocalGlobalDofs[it.getDOFIndex()];
StdMpi<CoordsIndexMap> stdMpi(pdb.mpiComm, true);
typedef map<int, map<const FiniteElemSpace*, DofContainer> >::iterator it_type;
for (it_type it = pdb.sendDofs.begin(); it != pdb.sendDofs.end(); ++it)
stdMpi.send(it->first, coordsToIndex);
for (it_type it = pdb.recvDofs.begin(); it != pdb.recvDofs.end(); ++it)
stdMpi.recv(it->first);
for (DofComm::Iterator it(pdb.sendDofs, feSpace); !it.end(); it.nextRank())
stdMpi.send(it.getRank(), coordsToIndex);
for (DofComm::Iterator it(pdb.recvDofs, feSpace); !it.end(); it.nextRank())
stdMpi.recv(it.getRank());
stdMpi.startCommunication();
int foundError = 0;
for (it_type it = pdb.recvDofs.begin(); it != pdb.recvDofs.end(); ++it) {
CoordsIndexMap& otherCoords = stdMpi.getRecvData(it->first);
for (DofComm::Iterator it(pdb.recvDofs, feSpace); !it.end(); it.nextRank()) {
CoordsIndexMap& otherCoords = stdMpi.getRecvData(it.getRank());
for (CoordsIndexMap::iterator coordsIt = otherCoords.begin();
coordsIt != otherCoords.end(); ++coordsIt) {
......@@ -503,7 +500,7 @@ namespace AMDiS {
oss << " do not fit together on rank "
<< pdb.getMpiRank() << " (global index: "
<< coordsToIndex[coordsIt->first] << " and on rank "
<< it->first << " (global index: " << coordsIt->second << ")";
<< it.getRank() << " (global index: " << coordsIt->second << ")";
MSG("[DBG] %s\n", oss.str().c_str());
foundError = 1;
......@@ -648,20 +645,15 @@ namespace AMDiS {
pdb.mesh->getDofIndexCoords(it->first, feSpace, coords);
coords.print();
typedef map<int, map<const FiniteElemSpace*, DofContainer> >::iterator it_type;
for (it_type rankit = pdb.sendDofs.begin(); rankit != pdb.sendDofs.end(); ++rankit) {
for (DofContainer::iterator dofit = rankit->second[feSpace].begin();
dofit != rankit->second[feSpace].end(); ++dofit)
if (**dofit == it->first)
cout << "SEND DOF TO " << rankit->first << endl;
}
for (it_type rankit = pdb.recvDofs.begin(); rankit != pdb.recvDofs.end(); ++rankit) {
for (DofContainer::iterator dofit = rankit->second[feSpace].begin();
dofit != rankit->second[feSpace].end(); ++dofit)
if (**dofit == it->first)
cout << "RECV DOF FROM " << rankit->first << endl;
}
for (DofComm::Iterator rit(pdb.sendDofs, feSpace); !rit.end(); rit.nextRank())
for (; !rit.endDofIter(); rit.nextDof())
if (it->first == rit.getDofIndex())
cout << "SEND DOF TO " << rit.getRank() << endl;
for (DofComm::Iterator rit(pdb.recvDofs, feSpace); !rit.end(); rit.nextRank())
for (; !rit.endDofIter(); rit.nextDof())
if (it->first == rit.getDofIndex())
cout << "RECV DOF FROM " << rit.getRank() << endl;
cout << "------" << endl;
}
......
......@@ -283,38 +283,39 @@ namespace AMDiS {
typedef map<int, map<const FiniteElemSpace*, DofContainer> >::iterator it_type;
StdMpi<vector<int> > stdMpi(meshDistributor->getMpiComm());
for (it_type it = meshDistributor->getSendDofs().begin();
it != meshDistributor->getSendDofs().end(); ++it)
for (DofContainer::iterator dofIt = it->second[feSpace].begin();
dofIt != it->second[feSpace].end(); ++dofIt)
if (globalPrimalIndex.count(**dofIt))
stdMpi.getSendData(it->first).push_back(globalPrimalIndex[**dofIt]);
for (DofComm::Iterator it(meshDistributor->getSendDofs(), feSpace);
!it.end(); it.nextRank())
for (; !it.endDofIter(); it.nextDof())
if (globalPrimalIndex.count(it.getDofIndex()))
stdMpi.getSendData(it.getRank()).push_back(globalPrimalIndex[it.getDofIndex()]);
stdMpi.updateSendDataSize();
for (it_type it = meshDistributor->getRecvDofs().begin();
it != meshDistributor->getRecvDofs().end(); ++it) {
for (DofComm::Iterator it(meshDistributor->getRecvDofs(), feSpace);
!it.end(); it.nextRank()) {
bool recvFromRank = false;
for (DofContainer::iterator dofIt = it->second[feSpace].begin();
dofIt != it->second[feSpace].end(); ++dofIt)
if (primals.count(**dofIt) &&
meshDistributor->getIsRankDof(feSpace, **dofIt) == false) {
for (; !it.endDofIter(); it.nextDof()) {
if (primals.count(it.getDofIndex()) &&
meshDistributor->getIsRankDof(feSpace, it.getDofIndex()) == false) {
recvFromRank = true;
break;
}
}
if (recvFromRank)
stdMpi.recv(it->first);
stdMpi.recv(it.getRank());
}
stdMpi.startCommunication();
for (it_type it = meshDistributor->getRecvDofs().begin();
it != meshDistributor->getRecvDofs().end(); ++it) {
for (DofComm::Iterator it(meshDistributor->getRecvDofs(), feSpace);
!it.end(); it.nextRank()) {
int i = 0;
for (DofContainer::iterator dofIt = it->second[feSpace].begin();
dofIt != it->second[feSpace].end(); ++dofIt) {
if (primals.count(**dofIt) &&
meshDistributor->getIsRankDof(feSpace, **dofIt) == false)
globalPrimalIndex[**dofIt] = stdMpi.getRecvData(it->first)[i++];
for (; !it.endDofIter(); it.nextDof()) {
if (primals.count(it.getDofIndex()) &&
meshDistributor->getIsRankDof(feSpace, it.getDofIndex()) == false)
globalPrimalIndex[it.getDofIndex()] =
stdMpi.getRecvData(it.getRank())[i++];
}
}
......@@ -335,59 +336,55 @@ namespace AMDiS {
boundaryDofRanks.clear();
typedef map<int, map<const FiniteElemSpace*, DofContainer> >::iterator it_type;
for (it_type it = meshDistributor->getSendDofs().begin();
it != meshDistributor->getSendDofs().end(); ++it) {
for (DofContainer::iterator dofIt = it->second[feSpace].begin();
dofIt != it->second[feSpace].end(); ++dofIt) {
for (DofComm::Iterator it(meshDistributor->getSendDofs(), feSpace);
!it.end(); it.nextRank())
for (; !it.endDofIter(); it.nextDof()) {
// If the DOF is not primal, i.e., it's a dual node
if (globalPrimalIndex.count(**dofIt) == 0) {
boundaryDofRanks[**dofIt].insert(mpiRank);
boundaryDofRanks[**dofIt].insert(it->first);
if (globalPrimalIndex.count(it.getDofIndex()) == 0) {
boundaryDofRanks[it.getDofIndex()].insert(mpiRank);
boundaryDofRanks[it.getDofIndex()].insert(it.getRank());
}
}
}
// === Communicate these sets for all rank owned dual nodes to other ===
// === ranks that also have this node. ===
StdMpi<vector<std::set<int> > > stdMpi(meshDistributor->getMpiComm());
for (it_type it = meshDistributor->getSendDofs().begin();
it != meshDistributor->getSendDofs().end(); ++it)
for (DofContainer::iterator dofIt = it->second[feSpace].begin();
dofIt != it->second[feSpace].end(); ++dofIt)
if (globalPrimalIndex.count(**dofIt) == 0)
stdMpi.getSendData(it->first).push_back(boundaryDofRanks[**dofIt]);
for (DofComm::Iterator it(meshDistributor->getSendDofs(), feSpace);
!it.end(); it.nextRank())
for (; !it.endDofIter(); it.nextDof())
if (globalPrimalIndex.count(it.getDofIndex()) == 0)
stdMpi.getSendData(it.getRank()).push_back(boundaryDofRanks[it.getDofIndex()]);
stdMpi.updateSendDataSize();
for (it_type it = meshDistributor->getRecvDofs().begin();
it != meshDistributor->getRecvDofs().end(); ++it) {
for (DofComm::Iterator it(meshDistributor->getRecvDofs(), feSpace);
!it.end(); it.nextRank()) {
bool recvFromRank = false;
for (DofContainer::iterator dofIt = it->second[feSpace].begin();
dofIt != it->second[feSpace].end(); ++dofIt)
if (globalPrimalIndex.count(**dofIt) == 0) {
for (; !it.endDofIter(); it.nextDof()) {
if (globalPrimalIndex.count(it.getDofIndex()) == 0) {
recvFromRank = true;
break;
}
}
if (recvFromRank)
stdMpi.recv(it->first);
stdMpi.recv(it.getRank());
}
stdMpi.startCommunication();
for (it_type it = meshDistributor->getRecvDofs().begin();
it != meshDistributor->getRecvDofs().end(); ++it) {
for (DofComm::Iterator it(meshDistributor->getRecvDofs(), feSpace);
!it.end(); it.nextRank()) {
int i = 0;
for (DofContainer::iterator dofIt = it->second[feSpace].begin();
dofIt != it->second[feSpace].end(); ++dofIt)
if (globalPrimalIndex.count(**dofIt) == 0)
boundaryDofRanks[**dofIt] = stdMpi.getRecvData(it->first)[i++];
for (; !it.endDofIter(); it.nextDof())
if (globalPrimalIndex.count(it.getDofIndex()) == 0)
boundaryDofRanks[it.getDofIndex()] =
stdMpi.getRecvData(it.getRank())[i++];
}
// === Create global index of the dual nodes on each rank. ===
duals.clear();
......@@ -460,43 +457,41 @@ namespace AMDiS {
// === Communicate dofFirstLagrange to all other ranks. ===
StdMpi<vector<int> > stdMpi(meshDistributor->getMpiComm());
typedef map<int, map<const FiniteElemSpace*, DofContainer> >::iterator it_type;
for (it_type it = meshDistributor->getSendDofs().begin();
it != meshDistributor->getSendDofs().end(); ++it)
for (DofContainer::iterator dofIt = it->second[feSpace].begin();
dofIt != it->second[feSpace].end(); ++dofIt) {
if (globalPrimalIndex.count(**dofIt) == 0) {
TEST_EXIT_DBG(dofFirstLagrange.count(**dofIt))("Should not happen!\n");
stdMpi.getSendData(it->first).push_back(dofFirstLagrange[**dofIt]);
for (DofComm