Commit 859d3918 authored by Thomas Witkowski

Added comments to the parallel DOF mapping class.

parent 21017269
@@ -17,7 +17,7 @@ namespace AMDiS {
using namespace std;
void GlobalDofMap::clear()
void FeSpaceDofMap::clear()
{
dofMap.clear();
nRankDofs = 0;
@@ -26,37 +26,47 @@ namespace AMDiS {
}
void GlobalDofMap::update()
void FeSpaceDofMap::update()
{
// === Compute local indices for all rank owned DOFs. ===
for (map<DegreeOfFreedom, MultiIndex>::iterator it = dofMap.begin(); it != dofMap.end(); ++it)
if (it->second.local == -1 && nonRankDofs.count(it->first) == 0)
it->second.local = nRankDofs++;
// === Compute number of local and global DOFs in the mapping. ===
nOverallDofs = 0;
rStartDofs = 0;
mpi::getDofNumbering(*mpiComm, nRankDofs, rStartDofs, nOverallDofs);
if (needGlobalMapping)
computeGlobalMapping(rStartDofs);
// === If required, compute also the global indices. ===
if (hasNonLocalDofs)
computeNonLocalIndices();
if (needGlobalMapping) {
computeGlobalMapping();
if (hasNonLocalDofs)
computeNonLocalIndices();
}
}
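The helper mpi::getDofNumbering is not shown in this diff. Assuming it derives a contiguous global numbering from the per-rank DOF counts, a minimal sketch using the same MPI C++ bindings as this file could look as follows (a sketch under that assumption, not the actual AMDiS implementation):

void getDofNumbering(MPI::Intracomm &comm, int nRankDofs,
                     int &rStartDofs, int &nOverallDofs)
{
  // Inclusive prefix sum of the rank owned DOF counts; subtracting the
  // own contribution yields the exclusive prefix sum, i.e., the rank's
  // first global DOF index.
  comm.Scan(&nRankDofs, &rStartDofs, 1, MPI::INT, MPI::SUM);
  rStartDofs -= nRankDofs;

  // Total number of DOFs over all ranks, identical on every rank.
  comm.Allreduce(&nRankDofs, &nOverallDofs, 1, MPI::INT, MPI::SUM);
}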
void GlobalDofMap::computeGlobalMapping(int offset)
void FeSpaceDofMap::computeGlobalMapping()
{
for (map<DegreeOfFreedom, MultiIndex>::iterator it = dofMap.begin(); it != dofMap.end(); ++it)
it->second.global = it->second.local + offset;
it->second.global = it->second.local + rStartDofs;
}
void GlobalDofMap::computeNonLocalIndices()
void FeSpaceDofMap::computeNonLocalIndices()
{
FUNCNAME("GlobalDofMap::computeNonLocalIndices()");
FUNCNAME("FeSpaceDofMap::computeNonLocalIndices()");
typedef map<int, map<const FiniteElemSpace*, DofContainer> >::iterator it_type;
// === Send the global indices of all rank owned DOFs to all other ranks ===
// === that also contain these DOFs. ===
StdMpi<vector<int> > stdMpi(*mpiComm);
for (DofComm::Iterator it(*sendDofs, feSpace); !it.end(); it.nextRank())
for (; !it.endDofIter(); it.nextDof())
@@ -65,6 +75,9 @@ namespace AMDiS {
stdMpi.updateSendDataSize();
// === Check from which ranks this rank must receive some data. ===
for (DofComm::Iterator it(*recvDofs, feSpace); !it.end(); it.nextRank()) {
bool recvFromRank = false;
for (; !it.endDofIter(); it.nextDof()) {
@@ -78,8 +91,14 @@ namespace AMDiS {
stdMpi.recv(it.getRank());
}
// === Start communication to exchange global indices. ===
stdMpi.startCommunication();
// === And set the global indices for all DOFs that are not owned by the rank. ===
for (DofComm::Iterator it(*recvDofs, feSpace);
!it.end(); it.nextRank()) {
int i = 0;
@@ -90,20 +109,48 @@ namespace AMDiS {
}
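The collapsed hunks above hide the buffer handling that goes through the StdMpi wrapper. The underlying owner-to-ghost exchange pattern, written as a self-contained sketch in raw MPI (neighbour rank lists and buffer sizes are assumed known, as they are via DofComm in the real code):

#include <mpi.h>
#include <utility>
#include <vector>

// Sketch: each rank sends the global indices of its owned boundary DOFs to
// the neighbour ranks holding copies, and receives the global indices of
// its non rank owned DOFs from their owners.
void exchangeGlobalIndices(MPI_Comm comm,
                           std::vector<std::pair<int, std::vector<int> > > &sendBufs,
                           std::vector<std::pair<int, std::vector<int> > > &recvBufs)
{
  std::vector<MPI_Request> req(sendBufs.size());
  for (size_t i = 0; i < sendBufs.size(); i++)
    MPI_Isend(&(sendBufs[i].second[0]), (int) sendBufs[i].second.size(),
              MPI_INT, sendBufs[i].first, 0, comm, &req[i]);

  for (size_t i = 0; i < recvBufs.size(); i++)
    MPI_Recv(&(recvBufs[i].second[0]), (int) recvBufs[i].second.size(),
             MPI_INT, recvBufs[i].first, 0, comm, MPI_STATUS_IGNORE);

  if (!req.empty())
    MPI_Waitall((int) req.size(), &req[0], MPI_STATUSES_IGNORE);
}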
void GlobalDofMap::print()
void ParallelDofMapping::init(MPI::Intracomm *m,
vector<const FiniteElemSpace*> &fe,
bool needGlobalMapping,
bool bNonLocalDofs)
{
FUNCNAME("GlobalDofMap::print()");
FUNCNAME("ParallelDofMapping::init()");
mpiComm = m;
feSpaces = fe;
hasNonLocalDofs = bNonLocalDofs;
MSG("Local to global mapping on this rank: \n");
// === Create a set of unique FE spaces. ===
for (map<DegreeOfFreedom, MultiIndex>::iterator it = dofMap.begin(); it != dofMap.end(); ++it)
if (nonRankDofs.count(it->first) == 0)
MSG(" %d -> %d (rank-dof)\n", it->first, it->second.local);
else
MSG(" %d -> %d \n", it->first, it->second.local);
for (unsigned int i = 0; i < feSpaces.size(); i++)
feSpacesUnique.insert(feSpaces[i]);
// === Init the mapping for all different FE spaces. ===
for (std::set<const FiniteElemSpace*>::iterator it = feSpacesUnique.begin();
it != feSpacesUnique.end(); ++it) {
addFeSpace(*it);
data[*it].setNeedGlobalMapping(needGlobalMapping);
data[*it].setNonLocalDofs(hasNonLocalDofs);
}
}
void ParallelDofMapping::setDofComm(DofComm &pSend, DofComm &pRecv)
{
FUNCNAME("ParallelDofMapping::setDofComm()");
sendDofs = &pSend;
recvDofs = &pRecv;
// Add the DOF communicators also to all FE space DOF mappings.
for (std::set<const FiniteElemSpace*>::iterator it = feSpacesUnique.begin();
it != feSpacesUnique.end(); ++it)
data[*it].setDofComm(pSend, pRecv);
}
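Taken together, init() and setDofComm() are wired up by a caller roughly as follows (hypothetical driver code; feSpace0, feSpace1, sendComm and recvComm are assumed to exist):

std::vector<const FiniteElemSpace*> feSpaces;
feSpaces.push_back(feSpace0);
feSpaces.push_back(feSpace1);

ParallelDofMapping dofMapping;
dofMapping.init(&(MPI::COMM_WORLD), feSpaces,
                true,   // needGlobalMapping: also compute global indices
                true);  // bNonLocalDofs: some rank holds non owned DOFs
dofMapping.setDofComm(sendComm, recvComm);

// Fill the per FE space mappings via insertRankDof()/insert(), then:
dofMapping.update();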
void ParallelDofMapping::addFeSpace(const FiniteElemSpace* feSpace)
{
FUNCNAME("ParallelDofMapping::addFeSpace()");
@@ -111,43 +158,43 @@ namespace AMDiS {
if (data.count(feSpace))
data.find(feSpace)->second.clear();
else
data.insert(make_pair(feSpace, GlobalDofMap(mpiComm)));
data.insert(make_pair(feSpace, FeSpaceDofMap(mpiComm)));
data.find(feSpace)->second.setFeSpace(feSpace);
}
int ParallelDofMapping::getRankDofs(vector<const FiniteElemSpace*> &fe)
int ParallelDofMapping::computeRankDofs()
{
FUNCNAME("ParallelDofMapping::getRankDofs()");
FUNCNAME("ParallelDofMapping::computeRankDofs()");
int result = 0;
for (unsigned int i = 0; i < fe.size(); i++) {
TEST_EXIT_DBG(data.count(fe[i]))("Cannot find FE space: %p\n", fe[i]);
result += data[fe[i]].nRankDofs;
for (unsigned int i = 0; i < feSpaces.size(); i++) {
TEST_EXIT_DBG(data.count(feSpaces[i]))("Should not happen!\n");
result += data[feSpaces[i]].nRankDofs;
}
return result;
}
int ParallelDofMapping::getLocalDofs(vector<const FiniteElemSpace*> &fe)
int ParallelDofMapping::computeLocalDofs()
{
FUNCNAME("ParallelDofMapping::getLocalDofs()");
FUNCNAME("ParallelDofMapping::computeLocalDofs()");
int result = 0;
for (unsigned int i = 0; i < fe.size(); i++) {
TEST_EXIT_DBG(data.count(fe[i]))("Cannot find FE space: %p\n", fe[i]);
result += data[fe[i]].nLocalDofs;
for (unsigned int i = 0; i < feSpaces.size(); i++) {
TEST_EXIT_DBG(data.count(feSpaces[i]))("Should not happen!\n");
result += data[feSpaces[i]].nLocalDofs;
}
return result;
}
int ParallelDofMapping::getOverallDofs(vector<const FiniteElemSpace*> &feSpaces)
int ParallelDofMapping::computeOverallDofs()
{
FUNCNAME("ParallelDofMapping::getOverallDofs()");
FUNCNAME("ParallelDofMapping::computeOverallDofs()");
int result = 0;
for (unsigned int i = 0; i < feSpaces.size(); i++) {
@@ -159,9 +206,9 @@ namespace AMDiS {
}
int ParallelDofMapping::getStartDofs(vector<const FiniteElemSpace*> &feSpaces)
int ParallelDofMapping::computeStartDofs()
{
FUNCNAME("ParallelDofMapping::getStartDofs()");
FUNCNAME("ParallelDofMapping::computeStartDofs()");
int result = 0;
for (unsigned int i = 0; i < feSpaces.size(); i++) {
@@ -173,39 +220,22 @@ namespace AMDiS {
}
void ParallelDofMapping::init(MPI::Intracomm *m,
vector<const FiniteElemSpace*> &fe,
bool needGlobalMapping,
bool bNonLocalDofs)
{
FUNCNAME("ParallelDofMapping::init()");
mpiComm = m;
feSpaces = fe;
hasNonLocalDofs = bNonLocalDofs;
for (unsigned int i = 0; i < feSpaces.size(); i++) {
feSpacesUnique.insert(feSpaces[i]);
addFeSpace(feSpaces[i]);
data[feSpaces[i]].setNeedGlobalMapping(needGlobalMapping);
data[feSpaces[i]].setNonLocalDofs(hasNonLocalDofs);
}
}
void ParallelDofMapping::update()
{
FUNCNAME("ParallelDofMapping::update()");
// First, update all FE space DOF mappings.
for (std::set<const FiniteElemSpace*>::iterator it = feSpacesUnique.begin();
it != feSpacesUnique.end(); ++it)
data[*it].update();
nRankDofs = getRankDofs(feSpaces);
nLocalDofs = getLocalDofs(feSpaces);
nOverallDofs = getOverallDofs(feSpaces);
rStartDofs = getStartDofs(feSpaces);
// Compute all numbers from these mappings.
nRankDofs = computeRankDofs();
nLocalDofs = computeLocalDofs();
nOverallDofs = computeOverallDofs();
rStartDofs = computeStartDofs();
// And finally, compute the matrix indices.
computeMatIndex();
}
@@ -216,10 +246,18 @@ namespace AMDiS {
dofToMatIndex.clear();
// The offset is always added to the local matrix index. The offset for the
// DOFs in the first FE space is the smallest global index of a DOF that is
// owned by the rank.
int offset = rStartDofs;
// === Create the matrix indices for all component FE spaces. ===
for (unsigned int i = 0; i < feSpaces.size(); i++) {
// Traverse all DOFs of the FE space and create a matrix index for each
// rank owned DOF.
map<DegreeOfFreedom, MultiIndex>& dofMap = data[feSpaces[i]].getMap();
typedef map<DegreeOfFreedom, MultiIndex>::iterator ItType;
for (ItType it = dofMap.begin(); it != dofMap.end(); ++it) {
@@ -229,14 +267,20 @@ namespace AMDiS {
}
}
// Increase the offset for the next FE space by the number of DOFs owned
// by the rank in the current FE space.
offset += data[feSpaces[i]].nRankDofs;
// If there are no non-local DOFs, continue with the next FE space.
if (!hasNonLocalDofs)
continue;
TEST_EXIT_DBG(sendDofs != NULL && recvDofs != NULL)
("No communicator given!\n");
// === Communicate the matrix indices for all DOFs that are on some ===
// === interior boundaries. ===
StdMpi<vector<DegreeOfFreedom> > stdMpi(*mpiComm);
for (DofComm::Iterator it(*sendDofs, feSpaces[i]);
!it.end(); it.nextRank()) {
@@ -270,17 +314,4 @@ namespace AMDiS {
}
}
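To make the offset logic concrete, a small worked example (all numbers invented for illustration):

// Two component FE spaces; the rank owns 100 DOFs in feSpaces[0] and 50
// DOFs in feSpaces[1]; rStartDofs = 300. Rank owned DOFs then receive:
//
//   feSpaces[0]: local indices 0..99 -> matrix indices 300..399 (offset 300)
//   feSpaces[1]: local indices 0..49 -> matrix indices 400..449 (offset 400)
//
// Matrix indices of non rank owned DOFs are not computed locally; they are
// received from the owner ranks in the communication step above.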
void ParallelDofMapping::setDofComm(DofComm &pSend, DofComm &pRecv)
{
FUNCNAME("ParallelDofMapping::setDofComm()");
sendDofs = &pSend;
recvDofs = &pRecv;
for (std::set<const FiniteElemSpace*>::iterator it = feSpacesUnique.begin();
it != feSpacesUnique.end(); ++it)
data[*it].setDofComm(pSend, pRecv);
}
}
@@ -34,17 +34,18 @@
namespace AMDiS {
using namespace std;
/// Is used if a DOF index is mapped to multiple indices, i.e., to both a
/// local and a global one.
struct MultiIndex
{
int local, global;
};
/** \brief
* Defines for each system component a mapping for sets of global DOF indices
* to global matrix indices. The mapping is defined for all DOFs in rank's
* subdomain. When periodic boundary conditions are used, then the mapping
* stores also information for the periodic associations of rank's DOF on
* periodic boundaries.
* Is used to store matrix indices for all DOFs in rank's subdomain. Thus, the
* class defines a mapping from component number and DOF index to a global
* matrix index. This class does not calculate the indices by itself!
*/
class DofToMatIndex
{
@@ -57,13 +58,19 @@ namespace AMDiS {
data.clear();
}
/// Add a new mapping.
/** Add a new mapping for a given DOF.
*
* \param[in] component Component number for which the mapping
* is defined.
* \param[in] dof DOF index
* \param[in] globalMatIndex Global matrix index.
*/
inline void add(int component, DegreeOfFreedom dof, int globalMatIndex)
{
data[component][dof] = globalMatIndex;
}
/// Map a global DOF index to the global matrix index for a specific
/// Maps a global DOF index to the global matrix index for a specific
/// system component number.
inline int get(int component, DegreeOfFreedom dof)
{
@@ -86,31 +93,44 @@ namespace AMDiS {
};
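A minimal usage sketch of this container (component and index values invented):

DofToMatIndex dofToMatIndex;

// In component 0, DOF 42 corresponds to global matrix index 1042.
dofToMatIndex.add(0, 42, 1042);

int matIndex = dofToMatIndex.get(0, 42);   // yields 1042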
class GlobalDofMap
/**
* This class defines the parallel mapping of DOFs for one FE space. It is used
* by the class \ref ParallelDofMapping to specify the mapping for a set of
* FE spaces.
*/
class FeSpaceDofMap
{
public:
/// This constructor exists only so that a std::map of this class can be
/// created and operator[] can be used for read access. It should never be
/// called.
GlobalDofMap()
FeSpaceDofMap()
{
ERROR_EXIT("Should not be called!\n");
}
GlobalDofMap(MPI::Intracomm* m)
/// This is the only valid constructor to be used.
FeSpaceDofMap(MPI::Intracomm* m)
: mpiComm(m),
sendDofs(NULL),
recvDofs(NULL),
feSpace(NULL),
needGlobalMapping(false),
hasNonLocalDofs(false),
nRankDofs(0),
nLocalDofs(0),
nOverallDofs(0),
rStartDofs(0),
hasNonLocalDofs(false)
{}
rStartDofs(0)
{
FUNCNAME("FeSpaceDofMap::FeSpaceDofMap()");
TEST_EXIT(mpiComm)("No MPI Communicator specified!\n");
}
/// Clears all data of the mapping.
void clear();
/// Maps a DOF index to both the local and the global index of the mapping.
/// The global index need not be set.
MultiIndex& operator[](DegreeOfFreedom d)
{
TEST_EXIT_DBG(dofMap.count(d))("Should not happen!\n");
@@ -118,9 +138,11 @@ namespace AMDiS {
return dofMap[d];
}
/// Inserts a new DOF into rank's mapping. The DOF is assumed to be owned by
/// the rank.
void insertRankDof(DegreeOfFreedom dof0, DegreeOfFreedom dof1 = -1)
{
FUNCNAME("GlobalDofMap::insertRankDof()");
FUNCNAME("FeSpaceDofMap::insertRankDof()");
TEST_EXIT_DBG(dofMap.count(dof0) == 0)("Should not happen!\n");
@@ -130,9 +152,11 @@ namespace AMDiS {
nRankDofs++;
}
/// Inserts a new DOF into rank's mapping. The DOF exists in rank's subdomain
/// but is owned by another rank; thus it is part of an interior boundary.
void insert(DegreeOfFreedom dof0, DegreeOfFreedom dof1 = -1)
{
FUNCNAME("GlobalDofMap::insert()");
FUNCNAME("FeSpaceDofMap::insert()");
TEST_EXIT_DBG(dofMap.count(dof0) == 0)("Should not happen!\n");
@@ -141,49 +165,55 @@ namespace AMDiS {
nonRankDofs.insert(dof0);
}
/// Checks if a given DOF is in the DOF mapping.
bool isSet(DegreeOfFreedom dof)
{
return static_cast<bool>(dofMap.count(dof));
}
/// Checks if a given DOF is a rank owned DOF of the DOF mapping. The DOF must
/// be part of the mapping (this is not checked here), otherwise the result is
/// meaningless.
bool isRankDof(DegreeOfFreedom dof)
{
return !(static_cast<bool>(nonRankDofs.count(dof)));
}
/// Returns the number of DOFs in the mapping.
unsigned int size()
{
return dofMap.size();
}
/// Returns the raw data of the mapping.
map<DegreeOfFreedom, MultiIndex>& getMap()
{
return dofMap;
}
void update();
void computeGlobalMapping(int offset);
void computeNonLocalIndices();
void print();
/// Recomputes the mapping.
void update();
/// Sets the FE space this mapping corresponds to.
void setFeSpace(const FiniteElemSpace *fe)
{
feSpace = fe;
}
/// Informs the mapping whether the mapping will include DOFs that are not
/// owned by the rank.
void setNonLocalDofs(bool b)
{
hasNonLocalDofs = b;
}
/// Informs the mapping whether a global index must be computed.
void setNeedGlobalMapping(bool b)
{
needGlobalMapping = b;
}
/// Sets the DOF communicators.
void setDofComm(DofComm &pSend, DofComm &pRecv)
{
sendDofs = &pSend;
@@ -191,31 +221,48 @@ namespace AMDiS {
}
private:
/// Computes a global mapping from the local one.
void computeGlobalMapping();
/// Computes the global indices of all DOFs in the mapping that are not owned
/// by the rank.
void computeNonLocalIndices();
private:
/// MPI communicator object.
MPI::Intracomm* mpiComm;
DofComm *sendDofs;
DofComm *recvDofs;
/// DOF communicators for all DOFs on interior boundaries.
DofComm *sendDofs, *recvDofs;
/// The FE space this mapping belongs to. This is used only to get the
/// correct DOF communicator in \ref sendDofs and \ref recvDofs.
const FiniteElemSpace *feSpace;
///
/// Mapping data from DOF indices to local and global indices.
map<DegreeOfFreedom, MultiIndex> dofMap;
/// Set of all DOFs that are in mapping but are not owned by the rank.
std::set<DegreeOfFreedom> nonRankDofs;
/// If true, a global index mapping will be computed for all DOFs.
bool needGlobalMapping;
public:
///
int nRankDofs, nLocalDofs, nOverallDofs, rStartDofs;
/// Is true if there are DOFs in at least one subdomain that are not owned
/// by the rank. If the value is false, each rank contains only DOFs that
/// are also owned by this rank.
bool hasNonLocalDofs;
public:
/// Number of DOFs owned by the rank, number of DOFs in rank's subdomain,
/// overall number of DOFs, and the smallest global index of a rank owned DOF.
int nRankDofs, nLocalDofs, nOverallDofs, rStartDofs;
};
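A short sketch of how one FE space mapping is filled and updated (hypothetical driver code; feSpace, sendComm and recvComm are assumed to exist, DOF indices are invented):

FeSpaceDofMap dofMap(&(MPI::COMM_WORLD));
dofMap.setFeSpace(feSpace);
dofMap.setNeedGlobalMapping(true);
dofMap.setNonLocalDofs(true);
dofMap.setDofComm(sendComm, recvComm);

dofMap.insertRankDof(17);   // DOF 17 is owned by this rank
dofMap.insert(23);          // DOF 23 is in the subdomain but owned elsewhere

dofMap.update();            // assigns local and, if requested, global indices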
/**
* Implements the mapping from sets of distributed DOF indices to local and
* global indices. The mapping works for a given set of FE spaces. Furthermore,
* this class may compute the matrix indices of the set of DOF indices.
*/
class ParallelDofMapping
{
public:
@@ -230,7 +277,27 @@ namespace AMDiS {
rStartDofs(-1)
{}
inline GlobalDofMap& operator[](const FiniteElemSpace* feSpace)
/** \brief Initialize the parallel DOF mapping.
*
* \param[in] m MPI communicator.
* \param[in] fe The FE spaces of all components of the
* PDE to be solved.
* \param[in] needGlobalMapping If true, the mapping computes also a global
* index for the DOFs.
* \param[in] bNonLocalDofs If true, at least one rank's mapping
* contains DOFs that are not owned by the rank.
*/
void init(MPI::Intracomm *m,
vector<const FiniteElemSpace*> &fe,
bool needGlobalMapping,
bool bNonLocalDofs);
/// Set the DOF communicator objects that are required to exchange information
/// about DOFs that are on interior boundaries.
void setDofComm(DofComm &pSend, DofComm &pRecv);
/// Access the DOF mapping for a given FE space.
inline FeSpaceDofMap& operator[](const FiniteElemSpace* feSpace)
{
FUNCNAME("ParallelDofMapping::operator[]()");
@@ -239,10 +306,7 @@ namespace AMDiS {
return data.find(feSpace)->second;
}
void addFeSpace(const FiniteElemSpace* feSpace);
int getRankDofs(vector<const FiniteElemSpace*> &fe);
/// Returns \ref nRankDofs, i.e., the number of DOFs owned by the rank.
inline int getRankDofs()
{
TEST_EXIT_DBG(nRankDofs >= 0)("Should not happen!\n");
@@ -250,8 +314,7 @@ namespace AMDiS {
return nRankDofs;
}
int getLocalDofs(vector<const FiniteElemSpace*> &fe);
/// Returns \ref nLocalDofs, i.e., the number of DOFs in rank's subdomain.
inline int getLocalDofs()
{
TEST_EXIT_DBG(nLocalDofs >= 0)("Should not happen!\n");
@@ -259,8 +322,7 @@ namespace AMDiS {
return nLocalDofs;
}
int getOverallDofs(vector<const FiniteElemSpace*> &feSpaces);
/// Returns \ref nOverallDofs, i.e., the total number of DOFs in this mapping.
inline int getOverallDofs()
{
TEST_EXIT_DBG(nOverallDofs >= 0)("Should not happen!\n");
@@ -268,8 +330,8 @@ namespace AMDiS {
return nOverallDofs;
}
int getStartDofs(vector<const FiniteElemSpace*> &feSpaces);
/// Returns \ref rStartDofs, i.e., the smallest global index of a DOF that is
/// owned by the rank.
inline int getStartDofs()
{
TEST_EXIT_DBG(rStartDofs >= 0)("Should not happen!\n");
@@ -277,22 +339,18 @@ namespace AMDiS {
return rStartDofs;
}
void init(MPI::Intracomm *m,
vector<const FiniteElemSpace*> &fe,
bool needGlobalMapping,
bool bNonLocalDofs);
/// Update the mapping.
void update();
void computeMatIndex();
void setDofComm(DofComm &pSend, DofComm &pRecv);
/// Returns the global matrix index of a given DOF for a given
/// component number.
inline int getMatIndex(int ithComponent, DegreeOfFreedom d)
{
return dofToMatIndex.get(ithComponent, d);
}
/// Returns the local matrix index of a given DOF for a given
/// component number.
inline int getLocalMatIndex(int ithComponent, DegreeOfFreedom d)
{
FUNCNAME("ParallelDofMapping::getLocalMatIndex()");
@@ -303,25 +361,58 @@ namespace AMDiS {
return dofToMatIndex.get(ithComponent, d) - rStartDofs;
}
protected:
/// Insert a new FE space DOF mapping for a given FE space.
void addFeSpace(const FiniteElemSpace* feSpace);
/// Compute \ref nRankDofs.
int computeRankDofs();
/// Compute \ref nLocalDofs.
int computeLocalDofs();
/// Compute \ref nOverallDofs.
int computeOverallDofs();
/// Compute \ref rStartDofs.
int computeStartDofs();
/// Compute local and global matrix indices.
void computeMatIndex();
private:
/// MPI communicator object.
MPI::Intracomm* mpiComm;
DofComm *sendDofs;
DofComm *recvDofs;
/// DOF communicators for all DOFs on interior boundaries.
DofComm *sendDofs, *recvDofs;
/// Is true if there are DOFs in at least one subdomain that are not owned
/// by the rank. If the value is false, each rank contains only DOFs that
/// are also owned by this rank.
bool hasNonLocalDofs;
map<const FiniteElemSpace*, GlobalDofMap> data;
/// Maps from FE space pointers to DOF mappings.
map<const FiniteElemSpace*, FeSpaceDofMap> data;
/// The FE spaces for all components.
vector<const FiniteElemSpace*> feSpaces;
/// The set of all FE spaces. It uniquely contains all different FE spaces
/// from \ref feSpaces.
std::set<const FiniteElemSpace*> feSpacesUnique;
int nRankDofs, nLocalDofs, nOverallDofs, rStartDofs;
/// Number of DOFs owned by the rank.
int nRankDofs;
/// Number of DOFs in rank's subdomain.
int nLocalDofs;
/// Number of global DOFs (this value is thus the same on all ranks).
int nOverallDofs;
/// Smallest global index of a DOF owned by the rank.
int rStartDofs;
/// Mapping from global DOF indices to global matrix indices under
/// consideration of possibly multiple components.
......