Commit e0139072 authored by Thomas Witkowski

Changed sendDofs and recvDofs data structure in parallel code.

parent 1f381921
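
For orientation: the commit replaces the raw nested map map<int, map<const FiniteElemSpace*, DofContainer> > that previously backed sendDofs and recvDofs with the new DofComm class shown below. A minimal sketch of the access pattern before and after, using only types that appear in this diff; the helper name accessSketch and the surrounding setup are assumptions, not part of the commit:

// Sketch, not part of the commit; assumes the AMDiS headers and
// 'using namespace std' as in the files below.
void accessSketch(int rank, const FiniteElemSpace *feSpace,
                  map<int, map<const FiniteElemSpace*, DofContainer> > &oldSendDofs,
                  DofComm &newSendDofs)
{
  // Before this commit: callers indexed the nested map directly.
  DofContainer &oldDofs = oldSendDofs[rank][feSpace];

  // After: the same slot is reached through DofComm's accessor, which
  // also creates the entry on first access.
  DofContainer &newDofs = newSendDofs.getDofCont(rank, feSpace);

  // E.g. migrate the old entries into the new structure.
  newDofs.insert(newDofs.end(), oldDofs.begin(), oldDofs.end());
}
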
CMakeLists.txt
@@ -220,7 +220,7 @@ if(ENABLE_PARALLEL_DOMAIN)
list(APPEND COMPILEFLAGS "-DHAVE_PARALLEL_DOMAIN_AMDIS=1")
SET(PARALLEL_DOMAIN_AMDIS_SRC
${SOURCE_DIR}/parallel/ParMetisPartitioner.cc
${SOURCE_DIR}/parallel/DofComm.cc
${SOURCE_DIR}/parallel/CheckerPartitioner.cc
${SOURCE_DIR}/parallel/ElementObjectData.cc
${SOURCE_DIR}/parallel/MeshDistributor.cc
parallel/DofComm.cc
//
// Software License for AMDiS
//
// Copyright (c) 2010 Dresden University of Technology
// All rights reserved.
// Authors: Simon Vey, Thomas Witkowski et al.
//
// This file is part of AMDiS
//
// See also license.opensource.txt in the distribution.
#include "DofComm.h"
namespace AMDiS {
using namespace std;
void DofComm::removeEmpty()
{
for (DataIter dit = data.begin(); dit != data.end(); ++dit) {
FeMapIter it = dit->second.begin();
while (it != dit->second.end()) {
if (it->second.size() == 0) {
const FiniteElemSpace *fe = it->first;
++it;
dit->second.erase(fe);
} else
++it;
}
}
}
void DofComm::Iterator::setNextFeMap()
{
if (dataIter != dofComm.data.end()) {
feMapIter = dataIter->second.begin();
if (traverseFeSpace != NULL) {
TEST_EXIT_DBG(dataIter->second.count(traverseFeSpace))
("Should not happen!\n");
while (feMapIter->first != traverseFeSpace &&
feMapIter != dataIter->second.end())
++feMapIter;
TEST_EXIT_DBG(feMapIter != dataIter->second.end() &&
feMapIter->first == traverseFeSpace)
("Should not happen!\n");
}
if (feMapIter != dataIter->second.end())
dofIter = feMapIter->second.begin();
dofCounter = 0;
}
}
}
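
DofComm::removeEmpty() above relies on the standard idiom for erasing from a std::map during traversal: save the key, advance the iterator, then erase, since map::erase invalidates the iterator pointing at the erased element. A standalone sketch of the same idiom with placeholder types (not AMDiS code); the header parallel/DofComm.h follows after it:

#include <map>
#include <vector>

// Illustration of the erase-during-iteration idiom used in
// DofComm::removeEmpty(); int keys and vector<int> values are placeholders.
void removeEmptySketch(std::map<int, std::vector<int> > &data)
{
  std::map<int, std::vector<int> >::iterator it = data.begin();
  while (it != data.end()) {
    if (it->second.empty()) {
      int key = it->first;  // save the key,
      ++it;                 // step past the element to be erased,
      data.erase(key);      // then erase by key; 'it' remains valid
    } else {
      ++it;
    }
  }
}
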
parallel/DofComm.h
// ============================================================================
// ==                                                                        ==
// ==  AMDiS - Adaptive multidimensional simulations                         ==
// ==                                                                        ==
// ==  http://www.amdis-fem.org                                              ==
// ==                                                                        ==
// ============================================================================
//
// Software License for AMDiS
//
// Copyright (c) 2010 Dresden University of Technology
// All rights reserved.
// Authors: Simon Vey, Thomas Witkowski et al.
//
// This file is part of AMDiS
//
// See also license.opensource.txt in the distribution.
/** \file DofComm.h */

#ifndef AMDIS_DOF_COMM_H
#define AMDIS_DOF_COMM_H

#include <map>
#include "FiniteElemSpace.h"
#include "Global.h"

namespace AMDiS {

  using namespace std;

  // Stores, per neighbouring rank and per FE space, the DOFs that must be
  // communicated with that rank. Replaces the raw nested map
  // map<int, map<const FiniteElemSpace*, DofContainer> > used before.
  class DofComm
  {
  public:
    DofComm() {}

    typedef map<const FiniteElemSpace*, DofContainer> FeMapType;
    typedef FeMapType::iterator FeMapIter;
    typedef map<int, FeMapType> DataType;
    typedef DataType::iterator DataIter;

    // Returns the DOF container for the given rank and FE space,
    // creating the entry on first access.
    DofContainer& getDofCont(int rank, const FiniteElemSpace *feSpace)
    {
      return data[rank][feSpace];
    }

    void removeEmpty();

    void clear()
    {
      data.clear();
    }

    DataType& getData()
    {
      return data;
    }

  protected:
    DataType data;

    friend class Iterator;

  public:
    // Iterates over all ranks and, within each rank, over the DOFs of
    // one or all FE spaces.
    class Iterator
    {
    public:
      Iterator(DofComm &dc,
               const FiniteElemSpace *fe = NULL)
        : dofComm(dc),
          dofCounter(-1),
          traverseFeSpace(fe)
      {
        FUNCNAME("DofComm::Iterator::Iterator()");

        dataIter = dofComm.data.begin();
        setNextFeMap();
      }

      inline bool end()
      {
        return (dataIter == dofComm.data.end());
      }

      inline void nextRank()
      {
        ++dataIter;
        setNextFeMap();
      }

      inline void nextFeSpace()
      {
        ++feMapIter;
      }

      inline void beginDofIter(const FiniteElemSpace *fe = NULL)
      {
        FUNCNAME("DofComm::Iterator::beginDofIter()");

        if (fe != NULL) {
          feMapIter = dataIter->second.begin();
          // Test for the end of the map before dereferencing the iterator.
          while (feMapIter != dataIter->second.end() &&
                 feMapIter->first != fe)
            ++feMapIter;
        }

        TEST_EXIT_DBG(feMapIter != dataIter->second.end())
          ("Should not happen!\n");

        dofIter = feMapIter->second.begin();
        dofCounter = 0;
      }

      inline bool endDofIter()
      {
        return (dofIter == feMapIter->second.end());
      }

      inline void nextDof()
      {
        ++dofIter;
        ++dofCounter;
      }

      inline int getRank()
      {
        return dataIter->first;
      }

      inline const FiniteElemSpace* getFeSpace()
      {
        return feMapIter->first;
      }

      inline DofContainer& getDofs()
      {
        return feMapIter->second;
      }

      inline const DegreeOfFreedom* getDof()
      {
        return *dofIter;
      }

      inline DegreeOfFreedom getDofIndex()
      {
        return **dofIter;
      }

      inline int getDofCounter()
      {
        return dofCounter;
      }

    protected:
      void setNextFeMap();

    protected:
      DofComm &dofComm;

      DofComm::DataIter dataIter;

      DofComm::FeMapIter feMapIter;

      DofContainer::iterator dofIter;

      int dofCounter;

      const FiniteElemSpace *traverseFeSpace;
    };
  };

}

#endif // AMDIS_DOF_COMM_H
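
The header supports the traversal pattern that the MeshDistributor changes below depend on: an outer loop over neighbouring ranks and an inner loop over the DOFs shared with that rank. A hedged usage sketch; countBoundaryDofs is an invented name, and the inner loop relies on setNextFeMap() having already positioned the DOF iterator, exactly as in the loops of this commit:

// Usage sketch for DofComm::Iterator, mirroring the loops introduced in
// MeshDistributor below. 'countBoundaryDofs' is hypothetical.
int countBoundaryDofs(DofComm &dofComm, const FiniteElemSpace *feSpace)
{
  int nDofs = 0;

  // Outer loop: one data entry per neighbouring rank.
  for (DofComm::Iterator it(dofComm, feSpace); !it.end(); it.nextRank())
    // Inner loop: all DOFs shared with this rank in the given FE space;
    // the constructor and nextRank() already reset the DOF iterator.
    for (; !it.endDofIter(); it.nextDof())
      nDofs++;

  return nDofs;
}
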
parallel/MeshDistributor.cc
@@ -28,6 +28,7 @@
#include "parallel/SimplePartitioner.h"
#include "parallel/CheckerPartitioner.h"
#include "parallel/MpiHelper.h"
#include "parallel/DofComm.h"
#include "io/ElementFileWriter.h"
#include "io/MacroInfo.h"
#include "io/VtkWriter.h"
@@ -522,44 +523,34 @@ namespace AMDiS {
{
FUNCNAME("MeshDistributor::synchVector()");
int nComponents = vec.getSize();
StdMpi<vector<double> > stdMpi(mpiComm);
typedef map<int, map<const FiniteElemSpace*, DofContainer> >::iterator it_type;
for (it_type sendIt = sendDofs.begin(); sendIt != sendDofs.end(); ++sendIt) {
for (DofComm::Iterator it(sendDofs); !it.end(); it.nextRank()) {
vector<double> dofs;
for (int i = 0; i < nComponents; i++) {
TEST_EXIT_DBG(sendIt->second.count(vec.getFeSpace(i)))
("Should not happen!\n");
DofContainer &feDofs = sendIt->second[vec.getFeSpace(i)];
DOFVector<double>& dofVec = *(vec.getDOFVector(i));
for (int i = 0; i < vec.getSize(); i++) {
DOFVector<double> &dofVec = *(vec.getDOFVector(i));
int nFeDofs = feDofs.size();
for (int j = 0; j < nFeDofs; j++)
dofs.push_back(dofVec[*(feDofs[j])]);
}
for (it.beginDofIter(vec.getFeSpace(i)); !it.endDofIter(); it.nextDof())
dofs.push_back(dofVec[it.getDofIndex()]);
}
stdMpi.send(sendIt->first, dofs);
stdMpi.send(it.getRank(), dofs);
}
for (it_type recvIt = recvDofs.begin(); recvIt != recvDofs.end(); ++recvIt)
stdMpi.recv(recvIt->first);
for (DofComm::Iterator it(recvDofs); !it.end(); it.nextRank())
stdMpi.recv(it.getRank());
stdMpi.startCommunication();
for (it_type recvIt = recvDofs.begin(); recvIt != recvDofs.end(); ++recvIt) {
for (DofComm::Iterator it(recvDofs); !it.end(); it.nextRank()) {
int counter = 0;
for (int i = 0; i < nComponents; i++) {
DofContainer &feDofs = recvIt->second[vec.getFeSpace(i)];
DOFVector<double>& dofVec = *(vec.getDOFVector(i));
int nFeDofs = feDofs.size();
for (int j = 0; j < nFeDofs; j++)
dofVec[*(feDofs[j])] = stdMpi.getRecvData(recvIt->first)[counter++];
for (int i = 0; i < vec.getSize(); i++) {
DOFVector<double> &dofVec = *(vec.getDOFVector(i));
for (it.beginDofIter(vec.getFeSpace(i)); !it.endDofIter(); it.nextDof())
dofVec[it.getDofIndex()] = stdMpi.getRecvData(it.getRank())[counter++];
}
}
}
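
The rewritten synchVector() keeps its three-phase structure: pack and send values along sendDofs, post receives along recvDofs, then unpack after startCommunication(). A condensed sketch of one component, using only StdMpi and DofComm calls that appear in this diff; synchSketch is an invented name, and error handling and the multi-component loop are omitted:

// Condensed sketch of the exchange cycle in synchVector() for a single
// DOFVector<double>. Assumes DOFVector::getFeSpace() as used elsewhere
// in AMDiS.
void synchSketch(MPI::Intracomm &mpiComm, DofComm &sendDofs,
                 DofComm &recvDofs, DOFVector<double> &dofVec)
{
  StdMpi<vector<double> > stdMpi(mpiComm);

  // Phase 1: pack the values of the boundary DOFs we own, per rank.
  for (DofComm::Iterator it(sendDofs); !it.end(); it.nextRank()) {
    vector<double> dofs;
    for (it.beginDofIter(dofVec.getFeSpace()); !it.endDofIter(); it.nextDof())
      dofs.push_back(dofVec[it.getDofIndex()]);
    stdMpi.send(it.getRank(), dofs);
  }

  // Phase 2: post a receive for every rank that sends to us.
  for (DofComm::Iterator it(recvDofs); !it.end(); it.nextRank())
    stdMpi.recv(it.getRank());

  stdMpi.startCommunication();

  // Phase 3: write the received values into the non-owned boundary DOFs.
  for (DofComm::Iterator it(recvDofs); !it.end(); it.nextRank()) {
    int counter = 0;
    for (it.beginDofIter(dofVec.getFeSpace()); !it.endDofIter(); it.nextDof())
      dofVec[it.getDofIndex()] = stdMpi.getRecvData(it.getRank())[counter++];
  }
}
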
@@ -685,15 +676,11 @@ namespace AMDiS {
FUNCNAME("MeshDistributor::getAllBoundaryDofs()");
DofContainerSet dofSet;
for (DofComm::Iterator it(sendDofs, feSpace); !it.end(); it.nextRank())
dofSet.insert(it.getDofs().begin(), it.getDofs().end());
for (DofComm::Iterator it(recvDofs, feSpace); !it.end(); it.nextRank())
dofSet.insert(it.getDofs().begin(), it.getDofs().end());
typedef map<int, map<const FiniteElemSpace*, DofContainer> >::iterator it_type;
for (it_type it = sendDofs.begin(); it != sendDofs.end(); ++it)
dofSet.insert(it->second[feSpace].begin(), it->second[feSpace].end());
for (it_type it = recvDofs.begin(); it != recvDofs.end(); ++it)
dofSet.insert(it->second[feSpace].begin(), it->second[feSpace].end());
dofs.clear();
dofs.insert(dofs.begin(), dofSet.begin(), dofSet.end());
}
@@ -993,17 +980,13 @@ namespace AMDiS {
boundaryDofs.clear();
typedef map<int, map<const FiniteElemSpace*, DofContainer> >::iterator it_type;
for (it_type it = sendDofs.begin(); it != sendDofs.end(); ++it)
for (DofContainer::iterator dofIt = it->second[feSpace].begin();
dofIt != it->second[feSpace].begin(); ++dofIt)
boundaryDofs.insert(**dofIt);
for (DofComm::Iterator it(sendDofs, feSpace); !it.end(); it.nextRank())
for (; !it.endDofIter(); it.nextDof())
boundaryDofs.insert(it.getDofIndex());
for (it_type it = recvDofs.begin(); it != recvDofs.end(); ++it)
for (DofContainer::iterator dofIt = it->second[feSpace].begin();
dofIt != it->second[feSpace].begin(); ++dofIt)
boundaryDofs.insert(**dofIt);
for (DofComm::Iterator it(recvDofs, feSpace); !it.end(); it.nextRank())
for (; !it.endDofIter(); it.nextDof())
boundaryDofs.insert(it.getDofIndex());
}
@@ -1820,7 +1803,7 @@ namespace AMDiS {
DofContainer dofs;
it->rankObj.el->getAllDofs(feSpace, it->rankObj, dofs);
DofContainer& tmp = sendDofs[it.getRank()][feSpace];
DofContainer& tmp = sendDofs.getDofCont(it.getRank(), feSpace);
tmp.insert(tmp.end(), dofs.begin(), dofs.end());
if (createBoundaryDofFlag.isSet(BOUNDARY_FILL_INFO_SEND_DOFS))
@@ -1836,7 +1819,7 @@ namespace AMDiS {
DofContainer dofs;
it->rankObj.el->getAllDofs(feSpace, it->rankObj, dofs);
DofContainer& tmp = recvDofs[it.getRank()][feSpace];
DofContainer& tmp = recvDofs.getDofCont(it.getRank(), feSpace);
tmp.insert(tmp.end(), dofs.begin(), dofs.end());
if (createBoundaryDofFlag.isSet(BOUNDARY_FILL_INFO_RECV_DOFS))
@@ -1847,41 +1830,18 @@ namespace AMDiS {
} else {
for (InteriorBoundary::iterator it(myIntBoundary); !it.end(); ++it)
it->rankObj.el->getAllDofs(feSpace, it->rankObj,
sendDofs[it.getRank()][feSpace]);
sendDofs.getDofCont(it.getRank(), feSpace));
for (InteriorBoundary::iterator it(otherIntBoundary); !it.end(); ++it)
it->rankObj.el->getAllDofs(feSpace, it->rankObj,
recvDofs[it.getRank()][feSpace]);
recvDofs.getDofCont(it.getRank(), feSpace));
}
// === Delete all empty DOF send and recv positions ===
typedef map<int, map<const FiniteElemSpace*, DofContainer> >::iterator it_type;
for (it_type sendIt = sendDofs.begin(); sendIt != sendDofs.end(); ++sendIt) {
map<const FiniteElemSpace*, DofContainer>::iterator it =
sendIt->second.begin();
while (it != sendIt->second.end()) {
if (it->second.size() == 0) {
const FiniteElemSpace* fe = it->first;
++it;
sendIt->second.erase(fe);
} else
++it;
}
}
for (it_type recvIt = recvDofs.begin(); recvIt != recvDofs.end(); ++recvIt) {
map<const FiniteElemSpace*, DofContainer>::iterator it =
recvIt->second.begin();
while (it != recvIt->second.end()) {
if (it->second.size() == 0) {
const FiniteElemSpace* fe = it->first;
++it;
recvIt->second.erase(fe);
} else
++it;
}
}
sendDofs.removeEmpty();
recvDofs.removeEmpty();
}
@@ -1964,18 +1924,14 @@ namespace AMDiS {
// All DOFs that must be received are not owned by this rank and must
// therefore be removed from the set 'rankDofs'.
for (map<int, map<const FiniteElemSpace*, DofContainer> >::iterator recvIt = recvDofs.begin();
recvIt != recvDofs.end(); ++recvIt) {
DofContainer &rDofs = recvIt->second[feSpace];
for (DofContainer::iterator dofIt = rDofs.begin();
dofIt != rDofs.end(); ++dofIt) {
for (DofComm::Iterator it(recvDofs, feSpace); !it.end(); it.nextRank()) {
for (; !it.endDofIter(); it.nextDof()) {
DofContainer::iterator eraseIt =
find(rankDofs.begin(), rankDofs.end(), *dofIt);
if (eraseIt != rankDofs.end())
find(rankDofs.begin(), rankDofs.end(), it.getDof());
if (eraseIt != rankDofs.end())
rankDofs.erase(eraseIt);
}
}
// Get displacement for global rank DOF ordering and global DOF number.
dofFeData[feSpace].nRankDofs = rankDofs.size();
@@ -1994,26 +1950,25 @@ namespace AMDiS {
// === Send and receive new DOF indices. ===
#if (DEBUG != 0)
ParallelDebug::testDofContainerCommunication(*this, sendDofs, recvDofs);
ParallelDebug::testDofContainerCommunication(*this,
sendDofs.getData(),
recvDofs.getData());
#endif
StdMpi<vector<DegreeOfFreedom> > stdMpi(mpiComm);
for (map<int, map<const FiniteElemSpace*, DofContainer> >::iterator sendIt = sendDofs.begin();
sendIt != sendDofs.end(); ++sendIt) {
DofContainer &sDofs = sendIt->second[feSpace];
stdMpi.getSendData(sendIt->first).resize(0);
stdMpi.getSendData(sendIt->first).reserve(sDofs.size());
for (DofContainer::iterator dofIt = sDofs.begin();
dofIt != sDofs.end(); ++dofIt)
stdMpi.getSendData(sendIt->first).push_back(rankDofsNewGlobalIndex[*dofIt]);
for (DofComm::Iterator it(sendDofs, feSpace); !it.end(); it.nextRank()) {
stdMpi.getSendData(it.getRank()).resize(0);
stdMpi.getSendData(it.getRank()).reserve(it.getDofs().size());
for (; !it.endDofIter(); it.nextDof())
stdMpi.getSendData(it.getRank()).
push_back(rankDofsNewGlobalIndex[it.getDof()]);
}
stdMpi.updateSendDataSize();
for (map<int, map<const FiniteElemSpace*, DofContainer> >::iterator recvIt = recvDofs.begin();
recvIt != recvDofs.end(); ++recvIt)
stdMpi.recv(recvIt->first);
for (DofComm::Iterator it(recvDofs); !it.end(); it.nextRank())
stdMpi.recv(it.getRank());
stdMpi.startCommunication();
@@ -2024,15 +1979,11 @@ namespace AMDiS {
for (int i = 0; i < nRankAllDofs; i++)
dofFeData[feSpace].isRankDof[i] = true;
for (map<int, map<const FiniteElemSpace*, DofContainer> >::iterator recvIt = recvDofs.begin();
recvIt != recvDofs.end(); ++recvIt) {
DofContainer &rDofs = recvIt->second[feSpace];
int i = 0;
for (DofContainer::iterator dofIt = rDofs.begin();
dofIt != rDofs.end(); ++dofIt) {
rankDofsNewGlobalIndex[*dofIt] = stdMpi.getRecvData(recvIt->first)[i++];
dofFeData[feSpace].isRankDof[**dofIt] = false;
for (DofComm::Iterator it(recvDofs, feSpace); !it.end(); it.nextRank()) {
for (; !it.endDofIter(); it.nextDof()) {
rankDofsNewGlobalIndex[it.getDof()] =
stdMpi.getRecvData(it.getRank())[it.getDofCounter()];
dofFeData[feSpace].isRankDof[it.getDofIndex()] = false;
}
}
@@ -2296,8 +2247,8 @@ namespace AMDiS {
otherIntBoundary.serialize(out);
periodicBoundary.serialize(out);
serialize(out, sendDofs);
serialize(out, recvDofs);
serialize(out, sendDofs.getData());
serialize(out, recvDofs.getData());
// === Serialize FE space dependent data ===
@@ -2368,8 +2319,8 @@ namespace AMDiS {
otherIntBoundary.deserialize(in, elIndexMap);
periodicBoundary.deserialize(in, elIndexMap);
deserialize(in, sendDofs, dofMap);
deserialize(in, recvDofs, dofMap);
deserialize(in, sendDofs.getData(), dofMap);
deserialize(in, recvDofs.getData(), dofMap);
// === Deserialize FE space dependent data ===
parallel/MeshDistributor.h
@@ -25,6 +25,7 @@
#include <mpi.h>
#include "parallel/DofComm.h"
#include "parallel/ElementObjectData.h"
#include "parallel/ParallelTypes.h"
#include "parallel/MeshPartitioner.h"
@@ -74,7 +75,8 @@ namespace AMDiS {
/// Maps local dof indices to real dof indices.
DofMapping mapLocalDofIndex;
};
class MeshDistributor
{
private:
@@ -275,12 +277,12 @@ namespace AMDiS {
return (periodicDof[type].count(globalDofIndex) > 0);
}
map<int, map<const FiniteElemSpace*, DofContainer> >& getSendDofs()
DofComm& getSendDofs()
{
return sendDofs;
}
map<int, map<const FiniteElemSpace*, DofContainer> >& getRecvDofs()
DofComm& getRecvDofs()
{
return recvDofs;
}
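
With the accessor types changed, callers that previously held the nested map by reference now receive a DofComm&; code that still needs the raw map can go through getData(), as the serialization calls above do. A minimal, hypothetical caller-side sketch (debugNeighbourCount is invented):

// Hypothetical caller adapted to the new accessor type; only
// getSendDofs() and getData() are taken from this commit.
int debugNeighbourCount(MeshDistributor &meshDist)
{
  DofComm &sendDofs = meshDist.getSendDofs();  // was: nested map reference
  // One map entry per rank this rank sends boundary DOFs to.
  return static_cast<int>(sendDofs.getData().size());
}
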
@@ -348,30 +350,25 @@ namespace AMDiS {
const FiniteElemSpace *fe = vec.getFeSpace();
typedef map<int, map<const FiniteElemSpace*, DofContainer> >::iterator it_type;
for (it_type sendIt = sendDofs.begin();
sendIt != sendDofs.end(); ++sendIt) {
for (DofComm::Iterator it(sendDofs, fe); !it.end(); it.nextRank()) {
vector<T> dofs;
int nSendDofs = sendIt->second[fe].size();
dofs.reserve(nSendDofs);
dofs.reserve(it.getDofs().size());
for (int i = 0; i < nSendDofs; i++)
dofs.push_back(vec[*((sendIt->second[fe])[i])]);
for (; !it.endDofIter(); it.nextDof())
dofs.push_back(vec[it.getDofIndex()]);
stdMpi.send(sendIt->first, dofs);
stdMpi.send(it.getRank(), dofs);
}
for (it_type recvIt = recvDofs.begin();
recvIt != recvDofs.end(); ++recvIt)
stdMpi.recv(recvIt->first, recvIt->second[fe].size());
for (DofComm::Iterator it(recvDofs); !it.end(); it.nextRank())
stdMpi.recv(it.getRank());
stdMpi.startCommunication();
for (it_type recvIt = recvDofs.begin();
recvIt != recvDofs.end(); ++recvIt)
for (unsigned int i = 0; i < recvIt->second.size(); i++)