Commit 4bb35e1e authored by Thomas Witkowski's avatar Thomas Witkowski
Browse files

Ich bin so blooooeeed, eh...

parent 59ee4244
......@@ -572,7 +572,7 @@ namespace AMDiS {
}
int ElementObjectDatabase::getIterateOwner()
int ElementObjectDatabase::getIterateOwner(int level)
{
FUNCNAME("ElementObjectDatabase::getIterateOwner()");
......@@ -593,9 +593,17 @@ namespace AMDiS {
break;
}
std::set<int> &levelRanks = levelData->getLevelRanks(level);
bool allRanks = (levelRanks.size() == 1 && *(levelRanks.begin()) == -1);
MSG("ALLRANKS [LEVEL%d]: %d (%d %d)\n", level, allRanks,
levelRanks.size(), *(levelRanks.begin()));
for (vector<ElementObjectData>::iterator it = objData->begin();
it != objData->end(); ++it)
owner = std::max(owner, (*macroElementRankMap)[it->elIndex]);
it != objData->end(); ++it) {
int elRank = (*macroElementRankMap)[it->elIndex];
if (allRanks || levelData->getLevelRanks(level).count(elRank))
owner = std::max(owner, elRank);
}
return owner;
}
......
......@@ -230,7 +230,7 @@ namespace AMDiS {
}
/// Returns the rank owner of the current iterator position.
int getIterateOwner();
int getIterateOwner(int level);
/// Returns the rank owner of the current iterator position.
int getIterateMaxLevel();
......
......@@ -22,7 +22,8 @@ namespace AMDiS {
using namespace std;
void InteriorBoundary::create(MPI::Intracomm &mpiComm,
void InteriorBoundary::create(MeshLevelData &levelData,
int level,
ElementObjectDatabase &elObjDb)
{
FUNCNAME("InteriorBoundary::clear()");
......@@ -34,7 +35,9 @@ namespace AMDiS {
Mesh *mesh = elObjDb.getMesh();
TEST_EXIT_DBG(mesh)("Should not happen!\n");
int mpiRank = mpiComm.Get_rank();
int mpiRank = levelData.getMpiComm(0);
MPI::Intracomm mpiComm = levelData.getMpiComm(level);
std::set<int> levelRanks = levelData.getLevelRanks(level);
// === Create interior boundary data structure. ===
......@@ -43,10 +46,31 @@ namespace AMDiS {
while (elObjDb.iterate(geoIndex)) {
map<int, ElementObjectData>& objData = elObjDb.getIterateData();
// Test, if this is a boundary object of this rank.
if (!(objData.count(mpiRank) && objData.size() > 1))
continue;
int owner = elObjDb.getIterateOwner();
#if 0
// Test, if the boundary object defines an interior boundary within the
// ranks of the MPI group. If not, go to next element.
bool boundaryWithinMpiGroup = false;
if (levelRanks.size() == 1 && *(levelRanks.begin()) == -1) {
boundaryWithinMpiGroup = true;
} else {
for (map<int, ElementObjectData>::iterator it = objData.begin();
it != objData.end(); ++it) {
if (it->first != mpiRank && levelRanks.count(it->first)) {
boundaryWithinMpiGroup == true;
break;
}
}
}
if (!boundaryWithinMpiGroup)
continue;
#endif
int owner = elObjDb.getIterateOwner(level);
ElementObjectData& rankBoundEl = objData[mpiRank];
AtomicBoundary bound;
......
......@@ -41,7 +41,8 @@ namespace AMDiS {
*/
class InteriorBoundary {
public:
void create(MPI::Intracomm &mpiComm,
void create(MeshLevelData &levelData,
int level,
ElementObjectDatabase &elObjDb);
RankToBoundMap& getOwn()
......@@ -121,7 +122,7 @@ namespace AMDiS {
{
do {
++vecIt;
} while (vecIt->maxLevel < level && vecIt != mapIt->second.end());
} while (vecIt != mapIt->second.end() && vecIt->maxLevel < level);
if (vecIt == mapIt->second.end()) {
++mapIt;
......@@ -169,7 +170,7 @@ namespace AMDiS {
vecIt = mapIt->second.begin();
// Search for the next atomic boundary on the mesh level
while (vecIt->maxLevel < level && vecIt != mapIt->second.end())
while (vecIt != mapIt->second.end() && vecIt->maxLevel < level)
++vecIt;
// If vector iterator is not at the end, we have found one and
......
......@@ -1511,9 +1511,12 @@ namespace AMDiS {
if (firstCall)
elObjDb.create(partitionMap, levelData);
elObjDb.updateRankData();
intBoundary.create(mpiComm, elObjDb);
intBoundary.create(levelData, 0, elObjDb);
// if (levelData.getLevelNumber() > 1)
// intBoundarySd.create(levelData, 1, elObjDb);
#if (DEBUG != 0)
ParallelDebug::printBoundaryInfo(*this);
......@@ -1530,10 +1533,12 @@ namespace AMDiS {
dofComm.init(0, levelData, feSpaces);
dofComm.create(intBoundary);
#if 0
if (levelData.getLevelNumber() > 1) {
dofCommSd.init(1, levelData, feSpaces);
dofCommSd.create(intBoundary);
}
#endif
// === If requested, create more information on communication DOFs. ===
......@@ -1690,6 +1695,11 @@ namespace AMDiS {
lastMeshChangeIndex = mesh->getChangeIndex();
MSG("TEST: %d %d\n",
dofComm.getNumberDofs(dofComm.getSendDofs(), 0, feSpaces[0]),
dofComm.getNumberDofs(dofComm.getRecvDofs(), 0, feSpaces[0]));
#if (DEBUG != 0)
ParallelDebug::testDofContainerCommunication(*this);
#endif
......
......@@ -471,6 +471,8 @@ namespace AMDiS {
/// partitioning the whole mesh.
InteriorBoundary intBoundary;
InteriorBoundary intBoundarySd;
DofComm dofComm;
DofComm dofCommSd;
......
......@@ -26,12 +26,10 @@ namespace AMDiS {
nonRankDofs.clear();
nonRankDofs.resize(nLevel);
for (int i = 0; i < nLevel; i++) {
nRankDofs[i] = 0;
nLocalDofs[i] = 0;
nOverallDofs[i] = 0;
rStartDofs[i] = 0;
}
nRankDofs.resize(nLevel, 0);
nLocalDofs.resize(nLevel, 0);
nOverallDofs.resize(nLevel, 0);
rStartDofs.resize(nLevel, 0);
}
......
......@@ -109,11 +109,7 @@ namespace AMDiS {
feSpace(NULL),
dofMap(1),
needGlobalMapping(false),
hasNonLocalDofs(false),
nRankDofs(1),
nLocalDofs(1),
nOverallDofs(1),
rStartDofs(1)
hasNonLocalDofs(false)
{
clear();
}
......
......@@ -39,7 +39,6 @@ BOOST_AUTO_TEST_CASE(amdis_mpi_feti)
BOOST_REQUIRE(feti.getNumberOfDuals() == 48);
Spreadsheet sheet;
sheet.read("data/data0002a");
vector<double> data = sheet.getData()[MPI::COMM_WORLD.Get_rank()];
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment