Commit 1f8ce33e authored by Thomas Witkowski

On the way to a bug-free AMDiS

parent 0287917c
......@@ -128,7 +128,8 @@ namespace AMDiS {
: data(d),
dofCounter(-1),
traverseFeSpace(fe),
- traverseLevel(0)
+ traverseLevel(0),
+ removedDof(false)
{
goFirst();
}
......@@ -139,7 +140,8 @@ namespace AMDiS {
: data(d),
dofCounter(-1),
traverseFeSpace(fe),
- traverseLevel(level)
+ traverseLevel(level),
+ removedDof(false)
{
goFirst();
}
......@@ -202,8 +204,18 @@ namespace AMDiS {
inline void nextDof()
{
- ++dofIter;
- ++dofCounter;
+ if (removedDof) {
+ removedDof = false;
+ } else {
+ ++dofIter;
+ ++dofCounter;
+ }
}
+ inline void removeDof()
+ {
+ dofIter = feMapIter->second.erase(dofIter);
+ removedDof = true;
+ }
inline int getRank()
......@@ -261,6 +273,8 @@ namespace AMDiS {
const FiniteElemSpace *traverseFeSpace;
int traverseLevel;
bool removedDof;
};
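Note: the removeDof()/nextDof() pair relies on the usual erase-safe iteration idiom: erase() already returns the iterator to the following element, so the next nextDof() call must not advance again, which is what the new removedDof flag tracks. A minimal, self-contained sketch of the same idiom with a plain std::vector (container and names are illustrative, not the actual DofComm data structures):

#include <iostream>
#include <vector>

int main()
{
  std::vector<int> dofs;
  for (int i = 1; i <= 4; i++)
    dofs.push_back(i);

  bool removedDof = false;                      // same role as the new member above
  std::vector<int>::iterator dofIter = dofs.begin();
  while (dofIter != dofs.end()) {
    if (*dofIter % 2 == 0) {                    // "removeDof()"
      dofIter = dofs.erase(dofIter);            // erase() yields the next element
      removedDof = true;
    }
    if (removedDof)                             // "nextDof()": skip the increment once
      removedDof = false;
    else
      ++dofIter;
  }

  std::cout << dofs.size() << std::endl;        // prints 2: only the odd entries remain
  return 0;
}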
......
......@@ -508,6 +508,8 @@ namespace AMDiS {
TEST_EXIT_DBG(level >= 0 && level <= 1)("Wrong level number!\n");
MSG("-----------------\n");
MPI::Intracomm &levelComm = levelData.getMpiComm(level);
DofComm &dc = (level == 0 ? dofComm : dofCommSd);
......@@ -520,14 +522,18 @@ namespace AMDiS {
for (int i = 0; i < vec.getSize(); i++) {
DOFVector<double> &dofVec = *(vec.getDOFVector(i));
- for (it.beginDofIter(vec.getFeSpace(i)); !it.endDofIter(); it.nextDof())
+ for (it.beginDofIter(vec.getFeSpace(i)); !it.endDofIter(); it.nextDof()) {
+ // MSG("SEND TO RANK %d FOR COMP %d\n", it.getRank(), i);
dofs.push_back(dofVec[it.getDofIndex()]);
+ }
}
int rank = it.getRank();
if (level > 0)
rank = levelData.mapRank(rank, 0, level);
stdMpi.send(rank, dofs);
MSG("SEND TO RANK %d OF SIZE %d\n", rank, dofs.size());
}
for (DofComm::Iterator it(dc.getRecvDofs()); !it.end(); it.nextRank()) {
......@@ -538,6 +544,10 @@ namespace AMDiS {
}
stdMpi.startCommunication();
for (DofComm::Iterator it(dc.getRecvDofs()); !it.end(); it.nextRank()) {
MSG("RECV FROM RANK %d OF SIZE %d\n", it.getRank(), stdMpi.getRecvData(it.getRank()).size());
}
for (DofComm::Iterator it(dc.getRecvDofs()); !it.end(); it.nextRank()) {
int rank = it.getRank();
......@@ -545,12 +555,16 @@ namespace AMDiS {
rank = levelData.mapRank(rank, 0, level);
int counter = 0;
vector<double> &recvData = stdMpi.getRecvData(rank);
for (int i = 0; i < vec.getSize(); i++) {
DOFVector<double> &dofVec = *(vec.getDOFVector(i));
- for (it.beginDofIter(vec.getFeSpace(i)); !it.endDofIter(); it.nextDof())
- dofVec[it.getDofIndex()] = stdMpi.getRecvData(rank)[counter++];
+ for (it.beginDofIter(vec.getFeSpace(i)); !it.endDofIter(); it.nextDof()) {
+ TEST_EXIT_DBG(counter < recvData.size())
+ ("Recv data from rank %d has only %d entries!\n", rank, recvData.size());
+ dofVec[it.getDofIndex()] = recvData[counter++];
+ }
}
}
}
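Note: the new TEST_EXIT_DBG guard documents the pack/unpack contract of synchVector: the receiver must consume the flat buffer in exactly the order the sender filled it, and running past its end means the communication partners disagree about the DOF layout. A stripped-down sketch of that contract with plain STL containers and assert in place of TEST_EXIT_DBG (all names are illustrative):

#include <cassert>
#include <vector>

int main()
{
  // Sender side: values of several components are packed into one flat buffer.
  std::vector<double> sendBuf;
  for (int comp = 0; comp < 3; comp++)
    for (int dof = 0; dof < 4; dof++)
      sendBuf.push_back(comp + 0.1 * dof);

  // Receiver side: unpack in the same nested order, guarded against overruns.
  std::vector<double> recvData = sendBuf;       // stands in for stdMpi.getRecvData(rank)
  std::vector<double> dofVec(4);
  int counter = 0;
  for (int comp = 0; comp < 3; comp++)
    for (int dof = 0; dof < 4; dof++) {
      assert(counter < static_cast<int>(recvData.size()));
      dofVec[dof] = recvData[counter++];
    }
  return 0;
}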
......@@ -1557,6 +1571,7 @@ namespace AMDiS {
dofCommSd.create(intBoundarySd);
}
// === If requested, create more information on communication DOFs. ===
if (!createBoundaryDofFlag.isSet(BOUNDARY_SUBOBJ_SORTED))
......
......@@ -84,7 +84,7 @@ namespace AMDiS {
int rank = it.getRank();
if (meshLevel > 0)
rank = levelData->mapRank(rank, 0, meshLevel);
for (; !it.endDofIter(); it.nextDof())
if (dofMap.count(it.getDofIndex()) &&
!nonRankDofs.count(it.getDofIndex()))
......
......@@ -306,7 +306,7 @@ namespace AMDiS {
primalDofMap.setMpiComm(levelData.getMpiComm(0), 0);
dualDofMap.setMpiComm(levelData.getMpiComm(0), 0);
lagrangeMap.setMpiComm(levelData.getMpiComm(0), 0);
localDofMap.setMpiComm(levelData.getMpiComm(meshLevel), meshLevel);
if (fetiPreconditioner != FETI_NONE)
interiorDofMap.setMpiComm(levelData.getMpiComm(meshLevel), meshLevel);
......@@ -330,12 +330,11 @@ namespace AMDiS {
if (fetiPreconditioner != FETI_NONE)
interiorDofMap.update();
for (unsigned int i = 0; i < meshDistributor->getFeSpaces().size(); i++) {
const FiniteElemSpace *feSpace = meshDistributor->getFeSpace(i);
createLagrange(feSpace);
}
lagrangeMap.update();
......@@ -347,10 +346,6 @@ namespace AMDiS {
} else {
MeshLevelData& levelData = meshDistributor->getMeshLevelData();
MSG("RANK %d FROM %d\n",
levelData.getMpiComm(1).Get_rank(),
levelData.getMpiComm(1).Get_size());
int groupRowsInterior = 0;
if (levelData.getMpiComm(1).Get_rank() == 0)
groupRowsInterior = localDofMap.getOverallDofs();
......@@ -363,12 +358,6 @@ namespace AMDiS {
tmp = rStartInterior;
levelData.getMpiComm(1).Allreduce(&tmp, &rStartInterior, 1, MPI_INT, MPI_SUM);
MSG("COMM TEST FETI-DP: %d %d %d %d %d\n",
levelData.getMpiComm(1).Get_size(),
localDofMap.getRankDofs(),
localDofMap.getOverallDofs(),
nGlobalOverallInterior, rStartInterior);
}
MSG("FETI-DP data created on mesh level %d\n", meshLevel);
......@@ -393,7 +382,6 @@ namespace AMDiS {
("Should not happen!\n");
}
// If multi level test, inform sub domain solver about coarse space.
subDomainSolver->setDofMapping(&primalDofMap, &localDofMap);
}
......@@ -456,10 +444,10 @@ namespace AMDiS {
if (!isPrimal(feSpace, **it))
if (meshLevel == 0) {
dualDofMap[feSpace].insertRankDof(**it);
} else {
if (meshDistributor->getDofMapSd()[feSpace].isRankDof(**it))
dualDofMap[feSpace].insertRankDof(**it);
}
} else {
if (meshDistributor->getDofMapSd()[feSpace].isRankDof(**it))
dualDofMap[feSpace].insertRankDof(**it);
}
}
......@@ -510,7 +498,6 @@ namespace AMDiS {
sdRankDofs[it.getRank()].insert(it.getDofIndex());
}
if (dualDofMap[feSpace].nLocalDofs == 0)
return;
......@@ -525,9 +512,11 @@ namespace AMDiS {
if (!isPrimal(feSpace, it.getDofIndex())) {
boundaryDofRanks[feSpace][it.getDofIndex()].insert(mpiRank);
- if (meshLevel == 0 ||
- (meshLevel > 0 && sdRankDofs[it.getRank()].count(it.getDofIndex())))
- boundaryDofRanks[feSpace][it.getDofIndex()].insert(it.getRank());
+ if (meshLevel == 0 ||
+ (meshLevel > 0 && sdRankDofs[it.getRank()].count(it.getDofIndex()))) {
+ boundaryDofRanks[feSpace][it.getDofIndex()].insert(it.getRank());
+ }
}
}
}
......@@ -542,9 +531,9 @@ namespace AMDiS {
!it.end(); it.nextRank())
for (; !it.endDofIter(); it.nextDof())
if (!isPrimal(feSpace, it.getDofIndex()))
if (meshLevel == 0 ||
(meshLevel > 0 && sdRankDofs[it.getRank()].count(it.getDofIndex())))
stdMpi.getSendData(it.getRank()).push_back(boundaryDofRanks[feSpace][it.getDofIndex()]);
stdMpi.updateSendDataSize();
......@@ -553,9 +542,9 @@ namespace AMDiS {
bool recvFromRank = false;
for (; !it.endDofIter(); it.nextDof()) {
if (!isPrimal(feSpace, it.getDofIndex())) {
if (meshLevel == 0 ||
(meshLevel > 0 &&
meshDistributor->getDofMapSd()[feSpace].isRankDof(it.getDofIndex()))) {
recvFromRank = true;
break;
}
......@@ -573,11 +562,13 @@ namespace AMDiS {
int i = 0;
for (; !it.endDofIter(); it.nextDof())
if (!isPrimal(feSpace, it.getDofIndex()))
if (meshLevel == 0 ||
(meshLevel > 0 &&
meshDistributor->getDofMapSd()[feSpace].isRankDof(it.getDofIndex())))
boundaryDofRanks[feSpace][it.getDofIndex()] =
stdMpi.getRecvData(it.getRank())[i++];
else
lagrangeMap[feSpace].insertNonRankDof(it.getDofIndex());
}
......@@ -692,7 +683,6 @@ namespace AMDiS {
localDofMap.getMatIndex(k, it->first) + rStartInterior;
double value = (W[i] == mpiRank ? 1.0 : -1.0);
MSG("SET VALUE: %f\n", value);
MatSetValue(mat_lagrange, index, colIndex, value, INSERT_VALUES);
}
index++;
......@@ -1135,12 +1125,6 @@ namespace AMDiS {
VecRestoreArray(vec_sol_b, &localSolB);
VecRestoreArray(local_sol_primal, &localSolPrimal);
VecDestroy(&local_sol_primal);
TEST_EXIT_DBG(meshLevel <= 1)("Rework for higher multilevel methods!\n");
if (meshLevel == 1) {
// meshDistributor->synchVector(vec, meshLevel);
}
}
......@@ -1534,7 +1518,7 @@ namespace AMDiS {
else
solveReducedFetiMatrix(vec);
- MeshDistributor::globalMeshDistributor->synchVector(vec);
+ // MeshDistributor::globalMeshDistributor->synchVector(vec);
}
}
......@@ -228,6 +228,7 @@ namespace AMDiS {
};
/** \brief
* This class is used to easily send and receive STL containers using MPI.
*/
......@@ -376,6 +377,8 @@ namespace AMDiS {
/// Returns received data from a specific rank, see \ref recvData.
RecvT& getRecvData(int rank)
{
FUNCNAME("StdMpi::getRecvData()");
TEST_EXIT_DBG(recvData.count(rank))("No recv data from rank %d\n", rank);
return recvData[rank];
}
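Note: the added TEST_EXIT_DBG protects against the std::map::operator[] pitfall: asking for a rank that never sent anything would otherwise silently insert an empty container and hide the missing communication. A small sketch of the same checked-access pattern, assuming a map-based recvData and using assert instead of TEST_EXIT_DBG:

#include <cassert>
#include <map>
#include <vector>

typedef std::vector<double> RecvT;

std::map<int, RecvT> recvData;

RecvT& getRecvData(int rank)
{
  // Fail loudly instead of letting operator[] create an empty entry.
  assert(recvData.count(rank) > 0);
  return recvData[rank];
}

int main()
{
  recvData[3].push_back(1.5);
  RecvT& fromRank3 = getRecvData(3);   // ok, rank 3 has data
  (void) fromRank3;
  // getRecvData(7);                   // would abort in a debug build
  return 0;
}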
......@@ -395,7 +398,6 @@ namespace AMDiS {
for (map<int, int>::iterator sendIt = sendDataSize.begin();
sendIt != sendDataSize.end(); ++sendIt) {
sendBuffers[requestCounter] = sendIt->second;
request[requestCounter] =
mpiComm.Isend(&(sendBuffers[requestCounter]), 1,
MPI_INT, sendIt->first, 0);
......@@ -403,9 +405,10 @@ namespace AMDiS {
}
for (map<int, int>::iterator recvIt = recvDataSize.begin();
- recvIt != recvDataSize.end(); ++recvIt)
+ recvIt != recvDataSize.end(); ++recvIt) {
request[requestCounter++] =
mpiComm.Irecv(&(recvIt->second), 1, MPI_INT, recvIt->first, 0);
}
MPI::Request::Waitall(requestCounter, request);
}
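Note: commDataSize() first exchanges only the message lengths, so each side can size its receive buffers before the payload is transferred. A minimal sketch of such a size exchange using the plain MPI C API rather than the (now deprecated) C++ bindings used above; the two-rank setup and the partner computation are illustrative only:

#include <mpi.h>
#include <cstdio>

int main(int argc, char** argv)
{
  MPI_Init(&argc, &argv);
  int rank;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  int sendSize = (rank == 0 ? 42 : 7);  // how many values this rank will send later
  int recvSize = 0;
  int partner = 1 - rank;               // assumes exactly two ranks

  MPI_Request requests[2];
  MPI_Isend(&sendSize, 1, MPI_INT, partner, 0, MPI_COMM_WORLD, &requests[0]);
  MPI_Irecv(&recvSize, 1, MPI_INT, partner, 0, MPI_COMM_WORLD, &requests[1]);
  MPI_Waitall(2, requests, MPI_STATUSES_IGNORE);

  // recvSize can now be used to resize the real receive buffer.
  std::printf("rank %d will receive %d values\n", rank, recvSize);

  MPI_Finalize();
  return 0;
}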
......@@ -425,7 +428,6 @@ namespace AMDiS {
if (exchangeDataSize)
commDataSize();
// === Remove empty data communication. ===
{
......@@ -531,6 +533,8 @@ namespace AMDiS {
bool commPrepared;
bool exchangeDataSize;
static int ccc;
};
}
......