Commit 1f8ce33e authored by Thomas Witkowski

On the way to a bug-free AMDiS

parent 0287917c
......@@ -128,7 +128,8 @@ namespace AMDiS {
: data(d),
dofCounter(-1),
traverseFeSpace(fe),
traverseLevel(0)
traverseLevel(0),
removedDof(false)
{
goFirst();
}
......@@ -139,7 +140,8 @@ namespace AMDiS {
: data(d),
dofCounter(-1),
traverseFeSpace(fe),
traverseLevel(level)
traverseLevel(level),
removedDof(false)
{
goFirst();
}
......@@ -202,9 +204,19 @@ namespace AMDiS {
inline void nextDof()
{
if (removedDof) {
removedDof = false;
} else {
++dofIter;
++dofCounter;
}
}
inline void removeDof()
{
dofIter = feMapIter->second.erase(dofIter);
removedDof = true;
}
inline int getRank()
{
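The new removedDof flag makes it safe to delete DOFs while iterating: removeDof() erases the current entry, and erase() already returns the iterator to the successor, so the following nextDof() must skip its usual increment exactly once. A minimal stand-alone sketch of this pattern, using only plain STL containers (no AMDiS types):

#include <iostream>
#include <vector>

int main()
{
  std::vector<int> dofs;
  for (int i = 1; i <= 5; i++)
    dofs.push_back(i);

  std::vector<int>::iterator dofIter = dofs.begin();
  bool removedDof = false;                 // mirrors the new member variable

  while (dofIter != dofs.end()) {
    if (*dofIter % 2 == 0) {               // "removeDof()"
      dofIter = dofs.erase(dofIter);       // iterator now points to the successor
      removedDof = true;
    }

    // "nextDof()": skip the increment exactly once right after an erase
    if (removedDof)
      removedDof = false;
    else
      ++dofIter;
  }

  for (size_t i = 0; i < dofs.size(); i++)
    std::cout << dofs[i] << " ";           // prints: 1 3 5
  std::cout << std::endl;

  return 0;
}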
......@@ -261,6 +273,8 @@ namespace AMDiS {
const FiniteElemSpace *traverseFeSpace;
int traverseLevel;
bool removedDof;
};
......
......@@ -508,6 +508,8 @@ namespace AMDiS {
TEST_EXIT_DBG(level >= 0 && level <= 1)("Wrong level number!\n");
MSG("-----------------\n");
MPI::Intracomm &levelComm = levelData.getMpiComm(level);
DofComm &dc = (level == 0 ? dofComm : dofCommSd);
......@@ -520,14 +522,18 @@ namespace AMDiS {
for (int i = 0; i < vec.getSize(); i++) {
DOFVector<double> &dofVec = *(vec.getDOFVector(i));
for (it.beginDofIter(vec.getFeSpace(i)); !it.endDofIter(); it.nextDof())
for (it.beginDofIter(vec.getFeSpace(i)); !it.endDofIter(); it.nextDof()) {
// MSG("SEND TO RANK %d FOR COMP %d\n", it.getRank(), i);
dofs.push_back(dofVec[it.getDofIndex()]);
}
}
int rank = it.getRank();
if (level > 0)
rank = levelData.mapRank(rank, 0, level);
stdMpi.send(rank, dofs);
MSG("SEND TO RANK %d OF SIZE %d\n", rank, dofs.size());
}
for (DofComm::Iterator it(dc.getRecvDofs()); !it.end(); it.nextRank()) {
......@@ -539,18 +545,26 @@ namespace AMDiS {
stdMpi.startCommunication();
for (DofComm::Iterator it(dc.getRecvDofs()); !it.end(); it.nextRank()) {
MSG("RECV FROM RANK %d OF SIZE %d\n", it.getRank(), stdMpi.getRecvData(it.getRank()).size());
}
for (DofComm::Iterator it(dc.getRecvDofs()); !it.end(); it.nextRank()) {
int rank = it.getRank();
if (level > 0)
rank = levelData.mapRank(rank, 0, level);
int counter = 0;
vector<double> &recvData = stdMpi.getRecvData(rank);
for (int i = 0; i < vec.getSize(); i++) {
DOFVector<double> &dofVec = *(vec.getDOFVector(i));
for (it.beginDofIter(vec.getFeSpace(i)); !it.endDofIter(); it.nextDof())
dofVec[it.getDofIndex()] = stdMpi.getRecvData(rank)[counter++];
for (it.beginDofIter(vec.getFeSpace(i)); !it.endDofIter(); it.nextDof()) {
TEST_EXIT_DBG(counter < recvData.size())
("Recv data from rank %d has only %d entries!\n", rank, recvData.size());
dofVec[it.getDofIndex()] = recvData[counter++];
}
}
}
}
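The braces and the TEST_EXIT_DBG added here guard the unpacking step: the sender flattens the values of all vector components into one buffer per rank, and the receiver reads them back with a running counter that must never leave the bounds of the received data. A stand-alone sketch of this pack/unpack pattern (plain std::vector and assert instead of the AMDiS macro):

#include <cassert>
#include <vector>

int main()
{
  // Two "components" with three values each, as a stand-in for the DOFVectors.
  std::vector<std::vector<double> > components(2, std::vector<double>(3, 1.5));

  // Sender side: flatten all components into one message buffer.
  std::vector<double> buffer;
  for (size_t i = 0; i < components.size(); i++)
    for (size_t j = 0; j < components[i].size(); j++)
      buffer.push_back(components[i][j]);

  // Receiver side: unpack with a running counter and check the bounds,
  // analogous to the TEST_EXIT_DBG introduced in this commit.
  size_t counter = 0;
  for (size_t i = 0; i < components.size(); i++)
    for (size_t j = 0; j < components[i].size(); j++) {
      assert(counter < buffer.size());
      components[i][j] = buffer[counter++];
    }

  return 0;
}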
......@@ -1557,6 +1571,7 @@ namespace AMDiS {
dofCommSd.create(intBoundarySd);
}
// === If requested, create more information on communication DOFs. ===
if (!createBoundaryDofFlag.isSet(BOUNDARY_SUBOBJ_SORTED))
......
......@@ -330,7 +330,6 @@ namespace AMDiS {
if (fetiPreconditioner != FETI_NONE)
interiorDofMap.update();
for (unsigned int i = 0; i < meshDistributor->getFeSpaces().size(); i++) {
const FiniteElemSpace *feSpace = meshDistributor->getFeSpace(i);
createLagrange(feSpace);
......@@ -347,10 +346,6 @@ namespace AMDiS {
} else {
MeshLevelData& levelData = meshDistributor->getMeshLevelData();
MSG("RANK %d FROM %d\n",
levelData.getMpiComm(1).Get_rank(),
levelData.getMpiComm(1).Get_size());
int groupRowsInterior = 0;
if (levelData.getMpiComm(1).Get_rank() == 0)
groupRowsInterior = localDofMap.getOverallDofs();
......@@ -363,12 +358,6 @@ namespace AMDiS {
tmp = rStartInterior;
levelData.getMpiComm(1).Allreduce(&tmp, &rStartInterior, 1, MPI_INT, MPI_SUM);
MSG("COMM TEST FETI-DP: %d %d %d %d %d\n",
levelData.getMpiComm(1).Get_size(),
localDofMap.getRankDofs(),
localDofMap.getOverallDofs(),
nGlobalOverallInterior, rStartInterior);
}
MSG("FETI-DP data created on mesh level %d\n", meshLevel);
......@@ -393,7 +382,6 @@ namespace AMDiS {
("Should not happen!\n");
}
// If multi level test, inform sub domain solver about coarse space.
subDomainSolver->setDofMapping(&primalDofMap, &localDofMap);
}
......@@ -510,7 +498,6 @@ namespace AMDiS {
sdRankDofs[it.getRank()].insert(it.getDofIndex());
}
if (dualDofMap[feSpace].nLocalDofs == 0)
return;
......@@ -526,9 +513,11 @@ namespace AMDiS {
boundaryDofRanks[feSpace][it.getDofIndex()].insert(mpiRank);
if (meshLevel == 0 ||
(meshLevel > 0 && sdRankDofs[it.getRank()].count(it.getDofIndex())))
(meshLevel > 0 && sdRankDofs[it.getRank()].count(it.getDofIndex()))) {
boundaryDofRanks[feSpace][it.getDofIndex()].insert(it.getRank());
}
}
}
}
......@@ -578,6 +567,8 @@ namespace AMDiS {
meshDistributor->getDofMapSd()[feSpace].isRankDof(it.getDofIndex())))
boundaryDofRanks[feSpace][it.getDofIndex()] =
stdMpi.getRecvData(it.getRank())[i++];
else
lagrangeMap[feSpace].insertNonRankDof(it.getDofIndex());
}
......@@ -692,7 +683,6 @@ namespace AMDiS {
localDofMap.getMatIndex(k, it->first) + rStartInterior;
double value = (W[i] == mpiRank ? 1.0 : -1.0);
MSG("SET VALUE: %f\n", value);
MatSetValue(mat_lagrange, index, colIndex, value, INSERT_VALUES);
}
index++;
......@@ -1135,12 +1125,6 @@ namespace AMDiS {
VecRestoreArray(vec_sol_b, &localSolB);
VecRestoreArray(local_sol_primal, &localSolPrimal);
VecDestroy(&local_sol_primal);
TEST_EXIT_DBG(meshLevel <= 1)("Rework for higher multilevel methods!\n");
if (meshLevel == 1) {
// meshDistributor->synchVector(vec, meshLevel);
}
}
......@@ -1534,7 +1518,7 @@ namespace AMDiS {
else
solveReducedFetiMatrix(vec);
MeshDistributor::globalMeshDistributor->synchVector(vec);
// MeshDistributor::globalMeshDistributor->synchVector(vec);
}
}
......@@ -228,6 +228,7 @@ namespace AMDiS {
};
/** \brief
* This class is used to easily send and receive STL containers using MPI.
*/
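A hypothetical usage sketch of this class, based only on the calls that appear elsewhere in this commit (send(), startCommunication(), getRecvData()); the constructor signature and the recv() registration call are assumptions, not taken from the header:

// Hypothetical sketch; mpiComm, neighbourRank and values are placeholders.
StdMpi<std::vector<double> > stdMpi(mpiComm);
stdMpi.send(neighbourRank, values);     // queue an STL container for sending
stdMpi.recv(neighbourRank);             // assumed: announce data to be received
stdMpi.startCommunication();            // exchange sizes and payloads
std::vector<double> &result = stdMpi.getRecvData(neighbourRank);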
......@@ -376,6 +377,8 @@ namespace AMDiS {
/// Returns received data from a specific rank, see \ref recvData.
RecvT& getRecvData(int rank)
{
FUNCNAME("StdMpi::getRecvData()");
TEST_EXIT_DBG(recvData.count(rank))("No recv data from rank %d\n", rank);
return recvData[rank];
}
......@@ -395,7 +398,6 @@ namespace AMDiS {
for (map<int, int>::iterator sendIt = sendDataSize.begin();
sendIt != sendDataSize.end(); ++sendIt) {
sendBuffers[requestCounter] = sendIt->second;
request[requestCounter] =
mpiComm.Isend(&(sendBuffers[requestCounter]), 1,
MPI_INT, sendIt->first, 0);
......@@ -403,9 +405,10 @@ namespace AMDiS {
}
for (map<int, int>::iterator recvIt = recvDataSize.begin();
recvIt != recvDataSize.end(); ++recvIt)
recvIt != recvDataSize.end(); ++recvIt) {
request[requestCounter++] =
mpiComm.Irecv(&(recvIt->second), 1, MPI_INT, recvIt->first, 0);
}
MPI::Request::Waitall(requestCounter, request);
}
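The size-exchange loop above posts one nonblocking send and one nonblocking receive per communication partner and then waits for all requests at once. A minimal stand-alone sketch of that pattern with the same (now deprecated) MPI C++ bindings, exchanging a single int around a ring of ranks:

#include <mpi.h>
#include <iostream>

int main(int argc, char** argv)
{
  MPI::Init(argc, argv);
  int rank = MPI::COMM_WORLD.Get_rank();
  int size = MPI::COMM_WORLD.Get_size();

  int sendSize = 100 + rank;              // pretend this is the local data size
  int recvSize = 0;
  int dest   = (rank + 1) % size;         // simple ring of neighbours
  int source = (rank + size - 1) % size;

  MPI::Request request[2];
  request[0] = MPI::COMM_WORLD.Isend(&sendSize, 1, MPI_INT, dest, 0);
  request[1] = MPI::COMM_WORLD.Irecv(&recvSize, 1, MPI_INT, source, 0);
  MPI::Request::Waitall(2, request);      // same completion call as above

  std::cout << "rank " << rank << " expects " << recvSize
            << " values from rank " << source << std::endl;

  MPI::Finalize();
  return 0;
}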
......@@ -425,7 +428,6 @@ namespace AMDiS {
if (exchangeDataSize)
commDataSize();
// === Remove empty data communication. ===
{
......@@ -531,6 +533,8 @@ namespace AMDiS {
bool commPrepared;
bool exchangeDataSize;
static int ccc;
};
}
......