Commit 23eb48b5 authored by Thomas Witkowski's avatar Thomas Witkowski
Browse files

Cool, the parallelization works. Why, I do not know.

parent 6f81ca98
...@@ -24,7 +24,7 @@ namespace AMDiS { ...@@ -24,7 +24,7 @@ namespace AMDiS {
PetscErrorCode myKSPMonitor(KSP ksp, PetscInt iter, PetscReal rnorm, void *) PetscErrorCode myKSPMonitor(KSP ksp, PetscInt iter, PetscReal rnorm, void *)
{ {
if (iter % 1 == 0 && MPI::COMM_WORLD.Get_rank() == 0) if (iter % 100 == 0 && MPI::COMM_WORLD.Get_rank() == 0)
std::cout << " Iteration " << iter << ": " << rnorm << std::endl; std::cout << " Iteration " << iter << ": " << rnorm << std::endl;
return 0; return 0;
...@@ -108,7 +108,6 @@ namespace AMDiS { ...@@ -108,7 +108,6 @@ namespace AMDiS {
updateDofAdmins(); updateDofAdmins();
// === Global refinements. === // === Global refinements. ===
int globalRefinement = 0; int globalRefinement = 0;
...@@ -579,15 +578,15 @@ namespace AMDiS { ...@@ -579,15 +578,15 @@ namespace AMDiS {
// === Create for all rank owned dofs a new global indexing. === // === Create for all rank owned dofs a new global indexing. ===
// Stores for all rank owned dofs a new global index. // Stores for dofs in rank a new global index.
DofIndexMap rankOwnedDofsNewGlobalIndex; DofIndexMap rankDofsNewGlobalIndex;
// Stores for all rank owned dofs a continuous local index. // Stores for all rank owned dofs a continuous local index.
DofIndexMap rankOwnedDofsNewLocalIndex; DofIndexMap rankOwnedDofsNewLocalIndex;
i = 0; i = 0;
for (DofContainer::iterator dofIt = rankDOFs.begin(); for (DofContainer::iterator dofIt = rankDOFs.begin();
dofIt != rankDOFs.end(); ++dofIt) { dofIt != rankDOFs.end(); ++dofIt) {
rankOwnedDofsNewGlobalIndex[*dofIt] = i + rstart; rankDofsNewGlobalIndex[*dofIt] = i + rstart;
rankOwnedDofsNewLocalIndex[*dofIt] = i; rankOwnedDofsNewLocalIndex[*dofIt] = i;
i++; i++;
} }
...@@ -615,10 +614,10 @@ namespace AMDiS { ...@@ -615,10 +614,10 @@ namespace AMDiS {
itRanks != partitionDOFs[it->first].end(); itRanks != partitionDOFs[it->first].end();
++itRanks) { ++itRanks) {
if (*itRanks != mpiRank) { if (*itRanks != mpiRank) {
TEST_EXIT_DBG(rankOwnedDofsNewGlobalIndex.count(it->first) == 1) TEST_EXIT_DBG(rankDofsNewGlobalIndex.count(it->first) == 1)
("DOF Key not found!\n"); ("DOF Key not found!\n");
sendNewDofs[*itRanks][it->first] = rankOwnedDofsNewGlobalIndex[it->first]; sendNewDofs[*itRanks][it->first] = rankDofsNewGlobalIndex[it->first];
} }
} }
} else { } else {
...@@ -725,7 +724,7 @@ namespace AMDiS { ...@@ -725,7 +724,7 @@ namespace AMDiS {
dofChanged[dofIt->first] = true; dofChanged[dofIt->first] = true;
recvDofs[recvIt->first].push_back(dofIt->first); recvDofs[recvIt->first].push_back(dofIt->first);
rankOwnedDofsNewGlobalIndex[dofIt->first] = newGlobalDof; rankDofsNewGlobalIndex[dofIt->first] = newGlobalDof;
isRankDof[rankDofsNewLocalIndex[dofIt->first]] = false; isRankDof[rankDofsNewLocalIndex[dofIt->first]] = false;
found = true; found = true;
...@@ -745,7 +744,7 @@ namespace AMDiS { ...@@ -745,7 +744,7 @@ namespace AMDiS {
for (DofIndexMap::iterator dofIt = rankDofsNewLocalIndex.begin(); for (DofIndexMap::iterator dofIt = rankDofsNewLocalIndex.begin();
dofIt != rankDofsNewLocalIndex.end(); ++dofIt) { dofIt != rankDofsNewLocalIndex.end(); ++dofIt) {
DegreeOfFreedom localDof = dofIt->second; DegreeOfFreedom localDof = dofIt->second;
DegreeOfFreedom globalDof = rankOwnedDofsNewGlobalIndex[dofIt->first]; DegreeOfFreedom globalDof = rankDofsNewGlobalIndex[dofIt->first];
*const_cast<DegreeOfFreedom*>(dofIt->first) = localDof; *const_cast<DegreeOfFreedom*>(dofIt->first) = localDof;
mapLocalGlobalDOFs[localDof] = globalDof; mapLocalGlobalDOFs[localDof] = globalDof;
...@@ -932,15 +931,13 @@ namespace AMDiS { ...@@ -932,15 +931,13 @@ namespace AMDiS {
mpiComm.Allreduce(&nRankDOFs, &nOverallDOFs, 1, MPI_INT, MPI_SUM); mpiComm.Allreduce(&nRankDOFs, &nOverallDOFs, 1, MPI_INT, MPI_SUM);
// ===
// Do not change the indices now, but create a new indexing and store it here. // Do not change the indices now, but create a new indexing and store it here.
DofIndexMap rankDofsNewLocalIndex; DofIndexMap rankDofsNewLocalIndex;
isRankDof.clear(); isRankDof.clear();
int i = 0; int i = 0;
for (DofContainer::iterator dofIt = rankAllDofs.begin(); for (DofContainer::iterator dofIt = rankAllDofs.begin();
dofIt != rankAllDofs.end(); ++dofIt) { dofIt != rankAllDofs.end(); ++dofIt) {
rankDofsNewLocalIndex[*dofIt] = i; rankDofsNewLocalIndex[*dofIt] = i;
// First, we set all dofs in the rank's partition to be owned by the rank. Later, // First, we set all dofs in the rank's partition to be owned by the rank. Later,
// the dofs in the rank's partition that are owned by other ranks are set to false. // the dofs in the rank's partition that are owned by other ranks are set to false.
...@@ -949,14 +946,14 @@ namespace AMDiS { ...@@ -949,14 +946,14 @@ namespace AMDiS {
} }
// Stores for all rank owned dofs a new global index. // Stores for all rank owned dofs a new global index.
DofIndexMap rankOwnedDofsNewGlobalIndex; DofIndexMap rankDofsNewGlobalIndex;
// Stores for all rank owned dofs a continuous local index. // Stores for all rank owned dofs a continuous local index.
DofIndexMap rankOwnedDofsNewLocalIndex; DofIndexMap rankOwnedDofsNewLocalIndex;
i = 0; i = 0;
for (DofContainer::iterator dofIt = rankDOFs.begin(); for (DofContainer::iterator dofIt = rankDOFs.begin();
dofIt != rankDOFs.end(); ++dofIt) { dofIt != rankDOFs.end(); ++dofIt) {
rankOwnedDofsNewGlobalIndex[*dofIt] = i + rstart; rankDofsNewGlobalIndex[*dofIt] = i + rstart;
rankOwnedDofsNewLocalIndex[*dofIt] = i; rankOwnedDofsNewLocalIndex[*dofIt] = i;
i++; i++;
} }
...@@ -978,7 +975,7 @@ namespace AMDiS { ...@@ -978,7 +975,7 @@ namespace AMDiS {
int c = 0; int c = 0;
for (DofContainer::iterator dofIt = sendIt->second.begin(); for (DofContainer::iterator dofIt = sendIt->second.begin();
dofIt != sendIt->second.end(); ++dofIt) dofIt != sendIt->second.end(); ++dofIt)
sendBuffers[i][c++] = rankOwnedDofsNewGlobalIndex[*dofIt]; sendBuffers[i][c++] = rankDofsNewGlobalIndex[*dofIt];
request[requestCounter++] = request[requestCounter++] =
mpiComm.Isend(sendBuffers[i], nSendDofs, MPI_INT, sendIt->first, 0); mpiComm.Isend(sendBuffers[i], nSendDofs, MPI_INT, sendIt->first, 0);
...@@ -1006,14 +1003,15 @@ namespace AMDiS { ...@@ -1006,14 +1003,15 @@ namespace AMDiS {
for (DofContainer::iterator dofIt = recvIt->second.begin(); for (DofContainer::iterator dofIt = recvIt->second.begin();
dofIt != recvIt->second.end(); ++dofIt) { dofIt != recvIt->second.end(); ++dofIt) {
rankOwnedDofsNewGlobalIndex[*dofIt] = recvBuffers[i][j]; rankDofsNewGlobalIndex[*dofIt] = recvBuffers[i][j];
isRankDof[rankOwnedDofsNewLocalIndex[*dofIt]] = false; isRankDof[rankDofsNewLocalIndex[*dofIt]] = false;
j++; j++;
} }
delete [] recvBuffers[i++]; delete [] recvBuffers[i++];
} }
// === Update list of dofs that must be communicated for solution exchange. === // === Update list of dofs that must be communicated for solution exchange. ===
sendDofs = sendNewDofs; sendDofs = sendNewDofs;
...@@ -1023,11 +1021,12 @@ namespace AMDiS { ...@@ -1023,11 +1021,12 @@ namespace AMDiS {
// === Create now the local to global index, and vice verse, mappings. === // === Create now the local to global index, and vice verse, mappings. ===
mapLocalGlobalDOFs.clear(); mapLocalGlobalDOFs.clear();
mapLocalToDofIndex.clear();
for (DofIndexMap::iterator dofIt = rankDofsNewLocalIndex.begin(); for (DofIndexMap::iterator dofIt = rankDofsNewLocalIndex.begin();
dofIt != rankDofsNewLocalIndex.end(); ++dofIt) { dofIt != rankDofsNewLocalIndex.end(); ++dofIt) {
DegreeOfFreedom localDof = dofIt->second; DegreeOfFreedom localDof = dofIt->second;
DegreeOfFreedom globalDof = rankOwnedDofsNewGlobalIndex[dofIt->first]; DegreeOfFreedom globalDof = rankDofsNewGlobalIndex[dofIt->first];
*const_cast<DegreeOfFreedom*>(dofIt->first) = localDof; *const_cast<DegreeOfFreedom*>(dofIt->first) = localDof;
mapLocalGlobalDOFs[localDof] = globalDof; mapLocalGlobalDOFs[localDof] = globalDof;
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment