Commit 23da3114 authored by Thomas Witkowski

Bugfix in parallel domain decomposition with higher order elements.

parent 5f6c2d39
@@ -241,7 +241,7 @@ namespace AMDiS {
   }


-  void ParallelDomainBase::setDofVector(DOFVector<double>* vec,
+  void ParallelDomainBase::setDofVector(Vec& petscVec, DOFVector<double>* vec,
 					int dispMult, int dispAdd)
   {
     DOFVector<double>::Iterator dofIt(vec, USED_DOFS);
@@ -249,7 +249,7 @@ namespace AMDiS {
       int index = mapLocalGlobalDOFs[dofIt.getDOFIndex()] * dispMult + dispAdd;
       double value = *dofIt;

-      VecSetValues(petscRhsVec, 1, &index, &value, ADD_VALUES);
+      VecSetValues(petscVec, 1, &index, &value, ADD_VALUES);
     }
   }

@@ -277,7 +277,7 @@ namespace AMDiS {
     MatAssemblyBegin(petscMatrix, MAT_FINAL_ASSEMBLY);
     MatAssemblyEnd(petscMatrix, MAT_FINAL_ASSEMBLY);

-    setDofVector(vec);
+    setDofVector(petscRhsVec, vec);

     VecAssemblyBegin(petscRhsVec);
     VecAssemblyEnd(petscRhsVec);
@@ -512,7 +512,7 @@ namespace AMDiS {
     MatAssemblyEnd(petscMatrix, MAT_FINAL_ASSEMBLY);

     for (int i = 0; i < nComponents; i++)
-      setDofVector(vec->getDOFVector(i), nComponents, i);
+      setDofVector(petscRhsVec, vec->getDOFVector(i), nComponents, i);

     VecAssemblyBegin(petscRhsVec);
     VecAssemblyEnd(petscRhsVec);
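
The hunks above are the interface half of the fix: setDofVector() now takes the target PETSc vector as an explicit argument instead of writing unconditionally into petscRhsVec, so the same routine can fill the right-hand side here and, below, the solution vector. A minimal self-contained sketch of the pattern; fillPetscVec, localValues, localToGlobal, and nLocal are hypothetical stand-ins for the AMDiS names:

    #include <petscvec.h>

    // Scatter locally owned values into an arbitrary distributed PETSc
    // vector. localToGlobal plays the role of mapLocalGlobalDOFs;
    // dispMult/dispAdd interleave the components of a vector-valued problem.
    void fillPetscVec(Vec& petscVec, const PetscScalar* localValues,
                      const PetscInt* localToGlobal, PetscInt nLocal,
                      PetscInt dispMult = 1, PetscInt dispAdd = 0)
    {
      for (PetscInt i = 0; i < nLocal; i++) {
        PetscInt index = localToGlobal[i] * dispMult + dispAdd;
        // ADD_VALUES accumulates contributions from every rank sharing the DOF.
        VecSetValues(petscVec, 1, &index, &localValues[i], ADD_VALUES);
      }
      // As in the code above, VecAssemblyBegin/End must run before the vector
      // is used; the caller does this once after filling all components.
    }
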
@@ -602,6 +602,15 @@ namespace AMDiS {
   {
     FUNCNAME("ParallelDomainBase::solvePetscMatrix()");

+#if 0
+    // Set the old solution to be the initial guess for the petsc solver.
+    for (int i = 0; i < nComponents; i++)
+      setDofVector(petscSolVec, vec->getDOFVector(i), nComponents, i);
+
+    VecAssemblyBegin(petscSolVec);
+    VecAssemblyEnd(petscSolVec);
+#endif
+
     KSP solver;
     KSPCreate(PETSC_COMM_WORLD, &solver);
@@ -612,6 +621,8 @@ namespace AMDiS {
     KSPSetType(solver, KSPBCGS);
     KSPMonitorSet(solver, myKSPMonitor, PETSC_NULL, 0);
     KSPSetFromOptions(solver);

+    // Do not delete the solution vector, use it for the initial guess.
+    //    KSPSetInitialGuessNonzero(solver, PETSC_TRUE);
     KSPSolve(solver, petscRhsVec, petscSolVec);
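
The new #if 0 block and the commented-out KSPSetInitialGuessNonzero() call sketch a planned optimization: copy the previous solution into petscSolVec and let the Krylov solver start from it. In PETSc this needs an explicit opt-in, since KSPSolve() otherwise zeroes the solution vector before iterating. A hedged sketch of how the two pieces would combine, assuming the three-argument KSPSetOperators() of PETSc >= 3.5; the function name solveWithInitialGuess is made up:

    #include <petscksp.h>

    // Solve mat * sol = rhs, reusing whatever sol already contains (e.g. the
    // previous solution, cf. the #if 0 block above) as the start vector.
    void solveWithInitialGuess(Mat mat, Vec rhs, Vec sol)
    {
      KSP solver;
      KSPCreate(PETSC_COMM_WORLD, &solver);
      KSPSetOperators(solver, mat, mat);
      KSPSetType(solver, KSPBCGS);
      // Opt in to a nonzero initial guess; without this call KSPSolve()
      // overwrites sol with zero before the first iteration.
      KSPSetInitialGuessNonzero(solver, PETSC_TRUE);
      KSPSetFromOptions(solver);
      KSPSolve(solver, rhs, sol);
      KSPDestroy(&solver);
    }
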
@@ -1188,10 +1199,11 @@ namespace AMDiS {
     ElementDofIterator elDofIt(feSpace);
     DofSet rankDofSet;

     // The vertexDof list must be recreated from scratch. Otherwise, it is possible
     // that it maps dofs that were removed (this is also possible if the mesh was
     // refined; e.g., center dofs of an element are not dofs of the children).
+    DofToBool oldVertexDof = vertexDof;
     vertexDof.clear();

     TraverseStack stack;
@@ -1237,7 +1249,7 @@ namespace AMDiS {
       for (DofContainer::iterator iit = oldSendDofs[it->first].begin();
 	   iit != oldSendDofs[it->first].end(); ++iit)
-	if (vertexDof[*iit])
+	if (oldVertexDof[*iit])
 	  dofsToSend.push_back(*iit);

       DofContainer dofs;
@@ -1265,7 +1277,7 @@ namespace AMDiS {
       for (DofContainer::iterator iit = oldRecvDofs[it->first].begin();
 	   iit != oldRecvDofs[it->first].end(); ++iit)
-	if (vertexDof[*iit]) {
+	if (oldVertexDof[*iit]) {
 	  dofsToRecv.push_back(*iit);

 	  DofContainer::iterator eraseIt = find(rankDofs.begin(), rankDofs.end(), *iit);
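
These two hunks are the actual bugfix. vertexDof is cleared and rebuilt during mesh traversal, but the loops filtering the old send and receive lists run afterwards and used to query the new map, which with higher order elements no longer contains DOFs that vanished under refinement. The fix snapshots the map into oldVertexDof before clearing and filters against the snapshot. The pattern in isolation, with hypothetical stand-in typedefs for AMDiS' DofToBool and DofContainer:

    #include <map>
    #include <vector>

    typedef int DegreeOfFreedom;                             // stand-in
    typedef std::map<const DegreeOfFreedom*, bool> DofToBool; // stand-in
    typedef std::vector<const DegreeOfFreedom*> DofContainer; // stand-in

    // Keep only those old DOFs that were vertex DOFs at the time the old
    // lists were built, by querying a snapshot taken before the rebuild.
    void filterByOldMap(DofToBool& vertexDof, const DofContainer& oldDofs,
                        DofContainer& kept)
    {
      DofToBool oldVertexDof = vertexDof;  // snapshot first...
      vertexDof.clear();                   // ...then rebuild (mesh traversal)

      for (DofContainer::const_iterator it = oldDofs.begin();
           it != oldDofs.end(); ++it)
        if (oldVertexDof[*it])             // consult the snapshot, not the new map
          kept.push_back(*it);
    }
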
@@ -1334,7 +1346,6 @@ namespace AMDiS {
       i++;
     }
-
     // === Send new DOF indices. ===

     std::vector<int*> sendBuffers(sendDofs.size());
@@ -1350,12 +1361,11 @@ namespace AMDiS {
       sendBuffers[i] = new int[nSendDofs];

       int c = 0;
       for (DofContainer::iterator dofIt = sendIt->second.begin();
-	   dofIt != sendIt->second.end(); ++dofIt) {
+	   dofIt != sendIt->second.end(); ++dofIt)
 	sendBuffers[i][c++] = rankDofsNewGlobalIndex[*dofIt];
-      }

       request[requestCounter++] =
 	mpiComm.Isend(sendBuffers[i], nSendDofs, MPI_INT, sendIt->first, 0);
     }

     i = 0;
@@ -1363,9 +1373,9 @@ namespace AMDiS {
 	 recvIt != recvDofs.end(); ++recvIt, i++) {
       int nRecvDofs = recvIt->second.size();
       recvBuffers[i] = new int[nRecvDofs];

       request[requestCounter++] =
 	mpiComm.Irecv(recvBuffers[i], nRecvDofs, MPI_INT, recvIt->first, 0);
     }

     MPI::Request::Waitall(requestCounter, request);
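
The unchanged surroundings show the communication step the reworked loops feed into: every neighbouring rank gets one Isend and one Irecv, and a single Waitall blocks until the whole exchange of new global DOF indices is done. Reduced to its core, using the same (since-deprecated) C++ MPI bindings as the code above; exchangeWithNeighbours and its parameters are illustrative:

    #include <mpi.h>
    #include <vector>

    // Post all nonblocking sends and receives up front, then wait once, so
    // the per-neighbour transfers overlap instead of serializing.
    void exchangeWithNeighbours(MPI::Intracomm& mpiComm,
                                const std::vector<int>& neighbours,
                                std::vector<std::vector<int> >& sendData,
                                std::vector<std::vector<int> >& recvData)
    {
      std::vector<MPI::Request> request(2 * neighbours.size());
      int requestCounter = 0;

      for (unsigned int i = 0; i < neighbours.size(); i++) {
        request[requestCounter++] =
          mpiComm.Isend(&(sendData[i][0]), (int) sendData[i].size(), MPI_INT,
                        neighbours[i], 0);
        // recvData[i] must already be resized to the expected message length.
        request[requestCounter++] =
          mpiComm.Irecv(&(recvData[i][0]), (int) recvData[i].size(), MPI_INT,
                        neighbours[i], 0);
      }

      // All buffers must stay alive until Waitall returns.
      MPI::Request::Waitall(requestCounter, &(request[0]));
    }
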
@@ -254,7 +254,8 @@ namespace AMDiS {
     void setDofMatrix(DOFMatrix* mat, int dispMult = 1,
 		      int dispAddRow = 0, int dispAddCol = 0);

-    void setDofVector(DOFVector<double>* vec, int disMult = 1, int dispAdd = 0);
+    void setDofVector(Vec& petscVec, DOFVector<double>* vec,
+		      int disMult = 1, int dispAdd = 0);

     void DbgCreateElementMap(ElementIdxToDofs &elMap);