Commit 7bd09e85 authored by Praetorius, Simon

Error in MeshDistributor corrected: replaced use of non-initialized dofComm by dofComms

Error in MeshDistributor corrected: the use of the non-initialized member dofComm is replaced by the per-mesh map dofComms
parent 7181ba7c
......@@ -773,7 +773,7 @@ namespace AMDiS {
// list = informations about element and the neighbours
// n = number of neighbours
// Calculation of the values can be traced in the documentation
// TODO: maybe the implementation is wrong!
// NOTE: maybe the implementation is wrong!
void Bubble::coarseRestr3_2d(DOFIndexed<double> *drv,
RCNeighbourList *list,
int n, BasisFunction* basFct)
......
......@@ -621,7 +621,7 @@ namespace AMDiS {
for (int j = 0; j < nComponents; j++)
estimator[i]->addSystem((*systemMatrix)[i][j],
solution->getDOFVector(j),
rhs->getDOFVector(j)); // TODO: hier eventuell (i) statt (j) ???
rhs->getDOFVector(i)); // NOTE: hier eventuell (i) statt (j) ??? --> corrected
}
}
......
......@@ -147,12 +147,13 @@ namespace AMDiS {
void dualAssemble(AdaptInfo *adaptInfo, Flag flag,
bool asmMatrix = true, bool asmVector = true);
/// Implementation of ProblemStatBase::getNumComponents(), TODO: Wrong!!
virtual int getNumComponents()
/// Returns nr of components \ref nComponents
virtual int getNumComponents()
{
return nComponents;
}
/// Returns nr of additional components \ref nAddComponents
virtual int getNumAddComponents()
{
return nAddComponents;
......
......@@ -396,6 +396,7 @@ namespace AMDiS {
if (bound) {
mesh->incrementNumberOfEdges(n_neigh + 2);
mesh->incrementNumberOfFaces(2 * n_neigh + 1);
newCoords = true; // added to allow BOUNDARY_PROJECTION
} else {
mesh->incrementNumberOfEdges(n_neigh + 1);
mesh->incrementNumberOfFaces(2 * n_neigh);
......
......@@ -184,9 +184,6 @@ namespace AMDiS { namespace io {
writeTestStuff(out, *dataCollector);
writeMesh2(out, *dataCollector);
// TODO: remove
//tryMeshTraversal(out);
out.close();
}
......
......@@ -86,7 +86,7 @@ namespace AMDiS { namespace Parallel {
protected:
/// This map contains for each rank the list of DOFs the current rank must
/// end to exchange solution DOFs at the interior boundaries.
/// send to exchange solution DOFs at the interior boundaries.
DataType sendDofs;
/// This map contains on each rank the list of DOFs from which the current
......
......@@ -1015,6 +1015,7 @@ namespace AMDiS { namespace Parallel {
DofContainer& dofs)
{
DofContainerSet dofSet;
MultiLevelDofComm& dofComm = dofComms[feSpace->getMesh()];
for (DofComm::Iterator it(dofComm[level].getSendDofs(), feSpace);
!it.end(); it.nextRank())
dofSet.insert(it.getDofs().begin(), it.getDofs().end());
......
......@@ -177,10 +177,10 @@ namespace AMDiS { namespace Parallel {
return periodicMap;
}
DofComm& getDofComm(int level)
{
return dofComm[level];
}
// DofComm& getDofComm(int level)
// {
// return dofComm[level];
// }
DofComm& getDofComm(Mesh* mesh, int level)
{
......@@ -245,10 +245,12 @@ namespace AMDiS { namespace Parallel {
/// Works quite similar to the function \ref synchVector, but instead the
/// values of subdomain vectors are combined along the boundaries, by a
/// binary functor.
// minorRank => majorRank
template<typename T, typename Operator>
void synchVector(DOFVector<T> &vec, Operator op)
{
const FiniteElemSpace *fe = vec.getFeSpace();
MultiLevelDofComm& dofComm = dofComms[fe->getMesh()];
int nLevels = levelData.getNumberOfLevels();
for (int level = nLevels - 1; level >= 0; level--) {
......@@ -277,8 +279,7 @@ namespace AMDiS { namespace Parallel {
op(vec[it.getDofIndex()],
stdMpi.getRecvData(it.getRank())[it.getDofCounter()]);
}
if (!boost::is_same<Operator, functors::assign<T> >::value)
synchVector(vec);
synchVector(vec);
}
/** \brief
......@@ -291,10 +292,41 @@ namespace AMDiS { namespace Parallel {
* solved, or after the DOFVector is set by some user defined functions,
* e.g., initial solution functions.
*/
// majorRank => minorRank
template<typename T>
void synchVector(DOFVector<T> &vec)
{
synchVector(vec, functors::assign<T>());
const FiniteElemSpace *fe = vec.getFeSpace();
MultiLevelDofComm& dofComm = dofComms[fe->getMesh()];
int nLevels = levelData.getNumberOfLevels();
for (int level = nLevels - 1; level >= 0; level--) {
StdMpi<std::vector<T> > stdMpi(levelData.getMpiComm(level));
for (DofComm::Iterator it(dofComm[level].getSendDofs(), fe);
!it.end(); it.nextRank()) {
std::vector<T> dofs;
dofs.reserve(it.getDofs().size());
for (; !it.endDofIter(); it.nextDof())
dofs.push_back(vec[it.getDofIndex()]);
stdMpi.send(it.getRank(), dofs);
}
for (DofComm::Iterator it(dofComm[level].getRecvDofs());
!it.end(); it.nextRank())
stdMpi.recv(it.getRank());
stdMpi.startCommunication();
for (DofComm::Iterator it(dofComm[level].getRecvDofs(), fe);
!it.end(); it.nextRank())
for (; !it.endDofIter(); it.nextDof())
vec[it.getDofIndex()] =
stdMpi.getRecvData(it.getRank())[it.getDofCounter()];
}
}
/// Works in the same way as the function above defined for DOFVectors. Due
......@@ -304,10 +336,12 @@ namespace AMDiS { namespace Parallel {
/// Works quite similar to the function \ref synchVector, but instead the
/// values of subdomain vectors are add along the boundaries.
// minorRank => majorRank
template<typename T>
void synchAddVector(DOFVector<T> &vec)
{
const FiniteElemSpace *fe = vec.getFeSpace();
MultiLevelDofComm& dofComm = dofComms[fe->getMesh()];
int nLevels = levelData.getNumberOfLevels();
for (int level = nLevels - 1; level >= 0; level--) {
......@@ -621,9 +655,6 @@ namespace AMDiS { namespace Parallel {
/// Defines the interior boundaries of the domain that result from
/// partitioning the whole mesh.
MultiLevelInteriorBoundary intBoundary;
/// Dof communicator object // TODO: to be deleted
MultiLevelDofComm dofComm;
/// Dof communicator objects for each mesh
std::map<Mesh*, MultiLevelDofComm> dofComms;
......
......@@ -474,6 +474,7 @@ namespace AMDiS { namespace Parallel {
return;
const FiniteElemSpace *feSpace = componentSpaces[component];
Mesh* mesh = feSpace->getMesh();
boundaryDofRanks[feSpace].clear();
// Stores for all rank owned communication DOFs, if the counterpart is
......@@ -484,7 +485,7 @@ namespace AMDiS { namespace Parallel {
if (not subDomainIsLocal) {
StdMpi<vector<int> > stdMpi(domainComm);
for (DofComm::Iterator it(meshDistributor->getDofComm(meshLevel).getRecvDofs(), feSpace);
for (DofComm::Iterator it(meshDistributor->getDofComm(mesh, meshLevel).getRecvDofs(), feSpace);
!it.end(); it.nextRank()) {
vector<int> dofs;
......@@ -500,13 +501,13 @@ namespace AMDiS { namespace Parallel {
stdMpi.send(it.getRank(), dofs);
}
for (DofComm::Iterator it(meshDistributor->getDofComm(meshLevel).getSendDofs(), feSpace);
for (DofComm::Iterator it(meshDistributor->getDofComm(mesh, meshLevel).getSendDofs(), feSpace);
!it.end(); it.nextRank())
stdMpi.recv(it.getRank());
stdMpi.startCommunication();
for (DofComm::Iterator it(meshDistributor->getDofComm(meshLevel).getSendDofs(), feSpace);
for (DofComm::Iterator it(meshDistributor->getDofComm(mesh, meshLevel).getSendDofs(), feSpace);
!it.end(); it.nextRank())
for (; !it.endDofIter(); it.nextDof())
if (!isPrimal(component, it.getDofIndex()) &&
......@@ -522,7 +523,7 @@ namespace AMDiS { namespace Parallel {
// === of ranks that contain this node (denoted by W(x_j)). ===
int mpiRank = domainComm.Get_rank();
for (DofComm::Iterator it(meshDistributor->getDofComm(meshLevel).getSendDofs(), feSpace);
for (DofComm::Iterator it(meshDistributor->getDofComm(mesh, meshLevel).getSendDofs(), feSpace);
!it.end(); it.nextRank()) {
for (; !it.endDofIter(); it.nextDof()) {
if (!isPrimal(component, it.getDofIndex())) {
......@@ -544,7 +545,7 @@ namespace AMDiS { namespace Parallel {
StdMpi<vector<std::set<int> > > stdMpi(meshDistributor->getMpiComm(meshLevel));
for (DofComm::Iterator it(meshDistributor->getDofComm(meshLevel).getSendDofs(), feSpace);
for (DofComm::Iterator it(meshDistributor->getDofComm(mesh, meshLevel).getSendDofs(), feSpace);
!it.end(); it.nextRank())
for (; !it.endDofIter(); it.nextDof())
if (!isPrimal(component, it.getDofIndex()))
......@@ -553,7 +554,7 @@ namespace AMDiS { namespace Parallel {
stdMpi.updateSendDataSize();
for (DofComm::Iterator it(meshDistributor->getDofComm(meshLevel).getRecvDofs(), feSpace);
for (DofComm::Iterator it(meshDistributor->getDofComm(mesh, meshLevel).getRecvDofs(), feSpace);
!it.end(); it.nextRank()) {
bool recvFromRank = false;
for (; !it.endDofIter(); it.nextDof()) {
......@@ -571,7 +572,7 @@ namespace AMDiS { namespace Parallel {
stdMpi.startCommunication();
for (DofComm::Iterator it(meshDistributor->getDofComm(meshLevel).getRecvDofs(), feSpace);
for (DofComm::Iterator it(meshDistributor->getDofComm(mesh, meshLevel).getRecvDofs(), feSpace);
!it.end(); it.nextRank()) {
int i = 0;
for (; !it.endDofIter(); it.nextDof()) {
......
dimension of world: 2
ballMesh->macro file name: ./macro/macro.ball.2d
ballMesh->global refinements: 3
ballMesh->global refinements: 5
ball->mesh: ballMesh
ball->dim: 2
ball->components: 1
ball->polynomial degree[0]: 1
ball->space->components: 1
ball->components: 1
ball->solver: cg
ball->solver->max iteration: 1000
......
......@@ -8,7 +8,7 @@ ball->dim: 3
ball->components: 1
ball->polynomial degree[0]: 1
ball->space->components: 1
ball->components: 1
ball->solver: cg
ball->solver->max iteration: 1000
......
......@@ -31,6 +31,6 @@ element neighbours:
projections:
0 0 1
0 0 0
0 0 0
0 0 0
0 0 1
0 0 1
0 0 1
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment