Commit f1d2d34b authored by Thomas Witkowski

Work on pdd.

parent 3d53fff8
@@ -15,6 +15,8 @@
#include "BoundaryManager.h"
#include "Assembler.h"
#include "mpi.h"
namespace AMDiS {
using namespace mtl;
@@ -221,18 +223,7 @@ namespace AMDiS {
for (int j = 0; j < nCol; j++) { // for all columns
DegreeOfFreedom col = colIndices[j];
double entry = elMat[i][j];
/*
if (MPI::COMM_WORLD.Get_rank() == 0 && (row >= 156 || col >= 156))
std::cout << "PROB 0: " << row << " " << col << std::endl;
if (MPI::COMM_WORLD.Get_rank() == 1 && (row >= 151 || col >= 151))
std::cout << "PROB 1: " << row << " " << col << std::endl;
if (MPI::COMM_WORLD.Get_rank() == 2 && (row >= 146 || col >= 146))
std::cout << "PROB 2: " << row << " " << col << std::endl;
if (MPI::COMM_WORLD.Get_rank() == 3 && (row >= 153 || col >= 153))
std::cout << "PROB 3: " << row << " " << col << std::endl;
*/
if (add)
ins[row][col] += sign * entry;
else
...
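
The hunk above accumulates each element-matrix entry into the global matrix with a sign factor. A minimal standalone sketch of the same accumulation, assuming a std::map-based sparse matrix in place of the MTL inserter ("ins") used here; all names are illustrative:

    // Sketch only: signed accumulation of an element matrix into a
    // global sparse matrix.
    #include <map>
    #include <utility>
    #include <vector>

    typedef std::map<std::pair<int, int>, double> SimpleSparseMatrix;

    void addElementMatrix(SimpleSparseMatrix& mat,
                          const std::vector<int>& rowIndices,
                          const std::vector<int>& colIndices,
                          const std::vector<std::vector<double> >& elMat,
                          double sign, bool add)
    {
      for (size_t i = 0; i < rowIndices.size(); i++) {    // for all rows
        for (size_t j = 0; j < colIndices.size(); j++) {  // for all columns
          double entry = elMat[i][j];
          if (add)
            mat[std::make_pair(rowIndices[i], colIndices[j])] += sign * entry;
          else
            mat[std::make_pair(rowIndices[i], colIndices[j])] = sign * entry;
        }
      }
    }
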
@@ -82,5 +82,6 @@ namespace AMDiS {
}
return true;
}
}
}
@@ -66,6 +66,19 @@ namespace AMDiS {
return &dofs[node0 + elementPos][n0 + dofPos];
}
/// Returns \ref pos, the current position (vertex, edge, face) of the traversal.
int getCurrentPos()
{
return pos;
}
/// Returns \ref elementPos, the index of the vertex, edge or face that is currently traversed.
int getCurrentElementPos()
{
return elementPos;
}
protected:
/// The dof admin for which dofs should be traversed.
const DOFAdmin* admin;
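
The two accessors added above expose the iterator's position within the element. A hypothetical usage sketch, mirroring how addAllEdgeDOFs() further below filters for the dofs of one specific edge (position code 1 is assumed to denote edges; `admin`, `el` and `ithEdge` are assumed to be in scope):

    // Collect all dofs sitting on edge `ithEdge` of element `el`.
    ElementDofIterator elDofIter(admin);
    elDofIter.reset(el);
    std::vector<const DegreeOfFreedom*> edgeDofs;
    do {
      if (elDofIter.getCurrentPos() == 1 &&            // dof lives on an edge
          elDofIter.getCurrentElementPos() == ithEdge) // ... on this edge
        edgeDofs.push_back(elDofIter.getDofPtr());
    } while (elDofIter.next());
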
@@ -108,7 +121,7 @@
*/
int nElements;
/// Current element (vertex, edge, face) that is traversed.
/// Current element, i.e., ith vertex, edge or face, that is traversed.
int elementPos;
/// Current dof that is traversed on the current element.
...
@@ -8,6 +8,7 @@
#include "AdaptInstationary.h"
#include "FiniteElemSpace.h"
#include "ElementData.h"
#include "ElementDofIterator.h"
#include "MacroElement.h"
#include "MacroReader.h"
#include "Mesh.h"
@@ -867,19 +868,23 @@ namespace AMDiS {
{
DimVec<double>* baryCoords;
bool found = false;
ElementDofIterator elDofIter(feSpace->getAdmin());
TraverseStack stack;
ElInfo *elInfo = stack.traverseFirst(this, -1,
Mesh::CALL_LEAF_EL | Mesh::FILL_COORDS);
while (elInfo) {
for (int i = 0; i < nDOFEl; i++) {
if (elInfo->getElement()->getDOF(i) == dof) {
elDofIter.reset(elInfo->getElement());
int i = 0;
do {
if (elDofIter.getDofPtr() == dof) {
baryCoords = feSpace->getBasisFcts()->getCoords(i);
elInfo->coordToWorld(*baryCoords, coords);
found = true;
break;
}
}
i++;
} while (elDofIter.next());
if (found)
break;
...
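
The rewritten search above replaces the hard-coded loop over nDOFEl dofs with the new ElementDofIterator; the surrounding leaf traversal is the idiom used throughout this commit. As a standalone sketch (the loop body is a placeholder):

    // Standard AMDiS leaf-element traversal, as used in the hunk above.
    TraverseStack stack;
    ElInfo *elInfo = stack.traverseFirst(mesh, -1,
                                         Mesh::CALL_LEAF_EL | Mesh::FILL_COORDS);
    while (elInfo) {
      Element *element = elInfo->getElement();
      // ... inspect the element's dofs via ElementDofIterator ...
      elInfo = stack.traverseNext(elInfo);
    }
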
@@ -38,6 +38,13 @@ namespace AMDiS {
initialPartitionMesh(true),
nRankDOFs(0)
{
FUNCNAME("ParallelDomainBase::ParalleDomainBase()");
TEST_EXIT(mesh->getNumberOfDOFAdmin() == 1)
("Only meshes with one DOFAdmin are supported!\n");
TEST_EXIT(mesh->getDOFAdmin(0).getNumberOfPreDOFs(0) == 0)
("Wrong pre dof number for DOFAdmin!\n");
mpiRank = MPI::COMM_WORLD.Get_rank();
mpiSize = MPI::COMM_WORLD.Get_size();
mpiComm = MPI::COMM_WORLD;
@@ -100,8 +107,6 @@ namespace AMDiS {
admin.setFirstHole(mapLocalGlobalDOFs.size());
}
exit(0);
// === Global refinements. ===
int globalRefinement = 0;
@@ -114,9 +119,9 @@ namespace AMDiS {
}
// #if (DEBUG != 0)
// testInteriorBoundary();
// #endif
#if (DEBUG != 0)
testInteriorBoundary();
#endif
// === Create petsc matrix. ===
@@ -182,7 +187,7 @@ namespace AMDiS {
icend = end<nz>(cursor); icursor != icend; ++icursor)
if (value(*icursor) != 0.0) {
int r = mapLocalGlobalDOFs[row(*icursor)];
int c = mapLocalGlobalDOFs[col(*icursor)];
int c = mapLocalGlobalDOFs[col(*icursor)];
double v = value(*icursor);
MatSetValues(petscMatrix, 1, &r, 1, &c, &v, ADD_VALUES);
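
Each local matrix entry is translated to global row/column indices before being handed to PETSc. A condensed sketch of that copy, assuming the nonzeros are available as coordinate triples (the MTL cursor loop above does exactly this); `Triple`, `entries` and `copyToPetsc` are hypothetical names, MatSetValues is the real PETSc call:

    #include <map>
    #include <vector>
    #include <petscmat.h>   // PETSc: Mat, MatSetValues, ADD_VALUES

    struct Triple { int row, col; double value; };

    void copyToPetsc(Mat petscMatrix, const std::vector<Triple>& entries,
                     std::map<int, int>& mapLocalGlobalDOFs)
    {
      for (size_t k = 0; k < entries.size(); k++) {
        if (entries[k].value == 0.0)
          continue;                    // skip zero entries
        int r = mapLocalGlobalDOFs[entries[k].row];
        int c = mapLocalGlobalDOFs[entries[k].col];
        double v = entries[k].value;
        MatSetValues(petscMatrix, 1, &r, 1, &c, &v, ADD_VALUES);
      }
    }
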
@@ -234,9 +239,8 @@ namespace AMDiS {
int requestCounter = 0;
int i = 0;
for (std::map<int, std::vector<const DegreeOfFreedom*> >::iterator
sendIt = sendDofs.begin();
sendIt != sendDofs.end();
for (RankToDofContainer::iterator sendIt = sendDofs.begin();
sendIt != sendDofs.end();
++sendIt, i++) {
int nSendDOFs = sendIt->second.size();
sendBuffers[i] = new double[nSendDOFs];
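
The RankToDofContainer type above, and the DofContainer, DofToRank and DofToPartitions types in later hunks, replace the spelled-out standard-library types. Their definitions are not part of the shown hunks; judging from the types they replace, they are presumably along these lines:

    // Assumed definitions, inferred from the replaced types (the header
    // that introduces them is not shown in this diff):
    typedef std::vector<const DegreeOfFreedom*>               DofContainer;
    typedef std::map<int, DofContainer>                       RankToDofContainer;
    typedef std::map<const DegreeOfFreedom*, int>             DofToRank;
    typedef std::map<const DegreeOfFreedom*, std::set<int> >  DofToPartitions;
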
@@ -249,9 +253,8 @@ namespace AMDiS {
}
i = 0;
for (std::map<int, std::vector<const DegreeOfFreedom*> >::iterator
recvIt = recvDofs.begin();
recvIt != recvDofs.end();
for (RankToDofContainer::iterator recvIt = recvDofs.begin();
recvIt != recvDofs.end();
++recvIt, i++) {
int nRecvDOFs = recvIt->second.size();
recvBuffers[i] = new double[nRecvDOFs];
@@ -265,12 +268,12 @@ namespace AMDiS {
i = 0;
for (std::map<int, std::vector<const DegreeOfFreedom*> >::iterator
recvIt = recvDofs.begin();
for (RankToDofContainer::iterator recvIt = recvDofs.begin();
recvIt != recvDofs.end();
++recvIt, i++) {
for (int j = 0; j < static_cast<int>(recvIt->second.size()); j++)
(*vec)[(recvIt->second)[j][0]] = recvBuffers[i][j];
for (int j = 0; j < static_cast<int>(recvIt->second.size()); j++) {
(*vec)[*(recvIt->second)[j]] = recvBuffers[i][j];
}
delete [] recvBuffers[i];
}
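
The hunk above is one instance of the communication pattern used throughout this file: post all non-blocking sends and receives first, wait for completion, then unpack and free the buffers. Condensed, with hypothetical `nNeighbours`, `neighbour` and size arrays standing in for the maps used above (the variable-length request array mirrors the style of the source):

    MPI::Request request[2 * nNeighbours];
    int requestCounter = 0;
    for (int k = 0; k < nNeighbours; k++)       // post all sends
      request[requestCounter++] =
        mpiComm.Isend(sendBuffers[k], sendSize[k], MPI_DOUBLE, neighbour[k], 0);
    for (int k = 0; k < nNeighbours; k++)       // post all receives
      request[requestCounter++] =
        mpiComm.Irecv(recvBuffers[k], recvSize[k], MPI_DOUBLE, neighbour[k], 0);
    MPI::Request::Waitall(requestCounter, request);
    for (int k = 0; k < nNeighbours; k++) {
      // ... unpack recvBuffers[k] ...
      delete [] recvBuffers[k];
      delete [] sendBuffers[k];
    }
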
@@ -330,8 +333,8 @@ namespace AMDiS {
}
void ParallelDomainBase::createInteriorBoundaryInfo(std::vector<const DegreeOfFreedom*>& rankDOFs,
std::map<const DegreeOfFreedom*, int>& boundaryDOFs)
void ParallelDomainBase::createInteriorBoundaryInfo(DofContainer& rankDOFs,
DofToRank& boundaryDOFs)
{
FUNCNAME("ParallelDomainBase::createInteriorBoundaryInfo()");
@@ -499,14 +502,14 @@ namespace AMDiS {
}
void ParallelDomainBase::createLocalGlobalNumbering(
std::vector<const DegreeOfFreedom*>& rankDOFs,
std::map<const DegreeOfFreedom*, int>& boundaryDOFs,
int& nRankDOFs,
int& nOverallDOFs)
void ParallelDomainBase::createLocalGlobalNumbering(DofContainer& rankDOFs,
DofToRank& boundaryDOFs,
int& nRankDOFs,
int& nOverallDOFs)
{
FUNCNAME("ParallelDomainBase::createLocalGlobalNumbering()");
// === Get rank information about DOFs. ===
// Maps each DOF pointer to the set of ranks the DOF is part of.
@@ -537,7 +540,7 @@ namespace AMDiS {
// another rank.
std::map<int, int> recvNewDofs;
for (std::map<const DegreeOfFreedom*, int>::iterator it = boundaryDOFs.begin();
for (DofToRank::iterator it = boundaryDOFs.begin();
it != boundaryDOFs.end();
++it) {
@@ -570,7 +573,6 @@ namespace AMDiS {
}
}
// === Send and receive the dof indices at boundary. ===
std::vector<int*> sendBuffers(sendNewDofs.size());
@@ -646,6 +648,20 @@ namespace AMDiS {
// === Change dof indices at boundary from other ranks. ===
// This small data structure tracks which dof indices have already been changed.
// It is used to avoid the following situation: assume there are two dof indices
// a and b in boundaryDOFs, and index a must be changed to b and b to c. When the
// second rule is applied, it must hit the original b, not the b that resulted
// from changing a to b. Therefore, once a dof pointer has been changed by a rule,
// it is marked in this data structure and is not allowed to be changed anymore.
std::map<const DegreeOfFreedom*, bool> dofChanged;
for (DofToRank::iterator dofIt = boundaryDOFs.begin();
dofIt != boundaryDOFs.end();
++dofIt) {
dofChanged[dofIt->first] = false;
}
i = 0;
for (std::map<int, int>::iterator recvIt = recvNewDofs.begin();
recvIt != recvNewDofs.end();
@@ -659,13 +675,18 @@ namespace AMDiS {
bool found = false;
for (std::map<const DegreeOfFreedom*, int>::iterator dofIt = boundaryDOFs.begin();
// Iterate over all boundary dofs to find the dof whose index we have to change.
for (DofToRank::iterator dofIt = boundaryDOFs.begin();
dofIt != boundaryDOFs.end();
++dofIt) {
if ((dofIt->first)[0] == oldDof) {
if (*(dofIt->first) == oldDof && !dofChanged[dofIt->first]) {
dofChanged[dofIt->first] = true;
recvDofs[recvIt->first].push_back(dofIt->first);
const_cast<DegreeOfFreedom*>(dofIt->first)[0] = newLocalDof;
*(const_cast<DegreeOfFreedom*>(dofIt->first)) = newLocalDof;
mapLocalGlobalDOFs[newLocalDof] = newGlobalDof;
mapGlobalLocalDOFs[newGlobalDof] = newLocalDof;
isRankDOF[newLocalDof] = false;
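
A self-contained toy program illustrating why the dofChanged guard above is needed, with plain int dofs standing in for the DegreeOfFreedom pointers:

    #include <iostream>
    #include <map>
    #include <vector>

    int main()
    {
      std::vector<int> dofs;          // two boundary dofs with indices a=1, b=2
      dofs.push_back(1);
      dofs.push_back(2);

      std::map<int*, bool> changed;   // guard: which dof was already renamed
      for (size_t k = 0; k < dofs.size(); k++)
        changed[&dofs[k]] = false;

      // Rule 1: rename 1 -> 2. Rule 2: rename 2 -> 3.
      int rules[2][2] = { {1, 2}, {2, 3} };
      for (int r = 0; r < 2; r++)
        for (size_t k = 0; k < dofs.size(); k++)
          if (dofs[k] == rules[r][0] && !changed[&dofs[k]]) {
            dofs[k] = rules[r][1];
            changed[&dofs[k]] = true;  // may not be renamed again
            break;                     // each rule renames exactly one dof
          }

      // Without the guard, rule 2 would hit the dof that rule 1 just
      // renamed to 2, yielding {3, 2} instead of the correct {2, 3}.
      std::cout << dofs[0] << " " << dofs[1] << std::endl;  // prints: 2 3
    }
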
@@ -688,27 +709,31 @@ namespace AMDiS {
FUNCNAME("ParallelDomainBase::updateLocalGlobalNumbering()");
std::set<const DegreeOfFreedom*> rankDOFs;
std::map<const DegreeOfFreedom*, int> newBoundaryDOFs;
DofToRank newBoundaryDOFs;
// === Get all DOFs in the rank's partition. ===
ElementDofIterator elDofIt(feSpace->getAdmin());
TraverseStack stack;
ElInfo *elInfo = stack.traverseFirst(mesh, -1, Mesh::CALL_LEAF_EL);
while (elInfo) {
Element *element = elInfo->getElement();
for (int i = 0; i < 3; i++)
rankDOFs.insert(element->getDOF(i));
elDofIt.reset(element);
do {
rankDOFs.insert(elDofIt.getDofPtr());
} while(elDofIt.next());
elInfo = stack.traverseNext(elInfo);
}
// === Traverse the interior boundaries and move all DOFs that are not ===
// === owned by the rank from rankDOFs to boundaryDOFs. ===
std::map<int, std::vector<const DegreeOfFreedom*> > sendNewDofs = sendDofs;
std::map<int, std::vector<const DegreeOfFreedom*> > recvNewDofs = recvDofs;
RankToDofContainer sendNewDofs;
RankToDofContainer recvNewDofs;
for (std::map<int, std::vector<AtomicBoundary> >::iterator it =
myIntBoundary.boundary.begin();
@@ -744,10 +769,22 @@ namespace AMDiS {
newBoundaryDOFs[dof1] = boundaryDOFs[dof1];
newBoundaryDOFs[dof2] = boundaryDOFs[dof2];
if (find(sendNewDofs[it->first].begin(), sendNewDofs[it->first].end(), dof1) ==
sendNewDofs[it->first].end())
sendNewDofs[it->first].push_back(dof1);
if (find(sendNewDofs[it->first].begin(), sendNewDofs[it->first].end(), dof2) ==
sendNewDofs[it->first].end())
sendNewDofs[it->first].push_back(dof2);
DofContainer boundDOFs;
addAllVertexDOFs(boundIt->rankObject.el,
boundIt->rankObject.ithObjAtBoundary,
boundDOFs);
addAllEdgeDOFs(boundIt->rankObject.el,
boundIt->rankObject.ithObjAtBoundary,
boundDOFs);
std::vector<const DegreeOfFreedom*> boundDOFs;
addAllDOFs(boundIt->rankObject.el, boundIt->rankObject.ithObjAtBoundary,
boundDOFs);
for (int i = 0; i < static_cast<int>(boundDOFs.size()); i++) {
newBoundaryDOFs[boundDOFs[i]] = mpiRank;
sendNewDofs[it->first].push_back(boundDOFs[i]);
@@ -755,7 +792,6 @@ namespace AMDiS {
}
}
for (std::map<int, std::vector<AtomicBoundary> >::iterator it =
otherIntBoundary.boundary.begin();
it != otherIntBoundary.boundary.end();
@@ -793,9 +829,21 @@ namespace AMDiS {
newBoundaryDOFs[dof1] = boundaryDOFs[dof1];
newBoundaryDOFs[dof2] = boundaryDOFs[dof2];
std::vector<const DegreeOfFreedom*> boundDOFs;
addAllDOFs(boundIt->rankObject.el, boundIt->rankObject.ithObjAtBoundary,
boundDOFs);
if (find(recvNewDofs[it->first].begin(), recvNewDofs[it->first].end(), dof2) ==
recvNewDofs[it->first].end())
recvNewDofs[it->first].push_back(dof2);
if (find(recvNewDofs[it->first].begin(), recvNewDofs[it->first].end(), dof1) ==
recvNewDofs[it->first].end())
recvNewDofs[it->first].push_back(dof1);
DofContainer boundDOFs;
addAllEdgeDOFs(boundIt->rankObject.el,
boundIt->rankObject.ithObjAtBoundary,
boundDOFs);
addAllVertexDOFs(boundIt->rankObject.el,
boundIt->rankObject.ithObjAtBoundary,
boundDOFs);
for (int i = static_cast<int>(boundDOFs.size()) - 1; i >= 0; i--) {
// for (int i = 0; i < static_cast<int>(boundDOFs.size()); i++) {
rankDOFs.erase(boundDOFs[i]);
@@ -803,7 +851,6 @@ namespace AMDiS {
recvNewDofs[it->first].push_back(boundDOFs[i]);
}
}
}
nRankDOFs = rankDOFs.size();
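
Note the mirrored construction in the two loops above: the owning rank pushes vertex dofs and then edge dofs, while the neighbouring rank collects edge dofs first, then vertex dofs, and appends them in reverse order. Presumably this compensates for the opposite orientation in which the two ranks see the shared boundary, so that sender and receiver enumerate the same dofs in the same order. The receiver side, condensed (interpretation, see above):

    DofContainer boundDOFs;
    addAllEdgeDOFs(boundIt->rankObject.el,
                   boundIt->rankObject.ithObjAtBoundary, boundDOFs);
    addAllVertexDOFs(boundIt->rankObject.el,
                     boundIt->rankObject.ithObjAtBoundary, boundDOFs);
    // Walk backwards so the receive order matches the neighbour's send order.
    for (int k = static_cast<int>(boundDOFs.size()) - 1; k >= 0; k--) {
      rankDOFs.erase(boundDOFs[k]);
      recvNewDofs[it->first].push_back(boundDOFs[k]);
    }
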
@@ -846,14 +893,13 @@ namespace AMDiS {
int requestCounter = 0;
i = 0;
for (std::map<int, std::vector<const DegreeOfFreedom*> >::iterator
sendIt = sendNewDofs.begin();
for (RankToDofContainer::iterator sendIt = sendNewDofs.begin();
sendIt != sendNewDofs.end();
++sendIt, i++) {
int nSendDofs = sendIt->second.size();
sendBuffers[i] = new int[nSendDofs];
int c = 0;
for (std::vector<const DegreeOfFreedom*>::iterator dofIt = sendIt->second.begin();
for (DofContainer::iterator dofIt = sendIt->second.begin();
dofIt != sendIt->second.end();
++dofIt)
sendBuffers[i][c++] = (*dofIt)[0] + rstart;
@@ -863,9 +909,9 @@ namespace AMDiS {
}
i = 0;
for (std::map<int, std::vector<const DegreeOfFreedom*> >::iterator
recvIt = recvNewDofs.begin();
recvIt != recvNewDofs.end(); ++recvIt, i++) {
for (RankToDofContainer::iterator recvIt = recvNewDofs.begin();
recvIt != recvNewDofs.end();
++recvIt, i++) {
int nRecvDofs = recvIt->second.size();
recvBuffers[i] = new int[nRecvDofs];
@@ -873,28 +919,25 @@ namespace AMDiS {
mpiComm.Irecv(recvBuffers[i], nRecvDofs, MPI_INT, recvIt->first, 0);
}
MPI::Request::Waitall(requestCounter, request);
i = 0;
for (std::map<int, std::vector<const DegreeOfFreedom*> >::iterator
sendIt = sendNewDofs.begin();
for (RankToDofContainer::iterator sendIt = sendNewDofs.begin();
sendIt != sendNewDofs.end();
++sendIt)
delete [] sendBuffers[i++];
i = 0;
int newDofIndex = nRankDOFs;
for (std::map<int, std::vector<const DegreeOfFreedom*> >::iterator
recvIt = recvNewDofs.begin();
recvIt != recvNewDofs.end(); ++recvIt) {
for (RankToDofContainer::iterator recvIt = recvNewDofs.begin();
recvIt != recvNewDofs.end();
++recvIt) {
int j = 0;
for (std::vector<const DegreeOfFreedom*>::iterator dofIt = recvIt->second.begin();
for (DofContainer::iterator dofIt = recvIt->second.begin();
dofIt != recvIt->second.end();
++dofIt) {
const_cast<DegreeOfFreedom*>(*dofIt)[0] = newDofIndex;
(*const_cast<DegreeOfFreedom*>(*dofIt)) = newDofIndex;
mapLocalGlobalDOFs[newDofIndex] = recvBuffers[i][j];
mapGlobalLocalDOFs[recvBuffers[i][j]] = newDofIndex;
isRankDOF[newDofIndex] = false;
@@ -912,31 +955,31 @@ namespace AMDiS {
}
void ParallelDomainBase::addAllDOFs(Element *el, int ithEdge,
std::vector<const DegreeOfFreedom*>& dofs)
void ParallelDomainBase::addAllVertexDOFs(Element *el, int ithEdge,
DofContainer& dofs)
{
FUNCNAME("ParallelDomainBase::addAllDOFs()");
FUNCNAME("ParallelDomainBase::addAllVertexDOFs()");
switch (ithEdge) {
case 0:
if (el->getSecondChild() && el->getSecondChild()->getFirstChild()) {
addAllDOFs(el->getSecondChild()->getFirstChild(), 0, dofs);
addAllVertexDOFs(el->getSecondChild()->getFirstChild(), 0, dofs);
dofs.push_back(el->getSecondChild()->getFirstChild()->getDOF(2));
addAllDOFs(el->getSecondChild()->getSecondChild(), 1, dofs);
addAllVertexDOFs(el->getSecondChild()->getSecondChild(), 1, dofs);
}
break;
case 1:
if (el->getFirstChild() && el->getFirstChild()->getFirstChild()) {
addAllDOFs(el->getFirstChild()->getFirstChild(), 0, dofs);
addAllVertexDOFs(el->getFirstChild()->getFirstChild(), 0, dofs);
dofs.push_back(el->getFirstChild()->getFirstChild()->getDOF(2));
addAllDOFs(el->getFirstChild()->getSecondChild(), 1, dofs);
addAllVertexDOFs(el->getFirstChild()->getSecondChild(), 1, dofs);
}
break;
case 2:
if (el->getFirstChild()) {
addAllDOFs(el->getFirstChild(), 0, dofs);
addAllVertexDOFs(el->getFirstChild(), 0, dofs);
dofs.push_back(el->getFirstChild()->getDOF(2));
addAllDOFs(el->getSecondChild(), 1, dofs);
addAllVertexDOFs(el->getSecondChild(), 1, dofs);
}
break;
default:
@@ -945,10 +988,57 @@ namespace AMDiS {
}
void ParallelDomainBase::createDOFMemberInfo(
std::map<const DegreeOfFreedom*, std::set<int> >& partitionDOFs,
std::vector<const DegreeOfFreedom*>& rankDOFs,
std::map<const DegreeOfFreedom*, int>& boundaryDOFs)
void ParallelDomainBase::addAllEdgeDOFs(Element *el, int ithEdge,
DofContainer& dofs)
{
FUNCNAME("ParallelDomainBase::addAllEdgeDOFs()");
bool addThisEdge = false;
switch (ithEdge) {
case 0:
if (el->getSecondChild())
addAllEdgeDOFs(el->getSecondChild(), 2, dofs);
else
addThisEdge = true;
break;
case 1:
if (el->getFirstChild())
addAllEdgeDOFs(el->getFirstChild(), 2, dofs);
else
addThisEdge = true;
break;
case 2:
if (el->getFirstChild()) {
addAllEdgeDOFs(el->getFirstChild(), 1, dofs);
addAllEdgeDOFs(el->getFirstChild(), 0, dofs);
} else {
addThisEdge = true;
}
break;
default:
ERROR_EXIT("Should never happen!\n");
}
if (addThisEdge) {
ElementDofIterator elDofIter(feSpace->getAdmin());
elDofIter.reset(el);
do {
if (elDofIter.getCurrentPos() == 1 &&
elDofIter.getCurrentElementPos() == ithEdge) {
dofs.push_back(elDofIter.getDofPtr());
}
} while(elDofIter.next());
}
}
void ParallelDomainBase::createDOFMemberInfo(DofToPartitions& partitionDOFs,
DofContainer& rankDOFs,
DofToRank& boundaryDOFs)
{
// === Determine for each dof the set of partitions the dof belongs to. ===
@@ -972,7 +1062,7 @@ namespace AMDiS {
// === Determine the set of rank dofs and the dof ownership at the boundary. ===
// iterate over all DOFs
for (std::map<const DegreeOfFreedom*, std::set<int> >::iterator it = partitionDOFs.begin();
for (DofToPartitions::iterator it = partitionDOFs.begin();
it != partitionDOFs.end();
++it) {
@@ -1052,13 +1142,99 @@ namespace AMDiS {
}
}
std::vector<int> sendSize(mpiSize, 0);
std::vector<int> recvSize(mpiSize, 0);
std::vector<int> recvSizeBuffer(mpiSize, 0);
MPI::Request request[(mpiSize - 1) * 2];
int requestCounter = 0;
for (std::map<int, std::vector<WorldVector<double> > >::iterator it
= sendCoords.begin();
= sendCoords.begin();
it != sendCoords.end(); ++it) {
sendSize[it->first] = it->second.size();
}
for (std::map<int, std::vector<WorldVector<double> > >::iterator it = recvCoords.begin();
it != recvCoords.end(); ++it) {
recvSize[it->first] = it->second.size();
}
for (int i = 0; i < mpiSize; i++) {
if (i == mpiRank)
continue;
request[requestCounter++] = mpiComm.Isend(&(sendSize[i]), 1, MPI_INT, i, 0);
}
for (int i = 0; i < mpiSize; i++) {
if (i == mpiRank)
continue;
request[requestCounter++] = mpiComm.Irecv(&(recvSizeBuffer[i]), 1, MPI_INT, i, 0);
}
MPI::Request::Waitall(requestCounter, request);
for (int i = 0; i < mpiSize; i++) {
if (i == mpiRank)
continue;
if (recvSize[i] != recvSizeBuffer[i]) {
std::cout << "Error: MPI rank " << mpiRank << " expectes to receive "
<< recvSize[i] << " DOFs from rank " << i << ". But this rank sends "
<< recvSizeBuffer[i] << " DOFs!" << std::endl;
ERROR_EXIT("Should not happen!\n");
}
}
std::map<int, double*> sendCoordsBuffer;
std::map<int, double*> recvCoordsBuffer;
requestCounter = 0;