
Commit aff33faa authored by Thomas Witkowski

Work on parallelization.

parent 808eac8e
@@ -239,7 +239,8 @@ if(ENABLE_PARALLEL_DOMAIN)
find_package(PETSc REQUIRED)
include_directories(${PETSC_DIR}/include ${PETSC_DIR}/${PETSC_ARCH}/include)
list(APPEND AMDIS_INCLUDE_DIRS ${PETSC_DIR}/include ${PETSC_DIR}/${PETSC_ARCH}/include)
list(APPEND PARALLEL_DOMAIN_AMDIS_SRC ${SOURCE_DIR}/parallel/PetscSolver.cc
list(APPEND PARALLEL_DOMAIN_AMDIS_SRC ${SOURCE_DIR}/parallel/PetscMultigridPrecon.cc
${SOURCE_DIR}/parallel/PetscSolver.cc
${SOURCE_DIR}/parallel/PetscProblemStat.cc
${SOURCE_DIR}/parallel/PetscSolverFeti.cc
${SOURCE_DIR}/parallel/PetscSolverGlobalMatrix.cc
......
@@ -63,7 +63,7 @@ namespace AMDiS {
problemIteration->endIteration(adaptInfo);
adaptInfo->incSpaceIteration();
}
// adaption loop
while (!adaptInfo->spaceToleranceReached() &&
(adaptInfo->getSpaceIteration() < adaptInfo->getMaxSpaceIteration() ||
@@ -72,9 +72,17 @@ namespace AMDiS {
problemIteration->beginIteration(adaptInfo);
Flag adapted = problemIteration->oneIteration(adaptInfo, FULL_ITERATION);
problemIteration->endIteration(adaptInfo);
#if HAVE_PARALLEL_DOMAIN_AMDIS
int recvAllValues = 0;
int isAdapted = static_cast<bool>(adapted);
MPI::COMM_WORLD.Allreduce(&isAdapted, &recvAllValues, 1, MPI_INT, MPI_SUM);
if (recvAllValues == 0)
break;
#else
if (!adapted)
break;
#endif
adaptInfo->incSpaceIteration();
}
......
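
The hunk above replaces the purely local break check of the adaption loop with an MPI reduction, so that all ranks agree on when to stop. A minimal standalone sketch of that pattern, using the plain C MPI API instead of the C++ bindings used in the commit (the helper name anyRankAdapted is hypothetical, not part of AMDiS):

#include <mpi.h>

// Every rank contributes its local "mesh was adapted" flag; the adaption loop
// may only terminate once the summed flag is zero on all ranks, otherwise the
// ranks would fall out of sync in subsequent collective calls.
bool anyRankAdapted(bool locallyAdapted)
{
  int isAdapted = locallyAdapted ? 1 : 0;
  int sum = 0;
  MPI_Allreduce(&isAdapted, &sum, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
  return sum > 0;
}
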
@@ -158,7 +158,7 @@ namespace AMDiS {
inline Flag operator^(const Flag& f) const
{
Flag r(flags);
r.flags ^=f.flags;
r.flags ^= f.flags;
return r;
}
......
@@ -363,10 +363,8 @@ namespace AMDiS {
}
void MeshStructure::print(bool resetCode)
string MeshStructure::toStr(bool resetCode)
{
FUNCNAME("MeshStructure::print()");
std::stringstream oss;
if (empty()) {
@@ -385,14 +383,24 @@ namespace AMDiS {
cont = nextElement();
}
}
return oss.str();
}
void MeshStructure::print(bool resetCode)
{
FUNCNAME("MeshStructure::print()");
string str = toStr(resetCode);
if (oss.str().length() < 255) {
MSG("Mesh structure code: %s\n", oss.str().c_str());
if (str.length() < 255) {
MSG("Mesh structure code: %s\n", str.c_str());
} else {
#ifdef HAVE_PARALLEL_DOMAIN_AMDIS
std::cout << "[" << MPI::COMM_WORLD.Get_rank() << "] Mesh structure code: " << oss.str() << "\n";
std::cout << "[" << MPI::COMM_WORLD.Get_rank() << "] Mesh structure code: " << str << "\n";
#else
std::cout << " Mesh structure code: " << oss.str() << "\n";
std::cout << " Mesh structure code: " << str << "\n";
#endif
}
}
......
@@ -126,6 +126,9 @@ namespace AMDiS {
int macroElIndex = -1,
bool ignoreFinerMesh = false);
/// Converts the mesh structure code to a string (for debugging).
string toStr(bool resetCode = true);
/// Prints the mesh structure code.
void print(bool resetCode = true);
......
@@ -45,10 +45,10 @@ namespace AMDiS {
: name(""),
problem(prob),
tsModulo(1),
timestepNumber(-1),
appendIndex(0),
indexLength(5),
indexDecimals(3)
indexDecimals(3),
timestepNumber(-1)
{
FUNCNAME("Serializer::Serializer()");
@@ -68,10 +68,10 @@ namespace AMDiS {
: name(filename),
problem(prob),
tsModulo(writeEveryIth),
timestepNumber(-1),
appendIndex(0),
indexLength(5),
indexDecimals(3)
indexDecimals(3),
timestepNumber(-1)
{
FUNCNAME("Serializer::Serializer()");
......
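
The two Serializer hunks above only reorder the constructor initializer lists, presumably so that they match the declaration order of the members. A minimal sketch (hypothetical struct, not part of the commit) of why that order matters in C++:

struct Sketch
{
  int indexDecimals;
  int timestepNumber;

  // Members are always constructed in declaration order, regardless of how the
  // initializer list is written; listing them out of order triggers -Wreorder
  // warnings and hides bugs when one initializer reads another member.
  Sketch() : indexDecimals(3), timestepNumber(-1) {}
};
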
@@ -15,27 +15,105 @@
namespace AMDiS {
void CheckerPartitioner::createInitialPartitioning()
{
FUNCNAME("CheckerPartitioner::createInitialPartitioning()");
int mpiRank = mpiComm->Get_rank();
int mpiSize = mpiComm->Get_size();
// In one of the stripe modes, we have to check whether the number of macro
// elements fits together with the number of nodes.
if (mode == 1 || mode == 2 || mode == 3) {
int elCounter = 0;
TraverseStack stack;
ElInfo *elInfo = stack.traverseFirst(mesh, 0, Mesh::CALL_EL_LEVEL);
while (elInfo) {
elCounter++;
elInfo = stack.traverseNext(elInfo);
}
if (mesh->getDim() == 2)
TEST_EXIT(elCounter == 2 * mpiSize * mpiSize)
("The number of macro elements is %d, but must be %d for %d number of nodes!",
elCounter, 2 * mpiSize * mpiSize, mpiSize);
int nElementsPerBlock = (mesh->getDim() == 2 ? 2 : 6);
if (mesh->getDim() == 3)
TEST_EXIT(elCounter == 6 * static_cast<int>(pow(mpiSize, 1.5)))
("The number of macro elements is %d, but must be %d for %d number of nodes!",
elCounter, 6 * static_cast<int>(pow(mpiSize, 1.5)), mpiSize);
}
int dim = mesh->getDim();
TraverseStack stack;
ElInfo *elInfo = stack.traverseFirst(mesh, 0, Mesh::CALL_EL_LEVEL);
while (elInfo) {
Element *el = elInfo->getElement();
int elIndex = el->getIndex();
int elInRank = elIndex / nElementsPerBlock;
TEST_EXIT_DBG(elInRank < mpiSize)("Should not happen!\n");
int boxIndex = elIndex / (dim == 2 ? 2 : 6);
int elInRank = -1;
switch (mode) {
case 0:
elInRank = boxIndex;
break;
case 1:
// x-slices
{
if (dim == 2)
elInRank = elIndex / (2 * mpiSize);
if (dim == 3) {
int boxSliceY =
(boxIndex % mpiSize) / static_cast<int>(sqrt(mpiSize));
int boxSliceZ = boxIndex / mpiSize;
elInRank = boxSliceY * static_cast<int>(sqrt(mpiSize)) + boxSliceZ;
}
}
break;
case 2:
// y-slices
{
if (dim == 2)
elInRank = (elIndex % (2 * mpiSize)) / 2;
if (dim == 3) {
int boxSliceX =
(boxIndex % mpiSize) % static_cast<int>(sqrt(mpiSize));
int boxSliceZ = boxIndex / mpiSize;
elInRank = boxSliceX * static_cast<int>(sqrt(mpiSize)) + boxSliceZ;
}
}
break;
case 3:
// z-slices
{
int boxSliceX = (boxIndex % mpiSize) % static_cast<int>(sqrt(mpiSize));
int boxSliceY = (boxIndex % mpiSize) / static_cast<int>(sqrt(mpiSize));
elInRank = boxSliceX * static_cast<int>(sqrt(mpiSize)) + boxSliceY;
}
break;
default:
ERROR_EXIT("Mode %d does not exists for checker based mesh partitioning!\n",
mode);
}
TEST_EXIT_DBG(elInRank >= 0)("Should not happen!\n");
TEST_EXIT_DBG(elInRank < mpiSize)("Should not happen!\n");
elementInRank[elIndex] = (elInRank == mpiRank);
partitionMap[elIndex] = elInRank;
elInfo = stack.traverseNext(elInfo);
}
}
......
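
A condensed, standalone sketch of the rank assignment implemented in the hunk above (the helper name elementToRank is hypothetical; it assumes, as the partitioner itself does, that mpiSize is a perfect square for the 3D stripe modes):

#include <cmath>

// Maps a macro element index to the rank that owns it. Each "box" consists of
// 2 triangles (2D) or 6 tetrahedra (3D); mode 0 assigns one box per rank,
// modes 1-3 assign whole stripes of boxes along the x, y or z direction.
int elementToRank(int elIndex, int dim, int mpiSize, int mode)
{
  int boxIndex = elIndex / (dim == 2 ? 2 : 6);
  int sqrtSize = static_cast<int>(std::sqrt(static_cast<double>(mpiSize)));

  switch (mode) {
  case 0:                               // checkerboard: one box per rank
    return boxIndex;
  case 1:                               // x-stripes
    if (dim == 2)
      return elIndex / (2 * mpiSize);
    return ((boxIndex % mpiSize) / sqrtSize) * sqrtSize + boxIndex / mpiSize;
  case 2:                               // y-stripes
    if (dim == 2)
      return (elIndex % (2 * mpiSize)) / 2;
    return ((boxIndex % mpiSize) % sqrtSize) * sqrtSize + boxIndex / mpiSize;
  case 3:                               // z-stripes (3D only)
    return ((boxIndex % mpiSize) % sqrtSize) * sqrtSize
           + ((boxIndex % mpiSize) / sqrtSize);
  default:
    return -1;                          // unknown mode
  }
}
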
@@ -25,16 +25,35 @@
#include "AMDiS_fwd.h"
#include "Global.h"
#include "Initfile.h"
#include "parallel/MeshPartitioner.h"
namespace AMDiS {
using namespace std;
class CheckerPartitioner : public MeshPartitioner
{
public:
CheckerPartitioner(MPI::Intracomm *comm)
: MeshPartitioner(comm)
{}
: MeshPartitioner(comm),
mpiRank(mpiComm->Get_rank()),
mpiSize(mpiComm->Get_size())
{
string modestr = "";
Parameters::get("parallel->partitioner->mode", modestr);
if (modestr == "")
mode = 0;
else if (modestr == "x-stripes")
mode = 1;
else if (modestr == "y-stripes")
mode = 2;
else if (modestr == "z-stripes")
mode = 3;
else
ERROR_EXIT("No partitioner mode \"%s\"!\n", modestr.c_str());
}
~CheckerPartitioner() {}
@@ -50,6 +69,14 @@ namespace AMDiS {
{
pMap = partitionMap;
}
protected:
int mpiRank, mpiSize;
/// 0: standard mode: each node gets one box
/// 1: x-stripes: each node gets one x-stripe of boxes
/// 2: y-stripes: each node gets one y-stripe of boxes
/// 3: z-stripes: each node gets one z-stripe of boxes
int mode;
};
}
......
@@ -77,8 +77,8 @@ namespace AMDiS {
info(10),
partitioner(NULL),
nRankDofs(0),
nOverallDofs(0),
rStartDofs(0),
nOverallDofs(0),
deserialized(false),
writeSerializationFile(false),
repartitioningAllowed(false),
@@ -297,6 +297,7 @@ namespace AMDiS {
ParallelDebug::testAllElements(*this);
debug::testSortedDofs(mesh, elMap);
ParallelDebug::testInteriorBoundary(*this);
ParallelDebug::followBoundary(*this);
debug::writeMesh(feSpace, -1, debugOutputDir + "macro_mesh");
@@ -892,8 +893,8 @@ namespace AMDiS {
map<int, MeshCodeVec> sendCodes;
for (RankToBoundMap::iterator it = allBound.begin(); it != allBound.end(); ++it) {
for (RankToBoundMap::iterator it = allBound.begin();
it != allBound.end(); ++it) {
for (vector<AtomicBoundary>::iterator boundIt = it->second.begin();
boundIt != it->second.end(); ++boundIt) {
MeshStructure elCode;
@@ -904,7 +905,8 @@ namespace AMDiS {
StdMpi<MeshCodeVec> stdMpi(mpiComm, true);
stdMpi.send(sendCodes);
for (RankToBoundMap::iterator it = allBound.begin(); it != allBound.end(); ++it)
for (RankToBoundMap::iterator it = allBound.begin();
it != allBound.end(); ++it)
stdMpi.recv(it->first);
MSG("DA 1\n");
@@ -917,7 +919,8 @@ namespace AMDiS {
bool meshChanged = false;
for (RankToBoundMap::iterator it = allBound.begin(); it != allBound.end(); ++it) {
for (RankToBoundMap::iterator it = allBound.begin();
it != allBound.end(); ++it) {
MeshCodeVec &recvCodes = stdMpi.getRecvData()[it->first];
int i = 0;
@@ -928,6 +931,10 @@ namespace AMDiS {
MeshStructure elCode;
elCode.init(boundIt->rankObj);
#if (DEBUG != 0)
ParallelDebug::followBoundary(mesh, *boundIt, elCode);
#endif
if (elCode.getCode() != recvCodes[i].getCode()) {
// MSG("MACRO EL %d %d %d DOES NOT FIT WITH MACRO EL %d %d %d ON RANK %d\n",
// boundIt->rankObj.elIndex, boundIt->rankObj.subObj, boundIt->rankObj.ithObj,
@@ -1440,77 +1447,78 @@ namespace AMDiS {
while (elObjects.iterate(geoIndex)) {
map<int, ElementObjectData>& objData = elObjects.getIterateData();
if (objData.count(mpiRank) && objData.size() > 1) {
int owner = elObjects.getIterateOwner();
ElementObjectData& rankBoundEl = objData[mpiRank];
TEST_EXIT_DBG(macroElIndexMap[rankBoundEl.elIndex])("Should not happen!\n");
AtomicBoundary bound;
bound.rankObj.el = macroElIndexMap[rankBoundEl.elIndex];
bound.rankObj.elIndex = rankBoundEl.elIndex;
bound.rankObj.elType = macroElIndexTypeMap[rankBoundEl.elIndex];
bound.rankObj.subObj = geoIndex;
bound.rankObj.ithObj = rankBoundEl.ithObject;
if (geoIndex == FACE) {
for (int edgeNo = 0; edgeNo < 3; edgeNo++) {
int edgeOfFace =
bound.rankObj.el->getEdgeOfFace(bound.rankObj.ithObj, edgeNo);
if (!(objData.count(mpiRank) && objData.size() > 1))
continue;
bound.rankObj.excludedSubstructures.push_back(make_pair(EDGE, edgeOfFace));
}
}
if (owner == mpiRank) {
for (map<int, ElementObjectData>::iterator it2 = objData.begin();
it2 != objData.end(); ++it2) {
if (it2->first == mpiRank)
continue;
bound.neighObj.el = macroElIndexMap[it2->second.elIndex];
bound.neighObj.elIndex = it2->second.elIndex;
bound.neighObj.elType = macroElIndexTypeMap[it2->second.elIndex];
bound.neighObj.subObj = geoIndex;
bound.neighObj.ithObj = it2->second.ithObject;
bound.type = INTERIOR;
AtomicBoundary& b = myIntBoundary.getNewAtomic(it2->first);
b = bound;
if (geoIndex == EDGE)
b.neighObj.reverseMode = elObjects.getEdgeReverseMode(rankBoundEl, it2->second);
if (geoIndex == FACE)
b.neighObj.reverseMode = elObjects.getFaceReverseMode(rankBoundEl, it2->second);
}
} else {
TEST_EXIT_DBG(objData.count(owner) == 1)
("Should not happen!\n");
int owner = elObjects.getIterateOwner();
ElementObjectData& rankBoundEl = objData[mpiRank];
TEST_EXIT_DBG(macroElIndexMap[rankBoundEl.elIndex])("Should not happen!\n");
AtomicBoundary bound;
bound.rankObj.el = macroElIndexMap[rankBoundEl.elIndex];
bound.rankObj.elIndex = rankBoundEl.elIndex;
bound.rankObj.elType = macroElIndexTypeMap[rankBoundEl.elIndex];
bound.rankObj.subObj = geoIndex;
bound.rankObj.ithObj = rankBoundEl.ithObject;
if (geoIndex == FACE) {
for (int edgeNo = 0; edgeNo < 3; edgeNo++) {
int edgeOfFace =
bound.rankObj.el->getEdgeOfFace(bound.rankObj.ithObj, edgeNo);
ElementObjectData& ownerBoundEl = objData[owner];
bound.rankObj.excludedSubstructures.push_back(make_pair(EDGE, edgeOfFace));
}
}
if (owner == mpiRank) {
for (map<int, ElementObjectData>::iterator it2 = objData.begin();
it2 != objData.end(); ++it2) {
if (it2->first == mpiRank)
continue;
bound.neighObj.el = macroElIndexMap[ownerBoundEl.elIndex];
bound.neighObj.elIndex = ownerBoundEl.elIndex;
bound.neighObj.elType = -1;
bound.neighObj.el = macroElIndexMap[it2->second.elIndex];
bound.neighObj.elIndex = it2->second.elIndex;
bound.neighObj.elType = macroElIndexTypeMap[it2->second.elIndex];
bound.neighObj.subObj = geoIndex;
bound.neighObj.ithObj = ownerBoundEl.ithObject;
bound.neighObj.ithObj = it2->second.ithObject;
bound.type = INTERIOR;
AtomicBoundary& b = otherIntBoundary.getNewAtomic(owner);
b = bound;
AtomicBoundary& b = myIntBoundary.getNewAtomic(it2->first);
b = bound;
if (geoIndex == EDGE)
b.rankObj.reverseMode = elObjects.getEdgeReverseMode(rankBoundEl, ownerBoundEl);
b.neighObj.reverseMode = elObjects.getEdgeReverseMode(rankBoundEl, it2->second);
if (geoIndex == FACE)
b.rankObj.reverseMode = elObjects.getFaceReverseMode(rankBoundEl, ownerBoundEl);
b.neighObj.reverseMode = elObjects.getFaceReverseMode(rankBoundEl, it2->second);
}
} else {
TEST_EXIT_DBG(objData.count(owner) == 1)
("Should not happen!\n");
ElementObjectData& ownerBoundEl = objData[owner];
bound.neighObj.el = macroElIndexMap[ownerBoundEl.elIndex];
bound.neighObj.elIndex = ownerBoundEl.elIndex;
bound.neighObj.elType = -1;
bound.neighObj.subObj = geoIndex;
bound.neighObj.ithObj = ownerBoundEl.ithObject;
bound.type = INTERIOR;
AtomicBoundary& b = otherIntBoundary.getNewAtomic(owner);
b = bound;
if (geoIndex == EDGE)
b.rankObj.reverseMode = elObjects.getEdgeReverseMode(rankBoundEl, ownerBoundEl);
if (geoIndex == FACE)
b.rankObj.reverseMode = elObjects.getFaceReverseMode(rankBoundEl, ownerBoundEl);
}
}
}
// === Create periodic boundary data structure. ===
for (PerBoundMap<DegreeOfFreedom>::iterator it = elObjects.getPeriodicVertices().begin();
......
@@ -835,6 +835,7 @@ namespace AMDiS {
ElementFileWriter::writeFile(vec, pdb.mesh, filename);
}
void ParallelDebug::writePartitioningFile(string filename,
int counter,
FiniteElemSpace *feSpace)
@@ -851,4 +852,57 @@ namespace AMDiS {
tmpa.set(MPI::COMM_WORLD.Get_rank());
VtkWriter::writeFile(&tmpa, oss.str());
}
bool ParallelDebug::followThisBound(int rankElIndex, int neighElIndex)
{
FUNCNAME("ParallelDebug::followThisBound()");
int el0 = std::min(rankElIndex, neighElIndex);
int el1 = std::max(rankElIndex, neighElIndex);
vector<int> els;
Parameters::get("parallel->debug->follow boundary", els);
if (els.size() != 2)
return false;
return (el0 == els[0] && el1 == els[1]);
}
void ParallelDebug::followBoundary(MeshDistributor &pdb)
{
FUNCNAME("ParallelDebug::followBoundary()");
for (InteriorBoundary::iterator it(pdb.myIntBoundary); !it.end(); ++it)
if (followThisBound(it->rankObj.elIndex, it->neighObj.elIndex))
debug::writeLocalElementDofs(pdb.mpiRank,
it->rankObj.elIndex,
pdb.feSpace);
for (InteriorBoundary::iterator it(pdb.otherIntBoundary); !it.end(); ++it)
if (followThisBound(it->rankObj.elIndex, it->neighObj.elIndex))
debug::writeLocalElementDofs(pdb.mpiRank,
it->rankObj.elIndex,
pdb.feSpace);
}
void ParallelDebug::followBoundary(Mesh *mesh,
AtomicBoundary &bound,
MeshStructure &code)
{
FUNCNAME("ParallelDebug::followBoundary()");
if (mesh->getDim() != bound.rankObj.subObj)
return;
if (!followThisBound(bound.rankObj.elIndex, bound.neighObj.elIndex))
return;
MSG("Mesh structure code of bound %d/%d <-> %d/%d: %s\n",
bound.rankObj.elIndex, mesh->getDim(),
bound.neighObj.elIndex, mesh->getDim(),
code.toStr().c_str());
}
}
@@ -160,6 +160,15 @@ namespace AMDiS {
static void writePartitioningFile(std::string filename,
int counter,
FiniteElemSpace *feSpace);
static bool followThisBound(int rankElIndex, int neighElIndex);
static void followBoundary(MeshDistributor &pdb);
static void followBoundary(Mesh *mesh,
AtomicBoundary &bound,
MeshStructure &code);
};
} // namespace AMDiS
......
//
// Software License for AMDiS
//
// Copyright (c) 2010 Dresden University of Technology
// All rights reserved.
// Authors: Simon Vey, Thomas Witkowski et al.
//
// This file is part of AMDiS
//
// See also license.opensource.txt in the distribution.
#include "parallel/PetscMultigridPrecon.h"
namespace AMDiS {
#ifdef HAVE_PETSC_DEV
using namespace std;
PetscErrorCode multigridComputeRhs(DM da, Vec x, Vec b)
{
}