Commit aff33faa authored by Thomas Witkowski's avatar Thomas Witkowski

Work on parallelization.

parent 808eac8e
......@@ -239,7 +239,8 @@ if(ENABLE_PARALLEL_DOMAIN)
find_package(PETSc REQUIRED)
include_directories(${PETSC_DIR}/include ${PETSC_DIR}/${PETSC_ARCH}/include)
list(APPEND AMDIS_INCLUDE_DIRS ${PETSC_DIR}/include ${PETSC_DIR}/${PETSC_ARCH}/include)
list(APPEND PARALLEL_DOMAIN_AMDIS_SRC ${SOURCE_DIR}/parallel/PetscSolver.cc
list(APPEND PARALLEL_DOMAIN_AMDIS_SRC ${SOURCE_DIR}/parallel/PetscMultigridPrecon.cc
${SOURCE_DIR}/parallel/PetscSolver.cc
${SOURCE_DIR}/parallel/PetscProblemStat.cc
${SOURCE_DIR}/parallel/PetscSolverFeti.cc
${SOURCE_DIR}/parallel/PetscSolverGlobalMatrix.cc
......
......@@ -73,8 +73,16 @@ namespace AMDiS {
Flag adapted = problemIteration->oneIteration(adaptInfo, FULL_ITERATION);
problemIteration->endIteration(adaptInfo);
#if HAVE_PARALLEL_DOMAIN_AMDIS
int recvAllValues = 0;
int isAdapted = static_cast<bool>(adapted);
MPI::COMM_WORLD.Allreduce(&isAdapted, &recvAllValues, 1, MPI_INT, MPI_SUM);
if (recvAllValues == 0)
break;
#else
if (!adapted)
break;
#endif
adaptInfo->incSpaceIteration();
}
......
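
The #if HAVE_PARALLEL_DOMAIN_AMDIS block above makes the space-adaptation loop terminate only when no rank has adapted its mesh: each rank contributes 0 or 1 and the values are summed over MPI::COMM_WORLD. A minimal illustrative sketch of the same check written against the C MPI API (the commit itself uses the C++ bindings shown above):

    #include <mpi.h>

    // Global "did any rank adapt?" check: sum the per-rank flags, the
    // result is identical on all ranks.
    bool globallyAdapted(bool locallyAdapted)
    {
      int isAdapted = locallyAdapted ? 1 : 0;
      int sum = 0;
      MPI_Allreduce(&isAdapted, &sum, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
      return sum > 0;   // at least one rank adapted, so iterate again
    }
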
......@@ -158,7 +158,7 @@ namespace AMDiS {
inline Flag operator^(const Flag& f) const
{
Flag r(flags);
r.flags ^=f.flags;
r.flags ^= f.flags;
return r;
}
......
......@@ -363,10 +363,8 @@ namespace AMDiS {
}
void MeshStructure::print(bool resetCode)
string MeshStructure::toStr(bool resetCode)
{
FUNCNAME("MeshStructure::print()");
std::stringstream oss;
if (empty()) {
......@@ -386,13 +384,23 @@ namespace AMDiS {
}
}
if (oss.str().length() < 255) {
MSG("Mesh structure code: %s\n", oss.str().c_str());
return oss.str();
}
void MeshStructure::print(bool resetCode)
{
FUNCNAME("MeshStructure::print()");
string str = toStr(resetCode);
if (str.length() < 255) {
MSG("Mesh structure code: %s\n", str.c_str());
} else {
#ifdef HAVE_PARALLEL_DOMAIN_AMDIS
std::cout << "[" << MPI::COMM_WORLD.Get_rank() << "] Mesh structure code: " << oss.str() << "\n";
std::cout << "[" << MPI::COMM_WORLD.Get_rank() << "] Mesh structure code: " << str << "\n";
#else
std::cout << " Mesh structure code: " << oss.str() << "\n";
std::cout << " Mesh structure code: " << str << "\n";
#endif
}
}
......
......@@ -126,6 +126,9 @@ namespace AMDiS {
int macroElIndex = -1,
bool ignoreFinerMesh = false);
/// Converts the mesh structure code to a string (for debugging).
string toStr(bool resetCode = true);
/// Prints the mesh structure code.
void print(bool resetCode = true);
......
......@@ -45,10 +45,10 @@ namespace AMDiS {
: name(""),
problem(prob),
tsModulo(1),
timestepNumber(-1),
appendIndex(0),
indexLength(5),
indexDecimals(3)
indexDecimals(3),
timestepNumber(-1)
{
FUNCNAME("Serializer::Serializer()");
......@@ -68,10 +68,10 @@ namespace AMDiS {
: name(filename),
problem(prob),
tsModulo(writeEveryIth),
timestepNumber(-1),
appendIndex(0),
indexLength(5),
indexDecimals(3)
indexDecimals(3),
timestepNumber(-1)
{
FUNCNAME("Serializer::Serializer()");
......
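
The two constructor changes above only reorder the member initializers so that they match the declaration order of the members; C++ initializes non-static members in declaration order regardless of the order in the initializer list, so the reordering silences compiler warnings such as -Wreorder without changing behavior. A minimal illustration, with member names assumed from the reordering rather than taken from Serializer.h:

    struct Example {
      int indexDecimals;
      int timestepNumber;

      // Initializers listed in declaration order: indexDecimals is set
      // first, timestepNumber second, exactly as the compiler performs it.
      Example() : indexDecimals(3), timestepNumber(-1) {}
    };
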
......@@ -20,17 +20,95 @@ namespace AMDiS {
{
FUNCNAME("CheckerPartitioner::createInitialPartitioning()");
int mpiRank = mpiComm->Get_rank();
int mpiSize = mpiComm->Get_size();
// In one of the stripes modes, we have to check whether the number of
// macro elements fits together with the number of nodes.
if (mode == 1 || mode == 2 || mode == 3) {
int elCounter = 0;
TraverseStack stack;
ElInfo *elInfo = stack.traverseFirst(mesh, 0, Mesh::CALL_EL_LEVEL);
while (elInfo) {
elCounter++;
elInfo = stack.traverseNext(elInfo);
}
if (mesh->getDim() == 2)
TEST_EXIT(elCounter == 2 * mpiSize * mpiSize)
("The number of macro elements is %d, but must be %d for %d number of nodes!",
elCounter, 2 * mpiSize * mpiSize, mpiSize);
int nElementsPerBlock = (mesh->getDim() == 2 ? 2 : 6);
if (mesh->getDim() == 3)
TEST_EXIT(elCounter == 6 * static_cast<int>(pow(mpiSize, 1.5)))
("The number of macro elements is %d, but must be %d for %d number of nodes!",
elCounter, 6 * static_cast<int>(pow(mpiSize, 1.5)), mpiSize);
}
int dim = mesh->getDim();
TraverseStack stack;
ElInfo *elInfo = stack.traverseFirst(mesh, 0, Mesh::CALL_EL_LEVEL);
while (elInfo) {
Element *el = elInfo->getElement();
int elIndex = el->getIndex();
int elInRank = elIndex / nElementsPerBlock;
int boxIndex = elIndex / (dim == 2 ? 2 : 6);
int elInRank = -1;
switch (mode) {
case 0:
elInRank = boxIndex;
break;
case 1:
// x-stripes
{
if (dim == 2)
elInRank = elIndex / (2 * mpiSize);
if (dim == 3) {
int boxSliceY =
(boxIndex % mpiSize) / static_cast<int>(sqrt(mpiSize));
int boxSliceZ = boxIndex / mpiSize;
elInRank = boxSliceY * static_cast<int>(sqrt(mpiSize)) + boxSliceZ;
}
}
break;
case 2:
// y-stripes
{
if (dim == 2)
elInRank = (elIndex % (2 * mpiSize)) / 2;
if (dim == 3) {
int boxSliceX =
(boxIndex % mpiSize) % static_cast<int>(sqrt(mpiSize));
int boxSliceZ = boxIndex / mpiSize;
elInRank = boxSliceX * static_cast<int>(sqrt(mpiSize)) + boxSliceZ;
}
}
break;
case 3:
// z-stripes
{
int boxSliceX = (boxIndex % mpiSize) % static_cast<int>(sqrt(mpiSize));
int boxSliceY = (boxIndex % mpiSize) / static_cast<int>(sqrt(mpiSize));
elInRank = boxSliceX * static_cast<int>(sqrt(mpiSize)) + boxSliceY;
}
break;
default:
ERROR_EXIT("Mode %d does not exists for checker based mesh partitioning!\n",
mode);
}
TEST_EXIT_DBG(elInRank >= 0)("Should not happen!\n");
TEST_EXIT_DBG(elInRank < mpiSize)("Should not happen!\n");
elementInRank[elIndex] = (elInRank == mpiRank);
......
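
For orientation, a small self-contained check of the 2d x-stripes rule used above (elInRank = elIndex / (2 * mpiSize)); the numbers are illustrative, assuming mpiSize = 4 and the required 2 * mpiSize * mpiSize = 32 macro elements:

    #include <cassert>

    // 2d, mode 1 (x-stripes): stripes of 2 * mpiSize consecutive macro elements.
    int stripeRank(int elIndex, int mpiSize) { return elIndex / (2 * mpiSize); }

    int main()
    {
      assert(stripeRank(0, 4)  == 0);   // elements  0..7  -> rank 0
      assert(stripeRank(7, 4)  == 0);
      assert(stripeRank(8, 4)  == 1);   // elements  8..15 -> rank 1
      assert(stripeRank(31, 4) == 3);   // element  31     -> rank 3
      return 0;
    }
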
......@@ -25,16 +25,35 @@
#include "AMDiS_fwd.h"
#include "Global.h"
#include "Initfile.h"
#include "parallel/MeshPartitioner.h"
namespace AMDiS {
using namespace std;
class CheckerPartitioner : public MeshPartitioner
{
public:
CheckerPartitioner(MPI::Intracomm *comm)
: MeshPartitioner(comm)
{}
: MeshPartitioner(comm),
mpiRank(mpiComm->Get_rank()),
mpiSize(mpiComm->Get_size())
{
string modestr = "";
Parameters::get("parallel->partitioner->mode", modestr);
if (modestr == "")
mode = 0;
else if (modestr == "x-stripes")
mode = 1;
else if (modestr == "y-stripes")
mode = 2;
else if (modestr == "z-stripes")
mode = 3;
else
ERROR_EXIT("No partitioner mode \"%s\"!\n", modestr.c_str());
}
~CheckerPartitioner() {}
......@@ -50,6 +69,14 @@ namespace AMDiS {
{
pMap = partitionMap;
}
protected:
int mpiRank, mpiSize;
/// 0: standard mode: each node gets one box
/// 1: x-stripes: each node gets one x-stripe of boxes
/// 2: y-stripes: each node gets one y-stripe of boxes
/// 3: z-stripes: each node gets one z-stripe of boxes
int mode;
};
}
......
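
The constructor above reads the mode from the parameter file via Parameters::get("parallel->partitioner->mode", ...). A hypothetical init-file line (the key name comes from the code above; the key/value syntax is assumed to follow the usual AMDiS format):

    parallel->partitioner->mode: y-stripes

Leaving the key unset keeps the default checkerboard mode (mode 0).
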
......@@ -77,8 +77,8 @@ namespace AMDiS {
info(10),
partitioner(NULL),
nRankDofs(0),
nOverallDofs(0),
rStartDofs(0),
nOverallDofs(0),
deserialized(false),
writeSerializationFile(false),
repartitioningAllowed(false),
......@@ -297,6 +297,7 @@ namespace AMDiS {
ParallelDebug::testAllElements(*this);
debug::testSortedDofs(mesh, elMap);
ParallelDebug::testInteriorBoundary(*this);
ParallelDebug::followBoundary(*this);
debug::writeMesh(feSpace, -1, debugOutputDir + "macro_mesh");
......@@ -892,8 +893,8 @@ namespace AMDiS {
map<int, MeshCodeVec> sendCodes;
for (RankToBoundMap::iterator it = allBound.begin(); it != allBound.end(); ++it) {
for (RankToBoundMap::iterator it = allBound.begin();
it != allBound.end(); ++it) {
for (vector<AtomicBoundary>::iterator boundIt = it->second.begin();
boundIt != it->second.end(); ++boundIt) {
MeshStructure elCode;
......@@ -904,7 +905,8 @@ namespace AMDiS {
StdMpi<MeshCodeVec> stdMpi(mpiComm, true);
stdMpi.send(sendCodes);
for (RankToBoundMap::iterator it = allBound.begin(); it != allBound.end(); ++it)
for (RankToBoundMap::iterator it = allBound.begin();
it != allBound.end(); ++it)
stdMpi.recv(it->first);
MSG("DA 1\n");
......@@ -917,7 +919,8 @@ namespace AMDiS {
bool meshChanged = false;
for (RankToBoundMap::iterator it = allBound.begin(); it != allBound.end(); ++it) {
for (RankToBoundMap::iterator it = allBound.begin();
it != allBound.end(); ++it) {
MeshCodeVec &recvCodes = stdMpi.getRecvData()[it->first];
int i = 0;
......@@ -928,6 +931,10 @@ namespace AMDiS {
MeshStructure elCode;
elCode.init(boundIt->rankObj);
#if (DEBUG != 0)
ParallelDebug::followBoundary(mesh, *boundIt, elCode);
#endif
if (elCode.getCode() != recvCodes[i].getCode()) {
// MSG("MACRO EL %d %d %d DOES NOT FIT WITH MACRO EL %d %d %d ON RANK %d\n",
// boundIt->rankObj.elIndex, boundIt->rankObj.subObj, boundIt->rankObj.ithObj,
......@@ -1440,7 +1447,9 @@ namespace AMDiS {
while (elObjects.iterate(geoIndex)) {
map<int, ElementObjectData>& objData = elObjects.getIterateData();
if (objData.count(mpiRank) && objData.size() > 1) {
if (!(objData.count(mpiRank) && objData.size() > 1))
continue;
int owner = elObjects.getIterateOwner();
ElementObjectData& rankBoundEl = objData[mpiRank];
......@@ -1508,7 +1517,6 @@ namespace AMDiS {
}
}
}
}
// === Create periodic boundary data structure. ===
......
......@@ -835,6 +835,7 @@ namespace AMDiS {
ElementFileWriter::writeFile(vec, pdb.mesh, filename);
}
void ParallelDebug::writePartitioningFile(string filename,
int counter,
FiniteElemSpace *feSpace)
......@@ -851,4 +852,57 @@ namespace AMDiS {
tmpa.set(MPI::COMM_WORLD.Get_rank());
VtkWriter::writeFile(&tmpa, oss.str());
}
bool ParallelDebug::followThisBound(int rankElIndex, int neighElIndex)
{
FUNCNAME("ParallelDebug::followThisBound()");
int el0 = std::min(rankElIndex, neighElIndex);
int el1 = std::max(rankElIndex, neighElIndex);
vector<int> els;
Parameters::get("parallel->debug->follow boundary", els);
if (els.size() != 2)
return false;
return (el0 == els[0] && el1 == els[1]);
}
void ParallelDebug::followBoundary(MeshDistributor &pdb)
{
FUNCNAME("ParallelDebug::followBoundary()");
for (InteriorBoundary::iterator it(pdb.myIntBoundary); !it.end(); ++it)
if (followThisBound(it->rankObj.elIndex, it->neighObj.elIndex))
debug::writeLocalElementDofs(pdb.mpiRank,
it->rankObj.elIndex,
pdb.feSpace);
for (InteriorBoundary::iterator it(pdb.otherIntBoundary); !it.end(); ++it)
if (followThisBound(it->rankObj.elIndex, it->neighObj.elIndex))
debug::writeLocalElementDofs(pdb.mpiRank,
it->rankObj.elIndex,
pdb.feSpace);
}
void ParallelDebug::followBoundary(Mesh *mesh,
AtomicBoundary &bound,
MeshStructure &code)
{
FUNCNAME("ParallelDebug::followBoundary()");
if (mesh->getDim() != bound.rankObj.subObj)
return;
if (!followThisBound(bound.rankObj.elIndex, bound.neighObj.elIndex))
return;
MSG("Mesh structure code of bound %d/%d <-> %d/%d: %s\n",
bound.rankObj.elIndex, mesh->getDim(),
bound.neighObj.elIndex, mesh->getDim(),
code.toStr().c_str());
}
}
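
followThisBound() reads a pair of macro element indices from the parameter "parallel->debug->follow boundary" and reports only interior boundaries between exactly these two elements. A hypothetical configuration entry (the element indices are made up, and the vector syntax is assumed to follow the usual AMDiS init-file conventions):

    parallel->debug->follow boundary: [14, 27]
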
......@@ -160,6 +160,15 @@ namespace AMDiS {
static void writePartitioningFile(std::string filename,
int counter,
FiniteElemSpace *feSpace);
static bool followThisBound(int rankElIndex, int neighElIndex);
static void followBoundary(MeshDistributor &pdb);
static void followBoundary(Mesh *mesh,
AtomicBoundary &bound,
MeshStructure &code);
};
} // namespace AMDiS
......
//
// Software License for AMDiS
//
// Copyright (c) 2010 Dresden University of Technology
// All rights reserved.
// Authors: Simon Vey, Thomas Witkowski et al.
//
// This file is part of AMDiS
//
// See also license.opensource.txt in the distribution.
#include "parallel/PetscMultigridPrecon.h"
namespace AMDiS {
#ifdef HAVE_PETSC_DEV
using namespace std;
PetscErrorCode multigridComputeRhs(DM da, Vec x, Vec b)
{
// Stub: the right-hand side on the DMDA levels is not assembled yet.
return 0;
}
PetscErrorCode multigridComputeMatrix(DM da, Vec x,
Mat J, Mat jac, MatStructure *str)
{
// Stub: the level matrices for the DMDA hierarchy are not assembled yet.
return 0;
}
PetscMultigridPrecon::PetscMultigridPrecon()
{}
void PetscMultigridPrecon::init(KSP &ksp)
{
// Placeholder problem sizes; not yet derived from the FE space.
int globalDofX = 100;
int globalDofY = 100;
// The DMDA process grid is nProc x nProc, so a square number of ranks is assumed.
int nProc = static_cast<int>(sqrt(MPI::COMM_WORLD.Get_size()));
int nDofsPerNode = 1000;
DMDACreate2d(PETSC_COMM_WORLD,
DMDA_BOUNDARY_NONE, DMDA_BOUNDARY_NONE,
DMDA_STENCIL_STAR,
globalDofX, globalDofY,
nProc, nProc,
nDofsPerNode,
1,
PETSC_NULL, PETSC_NULL, &dmObj);
DMSetFunction(dmObj, multigridComputeRhs);
DMSetJacobian(dmObj, multigridComputeMatrix);
KSPSetDM(ksp, dmObj);
}
void PetscMultigridPrecon::cleanUp()
{
DMDestroy(&dmObj);
}
#endif
}
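
PetscMultigridPrecon::init() attaches a DMDA to the KSP so that PETSc can build a geometric multigrid hierarchy from it; the callbacks registered via DMSetFunction()/DMSetJacobian() are still stubs. A hypothetical usage sketch, with the surrounding solver setup assumed and not part of this commit:

    // Assumed context: 'ksp', 'rhsVec' and 'solVec' were created by the
    // PETSc-based solver elsewhere.
    PetscMultigridPrecon precon;
    precon.init(ksp);             // create the DMDA and hand it to the KSP
    KSPSetFromOptions(ksp);       // e.g. run with -pc_type mg to enable multigrid
    KSPSolve(ksp, rhsVec, solVec);
    precon.cleanUp();             // destroy the DMDA again
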
// ============================================================================
// == ==
// == AMDiS - Adaptive multidimensional simulations ==
// == ==
// == http://www.amdis-fem.org ==
// == ==
// ============================================================================
//
// Software License for AMDiS
//
// Copyright (c) 2010 Dresden University of Technology
// All rights reserved.
// Authors: Simon Vey, Thomas Witkowski et al.
//
// This file is part of AMDiS
//
// See also license.opensource.txt in the distribution.
/** \file PetscMultigridPrecon.h */
#ifndef AMDIS_PETSC_MULTIGRID_PRECON_H
#define AMDIS_PETSC_MULTIGRID_PRECON_H
#include <petsc.h>
namespace AMDiS {
using namespace std;
#ifdef HAVE_PETSC_DEV
class PetscMultigridPrecon
{
public:
PetscMultigridPrecon();
void init(KSP &ksp);
void cleanUp();
protected:
DM dmObj;
};
#endif
} // namespace AMDiS
#endif
......@@ -34,17 +34,9 @@ namespace AMDiS {
Parameters::get("parallel->solver", name);
if (name == "petsc-schur") {
#ifdef HAVE_PETSC_DEV
petscSolver = new PetscSolverSchur();
#else
ERROR_EXIT("PETSc schur complement solver is only supported when petsc-dev is used!\n");
#endif
} else if (name == "petsc-feti") {
#ifdef HAVE_PETSC_DEV
petscSolver = new PetscSolverFeti();
#else
ERROR_EXIT("PETSc FETI-DP solver is only supported when petsc-dev is used!\n");
#endif
} else if (name == "petsc-block") {
petscSolver = new PetscSolverGlobalBlockMatrix();
} else if (name == "petsc" || name == "") {
......
......@@ -19,8 +19,6 @@ namespace AMDiS {
using namespace std;
#ifdef HAVE_PETSC_DEV
// y = mat * x
int petscMultMatSchurPrimal(Mat mat, Vec x, Vec y)
{
......@@ -1708,6 +1706,4 @@ namespace AMDiS {
MeshDistributor::globalMeshDistributor->synchVector(vec);
}
#endif
}
......@@ -31,8 +31,6 @@ namespace AMDiS {
using namespace std;
#ifdef HAVE_PETSC_DEV
class PetscSolverFeti;
/** \brief
......@@ -338,8 +336,6 @@ namespace AMDiS {
// Number of local nodes that are duals.
int nLocalDuals;
};
#endif
}
#endif
......@@ -108,15 +108,17 @@ namespace AMDiS {
{
FUNCNAME("PetscSolverGlobalBlockMatrix::solvePetscMatrix()");
VecDuplicate(petscRhsVec, &petscSolVec);
// PETSc.
KSPSolve(solver, petscRhsVec, petscRhsVec);
KSPSolve(solver, petscRhsVec, petscSolVec);
// === Transfer values from PETSc's solution vectors to the DOF vectors. ===
for (int i = 0; i < nComponents; i++) {
DOFVector<double> &dofvec = *(vec.getDOFVector(i));
Vec tmp;
VecNestGetSubVec(petscRhsVec, i, &tmp);
VecNestGetSubVec(petscSolVec, i, &tmp);
int nRankDofs = meshDistributor->getNumberRankDofs();
PetscScalar *vecPointer;
......@@ -138,6 +140,8 @@ namespace AMDiS {
VecDestroy(&petscRhsVec);
for (int i = 0; i < nComponents; i++)
VecDestroy(&(nestVec[i]));
VecDestroy(&petscSolVec);
}
......
......@@ -174,11 +174,7 @@ namespace AMDiS {
// === Destroy PETSc's variables. ===
#ifdef HAVE_PETSC_DEV
VecDestroy(&petscRhsVec);
#else
VecDestroy(petscRhsVec);