Commit 2c96156e authored by Thomas Witkowski's avatar Thomas Witkowski

Blub

parent a2e41205
......@@ -42,6 +42,11 @@ namespace AMDiS {
elCounter, 6 * static_cast<int>(pow(mpiSize, 1.5)), mpiSize);
}
if (multilevel) {
TEST_EXIT(MPI::COMM_WORLD.Get_size() == 16)
("Multilevel partitioning is implemented for 16 nodes only!\n");
}
int dim = mesh->getDim();
TraverseStack stack;
......@@ -54,7 +59,13 @@ namespace AMDiS {
switch (mode) {
case 0:
elInRank = boxIndex;
if (!multilevel) {
elInRank = boxIndex;
} else {
TEST_EXIT_DBG(boxIndex >= 0 && boxIndex <= 15)("Wrong box index!\n");
int rs[16] = {0, 1, 4, 5, 2, 3, 6, 7, 8, 9, 12, 13, 10, 11, 14, 15};
elInRank = rs[boxIndex];
}
break;
case 1:
......
......@@ -38,21 +38,26 @@ namespace AMDiS {
CheckerPartitioner(MPI::Intracomm *comm)
: MeshPartitioner(comm),
mpiRank(mpiComm->Get_rank()),
mpiSize(mpiComm->Get_size())
mpiSize(mpiComm->Get_size()),
mode(0),
multilevel(false)
{
string modestr = "";
Parameters::get("parallel->partitioner->mode", modestr);
if (modestr == "")
mode = 0;
else if (modestr == "x-stripes")
if (modestr == "x-stripes")
mode = 1;
else if (modestr == "y-stripes")
mode = 2;
else if (modestr == "z-stripes")
mode = 3;
else
ERROR_EXIT("No partitioner mode \"%s\"!\n", modestr.c_str());
else if (modestr == "multilevel")
multilevel = true;
else {
if (modestr != "") {
ERROR_EXIT("No partitioner mode \"%s\"!\n", modestr.c_str());
}
}
}
~CheckerPartitioner() {}
......@@ -77,6 +82,8 @@ namespace AMDiS {
/// 1: x-stripes: each node gets one x-stripe of boxes
/// 2: y-stripes: each node gets one y-stripe of boxes
int mode;
bool multilevel;
};
}
......
......@@ -91,22 +91,29 @@ namespace AMDiS {
int DofComm::getNumberDofs(LevelDataType &data,
int level,
const FiniteElemSpace *feSpace)
const FiniteElemSpace *feSpace,
bool countDouble)
{
FUNCNAME("DofComm::getNumberDofs()");
TEST_EXIT_DBG(level < data.size())("Should not happen!\n");
DofContainerSet dofs;
DofContainerSet dofSet;
DofContainer dofVec;
for (DataIter rankIt = data[level].begin();
rankIt != data[level].end(); ++rankIt)
for (FeMapIter feIt = rankIt->second.begin();
feIt != rankIt->second.end(); ++feIt)
if (feIt->first == feSpace)
dofs.insert(feIt->second.begin(), feIt->second.end());
return static_cast<int>(dofs.size());
if (countDouble)
dofVec.insert(dofVec.end(), feIt->second.begin(), feIt->second.end());
else
dofSet.insert(feIt->second.begin(), feIt->second.end());
if (countDouble)
return static_cast<int>(dofVec.size());
return static_cast<int>(dofSet.size());
}
......
......@@ -87,7 +87,8 @@ namespace AMDiS {
int getNumberDofs(LevelDataType &data,
int level,
const FiniteElemSpace *feSpace);
const FiniteElemSpace *feSpace,
bool countDouble = false);
protected:
void createContainer(RankToBoundMap &boundary, LevelDataType &data);
......
......@@ -176,7 +176,8 @@ namespace AMDiS {
createBoundaryDofs();
#if (DEBUG != 0)
ParallelDebug::writeDebugFile(*this, debugOutputDir + "mpi-dbg", "dat");
ParallelDebug::writeDebugFile(feSpaces[feSpaces.size() - 1], dofMap,
debugOutputDir + "mpi-dbg", "dat");
#endif
initialized = true;
......@@ -501,13 +502,18 @@ namespace AMDiS {
}
void MeshDistributor::synchVector(SystemVector &vec)
void MeshDistributor::synchVector(SystemVector &vec, int level)
{
FUNCNAME("MeshDistributor::synchVector()");
StdMpi<vector<double> > stdMpi(mpiComm);
TEST_EXIT_DBG(level >= 0 && level <= 1)("Wrong level number!\n");
for (DofComm::Iterator it(dofComm.getSendDofs());
MPI::Intracomm &levelComm = levelData.getMpiComm(level);
DofComm &dc = (level == 0 ? dofComm : dofCommSd);
StdMpi<vector<double> > stdMpi(levelComm);
for (DofComm::Iterator it(dc.getSendDofs());
!it.end(); it.nextRank()) {
vector<double> dofs;
......@@ -518,22 +524,33 @@ namespace AMDiS {
dofs.push_back(dofVec[it.getDofIndex()]);
}
stdMpi.send(it.getRank(), dofs);
int rank = it.getRank();
if (level > 0)
rank = levelData.mapRank(rank, 0, level);
stdMpi.send(rank, dofs);
}
for (DofComm::Iterator it(dofComm.getRecvDofs()); !it.end(); it.nextRank())
stdMpi.recv(it.getRank());
for (DofComm::Iterator it(dc.getRecvDofs()); !it.end(); it.nextRank()) {
int rank = it.getRank();
if (level > 0)
rank = levelData.mapRank(rank, 0, level);
stdMpi.recv(rank);
}
stdMpi.startCommunication();
for (DofComm::Iterator it(dc.getRecvDofs()); !it.end(); it.nextRank()) {
int rank = it.getRank();
if (level > 0)
rank = levelData.mapRank(rank, 0, level);
for (DofComm::Iterator it(dofComm.getRecvDofs()); !it.end(); it.nextRank()) {
int counter = 0;
for (int i = 0; i < vec.getSize(); i++) {
DOFVector<double> &dofVec = *(vec.getDOFVector(i));
for (it.beginDofIter(vec.getFeSpace(i)); !it.endDofIter(); it.nextDof())
dofVec[it.getDofIndex()] = stdMpi.getRecvData(it.getRank())[counter++];
dofVec[it.getDofIndex()] = 0.0;//stdMpi.getRecvData(rank)[counter++];
}
}
}
......@@ -777,60 +794,60 @@ namespace AMDiS {
std::set<int> neighbours;
switch (mpiRank) {
case 0:
neighbours.insert(1); neighbours.insert(4); neighbours.insert(5);
neighbours.insert(1); neighbours.insert(2); neighbours.insert(3);
break;
case 1:
neighbours.insert(0); neighbours.insert(2); neighbours.insert(4); neighbours.insert(5); neighbours.insert(6);
neighbours.insert(0); neighbours.insert(4); neighbours.insert(2); neighbours.insert(3); neighbours.insert(6);
break;
case 2:
neighbours.insert(1); neighbours.insert(3); neighbours.insert(5); neighbours.insert(6); neighbours.insert(7);
neighbours.insert(0); neighbours.insert(1); neighbours.insert(3); neighbours.insert(8); neighbours.insert(9);
break;
case 3:
neighbours.insert(2); neighbours.insert(6); neighbours.insert(7);
neighbours.insert(0); neighbours.insert(1); neighbours.insert(4);
neighbours.insert(2); neighbours.insert(6);
neighbours.insert(8); neighbours.insert(9); neighbours.insert(12);
break;
case 4:
neighbours.insert(0); neighbours.insert(1); neighbours.insert(5); neighbours.insert(8); neighbours.insert(9);
neighbours.insert(1); neighbours.insert(3); neighbours.insert(6); neighbours.insert(7); neighbours.insert(5);
break;
case 5:
neighbours.insert(0); neighbours.insert(1); neighbours.insert(2);
neighbours.insert(4); neighbours.insert(6);
neighbours.insert(8); neighbours.insert(9); neighbours.insert(10);
neighbours.insert(4); neighbours.insert(6); neighbours.insert(7);
break;
case 6:
neighbours.insert(1); neighbours.insert(2); neighbours.insert(3);
neighbours.insert(5); neighbours.insert(7);
neighbours.insert(9); neighbours.insert(10); neighbours.insert(11);
neighbours.insert(1); neighbours.insert(4); neighbours.insert(5);
neighbours.insert(3); neighbours.insert(7);
neighbours.insert(9); neighbours.insert(12); neighbours.insert(13);
break;
case 7:
neighbours.insert(2); neighbours.insert(3); neighbours.insert(6); neighbours.insert(10); neighbours.insert(11);
neighbours.insert(4); neighbours.insert(5); neighbours.insert(6); neighbours.insert(12); neighbours.insert(13);
break;
case 8:
neighbours.insert(4); neighbours.insert(5); neighbours.insert(9); neighbours.insert(12); neighbours.insert(13);
neighbours.insert(2); neighbours.insert(3); neighbours.insert(9); neighbours.insert(10); neighbours.insert(11);
break;
case 9:
neighbours.insert(4); neighbours.insert(5); neighbours.insert(6);
neighbours.insert(8); neighbours.insert(10);
neighbours.insert(12); neighbours.insert(13); neighbours.insert(14);
neighbours.insert(2); neighbours.insert(3); neighbours.insert(6);
neighbours.insert(8); neighbours.insert(12);
neighbours.insert(10); neighbours.insert(11); neighbours.insert(14);
break;
case 10:
neighbours.insert(5); neighbours.insert(6); neighbours.insert(7);
neighbours.insert(9); neighbours.insert(11);
neighbours.insert(13); neighbours.insert(14); neighbours.insert(15);
neighbours.insert(8); neighbours.insert(9); neighbours.insert(11);
break;
case 11:
neighbours.insert(6); neighbours.insert(7); neighbours.insert(10); neighbours.insert(14); neighbours.insert(15);
neighbours.insert(8); neighbours.insert(9); neighbours.insert(12); neighbours.insert(10); neighbours.insert(14);
break;
case 12:
neighbours.insert(8); neighbours.insert(9); neighbours.insert(13);
neighbours.insert(3); neighbours.insert(6); neighbours.insert(7);
neighbours.insert(9); neighbours.insert(13);
neighbours.insert(11); neighbours.insert(14); neighbours.insert(15);
break;
case 13:
neighbours.insert(8); neighbours.insert(9); neighbours.insert(10); neighbours.insert(12); neighbours.insert(14);
neighbours.insert(6); neighbours.insert(7); neighbours.insert(12); neighbours.insert(14); neighbours.insert(15);
break;
case 14:
neighbours.insert(9); neighbours.insert(10); neighbours.insert(11); neighbours.insert(13); neighbours.insert(15);
neighbours.insert(9); neighbours.insert(12); neighbours.insert(13); neighbours.insert(11); neighbours.insert(15);
break;
case 15:
neighbours.insert(10); neighbours.insert(11); neighbours.insert(14);
neighbours.insert(12); neighbours.insert(13); neighbours.insert(14);
break;
}
......@@ -844,31 +861,31 @@ namespace AMDiS {
Parameters::get("parallel->multi level test", multiLevelTest);
if (multiLevelTest) {
switch (mpiRank) {
case 0: case 1: case 4: case 5:
case 0: case 1: case 2: case 3:
{
std::set<int> m;
m.insert(0); m.insert(1); m.insert(4); m.insert(5);
m.insert(0); m.insert(1); m.insert(2); m.insert(3);
levelData.addLevel(m, 0);
}
break;
case 2: case 3: case 6: case 7:
case 4: case 5: case 6: case 7:
{
std::set<int> m;
m.insert(2); m.insert(3); m.insert(6); m.insert(7);
m.insert(4); m.insert(5); m.insert(6); m.insert(7);
levelData.addLevel(m, 1);
}
break;
case 8: case 9: case 12: case 13:
case 8: case 9: case 10: case 11:
{
std::set<int> m;
m.insert(8); m.insert(9); m.insert(12); m.insert(13);
m.insert(8); m.insert(9); m.insert(10); m.insert(11);
levelData.addLevel(m, 2);
}
break;
case 10: case 11: case 14: case 15:
case 12: case 13: case 14: case 15:
{
std::set<int> m;
m.insert(10); m.insert(11); m.insert(14); m.insert(15);
m.insert(12); m.insert(13); m.insert(14); m.insert(15);
levelData.addLevel(m, 3);
}
break;
......@@ -1672,7 +1689,8 @@ namespace AMDiS {
debug::writeElementIndexMesh(mesh, debugOutputDir + "elementIndex-" +
lexical_cast<string>(mpiRank) + ".vtu");
ParallelDebug::writeDebugFile(*this, debugOutputDir + "mpi-dbg", "dat");
ParallelDebug::writeDebugFile(feSpaces[feSpaces.size() - 1], dofMap,
debugOutputDir + "mpi-dbg", "dat");
debug::testSortedDofs(mesh, elMap);
ParallelDebug::testCommonDofs(*this, true);
ParallelDebug::testGlobalIndexByCoords(*this);
......@@ -1680,7 +1698,8 @@ namespace AMDiS {
int tmp = 0;
Parameters::get(name + "->write parallel debug file", tmp);
if (tmp)
ParallelDebug::writeDebugFile(*this, debugOutputDir + "mpi-dbg", "dat");
ParallelDebug::writeDebugFile(feSpaces[feSpaces.size() - 1], dofMap,
debugOutputDir + "mpi-dbg", "dat");
#endif
}
......
......@@ -254,12 +254,10 @@ namespace AMDiS {
stdMpi.getRecvData(it.getRank())[it.getDofCounter()];
}
/** \brief
* Works in the same way as the function above defined for DOFVectors. Due
* to performance, this function does not call \ref synchVector for each
* DOFVector, but instead sends all values of all DOFVectors all at once.
*/
void synchVector(SystemVector &vec);
/// Works in the same way as the function above defined for DOFVectors. Due
/// to performance, this function does not call \ref synchVector for each
/// DOFVector, but instead sends all values of all DOFVectors all at once.
void synchVector(SystemVector &vec, int level = 0);
void check3dValidMesh();
......
......@@ -70,11 +70,13 @@ namespace AMDiS {
void MeshLevelData::serialize(ostream &out)
{
ERROR_EXIT("Not yet implemented!\n");
}
void MeshLevelData::deserialize(istream &in)
{
ERROR_EXIT("Not yet implemented!\n");
}
......
......@@ -92,16 +92,16 @@ namespace AMDiS {
TEST_EXIT_DBG(level == 1 && rank <= 15)("Should not happen!\n");
TEST_EXIT_DBG(MPI::COMM_WORLD.Get_size() == 16)("Should not happen!\n");
if (rank == 0 || rank == 1 || rank == 4 || rank == 5)
if (rank == 0 || rank == 1 || rank == 2 || rank == 3)
return 0;
if (rank == 2 || rank == 3 || rank == 6 || rank == 7)
if (rank == 4 || rank == 5 || rank == 6 || rank == 7)
return 1;
if (rank == 8 || rank == 9 || rank == 12 || rank == 13)
if (rank == 8 || rank == 9 || rank == 10 || rank == 11)
return 2;
if (rank == 10 || rank == 11 || rank == 14 || rank == 15)
if (rank == 12 || rank == 13 || rank == 14 || rank == 15)
return 3;
ERROR_EXIT("Should not happen!\n");
......
......@@ -758,25 +758,27 @@ namespace AMDiS {
}
void ParallelDebug::writeDebugFile(MeshDistributor &pdb,
string prefix, string postfix)
void ParallelDebug::writeDebugFile(const FiniteElemSpace *feSpace,
ParallelDofMapping &dofMap,
string prefix,
string postfix)
{
FUNCNAME("ParallelDebug::writeCoordsFile()");
const FiniteElemSpace *feSpace = pdb.feSpaces[pdb.feSpaces.size() - 1];
Mesh *mesh = feSpace->getMesh();
stringstream filename;
filename << prefix << "-" << pdb.mpiRank << "." << postfix;
filename << prefix << "-" << MPI::COMM_WORLD.Get_rank() << "." << postfix;
DOFVector<WorldVector<double> > coords(feSpace, "tmp");
pdb.mesh->getDofIndexCoords(feSpace, coords);
mesh->getDofIndexCoords(feSpace, coords);
typedef map<int, vector<DegreeOfFreedom> > ElDofMap;
ElDofMap elDofMap;
TraverseStack stack;
const BasisFunction *basisFcts = feSpace->getBasisFcts();
vector<DegreeOfFreedom> localIndices(basisFcts->getNumber());
ElInfo *elInfo = stack.traverseFirst(pdb.mesh, -1, Mesh::CALL_LEAF_EL);
ElInfo *elInfo = stack.traverseFirst(mesh, -1, Mesh::CALL_LEAF_EL);
while (elInfo) {
basisFcts->getLocalIndices(elInfo->getElement(),
feSpace->getAdmin(), localIndices);
......@@ -789,16 +791,19 @@ namespace AMDiS {
ofstream file;
file.open(filename.str().c_str());
// file << "# First line contains number of DOFs, than each line has the format\n";
// file << "# Local DOF index Global DOF index Is rank DOF x-coord y-coord z-coord\n";
file << coords.getUsedSize() << "\n";
// file << "# DOF index Local DOF index Global DOF index Is rank DOF x-coord y-coord z-coord\n";
file << dofMap[feSpace].size() << "\n";
DOFIterator<WorldVector<double> > it(&coords, USED_DOFS);
for (it.reset(); !it.end(); ++it) {
file << it.getDOFIndex() << " "
<< pdb.dofMap[feSpace][it.getDOFIndex()].global << " "
<< pdb.dofMap[feSpace].isRankDof(it.getDOFIndex());
for (int i = 0; i < pdb.mesh->getDim(); i++)
file << " " << (*it)[i];
file << "\n";
if (dofMap[feSpace].isSet(it.getDOFIndex())) {
file << it.getDOFIndex() << " "
<< dofMap[feSpace][it.getDOFIndex()].local << " "
<< dofMap[feSpace][it.getDOFIndex()].global << " "
<< dofMap[feSpace].isRankDof(it.getDOFIndex());
for (int i = 0; i < mesh->getDim(); i++)
file << " " << (*it)[i];
file << "\n";
}
}
// === Write to all elements in ranks mesh the included dofs. ===
......
......@@ -27,14 +27,16 @@
#include "parallel/MeshDistributor.h"
namespace AMDiS {
using namespace std;
class ParallelDebug
{
protected:
typedef std::vector<WorldVector<double> > CoordsVec;
typedef vector<WorldVector<double> > CoordsVec;
/// Defines a mapping type from rank numbers to sets of coordinates.
typedef std::map<int, CoordsVec> RankToCoords;
typedef map<int, CoordsVec> RankToCoords;
public:
/** \brief
......@@ -154,8 +156,11 @@ namespace AMDiS {
int level = 0,
bool force = false);
static void writeDebugFile(MeshDistributor &pdb,
std::string prefix, std::string postfix);
static void writeDebugFile(const FiniteElemSpace *feSpace,
ParallelDofMapping &dofMap,
string prefix,
string postfix);
/** \brief
* This functions create a Paraview file with the macro mesh where the
......@@ -172,7 +177,7 @@ namespace AMDiS {
* added to the filename.
* \param[in] feSpace
*/
static void writePartitioningFile(std::string filename,
static void writePartitioningFile(string filename,
int counter,
const FiniteElemSpace *feSpace);
......
......@@ -330,14 +330,15 @@ namespace AMDiS {
if (fetiPreconditioner != FETI_NONE)
interiorDofMap.update();
for (unsigned int i = 0; i < meshDistributor->getFeSpaces().size(); i++) {
const FiniteElemSpace *feSpace = meshDistributor->getFeSpace(i);
createLagrange(feSpace);
}
lagrangeMap.update();
// === ===
if (meshLevel == 0) {
......@@ -415,12 +416,18 @@ namespace AMDiS {
it != vertices.end(); ++it) {
WorldVector<double> c;
feSpace->getMesh()->getDofIndexCoords(*it, feSpace, c);
MSG("COORD %f %f\n", c[0], c[1]);
double e = 1e-8;
if (fabs(c[0]) < e || fabs(c[1]) < e ||
fabs(c[0] - 25.0) < e || fabs(c[1] - 25.0) < e ||
(fabs(c[0] - 12.5) < e && fabs(c[1] - 12.5) < e))
if (meshLevel == 0) {
primals.insert(**it);
} else {
if (fabs(c[0]) < e || fabs(c[1]) < e ||
fabs(c[0] - 25.0) < e || fabs(c[1] - 25.0) < e ||
(fabs(c[0] - 12.5) < e && fabs(c[1] - 12.5) < e)) {
MSG("PRIMAL COORD %f %f\n", c[0], c[1]);
primals.insert(**it);
}
}
}
......@@ -462,24 +469,12 @@ namespace AMDiS {
boundaryDofRanks[feSpace].clear();
if (dualDofMap[feSpace].nLocalDofs == 0)
return;
// === Create for each dual node that is owned by the rank, the set ===
// === of ranks that contain this node (denoted by W(x_j)). ===
// Stores, for each rank-owned communication DOF, whether its counterpart is
// a rank-owned DOF in its subdomain. Thus, the following map stores, for
// each rank number, all DOFs that fulfill this requirement.
map<int, std::set<DegreeOfFreedom> > sdRankDofs;
if (meshLevel == 0) {
for (DofComm::Iterator it(meshDistributor->getDofComm().getSendDofs(),
feSpace);
!it.end(); it.nextRank()) {
for (; !it.endDofIter(); it.nextDof()) {
if (!isPrimal(feSpace, it.getDofIndex())) {
boundaryDofRanks[feSpace][it.getDofIndex()].insert(mpiRank);
boundaryDofRanks[feSpace][it.getDofIndex()].insert(it.getRank());
}
}
}
} else {
if (meshLevel > 0) {
StdMpi<vector<int> > stdMpi(mpiComm);
for (DofComm::Iterator it(meshDistributor->getDofComm().getRecvDofs(),
......@@ -490,16 +485,11 @@ namespace AMDiS {
subdomainRankDofs.reserve(it.getDofs().size());
for (; !it.endDofIter(); it.nextDof()) {
if (meshDistributor->getDofMapSd()[feSpace].isRankDof(it.getDofIndex())) {
MSG("IN SD DOF\n");
if (meshDistributor->getDofMapSd()[feSpace].isRankDof(it.getDofIndex()))
subdomainRankDofs.push_back(1);
} else {
MSG("NOT IN SD DOF\n");
else
subdomainRankDofs.push_back(0);
}
}
MSG("SEND SIZE %d TO RANK %d\n", subdomainRankDofs.size(), it.getRank());
stdMpi.send(it.getRank(), subdomainRankDofs);
}
......@@ -513,25 +503,35 @@ namespace AMDiS {