
Commit 369b2f8b authored by Thomas Witkowski

Moved debug code from ParallelDomainBase to the new class ParallelDomainDbg.

parent 5c58229d
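
In short, this commit replaces the dbg* member functions of ParallelDomainBase with free functions in the debug namespace (Debug.h/Debug.cc) and with static helpers on the new ParallelDomainDbg class. A minimal sketch of the resulting call-site change, using only names that appear in the diff below:

    // Before: the debug helpers were members of ParallelDomainBase.
    ElementIdxToDofs elMap;
    dbgCreateElementMap(elMap);
    dbgTestElementMap(elMap);
    dbgTestInteriorBoundary();
    dbgTestCommonDofs(true);

    // After: pure mesh checks live in namespace debug, while checks that need
    // the parallel state take the domain object as an explicit argument.
    debug::ElementIdxToDofs elMap;
    debug::createSortedDofs(mesh, elMap);
    debug::testSortedDofs(mesh, elMap);
    ParallelDomainDbg::testInteriorBoundary(*this);
    ParallelDomainDbg::testCommonDofs(*this, true);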
@@ -30,6 +30,7 @@ if USE_PARALLEL_DOMAIN_AMDIS
PARALLEL_AMDIS_SOURCES += \
$(SOURCE_DIR)/parallel/StdMpi.h $(SOURCE_DIR)/parallel/StdMpi.cc \
$(SOURCE_DIR)/parallel/ParallelDomainBase.h $(SOURCE_DIR)/parallel/ParallelDomainBase.cc \
$(SOURCE_DIR)/parallel/ParallelDomainDbg.h $(SOURCE_DIR)/parallel/ParallelDomainDbg.cc \
$(SOURCE_DIR)/parallel/GlobalMatrixSolver.h $(SOURCE_DIR)/parallel/GlobalMatrixSolver.cc \
$(SOURCE_DIR)/parallel/MpiHelper.h $(SOURCE_DIR)/parallel/MpiHelper.cc
libamdis_la_CXXFLAGS += -DHAVE_PARALLEL_DOMAIN_AMDIS=1
......
@@ -40,6 +40,7 @@ host_triplet = @host@
@USE_PARALLEL_DOMAIN_AMDIS_TRUE@am__append_2 = \
@USE_PARALLEL_DOMAIN_AMDIS_TRUE@ $(SOURCE_DIR)/parallel/StdMpi.h $(SOURCE_DIR)/parallel/StdMpi.cc \
@USE_PARALLEL_DOMAIN_AMDIS_TRUE@ $(SOURCE_DIR)/parallel/ParallelDomainBase.h $(SOURCE_DIR)/parallel/ParallelDomainBase.cc \
@USE_PARALLEL_DOMAIN_AMDIS_TRUE@ $(SOURCE_DIR)/parallel/ParallelDomainDbg.h $(SOURCE_DIR)/parallel/ParallelDomainDbg.cc \
@USE_PARALLEL_DOMAIN_AMDIS_TRUE@ $(SOURCE_DIR)/parallel/GlobalMatrixSolver.h $(SOURCE_DIR)/parallel/GlobalMatrixSolver.cc \
@USE_PARALLEL_DOMAIN_AMDIS_TRUE@ $(SOURCE_DIR)/parallel/MpiHelper.h $(SOURCE_DIR)/parallel/MpiHelper.cc
@@ -78,6 +79,8 @@ am__libamdis_la_SOURCES_DIST = $(SOURCE_DIR)/parallel/StdMpi.h \
$(SOURCE_DIR)/parallel/StdMpi.cc \
$(SOURCE_DIR)/parallel/ParallelDomainBase.h \
$(SOURCE_DIR)/parallel/ParallelDomainBase.cc \
$(SOURCE_DIR)/parallel/ParallelDomainDbg.h \
$(SOURCE_DIR)/parallel/ParallelDomainDbg.cc \
$(SOURCE_DIR)/parallel/GlobalMatrixSolver.h \
$(SOURCE_DIR)/parallel/GlobalMatrixSolver.cc \
$(SOURCE_DIR)/parallel/MpiHelper.h \
@@ -235,6 +238,7 @@ am__libamdis_la_SOURCES_DIST = $(SOURCE_DIR)/parallel/StdMpi.h \
$(SOURCE_DIR)/Debug.h $(SOURCE_DIR)/Debug.cc
@USE_PARALLEL_DOMAIN_AMDIS_TRUE@am__objects_1 = libamdis_la-StdMpi.lo \
@USE_PARALLEL_DOMAIN_AMDIS_TRUE@ libamdis_la-ParallelDomainBase.lo \
@USE_PARALLEL_DOMAIN_AMDIS_TRUE@ libamdis_la-ParallelDomainDbg.lo \
@USE_PARALLEL_DOMAIN_AMDIS_TRUE@ libamdis_la-GlobalMatrixSolver.lo \
@USE_PARALLEL_DOMAIN_AMDIS_TRUE@ libamdis_la-MpiHelper.lo
@USE_PARALLEL_AMDIS_FALSE@am__objects_2 = $(am__objects_1)
@@ -786,6 +790,7 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libamdis_la-Operator.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libamdis_la-ParMetisPartitioner.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libamdis_la-ParallelDomainBase.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libamdis_la-ParallelDomainDbg.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libamdis_la-ParallelProblem.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libamdis_la-Parameters.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libamdis_la-Parametric.Plo@am__quote@
@@ -874,6 +879,13 @@ libamdis_la-ParallelDomainBase.lo: $(SOURCE_DIR)/parallel/ParallelDomainBase.cc
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libamdis_la_CXXFLAGS) $(CXXFLAGS) -c -o libamdis_la-ParallelDomainBase.lo `test -f '$(SOURCE_DIR)/parallel/ParallelDomainBase.cc' || echo '$(srcdir)/'`$(SOURCE_DIR)/parallel/ParallelDomainBase.cc
libamdis_la-ParallelDomainDbg.lo: $(SOURCE_DIR)/parallel/ParallelDomainDbg.cc
@am__fastdepCXX_TRUE@ if $(LIBTOOL) --tag=CXX --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libamdis_la_CXXFLAGS) $(CXXFLAGS) -MT libamdis_la-ParallelDomainDbg.lo -MD -MP -MF "$(DEPDIR)/libamdis_la-ParallelDomainDbg.Tpo" -c -o libamdis_la-ParallelDomainDbg.lo `test -f '$(SOURCE_DIR)/parallel/ParallelDomainDbg.cc' || echo '$(srcdir)/'`$(SOURCE_DIR)/parallel/ParallelDomainDbg.cc; \
@am__fastdepCXX_TRUE@ then mv -f "$(DEPDIR)/libamdis_la-ParallelDomainDbg.Tpo" "$(DEPDIR)/libamdis_la-ParallelDomainDbg.Plo"; else rm -f "$(DEPDIR)/libamdis_la-ParallelDomainDbg.Tpo"; exit 1; fi
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(SOURCE_DIR)/parallel/ParallelDomainDbg.cc' object='libamdis_la-ParallelDomainDbg.lo' libtool=yes @AMDEPBACKSLASH@
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libamdis_la_CXXFLAGS) $(CXXFLAGS) -c -o libamdis_la-ParallelDomainDbg.lo `test -f '$(SOURCE_DIR)/parallel/ParallelDomainDbg.cc' || echo '$(srcdir)/'`$(SOURCE_DIR)/parallel/ParallelDomainDbg.cc
libamdis_la-GlobalMatrixSolver.lo: $(SOURCE_DIR)/parallel/GlobalMatrixSolver.cc
@am__fastdepCXX_TRUE@ if $(LIBTOOL) --tag=CXX --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libamdis_la_CXXFLAGS) $(CXXFLAGS) -MT libamdis_la-GlobalMatrixSolver.lo -MD -MP -MF "$(DEPDIR)/libamdis_la-GlobalMatrixSolver.Tpo" -c -o libamdis_la-GlobalMatrixSolver.lo `test -f '$(SOURCE_DIR)/parallel/GlobalMatrixSolver.cc' || echo '$(srcdir)/'`$(SOURCE_DIR)/parallel/GlobalMatrixSolver.cc; \
@am__fastdepCXX_TRUE@ then mv -f "$(DEPDIR)/libamdis_la-GlobalMatrixSolver.Tpo" "$(DEPDIR)/libamdis_la-GlobalMatrixSolver.Plo"; else rm -f "$(DEPDIR)/libamdis_la-GlobalMatrixSolver.Tpo"; exit 1; fi
......
@@ -22,6 +22,7 @@ namespace AMDiS {
VtkWriter::writeFile(tmp, "tmp" + lexical_cast<std::string>(elIdx) + ".vtu");
}
}
void writeDofMesh(int rank, DegreeOfFreedom dof, FiniteElemSpace *feSpace)
{
@@ -34,6 +35,7 @@ namespace AMDiS {
VtkWriter::writeFile(tmp, "dofmesh" + lexical_cast<std::string>(rank) + ".vtu");
}
}
void writeMesh(FiniteElemSpace *feSpace, int rank, std::string filename)
{
@@ -46,6 +48,7 @@ namespace AMDiS {
}
}
#endif
void colorDofVectorByLocalElementDofs(DOFVector<double>& vec, Element *el)
{
@@ -62,6 +65,7 @@ namespace AMDiS {
for (int i = 0; i < nBasisFcts; i++)
vec[localDofs[i]] = static_cast<double>(i);
}
bool colorDofVectorByLocalElementDofs(DOFVector<double>& vec, Mesh *mesh,
int elIndex)
@@ -80,6 +84,7 @@ namespace AMDiS {
return false;
}
Element* getDofIndexElement(FiniteElemSpace *feSpace, DegreeOfFreedom dof)
{
@@ -101,6 +106,7 @@ namespace AMDiS {
return NULL;
}
Element* getLevel0ParentElement(Mesh *mesh, Element *el)
{
@@ -115,6 +121,7 @@ namespace AMDiS {
return NULL;
}
void printInfoByDof(FiniteElemSpace *feSpace, DegreeOfFreedom dof)
{
@@ -137,6 +144,7 @@ namespace AMDiS {
}
}
void printMatValuesStatistics(Matrix<DOFMatrix*> *mat)
{
std::map<int, int> counter;
@@ -178,6 +186,7 @@ namespace AMDiS {
<< it->second << std::endl;
}
void writeElementIndexMesh(FiniteElemSpace *feSpace, std::string filename)
{
std::map<int, double> vec;
@@ -193,6 +202,7 @@ namespace AMDiS {
ElementFileWriter::writeFile(vec, feSpace, filename);
}
void writeMatlabMatrix(DOFMatrix &mat, std::string filename)
{
using mtl::tag::major; using mtl::tag::nz; using mtl::begin; using mtl::end;
@@ -219,6 +229,7 @@ namespace AMDiS {
out.close();
}
void writeMatlabVector(DOFVector<double> &vec, std::string filename)
{
std::ofstream out;
@@ -232,6 +243,106 @@ namespace AMDiS {
out.close();
}
void createSortedDofs(Mesh *mesh, ElementIdxToDofs &elMap)
{
FUNCNAME("Debug::dbgCreateElementMap()");
int dim = mesh->getDim();
elMap.clear();
TraverseStack stack;
ElInfo *elInfo = stack.traverseFirst(mesh, -1, Mesh::CALL_LEAF_EL);
while (elInfo) {
Element *el = elInfo->getElement();
switch (dim) {
case 2:
sortDofs(el->getDOF(0), el->getDOF(1), el->getDOF(2), elMap[el->getIndex()]);
break;
case 3:
sortDofs(el->getDOF(0), el->getDOF(1), el->getDOF(2), el->getDOF(3), elMap[el->getIndex()]);
break;
default:
ERROR_EXIT("What is this?\n");
}
elInfo = stack.traverseNext(elInfo);
}
}
void testSortedDofs(Mesh *mesh, ElementIdxToDofs &elMap)
{
FUNCNAME("Debug::dbgTestElementMap()");
int dim = mesh->getDim();
int nVertex = Global::getGeo(VERTEX, dim);
DofContainer vec(nVertex);
TraverseStack stack;
ElInfo *elInfo = stack.traverseFirst(mesh, -1, Mesh::CALL_LEAF_EL);
while (elInfo) {
Element *el = elInfo->getElement();
switch (dim) {
case 2:
sortDofs(el->getDOF(0), el->getDOF(1), el->getDOF(2), vec);
break;
case 3:
sortDofs(el->getDOF(0), el->getDOF(1), el->getDOF(2), el->getDOF(3), vec);
break;
default:
ERROR_EXIT("What is this?\n");
}
for (int i = 0; i < nVertex; i++) {
if (elMap[el->getIndex()][i] != vec[i]) {
MSG("[DBG] Wrong new dof numeration in element = %d\n!", el->getIndex());
std::cout << "[DBG]: Old numeration was: ";
for (int j = 0; j < nVertex; j++)
std::cout << elMap[el->getIndex()][j] << " = "
<< *(elMap[el->getIndex()][j]) << " ";
std::cout << std::endl;
std::cout << "[DBG]: New numeration is: ";
for (int j = 0; j < nVertex; j++)
std::cout << vec[j] << " = " << *(vec[j]) << " ";
std::cout << std::endl;
ERROR_EXIT("WRONG NEW DOF NUMERATION!\n");
}
}
elInfo = stack.traverseNext(elInfo);
}
}
void sortDofs(const DegreeOfFreedom* dof0,
const DegreeOfFreedom* dof1,
const DegreeOfFreedom* dof2,
DofContainer &vec)
{
DofPtrSortFct dofPtrSort;
vec.resize(3);
vec[0] = dof0;
vec[1] = dof1;
vec[2] = dof2;
sort(vec.begin(), vec.end(), dofPtrSort);
}
void sortDofs(const DegreeOfFreedom* dof0,
const DegreeOfFreedom* dof1,
const DegreeOfFreedom* dof2,
const DegreeOfFreedom* dof3,
DofContainer &vec)
{
DofPtrSortFct dofPtrSort;
vec.resize(4);
vec[0] = dof0;
vec[1] = dof1;
vec[2] = dof2;
vec[3] = dof3;
sort(vec.begin(), vec.end(), dofPtrSort);
}
} // namespace debug
} // namespace AMDiS
@@ -28,6 +28,16 @@
namespace AMDiS {
namespace debug {
struct DofPtrSortFct {
bool operator() (const DegreeOfFreedom *dof0, const DegreeOfFreedom *dof1)
{
return (*dof0 < *dof1);
}
};
typedef std::map<int, DofContainer> ElementIdxToDofs;
#ifdef HAVE_PARALLEL_DOMAIN_AMDIS
void writeLocalElementDofs(int rank, int elIdx, FiniteElemSpace *feSpace);
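
For illustration, DofPtrSortFct above orders DOF pointers by the values they point to rather than by their addresses. A hypothetical standalone snippet (assuming, as elsewhere in AMDiS, that DegreeOfFreedom is an integral type, DofContainer is a vector of const DegreeOfFreedom pointers, and <algorithm> provides std::sort):

    // Hypothetical example: three DOFs with values 7, 3, 5.
    DegreeOfFreedom a = 7, b = 3, c = 5;
    DofContainer vec;
    vec.push_back(&a);
    vec.push_back(&b);
    vec.push_back(&c);
    std::sort(vec.begin(), vec.end(), DofPtrSortFct());
    // vec now references the DOFs in value order: 3, 5, 7.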
@@ -55,6 +65,47 @@ namespace AMDiS {
void writeMatlabMatrix(DOFMatrix &mat, std::string filename);
void writeMatlabVector(DOFVector<double> &vec, std::string filename);
/** \brief
* Traverses a mesh and stores, for each element, all of its vertex DOFs in
* sorted order (sorted by DOF value).
*
* \param[in] mesh Mesh to be traversed.
* \param[out] elMap Stores for each element the vertex DOFs in sorted order.
*/
void createSortedDofs(Mesh *mesh, ElementIdxToDofs &elMap);
/** \brief
* Takes a map from element indices to lists of DOFs and checks whether, for each
* element in the mesh, the vertex value order is still valid.
*
* The element index map must be created by the function \ref createSortedDofs.
* Using both functions, one can verify that a renumbering of the DOFs does not
* change the local vertex value order (which AMDiS requires to be the same on
* every element).
*
* If the test fails, the function prints some debug information to the screen and
* terminates the program.
*
* \param[in] mesh Mesh to be traversed.
* \param[in] elMap Map from element indices to lists of DOFs. It is used to check
* the validity as described above.
*/
void testSortedDofs(Mesh *mesh, ElementIdxToDofs &elMap);
/// Takes three DOFs and returns a list with the DOFs sorted by their values.
void sortDofs(const DegreeOfFreedom* dof0,
const DegreeOfFreedom* dof1,
const DegreeOfFreedom* dof2,
DofContainer &vec);
/// Takes four DOFs and returns a list with the DOFs sorted by their values.
void sortDofs(const DegreeOfFreedom* dof0,
const DegreeOfFreedom* dof1,
const DegreeOfFreedom* dof2,
const DegreeOfFreedom* dof3,
DofContainer &vec);
}
}
......
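
As the comments above describe, createSortedDofs and testSortedDofs are meant to bracket any operation that renumbers DOFs. A minimal usage sketch, mirroring how ParallelDomainBase uses the pair elsewhere in this commit:

    #if (DEBUG != 0)
      debug::ElementIdxToDofs elMap;
      debug::createSortedDofs(mesh, elMap);  // snapshot the per-element vertex DOF order
    #endif

      updateLocalGlobalNumbering();          // any operation that renumbers DOFs

    #if (DEBUG != 0)
      debug::testSortedDofs(mesh, elMap);    // aborts if the vertex value order changed
    #endif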
@@ -374,8 +374,15 @@ namespace AMDiS {
VecSetSizes(petscTmpVec, nRankRows, nOverallRows);
VecSetType(petscTmpVec, VECMPI);
if (!d_nnz)
if (!d_nnz || lastMeshChangeIndex != lastMeshNnz) {
if (d_nnz) {
delete [] d_nnz;
delete [] o_nnz;
}
createPetscNnzStructure(mat);
lastMeshNnz = lastMeshChangeIndex;
}
// === Create PETSc matrix with the computed nnz data structure. ===
......
@@ -40,7 +40,8 @@ namespace AMDiS {
GlobalMatrixSolver(ProblemVec *problemStat, ProblemInstatVec *problemInstat)
: ParallelDomainBase(problemStat, problemInstat),
d_nnz(NULL),
o_nnz(NULL)
o_nnz(NULL),
lastMeshNnz(0)
{}
~GlobalMatrixSolver()
@@ -84,6 +85,14 @@ namespace AMDiS {
/// Arrays defining the nonzero pattern of the PETSc matrix.
int *d_nnz, *o_nnz;
/** \brief
* Stores the change index of the mesh for which the nnz structure was created.
* If the current mesh change index is higher than this value, the mesh, and with
* it the structure of the assembled matrix, has changed, so a new nnz structure
* must be created for the PETSc matrices.
*/
int lastMeshNnz;
};
typedef GlobalMatrixSolver ParallelDomainVec;
......
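
Together with the GlobalMatrixSolver.cc hunk above, lastMeshNnz implements a simple cache-invalidation rule for the nnz arrays. Condensed into a sketch (member and function names taken from this diff, surrounding assembly code elided):

    // On each assembly, rebuild the nnz pattern only if it has never been
    // computed, or if the mesh has changed since it was last computed.
    if (!d_nnz || lastMeshChangeIndex != lastMeshNnz) {
      if (d_nnz) {       // drop the stale pattern before recomputing
        delete [] d_nnz;
        delete [] o_nnz;
      }
      createPetscNnzStructure(mat);       // fills d_nnz and o_nnz
      lastMeshNnz = lastMeshChangeIndex;  // remember which mesh it belongs to
    }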
@@ -3,6 +3,7 @@
#include <fstream>
#include <boost/lexical_cast.hpp>
#include "parallel/ParallelDomainBase.h"
#include "parallel/ParallelDomainDbg.h"
#include "parallel/StdMpi.h"
#include "ParMetisPartitioner.h"
#include "Mesh.h"
@@ -116,8 +117,8 @@ namespace AMDiS {
partitionMesh(adaptInfo);
#if (DEBUG != 0)
ElementIdxToDofs elMap;
dbgCreateElementMap(elMap);
debug::ElementIdxToDofs elMap;
debug::createSortedDofs(mesh, elMap);
if (mpiRank == 0) {
int writePartMesh = 1;
GET_PARAMETER(0, "dbg->write part mesh", "%d", &writePartMesh);
@@ -149,9 +150,9 @@ namespace AMDiS {
#if (DEBUG != 0)
MSG("AMDiS runs in debug mode, so make some test ...\n");
dbgTestElementMap(elMap);
dbgTestInteriorBoundary();
dbgTestCommonDofs(true);
debug::testSortedDofs(mesh, elMap);
ParallelDomainDbg::testInteriorBoundary(*this);
ParallelDomainDbg::testCommonDofs(*this, true);
MSG("Debug mode tests finished!\n");
debug::writeMesh(feSpace, -1, "macromesh");
@@ -405,21 +406,6 @@ namespace AMDiS {
// === Because the mesh has been changed, update the DOF numbering and mappings. ===
updateLocalGlobalNumbering();
// === If there is a non zero pattern computed for Petsc matrix, delete it. So ===
// === it will be recomputed after next assembling. ===
#if 0
if (d_nnz) {
delete [] d_nnz;
d_nnz = NULL;
}
if (o_nnz) {
delete [] o_nnz;
o_nnz = NULL;
}
#endif
}
@@ -1424,8 +1410,8 @@ namespace AMDiS {
FUNCNAME("ParallelDomainBase::updateLocalGlobalNumbering()");
#if (DEBUG != 0)
ElementIdxToDofs elMap;
dbgCreateElementMap(elMap);
debug::ElementIdxToDofs elMap;
debug::createSortedDofs(mesh, elMap);
#endif
typedef std::set<const DegreeOfFreedom*> DofSet;
@@ -1602,8 +1588,8 @@ namespace AMDiS {
lastMeshChangeIndex = mesh->getChangeIndex();
#if (DEBUG != 0)
dbgTestElementMap(elMap);
dbgTestCommonDofs(true);
debug::testSortedDofs(mesh, elMap);
ParallelDomainDbg::testCommonDofs(*this, true);
#endif
}
@@ -1861,7 +1847,7 @@ namespace AMDiS {
Flag ParallelDomainBase::buildAndAdapt(AdaptInfo *adaptInfo, Flag toDo)
{
FUNCNAME("StandardProblemIteration::buildAndAdapt()");
FUNCNAME("ParallelDomainBase::buildAndAdapt()");
Flag flag = 0, markFlag = 0;
ProblemStatBase *problem = iterationIF->getProblem();
@@ -2002,364 +1988,8 @@ namespace AMDiS {
data[dof] = dofSet;
}
}
void ParallelDomainBase::dbgCreateElementMap(ElementIdxToDofs &elMap)
{
FUNCNAME("ParallelDomainBase::dbgCreateElementMap()");
int dim = mesh->getDim();
elMap.clear();
TraverseStack stack;
ElInfo *elInfo = stack.traverseFirst(mesh, -1, Mesh::CALL_LEAF_EL);
while (elInfo) {
Element *el = elInfo->getElement();
switch (dim) {
case 2:
orderDofs(el->getDOF(0), el->getDOF(1), el->getDOF(2), elMap[el->getIndex()]);
break;
case 3:
orderDofs(el->getDOF(0), el->getDOF(1), el->getDOF(2), el->getDOF(3), elMap[el->getIndex()]);
break;
default:
ERROR_EXIT("What is this?\n");
}
elInfo = stack.traverseNext(elInfo);
}
}
void ParallelDomainBase::dbgTestElementMap(ElementIdxToDofs &elMap)
{
FUNCNAME("ParallelDomainbase::dbgTestElementMap()");
int dim = mesh->getDim();
int nVertex = Global::getGeo(VERTEX, dim);
DofContainer vec(nVertex);
TraverseStack stack;
ElInfo *elInfo = stack.traverseFirst(mesh, -1, Mesh::CALL_LEAF_EL);
while (elInfo) {
Element *el = elInfo->getElement();
switch (dim) {
case 2:
orderDofs(el->getDOF(0), el->getDOF(1), el->getDOF(2), vec);
break;
case 3:
orderDofs(el->getDOF(0), el->getDOF(1), el->getDOF(2), el->getDOF(3), vec);
break;
default:
ERROR_EXIT("What is this?\n");
}
for (int i = 0; i < nVertex; i++) {
if (elMap[el->getIndex()][i] != vec[i]) {
std::cout << "[DBG " << mpiRank
<< "]: Wrong new dof numeration in element = "
<< el->getIndex() << std::endl;
std::cout << "[DBG " << mpiRank << "]: Old numeration was: ";
for (int j = 0; j < nVertex; j++)
std::cout << elMap[el->getIndex()][j] << " = "
<< *(elMap[el->getIndex()][j]) << " ";
std::cout << std::endl;
std::cout << "[DBG " << mpiRank << "]: New numeration is: ";
for (int j = 0; j < nVertex; j++)
std::cout << vec[j] << " = " << *(vec[j]) << " ";
std::cout << std::endl;
ERROR_EXIT("WRONG NEW DOF NUMERATION!\n");
}
}
elInfo = stack.traverseNext(elInfo);
}
}
void ParallelDomainBase::dbgTestInteriorBoundary()
{
FUNCNAME("ParallelDomainBase::dbgTestInteriorBoundary()");
std::vector<int*> sendBuffers, recvBuffers;
MPI::Request request[myIntBoundary.boundary.size() +
otherIntBoundary.boundary.size()];
int requestCounter = 0;
for (RankToBoundMap::iterator rankIt = myIntBoundary.boundary.begin();
rankIt != myIntBoundary.boundary.end(); ++rankIt) {
int nSendInt = rankIt->second.size();
int* buffer = new int[nSendInt];
for (int i = 0; i < nSendInt; i++)
buffer[i] = (rankIt->second)[i].rankObj.elIndex;
sendBuffers.push_back(buffer);
request[requestCounter++] =
mpiComm.Isend(buffer, nSendInt, MPI_INT, rankIt->first, 0);
}
for (RankToBoundMap::iterator rankIt = otherIntBoundary.boundary.begin();
rankIt != otherIntBoundary.boundary.end(); ++rankIt) {
int nRecvInt = rankIt->second.size();
int *buffer = new int[nRecvInt];
recvBuffers.push_back(buffer);
request[requestCounter++] =
mpiComm.Irecv(buffer, nRecvInt, MPI_INT, rankIt->first, 0);
}
MPI::Request::Waitall(requestCounter, request);
for (int i = 0; i < static_cast<int>(sendBuffers.size()); i++)
delete [] sendBuffers[i];
int bufCounter = 0;
for (RankToBoundMap::iterator rankIt = otherIntBoundary.boundary.begin();
rankIt != otherIntBoundary.boundary.end(); ++rankIt) {
TEST_EXIT(rankIt->second.size() == otherIntBoundary.boundary[rankIt->first].size())
("Boundaries does not fit together!\n");
for (int i = 0; i < static_cast<int>(rankIt->second.size()); i++) {
int elIndex1 = recvBuffers[bufCounter][i];
int elIndex2 = otherIntBoundary.boundary[rankIt->first][i].neighObj.elIndex;
TEST_EXIT(elIndex1 == elIndex2)("Wrong element index at interior boundary!\n");
}
delete [] recvBuffers[bufCounter++];
}
}
void ParallelDomainBase::dbgTestCommonDofs(bool printCoords)
{
FUNCNAME("ParallelDomainBase::dbgTestCommonDofs()");
clock_t first = clock();
int testCommonDofs = 1;
GET_PARAMETER(0, "dbg->test common dofs", "%d", &testCommonDofs);
if (testCommonDofs == 0) {
MSG("Skip test common dofs!\n");
return;
}
// Maps to each neighbour rank an array of WorldVectors. This array contains the
// coordinates of all DOFs this rank shares with that neighbour on the interior
// boundary. A rank sends the coordinates to another rank if it owns the
// boundary's DOFs.
RankToCoords sendCoords;
// A rank receives all boundary DOFs that lie on its interior boundaries but are
// not owned by it. This map stores, for each rank, the coordinates of the DOFs
// this rank expects to receive from it.
RankToCoords recvCoords;
DOFVector<WorldVector<double> > coords(feSpace, "dofCorrds");
mesh->getDofIndexCoords(feSpace, coords);
for (RankToDofContainer::iterator it = sendDofs.begin();
it != sendDofs.end(); ++it)
for (DofContainer::iterator dofIt = it->second.begin();
dofIt != it->second.end(); ++dofIt)
sendCoords[it->first].push_back(coords[**dofIt]);
for (RankToDofContainer::iterator it = recvDofs.begin();
it != recvDofs.end(); ++it)
for (DofContainer::iterator dofIt = it->second.begin();
dofIt != it->second.end(); ++dofIt)
recvCoords[it->first].push_back(coords[**dofIt]);
std::vector<int> sendSize(mpiSize, 0);
std::vector<int> recvSize(mpiSize, 0);
std::vector<int> recvSizeBuffer(mpiSize, 0);
MPI::Request request[(mpiSize - 1) * 2];