Liebe Gitlab-Nutzerin, lieber Gitlab-Nutzer,
es ist nun möglich, sich mittels des ZIH-Logins/LDAP an unserem Dienst anzumelden. Die Konten der externen Nutzer:innen sind über den Reiter "Standard" erreichbar.
Die Administratoren


Dear Gitlab user,
it is now possible to log in to our service using the ZIH login/LDAP. The accounts of external users can be accessed via the "Standard" tab.
The administrators

Commit 334a275f authored by Thomas Witkowski's avatar Thomas Witkowski
Browse files

Moved all general debug functions to Debug.h and Debug.cc

parent 73afa800
......@@ -216,6 +216,7 @@ $(SOURCE_DIR)/SubElInfo.h $(SOURCE_DIR)/SubElInfo.cc \
$(SOURCE_DIR)/SolutionDataStorage.h $(SOURCE_DIR)/SolutionDataStorage.hh \
$(SOURCE_DIR)/InteriorBoundary.h $(SOURCE_DIR)/InteriorBoundary.cc \
$(SOURCE_DIR)/ElementDofIteartor.h $(SOURCE_DIR)/ElementDofIterator.cc \
$(SOURCE_DIR)/Debug.h $(SOURCE_DIR)/Debug.cc \
$(SOURCE_DIR)/parareal/ProblemBase.h \
$(SOURCE_DIR)/parareal/AdaptParaReal.h $(SOURCE_DIR)/parareal/AdaptParaReal.cc
......
......@@ -225,8 +225,8 @@ am__libamdis_la_SOURCES_DIST = $(PARALLEL_DIR)/ParallelDomainBase.h \
$(SOURCE_DIR)/InteriorBoundary.h \
$(SOURCE_DIR)/InteriorBoundary.cc \
$(SOURCE_DIR)/ElementDofIteartor.h \
$(SOURCE_DIR)/ElementDofIterator.cc \
$(SOURCE_DIR)/parareal/ProblemBase.h \
$(SOURCE_DIR)/ElementDofIterator.cc $(SOURCE_DIR)/Debug.h \
$(SOURCE_DIR)/Debug.cc $(SOURCE_DIR)/parareal/ProblemBase.h \
$(SOURCE_DIR)/parareal/AdaptParaReal.h \
$(SOURCE_DIR)/parareal/AdaptParaReal.cc
@USE_PARALLEL_DOMAIN_AMDIS_TRUE@am__objects_1 = libamdis_la-ParallelDomainBase.lo \
......@@ -290,7 +290,8 @@ am_libamdis_la_OBJECTS = $(am__objects_2) libamdis_la-DOFIndexed.lo \
libamdis_la-PngWriter.lo libamdis_la-PovrayWriter.lo \
libamdis_la-DataCollector.lo libamdis_la-ScalableQuadrature.lo \
libamdis_la-SubElInfo.lo libamdis_la-InteriorBoundary.lo \
libamdis_la-ElementDofIterator.lo libamdis_la-AdaptParaReal.lo
libamdis_la-ElementDofIterator.lo libamdis_la-Debug.lo \
libamdis_la-AdaptParaReal.lo
libamdis_la_OBJECTS = $(am_libamdis_la_OBJECTS)
libcompositeFEM_la_LIBADD =
am_libcompositeFEM_la_OBJECTS = libcompositeFEM_la-CFE_Integration.lo \
......@@ -629,6 +630,7 @@ $(SOURCE_DIR)/SubElInfo.h $(SOURCE_DIR)/SubElInfo.cc \
$(SOURCE_DIR)/SolutionDataStorage.h $(SOURCE_DIR)/SolutionDataStorage.hh \
$(SOURCE_DIR)/InteriorBoundary.h $(SOURCE_DIR)/InteriorBoundary.cc \
$(SOURCE_DIR)/ElementDofIteartor.h $(SOURCE_DIR)/ElementDofIterator.cc \
$(SOURCE_DIR)/Debug.h $(SOURCE_DIR)/Debug.cc \
$(SOURCE_DIR)/parareal/ProblemBase.h \
$(SOURCE_DIR)/parareal/AdaptParaReal.h $(SOURCE_DIR)/parareal/AdaptParaReal.cc
......@@ -747,6 +749,7 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libamdis_la-DOFMatrix.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libamdis_la-DOFVector.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libamdis_la-DataCollector.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libamdis_la-Debug.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libamdis_la-DirichletBC.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libamdis_la-DualTraverse.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libamdis_la-ElInfo.Plo@am__quote@
......@@ -1532,6 +1535,13 @@ libamdis_la-ElementDofIterator.lo: $(SOURCE_DIR)/ElementDofIterator.cc
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libamdis_la_CXXFLAGS) $(CXXFLAGS) -c -o libamdis_la-ElementDofIterator.lo `test -f '$(SOURCE_DIR)/ElementDofIterator.cc' || echo '$(srcdir)/'`$(SOURCE_DIR)/ElementDofIterator.cc
libamdis_la-Debug.lo: $(SOURCE_DIR)/Debug.cc
@am__fastdepCXX_TRUE@ if $(LIBTOOL) --tag=CXX --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libamdis_la_CXXFLAGS) $(CXXFLAGS) -MT libamdis_la-Debug.lo -MD -MP -MF "$(DEPDIR)/libamdis_la-Debug.Tpo" -c -o libamdis_la-Debug.lo `test -f '$(SOURCE_DIR)/Debug.cc' || echo '$(srcdir)/'`$(SOURCE_DIR)/Debug.cc; \
@am__fastdepCXX_TRUE@ then mv -f "$(DEPDIR)/libamdis_la-Debug.Tpo" "$(DEPDIR)/libamdis_la-Debug.Plo"; else rm -f "$(DEPDIR)/libamdis_la-Debug.Tpo"; exit 1; fi
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(SOURCE_DIR)/Debug.cc' object='libamdis_la-Debug.lo' libtool=yes @AMDEPBACKSLASH@
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libamdis_la_CXXFLAGS) $(CXXFLAGS) -c -o libamdis_la-Debug.lo `test -f '$(SOURCE_DIR)/Debug.cc' || echo '$(srcdir)/'`$(SOURCE_DIR)/Debug.cc
libamdis_la-AdaptParaReal.lo: $(SOURCE_DIR)/parareal/AdaptParaReal.cc
@am__fastdepCXX_TRUE@ if $(LIBTOOL) --tag=CXX --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libamdis_la_CXXFLAGS) $(CXXFLAGS) -MT libamdis_la-AdaptParaReal.lo -MD -MP -MF "$(DEPDIR)/libamdis_la-AdaptParaReal.Tpo" -c -o libamdis_la-AdaptParaReal.lo `test -f '$(SOURCE_DIR)/parareal/AdaptParaReal.cc' || echo '$(srcdir)/'`$(SOURCE_DIR)/parareal/AdaptParaReal.cc; \
@am__fastdepCXX_TRUE@ then mv -f "$(DEPDIR)/libamdis_la-AdaptParaReal.Tpo" "$(DEPDIR)/libamdis_la-AdaptParaReal.Plo"; else rm -f "$(DEPDIR)/libamdis_la-AdaptParaReal.Tpo"; exit 1; fi
......
#include <boost/lexical_cast.hpp>
#include "Debug.h"
#include "DOFVector.h"
#include "MacroElement.h"
#include "VtkWriter.h"
namespace AMDiS {
namespace debug {
#ifdef HAVE_PARALLEL_DOMAIN_AMDIS
void writeLocalElementDofs(int rank, int elIdx, FiniteElemSpace *feSpace)
{
using boost::lexical_cast;
if (MPI::COMM_WORLD.Get_rank() == rank) {
DOFVector<double> tmp(feSpace, "tmp");
colorDofVectorByLocalElementDofs(tmp, feSpace->getMesh(), elIdx);
VtkWriter::writeFile(tmp, "tmp" + lexical_cast<std::string>(elIdx) + ".vtu");
}
}
void writeDofMesh(int rank, DegreeOfFreedom dof, FiniteElemSpace *feSpace)
{
using boost::lexical_cast;
if (MPI::COMM_WORLD.Get_rank() == rank) {
DOFVector<double> tmp(feSpace, "tmp");
tmp.set(0.0);
tmp[dof] = 1.0;
VtkWriter::writeFile(tmp, "dofmesh" + lexical_cast<std::string>(rank) + ".vtu");
}
}
// Debug helper: write the mesh of \p feSpace to "<filename><myRank>.vtu".
// If \p rank is -1 every rank writes its own file, otherwise only the
// given rank does.
void writeMesh(FiniteElemSpace *feSpace, int rank, std::string filename)
{
  int myRank = MPI::COMM_WORLD.Get_rank();
  bool thisRankWrites = (rank == -1 || myRank == rank);
  if (!thisRankWrites)
    return;

  // The dof vector is only a carrier so that VtkWriter has data to attach
  // to the mesh; its values are irrelevant here.
  DOFVector<double> carrier(feSpace, "tmp");
  VtkWriter::writeFile(carrier,
		       filename + boost::lexical_cast<std::string>(myRank) + ".vtu");
}
#endif
// Resets \p vec to 0.0 and then writes, for every local basis function of
// element \p el, the local index (0, 1, 2, ...) into the corresponding
// global dof entry of \p vec.
void colorDofVectorByLocalElementDofs(DOFVector<double>& vec, Element *el)
{
  const FiniteElemSpace *feSpace = vec.getFESpace();

  // === Get local indices of the given element. ===
  const BasisFunction *basisFcts = feSpace->getBasisFcts();
  int nBasisFcts = basisFcts->getNumber();
  std::vector<DegreeOfFreedom> elementDofs(nBasisFcts);
  basisFcts->getLocalIndices(el, feSpace->getAdmin(), elementDofs);

  // === Set the values of the dof vector. ===
  vec.set(0.0);
  for (int localIdx = 0; localIdx < nBasisFcts; localIdx++)
    vec[elementDofs[localIdx]] = static_cast<double>(localIdx);
}
// Searches the leaf element with index \p elIndex in \p mesh and, if found,
// colors \p vec by its local element dofs (see the Element* overload).
// Returns false when no leaf element carries the requested index.
bool colorDofVectorByLocalElementDofs(DOFVector<double>& vec, Mesh *mesh,
				      int elIndex)
{
  FUNCNAME("colorDofVectorByLocalElementDofs()");

  // Walk over all leaf elements until the requested index shows up.
  TraverseStack stack;
  for (ElInfo *elInfo = stack.traverseFirst(mesh, -1, Mesh::CALL_LEAF_EL);
       elInfo != NULL;
       elInfo = stack.traverseNext(elInfo)) {
    Element *element = elInfo->getElement();
    if (element->getIndex() == elIndex) {
      colorDofVectorByLocalElementDofs(vec, element);
      return true;
    }
  }

  return false;
}
// Returns the first element (in preorder traversal) whose local basis
// functions reference \p dof, or NULL if no element in the mesh does.
Element* getDofIndexElement(FiniteElemSpace *feSpace, DegreeOfFreedom dof)
{
  const BasisFunction* basFcts = feSpace->getBasisFcts();
  int nBasFcts = basFcts->getNumber();
  std::vector<DegreeOfFreedom> localDofs(nBasFcts);

  TraverseStack stack;
  for (ElInfo *elInfo = stack.traverseFirst(feSpace->getMesh(), -1,
					    Mesh::CALL_EVERY_EL_PREORDER);
       elInfo != NULL;
       elInfo = stack.traverseNext(elInfo)) {
    // Check whether any local dof of this element matches the searched one.
    basFcts->getLocalIndices(elInfo->getElement(), feSpace->getAdmin(), localDofs);
    for (int j = 0; j < nBasFcts; j++)
      if (localDofs[j] == dof)
	return elInfo->getElement();
  }

  return NULL;
}
// Returns the level-0 (macro) ancestor element of \p el, or NULL if \p el
// is not found in \p mesh.
Element* getLevel0ParentElement(Mesh *mesh, Element *el)
{
  TraverseStack stack;
  for (ElInfo *elInfo = stack.traverseFirst(mesh, -1, Mesh::CALL_EVERY_EL_PREORDER);
       elInfo != NULL;
       elInfo = stack.traverseNext(elInfo)) {
    // The macro element of the ElInfo that visits \p el is its level-0 parent.
    if (elInfo->getElement() == el)
      return elInfo->getMacroElement()->getElement();
  }

  return NULL;
}
// Prints debug information (element index, macro element index and the
// ElInfo types reaching the macro element) for the element that contains
// the given dof.
//
// Fix: both getDofIndexElement() and getLevel0ParentElement() return NULL
// when the dof/element cannot be found; the original code dereferenced the
// results unconditionally and crashed inside this debug helper.
void printInfoByDof(FiniteElemSpace *feSpace, DegreeOfFreedom dof)
{
  Element *el = getDofIndexElement(feSpace, dof);
  if (el == NULL) {
    std::cout << "DOF-INFO: dof = " << dof
	      << " not found in any element!" << std::endl;
    return;
  }

  Element *parEl = getLevel0ParentElement(feSpace->getMesh(), el);
  if (parEl == NULL) {
    std::cout << "DOF-INFO: dof = " << dof
	      << " elidx = " << el->getIndex()
	      << " has no level 0 parent element!" << std::endl;
    return;
  }

  std::cout << "DOF-INFO: dof = " << dof
	    << " elidx = " << el->getIndex()
	    << " pelidx = " << parEl->getIndex() << std::endl;

  // List all ElInfo types with which the macro element is reached.
  TraverseStack stack;
  ElInfo *elInfo = stack.traverseFirst(feSpace->getMesh(), -1,
				       Mesh::CALL_EVERY_EL_PREORDER);
  while (elInfo) {
    if (elInfo->getElement()->getIndex() == parEl->getIndex())
      std::cout << "EL INFO TO " << parEl->getIndex() << ": "
		<< elInfo->getType() << std::endl;

    elInfo = stack.traverseNext(elInfo);
  }
}
} // namespace debug
} // namespace AMDiS
// ============================================================================
// == ==
// == AMDiS - Adaptive multidimensional simulations ==
// == ==
// ============================================================================
// == ==
// == TU Dresden ==
// == ==
// ==  Institut für Wissenschaftliches Rechnen                               ==
// == Zellescher Weg 12-14 ==
// == 01069 Dresden ==
// == germany ==
// == ==
// ============================================================================
// == ==
// == https://gforge.zih.tu-dresden.de/projects/amdis/ ==
// == ==
// ============================================================================
/** \file Debug.h
 *
 * Free helper functions for debugging AMDiS meshes and DOF vectors:
 * dumping meshes and dof indicators to VTU files and locating elements
 * by index or by dof.
 */
#ifndef AMDIS_DEBUG_H
#define AMDIS_DEBUG_H
#include "AMDiS_fwd.h"
#include "Global.h"
namespace AMDiS {
namespace debug {
#ifdef HAVE_PARALLEL_DOMAIN_AMDIS
// On MPI rank \p rank only: writes "tmp<elIdx>.vtu" coloring the local
// dofs of element \p elIdx.
void writeLocalElementDofs(int rank, int elIdx, FiniteElemSpace *feSpace);
// Writes the mesh of \p feSpace to "<filename><rank>.vtu"; if \p rank is
// -1, every rank writes its own file.
void writeMesh(FiniteElemSpace *feSpace, int rank, std::string filename);
// On MPI rank \p rank only: writes "dofmesh<rank>.vtu" with value 1.0 at
// \p dof and 0.0 elsewhere.
void writeDofMesh(int rank, DegreeOfFreedom dof, FiniteElemSpace *feSpace);
#endif
// Resets \p vec to 0.0 and colors the local dofs of \p el by their local
// index (0, 1, 2, ...).
void colorDofVectorByLocalElementDofs(DOFVector<double>& vec, Element *el);
// As above, but first searches the leaf element with index \p elIndex in
// \p mesh; returns false when no such leaf element exists.
bool colorDofVectorByLocalElementDofs(DOFVector<double>& vec,
                                      Mesh *mesh,
                                      int elIndex);
// Returns an element whose local basis functions reference \p dof, or
// NULL if none is found.
Element* getDofIndexElement(FiniteElemSpace *feSpace, DegreeOfFreedom dof);
// Returns the level-0 (macro) ancestor element of \p el, or NULL if \p el
// is not part of \p mesh.
Element* getLevel0ParentElement(Mesh *mesh, Element *el);
// Prints element and macro-element debug info for the element containing
// \p dof.
void printInfoByDof(FiniteElemSpace *feSpace, DegreeOfFreedom dof);
} // namespace debug
} // namespace AMDiS
#endif
#include <algorithm>
#include <boost/lexical_cast.hpp>
#include "ParallelDomainBase.h"
#include "ParMetisPartitioner.h"
......@@ -20,6 +19,7 @@
#include "VertexVector.h"
#include "StdMpi.h"
#include "MeshStructure.h"
#include "Debug.h"
#include "petscksp.h"
......@@ -119,6 +119,10 @@ namespace AMDiS {
removeMacroElements();
// === Reset all DOFAdmins of the mesh. ===
updateDofAdmins();
// === If in debug mode, make some tests. ===
#if (DEBUG != 0)
......@@ -127,11 +131,9 @@ namespace AMDiS {
dbgTestInteriorBoundary();
dbgTestCommonDofs(true);
MSG("Debug mode tests finished!\n");
#endif
// === Reset all DOFAdmins of the mesh. ===
updateDofAdmins();
debug::writeMesh(feSpace, -1, "macromesh");
#endif
// === Create periodic dof mapping, if there are periodic boundaries. ===
......@@ -167,17 +169,17 @@ namespace AMDiS {
// There must be always more allocated DOFs than used DOFs in DOFAdmin. Otherwise,
// it is not possible to define a first DOF hole.
if (static_cast<unsigned int>(admin.getSize()) == mapLocalGlobalDOFs.size())
if (static_cast<unsigned int>(admin.getSize()) == mapLocalGlobalDofs.size())
admin.enlargeDOFLists();
for (int j = 0; j < admin.getSize(); j++)
admin.setDOFFree(j, true);
for (unsigned int j = 0; j < mapLocalGlobalDOFs.size(); j++)
for (unsigned int j = 0; j < mapLocalGlobalDofs.size(); j++)
admin.setDOFFree(j, false);
admin.setUsedSize(mapLocalGlobalDOFs.size());
admin.setUsedCount(mapLocalGlobalDOFs.size());
admin.setFirstHole(mapLocalGlobalDOFs.size());
admin.setUsedSize(mapLocalGlobalDofs.size());
admin.setUsedCount(mapLocalGlobalDofs.size());
admin.setFirstHole(mapLocalGlobalDofs.size());
}
}
......@@ -236,7 +238,7 @@ namespace AMDiS {
values.clear();
// Global index of the current row dof.
int globalRowDof = mapLocalGlobalDOFs[*cursor];
int globalRowDof = mapLocalGlobalDofs[*cursor];
// Test if the current row dof is a periodic dof.
bool periodicRow = (periodicDof.count(globalRowDof) > 0);
......@@ -251,7 +253,7 @@ namespace AMDiS {
// Set only non null values.
if (value(*icursor) != 0.0) {
// Global index of the current column index.
int globalColDof = mapLocalGlobalDOFs[col(*icursor)];
int globalColDof = mapLocalGlobalDofs[col(*icursor)];
// Calculate the exact position of the column index in the petsc matrix.
int colIndex = globalColDof * dispMult + dispAddCol;
......@@ -337,7 +339,7 @@ namespace AMDiS {
DOFVector<double>::Iterator dofIt(vec, USED_DOFS);
for (dofIt.reset(); !dofIt.end(); ++dofIt) {
// Calculate global row index of the dof.
int globalRow = mapLocalGlobalDOFs[dofIt.getDOFIndex()];
int globalRow = mapLocalGlobalDofs[dofIt.getDOFIndex()];
// Calculate petsc index of the row dof.
int index = globalRow * dispMult + dispAdd;
......@@ -368,6 +370,8 @@ namespace AMDiS {
clock_t first = clock();
// === Create PETSc vector (rhs, solution and a temporary vector). ===
VecCreate(PETSC_COMM_WORLD, &petscRhsVec);
VecSetSizes(petscRhsVec, nRankRows, nOverallRows);
VecSetType(petscRhsVec, VECMPI);
......@@ -381,19 +385,25 @@ namespace AMDiS {
VecSetType(petscTmpVec, VECMPI);
using mtl::tag::row; using mtl::tag::nz; using mtl::begin; using mtl::end;
namespace traits= mtl::traits;
namespace traits = mtl::traits;
typedef DOFMatrix::base_matrix_type Matrix;
typedef std::vector<std::pair<int, int> > MatrixNnzEntry;
int d_nnz[nRankRows];
int o_nnz[nRankRows];
std::map<int, std::vector< std::pair<int, int> > > sendMatrixEntry;
for (int i = 0; i < nRankRows; i++) {
d_nnz[i] = 0;
o_nnz[i] = 0;
}
MSG("Is rank DOF: %d\n", isRankDof[6717 - rstart * nComponents]);
// Stores to each rank a list of nnz entries (i.e. pairs of row and column index)
// that this rank will send to. This nnz entries will be assembled on this rank,
// but because the row DOFs are not DOFs of this rank they will be send to the
// owner of the row DOFs.
std::map<int, MatrixNnzEntry> sendMatrixEntry;
for (int i = 0; i < nComponents; i++) {
for (int j = 0; j < nComponents; j++) {
if ((*mat)[i][j]) {
......@@ -408,41 +418,63 @@ namespace AMDiS {
for (cursor_type cursor = begin<row>(bmat),
cend = end<row>(bmat); cursor != cend; ++cursor) {
int r = mapLocalGlobalDOFs[*cursor] * nComponents + i;
// Map the local row number to the global DOF index and create from it
// the global PETSc row index of this DOF.
int petscRowIdx = mapLocalGlobalDofs[*cursor] * nComponents + i;
if (isRankDof[*cursor]) {
r -= rstart * nComponents;
// === The current row DOF is a rank dof, so create the corresponding ===
// === nnz values directly on rank's nnz data. ===
// This is the local row index of the local PETSc matrix.
int localPetscRowIdx = petscRowIdx - rstart * nComponents;
#if (DEBUG != 0)
if (r < 0 || r >= nRankRows) {
if (localPetscRowIdx < 0 || localPetscRowIdx >= nRankRows) {
std::cout << "ERROR in rank: " << mpiRank << std::endl;
std::cout << " Wrong r = " << r << " " << *cursor << " "
<< mapLocalGlobalDOFs[*cursor] << " "
std::cout << " Wrong r = " << localPetscRowIdx << " " << *cursor
<< " " << mapLocalGlobalDofs[*cursor] << " "
<< nRankRows << std::endl;
ERROR_EXIT("Should not happen!\n");
}
#endif
// Traverse all non zero entries in this row.
for (icursor_type icursor = begin<nz>(cursor),
icend = end<nz>(cursor); icursor != icend; ++icursor) {
if (value(*icursor) != 0.0) {
int c = mapLocalGlobalDOFs[col(*icursor)] * nComponents + j;
int petscColIdx = mapLocalGlobalDofs[col(*icursor)] * nComponents + j;
if (c >= rstart * nComponents &&
c < rstart * nComponents + nRankRows)
d_nnz[r]++;
// The row DOF is a rank DOF, if also the column is a rank DOF,
// increment the d_nnz values for this row, otherwise the o_nnz value.
if (petscColIdx >= rstart * nComponents &&
petscColIdx < rstart * nComponents + nRankRows)
d_nnz[localPetscRowIdx]++;
else
o_nnz[r]++;
o_nnz[localPetscRowIdx]++;
}
}
} else {
int sendToRank = -1;
// === The current row DOF is not a rank dof, i.e., it will be created ===
// === on this rank, but after this it will be send to another rank ===
// === matrix. So we need to send also the corresponding nnz structure ===
// === of this row to the corresponding rank. ===
// Find out who is the member of this DOF.
int sendToRank = -1;
for (RankToDofContainer::iterator it = recvDofs.begin();
it != recvDofs.end(); ++it) {
for (DofContainer::iterator dofIt = it->second.begin();
dofIt != it->second.end(); ++dofIt) {
if (**dofIt == *cursor) {
if (petscRowIdx == 6717) {
debug::writeDofMesh(mpiRank, *cursor, feSpace);
MSG("SEND DOF TO: %d/%d\n", it->first, *cursor);
}
sendToRank = it->first;
break;
}
......@@ -454,117 +486,56 @@ namespace AMDiS {
TEST_EXIT_DBG(sendToRank != -1)("Should not happen!\n");
// Send all non zero entries to the member of the row DOF.
for (icursor_type icursor = begin<nz>(cursor),
icend = end<nz>(cursor); icursor != icend; ++icursor) {
if (value(*icursor) != 0.0) {
int c = mapLocalGlobalDOFs[col(*icursor)] * nComponents + j;
int petscColIdx = mapLocalGlobalDofs[col(*icursor)] * nComponents + j;
sendMatrixEntry[sendToRank].push_back(std::make_pair(r, c));
sendMatrixEntry[sendToRank].
push_back(std::make_pair(petscRowIdx, petscColIdx));
}
}
}
}
}
}
}
MPI::Request request[sendDofs.size() + recvDofs.size()];
int requestCounter = 0;
std::vector<int*> sendBuffers;
sendBuffers.reserve(recvDofs.size());
for (RankToDofContainer::iterator it = recvDofs.begin();
it != recvDofs.end(); ++it) {
int nSend = sendMatrixEntry[it->first].size();
request[requestCounter++] = mpiComm.Isend(&nSend, 1, MPI_INT, it->first, 0);
if (nSend > 0) {
int *sendbuf = new int[nSend * 2];
for (int i = 0; i < nSend; i++) {
sendbuf[i * 2] = sendMatrixEntry[it->first][i].first;
sendbuf[i * 2 + 1] = sendMatrixEntry[it->first][i].second;
}
sendBuffers.push_back(sendbuf);
} else {
sendBuffers.push_back(NULL);
}
}
std::vector<int> recvSize;
recvSize.reserve(sendDofs.size());
int i = 0;
for (RankToDofContainer::iterator it = sendDofs.begin();
it != sendDofs.end(); ++it)
request[requestCounter++] =
mpiComm.Irecv(&(recvSize[i++]), 1, MPI_INT, it->first, 0);
MPI::Request::Waitall(requestCounter, request);
requestCounter = 0;
i = 0;
for (RankToDofContainer::iterator it = recvDofs.begin();
it != recvDofs.end(); ++it) {
int nSend = sendMatrixEntry[it->first].size();
if (nSend > 0)
request[requestCounter++] =
mpiComm.Isend(sendBuffers[i], nSend * 2, MPI_INT, it->first, 0);
i++;
} // if (isRankDof[*cursor]) ... else ...
} // for each row in mat[i][j]
} // if mat[i][j]
}
}
std::vector<int*> recvBuffers;
recvBuffers.reserve(sendDofs.size());
i = 0;
for (RankToDofContainer::iterator it = sendDofs.begin();
it != sendDofs.end(); ++it) {
if (recvSize[i] > 0) {
int *recvbuf = new int[recvSize[i] * 2];
recvBuffers.push_back(recvbuf);
// === Send and recv the nnz row structure to/from other ranks. ===
request[requestCounter++] =
mpiComm.Irecv(recvbuf, recvSize[i] * 2, MPI_INT, it->first, 0);
} else {
recvBuffers.push_back(NULL);
}
i++;
}
MPI::Request::Waitall(requestCounter, request);
StdMpi<MatrixNnzEntry, MatrixNnzEntry> stdMpi(mpiComm);
stdMpi.prepareCommunication(true);
stdMpi.send(sendMatrixEntry);
stdMpi.recv(sendDofs);
stdMpi.startCommunication<int>(MPI_INT);
for (int j = 0; j < static_cast<int>(sendBuffers.size()); j++)
if (sendBuffers[j])
delete [] sendBuffers[j];
// === Evaluate the nnz structure this rank got from other ranks and add it to ===
// === the PETSc nnz data structure. ===
i = 0;
for (RankToDofContainer::iterator it = sendDofs.begin();
it != sendDofs.end(); ++it) {
if (recvSize[i] > 0) {
for (int j = 0; j < recvSize[i]; j++) {
int r = recvBuffers[i][j * 2];
int c = recvBuffers[i][j * 2 + 1];
for (std::map<int, MatrixNnzEntry>::iterator it = stdMpi.getRecvData().begin();
it != stdMpi.getRecvData().end(); ++it) {
if (it->second.size() > 0) {
for (unsigned int i = 0; i < it->second.size(); i++) {
int r = it->second[i].first;
int c = it->second[i].second;
r -= rstart * nComponents;
int localRowIdx = r - rstart * nComponents;
TEST_EXIT_DBG(r >= 0 && r < nRankRows)("Should not happen!\n");
TEST_EXIT_DBG(localRowIdx >= 0 && localRowIdx < nRankRows)
("Got row index %d/%d (nRankRows = %d) from rank %d. Should not happen!\n",
r, localRowIdx, nRankRows, it->first);
if (c < rstart * nComponents || c >= rstart * nComponents + nRankRows)
o_nnz[r]++;
o_nnz[localRowIdx]++;
else
d_nnz[r]++;
d_nnz[localRowIdx]++;
}
delete [] recvBuffers[i];
}
}
i++;
}
// === Create PETSc matrix with the computed nnz data structure. ===
MatCreateMPIAIJ(PETSC_COMM_WORLD, nRankRows, nRankRows, nOverallRows, nOverallRows,
0, d_nnz, 0, o_nnz, &petscMatrix);
......@@ -576,9 +547,7 @@ namespace AMDiS {
TEST_EXIT(b == rstart * nComponents + nRankRows)("Wrong matrix ownership range!\n");
#endif
using mtl::tag::major; using mtl::tag::nz; using mtl::begin; using mtl::end;
namespace traits= mtl::traits;
typedef DOFMatrix::base_matrix_type Matrix;
// === Transfer values from DOF matrices to the PETSc matrix. ===
for (int i = 0; i < nComponents; i++)
for (int j = 0; j < nComponents; j++)
......@@ -588,6 +557,8 @@ namespace AMDiS {
MatAssemblyBegin(petscMatrix, MAT_FINAL_ASSEMBLY);
MatAssemblyEnd(petscMatrix, MAT_FINAL_ASSEMBLY);
// === Transfer values from DOF vector to the PETSc vector. ===
for (int i = 0; i < nComponents; i++)
setDofVector(petscRhsVec, vec->getDOFVector(i), nComponents, i);
......@@ -789,7 +760,7 @@ namespace AMDiS {
#if (DEBUG != 0)
writeMesh(feSpace);
debug::writeMesh(feSpace, -1, "mesh");
#endif
INFO(info, 8)("Parallel mesh adaption needed %.5f seconds\n",
......@@ -798,6 +769,8 @@ namespace AMDiS {