Liebe Gitlab-Nutzerinnen, liebe Gitlab-Nutzer,
es ist nun möglich, sich mittels des ZIH-Logins/LDAP an unserem Dienst anzumelden. Die Konten der externen Nutzer:innen sind über den Reiter "Standard" erreichbar.
Die Administratoren


Dear Gitlab user,
it is now possible to log in to our service using the ZIH login/LDAP. The accounts of external users can be accessed via the "Standard" tab.
The administrators

Commit 77275071 authored by Thomas Witkowski
Browse files

Code refactoring of parallelization code.

parent 23eb48b5
......@@ -29,7 +29,8 @@ endif
if USE_PARALLEL_DOMAIN_AMDIS
PARALLEL_AMDIS_SOURCES += \
$(PARALLEL_DIR)/ParallelDomainProblem.h $(PARALLEL_DIR)/ParallelDomainProblem.cc
$(PARALLEL_DIR)/ParallelDomainBase.h $(PARALLEL_DIR)/ParallelDomainBase.cc \
$(PARALLEL_DIR)/ParallelDomainScal.h $(PARALLEL_DIR)/ParallelDomainScal.cc
libamdis_la_CXXFLAGS += -DHAVE_PARALLEL_DOMAIN_AMDIS=1
AMDIS_INCLUDES += -I/u/witkowski/local/petsc-3.0.0-p4/include -I/u/witkowski/local/petsc-3.0.0-p4/linux-gnu-c-debug/include
endif
......
......@@ -38,7 +38,8 @@ build_triplet = @build@
host_triplet = @host@
@USE_PARALLEL_AMDIS_TRUE@am__append_1 = -DHAVE_PARALLEL_AMDIS=1
@USE_PARALLEL_DOMAIN_AMDIS_TRUE@am__append_2 = \
@USE_PARALLEL_DOMAIN_AMDIS_TRUE@ $(PARALLEL_DIR)/ParallelDomainProblem.h $(PARALLEL_DIR)/ParallelDomainProblem.cc
@USE_PARALLEL_DOMAIN_AMDIS_TRUE@ $(PARALLEL_DIR)/ParallelDomainBase.h $(PARALLEL_DIR)/ParallelDomainBase.cc \
@USE_PARALLEL_DOMAIN_AMDIS_TRUE@ $(PARALLEL_DIR)/ParallelDomainScal.h $(PARALLEL_DIR)/ParallelDomainScal.cc
@USE_PARALLEL_DOMAIN_AMDIS_TRUE@am__append_3 = -DHAVE_PARALLEL_DOMAIN_AMDIS=1
@USE_PARALLEL_DOMAIN_AMDIS_TRUE@am__append_4 = -I/u/witkowski/local/petsc-3.0.0-p4/include -I/u/witkowski/local/petsc-3.0.0-p4/linux-gnu-c-debug/include
......@@ -71,9 +72,10 @@ am__installdirs = "$(DESTDIR)$(libdir)"
libLTLIBRARIES_INSTALL = $(INSTALL)
LTLIBRARIES = $(lib_LTLIBRARIES)
libamdis_la_LIBADD =
am__libamdis_la_SOURCES_DIST = \
$(PARALLEL_DIR)/ParallelDomainProblem.h \
$(PARALLEL_DIR)/ParallelDomainProblem.cc \
am__libamdis_la_SOURCES_DIST = $(PARALLEL_DIR)/ParallelDomainBase.h \
$(PARALLEL_DIR)/ParallelDomainBase.cc \
$(PARALLEL_DIR)/ParallelDomainScal.h \
$(PARALLEL_DIR)/ParallelDomainScal.cc \
$(PARALLEL_DIR)/ConditionalEstimator.h \
$(PARALLEL_DIR)/ConditionalEstimator.cc \
$(PARALLEL_DIR)/ConditionalMarker.h \
......@@ -224,7 +226,8 @@ am__libamdis_la_SOURCES_DIST = \
$(SOURCE_DIR)/parareal/ProblemBase.h \
$(SOURCE_DIR)/parareal/AdaptParaReal.h \
$(SOURCE_DIR)/parareal/AdaptParaReal.cc
@USE_PARALLEL_DOMAIN_AMDIS_TRUE@am__objects_1 = libamdis_la-ParallelDomainProblem.lo
@USE_PARALLEL_DOMAIN_AMDIS_TRUE@am__objects_1 = libamdis_la-ParallelDomainBase.lo \
@USE_PARALLEL_DOMAIN_AMDIS_TRUE@ libamdis_la-ParallelDomainScal.lo
@USE_PARALLEL_AMDIS_FALSE@am__objects_2 = $(am__objects_1)
@USE_PARALLEL_AMDIS_TRUE@am__objects_2 = \
@USE_PARALLEL_AMDIS_TRUE@ libamdis_la-ConditionalEstimator.lo \
......@@ -754,7 +757,8 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libamdis_la-NonLinUpdater.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libamdis_la-Operator.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libamdis_la-ParMetisPartitioner.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libamdis_la-ParallelDomainProblem.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libamdis_la-ParallelDomainBase.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libamdis_la-ParallelDomainScal.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libamdis_la-ParallelProblem.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libamdis_la-Parameters.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libamdis_la-Parametric.Plo@am__quote@
......@@ -825,12 +829,19 @@ distclean-compile:
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $<
libamdis_la-ParallelDomainProblem.lo: $(PARALLEL_DIR)/ParallelDomainProblem.cc
@am__fastdepCXX_TRUE@ if $(LIBTOOL) --tag=CXX --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libamdis_la_CXXFLAGS) $(CXXFLAGS) -MT libamdis_la-ParallelDomainProblem.lo -MD -MP -MF "$(DEPDIR)/libamdis_la-ParallelDomainProblem.Tpo" -c -o libamdis_la-ParallelDomainProblem.lo `test -f '$(PARALLEL_DIR)/ParallelDomainProblem.cc' || echo '$(srcdir)/'`$(PARALLEL_DIR)/ParallelDomainProblem.cc; \
@am__fastdepCXX_TRUE@ then mv -f "$(DEPDIR)/libamdis_la-ParallelDomainProblem.Tpo" "$(DEPDIR)/libamdis_la-ParallelDomainProblem.Plo"; else rm -f "$(DEPDIR)/libamdis_la-ParallelDomainProblem.Tpo"; exit 1; fi
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(PARALLEL_DIR)/ParallelDomainProblem.cc' object='libamdis_la-ParallelDomainProblem.lo' libtool=yes @AMDEPBACKSLASH@
libamdis_la-ParallelDomainBase.lo: $(PARALLEL_DIR)/ParallelDomainBase.cc
@am__fastdepCXX_TRUE@ if $(LIBTOOL) --tag=CXX --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libamdis_la_CXXFLAGS) $(CXXFLAGS) -MT libamdis_la-ParallelDomainBase.lo -MD -MP -MF "$(DEPDIR)/libamdis_la-ParallelDomainBase.Tpo" -c -o libamdis_la-ParallelDomainBase.lo `test -f '$(PARALLEL_DIR)/ParallelDomainBase.cc' || echo '$(srcdir)/'`$(PARALLEL_DIR)/ParallelDomainBase.cc; \
@am__fastdepCXX_TRUE@ then mv -f "$(DEPDIR)/libamdis_la-ParallelDomainBase.Tpo" "$(DEPDIR)/libamdis_la-ParallelDomainBase.Plo"; else rm -f "$(DEPDIR)/libamdis_la-ParallelDomainBase.Tpo"; exit 1; fi
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(PARALLEL_DIR)/ParallelDomainBase.cc' object='libamdis_la-ParallelDomainBase.lo' libtool=yes @AMDEPBACKSLASH@
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libamdis_la_CXXFLAGS) $(CXXFLAGS) -c -o libamdis_la-ParallelDomainProblem.lo `test -f '$(PARALLEL_DIR)/ParallelDomainProblem.cc' || echo '$(srcdir)/'`$(PARALLEL_DIR)/ParallelDomainProblem.cc
@am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libamdis_la_CXXFLAGS) $(CXXFLAGS) -c -o libamdis_la-ParallelDomainBase.lo `test -f '$(PARALLEL_DIR)/ParallelDomainBase.cc' || echo '$(srcdir)/'`$(PARALLEL_DIR)/ParallelDomainBase.cc
libamdis_la-ParallelDomainScal.lo: $(PARALLEL_DIR)/ParallelDomainScal.cc
@am__fastdepCXX_TRUE@ if $(LIBTOOL) --tag=CXX --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libamdis_la_CXXFLAGS) $(CXXFLAGS) -MT libamdis_la-ParallelDomainScal.lo -MD -MP -MF "$(DEPDIR)/libamdis_la-ParallelDomainScal.Tpo" -c -o libamdis_la-ParallelDomainScal.lo `test -f '$(PARALLEL_DIR)/ParallelDomainScal.cc' || echo '$(srcdir)/'`$(PARALLEL_DIR)/ParallelDomainScal.cc; \
@am__fastdepCXX_TRUE@ then mv -f "$(DEPDIR)/libamdis_la-ParallelDomainScal.Tpo" "$(DEPDIR)/libamdis_la-ParallelDomainScal.Plo"; else rm -f "$(DEPDIR)/libamdis_la-ParallelDomainScal.Tpo"; exit 1; fi
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(PARALLEL_DIR)/ParallelDomainScal.cc' object='libamdis_la-ParallelDomainScal.lo' libtool=yes @AMDEPBACKSLASH@
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libamdis_la_CXXFLAGS) $(CXXFLAGS) -c -o libamdis_la-ParallelDomainScal.lo `test -f '$(PARALLEL_DIR)/ParallelDomainScal.cc' || echo '$(srcdir)/'`$(PARALLEL_DIR)/ParallelDomainScal.cc
libamdis_la-ConditionalEstimator.lo: $(PARALLEL_DIR)/ConditionalEstimator.cc
@am__fastdepCXX_TRUE@ if $(LIBTOOL) --tag=CXX --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libamdis_la_CXXFLAGS) $(CXXFLAGS) -MT libamdis_la-ConditionalEstimator.lo -MD -MP -MF "$(DEPDIR)/libamdis_la-ConditionalEstimator.Tpo" -c -o libamdis_la-ConditionalEstimator.lo `test -f '$(PARALLEL_DIR)/ConditionalEstimator.cc' || echo '$(srcdir)/'`$(PARALLEL_DIR)/ConditionalEstimator.cc; \
......
#include <boost/lambda/lambda.hpp>
#include <algorithm>
#include "ParallelDomainProblem.h"
#include "ProblemScal.h"
#include "ProblemInstat.h"
#include "ParallelDomainBase.h"
#include "ParMetisPartitioner.h"
#include "Mesh.h"
#include "Traverse.h"
......@@ -19,8 +16,6 @@
#include "petscksp.h"
namespace AMDiS {
using namespace boost::lambda;
PetscErrorCode myKSPMonitor(KSP ksp, PetscInt iter, PetscReal rnorm, void *)
{
......@@ -30,6 +25,11 @@ namespace AMDiS {
return 0;
}
/// Strict-weak-ordering predicate for DOF pointers: compares the
/// pointed-to DOF values rather than the pointer addresses, so
/// sorting a DofContainer with it orders the DOFs by index value.
inline bool cmpDofsByValue(const DegreeOfFreedom* dof1, const DegreeOfFreedom* dof2)
{
  return *dof1 < *dof2;
}
ParallelDomainBase::ParallelDomainBase(const std::string& name,
ProblemIterationInterface *iIF,
ProblemTimeInterface *tIF,
......@@ -88,11 +88,11 @@ namespace AMDiS {
// Number of all DOFs in the macro mesh.
int nOverallDOFs = 0;
createLocalGlobalNumbering(rankDOFs, boundaryDOFs, nRankDOFs, nOverallDOFs);
createLocalGlobalNumbering(rankDOFs, nRankDOFs, nOverallDOFs);
// === Create interior boundary information ===
createInteriorBoundaryInfo(rankDOFs, boundaryDOFs);
createInteriorBoundaryInfo(rankDOFs);
// === Remove all macro elements that are not part of the rank partition. ===
......@@ -369,8 +369,7 @@ namespace AMDiS {
}
void ParallelDomainBase::createInteriorBoundaryInfo(DofContainer& rankDOFs,
DofToRank& boundaryDOFs)
void ParallelDomainBase::createInteriorBoundaryInfo(DofContainer& rankDOFs)
{
FUNCNAME("ParallelDomainBase::createInteriorBoundaryInfo()");
......@@ -532,7 +531,6 @@ namespace AMDiS {
void ParallelDomainBase::createLocalGlobalNumbering(DofContainer& rankDOFs,
DofToRank& boundaryDOFs,
int& nRankDOFs,
int& nOverallDOFs)
{
......@@ -543,8 +541,9 @@ namespace AMDiS {
// Stores to each DOF pointer the set of ranks the DOF is part of.
std::map<const DegreeOfFreedom*, std::set<int> > partitionDOFs;
DofContainer rankAllDofs;
DofToRank boundaryDofs;
createDOFMemberInfo(partitionDOFs, rankDOFs, rankAllDofs, boundaryDOFs);
createDOFMemberInfo(partitionDOFs, rankDOFs, rankAllDofs, boundaryDofs);
nRankDOFs = rankDOFs.size();
nOverallDOFs = partitionDOFs.size();
......@@ -604,7 +603,7 @@ namespace AMDiS {
// another rank.
std::map<int, int> recvNewDofs;
for (DofToRank::iterator it = boundaryDOFs.begin(); it != boundaryDOFs.end(); ++it) {
for (DofToRank::iterator it = boundaryDofs.begin(); it != boundaryDofs.end(); ++it) {
if (it->second == mpiRank) {
// If the boundary dof is a rank dof, it must be send to other ranks.
......@@ -685,21 +684,17 @@ namespace AMDiS {
delete [] sendBuffers[j];
// === Change dof indices for rank partition. ===
mapLocalGlobalDOFs.clear();
// === Change dof indices at boundary from other ranks. ===
// Within this small data structure we track which dof index was already changed.
// This is used to avoid the following situation: Assume, there are two dof indices
// a and b in boundaryDOFs. Then we have to change index a to b and b to c. When
// a and b in boundaryDofs. Then we have to change index a to b and b to c. When
// the second rule applies, we have to avoid that not the first b, resulted from
// changing a to b, is set to c, but the second one. Therefore, after the first
// rule was applied, the dof pointer is set to false in this data structure and
// is not allowed to be changed anymore.
std::map<const DegreeOfFreedom*, bool> dofChanged;
for (DofToRank::iterator dofIt = boundaryDOFs.begin(); dofIt != boundaryDOFs.end();
for (DofToRank::iterator dofIt = boundaryDofs.begin(); dofIt != boundaryDofs.end();
++dofIt)
dofChanged[dofIt->first] = false;
......@@ -717,8 +712,8 @@ namespace AMDiS {
// Iterate over all boundary dofs to find the dof, which index we have to change.
for (DofToRank::iterator dofIt = boundaryDOFs.begin();
dofIt != boundaryDOFs.end(); ++dofIt) {
for (DofToRank::iterator dofIt = boundaryDofs.begin();
dofIt != boundaryDofs.end(); ++dofIt) {
if (*(dofIt->first) == oldDof && !dofChanged[dofIt->first]) {
dofChanged[dofIt->first] = true;
......@@ -738,22 +733,10 @@ namespace AMDiS {
delete [] recvBuffers[i];
}
// === Create now the local to global index, and vice verse, mappings. ===
// === Create now the local to global index and local to dof index mappings. ===
for (DofIndexMap::iterator dofIt = rankDofsNewLocalIndex.begin();
dofIt != rankDofsNewLocalIndex.end(); ++dofIt) {
DegreeOfFreedom localDof = dofIt->second;
DegreeOfFreedom globalDof = rankDofsNewGlobalIndex[dofIt->first];
*const_cast<DegreeOfFreedom*>(dofIt->first) = localDof;
mapLocalGlobalDOFs[localDof] = globalDof;
}
mapLocalToDofIndex.clear();
for (DofIndexMap::iterator dofIt = rankOwnedDofsNewLocalIndex.begin();
dofIt != rankOwnedDofsNewLocalIndex.end(); ++dofIt)
mapLocalToDofIndex[dofIt->second] = *(dofIt->first);
createLocalMappings(rankDofsNewLocalIndex, rankOwnedDofsNewLocalIndex,
rankDofsNewGlobalIndex);
}
......@@ -789,9 +772,8 @@ namespace AMDiS {
// === Traverse on interior boundaries and move all not ranked owned DOFs from ===
// === rankDOFs to boundaryDOFs. ===
DofToRank newBoundaryDOFs;
RankToDofContainer sendNewDofs;
RankToDofContainer recvNewDofs;
sendDofs.clear();
recvDofs.clear();
for (RankToBoundMap::iterator it = myIntBoundary.boundary.begin();
it != myIntBoundary.boundary.end(); ++it) {
......@@ -799,51 +781,38 @@ namespace AMDiS {
for (std::vector<AtomicBoundary>::iterator boundIt = it->second.begin();
boundIt != it->second.end(); ++boundIt) {
const DegreeOfFreedom *dof1, *dof2;
DofContainer dofs;
DofContainer &dofsToSend = sendDofs[it->first];
switch (boundIt->rankObject.ithObjAtBoundary) {
case 0:
dof1 = boundIt->rankObject.el->getDOF(1);
dof2 = boundIt->rankObject.el->getDOF(2);
dofs.push_back(boundIt->rankObject.el->getDOF(1));
dofs.push_back(boundIt->rankObject.el->getDOF(2));
break;
case 1:
dof1 = boundIt->rankObject.el->getDOF(0);
dof2 = boundIt->rankObject.el->getDOF(2);
dofs.push_back(boundIt->rankObject.el->getDOF(0));
dofs.push_back(boundIt->rankObject.el->getDOF(2));
break;
case 2:
dof1 = boundIt->rankObject.el->getDOF(0);
dof2 = boundIt->rankObject.el->getDOF(1);
dofs.push_back(boundIt->rankObject.el->getDOF(0));
dofs.push_back(boundIt->rankObject.el->getDOF(1));
break;
default:
ERROR_EXIT("Should never happen!\n");
}
TEST_EXIT_DBG(boundaryDOFs.find(dof1) != boundaryDOFs.end())
("Should never happen!\n");
TEST_EXIT_DBG(boundaryDOFs.find(dof2) != boundaryDOFs.end())
("Should never happen!\n");
newBoundaryDOFs[dof1] = boundaryDOFs[dof1];
newBoundaryDOFs[dof2] = boundaryDOFs[dof2];
DofContainer &dofsToSend = sendNewDofs[it->first];
if (find(dofsToSend.begin(), dofsToSend.end(), dof1) == dofsToSend.end())
dofsToSend.push_back(dof1);
if (find(dofsToSend.begin(), dofsToSend.end(), dof2) == dofsToSend.end())
dofsToSend.push_back(dof2);
DofContainer boundDOFs;
addAllVertexDOFs(boundIt->rankObject.el,
boundIt->rankObject.ithObjAtBoundary,
boundDOFs);
addAllEdgeDOFs(boundIt->rankObject.el,
boundIt->rankObject.ithObjAtBoundary,
boundDOFs);
for (DofContainer::iterator dofIt = dofs.begin(); dofIt != dofs.end(); ++dofIt) {
if (find(dofsToSend.begin(), dofsToSend.end(), *dofIt) == dofsToSend.end())
dofsToSend.push_back(*dofIt);
}
for (int i = 0; i < static_cast<int>(boundDOFs.size()); i++) {
newBoundaryDOFs[boundDOFs[i]] = mpiRank;
dofsToSend.push_back(boundDOFs[i]);
dofs.clear();
addAllVertexDOFs(boundIt->rankObject.el, boundIt->rankObject.ithObjAtBoundary,
dofs);
addAllEdgeDOFs(boundIt->rankObject.el, boundIt->rankObject.ithObjAtBoundary,
dofs);
for (int i = 0; i < static_cast<int>(dofs.size()); i++) {
dofsToSend.push_back(dofs[i]);
}
}
......@@ -855,64 +824,49 @@ namespace AMDiS {
for (std::vector<AtomicBoundary>::iterator boundIt = it->second.begin();
boundIt != it->second.end(); ++boundIt) {
const DegreeOfFreedom *dof1, *dof2;
DofContainer dofs;
DofContainer &dofsToRecv = recvDofs[it->first];
switch (boundIt->rankObject.ithObjAtBoundary) {
case 0:
dof1 = boundIt->rankObject.el->getDOF(1);
dof2 = boundIt->rankObject.el->getDOF(2);
dofs.push_back(boundIt->rankObject.el->getDOF(1));
dofs.push_back(boundIt->rankObject.el->getDOF(2));
break;
case 1:
dof1 = boundIt->rankObject.el->getDOF(0);
dof2 = boundIt->rankObject.el->getDOF(2);
dofs.push_back(boundIt->rankObject.el->getDOF(0));
dofs.push_back(boundIt->rankObject.el->getDOF(2));
break;
case 2:
dof1 = boundIt->rankObject.el->getDOF(1);
dof2 = boundIt->rankObject.el->getDOF(0);
dofs.push_back(boundIt->rankObject.el->getDOF(1));
dofs.push_back(boundIt->rankObject.el->getDOF(0));
break;
default:
ERROR_EXIT("Should never happen!\n");
}
TEST_EXIT_DBG(boundaryDOFs.find(dof1) != boundaryDOFs.end())
("Should never happen!\n");
TEST_EXIT_DBG(boundaryDOFs.find(dof2) != boundaryDOFs.end())
("Should never happen!\n");
DofContainer::iterator eraseIt = find(rankDOFs.begin(), rankDOFs.end(), dof1);
if (eraseIt != rankDOFs.end())
rankDOFs.erase(eraseIt);
eraseIt = find(rankDOFs.begin(), rankDOFs.end(), dof2);
if (eraseIt != rankDOFs.end())
rankDOFs.erase(eraseIt);
newBoundaryDOFs[dof1] = boundaryDOFs[dof1];
newBoundaryDOFs[dof2] = boundaryDOFs[dof2];
DofContainer &dofsToRecv = recvNewDofs[it->first];
if (find(dofsToRecv.begin(), dofsToRecv.end(), dof1) == dofsToRecv.end())
dofsToRecv.push_back(dof1);
if (find(dofsToRecv.begin(), dofsToRecv.end(), dof2) == dofsToRecv.end())
dofsToRecv.push_back(dof2);
DofContainer boundDOFs;
addAllEdgeDOFs(boundIt->rankObject.el,
boundIt->rankObject.ithObjAtBoundary,
boundDOFs);
addAllVertexDOFs(boundIt->rankObject.el,
boundIt->rankObject.ithObjAtBoundary,
boundDOFs);
for (int i = static_cast<int>(boundDOFs.size()) - 1; i >= 0; i--) {
TEST_EXIT_DBG(find(rankDOFs.begin(), rankDOFs.end(), boundDOFs[i]) != rankDOFs.end())
for (DofContainer::iterator dofIt = dofs.begin(); dofIt != dofs.end(); ++dofIt) {
DofContainer::iterator eraseIt = find(rankDOFs.begin(), rankDOFs.end(), *dofIt);
if (eraseIt != rankDOFs.end())
rankDOFs.erase(eraseIt);
if (find(dofsToRecv.begin(), dofsToRecv.end(), *dofIt) == dofsToRecv.end())
dofsToRecv.push_back(*dofIt);
}
dofs.clear();
addAllEdgeDOFs(boundIt->rankObject.el, boundIt->rankObject.ithObjAtBoundary,
dofs);
addAllVertexDOFs(boundIt->rankObject.el, boundIt->rankObject.ithObjAtBoundary,
dofs);
for (int i = static_cast<int>(dofs.size()) - 1; i >= 0; i--) {
TEST_EXIT_DBG(find(rankDOFs.begin(), rankDOFs.end(), dofs[i]) != rankDOFs.end())
("Should never happen!\n");
eraseIt = find(rankDOFs.begin(), rankDOFs.end(), boundDOFs[i]);
DofContainer::iterator eraseIt = find(rankDOFs.begin(), rankDOFs.end(), dofs[i]);
if (eraseIt != rankDOFs.end())
rankDOFs.erase(eraseIt);
newBoundaryDOFs[boundDOFs[i]] = it->first;
dofsToRecv.push_back(boundDOFs[i]);
dofsToRecv.push_back(dofs[i]);
}
}
}
......@@ -961,15 +915,15 @@ namespace AMDiS {
// === Send new DOF indices. ===
std::vector<int*> sendBuffers(sendNewDofs.size());
std::vector<int*> recvBuffers(recvNewDofs.size());
std::vector<int*> sendBuffers(sendDofs.size());
std::vector<int*> recvBuffers(recvDofs.size());
MPI::Request request[sendNewDofs.size() + recvNewDofs.size()];
MPI::Request request[sendDofs.size() + recvDofs.size()];
int requestCounter = 0;
i = 0;
for (RankToDofContainer::iterator sendIt = sendNewDofs.begin();
sendIt != sendNewDofs.end(); ++sendIt, i++) {
for (RankToDofContainer::iterator sendIt = sendDofs.begin();
sendIt != sendDofs.end(); ++sendIt, i++) {
int nSendDofs = sendIt->second.size();
sendBuffers[i] = new int[nSendDofs];
int c = 0;
......@@ -982,8 +936,8 @@ namespace AMDiS {
}
i = 0;
for (RankToDofContainer::iterator recvIt = recvNewDofs.begin();
recvIt != recvNewDofs.end(); ++recvIt, i++) {
for (RankToDofContainer::iterator recvIt = recvDofs.begin();
recvIt != recvDofs.end(); ++recvIt, i++) {
int nRecvDofs = recvIt->second.size();
recvBuffers[i] = new int[nRecvDofs];
......@@ -997,8 +951,8 @@ namespace AMDiS {
delete [] sendBuffers[j];
i = 0;
for (RankToDofContainer::iterator recvIt = recvNewDofs.begin();
recvIt != recvNewDofs.end(); ++recvIt) {
for (RankToDofContainer::iterator recvIt = recvDofs.begin();
recvIt != recvDofs.end(); ++recvIt) {
int j = 0;
for (DofContainer::iterator dofIt = recvIt->second.begin();
dofIt != recvIt->second.end(); ++dofIt) {
......@@ -1012,14 +966,16 @@ namespace AMDiS {
}
// === Update list of dofs that must be communicated for solution exchange. ===
// === Create now the local to global index and local to dof index mappings. ===
sendDofs = sendNewDofs;
recvDofs = recvNewDofs;
// === Create now the local to global index, and vice verse, mappings. ===
createLocalMappings(rankDofsNewLocalIndex, rankOwnedDofsNewLocalIndex,
rankDofsNewGlobalIndex);
}
void ParallelDomainBase::createLocalMappings(DofIndexMap &rankDofsNewLocalIndex,
DofIndexMap &rankOwnedDofsNewLocalIndex,
DofIndexMap &rankDofsNewGlobalIndex)
{
mapLocalGlobalDOFs.clear();
mapLocalToDofIndex.clear();
......@@ -1032,13 +988,11 @@ namespace AMDiS {
mapLocalGlobalDOFs[localDof] = globalDof;
}
mapLocalToDofIndex.clear();
for (DofIndexMap::iterator dofIt = rankOwnedDofsNewLocalIndex.begin();
dofIt != rankOwnedDofsNewLocalIndex.end(); ++dofIt)
mapLocalToDofIndex[dofIt->second] = *(dofIt->first);
}
void ParallelDomainBase::addAllVertexDOFs(Element *el, int ithEdge,
DofContainer& dofs)
{
......@@ -1081,19 +1035,17 @@ namespace AMDiS {
switch (ithEdge) {
case 0:
if (el->getSecondChild()) {
if (el->getSecondChild())
addAllEdgeDOFs(el->getSecondChild(), 2, dofs);
} else {
else
addThisEdge = true;
}
break;
case 1:
if (el->getFirstChild()) {
if (el->getFirstChild())
addAllEdgeDOFs(el->getFirstChild(), 2, dofs);
} else {
else
addThisEdge = true;
}
break;
case 2:
......@@ -1113,10 +1065,9 @@ namespace AMDiS {
ElementDofIterator elDofIter(feSpace, true);
elDofIter.reset(el);
do {
if (elDofIter.getCurrentPos() == 1 &&
elDofIter.getCurrentElementPos() == ithEdge) {
dofs.push_back(elDofIter.getDofPtr());
}
if (elDofIter.getCurrentPos() == 1 &&
elDofIter.getCurrentElementPos() == ithEdge)
dofs.push_back(elDofIter.getDofPtr());
} while(elDofIter.next());
}
}
......@@ -1439,72 +1390,4 @@ namespace AMDiS {
}
}
/// Constructs the scalar parallel-domain wrapper around a scalar problem.
/// Forwards the problem's FE space and refinement manager to the base
/// class and keeps a typed pointer to the scalar problem for later use.
ParallelDomainScal::ParallelDomainScal(const std::string& name,
				       ProblemScal *problem,
				       ProblemInstatScal *problemInstat)
  : ParallelDomainBase(name, problem, problemInstat,
		       problem->getFESpace(),
		       problem->getRefinementManager()),
    probScal(problem)
{
  // Reuse the scalar problem's info/verbosity level for our own output.
  info = problem->getInfo();
}
/// Runs the generic parallelization setup of the base class, then tells
/// the scalar problem's system matrix which DOFs are owned by this rank.
void ParallelDomainScal::initParallelization(AdaptInfo *adaptInfo)
{
  FUNCNAME("ParallelDomainScal::initParallelization()");

  ParallelDomainBase::initParallelization(adaptInfo);

  DOFMatrix *sysMat = probScal->getSystemMatrix();
  TEST_EXIT(sysMat)("No DOF Matrix!\n");

  // The matrix needs the rank-ownership map computed by the base class.
  sysMat->setIsRankDOF(isRankDof);
}
/// Assembles the PETSc system from the scalar problem's matrix and RHS,
/// solves it into the problem's solution vector, and reports the time
/// spent (system time always; wallclock additionally when OpenMP is on).
void ParallelDomainScal::solve()
{
  FUNCNAME("ParallelDomainScal::solve()");

#ifdef _OPENMP
  double wallStart = omp_get_wtime();
#endif
  clock_t clockStart = clock();

  fillPetscMatrix(probScal->getSystemMatrix(), probScal->getRHS());
  solvePetscMatrix(probScal->getSolution());

#ifdef _OPENMP
  INFO(info, 8)("solution of discrete system needed %.5f seconds system time / %.5f seconds wallclock time\n",
		TIME_USED(clockStart, clock()),
		omp_get_wtime() - wallStart);
#else
  INFO(info, 8)("solution of discrete system needed %.5f seconds\n",
		TIME_USED(clockStart, clock()));
#endif
}
/// One adaptation-loop iteration: build/adapt via the standard iteration
/// object, then perform the requested steps (solve, estimate) from toDo.
/// Returns the flag produced by the build-and-adapt phase.
Flag ParallelDomainScal::oneIteration(AdaptInfo *adaptInfo, Flag toDo)
{
  FUNCNAME("ParallelDomainScal::oneIteration()");

  StandardProblemIteration *stdIteration =
    dynamic_cast<StandardProblemIteration*>(iterationIF);
  Flag flag = stdIteration->buildAndAdapt(adaptInfo, toDo);

  if (toDo.isSet(SOLVE))
    solve();

  if (toDo.isSet(SOLVE_RHS))
    ERROR_EXIT("Not yet implemented!\n");

  if (toDo.isSet(ESTIMATE))
    iterationIF->getProblem()->estimate(adaptInfo);

  return flag;
}
}
......@@ -17,10 +17,11 @@
// == ==
// ============================================================================
/** \file ParallelDomain.h */
/** \file ParallelDomainBase.h */
#ifndef AMDIS_PARALLELDOMAINBASE_H
#define AMDIS_PARALLELDOMAINBASE_H
#ifndef AMDIS_PARALLELDOMAIN_H
#define AMDIS_PARALLELDOMAIN_H
#include <map>
#include <set>
......@@ -38,12 +39,14 @@
#include "petscao.h"
#include "mpi.h"
#include "Global.h"
namespace AMDiS {
class ParMetisPartitioner;