Commit 5b850bbb authored by Thomas Witkowski

Added support for vector-valued problems (SystemVector) to the parallelization.

parent 77275071
@@ -30,7 +30,8 @@ endif
if USE_PARALLEL_DOMAIN_AMDIS
PARALLEL_AMDIS_SOURCES += \
$(PARALLEL_DIR)/ParallelDomainBase.h $(PARALLEL_DIR)/ParallelDomainBase.cc \
$(PARALLEL_DIR)/ParallelDomainScal.h $(PARALLEL_DIR)/ParallelDomainScal.cc
$(PARALLEL_DIR)/ParallelDomainScal.h $(PARALLEL_DIR)/ParallelDomainScal.cc \
$(PARALLEL_DIR)/ParallelDomainVec.h $(PARALLEL_DIR)/ParallelDomainVec.cc
libamdis_la_CXXFLAGS += -DHAVE_PARALLEL_DOMAIN_AMDIS=1
AMDIS_INCLUDES += -I/u/witkowski/local/petsc-3.0.0-p4/include -I/u/witkowski/local/petsc-3.0.0-p4/linux-gnu-c-debug/include
endif
......
@@ -39,7 +39,8 @@ host_triplet = @host@
@USE_PARALLEL_AMDIS_TRUE@am__append_1 = -DHAVE_PARALLEL_AMDIS=1
@USE_PARALLEL_DOMAIN_AMDIS_TRUE@am__append_2 = \
@USE_PARALLEL_DOMAIN_AMDIS_TRUE@ $(PARALLEL_DIR)/ParallelDomainBase.h $(PARALLEL_DIR)/ParallelDomainBase.cc \
@USE_PARALLEL_DOMAIN_AMDIS_TRUE@ $(PARALLEL_DIR)/ParallelDomainScal.h $(PARALLEL_DIR)/ParallelDomainScal.cc
@USE_PARALLEL_DOMAIN_AMDIS_TRUE@ $(PARALLEL_DIR)/ParallelDomainScal.h $(PARALLEL_DIR)/ParallelDomainScal.cc \
@USE_PARALLEL_DOMAIN_AMDIS_TRUE@ $(PARALLEL_DIR)/ParallelDomainVec.h $(PARALLEL_DIR)/ParallelDomainVec.cc
@USE_PARALLEL_DOMAIN_AMDIS_TRUE@am__append_3 = -DHAVE_PARALLEL_DOMAIN_AMDIS=1
@USE_PARALLEL_DOMAIN_AMDIS_TRUE@am__append_4 = -I/u/witkowski/local/petsc-3.0.0-p4/include -I/u/witkowski/local/petsc-3.0.0-p4/linux-gnu-c-debug/include
@@ -76,6 +77,8 @@ am__libamdis_la_SOURCES_DIST = $(PARALLEL_DIR)/ParallelDomainBase.h \
$(PARALLEL_DIR)/ParallelDomainBase.cc \
$(PARALLEL_DIR)/ParallelDomainScal.h \
$(PARALLEL_DIR)/ParallelDomainScal.cc \
$(PARALLEL_DIR)/ParallelDomainVec.h \
$(PARALLEL_DIR)/ParallelDomainVec.cc \
$(PARALLEL_DIR)/ConditionalEstimator.h \
$(PARALLEL_DIR)/ConditionalEstimator.cc \
$(PARALLEL_DIR)/ConditionalMarker.h \
@@ -227,7 +230,8 @@ am__libamdis_la_SOURCES_DIST = $(PARALLEL_DIR)/ParallelDomainBase.h \
$(SOURCE_DIR)/parareal/AdaptParaReal.h \
$(SOURCE_DIR)/parareal/AdaptParaReal.cc
@USE_PARALLEL_DOMAIN_AMDIS_TRUE@am__objects_1 = libamdis_la-ParallelDomainBase.lo \
@USE_PARALLEL_DOMAIN_AMDIS_TRUE@ libamdis_la-ParallelDomainScal.lo
@USE_PARALLEL_DOMAIN_AMDIS_TRUE@ libamdis_la-ParallelDomainScal.lo \
@USE_PARALLEL_DOMAIN_AMDIS_TRUE@ libamdis_la-ParallelDomainVec.lo
@USE_PARALLEL_AMDIS_FALSE@am__objects_2 = $(am__objects_1)
@USE_PARALLEL_AMDIS_TRUE@am__objects_2 = \
@USE_PARALLEL_AMDIS_TRUE@ libamdis_la-ConditionalEstimator.lo \
@@ -759,6 +763,7 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libamdis_la-ParMetisPartitioner.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libamdis_la-ParallelDomainBase.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libamdis_la-ParallelDomainScal.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libamdis_la-ParallelDomainVec.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libamdis_la-ParallelProblem.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libamdis_la-Parameters.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libamdis_la-Parametric.Plo@am__quote@
@@ -843,6 +848,13 @@ libamdis_la-ParallelDomainScal.lo: $(PARALLEL_DIR)/ParallelDomainScal.cc
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libamdis_la_CXXFLAGS) $(CXXFLAGS) -c -o libamdis_la-ParallelDomainScal.lo `test -f '$(PARALLEL_DIR)/ParallelDomainScal.cc' || echo '$(srcdir)/'`$(PARALLEL_DIR)/ParallelDomainScal.cc
libamdis_la-ParallelDomainVec.lo: $(PARALLEL_DIR)/ParallelDomainVec.cc
@am__fastdepCXX_TRUE@ if $(LIBTOOL) --tag=CXX --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libamdis_la_CXXFLAGS) $(CXXFLAGS) -MT libamdis_la-ParallelDomainVec.lo -MD -MP -MF "$(DEPDIR)/libamdis_la-ParallelDomainVec.Tpo" -c -o libamdis_la-ParallelDomainVec.lo `test -f '$(PARALLEL_DIR)/ParallelDomainVec.cc' || echo '$(srcdir)/'`$(PARALLEL_DIR)/ParallelDomainVec.cc; \
@am__fastdepCXX_TRUE@ then mv -f "$(DEPDIR)/libamdis_la-ParallelDomainVec.Tpo" "$(DEPDIR)/libamdis_la-ParallelDomainVec.Plo"; else rm -f "$(DEPDIR)/libamdis_la-ParallelDomainVec.Tpo"; exit 1; fi
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(PARALLEL_DIR)/ParallelDomainVec.cc' object='libamdis_la-ParallelDomainVec.lo' libtool=yes @AMDEPBACKSLASH@
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libamdis_la_CXXFLAGS) $(CXXFLAGS) -c -o libamdis_la-ParallelDomainVec.lo `test -f '$(PARALLEL_DIR)/ParallelDomainVec.cc' || echo '$(srcdir)/'`$(PARALLEL_DIR)/ParallelDomainVec.cc
libamdis_la-ConditionalEstimator.lo: $(PARALLEL_DIR)/ConditionalEstimator.cc
@am__fastdepCXX_TRUE@ if $(LIBTOOL) --tag=CXX --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libamdis_la_CXXFLAGS) $(CXXFLAGS) -MT libamdis_la-ConditionalEstimator.lo -MD -MP -MF "$(DEPDIR)/libamdis_la-ConditionalEstimator.Tpo" -c -o libamdis_la-ConditionalEstimator.lo `test -f '$(PARALLEL_DIR)/ConditionalEstimator.cc' || echo '$(srcdir)/'`$(PARALLEL_DIR)/ConditionalEstimator.cc; \
@am__fastdepCXX_TRUE@ then mv -f "$(DEPDIR)/libamdis_la-ConditionalEstimator.Tpo" "$(DEPDIR)/libamdis_la-ConditionalEstimator.Plo"; else rm -f "$(DEPDIR)/libamdis_la-ConditionalEstimator.Tpo"; exit 1; fi
......
@@ -210,7 +210,7 @@ namespace AMDiS {
if (condition && condition->isDirichlet()) {
if (condition->applyBoundaryCondition()) {
#ifdef HAVE_PARALLEL_DOMAIN_AMDIS
if (isRankDOF[row])
if (rankDofs[row])
applyDBCs.insert(static_cast<int>(row));
#else
applyDBCs.insert(static_cast<int>(row));
......
@@ -477,9 +477,9 @@ namespace AMDiS {
int memsize();
#ifdef HAVE_PARALLEL_DOMAIN_AMDIS
void setIsRankDOF(std::map<DegreeOfFreedom, bool>& dofmap)
void setRankDofs(std::map<DegreeOfFreedom, bool>& dofmap)
{
isRankDOF = dofmap;
rankDofs = dofmap;
}
#endif
@@ -561,7 +561,7 @@ namespace AMDiS {
int nnzPerRow;
#ifdef HAVE_PARALLEL_DOMAIN_AMDIS
std::map<DegreeOfFreedom, bool> isRankDOF;
std::map<DegreeOfFreedom, bool> rankDofs;
#endif
/// Inserter object: implemented as pointer, allocated and deallocated as needed
......
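The renamed rankDofs map is what lets DOFMatrix decide, row by row, whether this MPI rank owns a DOF and should therefore apply the Dirichlet condition itself. Below is a minimal standalone sketch of that ownership filter; collectDirichletRows and its arguments are illustrative names, not AMDiS code.

#include <map>
#include <set>
#include <vector>

typedef int DegreeOfFreedom;

// Keep only those Dirichlet rows that are owned by this rank, mirroring
// the rankDofs[row] check in the DOFMatrix change above.
std::set<int> collectDirichletRows(const std::map<DegreeOfFreedom, bool>& rankDofs,
                                   const std::vector<DegreeOfFreedom>& dirichletRows)
{
  std::set<int> applyDBCs;
  for (std::vector<DegreeOfFreedom>::const_iterator it = dirichletRows.begin();
       it != dirichletRows.end(); ++it) {
    std::map<DegreeOfFreedom, bool>::const_iterator owned = rankDofs.find(*it);
    if (owned != rankDofs.end() && owned->second)
      applyDBCs.insert(static_cast<int>(*it));
  }
  return applyDBCs;
}
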
@@ -10,8 +10,11 @@
#include "PartitionElementData.h"
#include "DOFMatrix.h"
#include "DOFVector.h"
#include "SystemVector.h"
#include "VtkWriter.h"
#include "ElementDofIterator.h"
#include "ProblemStatBase.h"
#include "StandardProblemIteration.h"
#include "petscksp.h"
@@ -42,7 +45,8 @@ namespace AMDiS {
refinementManager(refineManager),
initialPartitionMesh(true),
nRankDOFs(0),
rstart(0)
rstart(0),
nComponents(1)
{
FUNCNAME("ParallelDomainBase::ParalleDomainBase()");
@@ -108,6 +112,7 @@
updateDofAdmins();
// === Global refinements. ===
int globalRefinement = 0;
@@ -130,6 +135,7 @@
#endif
}
#if (DEBUG != 0)
DbgTestCommonDofs();
#endif
@@ -137,23 +143,28 @@
// === Create petsc matrix. ===
int ierr;
ierr = MatCreate(PETSC_COMM_WORLD, &petscMatrix);
ierr = MatSetSizes(petscMatrix, nRankDOFs, nRankDOFs, nOverallDOFs, nOverallDOFs);
ierr = MatSetType(petscMatrix, MATAIJ);
int nRankRows = nRankDOFs * nComponents;
int nOverallRows = nOverallDOFs * nComponents;
ierr = VecCreate(PETSC_COMM_WORLD, &petscRhsVec);
ierr = VecSetSizes(petscRhsVec, nRankDOFs, nOverallDOFs);
ierr = VecSetType(petscRhsVec, VECMPI);
MatCreate(PETSC_COMM_WORLD, &petscMatrix);
MatSetSizes(petscMatrix, nRankRows, nRankRows, nOverallRows, nOverallRows);
MatSetType(petscMatrix, MATAIJ);
ierr = VecCreate(PETSC_COMM_WORLD, &petscSolVec);
ierr = VecSetSizes(petscSolVec, nRankDOFs, nOverallDOFs);
ierr = VecSetType(petscSolVec, VECMPI);
VecCreate(PETSC_COMM_WORLD, &petscRhsVec);
VecSetSizes(petscRhsVec, nRankRows, nOverallRows);
VecSetType(petscRhsVec, VECMPI);
VecCreate(PETSC_COMM_WORLD, &petscSolVec);
VecSetSizes(petscSolVec, nRankRows, nOverallRows);
VecSetType(petscSolVec, VECMPI);
}
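
The vector support scales the PETSc sizes by nComponents and interleaves the components of each global DOF. A one-line sketch of the row mapping that the sizing above assumes (petscRow is an illustrative helper, not part of the commit):

// Row of (globalDof, component) in the interleaved layout: all
// nComponents values of a DOF occupy consecutive PETSc rows, which is
// exactly the index arithmetic used in setDofMatrix()/setDofVector().
inline int petscRow(int globalDof, int component, int nComponents)
{
  return globalDof * nComponents + component;
}
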
void ParallelDomainBase::exitParallelization(AdaptInfo *adaptInfo)
{
MatDestroy(petscMatrix);
VecDestroy(petscRhsVec);
VecDestroy(petscSolVec);
}
@@ -196,10 +207,13 @@
("The mesh has less macro elements than number of mpi processes!\n");
}
void ParallelDomainBase::fillPetscMatrix(DOFMatrix *mat,
DOFVector<double> *vec)
void ParallelDomainBase::setDofMatrix(DOFMatrix* mat, int dispMult,
int dispAddRow, int dispAddCol)
{
FUNCNAME("ParallelDomainBase::setDofMatrix()");
TEST_EXIT(mat)("No DOFMatrix!\n");
using mtl::tag::major; using mtl::tag::nz; using mtl::begin; using mtl::end;
namespace traits = mtl::traits;
typedef DOFMatrix::base_matrix_type Matrix;
@@ -216,21 +230,21 @@
for (icursor_type icursor = begin<nz>(cursor),
icend = end<nz>(cursor); icursor != icend; ++icursor)
if (value(*icursor) != 0.0) {
int r = mapLocalGlobalDOFs[row(*icursor)];
int c = mapLocalGlobalDOFs[col(*icursor)];
int r = mapLocalGlobalDOFs[row(*icursor)] * dispMult + dispAddRow;
int c = mapLocalGlobalDOFs[col(*icursor)] * dispMult + dispAddCol;
double v = value(*icursor);
MatSetValues(petscMatrix, 1, &r, 1, &c, &v, ADD_VALUES);
}
}
MatAssemblyBegin(petscMatrix, MAT_FINAL_ASSEMBLY);
MatAssemblyEnd(petscMatrix, MAT_FINAL_ASSEMBLY);
void ParallelDomainBase::setDofVector(DOFVector<double>* vec,
int dispMult, int dispAdd)
{
DOFVector<double>::Iterator dofIt(vec, USED_DOFS);
for (dofIt.reset(); !dofIt.end(); ++dofIt) {
int index = mapLocalGlobalDOFs[dofIt.getDOFIndex()];
int index = mapLocalGlobalDOFs[dofIt.getDOFIndex()] * dispMult + dispAdd;
double value = *dofIt;
VecSetValues(petscRhsVec, 1, &index, &value, ADD_VALUES);
@@ -238,6 +252,36 @@
}
void ParallelDomainBase::fillPetscMatrix(DOFMatrix *mat, DOFVector<double> *vec)
{
setDofMatrix(mat);
MatAssemblyBegin(petscMatrix, MAT_FINAL_ASSEMBLY);
MatAssemblyEnd(petscMatrix, MAT_FINAL_ASSEMBLY);
setDofVector(vec);
}
void ParallelDomainBase::fillPetscMatrix(Matrix<DOFMatrix*> *mat, SystemVector *vec)
{
using mtl::tag::major; using mtl::tag::nz; using mtl::begin; using mtl::end;
namespace traits = mtl::traits;
typedef DOFMatrix::base_matrix_type Matrix;
for (int i = 0; i < nComponents; i++)
for (int j = 0; j < nComponents; j++)
if ((*mat)[i][j])
setDofMatrix((*mat)[i][j], nComponents, i, j);
MatAssemblyBegin(petscMatrix, MAT_FINAL_ASSEMBLY);
MatAssemblyEnd(petscMatrix, MAT_FINAL_ASSEMBLY);
for (int i = 0; i < nComponents; i++)
setDofVector(vec->getDOFVector(i), nComponents, i);
}
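
With dispMult = nComponents and dispAddRow/dispAddCol set to the block indices, each block (i, j) of the matrix of DOFMatrix pointers lands on the interleaved rows and columns of its components. A small worked check of that index arithmetic (standalone, with assumed values):

#include <cassert>

int main()
{
  const int nComponents = 2;
  // Entry of block (i, j) = (1, 0) coupling global DOFs R = 7 and C = 4:
  const int i = 1, j = 0, R = 7, C = 4;
  int r = R * nComponents + i;   // PETSc row    15
  int c = C * nComponents + j;   // PETSc column  8
  assert(r == 15 && c == 8);
  return 0;
}
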
void ParallelDomainBase::solvePetscMatrix(DOFVector<double> *vec)
{
FUNCNAME("ParallelDomainBase::solvePetscMatrix()");
@@ -278,8 +322,7 @@
int i = 0;
for (RankToDofContainer::iterator sendIt = sendDofs.begin();
sendIt != sendDofs.end();
++sendIt, i++) {
sendIt != sendDofs.end(); ++sendIt, i++) {
int nSendDOFs = sendIt->second.size();
sendBuffers[i] = new double[nSendDOFs];
@@ -292,8 +335,7 @@
i = 0;
for (RankToDofContainer::iterator recvIt = recvDofs.begin();
recvIt != recvDofs.end();
++recvIt, i++) {
recvIt != recvDofs.end(); ++recvIt, i++) {
int nRecvDOFs = recvIt->second.size();
recvBuffers[i] = new double[nRecvDOFs];
@@ -306,8 +348,7 @@
i = 0;
for (RankToDofContainer::iterator recvIt = recvDofs.begin();
recvIt != recvDofs.end();
++recvIt, i++) {
recvIt != recvDofs.end(); ++recvIt, i++) {
for (int j = 0; j < static_cast<int>(recvIt->second.size()); j++)
(*vec)[*(recvIt->second)[j]] = recvBuffers[i][j];
@@ -319,6 +360,97 @@
}
void ParallelDomainBase::solvePetscMatrix(SystemVector *vec)
{
FUNCNAME("ParallelDomainBase::solvePetscMatrix()");
KSP ksp;
PC pc;
KSPCreate(PETSC_COMM_WORLD, &ksp);
KSPSetOperators(ksp, petscMatrix, petscMatrix, DIFFERENT_NONZERO_PATTERN);
KSPGetPC(ksp, &pc);
// PCSetType(pc, PCNONE);
PCSetType(pc, PCJACOBI);
KSPSetTolerances(ksp, 1.e-7, PETSC_DEFAULT, PETSC_DEFAULT, PETSC_DEFAULT);
KSPSetType(ksp, KSPBCGS);
//KSPSetType(ksp, KSPCG);
KSPMonitorSet(ksp, myKSPMonitor, PETSC_NULL, 0);
KSPSolve(ksp, petscRhsVec, petscSolVec);
#if (DEBUG != 0)
int size = 0;
VecGetLocalSize(petscSolVec, &size);
TEST_EXIT(size == nRankDOFs * nComponents)("Local vector size and rank DOFs do not fit together!\n");
#endif
PetscScalar *vecPointer;
VecGetArray(petscSolVec, &vecPointer);
for (int i = 0; i < nComponents; i++) {
DOFVector<double> *dofvec = vec->getDOFVector(i);
for (int j = 0; j < nRankDOFs; j++)
(*dofvec)[mapLocalToDofIndex[j]] = vecPointer[j * nComponents + i];
}
VecRestoreArray(petscSolVec, &vecPointer);
std::vector<double*> sendBuffers(sendDofs.size());
std::vector<double*> recvBuffers(recvDofs.size());
MPI::Request request[sendDofs.size() + recvDofs.size()];
int requestCounter = 0;
int i = 0;
for (RankToDofContainer::iterator sendIt = sendDofs.begin();
sendIt != sendDofs.end(); ++sendIt, i++) {
int nSendDOFs = sendIt->second.size() * nComponents;
sendBuffers[i] = new double[nSendDOFs];
int counter = 0;
for (int j = 0; j < nComponents; j++) {
DOFVector<double> *dofvec = vec->getDOFVector(j);
for (int k = 0; k < nSendDOFs; k++)
sendBuffers[i][counter++] = (*dofvec)[*((sendIt->second)[k])];
}
request[requestCounter++] =
mpiComm.Isend(sendBuffers[i], nSendDOFs, MPI_DOUBLE, sendIt->first, 0);
}
i = 0;
for (RankToDofContainer::iterator recvIt = recvDofs.begin();
recvIt != recvDofs.end(); ++recvIt, i++) {
int nRecvDOFs = recvIt->second.size() * nComponents;
recvBuffers[i] = new double[nRecvDOFs];
request[requestCounter++] =
mpiComm.Irecv(recvBuffers[i], nRecvDOFs, MPI_DOUBLE, recvIt->first, 0);
}
MPI::Request::Waitall(requestCounter, request);
i = 0;
for (RankToDofContainer::iterator recvIt = recvDofs.begin();
recvIt != recvDofs.end(); ++recvIt, i++) {
int nRecvDOFs = recvIt->second.size();
int counter = 0;
for (int j = 0; j < nComponents; j++) {
DOFVector<double> *dofvec = vec->getDOFVector(j);
for (int k = 0; k < nRecvDOFs; k++)
(*dofvec)[*(recvIt->second)[k]] = recvBuffers[i][counter++];
}
delete [] recvBuffers[i];
}
for (int i = 0; i < static_cast<int>(sendBuffers.size()); i++)
delete [] sendBuffers[i];
}
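
The boundary-DOF exchange packs each send buffer component-major: first all DOF values of component 0, then component 1, and so on; the unpack loop on the receiving rank must use the identical order. A minimal sketch of that pairing with plain arrays (packComponentMajor/unpackComponentMajor are illustrative names, not AMDiS code):

// Pack: for each component, append the values of all exchanged DOFs.
void packComponentMajor(double* buffer, double* const* compValues,
                        const int* dofIndices, int nDofs, int nComponents)
{
  int counter = 0;
  for (int j = 0; j < nComponents; j++)
    for (int k = 0; k < nDofs; k++)
      buffer[counter++] = compValues[j][dofIndices[k]];
}

// Unpack: the identical loop structure guarantees a matching order.
void unpackComponentMajor(const double* buffer, double** compValues,
                          const int* dofIndices, int nDofs, int nComponents)
{
  int counter = 0;
  for (int j = 0; j < nComponents; j++)
    for (int k = 0; k < nDofs; k++)
      compValues[j][dofIndices[k]] = buffer[counter++];
}
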
double ParallelDomainBase::setElemWeights(AdaptInfo *adaptInfo)
{
double localWeightSum = 0.0;
@@ -1390,4 +1522,24 @@
}
}
Flag ParallelDomainBase::oneIteration(AdaptInfo *adaptInfo, Flag toDo)
{
FUNCNAME("ParallelDomainBase::oneIteration()");
Flag flag = dynamic_cast<StandardProblemIteration*>(iterationIF)->
buildAndAdapt(adaptInfo, toDo);
if (toDo.isSet(SOLVE))
solve();
if (toDo.isSet(SOLVE_RHS))
ERROR_EXIT("Not yet implemented!\n");
if (toDo.isSet(ESTIMATE))
iterationIF->getProblem()->estimate(adaptInfo);
return flag;
}
}
@@ -120,17 +120,15 @@ namespace AMDiS {
iterationIF->beginIteration(adaptInfo);
}
virtual Flag oneIteration(AdaptInfo *adaptInfo, Flag toDo = FULL_ITERATION)
{
ERROR_EXIT("Not implemented!\n");
return 0;
}
virtual Flag oneIteration(AdaptInfo *adaptInfo, Flag toDo = FULL_ITERATION);
virtual void endIteration(AdaptInfo *adaptInfo)
{
iterationIF->endIteration(adaptInfo);
}
virtual void solve() {}
virtual int getNumProblems()
{
return 0;
@@ -149,8 +147,12 @@ namespace AMDiS {
void fillPetscMatrix(DOFMatrix *mat, DOFVector<double> *vec);
void fillPetscMatrix(Matrix<DOFMatrix*> *mat, SystemVector *vec);
void solvePetscMatrix(DOFVector<double> *vec);
void solvePetscMatrix(SystemVector *vec);
virtual ProblemStatBase *getProblem(int number = 0)
{
return NULL;
@@ -224,6 +226,11 @@
DofContainer& rankAllDofs,
DofToRank& boundaryDofs);
void setDofMatrix(DOFMatrix* mat, int dispMult = 1,
int dispAddRow = 0, int dispAddCol = 0);
void setDofVector(DOFVector<double>* vec, int disMult = 1, int dispAdd = 0);
void DbgCreateElementMap(ElementIdxToDofs &elMap);
void DbgTestElementMap(ElementIdxToDofs &elMap);
@@ -377,6 +384,8 @@
DofToBool isRankDof;
int rstart;
int nComponents;
};
}
......
@@ -27,7 +27,7 @@ namespace AMDiS {
TEST_EXIT(m)("No DOF Matrix!\n");
m->setIsRankDOF(isRankDof);
m->setRankDofs(isRankDof);
}
void ParallelDomainScal::solve()
@@ -52,23 +52,4 @@ namespace AMDiS {
#endif
}
Flag ParallelDomainScal::oneIteration(AdaptInfo *adaptInfo, Flag toDo)
{
FUNCNAME("ParallelDomainScal::oneIteration()");
Flag flag = dynamic_cast<StandardProblemIteration*>(iterationIF)->
buildAndAdapt(adaptInfo, toDo);
if (toDo.isSet(SOLVE))
solve();
if (toDo.isSet(SOLVE_RHS))
ERROR_EXIT("Not yet implemented!\n");
if (toDo.isSet(ESTIMATE))
iterationIF->getProblem()->estimate(adaptInfo);
return flag;
}
}
@@ -35,8 +35,6 @@ namespace AMDiS {
void initParallelization(AdaptInfo *adaptInfo);
virtual Flag oneIteration(AdaptInfo *adaptInfo, Flag toDo = FULL_ITERATION);
protected:
/// Starts the solution of the linear system using Petsc.
void solve();
......
#include "ParallelDomainVec.h"
#include "ProblemVec.h"
#include "ProblemInstat.h"
namespace AMDiS {
ParallelDomainVec::ParallelDomainVec(const std::string& name,
ProblemVec *problem,
ProblemInstatVec *problemInstat)
: ParallelDomainBase(name,
problem,
problemInstat,
problem->getFESpace(0),
problem->getRefinementManager(0)),
probVec(problem)
{
FUNCNAME("ParallelDomainVec::ParallelDomainVec()");
info = probVec->getInfo();
nComponents = probVec->getNumComponents();
const FiniteElemSpace *fe = probVec->getFESpace(0);
for (int i = 0; i < nComponents; i++)
TEST_EXIT(fe == probVec->getFESpace(i))
("Parallelization does not supported different FE spaces!\n");
}
void ParallelDomainVec::initParallelization(AdaptInfo *adaptInfo)
{
FUNCNAME("ParallelDomainVec::initParallelization()");
ParallelDomainBase::initParallelization(adaptInfo);
for (int i = 0; i < nComponents; i++)
for (int j = 0; j < nComponents; j++)
if (probVec->getSystemMatrix(i, j))
probVec->getSystemMatrix(i, j)->setRankDofs(isRankDof);
}
void ParallelDomainVec::solve()
{
FUNCNAME("ParallelDomainVec::solve()");
#if 0
#ifdef _OPENMP
double wtime = omp_get_wtime();
#endif
clock_t first = clock();
fillPetscMatrix(probVec->getSystemMatrix(), probVec->getRHS());
solvePetscMatrix(probVec->getSolution());
#ifdef _OPENMP
INFO(info, 8)("solution of discrete system needed %.5f seconds system time / %.5f seconds wallclock time\n",
TIME_USED(first, clock()),
omp_get_wtime() - wtime);
#else
INFO(info, 8)("solution of discrete system needed %.5f seconds\n",
TIME_USED(first, clock()));
#endif
#endif
}
}
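
The body of ParallelDomainVec::solve() is still disabled by the surrounding #if 0. With the guard removed, the active path would reduce to the two new entry points added by this commit; a sketch, with timing and INFO output elided:

void ParallelDomainVec::solve()
{
  FUNCNAME("ParallelDomainVec::solve()");

  // Assemble all component blocks into the single interleaved PETSc
  // matrix/vector and solve the coupled system.
  fillPetscMatrix(probVec->getSystemMatrix(), probVec->getRHS());
  solvePetscMatrix(probVec->getSolution());
}
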
// ============================================================================
// == ==
// == AMDiS - Adaptive multidimensional simulations ==
// == ==
// ============================================================================
// == ==
// == crystal growth group ==
// == ==
// == Stiftung caesar ==
// == Ludwig-Erhard-Allee 2 ==
// == 53175 Bonn ==
// == germany ==
// == ==
// ============================================================================
// == ==
// == http://www.caesar.de/cg/AMDiS ==
// == ==
// ============================================================================
/** \file ParallelDomainVec.h */
#ifndef AMDIS_PARALLELDOMAINVEC_H
#define AMDIS_PARALLELDOMAINVEC_H
#include "ParallelDomainBase.h"
namespace AMDiS {
class ParallelDomainVec : public ParallelDomainBase
{
public:
ParallelDomainVec(const std::string& name,
ProblemVec *problem,
ProblemInstatVec *problemInstat);
void initParallelization(AdaptInfo *adaptInfo);
protected:
/// Starts the solution of the linear system using Petsc.
void solve();
protected:
/// Pointer to the stationary problem.
ProblemVec *probVec;
};
}
#endif // AMDIS_PARALLELDOMAINVEC_H
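
For reference, a hypothetical wiring of the new class. Only the ParallelDomainVec calls are taken from this commit; creating probVec, probInstat, and adaptInfo is assumed to happen elsewhere in the usual AMDiS way.

#include "ParallelDomainVec.h"

void setupParallelVec(ProblemVec *probVec, ProblemInstatVec *probInstat,
                      AdaptInfo *adaptInfo)
{
  // The domain object partitions the mesh and creates the PETSc
  // matrix/vectors sized for all components of the vector-valued problem.
  ParallelDomainVec parallelDomain("vecDomain", probVec, probInstat);
  parallelDomain.initParallelization(adaptInfo);
}
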
@@ -71,12 +71,14 @@ namespace AMDiS {
if (oldSolution) {
WARNING("oldSolution already created\n");
} else {
if (initFlag.isSet(INIT_UH_OLD)) {
if (initFlag.isSet(INIT_UH_OLD))
createUhOld();
}
if (adoptProblem && adoptFlag.isSet(INIT_UH_OLD)) {
ProblemInstatScal* _adoptProblem = dynamic_cast<ProblemInstatScal*>(adoptProblem);
TEST_EXIT(_adoptProblem)("can't adopt oldSolution from problem which is not instationary and scalar");
ProblemInstatScal* _adoptProblem
= dynamic_cast<ProblemInstatScal*>(adoptProblem);
TEST_EXIT(_adoptProblem)
("can't adopt oldSolution from problem which is not instationary and scalar");
TEST_EXIT(!oldSolution)("oldSolution already created");
oldSolution = _adoptProblem->getOldSolution();
}
@@ -143,7 +145,8 @@
if (adoptProblem && adoptFlag.isSet(INIT_UH_OLD)) {
ProblemInstatVec* _adoptProblem = dynamic_cast<ProblemInstatVec*>(adoptProblem);