
Commit 5ba917be authored by Thomas Witkowski

Integration of the BDDCML library.

parent 425a0885
@@ -61,6 +61,7 @@ endif(Boost_FOUND)
set(AMDIS_NEED_ZOLTAN @ENABLE_ZOLTAN@)
set(AMDIS_HAS_PARALLEL_DOMAIN @ENABLE_PARALLEL_DOMAIN@)
set(AMDIS_NEED_UMFPACK @ENABLE_UMFPACK@)
set(AMDIS_NEED_BDDCML @ENABLE_BDDCML@)
set(AMDIS_NEED_MKL @ENABLE_MKL@)
set(AMDIS_USE_FILE ${AMDIS_DIR}/AMDISUse.cmake)
set(AMDIS_COMPILEFLAGS "@COMPILEFLAGS@")
@@ -101,6 +102,14 @@ if(AMDIS_NEED_UMFPACK)
endif()
endif(AMDIS_NEED_UMFPACK)
if(AMDIS_NEED_BDDCML)
set(AMDIS_BDDCML_PATH @BDDCML_PATH@)
list(APPEND AMDIS_INCLUDE_DIRS ${AMDIS_BDDCML_PATH})
set(AMDIS_BDDCML_LIB @BDDCML_LIB@)
list(APPEND AMDIS_LIBRARIES ${AMDIS_BDDCML_LIB})
endif(AMDIS_NEED_BDDCML)
#add directories for reinit
list(APPEND AMDIS_INCLUDE_DIRS ${AMDIS_INCLUDE_DIR}/reinit)
@@ -36,6 +36,7 @@ SET(ENABLE_PARALLEL_DOMAIN "OFF" CACHE STRING "use parallel domain decomposition
option(USE_PETSC_DEV false)
option(ENABLE_ZOLTAN false)
option(ENABLE_UMFPACK "use umfpack solver" false)
option(ENABLE_BDDCML "Use of BDDCML library" false)
if(ENABLE_INTEL)
@@ -239,7 +240,9 @@ if(ENABLE_PARALLEL_DOMAIN)
find_package(PETSc REQUIRED)
include_directories(${PETSC_DIR}/include ${PETSC_DIR}/${PETSC_ARCH}/include)
list(APPEND AMDIS_INCLUDE_DIRS ${PETSC_DIR}/include ${PETSC_DIR}/${PETSC_ARCH}/include)
list(APPEND PARALLEL_DOMAIN_AMDIS_SRC ${SOURCE_DIR}/parallel/PetscMultigridPrecon.cc
list(APPEND PARALLEL_DOMAIN_AMDIS_SRC
${SOURCE_DIR}/parallel/BddcMlSolver.cc
${SOURCE_DIR}/parallel/PetscMultigridPrecon.cc
${SOURCE_DIR}/parallel/PetscSolver.cc
${SOURCE_DIR}/parallel/PetscProblemStat.cc
${SOURCE_DIR}/parallel/PetscSolverFeti.cc
@@ -279,6 +282,34 @@ if(ENABLE_UMFPACK)
SET(RPM_DEPEND_STR "blas")
endif(ENABLE_UMFPACK)
if(ENABLE_BDDCML)
find_file(BDDCML_H bddcml_interface_c.h
HINTS ENV CPATH
DOC "Header bddcml_interface_c.h for the BDDCML library.")
if(BDDCML_H)
get_filename_component(BDDCML_PATH ${BDDCML_H} PATH)
include_directories(${BDDCML_PATH})
list(APPEND COMPILEFLAGS "-DHAVE_BDDC_ML=1")
list(APPEND COMPILEFLAGS "-DAdd_")
else()
message(FATAL_ERROR "Could not find BDDCML headers.")
endif()
find_library(BDDCML_LIB bddcml
HINTS ENV LIBRARY_PATH
DOC "BDDCML library")
if(BDDCML_LIB)
list(APPEND AMDIS_LIBS ${BDDCML_LIB})
else()
message(FATAL_ERROR "Could not find the BDDCML library")
endif()
endif(ENABLE_BDDCML)
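The -DAdd_ define added above selects the Fortran name-mangling convention that BDDCML's C interface header expects. As a hedged sketch of what such a convention switch does (F77_NAME and solve_step are hypothetical, for illustration only; the real mapping is done inside bddcml_interface_c.h):

// Hypothetical illustration of the Add_ mangling convention: with -DAdd_,
// a Fortran routine is reached from C/C++ via a trailing underscore.
#ifdef Add_
#define F77_NAME(name) name##_
#else
#define F77_NAME(name) name
#endif

// A Fortran SUBROUTINE SOLVE_STEP(N, X) would then be declared as:
extern "C" void F77_NAME(solve_step)(int* n, double* x);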
SET(COMPOSITE_SOURCE_DIR ${SOURCE_DIR}/compositeFEM)
SET(COMPOSITE_FEM_SRC ${COMPOSITE_SOURCE_DIR}/CFE_Integration.cc
${COMPOSITE_SOURCE_DIR}/CFE_NormAndErrorFcts.cc
@@ -58,7 +58,7 @@ namespace AMDiS {
tsModulo);
TEST_EXIT(name != "")("No filename!\n");
Parameters::get(problem->getName() + "->output->append index", appendIndex);
Parameters::get(problem->getName() + "->output->append serialization index", appendIndex);
Parameters::get(problem->getName() + "->output->index length", indexLength);
Parameters::get(problem->getName() + "->output->index decimals", indexDecimals);
}
@@ -77,7 +77,7 @@ namespace AMDiS {
TEST_EXIT(name != "")("No filename!\n");
Parameters::get(problem->getName() + "->output->append index", appendIndex);
Parameters::get(problem->getName() + "->output->append serialization index", appendIndex);
Parameters::get(problem->getName() + "->output->index length", indexLength);
Parameters::get(problem->getName() + "->output->index decimals", indexDecimals);
}
@@ -118,8 +118,6 @@ namespace AMDiS {
filename += string(timeStr);
}
filename += ".ser";
#if HAVE_PARALLEL_DOMAIN_AMDIS
filename += ".p" + boost::lexical_cast<std::string>(MPI::COMM_WORLD.Get_rank());
#endif
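The hunk above touches the per-rank suffix appended to serialization filenames in parallel builds. As a standalone sketch of the same idea, assuming the MPI C API and std::to_string instead of the deprecated MPI C++ bindings and boost::lexical_cast (perRankName is a hypothetical helper, not part of AMDiS):

#include <mpi.h>
#include <string>

// Sketch: build a per-process filename such as "solution.ser.p3".
std::string perRankName(const std::string& base)
{
  int rank = 0;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  return base + ".ser.p" + std::to_string(rank);
}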
@@ -14,20 +14,25 @@ extern "C" {
}
#include "parallel/BddcMlSolver.h"
#include "parallel/MpiHelper.h"
namespace AMDiS {
#ifdef HAVE_BDDC_ML
void BddcMlSolver::fillPetscMatrix(Matrix<DOFMatrix*> *mat)
void BddcMlSolver::fillPetscMatrix(Matrix<DOFMatrix*> *m)
{
FUNCNAME("BddcMlSolver::fillPetscMatrix()");
mat = m;
}
void BddcMlSolver::fillPetscRhs(SystemVector *vec)
{
FUNCNAME("BddcMlSolver::fillPetscRhs()");
rhsVec = vec;
}
@@ -35,8 +40,11 @@ namespace AMDiS {
{
FUNCNAME("BddcMlSolver::solvePetscMatrix()");
TEST_EXIT(rhsVec)("Should not happen!\n");
TEST_EXIT(mat)("Should not happen!\n");
int nComponents = vec.getSize();
const FiniteElemSüace *feSpace = vec.getFeSpace(0);
const FiniteElemSpace *feSpace = vec.getFeSpace(0);
Mesh *mesh = feSpace->getMesh();
@@ -51,7 +59,7 @@ namespace AMDiS {
}
map<int, int> mapElIndex;
int c = nLeafEls;
int nLeafEls = 0;
for (std::set<int>::iterator it = leafElIndex.begin();
it != leafElIndex.end(); ++it)
mapElIndex[*it] = nLeafEls++;
@@ -62,7 +70,7 @@ namespace AMDiS {
int nSubdomains = meshDistributor->getMpiSize();
int length = 1;
int nSubPerProc = 1;
MPI_Fint c2f = MPI_Comm_c2f(MPI::COMM_WORLD);
MPI_Fint c2f = MPI_Comm_c2f(meshDistributor->getMpiComm());
int verboseLevel = 2;
int numbase = 0;
@@ -92,7 +100,7 @@ namespace AMDiS {
int nelems = nLeafEls;
// local number of nodes
int nnods = feSpace->getUsedSize();
int nnods = feSpace->getAdmin()->getUsedSize();
// local number of dofs
int ndofs = nnods * nComponents;
@@ -117,40 +125,163 @@
nnet[i] = 3;
// local array with number of DOFs per node.
int nndf[nnod];
for (int i = 0; i < nnod; i++)
int nndf[nnods];
for (int i = 0; i < nnods; i++)
nndf[i] = nComponents;
// array of indices of subdomain nodes in global numbering
int isngn[nnod];
for (int i = 0; i < nnod; i++)
nnod[i] = meshDistributor->mapLocalToGlobal(feSpace, i);
int isngn[nnods];
for (int i = 0; i < nnods; i++)
isngn[i] = meshDistributor->mapLocalToGlobal(feSpace, i);
// array of indices of subdomain variables in global numbering
int isvgvn[ndofs];
for (int j = 0; j < nnods; j++)
for (int i = 0; i < nComponents; i++)
isvgvn[j * nComponents + i] =
meshDistributor->mapLocalToGlobal(feSpace, j) * nComponents + i;
// array of indices of subdomain elements in global numbering
int isegn[nelems];
int rStartEl, nOverallEl;
mpi::getDofNumbering(meshDistributor->getMpiComm(),
nelems, rStartEl, nOverallEl);
for (int i = 0; i < nelems; i++)
isegn[i] = rStartEl + i;
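For orientation, a hedged worked example of the subdomain arrays assembled above, using a hypothetical subdomain of two linear triangles on four nodes with nComponents = 2 (all values illustrative only):

// Hypothetical 2-triangle subdomain, 4 local nodes, 2 components per node:
int inet[]  = {0, 1, 2,  1, 3, 2}; // concatenated element connectivity (local node ids)
int nnet[]  = {3, 3};              // nodes per element; 3 matches the linear triangles above
int nndf[]  = {2, 2, 2, 2};        // DOFs per node, i.e. nComponents
int isngn[] = {10, 11, 14, 15};    // global indices of the local nodes
// isvgvn then interleaves components: variable (node j, component i) gets
// global index isngn[j] * nComponents + i, exactly as in the loops above.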
int lxyz1 = nnods;
int lxyz2 = 2;
// local array with coordinates of nodes
double xyz[lxyz1 * lxyz2];
{
DOFVector<WorldVector<double> > coordDofs(feSpace, "tmp");
mesh->getDofIndexCoords(feSpace, coordDofs);
for (int i = 0; i < lxyz2; i++)
for (int j = 0; j < nnods; j++)
xyz[i * nnods + j] = coordDofs[j][i];
}
// local array of indices denoting dirichlet boundary data
int ifix[ndofs];
for (int i = 0; i < ndofs; i++)
ifix[i] = -1;
// local array of values for dirichlet boundary data
double fixv[ndofs];
// local rhs data
double rhs[ndofs];
for (int i = 0; i < nComponents; i++) {
DOFVector<double>& dofvec = *(rhsVec->getDOFVector(i));
for (int j = 0; j < nnods; j++)
rhs[j * nComponents + i] = dofvec[j];
}
// Completeness of the rhs vector on subdomains
int is_rhs_complete = 1;
// Local array with initial solution guess
double sol[ndofs];
for (int i = 0; i < ndofs; i++)
sol[i] = 0.0;
// matrix type (set here to unsymmetric)
int matrixtype = 0;
/*
bddcml_upload_subdomain_data(
&nelem,
&nnod,
&ndof,
&ndim,
&meshdim,
&isub,
&nelems,
&nnods,
&ndofs,
inet,
&linet,
nnet,
&nelems,
nndf,
&nnods,
isngn,
&nnods,
);
*/
// Non zero structure of matrix
vector<int> i_sparse;
vector<int> j_sparse;
vector<double> a_sparse;
for (int i = 0; i < nComponents; i++)
for (int j = 0; j < nComponents; j++)
if ((*mat)[i][j])
addDofMatrix((*mat)[i][j],
i_sparse, j_sparse, a_sparse, nComponents, i, j);
// Number of non-zero entries in matrix
int la = i_sparse.size();
// Matrix is assembled
int is_assembled_int = 1;
bddcml_upload_subdomain_data(&nelem,
&nnod,
&ndof,
&ndim,
&meshdim,
&isub,
&nelems,
&nnods,
&ndofs,
inet,
&linet,
nnet,
&nelems,
nndf,
&nnods,
isngn,
&nnods,
isvgvn,
&ndofs,
isegn,
&nelems,
xyz,
&lxyz1,
&lxyz2,
ifix,
&ndofs,
fixv,
&ndofs,
rhs,
&ndofs,
&is_rhs_complete,
sol,
&ndofs,
&matrixtype,
&(i_sparse[0]),
&(j_sparse[0]),
&(a_sparse[0]),
&la,
&is_assembled_int);
int use_defaults_int = 1;
int parallel_division_int = 1;
int use_arithmetic_int = 1;
int use_adaptive_int = 1;
bddcml_setup_preconditioner(&matrixtype,
&use_defaults_int,
&parallel_division_int,
&use_arithmetic_int,
&use_adaptive_int);
int method = 1;
double tol = 1.e-6;
int maxit = 1000;
int ndecrmax = 30;
int num_iter = 0;
int converged_reason = 0;
double condition_number = 0.0;
bddcml_solve(&c2f,
&method,
&tol,
&maxit,
&ndecrmax,
&num_iter,
&converged_reason,
&condition_number);
MSG("BDDCML converged reason: %d within %d iterations \n",
converged_reason, num_iter);
bddcml_finalize();
}
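The matrix travels to BDDCML in coordinate (COO) triplet form: entry k of the subdomain matrix is (i_sparse[k], j_sparse[k], a_sparse[k]) in the interleaved global numbering. A minimal hypothetical example of the layout addDofMatrix() produces:

#include <vector>

// Hypothetical 2x2 block [[4, -1], [-1, 4]] stored as COO triplets:
std::vector<int>    i_sparse = {0, 0, 1, 1};
std::vector<int>    j_sparse = {0, 1, 0, 1};
std::vector<double> a_sparse = {4.0, -1.0, -1.0, 4.0};
int la = (int) i_sparse.size(); // number of nonzeros, as passed to bddcml_upload_subdomain_data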
@@ -161,6 +292,53 @@ namespace AMDiS {
FUNCNAME("BddcMlSolver::destroyMatrixData()");
}
void BddcMlSolver::addDofMatrix(DOFMatrix* dmat,
vector<int>& i_sparse,
vector<int>& j_sparse,
vector<double>& a_sparse,
int nComponents,
int ithRowComponent,
int ithColComponent)
{
FUNCNAME("BddcMlSolver::addDofMatrix()");
TEST_EXIT(dmat)("Should not happen!\n");
const FiniteElemSpace *feSpace = dmat->getFeSpace();
using mtl::tag::row; using mtl::tag::nz; using mtl::begin; using mtl::end;
namespace traits = mtl::traits;
typedef DOFMatrix::base_matrix_type Matrix;
traits::col<Matrix>::type col(dmat->getBaseMatrix());
traits::const_value<Matrix>::type value(dmat->getBaseMatrix());
typedef traits::range_generator<row, Matrix>::type cursor_type;
typedef traits::range_generator<nz, cursor_type>::type icursor_type;
for (cursor_type cursor = begin<row>(dmat->getBaseMatrix()),
cend = end<row>(dmat->getBaseMatrix()); cursor != cend; ++cursor) {
int rowIndex =
meshDistributor->mapLocalToGlobal(feSpace, *cursor) * nComponents +
ithRowComponent;
for (icursor_type icursor = begin<nz>(cursor), icend = end<nz>(cursor);
icursor != icend; ++icursor) {
int colIndex =
meshDistributor->mapLocalToGlobal(feSpace, col(*icursor)) * nComponents +
ithColComponent;
double val = value(*icursor);
i_sparse.push_back(rowIndex);
j_sparse.push_back(colIndex);
a_sparse.push_back(val);
}
}
}
#endif
}
@@ -36,7 +36,9 @@ namespace AMDiS {
{
public:
BddcMlSolver()
: PetscSolver()
: PetscSolver(),
rhsVec(NULL),
mat(NULL)
{}
void fillPetscMatrix(Matrix<DOFMatrix*> *mat);
@@ -46,6 +48,19 @@ namespace AMDiS {
void solvePetscMatrix(SystemVector &vec, AdaptInfo *adaptInfo);
void destroyMatrixData();
protected:
void addDofMatrix(DOFMatrix* mat,
vector<int>& i_sparse,
vector<int>& j_sparse,
vector<double>& a_sparse,
int nComponents,
int ithRowComponent,
int ithColComponent);
protected:
SystemVector *rhsVec;
Matrix<DOFMatrix*> *mat;
};
#endif
@@ -233,7 +233,8 @@ namespace AMDiS {
if (firstTimestep)
firstTimestep = false;
if (fixFirstTimesteps > 0)
fixFirstTimesteps--;
} while (rejected ||
(dbgTimestepStudy && (studyTimestep + 1 < dbgTimesteps.size())));