Commit 379e8d5c authored by Praetorius, Simon

Merge branch 'master' into 'issue/cmake_configuration'

# Conflicts:
#   AMDiS/cmake3/CMakeLists.txt
parents 39c59db5 d272ab51
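
Most of the diff below switches debug-only code from the AMDiS-specific guard `#if (DEBUG != 0)` to the standard `#ifndef NDEBUG` convention, under which debug code is compiled out whenever `NDEBUG` is defined (as CMake does for Release builds via `-DNDEBUG`). A minimal sketch of that convention, not taken from the AMDiS sources:

```
#include <cassert>
#include <iostream>

int main()
{
#ifndef NDEBUG
  // Compiled only when NDEBUG is undefined, i.e. in debug builds.
  // NDEBUG is the same macro that disables assert() in <cassert>.
  std::cout << "debug checks enabled\n";
#endif
  assert(2 + 2 == 4); // likewise compiled out when NDEBUG is defined
  return 0;
}
```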
@@ -334,7 +334,7 @@ namespace AMDiS { namespace Parallel {
onnz[i] = std::min(onnz[i], nOverallCols - nRankCols);
}
-#if (DEBUG != 0)
+#ifndef NDEBUG
int nMax = 0;
int nSum = 0;
for (int i = 0; i < nRankRows; i++) {
......
@@ -29,8 +29,8 @@
#include "parallel/MeshDistributor.h"
#include "parallel/MeshManipulation.h"
-#if (DEBUG != 0)
-#include "parallel/ParallelDebug.h"
+#ifndef NDEBUG
+#include "parallel/ParallelDebug.h"
#endif
#include "parallel/StdMpi.h"
#include "parallel/MeshPartitioner.h"
@@ -63,8 +63,8 @@
#include "ProblemStat.h"
#include "ProblemInstat.h"
#include "RefinementManager3d.h"
-#if (DEBUG != 0)
-#include "Debug.h"
+#ifndef NDEBUG
+#include "Debug.h"
#endif
#include "Timer.h"
#include "io/MacroReader.h"
@@ -264,7 +264,7 @@ namespace AMDiS { namespace Parallel {
// already refined in some way.
testForMacroMesh();
-#if (DEBUG != 0)
+#ifndef NDEBUG
// Check whether meshes come from the same macro mesh. The way is to compare
// the node coords of each macro element in the meshes.
debug::ElementIdxToCoords macroCoords;
@@ -297,7 +297,7 @@ namespace AMDiS { namespace Parallel {
elObjDb.setData(partitionMap, levelData);
-#if (DEBUG != 0)
+#ifndef NDEBUG
TEST_EXIT_DBG(dofMaps.size())("No DOF mapping defined!\n");
ParallelDebug::writeDebugFile(feSpaces[feSpaces.size() - 1],
*(dofMaps[0]),
@@ -313,7 +313,7 @@ namespace AMDiS { namespace Parallel {
createInitialPartitioning();
-#if (DEBUG != 0)
+#ifndef NDEBUG
std::vector<debug::ElementIdxToDofs> elMap(meshes.size());
for (size_t i = 0; i < meshes.size(); i++) {
debug::createSortedDofs(meshes[i], elMap[i]);
@@ -407,7 +407,7 @@ namespace AMDiS { namespace Parallel {
// === If in debug mode, make some tests. ===
-#if (DEBUG != 0)
+#ifndef NDEBUG
MSG("AMDiS runs in debug mode, so make some test ...\n");
ParallelDebug::testAllElements(*this);
@@ -424,7 +424,7 @@ namespace AMDiS { namespace Parallel {
// Remove periodic boundary conditions in sequential problem definition.
removePeriodicBoundaryConditions();
-#if (DEBUG != 0)
+#ifndef NDEBUG
ParallelDebug::testPeriodicBoundary(*this);
#endif
@@ -471,7 +471,7 @@ namespace AMDiS { namespace Parallel {
}
updateDofRelatedStruct(meshes[i]);
-#if (DEBUG != 0)
+#ifndef NDEBUG
ParallelDebug::testPeriodicBoundary(*this);
#endif
}
@@ -529,8 +529,8 @@ namespace AMDiS { namespace Parallel {
if (filename != "") {
MSG("Read macro weights from %s\n", filename.c_str());
-ifstream infile;
-infile.open(filename.c_str(), ifstream::in);
+std::ifstream infile;
+infile.open(filename.c_str(), std::ifstream::in);
while (!infile.eof()) {
int elNum, elWeight;
infile >> elNum;
@@ -656,7 +656,7 @@ namespace AMDiS { namespace Parallel {
filename);
filename += ".p" + lexical_cast<string>(mpiRank);
MSG("Start deserialization with %s\n", filename.c_str());
-ifstream in(filename.c_str());
+std::ifstream in(filename.c_str());
TEST_EXIT(!in.fail())("Could not open deserialization file: %s\n",
filename.c_str());
@@ -946,7 +946,7 @@ namespace AMDiS { namespace Parallel {
pair<Element*, int> edge1 =
make_pair(elObjDb.getElementPtr(*elIt, meshes[i]), edgeNoInEl[*elIt]);
-#if (DEBUG != 0)
+#ifndef NDEBUG
DofEdge dofEdge0 = edge0.first->getEdge(edge0.second);
DofEdge dofEdge1 = edge1.first->getEdge(edge1.second);
@@ -1226,7 +1226,7 @@ namespace AMDiS { namespace Parallel {
vector<int> meshAllValues(meshes.size(), 0);
for (size_t i = 0; i < meshes.size(); i++) {
-#if (DEBUG != 0)
+#ifndef NDEBUG
MSG("mesh[%d] change index = %d, stored last index = %d.\n",
i, meshes[i]->getChangeIndex(), lastMeshChangeIndexs[meshes[i]]);
#endif
@@ -1280,7 +1280,7 @@ namespace AMDiS { namespace Parallel {
bool meshChanged = false;
// === Check the boundaries and adapt mesh if necessary. ===
-#if (DEBUG != 0)
+#ifndef NDEBUG
MSG("Run checkAndAdaptBoundary for mesh[%d]...\n", i);
#endif
@@ -1325,7 +1325,7 @@ namespace AMDiS { namespace Parallel {
updateLocalGlobalNumbering();
-#if (DEBUG != 0)
+#ifndef NDEBUG
debug::writeMesh(feSpaces[0], -1, debugOutputDir + "mesh");
ParallelDebug::testPeriodicBoundary(*this);
#endif
@@ -1337,7 +1337,7 @@ namespace AMDiS { namespace Parallel {
if (repartitioningFailed > 0) {
-#if (DEBUG != 0)
+#ifndef NDEBUG
MSG("Repartitioning not tried because it has failed in the past!\n");
#endif
@@ -1463,7 +1463,7 @@ namespace AMDiS { namespace Parallel {
MeshStructure elCode;
elCode.init(boundIt->rankObj, elObjDb.getElementPtr(boundIt->rankObj.elIndex, mesh));
-#if (DEBUG != 0)
+#ifndef NDEBUG
ParallelDebug::followBoundary(mesh, *boundIt, elCode);
#endif
@@ -1581,7 +1581,7 @@ namespace AMDiS { namespace Parallel {
} else {
mpiComm.Bcast(&repartitioning, 1, MPI_INT, 0);
}
-#if (DEBUG != 0)
+#ifndef NDEBUG
if (repartitioning == 0) {
MSG("imbalanceFactor = %f < %f = imbalanceRepartitionBound\n", imbalanceFactor, imbalanceRepartitionBound);
}
@@ -1633,7 +1633,7 @@ namespace AMDiS { namespace Parallel {
Timer t;
-#if (DEBUG != 0)
+#ifndef NDEBUG
for (size_t i = 0; i < meshes.size(); i++)
ParallelDebug::testDoubleDofs(meshes[i]);
int writePartMesh = 1;
@@ -1709,7 +1709,7 @@ namespace AMDiS { namespace Parallel {
updateLocalGlobalNumbering();
-#if (DEBUG != 0)
+#ifndef NDEBUG
MSG("AMDiS runs in debug mode, so make some test ...\n");
ParallelDebug::writePartitioningFile(debugOutputDir + "partitioning",
@@ -1734,7 +1734,7 @@ namespace AMDiS { namespace Parallel {
{
FUNCNAME("MeshDistributor::quickRepartition()");
-#if (DEBUG != 0)
+#ifndef NDEBUG
MSG("... Run quickRepartition ...\n");
#endif
@@ -1954,7 +1954,7 @@ namespace AMDiS { namespace Parallel {
{
FUNCNAME("MeshDistributor::fullRepartition()");
-#if (DEBUG != 0)
+#ifndef NDEBUG
MSG("... Run fullRepartition ...\n");
#endif
@@ -1966,7 +1966,7 @@ namespace AMDiS { namespace Parallel {
MPI::Intracomm &mpiComm = MPI::COMM_WORLD;
-#if (DEBUG != 0)
+#ifndef NDEBUG
int nOldLeaves = mesh->getNumberOfLeaves();
mpi::globalAdd(mpiComm, nOldLeaves);
#endif
@@ -2190,7 +2190,7 @@ namespace AMDiS { namespace Parallel {
interchangeVectors[i],
domainMacroValues[macroId][j++]);
-#if (DEBUG != 0)
+#ifndef NDEBUG
MeshStructure code;
code.init(mesh, macroId);
TEST_EXIT(code.getCode() == domainMacroCodes[macroId].getCode())
@@ -2224,7 +2224,7 @@ namespace AMDiS { namespace Parallel {
}
updateDofRelatedStruct(mesh);
-#if (DEBUG != 0)
+#ifndef NDEBUG
int nNewLeaves = mesh->getNumberOfLeaves();
mpi::globalAdd(mpiComm, nNewLeaves);
@@ -2242,7 +2242,7 @@ namespace AMDiS { namespace Parallel {
elObjDb.create(partitionMap, levelData);
elObjDb.updateRankData();
-#if (DEBUG != 0)
+#ifndef NDEBUG
if (mpiRank == 0)
ParallelDebug::writePeriodicElObjInfo(*this, debugOutputDir);
#endif
@@ -2260,7 +2260,7 @@ namespace AMDiS { namespace Parallel {
intBoundary.create(levelData, elObjDb);
-#if (DEBUG != 0)
+#ifndef NDEBUG
for (int level = 0; level < levelData.getNumberOfLevels(); level++)
ParallelDebug::printBoundaryInfo(intBoundary[level]);
#endif
@@ -2383,7 +2383,7 @@ namespace AMDiS { namespace Parallel {
mesh->dofCompress();
-#if (DEBUG != 0)
+#ifndef NDEBUG
debug::ElementIdxToDofs elMap;
debug::createSortedDofs(mesh, elMap);
#endif
@@ -2400,7 +2400,7 @@ namespace AMDiS { namespace Parallel {
lastMeshChangeIndexs[mesh] = mesh->getChangeIndex();
-#if (DEBUG != 0)
+#ifndef NDEBUG
static int fileNumber(0); //improvised counter for adapt Iteration
stringstream ss;
ss << debugOutputDir << "elementMaps." << fileNumber ;
@@ -2434,14 +2434,14 @@ namespace AMDiS { namespace Parallel {
FUNCNAME("MeshDistributor::updateLocalGlobalNumbering()");
Timer t;
-#if (DEBUG != 0)
+#ifndef NDEBUG
bool printInfo = true;
Parameters::get("parallel->print dofmap info", printInfo);
#endif
for (size_t i = 0; i < dofMaps.size(); i++) {
dofMaps[i]->update();
-#if (DEBUG != 0)
+#ifndef NDEBUG
if (printInfo)
dofMaps[i]->printInfo();
#endif
@@ -2456,7 +2456,7 @@ namespace AMDiS { namespace Parallel {
dofMaps[i]->updateMatIndex();
}
-#if (DEBUG != 0)
+#ifndef NDEBUG
if (printInfo) {
int test = 0;
Parameters::get("parallel->remove periodic boundary", test);
......
@@ -98,7 +98,7 @@ namespace AMDiS { namespace Parallel {
elInfo = stack.traverseNext(elInfo);
}
-#if (DEBUG != 0)
+#ifndef NDEBUG
DOFVector<WorldVector<double> > coords(feSpace, "dofCorrds");
feSpace->getMesh()->getDofIndexCoords(coords);
#endif
@@ -173,7 +173,7 @@ namespace AMDiS { namespace Parallel {
el1->getAllDofs(feSpace, b1, dofs1, true, &dofGeoIndex1);
-#if (DEBUG != 0)
+#ifndef NDEBUG
if (feSpaces.size())
debug::testDofsByCoords(coords, dofs0, dofs1);
else
@@ -222,7 +222,7 @@ namespace AMDiS { namespace Parallel {
el0->getAllDofs(feSpace, b0, dofs0, true, &dofGeoIndex0);
el1->getAllDofs(feSpace, b1, dofs1, true, &dofGeoIndex1);
-#if (DEBUG != 0)
+#ifndef NDEBUG
if (feSpaces.size())
debug::testDofsByCoords(coords, dofs0, dofs1);
else
@@ -327,7 +327,7 @@ namespace AMDiS { namespace Parallel {
// Create traverse stack and traverse within the mesh until the element,
// which should be fitted to the mesh structure code, is reached.
TraverseStack stack;
-#if (DEBUG != 0)
+#ifndef NDEBUG
ElInfo *elInfo =
stack.traverseFirstOneMacro(mesh, boundEl.elIndex, -1, traverseFlag);
......
@@ -30,6 +30,15 @@
#include <parmetis.h>
#include <mpi.h>
+#ifndef REALTYPEWIDTH
+#define real_t float
+#endif
+#ifndef PARMETIS_PSR_COUPLED
+#define PARMETIS_PSR_COUPLED 1
+#define PARMETIS_PSR_UNCOUPLED 2
+#endif
#include "AMDiS_fwd.h"
#include "Global.h"
#include "parallel/MeshPartitioner.h"
@@ -182,7 +191,7 @@ namespace AMDiS { namespace Parallel {
void createPartitionMap(std::map<int, int>& partitionMap);
-void setItr(double value)
+void setItr(real_t value)
{
itr = value;
}
@@ -195,7 +204,7 @@ namespace AMDiS { namespace Parallel {
protected:
ParMetisMesh *parMetisMesh;
-double itr;
+real_t itr;
};
} }
......
@@ -177,7 +177,7 @@ namespace AMDiS
int firstRow = firstRow0 + firstRow1;
int mpiSize = MPI::COMM_WORLD.Get_size();
-#if (DEBUG != 0)
+#ifndef NDEBUG
int mpiRank = MPI::COMM_WORLD.Get_rank();
#endif
vector<int> allFirstRow0(mpiSize + 1, 0);
@@ -303,7 +303,7 @@ namespace AMDiS
PCFactorSetMatSolverPackage(pc, matSolverPackage);
PCSetFromOptions(pc);
-#if DEBUG != 0
+#ifndef NDEBUG
MSG("PetscOptionsView:\n");
PetscViewer viewer;
PetscViewerCreate(PETSC_COMM_WORLD, &viewer);
......
@@ -147,7 +147,7 @@ namespace AMDiS { namespace Parallel {
{
FUNCNAME("PetscSolverFeti::initialize()");
-#if (DEBUG != 0)
+#ifndef NDEBUG
MSG("Init FETI-DP on mesh level %d\n", meshLevel);
#endif
@@ -751,7 +751,7 @@ namespace AMDiS { namespace Parallel {
MatAssemblyEnd(mat_lagrange, MAT_FINAL_ASSEMBLY);
-#if (DEBUG != 0)
+#ifndef NDEBUG
{
int nZeroRows = PetscSolverFetiDebug::testZeroRows(mat_lagrange);
int m,n;
@@ -1833,7 +1833,7 @@ namespace AMDiS { namespace Parallel {
Vec nullSpaceBasis;
VecCreateNest(domainComm, 2, PETSC_NULL, vecArray, &nullSpaceBasis);
-#if (DEBUG != 0)
+#ifndef NDEBUG
PetscSolverFetiDebug::writeNullSpace(*this, nullSpaceBasis);
#endif
@@ -1870,7 +1870,7 @@ namespace AMDiS { namespace Parallel {
return;
}
-#if (DEBUG != 0)
+#ifndef NDEBUG
PetscInt nRow, nCol;
MatGetLocalSize(subdomain->getMatInterior(), &nRow, &nCol);
mpi::globalAdd(nRow);
......
@@ -44,7 +44,7 @@ namespace AMDiS { namespace Parallel {
int nRankRows = (*interiorMap)[feSpace].nRankDofs;
int nOverallRows = (*interiorMap)[feSpace].nOverallDofs;
-#if (DEBUG != 0)
+#ifndef NDEBUG
MSG("Fill petsc matrix 1 needed %.5f seconds\n", MPI::Wtime() - wtime);
#endif
@@ -97,7 +97,7 @@ namespace AMDiS { namespace Parallel {
MatCreateNest(domainComm, nBlocks, PETSC_NULL, nBlocks, PETSC_NULL,
&(nestMat[0]), &getMatInterior());
-#if (DEBUG != 0)
+#ifndef NDEBUG
MSG("Fill petsc matrix 2 needed %.5f seconds\n", MPI::Wtime() - wtime);
#endif
......
@@ -87,7 +87,7 @@ namespace AMDiS { namespace Parallel {
}
Parameters::get("parallel->print matrix info", printMatInfo);
-#if DEBUG != 0
+#ifndef NDEBUG
bool printOptionsInfo = false;
Parameters::get("parallel->debug->print options info", printOptionsInfo);
if (printOptionsInfo) {
@@ -110,7 +110,7 @@ namespace AMDiS { namespace Parallel {
TEST_EXIT_DBG(interiorMap)("No parallel mapping object defined!\n");
TEST_EXIT_DBG(seqMat)("No DOF matrix defined!\n");
-#if (DEBUG != 0)
+#ifndef NDEBUG
Timer t;
#endif
@@ -123,7 +123,7 @@ namespace AMDiS { namespace Parallel {
// === Create PETSc vector (solution and a temporary vector). ===
-#if (DEBUG != 0)
+#ifndef NDEBUG
MSG("Fill petsc matrix 1 needed %.5f seconds\n", t.elapsed());
t.reset();
#endif
@@ -136,7 +136,7 @@ namespace AMDiS { namespace Parallel {
if ((*seqMat)[i][j])
setDofMatrix((*seqMat)[i][j], i, j);
-#if (DEBUG != 0)
+#ifndef NDEBUG
MSG("Fill petsc matrix 2 needed %.5f seconds\n", t.elapsed());
t.reset();
#endif
@@ -162,7 +162,7 @@ namespace AMDiS { namespace Parallel {
initPreconditioner(*seqMat, mat[0][0]);
-#if (DEBUG != 0)
+#ifndef NDEBUG
MSG("Fill petsc matrix 3 needed %.5f seconds\n", t.elapsed());
#endif
......
@@ -532,7 +532,7 @@ namespace AMDiS { namespace Parallel {
MPI::Request::Waitall(requestCounter, request);
-#if (DEBUG != 0)
+#ifndef NDEBUG
bool testall = MPI::Request::Testall(requestCounter, request);
TEST_EXIT(testall)("Should not happen!\n");
#endif
......
@@ -140,7 +140,7 @@ namespace AMDiS { namespace Parallel {
zoltan.Set_Param("OBJ_WEIGHT_DIM", "1");
-#if (DEBUG != 0)
+#ifndef NDEBUG
zoltan.Set_Param("DEBUG_LEVEL", "1");
#else
zoltan.Set_Param("DEBUG_LEVEL", "0");
......
@@ -127,7 +127,7 @@ namespace AMDiS {
MSG("Residual norm: ||b-Ax|| = %e\n", residual);
}
-#if DEBUG != 0
+#ifndef NDEBUG
if (getIterations() > 0) {
MSG("Nr. of iterations needed = %d\n", getIterations());
}
......
AMDiS (Adaptive MultiDimensional Simulations) is a C++ library to solve a broad class of partial differential equations (PDEs) using adaptive finite elements. Here you will find information and tutorials about using, installing, and extending AMDiS.
## Building
The AMDiS library uses [CMake](https://cmake.org) to build. To configure with CMake, we suggest creating a build directory and running:
```
mkdir build && cd build
cmake ../AMDiS
```
## Installing
To install the library, you may provide an installation directory by calling CMake with the additional argument:
```
cmake -DCMAKE_INSTALL_PREFIX:PATH=[install_dir] .
```
and just run the `install` target:
```
cmake --build . --target install
```
## Demos
Some demo programs using the AMDiS library are implemented in the subdirectory `demo`. To configure and build them, run CMake again with the AMDiS installation directory specified:
```
mkdir demo/build && cd demo/build
cmake -DAMDIS_DIR:PATH=[install_dir]/share/amdis ..
cmake --build . --target all
```
## Tests
The tests are implemented in the subdirectory `test` and can be built and run by using CMake:
```
mkdir test/build && cd test/build
cmake -DAMDIS_DIR:PATH=[install_dir]/share/amdis ..
cmake --build . --target all
```
## Documentation
The documentation is built using [Doxygen](http://www.doxygen.org). Simply run the command in the `doc` subdirectory:
```
cd doc
doxygen
```
The HTML documentation is then generated in the `doc/html` folder.
## System requirements
AMDiS can be built with various C++ compilers, e.g.
- gcc, version >= 4.6
- clang, version >= 3.1
- intel, version >= 2013
- MSVC, version >= 11.0
A detailed overview about compiler compatibility is listed in the wiki-page [Compatibility](https://gitlab.math.tu-dresden.de/iwr/amdis/wikis/compatibility).
We assume the following libraries to be found in standard locations:
- [boost](http://boost.org) (modules: system, iostreams, filesystem, program_options, and date_time), version >= 1.48
- [CMake](https://cmake.org), version >= 3.1
For the parallel AMDiS we additionally require
- [ParMETIS](http://glaros.dtc.umn.edu/gkhome/metis/parmetis/overview), version >= 4.0
- [PETSc](https://www.mcs.anl.gov/petsc), version >= 3.3
When PETSc is configured with ParMETIS, the version from PETSc can be used directly.
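For example (a sketch; the exact flags may vary between PETSc versions), PETSc can be built together with its own METIS/ParMETIS via its configure script:
```
./configure --download-metis --download-parmetis
make all
```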
## CMake options
When configuring AMDiS several options can be modified. For an interactive gui, use either `ccmake`, or `cmake-gui`.
| Option | Type | Description |
| -------------- | ---- | ----------- |
| ENABLE_UMFPACK | BOOL | Compile with support for suitesparse UMFPack library |
| ENABLE_COMPRESSION | BOOL | Allow compressed output of .vtk and .arh files. Needs zlib and libbz2 to be installed on the system. |
| ENABLE_PARALLEL_DOMAIN | BOOL | Switch to parallel AMDiS. Needs PETSc and ParMETIS to be installed. |
| PETSC_DIR | PATH | Path to the PETSc root directory, containing the lib/ and include/ subdirectories. |
| PARMETIS_DIR | PATH | Path to the ParMETIS root directory, containing the lib/ and include/ subdirectories. |
| ENABLE_EXTENSIONS | BOOL | Add additional features, not yet part of the standard AMDiS. Experimental! Should be used with care. |
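For example, a non-interactive configuration of the parallel AMDiS with UMFPACK support might look like this (the bracketed paths are placeholders for your local directories):
```
cmake -DENABLE_PARALLEL_DOMAIN:BOOL=ON \
      -DENABLE_UMFPACK:BOOL=ON \
      -DPETSC_DIR:PATH=[petsc_dir] \
      -DPARMETIS_DIR:PATH=[parmetis_dir] \
      -DCMAKE_INSTALL_PREFIX:PATH=[install_dir] \
      ../AMDiS
```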
#!/bin/bash
AMDIS_VERSION="1.1"
BUILD_DIR=$( pwd )
SRC_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
# default parameters
INSTALL_PREFIX="/usr/local"
DEBUG="0"
VERBOSE="OFF"
ENABLE_EXTENSIONS="OFF"
ENABLE_UMFPACK="OFF"
ENABLE_COMPRESSION="OFF"
ENABLE_PARALLEL="OFF"
BOOST_PREFIX="${BOOST_ROOT}"
DOWNLOAD_BOOST="0"
BOOST_VERSION="1.62.0"
PETSC_PREFIX="${PETSC_DIR}"
DOWNLOAD_PETSC="0"
PETSC_VERSION="3.5.4"
PARMETIS_PREFIX=""
ZOLTAN_PREFIX=""
DOWNLOAD_ZOLTAN="0"
ZOLTAN_VERSION="3.83"
# a help message
help() {
cat << EOF
$(basename "$0") configures AMDiS by calling the corresponding cmake commands.
Usage: $(basename "$0") [OPTION]... [VAR=VALUE]...
Defaults for the options are specified in brackets.
Configuration:
-h, --help Display this help and exit.
-V, --version Display version information of AMDiS and exit.
Installation directories:
--prefix=PREFIX Install files in PREFIX [${INSTALL_PREFIX}]
By default, 'make install' will install all the files in
'${INSTALL_PREFIX}/lib', '${INSTALL_PREFIX}/include' and '${INSTALL_PREFIX}/share'.