
Commit da59d993 authored by Thomas Witkowski

Work on general Dirichlet BCs for parallel computations.

parent 2969c018
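For orientation, here is a minimal sketch of the row-elimination technique for Dirichlet boundary conditions that the changes below build on. This is not AMDiS code; all names are illustrative. During assembly, rows that belong to Dirichlet DOFs are skipped and recorded; after assembly, each recorded row receives a single 1.0 on the diagonal so that, together with the prescribed value in the right-hand side, the solve reproduces the boundary value.

#include <map>
#include <set>
#include <vector>

// Illustrative stand-in for the assembled sparse matrix.
struct DirichletAwareMatrix {
  std::vector<std::map<int, double> > rows;  // row -> (col -> value)
  std::set<int> dirichletRows;               // rows owned by Dirichlet DOFs

  explicit DirichletAwareMatrix(int n) : rows(n) {}

  void add(int row, int col, double v) {
    if (dirichletRows.count(row))
      return;                  // leave the row blank during assembly
    rows[row][col] += v;
  }

  // After assembly: turn every recorded row into an identity row.
  void finalizeDirichletRows() {
    for (std::set<int>::iterator it = dirichletRows.begin();
         it != dirichletRows.end(); ++it) {
      rows[*it].clear();
      rows[*it][*it] = 1.0;
    }
  }
};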
@@ -238,18 +238,18 @@ namespace AMDiS {
if (condition->applyBoundaryCondition()) {
#ifdef HAVE_PARALLEL_DOMAIN_AMDIS
-      if (dofMap->isRankDof(rowIndices[i]))
-        applyDBCs.insert(static_cast<int>(row));
+      // if (dofMap->isRankDof(rowIndices[i])) {
+      applyDBCs.insert(row);
+      // dirichletDofs.push_back(row);
+      // }
#else
-      applyDBCs.insert(static_cast<int>(row));
+      applyDBCs.insert(row);
#endif
}
} else {
for (int j = 0; j < nCol; j++) {
DegreeOfFreedom col = colIndices[j];
-        if (fabs(elMat[i][j]) > 1e-10) {
-          ins[row][col] += elMat[i][j];
-        }
+        ins[row][col] += elMat[i][j];
}
}
}
@@ -493,11 +493,16 @@ namespace AMDiS {
{
FUNCNAME("DOFMatrix::removeRowsWithDBC()");
+    // Do the following only in sequential code. In parallel mode, the
+    // specific solver method must take care of the Dirichlet boundary
+    // conditions itself.
+#ifndef HAVE_PARALLEL_DOMAIN_AMDIS
    inserter_type &ins = *inserter;
    for (std::set<int>::iterator it = rows->begin(); it != rows->end(); ++it)
      ins[*it][*it] = 1.0;
    rows->clear();
+#endif
}
@@ -455,27 +455,25 @@ namespace AMDiS {
/// Maps local col indices of an element to global matrix indices.
vector<DegreeOfFreedom> colIndices;
-  /* \brief
-   * A set of row indices. When assembling the DOF matrix, all rows, that
-   * correspond to a dof at a dirichlet boundary, are ignored and the row is
-   * left blank. After assembling, the diagonal element of the matrix must be
-   * set to 1. The indices of all rows, where this should be done, are stored
-   * in this set.
-   */
+  /// A set of row indices. When assembling the DOF matrix, all rows that
+  /// correspond to a DOF at a Dirichlet boundary are ignored and left blank.
+  /// After assembling, the diagonal element of each such row must be set
+  /// to 1. The indices of all rows where this should be done are stored in
+  /// this set.
  std::set<int> applyDBCs;
-  /* \brief
-   * Number of non zero entries per row (average). For instationary problems this
-   * information may be used in the next timestep to accelerate insertion of
-   * elemnts during assembling.
-   */
-  int nnzPerRow;
+  /// Number of non-zero entries per row (average). For instationary problems
+  /// this information may be used in the next timestep to accelerate the
+  /// insertion of elements during assembling.
+  int nnzPerRow;
#ifdef HAVE_PARALLEL_DOMAIN_AMDIS
/// Is used in parallel computations to check whether specific DOFs in the
/// row FE spaces are owned by the rank or not. This is used to ensure that
/// Dirichlet BC is handled correctly in parallel computations.
FeSpaceDofMap *dofMap;
// std::set<DegreeOfFreedom> dirichletDofs;
#endif
/// Inserter object: implemented as pointer, allocated and deallocated as needed
@@ -219,10 +219,10 @@ namespace AMDiS {
elInfo = stack.traverseNext(elInfo);
}
#ifdef HAVE_PARALLEL_DOMAIN_AMDIS
    double localResult = result;
    MPI::COMM_WORLD.Allreduce(&localResult, &result, 1, MPI_DOUBLE, MPI_SUM);
#endif
return result;
}
@@ -250,6 +250,17 @@ namespace AMDiS {
return dofMap->isRankDof(dof);
}
+    inline void setDirichletDofValue(DegreeOfFreedom dof, T value)
+    {
+      dirichletDofValues[dof] = value;
+    }
+
+    map<DegreeOfFreedom, T>& getDirichletValues()
+    {
+      return dirichletDofValues;
+    }
#endif
protected:
@@ -282,10 +293,12 @@ namespace AMDiS {
#ifdef HAVE_PARALLEL_DOMAIN_AMDIS
FeSpaceDofMap *dofMap;
#endif
+    map<DegreeOfFreedom, T> dirichletDofValues;
#endif
};
/// Specifies which operation should be done after coarsening
typedef enum{
NO_OPERATION = 0,
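The new accessor pair above becomes useful in the parallel solvers (see removeDirichletBcDofs() further down): instead of writing boundary values into the vector during assembly, the solver reads the recorded values back and imposes them on the assembled system itself. A hedged sketch of that consumption pattern; DOFVectorLike, fixIdentityRow, and imposeDirichlet are illustrative stand-ins, not AMDiS API:

#include <map>

typedef int DegreeOfFreedom;

template <typename T>
struct DOFVectorLike {                     // stand-in for DOFVector<T>
  std::map<DegreeOfFreedom, T> dirichletDofValues;
  std::map<DegreeOfFreedom, T>& getDirichletValues() {
    return dirichletDofValues;
  }
};

// Impose recorded Dirichlet values on an assembled system A x = b.
template <typename T, typename Matrix, typename Rhs>
void imposeDirichlet(DOFVectorLike<T>& vec, Matrix& A, Rhs& b) {
  typedef typename std::map<DegreeOfFreedom, T>::iterator Iter;
  for (Iter it = vec.getDirichletValues().begin();
       it != vec.getDirichletValues().end(); ++it) {
    A.fixIdentityRow(it->first);   // zero the row, 1.0 on the diagonal
    b[it->first] = it->second;     // prescribe the boundary value
  }
}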
@@ -65,15 +65,22 @@ namespace AMDiS {
for (int i = 0; i < nBasFcts; i++) {
#ifdef HAVE_PARALLEL_DOMAIN_AMDIS
-      if (vector->isRankDof(dofIndices[i]))
+      // if (vector->isRankDof(dofIndices[i]))
#endif
      if (localBound[i] == boundaryType) {
+       double value = 0.0;
        if (f) {
          elInfo->coordToWorld(*(basFcts->getCoords(i)), worldCoords);
-         (*vector)[dofIndices[i]] = (*f)(worldCoords);
+         value = (*f)(worldCoords);
        }
        if (dofVec)
-         (*vector)[dofIndices[i]] = (*dofVec)[dofIndices[i]];
+         value = (*dofVec)[dofIndices[i]];
+#ifdef HAVE_PARALLEL_DOMAIN_AMDIS
+       vector->setDirichletDofValue(dofIndices[i], value);
+#else
+       (*vector)[dofIndices[i]] = value;
+#endif
}
}
}
@@ -76,13 +76,30 @@ namespace AMDiS {
}
-  void Element::createNewDofPtrs()
+  void Element::createNewDofPtrs(bool setDofs)
{
FUNCNAME("Element::setDofPtrs()");
TEST_EXIT_DBG(mesh)("no mesh!\n");
dof = mesh->createDofPtrs();
+    if (setDofs) {
+      for (int i = 0; i < mesh->getGeo(VERTEX); i++)
+        dof[i] = mesh->getDof(VERTEX);
+
+      if (mesh->getNumberOfDofs(EDGE))
+        for (int i = 0; i < mesh->getGeo(EDGE); i++)
+          dof[mesh->getNode(EDGE) + i] = mesh->getDof(EDGE);
+
+      if (mesh->getNumberOfDofs(FACE))
+        for (int i = 0; i < mesh->getGeo(FACE); i++)
+          dof[mesh->getNode(FACE) + i] = mesh->getDof(FACE);
+
+      if (mesh->getNumberOfDofs(CENTER))
+        for (int i = 0; i < mesh->getGeo(CENTER); i++)
+          dof[mesh->getNode(CENTER) + i] = mesh->getDof(CENTER);
+    }
}
@@ -538,7 +538,7 @@ namespace AMDiS {
void deserialize(std::istream &in);
/// Sets Element's \ref dof pointer.
-    void createNewDofPtrs();
+    void createNewDofPtrs(bool setDofs = false);
protected:
/// Sets Element's \ref index. Used by friend class Mesh.
@@ -803,7 +803,7 @@ namespace AMDiS {
DimVec<int> nDof;
/// Number of nodes on a single element where DOFs are located. Needed for
-    /// the (de-) allocation of the dof-vector on the element (\ref Element::dof).
+    /// the (de-) allocation of the DOF-vector on the element (\ref Element::dof).
/// Here "node" is equivalent to the number of basis functions on the element.
int nNodeEl;
@@ -12,6 +12,7 @@
#include "parallel/InteriorBoundary.h"
#include "parallel/ElementObjectDatabase.h"
#include "parallel/MeshDistributor.h"
#include "FiniteElemSpace.h"
#include "BasisFunction.h"
#include "Serializer.h"
@@ -177,8 +178,18 @@ namespace AMDiS {
bound.type = it->second;
-      AtomicBoundary& b = getNewPeriodic(otherElementRank);
-      b = bound;
+      if (MeshDistributor::sebastianMode) {
+        if (bound.rankObj.elIndex == 3 &&
+            bound.rankObj.ithObj == 1 ||
+            bound.rankObj.elIndex == 78 &&
+            bound.rankObj.ithObj == 2) {
+          AtomicBoundary& b = getNewPeriodic(otherElementRank);
+          b = bound;
+        }
+      } else {
+        AtomicBoundary& b = getNewPeriodic(otherElementRank);
+        b = bound;
+      }
}
}
@@ -68,6 +68,7 @@ namespace AMDiS {
return (*dof1 < *dof2);
}
+  bool MeshDistributor::sebastianMode = false;
MeshDistributor::MeshDistributor()
: problemStat(0),
@@ -1322,10 +1323,6 @@ namespace AMDiS {
// === Add new macro elements to mesh. ===
TEST_EXIT_DBG(feSpaces.size() == 1)("Not yet implemented!\n");
-    TEST_EXIT_DBG(feSpaces[0]->getBasisFcts()->getDegree() == 1)
-      ("Not yet implemented!\n");
for (std::set<MacroElement*>::iterator it = newMacroEl.begin();
it != newMacroEl.end(); ++it) {
MacroElement *mel = *it;
@@ -1336,10 +1333,7 @@ namespace AMDiS {
mel->setNeighbour(i, NULL);
// Create new DOFs for the macro element.
-    mel->getElement()->createNewDofPtrs();
-    for (int i = 0; i < mesh->getGeo(VERTEX); i++)
-      mel->getElement()->setDof(i, mesh->getDof(VERTEX));
+    mel->getElement()->createNewDofPtrs(true);
// Push the macro element to all macro elements in mesh.
mesh->getMacroElements().push_back(mel);
@@ -550,6 +550,8 @@ namespace AMDiS {
MeshLevelData levelData;
public:
+    static bool sebastianMode;
    /// The boundary DOFs are sorted by subobject entities, i.e., first all
    /// face DOFs, then all edge DOFs, and finally all vertex DOFs are added to
    /// the communication structure vectors, \ref sendDofs and \ref recvDofs.
@@ -382,25 +382,74 @@ namespace AMDiS {
stdMpi.startCommunication();
{
for (DofComm::Iterator it(dofComm->getRecvDofs(), 0, feSpaces[i]);
!it.end(); it.nextRank()) {
int rank = it.getRank();
if (meshLevel > 0)
rank = levelData->mapRank(rank, 0, meshLevel);
int counter = 0;
for (; !it.endDofIter(); it.nextDof()) {
if (dofMap.count(it.getDofIndex())) {
DegreeOfFreedom d = stdMpi.getRecvData(rank)[counter++];
if (globalIndex)
dofToMatIndex.add(i, dofMap[it.getDofIndex()].global, d);
else
dofToMatIndex.add(i, it.getDofIndex(), d);
}
}
}
+      // === Communicate DOFs on periodic boundaries. ===
+
+      stdMpi.clear();
+
+      for (DofComm::Iterator it(dofComm->getPeriodicDofs(), 0, feSpaces[i]);
+           !it.end(); it.nextRank()) {
+        vector<DegreeOfFreedom> sendGlobalDofs;
+
+        for (; !it.endDofIter(); it.nextDof())
+          if (dofMap.count(it.getDofIndex())) {
+            if (globalIndex) {
+              sendGlobalDofs.push_back(dofMap[it.getDofIndex()].global);
+              sendGlobalDofs.push_back(dofToMatIndex.get(i, dofMap[it.getDofIndex()].global));
+            } else {
+              sendGlobalDofs.push_back(dofToMatIndex.get(i, it.getDofIndex()));
+            }
+          }
+
+        int rank = it.getRank();
+        if (meshLevel > 0)
+          rank = levelData->mapRank(rank, 0, meshLevel);
+
+        stdMpi.send(rank, sendGlobalDofs);
+        stdMpi.recv(rank);
+      }
+
+      stdMpi.startCommunication();
+
+      for (DofComm::Iterator it(dofComm->getPeriodicDofs(), 0, feSpaces[i]);
+           !it.end(); it.nextRank()) {
+        int rank = it.getRank();
+        if (meshLevel > 0)
+          rank = levelData->mapRank(rank, 0, meshLevel);
+
+        int counter = 0;
+
+        for (; !it.endDofIter(); it.nextDof())
+          if (dofMap.count(it.getDofIndex())) {
+            if (globalIndex) {
+              dofToMatIndex.add(i,
+                                stdMpi.getRecvData(it.getRank())[counter++],
+                                stdMpi.getRecvData(it.getRank())[counter++]);
+            } else {
+              dofToMatIndex.add(i, it.getDofIndex(),
+                                stdMpi.getRecvData(it.getRank())[counter++]);
+            }
+          }
+      }
}
}
@@ -125,8 +125,9 @@ namespace AMDiS {
DegreeOfFreedom dof)
{
FUNCNAME("PetscSolver::isCoarseSpace()");
-    TEST_EXIT_DBG(coarseSpaceMap)
-      ("Subdomain solver does not contain a coarse space!\n");
+    if (coarseSpaceMap == NULL)
+      return false;
return (*coarseSpaceMap)[feSpace].isSet(dof);
}
@@ -22,6 +22,12 @@ namespace AMDiS {
using namespace std;
+  double FetiTimings::fetiSolve = 0.0;
+  double FetiTimings::fetiSolve01 = 0.0;
+  double FetiTimings::fetiSolve02 = 0.0;
+  double FetiTimings::fetiPreconditioner = 0.0;
// y = mat * x
int petscMultMatSchurPrimal(Mat mat, Vec x, Vec y)
{
@@ -44,25 +50,44 @@ namespace AMDiS {
// y = mat * x
int petscMultMatFeti(Mat mat, Vec x, Vec y)
{
FUNCNAME("petscMultMatFeti()");
// F = L inv(K_BB) trans(L) + L inv(K_BB) K_BPi inv(S_PiPi) K_PiB inv(K_BB) trans(L)
// => F = L [I + inv(K_BB) K_BPi inv(S_PiPi) K_PiB] inv(K_BB) trans(L)
double wtime = MPI::Wtime();
void *ctx;
MatShellGetContext(mat, &ctx);
FetiData* data = static_cast<FetiData*>(ctx);
MatMultTranspose(*(data->mat_lagrange), x, data->tmp_vec_b);
double wtime01 = MPI::Wtime();
data->subSolver->solveGlobal(data->tmp_vec_b, data->tmp_vec_b);
FetiTimings::fetiSolve01 += (MPI::Wtime() - wtime01);
MatMult(*(data->mat_lagrange), data->tmp_vec_b, data->tmp_vec_lagrange);
MatMult(data->subSolver->getMatCoarseInt(), data->tmp_vec_b, data->tmp_vec_primal);
wtime01 = MPI::Wtime();
KSPSolve(*(data->ksp_schur_primal), data->tmp_vec_primal, data->tmp_vec_primal);
FetiTimings::fetiSolve02 += (MPI::Wtime() - wtime01);
MatMult(data->subSolver->getMatIntCoarse(), data->tmp_vec_primal, data->tmp_vec_b);
wtime01 = MPI::Wtime();
data->subSolver->solveGlobal(data->tmp_vec_b, data->tmp_vec_b);
FetiTimings::fetiSolve01 += (MPI::Wtime() - wtime01);
MatMult(*(data->mat_lagrange), data->tmp_vec_b, y);
VecAXPBY(y, 1.0, 1.0, data->tmp_vec_lagrange);
FetiTimings::fetiSolve += (MPI::Wtime() - wtime);
return 0;
}
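Typeset, the operator that petscMultMatFeti applies (as stated in the comment at the top of the function) reads:

\[
F = L\,K_{BB}^{-1}L^{\top}
  + L\,K_{BB}^{-1}K_{B\Pi}\,S_{\Pi\Pi}^{-1}\,K_{\Pi B}\,K_{BB}^{-1}L^{\top}
  = L\left[I + K_{BB}^{-1}K_{B\Pi}\,S_{\Pi\Pi}^{-1}\,K_{\Pi B}\right]K_{BB}^{-1}L^{\top}
\]

Here L is the Lagrange constraint matrix (mat_lagrange), K_{BB} the interior block inverted by solveGlobal(), and S_{\Pi\Pi} the primal Schur complement handled by ksp_schur_primal.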
@@ -70,6 +95,8 @@ namespace AMDiS {
// y = PC * x
PetscErrorCode petscApplyFetiDirichletPrecon(PC pc, Vec x, Vec y)
{
+   double wtime = MPI::Wtime();
// Get data for the preconditioner
void *ctx;
PCShellGetContext(pc, &ctx);
@@ -125,6 +152,8 @@ namespace AMDiS {
// Multiply with scaled Lagrange constraint matrix.
MatMult(*(data->mat_lagrange_scaled), data->tmp_vec_b, y);
+   FetiTimings::fetiPreconditioner += (MPI::Wtime() - wtime);
return 0;
}
@@ -1452,6 +1481,7 @@ namespace AMDiS {
MSG("FETI-DP timing 09: %.5f seconds (create rhs vector)\n",
MPI::Wtime() - wtime);
wtime = MPI::Wtime();
+     FetiTimings::reset();
}
@@ -1463,6 +1493,11 @@ namespace AMDiS {
MSG("FETI-DP timing 10: %.5f seconds (application of FETI-DP operator)\n",
MPI::Wtime() - wtime);
wtime = MPI::Wtime();
MSG("FETI-DP timing 10a: %.5f [ %.5f %.5f ] seconds (FETI-DP KSP Solve)\n",
FetiTimings::fetiSolve, FetiTimings::fetiSolve01, FetiTimings::fetiSolve02);
MSG("FETI-DP timing 10b: %.5f seconds (FETI-DP KSP Solve)\n",
FetiTimings::fetiPreconditioner);
}
// === Solve for u_primals. ===
@@ -269,6 +269,27 @@ namespace AMDiS {
bool printTimings;
};
+  class FetiTimings {
+  private:
+    FetiTimings() {}
+
+  public:
+    static void reset()
+    {
+      fetiSolve = 0.0;
+      fetiSolve01 = 0.0;
+      fetiSolve02 = 0.0;
+      fetiPreconditioner = 0.0;
+    }
+
+  public:
+    static double fetiSolve;
+    static double fetiSolve01;
+    static double fetiSolve02;
+    static double fetiPreconditioner;
+  };
}
#endif
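For clarity, a small self-contained sketch of the accumulation pattern this class implements: PETSc shell callbacks add elapsed wall-clock time to the static counters across many KSP iterations, and the driver prints and resets them once per solve. Here now() stands in for MPI::Wtime(), and everything else is illustrative rather than AMDiS code.

#include <chrono>
#include <cstdio>

static double now() {   // stand-in for MPI::Wtime()
  using namespace std::chrono;
  return duration<double>(steady_clock::now().time_since_epoch()).count();
}

struct Timings {        // mirrors the static members of FetiTimings
  static double solve;
  static void reset() { solve = 0.0; }
};
double Timings::solve = 0.0;

void applyOperator() {  // plays the role of petscMultMatFeti()
  double wtime = now();
  // ... matrix-vector product ...
  Timings::solve += now() - wtime;  // accumulate over all invocations
}

int main() {
  for (int i = 0; i < 5; i++)
    applyOperator();
  std::printf("operator time: %.5f seconds\n", Timings::solve);
  Timings::reset();     // ready for the next solve
  return 0;
}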
@@ -102,7 +102,14 @@ namespace AMDiS {
MatAssemblyBegin(matIntInt, MAT_FINAL_ASSEMBLY);
MatAssemblyEnd(matIntInt, MAT_FINAL_ASSEMBLY);
+    // === Remove Dirichlet BC DOFs. ===
+    removeDirichletBcDofs(mat);
// === Init PETSc solver. ===
KSPCreate(mpiCommGlobal, &kspInterior);
KSPGetPC(kspInterior, &pcInterior);
KSPSetOperators(kspInterior, matIntInt, matIntInt, SAME_NONZERO_PATTERN);
@@ -296,6 +303,10 @@ namespace AMDiS {
}
+    // === Remove Dirichlet BC DOFs. ===
+    removeDirichletBcDofs(mat);
// === Create solver for the non primal (thus local) variables. ===
KSPCreate(mpiCommLocal, &kspInterior);
@@ -342,6 +353,10 @@ namespace AMDiS {
}
+    // === Remove Dirichlet BC DOFs. ===
+    removeDirichletBcDofs(vec);
VecAssemblyBegin(rhsInterior);
VecAssemblyEnd(rhsInterior);
@@ -350,6 +365,9 @@ namespace AMDiS {
VecAssemblyEnd(rhsCoarseSpace);
}
// === Remove null space, if requested. ===
if (removeRhsNullSpace) {
TEST_EXIT_DBG(coarseSpaceMap == NULL)("Not supported!\n");
@@ -409,6 +427,9 @@ namespace AMDiS {
{
FUNCNAME("PetscSolverGlobalMatrix::solveGlobal()");
+    double wtime = MPI::Wtime();
+    double t0 = 0.0, t1 = 0.0;
Vec tmp;
if (mpiCommLocal.Get_size() == 1)
VecCreateSeq(mpiCommLocal, interiorMap->getRankDofs(), &tmp);
@@ -428,7 +449,13 @@ namespace AMDiS {
VecRestoreArray(rhs, &rhsValues);
VecRestoreArray(tmp, &tmpValues);
+    t0 = MPI::Wtime() - wtime;
+    wtime = MPI::Wtime();
    KSPSolve(kspInterior, tmp, tmp);
+    t1 = MPI::Wtime() - wtime;
+    wtime = MPI::Wtime();
VecGetArray(tmp, &tmpValues);
VecGetArray(sol, &rhsValues);
@@ -440,6 +467,9 @@ namespace AMDiS {
VecRestoreArray(tmp, &tmpValues);
VecDestroy(&tmp);
+    t0 += MPI::Wtime() - wtime;
+    MSG("TIMING: %.5f %.5f\n", t0, t1);
}
@@ -712,7 +742,7 @@ namespace AMDiS {
if (rankOnly && !(*interiorMap)[feSpace].isRankDof(dofIt.getDOFIndex()))
continue;
-      if (coarseSpaceMap && isCoarseSpace(feSpace, dofIt.getDOFIndex())) {
+      if (isCoarseSpace(feSpace, dofIt.getDOFIndex())) {
TEST_EXIT_DBG(vecCoarse != PETSC_NULL)("Should not happen!\n");
int index = coarseSpaceMap->getMatIndex(nRowVec, dofIt.getDOFIndex());
@@ -752,6 +782,85 @@ namespace AMDiS {
}
+  void PetscSolverGlobalMatrix::removeDirichletBcDofs(Matrix<DOFMatrix*> *mat)
+  {
+    FUNCNAME("PetscSolverGlobalMatrix::removeDirichletBcDofs()");
+
+    vector<int> dofsInterior, dofsCoarse;
+
+    int nComponents = mat->getNumRows();
+    for (int i = 0; i < nComponents; i++) {
+      if ((*mat)[i][i]) {
+        const FiniteElemSpace *feSpace = (*mat)[i][i]->getRowFeSpace();
+        std::set<DegreeOfFreedom> &dirichletDofs = *((*mat)[i][i]->getApplyDBCs());
+
+        MSG("DIRICHLET DOFS: %d %d -> %d\n", i, i, dirichletDofs.size());