
Commit 6e3ccc91 authored by Thomas Witkowski

Backup my code before making big changes ....

parent e8831f17
@@ -41,8 +41,6 @@ namespace AMDiS {
if (overlap)
computeNonLocalIndices();
computeMatIndex(0);
}
@@ -91,52 +89,6 @@ namespace AMDiS {
}
void GlobalDofMap::computeMatIndex(int offset)
{
FUNCNAME("GlobalDofMap::computeMatIndex()");
map<DegreeOfFreedom, int> dofToMatIndex;
for (map<DegreeOfFreedom, MultiIndex>::iterator it = dofMap.begin(); it != dofMap.end(); ++it) {
if (!nonRankDofs.count(it->first)) {
int globalMatIndex = it->second.local - rStartDofs + offset;
dofToMatIndex[it->first] = globalMatIndex;
}
}
if (!overlap)
return;
TEST_EXIT_DBG(sendDofs != NULL && recvDofs != NULL)
("No communicator given!\n");
StdMpi<vector<DegreeOfFreedom> > stdMpi(*mpiComm);
for (DofComm::Iterator it(*sendDofs, feSpace); !it.end(); it.nextRank()) {
vector<DegreeOfFreedom> sendGlobalDofs;
for (; !it.endDofIter(); it.nextDof())
if (dofMap.count(it.getDofIndex()))
sendGlobalDofs.push_back(dofToMatIndex[it.getDofIndex()]);
stdMpi.send(it.getRank(), sendGlobalDofs);
}
for (DofComm::Iterator it(*recvDofs, feSpace); !it.end(); it.nextRank())
stdMpi.recv(it.getRank());
stdMpi.startCommunication();
{
int i = 0;
for (DofComm::Iterator it(*recvDofs, feSpace); !it.end(); it.nextRank())
for (; !it.endDofIter(); it.nextDof())
if (dofMap.count(it.getDofIndex()))
dofToMatIndex[it.getDofIndex()] =
stdMpi.getRecvData(it.getRank())[i++];
}
}
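
The function above follows an owner-computes pattern: each rank assigns contiguous matrix indices to the DOFs it owns and, in the overlap case, ships them via StdMpi to the ranks that hold those DOFs only as ghosts. A minimal standalone sketch of that pattern (not AMDiS code; the MPI exchange is simulated with plain maps and all values are made up):

#include <iostream>
#include <map>

int main()
{
    // Two simulated ranks: rank 0 owns DOFs {0,1,2}, rank 1 owns {3,4}.
    // Each rank's block of matrix rows starts at rStart (cf. rStartDofs).
    std::map<int, int> owner;       // DOF -> owning rank
    std::map<int, int> localIndex;  // DOF -> rank-local index on the owner
    owner[0] = 0; owner[1] = 0; owner[2] = 0; owner[3] = 1; owner[4] = 1;
    localIndex[0] = 0; localIndex[1] = 1; localIndex[2] = 2;
    localIndex[3] = 0; localIndex[4] = 1;
    int rStart[2] = {0, 3};

    // Phase 1: every rank computes matrix indices for the DOFs it owns.
    std::map<int, int> dofToMatIndex;
    for (std::map<int, int>::iterator it = owner.begin();
         it != owner.end(); ++it)
        dofToMatIndex[it->first] = localIndex[it->first] + rStart[it->second];

    // Phase 2 (the role of the StdMpi send/recv above): a rank that holds
    // DOF 3 only as a ghost would receive dofToMatIndex[3] == 3 from rank 1
    // instead of computing it locally.
    for (std::map<int, int>::iterator it = dofToMatIndex.begin();
         it != dofToMatIndex.end(); ++it)
        std::cout << "DOF " << it->first << " -> matrix index "
                  << it->second << "\n";
    return 0;
}
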
void GlobalDofMap::print()
{
FUNCNAME("GlobalDofMap::print()");
...
@@ -26,6 +26,7 @@
#include "parallel/DofComm.h"
#include "parallel/MpiHelper.h"
#include "parallel/ParallelTypes.h"
#include "parallel/StdMpi.h"
#ifndef AMDIS_FE_SPACE_MAPPING_H
#define AMDIS_FE_SPACE_MAPPING_H
@@ -38,6 +39,53 @@ namespace AMDiS {
int local, global;
};
/** \brief
* Defines, for each system component, a mapping from global DOF indices to
* global matrix indices. The mapping is defined for all DOFs in the rank's
* subdomain. When periodic boundary conditions are used, the mapping also
* stores information about the periodic associations of the rank's DOFs on
* periodic boundaries.
*/
class DofToMatIndex
{
public:
DofToMatIndex() {}
/// Reset the data structure.
inline void clear()
{
data.clear();
}
/// Add a new mapping.
inline void add(int component, DegreeOfFreedom dof, int globalMatIndex)
{
data[component][dof] = globalMatIndex;
}
/// Map a global DOF index to the global matrix index for a specific
/// system component number.
inline int get(int component, DegreeOfFreedom dof)
{
FUNCNAME("DofToMatIndex::get()");
TEST_EXIT_DBG(data.count(component))
("No mapping data for component %d available!\n", component);
TEST_EXIT_DBG(data[component].count(dof))
("Mapping for DOF %d in component %d does not exists!\n",
dof, component);
return data[component][dof];
}
private:
/// The mapping data. For each system component there is a specific map that
/// maps global DOF indices to global matrix indices.
map<int, map<DegreeOfFreedom, int> > data;
};
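
A hypothetical usage sketch of the DofToMatIndex interface above. It compiles stand-alone by stubbing AMDiS types: DegreeOfFreedom is reduced to a plain int typedef and the TEST_EXIT_DBG checks become asserts:

#include <cassert>
#include <map>

typedef int DegreeOfFreedom;   // stub for the AMDiS typedef

// Mirrors the class above; asserts stand in for TEST_EXIT_DBG.
class DofToMatIndexSketch
{
public:
    void clear() { data.clear(); }

    void add(int component, DegreeOfFreedom dof, int globalMatIndex)
    {
        data[component][dof] = globalMatIndex;
    }

    int get(int component, DegreeOfFreedom dof)
    {
        std::map<int, std::map<DegreeOfFreedom, int> >::iterator c =
            data.find(component);
        assert(c != data.end());             // "No mapping data for component"
        std::map<DegreeOfFreedom, int>::iterator d = c->second.find(dof);
        assert(d != c->second.end());        // "Mapping for DOF does not exist"
        return d->second;
    }

private:
    std::map<int, std::map<DegreeOfFreedom, int> > data;
};

int main()
{
    DofToMatIndexSketch m;
    m.add(0, /*dof=*/7, /*matIndex=*/42);   // component 0
    m.add(1, /*dof=*/7, /*matIndex=*/43);   // same DOF, second component
    assert(m.get(0, 7) == 42 && m.get(1, 7) == 43);
    return 0;
}
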
class GlobalDofMap
{
public:
@@ -50,9 +98,9 @@ namespace AMDiS {
GlobalDofMap(MPI::Intracomm* m)
: mpiComm(m),
feSpace(NULL),
sendDofs(NULL),
recvDofs(NULL),
feSpace(NULL),
needGlobalMapping(false),
nRankDofs(0),
nLocalDofs(0),
@@ -98,6 +146,11 @@ namespace AMDiS {
return static_cast<bool>(dofMap.count(dof));
}
bool isRankDof(DegreeOfFreedom dof)
{
return !(static_cast<bool>(nonRankDofs.count(dof)));
}
unsigned int size()
{
return dofMap.size();
@@ -114,8 +167,6 @@ namespace AMDiS {
void computeNonLocalIndices();
void computeMatIndex(int offset);
void print();
void setFeSpace(const FiniteElemSpace *fe)
@@ -128,20 +179,24 @@ namespace AMDiS {
overlap = b;
}
void setDofComm(DofComm &pSend, DofComm &pRecv)
void setNeedGlobalMapping(bool b)
{
sendDofs = &pSend;
recvDofs = &pRecv;
needGlobalMapping = b;
}
void setNeedGlobalMapping(bool b)
void setDofComm(DofComm &pSend, DofComm &pRecv)
{
needGlobalMapping = b;
sendDofs = &pSend;
recvDofs = &pRecv;
}
private:
MPI::Intracomm* mpiComm;
DofComm *sendDofs;
DofComm *recvDofs;
const FiniteElemSpace *feSpace;
///
@@ -149,10 +204,6 @@ namespace AMDiS {
std::set<DegreeOfFreedom> nonRankDofs;
DofComm *sendDofs;
DofComm *recvDofs;
bool needGlobalMapping;
public:
///
@@ -168,6 +219,9 @@ namespace AMDiS {
public:
FeSpaceData()
: mpiComm(NULL),
sendDofs(NULL),
recvDofs(NULL),
overlap(false),
feSpaces(0),
nRankDofs(-1),
nOverallDofs(-1),
@@ -273,15 +327,18 @@ namespace AMDiS {
void init(MPI::Intracomm *m,
vector<const FiniteElemSpace*> &fe,
bool needGlobalMapping)
bool needGlobalMapping,
bool ol)
{
mpiComm = m;
feSpaces = fe;
overlap = ol;
for (unsigned int i = 0; i < feSpaces.size(); i++) {
feSpacesUnique.insert(feSpaces[i]);
addFeSpace(feSpaces[i]);
data[feSpaces[i]].setNeedGlobalMapping(needGlobalMapping);
data[feSpaces[i]].setOverlap(overlap);
}
}
@@ -295,7 +352,70 @@ namespace AMDiS {
nLocalDofs = getLocalDofs(feSpaces);
nOverallDofs = getOverallDofs(feSpaces);
rStartDofs = getStartDofs(feSpaces);
computeMatIndex();
}
void computeMatIndex()
{
dofToMatIndex.clear();
int offset = rStartDofs;
for (unsigned int i = 0; i < feSpaces.size(); i++) {
map<DegreeOfFreedom, MultiIndex>& dofMap = data[feSpaces[i]].getMap();
typedef map<DegreeOfFreedom, MultiIndex>::iterator ItType;
for (ItType it = dofMap.begin(); it != dofMap.end(); ++it) {
if (data[feSpaces[i]].isRankDof(it->first)) {
int globalMatIndex =
it->second.local - data[feSpaces[i]].rStartDofs + offset;
dofToMatIndex.add(i, it->first, globalMatIndex);
}
}
if (!overlap)
continue;
TEST_EXIT_DBG(sendDofs != NULL && recvDofs != NULL)
("No communicator given!\n");
StdMpi<vector<DegreeOfFreedom> > stdMpi(*mpiComm);
for (DofComm::Iterator it(*sendDofs, feSpaces[i]);
!it.end(); it.nextRank()) {
vector<DegreeOfFreedom> sendGlobalDofs;
for (; !it.endDofIter(); it.nextDof())
if (dofMap.count(it.getDofIndex()))
sendGlobalDofs.push_back(dofToMatIndex.get(i, it.getDofIndex()));
stdMpi.send(it.getRank(), sendGlobalDofs);
}
for (DofComm::Iterator it(*recvDofs, feSpaces[i]);
!it.end(); it.nextRank())
stdMpi.recv(it.getRank());
stdMpi.startCommunication();
{
int counter = 0;
for (DofComm::Iterator it(*recvDofs, feSpaces[i]);
!it.end(); it.nextRank()) {
for (; !it.endDofIter(); it.nextDof()) {
if (dofMap.count(it.getDofIndex())) {
DegreeOfFreedom d = stdMpi.getRecvData(it.getRank())[counter++];
dofToMatIndex.add(i, it.getDofIndex(), d);
}
}
}
}
offset += data[feSpaces[i]].nRankDofs;
}
}
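
The offset accumulation at the end of the loop above is what interleaves several FE spaces into one matrix numbering: each space gets a consecutive block of nRankDofs indices inside the rank's global range. A toy calculation with hypothetical sizes:

#include <iostream>

int main()
{
    // Suppose the rank's global range starts at rStartDofs = 100 and it holds
    // two FE spaces with 30 and 20 rank-owned DOFs. Space 0 then occupies
    // matrix indices [100, 130) and space 1 occupies [130, 150).
    int rStartDofs = 100;
    int nRankDofs[2] = {30, 20};

    int offset = rStartDofs;
    for (int i = 0; i < 2; i++) {
        std::cout << "FE space " << i << ": matrix indices [" << offset
                  << ", " << offset + nRankDofs[i] << ")\n";
        offset += nRankDofs[i];   // same accumulation as in the loop above
    }
    return 0;
}
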
inline int mapLocal(int index, int ithFeSpace)
{
@@ -335,9 +455,25 @@ namespace AMDiS {
return -1;
}
void setDofComm(DofComm &pSend, DofComm &pRecv)
{
sendDofs = &pSend;
recvDofs = &pRecv;
for (std::set<const FiniteElemSpace*>::iterator it = feSpacesUnique.begin();
it != feSpacesUnique.end(); ++it)
data[*it].setDofComm(pSend, pRecv);
}
private:
MPI::Intracomm* mpiComm;
DofComm *sendDofs;
DofComm *recvDofs;
bool overlap;
map<const FiniteElemSpace*, T> data;
vector<const FiniteElemSpace*> feSpaces;
@@ -345,6 +481,10 @@ namespace AMDiS {
std::set<const FiniteElemSpace*> feSpacesUnique;
int nRankDofs, nLocalDofs, nOverallDofs, rStartDofs;
/// Mapping from global DOF indices to global matrix indices under
/// consideration of possibly multiple components.
DofToMatIndex dofToMatIndex;
};
}
...
@@ -224,6 +224,11 @@ namespace AMDiS {
createIndexB(feSpace);
}
primalDofMap.setDofComm(meshDistributor->getSendDofs(),
meshDistributor->getRecvDofs());
lagrangeMap.setDofComm(meshDistributor->getSendDofs(),
meshDistributor->getRecvDofs());
primalDofMap.update();
dualDofMap.update();
lagrangeMap.update();
@@ -242,6 +247,10 @@ namespace AMDiS {
MSG("nRankLagrange = %d nOverallLagrange = %d\n",
lagrangeMap[feSpace].nRankDofs,
lagrangeMap[feSpace].nOverallDofs);
TEST_EXIT_DBG(localDofMap[feSpace].size() + primalDofMap[feSpace].size() ==
static_cast<unsigned int>(feSpace->getAdmin()->getUsedDofs()))
("Should not happen!\n");
}
}
@@ -272,10 +281,6 @@ namespace AMDiS {
primalDofMap[feSpace].insertRankDof(*it);
else
primalDofMap[feSpace].insert(*it);
primalDofMap[feSpace].setOverlap(true);
primalDofMap[feSpace].setDofComm(meshDistributor->getSendDofs(),
meshDistributor->getRecvDofs());
}
@@ -283,6 +288,22 @@ namespace AMDiS {
{
FUNCNAME("PetscSolverFeti::createDuals()");
// === Create global index of the dual nodes on each rank. ===
DofContainer allBoundaryDofs;
meshDistributor->getAllBoundaryDofs(feSpace, allBoundaryDofs);
for (DofContainer::iterator it = allBoundaryDofs.begin();
it != allBoundaryDofs.end(); ++it)
if (!isPrimal(feSpace, **it))
dualDofMap[feSpace].insertRankDof(**it);
}
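
The classification in createDuals() is a plain set difference: dual DOFs are exactly the boundary DOFs that were not selected as primal. A self-contained sketch with made-up indices, where a set lookup stands in for isPrimal():

#include <iostream>
#include <set>
#include <vector>

int main()
{
    // Hypothetical indices: four boundary DOFs, two of which are primal
    // (e.g. coarse-grid vertices).
    std::vector<int> allBoundaryDofs;
    allBoundaryDofs.push_back(2);
    allBoundaryDofs.push_back(5);
    allBoundaryDofs.push_back(7);
    allBoundaryDofs.push_back(9);

    std::set<int> primalDofs;
    primalDofs.insert(5);
    primalDofs.insert(9);

    for (std::vector<int>::iterator it = allBoundaryDofs.begin();
         it != allBoundaryDofs.end(); ++it)
        if (!primalDofs.count(*it))            // mirrors !isPrimal(feSpace, dof)
            std::cout << "dual DOF " << *it << "\n";   // prints 2 and 7
    return 0;
}
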
void PetscSolverFeti::createLagrange(const FiniteElemSpace *feSpace)
{
FUNCNAME("PetscSolverFeti::createLagrange()");
// === For each dual node owned by the rank, create the set of ranks ===
// === that contain this node (denoted by W(x_j)). ===
@@ -336,21 +357,6 @@ namespace AMDiS {
stdMpi.getRecvData(it.getRank())[i++];
}
// === Create global index of the dual nodes on each rank. ===
DofContainer allBoundaryDofs;
meshDistributor->getAllBoundaryDofs(feSpace, allBoundaryDofs);
for (DofContainer::iterator it = allBoundaryDofs.begin();
it != allBoundaryDofs.end(); ++it)
if (!isPrimal(feSpace, **it))
dualDofMap[feSpace].insertRankDof(**it);
}
void PetscSolverFeti::createLagrange(const FiniteElemSpace *feSpace)
{
FUNCNAME("PetscSolverFeti::createLagrange()");
// === Reserve for each dual node, on the rank that owns this node, the ===
// === appropriate number of Lagrange constraints. ===
@@ -367,9 +373,6 @@ namespace AMDiS {
}
}
lagrangeMap[feSpace].nRankDofs = nRankLagrange;
lagrangeMap[feSpace].setOverlap(true);
lagrangeMap[feSpace].setDofComm(meshDistributor->getSendDofs(),
meshDistributor->getRecvDofs());
}
@@ -383,21 +386,16 @@ namespace AMDiS {
// === the global index of all B nodes, insert all interior nodes first, ===
// === without defining a correct index. ===
nLocalInterior = 0;
nLocalInterior[feSpace] = 0;
for (int i = 0; i < admin->getUsedSize(); i++) {
if (admin->isDofFree(i) == false &&
isPrimal(feSpace, i) == false &&
dualDofMap[feSpace].isSet(i) == false) {
localDofMap[feSpace].insertRankDof(i, nLocalInterior);
nLocalInterior++;
localDofMap[feSpace].insertRankDof(i, nLocalInterior[feSpace]);
nLocalInterior[feSpace]++;
}
}
TEST_EXIT_DBG(nLocalInterior + primalDofMap[feSpace].size() +
dualDofMap[feSpace].size() ==
static_cast<unsigned int>(admin->getUsedDofs()))
("Should not happen!\n");
// === And finally, add the global indices of all dual nodes. ===
for (map<DegreeOfFreedom, MultiIndex>::iterator it = dualDofMap[feSpace].getMap().begin();
@@ -446,7 +444,6 @@ namespace AMDiS {
// Set the constraint for all components of the system.
for (int k = 0; k < nComponents; k++) {
int rowIndex = index * nComponents + k;
// int colIndex = it->second * nComponents + k;
int colIndex =
(localDofMap[feSpace].rStartDofs +
localDofMap[feSpace][it->first].local) * nComponents + k;
@@ -470,8 +467,6 @@ namespace AMDiS {
{
FUNCNAME("PetscSolverFeti::createSchurPrimal()");
const FiniteElemSpace *feSpace = meshDistributor->getFeSpace(0);
if (schurPrimalSolver == 0) {
MSG("Create iterative schur primal solver!\n");
@@ -538,8 +533,10 @@ namespace AMDiS {
int maxLocalPrimal = mat_b_primal_cols.size();
mpi::globalMax(maxLocalPrimal);
TEST_EXIT(mat_b_primal_cols.size() == primalDofMap.getLocalDofs())
TEST_EXIT(static_cast<int>(mat_b_primal_cols.size()) ==
primalDofMap.getLocalDofs())
("Should not happen!\n");
for (map<int, vector<pair<int, double> > >::iterator it = mat_b_primal_cols.begin();
it != mat_b_primal_cols.end(); ++it) {
Vec tmpVec;
@@ -862,10 +859,10 @@ namespace AMDiS {
// === Create all sets and indices. ===
primalDofMap.init(mpiComm, feSpaces, true);
dualDofMap.init(mpiComm, feSpaces, false);
lagrangeMap.init(mpiComm, feSpaces, true);
localDofMap.init(mpiComm, feSpaces, false);
primalDofMap.init(mpiComm, feSpaces, true, true);
dualDofMap.init(mpiComm, feSpaces, false, false);
lagrangeMap.init(mpiComm, feSpaces, true, true);
localDofMap.init(mpiComm, feSpaces, false, false);
updateDofData();
@@ -958,7 +955,7 @@ namespace AMDiS {
for (cursor_type cursor = begin<row>((*mat)[i][j]->getBaseMatrix()),
cend = end<row>((*mat)[i][j]->getBaseMatrix()); cursor != cend; ++cursor) {
bool rowPrimal = primalDofMap[feSpace].isSet(*cursor);
bool rowPrimal = isPrimal(feSpace, *cursor);
cols.clear();
colsOther.clear();
@@ -1002,26 +999,26 @@ namespace AMDiS {
int rowIndex = localDofMap[feSpace][*cursor].local;
int colIndex = localDofMap[feSpace][col(*icursor)].local;
if (rowIndex < nLocalInterior) {
if (colIndex < nLocalInterior) {
if (rowIndex < nLocalInterior[feSpace]) {
if (colIndex < nLocalInterior[feSpace]) {
int colIndex2 = localDofMap.mapLocal(col(*icursor), j);
colsLocal.push_back(colIndex2);
valuesLocal.push_back(value(*icursor));
} else {
int colIndex2 =
(localDofMap[feSpace][col(*icursor)].local - nLocalInterior) *
(localDofMap[feSpace][col(*icursor)].local - nLocalInterior[feSpace]) *
nComponents + j;
colsLocalOther.push_back(colIndex2);
valuesLocalOther.push_back(value(*icursor));
}
} else {
if (colIndex < nLocalInterior) {
if (colIndex < nLocalInterior[feSpace]) {
int colIndex2 = localDofMap.mapLocal(col(*icursor), j);
colsLocalOther.push_back(colIndex2);
valuesLocalOther.push_back(value(*icursor));
} else {
int colIndex2 = (localDofMap[feSpace][col(*icursor)].local - nLocalInterior) *
int colIndex2 = (localDofMap[feSpace][col(*icursor)].local - nLocalInterior[feSpace]) *
nComponents + j;
colsLocal.push_back(colIndex2);
@@ -1082,7 +1079,7 @@ namespace AMDiS {
{
int rowIndex = localDofMap[feSpace][*cursor].local;
if (rowIndex < nLocalInterior) {
if (rowIndex < nLocalInterior[feSpace]) {
int rowIndex2 = localDofMap[feSpace][*cursor].local * nComponents + i;
MatSetValues(mat_interior_interior, 1, &rowIndex2, colsLocal.size(),
@@ -1093,7 +1090,7 @@ namespace AMDiS {
&(colsLocalOther[0]), &(valuesLocalOther[0]), INSERT_VALUES);
} else {
int rowIndex2 =
(localDofMap[feSpace][*cursor].local - nLocalInterior) * nComponents + i;
(localDofMap[feSpace][*cursor].local - nLocalInterior[feSpace]) * nComponents + i;
MatSetValues(mat_duals_duals, 1, &rowIndex2, colsLocal.size(),
&(colsLocal[0]), &(valuesLocal[0]), INSERT_VALUES);
@@ -1110,9 +1107,9 @@ namespace AMDiS {
{
int rowIndex = localDofMap[feSpace][*cursor].local;
if (rowIndex >= nLocalInterior) {
if (rowIndex >= nLocalInterior[feSpace]) {
int rowIndex2 =
(localDofMap[feSpace][*cursor].local - nLocalInterior) * nComponents + i;
(localDofMap[feSpace][*cursor].local - nLocalInterior[feSpace]) * nComponents + i;
MatSetValues(mat_duals_duals, 1, &rowIndex2, colsLocal.size(),
&(colsLocal[0]), &(valuesLocal[0]), INSERT_VALUES);
...
@@ -235,7 +235,7 @@ namespace AMDiS {
KSP ksp_interior;
int nLocalInterior;
map<const FiniteElemSpace*, int> nLocalInterior;
};
}
...
@@ -26,59 +26,12 @@
#include <boost/tuple/tuple.hpp>
#include "AMDiS_fwd.h"
#include "parallel/PetscSolver.h"
#include "parallel/FeSpaceMapping.h"
namespace AMDiS {
using namespace std;
/** \brief
* Defines, for each system component, a mapping from global DOF indices to
* global matrix indices. The mapping is defined for all DOFs in the rank's
* subdomain. When periodic boundary conditions are used, the mapping also
* stores information about the periodic associations of the rank's DOFs on
* periodic boundaries.
*/
class DofToMatIndex
{
public:
DofToMatIndex() {}
/// Reset the data structure.
inline void clear()
{
data.clear();
}
/// Add a new mapping.
inline void add(int component, DegreeOfFreedom dof, int globalMatIndex)
{
data[component][dof] = globalMatIndex;
}
/// Map a global DOF index to the global matrix index for a specific
/// system component number.
inline int get(int component, DegreeOfFreedom dof)
{
FUNCNAME("DofToMatIndex::get()");
TEST_EXIT_DBG(data.count(component))
("No mapping data for component %d available!\n", component);
TEST_EXIT_DBG(data[component].count(dof))
("Mapping for DOF %d in component %d does not exists!\n",
dof, component);
return data[component][dof];
}
private: