Commit 6e3ccc91 authored by Thomas Witkowski

Backup my code before making big changes ....

parent e8831f17
@@ -41,8 +41,6 @@ namespace AMDiS {
     if (overlap)
       computeNonLocalIndices();
-
-    computeMatIndex(0);
   }
@@ -91,52 +89,6 @@ namespace AMDiS {
   }
 
-  void GlobalDofMap::computeMatIndex(int offset)
-  {
-    FUNCNAME("GlobalDofMap::computeMatIndex()");
-
-    map<DegreeOfFreedom, int> dofToMatIndex;
-
-    for (map<DegreeOfFreedom, MultiIndex>::iterator it = dofMap.begin(); it != dofMap.end(); ++it) {
-      if (!nonRankDofs.count(it->first)) {
-        int globalMatIndex = it->second.local - rStartDofs + offset;
-        dofToMatIndex[it->first] = globalMatIndex;
-      }
-    }
-
-    if (!overlap)
-      return;
-
-    TEST_EXIT_DBG(sendDofs != NULL && recvDofs != NULL)
-      ("No communicator given!\n");
-
-    StdMpi<vector<DegreeOfFreedom> > stdMpi(*mpiComm);
-
-    for (DofComm::Iterator it(*sendDofs, feSpace); !it.end(); it.nextRank()) {
-      vector<DegreeOfFreedom> sendGlobalDofs;
-
-      for (; !it.endDofIter(); it.nextDof())
-        if (dofMap.count(it.getDofIndex()))
-          sendGlobalDofs.push_back(dofToMatIndex[it.getDofIndex()]);
-
-      stdMpi.send(it.getRank(), sendGlobalDofs);
-    }
-
-    for (DofComm::Iterator it(*recvDofs, feSpace); !it.end(); it.nextRank())
-      stdMpi.recv(it.getRank());
-
-    stdMpi.startCommunication();
-
-    {
-      int i = 0;
-      for (DofComm::Iterator it(*recvDofs, feSpace); !it.end(); it.nextRank())
-        for (; !it.endDofIter(); it.nextDof())
-          if (dofMap.count(it.getDofIndex()))
-            dofToMatIndex[it.getDofIndex()] =
-              stdMpi.getRecvData(it.getRank())[i++];
-    }
-  }
-
   void GlobalDofMap::print()
   {
     FUNCNAME("GlobalDofMap::print()");
...
@@ -26,6 +26,7 @@
 #include "parallel/DofComm.h"
 #include "parallel/MpiHelper.h"
 #include "parallel/ParallelTypes.h"
+#include "parallel/StdMpi.h"
 
 #ifndef AMDIS_FE_SPACE_MAPPING_H
 #define AMDIS_FE_SPACE_MAPPING_H
@@ -38,6 +39,53 @@ namespace AMDiS {
     int local, global;
   };
 
+  /** \brief
+   * Defines, for each system component, a mapping from global DOF indices
+   * to global matrix indices. The mapping is defined for all DOFs in the
+   * rank's subdomain. When periodic boundary conditions are used, the
+   * mapping also stores information about the periodic associations of the
+   * rank's DOFs on periodic boundaries.
+   */
+  class DofToMatIndex
+  {
+  public:
+    DofToMatIndex() {}
+
+    /// Reset the data structure.
+    inline void clear()
+    {
+      data.clear();
+    }
+
+    /// Add a new mapping.
+    inline void add(int component, DegreeOfFreedom dof, int globalMatIndex)
+    {
+      data[component][dof] = globalMatIndex;
+    }
+
+    /// Map a global DOF index to the global matrix index for a specific
+    /// system component number.
+    inline int get(int component, DegreeOfFreedom dof)
+    {
+      FUNCNAME("DofToMatIndex::get()");
+
+      TEST_EXIT_DBG(data.count(component))
+        ("No mapping data for component %d available!\n", component);
+
+      TEST_EXIT_DBG(data[component].count(dof))
+        ("Mapping for DOF %d in component %d does not exist!\n",
+         dof, component);
+
+      return data[component][dof];
+    }
+
+  private:
+    /// The mapping data. For each system component there is a specific map
+    /// that maps global DOF indices to global matrix indices.
+    map<int, map<DegreeOfFreedom, int> > data;
+  };
+
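
For orientation, a minimal usage sketch of the new DofToMatIndex container; all component, DOF, and index values here are invented for illustration and are not part of the commit:

    DofToMatIndex mapIndex;
    mapIndex.add(0, 42, 17);        // component 0: global DOF 42 -> matrix index 17
    mapIndex.add(1, 42, 59);        // component 1 may map the same DOF elsewhere
    int row = mapIndex.get(0, 42);  // returns 17; aborts in debug mode if unset
    mapIndex.clear();               // reset before the mapping is recomputed
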
   class GlobalDofMap
   {
   public:
@@ -50,9 +98,9 @@ namespace AMDiS {
     GlobalDofMap(MPI::Intracomm* m)
       : mpiComm(m),
-        feSpace(NULL),
         sendDofs(NULL),
         recvDofs(NULL),
+        feSpace(NULL),
         needGlobalMapping(false),
         nRankDofs(0),
         nLocalDofs(0),
@@ -98,6 +146,11 @@ namespace AMDiS {
       return static_cast<bool>(dofMap.count(dof));
     }
 
+    bool isRankDof(DegreeOfFreedom dof)
+    {
+      return !(static_cast<bool>(nonRankDofs.count(dof)));
+    }
+
     unsigned int size()
     {
       return dofMap.size();
@@ -114,8 +167,6 @@ namespace AMDiS {
     void computeNonLocalIndices();
 
-    void computeMatIndex(int offset);
-
     void print();
 
     void setFeSpace(const FiniteElemSpace *fe)
@@ -128,20 +179,24 @@ namespace AMDiS {
       overlap = b;
     }
 
-    void setDofComm(DofComm &pSend, DofComm &pRecv)
+    void setNeedGlobalMapping(bool b)
     {
-      sendDofs = &pSend;
-      recvDofs = &pRecv;
+      needGlobalMapping = b;
     }
 
-    void setNeedGlobalMapping(bool b)
+    void setDofComm(DofComm &pSend, DofComm &pRecv)
     {
-      needGlobalMapping = b;
+      sendDofs = &pSend;
+      recvDofs = &pRecv;
     }
 
   private:
     MPI::Intracomm* mpiComm;
 
+    DofComm *sendDofs;
+    DofComm *recvDofs;
+
     const FiniteElemSpace *feSpace;
 
     ///
@@ -149,10 +204,6 @@ namespace AMDiS {
     std::set<DegreeOfFreedom> nonRankDofs;
 
-    DofComm *sendDofs;
-    DofComm *recvDofs;
-
     bool needGlobalMapping;
 
   public:
 
     ///
@@ -168,6 +219,9 @@ namespace AMDiS {
   public:
     FeSpaceData()
       : mpiComm(NULL),
+        sendDofs(NULL),
+        recvDofs(NULL),
+        overlap(false),
         feSpaces(0),
         nRankDofs(-1),
         nOverallDofs(-1),
@@ -273,15 +327,18 @@ namespace AMDiS {
     void init(MPI::Intracomm *m,
               vector<const FiniteElemSpace*> &fe,
-              bool needGlobalMapping)
+              bool needGlobalMapping,
+              bool ol)
     {
       mpiComm = m;
       feSpaces = fe;
+      overlap = ol;
 
       for (unsigned int i = 0; i < feSpaces.size(); i++) {
        feSpacesUnique.insert(feSpaces[i]);
        addFeSpace(feSpaces[i]);
        data[feSpaces[i]].setNeedGlobalMapping(needGlobalMapping);
+       data[feSpaces[i]].setOverlap(overlap);
       }
     }
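
The new fourth parameter takes over the role of the per-map setOverlap() calls that disappear from PetscSolverFeti further below; a call now looks like this (taken from the last hunk of this commit):

    primalDofMap.init(mpiComm, feSpaces, true, true);  // needGlobalMapping, overlap
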
@@ -295,7 +352,70 @@ namespace AMDiS {
       nLocalDofs = getLocalDofs(feSpaces);
       nOverallDofs = getOverallDofs(feSpaces);
       rStartDofs = getStartDofs(feSpaces);
+
+      computeMatIndex();
     }
 
+    void computeMatIndex()
+    {
+      dofToMatIndex.clear();
+
+      int offset = rStartDofs;
+
+      for (unsigned int i = 0; i < feSpaces.size(); i++) {
+        map<DegreeOfFreedom, MultiIndex>& dofMap = data[feSpaces[i]].getMap();
+
+        typedef map<DegreeOfFreedom, MultiIndex>::iterator ItType;
+        for (ItType it = dofMap.begin(); it != dofMap.end(); ++it) {
+          if (data[feSpaces[i]].isRankDof(it->first)) {
+            int globalMatIndex =
+              it->second.local - data[feSpaces[i]].rStartDofs + offset;
+            dofToMatIndex.add(i, it->first, globalMatIndex);
+          }
+        }
+
+        if (!overlap)
+          continue;
+
+        TEST_EXIT_DBG(sendDofs != NULL && recvDofs != NULL)
+          ("No communicator given!\n");
+
+        StdMpi<vector<DegreeOfFreedom> > stdMpi(*mpiComm);
+
+        for (DofComm::Iterator it(*sendDofs, feSpaces[i]);
+             !it.end(); it.nextRank()) {
+          vector<DegreeOfFreedom> sendGlobalDofs;
+
+          for (; !it.endDofIter(); it.nextDof())
+            if (dofMap.count(it.getDofIndex()))
+              sendGlobalDofs.push_back(dofToMatIndex.get(i, it.getDofIndex()));
+
+          stdMpi.send(it.getRank(), sendGlobalDofs);
+        }
+
+        for (DofComm::Iterator it(*recvDofs, feSpaces[i]);
+             !it.end(); it.nextRank())
+          stdMpi.recv(it.getRank());
+
+        stdMpi.startCommunication();
+
+        {
+          int counter = 0;
+          for (DofComm::Iterator it(*recvDofs, feSpaces[i]);
+               !it.end(); it.nextRank()) {
+            for (; !it.endDofIter(); it.nextDof()) {
+              if (dofMap.count(it.getDofIndex())) {
+                DegreeOfFreedom d = stdMpi.getRecvData(it.getRank())[counter++];
+                dofToMatIndex.add(i, it.getDofIndex(), d);
+              }
+            }
+          }
+        }
+
+        offset += data[feSpaces[i]].nRankDofs;
+      }
+    }
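
To make the index arithmetic concrete, a hedged reading with invented sizes: offset starts at the rank's global block start and advances by each FE space's nRankDofs, so one rank's matrix indices form a single contiguous block, ordered by FE space:

    // Example (sizes invented): two FE spaces, rank owns n0 = 3 and n1 = 5
    // DOFs, combined rStartDofs = 10. This rank's matrix indices are then:
    //   space 0 DOFs -> [10, 13)   (offset = rStartDofs)
    //   space 1 DOFs -> [13, 18)   (offset += nRankDofs of space 0)
    // DOFs owned by other ranks receive their index from the owning rank
    // via StdMpi, keeping the numbering consistent across subdomains.
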
 
     inline int mapLocal(int index, int ithFeSpace)
     {
@@ -335,9 +455,25 @@ namespace AMDiS {
       return -1;
     }
 
+    void setDofComm(DofComm &pSend, DofComm &pRecv)
+    {
+      sendDofs = &pSend;
+      recvDofs = &pRecv;
+
+      for (std::set<const FiniteElemSpace*>::iterator it = feSpacesUnique.begin();
+           it != feSpacesUnique.end(); ++it)
+        data[*it].setDofComm(pSend, pRecv);
+    }
+
   private:
     MPI::Intracomm* mpiComm;
 
+    DofComm *sendDofs;
+    DofComm *recvDofs;
+
+    bool overlap;
+
     map<const FiniteElemSpace*, T> data;
 
     vector<const FiniteElemSpace*> feSpaces;
@@ -345,6 +481,10 @@ namespace AMDiS {
     std::set<const FiniteElemSpace*> feSpacesUnique;
 
     int nRankDofs, nLocalDofs, nOverallDofs, rStartDofs;
+
+    /// Mapping from global DOF indices to global matrix indices, taking
+    /// multiple system components into account.
+    DofToMatIndex dofToMatIndex;
   };
 }
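
Assembled from the call sites in this commit, the intended call sequence for the refactored container looks roughly like this (a sketch; it assumes primalDofMap is a FeSpaceData instantiated over GlobalDofMap, with meshDistributor as in PetscSolverFeti below):

    FeSpaceData<GlobalDofMap> primalDofMap;                    // assumed type
    primalDofMap.init(mpiComm, feSpaces, true, true);          // mapping + overlap
    primalDofMap.setDofComm(meshDistributor->getSendDofs(),    // one DofComm pair,
                            meshDistributor->getRecvDofs());   // forwarded to all FE spaces
    primalDofMap.update();  // recomputes sizes and now also the DOF-to-matrix map
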
...
@@ -224,6 +224,11 @@ namespace AMDiS {
       createIndexB(feSpace);
     }
 
+    primalDofMap.setDofComm(meshDistributor->getSendDofs(),
+                            meshDistributor->getRecvDofs());
+    lagrangeMap.setDofComm(meshDistributor->getSendDofs(),
+                           meshDistributor->getRecvDofs());
+
     primalDofMap.update();
     dualDofMap.update();
     lagrangeMap.update();
@@ -242,6 +247,10 @@ namespace AMDiS {
     MSG("nRankLagrange = %d  nOverallLagrange = %d\n",
        lagrangeMap[feSpace].nRankDofs,
        lagrangeMap[feSpace].nOverallDofs);
+
+    TEST_EXIT_DBG(localDofMap[feSpace].size() + primalDofMap[feSpace].size() ==
+                  static_cast<unsigned int>(feSpace->getAdmin()->getUsedDofs()))
+      ("Should not happen!\n");
   }
 }
@@ -272,10 +281,6 @@ namespace AMDiS {
        primalDofMap[feSpace].insertRankDof(*it);
       else
        primalDofMap[feSpace].insert(*it);
-
-    primalDofMap[feSpace].setOverlap(true);
-    primalDofMap[feSpace].setDofComm(meshDistributor->getSendDofs(),
-                                     meshDistributor->getRecvDofs());
   }
@@ -283,6 +288,22 @@ namespace AMDiS {
   {
     FUNCNAME("PetscSolverFeti::createDuals()");
 
+    // === Create global index of the dual nodes on each rank. ===
+
+    DofContainer allBoundaryDofs;
+    meshDistributor->getAllBoundaryDofs(feSpace, allBoundaryDofs);
+
+    for (DofContainer::iterator it = allBoundaryDofs.begin();
+         it != allBoundaryDofs.end(); ++it)
+      if (!isPrimal(feSpace, **it))
+        dualDofMap[feSpace].insertRankDof(**it);
+  }
+
+  void PetscSolverFeti::createLagrange(const FiniteElemSpace *feSpace)
+  {
+    FUNCNAME("PetscSolverFeti::createLagrange()");
+
     // === Create for each dual node that is owned by the rank, the set ===
     // === of ranks that contain this node (denoted by W(x_j)).        ===
@@ -336,21 +357,6 @@ namespace AMDiS {
          stdMpi.getRecvData(it.getRank())[i++];
     }
 
-    // === Create global index of the dual nodes on each rank. ===
-
-    DofContainer allBoundaryDofs;
-    meshDistributor->getAllBoundaryDofs(feSpace, allBoundaryDofs);
-
-    for (DofContainer::iterator it = allBoundaryDofs.begin();
-         it != allBoundaryDofs.end(); ++it)
-      if (!isPrimal(feSpace, **it))
-        dualDofMap[feSpace].insertRankDof(**it);
-  }
-
-  void PetscSolverFeti::createLagrange(const FiniteElemSpace *feSpace)
-  {
-    FUNCNAME("PetscSolverFeti::createLagrange()");
-
     // === Reserve for each dual node, on the rank that owns this node, the ===
     // === appropriate number of Lagrange constraints.                      ===
@@ -367,9 +373,6 @@ namespace AMDiS {
       }
     }
 
     lagrangeMap[feSpace].nRankDofs = nRankLagrange;
-
-    lagrangeMap[feSpace].setOverlap(true);
-    lagrangeMap[feSpace].setDofComm(meshDistributor->getSendDofs(),
-                                    meshDistributor->getRecvDofs());
   }
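
For context on the "appropriate number" of constraints mentioned in the comment above: in the usual fully redundant FETI-DP setup, a dual node shared by the ranks in W(x_j) generates one Lagrange constraint per rank pair. A hedged sketch of that count, assuming this is what the loop elided by the diff accumulates (the name rankSet is hypothetical):

    // For one dual node shared by the set of ranks W(x_j):
    int degree = rankSet.size();                  // |W(x_j)|
    nRankLagrange += degree * (degree - 1) / 2;   // e.g. |W| = 3 -> 3 constraints
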
@@ -383,21 +386,16 @@ namespace AMDiS {
     // === the global index of all B nodes, insert all interior nodes first, ===
     // === without defining a correct index.                                  ===
 
-    nLocalInterior = 0;
+    nLocalInterior[feSpace] = 0;
     for (int i = 0; i < admin->getUsedSize(); i++) {
       if (admin->isDofFree(i) == false &&
          isPrimal(feSpace, i) == false &&
          dualDofMap[feSpace].isSet(i) == false) {
-       localDofMap[feSpace].insertRankDof(i, nLocalInterior);
-       nLocalInterior++;
+       localDofMap[feSpace].insertRankDof(i, nLocalInterior[feSpace]);
+       nLocalInterior[feSpace]++;
       }
     }
 
-    TEST_EXIT_DBG(nLocalInterior + primalDofMap[feSpace].size() +
-                  dualDofMap[feSpace].size() ==
-                  static_cast<unsigned int>(admin->getUsedDofs()))
-      ("Should not happen!\n");
-
     // === And finally, add the global indices of all dual nodes. ===
 
     for (map<DegreeOfFreedom, MultiIndex>::iterator it = dualDofMap[feSpace].getMap().begin();
@@ -446,7 +444,6 @@ namespace AMDiS {
       // Set the constraint for all components of the system.
       for (int k = 0; k < nComponents; k++) {
        int rowIndex = index * nComponents + k;
-       // int colIndex = it->second * nComponents + k;
        int colIndex =
          (localDofMap[feSpace].rStartDofs +
           localDofMap[feSpace][it->first].local) * nComponents + k;
@@ -470,8 +467,6 @@ namespace AMDiS {
   {
     FUNCNAME("PetscSolverFeti::createSchurPrimal()");
 
-    const FiniteElemSpace *feSpace = meshDistributor->getFeSpace(0);
-
     if (schurPrimalSolver == 0) {
       MSG("Create iterative schur primal solver!\n");
@@ -538,8 +533,10 @@ namespace AMDiS {
     int maxLocalPrimal = mat_b_primal_cols.size();
     mpi::globalMax(maxLocalPrimal);
 
-    TEST_EXIT(mat_b_primal_cols.size() == primalDofMap.getLocalDofs())
+    TEST_EXIT(static_cast<int>(mat_b_primal_cols.size()) ==
+              primalDofMap.getLocalDofs())
       ("Should not happen!\n");
 
     for (map<int, vector<pair<int, double> > >::iterator it = mat_b_primal_cols.begin();
         it != mat_b_primal_cols.end(); ++it) {
       Vec tmpVec;
@@ -862,10 +859,10 @@ namespace AMDiS {
     // === Create all sets and indices. ===
 
-    primalDofMap.init(mpiComm, feSpaces, true);
+    primalDofMap.init(mpiComm, feSpaces, true, true);