
Commit 83c8b38d authored by Thomas Witkowski

On the way to a better world and I am not paid for great commit comments.

parent ae15b32c
@@ -100,6 +100,7 @@ namespace AMDiS {
 #ifdef HAVE_PARALLEL_DOMAIN_AMDIS
   class ElementObjectDatabase;
   class FeSpaceDofMap;
+  class MatrixNnzStructure;
   class MeshLevelData;
 #endif
...
@@ -32,7 +32,7 @@ namespace AMDiS {
   }

-  void MatrixNnzStructure::create(Matrix<DOFMatrix*> *mat,
+  void MatrixNnzStructure::create(Matrix<DOFMatrix*> &mat,
                                   MPI::Intracomm mpiComm,
                                   ParallelDofMapping &rowDofMap,
                                   ParallelDofMapping &colDofMap,
@@ -69,15 +69,15 @@ namespace AMDiS {
     // receiving rank.
     map<pair<DegreeOfFreedom, int>, int> sendDofToRank;

-    int nComponents = mat->getNumRows();
+    int nComponents = mat.getNumRows();

     // First, create for all ranks, to which we send data to, MatrixNnzEntry
     // object with 0 entries.
     for (int i = 0; i < nComponents; i++) {
       const FiniteElemSpace* feSpace = NULL;
       for (int j = 0; j < nComponents; j++)
-        if ((*mat)[i][j])
-          feSpace = (*mat)[i][j]->getRowFeSpace();
+        if (mat[i][j])
+          feSpace = mat[i][j]->getRowFeSpace();

       TEST_EXIT_DBG(feSpace)("No FE space found!\n");
@@ -95,8 +95,8 @@ namespace AMDiS {
     for (int i = 0; i < nComponents; i++) {
       const FiniteElemSpace* feSpace = NULL;
       for (int j = 0; j < nComponents; j++)
-        if ((*mat)[i][j])
-          feSpace = (*mat)[i][j]->getRowFeSpace();
+        if (mat[i][j])
+          feSpace = mat[i][j]->getRowFeSpace();

       for (DofComm::Iterator it(rowDofMap.getDofComm().getSendDofs(), feSpace);
            !it.end(); it.nextRank())
@@ -108,7 +108,7 @@ namespace AMDiS {
     for (int rowComp = 0; rowComp < nComponents; rowComp++) {
       for (int colComp = 0; colComp < nComponents; colComp++) {
-        DOFMatrix *dofMat = (*mat)[rowComp][colComp];
+        DOFMatrix *dofMat = mat[rowComp][colComp];

         if (!dofMat)
           continue;
...
@@ -40,7 +40,7 @@ namespace AMDiS {
     void clear();

-    void create(Matrix<DOFMatrix*> *mat,
+    void create(Matrix<DOFMatrix*> &mat,
                 MPI::Intracomm mpiComm,
                 ParallelDofMapping &rowDofMap,
                 ParallelDofMapping &colDofMap,
@@ -48,7 +48,7 @@ namespace AMDiS {
                 const ElementObjectDatabase& elObjDb,
                 bool localMatrix = false);

-    void create(Matrix<DOFMatrix*> *mat,
+    void create(Matrix<DOFMatrix*> &mat,
                 MPI::Intracomm mpiComm,
                 ParallelDofMapping &dofMap,
                 PeriodicMap *perMap,
...
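The changes above switch MatrixNnzStructure::create from taking Matrix<DOFMatrix*> *mat to Matrix<DOFMatrix*> &mat, so the implementation can drop the (*mat)[i][j] dereferences in favour of mat[i][j]. The same refactoring pattern in a self-contained toy, with a made-up Grid type standing in for Matrix<DOFMatrix*> (nothing below is AMDiS code):

// Generic illustration of the pointer-to-reference refactor; Grid is invented.
#include <iostream>
#include <vector>

struct Grid {
  int rows;
  std::vector<int> data;
  int getNumRows() const { return rows; }
  int* operator[](int i) { return &data[i * rows]; }
};

// Old style: take a pointer, dereference everywhere.
void fillOld(Grid *g) {
  int n = g->getNumRows();
  for (int i = 0; i < n; i++)
    for (int j = 0; j < n; j++)
      (*g)[i][j] = i + j;
}

// New style, as in create() above: take a reference and index directly.
void fillNew(Grid &g) {
  int n = g.getNumRows();
  for (int i = 0; i < n; i++)
    for (int j = 0; j < n; j++)
      g[i][j] = i + j;
}

int main() {
  Grid g{3, std::vector<int>(9, 0)};
  fillOld(&g);   // caller passes an address
  fillNew(g);    // caller passes the object itself
  std::cout << g[2][2] << "\n";  // prints 4
  return 0;
}

Call sites change accordingly: they now pass the matrix object itself instead of its address; everything else is mechanical.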
@@ -12,24 +12,39 @@
 #include "AMDiS.h"
 #include "parallel/ParallelCoarseSpaceMatVec.h"
+#include "parallel/MatrixNnzStructure.h"

 namespace AMDiS {

   using namespace std;

-  void ParallelCoarseSpaceMatVec::create(ParallelDofMapping *iMap,
-                                         map<int, ParallelDofMapping*> cMap,
-                                         int subdomainLevel,
-                                         MPI::Intracomm mpiCommLocal,
-                                         MPI::Intracomm mpiCommGlobal)
+  ParallelCoarseSpaceMatVec::ParallelCoarseSpaceMatVec()
+    : lastMeshNnz(0),
+      alwaysCreateNnzStructure(false)
   {
-    FUNCNAME("ParallelCoarseSpaceMatVec::update()");
+    Parameters::get("parallel->always create nnz structure",
+                    alwaysCreateNnzStructure);
+  }
+
+  void ParallelCoarseSpaceMatVec::init(ParallelDofMapping *iMap,
+                                       map<int, ParallelDofMapping*> cMap,
+                                       int sdLevel,
+                                       MPI::Intracomm mcLocal,
+                                       MPI::Intracomm mcGlobal,
+                                       MeshDistributor *meshDist)
+  {
+    FUNCNAME("ParallelCoarseSpaceMatVec:init()");

     interiorMap = iMap;
     coarseSpaceMap = cMap;
+    subdomainLevel = sdLevel;
+    mpiCommLocal = mcLocal;
+    mpiCommGlobal = mcGlobal;
+    meshDistributor = meshDist;

-    vector<ParallelDofMapping*> uniqueCoarseMap;
+    uniqueCoarseMap.clear();
     if (coarseSpaceMap.size()) {
       std::set<ParallelDofMapping*> tmp;
       for (map<int, ParallelDofMapping*>::iterator it = coarseSpaceMap.begin();
@@ -59,30 +74,80 @@ namespace AMDiS {
       TEST_EXIT_DBG(found)("Should not happen!\n");
     }
+  }
+
+  void ParallelCoarseSpaceMatVec::create(Matrix<DOFMatrix*>& seqMat)
+  {
+    FUNCNAME("ParallelCoarseSpaceMatVec::create()");
+
+    // === If required, recompute non zero structure of the matrix. ===
+
+    bool localMatrix = (coarseSpaceMap.size() && subdomainLevel == 0);
+    if (checkMeshChange(seqMat, localMatrix)) {
+      int nMat = uniqueCoarseMap.size() + 1;
+      nnz.resize(nMat);
+      for (int i = 0; i < nMat; i++) {
+        nnz.resize(nMat);
+        for (int j = 0; j < nMat; j++)
+          nnz[i][j].clear();
+      }
+
+      nnz[0][0].create(seqMat, mpiCommGlobal, *interiorMap,
+                       (coarseSpaceMap.size() == 0 ? &(meshDistributor->getPeriodicMap()) : NULL),
+                       meshDistributor->getElementObjectDb(),
+                       localMatrix);
+
+      for (int i = 0; i < nMat; i++) {
+        for (int j = 0; j < nMat; j++) {
+          if (i == 0 && j == 0)
+            continue;
+
+          if (i == j) {
+            nnz[i][j].create(seqMat, mpiCommGlobal, coarseSpaceMap, NULL,
+                             meshDistributor->getElementObjectDb());
+          } else {
+            ParallelDofMapping *rowMap = NULL;
+            ParallelDofMapping *colMap = NULL;
+            nnz[i][j].create(seqMat, mpiCommGlobal, *rowMap, *colMap, NULL,
+                             meshDistributor->getElementObjectDb());
+            /*
+            nnzCoarse.create(mat, mpiCommGlobal, *coarseSpaceMap, NULL, meshDistributor->getElementObjectDb());
+            nnzCoarseInt.create(mat, mpiCommGlobal, *coarseSpaceMap, *interiorMap, NULL, meshDistributor->getElementObjectDb());
+            nnzIntCoarse.create(mat, mpiCommGlobal, *interiorMap, *coarseSpaceMap, NULL, meshDistributor->getElementObjectDb());
+            */
+          }
+        }
+      }
+    }

     // === Create PETSc matrix with the computed nnz data structure. ===

     int nRankRows = interiorMap->getRankDofs();
     int nOverallRows = interiorMap->getOverallDofs();
-    bool localMatrix = (coarseSpaceMap.size() && subdomainLevel == 0);

     if (localMatrix) {
-      MatCreateSeqAIJ(mpiCommLocal, nRankRows, nOverallRows,
-                      0, PETSC_NULL,
+      MatCreateSeqAIJ(mpiCommLocal, nRankRows, nRankRows,
+                      100, PETSC_NULL,
                       &mat[0][0]);
       MatSetOption(mat[0][0], MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);
     } else {
       MatCreateAIJ(mpiCommGlobal, nRankRows, nRankRows,
                    nOverallRows, nOverallRows,
-                   0, PETSC_NULL,
-                   0, PETSC_NULL,
+                   100, PETSC_NULL,
+                   100, PETSC_NULL,
                    &mat[0][0]);
       MatSetOption(mat[0][0], MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);
     }

     if (coarseSpaceMap.size()) {
+      int nCoarseMap = uniqueCoarseMap.size();
       for (int i = 0; i < nCoarseMap; i++) {
         ParallelDofMapping* cMap = uniqueCoarseMap[i];
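In the hunk above (and again in the two hunks that follow), the preallocation arguments of MatCreateSeqAIJ/MatCreateAIJ change from "0, PETSC_NULL" to "100, PETSC_NULL": a flat guess of 100 nonzeros per row, with MAT_NEW_NONZERO_ALLOCATION_ERR disabled so an underestimate only costs performance instead of aborting. PETSc alternatively accepts exact per-row counts, which is what the MatrixNnzStructure objects computed above are ultimately meant to supply. A minimal stand-alone sketch in plain PETSc, not AMDiS code (error checking omitted; the PETSC_NULL spelling follows the PETSc version used here):

#include <petscmat.h>
#include <vector>

int main(int argc, char **argv) {
  PetscInitialize(&argc, &argv, PETSC_NULL, PETSC_NULL);

  PetscInt nLocal = 100;                    // rows owned by this rank
  std::vector<PetscInt> dnnz(nLocal, 5);    // exact nonzeros in the diagonal block
  std::vector<PetscInt> onnz(nLocal, 2);    // exact nonzeros in the off-diagonal block

  // Exact preallocation: the per-row arrays take precedence over the scalar guesses.
  Mat A;
  MatCreateAIJ(PETSC_COMM_WORLD,
               nLocal, nLocal,
               PETSC_DETERMINE, PETSC_DETERMINE,
               0, dnnz.data(),
               0, onnz.data(),
               &A);

  // Flat guess instead (what the commit switches to with "100, PETSC_NULL").
  Mat B;
  MatCreateAIJ(PETSC_COMM_WORLD,
               nLocal, nLocal,
               PETSC_DETERMINE, PETSC_DETERMINE,
               100, PETSC_NULL,
               100, PETSC_NULL,
               &B);
  MatSetOption(B, MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);

  MatDestroy(&A);
  MatDestroy(&B);
  PetscFinalize();
  return 0;
}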
@@ -92,11 +157,11 @@ namespace AMDiS {
         MatCreateAIJ(mpiCommGlobal,
                      nRowsRankCoarse, nRowsRankCoarse,
                      nRowsOverallCoarse, nRowsOverallCoarse,
-                     0, PETSC_NULL, 0, PETSC_NULL,
-                     &mat[i][i]);
+                     100, PETSC_NULL, 100, PETSC_NULL,
+                     &mat[i + 1][i + 1]);
         MSG("REMOVE THIS LINE WHEN FINISHED!\n");
-        MatSetOption(mat[i][i], MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);
+        MatSetOption(mat[i + 1][i + 1], MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);

         for (int j = 0; j < nCoarseMap + 1; j++) {
           int nRowsRankMat = (j == 0 ? nRankRows : uniqueCoarseMap[j - 1]->getRankDofs());
@@ -113,7 +178,7 @@ namespace AMDiS {
           MatCreateAIJ(mpiCommGlobal,
                        nRowsRankMat, nRowsRankCoarse,
                        nRowsOverallMat, nRowsOverallCoarse,
-                       0, PETSC_NULL, 0, PETSC_NULL,
+                       100, PETSC_NULL, 100, PETSC_NULL,
                        &mat[j][i + 1]);
           MSG("REMOVE THIS LINE WHEN FINISHED!\n");
           MatSetOption(mat[j][i + 1], MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);
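The index shift above (mat[i][i] becomes mat[i + 1][i + 1], while the coupling blocks keep the mat[j][i + 1] indexing) reflects the new block layout: slot 0 is reserved for the interior matrix, and the i-th unique coarse-space mapping occupies slot i + 1, so the mat and nnz members are sized uniqueCoarseMap.size() + 1 in each direction. A tiny self-contained sketch of that indexing convention, with invented names and no AMDiS types:

#include <cstdio>
#include <string>
#include <vector>

int main() {
  int nCoarseMap = 2;               // e.g. two unique coarse-space mappings
  int nMat = nCoarseMap + 1;        // block 0 is the interior problem

  // Same shape as the mat/nnz members: (nCoarseMap + 1) x (nCoarseMap + 1).
  std::vector<std::vector<std::string> > block(nMat, std::vector<std::string>(nMat));

  block[0][0] = "interior x interior";
  for (int i = 0; i < nCoarseMap; i++) {
    block[i + 1][i + 1] = "coarse " + std::to_string(i) + " diagonal";
    for (int j = 0; j < nCoarseMap + 1; j++)
      if (j != i + 1)
        block[j][i + 1] = "space " + std::to_string(j) + " x coarse " + std::to_string(i);
  }

  for (int i = 0; i < nMat; i++)
    for (int j = 0; j < nMat; j++)
      if (!block[i][j].empty())
        std::printf("block[%d][%d] = %s\n", i, j, block[i][j].c_str());
  return 0;
}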
@@ -147,4 +212,30 @@ namespace AMDiS {
     }
   }

+  bool ParallelCoarseSpaceMatVec::checkMeshChange(Matrix<DOFMatrix*> &seqMat,
+                                                  bool localMatrix)
+  {
+    FUNCNAME("ParallelCoarseSpaceMatVec::checkMeshChange()");
+
+    int recvAllValues = 0;
+    int sendValue =
+      static_cast<int>(meshDistributor->getLastMeshChangeIndex() != lastMeshNnz);
+    mpiCommGlobal.Allreduce(&sendValue, &recvAllValues, 1, MPI_INT, MPI_SUM);
+
+    if (recvAllValues != 0 || alwaysCreateNnzStructure) {
+      vector<const FiniteElemSpace*> feSpaces = getFeSpaces(seqMat);
+
+      interiorMap->setComputeMatIndex(!localMatrix);
+      interiorMap->update(feSpaces);
+
+      // updateSubdomainData();
+
+      lastMeshNnz = meshDistributor->getLastMeshChangeIndex();
+      return true;
+    }
+
+    return false;
+  }
 }
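The new checkMeshChange() lets every rank vote on whether the cached nnz structure is stale: each rank contributes a 0/1 flag, and the summed result is distributed with Allreduce, so all ranks take the rebuild branch together even if only one of them adapted its mesh. A stripped-down, self-contained version of that collective pattern using the MPI C interface (localMeshChanged() is a placeholder, not an AMDiS function):

#include <mpi.h>
#include <cstdio>

// Placeholder for "has my rank's mesh changed since the last nnz build?".
static bool localMeshChanged(int rank) {
  return rank == 0;  // pretend only rank 0 saw a change
}

int main(int argc, char **argv) {
  MPI_Init(&argc, &argv);

  int rank = 0;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  int sendValue = static_cast<int>(localMeshChanged(rank));
  int recvAllValues = 0;

  // Same collective as in checkMeshChange(): sum the per-rank flags so that
  // every rank sees whether *any* rank requires a new nnz structure.
  MPI_Allreduce(&sendValue, &recvAllValues, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);

  if (recvAllValues != 0)
    std::printf("rank %d: rebuild nnz structure\n", rank);
  else
    std::printf("rank %d: reuse cached nnz structure\n", rank);

  MPI_Finalize();
  return 0;
}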
@@ -44,21 +44,26 @@ namespace AMDiS {
    */
   class ParallelCoarseSpaceMatVec {
   public:
-    ParallelCoarseSpaceMatVec()
-    {}
+    ParallelCoarseSpaceMatVec();

     /// Creates matrices and vectors with respect to the coarse space.
-    void create(ParallelDofMapping *interiorMap,
-                map<int, ParallelDofMapping*> coarseSpaceMap,
-                int subdomainLevel,
-                MPI::Intracomm mpiCommLocal,
-                MPI::Intracomm mpiCommGlobal);
+    void init(ParallelDofMapping *interiorMap,
+              map<int, ParallelDofMapping*> coarseSpaceMap,
+              int subdomainLevel,
+              MPI::Intracomm mpiCommLocal,
+              MPI::Intracomm mpiCommGlobal,
+              MeshDistributor *meshDistributor);
+
+    void create(Matrix<DOFMatrix*>& seqMat);

     /// Run PETSc's assembly routines.
     void assembly();

     void destroy();

+    bool checkMeshChange(Matrix<DOFMatrix*> &mat,
+                         bool localMatrix = false);
+
     inline Mat& getInteriorMat()
     {
       TEST_EXIT_DBG(mat.size() > 0)("No matrix data!\n");
@@ -105,6 +110,8 @@ namespace AMDiS {
   private:
     vector<vector<Mat> > mat;

+    vector<vector<MatrixNnzStructure> > nnz;
+
     ParallelDofMapping *interiorMap;

     /// Parallel DOF mapping of the (optional) coarse space. Allows to define
@@ -112,6 +119,30 @@ namespace AMDiS {
     map<int, ParallelDofMapping*> coarseSpaceMap;

     vector<int> componentIthCoarseMap;

+    vector<ParallelDofMapping*> uniqueCoarseMap;
+
+    MeshDistributor *meshDistributor;
+
+    int subdomainLevel;
+
+    MPI::Intracomm mpiCommLocal;
+
+    MPI::Intracomm mpiCommGlobal;
+
+    /// Stores the mesh change index of the mesh the nnz structure was created for.
+    /// Therefore, if the mesh change index is higher than this value, we have to create
+    /// a new nnz structure for PETSc matrices, because the mesh has been changed and
+    /// therefore also the assembled matrix structure.
+    int lastMeshNnz;
+
+    /// If this variable is set to true, the non-zero matrix structure is
+    /// created each time from scratch by calling \ref createPetscNnzStrcuture.
+    /// This can be necessary if the number of non-zeros in the matrix varies
+    /// though the mesh does not change. This may happen if there are many
+    /// operators using DOFVectors from old timestep containing many zeros due to
+    /// some phase fields.
+    bool alwaysCreateNnzStructure;
   };
 }
...
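Taken together with the implementation above, the header splits the old one-shot create(...) into three steps: the constructor reads the "parallel->always create nnz structure" parameter, init(...) stores the mappings, communicators and the MeshDistributor, and create(...) (re)builds the nnz structures and PETSc matrices only when checkMeshChange() says so. The same constructor/init/create lifecycle in a self-contained toy (the types and names below are illustrative, not AMDiS APIs):

#include <cstdio>

// Toy version of the ctor/init/create split; MeshInfo and MatVecData are invented.
struct MeshInfo { int changeIndex = 0; };

class MatVecData {
public:
  MatVecData() : lastMeshNnz(0), alwaysRebuild(false) {}   // cheap, parameter-style setup

  void init(MeshInfo *m) { mesh = m; }                     // wire up dependencies once

  void create() {                                          // called for every assembly
    if (alwaysRebuild || mesh->changeIndex != lastMeshNnz) {
      std::printf("rebuilding nnz/matrix structure\n");
      lastMeshNnz = mesh->changeIndex;
    } else {
      std::printf("reusing existing structure\n");
    }
  }

private:
  MeshInfo *mesh = nullptr;
  int lastMeshNnz;
  bool alwaysRebuild;
};

int main() {
  MeshInfo mesh;
  MatVecData data;
  data.init(&mesh);

  data.create();             // first call: rebuilds
  data.create();             // mesh unchanged: reuses
  mesh.changeIndex = 1;      // simulate mesh adaptation
  data.create();             // rebuilds again
  return 0;
}

In the solver classes this shows up further down as petscData.init(...) followed by petscData.create().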
@@ -62,6 +62,8 @@ namespace AMDiS {
   void PetscSolver::updateSubdomainData()
   {
+    FUNCNAME("PetscSolver::updateSubdomainData()");
+
     if (mpiCommLocal.Get_size() == 1) {
       rStartInterior = 0;
       nGlobalOverallInterior = interiorMap->getOverallDofs();
...
@@ -26,8 +26,9 @@ namespace AMDiS {
     double wtime = MPI::Wtime();

-    petscData.create(interiorMap, coarseSpaceMap,
-                     subdomainLevel, mpiCommLocal, mpiCommGlobal);
+    petscData.init(interiorMap, coarseSpaceMap,
+                   subdomainLevel, mpiCommLocal, mpiCommGlobal,
+                   meshDistributor);

     const FiniteElemSpace *feSpace = meshDistributor->getFeSpace(0);
     nComponents = seqMat->getNumRows();
...
@@ -21,8 +21,10 @@ namespace AMDiS {
   {
     FUNCNAME("PetscSolverGlobalMatrix::fillPetscMatrix()");

-    petscData.create(interiorMap, coarseSpaceMap,
-                     subdomainLevel, mpiCommLocal, mpiCommGlobal);
+    petscData.init(interiorMap, coarseSpaceMap,
+                   subdomainLevel, mpiCommLocal, mpiCommGlobal,
+                   meshDistributor);
+    petscData.create();

     if (coarseSpaceMap.size()) {
       updateSubdomainData();
@@ -36,13 +38,6 @@ namespace AMDiS {
     double wtime = MPI::Wtime();

-    // === If required, recompute non zero structure of the matrix. ===
-
-    if (checkMeshChange(seqMat))
-      nnzInterior.create(seqMat, mpiCommGlobal, *interiorMap,
-                         &(meshDistributor->getPeriodicMap()),
-                         meshDistributor->getElementObjectDb());
-
     // === Create PETSc vector (solution and a temporary vector). ===

     int nRankRows = interiorMap->getRankDofs();
@@ -140,24 +135,6 @@ namespace AMDiS {
     int nRowsRankInterior = interiorMap->getRankDofs();
     int nRowsOverallInterior = interiorMap->getOverallDofs();

-    // === If required, recompute non zero structure of the matrix. ===
-
-    bool localMatrix = (subdomainLevel == 0);
-    if (checkMeshChange(seqMat, localMatrix)) {
-      nnzInterior.create(seqMat, mpiCommGlobal, *interiorMap, NULL,
-                         meshDistributor->getElementObjectDb(),
-                         localMatrix);
-
-      if (coarseSpaceMap.size()) {
-        MSG("NO COARSE SPACE NNZ!\n");
-        /*
-        nnzCoarse.create(mat, mpiCommGlobal, *coarseSpaceMap, NULL, meshDistributor->getElementObjectDb());
-        nnzCoarseInt.create(mat, mpiCommGlobal, *coarseSpaceMap, *interiorMap, NULL, meshDistributor->getElementObjectDb());
-        nnzIntCoarse.create(mat, mpiCommGlobal, *interiorMap, *coarseSpaceMap, NULL, meshDistributor->getElementObjectDb());
-        */
-      }
-    }
-
     // === Prepare traverse of sequentially created matrices. ===
@@ -416,6 +393,8 @@ namespace AMDiS {
     if (removeRhsNullspace) {
       TEST_EXIT_DBG(coarseSpaceMap.empty())("Not supported!\n");

+      MSG("Remove nullspace from rhs vector.\n");
+
       MatNullSpaceRemove(matNullspace, rhsInterior, PETSC_NULL);
     }
@@ -534,40 +513,6 @@ namespace AMDiS {
   }

-  bool PetscSolverGlobalMatrix::checkMeshChange(Matrix<DOFMatrix*> *mat,
-                                                bool localMatrix)
-  {
-    FUNCNAME("PetscSolverGlobalMatrix::checkMeshChange()");
-
-    int recvAllValues = 0;
-    int sendValue =
-      static_cast<int>(meshDistributor->getLastMeshChangeIndex() != lastMeshNnz);
-    mpiCommGlobal.Allreduce(&sendValue, &recvAllValues, 1, MPI_INT, MPI_SUM);
-
-    if (!nnzInterior.dnnz || recvAllValues != 0 || alwaysCreateNnzStructure) {
-      vector<const FiniteElemSpace*> feSpaces = getFeSpaces(mat);
-
-      interiorMap->setComputeMatIndex(!localMatrix);
-      interiorMap->update(feSpaces);
-
-      nnzInterior.clear();
-      if (coarseSpaceMap.size()) {
-        nnzCoarse.clear();
-        nnzCoarseInt.clear();
-        nnzIntCoarse.clear();
-      }
-
-      updateSubdomainData();
-      lastMeshNnz = meshDistributor->getLastMeshChangeIndex();
-
-      return true;
-    }
-
-    return false;
-  }
-
   void PetscSolverGlobalMatrix::createFieldSplit(PC pc)
   {
     FUNCNAME("PetscSolverGlobalMatrix::createFieldSplit()");
...
@@ -39,14 +39,10 @@ namespace AMDiS {
     PetscSolverGlobalMatrix()
       : PetscSolver(),
         petscSolVec(PETSC_NULL),
-        lastMeshNnz(0),