Commit 586b8402 authored by Thomas Witkowski

On the way to get it compiling.

parent 66a86905
@@ -248,6 +248,7 @@ if(ENABLE_PARALLEL_DOMAIN)
list(APPEND AMDIS_INCLUDE_DIRS ${PETSC_DIR}/include ${PETSC_DIR}/${PETSC_ARCH}/include)
list(APPEND PARALLEL_DOMAIN_AMDIS_SRC
${SOURCE_DIR}/parallel/BddcMlSolver.cc
${SOURCE_DIR}/parallel/ParallelCoarseSpaceMatVec.cc
${SOURCE_DIR}/parallel/PetscMultigridPrecon.cc
${SOURCE_DIR}/parallel/PetscSolver.cc
${SOURCE_DIR}/parallel/PetscProblemStat.cc
//
// Software License for AMDiS
//
// Copyright (c) 2010 Dresden University of Technology
// All rights reserved.
// Authors: Simon Vey, Thomas Witkowski et al.
//
// This file is part of AMDiS
//
// See also license.opensource.txt in the distribution.
#include "AMDiS.h"
#include "parallel/ParallelCoarseSpaceMatVec.h"
namespace AMDiS {
using namespace std;
void ParallelCoarseSpaceMatVec::create(ParallelDofMapping *iMap,
map<int, ParallelDofMapping*> cMap,
int subdomainLevel,
MPI::Intracomm mpiCommLocal,
MPI::Intracomm mpiCommGlobal)
{
FUNCNAME("ParallelCoarseSpaceMatVec::update()");
interiorMap = iMap;
coarseSpaceMap = cMap;
vector<ParallelDofMapping*> uniqueCoarseMap;
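// Collect the distinct coarse space mappings; several components may share one.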
if (coarseSpaceMap.size()) {
std::set<ParallelDofMapping*> tmp;
for (map<int, ParallelDofMapping*>::iterator it = coarseSpaceMap.begin();
it != coarseSpaceMap.end(); ++it) {
if (tmp.count(it->second) == 0) {
tmp.insert(it->second);
uniqueCoarseMap.push_back(it->second);
}
}
}
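// The block structure has one row and one column for the interior matrix plus one per distinct coarse space.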
int nCoarseMap = uniqueCoarseMap.size();
mat.resize(nCoarseMap + 1);
for (int i = 0; i < nCoarseMap + 1; i++)
mat[i].resize(nCoarseMap + 1);
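// Store for each component the index of its distinct coarse space.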
componentIthCoarseMap.resize(coarseSpaceMap.size());
for (unsigned int i = 0; i < componentIthCoarseMap.size(); i++) {
bool found = false;
for (int j = 0; j < nCoarseMap; j++) {
if (coarseSpaceMap[i] == uniqueCoarseMap[j]) {
componentIthCoarseMap[i] = j;
found = true;
break;
}
}
TEST_EXIT_DBG(found)("Should not happen!\n");
}
// === Create PETSc matrix with the computed nnz data structure. ===
int nRankRows = interiorMap->getRankDofs();
int nOverallRows = interiorMap->getOverallDofs();
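// With a coarse space and subdomainLevel == 0 the interior matrix is created as a purely local (sequential) matrix; otherwise it is distributed over mpiCommGlobal.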
bool localMatrix = (coarseSpaceMap.size() && subdomainLevel == 0);
if (localMatrix) {
MatCreateSeqAIJ(mpiCommLocal, nRankRows, nRankRows,
0, PETSC_NULL,
&mat[0][0]);
MatSetOption(mat[0][0], MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);
} else {
MatCreateAIJ(mpiCommGlobal, nRankRows, nRankRows,
nOverallRows, nOverallRows,
0, PETSC_NULL,
0, PETSC_NULL,
&mat[0][0]);
MatSetOption(mat[0][0], MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);
}
if (coarseSpaceMap.size()) {
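// Create the coarse space diagonal blocks and the coupling blocks between the interior and the coarse spaces.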
for (int i = 0; i < nCoarseMap; i++) {
ParallelDofMapping* cMap = uniqueCoarseMap[i];
int nRowsRankCoarse = cMap->getRankDofs();
int nRowsOverallCoarse = cMap->getOverallDofs();
MatCreateAIJ(mpiCommGlobal,
nRowsRankCoarse, nRowsRankCoarse,
nRowsOverallCoarse, nRowsOverallCoarse,
0, PETSC_NULL, 0, PETSC_NULL,
&mat[i + 1][i + 1]);
MSG("REMOVE THIS LINE WHEN FINISHED!\n");
MatSetOption(mat[i + 1][i + 1], MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);
for (int j = 0; j < nCoarseMap + 1; j++) {
int nRowsRankMat = (j == 0 ? nRankRows : uniqueCoarseMap[j - 1]->getRankDofs());
int nRowsOverallMat = (j == 0 ? nOverallRows : uniqueCoarseMap[j - 1]->getOverallDofs());
MatCreateAIJ(mpiCommGlobal,
nRowsRankCoarse, nRowsRankMat,
nRowsOverallCoarse, nRowsOverallMat,
100, PETSC_NULL, 100, PETSC_NULL,
&mat[i + 1][j]);
MSG("REMOVE THIS LINE WHEN FINISHED!\n");
MatSetOption(mat[i + 1][j], MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);
MatCreateAIJ(mpiCommGlobal,
nRowsRankMat, nRowsRankCoarse,
nRowsOverallMat, nRowsOverallCoarse,
0, PETSC_NULL, 0, PETSC_NULL,
&mat[j][i + 1]);
MSG("REMOVE THIS LINE WHEN FINISHED!\n");
MatSetOption(mat[j][i + 1], MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);
}
}
}
}
void ParallelCoarseSpaceMatVec::destroy()
{
FUNCNAME("ParallelCoarseSpaceMatVec::destroy()");
int nMatrix = mat.size();
for (int i = 0; i < nMatrix; i++)
for (int j = 0; j < nMatrix; j++)
MatDestroy(&mat[i][j]);
}
void ParallelCoarseSpaceMatVec::assembly()
{
FUNCNAME("ParallelCoarseSpaceMatVec::assembly()");
int nMatrix = mat.size();
for (int i = 0; i < nMatrix; i++) {
for (int j = 0; j < nMatrix; j++) {
MatAssemblyBegin(mat[i][j], MAT_FINAL_ASSEMBLY);
MatAssemblyEnd(mat[i][j], MAT_FINAL_ASSEMBLY);
}
}
}
}
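A minimal, standalone sketch (not part of this commit) of the bookkeeping that create() performs before the PETSc matrices are allocated: duplicate coarse space mappings are collapsed into uniqueCoarseMap, the block grid gets one row and one column per distinct coarse space plus one for the interior, and componentIthCoarseMap records which distinct coarse space each component uses. Plain integers stand in for ParallelDofMapping pointers; apart from the names taken from the class, everything here is illustrative.

#include <cassert>
#include <iostream>
#include <map>
#include <set>
#include <vector>

int main()
{
  // Three components: 0 and 1 share one coarse space, 2 uses another.
  // The int pointers stand in for ParallelDofMapping*.
  int coarseA = 0, coarseB = 0;
  std::map<int, int*> coarseSpaceMap;
  coarseSpaceMap[0] = &coarseA;
  coarseSpaceMap[1] = &coarseA;
  coarseSpaceMap[2] = &coarseB;

  // Collapse duplicates while keeping first-seen order (as in create()).
  std::vector<int*> uniqueCoarseMap;
  std::set<int*> seen;
  for (std::map<int, int*>::iterator it = coarseSpaceMap.begin();
       it != coarseSpaceMap.end(); ++it)
    if (seen.insert(it->second).second)
      uniqueCoarseMap.push_back(it->second);

  // Block grid: (nCoarseMap + 1)^2 blocks, block (0,0) being the interior.
  int nCoarseMap = uniqueCoarseMap.size();
  std::vector<std::vector<int> > mat(nCoarseMap + 1,
                                     std::vector<int>(nCoarseMap + 1));

  // For each component, the index of its distinct coarse space.
  std::vector<int> componentIthCoarseMap(coarseSpaceMap.size());
  for (unsigned int i = 0; i < componentIthCoarseMap.size(); i++)
    for (int j = 0; j < nCoarseMap; j++)
      if (coarseSpaceMap[i] == uniqueCoarseMap[j]) {
        componentIthCoarseMap[i] = j;
        break;
      }

  assert(nCoarseMap == 2);
  assert(componentIthCoarseMap[1] == 0 && componentIthCoarseMap[2] == 1);
  std::cout << "block grid: " << mat.size() << " x " << mat.size() << std::endl;
  return 0;
}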
// ============================================================================
// == ==
// == AMDiS - Adaptive multidimensional simulations ==
// == ==
// == http://www.amdis-fem.org ==
// == ==
// ============================================================================
//
// Software License for AMDiS
//
// Copyright (c) 2010 Dresden University of Technology
// All rights reserved.
// Authors: Simon Vey, Thomas Witkowski et al.
//
// This file is part of AMDiS
//
// See also license.opensource.txt in the distribution.
/** \file ParallelCoarseSpaceMatVec.h */
#ifndef AMDIS_PARALLEL_COARSE_SPACE_MAT_VEC_H
#define AMDIS_PARALLEL_COARSE_SPACE_MAT_VEC_H
#include <vector>
#include <map>
#include <petsc.h>
#include "AMDiS_fwd.h"
namespace AMDiS {
/**
* This class implements a block-structured PETSc matrix/vector that separates
* the discretization of the interior of the subdomains from the discretization
* of the coarse space. Thus, we have one matrix block for the interior and
* one matrix block for the coarse space plus the coupling blocks. Some notes:
* - For a single level domain decomposition method (e.g. the standard
* FETI-DP method), the interior matrix is local to the current rank and the
* coarse space matrix is a globally distributed matrix.
* - Different coarse spaces may be used for different components. In this
* case, there are as many blocks as there are different coarse spaces
* plus one block for the interior matrix.
*/
class ParallelCoarseSpaceMatVec {
public:
ParallelCoarseSpaceMatVec()
{}
/// Creates matrices and vectors with respect to the coarse space.
void create(ParallelDofMapping *interiorMap,
map<int, ParallelDofMapping*> coarseSpaceMap,
int subdomainLevel,
MPI::Intracomm mpiCommLocal,
MPI::Intracomm mpiCommGlobal);
/// Run PETSc's assembly routines.
void assembly();
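/// Destroys all PETSc matrices created by create().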
void destroy();
inline Mat& getInteriorMat()
{
TEST_EXIT_DBG(mat.size() > 0)("No matrix data!\n");
return mat[0][0];
}
inline Mat& getCoarseMat(int coarseSpace0 = 0, int coarseSpace1 = 0)
{
TEST_EXIT_DBG(mat.size() > coarseSpace0 + 1)("No matrix data!\n");
TEST_EXIT_DBG(mat.size() > coarseSpace1 + 1)("No matrix data!\n");
return mat[coarseSpace0 + 1][coarseSpace1 + 1];
}
inline Mat& getIntCoarseMat(int coarseSpace = 0)
{
TEST_EXIT_DBG(mat.size() > coarseSpace + 1)("No matrix data!\n");
return mat[0][coarseSpace + 1];
}
inline Mat& getCoarseIntMat(int coarseSpace = 0)
{
TEST_EXIT_DBG(mat.size() > coarseSpace + 1)("No matrix data!\n");
return mat[coarseSpace + 1][0];
}
inline Mat& getCoarseMatComp(int comp)
{
int matIndex = componentIthCoarseMap[comp] + 1;
return mat[matIndex][matIndex];
}
inline Mat& getIntCoarseMatComp(int comp)
{
int matIndex = componentIthCoarseMap[comp] + 1;
return mat[0][matIndex];
}
inline Mat& getCoarseIntMatComp(int comp)
{
int matIndex = componentIthCoarseMap[comp] + 1;
return mat[matIndex][0];
}
private:
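/// Block matrix of PETSc matrices: mat[0][0] is the interior matrix, the remaining rows and columns belong to the coarse spaces and their couplings with the interior.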
vector<vector<Mat> > mat;
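/// Parallel DOF mapping of the interior of the subdomains.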
ParallelDofMapping *interiorMap;
/// Parallel DOF mapping of the (optional) coarse space. This allows
/// different coarse spaces to be defined for different components.
map<int, ParallelDofMapping*> coarseSpaceMap;
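/// For each component, the index of its coarse space within the block structure (the corresponding matrix block index is this value plus one).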
vector<int> componentIthCoarseMap;
};
}
#endif
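For orientation, a hedged usage sketch (not part of the commit) showing how a solver is expected to drive this class; it mirrors the calls that the diff below introduces in PetscSolverGlobalMatrix. The mappings, the subdomain level and the communicators are assumed to be provided by the mesh distributor as elsewhere in AMDiS, and the fill loop is only indicated.

#include "parallel/ParallelCoarseSpaceMatVec.h"

namespace AMDiS {

// Illustrative helper, not taken from the commit.
void exampleCoarseSpaceUsage(ParallelDofMapping *interiorMap,
                             std::map<int, ParallelDofMapping*> coarseSpaceMap,
                             int subdomainLevel,
                             MPI::Intracomm mpiCommLocal,
                             MPI::Intracomm mpiCommGlobal)
{
  ParallelCoarseSpaceMatVec petscData;

  // Allocate the interior block, the coarse space blocks and the couplings.
  petscData.create(interiorMap, coarseSpaceMap,
                   subdomainLevel, mpiCommLocal, mpiCommGlobal);

  // Fill the blocks, e.g. for a component i of the system:
  //   MatSetValues(petscData.getInteriorMat(), ...);
  //   MatSetValues(petscData.getCoarseMatComp(i), ...);
  //   MatSetValues(petscData.getCoarseIntMatComp(i), ...);
  //   MatSetValues(petscData.getIntCoarseMatComp(i), ...);

  // Finalize the PETSc assembly on all blocks, use them (e.g. as KSP
  // operators), and release them when done.
  petscData.assembly();
  petscData.destroy();
}

}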
@@ -32,7 +32,7 @@
#include "Initfile.h"
#include "DOFMatrix.h"
#include "parallel/MeshDistributor.h"
#include "parallel/ParallelCoarseSpaceMatVec.h"
#include <petsc.h>
#include <petscsys.h>
#include <petscao.h>
@@ -178,41 +178,37 @@ namespace AMDiS {
inline Mat& getMatIntInt()
{
return mat[0][0];
// return matIntInt;
return petscData.getInteriorMat();
}
inline Mat& getMatCoarseCoarse()
{
FUNCNAME("PetscSolver::getMatCoarseCoarse()");
TEST_EXIT_DBG(coarseSpaceMap.size() && mat.size() > 1)
TEST_EXIT_DBG(coarseSpaceMap.size())
("Subdomain solver does not contain a coarse space!\n");
return mat[1][1];
// return matCoarseCoarse;
return petscData.getCoarseMat(0);
}
inline Mat& getMatIntCoarse()
{
FUNCNAME("PetscSolver::getMatIntCoarse()");
TEST_EXIT_DBG(coarseSpaceMap.size() && mat.size() > 1)
TEST_EXIT_DBG(coarseSpaceMap.size())
("Subdomain solver does not contain a coarse space!\n");
return mat[0][1];
// return matIntCoarse;
return petscData.getIntCoarseMat();
}
inline Mat& getMatCoarseInt()
{
FUNCNAME("PetscSolver::getMatCoarseInt()");
TEST_EXIT_DBG(coarseSpaceMap.size() && mat.size() > 1)
TEST_EXIT_DBG(coarseSpaceMap.size())
("Subdomain solver does not contain a coarse space!\n");
return mat[1][0];
// return matCoarseInt;
return petscData.getCoarseIntMat();
}
protected:
@@ -263,10 +259,9 @@ namespace AMDiS {
MPI::Intracomm mpiCommLocal;
/// Petsc's matrix structure.
// Mat matIntInt, matCoarseCoarse, matIntCoarse, matCoarseInt;
vector<vector<Mat> > mat;
/// PETSc matrices and vectors (possibly block structured if a coarse
/// space is defined).
ParallelCoarseSpaceMatVec petscData;
/// PETSc's vector structures for the rhs vector, the solution vector and a
/// temporary vector for calculating the final residuum.
@@ -19,7 +19,7 @@ namespace AMDiS {
void PetscSolverGlobalBlockMatrix::fillPetscMatrix(Matrix<DOFMatrix*> *seqMat)
{
FUNCNAME("PetscSolverGlobalBlockMatrix::fillPetscMatrix()");
#if 0
TEST_EXIT_DBG(meshDistributor)("No mesh distributor object defined!\n");
TEST_EXIT_DBG(interiorMap)("No parallel mapping object defined!\n");
TEST_EXIT_DBG(seqMat)("No DOF matrix defined!\n");
@@ -102,6 +102,7 @@ namespace AMDiS {
KSPSetFromOptions(kspInterior);
MSG("Fill petsc matrix needed %.5f seconds\n", MPI::Wtime() - wtime);
#endif
}
@@ -183,7 +184,8 @@ namespace AMDiS {
if (nestMat[i] != PETSC_NULL)
MatDestroy(&(nestMat[i]));
MatDestroy(&mat[0][0]);
petscData.destroy();
KSPDestroy(&kspInterior);
}
@@ -21,16 +21,15 @@ namespace AMDiS {
{
FUNCNAME("PetscSolverGlobalMatrix::fillPetscMatrix()");
petscData.create(interiorMap, coarseSpaceMap,
subdomainLevel, mpiCommLocal, mpiCommGlobal);
if (coarseSpaceMap.size()) {
updateSubdomainData();
fillPetscMatrixWithCoarseSpace(seqMat);
return;
}
mat.resize(1);
mat[0].resize(1);
Mat &matIntInt = mat[0][0];
TEST_EXIT_DBG(meshDistributor)("No mesh distributor object defined!\n");
TEST_EXIT_DBG(interiorMap)("No parallel mapping object defined!\n");
TEST_EXIT_DBG(seqMat)("No DOF matrix defined!\n");
@@ -52,23 +51,13 @@ namespace AMDiS {
VecCreateMPI(mpiCommGlobal, nRankRows, nOverallRows, &petscSolVec);
// === Create PETSc matrix with the computed nnz data structure. ===
MatCreateAIJ(mpiCommGlobal, nRankRows, nRankRows,
nOverallRows, nOverallRows,
0, nnzInterior.dnnz,
0, nnzInterior.onnz,
&matIntInt);
MatSetOption(matIntInt, MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);
#if (DEBUG != 0)
MSG("Fill petsc matrix 1 needed %.5f seconds\n", MPI::Wtime() - wtime);
#endif
#if (DEBUG != 0)
int a, b;
MatGetOwnershipRange(matIntInt, &a, &b);
MatGetOwnershipRange(petscData.getInteriorMat(), &a, &b);
TEST_EXIT(a == interiorMap->getStartDofs())("Wrong matrix ownership range!\n");
TEST_EXIT(b == interiorMap->getStartDofs() + nRankRows)
("Wrong matrix ownership range!\n");
@@ -87,13 +76,11 @@ namespace AMDiS {
MSG("Fill petsc matrix 2 needed %.5f seconds\n", MPI::Wtime() - wtime);
#endif
MatAssemblyBegin(matIntInt, MAT_FINAL_ASSEMBLY);
MatAssemblyEnd(matIntInt, MAT_FINAL_ASSEMBLY);
petscData.assembly();
if (printMatInfo) {
MatInfo matInfo;
MatGetInfo(matIntInt, MAT_GLOBAL_SUM, &matInfo);
MatGetInfo(petscData.getInteriorMat(), MAT_GLOBAL_SUM, &matInfo);
MSG("Matrix info:\n");
MSG(" memory usage: %e MB\n", matInfo.memory / (1024.0 * 1024.0));
MSG(" mallocs: %d\n", static_cast<int>(matInfo.mallocs));
@@ -111,7 +98,10 @@ namespace AMDiS {
KSPCreate(mpiCommGlobal, &kspInterior);
KSPGetPC(kspInterior, &pcInterior);
KSPSetOperators(kspInterior, matIntInt, matIntInt, SAME_NONZERO_PATTERN);
KSPSetOperators(kspInterior,
petscData.getInteriorMat(),
petscData.getInteriorMat(),
SAME_NONZERO_PATTERN);
KSPSetTolerances(kspInterior, 0.0, 1e-8, PETSC_DEFAULT, PETSC_DEFAULT);
KSPSetType(kspInterior, KSPBCGS);
KSPSetOptionsPrefix(kspInterior, kspPrefix.c_str());
@@ -142,45 +132,11 @@ namespace AMDiS {
FUNCNAME("PetscSolverGlobalMatrix::fillPetscMatrixWithCoarseSpace()");
TEST_EXIT_DBG(interiorMap)("Should not happen!\n");
TEST_EXIT_DBG(coarseSpaceMap.size() == seqMat->getSize())
("Wrong sizes %d %d\n", coarseSpaceMap.size(), seqMat->getSize());
vector<const FiniteElemSpace*> feSpaces = getFeSpaces(seqMat);
vector<ParallelDofMapping*> uniqueCoarseMap;
if (coarseSpaceMap.size()) {
TEST_EXIT_DBG(coarseSpaceMap.size() == seqMat->getSize())
("Wrong sizes %d %d\n", coarseSpaceMap.size(), seqMat->getSize());
std::set<ParallelDofMapping*> tmp;
for (map<int, ParallelDofMapping*>::iterator it = coarseSpaceMap.begin();
it != coarseSpaceMap.end(); ++it) {
if (tmp.count(it->second) == 0) {
tmp.insert(it->second);
uniqueCoarseMap.push_back(it->second);
}
}
}
int nCoarseMap = uniqueCoarseMap.size();
mat.resize(nCoarseMap + 1);
for (int i = 0; i < nCoarseMap + 1; i++)
mat[i].resize(nCoarseMap + 1);
vector<int> componentIthCoarseMap(coarseSpaceMap.size());
for (unsigned int i = 0; i < componentIthCoarseMap.size(); i++) {
bool found = false;
for (int j = 0; j < nCoarseMap; j++) {
if (coarseSpaceMap[i] == uniqueCoarseMap[j]) {
componentIthCoarseMap[i] = j;
found = true;
break;
}
}
TEST_EXIT_DBG(found)("Should not happen!\n");
}
int nRowsRankInterior = interiorMap->getRankDofs();
int nRowsOverallInterior = interiorMap->getOverallDofs();
@@ -202,71 +158,6 @@ namespace AMDiS {
}
}
mat.resize(nCoarseMap + 1);
if (localMatrix) {
MatCreateSeqAIJ(mpiCommLocal, nRowsRankInterior, nRowsRankInterior,
0, nnzInterior.dnnz,
&mat[0][0]);
} else {
MatCreateAIJ(mpiCommLocal,
nRowsRankInterior, nRowsRankInterior,
nRowsOverallInterior, nRowsOverallInterior,
0, nnzInterior.dnnz, 0, nnzInterior.onnz,
&mat[0][0]);
}
if (coarseSpaceMap.size()) {
for (int i = 0; i < nCoarseMap; i++) {
ParallelDofMapping* cMap = uniqueCoarseMap[i];
int nRowsRankCoarse = cMap->getRankDofs();
int nRowsOverallCoarse = cMap->getOverallDofs();
MatCreateAIJ(mpiCommGlobal,
nRowsRankCoarse, nRowsRankCoarse,
nRowsOverallCoarse, nRowsOverallCoarse,
0, PETSC_NULL, 0, PETSC_NULL,
&mat[i + 1][i + 1]);
MSG("REMOVE THIS LINE WHEN FINISHED!\n");
MatSetOption(mat[i + 1][i + 1], MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);
for (int j = 0; j < nCoarseMap + 1; j++) {
int nRowsRankMat = (j == 0 ? nRowsRankInterior : uniqueCoarseMap[j - 1]->getRankDofs());
int nRowsOverallMat = (j == 0 ? nGlobalOverallInterior : uniqueCoarseMap[j - 1]->getOverallDofs());
MatCreateAIJ(mpiCommGlobal,
nRowsRankCoarse, nRowsRankMat,
nRowsOverallCoarse, nRowsOverallMat,
100, PETSC_NULL, 100, PETSC_NULL,
&mat[i + 1][j]);
MSG("REMOVE THIS LINE WHEN FINISHED!\n");
MatSetOption(mat[i + 1][j], MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);
MatCreateAIJ(mpiCommGlobal,
nRowsRankMat, nRowsRankCoarse,
nRowsOverallMat, nRowsOverallCoarse,
0, PETSC_NULL, 0, PETSC_NULL,
&mat[j][i + 1]);
MSG("REMOVE THIS LINE WHEN FINISHED!\n");
MatSetOption(mat[j][i + 1], MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);
}
// MatCreateAIJ(mpiCommGlobal,
// nRowsRankCoarse, nRowsRankInterior,
// nRowsOverallCoarse, nGlobalOverallInterior,
// 0, nnzCoarseInt.dnnz, 0, nnzCoarseInt.onnz,
// &matCoarseInt);
// MatCreateAIJ(mpiCommGlobal,
// nRowsRankInterior, nRowsRankCoarse,
// nGlobalOverallInterior, nRowsOverallCoarse,
// 0, nnzIntCoarse.dnnz, 0, nnzIntCoarse.onnz,
// &matIntCoarse);
}
}
// === Prepare traverse of sequentially created matrices. ===
@@ -344,7 +235,7 @@ namespace AMDiS {
cols[k] = colCoarseSpace->getMatIndex(j, cols[k]);
// matCoarseCoarse
MatSetValues(mat[componentIthCoarseMap[i] + 1][componentIthCoarseMap[i] + 1],
MatSetValues(petscData.getCoarseMatComp(i),
1, &rowIndex, cols.size(),
&(cols[0]), &(values[0]), ADD_VALUES);
@@ -359,7 +250,7 @@ namespace AMDiS {
}
// matCoarseInt
MatSetValues(mat[componentIthCoarseMap[i] + 1][0],
MatSetValues(petscData.getCoarseIntMatComp(i),
1, &rowIndex, colsOther.size(),
&(colsOther[0]), &(valuesOther[0]), ADD_VALUES);
}
@@ -375,8 +266,7 @@ namespace AMDiS {
cols[k] = interiorMap->getMatIndex(j, cols[k]);
}
// matIntInt
MatSetValues(mat[0][0], 1, &localRowIndex, cols.size(),
MatSetValues(petscData.getInteriorMat(), 1, &localRowIndex, cols.size(),
&(cols[0]), &(values[0]), ADD_VALUES);
if (colsOther.size()) {
@@ -389,7 +279,7 @@ namespace AMDiS {
colsOther[k] = colCoarseSpace->getMatIndex(j, colsOther[k]);
// matIntCoarse
MatSetValues(mat[0][componentIthCoarseMap[i] + 1],
MatSetValues(petscData.getIntCoarseMatComp(i),
1, &globalRowIndex, colsOther.size(),
&(colsOther[0]), &(valuesOther[0]), ADD_VALUES);