Commit 586b8402 authored by Thomas Witkowski

On the way to get it compiling.

parent 66a86905
@@ -248,6 +248,7 @@ if(ENABLE_PARALLEL_DOMAIN)
 	list(APPEND AMDIS_INCLUDE_DIRS ${PETSC_DIR}/include ${PETSC_DIR}/${PETSC_ARCH}/include)
 	list(APPEND PARALLEL_DOMAIN_AMDIS_SRC
 		${SOURCE_DIR}/parallel/BddcMlSolver.cc
+		${SOURCE_DIR}/parallel/ParallelCoarseSpaceMatVec.cc
 		${SOURCE_DIR}/parallel/PetscMultigridPrecon.cc
 		${SOURCE_DIR}/parallel/PetscSolver.cc
 		${SOURCE_DIR}/parallel/PetscProblemStat.cc
...
//
// Software License for AMDiS
//
// Copyright (c) 2010 Dresden University of Technology
// All rights reserved.
// Authors: Simon Vey, Thomas Witkowski et al.
//
// This file is part of AMDiS
//
// See also license.opensource.txt in the distribution.
#include "AMDiS.h"
#include "parallel/ParallelCoarseSpaceMatVec.h"
namespace AMDiS {
using namespace std;
  void ParallelCoarseSpaceMatVec::create(ParallelDofMapping *iMap,
                                         map<int, ParallelDofMapping*> cMap,
                                         int subdomainLevel,
                                         MPI::Intracomm mpiCommLocal,
                                         MPI::Intracomm mpiCommGlobal)
  {
    FUNCNAME("ParallelCoarseSpaceMatVec::create()");

    interiorMap = iMap;
    coarseSpaceMap = cMap;

    vector<ParallelDofMapping*> uniqueCoarseMap;
    if (coarseSpaceMap.size()) {
      std::set<ParallelDofMapping*> tmp;
      for (map<int, ParallelDofMapping*>::iterator it = coarseSpaceMap.begin();
           it != coarseSpaceMap.end(); ++it) {
        if (tmp.count(it->second) == 0) {
          tmp.insert(it->second);
          uniqueCoarseMap.push_back(it->second);
        }
      }
    }

    int nCoarseMap = uniqueCoarseMap.size();
    mat.resize(nCoarseMap + 1);
    for (int i = 0; i < nCoarseMap + 1; i++)
      mat[i].resize(nCoarseMap + 1);

    componentIthCoarseMap.resize(coarseSpaceMap.size());
    for (unsigned int i = 0; i < componentIthCoarseMap.size(); i++) {
      bool found = false;
      for (int j = 0; j < nCoarseMap; j++) {
        if (coarseSpaceMap[i] == uniqueCoarseMap[j]) {
          componentIthCoarseMap[i] = j;
          found = true;
          break;
        }
      }
      TEST_EXIT_DBG(found)("Should not happen!\n");
    }

    // === Create PETSc matrix with the computed nnz data structure. ===

    int nRankRows = interiorMap->getRankDofs();
    int nOverallRows = interiorMap->getOverallDofs();

    bool localMatrix = (coarseSpaceMap.size() && subdomainLevel == 0);
    if (localMatrix) {
      MatCreateSeqAIJ(mpiCommLocal, nRankRows, nOverallRows,
                      0, PETSC_NULL,
                      &mat[0][0]);
      MatSetOption(mat[0][0], MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);
    } else {
      MatCreateAIJ(mpiCommGlobal, nRankRows, nRankRows,
                   nOverallRows, nOverallRows,
                   0, PETSC_NULL,
                   0, PETSC_NULL,
                   &mat[0][0]);
      MatSetOption(mat[0][0], MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);
    }

    if (coarseSpaceMap.size()) {
      for (int i = 0; i < nCoarseMap; i++) {
        ParallelDofMapping* cMap = uniqueCoarseMap[i];

        int nRowsRankCoarse = cMap->getRankDofs();
        int nRowsOverallCoarse = cMap->getOverallDofs();

        MatCreateAIJ(mpiCommGlobal,
                     nRowsRankCoarse, nRowsRankCoarse,
                     nRowsOverallCoarse, nRowsOverallCoarse,
                     0, PETSC_NULL, 0, PETSC_NULL,
                     &mat[i][i]);
        MSG("REMOVE THIS LINE WHEN FINISHED!\n");
        MatSetOption(mat[i][i], MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);

        for (int j = 0; j < nCoarseMap + 1; j++) {
          int nRowsRankMat = (j == 0 ? nRankRows : uniqueCoarseMap[j - 1]->getRankDofs());
          int nRowsOverallMat = (j == 0 ? nOverallRows : uniqueCoarseMap[j - 1]->getOverallDofs());

          MatCreateAIJ(mpiCommGlobal,
                       nRowsRankCoarse, nRowsRankMat,
                       nRowsOverallCoarse, nRowsOverallMat,
                       100, PETSC_NULL, 100, PETSC_NULL,
                       &mat[i + 1][j]);
          MSG("REMOVE THIS LINE WHEN FINISHED!\n");
          MatSetOption(mat[i + 1][j], MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);

          MatCreateAIJ(mpiCommGlobal,
                       nRowsRankMat, nRowsRankCoarse,
                       nRowsOverallMat, nRowsOverallCoarse,
                       0, PETSC_NULL, 0, PETSC_NULL,
                       &mat[j][i + 1]);
          MSG("REMOVE THIS LINE WHEN FINISHED!\n");
          MatSetOption(mat[j][i + 1], MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);
        }
      }
    }
  }

  void ParallelCoarseSpaceMatVec::destroy()
  {
    FUNCNAME("ParallelCoarseSpaceMatVec::destroy()");

    int nMatrix = mat.size();
    for (int i = 0; i < nMatrix; i++)
      for (int j = 0; j < nMatrix; j++)
        MatDestroy(&mat[i][j]);
  }

  void ParallelCoarseSpaceMatVec::assembly()
  {
    FUNCNAME("ParallelCoarseSpaceMatVec::assembly()");

    int nMatrix = mat.size();
    for (int i = 0; i < nMatrix; i++) {
      for (int j = 0; j < nMatrix; j++) {
        MatAssemblyBegin(mat[i][j], MAT_FINAL_ASSEMBLY);
        MatAssemblyEnd(mat[i][j], MAT_FINAL_ASSEMBLY);
      }
    }
  }
}
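
The index bookkeeping in create() above is the least obvious part of the new class: several solver components may share one coarse space, so the distinct ParallelDofMapping pointers are collected once and each component remembers the position of its coarse space in that list. The standalone sketch below is an editorial illustration, not part of the commit; DofMapping is a stand-in type and the component assignment is invented. It reproduces just that bookkeeping and prints the resulting block indices, where block 0 is reserved for the interior matrix.

// Illustration only (stand-in types, not AMDiS classes).
#include <cstdio>
#include <map>
#include <set>
#include <vector>

struct DofMapping {};

int main()
{
  DofMapping lagrange, augmented;
  std::map<int, DofMapping*> coarseSpaceMap;
  coarseSpaceMap[0] = &lagrange;   // component 0 uses the first coarse space
  coarseSpaceMap[1] = &lagrange;   // component 1 shares it
  coarseSpaceMap[2] = &augmented;  // component 2 uses a second coarse space

  // Collect the distinct coarse space mappings in first-seen order.
  std::vector<DofMapping*> uniqueCoarseMap;
  std::set<DofMapping*> seen;
  for (std::map<int, DofMapping*>::iterator it = coarseSpaceMap.begin();
       it != coarseSpaceMap.end(); ++it)
    if (seen.insert(it->second).second)
      uniqueCoarseMap.push_back(it->second);

  // For each component, store the index of its coarse space in that list.
  std::vector<int> componentIthCoarseMap(coarseSpaceMap.size());
  for (unsigned int i = 0; i < componentIthCoarseMap.size(); i++)
    for (unsigned int j = 0; j < uniqueCoarseMap.size(); j++)
      if (coarseSpaceMap[i] == uniqueCoarseMap[j])
        componentIthCoarseMap[i] = j;

  // Prints: component 0 -> block 1, component 1 -> block 1, component 2 -> block 2
  for (unsigned int i = 0; i < componentIthCoarseMap.size(); i++)
    std::printf("component %u -> block %d\n", i, componentIthCoarseMap[i] + 1);

  return 0;
}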
// ============================================================================
// == ==
// == AMDiS - Adaptive multidimensional simulations ==
// == ==
// == http://www.amdis-fem.org ==
// == ==
// ============================================================================
//
// Software License for AMDiS
//
// Copyright (c) 2010 Dresden University of Technology
// All rights reserved.
// Authors: Simon Vey, Thomas Witkowski et al.
//
// This file is part of AMDiS
//
// See also license.opensource.txt in the distribution.
/** \file ParallelCoarseSpaceMatVec.h */
#ifndef AMDIS_PARALLEL_COARSE_SPACE_MAT_VEC_H
#define AMDIS_PARALLEL_COARSE_SPACE_MAT_VEC_H
#include <vector>
#include <map>
#include <petsc.h>
#include "AMDiS_fwd.h"
namespace AMDiS {
  /**
   * This class implements a block structured PETSc matrix/vec which separates
   * the discretization of the interior of subdomains and the discretization
   * of the coarse space. Thus, we have one matrix block for the interior and
   * one matrix block for the coarse space plus the coupling blocks. Some notes:
   * - For a single level domain decomposition method (e.g. the standard
   *   FETI-DP method), the interior matrix is local to the current rank and
   *   the coarse space matrix is a globally distributed matrix.
   * - Different coarse spaces are possible for different components. In this
   *   case, there are as many blocks as there are different coarse spaces,
   *   plus one block for the interior matrix.
   */
  class ParallelCoarseSpaceMatVec {
  public:
    ParallelCoarseSpaceMatVec()
    {}

    /// Creates matrices and vectors with respect to the coarse space.
    void create(ParallelDofMapping *interiorMap,
                map<int, ParallelDofMapping*> coarseSpaceMap,
                int subdomainLevel,
                MPI::Intracomm mpiCommLocal,
                MPI::Intracomm mpiCommGlobal);

    /// Run PETSc's assembly routines.
    void assembly();

    /// Destroys all PETSc matrices.
    void destroy();

    inline Mat& getInteriorMat()
    {
      TEST_EXIT_DBG(mat.size() > 0)("No matrix data!\n");

      return mat[0][0];
    }

    inline Mat& getCoarseMat(int coarseSpace0 = 0, int coarseSpace1 = 0)
    {
      TEST_EXIT_DBG(mat.size() > coarseSpace0 + 1)("No matrix data!\n");
      TEST_EXIT_DBG(mat.size() > coarseSpace1 + 1)("No matrix data!\n");

      return mat[coarseSpace0 + 1][coarseSpace1 + 1];
    }

    inline Mat& getIntCoarseMat(int coarseSpace = 0)
    {
      TEST_EXIT_DBG(mat.size() > coarseSpace + 1)("No matrix data!\n");

      return mat[0][coarseSpace + 1];
    }

    inline Mat& getCoarseIntMat(int coarseSpace = 0)
    {
      TEST_EXIT_DBG(mat.size() > coarseSpace + 1)("No matrix data!\n");

      return mat[coarseSpace + 1][0];
    }

    inline Mat& getCoarseMatComp(int comp)
    {
      int matIndex = componentIthCoarseMap[comp] + 1;
      return mat[matIndex][matIndex];
    }

    inline Mat& getIntCoarseMatComp(int comp)
    {
      int matIndex = componentIthCoarseMap[comp] + 1;
      return mat[0][matIndex];
    }

    inline Mat& getCoarseIntMatComp(int comp)
    {
      int matIndex = componentIthCoarseMap[comp] + 1;
      return mat[matIndex][0];
    }

  private:
    /// Block structured matrix: mat[0][0] is the interior block, the remaining
    /// rows and columns belong to the coarse spaces.
    vector<vector<Mat> > mat;

    /// Parallel DOF mapping of the interior of the subdomains.
    ParallelDofMapping *interiorMap;

    /// Parallel DOF mapping of the (optional) coarse space. Allows to define
    /// different coarse spaces for different components.
    map<int, ParallelDofMapping*> coarseSpaceMap;

    /// For each component, the index of its coarse space within the list of
    /// unique coarse space mappings.
    vector<int> componentIthCoarseMap;
  };
}
#endif
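
To show how this header is meant to be used, here is a hedged usage fragment that mirrors the calls introduced below in PetscSolver.h and PetscSolverGlobalMatrix.cc. It is an editorial sketch, not part of the commit; the variables it refers to (petscData, interiorMap, coarseSpaceMap, subdomainLevel, mpiCommLocal, mpiCommGlobal, kspInterior) are assumed to be members of the PetscSolver class.

// Hedged fragment: how a PetscSolver subclass drives ParallelCoarseSpaceMatVec,
// following the calls added to PetscSolverGlobalMatrix::fillPetscMatrix().

// Allocate the interior block and, if present, all coarse space blocks.
petscData.create(interiorMap, coarseSpaceMap,
                 subdomainLevel, mpiCommLocal, mpiCommGlobal);

// ... fill entries with MatSetValues() on petscData.getInteriorMat(),
// petscData.getCoarseMat(), petscData.getIntCoarseMat(), ...

// Finalize all blocks (MatAssemblyBegin/End on every block).
petscData.assembly();

// Use the interior block as the operator of the interior KSP solver.
KSPSetOperators(kspInterior,
                petscData.getInteriorMat(),
                petscData.getInteriorMat(),
                SAME_NONZERO_PATTERN);

// Release the PETSc matrices when the solver is torn down.
petscData.destroy();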
@@ -32,7 +32,7 @@
 #include "Initfile.h"
 #include "DOFMatrix.h"
 #include "parallel/MeshDistributor.h"
+#include "parallel/ParallelCoarseSpaceMatVec.h"
 #include <petsc.h>
 #include <petscsys.h>
 #include <petscao.h>
@@ -178,41 +178,37 @@ namespace AMDiS {
     inline Mat& getMatIntInt()
     {
-      return mat[0][0];
-      // return matIntInt;
+      return petscData.getInteriorMat();
     }

     inline Mat& getMatCoarseCoarse()
     {
       FUNCNAME("PetscSolver::getMatCoarseCoarse()");

-      TEST_EXIT_DBG(coarseSpaceMap.size() && mat.size() > 1)
+      TEST_EXIT_DBG(coarseSpaceMap.size())
         ("Subdomain solver does not contain a coarse space!\n");

-      return mat[1][1];
-      // return matCoarseCoarse;
+      return petscData.getCoarseMat(0);
     }

     inline Mat& getMatIntCoarse()
     {
       FUNCNAME("PetscSolver::getMatIntCoarse()");

-      TEST_EXIT_DBG(coarseSpaceMap.size() && mat.size() > 1)
+      TEST_EXIT_DBG(coarseSpaceMap.size())
         ("Subdomain solver does not contain a coarse space!\n");

-      return mat[0][1];
-      // return matIntCoarse;
+      return petscData.getIntCoarseMat();
     }

     inline Mat& getMatCoarseInt()
     {
       FUNCNAME("PetscSolver::getMatCoarseInt()");

-      TEST_EXIT_DBG(coarseSpaceMap.size() && mat.size() > 1)
+      TEST_EXIT_DBG(coarseSpaceMap.size())
         ("Subdomain solver does not contain a coarse space!\n");

-      return mat[1][0];
-      // return matCoarseInt;
+      return petscData.getCoarseIntMat();
     }

   protected:
@@ -263,10 +259,9 @@ namespace AMDiS {
     MPI::Intracomm mpiCommLocal;

-    /// Petsc's matrix structure.
-    // Mat matIntInt, matCoarseCoarse, matIntCoarse, matCoarseInt;
-    vector<vector<Mat> > mat;
+    /// PETSc's matrices and vectors (possibly block structured if there is
+    /// a coarse space defined).
+    ParallelCoarseSpaceMatVec petscData;

     /// PETSc's vector structures for the rhs vector, the solution vector and a
     /// temporary vector for calculating the final residuum.
...
@@ -19,7 +19,7 @@ namespace AMDiS {
   void PetscSolverGlobalBlockMatrix::fillPetscMatrix(Matrix<DOFMatrix*> *seqMat)
   {
     FUNCNAME("PetscSolverGlobalBlockMatrix::fillPetscMatrix()");
+#if 0
     TEST_EXIT_DBG(meshDistributor)("No mesh distributor object defined!\n");
     TEST_EXIT_DBG(interiorMap)("No parallel mapping object defined!\n");
     TEST_EXIT_DBG(seqMat)("No DOF matrix defined!\n");
@@ -102,6 +102,7 @@ namespace AMDiS {
     KSPSetFromOptions(kspInterior);
     MSG("Fill petsc matrix needed %.5f seconds\n", MPI::Wtime() - wtime);
+#endif
   }
@@ -183,7 +184,8 @@ namespace AMDiS {
       if (nestMat[i] != PETSC_NULL)
         MatDestroy(&(nestMat[i]));
-    MatDestroy(&mat[0][0]);
+    petscData.destroy();
     KSPDestroy(&kspInterior);
   }
...
@@ -21,16 +21,15 @@ namespace AMDiS {
   {
     FUNCNAME("PetscSolverGlobalMatrix::fillPetscMatrix()");
+    petscData.create(interiorMap, coarseSpaceMap,
+                     subdomainLevel, mpiCommLocal, mpiCommGlobal);
     if (coarseSpaceMap.size()) {
       updateSubdomainData();
       fillPetscMatrixWithCoarseSpace(seqMat);
       return;
     }
-    mat.resize(1);
-    mat[0].resize(1);
-    Mat &matIntInt = mat[0][0];
     TEST_EXIT_DBG(meshDistributor)("No mesh distributor object defined!\n");
     TEST_EXIT_DBG(interiorMap)("No parallel mapping object defined!\n");
     TEST_EXIT_DBG(seqMat)("No DOF matrix defined!\n");
@@ -52,23 +51,13 @@
     VecCreateMPI(mpiCommGlobal, nRankRows, nOverallRows, &petscSolVec);
-    // === Create PETSc matrix with the computed nnz data structure. ===
-    MatCreateAIJ(mpiCommGlobal, nRankRows, nRankRows,
-                 nOverallRows, nOverallRows,
-                 0, nnzInterior.dnnz,
-                 0, nnzInterior.onnz,
-                 &matIntInt);
-    MatSetOption(matIntInt, MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);
 #if (DEBUG != 0)
     MSG("Fill petsc matrix 1 needed %.5f seconds\n", MPI::Wtime() - wtime);
 #endif
 #if (DEBUG != 0)
     int a, b;
-    MatGetOwnershipRange(matIntInt, &a, &b);
+    MatGetOwnershipRange(petscData.getInteriorMat(), &a, &b);
     TEST_EXIT(a == interiorMap->getStartDofs())("Wrong matrix ownership range!\n");
     TEST_EXIT(b == interiorMap->getStartDofs() + nRankRows)
       ("Wrong matrix ownership range!\n");
@@ -87,13 +76,11 @@
     MSG("Fill petsc matrix 2 needed %.5f seconds\n", MPI::Wtime() - wtime);
 #endif
-    MatAssemblyBegin(matIntInt, MAT_FINAL_ASSEMBLY);
-    MatAssemblyEnd(matIntInt, MAT_FINAL_ASSEMBLY);
+    petscData.assembly();
     if (printMatInfo) {
       MatInfo matInfo;
-      MatGetInfo(matIntInt, MAT_GLOBAL_SUM, &matInfo);
+      MatGetInfo(petscData.getInteriorMat(), MAT_GLOBAL_SUM, &matInfo);
       MSG("Matrix info:\n");
       MSG("  memory usage: %e MB\n", matInfo.memory / (1024.0 * 1024.0));
       MSG("  mallocs: %d\n", static_cast<int>(matInfo.mallocs));
@@ -111,7 +98,10 @@
     KSPCreate(mpiCommGlobal, &kspInterior);
     KSPGetPC(kspInterior, &pcInterior);
-    KSPSetOperators(kspInterior, matIntInt, matIntInt, SAME_NONZERO_PATTERN);
+    KSPSetOperators(kspInterior,
+                    petscData.getInteriorMat(),
+                    petscData.getInteriorMat(),
+                    SAME_NONZERO_PATTERN);
     KSPSetTolerances(kspInterior, 0.0, 1e-8, PETSC_DEFAULT, PETSC_DEFAULT);
     KSPSetType(kspInterior, KSPBCGS);
     KSPSetOptionsPrefix(kspInterior, kspPrefix.c_str());
@@ -142,45 +132,11 @@
     FUNCNAME("PetscSolverGlobalMatrix::fillPetscMatrixWithCoarseSpace()");
     TEST_EXIT_DBG(interiorMap)("Should not happen!\n");
+    TEST_EXIT_DBG(coarseSpaceMap.size() == seqMat->getSize())
+      ("Wrong sizes %d %d\n", coarseSpaceMap.size(), seqMat->getSize());
     vector<const FiniteElemSpace*> feSpaces = getFeSpaces(seqMat);
-    vector<ParallelDofMapping*> uniqueCoarseMap;
-    if (coarseSpaceMap.size()) {
-      TEST_EXIT_DBG(coarseSpaceMap.size() == seqMat->getSize())
-        ("Wrong sizes %d %d\n", coarseSpaceMap.size(), seqMat->getSize());
-      std::set<ParallelDofMapping*> tmp;
-      for (map<int, ParallelDofMapping*>::iterator it = coarseSpaceMap.begin();
-           it != coarseSpaceMap.end(); ++it) {
-        if (tmp.count(it->second) == 0) {
-          tmp.insert(it->second);
-          uniqueCoarseMap.push_back(it->second);
-        }
-      }
-    }
-    int nCoarseMap = uniqueCoarseMap.size();
-    mat.resize(nCoarseMap + 1);
-    for (int i = 0; i < nCoarseMap + 1; i++)
-      mat[i].resize(nCoarseMap + 1);
-    vector<int> componentIthCoarseMap(coarseSpaceMap.size());
-    for (unsigned int i = 0; i < componentIthCoarseMap.size(); i++) {
-      bool found = false;
-      for (int j = 0; j < nCoarseMap; j++) {
-        if (coarseSpaceMap[i] == uniqueCoarseMap[j]) {
-          componentIthCoarseMap[i] = j;
-          found = true;
-          break;
-        }
-      }
-      TEST_EXIT_DBG(found)("Should not happen!\n");
-    }
     int nRowsRankInterior = interiorMap->getRankDofs();
     int nRowsOverallInterior = interiorMap->getOverallDofs();
@@ -202,71 +158,6 @@
       }
     }
-    mat.resize(nCoarseMap + 1);
-    if (localMatrix) {
-      MatCreateSeqAIJ(mpiCommLocal, nRowsRankInterior, nRowsRankInterior,
-                      0, nnzInterior.dnnz,
-                      &mat[0][0]);
-    } else {
-      MatCreateAIJ(mpiCommLocal,
-                   nRowsRankInterior, nRowsRankInterior,
-                   nRowsOverallInterior, nRowsOverallInterior,
-                   0, nnzInterior.dnnz, 0, nnzInterior.onnz,
-                   &mat[0][0]);
-    }
-    if (coarseSpaceMap.size()) {
-      for (int i = 0; i < nCoarseMap; i++) {
-        ParallelDofMapping* cMap