Commit 586b8402 authored by Thomas Witkowski's avatar Thomas Witkowski

On the way to get it compiling.

parent 66a86905
......@@ -248,6 +248,7 @@ if(ENABLE_PARALLEL_DOMAIN)
list(APPEND AMDIS_INCLUDE_DIRS ${PETSC_DIR}/include ${PETSC_DIR}/${PETSC_ARCH}/include)
list(APPEND PARALLEL_DOMAIN_AMDIS_SRC
${SOURCE_DIR}/parallel/BddcMlSolver.cc
${SOURCE_DIR}/parallel/ParallelCoarseSpaceMatVec.cc
${SOURCE_DIR}/parallel/PetscMultigridPrecon.cc
${SOURCE_DIR}/parallel/PetscSolver.cc
${SOURCE_DIR}/parallel/PetscProblemStat.cc
......
//
// Software License for AMDiS
//
// Copyright (c) 2010 Dresden University of Technology
// All rights reserved.
// Authors: Simon Vey, Thomas Witkowski et al.
//
// This file is part of AMDiS
//
// See also license.opensource.txt in the distribution.
#include "AMDiS.h"
#include "parallel/ParallelCoarseSpaceMatVec.h"
namespace AMDiS {
using namespace std;
void ParallelCoarseSpaceMatVec::create(ParallelDofMapping *iMap,
map<int, ParallelDofMapping*> cMap,
int subdomainLevel,
MPI::Intracomm mpiCommLocal,
MPI::Intracomm mpiCommGlobal)
{
FUNCNAME("ParallelCoarseSpaceMatVec::update()");
interiorMap = iMap;
coarseSpaceMap = cMap;
vector<ParallelDofMapping*> uniqueCoarseMap;
if (coarseSpaceMap.size()) {
std::set<ParallelDofMapping*> tmp;
for (map<int, ParallelDofMapping*>::iterator it = coarseSpaceMap.begin();
it != coarseSpaceMap.end(); ++it) {
if (tmp.count(it->second) == 0) {
tmp.insert(it->second);
uniqueCoarseMap.push_back(it->second);
}
}
}
int nCoarseMap = uniqueCoarseMap.size();
mat.resize(nCoarseMap + 1);
for (int i = 0; i < nCoarseMap + 1; i++)
mat[i].resize(nCoarseMap + 1);
componentIthCoarseMap.resize(coarseSpaceMap.size());
for (unsigned int i = 0; i < componentIthCoarseMap.size(); i++) {
bool found = false;
for (int j = 0; j < nCoarseMap; j++) {
if (coarseSpaceMap[i] == uniqueCoarseMap[j]) {
componentIthCoarseMap[i] = j;
found = true;
break;
}
}
TEST_EXIT_DBG(found)("Should not happen!\n");
}

  // === Create the PETSc matrix for the interior block. Preallocation is ===
  // === not yet set here, so new nonzero allocations are allowed.        ===

  int nRankRows = interiorMap->getRankDofs();
  int nOverallRows = interiorMap->getOverallDofs();

  // If a coarse space is defined and we are on subdomain level 0, the interior
  // block is a rank-local sequential matrix; otherwise it is globally distributed.
  bool localMatrix = (coarseSpaceMap.size() && subdomainLevel == 0);
  if (localMatrix) {
    MatCreateSeqAIJ(mpiCommLocal, nRankRows, nOverallRows,
                    0, PETSC_NULL,
                    &mat[0][0]);
    MatSetOption(mat[0][0], MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);
  } else {
    MatCreateAIJ(mpiCommGlobal, nRankRows, nRankRows,
                 nOverallRows, nOverallRows,
                 0, PETSC_NULL,
                 0, PETSC_NULL,
                 &mat[0][0]);
    MatSetOption(mat[0][0], MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);
  }

  // === Create the PETSc blocks for the coarse space(s) and the coupling ===
  // === blocks between interior and coarse space.                        ===

  if (coarseSpaceMap.size()) {
    for (int i = 0; i < nCoarseMap; i++) {
      ParallelDofMapping* cMap = uniqueCoarseMap[i];

      int nRowsRankCoarse = cMap->getRankDofs();
      int nRowsOverallCoarse = cMap->getOverallDofs();

      // Diagonal block of the i-th coarse space (mat[0][0] is the interior block).
      MatCreateAIJ(mpiCommGlobal,
                   nRowsRankCoarse, nRowsRankCoarse,
                   nRowsOverallCoarse, nRowsOverallCoarse,
                   0, PETSC_NULL, 0, PETSC_NULL,
                   &mat[i + 1][i + 1]);
      MSG("REMOVE THIS LINE WHEN FINISHED!\n");
      MatSetOption(mat[i + 1][i + 1], MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);

      // Off-diagonal coupling blocks between this coarse space and the
      // interior block (j == 0) or the other coarse spaces (j > 0).
      for (int j = 0; j < nCoarseMap + 1; j++) {
        if (j == i + 1)
          continue;

        int nRowsRankMat = (j == 0 ? nRankRows : uniqueCoarseMap[j - 1]->getRankDofs());
        int nRowsOverallMat = (j == 0 ? nOverallRows : uniqueCoarseMap[j - 1]->getOverallDofs());

        MatCreateAIJ(mpiCommGlobal,
                     nRowsRankCoarse, nRowsRankMat,
                     nRowsOverallCoarse, nRowsOverallMat,
                     100, PETSC_NULL, 100, PETSC_NULL,
                     &mat[i + 1][j]);
        MSG("REMOVE THIS LINE WHEN FINISHED!\n");
        MatSetOption(mat[i + 1][j], MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);

        MatCreateAIJ(mpiCommGlobal,
                     nRowsRankMat, nRowsRankCoarse,
                     nRowsOverallMat, nRowsOverallCoarse,
                     0, PETSC_NULL, 0, PETSC_NULL,
                     &mat[j][i + 1]);
        MSG("REMOVE THIS LINE WHEN FINISHED!\n");
        MatSetOption(mat[j][i + 1], MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);
      }
    }
  }
}
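
// Illustration (comment only, not part of the setup above): with a single
// coarse space (nCoarseMap == 1), create() yields the 2x2 block layout
//
//   [ mat[0][0]  mat[0][1] ]   [ interior            interior-to-coarse ]
//   [ mat[1][0]  mat[1][1] ] = [ coarse-to-interior  coarse             ]
//
// which is what getInteriorMat(), getIntCoarseMat(), getCoarseIntMat() and
// getCoarseMat() return, respectively.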

void ParallelCoarseSpaceMatVec::destroy()
{
  FUNCNAME("ParallelCoarseSpaceMatVec::destroy()");

  // Destroy all matrix blocks, including the coupling blocks.
  int nMatrix = mat.size();
  for (int i = 0; i < nMatrix; i++)
    for (int j = 0; j < nMatrix; j++)
      MatDestroy(&mat[i][j]);
}


void ParallelCoarseSpaceMatVec::assembly()
{
  FUNCNAME("ParallelCoarseSpaceMatVec::assembly()");

  // Run PETSc's assembly on all matrix blocks.
  int nMatrix = mat.size();
  for (int i = 0; i < nMatrix; i++) {
    for (int j = 0; j < nMatrix; j++) {
      MatAssemblyBegin(mat[i][j], MAT_FINAL_ASSEMBLY);
      MatAssemblyEnd(mat[i][j], MAT_FINAL_ASSEMBLY);
    }
  }
}
}
// ============================================================================
// == ==
// == AMDiS - Adaptive multidimensional simulations ==
// == ==
// == http://www.amdis-fem.org ==
// == ==
// ============================================================================
//
// Software License for AMDiS
//
// Copyright (c) 2010 Dresden University of Technology
// All rights reserved.
// Authors: Simon Vey, Thomas Witkowski et al.
//
// This file is part of AMDiS
//
// See also license.opensource.txt in the distribution.
/** \file ParallelCoarseSpaceMatVec.h */
#ifndef AMDIS_PARALLEL_COARSE_SPACE_MAT_VEC_H
#define AMDIS_PARALLEL_COARSE_SPACE_MAT_VEC_H
#include <vector>
#include <map>
#include <petsc.h>
#include "AMDiS_fwd.h"
#include "Global.h"
namespace AMDiS {
/**
 * This class implements a block-structured PETSc matrix/vector which
 * separates the discretization of the interior of the subdomains from the
 * discretization of the coarse space. Thus, there is one matrix block for
 * the interior and one matrix block for each coarse space, plus the
 * corresponding coupling blocks. Some notes:
 * - For a single-level domain decomposition method (e.g. the standard
 *   FETI-DP method), the interior matrix is local to the current rank and
 *   the coarse space matrix is a globally distributed matrix.
 * - Different coarse spaces may be used for different components. In this
 *   case, there are as many blocks as there are distinct coarse spaces,
 *   plus one block for the interior matrix.
 */
class ParallelCoarseSpaceMatVec {
public:
  ParallelCoarseSpaceMatVec()
  {}

  /// Creates matrices and vectors with respect to the coarse space.
  void create(ParallelDofMapping *interiorMap,
              std::map<int, ParallelDofMapping*> coarseSpaceMap,
              int subdomainLevel,
              MPI::Intracomm mpiCommLocal,
              MPI::Intracomm mpiCommGlobal);

  /// Runs PETSc's assembly routines on all matrix blocks.
  void assembly();

  /// Destroys all PETSc matrix blocks.
  void destroy();
  /// Returns the block of the interior discretization.
  inline Mat& getInteriorMat()
  {
    TEST_EXIT_DBG(mat.size() > 0)("No matrix data!\n");

    return mat[0][0];
  }

  /// Returns the block coupling two coarse spaces (the diagonal coarse
  /// space block if both indices are equal).
  inline Mat& getCoarseMat(int coarseSpace0 = 0, int coarseSpace1 = 0)
  {
    TEST_EXIT_DBG(static_cast<int>(mat.size()) > coarseSpace0 + 1)("No matrix data!\n");
    TEST_EXIT_DBG(static_cast<int>(mat.size()) > coarseSpace1 + 1)("No matrix data!\n");

    return mat[coarseSpace0 + 1][coarseSpace1 + 1];
  }

  /// Returns the coupling block from the interior to a coarse space.
  inline Mat& getIntCoarseMat(int coarseSpace = 0)
  {
    TEST_EXIT_DBG(static_cast<int>(mat.size()) > coarseSpace + 1)("No matrix data!\n");

    return mat[0][coarseSpace + 1];
  }

  /// Returns the coupling block from a coarse space to the interior.
  inline Mat& getCoarseIntMat(int coarseSpace = 0)
  {
    TEST_EXIT_DBG(static_cast<int>(mat.size()) > coarseSpace + 1)("No matrix data!\n");

    return mat[coarseSpace + 1][0];
  }

  /// Returns the diagonal coarse space block of a given component.
  inline Mat& getCoarseMatComp(int comp)
  {
    int matIndex = componentIthCoarseMap[comp] + 1;
    return mat[matIndex][matIndex];
  }

  /// Returns the interior-to-coarse coupling block of a given component.
  inline Mat& getIntCoarseMatComp(int comp)
  {
    int matIndex = componentIthCoarseMap[comp] + 1;
    return mat[0][matIndex];
  }

  /// Returns the coarse-to-interior coupling block of a given component.
  inline Mat& getCoarseIntMatComp(int comp)
  {
    int matIndex = componentIthCoarseMap[comp] + 1;
    return mat[matIndex][0];
  }
private:
  /// Block matrix of PETSc matrices: mat[0][0] is the interior block; the
  /// remaining blocks belong to the coarse space(s) and their couplings.
  std::vector<std::vector<Mat> > mat;

  /// Parallel DOF mapping of the interior of the subdomains.
  ParallelDofMapping *interiorMap;

  /// Parallel DOF mapping of the (optional) coarse space. Allows to define
  /// different coarse spaces for different components.
  std::map<int, ParallelDofMapping*> coarseSpaceMap;

  /// For each component, the index of its coarse space within the block
  /// structure.
  std::vector<int> componentIthCoarseMap;
};
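
// A minimal usage sketch (illustration only, not part of this commit; the
// mappings and communicators below are assumed to be provided by the caller,
// e.g. by a PetscSolver subclass):
//
//   ParallelCoarseSpaceMatVec petscData;
//   std::map<int, ParallelDofMapping*> coarseMap;
//   coarseMap[0] = primalDofMap;  // hypothetical coarse space mapping for component 0
//
//   petscData.create(interiorMap, coarseMap, 0, mpiCommLocal, mpiCommGlobal);
//   // ... fill the blocks with MatSetValues on petscData.getInteriorMat(),
//   //     petscData.getCoarseMat(0), petscData.getIntCoarseMat(0), ...
//   petscData.assembly();
//   // ... set up KSP solvers on the assembled blocks and solve ...
//   petscData.destroy();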
}
#endif
......@@ -32,7 +32,7 @@
#include "Initfile.h"
#include "DOFMatrix.h"
#include "parallel/MeshDistributor.h"
#include "parallel/ParallelCoarseSpaceMatVec.h"
#include <petsc.h>
#include <petscsys.h>
#include <petscao.h>
......@@ -178,41 +178,37 @@ namespace AMDiS {
inline Mat& getMatIntInt()
{
return mat[0][0];
// return matIntInt;
return petscData.getInteriorMat();
}
inline Mat& getMatCoarseCoarse()
{
FUNCNAME("PetscSolver::getMatCoarseCoarse()");
TEST_EXIT_DBG(coarseSpaceMap.size() && mat.size() > 1)
TEST_EXIT_DBG(coarseSpaceMap.size())
("Subdomain solver does not contain a coarse space!\n");
return mat[1][1];
// return matCoarseCoarse;
return petscData.getCoarseMat(0);
}
inline Mat& getMatIntCoarse()
{
FUNCNAME("PetscSolver::getMatIntCoarse()");
TEST_EXIT_DBG(coarseSpaceMap.size() && mat.size() > 1)
TEST_EXIT_DBG(coarseSpaceMap.size())
("Subdomain solver does not contain a coarse space!\n");
return mat[0][1];
// return matIntCoarse;
return petscData.getIntCoarseMat();
}
inline Mat& getMatCoarseInt()
{
FUNCNAME("PetscSolver::getMatCoarseInt()");
TEST_EXIT_DBG(coarseSpaceMap.size() && mat.size() > 1)
TEST_EXIT_DBG(coarseSpaceMap.size())
("Subdomain solver does not contain a coarse space!\n");
return mat[1][0];
// return matCoarseInt;
return petscData.getCoarseIntMat();
}
protected:
......@@ -263,10 +259,9 @@ namespace AMDiS {
MPI::Intracomm mpiCommLocal;
/// Petsc's matrix structure.
// Mat matIntInt, matCoarseCoarse, matIntCoarse, matCoarseInt;
vector<vector<Mat> > mat;
/// PETSc matrices and vectors (possibly block structured if there is
/// a coarse space defined).
ParallelCoarseSpaceMatVec petscData;
/// PETSc's vector structures for the rhs vector, the solution vector and a
/// temporary vector for calculating the final residuum.
......
......@@ -19,7 +19,7 @@ namespace AMDiS {
void PetscSolverGlobalBlockMatrix::fillPetscMatrix(Matrix<DOFMatrix*> *seqMat)
{
FUNCNAME("PetscSolverGlobalBlockMatrix::fillPetscMatrix()");
#if 0
TEST_EXIT_DBG(meshDistributor)("No mesh distributor object defined!\n");
TEST_EXIT_DBG(interiorMap)("No parallel mapping object defined!\n");
TEST_EXIT_DBG(seqMat)("No DOF matrix defined!\n");
......@@ -102,6 +102,7 @@ namespace AMDiS {
KSPSetFromOptions(kspInterior);
MSG("Fill petsc matrix needed %.5f seconds\n", MPI::Wtime() - wtime);
#endif
}
......@@ -183,7 +184,8 @@ namespace AMDiS {
if (nestMat[i] != PETSC_NULL)
MatDestroy(&(nestMat[i]));
MatDestroy(&mat[0][0]);
petscData.destroy();
KSPDestroy(&kspInterior);
}
......