Commit 4246111b authored by Thomas Witkowski's avatar Thomas Witkowski

Finished first phase of code refactoring to allow for multiple coarse spaces in domain decomposition methods.
parent 9a682f9d
@@ -19,32 +19,47 @@ namespace AMDiS {
using namespace std;
ParallelCoarseSpaceMatVec::ParallelCoarseSpaceMatVec()
: rStartInterior(0),
nGlobalOverallInterior(0),
: interiorMap(NULL),
lastMeshNnz(0),
alwaysCreateNnzStructure(false)
alwaysCreateNnzStructure(false),
meshDistributor(NULL),
subdomainLevel(0),
rStartInterior(0),
nGlobalOverallInterior(0)
{
Parameters::get("parallel->always create nnz structure",
alwaysCreateNnzStructure);
}
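// Illustrative sketch (not part of this commit): the flag read above comes
// from the parameter file. Assuming the usual AMDiS "key: value" init file
// syntax, forcing the nnz structure to be rebuilt on every assembly would
// look like:
//
//   parallel->always create nnz structure: 1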
void ParallelCoarseSpaceMatVec::init(ParallelDofMapping *iMap,
map<int, ParallelDofMapping*> cMap,
int sdLevel,
MPI::Intracomm mcLocal,
MPI::Intracomm mcGlobal,
MeshDistributor *meshDist)
void ParallelCoarseSpaceMatVec::setCoarseSpaceDofMapping(ParallelDofMapping *coarseDofs,
int component)
{
FUNCNAME("ParallelCoarseSpaceMatVec:init()");
interiorMap = iMap;
coarseSpaceMap = cMap;
subdomainLevel = sdLevel;
mpiCommLocal = mcLocal;
mpiCommGlobal = mcGlobal;
meshDistributor = meshDist;
FUNCNAME("ParallelCoarseSpaceMatVec::setCoarseSpaceDofMapping()");
TEST_EXIT_DBG(coarseDofs)("Should not happen!\n");
if (component == -1) {
// === Set coarse space for all components. ===
coarseSpaceMap.clear();
int nComponents = coarseDofs->getNumberOfComponents();
for (int i = 0; i < nComponents; i++)
coarseSpaceMap[i] = coarseDofs;
} else {
// === Set coarse space for just one component. ===
coarseSpaceMap[component] = coarseDofs;
}
}
void ParallelCoarseSpaceMatVec::prepare()
{
FUNCNAME("ParallelCoarseSpaceMatVec::prepare()");
// === Create vector of unique pointers to all coarse space maps. ===
uniqueCoarseMap.clear();
if (coarseSpaceMap.size()) {
@@ -58,6 +73,9 @@ namespace AMDiS {
}
}
// === Create pointers to PETSc matrix and vector objects. ===
int nCoarseMap = uniqueCoarseMap.size();
mat.resize(nCoarseMap + 1);
for (int i = 0; i < nCoarseMap + 1; i++)
@@ -66,6 +84,9 @@ namespace AMDiS {
vecSol.resize(nCoarseMap + 1);
vecRhs.resize(nCoarseMap + 1);
// === Create map from component number to its coarse space map. ===
componentIthCoarseMap.resize(coarseSpaceMap.size());
for (unsigned int i = 0; i < componentIthCoarseMap.size(); i++) {
bool found = false;
@@ -82,16 +103,30 @@ namespace AMDiS {
}
void ParallelCoarseSpaceMatVec::create(Matrix<DOFMatrix*>& seqMat)
void ParallelCoarseSpaceMatVec::createMatVec(Matrix<DOFMatrix*>& seqMat)
{
FUNCNAME("ParallelCoarseSpaceMatVec::create()");
// === Prepare coarse space information and generate the correct number ===
// === of empty PETSc matrix and vector objects. ===
prepare();
// === Update subdomain data (mostly required for multi-level methods) ===
updateSubdomainData();
// === If required, recompute non zero structure of the matrix. ===
bool localMatrix = (coarseSpaceMap.size() && subdomainLevel == 0);
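// A purely local interior matrix is used only if at least one coarse space
// is defined and the subdomain level is 0.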
if (checkMeshChange(seqMat, localMatrix)) {
if (checkMeshChange()) {
// Mesh has been changed, recompute interior DOF mapping.
vector<const FiniteElemSpace*> feSpaces = getComponentFeSpaces(seqMat);
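// Matrix indices for the interior mapping are computed only if the interior
// matrix is not assembled as a purely local matrix.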
interiorMap->setComputeMatIndex(!localMatrix);
interiorMap->update(feSpaces);
int nMat = uniqueCoarseMap.size() + 1;
nnz.resize(nMat);
for (int i = 0; i < nMat; i++) {
@@ -250,8 +285,7 @@ namespace AMDiS {
}
bool ParallelCoarseSpaceMatVec::checkMeshChange(Matrix<DOFMatrix*> &seqMat,
bool localMatrix)
bool ParallelCoarseSpaceMatVec::checkMeshChange()
{
FUNCNAME("ParallelCoarseSpaceMatVec::checkMeshChange()");
@@ -261,13 +295,7 @@ namespace AMDiS {
mpiCommGlobal.Allreduce(&sendValue, &recvAllValues, 1, MPI_INT, MPI_SUM);
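// recvAllValues accumulates the per-rank change indicators via MPI_SUM, so
// it is non-zero if at least one rank reports a changed mesh.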
if (recvAllValues != 0 || alwaysCreateNnzStructure) {
vector<const FiniteElemSpace*> feSpaces = getComponentFeSpaces(seqMat);
interiorMap->setComputeMatIndex(!localMatrix);
interiorMap->update(feSpaces);
lastMeshNnz = meshDistributor->getLastMeshChangeIndex();
return true;
}
......
@@ -42,149 +42,269 @@ namespace AMDiS {
* - Different coarse spaces are possible for different components. In
* this case, there are as many blocks as there are distinct coarse spaces,
* plus one block for the interior matrix.
* - This class also manages the creation of the corresponding non zero
* structure of the matrices.
*/
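/* Minimal access sketch (illustration only, not part of this commit; "pcs"
 * is a hypothetical, fully initialized instance, and the getters are the
 * ones declared below):
 *
 *   Mat &matII = pcs.getMatInterior();          // interior block mat[0][0]
 *   Mat &matCC = pcs.getMatCoarse(0);           // first coarse space block mat[1][1]
 *   Mat &matIC = pcs.getMatInteriorCoarse(0);   // coupling block mat[0][1]
 *   Mat &matCI = pcs.getMatCoarseInterior(0);   // coupling block mat[1][0]
 */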
class ParallelCoarseSpaceMatVec {
public:
ParallelCoarseSpaceMatVec();
/// Creates matrices and vectors with respect to the coarse space.
void init(ParallelDofMapping *interiorMap,
map<int, ParallelDofMapping*> coarseSpaceMap,
int subdomainLevel,
MPI::Intracomm mpiCommLocal,
MPI::Intracomm mpiCommGlobal,
MeshDistributor *meshDistributor);
/// Set parallel DOF mapping for the interior DOFs.
void setDofMapping(ParallelDofMapping *interiorDofs)
{
interiorMap = interiorDofs;
}
void create(Matrix<DOFMatrix*>& seqMat);
/** \brief
* Sets the coarse space for all or a specific component.
*
* \param[in] coarseDofs Coarse space DOF mapping.
* \param[in] component If the default value -1 is used, the coarse
* space DOF mapping is set for all components
* of the equation. Otherwise, it is set only
* for the given component.
*/
void setCoarseSpaceDofMapping(ParallelDofMapping *coarseDofs,
int component = -1);
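/* Usage sketch (illustration only, not part of this commit; "primalMap" and
 * "pressureMap" are hypothetical ParallelDofMapping objects):
 *
 *   pcs.setCoarseSpaceDofMapping(&primalMap);      // one coarse space for all components
 *   pcs.setCoarseSpaceDofMapping(&pressureMap, 2); // override only component 2
 */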
/// Set mesh distributor object and MPI communicators.
void setMeshDistributor(MeshDistributor *m,
MPI::Intracomm mpiComm0,
MPI::Intracomm mpiComm1)
{
meshDistributor = m;
mpiCommGlobal = mpiComm0;
mpiCommLocal = mpiComm1;
}
/// Set the level of the interior discretization. Used only for
/// multi-level methods.
void setLevel(int l)
{
subdomainLevel = l;
}
/// Creates matrices and vectors with respect to the coarse space.
void createMatVec(Matrix<DOFMatrix*>& seqMat);
/// Run PETSc's matrix assembly routines.
void matAssembly();
/// Run PETSc's vector assembly routines.
/// Run PETSc's vector assembly routines on rhs vectors.
void vecRhsAssembly();
/// Run PETSc's vector assembly routines on solution vectors.
void vecSolAssembly();
/// Destroys PETSc matrix objects.
void matDestroy();
/// Destroys PETSc vector objects.
void vecDestroy();
bool checkMeshChange(Matrix<DOFMatrix*> &mat,
bool localMatrix = false);
inline Mat& getInteriorMat()
/// Get interior matrix.
inline Mat& getMatInterior()
{
TEST_EXIT_DBG(mat.size() > 0)("No matrix data!\n");
return mat[0][0];
}
inline Mat& getCoarseMat(int coarseSpace0 = 0, int coarseSpace1 = 0)
/// Get coarse space matrix.
inline Mat& getMatCoarse(int coarseSpace0 = 0, int coarseSpace1 = 0)
{
TEST_EXIT_DBG(mat.size() > coarseSpace0 + 1)("No matrix data!\n");
TEST_EXIT_DBG(mat.size() > coarseSpace1 + 1)("No matrix data!\n");
return mat[coarseSpace0 + 1][coarseSpace1 + 1];
}
inline Mat& getIntCoarseMat(int coarseSpace = 0)
/// Get coupling matrix of the interior and some coarse space.
inline Mat& getMatInteriorCoarse(int coarseSpace = 0)
{
TEST_EXIT_DBG(mat.size() > coarseSpace + 1)("No matrix data!\n");
return mat[0][coarseSpace + 1];
}
inline Mat& getCoarseIntMat(int coarseSpace = 0)
/// Get coupling of some coarse space matrix and the interior.
inline Mat& getMatCoarseInterior(int coarseSpace = 0)
{
TEST_EXIT_DBG(mat.size() > coarseSpace + 1)("No matrix data!\n");
return mat[coarseSpace + 1][0];
}
inline Mat& getCoarseMatComp(int comp)
/// Get the coarse space matrix of some system component.
inline Mat& getMatCoarseByComponent(int comp)
{
int matIndex = componentIthCoarseMap[comp] + 1;
return mat[matIndex][matIndex];
}
inline Mat& getIntCoarseMatComp(int comp)
/// Get coupling matrix of the interior and the coarse space of a
/// system component.
inline Mat& getMatInteriorCoarseByComponent(int comp)
{
int matIndex = componentIthCoarseMap[comp] + 1;
return mat[0][matIndex];
}
inline Mat& getCoarseIntMatComp(int comp)
/// Get coupling matrix of the coarse space of a system component and the
/// interior matrix.
inline Mat& getMatCoarseInteriorByComponent(int comp)
{
int matIndex = componentIthCoarseMap[comp] + 1;
return mat[matIndex][0];
}
inline Vec& getInteriorVecRhs()
/// Get the RHS vector of the interior.
inline Vec& getVecRhsInterior()
{
return vecRhs[0];
}
inline Vec& getCoarseVecRhs(int coarseSpace = 0)
/// Get the RHS vector of some coarse space.
inline Vec& getVecRhsCoarse(int coarseSpace = 0)
{
return vecRhs[coarseSpace + 1];
}
inline Vec& getInteriorVecSol()
/// Get the solution vector of the interior.
inline Vec& getVecSolInterior()
{
return vecSol[0];
}
inline Vec& getCoarseVecSol(int coarseSpace = 0)
/// Get the solution vector of some coarse space.
inline Vec& getVecSolCoarse(int coarseSpace = 0)
{
return vecSol[coarseSpace + 1];
}
inline int getStartInterior() const
/** \brief
* Checks whether a given DOF index in some component is a coarse space DOF.
* Note (TODO): Specifying both the component number and the FE space
* is not really necessary. Rewrite this!
*
* \param[in] component Component number of the system.
* \param[in] feSpace Finite element space of the component.
* \param[in] dof DOF index
*
* \return True if the DOF is a coarse space DOF in the given component,
* false otherwise.
*/
inline bool isCoarseSpace(int component,
const FiniteElemSpace *feSpace,
DegreeOfFreedom dof)
{
return rStartInterior;
FUNCNAME("ParallelCoarseSpaceMatVec::isCoarseSpace()");
if (coarseSpaceMap.empty())
return false;
TEST_EXIT_DBG(coarseSpaceMap.count(component))
("Component %d has no coarse space defined!\n", component);
return (*(coarseSpaceMap[component]))[feSpace].isSet(dof);
}
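/* Sketch of the intended use during assembly (illustration only; "comp",
 * "rowFeSpace" and "dofIndex" are hypothetical values from an assembly loop):
 *
 *   if (isCoarseSpace(comp, rowFeSpace, dofIndex)) {
 *     // the value belongs to a coarse space block, e.g. getMatCoarseByComponent(comp)
 *   } else {
 *     // the value belongs to the interior block, getMatInterior()
 *   }
 */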
protected:
/// Prepares internal data structures. First, it creates \ref uniqueCoarseMap
/// and \ref componentIthCoarseMap. Both are used to create the correct
/// number of matrices and vectors in \ref mat, \ref vecSol and \ref vecRhs.
void prepare();
/// Computes the values of \ref rStartInterior and
/// \ref nGlobalOverallInterior.
void updateSubdomainData();
private:
/// Checks for mesh changes. Returns true if the mesh has been changed
/// since the last matrix creation. The result is used to decide whether
/// the matrix non zero structure must be rebuilt.
bool checkMeshChange();
private:
/// Matrix of PETSc matrices. mat[0][0] is the interior discretization
/// matrix, mat[1][1] corresponds to the first coarse space and so on.
/// The off-diagonal blocks mat[i][j], with i not equal to j, are the
/// coupling matrices between the interior and the coarse spaces, and
/// between different coarse spaces, respectively.
vector<vector<Mat> > mat;
/// Solution and RHS vectors. Entry 0 is the interior vector, entry 1 the
/// first coarse space vector and so on.
vector<Vec> vecSol, vecRhs;
/// Matrix of objects to control the matrix non zero structure of the
/// corresponding PETSc matrices stored in \ref mat.
vector<vector<MatrixNnzStructure> > nnz;
ParallelDofMapping *interiorMap;
/// Parallel DOF mapping of the (optional) coarse space. Allows to define
/// different coarse spaces for different components.
map<int, ParallelDofMapping*> coarseSpaceMap;
/** \brief
* Stores for each system component (i.e. each PDE variable) the coarse
* space that is used for its discretization.
*
* Example: We solve the Stokes equation in 2D with a different coarse
* space for the velocity unknowns (components 0 and 1) and the pressure
* (component 2). Then:
* componentIthCoarseMap[0] = 0
* componentIthCoarseMap[1] = 0
* componentIthCoarseMap[2] = 1
* The indices can directly be used to access the corresponding parallel
* DOF mapping in \ref uniqueCoarseMap.
*/
vector<int> componentIthCoarseMap;
/// Stores the set of all coarse space DOF mappings. All entries are unique.
vector<ParallelDofMapping*> uniqueCoarseMap;
/// Stores the mesh change index of the mesh the nnz structure was created
/// for. Hence, if the current mesh change index is higher than this value,
/// a new nnz structure must be created for the PETSc matrices, because the
/// mesh and thus also the assembled matrix structure have changed.
int lastMeshNnz;
/// If this variable is set to true, the non-zero matrix structure is
/// created each time from scratch by calling \ref createPetscNnzStructure.
/// This can be necessary if the number of non-zeros in the matrix varies
/// although the mesh does not change. This may happen if there are many
/// operators using DOFVectors from the old timestep that contain many
/// zeros due to some phase fields.
bool alwaysCreateNnzStructure;
protected:
/// Pointer to a mesh distributor object.
MeshDistributor *meshDistributor;
/// Level of subdomain/interior discretization. Is used for multi-level
/// methods only.
int subdomainLevel;
/// MPI communicator on the subdomain level. If no multi-level method is
/// used, this is always MPI_COMM_SELF. In the case of a multi-level
/// method, this is a subset of MPI_COMM_WORLD.
MPI::Intracomm mpiCommLocal;
/// MPI communicator on the coarse space level. If no multi-level method
/// is used, this is always MPI_COMM_WORLD, otherwise a subset of it.
MPI::Intracomm mpiCommGlobal;
/// Offset of the interior DOFs of the local interior with respect to the
/// subdomain. In the case of a one-level method, each local interior
/// is exactly one subdomain. In the case of a multi-level method, one
/// subdomain may consist of several rank domains. This value then defines
/// the offset of the rank's interior rows within the subdomain's interior rows.
int rStartInterior;
/// Number of overall rows in the subdomain's interior. For one-level
/// methods, this value is equal to the number of rows in the rank's
/// interior. See also the explanation of \ref rStartInterior.
int nGlobalOverallInterior;
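// Illustration with hypothetical numbers: if one subdomain consists of two
// ranks with 100 and 120 interior rows, rStartInterior is 0 on the first
// rank and 100 on the second, and nGlobalOverallInterior is 220 on both.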
/// Stores the mesh change index of the mesh the nnz structure was created for.
/// Hence, if the current mesh change index is higher than this value, a new
/// nnz structure must be created for the PETSc matrices, because the mesh and
/// thus also the assembled matrix structure have changed.
int lastMeshNnz;
/// Parallel DOF mapping for the interior.
ParallelDofMapping *interiorMap;
/// If this variable is set to true, the non-zero matrix structure is
/// created each time from scratch by calling \ref createPetscNnzStructure.
/// This can be necessary if the number of non-zeros in the matrix varies
/// although the mesh does not change. This may happen if there are many
/// operators using DOFVectors from the old timestep containing many zeros due to
/// some phase fields.
bool alwaysCreateNnzStructure;
/// Parallel DOF mapping of the (optional) coarse space. Allows to define
/// different coarse spaces for different components.
map<int, ParallelDofMapping*> coarseSpaceMap;
};
}
......
@@ -21,10 +21,7 @@ namespace AMDiS {
using namespace std;
PetscSolver::PetscSolver()
: meshDistributor(NULL),
subdomainLevel(0),
interiorMap(NULL),
mpiRank(-1),
: ParallelCoarseSpaceMatVec(),
kspPrefix(""),
removeRhsNullspace(false),
hasConstantNullspace(false)
@@ -41,25 +38,6 @@ namespace AMDiS {
}
void PetscSolver::setCoarseSpaceDofMapping(ParallelDofMapping *coarseDofs,
int component)
{
FUNCNAME("PetscSolver::setCoarseSpaceDofMapping()");
TEST_EXIT_DBG(coarseDofs)("Should not happen!\n");
if (component == -1) {
coarseSpaceMap.clear();
int nComponents = coarseDofs->getNumberOfComponents();
for (int i = 0; i < nComponents; i++)
coarseSpaceMap[i] = coarseDofs;
} else {
coarseSpaceMap[component] = coarseDofs;
}
}
void PetscSolver::solve(Vec &rhs, Vec &sol)
{
FUNCNAME("PetscSolver::solve()");
......
@@ -42,38 +42,18 @@ namespace AMDiS {
using namespace std;
class PetscSolver
/**
* Provides an abstract interface to an arbitrary PETSc solver. This class is
* based on \ref ParallelCoarseSpaceMatVec to support solvers which make
* use of a coarse grid problem.
*/
class PetscSolver : public ParallelCoarseSpaceMatVec
{
public:
PetscSolver();
virtual ~PetscSolver() {}
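/* Typical setup sequence after this refactoring (illustrative sketch, not
 * part of this commit; "solver", "meshDist", "interiorMap" and "coarseMap"
 * are hypothetical objects, and the setters below are inherited from
 * ParallelCoarseSpaceMatVec):
 *
 *   solver.setMeshDistributor(&meshDist, MPI::COMM_WORLD, MPI::COMM_SELF);
 *   solver.setLevel(0);
 *   solver.setDofMapping(&interiorMap);
 *   solver.setCoarseSpaceDofMapping(&coarseMap);
 */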
void setMeshDistributor(MeshDistributor *m,
MPI::Intracomm mpiComm0,
MPI::Intracomm mpiComm1)
{
meshDistributor = m;
mpiCommGlobal = mpiComm0;
mpiCommLocal = mpiComm1;
mpiRank = mpiCommGlobal.Get_rank();
}
void setLevel(int l)
{
subdomainLevel = l;
}
/// Set parallel DOF mapping for the interior DOFs.
void setDofMapping(ParallelDofMapping *interiorDofs)
{
interiorMap = interiorDofs;
}
void setCoarseSpaceDofMapping(ParallelDofMapping *coarseDofs,
int component = -1);
/** \brief
* Create a PETSc matrix. The given DOF matrices are used to create the nnz
* structure of the PETSc matrix and the values are transferred to it.
@@ -146,86 +126,6 @@ namespace AMDiS {
constNullspaceComponent.push_back(component);
}
inline bool isCoarseSpace(int component,
const FiniteElemSpace *feSpace,
DegreeOfFreedom dof)
{
FUNCNAME("PetscSolver::isCoarseSpace()");
if (coarseSpaceMap.empty())
return false;
TEST_EXIT_DBG(coarseSpaceMap.count(component))
("Component %d has no coarse space defined!\n", component);
return (*(coarseSpaceMap[component]))[feSpace].isSet(dof);
}
inline Vec& getRhsCoarseSpace()
{
FUNCNAME("PetscSolver::getRhsCoarseSpace()");
TEST_EXIT_DBG(coarseSpaceMap.size())
("Subdomain solver does not contain a coarse space!\n");
return petscData.getCoarseVecRhs();
}
inline Vec& getRhsInterior()
{
return petscData.getInteriorVecRhs();
}
inline Vec& getSolCoarseSpace()
{
FUNCNAME("PetscSolver::getSolCoarseSpace()");
TEST_EXIT_DBG(coarseSpaceMap.size())
("Subdomain solver does not contain a coarse space!\n");
return petscData.getCoarseVecSol();
}
inline Vec& getSolInterior()
{
return petscData.getInteriorVecSol();
}
inline Mat& getMatIntInt()
{
return petscData.getInteriorMat();
}
inline Mat& getMatCoarseCoarse()
{
FUNCNAME("PetscSolver::getMatCoarseCoarse()");
TEST_EXIT_DBG(coarseSpaceMap.size())
("Subdomain solver does not contain a coarse space!\n");
return petscData.getCoarseMat(0);
}
inline Mat& getMatIntCoarse()
{
FUNCNAME("PetscSolver::getMatIntCoarse()");
TEST_EXIT_DBG(coarseSpaceMap.size())
("Subdomain solver does not contain a coarse space!\n");
return petscData.getIntCoarseMat();
}
inline Mat& getMatCoarseInt()
{
FUNCNAME("PetscSolver::getMatCoarseInt()");
TEST_EXIT_DBG(coarseSpaceMap.size())
("Subdomain solver does not contain a coarse space!\n");
return petscData.getCoarseIntMat();
}
protected:
/** \brief
* Copies between two PETSc vectors by using different index sets for the
@@ -242,26 +142,6 @@ namespace AMDiS {
vector<int>& originIndex, vector<int>& destIndex);
protected:
MeshDistributor *meshDistributor;
int subdomainLevel;
ParallelDofMapping *interiorMap;
/// Parallel DOF mapping of the (optional) coarse space. Allows to define
/// different coarse spaces for different components.
map<int, ParallelDofMapping*> coarseSpaceMap;
int mpiRank;
MPI::Intracomm mpiCommGlobal;