Commit a33fd12b authored by Thomas Witkowski

More work on FETI-DP for multilevel test.

parent 5161a040
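The recurring change in this commit replaces `MPI::Intracomm*` members and parameters with plain `MPI::Intracomm` objects, so the `*mpiComm` dereference disappears at every PETSc call site; in addition, the solver now keeps a per-rank `mpiSelfComm` and a first `SubDomainSolver` skeleton is added for the multilevel FETI-DP test. A minimal sketch of the by-value communicator pattern, assuming the MPI C++ bindings are available; the class and member names below are illustrative only, not the actual AMDiS interfaces:

```cpp
#include <mpi.h>
#include <petscksp.h>

// Illustrative sketch (not from the AMDiS sources): the solver stores the
// communicator by value.  MPI::Intracomm is a light handle and converts
// implicitly to MPI_Comm, so it can be passed directly to PETSc routines.
class ToySolver {
public:
  void setComm(MPI::Intracomm comm)
  {
    mpiComm = comm;                  // copy the handle, no ownership issues
    mpiSelfComm = PETSC_COMM_SELF;   // per-rank communicator for local solves
  }

  void createSolver()
  {
    KSPCreate(mpiComm, &ksp);        // before this commit: KSPCreate(*mpiComm, &ksp);
  }

private:
  MPI::Intracomm mpiComm;
  MPI::Intracomm mpiSelfComm;
  KSP ksp;
};
```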
@@ -252,7 +252,8 @@ if(ENABLE_PARALLEL_DOMAIN)
   ${SOURCE_DIR}/parallel/PetscSolverFeti.cc
   ${SOURCE_DIR}/parallel/PetscSolverGlobalMatrix.cc
   ${SOURCE_DIR}/parallel/PetscSolverGlobalBlockMatrix.cc
-  ${SOURCE_DIR}/parallel/PetscSolverSchur.cc)
+  ${SOURCE_DIR}/parallel/PetscSolverSchur.cc
+  ${SOURCE_DIR}/parallel/SubDomainSolver.cc)
 elseif(ENABLE_PARALLEL_DOMAIN STREQUAL "PMTL")
   set(MTL_INCLUDE_DIR "")
   find_package(MTL REQUIRED)
......
@@ -40,7 +40,7 @@ namespace AMDiS {
     nOverallDofs = 0;
     rStartDofs = 0;
-    mpi::getDofNumbering(*mpiComm, nRankDofs, rStartDofs, nOverallDofs);
+    mpi::getDofNumbering(mpiComm, nRankDofs, rStartDofs, nOverallDofs);

     // === If required, compute also the global indices. ===
@@ -69,7 +69,7 @@ namespace AMDiS {
     // === Send all global indices of DOFs that are owned by the rank to all ===
     // === other ranks that also include this DOF. ===
-    StdMpi<vector<int> > stdMpi(*mpiComm);
+    StdMpi<vector<int> > stdMpi(mpiComm);
     for (DofComm::Iterator it(*sendDofs, feSpace); !it.end(); it.nextRank())
       for (; !it.endDofIter(); it.nextDof())
         if (dofMap.count(it.getDofIndex()) && !nonRankDofs.count(it.getDofIndex()))
@@ -111,7 +111,7 @@ namespace AMDiS {
   }

-  void ParallelDofMapping::init(MPI::Intracomm *m,
+  void ParallelDofMapping::init(MPI::Intracomm m,
                                 vector<const FiniteElemSpace*> &fe,
                                 vector<const FiniteElemSpace*> &uniqueFe,
                                 bool needGlobalMapping,
@@ -297,7 +297,7 @@ namespace AMDiS {
     // === Communicate the matrix indices for all DOFs that are on some ===
     // === interior boundaries. ===
-    StdMpi<vector<DegreeOfFreedom> > stdMpi(*mpiComm);
+    StdMpi<vector<DegreeOfFreedom> > stdMpi(mpiComm);
     for (DofComm::Iterator it(*sendDofs, feSpaces[i]);
          !it.end(); it.nextRank()) {
       vector<DegreeOfFreedom> sendGlobalDofs;
......
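The `mpi::getDofNumbering()` call above gives each rank its offset into a global DOF numbering together with the overall DOF count. The AMDiS helper itself is not part of this diff; the following is only a hedged sketch of how such a numbering is commonly computed with the MPI C++ bindings (function and variable names are illustrative, not the AMDiS implementation):

```cpp
#include <mpi.h>

// Illustrative stand-in for mpi::getDofNumbering(): given the number of DOFs
// owned by this rank, compute the rank's start offset in a global numbering
// and the total number of DOFs over all ranks.
void getDofNumberingSketch(MPI::Intracomm comm, int nRankDofs,
                           int &rStartDofs, int &nOverallDofs)
{
  int scanSum = 0;
  comm.Scan(&nRankDofs, &scanSum, 1, MPI::INT, MPI::SUM);  // inclusive prefix sum
  rStartDofs = scanSum - nRankDofs;                        // exclusive prefix = own offset
  comm.Allreduce(&nRankDofs, &nOverallDofs, 1, MPI::INT, MPI::SUM);
}
```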
@@ -109,7 +109,7 @@ namespace AMDiS {
     }

     /// This is the only valid constructor to be used.
-    FeSpaceDofMap(MPI::Intracomm* m)
+    FeSpaceDofMap(MPI::Intracomm m)
       : mpiComm(m),
         sendDofs(NULL),
         recvDofs(NULL),
@@ -120,11 +120,7 @@ namespace AMDiS {
         nLocalDofs(0),
         nOverallDofs(0),
         rStartDofs(0)
-    {
-      FUNCNAME("FeSpaceDofMap::FeSpaceDofMap()");
-      TEST_EXIT(mpiComm)("No MPI Communicator specified!\n");
-    }
+    {}

     /// Clears all data of the mapping.
     void clear();
@@ -230,7 +226,7 @@ namespace AMDiS {
   private:
     /// MPI communicator object;
-    MPI::Intracomm* mpiComm;
+    MPI::Intracomm mpiComm;

     /// DOF communicators for all DOFs on interior boundaries.
     DofComm *sendDofs, *recvDofs;
@@ -291,7 +287,7 @@ namespace AMDiS {
      * \param[in] bNonLocalDofs  If true, at least one rank's mapping
      *                           contains DOFs that are not owned by the rank.
      */
-    void init(MPI::Intracomm *m,
+    void init(MPI::Intracomm m,
               vector<const FiniteElemSpace*> &fe,
               vector<const FiniteElemSpace*> &uniqueFe,
               bool needGlobalMapping,
@@ -390,7 +386,7 @@ namespace AMDiS {
   private:
     /// MPI communicator object;
-    MPI::Intracomm* mpiComm;
+    MPI::Intracomm mpiComm;

     /// DOF communicators for all DOFs on interior boundaries.
     DofComm *sendDofs, *recvDofs;
......
......@@ -62,13 +62,13 @@ namespace AMDiS {
FUNCNAME("PetscSolver::copyVec()");
IS originIs, destIs;
ISCreateGeneral(*mpiComm,
ISCreateGeneral(mpiComm,
originIndex.size(),
&(originIndex[0]),
PETSC_USE_POINTER,
&originIs);
ISCreateGeneral(*mpiComm,
ISCreateGeneral(mpiComm,
destIndex.size(),
&(destIndex[0]),
PETSC_USE_POINTER,
......
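`PetscSolver::copyVec()` builds two index sets over the origin and destination indices; the rest of the routine is not shown, but this IS pair is the standard input to a PETSc scatter context. A hedged sketch of that idiom with the by-value communicator; the function name and the VecScatter part are assumptions about the unshown code, not the actual AMDiS implementation:

```cpp
#include <vector>
#include <mpi.h>
#include <petscvec.h>
#include <petscis.h>

// Copy selected entries from one distributed vector to another via index
// sets and a scatter context (the scatter part mirrors common PETSc usage).
void copyVecSketch(MPI::Intracomm mpiComm, Vec origin, Vec dest,
                   std::vector<int> &originIndex, std::vector<int> &destIndex)
{
  IS originIs, destIs;
  ISCreateGeneral(mpiComm, originIndex.size(), &(originIndex[0]),
                  PETSC_USE_POINTER, &originIs);
  ISCreateGeneral(mpiComm, destIndex.size(), &(destIndex[0]),
                  PETSC_USE_POINTER, &destIs);

  VecScatter scatter;
  VecScatterCreate(origin, originIs, dest, destIs, &scatter);
  VecScatterBegin(scatter, origin, dest, INSERT_VALUES, SCATTER_FORWARD);
  VecScatterEnd(scatter, origin, dest, INSERT_VALUES, SCATTER_FORWARD);

  VecScatterDestroy(&scatter);
  ISDestroy(&originIs);
  ISDestroy(&destIs);
}
```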
@@ -54,7 +54,8 @@ namespace AMDiS {
     {
       meshDistributor = m;
       mpiRank = meshDistributor->getMpiRank();
-      mpiComm = &(meshDistributor->getMpiComm());
+      mpiComm = meshDistributor->getMpiComm();
+      mpiSelfComm = PETSC_COMM_SELF;
     }

     /** \brief
@@ -137,7 +138,9 @@ namespace AMDiS {
     int mpiRank;

-    MPI::Intracomm* mpiComm;
+    MPI::Intracomm mpiComm;
+
+    MPI::Intracomm mpiSelfComm;

     /// Petsc's matrix structure.
     Mat petscMatrix;
......
@@ -214,8 +214,8 @@ namespace AMDiS {
     Parameters::get("parallel->multi level test", multiLevelTest);

-    if (multiLevelTest) {
-      // subDomainSolver = new SubDomainSolver(meshDistributor, mpiComm, &PETSC_COMM_SELF);
+    if (multiLevelTest) {
+      subDomainSolver = new SubDomainSolver(meshDistributor, mpiComm, mpiSelfComm);
     }
   }
@@ -436,7 +436,7 @@ namespace AMDiS {
     // === Create distributed matrix for Lagrange constraints. ===

-    MatCreateMPIAIJ(*mpiComm,
+    MatCreateMPIAIJ(mpiComm,
                     lagrangeMap.getRankDofs(), localDofMap.getRankDofs(),
                     lagrangeMap.getOverallDofs(), localDofMap.getOverallDofs(),
                     2, PETSC_NULL, 2, PETSC_NULL,
@@ -497,14 +497,14 @@ namespace AMDiS {
     schurPrimalData.mat_b_primal = &mat_b_primal;
     schurPrimalData.fetiSolver = this;

-    VecCreateMPI(*mpiComm,
+    VecCreateMPI(mpiComm,
                  localDofMap.getRankDofs(), localDofMap.getOverallDofs(),
                  &(schurPrimalData.tmp_vec_b));
-    VecCreateMPI(*mpiComm,
+    VecCreateMPI(mpiComm,
                  primalDofMap.getRankDofs(), primalDofMap.getOverallDofs(),
                  &(schurPrimalData.tmp_vec_primal));

-    MatCreateShell(*mpiComm,
+    MatCreateShell(mpiComm,
                    primalDofMap.getRankDofs(), primalDofMap.getRankDofs(),
                    primalDofMap.getOverallDofs(), primalDofMap.getOverallDofs(),
                    &schurPrimalData,
@@ -512,7 +512,7 @@ namespace AMDiS {
     MatShellSetOperation(mat_schur_primal, MATOP_MULT,
                          (void(*)(void))petscMultMatSchurPrimal);

-    KSPCreate(*mpiComm, &ksp_schur_primal);
+    KSPCreate(mpiComm, &ksp_schur_primal);
     KSPSetOperators(ksp_schur_primal, mat_schur_primal, mat_schur_primal, SAME_NONZERO_PATTERN);
     KSPSetOptionsPrefix(ksp_schur_primal, "schur_primal_");
     KSPSetType(ksp_schur_primal, KSPGMRES);
@@ -530,7 +530,7 @@ namespace AMDiS {
     int nRowsOverallB = localDofMap.getOverallDofs();

     Mat matBPi;
-    MatCreateMPIAIJ(*mpiComm,
+    MatCreateMPIAIJ(mpiComm,
                     nRowsRankB, nRowsRankPrimal,
                     nRowsOverallB, nRowsOverallPrimal,
                     30, PETSC_NULL, 30, PETSC_NULL, &matBPi);
@@ -596,7 +596,7 @@ namespace AMDiS {
     MatGetInfo(mat_primal_primal, MAT_GLOBAL_SUM, &minfo);
     MSG("Schur primal matrix nnz = %f\n", minfo.nz_used);

-    KSPCreate(*mpiComm, &ksp_schur_primal);
+    KSPCreate(mpiComm, &ksp_schur_primal);
     KSPSetOperators(ksp_schur_primal, mat_primal_primal,
                     mat_primal_primal, SAME_NONZERO_PATTERN);
     KSPSetOptionsPrefix(ksp_schur_primal, "schur_primal_");
@@ -646,24 +646,24 @@ namespace AMDiS {
     fetiData.fetiSolver = this;
     fetiData.ksp_schur_primal = &ksp_schur_primal;

-    VecCreateMPI(*mpiComm,
+    VecCreateMPI(mpiComm,
                  localDofMap.getRankDofs(), localDofMap.getOverallDofs(),
                  &(fetiData.tmp_vec_b));
-    VecCreateMPI(*mpiComm,
+    VecCreateMPI(mpiComm,
                  lagrangeMap.getRankDofs(), lagrangeMap.getOverallDofs(),
                  &(fetiData.tmp_vec_lagrange));
-    VecCreateMPI(*mpiComm,
+    VecCreateMPI(mpiComm,
                  primalDofMap.getRankDofs(), primalDofMap.getOverallDofs(),
                  &(fetiData.tmp_vec_primal));

-    MatCreateShell(*mpiComm,
+    MatCreateShell(mpiComm,
                    lagrangeMap.getRankDofs(), lagrangeMap.getRankDofs(),
                    lagrangeMap.getOverallDofs(), lagrangeMap.getOverallDofs(),
                    &fetiData, &mat_feti);
     MatShellSetOperation(mat_feti, MATOP_MULT, (void(*)(void))petscMultMatFeti);

-    KSPCreate(*mpiComm, &ksp_feti);
+    KSPCreate(mpiComm, &ksp_feti);
     KSPSetOperators(ksp_feti, mat_feti, mat_feti, SAME_NONZERO_PATTERN);
     KSPSetOptionsPrefix(ksp_feti, "feti_");
     KSPSetType(ksp_feti, KSPGMRES);
@@ -698,7 +698,7 @@ namespace AMDiS {
     fetiDirichletPreconData.mat_duals_interior = &mat_duals_interior;
     fetiDirichletPreconData.ksp_interior = &ksp_interior;

-    VecCreateMPI(*mpiComm,
+    VecCreateMPI(mpiComm,
                  localDofMap.getRankDofs(), localDofMap.getOverallDofs(),
                  &(fetiDirichletPreconData.tmp_vec_b));
     MatGetVecs(mat_duals_duals, PETSC_NULL,
@@ -743,7 +743,7 @@ namespace AMDiS {
       }
     }

-    VecCreateMPI(*mpiComm,
+    VecCreateMPI(mpiComm,
                  localDofMap.getRankDofs(),
                  localDofMap.getOverallDofs(),
                  &(fetiLumpedPreconData.tmp_vec_b));
@@ -938,17 +938,17 @@ namespace AMDiS {
     MatCreateSeqAIJ(PETSC_COMM_SELF, nRowsRankB, nRowsRankB, 60, PETSC_NULL,
                     &mat_b_b);

-    MatCreateMPIAIJ(*mpiComm,
+    MatCreateMPIAIJ(mpiComm,
                     nRowsRankPrimal, nRowsRankPrimal,
                     nRowsOverallPrimal, nRowsOverallPrimal,
                     60, PETSC_NULL, 60, PETSC_NULL, &mat_primal_primal);

-    MatCreateMPIAIJ(*mpiComm,
+    MatCreateMPIAIJ(mpiComm,
                     nRowsRankB, nRowsRankPrimal,
                     nRowsOverallB, nRowsOverallPrimal,
                     60, PETSC_NULL, 60, PETSC_NULL, &mat_b_primal);

-    MatCreateMPIAIJ(*mpiComm,
+    MatCreateMPIAIJ(mpiComm,
                     nRowsRankPrimal, nRowsRankB,
                     nRowsOverallPrimal, nRowsOverallB,
                     30, PETSC_NULL, 30, PETSC_NULL, &mat_primal_b);
@@ -1233,9 +1233,9 @@ namespace AMDiS {
     vector<const FiniteElemSpace*> feSpaces = getFeSpaces(vec);

-    VecCreateMPI(*mpiComm,
+    VecCreateMPI(mpiComm,
                  localDofMap.getRankDofs(), localDofMap.getOverallDofs(), &f_b);
-    VecCreateMPI(*mpiComm,
+    VecCreateMPI(mpiComm,
                  primalDofMap.getRankDofs(), primalDofMap.getOverallDofs(),
                  &f_primal);
......
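The FETI-DP setup above wires matrix-free operators into KSP solvers: a shell matrix carries a context struct, and its multiplication is delegated to a callback such as `petscMultMatFeti` or `petscMultMatSchurPrimal`. A compact, hedged sketch of this shell-matrix pattern; the context type, callback, and function names below are illustrative stand-ins, not the AMDiS structs:

```cpp
#include <mpi.h>
#include <petscksp.h>

// Illustrative context for a matrix-free operator y = A x.
struct ShellCtx {
  Mat subMatrix;   // some explicitly stored sub-block used inside the apply
};

// Multiplication callback registered with MATOP_MULT.
PetscErrorCode shellMult(Mat A, Vec x, Vec y)
{
  ShellCtx *ctx;
  MatShellGetContext(A, (void**) &ctx);
  MatMult(ctx->subMatrix, x, y);      // stand-in for the real FETI-DP apply
  return 0;
}

void createShellSolver(MPI::Intracomm mpiComm, ShellCtx &ctx,
                       int nRankRows, int nOverallRows)
{
  Mat shellMat;
  MatCreateShell(mpiComm, nRankRows, nRankRows, nOverallRows, nOverallRows,
                 &ctx, &shellMat);
  MatShellSetOperation(shellMat, MATOP_MULT, (void(*)(void)) shellMult);

  KSP ksp;
  KSPCreate(mpiComm, &ksp);
  KSPSetOperators(ksp, shellMat, shellMat, SAME_NONZERO_PATTERN);
  KSPSetType(ksp, KSPGMRES);
  KSPSetFromOptions(ksp);
}
```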
@@ -53,7 +53,7 @@ namespace AMDiS {

     for (int i = 0; i < nBlocks; i++)
       for (int j = 0; j < nBlocks; j++)
-        MatCreateMPIAIJ(*mpiComm,
+        MatCreateMPIAIJ(mpiComm,
                         nRankRows * blockSize[i], nRankRows * blockSize[j],
                         nOverallRows * blockSize[i], nOverallRows * blockSize[j],
                         30 * blockSize[i], PETSC_NULL,
@@ -79,7 +79,7 @@ namespace AMDiS {
     }

-    MatCreateNest(*mpiComm,
+    MatCreateNest(mpiComm,
                   nBlocks, PETSC_NULL, nBlocks, PETSC_NULL,
                   &(nestMat[0]), &petscMatrix);
@@ -91,7 +91,7 @@ namespace AMDiS {
     MatAssemblyEnd(petscMatrix, MAT_FINAL_ASSEMBLY);

     // === Init PETSc solver. ===
-    KSPCreate(*mpiComm, &solver);
+    KSPCreate(mpiComm, &solver);
     KSPSetOperators(solver, petscMatrix, petscMatrix, SAME_NONZERO_PATTERN);
     KSPSetFromOptions(solver);
@@ -113,7 +113,7 @@ namespace AMDiS {
     nestVec.resize(nComponents);

     for (int i = 0; i < nComponents; i++) {
-      VecCreateMPI(*mpiComm, nRankRows, nOverallRows, &(nestVec[i]));
+      VecCreateMPI(mpiComm, nRankRows, nOverallRows, &(nestVec[i]));

       setDofVector(nestVec[i], vec->getDOFVector(i));
@@ -121,7 +121,7 @@ namespace AMDiS {
       VecAssemblyEnd(nestVec[i]);
     }

-    VecCreateNest(*mpiComm, nComponents, PETSC_NULL,
+    VecCreateNest(mpiComm, nComponents, PETSC_NULL,
                   &(nestVec[0]), &petscRhsVec);

     VecAssemblyBegin(petscRhsVec);
......
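`PetscSolverGlobalBlockMatrix` assembles one MPIAIJ matrix per component block and combines them with `MatCreateNest`; the right-hand side is built analogously with `VecCreateNest`. A hedged sketch of that nesting idiom for a 2x2 block system, with the communicator passed by value; the function and array names are illustrative:

```cpp
#include <mpi.h>
#include <petscksp.h>

// Combine per-block matrices/vectors into nested PETSc objects and solve.
// 'blocks' holds the 2x2 sub-matrices in row-major order.
void solveNestedBlocks(MPI::Intracomm mpiComm, Mat blocks[4],
                       Vec rhsBlocks[2], Vec solBlocks[2])
{
  Mat A;
  MatCreateNest(mpiComm, 2, PETSC_NULL, 2, PETSC_NULL, blocks, &A);
  MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY);
  MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY);

  Vec b, x;
  VecCreateNest(mpiComm, 2, PETSC_NULL, rhsBlocks, &b);
  VecCreateNest(mpiComm, 2, PETSC_NULL, solBlocks, &x);

  KSP ksp;
  KSPCreate(mpiComm, &ksp);
  KSPSetOperators(ksp, A, A, SAME_NONZERO_PATTERN);
  KSPSetFromOptions(ksp);
  KSPSolve(ksp, b, x);
}
```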
@@ -31,8 +31,8 @@ namespace AMDiS {

     // === Create PETSc vector (solution and a temporary vector). ===

-    VecCreateMPI(*mpiComm, nRankRows, nOverallRows, &petscSolVec);
-    VecCreateMPI(*mpiComm, nRankRows, nOverallRows, &petscTmpVec);
+    VecCreateMPI(mpiComm, nRankRows, nOverallRows, &petscSolVec);
+    VecCreateMPI(mpiComm, nRankRows, nOverallRows, &petscTmpVec);

     int testddd = 1;
     Parameters::get("block size", testddd);
@@ -70,7 +70,7 @@ namespace AMDiS {

     // === Create PETSc matrix with the computed nnz data structure. ===

-    MatCreateMPIAIJ(*mpiComm, nRankRows, nRankRows,
+    MatCreateMPIAIJ(mpiComm, nRankRows, nRankRows,
                     nOverallRows, nOverallRows,
                     0, d_nnz, 0, o_nnz, &petscMatrix);
@@ -109,7 +109,7 @@ namespace AMDiS {
     MatAssemblyEnd(petscMatrix, MAT_FINAL_ASSEMBLY);

     // === Init PETSc solver. ===
-    KSPCreate(*mpiComm, &solver);
+    KSPCreate(mpiComm, &solver);
     KSPGetPC(solver, &pc);
     KSPSetOperators(solver, petscMatrix, petscMatrix, SAME_NONZERO_PATTERN);
     KSPSetTolerances(solver, 0.0, 1e-8, PETSC_DEFAULT, PETSC_DEFAULT);
@@ -137,7 +137,7 @@ namespace AMDiS {
     int nRankRows = meshDistributor->getNumberRankDofs(feSpaces);
     int nOverallRows = meshDistributor->getNumberOverallDofs(feSpaces);

-    VecCreateMPI(*mpiComm, nRankRows, nOverallRows, &petscRhsVec);
+    VecCreateMPI(mpiComm, nRankRows, nOverallRows, &petscRhsVec);

     int testddd = 1;
     Parameters::get("block size", testddd);
@@ -155,7 +155,7 @@ namespace AMDiS {
     if (removeRhsNullSpace) {
       MSG("Remove constant null space from the RHS!\n");
       MatNullSpace sp;
-      MatNullSpaceCreate(*mpiComm, PETSC_TRUE, 0, PETSC_NULL, &sp);
+      MatNullSpaceCreate(mpiComm, PETSC_TRUE, 0, PETSC_NULL, &sp);
       MatNullSpaceRemove(sp, petscRhsVec, PETSC_NULL);
       MatNullSpaceDestroy(&sp);
     }
......
@@ -163,12 +163,12 @@ namespace AMDiS {

     // === Create PETSc IS structures for interior and boundary DOFs. ===

-    ISCreateStride(*mpiComm,
+    ISCreateStride(mpiComm,
                    nInteriorDofs * nComponents,
                    (rStartInteriorDofs + rStartBoundaryDofs) * nComponents,
                    1, &interiorIs);

-    ISCreateStride(*mpiComm,
+    ISCreateStride(mpiComm,
                    nBoundaryDofs * nComponents,
                    (rStartInteriorDofs + rStartBoundaryDofs + nInteriorDofs) * nComponents,
                    1, &boundaryIs);
@@ -189,22 +189,22 @@ namespace AMDiS {
     int nOverallBoundaryRows = nOverallBoundaryDofs * nComponents;

-    MatCreateMPIAIJ(*mpiComm,
+    MatCreateMPIAIJ(mpiComm,
                     nInteriorRows, nInteriorRows,
                     nOverallInteriorRows, nOverallInteriorRows,
                     100, PETSC_NULL, 100, PETSC_NULL, &matA11);

-    MatCreateMPIAIJ(*mpiComm,
+    MatCreateMPIAIJ(mpiComm,
                     nBoundaryRows, nBoundaryRows,
                     nOverallBoundaryRows, nOverallBoundaryRows,
                     100, PETSC_NULL, 100, PETSC_NULL, &matA22);

-    MatCreateMPIAIJ(*mpiComm,
+    MatCreateMPIAIJ(mpiComm,
                     nInteriorRows, nBoundaryRows,
                     nOverallInteriorRows, nOverallBoundaryRows,
                     100, PETSC_NULL, 100, PETSC_NULL, &matA12);

-    MatCreateMPIAIJ(*mpiComm,
+    MatCreateMPIAIJ(mpiComm,
                     nBoundaryRows, nInteriorRows,
                     nOverallBoundaryRows, nOverallInteriorRows,
                     100, PETSC_NULL, 100, PETSC_NULL, &matA21);
@@ -237,7 +237,7 @@ namespace AMDiS {
     tmpIS[0] = interiorIs;
     tmpIS[1] = boundaryIs;

-    MatCreateNest(*mpiComm, 2, &tmpIS[0], 2, &tmpIS[0], &tmpMat[0][0], &petscMatrix);
+    MatCreateNest(mpiComm, 2, &tmpIS[0], 2, &tmpIS[0], &tmpMat[0][0], &petscMatrix);
     MatNestSetVecType(petscMatrix, VECNEST);
     MatAssemblyBegin(petscMatrix, MAT_FINAL_ASSEMBLY);
     MatAssemblyEnd(petscMatrix, MAT_FINAL_ASSEMBLY);
@@ -246,8 +246,8 @@ namespace AMDiS {
     int nRankRows = meshDistributor->getNumberRankDofs(feSpace) * nComponents;
     int nOverallRows = meshDistributor->getNumberOverallDofs(feSpace) * nComponents;

-    VecCreateMPI(*mpiComm, nRankRows, nOverallRows, &petscSolVec);
-    VecCreateMPI(*mpiComm, nRankRows, nOverallRows, &petscTmpVec);
+    VecCreateMPI(mpiComm, nRankRows, nOverallRows, &petscSolVec);
+    VecCreateMPI(mpiComm, nRankRows, nOverallRows, &petscTmpVec);
   }
@@ -260,7 +260,7 @@ namespace AMDiS {
     int nRankRows = meshDistributor->getNumberRankDofs(feSpace) * nComponents;
     int nOverallRows = meshDistributor->getNumberOverallDofs(feSpace) * nComponents;

-    VecCreateMPI(*mpiComm, nRankRows, nOverallRows, &petscRhsVec);
+    VecCreateMPI(mpiComm, nRankRows, nOverallRows, &petscRhsVec);

     for (int i = 0; i < nComponents; i++)
       setDofVector(petscRhsVec, vec->getDOFVector(i), nComponents, i);
@@ -278,7 +278,7 @@ namespace AMDiS {
     const FiniteElemSpace *feSpace = meshDistributor->getFeSpace(0);
     int nComponents = vec.getSize();

-    KSPCreate(*mpiComm, &solver);
+    KSPCreate(mpiComm, &solver);
     KSPSetOperators(solver, petscMatrix, petscMatrix, SAME_NONZERO_PATTERN);
     KSPSetTolerances(solver, 0.0, 1e-8, PETSC_DEFAULT, PETSC_DEFAULT);
......
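`PetscSolverSchur` separates interior from boundary unknowns via two strided index sets and then nests the four sub-matrices along those index sets, which fixes their global row/column placement. A small hedged sketch of that placement step; the function name and argument list are illustrative, while the calls mirror the pattern visible above:

```cpp
#include <mpi.h>
#include <petscmat.h>

// Place the four Schur sub-blocks into one nested matrix whose global
// row/column layout is given by the interior and boundary index sets.
void nestSchurBlocks(MPI::Intracomm mpiComm, Mat A11, Mat A12, Mat A21, Mat A22,
                     IS interiorIs, IS boundaryIs, Mat &petscMatrix)
{
  Mat blocks[2][2] = { { A11, A12 },
                       { A21, A22 } };
  IS  rowColIs[2]  = { interiorIs, boundaryIs };

  MatCreateNest(mpiComm, 2, &rowColIs[0], 2, &rowColIs[0],
                &blocks[0][0], &petscMatrix);
  MatNestSetVecType(petscMatrix, VECNEST);
  MatAssemblyBegin(petscMatrix, MAT_FINAL_ASSEMBLY);
  MatAssemblyEnd(petscMatrix, MAT_FINAL_ASSEMBLY);
}
```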
//
// Software License for AMDiS
//
// Copyright (c) 2010 Dresden University of Technology
// All rights reserved.
// Authors: Simon Vey, Thomas Witkowski et al.
//
// This file is part of AMDiS
//
// See also license.opensource.txt in the distribution.


#include "parallel/SubDomainSolver.h"

namespace AMDiS {

  using namespace std;

  void SubDomainSolver::fillPetscMatrix(Matrix<DOFMatrix*> *mat)
  {
  }

  void SubDomainSolver::fillPetscRhs(SystemVector *vec)
  {
  }

  void SubDomainSolver::solvePetscMatrix(SystemVector &vec, AdaptInfo *adaptInfo)
  {
  }

  void SubDomainSolver::destroyMatrixData()
  {
  }

  void SubDomainSolver::solve(Vec &rhs, Vec &sol)
  {
  }
}
@@ -37,8 +37,8 @@ namespace AMDiS {
   class SubDomainSolver {
   public:
     SubDomainSolver(MeshDistributor *md,
-                    MPI::Intracomm* mpiComm0,
-                    MPI::Intracomm* mpiComm1)
+                    MPI::Intracomm &mpiComm0,
+                    MPI::Intracomm &mpiComm1)
       : meshDistributor(md),
         coarseSpaceMpiComm(mpiComm0),
         subDomainMpiComm(mpiComm1),
@@ -50,15 +50,22 @@ namespace AMDiS {
       coarseSpace = coarseDofs;
     }

-    void solve(Vec &rhs, Vec &sol)
-    {}
+    void fillPetscMatrix(Matrix<DOFMatrix*> *mat);
+
+    void fillPetscRhs(SystemVector *vec);
+
+    void solvePetscMatrix(SystemVector &vec, AdaptInfo *adaptInfo);
+
+    void destroyMatrixData();
+
+    void solve(Vec &rhs, Vec &sol);

   protected:
     MeshDistributor *meshDistributor;

-    MPI::Intracomm* coarseSpaceMpiComm;
+    MPI::Intracomm coarseSpaceMpiComm;

-    MPI::Intracomm* subDomainMpiComm;
+    MPI::Intracomm subDomainMpiComm;

     ParallelDofMapping* coarseSpace;
   };
......
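The new `SubDomainSolver` interface (its bodies in `SubDomainSolver.cc` are still empty stubs) takes one communicator for the coarse space and one for the local sub-domain, which is how `PetscSolverFeti` instantiates it in the multilevel test. A hedged usage sketch; the helper function and the `md` argument are stand-ins for the corresponding solver members, not AMDiS code:

```cpp
#include <mpi.h>
#include <petscsys.h>
#include "parallel/SubDomainSolver.h"

// Hedged usage sketch: mirrors how PetscSolverFeti creates the solver in the
// multilevel test.  'md' stands in for the mesh distributor held by the solver.
AMDiS::SubDomainSolver* createSubDomainSolver(AMDiS::MeshDistributor *md)
{
  MPI::Intracomm mpiComm = MPI::COMM_WORLD;      // coarse-space communicator (all ranks)
  MPI::Intracomm mpiSelfComm = PETSC_COMM_SELF;  // per-rank sub-domain communicator

  // The coarse problem is distributed over mpiComm, while each rank solves
  // its local sub-domain problem independently on mpiSelfComm.
  return new AMDiS::SubDomainSolver(md, mpiComm, mpiSelfComm);
}
```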