Commit bc02aae5 authored by Thomas Witkowski

More work on FETI-DP for the multi-level test, part II.

parent a33fd12b
@@ -187,7 +187,8 @@ namespace AMDiS {
PetscSolverFeti::PetscSolverFeti()
: PetscSolver(),
schurPrimalSolver(0),
multiLevelTest(false)
multiLevelTest(false),
subDomainSolver(NULL)
{
FUNCNAME("PetscSolverFeti::PetscSolverFeti()");
@@ -213,10 +214,6 @@ namespace AMDiS {
schurPrimalSolver);
Parameters::get("parallel->multi level test", multiLevelTest);
if (multiLevelTest) {
subDomainSolver = new SubDomainSolver(meshDistributor, mpiComm, mpiSelfComm);
}
}
@@ -271,9 +268,7 @@ namespace AMDiS {
// If multi-level test, inform the sub domain solver about the coarse space.
if (multiLevelTest) {
subDomainSolver->setCoarseSpace(&primalDofMap);
}
subDomainSolver->setDofMapping(&primalDofMap, &localDofMap);
}
@@ -492,9 +487,9 @@ namespace AMDiS {
if (schurPrimalSolver == 0) {
MSG("Create iterative schur primal solver!\n");
schurPrimalData.mat_primal_primal = &mat_primal_primal;
schurPrimalData.mat_primal_b = &mat_primal_b;
schurPrimalData.mat_b_primal = &mat_b_primal;
schurPrimalData.mat_primal_primal = &(subDomainSolver->getMatCoarseCoarse());
schurPrimalData.mat_primal_b = &(subDomainSolver->getMatCoarseInt());
schurPrimalData.mat_b_primal = &(subDomainSolver->getMatIntCoarse());
schurPrimalData.fetiSolver = this;
VecCreateMPI(mpiComm,
@@ -520,8 +515,6 @@ namespace AMDiS {
} else {
MSG("Create direct schur primal solver!\n");
TEST_EXIT(ksp_b)("No solver object for local problem!\n");
double wtime = MPI::Wtime();
int nRowsRankPrimal = primalDofMap.getRankDofs();
@@ -544,13 +537,13 @@ namespace AMDiS {
for (int i = 0; i < nRowsRankB; i++) {
PetscInt row = localDofMap.getStartDofs() + i;
MatGetRow(mat_b_primal, row, &nCols, &cols, &values);
MatGetRow(subDomainSolver->getMatIntCoarse(), row, &nCols, &cols, &values);
for (int j = 0; j < nCols; j++)
if (values[j] != 0.0)
mat_b_primal_cols[cols[j]].push_back(make_pair(i, values[j]));
MatRestoreRow(mat_b_primal, row, &nCols, &cols, &values);
MatRestoreRow(subDomainSolver->getMatIntCoarse(), row, &nCols, &cols, &values);
}
TEST_EXIT(static_cast<int>(mat_b_primal_cols.size()) ==
@@ -569,7 +562,7 @@ namespace AMDiS {
VecAssemblyBegin(tmpVec);
VecAssemblyEnd(tmpVec);
KSPSolve(ksp_b, tmpVec, tmpVec);
subDomainSolver->solve(tmpVec, tmpVec);
PetscScalar *tmpValues;
VecGetArray(tmpVec, &tmpValues);
@@ -586,19 +579,21 @@ namespace AMDiS {
MatAssemblyBegin(matBPi, MAT_FINAL_ASSEMBLY);
MatAssemblyEnd(matBPi, MAT_FINAL_ASSEMBLY);
MatMatMult(mat_primal_b, matBPi, MAT_INITIAL_MATRIX, PETSC_DEFAULT, &matPrimal);
MatAXPY(mat_primal_primal, -1.0, matPrimal, DIFFERENT_NONZERO_PATTERN);
MatMatMult(subDomainSolver->getMatCoarseInt(), matBPi, MAT_INITIAL_MATRIX, PETSC_DEFAULT, &matPrimal);
MatAXPY(subDomainSolver->getMatCoarseCoarse(), -1.0, matPrimal, DIFFERENT_NONZERO_PATTERN);
MatDestroy(&matPrimal);
MatDestroy(&matBPi);
MatInfo minfo;
MatGetInfo(mat_primal_primal, MAT_GLOBAL_SUM, &minfo);
MatGetInfo(subDomainSolver->getMatCoarseCoarse(), MAT_GLOBAL_SUM, &minfo);
MSG("Schur primal matrix nnz = %f\n", minfo.nz_used);
KSPCreate(mpiComm, &ksp_schur_primal);
KSPSetOperators(ksp_schur_primal, mat_primal_primal,
mat_primal_primal, SAME_NONZERO_PATTERN);
KSPSetOperators(ksp_schur_primal,
subDomainSolver->getMatCoarseCoarse(),
subDomainSolver->getMatCoarseCoarse(),
SAME_NONZERO_PATTERN);
KSPSetOptionsPrefix(ksp_schur_primal, "schur_primal_");
KSPSetType(ksp_schur_primal, KSPPREONLY);
PC pc_schur_primal;
@@ -640,8 +635,8 @@ namespace AMDiS {
// === Create FETI-DP solver object. ===
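// fetiData bundles everything the matrix-free FETI-DP operator needs:
// the coupling blocks, the jump operator L (mat_lagrange) and the KSP
// of the Schur primal coarse problem.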
fetiData.mat_primal_b = &mat_primal_b;
fetiData.mat_b_primal = &mat_b_primal;
fetiData.mat_primal_b = &(subDomainSolver->getMatCoarseInt());
fetiData.mat_b_primal = &(subDomainSolver->getMatIntCoarse());
fetiData.mat_lagrange = &mat_lagrange;
fetiData.fetiSolver = this;
fetiData.ksp_schur_primal = &ksp_schur_primal;
@@ -910,6 +905,9 @@ namespace AMDiS {
{
FUNCNAME("PetscSolverFeti::fillPetscMatrix()");
if (subDomainSolver == NULL)
subDomainSolver = new SubDomainSolver(meshDistributor, mpiComm, mpiSelfComm);
// === Create all sets and indices. ===
vector<const FiniteElemSpace*> feSpaces = getFeSpaces(mat);
@@ -935,6 +933,7 @@ namespace AMDiS {
int nRowsRankPrimal = primalDofMap.getRankDofs();
int nRowsOverallPrimal = primalDofMap.getOverallDofs();
#if 0
MatCreateSeqAIJ(PETSC_COMM_SELF, nRowsRankB, nRowsRankB, 60, PETSC_NULL,
&mat_b_b);
@@ -952,7 +951,10 @@ namespace AMDiS {
nRowsRankPrimal, nRowsRankB,
nRowsOverallPrimal, nRowsOverallB,
30, PETSC_NULL, 30, PETSC_NULL, &mat_primal_b);
#endif
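// The interior, coarse and coupling blocks are assembled inside the
// sub domain solver; the matrices from the disabled block above are
// replaced by its accessors (getMatCoarseCoarse(), getMatCoarseInt(),
// getMatIntCoarse()).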
subDomainSolver->fillPetscMatrix(mat);
// === Create matrices for FETI-DP preconditioner. ===
@@ -1098,32 +1100,41 @@ namespace AMDiS {
for (unsigned int k = 0; k < cols.size(); k++)
cols[k] = primalDofMap.getMatIndex(j, cols[k]);
#if 0
MatSetValues(mat_primal_primal, 1, &rowIndex, cols.size(),
&(cols[0]), &(values[0]), ADD_VALUES);
#endif
if (colsOther.size()) {
for (unsigned int k = 0; k < colsOther.size(); k++)
colsOther[k] = localDofMap.getMatIndex(j, colsOther[k]);
#if 0
MatSetValues(mat_primal_b, 1, &rowIndex, colsOther.size(),
&(colsOther[0]), &(valuesOther[0]), ADD_VALUES);
#endif
}
} else {
int localRowIndex = localDofMap.getLocalMatIndex(i, *cursor);
for (unsigned int k = 0; k < cols.size(); k++)
cols[k] = localDofMap.getLocalMatIndex(j, cols[k]);
#if 0
MatSetValues(mat_b_b, 1, &localRowIndex, cols.size(),
&(cols[0]), &(values[0]), ADD_VALUES);
#endif
if (colsOther.size()) {
int globalRowIndex = localDofMap.getMatIndex(i, *cursor);
for (unsigned int k = 0; k < colsOther.size(); k++)
colsOther[k] = primalDofMap.getMatIndex(j, colsOther[k]);
#if 0
MatSetValues(mat_b_primal, 1, &globalRowIndex, colsOther.size(),
&(colsOther[0]), &(valuesOther[0]), ADD_VALUES);
#endif
}
}
@@ -1164,6 +1175,7 @@ namespace AMDiS {
}
}
#if 0
// === Start global assembly procedure. ===
MatAssemblyBegin(mat_b_b, MAT_FINAL_ASSEMBLY);
@@ -1177,7 +1189,7 @@ namespace AMDiS {
MatAssemblyBegin(mat_primal_b, MAT_FINAL_ASSEMBLY);
MatAssemblyEnd(mat_primal_b, MAT_FINAL_ASSEMBLY);
#endif
// === Start global assembly procedure for preconditioner matrices. ===
@@ -1203,19 +1215,6 @@ namespace AMDiS {
createMatLagrange(feSpaces);
// === Create solver for the non-primal (thus local) variables. ===
KSPCreate(PETSC_COMM_SELF, &ksp_b);
KSPSetOperators(ksp_b, mat_b_b, mat_b_b, SAME_NONZERO_PATTERN);
KSPSetOptionsPrefix(ksp_b, "interior_");
KSPSetType(ksp_b, KSPPREONLY);
PC pc_b;
KSPGetPC(ksp_b, &pc_b);
PCSetType(pc_b, PCLU);
PCFactorSetMatSolverPackage(pc_b, MATSOLVERUMFPACK);
KSPSetFromOptions(ksp_b);
// === Create PETSc solver for the Schur complement on primal variables. ===
createSchurPrimalKsp(feSpaces);
@@ -1231,6 +1230,7 @@ namespace AMDiS {
{
FUNCNAME("PetscSolverFeti::fillPetscRhs()");
#if 0
vector<const FiniteElemSpace*> feSpaces = getFeSpaces(vec);
VecCreateMPI(mpiComm,
@@ -1258,6 +1258,9 @@ namespace AMDiS {
VecAssemblyBegin(f_primal);
VecAssemblyEnd(f_primal);
#else
subDomainSolver->fillPetscRhs(vec);
#endif
}
@@ -1278,7 +1281,7 @@ namespace AMDiS {
VecRestoreArray(rhs, &rhsValues);
VecRestoreArray(tmp, &tmpValues);
KSPSolve(ksp_b, tmp, tmp);
subDomainSolver->solve(tmp, tmp);
VecGetArray(tmp, &tmpValues);
VecGetArray(sol, &rhsValues);
@@ -1312,32 +1315,36 @@ namespace AMDiS {
// Some temporary vectors.
Vec tmp_b0, tmp_b1, tmp_lagrange0, tmp_primal0, tmp_primal1;
VecCreateMPI(mpiComm,
localDofMap.getRankDofs(), localDofMap.getOverallDofs(), &tmp_b0);
VecCreateMPI(mpiComm,
localDofMap.getRankDofs(), localDofMap.getOverallDofs(), &tmp_b1);
MatGetVecs(mat_lagrange, PETSC_NULL, &tmp_lagrange0);
MatGetVecs(mat_lagrange, PETSC_NULL, &vec_rhs);
VecDuplicate(f_b, &tmp_b0);
VecDuplicate(f_b, &tmp_b1);
MatGetVecs(mat_primal_primal, PETSC_NULL, &tmp_primal0);
MatGetVecs(mat_primal_primal, PETSC_NULL, &tmp_primal1);
MatGetVecs(subDomainSolver->getMatCoarseCoarse(), PETSC_NULL, &tmp_primal0);
MatGetVecs(subDomainSolver->getMatCoarseCoarse(), PETSC_NULL, &tmp_primal1);
// === Create new rhs ===
// d = L inv(K_BB) f_B - L inv(K_BB) K_BPi inv(S_PiPi) [f_Pi - K_PiB inv(K_BB) f_B]
// vec_rhs = L * inv(K_BB) * f_B
solveLocalProblem(f_b, tmp_b0);
solveLocalProblem(subDomainSolver->getRhsInterior(), tmp_b0);
MatMult(mat_lagrange, tmp_b0, vec_rhs);
// tmp_primal0 = M_PiB * inv(K_BB) * f_B
MatMult(mat_primal_b, tmp_b0, tmp_primal0);
MatMult(subDomainSolver->getMatCoarseInt(), tmp_b0, tmp_primal0);
// tmp_primal0 = f_Pi - M_PiB * inv(K_BB) * f_B
VecAXPBY(tmp_primal0, 1.0, -1.0, f_primal);
VecAXPBY(tmp_primal0, 1.0, -1.0, subDomainSolver->getRhsCoarseSpace());
// tmp_primal0 = inv(S_PiPi) (f_Pi - M_PiB * inv(K_BB) * f_B)
KSPSolve(ksp_schur_primal, tmp_primal0, tmp_primal0);
// tmp_b0 = K_BPi * inv(S_PiPi) * (f_Pi - K_PiB * inv(K_BB) * f_B)
MatMult(mat_b_primal, tmp_primal0, tmp_b0);
MatMult(subDomainSolver->getMatIntCoarse(), tmp_primal0, tmp_b0);
solveLocalProblem(tmp_b0, tmp_b0);
MatMult(mat_lagrange, tmp_b0, tmp_lagrange0);
@@ -1350,13 +1357,13 @@ namespace AMDiS {
// === Solve for u_primals. ===
VecCopy(f_primal, tmp_primal0);
solveLocalProblem(f_b, tmp_b0);
MatMult(mat_primal_b, tmp_b0, tmp_primal1);
VecCopy(subDomainSolver->getRhsCoarseSpace(), tmp_primal0);
solveLocalProblem(subDomainSolver->getRhsInterior(), tmp_b0);
MatMult(subDomainSolver->getMatCoarseInt(), tmp_b0, tmp_primal1);
VecAXPBY(tmp_primal0, -1.0, 1.0, tmp_primal1);
MatMultTranspose(mat_lagrange, vec_rhs, tmp_b0);
solveLocalProblem(tmp_b0, tmp_b0);
MatMult(mat_primal_b, tmp_b0, tmp_primal1);
MatMult(subDomainSolver->getMatCoarseInt(), tmp_b0, tmp_primal1);
VecAXPBY(tmp_primal0, 1.0, 1.0, tmp_primal1);
KSPSolve(ksp_schur_primal, tmp_primal0, tmp_primal0);
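// tmp_primal0 now contains
// u_Pi = inv(S_PiPi) * (f_Pi - K_PiB * inv(K_BB) * (f_B - trans(L) * lambda))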
@@ -1364,11 +1371,11 @@ namespace AMDiS {
// === Solve for u_b. ===
VecCopy(f_b, tmp_b0);
VecCopy(subDomainSolver->getRhsInterior(), tmp_b0);
MatMultTranspose(mat_lagrange, vec_rhs, tmp_b1);
VecAXPBY(tmp_b0, -1.0, 1.0, tmp_b1);
MatMult(mat_b_primal, tmp_primal0, tmp_b1);
MatMult(subDomainSolver->getMatIntCoarse(), tmp_primal0, tmp_b1);
VecAXPBY(tmp_b0, -1.0, 1.0, tmp_b1);
solveLocalProblem(tmp_b0, tmp_b0);
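// tmp_b0 now contains
// u_B = inv(K_BB) * (f_B - trans(L) * lambda - K_BPi * u_Pi)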
@@ -1385,9 +1392,13 @@ namespace AMDiS {
VecDestroy(&tmp_lagrange0);
VecDestroy(&tmp_primal0);
VecDestroy(&tmp_primal1);
#if 0
VecDestroy(&f_b);
VecDestroy(&f_primal);
#else
subDomainSolver->solvePetscMatrix(vec, NULL);
#endif
}
@@ -1395,10 +1406,13 @@ namespace AMDiS {
{
FUNCNAME("PetscSolverFeti::destroyMatrixData()");
#if 0
MatDestroy(&mat_b_b);
MatDestroy(&mat_primal_primal);
MatDestroy(&mat_b_primal);
MatDestroy(&mat_primal_b);
#endif
MatDestroy(&mat_lagrange);
// === Destroy preconditioner data structures. ===
@@ -1415,8 +1429,6 @@ namespace AMDiS {
destroySchurPrimalKsp();
destroyFetiKsp();
KSPDestroy(&ksp_b);
}
@@ -179,6 +179,7 @@ namespace AMDiS {
/// ranks in which the DOF is contained.
map<const FiniteElemSpace*, DofIndexToPartitions> boundaryDofRanks;
#if 0
/// Global PETSc matrix of non-primal variables.
Mat mat_b_b;
@@ -188,16 +189,21 @@ namespace AMDiS {
/// Global PETSc matrices that connect the primal with the
/// non-primal variables.
Mat mat_b_primal, mat_primal_b;
#endif
/// Global PETSc matrix of Lagrange variables.
Mat mat_lagrange;
#if 0
/// Right hand side PETSc vectors for primal and non-primal variables.
Vec f_b, f_primal;
#endif
#if 0
/// PETSc solver object that inverts the matrix of non-primal
/// variables, \ref mat_b_b.
KSP ksp_b;
#endif
/// 0: Solve the Schur complement on primal variables with an iterative solver.
/// 1: Create the Schur complement matrix explicitly and solve it with a
@@ -11,6 +11,7 @@
#include "parallel/SubDomainSolver.h"
#include "SystemVector.h"
namespace AMDiS {
@@ -19,25 +20,258 @@ namespace AMDiS {
void SubDomainSolver::fillPetscMatrix(Matrix<DOFMatrix*> *mat)
{
FUNCNAME("SubDomainSolver::fillPetscMatrix()");
vector<const FiniteElemSpace*> feSpaces = getFeSpaces(mat);
int nRowsRankInterior = interiorMap->getRankDofs();
int nRowsOverallInterior = interiorMap->getOverallDofs();
int nRowsRankCoarse = coarseSpaceMap->getRankDofs();
int nRowsOverallCoarse = coarseSpaceMap->getOverallDofs();
MatCreateSeqAIJ(mpiCommInterior, nRowsRankInterior, nRowsRankInterior,
60, PETSC_NULL, &matIntInt);
MatCreateMPIAIJ(mpiCommCoarseSpace,
nRowsRankCoarse, nRowsRankCoarse,
nRowsOverallCoarse, nRowsOverallCoarse,
60, PETSC_NULL, 60, PETSC_NULL, &matCoarseCoarse);
MatCreateMPIAIJ(mpiCommCoarseSpace,
nRowsRankCoarse, nRowsRankInterior,
nRowsOverallCoarse, nRowsOverallInterior,
60, PETSC_NULL, 60, PETSC_NULL, &matCoarseInt);
MatCreateMPIAIJ(mpiCommCoarseSpace,
nRowsRankInterior, nRowsRankCoarse,
nRowsOverallInterior, nRowsOverallCoarse,
60, PETSC_NULL, 60, PETSC_NULL, &matIntCoarse);
// === Prepare traverse of sequentially created matrices. ===
using mtl::tag::row; using mtl::tag::nz; using mtl::begin; using mtl::end;
namespace traits = mtl::traits;
typedef DOFMatrix::base_matrix_type Matrix;
typedef traits::range_generator<row, Matrix>::type cursor_type;
typedef traits::range_generator<nz, cursor_type>::type icursor_type;
vector<int> cols, colsOther;
vector<double> values, valuesOther;
cols.reserve(300);
colsOther.reserve(300);
values.reserve(300);
valuesOther.reserve(300);
// === Traverse all sequentially created matrices and add the values to ===
// === the global PETSc matrices. ===
int nComponents = mat->getSize();
for (int i = 0; i < nComponents; i++) {
for (int j = 0; j < nComponents; j++) {
if (!(*mat)[i][j])
continue;
traits::col<Matrix>::type col((*mat)[i][j]->getBaseMatrix());
traits::const_value<Matrix>::type value((*mat)[i][j]->getBaseMatrix());
// Traverse all rows.
for (cursor_type cursor = begin<row>((*mat)[i][j]->getBaseMatrix()),
cend = end<row>((*mat)[i][j]->getBaseMatrix()); cursor != cend; ++cursor) {
bool rowPrimal = isCoarseSpace(feSpaces[i], *cursor);
cols.clear();
colsOther.clear();
values.clear();
valuesOther.clear();
// Traverse all columns.
for (icursor_type icursor = begin<nz>(cursor), icend = end<nz>(cursor);
icursor != icend; ++icursor) {
bool colPrimal = isCoarseSpace(feSpaces[j], col(*icursor));
if (colPrimal) {
if (rowPrimal) {
cols.push_back(col(*icursor));
values.push_back(value(*icursor));
} else {
colsOther.push_back(col(*icursor));
valuesOther.push_back(value(*icursor));
}
} else {
if (rowPrimal) {
colsOther.push_back(col(*icursor));
valuesOther.push_back(value(*icursor));
} else {
cols.push_back(col(*icursor));
values.push_back(value(*icursor));
}
}
} // for each nnz in row
// === Set matrix values. ===
if (rowPrimal) {
int rowIndex = coarseSpaceMap->getMatIndex(i, *cursor);
for (unsigned int k = 0; k < cols.size(); k++)
cols[k] = coarseSpaceMap->getMatIndex(j, cols[k]);
MatSetValues(matCoarseCoarse, 1, &rowIndex, cols.size(),
&(cols[0]), &(values[0]), ADD_VALUES);
if (colsOther.size()) {
for (unsigned int k = 0; k < colsOther.size(); k++)
colsOther[k] = interiorMap->getMatIndex(j, colsOther[k]);
MatSetValues(matCoarseInt, 1, &rowIndex, colsOther.size(),
&(colsOther[0]), &(valuesOther[0]), ADD_VALUES);
}
} else {
int localRowIndex = interiorMap->getLocalMatIndex(i, *cursor);
for (unsigned int k = 0; k < cols.size(); k++)
cols[k] = interiorMap->getLocalMatIndex(j, cols[k]);
MatSetValues(matIntInt, 1, &localRowIndex, cols.size(),
&(cols[0]), &(values[0]), ADD_VALUES);
if (colsOther.size()) {
int globalRowIndex = interiorMap->getMatIndex(i, *cursor);
for (unsigned int k = 0; k < colsOther.size(); k++)
colsOther[k] = coarseSpaceMap->getMatIndex(j, colsOther[k]);
MatSetValues(matIntCoarse, 1, &globalRowIndex, colsOther.size(),
&(colsOther[0]), &(valuesOther[0]), ADD_VALUES);
}
}
}
}
}
// === Start global assembly procedure. ===
MatAssemblyBegin(matIntInt, MAT_FINAL_ASSEMBLY);
MatAssemblyEnd(matIntInt, MAT_FINAL_ASSEMBLY);
MatAssemblyBegin(matCoarseCoarse, MAT_FINAL_ASSEMBLY);
MatAssemblyEnd(matCoarseCoarse, MAT_FINAL_ASSEMBLY);
MatAssemblyBegin(matIntCoarse, MAT_FINAL_ASSEMBLY);
MatAssemblyEnd(matIntCoarse, MAT_FINAL_ASSEMBLY);
MatAssemblyBegin(matCoarseInt, MAT_FINAL_ASSEMBLY);
MatAssemblyEnd(matCoarseInt, MAT_FINAL_ASSEMBLY);
// === Create solver for the non-primal (thus local) variables. ===
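// KSPPREONLY combined with PCLU means the interior matrix is factored
// once by UMFPACK; every later solve is just a forward/backward
// substitution, matching the many inv(K_BB) applications FETI-DP needs.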
KSPCreate(mpiCommInterior, &kspInterior);
KSPSetOperators(kspInterior, matIntInt, matIntInt, SAME_NONZERO_PATTERN);
KSPSetOptionsPrefix(kspInterior, "interior_");
KSPSetType(kspInterior, KSPPREONLY);
PC pcInterior;
KSPGetPC(kspInterior, &pcInterior);
PCSetType(pcInterior, PCLU);
PCFactorSetMatSolverPackage(pcInterior, MATSOLVERUMFPACK);
KSPSetFromOptions(kspInterior);
}
void SubDomainSolver::fillPetscRhs(SystemVector *vec)
{
FUNCNAME("SubDomainSolver::fillPetscRhs()");
VecCreateMPI(mpiCommCoarseSpace,
coarseSpaceMap->getRankDofs(), coarseSpaceMap->getOverallDofs(),
&rhsCoarseSpace);
VecCreateMPI(mpiCommCoarseSpace,
interiorMap->getRankDofs(), interiorMap->getOverallDofs(),
&rhsInterior);
for (int i = 0; i < vec->getSize(); i++) {
const FiniteElemSpace *feSpace = vec->getDOFVector(i)->getFeSpace();
DOFVector<double>::Iterator dofIt(vec->getDOFVector(i), USED_DOFS);
for (dofIt.reset(); !dofIt.end(); ++dofIt) {
int index = dofIt.getDOFIndex();
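// Coarse space DOFs can be shared by several ranks, so their
// contributions are accumulated with ADD_VALUES; interior DOFs are
// rank-local and are written directly with INSERT_VALUES.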
if (isCoarseSpace(feSpace, index)) {
index = coarseSpaceMap->getMatIndex(i, index);
VecSetValue(rhsCoarseSpace, index, *dofIt, ADD_VALUES);
} else {
index = interiorMap->getMatIndex(i, index);
VecSetValue(rhsInterior, index, *dofIt, INSERT_VALUES);
}
}
}
VecAssemblyBegin(rhsCoarseSpace);
VecAssemblyEnd(rhsCoarseSpace);
VecAssemblyBegin(rhsInterior);
VecAssemblyEnd(rhsInterior);
}
void SubDomainSolver::solvePetscMatrix(SystemVector &vec, AdaptInfo *adaptInfo)
{
FUNCNAME("SubDomainSolver::solvePetscMatrix()");
VecDestroy(&rhsCoarseSpace);
VecDestroy(&rhsInterior);
}
void SubDomainSolver::destroyMatrixData()
{
FUNCNAME("SubDomainSolver::destroyMatrixData()");
MatDestroy(&matIntInt);
MatDestroy(&matCoarseCoarse);
MatDestroy(&matCoarseInt);
MatDestroy(&matIntCoarse);
KSPDestroy(&kspInterior);
}
void SubDomainSolver::solve(Vec &rhs, Vec &sol)
{
KSPSolve(kspInterior, rhs, sol);
}
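// A minimal usage sketch of the class (hypothetical caller code: the
// variable names are illustrative, and the maps and communicators are
// assumed to be set up as in PetscSolverFeti):
//
// SubDomainSolver subSolver(meshDistributor, mpiComm, mpiSelfComm);
// subSolver.setDofMapping(&primalDofMap, &localDofMap);
// subSolver.fillPetscMatrix(mat); // assemble blocks, create kspInterior
// subSolver.fillPetscRhs(vec); // assemble rhsInterior / rhsCoarseSpace
// subSolver.solve(rhs, sol); // interior solve: sol = inv(matIntInt) * rhs
// subSolver.destroyMatrixData(); // free matrices and the interior KSP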
vector<const FiniteElemSpace*> SubDomainSolver::getFeSpaces(Matrix<DOFMatrix*> *mat)
{
FUNCNAME("SubDomainSolver::getFeSpaces()");
int nComponents = mat->getNumRows();
vector<const FiniteElemSpace*> result(nComponents);
for (int i = 0; i < nComponents; i++)
for (int j = 0; j < nComponents; j++)
if ((*mat)[i][j]) {
result[i] = (*mat)[i][j]->getRowFeSpace();
break;