Commit 6ef1e048 authored by Thomas Witkowski
Some work on the nested Schur complement approach.

parent b640da9e
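
Background (a sketch inferred from the diff below, not part of the original commit message): the Schur complement approach splits the unknowns into interior (I) and boundary (B) DOFs, which the new PetscSolverSchur::updateDofData() enumerates per rank via mpi::getDofNumbering(). The linear system then has the 2x2 block form

    [ A_II  A_IB ] [ u_I ]   [ f_I ]
    [ A_BI  A_BB ] [ u_B ] = [ f_B ],        S = A_BB - A_BI * A_II^{-1} * A_IB,

where the four blocks correspond to matA11, matA12, matA21 and matA22 in the code, are combined into a nested PETSc matrix (MatCreateNest), and the two fields are handed to a PCFIELDSPLIT preconditioner that can operate on the Schur complement S.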
@@ -136,7 +136,7 @@
 #if HAVE_PARALLEL_MTL4
 #include "parallel/Mtl4Solver.h"
 #else
-#include "parallel/PetscSolver.h"
+#include "parallel/PetscProblemStat.h"
 #endif
 #endif
...
@@ -1545,6 +1545,11 @@ namespace AMDiS {
     BoundaryType *bound =
       useGetBound ? new BoundaryType[basisFcts->getNumber()] : NULL;
 
+    if (matrix)
+      matrix->startInsertion(matrix->getNnz());
+    if (vector)
+      vector->set(0.0);
+
     // == Traverse mesh and assemble. ==
...
@@ -23,6 +23,10 @@
 #ifndef AMDIS_SERIALIZER_H
 #define AMDIS_SERIALIZER_H
 
+#if HAVE_PARALLEL_DOMAIN_AMDIS
+#include <mpi.h>
+#endif
+
 #include <map>
 #include "boost/lexical_cast.hpp"
...
@@ -1702,35 +1702,26 @@ namespace AMDiS {
     }
 
-    // === Get starting position for global rank DOF ordering. ====
+    // Get displacement for global rank DOF ordering and global DOF number.
     nRankDofs = rankDofs.size();
-    mpiComm.Scan(&nRankDofs, &rstart, 1, MPI_INT, MPI_SUM);
-    rstart -= nRankDofs;
+    mpi::getDofNumbering(mpiComm, nRankDofs, rstart, nOverallDofs);
 
-    // === Stores for all rank owned DOFs a new global index. ===
+    // Stores for all rank owned DOFs a new global index.
     DofIndexMap rankDofsNewGlobalIndex;
     for (int i = 0; i < nRankDofs; i++)
       rankDofsNewGlobalIndex[rankDofs[i]] = i + rstart;
 
-    // === Calculate number of overall DOFs of all partitions. ===
-    nOverallDofs = 0;
-    mpiComm.Allreduce(&nRankDofs, &nOverallDofs, 1, MPI_INT, MPI_SUM);
-
     // === Send and receive new DOF indices. ===
 
 #if (DEBUG != 0)
     ParallelDebug::testDofContainerCommunication(*this, sendDofs, recvDofs);
 #endif
 
-    int i = 0;
     StdMpi<vector<DegreeOfFreedom> > stdMpi(mpiComm, false);
     for (RankToDofContainer::iterator sendIt = sendDofs.begin();
-         sendIt != sendDofs.end(); ++sendIt, i++) {
+         sendIt != sendDofs.end(); ++sendIt) {
       stdMpi.getSendData(sendIt->first).resize(0);
       stdMpi.getSendData(sendIt->first).reserve(sendIt->second.size());
       for (DofContainer::iterator dofIt = sendIt->second.begin();

@@ -1751,10 +1742,10 @@ namespace AMDiS {
     for (RankToDofContainer::iterator recvIt = recvDofs.begin();
          recvIt != recvDofs.end(); ++recvIt) {
-      int j = 0;
+      int i = 0;
       for (DofContainer::iterator dofIt = recvIt->second.begin();
            dofIt != recvIt->second.end(); ++dofIt) {
-        rankDofsNewGlobalIndex[*dofIt] = stdMpi.getRecvData(recvIt->first)[j++];
+        rankDofsNewGlobalIndex[*dofIt] = stdMpi.getRecvData(recvIt->first)[i++];
         isRankDof[**dofIt] = false;
       }
     }
...
@@ -25,6 +25,8 @@
 #include <map>
 #include <set>
 
+#include <mpi.h>
+
 #include "AMDiS_fwd.h"
 #include "Mesh.h"
...
@@ -48,6 +48,31 @@ namespace AMDiS {
     {
       srand(time(NULL) * (MPI::COMM_WORLD.Get_rank() + 1));
     }
 
+    /** \brief
+     * In many situations a rank computes a number of local DOFs. Then all
+     * ranks want to know the number of global DOFs and the starting
+     * displacement number of the DOF numbering in each rank.
+     *
+     * \param[in]   mpiComm       The MPI communicator.
+     * \param[in]   nRankDofs     The number of local DOFs.
+     * \param[out]  rStartDofs    Displacement of the DOF numbering. On rank n
+     *                            this is the sum of all local DOF numbers in
+     *                            ranks 0 to n - 1.
+     * \param[out]  nOverallDofs  Global sum of nRankDofs. It is equal on all
+     *                            ranks.
+     */
+    inline void getDofNumbering(MPI::Intracomm& mpiComm,
+                                int nRankDofs,
+                                int& rStartDofs,
+                                int& nOverallDofs)
+    {
+      rStartDofs = 0;
+      nOverallDofs = 0;
+      mpiComm.Scan(&nRankDofs, &rStartDofs, 1, MPI_INT, MPI_SUM);
+      rStartDofs -= nRankDofs;
+      mpiComm.Allreduce(&nRankDofs, &nOverallDofs, 1, MPI_INT, MPI_SUM);
+    }
   }
 }
...
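A minimal usage sketch of the mpi::getDofNumbering() helper added above. The rank-dependent DOF count and the AMDiS::mpi namespace qualifier are assumptions taken from the `mpi::` calls in the diff; the snippet is meant to run inside an already MPI-initialized AMDiS program:

    #include <mpi.h>
    #include "parallel/MpiHelper.h"

    // Each rank owns a different number of DOFs (illustrative values).
    int nRankDofs = MPI::COMM_WORLD.Get_rank() + 1;
    int rStartDofs = 0, nOverallDofs = 0;

    // rStartDofs becomes the exclusive prefix sum of nRankDofs over the ranks,
    // nOverallDofs the global total. E.g. with 3 ranks owning 1, 2 and 3 DOFs:
    //   rank 0: rStartDofs = 0,  rank 1: rStartDofs = 1,  rank 2: rStartDofs = 3,
    //   nOverallDofs = 6 on every rank.
    // Local DOF i then maps to the global index rStartDofs + i.
    AMDiS::mpi::getDofNumbering(MPI::COMM_WORLD, nRankDofs, rStartDofs, nOverallDofs);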
@@ -29,6 +29,7 @@ namespace AMDiS {
     double wtime = MPI::Wtime();
 
+    petscSolver->setMeshDistributor(meshDistributor);
     petscSolver->fillPetscMatrix(systemMatrix, rhs);
     petscSolver->solvePetscMatrix(*solution, adaptInfo);
...
@@ -42,12 +42,12 @@ namespace AMDiS {
     if (name == "petsc-schur") {
 #ifdef HAVE_PETSC_DEV
-      petscSolver = new PetscSolverSchur(meshDistributor);
+      petscSolver = new PetscSolverSchur();
 #else
       ERROR_EXIT("Petsc schur complement solver is only supported when petsc-dev is used!\n");
 #endif
     } else if (name == "petsc" || name == "") {
-      petscSolver = new PetscSolverGlobalMatrix(meshDistributor);
+      petscSolver = new PetscSolverGlobalMatrix();
     } else {
       ERROR_EXIT("No parallel solver %s available!\n", name.c_str());
     }
...
@@ -12,6 +12,7 @@
 #include "parallel/PetscSolver.h"
 #include "parallel/StdMpi.h"
+#include "parallel/MpiHelper.h"
 
 namespace AMDiS {

@@ -30,6 +31,10 @@ namespace AMDiS {
   {
     FUNCNAME("PetscSolverGlobalMatrix::fillPetscMatrix()");
 
+    TEST_EXIT_DBG(meshDistributor)("No mesh distributor object defined!\n");
+    TEST_EXIT_DBG(mat)("No DOF matrix defined!\n");
+    TEST_EXIT_DBG(vec)("No DOF vector defined!\n");
+
     double wtime = MPI::Wtime();
     int nComponents = mat->getNumRows();
     int nRankRows = meshDistributor->getNumberRankDofs() * nComponents;
@@ -233,9 +238,9 @@ namespace AMDiS {
     for (cursor_type cursor = begin<row>(mat->getBaseMatrix()),
            cend = end<row>(mat->getBaseMatrix()); cursor != cend; ++cursor) {
-      // Global index of the current row dof.
+      // Global index of the current row DOF.
       int globalRowDof = meshDistributor->mapLocalToGlobal(*cursor);
-      // Test if the current row dof is a periodic dof.
+      // Test if the current row DOF is a periodic DOF.
       bool periodicRow = meshDistributor->isPeriodicDof(globalRowDof);
 
       if (!periodicRow) {

@@ -264,7 +269,7 @@ namespace AMDiS {
         if (!periodicCol) {
           // Calculate the exact position of the column index in the PETSc matrix.
-          cols.push_back(globalColDof * dispMult + dispAddCol);
+          cols.push_back(colIndex);
           values.push_back(value(*icursor));
         } else {
           // === Row index is not periodic, but column index is. ===
@@ -434,10 +439,10 @@ namespace AMDiS {
       if (rankOnly && !meshDistributor->getIsRankDof(dofIt.getDOFIndex()))
         continue;
 
-      // Calculate global row index of the dof.
+      // Calculate global row index of the DOF.
       DegreeOfFreedom globalRowDof =
         meshDistributor->mapLocalToGlobal(dofIt.getDOFIndex());
-      // Calculate PETSc index of the row dof.
+      // Calculate PETSc index of the row DOF.
       int index = globalRowDof * dispMult + dispAdd;
 
       if (meshDistributor->isPeriodicDof(globalRowDof)) {

@@ -451,7 +456,7 @@ namespace AMDiS {
           VecSetValues(petscVec, 1, &mappedIndex, &value, ADD_VALUES);
         }
       } else {
-        // The dof index is not periodic.
+        // The DOF index is not periodic.
         double value = *dofIt;
         VecSetValues(petscVec, 1, &index, &value, ADD_VALUES);
       }
@@ -652,33 +657,42 @@ namespace AMDiS {
   }
 
 #ifdef HAVE_PETSC_DEV
-  void PetscSolverSchur::fillPetscMatrix(Matrix<DOFMatrix*> *mat, SystemVector *vec)
+  void PetscSolverSchur::updateDofData()
   {
-    FUNCNAME("PetscSolverSchur::fillPetscMatrix()");
+    FUNCNAME("PetscSolverSchur::updateDofData()");
 
-    int nComponents = mat->getNumRows();
+    TEST_EXIT_DBG(meshDistributor)("No mesh distributor object defined!\n");
+
+    MPI::Intracomm& mpiComm = meshDistributor->getMpiComm();
 
     typedef map<int, DofContainer> RankToDofContainer;
     typedef map<DegreeOfFreedom, bool> DofIndexToBool;
 
-    std::set<DegreeOfFreedom> boundaryDofsSet;
+    boundaryDofs.clear();
     std::set<DegreeOfFreedom> boundaryLocalDofs;
     RankToDofContainer& sendDofs = meshDistributor->getSendDofs();
     for (RankToDofContainer::iterator rankIt = sendDofs.begin();
-         rankIt != sendDofs.end(); ++rankIt)
+         rankIt != sendDofs.end(); ++rankIt) {
       for (DofContainer::iterator dofIt = rankIt->second.begin();
            dofIt != rankIt->second.end(); ++dofIt) {
         boundaryLocalDofs.insert(**dofIt);
-        for (int i = 0; i < nComponents; i++)
-          boundaryDofsSet.insert(meshDistributor->mapLocalToGlobal(**dofIt) * nComponents + i);
+        boundaryDofs.insert(meshDistributor->mapLocalToGlobal(**dofIt));
       }
+    }
 
-    boundaryDofs.clear();
-    boundaryDofs.insert(boundaryDofs.begin(),
-                        boundaryDofsSet.begin(), boundaryDofsSet.end());
+    nBoundaryDofs = boundaryDofs.size();
+    mpi::getDofNumbering(mpiComm, nBoundaryDofs,
+                         rStartBoundaryDofs, nOverallBoundaryDofs);
+
+    int counter = rStartBoundaryDofs;
+    for (std::set<DegreeOfFreedom>::iterator it = boundaryDofs.begin();
+         it != boundaryDofs.end(); ++it)
+      mapGlobalBoundaryDof[*it] = counter++;
 
     std::set<DegreeOfFreedom> otherBoundaryLocalDofs;
     RankToDofContainer& recvDofs = meshDistributor->getRecvDofs();
     for (RankToDofContainer::iterator rankIt = recvDofs.begin();
@@ -687,19 +701,153 @@ namespace AMDiS {
            dofIt != rankIt->second.end(); ++dofIt)
         otherBoundaryLocalDofs.insert(**dofIt);
 
-    std::set<DegreeOfFreedom> interiorDofsSet;
+    interiorDofs.clear();
     DofIndexToBool& isRankDof = meshDistributor->getIsRankDof();
     for (DofIndexToBool::iterator dofIt = isRankDof.begin();
-         dofIt != isRankDof.end(); ++dofIt)
+         dofIt != isRankDof.end(); ++dofIt) {
       if (dofIt->second &&
           boundaryLocalDofs.count(dofIt->first) == 0 &&
           otherBoundaryLocalDofs.count(dofIt->first) == 0)
-        for (int i = 0; i < nComponents; i++)
-          interiorDofsSet.insert(meshDistributor->mapLocalToGlobal(dofIt->first) * nComponents + i);
+        interiorDofs.insert(meshDistributor->mapLocalToGlobal(dofIt->first));
+    }
 
-    interiorDofs.clear();
-    interiorDofs.insert(interiorDofs.begin(),
-                        interiorDofsSet.begin(), interiorDofsSet.end());
+    nInteriorDofs = interiorDofs.size();
+    mpi::getDofNumbering(mpiComm, nInteriorDofs,
+                         rStartInteriorDofs, nOverallInteriorDofs);
+
+    counter = rStartInteriorDofs;
+    for (std::set<DegreeOfFreedom>::iterator it = interiorDofs.begin();
+         it != interiorDofs.end(); ++it)
+      mapGlobalInteriorDof[*it] = counter++;
+
+    TEST_EXIT_DBG(nInteriorDofs > 0)("Should not happen!\n");
+
+    StdMpi<vector<DegreeOfFreedom> > stdMpi(mpiComm, false);
+    for (RankToDofContainer::iterator sendIt = meshDistributor->getSendDofs().begin();
+         sendIt != meshDistributor->getSendDofs().end(); ++sendIt) {
+      stdMpi.getSendData(sendIt->first).resize(0);
+      stdMpi.getSendData(sendIt->first).reserve(sendIt->second.size());
+      for (DofContainer::iterator dofIt = sendIt->second.begin();
+           dofIt != sendIt->second.end(); ++dofIt) {
+        int globalSendDof = meshDistributor->mapLocalToGlobal(**dofIt);
+
+        TEST_EXIT_DBG(mapGlobalBoundaryDof.count(globalSendDof))
+          ("No mapping for boundary DOF %d!\n", globalSendDof);
+
+        stdMpi.getSendData(sendIt->first).push_back(mapGlobalBoundaryDof[globalSendDof]);
+      }
+    }
+
+    stdMpi.updateSendDataSize();
+    stdMpi.recv(meshDistributor->getRecvDofs());
+    stdMpi.startCommunication();
+
+    for (RankToDofContainer::iterator recvIt = meshDistributor->getRecvDofs().begin();
+         recvIt != meshDistributor->getRecvDofs().end(); ++recvIt) {
+      int i = 0;
+      for (DofContainer::iterator dofIt = recvIt->second.begin();
+           dofIt != recvIt->second.end(); ++dofIt) {
+        int globalRecvDof = meshDistributor->mapLocalToGlobal(**dofIt);
+        mapGlobalBoundaryDof[globalRecvDof] = stdMpi.getRecvData(recvIt->first)[i++];
+      }
+    }
+  }
+
+  void PetscSolverSchur::fillPetscMatrix(Matrix<DOFMatrix*> *mat, SystemVector *vec)
+  {
+    FUNCNAME("PetscSolverSchur::fillPetscMatrix()");
+
+    updateDofData();
+
+    int nComponents = mat->getNumRows();
+    int nInteriorRows = nInteriorDofs * nComponents;
+    int nOverallInteriorRows = nOverallInteriorDofs * nComponents;
+    int nBoundaryRows = nBoundaryDofs * nComponents;
+    int nOverallBoundaryRows = nOverallBoundaryDofs * nComponents;
+
+    MSG("INTERIOR LOCAL MAT: %d x %d\n", nInteriorRows, nInteriorRows);
+    MSG("INTERIOR GLOBAL MAT: %d x %d\n", nOverallInteriorRows, nOverallInteriorRows);
+    MSG("BOUNDARY LOCAL MAT: %d x %d\n", nBoundaryRows, nBoundaryRows);
+    MSG("BOUNDARY GLOBAL MAT: %d x %d\n", nOverallBoundaryRows, nOverallBoundaryRows);
+
+    MatCreateMPIAIJ(PETSC_COMM_WORLD,
+                    nInteriorRows, nInteriorRows,
+                    nOverallInteriorRows, nOverallInteriorRows,
+                    100, PETSC_NULL, 100, PETSC_NULL, &matA11);
+
+    MatCreateMPIAIJ(PETSC_COMM_WORLD,
+                    nBoundaryRows, nBoundaryRows,
+                    nOverallBoundaryRows, nOverallBoundaryRows,
+                    100, PETSC_NULL, 100, PETSC_NULL, &matA22);
+
+    MatCreateMPIAIJ(PETSC_COMM_WORLD,
+                    nInteriorRows, nBoundaryRows,
+                    nOverallInteriorRows, nOverallBoundaryRows,
+                    100, PETSC_NULL, 100, PETSC_NULL, &matA12);
+
+    MatCreateMPIAIJ(PETSC_COMM_WORLD,
+                    nBoundaryRows, nInteriorRows,
+                    nOverallBoundaryRows, nOverallInteriorRows,
+                    100, PETSC_NULL, 100, PETSC_NULL, &matA21);
+
+    for (int i = 0; i < nComponents; i++)
+      for (int j = 0; j < nComponents; j++)
+        if ((*mat)[i][j])
+          setDofMatrix((*mat)[i][j], nComponents, i, j);
+
+    MatAssemblyBegin(matA11, MAT_FINAL_ASSEMBLY);
+    MatAssemblyEnd(matA11, MAT_FINAL_ASSEMBLY);
+    MatAssemblyBegin(matA12, MAT_FINAL_ASSEMBLY);
+    MatAssemblyEnd(matA12, MAT_FINAL_ASSEMBLY);
+    MatAssemblyBegin(matA21, MAT_FINAL_ASSEMBLY);
+    MatAssemblyEnd(matA21, MAT_FINAL_ASSEMBLY);
+    MatAssemblyBegin(matA22, MAT_FINAL_ASSEMBLY);
+    MatAssemblyEnd(matA22, MAT_FINAL_ASSEMBLY);
+
+    Mat tmpMat[2][2];
+    tmpMat[0][0] = matA11;
+    tmpMat[0][1] = matA12;
+    tmpMat[1][0] = matA21;
+    tmpMat[1][1] = matA22;
+
+    MatCreateNest(PETSC_COMM_WORLD, 2, PETSC_NULL, 2, PETSC_NULL,
+                  &tmpMat[0][0], &petscMatrix);
+    MatNestSetVecType(petscMatrix, VECNEST);
+    MatAssemblyBegin(petscMatrix, MAT_FINAL_ASSEMBLY);
+    MatAssemblyEnd(petscMatrix, MAT_FINAL_ASSEMBLY);
+
+    MatGetVecs(matA12, &petscRhsVec2, &petscRhsVec1);
+    MatGetVecs(matA12, &petscSolVec2, &petscSolVec1);
+
+    for (int i = 0; i < nComponents; i++)
+      setDofVector(petscRhsVec, vec->getDOFVector(i), nComponents, i);
+
+    Vec tmpVec[2];
+    tmpVec[0] = petscRhsVec1;
+    tmpVec[1] = petscRhsVec2;
+    VecCreateNest(PETSC_COMM_WORLD, 2, PETSC_NULL, tmpVec, &petscRhsVec);
+
+    tmpVec[0] = petscSolVec1;
+    tmpVec[1] = petscSolVec2;
+    VecCreateNest(PETSC_COMM_WORLD, 2, PETSC_NULL, tmpVec, &petscSolVec);
+
+    VecAssemblyBegin(petscRhsVec1);
+    VecAssemblyEnd(petscRhsVec1);
+    VecAssemblyBegin(petscRhsVec2);
+    VecAssemblyEnd(petscRhsVec2);
+    VecAssemblyBegin(petscRhsVec);
+    VecAssemblyEnd(petscRhsVec);
   }
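For reference on the four matrix allocations above: MatCreateMPIAIJ() takes the communicator, the local row/column counts, the global row/column counts, and a per-row nonzero preallocation for the diagonal and off-diagonal portions of the distributed matrix. The flat guess of 100 nonzeros per row is taken from the diff itself; annotated, the first call reads:

    MatCreateMPIAIJ(PETSC_COMM_WORLD,
                    nInteriorRows, nInteriorRows,                 // local rows, local cols
                    nOverallInteriorRows, nOverallInteriorRows,   // global rows, global cols
                    100, PETSC_NULL,                              // d_nz, d_nnz: diagonal-block preallocation
                    100, PETSC_NULL,                              // o_nz, o_nnz: off-diagonal-block preallocation
                    &matA11);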
@@ -711,23 +859,138 @@ namespace AMDiS {
     PC pc;
     KSPCreate(PETSC_COMM_WORLD, &solver);
     KSPGetPC(solver, &pc);
-    PCSetType(pc, PCFIELDSPLIT);
-
-    IS interiorIs;
-    ISCreateGeneral(PETSC_COMM_WORLD, interiorDofs.size(), &(interiorDofs[0]),
-                    PETSC_COPY_VALUES, &interiorIs);
-    PCFieldSplitSetIS(pc, "interior", interiorIs);
-
-    IS boundaryIs;
-    ISCreateGeneral(PETSC_COMM_WORLD, boundaryDofs.size(), &(boundaryDofs[0]),
-                    PETSC_COPY_VALUES, &boundaryIs);
-    PCFieldSplitSetIS(pc, "boundary", boundaryIs);
-
-    // KSPSetOperators(solver, petscMatrix, petscMatrix, SAME_NONZERO_PATTERN);
+    // PCSetType(pc, PCFIELDSPLIT);
+
+    const PetscInt interiorField[1] = {0};
+    const PetscInt boundaryField[1] = {1};
+    PCFieldSplitSetFields(pc, "interior", 1, interiorField);
+    PCFieldSplitSetFields(pc, "boundary", 1, boundaryField);
+
+    KSPSetOperators(solver, petscMatrix, petscMatrix, SAME_NONZERO_PATTERN);
     KSPSetTolerances(solver, 0.0, 1e-8, PETSC_DEFAULT, PETSC_DEFAULT);
     KSPSetFromOptions(solver);
     PCSetFromOptions(pc);
 
+    KSPSolve(solver, petscRhsVec, petscSolVec);
+
+    KSPDestroy(solver);
+
+    VecDestroy(petscRhsVec1);
+    VecDestroy(petscRhsVec2);
+    VecDestroy(petscRhsVec);
+    VecDestroy(petscSolVec1);
+    VecDestroy(petscSolVec2);
+    VecDestroy(petscSolVec);
+
+    MatDestroy(matA11);
+    MatDestroy(matA12);
+    MatDestroy(matA21);
+    MatDestroy(matA22);
+    MatDestroy(petscMatrix);
+
+    MSG("SOLVED IT!\n");
+    exit(0);
+  }
+
+  void PetscSolverSchur::setDofMatrix(DOFMatrix* mat, int dispMult,
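
A possible follow-up to the commented-out PCSetType(pc, PCFIELDSPLIT) in the solve routine above, sketched against the petsc-dev fieldsplit API of that era; exact option names can differ between PETSc versions:

    // Activate the fieldsplit preconditioner for the two fields registered with
    // PCFieldSplitSetFields() and request a Schur-complement factorization
    // instead of the default (multiplicative) split.
    PCSetType(pc, PCFIELDSPLIT);
    PCFieldSplitSetType(pc, PC_COMPOSITE_SCHUR);

    // Roughly equivalent runtime options:
    //   -pc_type fieldsplit -pc_fieldsplit_type schur
    //   -fieldsplit_interior_ksp_type preonly -fieldsplit_interior_pc_type ilu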