Commit 66a86905 authored by Thomas Witkowski

Blub

parent 41a2b884
@@ -438,9 +438,12 @@ namespace AMDiS {
     file << "</VTKFile>\n";
 #if HAVE_PARALLEL_DOMAIN_AMDIS
-    if (MPI::COMM_WORLD.Get_rank() == 0)
+    if (MPI::COMM_WORLD.Get_rank() == 0) {
+      vector<string> componentNames;
+      componentNames.push_back("elvalue");
       VtkWriter::writeParallelFile(fname + ".pvtu", MPI::COMM_WORLD.Get_size(),
-                                   fname, ".vtu", 1);
+                                   fname, ".vtu", componentNames);
+    }
 #endif
   }
 }
...
@@ -140,11 +140,17 @@ namespace AMDiS {
     vtkWriter.writeFile(fn + paraviewFileExt);
 #if HAVE_PARALLEL_DOMAIN_AMDIS
-    if (MPI::COMM_WORLD.Get_rank() == 0)
+    if (MPI::COMM_WORLD.Get_rank() == 0) {
+      vector<string> componentNames;
+      for (unsigned int i = 0; i < dataCollectors.size(); i++)
+        componentNames.push_back(dataCollectors[i]->getValues()->getName());
       vtkWriter.writeParallelFile(paraFilename + paraviewParallelFileExt,
                                   MPI::COMM_WORLD.Get_size(),
-                                  filename, postfix,
-                                  dataCollectors.size());
+                                  filename,
+                                  postfix,
+                                  componentNames);
+    }
 #endif
     MSG("ParaView file written to %s\n", (fn + paraviewFileExt).c_str());
...
@@ -68,7 +68,7 @@ namespace AMDiS {
   void VtkWriter::writeParallelFile(string name, int nRanks,
                                     string fnPrefix, string fnPostfix,
-                                    int nComponents)
+                                    vector<string> &componentNames)
   {
     FUNCNAME("VtkWriter::writeParallelFile()");
@@ -94,9 +94,10 @@ namespace AMDiS {
          << " </PCells>\n";
     file << " <PPointData>\n";
-    for (int i = 0; i < nComponents; i++)
-      file << " <PDataArray type=\"Float32\" Name=\"value"
-           << i << "\" format=\"ascii\"/>\n";
+    for (unsigned int i = 0; i < componentNames.size(); i++)
+      file << " <PDataArray type=\"Float32\" Name=\""
+           << componentNames[i]
+           << "\" format=\"ascii\"/>\n";
     file << " </PPointData>\n";
@@ -235,11 +236,16 @@ namespace AMDiS {
     TEST_EXIT(sPos >= 0)("Failed to find file postfix!\n");
     string name = filename.substr(0, sPos);
-    if (MPI::COMM_WORLD.Get_rank() == 0)
+    if (MPI::COMM_WORLD.Get_rank() == 0) {
+      vector<string> componentNames;
+      for (unsigned int i = 0; i < dcList.size(); i++)
+        componentNames.push_back(dcList[i]->getValues()->getName());
       writer.writeParallelFile(name + ".pvtu",
                                MPI::COMM_WORLD.Get_size(),
                                name, ".vtu",
-                               static_cast<int>(dcList.size()));
+                               componentNames);
+    }
     filename = name + "-p" + lexical_cast<string>(MPI::COMM_WORLD.Get_rank()) + "-.vtu";
   }
...
@@ -55,7 +55,7 @@ namespace AMDiS {
     /// Writes a pvtu file, which contains the links to all the rank files.
     static void writeParallelFile(string name, int nRanks,
                                   string fnPrefix, string fnPostfix,
-                                  int nComponents);
+                                  vector<string> &componentNames);
     /// May be used to simply write ParaView files.
     static void writeFile(DOFVector<double> *values,
...
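With the new signature, callers no longer pass a component count; they pass the list of component names that end up as PDataArray entries in the .pvtu header. A minimal calling sketch (illustration only, not part of the commit; assumes the usual AMDiS/MPI setup, with fname the common prefix of the rank-local .vtu files, and "velocity"/"pressure" as hypothetical component names):

    vector<string> componentNames;
    componentNames.push_back("velocity");
    componentNames.push_back("pressure");

    // Only rank 0 writes the parallel header file that links the rank .vtu files.
    if (MPI::COMM_WORLD.Get_rank() == 0)
      VtkWriter::writeParallelFile(fname + ".pvtu",            // output .pvtu file
                                   MPI::COMM_WORLD.Get_size(), // number of rank files
                                   fname, ".vtu",              // prefix/postfix of rank files
                                   componentNames);            // one PDataArray per name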
@@ -178,37 +178,41 @@ namespace AMDiS {
     inline Mat& getMatIntInt()
     {
-      return matIntInt;
+      return mat[0][0];
+      // return matIntInt;
     }
 
     inline Mat& getMatCoarseCoarse()
     {
       FUNCNAME("PetscSolver::getMatCoarseCoarse()");
 
-      TEST_EXIT_DBG(coarseSpaceMap.size())
+      TEST_EXIT_DBG(coarseSpaceMap.size() && mat.size() > 1)
         ("Subdomain solver does not contain a coarse space!\n");
 
-      return matCoarseCoarse;
+      return mat[1][1];
+      // return matCoarseCoarse;
     }
 
     inline Mat& getMatIntCoarse()
     {
       FUNCNAME("PetscSolver::getMatIntCoarse()");
 
-      TEST_EXIT_DBG(coarseSpaceMap.size())
+      TEST_EXIT_DBG(coarseSpaceMap.size() && mat.size() > 1)
         ("Subdomain solver does not contain a coarse space!\n");
 
-      return matIntCoarse;
+      return mat[0][1];
+      // return matIntCoarse;
     }
 
     inline Mat& getMatCoarseInt()
     {
       FUNCNAME("PetscSolver::getMatCoarseInt()");
 
-      TEST_EXIT_DBG(coarseSpaceMap.size())
+      TEST_EXIT_DBG(coarseSpaceMap.size() && mat.size() > 1)
         ("Subdomain solver does not contain a coarse space!\n");
 
-      return matCoarseInt;
+      return mat[1][0];
+      // return matCoarseInt;
     }
 
   protected:
@@ -251,7 +255,7 @@ namespace AMDiS {
     /// Parallel DOF mapping of the (optional) coarse space. Allows to define
     /// different coarse spaces for different components.
-    std::map<int, ParallelDofMapping*> coarseSpaceMap;
+    map<int, ParallelDofMapping*> coarseSpaceMap;
 
     int mpiRank;
@@ -260,7 +264,9 @@ namespace AMDiS {
     MPI::Intracomm mpiCommLocal;
 
     /// Petsc's matrix structure.
-    Mat matIntInt, matCoarseCoarse, matIntCoarse, matCoarseInt;
+    // Mat matIntInt, matCoarseCoarse, matIntCoarse, matCoarseInt;
+    vector<vector<Mat> > mat;
 
     /// PETSc's vector structures for the rhs vector, the solution vector and a
     /// temporary vector for calculating the final residuum.
...
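Taken together, the accessors above fix the block layout of the new mat structure: index 0 is the interior space, index 1 the (first) coarse space. A short sketch of that convention (illustration only, not code from the commit; Mat is the PETSc matrix type):

    // Block convention implied by the getters:
    //   mat[0][0] : interior x interior   (formerly matIntInt)
    //   mat[0][1] : interior x coarse     (formerly matIntCoarse)
    //   mat[1][0] : coarse   x interior   (formerly matCoarseInt)
    //   mat[1][1] : coarse   x coarse     (formerly matCoarseCoarse)
    vector<vector<Mat> > mat(2);
    for (unsigned int i = 0; i < mat.size(); i++)
      mat[i].resize(2);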
@@ -338,14 +338,14 @@ namespace AMDiS {
     for (unsigned int i = 0; i < meshDistributor->getFeSpaces().size(); i++) {
       const FiniteElemSpace *feSpace = meshDistributor->getFeSpace(i);
       createPrimals(feSpace);
       createDuals(feSpace);
       createInterfaceNodes(feSpace);
       createIndexB(feSpace);
     }
 
     primalDofMap.update();
@@ -1352,11 +1352,12 @@ namespace AMDiS {
     // === Create all sets and indices. ===
 
     vector<const FiniteElemSpace*> feSpaces = getFeSpaces(mat);
 
     initialize(feSpaces);
     createFetiData();
 
     // === Create matrices for the FETI-DP method. ===
 
     if (printTimings) {
...
@@ -199,7 +199,10 @@ namespace AMDiS {
     inline bool isInterface(const FiniteElemSpace *feSpace,
                             DegreeOfFreedom dof)
     {
-      return interfaceDofMap[feSpace].isSet(dof);
+      if (feSpace == fullInterface)
+        return interfaceDofMap[feSpace].isSet(dof);
+
+      return false;
     }
 
   protected:
...
@@ -16,17 +16,22 @@
 namespace AMDiS {
 
-  void PetscSolverGlobalBlockMatrix::fillPetscMatrix(Matrix<DOFMatrix*> *mat)
+  void PetscSolverGlobalBlockMatrix::fillPetscMatrix(Matrix<DOFMatrix*> *seqMat)
   {
     FUNCNAME("PetscSolverGlobalBlockMatrix::fillPetscMatrix()");
 
     TEST_EXIT_DBG(meshDistributor)("No mesh distributor object defined!\n");
     TEST_EXIT_DBG(interiorMap)("No parallel mapping object defined!\n");
-    TEST_EXIT_DBG(mat)("No DOF matrix defined!\n");
+    TEST_EXIT_DBG(seqMat)("No DOF matrix defined!\n");
+
+    mat.resize(1);
+    mat[0].resize(1);
+    Mat &matIntInt = mat[0][0];
 
     double wtime = MPI::Wtime();
     const FiniteElemSpace *feSpace = meshDistributor->getFeSpace(0);
-    nComponents = mat->getNumRows();
+    nComponents = seqMat->getNumRows();
     int nRankRows = (*interiorMap)[feSpace].nRankDofs;
     int nOverallRows = (*interiorMap)[feSpace].nOverallDofs;
@@ -63,9 +68,9 @@ namespace AMDiS {
     for (int i = 0; i < nComponents; i++)
       for (int j = 0; j < nComponents; j++)
-        if ((*mat)[i][j]) {
+        if ((*seqMat)[i][j]) {
           int idx = componentInBlock[i] * nBlocks + componentInBlock[j];
-          setDofMatrix(nestMat[idx], (*mat)[i][j],
+          setDofMatrix(nestMat[idx], (*seqMat)[i][j],
                        compNthInBlock[i], compNthInBlock[j]);
         }
@@ -178,7 +183,7 @@ namespace AMDiS {
       if (nestMat[i] != PETSC_NULL)
         MatDestroy(&(nestMat[i]));
 
-    MatDestroy(&matIntInt);
+    MatDestroy(&mat[0][0]);
     KSPDestroy(&kspInterior);
   }
@@ -196,14 +201,14 @@ namespace AMDiS {
   void PetscSolverGlobalBlockMatrix::setDofMatrix(Mat& petscMat,
-                                                  DOFMatrix* mat,
+                                                  DOFMatrix* seqMat,
                                                   int dispRowBlock,
                                                   int dispColBlock)
   {
     FUNCNAME("PetscSolverGlobalBlockMatrix::setDofMatrix()");
 
-    TEST_EXIT(mat)("No DOFMatrix!\n");
     TEST_EXIT(petscMat)("No PETSc matrix!\n");
+    TEST_EXIT(seqMat)("No DOFMatrix!\n");
 
     const FiniteElemSpace *feSpace = meshDistributor->getFeSpace(0);
@@ -211,8 +216,8 @@ namespace AMDiS {
     namespace traits = mtl::traits;
     typedef DOFMatrix::base_matrix_type Matrix;
 
-    traits::col<Matrix>::type col(mat->getBaseMatrix());
-    traits::const_value<Matrix>::type value(mat->getBaseMatrix());
+    traits::col<Matrix>::type col(seqMat->getBaseMatrix());
+    traits::const_value<Matrix>::type value(seqMat->getBaseMatrix());
 
     typedef traits::range_generator<row, Matrix>::type cursor_type;
     typedef traits::range_generator<nz, cursor_type>::type icursor_type;
@@ -228,8 +233,8 @@ namespace AMDiS {
     // === Traverse all rows of the dof matrix and insert row wise the values ===
     // === to the PETSc matrix. ===
 
-    for (cursor_type cursor = begin<row>(mat->getBaseMatrix()),
-           cend = end<row>(mat->getBaseMatrix()); cursor != cend; ++cursor) {
+    for (cursor_type cursor = begin<row>(seqMat->getBaseMatrix()),
+           cend = end<row>(seqMat->getBaseMatrix()); cursor != cend; ++cursor) {
       // Global index of the current row DOF.
       int rowIndex = (*interiorMap)[feSpace][*cursor].global + dispRowIndex;
...
@@ -17,26 +17,30 @@
 namespace AMDiS {
 
-  void PetscSolverGlobalMatrix::fillPetscMatrix(Matrix<DOFMatrix*> *mat)
+  void PetscSolverGlobalMatrix::fillPetscMatrix(Matrix<DOFMatrix*> *seqMat)
   {
     FUNCNAME("PetscSolverGlobalMatrix::fillPetscMatrix()");
 
     if (coarseSpaceMap.size()) {
       updateSubdomainData();
-      fillPetscMatrixWithCoarseSpace(mat);
+      fillPetscMatrixWithCoarseSpace(seqMat);
       return;
     }
 
+    mat.resize(1);
+    mat[0].resize(1);
+    Mat &matIntInt = mat[0][0];
+
     TEST_EXIT_DBG(meshDistributor)("No mesh distributor object defined!\n");
     TEST_EXIT_DBG(interiorMap)("No parallel mapping object defined!\n");
-    TEST_EXIT_DBG(mat)("No DOF matrix defined!\n");
+    TEST_EXIT_DBG(seqMat)("No DOF matrix defined!\n");
 
     double wtime = MPI::Wtime();
 
     // === If required, recompute non zero structure of the matrix. ===
 
-    if (checkMeshChange(mat))
-      nnzInterior.create(mat, mpiCommGlobal, *interiorMap,
+    if (checkMeshChange(seqMat))
+      nnzInterior.create(seqMat, mpiCommGlobal, *interiorMap,
                          &(meshDistributor->getPeriodicMap()),
                          meshDistributor->getElementObjectDb());
@@ -73,11 +77,11 @@ namespace AMDiS {
     // === Transfer values from DOF matrices to the PETSc matrix. ===
 
-    int nComponents = mat->getNumRows();
+    int nComponents = seqMat->getNumRows();
     for (int i = 0; i < nComponents; i++)
       for (int j = 0; j < nComponents; j++)
-        if ((*mat)[i][j])
-          setDofMatrix((*mat)[i][j], i, j);
+        if ((*seqMat)[i][j])
+          setDofMatrix((*seqMat)[i][j], i, j);
 
 #if (DEBUG != 0)
     MSG("Fill petsc matrix 2 needed %.5f seconds\n", MPI::Wtime() - wtime);
@@ -133,13 +137,49 @@ namespace AMDiS {
   }
 
-  void PetscSolverGlobalMatrix::fillPetscMatrixWithCoarseSpace(Matrix<DOFMatrix*> *mat)
+  void PetscSolverGlobalMatrix::fillPetscMatrixWithCoarseSpace(Matrix<DOFMatrix*> *seqMat)
   {
     FUNCNAME("PetscSolverGlobalMatrix::fillPetscMatrixWithCoarseSpace()");
 
     TEST_EXIT_DBG(interiorMap)("Should not happen!\n");
 
-    vector<const FiniteElemSpace*> feSpaces = getFeSpaces(mat);
+    vector<const FiniteElemSpace*> feSpaces = getFeSpaces(seqMat);
+
+    vector<ParallelDofMapping*> uniqueCoarseMap;
+    if (coarseSpaceMap.size()) {
+      TEST_EXIT_DBG(coarseSpaceMap.size() == seqMat->getSize())
+        ("Wrong sizes %d %d\n", coarseSpaceMap.size(), seqMat->getSize());
+
+      std::set<ParallelDofMapping*> tmp;
+      for (map<int, ParallelDofMapping*>::iterator it = coarseSpaceMap.begin();
+           it != coarseSpaceMap.end(); ++it) {
+        if (tmp.count(it->second) == 0) {
+          tmp.insert(it->second);
+          uniqueCoarseMap.push_back(it->second);
+        }
+      }
+    }
+
+    int nCoarseMap = uniqueCoarseMap.size();
+    mat.resize(nCoarseMap + 1);
+    for (int i = 0; i < nCoarseMap + 1; i++)
+      mat[i].resize(nCoarseMap + 1);
+
+    vector<int> componentIthCoarseMap(coarseSpaceMap.size());
+    for (unsigned int i = 0; i < componentIthCoarseMap.size(); i++) {
+      bool found = false;
+      for (int j = 0; j < nCoarseMap; j++) {
+        if (coarseSpaceMap[i] == uniqueCoarseMap[j]) {
+          componentIthCoarseMap[i] = j;
+          found = true;
+          break;
+        }
+      }
+
+      TEST_EXIT_DBG(found)("Should not happen!\n");
+    }
 
     int nRowsRankInterior = interiorMap->getRankDofs();
     int nRowsOverallInterior = interiorMap->getOverallDofs();
@@ -147,8 +187,8 @@ namespace AMDiS {
     // === If required, recompute non zero structure of the matrix. ===
 
     bool localMatrix = (subdomainLevel == 0);
-    if (checkMeshChange(mat, localMatrix)) {
-      nnzInterior.create(mat, mpiCommGlobal, *interiorMap, NULL,
+    if (checkMeshChange(seqMat, localMatrix)) {
+      nnzInterior.create(seqMat, mpiCommGlobal, *interiorMap, NULL,
                          meshDistributor->getElementObjectDb(),
                          localMatrix);
@@ -162,40 +202,69 @@ namespace AMDiS {
       }
     }
 
+    mat.resize(nCoarseMap + 1);
+
     if (localMatrix) {
       MatCreateSeqAIJ(mpiCommLocal, nRowsRankInterior, nRowsRankInterior,
                       0, nnzInterior.dnnz,
-                      &matIntInt);
+                      &mat[0][0]);
     } else {
       MatCreateAIJ(mpiCommLocal,
                    nRowsRankInterior, nRowsRankInterior,
                    nRowsOverallInterior, nRowsOverallInterior,
                    0, nnzInterior.dnnz, 0, nnzInterior.onnz,
-                   &matIntInt);
+                   &mat[0][0]);
     }
 
     if (coarseSpaceMap.size()) {
-      int nRowsRankCoarse = coarseSpaceMap[0]->getRankDofs();
-      int nRowsOverallCoarse = coarseSpaceMap[0]->getOverallDofs();
-
-      MatCreateAIJ(mpiCommGlobal,
-                   nRowsRankCoarse, nRowsRankCoarse,
-                   nRowsOverallCoarse, nRowsOverallCoarse,
-                   0, nnzCoarse.dnnz, 0, nnzCoarse.onnz,
-                   &matCoarseCoarse);
-
-      MatCreateAIJ(mpiCommGlobal,
-                   nRowsRankCoarse, nRowsRankInterior,
-                   nRowsOverallCoarse, nGlobalOverallInterior,
-                   0, nnzCoarseInt.dnnz, 0, nnzCoarseInt.onnz,
-                   &matCoarseInt);
-
-      MatCreateAIJ(mpiCommGlobal,
-                   nRowsRankInterior, nRowsRankCoarse,
-                   nGlobalOverallInterior, nRowsOverallCoarse,
+      for (int i = 0; i < nCoarseMap; i++) {
+        ParallelDofMapping* cMap = uniqueCoarseMap[i];
+        int nRowsRankCoarse = cMap->getRankDofs();
+        int nRowsOverallCoarse = cMap->getOverallDofs();
+
+        MatCreateAIJ(mpiCommGlobal,
+                     nRowsRankCoarse, nRowsRankCoarse,
+                     nRowsOverallCoarse, nRowsOverallCoarse,
+                     0, PETSC_NULL, 0, PETSC_NULL,
+                     &mat[i + 1][i + 1]);
+
+        MSG("REMOVE THIS LINE WHEN FINISHED!\n");
+        MatSetOption(mat[i + 1][i + 1], MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);
+
+        for (int j = 0; j < nCoarseMap + 1; j++) {
+          int nRowsRankMat = (j == 0 ? nRowsRankInterior : uniqueCoarseMap[j - 1]->getRankDofs());