Commit 53c5f965 authored by Thomas Witkowski's avatar Thomas Witkowski

Some work on using the nnz structure of the matrix in FETI-DP method.

parent 81d8d089
...@@ -85,6 +85,7 @@ namespace AMDiS { ...@@ -85,6 +85,7 @@ namespace AMDiS {
repartitionIthChange(20), repartitionIthChange(20),
nMeshChangesAfterLastRepartitioning(0), nMeshChangesAfterLastRepartitioning(0),
repartitioningCounter(0), repartitioningCounter(0),
repartitioningFailed(0),
debugOutputDir(""), debugOutputDir(""),
lastMeshChangeIndex(0), lastMeshChangeIndex(0),
createBoundaryDofFlag(0), createBoundaryDofFlag(0),
...@@ -1006,7 +1007,11 @@ namespace AMDiS { ...@@ -1006,7 +1007,11 @@ namespace AMDiS {
INFO(info, 8)("Parallel mesh adaption needed %.5f seconds\n", INFO(info, 8)("Parallel mesh adaption needed %.5f seconds\n",
MPI::Wtime() - first); MPI::Wtime() - first);
if (tryRepartition && if (repartitioningFailed > 0) {
MSG_DBG("Repartitioning not tried because it has failed in the past!\n");
repartitioningFailed--;
} else if (tryRepartition &&
repartitioningAllowed && repartitioningAllowed &&
nMeshChangesAfterLastRepartitioning >= repartitionIthChange) { nMeshChangesAfterLastRepartitioning >= repartitionIthChange) {
repartitionMesh(); repartitionMesh();
...@@ -1314,6 +1319,7 @@ namespace AMDiS { ...@@ -1314,6 +1319,7 @@ namespace AMDiS {
bool partitioningSucceed = bool partitioningSucceed =
partitioner->partition(elemWeights, ADAPTIVE_REPART); partitioner->partition(elemWeights, ADAPTIVE_REPART);
if (!partitioningSucceed) { if (!partitioningSucceed) {
repartitioningFailed = 20;
MSG("Mesh partitioner created empty partition!\n"); MSG("Mesh partitioner created empty partition!\n");
return; return;
} }
...@@ -1321,6 +1327,7 @@ namespace AMDiS { ...@@ -1321,6 +1327,7 @@ namespace AMDiS {
// In the case the partitioner does not create a new mesh partition, return // In the case the partitioner does not create a new mesh partition, return
// without and changes. // without and changes.
if (!partitioner->meshChanged()) { if (!partitioner->meshChanged()) {
repartitioningFailed = 20;
MSG("Mesh partition does not create a new partition!\n"); MSG("Mesh partition does not create a new partition!\n");
return; return;
} }
......
...@@ -527,6 +527,11 @@ namespace AMDiS { ...@@ -527,6 +527,11 @@ namespace AMDiS {
/// variable is used only for debug outputs. /// variable is used only for debug outputs.
int repartitioningCounter; int repartitioningCounter;
/// If repartitioning of the mesh fails, this variable has a positive value
/// that gives the number of mesh changes the mesh distributor will wait
/// before trying a new mesh repartitioning.
int repartitioningFailed;
/// Directory name where all debug output files should be written to. /// Directory name where all debug output files should be written to.
string debugOutputDir; string debugOutputDir;
......
...@@ -171,7 +171,7 @@ namespace AMDiS { ...@@ -171,7 +171,7 @@ namespace AMDiS {
ParMetisPartitioner(MPI::Intracomm *comm) ParMetisPartitioner(MPI::Intracomm *comm)
: MeshPartitioner(comm), : MeshPartitioner(comm),
parMetisMesh(NULL), parMetisMesh(NULL),
itr(1000.0) itr(1000000.0)
{} {}
~ParMetisPartitioner(); ~ParMetisPartitioner();
......
...@@ -132,6 +132,25 @@ namespace AMDiS { ...@@ -132,6 +132,25 @@ namespace AMDiS {
return dofMap[d]; return dofMap[d];
} }
/** \brief
* Searches the map for a given DOF. It does not fail if the DOF is not
* mapped by this mapping. In this case, it returns false. If the DOF is
* mapped, the result is stored and the function returns true.
*
* \param[in] dof DOF index for which a mapping is searched.
* \param[out] index In the case that the DOF is mapped, the result
* is stored here.
*/
/// Looks up \p dof in the mapping; on success stores the mapped value in
/// \p index and returns true, otherwise leaves \p index untouched and
/// returns false.
inline bool find(DegreeOfFreedom dof, MultiIndex& index)
{
  DofMap::iterator pos = dofMap.find(dof);
  if (pos != dofMap.end()) {
    index = pos->second;
    return true;
  }
  return false;
}
/// Inserts a new DOF to rank's mapping. The DOF is assumed to be owend by /// Inserts a new DOF to rank's mapping. The DOF is assumed to be owend by
/// the rank. /// the rank.
void insertRankDof(DegreeOfFreedom dof0, DegreeOfFreedom dof1 = -1) void insertRankDof(DegreeOfFreedom dof0, DegreeOfFreedom dof1 = -1)
......
...@@ -417,12 +417,11 @@ namespace AMDiS { ...@@ -417,12 +417,11 @@ namespace AMDiS {
("Should not happen!\n"); ("Should not happen!\n");
} }
// If multi level test, inform sub domain solver about coarse space.
subdomain->setDofMapping(&localDofMap, &primalDofMap); subdomain->setDofMapping(&localDofMap, &primalDofMap);
if (printTimings) { if (printTimings) {
timeCounter = MPI::Wtime() - timeCounter; timeCounter = MPI::Wtime() - timeCounter;
MSG("FETI-DP timing 01: %.5f seconds (creation of basic data structures)", MSG("FETI-DP timing 01: %.5f seconds (creation of basic data structures)\n",
timeCounter); timeCounter);
} }
} }
...@@ -693,6 +692,7 @@ namespace AMDiS { ...@@ -693,6 +692,7 @@ namespace AMDiS {
lagrangeMap.getOverallDofs(), nGlobalOverallInterior, lagrangeMap.getOverallDofs(), nGlobalOverallInterior,
2, PETSC_NULL, 2, PETSC_NULL, 2, PETSC_NULL, 2, PETSC_NULL,
&mat_lagrange); &mat_lagrange);
MatSetOption(mat_lagrange, MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);
// === Create for all duals the corresponding Lagrange constraints. On === // === Create for all duals the corresponding Lagrange constraints. On ===
// === each rank we traverse all pairs (n, m) of ranks, with n < m, === // === each rank we traverse all pairs (n, m) of ranks, with n < m, ===
...@@ -792,7 +792,9 @@ namespace AMDiS { ...@@ -792,7 +792,9 @@ namespace AMDiS {
MatCreateAIJ(mpiCommGlobal, MatCreateAIJ(mpiCommGlobal,
nRowsRankB, nRowsRankPrimal, nRowsRankB, nRowsRankPrimal,
nGlobalOverallInterior, nRowsOverallPrimal, nGlobalOverallInterior, nRowsOverallPrimal,
30, PETSC_NULL, 30, PETSC_NULL, &matBPi); 150, PETSC_NULL, 150, PETSC_NULL, &matBPi);
MatSetOption(matBPi, MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);
Mat matPrimal; Mat matPrimal;
PetscInt nCols; PetscInt nCols;
...@@ -1225,23 +1227,30 @@ namespace AMDiS { ...@@ -1225,23 +1227,30 @@ namespace AMDiS {
int nRowsDual = dualDofMap.getRankDofs(); int nRowsDual = dualDofMap.getRankDofs();
MatCreateSeqAIJ(PETSC_COMM_SELF, MatCreateSeqAIJ(PETSC_COMM_SELF,
nRowsDual, nRowsDual, 60, PETSC_NULL, nRowsDual, nRowsDual, 100, PETSC_NULL,
&mat_duals_duals); &mat_duals_duals);
MatSetOption(mat_duals_duals, MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);
if (fetiPreconditioner == FETI_DIRICHLET) { if (fetiPreconditioner == FETI_DIRICHLET) {
int nRowsInterior = interiorDofMap.getRankDofs(); int nRowsInterior = interiorDofMap.getRankDofs();
MatCreateSeqAIJ(PETSC_COMM_SELF, MatCreateSeqAIJ(PETSC_COMM_SELF,
nRowsInterior, nRowsInterior, 60, PETSC_NULL, nRowsInterior, nRowsInterior, 100, PETSC_NULL,
&mat_interior_interior); &mat_interior_interior);
MatSetOption(mat_interior_interior,
MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);
MatCreateSeqAIJ(PETSC_COMM_SELF, MatCreateSeqAIJ(PETSC_COMM_SELF,
nRowsInterior, nRowsDual, 60, PETSC_NULL, nRowsInterior, nRowsDual, 100, PETSC_NULL,
&mat_interior_duals); &mat_interior_duals);
MatSetOption(mat_interior_duals,
MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);
MatCreateSeqAIJ(PETSC_COMM_SELF, MatCreateSeqAIJ(PETSC_COMM_SELF,
nRowsDual, nRowsInterior, 60, PETSC_NULL, nRowsDual, nRowsInterior, 100, PETSC_NULL,
&mat_duals_interior); &mat_duals_interior);
MatSetOption(mat_duals_interior,
MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);
} }
// === Prepare traverse of sequentially created matrices. === // === Prepare traverse of sequentially created matrices. ===
......
...@@ -33,31 +33,10 @@ namespace AMDiS { ...@@ -33,31 +33,10 @@ namespace AMDiS {
double wtime = MPI::Wtime(); double wtime = MPI::Wtime();
// === If required, recompute non zero structure of the matrix. ===
// === Check if mesh was changed and, in this case, recompute matrix === if (checkMeshChange(mat))
// === nnz structure and matrix indices. ===
int recvAllValues = 0;
int sendValue =
static_cast<int>(meshDistributor->getLastMeshChangeIndex() != lastMeshNnz);
mpiCommGlobal.Allreduce(&sendValue, &recvAllValues, 1, MPI_INT, MPI_SUM);
if (!d_nnz || recvAllValues != 0 || alwaysCreateNnzStructure) {
vector<const FiniteElemSpace*> feSpaces = getFeSpaces(mat);
interiorMap->setComputeMatIndex(true, true);
interiorMap->update(feSpaces);
if (d_nnz) {
delete [] d_nnz;
d_nnz = NULL;
delete [] o_nnz;
o_nnz = NULL;
}
updateSubdomainData();
createPetscNnzStructure(mat); createPetscNnzStructure(mat);
lastMeshNnz = meshDistributor->getLastMeshChangeIndex();
}
// === Create PETSc vector (solution and a temporary vector). === // === Create PETSc vector (solution and a temporary vector). ===
...@@ -72,7 +51,7 @@ namespace AMDiS { ...@@ -72,7 +51,7 @@ namespace AMDiS {
MatCreateAIJ(mpiCommGlobal, nRankRows, nRankRows, MatCreateAIJ(mpiCommGlobal, nRankRows, nRankRows,
nOverallRows, nOverallRows, nOverallRows, nOverallRows,
0, d_nnz, 0, o_nnz, &matIntInt); 0, matInteriorDiagNnz, 0, matInteriorOffdiagNnz, &matIntInt);
MatSetOption(matIntInt, MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE); MatSetOption(matIntInt, MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);
...@@ -149,14 +128,32 @@ namespace AMDiS { ...@@ -149,14 +128,32 @@ namespace AMDiS {
int nRowsRankInterior = interiorMap->getRankDofs(); int nRowsRankInterior = interiorMap->getRankDofs();
int nRowsOverallInterior = interiorMap->getOverallDofs(); int nRowsOverallInterior = interiorMap->getOverallDofs();
if (subdomainLevel == 0) { // === If required, recompute non zero structure of the matrix. ===
bool localMatrix = (subdomainLevel == 0);
if (checkMeshChange(mat, localMatrix)) {
createPetscNnzStructureWithCoarseSpace(mat,
*interiorMap,
matInteriorDiagNnz,
matInteriorOffdiagNnz,
localMatrix);
if (coarseSpaceMap)
createPetscNnzStructureWithCoarseSpace(mat,
*coarseSpaceMap,
matCoarseDiagNnz,
matCoarseOffdiagNnz);
}
if (localMatrix) {
MatCreateSeqAIJ(mpiCommLocal, nRowsRankInterior, nRowsRankInterior, MatCreateSeqAIJ(mpiCommLocal, nRowsRankInterior, nRowsRankInterior,
60, PETSC_NULL, &matIntInt); 0, matInteriorDiagNnz, &matIntInt);
} else { } else {
MatCreateAIJ(mpiCommLocal, MatCreateAIJ(mpiCommLocal,
nRowsRankInterior, nRowsRankInterior, nRowsRankInterior, nRowsRankInterior,
nRowsOverallInterior, nRowsOverallInterior, nRowsOverallInterior, nRowsOverallInterior,
60, PETSC_NULL, 60, PETSC_NULL, &matIntInt); 0, matInteriorDiagNnz, 0, matInteriorOffdiagNnz,
&matIntInt);
} }
if (coarseSpaceMap) { if (coarseSpaceMap) {
...@@ -166,17 +163,23 @@ namespace AMDiS { ...@@ -166,17 +163,23 @@ namespace AMDiS {
MatCreateAIJ(mpiCommGlobal, MatCreateAIJ(mpiCommGlobal,
nRowsRankCoarse, nRowsRankCoarse, nRowsRankCoarse, nRowsRankCoarse,
nRowsOverallCoarse, nRowsOverallCoarse, nRowsOverallCoarse, nRowsOverallCoarse,
60, PETSC_NULL, 60, PETSC_NULL, &matCoarseCoarse); 0, matCoarseDiagNnz, 0, matCoarseOffdiagNnz,
&matCoarseCoarse);
MatCreateAIJ(mpiCommGlobal, MatCreateAIJ(mpiCommGlobal,
nRowsRankCoarse, nRowsRankInterior, nRowsRankCoarse, nRowsRankInterior,
nRowsOverallCoarse, nGlobalOverallInterior, nRowsOverallCoarse, nGlobalOverallInterior,
60, PETSC_NULL, 60, PETSC_NULL, &matCoarseInt); 100, PETSC_NULL, 100, PETSC_NULL,
&matCoarseInt);
MatSetOption(matCoarseInt, MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);
MatCreateAIJ(mpiCommGlobal, MatCreateAIJ(mpiCommGlobal,
nRowsRankInterior, nRowsRankCoarse, nRowsRankInterior, nRowsRankCoarse,
nGlobalOverallInterior, nRowsOverallCoarse, nGlobalOverallInterior, nRowsOverallCoarse,
60, PETSC_NULL, 60, PETSC_NULL, &matIntCoarse); 100, PETSC_NULL, 100, PETSC_NULL,
// 0, matInteriorDiagNnz, 0, matCoarseOffdiagNnz,
&matIntCoarse);
MatSetOption(matIntCoarse, MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);
} }
// === Prepare traverse of sequentially created matrices. === // === Prepare traverse of sequentially created matrices. ===
...@@ -564,6 +567,52 @@ namespace AMDiS { ...@@ -564,6 +567,52 @@ namespace AMDiS {
} }
/// Checks (collectively over all ranks) whether the mesh has changed since
/// the nnz structure was last computed. If so, updates the interior DOF
/// mapping, discards all cached nnz arrays and the subdomain data, records
/// the new mesh change index and returns true; otherwise returns false.
///
/// \param[in] mat          Matrix of DOF matrices used to derive FE spaces.
/// \param[in] localMatrix  If true, matrix indices are computed for a
///                         rank-local (sequential) matrix.
bool PetscSolverGlobalMatrix::checkMeshChange(Matrix<DOFMatrix*> *mat,
                                              bool localMatrix)
{
  FUNCNAME("PetscSolverGlobalMatrix::checkMeshChange()");

  // Every rank contributes 1 if its mesh change index differs from the one
  // the nnz structure was built for; the sum tells us if ANY rank changed.
  int sendValue =
    static_cast<int>(meshDistributor->getLastMeshChangeIndex() != lastMeshNnz);
  int recvAllValues = 0;
  mpiCommGlobal.Allreduce(&sendValue, &recvAllValues, 1, MPI_INT, MPI_SUM);

  // Nothing to do: nnz data exists, no rank saw a mesh change, and no
  // unconditional recomputation was requested.
  if (matInteriorDiagNnz && recvAllValues == 0 && !alwaysCreateNnzStructure)
    return false;

  vector<const FiniteElemSpace*> feSpaces = getFeSpaces(mat);
  interiorMap->setComputeMatIndex(true, !localMatrix);
  interiorMap->update(feSpaces);

  // Drop all cached nnz arrays; delete[] on a NULL pointer is a no-op,
  // so no guards are required.
  delete [] matInteriorDiagNnz;
  matInteriorDiagNnz = NULL;
  delete [] matInteriorOffdiagNnz;
  matInteriorOffdiagNnz = NULL;
  delete [] matCoarseDiagNnz;
  matCoarseDiagNnz = NULL;
  delete [] matCoarseOffdiagNnz;
  matCoarseOffdiagNnz = NULL;

  updateSubdomainData();
  lastMeshNnz = meshDistributor->getLastMeshChangeIndex();

  return true;
}
void PetscSolverGlobalMatrix::createFieldSplit(PC pc) void PetscSolverGlobalMatrix::createFieldSplit(PC pc)
{ {
FUNCNAME("PetscSolverGlobalMatrix::createFieldSplit()"); FUNCNAME("PetscSolverGlobalMatrix::createFieldSplit()");
...@@ -985,18 +1034,17 @@ namespace AMDiS { ...@@ -985,18 +1034,17 @@ namespace AMDiS {
{ {
FUNCNAME("PetscSolverGlobalMatrix::createPetscNnzStructure()"); FUNCNAME("PetscSolverGlobalMatrix::createPetscNnzStructure()");
TEST_EXIT_DBG(!d_nnz)("There is something wrong!\n"); TEST_EXIT_DBG(!matInteriorDiagNnz)("There is something wrong!\n");
TEST_EXIT_DBG(!o_nnz)("There is something wrong!\n");
vector<const FiniteElemSpace*> feSpaces = getFeSpaces(mat); vector<const FiniteElemSpace*> feSpaces = getFeSpaces(mat);
int nRankRows = interiorMap->getRankDofs(); int nRankRows = interiorMap->getRankDofs();
int rankStartIndex = interiorMap->getStartDofs(); int rankStartIndex = interiorMap->getStartDofs();
d_nnz = new int[nRankRows]; matInteriorDiagNnz = new int[nRankRows];
o_nnz = new int[nRankRows]; matInteriorOffdiagNnz = new int[nRankRows];
for (int i = 0; i < nRankRows; i++) { for (int i = 0; i < nRankRows; i++) {
d_nnz[i] = 0; matInteriorDiagNnz[i] = 0;
o_nnz[i] = 0; matInteriorOffdiagNnz[i] = 0;
} }
using mtl::tag::row; using mtl::tag::nz; using mtl::begin; using mtl::end; using mtl::tag::row; using mtl::tag::nz; using mtl::begin; using mtl::end;
...@@ -1091,13 +1139,13 @@ namespace AMDiS { ...@@ -1091,13 +1139,13 @@ namespace AMDiS {
if (value(*icursor) != 0.0 || petscRowIdx == petscColIdx) { if (value(*icursor) != 0.0 || petscRowIdx == petscColIdx) {
// The row DOF is a rank DOF, if also the column is a rank DOF, // The row DOF is a rank DOF, if also the column is a rank DOF,
// increment the d_nnz values for this row, otherwise the // increment the matInteriorDiagNnz values for this row,
// o_nnz value. // otherwise the matInteriorDiagNnz value.
if (petscColIdx >= rankStartIndex && if (petscColIdx >= rankStartIndex &&
petscColIdx < rankStartIndex + nRankRows) petscColIdx < rankStartIndex + nRankRows)
d_nnz[localPetscRowIdx]++; matInteriorDiagNnz[localPetscRowIdx]++;
else else
o_nnz[localPetscRowIdx]++; matInteriorOffdiagNnz[localPetscRowIdx]++;
} }
} }
} else { } else {
...@@ -1154,9 +1202,242 @@ namespace AMDiS { ...@@ -1154,9 +1202,242 @@ namespace AMDiS {
r, localRowIdx, nRankRows, it->first); r, localRowIdx, nRankRows, it->first);
if (c < rankStartIndex || c >= rankStartIndex + nRankRows) if (c < rankStartIndex || c >= rankStartIndex + nRankRows)
o_nnz[localRowIdx]++; matInteriorOffdiagNnz[localRowIdx]++;
else
matInteriorDiagNnz[localRowIdx]++;
}
}
}
// The above algorithm for calculating the number of nnz per row over-
// approximates the value, i.e., the number is always equal to or larger
// than the real number of nnz values in the global parallel matrix. For
// small matrices, the problem may arise that the result is larger than
// the number of elements in a row. This is fixed in the following.
if (nRankRows < 100)
for (int i = 0; i < nRankRows; i++)
matInteriorDiagNnz[i] = std::min(matInteriorDiagNnz[i], nRankRows);
}
void PetscSolverGlobalMatrix::createPetscNnzStructureWithCoarseSpace(Matrix<DOFMatrix*> *mat,
ParallelDofMapping &dofMap,
int*& diagNnz,
int*& offdiagNnz,
bool localMatrix)
{
FUNCNAME("PetscSolverGlobalMatrix::createPetscNnzStructure()");
TEST_EXIT_DBG(!diagNnz)("There is something wrong!\n");
vector<const FiniteElemSpace*> feSpaces = getFeSpaces(mat);
int nRankRows = dofMap.getRankDofs();
int rankStartIndex = dofMap.getStartDofs();
diagNnz = new int[nRankRows];
for (int i = 0; i < nRankRows; i++)
diagNnz[i] = 0;
if (localMatrix == false) {
offdiagNnz = new int[nRankRows];
for (int i = 0; i < nRankRows; i++)
offdiagNnz[i] = 0;
}
using mtl::tag::row; using mtl::tag::nz; using mtl::begin; using mtl::end;
namespace traits = mtl::traits;
typedef DOFMatrix::base_matrix_type Matrix;
typedef vector<pair<int, int> > MatrixNnzEntry;
typedef map<int, DofContainer> RankToDofContainer;
// Stores to each rank a list of nnz entries (i.e. pairs of row and column
// index) that this rank will send to. These nnz entries will be assembled
// on this rank, but because the row DOFs are not DOFs of this rank they
// will be sent to the owner of the row DOFs.
map<int, MatrixNnzEntry> sendMatrixEntry;
// Maps each DOF that must be sent to another rank to the rank number of
// the receiving rank.
map<pair<DegreeOfFreedom, int>, int> sendDofToRank;
// First, create for all ranks, to which we send data to, MatrixNnzEntry
// object with 0 entries.
for (unsigned int i = 0; i < feSpaces.size(); i++) {
for (DofComm::Iterator it(meshDistributor->getDofComm().getRecvDofs(), feSpaces[i]);
!it.end(); it.nextRank()) {
sendMatrixEntry[it.getRank()].resize(0);
for (; !it.endDofIter(); it.nextDof())
sendDofToRank[make_pair(it.getDofIndex(), i)] = it.getRank();
}
}
// Create list of ranks from which we receive data from.
std::set<int> recvFromRank;
for (unsigned int i = 0; i < feSpaces.size(); i++)
for (DofComm::Iterator it(meshDistributor->getDofComm().getSendDofs(), feSpaces[i]);
!it.end(); it.nextRank())
recvFromRank.insert(it.getRank());
// === Traverse matrices to create nnz data. ===
int nComponents = mat->getNumRows();
for (int i = 0; i < nComponents; i++) {
for (int j = 0; j < nComponents; j++) {
if (!(*mat)[i][j])
continue;
TEST_EXIT_DBG((*mat)[i][j]->getRowFeSpace() == feSpaces[i])
("Should not happen!\n");
TEST_EXIT_DBG((*mat)[i][j]->getColFeSpace() == feSpaces[j])
("Should not happen!\n");
Matrix bmat = (*mat)[i][j]->getBaseMatrix();
traits::col<Matrix>::type col(bmat);
traits::const_value<Matrix>::type value(bmat);
typedef traits::range_generator<row, Matrix>::type cursor_type;
typedef traits::range_generator<nz, cursor_type>::type icursor_type;
MultiIndex rowDofIndex;
for (cursor_type cursor = begin<row>(bmat),
cend = end<row>(bmat); cursor != cend; ++cursor) {
if (dofMap[feSpaces[i]].find(*cursor, rowDofIndex) == false)
continue;
// The corresponding global matrix row index of the current row DOF.
int petscRowIdx = 0;
if (localMatrix) {
petscRowIdx = dofMap.getLocalMatIndex(i, *cursor);
} else {
if (dofMap.isMatIndexFromGlobal())
petscRowIdx = dofMap.getMatIndex(i, rowDofIndex.global);
else
petscRowIdx = dofMap.getMatIndex(i, *cursor);
}
if (localMatrix || dofMap[feSpaces[i]].isRankDof(*cursor)) {
// === The current row DOF is a rank DOF, so create the ===
// === corresponding nnz values directly on rank's nnz data. ===
// This is the local row index of the local PETSc matrix.
int localPetscRowIdx = petscRowIdx;
if (localMatrix == false) {
localPetscRowIdx -= rankStartIndex;