Commit 5d66f8f9 authored by Thomas Witkowski

Nothing really special

parent 2bec218f
@@ -1022,34 +1022,61 @@ namespace AMDiS {
     printImbalanceFactor();
   }

-  void MeshDistributor::printImbalanceFactor()
+  void MeshDistributor::getImbalanceFactor(double &imbalance,
+                                           int &minDofs,
+                                           int &maxDofs,
+                                           int &sumDofs)
   {
-    FUNCNAME("MeshDistributor::printImbalanceFactor()");
+    FUNCNAME("MeshDistributor::getImbalanceFactor()");

     vector<int> nDofsInRank(mpiSize);
     int nDofs = mesh->getDofAdmin(0).getUsedDofs();
     mpiComm.Gather(&nDofs, 1, MPI_INT, &(nDofsInRank[0]), 1, MPI_INT, 0);

     if (mpiRank == 0) {
-      int nOverallDofs = 0;
-      int maxDofs = numeric_limits<int>::min();
-      int minDofs = numeric_limits<int>::max();
+      sumDofs = 0;
+      minDofs = numeric_limits<int>::max();
+      maxDofs = numeric_limits<int>::min();
+
       for (int i = 0; i < mpiSize; i++) {
-        nOverallDofs += nDofsInRank[i];
-        maxDofs = std::max(maxDofs, nDofsInRank[i]);
+        sumDofs += nDofsInRank[i];
         minDofs = std::min(minDofs, nDofsInRank[i]);
+        maxDofs = std::max(maxDofs, nDofsInRank[i]);
       }

-      // int avrgDofs = nOverallDofs / mpiSize;
-      // double imbalance0 =
-      //   (static_cast<double>(maxDofs - avrgDofs) / avrgDofs) * 100.0;
-      double imbalance1 = (static_cast<double>(maxDofs) / minDofs - 1.0) * 100.0;
-      MSG("Imbalancing factor: %.1f\n", imbalance1);
+      int avrgDofs = sumDofs / mpiSize;
+      imbalance = ((static_cast<double>(maxDofs) / avrgDofs) - 1.0);
     }
   }

+
+  double MeshDistributor::getImbalanceFactor()
+  {
+    double factor;
+    int a = 0;
+    int b = 0;
+    int c = 0;
+    getImbalanceFactor(factor, a, b, c);
+    return factor;
+  }
+
+
+  void MeshDistributor::printImbalanceFactor()
+  {
+    FUNCNAME("MeshDistributor::printImbalanceFactor()");
+
+    double imbalanceFactor = 0.0;
+    int minDofs = 0;
+    int maxDofs = 0;
+    int sumDofs = 0;
+
+    getImbalanceFactor(imbalanceFactor, minDofs, maxDofs, sumDofs);
+    if (mpiRank == 0)
+      MSG("Imbalancing factor: %.2f  [ minDofs = %d, maxDofs = %d, sumDofs = %d ]\n",
+          imbalanceFactor * 100.0, minDofs, maxDofs, sumDofs);
+  }
+
   bool MeshDistributor::checkAndAdaptBoundary(RankToBoundMap &allBound)
   {
     FUNCNAME("MeshDistributor::checkAndAdaptBoundary()");
@@ -1217,34 +1244,18 @@ namespace AMDiS {
   void MeshDistributor::repartitionMesh()
   {
     FUNCNAME("MeshDistributor::repartitionMesh()");

-    // === First we check if the rank with the maximum number of DOFs has at  ===
-    // === least 20% more DOFs than the rank with the minimum number of DOFs. ===
-    // === In this case, the mesh will be repartition.                        ===
-
-    double inbalanceFactor = 1.2;
-    Parameters::get("parallel->repartitioning->inbalance", inbalanceFactor);
+    // === First, check if the load is unbalanced on the ranks. ===

     int repartitioning = 0;
-    vector<int> nDofsInRank(mpiSize);
-    int nDofs = mesh->getDofAdmin(0).getUsedDofs();
-    mpiComm.Gather(&nDofs, 1, MPI_INT, &(nDofsInRank[0]), 1, MPI_INT, 0);
+    double imbalanceFactor = getImbalanceFactor();

     if (mpiRank == 0) {
-      int nOverallDofs = 0;
-      int minDofs = numeric_limits<int>::max();
-      int maxDofs = numeric_limits<int>::min();
-      for (int i = 0; i < mpiSize; i++) {
-        nOverallDofs += nDofsInRank[i];
-        minDofs = std::min(minDofs, nDofsInRank[i]);
-        maxDofs = std::max(maxDofs, nDofsInRank[i]);
-      }
-
-      MSG("Overall DOFs: %d  Min DOFs: %d  Max DOFs: %d\n",
-          nOverallDofs, minDofs, maxDofs);
-
-      if (static_cast<double>(maxDofs) / static_cast<double>(minDofs) >
-          inbalanceFactor)
+      double imbalanceRepartitionBound = 0.2;
+      Parameters::get("parallel->repartitioning->imbalance",
+                      imbalanceRepartitionBound);
+
+      if (imbalanceFactor > imbalanceRepartitionBound)
         repartitioning = 1;

       mpiComm.Bcast(&repartitioning, 1, MPI_INT, 0);
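Note that the run-time parameter is renamed and its meaning changes with it: the removed parallel->repartitioning->inbalance bounded the ratio maxDofs / minDofs (default 1.2), whereas the new parallel->repartitioning->imbalance bounds the fraction returned by getImbalanceFactor() (default 0.2, i.e. repartition once the busiest rank carries 20% more DOFs than the average). A hedged sketch of the corresponding init-file entry, assuming the usual key: value syntax of AMDiS parameter files:

parallel->repartitioning->imbalance: 0.3

An init file that still sets the old inbalance key would silently fall back to the new default of 0.2 after this commit.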
@@ -1283,6 +1294,19 @@ namespace AMDiS {
       }
     }

+    double maxWeight = -1.0;
+    double sumWeight = 0.0;
+    for (map<int, double>::iterator it = elemWeights.begin();
+         it != elemWeights.end(); ++it) {
+      maxWeight = std::max(maxWeight, it->second);
+      sumWeight += it->second;
+    }
+
+    mpi::globalMax(maxWeight);
+    mpi::globalAdd(sumWeight);
+
+    MSG("Partition weight: sum = %e max = %e\n", sumWeight, maxWeight);
+
     // === Run mesh partitioner to calculate a new mesh partitioning. ===

     partitioner->setLocalGlobalDofMap(&(dofMap[feSpaces[0]].getMap()));
@@ -1298,7 +1322,7 @@ namespace AMDiS {
     // without any changes.
     if (!partitioner->meshChanged()) {
       MSG("Mesh partition does not create a new partition!\n");
       return;
     }

     TEST_EXIT_DBG(!(partitioner->getSendElements().size() == mesh->getMacroElements().size() &&
@@ -1514,27 +1538,7 @@ namespace AMDiS {
     check3dValidMesh();

     MSG("Mesh repartitioning needed %.5f seconds\n", MPI::Wtime() - timePoint);
-
-    // === Print DOF information to screen. ===
-
-    nDofs = mesh->getDofAdmin(0).getUsedDofs();
-    mpiComm.Gather(&nDofs, 1, MPI_INT, &(nDofsInRank[0]), 1, MPI_INT, 0);
-
-    if (mpiRank == 0) {
-      int nOverallDofs = 0;
-      int minDofs = numeric_limits<int>::max();
-      int maxDofs = numeric_limits<int>::min();
-      for (int i = 0; i < mpiSize; i++) {
-        nOverallDofs += nDofsInRank[i];
-        minDofs = std::min(minDofs, nDofsInRank[i]);
-        maxDofs = std::max(maxDofs, nDofsInRank[i]);
-      }
-
-      MSG("Overall DOFs: %d  Min DOFs: %d  Max DOFs: %d\n",
-          nOverallDofs, minDofs, maxDofs);
-    }
   }
...
@@ -97,22 +97,25 @@ namespace AMDiS {
      */
     void checkMeshChange(bool tryRepartition = true);

-    /** \brief
-     * Checks if is required to repartition the mesh. If this is the case, a new
-     * partition will be created and the mesh will be redistributed between the
-     * ranks.
-     */
+    /// Checks if it is required to repartition the mesh. If this is the case,
+    /// a new partition will be created and the mesh will be redistributed
+    /// between the ranks.
     void repartitionMesh();

+    void getImbalanceFactor(double &imbalance,
+                            int &minDofs,
+                            int &maxDofs,
+                            int &sumDofs);
+
+    double getImbalanceFactor();
+
     /// Calculates the imbalancing factor and prints it to screen.
     void printImbalanceFactor();

-    /** \brief
-     * Test, if the mesh consists of macro elements only. The mesh partitioning
-     * of the parallelization works for macro meshes only and would fail, if the
-     * mesh is already refined in some way. Therefore, this function will exit
-     * the program if it finds a non macro element in the mesh.
-     */
+    /// Tests if the mesh consists of macro elements only. The mesh partitioning
+    /// of the parallelization works for macro meshes only and would fail if the
+    /// mesh is already refined in some way. Therefore, this function will exit
+    /// the program if it finds a non-macro element in the mesh.
     void testForMacroMesh();

     /// Set for each element on the partitioning level the number of
...
@@ -290,17 +290,18 @@ namespace AMDiS {
     vector<double> tpwgts(mpiSize);
     double ubvec = 1.05;
-    int options[4] = {0, 0, 15, 1}; // default options
+    int options[4] = {0, 0, 15, PARMETIS_PSR_COUPLED}; // default options
     int edgecut = -1;
     vector<int> part(nElements);

     // set tpwgts
     for (int i = 0; i < mpiSize; i++)
-      tpwgts[i] = 1.0 / nparts;
+      tpwgts[i] = 1.0 / static_cast<double>(nparts);

     float scale = 10000.0 / maxWgt;
     for (int i = 0; i < nElements; i++)
-      wgts[i] = static_cast<int>(floatWgts[i] * scale);
+      wgts[i] = floatWgts[i];
+      // wgts[i] = static_cast<int>(floatWgts[i] * scale);

     // === Start ParMETIS. ===
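Two remarks on this hunk. PARMETIS_PSR_COUPLED is defined as 1 in parmetis.h and is the intended value for the PMV3_OPTION_PSR entry of the options array (presumably passed to ParMETIS_V3_AdaptiveRepart further down, outside this hunk), so that change replaces a magic number without altering behavior. The weight change is behavioral, though: assuming wgts stores integers, as the removed static_cast<int> suggests, assigning floatWgts[i] directly truncates toward zero, so element weights below 1.0 become 0. A small sketch of that truncation with invented weights (names mirror the code above, values are made up):

#include <cstdio>
#include <vector>

int main()
{
  // Invented per-element weights; in the partitioner they come from elemWeights.
  std::vector<double> floatWgts = {0.4, 1.7, 2.0};
  std::vector<int> wgts(floatWgts.size());

  for (int i = 0; i < static_cast<int>(floatWgts.size()); i++)
    wgts[i] = static_cast<int>(floatWgts[i]);  // the committed line relies on the
                                               // implicit conversion, which truncates
                                               // the same way: 0.4 -> 0, 1.7 -> 1

  std::printf("%d %d %d\n", wgts[0], wgts[1], wgts[2]);  // prints: 0 1 2
  return 0;
}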
...
@@ -86,16 +86,6 @@ namespace AMDiS {
     double wtime = MPI::Wtime();

-#if 0
-    double vm, rss;
-    processMemUsage(vm, rss);
-    MSG("STAGE 1\n");
-    MSG("My memory usage is VM = %.1f MB   RSS = %.1f MB\n", vm, rss);
-    mpi::globalAdd(vm);
-    mpi::globalAdd(rss);
-    MSG("Overall memory usage is VM = %.1f MB   RSS = %.1f MB\n", vm, rss);
-#endif
-
     if (createMatrixData) {
       petscSolver->setMeshDistributor(meshDistributor,
                                       meshDistributor->getMpiComm(),
@@ -106,33 +96,19 @@ namespace AMDiS {
     petscSolver->fillPetscRhs(rhs);

-#if 0
-    processMemUsage(vm, rss);
-    MSG("STAGE 2\n");
-    MSG("My memory usage is VM = %.1f MB   RSS = %.1f MB\n", vm, rss);
-    mpi::globalAdd(vm);
-    mpi::globalAdd(rss);
-    MSG("Overall memory usage is VM = %.1f MB   RSS = %.1f MB\n", vm, rss);
-#endif
+    INFO(info, 8)("creation of parallel data structures needed %.5f seconds\n",
+                  MPI::Wtime() - wtime);
+
+    wtime = MPI::Wtime();

     petscSolver->solvePetscMatrix(*solution, adaptInfo);

+    INFO(info, 8)("solution of discrete system needed %.5f seconds\n",
+                  MPI::Wtime() - wtime);
+
     petscSolver->destroyVectorData();

     if (!storeMatrixData)
       petscSolver->destroyMatrixData();

-#if 0
-    processMemUsage(vm, rss);
-    MSG("STAGE 3\n");
-    MSG("My memory usage is VM = %.1f MB   RSS = %.1f MB\n", vm, rss);
-    mpi::globalAdd(vm);
-    mpi::globalAdd(rss);
-    MSG("Overall memory usage is VM = %.1f MB   RSS = %.1f MB\n", vm, rss);
-#endif
-
-    INFO(info, 8)("solution of discrete system needed %.5f seconds\n",
-                  MPI::Wtime() - wtime);
   }
 }
@@ -126,7 +126,9 @@ namespace AMDiS {
     if (!zeroStartVector)
       KSPSetInitialGuessNonzero(kspInterior, PETSC_TRUE);

-    MSG("Fill petsc matrix needed %.5f seconds\n", MPI::Wtime() - wtime);
+#if (DEBUG != 0)
+    MSG("Fill petsc matrix 3 needed %.5f seconds\n", MPI::Wtime() - wtime);
+#endif
   }
@@ -640,17 +642,18 @@ namespace AMDiS {
     // Get periodic mapping object
     PeriodicMap &perMap = meshDistributor->getPeriodicMap();

-    // === Traverse all rows of the dof matrix and insert row wise the values ===
+    const FiniteElemSpace *rowFe = mat->getRowFeSpace();
+    const FiniteElemSpace *colFe = mat->getColFeSpace();
+    DofMap& rowMap = (*interiorMap)[rowFe].getMap();
+    DofMap& colMap = (*interiorMap)[colFe].getMap();
+
+    // === Traverse all rows of the DOF matrix and insert row wise the values ===
     // === to the PETSc matrix. ===

     for (cursor_type cursor = begin<row>(mat->getBaseMatrix()),
            cend = end<row>(mat->getBaseMatrix()); cursor != cend; ++cursor) {
-      const FiniteElemSpace *rowFe = mat->getRowFeSpace();
-      const FiniteElemSpace *colFe = mat->getColFeSpace();
-
       // Global index of the current row DOF.
-      int globalRowDof = (*interiorMap)[rowFe][*cursor].global;
+      int globalRowDof = rowMap[*cursor].global;
       // Test if the current row DOF is a periodic DOF.
       bool periodicRow = perMap.isPeriodic(rowFe, globalRowDof);
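The other change in this hunk is a small optimization: the row/column FE spaces and the corresponding DofMap references are resolved once before the traversal loop instead of once per row (and per column entry). A minimal sketch of the same hoisting pattern; FeSpace and DofMap below are hypothetical stand-ins, not the AMDiS definitions:

#include <map>
#include <vector>

struct FeSpace {};                  // placeholder for the AMDiS FE space type
typedef std::map<int, int> DofMap;  // placeholder for the local-to-global DOF map

// Old pattern: the per-FE-space map is re-resolved on every iteration.
int sumSlow(std::map<const FeSpace*, DofMap> &interiorMap,
            const FeSpace *rowFe, const std::vector<int> &rows)
{
  int sum = 0;
  for (int r : rows)
    sum += interiorMap[rowFe][r];       // outer map lookup repeated per row
  return sum;
}

// New pattern: the DofMap reference is hoisted out of the loop,
// mirroring the rowMap/colMap caching introduced above.
int sumFast(std::map<const FeSpace*, DofMap> &interiorMap,
            const FeSpace *rowFe, const std::vector<int> &rows)
{
  DofMap &rowMap = interiorMap[rowFe];  // resolved once
  int sum = 0;
  for (int r : rows)
    sum += rowMap[r];
  return sum;
}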
@@ -668,7 +671,7 @@ namespace AMDiS {
            icursor != icend; ++icursor) {
         // Global index of the current column index.
-        int globalColDof = (*interiorMap)[colFe][col(*icursor)].global;
+        int globalColDof = colMap[col(*icursor)].global;
         // Test if the current col dof is a periodic dof.
         bool periodicCol = perMap.isPeriodic(colFe, globalColDof);
         // Get PETSc's mat col index.
@@ -680,8 +683,8 @@ namespace AMDiS {
         if (!periodicCol) {
           // Calculate the exact position of the column index in the PETSc matrix.
           cols.push_back(colIndex);
           values.push_back(value(*icursor));
         } else {
           // === Row index is not periodic, but column index is. ===
@@ -727,8 +730,8 @@ namespace AMDiS {
         }
       }

       MatSetValues(matIntInt, 1, &rowIndex, cols.size(),
                    &(cols[0]), &(values[0]), ADD_VALUES);
     } else {
       // === Row DOF index is periodic. ===
...