Commit e2994995 authored by Thomas Witkowski

Set Sebastian mode in mesh distributor to handle Dirichlet and periodic...

Set Sebastian mode in the mesh distributor to handle Dirichlet and periodic boundaries in a proper way. Must be generalized.
parent f0ee47dd
......@@ -108,6 +108,9 @@ if(AMDIS_NEED_BDDCML)
set(AMDIS_BDDCML_LIB @BDDCML_LIB@)
list(APPEND AMDIS_LIBRARIES ${AMDIS_BDDCML_LIB})
set(AMDIS_BDDCML_LINK_LIST @BDDCML_LINK_LIST@)
list(APPEND AMDIS_LIBRARIES ${AMDIS_BDDCML_LINK_LIST})
endif(AMDIS_NEED_BDDCML)
......
......@@ -35,10 +35,9 @@ endif()
SET(ENABLE_PARALLEL_DOMAIN "OFF" CACHE STRING "use parallel domain decomposition. please set to one of: PMTL, PETSC, OFF" )
option(USE_PETSC_DEV false)
option(ENABLE_ZOLTAN false)
option(ENABLE_UMFPACK "use umfpack solver" false)
option(ENABLE_UMFPACK "Use of UMFPACK solver" false)
option(ENABLE_BDDCML "Use of BDDCML library" false)
if(ENABLE_INTEL)
Message("please set the icc manually")
INCLUDE (CMakeForceCompiler)
......@@ -285,6 +284,8 @@ endif(ENABLE_UMFPACK)
if(ENABLE_BDDCML)
SET(BDDCML_LINK_LIST "" CACHE STRING "Further libraries to be linked with BDDCML")
find_file(BDDCML_H bddcml_interface_c.h
HINTS ENV CPATH
DOC "Header bddcml_interface_c.h for the BDDCML library.")
......@@ -307,6 +308,7 @@ if(ENABLE_BDDCML)
else()
message(FATAL_ERROR "Could not find the BDDCML library")
endif()
endif(ENABLE_BDDCML)
......
......@@ -76,6 +76,9 @@ namespace AMDiS {
int verboseLevel = 2;
int numbase = 0;
// MSG("call to \"bddcml_init\" with the following arguments:\n");
// MSG(" %d, [%d, %d], %d, %d, %d, %d, %d\n", nLevel, nSubdomains[0], nSubdomains[1], nLevel, nSubPerProc, c2f, verboseLevel, numbase);
bddcml_init(&nLevel, nSubdomains, &nLevel, &nSubPerProc,
&c2f, &verboseLevel, &numbase);
......@@ -225,11 +228,56 @@ namespace AMDiS {
// Number of non-zero entries in matrix
int la = i_sparse.size();
MSG("LOCAL LA = %d\n", la);
// MSG("LOCAL LA = %d\n", la);
// Matrix is assembled
int is_assembled_int = 0;
/*
string tmp="";
MSG("call to \"bddcml_upload_subdomain_data\" with the following arguments (each in one line):\n");
MSG(" nelem = %d\n", nelem);
MSG(" nnod = %d\n", nnod);
MSG(" ndof = %d\n", ndof);
MSG(" ndim = %d\n", ndim);
MSG(" meshdim = %d\n", meshdim);
MSG(" isub = %d\n", isub);
MSG(" nelems = %d\n", nelems);
MSG(" nnods = %d\n", nnods);
MSG(" ndofs = %d\n", ndofs);
MSG(" inet = [%d, %d, %d, %d, %d, %d]\n", inet[0], inet[1], inet[2], inet[3], inet[4], inet[5]);
MSG(" linet = %d\n", linet);
MSG(" nnet = [%d, %d]\n", nnet[0], nnet[1]);
MSG(" lnnet = %d\n", nelems);
MSG(" nndf = [%d, %d, %d, %d]\n", nndf[0], nndf[1], nndf[2], nndf[3]);
MSG(" lnndf = %d\n", nnods);
MSG(" isngn = [%d, %d, %d, %d]\n", isngn[0], isngn[1], isngn[2], isngn[3]);
MSG(" lisngn = %d\n", nnods);
MSG(" isvgvn = [%d, %d, %d, %d]\n", isvgvn[0], isvgvn[1], isvgvn[2], isvgvn[3]);
MSG(" lisvgvn = %d\n", nnods);
MSG(" isegn = [%d, %d]\n", isegn[0], isegn[1]);
MSG(" lisegn = %d\n", nelems);
MSG(" xyz = [%f, %f, %f, %f, %f, %f, %f, %f]\n", xyz[0], xyz[1], xyz[2], xyz[3], xyz[4], xyz[5], xyz[6], xyz[7]);
MSG(" lxyz1 = %d\n", lxyz1);
MSG(" lxyz2 = %d\n", lxyz2);
MSG(" ifix = [%d, %d, %d, %d]\n", ifix[0], ifix[1], ifix[2], ifix[3]);
MSG(" lifix = %d\n", ndofs);
MSG(" fixv = [%f, %f, %f, %f]\n", fixv[0], fixv[1], fixv[2], fixv[3]);
MSG(" lfixv = %d\n", ndofs);
MSG(" rhs = [%f, %f, %f, %f]\n", rhs[0], rhs[1], rhs[2], rhs[3]);
MSG(" lrhs = %d\n", ndofs);
MSG(" is_rhs_complete = %d\n", is_rhs_complete);
MSG(" sol = [%f, %f, %f, %f]\n", sol[0], sol[1], sol[2], sol[3]);
MSG(" lsol = %d\n", ndofs);
MSG(" matrixtype = %d\n", matrixtype);
MSG(" ispare = [%d, %d, %d, %d, %d, %d, %d, %d, %d, %d]\n", i_sparse[0], i_sparse[1], i_sparse[2], i_sparse[3], i_sparse[4], i_sparse[5], i_sparse[6], i_sparse[7], i_sparse[8], i_sparse[9]);
MSG(" jspare = [%d, %d, %d, %d, %d, %d, %d, %d, %d, %d]\n", j_sparse[0], j_sparse[1], j_sparse[2], j_sparse[3], j_sparse[4], j_sparse[5], j_sparse[6], j_sparse[7], j_sparse[8], j_sparse[9]);
MSG(" a_spare = [%f, %f, %f, %f, %f, %f, %f, %f, %f, %f]\n", a_sparse[0], a_sparse[1], a_sparse[2], a_sparse[3], a_sparse[4], a_sparse[5], a_sparse[6], a_sparse[7], a_sparse[8], a_sparse[9]);
MSG(" la = %d\n", la);
MSG(" is_assembled_int = %d\n", is_assembled_int);
*/
bddcml_upload_subdomain_data(&nelem,
&nnod,
......@@ -276,13 +324,18 @@ namespace AMDiS {
int parallel_division_int = 1;
int use_arithmetic_int = 1;
int use_adaptive_int = 0;
MSG("BDDC POINT A\n");
// MSG("call to \"bddcml_setup_preconditioner\" with the following arguments (each in one line):\n");
// MSG(" %d\n", matrixtype);
// MSG(" %d\n", use_defaults_int);
// MSG(" %d\n", parallel_division_int);
// MSG(" %d\n", use_arithmetic_int);
// MSG(" %d\n", use_adaptive_int);
bddcml_setup_preconditioner(&matrixtype,
&use_defaults_int,
&parallel_division_int,
&use_arithmetic_int,
&use_adaptive_int);
MSG("BDDC POINT B\n");
int method = 1;
double tol = 1.e-6;
......@@ -301,8 +354,6 @@ namespace AMDiS {
&converged_reason,
&condition_number);
MSG("BDDC POINT C\n");
MSG("BDDCML converged reason: %d within %d iterations \n",
converged_reason, num_iter);
......@@ -354,8 +405,10 @@ namespace AMDiS {
double val = value(*icursor);
i_sparse.push_back(rowIndex);
j_sparse.push_back(colIndex);
// i_sparse.push_back(rowIndex);
// j_sparse.push_back(colIndex);
i_sparse.push_back(*cursor * nComponents + ithRowComponent);
j_sparse.push_back(col(*icursor) * nComponents + ithColComponent);
a_sparse.push_back(val);
}
}
......
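The sparse triplet indices above now interleave the solution components per DOF. A minimal sketch of the index formula used for i_sparse and j_sparse (the helper name is hypothetical, not part of the commit):

// Interleaved component indexing as used for i_sparse/j_sparse above: with
// nComponents unknowns per DOF, component 'comp' of DOF 'dof' is mapped to
// the row/column index dof * nComponents + comp.
inline int interleavedIndex(int dof, int comp, int nComponents)
{
  return dof * nComponents + comp;
}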
......@@ -85,7 +85,8 @@ namespace AMDiS {
repartitioningCounter(0),
debugOutputDir(""),
lastMeshChangeIndex(0),
createBoundaryDofFlag(0)
createBoundaryDofFlag(0),
sebastianMode(false)
{
FUNCNAME("MeshDistributor::ParalleDomainBase()");
......@@ -238,10 +239,9 @@ namespace AMDiS {
createInteriorBoundaryInfo();
#if (DEBUG != 0)
#if (DEBUG != 0)
ParallelDebug::printBoundaryInfo(*this);
#endif
// === Remove neighbourhood relations due to periodic boundary conditions. ===
......@@ -1576,8 +1576,18 @@ namespace AMDiS {
bound.type = it->second;
AtomicBoundary& b = periodicBoundary.getNewAtomic(otherElementRank);
b = bound;
if (sebastianMode) {
if (bound.rankObj.elIndex == 3 &&
bound.rankObj.ithObj == 1 ||
bound.rankObj.elIndex == 78 &&
bound.rankObj.ithObj == 2) {
AtomicBoundary& b = periodicBoundary.getNewAtomic(otherElementRank);
b = bound;
}
} else {
AtomicBoundary& b = periodicBoundary.getNewAtomic(otherElementRank);
b = bound;
}
}
}
......
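When sebastianMode is active, the new branch accepts only two hard-coded (element index, subobject index) pairs. A standalone sketch of the same filter with explicit parentheses (the predicate name is hypothetical; elements 3 and 78 are the values hard-coded above):

// Illustration only: the filter above written as a standalone predicate.
// With sebastianMode active, only element 3 / subobject 1 and
// element 78 / subobject 2 produce a periodic atomic boundary.
inline bool acceptPeriodicBoundary(bool sebastianMode, int elIndex, int ithObj)
{
  if (!sebastianMode)
    return true;
  return (elIndex == 3  && ithObj == 1) ||
         (elIndex == 78 && ithObj == 2);
}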
......@@ -711,6 +711,8 @@ namespace AMDiS {
map<const FiniteElemSpace*, BoundaryDofInfo> boundaryDofInfo;
public:
bool sebastianMode;
/// The boundary DOFs are sorted by subobject entities, i.e., first all
/// face DOFs, then all edge DOFs, and finally all vertex DOFs are written to
/// the communication structure vectors, \ref sendDofs and \ref recvDofs.
......
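Since the new flag is a public member, applications can toggle it directly. A minimal usage sketch (the header path and function name are assumptions, not part of this commit):

#include "parallel/MeshDistributor.h"

// Hypothetical usage sketch: enable the special periodic-boundary filtering
// before the distributor builds its interior boundary information.
void enableSebastianMode(AMDiS::MeshDistributor &distributor)
{
  distributor.sebastianMode = true;
}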
......@@ -31,6 +31,7 @@ namespace AMDiS {
assembleMatrix,
assembleVector);
#if (DEBUG != 0)
double vm, rss;
processMemUsage(vm, rss);
vm /= 1024.0;
......@@ -42,6 +43,7 @@ namespace AMDiS {
mpi::globalAdd(rss);
MSG("Overall memory usage is VM = %.1f MB RSS = %.1f MB\n", vm, rss);
#endif
}
......
......@@ -25,7 +25,6 @@ namespace AMDiS {
double wtime = MPI::Wtime();
vector<const FiniteElemSpace*> feSpaces = getFeSpaces(mat);
createGlobalDofMapping(feSpaces);
int nRankRows = meshDistributor->getNumberRankDofs(feSpaces);
int nOverallRows = meshDistributor->getNumberOverallDofs(feSpaces);
......@@ -43,6 +42,11 @@ namespace AMDiS {
recvAllValues = 1;
if (!d_nnz || recvAllValues != 0 || alwaysCreateNnzStructure) {
// The global DOF mapping must be recomputed only if the NNZ structure has
// changed (and thus the mesh has also changed).
createGlobalDofMapping(feSpaces);
if (d_nnz) {
delete [] d_nnz;
d_nnz = NULL;
......@@ -81,7 +85,7 @@ namespace AMDiS {
for (int i = 0; i < nComponents; i++)
for (int j = 0; j < nComponents; j++)
if ((*mat)[i][j])
setDofMatrix((*mat)[i][j], nComponents, i, j);
setDofMatrix((*mat)[i][j], i, j);
#if (DEBUG != 0)
MSG("Fill petsc matrix 2 needed %.5f seconds\n", MPI::Wtime() - wtime);
......@@ -122,7 +126,7 @@ namespace AMDiS {
// === Transfer values from DOF vector to the PETSc vector. ===
for (int i = 0; i < vec->getSize(); i++)
setDofVector(petscRhsVec, vec->getDOFVector(i), vec->getSize(), i);
setDofVector(petscRhsVec, vec->getDOFVector(i), i);
VecAssemblyBegin(petscRhsVec);
VecAssemblyEnd(petscRhsVec);
......@@ -141,7 +145,7 @@ namespace AMDiS {
VecSet(petscSolVec, 0.0);
for (int i = 0; i < nComponents; i++)
setDofVector(petscSolVec, vec.getDOFVector(i), nComponents, i, true);
setDofVector(petscSolVec, vec.getDOFVector(i), i, true);
VecAssemblyBegin(petscSolVec);
VecAssemblyEnd(petscSolVec);
......@@ -193,8 +197,8 @@ namespace AMDiS {
}
void PetscSolverGlobalMatrix::setDofMatrix(DOFMatrix* mat, int dispMult,
int dispAddRow, int dispAddCol)
void PetscSolverGlobalMatrix::setDofMatrix(DOFMatrix* mat,
int nRowMat, int nColMat)
{
FUNCNAME("PetscSolverGlobalMatrix::setDofMatrix()");
......@@ -239,7 +243,7 @@ namespace AMDiS {
// === Row DOF index is not periodic. ===
// Get PETSc's mat row index.
int rowIndex = dofToMatIndex.get(dispAddRow, globalRowDof);
int rowIndex = dofToMatIndex.get(nRowMat, globalRowDof);
cols.clear();
values.clear();
......@@ -253,7 +257,7 @@ namespace AMDiS {
// Test if the current col dof is a periodic dof.
bool periodicCol = perMap.isPeriodic(colFe, globalColDof);
// Get PETSc's mat col index.
int colIndex = dofToMatIndex.get(dispAddCol, globalColDof);
int colIndex = dofToMatIndex.get(nColMat, globalColDof);
// Ignore all zero entries, except if it is a diagonal entry.
if (value(*icursor) == 0.0 && rowIndex != colIndex)
......@@ -302,7 +306,7 @@ namespace AMDiS {
}
for (unsigned int i = 0; i < newCols.size(); i++) {
cols.push_back(dofToMatIndex.get(dispAddCol, newCols[i]));
cols.push_back(dofToMatIndex.get(nColMat, newCols[i]));
values.push_back(scaledValue);
}
}
......@@ -323,7 +327,6 @@ namespace AMDiS {
// Traverse all column entries.
for (icursor_type icursor = begin<nz>(cursor), icend = end<nz>(cursor);
icursor != icend; ++icursor) {
// Global DOF index of the current column.
int globalColDof =
meshDistributor->mapDofToGlobal(colFe, col(*icursor));
......@@ -392,8 +395,8 @@ namespace AMDiS {
// === Translate the matrix entries to PETSc's matrix.
for (unsigned int i = 0; i < entry.size(); i++) {
int rowIdx = dofToMatIndex.get(dispAddRow, entry[i].first);
int colIdx = dofToMatIndex.get(dispAddCol, entry[i].second);
int rowIdx = dofToMatIndex.get(nRowMat, entry[i].first);
int colIdx = dofToMatIndex.get(nColMat, entry[i].second);
colsMap[rowIdx].push_back(colIdx);
valsMap[rowIdx].push_back(scaledValue);
......@@ -419,8 +422,7 @@ namespace AMDiS {
void PetscSolverGlobalMatrix::setDofVector(Vec& petscVec,
DOFVector<double>* vec,
int dispMult,
int dispAdd,
int nRowVec,
bool rankOnly)
{
FUNCNAME("PetscSolverGlobalMatrix::setDofVector()");
......@@ -438,7 +440,7 @@ namespace AMDiS {
DegreeOfFreedom globalRowDof =
meshDistributor->mapDofToGlobal(feSpace, dofIt.getDOFIndex());
// Get PETSc's mat index of the row DOF.
int index = dofToMatIndex.get(dispAdd, globalRowDof);
int index = dofToMatIndex.get(nRowVec, globalRowDof);
if (perMap.isPeriodic(feSpace, globalRowDof)) {
std::set<int>& perAsc = perMap.getAssociations(feSpace, globalRowDof);
......@@ -448,7 +450,7 @@ namespace AMDiS {
for (std::set<int>::iterator perIt = perAsc.begin();
perIt != perAsc.end(); ++perIt) {
int mappedDof = perMap.map(feSpace, *perIt, globalRowDof);
int mappedIndex = dofToMatIndex.get(dispAdd, mappedDof);
int mappedIndex = dofToMatIndex.get(nRowVec, mappedDof);
VecSetValues(petscVec, 1, &mappedIndex, &value, ADD_VALUES);
}
} else {
......
......@@ -108,12 +108,11 @@ namespace AMDiS {
void createPetscNnzStructure(Matrix<DOFMatrix*> *mat);
/// Takes a DOF matrix and sends the values to the global PETSc matrix.
void setDofMatrix(DOFMatrix* mat, int dispMult = 1,
int dispAddRow = 0, int dispAddCol = 0);
void setDofMatrix(DOFMatrix* mat, int nRowMat = 0, int nColMat = 0);
/// Takes a DOF vector and sends its values to a given PETSc vector.
void setDofVector(Vec& petscVec, DOFVector<double>* vec,
int disMult = 1, int dispAdd = 0, bool rankOnly = false);
int nRowVec, bool rankOnly = false);
void createGlobalDofMapping(vector<const FiniteElemSpace*> &feSpaces);
......
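The displacement arguments (dispMult, dispAdd) of setDofMatrix and setDofVector are replaced by component numbers that are passed to dofToMatIndex. A minimal sketch of such a component-wise lookup, assuming dofToMatIndex behaves like a map from (component, global DOF) to the global PETSc index; the class below is an illustration, not the AMDiS implementation:

#include <map>
#include <utility>

// Illustration only: maps a (component, global DOF) pair to its global
// PETSc matrix index, mimicking how dofToMatIndex.get(component, dof) is
// used in the diff above.
class DofComponentToMatIndex
{
public:
  void add(int component, int globalDof, int matIndex)
  {
    data[std::make_pair(component, globalDof)] = matIndex;
  }

  int get(int component, int globalDof) const
  {
    std::map<std::pair<int, int>, int>::const_iterator it =
      data.find(std::make_pair(component, globalDof));
    return (it != data.end()) ? it->second : -1;  // -1: pair not registered
  }

private:
  std::map<std::pair<int, int>, int> data;
};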