
Commit 6758eb3d authored by Thomas Witkowski

Before doing the last big change.

parent 531bf547
@@ -513,7 +513,8 @@ namespace AMDiS {
Parameters::get(initFileStr, solverType);
OEMSolverCreator *solverCreator =
dynamic_cast<OEMSolverCreator*>(CreatorMap<OEMSolver>::getCreator(solverType, initFileStr));
TEST_EXIT(solverCreator)("no solver type\n");
TEST_EXIT(solverCreator)
("No valid solver type found in parameter \"%s\"\n", initFileStr.c_str());
solverCreator->setName(initFileStr);
solver = solverCreator->create();
solver->initParameters();
......
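The first hunk only sharpens the TEST_EXIT error message, but the surrounding lines are the generic creator lookup that this commit reuses further below for the FETI subsolver. A minimal sketch of that pattern, assuming the AMDiS interfaces exactly as they appear in the hunk; the parameter key "myProblem->solver" and the "petsc" default are hypothetical examples:

// Sketch: resolve a named solver through the creator registry.
std::string param = "myProblem->solver";   // hypothetical parameter path
std::string solverType = "petsc";          // fallback if the parameter is missing
Parameters::get(param, solverType);

OEMSolverCreator *creator = dynamic_cast<OEMSolverCreator*>(
    CreatorMap<OEMSolver>::getCreator(solverType, param));
TEST_EXIT(creator)
  ("No valid solver type found in parameter \"%s\"\n", param.c_str());

creator->setName(param);
OEMSolver *solver = creator->create();
solver->initParameters();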
@@ -228,7 +228,7 @@ namespace AMDiS {
if (writePartMesh > 0) {
debug::writeElementIndexMesh(mesh , debugOutputDir + "elementIndex");
ParallelDebug::writePartitioning(*this, debugOutputDir + "part.vtu");
ParallelDebug::writePartitioning(*this, debugOutputDir + "part");
}
}
@@ -901,7 +901,7 @@ namespace AMDiS {
void MeshDistributor::createMeshLevelStructure()
{
FUNCNAME("MeshDistributor::createMeshLevelStructure()");
FUNCNAME("MeshDistributor::createMeshLevelStructure()");
if (mpiSize != 16)
return;
@@ -1663,10 +1663,14 @@ namespace AMDiS {
// MSG("Memory usage of element object database = %5.f KByte\n",
// static_cast<double>(memsize / 1024.0));
MSG("MAKE INT BOUND!\n");
intBoundary.create(levelData, 0, elObjDb);
ParallelDebug::printBoundaryInfo(intBoundary);
MSG("TEST = %d\n", levelData.getLevelNumber());
if (levelData.getLevelNumber() > 1) {
MSG("MAKE INT SD BOUND!\n");
intBoundarySd.create(levelData, 1, elObjDb);
ParallelDebug::printBoundaryInfo(intBoundarySd, 0, true);
}
@@ -1847,18 +1851,11 @@ namespace AMDiS {
int test = 0;
Parameters::get("parallel->remove periodic boundary", test);
// if (test == 0) {
//121106 replaced if statement
if(true){
//write latest mesh properties to vtu file
debug::writeElementIndexMesh(mesh , debugOutputDir + "elementIndex");
ParallelDebug::writePartitioning(*this, debugOutputDir + "part.vtu");
ParallelDebug::testCommonDofs(*this, true);
ParallelDebug::testGlobalIndexByCoords(*this);
if (test == 0) {
ParallelDebug::testCommonDofs(*this, true);
ParallelDebug::testGlobalIndexByCoords(*this);
}
#else
for (int i = 0; i < static_cast<int>(dofMaps.size()); i++) {
......
@@ -191,6 +191,16 @@ namespace AMDiS {
return mpiComm;
}
inline MPI::Intracomm getLocalMpiComm()
{
if (levelData.getLevelNumber() == 1)
return PETSC_COMM_SELF;
TEST_EXIT(levelData.getLevelNumber() == 2)("Not yet implemented!\n");
return levelData.getMpiComm(1);
}
inline bool isInitialized()
{
return initialized;
......
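The new getLocalMpiComm() keeps the level logic in one place: with a single mesh level the "local" communicator is PETSC_COMM_SELF, with a two-level hierarchy it is the level-1 communicator from MeshLevelData. A hedged sketch of how a solver can use it to build a rank-local interior matrix; the variable names follow the ParallelCoarseSpaceSolver code in this commit, and the preallocation value 50 is an arbitrary placeholder:

// Sketch: assemble a sequential interior matrix on whatever communicator
// the mesh distributor considers local.
MPI::Intracomm localComm = meshDistributor->getLocalMpiComm();
int nRankRows = interiorMap->getRankDofs();   // rank-owned interior DOFs

Mat localMat;
MatCreateSeqAIJ(localComm, nRankRows, nRankRows,
                50, PETSC_NULL,               // placeholder preallocation
                &localMat);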
@@ -61,6 +61,28 @@ namespace AMDiS {
}
void ParallelCoarseSpaceSolver::setMeshDistributor(MeshDistributor *md)
{
FUNCNAME("ParallelCoarseSpaceSolver::setMeshDistributor()");
meshDistributor = md;
mpiCommGlobal = meshDistributor->getMpiComm();
mpiCommLocal = meshDistributor->getLocalMpiComm();
}
void ParallelCoarseSpaceSolver::setMeshDistributor(MeshDistributor *md,
MPI::Intracomm mpiComm0,
MPI::Intracomm mpiComm1)
{
FUNCNAME("ParallelCoarseSpaceSolver::setMeshDistributor()");
meshDistributor = md;
mpiCommGlobal = mpiComm0;
mpiCommLocal = mpiComm1;
}
void ParallelCoarseSpaceSolver::prepare()
{
FUNCNAME("ParallelCoarseSpaceSolver::prepare()");
@@ -116,7 +138,7 @@ namespace AMDiS {
bool localMatrix = (coarseSpaceMap.size() && subdomainLevel == 0);
if (checkMeshChange()) {
int nMat = uniqueCoarseMap.size() + 1;
int nMat = uniqueCoarseMap.size() + 1;
nnz.resize(nMat);
for (int i = 0; i < nMat; i++) {
nnz[i].resize(nMat);
@@ -151,21 +173,52 @@ namespace AMDiS {
int nRankInteriorRows = interiorMap->getRankDofs();
int nOverallInteriorRows = interiorMap->getOverallDofs();
MPI::Intracomm mpiGlobal = meshDistributor->getMeshLevelData().getMpiComm(subdomainLevel);
if (localMatrix) {
MatCreateSeqAIJ(mpiCommLocal, nRankInteriorRows, nRankInteriorRows,
0, nnz[0][0].dnnz,
&mat[0][0]);
} else {
MatCreateAIJ(mpiCommGlobal, nRankInteriorRows, nRankInteriorRows,
nOverallInteriorRows, nOverallInteriorRows,
0, nnz[0][0].dnnz, 0, nnz[0][0].onnz,
&mat[0][0]);
if (subdomainLevel == 0) {
MatCreateAIJ(mpiGlobal, nRankInteriorRows, nRankInteriorRows,
nOverallInteriorRows, nOverallInteriorRows,
0, nnz[0][0].dnnz, 0, nnz[0][0].onnz,
&mat[0][0]);
} else {
MSG("WARNING: USE MULTILEVEL, MAKE THIS GENERAL!\n");
// TODO: MAKE NNZ CALCULATION GENERAL (see also creation of coarse space
// matrices some lines below)
MatCreateAIJ(mpiGlobal, nRankInteriorRows, nRankInteriorRows,
nOverallInteriorRows, nOverallInteriorRows,
300, PETSC_NULL, 300, PETSC_NULL, &mat[0][0]);
}
}
MatSetOption(mat[0][0], MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);
VecCreateMPI(mpiCommGlobal, nRankInteriorRows, nOverallInteriorRows, &vecSol[0]);
VecCreateMPI(mpiCommGlobal, nRankInteriorRows, nOverallInteriorRows, &vecRhs[0]);
if (subdomainLevel == 0) {
VecCreateMPI(mpiGlobal, nRankInteriorRows, nOverallInteriorRows,
&vecSol[0]);
VecCreateMPI(mpiGlobal, nRankInteriorRows, nOverallInteriorRows,
&vecRhs[0]);
} else {
MSG("WARNING: USE MULTILEVEL AND THIS IS A VERY BAD HACK, MAKE GENERAL!!\n");
MPI::Intracomm mpiComm =
meshDistributor->getMeshLevelData().getMpiComm(subdomainLevel - 1);
int nAllRows = nRankInteriorRows;
mpi::globalAdd(nAllRows);
VecCreateMPI(mpiComm, nRankInteriorRows, nAllRows,
&vecSol[0]);
VecCreateMPI(mpiComm, nRankInteriorRows, nAllRows,
&vecRhs[0]);
}
int nCoarseMap = uniqueCoarseMap.size();
for (int i = 0; i < nCoarseMap; i++) {
@@ -174,16 +227,30 @@ namespace AMDiS {
int nRankCoarseRows = cMap->getRankDofs();
int nOverallCoarseRows = cMap->getOverallDofs();
MatCreateAIJ(mpiCommGlobal,
nRankCoarseRows, nRankCoarseRows,
nOverallCoarseRows, nOverallCoarseRows,
0, nnz[i + 1][i + 1].dnnz, 0, nnz[i + 1][i + 1].onnz,
&mat[i + 1][i + 1]);
if (subdomainLevel == 0) {
MatCreateAIJ(mpiCommGlobal,
nRankCoarseRows, nRankCoarseRows,
nOverallCoarseRows, nOverallCoarseRows,
0, nnz[i + 1][i + 1].dnnz, 0, nnz[i + 1][i + 1].onnz,
&mat[i + 1][i + 1]);
} else {
MSG("WARNING: USE MULTILEVEL AND THIS IS A VERY BAD HACK, MAKE GENERAL!!\n");
MPI::Intracomm mpiComm =
meshDistributor->getMeshLevelData().getMpiComm(subdomainLevel - 1);
MatCreateAIJ(mpiComm,
nRankCoarseRows, nRankCoarseRows,
nOverallCoarseRows, nOverallCoarseRows,
300, PETSC_NULL, 300, PETSC_NULL,
&mat[i + 1][i + 1]);
}
MatSetOption(mat[i + 1][i + 1], MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);
cMap->createVec(vecSol[i + 1]);
cMap->createVec(vecRhs[i + 1]);
}
for (int i = 0; i < nCoarseMap + 1; i++) {
for (int j = 0; j < nCoarseMap + 1; j++) {
if (i == j)
@@ -195,18 +262,43 @@ namespace AMDiS {
int nColsRankMat = (j == 0 ? nRankInteriorRows : uniqueCoarseMap[j - 1]->getRankDofs());
int nColsOverallMat = (j == 0 ? nOverallInteriorRows : uniqueCoarseMap[j - 1]->getOverallDofs());
MatCreateAIJ(mpiCommGlobal,
nRowsRankMat, nColsRankMat,
nRowsOverallMat, nColsOverallMat,
0, nnz[i][j].dnnz, 0, nnz[i][j].onnz,
&mat[i][j]);
MatCreateAIJ(mpiCommGlobal,
nColsRankMat, nRowsRankMat,
nColsOverallMat, nRowsOverallMat,
0, nnz[j][i].dnnz, 0, nnz[j][i].onnz,
&mat[j][i]);
if (subdomainLevel == 0) {
MatCreateAIJ(mpiCommGlobal,
nRowsRankMat, nColsRankMat,
nRowsOverallMat, nColsOverallMat,
0, nnz[i][j].dnnz, 0, nnz[i][j].onnz,
&mat[i][j]);
MatCreateAIJ(mpiCommGlobal,
nColsRankMat, nRowsRankMat,
nColsOverallMat, nRowsOverallMat,
0, nnz[j][i].dnnz, 0, nnz[j][i].onnz,
&mat[j][i]);
} else {
MSG("WARNING: USE MULTILEVEL AND THIS IS A VERY BAD HACK, MAKE GENERAL!!\n");
if (i == 0) {
nRowsOverallMat = nRowsRankMat;
mpi::globalAdd(nRowsOverallMat);
}
if (j == 0) {
nColsOverallMat = nColsRankMat;
mpi::globalAdd(nColsOverallMat);
}
MatCreateAIJ(mpiCommGlobal,
nRowsRankMat, nColsRankMat,
nRowsOverallMat, nColsOverallMat,
300, PETSC_NULL, 300, PETSC_NULL,
&mat[i][j]);
MatCreateAIJ(mpiCommGlobal,
nColsRankMat, nRowsRankMat,
nColsOverallMat, nRowsOverallMat,
300, PETSC_NULL, 300, PETSC_NULL,
&mat[j][i]);
}
}
}
}
}
......
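The branches added in this file fall into two cases: on subdomain level 0 the matrices get exact per-row preallocation from the precomputed nnz structures, on coarser levels they fall back to a fixed 300 entries per row, which is what the WARNING lines flag as a temporary hack. A reduced sketch of the two PETSc calls; the helper name createAIJ is hypothetical, the signatures are the ones used in the hunks:

#include <petscmat.h>

// Sketch: distributed AIJ matrix with exact preallocation if available,
// otherwise the crude 300-per-row fallback used on coarser levels.
Mat createAIJ(MPI_Comm comm, PetscInt nRankRows, PetscInt nOverallRows,
              const PetscInt *dnnz, const PetscInt *onnz)
{
  Mat A;
  if (dnnz && onnz)
    MatCreateAIJ(comm, nRankRows, nRankRows, nOverallRows, nOverallRows,
                 0, dnnz, 0, onnz, &A);
  else
    MatCreateAIJ(comm, nRankRows, nRankRows, nOverallRows, nOverallRows,
                 300, PETSC_NULL, 300, PETSC_NULL, &A);
  // tolerate entries outside the preallocated pattern instead of aborting
  MatSetOption(A, MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);
  return A;
}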
@@ -78,15 +78,13 @@ namespace AMDiS {
void setCoarseSpaceDofMapping(ParallelDofMapping *coarseDofs,
int component = -1);
/// Set mesh distributor object and MPI communicators.
void setMeshDistributor(MeshDistributor *m,
/// Set mesh distributor object
void setMeshDistributor(MeshDistributor *m);
/// Set mesh distributor object
void setMeshDistributor(MeshDistributor *md,
MPI::Intracomm mpiComm0,
MPI::Intracomm mpiComm1)
{
meshDistributor = m;
mpiCommGlobal = mpiComm0;
mpiCommLocal = mpiComm1;
}
MPI::Intracomm mpiComm1);
/// Set level of the interior discretization. Is only used for
/// multi-level methods.
......
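The header now only declares the two setMeshDistributor() overloads; their bodies moved into the .cc hunk shown earlier. How the variants are meant to be picked can be seen in the PetscSolverFeti changes further down; a condensed sketch with the hypothetical helper name attachMeshDistributor:

// Sketch: the one-argument form pulls both communicators from the
// MeshDistributor, the three-argument form serves multilevel callers.
void attachMeshDistributor(PetscSolver *solver, MeshDistributor *md, int meshLevel)
{
  if (meshLevel == 0) {
    solver->setMeshDistributor(md);
  } else {
    MeshLevelData &levelData = md->getMeshLevelData();
    solver->setMeshDistributor(md,
                               levelData.getMpiComm(meshLevel - 1),  // global
                               levelData.getMpiComm(meshLevel));     // local
  }
}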
@@ -55,9 +55,7 @@ namespace AMDiS {
meshDistributor->setBoundaryDofRequirement(petscSolver->getBoundaryDofRequirement());
petscSolver->setMeshDistributor(meshDistributor,
meshDistributor->getMpiComm(),
PETSC_COMM_SELF);
petscSolver->setMeshDistributor(meshDistributor);
petscSolver->init(this->getComponentSpaces(), this->getFeSpaces());
}
......
@@ -101,13 +101,25 @@ namespace AMDiS {
{
FUNCNAME("PetscSolverFeti::initialize()");
MSG("INIT WITH MESH-LEVEL: %d\n", meshLevel);
TEST_EXIT_DBG(meshLevel + 1 == meshDistributor->getMeshLevelData().getLevelNumber())
("Mesh hierarchy does not contain %d levels!\n", meshLevel + 1);
MeshLevelData& levelData = meshDistributor->getMeshLevelData();
if (subdomain == NULL) {
subdomain = new PetscSolverGlobalMatrix("");
string subSolverInitStr = initFileStr + "->subsolver";
string solverType = "petsc";
Parameters::get(subSolverInitStr, solverType);
OEMSolverCreator *solverCreator =
dynamic_cast<OEMSolverCreator*>(CreatorMap<OEMSolver>::getCreator(solverType, initFileStr));
TEST_EXIT(solverCreator)
("No valid solver type found in parameter \"%s\"\n",
initFileStr.c_str());
solverCreator->setName(subSolverInitStr);
subdomain = dynamic_cast<PetscSolver*>(solverCreator->create());
subdomain->initParameters();
subdomain->setSymmetric(isSymmetric);
if (dirichletMode == 0)
subdomain->setHandleDirichletRows(true);
@@ -115,14 +127,15 @@ namespace AMDiS {
subdomain->setHandleDirichletRows(false);
if (meshLevel == 0) {
subdomain->setMeshDistributor(meshDistributor,
mpiCommGlobal, mpiCommLocal);
subdomain->setMeshDistributor(meshDistributor);
} else {
subdomain->setMeshDistributor(meshDistributor,
levelData.getMpiComm(meshLevel - 1),
levelData.getMpiComm(meshLevel));
subdomain->setLevel(meshLevel);
}
delete solverCreator;
}
primalDofMap.init(levelData, componentSpaces, feSpaces);
@@ -318,8 +331,10 @@ namespace AMDiS {
WorldVector<double> c;
feSpace->getMesh()->getDofIndexCoords(*it, feSpace, c);
if (fabs(c[0]) < e || fabs(c[1]) < e ||
fabs(c[0] - 25.0) < e || fabs(c[1] - 25.0) < e ||
if ((fabs(c[0]) < e && fabs(c[1] - 12.5) < e) ||
(fabs(c[0] - 25.0) < e && fabs(c[1] - 12.5) < e) ||
(fabs(c[0] - 12.5) < e && fabs(c[1]) < e) ||
(fabs(c[0] - 12.5) < e && fabs(c[1] - 25.0) < e) ||
(fabs(c[0] - 12.5) < e && fabs(c[1] - 12.5) < e)) {
MSG("PRIMAL COORD %f %f\n", c[0], c[1]);
primals.insert(**it);
@@ -552,7 +567,6 @@ namespace AMDiS {
{
FUNCNAME("PetscSolverFeti::createIndexB()");
const FiniteElemSpace *feSpace = componentSpaces[component];
DOFAdmin* admin = feSpace->getAdmin();
@@ -937,8 +951,17 @@ namespace AMDiS {
if (augmentedLagrange == false) {
schurPrimalData.subSolver = subdomain;
localDofMap.createVec(schurPrimalData.tmp_vec_b, nGlobalOverallInterior);
if (meshLevel == 0) {
localDofMap.createVec(schurPrimalData.tmp_vec_b, nGlobalOverallInterior);
} else {
MSG("WARNING: MULTILEVEL SUPER SHIT, MAKE GENERAL!\n");
VecCreateMPI(meshDistributor->getMeshLevelData().getMpiComm(meshLevel - 1),
localDofMap.getRankDofs(),
nGlobalOverallInterior, &(schurPrimalData.tmp_vec_b));
}
primalDofMap.createVec(schurPrimalData.tmp_vec_primal);
MatCreateShell(mpiCommGlobal,
primalDofMap.getRankDofs(),
@@ -1221,7 +1244,15 @@ namespace AMDiS {
fetiData.subSolver = subdomain;
fetiData.ksp_schur_primal = &ksp_schur_primal;
localDofMap.createVec(fetiData.tmp_vec_b0, nGlobalOverallInterior);
if (meshLevel == 0) {
localDofMap.createVec(fetiData.tmp_vec_b0, nGlobalOverallInterior);
} else {
MSG("WARNING: MULTILEVEL SUPER SHIT, MAKE GENERAL!\n");
VecCreateMPI(meshDistributor->getMeshLevelData().getMpiComm(meshLevel - 1),
localDofMap.getRankDofs(),
nGlobalOverallInterior, &(fetiData.tmp_vec_b0));
}
lagrangeMap.createVec(fetiData.tmp_vec_lagrange);
primalDofMap.createVec(fetiData.tmp_vec_primal0);
@@ -1596,7 +1627,7 @@ namespace AMDiS {
#if (DEBUG != 0)
PetscInt nRow, nCol;
MatGetSize(subdomain->getMatInterior(), &nRow, &nCol);
MatGetLocalSize(subdomain->getMatInterior(), &nRow, &nCol);
mpi::globalAdd(nRow);
mpi::globalAdd(nCol);
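With MatGetSize the following global sums would add up the full matrix dimensions once per rank; MatGetLocalSize plus the reduction yields the correct totals and also stays valid when the interior matrix lives on a sub-communicator. A sketch of what the reduction presumably does, under the assumption that mpi::globalAdd is an in-place MPI_SUM over all ranks:

// Sketch (assumption): mpi::globalAdd(x) as an in-place MPI_SUM reduction.
PetscInt local = nRow, sum = 0;
MPI_Allreduce(&local, &sum, 1, MPIU_INT, MPI_SUM, MPI_COMM_WORLD);
nRow = sum;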
@@ -1893,7 +1924,9 @@ namespace AMDiS {
createDirichletData(*mat);
createFetiData();
TEST_EXIT(coarseSpaceMap.size() == 0)("This is not yet supported!\n");
// === Create matrices for the FETI-DP method. ===
if (printTimings)
@@ -1944,6 +1977,8 @@ namespace AMDiS {
{
FUNCNAME("PetscSolverFeti::fillPetscRhs()");
TEST_EXIT(coarseSpaceMap.size() == 0)("This is not yet supported!\n");
subdomain->fillPetscRhs(vec);
}
@@ -2193,8 +2228,18 @@ namespace AMDiS {
// === Some temporary vectors. ===
Vec tmp_b0, tmp_b1;
localDofMap.createVec(tmp_b0, nGlobalOverallInterior);
localDofMap.createVec(tmp_b1, nGlobalOverallInterior);
if (meshLevel == 0) {
localDofMap.createVec(tmp_b0, nGlobalOverallInterior);
localDofMap.createVec(tmp_b1, nGlobalOverallInterior);
} else {
MSG("WARNING: MULTILEVEL SUPER SHIT, MAKE GENERAL!\n");
VecCreateMPI(meshDistributor->getMeshLevelData().getMpiComm(meshLevel - 1),
localDofMap.getRankDofs(),
nGlobalOverallInterior, &tmp_b0);
VecCreateMPI(meshDistributor->getMeshLevelData().getMpiComm(meshLevel - 1),
localDofMap.getRankDofs(),
nGlobalOverallInterior, &tmp_b1);
}
Vec tmp_primal0, tmp_primal1;
primalDofMap.createVec(tmp_primal0);
......
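Inside PetscSolverFeti the same multilevel branch appears three times: for the Schur primal temporary vector, the FETI temporary vector, and tmp_b0/tmp_b1. On mesh level 0 the vector comes from localDofMap.createVec(), on coarser levels it is created directly on the next coarser communicator. A hedged sketch of the helper the warnings seem to ask for; the name createInteriorVec is hypothetical, the calls are the ones used in the hunks:

// Sketch: the repeated "meshLevel == 0 ? createVec : VecCreateMPI" pattern
// factored into one place.
void createInteriorVec(ParallelDofMapping &localDofMap,
                       MeshLevelData &levelData,
                       int meshLevel, int nGlobalOverallInterior, Vec &v)
{
  if (meshLevel == 0) {
    localDofMap.createVec(v, nGlobalOverallInterior);
  } else {
    // build the vector on the communicator of the next coarser level
    VecCreateMPI(levelData.getMpiComm(meshLevel - 1),
                 localDofMap.getRankDofs(),
                 nGlobalOverallInterior, &v);
  }
}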
@@ -825,9 +825,7 @@ namespace AMDiS {
PetscSolver* subSolver = new PetscSolverGlobalMatrix("");
subSolver->setKspPrefix(kspPrefix);
subSolver->setMeshDistributor(meshDistributor,
mpiCommGlobal,
mpiCommLocal);
subSolver->setMeshDistributor(meshDistributor);
subSolver->init(fe, fe);
ParallelDofMapping &subDofMap = subSolver->getDofMap();
......