Commit dc9956be authored by Thomas Witkowski

Go to juropa.

parent ccd7eecd
@@ -48,6 +48,8 @@ namespace AMDiS {
int nRankCols = colDofMap.getRankDofs();
int rankStartColIndex = colDofMap.getStartDofs();
MSG("RANK DOFS FOR NNZ = %d %d\n", nRankRows, (!localMatrix ? nRankRows : -1));
create(nRankRows, (!localMatrix ? nRankRows : -1));
using mtl::tag::row; using mtl::tag::nz; using mtl::begin; using mtl::end;
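The two counts above size the NNZ structure: getRankDofs() is the number of DOFs owned by this rank, and getStartDofs() the rank's offset into the global numbering. A short sketch of the owned range this implies (names from the hunk, semantics assumed, not verified against the full source):

    // Assumed: each rank owns a contiguous block of global column indices.
    int firstOwnedCol = colDofMap.getStartDofs();
    int lastOwnedCol  = firstOwnedCol + colDofMap.getRankDofs(); // one past end
    // A column c with firstOwnedCol <= c < lastOwnedCol belongs to this rank's
    // diagonal block (dnnz); all other columns count towards onnz.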
@@ -115,7 +117,10 @@ namespace AMDiS {
const FiniteElemSpace *rowFeSpace = dofMat->getRowFeSpace();
const FiniteElemSpace *colFeSpace = dofMat->getColFeSpace();
if (rowDofMap.isDefinedFor(rowFeSpace) == false ||
colDofMap.isDefinedFor(colFeSpace) == false)
continue;
// === Prepare MTL4 iterators for the current DOF matrix. ===
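For reference, the traversal these declarations prepare follows the standard MTL4 cursor pattern used later in this commit. A condensed sketch (Matrix, cursor_type, and icursor_type stand for the typedefs used in the surrounding code; handleEntry is a hypothetical hook):

    // Property maps give access to column index and value at each nonzero.
    traits::col<Matrix>::type col(dofMat->getBaseMatrix());
    traits::const_value<Matrix>::type value(dofMat->getBaseMatrix());
    // The outer cursor walks rows, the inner cursor the nonzeros of a row.
    for (cursor_type cursor = begin<row>(dofMat->getBaseMatrix()),
           cend = end<row>(dofMat->getBaseMatrix()); cursor != cend; ++cursor)
      for (icursor_type icursor = begin<nz>(cursor), icend = end<nz>(cursor);
           icursor != icend; ++icursor)
        handleEntry(*cursor, col(*icursor), value(*icursor));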
@@ -246,9 +251,11 @@ namespace AMDiS {
if (colDofMap[colFeSpace].find(col(*icursor), colDofIndex) == false)
continue;
MSG("GET COL INDEX: %d\n", colComp);
int petscColIdx = (colDofMap.isMatIndexFromGlobal() ?
colDofMap.getMatIndex(colComp, colDofIndex.global) :
colDofMap.getMatIndex(colComp, col(*icursor)));
MSG("GOT\n");
sendMatrixEntry[sendToRank].
push_back(make_pair(petscRowIdx, petscColIdx));
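The pattern above buffers entries whose rows are owned by another rank: instead of being counted locally, the (row, column) index pair is queued per target rank and exchanged afterwards. The container type is not shown in this hunk; a plausible declaration consistent with the usage would be:

    // Assumed shape of the send buffer (one entry list per destination rank).
    std::map<int, std::vector<std::pair<int, int> > > sendMatrixEntry;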
......
@@ -100,12 +100,15 @@ namespace AMDiS {
TEST_EXIT_DBG(found)("Should not happen!\n");
}
for (int i = 0; i < componentIthCoarseMap.size(); i++)
MSG("COMP %d -> %d\n", i, componentIthCoarseMap[i]);
}
void ParallelCoarseSpaceMatVec::createMatVec(Matrix<DOFMatrix*>& seqMat)
{
FUNCNAME("ParallelCoarseSpaceMatVec::create()");
FUNCNAME("ParallelCoarseSpaceMatVec::createMatVec()");
// === Prepare coarse space information and generate the correct number ===
// === of empty PETSc matrix and vector objects. ===
@@ -135,6 +138,7 @@ namespace AMDiS {
nnz[i][j].clear();
}
MSG("CREATE MAT NNZ 0 - 0\n");
nnz[0][0].create(seqMat, mpiCommGlobal, *interiorMap,
(coarseSpaceMap.size() == 0 ? &(meshDistributor->getPeriodicMap()) : NULL),
meshDistributor->getElementObjectDb(),
@@ -145,18 +149,15 @@ namespace AMDiS {
if (i == 0 && j == 0)
continue;
if (i == j) {
nnz[i][j].create(seqMat, mpiCommGlobal, *(coarseSpaceMap[i - 1]), NULL,
meshDistributor->getElementObjectDb());
} else {
ParallelDofMapping &rowMap =
(i == 0 ? *interiorMap : *(coarseSpaceMap[i - 1]));
ParallelDofMapping &colMap =
(j == 0 ? *interiorMap : *(coarseSpaceMap[j - 1]));
nnz[i][j].create(seqMat, mpiCommGlobal, rowMap, colMap, NULL,
meshDistributor->getElementObjectDb());
}
ParallelDofMapping &rowMap =
(i == 0 ? *interiorMap : *(uniqueCoarseMap[i - 1]));
ParallelDofMapping &colMap =
(j == 0 ? *interiorMap : *(uniqueCoarseMap[j - 1]));
MSG("CREATE MAT NNZ %d - %d WITH ROWS %d\n", i, j, rowMap.getRankDofs());
nnz[i][j].create(seqMat, mpiCommGlobal, rowMap, colMap, NULL,
meshDistributor->getElementObjectDb());
}
}
}
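After this change, every block (i, j) selects its row and column mappings the same way: index 0 means the interior map, index k > 0 the (k-1)-th unique coarse map. The selection could be factored into a small helper; a hypothetical sketch, not part of the commit:

    // Hypothetical helper: DOF mapping responsible for block index k.
    ParallelDofMapping& blockDofMap(int k)
    {
      return (k == 0) ? *interiorMap : *(uniqueCoarseMap[k - 1]);
    }

    // The loop body above would then reduce to:
    //   nnz[i][j].create(seqMat, mpiCommGlobal, blockDofMap(i), blockDofMap(j),
    //                    NULL, meshDistributor->getElementObjectDb());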
@@ -165,60 +166,81 @@ namespace AMDiS {
// === Create PETSc matrices and PETSc vectors with the computed ===
// === nnz data structure. ===
int nRankRows = interiorMap->getRankDofs();
int nOverallRows = interiorMap->getOverallDofs();
int nRankInteriorRows = interiorMap->getRankDofs();
int nOverallInteriorRows = interiorMap->getOverallDofs();
MSG("CREATE INTERIOR MAT!\n");
if (localMatrix) {
MatCreateSeqAIJ(mpiCommLocal, nRankRows, nRankRows,
MatCreateSeqAIJ(mpiCommLocal, nRankInteriorRows, nRankInteriorRows,
0, nnz[0][0].dnnz,
&mat[0][0]);
MatSetOption(mat[0][0], MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);
} else {
MatCreateAIJ(mpiCommGlobal, nRankRows, nRankRows,
nOverallRows, nOverallRows,
MatCreateAIJ(mpiCommGlobal, nRankInteriorRows, nRankInteriorRows,
nOverallInteriorRows, nOverallInteriorRows,
0, nnz[0][0].dnnz, 0, nnz[0][0].onnz,
&mat[0][0]);
MatSetOption(mat[0][0], MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);
}
VecCreateMPI(mpiCommGlobal, nRankRows, nOverallRows, &vecSol[0]);
VecCreateMPI(mpiCommGlobal, nRankRows, nOverallRows, &vecRhs[0]);
VecCreateMPI(mpiCommGlobal, nRankInteriorRows, nOverallInteriorRows, &vecSol[0]);
VecCreateMPI(mpiCommGlobal, nRankInteriorRows, nOverallInteriorRows, &vecRhs[0]);
MSG("DONE!\n");
int nCoarseMap = uniqueCoarseMap.size();
for (int i = 0; i < nCoarseMap; i++) {
ParallelDofMapping* cMap = uniqueCoarseMap[i];
int nRowsRankCoarse = cMap->getRankDofs();
int nRowsOverallCoarse = cMap->getOverallDofs();
int nRankCoarseRows = cMap->getRankDofs();
int nOverallCoarseRows = cMap->getOverallDofs();
MSG("CREATE MAT COARSE %d OF SIZE %d %d\n", i + 1, nRankCoarseRows, nOverallCoarseRows);
MatCreateAIJ(mpiCommGlobal,
nRowsRankCoarse, nRowsRankCoarse,
nRowsOverallCoarse, nRowsOverallCoarse,
nRankCoarseRows, nRankCoarseRows,
nOverallCoarseRows, nOverallCoarseRows,
0, nnz[i + 1][i + 1].dnnz, 0, nnz[i + 1][i + 1].onnz,
&mat[i + 1][i + 1]);
VecCreateMPI(mpiCommGlobal, nRowsRankCoarse, nRowsOverallCoarse,
MSG("DONE!\n");
VecCreateMPI(mpiCommGlobal, nRankCoarseRows, nOverallCoarseRows,
&vecSol[i + 1]);
VecCreateMPI(mpiCommGlobal, nRowsRankCoarse, nRowsOverallCoarse,
VecCreateMPI(mpiCommGlobal, nRankCoarseRows, nOverallCoarseRows,
&vecRhs[i + 1]);
}
MSG("NCOARSEMAP = %d\n", nCoarseMap);
for (int i = 0; i < nCoarseMap + 1; i++) {
for (int j = 0; j < nCoarseMap + 1; j++) {
int nRowsRankMat = (j == 0 ? nRankRows : uniqueCoarseMap[j - 1]->getRankDofs());
int nRowsOverallMat = (j == 0 ? nOverallRows : uniqueCoarseMap[j - 1]->getOverallDofs());
if (i == j)
continue;
MSG("CHECK FOR %d %d\n", i, j);
int nRowsRankMat = (i == 0 ? nRankInteriorRows : uniqueCoarseMap[i - 1]->getRankDofs());
int nRowsOverallMat = (i == 0 ? nOverallInteriorRows : uniqueCoarseMap[i - 1]->getOverallDofs());
int nColsRankMat = (j == 0 ? nRankInteriorRows : uniqueCoarseMap[j - 1]->getRankDofs());
int nColsOverallMat = (j == 0 ? nOverallInteriorRows : uniqueCoarseMap[j - 1]->getOverallDofs());
MSG("CREATE-A MAT %d %d\n", i, j);
MatCreateAIJ(mpiCommGlobal,
nRowsRankCoarse, nRowsRankMat,
nRowsOverallCoarse, nRowsOverallMat,
0, nnz[i + 1][j].dnnz, 0, nnz[i + 1][j].onnz,
&mat[i + 1][j]);
nRowsRankMat, nColsRankMat,
nRowsOverallMat, nColsOverallMat,
0, nnz[i][j].dnnz, 0, nnz[i][j].onnz,
&mat[i][j]);
MSG("DONE!\n");
MSG("CREATE-B MAT %d %d WITH SIZE ... \n", j, i);
MatCreateAIJ(mpiCommGlobal,
nRowsRankMat, nRowsRankCoarse,
nRowsOverallMat, nRowsOverallCoarse,
0, nnz[j][i + 1].dnnz, 0, nnz[j][i + 1].onnz,
&mat[j][i + 1]);
nColsRankMat, nRowsRankMat,
nColsOverallMat, nRowsOverallMat,
0, nnz[j][i].dnnz, 0, nnz[j][i].onnz,
&mat[j][i]);
MSG("DONE!\n");
}
}
}
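The function builds a (nCoarseMap + 1) x (nCoarseMap + 1) block operator: mat[0][0] is the interior block, the diagonal blocks mat[k][k] live on the coarse spaces, and the off-diagonal blocks couple row map i with column map j. All MatCreateAIJ calls use exact preallocation: dnnz[r] counts the nonzeros of local row r whose columns this rank owns, onnz[r] the remaining ones. A self-contained PETSc sketch with illustrative sizes (not taken from this commit):

    #include <petscmat.h>

    // Two local rows; global sizes are summed over ranks via PETSC_DETERMINE.
    Mat A;
    PetscInt m = 2;
    PetscInt dnnz[2] = {3, 2};   // per-row nonzeros in the diagonal block
    PetscInt onnz[2] = {1, 0};   // per-row nonzeros in the off-diagonal block
    MatCreateAIJ(PETSC_COMM_WORLD, m, m, PETSC_DETERMINE, PETSC_DETERMINE,
                 0, dnnz, 0, onnz, &A);
    // With exact counts, assembly performs no further allocation; the code
    // above additionally disables MAT_NEW_NONZERO_ALLOCATION_ERR to be safe.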
......
@@ -388,6 +388,11 @@ namespace AMDiS {
return data.find(feSpace)->second;
}
inline bool isDefinedFor(const FiniteElemSpace* feSpace) const
{
return static_cast<bool>(data.count(feSpace));
}
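Since data is a map with unique keys, count() returns 0 or 1, so the cast yields exactly the containment test. Typical usage mirrors the guard added earlier in this commit:

    // Skip matrices whose FE spaces this mapping does not cover.
    if (rowDofMap.isDefinedFor(rowFeSpace) == false ||
        colDofMap.isDefinedFor(colFeSpace) == false)
      continue;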
/// Returns the number of solution components the mapping is defined on.
inline int getNumberOfComponents() const
{
......
@@ -226,7 +226,8 @@ namespace AMDiS {
meshLevel(0),
rStartInterior(0),
nGlobalOverallInterior(0),
printTimings(false)
printTimings(false),
enableStokesMode(false)
{
FUNCNAME("PetscSolverFeti::PetscSolverFeti()");
@@ -269,6 +270,15 @@ namespace AMDiS {
MeshLevelData& levelData = meshDistributor->getMeshLevelData();
vector<const FiniteElemSpace*>& uniqueFe = meshDistributor->getFeSpaces();
enableStokesMode = false;
Parameters::get("parallel->stokes mode", enableStokesMode);
if (enableStokesMode) {
MSG("========= !!!! SUPER WARNING !!! ========\n");
MSG("Use make use of stokes mode which is work in progress! We guarantee for nothing and even less than this!\n");
fullInterface = feSpaces[2];
}
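Parameters::get leaves its second argument untouched when the key is absent, so enableStokesMode stays false by default. Note also the hard-coded feSpaces[2], which assumes the third FE space carries the interface (pressure) component. In an AMDiS parameter file the mode would be switched on with a line such as (assumed syntax):

    parallel->stokes mode: 1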
if (subdomain == NULL) {
subdomain = new PetscSolverGlobalMatrix();
@@ -289,7 +299,8 @@ namespace AMDiS {
lagrangeMap.init(levelData, feSpaces, uniqueFe);
if (fullInterface != NULL)
interfaceDofMap.init(levelData, fullInterface);
interfaceDofMap.init(levelData, feSpaces, uniqueFe);
// interfaceDofMap.init(levelData, fullInterface);
if (fetiPreconditioner != FETI_NONE) {
TEST_EXIT(meshLevel == 0)
@@ -396,25 +407,39 @@ namespace AMDiS {
const FiniteElemSpace *feSpace = meshDistributor->getFeSpace(i);
MSG("FETI-DP data for %d-ith FE space:\n", i);
MSG(" nRankPrimals = %d nOverallPrimals = %d\n",
primalDofMap[feSpace].nRankDofs,
primalDofMap[feSpace].nOverallDofs);
MSG(" nRankDuals = %d nOverallDuals = %d\n",
dualDofMap[feSpace].nRankDofs,
dualDofMap[feSpace].nOverallDofs);
MSG(" nRankLagrange = %d nOverallLagrange = %d\n",
lagrangeMap[feSpace].nRankDofs,
lagrangeMap[feSpace].nOverallDofs);
if (feSpace == fullInterface) {
MSG(" nRankInterface = %d nOverallInterface = %d\n",
interfaceDofMap[feSpace].nRankDofs,
interfaceDofMap[feSpace].nOverallDofs);
} else {
MSG(" nRankPrimals = %d nOverallPrimals = %d\n",
primalDofMap[feSpace].nRankDofs,
primalDofMap[feSpace].nOverallDofs);
MSG(" nRankDuals = %d nOverallDuals = %d\n",
dualDofMap[feSpace].nRankDofs,
dualDofMap[feSpace].nOverallDofs);
MSG(" nRankLagrange = %d nOverallLagrange = %d\n",
lagrangeMap[feSpace].nRankDofs,
lagrangeMap[feSpace].nOverallDofs);
TEST_EXIT_DBG(localDofMap[feSpace].size() + primalDofMap[feSpace].size() ==
static_cast<unsigned int>(feSpace->getAdmin()->getUsedDofs()))
("Should not happen!\n");
TEST_EXIT_DBG(localDofMap[feSpace].size() + primalDofMap[feSpace].size() ==
static_cast<unsigned int>(feSpace->getAdmin()->getUsedDofs()))
("Should not happen!\n");
}
}
subdomain->setDofMapping(&localDofMap);
subdomain->setCoarseSpaceDofMapping(&primalDofMap);
if (enableStokesMode) {
MSG("WARNING: MAKE THIS MORE GENERAL!!!!!\n");
subdomain->setCoarseSpaceDofMapping(&primalDofMap, 0);
subdomain->setCoarseSpaceDofMapping(&primalDofMap, 1);
subdomain->setCoarseSpaceDofMapping(&interfaceDofMap, 2);
} else {
subdomain->setCoarseSpaceDofMapping(&primalDofMap);
}
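The hard-coded indices match a 2D Stokes setup: components 0 and 1 (velocity) use the primal coarse space, component 2 (pressure) the interface map. In line with the author's warning, a per-component selection could generalize this; a hypothetical sketch, where nComponents and isInterfaceComponent() are assumed helpers and not AMDiS API:

    for (int c = 0; c < nComponents; c++)
      subdomain->setCoarseSpaceDofMapping(isInterfaceComponent(c) ?
                                          &interfaceDofMap : &primalDofMap, c);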
if (printTimings) {
MPI::COMM_WORLD.Barrier();
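The barrier synchronizes all ranks before timing starts, so that subsequent wall-clock measurements are comparable across ranks. A minimal sketch of the pattern (illustrative, using the C++ MPI bindings already used here):

    MPI::COMM_WORLD.Barrier();
    double wtime = MPI::Wtime();
    // ... timed section ...
    MSG("Elapsed time: %.5f seconds\n", MPI::Wtime() - wtime);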
......
@@ -275,6 +275,7 @@ namespace AMDiS {
FetiLumpedPreconData fetiLumpedPreconData;
/// Matrices for Dirichlet preconditioner.
Mat mat_interior_interior, mat_duals_duals, mat_interior_duals, mat_duals_interior;
KSP ksp_interior;
@@ -292,6 +293,8 @@ namespace AMDiS {
const FiniteElemSpace* fullInterface;
bool printTimings;
bool enableStokesMode;
};
......
@@ -129,22 +129,25 @@ namespace AMDiS {
// === the global PETSc matrices. ===
int nComponents = seqMat->getSize();
for (int i = 0; i < nComponents; i++) {
for (int j = 0; j < nComponents; j++) {
if (!(*seqMat)[i][j])
for (int rowComponent = 0; rowComponent < nComponents; rowComponent++) {
for (int colComponent = 0; colComponent < nComponents; colComponent++) {
DOFMatrix* dofMat = (*seqMat)[rowComponent][colComponent];
if (!dofMat)
continue;
ParallelDofMapping *rowCoarseSpace = coarseSpaceMap[i];
ParallelDofMapping *colCoarseSpace = coarseSpaceMap[j];
ParallelDofMapping *rowCoarseSpace = coarseSpaceMap[rowComponent];
ParallelDofMapping *colCoarseSpace = coarseSpaceMap[colComponent];
traits::col<Matrix>::type col((*seqMat)[i][j]->getBaseMatrix());
traits::const_value<Matrix>::type value((*seqMat)[i][j]->getBaseMatrix());
traits::col<Matrix>::type col(dofMat->getBaseMatrix());
traits::const_value<Matrix>::type value(dofMat->getBaseMatrix());
// Traverse all rows.
for (cursor_type cursor = begin<row>((*seqMat)[i][j]->getBaseMatrix()),
cend = end<row>((*seqMat)[i][j]->getBaseMatrix()); cursor != cend; ++cursor) {
for (cursor_type cursor = begin<row>(dofMat->getBaseMatrix()),
cend = end<row>(dofMat->getBaseMatrix()); cursor != cend; ++cursor) {
bool isRowCoarse = isCoarseSpace(i, feSpaces[i], *cursor);
bool isRowCoarse =
isCoarseSpace(rowComponent, feSpaces[rowComponent], *cursor);
cols.clear();
colsOther.clear();
@@ -155,7 +158,8 @@ namespace AMDiS {
for (icursor_type icursor = begin<nz>(cursor), icend = end<nz>(cursor);
icursor != icend; ++icursor) {
bool isColCoarse = isCoarseSpace(j, feSpaces[j], col(*icursor));
bool isColCoarse =
isCoarseSpace(colComponent, feSpaces[colComponent], col(*icursor));
if (isColCoarse) {
if (isRowCoarse) {
@@ -180,56 +184,60 @@ namespace AMDiS {
// === Set matrix values. ===
if (isRowCoarse) {
int rowIndex = rowCoarseSpace->getMatIndex(i, *cursor);
for (unsigned int k = 0; k < cols.size(); k++)
cols[k] = colCoarseSpace->getMatIndex(j, cols[k]);
int rowIndex = rowCoarseSpace->getMatIndex(rowComponent, *cursor);
for (unsigned int i = 0; i < cols.size(); i++)
cols[i] = colCoarseSpace->getMatIndex(colComponent, cols[i]);
// matCoarseCoarse
MatSetValues(getMatCoarseByComponent(i),
MatSetValues(getMatCoarseByComponent(rowComponent),
1, &rowIndex, cols.size(),
&(cols[0]), &(values[0]), ADD_VALUES);
if (colsOther.size()) {
if (subdomainLevel == 0) {
for (unsigned int k = 0; k < colsOther.size(); k++)
colsOther[k] = interiorMap->getMatIndex(j, colsOther[k]);
for (unsigned int i = 0; i < colsOther.size(); i++)
colsOther[i] =
interiorMap->getMatIndex(colComponent, colsOther[i]);
} else {
for (unsigned int k = 0; k < colsOther.size(); k++)
colsOther[k] =
interiorMap->getMatIndex(j, colsOther[k]) + rStartInterior;
for (unsigned int i = 0; i < colsOther.size(); i++)
colsOther[i] =
interiorMap->getMatIndex(colComponent, colsOther[i]) +
rStartInterior;
}
// matCoarseInt
MatSetValues(getMatCoarseInteriorByComponent(i),
MatSetValues(getMatCoarseInteriorByComponent(rowComponent),
1, &rowIndex, colsOther.size(),
&(colsOther[0]), &(valuesOther[0]), ADD_VALUES);
}
} else {
int localRowIndex =
(subdomainLevel == 0 ? interiorMap->getLocalMatIndex(i, *cursor) :
interiorMap->getMatIndex(i, *cursor));
(subdomainLevel == 0 ?
interiorMap->getLocalMatIndex(rowComponent, *cursor) :
interiorMap->getMatIndex(rowComponent, *cursor));
for (unsigned int k = 0; k < cols.size(); k++) {
for (unsigned int i = 0; i < cols.size(); i++) {
if (subdomainLevel == 0)
cols[k] = interiorMap->getLocalMatIndex(j, cols[k]);
cols[i] = interiorMap->getLocalMatIndex(colComponent, cols[i]);
else
cols[k] = interiorMap->getMatIndex(j, cols[k]);
cols[i] = interiorMap->getMatIndex(colComponent, cols[i]);
}
MatSetValues(getMatInterior(), 1, &localRowIndex, cols.size(),
&(cols[0]), &(values[0]), ADD_VALUES);
if (colsOther.size()) {
int globalRowIndex = interiorMap->getMatIndex(i, *cursor);
int globalRowIndex = interiorMap->getMatIndex(rowComponent, *cursor);
if (subdomainLevel != 0)
globalRowIndex += rStartInterior;
for (unsigned int k = 0; k < colsOther.size(); k++)
colsOther[k] = colCoarseSpace->getMatIndex(j, colsOther[k]);
for (unsigned int i = 0; i < colsOther.size(); i++)
colsOther[i] =
colCoarseSpace->getMatIndex(colComponent, colsOther[i]);
// matIntCoarse
MatSetValues(getMatInteriorCoarseByComponent(i),
MatSetValues(getMatInteriorCoarseByComponent(rowComponent),
1, &globalRowIndex, colsOther.size(),
&(colsOther[0]), &(valuesOther[0]), ADD_VALUES);
}
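All insertions go through MatSetValues with ADD_VALUES, so contributions to the same global entry, whether from several element matrices or from several ranks near partition boundaries, accumulate. A self-contained sketch with illustrative indices:

    // Add one row with three entries; repeated calls targeting the same
    // (row, column) pair sum up because of ADD_VALUES.
    PetscInt row = 5;
    PetscInt cols[3] = {3, 5, 7};
    PetscScalar vals[3] = {-1.0, 2.0, -1.0};
    MatSetValues(A, 1, &row, 3, cols, vals, ADD_VALUES);
    // The matrix must be assembled before use:
    MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY);
    MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY);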
......