Commit 5574b023 authored by Thomas Witkowski

Fixed some small problems for FETI-DP with mixed finite elements.

parent e8fa1fc3
......@@ -348,6 +348,9 @@ namespace AMDiS {
TraverseStack stack;
ElInfo *elInfo = stack.traverseFirst(fe_space->getMesh(), level, flag);
double a = 0.0;
double b = 0.0;
while (elInfo) {
elinfo = elInfo;
......@@ -364,18 +367,22 @@ namespace AMDiS {
int_uh_vec += quadFast->getWeight(i) * uh_vec[i];
}
a += det * int_u_vec - det * int_uh_vec;
b += fabs(det * int_u_vec - det * int_uh_vec);
l2_err_el = pow(fabs(det * int_u_vec - det * int_uh_vec), 2.0);
l2Err2 += l2_err_el;
maxErr = std::max(maxErr, l2_err_el);
// if (writeInLeafData)
// elInfo->getElement()->setEstimation(l2_err_el, component);
if (writeInLeafData)
elInfo->getElement()->setEstimation(l2_err_el, component);
estMap[elInfo->getElement()->getIndex()] = l2_err_el;
elInfo = stack.traverseNext(elInfo);
}
MSG("L2ERR values %e %e %e %e\n", a, b, l2Err2, sqrt(l2Err2));
delete [] u_vec;
if (max)
......
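
For illustration: the hunk above adds a signed sum a (which can cancel across elements) and an absolute sum b (which cannot) of the per-element integral differences next to the existing squared L2 error, and prints all of them via MSG. Below is a minimal standalone sketch of that accumulation pattern, with the AMDiS mesh traversal replaced by made-up per-element data; all names and values are illustrative and not part of the commit.

#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

int main()
{
  // Stand-ins for det(elInfo) and the quadrature sums of u and uh; in the
  // code above these come from the element traversal and quadFast.
  struct ElemData { double det, int_u, int_uh; };
  std::vector<ElemData> elems = { {0.5, 1.00, 0.98}, {0.5, 0.80, 0.83} };

  double a = 0.0;       // signed sum of the element differences
  double b = 0.0;       // sum of absolute element differences
  double l2Err2 = 0.0;  // squared L2 error, summed over elements
  double maxErr = 0.0;  // largest single element contribution

  for (const ElemData &el : elems) {
    double diff = el.det * el.int_u - el.det * el.int_uh;
    a += diff;
    b += std::fabs(diff);
    double l2_err_el = std::pow(std::fabs(diff), 2.0);
    l2Err2 += l2_err_el;
    maxErr = std::max(maxErr, l2_err_el);
  }

  std::printf("L2ERR values %e %e %e %e\n", a, b, l2Err2, std::sqrt(l2Err2));
  return 0;
}
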
......@@ -113,6 +113,7 @@ namespace AMDiS {
void ParallelDofMapping::init(MPI::Intracomm *m,
vector<const FiniteElemSpace*> &fe,
vector<const FiniteElemSpace*> &uniqueFe,
bool needGlobalMapping,
bool bNonLocalDofs)
{
......@@ -120,17 +121,13 @@ namespace AMDiS {
mpiComm = m;
feSpaces = fe;
feSpacesUnique = uniqueFe;
hasNonLocalDofs = bNonLocalDofs;
// === Create a set of unique FE spaces. ===
for (unsigned int i = 0; i < feSpaces.size(); i++)
feSpacesUnique.insert(feSpaces[i]);
// === Init the mapping for all different FE spaces. ===
for (std::set<const FiniteElemSpace*>::iterator it = feSpacesUnique.begin();
for (vector<const FiniteElemSpace*>::iterator it = feSpacesUnique.begin();
it != feSpacesUnique.end(); ++it) {
addFeSpace(*it);
data[*it].setNeedGlobalMapping(needGlobalMapping);
......@@ -143,7 +140,7 @@ namespace AMDiS {
{
FUNCNAME("ParallelDofMapping::clear()");
for (std::set<const FiniteElemSpace*>::iterator it = feSpacesUnique.begin();
for (vector<const FiniteElemSpace*>::iterator it = feSpacesUnique.begin();
it != feSpacesUnique.end(); ++it)
data[*it].clear();
......@@ -164,7 +161,7 @@ namespace AMDiS {
recvDofs = &pRecv;
// Add the DOF communicator also to all FE space DOF mappings.
for (std::set<const FiniteElemSpace*>::iterator it = feSpacesUnique.begin();
for (vector<const FiniteElemSpace*>::iterator it = feSpacesUnique.begin();
it != feSpacesUnique.end(); ++it)
data[*it].setDofComm(pSend, pRecv);
}
......@@ -244,7 +241,7 @@ namespace AMDiS {
FUNCNAME("ParallelDofMapping::update()");
// First, update all FE space DOF mappings.
for (std::set<const FiniteElemSpace*>::iterator it = feSpacesUnique.begin();
for (vector<const FiniteElemSpace*>::iterator it = feSpacesUnique.begin();
it != feSpacesUnique.end(); ++it)
data[*it].update();
......
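
The hunks above move construction of the unique FE-space list out of ParallelDofMapping: init() now receives uniqueFe from the caller, stores it directly in feSpacesUnique, and every loop iterates that vector instead of the former std::set. A small sketch of one way a caller could build such a duplicate-free list while keeping the component order; the helper name and the stub type are illustrative, not AMDiS API.

#include <algorithm>
#include <vector>

// Stub standing in for AMDiS's FiniteElemSpace; only pointer identity matters.
struct FiniteElemSpace {};

// Build a duplicate-free list from the per-component FE spaces, preserving
// the order of first appearance.
std::vector<const FiniteElemSpace*>
makeUniqueFeSpaces(const std::vector<const FiniteElemSpace*> &feSpaces)
{
  std::vector<const FiniteElemSpace*> unique;
  for (const FiniteElemSpace *fe : feSpaces)
    if (std::find(unique.begin(), unique.end(), fe) == unique.end())
      unique.push_back(fe);
  return unique;
}

int main()
{
  FiniteElemSpace velocity, pressure;
  // Mixed problem: several components share the velocity FE space.
  std::vector<const FiniteElemSpace*> feSpaces = { &velocity, &velocity, &pressure };
  std::vector<const FiniteElemSpace*> uniqueFe = makeUniqueFeSpaces(feSpaces);
  // uniqueFe now holds { &velocity, &pressure }.
  return 0;
}
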
......@@ -283,6 +283,9 @@ namespace AMDiS {
* \param[in] m MPI communicator.
* \param[in] fe The FE spaces of all components of the
* PDE to be solved.
* \param[in] uniqueFe Unique list of FE spaces, i.e., any two
* elements of this list are distinct.
* \param[in] needGlobalMapping If true, the mapping computes also a global
* index for the DOFs.
* \param[in] bNonLocalDofs If true, at least one rank's mapping con-
......@@ -290,6 +293,7 @@ namespace AMDiS {
*/
void init(MPI::Intracomm *m,
vector<const FiniteElemSpace*> &fe,
vector<const FiniteElemSpace*> &uniqueFe,
bool needGlobalMapping,
bool bNonLocalDofs);
......@@ -404,7 +408,7 @@ namespace AMDiS {
/// The set of all FE spaces. It uniquely contains all different FE spaces
/// from \ref feSpaces.
std::set<const FiniteElemSpace*> feSpacesUnique;
vector<const FiniteElemSpace*> feSpacesUnique;
/// Number of DOFs owned by rank.
int nRankDofs;
......
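
To make the parameter roles from the doxygen comment above concrete, here is a compilable skeleton of the new init() signature with stand-in types; MPI::Intracomm and FiniteElemSpace are stubbed, and the body only mirrors the assignments visible in the ParallelDofMapping.cc hunk, not the real implementation.

#include <vector>

namespace MPI { struct Intracomm {}; }   // stub, not the real MPI C++ binding
struct FiniteElemSpace {};               // stub

struct ParallelDofMapping {
  MPI::Intracomm *mpiComm;
  std::vector<const FiniteElemSpace*> feSpaces;        // one entry per component
  std::vector<const FiniteElemSpace*> feSpacesUnique;  // duplicate-free list
  bool hasNonLocalDofs;

  void init(MPI::Intracomm *m,
            std::vector<const FiniteElemSpace*> &fe,
            std::vector<const FiniteElemSpace*> &uniqueFe,
            bool needGlobalMapping,
            bool bNonLocalDofs)
  {
    // Mirrors the assignments shown above; the per-FE-space setup
    // (addFeSpace, setNeedGlobalMapping) is omitted in this sketch.
    mpiComm = m;
    feSpaces = fe;
    feSpacesUnique = uniqueFe;
    hasNonLocalDofs = bNonLocalDofs;
    (void) needGlobalMapping;
  }
};

int main()
{
  MPI::Intracomm comm;
  FiniteElemSpace velocity, pressure;
  std::vector<const FiniteElemSpace*> feSpaces = { &velocity, &velocity, &pressure };
  std::vector<const FiniteElemSpace*> uniqueFe = { &velocity, &pressure };

  ParallelDofMapping primalDofMap;
  // Same flag pattern as primalDofMap.init(...) in PetscSolverFeti.cc below:
  // a global DOF index is needed, and other ranks' DOFs may appear.
  primalDofMap.init(&comm, feSpaces, uniqueFe, true, true);
  return 0;
}
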
......@@ -216,9 +216,6 @@ namespace AMDiS {
{
FUNCNAME("PetscSolverFeti::updateDofData()");
TEST_EXIT(meshDistributor->getFeSpace()->getBasisFcts()->getDegree() == 1)
("Works for linear basis functions only!\n");
primalDofMap.clear();
dualDofMap.clear();
lagrangeMap.clear();
......@@ -227,6 +224,7 @@ namespace AMDiS {
for (unsigned int i = 0; i < meshDistributor->getFeSpaces().size(); i++) {
const FiniteElemSpace *feSpace = meshDistributor->getFeSpace(i);
createPrimals(feSpace);
createDuals(feSpace);
createLagrange(feSpace);
......@@ -247,14 +245,15 @@ namespace AMDiS {
for (unsigned int i = 0; i < meshDistributor->getFeSpaces().size(); i++) {
const FiniteElemSpace *feSpace = meshDistributor->getFeSpace(i);
MSG("FETI-DP data for %d-ith FE space:\n", i);
MSG("nRankPrimals = %d nOverallPrimals = %d\n",
MSG(" nRankPrimals = %d nOverallPrimals = %d\n",
primalDofMap[feSpace].nRankDofs, primalDofMap[feSpace].nOverallDofs);
MSG("nRankDuals = %d nOverallDuals = %d\n",
MSG(" nRankDuals = %d nOverallDuals = %d\n",
dualDofMap[feSpace].nRankDofs, dualDofMap[feSpace].nOverallDofs);
MSG("nRankLagrange = %d nOverallLagrange = %d\n",
MSG(" nRankLagrange = %d nOverallLagrange = %d\n",
lagrangeMap[feSpace].nRankDofs, lagrangeMap[feSpace].nOverallDofs);
TEST_EXIT_DBG(localDofMap[feSpace].size() + primalDofMap[feSpace].size() ==
......@@ -904,12 +903,17 @@ namespace AMDiS {
vector<const FiniteElemSpace*> feSpaces = getFeSpaces(mat);
primalDofMap.init(mpiComm, feSpaces, true, true);
dualDofMap.init(mpiComm, feSpaces, false, false);
lagrangeMap.init(mpiComm, feSpaces, true, true);
localDofMap.init(mpiComm, feSpaces, false, false);
primalDofMap.init(mpiComm, feSpaces, meshDistributor->getFeSpaces(),
true, true);
dualDofMap.init(mpiComm, feSpaces, meshDistributor->getFeSpaces(),
false, false);
lagrangeMap.init(mpiComm, feSpaces, meshDistributor->getFeSpaces(),
true, true);
localDofMap.init(mpiComm, feSpaces, meshDistributor->getFeSpaces(),
false, false);
if (fetiPreconditioner == FETI_DIRICHLET)
interiorDofMap.init(mpiComm, feSpaces, false, false);
interiorDofMap.init(mpiComm, feSpaces, meshDistributor->getFeSpaces(),
false, false);
updateDofData();
......@@ -920,23 +924,23 @@ namespace AMDiS {
int nRowsRankPrimal = primalDofMap.getRankDofs();
int nRowsOverallPrimal = primalDofMap.getOverallDofs();
MatCreateSeqAIJ(PETSC_COMM_SELF, nRowsRankB, nRowsRankB, 30, PETSC_NULL,
MatCreateSeqAIJ(PETSC_COMM_SELF, nRowsRankB, nRowsRankB, 60, PETSC_NULL,
&mat_b_b);
MatCreateMPIAIJ(PETSC_COMM_WORLD,
nRowsRankPrimal, nRowsRankPrimal,
nRowsOverallPrimal, nRowsOverallPrimal,
30, PETSC_NULL, 30, PETSC_NULL, &mat_primal_primal);
60, PETSC_NULL, 60, PETSC_NULL, &mat_primal_primal);
MatCreateMPIAIJ(PETSC_COMM_WORLD,
nRowsRankB, nRowsRankPrimal,
nRowsOverallB, nRowsOverallPrimal,
30, PETSC_NULL, 30, PETSC_NULL, &mat_b_primal);
60, PETSC_NULL, 60, PETSC_NULL, &mat_b_primal);
MatCreateMPIAIJ(PETSC_COMM_WORLD,
nRowsRankPrimal, nRowsRankB,
nRowsOverallPrimal, nRowsOverallB,
15, PETSC_NULL, 15, PETSC_NULL, &mat_primal_b);
30, PETSC_NULL, 30, PETSC_NULL, &mat_primal_b);
// === Create matrices for FETI-DP preconditioner. ===
......@@ -945,22 +949,22 @@ namespace AMDiS {
int nRowsDual = dualDofMap.getRankDofs();
MatCreateSeqAIJ(PETSC_COMM_SELF,
nRowsDual, nRowsDual, 30, PETSC_NULL,
nRowsDual, nRowsDual, 60, PETSC_NULL,
&mat_duals_duals);
if (fetiPreconditioner == FETI_DIRICHLET) {
int nRowsInterior = interiorDofMap.getRankDofs();
MatCreateSeqAIJ(PETSC_COMM_SELF,
nRowsInterior, nRowsInterior, 30, PETSC_NULL,
nRowsInterior, nRowsInterior, 60, PETSC_NULL,
&mat_interior_interior);
MatCreateSeqAIJ(PETSC_COMM_SELF,
nRowsInterior, nRowsDual, 30, PETSC_NULL,
nRowsInterior, nRowsDual, 60, PETSC_NULL,
&mat_interior_duals);
MatCreateSeqAIJ(PETSC_COMM_SELF,
nRowsDual, nRowsInterior, 30, PETSC_NULL,
nRowsDual, nRowsInterior, 60, PETSC_NULL,
&mat_duals_interior);
}
}
......
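
The remaining hunks only raise the per-row nonzero estimates handed to PETSc (30 becomes 60, and 15 becomes 30 for mat_primal_b), presumably because mixed finite elements produce denser matrix rows than the linear scalar case the old values were tuned for. A minimal sketch of this preallocation pattern, assuming the PETSc 3.2-era API that matches the MatCreateSeqAIJ/MatCreateMPIAIJ calls above; the sizes are placeholders, not taken from the solver.

#include <petscmat.h>

int main(int argc, char **argv)
{
  PetscInitialize(&argc, &argv, PETSC_NULL, PETSC_NULL);

  // Placeholder sizes; in PetscSolverFeti these come from the DOF mappings.
  int nRowsLocal  = 100;   // rows owned by this rank
  int nRowsGlobal = 100;   // global rows (single-rank run for this sketch)

  // Sequential block: reserve 60 nonzeros per row instead of 30.
  Mat matLocal;
  MatCreateSeqAIJ(PETSC_COMM_SELF, nRowsLocal, nRowsLocal,
                  60, PETSC_NULL, &matLocal);

  // Distributed block: the estimate is given separately for the diagonal
  // and off-diagonal parts of each locally owned row.
  Mat matGlobal;
  MatCreateMPIAIJ(PETSC_COMM_WORLD,
                  nRowsLocal, nRowsLocal, nRowsGlobal, nRowsGlobal,
                  60, PETSC_NULL, 60, PETSC_NULL, &matGlobal);

  MatDestroy(&matLocal);
  MatDestroy(&matGlobal);
  PetscFinalize();
  return 0;
}

These counts are only upper estimates per row: underestimating forces PETSc into costly on-the-fly allocations during assembly, while overestimating mainly costs some extra memory, which is presumably why the commit simply doubles the old values.
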