Commit 0498e5fc authored by Thomas Witkowski

On the way to an efficient FETI-DP implementation for mixed finite elements.

parent 7bc84046
...
@@ -225,6 +225,7 @@ if(ENABLE_PARALLEL_DOMAIN)
 	${SOURCE_DIR}/parallel/DofComm.cc
 	${SOURCE_DIR}/parallel/CheckerPartitioner.cc
 	${SOURCE_DIR}/parallel/ElementObjectDatabase.cc
+	${SOURCE_DIR}/parallel/FeSpaceMapping.cc
 	${SOURCE_DIR}/parallel/MeshDistributor.cc
 	${SOURCE_DIR}/parallel/MeshManipulation.cc
 	${SOURCE_DIR}/parallel/MeshPartitioner.cc

parallel/FeSpaceMapping.cc  (new file)

//
// Software License for AMDiS
//
// Copyright (c) 2010 Dresden University of Technology
// All rights reserved.
// Authors: Simon Vey, Thomas Witkowski et al.
//
// This file is part of AMDiS
//
// See also license.opensource.txt in the distribution.


#include "parallel/FeSpaceMapping.h"

namespace AMDiS {

  using namespace std;

  void GlobalDofMap::clear()
  {
    dofMap.clear();
    nRankDofs = 0;
    nOverallDofs = 0;
    rStartDofs = 0;
  }


  void GlobalDofMap::update(bool add)
  {
    // Assign a consecutive rank-local index to every dof that was inserted
    // with the placeholder value -1.
    for (DofMapping::iterator it = dofMap.begin(); it != dofMap.end(); ++it)
      if (it->second == -1)
        it->second = nRankDofs++;

    // Compute the rank's global offset and the overall number of dofs.
    nOverallDofs = 0;
    rStartDofs = 0;
    mpi::getDofNumbering(*mpiComm, nRankDofs, rStartDofs, nOverallDofs);

    // If requested, shift the rank-local indices to globally unique ones.
    if (add)
      addOffset(rStartDofs);
  }


  void GlobalDofMap::addOffset(int offset)
  {
    for (DofMapping::iterator it = dofMap.begin(); it != dofMap.end(); ++it)
      it->second += offset;
  }

}
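The numbering scheme introduced here: insertRankDof() registers a dof under the placeholder index -1, update() replaces every placeholder with a consecutive rank-local index, and mpi::getDofNumbering() supplies the rank offset and the overall count. A minimal standalone sketch of that scheme, with the AMDiS types replaced by a plain std::map, and assuming getDofNumbering amounts to an exclusive prefix sum plus a global sum over all ranks:

// sketch.cc -- mimics GlobalDofMap::insertRankDof() followed by update(true).
// Assumption: mpi::getDofNumbering == exclusive MPI_Scan + MPI_Allreduce.
#include <mpi.h>
#include <map>
#include <cstdio>

int main(int argc, char** argv)
{
  MPI_Init(&argc, &argv);

  std::map<int, int> dofMap;   // stands in for AMDiS' DofMapping
  dofMap[10] = -1;             // insertRankDof(10): index not assigned yet
  dofMap[42] = -1;             // insertRankDof(42)

  // update(): turn all placeholders into consecutive rank-local indices
  int nRankDofs = 0;
  std::map<int, int>::iterator it;
  for (it = dofMap.begin(); it != dofMap.end(); ++it)
    if (it->second == -1)
      it->second = nRankDofs++;

  // assumed content of mpi::getDofNumbering(): rank offset and global count
  int rStartDofs = 0, nOverallDofs = 0;
  MPI_Scan(&nRankDofs, &rStartDofs, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
  rStartDofs -= nRankDofs;     // make the inclusive scan exclusive
  MPI_Allreduce(&nRankDofs, &nOverallDofs, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);

  // addOffset(rStartDofs): rank-local indices become globally unique
  for (it = dofMap.begin(); it != dofMap.end(); ++it)
    it->second += rStartDofs;

  std::printf("owns global dof indices [%d, %d) of %d\n",
              rStartDofs, rStartDofs + nRankDofs, nOverallDofs);

  MPI_Finalize();
  return 0;
}

Run on two ranks, rank 0 prints the range [0, 2) of 4 and rank 1 prints [2, 4) of 4.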

parallel/FeSpaceMapping.h
...
@@ -20,6 +20,7 @@
 /** \file FeSpaceMapping.h */

+#include <vector>
 #include <map>
 #include "parallel/MpiHelper.h"
 #include "parallel/ParallelTypes.h"
...
@@ -40,13 +41,7 @@ namespace AMDiS {
       rStartDofs(0)
     {}

-    void clear()
-    {
-      dofMap.clear();
-      nRankDofs = 0;
-      nOverallDofs = 0;
-      rStartDofs = 0;
-    }
+    void clear();

     DegreeOfFreedom operator[](DegreeOfFreedom d)
     {
...
@@ -61,8 +56,7 @@
       TEST_EXIT_DBG(dofMap.count(dof0) == 0)("Should not happen!\n");

-      dofMap[dof0] = (dof1 >= 0 ? dof1 : nRankDofs);
-      nRankDofs++;
+      dofMap[dof0] = dof1;
     }

     void insert(DegreeOfFreedom dof0, DegreeOfFreedom dof1)
...@@ -89,21 +83,9 @@ namespace AMDiS { ...@@ -89,21 +83,9 @@ namespace AMDiS {
return dofMap; return dofMap;
} }
void update(bool add = true) void update(bool add = true);
{
nOverallDofs = 0;
rStartDofs = 0;
mpi::getDofNumbering(*mpiComm, nRankDofs, rStartDofs, nOverallDofs);
if (add)
addOffset(rStartDofs);
}
void addOffset(int offset) void addOffset(int offset);
{
for (DofMapping::iterator it = dofMap.begin(); it != dofMap.end(); ++it)
it->second += offset;
}
private: private:
MPI::Intracomm* mpiComm; MPI::Intracomm* mpiComm;
...
@@ -147,6 +129,39 @@ namespace AMDiS {
       data.insert(make_pair(feSpace, T(mpiComm)));
     }

+    int getRankDofs(vector<const FiniteElemSpace*> &feSpaces)
+    {
+      int result = 0;
+      for (unsigned int i = 0; i < feSpaces.size(); i++) {
+        TEST_EXIT_DBG(data.count(feSpaces[i]))("Should not happen!\n");
+        result += data.find(feSpaces[i])->second.nRankDofs;
+      }
+      return result;
+    }
+
+    int getOverallDofs(vector<const FiniteElemSpace*> &feSpaces)
+    {
+      int result = 0;
+      for (unsigned int i = 0; i < feSpaces.size(); i++) {
+        TEST_EXIT_DBG(data.count(feSpaces[i]))("Should not happen!\n");
+        result += data.find(feSpaces[i])->second.nOverallDofs;
+      }
+      return result;
+    }
+
+    int getStartDofs(vector<const FiniteElemSpace*> &feSpaces)
+    {
+      int result = 0;
+      for (unsigned int i = 0; i < feSpaces.size(); i++) {
+        TEST_EXIT_DBG(data.count(feSpaces[i]))("Should not happen!\n");
+        result += data.find(feSpaces[i])->second.rStartDofs;
+      }
+      return result;
+    }

   private:
     MPI::Intracomm* mpiComm;
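The new getRankDofs/getOverallDofs/getStartDofs accessors are the heart of the mixed-element bookkeeping: a PETSc object that couples several FE spaces (say, velocity and pressure of a Taylor-Hood discretization) must be sized by summing per-space counts instead of multiplying a single space's count by nComponents. A standalone sketch with hypothetical numbers:

// aggregate.cc -- what the accessors compute, with GlobalDofMap reduced
// to its three public counters and made-up per-space sizes.
#include <cstdio>
#include <vector>

struct Counts { int nRankDofs; int nOverallDofs; int rStartDofs; };

int main()
{
  Counts velocity = { 320, 1200, 280 };  // hypothetical map of one FE space
  Counts pressure = { 90, 350, 75 };     // hypothetical map of another one

  std::vector<Counts> feSpaces;
  feSpaces.push_back(velocity);
  feSpaces.push_back(pressure);

  // what getRankDofs()/getOverallDofs() compute: sums over all FE spaces
  int nRank = 0, nOverall = 0;
  for (unsigned int i = 0; i < feSpaces.size(); i++) {
    nRank += feSpaces[i].nRankDofs;
    nOverall += feSpaces[i].nOverallDofs;
  }

  // these sums replace the old "nRankDofs * nComponents" sizing, which is
  // only correct if all components share one FE space
  std::printf("local size = %d, global size = %d\n", nRank, nOverall);
  return 0;
}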

parallel/PetscSolverFeti.cc
...
@@ -216,22 +216,21 @@
     TEST_EXIT(meshDistributor->getFeSpace()->getBasisFcts()->getDegree() == 1)
       ("Works for linear basis functions only!\n");

-    createPrimals();
-
-    createDuals();
-
-    createLagrange();
-
-    createIndexB();
+    for (unsigned int i = 0; i < meshDistributor->getFeSpaces().size(); i++) {
+      const FiniteElemSpace *feSpace = meshDistributor->getFeSpace(i);
+      createPrimals(feSpace);
+      createDuals(feSpace);
+      createLagrange(feSpace);
+      createIndexB(feSpace);
+    }
   }

-  void PetscSolverFeti::createPrimals()
+  void PetscSolverFeti::createPrimals(const FiniteElemSpace *feSpace)
   {
     FUNCNAME("PetscSolverFeti::createPrimals()");

-    const FiniteElemSpace *feSpace = meshDistributor->getFeSpace(0);
-
     // === Define all vertices on the interior boundaries of the macro mesh ===
     // === to be primal variables.                                          ===
...
@@ -313,12 +312,10 @@
   }

-  void PetscSolverFeti::createDuals()
+  void PetscSolverFeti::createDuals(const FiniteElemSpace *feSpace)
   {
     FUNCNAME("PetscSolverFeti::createDuals()");

-    const FiniteElemSpace *feSpace = meshDistributor->getFeSpace(0);
-
     // === Create for each dual node that is owned by the rank, the set ===
     // === of ranks that contain this node (denoted by W(x_j)).         ===
...
@@ -377,15 +374,8 @@
     dualDofMap.addFeSpace(feSpace);

-    int nRankAllDofs = feSpace->getAdmin()->getUsedDofs();
-    nRankB = nRankAllDofs - primalDofMap[feSpace].size();
-    nOverallB = 0;
-    rStartB = 0;
-    mpi::getDofNumbering(meshDistributor->getMpiComm(),
-                         nRankB, rStartB, nOverallB);
-
     DofContainer allBoundaryDofs;
     meshDistributor->getAllBoundaryDofs(feSpace, allBoundaryDofs);

-    int nRankInteriorDofs = nRankAllDofs - allBoundaryDofs.size();
-
     for (DofContainer::iterator it = allBoundaryDofs.begin();
          it != allBoundaryDofs.end(); ++it)
...
@@ -393,24 +383,20 @@
       dualDofMap[feSpace].insertRankDof(**it);

     dualDofMap[feSpace].update(false);
-    dualDofMap[feSpace].addOffset(rStartB + nRankInteriorDofs);

     MSG("nRankDuals = %d nOverallDuals = %d\n",
         dualDofMap[feSpace].nRankDofs,
         dualDofMap[feSpace].nOverallDofs);
-
-    nLocalDuals = dualDofMap[feSpace].size();
   }

-  void PetscSolverFeti::createLagrange()
+  void PetscSolverFeti::createLagrange(const FiniteElemSpace *feSpace)
   {
     FUNCNAME("PetscSolverFeti::createLagrange()");

     // === Reserve for each dual node, on the rank that owns this node, the ===
     // === appropriate number of Lagrange constraints.                      ===

-    const FiniteElemSpace *feSpace = meshDistributor->getFeSpace(0);
-
     lagrangeMap.addFeSpace(feSpace);
     int nRankLagrange = 0;
...
@@ -473,11 +459,10 @@
   }

-  void PetscSolverFeti::createIndexB()
+  void PetscSolverFeti::createIndexB(const FiniteElemSpace *feSpace)
   {
     FUNCNAME("PetscSolverFeti::createIndexB()");

-    const FiniteElemSpace *feSpace = meshDistributor->getFeSpace(0);
-
     localDofMap.addFeSpace(feSpace);
     DOFAdmin* admin = feSpace->getAdmin();
...
@@ -495,23 +480,28 @@
       }
     }

-    localDofMap[feSpace].update(false);
-
     TEST_EXIT_DBG(nLocalInterior +
                   primalDofMap[feSpace].size() +
-                  nLocalDuals ==
+                  dualDofMap[feSpace].size() ==
                   static_cast<unsigned int>(admin->getUsedDofs()))
      ("Should not happen!\n");

     // === And finally, add the global indicies of all dual nodes. ===

     for (DofMapping::iterator it = dualDofMap[feSpace].getMap().begin();
          it != dualDofMap[feSpace].getMap().end(); ++it)
-      localDofMap[feSpace].insert(it->first, it->second - rStartB);
+      localDofMap[feSpace].insertRankDof(it->first);
+      // localDofMap[feSpace].insertRankDof(it->first);
+
+    localDofMap[feSpace].update(false);
+
+    dualDofMap[feSpace].addOffset(localDofMap[feSpace].rStartDofs +
+                                  nLocalInterior);
   }
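The net effect of the reordering above: localDofMap numbers a rank's interior dofs first, appends its dual dofs, and update(false) then gives each rank one contiguous global range of "B" indices; dualDofMap is finally shifted into the tail of that range. A standalone sketch of the resulting layout, with hypothetical per-rank counts:

// blayout.cc -- the [ interior | dual ] index layout established by
// createIndexB(), illustrated with made-up numbers for one rank.
#include <cstdio>

int main()
{
  int nLocalInterior = 100;  // interior dofs of this rank
  int nLocalDuals    = 20;   // dual (boundary, non-primal) dofs of this rank
  int rStartDofs     = 360;  // offset from localDofMap[feSpace].update(false)

  // interior dof k -> global B index rStartDofs + k
  // dual dof k     -> global B index rStartDofs + nLocalInterior + k,
  //                   i.e. dualDofMap.addOffset(rStartDofs + nLocalInterior)
  std::printf("interior B range: [%d, %d)\n",
              rStartDofs, rStartDofs + nLocalInterior);
  std::printf("dual B range:     [%d, %d)\n",
              rStartDofs + nLocalInterior,
              rStartDofs + nLocalInterior + nLocalDuals);
  return 0;
}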

-  void PetscSolverFeti::createMatLagrange()
+  void PetscSolverFeti::createMatLagrange(vector<const FiniteElemSpace*> &feSpaces)
   {
     FUNCNAME("PetscSolverFeti::createMatLagrange()");
...
@@ -520,10 +510,11 @@
     // === Create distributed matrix for Lagrange constraints. ===

     MatCreateMPIAIJ(PETSC_COMM_WORLD,
-                    lagrangeMap[feSpace].nRankDofs * nComponents,
-                    nRankB * nComponents,
+                    lagrangeMap.getRankDofs(feSpaces),
+                    // lagrangeMap[feSpace].nRankDofs * nComponents,
+                    localDofMap[feSpace].nRankDofs * nComponents,
                     lagrangeMap[feSpace].nOverallDofs * nComponents,
-                    nOverallB * nComponents,
+                    localDofMap[feSpace].nOverallDofs * nComponents,
                     2, PETSC_NULL, 2, PETSC_NULL,
                     &mat_lagrange);
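For context (an inference, not stated in the commit): each row of mat_lagrange enforces continuity of one dual dof between one pair of subdomains, which would explain the preallocation of two nonzeros per row (a +1/-1 pair). With fully redundant multipliers, a dual node shared by the rank set W(x_j) (see createDuals above) yields one constraint per rank pair. A small counting sketch with hypothetical sharer counts:

// constraints.cc -- rows of mat_lagrange contributed by dual nodes,
// assuming fully redundant Lagrange multipliers (one per rank pair).
#include <cstdio>

int main()
{
  int sharers[3] = { 2, 3, 4 };  // hypothetical |W(x_j)| of three dual nodes
  int nConstraints = 0;
  for (int j = 0; j < 3; j++)
    nConstraints += sharers[j] * (sharers[j] - 1) / 2;
  std::printf("Lagrange constraints: %d\n", nConstraints);  // 1 + 3 + 6 = 10
  return 0;
}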
...
@@ -585,7 +576,7 @@
     schurPrimalData.fetiSolver = this;

     VecCreateMPI(PETSC_COMM_WORLD,
-                 nRankB * nComponents, nOverallB * nComponents,
+                 localDofMap[feSpace].nRankDofs * nComponents, localDofMap[feSpace].nOverallDofs * nComponents,
                 &(schurPrimalData.tmp_vec_b));
     VecCreateMPI(PETSC_COMM_WORLD,
                 primalDofMap[feSpace].nRankDofs * nComponents, primalDofMap[feSpace].nOverallDofs * nComponents,
...
@@ -612,8 +603,8 @@
     int nRowsRankPrimal = primalDofMap[feSpace].nRankDofs * nComponents;
     int nRowsOverallPrimal = primalDofMap[feSpace].nOverallDofs * nComponents;
-    int nRowsRankB = nRankB * nComponents;
-    int nRowsOverallB = nOverallB * nComponents;
+    int nRowsRankB = localDofMap[feSpace].nRankDofs * nComponents;
+    int nRowsOverallB = localDofMap[feSpace].nOverallDofs * nComponents;

     Mat matBPi;
     MatCreateMPIAIJ(PETSC_COMM_WORLD,
...@@ -628,8 +619,8 @@ namespace AMDiS { ...@@ -628,8 +619,8 @@ namespace AMDiS {
map<int, vector<pair<int, double> > > mat_b_primal_cols; map<int, vector<pair<int, double> > > mat_b_primal_cols;
for (int i = 0; i < nRankB * nComponents; i++) { for (int i = 0; i < localDofMap[feSpace].nRankDofs * nComponents; i++) {
PetscInt row = rStartB * nComponents + i; PetscInt row = localDofMap[feSpace].rStartDofs * nComponents + i;
MatGetRow(mat_b_primal, row, &nCols, &cols, &values); MatGetRow(mat_b_primal, row, &nCols, &cols, &values);
for (int j = 0; j < nCols; j++) for (int j = 0; j < nCols; j++)
...
@@ -647,7 +638,7 @@
     for (map<int, vector<pair<int, double> > >::iterator it = mat_b_primal_cols.begin();
          it != mat_b_primal_cols.end(); ++it) {
       Vec tmpVec;
-      VecCreateSeq(PETSC_COMM_SELF, nRankB * nComponents, &tmpVec);
+      VecCreateSeq(PETSC_COMM_SELF, localDofMap[feSpace].nRankDofs * nComponents, &tmpVec);

       for (unsigned int i = 0; i < it->second.size(); i++)
         VecSetValue(tmpVec,
...
@@ -660,9 +651,9 @@
       PetscScalar *tmpValues;
       VecGetArray(tmpVec, &tmpValues);
-      for (int i = 0; i < nRankB * nComponents; i++)
+      for (int i = 0; i < localDofMap[feSpace].nRankDofs * nComponents; i++)
         MatSetValue(matBPi,
-                    rStartB * nComponents + i,
+                    localDofMap[feSpace].rStartDofs * nComponents + i,
                     it->first,
                     tmpValues[i],
                     ADD_VALUES);
...
@@ -731,7 +722,7 @@
     fetiData.ksp_schur_primal = &ksp_schur_primal;

     VecCreateMPI(PETSC_COMM_WORLD,
-                 nRankB * nComponents, nOverallB * nComponents,
+                 localDofMap[feSpace].nRankDofs * nComponents, localDofMap[feSpace].nOverallDofs * nComponents,
                 &(fetiData.tmp_vec_b));
     VecCreateMPI(PETSC_COMM_WORLD,
                 lagrangeMap[feSpace].nRankDofs * nComponents,
...
@@ -779,7 +770,7 @@
     fetiDirichletPreconData.ksp_interior = &ksp_interior;

     VecCreateMPI(PETSC_COMM_WORLD,
-                 nRankB * nComponents, nOverallB * nComponents,
+                 localDofMap[feSpace].nRankDofs * nComponents, localDofMap[feSpace].nOverallDofs * nComponents,
                 &(fetiDirichletPreconData.tmp_vec_b));
     MatGetVecs(mat_duals_duals, PETSC_NULL, &(fetiDirichletPreconData.tmp_vec_duals0));
     MatGetVecs(mat_duals_duals, PETSC_NULL, &(fetiDirichletPreconData.tmp_vec_duals1));
...
@@ -797,7 +788,7 @@
     fetiLumpedPreconData.mat_duals_duals = &mat_duals_duals;

     VecCreateMPI(PETSC_COMM_WORLD,
-                 nRankB * nComponents, nOverallB * nComponents,
+                 localDofMap[feSpace].nRankDofs * nComponents, localDofMap[feSpace].nOverallDofs * nComponents,
                 &(fetiLumpedPreconData.tmp_vec_b));
     MatGetVecs(mat_duals_duals, PETSC_NULL, &(fetiLumpedPreconData.tmp_vec_duals0));
     MatGetVecs(mat_duals_duals, PETSC_NULL, &(fetiLumpedPreconData.tmp_vec_duals1));
...
@@ -958,6 +949,8 @@
   {
     FUNCNAME("PetscSolverFeti::fillPetscMatrix()");

+    vector<const FiniteElemSpace*> feSpaces = getFeSpaces(mat);
+
     const FiniteElemSpace *feSpace = meshDistributor->getFeSpace(0);
     nComponents = mat->getSize();
...
@@ -971,12 +964,12 @@
     // === Create matrices for the FETI-DP method. ===

-    int nRowsRankB = nRankB * nComponents;
-    int nRowsOverallB = nOverallB * nComponents;
+    int nRowsRankB = localDofMap[feSpace].nRankDofs * nComponents;
+    int nRowsOverallB = localDofMap[feSpace].nOverallDofs * nComponents;
     int nRowsRankPrimal = primalDofMap[feSpace].nRankDofs * nComponents;
     int nRowsOverallPrimal = primalDofMap[feSpace].nOverallDofs * nComponents;
     int nRowsInterior = nLocalInterior * nComponents;
-    int nRowsDual = nLocalDuals * nComponents;
+    int nRowsDual = dualDofMap[feSpace].size() * nComponents;

     MatCreateSeqAIJ(PETSC_COMM_SELF, nRowsRankB, nRowsRankB, 30, PETSC_NULL,
                     &mat_b_b);
...
@@ -1094,7 +1087,7 @@
              // Column is not a primal variable.
              int colIndex =
-               (localDofMap[feSpace][col(*icursor)] + rStartB) * nComponents + j;
+               (localDofMap[feSpace][col(*icursor)] + localDofMap[feSpace].rStartDofs) * nComponents + j;

              if (rowPrimal) {
                colsOther.push_back(colIndex);
...
@@ -1157,13 +1150,13 @@
          } else {
            int localRowIndex = localDofMap[feSpace][*cursor] * nComponents + i;

            for (unsigned int k = 0; k < cols.size(); k++)
-             cols[k] -= rStartB * nComponents;
+             cols[k] -= localDofMap[feSpace].rStartDofs * nComponents;

            MatSetValues(mat_b_b, 1, &localRowIndex, cols.size(),
                         &(cols[0]), &(values[0]), ADD_VALUES);

            if (colsOther.size()) {
              int globalRowIndex =
-               (localDofMap[feSpace][*cursor] + rStartB) * nComponents + i;
+               (localDofMap[feSpace][*cursor] + localDofMap[feSpace].rStartDofs) * nComponents + i;

              MatSetValues(mat_b_primal, 1, &globalRowIndex, colsOther.size(),
                           &(colsOther[0]), &(valuesOther[0]), ADD_VALUES);
            }
...
@@ -1258,7 +1251,7 @@
     // === Create and fill PETSc matrix for Lagrange constraints. ===

-    createMatLagrange();
+    createMatLagrange(feSpaces);

     // === Create solver for the non primal (thus local) variables. ===
...
@@ -1288,8 +1281,8 @@
     int nComponents = vec->getSize();

     VecCreateMPI(PETSC_COMM_WORLD,
-                 nRankB * nComponents,
-                 nOverallB * nComponents, &f_b);
+                 localDofMap[feSpace].nRankDofs * nComponents,
+                 localDofMap[feSpace].nOverallDofs * nComponents,