Commit 66a86905 authored by Thomas Witkowski

Refactor PETSc solver matrices and parallel VTK output: store the PETSc matrices in a
nested container mat[i][j] (replacing matIntInt, matCoarseCoarse, matIntCoarse and
matCoarseInt) so that several coarse spaces can be handled, rename the sequential DOF
matrix arguments to seqMat to avoid clashing with the new member, and pass explicit
component names to VtkWriter::writeParallelFile() instead of a component count.

parent 41a2b884
@@ -438,9 +438,12 @@ namespace AMDiS {
file << "</VTKFile>\n";
#if HAVE_PARALLEL_DOMAIN_AMDIS
if (MPI::COMM_WORLD.Get_rank() == 0)
if (MPI::COMM_WORLD.Get_rank() == 0) {
vector<string> componentNames;
componentNames.push_back("elvalue");
VtkWriter::writeParallelFile(fname + ".pvtu", MPI::COMM_WORLD.Get_size(),
fname, ".vtu", 1);
fname, ".vtu", componentNames);
}
#endif
}
}
......
@@ -140,11 +140,17 @@ namespace AMDiS {
vtkWriter.writeFile(fn + paraviewFileExt);
#if HAVE_PARALLEL_DOMAIN_AMDIS
if (MPI::COMM_WORLD.Get_rank() == 0)
if (MPI::COMM_WORLD.Get_rank() == 0) {
vector<string> componentNames;
for (unsigned int i = 0; i < dataCollectors.size(); i++)
componentNames.push_back(dataCollectors[i]->getValues()->getName());
vtkWriter.writeParallelFile(paraFilename + paraviewParallelFileExt,
MPI::COMM_WORLD.Get_size(),
filename, postfix,
dataCollectors.size());
filename,
postfix,
componentNames);
}
#endif
MSG("ParaView file written to %s\n", (fn + paraviewFileExt).c_str());
......
@@ -68,7 +68,7 @@ namespace AMDiS {
void VtkWriter::writeParallelFile(string name, int nRanks,
string fnPrefix, string fnPostfix,
int nComponents)
vector<string> &componentNames)
{
FUNCNAME("VtkWriter::writeParallelFile()");
@@ -94,9 +94,10 @@ namespace AMDiS {
<< " </PCells>\n";
file << " <PPointData>\n";
for (int i = 0; i < nComponents; i++)
file << " <PDataArray type=\"Float32\" Name=\"value"
<< i << "\" format=\"ascii\"/>\n";
for (unsigned int i = 0; i < componentNames.size(); i++)
file << " <PDataArray type=\"Float32\" Name=\""
<< componentNames[i]
<< "\" format=\"ascii\"/>\n";
file << " </PPointData>\n";
@@ -235,11 +236,16 @@ namespace AMDiS {
TEST_EXIT(sPos >= 0)("Failed to find file postfix!\n");
string name = filename.substr(0, sPos);
if (MPI::COMM_WORLD.Get_rank() == 0)
if (MPI::COMM_WORLD.Get_rank() == 0) {
vector<string> componentNames;
for (unsigned int i = 0; i < dcList.size(); i++)
componentNames.push_back(dcList[i]->getValues()->getName());
writer.writeParallelFile(name + ".pvtu",
MPI::COMM_WORLD.Get_size(),
name, ".vtu",
static_cast<int>(dcList.size()));
componentNames);
}
filename = name + "-p" + lexical_cast<string>(MPI::COMM_WORLD.Get_rank()) + "-.vtu";
}
......
@@ -55,7 +55,7 @@ namespace AMDiS {
/// Writes a pvtu file, which contains the links to all the rank files.
static void writeParallelFile(string name, int nRanks,
string fnPrefix, string fnPostfix,
int nComponents);
vector<string> &componentNames);
/// May be used to simply write ParaView files.
static void writeFile(DOFVector<double> *values,
......
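The two VtkWriter hunks above replace the plain component count by explicit component names, which end up as the Name attributes of the <PDataArray> tags in the .pvtu header. A minimal usage sketch of the revised call, modeled on the first hunk of this diff (an AMDiS parallel build is assumed; the helper function, the include path and the "elvalue" name are illustrative only):

  #include <string>
  #include <vector>
  #include "io/VtkWriter.h"   // AMDiS header, exact path assumed

  // Illustrative helper: rank 0 writes the .pvtu master file that links the
  // per-rank .vtu files and names every written component.
  void writeParallelHeader(const std::string &fname)
  {
    if (MPI::COMM_WORLD.Get_rank() == 0) {
      std::vector<std::string> componentNames;
      componentNames.push_back("elvalue");   // one entry per PPointData array
      AMDiS::VtkWriter::writeParallelFile(fname + ".pvtu",
                                          MPI::COMM_WORLD.Get_size(),
                                          fname, ".vtu",
                                          componentNames);
      // The generated header then contains, per component:
      //   <PDataArray type="Float32" Name="elvalue" format="ascii"/>
    }
  }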
@@ -178,37 +178,41 @@ namespace AMDiS {
inline Mat& getMatIntInt()
{
return matIntInt;
return mat[0][0];
// return matIntInt;
}
inline Mat& getMatCoarseCoarse()
{
FUNCNAME("PetscSolver::getMatCoarseCoarse()");
TEST_EXIT_DBG(coarseSpaceMap.size())
TEST_EXIT_DBG(coarseSpaceMap.size() && mat.size() > 1)
("Subdomain solver does not contain a coarse space!\n");
return matCoarseCoarse;
return mat[1][1];
// return matCoarseCoarse;
}
inline Mat& getMatIntCoarse()
{
FUNCNAME("PetscSolver::getMatIntCoarse()");
TEST_EXIT_DBG(coarseSpaceMap.size())
TEST_EXIT_DBG(coarseSpaceMap.size() && mat.size() > 1)
("Subdomain solver does not contain a coarse space!\n");
return matIntCoarse;
return mat[0][1];
// return matIntCoarse;
}
inline Mat& getMatCoarseInt()
{
FUNCNAME("PetscSolver::getMatCoarseInt()");
TEST_EXIT_DBG(coarseSpaceMap.size())
TEST_EXIT_DBG(coarseSpaceMap.size() && mat.size() > 1)
("Subdomain solver does not contain a coarse space!\n");
return matCoarseInt;
return mat[1][0];
// return matCoarseInt;
}
protected:
@@ -251,7 +255,7 @@ namespace AMDiS {
/// Parallel DOF mapping of the (optional) coarse space. Allows to define
/// different coarse spaces for different components.
std::map<int, ParallelDofMapping*> coarseSpaceMap;
map<int, ParallelDofMapping*> coarseSpaceMap;
int mpiRank;
@@ -260,7 +264,9 @@ namespace AMDiS {
MPI::Intracomm mpiCommLocal;
/// Petsc's matrix structure.
Mat matIntInt, matCoarseCoarse, matIntCoarse, matCoarseInt;
// Mat matIntInt, matCoarseCoarse, matIntCoarse, matCoarseInt;
vector<vector<Mat> > mat;
/// PETSc's vector structures for the rhs vector, the solution vector and a
/// temporary vector for calculating the final residuum.
......
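The PetscSolver.h hunks above are the heart of the change: the four individually named PETSc matrices become one nested container whose first index selects the row block and whose second index selects the column block. A short sketch of the indexing convention implied by the new accessors (class context abbreviated, the TEST_EXIT_DBG checks from the diff omitted):

  // Block layout of the new member (one coarse space shown; additional coarse
  // maps extend the same pattern with indices 2, 3, ...):
  //   mat[0][0]  interior x interior   (former matIntInt)
  //   mat[1][1]  coarse   x coarse     (former matCoarseCoarse)
  //   mat[1][0]  coarse   x interior   (former matCoarseInt)
  //   mat[0][1]  interior x coarse     (former matIntCoarse)
  vector<vector<Mat> > mat;

  Mat& getMatIntInt()       { return mat[0][0]; }
  Mat& getMatCoarseCoarse() { return mat[1][1]; }
  Mat& getMatIntCoarse()    { return mat[0][1]; }
  Mat& getMatCoarseInt()    { return mat[1][0]; }

The container is sized (nCoarseMap + 1) x (nCoarseMap + 1) in PetscSolverGlobalMatrix::fillPetscMatrixWithCoarseSpace() further down, where nCoarseMap is the number of distinct coarse-space mappings.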
@@ -1357,6 +1357,7 @@ namespace AMDiS {
createFetiData();
// === Create matrices for the FETI-DP method. ===
if (printTimings) {
......
@@ -199,7 +199,10 @@ namespace AMDiS {
inline bool isInterface(const FiniteElemSpace *feSpace,
DegreeOfFreedom dof)
{
if (feSpace == fullInterface)
return interfaceDofMap[feSpace].isSet(dof);
return false;
}
protected:
......
@@ -16,17 +16,22 @@
namespace AMDiS {
void PetscSolverGlobalBlockMatrix::fillPetscMatrix(Matrix<DOFMatrix*> *mat)
void PetscSolverGlobalBlockMatrix::fillPetscMatrix(Matrix<DOFMatrix*> *seqMat)
{
FUNCNAME("PetscSolverGlobalBlockMatrix::fillPetscMatrix()");
TEST_EXIT_DBG(meshDistributor)("No mesh distributor object defined!\n");
TEST_EXIT_DBG(interiorMap)("No parallel mapping object defined!\n");
TEST_EXIT_DBG(mat)("No DOF matrix defined!\n");
TEST_EXIT_DBG(seqMat)("No DOF matrix defined!\n");
mat.resize(1);
mat[0].resize(1);
Mat &matIntInt = mat[0][0];
double wtime = MPI::Wtime();
const FiniteElemSpace *feSpace = meshDistributor->getFeSpace(0);
nComponents = mat->getNumRows();
nComponents = seqMat->getNumRows();
int nRankRows = (*interiorMap)[feSpace].nRankDofs;
int nOverallRows = (*interiorMap)[feSpace].nOverallDofs;
@@ -63,9 +68,9 @@ namespace AMDiS {
for (int i = 0; i < nComponents; i++)
for (int j = 0; j < nComponents; j++)
if ((*mat)[i][j]) {
if ((*seqMat)[i][j]) {
int idx = componentInBlock[i] * nBlocks + componentInBlock[j];
setDofMatrix(nestMat[idx], (*mat)[i][j],
setDofMatrix(nestMat[idx], (*seqMat)[i][j],
compNthInBlock[i], compNthInBlock[j]);
}
@@ -178,7 +183,7 @@ namespace AMDiS {
if (nestMat[i] != PETSC_NULL)
MatDestroy(&(nestMat[i]));
MatDestroy(&matIntInt);
MatDestroy(&mat[0][0]);
KSPDestroy(&kspInterior);
}
@@ -196,14 +201,14 @@ namespace AMDiS {
void PetscSolverGlobalBlockMatrix::setDofMatrix(Mat& petscMat,
DOFMatrix* mat,
DOFMatrix* seqMat,
int dispRowBlock,
int dispColBlock)
{
FUNCNAME("PetscSolverGlobalBlockMatrix::setDofMatrix()");
TEST_EXIT(mat)("No DOFMatrix!\n");
TEST_EXIT(petscMat)("No PETSc matrix!\n");
TEST_EXIT(seqMat)("No DOFMatrix!\n");
const FiniteElemSpace *feSpace = meshDistributor->getFeSpace(0);
@@ -211,8 +216,8 @@ namespace AMDiS {
namespace traits = mtl::traits;
typedef DOFMatrix::base_matrix_type Matrix;
traits::col<Matrix>::type col(mat->getBaseMatrix());
traits::const_value<Matrix>::type value(mat->getBaseMatrix());
traits::col<Matrix>::type col(seqMat->getBaseMatrix());
traits::const_value<Matrix>::type value(seqMat->getBaseMatrix());
typedef traits::range_generator<row, Matrix>::type cursor_type;
typedef traits::range_generator<nz, cursor_type>::type icursor_type;
@@ -228,8 +233,8 @@ namespace AMDiS {
// === Traverse all rows of the dof matrix and insert row wise the values ===
// === to the PETSc matrix. ===
for (cursor_type cursor = begin<row>(mat->getBaseMatrix()),
cend = end<row>(mat->getBaseMatrix()); cursor != cend; ++cursor) {
for (cursor_type cursor = begin<row>(seqMat->getBaseMatrix()),
cend = end<row>(seqMat->getBaseMatrix()); cursor != cend; ++cursor) {
// Global index of the current row DOF.
int rowIndex = (*interiorMap)[feSpace][*cursor].global + dispRowIndex;
......
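For context, the row/nonzero traversal that setDofMatrix performs over the renamed seqMat argument, including the inner nonzero loop that the hunks above elide (a sketch only, assuming the typedefs and variables of the surrounding function; the MatSetValues bookkeeping is abbreviated):

  // Walk the sequential DOFMatrix row by row; within each row, visit every
  // stored nonzero. col(*icursor) and value(*icursor) give column and entry.
  for (cursor_type cursor = begin<row>(seqMat->getBaseMatrix()),
         cend = end<row>(seqMat->getBaseMatrix()); cursor != cend; ++cursor) {
    // Global index of the current row DOF (as in the hunk above).
    int rowIndex = (*interiorMap)[feSpace][*cursor].global + dispRowIndex;

    for (icursor_type icursor = begin<nz>(cursor),
           icend = end<nz>(cursor); icursor != icend; ++icursor) {
      int colDof   = col(*icursor);     // local column DOF of this nonzero
      double entry = value(*icursor);   // corresponding matrix value
      // ... map colDof to a global PETSc column index, collect (rowIndex,
      // colIndex, entry) and flush the row with one MatSetValues() call.
    }
  }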
@@ -17,26 +17,30 @@
namespace AMDiS {
void PetscSolverGlobalMatrix::fillPetscMatrix(Matrix<DOFMatrix*> *mat)
void PetscSolverGlobalMatrix::fillPetscMatrix(Matrix<DOFMatrix*> *seqMat)
{
FUNCNAME("PetscSolverGlobalMatrix::fillPetscMatrix()");
if (coarseSpaceMap.size()) {
updateSubdomainData();
fillPetscMatrixWithCoarseSpace(mat);
fillPetscMatrixWithCoarseSpace(seqMat);
return;
}
mat.resize(1);
mat[0].resize(1);
Mat &matIntInt = mat[0][0];
TEST_EXIT_DBG(meshDistributor)("No mesh distributor object defined!\n");
TEST_EXIT_DBG(interiorMap)("No parallel mapping object defined!\n");
TEST_EXIT_DBG(mat)("No DOF matrix defined!\n");
TEST_EXIT_DBG(seqMat)("No DOF matrix defined!\n");
double wtime = MPI::Wtime();
// === If required, recompute non zero structure of the matrix. ===
if (checkMeshChange(mat))
nnzInterior.create(mat, mpiCommGlobal, *interiorMap,
if (checkMeshChange(seqMat))
nnzInterior.create(seqMat, mpiCommGlobal, *interiorMap,
&(meshDistributor->getPeriodicMap()),
meshDistributor->getElementObjectDb());
@@ -73,11 +77,11 @@ namespace AMDiS {
// === Transfer values from DOF matrices to the PETSc matrix. ===
int nComponents = mat->getNumRows();
int nComponents = seqMat->getNumRows();
for (int i = 0; i < nComponents; i++)
for (int j = 0; j < nComponents; j++)
if ((*mat)[i][j])
setDofMatrix((*mat)[i][j], i, j);
if ((*seqMat)[i][j])
setDofMatrix((*seqMat)[i][j], i, j);
#if (DEBUG != 0)
MSG("Fill petsc matrix 2 needed %.5f seconds\n", MPI::Wtime() - wtime);
@@ -133,13 +137,49 @@ namespace AMDiS {
}
void PetscSolverGlobalMatrix::fillPetscMatrixWithCoarseSpace(Matrix<DOFMatrix*> *mat)
void PetscSolverGlobalMatrix::fillPetscMatrixWithCoarseSpace(Matrix<DOFMatrix*> *seqMat)
{
FUNCNAME("PetscSolverGlobalMatrix::fillPetscMatrixWithCoarseSpace()");
TEST_EXIT_DBG(interiorMap)("Should not happen!\n");
vector<const FiniteElemSpace*> feSpaces = getFeSpaces(mat);
vector<const FiniteElemSpace*> feSpaces = getFeSpaces(seqMat);
vector<ParallelDofMapping*> uniqueCoarseMap;
if (coarseSpaceMap.size()) {
TEST_EXIT_DBG(coarseSpaceMap.size() == seqMat->getSize())
("Wrong sizes %d %d\n", coarseSpaceMap.size(), seqMat->getSize());
std::set<ParallelDofMapping*> tmp;
for (map<int, ParallelDofMapping*>::iterator it = coarseSpaceMap.begin();
it != coarseSpaceMap.end(); ++it) {
if (tmp.count(it->second) == 0) {
tmp.insert(it->second);
uniqueCoarseMap.push_back(it->second);
}
}
}
int nCoarseMap = uniqueCoarseMap.size();
mat.resize(nCoarseMap + 1);
for (int i = 0; i < nCoarseMap + 1; i++)
mat[i].resize(nCoarseMap + 1);
vector<int> componentIthCoarseMap(coarseSpaceMap.size());
for (unsigned int i = 0; i < componentIthCoarseMap.size(); i++) {
bool found = false;
for (int j = 0; j < nCoarseMap; j++) {
if (coarseSpaceMap[i] == uniqueCoarseMap[j]) {
componentIthCoarseMap[i] = j;
found = true;
break;
}
}
TEST_EXIT_DBG(found)("Should not happen!\n");
}
int nRowsRankInterior = interiorMap->getRankDofs();
int nRowsOverallInterior = interiorMap->getOverallDofs();
@@ -147,8 +187,8 @@ namespace AMDiS {
// === If required, recompute non zero structure of the matrix. ===
bool localMatrix = (subdomainLevel == 0);
if (checkMeshChange(mat, localMatrix)) {
nnzInterior.create(mat, mpiCommGlobal, *interiorMap, NULL,
if (checkMeshChange(seqMat, localMatrix)) {
nnzInterior.create(seqMat, mpiCommGlobal, *interiorMap, NULL,
meshDistributor->getElementObjectDb(),
localMatrix);
@@ -162,40 +202,69 @@ namespace AMDiS {
}
}
mat.resize(nCoarseMap + 1);
if (localMatrix) {
MatCreateSeqAIJ(mpiCommLocal, nRowsRankInterior, nRowsRankInterior,
0, nnzInterior.dnnz,
&matIntInt);
&mat[0][0]);
} else {
MatCreateAIJ(mpiCommLocal,
nRowsRankInterior, nRowsRankInterior,
nRowsOverallInterior, nRowsOverallInterior,
0, nnzInterior.dnnz, 0, nnzInterior.onnz,
&matIntInt);
&mat[0][0]);
}
if (coarseSpaceMap.size()) {
int nRowsRankCoarse = coarseSpaceMap[0]->getRankDofs();
int nRowsOverallCoarse = coarseSpaceMap[0]->getOverallDofs();
for (int i = 0; i < nCoarseMap; i++) {
ParallelDofMapping* cMap = uniqueCoarseMap[i];
int nRowsRankCoarse = cMap->getRankDofs();
int nRowsOverallCoarse = cMap->getOverallDofs();
MatCreateAIJ(mpiCommGlobal,
nRowsRankCoarse, nRowsRankCoarse,
nRowsOverallCoarse, nRowsOverallCoarse,
0, nnzCoarse.dnnz, 0, nnzCoarse.onnz,
&matCoarseCoarse);
0, PETSC_NULL, 0, PETSC_NULL,
&mat[i + 1][i + 1]);
MSG("REMOVE THIS LINE WHEN FINISHED!\n");
MatSetOption(mat[i + 1][i + 1], MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);
for (int j = 0; j < nCoarseMap + 1; j++) {
int nRowsRankMat = (j == 0 ? nRowsRankInterior : uniqueCoarseMap[j - 1]->getRankDofs());
int nRowsOverallMat = (j == 0 ? nGlobalOverallInterior : uniqueCoarseMap[j - 1]->getOverallDofs());
MatCreateAIJ(mpiCommGlobal,
nRowsRankCoarse, nRowsRankInterior,
nRowsOverallCoarse, nGlobalOverallInterior,
0, nnzCoarseInt.dnnz, 0, nnzCoarseInt.onnz,
&matCoarseInt);
nRowsRankCoarse, nRowsRankMat,
nRowsOverallCoarse, nRowsOverallMat,
100, PETSC_NULL, 100, PETSC_NULL,
&mat[i + 1][j]);
MSG("REMOVE THIS LINE WHEN FINISHED!\n");
MatSetOption(mat[i + 1][j], MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);
MatCreateAIJ(mpiCommGlobal,
nRowsRankInterior, nRowsRankCoarse,
nGlobalOverallInterior, nRowsOverallCoarse,
0, nnzIntCoarse.dnnz, 0, nnzIntCoarse.onnz,
&matIntCoarse);
nRowsRankMat, nRowsRankCoarse,
nRowsOverallMat, nRowsOverallCoarse,
0, PETSC_NULL, 0, PETSC_NULL,
&mat[j][i + 1]);
MSG("REMOVE THIS LINE WHEN FINISHED!\n");
MatSetOption(mat[j][i + 1], MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);
}
// MatCreateAIJ(mpiCommGlobal,
// nRowsRankCoarse, nRowsRankInterior,
// nRowsOverallCoarse, nGlobalOverallInterior,
// 0, nnzCoarseInt.dnnz, 0, nnzCoarseInt.onnz,
// &matCoarseInt);
// MatCreateAIJ(mpiCommGlobal,
// nRowsRankInterior, nRowsRankCoarse,
// nGlobalOverallInterior, nRowsOverallCoarse,
// 0, nnzIntCoarse.dnnz, 0, nnzIntCoarse.onnz,
// &matIntCoarse);
}
}
@@ -218,21 +287,21 @@ namespace AMDiS {
// === Traverse all sequentially created matrices and add the values to ===
// === the global PETSc matrices. ===
int nComponents = mat->getSize();
int nComponents = seqMat->getSize();
for (int i = 0; i < nComponents; i++) {
for (int j = 0; j < nComponents; j++) {
if (!(*mat)[i][j])
if (!(*seqMat)[i][j])
continue;
ParallelDofMapping *rowCoarseSpace = coarseSpaceMap[i];
ParallelDofMapping *colCoarseSpace = coarseSpaceMap[j];
traits::col<Matrix>::type col((*mat)[i][j]->getBaseMatrix());
traits::const_value<Matrix>::type value((*mat)[i][j]->getBaseMatrix());
traits::col<Matrix>::type col((*seqMat)[i][j]->getBaseMatrix());
traits::const_value<Matrix>::type value((*seqMat)[i][j]->getBaseMatrix());
// Traverse all rows.
for (cursor_type cursor = begin<row>((*mat)[i][j]->getBaseMatrix()),
cend = end<row>((*mat)[i][j]->getBaseMatrix()); cursor != cend; ++cursor) {
for (cursor_type cursor = begin<row>((*seqMat)[i][j]->getBaseMatrix()),
cend = end<row>((*seqMat)[i][j]->getBaseMatrix()); cursor != cend; ++cursor) {
bool isRowCoarse = isCoarseSpace(i, feSpaces[i], *cursor);
@@ -274,7 +343,9 @@ namespace AMDiS {
for (unsigned int k = 0; k < cols.size(); k++)
cols[k] = colCoarseSpace->getMatIndex(j, cols[k]);
MatSetValues(matCoarseCoarse, 1, &rowIndex, cols.size(),
// matCoarseCoarse
MatSetValues(mat[componentIthCoarseMap[i] + 1][componentIthCoarseMap[i] + 1],
1, &rowIndex, cols.size(),
&(cols[0]), &(values[0]), ADD_VALUES);
if (colsOther.size()) {
@@ -287,7 +358,9 @@ namespace AMDiS {
interiorMap->getMatIndex(j, colsOther[k]) + rStartInterior;
}
MatSetValues(matCoarseInt, 1, &rowIndex, colsOther.size(),
// matCoarseInt
MatSetValues(mat[componentIthCoarseMap[i] + 1][0],
1, &rowIndex, colsOther.size(),
&(colsOther[0]), &(valuesOther[0]), ADD_VALUES);
}
} else {
@@ -302,7 +375,8 @@ namespace AMDiS {
cols[k] = interiorMap->getMatIndex(j, cols[k]);
}
MatSetValues(matIntInt, 1, &localRowIndex, cols.size(),
// matIntInt
MatSetValues(mat[0][0], 1, &localRowIndex, cols.size(),
&(cols[0]), &(values[0]), ADD_VALUES);
if (colsOther.size()) {
@@ -314,7 +388,9 @@ namespace AMDiS {
for (unsigned int k = 0; k < colsOther.size(); k++)
colsOther[k] = colCoarseSpace->getMatIndex(j, colsOther[k]);
MatSetValues(matIntCoarse, 1, &globalRowIndex, colsOther.size(),
// matIntCoarse
MatSetValues(mat[0][componentIthCoarseMap[i] + 1],
1, &globalRowIndex, colsOther.size(),
&(colsOther[0]), &(valuesOther[0]), ADD_VALUES);
}
}
@@ -325,19 +401,11 @@ namespace AMDiS {
// === Start global assembly procedure. ===
MatAssemblyBegin(matIntInt, MAT_FINAL_ASSEMBLY);
MatAssemblyEnd(matIntInt, MAT_FINAL_ASSEMBLY);
if (coarseSpaceMap.size()) {
MatAssemblyBegin(matCoarseCoarse, MAT_FINAL_ASSEMBLY);
MatAssemblyEnd(matCoarseCoarse, MAT_FINAL_ASSEMBLY);
MatAssemblyBegin(matIntCoarse, MAT_FINAL_ASSEMBLY);
MatAssemblyEnd(matIntCoarse, MAT_FINAL_ASSEMBLY);
MatAssemblyBegin(matCoarseInt, MAT_FINAL_ASSEMBLY);
MatAssemblyEnd(matCoarseInt, MAT_FINAL_ASSEMBLY);
for (int i = 0; i < nCoarseMap + 1; i++) {
for (int j = 0; j < nCoarseMap + 1; j++) {
MatAssemblyBegin(mat[i][j], MAT_FINAL_ASSEMBLY);
MatAssemblyEnd(mat[i][j], MAT_FINAL_ASSEMBLY);
}
}
@@ -348,7 +416,7 @@ namespace AMDiS {
// === Create solver for the non primal (thus local) variables. ===
KSPCreate(mpiCommLocal, &kspInterior);
KSPSetOperators(kspInterior, matIntInt, matIntInt, SAME_NONZERO_PATTERN);
KSPSetOperators(kspInterior, mat[0][0], mat[0][0], SAME_NONZERO_PATTERN);
KSPSetOptionsPrefix(kspInterior, "interior_");
KSPSetType(kspInterior, KSPPREONLY);
KSPGetPC(kspInterior, &pcInterior);
@@ -453,7 +521,7 @@ namespace AMDiS {
MatNullSpaceCreate(mpiCommGlobal, (hasConstantNullspace ? PETSC_TRUE : PETSC_FALSE),
1, &nullspaceBasis, &matNullspace);
MatMult(matIntInt, nullspaceBasis, petscSolVec);
MatMult(mat[0][0], nullspaceBasis, petscSolVec);
PetscReal n;
VecNorm(petscSolVec, NORM_2, &n);
MSG("NORM IS: %e\n", n);
@@ -461,7 +529,7 @@ namespace AMDiS {
MatNullSpaceCreate(mpiCommGlobal, PETSC_TRUE, 0, PETSC_NULL, &matNullspace);
}
MatSetNullSpace(matIntInt, matNullspace);
MatSetNullSpace(mat[0][0], matNullspace);
KSPSetNullSpace(kspInterior, matNullspace);
// === Remove null space, if requested. ===
@@ -564,14 +632,11 @@ namespace AMDiS {
exitPreconditioner(pcInterior);
MatDestroy(&matIntInt);
KSPDestroy(&kspInterior);
for (unsigned int i = 0; i < mat.size(); i++)
for (unsigned int j = 0; j < mat.size(); j++)
MatDestroy(&mat[i][j]);
if (coarseSpaceMap.size()) {
MatDestroy(&matCoarseCoarse);
MatDestroy(&matCoarseInt);
MatDestroy(&matIntCoarse);
}
KSPDestroy(&kspInterior);
if (petscSolVec != PETSC_NULL) {
VecDestroy(&petscSolVec);
@@ -676,19 +741,19 @@ namespace AMDiS {
}
void PetscSolverGlobalMatrix::setDofMatrix(DOFMatrix* mat,
void PetscSolverGlobalMatrix::setDofMatrix(DOFMatrix* seqMat,
int nRowMat, int nColMat)
{
FUNCNAME("PetscSolverGlobalMatrix::setDofMatrix()");
TEST_EXIT(mat)("No DOFMatrix!\n");
TEST_EXIT(seqMat)("No DOFMatrix!\n");
using mtl::tag::row; using mtl::tag::nz; using mtl::begin; using mtl::end;
namespace traits = mtl::traits;
typedef DOFMatrix::base_matrix_type Matrix;
traits::col<Matrix>::type col(mat->getBaseMatrix());
traits::const_value<Matrix>::type value(mat->getBaseMatrix());
traits::col<Matrix>::type col(seqMat->getBaseMatrix());
traits::const_value<Matrix>::type value(seqMat->getBaseMatrix());
typedef traits::range_generator<row, Matrix>::type cursor_type;
typedef traits::range_generator<nz, cursor_type>::type icursor_type;
@@ -703,16 +768,16 @@ namespace AMDiS {