Commit 2eaf89db authored by Thomas Witkowski

Added PETSc helper namespace, simplifies explicit Schur primal computations in FETI-DP code.

parent fc3bdbaa
Modified: CMakeLists.txt

@@ -257,6 +257,7 @@ if(ENABLE_PARALLEL_DOMAIN)
   list(APPEND PARALLEL_DOMAIN_AMDIS_SRC
 	${SOURCE_DIR}/parallel/BddcMlSolver.cc
 	${SOURCE_DIR}/parallel/ParallelCoarseSpaceMatVec.cc
+	${SOURCE_DIR}/parallel/PetscHelper.cc
 	${SOURCE_DIR}/parallel/PetscMultigridPrecon.cc
 	${SOURCE_DIR}/parallel/PetscSolver.cc
 	${SOURCE_DIR}/parallel/PetscProblemStat.cc
New file: parallel/PetscHelper.cc
//
// Software License for AMDiS
//
// Copyright (c) 2010 Dresden University of Technology
// All rights reserved.
// Authors: Simon Vey, Thomas Witkowski et al.
//
// This file is part of AMDiS
//
// See also license.opensource.txt in the distribution.
#include "parallel/PetscHelper.h"
namespace AMDiS {
namespace petsc_helper {
using namespace std;
void getMatLocalColumn(Mat mat, PetscMatCol &matCol)
{
PetscInt firstIndex, lastIndex;
MatGetOwnershipRange(mat, &firstIndex, &lastIndex);
PetscInt nCols;
const PetscInt *cols;
const PetscScalar *values;
for (int row = firstIndex; row < lastIndex; row++) {
MatGetRow(mat, row, &nCols, &cols, &values);
for (int i = 0; i < nCols; i++) {
if (values[i] != 0.0) {
matCol[cols[i]].first.push_back(row - firstIndex);
matCol[cols[i]].second.push_back(values[i]);
}
}
MatRestoreRow(mat, row, &nCols, &cols, &values);
}
}
void setMatLocalColumn(Mat mat, int column, Vec vec)
{
PetscInt firstIndex;
MatGetOwnershipRange(mat, &firstIndex, PETSC_NULL);
PetscInt vecSize;
VecGetLocalSize(vec, &vecSize);
PetscScalar *tmpValues;
VecGetArray(vec, &tmpValues);
for (int i = 0; i < vecSize; i++)
MatSetValue(mat,
firstIndex + i,
column,
tmpValues[i],
ADD_VALUES);
VecRestoreArray(vec, &tmpValues);
}
void getColumnVec(const SparseCol &matCol, Vec vec)
{
VecSet(vec, 0.0);
VecSetValues(vec, matCol.first.size(),
&(matCol.first[0]), &(matCol.second[0]), INSERT_VALUES);
VecAssemblyBegin(vec);
VecAssemblyEnd(vec);
}
}
}
New file: parallel/PetscHelper.h

// ============================================================================
// == ==
// == AMDiS - Adaptive multidimensional simulations ==
// == ==
// == http://www.amdis-fem.org ==
// == ==
// ============================================================================
//
// Software License for AMDiS
//
// Copyright (c) 2010 Dresden University of Technology
// All rights reserved.
// Authors: Simon Vey, Thomas Witkowski et al.
//
// This file is part of AMDiS
//
// See also license.opensource.txt in the distribution.
/** \file PetscHelper.h */

#ifndef AMDIS_PETSCHELPER_H
#define AMDIS_PETSCHELPER_H

#include <mpi.h>
#include <map>
#include <vector>
#include <petsc.h>

namespace AMDiS {

  using namespace std;

  /** \brief
   * In this namespace, we collect several auxiliary functions for using
   * PETSc in AMDiS. Many of these functions may be replaced by new PETSc
   * functions in upcoming versions.
   */
  namespace petsc_helper {

    /// Defines a PETSc matrix column-wise: for each global column index, a
    /// pair of local row indices and the corresponding values.
    typedef pair<vector<int>, vector<double> > SparseCol;
    typedef map<int, SparseCol> PetscMatCol;

    /** \brief
     * Returns, for a distributed matrix, the local matrix of each rank in a
     * sparse column format.
     *
     * \param[in]  mat     PETSc distributed matrix.
     * \param[out] matCol  The sparse column representation of the local matrix.
     */
    void getMatLocalColumn(Mat mat, PetscMatCol &matCol);

    /** \brief
     * Sets a local column vector in a distributed matrix.
     *
     * \param[out] mat     Distributed matrix.
     * \param[in]  column  Column index.
     * \param[in]  vec     Column vector.
     */
    void setMatLocalColumn(Mat mat, int column, Vec vec);

    /** \brief
     * Creates a local PETSc vector representing one column of a matrix
     * stored in \ref PetscMatCol format.
     *
     * \param[in]  matCol  Sparse column representation of the column.
     * \param[out] vec     Vector representing one column of the matrix.
     */
    void getColumnVec(const SparseCol &matCol, Vec vec);

  }

}

#endif
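A minimal usage sketch of how these three helpers are meant to work together: build matK = inv(A_BB) * A_BPi column by column. The names matBPi, matK, nRowsRankB, and solveInterior are illustrative placeholders, not part of this commit; solveInterior stands for any routine that applies inv(A_BB) to a local vector.

#include <petsc.h>
#include "parallel/PetscHelper.h"

// Sketch only: fill matK with inv(A_BB) * A_BPi, one column at a time.
void buildInvABBtimesABPi(Mat matBPi, Mat matK, int nRowsRankB,
                          void (*solveInterior)(Vec rhs, Vec sol))
{
  // Gather the locally owned part of A_BPi in sparse column format.
  AMDiS::petsc_helper::PetscMatCol cols;
  AMDiS::petsc_helper::getMatLocalColumn(matBPi, cols);

  Vec tmpVec;
  VecCreateSeq(PETSC_COMM_SELF, nRowsRankB, &tmpVec);

  for (AMDiS::petsc_helper::PetscMatCol::iterator it = cols.begin();
       it != cols.end(); ++it) {
    AMDiS::petsc_helper::getColumnVec(it->second, tmpVec);  // load one column
    solveInterior(tmpVec, tmpVec);                          // apply inv(A_BB)
    AMDiS::petsc_helper::setMatLocalColumn(matK, it->first, tmpVec);
  }
  VecDestroy(&tmpVec);

  // setMatLocalColumn only calls MatSetValue, so the caller must assemble matK.
  MatAssemblyBegin(matK, MAT_FINAL_ASSEMBLY);
  MatAssemblyEnd(matK, MAT_FINAL_ASSEMBLY);
}

The FETI-DP change below follows exactly this pattern, with subdomain->solve() playing the role of the interior solver.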
Modified: parallel/PetscSolverFeti.cc

@@ -12,6 +12,7 @@
 #include "AMDiS.h"
 #include "MatrixVector.h"
+#include "parallel/PetscHelper.h"
 #include "parallel/PetscSolverFeti.h"
 #include "parallel/PetscSolverFetiDebug.h"
 #include "parallel/PetscSolverFetiMonitor.h"
@@ -745,6 +746,9 @@ namespace AMDiS {
       TEST_EXIT_DBG(isPrimal(feSpaces[i], **it) == false)
         ("Should not be primal!\n");

+      if (dirichletRows[feSpaces[i]].count(**it))
+        continue;
+
       int col = lagrangeMap.getMatIndex(i, **it);
       double value = 1.0;
       MatSetValue(mat_augmented_lagrange, rowCounter, col, value, INSERT_VALUES);
@@ -818,86 +822,68 @@
     } else {
       MSG("Create direct schur primal solver!\n");

+      TEST_EXIT(!augmentedLagrange)("Not yet supported!\n");
+
       double wtime = MPI::Wtime();

-      TEST_EXIT(!augmentedLagrange)("Not yet supported!\n");
-
       TEST_EXIT_DBG(meshLevel == 0)
         ("Does not support for multilevel, check usage of localDofMap.\n");

+      // === First, calculate matK = inv(A_BB) A_BPi:                    ===
+      // ===  - get all local column vectors from A_BPi                  ===
+      // ===  - solve with A_BB for this column vector as the rhs vector ===
+      // ===  - set the result to the corresponding column vector of     ===
+      // ===    matrix matK                                              ===
+
       int nRowsRankPrimal = primalDofMap.getRankDofs();
       int nRowsOverallPrimal = primalDofMap.getOverallDofs();
       int nRowsRankB = localDofMap.getRankDofs();

-      Mat matBPi;
-      MatCreateAIJ(mpiCommGlobal,
-                   nRowsRankB, nRowsRankPrimal,
-                   nGlobalOverallInterior, nRowsOverallPrimal,
-                   150, PETSC_NULL, 150, PETSC_NULL, &matBPi);
-      MatSetOption(matBPi, MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);
-
-      PetscInt nCols;
-      const PetscInt *cols;
-      const PetscScalar *values;
-
-      map<int, vector<pair<int, double> > > mat_b_primal_cols;
-
-      for (int i = 0; i < nRowsRankB; i++) {
-        PetscInt row = localDofMap.getStartDofs() + i;
-        MatGetRow(subdomain->getMatInteriorCoarse(), row, &nCols, &cols, &values);
-
-        for (int j = 0; j < nCols; j++)
-          if (values[j] != 0.0)
-            mat_b_primal_cols[cols[j]].push_back(make_pair(i, values[j]));
-
-        MatRestoreRow(subdomain->getMatInteriorCoarse(), row, &nCols, &cols, &values);
-      }
+      // Transform matrix A_BPi into a sparse column format.
+      petsc_helper::PetscMatCol mat_b_primal_cols;
+      petsc_helper::getMatLocalColumn(subdomain->getMatInteriorCoarse(),
+                                      mat_b_primal_cols);

       TEST_EXIT(static_cast<int>(mat_b_primal_cols.size()) ==
                 primalDofMap.getLocalDofs())
         ("Should not happen!\n");

-      for (map<int, vector<pair<int, double> > >::iterator it = mat_b_primal_cols.begin();
-           it != mat_b_primal_cols.end(); ++it) {
-        Vec tmpVec;
-        VecCreateSeq(PETSC_COMM_SELF, nRowsRankB, &tmpVec);
-
-        for (unsigned int i = 0; i < it->second.size(); i++)
-          VecSetValue(tmpVec,
-                      it->second[i].first, it->second[i].second, INSERT_VALUES);
-
-        VecAssemblyBegin(tmpVec);
-        VecAssemblyEnd(tmpVec);
+      Vec tmpVec;
+      VecCreateSeq(PETSC_COMM_SELF, nRowsRankB, &tmpVec);
+
+      Mat matK;
+      MatCreateAIJ(mpiCommGlobal,
+                   nRowsRankB, nRowsRankPrimal,
+                   nGlobalOverallInterior, nRowsOverallPrimal,
+                   150, PETSC_NULL, 150, PETSC_NULL, &matK);
+      MatSetOption(matK, MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);
+
+      // Solve for all column vectors of mat A_BPi and create matrix matK
+      for (petsc_helper::PetscMatCol::iterator it = mat_b_primal_cols.begin();
+           it != mat_b_primal_cols.end(); ++it) {
+        petsc_helper::getColumnVec(it->second, tmpVec);
         subdomain->solve(tmpVec, tmpVec);
-
-        PetscScalar *tmpValues;
-        VecGetArray(tmpVec, &tmpValues);
-        for (int i = 0; i < nRowsRankB; i++)
-          MatSetValue(matBPi,
-                      localDofMap.getStartDofs() + i,
-                      it->first,
-                      tmpValues[i],
-                      ADD_VALUES);
-        VecRestoreArray(tmpVec, &tmpValues);
-        VecDestroy(&tmpVec);
+        petsc_helper::setMatLocalColumn(matK, it->first, tmpVec);
       }

-      MatAssemblyBegin(matBPi, MAT_FINAL_ASSEMBLY);
-      MatAssemblyEnd(matBPi, MAT_FINAL_ASSEMBLY);
+      VecDestroy(&tmpVec);
+
+      MatAssemblyBegin(matK, MAT_FINAL_ASSEMBLY);
+      MatAssemblyEnd(matK, MAT_FINAL_ASSEMBLY);

+      // Calculate: mat_schur_primal = A_PiPi - A_PiB inv(A_BB) A_BPi
+      //                             = A_PiPi - A_PiB matK
       MatDuplicate(subdomain->getMatCoarse(), MAT_COPY_VALUES,
                    &mat_schur_primal);

       Mat matPrimal;
-      MatMatMult(subdomain->getMatCoarseInterior(), matBPi, MAT_INITIAL_MATRIX,
+      MatMatMult(subdomain->getMatCoarseInterior(), matK, MAT_INITIAL_MATRIX,
                  PETSC_DEFAULT, &matPrimal);
       MatAXPY(mat_schur_primal, -1.0, matPrimal, DIFFERENT_NONZERO_PATTERN);

       MatDestroy(&matPrimal);
-      MatDestroy(&matBPi);
+      MatDestroy(&matK);

+      // === Create KSP solver object and set appropriate solver options. ===
       KSPCreate(mpiCommGlobal, &ksp_schur_primal);
       KSPSetOperators(ksp_schur_primal, mat_schur_primal, mat_schur_primal,
@@ -910,6 +896,9 @@ namespace AMDiS {
       PCFactorSetMatSolverPackage(pc_schur_primal, MATSOLVERMUMPS);
       KSPSetFromOptions(ksp_schur_primal);

+      // === And finally print timings, if required. ===
+
       if (printTimings) {
         MPI::COMM_WORLD.Barrier();
         MatInfo minfo;
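For reference, the rewritten block above assembles the FETI-DP coarse (Schur primal) operator stated in the in-code comment; in matrix notation,

\[
  S_{\Pi\Pi} = A_{\Pi\Pi} - A_{\Pi B}\, A_{BB}^{-1}\, A_{B\Pi},
\]

where the helper-based loop computes matK = A_{BB}^{-1} A_{B\Pi} column by column, and MatMatMult/MatAXPY form the product A_{\Pi B} matK and subtract it from A_{\Pi\Pi}.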