Commit a7b59f97 authored by Oliver Sander, committed by sander

Remove the getGridView methods from GlobalIndexSet and GlobalP2Mapper

The IndexSet interface class from dune-grid doesn't have it, and we want
to match that interface as closely as possible.

[[Imported from SVN: r9921]]
parent e44c6a43
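
For reference, a minimal sketch of the new calling convention (not part of the commit): the Communicator template argument and the explicit comm() argument are taken from the changes below, while the surrounding typedefs (GridType, GUIndex, MatrixType, CorrectionType) and the members guIndex_ and grid_ are assumed to be set up as in RiemannianTrustRegionSolver.

    // Sketch only; assumes GridType, GUIndex, MatrixType, CorrectionType,
    // guIndex_ and grid_ as used in RiemannianTrustRegionSolver below.
    typedef typename GridType::LeafGridView::CollectiveCommunication Communicator;

    // The communicator is now passed explicitly instead of being fetched
    // through the removed guIndex_->getGridView().comm():
    VectorCommunicator<GUIndex, Communicator, CorrectionType>
        vectorComm(*guIndex_, grid_->leafGridView().comm(), 0);

    MatrixCommunicator<GUIndex, Communicator, MatrixType>
        matrixComm(*guIndex_, grid_->leafGridView().comm(), 0);
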
@@ -333,10 +333,6 @@ public:
     return(nLocalEntity_);
   }
 
-  const GridView& getGridView() const {
-    return gridview_;
-  }
-
 protected:
   /** store data members */
   const GridView gridview_;   /** store a const reference to a gridview */
...
@@ -30,7 +30,6 @@ namespace Dune {
     typedef std::map<int,int> IndexMap;
 
     GlobalP2Mapper(const GridView& gridView)
-    : gridView_(gridView)
     {
       static_assert(GridView::dimension==2, "Only implemented for two-dimensional grids");
 
@@ -116,12 +115,6 @@ namespace Dune {
       return nOwnedLocalEntity_;
     }
 
-    const GridView& getGridView() const {
-      return gridView_;
-    }
-
-    const GridView gridView_;
-
     IndexMap localGlobalMap_;
     IndexMap globalLocalMap_;
 
...
@@ -9,7 +9,7 @@
 
 #include <dune/gfe/parallel/mpifunctions.hh>
 
-template<typename GUIndex, typename MatrixType, typename ColGUIndex=GUIndex>
+template<typename GUIndex, typename Communicator, typename MatrixType, typename ColGUIndex=GUIndex>
 class MatrixCommunicator {
 
   struct TransferMatrixTuple {
@@ -38,22 +38,24 @@ class MatrixCommunicator {
     }
 
     // Get number of matrix entries on each process
-    std::vector<int> localMatrixEntriesSizes(MPIFunctions::shareSizes(guIndex1_.getGridView().comm(), localMatrixEntries.size()));
+    std::vector<int> localMatrixEntriesSizes(MPIFunctions::shareSizes(communicator_, localMatrixEntries.size()));
 
     // Get matrix entries from every process
-    globalMatrixEntries = MPIFunctions::gatherv(guIndex1_.getGridView().comm(), localMatrixEntries, localMatrixEntriesSizes, root_rank);
+    globalMatrixEntries = MPIFunctions::gatherv(communicator_, localMatrixEntries, localMatrixEntriesSizes, root_rank);
   }
 
 public:
-  MatrixCommunicator(const GUIndex& rowIndex, const int& root)
+  MatrixCommunicator(const GUIndex& rowIndex, const Communicator& communicator, const int& root)
   : guIndex1_(rowIndex),
     guIndex2_(rowIndex),
+    communicator_(communicator),
     root_rank(root)
   {}
 
-  MatrixCommunicator(const GUIndex& rowIndex, const ColGUIndex& colIndex, const int& root)
+  MatrixCommunicator(const GUIndex& rowIndex, const ColGUIndex& colIndex, const Communicator& communicator, const int& root)
   : guIndex1_(rowIndex),
     guIndex2_(colIndex),
+    communicator_(communicator),
     root_rank(root)
   {}
 
@@ -110,6 +112,7 @@
 private:
   const GUIndex& guIndex1_;
   const ColGUIndex& guIndex2_;
+  const Communicator& communicator_;
   int root_rank;
 
   std::vector<TransferMatrixTuple> globalMatrixEntries;
...
@@ -7,7 +7,7 @@
 
 #include <dune/gfe/parallel/mpifunctions.hh>
 
-template<typename GUIndex, typename VectorType>
+template<typename GUIndex, typename Communicator, typename VectorType>
 class VectorCommunicator {
 
   struct TransferVectorTuple {
@@ -32,18 +32,20 @@ private:
       localVectorEntries.push_back(TransferVectorTuple(guIndex.index(k), localVector[k]));
 
     // Get number of vector entries on each process
-    localVectorEntriesSizes = MPIFunctions::shareSizes(guIndex.getGridView().comm(), localVectorEntries.size());
+    localVectorEntriesSizes = MPIFunctions::shareSizes(communicator_, localVectorEntries.size());
 
     // Get vector entries from every process
-    globalVectorEntries = MPIFunctions::gatherv(guIndex.getGridView().comm(), localVectorEntries, localVectorEntriesSizes, root_rank);
+    globalVectorEntries = MPIFunctions::gatherv(communicator_, localVectorEntries, localVectorEntriesSizes, root_rank);
   }
 
 public:
-  VectorCommunicator(const GUIndex& gi, const int& root)
-  : guIndex(gi), root_rank(root)
+  VectorCommunicator(const GUIndex& gi,
+                     const Communicator& communicator,
+                     const int& root)
+  : guIndex(gi), communicator_(communicator), root_rank(root)
   {
     // Get number of vector entries on each process
-    localVectorEntriesSizes = MPIFunctions::shareSizes(guIndex.getGridView().comm(), guIndex.nOwnedLocalEntity());
+    localVectorEntriesSizes = MPIFunctions::shareSizes(communicator, guIndex.nOwnedLocalEntity());
   }
 
   VectorType reduceAdd(const VectorType& localVector)
@@ -76,12 +78,12 @@ public:
     for (size_t k = 0; k < globalVectorEntries.size(); ++k)
       globalVectorEntries[k].value_ = global[globalVectorEntries[k].globalIndex_];
 
-    const int localSize = localVectorEntriesSizes[guIndex.getGridView().comm().rank()];
+    const int localSize = localVectorEntriesSizes[communicator_.rank()];
 
     // Create vector for transfer data
     std::vector<TransferVectorTuple> localVectorEntries(localSize);
 
-    MPIFunctions::scatterv(guIndex.getGridView().comm(), localVectorEntries, globalVectorEntries, localVectorEntriesSizes, root_rank);
+    MPIFunctions::scatterv(communicator_, localVectorEntries, globalVectorEntries, localVectorEntriesSizes, root_rank);
 
     // Create vector for local solution
     VectorType x(localSize);
@@ -95,6 +97,7 @@ public:
 private:
   const GUIndex& guIndex;
+  const Communicator& communicator_;
   int root_rank;
 
   std::vector<int> localVectorEntriesSizes;
...
@@ -91,7 +91,9 @@ setup(const GridType& grid,
 #endif
 
     // Transfer all Dirichlet data to the master processor
-    VectorCommunicator<GUIndex, Dune::BitSetVector<blocksize> > vectorComm(*guIndex_, 0);
+    VectorCommunicator<GUIndex, typename GridType::LeafGridView::CollectiveCommunication, Dune::BitSetVector<blocksize> > vectorComm(*guIndex_,
+                                                                                          grid_->leafGridView().comm(),
+                                                                                          0);
     Dune::BitSetVector<blocksize>* globalDirichletNodes = NULL;
     globalDirichletNodes = new Dune::BitSetVector<blocksize>(vectorComm.reduceCopy(dirichletNodes));
@@ -125,7 +127,7 @@ setup(const GridType& grid,
       delete h1SemiNorm_;
 
-    MatrixCommunicator<GUIndex, ScalarMatrixType> matrixComm(*guIndex_, 0);
+    MatrixCommunicator<GUIndex, typename GridType::LeafGridView::CollectiveCommunication, ScalarMatrixType> matrixComm(*guIndex_, grid_->leafGridView().comm(), 0);
 
     ScalarMatrixType* A = new ScalarMatrixType(matrixComm.reduceAdd(localA));
     h1SemiNorm_ = new H1SemiNorm<CorrectionType>(*A);
@@ -185,7 +187,10 @@ setup(const GridType& grid,
     LeafP1GUIndex p1Index(grid_->leafGridView());
 
     typedef typename TruncatedCompressedMGTransfer<CorrectionType>::TransferOperatorType TransferOperatorType;
-    MatrixCommunicator<GUIndex, TransferOperatorType, LeafP1GUIndex> matrixComm(*guIndex_, p1Index, 0);
+    MatrixCommunicator<GUIndex,
+                       typename GridType::LeafGridView::CollectiveCommunication,
+                       TransferOperatorType,
+                       LeafP1GUIndex> matrixComm(*guIndex_, p1Index, grid_->leafGridView().comm(), 0);
 
     mmgStep->mgTransfer_.back() = new PKtoP1MGTransfer<CorrectionType>;
     Dune::shared_ptr<TransferOperatorType> topTransferOperator = Dune::make_shared<TransferOperatorType>(matrixComm.reduceCopy(topTransferOp->getMatrix()));
@@ -203,7 +208,9 @@ setup(const GridType& grid,
       LevelGUIndex coarseGUIndex(grid_->levelGridView(i+1));
 
       typedef typename TruncatedCompressedMGTransfer<CorrectionType>::TransferOperatorType TransferOperatorType;
-      MatrixCommunicator<LevelGUIndex, TransferOperatorType> matrixComm(fineGUIndex, coarseGUIndex, 0);
+      MatrixCommunicator<LevelGUIndex,
+                         typename GridType::LevelGridView::CollectiveCommunication,
+                         TransferOperatorType> matrixComm(fineGUIndex, coarseGUIndex, grid_->levelGridView(i+1).comm(), 0);
 
       mmgStep->mgTransfer_[i] = new TruncatedCompressedMGTransfer<CorrectionType>;
       Dune::shared_ptr<TransferOperatorType> transferOperatorMatrix = Dune::make_shared<TransferOperatorType>(matrixComm.reduceCopy(newTransferOp->getMatrix()));
@@ -292,8 +299,12 @@ void RiemannianTrustRegionSolver<GridType,TargetSpace>::solve()
     MatrixType stiffnessMatrix;
     CorrectionType rhs_global;
 
-    VectorCommunicator<GUIndex, CorrectionType> vectorComm(*guIndex_, 0);
-    MatrixCommunicator<GUIndex, MatrixType> matrixComm(*guIndex_, 0);
+    VectorCommunicator<GUIndex, typename GridType::LeafGridView::CollectiveCommunication, CorrectionType> vectorComm(*guIndex_,
+                                                                          grid_->leafGridView().comm(),
+                                                                          0);
+    MatrixCommunicator<GUIndex, typename GridType::LeafGridView::CollectiveCommunication, MatrixType> matrixComm(*guIndex_,
+                                                                          grid_->leafGridView().comm(),
+                                                                          0);
 
     for (int i=0; i<maxTrustRegionSteps_; i++) {
...