diff --git a/AMDiS/src/parallel/FeSpaceMapping.h b/AMDiS/src/parallel/FeSpaceMapping.h
new file mode 100644
index 0000000000000000000000000000000000000000..2344459e54713714ee4a14132da0dfb56c8c4701
--- /dev/null
+++ b/AMDiS/src/parallel/FeSpaceMapping.h
@@ -0,0 +1,158 @@
+// ============================================================================
+// ==                                                                        ==
+// ==  AMDiS - Adaptive multidimensional simulations                         ==
+// ==                                                                        ==
+// ==  http://www.amdis-fem.org                                              ==
+// ==                                                                        ==
+// ============================================================================
+//
+// Software License for AMDiS
+//
+// Copyright (c) 2010 Dresden University of Technology
+// All rights reserved.
+// Authors: Simon Vey, Thomas Witkowski et al.
+//
+// This file is part of AMDiS
+//
+// See also license.opensource.txt in the distribution.
+
+
+
+/** \file FeSpaceMapping.h */
+
+#include <map>
+#include "parallel/MpiHelper.h"
+#include "parallel/ParallelTypes.h"
+
+#ifndef AMDIS_FE_SPACE_MAPPING_H
+#define AMDIS_FE_SPACE_MAPPING_H
+
+namespace AMDiS {
+
+  using namespace std;
+
+  class GlobalDofMap
+  {
+  public:
+    GlobalDofMap(MPI::Intracomm* m)
+      : mpiComm(m),
+        nRankDofs(0),
+        nOverallDofs(0),
+        rStartDofs(0)
+    {}
+
+    void clear()
+    {
+      dofMap.clear();
+      nRankDofs = 0;
+      nOverallDofs = 0;
+      rStartDofs = 0;
+    }
+
+    DegreeOfFreedom operator[](DegreeOfFreedom d)
+    {
+      TEST_EXIT_DBG(dofMap.count(d))("Should not happen!\n");
+
+      return dofMap[d];
+    }
+
+    void insertRankDof(DegreeOfFreedom dof0, DegreeOfFreedom dof1 = -1)
+    {
+      FUNCNAME("GlobalDofMap::insertRankDof()");
+
+      TEST_EXIT_DBG(dofMap.count(dof0) == 0)("Should not happen!\n");
+
+      dofMap[dof0] = (dof1 >= 0 ? dof1 : nRankDofs);
+      nRankDofs++;
+    }
+
+    void insert(DegreeOfFreedom dof0, DegreeOfFreedom dof1)
+    {
+      FUNCNAME("GlobalDofMap::insert()");
+
+      TEST_EXIT_DBG(dofMap.count(dof0) == 0)("Should not happen!\n");
+
+      dofMap[dof0] = dof1;
+    }
+
+    bool isSet(DegreeOfFreedom dof)
+    {
+      return static_cast<bool>(dofMap.count(dof));
+    }
+
+    unsigned int size()
+    {
+      return dofMap.size();
+    }
+
+    DofMapping& getMap()
+    {
+      return dofMap;
+    }
+
+    void update(bool add = true)
+    {
+      nOverallDofs = 0;
+      rStartDofs = 0;
+      mpi::getDofNumbering(*mpiComm, nRankDofs, rStartDofs, nOverallDofs);
+
+      if (add)
+        addOffset(rStartDofs);
+    }
+
+    void addOffset(int offset)
+    {
+      for (DofMapping::iterator it = dofMap.begin(); it != dofMap.end(); ++it)
+        it->second += offset;
+    }
+
+  private:
+    MPI::Intracomm* mpiComm;
+
+    /// Mapping from DOF indices to their global indices.
+    DofMapping dofMap;
+
+  public:
+    /// Number of rank-owned DOFs, number of global DOFs and the rank's offset.
+    int nRankDofs, nOverallDofs, rStartDofs;
+  };
+
+
+  template<typename T>
+  class FeSpaceData
+  {
+  public:
+    FeSpaceData() {}
+
+    void setMpiComm(MPI::Intracomm *m)
+    {
+      mpiComm = m;
+    }
+
+    T& operator[](const FiniteElemSpace* feSpace)
+    {
+      FUNCNAME("FeSpaceData::operator[]()");
+
+      TEST_EXIT_DBG(data.count(feSpace))("Should not happen!\n");
+
+      return data.find(feSpace)->second;
+    }
+
+    void addFeSpace(const FiniteElemSpace* feSpace)
+    {
+      FUNCNAME("FeSpaceData::addFeSpace()");
+
+      if (data.count(feSpace))
+        data.find(feSpace)->second.clear();
+      else
+        data.insert(make_pair(feSpace, T(mpiComm)));
+    }
+
+  private:
+    MPI::Intracomm* mpiComm;
+
+    map<const FiniteElemSpace*, T> data;
+  };
+
+}
+
+#endif
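The GlobalDofMap / FeSpaceData pair introduced above replaces the loose globalPrimalIndex / nRankPrimals / rStartPrimals bookkeeping that the rest of the patch removes. A minimal usage sketch, not part of the patch; the communicator, the FE space pointer and the DOF indices are placeholders:

    // Sketch only: 'comm' and 'feSpace' are assumed to exist in the caller.
    FeSpaceData<GlobalDofMap> primalDofMap;
    primalDofMap.setMpiComm(&comm);
    primalDofMap.addFeSpace(feSpace);

    // Rank-owned DOFs get consecutive local indices ...
    primalDofMap[feSpace].insertRankDof(42);
    primalDofMap[feSpace].insertRankDof(43);

    // ... and update() shifts them by the rank offset computed with
    // mpi::getDofNumbering(), giving a consistent global numbering.
    primalDofMap[feSpace].update();

    // DOFs owned by another rank are entered with the global index
    // received from their owner.
    primalDofMap[feSpace].insert(77, 123);

    int nOwned   = primalDofMap[feSpace].nRankDofs;
    int nGlobal  = primalDofMap[feSpace].nOverallDofs;
    int globalId = primalDofMap[feSpace][42];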
diff --git a/AMDiS/src/parallel/PetscSolver.h b/AMDiS/src/parallel/PetscSolver.h
index 5e4249b052fa5b5c60b0ffaa2fcac224234159b4..cb02f06804e7ce4961ea971fa58419c4321ea82b 100644
--- a/AMDiS/src/parallel/PetscSolver.h
+++ b/AMDiS/src/parallel/PetscSolver.h
@@ -57,6 +57,7 @@ namespace AMDiS {
     {
       meshDistributor = m;
       mpiRank = meshDistributor->getMpiRank();
+      mpiComm = &(meshDistributor->getMpiComm());
     }
 
     /** \brief
@@ -85,9 +86,15 @@ namespace AMDiS {
       return 0;
     }
 
-    KSP getSolver() { return solver; }
+    KSP getSolver()
+    {
+      return solver;
+    }
 
-    PC getPc() { return pc; }
+    PC getPc()
+    {
+      return pc;
+    }
 
   protected:
     void printSolutionInfo(AdaptInfo* adaptInfo,
@@ -123,6 +130,8 @@ namespace AMDiS {
 
     int mpiRank;
 
+    MPI::Intracomm* mpiComm;
+
     /// Petsc's matrix structure.
     Mat petscMatrix;
 
diff --git a/AMDiS/src/parallel/PetscSolverFeti.cc b/AMDiS/src/parallel/PetscSolverFeti.cc
index 5932336e6533d7f8f8b62838ad8e893a5702b2f1..cd451918171c0c4084cdc1dcc5cf11a51589112a 100644
--- a/AMDiS/src/parallel/PetscSolverFeti.cc
+++ b/AMDiS/src/parallel/PetscSolverFeti.cc
@@ -11,6 +11,7 @@
 
 
 #include "parallel/PetscSolverFeti.h"
+#include "parallel/PetscSolverFetiStructs.h"
 #include "parallel/StdMpi.h"
 #include "parallel/MpiHelper.h"
 #include "io/VtkWriter.h"
@@ -248,32 +249,16 @@ namespace AMDiS {
     // === Calculate the number of primals that are owned by the rank and ===
     // === create local indices of the primals starting at zero.          ===
 
-    globalPrimalIndex.clear();
-    nRankPrimals = 0;
+    primalDofMap.addFeSpace(feSpace);
     for (DofIndexSet::iterator it = primals.begin(); it != primals.end(); ++it)
-      if (meshDistributor->getIsRankDof(feSpace, *it)) {
-        globalPrimalIndex[*it] = nRankPrimals;
-        nRankPrimals++;
-      }
-
-
-    // === Get overall number of primals and rank's displacement in the ===
-    // === numbering of the primals.                                     ===
-
-    nOverallPrimals = 0;
-    rStartPrimals = 0;
-    mpi::getDofNumbering(meshDistributor->getMpiComm(),
-                         nRankPrimals, rStartPrimals, nOverallPrimals);
-
-
-    // === Create global primal index for all primals. ===
+      if (meshDistributor->getIsRankDof(feSpace, *it))
+        primalDofMap[feSpace].insertRankDof(*it);
 
-    for (DofMapping::iterator it = globalPrimalIndex.begin();
-         it != globalPrimalIndex.end(); ++it)
-      it->second += rStartPrimals;
+    primalDofMap[feSpace].update();
 
     MSG("nRankPrimals = %d  nOverallPrimals = %d\n",
-        nRankPrimals, nOverallPrimals);
+        primalDofMap[feSpace].nRankDofs,
+        primalDofMap[feSpace].nOverallDofs);
 
 
     // === Communicate primal's global index from ranks that own the ===
@@ -286,8 +271,10 @@ namespace AMDiS {
     for (DofComm::Iterator it(meshDistributor->getSendDofs(), feSpace);
          !it.end(); it.nextRank())
       for (; !it.endDofIter(); it.nextDof())
-        if (globalPrimalIndex.count(it.getDofIndex()))
-          stdMpi.getSendData(it.getRank()).push_back(globalPrimalIndex[it.getDofIndex()]);
+        if (primalDofMap[feSpace].isSet(it.getDofIndex())) {
+          DegreeOfFreedom d = primalDofMap[feSpace][it.getDofIndex()];
+          stdMpi.getSendData(it.getRank()).push_back(d);
+        }
 
     stdMpi.updateSendDataSize();
 
@@ -313,15 +300,16 @@ namespace AMDiS {
       int i = 0;
       for (; !it.endDofIter(); it.nextDof()) {
         if (primals.count(it.getDofIndex()) &&
-            meshDistributor->getIsRankDof(feSpace, it.getDofIndex()) == false)
-          globalPrimalIndex[it.getDofIndex()] =
-            stdMpi.getRecvData(it.getRank())[i++];
+            meshDistributor->getIsRankDof(feSpace, it.getDofIndex()) == false) {
+          DegreeOfFreedom d = stdMpi.getRecvData(it.getRank())[i++];
+          primalDofMap[feSpace].insert(it.getDofIndex(), d);
+        }
       }
     }
 
-    TEST_EXIT_DBG(primals.size() == globalPrimalIndex.size())
+    TEST_EXIT_DBG(primals.size() == primalDofMap[feSpace].size())
       ("Number of primals %d, but number of global primals on this rank is %d!\n",
-       primals.size(), globalPrimalIndex.size());
+       primals.size(), primalDofMap[feSpace].size());
   }
@@ -340,7 +328,7 @@ namespace AMDiS {
          !it.end(); it.nextRank())
       for (; !it.endDofIter(); it.nextDof()) {
        // If DOF is not primal, i.e., it's a dual node
-        if (globalPrimalIndex.count(it.getDofIndex()) == 0) {
+        if (primalDofMap[feSpace].isSet(it.getDofIndex()) == false) {
           boundaryDofRanks[it.getDofIndex()].insert(mpiRank);
           boundaryDofRanks[it.getDofIndex()].insert(it.getRank());
         }
@@ -355,7 +343,7 @@
     for (DofComm::Iterator it(meshDistributor->getSendDofs(), feSpace);
          !it.end(); it.nextRank())
       for (; !it.endDofIter(); it.nextDof())
-        if (globalPrimalIndex.count(it.getDofIndex()) == 0)
+        if (primalDofMap[feSpace].isSet(it.getDofIndex()) == false)
           stdMpi.getSendData(it.getRank()).push_back(boundaryDofRanks[it.getDofIndex()]);
 
     stdMpi.updateSendDataSize();
@@ -364,7 +352,7 @@
          !it.end(); it.nextRank()) {
       bool recvFromRank = false;
       for (; !it.endDofIter(); it.nextDof()) {
-        if (globalPrimalIndex.count(it.getDofIndex()) == 0) {
+        if (primalDofMap[feSpace].isSet(it.getDofIndex()) == false) {
           recvFromRank = true;
           break;
         }
@@ -380,18 +368,17 @@
          !it.end(); it.nextRank()) {
       int i = 0;
       for (; !it.endDofIter(); it.nextDof())
-        if (globalPrimalIndex.count(it.getDofIndex()) == 0)
+        if (primalDofMap[feSpace].isSet(it.getDofIndex()) == false)
           boundaryDofRanks[it.getDofIndex()] = stdMpi.getRecvData(it.getRank())[i++];
     }
 
 
     // === Create global index of the dual nodes on each rank. ===
 
-    duals.clear();
-    globalDualIndex.clear();
+    dualDofMap.addFeSpace(feSpace);
 
     int nRankAllDofs = feSpace->getAdmin()->getUsedDofs();
-    nRankB = nRankAllDofs - globalPrimalIndex.size();
+    nRankB = nRankAllDofs - primalDofMap[feSpace].size();
     nOverallB = 0;
     rStartB = 0;
     mpi::getDofNumbering(meshDistributor->getMpiComm(),
@@ -400,21 +387,17 @@
     meshDistributor->getAllBoundaryDofs(feSpace, allBoundaryDofs);
 
     int nRankInteriorDofs = nRankAllDofs - allBoundaryDofs.size();
-    int nRankDuals = 0;
     for (DofContainer::iterator it = allBoundaryDofs.begin();
-         it != allBoundaryDofs.end(); ++it) {
-      if (globalPrimalIndex.count(**it) == 0) {
-        duals.insert(**it);
-        globalDualIndex[**it] = rStartB + nRankInteriorDofs + nRankDuals;
-        nRankDuals++;
-      }
-    }
+         it != allBoundaryDofs.end(); ++it)
+      if (primalDofMap[feSpace].isSet(**it) == false)
+        dualDofMap[feSpace].insertRankDof(**it);
 
-    int nOverallDuals = nRankDuals;
-    mpi::globalAdd(nOverallDuals);
+    dualDofMap[feSpace].update(false);
+    dualDofMap[feSpace].addOffset(rStartB + nRankInteriorDofs);
 
     MSG("nRankDuals = %d  nOverallDuals = %d\n",
-        nRankDuals, nOverallDuals);
+        dualDofMap[feSpace].nRankDofs,
+        dualDofMap[feSpace].nOverallDofs);
   }
@@ -422,36 +405,27 @@
   {
     FUNCNAME("PetscSolverFeti::createLagrange()");
 
-    const FiniteElemSpace *feSpace = meshDistributor->getFeSpace(0);
-
     // === Reserve for each dual node, on the rank that owns this node, the ===
     // === appropriate number of Lagrange constraints.                      ===
 
-    nRankLagrange = 0;
-    for (DofIndexSet::iterator it = duals.begin(); it != duals.end(); ++it) {
-      if (meshDistributor->getIsRankDof(feSpace, *it)) {
-        dofFirstLagrange[*it] = nRankLagrange;
-        int degree = boundaryDofRanks[*it].size();
+    const FiniteElemSpace *feSpace = meshDistributor->getFeSpace(0);
+    dofFirstLagrange.addFeSpace(feSpace);
+
+    int nRankLagrange = 0;
+    DofMapping& dualMap = dualDofMap[feSpace].getMap();
+    for (DofMapping::iterator it = dualMap.begin(); it != dualMap.end(); ++it) {
+      if (meshDistributor->getIsRankDof(feSpace, it->first)) {
+        dofFirstLagrange[feSpace].insert(it->first, nRankLagrange);
+        int degree = boundaryDofRanks[it->first].size();
         nRankLagrange += (degree * (degree - 1)) / 2;
       }
     }
-
-
-    // === Get the overall number of Lagrange constraints and create the ===
-    // === mapping dofFirstLagrange, that defines for each dual boundary ===
-    // === node the first Lagrange constraint global index.              ===
-
-    nOverallLagrange = 0;
-    rStartLagrange = 0;
-    mpi::getDofNumbering(meshDistributor->getMpiComm(),
-                         nRankLagrange, rStartLagrange, nOverallLagrange);
-
-    for (DofIndexSet::iterator it = duals.begin(); it != duals.end(); ++it)
-      if (meshDistributor->getIsRankDof(feSpace, *it))
-        dofFirstLagrange[*it] += rStartLagrange;
-
+    dofFirstLagrange[feSpace].nRankDofs = nRankLagrange;
+    dofFirstLagrange[feSpace].update();
+
     MSG("nRankLagrange = %d  nOverallLagrange = %d\n",
-        nRankLagrange, nOverallLagrange);
+        dofFirstLagrange[feSpace].nRankDofs,
+        dofFirstLagrange[feSpace].nOverallDofs);
 
 
     // === Communicate dofFirstLagrange to all other ranks. ===
@@ -461,10 +435,11 @@
     for (DofComm::Iterator it(meshDistributor->getSendDofs(), feSpace);
          !it.end(); it.nextRank()) {
       for (; !it.endDofIter(); it.nextDof())
-        if (globalPrimalIndex.count(it.getDofIndex()) == 0) {
-          TEST_EXIT_DBG(dofFirstLagrange.count(it.getDofIndex()))
+        if (primalDofMap[feSpace].isSet(it.getDofIndex()) == false) {
+          TEST_EXIT_DBG(dofFirstLagrange[feSpace].isSet(it.getDofIndex()))
             ("Should not happen!\n");
-          stdMpi.getSendData(it.getRank()).push_back(dofFirstLagrange[it.getDofIndex()]);
+          DegreeOfFreedom d = dofFirstLagrange[feSpace][it.getDofIndex()];
+          stdMpi.getSendData(it.getRank()).push_back(d);
         }
     }
 
@@ -474,7 +449,7 @@
          !it.end(); it.nextRank()) {
       bool recvData = false;
       for (; !it.endDofIter(); it.nextDof())
-        if (globalPrimalIndex.count(it.getDofIndex()) == 0) {
+        if (primalDofMap[feSpace].isSet(it.getDofIndex()) == false) {
           recvData = true;
           break;
         }
@@ -488,10 +463,12 @@
     for (DofComm::Iterator it(meshDistributor->getRecvDofs(), feSpace);
          !it.end(); it.nextRank()) {
       int counter = 0;
-      for (; !it.endDofIter(); it.nextDof())
-        if (globalPrimalIndex.count(it.getDofIndex()) == 0)
-          dofFirstLagrange[it.getDofIndex()] =
-            stdMpi.getRecvData(it.getRank())[counter++];
+      for (; !it.endDofIter(); it.nextDof()) {
+        if (primalDofMap[feSpace].isSet(it.getDofIndex()) == false) {
+          DegreeOfFreedom d = stdMpi.getRecvData(it.getRank())[counter++];
+          dofFirstLagrange[feSpace].insert(it.getDofIndex(), d);
+        }
+      }
     }
   }
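The reservation nRankLagrange += (degree * (degree - 1)) / 2 above counts one Lagrange constraint per pair of ranks sharing a dual DOF. A small worked example (rank numbers are made up): a dual DOF contained in the ranks W = {0, 2, 5} has degree = 3 and therefore (3 * 2) / 2 = 3 constraints, one for each pair (0,2), (0,5) and (2,5); these are exactly the pairs that createMatLagrange() below enumerates with its loops over i and j = i + 1.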
@@ -509,9 +486,10 @@
     // === without defining a correct index.                            ===
 
     for (int i = 0; i < admin->getUsedSize(); i++)
-      if (admin->isDofFree(i) == false && globalPrimalIndex.count(i) == 0)
-        if (duals.count(i) == 0 && globalPrimalIndex.count(i) == 0)
-          localIndexB[i] = -1;
+      if (admin->isDofFree(i) == false &&
+          primalDofMap[feSpace].isSet(i) == false &&
+          dualDofMap[feSpace].isSet(i) == false)
+        localIndexB[i] = -1;
 
 
     // === Get correct index for all interior nodes. ===
@@ -522,18 +500,20 @@
         it->second = nLocalInterior;
         nLocalInterior++;
       }
-    nLocalDuals = duals.size();
+    nLocalDuals = dualDofMap[feSpace].size();
 
-    TEST_EXIT_DBG(nLocalInterior + globalPrimalIndex.size() + duals.size() ==
+    TEST_EXIT_DBG(nLocalInterior +
+                  primalDofMap[feSpace].size() +
+                  dualDofMap[feSpace].size() ==
                   static_cast<unsigned int>(admin->getUsedDofs()))
       ("Should not happen!\n");
 
 
     // === And finally, add the global indices of all dual nodes. ===
 
-    for (DofIndexSet::iterator it = duals.begin();
-         it != duals.end(); ++it)
-      localIndexB[*it] = globalDualIndex[*it] - rStartB;
+    for (DofMapping::iterator it = dualDofMap[feSpace].getMap().begin();
+         it != dualDofMap[feSpace].getMap().end(); ++it)
+      localIndexB[it->first] = it->second - rStartB;
   }
@@ -541,12 +521,14 @@
   {
     FUNCNAME("PetscSolverFeti::createMatLagrange()");
 
+    const FiniteElemSpace *feSpace = meshDistributor->getFeSpace(0);
+
     // === Create distributed matrix for Lagrange constraints. ===
 
     MatCreateMPIAIJ(PETSC_COMM_WORLD,
-                    nRankLagrange * nComponents,
+                    dofFirstLagrange[feSpace].nRankDofs * nComponents,
                     nRankB * nComponents,
-                    nOverallLagrange * nComponents,
+                    dofFirstLagrange[feSpace].nOverallDofs * nComponents,
                     nOverallB * nComponents,
                     2, PETSC_NULL, 2, PETSC_NULL,
                     &mat_lagrange);
@@ -558,27 +540,28 @@
     // === m == r, then the rank sets -1.0 for the corresponding       ===
     // === constraint.                                                  ===
 
-    for (DofIndexSet::iterator it = duals.begin(); it != duals.end(); ++it) {
-      TEST_EXIT_DBG(dofFirstLagrange.count(*it))("Should not happen!\n");
-      TEST_EXIT_DBG(boundaryDofRanks.count(*it))("Should not happen!\n");
+    DofMapping &dualMap = dualDofMap[feSpace].getMap();
+    for (DofMapping::iterator it = dualMap.begin(); it != dualMap.end(); ++it) {
+      TEST_EXIT_DBG(dofFirstLagrange[feSpace].isSet(it->first))
+        ("Should not happen!\n");
+      TEST_EXIT_DBG(boundaryDofRanks.count(it->first))
+        ("Should not happen!\n");
 
       // Global index of the first Lagrange constraint for this node.
-      int index = dofFirstLagrange[*it];
+      int index = dofFirstLagrange[feSpace][it->first];
 
       // Copy set of all ranks that contain this dual node.
-      vector<int> W(boundaryDofRanks[*it].begin(), boundaryDofRanks[*it].end());
+      vector<int> W(boundaryDofRanks[it->first].begin(),
+                    boundaryDofRanks[it->first].end());
 
       // Number of ranks that contain this dual node.
       int degree = W.size();
 
-      TEST_EXIT_DBG(globalDualIndex.count(*it))("Should not happen!\n");
-      int dualCol = globalDualIndex[*it];
-
       for (int i = 0; i < degree; i++) {
         for (int j = i + 1; j < degree; j++) {
           if (W[i] == mpiRank || W[j] == mpiRank) {
             // Set the constraint for all components of the system.
             for (int k = 0; k < nComponents; k++) {
               int rowIndex = index * nComponents + k;
-              int colIndex = dualCol * nComponents + k;
+              int colIndex = it->second * nComponents + k;
               double value = (W[i] == mpiRank ? 1.0 : -1.0);
               MatSetValue(mat_lagrange, rowIndex, colIndex, value,
                           INSERT_VALUES);
@@ -599,6 +582,8 @@
   {
     FUNCNAME("PetscSolverFeti::createSchurPrimal()");
 
+    const FiniteElemSpace *feSpace = meshDistributor->getFeSpace(0);
+
     if (schurPrimalSolver == 0) {
       MSG("Create iterative schur primal solver!\n");
@@ -611,12 +596,12 @@
                  nRankB * nComponents, nOverallB * nComponents,
                  &(schurPrimalData.tmp_vec_b));
     VecCreateMPI(PETSC_COMM_WORLD,
-                 nRankPrimals * nComponents, nOverallPrimals * nComponents,
+                 primalDofMap[feSpace].nRankDofs * nComponents, primalDofMap[feSpace].nOverallDofs * nComponents,
                  &(schurPrimalData.tmp_vec_primal));
 
     MatCreateShell(PETSC_COMM_WORLD,
-                   nRankPrimals * nComponents, nRankPrimals * nComponents,
-                   nOverallPrimals * nComponents, nOverallPrimals * nComponents,
+                   primalDofMap[feSpace].nRankDofs * nComponents, primalDofMap[feSpace].nRankDofs * nComponents,
+                   primalDofMap[feSpace].nOverallDofs * nComponents, primalDofMap[feSpace].nOverallDofs * nComponents,
                    &schurPrimalData, &mat_schur_primal);
     MatShellSetOperation(mat_schur_primal, MATOP_MULT,
@@ -633,8 +618,8 @@
 
       double wtime = MPI::Wtime();
 
-      int nRowsRankPrimal = nRankPrimals * nComponents;
-      int nRowsOverallPrimal = nOverallPrimals * nComponents;
+      int nRowsRankPrimal = primalDofMap[feSpace].nRankDofs * nComponents;
+      int nRowsOverallPrimal = primalDofMap[feSpace].nOverallDofs * nComponents;
       int nRowsRankB = nRankB * nComponents;
       int nRowsOverallB = nOverallB * nComponents;
@@ -649,7 +634,6 @@
       const PetscInt *cols;
       const PetscScalar *values;
 
-      int nLocalPrimals = globalPrimalIndex.size() * nComponents;
       map<int, vector<pair<int, double> > > mat_b_primal_cols;
 
       for (int i = 0; i < nRankB * nComponents; i++) {
@@ -667,7 +651,7 @@
       mpi::globalMax(maxLocalPrimal);
 
       TEST_EXIT(mat_b_primal_cols.size() ==
-                globalPrimalIndex.size() * nComponents)("Should not happen!\n");
+                primalDofMap[feSpace].size() * nComponents)("Should not happen!\n");
 
       for (map<int, vector<pair<int, double> > >::iterator it = mat_b_primal_cols.begin();
            it != mat_b_primal_cols.end(); ++it) {
         Vec tmpVec;
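mat_schur_primal above is never assembled explicitly; it is a PETSc shell matrix whose action is supplied by petscMultMatSchurPrimal, with schurPrimalData passed as the context. For orientation, a minimal sketch of that MatShell pattern; the context struct MyCtx, the callback myMult and the sizes are placeholders, not the patch's actual types:

    // Sketch of the PETSc shell-matrix pattern used for mat_schur_primal
    // and mat_feti (names here are illustrative only).
    struct MyCtx
    {
      Mat *K;   // operator applied inside the callback
    };

    PetscErrorCode myMult(Mat A, Vec x, Vec y)
    {
      void *ptr;
      MatShellGetContext(A, &ptr);             // recover the user context
      MyCtx *ctx = static_cast<MyCtx*>(ptr);
      MatMult(*(ctx->K), x, y);                // apply the operator matrix-free
      return 0;
    }

    // Wiring, analogous to createSchurPrimal():
    //   MatCreateShell(PETSC_COMM_WORLD, nRankRows, nRankRows,
    //                  nOverallRows, nOverallRows, &ctx, &A);
    //   MatShellSetOperation(A, MATOP_MULT, (void(*)(void)) myMult);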
@@ -744,6 +728,8 @@
   {
     FUNCNAME("PetscSolverFeti::createFetiKsp()");
 
+    const FiniteElemSpace *feSpace = meshDistributor->getFeSpace(0);
+
     // === Create FETI-DP solver object. ===
 
     fetiData.mat_primal_b = &mat_primal_b;
@@ -756,15 +742,18 @@
                  nRankB * nComponents, nOverallB * nComponents,
                  &(fetiData.tmp_vec_b));
     VecCreateMPI(PETSC_COMM_WORLD,
-                 nRankLagrange * nComponents, nOverallLagrange * nComponents,
+                 dofFirstLagrange[feSpace].nRankDofs * nComponents,
+                 dofFirstLagrange[feSpace].nOverallDofs * nComponents,
                  &(fetiData.tmp_vec_lagrange));
     VecCreateMPI(PETSC_COMM_WORLD,
-                 nRankPrimals * nComponents, nOverallPrimals * nComponents,
+                 primalDofMap[feSpace].nRankDofs * nComponents, primalDofMap[feSpace].nOverallDofs * nComponents,
                  &(fetiData.tmp_vec_primal));
 
     MatCreateShell(PETSC_COMM_WORLD,
-                   nRankLagrange * nComponents, nRankLagrange * nComponents,
-                   nOverallLagrange * nComponents, nOverallLagrange * nComponents,
+                   dofFirstLagrange[feSpace].nRankDofs * nComponents,
+                   dofFirstLagrange[feSpace].nRankDofs * nComponents,
+                   dofFirstLagrange[feSpace].nOverallDofs * nComponents,
+                   dofFirstLagrange[feSpace].nOverallDofs * nComponents,
                    &fetiData, &mat_feti);
     MatShellSetOperation(mat_feti, MATOP_MULT, (void(*)(void))petscMultMatFeti);
@@ -892,6 +881,8 @@
   {
     FUNCNAME("PetscSolverFeti::recoverSolution()");
 
+    const FiniteElemSpace *feSpace = meshDistributor->getFeSpace(0);
+
     // === Get local part of the solution for B variables. ===
 
     PetscScalar *localSolB;
@@ -902,13 +893,13 @@
     // === contained in rank's domain.                                 ===
 
     vector<PetscInt> globalIsIndex, localIsIndex;
-    globalIsIndex.reserve(globalPrimalIndex.size() * nComponents);
-    localIsIndex.reserve(globalPrimalIndex.size() * nComponents);
+    globalIsIndex.reserve(primalDofMap[feSpace].size() * nComponents);
+    localIsIndex.reserve(primalDofMap[feSpace].size() * nComponents);
 
     {
       int counter = 0;
-      for (DofMapping::iterator it = globalPrimalIndex.begin();
-           it != globalPrimalIndex.end(); ++it) {
+      for (DofMapping::iterator it = primalDofMap[feSpace].getMap().begin();
+           it != primalDofMap[feSpace].getMap().end(); ++it) {
         for (int i = 0; i < nComponents; i++) {
           globalIsIndex.push_back(it->second * nComponents + i);
           localIsIndex.push_back(counter++);
         }
@@ -958,8 +949,8 @@
       }
 
       int counter = 0;
-      for (DofMapping::iterator it = globalPrimalIndex.begin();
-           it != globalPrimalIndex.end(); ++it) {
+      for (DofMapping::iterator it = primalDofMap[feSpace].getMap().begin();
+           it != primalDofMap[feSpace].getMap().end(); ++it) {
         dofVec[it->first] = localSolPrimal[counter * nComponents + i];
         counter++;
       }
@@ -975,18 +966,22 @@
   {
     FUNCNAME("PetscSolverFeti::fillPetscMatrix()");
 
+    const FiniteElemSpace *feSpace = meshDistributor->getFeSpace(0);
     nComponents = mat->getSize();
 
     // === Create all sets and indices. ===
 
+    primalDofMap.setMpiComm(mpiComm);
+    dualDofMap.setMpiComm(mpiComm);
+    dofFirstLagrange.setMpiComm(mpiComm);
     updateDofData();
 
     // === Create matrices for the FETI-DP method. ===
 
     int nRowsRankB = nRankB * nComponents;
     int nRowsOverallB = nOverallB * nComponents;
-    int nRowsRankPrimal = nRankPrimals * nComponents;
-    int nRowsOverallPrimal = nOverallPrimals * nComponents;
+    int nRowsRankPrimal = primalDofMap[feSpace].nRankDofs * nComponents;
+    int nRowsOverallPrimal = primalDofMap[feSpace].nOverallDofs * nComponents;
     int nRowsInterior = nLocalInterior * nComponents;
     int nRowsDual = nLocalDuals * nComponents;
@@ -1070,7 +1065,7 @@
         for (cursor_type cursor = begin((*mat)[i][j]->getBaseMatrix()),
                cend = end((*mat)[i][j]->getBaseMatrix()); cursor != cend; ++cursor) {
 
-          bool rowPrimal = globalPrimalIndex.count(*cursor) != 0;
+          bool rowPrimal = primalDofMap[feSpace].isSet(*cursor);
 
           cols.clear();
           colsOther.clear();
@@ -1087,15 +1082,13 @@
           for (icursor_type icursor = begin(cursor), icend = end(cursor);
                icursor != icend; ++icursor) {
 
-            bool colPrimal = globalPrimalIndex.count(col(*icursor)) != 0;
+            bool colPrimal = primalDofMap[feSpace].isSet(col(*icursor));
 
             if (colPrimal) {
               // Column is a primal variable.
 
-              TEST_EXIT_DBG(globalPrimalIndex.count(col(*icursor)))
-                ("No global primal index for DOF %d!\n", col(*icursor));
-
-              int colIndex = globalPrimalIndex[col(*icursor)] * nComponents + j;
+              int colIndex =
+                primalDofMap[feSpace][col(*icursor)] * nComponents + j;
 
               if (rowPrimal) {
                 cols.push_back(colIndex);
@@ -1163,10 +1156,8 @@
           } // for each nnz in row
 
           if (rowPrimal) {
-            TEST_EXIT_DBG(globalPrimalIndex.count(*cursor))
-              ("Should not happen!\n");
-
-            int rowIndex = globalPrimalIndex[*cursor] * nComponents + i;
+            int rowIndex =
+              primalDofMap[feSpace][*cursor] * nComponents + i;
             MatSetValues(mat_primal_primal, 1, &rowIndex, cols.size(),
                          &(cols[0]), &(values[0]), ADD_VALUES);
@@ -1306,24 +1297,22 @@
   {
     FUNCNAME("PetscSolverFeti::fillPetscRhs()");
 
+    const FiniteElemSpace *feSpace = meshDistributor->getFeSpace(0);
     int nComponents = vec->getSize();
 
     VecCreateMPI(PETSC_COMM_WORLD, nRankB * nComponents,
                  nOverallB * nComponents, &f_b);
     VecCreateMPI(PETSC_COMM_WORLD,
-                 nRankPrimals * nComponents,
-                 nOverallPrimals * nComponents, &f_primal);
+                 primalDofMap[feSpace].nRankDofs * nComponents,
+                 primalDofMap[feSpace].nOverallDofs * nComponents, &f_primal);
 
     for (int i = 0; i < nComponents; i++) {
       DOFVector<double>::Iterator dofIt(vec->getDOFVector(i), USED_DOFS);
       for (dofIt.reset(); !dofIt.end(); ++dofIt) {
         int index = dofIt.getDOFIndex();
 
-        if (globalPrimalIndex.count(index)) {
-          TEST_EXIT_DBG(globalPrimalIndex.count(index))
-            ("Should not happen!\n");
-
-          index = globalPrimalIndex[index] * nComponents + i;
+        if (primalDofMap[feSpace].isSet(index)) {
+          index = primalDofMap[feSpace][index] * nComponents + i;
           double value = *dofIt;
           VecSetValues(f_primal, 1, &index, &value, ADD_VALUES);
         } else {
@@ -1382,8 +1371,12 @@
   {
     FUNCNAME("PetscSolverFeti::solveFetiMatrix()");
 
+    const FiniteElemSpace *feSpace = meshDistributor->getFeSpace(0);
+
     int nOverallNest =
-      (nOverallB + nOverallPrimals + nOverallLagrange) * nComponents;
+      (nOverallB +
+       primalDofMap[feSpace].nOverallDofs +
+       dofFirstLagrange[feSpace].nOverallDofs) * nComponents;
 
     Mat mat_lagrange_transpose;
     MatTranspose(mat_lagrange, MAT_INITIAL_MATRIX, &mat_lagrange_transpose);
@@ -1417,8 +1410,8 @@
 
     // === mat_primal_primal ===
 
-    for (int i = 0; i < nRankPrimals * nComponents; i++) {
-      int rowIndex = rStartPrimals * nComponents + i;
+    for (int i = 0; i < primalDofMap[feSpace].nRankDofs * nComponents; i++) {
+      int rowIndex = primalDofMap[feSpace].rStartDofs * nComponents + i;
       MatGetRow(mat_primal_primal, rowIndex, &nCols, &cols, &vals);
 
       int rowIndexA = nOverallB * nComponents + rowIndex;
@@ -1432,9 +1425,9 @@
     PetscScalar *g_f_primal;
     VecGetArray(f_primal, &g_f_primal);
 
-    for (int i = 0; i < nRankPrimals * nComponents; i++)
+    for (int i = 0; i < primalDofMap[feSpace].nRankDofs * nComponents; i++)
       VecSetValue(A_global_rhs,
-                  (nOverallB + rStartPrimals) * nComponents + i, g_f_primal[i],
+                  (nOverallB + primalDofMap[feSpace].rStartDofs) * nComponents + i, g_f_primal[i],
                   INSERT_VALUES);
 
     VecRestoreArray(f_primal, &g_f_primal);
@@ -1455,8 +1448,8 @@
 
     // === mat_primal_b ===
 
-    for (int i = 0; i < nRankPrimals * nComponents; i++) {
-      int rowIndex = rStartPrimals * nComponents + i;
+    for (int i = 0; i < primalDofMap[feSpace].nRankDofs * nComponents; i++) {
+      int rowIndex = primalDofMap[feSpace].rStartDofs * nComponents + i;
       MatGetRow(mat_primal_b, rowIndex, &nCols, &cols, &vals);
 
       int rowIndexA = nOverallB * nComponents + rowIndex;
@@ -1467,11 +1460,11 @@
 
     // === mat_lagrange ===
 
-    for (int i = 0; i < nRankLagrange * nComponents; i++) {
-      int rowIndex = rStartLagrange * nComponents + i;
+    for (int i = 0; i < dofFirstLagrange[feSpace].nRankDofs * nComponents; i++) {
+      int rowIndex = dofFirstLagrange[feSpace].rStartDofs * nComponents + i;
       MatGetRow(mat_lagrange, rowIndex, &nCols, &cols, &vals);
 
-      int rowIndexA = (nOverallB + nOverallPrimals) * nComponents + rowIndex;
+      int rowIndexA = (nOverallB + primalDofMap[feSpace].nOverallDofs) * nComponents + rowIndex;
 
       MatSetValues(A_global, 1, &rowIndexA, nCols, cols, vals, ADD_VALUES);
       MatRestoreRow(mat_lagrange, rowIndex, &nCols, &cols, &vals);
@@ -1485,7 +1478,7 @@
 
      vector<int> colsA(nCols);
      for (int j = 0; j < nCols; j++)
-       colsA[j] = (nOverallB + nOverallPrimals) * nComponents + cols[j];
+       colsA[j] = (nOverallB + primalDofMap[feSpace].nOverallDofs) * nComponents + cols[j];
 
      MatSetValues(A_global, 1, &rowIndex, nCols, &(colsA[0]), vals, ADD_VALUES);
      MatRestoreRow(mat_lagrange_transpose, rowIndex, &nCols, &cols, &vals);
@@ -1544,12 +1537,12 @@
 
     localBIndex.resize(0);
     globalBIndex.resize(0);
-    localBIndex.reserve(nRankPrimals * nComponents);
-    globalBIndex.reserve(nRankPrimals * nComponents);
+    localBIndex.reserve(primalDofMap[feSpace].nRankDofs * nComponents);
+    globalBIndex.reserve(primalDofMap[feSpace].nRankDofs * nComponents);
 
-    for (int i = 0; i < nRankPrimals * nComponents; i++) {
-      localBIndex.push_back(rStartPrimals * nComponents + i);
-      globalBIndex.push_back(nOverallB * nComponents + rStartPrimals * nComponents + i);
+    for (int i = 0; i < primalDofMap[feSpace].nRankDofs * nComponents; i++) {
+      localBIndex.push_back(primalDofMap[feSpace].rStartDofs * nComponents + i);
+      globalBIndex.push_back(nOverallB * nComponents + primalDofMap[feSpace].rStartDofs * nComponents + i);
     }
 
     ISCreateGeneral(PETSC_COMM_SELF, localBIndex.size(),
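All PETSc indices in this solver interleave the solution components: a DOF with global index g occupies the nComponents consecutive rows g * nComponents + k, for k = 0, ..., nComponents - 1, which is why every index computation in the hunks above multiplies by nComponents. A small numeric check (values are made up): with nComponents = 3, the primal DOF with global index 7 maps to the rows 21, 22 and 23.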
diff --git a/AMDiS/src/parallel/PetscSolverFeti.h b/AMDiS/src/parallel/PetscSolverFeti.h
index 191f549d59c94eb77d1573629859ff41935d8d31..2c949ff06a6284e47ad8c222a7ced44a67ac2280 100644
--- a/AMDiS/src/parallel/PetscSolverFeti.h
+++ b/AMDiS/src/parallel/PetscSolverFeti.h
@@ -20,7 +20,11 @@
 
 /** \file PetscSolverFeti.h */
 
+#include <map>
+#include "parallel/FeSpaceMapping.h"
+#include "parallel/MpiHelper.h"
 #include "parallel/PetscSolver.h"
+#include "parallel/PetscSolverFetiStructs.h"
 #include "parallel/ParallelTypes.h"
 
 #ifndef AMDIS_PETSC_SOLVER_FETI_H
@@ -30,100 +34,7 @@ namespace AMDiS {
 
   using namespace std;
 
-
-  class PetscSolverFeti;
-
-  /** \brief
-   * This structure is used when defining the MatShell operation for solving
-   * primal schur complement. \ref petscMultMatSchurPrimal
-   */
-  struct SchurPrimalData {
-    /// Pointers to the matrix containing the primal variables.
-    Mat *mat_primal_primal;
-
-    /// Coupling matrices between the primal and the B variables.
-    Mat *mat_primal_b, *mat_b_primal;
-
-    /// Temporal vector on the B variables.
-    Vec tmp_vec_b;
-
-    /// Temporal vecor in the primal variables.
-    Vec tmp_vec_primal;
-
-    PetscSolverFeti *fetiSolver;
-  };
-
-  /** \brief
-   * This structure is used when defining the FETI-DP operator for solving
-   * the system matrix reduced to the Lagrange multipliers.
-   * \ref petscMultMatFeti
-   */
-  struct FetiData {
-    /// Coupling matrices between the primal and the B variables.
-    Mat *mat_primal_b, *mat_b_primal;
-
-    /// Matrix of Lagrange variables.
-    Mat *mat_lagrange;
-
-    /// Temporal vectors on the B variables.
-    Vec tmp_vec_b;
-
-    /// Temporal vector on the primal variables.
-    Vec tmp_vec_primal;
-
-    /// Temporal vector on the lagrange variables.
-    Vec tmp_vec_lagrange;
-
-    PetscSolverFeti *fetiSolver;
-
-    /// Pointer to the solver of the schur complement on the primal variables.
-    KSP *ksp_schur_primal;
-  };
-
-
-  struct FetiDirichletPreconData {
-    /// Matrix of scaled Lagrange variables.
-    Mat *mat_lagrange_scaled;
-
-    Mat *mat_interior_interior, *mat_duals_duals, *mat_interior_duals, *mat_duals_interior;
-
-    /// Pointer to the solver for \ref PetscSolverFeti::mat_bb.
-    KSP *ksp_interior;
-
-    /// Temporal vector on the B variables.
-    Vec tmp_vec_b;
-
-    /// Temporal vector on the dual variables.
-    Vec tmp_vec_duals0, tmp_vec_duals1;
-
-    /// Temporal vector on the interior variables.
-    Vec tmp_vec_interior;
-  };
-
-
-  struct FetiLumpedPreconData {
-    /// Matrix of scaled Lagrange variables.
-    Mat *mat_lagrange_scaled;
-
-    Mat *mat_duals_duals;
-
-    /// Temporal vector on the B variables.
-    Vec tmp_vec_b;
-
-    /// Temporal vector on the dual variables.
-    Vec tmp_vec_duals0, tmp_vec_duals1;
-  };
-
-
-  typedef enum {
-    FETI_NONE = 0,
-    FETI_DIRICHLET = 1,
-    FETI_LUMPED = 2
-  } FetiPreconditioner;
-
-
-/** \brief
  * FETI-DP implementation based on PETSc.
  */
 class PetscSolverFeti : public PetscSolver
@@ -238,28 +149,18 @@
 
     int nComponents;
 
     /// Mapping from primal DOF indices to a global index of primals.
-    DofMapping globalPrimalIndex;
-
-    /// Number of rank owned primals and global primals
-    int nRankPrimals, nOverallPrimals, rStartPrimals;
+    FeSpaceData<GlobalDofMap> primalDofMap;
 
-    /// Set of DOF indices that are considered to be dual variables.
-    DofIndexSet duals;
-
     /// Mapping from dual DOF indices to a global index of duals.
-    DofMapping globalDualIndex;
-
-    /// Stores to each dual boundary DOF the set of ranks in which the DOF
-    /// is contained in.
-    DofIndexToPartitions boundaryDofRanks;
+    FeSpaceData<GlobalDofMap> dualDofMap;
 
     /// Stores to each dual DOF index the index of the first Lagrange
     /// constraint that is assigned to this DOF.
-    DofMapping dofFirstLagrange;
+    FeSpaceData<GlobalDofMap> dofFirstLagrange;
 
-    /// Number of rank owned Lagrange variables, number of global
-    /// Lagrange variables.
-    int nRankLagrange, nOverallLagrange, rStartLagrange;
+    /// Stores to each dual boundary DOF the set of ranks in which the DOF
+    /// is contained in.
+    DofIndexToPartitions boundaryDofRanks;
 
     /// Index for each non primal variables to the global index of
     /// B variables.
@@ -336,6 +237,7 @@ namespace AMDiS {
     // Number of local nodes that are duals.
     int nLocalDuals;
   };
+
 }
 
 #endif
diff --git a/AMDiS/src/parallel/PetscSolverFetiStructs.h b/AMDiS/src/parallel/PetscSolverFetiStructs.h
new file mode 100644
index 0000000000000000000000000000000000000000..a69dd4daa47cca995a15ff5e1bef651cb877b121
--- /dev/null
+++ b/AMDiS/src/parallel/PetscSolverFetiStructs.h
@@ -0,0 +1,121 @@
+// ============================================================================
+// ==                                                                        ==
+// ==  AMDiS - Adaptive multidimensional simulations                         ==
+// ==                                                                        ==
+// ==  http://www.amdis-fem.org                                              ==
+// ==                                                                        ==
+// ============================================================================
+//
+// Software License for AMDiS
+//
+// Copyright (c) 2010 Dresden University of Technology
+// All rights reserved.
+// Authors: Simon Vey, Thomas Witkowski et al.
+//
+// This file is part of AMDiS
+//
+// See also license.opensource.txt in the distribution.
+
+
+
+/** \file PetscSolverFetiStructs.h */
+
+#ifndef AMDIS_PETSC_SOLVER_FETI_STRUCTS_H
+#define AMDIS_PETSC_SOLVER_FETI_STRUCTS_H
+
+namespace AMDiS {
+
+  class PetscSolverFeti;
+
+  /** \brief
+   * This structure is used when defining the MatShell operation for solving
+   * the primal Schur complement. \ref petscMultMatSchurPrimal
+   */
+  struct SchurPrimalData {
+    /// Pointer to the matrix containing the primal variables.
+    Mat *mat_primal_primal;
+
+    /// Coupling matrices between the primal and the B variables.
+    Mat *mat_primal_b, *mat_b_primal;
+
+    /// Temporary vector on the B variables.
+    Vec tmp_vec_b;
+
+    /// Temporary vector on the primal variables.
+    Vec tmp_vec_primal;
+
+    PetscSolverFeti *fetiSolver;
+  };
+
+
+  /** \brief
+   * This structure is used when defining the FETI-DP operator for solving
+   * the system matrix reduced to the Lagrange multipliers.
+   * \ref petscMultMatFeti
+   */
+  struct FetiData {
+    /// Coupling matrices between the primal and the B variables.
+    Mat *mat_primal_b, *mat_b_primal;
+
+    /// Matrix of Lagrange variables.
+    Mat *mat_lagrange;
+
+    /// Temporary vector on the B variables.
+    Vec tmp_vec_b;
+
+    /// Temporary vector on the primal variables.
+    Vec tmp_vec_primal;
+
+    /// Temporary vector on the Lagrange variables.
+    Vec tmp_vec_lagrange;
+
+    PetscSolverFeti *fetiSolver;
+
+    /// Pointer to the solver of the Schur complement on the primal variables.
+    KSP *ksp_schur_primal;
+  };
+
+
+  struct FetiDirichletPreconData {
+    /// Matrix of scaled Lagrange variables.
+    Mat *mat_lagrange_scaled;
+
+    Mat *mat_interior_interior, *mat_duals_duals, *mat_interior_duals, *mat_duals_interior;
+
+    /// Pointer to the solver for \ref PetscSolverFeti::mat_bb.
+    KSP *ksp_interior;
+
+    /// Temporary vector on the B variables.
+    Vec tmp_vec_b;
+
+    /// Temporary vectors on the dual variables.
+    Vec tmp_vec_duals0, tmp_vec_duals1;
+
+    /// Temporary vector on the interior variables.
+    Vec tmp_vec_interior;
+  };
+
+
+  struct FetiLumpedPreconData {
+    /// Matrix of scaled Lagrange variables.
+    Mat *mat_lagrange_scaled;
+
+    Mat *mat_duals_duals;
+
+    /// Temporary vector on the B variables.
+    Vec tmp_vec_b;
+
+    /// Temporary vectors on the dual variables.
+    Vec tmp_vec_duals0, tmp_vec_duals1;
+  };
+
+
+  typedef enum {
+    FETI_NONE = 0,
+    FETI_DIRICHLET = 1,
+    FETI_LUMPED = 2
+  } FetiPreconditioner;
+
+}
+
+#endif
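For reference, these structs are the contexts handed to the shell matrices created in PetscSolverFeti.cc. In the usual FETI-DP notation, which the patch itself does not spell out: SchurPrimalData carries the blocks needed to apply the primal Schur complement S_PiPi = K_PiPi - K_PiB * inv(K_BB) * K_BPi, while FetiData carries what petscMultMatFeti needs to apply the operator reduced to the Lagrange multipliers, i.e. lambda -> B_B inv(K_BB) B_B^T lambda plus the coarse correction through inv(S_PiPi), the latter supplied by ksp_schur_primal.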