diff --git a/AMDiS/src/parallel/MeshDistributor.cc b/AMDiS/src/parallel/MeshDistributor.cc
index 937ea8b371d725fecdb36b095317291edef2f02d..b3aa5f333c51de3bc5fddb4fc74ae0878ffcf4b9 100644
--- a/AMDiS/src/parallel/MeshDistributor.cc
+++ b/AMDiS/src/parallel/MeshDistributor.cc
@@ -496,35 +496,6 @@ namespace AMDiS {
   }
 
 
-  void MeshDistributor::synchVector(DOFVector<double> &vec)
-  {
-    StdMpi<vector<double> > stdMpi(mpiComm);
-
-    for (RankToDofContainer::iterator sendIt = sendDofs.begin();
-         sendIt != sendDofs.end(); ++sendIt) {
-      vector<double> dofs;
-      int nSendDofs = sendIt->second.size();
-      dofs.reserve(nSendDofs);
-
-      for (int i = 0; i < nSendDofs; i++)
-        dofs.push_back(vec[*((sendIt->second)[i])]);
-
-      stdMpi.send(sendIt->first, dofs);
-    }
-
-    for (RankToDofContainer::iterator recvIt = recvDofs.begin();
-         recvIt != recvDofs.end(); ++recvIt)
-      stdMpi.recv(recvIt->first, recvIt->second.size());
-
-    stdMpi.startCommunication();
-
-    for (RankToDofContainer::iterator recvIt = recvDofs.begin();
-         recvIt != recvDofs.end(); ++recvIt)
-      for (unsigned int i = 0; i < recvIt->second.size(); i++)
-        vec[*(recvIt->second)[i]] = stdMpi.getRecvData(recvIt->first)[i];
-  }
-
-
   void MeshDistributor::synchVector(SystemVector &vec)
   {
     int nComponents = vec.getSize();
@@ -958,6 +929,26 @@ namespace AMDiS {
   }
 
 
+  void MeshDistributor::createBoundaryDofs(std::set<DegreeOfFreedom> &boundaryDofs)
+  {
+    FUNCNAME("MeshDistributor::createBoundaryDofs()");
+
+    boundaryDofs.clear();
+
+    for (RankToDofContainer::iterator it = sendDofs.begin();
+         it != sendDofs.end(); ++it)
+      for (DofContainer::iterator dofIt = it->second.begin();
+           dofIt != it->second.end(); ++dofIt)
+        boundaryDofs.insert(**dofIt);
+
+    for (RankToDofContainer::iterator it = recvDofs.begin();
+         it != recvDofs.end(); ++it)
+      for (DofContainer::iterator dofIt = it->second.begin();
+           dofIt != it->second.end(); ++dofIt)
+        boundaryDofs.insert(**dofIt);
+  }
+
+
   void MeshDistributor::serialize(ostream &out, DofContainer &data)
   {
     int vecSize = data.size();
diff --git a/AMDiS/src/parallel/MeshDistributor.h b/AMDiS/src/parallel/MeshDistributor.h
index 0e6622d0ed0b012312465f6fccf4b1c1b186ba9e..e2fdd8fb68a375a18fcd9fc9e9efc07d6412d71e 100644
--- a/AMDiS/src/parallel/MeshDistributor.h
+++ b/AMDiS/src/parallel/MeshDistributor.h
@@ -28,6 +28,7 @@
 #include "parallel/ParallelTypes.h"
 #include "parallel/MeshPartitioner.h"
 #include "parallel/InteriorBoundary.h"
+#include "parallel/StdMpi.h"
 #include "AMDiS_fwd.h"
 #include "Global.h"
 #include "ProblemTimeInterface.h"
@@ -239,6 +240,10 @@ namespace AMDiS {
       return recvDofs;
     }
 
+    /// Creates a set of all DOFs that are on interior boundaries of the rank's
+    /// domain, i.e., the union of \ref sendDofs and \ref recvDofs.
+    void createBoundaryDofs(std::set<DegreeOfFreedom> &boundaryDofs);
+
     // Writes all data of this object to an output stream.
     void serialize(ostream &out);
 
@@ -255,8 +260,35 @@ namespace AMDiS {
      * after the DOFVector is set by some user defined functions, e.g., initial
      * solution functions.
      */
-    void synchVector(DOFVector<double> &vec);
-
+    template<typename T>
+    void synchVector(DOFVector<T> &vec)
+    {
+      StdMpi<vector<T> > stdMpi(mpiComm);
+
+      for (RankToDofContainer::iterator sendIt = sendDofs.begin();
+           sendIt != sendDofs.end(); ++sendIt) {
+        vector<T> dofs;
+        int nSendDofs = sendIt->second.size();
+        dofs.reserve(nSendDofs);
+
+        for (int i = 0; i < nSendDofs; i++)
+          dofs.push_back(vec[*((sendIt->second)[i])]);
+
+        stdMpi.send(sendIt->first, dofs);
+      }
+
+      for (RankToDofContainer::iterator recvIt = recvDofs.begin();
+           recvIt != recvDofs.end(); ++recvIt)
+        stdMpi.recv(recvIt->first, recvIt->second.size());
+
+      stdMpi.startCommunication();
+
+      for (RankToDofContainer::iterator recvIt = recvDofs.begin();
+           recvIt != recvDofs.end(); ++recvIt)
+        for (unsigned int i = 0; i < recvIt->second.size(); i++)
+          vec[*(recvIt->second)[i]] = stdMpi.getRecvData(recvIt->first)[i];
+    }
+
     /** \brief
      * Works in the same way as the function above defined for DOFVectors. Due to
      * performance, this function does not call \ref synchVector for each DOFVector,