Commit 4e8ebef1 authored by Praetorius, Simon

Initial commit

# We require CMake version 3.1 to prevent issues
# with dune_enable_all_packages and older CMake versions.
cmake_minimum_required(VERSION 3.1)
project(remoteindices_error CXX)
if(NOT (dune-common_DIR OR dune-common_ROOT OR
"${CMAKE_PREFIX_PATH}" MATCHES ".*dune-common.*"))
string(REPLACE ${CMAKE_PROJECT_NAME} dune-common dune-common_DIR
${PROJECT_BINARY_DIR})
endif()
#find dune-common and set the module path
find_package(dune-common REQUIRED)
list(APPEND CMAKE_MODULE_PATH "${PROJECT_SOURCE_DIR}/cmake/modules"
${dune-common_MODULE_PATH})
#include the dune macros
include(DuneMacros)
# start a dune project with information from dune.module
dune_project()
dune_enable_all_packages()
add_subdirectory(src)
add_subdirectory(amdis)
add_subdirectory(doc)
add_subdirectory(cmake/modules)
# finalize the dune project, e.g. generating config.h etc.
finalize_dune_project(GENERATE_CONFIG_H_CMAKE)
Preparing the Sources
=========================
In addition to the software mentioned in README you'll need the
following programs installed on your system:
cmake >= 3.1
Getting started
---------------
If these preliminaries are met, you should run
dunecontrol all
which will find all installed dune modules as well as all dune modules
(not installed) whose sources reside in a subdirectory of the current
directory. Note that if dune is not installed properly you will either
have to add the directory where the dunecontrol script resides (probably
./dune-common/bin) to your path or specify the relative path of the script.
Most probably you'll have to provide additional information to dunecontrol
(e.g. compilers, configure options) and/or make options.
The most convenient way is to use options files in this case. The files
define variables such as:
CMAKE_FLAGS flags passed to cmake (during configure)
An example options file might look like this:
# use these options to configure and make if no other options are given
CMAKE_FLAGS=" \
-DCMAKE_CXX_COMPILER=g++-5 \
-DCMAKE_CXX_FLAGS='-Wall -pedantic' \
-DCMAKE_INSTALL_PREFIX=/install/path" #Force g++-5 and set compiler flags
If you save this information into example.opts you can pass the opts file to
dunecontrol via the --opts option, e.g.
dunecontrol --opts=example.opts all
More info
---------
See
dunecontrol --help
for further options.
The full build system is described in the dune-common/doc/buildsystem (Git version) or under share/doc/dune-common/buildsystem if you installed DUNE!
#pragma once
#if HAVE_MPI
#include <array>
#include <vector>
#include <mpi.h>
#include <amdis/common/parallel/Request.hpp>
#include <amdis/common/parallel/MpiTraits.hpp>
namespace AMDiS { namespace Mpi
{
// MPI_Allreduce wrapper for a single value: reduce `in` over all ranks into `out`
template <class T, class Operation>
void all_reduce(MPI_Comm comm, T const& in, T& out, Operation)
{
MPI_Allreduce(&in, &out, 1, type_to_mpi<T>(), op_to_mpi<Operation>(), comm);
}
// In-place MPI_Allreduce of a single value
template <class T, class Operation>
void all_reduce(MPI_Comm comm, T& inout, Operation)
{
MPI_Allreduce(MPI_IN_PLACE, &inout, 1, type_to_mpi<T>(), op_to_mpi<Operation>(), comm);
}
// MPI_Allgather wrapper: collect one value from each rank into `out` (resized to the communicator size)
template <class T>
void all_gather(MPI_Comm comm, T const& in, std::vector<T>& out)
{
int size = 1;
MPI_Comm_size(comm, &size);
out.resize(size);
MPI_Allgather(to_void_ptr(&in), 1, type_to_mpi<T>(), to_void_ptr(out.data()), 1, type_to_mpi<T>(), comm);
}
template <class T, std::size_t N>
void all_gather(MPI_Comm comm, std::array<T,N> const& in, std::vector<T>& out)
{
int size = 1;
MPI_Comm_size(comm, &size);
out.resize(size * N);
MPI_Allgather(to_void_ptr(in.data()), N, type_to_mpi<T>(), to_void_ptr(out.data()), N, type_to_mpi<T>(), comm);
}
// MPI_Allgather wrapper returning the gathered values as a new vector
template <class T>
std::vector<T> all_gather(MPI_Comm comm, T const& in)
{
int size = 1;
MPI_Comm_size(comm, &size);
std::vector<T> out(size);
MPI_Allgather(to_void_ptr(&in), 1, type_to_mpi<T>(), to_void_ptr(out.data()), 1, type_to_mpi<T>(), comm);
return out;
}
template <class T, std::size_t N>
std::vector<T> all_gather(MPI_Comm comm, std::array<T,N> const& in)
{
int size = 1;
MPI_Comm_size(comm, &size);
std::vector<T> out(size * N);
MPI_Allgather(to_void_ptr(in.data()), N, type_to_mpi<T>(), to_void_ptr(out.data()), N, type_to_mpi<T>(), comm);
return out;
}
// Non-blocking MPI_Iallgather wrapper; the returned Request tracks completion
template <class T>
Request iall_gather(MPI_Comm comm, T const& in, std::vector<T>& out)
{
int size = 1;
MPI_Comm_size(comm, &size);
out.resize(size);
MPI_Request request;
MPI_Iallgather(to_void_ptr(&in), 1, type_to_mpi<T>(), to_void_ptr(out.data()), 1, type_to_mpi<T>(), comm, &request);
return {request};
}
template <class T, std::size_t N>
Request iall_gather(MPI_Comm comm, std::array<T,N> const& in, std::vector<T>& out)
{
int size = 1;
MPI_Comm_size(comm, &size);
out.resize(size * N);
MPI_Request request;
MPI_Iallgather(to_void_ptr(in.data()), N, type_to_mpi<T>(), to_void_ptr(out.data()), N, type_to_mpi<T>(), comm, &request);
return {request};
}
}} // end namespace AMDiS::Mpi
#endif // HAVE_MPI
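A minimal usage sketch of the collective wrappers above. It assumes that the MpiTraits header provides type_to_mpi for int and that op_to_mpi maps std::plus<> to MPI_SUM; the include path of the wrapper header is a guess and marked as such in the code.

#include <functional>
#include <iostream>
#include <vector>
#include <mpi.h>
// NOTE: hypothetical include path -- use whatever header defines AMDiS::Mpi::all_reduce / all_gather
#include <amdis/common/parallel/Collective.hpp>

int main(int argc, char** argv)
{
  MPI_Init(&argc, &argv);
  int rank = 0;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  // in-place sum of all ranks (assumes op_to_mpi<std::plus<>> maps to MPI_SUM)
  int sum = rank;
  AMDiS::Mpi::all_reduce(MPI_COMM_WORLD, sum, std::plus<>{});

  // gather every rank's value into one vector on each process
  std::vector<int> all = AMDiS::Mpi::all_gather(MPI_COMM_WORLD, rank);

  if (rank == 0)
    std::cout << "sum of ranks = " << sum << ", gathered " << all.size() << " values\n";

  MPI_Finalize();
}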
#pragma once
#if HAVE_MPI
#include <array>
#include <iostream>
#include <list>
#include <map>
#include <memory>
#include <string>
#include <type_traits>
#include <vector>
#include <mpi.h>
#include <dune/functions/common/functionconcepts.hh>
#include <amdis/common/parallel/Request.hpp>
#include <amdis/common/parallel/RecvDynamicSize.hpp>
#include <amdis/common/parallel/MpiTraits.hpp>
namespace AMDiS { namespace Mpi
{
class Communicator
{
public:
/// Constructor, stores an MPI communicator, e.g. MPI_COMM_WORLD
Communicator(MPI_Comm comm)
: comm_(comm)
, buffer_(1024)
{
MPI_Comm_size(comm_, &size_);
MPI_Comm_rank(comm_, &rank_);
MPI_Buffer_attach(buffer_.data(), sizeof(char)*buffer_.size());
}
public:
operator MPI_Comm() const { return comm_; }
int size() const { return size_; }
int rank() const { return rank_; }
public:
// send mpi datatype
template <class Data>
void send(Data const& data, int to, int tag = 0) const;
// send array of mpi datatypes
template <class T>
void send(T const* data, std::size_t size, int to, int tag = 0) const;
template <class T, std::size_t N>
void send(T const (&data)[N], int to, int tag = 0) const
{
send(&data[0], N, to, tag);
}
template <class T, std::size_t N>
void send(std::array<T,N> const& array, int to, int tag = 0) const
{
send(array.data(), N, to, tag);
}
template <class T>
void send(std::vector<T> const& vec, int to, int tag = 0) const;
void send(std::string const& str, int to, int tag = 0) const
{
MPI_Send(to_void_ptr(str.data()), int(str.size()), MPI_CHAR, to, tag, comm_);
}
// -------------------------------------------------------------------------------------
// send mpi datatype (non-blocking)
template <class Data>
Request isend(Data const& data, int to, int tag = 0) const;
// send mpi datatype (non-blocking, buffered)
template <class Data>
Request ibsend(Data const& data, int to, int tag = 0) const;
// send array of mpi datatypes (non-blocking)
template <class Data>
Request isend(Data const* data, std::size_t size, int to, int tag = 0) const;
// send array of mpi datatypes (non-blocking, buffered)
template <class Data>
Request ibsend(Data const* data, std::size_t size, int to, int tag = 0) const;
template <class T>
Request isend(std::vector<T> const& vec, int to, int tag = 0) const;
Request isend(std::string const& str, int to, int tag = 0) const
{
MPI_Request request;
MPI_Isend(to_void_ptr(str.data()), int(str.size()), MPI_CHAR, to, tag, comm_, &request);
return {request};
}
// -------------------------------------------------------------------------------------
// receive mpi datatype
template <class Data>
MPI_Status recv(Data& data, int from, int tag = 0) const;
// receive array of mpi datatypes
template <class T>
MPI_Status recv(T* data, std::size_t size, int from, int tag = 0) const;
template <class T, std::size_t N>
MPI_Status recv(T (&data)[N], int from, int tag = 0) const
{
return recv(data, N, from, tag);
}
template <class T, std::size_t N>
MPI_Status recv(std::array<T,N>& data, int from, int tag = 0) const
{
return recv(data.data(), N, from, tag);
}
template <class T>
MPI_Status recv(std::vector<T>& data, int from, int tag = 0) const;
MPI_Status recv(std::string& str, int from, int tag = 0) const
{
MPI_Status status;
MPI_Probe(from, tag, comm_, &status);
int size = 0;
MPI_Get_count(&status, MPI_CHAR, &size);
str.resize(size);
MPI_Recv(&str[0], size, MPI_CHAR, from, tag, comm_, MPI_STATUS_IGNORE);
return status;
}
// -------------------------------------------------------------------------------------
// receive mpi datatype (non-blocking)
template <class Data>
Request irecv(Data& data, int from, int tag = 0) const;
// receive array of mpi datatypes (non-blocking)
template <class Data>
Request irecv(Data* data, std::size_t size, int from, int tag = 0) const;
template <class T, std::size_t N>
Request irecv(T (&data)[N], int from, int tag = 0) const
{
return irecv(&data[0], N, from, tag);
}
template <class T, std::size_t N>
Request irecv(std::array<T,N>& data, int from, int tag = 0) const
{
return irecv(data.data(), N, from, tag);
}
template <class Receiver>
std::enable_if_t<Dune::Functions::Concept::isCallable<Receiver,MPI_Status>(), Request>
irecv(Receiver&& recv, int from, int tag = 0) const
{
return Request{ RecvDynamicSize(from, tag, comm_, std::forward<Receiver>(recv)) };
}
// receive vector of mpi datatypes
// 1. until message received, call MPI_Iprobe to retrieve status and size of message
// 2. resize data-vector
// 3. receive data into vector
template <class T>
Request irecv(std::vector<T>& vec, int from, int tag = 0) const;
Request irecv(std::string& str, int from, int tag = 0) const
{
return Request{RecvDynamicSize(from,tag,comm_,
[comm=comm_,&str](MPI_Status status) -> MPI_Request
{
int size = 0;
MPI_Get_count(&status, MPI_CHAR, &size);
str.resize(size);
MPI_Request req;
MPI_Irecv(&str[0], size, MPI_CHAR, status.MPI_SOURCE, status.MPI_TAG, comm, &req);
return req;
}) };
}
protected:
// free unused buffers
void check_buffers() const
{
using Buffers = std::decay_t<decltype(buffers_)>;
std::list<typename Buffers::iterator> remove;
for (auto it = buffers_.begin(); it != buffers_.end(); ++it) {
int flag;
MPI_Request_get_status(it->first, &flag, MPI_STATUS_IGNORE);
if (flag != 0)
remove.push_back(it);
}
for (auto it : remove)
buffers_.erase(it);
}
// create a new (request, buffer) pair of the given length for the message identified by (source, tag)
std::pair<MPI_Request, std::string>& make_buffer(MPI_Status status, std::size_t len) const
{
auto it = buffers_.emplace(buffers_.end(), MPI_Request{}, std::string(len,' '));
buffers_iterators_[{status.MPI_SOURCE, status.MPI_TAG}] = it;
return buffers_.back();
}
// look up the buffer previously created for the message identified by (source, tag)
std::pair<MPI_Request, std::string>& get_buffer(MPI_Status status) const
{
auto it = buffers_iterators_[{status.MPI_SOURCE, status.MPI_TAG}];
return *it;
}
protected:
MPI_Comm comm_;
int rank_ = 0;
int size_ = 1;
std::vector<char> buffer_;
using BufferList = std::list< std::pair<MPI_Request, std::string> >;
mutable BufferList buffers_;
using BufferIter = BufferList::iterator;
mutable std::map<std::pair<int,int>, BufferIter> buffers_iterators_;
};
}} // end namespace AMDiS::Mpi
#endif // HAVE_MPI
#include <amdis/common/parallel/Communicator.inc.hpp>
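Below is a small, hedged usage sketch of the Communicator class: rank 0 sends a string to rank 1, which receives it with the probe-based recv overload declared above. Only members declared in this header (rank(), size(), send, recv) are used; the include path is inferred from the other amdis includes and the trailing include of Communicator.inc.hpp.

#include <iostream>
#include <string>
#include <mpi.h>
#include <amdis/common/parallel/Communicator.hpp>

int main(int argc, char** argv)
{
  MPI_Init(&argc, &argv);
  {
    AMDiS::Mpi::Communicator world(MPI_COMM_WORLD);

    if (world.rank() == 0 && world.size() > 1) {
      std::string msg = "hello from rank 0";
      world.send(msg, /*to=*/1, /*tag=*/42);      // blocking string send
    }
    else if (world.rank() == 1) {
      std::string msg;
      world.recv(msg, /*from=*/0, /*tag=*/42);    // probes the message size, then receives
      std::cout << "rank 1 received: \"" << msg << "\"\n";
    }
  } // Communicator destroyed before MPI_Finalize
  MPI_Finalize();
}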
#pragma once
#if HAVE_MPI
#include <algorithm> // std::max, used in the dynamic-size receives below
namespace AMDiS { namespace Mpi {
// send mpi datatype
template <class Data>
void Communicator::send(Data const& data, int to, int tag) const
{
MPI_Send(to_void_ptr(&data), 1, type_to_mpi<Data>(), to, tag, comm_);
}
// send array of mpi datatypes
template <class Data>
void Communicator::send(Data const* data, std::size_t size, int to, int tag) const
{
MPI_Send(to_void_ptr(data), int(size), type_to_mpi<Data>(), to, tag, comm_);
}
template <class T>
void Communicator::send(std::vector<T> const& vec, int to, int tag) const
{
MPI_Send(to_void_ptr(vec.data()), int(vec.size()), type_to_mpi<T>(), to, tag, comm_);
}
// -------------------------------------------------------------------------------------
// send mpi datatype (non-blocking)
template <class Data>
Request Communicator::isend(Data const& data, int to, int tag) const
{
MPI_Request request;
MPI_Isend(to_void_ptr(&data), 1, type_to_mpi<Data>(), to, tag, comm_, &request);
return {request};
}
// send mpi datatype (non-blocking, buffered)
template <class Data>
Request Communicator::ibsend(Data const& data, int to, int tag) const
{
MPI_Request request;
MPI_Ibsend(to_void_ptr(&data), 1, type_to_mpi<Data>(), to, tag, comm_, &request);
return {request};
}
// send array of mpi datatypes (non-blocking)
template <class Data>
Request Communicator::isend(Data const* data, std::size_t size, int to, int tag) const
{
MPI_Request request;
MPI_Isend(to_void_ptr(data), size, type_to_mpi<Data>(), to, tag, comm_, &request);
return {request};
}
// send array of mpi datatypes (non-blocking, buffered)
template <class Data>
Request Communicator::ibsend(Data const* data, std::size_t size, int to, int tag) const
{
MPI_Request request;
MPI_Ibsend(to_void_ptr(data), size, type_to_mpi<Data>(), to, tag, comm_, &request);
return {request};
}
template <class T>
Request Communicator::isend(std::vector<T> const& vec, int to, int tag) const
{
MPI_Request request;
MPI_Isend(to_void_ptr(vec.data()), int(vec.size()), type_to_mpi<T>(), to, tag, comm_, &request);
return {request};
}
// -------------------------------------------------------------------------------------
// receive mpi datatype
template <class Data>
MPI_Status Communicator::recv(Data& data, int from, int tag) const
{
MPI_Status status;
MPI_Recv(&data, 1, type_to_mpi<Data>(), from, tag, comm_, &status);
return status;
}
// receive array of mpi datatypes
template <class Data>
MPI_Status Communicator::recv(Data* data, std::size_t size, int from, int tag) const
{
MPI_Status status;
MPI_Recv(data, size, type_to_mpi<Data>(), from, tag, comm_, &status);
return status;
}
// receive vector of mpi datatypes; the number of elements is determined with MPI_Probe before receiving
template <class T>
MPI_Status Communicator::recv(std::vector<T>& vec, int from, int tag) const
{
MPI_Status status;
MPI_Probe(from, tag, comm_, &status);
int size = 0;
MPI_Get_count(&status, type_to_mpi<T>(), &size);
int min_size = std::max(size,1); // receive into a buffer of at least one element, so vec.data() is never null
vec.resize(min_size);
MPI_Recv(vec.data(), min_size, type_to_mpi<T>(), from, tag, comm_, MPI_STATUS_IGNORE);
if (size != min_size)
vec.resize(size);
return status;
}
// -------------------------------------------------------------------------------------
// receive mpi datatype (non-blocking)
template <class Data>
Request Communicator::irecv(Data& data, int from, int tag) const
{
MPI_Request request;
MPI_Irecv(&data, 1, type_to_mpi<Data>(), from, tag, comm_, &request);
return {request};
}
// receive array of mpi datatypes (non-blocking)
template <class Data>
Request Communicator::irecv(Data* data, std::size_t size, int from, int tag) const
{
MPI_Request request;
MPI_Irecv(data, size, type_to_mpi<Data>(), from, tag, comm_, &request);
return {request};
}
// receive vector of unknown size (non-blocking): once the message is probed, resize the vector and start the receive
template <class T>
Request Communicator::irecv(std::vector<T>& vec, int from, int tag) const
{
return Request{ RecvDynamicSize(from,tag,comm_,
[comm=comm_,&vec](MPI_Status status) -> MPI_Request
{
int size = 0;
MPI_Get_count(&status, type_to_mpi<T>(), &size);
int min_size = std::max(size,1);
vec.resize(min_size);
MPI_Request req;
MPI_Irecv(vec.data(), min_size, type_to_mpi<T>(), status.MPI_SOURCE, status.MPI_TAG, comm, &req);
return req;
},
[&vec](MPI_Status status)
{
int size = 0;
MPI_Get_count(&status, type_to_mpi<T>(), &size);
vec.resize(size);
}) };
}
}} // end namespace AMDiS::Mpi
#endif // HAVE_MPI
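As a complement to the string example further above, this is a hedged sketch of the dynamic-size vector receive implemented in this file: the receiver does not know the message length, so recv() probes for it and resizes the vector before receiving. It assumes type_to_mpi<double>() is provided by the MpiTraits header.

#include <iostream>
#include <vector>
#include <mpi.h>
#include <amdis/common/parallel/Communicator.hpp>

int main(int argc, char** argv)
{
  MPI_Init(&argc, &argv);
  {
    AMDiS::Mpi::Communicator world(MPI_COMM_WORLD);

    if (world.rank() == 0 && world.size() > 1) {
      std::vector<double> values(7, 3.14);   // the length 7 is only known to the sender
      world.send(values, /*to=*/1);
    }
    else if (world.rank() == 1) {
      std::vector<double> values;            // size unknown on the receiving side
      world.recv(values, /*from=*/0);        // MPI_Probe + MPI_Get_count determine the size
      std::cout << "received " << values.size() << " values\n";
    }
  }
  MPI_Finalize();
}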
#pragma once
#if HAVE_MPI
#include <functional>
#include <type_traits>
#include <mpi.h>
#include <dune/common/parallel/mpitraits.hh>
namespace AMDiS {