Commit c53bb6c3 authored by Praetorius, Simon's avatar Praetorius, Simon

initial commit

parents
# CMAKE Configuration files
CMakeCache.txt
CMakeFiles
CMakeScripts
Testing
Makefile
cmake_install.cmake
install_manifest.txt
compile_commands.json
CTestTestfile.cmake
# directories for build and output files
build*/
output*/
# Prerequisites
*.d
# Compiled Object files
*.slo
*.lo
*.o
*.obj
# Precompiled Headers
*.gch
*.pch
# Compiled Dynamic libraries
*.so
*.dylib
*.dll
# Fortran module files
*.mod
*.smod
# Compiled Static libraries
*.lai
*.la
*.a
*.lib
# Executables
*.exe
*.out
*.app
# BUGFIX: cmake_minimum_required() must be called before project() — it
# establishes the policy settings that project() relies on.
cmake_minimum_required(VERSION 3.1)
project("mpi14")

find_package(MPI REQUIRED)

# the parameter --enable-new-dtags causes a linker problem, i.e. some libraries are
# linked without an rpath (or any other path information) and can not be found while
# running the executable. The hack below removes this flag manually from the linker flags.
string(REPLACE "-Wl,--enable-new-dtags" "" MY_MPI_CXX_LINK_FLAGS " ${MPI_CXX_LINK_FLAGS} ")
string(STRIP "${MY_MPI_CXX_LINK_FLAGS}" MY_MPI_CXX_LINK_FLAGS)

find_package(Boost 1.56 REQUIRED iostreams serialization)

add_executable(test_mpi test/test_mpi.cc)
target_include_directories(test_mpi PRIVATE src/ ${MPI_CXX_INCLUDE_PATH} ${Boost_INCLUDE_DIR})
target_compile_options(test_mpi PRIVATE ${MPI_CXX_COMPILE_FLAGS} -std=c++14 -DHAS_COMPRESSION=1)
target_link_libraries(test_mpi PRIVATE ${MY_MPI_CXX_LINK_FLAGS} ${MPI_CXX_LIBRARIES} ${Boost_LIBRARIES})
#pragma once
#include <list>
namespace mpi14
{
// Blocks until all communication operations associated with active handles
// in the range [first, last) complete.
// Each handle is polled via (*iter)->test(); a handle is dropped from the
// polling set as soon as it reports completion.
template <class FutureIter>
void wait_all(FutureIter first, FutureIter last)
{
  // snapshot of all handles still pending completion
  std::list<FutureIter> pending;
  for (FutureIter pos = first; pos != last; ++pos)
    pending.push_back(pos);

  while (!pending.empty()) {
    // scan for the first handle reporting completion, then drop it
    for (auto pos = pending.begin(); pos != pending.end(); ++pos) {
      if ((*pos)->test()) {
        pending.erase(pos);
        break;
      }
    }
  }
}
// Tests for completion of either one or none of the operations associated
// with active handles in [first, last). Returns an iterator to the first
// finished handle, or `last` if none has completed yet.
template <class FutureIter>
FutureIter test_any(FutureIter first, FutureIter last)
{
  FutureIter pos = first;
  while (pos != last && !pos->test())
    ++pos;
  return pos;
}

// Blocks until one of the operations associated with the active requests in
// [first, last) has completed (busy-polling via test_any).
template <class FutureIter>
void wait_any(FutureIter first, FutureIter last)
{
  for (;;) {
    if (test_any(first, last) != last)
      return;
  }
}
} // end namespace mpi14
#pragma once
#include <type_traits>
// Shorthand for an enable_if-based template constraint; used as the last
// (anonymous non-type) template parameter of the overload sets in
// Communicator to select between MPI-builtin and serialized code paths.
#define REQUIRES(...) \
  std::enable_if_t<__VA_ARGS__, int> = 0

namespace mpi14
{
  // Wraps a type into an (empty) value so it can be passed as a tag argument.
  template <class T>
  struct Type_t
  {
    using type = T;
  };

  // Value-level representation of the type T, e.g. f(Type_<int>).
  template <class T>
  constexpr Type_t<T> Type_ = {};

  // Compile-time index: shorthand for std::integral_constant<std::size_t, i>.
  template <std::size_t i>
  using index_t = std::integral_constant<std::size_t,i>;

  // Value-level representation of the index i, e.g. test(op, index_<1>).
  template <std::size_t i>
  constexpr index_t<i> index_ = {};

} // end namespace mpi14
#pragma once
#include <iostream>
#include <list>
#include <memory>
#include <string>
#include <type_traits>
#include <mpi.h>
#include "Common.hpp"
#include "Future.hpp"
#include "Serialization.hpp"
#include "Type_Traits.hpp"
namespace mpi14
{
class Communicator
{
template <class> friend class Future;
public:
/// Constructor, stores an MPI communicator, e.g. MPI_COMM_WORLD
Communicator(MPI_Comm comm = MPI_COMM_WORLD)
: comm_(comm)
, buffer_(1024)
{
MPI_Comm_rank(comm_, &rank_);
MPI_Comm_size(comm_, &size_);
MPI_Buffer_attach(buffer_.data(), sizeof(char)*buffer_.size());
}
public:
operator MPI_Comm() const { return comm_; }
int rank() const { return rank_; }
int size() const { return size_; }
public:
// send mpi datatype
template <class Data,
REQUIRES( is_mpi_type<Data> )>
MPI_Status send(Data const& data, int to, int tag = 0) const;
// send array of mpi datatypes
template <class T,
REQUIRES( is_mpi_type<T> )>
MPI_Status send(T const* data, std::size_t size, int to, int tag = 0) const;
template <class T, std::size_t N,
REQUIRES( is_mpi_type<T> )>
MPI_Status send(T const (&data)[N], int to, int tag = 0) const
{
send(data, N, to, tag);
}
template <class T,
REQUIRES( not is_mpi_type<T> )>
MPI_Status send(std::vector<T> const& data, int to, int tag = 0) const;
// send complex datatype:
// 1. create a binary representation of data, store it in a buffer
// 2. send size of buffer (with MPI_Ibsend)
// 3. send buffer
template <class Data,
REQUIRES( not is_mpi_type<Data> )>
MPI_Status send(Data const& data, int to, int tag = 0) const;
// -------------------------------------------------------------------------------------
// send mpi datatype (non-blocking)
template <class Data,
REQUIRES( is_mpi_type<Data> )>
std::shared_ptr<MPI_Request> isend(Data const& data, int to, int tag = 0) const;
// send mpi datatype (non-blocking, buffered)
template <class Data,
REQUIRES( is_mpi_type<Data> )>
std::shared_ptr<MPI_Request> ibsend(Data const& data, int to, int tag = 0) const;
// send array of mpi datatypes (non-blocking)
template <class Data,
REQUIRES( is_mpi_type<Data> )>
std::shared_ptr<MPI_Request> isend(Data const* data, std::size_t size, int to, int tag = 0) const;
// send array of mpi datatypes (non-blocking, buffered)
template <class Data,
REQUIRES( is_mpi_type<Data> )>
std::shared_ptr<MPI_Request> ibsend(Data const* data, std::size_t size, int to, int tag = 0) const;
#if 0
template <class T,
REQUIRES( is_mpi_type<T> )>
std::shared_ptr<MPI_Request> isend(std::vector<T> const& data, int to, int tag = 0) const;
#endif
// send complex datatype: (non-blocking)
// 1. create a binary representation of data, store it in a buffer
// 2. send size of buffer (with MPI_Ibsend)
// 3. send buffer (with MPI_Ibsend)
template <class Data,
REQUIRES( not is_mpi_type<Data> )>
std::shared_ptr<MPI_Request> isend(Data const& data, int to, int tag = 0) const;
// -------------------------------------------------------------------------------------
// receive mpi datatype
template <class Data,
REQUIRES( is_mpi_type<Data> )>
MPI_Status recv(Data& data, int from, int tag = 0) const;
// receive array of mpi datatypes
template <class T,
REQUIRES( is_mpi_type<T> )>
MPI_Status recv(T* data, std::size_t size, int from, int tag = 0) const;
template <class T, std::size_t N,
REQUIRES( is_mpi_type<T> )>
MPI_Status recv(T (&data)[N], int from, int tag = 0) const
{
return recv(data, N, from, tag);
}
// receive complex datatype
template <class Data,
REQUIRES( not is_mpi_type<Data> )>
MPI_Status recv(Data& data, int from, int tag = 0) const;
// -------------------------------------------------------------------------------------
// receive mpi datatype
template <class Data,
REQUIRES( is_mpi_type<Data> )>
std::shared_ptr<MPI_Request> irecv(Data& data, int from, int tag = 0) const;
// receive array of mpi datatypes
template <class Data,
REQUIRES( is_mpi_type<Data> )>
std::shared_ptr<MPI_Request> irecv(Data* data, std::size_t size, int from, int tag = 0) const;
// receive complex datatype asynchronousely
// NOTE: communication of data is started when Future::test() or Future::wait() is called.
template <class Data,
std::enable_if_t<not is_mpi_type<Data>, int> = 0>
Future<Data> irecv(Data& data, int from, int tag = 0) const;
protected:
void check_buffers() const;
protected:
MPI_Comm comm_;
int rank_;
int size_;
std::vector<char> buffer_;
mutable std::list< std::pair<std::shared_ptr<MPI_Request>, std::string> > buffers_;
};
} // end namespace mpi14
#include "impl/Communicator.hpp"
#pragma once
#include <mpi.h>
namespace mpi14
{
// RAII guard for the MPI runtime: MPI_Init on construction, MPI_Finalize on
// destruction. Create exactly one instance at the top of main().
class Environment
{
public:
  /// Initializes the MPI environment; argc/argv are passed to MPI_Init and
  /// may be modified by the MPI implementation.
  Environment(int& argc, char**& argv)
  {
    MPI_Init(&argc, &argv);
  }

  // Non-copyable and non-assignable: MPI may be initialized/finalized only
  // once per process. (BUGFIX: copy-assignment was left implicitly defaulted
  // in the original although the copy constructor was deleted.)
  Environment(Environment const&) = delete;
  Environment& operator=(Environment const&) = delete;

  /// Finalizes the MPI environment.
  ~Environment()
  {
    MPI_Finalize();
  }
};
} // end namespace mpi14
#pragma once
#include <vector>
#include "Serialization.hpp"
namespace mpi14
{
// Handle for an asynchronous two-phase receive of a serialized datatype:
// phase 1 receives the payload size, phase 2 receives the payload bytes.
// Created by Communicator::irecv(); the Communicator posts the size receive
// via the size()/request() accessors below, test() posts the data receive.
// NOTE(review): this header uses assert, std::unique_ptr and MPI types but
// only includes <vector> and "Serialization.hpp" — presumably the includers
// (e.g. Communicator.hpp) provide <cassert>, <memory> and <mpi.h>; verify.
class FutureBase
{
  friend class Communicator;

public:
  // from: source rank, tag: message tag, comm: communicator to receive on.
  // size_ and buffer_ live on the heap so their addresses stay stable while
  // MPI holds pointers into them — presumably to keep the object movable
  // mid-flight; TODO confirm (the MPI_Request members are still copied on move).
  FutureBase(int from, int tag, MPI_Comm comm)
    : from_(from)
    , tag_(tag)
    , comm_(comm)
    , size_(new int(-1))
    , buffer_(new std::vector<char>{})
  {}

  virtual ~FutureBase()
  {}

  // movable but not copyable: a pending receive must have a single owner
  FutureBase(FutureBase const&) = delete;
  FutureBase(FutureBase&&) = default;
  FutureBase& operator=(FutureBase const&) = delete;
  FutureBase& operator=(FutureBase&&) = default;

  // Hook invoked once the payload has fully arrived; overridden in
  // Future<Data> to deserialize buffer_ into the user's object.
  virtual void finish() {}

  // Busy-wait until the payload has been received (and finish() ran).
  void wait()
  {
    while( !test() ) ;
  }

  // Non-blocking progress check; returns true once the data has arrived.
  // State machine:
  //   1. poll size_request_; when the size message arrived, resize the
  //      buffer and post the payload receive (data_request_);
  //   2. poll data_request_; on completion call finish() exactly once.
  bool test()
  {
    if (!size_received_) {
      int flag;
      MPI_Test(&size_request_, &flag, MPI_STATUS_IGNORE);
      if (flag != 0) {
        size_received_ = true;
        // NOTE(review): zero-sized payloads are rejected here — confirm the
        // sending side never transmits an empty serialized buffer.
        assert( *size_ > 0 );
        buffer_->resize(*size_);
        MPI_Irecv(buffer_->data(), *size_, MPI_BYTE, from_, tag_, comm_, &data_request_);
      }
    } else if (!data_received_) {
      int flag;
      MPI_Test(&data_request_, &flag, MPI_STATUS_IGNORE);
      if (flag != 0) {
        data_received_ = true;
        finish();
      }
    }
    return data_received_;
  }

protected:
  // accessors used by Communicator to post the size receive
  int& size() { return *size_; }
  MPI_Request& request() { return size_request_; }

protected:
  int from_;       //< source rank
  int tag_;        //< message tag
  MPI_Comm comm_;  //< communicator to receive on
  std::unique_ptr<int> size_;                  //< payload size in bytes (phase 1 target)
  MPI_Request size_request_;                   //< request handle for the size message
  MPI_Request data_request_;                   //< request handle for the payload message
  std::unique_ptr<std::vector<char>> buffer_;  //< receive buffer for the payload bytes
  bool size_received_ = false;  //< phase 1 complete
  bool data_received_ = false;  //< phase 2 complete (finish() has run)
};
// Future for a serialized receive into a user-provided object: once the raw
// bytes have arrived (handled by FutureBase), finish() deserializes them
// into the referenced data object.
template <class Data>
class Future
    : public FutureBase
{
public:
  // `data` must outlive this future — only a reference is stored.
  Future(Data& data, int from, int tag, MPI_Comm comm)
    : FutureBase(from, tag, comm)
    , data_(data)
  {}

  // Deserialize the received byte buffer into the referenced object.
  virtual void finish() override
  {
    serialization::load(*buffer_, data_);
  }

  // Block until the value has arrived, then return the referenced object.
  Data& get()
  {
    wait();
    return data_;
  }

private:
  Data& data_;  //< receive target owned by the caller
};
} // end namespace mpi14
#pragma once
#include <vector>
namespace mpi14
{
// Wraps an already-posted MPI request together with a pointer to the data it
// produces; test()/wait() poll the request and publish the data pointer once
// the operation has finished.
// NOTE(review): relies on std::optional and MPI types although this header
// only includes <vector> — presumably provided by the includers; verify.
template <class Data>
class DefaultRequest
{
public:
  using DataType = Data;

  /// \param request  handle of an already-started MPI operation
  /// \param data     pointer published once the operation completed
  DefaultRequest(MPI_Request request, Data const* data)
    : request_(request)
    , data_(data)
  {}

  /// Busy-wait for completion, then return the data pointer.
  Data const* wait()
  {
    while( !test() ) ;
    return data_;
  }

  /// Non-blocking completion check: the data pointer when finished,
  /// an empty optional otherwise. Safe to call after completion.
  std::optional<Data const*> test()
  {
    if (!valid_) {
      int flag;
      MPI_Test(&request_, &flag, MPI_STATUS_IGNORE);
      valid_ = (flag != 0);
    }
    if (valid_)
      return data_;
    return {};
  }

protected:
  MPI_Request request_;  //< the wrapped MPI request handle
  Data const* data_;     //< non-owning pointer to the result data
  bool valid_ = false;   //< true once MPI_Test reported completion
};
// Lazily-started request: the MPI operation is created by the callback `f`
// on the first call to test(), not at construction time.
template <class Data, class F>
class Request
    : public DefaultRequest<Data>
{
public:
  /// \param data  pointer published once the request completed
  /// \param f     nullary callback returning the MPI_Request to poll
  // BUGFIX: the constructor was erroneously named DefaultRequest in the
  // original (a constructor must bear the class name), and the dependent
  // base class must be named with its template arguments.
  Request(Data const* data, F const& f)
    : DefaultRequest<Data>(MPI_REQUEST_NULL, data)
    , get_request_(f)
  {}

  // Start the operation on first call, then delegate to the base-class poll.
  std::optional<Data const*> test()
  {
    if (!initialized_) {
      // BUGFIX: members of a dependent base are not found by unqualified
      // lookup; access via this->.
      this->request_ = get_request_();
      initialized_ = true;
    }
    return DefaultRequest<Data>::test();
  }

private:
  F get_request_;             //< deferred creator of the MPI request
  bool initialized_ = false;  //< true once get_request_ has been invoked
};
// Request that depends on the result of a predecessor request: it is only
// started once the predecessor's test() yields a value, which is forwarded
// to the callback `f` to create this request's MPI operation.
template <class Data, class F>
class DependentRequest
    : public DefaultRequest<Data>
{
public:
  /// \param data  pointer published once the request completed
  /// \param f     callback mapping the predecessor's result to an MPI_Request
  // BUGFIX: the dependent base class must be named with its template
  // arguments in the member-initializer list.
  DependentRequest(Data const* data, F const& f)
    : DefaultRequest<Data>(MPI_REQUEST_NULL, data)
    , get_request_(f)
  {}

  // Poll with the predecessor's result: while `in` is empty nothing is
  // started; on the first non-empty `in` the MPI request is created.
  template <class T>
  std::optional<Data const*> test(std::optional<T const*> in)
  {
    if (!in)
      return {};  // BUGFIX: the original was missing the semicolon here
    if (!initialized_) {
      // BUGFIX: member of a dependent base, access via this->
      this->request_ = get_request_(*in);
      initialized_ = true;
    }
    return DefaultRequest<Data>::test();
  }

private:
  F get_request_;             //< deferred creator of the MPI request
  bool initialized_ = false;  //< true once get_request_ has been invoked
};
#if 0
int size;
Request<int> req0(&size, [&size]()
{
MPI_Request request;
MPI_Irecv(&size, 1, MPI_INT, from_, tag_, comm_, &request);
return request;
});
std::vector<double> vec;
std::vector<char> buffer;
DependentRequest<std::vector<double>> req(&vec, [&vec,&buffer](int size)
{
assert( size > 0 );
buffer.resize(size);
MPI_Request request;
MPI_Irecv(buffer.data(), size, MPI_BYTE, from_, tag_, comm_, &request);
return request;
});
#endif
} // end namespace mpi14
#pragma once
#include "Common.hpp"
namespace mpi14
{
// Chains several request objects: the result of request i is fed into
// request i+1 via its test(optional) overload; the chain's overall result is
// the data pointer of the last request (type Reqs...[N-1]::DataType).
// NOTE(review): std::tuple and std::optional are used but only "Common.hpp"
// (<type_traits>) is included — confirm the includers provide <tuple>/<optional>.
template <class... Reqs>
class RequestChain
{
  static constexpr std::size_t N = sizeof...(Reqs);
  using Data = typename std::tuple_element_t<N-1, std::tuple<Reqs...>>::DataType;

public:
  // BUGFIX: the constructor and test() were private in the original (class
  // members default to private access), making the class unusable.
  RequestChain(Reqs... reqs)
    : reqs_{reqs...}
  {}

  // Kick off the chain: test the first request and propagate its result.
  std::optional<Data const*> test()
  {
    auto op = std::get<0>(reqs_).test();
    return test(op, index_<1>);
  }

  // Recursive step: abort with an empty optional as soon as one stage has
  // no result yet, otherwise feed it into the next request.
  template <class Optional, std::size_t i = 0>
  std::optional<Data const*> test(Optional op, index_t<i> = {})
  {
    if (!op)
      return {};
    auto op_next = std::get<i>(reqs_).test(op);
    return test(op_next, index_<i+1>);
  }

  // Recursion terminator: all requests tested, forward the final result.
  template <class Optional>
  std::optional<Data const*> test(Optional&& result, index_t<N>)
  {
    return std::forward<Optional>(result);
  }

protected:
  std::tuple<Reqs...> reqs_;  //< the chained request objects, in order
};
} // end namespace mpi14
#pragma once
#include <string>
#include <sstream>
#include <boost/archive/binary_iarchive.hpp>
#include <boost/archive/binary_oarchive.hpp>
#if HAS_COMPRESSION
#include <boost/iostreams/filtering_stream.hpp>
#include <boost/iostreams/filter/gzip.hpp>
#endif
namespace mpi14
{
namespace serialization
{
// Serialize `data` into a binary string via boost::serialization; when
// compiled with HAS_COMPRESSION the stream is additionally gzip-compressed.
// The result can be restored with load() below.
template <class Data>
std::string store(Data const& data)
{
  std::ostringstream ss;
  { // scope matters: the archive (and the filtering stream) must be
    // destroyed to flush all buffered output into `ss` before ss.str()
    // is taken below
#if HAS_COMPRESSION
    namespace ios = boost::iostreams;
    ios::filtering_stream<ios::output> f;
    f.push(ios::gzip_compressor());
    f.push(ss);
    boost::archive::binary_oarchive oa(f);
#else
    boost::archive::binary_oarchive oa(ss);
#endif
    oa << data;
  }
  return ss.str();
}
// Deserialize `data` from a binary buffer previously produced by store().
// `Container` is any contiguous byte container with data()/size(),
// e.g. std::string or std::vector<char>.
template <class Container, class Data>
void load(Container const& str, Data& data)
{
  // copy the raw bytes into a stream the boost archive can read from
  std::stringstream ss;
  ss.write(str.data(), str.size());
#if HAS_COMPRESSION
  namespace ios = boost::iostreams;
  ios::filtering_stream<ios::input> f;
  f.push(ios::gzip_decompressor());
  f.push(ss);
  boost::archive::binary_iarchive ia(f);
#else
  boost::archive::binary_iarchive ia(ss);
#endif
  ia >> data;
}
} // end namespace serialization
} // end namespace mpi14
#pragma once
#include "Common.hpp"
namespace mpi14
{
namespace aux
{
  // Trait: does T map directly to a builtin MPI datatype?
  // Exactly the arithmetic types enumerated below qualify; every other type
  // falls back to the serialization-based communication path.
  template <class T>
  struct is_mpi_type : std::false_type {};

  template <> struct is_mpi_type<char> : std::true_type {};
  template <> struct is_mpi_type<short> : std::true_type {};
  template <> struct is_mpi_type<int> : std::true_type {};
  template <> struct is_mpi_type<long> : std::true_type {};
  template <> struct is_mpi_type<long long> : std::true_type {};
  template <> struct is_mpi_type<signed char> : std::true_type {};
  template <> struct is_mpi_type<unsigned char> : std::true_type {};
  template <> struct is_mpi_type<unsigned short> : std::true_type {};
  template <> struct is_mpi_type<unsigned int> : std::true_type {};
  template <> struct is_mpi_type<unsigned long> : std::true_type {};
  template <> struct is_mpi_type<unsigned long long> : std::true_type {};
  template <> struct is_mpi_type<float> : std::true_type {};
  template <> struct is_mpi_type<double> : std::true_type {};
  template <> struct is_mpi_type<long double> : std::true_type {};

} // end namespace aux
// Convenience variable template, e.g. is_mpi_type<int> == true; this is the
// form used in the REQUIRES(...) constraints throughout Communicator.
template <class T>
constexpr bool is_mpi_type = aux::is_mpi_type<T>::value;
namespace aux
{
template <class T>
struct type_to_mpi
{
static_assert( mpi14::is_mpi_type<T>, "Type is not an MPI Datatype!" );
};
template <> struct type_to_mpi<char> { static constexpr MPI_Datatype value = MPI_CHAR; };
template <> struct type_to_mpi<short> { static constexpr MPI_Datatype value = MPI_SHORT; };
template <> struct type_to_mpi<int> { static constexpr MPI_Datatype value = MPI_INT; };
template <> struct type_to_mpi<long> { static constexpr MPI_Datatype value = MPI_LONG; };
template <> struct type_to_mpi<long long> { static constexpr MPI_Datatype value = MPI_LONG_LONG; };
template <> struct type_to_mpi<signed char> { static constexpr MPI_Datatype value = MPI_SIGNED_CHAR; };
template <> struct type_to_mpi<unsigned char> { static constexpr MPI_Datatype value = MPI_UNSIGNED_CHAR; };
template <> struct type_to_mpi<unsigned short> { static constexpr MPI_Datatype value = MPI_UNSIGNED_SHORT; };
template <> struct type_to_mpi<unsigned int> { static constexpr MPI_Datatype value = MPI_UNSIGNED; };
template <> struct type_to_mpi<unsigned long> { static constexpr MPI_Datatype value = MPI_UNSIGNED_LONG; };
template <> struct type_to_mpi<unsigned long long> { static constexpr MPI_Datatype value = MPI_UNSIGNED_LONG_LONG; };
template <> struct type_to_mpi<float> { static constexpr MPI_Datatype value = MPI_FLOAT; };
template <> struct type_to_mpi