Commit 540fc2c8 authored by Praetorius, Simon's avatar Praetorius, Simon

Request object added

parent a30328a6
......@@ -9,8 +9,8 @@
#include <mpi.h>
#include "Common.hpp"
#include "FutureSerialization.hpp"
#include "FutureVector.hpp"
#include "Future.hpp"
#include "Request.hpp"
#include "Serialization.hpp"
#include "Type_Traits.hpp"
......@@ -84,26 +84,26 @@ namespace mpi14
// send mpi datatype (non-blocking)
template <class Data,
REQUIRES( is_mpi_type<Data> )>
std::shared_ptr<MPI_Request> isend(Data const& data, int to, int tag = 0) const;
Request isend(Data const& data, int to, int tag = 0) const;
// send mpi datatype (non-blocking, buffered)
template <class Data,
REQUIRES( is_mpi_type<Data> )>
std::shared_ptr<MPI_Request> ibsend(Data const& data, int to, int tag = 0) const;
Request ibsend(Data const& data, int to, int tag = 0) const;
// send array of mpi datatypes (non-blocking)
template <class Data,
REQUIRES( is_mpi_type<Data> )>
std::shared_ptr<MPI_Request> isend(Data const* data, std::size_t size, int to, int tag = 0) const;
Request isend(Data const* data, std::size_t size, int to, int tag = 0) const;
// send array of mpi datatypes (non-blocking, buffered)
template <class Data,
REQUIRES( is_mpi_type<Data> )>
std::shared_ptr<MPI_Request> ibsend(Data const* data, std::size_t size, int to, int tag = 0) const;
Request ibsend(Data const* data, std::size_t size, int to, int tag = 0) const;
template <class T,
REQUIRES( is_mpi_type<T> )>
std::shared_ptr<MPI_Request> isend(std::vector<T> const& data, int to, int tag = 0) const;
Request isend(std::vector<T> const& data, int to, int tag = 0) const;
// send complex datatype: (non-blocking)
......
#pragma once
#include <type_traits>
#include <vector>
#include "FutureSerialization.hpp"
#include "FutureVector.hpp"
#include "Request.hpp"
#include "Type_Traits.hpp"
namespace mpi14
{
  namespace aux
  {
    // Primary template: general (non-MPI) data types are communicated via
    // serialization, so their non-blocking operations yield a FutureSerialization.
    template <typename T, typename = void>
    struct Future
    {
      using type = FutureSerialization<T>;
    };

    // Operations without a payload are tracked by the bare request handle only.
    template <>
    struct Future<void>
    {
      using type = Request;
    };

    // Plain MPI datatypes need no extra bookkeeping beyond the request handle.
    template <typename T>
    struct Future<T, std::enable_if_t<mpi14::is_mpi_type<T>> >
    {
      using type = Request;
    };

    // Vectors of plain MPI datatypes are handled by a dedicated vector future.
    template <typename T>
    struct Future<std::vector<T>, std::enable_if_t<mpi14::is_mpi_type<T>> >
    {
      using type = FutureVector<T>;
    };

  } // end namespace aux

  // Maps a data type T to the future/request type returned by the
  // corresponding non-blocking communication routines.
  template <typename T>
  using Future = typename aux::Future<T>::type;

} // end namespace mpi14
......@@ -14,7 +14,7 @@ namespace mpi14
virtual bool test() = 0;
void wait()
virtual void wait()
{
while( !test() ) ;
}
......@@ -43,6 +43,7 @@ namespace mpi14
};
// Blocks until all communication operations associated with active handles in the range complete.
template <class FutureIter>
void wait_all(FutureIter first, FutureIter last)
......
......@@ -2,49 +2,77 @@
#include <vector>
#include <boost/optional.hpp>
namespace mpi14
{
template <class Data>
class DefaultRequest
class Request
{
public:
using DataType = Data;
DefaultRequest(MPI_Request request, Data const* data)
Request(MPI_Request request)
: request_(request)
, data_(data)
{}
Data const* wait()
// Returns an MPI_Status object if the operation identified by the request is complete.
// If the request is an active persistent request, it is marked as inactive. Any other type of
// request is deallocated and the request handle is set to MPI_REQUEST_NULL .
boost::optional<MPI_Status> test()
{
while( !test() ) ;
return data_;
MPI_Status status;
int flag = 0;
MPI_Test(&request_, &flag, &status);
if (flag)
return status;
else
return {};
}
std::optional<Data const*> test()
// Access the information associated with a request, without freeing the request
boost::optional<MPI_Status> status() const
{
if (valid_)
return data_;
int flag;
MPI_Test(&request_, &flag, MPI_STATUS_IGNORE);
if (flag != 0)
valid_ = true;
if (valid_)
return data_;
MPI_Status status;
int flag = 0;
MPI_Request_get_status(request_, &flag, &status);
if (flag)
return status;
else
return {};
}
// Returns when the operation identified by request is complete.
MPI_Status wait()
{
MPI_Status status;
MPI_Wait(&request_, &status);
}
// Returns the underlying MPI_Request handle
MPI_Request get() const { return request_; }
// Deallocate a request object without waiting for the associated communication to complete.
void free()
{
MPI_Request_free(&request_);
}
void cancel()
{
MPI_Cancel(&request_);
}
void cancel_and_free()
{
cancel();
free();
}
protected:
MPI_Request request_;
Data const* data_;
bool valid_ = false;
};
#if 0
template <class Data, class F>
class Request
: public DefaultRequest<Data>
......@@ -101,7 +129,7 @@ namespace mpi14
F get_request_;
bool initialized_ = false;
};
#endif
#if 0
int size;
......
......@@ -46,54 +46,54 @@ void Communicator::send(Data const& data, int to, int tag) const
// send mpi datatype (non-blocking)
// Starts a standard-mode non-blocking send of a single element; the caller
// must keep `data` alive until the returned Request completes.
// (Cleaned up: the span interleaved the superseded shared_ptr<MPI_Request>
// implementation with the new Request-returning one.)
template <class Data,
          REQUIRES( is_mpi_type<Data> )>
Request Communicator::isend(Data const& data, int to, int tag) const
{
  MPI_Request request;
  MPI_Isend(&data, 1, type_to_mpi<Data>, to, tag, comm_, &request);
  return {request};
}
// send mpi datatype (non-blocking, buffered)
// Starts a buffered-mode non-blocking send of a single element; requires an
// attached MPI buffer (MPI_Buffer_attach) large enough for the message.
// (Cleaned up: the span interleaved the superseded shared_ptr<MPI_Request>
// implementation with the new Request-returning one.)
template <class Data,
          REQUIRES( is_mpi_type<Data> )>
Request Communicator::ibsend(Data const& data, int to, int tag) const
{
  MPI_Request request;
  MPI_Ibsend(&data, 1, type_to_mpi<Data>, to, tag, comm_, &request);
  return {request};
}
// send array of mpi datatypes (non-blocking)
// Starts a standard-mode non-blocking send of `size` elements starting at
// `data`; the buffer must stay valid until the returned Request completes.
// (Cleaned up: the span interleaved the superseded shared_ptr<MPI_Request>
// implementation with the new Request-returning one.)
template <class Data,
          REQUIRES( is_mpi_type<Data> )>
Request Communicator::isend(Data const* data, std::size_t size, int to, int tag) const
{
  MPI_Request request;
  MPI_Isend(data, size, type_to_mpi<Data>, to, tag, comm_, &request);
  return {request};
}
// send array of mpi datatypes (non-blocking, buffered)
// Starts a buffered-mode non-blocking send of `size` elements starting at
// `data`; requires an attached MPI buffer large enough for the message.
// (Cleaned up: the span interleaved the superseded shared_ptr<MPI_Request>
// implementation with the new Request-returning one.)
template <class Data,
          REQUIRES( is_mpi_type<Data> )>
Request Communicator::ibsend(Data const* data, std::size_t size, int to, int tag) const
{
  MPI_Request request;
  MPI_Ibsend(data, size, type_to_mpi<Data>, to, tag, comm_, &request);
  return {request};
}
// send vector of mpi datatypes (non-blocking)
// Starts a standard-mode non-blocking send of the vector's contents; the
// vector must not be resized or destroyed until the Request completes.
// (Cleaned up: the span interleaved the superseded shared_ptr<MPI_Request>
// implementation with the new Request-returning one.)
template <class T,
          REQUIRES( is_mpi_type<T> )>
Request Communicator::isend(std::vector<T> const& vec, int to, int tag) const
{
  MPI_Request request;
  MPI_Isend(vec.data(), int(vec.size()), type_to_mpi<T>, to, tag, comm_, &request);
  return {request};
}
......@@ -106,8 +106,6 @@ template <class Data,
std::shared_ptr<MPI_Request> Communicator::isend(Data const& data, int to, int tag) const
{
check_buffers();
MPI_Request request_size;
buffers_.emplace_back( std::make_pair(std::make_shared<MPI_Request>(), serialization::store(data)) );
auto request = buffers_.back().first;
......
......@@ -20,8 +20,7 @@ int main(int argc, char** argv)
std::vector<double> out;
int next = (rank+1)%world.size();
auto req = world.isend(in, next, 2);
MPI_Request_free(req.get());
world.isend(in, next, 2);
int prev = (rank+world.size()-1)%world.size();
auto stat = world.recv(out, prev, 2);
......
......@@ -36,10 +36,11 @@ int main(int argc, char** argv)
if (rank == 0) {
out = in;
for (int r = 1; r < world.size(); ++r) {
auto req = world.isend(in, r);
MPI_Request_free(req.get());
}
std::vector<mpi14::Request> requests;
for (int r = 1; r < world.size(); ++r)
requests.emplace_back( world.isend(in, r) );
mpi14::wait_all(requests.begin(), requests.end());
} else {
world.recv(out, 0);
}
......
......@@ -7,6 +7,7 @@
#include <mpi14/Environment.hpp>
#include <mpi14/Communicator.hpp>
#include <mpi14/Collective.hpp>
#include <mpi14/Future.hpp>
int main(int argc, char** argv)
{
......@@ -24,7 +25,7 @@ int main(int argc, char** argv)
if (rank == 0) {
outs[0] = in;
using Future = mpi14::FutureVector<int>;
using Future = mpi14::Future<std::vector<int>>;
std::vector<Future> requests;
for (int r = 1; r < world.size(); ++r)
requests.emplace_back( world.irecv(outs[r], r) );
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment