-/* Copyright (c) 2009-2021. The SimGrid Team. All rights reserved. */
+/* Copyright (c) 2009-2023. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
+#include "smpi_replay.hpp"
+#include "simgrid/s4u/Exec.hpp"
#include "smpi_coll.hpp"
#include "smpi_comm.hpp"
+#include "smpi_config.hpp"
#include "smpi_datatype.hpp"
#include "smpi_group.hpp"
#include "smpi_request.hpp"
+#include "src/smpi/include/private.hpp"
#include "xbt/replay.hpp"
-#include <simgrid/smpi/smpi_replay.hpp>
-#include <src/smpi/include/private.hpp>
+#include "xbt/str.h"
+#include <cmath>
+#include <limits>
#include <memory>
#include <numeric>
+#include <tuple>
#include <unordered_map>
#include <vector>
-#include <tuple>
-
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_replay, smpi, "Trace Replay with SMPI");
-
// From https://stackoverflow.com/questions/7110301/generic-hash-for-tuples-in-unordered-map-unordered-set
// This is all just to make std::unordered_map work with std::tuple. If we need this in other places,
// this could go into a header file.
}
// Recursive template code derived from Matthieu M.
-template <class Tuple, size_t Index = std::tuple_size<Tuple>::value - 1> class HashValueImpl {
+template <class Tuple, size_t Index = std::tuple_size_v<Tuple> - 1> class HashValueImpl {
public:
static void apply(size_t& seed, Tuple const& tuple)
{
};
}
-using req_key_t = std::tuple</*sender*/ int, /* receiver */ int, /* tag */ int>;
-using req_storage_t = std::unordered_map<req_key_t, MPI_Request, hash_tuple::hash<std::tuple<int, int, int>>>;
-
void log_timed_action(const simgrid::xbt::ReplayAction& action, double clock)
{
if (XBT_LOG_ISENABLED(smpi_replay, xbt_log_priority_verbose)){
}
}
-/* Helper function */
+/* Helper functions */
static double parse_double(const std::string& string)
{
- return xbt_str_parse_double(string.c_str(), "%s is not a double");
+ return xbt_str_parse_double(string.c_str(), "not a double");
}
-namespace simgrid {
-namespace smpi {
+template <typename T> static T parse_integer(const std::string& string)
+{
+ double val = trunc(xbt_str_parse_double(string.c_str(), "not a double"));
+ xbt_assert(static_cast<double>(std::numeric_limits<T>::min()) <= val &&
+ val <= static_cast<double>(std::numeric_limits<T>::max()),
+ "out of range: %g", val);
+ return static_cast<T>(val);
+}
-namespace replay {
+static int parse_root(const simgrid::xbt::ReplayAction& action, unsigned i)
+{
+ return i < action.size() ? std::stoi(action[i]) : 0;
+}
+
+static MPI_Datatype parse_datatype(const simgrid::xbt::ReplayAction& action, unsigned i)
+{
+ return i < action.size() ? simgrid::smpi::Datatype::decode(action[i]) : simgrid::smpi::replay::MPI_DEFAULT_TYPE;
+}
+
+namespace simgrid::smpi::replay {
MPI_Datatype MPI_DEFAULT_TYPE;
class RequestStorage {
private:
- req_storage_t store;
+ using req_key_t = std::tuple</*sender*/ int, /* receiver */ int, /* tag */ int>;
+ using req_storage_t = std::unordered_map<req_key_t, std::list<MPI_Request>, hash_tuple::hash<std::tuple<int, int, int>>>;
+
+ req_storage_t store;
public:
RequestStorage() = default;
- int size() const { return store.size(); }
+ size_t size() const { return store.size(); }
req_storage_t& get_store() { return store; }
void get_requests(std::vector<MPI_Request>& vec) const
{
- for (auto const& pair : store) {
- auto& req = pair.second;
- auto my_proc_id = simgrid::s4u::this_actor::get_pid();
- if (req != MPI_REQUEST_NULL && (req->src() == my_proc_id || req->dst() == my_proc_id)) {
- vec.push_back(pair.second);
- pair.second->print_request("MM");
+ for (auto const& [_, reqs] : store) {
+ aid_t my_proc_id = simgrid::s4u::this_actor::get_pid();
+ for (const auto& req : reqs) {
+ if (req != MPI_REQUEST_NULL && (req->src() == my_proc_id || req->dst() == my_proc_id)) {
+ vec.push_back(req);
+ req->print_request("MM");
+ }
}
}
}
- MPI_Request find(int src, int dst, int tag)
- {
- auto it = store.find(req_key_t(src, dst, tag));
- return (it == store.end()) ? MPI_REQUEST_NULL : it->second;
- }
-
- void remove(const Request* req)
- {
- if (req == MPI_REQUEST_NULL) return;
-
- store.erase(req_key_t(req->src()-1, req->dst()-1, req->tag()));
- }
+ MPI_Request pop(int src, int dst, int tag)
+ {
+ auto it = store.find(req_key_t(src, dst, tag));
+ if (it == store.end())
+ return MPI_REQUEST_NULL;
+ MPI_Request req = it->second.front();
+ it->second.pop_front();
+ if(it->second.empty())
+ store.erase(req_key_t(src, dst, tag));
+ return req;
+ }
- void add(MPI_Request req)
- {
- if (req != MPI_REQUEST_NULL) // Can and does happen in the case of TestAction
- store.insert({req_key_t(req->src()-1, req->dst()-1, req->tag()), req});
+ void add(MPI_Request req)
+ {
+ if (req != MPI_REQUEST_NULL){ // Can and does happen in the case of TestAction
+ store[req_key_t(req->src()-1, req->dst()-1, req->tag())].push_back(req);
}
+ }
- /* Sometimes we need to re-insert MPI_REQUEST_NULL but we still need src,dst and tag */
- void addNullRequest(int src, int dst, int tag)
- {
- store.insert({req_key_t(
- MPI_COMM_WORLD->group()->actor(src)->get_pid()-1,
- MPI_COMM_WORLD->group()->actor(dst)->get_pid()-1,
- tag), MPI_REQUEST_NULL});
- }
+ /* Sometimes we need to re-insert MPI_REQUEST_NULL but we still need src,dst and tag */
+ void addNullRequest(int src, int dst, int tag)
+ {
+ int src_pid = MPI_COMM_WORLD->group()->actor(src) - 1;
+ int dest_pid = MPI_COMM_WORLD->group()->actor(dst) - 1;
+ store[req_key_t(src_pid, dest_pid, tag)].push_back(MPI_REQUEST_NULL);
+ }
};
void WaitTestParser::parse(simgrid::xbt::ReplayAction& action, const std::string&)
tag = std::stoi(action[4]);
}
-void SendRecvParser::parse(simgrid::xbt::ReplayAction& action, const std::string&)
+void SendOrRecvParser::parse(simgrid::xbt::ReplayAction& action, const std::string&)
{
CHECK_ACTION_PARAMS(action, 3, 1)
partner = std::stoi(action[2]);
tag = std::stoi(action[3]);
- size = parse_double(action[4]);
- if (action.size() > 5)
- datatype1 = simgrid::smpi::Datatype::decode(action[5]);
+ size = parse_integer<ssize_t>(action[4]);
+ datatype1 = parse_datatype(action, 5);
}
void ComputeParser::parse(simgrid::xbt::ReplayAction& action, const std::string&)
/** @brief Parse a location action: <filename> <line>. */
void LocationParser::parse(simgrid::xbt::ReplayAction& action, const std::string&)
{
  CHECK_ACTION_PARAMS(action, 2, 0)
  filename = action[2];
  line     = std::stoi(action[3]);
}
+void SendRecvParser::parse(simgrid::xbt::ReplayAction& action, const std::string&)
+{
+ CHECK_ACTION_PARAMS(action, 6, 0)
+ sendcount = parse_integer<int>(action[2]);
+ dst = std::stoi(action[3]);
+ recvcount = parse_integer<int>(action[4]);
+ src = std::stoi(action[5]);
+ datatype1 = parse_datatype(action, 6);
+ datatype2 = parse_datatype(action, 7);
+}
+
/** @brief Parse a bcast action: <size> [root] [datatype]. Root defaults to 0. */
void BcastArgParser::parse(simgrid::xbt::ReplayAction& action, const std::string&)
{
  CHECK_ACTION_PARAMS(action, 1, 2)
  size      = parse_integer<size_t>(action[2]);
  root      = parse_root(action, 3);
  datatype1 = parse_datatype(action, 4);
}
/** @brief Parse a reduce action: <comm_size> <comp_size> [root] [datatype]. Root defaults to 0. */
void ReduceArgParser::parse(simgrid::xbt::ReplayAction& action, const std::string&)
{
  CHECK_ACTION_PARAMS(action, 2, 2)
  comm_size = parse_integer<unsigned>(action[2]);
  comp_size = parse_double(action[3]);
  root      = parse_root(action, 4);
  datatype1 = parse_datatype(action, 5);
}
/** @brief Parse an allreduce action: <comm_size> <comp_size> [datatype]. */
void AllReduceArgParser::parse(simgrid::xbt::ReplayAction& action, const std::string&)
{
  CHECK_ACTION_PARAMS(action, 2, 1)
  comm_size = parse_integer<unsigned>(action[2]);
  comp_size = parse_double(action[3]);
  datatype1 = parse_datatype(action, 4);
}
/** @brief Parse an alltoall action: <send_size> <recv_size> [send datatype] [recv datatype]. */
void AllToAllArgParser::parse(simgrid::xbt::ReplayAction& action, const std::string&)
{
  CHECK_ACTION_PARAMS(action, 2, 1)
  comm_size = MPI_COMM_WORLD->size();
  send_size = parse_integer<int>(action[2]);
  recv_size = parse_integer<int>(action[3]);
  datatype1 = parse_datatype(action, 4);
  datatype2 = parse_datatype(action, 5);
}
void GatherArgParser::parse(simgrid::xbt::ReplayAction& action, const std::string& name)
*/
CHECK_ACTION_PARAMS(action, 2, 3)
comm_size = MPI_COMM_WORLD->size();
- send_size = parse_double(action[2]);
- recv_size = parse_double(action[3]);
+ send_size = parse_integer<int>(action[2]);
+ recv_size = parse_integer<int>(action[3]);
if (name == "gather") {
- root = (action.size() > 4) ? std::stoi(action[4]) : 0;
- if (action.size() > 5)
- datatype1 = simgrid::smpi::Datatype::decode(action[5]);
- if (action.size() > 6)
- datatype2 = simgrid::smpi::Datatype::decode(action[6]);
+ root = parse_root(action, 4);
+ datatype1 = parse_datatype(action, 5);
+ datatype2 = parse_datatype(action, 6);
} else {
- if (action.size() > 4)
- datatype1 = simgrid::smpi::Datatype::decode(action[4]);
- if (action.size() > 5)
- datatype2 = simgrid::smpi::Datatype::decode(action[5]);
+ root = 0;
+ datatype1 = parse_datatype(action, 4);
+ datatype2 = parse_datatype(action, 5);
}
}
*/
comm_size = MPI_COMM_WORLD->size();
CHECK_ACTION_PARAMS(action, comm_size + 1, 2)
- send_size = parse_double(action[2]);
+ send_size = parse_integer<int>(action[2]);
disps = std::vector<int>(comm_size, 0);
recvcounts = std::make_shared<std::vector<int>>(comm_size);
if (name == "gatherv") {
- root = (action.size() > 3 + comm_size) ? std::stoi(action[3 + comm_size]) : 0;
- if (action.size() > 4 + comm_size)
- datatype1 = simgrid::smpi::Datatype::decode(action[4 + comm_size]);
- if (action.size() > 5 + comm_size)
- datatype2 = simgrid::smpi::Datatype::decode(action[5 + comm_size]);
+ root = parse_root(action, 3 + comm_size);
+ datatype1 = parse_datatype(action, 4 + comm_size);
+ datatype2 = parse_datatype(action, 5 + comm_size);
} else {
- int disp_index = 0;
+ root = 0;
+ unsigned disp_index = 0;
/* The 3 comes from "0 gather <sendcount>", which must always be present.
* The + comm_size is the recvcounts array, which must also be present
*/
- if (action.size() > 3 + comm_size + comm_size) { /* datatype + disp are specified */
- int datatype_index = 3 + comm_size;
- disp_index = datatype_index + 1;
- datatype1 = simgrid::smpi::Datatype::decode(action[datatype_index]);
- datatype2 = simgrid::smpi::Datatype::decode(action[datatype_index]);
- } else if (action.size() >
- 3 + comm_size + 2) { /* disps specified; datatype is not specified; use the default one */
+ if (action.size() > 3 + comm_size + comm_size) {
+ // datatype + disp are specified
+ datatype1 = parse_datatype(action, 3 + comm_size);
+ datatype2 = parse_datatype(action, 4 + comm_size);
+ disp_index = 5 + comm_size;
+ } else if (action.size() > 3 + comm_size + 2) {
+ // disps specified; datatype is not specified; use the default one
+ datatype1 = MPI_DEFAULT_TYPE;
+ datatype2 = MPI_DEFAULT_TYPE;
disp_index = 3 + comm_size;
- } else if (action.size() > 3 + comm_size) { /* only datatype, no disp specified */
- int datatype_index = 3 + comm_size;
- datatype1 = simgrid::smpi::Datatype::decode(action[datatype_index]);
- datatype2 = simgrid::smpi::Datatype::decode(action[datatype_index]);
+ } else {
+ // no disp specified, maybe only datatype,
+ datatype1 = parse_datatype(action, 3 + comm_size);
+ datatype2 = parse_datatype(action, 4 + comm_size);
}
if (disp_index != 0) {
- for (unsigned int i = 0; i < comm_size; i++)
+ xbt_assert(disp_index + comm_size <= action.size());
+ for (unsigned i = 0; i < comm_size; i++)
disps[i] = std::stoi(action[disp_index + i]);
}
}
4) 0 is the send datatype id, see simgrid::smpi::Datatype::decode()
5) 0 is the recv datatype id, see simgrid::smpi::Datatype::decode()
*/
+ comm_size = MPI_COMM_WORLD->size();
CHECK_ACTION_PARAMS(action, 2, 3)
comm_size = MPI_COMM_WORLD->size();
- send_size = parse_double(action[2]);
- recv_size = parse_double(action[3]);
- root = (action.size() > 4) ? std::stoi(action[4]) : 0;
- if (action.size() > 5)
- datatype1 = simgrid::smpi::Datatype::decode(action[5]);
- if (action.size() > 6)
- datatype2 = simgrid::smpi::Datatype::decode(action[6]);
+ send_size = parse_integer<int>(action[2]);
+ recv_size = parse_integer<int>(action[3]);
+ root = parse_root(action, 4);
+ datatype1 = parse_datatype(action, 5);
+ datatype2 = parse_datatype(action, 6);
}
void ScatterVArgParser::parse(simgrid::xbt::ReplayAction& action, const std::string&)
4) 0 is the send datatype id, see simgrid::smpi::Datatype::decode()
5) 0 is the recv datatype id, see simgrid::smpi::Datatype::decode()
*/
+ comm_size = MPI_COMM_WORLD->size();
CHECK_ACTION_PARAMS(action, comm_size + 1, 2)
- recv_size = parse_double(action[2 + comm_size]);
+ recv_size = parse_integer<int>(action[2 + comm_size]);
disps = std::vector<int>(comm_size, 0);
sendcounts = std::make_shared<std::vector<int>>(comm_size);
- if (action.size() > 5 + comm_size)
- datatype1 = simgrid::smpi::Datatype::decode(action[4 + comm_size]);
- if (action.size() > 5 + comm_size)
- datatype2 = simgrid::smpi::Datatype::decode(action[5]);
+ root = parse_root(action, 3 + comm_size);
+ datatype1 = parse_datatype(action, 4 + comm_size);
+ datatype2 = parse_datatype(action, 5 + comm_size);
for (unsigned int i = 0; i < comm_size; i++) {
(*sendcounts)[i] = std::stoi(action[i + 2]);
}
send_size_sum = std::accumulate(sendcounts->begin(), sendcounts->end(), 0);
- root = (action.size() > 3 + comm_size) ? std::stoi(action[3 + comm_size]) : 0;
}
void ReduceScatterArgParser::parse(simgrid::xbt::ReplayAction& action, const std::string&)
CHECK_ACTION_PARAMS(action, comm_size + 1, 1)
comp_size = parse_double(action[2 + comm_size]);
recvcounts = std::make_shared<std::vector<int>>(comm_size);
- if (action.size() > 3 + comm_size)
- datatype1 = simgrid::smpi::Datatype::decode(action[3 + comm_size]);
+ datatype1 = parse_datatype(action, 3 + comm_size);
for (unsigned int i = 0; i < comm_size; i++) {
- recvcounts->push_back(std::stoi(action[i + 2]));
+ (*recvcounts)[i]= std::stoi(action[i + 2]);
}
recv_size_sum = std::accumulate(recvcounts->begin(), recvcounts->end(), 0);
}
+void ScanArgParser::parse(simgrid::xbt::ReplayAction& action, const std::string&)
+{
+ CHECK_ACTION_PARAMS(action, 2, 1)
+ size = parse_integer<size_t>(action[2]);
+ comp_size = parse_double(action[3]);
+ datatype1 = parse_datatype(action, 4);
+}
+
void AllToAllVArgParser::parse(simgrid::xbt::ReplayAction& action, const std::string&)
{
/* The structure of the alltoallv action for the rank 0 (total 4 processes) is the following:
senddisps = std::vector<int>(comm_size, 0);
recvdisps = std::vector<int>(comm_size, 0);
- if (action.size() > 5 + 2 * comm_size)
- datatype1 = simgrid::smpi::Datatype::decode(action[4 + 2 * comm_size]);
- if (action.size() > 5 + 2 * comm_size)
- datatype2 = simgrid::smpi::Datatype::decode(action[5 + 2 * comm_size]);
+ datatype1 = parse_datatype(action, 4 + 2 * comm_size);
+ datatype2 = parse_datatype(action, 5 + 2 * comm_size);
- send_buf_size = parse_double(action[2]);
- recv_buf_size = parse_double(action[3 + comm_size]);
+ send_buf_size = parse_integer<int>(action[2]);
+ recv_buf_size = parse_integer<int>(action[3 + comm_size]);
for (unsigned int i = 0; i < comm_size; i++) {
(*sendcounts)[i] = std::stoi(action[3 + i]);
(*recvcounts)[i] = std::stoi(action[4 + comm_size + i]);
std::string s = boost::algorithm::join(action, " ");
xbt_assert(req_storage.size(), "action wait not preceded by any irecv or isend: %s", s.c_str());
const WaitTestParser& args = get_args();
- MPI_Request request = req_storage.find(args.src, args.dst, args.tag);
- req_storage.remove(request);
+ MPI_Request request = req_storage.pop(args.src, args.dst, args.tag);
if (request == MPI_REQUEST_NULL) {
/* Assume that the trace is well formed, meaning the comm might have been caught by an MPI_test. Then just
return;
}
- int rank = request->comm() != MPI_COMM_NULL ? request->comm()->rank() : -1;
-
// Must be taken before Request::wait() since the request may be set to
// MPI_REQUEST_NULL by Request::wait!
bool is_wait_for_receive = (request->flags() & MPI_REQ_RECV);
- // TODO: Here we take the rank while we normally take the process id (look for get_pid())
- TRACE_smpi_comm_in(rank, __func__, new simgrid::instr::WaitTIData(args.src, args.dst, args.tag));
+
+ TRACE_smpi_comm_in(get_pid(), __func__, new simgrid::instr::WaitTIData("wait", args.src, args.dst, args.tag));
MPI_Status status;
Request::wait(&request, &status);
-
- TRACE_smpi_comm_out(rank);
+ if(request!=MPI_REQUEST_NULL)
+ Request::unref(&request);
+ TRACE_smpi_comm_out(get_pid());
if (is_wait_for_receive)
- TRACE_smpi_recv(args.src, args.dst, args.tag);
+ TRACE_smpi_recv(MPI_COMM_WORLD->group()->actor(args.src), MPI_COMM_WORLD->group()->actor(args.dst), args.tag);
}
void SendAction::kernel(simgrid::xbt::ReplayAction&)
{
- const SendRecvParser& args = get_args();
- int dst_traced = MPI_COMM_WORLD->group()->actor(args.partner)->get_pid();
+ const SendOrRecvParser& args = get_args();
+ aid_t dst_traced = MPI_COMM_WORLD->group()->actor(args.partner);
TRACE_smpi_comm_in(
get_pid(), __func__,
void RecvAction::kernel(simgrid::xbt::ReplayAction&)
{
- const SendRecvParser& args = get_args();
+ const SendOrRecvParser& args = get_args();
TRACE_smpi_comm_in(
get_pid(), __func__,
new simgrid::instr::Pt2PtTIData(get_name(), args.partner, args.size, args.tag, Datatype::encode(args.datatype1)));
MPI_Status status;
// unknown size from the receiver point of view
- double arg_size = args.size;
- if (arg_size <= 0.0) {
+ ssize_t arg_size = args.size;
+ if (arg_size < 0) {
Request::probe(args.partner, args.tag, MPI_COMM_WORLD, &status);
arg_size = status.count;
}
TRACE_smpi_comm_out(get_pid());
if (is_recv && not TRACE_smpi_view_internals()) {
- int src_traced = MPI_COMM_WORLD->group()->actor(status.MPI_SOURCE)->get_pid();
+ aid_t src_traced = MPI_COMM_WORLD->group()->actor(status.MPI_SOURCE);
TRACE_smpi_recv(src_traced, get_pid(), args.tag);
}
}
+void SendRecvAction::kernel(simgrid::xbt::ReplayAction&)
+{
+ XBT_DEBUG("Enters SendRecv");
+ const SendRecvParser& args = get_args();
+ aid_t my_proc_id = simgrid::s4u::this_actor::get_pid();
+ aid_t src_traced = MPI_COMM_WORLD->group()->actor(args.src);
+ aid_t dst_traced = MPI_COMM_WORLD->group()->actor(args.dst);
+
+ MPI_Status status;
+ int sendtag=0;
+ int recvtag=0;
+
+ // FIXME: Hack the way to trace this one
+ auto dst_hack = std::make_shared<std::vector<int>>();
+ auto src_hack = std::make_shared<std::vector<int>>();
+ dst_hack->push_back(dst_traced);
+ src_hack->push_back(src_traced);
+ TRACE_smpi_comm_in(my_proc_id, __func__,
+ new simgrid::instr::VarCollTIData(
+ "sendRecv", -1, args.sendcount,
+ dst_hack, args.recvcount, src_hack,
+ simgrid::smpi::Datatype::encode(args.datatype1), simgrid::smpi::Datatype::encode(args.datatype2)));
+
+ TRACE_smpi_send(my_proc_id, my_proc_id, dst_traced, sendtag, args.sendcount * args.datatype1->size());
+
+ simgrid::smpi::Request::sendrecv(nullptr, args.sendcount, args.datatype1, args.dst, sendtag, nullptr, args.recvcount, args.datatype2, args.src,
+ recvtag, MPI_COMM_WORLD, &status);
+
+ TRACE_smpi_recv(src_traced, my_proc_id, recvtag);
+ TRACE_smpi_comm_out(my_proc_id);
+ XBT_DEBUG("Exits SendRecv");
+}
+
void ComputeAction::kernel(simgrid::xbt::ReplayAction&)
{
const ComputeParser& args = get_args();
{
const SleepParser& args = get_args();
XBT_DEBUG("Sleep for: %lf secs", args.time);
- int rank = simgrid::s4u::this_actor::get_pid();
- TRACE_smpi_sleeping_in(rank, args.time);
+ aid_t pid = simgrid::s4u::this_actor::get_pid();
+ TRACE_smpi_sleeping_in(pid, args.time);
simgrid::s4u::this_actor::sleep_for(args.time/smpi_adjust_comp_speed());
- TRACE_smpi_sleeping_out(rank);
+ TRACE_smpi_sleeping_out(pid);
}
/** @brief Replay a location action: record the source file/line for trace call-location tracking. */
void LocationAction::kernel(simgrid::xbt::ReplayAction&)
{
  const LocationParser& args = get_args();
  smpi_trace_set_call_location(args.filename.c_str(), args.line, "replay_action");
}
void TestAction::kernel(simgrid::xbt::ReplayAction&)
{
const WaitTestParser& args = get_args();
- MPI_Request request = req_storage.find(args.src, args.dst, args.tag);
- req_storage.remove(request);
+ MPI_Request request = req_storage.pop(args.src, args.dst, args.tag);
// if request is null here, this may mean that a previous test has succeeded
// Different times in traced application and replayed version may lead to this
// In this case, ignore the extra calls.
if (request != MPI_REQUEST_NULL) {
- TRACE_smpi_comm_in(get_pid(), __func__, new simgrid::instr::NoOpTIData("test"));
+ TRACE_smpi_comm_in(get_pid(), __func__, new simgrid::instr::WaitTIData("test", args.src, args.dst, args.tag));
MPI_Status status;
int flag = 0;
void WaitAllAction::kernel(simgrid::xbt::ReplayAction&)
{
- const unsigned int count_requests = req_storage.size();
-
- if (count_requests > 0) {
- TRACE_smpi_comm_in(get_pid(), __func__, new simgrid::instr::Pt2PtTIData("waitall", -1, count_requests, ""));
- std::vector<std::pair</*sender*/int,/*recv*/int>> sender_receiver;
+ if (req_storage.size() > 0) {
+ std::vector<std::pair</*sender*/ aid_t, /*recv*/ aid_t>> sender_receiver;
std::vector<MPI_Request> reqs;
req_storage.get_requests(reqs);
+ unsigned long count_requests = reqs.size();
+ TRACE_smpi_comm_in(get_pid(), __func__, new simgrid::instr::CpuTIData("waitall", count_requests));
for (auto const& req : reqs) {
if (req && (req->flags() & MPI_REQ_RECV)) {
sender_receiver.emplace_back(req->src(), req->dst());
Request::waitall(count_requests, &(reqs.data())[0], MPI_STATUSES_IGNORE);
req_storage.get_store().clear();
- for (auto const& pair : sender_receiver) {
- TRACE_smpi_recv(pair.first, pair.second, 0);
+ for (MPI_Request& req : reqs)
+ if (req != MPI_REQUEST_NULL)
+ Request::unref(&req);
+
+ for (auto const& [src, dst] : sender_receiver) {
+ TRACE_smpi_recv(src, dst, 0);
}
TRACE_smpi_comm_out(get_pid());
}
{
const BcastArgParser& args = get_args();
TRACE_smpi_comm_in(get_pid(), "action_bcast",
- new simgrid::instr::CollTIData("bcast", MPI_COMM_WORLD->group()->actor(args.root)->get_pid(), -1.0,
- args.size, -1, Datatype::encode(args.datatype1), ""));
+ new simgrid::instr::CollTIData("bcast", args.root, -1.0, args.size,
+ 0, Datatype::encode(args.datatype1), ""));
colls::bcast(send_buffer(args.size * args.datatype1->size()), args.size, args.datatype1, args.root, MPI_COMM_WORLD);
{
const ReduceArgParser& args = get_args();
TRACE_smpi_comm_in(get_pid(), "action_reduce",
- new simgrid::instr::CollTIData("reduce", MPI_COMM_WORLD->group()->actor(args.root)->get_pid(),
- args.comp_size, args.comm_size, -1,
- Datatype::encode(args.datatype1), ""));
+ new simgrid::instr::CollTIData("reduce", args.root, args.comp_size,
+ args.comm_size, 0, Datatype::encode(args.datatype1), ""));
colls::reduce(send_buffer(args.comm_size * args.datatype1->size()),
recv_buffer(args.comm_size * args.datatype1->size()), args.comm_size, args.datatype1, MPI_OP_NULL,
args.root, MPI_COMM_WORLD);
- private_execute_flops(args.comp_size);
+ if (args.comp_size != 0.0)
+ simgrid::s4u::this_actor::exec_init(args.comp_size)
+ ->set_name("computation")
+ ->start()
+ ->wait();
TRACE_smpi_comm_out(get_pid());
}
{
const AllReduceArgParser& args = get_args();
TRACE_smpi_comm_in(get_pid(), "action_allreduce",
- new simgrid::instr::CollTIData("allreduce", -1, args.comp_size, args.comm_size, -1,
+ new simgrid::instr::CollTIData("allreduce", -1, args.comp_size, args.comm_size, 0,
Datatype::encode(args.datatype1), ""));
colls::allreduce(send_buffer(args.comm_size * args.datatype1->size()),
recv_buffer(args.comm_size * args.datatype1->size()), args.comm_size, args.datatype1, MPI_OP_NULL,
MPI_COMM_WORLD);
- private_execute_flops(args.comp_size);
+ if (args.comp_size != 0.0)
+ simgrid::s4u::this_actor::exec_init(args.comp_size)
+ ->set_name("computation")
+ ->start()
+ ->wait();
TRACE_smpi_comm_out(get_pid());
}
Datatype::encode(args.datatype1),
Datatype::encode(args.datatype2)));
- colls::alltoall(send_buffer(args.send_size * args.comm_size * args.datatype1->size()), args.send_size, args.datatype1,
- recv_buffer(args.recv_size * args.comm_size * args.datatype2->size()), args.recv_size, args.datatype2,
+ colls::alltoall(send_buffer(args.datatype1->size() * args.send_size * args.comm_size), args.send_size, args.datatype1,
+ recv_buffer(args.datatype2->size() * args.recv_size * args.comm_size), args.recv_size, args.datatype2,
MPI_COMM_WORLD);
TRACE_smpi_comm_out(get_pid());
if (get_name() == "gather") {
int rank = MPI_COMM_WORLD->rank();
colls::gather(send_buffer(args.send_size * args.datatype1->size()), args.send_size, args.datatype1,
- (rank == args.root) ? recv_buffer(args.recv_size * args.comm_size * args.datatype2->size()) : nullptr,
+ (rank == args.root) ? recv_buffer(args.datatype2->size() * args.recv_size * args.comm_size) : nullptr,
args.recv_size, args.datatype2, args.root, MPI_COMM_WORLD);
} else
colls::allgather(send_buffer(args.send_size * args.datatype1->size()), args.send_size, args.datatype1,
const ReduceScatterArgParser& args = get_args();
TRACE_smpi_comm_in(
get_pid(), "action_reducescatter",
- new simgrid::instr::VarCollTIData("reducescatter", -1, 0, nullptr, -1, args.recvcounts,
- std::to_string(args.comp_size), /* ugly hack to print comp_size */
+ new simgrid::instr::VarCollTIData(get_name(), -1, -1, nullptr, -1, args.recvcounts,
+ /* ugly as we use datatype field to pass computation as string */
+ /* and because of the trick to avoid getting 0.000000 when 0 is given */
+ args.comp_size == 0 ? "0" : std::to_string(args.comp_size),
Datatype::encode(args.datatype1)));
colls::reduce_scatter(send_buffer(args.recv_size_sum * args.datatype1->size()),
recv_buffer(args.recv_size_sum * args.datatype1->size()), args.recvcounts->data(),
args.datatype1, MPI_OP_NULL, MPI_COMM_WORLD);
+ if (args.comp_size != 0.0)
+ simgrid::s4u::this_actor::exec_init(args.comp_size)
+ ->set_name("computation")
+ ->start()
+ ->wait();
+ TRACE_smpi_comm_out(get_pid());
+}
- private_execute_flops(args.comp_size);
+void ScanAction::kernel(simgrid::xbt::ReplayAction&)
+{
+ const ScanArgParser& args = get_args();
+ TRACE_smpi_comm_in(get_pid(), "action_scan",
+ new simgrid::instr::CollTIData(get_name(), -1, args.comp_size,
+ args.size, 0, Datatype::encode(args.datatype1), ""));
+ if (get_name() == "scan")
+ colls::scan(send_buffer(args.size * args.datatype1->size()),
+ recv_buffer(args.size * args.datatype1->size()), args.size,
+ args.datatype1, MPI_OP_NULL, MPI_COMM_WORLD);
+ else
+ colls::exscan(send_buffer(args.size * args.datatype1->size()),
+ recv_buffer(args.size * args.datatype1->size()), args.size,
+ args.datatype1, MPI_OP_NULL, MPI_COMM_WORLD);
+
+ if (args.comp_size != 0.0)
+ simgrid::s4u::this_actor::exec_init(args.comp_size)
+ ->set_name("computation")
+ ->start()
+ ->wait();
TRACE_smpi_comm_out(get_pid());
}
TRACE_smpi_comm_out(get_pid());
}
-} // Replay Namespace
-}} // namespace simgrid::smpi
+} // namespace simgrid::smpi::replay
static std::unordered_map<aid_t, simgrid::smpi::replay::RequestStorage> storage;
/** @brief Only initialize the replay, don't do it for real */
smpi_process()->mark_as_initialized();
smpi_process()->set_replaying(true);
- int my_proc_id = simgrid::s4u::this_actor::get_pid();
-
- TRACE_smpi_init(my_proc_id, "smpi_replay_run_init");
+ TRACE_smpi_init(simgrid::s4u::this_actor::get_pid(), "smpi_replay_run_init");
xbt_replay_action_register("init", [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::InitAction().execute(action); });
xbt_replay_action_register("finalize", [](simgrid::xbt::ReplayAction const&) { /* nothing to do */ });
xbt_replay_action_register("comm_size", [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::CommunicatorAction().execute(action); });
xbt_replay_action_register("recv", [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::RecvAction("recv", storage[simgrid::s4u::this_actor::get_pid()]).execute(action); });
xbt_replay_action_register("irecv", [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::RecvAction("irecv", storage[simgrid::s4u::this_actor::get_pid()]).execute(action); });
xbt_replay_action_register("test", [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::TestAction(storage[simgrid::s4u::this_actor::get_pid()]).execute(action); });
+ xbt_replay_action_register("sendRecv", [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::SendRecvAction().execute(action); });
xbt_replay_action_register("wait", [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::WaitAction(storage[simgrid::s4u::this_actor::get_pid()]).execute(action); });
xbt_replay_action_register("waitall", [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::WaitAllAction(storage[simgrid::s4u::this_actor::get_pid()]).execute(action); });
xbt_replay_action_register("barrier", [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::BarrierAction().execute(action); });
xbt_replay_action_register("allgather", [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::GatherAction("allgather").execute(action); });
xbt_replay_action_register("allgatherv", [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::GatherVAction("allgatherv").execute(action); });
xbt_replay_action_register("reducescatter", [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::ReduceScatterAction().execute(action); });
+ xbt_replay_action_register("scan", [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::ScanAction("scan").execute(action); });
+ xbt_replay_action_register("exscan", [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::ScanAction("exscan").execute(action); });
xbt_replay_action_register("compute", [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::ComputeAction().execute(action); });
xbt_replay_action_register("sleep", [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::SleepAction().execute(action); });
xbt_replay_action_register("location", [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::LocationAction().execute(action); });
// Wait for the other actors to initialize also
simgrid::s4u::this_actor::yield();
}
+ if(_smpi_init_sleep > 0)
+ simgrid::s4u::this_actor::sleep_for(_smpi_init_sleep);
}
/** @brief actually run the replay after initialization */
-void smpi_replay_main(int rank, const char* trace_filename)
+void smpi_replay_main(int rank, const char* private_trace_filename)
{
static int active_processes = 0;
active_processes++;
storage[simgrid::s4u::this_actor::get_pid()] = simgrid::smpi::replay::RequestStorage();
std::string rank_string = std::to_string(rank);
- simgrid::xbt::replay_runner(rank_string.c_str(), trace_filename);
+ simgrid::xbt::replay_runner(rank_string.c_str(), private_trace_filename);
/* and now, finalize everything */
/* One active process will stop. Decrease the counter*/
unsigned int count_requests = storage[simgrid::s4u::this_actor::get_pid()].size();
- XBT_DEBUG("There are %ud elements in reqq[*]", count_requests);
+ XBT_DEBUG("There are %u elements in reqq[*]", count_requests);
if (count_requests > 0) {
std::vector<MPI_Request> requests(count_requests);
unsigned int i=0;
- for (auto const& pair : storage[simgrid::s4u::this_actor::get_pid()].get_store()) {
- requests[i] = pair.second;
+ for (auto const& [_, reqs] : storage[simgrid::s4u::this_actor::get_pid()].get_store()) {
+ for (const auto& req : reqs) {
+ requests[i] = req; // FIXME: overwritten at each iteration?
+ }
i++;
}
simgrid::smpi::Request::waitall(count_requests, requests.data(), MPI_STATUSES_IGNORE);
}
+
+ if (simgrid::config::get_value<bool>("smpi/barrier-finalization"))
+ simgrid::smpi::colls::barrier(MPI_COMM_WORLD);
+
active_processes--;
if(active_processes==0){
}
/** @brief chain a replay initialization and a replay start */
-void smpi_replay_run(const char* instance_id, int rank, double start_delay_flops, const char* trace_filename)
+void smpi_replay_run(const char* instance_id, int rank, double start_delay_flops, const char* private_trace_filename)
{
smpi_replay_init(instance_id, rank, start_delay_flops);
- smpi_replay_main(rank, trace_filename);
+ smpi_replay_main(rank, private_trace_filename);
}