-/* Copyright (c) 2009-2019. The SimGrid Team. All rights reserved. */
+/* Copyright (c) 2009-2020. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
};
}
-typedef std::tuple</*sender*/ int, /* reciever */ int, /* tag */int> req_key_t;
-typedef std::unordered_map<req_key_t, MPI_Request, hash_tuple::hash<std::tuple<int,int,int>>> req_storage_t;
+using req_key_t = std::tuple</*sender*/ int, /* receiver */ int, /* tag */ int>;
+using req_storage_t = std::unordered_map<req_key_t, MPI_Request, hash_tuple::hash<std::tuple<int, int, int>>>;
-void log_timed_action(simgrid::xbt::ReplayAction& action, double clock)
+void log_timed_action(const simgrid::xbt::ReplayAction& action, double clock)
{
if (XBT_LOG_ISENABLED(smpi_replay, xbt_log_priority_verbose)){
std::string s = boost::algorithm::join(action, " ");
req_storage_t store;
public:
- RequestStorage() {}
- int size()
- {
- return store.size();
- }
+ RequestStorage() = default;
+ int size() const { return store.size(); }
- req_storage_t& get_store()
- {
- return store;
- }
+ req_storage_t& get_store() { return store; }
- void get_requests(std::vector<MPI_Request>& vec)
- {
- for (auto& pair : store) {
- auto& req = pair.second;
- auto my_proc_id = simgrid::s4u::this_actor::get_pid();
- if (req != MPI_REQUEST_NULL && (req->src() == my_proc_id || req->dst() == my_proc_id)) {
- vec.push_back(pair.second);
- pair.second->print_request("MM");
- }
+ void get_requests(std::vector<MPI_Request>& vec) const
+ {
+ for (auto const& pair : store) {
+ auto& req = pair.second;
+ auto my_proc_id = simgrid::s4u::this_actor::get_pid();
+ if (req != MPI_REQUEST_NULL && (req->src() == my_proc_id || req->dst() == my_proc_id)) {
+ vec.push_back(pair.second);
+ pair.second->print_request("MM");
}
}
+ }
MPI_Request find(int src, int dst, int tag)
{
- req_storage_t::iterator it = store.find(req_key_t(src, dst, tag));
+ auto it = store.find(req_key_t(src, dst, tag));
return (it == store.end()) ? MPI_REQUEST_NULL : it->second;
}
- void remove(MPI_Request req)
+ void remove(const Request* req)
{
if (req == MPI_REQUEST_NULL) return;
CHECK_ACTION_PARAMS(action, comm_size + 1, 2)
send_size = parse_double(action[2]);
disps = std::vector<int>(comm_size, 0);
- recvcounts = std::shared_ptr<std::vector<int>>(new std::vector<int>(comm_size));
+ recvcounts = std::make_shared<std::vector<int>>(comm_size);
if (name == "gatherv") {
root = (action.size() > 3 + comm_size) ? std::stoi(action[3 + comm_size]) : 0;
if (action.size() > 5 + comm_size)
datatype2 = simgrid::smpi::Datatype::decode(action[5 + comm_size]);
} else {
- int datatype_index = 0;
int disp_index = 0;
/* The 3 comes from "0 gather <sendcount>", which must always be present.
* The + comm_size is the recvcounts array, which must also be present
*/
if (action.size() > 3 + comm_size + comm_size) { /* datatype + disp are specified */
- datatype_index = 3 + comm_size;
+ int datatype_index = 3 + comm_size;
disp_index = datatype_index + 1;
datatype1 = simgrid::smpi::Datatype::decode(action[datatype_index]);
datatype2 = simgrid::smpi::Datatype::decode(action[datatype_index]);
3 + comm_size + 2) { /* disps specified; datatype is not specified; use the default one */
disp_index = 3 + comm_size;
} else if (action.size() > 3 + comm_size) { /* only datatype, no disp specified */
- datatype_index = 3 + comm_size;
+ int datatype_index = 3 + comm_size;
datatype1 = simgrid::smpi::Datatype::decode(action[datatype_index]);
datatype2 = simgrid::smpi::Datatype::decode(action[datatype_index]);
}
CHECK_ACTION_PARAMS(action, comm_size + 1, 2)
recv_size = parse_double(action[2 + comm_size]);
disps = std::vector<int>(comm_size, 0);
- sendcounts = std::shared_ptr<std::vector<int>>(new std::vector<int>(comm_size));
+ sendcounts = std::make_shared<std::vector<int>>(comm_size);
if (action.size() > 5 + comm_size)
datatype1 = simgrid::smpi::Datatype::decode(action[4 + comm_size]);
comm_size = MPI_COMM_WORLD->size();
CHECK_ACTION_PARAMS(action, comm_size + 1, 1)
comp_size = parse_double(action[2 + comm_size]);
- recvcounts = std::shared_ptr<std::vector<int>>(new std::vector<int>(comm_size));
+ recvcounts = std::make_shared<std::vector<int>>(comm_size);
if (action.size() > 3 + comm_size)
datatype1 = simgrid::smpi::Datatype::decode(action[3 + comm_size]);
*/
comm_size = MPI_COMM_WORLD->size();
CHECK_ACTION_PARAMS(action, 2 * comm_size + 2, 2)
- sendcounts = std::shared_ptr<std::vector<int>>(new std::vector<int>(comm_size));
- recvcounts = std::shared_ptr<std::vector<int>>(new std::vector<int>(comm_size));
+ sendcounts = std::make_shared<std::vector<int>>(comm_size);
+ recvcounts = std::make_shared<std::vector<int>>(comm_size);
senddisps = std::vector<int>(comm_size, 0);
recvdisps = std::vector<int>(comm_size, 0);
{
std::string s = boost::algorithm::join(action, " ");
xbt_assert(req_storage.size(), "action wait not preceded by any irecv or isend: %s", s.c_str());
+ const WaitTestParser& args = get_args();
MPI_Request request = req_storage.find(args.src, args.dst, args.tag);
req_storage.remove(request);
if (request == MPI_REQUEST_NULL) {
- /* Assume that the trace is well formed, meaning the comm might have been caught by a MPI_test. Then just
+ /* Assume that the trace is well formed, meaning the comm might have been caught by an MPI_test. Then just
* return.*/
return;
}
// Must be taken before Request::wait() since the request may be set to
// MPI_REQUEST_NULL by Request::wait!
bool is_wait_for_receive = (request->flags() & MPI_REQ_RECV);
- // TODO: Here we take the rank while we normally take the process id (look for my_proc_id)
+ // TODO: Here we take the rank while we normally take the process id (look for get_pid())
TRACE_smpi_comm_in(rank, __func__, new simgrid::instr::WaitTIData(args.src, args.dst, args.tag));
MPI_Status status;
void SendAction::kernel(simgrid::xbt::ReplayAction&)
{
+ const SendRecvParser& args = get_args();
int dst_traced = MPI_COMM_WORLD->group()->actor(args.partner)->get_pid();
- TRACE_smpi_comm_in(my_proc_id, __func__, new simgrid::instr::Pt2PtTIData(name, args.partner, args.size,
- args.tag, Datatype::encode(args.datatype1)));
+ TRACE_smpi_comm_in(
+ get_pid(), __func__,
+ new simgrid::instr::Pt2PtTIData(get_name(), args.partner, args.size, args.tag, Datatype::encode(args.datatype1)));
if (not TRACE_smpi_view_internals())
- TRACE_smpi_send(my_proc_id, my_proc_id, dst_traced, args.tag, args.size * args.datatype1->size());
+ TRACE_smpi_send(get_pid(), get_pid(), dst_traced, args.tag, args.size * args.datatype1->size());
- if (name == "send") {
+ if (get_name() == "send") {
Request::send(nullptr, args.size, args.datatype1, args.partner, args.tag, MPI_COMM_WORLD);
- } else if (name == "isend") {
+ } else if (get_name() == "isend") {
MPI_Request request = Request::isend(nullptr, args.size, args.datatype1, args.partner, args.tag, MPI_COMM_WORLD);
req_storage.add(request);
} else {
- xbt_die("Don't know this action, %s", name.c_str());
+ xbt_die("Don't know this action, %s", get_name().c_str());
}
- TRACE_smpi_comm_out(my_proc_id);
+ TRACE_smpi_comm_out(get_pid());
}
void RecvAction::kernel(simgrid::xbt::ReplayAction&)
{
- TRACE_smpi_comm_in(my_proc_id, __func__, new simgrid::instr::Pt2PtTIData(name, args.partner, args.size,
- args.tag, Datatype::encode(args.datatype1)));
+ const SendRecvParser& args = get_args();
+ TRACE_smpi_comm_in(
+ get_pid(), __func__,
+ new simgrid::instr::Pt2PtTIData(get_name(), args.partner, args.size, args.tag, Datatype::encode(args.datatype1)));
MPI_Status status;
// unknown size from the receiver point of view
- if (args.size <= 0.0) {
+ double arg_size = args.size;
+ if (arg_size <= 0.0) {
Request::probe(args.partner, args.tag, MPI_COMM_WORLD, &status);
- args.size = status.count;
+ arg_size = status.count;
}
- bool is_recv = false; // Help analyzers understanding that status is not used unintialized
- if (name == "recv") {
+ bool is_recv = false; // Help analyzers understand that status is not used uninitialized
+ if (get_name() == "recv") {
is_recv = true;
- Request::recv(nullptr, args.size, args.datatype1, args.partner, args.tag, MPI_COMM_WORLD, &status);
- } else if (name == "irecv") {
- MPI_Request request = Request::irecv(nullptr, args.size, args.datatype1, args.partner, args.tag, MPI_COMM_WORLD);
+ Request::recv(nullptr, arg_size, args.datatype1, args.partner, args.tag, MPI_COMM_WORLD, &status);
+ } else if (get_name() == "irecv") {
+ MPI_Request request = Request::irecv(nullptr, arg_size, args.datatype1, args.partner, args.tag, MPI_COMM_WORLD);
req_storage.add(request);
} else {
THROW_IMPOSSIBLE;
}
- TRACE_smpi_comm_out(my_proc_id);
+ TRACE_smpi_comm_out(get_pid());
if (is_recv && not TRACE_smpi_view_internals()) {
int src_traced = MPI_COMM_WORLD->group()->actor(status.MPI_SOURCE)->get_pid();
- TRACE_smpi_recv(src_traced, my_proc_id, args.tag);
+ TRACE_smpi_recv(src_traced, get_pid(), args.tag);
}
}
void ComputeAction::kernel(simgrid::xbt::ReplayAction&)
{
- smpi_execute_flops(args.flops/smpi_adjust_comp_speed());
+ const ComputeParser& args = get_args();
+ if (smpi_cfg_simulate_computation()) {
+ smpi_execute_flops(args.flops/smpi_adjust_comp_speed());
+ }
+}
void SleepAction::kernel(simgrid::xbt::ReplayAction&)
{
+ const SleepParser& args = get_args();
XBT_DEBUG("Sleep for: %lf secs", args.time);
int rank = simgrid::s4u::this_actor::get_pid();
TRACE_smpi_sleeping_in(rank, args.time);
void LocationAction::kernel(simgrid::xbt::ReplayAction&)
{
+ const LocationParser& args = get_args();
smpi_trace_set_call_location(args.filename.c_str(), args.line);
}
void TestAction::kernel(simgrid::xbt::ReplayAction&)
{
+ const WaitTestParser& args = get_args();
MPI_Request request = req_storage.find(args.src, args.dst, args.tag);
req_storage.remove(request);
// if request is null here, this may mean that a previous test has succeeded
// Different times in traced application and replayed version may lead to this
// In this case, ignore the extra calls.
if (request != MPI_REQUEST_NULL) {
- TRACE_smpi_comm_in(my_proc_id, __func__, new simgrid::instr::NoOpTIData("test"));
+ TRACE_smpi_comm_in(get_pid(), __func__, new simgrid::instr::NoOpTIData("test"));
MPI_Status status;
int flag = 0;
else
req_storage.add(request);
- TRACE_smpi_comm_out(my_proc_id);
+ TRACE_smpi_comm_out(get_pid());
}
}
const unsigned int count_requests = req_storage.size();
if (count_requests > 0) {
- TRACE_smpi_comm_in(my_proc_id, __func__, new simgrid::instr::Pt2PtTIData("waitall", -1, count_requests, ""));
+ TRACE_smpi_comm_in(get_pid(), __func__, new simgrid::instr::Pt2PtTIData("waitall", -1, count_requests, ""));
std::vector<std::pair</*sender*/int,/*recv*/int>> sender_receiver;
std::vector<MPI_Request> reqs;
req_storage.get_requests(reqs);
- for (const auto& req : reqs) {
+ for (auto const& req : reqs) {
if (req && (req->flags() & MPI_REQ_RECV)) {
- sender_receiver.push_back({req->src(), req->dst()});
+ sender_receiver.emplace_back(req->src(), req->dst());
}
}
- MPI_Status status[count_requests];
- Request::waitall(count_requests, &(reqs.data())[0], status);
+ Request::waitall(count_requests, &(reqs.data())[0], MPI_STATUSES_IGNORE);
req_storage.get_store().clear();
- for (auto& pair : sender_receiver) {
+ for (auto const& pair : sender_receiver) {
TRACE_smpi_recv(pair.first, pair.second, 0);
}
- TRACE_smpi_comm_out(my_proc_id);
+ TRACE_smpi_comm_out(get_pid());
}
}
void BarrierAction::kernel(simgrid::xbt::ReplayAction&)
{
- TRACE_smpi_comm_in(my_proc_id, __func__, new simgrid::instr::NoOpTIData("barrier"));
- Colls::barrier(MPI_COMM_WORLD);
- TRACE_smpi_comm_out(my_proc_id);
+ TRACE_smpi_comm_in(get_pid(), __func__, new simgrid::instr::NoOpTIData("barrier"));
+ colls::barrier(MPI_COMM_WORLD);
+ TRACE_smpi_comm_out(get_pid());
}
void BcastAction::kernel(simgrid::xbt::ReplayAction&)
{
- TRACE_smpi_comm_in(my_proc_id, "action_bcast",
- new simgrid::instr::CollTIData("bcast", MPI_COMM_WORLD->group()->actor(args.root)->get_pid(),
- -1.0, args.size, -1, Datatype::encode(args.datatype1), ""));
+ const BcastArgParser& args = get_args();
+ TRACE_smpi_comm_in(get_pid(), "action_bcast",
+ new simgrid::instr::CollTIData("bcast", MPI_COMM_WORLD->group()->actor(args.root)->get_pid(), -1.0,
+ args.size, -1, Datatype::encode(args.datatype1), ""));
- Colls::bcast(send_buffer(args.size * args.datatype1->size()), args.size, args.datatype1, args.root, MPI_COMM_WORLD);
+ colls::bcast(send_buffer(args.size * args.datatype1->size()), args.size, args.datatype1, args.root, MPI_COMM_WORLD);
- TRACE_smpi_comm_out(my_proc_id);
+ TRACE_smpi_comm_out(get_pid());
}
void ReduceAction::kernel(simgrid::xbt::ReplayAction&)
{
- TRACE_smpi_comm_in(my_proc_id, "action_reduce",
- new simgrid::instr::CollTIData("reduce", MPI_COMM_WORLD->group()->actor(args.root)->get_pid(),
- args.comp_size, args.comm_size, -1,
- Datatype::encode(args.datatype1), ""));
+ const ReduceArgParser& args = get_args();
+ TRACE_smpi_comm_in(get_pid(), "action_reduce",
+ new simgrid::instr::CollTIData("reduce", MPI_COMM_WORLD->group()->actor(args.root)->get_pid(),
+ args.comp_size, args.comm_size, -1,
+ Datatype::encode(args.datatype1), ""));
- Colls::reduce(send_buffer(args.comm_size * args.datatype1->size()),
- recv_buffer(args.comm_size * args.datatype1->size()), args.comm_size, args.datatype1, MPI_OP_NULL, args.root, MPI_COMM_WORLD);
+ colls::reduce(send_buffer(args.comm_size * args.datatype1->size()),
+ recv_buffer(args.comm_size * args.datatype1->size()), args.comm_size, args.datatype1, MPI_OP_NULL,
+ args.root, MPI_COMM_WORLD);
private_execute_flops(args.comp_size);
- TRACE_smpi_comm_out(my_proc_id);
+ TRACE_smpi_comm_out(get_pid());
}
void AllReduceAction::kernel(simgrid::xbt::ReplayAction&)
{
- TRACE_smpi_comm_in(my_proc_id, "action_allreduce", new simgrid::instr::CollTIData("allreduce", -1, args.comp_size, args.comm_size, -1,
- Datatype::encode(args.datatype1), ""));
+ const AllReduceArgParser& args = get_args();
+ TRACE_smpi_comm_in(get_pid(), "action_allreduce",
+ new simgrid::instr::CollTIData("allreduce", -1, args.comp_size, args.comm_size, -1,
+ Datatype::encode(args.datatype1), ""));
- Colls::allreduce(send_buffer(args.comm_size * args.datatype1->size()),
- recv_buffer(args.comm_size * args.datatype1->size()), args.comm_size, args.datatype1, MPI_OP_NULL, MPI_COMM_WORLD);
+ colls::allreduce(send_buffer(args.comm_size * args.datatype1->size()),
+ recv_buffer(args.comm_size * args.datatype1->size()), args.comm_size, args.datatype1, MPI_OP_NULL,
+ MPI_COMM_WORLD);
private_execute_flops(args.comp_size);
- TRACE_smpi_comm_out(my_proc_id);
+ TRACE_smpi_comm_out(get_pid());
}
void AllToAllAction::kernel(simgrid::xbt::ReplayAction&)
{
- TRACE_smpi_comm_in(my_proc_id, "action_alltoall",
- new simgrid::instr::CollTIData("alltoall", -1, -1.0, args.send_size, args.recv_size,
- Datatype::encode(args.datatype1),
- Datatype::encode(args.datatype2)));
+ const AllToAllArgParser& args = get_args();
+ TRACE_smpi_comm_in(get_pid(), "action_alltoall",
+ new simgrid::instr::CollTIData("alltoall", -1, -1.0, args.send_size, args.recv_size,
+ Datatype::encode(args.datatype1),
+ Datatype::encode(args.datatype2)));
- Colls::alltoall(send_buffer(args.send_size * args.comm_size * args.datatype1->size()), args.send_size,
- args.datatype1, recv_buffer(args.recv_size * args.comm_size * args.datatype2->size()),
- args.recv_size, args.datatype2, MPI_COMM_WORLD);
+ colls::alltoall(send_buffer(args.send_size * args.comm_size * args.datatype1->size()), args.send_size, args.datatype1,
+ recv_buffer(args.recv_size * args.comm_size * args.datatype2->size()), args.recv_size, args.datatype2,
+ MPI_COMM_WORLD);
- TRACE_smpi_comm_out(my_proc_id);
+ TRACE_smpi_comm_out(get_pid());
}
void GatherAction::kernel(simgrid::xbt::ReplayAction&)
{
- TRACE_smpi_comm_in(my_proc_id, name.c_str(), new simgrid::instr::CollTIData(name, (name == "gather") ? args.root : -1, -1.0, args.send_size, args.recv_size,
- Datatype::encode(args.datatype1), Datatype::encode(args.datatype2)));
+ const GatherArgParser& args = get_args();
+ TRACE_smpi_comm_in(get_pid(), get_name().c_str(),
+ new simgrid::instr::CollTIData(get_name(), (get_name() == "gather") ? args.root : -1, -1.0,
+ args.send_size, args.recv_size, Datatype::encode(args.datatype1),
+ Datatype::encode(args.datatype2)));
- if (name == "gather") {
+ if (get_name() == "gather") {
int rank = MPI_COMM_WORLD->rank();
- Colls::gather(send_buffer(args.send_size * args.datatype1->size()), args.send_size, args.datatype1,
- (rank == args.root) ? recv_buffer(args.recv_size * args.comm_size * args.datatype2->size()) : nullptr, args.recv_size, args.datatype2, args.root, MPI_COMM_WORLD);
- }
- else
- Colls::allgather(send_buffer(args.send_size * args.datatype1->size()), args.send_size, args.datatype1,
- recv_buffer(args.recv_size * args.datatype2->size()), args.recv_size, args.datatype2, MPI_COMM_WORLD);
+ colls::gather(send_buffer(args.send_size * args.datatype1->size()), args.send_size, args.datatype1,
+ (rank == args.root) ? recv_buffer(args.recv_size * args.comm_size * args.datatype2->size()) : nullptr,
+ args.recv_size, args.datatype2, args.root, MPI_COMM_WORLD);
+ } else
+ colls::allgather(send_buffer(args.send_size * args.datatype1->size()), args.send_size, args.datatype1,
+ recv_buffer(args.recv_size * args.datatype2->size()), args.recv_size, args.datatype2,
+ MPI_COMM_WORLD);
- TRACE_smpi_comm_out(my_proc_id);
+ TRACE_smpi_comm_out(get_pid());
}
void GatherVAction::kernel(simgrid::xbt::ReplayAction&)
{
int rank = MPI_COMM_WORLD->rank();
-
- TRACE_smpi_comm_in(my_proc_id, name.c_str(), new simgrid::instr::VarCollTIData(
- name, (name == "gatherv") ? args.root : -1, args.send_size, nullptr, -1, args.recvcounts,
- Datatype::encode(args.datatype1), Datatype::encode(args.datatype2)));
-
- if (name == "gatherv") {
- Colls::gatherv(send_buffer(args.send_size * args.datatype1->size()), args.send_size, args.datatype1,
- (rank == args.root) ? recv_buffer(args.recv_size_sum * args.datatype2->size()) : nullptr,
- args.recvcounts->data(), args.disps.data(), args.datatype2, args.root, MPI_COMM_WORLD);
- }
- else {
- Colls::allgatherv(send_buffer(args.send_size * args.datatype1->size()), args.send_size, args.datatype1,
- recv_buffer(args.recv_size_sum * args.datatype2->size()), args.recvcounts->data(),
- args.disps.data(), args.datatype2, MPI_COMM_WORLD);
+ const GatherVArgParser& args = get_args();
+ TRACE_smpi_comm_in(get_pid(), get_name().c_str(),
+ new simgrid::instr::VarCollTIData(
+ get_name(), (get_name() == "gatherv") ? args.root : -1, args.send_size, nullptr, -1,
+ args.recvcounts, Datatype::encode(args.datatype1), Datatype::encode(args.datatype2)));
+
+ if (get_name() == "gatherv") {
+ colls::gatherv(send_buffer(args.send_size * args.datatype1->size()), args.send_size, args.datatype1,
+ (rank == args.root) ? recv_buffer(args.recv_size_sum * args.datatype2->size()) : nullptr,
+ args.recvcounts->data(), args.disps.data(), args.datatype2, args.root, MPI_COMM_WORLD);
+ } else {
+ colls::allgatherv(send_buffer(args.send_size * args.datatype1->size()), args.send_size, args.datatype1,
+ recv_buffer(args.recv_size_sum * args.datatype2->size()), args.recvcounts->data(),
+ args.disps.data(), args.datatype2, MPI_COMM_WORLD);
}
- TRACE_smpi_comm_out(my_proc_id);
+ TRACE_smpi_comm_out(get_pid());
}
void ScatterAction::kernel(simgrid::xbt::ReplayAction&)
{
int rank = MPI_COMM_WORLD->rank();
- TRACE_smpi_comm_in(my_proc_id, "action_scatter", new simgrid::instr::CollTIData(name, args.root, -1.0, args.send_size, args.recv_size,
- Datatype::encode(args.datatype1),
- Datatype::encode(args.datatype2)));
+ const ScatterArgParser& args = get_args();
+ TRACE_smpi_comm_in(get_pid(), "action_scatter",
+ new simgrid::instr::CollTIData(get_name(), args.root, -1.0, args.send_size, args.recv_size,
+ Datatype::encode(args.datatype1),
+ Datatype::encode(args.datatype2)));
- Colls::scatter(send_buffer(args.send_size * args.datatype1->size()), args.send_size, args.datatype1,
- (rank == args.root) ? recv_buffer(args.recv_size * args.datatype2->size()) : nullptr, args.recv_size, args.datatype2, args.root, MPI_COMM_WORLD);
+ colls::scatter(send_buffer(args.send_size * args.datatype1->size()), args.send_size, args.datatype1,
+ (rank == args.root) ? recv_buffer(args.recv_size * args.datatype2->size()) : nullptr, args.recv_size,
+ args.datatype2, args.root, MPI_COMM_WORLD);
- TRACE_smpi_comm_out(my_proc_id);
+ TRACE_smpi_comm_out(get_pid());
}
void ScatterVAction::kernel(simgrid::xbt::ReplayAction&)
{
int rank = MPI_COMM_WORLD->rank();
- TRACE_smpi_comm_in(my_proc_id, "action_scatterv", new simgrid::instr::VarCollTIData(name, args.root, -1, args.sendcounts, args.recv_size,
- nullptr, Datatype::encode(args.datatype1),
- Datatype::encode(args.datatype2)));
+ const ScatterVArgParser& args = get_args();
+ TRACE_smpi_comm_in(get_pid(), "action_scatterv",
+ new simgrid::instr::VarCollTIData(get_name(), args.root, -1, args.sendcounts, args.recv_size,
+ nullptr, Datatype::encode(args.datatype1),
+ Datatype::encode(args.datatype2)));
- Colls::scatterv((rank == args.root) ? send_buffer(args.send_size_sum * args.datatype1->size()) : nullptr,
- args.sendcounts->data(), args.disps.data(), args.datatype1,
- recv_buffer(args.recv_size * args.datatype2->size()), args.recv_size, args.datatype2, args.root,
- MPI_COMM_WORLD);
+ colls::scatterv((rank == args.root) ? send_buffer(args.send_size_sum * args.datatype1->size()) : nullptr,
+ args.sendcounts->data(), args.disps.data(), args.datatype1,
+ recv_buffer(args.recv_size * args.datatype2->size()), args.recv_size, args.datatype2, args.root,
+ MPI_COMM_WORLD);
- TRACE_smpi_comm_out(my_proc_id);
+ TRACE_smpi_comm_out(get_pid());
}
void ReduceScatterAction::kernel(simgrid::xbt::ReplayAction&)
{
- TRACE_smpi_comm_in(my_proc_id, "action_reducescatter",
+ const ReduceScatterArgParser& args = get_args();
+ TRACE_smpi_comm_in(
+ get_pid(), "action_reducescatter",
new simgrid::instr::VarCollTIData("reducescatter", -1, 0, nullptr, -1, args.recvcounts,
- std::to_string(args.comp_size), /* ugly hack to print comp_size */
- Datatype::encode(args.datatype1)));
+ std::to_string(args.comp_size), /* ugly hack to print comp_size */
+ Datatype::encode(args.datatype1)));
- Colls::reduce_scatter(send_buffer(args.recv_size_sum * args.datatype1->size()),
- recv_buffer(args.recv_size_sum * args.datatype1->size()), args.recvcounts->data(),
- args.datatype1, MPI_OP_NULL, MPI_COMM_WORLD);
+ colls::reduce_scatter(send_buffer(args.recv_size_sum * args.datatype1->size()),
+ recv_buffer(args.recv_size_sum * args.datatype1->size()), args.recvcounts->data(),
+ args.datatype1, MPI_OP_NULL, MPI_COMM_WORLD);
private_execute_flops(args.comp_size);
- TRACE_smpi_comm_out(my_proc_id);
+ TRACE_smpi_comm_out(get_pid());
}
void AllToAllVAction::kernel(simgrid::xbt::ReplayAction&)
{
- TRACE_smpi_comm_in(my_proc_id, __func__,
- new simgrid::instr::VarCollTIData(
- "alltoallv", -1, args.send_size_sum, args.sendcounts, args.recv_size_sum, args.recvcounts,
- Datatype::encode(args.datatype1), Datatype::encode(args.datatype2)));
+ const AllToAllVArgParser& args = get_args();
+ TRACE_smpi_comm_in(get_pid(), __func__,
+ new simgrid::instr::VarCollTIData(
+ "alltoallv", -1, args.send_size_sum, args.sendcounts, args.recv_size_sum, args.recvcounts,
+ Datatype::encode(args.datatype1), Datatype::encode(args.datatype2)));
- Colls::alltoallv(send_buffer(args.send_buf_size * args.datatype1->size()), args.sendcounts->data(), args.senddisps.data(), args.datatype1,
- recv_buffer(args.recv_buf_size * args.datatype2->size()), args.recvcounts->data(), args.recvdisps.data(), args.datatype2, MPI_COMM_WORLD);
+ colls::alltoallv(send_buffer(args.send_buf_size * args.datatype1->size()), args.sendcounts->data(),
+ args.senddisps.data(), args.datatype1, recv_buffer(args.recv_buf_size * args.datatype2->size()),
+ args.recvcounts->data(), args.recvdisps.data(), args.datatype2, MPI_COMM_WORLD);
- TRACE_smpi_comm_out(my_proc_id);
+ TRACE_smpi_comm_out(get_pid());
}
} // Replay Namespace
}} // namespace simgrid::smpi
int my_proc_id = simgrid::s4u::this_actor::get_pid();
- TRACE_smpi_init(my_proc_id);
- TRACE_smpi_computing_init(my_proc_id);
- TRACE_smpi_comm_in(my_proc_id, "smpi_replay_run_init", new simgrid::instr::NoOpTIData("init"));
- TRACE_smpi_comm_out(my_proc_id);
+ TRACE_smpi_init(my_proc_id, "smpi_replay_run_init");
xbt_replay_action_register("init", [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::InitAction().execute(action); });
- xbt_replay_action_register("finalize", [](simgrid::xbt::ReplayAction&) { /* nothing to do */ });
+ xbt_replay_action_register("finalize", [](simgrid::xbt::ReplayAction const&) { /* nothing to do */ });
xbt_replay_action_register("comm_size", [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::CommunicatorAction().execute(action); });
xbt_replay_action_register("comm_split",[](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::CommunicatorAction().execute(action); });
xbt_replay_action_register("comm_dup", [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::CommunicatorAction().execute(action); });
unsigned int count_requests = storage[simgrid::s4u::this_actor::get_pid()].size();
XBT_DEBUG("There are %ud elements in reqq[*]", count_requests);
if (count_requests > 0) {
- MPI_Request requests[count_requests];
- MPI_Status status[count_requests];
+ std::vector<MPI_Request> requests(count_requests);
unsigned int i=0;
for (auto const& pair : storage[simgrid::s4u::this_actor::get_pid()].get_store()) {
requests[i] = pair.second;
i++;
}
- simgrid::smpi::Request::waitall(count_requests, requests, status);
+ simgrid::smpi::Request::waitall(count_requests, requests.data(), MPI_STATUSES_IGNORE);
}
active_processes--;