bool Request::match_recv(void* a, void* b, simgrid::kernel::activity::CommImpl*)
{
- MPI_Request ref = static_cast<MPI_Request>(a);
- MPI_Request req = static_cast<MPI_Request>(b);
+ auto ref = static_cast<MPI_Request>(a);
+ auto req = static_cast<MPI_Request>(b);
return match_common(req, req, ref);
}
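// Hedged note on the argument order (inferred from these two wrappers; match_common itself
// is not part of this hunk): match_common(req, sender, receiver) appears to compare the
// send-side and receive-side requests on source and tag, honoring MPI_ANY_SOURCE and
// MPI_ANY_TAG on the receiver. In match_recv the posted request (a/ref) plays the receiver
// and the incoming one (b/req) the sender; match_send below swaps those roles.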
bool Request::match_send(void* a, void* b, simgrid::kernel::activity::CommImpl*)
{
- MPI_Request ref = static_cast<MPI_Request>(a);
- MPI_Request req = static_cast<MPI_Request>(b);
+ auto ref = static_cast<MPI_Request>(a);
+ auto req = static_cast<MPI_Request>(b);
return match_common(req, ref, req);
}
-void Request::print_request(const char *message)
+void Request::print_request(const char* message) const
{
XBT_VERB("%s request %p [buf = %p, size = %zu, src = %d, dst = %d, tag = %d, flags = %x]",
message, this, buf_, size_, src_, dst_, tag_, flags_);
if (smpi_cfg_async_small_thresh() != 0 || (flags_ & MPI_REQ_RMA) != 0)
mut->unlock();
} else { /* the RECV flag was not set, so this is a send */
- simgrid::smpi::ActorExt* process = smpi_process_remote(simgrid::s4u::Actor::by_pid(dst_));
+ const simgrid::smpi::ActorExt* process = smpi_process_remote(simgrid::s4u::Actor::by_pid(dst_));
xbt_assert(process, "Actor pid=%d is gone??", dst_);
int rank = src_;
if (TRACE_smpi_view_internals()) {
static int nsleeps = 1;
double speed = s4u::this_actor::get_host()->get_speed();
double maxrate = smpi_cfg_iprobe_cpu_usage();
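  // Hedged sketch of the intent: iprobe seems to post a throwaway persistent receive request
  // for (source, tag, comm) so it can test the mailbox for a matching send without consuming
  // it, and it throttles repeated probing by charging a small compute cost (flops) derived
  // from the host speed, smpi_cfg_iprobe_cpu_usage() and the nsleeps counter.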
- MPI_Request request = new Request(nullptr, 0, MPI_CHAR,
- source == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : comm->group()->actor(source)->get_pid(),
- simgrid::s4u::this_actor::get_pid(), tag, comm, MPI_REQ_PERSISTENT | MPI_REQ_RECV);
+ auto request = new Request(nullptr, 0, MPI_CHAR,
+ source == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : comm->group()->actor(source)->get_pid(),
+ simgrid::s4u::this_actor::get_pid(), tag, comm, MPI_REQ_PERSISTENT | MPI_REQ_RECV);
if (smpi_iprobe_sleep > 0) {
/** Compute the number of flops we will sleep for **/
s4u::this_actor::exec_init(/*nsleeps: See comment above */ nsleeps *
return index;
}
-static int sort_accumulates(MPI_Request a, MPI_Request b)
+static int sort_accumulates(const Request* a, const Request* b)
{
return (a->tag() > b->tag());
}
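// Presumably used as a comparator (e.g. with std::sort) so that pending accumulate requests
// are applied in a deterministic, tag-ordered fashion when several of them complete together.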
int Request::grequest_complete(MPI_Request request)
{
- if ((!(request->flags_ & MPI_REQ_GENERALIZED)) || request->generalized_funcs->mutex==NULL)
+ if ((!(request->flags_ & MPI_REQ_GENERALIZED)) || request->generalized_funcs->mutex == nullptr)
return MPI_ERR_REQUEST;
request->generalized_funcs->mutex->lock();
request->flags_ |= MPI_REQ_COMPLETE; // in case wait is called after complete
}
}
-int Request::get_nbc_requests_size(){
+int Request::get_nbc_requests_size() const
+{
return nbc_requests_size_;
}
-MPI_Request* Request::get_nbc_requests(){
+MPI_Request* Request::get_nbc_requests() const
+{
return nbc_requests_;
}
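// Hedged note: nbc_requests_ presumably stores the child point-to-point requests spawned by
// a non-blocking collective, so callers can later wait on and free them when the collective
// completes.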
-
}
}