/* Copyright (c) 2007-2023. The SimGrid Team. All rights reserved. */

/* This program is free software; you can redistribute it and/or modify it
 * under the terms of the license (GNU LGPL) which comes with this package. */
6 #include "smpi_request.hpp"
9 #include "simgrid/Exception.hpp"
10 #include "simgrid/s4u/ConditionVariable.hpp"
11 #include "simgrid/s4u/Exec.hpp"
12 #include "simgrid/s4u/Mutex.hpp"
13 #include "smpi_comm.hpp"
14 #include "smpi_datatype.hpp"
15 #include "smpi_host.hpp"
16 #include "smpi_op.hpp"
17 #include "src/kernel/EngineImpl.hpp"
18 #include "src/kernel/activity/CommImpl.hpp"
19 #include "src/kernel/actor/ActorImpl.hpp"
20 #include "src/kernel/actor/SimcallObserver.hpp"
21 #include "src/mc/mc.h"
22 #include "src/mc/mc_replay.hpp"
23 #include "src/smpi/include/smpi_actor.hpp"
27 #include <mutex> // std::scoped_lock and std::unique_lock
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_request, smpi, "Logging specific to SMPI (request)");
static simgrid::config::Flag<double> smpi_iprobe_sleep(
    "smpi/iprobe", "Minimum time to inject inside a call to MPI_Iprobe", 1e-4);
static simgrid::config::Flag<double> smpi_test_sleep(
    "smpi/test", "Minimum time to inject inside a call to MPI_Test", 1e-4);
extern std::function<void(simgrid::kernel::activity::CommImpl*, void*, size_t)> smpi_comm_copy_data_callback;

namespace simgrid::smpi {
Request::Request(const void* buf, int count, MPI_Datatype datatype, aid_t src, aid_t dst, int tag, MPI_Comm comm,
                 unsigned flags, MPI_Op op)
    : buf_(const_cast<void*>(buf))
    , size_(datatype->size() * count)
    , src_(src)
    , dst_(dst)
    , tag_(tag)
    , comm_(comm)
    , flags_(flags)
    , op_(op)
{
  if (op != MPI_REPLACE && op != MPI_OP_NULL)
    op_->ref();
  detached_sender_ = nullptr;
  // get src_host if it's available (src is valid)
  if (auto src_process = simgrid::s4u::Actor::by_pid(src))
    src_host_ = src_process->get_host();
  unmatched_types_ = false;
  if (flags & MPI_REQ_PERSISTENT)
    refcount_ = 1;
}
void Request::unref(MPI_Request* request)
{
  xbt_assert(*request != MPI_REQUEST_NULL, "freeing an already free request");

  (*request)->refcount_--;
  if ((*request)->refcount_ < 0) {
    (*request)->print_request("wrong refcount");
    xbt_die("Whoops, wrong refcount");
  }
  if ((*request)->refcount_ == 0) {
    if ((*request)->flags_ & MPI_REQ_GENERALIZED) {
      ((*request)->generalized_funcs)->free_fn(((*request)->generalized_funcs)->extra_state);
    } else {
      Comm::unref((*request)->comm_);
      Datatype::unref((*request)->type_);
    }
    if ((*request)->op_ != MPI_REPLACE && (*request)->op_ != MPI_OP_NULL)
      Op::unref(&(*request)->op_);

    (*request)->print_request("Destroying");
    delete *request;
    *request = MPI_REQUEST_NULL;
  } else {
    (*request)->print_request("Decrementing");
  }
}
bool Request::match_types(MPI_Datatype stype, MPI_Datatype rtype)
{
  bool match = false;
  if ((stype == rtype) ||
      // byte and packed always match with anything
      (stype == MPI_PACKED || rtype == MPI_PACKED || stype == MPI_BYTE || rtype == MPI_BYTE) ||
      // complex datatypes - we don't properly match these yet, as it would mean checking each subtype recursively.
      (stype->flags() & DT_FLAG_DERIVED || rtype->flags() & DT_FLAG_DERIVED) ||
      // duplicated datatypes, check if underlying is ok
      (stype->duplicated_datatype() != MPI_DATATYPE_NULL && match_types(stype->duplicated_datatype(), rtype)) ||
      (rtype->duplicated_datatype() != MPI_DATATYPE_NULL && match_types(stype, rtype->duplicated_datatype())))
    match = true;
  else
    XBT_WARN("Mismatched datatypes: sending %s and receiving %s", stype->name().c_str(), rtype->name().c_str());
  return match;
}
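// Consequence of the rules above (illustrative): MPI_BYTE or MPI_PACKED on either side matches
// anything, and derived datatypes are accepted without recursive checking; only plain scalar
// mismatches such as MPI_INT vs MPI_DOUBLE are flagged. Even then the communication itself still
// proceeds; the mismatch is only reported as MPI_ERR_TYPE when the request completes.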
bool Request::match_common(MPI_Request req, MPI_Request sender, MPI_Request receiver)
{
  xbt_assert(sender, "Cannot match against null sender");
  xbt_assert(receiver, "Cannot match against null receiver");
  XBT_DEBUG("Trying to match %s of sender src %ld against %ld, tag %d against %d, id %d against %d",
            (req == receiver ? "send" : "recv"), sender->src_, receiver->src_, sender->tag_, receiver->tag_,
            sender->comm_->id(), receiver->comm_->id());

  if ((receiver->comm_->id() == MPI_UNDEFINED || sender->comm_->id() == MPI_UNDEFINED ||
       receiver->comm_->id() == sender->comm_->id()) &&
      ((receiver->src_ == MPI_ANY_SOURCE && (receiver->comm_->group()->rank(sender->src_) != MPI_UNDEFINED)) ||
       receiver->src_ == sender->src_) &&
      ((receiver->tag_ == MPI_ANY_TAG && sender->tag_ >= 0) || receiver->tag_ == sender->tag_)) {
    // we match, we can transfer some values
    if (receiver->src_ == MPI_ANY_SOURCE) {
      receiver->real_src_ = sender->src_;
      receiver->src_host_ = sender->src_host_;
    }
    if (receiver->tag_ == MPI_ANY_TAG)
      receiver->real_tag_ = sender->tag_;
    if ((receiver->flags_ & MPI_REQ_PROBE) == 0 && receiver->real_size_ < sender->real_size_) {
      XBT_DEBUG("Truncating message - should not happen: receiver size : %zu < sender size : %zu",
                receiver->real_size_, sender->real_size_);
      receiver->truncated_ = true;
    }
    // 0-sized datatypes/counts should not interfere and match
    if (sender->real_size_ != 0 && receiver->real_size_ != 0 && not match_types(sender->type_, receiver->type_))
      receiver->unmatched_types_ = true;
    if (sender->detached_)
      receiver->detached_sender_ = sender; // tie the sender to the receiver, as it is detached and has to be freed
                                           // in the receiver
    req->flags_ |= MPI_REQ_MATCHED; // mark as impossible to cancel anymore
    XBT_DEBUG("match succeeded");
    return true;
  }
  return false;
}
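// Wildcard resolution above, illustrated: a receive posted with src = MPI_ANY_SOURCE and
// tag = MPI_ANY_TAG matches the first compatible sender on the communicator; the concrete peer
// and tag are recorded in real_src_/real_tag_ so that the MPI_Status can report them, while
// src_/tag_ keep the wildcards so that restarting a persistent request matches afresh.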
void Request::init_buffer(int count)
{
  // FIXME Handle the case of a partial shared malloc.
  // This part handles the problem of non-contiguous memory (for the unserialization at the reception)
  if (not smpi_process()->replaying() &&
      ((((flags_ & MPI_REQ_RECV) != 0) && ((flags_ & MPI_REQ_ACCUMULATE) != 0)) || (type_->flags() & DT_FLAG_DERIVED))) {
    // This part handles the problem of non-contiguous memory
    old_buf_ = buf_;
    buf_     = xbt_malloc(count * type_->size());
    if ((type_->flags() & DT_FLAG_DERIVED) && ((flags_ & MPI_REQ_SEND) != 0)) {
      type_->serialize(old_buf_, buf_, count);
    }
  }
}
bool Request::match_recv(void* a, void* b, simgrid::kernel::activity::CommImpl*)
{
  auto* ref  = static_cast<MPI_Request>(a);
  auto* req  = static_cast<MPI_Request>(b);
  bool match = match_common(req, req, ref);
  if (not match || ref->comm_ == MPI_COMM_UNINITIALIZED || ref->comm_->is_smp_comm())
    return match;

  if (ref->comm_->get_received_messages_count(ref->comm_->group()->rank(req->src_),
                                              ref->comm_->group()->rank(req->dst_), req->tag_) == req->message_id_) {
    if (((ref->flags_ & MPI_REQ_PROBE) == 0) && ((req->flags_ & MPI_REQ_PROBE) == 0)) {
      XBT_DEBUG("increasing count in comm %p, which was %u from pid %ld, to pid %ld with tag %d", ref->comm_,
                ref->comm_->get_received_messages_count(ref->comm_->group()->rank(req->src_),
                                                        ref->comm_->group()->rank(req->dst_), req->tag_),
                req->src_, req->dst_, req->tag_);
      ref->comm_->increment_received_messages_count(ref->comm_->group()->rank(req->src_),
                                                    ref->comm_->group()->rank(req->dst_), req->tag_);
      if (ref->real_size_ > req->real_size_) {
        ref->real_size_ = req->real_size_;
      }
    }
  } else {
    match = false;
    req->flags_ &= ~MPI_REQ_MATCHED;
    ref->detached_sender_ = nullptr;
    XBT_DEBUG("Refusing to match message, as its ID is not the one I expect. in comm %p, %u != %u, "
              "from pid %ld to pid %ld, with tag %d",
              ref->comm_,
              ref->comm_->get_received_messages_count(ref->comm_->group()->rank(req->src_),
                                                      ref->comm_->group()->rank(req->dst_), req->tag_),
              req->message_id_, req->src_, req->dst_, req->tag_);
  }
  return match;
}
bool Request::match_send(void* a, void* b, simgrid::kernel::activity::CommImpl*)
{
  auto* ref = static_cast<MPI_Request>(a);
  auto* req = static_cast<MPI_Request>(b);
  return match_common(req, ref, req);
}
void Request::print_request(const char* message) const
{
  XBT_VERB("%s request %p [buf = %p, size = %zu, src = %ld, dst = %ld, tag = %d, flags = %x]", message, this, buf_,
           size_, src_, dst_, tag_, flags_);
}
/* factories, to hide the internal flags from the caller */
MPI_Request Request::bsend_init(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
  return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
                     dst != MPI_PROC_NULL ? comm->group()->actor(dst) : MPI_PROC_NULL, tag, comm,
                     MPI_REQ_PERSISTENT | MPI_REQ_SEND | MPI_REQ_PREPARED | MPI_REQ_BSEND);
}
MPI_Request Request::send_init(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
  return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
                     dst != MPI_PROC_NULL ? comm->group()->actor(dst) : MPI_PROC_NULL, tag, comm,
                     MPI_REQ_PERSISTENT | MPI_REQ_SEND | MPI_REQ_PREPARED);
}
MPI_Request Request::ssend_init(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
  return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
                     dst != MPI_PROC_NULL ? comm->group()->actor(dst) : MPI_PROC_NULL, tag, comm,
                     MPI_REQ_PERSISTENT | MPI_REQ_SSEND | MPI_REQ_SEND | MPI_REQ_PREPARED);
}
MPI_Request Request::isend_init(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
  return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
                     dst != MPI_PROC_NULL ? comm->group()->actor(dst) : MPI_PROC_NULL, tag, comm,
                     MPI_REQ_PERSISTENT | MPI_REQ_ISEND | MPI_REQ_SEND | MPI_REQ_PREPARED);
}
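// Typical lifecycle of the persistent requests built by the *_init() factories above
// (illustrative sketch of the user-level calls, not code from this file):
//   MPI_Send_init(buf, count, type, dst, tag, comm, &req);  // -> Request::send_init(), flagged MPI_REQ_PREPARED
//   for (int it = 0; it < niter; it++) {
//     MPI_Start(&req);                                      // -> Request::start()
//     MPI_Wait(&req, &status);                              // completes but keeps the persistent handle valid
//   }
//   MPI_Request_free(&req);                                 // -> Request::unref()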
MPI_Request Request::rma_send_init(const void *buf, int count, MPI_Datatype datatype, int src, int dst, int tag,
                                   MPI_Comm comm, MPI_Op op)
{
  MPI_Request request;
  if (op == MPI_OP_NULL)
    request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, comm->group()->actor(src),
                          dst != MPI_PROC_NULL ? comm->group()->actor(dst) : MPI_PROC_NULL, tag, comm,
                          MPI_REQ_RMA | MPI_REQ_NON_PERSISTENT | MPI_REQ_ISEND | MPI_REQ_SEND | MPI_REQ_PREPARED);
  else
    request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, comm->group()->actor(src),
                          dst != MPI_PROC_NULL ? comm->group()->actor(dst) : MPI_PROC_NULL, tag, comm,
                          MPI_REQ_RMA | MPI_REQ_NON_PERSISTENT | MPI_REQ_ISEND | MPI_REQ_SEND | MPI_REQ_PREPARED |
                              MPI_REQ_ACCUMULATE,
                          op);
  return request;
}
MPI_Request Request::recv_init(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm)
{
  aid_t source = MPI_PROC_NULL;
  if (src == MPI_ANY_SOURCE)
    source = MPI_ANY_SOURCE;
  else if (src != MPI_PROC_NULL)
    source = comm->group()->actor(src);
  return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype,
                     source, simgrid::s4u::this_actor::get_pid(), tag, comm,
                     MPI_REQ_PERSISTENT | MPI_REQ_RECV | MPI_REQ_PREPARED);
}
MPI_Request Request::rma_recv_init(void *buf, int count, MPI_Datatype datatype, int src, int dst, int tag,
                                   MPI_Comm comm, MPI_Op op)
{
  MPI_Request request;
  aid_t source = MPI_PROC_NULL;
  if (src == MPI_ANY_SOURCE)
    source = MPI_ANY_SOURCE;
  else if (src != MPI_PROC_NULL)
    source = comm->group()->actor(src);
  if (op == MPI_OP_NULL)
    request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, source,
                          dst != MPI_PROC_NULL ? comm->group()->actor(dst) : MPI_PROC_NULL, tag, comm,
                          MPI_REQ_RMA | MPI_REQ_NON_PERSISTENT | MPI_REQ_RECV | MPI_REQ_PREPARED);
  else
    request =
        new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, source,
                    dst != MPI_PROC_NULL ? comm->group()->actor(dst) : MPI_PROC_NULL, tag, comm,
                    MPI_REQ_RMA | MPI_REQ_NON_PERSISTENT | MPI_REQ_RECV | MPI_REQ_PREPARED | MPI_REQ_ACCUMULATE, op);
  return request;
}
MPI_Request Request::irecv_init(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm)
{
  aid_t source = MPI_PROC_NULL;
  if (src == MPI_ANY_SOURCE)
    source = MPI_ANY_SOURCE;
  else if (src != MPI_PROC_NULL)
    source = comm->group()->actor(src);
  return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype,
                     source, simgrid::s4u::this_actor::get_pid(), tag, comm,
                     MPI_REQ_PERSISTENT | MPI_REQ_RECV | MPI_REQ_PREPARED);
}
MPI_Request Request::ibsend(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
  auto* request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
                              dst != MPI_PROC_NULL ? comm->group()->actor(dst) : MPI_PROC_NULL, tag, comm,
                              MPI_REQ_NON_PERSISTENT | MPI_REQ_ISEND | MPI_REQ_SEND | MPI_REQ_BSEND);
  if (dst != MPI_PROC_NULL)
    request->start();
  return request;
}
MPI_Request Request::isend(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
  auto* request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
                              dst != MPI_PROC_NULL ? comm->group()->actor(dst) : MPI_PROC_NULL, tag, comm,
                              MPI_REQ_NON_PERSISTENT | MPI_REQ_ISEND | MPI_REQ_SEND);
  if (dst != MPI_PROC_NULL)
    request->start();
  return request;
}
MPI_Request Request::issend(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
  auto* request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
                              dst != MPI_PROC_NULL ? comm->group()->actor(dst) : MPI_PROC_NULL, tag, comm,
                              MPI_REQ_NON_PERSISTENT | MPI_REQ_ISEND | MPI_REQ_SSEND | MPI_REQ_SEND);
  if (dst != MPI_PROC_NULL)
    request->start();
  return request;
}
MPI_Request Request::irecv(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm)
{
  aid_t source = MPI_PROC_NULL;
  if (src == MPI_ANY_SOURCE)
    source = MPI_ANY_SOURCE;
  else if (src != MPI_PROC_NULL)
    source = comm->group()->actor(src);
  auto* request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, source,
                              simgrid::s4u::this_actor::get_pid(), tag, comm, MPI_REQ_NON_PERSISTENT | MPI_REQ_RECV);
  if (src != MPI_PROC_NULL)
    request->start();
  return request;
}
int Request::recv(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm, MPI_Status * status)
{
  MPI_Request request = irecv(buf, count, datatype, src, tag, comm);
  int retval = wait(&request, status);
  request = nullptr;
  return retval;
}
void Request::bsend(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
  auto* request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
                              dst != MPI_PROC_NULL ? comm->group()->actor(dst) : MPI_PROC_NULL, tag, comm,
                              MPI_REQ_NON_PERSISTENT | MPI_REQ_SEND | MPI_REQ_BSEND);

  if (dst != MPI_PROC_NULL)
    request->start();
  wait(&request, MPI_STATUS_IGNORE);
  request = nullptr;
}
void Request::send(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
  auto* request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
                              dst != MPI_PROC_NULL ? comm->group()->actor(dst) : MPI_PROC_NULL, tag, comm,
                              MPI_REQ_NON_PERSISTENT | MPI_REQ_SEND);
  if (dst != MPI_PROC_NULL)
    request->start();
  wait(&request, MPI_STATUS_IGNORE);
  request = nullptr;
}
void Request::ssend(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
  auto* request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
                              dst != MPI_PROC_NULL ? comm->group()->actor(dst) : MPI_PROC_NULL, tag, comm,
                              MPI_REQ_NON_PERSISTENT | MPI_REQ_SSEND | MPI_REQ_SEND);

  if (dst != MPI_PROC_NULL)
    request->start();
  wait(&request, MPI_STATUS_IGNORE);
  request = nullptr;
}
void Request::sendrecv(const void *sendbuf, int sendcount, MPI_Datatype sendtype, int dst, int sendtag,
                       void *recvbuf, int recvcount, MPI_Datatype recvtype, int src, int recvtag,
                       MPI_Comm comm, MPI_Status * status)
{
  aid_t source = MPI_PROC_NULL;
  if (src == MPI_ANY_SOURCE)
    source = MPI_ANY_SOURCE;
  else if (src != MPI_PROC_NULL)
    source = comm->group()->actor(src);
  aid_t destination = dst != MPI_PROC_NULL ? comm->group()->actor(dst) : MPI_PROC_NULL;

  std::array<MPI_Request, 2> requests;
  std::array<MPI_Status, 2> stats;
  if (aid_t myid = simgrid::s4u::this_actor::get_pid(); (destination == myid) && (source == myid)) {
    Datatype::copy(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype);
    if (status != MPI_STATUS_IGNORE) {
      status->MPI_SOURCE = source;
      status->MPI_TAG    = recvtag;
      status->MPI_ERROR  = MPI_SUCCESS;
      status->count      = sendcount * sendtype->size();
    }
    return;
  }
  requests[0] = isend_init(sendbuf, sendcount, sendtype, dst, sendtag, comm);
  requests[1] = irecv_init(recvbuf, recvcount, recvtype, src, recvtag, comm);
  startall(2, requests.data());
  waitall(2, requests.data(), stats.data());
  unref(&requests[0]);
  unref(&requests[1]);
  if (status != MPI_STATUS_IGNORE) {
    // Copy receive status
    *status = stats[1];
  }
}
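// start() below implements SMPI's two-mailbox scheme. In summary (matching the branches that
// follow, thresholds named after their accessors):
// - if the message is smaller than smpi_cfg_async_small_thresh(), it may go through the
//   per-process "small" mailbox and complete eagerly, even before the receive is posted;
// - larger messages and synchronous sends (SSEND) go through the regular mailbox, in a
//   rendezvous fashion;
// - sends smaller than smpi_cfg_detached_send_thresh() are detached: the payload is copied to a
//   temporary buffer and the sender returns immediately, the copy being freed upon delivery.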
void Request::start()
{
  s4u::Mailbox* mailbox;

  xbt_assert(action_ == nullptr, "Cannot (re-)start unfinished communication");
  // reinitialize temporary buffer for persistent requests
  if (real_size_ > 0 && flags_ & MPI_REQ_FINISHED) {
    buf_ = old_buf_;
    init_buffer(real_size_ / type_->size());
  }
  flags_ &= ~MPI_REQ_PREPARED;
  flags_ &= ~MPI_REQ_FINISHED;

  // we make a copy here, as the size is modified by simix, and we may reuse the request in another receive later
  real_size_ = size_;
  if ((flags_ & MPI_REQ_RECV) != 0) {
    this->print_request("New recv");

    simgrid::smpi::ActorExt* process = smpi_process_remote(simgrid::s4u::Actor::by_pid(dst_));

    std::unique_lock<s4u::Mutex> mut_lock;
    if (smpi_cfg_async_small_thresh() != 0 || (flags_ & MPI_REQ_RMA) != 0)
      mut_lock = std::unique_lock(*process->mailboxes_mutex());

    bool is_probe = ((flags_ & MPI_REQ_PROBE) != 0);
    flags_ |= MPI_REQ_PROBE;

    if (smpi_cfg_async_small_thresh() == 0 && (flags_ & MPI_REQ_RMA) == 0) {
      mailbox = process->mailbox();
    } else if (((flags_ & MPI_REQ_RMA) != 0) || static_cast<int>(size_) < smpi_cfg_async_small_thresh()) {
      // We have to check both mailboxes (because SSEND messages are sent to the large mbox).
      // begin with the more appropriate one : the small one.
      mailbox = process->mailbox_small();
      XBT_DEBUG("Is there a corresponding send already posted in the small mailbox %s (in case of SSEND)?",
                mailbox->get_cname());
      simgrid::kernel::activity::ActivityImplPtr action = mailbox->iprobe(0, &match_recv, static_cast<void*>(this));

      if (action == nullptr) {
        mailbox = process->mailbox();
        XBT_DEBUG("No, nothing in the small mailbox test the other one : %s", mailbox->get_cname());
        action = mailbox->iprobe(0, &match_recv, static_cast<void*>(this));
        if (action == nullptr) {
          XBT_DEBUG("Still nothing, switch back to the small mailbox : %s", mailbox->get_cname());
          mailbox = process->mailbox_small();
        }
      } else {
        XBT_DEBUG("yes there was something for us in the small mailbox");
      }
    } else {
      mailbox = process->mailbox_small();
      XBT_DEBUG("Is there a corresponding send already posted the small mailbox?");
      simgrid::kernel::activity::ActivityImplPtr action = mailbox->iprobe(0, &match_recv, static_cast<void*>(this));

      if (action == nullptr) {
        XBT_DEBUG("No, nothing in the permanent receive mailbox");
        mailbox = process->mailbox();
      } else {
        XBT_DEBUG("yes there was something for us in the small mailbox");
      }
    }
    if (not is_probe)
      flags_ &= ~MPI_REQ_PROBE;
    kernel::actor::CommIrecvSimcall observer{process->get_actor()->get_impl(),
                                             mailbox->get_impl(),
                                             static_cast<unsigned char*>(buf_),
                                             &real_size_,
                                             &match_recv,
                                             process->replaying() ? &smpi_comm_null_copy_buffer_callback
                                                                  : smpi_comm_copy_data_callback,
                                             this,
                                             -1.0,
                                             process->call_location()->get_call_location()};
    observer.set_tag(tag_);

    action_ = kernel::actor::simcall_answered([&observer] { return kernel::activity::CommImpl::irecv(&observer); },
                                              &observer);

    XBT_DEBUG("recv simcall posted");
  } else { /* the RECV flag was not set, so this is a send */
    simgrid::smpi::ActorExt* process = smpi_process_remote(simgrid::s4u::Actor::by_pid(dst_));
    xbt_assert(process, "Actor pid=%ld is gone??", dst_);
    if (TRACE_smpi_view_internals())
      TRACE_smpi_send(src_, src_, dst_, tag_, size_);
    this->print_request("New send");

    message_id_ = comm_->get_sent_messages_count(comm_->group()->rank(src_), comm_->group()->rank(dst_), tag_);
    comm_->increment_sent_messages_count(comm_->group()->rank(src_), comm_->group()->rank(dst_), tag_);

    void* buf = buf_;
    if ((flags_ & MPI_REQ_SSEND) == 0 && ((flags_ & MPI_REQ_RMA) != 0 || (flags_ & MPI_REQ_BSEND) != 0 ||
                                          static_cast<int>(size_) < smpi_cfg_detached_send_thresh())) {
      detached_ = true;
      XBT_DEBUG("Send request %p is detached", this);
      if (not(type_->flags() & DT_FLAG_DERIVED)) {
        void* oldbuf = buf_;
        if (not process->replaying() && oldbuf != nullptr && size_ != 0) {
          if (smpi_switch_data_segment(simgrid::s4u::Actor::by_pid(src_), buf_))
            XBT_DEBUG("Privatization : We are sending from a zone inside global memory. Switch data segment ");
          // we need this temporary buffer even for bsend, as it will be released in the copy callback and we don't
          // have a way to differentiate it; so actually ... don't use manually attached buffer space.
          buf = xbt_malloc(size_);
          memcpy(buf, oldbuf, size_);
          XBT_DEBUG("buf %p copied into %p", oldbuf, buf);
        }
      }
    }

    // if we are giving back the control to the user without waiting for completion, we have to inject timings
    double sleeptime = 0.0;
    if (detached_ || ((flags_ & (MPI_REQ_ISEND | MPI_REQ_SSEND)) != 0)) { // issend should be treated as isend
      // isend and send timings may be different
      sleeptime =
          ((flags_ & MPI_REQ_ISEND) != 0)
              ? simgrid::s4u::Actor::self()->get_host()->extension<simgrid::smpi::Host>()->oisend(
                    size_, simgrid::s4u::Actor::by_pid(src_)->get_host(), simgrid::s4u::Actor::by_pid(dst_)->get_host())
              : simgrid::s4u::Actor::self()->get_host()->extension<simgrid::smpi::Host>()->osend(
                    size_, simgrid::s4u::Actor::by_pid(src_)->get_host(),
                    simgrid::s4u::Actor::by_pid(dst_)->get_host());
    }

    if (sleeptime > 0.0) {
      simgrid::s4u::this_actor::sleep_for(sleeptime);
      XBT_DEBUG("sending size of %zu : sleep %f ", size_, sleeptime);
    }

    std::unique_lock<s4u::Mutex> mut_lock;
    if (smpi_cfg_async_small_thresh() != 0 || (flags_ & MPI_REQ_RMA) != 0)
      mut_lock = std::unique_lock(*process->mailboxes_mutex());

    if (not(smpi_cfg_async_small_thresh() != 0 || (flags_ & MPI_REQ_RMA) != 0)) {
      mailbox = process->mailbox();
    } else if (((flags_ & MPI_REQ_RMA) != 0) || static_cast<int>(size_) < smpi_cfg_async_small_thresh()) { // eager mode
      bool is_probe = ((flags_ & MPI_REQ_PROBE) != 0);
      flags_ |= MPI_REQ_PROBE;

      mailbox = process->mailbox();
      XBT_DEBUG("Is there a corresponding recv already posted in the large mailbox %s?", mailbox->get_cname());
      if (not mailbox->iprobe(1, &match_send, static_cast<void*>(this))) {
        if ((flags_ & MPI_REQ_SSEND) == 0) {
          mailbox = process->mailbox_small();
          XBT_DEBUG("No, nothing in the large mailbox, message is to be sent on the small one %s",
                    mailbox->get_cname());
        } else {
          mailbox = process->mailbox_small();
          XBT_DEBUG("SSEND : Is there a corresponding recv already posted in the small mailbox %s?",
                    mailbox->get_cname());
          if (not mailbox->iprobe(1, &match_send, static_cast<void*>(this))) {
            XBT_DEBUG("No, we are first, send to large mailbox");
            mailbox = process->mailbox();
          }
        }
      } else {
        XBT_DEBUG("Yes there was something for us in the large mailbox");
      }
      if (not is_probe)
        flags_ &= ~MPI_REQ_PROBE;
    } else {
      mailbox = process->mailbox();
      XBT_DEBUG("Send request %p is in the large mailbox %s (buf: %p)", this, mailbox->get_cname(), buf_);
    }

    size_t payload_size_ = size_ + 16; // MPI envelope size (tag+dest+communicator)
    kernel::actor::CommIsendSimcall observer{
        simgrid::kernel::EngineImpl::get_instance()->get_actor_by_pid(src_), mailbox->get_impl(),
        static_cast<double>(payload_size_), -1, static_cast<unsigned char*>(buf), real_size_, &match_send,
        &xbt_free_f, // how to free the userdata if a detached send fails
        process->replaying() ? &smpi_comm_null_copy_buffer_callback : smpi_comm_copy_data_callback, this,
        // detach if msg size < eager/rdv switch limit
        detached_, process->call_location()->get_call_location()};
    observer.set_tag(tag_);
    action_ = kernel::actor::simcall_answered([&observer] { return kernel::activity::CommImpl::isend(&observer); },
                                              &observer);
    XBT_DEBUG("send simcall posted");

    /* FIXME: detached sends are not traceable (action_ == nullptr) */
    if (action_ != nullptr) {
      boost::static_pointer_cast<kernel::activity::CommImpl>(action_)->set_tracing_category(
          smpi_process()->get_tracing_category());
    }
  }
}
void Request::startall(int count, MPI_Request * requests)
{
  if (requests == nullptr)
    return;

  for (int i = 0; i < count; i++) {
    if (requests[i]->src_ != MPI_PROC_NULL && requests[i]->dst_ != MPI_PROC_NULL)
      requests[i]->start();
  }
}
void Request::cancel()
{
  this->flags_ |= MPI_REQ_CANCELLED;
  if (this->action_ != nullptr)
    (boost::static_pointer_cast<simgrid::kernel::activity::CommImpl>(this->action_))->cancel();
}
int Request::test(MPI_Request * request, MPI_Status * status, int* flag) {
  // assume that *request is not MPI_REQUEST_NULL (filtered in PMPI_Test or testall before)
  // to avoid deadlocks if used as a break condition, such as
  // while (MPI_Test(request, flag, status) && flag) dostuff...
  // because the time will not normally advance when only calls to MPI_Test are made -> deadlock
  // multiplier to the sleeptime, to increase speed of execution, each failed test will increase it
  xbt_assert(*request != MPI_REQUEST_NULL);

  static int nsleeps = 1;
  int ret = MPI_SUCCESS;

  if (smpi_test_sleep > 0)
    simgrid::s4u::this_actor::sleep_for(nsleeps * smpi_test_sleep);

  Status::empty(status);
  *flag = 1;

  if ((*request)->flags_ & MPI_REQ_NBC) {
    *flag = finish_nbc_requests(request, 1);
  }

  if (((*request)->flags_ & (MPI_REQ_PREPARED | MPI_REQ_FINISHED)) == 0) {
    if ((*request)->action_ != nullptr && ((*request)->flags_ & MPI_REQ_CANCELLED) == 0) {
      try {
        kernel::actor::ActorImpl* issuer = kernel::actor::ActorImpl::self();
        simgrid::smpi::ActorExt* process = smpi_process_remote(simgrid::s4u::Actor::by_pid(issuer->get_pid()));
        kernel::actor::ActivityTestSimcall observer{issuer, (*request)->action_.get(),
                                                    process->call_location()->get_call_location()};
        *flag = kernel::actor::simcall_answered(
            [&observer] { return observer.get_activity()->test(observer.get_issuer()); }, &observer);
      } catch (const Exception&) {
        *flag = 0;
        return ret;
      }
    }
    if (((*request)->flags_ & MPI_REQ_GENERALIZED) && not((*request)->flags_ & MPI_REQ_COMPLETE))
      *flag = 0;
    if (*flag) {
      finish_wait(request, status); // may invalidate *request
      if (*request != MPI_REQUEST_NULL && ((*request)->flags_ & MPI_REQ_GENERALIZED)) {
        MPI_Status tmp_status;
        MPI_Status* mystatus;
        if (status == MPI_STATUS_IGNORE) {
          mystatus = &tmp_status;
          Status::empty(mystatus);
        } else {
          mystatus = status;
        }
        ret = ((*request)->generalized_funcs)->query_fn(((*request)->generalized_funcs)->extra_state, mystatus);
      }
      nsleeps = 1; // reset the number of sleeps we will do next time
      if (*request != MPI_REQUEST_NULL && ((*request)->flags_ & MPI_REQ_PERSISTENT) == 0)
        *request = MPI_REQUEST_NULL;
    } else if (smpi_cfg_grow_injected_times()) {
      nsleeps++;
    }
  }
  return ret;
}
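// The injected sleep above escalates: each unsuccessful test bumps the nsleeps multiplier (when
// the option behind smpi_cfg_grow_injected_times() is enabled, which is the default), and a
// successful test resets it to 1. Repeated polling thus costs increasing simulated time, which
// lets busy-wait loops built on MPI_Test converge instead of livelocking.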
int Request::testsome(int incount, MPI_Request requests[], int *count, int *indices, MPI_Status status[])
{
  int error = 0, count_dead = 0, flag = 0;
  MPI_Status stat;
  MPI_Status* pstat = status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat;
  *count = 0;
  for (int i = 0; i < incount; i++) {
    if (requests[i] != MPI_REQUEST_NULL && not (requests[i]->flags_ & MPI_REQ_FINISHED)) {
      if (test(&requests[i], pstat, &flag) != MPI_SUCCESS)
        error = 1;
      if (flag) {
        indices[*count] = i;
        if (status != MPI_STATUSES_IGNORE)
          status[*count] = *pstat;
        (*count)++;
        if ((requests[i] != MPI_REQUEST_NULL) && (requests[i]->flags_ & MPI_REQ_NON_PERSISTENT))
          requests[i] = MPI_REQUEST_NULL;
      }
    } else
      count_dead++;
  }
  if (count_dead == incount)
    *count = MPI_UNDEFINED;
  return error != 0 ? MPI_ERR_IN_STATUS : MPI_SUCCESS;
}
int Request::testany(int count, MPI_Request requests[], int *index, int* flag, MPI_Status * status)
{
  std::vector<simgrid::kernel::activity::ActivityImpl*> comms;
  comms.reserve(count);

  *flag = 0;
  int ret = MPI_SUCCESS;
  *index = MPI_UNDEFINED;

  std::vector<int> map; /** Maps all matching comms back to their location in requests **/
  for (int i = 0; i < count; i++) {
    if ((requests[i] != MPI_REQUEST_NULL) && requests[i]->action_ && not(requests[i]->flags_ & MPI_REQ_PREPARED)) {
      comms.push_back(requests[i]->action_.get());
      map.push_back(i);
    }
  }
  if (not map.empty()) {
    // multiplier to the sleeptime, to increase speed of execution, each failed testany will increase it
    static int nsleeps = 1;
    if (smpi_test_sleep > 0)
      simgrid::s4u::this_actor::sleep_for(nsleeps * smpi_test_sleep);
    ssize_t i;
    try {
      kernel::actor::ActorImpl* issuer = kernel::actor::ActorImpl::self();
      simgrid::smpi::ActorExt* process = smpi_process_remote(simgrid::s4u::Actor::by_pid(issuer->get_pid()));
      kernel::actor::ActivityTestanySimcall observer{issuer, comms, process->call_location()->get_call_location()};
      i = kernel::actor::simcall_answered(
          [&observer] {
            return kernel::activity::ActivityImpl::test_any(observer.get_issuer(), observer.get_activities());
          },
          &observer);
    } catch (const Exception&) {
      XBT_DEBUG("Exception in testany");
      return 0;
    }

    if (i != -1) { // -1 is not MPI_UNDEFINED but a SIMIX return code. (nothing matches)
      *index = map[i];
      if (requests[*index] != MPI_REQUEST_NULL && (requests[*index]->flags_ & MPI_REQ_GENERALIZED) &&
          not(requests[*index]->flags_ & MPI_REQ_COMPLETE)) {
        *flag = 0;
      } else {
        finish_wait(&requests[*index], status);
        if (requests[*index] != MPI_REQUEST_NULL && (requests[*index]->flags_ & MPI_REQ_GENERALIZED)) {
          MPI_Status tmp_status;
          MPI_Status* mystatus;
          if (status == MPI_STATUS_IGNORE) {
            mystatus = &tmp_status;
            Status::empty(mystatus);
          } else {
            mystatus = status;
          }
          ret = (requests[*index]->generalized_funcs)->query_fn((requests[*index]->generalized_funcs)->extra_state, mystatus);
        }

        if (requests[*index] != MPI_REQUEST_NULL && requests[*index]->flags_ & MPI_REQ_NBC) {
          *flag = finish_nbc_requests(&requests[*index], 1);
        }

        if (requests[*index] != MPI_REQUEST_NULL && (requests[*index]->flags_ & MPI_REQ_NON_PERSISTENT))
          requests[*index] = MPI_REQUEST_NULL;
        XBT_DEBUG("Testany - returning with index %d", *index);
        *flag = 1;
      }
      nsleeps = 1;
    } else {
      nsleeps++;
    }
  } else {
    XBT_DEBUG("Testany on inactive handles, returning flag=1 but empty status");
    // all requests are null or inactive, return true
    *flag = 1;
    *index = MPI_UNDEFINED;
    Status::empty(status);
  }

  return ret;
}
int Request::testall(int count, MPI_Request requests[], int* outflag, MPI_Status status[])
{
  MPI_Status stat;
  MPI_Status* pstat = status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat;
  int flag = 0, error = 0;
  *outflag = 1;
  for (int i = 0; i < count; i++) {
    if (requests[i] != MPI_REQUEST_NULL && not(requests[i]->flags_ & MPI_REQ_PREPARED)) {
      int ret = test(&requests[i], pstat, &flag);
      if (not flag)
        *outflag = 0;
      if (ret != MPI_SUCCESS)
        error = 1;
    } else
      Status::empty(pstat);
    if (status != MPI_STATUSES_IGNORE)
      status[i] = *pstat;
  }
  return error == 1 ? MPI_ERR_IN_STATUS : MPI_SUCCESS;
}
void Request::probe(int source, int tag, MPI_Comm comm, MPI_Status* status){
  int flag = 0;
  // FIXME find another way to avoid busy waiting?
  // the issue here is that we have to wait on a nonexistent comm
  while (flag == 0) {
    iprobe(source, tag, comm, &flag, status);
    XBT_DEBUG("Busy Waiting on probing : %d", flag);
  }
}
void Request::iprobe(int source, int tag, MPI_Comm comm, int* flag, MPI_Status* status){
  // to avoid deadlock, we have to sleep some time here, or the timer won't advance and we will only do iprobe simcalls
  // especially when used as a break condition, such as while (MPI_Iprobe(...)) dostuff...
  // nsleeps is a multiplier to the sleeptime, to increase speed of execution, each failed iprobe will increase it
  // This can speed up the execution of certain applications by an order of magnitude, such as HPL
  static int nsleeps = 1;
  double speed   = s4u::this_actor::get_host()->get_speed();
  double maxrate = smpi_cfg_iprobe_cpu_usage();
  auto* request =
      new Request(nullptr, 0, MPI_CHAR, source == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : comm->group()->actor(source),
                  simgrid::s4u::this_actor::get_pid(), tag, comm, MPI_REQ_PERSISTENT | MPI_REQ_RECV | MPI_REQ_PROBE);
  if (smpi_iprobe_sleep > 0) {
    /** Compute the number of flops we will sleep **/
    s4u::this_actor::exec_init(/*nsleeps: See comment above */ nsleeps *
                               /*(seconds * flop/s -> total flops)*/ smpi_iprobe_sleep * speed * maxrate)
        /* Not the entire CPU can be used when iprobing: This is important for
         * the energy consumption caused by polling with iprobes.
         * Note also that the number of flops that was
         * computed above contains a maxrate factor and is hence reduced (maxrate < 1)
         */
        ->set_bound(maxrate * speed)
        ->start()
        ->wait();
  }
  // behave like a receive, but don't do it
  s4u::Mailbox* mailbox;

  request->print_request("New iprobe");
  // We have to test both mailboxes as we don't know if we will receive one or another
  if (smpi_cfg_async_small_thresh() > 0) {
    mailbox = smpi_process()->mailbox_small();
    XBT_DEBUG("Trying to probe the perm recv mailbox");
    request->action_ = mailbox->iprobe(0, &match_recv, static_cast<void*>(request));
  }

  if (request->action_ == nullptr) {
    mailbox = smpi_process()->mailbox();
    XBT_DEBUG("trying to probe the other mailbox");
    request->action_ = mailbox->iprobe(0, &match_recv, static_cast<void*>(request));
  }

  if (request->action_ != nullptr) {
    kernel::activity::CommImplPtr sync_comm = boost::static_pointer_cast<kernel::activity::CommImpl>(request->action_);
    const Request* req = static_cast<MPI_Request>(sync_comm->src_data_);
    *flag = 1;
    if (status != MPI_STATUS_IGNORE && (req->flags_ & MPI_REQ_PREPARED) == 0) {
      status->MPI_SOURCE = comm->group()->rank(req->src_);
      status->MPI_TAG    = req->tag_;
      status->MPI_ERROR  = MPI_SUCCESS;
      status->count      = req->real_size_;
    }
    nsleeps = 1; // reset the number of sleeps we will do next time
  } else {
    *flag = 0;
    if (smpi_cfg_grow_injected_times())
      nsleeps++;
  }
  unref(&request);
  xbt_assert(request == MPI_REQUEST_NULL);
}
int Request::finish_nbc_requests(MPI_Request* request, int test){
  int flag = 1;
  int ret  = 0;
  if (test == 0)
    ret = waitall((*request)->nbc_requests_.size(), (*request)->nbc_requests_.data(), MPI_STATUSES_IGNORE);
  else
    ret = testall((*request)->nbc_requests_.size(), (*request)->nbc_requests_.data(), &flag, MPI_STATUSES_IGNORE);
  if (ret != MPI_SUCCESS)
    xbt_die("Failure when waiting on non blocking collective sub-requests");
  if (flag == 1) {
    XBT_DEBUG("Finishing non blocking collective request with %zu sub-requests", (*request)->nbc_requests_.size());
    for (auto& req : (*request)->nbc_requests_) {
      if ((*request)->buf_ != nullptr && req != MPI_REQUEST_NULL) { // reduce case
        void* buf = req->buf_;
        if ((*request)->type_->flags() & DT_FLAG_DERIVED)
          buf = req->old_buf_;
        if (req->flags_ & MPI_REQ_RECV) {
          if ((*request)->op_ != MPI_OP_NULL) {
            int count = (*request)->size_ / (*request)->type_->size();
            (*request)->op_->apply(buf, (*request)->buf_, &count, (*request)->type_);
          }
          smpi_free_tmp_buffer(static_cast<unsigned char*>(buf));
        }
      }
      if (req != MPI_REQUEST_NULL)
        Request::unref(&req);
    }
    (*request)->nbc_requests_.clear();
  }
  return flag;
}
void Request::finish_wait(MPI_Request* request, MPI_Status * status)
{
  MPI_Request req = *request;
  Status::empty(status);
  if ((req->flags_ & MPI_REQ_CANCELLED) != 0 && (req->flags_ & MPI_REQ_MATCHED) == 0) {
    if (status != MPI_STATUS_IGNORE)
      status->cancelled = 1;
    if (req->detached_sender_ != nullptr)
      unref(&(req->detached_sender_));
    unref(request);
    return;
  }

  if ((req->flags_ & (MPI_REQ_PREPARED | MPI_REQ_GENERALIZED | MPI_REQ_FINISHED)) == 0) {
    if (status != MPI_STATUS_IGNORE) {
      if (req->src_ == MPI_PROC_NULL || req->dst_ == MPI_PROC_NULL) {
        Status::empty(status);
        status->MPI_SOURCE = MPI_PROC_NULL;
      } else {
        aid_t src = req->src_ == MPI_ANY_SOURCE ? req->real_src_ : req->src_;
        status->MPI_SOURCE = req->comm_->group()->rank(src);
        status->MPI_TAG    = req->tag_ == MPI_ANY_TAG ? req->real_tag_ : req->tag_;
        status->MPI_ERROR  = req->truncated_ ? MPI_ERR_TRUNCATE : MPI_SUCCESS;
      }
      // this handles the case where size in receive differs from size in send
      status->count = req->real_size_;
    }
    // detached send will be finished at the other end
    if (not(req->detached_ && ((req->flags_ & MPI_REQ_SEND) != 0))) {
      req->print_request("Finishing");
      MPI_Datatype datatype = req->type_;

      // FIXME Handle the case of a partial shared malloc.
      if (not smpi_process()->replaying() &&
          (((req->flags_ & MPI_REQ_ACCUMULATE) != 0) || (datatype->flags() & DT_FLAG_DERIVED))) {
        if (smpi_switch_data_segment(simgrid::s4u::Actor::self(), req->old_buf_))
          XBT_VERB("Privatization : We are unserializing to a zone in global memory. Switch data segment ");

        if (datatype->flags() & DT_FLAG_DERIVED) {
          // This part handles the problem of non-contiguous memory: the unserialization at the reception
          if ((req->flags_ & MPI_REQ_RECV) && datatype->size() != 0)
            datatype->unserialize(req->buf_, req->old_buf_, req->real_size_ / datatype->size(), req->op_);
          xbt_free(req->buf_);
          req->buf_ = nullptr;
        } else if (req->flags_ & MPI_REQ_RECV) { // apply op on contiguous buffer for accumulate
          if (datatype->size() != 0) {
            int n = req->real_size_ / datatype->size();
            req->op_->apply(req->buf_, req->old_buf_, &n, datatype);
          }
          xbt_free(req->buf_);
          req->buf_ = nullptr;
        }
      }
    }
  }

  if (TRACE_smpi_view_internals() && ((req->flags_ & MPI_REQ_RECV) != 0)) {
    aid_t rank       = simgrid::s4u::this_actor::get_pid();
    aid_t src_traced = (req->src_ == MPI_ANY_SOURCE ? req->real_src_ : req->src_);
    TRACE_smpi_recv(src_traced, rank, req->tag_);
  }
  if (req->detached_sender_ != nullptr) {
    // integrate pseudo-timing for buffering of small messages, do not bother to execute the simcall if 0
    simgrid::s4u::Host* dst_host = simgrid::s4u::Actor::by_pid(req->dst_)->get_host();
    if (double sleeptime = simgrid::s4u::Actor::self()->get_host()->extension<simgrid::smpi::Host>()->orecv(
            req->real_size(), req->src_host_, dst_host);
        sleeptime > 0.0) {
      simgrid::s4u::this_actor::sleep_for(sleeptime);
      XBT_DEBUG("receiving size of %zu : sleep %f ", req->real_size_, sleeptime);
    }
    unref(&(req->detached_sender_));
  }
  if (req->flags_ & MPI_REQ_PERSISTENT)
    req->action_ = nullptr;
  req->flags_ |= MPI_REQ_FINISHED;

  if (req->truncated_ || req->unmatched_types_) {
    char error_string[MPI_MAX_ERROR_STRING];
    int error_size;
    int errkind;
    if (req->truncated_)
      errkind = MPI_ERR_TRUNCATE;
    else
      errkind = MPI_ERR_TYPE;
    PMPI_Error_string(errkind, error_string, &error_size);
    MPI_Errhandler err = (req->comm_) ? (req->comm_)->errhandler() : MPI_ERRHANDLER_NULL;
    if (err == MPI_ERRHANDLER_NULL || err == MPI_ERRORS_RETURN)
      XBT_WARN("recv - returned %.*s instead of MPI_SUCCESS", error_size, error_string);
    else if (err == MPI_ERRORS_ARE_FATAL)
      xbt_die("recv - returned %.*s instead of MPI_SUCCESS", error_size, error_string);
    else
      err->call((req->comm_), errkind);
    if (err != MPI_ERRHANDLER_NULL)
      simgrid::smpi::Errhandler::unref(err);
    MC_assert(not MC_is_active()); /* Only fail in MC mode */
  }
  if (req->src_ != MPI_PROC_NULL && req->dst_ != MPI_PROC_NULL)
    unref(request);
}
int Request::wait(MPI_Request * request, MPI_Status * status)
{
  // assume that *request is not MPI_REQUEST_NULL (filtered in PMPI_Wait before)
  xbt_assert(*request != MPI_REQUEST_NULL);

  int ret = MPI_SUCCESS;

  if ((*request)->src_ == MPI_PROC_NULL || (*request)->dst_ == MPI_PROC_NULL) {
    if (status != MPI_STATUS_IGNORE) {
      Status::empty(status);
      status->MPI_SOURCE = MPI_PROC_NULL;
    }
    (*request) = MPI_REQUEST_NULL;
    return ret;
  }

  (*request)->print_request("Waiting");
  if ((*request)->flags_ & (MPI_REQ_PREPARED | MPI_REQ_FINISHED)) {
    Status::empty(status);
    return ret;
  }

  if ((*request)->action_ != nullptr) {
    try {
      // this is not a detached send
      kernel::actor::ActorImpl* issuer = kernel::actor::ActorImpl::self();
      simgrid::smpi::ActorExt* process = smpi_process_remote(simgrid::s4u::Actor::by_pid(issuer->get_pid()));
      kernel::actor::ActivityWaitSimcall observer{issuer, (*request)->action_.get(), -1,
                                                  process->call_location()->get_call_location()};
      kernel::actor::simcall_blocking([issuer, &observer] { observer.get_activity()->wait_for(issuer, -1); },
                                      &observer);
    } catch (const CancelException&) {
      XBT_VERB("Request cancelled");
    }
  }

  if ((*request)->flags_ & MPI_REQ_GENERALIZED) {
    if (not((*request)->flags_ & MPI_REQ_COMPLETE)) {
      const std::scoped_lock lock(*(*request)->generalized_funcs->mutex);
      (*request)->generalized_funcs->cond->wait((*request)->generalized_funcs->mutex);
    }
    MPI_Status tmp_status;
    MPI_Status* mystatus;
    if (status == MPI_STATUS_IGNORE) {
      mystatus = &tmp_status;
      Status::empty(mystatus);
    } else {
      mystatus = status;
    }
    ret = ((*request)->generalized_funcs)->query_fn(((*request)->generalized_funcs)->extra_state, mystatus);
  }

  if ((*request)->truncated_)
    ret = MPI_ERR_TRUNCATE;

  if ((*request)->flags_ & MPI_REQ_NBC)
    finish_nbc_requests(request, 0);

  finish_wait(request, status); // may invalidate *request
  if (*request != MPI_REQUEST_NULL && (((*request)->flags_ & MPI_REQ_NON_PERSISTENT) != 0))
    *request = MPI_REQUEST_NULL;
  return ret;
}
int Request::waitany(int count, MPI_Request requests[], MPI_Status * status)
{
  int index = MPI_UNDEFINED;

  if (count > 0) {
    // Wait for a request to complete
    std::vector<simgrid::kernel::activity::ActivityImpl*> comms;
    std::vector<int> map;
    XBT_DEBUG("Wait for one of %d", count);
    for (int i = 0; i < count; i++) {
      if (requests[i] != MPI_REQUEST_NULL && not(requests[i]->flags_ & MPI_REQ_PREPARED) &&
          not(requests[i]->flags_ & MPI_REQ_FINISHED)) {
        if (requests[i]->action_ != nullptr) {
          XBT_DEBUG("Waiting any %p ", requests[i]);
          comms.push_back(requests[i]->action_.get());
          map.push_back(i);
        } else {
          // This is a finished detached request, let's return this one
          comms.clear(); // don't do the waitany call afterwards
          index = i;
          if (requests[index]->flags_ & MPI_REQ_NBC)
            finish_nbc_requests(&requests[index], 0);
          finish_wait(&requests[i], status); // cleanup if refcount = 0
          if (requests[i] != MPI_REQUEST_NULL && (requests[i]->flags_ & MPI_REQ_NON_PERSISTENT))
            requests[i] = MPI_REQUEST_NULL; // set to null
          break;
        }
      }
    }
    if (not comms.empty()) {
      XBT_DEBUG("Enter waitany for %zu comms", comms.size());
      ssize_t i;
      try {
        kernel::actor::ActorImpl* issuer = kernel::actor::ActorImpl::self();
        simgrid::smpi::ActorExt* process = smpi_process_remote(simgrid::s4u::Actor::by_pid(issuer->get_pid()));
        kernel::actor::ActivityWaitanySimcall observer{issuer, comms, -1,
                                                       process->call_location()->get_call_location()};
        i = kernel::actor::simcall_blocking(
            [&observer] {
              kernel::activity::ActivityImpl::wait_any_for(observer.get_issuer(), observer.get_activities(),
                                                           observer.get_timeout());
            },
            &observer);
      } catch (const CancelException&) {
        XBT_INFO("request cancelled");
        i = -1;
      }

      if (i != -1) {
        // not MPI_UNDEFINED, as this is a simix return code
        index = map[i];
        // in case of an accumulate, we have to wait the end of all requests to apply the operation, ordered correctly.
        if ((requests[index] == MPI_REQUEST_NULL) ||
            (not((requests[index]->flags_ & MPI_REQ_ACCUMULATE) && (requests[index]->flags_ & MPI_REQ_RECV)))) {
          finish_wait(&requests[index], status);
          if (requests[index] != MPI_REQUEST_NULL && (requests[index]->flags_ & MPI_REQ_NON_PERSISTENT))
            requests[index] = MPI_REQUEST_NULL;
        }
      }
    }
  }

  if (index == MPI_UNDEFINED)
    Status::empty(status);

  return index;
}
static int sort_accumulates(const Request* a, const Request* b)
{
  return (a->tag() > b->tag());
}
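// Accumulate receives must apply their MPI_Op in a deterministic order for results to be
// reproducible across runs; waitall() below therefore defers them, sorts them by tag using
// sort_accumulates(), and only then calls finish_wait() on each of them.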
int Request::waitall(int count, MPI_Request requests[], MPI_Status status[])
{
  std::vector<MPI_Request> accumulates;
  int index;
  MPI_Status stat;
  MPI_Status* pstat = (status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat);
  int retvalue = MPI_SUCCESS;
  // tag invalid requests in the set
  if (status != MPI_STATUSES_IGNORE) {
    for (int c = 0; c < count; c++) {
      if (requests[c] == MPI_REQUEST_NULL || requests[c]->dst_ == MPI_PROC_NULL ||
          (requests[c]->flags_ & MPI_REQ_PREPARED)) {
        Status::empty(&status[c]);
      } else if (requests[c]->src_ == MPI_PROC_NULL) {
        Status::empty(&status[c]);
        status[c].MPI_SOURCE = MPI_PROC_NULL;
      }
    }
  }
  for (int c = 0; c < count; c++) {
    if (MC_is_active() || MC_record_replay_is_active()) {
      wait(&requests[c], pstat);
      index = c;
    } else {
      index = waitany(count, requests, pstat);

      if (index == MPI_UNDEFINED)
        break;

      if (requests[index] != MPI_REQUEST_NULL && (requests[index]->flags_ & MPI_REQ_RECV) &&
          (requests[index]->flags_ & MPI_REQ_ACCUMULATE))
        accumulates.push_back(requests[index]);
      if (requests[index] != MPI_REQUEST_NULL && (requests[index]->flags_ & MPI_REQ_NON_PERSISTENT))
        requests[index] = MPI_REQUEST_NULL;
    }
    if (status != MPI_STATUSES_IGNORE) {
      status[index] = *pstat;
      if (status[index].MPI_ERROR == MPI_ERR_TRUNCATE)
        retvalue = MPI_ERR_IN_STATUS;
    }
  }

  std::sort(accumulates.begin(), accumulates.end(), sort_accumulates);
  for (auto& req : accumulates)
    finish_wait(&req, status);

  return retvalue;
}
int Request::waitsome(int incount, MPI_Request requests[], int *indices, MPI_Status status[])
{
  int count = 0, flag = 0;
  MPI_Status stat;
  MPI_Status* pstat = status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat;
  int index = waitany(incount, requests, pstat);
  if (index == MPI_UNDEFINED)
    return MPI_UNDEFINED;
  if (status != MPI_STATUSES_IGNORE)
    status[count] = *pstat;
  indices[count] = index;
  count++;
  for (int i = 0; i < incount; i++) {
    if (i != index && requests[i] != MPI_REQUEST_NULL && not(requests[i]->flags_ & MPI_REQ_FINISHED)) {
      test(&requests[i], pstat, &flag);
      if (flag == 1) {
        indices[count] = i;
        if (status != MPI_STATUSES_IGNORE)
          status[count] = *pstat;
        if (requests[i] != MPI_REQUEST_NULL && (requests[i]->flags_ & MPI_REQ_NON_PERSISTENT))
          requests[i] = MPI_REQUEST_NULL;
        count++;
      }
    }
  }
  return count;
}
MPI_Request Request::f2c(int id)
{
  if (id == MPI_FORTRAN_REQUEST_NULL)
    return MPI_REQUEST_NULL;
  return static_cast<MPI_Request>(F2C::lookup()->at(id));
}
void Request::free_f(int id)
{
  if (id != MPI_FORTRAN_REQUEST_NULL) {
    F2C::lookup()->erase(id);
  }
}
int Request::get_status(const Request* req, int* flag, MPI_Status* status)
{
  *flag = 0;
  if (req != MPI_REQUEST_NULL && req->action_ != nullptr) {
    req->iprobe(req->comm_->group()->rank(req->src_), req->tag_, req->comm_, flag, status);
    if (*flag)
      return MPI_SUCCESS;
  }
  if (req != MPI_REQUEST_NULL && (req->flags_ & MPI_REQ_GENERALIZED) && not(req->flags_ & MPI_REQ_COMPLETE)) {
    *flag = 0;
    return MPI_SUCCESS;
  }

  *flag = 1;
  if (req != MPI_REQUEST_NULL && status != MPI_STATUS_IGNORE) {
    aid_t src = req->src_ == MPI_ANY_SOURCE ? req->real_src_ : req->src_;
    status->MPI_SOURCE = req->comm_->group()->rank(src);
    status->MPI_TAG    = req->tag_ == MPI_ANY_TAG ? req->real_tag_ : req->tag_;
    status->MPI_ERROR  = req->truncated_ ? MPI_ERR_TRUNCATE : MPI_SUCCESS;
    status->count      = req->real_size_;
  }
  return MPI_SUCCESS;
}
int Request::grequest_start(MPI_Grequest_query_function* query_fn, MPI_Grequest_free_function* free_fn,
                            MPI_Grequest_cancel_function* cancel_fn, void* extra_state, MPI_Request* request)
{
  *request = new Request();
  (*request)->flags_ |= MPI_REQ_GENERALIZED;
  (*request)->flags_ |= MPI_REQ_PERSISTENT;
  (*request)->refcount_ = 1;
  ((*request)->generalized_funcs)              = std::make_unique<smpi_mpi_generalized_request_funcs_t>();
  ((*request)->generalized_funcs)->query_fn    = query_fn;
  ((*request)->generalized_funcs)->free_fn     = free_fn;
  ((*request)->generalized_funcs)->cancel_fn   = cancel_fn;
  ((*request)->generalized_funcs)->extra_state = extra_state;
  ((*request)->generalized_funcs)->cond        = simgrid::s4u::ConditionVariable::create();
  ((*request)->generalized_funcs)->mutex       = simgrid::s4u::Mutex::create();
  return MPI_SUCCESS;
}
int Request::grequest_complete(MPI_Request request)
{
  if ((not(request->flags_ & MPI_REQ_GENERALIZED)) || request->generalized_funcs->mutex == nullptr)
    return MPI_ERR_REQUEST;
  const std::scoped_lock lock(*request->generalized_funcs->mutex);
  request->flags_ |= MPI_REQ_COMPLETE; // in case wait would be called after complete
  request->generalized_funcs->cond->notify_one();
  return MPI_SUCCESS;
}
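// Sketch of the generalized-request protocol implemented by grequest_start()/grequest_complete()
// above (illustrative, user-level view):
//   MPI_Grequest_start(query_fn, free_fn, cancel_fn, state, &req); // create the handle
//   ... perform the user-defined operation, possibly from another actor ...
//   MPI_Grequest_complete(req);  // sets MPI_REQ_COMPLETE and signals the condition variable
//   MPI_Wait(&req, &status);     // wait() blocks on that condition variable, then calls
//                                // query_fn to let the user fill in the status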
void Request::start_nbc_requests(std::vector<MPI_Request> reqs){
  if (not reqs.empty()) {
    nbc_requests_ = reqs;
    Request::startall(reqs.size(), reqs.data());
  }
}
std::vector<MPI_Request> Request::get_nbc_requests() const
{
  return nbc_requests_;
}
} // namespace simgrid::smpi