/* Copyright (c) 2007-2018. The SimGrid Team. All rights reserved. */

/* This program is free software; you can redistribute it and/or modify it
 * under the terms of the license (GNU LGPL) which comes with this package. */

#include "smpi_request.hpp"

#include "SmpiHost.hpp"
#include "private.hpp"
#include "smpi_comm.hpp"
#include "smpi_datatype.hpp"
#include "smpi_op.hpp"
#include "smpi_process.hpp"
#include "src/kernel/activity/CommImpl.hpp"
#include "src/mc/mc_replay.hpp"
#include "src/simix/ActorImpl.hpp"
#include "xbt/config.hpp"
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_request, smpi, "Logging specific to SMPI (request)");
static simgrid::config::Flag<double> smpi_iprobe_sleep(
    "smpi/iprobe", "Minimum time to inject inside a call to MPI_Iprobe", 1e-4);
static simgrid::config::Flag<double> smpi_test_sleep(
    "smpi/test", "Minimum time to inject inside a call to MPI_Test", 1e-4);

std::vector<s_smpi_factor_t> smpi_ois_values;

extern void (*smpi_comm_copy_data_callback) (smx_activity_t, void*, size_t);

namespace simgrid{
namespace smpi{
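/* A Request wraps a single point-to-point communication. For derived
 * datatypes (and for accumulating receives), the constructor serializes the
 * user buffer into a contiguous scratch buffer, so that SIMIX only ever moves
 * flat memory; finish_wait() performs the inverse operation on reception. */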
Request::Request(void* buf, int count, MPI_Datatype datatype, int src, int dst, int tag, MPI_Comm comm, unsigned flags)
    : buf_(buf), old_type_(datatype), src_(src), dst_(dst), tag_(tag), comm_(comm), flags_(flags)
{
  void *old_buf = nullptr;
  // FIXME Handle the case of a partial shared malloc.
  if ((((flags & RECV) != 0) && ((flags & ACCUMULATE) != 0)) || (datatype->flags() & DT_FLAG_DERIVED)) {
    // This part handles the problem of non-contiguous memory
    old_buf = buf;
    if (count==0){
      buf_ = nullptr;
    }else{
      buf_ = xbt_malloc(count*datatype->size());
      if ((datatype->flags() & DT_FLAG_DERIVED) && ((flags & SEND) != 0)) {
        datatype->serialize(old_buf, buf_, count);
      }
    }
  }
  // This part handles the problem of non-contiguous memory (for deserialization at reception)
  old_buf_ = old_buf;
  size_ = datatype->size() * count;
  action_ = nullptr;
  detached_ = 0;
  detached_sender_ = nullptr;
  real_src_ = 0;
  truncated_ = 0;
  real_size_ = 0;
  real_tag_ = 0;
  refcount_ = (flags & PERSISTENT) ? 1 : 0;
}
MPI_Comm Request::comm() { return comm_; }
int Request::detached() { return detached_; }
size_t Request::size() { return size_; }
size_t Request::real_size() { return real_size_; }
void Request::unref(MPI_Request* request)
{
  if((*request) != MPI_REQUEST_NULL){
    (*request)->refcount_--;
    if((*request)->refcount_<0) xbt_die("wrong refcount");
    if((*request)->refcount_==0){
        Datatype::unref((*request)->old_type_);
        Comm::unref((*request)->comm_);
        (*request)->print_request("Destroying");
        delete *request;
        *request = MPI_REQUEST_NULL;
    }else{
        (*request)->print_request("Decrementing");
    }
  }else{
    xbt_die("freeing an already free request");
  }
}
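/* Matching callbacks handed to SIMIX: they decide whether a posted receive
 * (resp. send) can be paired with a pending send (resp. receive), honoring the
 * MPI_ANY_SOURCE and MPI_ANY_TAG wildcards, and record the actual source/tag
 * into the wildcard request when the match succeeds. */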
int Request::match_recv(void* a, void* b, simgrid::kernel::activity::CommImpl* ignored)
{
  MPI_Request ref = static_cast<MPI_Request>(a);
  MPI_Request req = static_cast<MPI_Request>(b);
  XBT_DEBUG("Trying to match a recv of src %d against %d, tag %d against %d",ref->src_,req->src_, ref->tag_, req->tag_);

  xbt_assert(ref, "Cannot match recv against null reference");
  xbt_assert(req, "Cannot match recv against null request");
  if((ref->src_ == MPI_ANY_SOURCE || req->src_ == ref->src_)
      && ((ref->tag_ == MPI_ANY_TAG && req->tag_ >=0) || req->tag_ == ref->tag_)){
    //we match, we can transfer some values
    if(ref->src_ == MPI_ANY_SOURCE)
      ref->real_src_ = req->src_;
    if(ref->tag_ == MPI_ANY_TAG)
      ref->real_tag_ = req->tag_;
    if(ref->real_size_ < req->real_size_)
      ref->truncated_ = 1;
    if(req->detached_==1)
      ref->detached_sender_=req; //tie the sender to the receiver, as it is detached and has to be freed in the receiver
    XBT_DEBUG("match succeeded");
    return 1;
  }else
    return 0;
}
int Request::match_send(void* a, void* b, simgrid::kernel::activity::CommImpl* ignored)
{
  MPI_Request ref = static_cast<MPI_Request>(a);
  MPI_Request req = static_cast<MPI_Request>(b);
  XBT_DEBUG("Trying to match a send of src %d against %d, tag %d against %d",ref->src_,req->src_, ref->tag_, req->tag_);
  xbt_assert(ref, "Cannot match send against null reference");
  xbt_assert(req, "Cannot match send against null request");

  if((req->src_ == MPI_ANY_SOURCE || req->src_ == ref->src_)
      && ((req->tag_ == MPI_ANY_TAG && ref->tag_ >=0)|| req->tag_ == ref->tag_)){
    if(req->src_ == MPI_ANY_SOURCE)
      req->real_src_ = ref->src_;
    if(req->tag_ == MPI_ANY_TAG)
      req->real_tag_ = ref->tag_;
    if(req->real_size_ < ref->real_size_)
      req->truncated_ = 1;
    if(ref->detached_==1)
      req->detached_sender_=ref; //tie the sender to the receiver, as it is detached and has to be freed in the receiver
    XBT_DEBUG("match succeeded");
    return 1;
  }else
    return 0;
}
void Request::print_request(const char *message)
{
  XBT_VERB("%s request %p [buf = %p, size = %zu, src = %d, dst = %d, tag = %d, flags = %x]",
       message, this, buf_, size_, src_, dst_, tag_, flags_);
}
/* factories, to hide the internal flags from the caller */
MPI_Request Request::send_init(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
  return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::getPid(),
                     comm->group()->actor(dst)->getPid(), tag, comm, PERSISTENT | SEND | PREPARED);
}
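/* Hypothetical usage sketch (not part of this file): the intended lifecycle of
 * a persistent request built by the *_init() factories is
 *   MPI_Request req = Request::send_init(buf, n, type, dst, tag, comm);
 *   Request::startall(1, &req);              // or req->start()
 *   Request::wait(&req, MPI_STATUS_IGNORE);  // completes one round
 *   Request::unref(&req);                    // finally drop the handle
 * wait() only resets NON_PERSISTENT requests to MPI_REQUEST_NULL, so a
 * persistent handle can be restarted until it is explicitly unref'd. */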
MPI_Request Request::ssend_init(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
  return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::getPid(),
                     comm->group()->actor(dst)->getPid(), tag, comm, PERSISTENT | SSEND | SEND | PREPARED);
}

MPI_Request Request::isend_init(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
  return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::getPid(),
                     comm->group()->actor(dst)->getPid(), tag, comm, PERSISTENT | ISEND | SEND | PREPARED);
}
MPI_Request Request::rma_send_init(void *buf, int count, MPI_Datatype datatype, int src, int dst, int tag, MPI_Comm comm,
                               MPI_Op op)
{
  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
  if(op==MPI_OP_NULL){
    request = new Request(buf==MPI_BOTTOM ? nullptr : buf , count, datatype, comm->group()->actor(src)->getPid(),
                          comm->group()->actor(dst)->getPid(), tag,
                          comm, RMA | NON_PERSISTENT | ISEND | SEND | PREPARED);
  }else{
    request = new Request(buf==MPI_BOTTOM ? nullptr : buf, count, datatype, comm->group()->actor(src)->getPid(),
                          comm->group()->actor(dst)->getPid(), tag,
                          comm, RMA | NON_PERSISTENT | ISEND | SEND | PREPARED | ACCUMULATE);
    request->op_ = op;
  }
  return request;
}
MPI_Request Request::recv_init(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm)
{
  return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype,
                     src == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : comm->group()->actor(src)->getPid(),
                     simgrid::s4u::this_actor::getPid(), tag, comm, PERSISTENT | RECV | PREPARED);
}
MPI_Request Request::rma_recv_init(void *buf, int count, MPI_Datatype datatype, int src, int dst, int tag, MPI_Comm comm,
                               MPI_Op op)
{
  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
  if(op==MPI_OP_NULL){
    request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, comm->group()->actor(src)->getPid(),
                          comm->group()->actor(dst)->getPid(), tag, comm,
                          RMA | NON_PERSISTENT | RECV | PREPARED);
  }else{
    request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, comm->group()->actor(src)->getPid(),
                          comm->group()->actor(dst)->getPid(), tag, comm,
                          RMA | NON_PERSISTENT | RECV | PREPARED | ACCUMULATE);
    request->op_ = op;
  }
  return request;
}
MPI_Request Request::irecv_init(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm)
{
  return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype,
                     src == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : comm->group()->actor(src)->getPid(),
                     simgrid::s4u::this_actor::getPid(), tag, comm, PERSISTENT | RECV | PREPARED);
}
MPI_Request Request::isend(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
  request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::getPid(),
                        comm->group()->actor(dst)->getPid(), tag, comm, NON_PERSISTENT | ISEND | SEND);
  request->start();
  return request;
}
MPI_Request Request::issend(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
  request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::getPid(),
                        comm->group()->actor(dst)->getPid(), tag, comm, NON_PERSISTENT | ISEND | SSEND | SEND);
  request->start();
  return request;
}
MPI_Request Request::irecv(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm)
{
  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
  request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype,
                        src == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : comm->group()->actor(src)->getPid(),
                        simgrid::s4u::this_actor::getPid(), tag, comm, NON_PERSISTENT | RECV);
  request->start();
  return request;
}
void Request::recv(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm, MPI_Status * status)
{
  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
  request = irecv(buf, count, datatype, src, tag, comm);
  wait(&request,status);
  request = nullptr;
}
void Request::send(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
  request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::getPid(),
                        comm->group()->actor(dst)->getPid(), tag, comm, NON_PERSISTENT | SEND);

  request->start();
  wait(&request, MPI_STATUS_IGNORE);
  request = nullptr;
}
void Request::ssend(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
  request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::getPid(),
                        comm->group()->actor(dst)->getPid(), tag, comm, NON_PERSISTENT | SSEND | SEND);

  request->start();
  wait(&request,MPI_STATUS_IGNORE);
  request = nullptr;
}
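/* Note that the blocking send/ssend/recv above are thin wrappers: they build a
 * NON_PERSISTENT request, start() it, then wait() on it. ssend only differs by
 * the SSEND flag, which forces the rendezvous path in start() below. */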
void Request::sendrecv(void *sendbuf, int sendcount, MPI_Datatype sendtype,int dst, int sendtag,
                       void *recvbuf, int recvcount, MPI_Datatype recvtype, int src, int recvtag,
                       MPI_Comm comm, MPI_Status * status)
{
  MPI_Request requests[2];
  MPI_Status stats[2];
  int myid = simgrid::s4u::this_actor::getPid();
  if ((comm->group()->actor(dst)->getPid() == myid) && (comm->group()->actor(src)->getPid() == myid)) {
    Datatype::copy(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype);
    if(status !=MPI_STATUS_IGNORE){
      status->MPI_SOURCE = src;
      status->MPI_TAG = recvtag;
      status->MPI_ERROR = MPI_SUCCESS;
      status->count = sendcount*sendtype->size();
    }
    return;
  }
  requests[0] = isend_init(sendbuf, sendcount, sendtype, dst, sendtag, comm);
  requests[1] = irecv_init(recvbuf, recvcount, recvtype, src, recvtag, comm);
  startall(2, requests);
  waitall(2, requests, stats);
  unref(&requests[0]);
  unref(&requests[1]);
  if(status != MPI_STATUS_IGNORE) {
    // Copy receive status
    *status = stats[1];
  }
}
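/* start() implements the eager/rendezvous protocol on top of two mailboxes per
 * process: messages smaller than smpi/async-small-thresh go through the small
 * mailbox (eager), while larger messages and synchronous sends go through the
 * large one (rendezvous). Receives may therefore have to probe both sides,
 * starting with the more likely one. */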
void Request::start()
{
  smx_mailbox_t mailbox;

  xbt_assert(action_ == nullptr, "Cannot (re-)start unfinished communication");
  flags_ &= ~PREPARED;
  flags_ &= ~FINISHED;
  refcount_++;

  if ((flags_ & RECV) != 0) {
    this->print_request("New recv");

    simgrid::smpi::Process* process = smpi_process_remote(simgrid::s4u::Actor::byPid(dst_));

    int async_small_thresh = xbt_cfg_get_int("smpi/async-small-thresh");

    xbt_mutex_t mut = process->mailboxes_mutex();
    if (async_small_thresh != 0 || (flags_ & RMA) != 0)
      xbt_mutex_acquire(mut);

    if (async_small_thresh == 0 && (flags_ & RMA) == 0 ) {
      mailbox = process->mailbox();
    }
    else if (((flags_ & RMA) != 0) || static_cast<int>(size_) < async_small_thresh) {
      //We have to check both mailboxes (because SSEND messages are sent to the large mbox).
      //begin with the more appropriate one: the small one.
      mailbox = process->mailbox_small();
      XBT_DEBUG("Is there a corresponding send already posted in the small mailbox %p (in case of SSEND)?", mailbox);
      smx_activity_t action = simcall_comm_iprobe(mailbox, 0, &match_recv, static_cast<void*>(this));

      if (action == nullptr) {
        mailbox = process->mailbox();
        XBT_DEBUG("No, nothing in the small mailbox; test the other one: %p", mailbox);
        action = simcall_comm_iprobe(mailbox, 0, &match_recv, static_cast<void*>(this));
        if (action == nullptr) {
          XBT_DEBUG("Still nothing, switch back to the small mailbox: %p", mailbox);
          mailbox = process->mailbox_small();
        }
      } else {
        XBT_DEBUG("Yes, there was something for us in the large mailbox");
      }
    } else {
      mailbox = process->mailbox_small();
      XBT_DEBUG("Is there a corresponding send already posted in the small mailbox?");
      smx_activity_t action = simcall_comm_iprobe(mailbox, 0, &match_recv, static_cast<void*>(this));

      if (action == nullptr) {
        XBT_DEBUG("No, nothing in the permanent receive mailbox");
        mailbox = process->mailbox();
      } else {
        XBT_DEBUG("Yes, there was something for us in the small mailbox");
      }
    }

    // we make a copy here, as the size is modified by simix, and we may reuse the request in another receive later
    real_size_ = size_;
    action_ = simcall_comm_irecv(
        process->process()->getImpl(), mailbox, buf_, &real_size_, &match_recv,
        process->replaying() ? &smpi_comm_null_copy_buffer_callback : smpi_comm_copy_data_callback, this, -1.0);
    XBT_DEBUG("recv simcall posted");

    if (async_small_thresh != 0 || (flags_ & RMA) != 0 )
      xbt_mutex_release(mut);
  } else { /* the RECV flag was not set, so this is a send */
    simgrid::smpi::Process* process = smpi_process_remote(simgrid::s4u::Actor::byPid(dst_));
    int rank = src_;
    if (TRACE_smpi_view_internals()) {
      TRACE_smpi_send(rank, rank, dst_, tag_, size_);
    }
    this->print_request("New send");

    void* buf = buf_;
    if ((flags_ & SSEND) == 0 && ( (flags_ & RMA) != 0
        || static_cast<int>(size_) < xbt_cfg_get_int("smpi/send-is-detached-thresh") ) ) {
      void *oldbuf = nullptr;
      detached_ = 1;
      XBT_DEBUG("Send request %p is detached", this);
      refcount_++;
      if (not(old_type_->flags() & DT_FLAG_DERIVED)) {
        oldbuf = buf_;
        if (not process->replaying() && oldbuf != nullptr && size_ != 0) {
          if ((smpi_privatize_global_variables != 0) && (static_cast<char*>(buf_) >= smpi_data_exe_start) &&
              (static_cast<char*>(buf_) < smpi_data_exe_start + smpi_data_exe_size)) {
            XBT_DEBUG("Privatization: we are sending from a zone inside global memory. Switch data segment ");
            smpi_switch_data_segment(simgrid::s4u::Actor::byPid(src_));
          }
          buf = xbt_malloc(size_);
          memcpy(buf,oldbuf,size_);
          XBT_DEBUG("buf %p copied into %p",oldbuf,buf);
        }
      }
    }

    //if we are giving back the control to the user without waiting for completion, we have to inject timings
    double sleeptime = 0.0;
    if (detached_ != 0 || ((flags_ & (ISEND | SSEND)) != 0)) { // issend should be treated as isend
      // isend and send timings may be different
      sleeptime = ((flags_ & ISEND) != 0)
                      ? simgrid::s4u::Actor::self()->getHost()->extension<simgrid::smpi::SmpiHost>()->oisend(size_)
                      : simgrid::s4u::Actor::self()->getHost()->extension<simgrid::smpi::SmpiHost>()->osend(size_);
    }

    if(sleeptime > 0.0){
      simcall_process_sleep(sleeptime);
      XBT_DEBUG("sending size of %zu : sleep %f ", size_, sleeptime);
    }

    int async_small_thresh = xbt_cfg_get_int("smpi/async-small-thresh");

    xbt_mutex_t mut=process->mailboxes_mutex();

    if (async_small_thresh != 0 || (flags_ & RMA) != 0)
      xbt_mutex_acquire(mut);

    if (not(async_small_thresh != 0 || (flags_ & RMA) != 0)) {
      mailbox = process->mailbox();
    } else if (((flags_ & RMA) != 0) || static_cast<int>(size_) < async_small_thresh) { // eager mode
      mailbox = process->mailbox();
      XBT_DEBUG("Is there a corresponding recv already posted in the large mailbox %p?", mailbox);
      smx_activity_t action = simcall_comm_iprobe(mailbox, 1, &match_send, static_cast<void*>(this));
      if (action == nullptr) {
        if ((flags_ & SSEND) == 0){
          mailbox = process->mailbox_small();
          XBT_DEBUG("No, nothing in the large mailbox, message is to be sent on the small one %p", mailbox);
        } else {
          mailbox = process->mailbox_small();
          XBT_DEBUG("SSEND: is there a corresponding recv already posted in the small mailbox %p?", mailbox);
          action = simcall_comm_iprobe(mailbox, 1, &match_send, static_cast<void*>(this));
          if (action == nullptr) {
            XBT_DEBUG("No, we are first, send to large mailbox");
            mailbox = process->mailbox();
          }
        }
      } else {
        XBT_DEBUG("Yes, there was something for us in the large mailbox");
      }
    } else {
      mailbox = process->mailbox();
      XBT_DEBUG("Send request %p is in the large mailbox %p (buf: %p)", this, mailbox, buf_);
    }

    // we make a copy here, as the size is modified by simix, and we may reuse the request in another receive later
    real_size_ = size_;
    action_ = simcall_comm_isend(
        simgrid::s4u::Actor::byPid(src_)->getImpl(), mailbox, size_, -1.0, buf, real_size_, &match_send,
        &xbt_free_f, // how to free the userdata if a detached send fails
        not process->replaying() ? smpi_comm_copy_data_callback : &smpi_comm_null_copy_buffer_callback, this,
        // detach if msg size < eager/rdv switch limit
        detached_);
    XBT_DEBUG("send simcall posted");

    /* FIXME: detached sends are not traceable (action_ == nullptr) */
    if (action_ != nullptr)
      simcall_set_category(action_, TRACE_internal_smpi_get_category());
    if (async_small_thresh != 0 || ((flags_ & RMA)!=0))
      xbt_mutex_release(mut);
  }
}
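/* Note on detached sends: start() returns to the user right away; the copy
 * made above keeps the payload alive, and the xbt_free_f callback passed to
 * simcall_comm_isend releases it if the detached communication is dropped. */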
void Request::startall(int count, MPI_Request * requests)
{
  if(requests == nullptr)
    return;

  for(int i = 0; i < count; i++) {
    requests[i]->start();
  }
}
int Request::test(MPI_Request * request, MPI_Status * status) {
  // assume that request is not MPI_REQUEST_NULL (filtered in PMPI_Test or testall before)
  // to avoid deadlocks if used as a break condition, such as
  //     while (MPI_Test(request, flag, status) && flag) dostuff...
  // because the time will not normally advance when only calls to MPI_Test are made -> deadlock
  // multiplier to the sleeptime, to increase speed of execution, each failed test will increase it
  static int nsleeps = 1;
  if(smpi_test_sleep > 0)
    simcall_process_sleep(nsleeps*smpi_test_sleep);

  Status::empty(status);
  int flag = 1;
  if (((*request)->flags_ & PREPARED) == 0) {
    if ((*request)->action_ != nullptr)
      flag = simcall_comm_test((*request)->action_);
    if (flag) {
      finish_wait(request,status);
      nsleeps=1;//reset the number of sleeps we will do next time
      if (*request != MPI_REQUEST_NULL && ((*request)->flags_ & PERSISTENT) == 0)
        *request = MPI_REQUEST_NULL;
    } else if (xbt_cfg_get_boolean("smpi/grow-injected-times")){
      nsleeps++;
    }
  }
  return flag;
}
int Request::testsome(int incount, MPI_Request requests[], int *indices, MPI_Status status[])
{
  int count = 0;
  int count_dead = 0;
  MPI_Status stat;
  MPI_Status *pstat = status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat;

  for (int i = 0; i < incount; i++) {
    if (requests[i] != MPI_REQUEST_NULL) {
      if (test(&requests[i], pstat)) {
        indices[i] = 1;
        count++;
        if (status != MPI_STATUSES_IGNORE)
          status[i] = *pstat;
        if ((requests[i] != MPI_REQUEST_NULL) && (requests[i]->flags_ & NON_PERSISTENT))
          requests[i] = MPI_REQUEST_NULL;
      }
    } else {
      count_dead++;
    }
  }
  if(count_dead==incount)
    return MPI_UNDEFINED;
  else
    return count;
}
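/* testany polls a set of requests without blocking: only requests that carry a
 * SIMIX activity and are not PREPARED take part, and `map` translates the
 * index returned by simcall_comm_testany back into the caller's array. */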
int Request::testany(int count, MPI_Request requests[], int *index, MPI_Status * status)
{
  std::vector<simgrid::kernel::activity::ActivityImplPtr> comms;
  comms.reserve(count);

  int i;
  int flag = 0;

  *index = MPI_UNDEFINED;

  std::vector<int> map; /** Maps all matching comms back to their location in requests **/
  for(i = 0; i < count; i++) {
    if ((requests[i] != MPI_REQUEST_NULL) && requests[i]->action_ && not(requests[i]->flags_ & PREPARED)) {
      comms.push_back(requests[i]->action_);
      map.push_back(i);
    }
  }
  if (not map.empty()) {
    //multiplier to the sleeptime, to increase speed of execution, each failed testany will increase it
    static int nsleeps = 1;
    if(smpi_test_sleep > 0)
      simcall_process_sleep(nsleeps*smpi_test_sleep);

    i = simcall_comm_testany(comms.data(), comms.size()); // The i-th element in comms matches!
    if (i != -1) { // -1 is not MPI_UNDEFINED but a SIMIX return code. (nothing matches)
      *index = map[i];
      finish_wait(&requests[*index],status);
      flag = 1;
      nsleeps = 1;
      if (requests[*index] != MPI_REQUEST_NULL && (requests[*index]->flags_ & NON_PERSISTENT)) {
        requests[*index] = MPI_REQUEST_NULL;
      }
    } else {
      nsleeps++;
    }
  } else {
    //all requests are null or inactive, return true
    flag = 1;
    Status::empty(status);
  }

  return flag;
}
int Request::testall(int count, MPI_Request requests[], MPI_Status status[])
{
  MPI_Status stat;
  MPI_Status *pstat = status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat;
  int flag=1;
  for(int i=0; i<count; i++){
    if (requests[i] != MPI_REQUEST_NULL && not(requests[i]->flags_ & PREPARED)) {
      if (test(&requests[i], pstat)!=1){
        flag=0;
      }else{
        requests[i]=MPI_REQUEST_NULL;
      }
    }else{
      Status::empty(pstat);
    }
    if(status != MPI_STATUSES_IGNORE)
      status[i] = *pstat;
  }
  return flag;
}
void Request::probe(int source, int tag, MPI_Comm comm, MPI_Status* status){
  int flag=0;
  //FIXME find another way to avoid busy waiting ?
  // the issue here is that we have to wait on a nonexistent comm
  while(flag==0){
    iprobe(source, tag, comm, &flag, status);
    XBT_DEBUG("Busy Waiting on probing : %d", flag);
  }
}
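/* probe() above spins on iprobe(); the sleep injected below is what keeps that
 * busy loop from running in zero simulated time. */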
void Request::iprobe(int source, int tag, MPI_Comm comm, int* flag, MPI_Status* status){
  // to avoid deadlock, we have to sleep some time here, or the timer won't advance and we will only do iprobe simcalls
  // especially when used as a break condition, such as while (MPI_Iprobe(...)) dostuff...
  // nsleeps is a multiplier to the sleeptime, to increase speed of execution, each failed iprobe will increase it
  // This can speed up the execution of certain applications by an order of magnitude, such as HPL
  static int nsleeps = 1;
  double speed = simgrid::s4u::Actor::self()->getHost()->getSpeed();
  double maxrate = xbt_cfg_get_double("smpi/iprobe-cpu-usage");
  MPI_Request request = new Request(nullptr, 0, MPI_CHAR,
                                    source == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : comm->group()->actor(source)->getPid(),
                                    simgrid::s4u::this_actor::getPid(), tag, comm, PERSISTENT | RECV);
  if (smpi_iprobe_sleep > 0) {
    smx_activity_t iprobe_sleep = simcall_execution_start(
        "iprobe", /* flops to execute */ nsleeps * smpi_iprobe_sleep * speed * maxrate, /* priority */ 1.0,
        /* performance bound */ maxrate * speed, smpi_process()->process()->getImpl()->host);
    simcall_execution_wait(iprobe_sleep);
  }

  // behave like a receive, but don't do it
  smx_mailbox_t mailbox;

  request->print_request("New iprobe");
  // We have to test both mailboxes as we don't know if we will receive on one or the other
  if (xbt_cfg_get_int("smpi/async-small-thresh") > 0){
    mailbox = smpi_process()->mailbox_small();
    XBT_DEBUG("Trying to probe the perm recv mailbox");
    request->action_ = simcall_comm_iprobe(mailbox, 0, &match_recv, static_cast<void*>(request));
  }

  if (request->action_ == nullptr){
    mailbox = smpi_process()->mailbox();
    XBT_DEBUG("Trying to probe the other mailbox");
    request->action_ = simcall_comm_iprobe(mailbox, 0, &match_recv, static_cast<void*>(request));
  }

  if (request->action_ != nullptr){
    simgrid::kernel::activity::CommImplPtr sync_comm =
        boost::static_pointer_cast<simgrid::kernel::activity::CommImpl>(request->action_);
    MPI_Request req = static_cast<MPI_Request>(sync_comm->src_data);
    *flag = 1;
    if(status != MPI_STATUS_IGNORE && (req->flags_ & PREPARED) == 0) {
      status->MPI_SOURCE = comm->group()->rank(req->src_);
      status->MPI_TAG = req->tag_;
      status->MPI_ERROR = MPI_SUCCESS;
      status->count = req->real_size_;
    }
    nsleeps = 1;//reset the number of sleeps we will do next time
  } else {
    *flag = 0;
    if (xbt_cfg_get_boolean("smpi/grow-injected-times"))
      nsleeps++;
  }
  unref(&request);
}
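/* finish_wait() centralizes request completion: it fills the MPI_Status,
 * deserializes derived datatypes or applies the accumulate operation on the
 * contiguous scratch buffer, emits tracing events, and charges the orecv()
 * buffering cost of a detached sender. */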
void Request::finish_wait(MPI_Request* request, MPI_Status * status)
{
  MPI_Request req = *request;
  Status::empty(status);

  if (not((req->detached_ != 0) && ((req->flags_ & SEND) != 0)) && ((req->flags_ & PREPARED) == 0)) {
    if(status != MPI_STATUS_IGNORE) {
      int src = req->src_ == MPI_ANY_SOURCE ? req->real_src_ : req->src_;
      status->MPI_SOURCE = req->comm_->group()->rank(src);
      status->MPI_TAG = req->tag_ == MPI_ANY_TAG ? req->real_tag_ : req->tag_;
      status->MPI_ERROR = req->truncated_ != 0 ? MPI_ERR_TRUNCATE : MPI_SUCCESS;
      // this handles the case where the size in the receive differs from the size in the send
      status->count = req->real_size_;
    }

    req->print_request("Finishing");
    MPI_Datatype datatype = req->old_type_;

    // FIXME Handle the case of a partial shared malloc.
    if (((req->flags_ & ACCUMULATE) != 0) ||
        (datatype->flags() & DT_FLAG_DERIVED)) { // && (not smpi_is_shared(req->old_buf_))){

      if (not smpi_process()->replaying() && smpi_privatize_global_variables != 0 &&
          static_cast<char*>(req->old_buf_) >= smpi_data_exe_start &&
          static_cast<char*>(req->old_buf_) < smpi_data_exe_start + smpi_data_exe_size) {
        XBT_VERB("Privatization: we are deserializing to a zone in global memory. Switch data segment ");
        smpi_switch_data_segment(simgrid::s4u::Actor::self());
      }

      if(datatype->flags() & DT_FLAG_DERIVED){
        // This part handles the problem of non-contiguous memory (deserialization at reception)
        if((req->flags_ & RECV) && datatype->size()!=0)
          datatype->unserialize(req->buf_, req->old_buf_, req->real_size_/datatype->size() , req->op_);
        xbt_free(req->buf_);
      }else if(req->flags_ & RECV){//apply op on contiguous buffer for accumulate
        if(datatype->size()!=0){
          int n =req->real_size_/datatype->size();
          req->op_->apply(req->buf_, req->old_buf_, &n, datatype);
        }
        xbt_free(req->buf_);
      }
    }
  }

  if (TRACE_smpi_view_internals() && ((req->flags_ & RECV) != 0)){
    int rank = simgrid::s4u::this_actor::getPid();
    int src_traced = (req->src_ == MPI_ANY_SOURCE ? req->real_src_ : req->src_);
    TRACE_smpi_recv(src_traced, rank,req->tag_);
  }
  if(req->detached_sender_ != nullptr){
    //integrate pseudo-timing for buffering of small messages, do not bother to execute the simcall if 0
    double sleeptime =
        simgrid::s4u::Actor::self()->getHost()->extension<simgrid::smpi::SmpiHost>()->orecv(req->real_size());
    if (sleeptime > 0.0) {
      simcall_process_sleep(sleeptime);
      XBT_DEBUG("receiving size of %zu : sleep %f ", req->real_size_, sleeptime);
    }
    unref(&(req->detached_sender_));
  }
  if(req->flags_ & PERSISTENT)
    req->action_ = nullptr;
  req->flags_ |= FINISHED;
  unref(request);
}
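/* wait() blocks on the underlying SIMIX activity; for a detached send there is
 * no activity to wait on (action_ == nullptr, see the FIXME in start()), and
 * completion is handled by finish_wait() directly. */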
void Request::wait(MPI_Request * request, MPI_Status * status)
{
  (*request)->print_request("Waiting");
  if ((*request)->flags_ & PREPARED) {
    Status::empty(status);
    return;
  }

  if ((*request)->action_ != nullptr)
    // this is not a detached send
    simcall_comm_wait((*request)->action_, -1.0);

  finish_wait(request,status);
  if (*request != MPI_REQUEST_NULL && (((*request)->flags_ & NON_PERSISTENT)!=0))
    *request = MPI_REQUEST_NULL;
}
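/* waitany mirrors the dynar of SIMIX activities with a `map` array so that the
 * position returned by simcall_comm_waitany can be translated back into an
 * index in `requests`; an already-finished detached request short-circuits the
 * simcall altogether. */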
int Request::waitany(int count, MPI_Request requests[], MPI_Status * status)
{
  s_xbt_dynar_t comms; // Keep it on stack to save some extra mallocs
  int index = MPI_UNDEFINED;

  if(count > 0) {
    int size = 0;
    // Wait for a request to complete
    xbt_dynar_init(&comms, sizeof(smx_activity_t), [](void*ptr){
      intrusive_ptr_release(*(simgrid::kernel::activity::ActivityImpl**)ptr);
    });
    int *map = xbt_new(int, count);
    XBT_DEBUG("Wait for one of %d", count);
    for(int i = 0; i < count; i++) {
      if (requests[i] != MPI_REQUEST_NULL && not(requests[i]->flags_ & PREPARED) &&
          not(requests[i]->flags_ & FINISHED)) {
        if (requests[i]->action_ != nullptr) {
          XBT_DEBUG("Waiting any %p ", requests[i]);
          intrusive_ptr_add_ref(requests[i]->action_.get());
          xbt_dynar_push_as(&comms, simgrid::kernel::activity::ActivityImpl*, requests[i]->action_.get());
          map[size] = i;
          size++;
        } else {
          // This is a finished detached request, let's return this one
          size = 0; // so we free the dynar but don't do the waitany call
          index = i;
          finish_wait(&requests[i], status); // cleanup if refcount = 0
          if (requests[i] != MPI_REQUEST_NULL && (requests[i]->flags_ & NON_PERSISTENT))
            requests[i] = MPI_REQUEST_NULL; // set to null
          break;
        }
      }
    }
    if (size > 0) {
      XBT_DEBUG("Enter waitany for %lu comms", xbt_dynar_length(&comms));
      int i = simcall_comm_waitany(&comms, -1);

      // not MPI_UNDEFINED, as this is a simix return code
      if (i != -1) {
        index = map[i];
        //in case of an accumulate, we have to wait the end of all requests to apply the operation, ordered correctly.
        if ((requests[index] == MPI_REQUEST_NULL) ||
            (not((requests[index]->flags_ & ACCUMULATE) && (requests[index]->flags_ & RECV)))) {
          finish_wait(&requests[index],status);
          if (requests[index] != MPI_REQUEST_NULL && (requests[index]->flags_ & NON_PERSISTENT))
            requests[index] = MPI_REQUEST_NULL;
        }
      }
    }

    xbt_dynar_free_data(&comms);
    xbt_free(map);
  }

  if (index==MPI_UNDEFINED)
    Status::empty(status);

  return index;
}
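/* Accumulating receives completed by waitall must be applied in a
 * deterministic order; they are collected during the wait loop below and
 * sorted by tag (see sort_accumulates) before being finished. */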
static int sort_accumulates(MPI_Request a, MPI_Request b)
{
  return (a->tag() > b->tag());
}
int Request::waitall(int count, MPI_Request requests[], MPI_Status status[])
{
  std::vector<MPI_Request> accumulates;
  int index;
  MPI_Status stat;
  MPI_Status *pstat = (status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat);
  int retvalue = MPI_SUCCESS;
  //tag invalid requests in the set
  if (status != MPI_STATUSES_IGNORE) {
    for (int c = 0; c < count; c++) {
      if (requests[c] == MPI_REQUEST_NULL || requests[c]->dst_ == MPI_PROC_NULL || (requests[c]->flags_ & PREPARED)) {
        Status::empty(&status[c]);
      } else if (requests[c]->src_ == MPI_PROC_NULL) {
        Status::empty(&status[c]);
        status[c].MPI_SOURCE = MPI_PROC_NULL;
      }
    }
  }
  for (int c = 0; c < count; c++) {
    if (MC_is_active() || MC_record_replay_is_active()) {
      wait(&requests[c],pstat);
      index = c;
    } else {
      index = waitany(count, (MPI_Request*)requests, pstat);
      if (index == MPI_UNDEFINED)
        break;

      if (requests[index] != MPI_REQUEST_NULL
          && (requests[index]->flags_ & RECV)
          && (requests[index]->flags_ & ACCUMULATE))
        accumulates.push_back(requests[index]);
      if (requests[index] != MPI_REQUEST_NULL && (requests[index]->flags_ & NON_PERSISTENT))
        requests[index] = MPI_REQUEST_NULL;
    }
    if (status != MPI_STATUSES_IGNORE) {
      status[index] = *pstat;
      if (status[index].MPI_ERROR == MPI_ERR_TRUNCATE)
        retvalue = MPI_ERR_IN_STATUS;
    }
  }

  if (not accumulates.empty()) {
    std::sort(accumulates.begin(), accumulates.end(), sort_accumulates);
    for (auto& req : accumulates) {
      finish_wait(&req, status);
    }
  }

  return retvalue;
}
int Request::waitsome(int incount, MPI_Request requests[], int *indices, MPI_Status status[])
{
  int count = 0;
  MPI_Status stat;
  MPI_Status *pstat = status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat;

  for (int i = 0; i < incount; i++) {
    int index = waitany(incount, requests, pstat);
    if(index!=MPI_UNDEFINED){
      indices[count] = index;
      count++;
      if(status != MPI_STATUSES_IGNORE) {
        status[index] = *pstat;
      }
      if (requests[index] != MPI_REQUEST_NULL && (requests[index]->flags_ & NON_PERSISTENT))
        requests[index] = MPI_REQUEST_NULL;
    }else{
      return MPI_UNDEFINED;
    }
  }
  return count;
}
MPI_Request Request::f2c(int id) {
  char key[KEY_SIZE];
  if(id==MPI_FORTRAN_REQUEST_NULL)
    return static_cast<MPI_Request>(MPI_REQUEST_NULL);
  return static_cast<MPI_Request>(F2C::f2c_lookup()->at(get_key_id(key, id)));
}
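/* Fortran interoperability: requests are exposed to Fortran as plain integer
 * ids; the F2C lookup table maps each id (stringified by get_key_id) back to
 * its C++ Request object. */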
int Request::add_f()
{
  if (F2C::f2c_lookup() == nullptr) {
    F2C::set_f2c_lookup(new std::unordered_map<std::string, F2C*>);
  }
  char key[KEY_SIZE];
  (*(F2C::f2c_lookup()))[get_key_id(key, F2C::f2c_id())] = this;
  F2C::f2c_id_increment();
  return F2C::f2c_id()-1;
}
void Request::free_f(int id)
{
  if (id != MPI_FORTRAN_REQUEST_NULL) {
    char key[KEY_SIZE];
    F2C::f2c_lookup()->erase(get_key_id(key, id));
  }
}

} // namespace smpi
} // namespace simgrid