/* Copyright (c) 2007-2015. The SimGrid Team.
 * All rights reserved. */

/* This program is free software; you can redistribute it and/or modify it
 * under the terms of the license (GNU LGPL) which comes with this package. */

#include <xbt/config.hpp>

#include "private.h"
#include "xbt/virtu.h"
#include "mc/mc.h"
#include "src/mc/mc_replay.h"
#include "xbt/replay.h"
#include "src/simix/smx_private.h"
#include "surf/surf.h"
#include "simgrid/sg_config.h"
#include "colls/colls.h"

#include "src/simix/SynchroComm.hpp"

XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_base, smpi, "Logging specific to SMPI (base)");

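/* match_recv() and match_send() are the matching callbacks handed down to the
 * SIMIX layer (simcall_comm_irecv / simcall_comm_isend / simcall_comm_iprobe).
 * They implement MPI envelope matching: a posted receive matches an incoming
 * send when the sources agree (or the receive used MPI_ANY_SOURCE) and the
 * tags agree (or the receive used MPI_ANY_TAG against a non-negative tag).
 * On a successful match they also resolve wildcards, flag truncation, and
 * tie a detached sender to its receiver. */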
static int match_recv(void* a, void* b, smx_synchro_t ignored) {
  MPI_Request ref = static_cast<MPI_Request>(a);
  MPI_Request req = static_cast<MPI_Request>(b);
  xbt_assert(ref, "Cannot match recv against null reference");
  xbt_assert(req, "Cannot match recv against null request");
  XBT_DEBUG("Trying to match a recv of src %d against %d, tag %d against %d",ref->src,req->src, ref->tag, req->tag);

  if((ref->src == MPI_ANY_SOURCE || req->src == ref->src)
      && ((ref->tag == MPI_ANY_TAG && req->tag >= 0) || req->tag == ref->tag)){
    //we match, we can transfer some values
    if(ref->src == MPI_ANY_SOURCE)
      ref->real_src = req->src;
    if(ref->tag == MPI_ANY_TAG)
      ref->real_tag = req->tag;
    if(ref->real_size < req->real_size)
      ref->truncated = 1;
    if(req->detached == 1)
      ref->detached_sender = req; //tie the sender to the receiver, as it is detached and has to be freed in the receiver
    XBT_DEBUG("match succeeded");
    return 1;
  }else return 0;
}

static int match_send(void* a, void* b, smx_synchro_t ignored) {
  MPI_Request ref = static_cast<MPI_Request>(a);
  MPI_Request req = static_cast<MPI_Request>(b);
  xbt_assert(ref, "Cannot match send against null reference");
  xbt_assert(req, "Cannot match send against null request");
  XBT_DEBUG("Trying to match a send of src %d against %d, tag %d against %d",ref->src,req->src, ref->tag, req->tag);

  if((req->src == MPI_ANY_SOURCE || req->src == ref->src)
      && ((req->tag == MPI_ANY_TAG && ref->tag >= 0) || req->tag == ref->tag)){
    if(req->src == MPI_ANY_SOURCE)
      req->real_src = ref->src;
    if(req->tag == MPI_ANY_TAG)
      req->real_tag = ref->tag;
    if(req->real_size < ref->real_size)
      req->truncated = 1;
    if(ref->detached == 1)
      req->detached_sender = ref; //tie the sender to the receiver, as it is detached and has to be freed in the receiver
    XBT_DEBUG("match succeeded");
    return 1;
  }else return 0;
}

// Methods used to parse and store the values for timing injections in smpi
// These are taken from surf/network.c and generalized to have more values for each factor
typedef struct s_smpi_factor_multival *smpi_os_factor_multival_t;
typedef struct s_smpi_factor_multival { // FIXME: this should be merged (deduplicated) with s_smpi_factor defined in network_smpi.c
  long factor;
  int nb_values;
  double values[4];//arbitrarily set to 4
} s_smpi_factor_multival_t;

xbt_dynar_t smpi_os_values = nullptr;
xbt_dynar_t smpi_or_values = nullptr;
xbt_dynar_t smpi_ois_values = nullptr;

static simgrid::config::Flag<double> smpi_wtime_sleep(
  "smpi/wtime", "Minimum time to inject inside a call to MPI_Wtime", 0.0);
static simgrid::config::Flag<double> smpi_init_sleep(
  "smpi/init", "Time to inject inside a call to MPI_Init", 0.0);
static simgrid::config::Flag<double> smpi_iprobe_sleep(
  "smpi/iprobe", "Minimum time to inject inside a call to MPI_Iprobe", 1e-4);
static simgrid::config::Flag<double> smpi_test_sleep(
  "smpi/test", "Minimum time to inject inside a call to MPI_Test", 1e-4);

static int factor_cmp(const void *pa, const void *pb)
{
  return ((static_cast<const s_smpi_factor_multival_t*>(pa))->factor > (static_cast<const s_smpi_factor_multival_t*>(pb))->factor) ? 1 :
         ((static_cast<const s_smpi_factor_multival_t*>(pa))->factor < (static_cast<const s_smpi_factor_multival_t*>(pb))->factor) ? -1 : 0;
}

static xbt_dynar_t parse_factor(const char *smpi_coef_string)
{
  char *value = NULL;
  unsigned int iter = 0;
  s_smpi_factor_multival_t fact;
  unsigned int i = 0;
  xbt_dynar_t radical_elements2 = NULL;

  xbt_dynar_t smpi_factor = xbt_dynar_new(sizeof(s_smpi_factor_multival_t), NULL);
  xbt_dynar_t radical_elements = xbt_str_split(smpi_coef_string, ";");
  xbt_dynar_foreach(radical_elements, iter, value) {
    memset(&fact, 0, sizeof(s_smpi_factor_multival_t));
    radical_elements2 = xbt_str_split(value, ":");
    if (xbt_dynar_length(radical_elements2) < 2 || xbt_dynar_length(radical_elements2) > 5)
      xbt_die("Malformed radical for smpi factor: '%s'", smpi_coef_string);
    for(i = 0; i < xbt_dynar_length(radical_elements2); i++){
      char *errmsg;
      if (i == 0) { /* first element: the factor (section boundary) */
        errmsg = bprintf("Invalid factor in chunk #%d: %%s", iter+1);
        fact.factor = xbt_str_parse_int(xbt_dynar_get_as(radical_elements2, i, char *), errmsg);
      } else { /* following elements: the values for this factor */
        errmsg = bprintf("Invalid factor value %d in chunk #%d: %%s", i, iter+1);
        fact.values[fact.nb_values] = xbt_str_parse_double(xbt_dynar_get_as(radical_elements2, i, char *), errmsg);
        fact.nb_values++;
      }
      xbt_free(errmsg);
    }

    xbt_dynar_push_as(smpi_factor, s_smpi_factor_multival_t, fact);
    XBT_DEBUG("smpi_factor:\t%ld : %d values, first: %f", fact.factor, fact.nb_values, fact.values[0]);
    xbt_dynar_free(&radical_elements2);
  }
  xbt_dynar_free(&radical_elements);
  xbt_dynar_sort(smpi_factor, &factor_cmp);
  xbt_dynar_foreach(smpi_factor, iter, fact) {
    XBT_DEBUG("smpi_factor:\t%ld : %d values, first: %f", fact.factor, fact.nb_values, fact.values[0]);
  }
  return smpi_factor;
}

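/* The expected syntax is a semicolon-separated list of chunks, each chunk
 * being "factor:value1[:value2[:value3[:value4]]]" (2 to 5 fields, as checked
 * above). An illustrative string (the numbers are made up for the example):
 *   "0:4e-6:2.5e-10;65536:8e-6:1.1e-10"
 * describes two sections, one for messages up to 65536 bytes and one beyond. */
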
static double smpi_os(double size)
{
  if (smpi_os_values == nullptr) {
    smpi_os_values = parse_factor(xbt_cfg_get_string("smpi/os"));
    smpi_register_static(smpi_os_values, xbt_dynar_free_voidp);
  }
  unsigned int iter = 0;
  s_smpi_factor_multival_t fact;
  double current = 0.0;
  // Iterate over all the sections that were specified and find the right
  // value. (fact.factor represents the interval sizes; we want to find the
  // section that has fact.factor <= size and no other such fact.factor <= size)
  // Note: parse_factor() (used before) already sorts the dynar we iterate over!
  xbt_dynar_foreach(smpi_os_values, iter, fact) {
    if (size <= fact.factor) { // Values already too large, use the previously computed value of current!
      XBT_DEBUG("os : %f <= %ld return %f", size, fact.factor, current);
      return current;
    } else {
      // If the next section is too large, the current section must be used.
      // Hence, save the cost, as we might have to use it.
      current = fact.values[0] + fact.values[1] * size;
    }
  }
  XBT_DEBUG("os : %f > %ld return %f", size, fact.factor, current);

  return current;
}

static double smpi_ois(double size)
{
  if (smpi_ois_values == nullptr) {
    smpi_ois_values = parse_factor(xbt_cfg_get_string("smpi/ois"));
    smpi_register_static(smpi_ois_values, xbt_dynar_free_voidp);
  }
  unsigned int iter = 0;
  s_smpi_factor_multival_t fact;
  double current = 0.0;
  // Iterate over all the sections that were specified and find the right value. (fact.factor represents the interval
  // sizes; we want to find the section that has fact.factor <= size and no other such fact.factor <= size)
  // Note: parse_factor() (used before) already sorts the dynar we iterate over!
  xbt_dynar_foreach(smpi_ois_values, iter, fact) {
    if (size <= fact.factor) { // Values already too large, use the previously computed value of current!
      XBT_DEBUG("ois : %f <= %ld return %f", size, fact.factor, current);
      return current;
    } else {
      // If the next section is too large, the current section must be used.
      // Hence, save the cost, as we might have to use it.
      current = fact.values[0] + fact.values[1] * size;
    }
  }
  XBT_DEBUG("ois : %f > %ld return %f", size, fact.factor, current);

  return current;
}

static double smpi_or(double size)
{
  if (smpi_or_values == nullptr) {
    smpi_or_values = parse_factor(xbt_cfg_get_string("smpi/or"));
    smpi_register_static(smpi_or_values, xbt_dynar_free_voidp);
  }
  unsigned int iter = 0;
  s_smpi_factor_multival_t fact;
  double current = 0.0;
  // Iterate over all the sections that were specified and find the right value. (fact.factor represents the interval
  // sizes; we want to find the section that has fact.factor <= size and no other such fact.factor <= size)
  // Note: parse_factor() (used before) already sorts the dynar we iterate over!
  xbt_dynar_foreach(smpi_or_values, iter, fact) {
    if (size <= fact.factor) { // Values already too large, use the previously computed value of current!
      XBT_DEBUG("or : %f <= %ld return %f", size, fact.factor, current);
      return current;
    } else {
      // If the next section is too large, the current section must be used.
      // Hence, save the cost, as we might have to use it.
      current = fact.values[0] + fact.values[1] * size;
    }
  }
  XBT_DEBUG("or : %f > %ld return %f", size, fact.factor, current);

  return current;
}

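/* Worked example for the three lookups above, using the illustrative string
 * "0:4e-6:2.5e-10;65536:8e-6:1.1e-10": for size = 1024, the section with
 * factor 0 is the last one satisfying factor <= size, so the injected time is
 * values[0] + values[1] * size = 4e-6 + 2.5e-10 * 1024. For size = 1e6 the
 * section with factor 65536 applies instead. */
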
void smpi_mpi_init() {
  if(smpi_init_sleep > 0)
    simcall_process_sleep(smpi_init_sleep);
}

double smpi_mpi_wtime(){
  double time;
  if (smpi_process_initialized() != 0 &&
      smpi_process_finalized() == 0 &&
      smpi_process_get_sampling() == 0) {
    smpi_bench_end();
    time = SIMIX_get_clock();
    // to avoid deadlocks if used as a break condition, such as
    //     while (MPI_Wtime(...) < time_limit) {
    //       ...
    //     }
    // because the time will not normally advance when only calls to MPI_Wtime
    // are made -> deadlock (MPI_Wtime never reaches the time limit)
    if(smpi_wtime_sleep > 0)
      simcall_process_sleep(smpi_wtime_sleep);
    smpi_bench_begin();
  } else {
    time = SIMIX_get_clock();
  }
  return time;
}

static MPI_Request build_request(void *buf, int count, MPI_Datatype datatype, int src, int dst, int tag, MPI_Comm comm,
                                 unsigned flags)
{
  MPI_Request request = NULL;

  void *old_buf = NULL;

  request = xbt_new(s_smpi_mpi_request_t, 1);

  s_smpi_subtype_t *subtype = static_cast<s_smpi_subtype_t*>(datatype->substruct);

  if((((flags & RECV) != 0) && ((flags & ACCUMULATE) != 0)) || (datatype->sizeof_substruct != 0)){
    // This part handles the problem of non-contiguous memory
    old_buf = buf;
    buf = count==0 ? NULL : xbt_malloc(count*smpi_datatype_size(datatype));
    if ((datatype->sizeof_substruct != 0) && ((flags & SEND) != 0)) {
      subtype->serialize(old_buf, buf, count, datatype->substruct);
    }
  }

  request->buf = buf;
  // This part handles the problem of non-contiguous memory (for the unserialization at the reception)
  request->old_buf = old_buf;
  request->old_type = datatype;

  request->size = smpi_datatype_size(datatype) * count;
  smpi_datatype_use(datatype);
  request->src = src;
  request->dst = dst;
  request->tag = tag;
  request->comm = comm;
  smpi_comm_use(request->comm);
  request->action = nullptr;
  request->flags = flags;
  request->detached = 0;
  request->detached_sender = nullptr;
  request->real_src = 0;
  request->truncated = 0;
  request->real_size = 0;
  request->real_tag = 0;
  if (flags & PERSISTENT)
    request->refcount = 1;
  else
    request->refcount = 0;
  request->op = MPI_REPLACE;

  return request;
}

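/* Note on reference counting: PERSISTENT requests start at refcount 1 because
 * the caller keeps a handle across repeated start/wait cycles, while
 * non-persistent requests start at 0 and only get their reference when they
 * are started. smpi_mpi_request_free() destroys the request once the count
 * drops back to zero. */
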
void smpi_empty_status(MPI_Status * status)
{
  if(status != MPI_STATUS_IGNORE) {
    status->MPI_SOURCE = MPI_ANY_SOURCE;
    status->MPI_TAG = MPI_ANY_TAG;
    status->MPI_ERROR = MPI_SUCCESS;
    status->count = 0;
  }
}

static void smpi_mpi_request_free_voidp(void* request)
{
  MPI_Request req = static_cast<MPI_Request>(request);
  smpi_mpi_request_free(&req);
}

/* MPI Low level calls */
MPI_Request smpi_mpi_send_init(void *buf, int count, MPI_Datatype datatype,
                               int dst, int tag, MPI_Comm comm)
{
  MPI_Request request = NULL; /* MC needs the comm to be set to NULL during the call */
  request = build_request(buf==MPI_BOTTOM ? NULL : buf, count, datatype, smpi_process_index(),
                          smpi_group_index(smpi_comm_group(comm), dst), tag, comm, PERSISTENT | SEND | PREPARED);
  return request;
}

MPI_Request smpi_mpi_ssend_init(void *buf, int count, MPI_Datatype datatype,
                                int dst, int tag, MPI_Comm comm)
{
  MPI_Request request = NULL; /* MC needs the comm to be set to NULL during the call */
  request = build_request(buf==MPI_BOTTOM ? NULL : buf, count, datatype, smpi_process_index(),
                          smpi_group_index(smpi_comm_group(comm), dst), tag, comm, PERSISTENT | SSEND | SEND | PREPARED);
  return request;
}

MPI_Request smpi_mpi_recv_init(void *buf, int count, MPI_Datatype datatype,
                               int src, int tag, MPI_Comm comm)
{
  MPI_Request request = NULL; /* MC needs the comm to be set to NULL during the call */
  request = build_request(buf==MPI_BOTTOM ? NULL : buf, count, datatype,
                          src == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : smpi_group_index(smpi_comm_group(comm), src),
                          smpi_process_index(), tag, comm, PERSISTENT | RECV | PREPARED);
  return request;
}

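/* smpi_mpi_start() implements an eager/rendezvous switch on top of two SIMIX
 * mailboxes per process: a "small" one for eager messages (size below
 * smpi/async-small-thresh, or RMA requests) and a "large" one for everything
 * else, SSEND included. Receives and sends may therefore have to probe both
 * mailboxes to find a communication that was posted first on the other side;
 * the debug messages below trace that search. */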
void smpi_mpi_start(MPI_Request request)
{
  smx_mailbox_t mailbox;

  xbt_assert(request->action == nullptr, "Cannot (re-)start unfinished communication");
  request->flags &= ~PREPARED;
  request->flags &= ~FINISHED;
  request->refcount++;

  if ((request->flags & RECV) != 0) {
    print_request("New recv", request);

    int async_small_thresh = xbt_cfg_get_int("smpi/async-small-thresh");

    xbt_mutex_t mut = smpi_process_mailboxes_mutex();
    if (async_small_thresh != 0 || (request->flags & RMA) != 0)
      xbt_mutex_acquire(mut);

    if (async_small_thresh == 0 && (request->flags & RMA) == 0 ) {
      mailbox = smpi_process_mailbox();
    }
    else if (((request->flags & RMA) != 0) || static_cast<int>(request->size) < async_small_thresh) {
      //We have to check both mailboxes (because SSEND messages are sent to the large mbox).
      //begin with the more appropriate one : the small one.
      mailbox = smpi_process_mailbox_small();
      XBT_DEBUG("Is there a corresponding send already posted in the small mailbox %p (in case of SSEND)?", mailbox);
      smx_synchro_t action = simcall_comm_iprobe(mailbox, 0, request->src, request->tag, &match_recv, static_cast<void*>(request));

      if (action == nullptr) {
        mailbox = smpi_process_mailbox();
        XBT_DEBUG("No, nothing in the small mailbox test the other one : %p", mailbox);
        action = simcall_comm_iprobe(mailbox, 0, request->src, request->tag, &match_recv, static_cast<void*>(request));
        if (action == nullptr) {
          XBT_DEBUG("Still nothing, switch back to the small mailbox : %p", mailbox);
          mailbox = smpi_process_mailbox_small();
        }
      }
      else {
        XBT_DEBUG("yes there was something for us in the small mailbox");
      }
    }
    else {
      mailbox = smpi_process_mailbox_small();
      XBT_DEBUG("Is there a corresponding send already posted in the small mailbox?");
      smx_synchro_t action = simcall_comm_iprobe(mailbox, 0, request->src, request->tag, &match_recv, static_cast<void*>(request));

      if (action == nullptr) {
        XBT_DEBUG("No, nothing in the permanent receive mailbox");
        mailbox = smpi_process_mailbox();
      }
      else {
        XBT_DEBUG("yes there was something for us in the small mailbox");
      }
    }

    // we make a copy here, as the size is modified by simix, and we may reuse the request in another receive later
    request->real_size = request->size;
    request->action = simcall_comm_irecv(SIMIX_process_self(), mailbox, request->buf, &request->real_size, &match_recv,
                                         !smpi_process_get_replaying() ? &smpi_comm_copy_buffer_callback
                                         : &smpi_comm_null_copy_buffer_callback, request, -1.0);
    XBT_DEBUG("recv simcall posted");

    if (async_small_thresh != 0 || (request->flags & RMA) != 0 )
      xbt_mutex_release(mut);
  }
  else { /* the RECV flag was not set, so this is a send */
    int receiver = request->dst;

    int rank = request->src;
    if (TRACE_smpi_view_internals()) {
      TRACE_smpi_send(rank, rank, receiver, request->size);
    }
    print_request("New send", request);

    void* buf = request->buf;
    if ( (request->flags & SSEND) == 0
        && ( (request->flags & RMA) != 0 || static_cast<int>(request->size) < xbt_cfg_get_int("smpi/send-is-detached-thresh") ) ) {
      void *oldbuf = NULL;
      request->detached = 1;
      XBT_DEBUG("Send request %p is detached", request);
      request->refcount++;
      if(request->old_type->sizeof_substruct == 0){
        oldbuf = request->buf;
        if (!smpi_process_get_replaying() && oldbuf != NULL && request->size != 0){
          if((smpi_privatize_global_variables != 0)
              && (static_cast<char*>(request->buf) >= smpi_start_data_exe)
              && (static_cast<char*>(request->buf) < smpi_start_data_exe + smpi_size_data_exe )){
            XBT_DEBUG("Privatization : We are sending from a zone inside global memory. Switch data segment ");
            smpi_switch_data_segment(request->src);
          }
          buf = xbt_malloc(request->size);
          memcpy(buf, oldbuf, request->size);
          XBT_DEBUG("buf %p copied into %p", oldbuf, buf);
        }
      }
    }

    //if we are giving back the control to the user without waiting for completion, we have to inject timings
    double sleeptime = 0.0;
    if(request->detached != 0 || ((request->flags & (ISEND|SSEND)) != 0)){// issend should be treated as isend
      //isend and send timings may be different
      sleeptime = ((request->flags & ISEND) != 0) ? smpi_ois(request->size) : smpi_os(request->size);
    }

    if(sleeptime > 0.0){
      simcall_process_sleep(sleeptime);
      XBT_DEBUG("sending size of %zu : sleep %f ", request->size, sleeptime);
    }

    int async_small_thresh = xbt_cfg_get_int("smpi/async-small-thresh");

    xbt_mutex_t mut = smpi_process_remote_mailboxes_mutex(receiver);

    if (async_small_thresh != 0 || (request->flags & RMA) != 0)
      xbt_mutex_acquire(mut);

    if (!(async_small_thresh != 0 || (request->flags & RMA) != 0)) {
      mailbox = smpi_process_remote_mailbox(receiver);
    }
    else if (((request->flags & RMA) != 0) || static_cast<int>(request->size) < async_small_thresh) { // eager mode
      mailbox = smpi_process_remote_mailbox(receiver);
      XBT_DEBUG("Is there a corresponding recv already posted in the large mailbox %p?", mailbox);
      smx_synchro_t action = simcall_comm_iprobe(mailbox, 1, request->dst, request->tag, &match_send, static_cast<void*>(request));
      if (action == nullptr) {
        if ((request->flags & SSEND) == 0){
          mailbox = smpi_process_remote_mailbox_small(receiver);
          XBT_DEBUG("No, nothing in the large mailbox, message is to be sent on the small one %p", mailbox);
        }
        else {
          mailbox = smpi_process_remote_mailbox_small(receiver);
          XBT_DEBUG("SSEND : Is there a corresponding recv already posted in the small mailbox %p?", mailbox);
          action = simcall_comm_iprobe(mailbox, 1, request->dst, request->tag, &match_send, static_cast<void*>(request));
          if (action == nullptr) {
            XBT_DEBUG("No, we are first, send to large mailbox");
            mailbox = smpi_process_remote_mailbox(receiver);
          }
        }
      }
      else {
        XBT_DEBUG("Yes there was something for us in the large mailbox");
      }
    }
    else {
      mailbox = smpi_process_remote_mailbox(receiver);
      XBT_DEBUG("Send request %p is in the large mailbox %p (buf: %p)", request, mailbox, request->buf);
    }

    // we make a copy here, as the size is modified by simix, and we may reuse the request in another receive later
    request->real_size = request->size;
    request->action = simcall_comm_isend(SIMIX_process_from_PID(request->src+1), mailbox, request->size, -1.0,
                                         buf, request->real_size, &match_send,
                                         &xbt_free_f, // how to free the userdata if a detached send fails
                                         !smpi_process_get_replaying() ? &smpi_comm_copy_buffer_callback
                                         : &smpi_comm_null_copy_buffer_callback, request,
                                         // detach if msg size < eager/rdv switch limit
                                         request->detached);
    XBT_DEBUG("send simcall posted");

    /* FIXME: detached sends are not traceable (request->action == nullptr) */
    if (request->action != nullptr)
      simcall_set_category(request->action, TRACE_internal_smpi_get_category());

    if (async_small_thresh != 0 || ((request->flags & RMA) != 0))
      xbt_mutex_release(mut);
  }
}

void smpi_mpi_startall(int count, MPI_Request * requests)
{
  if(requests == nullptr)
    return;

  for(int i = 0; i < count; i++) {
    smpi_mpi_start(requests[i]);
  }
}

void smpi_mpi_request_free(MPI_Request * request)
{
  if((*request) != MPI_REQUEST_NULL){
    (*request)->refcount--;
    if((*request)->refcount < 0) xbt_die("wrong refcount");

    if((*request)->refcount == 0){
      smpi_datatype_unuse((*request)->old_type);
      smpi_comm_unuse((*request)->comm);
      print_request("Destroying", (*request));
      xbt_free(*request);
      *request = MPI_REQUEST_NULL;
    }else{
      print_request("Decrementing", (*request));
    }
  }else{
    xbt_die("freeing an already free request");
  }
}

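/* Typical lifecycle of a request built above (a sketch, not new API):
 *   MPI_Request req = smpi_mpi_send_init(buf, count, type, dst, tag, comm);
 *   smpi_mpi_start(req);                     // post the communication
 *   smpi_mpi_wait(&req, MPI_STATUS_IGNORE);  // complete it
 *   smpi_mpi_request_free(&req);             // drop the persistent handle
 * The non-persistent helpers below (smpi_mpi_send, smpi_mpi_isend, ...)
 * bundle these steps and release their reference automatically. */
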
MPI_Request smpi_rma_send_init(void *buf, int count, MPI_Datatype datatype, int src, int dst, int tag, MPI_Comm comm,
                               MPI_Op op)
{
  MPI_Request request = NULL; /* MC needs the comm to be set to NULL during the call */
  if(op == MPI_OP_NULL){
    request = build_request(buf==MPI_BOTTOM ? NULL : buf, count, datatype, src, dst, tag,
                            comm, RMA | NON_PERSISTENT | ISEND | SEND | PREPARED);
  }else{
    request = build_request(buf==MPI_BOTTOM ? NULL : buf, count, datatype, src, dst, tag,
                            comm, RMA | NON_PERSISTENT | ISEND | SEND | PREPARED | ACCUMULATE);
    request->op = op;
  }
  return request;
}

MPI_Request smpi_rma_recv_init(void *buf, int count, MPI_Datatype datatype, int src, int dst, int tag, MPI_Comm comm,
                               MPI_Op op)
{
  MPI_Request request = NULL; /* MC needs the comm to be set to NULL during the call */
  if(op == MPI_OP_NULL){
    request = build_request(buf==MPI_BOTTOM ? NULL : buf, count, datatype, src, dst, tag,
                            comm, RMA | NON_PERSISTENT | RECV | PREPARED);
  }else{
    request = build_request(buf==MPI_BOTTOM ? NULL : buf, count, datatype, src, dst, tag,
                            comm, RMA | NON_PERSISTENT | RECV | PREPARED | ACCUMULATE);
    request->op = op;
  }
  return request;
}

MPI_Request smpi_isend_init(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
  MPI_Request request = NULL; /* MC needs the comm to be set to NULL during the call */
  request = build_request(buf==MPI_BOTTOM ? NULL : buf, count, datatype, smpi_process_index(),
                          smpi_group_index(smpi_comm_group(comm), dst), tag, comm, PERSISTENT | ISEND | SEND | PREPARED);
  return request;
}

MPI_Request smpi_mpi_isend(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
  MPI_Request request = NULL; /* MC needs the comm to be set to NULL during the call */
  request = build_request(buf==MPI_BOTTOM ? NULL : buf, count, datatype, smpi_process_index(),
                          smpi_group_index(smpi_comm_group(comm), dst), tag, comm, NON_PERSISTENT | ISEND | SEND);
  smpi_mpi_start(request);
  return request;
}

MPI_Request smpi_mpi_issend(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
  MPI_Request request = NULL; /* MC needs the comm to be set to NULL during the call */
  request = build_request(buf==MPI_BOTTOM ? NULL : buf, count, datatype, smpi_process_index(),
                          smpi_group_index(smpi_comm_group(comm), dst), tag, comm, NON_PERSISTENT | ISEND | SSEND | SEND);
  smpi_mpi_start(request);
  return request;
}

MPI_Request smpi_irecv_init(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm)
{
  MPI_Request request = NULL; /* MC needs the comm to be set to NULL during the call */
  request = build_request(buf==MPI_BOTTOM ? NULL : buf, count, datatype, src == MPI_ANY_SOURCE ? MPI_ANY_SOURCE :
                          smpi_group_index(smpi_comm_group(comm), src), smpi_process_index(), tag,
                          comm, PERSISTENT | RECV | PREPARED);
  return request;
}

MPI_Request smpi_mpi_irecv(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm)
{
  MPI_Request request = NULL; /* MC needs the comm to be set to NULL during the call */
  request = build_request(buf==MPI_BOTTOM ? NULL : buf, count, datatype, src == MPI_ANY_SOURCE ? MPI_ANY_SOURCE :
                          smpi_group_index(smpi_comm_group(comm), src), smpi_process_index(), tag, comm,
                          NON_PERSISTENT | RECV);
  smpi_mpi_start(request);
  return request;
}

void smpi_mpi_recv(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm, MPI_Status * status)
{
  MPI_Request request = NULL; /* MC needs the comm to be set to NULL during the call */
  request = smpi_mpi_irecv(buf, count, datatype, src, tag, comm);
  smpi_mpi_wait(&request, status);
  request = NULL;
}

void smpi_mpi_send(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
  MPI_Request request = NULL; /* MC needs the comm to be set to NULL during the call */
  request = build_request(buf==MPI_BOTTOM ? NULL : buf, count, datatype, smpi_process_index(),
                          smpi_group_index(smpi_comm_group(comm), dst), tag, comm, NON_PERSISTENT | SEND);

  smpi_mpi_start(request);
  smpi_mpi_wait(&request, MPI_STATUS_IGNORE);
  request = NULL;
}

void smpi_mpi_ssend(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
  MPI_Request request = NULL; /* MC needs the comm to be set to NULL during the call */
  request = build_request(buf==MPI_BOTTOM ? NULL : buf, count, datatype, smpi_process_index(),
                          smpi_group_index(smpi_comm_group(comm), dst), tag, comm, NON_PERSISTENT | SSEND | SEND);

  smpi_mpi_start(request);
  smpi_mpi_wait(&request, MPI_STATUS_IGNORE);
  request = NULL;
}

void smpi_mpi_sendrecv(void *sendbuf, int sendcount, MPI_Datatype sendtype, int dst, int sendtag,
                       void *recvbuf, int recvcount, MPI_Datatype recvtype, int src, int recvtag,
                       MPI_Comm comm, MPI_Status * status)
{
  MPI_Request requests[2];
  MPI_Status stats[2];
  int myid = smpi_process_index();
  if ((smpi_group_index(smpi_comm_group(comm), dst) == myid) && (smpi_group_index(smpi_comm_group(comm), src) == myid)){
    smpi_datatype_copy(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype);
    return;
  }
  requests[0] = smpi_isend_init(sendbuf, sendcount, sendtype, dst, sendtag, comm);
  requests[1] = smpi_irecv_init(recvbuf, recvcount, recvtype, src, recvtag, comm);
  smpi_mpi_startall(2, requests);
  smpi_mpi_waitall(2, requests, stats);
  smpi_mpi_request_free(&requests[0]);
  smpi_mpi_request_free(&requests[1]);
  if(status != MPI_STATUS_IGNORE) {
    // Copy receive status
    *status = stats[1];
  }
}

int smpi_mpi_get_count(MPI_Status * status, MPI_Datatype datatype)
{
  return status->count / smpi_datatype_size(datatype);
}

static void finish_wait(MPI_Request * request, MPI_Status * status)
{
  MPI_Request req = *request;
  smpi_empty_status(status);

  if(!((req->detached != 0) && ((req->flags & SEND) != 0)) && ((req->flags & PREPARED) == 0)){
    if(status != MPI_STATUS_IGNORE) {
      int src = req->src == MPI_ANY_SOURCE ? req->real_src : req->src;
      status->MPI_SOURCE = smpi_group_rank(smpi_comm_group(req->comm), src);
      status->MPI_TAG = req->tag == MPI_ANY_TAG ? req->real_tag : req->tag;
      status->MPI_ERROR = req->truncated != 0 ? MPI_ERR_TRUNCATE : MPI_SUCCESS;
      // this handles the case where size in receive differs from size in send
      status->count = req->real_size;
    }

    print_request("Finishing", req);
    MPI_Datatype datatype = req->old_type;

    if(((req->flags & ACCUMULATE) != 0) || (datatype->sizeof_substruct != 0)){
      if (!smpi_process_get_replaying()){
        if( smpi_privatize_global_variables != 0 && (static_cast<char*>(req->old_buf) >= smpi_start_data_exe)
            && (static_cast<char*>(req->old_buf) < smpi_start_data_exe + smpi_size_data_exe )){
          XBT_VERB("Privatization : We are unserializing to a zone in global memory - Switch data segment ");
          smpi_switch_data_segment(smpi_process_index());
        }
      }

      if(datatype->sizeof_substruct != 0){
        // This part handles the problem of non-contiguous memory: the unserialization at the reception
        s_smpi_subtype_t *subtype = static_cast<s_smpi_subtype_t*>(datatype->substruct);
        if(req->flags & RECV)
          subtype->unserialize(req->buf, req->old_buf, req->real_size/smpi_datatype_size(datatype),
                               datatype->substruct, req->op);
        xbt_free(req->buf);
      }else if(req->flags & RECV){//apply op on contiguous buffer for accumulate
        int n = req->real_size/smpi_datatype_size(datatype);
        smpi_op_apply(req->op, req->buf, req->old_buf, &n, &datatype);
        xbt_free(req->buf);
      }
    }
  }

  if (TRACE_smpi_view_internals() && ((req->flags & RECV) != 0)){
    int rank = smpi_process_index();
    int src_traced = (req->src == MPI_ANY_SOURCE ? req->real_src : req->src);
    TRACE_smpi_recv(rank, src_traced, rank);
  }

  if(req->detached_sender != NULL){
    //integrate pseudo-timing for buffering of small messages, do not bother to execute the simcall if 0
    double sleeptime = smpi_or(req->real_size);
    if(sleeptime > 0.0){
      simcall_process_sleep(sleeptime);
      XBT_DEBUG("receiving size of %zu : sleep %f ", req->real_size, sleeptime);
    }
    smpi_mpi_request_free(&(req->detached_sender));
  }
  if(req->flags & PERSISTENT)
    req->action = nullptr;
  req->flags |= FINISHED;

  smpi_mpi_request_free(request);
}

int smpi_mpi_test(MPI_Request * request, MPI_Status * status) {
  //assume that request is not MPI_REQUEST_NULL (filtered in PMPI_Test or smpi_mpi_testall before)

  // to avoid deadlocks if used as a break condition, such as
  //     while (MPI_Test(request, flag, status) && flag) {
  //     }
  // because the time will not normally advance when only calls to MPI_Test are made -> deadlock
  // multiplier to the sleeptime, to increase speed of execution, each failed test will increase it
  static int nsleeps = 1;
  if(smpi_test_sleep > 0)
    simcall_process_sleep(nsleeps*smpi_test_sleep);

  smpi_empty_status(status);
  int flag = 1;
  if (((*request)->flags & PREPARED) == 0) {
    if ((*request)->action != NULL)
      flag = simcall_comm_test((*request)->action);
    if (flag) {
      finish_wait(request, status);
      nsleeps = 1;//reset the number of sleeps we will do next time
      if (*request != MPI_REQUEST_NULL && ((*request)->flags & PERSISTENT) == 0)
        *request = MPI_REQUEST_NULL;
    }else{
      nsleeps++;
    }
  }
  return flag;
}

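/* The nsleeps escalation above is the same backoff used by smpi_mpi_testany()
 * and smpi_mpi_iprobe() below: every unsuccessful poll injects a slightly
 * longer simulated sleep, so tight polling loops advance the simulated clock
 * at a bounded cost in simcalls; a successful poll resets the multiplier. */
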
int smpi_mpi_testany(int count, MPI_Request requests[], int *index, MPI_Status * status)
{
  xbt_dynar_t comms;
  int i;
  int* map;
  int flag = 0;
  int size = 0;

  *index = MPI_UNDEFINED;
  comms = xbt_dynar_new(sizeof(smx_synchro_t), NULL);
  map = xbt_new(int, count);
  for(i = 0; i < count; i++) {
    if ((requests[i] != MPI_REQUEST_NULL) && requests[i]->action && !(requests[i]->flags & PREPARED)) {
      xbt_dynar_push(comms, &requests[i]->action);
      map[size] = i;
      size++;
    }
  }
  if(size > 0) {
    //multiplier to the sleeptime, to increase speed of execution, each failed testany will increase it
    static int nsleeps = 1;
    if(smpi_test_sleep > 0)
      simcall_process_sleep(nsleeps*smpi_test_sleep);

    i = simcall_comm_testany(comms);
    // not MPI_UNDEFINED, as this is a simix return code
    if(i != -1) {
      *index = map[i];
      finish_wait(&requests[*index], status);
      if (requests[*index] != MPI_REQUEST_NULL && (requests[*index]->flags & NON_PERSISTENT))
        requests[*index] = MPI_REQUEST_NULL;
      flag = 1;
      nsleeps = 1;
    }else{
      nsleeps++;
    }
  }else{
    //all requests are null or inactive, return true
    flag = 1;
    smpi_empty_status(status);
  }
  xbt_dynar_free(&comms);
  xbt_free(map);

  return flag;
}

int smpi_mpi_testall(int count, MPI_Request requests[], MPI_Status status[])
{
  MPI_Status stat;
  MPI_Status *pstat = status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat;
  int flag = 1;
  int i;
  for(i = 0; i < count; i++){
    if (requests[i] != MPI_REQUEST_NULL && !(requests[i]->flags & PREPARED)) {
      if (smpi_mpi_test(&requests[i], pstat) != 1){
        flag = 0;
      }else{
        requests[i] = MPI_REQUEST_NULL;
      }
    }else{
      smpi_empty_status(pstat);
    }
    if(status != MPI_STATUSES_IGNORE) {
      status[i] = *pstat;
    }
  }
  return flag;
}

void smpi_mpi_probe(int source, int tag, MPI_Comm comm, MPI_Status* status){
  int flag = 0;
  //FIXME find another way to avoid busy waiting ?
  // the issue here is that we have to wait on a nonexistent comm
  while(flag == 0){
    smpi_mpi_iprobe(source, tag, comm, &flag, status);
    XBT_DEBUG("Busy Waiting on probing : %d", flag);
  }
}

void smpi_mpi_iprobe(int source, int tag, MPI_Comm comm, int* flag, MPI_Status* status){
  MPI_Request request = build_request(NULL, 0, MPI_CHAR, source == MPI_ANY_SOURCE ? MPI_ANY_SOURCE :
                 smpi_group_index(smpi_comm_group(comm), source), smpi_comm_rank(comm), tag, comm, PERSISTENT | RECV);

  // to avoid deadlock, we have to sleep some time here, or the timer won't advance and we will only do iprobe simcalls
  // (especially when used as a break condition, such as while(MPI_Iprobe(...)) ... )
  // multiplier to the sleeptime, to increase speed of execution, each failed iprobe will increase it
  static int nsleeps = 1;
  if(smpi_iprobe_sleep > 0)
    simcall_process_sleep(nsleeps*smpi_iprobe_sleep);
  // behave like a receive, but don't do it
  smx_mailbox_t mailbox;

  print_request("New iprobe", request);
  // We have to test both mailboxes as we don't know if we will receive on one or the other
  if (xbt_cfg_get_int("smpi/async-small-thresh") > 0){
    mailbox = smpi_process_mailbox_small();
    XBT_DEBUG("Trying to probe the perm recv mailbox");
    request->action = simcall_comm_iprobe(mailbox, 0, request->src, request->tag, &match_recv, static_cast<void*>(request));
  }

  if (request->action == nullptr){
    mailbox = smpi_process_mailbox();
    XBT_DEBUG("trying to probe the other mailbox");
    request->action = simcall_comm_iprobe(mailbox, 0, request->src, request->tag, &match_recv, static_cast<void*>(request));
  }

  if (request->action != nullptr){
    simgrid::simix::Comm *sync_comm = static_cast<simgrid::simix::Comm*>(request->action);
    MPI_Request req = static_cast<MPI_Request>(sync_comm->src_data);
    *flag = 1;
    if(status != MPI_STATUS_IGNORE && (req->flags & PREPARED) == 0) {
      status->MPI_SOURCE = smpi_group_rank(smpi_comm_group(comm), req->src);
      status->MPI_TAG    = req->tag;
      status->MPI_ERROR  = MPI_SUCCESS;
      status->count      = req->real_size;
    }
    nsleeps = 1;//reset the number of sleeps we will do next time
  }
  else {
    *flag = 0;
    nsleeps++;
  }
  smpi_mpi_request_free(&request);
}

void smpi_mpi_wait(MPI_Request * request, MPI_Status * status)
{
  print_request("Waiting", *request);
  if ((*request)->flags & PREPARED) {
    smpi_empty_status(status);
    return;
  }

  if ((*request)->action != NULL)
    // this is not a detached send
    simcall_comm_wait((*request)->action, -1.0);

  finish_wait(request, status);
  if (*request != MPI_REQUEST_NULL && (((*request)->flags & NON_PERSISTENT) != 0))
    *request = MPI_REQUEST_NULL;
}

int smpi_mpi_waitany(int count, MPI_Request requests[], MPI_Status * status)
{
  xbt_dynar_t comms;
  int i;
  int size = 0;
  int index = MPI_UNDEFINED;
  int *map;

  if(count > 0) {
    // Wait for a request to complete
    comms = xbt_dynar_new(sizeof(smx_synchro_t), NULL);
    map = xbt_new(int, count);
    XBT_DEBUG("Wait for one of %d", count);
    for(i = 0; i < count; i++) {
      if (requests[i] != MPI_REQUEST_NULL && !(requests[i]->flags & PREPARED) && !(requests[i]->flags & FINISHED)) {
        if (requests[i]->action != NULL) {
          XBT_DEBUG("Waiting any %p ", requests[i]);
          xbt_dynar_push(comms, &requests[i]->action);
          map[size] = i;
          size++;
        } else {
          //This is a finished detached request, let's return this one
          size = 0;//so we free the dynar but don't do the waitany call
          index = i;
          finish_wait(&requests[i], status);//cleanup if refcount = 0
          if (requests[i] != MPI_REQUEST_NULL && (requests[i]->flags & NON_PERSISTENT))
            requests[i] = MPI_REQUEST_NULL;//set to null
          break;
        }
      }
    }
    if(size > 0) {
      i = simcall_comm_waitany(comms);

      // not MPI_UNDEFINED, as this is a simix return code
      if (i != -1) {
        index = map[i];
        finish_wait(&requests[index], status);
        if (requests[index] != MPI_REQUEST_NULL && (requests[index]->flags & NON_PERSISTENT))
          requests[index] = MPI_REQUEST_NULL;
      }
    }

    xbt_dynar_free(&comms);
    xbt_free(map);
  }

  if (index == MPI_UNDEFINED)
    smpi_empty_status(status);

  return index;
}

int smpi_mpi_waitall(int count, MPI_Request requests[], MPI_Status status[])
{
  int index, c;
  MPI_Status stat;
  MPI_Status *pstat = status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat;
  int retvalue = MPI_SUCCESS;
  //tag invalid requests in the set
  if (status != MPI_STATUSES_IGNORE) {
    for (c = 0; c < count; c++) {
      if (requests[c] == MPI_REQUEST_NULL || requests[c]->dst == MPI_PROC_NULL || (requests[c]->flags & PREPARED)) {
        smpi_empty_status(&status[c]);
      } else if (requests[c]->src == MPI_PROC_NULL) {
        smpi_empty_status(&status[c]);
        status[c].MPI_SOURCE = MPI_PROC_NULL;
      }
    }
  }
  for(c = 0; c < count; c++) {
    if (MC_is_active() || MC_record_replay_is_active()) {
      smpi_mpi_wait(&requests[c], pstat);
      index = c;
    } else {
      index = smpi_mpi_waitany(count, requests, pstat);
      if (index == MPI_UNDEFINED)
        break;
      if (requests[index] != MPI_REQUEST_NULL && (requests[index]->flags & NON_PERSISTENT))
        requests[index] = MPI_REQUEST_NULL;
    }
    if (status != MPI_STATUSES_IGNORE) {
      status[index] = *pstat;
      if (status[index].MPI_ERROR == MPI_ERR_TRUNCATE)
        retvalue = MPI_ERR_IN_STATUS;
    }
  }

  return retvalue;
}

int smpi_mpi_waitsome(int incount, MPI_Request requests[], int *indices, MPI_Status status[])
{
  int i;
  int count = 0;
  int index;
  MPI_Status stat;
  MPI_Status *pstat = status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat;

  for(i = 0; i < incount; i++)
  {
    index = smpi_mpi_waitany(incount, requests, pstat);
    if(index != MPI_UNDEFINED){
      indices[count] = index;
      count++;
      if(status != MPI_STATUSES_IGNORE) {
        status[index] = *pstat;
      }
      if (requests[index] != MPI_REQUEST_NULL && (requests[index]->flags & NON_PERSISTENT))
        requests[index] = MPI_REQUEST_NULL;
    }else{
      return MPI_UNDEFINED;
    }
  }
  return count;
}

int smpi_mpi_testsome(int incount, MPI_Request requests[], int *indices, MPI_Status status[])
{
  int i;
  int count = 0;
  int count_dead = 0;
  MPI_Status stat;
  MPI_Status *pstat = status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat;

  for(i = 0; i < incount; i++) {
    if((requests[i] != MPI_REQUEST_NULL)) {
      if(smpi_mpi_test(&requests[i], pstat)) {
        indices[count] = i;
        count++;
        if(status != MPI_STATUSES_IGNORE) {
          status[i] = *pstat;
        }
        if ((requests[i] != MPI_REQUEST_NULL) && requests[i]->flags & NON_PERSISTENT)
          requests[i] = MPI_REQUEST_NULL;
      }
    }else{
      count_dead++;
    }
  }
  if(count_dead == incount)
    return MPI_UNDEFINED;
  else
    return count;
}

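/* The collectives below are simple linear implementations layered on the
 * point-to-point primitives above: the root (or every rank, for the all*
 * variants) posts size-1 non-blocking requests with a collective-specific
 * system tag, starts them all, and waits for completion. Only a few entry
 * points delegate to tuned algorithms: smpi_mpi_bcast() and smpi_mpi_barrier()
 * just below, and smpi_mpi_reduce() for non-commutative operations. */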
void smpi_mpi_bcast(void *buf, int count, MPI_Datatype datatype, int root, MPI_Comm comm)
{
  smpi_coll_tuned_bcast_binomial_tree(buf, count, datatype, root, comm);
}

void smpi_mpi_barrier(MPI_Comm comm)
{
  smpi_coll_tuned_barrier_ompi_basic_linear(comm);
}

void smpi_mpi_gather(void *sendbuf, int sendcount, MPI_Datatype sendtype,
                     void *recvbuf, int recvcount, MPI_Datatype recvtype, int root, MPI_Comm comm)
{
  int system_tag = COLL_TAG_GATHER;
  int rank, size, src, index;
  MPI_Aint lb = 0, recvext = 0;
  MPI_Request *requests;

  rank = smpi_comm_rank(comm);
  size = smpi_comm_size(comm);
  if(rank != root) {
    // Send buffer to root
    smpi_mpi_send(sendbuf, sendcount, sendtype, root, system_tag, comm);
  } else {
    smpi_datatype_extent(recvtype, &lb, &recvext);
    // Local copy from root
    smpi_datatype_copy(sendbuf, sendcount, sendtype, static_cast<char*>(recvbuf) + root * recvcount * recvext, recvcount, recvtype);
    // Receive buffers from senders
    requests = xbt_new(MPI_Request, size - 1);
    index = 0;
    for(src = 0; src < size; src++) {
      if(src != root) {
        requests[index] = smpi_irecv_init(static_cast<char*>(recvbuf) + src * recvcount * recvext, recvcount, recvtype,
                                          src, system_tag, comm);
        index++;
      }
    }
    // Wait for completion of irecv's.
    smpi_mpi_startall(size - 1, requests);
    smpi_mpi_waitall(size - 1, requests, MPI_STATUS_IGNORE);
    for(src = 0; src < size-1; src++) {
      smpi_mpi_request_free(&requests[src]);
    }
    xbt_free(requests);
  }
}

void smpi_mpi_reduce_scatter(void *sendbuf, void *recvbuf, int *recvcounts, MPI_Datatype datatype, MPI_Op op,
                             MPI_Comm comm)
{
  int i, size, count = 0;
  int *displs;
  int rank = smpi_comm_rank(comm);
  void *tmpbuf;

  /* arbitrarily choose root as rank 0 */
  size = smpi_comm_size(comm);
  displs = xbt_new(int, size);
  for (i = 0; i < size; i++) {
    displs[i] = count;
    count += recvcounts[i];
  }
  tmpbuf = static_cast<void*>(smpi_get_tmp_sendbuffer(count*smpi_datatype_get_extent(datatype)));

  mpi_coll_reduce_fun(sendbuf, tmpbuf, count, datatype, op, 0, comm);
  smpi_mpi_scatterv(tmpbuf, recvcounts, displs, datatype, recvbuf, recvcounts[rank], datatype, 0, comm);
  xbt_free(displs);
  smpi_free_tmp_buffer(tmpbuf);
}

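/* Note: this reduce_scatter is the straightforward two-phase version: reduce
 * everything to an arbitrary root (rank 0), then scatterv the slices back
 * according to the recvcounts/displs computed above. */
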
void smpi_mpi_gatherv(void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf, int *recvcounts, int *displs,
                      MPI_Datatype recvtype, int root, MPI_Comm comm)
{
  int system_tag = COLL_TAG_GATHERV;
  int rank, size, src, index;
  MPI_Aint lb = 0, recvext = 0;
  MPI_Request *requests;

  rank = smpi_comm_rank(comm);
  size = smpi_comm_size(comm);
  if(rank != root) {
    // Send buffer to root
    smpi_mpi_send(sendbuf, sendcount, sendtype, root, system_tag, comm);
  } else {
    smpi_datatype_extent(recvtype, &lb, &recvext);
    // Local copy from root
    smpi_datatype_copy(sendbuf, sendcount, sendtype, static_cast<char*>(recvbuf) + displs[root] * recvext,
                       recvcounts[root], recvtype);
    // Receive buffers from senders
    requests = xbt_new(MPI_Request, size - 1);
    index = 0;
    for(src = 0; src < size; src++) {
      if(src != root) {
        requests[index] = smpi_irecv_init(static_cast<char*>(recvbuf) + displs[src] * recvext,
                                          recvcounts[src], recvtype, src, system_tag, comm);
        index++;
      }
    }
    // Wait for completion of irecv's.
    smpi_mpi_startall(size - 1, requests);
    smpi_mpi_waitall(size - 1, requests, MPI_STATUS_IGNORE);
    for(src = 0; src < size-1; src++) {
      smpi_mpi_request_free(&requests[src]);
    }
    xbt_free(requests);
  }
}

void smpi_mpi_allgather(void *sendbuf, int sendcount, MPI_Datatype sendtype,
                        void *recvbuf, int recvcount, MPI_Datatype recvtype, MPI_Comm comm)
{
  int system_tag = COLL_TAG_ALLGATHER;
  int rank, size, other, index;
  MPI_Aint lb = 0, recvext = 0;
  MPI_Request *requests;

  rank = smpi_comm_rank(comm);
  size = smpi_comm_size(comm);
  // FIXME: check for errors
  smpi_datatype_extent(recvtype, &lb, &recvext);
  // Local copy from self
  smpi_datatype_copy(sendbuf, sendcount, sendtype, static_cast<char *>(recvbuf) + rank * recvcount * recvext, recvcount, recvtype);
  // Send/Recv buffers to/from others
  requests = xbt_new(MPI_Request, 2 * (size - 1));
  index = 0;
  for(other = 0; other < size; other++) {
    if(other != rank) {
      requests[index] = smpi_isend_init(sendbuf, sendcount, sendtype, other, system_tag, comm);
      index++;
      requests[index] = smpi_irecv_init(static_cast<char *>(recvbuf) + other * recvcount * recvext, recvcount, recvtype, other,
                                        system_tag, comm);
      index++;
    }
  }
  // Wait for completion of all comms.
  smpi_mpi_startall(2 * (size - 1), requests);
  smpi_mpi_waitall(2 * (size - 1), requests, MPI_STATUS_IGNORE);
  for(other = 0; other < 2*(size-1); other++) {
    smpi_mpi_request_free(&requests[other]);
  }
  xbt_free(requests);
}

void smpi_mpi_allgatherv(void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf,
                         int *recvcounts, int *displs, MPI_Datatype recvtype, MPI_Comm comm)
{
  int system_tag = COLL_TAG_ALLGATHERV;
  int rank, size, other, index;
  MPI_Aint lb = 0, recvext = 0;
  MPI_Request *requests;

  rank = smpi_comm_rank(comm);
  size = smpi_comm_size(comm);
  smpi_datatype_extent(recvtype, &lb, &recvext);
  // Local copy from self
  smpi_datatype_copy(sendbuf, sendcount, sendtype, static_cast<char *>(recvbuf) + displs[rank] * recvext, recvcounts[rank], recvtype);
  // Send/Recv buffers to/from others
  requests = xbt_new(MPI_Request, 2 * (size - 1));
  index = 0;
  for(other = 0; other < size; other++) {
    if(other != rank) {
      requests[index] =
        smpi_isend_init(sendbuf, sendcount, sendtype, other, system_tag, comm);
      index++;
      requests[index] = smpi_irecv_init(static_cast<char *>(recvbuf) + displs[other] * recvext, recvcounts[other],
                                        recvtype, other, system_tag, comm);
      index++;
    }
  }
  // Wait for completion of all comms.
  smpi_mpi_startall(2 * (size - 1), requests);
  smpi_mpi_waitall(2 * (size - 1), requests, MPI_STATUS_IGNORE);
  for(other = 0; other < 2*(size-1); other++) {
    smpi_mpi_request_free(&requests[other]);
  }
  xbt_free(requests);
}

void smpi_mpi_scatter(void *sendbuf, int sendcount, MPI_Datatype sendtype,
                      void *recvbuf, int recvcount, MPI_Datatype recvtype, int root, MPI_Comm comm)
{
  int system_tag = COLL_TAG_SCATTER;
  int rank, size, dst, index;
  MPI_Aint lb = 0, sendext = 0;
  MPI_Request *requests;

  rank = smpi_comm_rank(comm);
  size = smpi_comm_size(comm);
  if(rank != root) {
    // Recv buffer from root
    smpi_mpi_recv(recvbuf, recvcount, recvtype, root, system_tag, comm, MPI_STATUS_IGNORE);
  } else {
    smpi_datatype_extent(sendtype, &lb, &sendext);
    // Local copy from root
    if(recvbuf != MPI_IN_PLACE){
      smpi_datatype_copy(static_cast<char *>(sendbuf) + root * sendcount * sendext,
                         sendcount, sendtype, recvbuf, recvcount, recvtype);
    }
    // Send buffers to receivers
    requests = xbt_new(MPI_Request, size - 1);
    index = 0;
    for(dst = 0; dst < size; dst++) {
      if(dst != root) {
        requests[index] = smpi_isend_init(static_cast<char *>(sendbuf) + dst * sendcount * sendext, sendcount, sendtype, dst,
                                          system_tag, comm);
        index++;
      }
    }
    // Wait for completion of isend's.
    smpi_mpi_startall(size - 1, requests);
    smpi_mpi_waitall(size - 1, requests, MPI_STATUS_IGNORE);
    for(dst = 0; dst < size-1; dst++) {
      smpi_mpi_request_free(&requests[dst]);
    }
    xbt_free(requests);
  }
}

void smpi_mpi_scatterv(void *sendbuf, int *sendcounts, int *displs, MPI_Datatype sendtype, void *recvbuf, int recvcount,
                       MPI_Datatype recvtype, int root, MPI_Comm comm)
{
  int system_tag = COLL_TAG_SCATTERV;
  int rank, size, dst, index;
  MPI_Aint lb = 0, sendext = 0;
  MPI_Request *requests;

  rank = smpi_comm_rank(comm);
  size = smpi_comm_size(comm);
  if(rank != root) {
    // Recv buffer from root
    smpi_mpi_recv(recvbuf, recvcount, recvtype, root, system_tag, comm, MPI_STATUS_IGNORE);
  } else {
    smpi_datatype_extent(sendtype, &lb, &sendext);
    // Local copy from root
    if(recvbuf != MPI_IN_PLACE){
      smpi_datatype_copy(static_cast<char *>(sendbuf) + displs[root] * sendext, sendcounts[root],
                         sendtype, recvbuf, recvcount, recvtype);
    }
    // Send buffers to receivers
    requests = xbt_new(MPI_Request, size - 1);
    index = 0;
    for(dst = 0; dst < size; dst++) {
      if(dst != root) {
        requests[index] = smpi_isend_init(static_cast<char *>(sendbuf) + displs[dst] * sendext, sendcounts[dst],
                                          sendtype, dst, system_tag, comm);
        index++;
      }
    }
    // Wait for completion of isend's.
    smpi_mpi_startall(size - 1, requests);
    smpi_mpi_waitall(size - 1, requests, MPI_STATUS_IGNORE);
    for(dst = 0; dst < size-1; dst++) {
      smpi_mpi_request_free(&requests[dst]);
    }
    xbt_free(requests);
  }
}

void smpi_mpi_reduce(void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, int root,
                     MPI_Comm comm)
{
  int system_tag = COLL_TAG_REDUCE;
  int rank, size, src, index;
  MPI_Aint lb = 0, dataext = 0;
  MPI_Request *requests;
  void **tmpbufs;

  char* sendtmpbuf = static_cast<char *>(sendbuf);

  rank = smpi_comm_rank(comm);
  size = smpi_comm_size(comm);
  //non commutative case, use a working algo from openmpi
  if(!smpi_op_is_commute(op)){
    smpi_coll_tuned_reduce_ompi_basic_linear(sendtmpbuf, recvbuf, count, datatype, op, root, comm);
    return;
  }

  if( sendbuf == MPI_IN_PLACE ) {
    sendtmpbuf = static_cast<char *>(smpi_get_tmp_sendbuffer(count*smpi_datatype_get_extent(datatype)));
    smpi_datatype_copy(recvbuf, count, datatype, sendtmpbuf, count, datatype);
  }

  if(rank != root) {
    // Send buffer to root
    smpi_mpi_send(sendtmpbuf, count, datatype, root, system_tag, comm);
  } else {
    smpi_datatype_extent(datatype, &lb, &dataext);
    // Local copy from root
    if (sendtmpbuf != NULL && recvbuf != NULL)
      smpi_datatype_copy(sendtmpbuf, count, datatype, recvbuf, count, datatype);
    // Receive buffers from senders
    requests = xbt_new(MPI_Request, size - 1);
    tmpbufs = xbt_new(void *, size - 1);
    index = 0;
    for(src = 0; src < size; src++) {
      if(src != root) {
        if (!smpi_process_get_replaying())
          tmpbufs[index] = xbt_malloc(count * dataext);
        else
          tmpbufs[index] = smpi_get_tmp_sendbuffer(count * dataext);
        requests[index] =
          smpi_irecv_init(tmpbufs[index], count, datatype, src, system_tag, comm);
        index++;
      }
    }
    // Wait for completion of irecv's.
    smpi_mpi_startall(size - 1, requests);
    for(src = 0; src < size - 1; src++) {
      index = smpi_mpi_waitany(size - 1, requests, MPI_STATUS_IGNORE);
      XBT_DEBUG("finished waiting any request with index %d", index);
      if(index == MPI_UNDEFINED) {
        break;
      }else{
        smpi_mpi_request_free(&requests[index]);
      }
      if(op) /* op can be MPI_OP_NULL that does nothing */
        smpi_op_apply(op, tmpbufs[index], recvbuf, &count, &datatype);
    }
    for(index = 0; index < size - 1; index++) {
      smpi_free_tmp_buffer(tmpbufs[index]);
    }
    xbt_free(tmpbufs);
    xbt_free(requests);
  }

  if( sendbuf == MPI_IN_PLACE ) {
    smpi_free_tmp_buffer(sendtmpbuf);
  }
}

void smpi_mpi_allreduce(void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
{
  smpi_mpi_reduce(sendbuf, recvbuf, count, datatype, op, 0, comm);
  smpi_mpi_bcast(recvbuf, count, datatype, 0, comm);
}

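/* Likewise, this allreduce is the naive composition reduce-to-rank-0 followed
 * by a bcast, rather than a fused algorithm. */
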
void smpi_mpi_scan(void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
{
  int system_tag = -888;
  int rank, size, other, index;
  MPI_Aint lb = 0, dataext = 0;
  MPI_Request *requests;
  void **tmpbufs;

  rank = smpi_comm_rank(comm);
  size = smpi_comm_size(comm);

  smpi_datatype_extent(datatype, &lb, &dataext);

  // Local copy from self
  smpi_datatype_copy(sendbuf, count, datatype, recvbuf, count, datatype);

  // Send/Recv buffers to/from others
  requests = xbt_new(MPI_Request, size - 1);
  tmpbufs = xbt_new(void *, rank);
  index = 0;
  for(other = 0; other < rank; other++) {
    tmpbufs[index] = smpi_get_tmp_sendbuffer(count * dataext);
    requests[index] = smpi_irecv_init(tmpbufs[index], count, datatype, other, system_tag, comm);
    index++;
  }
  for(other = rank + 1; other < size; other++) {
    requests[index] = smpi_isend_init(sendbuf, count, datatype, other, system_tag, comm);
    index++;
  }
  // Wait for completion of all comms.
  smpi_mpi_startall(size - 1, requests);

  if(smpi_op_is_commute(op)){
    for(other = 0; other < size - 1; other++) {
      index = smpi_mpi_waitany(size - 1, requests, MPI_STATUS_IGNORE);
      if(index == MPI_UNDEFINED) {
        break;
      }
      if(index < rank) {
        // #Request is below rank: it's a irecv
        smpi_op_apply(op, tmpbufs[index], recvbuf, &count, &datatype);
      }
    }
  }else{
    //non commutative case, wait in order
    for(other = 0; other < size - 1; other++) {
      smpi_mpi_wait(&(requests[other]), MPI_STATUS_IGNORE);
      if(other < rank) {
        smpi_op_apply(op, tmpbufs[other], recvbuf, &count, &datatype);
      }
    }
  }
  for(index = 0; index < rank; index++) {
    smpi_free_tmp_buffer(tmpbufs[index]);
  }
  for(index = 0; index < size-1; index++) {
    smpi_mpi_request_free(&requests[index]);
  }
  xbt_free(tmpbufs);
  xbt_free(requests);
}

void smpi_mpi_exscan(void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
{
  int system_tag = -888;
  int rank, size, other, index;
  MPI_Aint lb = 0, dataext = 0;
  MPI_Request *requests;
  void **tmpbufs;
  int recvbuf_is_empty = 1;
  rank = smpi_comm_rank(comm);
  size = smpi_comm_size(comm);

  smpi_datatype_extent(datatype, &lb, &dataext);

  // Send/Recv buffers to/from others
  requests = xbt_new(MPI_Request, size - 1);
  tmpbufs = xbt_new(void *, rank);
  index = 0;
  for(other = 0; other < rank; other++) {
    tmpbufs[index] = smpi_get_tmp_sendbuffer(count * dataext);
    requests[index] =
      smpi_irecv_init(tmpbufs[index], count, datatype, other, system_tag, comm);
    index++;
  }
  for(other = rank + 1; other < size; other++) {
    requests[index] =
      smpi_isend_init(sendbuf, count, datatype, other, system_tag, comm);
    index++;
  }
  // Wait for completion of all comms.
  smpi_mpi_startall(size - 1, requests);
  if(smpi_op_is_commute(op)){
    for(other = 0; other < size - 1; other++) {
      index = smpi_mpi_waitany(size - 1, requests, MPI_STATUS_IGNORE);
      if(index == MPI_UNDEFINED) {
        break;
      }
      if(index < rank) {
        if(recvbuf_is_empty){
          smpi_datatype_copy(tmpbufs[index], count, datatype, recvbuf, count, datatype);
          recvbuf_is_empty = 0;
        } else
          // #Request is below rank: it's a irecv
          smpi_op_apply(op, tmpbufs[index], recvbuf, &count, &datatype);
      }
    }
  }else{
    //non commutative case, wait in order
    for(other = 0; other < size - 1; other++) {
      smpi_mpi_wait(&(requests[other]), MPI_STATUS_IGNORE);
      if(other < rank) {
        if(recvbuf_is_empty){
          smpi_datatype_copy(tmpbufs[other], count, datatype, recvbuf, count, datatype);
          recvbuf_is_empty = 0;
        }else smpi_op_apply(op, tmpbufs[other], recvbuf, &count, &datatype);
      }
    }
  }
  for(index = 0; index < rank; index++) {
    smpi_free_tmp_buffer(tmpbufs[index]);
  }
  for(index = 0; index < size-1; index++) {
    smpi_mpi_request_free(&requests[index]);
  }
  xbt_free(tmpbufs);
  xbt_free(requests);
}