/* Copyright (c) 2007, 2008, 2009, 2010. The SimGrid Team.
 * All rights reserved. */

/* This program is free software; you can redistribute it and/or modify it
 * under the terms of the license (GNU LGPL) which comes with this package. */

#include "xbt/replay.h"
#include "simix/smx_private.h"
#include "surf/surf.h"
#include "simgrid/sg_config.h"

XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_base, smpi, "Logging specific to SMPI (base)");
static int match_recv(void* a, void* b, smx_action_t ignored) {
  MPI_Request ref = (MPI_Request)a;
  MPI_Request req = (MPI_Request)b;
  xbt_assert(ref, "Cannot match recv against null reference");
  xbt_assert(req, "Cannot match recv against null request");
  XBT_DEBUG("Trying to match a recv of src %d against %d, tag %d against %d", ref->src, req->src, ref->tag, req->tag);

  if((ref->src == MPI_ANY_SOURCE || req->src == ref->src)
     && (ref->tag == MPI_ANY_TAG || req->tag == ref->tag)){
    // we match, we can transfer some values
    // FIXME: move this to the copy function?
    if(ref->src == MPI_ANY_SOURCE) ref->real_src = req->src;
    if(ref->tag == MPI_ANY_TAG)    ref->real_tag = req->tag;
    if(ref->real_size < req->real_size) ref->truncated = 1;
    ref->detached_sender = req; // tie the sender to the receiver, as it is detached and has to be freed in the receiver
    return 1;
  }
  return 0;
}
static int match_send(void* a, void* b, smx_action_t ignored) {
  MPI_Request ref = (MPI_Request)a;
  MPI_Request req = (MPI_Request)b;
  xbt_assert(ref, "Cannot match send against null reference");
  xbt_assert(req, "Cannot match send against null request");
  XBT_DEBUG("Trying to match a send of src %d against %d, tag %d against %d", ref->src, req->src, ref->tag, req->tag);

  if((req->src == MPI_ANY_SOURCE || req->src == ref->src)
     && (req->tag == MPI_ANY_TAG || req->tag == ref->tag)){
    if(req->src == MPI_ANY_SOURCE) req->real_src = ref->src;
    if(req->tag == MPI_ANY_TAG)    req->real_tag = ref->tag;
    if(req->real_size < ref->real_size) req->truncated = 1;
    req->detached_sender = ref; // tie the sender to the receiver, as it is detached and has to be freed in the receiver
    return 1;
  }
  return 0;
}
typedef struct s_smpi_factor *smpi_factor_t;
typedef struct s_smpi_factor {
  long factor;
  int nb_values;
  double values[4]; // arbitrarily limited to 4 values per factor
} s_smpi_factor_t;
xbt_dynar_t smpi_os_values = NULL;
xbt_dynar_t smpi_or_values = NULL;
xbt_dynar_t smpi_ois_values = NULL;
// Methods used to parse and store the values for timing injections in smpi.
// These are taken from surf/network.c and generalized to handle more factors.
// These methods should be merged with those in surf/network.c (or moved somewhere into xbt?).
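// Illustrative sketch of the option syntax handled below (the concrete string is an
// assumption made for documentation purposes, not a value shipped with the code):
// a string such as "1:3e-6:2e-9;65536:5e-6:1e-9" contains ';'-separated entries of
// the form "factor:value1:value2:...". Each entry becomes one s_smpi_factor_t
// (factor = 1 or 65536 here, values = the remaining fields); smpi_os/smpi_ois/smpi_or
// below pick the entry matching the message size and inject values[0] + values[1] * size.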
static int factor_cmp(const void *pa, const void *pb)
{
  return (((s_smpi_factor_t*)pa)->factor > ((s_smpi_factor_t*)pb)->factor);
}
static xbt_dynar_t parse_factor(const char *smpi_coef_string)
{
  unsigned int iter = 0;
  unsigned int i = 0;
  char *value = NULL;
  s_smpi_factor_t fact;
  xbt_dynar_t smpi_factor, radical_elements, radical_elements2 = NULL;

  smpi_factor = xbt_dynar_new(sizeof(s_smpi_factor_t), NULL);
  radical_elements = xbt_str_split(smpi_coef_string, ";");
  xbt_dynar_foreach(radical_elements, iter, value) {
    fact.nb_values = 0;
    radical_elements2 = xbt_str_split(value, ":");
    if (xbt_dynar_length(radical_elements2) < 2 || xbt_dynar_length(radical_elements2) > 5)
      xbt_die("Malformed radical for smpi factor!");
    for(i = 0; i < xbt_dynar_length(radical_elements2); i++){
      if (i == 0) {
        fact.factor = atol(xbt_dynar_get_as(radical_elements2, i, char *));
      } else {
        fact.values[fact.nb_values] = atof(xbt_dynar_get_as(radical_elements2, i, char *));
        fact.nb_values++;
      }
    }
    xbt_dynar_push_as(smpi_factor, s_smpi_factor_t, fact);
    XBT_DEBUG("smpi_factor:\t%ld : %d values, first: %f", fact.factor, fact.nb_values, fact.values[0]);
    xbt_dynar_free(&radical_elements2);
  }
  xbt_dynar_free(&radical_elements);
  xbt_dynar_sort(smpi_factor, &factor_cmp);
  xbt_dynar_foreach(smpi_factor, iter, fact) {
    XBT_DEBUG("smpi_factor:\t%ld : %d values, first: %f", fact.factor, fact.nb_values, fact.values[0]);
  }
  return smpi_factor;
}
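// Worked example of the lookup performed by smpi_os/smpi_ois/smpi_or below
// (hypothetical numbers, only to illustrate the control flow): with the sorted
// factors {1: {3e-6, 2e-9}} and {65536: {5e-6, 1e-9}}, a 1024-byte message exceeds
// the first threshold but not the second, so the injected time is
// 3e-6 + 2e-9 * 1024 ≈ 5.05e-6 s; a 10 MiB message exceeds both thresholds and
// uses 5e-6 + 1e-9 * size instead.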
static double smpi_os(double size)
{
  if (!smpi_os_values) {
    smpi_os_values = parse_factor(sg_cfg_get_string("smpi/os"));
    smpi_register_static(smpi_os_values, xbt_dynar_free_voidp);
  }
  unsigned int iter = 0;
  s_smpi_factor_t fact;
  double current = 0.0;
  xbt_dynar_foreach(smpi_os_values, iter, fact) {
    if (size <= fact.factor) {
      XBT_DEBUG("os : %lf <= %ld return %f", size, fact.factor, current);
      return current;
    } else
      current = fact.values[0] + fact.values[1] * size;
  }
  XBT_DEBUG("os : %lf > %ld return %f", size, fact.factor, current);
  return current;
}
static double smpi_ois(double size)
{
  if (!smpi_ois_values) {
    smpi_ois_values = parse_factor(sg_cfg_get_string("smpi/ois"));
    smpi_register_static(smpi_ois_values, xbt_dynar_free_voidp);
  }
  unsigned int iter = 0;
  s_smpi_factor_t fact;
  double current = 0.0;
  xbt_dynar_foreach(smpi_ois_values, iter, fact) {
    if (size <= fact.factor) {
      XBT_DEBUG("ois : %lf <= %ld return %f", size, fact.factor, current);
      return current;
    } else
      current = fact.values[0] + fact.values[1] * size;
  }
  XBT_DEBUG("ois : %lf > %ld return %f", size, fact.factor, current);
  return current;
}
static double smpi_or(double size)
{
  if (!smpi_or_values) {
    smpi_or_values = parse_factor(sg_cfg_get_string("smpi/or"));
    smpi_register_static(smpi_or_values, xbt_dynar_free_voidp);
  }
  unsigned int iter = 0;
  s_smpi_factor_t fact;
  double current = 0.0;
  xbt_dynar_foreach(smpi_or_values, iter, fact) {
    if (size <= fact.factor) {
      XBT_DEBUG("or : %lf <= %ld return %f", size, fact.factor, current);
      return current;
    } else
      current = fact.values[0] + fact.values[1] * size;
  }
  XBT_DEBUG("or : %lf > %ld return %f", size, fact.factor, current);
  return current;
}
static MPI_Request build_request(void *buf, int count,
                                 MPI_Datatype datatype, int src, int dst,
                                 int tag, MPI_Comm comm, unsigned flags)
{
  MPI_Request request;
  void *old_buf = NULL;

  request = xbt_new(s_smpi_mpi_request_t, 1);

  s_smpi_subtype_t *subtype = datatype->substruct;

  if(datatype->has_subtype == 1){
    // This part handles the problem of non-contiguous memory
    old_buf = buf;
    buf = xbt_malloc(count * smpi_datatype_size(datatype));
    subtype->serialize(old_buf, buf, count, datatype->substruct);
  }

  request->buf = buf;
  // This part handles the problem of non-contiguous memory (for the
  // deserialization at the reception)
  request->old_buf = old_buf;
  request->old_type = datatype;

  request->size = smpi_datatype_size(datatype) * count;
  request->src = src;
  request->dst = dst;
  request->tag = tag;
  request->comm = comm;
  request->action = NULL;
  request->flags = flags;
  request->detached = 0;
  request->detached_sender = NULL;
  request->truncated = 0;
  request->real_size = 0;
  request->real_tag = 0;
  request->refcount = 1;

  if (flags & SEND) smpi_datatype_unuse(datatype);

  return request;
}
void smpi_empty_status(MPI_Status * status)
{
  if(status != MPI_STATUS_IGNORE) {
    status->MPI_SOURCE = MPI_ANY_SOURCE;
    status->MPI_TAG = MPI_ANY_TAG;
    status->MPI_ERROR = MPI_SUCCESS;
    status->count = 0;
  }
}
void smpi_action_trace_run(char *path)
{
  char *name;
  xbt_dynar_t todo;
  xbt_dict_cursor_t cursor;

  action_fp = fopen(path, "r");
  xbt_assert(action_fp != NULL, "Cannot open %s: %s", path,
             strerror(errno));

  if (!xbt_dict_is_empty(action_queues)) {
    XBT_WARN("Not all actions got consumed. If the simulation ended successfully (without deadlock), you may want to add new processes to your deployment file.");
    xbt_dict_foreach(action_queues, cursor, name, todo) {
      XBT_WARN("Still %lu actions for %s", xbt_dynar_length(todo), name);
    }
  }

  xbt_dict_free(&action_queues);
  action_queues = xbt_dict_new_homogeneous(NULL);
}
static void smpi_mpi_request_free_voidp(void* request)
{
  MPI_Request req = request;
  smpi_mpi_request_free(&req);
}
/* MPI Low level calls */
MPI_Request smpi_mpi_send_init(void *buf, int count, MPI_Datatype datatype,
                               int dst, int tag, MPI_Comm comm)
{
  MPI_Request request =
      build_request(buf, count, datatype, smpi_comm_rank(comm), dst, tag,
                    comm, PERSISTENT | SEND);
  return request;
}

MPI_Request smpi_mpi_ssend_init(void *buf, int count, MPI_Datatype datatype,
                                int dst, int tag, MPI_Comm comm)
{
  MPI_Request request =
      build_request(buf, count, datatype, smpi_comm_rank(comm), dst, tag,
                    comm, PERSISTENT | SSEND | SEND);
  return request;
}

MPI_Request smpi_mpi_recv_init(void *buf, int count, MPI_Datatype datatype,
                               int src, int tag, MPI_Comm comm)
{
  MPI_Request request =
      build_request(buf, count, datatype, src, smpi_comm_rank(comm), tag,
                    comm, PERSISTENT | RECV);
  return request;
}
void smpi_mpi_start(MPI_Request request)
{
  smx_rdv_t mailbox;

  xbt_assert(!request->action,
             "Cannot (re)start a non-finished communication");
  if(request->flags & RECV) {
    print_request("New recv", request);
    if (request->size < sg_cfg_get_int("smpi/async_small_thres"))
      mailbox = smpi_process_mailbox_small();
    else
      mailbox = smpi_process_mailbox();
    // we make a copy here, as the size is modified by simix, and we may reuse the request in another receive later
    request->real_size = request->size;
    smpi_datatype_use(request->old_type);
    request->action = simcall_comm_irecv(mailbox, request->buf, &request->real_size, &match_recv, request);

    // integrate pseudo-timing for buffering of small messages; do not bother to execute the simcall if it is 0
    double sleeptime = request->detached ? smpi_or(request->size) : 0.0;
    if (sleeptime != 0.0) {
      simcall_process_sleep(sleeptime);
      XBT_DEBUG("receiving size of %zu : sleep %lf", request->size, smpi_or(request->size));
    }
  } else { // this is a send
    int receiver = smpi_group_index(smpi_comm_group(request->comm), request->dst);

    int rank = smpi_process_index();
    if (TRACE_smpi_view_internals()) {
      TRACE_smpi_send(rank, rank, receiver);
    }
/*    if(receiver == MPI_UNDEFINED) {*/
/*      XBT_WARN("Trying to send a message to a wrong rank");*/
/*    }*/
    print_request("New send", request);
    if (request->size < sg_cfg_get_int("smpi/async_small_thres")) { // eager mode
      mailbox = smpi_process_remote_mailbox_small(receiver);
    } else {
      XBT_DEBUG("Send request %p is not in the permanent receive mailbox (buf: %p)", request, request->buf);
      mailbox = smpi_process_remote_mailbox(receiver);
    }
    if ((!(request->flags & SSEND)) && (request->size < sg_cfg_get_int("smpi/send_is_detached_thres"))) {
      void *oldbuf = NULL;
      request->detached = 1;
      if(request->old_type->has_subtype == 0){
        oldbuf = request->buf;
        if (oldbuf && request->size != 0){
          request->buf = xbt_malloc(request->size);
          memcpy(request->buf, oldbuf, request->size);
        }
      }
      XBT_DEBUG("Send request %p is detached; buf %p copied into %p", request, oldbuf, request->buf);
    }

    // we make a copy here, as the size is modified by simix, and we may reuse the request in another receive later
    request->real_size = request->size;
    smpi_datatype_use(request->old_type);

    // if we are giving back the control to the user without waiting for completion, we have to inject timings
    double sleeptime = 0.0;
    if(request->detached || (request->flags & (ISEND|SSEND))){ // issend should be treated as isend
      // isend and send timings may be different
      sleeptime = (request->flags & ISEND) ? smpi_ois(request->size) : smpi_os(request->size);
    }
    if (sleeptime != 0.0) {
      simcall_process_sleep(sleeptime);
      XBT_DEBUG("sending size of %zu : sleep %lf", request->size, smpi_os(request->size));
    }

    request->action =
        simcall_comm_isend(mailbox, request->size, -1.0,
                           request->buf, request->real_size,
                           &match_send,
                           &smpi_mpi_request_free_voidp, // how to free the userdata if a detached send fails
                           request,
                           // detach if msg size < eager/rdv switch limit
                           request->detached);

    /* FIXME: detached sends are not traceable (request->action == NULL) */
    if (request->action)
      simcall_set_category(request->action, TRACE_internal_smpi_get_category());
  }
}
void smpi_mpi_startall(int count, MPI_Request * requests)
{
  int i;
  for(i = 0; i < count; i++)
    smpi_mpi_start(requests[i]);
}
void smpi_mpi_request_free(MPI_Request * request)
{
  if((*request) != MPI_REQUEST_NULL){
    (*request)->refcount--;
    if((*request)->refcount < 0) xbt_die("wrong refcount");
    if((*request)->refcount == 0){
      xbt_free(*request); // actually release the memory once nobody references it anymore
      *request = MPI_REQUEST_NULL;
    }
  } else {
    xbt_die("freeing an already free request");
  }
}
MPI_Request smpi_isend_init(void *buf, int count, MPI_Datatype datatype,
                            int dst, int tag, MPI_Comm comm)
{
  MPI_Request request =
      build_request(buf, count, datatype, smpi_comm_rank(comm), dst, tag,
                    comm, NON_PERSISTENT | SEND);
  return request;
}

MPI_Request smpi_mpi_isend(void *buf, int count, MPI_Datatype datatype,
                           int dst, int tag, MPI_Comm comm)
{
  MPI_Request request =
      build_request(buf, count, datatype, smpi_comm_rank(comm), dst, tag,
                    comm, NON_PERSISTENT | ISEND | SEND);

  smpi_mpi_start(request);
  return request;
}

MPI_Request smpi_mpi_issend(void *buf, int count, MPI_Datatype datatype,
                            int dst, int tag, MPI_Comm comm)
{
  MPI_Request request =
      build_request(buf, count, datatype, smpi_comm_rank(comm), dst, tag,
                    comm, NON_PERSISTENT | ISEND | SSEND | SEND);
  smpi_mpi_start(request);
  return request;
}
MPI_Request smpi_irecv_init(void *buf, int count, MPI_Datatype datatype,
                            int src, int tag, MPI_Comm comm)
{
  MPI_Request request =
      build_request(buf, count, datatype, src, smpi_comm_rank(comm), tag,
                    comm, NON_PERSISTENT | RECV);
  return request;
}

MPI_Request smpi_mpi_irecv(void *buf, int count, MPI_Datatype datatype,
                           int src, int tag, MPI_Comm comm)
{
  MPI_Request request =
      build_request(buf, count, datatype, src, smpi_comm_rank(comm), tag,
                    comm, NON_PERSISTENT | RECV);

  smpi_mpi_start(request);
  return request;
}
void smpi_mpi_recv(void *buf, int count, MPI_Datatype datatype, int src,
                   int tag, MPI_Comm comm, MPI_Status * status)
{
  MPI_Request request;
  request = smpi_mpi_irecv(buf, count, datatype, src, tag, comm);
  smpi_mpi_wait(&request, status);
}

void smpi_mpi_send(void *buf, int count, MPI_Datatype datatype, int dst,
                   int tag, MPI_Comm comm)
{
  MPI_Request request =
      build_request(buf, count, datatype, smpi_comm_rank(comm), dst, tag,
                    comm, NON_PERSISTENT | SEND);

  smpi_mpi_start(request);
  smpi_mpi_wait(&request, MPI_STATUS_IGNORE);
}

void smpi_mpi_ssend(void *buf, int count, MPI_Datatype datatype,
                    int dst, int tag, MPI_Comm comm)
{
  MPI_Request request = smpi_mpi_issend(buf, count, datatype, dst, tag, comm);
  smpi_mpi_wait(&request, MPI_STATUS_IGNORE);
}
void smpi_mpi_sendrecv(void *sendbuf, int sendcount, MPI_Datatype sendtype,
                       int dst, int sendtag, void *recvbuf, int recvcount,
                       MPI_Datatype recvtype, int src, int recvtag,
                       MPI_Comm comm, MPI_Status * status)
{
  MPI_Request requests[2];
  MPI_Status stats[2];

  requests[0] =
      smpi_isend_init(sendbuf, sendcount, sendtype, dst, sendtag, comm);
  requests[1] =
      smpi_irecv_init(recvbuf, recvcount, recvtype, src, recvtag, comm);
  smpi_mpi_startall(2, requests);
  smpi_mpi_waitall(2, requests, stats);
  if(status != MPI_STATUS_IGNORE) {
    // Copy receive status
    *status = stats[1];
  }
}
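// Example of the conversion done by smpi_mpi_get_count() below (numbers purely
// illustrative): if a receive matched 40 bytes (status->count == 40) and the
// datatype is a 4-byte integer type, the function reports 40 / 4 = 10 elements.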
int smpi_mpi_get_count(MPI_Status * status, MPI_Datatype datatype)
{
  return status->count / smpi_datatype_size(datatype);
}
static void finish_wait(MPI_Request * request, MPI_Status * status)
{
  MPI_Request req = *request;
  if(status != MPI_STATUS_IGNORE)
    smpi_empty_status(status);

  if(!(req->detached && req->flags & SEND)){
    if(status != MPI_STATUS_IGNORE) {
      status->MPI_SOURCE = req->src == MPI_ANY_SOURCE ? req->real_src : req->src;
      status->MPI_TAG = req->tag == MPI_ANY_TAG ? req->real_tag : req->tag;
      status->MPI_ERROR = req->truncated ? MPI_ERR_TRUNCATE : MPI_SUCCESS;
      // this handles the case where the size in the receive differs from the size in the send
      // FIXME: really this should just contain the count of receive-type blocks
      status->count = req->real_size;
    }

    print_request("Finishing", req);
    MPI_Datatype datatype = req->old_type;

    if(datatype->has_subtype == 1){
      // This part handles the problem of non-contiguous memory:
      // the deserialization at the reception
      s_smpi_subtype_t *subtype = datatype->substruct;
      if(req->flags & RECV) {
        subtype->unserialize(req->buf, req->old_buf, req->real_size/smpi_datatype_size(datatype), datatype->substruct);
      }
      if(req->detached == 0) free(req->buf);
    }
    smpi_datatype_unuse(datatype);
  }

  if (TRACE_smpi_view_internals()) {
    if(req->flags & RECV){
      int rank = smpi_process_index();
      int src_traced = smpi_group_index(smpi_comm_group(req->comm), req->src == MPI_ANY_SOURCE ? req->real_src : req->src);
      TRACE_smpi_recv(rank, src_traced, rank);
    }
  }

  if(req->detached_sender != NULL){
    smpi_mpi_request_free(&(req->detached_sender));
  }

  if(req->flags & NON_PERSISTENT) {
    smpi_mpi_request_free(request);
  }
}
int smpi_mpi_test(MPI_Request * request, MPI_Status * status) {
  int flag;

  // assume that the request is not MPI_REQUEST_NULL (filtered in PMPI_Test or smpi_mpi_testall before)
  if ((*request)->action == NULL)
    flag = 1;
  else
    flag = simcall_comm_test((*request)->action);
  if (flag) {
    finish_wait(request, status);
    *request = MPI_REQUEST_NULL;
  } else
    smpi_empty_status(status);
  return flag;
}
int smpi_mpi_testany(int count, MPI_Request requests[], int *index,
                     MPI_Status * status)
{
  xbt_dynar_t comms;
  int i, flag = 0, size = 0;
  int *map;

  *index = MPI_UNDEFINED;
  comms = xbt_dynar_new(sizeof(smx_action_t), NULL);
  map = xbt_new(int, count);
  for(i = 0; i < count; i++) {
    if((requests[i] != MPI_REQUEST_NULL) && requests[i]->action) {
      xbt_dynar_push(comms, &requests[i]->action);
      map[size++] = i;
    }
  }
  if(size > 0) {
    i = simcall_comm_testany(comms);
    // i == -1 means nothing completed; not MPI_UNDEFINED, as this is a simix return code
    if(i != -1) {
      *index = map[i];
      finish_wait(&requests[*index], status);
      flag = 1;
    }
  } else {
    // all requests are null or inactive, return true
    flag = 1;
    smpi_empty_status(status);
  }
  xbt_free(map);
  xbt_dynar_free(&comms);
  return flag;
}
int smpi_mpi_testall(int count, MPI_Request requests[],
                     MPI_Status status[])
{
  MPI_Status stat;
  MPI_Status *pstat = status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat;
  int i, flag = 1;

  for(i = 0; i < count; i++){
    if(requests[i] != MPI_REQUEST_NULL){
      if (smpi_mpi_test(&requests[i], pstat) != 1)
        flag = 0;
    } else
      smpi_empty_status(pstat);
    if(status != MPI_STATUSES_IGNORE)
      status[i] = *pstat;
  }
  return flag;
}
void smpi_mpi_probe(int source, int tag, MPI_Comm comm, MPI_Status* status){
  int flag = 0;
  // FIXME: find another way to avoid busy waiting?
  // The issue here is that we have to wait on a nonexistent comm.
  while (flag == 0) {
    smpi_mpi_iprobe(source, tag, comm, &flag, status);
    XBT_DEBUG("Busy Waiting on probing : %d", flag);
    if (!flag)
      simcall_process_sleep(0.0001);
  }
}
void smpi_mpi_iprobe(int source, int tag, MPI_Comm comm, int* flag, MPI_Status* status){
  MPI_Request request = build_request(NULL, 0, MPI_CHAR, source, smpi_comm_rank(comm), tag,
                                      comm, NON_PERSISTENT | RECV);

  // behave like a receive, but don't do it
  smx_rdv_t mailbox;

  print_request("New iprobe", request);
  // We have to test both mailboxes as we don't know if we will receive in one or the other
  if (sg_cfg_get_int("smpi/async_small_thres") > 0){
    mailbox = smpi_process_mailbox_small();
    XBT_DEBUG("trying to probe the perm recv mailbox");
    request->action = simcall_comm_iprobe(mailbox, request->src, request->tag, &match_recv, (void*)request);
  }
  if (request->action == NULL){
    mailbox = smpi_process_mailbox();
    XBT_DEBUG("trying to probe the other mailbox");
    request->action = simcall_comm_iprobe(mailbox, request->src, request->tag, &match_recv, (void*)request);
  }

  if(request->action){
    MPI_Request req = (MPI_Request)SIMIX_comm_get_src_data(request->action);
    *flag = 1;
    if(status != MPI_STATUS_IGNORE) {
      status->MPI_SOURCE = req->src;
      status->MPI_TAG = req->tag;
      status->MPI_ERROR = MPI_SUCCESS;
      status->count = req->real_size;
    }
  } else
    *flag = 0;

  smpi_mpi_request_free(&request);
}
void smpi_mpi_wait(MPI_Request * request, MPI_Status * status)
{
  print_request("Waiting", *request);
  if ((*request)->action != NULL) { // this is not a detached send
    simcall_comm_wait((*request)->action, -1.0);
  }
  finish_wait(request, status);

  // FIXME: for a detached send, finish_wait is not called
}
int smpi_mpi_waitany(int count, MPI_Request requests[],
                     MPI_Status * status)
{
  xbt_dynar_t comms;
  int i, size, index;
  int *map;

  index = MPI_UNDEFINED;
  // Wait for a request to complete
  comms = xbt_dynar_new(sizeof(smx_action_t), NULL);
  map = xbt_new(int, count);
  size = 0;
  XBT_DEBUG("Wait for one of %d", count);
  for(i = 0; i < count; i++) {
    if(requests[i] != MPI_REQUEST_NULL) {
      if (requests[i]->action != NULL) {
        XBT_DEBUG("Waiting any %p ", requests[i]);
        xbt_dynar_push(comms, &requests[i]->action);
        map[size] = i;
        size++;
      } else {
        // This is a finished detached request, let's return this one
        size = 0; // so we free the dynar but don't do the waitany call
        index = i;
        finish_wait(&requests[i], status); // cleanup if refcount = 0
        requests[i] = MPI_REQUEST_NULL;    // set to null
        break;
      }
    }
  }
  if(size > 0) {
    i = simcall_comm_waitany(comms);

    // i is a simix return code, not MPI_UNDEFINED
    if (i != -1) {
      index = map[i];
      finish_wait(&requests[index], status);
    }
  }
  xbt_free(map);
  xbt_dynar_free(&comms);

  if (index == MPI_UNDEFINED)
    smpi_empty_status(status);

  return index;
}
int smpi_mpi_waitall(int count, MPI_Request requests[],
                     MPI_Status status[])
{
  int index, c;
  MPI_Status stat;
  MPI_Status *pstat = status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat;
  int retvalue = MPI_SUCCESS;
  // tag invalid requests in the set
  if (status != MPI_STATUSES_IGNORE) {
    for (c = 0; c < count; c++) {
      if (requests[c] == MPI_REQUEST_NULL || requests[c]->dst == MPI_PROC_NULL) {
        smpi_empty_status(&status[c]);
      } else if (requests[c]->src == MPI_PROC_NULL) {
        smpi_empty_status(&status[c]);
        status[c].MPI_SOURCE = MPI_PROC_NULL;
      }
    }
  }
  for(c = 0; c < count; c++) {
    if (MC_is_active()) {
      smpi_mpi_wait(&requests[c], pstat);
      index = c;
    } else {
      index = smpi_mpi_waitany(count, requests, pstat);
      if (index == MPI_UNDEFINED)
        break;
      requests[index] = MPI_REQUEST_NULL;
    }
    if (status != MPI_STATUSES_IGNORE) {
      status[index] = *pstat;
      if (status[index].MPI_ERROR == MPI_ERR_TRUNCATE)
        retvalue = MPI_ERR_IN_STATUS;
    }
  }
  return retvalue;
}
int smpi_mpi_waitsome(int incount, MPI_Request requests[], int *indices,
                      MPI_Status status[])
{
  int i, count = 0, index;
  MPI_Status stat;
  MPI_Status *pstat = status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat;

  for(i = 0; i < incount; i++) {
    index = smpi_mpi_waitany(incount, requests, pstat);
    if(index != MPI_UNDEFINED){
      indices[count] = index;
      count++;
      if(status != MPI_STATUSES_IGNORE)
        status[index] = *pstat;
      requests[index] = MPI_REQUEST_NULL;
    } else {
      return MPI_UNDEFINED;
    }
  }
  return count;
}
int smpi_mpi_testsome(int incount, MPI_Request requests[], int *indices,
                      MPI_Status status[])
{
  int i, count = 0, count_dead = 0;
  MPI_Status stat;
  MPI_Status *pstat = status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat;

  for(i = 0; i < incount; i++) {
    if((requests[i] != MPI_REQUEST_NULL)) {
      if(smpi_mpi_test(&requests[i], pstat)) {
        indices[count] = i;
        count++;
        if(status != MPI_STATUSES_IGNORE)
          status[i] = *pstat;
        requests[i] = MPI_REQUEST_NULL;
      }
    } else
      count_dead++;
  }
  if(count_dead == incount) return MPI_UNDEFINED;
  else return count;
}
void smpi_mpi_bcast(void *buf, int count, MPI_Datatype datatype, int root,
                    MPI_Comm comm)
{
  // arity=2: a binary tree; arity=4 seems to be a good setting (see P2P-MPI)
  nary_tree_bcast(buf, count, datatype, root, comm, 4);
}

void smpi_mpi_barrier(MPI_Comm comm)
{
  // arity=2: a binary tree; arity=4 seems to be a good setting (see P2P-MPI)
  nary_tree_barrier(comm, 4);
}
void smpi_mpi_gather(void *sendbuf, int sendcount, MPI_Datatype sendtype,
                     void *recvbuf, int recvcount, MPI_Datatype recvtype,
                     int root, MPI_Comm comm)
{
  int system_tag = 666;
  int rank, size, src, index;
  MPI_Aint lb = 0, recvext = 0;
  MPI_Request *requests;

  rank = smpi_comm_rank(comm);
  size = smpi_comm_size(comm);
  if(rank != root) {
    // Send buffer to root
    smpi_mpi_send(sendbuf, sendcount, sendtype, root, system_tag, comm);
  } else {
    // FIXME: check for errors
    smpi_datatype_extent(recvtype, &lb, &recvext);
    // Local copy from root
    smpi_datatype_copy(sendbuf, sendcount, sendtype,
                       (char *)recvbuf + root * recvcount * recvext, recvcount, recvtype);
    // Receive buffers from senders
    requests = xbt_new(MPI_Request, size - 1);
    index = 0;
    for(src = 0; src < size; src++) {
      if(src != root) {
        requests[index] = smpi_irecv_init((char *)recvbuf + src * recvcount * recvext,
                                          recvcount, recvtype,
                                          src, system_tag, comm);
        index++;
      }
    }
    // Wait for completion of irecv's.
    smpi_mpi_startall(size - 1, requests);
    smpi_mpi_waitall(size - 1, requests, MPI_STATUS_IGNORE);
    xbt_free(requests);
  }
}
void smpi_mpi_gatherv(void *sendbuf, int sendcount, MPI_Datatype sendtype,
                      void *recvbuf, int *recvcounts, int *displs,
                      MPI_Datatype recvtype, int root, MPI_Comm comm)
{
  int system_tag = 666;
  int rank, size, src, index;
  MPI_Aint lb = 0, recvext = 0;
  MPI_Request *requests;

  rank = smpi_comm_rank(comm);
  size = smpi_comm_size(comm);
  if(rank != root) {
    // Send buffer to root
    smpi_mpi_send(sendbuf, sendcount, sendtype, root, system_tag, comm);
  } else {
    // FIXME: check for errors
    smpi_datatype_extent(recvtype, &lb, &recvext);
    // Local copy from root
    smpi_datatype_copy(sendbuf, sendcount, sendtype,
                       (char *)recvbuf + displs[root] * recvext,
                       recvcounts[root], recvtype);
    // Receive buffers from senders
    requests = xbt_new(MPI_Request, size - 1);
    index = 0;
    for(src = 0; src < size; src++) {
      if(src != root) {
        requests[index] =
            smpi_irecv_init((char *)recvbuf + displs[src] * recvext,
                            recvcounts[src], recvtype, src, system_tag, comm);
        index++;
      }
    }
    // Wait for completion of irecv's.
    smpi_mpi_startall(size - 1, requests);
    smpi_mpi_waitall(size - 1, requests, MPI_STATUS_IGNORE);
    xbt_free(requests);
  }
}
void smpi_mpi_allgather(void *sendbuf, int sendcount,
                        MPI_Datatype sendtype, void *recvbuf,
                        int recvcount, MPI_Datatype recvtype,
                        MPI_Comm comm)
{
  int system_tag = 666;
  int rank, size, other, index;
  MPI_Aint lb = 0, recvext = 0;
  MPI_Request *requests;

  rank = smpi_comm_rank(comm);
  size = smpi_comm_size(comm);
  // FIXME: check for errors
  smpi_datatype_extent(recvtype, &lb, &recvext);
  // Local copy from self
  smpi_datatype_copy(sendbuf, sendcount, sendtype,
                     (char *)recvbuf + rank * recvcount * recvext, recvcount,
                     recvtype);
  // Send/Recv buffers to/from others
  requests = xbt_new(MPI_Request, 2 * (size - 1));
  index = 0;
  for(other = 0; other < size; other++) {
    if(other != rank) {
      requests[index] =
          smpi_isend_init(sendbuf, sendcount, sendtype, other, system_tag,
                          comm);
      index++;
      requests[index] = smpi_irecv_init((char *)recvbuf + other * recvcount * recvext,
                                        recvcount, recvtype, other,
                                        system_tag, comm);
      index++;
    }
  }
  // Wait for completion of all comms.
  smpi_mpi_startall(2 * (size - 1), requests);
  smpi_mpi_waitall(2 * (size - 1), requests, MPI_STATUS_IGNORE);
  xbt_free(requests);
}
void smpi_mpi_allgatherv(void *sendbuf, int sendcount,
                         MPI_Datatype sendtype, void *recvbuf,
                         int *recvcounts, int *displs,
                         MPI_Datatype recvtype, MPI_Comm comm)
{
  int system_tag = 666;
  int rank, size, other, index;
  MPI_Aint lb = 0, recvext = 0;
  MPI_Request *requests;

  rank = smpi_comm_rank(comm);
  size = smpi_comm_size(comm);
  // FIXME: check for errors
  smpi_datatype_extent(recvtype, &lb, &recvext);
  // Local copy from self
  smpi_datatype_copy(sendbuf, sendcount, sendtype,
                     (char *)recvbuf + displs[rank] * recvext,
                     recvcounts[rank], recvtype);
  // Send/Recv buffers to/from others
  requests = xbt_new(MPI_Request, 2 * (size - 1));
  index = 0;
  for(other = 0; other < size; other++) {
    if(other != rank) {
      requests[index] =
          smpi_isend_init(sendbuf, sendcount, sendtype, other, system_tag,
                          comm);
      index++;
      requests[index] =
          smpi_irecv_init((char *)recvbuf + displs[other] * recvext, recvcounts[other],
                          recvtype, other, system_tag, comm);
      index++;
    }
  }
  // Wait for completion of all comms.
  smpi_mpi_startall(2 * (size - 1), requests);
  smpi_mpi_waitall(2 * (size - 1), requests, MPI_STATUS_IGNORE);
  xbt_free(requests);
}
void smpi_mpi_scatter(void *sendbuf, int sendcount, MPI_Datatype sendtype,
                      void *recvbuf, int recvcount, MPI_Datatype recvtype,
                      int root, MPI_Comm comm)
{
  int system_tag = 666;
  int rank, size, dst, index;
  MPI_Aint lb = 0, sendext = 0;
  MPI_Request *requests;

  rank = smpi_comm_rank(comm);
  size = smpi_comm_size(comm);
  if(rank != root) {
    // Recv buffer from root
    smpi_mpi_recv(recvbuf, recvcount, recvtype, root, system_tag, comm,
                  MPI_STATUS_IGNORE);
  } else {
    // FIXME: check for errors
    smpi_datatype_extent(sendtype, &lb, &sendext);
    // Local copy from root
    smpi_datatype_copy((char *)sendbuf + root * sendcount * sendext,
                       sendcount, sendtype, recvbuf, recvcount, recvtype);
    // Send buffers to receivers
    requests = xbt_new(MPI_Request, size - 1);
    index = 0;
    for(dst = 0; dst < size; dst++) {
      if(dst != root) {
        requests[index] = smpi_isend_init((char *)sendbuf + dst * sendcount * sendext,
                                          sendcount, sendtype, dst,
                                          system_tag, comm);
        index++;
      }
    }
    // Wait for completion of isend's.
    smpi_mpi_startall(size - 1, requests);
    smpi_mpi_waitall(size - 1, requests, MPI_STATUS_IGNORE);
    xbt_free(requests);
  }
}
void smpi_mpi_scatterv(void *sendbuf, int *sendcounts, int *displs,
                       MPI_Datatype sendtype, void *recvbuf, int recvcount,
                       MPI_Datatype recvtype, int root, MPI_Comm comm)
{
  int system_tag = 666;
  int rank, size, dst, index;
  MPI_Aint lb = 0, sendext = 0;
  MPI_Request *requests;

  rank = smpi_comm_rank(comm);
  size = smpi_comm_size(comm);
  if(rank != root) {
    // Recv buffer from root
    smpi_mpi_recv(recvbuf, recvcount, recvtype, root, system_tag, comm,
                  MPI_STATUS_IGNORE);
  } else {
    // FIXME: check for errors
    smpi_datatype_extent(sendtype, &lb, &sendext);
    // Local copy from root
    smpi_datatype_copy((char *)sendbuf + displs[root] * sendext, sendcounts[root],
                       sendtype, recvbuf, recvcount, recvtype);
    // Send buffers to receivers
    requests = xbt_new(MPI_Request, size - 1);
    index = 0;
    for(dst = 0; dst < size; dst++) {
      if(dst != root) {
        requests[index] =
            smpi_isend_init((char *)sendbuf + displs[dst] * sendext, sendcounts[dst],
                            sendtype, dst, system_tag, comm);
        index++;
      }
    }
    // Wait for completion of isend's.
    smpi_mpi_startall(size - 1, requests);
    smpi_mpi_waitall(size - 1, requests, MPI_STATUS_IGNORE);
    xbt_free(requests);
  }
}
void smpi_mpi_reduce(void *sendbuf, void *recvbuf, int count,
                     MPI_Datatype datatype, MPI_Op op, int root,
                     MPI_Comm comm)
{
  int system_tag = 666;
  int rank, size, src, index;
  MPI_Aint lb = 0, dataext = 0;
  MPI_Request *requests;
  void **tmpbufs;

  rank = smpi_comm_rank(comm);
  size = smpi_comm_size(comm);
  if(rank != root) {
    // Send buffer to root
    smpi_mpi_send(sendbuf, count, datatype, root, system_tag, comm);
  } else {
    // FIXME: check for errors
    smpi_datatype_extent(datatype, &lb, &dataext);
    // Local copy from root
    if (sendbuf && recvbuf)
      smpi_datatype_copy(sendbuf, count, datatype, recvbuf, count, datatype);
    // Receive buffers from senders
    // TODO: make an MPI_Barrier here?
    requests = xbt_new(MPI_Request, size - 1);
    tmpbufs = xbt_new(void *, size - 1);
    index = 0;
    for(src = 0; src < size; src++) {
      if(src != root) {
        // FIXME: possibly overkill when we have contiguous/noncontiguous data
        tmpbufs[index] = xbt_malloc(count * dataext);
        requests[index] =
            smpi_irecv_init(tmpbufs[index], count, datatype, src,
                            system_tag, comm);
        index++;
      }
    }
    // Wait for completion of irecv's.
    smpi_mpi_startall(size - 1, requests);
    for(src = 0; src < size - 1; src++) {
      index = smpi_mpi_waitany(size - 1, requests, MPI_STATUS_IGNORE);
      XBT_DEBUG("finished waiting any request with index %d", index);
      if(index == MPI_UNDEFINED) {
        break;
      }
      if(op) /* op can be MPI_OP_NULL that does nothing */
        smpi_op_apply(op, tmpbufs[index], recvbuf, &count, &datatype);
    }
    for(index = 0; index < size - 1; index++) {
      xbt_free(tmpbufs[index]);
    }
    xbt_free(tmpbufs);
    xbt_free(requests);
  }
}
void smpi_mpi_allreduce(void *sendbuf, void *recvbuf, int count,
                        MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
{
  smpi_mpi_reduce(sendbuf, recvbuf, count, datatype, op, 0, comm);
  smpi_mpi_bcast(recvbuf, count, datatype, 0, comm);
}
void smpi_mpi_scan(void *sendbuf, void *recvbuf, int count,
                   MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
{
  int system_tag = 666;
  int rank, size, other, index;
  MPI_Aint lb = 0, dataext = 0;
  MPI_Request *requests;
  void **tmpbufs;

  rank = smpi_comm_rank(comm);
  size = smpi_comm_size(comm);

  // FIXME: check for errors
  smpi_datatype_extent(datatype, &lb, &dataext);

  // Local copy from self
  smpi_datatype_copy(sendbuf, count, datatype, recvbuf, count, datatype);

  // Send/Recv buffers to/from others
  requests = xbt_new(MPI_Request, size - 1);
  tmpbufs = xbt_new(void *, rank);
  index = 0;
  for(other = 0; other < rank; other++) {
    // FIXME: possibly overkill when we have contiguous/noncontiguous data
    tmpbufs[index] = xbt_malloc(count * dataext);
    requests[index] =
        smpi_irecv_init(tmpbufs[index], count, datatype, other, system_tag,
                        comm);
    index++;
  }
  for(other = rank + 1; other < size; other++) {
    requests[index] =
        smpi_isend_init(sendbuf, count, datatype, other, system_tag, comm);
    index++;
  }
  // Wait for completion of all comms.
  smpi_mpi_startall(size - 1, requests);
  for(other = 0; other < size - 1; other++) {
    index = smpi_mpi_waitany(size - 1, requests, MPI_STATUS_IGNORE);
    if(index == MPI_UNDEFINED) {
      break;
    }
    if(index < rank) {
      // Request index is below rank: it's an irecv
      smpi_op_apply(op, tmpbufs[index], recvbuf, &count, &datatype);
    }
  }
  for(index = 0; index < rank; index++) {
    xbt_free(tmpbufs[index]);
  }
  xbt_free(tmpbufs);
  xbt_free(requests);
}