-/* Copyright (c) 2007, 2008, 2009, 2010. The SimGrid Team.
+/* Copyright (c) 2007-2013. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
#include <errno.h>
#include "simix/smx_private.h"
#include "surf/surf.h"
-
+#include "simgrid/sg_config.h"
+#include "colls/colls.h"
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_base, smpi, "Logging specific to SMPI (base)");
xbt_assert(ref, "Cannot match recv against null reference");
xbt_assert(req, "Cannot match recv against null request");
- return (ref->src == MPI_ANY_SOURCE || req->src == ref->src)
- && (ref->tag == MPI_ANY_TAG || req->tag == ref->tag);
+ if((ref->src == MPI_ANY_SOURCE || req->src == ref->src)
+ && ((ref->tag == MPI_ANY_TAG && req->tag >=0) || req->tag == ref->tag)){
+    // we have a match: transfer the values the receiver is missing (real source/tag, truncation, detached sender)
+ // FIXME : move this to the copy function ?
+ if(ref->src == MPI_ANY_SOURCE)ref->real_src = req->src;
+ if(ref->tag == MPI_ANY_TAG)ref->real_tag = req->tag;
+ if(ref->real_size < req->real_size) ref->truncated = 1;
+ if(req->detached==1){
+ ref->detached_sender=req; //tie the sender to the receiver, as it is detached and has to be freed in the receiver
+ }
+ XBT_DEBUG("match succeeded");
+ return 1;
+ }else return 0;
}
static int match_send(void* a, void* b,smx_action_t ignored) {
XBT_DEBUG("Trying to match a send of src %d against %d, tag %d against %d",ref->src,req->src, ref->tag, req->tag);
xbt_assert(ref, "Cannot match send against null reference");
xbt_assert(req, "Cannot match send against null request");
- return (req->src == MPI_ANY_SOURCE || req->src == ref->src)
- && (req->tag == MPI_ANY_TAG || req->tag == ref->tag);
+
+ if((req->src == MPI_ANY_SOURCE || req->src == ref->src)
+ && ((req->tag == MPI_ANY_TAG && ref->tag >=0)|| req->tag == ref->tag))
+ {
+ if(req->src == MPI_ANY_SOURCE)req->real_src = ref->src;
+ if(req->tag == MPI_ANY_TAG)req->real_tag = ref->tag;
+ if(req->real_size < ref->real_size) req->truncated = 1;
+ if(ref->detached==1){
+ req->detached_sender=ref; //tie the sender to the receiver, as it is detached and has to be freed in the receiver
+ }
+ XBT_DEBUG("match succeeded");
+ return 1;
+ } else return 0;
+}
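+
+// About the "tag >= 0" guards in the two matchers above: user-level tags are
+// always >= 0, while internal messages (the COLL_TAG_* constants, or the
+// -888 tag used by scan/exscan below) carry negative tags, so MPI_ANY_TAG
+// must not be allowed to match them.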
+
+
+typedef struct s_smpi_factor *smpi_factor_t;
+typedef struct s_smpi_factor {
+ long factor;
+ int nb_values;
+  double values[4]; // arbitrarily set to 4
+} s_smpi_factor_t;
+xbt_dynar_t smpi_os_values = NULL;
+xbt_dynar_t smpi_or_values = NULL;
+xbt_dynar_t smpi_ois_values = NULL;
+
+// Methods used to parse and store the values for timing injections in smpi
+// These are taken from surf/network.c and generalized to have more factors
+// These methods should be merged with those in surf/network.c (moved somewhere in xbt ?)
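+//
+// A factor string is a ';'-separated list of "threshold:value1[:value2...]"
+// entries, each with 1 to 4 values. For example (hypothetical numbers), with
+// --cfg=smpi/os:"0:1.5e-6:2e-10;65536:3e-6:1e-10", messages up to 64 KiB get
+// os(size) = 1.5e-6 + 2e-10*size, and larger ones get 3e-6 + 1e-10*size.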
+
+static int factor_cmp(const void *pa, const void *pb)
+{
+  // return negative/zero/positive, as qsort-style comparators require
+  return (((s_smpi_factor_t*)pa)->factor > ((s_smpi_factor_t*)pb)->factor) ? 1 :
+         (((s_smpi_factor_t*)pa)->factor < ((s_smpi_factor_t*)pb)->factor) ? -1 : 0;
+}
+
+
+static xbt_dynar_t parse_factor(const char *smpi_coef_string)
+{
+ char *value = NULL;
+ unsigned int iter = 0;
+ s_smpi_factor_t fact;
+ int i=0;
+ xbt_dynar_t smpi_factor, radical_elements, radical_elements2 = NULL;
+
+ smpi_factor = xbt_dynar_new(sizeof(s_smpi_factor_t), NULL);
+ radical_elements = xbt_str_split(smpi_coef_string, ";");
+ xbt_dynar_foreach(radical_elements, iter, value) {
+ fact.nb_values=0;
+ radical_elements2 = xbt_str_split(value, ":");
+ if (xbt_dynar_length(radical_elements2) <2 || xbt_dynar_length(radical_elements2) > 5)
+ xbt_die("Malformed radical for smpi factor!");
+ for(i =0; i<xbt_dynar_length(radical_elements2);i++ ){
+ if (i==0){
+ fact.factor = atol(xbt_dynar_get_as(radical_elements2, i, char *));
+ }else{
+ fact.values[fact.nb_values] = atof(xbt_dynar_get_as(radical_elements2, i, char *));
+ fact.nb_values++;
+ }
+ }
+
+ xbt_dynar_push_as(smpi_factor, s_smpi_factor_t, fact);
+ XBT_DEBUG("smpi_factor:\t%ld : %d values, first: %f", fact.factor, fact.nb_values ,fact.values[0]);
+ xbt_dynar_free(&radical_elements2);
+ }
+ xbt_dynar_free(&radical_elements);
+ iter=0;
+ xbt_dynar_sort(smpi_factor, &factor_cmp);
+ xbt_dynar_foreach(smpi_factor, iter, fact) {
+ XBT_DEBUG("smpi_factor:\t%ld : %d values, first: %f", fact.factor, fact.nb_values ,fact.values[0]);
+ }
+ return smpi_factor;
+}
+
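+// smpi_os, smpi_ois and smpi_or below return the overhead (in seconds) to
+// inject for, respectively, a send, an asynchronous send and a receive of
+// `size` bytes, in the spirit of LogGPS-style o_s/o_r parameters. The model
+// is piecewise linear: values[0] + values[1]*size, using the coefficients of
+// the largest threshold strictly below size (and 0.0 when size falls below
+// the first threshold).
+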
+static double smpi_os(double size)
+{
+ if (!smpi_os_values) {
+ smpi_os_values = parse_factor(sg_cfg_get_string("smpi/os"));
+ smpi_register_static(smpi_os_values, xbt_dynar_free_voidp);
+ }
+ unsigned int iter = 0;
+ s_smpi_factor_t fact;
+ double current=0.0;
+ xbt_dynar_foreach(smpi_os_values, iter, fact) {
+ if (size <= fact.factor) {
+ XBT_DEBUG("os : %lf <= %ld return %f", size, fact.factor, current);
+ return current;
+ }else{
+ current=fact.values[0]+fact.values[1]*size;
+ }
+ }
+ XBT_DEBUG("os : %lf > %ld return %f", size, fact.factor, current);
+
+ return current;
+}
+
+static double smpi_ois(double size)
+{
+ if (!smpi_ois_values) {
+ smpi_ois_values = parse_factor(sg_cfg_get_string("smpi/ois"));
+ smpi_register_static(smpi_ois_values, xbt_dynar_free_voidp);
+ }
+ unsigned int iter = 0;
+ s_smpi_factor_t fact;
+ double current=0.0;
+ xbt_dynar_foreach(smpi_ois_values, iter, fact) {
+ if (size <= fact.factor) {
+ XBT_DEBUG("ois : %lf <= %ld return %f", size, fact.factor, current);
+ return current;
+ }else{
+ current=fact.values[0]+fact.values[1]*size;
+ }
+ }
+ XBT_DEBUG("ois : %lf > %ld return %f", size, fact.factor, current);
+
+ return current;
+}
+
+static double smpi_or(double size)
+{
+ if (!smpi_or_values) {
+ smpi_or_values = parse_factor(sg_cfg_get_string("smpi/or"));
+ smpi_register_static(smpi_or_values, xbt_dynar_free_voidp);
+ }
+ unsigned int iter = 0;
+ s_smpi_factor_t fact;
+ double current=0.0;
+ xbt_dynar_foreach(smpi_or_values, iter, fact) {
+ if (size <= fact.factor) {
+ XBT_DEBUG("or : %lf <= %ld return %f", size, fact.factor, current);
+ return current;
+ }else
+ current=fact.values[0]+fact.values[1]*size;
+ }
+ XBT_DEBUG("or : %lf > %ld return %f", size, fact.factor, current);
+
+ return current;
}
static MPI_Request build_request(void *buf, int count,
MPI_Datatype datatype, int src, int dst,
int tag, MPI_Comm comm, unsigned flags)
{
- MPI_Request request;
+ MPI_Request request = NULL;
void *old_buf = NULL;
s_smpi_subtype_t *subtype = datatype->substruct;
if(datatype->has_subtype == 1){
- // This part handles the problem of non-contignous memory
+ // This part handles the problem of non-contiguous memory
old_buf = buf;
- buf = malloc(count*smpi_datatype_size(datatype));
+ buf = count==0 ? NULL : xbt_malloc(count*smpi_datatype_size(datatype));
if (flags & SEND) {
subtype->serialize(old_buf, buf, count, datatype->substruct);
}
}
request->buf = buf;
- // This part handles the problem of non-contignous memory (for the
+ // This part handles the problem of non-contiguous memory (for the
// unserialisation at the reception)
request->old_buf = old_buf;
request->old_type = datatype;
request->action = NULL;
request->flags = flags;
request->detached = 0;
+ request->detached_sender = NULL;
+ request->real_src = 0;
+
+ request->truncated = 0;
+ request->real_size = 0;
+ request->real_tag = 0;
+
+ request->refcount=1;
#ifdef HAVE_TRACING
request->send = 0;
request->recv = 0;
#endif
+ if (flags & SEND) smpi_datatype_unuse(datatype);
+
return request;
}
-void smpi_empty_status(MPI_Status * status) {
+void smpi_empty_status(MPI_Status * status)
+{
if(status != MPI_STATUS_IGNORE) {
- status->MPI_SOURCE=MPI_ANY_SOURCE;
- status->MPI_TAG=MPI_ANY_TAG;
- status->count=0;
+ status->MPI_SOURCE = MPI_ANY_SOURCE;
+ status->MPI_TAG = MPI_ANY_TAG;
+ status->MPI_ERROR = MPI_SUCCESS;
+ status->count=0;
}
}
MPI_Request smpi_mpi_send_init(void *buf, int count, MPI_Datatype datatype,
int dst, int tag, MPI_Comm comm)
{
- MPI_Request request =
- build_request(buf, count, datatype, smpi_comm_rank(comm), dst, tag,
- comm, PERSISTENT | SEND);
+ MPI_Request request = NULL; /* MC needs the comm to be set to NULL during the call */
+ request = build_request(buf==MPI_BOTTOM ? (void*)0 : buf, count, datatype, smpi_process_index(), smpi_group_index(smpi_comm_group(comm), dst), tag,
+ comm, PERSISTENT | SEND | PREPARED);
+ request->refcount++;
+ return request;
+}
+MPI_Request smpi_mpi_ssend_init(void *buf, int count, MPI_Datatype datatype,
+ int dst, int tag, MPI_Comm comm)
+{
+ MPI_Request request = NULL; /* MC needs the comm to be set to NULL during the call */
+ request = build_request(buf==MPI_BOTTOM ? (void*)0 : buf, count, datatype, smpi_process_index(), smpi_group_index(smpi_comm_group(comm), dst), tag,
+ comm, PERSISTENT | SSEND | SEND | PREPARED);
+ request->refcount++;
return request;
}
MPI_Request smpi_mpi_recv_init(void *buf, int count, MPI_Datatype datatype,
int src, int tag, MPI_Comm comm)
{
- MPI_Request request =
- build_request(buf, count, datatype, src, smpi_comm_rank(comm), tag,
- comm, PERSISTENT | RECV);
-
+ MPI_Request request = NULL; /* MC needs the comm to be set to NULL during the call */
+ request = build_request(buf==MPI_BOTTOM ? (void*)0 : buf, count, datatype, src == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : smpi_group_index(smpi_comm_group(comm), src), smpi_process_index(), tag,
+ comm, PERSISTENT | RECV | PREPARED);
+ request->refcount++;
return request;
}
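+
+// Requests built by the *_init functions above carry the PREPARED flag until
+// smpi_mpi_start clears it; test/wait treat a PREPARED request as inactive
+// and simply report an empty status.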
{
smx_rdv_t mailbox;
- xbt_assert(!request->action,
- "Cannot (re)start a non-finished communication");
- if(request->flags & RECV) {
+ xbt_assert(!request->action, "Cannot (re)start a non-finished communication");
+ request->flags &= ~PREPARED;
+ if (request->flags & RECV) {
print_request("New recv", request);
- if (request->size < surf_cfg_get_int("smpi/async_small_thres"))
+ //FIXME: if receive is posted with a large size, but send is smaller, mailboxes may not match !
+ if (request->size < sg_cfg_get_int("smpi/async_small_thres"))
mailbox = smpi_process_mailbox_small();
else
mailbox = smpi_process_mailbox();
+ // we make a copy here, as the size is modified by simix, and we may reuse the request in another receive later
+ request->real_size=request->size;
+ smpi_datatype_use(request->old_type);
+ smpi_comm_use(request->comm);
+ request->action = simcall_comm_irecv(mailbox, request->buf, &request->real_size, &match_recv, request);
+
+ //integrate pseudo-timing for buffering of small messages, do not bother to execute the simcall if 0
+ double sleeptime = request->detached ? smpi_or(request->size) : 0.0;
+ if(sleeptime!=0.0){
+ simcall_process_sleep(sleeptime);
+ XBT_DEBUG("receiving size of %zu : sleep %lf ", request->size, smpi_or(request->size));
+ }
- // FIXME: SIMIX does not yet support non-contiguous datatypes
- request->action = simcall_comm_irecv(mailbox, request->buf, &request->size, &match_recv, request);
} else {
- int receiver = smpi_group_index(smpi_comm_group(request->comm), request->dst);
+
+ int receiver = request->dst;//smpi_group_index(smpi_comm_group(request->comm), request->dst);
+
+ #ifdef HAVE_TRACING
+ int rank = smpi_process_index();
+ if (TRACE_smpi_view_internals()) {
+ TRACE_smpi_send(rank, rank, receiver,request->size);
+ }
+ #endif
/* if(receiver == MPI_UNDEFINED) {*/
/* XBT_WARN("Trying to send a message to a wrong rank");*/
/* return;*/
/* }*/
print_request("New send", request);
- if (request->size < surf_cfg_get_int("smpi/async_small_thres")) { // eager mode
+ if (request->size < sg_cfg_get_int("smpi/async_small_thres")) { // eager mode
mailbox = smpi_process_remote_mailbox_small(receiver);
}else{
XBT_DEBUG("Send request %p is not in the permanent receive mailbox (buf: %p)",request,request->buf);
mailbox = smpi_process_remote_mailbox(receiver);
}
- if (request->size < 64*1024 ) { //(FIXME: this limit should be configurable)
+ if ( (! (request->flags & SSEND)) && (request->size < sg_cfg_get_int("smpi/send_is_detached_thres"))) {
void *oldbuf = NULL;
+ request->detached = 1;
+ request->refcount++;
if(request->old_type->has_subtype == 0){
oldbuf = request->buf;
- request->detached = 1;
- if (oldbuf){
- request->buf = malloc(request->size);
+ if (oldbuf && request->size!=0){
+ request->buf = xbt_malloc(request->size);
memcpy(request->buf,oldbuf,request->size);
}
}
XBT_DEBUG("Send request %p is detached; buf %p copied into %p",request,oldbuf,request->buf);
}
+    // we make a copy here, as the size is modified by simix and we may reuse the request in another send later
+ request->real_size=request->size;
+ smpi_datatype_use(request->old_type);
+ smpi_comm_use(request->comm);
+
+ //if we are giving back the control to the user without waiting for completion, we have to inject timings
+ double sleeptime =0.0;
+ if(request->detached || (request->flags & (ISEND|SSEND))){// issend should be treated as isend
+ //isend and send timings may be different
+ sleeptime = (request->flags & ISEND)? smpi_ois(request->size) : smpi_os(request->size);
+ }
+
+ if(sleeptime!=0.0){
+ simcall_process_sleep(sleeptime);
+ XBT_DEBUG("sending size of %zu : sleep %lf ", request->size, smpi_os(request->size));
+ }
+
request->action =
simcall_comm_isend(mailbox, request->size, -1.0,
- request->buf, request->size,
+ request->buf, request->real_size,
&match_send,
&smpi_mpi_request_free_voidp, // how to free the userdata if a detached send fails
request,
/* FIXME: detached sends are not traceable (request->action == NULL) */
if (request->action)
simcall_set_category(request->action, TRACE_internal_smpi_get_category());
+
#endif
}
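+
+// Request lifetime is reference counted: build_request starts at refcount 1,
+// the persistent *_init calls take an extra reference (the request survives
+// a wait), and a detached send takes one more, released by the receiver
+// through detached_sender once the data has been delivered.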
void smpi_mpi_request_free(MPI_Request * request)
{
- xbt_free(*request);
- *request = MPI_REQUEST_NULL;
+ if((*request) != MPI_REQUEST_NULL){
+ (*request)->refcount--;
+ if((*request)->refcount<0) xbt_die("wrong refcount");
+
+ if((*request)->refcount==0){
+ print_request("Destroying", (*request));
+ xbt_free(*request);
+ *request = MPI_REQUEST_NULL;
+ }else{
+ print_request("Decrementing", (*request));
+
+ }
+ }else{
+ xbt_die("freeing an already free request");
+ }
}
MPI_Request smpi_isend_init(void *buf, int count, MPI_Datatype datatype,
int dst, int tag, MPI_Comm comm)
{
- MPI_Request request =
- build_request(buf, count, datatype, smpi_comm_rank(comm), dst, tag,
- comm, NON_PERSISTENT | SEND);
-
+ MPI_Request request = NULL; /* MC needs the comm to be set to NULL during the call */
+ request = build_request(buf==MPI_BOTTOM ? (void*)0 : buf , count, datatype, smpi_process_index(), smpi_group_index(smpi_comm_group(comm), dst), tag,
+ comm, NON_PERSISTENT | ISEND | SEND | PREPARED);
return request;
}
MPI_Request smpi_mpi_isend(void *buf, int count, MPI_Datatype datatype,
int dst, int tag, MPI_Comm comm)
{
- MPI_Request request =
- smpi_isend_init(buf, count, datatype, dst, tag, comm);
+ MPI_Request request = NULL; /* MC needs the comm to be set to NULL during the call */
+ request = build_request(buf==MPI_BOTTOM?(void*)0:buf, count, datatype, smpi_process_index(), smpi_group_index(smpi_comm_group(comm), dst), tag,
+ comm, NON_PERSISTENT | ISEND | SEND);
+ smpi_mpi_start(request);
+ return request;
+}
+MPI_Request smpi_mpi_issend(void *buf, int count, MPI_Datatype datatype,
+ int dst, int tag, MPI_Comm comm)
+{
+ MPI_Request request = NULL; /* MC needs the comm to be set to NULL during the call */
+ request = build_request(buf==MPI_BOTTOM ? (void*)0 : buf, count, datatype, smpi_process_index(), smpi_group_index(smpi_comm_group(comm), dst), tag,
+ comm, NON_PERSISTENT | ISEND | SSEND | SEND);
smpi_mpi_start(request);
return request;
}
+
+
MPI_Request smpi_irecv_init(void *buf, int count, MPI_Datatype datatype,
int src, int tag, MPI_Comm comm)
{
- MPI_Request request =
- build_request(buf, count, datatype, src, smpi_comm_rank(comm), tag,
- comm, NON_PERSISTENT | RECV);
+ MPI_Request request = NULL; /* MC needs the comm to be set to NULL during the call */
+ request = build_request(buf==MPI_BOTTOM ? (void*)0 : buf, count, datatype, src == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : smpi_group_index(smpi_comm_group(comm), src), smpi_process_index(), tag,
+ comm, NON_PERSISTENT | RECV | PREPARED);
return request;
}
MPI_Request smpi_mpi_irecv(void *buf, int count, MPI_Datatype datatype,
int src, int tag, MPI_Comm comm)
{
- MPI_Request request =
- smpi_irecv_init(buf, count, datatype, src, tag, comm);
-
+ MPI_Request request = NULL; /* MC needs the comm to be set to NULL during the call */
+ request = build_request(buf==MPI_BOTTOM ? (void*)0 : buf, count, datatype, src == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : smpi_group_index(smpi_comm_group(comm), src), smpi_process_index(), tag,
+ comm, NON_PERSISTENT | RECV);
smpi_mpi_start(request);
return request;
}
void smpi_mpi_recv(void *buf, int count, MPI_Datatype datatype, int src,
int tag, MPI_Comm comm, MPI_Status * status)
{
- MPI_Request request;
+ MPI_Request request = NULL; /* MC needs the comm to be set to NULL during the call */
request = smpi_mpi_irecv(buf, count, datatype, src, tag, comm);
smpi_mpi_wait(&request, status);
+ request = NULL;
}
void smpi_mpi_send(void *buf, int count, MPI_Datatype datatype, int dst,
int tag, MPI_Comm comm)
{
- MPI_Request request;
+ MPI_Request request = NULL; /* MC needs the comm to be set to NULL during the call */
+ request = build_request(buf==MPI_BOTTOM ? (void*)0 : buf, count, datatype, smpi_process_index(), smpi_group_index(smpi_comm_group(comm), dst), tag,
+ comm, NON_PERSISTENT | SEND);
+
+ smpi_mpi_start(request);
+ smpi_mpi_wait(&request, MPI_STATUS_IGNORE);
+ request = NULL;
+}
+
+void smpi_mpi_ssend(void *buf, int count, MPI_Datatype datatype,
+ int dst, int tag, MPI_Comm comm)
+{
+ MPI_Request request = NULL; /* MC needs the comm to be set to NULL during the call */
+ request = build_request(buf==MPI_BOTTOM ? (void*)0 : buf, count, datatype, smpi_process_index(), smpi_group_index(smpi_comm_group(comm), dst), tag,
+ comm, NON_PERSISTENT | SSEND | SEND);
- request = smpi_mpi_isend(buf, count, datatype, dst, tag, comm);
+ smpi_mpi_start(request);
smpi_mpi_wait(&request, MPI_STATUS_IGNORE);
+ request = NULL;
}
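+
+// Note that the SSEND flag changes smpi_mpi_start's behaviour: a synchronous
+// send is never detached (see the "!(request->flags & SSEND)" test there), so
+// it really waits for the matching receive instead of completing eagerly.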
void smpi_mpi_sendrecv(void *sendbuf, int sendcount, MPI_Datatype sendtype,
{
MPI_Request requests[2];
MPI_Status stats[2];
-
+ int myid=smpi_process_index();
+ if ((smpi_group_index(smpi_comm_group(comm), dst) == myid) && (smpi_group_index(smpi_comm_group(comm), src) == myid)) {
+ smpi_datatype_copy(sendbuf, sendcount, sendtype,
+ recvbuf, recvcount, recvtype);
+ return;
+ }
requests[0] =
smpi_isend_init(sendbuf, sendcount, sendtype, dst, sendtag, comm);
requests[1] =
smpi_mpi_waitall(2, requests, stats);
if(status != MPI_STATUS_IGNORE) {
// Copy receive status
- memcpy(status, &stats[1], sizeof(MPI_Status));
+ *status = stats[1];
}
}
static void finish_wait(MPI_Request * request, MPI_Status * status)
{
MPI_Request req = *request;
- // if we have a sender, we should use its data, and not the data from the receive
- //FIXME : mail fail if req->action has already been freed, the pointer being invalid
- if((req->action)&&
- (req->src==MPI_ANY_SOURCE || req->tag== MPI_ANY_TAG))
- req = (MPI_Request)SIMIX_comm_get_src_data((*request)->action);
+ smpi_empty_status(status);
- if(status != MPI_STATUS_IGNORE) {
- status->MPI_SOURCE = req->src;
- status->MPI_TAG = req->tag;
- //if((*request)->action && ((MPI_Request)SIMIX_comm_get_src_data((*request)->action))->size == (*request)->size)
- status->MPI_ERROR = MPI_SUCCESS;
- //else status->MPI_ERROR = MPI_ERR_TRUNCATE;
- // this handles the case were size in receive differs from size in send
- // FIXME: really this should just contain the count of receive-type blocks,
- // right?
- status->count = req->size;
- }
- req = *request;
+ if(!(req->detached && req->flags & SEND) && !(req->flags & PREPARED)){
+ if(status != MPI_STATUS_IGNORE) {
+ int src = req->src == MPI_ANY_SOURCE ? req->real_src : req->src;
+ status->MPI_SOURCE = smpi_group_rank(smpi_comm_group(req->comm), src);
+ status->MPI_TAG = req->tag == MPI_ANY_TAG ? req->real_tag : req->tag;
+ status->MPI_ERROR = req->truncated ? MPI_ERR_TRUNCATE : MPI_SUCCESS;
+      // this handles the case where the size of the receive differs from the size of the send
+ // FIXME: really this should just contain the count of receive-type blocks,
+ // right?
+ status->count = req->real_size;
+ }
- print_request("Finishing", req);
- MPI_Datatype datatype = req->old_type;
- if(datatype->has_subtype == 1){
+ print_request("Finishing", req);
+ MPI_Datatype datatype = req->old_type;
+
+ if(datatype->has_subtype == 1){
    // This part handles the problem of non-contiguous memory
    // (the unserialization at the reception)
- s_smpi_subtype_t *subtype = datatype->substruct;
- if(req->flags & RECV) {
- subtype->unserialize(req->buf, req->old_buf, req->size/smpi_datatype_size(datatype) , datatype->substruct);
+ s_smpi_subtype_t *subtype = datatype->substruct;
+ if(req->flags & RECV) {
+ subtype->unserialize(req->buf, req->old_buf, req->real_size/smpi_datatype_size(datatype) , datatype->substruct);
+ }
+ if(req->detached == 0) free(req->buf);
}
- if(req->detached == 0) free(req->buf);
+ smpi_comm_unuse(req->comm);
+ smpi_datatype_unuse(datatype);
+
+ }
+
+#ifdef HAVE_TRACING
+ if (TRACE_smpi_view_internals()) {
+ if(req->flags & RECV){
+ int rank = smpi_process_index();
+ int src_traced = (req->src == MPI_ANY_SOURCE ? req->real_src : req->src);
+ TRACE_smpi_recv(rank, src_traced, rank);
+ }
+ }
+#endif
+
+ if(req->detached_sender!=NULL){
+ smpi_mpi_request_free(&(req->detached_sender));
}
if(req->flags & NON_PERSISTENT) {
int flag;
//assume that request is not MPI_REQUEST_NULL (filtered in PMPI_Test or smpi_mpi_testall before)
- if ((*request)->action == NULL)
- flag = 1;
- else
- flag = simcall_comm_test((*request)->action);
- if(flag) {
- finish_wait(request, status);
- }else{
- smpi_empty_status(status);
+ smpi_empty_status(status);
+ flag = 1;
+ if (!((*request)->flags & PREPARED)) {
+ if ((*request)->action != NULL)
+ flag = simcall_comm_test((*request)->action);
+ if (flag) {
+ finish_wait(request, status);
+ *request = MPI_REQUEST_NULL;
+ }
}
return flag;
}
*index = MPI_UNDEFINED;
flag = 0;
- if(count > 0) {
- comms = xbt_dynar_new(sizeof(smx_action_t), NULL);
- map = xbt_new(int, count);
- size = 0;
- for(i = 0; i < count; i++) {
- if((requests[i]!=MPI_REQUEST_NULL) && requests[i]->action) {
- xbt_dynar_push(comms, &requests[i]->action);
- map[size] = i;
- size++;
- }
+ comms = xbt_dynar_new(sizeof(smx_action_t), NULL);
+ map = xbt_new(int, count);
+ size = 0;
+ for(i = 0; i < count; i++) {
+ if ((requests[i] != MPI_REQUEST_NULL) && requests[i]->action &&
+ !(requests[i]->flags & PREPARED)) {
+ xbt_dynar_push(comms, &requests[i]->action);
+ map[size] = i;
+ size++;
}
- if(size > 0) {
- i = simcall_comm_testany(comms);
- // not MPI_UNDEFINED, as this is a simix return code
- if(i != -1) {
- *index = map[i];
- finish_wait(&requests[*index], status);
- flag = 1;
- }
- }else{
- //all requests are null or inactive, return true
- flag=1;
- smpi_empty_status(status);
+ }
+ if(size > 0) {
+ i = simcall_comm_testany(comms);
+ // not MPI_UNDEFINED, as this is a simix return code
+ if(i != -1) {
+ *index = map[i];
+ finish_wait(&requests[*index], status);
+ requests[*index] = MPI_REQUEST_NULL;
+ flag = 1;
}
- xbt_free(map);
- xbt_dynar_free(&comms);
+ }else{
+ //all requests are null or inactive, return true
+ flag=1;
+ smpi_empty_status(status);
}
+ xbt_free(map);
+ xbt_dynar_free(&comms);
return flag;
}
int flag=1;
int i;
for(i=0; i<count; i++){
- if(requests[i]!= MPI_REQUEST_NULL){
+ if (requests[i] != MPI_REQUEST_NULL && !(requests[i]->flags & PREPARED)) {
if (smpi_mpi_test(&requests[i], pstat)!=1){
flag=0;
+ }else{
+ requests[i]=MPI_REQUEST_NULL;
}
}else{
smpi_empty_status(pstat);
}
if(status != MPI_STATUSES_IGNORE) {
- memcpy(&status[i], pstat, sizeof(*pstat));
+ status[i] = *pstat;
}
}
return flag;
while(flag==0){
smpi_mpi_iprobe(source, tag, comm, &flag, status);
XBT_DEBUG("Busy Waiting on probing : %d", flag);
- if(!flag) {
- simcall_process_sleep(0.0001);
- }
}
}
void smpi_mpi_iprobe(int source, int tag, MPI_Comm comm, int* flag, MPI_Status* status){
- MPI_Request request =build_request(NULL, 0, MPI_CHAR, source, smpi_comm_rank(comm), tag,
+
+ MPI_Request request =build_request(NULL, 0, MPI_CHAR, source == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : smpi_group_index(smpi_comm_group(comm), source), smpi_comm_rank(comm), tag,
comm, NON_PERSISTENT | RECV);
+  //to avoid deadlock, we have to sleep some time here, or the timer won't advance and we will only do iprobe simcalls
+  //multiplier to the sleep time: each failed iprobe increases it, so that a process stuck probing burns fewer simcalls
+  static int nsleeps = 1;
+  double sleeptime = nsleeps * sg_cfg_get_double("smpi/iprobe");
+
+  simcall_process_sleep(sleeptime);
+
// behave like a receive, but don't do it
smx_rdv_t mailbox;
print_request("New iprobe", request);
  // We have to test both mailboxes, as we don't know on which one we will receive
- if (surf_cfg_get_int("smpi/async_small_thres")>0){
+ if (sg_cfg_get_int("smpi/async_small_thres")>0){
mailbox = smpi_process_mailbox_small();
XBT_DEBUG("trying to probe the perm recv mailbox");
request->action = simcall_comm_iprobe(mailbox, request->src, request->tag, &match_recv, (void*)request);
if(request->action){
MPI_Request req = (MPI_Request)SIMIX_comm_get_src_data(request->action);
*flag = 1;
- if(status != MPI_STATUS_IGNORE) {
- status->MPI_SOURCE = req->src;
+ if(status != MPI_STATUS_IGNORE && !(req->flags & PREPARED)) {
+ status->MPI_SOURCE = smpi_group_rank(smpi_comm_group(comm), req->src);
status->MPI_TAG = req->tag;
status->MPI_ERROR = MPI_SUCCESS;
- status->count = req->size;
+ status->count = req->real_size;
}
+ nsleeps=1;//reset the number of sleeps we will do next time
+ }
+ else {
+ *flag = 0;
+ nsleeps++;
}
- else *flag = 0;
smpi_mpi_request_free(&request);
return;
void smpi_mpi_wait(MPI_Request * request, MPI_Status * status)
{
print_request("Waiting", *request);
+ if ((*request)->flags & PREPARED) {
+ smpi_empty_status(status);
+ return;
+ }
+
if ((*request)->action != NULL) { // this is not a detached send
simcall_comm_wait((*request)->action, -1.0);
- finish_wait(request, status);
}
+
+#ifdef HAVE_MC
+  if(MC_is_active() && (*request)->action)
+    (*request)->action->comm.dst_data = NULL; // dangling pointer: dst_data is freed by the wait, so reset it for system state comparison
+#endif
+
+ finish_wait(request, status);
+ *request = MPI_REQUEST_NULL;
-  // FIXME for a detached send, finish_wait is not called:
}
comms = xbt_dynar_new(sizeof(smx_action_t), NULL);
map = xbt_new(int, count);
size = 0;
- XBT_DEBUG("Wait for one of");
+ XBT_DEBUG("Wait for one of %d", count);
for(i = 0; i < count; i++) {
- if((requests[i] != MPI_REQUEST_NULL) && (requests[i]->action != NULL)) {
- print_request("Waiting any ", requests[i]);
- xbt_dynar_push(comms, &requests[i]->action);
- map[size] = i;
- size++;
+ if (requests[i] != MPI_REQUEST_NULL && !(requests[i]->flags & PREPARED)) {
+ if (requests[i]->action != NULL) {
+ XBT_DEBUG("Waiting any %p ", requests[i]);
+ xbt_dynar_push(comms, &requests[i]->action);
+ map[size] = i;
+ size++;
+ }else{
+ //This is a finished detached request, let's return this one
+ size=0;//so we free the dynar but don't do the waitany call
+ index=i;
+ finish_wait(&requests[i], status);//cleanup if refcount = 0
+ requests[i]=MPI_REQUEST_NULL;//set to null
+ break;
+ }
}
}
if(size > 0) {
if (i != -1) {
index = map[i];
finish_wait(&requests[index], status);
+ requests[index] = MPI_REQUEST_NULL;
}
}
xbt_free(map);
int index, c;
MPI_Status stat;
MPI_Status *pstat = status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat;
- int retvalue=MPI_SUCCESS;
+ int retvalue = MPI_SUCCESS;
//tag invalid requests in the set
- for(c = 0; c < count; c++) {
- if(requests[c]==MPI_REQUEST_NULL || requests[c]->dst == MPI_PROC_NULL ){
- if(status != MPI_STATUSES_IGNORE)
+ if (status != MPI_STATUSES_IGNORE) {
+ for (c = 0; c < count; c++) {
+ if (requests[c] == MPI_REQUEST_NULL || requests[c]->dst == MPI_PROC_NULL ||
+ (requests[c]->flags & PREPARED)) {
smpi_empty_status(&status[c]);
- }else if(requests[c]->src == MPI_PROC_NULL ){
- if(status != MPI_STATUSES_IGNORE) {
+ } else if (requests[c]->src == MPI_PROC_NULL) {
smpi_empty_status(&status[c]);
- status[c].MPI_SOURCE=MPI_PROC_NULL;
+ status[c].MPI_SOURCE = MPI_PROC_NULL;
}
}
}
-
for(c = 0; c < count; c++) {
- if(MC_is_active()) {
- smpi_mpi_wait(&requests[c], pstat);
- index = c;
- } else {
- index = smpi_mpi_waitany(count, requests, pstat);
- if(index == MPI_UNDEFINED) {
- break;
- }
- if(status != MPI_STATUSES_IGNORE) {
- memcpy(&status[index], pstat, sizeof(*pstat));
- if(status[index].MPI_ERROR==MPI_ERR_TRUNCATE)retvalue=MPI_ERR_IN_STATUS;
-
- }
+ if (MC_is_active()) {
+ smpi_mpi_wait(&requests[c], pstat);
+ index = c;
+ } else {
+ index = smpi_mpi_waitany(count, requests, pstat);
+ if (index == MPI_UNDEFINED)
+ break;
+ requests[index]=MPI_REQUEST_NULL;
+ }
+ if (status != MPI_STATUSES_IGNORE) {
+ status[index] = *pstat;
+ if (status[index].MPI_ERROR == MPI_ERR_TRUNCATE)
+ retvalue = MPI_ERR_IN_STATUS;
}
}
+
return retvalue;
}
indices[count] = index;
count++;
if(status != MPI_STATUSES_IGNORE) {
- memcpy(&status[index], pstat, sizeof(*pstat));
+ status[index] = *pstat;
}
+ requests[index]=MPI_REQUEST_NULL;
}else{
return MPI_UNDEFINED;
}
for(i = 0; i < incount; i++) {
if((requests[i] != MPI_REQUEST_NULL)) {
if(smpi_mpi_test(&requests[i], pstat)) {
- indices[count] = i;
+ indices[i] = 1;
count++;
if(status != MPI_STATUSES_IGNORE) {
- memcpy(&status[i], pstat, sizeof(*pstat));
+ status[i] = *pstat;
}
+ requests[i]=MPI_REQUEST_NULL;
+
}
}else{
count_dead++;
void *recvbuf, int recvcount, MPI_Datatype recvtype,
int root, MPI_Comm comm)
{
- int system_tag = 666;
+ int system_tag = COLL_TAG_GATHER;
int rank, size, src, index;
MPI_Aint lb = 0, recvext = 0;
MPI_Request *requests;
}
}
+
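+// Naive reduce_scatter sketch: reduce everything to an arbitrary root (rank
+// 0), then scatterv the per-rank segments back according to recvcounts.
+// Correct, but roughly doubles the data movement of a dedicated algorithm.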
+void smpi_mpi_reduce_scatter(void *sendbuf, void *recvbuf, int *recvcounts,
+ MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
+{
+ int i, size, count;
+ int *displs;
+ int rank = smpi_process_index();
+ void *tmpbuf;
+
+ /* arbitrarily choose root as rank 0 */
+ size = smpi_comm_size(comm);
+ count = 0;
+ displs = xbt_new(int, size);
+ for (i = 0; i < size; i++) {
+ displs[i] = count;
+ count += recvcounts[i];
+ }
+ tmpbuf=(void*)xbt_malloc(count*smpi_datatype_get_extent(datatype));
+ mpi_coll_reduce_fun(sendbuf, tmpbuf, count, datatype, op, 0, comm);
+ smpi_mpi_scatterv(tmpbuf, recvcounts, displs, datatype, recvbuf,
+ recvcounts[rank], datatype, 0, comm);
+ xbt_free(displs);
+ xbt_free(tmpbuf);
+}
+
void smpi_mpi_gatherv(void *sendbuf, int sendcount, MPI_Datatype sendtype,
void *recvbuf, int *recvcounts, int *displs,
MPI_Datatype recvtype, int root, MPI_Comm comm)
{
- int system_tag = 666;
+ int system_tag = COLL_TAG_GATHERV;
int rank, size, src, index;
MPI_Aint lb = 0, recvext = 0;
MPI_Request *requests;
int recvcount, MPI_Datatype recvtype,
MPI_Comm comm)
{
- int system_tag = 666;
+ int system_tag = COLL_TAG_ALLGATHER;
int rank, size, other, index;
MPI_Aint lb = 0, recvext = 0;
MPI_Request *requests;
int *recvcounts, int *displs,
MPI_Datatype recvtype, MPI_Comm comm)
{
- int system_tag = 666;
+ int system_tag = COLL_TAG_ALLGATHERV;
int rank, size, other, index;
MPI_Aint lb = 0, recvext = 0;
MPI_Request *requests;
void *recvbuf, int recvcount, MPI_Datatype recvtype,
int root, MPI_Comm comm)
{
- int system_tag = 666;
+ int system_tag = COLL_TAG_SCATTER;
int rank, size, dst, index;
MPI_Aint lb = 0, sendext = 0;
MPI_Request *requests;
// FIXME: check for errors
smpi_datatype_extent(sendtype, &lb, &sendext);
// Local copy from root
- smpi_datatype_copy((char *)sendbuf + root * sendcount * sendext,
- sendcount, sendtype, recvbuf, recvcount, recvtype);
+ if(recvbuf!=MPI_IN_PLACE){
+ smpi_datatype_copy((char *)sendbuf + root * sendcount * sendext,
+ sendcount, sendtype, recvbuf, recvcount, recvtype);
+ }
// Send buffers to receivers
requests = xbt_new(MPI_Request, size - 1);
index = 0;
MPI_Datatype sendtype, void *recvbuf, int recvcount,
MPI_Datatype recvtype, int root, MPI_Comm comm)
{
- int system_tag = 666;
+ int system_tag = COLL_TAG_SCATTERV;
int rank, size, dst, index;
MPI_Aint lb = 0, sendext = 0;
MPI_Request *requests;
// FIXME: check for errors
smpi_datatype_extent(sendtype, &lb, &sendext);
// Local copy from root
- smpi_datatype_copy((char *)sendbuf + displs[root] * sendext, sendcounts[root],
+ if(recvbuf!=MPI_IN_PLACE){
+ smpi_datatype_copy((char *)sendbuf + displs[root] * sendext, sendcounts[root],
sendtype, recvbuf, recvcount, recvtype);
+ }
// Send buffers to receivers
requests = xbt_new(MPI_Request, size - 1);
index = 0;
MPI_Datatype datatype, MPI_Op op, int root,
MPI_Comm comm)
{
- int system_tag = 666;
+ int system_tag = COLL_TAG_REDUCE;
int rank, size, src, index;
MPI_Aint lb = 0, dataext = 0;
MPI_Request *requests;
void **tmpbufs;
+
+ char* sendtmpbuf = (char*) sendbuf;
+ if( sendbuf == MPI_IN_PLACE ) {
+ sendtmpbuf = (char *)xbt_malloc(count*smpi_datatype_get_extent(datatype));
+ smpi_datatype_copy(recvbuf, count, datatype,sendtmpbuf, count, datatype);
+ }
+
rank = smpi_comm_rank(comm);
size = smpi_comm_size(comm);
+  // non-commutative case: use a working algorithm from OpenMPI
+ if(!smpi_op_is_commute(op)){
+ smpi_coll_tuned_reduce_ompi_basic_linear(sendtmpbuf, recvbuf, count,
+ datatype, op, root, comm);
+ return;
+ }
+
if(rank != root) {
// Send buffer to root
- smpi_mpi_send(sendbuf, count, datatype, root, system_tag, comm);
+ smpi_mpi_send(sendtmpbuf, count, datatype, root, system_tag, comm);
} else {
// FIXME: check for errors
smpi_datatype_extent(datatype, &lb, &dataext);
// Local copy from root
- if (sendbuf && recvbuf)
- smpi_datatype_copy(sendbuf, count, datatype, recvbuf, count, datatype);
+ if (sendtmpbuf && recvbuf)
+ smpi_datatype_copy(sendtmpbuf, count, datatype, recvbuf, count, datatype);
// Receive buffers from senders
//TODO: make a MPI_barrier here ?
requests = xbt_new(MPI_Request, size - 1);
}
xbt_free(tmpbufs);
xbt_free(requests);
+
+ if( sendbuf == MPI_IN_PLACE ) {
+ xbt_free(sendtmpbuf);
+ }
}
}
void smpi_mpi_scan(void *sendbuf, void *recvbuf, int count,
MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
{
- int system_tag = 666;
+ int system_tag = -888;
int rank, size, other, index;
MPI_Aint lb = 0, dataext = 0;
MPI_Request *requests;
}
// Wait for completion of all comms.
smpi_mpi_startall(size - 1, requests);
- for(other = 0; other < size - 1; other++) {
- index = smpi_mpi_waitany(size - 1, requests, MPI_STATUS_IGNORE);
- if(index == MPI_UNDEFINED) {
- break;
- }
- if(index < rank) {
- // #Request is below rank: it's a irecv
- smpi_op_apply(op, tmpbufs[index], recvbuf, &count, &datatype);
+
+ if(smpi_op_is_commute(op)){
+ for(other = 0; other < size - 1; other++) {
+ index = smpi_mpi_waitany(size - 1, requests, MPI_STATUS_IGNORE);
+ if(index == MPI_UNDEFINED) {
+ break;
+ }
+ if(index < rank) {
+        // #Request is below rank: it's an irecv
+ smpi_op_apply(op, tmpbufs[index], recvbuf, &count, &datatype);
+ }
+ }
+ }else{
+    //non-commutative case: wait in rank order
+    for(other = 0; other < size - 1; other++) {
+      smpi_mpi_wait(&(requests[other]), MPI_STATUS_IGNORE);
+      if(other < rank) { // requests below rank are the irecvs from lower ranks
+ smpi_op_apply(op, tmpbufs[other], recvbuf, &count, &datatype);
+ }
+ }
+ }
+ for(index = 0; index < rank; index++) {
+ xbt_free(tmpbufs[index]);
+ }
+ xbt_free(tmpbufs);
+ xbt_free(requests);
+}
+
+void smpi_mpi_exscan(void *sendbuf, void *recvbuf, int count,
+ MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
+{
+ int system_tag = -888;
+ int rank, size, other, index;
+ MPI_Aint lb = 0, dataext = 0;
+ MPI_Request *requests;
+ void **tmpbufs;
+ int recvbuf_is_empty=1;
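+  // on the lowest rank nothing is received and recvbuf is left untouched,
+  // matching MPI's rule that the exscan result is undefined on process 0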
+ rank = smpi_comm_rank(comm);
+ size = smpi_comm_size(comm);
+
+ // FIXME: check for errors
+ smpi_datatype_extent(datatype, &lb, &dataext);
+
+ // Send/Recv buffers to/from others;
+ requests = xbt_new(MPI_Request, size - 1);
+ tmpbufs = xbt_new(void *, rank);
+ index = 0;
+ for(other = 0; other < rank; other++) {
+    // FIXME: possibly overkill when we have contiguous/non-contiguous data
+    // mapping...
+ tmpbufs[index] = xbt_malloc(count * dataext);
+ requests[index] =
+ smpi_irecv_init(tmpbufs[index], count, datatype, other, system_tag,
+ comm);
+ index++;
+ }
+ for(other = rank + 1; other < size; other++) {
+ requests[index] =
+ smpi_isend_init(sendbuf, count, datatype, other, system_tag, comm);
+ index++;
+ }
+ // Wait for completion of all comms.
+ smpi_mpi_startall(size - 1, requests);
+ if(smpi_op_is_commute(op)){
+ for(other = 0; other < size - 1; other++) {
+ index = smpi_mpi_waitany(size - 1, requests, MPI_STATUS_IGNORE);
+ if(index == MPI_UNDEFINED) {
+ break;
+ }
+ if(index < rank) {
+ if(recvbuf_is_empty){
+ smpi_datatype_copy(tmpbufs[index], count, datatype, recvbuf, count, datatype);
+ recvbuf_is_empty=0;
+ }else
+        // #Request is below rank: it's an irecv
+ smpi_op_apply(op, tmpbufs[index], recvbuf, &count, &datatype);
+ }
+ }
+ }else{
+    //non-commutative case: wait in rank order
+    for(other = 0; other < size - 1; other++) {
+      smpi_mpi_wait(&(requests[other]), MPI_STATUS_IGNORE);
+      if(other < rank) {
+ if(recvbuf_is_empty){
+ smpi_datatype_copy(tmpbufs[other], count, datatype, recvbuf, count, datatype);
+ recvbuf_is_empty=0;
+ }else smpi_op_apply(op, tmpbufs[other], recvbuf, &count, &datatype);
+ }
}
}
for(index = 0; index < rank; index++) {