+/* Copyright (c) 2007, 2008, 2009, 2010. The SimGrid Team.
+ * All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
#include "private.h"
#include "xbt/time.h"
+#include "mc/mc.h"
+#include "xbt/replay.h"
+#include <errno.h>
+#include "surf/surf.h"
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_base, smpi,
"Logging specific to SMPI (base)");
-XBT_LOG_EXTERNAL_CATEGORY(smpi_base);
-XBT_LOG_EXTERNAL_CATEGORY(smpi_bench);
-XBT_LOG_EXTERNAL_CATEGORY(smpi_kernel);
-XBT_LOG_EXTERNAL_CATEGORY(smpi_mpi);
-XBT_LOG_EXTERNAL_CATEGORY(smpi_receiver);
-XBT_LOG_EXTERNAL_CATEGORY(smpi_sender);
-XBT_LOG_EXTERNAL_CATEGORY(smpi_util);
-
-smpi_mpi_global_t smpi_mpi_global = NULL;
-
-/**
- * Operations of MPI_OP : implemented=land,sum,min,max
- **/
-void smpi_mpi_land_func(void *a, void *b, int *length,
- MPI_Datatype * datatype);
-
-void smpi_mpi_land_func(void *a, void *b, int *length,
- MPI_Datatype * datatype)
-{
- int i;
- if (*datatype == smpi_mpi_global->mpi_int) {
- int *x = a, *y = b;
- for (i = 0; i < *length; i++) {
- y[i] = x[i] && y[i];
- }
- }
+
+static int match_recv(void* a, void* b, smx_action_t ignored) {
+ MPI_Request ref = (MPI_Request)a;
+ MPI_Request req = (MPI_Request)b;
+
+ xbt_assert(ref, "Cannot match recv against null reference");
+ xbt_assert(req, "Cannot match recv against null request");
+ return (ref->src == MPI_ANY_SOURCE || req->src == ref->src)
+ && (ref->tag == MPI_ANY_TAG || req->tag == ref->tag);
}
-/**
- * sum two vectors element-wise
- *
- * @param a the first vectors
- * @param b the second vectors
- * @return the second vector is modified and contains the element-wise sums
- **/
-void smpi_mpi_sum_func(void *a, void *b, int *length,
- MPI_Datatype * datatype);
+static int match_send(void* a, void* b, smx_action_t ignored) {
+ MPI_Request ref = (MPI_Request)a;
+ MPI_Request req = (MPI_Request)b;
-void smpi_mpi_sum_func(void *a, void *b, int *length, MPI_Datatype * datatype)
+ xbt_assert(ref, "Cannot match send against null reference");
+ xbt_assert(req, "Cannot match send against null request");
+ return (req->src == MPI_ANY_SOURCE || req->src == ref->src)
+ && (req->tag == MPI_ANY_TAG || req->tag == ref->tag);
+}
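+
+/* Note on matching semantics: only the posted receive may use the
+ * MPI_ANY_SOURCE / MPI_ANY_TAG wildcards. In match_recv() `ref` is the
+ * posted receive, so the wildcards are tested on `ref`; in match_send()
+ * `req` is the receive candidate handed over by SIMIX, so they are
+ * tested on `req`. A send never carries a wildcard. */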
+
+static MPI_Request build_request(void *buf, int count,
+ MPI_Datatype datatype, int src, int dst,
+ int tag, MPI_Comm comm, unsigned flags)
{
- int i;
- if ((*datatype == smpi_mpi_global->mpi_byte) || (*datatype == smpi_mpi_global->mpi_char)) {
- char *x = a, *y = b;
- for (i = 0; i < *length; i++) {
- y[i] = x[i] + y[i];
- }
- } else if (*datatype == smpi_mpi_global->mpi_int) {
- int *x = a, *y = b;
- for (i = 0; i < *length; i++) {
- y[i] = x[i] + y[i];
- }
- } else if (*datatype == smpi_mpi_global->mpi_float) {
- float *x = a, *y = b;
- for (i = 0; i < *length; i++) {
- y[i] = x[i] + y[i];
- }
- } else if (*datatype == smpi_mpi_global->mpi_double) {
- double *x = a, *y = b;
- for (i = 0; i < *length; i++) {
- y[i] = x[i] + y[i];
- }
- }
+ MPI_Request request;
+
+ request = xbt_new(s_smpi_mpi_request_t, 1);
+ request->buf = buf;
+ // FIXME: this will have to be changed to support non-contiguous datatypes
+ request->size = smpi_datatype_size(datatype) * count;
+ request->src = src;
+ request->dst = dst;
+ request->tag = tag;
+ request->comm = comm;
+ request->action = NULL;
+ request->flags = flags;
+#ifdef HAVE_TRACING
+ request->send = 0;
+ request->recv = 0;
+#endif
+ return request;
}
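+
+/* `flags` is a bitmask combining a lifetime flag (PERSISTENT or
+ * NON_PERSISTENT) with a direction flag (SEND or RECV); e.g. the
+ * persistent-send constructor below passes PERSISTENT | SEND, and
+ * smpi_mpi_iprobe() builds a dummy NON_PERSISTENT | RECV request. */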
-/**
- *i multiply two vectors element-wise
- *
- * @param a the first vectors
- * @param b the second vectors
- * @return the second vector is modified and contains the element-wise products
- **/
-void smpi_mpi_prod_func(void *a, void *b, int *length,
- MPI_Datatype * datatype);
-
-void smpi_mpi_prod_func(void *a, void *b, int *length, MPI_Datatype * datatype)
-{
- int i;
- if ((*datatype == smpi_mpi_global->mpi_byte) || (*datatype == smpi_mpi_global->mpi_char)) {
- char *x = a, *y = b;
- for (i = 0; i < *length; i++) {
- y[i] = x[i] * y[i];
- }
- } else if (*datatype == smpi_mpi_global->mpi_int) {
- int *x = a, *y = b;
- for (i = 0; i < *length; i++) {
- y[i] = x[i] * y[i];
- }
- } else if (*datatype == smpi_mpi_global->mpi_float) {
- float *x = a, *y = b;
- for (i = 0; i < *length; i++) {
- y[i] = x[i] * y[i];
- }
- } else if (*datatype == smpi_mpi_global->mpi_double) {
- double *x = a, *y = b;
- for (i = 0; i < *length; i++) {
- y[i] = x[i] * y[i];
+
+void smpi_action_trace_run(char *path)
+{
+ char *name;
+ xbt_dynar_t todo;
+ xbt_dict_cursor_t cursor;
+
+  action_fp = NULL;
+ if (path) {
+ action_fp = fopen(path, "r");
+ xbt_assert(action_fp != NULL, "Cannot open %s: %s", path,
+ strerror(errno));
+ }
+
+ if (!xbt_dict_is_empty(action_queues)) {
+ XBT_WARN
+ ("Not all actions got consumed. If the simulation ended successfully (without deadlock), you may want to add new processes to your deployment file.");
+
+ xbt_dict_foreach(action_queues, cursor, name, todo) {
+ XBT_WARN("Still %lu actions for %s", xbt_dynar_length(todo), name);
}
}
+
+ if (path)
+ fclose(action_fp);
+ xbt_dict_free(&action_queues);
+ action_queues = xbt_dict_new_homogeneous(NULL);
}
-/**
- * compute the min of two vectors element-wise
- **/
-void smpi_mpi_min_func(void *a, void *b, int *length,
- MPI_Datatype * datatype);
-void smpi_mpi_min_func(void *a, void *b, int *length, MPI_Datatype * datatype)
+static void smpi_mpi_request_free_voidp(void* request)
{
- int i;
- if ((*datatype == smpi_mpi_global->mpi_byte) || (*datatype == smpi_mpi_global->mpi_char)) {
- char *x = a, *y = b;
- for (i = 0; i < *length; i++) {
- y[i] = x[i] < y[i] ? x[i] : y[i];
- }
- } else {
- if (*datatype == smpi_mpi_global->mpi_int) {
- int *x = a, *y = b;
- for (i = 0; i < *length; i++) {
- y[i] = x[i] < y[i] ? x[i] : y[i];
- }
- } else {
- if (*datatype == smpi_mpi_global->mpi_float) {
- float *x = a, *y = b;
- for (i = 0; i < *length; i++) {
- y[i] = x[i] < y[i] ? x[i] : y[i];
- }
- } else {
- if (*datatype == smpi_mpi_global->mpi_double) {
- double *x = a, *y = b;
- for (i = 0; i < *length; i++) {
- y[i] = x[i] < y[i] ? x[i] : y[i];
- }
+ MPI_Request req = request;
+ smpi_mpi_request_free(&req);
+}
- }
- }
- }
- }
+/* MPI low-level calls */
+MPI_Request smpi_mpi_send_init(void *buf, int count, MPI_Datatype datatype,
+ int dst, int tag, MPI_Comm comm)
+{
+ MPI_Request request =
+ build_request(buf, count, datatype, smpi_comm_rank(comm), dst, tag,
+ comm, PERSISTENT | SEND);
+
+ return request;
}
-/**
- * compute the max of two vectors element-wise
- **/
-void smpi_mpi_max_func(void *a, void *b, int *length,
- MPI_Datatype * datatype);
+MPI_Request smpi_mpi_recv_init(void *buf, int count, MPI_Datatype datatype,
+ int src, int tag, MPI_Comm comm)
+{
+ MPI_Request request =
+ build_request(buf, count, datatype, src, smpi_comm_rank(comm), tag,
+ comm, PERSISTENT | RECV);
+
+ return request;
+}
-void smpi_mpi_max_func(void *a, void *b, int *length, MPI_Datatype * datatype)
+void smpi_mpi_start(MPI_Request request)
{
- int i;
- if ((*datatype == smpi_mpi_global->mpi_byte) || (*datatype == smpi_mpi_global->mpi_char)) {
- char *x = a, *y = b;
- for (i = 0; i < *length; i++) {
- y[i] = x[i] > y[i] ? x[i] : y[i];
- }
- } else if (*datatype == smpi_mpi_global->mpi_int) {
- int *x = a, *y = b;
- for (i = 0; i < *length; i++) {
- y[i] = x[i] > y[i] ? x[i] : y[i];
+ smx_rdv_t mailbox;
+ int detached = 0;
+
+  xbt_assert(!request->action,
+             "Cannot (re)start an unfinished communication");
+ if(request->flags & RECV) {
+ print_request("New recv", request);
+ if (request->size < xbt_cfg_get_int(_surf_cfg_set, "smpi/async_small_thres"))
+ mailbox = smpi_process_mailbox_small();
+ else
+ mailbox = smpi_process_mailbox();
+
+ // FIXME: SIMIX does not yet support non-contiguous datatypes
+ request->action = simcall_comm_irecv(mailbox, request->buf, &request->size, &match_recv, request);
+ } else {
+ print_request("New send", request);
+
+    if (request->size < xbt_cfg_get_int(_surf_cfg_set, "smpi/async_small_thres")) { // eager mode: short messages go through the small mailbox
+ mailbox = smpi_process_remote_mailbox_small(
+ smpi_group_index(smpi_comm_group(request->comm), request->dst));
+    } else {
+ XBT_DEBUG("Send request %p is not in the permanent receive mailbox (buf: %p)",request,request->buf);
+ mailbox = smpi_process_remote_mailbox(
+ smpi_group_index(smpi_comm_group(request->comm), request->dst));
}
- } else if (*datatype == smpi_mpi_global->mpi_float) {
- float *x = a, *y = b;
- for (i = 0; i < *length; i++) {
- y[i] = x[i] > y[i] ? x[i] : y[i];
+    if (request->size < 64 * 1024) { // FIXME: this limit should be configurable
+ void *oldbuf = request->buf;
+ detached = 1;
+ request->buf = malloc(request->size);
+ if (oldbuf)
+ memcpy(request->buf,oldbuf,request->size);
+ XBT_DEBUG("Send request %p is detached; buf %p copied into %p",request,oldbuf,request->buf);
}
- } else if (*datatype == smpi_mpi_global->mpi_double) {
- double *x = a, *y = b;
- for (i = 0; i < *length; i++) {
- y[i] = x[i] > y[i] ? x[i] : y[i];
+
+ request->action =
+ simcall_comm_isend(mailbox, request->size, -1.0,
+ request->buf, request->size,
+ &match_send,
+ &smpi_mpi_request_free_voidp, // how to free the userdata if a detached send fails
+ request,
+ // detach if msg size < eager/rdv switch limit
+ detached);
+
+ #ifdef HAVE_TRACING
+ /* FIXME: detached sends are not traceable (request->action == NULL) */
+ if (request->action)
+ simcall_set_category(request->action, TRACE_internal_smpi_get_category());
+ #endif
+
}
- }
}
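+
+/* Sketch of the send path above (explanatory): messages below the
+ * "smpi/async_small_thres" threshold are routed to the receiver's
+ * "small" mailbox (eager mode), and messages below 64 KiB are
+ * additionally detached, i.e. the payload is copied so the caller's
+ * buffer can be reused at once:
+ *
+ *   MPI_Request r = smpi_mpi_isend(buf, count, MPI_INT, dst, tag, comm);
+ *   // if the send was detached, r->action is NULL and the copied buffer
+ *   // is released through smpi_mpi_request_free_voidp() when it ends
+ */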
+void smpi_mpi_startall(int count, MPI_Request * requests)
+{
+ int i;
+ for(i = 0; i < count; i++) {
+ smpi_mpi_start(requests[i]);
+ }
+}
-
-/**
- * tell the MPI rank of the calling process (from its SIMIX process id)
- **/
-int smpi_mpi_comm_rank(smpi_mpi_communicator_t comm)
+void smpi_mpi_request_free(MPI_Request * request)
{
- return comm->index_to_rank_map[smpi_process_index()];
+ xbt_free(*request);
+ *request = MPI_REQUEST_NULL;
}
-void smpi_process_init(int *argc, char ***argv)
+MPI_Request smpi_isend_init(void *buf, int count, MPI_Datatype datatype,
+ int dst, int tag, MPI_Comm comm)
{
- smpi_process_data_t pdata;
+ MPI_Request request =
+ build_request(buf, count, datatype, smpi_comm_rank(comm), dst, tag,
+ comm, NON_PERSISTENT | SEND);
- // initialize some local variables
+ return request;
+}
- pdata = xbt_new(s_smpi_process_data_t, 1);
- SIMIX_process_set_data(SIMIX_process_self(), pdata);
+MPI_Request smpi_mpi_isend(void *buf, int count, MPI_Datatype datatype,
+ int dst, int tag, MPI_Comm comm)
+{
+ MPI_Request request =
+ smpi_isend_init(buf, count, datatype, dst, tag, comm);
- /* get rank from command line, and remove it from argv */
- pdata->index = atoi((*argv)[1]);
- DEBUG1("I'm rank %d", pdata->index);
- if (*argc > 2) {
- memmove((*argv)[1], (*argv)[2], sizeof(char *) * (*argc - 2));
- (*argv)[(*argc) - 1] = NULL;
- }
- (*argc)--;
+ smpi_mpi_start(request);
+ return request;
+}
- pdata->mutex = SIMIX_mutex_init();
- pdata->cond = SIMIX_cond_init();
- pdata->finalize = 0;
+MPI_Request smpi_irecv_init(void *buf, int count, MPI_Datatype datatype,
+ int src, int tag, MPI_Comm comm)
+{
+ MPI_Request request =
+ build_request(buf, count, datatype, src, smpi_comm_rank(comm), tag,
+ comm, NON_PERSISTENT | RECV);
- pdata->pending_recv_request_queue = xbt_fifo_new();
- pdata->pending_send_request_queue = xbt_fifo_new();
- pdata->received_message_queue = xbt_fifo_new();
+ return request;
+}
- pdata->main = SIMIX_process_self();
- pdata->sender = SIMIX_process_create("smpi_sender",
- smpi_sender, pdata,
- SIMIX_host_get_name(SIMIX_host_self()),
- 0, NULL,
- /*props */ NULL);
- pdata->receiver = SIMIX_process_create("smpi_receiver",
- smpi_receiver, pdata,
- SIMIX_host_get_name(SIMIX_host_self
- ()), 0, NULL,
- /*props */ NULL);
+MPI_Request smpi_mpi_irecv(void *buf, int count, MPI_Datatype datatype,
+ int src, int tag, MPI_Comm comm)
+{
+ MPI_Request request =
+ smpi_irecv_init(buf, count, datatype, src, tag, comm);
- smpi_global->main_processes[pdata->index] = SIMIX_process_self();
- return;
+ smpi_mpi_start(request);
+ return request;
}
-void smpi_process_finalize()
+void smpi_mpi_recv(void *buf, int count, MPI_Datatype datatype, int src,
+ int tag, MPI_Comm comm, MPI_Status * status)
{
- smpi_process_data_t pdata = SIMIX_process_get_data(SIMIX_process_self());
-
- pdata->finalize = 2; /* Tell sender and receiver to quit */
- SIMIX_process_resume(pdata->sender);
- SIMIX_process_resume(pdata->receiver);
- while (pdata->finalize > 0) { /* wait until it's done */
- SIMIX_cond_wait(pdata->cond, pdata->mutex);
- }
+ MPI_Request request;
- SIMIX_mutex_destroy(pdata->mutex);
- SIMIX_cond_destroy(pdata->cond);
- xbt_fifo_free(pdata->pending_recv_request_queue);
- xbt_fifo_free(pdata->pending_send_request_queue);
- xbt_fifo_free(pdata->received_message_queue);
- xbt_free(pdata);
+ request = smpi_mpi_irecv(buf, count, datatype, src, tag, comm);
+ smpi_mpi_wait(&request, status);
}
-/*int smpi_mpi_barrier(smpi_mpi_communicator_t comm)
+
+void smpi_mpi_send(void *buf, int count, MPI_Datatype datatype, int dst,
+ int tag, MPI_Comm comm)
{
+ MPI_Request request;
- SIMIX_mutex_lock(comm->barrier_mutex);
- ++comm->barrier_count;
- if (comm->barrier_count > comm->size) { // only happens on second barrier...
- comm->barrier_count = 0;
- } else if (comm->barrier_count == comm->size) {
- SIMIX_cond_broadcast(comm->barrier_cond);
- }
- while (comm->barrier_count < comm->size) {
- SIMIX_cond_wait(comm->barrier_cond, comm->barrier_mutex);
+ request = smpi_mpi_isend(buf, count, datatype, dst, tag, comm);
+ smpi_mpi_wait(&request, MPI_STATUS_IGNORE);
+}
+
+void smpi_mpi_sendrecv(void *sendbuf, int sendcount, MPI_Datatype sendtype,
+ int dst, int sendtag, void *recvbuf, int recvcount,
+ MPI_Datatype recvtype, int src, int recvtag,
+ MPI_Comm comm, MPI_Status * status)
+{
+ MPI_Request requests[2];
+ MPI_Status stats[2];
+
+ requests[0] =
+ smpi_isend_init(sendbuf, sendcount, sendtype, dst, sendtag, comm);
+ requests[1] =
+ smpi_irecv_init(recvbuf, recvcount, recvtype, src, recvtag, comm);
+ smpi_mpi_startall(2, requests);
+ smpi_mpi_waitall(2, requests, stats);
+ if(status != MPI_STATUS_IGNORE) {
+ // Copy receive status
+ memcpy(status, &stats[1], sizeof(MPI_Status));
}
- SIMIX_mutex_unlock(comm->barrier_mutex);
+}
- return MPI_SUCCESS;
+int smpi_mpi_get_count(MPI_Status * status, MPI_Datatype datatype)
+{
+ return status->count / smpi_datatype_size(datatype);
}
-*/
-int smpi_mpi_isend(smpi_mpi_request_t request)
+static void finish_wait(MPI_Request * request, MPI_Status * status)
{
- smpi_process_data_t pdata = SIMIX_process_get_data(SIMIX_process_self());
- int retval = MPI_SUCCESS;
+ MPI_Request req = *request;
+  // if the receive was posted with a wildcard, use the matched sender's
+  // data rather than the receive's own src/tag placeholders
+  if(req->action &&
+     (req->src == MPI_ANY_SOURCE || req->tag == MPI_ANY_TAG))
+ req = (MPI_Request)SIMIX_comm_get_src_data((*request)->action);
+
+ if(status != MPI_STATUS_IGNORE) {
+ status->MPI_SOURCE = req->src;
+ status->MPI_TAG = req->tag;
+ status->MPI_ERROR = MPI_SUCCESS;
+ // FIXME: really this should just contain the count of receive-type blocks,
+ // right?
+ status->count = req->size;
+ }
+ req = *request;
- if (NULL == request) {
- retval = MPI_ERR_INTERN;
+ print_request("Finishing", req);
+ if(req->flags & NON_PERSISTENT) {
+ smpi_mpi_request_free(request);
} else {
- xbt_fifo_push(pdata->pending_send_request_queue, request);
- SIMIX_process_resume(pdata->sender);
+ req->action = NULL;
}
+}
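+
+/* Note: when the receive was posted with MPI_ANY_SOURCE or MPI_ANY_TAG,
+ * finish_wait() fetches the matched sender's request through
+ * SIMIX_comm_get_src_data(), so status->MPI_SOURCE and status->MPI_TAG
+ * report the actual peer and tag instead of the wildcards. */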
+
+int smpi_mpi_test(MPI_Request * request, MPI_Status * status) {
+  int flag;
- return retval;
+ if ((*request)->action == NULL)
+ flag = 1;
+ else
+ flag = simcall_comm_test((*request)->action);
+ if(flag) {
+ smpi_mpi_wait(request, status);
+ }
+ return flag;
}
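+
+/* A request whose action is NULL (a detached send, or a persistent
+ * request that already completed) tests as finished immediately; all
+ * other requests are delegated to simcall_comm_test(). */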
-int smpi_mpi_irecv(smpi_mpi_request_t request)
+int smpi_mpi_testany(int count, MPI_Request requests[], int *index,
+ MPI_Status * status)
{
- int retval = MPI_SUCCESS;
- smpi_process_data_t pdata = SIMIX_process_get_data(SIMIX_process_self());
-
- if (NULL == request) {
- retval = MPI_ERR_INTERN;
- } else {
- xbt_fifo_push(pdata->pending_recv_request_queue, request);
+ xbt_dynar_t comms;
+ int i, flag, size;
+ int* map;
- if (SIMIX_process_is_suspended(pdata->receiver)) {
- SIMIX_process_resume(pdata->receiver);
+ *index = MPI_UNDEFINED;
+ flag = 0;
+ if(count > 0) {
+ comms = xbt_dynar_new(sizeof(smx_action_t), NULL);
+ map = xbt_new(int, count);
+ size = 0;
+ for(i = 0; i < count; i++) {
+ if(requests[i]->action) {
+ xbt_dynar_push(comms, &requests[i]->action);
+ map[size] = i;
+ size++;
+ }
}
+ if(size > 0) {
+ i = simcall_comm_testany(comms);
+ // FIXME: MPI_UNDEFINED or does SIMIX have a return code?
+ if(i != MPI_UNDEFINED) {
+ *index = map[i];
+ smpi_mpi_wait(&requests[*index], status);
+ flag = 1;
+ }
+ }
+ xbt_free(map);
+ xbt_dynar_free(&comms);
}
+ return flag;
+}
- return retval;
+void smpi_mpi_probe(int source, int tag, MPI_Comm comm, MPI_Status* status){
+ int flag=0;
+  // FIXME: find another way to avoid busy waiting?
+ // the issue here is that we have to wait on a nonexistent comm
+ MPI_Request request;
+ while(flag==0){
+ request = smpi_mpi_iprobe(source, tag, comm, &flag, status);
+ XBT_DEBUG("Busy Waiting on probing : %d", flag);
+ if(!flag) {
+ smpi_mpi_request_free(&request);
+ simcall_process_sleep(0.0001);
+ }
+ }
}
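+
+/* Typical use of the probe above (sketch): learn the actual source and
+ * tag first, then post the matching receive:
+ *
+ *   MPI_Status st;
+ *   smpi_mpi_probe(MPI_ANY_SOURCE, tag, comm, &st);
+ *   smpi_mpi_recv(buf, count, datatype, st.MPI_SOURCE, st.MPI_TAG,
+ *                 comm, &st);
+ */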
-void print_req( smpi_mpi_request_t r );
-void print_req( smpi_mpi_request_t r ) {
- printf("***req %p-> src=%d dst=%d tag=%d completed=0x%x consumed=0x%x\n",r,r->src,r->dst,r->tag,r->completed,r->consumed);
+MPI_Request smpi_mpi_iprobe(int source, int tag, MPI_Comm comm, int* flag, MPI_Status* status){
+  MPI_Request request = build_request(NULL, 0, MPI_CHAR, source, smpi_comm_rank(comm), tag,
+ comm, NON_PERSISTENT | RECV);
+ // behave like a receive, but don't do it
+ smx_rdv_t mailbox;
+
+ print_request("New iprobe", request);
+  // We have to test both mailboxes, as we don't know on which one the message will arrive
+ if (xbt_cfg_get_int(_surf_cfg_set, "smpi/async_small_thres")>0){
+ mailbox = smpi_process_mailbox_small();
+ request->action = simcall_comm_iprobe(mailbox, request->src, request->tag, &match_recv, (void*)request);
+
+ }
+ if (request->action==NULL){
+ mailbox = smpi_process_mailbox();
+ request->action = simcall_comm_iprobe(mailbox, request->src, request->tag, &match_recv, (void*)request);
+ }
+
+ if(request->action){
+ MPI_Request req = (MPI_Request)SIMIX_comm_get_src_data(request->action);
+    *flag = 1;
+ if(status != MPI_STATUS_IGNORE) {
+ status->MPI_SOURCE = req->src;
+ status->MPI_TAG = req->tag;
+ status->MPI_ERROR = MPI_SUCCESS;
+ status->count = req->size;
+ }
+ }
+  else *flag = 0;
+
+ return request;
}
+void smpi_mpi_wait(MPI_Request * request, MPI_Status * status)
+{
+ print_request("Waiting", *request);
+ if ((*request)->action != NULL) { // this is not a detached send
+ simcall_comm_wait((*request)->action, -1.0);
+ finish_wait(request, status);
+ }
+  // FIXME: for a detached send, finish_wait is never called
+}
-/**
- * wait and friends ...
- **/
-int smpi_mpi_wait(smpi_mpi_request_t request, smpi_mpi_status_t * status)
+int smpi_mpi_waitany(int count, MPI_Request requests[],
+ MPI_Status * status)
{
- int retval = MPI_SUCCESS;
+ xbt_dynar_t comms;
+ int i, size, index;
+ int *map;
+
+ index = MPI_UNDEFINED;
+ if(count > 0) {
+ // Wait for a request to complete
+ comms = xbt_dynar_new(sizeof(smx_action_t), NULL);
+ map = xbt_new(int, count);
+ size = 0;
+ XBT_DEBUG("Wait for one of");
+ for(i = 0; i < count; i++) {
+ if((requests[i] != MPI_REQUEST_NULL) && (requests[i]->action != NULL)) {
+ print_request("Waiting any ", requests[i]);
+ xbt_dynar_push(comms, &requests[i]->action);
+ map[size] = i;
+ size++;
+ }
+ }
+ if(size > 0) {
+ i = simcall_comm_waitany(comms);
- if (NULL == request) {
- retval = MPI_ERR_INTERN;
- } else {
- SIMIX_mutex_lock(request->mutex);
+ // FIXME: MPI_UNDEFINED or does SIMIX have a return code?
+ if (i != MPI_UNDEFINED) {
+ index = map[i];
+ finish_wait(&requests[index], status);
-#ifdef DEBUG_STEPH
- print_req( request ); //@@
-#endif
- while (!request->completed) {
- SIMIX_cond_wait(request->cond, request->mutex);
+ }
}
- if (NULL != status) {
- status->MPI_SOURCE = request->src;
- status->MPI_TAG = request->tag;
- status->MPI_ERROR = MPI_SUCCESS;
+ xbt_free(map);
+ xbt_dynar_free(&comms);
+ }
+ return index;
+}
+
+void smpi_mpi_waitall(int count, MPI_Request requests[],
+ MPI_Status status[])
+{
+ int index, c;
+ MPI_Status stat;
+ MPI_Status *pstat = status == MPI_STATUS_IGNORE ? MPI_STATUS_IGNORE : &stat;
+
+ for(c = 0; c < count; c++) {
+ if(MC_IS_ENABLED) {
+ smpi_mpi_wait(&requests[c], pstat);
+ index = c;
+ } else {
+ index = smpi_mpi_waitany(count, requests, pstat);
+ if(index == MPI_UNDEFINED) {
+ break;
+ }
+ }
+ if(status != MPI_STATUS_IGNORE) {
+ memcpy(&status[index], pstat, sizeof(*pstat));
}
- SIMIX_mutex_unlock(request->mutex);
}
+}
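+
+/* Under model checking (MC_IS_ENABLED), the requests are waited in
+ * posting order rather than completion order: waitany would add
+ * scheduling non-determinism that the model checker would otherwise
+ * have to explore. */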
- return retval;
+int smpi_mpi_waitsome(int incount, MPI_Request requests[], int *indices,
+ MPI_Status status[])
+{
+ int i, count, index;
+
+ count = 0;
+ for(i = 0; i < incount; i++) {
+ if(smpi_mpi_testany(incount, requests, &index, status)) {
+ indices[count] = index;
+ count++;
+ }
+ }
+ return count;
}
-/**
- * waitall
- **/
-int smpi_mpi_waitall(int count, smpi_mpi_request_t requests[],
- smpi_mpi_status_t status[])
+void smpi_mpi_bcast(void *buf, int count, MPI_Datatype datatype, int root,
+ MPI_Comm comm)
{
- int cpt;
- int index;
- int retval;
- smpi_mpi_status_t stat;
+  // arity=2 gives a binary tree; arity=4 seems to be a good setting (see P2P-MPI)
+ nary_tree_bcast(buf, count, datatype, root, comm, 4);
+}
- for (cpt = 0; cpt < count; cpt++) {
- retval = smpi_mpi_waitany(count, requests, &index, &stat);
- if (retval != MPI_SUCCESS)
- return retval;
- if (MPI_STATUS_IGNORE != status)
- memcpy(&(status[index]), &stat, sizeof(stat));
- }
- return MPI_SUCCESS;
+void smpi_mpi_barrier(MPI_Comm comm)
+{
+  // arity=2 gives a binary tree; arity=4 seems to be a good setting (see P2P-MPI)
+ nary_tree_barrier(comm, 4);
}
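+
+/* With arity 4, a broadcast over p processes takes about ceil(log4(p))
+ * rounds, e.g. 4 rounds for p = 256, instead of the 255 root-to-peer
+ * sends of a flat tree. */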
-/**
- * waitany
- **/
-int smpi_mpi_waitany(int count, smpi_mpi_request_t * requests, int *index,
- smpi_mpi_status_t * status)
+void smpi_mpi_gather(void *sendbuf, int sendcount, MPI_Datatype sendtype,
+ void *recvbuf, int recvcount, MPI_Datatype recvtype,
+ int root, MPI_Comm comm)
{
- int cpt;
+ int system_tag = 666;
+ int rank, size, src, index;
+ MPI_Aint lb = 0, recvext = 0;
+ MPI_Request *requests;
+
+ rank = smpi_comm_rank(comm);
+ size = smpi_comm_size(comm);
+ if(rank != root) {
+ // Send buffer to root
+ smpi_mpi_send(sendbuf, sendcount, sendtype, root, system_tag, comm);
+ } else {
+ // FIXME: check for errors
+ smpi_datatype_extent(recvtype, &lb, &recvext);
+ // Local copy from root
+ smpi_datatype_copy(sendbuf, sendcount, sendtype,
+ (char *)recvbuf + root * recvcount * recvext, recvcount, recvtype);
+ // Receive buffers from senders
+ requests = xbt_new(MPI_Request, size - 1);
+ index = 0;
+ for(src = 0; src < size; src++) {
+ if(src != root) {
+ requests[index] = smpi_irecv_init((char *)recvbuf + src * recvcount * recvext,
+ recvcount, recvtype,
+ src, system_tag, comm);
+ index++;
+ }
+ }
+ // Wait for completion of irecv's.
+ smpi_mpi_startall(size - 1, requests);
+ smpi_mpi_waitall(size - 1, requests, MPI_STATUS_IGNORE);
+ xbt_free(requests);
+ }
+}
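+
+/* smpi_mpi_gather() is a flat tree: the root posts size - 1 irecvs and
+ * every other rank sends straight to it. The root computes each landing
+ * zone as recvbuf + src * recvcount * recvext, which assumes contiguous
+ * receive blocks (see the non-contiguous datatype FIXMEs above). */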
- *index = MPI_UNDEFINED;
- if (NULL == requests) {
- return MPI_ERR_INTERN;
+void smpi_mpi_gatherv(void *sendbuf, int sendcount, MPI_Datatype sendtype,
+ void *recvbuf, int *recvcounts, int *displs,
+ MPI_Datatype recvtype, int root, MPI_Comm comm)
+{
+ int system_tag = 666;
+ int rank, size, src, index;
+ MPI_Aint lb = 0, recvext = 0;
+ MPI_Request *requests;
+
+ rank = smpi_comm_rank(comm);
+ size = smpi_comm_size(comm);
+ if(rank != root) {
+ // Send buffer to root
+ smpi_mpi_send(sendbuf, sendcount, sendtype, root, system_tag, comm);
+ } else {
+ // FIXME: check for errors
+ smpi_datatype_extent(recvtype, &lb, &recvext);
+ // Local copy from root
+ smpi_datatype_copy(sendbuf, sendcount, sendtype,
+ (char *)recvbuf + displs[root] * recvext,
+ recvcounts[root], recvtype);
+ // Receive buffers from senders
+ requests = xbt_new(MPI_Request, size - 1);
+ index = 0;
+ for(src = 0; src < size; src++) {
+ if(src != root) {
+ requests[index] =
+ smpi_irecv_init((char *)recvbuf + displs[src] * recvext,
+ recvcounts[src], recvtype, src, system_tag, comm);
+ index++;
+ }
+ }
+ // Wait for completion of irecv's.
+ smpi_mpi_startall(size - 1, requests);
+ smpi_mpi_waitall(size - 1, requests, MPI_STATUS_IGNORE);
+ xbt_free(requests);
}
- /* First check if one of them is already done */
- for (cpt = 0; cpt < count; cpt++) {
-#ifdef DEBUG_STEPH
- printf("...exam req[%d] of msg from [%d]\n",cpt,requests[cpt]->src);
-#endif
- if (requests[cpt]->completed && !requests[cpt]->consumed) { /* got ya */
-#ifdef DEBUG_STEPH
- printf("...found match req[%d] of msg from [%d]\n",cpt,requests[cpt]->src);
-#endif
- *index = cpt;
- goto found_request;
+}
+
+void smpi_mpi_allgather(void *sendbuf, int sendcount,
+ MPI_Datatype sendtype, void *recvbuf,
+ int recvcount, MPI_Datatype recvtype,
+ MPI_Comm comm)
+{
+ int system_tag = 666;
+ int rank, size, other, index;
+ MPI_Aint lb = 0, recvext = 0;
+ MPI_Request *requests;
+
+ rank = smpi_comm_rank(comm);
+ size = smpi_comm_size(comm);
+ // FIXME: check for errors
+ smpi_datatype_extent(recvtype, &lb, &recvext);
+ // Local copy from self
+ smpi_datatype_copy(sendbuf, sendcount, sendtype,
+ (char *)recvbuf + rank * recvcount * recvext, recvcount,
+ recvtype);
+ // Send/Recv buffers to/from others;
+ requests = xbt_new(MPI_Request, 2 * (size - 1));
+ index = 0;
+ for(other = 0; other < size; other++) {
+ if(other != rank) {
+ requests[index] =
+ smpi_isend_init(sendbuf, sendcount, sendtype, other, system_tag,
+ comm);
+ index++;
+ requests[index] = smpi_irecv_init((char *)recvbuf + other * recvcount * recvext,
+ recvcount, recvtype, other,
+ system_tag, comm);
+ index++;
}
}
- /* If none found, block */
- /* FIXME: should use a SIMIX_cond_waitany, when implemented. For now, block on the first one */
- while (1) {
- for (cpt = 0; cpt < count; cpt++) {
+ // Wait for completion of all comms.
+ smpi_mpi_startall(2 * (size - 1), requests);
+ smpi_mpi_waitall(2 * (size - 1), requests, MPI_STATUS_IGNORE);
+ xbt_free(requests);
+}
-#ifdef DEBUG_STEPH
- print_req( requests[cpt] );
-#endif
- if (!requests[cpt]->completed) { /* this one is not done, wait on it */
-#ifdef DEBUG_STEPH
- printf("... blocked waiting a msg %d->%d, tag=%d\n",requests[cpt]->src,requests[cpt]->dst,requests[cpt]->tag);
-#endif
- while (!requests[cpt]->completed)
- SIMIX_cond_wait(requests[cpt]->cond, requests[cpt]->mutex);
+void smpi_mpi_allgatherv(void *sendbuf, int sendcount,
+ MPI_Datatype sendtype, void *recvbuf,
+ int *recvcounts, int *displs,
+ MPI_Datatype recvtype, MPI_Comm comm)
+{
+ int system_tag = 666;
+ int rank, size, other, index;
+ MPI_Aint lb = 0, recvext = 0;
+ MPI_Request *requests;
+
+ rank = smpi_comm_rank(comm);
+ size = smpi_comm_size(comm);
+ // FIXME: check for errors
+ smpi_datatype_extent(recvtype, &lb, &recvext);
+ // Local copy from self
+ smpi_datatype_copy(sendbuf, sendcount, sendtype,
+ (char *)recvbuf + displs[rank] * recvext,
+ recvcounts[rank], recvtype);
+  // Send/Recv buffers to/from others
+ requests = xbt_new(MPI_Request, 2 * (size - 1));
+ index = 0;
+ for(other = 0; other < size; other++) {
+ if(other != rank) {
+ requests[index] =
+ smpi_isend_init(sendbuf, sendcount, sendtype, other, system_tag,
+ comm);
+ index++;
+ requests[index] =
+ smpi_irecv_init((char *)recvbuf + displs[other] * recvext, recvcounts[other],
+ recvtype, other, system_tag, comm);
+ index++;
+ }
+ }
+ // Wait for completion of all comms.
+ smpi_mpi_startall(2 * (size - 1), requests);
+ smpi_mpi_waitall(2 * (size - 1), requests, MPI_STATUS_IGNORE);
+ xbt_free(requests);
+}
- *index = cpt;
- goto found_request;
+void smpi_mpi_scatter(void *sendbuf, int sendcount, MPI_Datatype sendtype,
+ void *recvbuf, int recvcount, MPI_Datatype recvtype,
+ int root, MPI_Comm comm)
+{
+ int system_tag = 666;
+ int rank, size, dst, index;
+ MPI_Aint lb = 0, sendext = 0;
+ MPI_Request *requests;
+
+ rank = smpi_comm_rank(comm);
+ size = smpi_comm_size(comm);
+ if(rank != root) {
+ // Recv buffer from root
+ smpi_mpi_recv(recvbuf, recvcount, recvtype, root, system_tag, comm,
+ MPI_STATUS_IGNORE);
+ } else {
+ // FIXME: check for errors
+ smpi_datatype_extent(sendtype, &lb, &sendext);
+ // Local copy from root
+ smpi_datatype_copy((char *)sendbuf + root * sendcount * sendext,
+ sendcount, sendtype, recvbuf, recvcount, recvtype);
+ // Send buffers to receivers
+ requests = xbt_new(MPI_Request, size - 1);
+ index = 0;
+ for(dst = 0; dst < size; dst++) {
+ if(dst != root) {
+ requests[index] = smpi_isend_init((char *)sendbuf + dst * sendcount * sendext,
+ sendcount, sendtype, dst,
+ system_tag, comm);
+ index++;
}
}
- if (cpt == count) /* they are all done. Damn user */
- return MPI_ERR_REQUEST;
+ // Wait for completion of isend's.
+ smpi_mpi_startall(size - 1, requests);
+ smpi_mpi_waitall(size - 1, requests, MPI_STATUS_IGNORE);
+ xbt_free(requests);
}
+}
-found_request:
-#ifdef DEBUG_STEPH
- print_req( requests[cpt] );
-#endif
- requests[*index]->consumed = 1;
-#ifdef DEBUG_STEPH
- print_req( requests[cpt] );
- printf("...accessing *req[%d]->consumed\n",cpt);
-#endif
- if (NULL != status) {
- status->MPI_SOURCE = requests[*index]->src;
- status->MPI_TAG = requests[*index]->tag;
- status->MPI_ERROR = MPI_SUCCESS;
+void smpi_mpi_scatterv(void *sendbuf, int *sendcounts, int *displs,
+ MPI_Datatype sendtype, void *recvbuf, int recvcount,
+ MPI_Datatype recvtype, int root, MPI_Comm comm)
+{
+ int system_tag = 666;
+ int rank, size, dst, index;
+ MPI_Aint lb = 0, sendext = 0;
+ MPI_Request *requests;
+
+ rank = smpi_comm_rank(comm);
+ size = smpi_comm_size(comm);
+ if(rank != root) {
+ // Recv buffer from root
+ smpi_mpi_recv(recvbuf, recvcount, recvtype, root, system_tag, comm,
+ MPI_STATUS_IGNORE);
+ } else {
+ // FIXME: check for errors
+ smpi_datatype_extent(sendtype, &lb, &sendext);
+ // Local copy from root
+ smpi_datatype_copy((char *)sendbuf + displs[root] * sendext, sendcounts[root],
+ sendtype, recvbuf, recvcount, recvtype);
+ // Send buffers to receivers
+ requests = xbt_new(MPI_Request, size - 1);
+ index = 0;
+ for(dst = 0; dst < size; dst++) {
+ if(dst != root) {
+ requests[index] =
+ smpi_isend_init((char *)sendbuf + displs[dst] * sendext, sendcounts[dst],
+ sendtype, dst, system_tag, comm);
+ index++;
+ }
+ }
+ // Wait for completion of isend's.
+ smpi_mpi_startall(size - 1, requests);
+ smpi_mpi_waitall(size - 1, requests, MPI_STATUS_IGNORE);
+ xbt_free(requests);
+ }
+}
+
+void smpi_mpi_reduce(void *sendbuf, void *recvbuf, int count,
+ MPI_Datatype datatype, MPI_Op op, int root,
+ MPI_Comm comm)
+{
+ int system_tag = 666;
+ int rank, size, src, index;
+ MPI_Aint lb = 0, dataext = 0;
+ MPI_Request *requests;
+ void **tmpbufs;
+
+ rank = smpi_comm_rank(comm);
+ size = smpi_comm_size(comm);
+ if(rank != root) {
+ // Send buffer to root
+ smpi_mpi_send(sendbuf, count, datatype, root, system_tag, comm);
+ } else {
+ // FIXME: check for errors
+ smpi_datatype_extent(datatype, &lb, &dataext);
+ // Local copy from root
+ if (sendbuf && recvbuf)
+ smpi_datatype_copy(sendbuf, count, datatype, recvbuf, count, datatype);
+ // Receive buffers from senders
+    // TODO: add an MPI_Barrier here?
+ requests = xbt_new(MPI_Request, size - 1);
+ tmpbufs = xbt_new(void *, size - 1);
+ index = 0;
+ for(src = 0; src < size; src++) {
+ if(src != root) {
+        // FIXME: possibly overkill when we have contiguous/noncontiguous data
+ // mapping...
+ tmpbufs[index] = xbt_malloc(count * dataext);
+ requests[index] =
+ smpi_irecv_init(tmpbufs[index], count, datatype, src,
+ system_tag, comm);
+ index++;
+ }
+ }
+ // Wait for completion of irecv's.
+ smpi_mpi_startall(size - 1, requests);
+ for(src = 0; src < size - 1; src++) {
+ index = smpi_mpi_waitany(size - 1, requests, MPI_STATUS_IGNORE);
+ XBT_VERB("finished waiting any request with index %d", index);
+ if(index == MPI_UNDEFINED) {
+ break;
+ }
+      if(op) /* op can be MPI_OP_NULL, which does nothing */
+ smpi_op_apply(op, tmpbufs[index], recvbuf, &count, &datatype);
+ }
+ for(index = 0; index < size - 1; index++) {
+ xbt_free(tmpbufs[index]);
+ }
+ xbt_free(tmpbufs);
+ xbt_free(requests);
}
- return MPI_SUCCESS;
+}
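+
+/* Caveat: the root applies `op` in the order in which the receives
+ * happen to complete (waitany), not in rank order. This is fine for the
+ * predefined commutative operations, but a non-commutative user op could
+ * observe a different grouping than a rank-ordered reduction. */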
+void smpi_mpi_allreduce(void *sendbuf, void *recvbuf, int count,
+ MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
+{
+ smpi_mpi_reduce(sendbuf, recvbuf, count, datatype, op, 0, comm);
+ smpi_mpi_bcast(recvbuf, count, datatype, 0, comm);
+}
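+
+/* Allreduce is composed as a reduce to rank 0 followed by a broadcast;
+ * simple and correct, at the cost of roughly twice the latency of a
+ * dedicated allreduce algorithm. */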
+
+void smpi_mpi_scan(void *sendbuf, void *recvbuf, int count,
+ MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
+{
+ int system_tag = 666;
+ int rank, size, other, index;
+ MPI_Aint lb = 0, dataext = 0;
+ MPI_Request *requests;
+ void **tmpbufs;
+
+ rank = smpi_comm_rank(comm);
+ size = smpi_comm_size(comm);
+
+ // FIXME: check for errors
+ smpi_datatype_extent(datatype, &lb, &dataext);
+
+ // Local copy from self
+ smpi_datatype_copy(sendbuf, count, datatype, recvbuf, count, datatype);
+
+ // Send/Recv buffers to/from others;
+ requests = xbt_new(MPI_Request, size - 1);
+ tmpbufs = xbt_new(void *, rank);
+ index = 0;
+ for(other = 0; other < rank; other++) {
+    // FIXME: possibly overkill when we have contiguous/noncontiguous data
+ // mapping...
+ tmpbufs[index] = xbt_malloc(count * dataext);
+ requests[index] =
+ smpi_irecv_init(tmpbufs[index], count, datatype, other, system_tag,
+ comm);
+ index++;
+ }
+ for(other = rank + 1; other < size; other++) {
+ requests[index] =
+ smpi_isend_init(sendbuf, count, datatype, other, system_tag, comm);
+ index++;
+ }
+ // Wait for completion of all comms.
+ smpi_mpi_startall(size - 1, requests);
+ for(other = 0; other < size - 1; other++) {
+ index = smpi_mpi_waitany(size - 1, requests, MPI_STATUS_IGNORE);
+ if(index == MPI_UNDEFINED) {
+ break;
+ }
+ if(index < rank) {
+      // request index is below rank: it's an irecv
+ smpi_op_apply(op, tmpbufs[index], recvbuf, &count, &datatype);
+ }
+ }
+ for(index = 0; index < rank; index++) {
+ xbt_free(tmpbufs[index]);
+ }
+ xbt_free(tmpbufs);
+ xbt_free(requests);
}