-/* Copyright (c) 2007, 2008, 2009, 2010. The SimGrid Team.
+/* Copyright (c) 2007-2013. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
#include "simix/smx_private.h"
#include "surf/surf.h"
#include "simgrid/sg_config.h"
-
+#include "colls/colls.h"
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_base, smpi, "Logging specific to SMPI (base)");
xbt_assert(ref, "Cannot match recv against null reference");
xbt_assert(req, "Cannot match recv against null request");
if((ref->src == MPI_ANY_SOURCE || req->src == ref->src)
- && (ref->tag == MPI_ANY_TAG || req->tag == ref->tag)){
+ && ((ref->tag == MPI_ANY_TAG && req->tag >=0) || req->tag == ref->tag)){
//we match, we can transfer some values
// FIXME : move this to the copy function ?
if(ref->src == MPI_ANY_SOURCE)ref->real_src = req->src;
if(req->detached==1){
ref->detached_sender=req; //tie the sender to the receiver, as it is detached and has to be freed in the receiver
}
+ XBT_DEBUG("match succeeded");
return 1;
}else return 0;
}
xbt_assert(req, "Cannot match send against null request");
if((req->src == MPI_ANY_SOURCE || req->src == ref->src)
- && (req->tag == MPI_ANY_TAG || req->tag == ref->tag))
+ && ((req->tag == MPI_ANY_TAG && ref->tag >=0)|| req->tag == ref->tag))
{
if(req->src == MPI_ANY_SOURCE)req->real_src = ref->src;
if(req->tag == MPI_ANY_TAG)req->real_tag = ref->tag;
if(ref->detached==1){
req->detached_sender=ref; //tie the sender to the receiver, as it is detached and has to be freed in the receiver
}
-
+ XBT_DEBUG("match succeeded");
return 1;
} else return 0;
}
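/* Editor's note on the matching predicates above (hedged): a wildcard-tag
 * receive (MPI_ANY_TAG) now only matches requests whose tag is >= 0; the
 * internal tags introduced elsewhere in this patch (the COLL_TAG_* values and
 * the -888 tag used by scan/exscan below) are presumably negative, so
 * user-level wildcard receives cannot accidentally intercept internal
 * collective traffic. */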
static double smpi_os(double size)
{
- if (!smpi_os_values)
- smpi_os_values =
- parse_factor(sg_cfg_get_string("smpi/os"));
-
+ if (!smpi_os_values) {
+ smpi_os_values = parse_factor(sg_cfg_get_string("smpi/os"));
+ smpi_register_static(smpi_os_values, xbt_dynar_free_voidp);
+ }
unsigned int iter = 0;
s_smpi_factor_t fact;
double current=0.0;
static double smpi_ois(double size)
{
- if (!smpi_ois_values)
- smpi_ois_values =
- parse_factor(sg_cfg_get_string("smpi/ois"));
-
+ if (!smpi_ois_values) {
+ smpi_ois_values = parse_factor(sg_cfg_get_string("smpi/ois"));
+ smpi_register_static(smpi_ois_values, xbt_dynar_free_voidp);
+ }
unsigned int iter = 0;
s_smpi_factor_t fact;
double current=0.0;
static double smpi_or(double size)
{
- if (!smpi_or_values)
- smpi_or_values =
- parse_factor(sg_cfg_get_string("smpi/or"));
-
+ if (!smpi_or_values) {
+ smpi_or_values = parse_factor(sg_cfg_get_string("smpi/or"));
+ smpi_register_static(smpi_or_values, xbt_dynar_free_voidp);
+ }
unsigned int iter = 0;
s_smpi_factor_t fact;
double current=0.0;
if(datatype->has_subtype == 1){
// This part handles the problem of non-contiguous memory
old_buf = buf;
- buf = xbt_malloc(count*smpi_datatype_size(datatype));
+ buf = count==0 ? NULL : xbt_malloc(count*smpi_datatype_size(datatype));
if (flags & SEND) {
subtype->serialize(old_buf, buf, count, datatype->substruct);
}
}
-void smpi_empty_status(MPI_Status * status) {
+void smpi_empty_status(MPI_Status * status)
+{
if(status != MPI_STATUS_IGNORE) {
- status->MPI_SOURCE=MPI_ANY_SOURCE;
- status->MPI_TAG=MPI_ANY_TAG;
- status->count=0;
+ status->MPI_SOURCE = MPI_ANY_SOURCE;
+ status->MPI_TAG = MPI_ANY_TAG;
+ status->MPI_ERROR = MPI_SUCCESS;
+ status->count=0;
}
}
int dst, int tag, MPI_Comm comm)
{
MPI_Request request =
- build_request(buf, count, datatype, smpi_comm_rank(comm), dst, tag,
- comm, PERSISTENT | SEND);
+ build_request(buf==MPI_BOTTOM ? (void*)0 : buf, count, datatype, smpi_process_index(), smpi_group_index(smpi_comm_group(comm), dst), tag,
+ comm, PERSISTENT | SEND | PREPARED);
request->refcount++;
return request;
}
int dst, int tag, MPI_Comm comm)
{
MPI_Request request =
- build_request(buf, count, datatype, smpi_comm_rank(comm), dst, tag,
- comm, PERSISTENT | SSEND | SEND);
+ build_request(buf==MPI_BOTTOM ? (void*)0 : buf, count, datatype, smpi_process_index(), smpi_group_index(smpi_comm_group(comm), dst), tag,
+ comm, PERSISTENT | SSEND | SEND | PREPARED);
request->refcount++;
return request;
}
int src, int tag, MPI_Comm comm)
{
MPI_Request request =
- build_request(buf, count, datatype, src, smpi_comm_rank(comm), tag,
- comm, PERSISTENT | RECV);
+ build_request(buf==MPI_BOTTOM ? (void*)0 : buf, count, datatype, src == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : smpi_group_index(smpi_comm_group(comm), src), smpi_process_index(), tag,
+ comm, PERSISTENT | RECV | PREPARED);
request->refcount++;
return request;
}
xbt_assert(!request->action,
"Cannot (re)start a non-finished communication");
+ if(request->flags & PREPARED)request->flags &= ~PREPARED;
if(request->flags & RECV) {
print_request("New recv", request);
+ //FIXME: if receive is posted with a large size, but send is smaller, mailboxes may not match !
if (request->size < sg_cfg_get_int("smpi/async_small_thres"))
mailbox = smpi_process_mailbox_small();
else
// we make a copy here, as the size is modified by simix, and we may reuse the request in another receive later
request->real_size=request->size;
smpi_datatype_use(request->old_type);
+ smpi_comm_use(request->comm);
request->action = simcall_comm_irecv(mailbox, request->buf, &request->real_size, &match_recv, request);
//integrate pseudo-timing for buffering of small messages, do not bother to execute the simcall if 0
} else {
- int receiver = smpi_group_index(smpi_comm_group(request->comm), request->dst);
+
+ int receiver = request->dst;//smpi_group_index(smpi_comm_group(request->comm), request->dst);
+
+ #ifdef HAVE_TRACING
+ int rank = smpi_process_index();
+ if (TRACE_smpi_view_internals()) {
+ TRACE_smpi_send(rank, rank, receiver,request->size);
+ }
+ #endif
/* if(receiver == MPI_UNDEFINED) {*/
/* XBT_WARN("Trying to send a message to a wrong rank");*/
/* return;*/
request->refcount++;
if(request->old_type->has_subtype == 0){
oldbuf = request->buf;
- if (oldbuf){
+ if (oldbuf && request->size!=0){
request->buf = xbt_malloc(request->size);
memcpy(request->buf,oldbuf,request->size);
}
}
XBT_DEBUG("Send request %p is detached; buf %p copied into %p",request,oldbuf,request->buf);
}
+
// we make a copy here, as the size is modified by simix, and we may reuse the request in another receive later
request->real_size=request->size;
smpi_datatype_use(request->old_type);
+ smpi_comm_use(request->comm);
//if we are giving back the control to the user without waiting for completion, we have to inject timings
double sleeptime =0.0;
/* FIXME: detached sends are not traceable (request->action == NULL) */
if (request->action)
simcall_set_category(request->action, TRACE_internal_smpi_get_category());
+
#endif
}
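/* Editor's note on smpi_mpi_start above (sketch, based only on this excerpt):
 * receives smaller than the "smpi/async_small_thres" option are posted on the
 * per-process "small" mailbox instead of the regular one, and detached sends
 * copy the user buffer (when non-NULL and of non-zero size) so the sender can
 * return while the data is delivered later. */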
void smpi_mpi_request_free(MPI_Request * request)
{
-
if((*request) != MPI_REQUEST_NULL){
(*request)->refcount--;
if((*request)->refcount<0) xbt_die("wrong refcount");
if((*request)->refcount==0){
+ print_request("Destroying", (*request));
xbt_free(*request);
*request = MPI_REQUEST_NULL;
+ }else{
+ print_request("Decrementing", (*request));
+
}
}else{
xbt_die("freeing an already free request");
int dst, int tag, MPI_Comm comm)
{
MPI_Request request =
- build_request(buf, count, datatype, smpi_comm_rank(comm), dst, tag,
- comm, NON_PERSISTENT | SEND);
+ build_request(buf==MPI_BOTTOM ? (void*)0 : buf , count, datatype, smpi_process_index(), smpi_group_index(smpi_comm_group(comm), dst), tag,
+ comm, NON_PERSISTENT | ISEND | SEND | PREPARED);
return request;
}
int dst, int tag, MPI_Comm comm)
{
MPI_Request request =
- build_request(buf, count, datatype, smpi_comm_rank(comm), dst, tag,
+ build_request(buf==MPI_BOTTOM?(void*)0:buf, count, datatype, smpi_process_index(), smpi_group_index(smpi_comm_group(comm), dst), tag,
comm, NON_PERSISTENT | ISEND | SEND);
smpi_mpi_start(request);
int dst, int tag, MPI_Comm comm)
{
MPI_Request request =
- build_request(buf, count, datatype, smpi_comm_rank(comm), dst, tag,
+ build_request(buf==MPI_BOTTOM ? (void*)0 : buf, count, datatype, smpi_process_index(), smpi_group_index(smpi_comm_group(comm), dst), tag,
comm, NON_PERSISTENT | ISEND | SSEND | SEND);
smpi_mpi_start(request);
return request;
int src, int tag, MPI_Comm comm)
{
MPI_Request request =
- build_request(buf, count, datatype, src, smpi_comm_rank(comm), tag,
- comm, NON_PERSISTENT | RECV);
+ build_request(buf==MPI_BOTTOM ? (void*)0 : buf, count, datatype, src == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : smpi_group_index(smpi_comm_group(comm), src), smpi_process_index(), tag,
+ comm, NON_PERSISTENT | RECV | PREPARED);
return request;
}
int src, int tag, MPI_Comm comm)
{
MPI_Request request =
- build_request(buf, count, datatype, src, smpi_comm_rank(comm), tag,
+ build_request(buf==MPI_BOTTOM ? (void*)0 : buf, count, datatype, src == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : smpi_group_index(smpi_comm_group(comm), src), smpi_process_index(), tag,
comm, NON_PERSISTENT | RECV);
smpi_mpi_start(request);
int tag, MPI_Comm comm)
{
MPI_Request request =
- build_request(buf, count, datatype, smpi_comm_rank(comm), dst, tag,
+ build_request(buf==MPI_BOTTOM ? (void*)0 : buf, count, datatype, smpi_process_index(), smpi_group_index(smpi_comm_group(comm), dst), tag,
comm, NON_PERSISTENT | SEND);
-
smpi_mpi_start(request);
smpi_mpi_wait(&request, MPI_STATUS_IGNORE);
void smpi_mpi_ssend(void *buf, int count, MPI_Datatype datatype,
int dst, int tag, MPI_Comm comm)
{
- MPI_Request request = smpi_mpi_issend(buf, count, datatype, dst, tag, comm);
+ MPI_Request request =
+ build_request(buf==MPI_BOTTOM ? (void*)0 : buf, count, datatype, smpi_process_index(), smpi_group_index(smpi_comm_group(comm), dst), tag,
+ comm, NON_PERSISTENT | SSEND | SEND);
+
+ smpi_mpi_start(request);
smpi_mpi_wait(&request, MPI_STATUS_IGNORE);
}
{
MPI_Request requests[2];
MPI_Status stats[2];
-
+ int myid=smpi_process_index();
+ if ((smpi_group_index(smpi_comm_group(comm), dst) == myid) && (smpi_group_index(smpi_comm_group(comm), src) == myid)) {
+ smpi_datatype_copy(sendbuf, sendcount, sendtype,
+ recvbuf, recvcount, recvtype);
+ return;
+ }
requests[0] =
smpi_isend_init(sendbuf, sendcount, sendtype, dst, sendtag, comm);
requests[1] =
smpi_mpi_waitall(2, requests, stats);
if(status != MPI_STATUS_IGNORE) {
// Copy receive status
- memcpy(status, &stats[1], sizeof(MPI_Status));
+ *status = stats[1];
}
}
static void finish_wait(MPI_Request * request, MPI_Status * status)
{
MPI_Request req = *request;
- if(!(req->detached && req->flags & SEND)){
- if(status != MPI_STATUS_IGNORE) {
- status->MPI_SOURCE = req->src == MPI_ANY_SOURCE ? req->real_src : req->src;
+ if(status != MPI_STATUS_IGNORE)
+ smpi_empty_status(status);
+
+ if(!(req->detached && req->flags & SEND) && !(req->flags & PREPARED)){
+ if(status != MPI_STATUS_IGNORE) {
+ int src = req->src == MPI_ANY_SOURCE ? req->real_src : req->src;
+ status->MPI_SOURCE = smpi_group_rank(smpi_comm_group(req->comm), src);
status->MPI_TAG = req->tag == MPI_ANY_TAG ? req->real_tag : req->tag;
- if(req->truncated)
- status->MPI_ERROR = MPI_ERR_TRUNCATE;
- else status->MPI_ERROR = MPI_SUCCESS ;
+ status->MPI_ERROR = req->truncated ? MPI_ERR_TRUNCATE : MPI_SUCCESS;
// this handles the case where the size in the receive differs from the size in the send
// FIXME: really this should just contain the count of receive-type blocks,
// right?
}
if(req->detached == 0) free(req->buf);
}
+ smpi_comm_unuse(req->comm);
smpi_datatype_unuse(datatype);
+
}
+#ifdef HAVE_TRACING
+ if (TRACE_smpi_view_internals()) {
+ if(req->flags & RECV){
+ int rank = smpi_process_index();
+ int src_traced = (req->src == MPI_ANY_SOURCE ? req->real_src : req->src);
+ TRACE_smpi_recv(rank, src_traced, rank);
+ }
+ }
+#endif
+
if(req->detached_sender!=NULL){
smpi_mpi_request_free(&(req->detached_sender));
}
else
flag = simcall_comm_test((*request)->action);
if(flag) {
- (*request)->refcount++;
finish_wait(request, status);
+ request=MPI_REQUEST_NULL;
}else{
smpi_empty_status(status);
}
*index = MPI_UNDEFINED;
flag = 0;
- if(count > 0) {
- comms = xbt_dynar_new(sizeof(smx_action_t), NULL);
- map = xbt_new(int, count);
- size = 0;
- for(i = 0; i < count; i++) {
- if((requests[i]!=MPI_REQUEST_NULL) && requests[i]->action) {
- xbt_dynar_push(comms, &requests[i]->action);
- map[size] = i;
- size++;
- }
+ comms = xbt_dynar_new(sizeof(smx_action_t), NULL);
+ map = xbt_new(int, count);
+ size = 0;
+ for(i = 0; i < count; i++) {
+ if((requests[i]!=MPI_REQUEST_NULL) && requests[i]->action) {
+ xbt_dynar_push(comms, &requests[i]->action);
+ map[size] = i;
+ size++;
}
- if(size > 0) {
- i = simcall_comm_testany(comms);
- // not MPI_UNDEFINED, as this is a simix return code
- if(i != -1) {
- *index = map[i];
- finish_wait(&requests[*index], status);
- flag = 1;
- }
- }else{
- //all requests are null or inactive, return true
- flag=1;
- smpi_empty_status(status);
+ }
+ if(size > 0) {
+ i = simcall_comm_testany(comms);
+ // not MPI_UNDEFINED, as this is a simix return code
+ if(i != -1) {
+ *index = map[i];
+ finish_wait(&requests[*index], status);
+ flag = 1;
}
- xbt_free(map);
- xbt_dynar_free(&comms);
+ }else{
+ //all requests are null or inactive, return true
+ flag=1;
+ smpi_empty_status(status);
}
+ xbt_free(map);
+ xbt_dynar_free(&comms);
return flag;
}
if(requests[i]!= MPI_REQUEST_NULL){
if (smpi_mpi_test(&requests[i], pstat)!=1){
flag=0;
+ }else{
+ requests[i]=MPI_REQUEST_NULL;
}
}else{
smpi_empty_status(pstat);
}
if(status != MPI_STATUSES_IGNORE) {
- memcpy(&status[i], pstat, sizeof(*pstat));
+ status[i] = *pstat;
}
}
return flag;
while(flag==0){
smpi_mpi_iprobe(source, tag, comm, &flag, status);
XBT_DEBUG("Busy Waiting on probing : %d", flag);
- if(!flag) {
- simcall_process_sleep(0.0001);
- }
}
}
void smpi_mpi_iprobe(int source, int tag, MPI_Comm comm, int* flag, MPI_Status* status){
- MPI_Request request =build_request(NULL, 0, MPI_CHAR, source, smpi_comm_rank(comm), tag,
+
+ MPI_Request request =build_request(NULL, 0, MPI_CHAR, source == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : smpi_group_index(smpi_comm_group(comm), source), smpi_comm_rank(comm), tag,
comm, NON_PERSISTENT | RECV);
+ //to avoid deadlock, we have to sleep some time here, or the timer won't advance and we will only do iprobe simcalls
+ double sleeptime= sg_cfg_get_double("smpi/iprobe");
+ //multiplier for the sleep time, to speed up execution: each failed iprobe increases it
+ static int nsleeps = 1;
+
+ simcall_process_sleep(sleeptime);
+
// behave like a receive, but don't do it
smx_rdv_t mailbox;
if(request->action){
MPI_Request req = (MPI_Request)SIMIX_comm_get_src_data(request->action);
*flag = 1;
- if(status != MPI_STATUS_IGNORE) {
- status->MPI_SOURCE = req->src;
+ if(status != MPI_STATUS_IGNORE && !(req->flags & PREPARED)) {
+ status->MPI_SOURCE = smpi_group_rank(smpi_comm_group(comm), req->src);
status->MPI_TAG = req->tag;
status->MPI_ERROR = MPI_SUCCESS;
status->count = req->real_size;
}
+ nsleeps=1;//reset the number of sleeps we will do next time
+ }
+ else {
+ *flag = 0;
+ nsleeps++;
}
- else *flag = 0;
smpi_mpi_request_free(&request);
return;
simcall_comm_wait((*request)->action, -1.0);
}
finish_wait(request, status);
-
+ request=MPI_REQUEST_NULL;
// FIXME for a detached send, finish_wait is not called:
}
int index, c;
MPI_Status stat;
MPI_Status *pstat = status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat;
- int retvalue=MPI_SUCCESS;
+ int retvalue = MPI_SUCCESS;
//tag invalid requests in the set
- for(c = 0; c < count; c++) {
- if(requests[c]==MPI_REQUEST_NULL || requests[c]->dst == MPI_PROC_NULL ){
- if(status != MPI_STATUSES_IGNORE)
+ if (status != MPI_STATUSES_IGNORE) {
+ for (c = 0; c < count; c++) {
+ if (requests[c] == MPI_REQUEST_NULL || requests[c]->dst == MPI_PROC_NULL) {
smpi_empty_status(&status[c]);
- }else if(requests[c]->src == MPI_PROC_NULL ){
- if(status != MPI_STATUSES_IGNORE) {
+ } else if (requests[c]->src == MPI_PROC_NULL) {
smpi_empty_status(&status[c]);
- status[c].MPI_SOURCE=MPI_PROC_NULL;
+ status[c].MPI_SOURCE = MPI_PROC_NULL;
}
}
}
for(c = 0; c < count; c++) {
- if(MC_is_active()) {
- smpi_mpi_wait(&requests[c], pstat);
- index = c;
- } else {
- index = smpi_mpi_waitany(count, requests, pstat);
- if(index == MPI_UNDEFINED) {
- break;
- }
- if(status != MPI_STATUSES_IGNORE) {
- memcpy(&status[index], pstat, sizeof(*pstat));
- if(status[index].MPI_ERROR==MPI_ERR_TRUNCATE)retvalue=MPI_ERR_IN_STATUS;
-
- }
+ if (MC_is_active()) {
+ smpi_mpi_wait(&requests[c], pstat);
+ index = c;
+ } else {
+ index = smpi_mpi_waitany(count, requests, pstat);
+ if (index == MPI_UNDEFINED)
+ break;
+ requests[index]=MPI_REQUEST_NULL;
+ }
+ if (status != MPI_STATUSES_IGNORE) {
+ status[index] = *pstat;
+ if (status[index].MPI_ERROR == MPI_ERR_TRUNCATE)
+ retvalue = MPI_ERR_IN_STATUS;
}
}
indices[count] = index;
count++;
if(status != MPI_STATUSES_IGNORE) {
- memcpy(&status[index], pstat, sizeof(*pstat));
+ status[index] = *pstat;
}
+ requests[index]=MPI_REQUEST_NULL;
}else{
return MPI_UNDEFINED;
}
for(i = 0; i < incount; i++) {
if((requests[i] != MPI_REQUEST_NULL)) {
if(smpi_mpi_test(&requests[i], pstat)) {
- indices[count] = i;
+ indices[i] = 1;
count++;
if(status != MPI_STATUSES_IGNORE) {
- memcpy(&status[i], pstat, sizeof(*pstat));
+ status[i] = *pstat;
}
+ requests[i]=MPI_REQUEST_NULL;
+
}
}else{
count_dead++;
void *recvbuf, int recvcount, MPI_Datatype recvtype,
int root, MPI_Comm comm)
{
- int system_tag = 666;
+ int system_tag = COLL_TAG_GATHER;
int rank, size, src, index;
MPI_Aint lb = 0, recvext = 0;
MPI_Request *requests;
}
}
+
+void smpi_mpi_reduce_scatter(void *sendbuf, void *recvbuf, int *recvcounts,
+ MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
+{
+ int i, size, count;
+ int *displs;
+ int rank = smpi_process_index();
+ void *tmpbuf;
+
+ /* arbitrarily choose root as rank 0 */
+ size = smpi_comm_size(comm);
+ count = 0;
+ displs = xbt_new(int, size);
+ for (i = 0; i < size; i++) {
+ displs[i] = count;
+ count += recvcounts[i];
+ }
+ tmpbuf=(void*)xbt_malloc(count*smpi_datatype_get_extent(datatype));
+ mpi_coll_reduce_fun(sendbuf, tmpbuf, count, datatype, op, 0, comm);
+ smpi_mpi_scatterv(tmpbuf, recvcounts, displs, datatype, recvbuf,
+ recvcounts[rank], datatype, 0, comm);
+ xbt_free(displs);
+ xbt_free(tmpbuf);
+}
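/* Illustration (editorial sketch, not part of the patch): for 3 ranks with
 * recvcounts = {2, 3, 1} the loop above yields displs = {0, 2, 5} and
 * count = 6; rank 0 first reduces all 6 elements into tmpbuf, then the
 * scatterv delivers tmpbuf[0..1] to rank 0, tmpbuf[2..4] to rank 1 and
 * tmpbuf[5] to rank 2. */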
+
void smpi_mpi_gatherv(void *sendbuf, int sendcount, MPI_Datatype sendtype,
void *recvbuf, int *recvcounts, int *displs,
MPI_Datatype recvtype, int root, MPI_Comm comm)
{
- int system_tag = 666;
+ int system_tag = COLL_TAG_GATHERV;
int rank, size, src, index;
MPI_Aint lb = 0, recvext = 0;
MPI_Request *requests;
int recvcount, MPI_Datatype recvtype,
MPI_Comm comm)
{
- int system_tag = 666;
+ int system_tag = COLL_TAG_ALLGATHER;
int rank, size, other, index;
MPI_Aint lb = 0, recvext = 0;
MPI_Request *requests;
int *recvcounts, int *displs,
MPI_Datatype recvtype, MPI_Comm comm)
{
- int system_tag = 666;
+ int system_tag = COLL_TAG_ALLGATHERV;
int rank, size, other, index;
MPI_Aint lb = 0, recvext = 0;
MPI_Request *requests;
void *recvbuf, int recvcount, MPI_Datatype recvtype,
int root, MPI_Comm comm)
{
- int system_tag = 666;
+ int system_tag = COLL_TAG_SCATTER;
int rank, size, dst, index;
MPI_Aint lb = 0, sendext = 0;
MPI_Request *requests;
// FIXME: check for errors
smpi_datatype_extent(sendtype, &lb, &sendext);
// Local copy from root
- smpi_datatype_copy((char *)sendbuf + root * sendcount * sendext,
- sendcount, sendtype, recvbuf, recvcount, recvtype);
+ if(recvbuf!=MPI_IN_PLACE){
+ smpi_datatype_copy((char *)sendbuf + root * sendcount * sendext,
+ sendcount, sendtype, recvbuf, recvcount, recvtype);
+ }
// Send buffers to receivers
requests = xbt_new(MPI_Request, size - 1);
index = 0;
MPI_Datatype sendtype, void *recvbuf, int recvcount,
MPI_Datatype recvtype, int root, MPI_Comm comm)
{
- int system_tag = 666;
+ int system_tag = COLL_TAG_SCATTERV;
int rank, size, dst, index;
MPI_Aint lb = 0, sendext = 0;
MPI_Request *requests;
// FIXME: check for errors
smpi_datatype_extent(sendtype, &lb, &sendext);
// Local copy from root
- smpi_datatype_copy((char *)sendbuf + displs[root] * sendext, sendcounts[root],
+ if(recvbuf!=MPI_IN_PLACE){
+ smpi_datatype_copy((char *)sendbuf + displs[root] * sendext, sendcounts[root],
sendtype, recvbuf, recvcount, recvtype);
+ }
// Send buffers to receivers
requests = xbt_new(MPI_Request, size - 1);
index = 0;
MPI_Datatype datatype, MPI_Op op, int root,
MPI_Comm comm)
{
- int system_tag = 666;
+ int system_tag = COLL_TAG_REDUCE;
int rank, size, src, index;
MPI_Aint lb = 0, dataext = 0;
MPI_Request *requests;
void **tmpbufs;
+
+ char* sendtmpbuf = (char*) sendbuf;
+ if( sendbuf == MPI_IN_PLACE ) {
+ sendtmpbuf = (char *)xbt_malloc(count*smpi_datatype_get_extent(datatype));
+ smpi_datatype_copy(recvbuf, count, datatype,sendtmpbuf, count, datatype);
+ }
+
rank = smpi_comm_rank(comm);
size = smpi_comm_size(comm);
+ //non commutative case, use a working algo from openmpi
+ if(!smpi_op_is_commute(op)){
+ smpi_coll_tuned_reduce_ompi_basic_linear(sendtmpbuf, recvbuf, count,
+ datatype, op, root, comm);
+ return;
+ }
+
if(rank != root) {
// Send buffer to root
- smpi_mpi_send(sendbuf, count, datatype, root, system_tag, comm);
+ smpi_mpi_send(sendtmpbuf, count, datatype, root, system_tag, comm);
} else {
// FIXME: check for errors
smpi_datatype_extent(datatype, &lb, &dataext);
// Local copy from root
- if (sendbuf && recvbuf)
- smpi_datatype_copy(sendbuf, count, datatype, recvbuf, count, datatype);
+ if (sendtmpbuf && recvbuf)
+ smpi_datatype_copy(sendtmpbuf, count, datatype, recvbuf, count, datatype);
// Receive buffers from senders
//TODO: make a MPI_barrier here ?
requests = xbt_new(MPI_Request, size - 1);
}
xbt_free(tmpbufs);
xbt_free(requests);
+
+ if( sendbuf == MPI_IN_PLACE ) {
+ xbt_free(sendtmpbuf);
+ }
}
}
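/* Editor's note on the MPI_IN_PLACE handling above: when sendbuf is
 * MPI_IN_PLACE, the root's contribution is assumed to already be in recvbuf,
 * so it is copied into the temporary sendtmpbuf before the reduction and the
 * temporary is freed afterwards. */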
void smpi_mpi_scan(void *sendbuf, void *recvbuf, int count,
MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
{
- int system_tag = 666;
+ int system_tag = -888;
int rank, size, other, index;
MPI_Aint lb = 0, dataext = 0;
MPI_Request *requests;
}
// Wait for completion of all comms.
smpi_mpi_startall(size - 1, requests);
- for(other = 0; other < size - 1; other++) {
- index = smpi_mpi_waitany(size - 1, requests, MPI_STATUS_IGNORE);
- if(index == MPI_UNDEFINED) {
- break;
+
+ if(smpi_op_is_commute(op)){
+ for(other = 0; other < size - 1; other++) {
+ index = smpi_mpi_waitany(size - 1, requests, MPI_STATUS_IGNORE);
+ if(index == MPI_UNDEFINED) {
+ break;
+ }
+ if(index < rank) {
+ // #Request is below rank: it's an irecv
+ smpi_op_apply(op, tmpbufs[index], recvbuf, &count, &datatype);
+ }
+ }
+ }else{
+ //non commutative case, wait in order
+ for(other = 0; other < size - 1; other++) {
+ smpi_mpi_wait(&(requests[other]), MPI_STATUS_IGNORE);
+ if(other < rank) {
+ smpi_op_apply(op, tmpbufs[other], recvbuf, &count, &datatype);
+ }
+ }
+ }
+ for(index = 0; index < rank; index++) {
+ xbt_free(tmpbufs[index]);
+ }
+ xbt_free(tmpbufs);
+ xbt_free(requests);
+}
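/* Editor's note (worked example, not part of the patch): with op = MPI_SUM and
 * each rank contributing its own rank number, smpi_mpi_scan above leaves rank r
 * with 0 + 1 + ... + r (its own value included), while smpi_mpi_exscan below
 * leaves rank r with 0 + 1 + ... + (r - 1) and leaves recvbuf on rank 0
 * untouched. */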
+
+void smpi_mpi_exscan(void *sendbuf, void *recvbuf, int count,
+ MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
+{
+ int system_tag = -888;
+ int rank, size, other, index;
+ MPI_Aint lb = 0, dataext = 0;
+ MPI_Request *requests;
+ void **tmpbufs;
+ int recvbuf_is_empty=1;
+ rank = smpi_comm_rank(comm);
+ size = smpi_comm_size(comm);
+
+ // FIXME: check for errors
+ smpi_datatype_extent(datatype, &lb, &dataext);
+
+ // Send/Recv buffers to/from others;
+ requests = xbt_new(MPI_Request, size - 1);
+ tmpbufs = xbt_new(void *, rank);
+ index = 0;
+ for(other = 0; other < rank; other++) {
+ // FIXME: possibly overkill when we have contiguous/noncontiguous data
+ // mapping...
+ tmpbufs[index] = xbt_malloc(count * dataext);
+ requests[index] =
+ smpi_irecv_init(tmpbufs[index], count, datatype, other, system_tag,
+ comm);
+ index++;
+ }
+ for(other = rank + 1; other < size; other++) {
+ requests[index] =
+ smpi_isend_init(sendbuf, count, datatype, other, system_tag, comm);
+ index++;
+ }
+ // Wait for completion of all comms.
+ smpi_mpi_startall(size - 1, requests);
+ if(smpi_op_is_commute(op)){
+ for(other = 0; other < size - 1; other++) {
+ index = smpi_mpi_waitany(size - 1, requests, MPI_STATUS_IGNORE);
+ if(index == MPI_UNDEFINED) {
+ break;
+ }
+ if(index < rank) {
+ if(recvbuf_is_empty){
+ smpi_datatype_copy(tmpbufs[index], count, datatype, recvbuf, count, datatype);
+ recvbuf_is_empty=0;
+ }else
+ // #Request is below rank: it's an irecv
+ smpi_op_apply(op, tmpbufs[index], recvbuf, &count, &datatype);
+ }
}
- if(index < rank) {
- // #Request is below rank: it's a irecv
- smpi_op_apply(op, tmpbufs[index], recvbuf, &count, &datatype);
+ }else{
+ //non commutative case, wait in order
+ for(other = 0; other < size - 1; other++) {
+ smpi_mpi_wait(&(requests[other]), MPI_STATUS_IGNORE);
+ if(other < rank) {
+ if(recvbuf_is_empty){
+ smpi_datatype_copy(tmpbufs[other], count, datatype, recvbuf, count, datatype);
+ recvbuf_is_empty=0;
+ }else smpi_op_apply(op, tmpbufs[other], recvbuf, &count, &datatype);
+ }
}
}
for(index = 0; index < rank; index++) {