/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
-#include <simgrid/s4u/host.hpp>
-
-#include "private.h"
+#include "simgrid/s4u/Engine.hpp"
+#include "simgrid/s4u/Host.hpp"
+#include "src/smpi/private.h"
+#include "src/smpi/smpi_comm.hpp"
+#include "src/smpi/smpi_coll.hpp"
+#include "src/smpi/smpi_datatype_derived.hpp"
+#include "src/smpi/smpi_op.hpp"
+#include "src/smpi/smpi_process.hpp"
+#include "src/smpi/smpi_request.hpp"
+#include "src/smpi/smpi_status.hpp"
+#include "src/smpi/smpi_win.hpp"
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_pmpi, smpi, "Logging specific to SMPI (pmpi)");
int PMPI_Init(int *argc, char ***argv)
{
- // PMPI_Init is call only one time by only by SMPI process
+ xbt_assert(simgrid::s4u::Engine::isInitialized(),
+ "Your MPI program was not properly initialized. The easiest is to use smpirun to start it.");
+ // PMPI_Init is called only once per SMPI process
int already_init;
MPI_Initialized(&already_init);
if(already_init == 0){
TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
TRACE_smpi_finalize(smpi_process()->index());
- smpi_process()->destroy();
return MPI_SUCCESS;
}
int PMPI_Get_library_version (char *version,int *len){
smpi_bench_end();
- snprintf(version,MPI_MAX_LIBRARY_VERSION_STRING,"SMPI Version %d.%d. Copyright The Simgrid Team 2007-2015",
+ snprintf(version, MPI_MAX_LIBRARY_VERSION_STRING, "SMPI Version %d.%d. Copyright The Simgrid Team 2007-2017",
SIMGRID_VERSION_MAJOR, SIMGRID_VERSION_MINOR);
*len = strlen(version) > MPI_MAX_LIBRARY_VERSION_STRING ? MPI_MAX_LIBRARY_VERSION_STRING : strlen(version);
smpi_bench_begin();
int PMPI_Abort(MPI_Comm comm, int errorcode)
{
smpi_bench_end();
- smpi_process()->destroy();
// FIXME: should kill all processes in comm instead
simcall_process_kill(SIMIX_process_self());
return MPI_SUCCESS;
int retval = 0;
smpi_bench_end();
- if ((flag == nullptr) || (status == nullptr)) {
+ if (flag == nullptr) {
retval = MPI_ERR_ARG;
} else if (comm == MPI_COMM_NULL) {
retval = MPI_ERR_COMM;
int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
int root_traced = comm->group()->index(root);
- int i = 0;
int size = comm->size();
instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1);
extra->type = TRACING_GATHERV;
int dt_size_recv = 1;
if (known == 0)
dt_size_recv = recvtype->size();
- if ((comm->rank() == root)) {
+ if (comm->rank() == root) {
extra->recvcounts = xbt_new(int, size);
- for (i = 0; i < size; i++) // copy data to avoid bad free
+ for (int i = 0; i < size; i++) // copy data to avoid bad free
extra->recvcounts[i] = recvcounts[i] * dt_size_recv;
}
TRACE_smpi_collective_in(rank, root_traced, __FUNCTION__, extra);
}
int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
int root_traced = comm->group()->index(root);
- int i = 0;
int size = comm->size();
instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1);
extra->type = TRACING_SCATTERV;
int dt_size_send = 1;
if (known == 0)
dt_size_send = sendtype->size();
- if ((comm->rank() == root)) {
+ if (comm->rank() == root) {
extra->sendcounts = xbt_new(int, size);
- for (i = 0; i < size; i++) // copy data to avoid bad free
+ for (int i = 0; i < size; i++) // copy data to avoid bad free
extra->sendcounts[i] = sendcounts[i] * dt_size_send;
}
extra->datatype2 = encode_datatype(recvtype, &known);
retval = MPI_SUCCESS;
} else if (target_rank <0){
retval = MPI_ERR_RANK;
- } else if (target_disp <0){
+ } else if (win->dynamic()==0 && target_disp <0){
+ //in case of dynamic window, target_disp can be mistakenly seen as negative, as it is an address
retval = MPI_ERR_ARG;
} else if ((origin_count < 0 || target_count < 0) ||
(origin_addr==nullptr && origin_count > 0)){
return retval;
}
+/* MPI-3 request-based get: like MPI_Get, but also returns a request the caller can wait on. */
+int PMPI_Rget( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank,
+              MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Win win, MPI_Request* request){
+  int retval = 0;
+  smpi_bench_end();
+  if (win == MPI_WIN_NULL) {
+    retval = MPI_ERR_WIN;
+  } else if (request == nullptr) {
+    // Must be checked before the MPI_PROC_NULL branch, which dereferences 'request'.
+    retval = MPI_ERR_REQUEST;
+  } else if (target_rank == MPI_PROC_NULL) {
+    *request = MPI_REQUEST_NULL;
+    retval = MPI_SUCCESS;
+  } else if (target_rank <0){
+    retval = MPI_ERR_RANK;
+  } else if (win->dynamic()==0 && target_disp <0){
+    //in case of dynamic window, target_disp can be mistakenly seen as negative, as it is an address
+    retval = MPI_ERR_ARG;
+  } else if ((origin_count < 0 || target_count < 0) ||
+             (origin_addr==nullptr && origin_count > 0)){
+    retval = MPI_ERR_COUNT;
+  } else if ((!origin_datatype->is_valid()) || (!target_datatype->is_valid())) {
+    retval = MPI_ERR_TYPE;
+  } else {
+    int rank = smpi_process()->index();
+    MPI_Group group;
+    win->get_group(&group);
+    int src_traced = group->index(target_rank);
+    TRACE_smpi_ptp_in(rank, src_traced, rank, __FUNCTION__, nullptr);
+
+    retval = win->get( origin_addr, origin_count, origin_datatype, target_rank, target_disp, target_count,
+                       target_datatype, request);
+
+    TRACE_smpi_ptp_out(rank, src_traced, rank, __FUNCTION__);
+  }
+  smpi_bench_begin();
+  return retval;
+}
+
int PMPI_Put( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank,
MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Win win){
int retval = 0;
retval = MPI_SUCCESS;
} else if (target_rank <0){
retval = MPI_ERR_RANK;
- } else if (target_disp <0){
+ } else if (win->dynamic()==0 && target_disp <0){
+ //in case of dynamic window, target_disp can be mistakenly seen as negative, as it is an address
retval = MPI_ERR_ARG;
} else if ((origin_count < 0 || target_count < 0) ||
(origin_addr==nullptr && origin_count > 0)){
return retval;
}
+/* MPI-3 request-based put: like MPI_Put, but also returns a request the caller can wait on. */
+int PMPI_Rput( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank,
+              MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Win win, MPI_Request* request){
+  int retval = 0;
+  smpi_bench_end();
+  if (win == MPI_WIN_NULL) {
+    retval = MPI_ERR_WIN;
+  } else if (request == nullptr) {
+    // Must be checked before the MPI_PROC_NULL branch, which dereferences 'request'.
+    retval = MPI_ERR_REQUEST;
+  } else if (target_rank == MPI_PROC_NULL) {
+    *request = MPI_REQUEST_NULL;
+    retval = MPI_SUCCESS;
+  } else if (target_rank <0){
+    retval = MPI_ERR_RANK;
+  } else if (win->dynamic()==0 && target_disp <0){
+    //in case of dynamic window, target_disp can be mistakenly seen as negative, as it is an address
+    retval = MPI_ERR_ARG;
+  } else if ((origin_count < 0 || target_count < 0) ||
+             (origin_addr==nullptr && origin_count > 0)){
+    retval = MPI_ERR_COUNT;
+  } else if ((!origin_datatype->is_valid()) || (!target_datatype->is_valid())) {
+    retval = MPI_ERR_TYPE;
+  } else {
+    int rank = smpi_process()->index();
+    MPI_Group group;
+    win->get_group(&group);
+    int dst_traced = group->index(target_rank);
+    TRACE_smpi_ptp_in(rank, rank, dst_traced, __FUNCTION__, nullptr);
+    TRACE_smpi_send(rank, rank, dst_traced, SMPI_RMA_TAG, origin_count*origin_datatype->size());
+
+    retval = win->put( origin_addr, origin_count, origin_datatype, target_rank, target_disp, target_count,
+                       target_datatype, request);
+
+    TRACE_smpi_ptp_out(rank, rank, dst_traced, __FUNCTION__);
+  }
+  smpi_bench_begin();
+  return retval;
+}
+
int PMPI_Accumulate( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank,
MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Op op, MPI_Win win){
int retval = 0;
retval = MPI_SUCCESS;
} else if (target_rank <0){
retval = MPI_ERR_RANK;
- } else if (target_disp <0){
+ } else if (win->dynamic()==0 && target_disp <0){
+ //in case of dynamic window, target_disp can be mistakenly seen as negative, as it is an address
retval = MPI_ERR_ARG;
} else if ((origin_count < 0 || target_count < 0) ||
(origin_addr==nullptr && origin_count > 0)){
return retval;
}
+/* MPI-3 request-based accumulate: like MPI_Accumulate, but also returns a waitable request. */
+int PMPI_Raccumulate( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank,
+    MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Op op, MPI_Win win, MPI_Request* request){
+  int retval = 0;
+  smpi_bench_end();
+  if (win == MPI_WIN_NULL) {
+    retval = MPI_ERR_WIN;
+  } else if (request == nullptr) {
+    // Must be checked before the MPI_PROC_NULL branch, which dereferences 'request'.
+    retval = MPI_ERR_REQUEST;
+  } else if (target_rank == MPI_PROC_NULL) {
+    *request = MPI_REQUEST_NULL;
+    retval = MPI_SUCCESS;
+  } else if (target_rank <0){
+    retval = MPI_ERR_RANK;
+  } else if (win->dynamic()==0 && target_disp <0){
+    //in case of dynamic window, target_disp can be mistakenly seen as negative, as it is an address
+    retval = MPI_ERR_ARG;
+  } else if ((origin_count < 0 || target_count < 0) ||
+             (origin_addr==nullptr && origin_count > 0)){
+    retval = MPI_ERR_COUNT;
+  } else if ((!origin_datatype->is_valid()) ||
+             (!target_datatype->is_valid())) {
+    retval = MPI_ERR_TYPE;
+  } else if (op == MPI_OP_NULL) {
+    retval = MPI_ERR_OP;
+  } else {
+    int rank = smpi_process()->index();
+    MPI_Group group;
+    win->get_group(&group);
+    int src_traced = group->index(target_rank);
+    TRACE_smpi_ptp_in(rank, src_traced, rank, __FUNCTION__, nullptr);
+
+    retval = win->accumulate( origin_addr, origin_count, origin_datatype, target_rank, target_disp, target_count,
+                              target_datatype, op, request);
+
+    TRACE_smpi_ptp_out(rank, src_traced, rank, __FUNCTION__);
+  }
+  smpi_bench_begin();
+  return retval;
+}
+
int PMPI_Get_accumulate(void *origin_addr, int origin_count, MPI_Datatype origin_datatype, void *result_addr,
int result_count, MPI_Datatype result_datatype, int target_rank, MPI_Aint target_disp, int target_count,
MPI_Datatype target_datatype, MPI_Op op, MPI_Win win){
retval = MPI_SUCCESS;
} else if (target_rank <0){
retval = MPI_ERR_RANK;
- } else if (target_disp <0){
+ } else if (win->dynamic()==0 && target_disp <0){
+ //in case of dynamic window, target_disp can be mistakenly seen as negative, as it is an address
retval = MPI_ERR_ARG;
} else if ((origin_count < 0 || target_count < 0 || result_count <0) ||
- (origin_addr==nullptr && origin_count > 0) ||
+ (origin_addr==nullptr && origin_count > 0 && op != MPI_NO_OP) ||
(result_addr==nullptr && result_count > 0)){
retval = MPI_ERR_COUNT;
- } else if ((!origin_datatype->is_valid()) ||
+ } else if ((origin_datatype!=MPI_DATATYPE_NULL && !origin_datatype->is_valid()) ||
(!target_datatype->is_valid())||
(!result_datatype->is_valid())) {
retval = MPI_ERR_TYPE;
return retval;
}
+
+/* MPI-3 request-based get_accumulate: fetches the target data into result_addr and combines
+ * origin data into the target with 'op', returning a waitable request. */
+int PMPI_Rget_accumulate(void *origin_addr, int origin_count, MPI_Datatype origin_datatype, void *result_addr,
+int result_count, MPI_Datatype result_datatype, int target_rank, MPI_Aint target_disp, int target_count,
+MPI_Datatype target_datatype, MPI_Op op, MPI_Win win, MPI_Request* request){
+  int retval = 0;
+  smpi_bench_end();
+  if (win == MPI_WIN_NULL) {
+    retval = MPI_ERR_WIN;
+  } else if (request == nullptr) {
+    // Must be checked before the MPI_PROC_NULL branch, which dereferences 'request'.
+    retval = MPI_ERR_REQUEST;
+  } else if (target_rank == MPI_PROC_NULL) {
+    *request = MPI_REQUEST_NULL;
+    retval = MPI_SUCCESS;
+  } else if (target_rank <0){
+    retval = MPI_ERR_RANK;
+  } else if (win->dynamic()==0 && target_disp <0){
+    //in case of dynamic window, target_disp can be mistakenly seen as negative, as it is an address
+    retval = MPI_ERR_ARG;
+  } else if ((origin_count < 0 || target_count < 0 || result_count <0) ||
+             (origin_addr==nullptr && origin_count > 0 && op != MPI_NO_OP) ||
+             (result_addr==nullptr && result_count > 0)){
+    retval = MPI_ERR_COUNT;
+  } else if ((origin_datatype!=MPI_DATATYPE_NULL && !origin_datatype->is_valid()) ||
+             (!target_datatype->is_valid())||
+             (!result_datatype->is_valid())) {
+    retval = MPI_ERR_TYPE;
+  } else if (op == MPI_OP_NULL) {
+    retval = MPI_ERR_OP;
+  } else {
+    int rank = smpi_process()->index();
+    MPI_Group group;
+    win->get_group(&group);
+    int src_traced = group->index(target_rank);
+    TRACE_smpi_ptp_in(rank, src_traced, rank, __FUNCTION__, nullptr);
+
+    retval = win->get_accumulate( origin_addr, origin_count, origin_datatype, result_addr,
+                                  result_count, result_datatype, target_rank, target_disp,
+                                  target_count, target_datatype, op, request);
+
+    TRACE_smpi_ptp_out(rank, src_traced, rank, __FUNCTION__);
+  }
+  smpi_bench_begin();
+  return retval;
+}
+
+/* Fetch-and-op is the single-element special case of get_accumulate, so delegate to it. */
+int PMPI_Fetch_and_op(void *origin_addr, void *result_addr, MPI_Datatype dtype, int target_rank, MPI_Aint target_disp, MPI_Op op, MPI_Win win){
+  int origin_count = (origin_addr == nullptr) ? 0 : 1;
+  return PMPI_Get_accumulate(origin_addr, origin_count, dtype, result_addr, 1, dtype, target_rank, target_disp, 1,
+                             dtype, op, win);
+}
+
+/* Atomically compares the target value with *compare_addr; on match, replaces it with
+ * *origin_addr. The previous target value is always returned in *result_addr. */
+int PMPI_Compare_and_swap(void *origin_addr, void *compare_addr,
+        void *result_addr, MPI_Datatype datatype, int target_rank,
+        MPI_Aint target_disp, MPI_Win win){
+  smpi_bench_end();
+  int retval = 0;
+  if (win == MPI_WIN_NULL) {
+    retval = MPI_ERR_WIN;
+  } else if (target_rank == MPI_PROC_NULL) {
+    retval = MPI_SUCCESS;
+  } else if (target_rank < 0) {
+    retval = MPI_ERR_RANK;
+  } else if (win->dynamic() == 0 && target_disp < 0) {
+    // With a dynamic window, target_disp is an address and may look negative.
+    retval = MPI_ERR_ARG;
+  } else if (origin_addr == nullptr || result_addr == nullptr || compare_addr == nullptr) {
+    retval = MPI_ERR_COUNT;
+  } else if (!datatype->is_valid()) {
+    retval = MPI_ERR_TYPE;
+  } else {
+    int self = smpi_process()->index();
+    MPI_Group group;
+    win->get_group(&group);
+    int peer_traced = group->index(target_rank);
+    TRACE_smpi_ptp_in(self, peer_traced, self, __FUNCTION__, nullptr);
+    retval = win->compare_and_swap(origin_addr, compare_addr, result_addr, datatype, target_rank, target_disp);
+    TRACE_smpi_ptp_out(self, peer_traced, self, __FUNCTION__);
+  }
+  smpi_bench_begin();
+  return retval;
+}
+
int PMPI_Win_post(MPI_Group group, int assert, MPI_Win win){
int retval = 0;
smpi_bench_end();
return retval;
}
+/* Starts an RMA access epoch to every process in the window group (shared locks). */
+int PMPI_Win_lock_all(int assert, MPI_Win win){
+  smpi_bench_end();
+  int retval = 0;
+  if (win == MPI_WIN_NULL) {
+    retval = MPI_ERR_WIN;
+  } else {
+    int me = smpi_process()->index();
+    TRACE_smpi_collective_in(me, -1, __FUNCTION__, nullptr);
+    retval = win->lock_all(assert);
+    TRACE_smpi_collective_out(me, -1, __FUNCTION__);
+  }
+  smpi_bench_begin();
+  return retval;
+}
+
+/* Ends the access epoch opened by MPI_Win_lock_all, completing all pending RMA operations. */
+int PMPI_Win_unlock_all(MPI_Win win){
+  smpi_bench_end();
+  int retval = 0;
+  if (win == MPI_WIN_NULL) {
+    retval = MPI_ERR_WIN;
+  } else {
+    int me = smpi_process()->index();
+    TRACE_smpi_collective_in(me, -1, __FUNCTION__, nullptr);
+    retval = win->unlock_all();
+    TRACE_smpi_collective_out(me, -1, __FUNCTION__);
+  }
+  smpi_bench_begin();
+  return retval;
+}
+
+/* Completes all outstanding RMA operations initiated by this process towards 'rank'. */
+int PMPI_Win_flush(int rank, MPI_Win win){
+  smpi_bench_end();
+  int retval = 0;
+  if (win == MPI_WIN_NULL) {
+    retval = MPI_ERR_WIN;
+  } else if (rank == MPI_PROC_NULL) {
+    retval = MPI_SUCCESS; // flushing towards the null process is a no-op
+  } else {
+    int me = smpi_process()->index();
+    TRACE_smpi_collective_in(me, -1, __FUNCTION__, nullptr);
+    retval = win->flush(rank);
+    TRACE_smpi_collective_out(me, -1, __FUNCTION__);
+  }
+  smpi_bench_begin();
+  return retval;
+}
+
+/* Locally completes outstanding RMA operations towards 'rank' (origin buffers reusable;
+ * remote completion is not guaranteed). */
+int PMPI_Win_flush_local(int rank, MPI_Win win){
+  smpi_bench_end();
+  int retval = 0;
+  if (win == MPI_WIN_NULL) {
+    retval = MPI_ERR_WIN;
+  } else if (rank == MPI_PROC_NULL) {
+    retval = MPI_SUCCESS; // flushing towards the null process is a no-op
+  } else {
+    int me = smpi_process()->index();
+    TRACE_smpi_collective_in(me, -1, __FUNCTION__, nullptr);
+    retval = win->flush_local(rank);
+    TRACE_smpi_collective_out(me, -1, __FUNCTION__);
+  }
+  smpi_bench_begin();
+  return retval;
+}
+
+/* Completes all outstanding RMA operations initiated by this process on the window. */
+int PMPI_Win_flush_all(MPI_Win win){
+  smpi_bench_end();
+  int retval = 0;
+  if (win == MPI_WIN_NULL) {
+    retval = MPI_ERR_WIN;
+  } else {
+    int me = smpi_process()->index();
+    TRACE_smpi_collective_in(me, -1, __FUNCTION__, nullptr);
+    retval = win->flush_all();
+    TRACE_smpi_collective_out(me, -1, __FUNCTION__);
+  }
+  smpi_bench_begin();
+  return retval;
+}
+
+/* Locally completes all outstanding RMA operations on the window (origin buffers reusable;
+ * remote completion is not guaranteed). */
+int PMPI_Win_flush_local_all(MPI_Win win){
+  smpi_bench_end();
+  int retval = 0;
+  if (win == MPI_WIN_NULL) {
+    retval = MPI_ERR_WIN;
+  } else {
+    int me = smpi_process()->index();
+    TRACE_smpi_collective_in(me, -1, __FUNCTION__, nullptr);
+    retval = win->flush_local_all();
+    TRACE_smpi_collective_out(me, -1, __FUNCTION__);
+  }
+  smpi_bench_begin();
+  return retval;
+}
+
int PMPI_Alloc_mem(MPI_Aint size, MPI_Info info, void *baseptr){
void *ptr = xbt_malloc(size);
if(ptr==nullptr)
int PMPI_Attr_get(MPI_Comm comm, int keyval, void* attr_value, int* flag) {
static int one = 1;
static int zero = 0;
- static int tag_ub = 1000000;
+ static int tag_ub = INT_MAX;
static int last_used_code = MPI_ERR_LASTCODE;
if (comm==MPI_COMM_NULL){