/* Copyright (c) 2007-2021. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
#include "private.hpp"
+#include "simgrid/instr.h"
#include "simgrid/s4u/Engine.hpp"
#include "simgrid/s4u/Host.hpp"
+#include "simgrid/version.h"
#include "smpi_comm.hpp"
#include "smpi_datatype_derived.hpp"
-#include "smpi_process.hpp"
#include "smpi_status.hpp"
-#include "src/simix/ActorImpl.hpp"
+#include "src/kernel/actor/ActorImpl.hpp"
+#include "src/smpi/include/smpi_actor.hpp"
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_pmpi, smpi, "Logging specific to SMPI (pmpi)");
{
//need to end bench otherwise categories for execution tasks are wrong
smpi_bench_end();
- TRACE_internal_smpi_set_category (category);
+ if (category != nullptr) {
+ // declare category
+ TRACE_category(category);
+ smpi_process()->set_tracing_category(category);
+ }
//begin bench after changing process's category
smpi_bench_begin();
}
/* PMPI User level calls */
-int PMPI_Init(int *argc, char ***argv)
+int PMPI_Init(int*, char***)
{
xbt_assert(simgrid::s4u::Engine::is_initialized(),
"Your MPI program was not properly initialized. The easiest is to use smpirun to start it.");
- // PMPI_Init is called only once per SMPI process
- int already_init;
- MPI_Initialized(&already_init);
- if(already_init == 0){
- simgrid::smpi::Process::init(argc, argv);
- smpi_process()->mark_as_initialized();
- int rank = simgrid::s4u::this_actor::get_pid();
- TRACE_smpi_init(rank);
- TRACE_smpi_comm_in(rank, __func__, new simgrid::instr::NoOpTIData("init"));
- TRACE_smpi_comm_out(rank);
- TRACE_smpi_computing_init(rank);
- TRACE_smpi_sleeping_init(rank);
- smpi_bench_begin();
+
+ if(smpi_process()->initializing()){
+ XBT_WARN("SMPI is already initializing - MPI_Init called twice ?");
+ return MPI_ERR_OTHER;
+ }
+ if(smpi_process()->initialized()){
+ XBT_WARN("SMPI already initialized once - MPI_Init called twice ?");
+ return MPI_ERR_OTHER;
+ }
+ if(smpi_process()->finalized()){
+ XBT_WARN("SMPI already finalized");
+ return MPI_ERR_OTHER;
}
+ simgrid::smpi::ActorExt::init();
+ int rank_traced = simgrid::s4u::this_actor::get_pid();
+ TRACE_smpi_init(rank_traced, __func__);
+ smpi_bench_begin();
+ smpi_process()->mark_as_initialized();
+
smpi_mpi_init();
return MPI_SUCCESS;
int PMPI_Finalize()
{
smpi_bench_end();
- int rank = simgrid::s4u::this_actor::get_pid();
- TRACE_smpi_comm_in(rank, __func__, new simgrid::instr::NoOpTIData("finalize"));
+ int rank_traced = simgrid::s4u::this_actor::get_pid();
+ TRACE_smpi_comm_in(rank_traced, __func__, new simgrid::instr::NoOpTIData("finalize"));
smpi_process()->finalize();
- TRACE_smpi_comm_out(rank);
- TRACE_smpi_finalize(rank);
+ TRACE_smpi_comm_out(rank_traced);
return MPI_SUCCESS;
}
}
/** MPI_Get_library_version: describe this SMPI implementation in 'version'
 *  (at most MPI_MAX_LIBRARY_VERSION_STRING bytes, NUL-terminated by snprintf)
 *  and store the resulting string length in '*len'. */
int PMPI_Get_library_version (char *version,int *len){
  snprintf(version, MPI_MAX_LIBRARY_VERSION_STRING, "SMPI Version %d.%d. Copyright The SimGrid Team 2007-2021",
           SIMGRID_VERSION_MAJOR, SIMGRID_VERSION_MINOR);
  // snprintf already bounds the string; the comparison is a belt-and-braces clamp.
  *len = strlen(version) > MPI_MAX_LIBRARY_VERSION_STRING ? MPI_MAX_LIBRARY_VERSION_STRING : strlen(version);
  return MPI_SUCCESS;
}
}
}
-int PMPI_Abort(MPI_Comm /*comm*/, int /*errorcode*/)
+int PMPI_Abort(MPI_Comm comm, int /*errorcode*/)
{
smpi_bench_end();
- // FIXME: should kill all processes in comm instead
- smx_actor_t process = SIMIX_process_self();
- simgrid::simix::simcall([process] { SIMIX_process_kill(process, process); });
+ CHECK_COMM(1)
+ XBT_WARN("MPI_Abort was called, something went probably wrong in this simulation ! Killing all processes sharing the same MPI_COMM_WORLD");
+ smx_actor_t myself = SIMIX_process_self();
+ for (int i = 0; i < comm->size(); i++){
+ smx_actor_t actor = simgrid::kernel::actor::ActorImpl::by_pid(comm->group()->actor(i));
+ if (actor != nullptr && actor != myself)
+ simgrid::kernel::actor::simcall([actor] { actor->exit(); });
+ }
+ // now ourself
+ simgrid::kernel::actor::simcall([myself] { myself->exit(); });
return MPI_SUCCESS;
}
return sg_maxmin_precision;
}
-int PMPI_Address(void *location, MPI_Aint * address)
+int PMPI_Address(const void* location, MPI_Aint* address)
{
if (address==nullptr) {
return MPI_ERR_ARG;
}
}
-int PMPI_Get_address(void *location, MPI_Aint * address)
+int PMPI_Get_address(const void *location, MPI_Aint * address)
{
return PMPI_Address(location, address);
}
+MPI_Aint PMPI_Aint_add(MPI_Aint address, MPI_Aint disp)
+{
+ xbt_assert(address <= PTRDIFF_MAX - disp, "overflow in MPI_Aint_add");
+ return address + disp;
+}
+
+MPI_Aint PMPI_Aint_diff(MPI_Aint address, MPI_Aint disp)
+{
+ xbt_assert(address >= PTRDIFF_MIN + disp, "underflow in MPI_Aint_diff");
+ return address - disp;
+}
+
/** MPI_Get_processor_name: copy the simulated host's name into 'name'
 *  (truncated to MPI_MAX_PROCESSOR_NAME-1 chars, always NUL-terminated)
 *  and store its length in '*resultlen'. */
int PMPI_Get_processor_name(char *name, int *resultlen)
{
  int len = std::min<int>(sg_host_self()->get_name().size(), MPI_MAX_PROCESSOR_NAME - 1);
  // std::string::copy is const, so no temporary copy of the name is needed.
  sg_host_self()->get_name().copy(name, len);
  name[len] = '\0';
  *resultlen = len;
  return MPI_SUCCESS;
}
-int PMPI_Get_count(MPI_Status * status, MPI_Datatype datatype, int *count)
+int PMPI_Get_count(const MPI_Status * status, MPI_Datatype datatype, int *count)
{
if (status == nullptr || count == nullptr) {
return MPI_ERR_ARG;
size_t size = datatype->size();
if (size == 0) {
*count = 0;
- return MPI_SUCCESS;
} else if (status->count % size != 0) {
- return MPI_UNDEFINED;
+ *count = MPI_UNDEFINED;
} else {
*count = simgrid::smpi::Status::get_count(status, datatype);
- return MPI_SUCCESS;
}
+ return MPI_SUCCESS;
}
}
/** MPI_Alloc_mem: allocate 'size' bytes and store the address in '*baseptr'.
 *  'info' is ignored. Returns MPI_ERR_COUNT for a negative size. */
int PMPI_Alloc_mem(MPI_Aint size, MPI_Info /*info*/, void* baseptr)
{
  CHECK_NEGATIVE(1, MPI_ERR_COUNT, size)
  // xbt_malloc presumably aborts instead of returning nullptr, so no failure check.
  void *ptr = xbt_malloc(size);
  *static_cast<void**>(baseptr) = ptr;
  return MPI_SUCCESS;
}
int PMPI_Free_mem(void *baseptr){
return MPI_SUCCESS;
}
-int PMPI_Error_string(int errorcode, char* string, int* resultlen){
- if (errorcode<0 || string ==nullptr){
+int PMPI_Error_string(int errorcode, char* string, int* resultlen)
+{
+ static const std::vector<const char*> smpi_error_string = {FOREACH_ERROR(GENERATE_STRING)};
+ if (errorcode < 0 || static_cast<size_t>(errorcode) >= smpi_error_string.size() || string == nullptr)
return MPI_ERR_ARG;
- } else {
- static const char *smpi_error_string[] = {
- FOREACH_ERROR(GENERATE_STRING)
- };
- *resultlen = strlen(smpi_error_string[errorcode]);
- strncpy(string, smpi_error_string[errorcode], *resultlen);
- return MPI_SUCCESS;
- }
+
+ int len = snprintf(string, MPI_MAX_ERROR_STRING, "%s", smpi_error_string[errorcode]);
+ *resultlen = std::min(len, MPI_MAX_ERROR_STRING - 1);
+ return MPI_SUCCESS;
}
/** MPI_Keyval_create: register copy/delete callbacks for communicator attributes. */
int PMPI_Keyval_create(MPI_Copy_function* copy_fn, MPI_Delete_function* delete_fn, int* keyval, void* extra_state) {
  // Only the first (C-style) callback slot is filled; the remaining slots
  // (presumably Fortran/extended variants — see smpi_copy_fn's declaration) stay null.
  smpi_copy_fn _copy_fn={copy_fn,nullptr,nullptr,nullptr,nullptr,nullptr};
  smpi_delete_fn _delete_fn={delete_fn,nullptr,nullptr,nullptr,nullptr,nullptr};
  return simgrid::smpi::Keyval::keyval_create<simgrid::smpi::Comm>(_copy_fn, _delete_fn, keyval, extra_state);
}
/** MPI_Keyval_free: release a keyval created by MPI_Keyval_create.
 *  Rejects a null pointer and the MPI_KEYVAL_INVALID sentinel. */
int PMPI_Keyval_free(int* keyval) {
  CHECK_NULL(1, MPI_ERR_ARG, keyval)
  CHECK_VAL(1, MPI_KEYVAL_INVALID, MPI_ERR_KEYVAL, *keyval)
  return simgrid::smpi::Keyval::keyval_free<simgrid::smpi::Comm>(keyval);
}

+int PMPI_Buffer_attach(void *buf, int size){
+ if(buf==nullptr)
+ return MPI_ERR_BUFFER;
+ if(size<0)
+ return MPI_ERR_ARG;
+ return smpi_process()->set_bsend_buffer(buf, size);
+}
+
+int PMPI_Buffer_detach(void* buffer, int* size){
+ smpi_process()->bsend_buffer((void**)buffer, size);
+ return smpi_process()->set_bsend_buffer(nullptr, 0);
+}