-/* Copyright (c) 2007-2015. The SimGrid Team.
- * All rights reserved. */
+/* Copyright (c) 2007-2017. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
+#include "mc/mc.h"
#include "private.h"
#include "private.hpp"
-#include "smpi_mpi_dt_private.h"
-#include "mc/mc.h"
-#include "src/mc/mc_record.h"
-#include "xbt/replay.h"
-#include "surf/surf.h"
-#include "src/simix/smx_private.h"
+#include "simgrid/s4u/Mailbox.hpp"
#include "simgrid/sg_config.h"
+#include "src/kernel/activity/SynchroComm.hpp"
+#include "src/mc/mc_record.h"
#include "src/mc/mc_replay.h"
#include "src/msg/msg_private.h"
-#include "src/kernel/activity/SynchroComm.hpp"
+#include "src/simix/smx_private.h"
+#include "surf/surf.h"
+#include "xbt/replay.hpp"
#include <float.h> /* DBL_MAX */
#include <fstream>
double simulated;
int *argc;
char ***argv;
- smx_mailbox_t mailbox;
- smx_mailbox_t mailbox_small;
+ simgrid::s4u::MailboxPtr mailbox;
+ simgrid::s4u::MailboxPtr mailbox_small;
xbt_mutex_t mailboxes_mutex;
xbt_os_timer_t timer;
MPI_Comm comm_self;
int sampling; /* inside an SMPI_SAMPLE_ block? */
char* instance_id;
bool replaying; /* is the process replaying a trace? */
- xbt_bar_t finalization_barrier;
+ msg_bar_t finalization_barrier;
int return_value;
smpi_trace_call_location_t trace_call_loc;
#if HAVE_PAPI
MPI_Errhandler *MPI_ERRORS_ARE_FATAL = nullptr;
MPI_Errhandler *MPI_ERRHANDLER_NULL = nullptr;
+void (*smpi_comm_copy_data_callback) (smx_activity_t, void*, size_t) = &smpi_comm_copy_buffer_callback;
+
#define MAILBOX_NAME_MAXLEN (5 + sizeof(int) * 2 + 1)
static char *get_mailbox_name(char *str, int index)
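The body of this helper is elided in the hunk above. A minimal sketch of what it presumably does, given that MAILBOX_NAME_MAXLEN budgets 5 bytes for a "SMPI-" prefix, two hex digits per byte of int, and a trailing NUL (the exact format string is an assumption):

    // Hypothetical reconstruction: render the index as fixed-width hex after a "SMPI-" prefix.
    static char* get_mailbox_name(char* str, int index)
    {
      snprintf(str, MAILBOX_NAME_MAXLEN, "SMPI-%0*x", static_cast<int>(sizeof(int) * 2), index);
      return str;
    }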
void smpi_process_init(int *argc, char ***argv)
{
+ if (process_data == nullptr){
+ printf("SimGrid was not initialized properly before entering MPI_Init. Aborting, please check compilation process and use smpirun\n");
+ exit(1);
+ }
if (argc != nullptr && argv != nullptr) {
- smx_process_t proc = SIMIX_process_self();
+ smx_actor_t proc = SIMIX_process_self();
proc->context->set_cleanup(&MSG_process_cleanup_from_SIMIX);
char* instance_id = (*argv)[1];
int rank = xbt_str_parse_int((*argv)[2], "Invalid rank: %s");
}
MPI_Comm* temp_comm_world;
- xbt_bar_t temp_bar;
+ msg_bar_t temp_bar;
smpi_deployment_register_process(instance_id, rank, index, &temp_comm_world, &temp_bar);
smpi_process_data_t data = smpi_process_remote_data(index);
data->comm_world = temp_comm_world;
data->instance_id = instance_id;
data->replaying = false;
- simdata_process_t simdata = static_cast<simdata_process_t>(simcall_process_get_data(proc));
- simdata->data = data;
+ static_cast<simgrid::MsgActorExt*>(proc->data)->data = data;
if (*argc > 3) {
memmove(&(*argv)[0], &(*argv)[2], sizeof(char *) * (*argc - 2));
data->argc = argc;
data->argv = argv;
  // attach this process as the receiver of its small mailbox
- simcall_mbox_set_receiver(data->mailbox_small, proc);
+ data->mailbox_small->setReceiver(simgrid::s4u::Actor::self());
XBT_DEBUG("<%d> New process in the game: %p", index, proc);
}
xbt_assert(smpi_process_data(),
int index = smpi_process_index();
// wait for all pending asynchronous comms to finish
- xbt_barrier_wait(process_data[index_to_process_data[index]]->finalization_barrier);
+ MSG_barrier_wait(process_data[index_to_process_data[index]]->finalization_barrier);
}
/** @brief Check if a process is finalized */
int index = smpi_process_index();
if (index != MPI_UNDEFINED)
return process_data[index_to_process_data[index]]->replaying;
- else return (_xbt_replay_is_active() != 0);
+ else
+ return !simgrid::xbt::replay_is_active();
}
int smpi_global_size()
smpi_process_data_t smpi_process_data()
{
- simdata_process_t simdata = static_cast<simdata_process_t>(SIMIX_process_self_get_data());
- return static_cast<smpi_process_data_t>(simdata->data);
+ simgrid::MsgActorExt* msgExt = static_cast<simgrid::MsgActorExt*>(SIMIX_process_self()->data);
+ return static_cast<smpi_process_data_t>(msgExt->data);
}
smpi_process_data_t smpi_process_remote_data(int index)
smx_mailbox_t smpi_process_mailbox()
{
smpi_process_data_t data = smpi_process_data();
- return data->mailbox;
+ return data->mailbox->getImpl();
}
smx_mailbox_t smpi_process_mailbox_small()
{
smpi_process_data_t data = smpi_process_data();
- return data->mailbox_small;
+ return data->mailbox_small->getImpl();
}
xbt_mutex_t smpi_process_mailboxes_mutex()
smx_mailbox_t smpi_process_remote_mailbox(int index)
{
smpi_process_data_t data = smpi_process_remote_data(index);
- return data->mailbox;
+ return data->mailbox->getImpl();
}
smx_mailbox_t smpi_process_remote_mailbox_small(int index)
{
smpi_process_data_t data = smpi_process_remote_data(index);
- return data->mailbox_small;
+ return data->mailbox_small->getImpl();
}
xbt_mutex_t smpi_process_remote_mailboxes_mutex(int index)
{
smpi_process_data_t data = smpi_process_data();
if(data->comm_self==MPI_COMM_NULL){
- MPI_Group group = smpi_group_new(1);
- data->comm_self = smpi_comm_new(group, nullptr);
- smpi_group_set_mapping(group, smpi_process_index(), 0);
+ MPI_Group group = new Group(1);
+ data->comm_self = new Comm(group, nullptr);
+ group->set_mapping(smpi_process_index(), 0);
}
return data->comm_self;
return data->sampling;
}
-void print_request(const char *message, MPI_Request request)
+void smpi_comm_set_copy_data_callback(void (*callback) (smx_activity_t, void*, size_t))
{
- XBT_VERB("%s request %p [buf = %p, size = %zu, src = %d, dst = %d, tag = %d, flags = %x]",
- message, request, request->buf, request->size, request->src, request->dst, request->tag, request->flags);
+ smpi_comm_copy_data_callback = callback;
}
void smpi_comm_copy_buffer_callback(smx_activity_t synchro, void *buff, size_t buff_size)
){
XBT_DEBUG("Privatization : We are copying from a zone inside global memory... Saving data to temp buffer !");
-
- smpi_switch_data_segment((static_cast<smpi_process_data_t>((static_cast<simdata_process_t>(SIMIX_process_get_data(comm->src_proc))->data))->index));
+ smpi_switch_data_segment(
+ (static_cast<smpi_process_data_t>((static_cast<simgrid::MsgActorExt*>(comm->src_proc->data)->data))->index));
tmpbuff = static_cast<void*>(xbt_malloc(buff_size));
memcpy(tmpbuff, buff, buff_size);
}
if((smpi_privatize_global_variables) && ((char*)comm->dst_buff >= smpi_start_data_exe)
&& ((char*)comm->dst_buff < smpi_start_data_exe + smpi_size_data_exe )){
XBT_DEBUG("Privatization : We are copying to a zone inside global memory - Switch data segment");
- smpi_switch_data_segment((static_cast<smpi_process_data_t>((static_cast<simdata_process_t>(SIMIX_process_get_data(comm->dst_proc))->data))->index));
+ smpi_switch_data_segment(
+ (static_cast<smpi_process_data_t>((static_cast<simgrid::MsgActorExt*>(comm->dst_proc->data)->data))->index));
}
memcpy(comm->dst_buff, tmpbuff, buff_size);
void smpi_comm_null_copy_buffer_callback(smx_activity_t comm, void *buff, size_t buff_size)
{
- return;
+ /* nothing done in this version */
}
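Together with smpi_comm_set_copy_data_callback() defined above, this no-op callback lets a tool bypass payload copies when only timing matters. A minimal usage sketch (the call site is hypothetical):

    // Skip copying message payloads entirely (hypothetical call site):
    smpi_comm_set_copy_data_callback(&smpi_comm_null_copy_buffer_callback);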
static void smpi_check_options(){
xbt_assert(xbt_cfg_get_int("smpi/async-small-thresh") <= xbt_cfg_get_int("smpi/send-is-detached-thresh"));
- if (xbt_cfg_is_default_value("smpi/running-power")) {
+ if (xbt_cfg_is_default_value("smpi/host-speed")) {
XBT_INFO("You did not set the power of the host running the simulation. "
"The timings will certainly not be accurate. "
- "Use the option \"--cfg=smpi/running-power:<flops>\" to set its value."
+ "Use the option \"--cfg=smpi/host-speed:<flops>\" to set its value."
"Check http://simgrid.org/simgrid/latest/doc/options.html#options_smpi_bench for more information.");
}
+
+  xbt_assert(xbt_cfg_get_double("smpi/cpu-threshold") >= 0,
+             "The 'smpi/cpu-threshold' option cannot have negative values anymore. If you want to discard "
+             "the simulation of any computation, please use 'smpi/simulate-computation:no' instead.");
}
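Assuming smpirun forwards --cfg items to the simulator as usual, both conditions checked here can be addressed from the command line:

    // Example invocation (flags assumed; adjust to your setup):
    //   smpirun --cfg=smpi/host-speed:1e9 --cfg=smpi/simulate-computation:no -np 4 ./my_app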
int smpi_enabled() {
process_data[i] = new s_smpi_process_data_t;
process_data[i]->argc = nullptr;
process_data[i]->argv = nullptr;
- process_data[i]->mailbox = simcall_mbox_create(get_mailbox_name(name, i));
- process_data[i]->mailbox_small = simcall_mbox_create(get_mailbox_name_small(name, i));
+ process_data[i]->mailbox = simgrid::s4u::Mailbox::byName(get_mailbox_name(name, i));
+ process_data[i]->mailbox_small = simgrid::s4u::Mailbox::byName(get_mailbox_name_small(name, i));
process_data[i]->mailboxes_mutex = xbt_mutex_init();
process_data[i]->timer = xbt_os_timer_new();
if (MC_is_active())
  //if the process was launched through the smpirun script, we generate a global MPI_COMM_WORLD
  //if not, we leave it as MPI_COMM_NULL, and the comm world will be private to each MPI instance
if(smpirun){
- group = smpi_group_new(process_count);
- MPI_COMM_WORLD = smpi_comm_new(group, nullptr);
+ group = new Group(process_count);
+ MPI_COMM_WORLD = new Comm(group, nullptr);
MPI_Attr_put(MPI_COMM_WORLD, MPI_UNIVERSE_SIZE, reinterpret_cast<void *>(process_count));
- xbt_bar_t bar=xbt_barrier_init(process_count);
+ msg_bar_t bar = MSG_barrier_init(process_count);
for (i = 0; i < process_count; i++) {
- smpi_group_set_mapping(group, i, i);
+ group->set_mapping(i, i);
process_data[i]->finalization_barrier = bar;
}
}
void smpi_global_destroy()
{
int count = smpi_process_count();
- int i;
smpi_bench_destroy();
if (MPI_COMM_WORLD != MPI_COMM_UNINITIALIZED){
- while (smpi_group_unuse(smpi_comm_group(MPI_COMM_WORLD)) > 0);
- xbt_barrier_destroy(process_data[0]->finalization_barrier);
+ delete MPI_COMM_WORLD->group();
+ MSG_barrier_destroy(process_data[0]->finalization_barrier);
}else{
smpi_deployment_cleanup_instances();
}
- for (i = 0; i < count; i++) {
+ for (int i = 0; i < count; i++) {
if(process_data[i]->comm_self!=MPI_COMM_NULL){
- smpi_comm_destroy(process_data[i]->comm_self);
+ Comm::destroy(process_data[i]->comm_self);
}
if(process_data[i]->comm_intra!=MPI_COMM_NULL){
- smpi_comm_destroy(process_data[i]->comm_intra);
+ Comm::destroy(process_data[i]->comm_intra);
}
xbt_os_timer_free(process_data[i]->timer);
xbt_mutex_destroy(process_data[i]->mailboxes_mutex);
process_data = nullptr;
if (MPI_COMM_WORLD != MPI_COMM_UNINITIALIZED){
- smpi_comm_cleanup_smp(MPI_COMM_WORLD);
- smpi_comm_cleanup_attributes(MPI_COMM_WORLD);
- if(smpi_coll_cleanup_callback!=nullptr)
- smpi_coll_cleanup_callback();
- xbt_free(MPI_COMM_WORLD);
+ MPI_COMM_WORLD->cleanup_smp();
+ MPI_COMM_WORLD->cleanup_attributes();
+ if(Colls::smpi_coll_cleanup_callback!=nullptr)
+ Colls::smpi_coll_cleanup_callback();
+ delete MPI_COMM_WORLD;
}
MPI_COMM_WORLD = MPI_COMM_NULL;
smpi_free_static();
}
+extern "C" {
+
#ifndef WIN32
void __attribute__ ((weak)) user_main_()
{
xbt_die("Should not be in this smpi_simulated_main");
- return;
}
int __attribute__ ((weak)) smpi_simulated_main_(int argc, char **argv)
#endif
-extern "C" {
static void smpi_init_logs(){
/* Connect log categories. See xbt/log.c */
XBT_LOG_CONNECT(smpi_coll);
XBT_LOG_CONNECT(smpi_colls);
XBT_LOG_CONNECT(smpi_comm);
+ XBT_LOG_CONNECT(smpi_datatype);
XBT_LOG_CONNECT(smpi_dvfs);
XBT_LOG_CONNECT(smpi_group);
XBT_LOG_CONNECT(smpi_kernel);
XBT_LOG_CONNECT(smpi_mpi);
- XBT_LOG_CONNECT(smpi_mpi_dt);
+ XBT_LOG_CONNECT(smpi_memory);
+ XBT_LOG_CONNECT(smpi_op);
XBT_LOG_CONNECT(smpi_pmpi);
+ XBT_LOG_CONNECT(smpi_request);
XBT_LOG_CONNECT(smpi_replay);
XBT_LOG_CONNECT(smpi_rma);
+ XBT_LOG_CONNECT(smpi_utils);
}
}
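Each category connected above must be declared in its defining translation unit with xbt's log macros; a hedged sketch for one of them (the actual description string may differ):

    // In the file that owns the category (illustrative example):
    XBT_LOG_NEW_SUBCATEGORY(smpi_utils, smpi, "Logging specific to SMPI utilities");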
static void smpi_init_options(){
- int gather_id = find_coll_description(mpi_coll_gather_description, xbt_cfg_get_string("smpi/gather"),"gather");
- mpi_coll_gather_fun = reinterpret_cast<int (*)(void *, int, MPI_Datatype, void *, int, MPI_Datatype, int, MPI_Comm)>
- (mpi_coll_gather_description[gather_id].coll);
-
- int allgather_id = find_coll_description(mpi_coll_allgather_description,
- xbt_cfg_get_string("smpi/allgather"),"allgather");
- mpi_coll_allgather_fun = reinterpret_cast<int (*)(void *, int, MPI_Datatype, void *, int, MPI_Datatype, MPI_Comm)>
- (mpi_coll_allgather_description[allgather_id].coll);
-
- int allgatherv_id = find_coll_description(mpi_coll_allgatherv_description,
- xbt_cfg_get_string("smpi/allgatherv"),"allgatherv");
- mpi_coll_allgatherv_fun = reinterpret_cast<int (*)(void *, int, MPI_Datatype, void *, int *, int *, MPI_Datatype, MPI_Comm)>
- (mpi_coll_allgatherv_description[allgatherv_id].coll);
-
- int allreduce_id = find_coll_description(mpi_coll_allreduce_description,
- xbt_cfg_get_string("smpi/allreduce"),"allreduce");
- mpi_coll_allreduce_fun = reinterpret_cast<int (*)(void *sbuf, void *rbuf, int rcount, MPI_Datatype dtype, MPI_Op op, MPI_Comm comm)>
- (mpi_coll_allreduce_description[allreduce_id].coll);
-
- int alltoall_id = find_coll_description(mpi_coll_alltoall_description,
- xbt_cfg_get_string("smpi/alltoall"),"alltoall");
- mpi_coll_alltoall_fun = reinterpret_cast<int (*)(void *, int, MPI_Datatype, void *, int, MPI_Datatype, MPI_Comm)>
- (mpi_coll_alltoall_description[alltoall_id].coll);
-
- int alltoallv_id = find_coll_description(mpi_coll_alltoallv_description,
- xbt_cfg_get_string("smpi/alltoallv"),"alltoallv");
- mpi_coll_alltoallv_fun = reinterpret_cast<int (*)(void *, int *, int *, MPI_Datatype, void *, int *, int *, MPI_Datatype, MPI_Comm)>
- (mpi_coll_alltoallv_description[alltoallv_id].coll);
-
- int bcast_id = find_coll_description(mpi_coll_bcast_description, xbt_cfg_get_string("smpi/bcast"),"bcast");
- mpi_coll_bcast_fun = reinterpret_cast<int (*)(void *buf, int count, MPI_Datatype datatype, int root, MPI_Comm com)>
- (mpi_coll_bcast_description[bcast_id].coll);
-
- int reduce_id = find_coll_description(mpi_coll_reduce_description, xbt_cfg_get_string("smpi/reduce"),"reduce");
- mpi_coll_reduce_fun = reinterpret_cast<int (*)(void *buf, void *rbuf, int count, MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm)>
- (mpi_coll_reduce_description[reduce_id].coll);
-
- int reduce_scatter_id =
- find_coll_description(mpi_coll_reduce_scatter_description,
- xbt_cfg_get_string("smpi/reduce-scatter"),"reduce_scatter");
- mpi_coll_reduce_scatter_fun = reinterpret_cast<int (*)(void *sbuf, void *rbuf, int *rcounts,MPI_Datatype dtype, MPI_Op op, MPI_Comm comm)>
- (mpi_coll_reduce_scatter_description[reduce_scatter_id].coll);
-
- int scatter_id = find_coll_description(mpi_coll_scatter_description, xbt_cfg_get_string("smpi/scatter"),"scatter");
- mpi_coll_scatter_fun = reinterpret_cast<int (*)(void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf,int recvcount, MPI_Datatype recvtype, int root, MPI_Comm comm)>
- (mpi_coll_scatter_description[scatter_id].coll);
-
- int barrier_id = find_coll_description(mpi_coll_barrier_description, xbt_cfg_get_string("smpi/barrier"),"barrier");
- mpi_coll_barrier_fun = reinterpret_cast<int (*)(MPI_Comm comm)>
- (mpi_coll_barrier_description[barrier_id].coll);
-
- smpi_coll_cleanup_callback=nullptr;
+
+ Colls::set_collectives();
+ Colls::smpi_coll_cleanup_callback=nullptr;
smpi_cpu_threshold = xbt_cfg_get_double("smpi/cpu-threshold");
- smpi_running_power = xbt_cfg_get_double("smpi/running-power");
+ smpi_host_speed = xbt_cfg_get_double("smpi/host-speed");
smpi_privatize_global_variables = xbt_cfg_get_boolean("smpi/privatize-global-variables");
if (smpi_cpu_threshold < 0)
smpi_cpu_threshold = DBL_MAX;
+
+ char* val = xbt_cfg_get_string("smpi/shared-malloc");
+ if (!strcasecmp(val, "yes") || !strcmp(val, "1") || !strcasecmp(val, "on") || !strcasecmp(val, "global")) {
+ smpi_cfg_shared_malloc = shmalloc_global;
+ } else if (!strcasecmp(val, "local")) {
+ smpi_cfg_shared_malloc = shmalloc_local;
+ } else if (!strcasecmp(val, "no") || !strcmp(val, "0") || !strcasecmp(val, "off")) {
+ smpi_cfg_shared_malloc = shmalloc_none;
+ } else {
+ xbt_die("Invalid value '%s' for option smpi/shared-malloc. Possible values: 'on' or 'global', 'local', 'off'",
+ val);
+ }
}
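For reference, the mapping implemented by the parsing above (the mode names come straight from the code):

    // "yes" / "1" / "on" / "global" -> shmalloc_global
    // "local"                       -> shmalloc_local
    // "no"  / "0" / "off"           -> shmalloc_none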
int smpi_main(int (*realmain) (int argc, char *argv[]), int argc, char *argv[])
SIMIX_global_init(&argc, argv);
MSG_init(&argc,argv);
- SMPI_switch_data_segment = smpi_switch_data_segment;
+ SMPI_switch_data_segment = &smpi_switch_data_segment;
smpi_init_options();
// parse the platform file: get the host list
SIMIX_create_environment(argv[1]);
- SIMIX_comm_set_copy_data_callback(&smpi_comm_copy_buffer_callback);
+ SIMIX_comm_set_copy_data_callback(smpi_comm_copy_data_callback);
SIMIX_function_register_default(realmain);
SIMIX_launch_application(argv[2]);