/* Copyright (c) 2007-2023. The SimGrid Team. All rights reserved. */

/* This program is free software; you can redistribute it and/or modify it
 * under the terms of the license (GNU LGPL) which comes with this package. */
#include "simgrid/Exception.hpp"
#include "simgrid/plugins/file_system.h"
#include "simgrid/s4u/Engine.hpp"
#include "smpi_host.hpp"
#include "src/kernel/EngineImpl.hpp"
#include "src/kernel/activity/CommImpl.hpp"
#include "src/mc/mc.h"
#include "src/mc/mc_replay.hpp"
#include "src/smpi/include/smpi_actor.hpp"
#include "xbt/config.hpp"
#include "xbt/file.hpp"
#endif
#if HAVE_PAPI
#include <papi.h>
#endif
#if not defined(__APPLE__) && not defined(__HAIKU__)
* See https://www.akkadia.org/drepper/dsohowto.pdf
* and https://lists.freebsd.org/pipermail/freebsd-current/2016-March/060284.html
*/
/* Use RTLD_DEEPBIND only when the platform defines it and no sanitizer is in
 * use (sanitizers intercept libc symbols and are incompatible with deep
 * binding). The !defined() guard keeps this valid on platforms such as macOS
 * or Haiku where RTLD_DEEPBIND does not exist at all. */
#if !defined(RTLD_DEEPBIND) || !RTLD_DEEPBIND || HAVE_SANITIZER_ADDRESS || HAVE_SANITIZER_THREAD
#define WANT_RTLD_DEEPBIND 0
#else
#define WANT_RTLD_DEEPBIND RTLD_DEEPBIND
#endif
#if HAVE_PAPI
// Maps each computation-unit name to its PAPI setup (only when PAPI support is compiled in).
std::map</* computation unit name */ std::string, papi_process_data, std::less<>> units2papi_setup;
#endif
-std::unordered_map<std::string, double> location2speedup;
+ std::unordered_map<std::string, double> location2speedup;
-static int smpi_exit_status = 0;
-xbt_os_timer_t global_timer;
-static std::vector<std::string> privatize_libs_paths;
+ static int smpi_exit_status = 0;
+ static xbt_os_timer_t global_timer;
+ static std::vector<std::string> privatize_libs_paths;
// No instance gets manually created; check also the smpirun.in script as
// this default name is used there as well (when the <actor> tag is generated).
static const std::string smpi_default_instance_name("smpirun");
-static simgrid::config::Flag<std::string>
- smpi_hostfile("smpi/hostfile",
- "Classical MPI hostfile containing list of machines to dispatch "
- "the processes, one per line",
- "");
+ static simgrid::config::Flag<std::string>
+ smpi_hostfile("smpi/hostfile",
+ "Classical MPI hostfile containing list of machines to dispatch "
+ "the processes, one per line",
+ "");
-static simgrid::config::Flag<std::string> smpi_replay("smpi/replay",
- "Replay a trace instead of executing the application", "");
+ static simgrid::config::Flag<std::string> smpi_replay("smpi/replay",
+ "Replay a trace instead of executing the application", "");
-static simgrid::config::Flag<int> smpi_np("smpi/np", "Number of processes to be created", 0);
+ static simgrid::config::Flag<int> smpi_np("smpi/np", "Number of processes to be created", 0);
-static simgrid::config::Flag<int> smpi_map("smpi/map", "Display the mapping between nodes and processes", 0);
+ static simgrid::config::Flag<int> smpi_map("smpi/map", "Display the mapping between nodes and processes", 0);
-std::function<void(simgrid::kernel::activity::CommImpl*, void*, size_t)> smpi_comm_copy_data_callback =
- &smpi_comm_copy_buffer_callback;
+ std::function<void(simgrid::kernel::activity::CommImpl*, void*, size_t)> smpi_comm_copy_data_callback =
+ &smpi_comm_copy_buffer_callback;
-simgrid::smpi::ActorExt* smpi_process()
-{
- simgrid::s4u::ActorPtr me = simgrid::s4u::Actor::self();
+ simgrid::smpi::ActorExt* smpi_process()
+ {
+ simgrid::s4u::ActorPtr me = simgrid::s4u::Actor::self();
- if (me == nullptr) // This happens sometimes (eg, when linking against NS3 because it pulls openMPI...)
- return nullptr;
+ if (me == nullptr) // This happens sometimes (eg, when linking against NS3 because it pulls openMPI...)
+ return nullptr;
- return me->extension<simgrid::smpi::ActorExt>();
-}
+ return me->extension<simgrid::smpi::ActorExt>();
+ }
// Retrieve the SMPI extension of the given (possibly remote) actor.
// NOTE(review): the body below looks truncated by the diff extraction — a function
// with a non-void return type and no return statement. The comment probably belongs
// to a different function whose lines were elided; verify against the complete file.
simgrid::smpi::ActorExt* smpi_process_remote(simgrid::s4u::ActorPtr actor)
{
/* nothing done in this version */
}
-int smpi_enabled() {
- return MPI_COMM_WORLD != MPI_COMM_UNINITIALIZED;
-}
-
static void smpi_init_papi()
{
#if HAVE_PAPI
* configuration tools */
return 0;
}
+ if (argv[0] == std::string("--help-coll")) {
+ std::cerr << simgrid::smpi::colls::get_smpi_coll_help();
+ return 0;
+ }
smpi_init_options_internal(true);
simgrid::s4u::Engine engine(&argc, argv);
engine.load_platform(argv[1]);
engine.set_default_comm_data_copy_callback(smpi_comm_copy_buffer_callback);
+ xbt_assert(not MC_is_active() || smpi_cfg_privatization() != SmpiPrivStrategies::MMAP,
+ "Please use the dlopen privatization schema when model-checking SMPI code");
+
if (smpi_cfg_privatization() == SmpiPrivStrategies::DLOPEN)
smpi_init_privatization_dlopen(executable);
else
SMPI_app_instance_register(smpi_default_instance_name.c_str(), nullptr, rank_counts);
MPI_COMM_WORLD = *smpi_deployment_comm_world(smpi_default_instance_name);
- /* Clean IO before the run */
+ /* Flush output streams before and after the run */
fflush(stdout);
fflush(stderr);
engine.get_impl()->run(-1);
+ fflush(stderr);
+ fflush(stdout);
+
xbt_os_walltimer_stop(global_timer);
simgrid::smpi::utils::print_time_analysis(xbt_os_timer_elapsed(global_timer));
return smpi_exit_status;
}
-static bool smpi_inited = false;
int SMPI_is_inited()
{
- return smpi_inited;
+ return MPI_COMM_WORLD != MPI_COMM_UNINITIALIZED;
}
+
// Called either directly from the user code, or from the code called by smpirun
void SMPI_init(){
smpi_init_options_internal(false);
}
smpi_init_papi();
smpi_check_options();
- smpi_inited = true;
}
void SMPI_finalize()
smpi_init_fortran_types();
if(_smpi_init_sleep > 0)
simgrid::s4u::this_actor::sleep_for(_smpi_init_sleep);
- if (not MC_is_active()) {
+ if (not MC_is_active() && not MC_record_replay_is_active()) {
smpi_deployment_startup_barrier(smpi_process()->get_instance_id());
}
}