-/* Copyright (c) 2007-2015. The SimGrid Team.
- * All rights reserved. */
+/* Copyright (c) 2007-2017. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
#include "private.hpp"
#include "simgrid/s4u/Mailbox.hpp"
#include "simgrid/sg_config.h"
-#include "smpi_mpi_dt_private.h"
#include "src/kernel/activity/SynchroComm.hpp"
#include "src/mc/mc_record.h"
#include "src/mc/mc_replay.h"
#include "src/msg/msg_private.h"
#include "src/simix/smx_private.h"
#include "surf/surf.h"
-#include "xbt/replay.h"
+#include "xbt/replay.hpp"
#include <float.h> /* DBL_MAX */
#include <fstream>
int smpi_universe_size = 0;
int* index_to_process_data = nullptr;
extern double smpi_total_benched_time;
-extern xbt_dict_t smpi_type_keyvals;
-extern xbt_dict_t smpi_comm_keyvals;
xbt_os_timer_t global_timer;
MPI_Comm MPI_COMM_WORLD = MPI_COMM_UNINITIALIZED;
MPI_Errhandler *MPI_ERRORS_RETURN = nullptr;
int index = smpi_process_index();
if (index != MPI_UNDEFINED)
return process_data[index_to_process_data[index]]->replaying;
- else return (_xbt_replay_is_active() != 0);
+ else
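+ // xbt/replay was C++ified: replay_is_active() now lives in namespace simgrid::xbt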
+ return simgrid::xbt::replay_is_active();
}
int smpi_global_size()
{
smpi_process_data_t data = smpi_process_data();
if(data->comm_self==MPI_COMM_NULL){
- MPI_Group group = new simgrid::SMPI::Group(1);
- data->comm_self = new simgrid::SMPI::Comm(group, nullptr);
+ MPI_Group group = new Group(1);
+ data->comm_self = new Comm(group, nullptr);
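+ // Group and Comm now resolve without the simgrid::SMPI:: prefix (presumably via a
+ // using-declaration pulled in through the reworked private.hpp)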
group->set_mapping(smpi_process_index(), 0);
}
return data->sampling;
}
-void print_request(const char *message, MPI_Request request)
-{
- XBT_VERB("%s request %p [buf = %p, size = %zu, src = %d, dst = %d, tag = %d, flags = %x]",
- message, request, request->buf, request->size, request->src, request->dst, request->tag, request->flags);
-}
-
void smpi_comm_set_copy_data_callback(void (*callback) (smx_activity_t, void*, size_t))
{
smpi_comm_copy_data_callback = callback;
//if the process was launched through the smpirun script we generate a global mpi_comm_world
//if not, we leave it as MPI_COMM_NULL and the comm world stays private to each mpi instance
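+//(with smpirun the process count is known up front, so the shared MPI_COMM_WORLD
+//spanning every rank can be built here once)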
if(smpirun){
- group = new simgrid::SMPI::Group(process_count);
- MPI_COMM_WORLD = new simgrid::SMPI::Comm(group, nullptr);
+ group = new Group(process_count);
+ MPI_COMM_WORLD = new Comm(group, nullptr);
MPI_Attr_put(MPI_COMM_WORLD, MPI_UNIVERSE_SIZE, reinterpret_cast<void *>(process_count));
msg_bar_t bar = MSG_barrier_init(process_count);
smpi_bench_destroy();
if (MPI_COMM_WORLD != MPI_COMM_UNINITIALIZED){
- while (MPI_COMM_WORLD->group()->unuse() > 0);
+ delete MPI_COMM_WORLD->group();
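+ // plain ownership replaces the old manual unuse() refcount drain: the group of
+ // MPI_COMM_WORLD is simply deleted at teardown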
MSG_barrier_destroy(process_data[0]->finalization_barrier);
}else{
smpi_deployment_cleanup_instances();
}
for (int i = 0; i < count; i++) {
if(process_data[i]->comm_self!=MPI_COMM_NULL){
- process_data[i]->comm_self->destroy();
+ Comm::destroy(process_data[i]->comm_self);
}
if(process_data[i]->comm_intra!=MPI_COMM_NULL){
- process_data[i]->comm_intra->destroy();
+ Comm::destroy(process_data[i]->comm_intra);
}
xbt_os_timer_free(process_data[i]->timer);
xbt_mutex_destroy(process_data[i]->mailboxes_mutex);
if (MPI_COMM_WORLD != MPI_COMM_UNINITIALIZED){
MPI_COMM_WORLD->cleanup_smp();
MPI_COMM_WORLD->cleanup_attributes();
- if(smpi_coll_cleanup_callback!=nullptr)
- smpi_coll_cleanup_callback();
+ if(Colls::smpi_coll_cleanup_callback!=nullptr)
+ Colls::smpi_coll_cleanup_callback();
delete MPI_COMM_WORLD;
}
}
xbt_free(index_to_process_data);
- if(smpi_type_keyvals!=nullptr)
- xbt_dict_free(&smpi_type_keyvals);
- if(smpi_comm_keyvals!=nullptr)
- xbt_dict_free(&smpi_comm_keyvals);
if(smpi_privatize_global_variables)
smpi_destroy_global_memory_segments();
smpi_free_static();
}
+extern "C" {
+
#ifndef WIN32
void __attribute__ ((weak)) user_main_()
#endif
-extern "C" {
static void smpi_init_logs(){
/* Connect log categories. See xbt/log.c */
XBT_LOG_CONNECT(smpi_coll);
XBT_LOG_CONNECT(smpi_colls);
XBT_LOG_CONNECT(smpi_comm);
+ XBT_LOG_CONNECT(smpi_datatype);
XBT_LOG_CONNECT(smpi_dvfs);
XBT_LOG_CONNECT(smpi_group);
XBT_LOG_CONNECT(smpi_kernel);
XBT_LOG_CONNECT(smpi_mpi);
- XBT_LOG_CONNECT(smpi_mpi_dt);
+ XBT_LOG_CONNECT(smpi_memory);
+ XBT_LOG_CONNECT(smpi_op);
XBT_LOG_CONNECT(smpi_pmpi);
+ XBT_LOG_CONNECT(smpi_request);
XBT_LOG_CONNECT(smpi_replay);
XBT_LOG_CONNECT(smpi_rma);
+ XBT_LOG_CONNECT(smpi_utils);
}
}
static void smpi_init_options(){
- int gather_id = find_coll_description(mpi_coll_gather_description, xbt_cfg_get_string("smpi/gather"),"gather");
- mpi_coll_gather_fun = reinterpret_cast<int (*)(void *, int, MPI_Datatype, void *, int, MPI_Datatype, int, MPI_Comm)>
- (mpi_coll_gather_description[gather_id].coll);
-
- int allgather_id = find_coll_description(mpi_coll_allgather_description,
- xbt_cfg_get_string("smpi/allgather"),"allgather");
- mpi_coll_allgather_fun = reinterpret_cast<int (*)(void *, int, MPI_Datatype, void *, int, MPI_Datatype, MPI_Comm)>
- (mpi_coll_allgather_description[allgather_id].coll);
-
- int allgatherv_id = find_coll_description(mpi_coll_allgatherv_description,
- xbt_cfg_get_string("smpi/allgatherv"),"allgatherv");
- mpi_coll_allgatherv_fun = reinterpret_cast<int (*)(void *, int, MPI_Datatype, void *, int *, int *, MPI_Datatype, MPI_Comm)>
- (mpi_coll_allgatherv_description[allgatherv_id].coll);
-
- int allreduce_id = find_coll_description(mpi_coll_allreduce_description,
- xbt_cfg_get_string("smpi/allreduce"),"allreduce");
- mpi_coll_allreduce_fun = reinterpret_cast<int (*)(void *sbuf, void *rbuf, int rcount, MPI_Datatype dtype, MPI_Op op, MPI_Comm comm)>
- (mpi_coll_allreduce_description[allreduce_id].coll);
-
- int alltoall_id = find_coll_description(mpi_coll_alltoall_description,
- xbt_cfg_get_string("smpi/alltoall"),"alltoall");
- mpi_coll_alltoall_fun = reinterpret_cast<int (*)(void *, int, MPI_Datatype, void *, int, MPI_Datatype, MPI_Comm)>
- (mpi_coll_alltoall_description[alltoall_id].coll);
-
- int alltoallv_id = find_coll_description(mpi_coll_alltoallv_description,
- xbt_cfg_get_string("smpi/alltoallv"),"alltoallv");
- mpi_coll_alltoallv_fun = reinterpret_cast<int (*)(void *, int *, int *, MPI_Datatype, void *, int *, int *, MPI_Datatype, MPI_Comm)>
- (mpi_coll_alltoallv_description[alltoallv_id].coll);
-
- int bcast_id = find_coll_description(mpi_coll_bcast_description, xbt_cfg_get_string("smpi/bcast"),"bcast");
- mpi_coll_bcast_fun = reinterpret_cast<int (*)(void *buf, int count, MPI_Datatype datatype, int root, MPI_Comm com)>
- (mpi_coll_bcast_description[bcast_id].coll);
-
- int reduce_id = find_coll_description(mpi_coll_reduce_description, xbt_cfg_get_string("smpi/reduce"),"reduce");
- mpi_coll_reduce_fun = reinterpret_cast<int (*)(void *buf, void *rbuf, int count, MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm)>
- (mpi_coll_reduce_description[reduce_id].coll);
-
- int reduce_scatter_id =
- find_coll_description(mpi_coll_reduce_scatter_description,
- xbt_cfg_get_string("smpi/reduce-scatter"),"reduce_scatter");
- mpi_coll_reduce_scatter_fun = reinterpret_cast<int (*)(void *sbuf, void *rbuf, int *rcounts,MPI_Datatype dtype, MPI_Op op, MPI_Comm comm)>
- (mpi_coll_reduce_scatter_description[reduce_scatter_id].coll);
-
- int scatter_id = find_coll_description(mpi_coll_scatter_description, xbt_cfg_get_string("smpi/scatter"),"scatter");
- mpi_coll_scatter_fun = reinterpret_cast<int (*)(void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf,int recvcount, MPI_Datatype recvtype, int root, MPI_Comm comm)>
- (mpi_coll_scatter_description[scatter_id].coll);
-
- int barrier_id = find_coll_description(mpi_coll_barrier_description, xbt_cfg_get_string("smpi/barrier"),"barrier");
- mpi_coll_barrier_fun = reinterpret_cast<int (*)(MPI_Comm comm)>
- (mpi_coll_barrier_description[barrier_id].coll);
-
- smpi_coll_cleanup_callback=nullptr;
+
+ Colls::set_collectives();
+ Colls::smpi_coll_cleanup_callback=nullptr;
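+ // the per-collective find_coll_description()/function-pointer setup removed above
+ // is now centralized behind Colls::set_collectives(), which presumably reads the
+ // same smpi/* config keys (gather, allgather, ..., barrier) to pick implementations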
smpi_cpu_threshold = xbt_cfg_get_double("smpi/cpu-threshold");
smpi_host_speed = xbt_cfg_get_double("smpi/host-speed");
smpi_privatize_global_variables = xbt_cfg_get_boolean("smpi/privatize-global-variables");