/* Copyright (c) 2007-2014. The SimGrid Team.
 * All rights reserved. */

/* This program is free software; you can redistribute it and/or modify it
 * under the terms of the license (GNU LGPL) which comes with this package. */

#include "smpi_mpi_dt_private.h"
#include "xbt/replay.h"
#include "surf/surf.h"
#include "simix/smx_private.h"
#include "simgrid/sg_config.h"

#include <float.h>              /* DBL_MAX */

XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_kernel, smpi,
                                "Logging specific to SMPI (kernel)");
typedef struct s_smpi_process_data {
  smx_rdv_t mailbox_small;
  void *data;                   /* user data */
  int sampling;                 /* inside an SMPI_SAMPLE_ block? */
  xbt_bar_t finalization_barrier;
} s_smpi_process_data_t;
static smpi_process_data_t *process_data = NULL;
int process_count = 0;
int *index_to_process_data = NULL;

MPI_Comm MPI_COMM_WORLD = MPI_COMM_UNINITIALIZED;
int MPI_UNIVERSE_SIZE;

MPI_Errhandler *MPI_ERRORS_RETURN = NULL;
MPI_Errhandler *MPI_ERRORS_ARE_FATAL = NULL;
MPI_Errhandler *MPI_ERRHANDLER_NULL = NULL;

#define MAILBOX_NAME_MAXLEN (5 + sizeof(int) * 2 + 1)
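/* A mailbox name is a 5-character prefix ("SMPI-" for the regular mailbox,
 * "small" for the small-message one) followed by the process index printed
 * as sizeof(int) * 2 hexadecimal digits, plus the terminating NUL; hence
 * MAILBOX_NAME_MAXLEN above. */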
static char *get_mailbox_name(char *str, int index)
{
  snprintf(str, MAILBOX_NAME_MAXLEN, "SMPI-%0*x", (int) (sizeof(int) * 2),
           index);
  return str;
}

static char *get_mailbox_name_small(char *str, int index)
{
  snprintf(str, MAILBOX_NAME_MAXLEN, "small%0*x", (int) (sizeof(int) * 2),
           index);
  return str;
}
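/* Attach the SMPI bookkeeping to the calling SIMIX process: look up its
 * global index, register it with its MPI instance (the instance id and rank
 * arrive as the first two command-line arguments), install the per-rank data
 * as the process user data, and bind the small-message mailbox to it. */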
void smpi_process_init(int *argc, char ***argv)
{
  int index;
  smpi_process_data_t data;
  smx_process_t proc;

  proc = SIMIX_process_self();
  //FIXME: dirty hack to avoid running the MSG cleanup functions on these
  //processes when mixing MSG and SMPI
  proc->context->cleanup_func = SIMIX_process_cleanup;
  char *instance_id = (*argv)[1];
  int rank = atoi((*argv)[2]);
  index = smpi_process_index_of_smx_process(proc);

  if (!index_to_process_data) {
    index_to_process_data = (int *) xbt_malloc(SIMIX_process_count() * sizeof(int));
  }
  MPI_Comm *temp_comm_world;
  xbt_bar_t temp_bar;
  smpi_deployment_register_process(instance_id, rank, index, &temp_comm_world, &temp_bar);
  data = smpi_process_remote_data(index);
  data->comm_world = temp_comm_world;
  if (temp_bar != NULL)
    data->finalization_barrier = temp_bar;
  data->instance_id = instance_id;
  xbt_free(simcall_process_get_data(proc));
  simcall_process_set_data(proc, data);

  memmove(&(*argv)[0], &(*argv)[2], sizeof(char *) * (*argc - 2));
  (*argv)[(*argc) - 1] = NULL;
  (*argv)[(*argc) - 2] = NULL;

  // set the process attached to the mailbox
  simcall_rdv_set_receiver(data->mailbox_small, proc);
  XBT_DEBUG("<%d> New process in the game: %p", index, proc);

  if (smpi_privatize_global_variables) {
    smpi_switch_data_segment(index);
  }

  if (smpi_process_data() == NULL)
    xbt_die("smpi_process_data() returned NULL. You probably gave a NULL parameter to MPI_Init. "
            "Although it's required by MPI-2, this is currently not supported by SMPI.");
}
void smpi_process_destroy(void)
{
  int index = smpi_process_index();
  if (smpi_privatize_global_variables) {
    smpi_switch_data_segment(index);
  }
  process_data[index_to_process_data[index]]->state = SMPI_FINALIZED;
  XBT_DEBUG("<%d> Process left the game", index);
}
/**
 * @brief Prepares the current process for termination.
 */
void smpi_process_finalize(void)
{
  // This leads to an explosion of the search graph
  // which cannot be reduced:
  if (MC_is_active())
    return;

  int index = smpi_process_index();
  // wait for all pending asynchronous comms to finish
  xbt_barrier_wait(process_data[index_to_process_data[index]]->finalization_barrier);
}
/**
 * @brief Check if a process is finalized
 */
int smpi_process_finalized()
{
  int index = smpi_process_index();
  if (index != MPI_UNDEFINED)
    return (process_data[index_to_process_data[index]]->state == SMPI_FINALIZED);
  else
    return 0;
}
/**
 * @brief Check if a process is initialized
 */
int smpi_process_initialized(void)
{
  int index = smpi_process_index();
  return ((index != MPI_UNDEFINED)
          && (process_data[index_to_process_data[index]]->state == SMPI_INITIALIZED));
}
/**
 * @brief Mark a process as initialized (=MPI_Init called)
 */
void smpi_process_mark_as_initialized(void)
{
  int index = smpi_process_index();
  if ((index != MPI_UNDEFINED)
      && (process_data[index_to_process_data[index]]->state != SMPI_FINALIZED))
    process_data[index_to_process_data[index]]->state = SMPI_INITIALIZED;
}
int smpi_global_size(void)
{
  char *value = getenv("SMPI_GLOBAL_SIZE");

  if (!value) {
    fprintf(stderr,
            "Please set env var SMPI_GLOBAL_SIZE to expected number of processes.\n");
    xbt_abort();
  }
  return atoi(value);
}
smpi_process_data_t smpi_process_data(void)
{
  return SIMIX_process_self_get_data(SIMIX_process_self());
}

smpi_process_data_t smpi_process_remote_data(int index)
{
  return process_data[index_to_process_data[index]];
}
void smpi_process_set_user_data(void *data)
{
  smpi_process_data_t process_data = smpi_process_data();
  process_data->data = data;
}

void *smpi_process_get_user_data()
{
  smpi_process_data_t process_data = smpi_process_data();
  return process_data->data;
}
int smpi_process_count(void)
{
  return process_count;
}

int smpi_process_index(void)
{
  smpi_process_data_t data = smpi_process_data();
  //return MPI_UNDEFINED if not initialized
  return data ? data->index : MPI_UNDEFINED;
}

MPI_Comm smpi_process_comm_world(void)
{
  smpi_process_data_t data = smpi_process_data();
  //return MPI_COMM_NULL if not initialized
  return data ? *data->comm_world : MPI_COMM_NULL;
}
smx_rdv_t smpi_process_mailbox(void)
{
  smpi_process_data_t data = smpi_process_data();
  return data->mailbox;
}

smx_rdv_t smpi_process_mailbox_small(void)
{
  smpi_process_data_t data = smpi_process_data();
  return data->mailbox_small;
}

smx_rdv_t smpi_process_remote_mailbox(int index)
{
  smpi_process_data_t data = smpi_process_remote_data(index);
  return data->mailbox;
}

smx_rdv_t smpi_process_remote_mailbox_small(int index)
{
  smpi_process_data_t data = smpi_process_remote_data(index);
  return data->mailbox_small;
}
xbt_os_timer_t smpi_process_timer(void)
{
  smpi_process_data_t data = smpi_process_data();
  return data->timer;
}

void smpi_process_simulated_start(void)
{
  smpi_process_data_t data = smpi_process_data();
  data->simulated = SIMIX_get_clock();
}

double smpi_process_simulated_elapsed(void)
{
  smpi_process_data_t data = smpi_process_data();
  return SIMIX_get_clock() - data->simulated;
}
MPI_Comm smpi_process_comm_self(void)
{
  smpi_process_data_t data = smpi_process_data();
  if (data->comm_self == MPI_COMM_NULL) {
    MPI_Group group = smpi_group_new(1);
    data->comm_self = smpi_comm_new(group, NULL);
    smpi_group_set_mapping(group, smpi_process_index(), 0);
  }
  return data->comm_self;
}

MPI_Comm smpi_process_get_comm_intra(void)
{
  smpi_process_data_t data = smpi_process_data();
  return data->comm_intra;
}

void smpi_process_set_comm_intra(MPI_Comm comm)
{
  smpi_process_data_t data = smpi_process_data();
  data->comm_intra = comm;
}
void smpi_process_set_sampling(int s)
{
  smpi_process_data_t data = smpi_process_data();
  data->sampling = s;
}

int smpi_process_get_sampling(void)
{
  smpi_process_data_t data = smpi_process_data();
  return data->sampling;
}
void print_request(const char *message, MPI_Request request)
{
  XBT_DEBUG
      ("%s request %p [buf = %p, size = %zu, src = %d, dst = %d, tag = %d, flags = %x]",
       message, request, request->buf, request->size, request->src,
       request->dst, request->tag, request->flags);
}
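/* Data-copy callback registered with SIMIX (see smpi_main below). When
 * global-variable privatization is enabled, a buffer may live in the data
 * segment of the executable, so the callback switches to the sender's copy
 * of that segment before reading (staging the payload in a temporary
 * buffer), and to the receiver's copy before writing the destination. */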
void smpi_comm_copy_buffer_callback(smx_action_t comm,
                                    void *buff, size_t buff_size)
{
  XBT_DEBUG("Copy the data over");
  if (_xbt_replay_is_active())
    return;
  void *tmpbuff = buff;

  if ((smpi_privatize_global_variables)
      && ((char *) buff >= start_data_exe)
      && ((char *) buff < start_data_exe + size_data_exe)) {
    XBT_DEBUG("Privatization : We are copying from a zone inside global memory... Saving data to temp buffer !");
    smpi_switch_data_segment(((smpi_process_data_t) SIMIX_process_get_data(comm->comm.src_proc))->index);
    tmpbuff = (void *) xbt_malloc(buff_size);
    memcpy(tmpbuff, buff, buff_size);
  }

  if ((smpi_privatize_global_variables)
      && ((char *) comm->comm.dst_buff >= start_data_exe)
      && ((char *) comm->comm.dst_buff < start_data_exe + size_data_exe)) {
    XBT_DEBUG("Privatization : We are copying to a zone inside global memory - Switch data segment");
    smpi_switch_data_segment(((smpi_process_data_t) SIMIX_process_get_data(comm->comm.dst_proc))->index);
  }

  memcpy(comm->comm.dst_buff, tmpbuff, buff_size);
  if (comm->comm.detached) {
    // if this is a detached send, the source buffer was duplicated by the SMPI
    // sender to make the original buffer available to the application ASAP.
    // It seems that the request is still used after this call, so it should
    // be freed somewhere else, but where?
    //xbt_free(comm->comm.src_data);// inside SMPI the request is kept
    // inside the user data and should be freed there
    comm->comm.src_buff = NULL;
  }

  if (tmpbuff != buff)
    xbt_free(tmpbuff);
}
static void smpi_check_options(){
  //check correctness of MPI parameters

  xbt_assert(sg_cfg_get_int("smpi/async_small_thres") <=
             sg_cfg_get_int("smpi/send_is_detached_thres"));

  if (sg_cfg_is_default_value("smpi/running_power")) {
    XBT_INFO("You did not set the power of the host running the simulation. "
             "The timings will certainly not be accurate. "
             "Use the option \"--cfg=smpi/running_power:<flops>\" to set its value. "
             "Check http://simgrid.org/simgrid/latest/doc/options.html#options_smpi_bench for more information.");
  }
}
int smpi_enabled(void) {
  return process_data != NULL;
}
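/* Allocate and initialize the per-process data of every simulated process:
 * one entry per SIMIX process, each with its two mailboxes, an OS timer and
 * no communicator yet. When the simulation was launched through smpirun, a
 * global MPI_COMM_WORLD spanning all processes and a shared finalization
 * barrier are also created; otherwise each MPI instance keeps its own world
 * communicator, set up later in smpi_process_init(). */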
void smpi_global_init(void)
{
  int i;
  MPI_Group group;
  char name[MAILBOX_NAME_MAXLEN];
  int smpirun = 0;

  if (process_count == 0) {
    process_count = SIMIX_process_count();
    smpirun = 1;
  }
  process_data = xbt_new0(smpi_process_data_t, process_count);
  for (i = 0; i < process_count; i++) {
    process_data[i] = xbt_new(s_smpi_process_data_t, 1);
    //process_data[i]->index = i;
    process_data[i]->argc = NULL;
    process_data[i]->argv = NULL;
    process_data[i]->mailbox = simcall_rdv_create(get_mailbox_name(name, i));
    process_data[i]->mailbox_small =
        simcall_rdv_create(get_mailbox_name_small(name, i));
    process_data[i]->timer = xbt_os_timer_new();
    MC_ignore_heap(process_data[i]->timer, xbt_os_timer_size());
    process_data[i]->comm_self = MPI_COMM_NULL;
    process_data[i]->comm_intra = MPI_COMM_NULL;
    process_data[i]->comm_world = NULL;
    process_data[i]->state = SMPI_UNINITIALIZED;
    process_data[i]->sampling = 0;
    process_data[i]->finalization_barrier = NULL;
  }
  //if the process was launched through the smpirun script
  //we generate a global mpi_comm_world
  //if not, we leave MPI_COMM_NULL, and the comm world
  //will be private to each mpi instance
  if (smpirun) {
    group = smpi_group_new(process_count);
    MPI_COMM_WORLD = smpi_comm_new(group, NULL);
    xbt_bar_t bar = xbt_barrier_init(process_count);

    MPI_UNIVERSE_SIZE = smpi_comm_size(MPI_COMM_WORLD);
    for (i = 0; i < process_count; i++) {
      smpi_group_set_mapping(group, i, i);
      process_data[i]->finalization_barrier = bar;
    }
  }
}
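/* Tear down everything smpi_global_init() and smpi_process_init() created:
 * the global communicator and its group (or the per-instance deployment
 * data), each rank's self and intra communicators, mailboxes and timers,
 * and finally the process-data table itself. */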
void smpi_global_destroy(void)
{
  int count = smpi_process_count();
  int i;

  smpi_bench_destroy();
  if (MPI_COMM_WORLD != MPI_COMM_UNINITIALIZED) {
    while (smpi_group_unuse(smpi_comm_group(MPI_COMM_WORLD)) > 0);
    xbt_free(MPI_COMM_WORLD);
    xbt_barrier_destroy(process_data[0]->finalization_barrier);
  } else {
    smpi_deployment_cleanup_instances();
  }
  MPI_COMM_WORLD = MPI_COMM_NULL;
  for (i = 0; i < count; i++) {
    if (process_data[i]->comm_self != MPI_COMM_NULL) {
      smpi_group_unuse(smpi_comm_group(process_data[i]->comm_self));
      smpi_comm_destroy(process_data[i]->comm_self);
    }
    if (process_data[i]->comm_intra != MPI_COMM_NULL) {
      smpi_group_unuse(smpi_comm_group(process_data[i]->comm_intra));
      smpi_comm_destroy(process_data[i]->comm_intra);
    }
    xbt_os_timer_free(process_data[i]->timer);
    simcall_rdv_destroy(process_data[i]->mailbox);
    simcall_rdv_destroy(process_data[i]->mailbox_small);
    xbt_free(process_data[i]);
  }
  xbt_free(process_data);

  xbt_free(index_to_process_data);
  if (smpi_privatize_global_variables)
    smpi_destroy_global_memory_segments();
}
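/* Default entry points, declared weak so the application can override them:
 * user_main_ stands for the MPI program's real main and aborts if this
 * placeholder is ever reached, smpi_simulated_main_ wraps it with the SMPI
 * process setup, and main merely hands control to smpi_main(). */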
void __attribute__ ((weak)) user_main_()
{
  xbt_die("Should not be in this smpi_simulated_main");
}

int __attribute__ ((weak)) smpi_simulated_main_(int argc, char **argv)
{
  smpi_process_init(&argc, &argv);
  user_main_();
  return 0;
}

int __attribute__ ((weak)) main(int argc, char **argv)
{
  return smpi_main(smpi_simulated_main_, argc, argv);
}
static void smpi_init_logs(){
  /* Connect log categories. See xbt/log.c */
  XBT_LOG_CONNECT(smpi);        /* Keep this line as early as possible in this
                                   function: xbt_log_appender_file.c depends on it
                                   DO NOT connect this in XBT or so, or it will be
                                   useless to xbt_log_appender_file.c */
  XBT_LOG_CONNECT(instr_smpi);
  XBT_LOG_CONNECT(smpi_base);
  XBT_LOG_CONNECT(smpi_bench);
  XBT_LOG_CONNECT(smpi_coll);
  XBT_LOG_CONNECT(smpi_colls);
  XBT_LOG_CONNECT(smpi_comm);
  XBT_LOG_CONNECT(smpi_dvfs);
  XBT_LOG_CONNECT(smpi_group);
  XBT_LOG_CONNECT(smpi_kernel);
  XBT_LOG_CONNECT(smpi_mpi);
  XBT_LOG_CONNECT(smpi_mpi_dt);
  XBT_LOG_CONNECT(smpi_pmpi);
  XBT_LOG_CONNECT(smpi_replay);
  XBT_LOG_CONNECT(smpi_rma);
}
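/* Resolve the collective algorithms selected through the smpi/<collective>
 * configuration options (for instance something like
 * --cfg=smpi/alltoall:<algorithm name> on the command line): each name is
 * looked up in the corresponding description table and the matching
 * implementation is installed in the mpi_coll_*_fun pointer used when the
 * corresponding MPI collective is invoked. */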
static void smpi_init_options(){
  int gather_id = find_coll_description(mpi_coll_gather_description,
                                        sg_cfg_get_string("smpi/gather"));
  mpi_coll_gather_fun = (int (*)(void *, int, MPI_Datatype,
                                 void *, int, MPI_Datatype, int, MPI_Comm))
      mpi_coll_gather_description[gather_id].coll;

  int allgather_id = find_coll_description(mpi_coll_allgather_description,
                                           sg_cfg_get_string("smpi/allgather"));
  mpi_coll_allgather_fun = (int (*)(void *, int, MPI_Datatype,
                                    void *, int, MPI_Datatype, MPI_Comm))
      mpi_coll_allgather_description[allgather_id].coll;

  int allgatherv_id = find_coll_description(mpi_coll_allgatherv_description,
                                            sg_cfg_get_string("smpi/allgatherv"));
  mpi_coll_allgatherv_fun = (int (*)(void *, int, MPI_Datatype, void *, int *,
                                     int *, MPI_Datatype, MPI_Comm))
      mpi_coll_allgatherv_description[allgatherv_id].coll;

  int allreduce_id = find_coll_description(mpi_coll_allreduce_description,
                                           sg_cfg_get_string("smpi/allreduce"));
  mpi_coll_allreduce_fun = (int (*)(void *sbuf, void *rbuf, int rcount,
                                    MPI_Datatype dtype, MPI_Op op,
                                    MPI_Comm comm))
      mpi_coll_allreduce_description[allreduce_id].coll;

  int alltoall_id = find_coll_description(mpi_coll_alltoall_description,
                                          sg_cfg_get_string("smpi/alltoall"));
  mpi_coll_alltoall_fun = (int (*)(void *, int, MPI_Datatype,
                                   void *, int, MPI_Datatype, MPI_Comm))
      mpi_coll_alltoall_description[alltoall_id].coll;

  int alltoallv_id = find_coll_description(mpi_coll_alltoallv_description,
                                           sg_cfg_get_string("smpi/alltoallv"));
  mpi_coll_alltoallv_fun = (int (*)(void *, int *, int *, MPI_Datatype,
                                    void *, int *, int *, MPI_Datatype,
                                    MPI_Comm))
      mpi_coll_alltoallv_description[alltoallv_id].coll;

  int bcast_id = find_coll_description(mpi_coll_bcast_description,
                                       sg_cfg_get_string("smpi/bcast"));
  mpi_coll_bcast_fun = (int (*)(void *buf, int count, MPI_Datatype datatype,
                                int root, MPI_Comm com))
      mpi_coll_bcast_description[bcast_id].coll;

  int reduce_id = find_coll_description(mpi_coll_reduce_description,
                                        sg_cfg_get_string("smpi/reduce"));
  mpi_coll_reduce_fun = (int (*)(void *buf, void *rbuf, int count,
                                 MPI_Datatype datatype, MPI_Op op,
                                 int root, MPI_Comm comm))
      mpi_coll_reduce_description[reduce_id].coll;

  int reduce_scatter_id =
      find_coll_description(mpi_coll_reduce_scatter_description,
                            sg_cfg_get_string("smpi/reduce_scatter"));
  mpi_coll_reduce_scatter_fun = (int (*)(void *sbuf, void *rbuf, int *rcounts,
                                         MPI_Datatype dtype, MPI_Op op,
                                         MPI_Comm comm))
      mpi_coll_reduce_scatter_description[reduce_scatter_id].coll;

  int scatter_id = find_coll_description(mpi_coll_scatter_description,
                                         sg_cfg_get_string("smpi/scatter"));
  mpi_coll_scatter_fun = (int (*)(void *sendbuf, int sendcount,
                                  MPI_Datatype sendtype, void *recvbuf,
                                  int recvcount, MPI_Datatype recvtype,
                                  int root, MPI_Comm comm))
      mpi_coll_scatter_description[scatter_id].coll;

  int barrier_id = find_coll_description(mpi_coll_barrier_description,
                                         sg_cfg_get_string("smpi/barrier"));
  mpi_coll_barrier_fun = (int (*)(MPI_Comm comm))
      mpi_coll_barrier_description[barrier_id].coll;

  smpi_cpu_threshold = sg_cfg_get_double("smpi/cpu_threshold");
  smpi_running_power = sg_cfg_get_double("smpi/running_power");
  smpi_privatize_global_variables = sg_cfg_get_boolean("smpi/privatize_global_variables");
  if (smpi_cpu_threshold < 0)
    smpi_cpu_threshold = DBL_MAX;
}
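/* Driver used by smpirun-generated launchers: argv[1] is the platform
 * description and argv[2] the deployment file. It initializes SIMIX, parses
 * the platform, registers realmain as the default function run on every
 * host listed in the deployment, runs the simulation (or the model checker),
 * and prints the simulated time when smpi/display_timing is set. */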
int smpi_main(int (*realmain) (int argc, char *argv[]), int argc, char *argv[])
{
  srand(SMPI_RAND_SEED);

  if (getenv("SMPI_PRETEND_CC") != NULL) {
    /* Hack to ensure that smpicc can pretend to be a simple
     * compiler. Particularly handy to pass it to the configuration tools */
    return 0;
  }

  TRACE_global_init(&argc, argv);

  TRACE_add_start_function(TRACE_smpi_alloc);
  TRACE_add_end_function(TRACE_smpi_release);

  SIMIX_global_init(&argc, argv);

  // parse the platform file: get the host list
  SIMIX_create_environment(argv[1]);
  SIMIX_comm_set_copy_data_callback(&smpi_comm_copy_buffer_callback);
  SIMIX_function_register_default(realmain);
  SIMIX_launch_application(argv[2]);

  smpi_check_options();

  if (smpi_privatize_global_variables)
    smpi_initialize_global_memory_segments();

  /* Clean IO before the run */
  fflush(stdout);
  fflush(stderr);

  if (MC_is_active())
    MC_do_the_modelcheck_for_real();
  else
    SIMIX_run();

  if (sg_cfg_get_boolean("smpi/display_timing"))
    XBT_INFO("Simulation time: %g seconds.", SIMIX_get_clock());

  smpi_global_destroy();

  return 0;
}
// This function can be called from an external file, to initialize the logs,
// options, and processes of SMPI without the need for smpirun.
void SMPI_init(){
  smpi_init_logs();
  smpi_init_options();
  smpi_global_init();
  smpi_check_options();
  if (TRACE_is_enabled() && TRACE_is_configured()) {
    TRACE_smpi_alloc();
  }
  if (smpi_privatize_global_variables)
    smpi_initialize_global_memory_segments();
}
void SMPI_finalize(){
  smpi_global_destroy();
}