-/* Copyright (c) 2009, 2010, 2011, 2012. The SimGrid Team.
+/* Copyright (c) 2009-2014. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
-
#include "private.h"
#include <stdio.h>
#include <xbt.h>
}
}
-typedef struct {
- xbt_dynar_t isends; /* of MPI_Request */
- xbt_dynar_t irecvs; /* of MPI_Request */
-} s_smpi_replay_globals_t, *smpi_replay_globals_t;
-
-
/* Helper function */
static double parse_double(const char *string)
{
static MPI_Datatype decode_datatype(const char *const action)
{
// Declared datatypes,
-
+
switch(atoi(action))
{
case 0:
break;
default:
MPI_CURRENT_TYPE=MPI_DEFAULT_TYPE;
-
+
}
return MPI_CURRENT_TYPE;
}
+
+/* Map an MPI datatype to the one-character id string used in the replay
+ * trace format. This is the inverse of decode_datatype() above; the ids
+ * ("0".."5") must stay in sync with the switch cases there.
+ * Returns "" for MPI_BYTE (the TAU default, omitted from output),
+ * and "-1" for any datatype the trace format does not encode. */
+const char* encode_datatype(MPI_Datatype datatype)
+{
+
+ //default type for output is set to MPI_BYTE
+ // MPI_DEFAULT_TYPE is not set for output, use directly MPI_BYTE
+ if (datatype==MPI_BYTE){
+ return "";
+ }
+ if(datatype==MPI_DOUBLE)
+ return "0";
+ if(datatype==MPI_INT)
+ return "1";
+ if(datatype==MPI_CHAR)
+ return "2";
+ if(datatype==MPI_SHORT)
+ return "3";
+ if(datatype==MPI_LONG)
+ return "4";
+ if(datatype==MPI_FLOAT)
+ return "5";
+
+ // default - not implemented.
+ // do not warn here as we pass in this function even for other trace formats
+ return "-1";
+}
+
static void action_init(const char *const *action)
{
int i;
XBT_DEBUG("Initialize the counters");
- smpi_replay_globals_t globals = xbt_new(s_smpi_replay_globals_t, 1);
- globals->isends = xbt_dynar_new(sizeof(MPI_Request),NULL);
- globals->irecvs = xbt_dynar_new(sizeof(MPI_Request),NULL);
if(action[2]) MPI_DEFAULT_TYPE= MPI_DOUBLE; // default MPE dataype
else MPI_DEFAULT_TYPE= MPI_BYTE; // default TAU datatype
-
- smpi_process_set_user_data((void*) globals);
/* start a simulated timer */
smpi_process_simulated_start();
if (!reqq) {
reqq=xbt_new0(xbt_dynar_t,active_processes);
-
+
for(i=0;i<active_processes;i++){
- reqq[i]=xbt_dynar_new(sizeof(MPI_Request),NULL);
+ reqq[i]=xbt_dynar_new(sizeof(MPI_Request),&xbt_free_ref);
}
}
}
static void action_finalize(const char *const *action)
{
- smpi_replay_globals_t globals =
- (smpi_replay_globals_t) smpi_process_get_user_data();
- if (globals){
- XBT_DEBUG("There are %lu isends and %lu irecvs in the dynars",
- xbt_dynar_length(globals->isends),xbt_dynar_length(globals->irecvs));
- xbt_dynar_free_container(&(globals->isends));
- xbt_dynar_free_container(&(globals->irecvs));
- }
- free(globals);
}
static void action_comm_size(const char *const *action)
static void action_compute(const char *const *action)
{
double clock = smpi_process_simulated_elapsed();
- smpi_execute_flops(parse_double(action[2]));
+ double flops= parse_double(action[2]);
+#ifdef HAVE_TRACING
+ int rank = smpi_comm_rank(MPI_COMM_WORLD);
+ instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
+ extra->type=TRACING_COMPUTING;
+ extra->comp_size=flops;
+ TRACE_smpi_computing_in(rank, extra);
+#endif
+ smpi_execute_flops(flops);
+#ifdef HAVE_TRACING
+ TRACE_smpi_computing_out(rank);
+#endif
log_timed_action (action, clock);
}
} else {
MPI_CURRENT_TYPE= MPI_DEFAULT_TYPE;
}
-
+
#ifdef HAVE_TRACING
int rank = smpi_comm_rank(MPI_COMM_WORLD);
- TRACE_smpi_computing_out(rank);
+
int dst_traced = smpi_group_rank(smpi_comm_group(MPI_COMM_WORLD), to);
- TRACE_smpi_ptp_in(rank, rank, dst_traced, __FUNCTION__);
- TRACE_smpi_send(rank, rank, dst_traced);
+ instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
+ extra->type = TRACING_SEND;
+ extra->send_size = size;
+ extra->src = rank;
+ extra->dst = dst_traced;
+ extra->datatype1 = encode_datatype(MPI_CURRENT_TYPE);
+ TRACE_smpi_ptp_in(rank, rank, dst_traced, __FUNCTION__, extra);
+ TRACE_smpi_send(rank, rank, dst_traced, size*smpi_datatype_size(MPI_CURRENT_TYPE));
#endif
smpi_mpi_send(NULL, size, MPI_CURRENT_TYPE, to , 0, MPI_COMM_WORLD);
#ifdef HAVE_TRACING
TRACE_smpi_ptp_out(rank, rank, dst_traced, __FUNCTION__);
- TRACE_smpi_computing_in(rank);
#endif
}
if(action[4]) MPI_CURRENT_TYPE=decode_datatype(action[4]);
else MPI_CURRENT_TYPE= MPI_DEFAULT_TYPE;
- smpi_replay_globals_t globals =
- (smpi_replay_globals_t) smpi_process_get_user_data();
#ifdef HAVE_TRACING
int rank = smpi_comm_rank(MPI_COMM_WORLD);
- TRACE_smpi_computing_out(rank);
int dst_traced = smpi_group_rank(smpi_comm_group(MPI_COMM_WORLD), to);
- TRACE_smpi_ptp_in(rank, rank, dst_traced, __FUNCTION__);
- TRACE_smpi_send(rank, rank, dst_traced);
+ instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
+ extra->type = TRACING_ISEND;
+ extra->send_size = size;
+ extra->src = rank;
+ extra->dst = dst_traced;
+ extra->datatype1 = encode_datatype(MPI_CURRENT_TYPE);
+ TRACE_smpi_ptp_in(rank, rank, dst_traced, __FUNCTION__, extra);
+ TRACE_smpi_send(rank, rank, dst_traced, size*smpi_datatype_size(MPI_CURRENT_TYPE));
#endif
request = smpi_mpi_isend(NULL, size, MPI_CURRENT_TYPE, to, 0,MPI_COMM_WORLD);
-
+
#ifdef HAVE_TRACING
TRACE_smpi_ptp_out(rank, rank, dst_traced, __FUNCTION__);
request->send = 1;
- TRACE_smpi_computing_in(rank);
#endif
- xbt_dynar_push(globals->isends,&request);
xbt_dynar_push(reqq[smpi_comm_rank(MPI_COMM_WORLD)],&request);
log_timed_action (action, clock);
if(action[4]) MPI_CURRENT_TYPE=decode_datatype(action[4]);
else MPI_CURRENT_TYPE= MPI_DEFAULT_TYPE;
-
+
#ifdef HAVE_TRACING
int rank = smpi_comm_rank(MPI_COMM_WORLD);
int src_traced = smpi_group_rank(smpi_comm_group(MPI_COMM_WORLD), from);
- TRACE_smpi_computing_out(rank);
- TRACE_smpi_ptp_in(rank, src_traced, rank, __FUNCTION__);
+ instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
+ extra->type = TRACING_RECV;
+ extra->send_size = size;
+ extra->src = src_traced;
+ extra->dst = rank;
+ extra->datatype1 = encode_datatype(MPI_CURRENT_TYPE);
+ TRACE_smpi_ptp_in(rank, src_traced, rank, __FUNCTION__, extra);
#endif
+ //unknown size from the receiver's point of view
+ if(size==-1){
+ smpi_mpi_probe(from, 0, MPI_COMM_WORLD, &status);
+ size=status.count;
+ }
+
smpi_mpi_recv(NULL, size, MPI_CURRENT_TYPE, from, 0, MPI_COMM_WORLD, &status);
#ifdef HAVE_TRACING
TRACE_smpi_ptp_out(rank, src_traced, rank, __FUNCTION__);
TRACE_smpi_recv(rank, src_traced, rank);
- TRACE_smpi_computing_in(rank);
#endif
log_timed_action (action, clock);
double clock = smpi_process_simulated_elapsed();
MPI_Request request;
- smpi_replay_globals_t globals =
- (smpi_replay_globals_t) smpi_process_get_user_data();
-
if(action[4]) MPI_CURRENT_TYPE=decode_datatype(action[4]);
else MPI_CURRENT_TYPE= MPI_DEFAULT_TYPE;
#ifdef HAVE_TRACING
int rank = smpi_comm_rank(MPI_COMM_WORLD);
int src_traced = smpi_group_rank(smpi_comm_group(MPI_COMM_WORLD), from);
- TRACE_smpi_ptp_in(rank, src_traced, rank, __FUNCTION__);
+ instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
+ extra->type = TRACING_IRECV;
+ extra->send_size = size;
+ extra->src = src_traced;
+ extra->dst = rank;
+ extra->datatype1 = encode_datatype(MPI_CURRENT_TYPE);
+ TRACE_smpi_ptp_in(rank, src_traced, rank, __FUNCTION__, extra);
#endif
+ MPI_Status status;
+ //unknown size from the receiver's point of view
+ if(size==-1){
+ smpi_mpi_probe(from, 0, MPI_COMM_WORLD, &status);
+ size=status.count;
+ }
request = smpi_mpi_irecv(NULL, size, MPI_CURRENT_TYPE, from, 0, MPI_COMM_WORLD);
-
+
#ifdef HAVE_TRACING
TRACE_smpi_ptp_out(rank, src_traced, rank, __FUNCTION__);
request->recv = 1;
#endif
- xbt_dynar_push(globals->irecvs,&request);
xbt_dynar_push(reqq[smpi_comm_rank(MPI_COMM_WORLD)],&request);
log_timed_action (action, clock);
}
+/* Replay an MPI_Test action: pop the most recent pending request for this
+ * rank, test it, and push the (possibly completed, i.e. NULL-ed) request
+ * back so a later "wait" action can find it in the same dynar slot. */
+static void action_test(const char *const *action){
+ double clock = smpi_process_simulated_elapsed();
+ MPI_Request request;
+ MPI_Status status;
+ int flag = TRUE;
+
+ request = xbt_dynar_pop_as(reqq[smpi_comm_rank(MPI_COMM_WORLD)],MPI_Request);
+ // NOTE(review): a NULL request here means the trace tested a request that
+ // was never posted (or already consumed) — treated as a malformed trace.
+ xbt_assert(request != NULL, "found null request in reqq");
+
+#ifdef HAVE_TRACING
+ int rank = smpi_comm_rank(MPI_COMM_WORLD);
+ instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
+ extra->type=TRACING_TEST;
+ TRACE_smpi_testing_in(rank, extra);
+#endif
+ // smpi_mpi_test sets request to NULL when the communication completed
+ flag = smpi_mpi_test(&request, &status);
+ XBT_DEBUG("MPI_Test result: %d", flag);
+ /* push back request in dynar to be caught by a subsequent wait. if the test
+ * did succeed, the request is now NULL.
+ */
+ xbt_dynar_push_as(reqq[smpi_comm_rank(MPI_COMM_WORLD)],MPI_Request, request);
+
+#ifdef HAVE_TRACING
+ TRACE_smpi_testing_out(rank);
+#endif
+
+ log_timed_action (action, clock);
+}
+
static void action_wait(const char *const *action){
double clock = smpi_process_simulated_elapsed();
MPI_Request request;
MPI_Status status;
- smpi_replay_globals_t globals =
- (smpi_replay_globals_t) smpi_process_get_user_data();
- xbt_assert(xbt_dynar_length(globals->irecvs),
- "action wait not preceded by any irecv: %s",
+ xbt_assert(xbt_dynar_length(reqq[smpi_comm_rank(MPI_COMM_WORLD)]),
+ "action wait not preceded by any irecv or isend: %s",
xbt_str_join_array(action," "));
- request = xbt_dynar_pop_as(globals->irecvs,MPI_Request);
+ request = xbt_dynar_pop_as(reqq[smpi_comm_rank(MPI_COMM_WORLD)],MPI_Request);
+
+ if (!request){
+ /* Assuming that the trace is well formed, this mean the comm might have
+ * been caught by a MPI_test. Then just return.
+ */
+ return;
+ }
+
#ifdef HAVE_TRACING
- int rank = request && request->comm != MPI_COMM_NULL
+ int rank = request->comm != MPI_COMM_NULL
? smpi_comm_rank(request->comm)
: -1;
- TRACE_smpi_computing_out(rank);
MPI_Group group = smpi_comm_group(request->comm);
int src_traced = smpi_group_rank(group, request->src);
int dst_traced = smpi_group_rank(group, request->dst);
int is_wait_for_receive = request->recv;
- TRACE_smpi_ptp_in(rank, src_traced, dst_traced, __FUNCTION__);
+ instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
+ extra->type = TRACING_WAIT;
+ TRACE_smpi_ptp_in(rank, src_traced, dst_traced, __FUNCTION__, extra);
#endif
smpi_mpi_wait(&request, &status);
#ifdef HAVE_TRACING
if (is_wait_for_receive) {
TRACE_smpi_recv(rank, src_traced, dst_traced);
}
- TRACE_smpi_computing_in(rank);
#endif
log_timed_action (action, clock);
if (count_requests>0) {
MPI_Request requests[count_requests];
MPI_Status status[count_requests];
-
+
/* The reqq is an array of dynars. Its index corresponds to the rank.
Thus each rank saves its own requests to the array request. */
xbt_dynar_foreach(reqq[smpi_comm_rank(MPI_COMM_WORLD)],i,requests[i]);
-
+
#ifdef HAVE_TRACING
//save information from requests
-
+
xbt_dynar_t srcs = xbt_dynar_new(sizeof(int), NULL);
xbt_dynar_t dsts = xbt_dynar_new(sizeof(int), NULL);
xbt_dynar_t recvs = xbt_dynar_new(sizeof(int), NULL);
}
}
int rank_traced = smpi_process_index();
- TRACE_smpi_computing_out(rank_traced);
-
- TRACE_smpi_ptp_in(rank_traced, -1, -1, __FUNCTION__);
- #endif
+ instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
+ extra->type = TRACING_WAITALL;
+ extra->send_size=count_requests;
+ TRACE_smpi_ptp_in(rank_traced, -1, -1, __FUNCTION__,extra);
+ #endif
smpi_mpi_waitall(count_requests, requests, status);
xbt_dynar_free(&srcs);
xbt_dynar_free(&dsts);
xbt_dynar_free(&recvs);
- TRACE_smpi_computing_in(rank_traced);
#endif
-
- xbt_dynar_reset(reqq[smpi_comm_rank(MPI_COMM_WORLD)]);
+
+ xbt_dynar_free_container(&(reqq[smpi_comm_rank(MPI_COMM_WORLD)]));
}
log_timed_action (action, clock);
}
double clock = smpi_process_simulated_elapsed();
#ifdef HAVE_TRACING
int rank = smpi_comm_rank(MPI_COMM_WORLD);
- TRACE_smpi_computing_out(rank);
- TRACE_smpi_collective_in(rank, -1, __FUNCTION__);
+ instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
+ extra->type = TRACING_BARRIER;
+ TRACE_smpi_collective_in(rank, -1, __FUNCTION__, extra);
#endif
smpi_mpi_barrier(MPI_COMM_WORLD);
#ifdef HAVE_TRACING
TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
- TRACE_smpi_computing_in(rank);
#endif
log_timed_action (action, clock);
MPI_CURRENT_TYPE=decode_datatype(action[4]);
}
}
-
+
#ifdef HAVE_TRACING
int rank = smpi_comm_rank(MPI_COMM_WORLD);
- TRACE_smpi_computing_out(rank);
- int root_traced = smpi_group_rank(smpi_comm_group(MPI_COMM_WORLD), 0);
- TRACE_smpi_collective_in(rank, root_traced, __FUNCTION__);
+ int root_traced = smpi_group_index(smpi_comm_group(MPI_COMM_WORLD), root);
+
+ instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
+ extra->type = TRACING_BCAST;
+ extra->send_size = size;
+ extra->root = root_traced;
+ extra->datatype1 = encode_datatype(MPI_CURRENT_TYPE);
+ TRACE_smpi_collective_in(rank, root_traced, __FUNCTION__, extra);
+
#endif
- smpi_mpi_bcast(NULL, size, MPI_CURRENT_TYPE, root, MPI_COMM_WORLD);
+ mpi_coll_bcast_fun(NULL, size, MPI_CURRENT_TYPE, root, MPI_COMM_WORLD);
#ifdef HAVE_TRACING
TRACE_smpi_collective_out(rank, root_traced, __FUNCTION__);
- TRACE_smpi_computing_in(rank);
#endif
log_timed_action (action, clock);
double clock = smpi_process_simulated_elapsed();
int root=0;
MPI_CURRENT_TYPE= MPI_DEFAULT_TYPE;
-
+
if(action[4]) {
- root= atoi(action[4]);
- if(action[5]) {
- MPI_CURRENT_TYPE=decode_datatype(action[5]);
- }
+ root= atoi(action[4]);
+ if(action[5]) {
+ MPI_CURRENT_TYPE=decode_datatype(action[5]);
+ }
}
#ifdef HAVE_TRACING
int rank = smpi_comm_rank(MPI_COMM_WORLD);
- TRACE_smpi_computing_out(rank);
- int root_traced = smpi_group_rank(smpi_comm_group(MPI_COMM_WORLD), 0);
- TRACE_smpi_collective_in(rank, root_traced, __FUNCTION__);
+ int root_traced = smpi_group_rank(smpi_comm_group(MPI_COMM_WORLD), root);
+ instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
+ extra->type = TRACING_REDUCE;
+ extra->send_size = comm_size;
+ extra->comp_size = comp_size;
+ extra->datatype1 = encode_datatype(MPI_CURRENT_TYPE);
+ extra->root = root_traced;
+
+ TRACE_smpi_collective_in(rank, root_traced, __FUNCTION__,extra);
#endif
mpi_coll_reduce_fun(NULL, NULL, comm_size, MPI_CURRENT_TYPE, MPI_OP_NULL, root, MPI_COMM_WORLD);
smpi_execute_flops(comp_size);
#ifdef HAVE_TRACING
TRACE_smpi_collective_out(rank, root_traced, __FUNCTION__);
- TRACE_smpi_computing_in(rank);
#endif
log_timed_action (action, clock);
static void action_allReduce(const char *const *action) {
double comm_size = parse_double(action[2]);
double comp_size = parse_double(action[3]);
-
+
if(action[4]) MPI_CURRENT_TYPE=decode_datatype(action[4]);
else MPI_CURRENT_TYPE= MPI_DEFAULT_TYPE;
-
+
double clock = smpi_process_simulated_elapsed();
#ifdef HAVE_TRACING
int rank = smpi_comm_rank(MPI_COMM_WORLD);
- TRACE_smpi_computing_out(rank);
- TRACE_smpi_collective_in(rank, -1, __FUNCTION__);
+ instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
+ extra->type = TRACING_ALLREDUCE;
+ extra->send_size = comm_size;
+ extra->comp_size = comp_size;
+ extra->datatype1 = encode_datatype(MPI_CURRENT_TYPE);
+
+ TRACE_smpi_collective_in(rank, -1, __FUNCTION__,extra);
#endif
- mpi_coll_reduce_fun(NULL, NULL, comm_size, MPI_CURRENT_TYPE, MPI_OP_NULL, 0, MPI_COMM_WORLD);
+ mpi_coll_reduce_fun(NULL, NULL, comm_size, MPI_CURRENT_TYPE, MPI_OP_NULL, 0, MPI_COMM_WORLD);
smpi_execute_flops(comp_size);
- mpi_coll_bcast_fun(NULL, comm_size, MPI_CURRENT_TYPE, 0, MPI_COMM_WORLD);
+ mpi_coll_bcast_fun(NULL, comm_size, MPI_CURRENT_TYPE, 0, MPI_COMM_WORLD);
#ifdef HAVE_TRACING
TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
- TRACE_smpi_computing_in(rank);
#endif
log_timed_action (action, clock);
int send_size = parse_double(action[2]);
int recv_size = parse_double(action[3]);
MPI_Datatype MPI_CURRENT_TYPE2;
-
+
if(action[4]) {
MPI_CURRENT_TYPE=decode_datatype(action[4]);
MPI_CURRENT_TYPE2=decode_datatype(action[5]);
#ifdef HAVE_TRACING
int rank = smpi_process_index();
- TRACE_smpi_computing_out(rank);
- TRACE_smpi_collective_in(rank, -1, __FUNCTION__);
+ instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
+ extra->type = TRACING_ALLTOALL;
+ extra->send_size = send_size;
+ extra->recv_size = recv_size;
+ extra->datatype1 = encode_datatype(MPI_CURRENT_TYPE);
+ extra->datatype2 = encode_datatype(MPI_CURRENT_TYPE2);
+
+ TRACE_smpi_collective_in(rank, -1, __FUNCTION__,extra);
#endif
-
+
mpi_coll_alltoall_fun(send, send_size, MPI_CURRENT_TYPE, recv, recv_size, MPI_CURRENT_TYPE2, MPI_COMM_WORLD);
#ifdef HAVE_TRACING
TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
- TRACE_smpi_computing_in(rank);
#endif
log_timed_action (action, clock);
static void action_gather(const char *const *action) {
-
-
/*
The structure of the gather action for the rank 0 (total 4 processes)
is the following:
3) 0 is the root node
4) 0 is the send datatype id, see decode_datatype()
5) 0 is the recv datatype id, see decode_datatype()
-
*/
double clock = smpi_process_simulated_elapsed();
int comm_size = smpi_comm_size(MPI_COMM_WORLD);
MPI_CURRENT_TYPE=MPI_DEFAULT_TYPE;
MPI_CURRENT_TYPE2=MPI_DEFAULT_TYPE;
}
- void *send = calloc(send_size, smpi_datatype_size(MPI_CURRENT_TYPE));
- void *recv = calloc(recv_size, smpi_datatype_size(MPI_CURRENT_TYPE2));
+ void *send = calloc(send_size, smpi_datatype_size(MPI_CURRENT_TYPE));
+ void *recv = NULL;
int root=atoi(action[4]);
int rank = smpi_process_index();
recv = calloc(recv_size*comm_size, smpi_datatype_size(MPI_CURRENT_TYPE2));
#ifdef HAVE_TRACING
- TRACE_smpi_computing_out(rank);
- TRACE_smpi_collective_in(rank, -1, __FUNCTION__);
+ instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
+ extra->type = TRACING_GATHER;
+ extra->send_size = send_size;
+ extra->recv_size = recv_size;
+ extra->root = root;
+ extra->datatype1 = encode_datatype(MPI_CURRENT_TYPE);
+ extra->datatype2 = encode_datatype(MPI_CURRENT_TYPE2);
+
+ TRACE_smpi_collective_in(rank, root, __FUNCTION__, extra);
#endif
smpi_mpi_gather(send, send_size, MPI_CURRENT_TYPE,
recv, recv_size, MPI_CURRENT_TYPE2,
#ifdef HAVE_TRACING
TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
- TRACE_smpi_computing_in(rank);
#endif
log_timed_action (action, clock);
}
+
+/* Replay an MPI_Gatherv action. Allocates a dummy send buffer on every
+ * rank and a dummy receive buffer on the root only (recv stays NULL on
+ * non-root ranks, which smpi ignores there), then performs the collective. */
+static void action_gatherv(const char *const *action) {
+ /*
+ The structure of the gatherv action for the rank 0 (total 4 processes)
+ is the following:
+ 0 gather 68 68 10 10 10 0 0 0
+
+ where:
+ 1) 68 is the sendcount
+ 2) 68 10 10 10 is the recvcounts
+ 3) 0 is the root node
+ 4) 0 is the send datatype id, see decode_datatype()
+ 5) 0 is the recv datatype id, see decode_datatype()
+ */
+ // NOTE(review): the example above says "gather" but this handler is
+ // registered for the "gatherV" action name — confirm against the trace
+ // generator which keyword is actually emitted.
+ double clock = smpi_process_simulated_elapsed();
+ int comm_size = smpi_comm_size(MPI_COMM_WORLD);
+ int send_size = parse_double(action[2]);
+ int *disps = xbt_new0(int, comm_size);
+ int *recvcounts = xbt_new0(int, comm_size);
+ int i=0,recv_sum=0;
+
+ // datatype ids, when present, follow the per-rank recvcounts list
+ MPI_Datatype MPI_CURRENT_TYPE2;
+ if(action[4+comm_size]) {
+ MPI_CURRENT_TYPE=decode_datatype(action[4+comm_size]);
+ MPI_CURRENT_TYPE2=decode_datatype(action[5+comm_size]);
+ } else {
+ MPI_CURRENT_TYPE=MPI_DEFAULT_TYPE;
+ MPI_CURRENT_TYPE2=MPI_DEFAULT_TYPE;
+ }
+ void *send = calloc(send_size, smpi_datatype_size(MPI_CURRENT_TYPE));
+ void *recv = NULL;
+ for(i=0;i<comm_size;i++) {
+ recvcounts[i] = atoi(action[i+3]);
+ recv_sum=recv_sum+recvcounts[i];
+ // displacements are not recorded in the trace, so all are left at 0;
+ // buffer contents are irrelevant for replay, only sizes/timing matter
+ disps[i] = 0;
+ }
+
+ int root=atoi(action[3+comm_size]);
+ int rank = smpi_process_index();
+
+ if(rank==root)
+ recv = calloc(recv_sum, smpi_datatype_size(MPI_CURRENT_TYPE2));
+
+#ifdef HAVE_TRACING
+ instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
+ extra->type = TRACING_GATHERV;
+ extra->send_size = send_size;
+ extra->recvcounts= xbt_malloc(comm_size*sizeof(int));
+ for(i=0; i< comm_size; i++)//copy data to avoid bad free
+ extra->recvcounts[i] = recvcounts[i];
+ extra->root = root;
+ extra->num_processes = comm_size;
+ extra->datatype1 = encode_datatype(MPI_CURRENT_TYPE);
+ extra->datatype2 = encode_datatype(MPI_CURRENT_TYPE2);
+
+ TRACE_smpi_collective_in(rank, root, __FUNCTION__, extra);
+#endif
+smpi_mpi_gatherv(send, send_size, MPI_CURRENT_TYPE,
+ recv, recvcounts, disps, MPI_CURRENT_TYPE2,
+ root, MPI_COMM_WORLD);
+
+#ifdef HAVE_TRACING
+ TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
+#endif
+
+ log_timed_action (action, clock);
+ // xbt_free(NULL) is a no-op, so freeing recv on non-root ranks is safe
+ xbt_free(recvcounts);
+ xbt_free(send);
+ xbt_free(recv);
+ xbt_free(disps);
+
+}
+
static void action_reducescatter(const char *const *action) {
-
+
/*
The structure of the reducescatter action for the rank 0 (total 4 processes)
is the following:
1) The first four values after the name of the action declare the recvcounts array
2) The value 11346849 is the amount of instructions
3) The last value corresponds to the datatype, see decode_datatype().
-
+
We analyze a MPI_Reduce_scatter call to one MPI_Reduce and one MPI_Scatterv.
-
+
*/
double clock = smpi_process_simulated_elapsed();
}
#ifdef HAVE_TRACING
- TRACE_smpi_computing_out(rank);
- TRACE_smpi_collective_in(rank, -1, __FUNCTION__);
+ instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
+ extra->type = TRACING_REDUCE_SCATTER;
+ extra->send_size = 0;
+ extra->recvcounts= xbt_malloc(comm_size*sizeof(int));
+ for(i=0; i< comm_size; i++)//copy data to avoid bad free
+ extra->recvcounts[i] = recvcounts[i];
+ extra->datatype1 = encode_datatype(MPI_CURRENT_TYPE);
+ extra->comp_size = comp_size;
+ extra->num_processes = comm_size;
+
+
+ TRACE_smpi_collective_in(rank, -1, __FUNCTION__,extra);
#endif
mpi_coll_reduce_fun(NULL, NULL, recv_sum, MPI_CURRENT_TYPE, MPI_OP_NULL,
root, MPI_COMM_WORLD);
smpi_mpi_scatterv(NULL, recvcounts, disps, MPI_CURRENT_TYPE, NULL,
recvcounts[rank], MPI_CURRENT_TYPE, 0, MPI_COMM_WORLD);
smpi_execute_flops(comp_size);
-
-
+
+
#ifdef HAVE_TRACING
TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
- TRACE_smpi_computing_in(rank);
#endif
-
+ xbt_free(recvcounts);
+ xbt_free(disps);
log_timed_action (action, clock);
}
static void action_allgatherv(const char *const *action) {
-
+
/*
The structure of the allgatherv action for the rank 0 (total 4 processes)
is the following:
-0 allGatherV 275427 275427 275427 275427 204020 0 275427 550854 826281
+0 allGatherV 275427 275427 275427 275427 204020
where:
1) 275427 is the sendcount
2) The next four elements declare the recvcounts array
- 3) The next four values declare the disps array
- 4) No more values mean that the datatype for sent and receive buffer
+ 3) No more values mean that the datatype for sent and receive buffer
is the default one, see decode_datatype().
*/
double clock = smpi_process_simulated_elapsed();
-
+
int comm_size = smpi_comm_size(MPI_COMM_WORLD);
int i=0;
int sendcount=atoi(action[2]);
int recv_sum=0;
MPI_Datatype MPI_CURRENT_TYPE2;
- if(action[3+2*comm_size]) {
- MPI_CURRENT_TYPE = decode_datatype(action[3+2*comm_size]);
- MPI_CURRENT_TYPE2 = decode_datatype(action[4+2*comm_size]);
+ if(action[3+comm_size]) {
+ MPI_CURRENT_TYPE = decode_datatype(action[3+comm_size]);
+ MPI_CURRENT_TYPE2 = decode_datatype(action[4+comm_size]);
} else {
MPI_CURRENT_TYPE = MPI_DEFAULT_TYPE;
MPI_CURRENT_TYPE2 = MPI_DEFAULT_TYPE;
for(i=0;i<comm_size;i++) {
recvcounts[i] = atoi(action[i+3]);
recv_sum=recv_sum+recvcounts[i];
- disps[i] = atoi(action[i+3+comm_size]);
}
void *recvbuf = calloc(recv_sum, smpi_datatype_size(MPI_CURRENT_TYPE2));
#ifdef HAVE_TRACING
- int rank = MPI_COMM_WORLD != MPI_COMM_NULL ? smpi_process_index() : -1;
- TRACE_smpi_computing_out(rank);
- TRACE_smpi_collective_in(rank, -1, __FUNCTION__);
+ int rank = smpi_process_index();
+ instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
+ extra->type = TRACING_ALLGATHERV;
+ extra->send_size = sendcount;
+ extra->recvcounts= xbt_malloc(comm_size*sizeof(int));
+ for(i=0; i< comm_size; i++)//copy data to avoid bad free
+ extra->recvcounts[i] = recvcounts[i];
+ extra->datatype1 = encode_datatype(MPI_CURRENT_TYPE);
+ extra->datatype2 = encode_datatype(MPI_CURRENT_TYPE2);
+ extra->num_processes = comm_size;
+
+ TRACE_smpi_collective_in(rank, -1, __FUNCTION__,extra);
#endif
-
+
mpi_coll_allgatherv_fun(sendbuf, sendcount, MPI_CURRENT_TYPE, recvbuf, recvcounts, disps, MPI_CURRENT_TYPE2, MPI_COMM_WORLD);
#ifdef HAVE_TRACING
TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
- TRACE_smpi_computing_in(rank);
#endif
-
+
log_timed_action (action, clock);
xbt_free(sendbuf);
xbt_free(recvbuf);
/*
The structure of the allToAllV action for the rank 0 (total 4 processes)
is the following:
- 0 allToAllV 100 1 7 10 12 5 10 20 45 100 1 70 10 5 1 5 77 90
+ 0 allToAllV 100 1 7 10 12 100 1 70 10 5
where:
1) 100 is the size of the send buffer *sizeof(int),
2) 1 7 10 12 is the sendcounts array
- 3) 5 10 20 45 is the sdispls array
- 4) 100*sizeof(int) is the size of the receiver buffer
- 5) 1 70 10 5 is the recvcounts array
- 6) 1 5 77 90 is the rdispls array
-
+ 3) 100*sizeof(int) is the size of the receiver buffer
+ 4) 1 70 10 5 is the recvcounts array
+
*/
-
-
+
+
double clock = smpi_process_simulated_elapsed();
-
+
int comm_size = smpi_comm_size(MPI_COMM_WORLD);
int send_buf_size=0,recv_buf_size=0,i=0;
int *sendcounts = xbt_new0(int, comm_size);
int *recvdisps = xbt_new0(int, comm_size);
MPI_Datatype MPI_CURRENT_TYPE2;
-
+
send_buf_size=parse_double(action[2]);
- recv_buf_size=parse_double(action[3+2*comm_size]);
- if(action[4+4*comm_size]) {
- MPI_CURRENT_TYPE=decode_datatype(action[4+4*comm_size]);
- MPI_CURRENT_TYPE2=decode_datatype(action[5+4*comm_size]);
+ recv_buf_size=parse_double(action[3+comm_size]);
+ if(action[4+2*comm_size]) {
+ MPI_CURRENT_TYPE=decode_datatype(action[4+2*comm_size]);
+ MPI_CURRENT_TYPE2=decode_datatype(action[5+2*comm_size]);
}
else {
MPI_CURRENT_TYPE=MPI_DEFAULT_TYPE;
MPI_CURRENT_TYPE2=MPI_DEFAULT_TYPE;
}
-
+
void *sendbuf = calloc(send_buf_size, smpi_datatype_size(MPI_CURRENT_TYPE));
void *recvbuf = calloc(recv_buf_size, smpi_datatype_size(MPI_CURRENT_TYPE2));
for(i=0;i<comm_size;i++) {
sendcounts[i] = atoi(action[i+3]);
- senddisps[i] = atoi(action[i+3+comm_size]);
- recvcounts[i] = atoi(action[i+4+2*comm_size]);
- recvdisps[i] = atoi(action[i+4+3*comm_size]);
+ recvcounts[i] = atoi(action[i+4+comm_size]);
}
-
+
#ifdef HAVE_TRACING
- int rank = MPI_COMM_WORLD != MPI_COMM_NULL ? smpi_process_index() : -1;
- TRACE_smpi_computing_out(rank);
- TRACE_smpi_collective_in(rank, -1, __FUNCTION__);
+ int rank = smpi_process_index();
+ instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
+ extra->type = TRACING_ALLTOALLV;
+ extra->recvcounts= xbt_malloc(comm_size*sizeof(int));
+ extra->sendcounts= xbt_malloc(comm_size*sizeof(int));
+ extra->num_processes = comm_size;
+
+ for(i=0; i< comm_size; i++){//copy data to avoid bad free
+ extra->send_size += sendcounts[i];
+ extra->sendcounts[i] = sendcounts[i];
+ extra->recv_size += recvcounts[i];
+ extra->recvcounts[i] = recvcounts[i];
+ }
+ extra->datatype1 = encode_datatype(MPI_CURRENT_TYPE);
+ extra->datatype2 = encode_datatype(MPI_CURRENT_TYPE2);
+
+ TRACE_smpi_collective_in(rank, -1, __FUNCTION__,extra);
#endif
- mpi_coll_alltoallv_fun(sendbuf, sendcounts, senddisps, MPI_CURRENT_TYPE,
+ mpi_coll_alltoallv_fun(sendbuf, sendcounts, senddisps, MPI_CURRENT_TYPE,
recvbuf, recvcounts, recvdisps, MPI_CURRENT_TYPE,
MPI_COMM_WORLD);
#ifdef HAVE_TRACING
TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
- TRACE_smpi_computing_in(rank);
#endif
-
+
log_timed_action (action, clock);
xbt_free(sendbuf);
xbt_free(recvbuf);
xbt_free(recvcounts);
xbt_free(senddisps);
xbt_free(recvdisps);
-
-
}
void smpi_replay_init(int *argc, char***argv){
- PMPI_Init(argc, argv);
+ smpi_process_init(argc, argv);
+ smpi_process_mark_as_initialized();
+#ifdef HAVE_TRACING
+ int rank = smpi_process_index();
+ TRACE_smpi_init(rank);
+ TRACE_smpi_computing_init(rank);
+ instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
+ extra->type = TRACING_INIT;
+ TRACE_smpi_collective_in(rank, -1, __FUNCTION__, extra);
+ TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
+#endif
+
if (!smpi_process_index()){
_xbt_replay_action_init();
xbt_replay_action_register("init", action_init);
xbt_replay_action_register("Isend", action_Isend);
xbt_replay_action_register("recv", action_recv);
xbt_replay_action_register("Irecv", action_Irecv);
+ xbt_replay_action_register("test", action_test);
xbt_replay_action_register("wait", action_wait);
xbt_replay_action_register("waitAll", action_waitall);
xbt_replay_action_register("barrier", action_barrier);
xbt_replay_action_register("allToAll", action_allToAll);
xbt_replay_action_register("allToAllV", action_allToAllv);
xbt_replay_action_register("gather", action_gather);
+ xbt_replay_action_register("gatherV", action_gatherv);
xbt_replay_action_register("allGatherV", action_allgatherv);
xbt_replay_action_register("reduceScatter", action_reducescatter);
xbt_replay_action_register("compute", action_compute);
int smpi_replay_finalize(){
double sim_time= 1.;
/* One active process will stop. Decrease the counter*/
- active_processes--;
XBT_DEBUG("There are %lu elements in reqq[*]",
xbt_dynar_length(reqq[smpi_comm_rank(MPI_COMM_WORLD)]));
- xbt_dynar_free(&reqq[smpi_comm_rank(MPI_COMM_WORLD)]);
+ if (!xbt_dynar_is_empty(reqq[smpi_comm_rank(MPI_COMM_WORLD)])){
+ int count_requests=xbt_dynar_length(reqq[smpi_comm_rank(MPI_COMM_WORLD)]);
+ MPI_Request requests[count_requests];
+ MPI_Status status[count_requests];
+ unsigned int i;
+
+ xbt_dynar_foreach(reqq[smpi_comm_rank(MPI_COMM_WORLD)],i,requests[i]);
+ smpi_mpi_waitall(count_requests, requests, status);
+ active_processes--;
+ } else {
+ active_processes--;
+ }
+
+ xbt_dynar_free_container(&(reqq[smpi_comm_rank(MPI_COMM_WORLD)]));
+
if(!active_processes){
/* Last process alive speaking */
/* end the simulated timer */
sim_time = smpi_process_simulated_elapsed();
- XBT_INFO("Simulation time %g", sim_time);
+ XBT_INFO("Simulation time %f", sim_time);
_xbt_replay_action_exit();
xbt_free(reqq);
reqq = NULL;
}
smpi_mpi_barrier(MPI_COMM_WORLD);
- return PMPI_Finalize();
+#ifdef HAVE_TRACING
+ int rank = smpi_process_index();
+ instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
+ extra->type = TRACING_FINALIZE;
+ TRACE_smpi_collective_in(rank, -1, __FUNCTION__, extra);
+#endif
+ smpi_process_finalize();
+#ifdef HAVE_TRACING
+ TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
+ TRACE_smpi_finalize(smpi_process_index());
+#endif
+ smpi_process_destroy();
+ return MPI_SUCCESS;
}