static MPI_Datatype decode_datatype(const char *const action)
{
// Maps the numeric datatype code carried in a replay-trace action token to an
// MPI datatype, leaving the result in MPI_CURRENT_TYPE (a file-level global).
// NOTE(review): this chunk is a patch fragment — the case labels for the
// individual datatype codes appear to be elided between the diff markers
// below; only "case 0" and the default (fall back to MPI_DEFAULT_TYPE) are
// visible here. Confirm the full mapping against the complete file.
-
+
switch(atoi(action))
  {
    case 0:
      break;
    default:
      MPI_CURRENT_TYPE=MPI_DEFAULT_TYPE;
-
+
  }
  return MPI_CURRENT_TYPE;
}
if(action[2]) MPI_DEFAULT_TYPE= MPI_DOUBLE; // default MPE datatype
else MPI_DEFAULT_TYPE= MPI_BYTE; // default TAU datatype
-
+
smpi_process_set_user_data((void*) globals);
/* start a simulated timer */
if (!reqq) {
reqq=xbt_new0(xbt_dynar_t,active_processes);
-
+
for(i=0;i<active_processes;i++){
reqq[i]=xbt_dynar_new(sizeof(MPI_Request),NULL);
}
} else {
MPI_CURRENT_TYPE= MPI_DEFAULT_TYPE;
}
-
+
#ifdef HAVE_TRACING
int rank = smpi_comm_rank(MPI_COMM_WORLD);
TRACE_smpi_computing_out(rank);
int dst_traced = smpi_group_rank(smpi_comm_group(MPI_COMM_WORLD), to);
- TRACE_smpi_ptp_in(rank, rank, dst_traced, __FUNCTION__);
- TRACE_smpi_send(rank, rank, dst_traced);
+ TRACE_smpi_ptp_in(rank, rank, dst_traced, __FUNCTION__, size*smpi_datatype_size(MPI_CURRENT_TYPE));
+ TRACE_smpi_send(rank, rank, dst_traced, size*smpi_datatype_size(MPI_CURRENT_TYPE));
#endif
smpi_mpi_send(NULL, size, MPI_CURRENT_TYPE, to , 0, MPI_COMM_WORLD);
int rank = smpi_comm_rank(MPI_COMM_WORLD);
TRACE_smpi_computing_out(rank);
int dst_traced = smpi_group_rank(smpi_comm_group(MPI_COMM_WORLD), to);
- TRACE_smpi_ptp_in(rank, rank, dst_traced, __FUNCTION__);
- TRACE_smpi_send(rank, rank, dst_traced);
+ TRACE_smpi_ptp_in(rank, rank, dst_traced, __FUNCTION__, size*smpi_datatype_size(MPI_CURRENT_TYPE));
+ TRACE_smpi_send(rank, rank, dst_traced, size*smpi_datatype_size(MPI_CURRENT_TYPE));
#endif
request = smpi_mpi_isend(NULL, size, MPI_CURRENT_TYPE, to, 0,MPI_COMM_WORLD);
int src_traced = smpi_group_rank(smpi_comm_group(MPI_COMM_WORLD), from);
TRACE_smpi_computing_out(rank);
- TRACE_smpi_ptp_in(rank, src_traced, rank, __FUNCTION__);
+ TRACE_smpi_ptp_in(rank, src_traced, rank, __FUNCTION__, size*smpi_datatype_size(MPI_CURRENT_TYPE));
#endif
smpi_mpi_recv(NULL, size, MPI_CURRENT_TYPE, from, 0, MPI_COMM_WORLD, &status);
#ifdef HAVE_TRACING
int rank = smpi_comm_rank(MPI_COMM_WORLD);
int src_traced = smpi_group_rank(smpi_comm_group(MPI_COMM_WORLD), from);
- TRACE_smpi_ptp_in(rank, src_traced, rank, __FUNCTION__);
+ TRACE_smpi_ptp_in(rank, src_traced, rank, __FUNCTION__, size*smpi_datatype_size(MPI_CURRENT_TYPE));
#endif
request = smpi_mpi_irecv(NULL, size, MPI_CURRENT_TYPE, from, 0, MPI_COMM_WORLD);
int src_traced = smpi_group_rank(group, request->src);
int dst_traced = smpi_group_rank(group, request->dst);
int is_wait_for_receive = request->recv;
- TRACE_smpi_ptp_in(rank, src_traced, dst_traced, __FUNCTION__);
+ TRACE_smpi_ptp_in(rank, src_traced, dst_traced, __FUNCTION__, -1);
#endif
smpi_mpi_wait(&request, &status);
#ifdef HAVE_TRACING
/* The reqq is an array of dynars. Its index corresponds to the rank.
Thus each rank saves its own requests to the array request. */
xbt_dynar_foreach(reqq[smpi_comm_rank(MPI_COMM_WORLD)],i,requests[i]);
-
+
#ifdef HAVE_TRACING
//save information from requests
-
+
xbt_dynar_t srcs = xbt_dynar_new(sizeof(int), NULL);
xbt_dynar_t dsts = xbt_dynar_new(sizeof(int), NULL);
xbt_dynar_t recvs = xbt_dynar_new(sizeof(int), NULL);
int rank_traced = smpi_process_index();
TRACE_smpi_computing_out(rank_traced);
- TRACE_smpi_ptp_in(rank_traced, -1, -1, __FUNCTION__);
+ TRACE_smpi_ptp_in(rank_traced, -1, -1, __FUNCTION__, count_requests);
#endif
smpi_mpi_waitall(count_requests, requests, status);
xbt_dynar_free(&recvs);
TRACE_smpi_computing_in(rank_traced);
#endif
-
+
xbt_dynar_reset(reqq[smpi_comm_rank(MPI_COMM_WORLD)]);
}
log_timed_action (action, clock);
#ifdef HAVE_TRACING
int rank = smpi_comm_rank(MPI_COMM_WORLD);
TRACE_smpi_computing_out(rank);
- TRACE_smpi_collective_in(rank, -1, __FUNCTION__);
+ TRACE_smpi_collective_in(rank, -1, __FUNCTION__, smpi_comm_size(MPI_COMM_WORLD));
#endif
smpi_mpi_barrier(MPI_COMM_WORLD);
#ifdef HAVE_TRACING
int rank = smpi_comm_rank(MPI_COMM_WORLD);
TRACE_smpi_computing_out(rank);
int root_traced = smpi_group_rank(smpi_comm_group(MPI_COMM_WORLD), 0);
- TRACE_smpi_collective_in(rank, root_traced, __FUNCTION__);
+ TRACE_smpi_collective_in(rank, root_traced, __FUNCTION__,size*smpi_datatype_size(MPI_CURRENT_TYPE));
#endif
smpi_mpi_bcast(NULL, size, MPI_CURRENT_TYPE, root, MPI_COMM_WORLD);
double clock = smpi_process_simulated_elapsed();
int root=0;
MPI_CURRENT_TYPE= MPI_DEFAULT_TYPE;
-
+
if(action[4]) {
root= atoi(action[4]);
if(action[5]) {
int rank = smpi_comm_rank(MPI_COMM_WORLD);
TRACE_smpi_computing_out(rank);
int root_traced = smpi_group_rank(smpi_comm_group(MPI_COMM_WORLD), 0);
- TRACE_smpi_collective_in(rank, root_traced, __FUNCTION__);
+ TRACE_smpi_collective_in(rank, root_traced, __FUNCTION__,comm_size*smpi_datatype_size(MPI_CURRENT_TYPE));
#endif
mpi_coll_reduce_fun(NULL, NULL, comm_size, MPI_CURRENT_TYPE, MPI_OP_NULL, root, MPI_COMM_WORLD);
smpi_execute_flops(comp_size);
#ifdef HAVE_TRACING
int rank = smpi_comm_rank(MPI_COMM_WORLD);
TRACE_smpi_computing_out(rank);
- TRACE_smpi_collective_in(rank, -1, __FUNCTION__);
+ TRACE_smpi_collective_in(rank, -1, __FUNCTION__,comp_size*smpi_datatype_size(MPI_CURRENT_TYPE));
#endif
mpi_coll_reduce_fun(NULL, NULL, comm_size, MPI_CURRENT_TYPE, MPI_OP_NULL, 0, MPI_COMM_WORLD);
smpi_execute_flops(comp_size);
#ifdef HAVE_TRACING
int rank = smpi_process_index();
TRACE_smpi_computing_out(rank);
- TRACE_smpi_collective_in(rank, -1, __FUNCTION__);
+ TRACE_smpi_collective_in(rank, -1, __FUNCTION__,send_size*smpi_datatype_size(MPI_CURRENT_TYPE));
#endif
mpi_coll_alltoall_fun(send, send_size, MPI_CURRENT_TYPE, recv, recv_size, MPI_CURRENT_TYPE2, MPI_COMM_WORLD);
MPI_CURRENT_TYPE=MPI_DEFAULT_TYPE;
MPI_CURRENT_TYPE2=MPI_DEFAULT_TYPE;
}
- void *send = calloc(send_size, smpi_datatype_size(MPI_CURRENT_TYPE));
- void *recv = calloc(recv_size, smpi_datatype_size(MPI_CURRENT_TYPE2));
+ void *send = calloc(send_size, smpi_datatype_size(MPI_CURRENT_TYPE));
+ void *recv = calloc(recv_size, smpi_datatype_size(MPI_CURRENT_TYPE2));
int root=atoi(action[4]);
int rank = smpi_process_index();
#ifdef HAVE_TRACING
TRACE_smpi_computing_out(rank);
- TRACE_smpi_collective_in(rank, -1, __FUNCTION__);
+ TRACE_smpi_collective_in(rank, -1, __FUNCTION__,send_size*smpi_datatype_size(MPI_CURRENT_TYPE));
#endif
smpi_mpi_gather(send, send_size, MPI_CURRENT_TYPE,
recv, recv_size, MPI_CURRENT_TYPE2,
static void action_reducescatter(const char *const *action) {
-
+
/*
The structure of the reducescatter action for the rank 0 (total 4 processes)
is the following:
1) The first four values after the name of the action declare the recvcounts array
2) The value 11346849 is the amount of instructions
3) The last value corresponds to the datatype, see decode_datatype().
-
+
We analyze a MPI_Reduce_scatter call to one MPI_Reduce and one MPI_Scatterv.
-
+
*/
double clock = smpi_process_simulated_elapsed();
#ifdef HAVE_TRACING
TRACE_smpi_computing_out(rank);
- TRACE_smpi_collective_in(rank, -1, __FUNCTION__);
+ TRACE_smpi_collective_in(rank, -1, __FUNCTION__, recv_sum*smpi_datatype_size(MPI_CURRENT_TYPE));
#endif
mpi_coll_reduce_fun(NULL, NULL, recv_sum, MPI_CURRENT_TYPE, MPI_OP_NULL,
root, MPI_COMM_WORLD);
smpi_mpi_scatterv(NULL, recvcounts, disps, MPI_CURRENT_TYPE, NULL,
recvcounts[rank], MPI_CURRENT_TYPE, 0, MPI_COMM_WORLD);
smpi_execute_flops(comp_size);
-
-
+
+
#ifdef HAVE_TRACING
TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
TRACE_smpi_computing_in(rank);
static void action_allgatherv(const char *const *action) {
-
+
/*
The structure of the allgatherv action for the rank 0 (total 4 processes)
is the following:
-0 allGatherV 275427 275427 275427 275427 204020 0 275427 550854 826281
+0 allGatherV 275427 275427 275427 275427 204020
where:
1) 275427 is the sendcount
2) The next four elements declare the recvcounts array
- 3) The next four values declare the disps array
- 4) No more values mean that the datatype for sent and receive buffer
+ 3) No more values mean that the datatype for sent and receive buffer
is the default one, see decode_datatype().
*/
double clock = smpi_process_simulated_elapsed();
-
+
int comm_size = smpi_comm_size(MPI_COMM_WORLD);
int i=0;
int sendcount=atoi(action[2]);
int recv_sum=0;
MPI_Datatype MPI_CURRENT_TYPE2;
- if(action[3+2*comm_size]) {
- MPI_CURRENT_TYPE = decode_datatype(action[3+2*comm_size]);
- MPI_CURRENT_TYPE2 = decode_datatype(action[4+2*comm_size]);
+ if(action[3+comm_size]) {
+ MPI_CURRENT_TYPE = decode_datatype(action[3+comm_size]);
+ MPI_CURRENT_TYPE2 = decode_datatype(action[4+comm_size]);
} else {
MPI_CURRENT_TYPE = MPI_DEFAULT_TYPE;
MPI_CURRENT_TYPE2 = MPI_DEFAULT_TYPE;
for(i=0;i<comm_size;i++) {
recvcounts[i] = atoi(action[i+3]);
recv_sum=recv_sum+recvcounts[i];
- disps[i] = atoi(action[i+3+comm_size]);
}
void *recvbuf = calloc(recv_sum, smpi_datatype_size(MPI_CURRENT_TYPE2));
#ifdef HAVE_TRACING
int rank = MPI_COMM_WORLD != MPI_COMM_NULL ? smpi_process_index() : -1;
TRACE_smpi_computing_out(rank);
- TRACE_smpi_collective_in(rank, -1, __FUNCTION__);
+ TRACE_smpi_collective_in(rank, -1, __FUNCTION__,sendcount*smpi_datatype_size(MPI_CURRENT_TYPE));
#endif
-
+
mpi_coll_allgatherv_fun(sendbuf, sendcount, MPI_CURRENT_TYPE, recvbuf, recvcounts, disps, MPI_CURRENT_TYPE2, MPI_COMM_WORLD);
#ifdef HAVE_TRACING
TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
TRACE_smpi_computing_in(rank);
#endif
-
+
log_timed_action (action, clock);
xbt_free(sendbuf);
xbt_free(recvbuf);
/*
The structure of the allToAllV action for the rank 0 (total 4 processes)
is the following:
- 0 allToAllV 100 1 7 10 12 5 10 20 45 100 1 70 10 5 1 5 77 90
+ 0 allToAllV 100 1 7 10 12 100 1 70 10 5
where:
1) 100 is the size of the send buffer *sizeof(int),
2) 1 7 10 12 is the sendcounts array
- 3) 5 10 20 45 is the sdispls array
- 4) 100*sizeof(int) is the size of the receiver buffer
- 5) 1 70 10 5 is the recvcounts array
- 6) 1 5 77 90 is the rdispls array
-
+ 3) 100*sizeof(int) is the size of the receiver buffer
+ 4) 1 70 10 5 is the recvcounts array
+
*/
-
-
+
+
double clock = smpi_process_simulated_elapsed();
-
+
int comm_size = smpi_comm_size(MPI_COMM_WORLD);
int send_buf_size=0,recv_buf_size=0,i=0;
int *sendcounts = xbt_new0(int, comm_size);
int *recvdisps = xbt_new0(int, comm_size);
MPI_Datatype MPI_CURRENT_TYPE2;
-
+
send_buf_size=parse_double(action[2]);
- recv_buf_size=parse_double(action[3+2*comm_size]);
- if(action[4+4*comm_size]) {
- MPI_CURRENT_TYPE=decode_datatype(action[4+4*comm_size]);
- MPI_CURRENT_TYPE2=decode_datatype(action[5+4*comm_size]);
+ recv_buf_size=parse_double(action[3+comm_size]);
+ if(action[4+2*comm_size]) {
+ MPI_CURRENT_TYPE=decode_datatype(action[4+2*comm_size]);
+ MPI_CURRENT_TYPE2=decode_datatype(action[5+2*comm_size]);
}
else {
MPI_CURRENT_TYPE=MPI_DEFAULT_TYPE;
MPI_CURRENT_TYPE2=MPI_DEFAULT_TYPE;
}
-
+
void *sendbuf = calloc(send_buf_size, smpi_datatype_size(MPI_CURRENT_TYPE));
void *recvbuf = calloc(recv_buf_size, smpi_datatype_size(MPI_CURRENT_TYPE2));
for(i=0;i<comm_size;i++) {
sendcounts[i] = atoi(action[i+3]);
- senddisps[i] = atoi(action[i+3+comm_size]);
- recvcounts[i] = atoi(action[i+4+2*comm_size]);
- recvdisps[i] = atoi(action[i+4+3*comm_size]);
+ recvcounts[i] = atoi(action[i+4+comm_size]);
}
-
+
#ifdef HAVE_TRACING
int rank = MPI_COMM_WORLD != MPI_COMM_NULL ? smpi_process_index() : -1;
TRACE_smpi_computing_out(rank);
- TRACE_smpi_collective_in(rank, -1, __FUNCTION__);
+ int count=0;
+ for(i=0;i<comm_size;i++) count+=sendcounts[i];
+ TRACE_smpi_collective_in(rank, -1, __FUNCTION__,count*smpi_datatype_size(MPI_CURRENT_TYPE));
#endif
- mpi_coll_alltoallv_fun(sendbuf, sendcounts, senddisps, MPI_CURRENT_TYPE,
+ mpi_coll_alltoallv_fun(sendbuf, sendcounts, senddisps, MPI_CURRENT_TYPE,
recvbuf, recvcounts, recvdisps, MPI_CURRENT_TYPE,
MPI_COMM_WORLD);
#ifdef HAVE_TRACING
TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
TRACE_smpi_computing_in(rank);
#endif
-
+
log_timed_action (action, clock);
xbt_free(sendbuf);
xbt_free(recvbuf);
xbt_free(recvcounts);
xbt_free(senddisps);
xbt_free(recvdisps);
-
-
}
void smpi_replay_init(int *argc, char***argv){