-#include "colls.h"
+/* Copyright (c) 2013-2014. The SimGrid Team.
+ * All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
+#include "colls_private.h"
/* change number of core per smp-node
we assume that number of core per process will be the same for all implementations */
{
int comm_size, rank;
void *tmp_buf;
- int tag = 50;
+ int tag = COLL_TAG_ALLREDUCE;
int mask, src, dst;
MPI_Status status;
- int num_core = NUM_CORE;
+ int num_core = simcall_host_get_core(SIMIX_host_self());
+  // Do we use the default value, or the number of cores from the platform?
+  // If the platform reports 1 core, it may have been modeled as 1 node = 1 core,
+ if (num_core == 1) num_core = NUM_CORE;
/*
#ifdef MPICH2_REDUCTION
MPI_User_function * uop = MPIR_Op_table[op % 16 - 1];
uop = op_ptr->op;
#endif
*/
- MPI_Comm_size(comm, &comm_size);
- MPI_Comm_rank(comm, &rank);
+ comm_size = smpi_comm_size(comm);
+ rank = smpi_comm_rank(comm);
MPI_Aint extent;
- MPI_Type_extent(dtype, &extent);
- tmp_buf = (void *) malloc(count * extent);
+ extent = smpi_datatype_get_extent(dtype);
+ tmp_buf = (void *) xbt_malloc(count * extent);
int intra_rank, inter_rank;
intra_rank = rank % num_core;
}
- MPI_Sendrecv(send_buf, count, dtype, rank, tag,
+ smpi_mpi_sendrecv(send_buf, count, dtype, rank, tag,
recv_buf, count, dtype, rank, tag, comm, &status);
src = (inter_rank * num_core) + (intra_rank | mask);
// if (src < ((inter_rank + 1) * num_core)) {
if (src < comm_size) {
- MPI_Recv(tmp_buf, count, dtype, src, tag, comm, &status);
- star_reduction(op, tmp_buf, recv_buf, &count, &dtype);
+ smpi_mpi_recv(tmp_buf, count, dtype, src, tag, comm, &status);
+ smpi_op_apply(op, tmp_buf, recv_buf, &count, &dtype);
//printf("Node %d recv from node %d when mask is %d\n", rank, src, mask);
}
} else {
dst = (inter_rank * num_core) + (intra_rank & (~mask));
- MPI_Send(recv_buf, count, dtype, dst, tag, comm);
+ smpi_mpi_send(recv_buf, count, dtype, dst, tag, comm);
//printf("Node %d send to node %d when mask is %d\n", rank, dst, mask);
break;
}
((inter_rank - 2 - i +
inter_comm_size) % inter_comm_size) * seg_count * extent;
- MPI_Sendrecv((char *) recv_buf + send_offset, seg_count, dtype, to,
+ smpi_mpi_sendrecv((char *) recv_buf + send_offset, seg_count, dtype, to,
tag + i, tmp_buf, seg_count, dtype, from, tag + i, comm,
&status);
// result is in rbuf
- star_reduction(op, tmp_buf, (char *) recv_buf + recv_offset, &seg_count,
+ smpi_op_apply(op, tmp_buf, (char *) recv_buf + recv_offset, &seg_count,
&dtype);
}
((inter_rank - 1 - i +
inter_comm_size) % inter_comm_size) * seg_count * extent;
- MPI_Sendrecv((char *) recv_buf + send_offset, seg_count, dtype, to,
+ smpi_mpi_sendrecv((char *) recv_buf + send_offset, seg_count, dtype, to,
tag + i, (char *) recv_buf + recv_offset, seg_count, dtype,
from, tag + i, comm, &status);
if ((mask & inter_rank) == 0) {
src = (inter_rank | mask) * num_core;
if (src < comm_size) {
- MPI_Recv(tmp_buf, count, dtype, src, tag, comm, &status);
+ smpi_mpi_recv(tmp_buf, count, dtype, src, tag, comm, &status);
(* uop) (tmp_buf, recv_buf, &count, &dtype);
//printf("Node %d recv from node %d when mask is %d\n", rank, src, mask);
}
}
else {
dst = (inter_rank & (~mask)) * num_core;
- MPI_Send(recv_buf, count, dtype, dst, tag, comm);
+ smpi_mpi_send(recv_buf, count, dtype, dst, tag, comm);
//printf("Node %d send to node %d when mask is %d\n", rank, dst, mask);
break;
}
if (inter_rank & mask) {
src = (inter_rank - mask) * num_core;
//printf("Node %d recv from node %d when mask is %d\n", rank, src, mask);
- MPI_Recv(recv_buf, count, dtype, src, tag, comm, &status);
+ smpi_mpi_recv(recv_buf, count, dtype, src, tag, comm, &status);
break;
}
mask <<= 1;
dst = (inter_rank + mask) * num_core;
if (dst < comm_size) {
//printf("Node %d send to node %d when mask is %d\n", rank, dst, mask);
- MPI_Send(recv_buf, count, dtype, dst, tag, comm);
+ smpi_mpi_send(recv_buf, count, dtype, dst, tag, comm);
}
}
mask >>= 1;
if (intra_rank & mask) {
src = (inter_rank * num_core) + (intra_rank - mask);
//printf("Node %d recv from node %d when mask is %d\n", rank, src, mask);
- MPI_Recv(recv_buf, count, dtype, src, tag, comm, &status);
+ smpi_mpi_recv(recv_buf, count, dtype, src, tag, comm, &status);
break;
}
mask <<= 1;
dst = (inter_rank * num_core) + (intra_rank + mask);
if (dst < comm_size) {
//printf("Node %d send to node %d when mask is %d\n", rank, dst, mask);
- MPI_Send(recv_buf, count, dtype, dst, tag, comm);
+ smpi_mpi_send(recv_buf, count, dtype, dst, tag, comm);
}
mask >>= 1;
}