/* Copyright (c) 2013-2014. The SimGrid Team.
 * All rights reserved. */

/* This program is free software; you can redistribute it and/or modify it
 * under the terms of the license (GNU LGPL) which comes with this package. */

#include "colls_private.h"
//#include <star-reduction.c>

int reduce_arrival_pattern_aware_segment_size_in_byte = 8192;

#ifndef HEADER_SIZE
#define HEADER_SIZE 1024
#endif

/* assumed bound on communicator size for the static bookkeeping arrays below */
#ifndef MAX_NODE
#define MAX_NODE 1024
#endif
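
/* Overview (summarizing the code below): every non-root rank announces its
 * arrival to the root with a 1-byte message. The root records arrivals in a
 * header (a -1 terminated list of ranks in arrival order) and sends it to the
 * first arriver; each rank then receives the partial result from its
 * predecessor in that chain, reduces its own contribution into it, and
 * forwards it, and the root finally reduces the data received from the last
 * rank in the chain. Payloads larger than one segment are pipelined. */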
/* Non-topology-specific pipelined linear-reduce function */
int smpi_coll_tuned_reduce_arrival_pattern_aware(void *buf, void *rbuf,
                                                 int count,
                                                 MPI_Datatype datatype,
                                                 MPI_Op op, int root,
                                                 MPI_Comm comm)
{
  int rank = smpi_comm_rank(comm);
  int tag = -COLL_TAG_REDUCE;
  MPI_Status status;
  MPI_Request request;
  MPI_Request *send_request_array;
  MPI_Request *recv_request_array;
  MPI_Status *send_status_array;
  MPI_Status *recv_status_array;

  MPI_Status temp_status_array[MAX_NODE];
  int size = smpi_comm_size(comm);
  int i;

  int sent_count;
  int header_index;
  int flag_array[MAX_NODE];
  int already_received[MAX_NODE];

  int header_buf[HEADER_SIZE];
  char temp_buf[MAX_NODE];
  MPI_Aint lb, extent;
  smpi_datatype_extent(datatype, &lb, &extent);
  /* source and destination ranks in the reduction chain */
  int to, from;

  /* segment is segment size in number of elements (not bytes) */
  int segment = reduce_arrival_pattern_aware_segment_size_in_byte / extent;

  /* number of full segments in the pipeline */
  int pipe_length = count / segment;

  /* buffer offset used for sending and receiving data = segment size in bytes */
  int increment = segment * extent;

  /* if the input size is not divisible by the segment size, the small
     remainder is handled by a separate reduce at the end */
  int remainder = count % segment;
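
  /* Illustrative arithmetic (example values, not from the original source):
     with MPI_DOUBLE (extent 8), the default 8192-byte segment holds 1024
     elements; for count = 5000 this gives pipe_length = 4 full segments and
     remainder = 904 elements, left for the final cleanup reduce. */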
  /* value == 0 means the root has not yet counted the announcement
     (or sent the header) for node i */
  for (i = 0; i < MAX_NODE; i++) {
    already_received[i] = 0;
  }
  char *tmp_buf = (char *) smpi_get_tmp_sendbuffer(count * extent);
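
  /* The self-sendrecv below copies this rank's input into rbuf, so rbuf
     holds the local contribution before any remote data is reduced into it. */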
  smpi_mpi_sendrecv(buf, count, datatype, rank, tag, rbuf, count, datatype, rank,
                    tag, comm, &status);
  /* when a message is smaller than a block size => no pipeline */
  if (count <= segment) {

    if (rank == 0) {
      sent_count = 0;

      while (sent_count < (size - 1)) {
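
        /* Probe each rank whose announcement has not yet been counted; the
           short sleep below yields the simulated host between probes instead
           of busy-waiting at full speed. */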
        for (i = 1; i < size; i++) {
          if (already_received[i] == 0) {
            smpi_mpi_iprobe(i, MPI_ANY_TAG, comm, &flag_array[i],
                            MPI_STATUS_IGNORE);
            simcall_process_sleep(0.0001);
          }
        }
        header_index = 0;

        /* receive the 1-byte announcements that have arrived */
        for (i = 0; i < size; i++) {
          if (i == rank)
            continue;

          /* 1-byte message arrived */
          if ((flag_array[i] == 1) && (already_received[i] == 0)) {
            smpi_mpi_recv(temp_buf, 1, MPI_CHAR, i, tag, comm, &status);
            header_buf[header_index] = i;
            header_index++;
            sent_count++;

            //printf("root send to %d recv from %d : data = ",to,from);
            /*
               for (i=0;i<=header_index;i++) {
               printf("%d ",header_buf[i]);
               }
               printf("\n");
             */

            /* will receive the data in the next step */
            already_received[i] = 1;
          }
        }
        /* send header followed by receive and reduce data */
        if (header_index != 0) {
          header_buf[header_index] = -1;
          to = header_buf[0];
          from = header_buf[header_index - 1];

          smpi_mpi_send(header_buf, HEADER_SIZE, MPI_INT, to, tag, comm);
          smpi_mpi_recv(tmp_buf, count, datatype, from, tag, comm, &status);
          smpi_op_apply(op, tmp_buf, rbuf, &count, &datatype);
        }
      }                         /* while loop */
    }
    /* non-root */
    else {
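      /* A non-root rank announces its arrival to the root, waits for the
         header, locates its own position in the arrival-ordered chain, and
         forwards the header to its successor before the data phase. */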
      /* send 1-byte message to root */
      smpi_mpi_send(temp_buf, 1, MPI_CHAR, 0, tag, comm);

      /* wait for header and data, forward when required */
      smpi_mpi_recv(header_buf, HEADER_SIZE, MPI_INT, MPI_ANY_SOURCE, tag, comm,
                    &status);
      //smpi_mpi_recv(buf,count,datatype,MPI_ANY_SOURCE,tag,comm,&status);
      /* search for this rank's position in the chain */
      int myordering = 0;
      while (rank != header_buf[myordering]) {
        myordering++;
      }

      /* forward the header to the next rank in the chain, if any */
      if (header_buf[myordering + 1] != -1) {
        smpi_mpi_send(header_buf, HEADER_SIZE, MPI_INT, header_buf[myordering + 1],
                      tag, comm);
      }
      //printf("node %d ordering %d\n",rank,myordering);
      /* receive, reduce, and forward data */

      /* send only */
      if (myordering == 0) {
        if (header_buf[myordering + 1] == -1) {
          to = 0;
        } else {
          to = header_buf[myordering + 1];
        }
        smpi_mpi_send(rbuf, count, datatype, to, tag, comm);
      }
      /* recv, reduce, send */
      else {
        if (header_buf[myordering + 1] == -1) {
          to = 0;
        } else {
          to = header_buf[myordering + 1];
        }
        from = header_buf[myordering - 1];
        smpi_mpi_recv(tmp_buf, count, datatype, from, tag, comm, &status);
        smpi_op_apply(op, tmp_buf, rbuf, &count, &datatype);
        smpi_mpi_send(rbuf, count, datatype, to, tag, comm);
      }
    }                           /* non-root */
  }
  /* pipelined case: count > segment */
  else {
    //printf("node %d start\n",rank);
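
    /* Same arrival-ordered chain as above, but the payload now moves in
       pipe_length segments so that a rank can forward segment i while
       segment i+1 is still in flight. The request/status arrays are sized
       generously (size + pipe_length); only pipe_length entries are used. */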
    send_request_array =
        (MPI_Request *) xbt_malloc((size + pipe_length) * sizeof(MPI_Request));
    recv_request_array =
        (MPI_Request *) xbt_malloc((size + pipe_length) * sizeof(MPI_Request));
    send_status_array =
        (MPI_Status *) xbt_malloc((size + pipe_length) * sizeof(MPI_Status));
    recv_status_array =
        (MPI_Status *) xbt_malloc((size + pipe_length) * sizeof(MPI_Status));
    if (rank == 0) {
      sent_count = 0;

      int will_send[MAX_NODE];
      for (i = 0; i < MAX_NODE; i++)
        will_send[i] = 0;

      /* loop until all data are received (sent) */
      while (sent_count < (size - 1)) {
        int k;
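
        /* One probing pass per iteration of the outer while loop; the k loop
           runs a single round here, presumably kept as a loop so the number
           of probe rounds per batch can be tuned easily. */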
        for (k = 0; k < 1; k++) {
          for (i = 1; i < size; i++) {
            if ((already_received[i] == 0) && (will_send[i] == 0)) {
              smpi_mpi_iprobe(i, MPI_ANY_TAG, comm, &flag_array[i],
                              &temp_status_array[i]);
              if (flag_array[i] == 1) {
                will_send[i] = 1;
                smpi_mpi_recv(&temp_buf[i], 1, MPI_CHAR, i, tag, comm,
                              &status);
                //printf("recv from %d\n",i);
                i = 1;
              }
            }
          }
        }                       /* end of probing */
        header_index = 0;

        /* register the 1-byte messages received in this probing round */
        for (i = 1; i < size; i++) {
          /* message arrived in this round (put in the header) */
          if ((will_send[i] == 1) && (already_received[i] == 0)) {
            header_buf[header_index] = i;
            header_index++;
            sent_count++;

            /* will send in the next step */
            already_received[i] = 1;
          }
        }
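
        /* All arrivals of this round are batched into one header, so a whole
           group of newly arrived ranks forms a single reduction chain. */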
        /* send header followed by data */
        if (header_index != 0) {
          header_buf[header_index] = -1;
          to = header_buf[0];

          /* send header */
          smpi_mpi_send(header_buf, HEADER_SIZE, MPI_INT, to, tag, comm);
          /* recv data - pipeline */
          from = header_buf[header_index - 1];
          for (i = 0; i < pipe_length; i++) {
            smpi_mpi_recv(tmp_buf + (i * increment), segment, datatype, from, tag,
                          comm, &status);
            smpi_op_apply(op, tmp_buf + (i * increment),
                          (char *)rbuf + (i * increment), &segment, &datatype);
          }
        }
      }                         /* while loop (sent_count < size-1) */
    }
    /* non-root */
    else {
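      /* As in the non-pipelined branch, a non-root rank announces itself,
         waits for the header, and finds its position in the chain; the data
         phase below then moves segment by segment. */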
      /* send 1-byte message to root */
      smpi_mpi_send(temp_buf, 1, MPI_CHAR, 0, tag, comm);

      /* wait for the header; forward it when required */
      request = smpi_mpi_irecv(header_buf, HEADER_SIZE, MPI_INT, MPI_ANY_SOURCE, tag, comm);
      smpi_mpi_wait(&request, MPI_STATUS_IGNORE);
      /* search for this rank's position in the chain */
      int myordering = 0;
      while (rank != header_buf[myordering]) {
        myordering++;
      }
      /* send header when required */
      if (header_buf[myordering + 1] != -1) {
        smpi_mpi_send(header_buf, HEADER_SIZE, MPI_INT, header_buf[myordering + 1],
                      tag, comm);
      }
      /* (receive, reduce), and send data */
      if (header_buf[myordering + 1] == -1) {
        to = 0;
      } else {
        to = header_buf[myordering + 1];
      }
      /* send only */
      if (myordering == 0) {
        for (i = 0; i < pipe_length; i++) {
          send_request_array[i] = smpi_mpi_isend((char *)rbuf + (i * increment), segment, datatype, to, tag, comm);
        }
        smpi_mpi_waitall(pipe_length, send_request_array, send_status_array);
      }
      /* receive, reduce, and send */
      else {
        from = header_buf[myordering - 1];
        for (i = 0; i < pipe_length; i++) {
          recv_request_array[i] = smpi_mpi_irecv(tmp_buf + (i * increment), segment, datatype, from, tag, comm);
        }
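
        /* All receives are posted up front; each segment is then reduced and
           forwarded as soon as it completes, overlapping the send of segment
           i with the receive of segment i+1. */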
        for (i = 0; i < pipe_length; i++) {
          smpi_mpi_wait(&recv_request_array[i], MPI_STATUS_IGNORE);
          smpi_op_apply(op, tmp_buf + (i * increment), (char *)rbuf + (i * increment),
                        &segment, &datatype);
          send_request_array[i] = smpi_mpi_isend((char *)rbuf + (i * increment), segment, datatype, to, tag, comm);
        }
        smpi_mpi_waitall(pipe_length, send_request_array, send_status_array);
      }
    }                           /* non-root */
    free(send_request_array);
    free(recv_request_array);
    free(send_status_array);
    free(recv_status_array);

    //printf("node %d done\n",rank);
  }                             /* end of pipelined case */
  /* if root is not zero, forward the result to root once finished;
     this could be made faster by using logical src/dst ranks throughout */
  if (root != 0) {
    if (rank == 0) {
      smpi_mpi_send(rbuf, count, datatype, root, tag, comm);
    } else if (rank == root) {
      smpi_mpi_recv(rbuf, count, datatype, 0, tag, comm, &status);
    }
  }
  /* when count is not divisible by the segment size, use the default reduce
     for the remainder */
  if ((remainder != 0) && (count > segment)) {
    smpi_mpi_reduce((char *)buf + (pipe_length * increment),
                    (char *)rbuf + (pipe_length * increment), remainder,
                    datatype, op, root, comm);
  }
  smpi_free_tmp_buffer(tmp_buf);

  return MPI_SUCCESS;
}