//#include <star-reduction.c>

/* default segment size: 8 KiB per pipeline block */
int reduce_arrival_pattern_aware_segment_size_in_byte = 8192;

#define HEADER_SIZE 1024
#define MAX_NODE 1024           /* upper bound on communicator size */
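/*
 * Algorithm outline: every non-root rank first sends a 1-byte "ready"
 * message to rank 0.  Rank 0 polls for these messages with MPI_Iprobe,
 * records the arrival order in header_buf (terminated by -1), and sends the
 * header to the first arriver.  The header is forwarded along the arrival
 * chain; each rank reduces its contribution into the partial result and
 * passes it on, so the last arriver returns the reduced data to rank 0.
 * Payloads larger than one segment are pipelined segment by segment.
 */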
/* Non-topology-specific pipelined linear-reduce function */
int smpi_coll_tuned_reduce_arrival_pattern_aware(void *buf, void *rbuf,
                                                 int count,
                                                 MPI_Datatype datatype,
                                                 MPI_Op op, int root,
                                                 MPI_Comm comm)
{
  int rank, size;
  int i;
  int tag = 50;
  int sent_count;
  int header_index;
  MPI_Status status;
  MPI_Request request;
  MPI_Request *send_request_array;
  MPI_Request *recv_request_array;
  MPI_Status *send_status_array;
  MPI_Status *recv_status_array;

  MPI_Status temp_status_array[MAX_NODE];

  int flag_array[MAX_NODE];
  int already_received[MAX_NODE];

  int header_buf[HEADER_SIZE];  /* arrival order of ranks, terminated by -1 */
  char temp_buf[MAX_NODE];      /* 1-byte ready messages, one slot per rank */
  MPI_Aint extent;
  MPI_Type_extent(datatype, &extent);
  /* source and destination */
  int to, from;

  MPI_Comm_rank(comm, &rank);
  MPI_Comm_size(comm, &size);
  /* segment size in number of elements (not bytes) */
  int segment = reduce_arrival_pattern_aware_segment_size_in_byte / extent;

  /* number of full segments in the pipeline */
  int pipe_length = count / segment;

  /* buffer offset between consecutive segments = segment size in bytes */
  int increment = segment * extent;

  /* if count is not divisible by the segment size, the small remainder is
     handled by the native implementation at the end */
  int remainder = count % segment;
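  /* Example: with the default 8192-byte segment and a 4-byte MPI_INT,
     segment = 2048 elements; a count of 10000 then gives pipe_length = 4
     full segments plus a remainder of 1808 elements. */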
  /* value == 0 means the root has not yet sent the header to that node */
  for (i = 0; i < MAX_NODE; i++) {
    already_received[i] = 0;
  }

  char *tmp_buf = (char *) malloc(count * extent);

  MPI_Sendrecv(buf, count, datatype, rank, tag, rbuf, count, datatype, rank,
               tag, comm, &status);
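  /* The send-to-self exchange above is a datatype-correct local copy of
     this rank's contribution into rbuf; rbuf then serves as the running
     partial-reduction buffer throughout. */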
  /* when the message fits in one segment => no pipelining */
  if (count <= segment) {

    if (rank == 0) {
      sent_count = 0;

      while (sent_count < (size - 1)) {
        /* probe ranks that have not reported ready yet */
        for (i = 1; i < size; i++) {
          if (already_received[i] == 0)
            MPI_Iprobe(i, MPI_ANY_TAG, comm, &flag_array[i],
                       MPI_STATUS_IGNORE);
        }
        header_index = 0;

        /* receive the 1-byte ready messages in arrival order */
        for (i = 1; i < size; i++) {
          /* a 1-byte message has arrived */
          if ((flag_array[i] == 1) && (already_received[i] == 0)) {
            MPI_Recv(temp_buf, 1, MPI_CHAR, i, tag, comm, &status);
            header_buf[header_index] = i;
            header_index++;
            sent_count++;
            //printf("root send to %d recv from %d : data = ",to,from);
            /*
            for (i = 0; i <= header_index; i++) {
              printf("%d ", header_buf[i]);
            }
            printf("\n");
            */
            /* data will be received in the next step */
            already_received[i] = 1;
          }
        }
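        /* header_buf now lists, in arrival order, the ranks that reported
           ready in this round; the root sends it to the first arriver and
           receives the reduced data back from the last one. */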
        /* send the header, then receive and reduce the chain's result */
        if (header_index != 0) {
          header_buf[header_index] = -1;   /* terminate the ordering list */
          to = header_buf[0];
          from = header_buf[header_index - 1];

          MPI_Send(header_buf, HEADER_SIZE, MPI_INT, to, tag, comm);
          MPI_Recv(tmp_buf, count, datatype, from, tag, comm, &status);
          star_reduction(op, tmp_buf, rbuf, &count, &datatype);
        }
      }                         /* while loop */
    }

    /* non-root ranks */
    else {
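      /*
       * Each non-root rank: (1) tells the root it is ready, (2) waits for
       * the arrival-order header, (3) locates its own position in it,
       * (4) forwards the header to its successor, and (5) joins the reduce
       * chain: the head only sends, interior nodes receive, reduce, and
       * send, and the last node's successor is the root.
       */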
      /* send 1-byte ready message to the root */
      MPI_Send(temp_buf, 1, MPI_CHAR, 0, tag, comm);

      /* wait for the header; data follow later */
      MPI_Recv(header_buf, HEADER_SIZE, MPI_INT, MPI_ANY_SOURCE, tag, comm,
               &status);
      //MPI_Recv(buf,count,datatype,MPI_ANY_SOURCE,tag,comm,&status);
      /* find this rank's position in the arrival order */
      int myordering = 0;
      while (rank != header_buf[myordering]) {
        myordering++;
      }

      /* forward the header to the successor in the chain, if any */
      if (header_buf[myordering + 1] != -1) {
        MPI_Send(header_buf, HEADER_SIZE, MPI_INT, header_buf[myordering + 1],
                 tag, comm);
      }
      //printf("node %d ordering %d\n",rank,myordering);
      /* receive, reduce, and forward data */

      /* head of the chain: nothing to receive, send local data only */
      if (myordering == 0) {
        if (header_buf[myordering + 1] == -1) {
          to = 0;               /* alone in the chain: send straight to the root */
        } else {
          to = header_buf[myordering + 1];
        }
        MPI_Send(rbuf, count, datatype, to, tag, comm);
      }
      /* interior or tail node: receive, reduce, then send */
      else {
        if (header_buf[myordering + 1] == -1) {
          to = 0;
        } else {
          to = header_buf[myordering + 1];
        }
        from = header_buf[myordering - 1];
        MPI_Recv(tmp_buf, count, datatype, from, tag, comm, &status);
        star_reduction(op, tmp_buf, rbuf, &count, &datatype);
        MPI_Send(rbuf, count, datatype, to, tag, comm);
      }
    }                           /* end of non-root */
  }
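  /* Pipelined case: the same arrival-pattern protocol, but the payload is
     split into pipe_length segments so that the receive, reduce, and
     forward steps for different segments overlap along the chain. */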
  else {
    //printf("node %d start\n",rank);

    send_request_array =
        (MPI_Request *) malloc((size + pipe_length) * sizeof(MPI_Request));
    recv_request_array =
        (MPI_Request *) malloc((size + pipe_length) * sizeof(MPI_Request));
    send_status_array =
        (MPI_Status *) malloc((size + pipe_length) * sizeof(MPI_Status));
    recv_status_array =
        (MPI_Status *) malloc((size + pipe_length) * sizeof(MPI_Status));
    if (rank == 0) {
      sent_count = 0;

      int will_send[MAX_NODE];
      for (i = 0; i < MAX_NODE; i++)
        will_send[i] = 0;

      /* loop until data from every non-root rank has been received */
      while (sent_count < (size - 1)) {
        /* probe each rank not yet handled for a 1-byte ready message */
        for (i = 1; i < size; i++) {
          if ((already_received[i] == 0) && (will_send[i] == 0)) {
            MPI_Iprobe(i, MPI_ANY_TAG, comm, &flag_array[i],
                       &temp_status_array[i]);
            if (flag_array[i] == 1) {
              will_send[i] = 1;
              MPI_Recv(&temp_buf[i], 1, MPI_CHAR, i, tag, comm, &status);
              //printf("recv from %d\n",i);
            }
          }
        }                       /* end of probing */
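        /* Two flags per rank: will_send marks a ready message seen during
           this round's probing; already_received marks ranks whose position
           was recorded in an earlier round.  The loop below promotes the
           former to the latter while building this round's header. */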
        header_index = 0;

        /* ranks whose ready message arrived in this round go into the header */
        for (i = 1; i < size; i++) {
          if ((will_send[i] == 1) && (already_received[i] == 0)) {
            header_buf[header_index] = i;
            header_index++;
            sent_count++;

            /* that rank will send its data in the next step */
            already_received[i] = 1;
          }
        }
        /* send header followed by data */
        if (header_index != 0) {
          header_buf[header_index] = -1;   /* terminate the ordering list */
          to = header_buf[0];

          /* send header */
          MPI_Send(header_buf, HEADER_SIZE, MPI_INT, to, tag, comm);

          /* recv data - pipelined, segment by segment */
          from = header_buf[header_index - 1];
          for (i = 0; i < pipe_length; i++) {
            MPI_Recv(tmp_buf + (i * increment), segment, datatype, from, tag,
                     comm, &status);
            star_reduction(op, tmp_buf + (i * increment),
                           (char *)rbuf + (i * increment), &segment, &datatype);
          }
        }
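        /* Note that only the tail of this round's chain sends data to the
           root: every other rank listed in the header has already folded
           its contribution into the partial result as it passed along. */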
      }                         /* while loop (sent_count < size-1) */
    }
    /* non-root ranks */
    else {
      /* send 1-byte ready message to the root */
      MPI_Send(temp_buf, 1, MPI_CHAR, 0, tag, comm);

      /* wait for the header; forward it when required */
      MPI_Irecv(header_buf, HEADER_SIZE, MPI_INT, MPI_ANY_SOURCE, tag, comm,
                &request);
      MPI_Wait(&request, MPI_STATUS_IGNORE);
      /* find this rank's position in the arrival order */
      int myordering = 0;
      while (rank != header_buf[myordering]) {
        myordering++;
      }

      /* forward the header when required */
      if (header_buf[myordering + 1] != -1) {
        MPI_Send(header_buf, HEADER_SIZE, MPI_INT, header_buf[myordering + 1],
                 tag, comm);
      }
      /* destination: successor in the chain, or the root at the tail */
      if (header_buf[myordering + 1] == -1) {
        to = 0;
      } else {
        to = header_buf[myordering + 1];
      }
      /* head of the chain: send local data only */
      if (myordering == 0) {
        for (i = 0; i < pipe_length; i++) {
          MPI_Isend((char *)rbuf + (i * increment), segment, datatype, to, tag,
                    comm, &send_request_array[i]);
        }
        MPI_Waitall(pipe_length, send_request_array, send_status_array);
      }
      /* interior or tail node: receive, reduce, and send, segment by segment */
      else {
        from = header_buf[myordering - 1];

        /* pre-post all receives so reduction overlaps communication */
        for (i = 0; i < pipe_length; i++) {
          MPI_Irecv(tmp_buf + (i * increment), segment, datatype, from, tag,
                    comm, &recv_request_array[i]);
        }
        for (i = 0; i < pipe_length; i++) {
          MPI_Wait(&recv_request_array[i], MPI_STATUS_IGNORE);
          star_reduction(op, tmp_buf + (i * increment),
                         (char *)rbuf + (i * increment), &segment, &datatype);
          MPI_Isend((char *)rbuf + (i * increment), segment, datatype, to, tag,
                    comm, &send_request_array[i]);
        }
        MPI_Waitall(pipe_length, send_request_array, send_status_array);
      }
    }                           /* end of non-root */
    free(send_request_array);
    free(recv_request_array);
    free(send_status_array);
    free(recv_status_array);

    //printf("node %d done\n",rank);
  }                             /* end of pipelined case */
  /* if the root is not rank 0, relocate the result after finishing;
     this could be made faster by using logical src/dst ranks throughout */
  if (root != 0) {
    if (rank == 0) {
      MPI_Send(rbuf, count, datatype, root, tag, comm);
    } else if (rank == root) {
      MPI_Recv(rbuf, count, datatype, 0, tag, comm, &status);
    }
  }
  /* when count is not divisible by the segment size, reduce the remainder
     with the native MPI_Reduce */
  if ((remainder != 0) && (count > segment)) {
    MPI_Reduce((char *)buf + (pipe_length * increment),
               (char *)rbuf + (pipe_length * increment), remainder, datatype,
               op, root, comm);
  }

  free(tmp_buf);
  return MPI_SUCCESS;
}
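/*
 * Usage sketch (editor's illustration, not part of the original file): the
 * routine is a drop-in replacement for MPI_Reduce, e.g.
 *
 *   int x = rank, sum = 0;
 *   smpi_coll_tuned_reduce_arrival_pattern_aware(&x, &sum, 1, MPI_INT,
 *                                                MPI_SUM, 0, MPI_COMM_WORLD);
 *   // on rank 0, sum == 0 + 1 + ... + (size - 1)
 */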