/* Copyright (c) 2013-2014. The SimGrid Team.
 * All rights reserved. */

/* This program is free software; you can redistribute it and/or modify it
 * under the terms of the license (GNU LGPL) which comes with this package. */

#include "colls_private.h"

static int bcast_NTSL_segment_size_in_byte = 8192;

#define HEADER_SIZE 1024
#define MAX_NODE 1024
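
/* Overview (editor's sketch, inferred from the code below rather than taken
   from an authoritative description): each non-root rank announces its
   arrival by sending a 1-byte message to the root. The root repeatedly
   probes for these messages, chains the ranks that have already arrived
   into header_buf (terminated by -1), and sends the header followed by the
   data to the first rank in the chain; every rank then forwards the header
   and data to its successor. Messages larger than one segment are split
   into segment-sized chunks and pipelined along the chain. */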
/* Non-topology-specific pipelined linear-bcast function */
int smpi_coll_tuned_bcast_arrival_pattern_aware(void *buf, int count,
                                                MPI_Datatype datatype, int root,
                                                MPI_Comm comm)
{
  int tag = -COLL_TAG_BCAST;

  MPI_Status status;
  MPI_Request request;
  MPI_Request *send_request_array;
  MPI_Request *recv_request_array;
  MPI_Status *send_status_array;
  MPI_Status *recv_status_array;
  MPI_Status temp_status_array[MAX_NODE];

  int rank, size;
  int i, j;

  int sent_count;
  int header_index;

  int flag_array[MAX_NODE];
  int already_sent[MAX_NODE];
  int to_clean[MAX_NODE];
  int header_buf[HEADER_SIZE];
  char temp_buf[MAX_NODE];

  MPI_Aint extent;
  extent = smpi_datatype_get_extent(datatype);

  /* destination of the current send */
  int to;
  rank = smpi_comm_rank(comm);
  size = smpi_comm_size(comm);
  /* segment is the segment size in number of elements (not bytes) */
  int segment = bcast_NTSL_segment_size_in_byte / extent;

  /* number of full segments in the pipeline */
  int pipe_length = count / segment;

  /* buffer offset between consecutive chunks = segment size in bytes */
  int increment = segment * extent;

  /* if the input size is not divisible by the segment size,
     the small remainder is handled by the native implementation */
  int remainder = count % segment;
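
  /* Worked example (illustrative values only): broadcasting count = 5000
     MPI_INT elements with extent = 4 gives segment = 8192 / 4 = 2048
     elements, pipe_length = 5000 / 2048 = 2 full segments, increment =
     2048 * 4 = 8192 bytes, and remainder = 5000 % 2048 = 904 elements,
     which the fallback bcast at the end of this function sends separately. */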
  /* if root is not zero, move the data to rank zero first;
     this can be made faster by using logical src, dst instead */
  if (root != 0) {
    if (rank == root) {
      smpi_mpi_send(buf, count, datatype, 0, tag, comm);
    } else if (rank == 0) {
      smpi_mpi_recv(buf, count, datatype, root, tag, comm, &status);
    }
  }
  /* value == 0 means root has not sent data (or header) to the node yet */
  for (i = 0; i < MAX_NODE; i++) {
    already_sent[i] = 0;
    to_clean[i] = 0;
  }
  /* when a message is smaller than a block size => no pipeline */
  if (count <= segment) {
    if (rank == 0) {
      sent_count = 0;

      while (sent_count < (size - 1)) {

        /* probe every non-root rank for its 1-byte arrival message */
        for (i = 1; i < size; i++) {
          smpi_mpi_iprobe(i, MPI_ANY_TAG, comm, &flag_array[i],
                          &temp_status_array[i]);
        }

        header_index = 0;
        /* recv 1-byte message */
        for (i = 1; i < size; i++) {
          /* message has arrived */
          if ((flag_array[i] == 1) && (already_sent[i] == 0)) {
            smpi_mpi_recv(temp_buf, 1, MPI_CHAR, i, tag, comm, &status);
            header_buf[header_index] = i;
            header_index++;
            sent_count++;

            /* will send in the next step */
            already_sent[i] = 1;
          }
        }
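
        /* Illustrative example (invented values): if ranks 5, 2 and 7
           arrived during this iteration, the chain header becomes
             header_buf = { 5, 2, 7, -1 }
           so the data travels root -> 5 -> 2 -> 7, and the -1 sentinel
           tells rank 7 that it is the last hop. */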
        /* send header followed by data */
        if (header_index != 0) {
          header_buf[header_index] = -1;
          to = header_buf[0];
          smpi_mpi_send(header_buf, HEADER_SIZE, MPI_INT, to, tag, comm);
          smpi_mpi_send(buf, count, datatype, to, tag, comm);
        }
        /* no rank has arrived yet: pick one and send to it anyway */
        else {
          /* search for the first node that never received data before */
          for (i = 1; i < size; i++) {
            if (already_sent[i] == 0) {
              header_buf[0] = i;
              header_buf[1] = -1;
              smpi_mpi_send(header_buf, HEADER_SIZE, MPI_INT, i, tag, comm);
              smpi_mpi_send(buf, count, datatype, i, tag, comm);
              already_sent[i] = 1;
              sent_count++;
              break;
            }
          }
        }
      }                         /* while loop */
    }
    /* non-root */
    else {
      /* send 1-byte message to root to announce arrival */
      smpi_mpi_send(temp_buf, 1, MPI_CHAR, 0, tag, comm);
      /* wait for header and data, forward when required */
      smpi_mpi_recv(header_buf, HEADER_SIZE, MPI_INT, MPI_ANY_SOURCE, tag, comm,
                    &status);
      smpi_mpi_recv(buf, count, datatype, MPI_ANY_SOURCE, tag, comm, &status);
      /* search for this rank's position in the chain */
      int myordering = 0;
      while (rank != header_buf[myordering]) {
        myordering++;
      }
      /* send header followed by data to the successor, if any */
      if (header_buf[myordering + 1] != -1) {
        smpi_mpi_send(header_buf, HEADER_SIZE, MPI_INT, header_buf[myordering + 1],
                      tag, comm);
        smpi_mpi_send(buf, count, datatype, header_buf[myordering + 1], tag, comm);
      }
    }
  }
  /* pipeline bcast */
  else {
    send_request_array =
        (MPI_Request *) xbt_malloc((size + pipe_length) * sizeof(MPI_Request));
    recv_request_array =
        (MPI_Request *) xbt_malloc((size + pipe_length) * sizeof(MPI_Request));
    send_status_array =
        (MPI_Status *) xbt_malloc((size + pipe_length) * sizeof(MPI_Status));
    recv_status_array =
        (MPI_Status *) xbt_malloc((size + pipe_length) * sizeof(MPI_Status));
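
    /* Sizing note (editor's observation, not from the original comments):
       only the first pipe_length entries of each array are used below, so
       allocating (size + pipe_length) entries leaves size slots of slack. */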
    if (rank == 0) {
      sent_count = 0;

      while (sent_count < (size - 1)) {

        /* probe every non-root rank for its 1-byte arrival message */
        for (i = 1; i < size; i++) {
          smpi_mpi_iprobe(i, MPI_ANY_TAG, comm, &flag_array[i],
                          &temp_status_array[i]);
        }

        header_index = 0;
        /* recv 1-byte message */
        for (i = 1; i < size; i++) {
          /* message has arrived */
          if ((flag_array[i] == 1) && (already_sent[i] == 0)) {
            smpi_mpi_recv(&temp_buf[i], 1, MPI_CHAR, i, tag, comm,
                          &status);
            header_buf[header_index] = i;
            header_index++;
            sent_count++;

            /* will send in the next step */
            already_sent[i] = 1;
          }
        }
        /* send header followed by data */
        if (header_index != 0) {
          header_buf[header_index] = -1;
          to = header_buf[0];

          /* send header */
          smpi_mpi_send(header_buf, HEADER_SIZE, MPI_INT, to, tag, comm);

          /* send data in one piece - non-pipeline case (disabled) */
          //smpi_mpi_send(buf, count, datatype, to, tag, comm);

          /* send data - pipeline */
          for (i = 0; i < pipe_length; i++) {
            smpi_mpi_send((char *)buf + (i * increment), segment, datatype, to, tag, comm);
          }
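
          /* Pipelining sketch (illustrative): with pipe_length = 4, the
             data leaves as four segment-sized sends; the receiver can
             start forwarding segment 0 to its successor while segments
             1-3 are still in flight, instead of waiting for the whole
             buffer to arrive. */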
        }
        /* no rank has arrived yet: pick one and send to it anyway */
        else {
          /* search for the first node that never received data before */
          for (i = 1; i < size; i++) {
            if (already_sent[i] == 0) {
              header_buf[0] = i;
              header_buf[1] = -1;
              to = i;
              smpi_mpi_send(header_buf, HEADER_SIZE, MPI_INT, to, tag, comm);
              /* still need to chop data so that we can use the same non-root code */
              for (j = 0; j < pipe_length; j++) {
                smpi_mpi_send((char *)buf + (j * increment), segment, datatype, to, tag,
                              comm);
              }
              already_sent[i] = 1;
              /* drain this rank's pending 1-byte message after the loop */
              to_clean[i] = 1;
              sent_count++;
              break;
            }
          }
        }
      }                         /* while loop */
      /* drain the 1-byte arrival messages still in flight from ranks that
         were pushed data before their message was received */
      for (i = 0; i < size; i++)
        if (to_clean[i] != 0)
          smpi_mpi_recv(&temp_buf[i], 1, MPI_CHAR, i, tag, comm,
                        &status);
    }
    /* non-root */
    else {
      /* send 1-byte message to root to announce arrival */
      smpi_mpi_send(temp_buf, 1, MPI_CHAR, 0, tag, comm);

      /* wait for the header; forward it later when required */
      request = smpi_mpi_irecv(header_buf, HEADER_SIZE, MPI_INT, MPI_ANY_SOURCE, tag, comm);
      smpi_mpi_wait(&request, MPI_STATUS_IGNORE);
      /* search for this rank's position in the chain */
      int myordering = 0;
      while (rank != header_buf[myordering]) {
        myordering++;
      }
      /* forward the header to the successor when required */
      if (header_buf[myordering + 1] != -1) {
        smpi_mpi_send(header_buf, HEADER_SIZE, MPI_INT, header_buf[myordering + 1],
                      tag, comm);
      }
      /* receive data */

      /* whole-buffer receive - non-pipeline case (disabled) */
      //request = smpi_mpi_irecv(buf, count, datatype, 0, tag, comm);
      //smpi_mpi_wait(&request, MPI_STATUS_IGNORE);

      /* post the receives for all pipeline segments up front */
      for (i = 0; i < pipe_length; i++) {
        recv_request_array[i] = smpi_mpi_irecv((char *)buf + (i * increment), segment, datatype, MPI_ANY_SOURCE,
                                               tag, comm);
      }
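
      /* Note (editor's reading of the protocol): MPI_ANY_SOURCE appears
         safe here because, with this tag, only the predecessor in the
         chain sends data segments to this rank. */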
      /* send data */
      if (header_buf[myordering + 1] != -1) {
        for (i = 0; i < pipe_length; i++) {
          /* forward each segment as soon as it has arrived */
          smpi_mpi_wait(&recv_request_array[i], MPI_STATUS_IGNORE);
          send_request_array[i] = smpi_mpi_isend((char *)buf + (i * increment), segment, datatype,
                                                 header_buf[myordering + 1], tag, comm);
        }
        smpi_mpi_waitall(pipe_length, send_request_array, send_status_array);
      }

      smpi_mpi_waitall(pipe_length, recv_request_array, recv_status_array);
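
      /* Forwarding note (added for clarity): in the loop above, waiting on
         segment i and immediately isend-ing it overlaps the receive of
         segment i+1 with the send of segment i, so each hop streams
         segments instead of store-and-forwarding the whole buffer. */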
    }

    free(send_request_array);
    free(recv_request_array);
    free(send_status_array);
    free(recv_status_array);
  }                             /* end pipeline */
  /* when count is not divisible by block size, use default BCAST for the remainder */
  if ((remainder != 0) && (count > segment)) {
    XBT_WARN("MPI_bcast_arrival_pattern_aware: using default MPI_bcast for the remainder.");
    smpi_mpi_bcast((char *)buf + (pipe_length * increment), remainder, datatype, root, comm);
  }

  return MPI_SUCCESS;
}