#include <mpi.h>
#include <stdlib.h>
#include "colls.h"              /* assumed project header declaring star_reduction */
//#include <star-reduction.c>

int reduce_NTSL_segment_size_in_byte = 8192;
/* Non-topology-specific pipelined linear reduce function:
   last node -> ... -> 3 -> 2 -> 1 -> root, in a pipelined fashion */
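/* Why pipelining helps: splitting the message into k segments lets
   consecutive segments travel over different links at the same time, so a
   p-process chain needs roughly (p - 2 + k) segment-transfer steps instead
   of the k * (p - 1) segment steps of a non-pipelined linear reduce. */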
int smpi_coll_tuned_reduce_NTSL(void *buf, void *rbuf, int count,
                                MPI_Datatype datatype, MPI_Op op, int root,
                                MPI_Comm comm)
{
  int tag = 4321;               /* base message tag; exact value reconstructed */
  MPI_Status status;
  MPI_Request *send_request_array;
  MPI_Request *recv_request_array;
  MPI_Status *send_status_array;
  MPI_Status *recv_status_array;
  int rank, size;
  int i;

  MPI_Aint extent;
  MPI_Type_extent(datatype, &extent);

  MPI_Comm_rank(comm, &rank);
  MPI_Comm_size(comm, &size);
  /* destination and source nodes (the same throughout the function) */
  int to = (rank - 1 + size) % size;
  int from = (rank + 1) % size;
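  /* e.g. with size = 4 and root = 0: rank 3 sends to rank 2, 2 to 1, 1 to 0,
     so segments flow 3 -> 2 -> 1 -> 0 along the logical ring */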
  /* segment is the segment size in number of elements (not bytes) */
  int segment = reduce_NTSL_segment_size_in_byte / extent;

  /* pipeline length: number of full segments in the message */
  int pipe_length = count / segment;

  /* buffer offset step between consecutive segments = segment size in bytes */
  int increment = segment * extent;

  /* if the input size is not divisible by the segment size, the small
     remainder is handled with the native implementation at the end */
  int remainder = count % segment;
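  /* worked example (assuming a 4-byte MPI_INT): count = 3000 gives
     segment = 8192 / 4 = 2048, pipe_length = 1, remainder = 952 */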
  /* if root is not zero, send to rank zero first;
     this can be made faster by using logical src/dst instead */
  if (root != 0) {
    if (rank == root) {
      MPI_Send(buf, count, datatype, 0, tag, comm);
    } else if (rank == 0) {
      MPI_Recv(buf, count, datatype, root, tag, comm, &status);
    }
  }
  char *tmp_buf;
  tmp_buf = (char *) malloc(count * extent);

  /* copy the local contribution into the receive buffer */
  MPI_Sendrecv(buf, count, datatype, rank, tag, rbuf, count, datatype, rank,
               tag, comm, &status);
  /* when a message is smaller than one block => no pipeline */
  if (count <= segment) {
    if (rank == root) {
      MPI_Recv(tmp_buf, count, datatype, from, tag, comm, &status);
      star_reduction(op, tmp_buf, rbuf, &count, &datatype);
    }
    /* the last node only sends its data */
    else if (rank == ((root - 1 + size) % size)) {
      MPI_Send(rbuf, count, datatype, to, tag, comm);
    }
    /* intermediate nodes receive, reduce, then forward */
    else {
      MPI_Recv(tmp_buf, count, datatype, from, tag, comm, &status);
      star_reduction(op, tmp_buf, rbuf, &count, &datatype);
      MPI_Send(rbuf, count, datatype, to, tag, comm);
    }
    free(tmp_buf);
    return MPI_SUCCESS;
  }
  /* pipeline */
  else {
    send_request_array =
        (MPI_Request *) malloc((size + pipe_length) * sizeof(MPI_Request));
    recv_request_array =
        (MPI_Request *) malloc((size + pipe_length) * sizeof(MPI_Request));
    send_status_array =
        (MPI_Status *) malloc((size + pipe_length) * sizeof(MPI_Status));
    recv_status_array =
        (MPI_Status *) malloc((size + pipe_length) * sizeof(MPI_Status));
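    /* note: only pipe_length of the (size + pipe_length) entries are ever
       used; the larger allocation is merely conservative */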
    /* root only receives and reduces data */
    if (rank == root) {
      /* post all receives up front so transfers overlap with the reduction */
      for (i = 0; i < pipe_length; i++) {
        MPI_Irecv((char *) tmp_buf + (i * increment), segment, datatype, from,
                  (tag + i), comm, &recv_request_array[i]);
      }
      for (i = 0; i < pipe_length; i++) {
        MPI_Wait(&recv_request_array[i], &status);
        star_reduction(op, tmp_buf + (i * increment), (char *) rbuf + (i * increment),
                       &segment, &datatype);
      }
    }
    /* the last node only sends data */
    else if (rank == ((root - 1 + size) % size)) {
      for (i = 0; i < pipe_length; i++) {
        MPI_Isend((char *) rbuf + (i * increment), segment, datatype, to, (tag + i),
                  comm, &send_request_array[i]);
      }
      MPI_Waitall(pipe_length, send_request_array, send_status_array);
    }
    /* intermediate nodes relay (receive, reduce, then send) data */
    else {
      for (i = 0; i < pipe_length; i++) {
        MPI_Irecv((char *) tmp_buf + (i * increment), segment, datatype, from,
                  (tag + i), comm, &recv_request_array[i]);
      }
      for (i = 0; i < pipe_length; i++) {
        MPI_Wait(&recv_request_array[i], &status);
        star_reduction(op, tmp_buf + (i * increment), (char *) rbuf + (i * increment),
                       &segment, &datatype);
        MPI_Isend((char *) rbuf + (i * increment), segment, datatype, to,
                  (tag + i), comm, &send_request_array[i]);
      }
      MPI_Waitall(pipe_length, send_request_array, send_status_array);
    }
    free(send_request_array);
    free(recv_request_array);
    free(send_status_array);
    free(recv_status_array);
  }                             /* end pipeline */
  /* when count is not divisible by the block size, use the native reduce
     for the remainder */
  if ((remainder != 0) && (count > segment)) {
    MPI_Reduce((char *) buf + (pipe_length * increment),
               (char *) rbuf + (pipe_length * increment), remainder, datatype,
               op, root, comm);
  }

  free(tmp_buf);
  return MPI_SUCCESS;
}
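
/* Usage sketch -- not part of the original file.  A minimal driver, assuming
   this file is compiled together with star_reduction and linked against an
   MPI implementation; the NTSL_STANDALONE_DEMO guard is hypothetical.  Each
   rank contributes its rank number in every slot, so root 0 should see the
   element-wise sum 0 + 1 + ... + (size - 1). */
#ifdef NTSL_STANDALONE_DEMO
#include <stdio.h>

int main(int argc, char **argv)
{
  enum { N = 4096 };            /* 16 KiB of ints => exercises the pipeline path */
  int sendbuf[N], recvbuf[N];
  int i, rank;

  MPI_Init(&argc, &argv);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  for (i = 0; i < N; i++)
    sendbuf[i] = rank;
  smpi_coll_tuned_reduce_NTSL(sendbuf, recvbuf, N, MPI_INT, MPI_SUM, 0,
                              MPI_COMM_WORLD);
  if (rank == 0)
    printf("recvbuf[0] = %d\n", recvbuf[0]);   /* size*(size-1)/2 */
  MPI_Finalize();
  return 0;
}
#endif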