int smpi_coll_tuned_allreduce_rab_rdb(void *sbuff, void *rbuff, int count,
                                      MPI_Datatype dtype, MPI_Op op,
                                      MPI_Comm comm)
{
  int nprocs, rank, type_size, tag = 543;
  int mask, dst, pof2, newrank, rem, newdst, i,
      send_idx, recv_idx, last_idx, send_cnt, recv_cnt, *cnts, *disps;
  MPI_Aint extent;
  MPI_Status status;
  void *tmp_buf;
#ifdef MPICH2_REDUCTION
  MPI_User_function *uop = MPIR_Op_table[op % 16 - 1];
#else
  MPI_User_function *uop;
  struct MPIR_OP *op_ptr;
  op_ptr = (struct MPIR_OP *) MPIR_ToPointer(op);
  // the reduction function lives in the MPIR_OP descriptor
  uop = op_ptr->op;
#endif
  MPI_Comm_size(comm, &nprocs);
  MPI_Comm_rank(comm, &rank);
  MPI_Type_extent(dtype, &extent);
  tmp_buf = (void *) malloc(count * extent);
  if (!tmp_buf) {
    printf("Could not allocate memory for tmp_buf\n");
    return -1;
  }
  // copy the local data into the receive buffer; rbuff accumulates the result
  MPIR_Localcopy(sbuff, count, dtype, rbuff, count, dtype);

  MPI_Type_size(dtype, &type_size);
  // find nearest power-of-two less than or equal to comm_size
  pof2 = 1;
  while (pof2 <= nprocs)
    pof2 <<= 1;
  pof2 >>= 1;

  rem = nprocs - pof2;
  // In the non-power-of-two case, all even-numbered
  // processes of rank < 2*rem send their data to
  // (rank+1). These even-numbered processes no longer
  // participate in the algorithm until the very end. The
  // remaining processes form a nice power-of-two.
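
  // Worked example (illustrative): with nprocs = 6, pof2 = 4 and rem = 2.
  // Ranks 0 and 2 send their data to ranks 1 and 3 and then sit out; the
  // survivors get newrank = 0 (rank 1), 1 (rank 3), 2 (rank 4), 3 (rank 5),
  // which is exactly the power-of-two group used below.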
  if (rank < 2 * rem) {
    if (rank % 2 == 0) {        // even
      MPI_Send(rbuff, count, dtype, rank + 1, tag, comm);

      // temporarily set the rank to -1 so that this
      // process does not participate in recursive
      // doubling
      newrank = -1;
    } else {                    // odd
      MPI_Recv(tmp_buf, count, dtype, rank - 1, tag, comm, &status);
      // do the reduction on received data. since the
      // ordering is right, it doesn't matter whether
      // the operation is commutative or not.
      (*uop) (tmp_buf, rbuff, &count, &dtype);
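
      // Note on argument order: an MPI_User_function computes
      // inoutvec[i] = invec[i] op inoutvec[i], so passing tmp_buf (the data
      // received from the lower-numbered even rank) as invec and rbuff as
      // inoutvec reduces in ascending-rank order, which is what keeps this
      // step safe for non-commutative operations.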
      // change the rank used in the power-of-two phase
      newrank = rank / 2;
    }
  } else {                      // rank >= 2 * rem
    newrank = rank - rem;
  }
  // If op is user-defined or count is less than pof2, use
  // recursive doubling algorithm. Otherwise do a reduce-scatter
  // followed by allgather. (If op is user-defined,
  // derived datatypes are allowed and the user could pass basic
  // datatypes on one process and derived on another as long as
  // the type maps are the same. Breaking up derived
  // datatypes to do the reduce-scatter is tricky, therefore
  // using recursive doubling in that case.)
  if (newrank != -1) {
    // do a reduce-scatter followed by allgather. for the
    // reduce-scatter, calculate the count that each process receives
    // and the displacement within the buffer

    cnts = (int *) malloc(pof2 * sizeof(int));
    disps = (int *) malloc(pof2 * sizeof(int));
    for (i = 0; i < (pof2 - 1); i++)
      cnts[i] = count / pof2;
    cnts[pof2 - 1] = count - (count / pof2) * (pof2 - 1);

    disps[0] = 0;
    for (i = 1; i < pof2; i++)
      disps[i] = disps[i - 1] + cnts[i - 1];
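
    // Worked example (illustrative): count = 10, pof2 = 4 gives
    //   cnts  = {2, 2, 2, 4}   (the last slice absorbs the remainder)
    //   disps = {0, 2, 4, 6}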
    mask = 0x1;
    send_idx = recv_idx = 0;
    last_idx = pof2;
    while (mask < pof2) {
      newdst = newrank ^ mask;
      // find real rank of dest
      dst = (newdst < rem) ? newdst * 2 + 1 : newdst + rem;
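
      // Continuing the illustrative nprocs = 6 example (rem = 2): newdst
      // 0, 1, 2, 3 map back to real ranks 1, 3, 4, 5 -- exactly the
      // processes that stayed in after the pre-step above.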
      send_cnt = recv_cnt = 0;
      if (newrank < newdst) {
        send_idx = recv_idx + pof2 / (mask * 2);
        for (i = send_idx; i < last_idx; i++)
          send_cnt += cnts[i];
        for (i = recv_idx; i < send_idx; i++)
          recv_cnt += cnts[i];
      } else {
        recv_idx = send_idx + pof2 / (mask * 2);
        for (i = send_idx; i < recv_idx; i++)
          send_cnt += cnts[i];
        for (i = recv_idx; i < last_idx; i++)
          recv_cnt += cnts[i];
      }
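
      // First iteration (illustrative, pof2 = 4, mask = 1, newrank 0 paired
      // with newrank 1): the lower rank keeps slices 0..1 and ships slices
      // 2..3; its partner does the mirror image, so after the exchange each
      // side is responsible for reducing one half of the buffer.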
      // Send data from recvbuf. Recv into tmp_buf
      MPI_Sendrecv((char *) rbuff + disps[send_idx] * extent, send_cnt,
                   dtype, dst, tag,
                   (char *) tmp_buf + disps[recv_idx] * extent, recv_cnt,
                   dtype, dst, tag, comm, &status);
      // tmp_buf contains data received in this step.
      // recvbuf contains data accumulated so far

      // This algorithm is used only for predefined ops
      // and predefined ops are always commutative.
      (*uop) ((char *) tmp_buf + disps[recv_idx] * extent,
              (char *) rbuff + disps[recv_idx] * extent, &recv_cnt, &dtype);
      // update send_idx for next iteration
      send_idx = recv_idx;
      mask <<= 1;

      // update last_idx, but not in last iteration because the value
      // is needed in the allgather step below.
      if (mask < pof2)
        last_idx = recv_idx + pof2 / mask;
    }
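
    // At this point the reduce-scatter is complete: each of the pof2
    // participating processes holds the fully reduced values for the
    // recv_cnt elements starting at disps[recv_idx]. The allgather below
    // circulates those slices so every process ends up with the whole
    // reduced buffer.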
    // now do the allgather

    mask >>= 1;
    while (mask > 0) {
      newdst = newrank ^ mask;
      // find real rank of dest
      dst = (newdst < rem) ? newdst * 2 + 1 : newdst + rem;
      send_cnt = recv_cnt = 0;
      if (newrank < newdst) {
        // update last_idx except on first iteration
        if (mask != pof2 / 2)
          last_idx = last_idx + pof2 / (mask * 2);

        recv_idx = send_idx + pof2 / (mask * 2);
        for (i = send_idx; i < recv_idx; i++)
          send_cnt += cnts[i];
        for (i = recv_idx; i < last_idx; i++)
          recv_cnt += cnts[i];
      } else {
        recv_idx = send_idx - pof2 / (mask * 2);
        for (i = send_idx; i < last_idx; i++)
          send_cnt += cnts[i];
        for (i = recv_idx; i < send_idx; i++)
          recv_cnt += cnts[i];
      }
      // both sides now exchange fully reduced slices directly within rbuff
      MPI_Sendrecv((char *) rbuff + disps[send_idx] * extent, send_cnt,
                   dtype, dst, tag,
                   (char *) rbuff + disps[recv_idx] * extent, recv_cnt,
                   dtype, dst, tag, comm, &status);
      if (newrank > newdst)
        send_idx = recv_idx;

      mask >>= 1;
    }

    free(cnts);
    free(disps);
  }
  // In the non-power-of-two case, all odd-numbered processes of
  // rank < 2 * rem send the result to (rank-1), the ranks who didn't
  // participate above.
  if (rank < 2 * rem) {
    if (rank % 2)               // odd
      MPI_Send(rbuff, count, dtype, rank - 1, tag, comm);
    else                        // even
      MPI_Recv(rbuff, count, dtype, rank + 1, tag, comm, &status);
  }

  free(tmp_buf);
  return MPI_SUCCESS;
}
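
/*
 * Usage sketch (illustrative only; the real call sites and the collective-
 * selection machinery live elsewhere in SMPI). Given an initialized MPI
 * environment, the routine behaves like MPI_Allreduce:
 *
 *   int vals[4] = {rank, rank, rank, rank};
 *   int sums[4];
 *   smpi_coll_tuned_allreduce_rab_rdb(vals, sums, 4, MPI_INT, MPI_SUM,
 *                                     MPI_COMM_WORLD);
 *   // every rank now holds sums[i] == nprocs * (nprocs - 1) / 2 for all i
 */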