1 /* Copyright (c) 2013-2022. The SimGrid Team.
2 * All rights reserved. */
4 /* This program is free software; you can redistribute it and/or modify it
5 * under the terms of the license (GNU LGPL) which comes with this package. */
8 * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
9 * University Research and Technology
10 * Corporation. All rights reserved.
11 * Copyright (c) 2004-2009 The University of Tennessee and The University
12 * of Tennessee Research Foundation. All rights
14 * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
15 * University of Stuttgart. All rights reserved.
16 * Copyright (c) 2004-2005 The Regents of the University of California.
17 * All rights reserved.
18 * Copyright (c) 2009 University of Houston. All rights reserved.
20 * Additional copyrights may follow
22 * Redistribution and use in source and binary forms, with or without
23 * modification, are permitted provided that the following conditions are
26 * - Redistributions of source code must retain the above copyright
27 * notice, this list of conditions and the following disclaimer.
29 * - Redistributions in binary form must reproduce the above copyright
30 * notice, this list of conditions and the following disclaimer listed
31 * in this license in the documentation and/or other materials
32 * provided with the distribution.
34 * - Neither the name of the copyright holders nor the names of its
35 * contributors may be used to endorse or promote products derived from
36 * this software without specific prior written permission.
38 * The copyright holders provide no reassurances that the source code
39 * provided does not infringe any patent, copyright, or any other
40 * intellectual property rights of third parties. The copyright holders
41 * disclaim any liability to any recipient for claims brought against
42 * recipient by any third party for infringement of that parties
43 * intellectual property rights.
45 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
46 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
47 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
48 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
49 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
50 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
51 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
52 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
53 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
54 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
55 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
59 * ompi_coll_tuned_allreduce_intra_ring_segmented
61 * Function: Pipelined ring algorithm for allreduce operation
62 * Accepts: Same as MPI_Allreduce(), segment size
63 * Returns: MPI_SUCCESS or error code
65 * Description: Implements pipelined ring algorithm for allreduce:
66 * user supplies suggested segment size for the pipelining of
68 * The segment size determines the number of phases, np, for
69 * the algorithm execution.
70 * The message is automatically divided into blocks of
71 * approximately (count / (np * segcount)) elements.
72 * At the end of reduction phase, allgather like step is
74 * Algorithm requires (np + 1)*(N - 1) steps.
76 * Limitations: The algorithm DOES NOT preserve order of operations so it
77 * can be used only for commutative operations.
78 * In addition, algorithm cannot work if the total size is
79 * less than size * segment size.
80 * Example on 3 nodes with 2 phases
90 * COMPUTATION PHASE 0 (a)
91 * Step 0: rank r sends block ra to rank (r+1) and receives block (r-1)a
92 * from rank (r-1) [with wraparound].
94 * [00a] [00a+10a] [20a]
96 * [01a] [11a] [11a+21a]
98 * [22a+02a] [12a] [22a]
101 * Step 1: rank r sends block (r-1)a to rank (r+1) and receives block
102 * (r-2)a from rank (r-1) [with wraparound].
104 * [00a] [00a+10a] [00a+10a+20a]
106 * [11a+21a+01a] [11a] [11a+21a]
108 * [22a+02a] [22a+02a+12a] [22a]
111 * COMPUTATION PHASE 1 (b)
112 * Step 0: rank r sends block rb to rank (r+1) and receives block (r-1)b
113 * from rank (r-1) [with wraparound].
115 * [00a] [00a+10a] [20a]
116 * [00b] [00b+10b] [20b]
117 * [01a] [11a] [11a+21a]
118 * [01b] [11b] [11b+21b]
119 * [22a+02a] [12a] [22a]
120 * [22b+02b] [12b] [22b]
122 * Step 1: rank r sends block (r-1)b to rank (r+1) and receives block
123 * (r-2)b from rank (r-1) [with wraparound].
125 * [00a] [00a+10a] [00a+10a+20a]
126 *         [00b]          [00b+10b]       [00b+10b+20b]
127 *         [11a+21a+01a]  [11a]           [11a+21a]
128 *         [11b+21b+01b]  [11b]           [11b+21b]
129 *         [22a+02a]      [22a+02a+12a]   [22a]
130 *         [22b+02b]      [22b+02b+12b]   [22b]
133 * DISTRIBUTION PHASE: ring ALLGATHER with ranks shifted by 1 (same as
134 *         in regular ring algorithm).
// Derive SEGCOUNT, the number of elements per pipeline segment, from a
// suggested segment size in bytes (SEGSIZE) and the datatype size in bytes
// (TYPELNG).  Only applies when SEGSIZE covers at least one element but less
// than the whole message; `residual` holds the leftover bytes so the count
// can be rounded to the nearest whole element.
// NOTE(review): the rounding increment and the macro's closing braces are on
// lines not visible in this chunk -- verify against the full file before
// modifying.  `residual` must be declared by the caller.
138 #define COLL_TUNED_COMPUTED_SEGCOUNT(SEGSIZE, TYPELNG, SEGCOUNT) \
139 if( ((SEGSIZE) >= (TYPELNG)) && \
140 ((SEGSIZE) < ((TYPELNG) * (SEGCOUNT))) ) { \
142 (SEGCOUNT) = (int)((SEGSIZE) / (TYPELNG)); \
143 residual = (SEGSIZE) - (SEGCOUNT) * (TYPELNG); \
144 if( residual > ((TYPELNG) >> 1) ) \
// Split COUNT elements over NUM_BLOCKS blocks as evenly as possible.
// Blocks 0 .. SPLIT_INDEX-1 ("early" blocks) get ceil(COUNT/NUM_BLOCKS)
// elements; the remaining ("late") blocks get floor(COUNT/NUM_BLOCKS).
// Early blocks are thus at most one element larger than late ones.
// NOTE(review): the closing brace of the `if` is on a line not visible in
// this chunk.
148 #define COLL_TUNED_COMPUTE_BLOCKCOUNT( COUNT, NUM_BLOCKS, SPLIT_INDEX, \
149 EARLY_BLOCK_COUNT, LATE_BLOCK_COUNT ) \
150 EARLY_BLOCK_COUNT = LATE_BLOCK_COUNT = COUNT / NUM_BLOCKS; \
151 SPLIT_INDEX = COUNT % NUM_BLOCKS; \
152 if (0 != SPLIT_INDEX) { \
153 EARLY_BLOCK_COUNT = EARLY_BLOCK_COUNT + 1; \
156 #include "../colls_private.hpp"
158 namespace simgrid::smpi {
// Pipelined segmented-ring allreduce, ported from Open MPI's "tuned"
// collectives (ompi_coll_tuned_allreduce_intra_ring_segmented).  The
// reduction runs as a ring reduce-scatter pipelined over `num_phases`
// segments per block, followed by a ring allgather that redistributes the
// fully reduced blocks.  Requires a commutative `op` (operation order is not
// preserved); falls back to allreduce__lr() when count < size * segcount.
// NOTE(review): several lines of this function are not visible in this chunk
// (remaining parameters of the signature; declarations of `line`, `typelng`,
// `inbi`, `block_count`; some closing braces; the normal return; and the
// `error_hndl:` label).  Comments below describe only what is shown.
159 int allreduce__ompi_ring_segmented(const void *sbuf, void *rbuf, int count,
164 int ret = MPI_SUCCESS;
166 int k, recv_from, send_to;
167 int early_blockcount, late_blockcount, split_rank;
168 int segcount, max_segcount;
169 int num_phases, phase;
173 char *tmpsend = nullptr, *tmprecv = nullptr;
// Two temporary receive buffers so one segment can be received while the
// previously received one is being reduced (double buffering, indexed by
// `inbi`, whose declaration is on a line not shown).
174 unsigned char* inbuf[2] = {nullptr, nullptr};
175 ptrdiff_t true_extent, extent;
176 ptrdiff_t block_offset, max_real_segsize;
177 MPI_Request reqs[2] = {nullptr, nullptr};
// Suggested segment size in bytes, used to derive the pipeline depth.
178 const size_t segsize = 1 << 20; /* 1 MB */
179 int size = comm->size();
180 int rank = comm->rank();
182 XBT_DEBUG("coll:tuned:allreduce_intra_ring_segmented rank %d, count %d", rank, count);
184 /* Special case for size == 1 */
// NOTE(review): only the sbuf->rbuf copy of the size == 1 early return is
// visible here; the return itself is on a line not shown.
186 if (MPI_IN_PLACE != sbuf) {
187 ret= Datatype::copy(sbuf, count, dtype,rbuf, count, dtype);
188 if (ret < 0) { line = __LINE__; goto error_hndl; }
193 /* Determine segment count based on the suggested segment size */
// NOTE(review): `extent` and `true_extent` are both taken from the same
// get_extent() call; upstream Open MPI queries the *true* extent separately.
// Also, the `MPI_SUCCESS != ret` checks below are vestigial: get_extent()
// and size() do not assign `ret`, so they test the previous value.
194 extent = dtype->get_extent();
195 if (MPI_SUCCESS != ret) { line = __LINE__; goto error_hndl; }
196 true_extent = dtype->get_extent();
197 if (MPI_SUCCESS != ret) { line = __LINE__; goto error_hndl; }
198 typelng = dtype->size();
199 if (MPI_SUCCESS != ret) { line = __LINE__; goto error_hndl; }
// Derive elements-per-segment from the 1 MB suggestion (macro above).
201 COLL_TUNED_COMPUTED_SEGCOUNT(segsize, typelng, segcount)
203 /* Special case for count less than size * segcount - use regular ring */
204 if (count < size * segcount) {
205 XBT_DEBUG( "coll:tuned:allreduce_ring_segmented rank %d/%d, count %d, switching to regular ring", rank, size, count);
206 return (allreduce__lr(sbuf, rbuf, count, dtype, op, comm));
209 /* Determine the number of phases of the algorithm */
210 num_phases = count / (size * segcount);
// Round num_phases up when the remainder justifies an extra phase
// (the increment inside this `if` is on a line not shown).
211 if ((count % (size * segcount) >= size) &&
212 (count % (size * segcount) > ((size * segcount) / 2))) {
216 /* Determine the number of elements per block and corresponding
218 The blocks are divided into "early" and "late" ones:
219 blocks 0 .. (split_rank - 1) are "early" and
220 blocks (split_rank) .. (size - 1) are "late".
221 Early blocks are at most 1 element larger than the late ones.
222 Note, these blocks will be split into num_phases segments,
223 out of the largest one will have max_segcount elements.
// Partition `count` into per-rank blocks, then split one (largest, early)
// block into num_phases segments; `inbi` is reused as scratch split index
// here.  The continuation line assigning max_segcount is not shown.
225 COLL_TUNED_COMPUTE_BLOCKCOUNT( count, size, split_rank,
226 early_blockcount, late_blockcount )
227 COLL_TUNED_COMPUTE_BLOCKCOUNT( early_blockcount, num_phases, inbi,
// Worst-case byte size of one segment, accounting for datatype extent.
229 max_real_segsize = true_extent + (max_segcount - 1) * extent;
231 /* Allocate and initialize temporary buffers */
232 inbuf[0] = smpi_get_tmp_sendbuffer(max_real_segsize);
233 if (nullptr == inbuf[0]) {
239 inbuf[1] = smpi_get_tmp_recvbuffer(max_real_segsize);
240 if (nullptr == inbuf[1]) {
247 /* Handle MPI_IN_PLACE */
248 if (MPI_IN_PLACE != sbuf) {
249 ret= Datatype::copy(sbuf, count, dtype,rbuf, count, dtype);
250 if (ret < 0) { line = __LINE__; goto error_hndl; }
253 /* Computation loop: for each phase, repeat ring allreduce computation loop */
254 for (phase = 0; phase < num_phases; phase ++) {
255 ptrdiff_t phase_offset;
256 int early_phase_segcount, late_phase_segcount, split_phase, phase_count;
259 For each of the remote nodes:
260 - post irecv for block (r-1)
262 To do this, first compute block offset and count, and use block offset
263 to compute phase offset.
264 - in loop for every step k = 2 .. n
265 - post irecv for block (r + n - k) % n
266 - wait on block (r + n - k + 1) % n to arrive
267 - compute on block (r + n - k + 1) % n
268 - send block (r + n - k + 1) % n
269 - wait on block (r + 1)
270 - compute on block (r + 1)
271 - send block (r + 1) to rank (r + 1)
272 Note that we must be careful when computing the beginning of buffers and
273 for send operations and computation we must compute the exact block size.
// Ring neighbors: always send right, receive from the left.
275 send_to = (rank + 1) % size;
276 recv_from = (rank + size - 1) % size;
279 /* Initialize first receive from the neighbor on the left */
280 reqs[inbi] = Request::irecv(inbuf[inbi], max_segcount, dtype, recv_from,
282 /* Send first block (my block) to the neighbor on the right:
283 - compute my block and phase offset
// Locate this rank's own block and, within it, the current phase's segment.
285 block_offset = ((rank < split_rank)?
286 (rank * early_blockcount) :
287 (rank * late_blockcount + split_rank));
288 block_count = ((rank < split_rank)? early_blockcount : late_blockcount);
289 COLL_TUNED_COMPUTE_BLOCKCOUNT(block_count, num_phases, split_phase,
290 early_phase_segcount, late_phase_segcount)
291 phase_count = ((phase < split_phase)?
292 (early_phase_segcount) : (late_phase_segcount));
293 phase_offset = ((phase < split_phase)?
294 (phase * early_phase_segcount) :
295 (phase * late_phase_segcount + split_phase));
296 tmpsend = ((char*)rbuf) + (block_offset + phase_offset) * extent;
297 Request::send(tmpsend, phase_count, dtype, send_to,
// Steady state: for each step overlap the receive of block (r+size-k)%size
// with reduction and forwarding of the previously received block (the
// `inbi` flip between iterations is on a line not shown).
300 for (k = 2; k < size; k++) {
301 const int prevblock = (rank + size - k + 1) % size;
305 /* Post irecv for the current block */
306 reqs[inbi] = Request::irecv(inbuf[inbi], max_segcount, dtype, recv_from,
308 if (MPI_SUCCESS != ret) { line = __LINE__; goto error_hndl; }
310 /* Wait on previous block to arrive */
311 Request::wait(&reqs[inbi ^ 0x1], MPI_STATUS_IGNORE);
313 /* Apply operation on previous block: result goes to rbuf
314 rbuf[prevblock] = inbuf[inbi ^ 0x1] (op) rbuf[prevblock]
316 block_offset = ((prevblock < split_rank)?
317 (prevblock * early_blockcount) :
318 (prevblock * late_blockcount + split_rank));
319 block_count = ((prevblock < split_rank)?
320 early_blockcount : late_blockcount);
321 COLL_TUNED_COMPUTE_BLOCKCOUNT(block_count, num_phases, split_phase,
322 early_phase_segcount, late_phase_segcount)
323 phase_count = ((phase < split_phase)?
324 (early_phase_segcount) : (late_phase_segcount));
325 phase_offset = ((phase < split_phase)?
326 (phase * early_phase_segcount) :
327 (phase * late_phase_segcount + split_phase));
328 tmprecv = ((char*)rbuf) + (block_offset + phase_offset) * extent;
329 if(op!=MPI_OP_NULL) op->apply( inbuf[inbi ^ 0x1], tmprecv, &phase_count, dtype);
330 /* send previous block to send_to */
331 Request::send(tmprecv, phase_count, dtype, send_to,
335 /* Wait on the last block to arrive */
336 Request::wait(&reqs[inbi], MPI_STATUS_IGNORE);
339 /* Apply operation on the last block (from neighbor (rank + 1)
340 rbuf[rank+1] = inbuf[inbi] (op) rbuf[rank + 1] */
341 recv_from = (rank + 1) % size;
342 block_offset = ((recv_from < split_rank)?
343 (recv_from * early_blockcount) :
344 (recv_from * late_blockcount + split_rank));
345 block_count = ((recv_from < split_rank)?
346 early_blockcount : late_blockcount);
347 COLL_TUNED_COMPUTE_BLOCKCOUNT(block_count, num_phases, split_phase,
348 early_phase_segcount, late_phase_segcount)
349 phase_count = ((phase < split_phase)?
350 (early_phase_segcount) : (late_phase_segcount));
351 phase_offset = ((phase < split_phase)?
352 (phase * early_phase_segcount) :
353 (phase * late_phase_segcount + split_phase));
354 tmprecv = ((char*)rbuf) + (block_offset + phase_offset) * extent;
355 if(op!=MPI_OP_NULL) op->apply( inbuf[inbi], tmprecv, &phase_count, dtype);
358 /* Distribution loop - variation of ring allgather */
359 send_to = (rank + 1) % size;
360 recv_from = (rank + size - 1) % size;
// size-1 allgather steps circulate the fully reduced blocks around the ring
// (ranks shifted by one relative to the reduce-scatter above).
361 for (k = 0; k < size - 1; k++) {
362 const int recv_data_from = (rank + size - k) % size;
363 const int send_data_from = (rank + 1 + size - k) % size;
364 const int send_block_offset =
365 ((send_data_from < split_rank)?
366 (send_data_from * early_blockcount) :
367 (send_data_from * late_blockcount + split_rank));
368 const int recv_block_offset =
369 ((recv_data_from < split_rank)?
370 (recv_data_from * early_blockcount) :
371 (recv_data_from * late_blockcount + split_rank));
372 block_count = ((send_data_from < split_rank)?
373 early_blockcount : late_blockcount);
375 tmprecv = (char*)rbuf + recv_block_offset * extent;
376 tmpsend = (char*)rbuf + send_block_offset * extent;
// The receive is posted with early_blockcount, which is >= late_blockcount
// by construction of the block split, so it accommodates any block.
378 Request::sendrecv(tmpsend, block_count, dtype, send_to,
380 tmprecv, early_blockcount, dtype, recv_from,
382 comm, MPI_STATUS_IGNORE);
// Normal-path cleanup of the temporary segment buffers.
// NOTE(review): the return following this cleanup is on lines not shown.
386 smpi_free_tmp_buffer(inbuf[0]);
387 smpi_free_tmp_buffer(inbuf[1]);
// Failure path -- the `error_hndl:` label itself is on a line not shown.
// Report where the error happened, then release the temporary buffers.
392 XBT_DEBUG("%s:%4d\tRank %d Error occurred %d\n",
393 __FILE__, line, rank, ret);
394 smpi_free_tmp_buffer(inbuf[0]);
395 smpi_free_tmp_buffer(inbuf[1]);
398 } // namespace simgrid::smpi