1 /* Copyright (c) 2013-2014. The SimGrid Team.
2 * All rights reserved. */
4 /* This program is free software; you can redistribute it and/or modify it
5 * under the terms of the license (GNU LGPL) which comes with this package. */
8 * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
9 * University Research and Technology
10 * Corporation. All rights reserved.
11 * Copyright (c) 2004-2009 The University of Tennessee and The University
12 * of Tennessee Research Foundation. All rights
14 * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
15 * University of Stuttgart. All rights reserved.
16 * Copyright (c) 2004-2005 The Regents of the University of California.
17 * All rights reserved.
19 * Additional copyrights may follow
22 #include "colls_private.h"
23 #include "coll_tuned_topo.h"
25 /* Todo: gather_intra_generic, gather_intra_binary, gather_intra_chain,
26 * gather_intra_pipeline, segmentation? */
/* Binomial-tree gather (port of Open MPI's tuned gather to SMPI).
 * Each process forwards its contribution toward `root` along an in-order
 * binomial tree; the root ends up with size*rcount elements in rbuf.
 * NOTE(review): this listing is missing interior lines (the embedded line
 * numbers jump), so parts of the signature and several branches are not
 * visible here — comments below describe only what the visible code shows. */
28 smpi_coll_tuned_gather_ompi_binomial(void *sbuf, int scount,
30 void *rbuf, int rcount,
44 ompi_coll_tree_t* bmtree;
46 MPI_Aint sextent, slb, strue_lb, strue_extent;
47 MPI_Aint rextent, rlb, rtrue_lb, rtrue_extent;
50 size = smpi_comm_size(comm);
51 rank = smpi_comm_rank(comm);
54 "smpi_coll_tuned_gather_ompi_binomial rank %d", rank);
56 /* create the binomial tree */
57 // COLL_TUNED_UPDATE_IN_ORDER_BMTREE( comm, tuned_module, root );
58 bmtree = ompi_coll_tuned_topo_build_in_order_bmtree(comm, root);
59 // data->cached_in_order_bmtree;
61 smpi_datatype_extent(sdtype, &slb, &sextent);
62 smpi_datatype_extent(sdtype, &strue_lb, &strue_extent);
/* vrank = rank rotated so the root becomes virtual rank 0; the tree is
 * built in these virtual coordinates. */
64 vrank = (rank - root + size) % size;
67 smpi_datatype_extent(rdtype, &rlb, &rextent);
68 smpi_datatype_extent(rdtype, &rtrue_lb, &rtrue_extent);
70 /* root on 0, just use the recv buffer */
72 if (sbuf != MPI_IN_PLACE) {
73 err = smpi_datatype_copy(sbuf, scount, sdtype,
74 ptmp, rcount, rdtype);
75 if (MPI_SUCCESS != err) { line = __LINE__; goto err_hndl; }
78 /* root is not on 0, allocate temp buffer for recv,
79 * rotate data at the end */
/* Sized with the true extent of one element plus extents of the remaining
 * rcount*size - 1 elements, so any datatype layout fits. */
80 tempbuf = (char *) malloc(rtrue_extent + (rcount*size - 1) * rextent);
81 if (NULL == tempbuf) {
82 err= MPI_ERR_OTHER; line = __LINE__; goto err_hndl;
86 if (sbuf != MPI_IN_PLACE) {
87 /* copy from sbuf to temp buffer */
88 err = smpi_datatype_copy(sbuf, scount, sdtype,
89 ptmp, rcount, rdtype);
90 if (MPI_SUCCESS != err) { line = __LINE__; goto err_hndl; }
/* MPI_IN_PLACE: the root's own contribution already sits at block `rank`
 * of rbuf; copy it to the front of the temp buffer instead. */
92 /* copy from rbuf to temp buffer */
93 err = smpi_datatype_copy((char *) rbuf + rank*rextent*rcount, rcount, rdtype, ptmp, rcount, rdtype );
94 if (MPI_SUCCESS != err) { line = __LINE__; goto err_hndl; }
/* Even virtual ranks (other than the root) are interior tree nodes: they
 * receive from children before forwarding, so they need a staging buffer. */
98 } else if (!(vrank % 2)) {
99 /* other non-leaf nodes, allocate temp buffer for data received from
100 * children, the most we need is half of the total data elements due
101 * to the property of binomial tree */
102 tempbuf = (char *) malloc(strue_extent + (scount*size - 1) * sextent);
103 if (NULL == tempbuf) {
104 err= MPI_ERR_OTHER; line = __LINE__; goto err_hndl;
/* Bias by the lower bound so ptmp + k*extent addresses element k. */
107 ptmp = tempbuf - slb;
108 /* local copy to tempbuf */
109 err = smpi_datatype_copy(sbuf, scount, sdtype,
110 ptmp, scount, sdtype);
111 if (MPI_SUCCESS != err) { line = __LINE__; goto err_hndl; }
113 /* use sdtype,scount as rdtype,rdcount since they are ignored on
120 /* leaf nodes, no temp buffer needed, use sdtype,scount as
121 * rdtype,rdcount since they are ignored on non-root procs */
122 ptmp = (char *) sbuf;
127 /* all non-leaf nodes recv from children */
128 for (i = 0; i < bmtree->tree_nextsize; i++) {
129 int mycount = 0, vkid;
130 /* figure out how much data this child holds (its own subtree),
 * i.e. how many blocks to receive from it */
131 vkid = (bmtree->tree_next[i] - root + size) % size;
/* Child vkid owns blocks [vkid, min(next sibling, size)); clamp the
 * subtree span at the end of the communicator. */
132 mycount = vkid - vrank;
133 if (mycount > (size - vkid))
134 mycount = size - vkid;
138 "smpi_coll_tuned_gather_ompi_binomial rank %d recv %d mycount = %d",
139 rank, bmtree->tree_next[i], mycount);
/* NOTE(review): mycount here is a block count; the actual element count
 * passed to recv presumably includes a *rcount factor on a line missing
 * from this listing — confirm against the upstream source. */
141 smpi_mpi_recv(ptmp + total_recv*rextent, mycount, rdtype,
142 bmtree->tree_next[i], COLL_TAG_GATHER,
145 total_recv += mycount;
150 /* all nodes except root send to parents */
152 "smpi_coll_tuned_gather_ompi_binomial rank %d send %d count %d\n",
153 rank, bmtree->tree_prev, total_recv);
155 smpi_mpi_send(ptmp, total_recv, sdtype,
/* Root != 0: received data is in virtual-rank order starting at vrank 0,
 * so rotate it into rbuf in two pieces. */
162 /* rotate received data on root if root != 0 */
/* Piece 1: virtual blocks [0, size-root) map to real ranks [root, size). */
163 err = smpi_datatype_copy(ptmp, rcount*(size - root), rdtype,
164 (char *) rbuf + rextent*root*rcount, rcount*(size - root), rdtype );
165 if (MPI_SUCCESS != err) { line = __LINE__; goto err_hndl; }
/* Piece 2: remaining virtual blocks map to real ranks [0, root). */
168 err = smpi_datatype_copy( ptmp + rextent*rcount*(size-root), rcount*root,rdtype,
169 (char *) rbuf,rcount*root,rdtype);
170 if (MPI_SUCCESS != err) { line = __LINE__; goto err_hndl; }
174 } else if (!(vrank % 2)) {
175 /* other non-leaf nodes */
/* Error path: report where the failure occurred (line captured above). */
185 XBT_DEBUG( "%s:%4d\tError occurred %d, rank %2d",
186 __FILE__, line, err, rank);
191 * gather_intra_linear_sync
193 * Function: - synchronized gather operation with
194 * Accepts: - same arguments as MPI_Gather(), first segment size
195 * Returns: - MPI_SUCCESS or error code
/* Linear gather with rendezvous-style synchronization (Open MPI port).
 * Each message is split in two segments; the first segment is only sent
 * after the root signals readiness with a zero-byte message, which throttles
 * incast at the root for large block sizes.
 * NOTE(review): interior lines are missing from this listing (embedded line
 * numbers jump); comments describe only the visible code. */
198 smpi_coll_tuned_gather_ompi_linear_sync(void *sbuf, int scount,
200 void *rbuf, int rcount,
208 int first_segment_count;
213 int first_segment_size=0;
214 size = smpi_comm_size(comm);
215 rank = smpi_comm_rank(comm);
217 size_t dsize, block_size;
/* Per-process block size in bytes: the root measures it with the receive
 * signature, non-roots with the send signature. */
219 dsize= smpi_datatype_size(rdtype);
220 block_size = dsize * rcount;
222 dsize=smpi_datatype_size(sdtype);
223 block_size = dsize * scount;
/* Heuristic thresholds (bytes): bigger blocks get a bigger first segment. */
226 if (block_size > 92160){
227 first_segment_size = 32768;
229 first_segment_size = 1024;
233 "smpi_coll_tuned_gather_ompi_linear_sync rank %d, segment %d", rank, first_segment_size);
236 /* Non-root processes:
237 - receive zero byte message from the root,
238 - send the first segment of the data synchronously,
239 - send the second segment of the data.
242 typelng= smpi_datatype_size(sdtype);
243 smpi_datatype_extent(sdtype, &lb, &extent);
/* Convert the byte budget into an element count (capped at scount). */
244 first_segment_count = scount;
245 COLL_TUNED_COMPUTED_SEGCOUNT( (size_t) first_segment_size, typelng,
246 first_segment_count );
/* Zero-byte handshake from the root gates the data transfer. */
248 smpi_mpi_recv(sbuf, 0, MPI_BYTE, root,
250 comm, MPI_STATUS_IGNORE);
252 smpi_mpi_send(sbuf, first_segment_count, sdtype, root,
/* Second segment: whatever remains after the first one. */
256 smpi_mpi_send((char*)sbuf + extent * first_segment_count,
257 (scount - first_segment_count), sdtype,
258 root, COLL_TAG_GATHER,
264 - For every non-root node:
265 - post irecv for the first segment of the message
266 - send zero byte message to signal node to send the message
267 - post irecv for the second segment of the message
268 - wait for the first segment to complete
269 - Copy local data if necessary
270 - Waitall for all the second segments to complete.
273 MPI_Request *reqs = NULL, first_segment_req;
/* One slot per peer for the second-segment requests; the local slot stays
 * MPI_REQUEST_NULL so waitall skips it. */
274 reqs = (MPI_Request *) calloc(size, sizeof(MPI_Request ));
275 if (NULL == reqs) { ret = -1; line = __LINE__; goto error_hndl; }
277 typelng=smpi_datatype_size(rdtype);
278 smpi_datatype_extent(rdtype, &lb, &extent);
279 first_segment_count = rcount;
280 COLL_TUNED_COMPUTED_SEGCOUNT( (size_t)first_segment_size, typelng,
281 first_segment_count );
283 for (i = 0; i < size; ++i) {
286 reqs[i] = MPI_REQUEST_NULL;
290 /* irecv for the first segment from i */
291 ptmp = (char*)rbuf + i * rcount * extent;
292 first_segment_req = smpi_mpi_irecv(ptmp, first_segment_count, rdtype, i,
293 COLL_TAG_GATHER, comm
296 /* send sync message (zero bytes; only the matching matters) */
297 smpi_mpi_send(rbuf, 0, MPI_BYTE, i,
301 /* irecv for the second segment */
302 ptmp = (char*)rbuf + (i * rcount + first_segment_count) * extent;
303 reqs[i]=smpi_mpi_irecv(ptmp, (rcount - first_segment_count),
304 rdtype, i, COLL_TAG_GATHER, comm
307 /* wait on the first segment to complete before posting for the next
 * peer — this serializes first segments, bounding concurrent traffic */
308 smpi_mpi_wait(&first_segment_req, MPI_STATUS_IGNORE);
311 /* copy local data if necessary (skipped for MPI_IN_PLACE, where the
 * root's contribution is already in place in rbuf) */
312 if (MPI_IN_PLACE != sbuf) {
313 ret = smpi_datatype_copy(sbuf, scount, sdtype,
314 (char*)rbuf + rank * rcount * extent,
316 if (ret != MPI_SUCCESS) { line = __LINE__; goto error_hndl; }
319 /* wait all second segments to complete */
320 ret = smpi_mpi_waitall(size, reqs, MPI_STATUSES_IGNORE);
321 if (ret != MPI_SUCCESS) { line = __LINE__; goto error_hndl; }
/* Error path: report rank, file, line and error code. */
331 "ERROR_HNDL: node %d file %s line %d error %d\n",
332 rank, __FILE__, line, ret );
337 * Linear functions are copied from the BASIC coll module
338 * they do not segment the message and are simple implementations
339 * but for some small number of nodes and/or small data sizes they
340 * are just as fast as tuned/tree based segmenting operations
341 * and as such may be selected by the decision functions
342 * These are copied into this module due to the way we select modules
343 * in V1. i.e. in V2 we will handle this differently and so will not
344 * have to duplicate code.
345 * JPG following the examples from other coll_tuned implementations. Dec06.
348 /* copied function (with appropriate renaming) starts here */
352 * Function: - basic gather operation
353 * Accepts: - same arguments as MPI_Gather()
354 * Returns: - MPI_SUCCESS or error code
/* Basic linear gather (copied from the BASIC coll module, see banner above):
 * non-roots do a single blocking send; the root receives from every rank in
 * order into consecutive rcount-sized slots of rbuf.
 * NOTE(review): this listing is missing interior lines and is cut off before
 * the function's end; comments cover only the visible code. */
357 smpi_coll_tuned_gather_ompi_basic_linear(void *sbuf, int scount,
359 void *rbuf, int rcount,
373 size = smpi_comm_size(comm);
374 rank = smpi_comm_rank(comm);
376 /* Everyone but root sends data and returns. */
378 "ompi_coll_tuned_gather_intra_basic_linear rank %d", rank);
381 smpi_mpi_send(sbuf, scount, sdtype, root,
387 /* I am the root, loop receiving the data. */
389 smpi_datatype_extent(rdtype, &lb, &extent);
/* incr = bytes per per-rank slot in rbuf; ptmp walks slot by slot. */
390 incr = extent * rcount;
391 for (i = 0, ptmp = (char *) rbuf; i < size; ++i, ptmp += incr) {
/* Root's own slot (i == rank, per the loop over all ranks): local copy
 * instead of a self-receive, unless MPI_IN_PLACE. */
393 if (MPI_IN_PLACE != sbuf) {
394 err = smpi_datatype_copy(sbuf, scount, sdtype,
395 ptmp, rcount, rdtype);
400 smpi_mpi_recv(ptmp, rcount, rdtype, i,
402 comm, MPI_STATUS_IGNORE);
405 if (MPI_SUCCESS != err) {