/* smpi_coll.c -- various optimized routines for collectives */

/* Copyright (c) 2009-2014. The SimGrid Team.
 * All rights reserved. */

/* This program is free software; you can redistribute it and/or modify it
 * under the terms of the license (GNU LGPL) which comes with this package. */

#include <stdio.h>
#include <string.h>

#include "private.h"
#include "colls/colls.h"
#include "simgrid/sg_config.h"
s_mpi_coll_description_t mpi_coll_gather_description[] = {
  {"default",
   "gather default collective",
   smpi_mpi_gather},
  COLL_GATHERS(COLL_DESCRIPTION, COLL_COMMA),
  {NULL, NULL, NULL}      /* this array must be NULL terminated */
};
s_mpi_coll_description_t mpi_coll_allgather_description[] = {
  {"default",
   "allgather default collective",
   smpi_mpi_allgather},
  COLL_ALLGATHERS(COLL_DESCRIPTION, COLL_COMMA),
  {NULL, NULL, NULL}      /* this array must be NULL terminated */
};
s_mpi_coll_description_t mpi_coll_allgatherv_description[] = {
  {"default",
   "allgatherv default collective",
   smpi_mpi_allgatherv},
  COLL_ALLGATHERVS(COLL_DESCRIPTION, COLL_COMMA),
  {NULL, NULL, NULL}      /* this array must be NULL terminated */
};
s_mpi_coll_description_t mpi_coll_allreduce_description[] = {
  {"default",
   "allreduce default collective",
   smpi_mpi_allreduce},
  COLL_ALLREDUCES(COLL_DESCRIPTION, COLL_COMMA),
  {NULL, NULL, NULL}      /* this array must be NULL terminated */
};
s_mpi_coll_description_t mpi_coll_reduce_scatter_description[] = {
  {"default",
   "reduce_scatter default collective",
   smpi_mpi_reduce_scatter},
  COLL_REDUCE_SCATTERS(COLL_DESCRIPTION, COLL_COMMA),
  {NULL, NULL, NULL}      /* this array must be NULL terminated */
};
s_mpi_coll_description_t mpi_coll_scatter_description[] = {
  {"default",
   "scatter default collective",
   smpi_mpi_scatter},
  COLL_SCATTERS(COLL_DESCRIPTION, COLL_COMMA),
  {NULL, NULL, NULL}      /* this array must be NULL terminated */
};
s_mpi_coll_description_t mpi_coll_barrier_description[] = {
  {"default",
   "barrier default collective",
   smpi_mpi_barrier},
  COLL_BARRIERS(COLL_DESCRIPTION, COLL_COMMA),
  {NULL, NULL, NULL}      /* this array must be NULL terminated */
};
s_mpi_coll_description_t mpi_coll_alltoall_description[] = {
  {"default",
   "Ompi alltoall default collective",
   smpi_coll_tuned_alltoall_ompi2},
  COLL_ALLTOALLS(COLL_DESCRIPTION, COLL_COMMA),
  {"bruck",
   "Alltoall Bruck (SG) collective",
   smpi_coll_tuned_alltoall_bruck},
  {"basic_linear",
   "Alltoall basic linear (SG) collective",
   smpi_coll_tuned_alltoall_basic_linear},
  {NULL, NULL, NULL}      /* this array must be NULL terminated */
};
s_mpi_coll_description_t mpi_coll_alltoallv_description[] = {
  {"default",
   "Ompi alltoallv default collective",
   smpi_coll_basic_alltoallv},
  COLL_ALLTOALLVS(COLL_DESCRIPTION, COLL_COMMA),
  {NULL, NULL, NULL}      /* this array must be NULL terminated */
};
s_mpi_coll_description_t mpi_coll_bcast_description[] = {
  {"default",
   "bcast default collective",
   smpi_mpi_bcast},
  COLL_BCASTS(COLL_DESCRIPTION, COLL_COMMA),
  {NULL, NULL, NULL}      /* this array must be NULL terminated */
};
s_mpi_coll_description_t mpi_coll_reduce_description[] = {
  {"default",
   "reduce default collective",
   smpi_mpi_reduce},
  COLL_REDUCES(COLL_DESCRIPTION, COLL_COMMA),
  {NULL, NULL, NULL}      /* this array must be NULL terminated */
};
/** Displays the long description of all registered models */
void coll_help(const char *category, s_mpi_coll_description_t * table)
{
  int i;
  printf("Long description of the %s models accepted by this simulator:\n",
         category);
  for (i = 0; table[i].name; i++)
    printf("  %s: %s\n", table[i].name, table[i].description);
}
int find_coll_description(s_mpi_coll_description_t * table,
                          char *name)
{
  int i;
  char *name_list = NULL;
  int selector_on = 0;

  if (name == NULL) {   // no argument provided, use the active selector's algorithm
    name = (char *) sg_cfg_get_string("smpi/coll_selector");
    selector_on = 1;
  }
  for (i = 0; table[i].name; i++)
    if (!strcmp(name, table[i].name)) {
      return i;
    }
  if (selector_on) {
    // the collective seems not handled by the active selector, try the default one
    name = (char *) "default";
    for (i = 0; table[i].name; i++)
      if (!strcmp(name, table[i].name)) {
        return i;
      }
  }
  if (!table[0].name)
    xbt_die("No collective is valid! This is a bug.");
  name_list = xbt_strdup(table[0].name);
  for (i = 1; table[i].name; i++) {
    name_list = xbt_realloc(name_list,
                            strlen(name_list) + strlen(table[i].name) + 3);
    strcat(name_list, ", ");
    strcat(name_list, table[i].name);
  }
  xbt_die("Collective '%s' is invalid! Valid collectives are: %s.", name, name_list);
  return -1;
}
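/* Usage sketch (an illustration, not code from this file: the actual wiring
 * lives in SMPI's initialization code, and the `coll` field name and the
 * "smpi/bcast" option name are assumptions): the returned index selects the
 * implementation bound to one of the function pointers declared below, e.g.
 *
 *   int id = find_coll_description(mpi_coll_bcast_description,
 *                                  (char *) sg_cfg_get_string("smpi/bcast"));
 *   mpi_coll_bcast_fun = (int (*)(void *, int, MPI_Datatype, int, MPI_Comm))
 *       mpi_coll_bcast_description[id].coll;
 */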
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_coll, smpi,
                                "Logging specific to SMPI (coll)");
int (*mpi_coll_gather_fun)(void *, int, MPI_Datatype, void *, int, MPI_Datatype, int root, MPI_Comm);
int (*mpi_coll_allgather_fun)(void *, int, MPI_Datatype, void *, int, MPI_Datatype, MPI_Comm);
int (*mpi_coll_allgatherv_fun)(void *, int, MPI_Datatype, void *, int *, int *, MPI_Datatype, MPI_Comm);
int (*mpi_coll_allreduce_fun)(void *sbuf, void *rbuf, int rcount, MPI_Datatype dtype, MPI_Op op, MPI_Comm comm);
int (*mpi_coll_alltoall_fun)(void *, int, MPI_Datatype, void *, int, MPI_Datatype, MPI_Comm);
int (*mpi_coll_alltoallv_fun)(void *, int *, int *, MPI_Datatype, void *, int *, int *, MPI_Datatype, MPI_Comm);
int (*mpi_coll_bcast_fun)(void *buf, int count, MPI_Datatype datatype, int root, MPI_Comm com);
int (*mpi_coll_reduce_fun)(void *buf, void *rbuf, int count, MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm);
int (*mpi_coll_reduce_scatter_fun)(void *sbuf, void *rbuf, int *rcounts, MPI_Datatype dtype, MPI_Op op, MPI_Comm comm);
int (*mpi_coll_scatter_fun)(void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf, int recvcount, MPI_Datatype recvtype, int root, MPI_Comm comm);
int (*mpi_coll_barrier_fun)(MPI_Comm comm);
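/* Tree descriptor used by the helpers below. This definition is reconstructed
 * from the fields actually referenced in this file; the original struct may
 * carry additional members. */
struct s_proc_tree {
  int PROCTREE_A;      /* arity of the tree */
  int numChildren;
  int *child;          /* ranks of the children, -1 for unused slots */
  int parent;          /* rank of the parent in the tree */
  int root;            /* rank of the tree root */
};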
typedef struct s_proc_tree *proc_tree_t;

static proc_tree_t alloc_tree(int arity)
{
  proc_tree_t tree = xbt_new(struct s_proc_tree, 1);
  int i;
  tree->PROCTREE_A = arity;
  tree->numChildren = 0;
  tree->child = xbt_new(int, arity);
  for (i = 0; i < arity; i++) {
    tree->child[i] = -1;
  }
  return tree;
}

static void free_tree(proc_tree_t tree)
{
  xbt_free(tree->child);
  xbt_free(tree);
}
/**
 * Build the tree depending on a process rank (index) and the group size (extent)
 * @param root the rank of the tree root
 * @param rank the rank of the calling process
 * @param size the total number of processes
 **/
static void build_tree(int root, int rank, int size, proc_tree_t * tree)
{
  int i;
  int index = (rank - root + size) % size;
  int firstChildIdx = index * (*tree)->PROCTREE_A + 1;

  (*tree)->root = root;
  for (i = 0; i < (*tree)->PROCTREE_A && firstChildIdx + i < size; i++) {
    (*tree)->child[i] = (firstChildIdx + i + root) % size;
    (*tree)->numChildren++;
  }
  if (rank != root) {
    (*tree)->parent = (((index - 1) / (*tree)->PROCTREE_A) + root) % size;
  }
}
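/* Worked example of the mapping above, assuming arity 2, root 0 and size 7:
 * rank 2 gets index 2, firstChildIdx 5, children {5, 6} and parent 0, while
 * rank 5 gets index 5, firstChildIdx 11 (>= size, hence no children) and
 * parent 2. */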
static void tree_bcast(void *buf, int count, MPI_Datatype datatype,
                       MPI_Comm comm, proc_tree_t tree)
{
  int system_tag = COLL_TAG_BCAST;
  int rank, i;
  MPI_Request *requests;

  rank = smpi_comm_rank(comm);
  /* wait for data from my parent in the tree */
  if (rank != tree->root) {
    XBT_DEBUG("<%d> tree_bcast(): i am not root: recv from %d, tag=%d)",
              rank, tree->parent, system_tag + rank);
    smpi_mpi_recv(buf, count, datatype, tree->parent, system_tag + rank,
                  comm, MPI_STATUS_IGNORE);
  }
  requests = xbt_new(MPI_Request, tree->numChildren);
  XBT_DEBUG("<%d> creates %d requests (1 per child)", rank,
            tree->numChildren);
  /* initiate sends to ranks lower in the tree */
  for (i = 0; i < tree->numChildren; i++) {
    if (tree->child[i] == -1) {
      requests[i] = MPI_REQUEST_NULL;
    } else {
      XBT_DEBUG("<%d> send to <%d>, tag=%d", rank, tree->child[i],
                system_tag + tree->child[i]);
      requests[i] =
          smpi_isend_init(buf, count, datatype, tree->child[i],
                          system_tag + tree->child[i], comm);
    }
  }
  smpi_mpi_startall(tree->numChildren, requests);
  smpi_mpi_waitall(tree->numChildren, requests, MPI_STATUS_IGNORE);
  for (i = 0; i < tree->numChildren; i++) {
    if (requests[i] != MPI_REQUEST_NULL)
      smpi_mpi_request_free(&requests[i]);
  }
  xbt_free(requests);
}
static void tree_antibcast(void *buf, int count, MPI_Datatype datatype,
                           MPI_Comm comm, proc_tree_t tree)
{
  int system_tag = COLL_TAG_BCAST;
  int rank, i;
  MPI_Request *requests;

  rank = smpi_comm_rank(comm);
  // everyone sends to its parent, except the root
  if (rank != tree->root) {
    XBT_DEBUG("<%d> tree_antibcast(): i am not root: send to %d, tag=%d)",
              rank, tree->parent, system_tag + rank);
    smpi_mpi_send(buf, count, datatype, tree->parent, system_tag + rank,
                  comm);
  }
  // everyone receives as many messages as it has children
  requests = xbt_new(MPI_Request, tree->numChildren);
  XBT_DEBUG("<%d> creates %d requests (1 per child)", rank,
            tree->numChildren);
  for (i = 0; i < tree->numChildren; i++) {
    if (tree->child[i] == -1) {
      requests[i] = MPI_REQUEST_NULL;
    } else {
      XBT_DEBUG("<%d> recv from <%d>, tag=%d", rank, tree->child[i],
                system_tag + tree->child[i]);
      requests[i] =
          smpi_irecv_init(buf, count, datatype, tree->child[i],
                          system_tag + tree->child[i], comm);
    }
  }
  smpi_mpi_startall(tree->numChildren, requests);
  smpi_mpi_waitall(tree->numChildren, requests, MPI_STATUS_IGNORE);
  for (i = 0; i < tree->numChildren; i++) {
    if (requests[i] != MPI_REQUEST_NULL)
      smpi_mpi_request_free(&requests[i]);
  }
  xbt_free(requests);
}
/**
 * bcast with a binary, ternary, or whatever n-ary tree
 **/
void nary_tree_bcast(void *buf, int count, MPI_Datatype datatype, int root,
                     MPI_Comm comm, int arity)
{
  proc_tree_t tree = alloc_tree(arity);
  int rank, size;

  rank = smpi_comm_rank(comm);
  size = smpi_comm_size(comm);
  build_tree(root, rank, size, &tree);
  tree_bcast(buf, count, datatype, comm, tree);
  free_tree(tree);
}
/**
 * barrier with a binary, ternary, or whatever n-ary tree
 **/
void nary_tree_barrier(MPI_Comm comm, int arity)
{
  proc_tree_t tree = alloc_tree(arity);
  int rank, size;
  char dummy = '$';
  rank = smpi_comm_rank(comm);
  size = smpi_comm_size(comm);
  build_tree(0, rank, size, &tree);
  tree_antibcast(&dummy, 1, MPI_CHAR, comm, tree);
  tree_bcast(&dummy, 1, MPI_CHAR, comm, tree);
  free_tree(tree);
}
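/* Usage sketch (an assumption, not taken from this file): the default SMPI
 * bcast and barrier call these helpers with a small fixed arity, e.g.
 *   nary_tree_bcast(buf, count, datatype, root, comm, 4);
 *   nary_tree_barrier(comm, 4);
 * which builds a 4-ary tree rooted at `root` (or at rank 0 for the barrier). */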
int smpi_coll_tuned_alltoall_ompi2(void *sendbuf, int sendcount,
                                   MPI_Datatype sendtype, void *recvbuf,
                                   int recvcount, MPI_Datatype recvtype,
                                   MPI_Comm comm)
{
  int size, sendsize;
  size = smpi_comm_size(comm);
  sendsize = smpi_datatype_size(sendtype) * sendcount;
  if (sendsize < 200 && size > 12) {
    return smpi_coll_tuned_alltoall_bruck(sendbuf, sendcount, sendtype,
                                          recvbuf, recvcount, recvtype,
                                          comm);
  } else if (sendsize < 3000) {
    return smpi_coll_tuned_alltoall_basic_linear(sendbuf, sendcount, sendtype,
                                                 recvbuf, recvcount, recvtype,
                                                 comm);
  } else {
    return smpi_coll_tuned_alltoall_ring(sendbuf, sendcount, sendtype,
                                         recvbuf, recvcount, recvtype,
                                         comm);
  }
}
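/* Example of the selection above: with 16 ranks and sendcount 10 of a 4-byte
 * type (sendsize = 40 bytes), the Bruck variant is chosen; with sendcount 600
 * (2400 bytes) the basic linear one; 3000 bytes or more per destination go to
 * the ring implementation. */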
/**
 * Alltoall Bruck
 *
 * Openmpi calls this routine when the message size sent to each rank is < 2000 bytes and size < 12
 * FIXME: uh, check smpi_pmpi again, but this routine is called for > 12, not
 * less...
 **/
int smpi_coll_tuned_alltoall_bruck(void *sendbuf, int sendcount,
                                   MPI_Datatype sendtype, void *recvbuf,
                                   int recvcount, MPI_Datatype recvtype,
                                   MPI_Comm comm)
{
  int system_tag = 777;
  int i, rank, size, err, count;
  MPI_Aint lb;
  MPI_Aint sendext = 0;
  MPI_Aint recvext = 0;
  MPI_Request *requests;

  // FIXME: check implementation
  rank = smpi_comm_rank(comm);
  size = smpi_comm_size(comm);
  XBT_DEBUG("<%d> algorithm alltoall_bruck() called.", rank);
  err = smpi_datatype_extent(sendtype, &lb, &sendext);
  err = smpi_datatype_extent(recvtype, &lb, &recvext);
  /* Local copy from self */
  err =
      smpi_datatype_copy((char *)sendbuf + rank * sendcount * sendext,
                         sendcount, sendtype,
                         (char *)recvbuf + rank * recvcount * recvext,
                         recvcount, recvtype);
  if (err == MPI_SUCCESS && size > 1) {
    /* Initiate all send/recv to/from others. */
    requests = xbt_new(MPI_Request, 2 * (size - 1));
    count = 0;
    /* Create all receives that will be posted first */
    for (i = 0; i < size; ++i) {
      if (i == rank) {
        XBT_DEBUG("<%d> skip request creation [src = %d, recvcount = %d]",
                  rank, i, recvcount);
        continue;
      }
      requests[count] =
          smpi_irecv_init((char *)recvbuf + i * recvcount * recvext, recvcount,
                          recvtype, i, system_tag, comm);
      count++;
    }
    /* Now create all sends */
    for (i = 0; i < size; ++i) {
      if (i == rank) {
        XBT_DEBUG("<%d> skip request creation [dst = %d, sendcount = %d]",
                  rank, i, sendcount);
        continue;
      }
      requests[count] =
          smpi_isend_init((char *)sendbuf + i * sendcount * sendext, sendcount,
                          sendtype, i, system_tag, comm);
      count++;
    }
    /* Wait for them all. */
    smpi_mpi_startall(count, requests);
    XBT_DEBUG("<%d> wait for %d requests", rank, count);
    smpi_mpi_waitall(count, requests, MPI_STATUS_IGNORE);
    for (i = 0; i < count; i++) {
      if (requests[i] != MPI_REQUEST_NULL)
        smpi_mpi_request_free(&requests[i]);
    }
    xbt_free(requests);
  }
  return err;
}
/**
 * Alltoall basic_linear (STARMPI:alltoall-simple)
 **/
int smpi_coll_tuned_alltoall_basic_linear(void *sendbuf, int sendcount,
                                          MPI_Datatype sendtype,
                                          void *recvbuf, int recvcount,
                                          MPI_Datatype recvtype,
                                          MPI_Comm comm)
{
  int system_tag = 888;
  int i, rank, size, err, count;
  MPI_Aint lb = 0, sendext = 0, recvext = 0;
  MPI_Request *requests;

  rank = smpi_comm_rank(comm);
  size = smpi_comm_size(comm);
  XBT_DEBUG("<%d> algorithm alltoall_basic_linear() called.", rank);
  err = smpi_datatype_extent(sendtype, &lb, &sendext);
  err = smpi_datatype_extent(recvtype, &lb, &recvext);
  /* simple optimization */
  err = smpi_datatype_copy((char *)sendbuf + rank * sendcount * sendext,
                           sendcount, sendtype,
                           (char *)recvbuf + rank * recvcount * recvext,
                           recvcount, recvtype);
  if (err == MPI_SUCCESS && size > 1) {
    /* Initiate all send/recv to/from others. */
    requests = xbt_new(MPI_Request, 2 * (size - 1));
    /* Post all receives first -- a simple optimization */
    count = 0;
    for (i = (rank + 1) % size; i != rank; i = (i + 1) % size) {
      requests[count] =
          smpi_irecv_init((char *)recvbuf + i * recvcount * recvext, recvcount,
                          recvtype, i, system_tag, comm);
      count++;
    }
    /* Now post all sends in reverse order
     *   - We would like to minimize the search time through the message queue
     *     when messages actually arrive in the order in which they were posted.
     * TODO: check the previous assertion
     */
    for (i = (rank + size - 1) % size; i != rank; i = (i + size - 1) % size) {
      requests[count] =
          smpi_isend_init((char *)sendbuf + i * sendcount * sendext, sendcount,
                          sendtype, i, system_tag, comm);
      count++;
    }
    /* Wait for them all. */
    smpi_mpi_startall(count, requests);
    XBT_DEBUG("<%d> wait for %d requests", rank, count);
    smpi_mpi_waitall(count, requests, MPI_STATUS_IGNORE);
    for (i = 0; i < count; i++) {
      if (requests[i] != MPI_REQUEST_NULL)
        smpi_mpi_request_free(&requests[i]);
    }
    xbt_free(requests);
  }
  return err;
}
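/* Example of the posting order above, assuming 4 ranks: rank 1 posts receives
 * from peers 2, 3, 0 (forward order) and then sends to peers 0, 3, 2 (reverse
 * order). */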
int smpi_coll_basic_alltoallv(void *sendbuf, int *sendcounts,
                              int *senddisps, MPI_Datatype sendtype,
                              void *recvbuf, int *recvcounts,
                              int *recvdisps, MPI_Datatype recvtype,
                              MPI_Comm comm)
{
  int system_tag = 889;
  int i, rank, size, err, count;
  MPI_Aint lb = 0, sendext = 0, recvext = 0;
  MPI_Request *requests;

  rank = smpi_comm_rank(comm);
  size = smpi_comm_size(comm);
  XBT_DEBUG("<%d> algorithm basic_alltoallv() called.", rank);
  err = smpi_datatype_extent(sendtype, &lb, &sendext);
  err = smpi_datatype_extent(recvtype, &lb, &recvext);
  /* Local copy from self */
  err =
      smpi_datatype_copy((char *)sendbuf + senddisps[rank] * sendext,
                         sendcounts[rank], sendtype,
                         (char *)recvbuf + recvdisps[rank] * recvext,
                         recvcounts[rank], recvtype);
  if (err == MPI_SUCCESS && size > 1) {
    /* Initiate all send/recv to/from others. */
    requests = xbt_new(MPI_Request, 2 * (size - 1));
    count = 0;
    /* Create all receives that will be posted first */
    for (i = 0; i < size; ++i) {
      if (i == rank || recvcounts[i] == 0) {
        XBT_DEBUG
            ("<%d> skip request creation [src = %d, recvcounts[src] = %d]",
             rank, i, recvcounts[i]);
        continue;
      }
      requests[count] =
          smpi_irecv_init((char *)recvbuf + recvdisps[i] * recvext,
                          recvcounts[i], recvtype, i, system_tag, comm);
      count++;
    }
    /* Now create all sends */
    for (i = 0; i < size; ++i) {
      if (i == rank || sendcounts[i] == 0) {
        XBT_DEBUG
            ("<%d> skip request creation [dst = %d, sendcounts[dst] = %d]",
             rank, i, sendcounts[i]);
        continue;
      }
      requests[count] =
          smpi_isend_init((char *)sendbuf + senddisps[i] * sendext,
                          sendcounts[i], sendtype, i, system_tag, comm);
      count++;
    }
    /* Wait for them all. */
    smpi_mpi_startall(count, requests);
    XBT_DEBUG("<%d> wait for %d requests", rank, count);
    smpi_mpi_waitall(count, requests, MPI_STATUS_IGNORE);
    for (i = 0; i < count; i++) {
      if (requests[i] != MPI_REQUEST_NULL)
        smpi_mpi_request_free(&requests[i]);
    }
    xbt_free(requests);
  }
  return err;
}