/* smpi_coll.c -- various optimized routines for collectives                 */

/* Copyright (c) 2009-2014. The SimGrid Team.
 * All rights reserved.                                                      */

/* This program is free software; you can redistribute it and/or modify it
 * under the terms of the license (GNU LGPL) which comes with this package. */
#include <stdio.h>
#include <string.h>

#include "private.h"            /* SMPI internals (smpi_comm_rank, smpi_mpi_send, ...) */
#include "colls/colls.h"
#include "simgrid/sg_config.h"
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_coll, smpi,
                                "Logging specific to SMPI (coll)");
s_mpi_coll_description_t mpi_coll_gather_description[] = {
  {"default",
   "gather default collective",
   smpi_mpi_gather},
COLL_GATHERS(COLL_DESCRIPTION, COLL_COMMA),
  {NULL, NULL, NULL}      /* this array must be NULL terminated */
};
s_mpi_coll_description_t mpi_coll_allgather_description[] = {
  {"default",
   "allgather default collective",
   smpi_mpi_allgather},
COLL_ALLGATHERS(COLL_DESCRIPTION, COLL_COMMA),
  {NULL, NULL, NULL}      /* this array must be NULL terminated */
};
s_mpi_coll_description_t mpi_coll_allgatherv_description[] = {
  {"default",
   "allgatherv default collective",
   smpi_mpi_allgatherv},
COLL_ALLGATHERVS(COLL_DESCRIPTION, COLL_COMMA),
  {NULL, NULL, NULL}      /* this array must be NULL terminated */
};
s_mpi_coll_description_t mpi_coll_allreduce_description[] = {
  {"default",
   "allreduce default collective",
   smpi_mpi_allreduce},
COLL_ALLREDUCES(COLL_DESCRIPTION, COLL_COMMA),
  {NULL, NULL, NULL}      /* this array must be NULL terminated */
};
s_mpi_coll_description_t mpi_coll_reduce_scatter_description[] = {
  {"default",
   "reduce_scatter default collective",
   smpi_mpi_reduce_scatter},
COLL_REDUCE_SCATTERS(COLL_DESCRIPTION, COLL_COMMA),
  {NULL, NULL, NULL}      /* this array must be NULL terminated */
};
s_mpi_coll_description_t mpi_coll_scatter_description[] = {
  {"default",
   "scatter default collective",
   smpi_mpi_scatter},
COLL_SCATTERS(COLL_DESCRIPTION, COLL_COMMA),
  {NULL, NULL, NULL}      /* this array must be NULL terminated */
};
s_mpi_coll_description_t mpi_coll_barrier_description[] = {
  {"default",
   "barrier default collective",
   smpi_mpi_barrier},
COLL_BARRIERS(COLL_DESCRIPTION, COLL_COMMA),
  {NULL, NULL, NULL}      /* this array must be NULL terminated */
};
s_mpi_coll_description_t mpi_coll_alltoall_description[] = {
  {"default",
   "Ompi alltoall default collective",
   smpi_coll_tuned_alltoall_ompi2},
COLL_ALLTOALLS(COLL_DESCRIPTION, COLL_COMMA),
  {"bruck",
   "Alltoall Bruck (SG) collective",
   smpi_coll_tuned_alltoall_bruck},
  {"basic_linear",
   "Alltoall basic linear (SG) collective",
   smpi_coll_tuned_alltoall_basic_linear},
  {NULL, NULL, NULL}      /* this array must be NULL terminated */
};
s_mpi_coll_description_t mpi_coll_alltoallv_description[] = {
  {"default",
   "Ompi alltoallv default collective",
   smpi_coll_basic_alltoallv},
COLL_ALLTOALLVS(COLL_DESCRIPTION, COLL_COMMA),
  {NULL, NULL, NULL}      /* this array must be NULL terminated */
};
s_mpi_coll_description_t mpi_coll_bcast_description[] = {
  {"default",
   "bcast default collective",
   smpi_mpi_bcast},
COLL_BCASTS(COLL_DESCRIPTION, COLL_COMMA),
  {NULL, NULL, NULL}      /* this array must be NULL terminated */
};
s_mpi_coll_description_t mpi_coll_reduce_description[] = {
  {"default",
   "reduce default collective",
   smpi_mpi_reduce},
COLL_REDUCES(COLL_DESCRIPTION, COLL_COMMA),
  {NULL, NULL, NULL}      /* this array must be NULL terminated */
};
/** Displays the long description of all registered algorithms for a given collective */
void coll_help(const char *category, s_mpi_coll_description_t * table)
{
  int i;
  printf("Long description of the %s models accepted by this simulator:\n",
         category);
  for (i = 0; table[i].name; i++)
    printf("  %s: %s\n", table[i].name, table[i].description);
}
/** Returns the index in 'table' of the algorithm called 'name', dying with the
 *  list of valid names if none matches */
int find_coll_description(s_mpi_coll_description_t * table,
                          char *name, const char *desc)
{
  int i;
  int from_selector = 0;
  char *name_list = NULL;

  if (name == NULL) {   //no argument provided, use active selector's algorithm
    name = (char *) sg_cfg_get_string("smpi/coll_selector");
    from_selector = 1;
  }
  for (i = 0; table[i].name; i++)
    if (!strcmp(name, table[i].name)) {
      if (strcmp(table[i].name, "default"))
        XBT_INFO("Switch to algorithm %s for collective %s", table[i].name, desc);
      return i;
    }

  if (from_selector) {
    // collective seems not handled by the active selector, try with default one
    name = (char *) "default";
    for (i = 0; table[i].name; i++)
      if (!strcmp(name, table[i].name)) {
        return i;
      }
  }

  if (!table[0].name)
    xbt_die("No collective is valid for '%s'! This is a bug.", name);
  name_list = xbt_strdup(table[0].name);
  for (i = 1; table[i].name; i++) {
    name_list = xbt_realloc(name_list,
                            strlen(name_list) + strlen(table[i].name) + 3);
    strcat(name_list, ", ");
    strcat(name_list, table[i].name);
  }
  xbt_die("Collective '%s' is invalid! Valid collectives are: %s.", name, name_list);
  return -1;
}
int (*mpi_coll_gather_fun)(void *, int, MPI_Datatype, void *, int, MPI_Datatype, int root, MPI_Comm);
int (*mpi_coll_allgather_fun)(void *, int, MPI_Datatype, void *, int, MPI_Datatype, MPI_Comm);
int (*mpi_coll_allgatherv_fun)(void *, int, MPI_Datatype, void *, int *, int *, MPI_Datatype, MPI_Comm);
int (*mpi_coll_allreduce_fun)(void *sbuf, void *rbuf, int rcount, MPI_Datatype dtype, MPI_Op op, MPI_Comm comm);
int (*mpi_coll_alltoall_fun)(void *, int, MPI_Datatype, void *, int, MPI_Datatype, MPI_Comm);
int (*mpi_coll_alltoallv_fun)(void *, int *, int *, MPI_Datatype, void *, int *, int *, MPI_Datatype, MPI_Comm);
int (*mpi_coll_bcast_fun)(void *buf, int count, MPI_Datatype datatype, int root, MPI_Comm com);
int (*mpi_coll_reduce_fun)(void *buf, void *rbuf, int count, MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm);
int (*mpi_coll_reduce_scatter_fun)(void *sbuf, void *rbuf, int *rcounts, MPI_Datatype dtype, MPI_Op op, MPI_Comm comm);
int (*mpi_coll_scatter_fun)(void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf, int recvcount, MPI_Datatype recvtype, int root, MPI_Comm comm);
int (*mpi_coll_barrier_fun)(MPI_Comm comm);
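
/* Illustrative sketch, not part of the original file: roughly how the tables
 * above and find_coll_description() are used to bind one of the
 * mpi_coll_*_fun pointers at startup (the real wiring lives elsewhere in
 * SMPI, e.g. smpi_global.c). The config key "smpi/reduce" and the field name
 * 'coll' of s_mpi_coll_description_t are assumptions in this example.
 *
 *   int reduce_id = find_coll_description(mpi_coll_reduce_description,
 *                                         (char *) sg_cfg_get_string("smpi/reduce"),
 *                                         "reduce");
 *   mpi_coll_reduce_fun =
 *       (int (*)(void *, void *, int, MPI_Datatype, MPI_Op, int, MPI_Comm))
 *       mpi_coll_reduce_description[reduce_id].coll;
 */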
typedef struct s_proc_tree {
  int PROCTREE_A;               /* arity of the tree */
  int numChildren;
  int *child;
  int parent;
  int root;
  int isRoot;
} *proc_tree_t;

/* alloc and init */
static proc_tree_t alloc_tree(int arity)
{
  proc_tree_t tree = xbt_new(struct s_proc_tree, 1);
  int i;

  tree->PROCTREE_A = arity;
  tree->isRoot = 0;
  tree->numChildren = 0;
  tree->child = xbt_new(int, arity);
  for (i = 0; i < arity; i++) {
    tree->child[i] = -1;
  }
  return tree;
}

/* free */
static void free_tree(proc_tree_t tree)
{
  xbt_free(tree->child);
  xbt_free(tree);
}
/**
 * Build the tree depending on a process rank (index) and the group size (extent)
 * (see the worked example after this function)
 * @param root the rank of the tree root
 * @param rank the rank of the calling process
 * @param size the total number of processes
 **/
static void build_tree(int root, int rank, int size, proc_tree_t * tree)
{
  int i;
  int index = (rank - root + size) % size;
  int firstChildIdx = index * (*tree)->PROCTREE_A + 1;

  (*tree)->root = root;
  for (i = 0; i < (*tree)->PROCTREE_A && firstChildIdx + i < size; i++) {
    (*tree)->child[i] = (firstChildIdx + i + root) % size;
    (*tree)->numChildren++;
  }
  if (rank == root) {
    (*tree)->isRoot = 1;
  } else {
    (*tree)->isRoot = 0;
    (*tree)->parent = (((index - 1) / (*tree)->PROCTREE_A) + root) % size;
  }
}
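
/* Worked example (illustrative): root = 0, size = 7, arity = 2.
 * Ranks are renumbered relative to the root (index = (rank - root + size) % size)
 * and the children of index i are indices i*arity+1 .. i*arity+arity:
 *
 *            0
 *          /   \
 *         1     2
 *        / \   / \
 *       3   4 5   6
 *
 * e.g. rank 1: firstChildIdx = 3, children = {3, 4}, parent = 0;
 *      rank 3: firstChildIdx = 7 >= size, so no children, parent = 1.
 */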
/* broadcast a buffer down the tree: receive from the parent, then forward to the children */
static void tree_bcast(void *buf, int count, MPI_Datatype datatype,
                       MPI_Comm comm, proc_tree_t tree)
{
  int system_tag = COLL_TAG_BCAST;
  int rank, i;
  MPI_Request *requests;

  rank = smpi_comm_rank(comm);
  /* wait for data from my parent in the tree */
  if (!tree->isRoot) {
    XBT_DEBUG("<%d> tree_bcast(): i am not root: recv from %d, tag=%d)",
              rank, tree->parent, system_tag + rank);
    smpi_mpi_recv(buf, count, datatype, tree->parent, system_tag + rank,
                  comm, MPI_STATUS_IGNORE);
  }
  requests = xbt_new(MPI_Request, tree->numChildren);
  XBT_DEBUG("<%d> creates %d requests (1 per child)", rank,
            tree->numChildren);
  /* initiates sends to ranks lower in the tree */
  for (i = 0; i < tree->numChildren; i++) {
    if (tree->child[i] == -1) {
      requests[i] = MPI_REQUEST_NULL;
    } else {
      XBT_DEBUG("<%d> send to <%d>, tag=%d", rank, tree->child[i],
                system_tag + tree->child[i]);
      requests[i] =
          smpi_isend_init(buf, count, datatype, tree->child[i],
                          system_tag + tree->child[i], comm);
    }
  }
  smpi_mpi_startall(tree->numChildren, requests);
  smpi_mpi_waitall(tree->numChildren, requests, MPI_STATUS_IGNORE);
  for (i = 0; i < tree->numChildren; i++) {
    if (requests[i] != MPI_REQUEST_NULL)
      smpi_mpi_request_free(&requests[i]);
  }
  xbt_free(requests);
}
/* reverse of tree_bcast: every process sends up to its parent ("anti-broadcast") */
static void tree_antibcast(void *buf, int count, MPI_Datatype datatype,
                           MPI_Comm comm, proc_tree_t tree)
{
  int system_tag = COLL_TAG_BCAST;
  int rank, i;
  MPI_Request *requests;

  rank = smpi_comm_rank(comm);
  // everyone sends to its parent, except root.
  if (!tree->isRoot) {
    XBT_DEBUG("<%d> tree_antibcast(): i am not root: send to %d, tag=%d)",
              rank, tree->parent, system_tag + rank);
    smpi_mpi_send(buf, count, datatype, tree->parent, system_tag + rank,
                  comm);
  }
  //every one receives as many messages as it has children
  requests = xbt_new(MPI_Request, tree->numChildren);
  XBT_DEBUG("<%d> creates %d requests (1 per child)", rank,
            tree->numChildren);
  for (i = 0; i < tree->numChildren; i++) {
    if (tree->child[i] == -1) {
      requests[i] = MPI_REQUEST_NULL;
    } else {
      XBT_DEBUG("<%d> recv from <%d>, tag=%d", rank, tree->child[i],
                system_tag + tree->child[i]);
      requests[i] =
          smpi_irecv_init(buf, count, datatype, tree->child[i],
                          system_tag + tree->child[i], comm);
    }
  }
  smpi_mpi_startall(tree->numChildren, requests);
  smpi_mpi_waitall(tree->numChildren, requests, MPI_STATUS_IGNORE);
  for (i = 0; i < tree->numChildren; i++) {
    if (requests[i] != MPI_REQUEST_NULL)
      smpi_mpi_request_free(&requests[i]);
  }
  xbt_free(requests);
}
/**
 * bcast with a binary, ternary, or whatever tree ..
 **/
void nary_tree_bcast(void *buf, int count, MPI_Datatype datatype, int root,
                     MPI_Comm comm, int arity)
{
  proc_tree_t tree = alloc_tree(arity);
  int rank, size;

  rank = smpi_comm_rank(comm);
  size = smpi_comm_size(comm);
  build_tree(root, rank, size, &tree);
  tree_bcast(buf, count, datatype, comm, tree);
  free_tree(tree);
}
/**
 * barrier with a binary, ternary, or whatever tree ..
 **/
void nary_tree_barrier(MPI_Comm comm, int arity)
{
  proc_tree_t tree = alloc_tree(arity);
  int rank, size;
  char dummy = 0;               /* payload value does not matter */

  rank = smpi_comm_rank(comm);
  size = smpi_comm_size(comm);
  build_tree(0, rank, size, &tree);
  tree_antibcast(&dummy, 1, MPI_CHAR, comm, tree);
  tree_bcast(&dummy, 1, MPI_CHAR, comm, tree);
  free_tree(tree);
}
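
/* Usage sketch (illustrative, not from the original file): any code holding
 * an MPI_Comm can request a tree-based broadcast or barrier of a chosen
 * arity; the root and arity values below are arbitrary examples.
 *
 *   int token = 42;
 *   nary_tree_bcast(&token, 1, MPI_INT, 0, comm, 2);   // binary tree, root 0
 *   nary_tree_barrier(comm, 4);                        // 4-ary tree barrier
 */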
int smpi_coll_tuned_alltoall_ompi2(void *sendbuf, int sendcount,
                                   MPI_Datatype sendtype, void *recvbuf,
                                   int recvcount, MPI_Datatype recvtype,
                                   MPI_Comm comm)
{
  int size, sendsize;

  size = smpi_comm_size(comm);
  sendsize = smpi_datatype_size(sendtype) * sendcount;
  if (sendsize < 200 && size > 12) {
    return smpi_coll_tuned_alltoall_bruck(sendbuf, sendcount, sendtype,
                                          recvbuf, recvcount, recvtype,
                                          comm);
  } else if (sendsize < 3000) {
    return smpi_coll_tuned_alltoall_basic_linear(sendbuf, sendcount, sendtype,
                                                 recvbuf, recvcount, recvtype,
                                                 comm);
  } else {
    return smpi_coll_tuned_alltoall_ring(sendbuf, sendcount, sendtype,
                                         recvbuf, recvcount, recvtype,
                                         comm);
  }
}
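
/* Selection summary (derived from the code above): per-rank send size
 * < 200 bytes with more than 12 ranks -> bruck; per-rank send size
 * < 3000 bytes -> basic_linear; otherwise -> ring. */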
/**
 * Alltoall Bruck
 *
 * Openmpi calls this routine when the message size sent to each rank < 2000 bytes and size < 12
 * FIXME: uh, check smpi_pmpi again, but this routine is called for > 12, not
 * less...
 **/
int smpi_coll_tuned_alltoall_bruck(void *sendbuf, int sendcount,
                                   MPI_Datatype sendtype, void *recvbuf,
                                   int recvcount, MPI_Datatype recvtype,
                                   MPI_Comm comm)
{
  int system_tag = 777;
  int i, rank, size, err, count;
  MPI_Aint lb;
  MPI_Aint sendext = 0;
  MPI_Aint recvext = 0;
  MPI_Request *requests;

  // FIXME: check implementation
  rank = smpi_comm_rank(comm);
  size = smpi_comm_size(comm);
  XBT_DEBUG("<%d> algorithm alltoall_bruck() called.", rank);
  smpi_datatype_extent(sendtype, &lb, &sendext);
  smpi_datatype_extent(recvtype, &lb, &recvext);
  /* Local copy from self */
  err =
      smpi_datatype_copy((char *)sendbuf + rank * sendcount * sendext,
                         sendcount, sendtype,
                         (char *)recvbuf + rank * recvcount * recvext,
                         recvcount, recvtype);
  if (err == MPI_SUCCESS && size > 1) {
    /* Initiate all send/recv to/from others. */
    requests = xbt_new(MPI_Request, 2 * (size - 1));
    count = 0;
    /* Create all receives that will be posted first */
    for (i = 0; i < size; ++i) {
      if (i == rank) {
        XBT_DEBUG("<%d> skip request creation [src = %d, recvcount = %d]",
                  rank, i, recvcount);
        continue;
      }
      requests[count] =
          smpi_irecv_init((char *)recvbuf + i * recvcount * recvext, recvcount,
                          recvtype, i, system_tag, comm);
      count++;
    }
    /* Now create all sends */
    for (i = 0; i < size; ++i) {
      if (i == rank) {
        XBT_DEBUG("<%d> skip request creation [dst = %d, sendcount = %d]",
                  rank, i, sendcount);
        continue;
      }
      requests[count] =
          smpi_isend_init((char *)sendbuf + i * sendcount * sendext, sendcount,
                          sendtype, i, system_tag, comm);
      count++;
    }
    /* Wait for them all. */
    smpi_mpi_startall(count, requests);
    XBT_DEBUG("<%d> wait for %d requests", rank, count);
    smpi_mpi_waitall(count, requests, MPI_STATUS_IGNORE);
    for (i = 0; i < count; i++) {
      if (requests[i] != MPI_REQUEST_NULL)
        smpi_mpi_request_free(&requests[i]);
    }
    xbt_free(requests);
  }
  return err;
}
/**
 * Alltoall basic_linear (STARMPI:alltoall-simple)
 **/
int smpi_coll_tuned_alltoall_basic_linear(void *sendbuf, int sendcount,
                                          MPI_Datatype sendtype,
                                          void *recvbuf, int recvcount,
                                          MPI_Datatype recvtype,
                                          MPI_Comm comm)
{
  int system_tag = 888;
  int i, rank, size, err, count;
  MPI_Aint lb = 0, sendext = 0, recvext = 0;
  MPI_Request *requests;

  rank = smpi_comm_rank(comm);
  size = smpi_comm_size(comm);
  XBT_DEBUG("<%d> algorithm alltoall_basic_linear() called.", rank);
  smpi_datatype_extent(sendtype, &lb, &sendext);
  smpi_datatype_extent(recvtype, &lb, &recvext);
  /* simple optimization */
  err = smpi_datatype_copy((char *)sendbuf + rank * sendcount * sendext,
                           sendcount, sendtype,
                           (char *)recvbuf + rank * recvcount * recvext,
                           recvcount, recvtype);
  if (err == MPI_SUCCESS && size > 1) {
    /* Initiate all send/recv to/from others. */
    requests = xbt_new(MPI_Request, 2 * (size - 1));
    /* Post all receives first -- a simple optimization */
    count = 0;
    for (i = (rank + 1) % size; i != rank; i = (i + 1) % size) {
      requests[count] =
          smpi_irecv_init((char *)recvbuf + i * recvcount * recvext, recvcount,
                          recvtype, i, system_tag, comm);
      count++;
    }
    /* Now post all sends in reverse order
     * - We would like to minimize the search time through message queue
     *   when messages actually arrive in the order in which they were posted.
     * TODO: check the previous assertion
     */
    for (i = (rank + size - 1) % size; i != rank; i = (i + size - 1) % size) {
      requests[count] =
          smpi_isend_init((char *)sendbuf + i * sendcount * sendext, sendcount,
                          sendtype, i, system_tag, comm);
      count++;
    }
    /* Wait for them all. */
    smpi_mpi_startall(count, requests);
    XBT_DEBUG("<%d> wait for %d requests", rank, count);
    smpi_mpi_waitall(count, requests, MPI_STATUS_IGNORE);
    for (i = 0; i < count; i++) {
      if (requests[i] != MPI_REQUEST_NULL)
        smpi_mpi_request_free(&requests[i]);
    }
    xbt_free(requests);
  }
  return err;
}
int smpi_coll_basic_alltoallv(void *sendbuf, int *sendcounts,
                              int *senddisps, MPI_Datatype sendtype,
                              void *recvbuf, int *recvcounts,
                              int *recvdisps, MPI_Datatype recvtype,
                              MPI_Comm comm)
{
  int system_tag = 889;
  int i, rank, size, err, count;
  MPI_Aint lb = 0, sendext = 0, recvext = 0;
  MPI_Request *requests;

  rank = smpi_comm_rank(comm);
  size = smpi_comm_size(comm);
  XBT_DEBUG("<%d> algorithm basic_alltoallv() called.", rank);
  smpi_datatype_extent(sendtype, &lb, &sendext);
  smpi_datatype_extent(recvtype, &lb, &recvext);
  /* Local copy from self */
  err =
      smpi_datatype_copy((char *)sendbuf + senddisps[rank] * sendext,
                         sendcounts[rank], sendtype,
                         (char *)recvbuf + recvdisps[rank] * recvext,
                         recvcounts[rank], recvtype);
  if (err == MPI_SUCCESS && size > 1) {
    /* Initiate all send/recv to/from others. */
    requests = xbt_new(MPI_Request, 2 * (size - 1));
    count = 0;
    /* Create all receives that will be posted first */
    for (i = 0; i < size; ++i) {
      if (i == rank || recvcounts[i] == 0) {
        XBT_DEBUG
            ("<%d> skip request creation [src = %d, recvcounts[src] = %d]",
             rank, i, recvcounts[i]);
        continue;
      }
      requests[count] =
          smpi_irecv_init((char *)recvbuf + recvdisps[i] * recvext,
                          recvcounts[i], recvtype, i, system_tag, comm);
      count++;
    }
    /* Now create all sends */
    for (i = 0; i < size; ++i) {
      if (i == rank || sendcounts[i] == 0) {
        XBT_DEBUG
            ("<%d> skip request creation [dst = %d, sendcounts[dst] = %d]",
             rank, i, sendcounts[i]);
        continue;
      }
      requests[count] =
          smpi_isend_init((char *)sendbuf + senddisps[i] * sendext,
                          sendcounts[i], sendtype, i, system_tag, comm);
      count++;
    }
    /* Wait for them all. */
    smpi_mpi_startall(count, requests);
    XBT_DEBUG("<%d> wait for %d requests", rank, count);
    smpi_mpi_waitall(count, requests, MPI_STATUS_IGNORE);
    for (i = 0; i < count; i++) {
      if (requests[i] != MPI_REQUEST_NULL)
        smpi_mpi_request_free(&requests[i]);
    }
    xbt_free(requests);
  }
  return err;
}