/* smpi_coll.c -- various optimized routines for collectives */

/* Copyright (c) 2009-2015. The SimGrid Team.
 * All rights reserved. */

/* This program is free software; you can redistribute it and/or modify it
 * under the terms of the license (GNU LGPL) which comes with this package. */

#include <stdio.h>
#include <string.h>

#include "private.h"
#include "colls/colls.h"
#include "simgrid/sg_config.h"

XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_coll, smpi, "Logging specific to SMPI (coll)");

s_mpi_coll_description_t mpi_coll_gather_description[] = {
  {"default", "gather default collective", reinterpret_cast<void*>(&smpi_mpi_gather)},
  COLL_GATHERS(COLL_DESCRIPTION, COLL_COMMA),
  {nullptr, nullptr, nullptr} /* this array must be nullptr terminated */
};

s_mpi_coll_description_t mpi_coll_allgather_description[] = {
  {"default", "allgather default collective", reinterpret_cast<void*>(&smpi_mpi_allgather)},
  COLL_ALLGATHERS(COLL_DESCRIPTION, COLL_COMMA),
  {nullptr, nullptr, nullptr} /* this array must be nullptr terminated */
};

s_mpi_coll_description_t mpi_coll_allgatherv_description[] = {
  {"default", "allgatherv default collective", reinterpret_cast<void*>(&smpi_mpi_allgatherv)},
  COLL_ALLGATHERVS(COLL_DESCRIPTION, COLL_COMMA),
  {nullptr, nullptr, nullptr} /* this array must be nullptr terminated */
};

s_mpi_coll_description_t mpi_coll_allreduce_description[] = {
  {"default", "allreduce default collective", reinterpret_cast<void*>(&smpi_mpi_allreduce)},
  COLL_ALLREDUCES(COLL_DESCRIPTION, COLL_COMMA),
  {nullptr, nullptr, nullptr} /* this array must be nullptr terminated */
};

s_mpi_coll_description_t mpi_coll_reduce_scatter_description[] = {
  {"default", "reduce_scatter default collective", reinterpret_cast<void*>(&smpi_mpi_reduce_scatter)},
  COLL_REDUCE_SCATTERS(COLL_DESCRIPTION, COLL_COMMA),
  {nullptr, nullptr, nullptr} /* this array must be nullptr terminated */
};

s_mpi_coll_description_t mpi_coll_scatter_description[] = {
  {"default", "scatter default collective", reinterpret_cast<void*>(&smpi_mpi_scatter)},
  COLL_SCATTERS(COLL_DESCRIPTION, COLL_COMMA),
  {nullptr, nullptr, nullptr} /* this array must be nullptr terminated */
};

s_mpi_coll_description_t mpi_coll_barrier_description[] = {
  {"default", "barrier default collective", reinterpret_cast<void*>(&smpi_mpi_barrier)},
  COLL_BARRIERS(COLL_DESCRIPTION, COLL_COMMA),
  {nullptr, nullptr, nullptr} /* this array must be nullptr terminated */
};

s_mpi_coll_description_t mpi_coll_alltoall_description[] = {
  {"default", "Ompi alltoall default collective", reinterpret_cast<void*>(&smpi_coll_tuned_alltoall_ompi2)},
  COLL_ALLTOALLS(COLL_DESCRIPTION, COLL_COMMA),
  {"bruck", "Alltoall Bruck (SG) collective", reinterpret_cast<void*>(&smpi_coll_tuned_alltoall_bruck)},
  {"basic_linear", "Alltoall basic linear (SG) collective", reinterpret_cast<void*>(&smpi_coll_tuned_alltoall_basic_linear)},
  {nullptr, nullptr, nullptr} /* this array must be nullptr terminated */
};

s_mpi_coll_description_t mpi_coll_alltoallv_description[] = {
  {"default", "Ompi alltoallv default collective", reinterpret_cast<void*>(&smpi_coll_basic_alltoallv)},
  COLL_ALLTOALLVS(COLL_DESCRIPTION, COLL_COMMA),
  {nullptr, nullptr, nullptr} /* this array must be nullptr terminated */
};

s_mpi_coll_description_t mpi_coll_bcast_description[] = {
  {"default", "bcast default collective", reinterpret_cast<void*>(&smpi_mpi_bcast)},
  COLL_BCASTS(COLL_DESCRIPTION, COLL_COMMA),
  {nullptr, nullptr, nullptr} /* this array must be nullptr terminated */
};

s_mpi_coll_description_t mpi_coll_reduce_description[] = {
  {"default", "reduce default collective", reinterpret_cast<void*>(&smpi_mpi_reduce)},
  COLL_REDUCES(COLL_DESCRIPTION, COLL_COMMA),
  {nullptr, nullptr, nullptr} /* this array must be nullptr terminated */
};

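/* Each COLL_<OP>S(COLL_DESCRIPTION, COLL_COMMA) expansion above contributes one
 * {name, description, function} triple per algorithm registered in colls/colls.h.
 * As an illustrative sketch only (the exact expansion is defined by the macros in
 * colls/colls.h, and the description string below is an assumption), an expanded
 * alltoall entry would look like:
 *
 *   {"ring", "alltoall ring collective", reinterpret_cast<void*>(&smpi_coll_tuned_alltoall_ring)},
 */
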
/** Displays the long description of all registered collective algorithms of the given category */
void coll_help(const char *category, s_mpi_coll_description_t * table)
{
  printf("Long description of the %s models accepted by this simulator:\n", category);
  for (int i = 0; table[i].name; i++)
    printf("  %s: %s\n", table[i].name, table[i].description);
}

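/* Illustrative call: list every registered alltoall algorithm on stdout.
 *
 *   coll_help("alltoall", mpi_coll_alltoall_description);
 */
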
int find_coll_description(s_mpi_coll_description_t * table, char *name, const char *desc)
{
  char *name_list = nullptr;

  if (name == nullptr || name[0] == '\0') {
    /* no argument provided, use the algorithm of the active selector */
    name = static_cast<char*>(xbt_cfg_get_string("smpi/coll-selector"));
  }

  for (int i = 0; table[i].name; i++)
    if (!strcmp(name, table[i].name)) {
      if (strcmp(table[i].name, "default"))
        XBT_INFO("Switch to algorithm %s for collective %s", table[i].name, desc);
      return i;
    }

  /* the collective does not seem to be handled by the active selector; fall back to the default one */
  for (int i = 0; table[i].name; i++)
    if (!strcmp("default", table[i].name)) {
      return i;
    }

  if (!table[0].name)
    xbt_die("No collective is valid for '%s'! This is a bug.", name);

  /* build the list of valid names for the error message */
  name_list = xbt_strdup(table[0].name);
  for (int i = 1; table[i].name; i++) {
    name_list = static_cast<char*>(xbt_realloc(name_list, strlen(name_list) + strlen(table[i].name) + 3));
    strncat(name_list, ", ", 2);
    strncat(name_list, table[i].name, strlen(table[i].name));
  }
  xbt_die("Collective '%s' is invalid! Valid collectives are: %s.", name, name_list);
  return -1; /* not reached */
}

int (*mpi_coll_gather_fun)(void *, int, MPI_Datatype, void *, int, MPI_Datatype, int root, MPI_Comm);
int (*mpi_coll_allgather_fun)(void *, int, MPI_Datatype, void *, int, MPI_Datatype, MPI_Comm);
int (*mpi_coll_allgatherv_fun)(void *, int, MPI_Datatype, void *, int *, int *, MPI_Datatype, MPI_Comm);
int (*mpi_coll_allreduce_fun)(void *sbuf, void *rbuf, int rcount, MPI_Datatype dtype, MPI_Op op, MPI_Comm comm);
int (*mpi_coll_alltoall_fun)(void *, int, MPI_Datatype, void *, int, MPI_Datatype, MPI_Comm);
int (*mpi_coll_alltoallv_fun)(void *, int *, int *, MPI_Datatype, void *, int *, int *, MPI_Datatype, MPI_Comm);
int (*mpi_coll_bcast_fun)(void *buf, int count, MPI_Datatype datatype, int root, MPI_Comm com);
int (*mpi_coll_reduce_fun)(void *buf, void *rbuf, int count, MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm);
int (*mpi_coll_reduce_scatter_fun)(void *sbuf, void *rbuf, int *rcounts, MPI_Datatype dtype, MPI_Op op, MPI_Comm comm);
int (*mpi_coll_scatter_fun)(void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf, int recvcount,
                            MPI_Datatype recvtype, int root, MPI_Comm comm);
int (*mpi_coll_barrier_fun)(MPI_Comm comm);
void (*smpi_coll_cleanup_callback)();

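/* How these pieces fit together, as a minimal sketch (illustrative, not the
 * actual SMPI initialization code; the "smpi/alltoall" option name and the
 * .coll field name are assumptions here):
 *
 *   int id = find_coll_description(mpi_coll_alltoall_description,
 *                                  static_cast<char*>(xbt_cfg_get_string("smpi/alltoall")), "alltoall");
 *   mpi_coll_alltoall_fun = reinterpret_cast<int (*)(void *, int, MPI_Datatype, void *, int, MPI_Datatype,
 *                                                    MPI_Comm)>(mpi_coll_alltoall_description[id].coll);
 */
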
int smpi_coll_tuned_alltoall_ompi2(void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf,
                                   int recvcount, MPI_Datatype recvtype, MPI_Comm comm)
{
  int size = smpi_comm_size(comm);
  int sendsize = smpi_datatype_size(sendtype) * sendcount;
  if (sendsize < 200 && size > 12) {
    return smpi_coll_tuned_alltoall_bruck(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, comm);
  } else if (sendsize < 3000) {
    return smpi_coll_tuned_alltoall_basic_linear(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, comm);
  } else {
    return smpi_coll_tuned_alltoall_ring(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, comm);
  }
}

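/* Worked example of the thresholds above (numbers are illustrative): with
 * 16 ranks and MPI_INT (4 bytes), sendcount = 1 gives sendsize = 4 < 200 and
 * size = 16 > 12, so bruck is chosen; sendcount = 500 gives sendsize = 2000,
 * which is < 3000, selecting basic_linear; larger payloads fall through to ring. */
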
/**
 * Alltoall Bruck
 *
 * Openmpi calls this routine when the message size sent to each rank < 2000 bytes and size < 12
 * FIXME: uh, check smpi_pmpi again, but this routine is called for > 12, not less...
 **/
int smpi_coll_tuned_alltoall_bruck(void *sendbuf, int sendcount, MPI_Datatype sendtype,
                                   void *recvbuf, int recvcount, MPI_Datatype recvtype, MPI_Comm comm)
{
  int system_tag = 777;
  int i;
  int count = 0;
  MPI_Aint lb = 0;
  MPI_Aint sendext = 0;
  MPI_Aint recvext = 0;
  MPI_Request *requests;

  // FIXME: check implementation
  int rank = smpi_comm_rank(comm);
  int size = smpi_comm_size(comm);
  XBT_DEBUG("<%d> algorithm alltoall_bruck() called.", rank);
  smpi_datatype_extent(sendtype, &lb, &sendext);
  smpi_datatype_extent(recvtype, &lb, &recvext);
  /* Local copy from self */
  int err = smpi_datatype_copy(static_cast<char *>(sendbuf) + rank * sendcount * sendext, sendcount, sendtype,
                               static_cast<char *>(recvbuf) + rank * recvcount * recvext, recvcount, recvtype);
  if (err == MPI_SUCCESS && size > 1) {
    /* Initiate all send/recv to/from others. */
    requests = xbt_new(MPI_Request, 2 * (size - 1));

    /* Create all receives that will be posted first */
    for (i = 0; i < size; ++i) {
      if (i != rank) {
        requests[count] = smpi_irecv_init(static_cast<char *>(recvbuf) + i * recvcount * recvext, recvcount,
                                          recvtype, i, system_tag, comm);
        count++;
      } else {
        XBT_DEBUG("<%d> skip request creation [src = %d, recvcount = %d]", rank, i, recvcount);
      }
    }

    /* Now create all sends */
    for (i = 0; i < size; ++i) {
      if (i != rank) {
        requests[count] = smpi_isend_init(static_cast<char *>(sendbuf) + i * sendcount * sendext, sendcount,
                                          sendtype, i, system_tag, comm);
        count++;
      } else {
        XBT_DEBUG("<%d> skip request creation [dst = %d, sendcount = %d]", rank, i, sendcount);
      }
    }

    /* Wait for them all. */
    smpi_mpi_startall(count, requests);
    XBT_DEBUG("<%d> wait for %d requests", rank, count);
    smpi_mpi_waitall(count, requests, MPI_STATUS_IGNORE);
    for (i = 0; i < count; i++) {
      if (requests[i] != MPI_REQUEST_NULL)
        smpi_mpi_request_free(&requests[i]);
    }
    xbt_free(requests);
  }
  return err;
}

/**
 * Alltoall basic_linear (STARMPI: alltoall-simple)
 **/
int smpi_coll_tuned_alltoall_basic_linear(void *sendbuf, int sendcount, MPI_Datatype sendtype,
                                          void *recvbuf, int recvcount, MPI_Datatype recvtype, MPI_Comm comm)
{
  int system_tag = 888;
  int i;
  int count = 0;
  MPI_Aint lb = 0, sendext = 0, recvext = 0;
  MPI_Request *requests;

  int rank = smpi_comm_rank(comm);
  int size = smpi_comm_size(comm);
  XBT_DEBUG("<%d> algorithm alltoall_basic_linear() called.", rank);
  smpi_datatype_extent(sendtype, &lb, &sendext);
  smpi_datatype_extent(recvtype, &lb, &recvext);
  /* simple optimization: the copy to self needs no communication */
  int err = smpi_datatype_copy(static_cast<char *>(sendbuf) + rank * sendcount * sendext, sendcount, sendtype,
                               static_cast<char *>(recvbuf) + rank * recvcount * recvext, recvcount, recvtype);
  if (err == MPI_SUCCESS && size > 1) {
    /* Initiate all send/recv to/from others. */
    requests = xbt_new(MPI_Request, 2 * (size - 1));

    /* Post all receives first -- a simple optimization */
    for (i = (rank + 1) % size; i != rank; i = (i + 1) % size) {
      requests[count] = smpi_irecv_init(static_cast<char *>(recvbuf) + i * recvcount * recvext, recvcount,
                                        recvtype, i, system_tag, comm);
      count++;
    }

    /* Now post all sends in reverse order
     * - We would like to minimize the search time through the message queue
     *   when messages actually arrive in the order in which they were posted.
     * TODO: check the previous assertion (see the worked ordering example after this function)
     */
    for (i = (rank + size - 1) % size; i != rank; i = (i + size - 1) % size) {
      requests[count] = smpi_isend_init(static_cast<char *>(sendbuf) + i * sendcount * sendext, sendcount,
                                        sendtype, i, system_tag, comm);
      count++;
    }

    /* Wait for them all. */
    smpi_mpi_startall(count, requests);
    XBT_DEBUG("<%d> wait for %d requests", rank, count);
    smpi_mpi_waitall(count, requests, MPI_STATUS_IGNORE);
    for (i = 0; i < count; i++) {
      if (requests[i] != MPI_REQUEST_NULL)
        smpi_mpi_request_free(&requests[i]);
    }
    xbt_free(requests);
  }
  return err;
}

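/* Worked ordering example for the loops above (illustrative): with size = 4 and
 * rank = 1, receives are posted for ranks 2, 3, 0 (ascending from rank + 1) while
 * sends are posted for ranks 0, 3, 2 (descending from rank - 1); this reverse
 * send order is the queue-search optimization that the TODO above questions. */
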
int smpi_coll_basic_alltoallv(void *sendbuf, int *sendcounts, int *senddisps, MPI_Datatype sendtype,
                              void *recvbuf, int *recvcounts, int *recvdisps, MPI_Datatype recvtype, MPI_Comm comm)
{
  int system_tag = 889;
  int i;
  int count = 0;
  MPI_Aint lb = 0;
  MPI_Aint sendext = 0;
  MPI_Aint recvext = 0;
  MPI_Request *requests;

  int rank = smpi_comm_rank(comm);
  int size = smpi_comm_size(comm);
  XBT_DEBUG("<%d> algorithm basic_alltoallv() called.", rank);
  smpi_datatype_extent(sendtype, &lb, &sendext);
  smpi_datatype_extent(recvtype, &lb, &recvext);
  /* Local copy from self */
  int err = smpi_datatype_copy(static_cast<char *>(sendbuf) + senddisps[rank] * sendext, sendcounts[rank], sendtype,
                               static_cast<char *>(recvbuf) + recvdisps[rank] * recvext, recvcounts[rank], recvtype);
  if (err == MPI_SUCCESS && size > 1) {
    /* Initiate all send/recv to/from others. */
    requests = xbt_new(MPI_Request, 2 * (size - 1));

    /* Create all receives that will be posted first */
    for (i = 0; i < size; ++i) {
      if (i != rank && recvcounts[i] != 0) {
        requests[count] = smpi_irecv_init(static_cast<char *>(recvbuf) + recvdisps[i] * recvext,
                                          recvcounts[i], recvtype, i, system_tag, comm);
        count++;
      } else {
        XBT_DEBUG("<%d> skip request creation [src = %d, recvcounts[src] = %d]", rank, i, recvcounts[i]);
      }
    }

    /* Now create all sends */
    for (i = 0; i < size; ++i) {
      if (i != rank && sendcounts[i] != 0) {
        requests[count] = smpi_isend_init(static_cast<char *>(sendbuf) + senddisps[i] * sendext,
                                          sendcounts[i], sendtype, i, system_tag, comm);
        count++;
      } else {
        XBT_DEBUG("<%d> skip request creation [dst = %d, sendcounts[dst] = %d]", rank, i, sendcounts[i]);
      }
    }

    /* Wait for them all. */
    smpi_mpi_startall(count, requests);
    XBT_DEBUG("<%d> wait for %d requests", rank, count);
    smpi_mpi_waitall(count, requests, MPI_STATUS_IGNORE);
    for (i = 0; i < count; i++) {
      if (requests[i] != MPI_REQUEST_NULL)
        smpi_mpi_request_free(&requests[i]);
    }
    xbt_free(requests);
  }
  return err;
}

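/* Displacement arithmetic example (illustrative): with recvtype = MPI_INT
 * (recvext = 4) and recvdisps = {0, 4, 8, 12}, the block received from rank 2
 * lands at recvbuf + recvdisps[2] * recvext = recvbuf + 32 bytes, i.e. the
 * displacements count elements of recvtype, not bytes. */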