[simgrid.git] src/smpi/colls/smpi_coll.cpp
/* smpi_coll.cpp -- various optimized routines for collectives                */

/* Copyright (c) 2009-2022. The SimGrid Team. All rights reserved.          */

/* This program is free software; you can redistribute it and/or modify it
 * under the terms of the license (GNU LGPL) which comes with this package. */

#include "smpi_coll.hpp"
#include "private.hpp"
#include "smpi_comm.hpp"
#include "smpi_datatype.hpp"
#include "smpi_op.hpp"
#include "smpi_request.hpp"
#include "xbt/config.hpp"

#include <map>

XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_coll, smpi, "Logging specific to SMPI collectives.");

namespace simgrid {
namespace smpi {

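/* Table of all algorithms registered for each collective: for every collective name, the list of
 * (algorithm name, description, function pointer) entries among which one is selected at runtime
 * by set_collectives(). */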
std::map<std::string, std::vector<s_mpi_coll_description_t>, std::less<>> smpi_coll_descriptions(
    {{std::string("gather"),
      {{"default", "gather default collective", (void*)gather__default},
       {"ompi", "gather ompi collective", (void*)gather__ompi},
       {"ompi_basic_linear", "gather ompi_basic_linear collective", (void*)gather__ompi_basic_linear},
       {"ompi_binomial", "gather ompi_binomial collective", (void*)gather__ompi_binomial},
       {"ompi_linear_sync", "gather ompi_linear_sync collective", (void*)gather__ompi_linear_sync},
       {"mpich", "gather mpich collective", (void*)gather__mpich},
       {"mvapich2", "gather mvapich2 collective", (void*)gather__mvapich2},
       {"mvapich2_two_level", "gather mvapich2_two_level collective", (void*)gather__mvapich2_two_level},
       {"impi", "gather impi collective", (void*)gather__impi},
       {"automatic", "gather automatic collective", (void*)gather__automatic}}},

     {"allgather",
      {{"default", "allgather default collective", (void*)allgather__default},
       {"2dmesh", "allgather 2dmesh collective", (void*)allgather__2dmesh},
       {"3dmesh", "allgather 3dmesh collective", (void*)allgather__3dmesh},
       {"bruck", "allgather bruck collective", (void*)allgather__bruck},
       {"GB", "allgather GB collective", (void*)allgather__GB},
       {"loosely_lr", "allgather loosely_lr collective", (void*)allgather__loosely_lr},
       {"NTSLR", "allgather NTSLR collective", (void*)allgather__NTSLR},
       {"NTSLR_NB", "allgather NTSLR_NB collective", (void*)allgather__NTSLR_NB},
       {"pair", "allgather pair collective", (void*)allgather__pair},
       {"rdb", "allgather rdb collective", (void*)allgather__rdb},
       {"rhv", "allgather rhv collective", (void*)allgather__rhv},
       {"ring", "allgather ring collective", (void*)allgather__ring},
       {"SMP_NTS", "allgather SMP_NTS collective", (void*)allgather__SMP_NTS},
       {"smp_simple", "allgather smp_simple collective", (void*)allgather__smp_simple},
       {"spreading_simple", "allgather spreading_simple collective", (void*)allgather__spreading_simple},
       {"ompi", "allgather ompi collective", (void*)allgather__ompi},
       {"ompi_neighborexchange", "allgather ompi_neighborexchange collective", (void*)allgather__ompi_neighborexchange},
       {"mvapich2", "allgather mvapich2 collective", (void*)allgather__mvapich2},
       {"mvapich2_smp", "allgather mvapich2_smp collective", (void*)allgather__mvapich2_smp},
       {"mpich", "allgather mpich collective", (void*)allgather__mpich},
       {"impi", "allgather impi collective", (void*)allgather__impi},
       {"automatic", "allgather automatic collective", (void*)allgather__automatic}}},

     {"allgatherv",
      {{"default", "allgatherv default collective", (void*)allgatherv__default},
       {"GB", "allgatherv GB collective", (void*)allgatherv__GB},
       {"pair", "allgatherv pair collective", (void*)allgatherv__pair},
       {"ring", "allgatherv ring collective", (void*)allgatherv__ring},
       {"ompi", "allgatherv ompi collective", (void*)allgatherv__ompi},
       {"ompi_neighborexchange", "allgatherv ompi_neighborexchange collective",
        (void*)allgatherv__ompi_neighborexchange},
       {"ompi_bruck", "allgatherv ompi_bruck collective", (void*)allgatherv__ompi_bruck},
       {"mpich", "allgatherv mpich collective", (void*)allgatherv__mpich},
       {"mpich_rdb", "allgatherv mpich_rdb collective", (void*)allgatherv__mpich_rdb},
       {"mpich_ring", "allgatherv mpich_ring collective", (void*)allgatherv__mpich_ring},
       {"mvapich2", "allgatherv mvapich2 collective", (void*)allgatherv__mvapich2},
       {"impi", "allgatherv impi collective", (void*)allgatherv__impi},
       {"automatic", "allgatherv automatic collective", (void*)allgatherv__automatic}}},

     {"allreduce",
      {{"default", "allreduce default collective", (void*)allreduce__default},
       {"lr", "allreduce lr collective", (void*)allreduce__lr},
       {"rab1", "allreduce rab1 collective", (void*)allreduce__rab1},
       {"rab2", "allreduce rab2 collective", (void*)allreduce__rab2},
       {"rab_rdb", "allreduce rab_rdb collective", (void*)allreduce__rab_rdb},
       {"rdb", "allreduce rdb collective", (void*)allreduce__rdb},
       {"smp_binomial", "allreduce smp_binomial collective", (void*)allreduce__smp_binomial},
       {"smp_binomial_pipeline", "allreduce smp_binomial_pipeline collective", (void*)allreduce__smp_binomial_pipeline},
       {"smp_rdb", "allreduce smp_rdb collective", (void*)allreduce__smp_rdb},
       {"smp_rsag", "allreduce smp_rsag collective", (void*)allreduce__smp_rsag},
       {"smp_rsag_lr", "allreduce smp_rsag_lr collective", (void*)allreduce__smp_rsag_lr},
       {"smp_rsag_rab", "allreduce smp_rsag_rab collective", (void*)allreduce__smp_rsag_rab},
       {"redbcast", "allreduce redbcast collective", (void*)allreduce__redbcast},
       {"ompi", "allreduce ompi collective", (void*)allreduce__ompi},
       {"ompi_ring_segmented", "allreduce ompi_ring_segmented collective", (void*)allreduce__ompi_ring_segmented},
       {"mpich", "allreduce mpich collective", (void*)allreduce__mpich},
       {"mvapich2", "allreduce mvapich2 collective", (void*)allreduce__mvapich2},
       {"mvapich2_rs", "allreduce mvapich2_rs collective", (void*)allreduce__mvapich2_rs},
       {"mvapich2_two_level", "allreduce mvapich2_two_level collective", (void*)allreduce__mvapich2_two_level},
       {"impi", "allreduce impi collective", (void*)allreduce__impi},
       {"rab", "allreduce rab collective", (void*)allreduce__rab},
       {"automatic", "allreduce automatic collective", (void*)allreduce__automatic}}},

     {"reduce_scatter",
      {{"default", "reduce_scatter default collective", (void*)reduce_scatter__default},
       {"ompi", "reduce_scatter ompi collective", (void*)reduce_scatter__ompi},
       {"ompi_basic_recursivehalving", "reduce_scatter ompi_basic_recursivehalving collective",
        (void*)reduce_scatter__ompi_basic_recursivehalving},
       {"ompi_ring", "reduce_scatter ompi_ring collective", (void*)reduce_scatter__ompi_ring},
       {"ompi_butterfly", "reduce_scatter ompi_butterfly collective", (void*)reduce_scatter__ompi_butterfly},
       {"mpich", "reduce_scatter mpich collective", (void*)reduce_scatter__mpich},
       {"mpich_pair", "reduce_scatter mpich_pair collective", (void*)reduce_scatter__mpich_pair},
       {"mpich_rdb", "reduce_scatter mpich_rdb collective", (void*)reduce_scatter__mpich_rdb},
       {"mpich_noncomm", "reduce_scatter mpich_noncomm collective", (void*)reduce_scatter__mpich_noncomm},
       {"mvapich2", "reduce_scatter mvapich2 collective", (void*)reduce_scatter__mvapich2},
       {"impi", "reduce_scatter impi collective", (void*)reduce_scatter__impi},
       {"automatic", "reduce_scatter automatic collective", (void*)reduce_scatter__automatic}}},

     {"scatter",
      {{"default", "scatter default collective", (void*)scatter__default},
       {"ompi", "scatter ompi collective", (void*)scatter__ompi},
       {"ompi_basic_linear", "scatter ompi_basic_linear collective", (void*)scatter__ompi_basic_linear},
       {"ompi_linear_nb", "scatter ompi_linear nonblocking collective", (void*)scatter__ompi_linear_nb},
       {"ompi_binomial", "scatter ompi_binomial collective", (void*)scatter__ompi_binomial},
       {"mpich", "scatter mpich collective", (void*)scatter__mpich},
       {"mvapich2", "scatter mvapich2 collective", (void*)scatter__mvapich2},
       {"mvapich2_two_level_binomial", "scatter mvapich2_two_level_binomial collective",
        (void*)scatter__mvapich2_two_level_binomial},
       {"mvapich2_two_level_direct", "scatter mvapich2_two_level_direct collective",
        (void*)scatter__mvapich2_two_level_direct},
       {"impi", "scatter impi collective", (void*)scatter__impi},
       {"automatic", "scatter automatic collective", (void*)scatter__automatic}}},

     {"barrier",
      {{"default", "barrier default collective", (void*)barrier__default},
       {"ompi", "barrier ompi collective", (void*)barrier__ompi},
       {"ompi_basic_linear", "barrier ompi_basic_linear collective", (void*)barrier__ompi_basic_linear},
       {"ompi_two_procs", "barrier ompi_two_procs collective", (void*)barrier__ompi_two_procs},
       {"ompi_tree", "barrier ompi_tree collective", (void*)barrier__ompi_tree},
       {"ompi_bruck", "barrier ompi_bruck collective", (void*)barrier__ompi_bruck},
       {"ompi_recursivedoubling", "barrier ompi_recursivedoubling collective", (void*)barrier__ompi_recursivedoubling},
       {"ompi_doublering", "barrier ompi_doublering collective", (void*)barrier__ompi_doublering},
       {"mpich_smp", "barrier mpich_smp collective", (void*)barrier__mpich_smp},
       {"mpich", "barrier mpich collective", (void*)barrier__mpich},
       {"mvapich2_pair", "barrier mvapich2_pair collective", (void*)barrier__mvapich2_pair},
       {"mvapich2", "barrier mvapich2 collective", (void*)barrier__mvapich2},
       {"impi", "barrier impi collective", (void*)barrier__impi},
       {"automatic", "barrier automatic collective", (void*)barrier__automatic}}},

     {"alltoall",
      {{"default", "alltoall default collective", (void*)alltoall__default},
       {"2dmesh", "alltoall 2dmesh collective", (void*)alltoall__2dmesh},
       {"3dmesh", "alltoall 3dmesh collective", (void*)alltoall__3dmesh},
       {"basic_linear", "alltoall basic_linear collective", (void*)alltoall__basic_linear},
       {"bruck", "alltoall bruck collective", (void*)alltoall__bruck},
       {"pair", "alltoall pair collective", (void*)alltoall__pair},
       {"pair_rma", "alltoall pair_rma collective", (void*)alltoall__pair_rma},
       {"pair_light_barrier", "alltoall pair_light_barrier collective", (void*)alltoall__pair_light_barrier},
       {"pair_mpi_barrier", "alltoall pair_mpi_barrier collective", (void*)alltoall__pair_mpi_barrier},
       {"pair_one_barrier", "alltoall pair_one_barrier collective", (void*)alltoall__pair_one_barrier},
       {"rdb", "alltoall rdb collective", (void*)alltoall__rdb},
       {"ring", "alltoall ring collective", (void*)alltoall__ring},
       {"ring_light_barrier", "alltoall ring_light_barrier collective", (void*)alltoall__ring_light_barrier},
       {"ring_mpi_barrier", "alltoall ring_mpi_barrier collective", (void*)alltoall__ring_mpi_barrier},
       {"ring_one_barrier", "alltoall ring_one_barrier collective", (void*)alltoall__ring_one_barrier},
       {"mvapich2", "alltoall mvapich2 collective", (void*)alltoall__mvapich2},
       {"mvapich2_scatter_dest", "alltoall mvapich2_scatter_dest collective", (void*)alltoall__mvapich2_scatter_dest},
       {"ompi", "alltoall ompi collective", (void*)alltoall__ompi},
       {"mpich", "alltoall mpich collective", (void*)alltoall__mpich},
       {"impi", "alltoall impi collective", (void*)alltoall__impi},
       {"automatic", "alltoall automatic collective", (void*)alltoall__automatic}}},

     {"alltoallv",
      {{"default", "alltoallv default collective", (void*)alltoallv__default},
       {"bruck", "alltoallv bruck collective", (void*)alltoallv__bruck},
       {"pair", "alltoallv pair collective", (void*)alltoallv__pair},
       {"pair_light_barrier", "alltoallv pair_light_barrier collective", (void*)alltoallv__pair_light_barrier},
       {"pair_mpi_barrier", "alltoallv pair_mpi_barrier collective", (void*)alltoallv__pair_mpi_barrier},
       {"pair_one_barrier", "alltoallv pair_one_barrier collective", (void*)alltoallv__pair_one_barrier},
       {"ring", "alltoallv ring collective", (void*)alltoallv__ring},
       {"ring_light_barrier", "alltoallv ring_light_barrier collective", (void*)alltoallv__ring_light_barrier},
       {"ring_mpi_barrier", "alltoallv ring_mpi_barrier collective", (void*)alltoallv__ring_mpi_barrier},
       {"ring_one_barrier", "alltoallv ring_one_barrier collective", (void*)alltoallv__ring_one_barrier},
       {"ompi", "alltoallv ompi collective", (void*)alltoallv__ompi},
       {"mpich", "alltoallv mpich collective", (void*)alltoallv__mpich},
       {"ompi_basic_linear", "alltoallv ompi_basic_linear collective", (void*)alltoallv__ompi_basic_linear},
       {"mvapich2", "alltoallv mvapich2 collective", (void*)alltoallv__mvapich2},
       {"impi", "alltoallv impi collective", (void*)alltoallv__impi},
       {"automatic", "alltoallv automatic collective", (void*)alltoallv__automatic}}},

     {"bcast",
      {{"default", "bcast default collective", (void*)bcast__default},
       {"arrival_pattern_aware", "bcast arrival_pattern_aware collective", (void*)bcast__arrival_pattern_aware},
       {"arrival_pattern_aware_wait", "bcast arrival_pattern_aware_wait collective",
        (void*)bcast__arrival_pattern_aware_wait},
       {"arrival_scatter", "bcast arrival_scatter collective", (void*)bcast__arrival_scatter},
       {"binomial_tree", "bcast binomial_tree collective", (void*)bcast__binomial_tree},
       {"flattree", "bcast flattree collective", (void*)bcast__flattree},
       {"flattree_pipeline", "bcast flattree_pipeline collective", (void*)bcast__flattree_pipeline},
       {"NTSB", "bcast NTSB collective", (void*)bcast__NTSB},
       {"NTSL", "bcast NTSL collective", (void*)bcast__NTSL},
       {"NTSL_Isend", "bcast NTSL_Isend collective", (void*)bcast__NTSL_Isend},
       {"scatter_LR_allgather", "bcast scatter_LR_allgather collective", (void*)bcast__scatter_LR_allgather},
       {"scatter_rdb_allgather", "bcast scatter_rdb_allgather collective", (void*)bcast__scatter_rdb_allgather},
       {"SMP_binary", "bcast SMP_binary collective", (void*)bcast__SMP_binary},
       {"SMP_binomial", "bcast SMP_binomial collective", (void*)bcast__SMP_binomial},
       {"SMP_linear", "bcast SMP_linear collective", (void*)bcast__SMP_linear},
       {"ompi", "bcast ompi collective", (void*)bcast__ompi},
       {"ompi_split_bintree", "bcast ompi_split_bintree collective", (void*)bcast__ompi_split_bintree},
       {"ompi_pipeline", "bcast ompi_pipeline collective", (void*)bcast__ompi_pipeline},
       {"mpich", "bcast mpich collective", (void*)bcast__mpich},
       {"mvapich2", "bcast mvapich2 collective", (void*)bcast__mvapich2},
       {"mvapich2_inter_node", "bcast mvapich2_inter_node collective", (void*)bcast__mvapich2_inter_node},
       {"mvapich2_intra_node", "bcast mvapich2_intra_node collective", (void*)bcast__mvapich2_intra_node},
       {"mvapich2_knomial_intra_node", "bcast mvapich2_knomial_intra_node collective",
        (void*)bcast__mvapich2_knomial_intra_node},
       {"impi", "bcast impi collective", (void*)bcast__impi},
       {"automatic", "bcast automatic collective", (void*)bcast__automatic}}},

     {"reduce",
      {{"default", "reduce default collective", (void*)reduce__default},
       {"arrival_pattern_aware", "reduce arrival_pattern_aware collective", (void*)reduce__arrival_pattern_aware},
       {"binomial", "reduce binomial collective", (void*)reduce__binomial},
       {"flat_tree", "reduce flat_tree collective", (void*)reduce__flat_tree},
       {"NTSL", "reduce NTSL collective", (void*)reduce__NTSL},
       {"scatter_gather", "reduce scatter_gather collective", (void*)reduce__scatter_gather},
       {"ompi", "reduce ompi collective", (void*)reduce__ompi},
       {"ompi_chain", "reduce ompi_chain collective", (void*)reduce__ompi_chain},
       {"ompi_pipeline", "reduce ompi_pipeline collective", (void*)reduce__ompi_pipeline},
       {"ompi_basic_linear", "reduce ompi_basic_linear collective", (void*)reduce__ompi_basic_linear},
       {"ompi_in_order_binary", "reduce ompi_in_order_binary collective", (void*)reduce__ompi_in_order_binary},
       {"ompi_binary", "reduce ompi_binary collective", (void*)reduce__ompi_binary},
       {"ompi_binomial", "reduce ompi_binomial collective", (void*)reduce__ompi_binomial},
       {"mpich", "reduce mpich collective", (void*)reduce__mpich},
       {"mvapich2", "reduce mvapich2 collective", (void*)reduce__mvapich2},
       {"mvapich2_knomial", "reduce mvapich2_knomial collective", (void*)reduce__mvapich2_knomial},
       {"mvapich2_two_level", "reduce mvapich2_two_level collective", (void*)reduce__mvapich2_two_level},
       {"impi", "reduce impi collective", (void*)reduce__impi},
       {"rab", "reduce rab collective", (void*)reduce__rab},
       {"automatic", "reduce automatic collective", (void*)reduce__automatic}}}});

// Needed by the weird implementation of the automatic selector
std::vector<s_mpi_coll_description_t>* colls::get_smpi_coll_descriptions(const std::string& name)
{
  auto iter = smpi_coll_descriptions.find(name);
  xbt_assert(iter != smpi_coll_descriptions.end(), "No collective named %s. This is a bug.", name.c_str());
  return &iter->second;
}

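/* Look up the description of the requested algorithm for a given collective, aborting with the list of
 * valid algorithm names if it does not exist. */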
static s_mpi_coll_description_t* find_coll_description(const std::string& collective, const std::string& algo)
{
  std::vector<s_mpi_coll_description_t>* table = colls::get_smpi_coll_descriptions(collective);
  xbt_assert(not table->empty(), "No registered algorithm for collective '%s'! This is a bug.", collective.c_str());

  for (auto& desc : *table) {
    if (algo == desc.name) {
      if (desc.name != "default")
        XBT_INFO("Switch to algorithm %s for collective %s", desc.name.c_str(), collective.c_str());
      return &desc;
    }
  }

  std::string name_list = table->at(0).name;
  for (unsigned long i = 1; i < table->size(); i++)
    name_list = name_list + ", " + table->at(i).name;
  xbt_die("Collective '%s' has no algorithm '%s'! Valid algorithms: %s.", collective.c_str(), algo.c_str(),
          name_list.c_str());
}

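/* Function pointers holding the algorithm currently selected for each collective. They are filled by the
 * set_* setters generated below. */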
int (*colls::gather)(const void* send_buff, int send_count, MPI_Datatype send_type, void* recv_buff, int recv_count,
                     MPI_Datatype recv_type, int root, MPI_Comm comm);
int (*colls::allgather)(const void* send_buff, int send_count, MPI_Datatype send_type, void* recv_buff, int recv_count,
                        MPI_Datatype recv_type, MPI_Comm comm);
int (*colls::allgatherv)(const void* send_buff, int send_count, MPI_Datatype send_type, void* recv_buff,
                         const int* recv_count, const int* recv_disps, MPI_Datatype recv_type, MPI_Comm comm);
int (*colls::alltoall)(const void* send_buff, int send_count, MPI_Datatype send_type, void* recv_buff, int recv_count,
                       MPI_Datatype recv_type, MPI_Comm comm);
int (*colls::alltoallv)(const void* send_buff, const int* send_counts, const int* send_disps, MPI_Datatype send_type,
                        void* recv_buff, const int* recv_counts, const int* recv_disps, MPI_Datatype recv_type,
                        MPI_Comm comm);
int (*colls::bcast)(void* buf, int count, MPI_Datatype datatype, int root, MPI_Comm comm);
int (*colls::reduce)(const void* buf, void* rbuf, int count, MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm);
int (*colls::allreduce)(const void* sbuf, void* rbuf, int rcount, MPI_Datatype dtype, MPI_Op op, MPI_Comm comm);
int (*colls::reduce_scatter)(const void* sbuf, void* rbuf, const int* rcounts, MPI_Datatype dtype, MPI_Op op,
                             MPI_Comm comm);
int (*colls::scatter)(const void* sendbuf, int sendcount, MPI_Datatype sendtype, void* recvbuf, int recvcount,
                      MPI_Datatype recvtype, int root, MPI_Comm comm);
int (*colls::barrier)(MPI_Comm comm);

void (*colls::smpi_coll_cleanup_callback)();

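/* COLL_SETTER generates one colls::set_<collective>(name) function per collective: it looks up the
 * description of the requested algorithm and stores its function pointer in the corresponding
 * colls::<collective> pointer above. COLL_APPLY expands the macro once per collective signature. */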
#define COLL_SETTER(cat, ret, args, args2)                                                                             \
  void colls::_XBT_CONCAT(set_, cat)(const std::string& name)                                                          \
  {                                                                                                                    \
    auto desc = find_coll_description(_XBT_STRINGIFY(cat), name);                                                      \
    cat       = reinterpret_cast<ret(*) args>(desc->coll);                                                             \
    xbt_assert(cat != nullptr, "Collective " _XBT_STRINGIFY(cat) " set to nullptr!");                                  \
  }
COLL_APPLY(COLL_SETTER, COLL_GATHER_SIG, "")
COLL_APPLY(COLL_SETTER, COLL_ALLGATHER_SIG, "")
COLL_APPLY(COLL_SETTER, COLL_ALLGATHERV_SIG, "")
COLL_APPLY(COLL_SETTER, COLL_REDUCE_SIG, "")
COLL_APPLY(COLL_SETTER, COLL_ALLREDUCE_SIG, "")
COLL_APPLY(COLL_SETTER, COLL_REDUCE_SCATTER_SIG, "")
COLL_APPLY(COLL_SETTER, COLL_SCATTER_SIG, "")
COLL_APPLY(COLL_SETTER, COLL_BARRIER_SIG, "")
COLL_APPLY(COLL_SETTER, COLL_BCAST_SIG, "")
COLL_APPLY(COLL_SETTER, COLL_ALLTOALL_SIG, "")
COLL_APPLY(COLL_SETTER, COLL_ALLTOALLV_SIG, "")

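/* Apply the user's configuration: each collective follows its own "smpi/<collective>" option when set,
 * and falls back to the global "smpi/coll-selector" selector otherwise (e.g., passing
 * --cfg=smpi/coll-selector:mpich or --cfg=smpi/alltoall:ring on the command line). */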
void colls::set_collectives()
{
  std::string selector_name = simgrid::config::get_value<std::string>("smpi/coll-selector");
  if (selector_name.empty())
    selector_name = "default";

  std::pair<std::string, std::function<void(std::string)>> setter_callbacks[] = {
      {"gather", &colls::set_gather},         {"allgather", &colls::set_allgather},
      {"allgatherv", &colls::set_allgatherv}, {"allreduce", &colls::set_allreduce},
      {"alltoall", &colls::set_alltoall},     {"alltoallv", &colls::set_alltoallv},
      {"reduce", &colls::set_reduce},         {"reduce_scatter", &colls::set_reduce_scatter},
      {"scatter", &colls::set_scatter},       {"bcast", &colls::set_bcast},
      {"barrier", &colls::set_barrier}};

  for (auto& elem : setter_callbacks) {
    std::string name = simgrid::config::get_value<std::string>(("smpi/" + elem.first).c_str());
    if (name.empty())
      name = selector_name;

    (elem.second)(name);
  }
}

// Implementations of the single-algorithm collectives

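/* Blocking gatherv and scatterv are simple wrappers: start the nonblocking variant and wait for it. */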
int colls::gatherv(const void* sendbuf, int sendcount, MPI_Datatype sendtype, void* recvbuf, const int* recvcounts,
                   const int* displs, MPI_Datatype recvtype, int root, MPI_Comm comm)
{
  MPI_Request request;
  colls::igatherv(sendbuf, sendcount, sendtype, recvbuf, recvcounts, displs, recvtype, root, comm, &request, 0);
  return Request::wait(&request, MPI_STATUS_IGNORE);
}

int colls::scatterv(const void* sendbuf, const int* sendcounts, const int* displs, MPI_Datatype sendtype, void* recvbuf,
                    int recvcount, MPI_Datatype recvtype, int root, MPI_Comm comm)
{
  MPI_Request request;
  colls::iscatterv(sendbuf, sendcounts, displs, sendtype, recvbuf, recvcount, recvtype, root, comm, &request, 0);
  return Request::wait(&request, MPI_STATUS_IGNORE);
}

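/* Scan: each rank starts with its own contribution in recvbuf, posts receives from every lower rank and
 * sends its data to every higher rank, then folds the received contributions into recvbuf with 'op'.
 * For non-commutative operations the contributions are combined in rank order. */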
int colls::scan(const void* sendbuf, void* recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
{
  int system_tag = -888;
  MPI_Aint lb      = 0;
  MPI_Aint dataext = 0;

  int rank = comm->rank();
  int size = comm->size();

  datatype->extent(&lb, &dataext);

  // Local copy from self
  Datatype::copy(sendbuf, count, datatype, recvbuf, count, datatype);

  // Send/Recv buffers to/from others
  auto* requests = new MPI_Request[size - 1];
  auto** tmpbufs = new unsigned char*[rank];
  int index = 0;
  for (int other = 0; other < rank; other++) {
    tmpbufs[index] = smpi_get_tmp_sendbuffer(count * dataext);
    requests[index] = Request::irecv_init(tmpbufs[index], count, datatype, other, system_tag, comm);
    index++;
  }
  for (int other = rank + 1; other < size; other++) {
    requests[index] = Request::isend_init(sendbuf, count, datatype, other, system_tag, comm);
    index++;
  }
  // Wait for completion of all comms.
  Request::startall(size - 1, requests);

  if (op != MPI_OP_NULL && op->is_commutative()) {
    for (int other = 0; other < size - 1; other++) {
      index = Request::waitany(size - 1, requests, MPI_STATUS_IGNORE);
      if (index == MPI_UNDEFINED) {
        break;
      }
      if (index < rank) {
        // Request index is below rank: it's an irecv
        op->apply(tmpbufs[index], recvbuf, &count, datatype);
      }
    }
  } else {
    // Non-commutative case: wait in rank order and only apply the contributions received from lower ranks
    for (int other = 0; other < size - 1; other++) {
      Request::wait(&(requests[other]), MPI_STATUS_IGNORE);
      if (other < rank && op != MPI_OP_NULL) {
        op->apply(tmpbufs[other], recvbuf, &count, datatype);
      }
    }
  }
  for (index = 0; index < rank; index++) {
    smpi_free_tmp_buffer(tmpbufs[index]);
  }
  for (index = 0; index < size - 1; index++) {
    Request::unref(&requests[index]);
  }
  delete[] tmpbufs;
  delete[] requests;
  return MPI_SUCCESS;
}

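/* Exclusive scan: same communication pattern as scan, but the rank's own contribution is excluded. recvbuf
 * starts empty and only accumulates contributions from lower ranks, so rank 0's recvbuf is left untouched. */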
int colls::exscan(const void* sendbuf, void* recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
{
  int system_tag = -888;
  MPI_Aint lb      = 0;
  MPI_Aint dataext = 0;
  int recvbuf_is_empty = 1;
  int rank = comm->rank();
  int size = comm->size();

  datatype->extent(&lb, &dataext);

  // Send/Recv buffers to/from others
  auto* requests = new MPI_Request[size - 1];
  auto** tmpbufs = new unsigned char*[rank];
  int index = 0;
  for (int other = 0; other < rank; other++) {
    tmpbufs[index] = smpi_get_tmp_sendbuffer(count * dataext);
    requests[index] = Request::irecv_init(tmpbufs[index], count, datatype, other, system_tag, comm);
    index++;
  }
  for (int other = rank + 1; other < size; other++) {
    requests[index] = Request::isend_init(sendbuf, count, datatype, other, system_tag, comm);
    index++;
  }
  // Wait for completion of all comms.
  Request::startall(size - 1, requests);

  if (op != MPI_OP_NULL && op->is_commutative()) {
    for (int other = 0; other < size - 1; other++) {
      index = Request::waitany(size - 1, requests, MPI_STATUS_IGNORE);
      if (index == MPI_UNDEFINED) {
        break;
      }
      if (index < rank) {
        if (recvbuf_is_empty) {
          Datatype::copy(tmpbufs[index], count, datatype, recvbuf, count, datatype);
          recvbuf_is_empty = 0;
        } else
          // Request index is below rank: it's an irecv
          op->apply(tmpbufs[index], recvbuf, &count, datatype);
      }
    }
  } else {
    // Non-commutative case: wait in rank order and only apply the contributions received from lower ranks
    for (int other = 0; other < size - 1; other++) {
      Request::wait(&(requests[other]), MPI_STATUS_IGNORE);
      if (other < rank) {
        if (recvbuf_is_empty) {
          Datatype::copy(tmpbufs[other], count, datatype, recvbuf, count, datatype);
          recvbuf_is_empty = 0;
        } else if (op != MPI_OP_NULL)
          op->apply(tmpbufs[other], recvbuf, &count, datatype);
      }
    }
  }
  for (index = 0; index < rank; index++) {
    smpi_free_tmp_buffer(tmpbufs[index]);
  }
  for (index = 0; index < size - 1; index++) {
    Request::unref(&requests[index]);
  }
  delete[] tmpbufs;
  delete[] requests;
  return MPI_SUCCESS;
}

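/* Blocking alltoallw is also a wrapper around its nonblocking variant. */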
int colls::alltoallw(const void* sendbuf, const int* sendcounts, const int* senddisps, const MPI_Datatype* sendtypes,
                     void* recvbuf, const int* recvcounts, const int* recvdisps, const MPI_Datatype* recvtypes,
                     MPI_Comm comm)
{
  MPI_Request request;
  colls::ialltoallw(sendbuf, sendcounts, senddisps, sendtypes, recvbuf, recvcounts, recvdisps, recvtypes, comm,
                    &request, 0);
  return Request::wait(&request, MPI_STATUS_IGNORE);
}

} // namespace smpi
} // namespace simgrid