/* Copyright (c) 2013-2022. The SimGrid Team.
 * All rights reserved. */

/* This program is free software; you can redistribute it and/or modify it
 * under the terms of the license (GNU LGPL) which comes with this package. */

#include "../colls_private.hpp"
/* IMPLEMENTED BY PITCH PATARASUK
   Non-topology-specific (however, the number of cores per node needs to be adjusted)
   all-reduce operation designed for SMP clusters.
   It uses 2-layer communication: binomial trees for both the intra-node and the
   inter-node communication. */

/* ** NOTE **
   Use -DMPICH2 if this code does not compile.
   The MPICH1 code also works on MPICH2 on our cluster, and the performance is similar.
   This code assumes a commutative and associative reduce operator (MPI_SUM, MPI_MAX, etc.).
*/
//#include <star-reduction.c>
/*
This function performs the all-reduce operation as follows:
1) binomial_tree reduce inside each SMP node
2) binomial_tree reduce (inter-communication) between the roots of the SMP nodes
3) binomial_tree bcast (inter-communication) between the roots of the SMP nodes
4) binomial_tree bcast inside each SMP node
*/
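/* Each of the four phases is a binomial tree, so a phase over p participants
   completes in ceil(log2(p)) point-to-point steps; the critical path is
   O(log(num_core) + log(inter_comm_size)) messages in total. */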
namespace simgrid {
namespace smpi {

int allreduce__smp_binomial(const void *send_buf, void *recv_buf,
                            int count, MPI_Datatype dtype,
                            MPI_Op op, MPI_Comm comm)
{
  int tag = COLL_TAG_ALLREDUCE;
  int mask, src, dst;
  MPI_Status status;

  if (comm->get_leaders_comm() == MPI_COMM_NULL) {
    comm->init_smp();
  }
  /* number of processes per SMP node; fall back to 1 when the topology is not uniform */
  int num_core = 1;
  if (comm->is_uniform()) {
    num_core = comm->get_intra_comm()->size();
  }
  int comm_size = comm->size();
  int rank = comm->rank();

  MPI_Aint extent, lb;
  dtype->extent(&lb, &extent);
  unsigned char* tmp_buf = smpi_get_tmp_sendbuffer(count * extent);
  /* compute intra and inter ranking */
  int intra_rank = rank % num_core;
  int inter_rank = rank / num_core;
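  /* Example: with num_core = 4, global rank 6 becomes intra_rank 2 on
     node (inter_rank) 1. */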
  /* number of processes taking part in the inter communication =>
     should equal the number of SMP nodes (machines) */
  int inter_comm_size = (comm_size + num_core - 1) / num_core;
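  /* Example: 10 processes on 4-core nodes give inter_comm_size = 3. */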
  /* copy input buffer to output buffer */
  Request::sendrecv(send_buf, count, dtype, rank, tag,
                    recv_buf, count, dtype, rank, tag, comm, &status);
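  /* The self send/receive acts as a local copy: recv_buf now holds this rank's
     own contribution and accumulates every partial result, while tmp_buf stages
     the incoming contributions of peers before they are folded in. */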
  /* start binomial reduce intra communication inside each SMP node */
  mask = 1;
  while (mask < num_core) {
    if ((mask & intra_rank) == 0) {
      src = (inter_rank * num_core) + (intra_rank | mask);
      if (src < comm_size) {
        Request::recv(tmp_buf, count, dtype, src, tag, comm, &status);
        if (op != MPI_OP_NULL)
          op->apply(tmp_buf, recv_buf, &count, dtype);
      }
    } else {
      dst = (inter_rank * num_core) + (intra_rank & (~mask));
      Request::send(recv_buf, count, dtype, dst, tag, comm);
      break;
    }
    mask <<= 1;
  }
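  /* The leader (intra_rank 0) of each SMP node now holds the partial result
     of its whole node. */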
  /* start binomial reduce inter-communication between the SMP nodes:
     each node has exactly one process (its leader) that talks to other nodes */
  if (intra_rank == 0) {
    mask = 1;
    while (mask < inter_comm_size) {
      if ((mask & inter_rank) == 0) {
        src = (inter_rank | mask) * num_core;
        if (src < comm_size) {
          Request::recv(tmp_buf, count, dtype, src, tag, comm, &status);
          if (op != MPI_OP_NULL)
            op->apply(tmp_buf, recv_buf, &count, dtype);
        }
      } else {
        dst = (inter_rank & (~mask)) * num_core;
        Request::send(recv_buf, count, dtype, dst, tag, comm);
        break;
      }
      mask <<= 1;
    }
  }
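  /* The leader of node 0, i.e. global rank 0, now holds the fully reduced value. */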
  /* start binomial broadcast inter-communication between the SMP nodes:
     again, only the per-node leaders communicate across nodes */
  if (intra_rank == 0) {
    mask = 1;
    while (mask < inter_comm_size) {
      if (inter_rank & mask) {
        src = (inter_rank - mask) * num_core;
        Request::recv(recv_buf, count, dtype, src, tag, comm, &status);
        break;
      }
      mask <<= 1;
    }
    mask >>= 1;

    while (mask > 0) {
      if (inter_rank < inter_comm_size) {
        dst = (inter_rank + mask) * num_core;
        if (dst < comm_size) {
          Request::send(recv_buf, count, dtype, dst, tag, comm);
        }
      }
      mask >>= 1;
    }
  }
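  /* Every node leader now holds the final value; it remains to spread it
     within each node. */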
  /* start binomial broadcast intra-communication inside each SMP node */
  int num_core_in_current_smp = num_core;
  if (inter_rank == (inter_comm_size - 1)) {
    /* the last node may be only partially filled */
    num_core_in_current_smp = comm_size - (inter_rank * num_core);
  }
  mask = 1;
  while (mask < num_core_in_current_smp) {
    if (intra_rank & mask) {
      src = (inter_rank * num_core) + (intra_rank - mask);
      Request::recv(recv_buf, count, dtype, src, tag, comm, &status);
      break;
    }
    mask <<= 1;
  }
  mask >>= 1;

  while (mask > 0) {
    dst = (inter_rank * num_core) + (intra_rank + mask);
    if (dst < comm_size) {
      Request::send(recv_buf, count, dtype, dst, tag, comm);
    }
    mask >>= 1;
  }
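  /* All ranks now hold the reduced value, which completes the all-reduce. */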
  smpi_free_tmp_buffer(tmp_buf);
  return MPI_SUCCESS;
}

} // namespace smpi
} // namespace simgrid