/* Copyright (c) 2013-2022. The SimGrid Team.
 * All rights reserved.                                                     */

/* This program is free software; you can redistribute it and/or modify it
 * under the terms of the license (GNU LGPL) which comes with this package. */

#include "../colls_private.hpp"
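/* allgather__SMP_NTS: hierarchical, SMP-aware allgather.
 * Phase 1: intra-node allgather of the node-local blocks.
 * Phase 2: the node leaders (intra_rank == 0) circulate node-sized block sets
 *          along a logical ring, pipelining every received set down a linear
 *          intra-node broadcast chain (rank -> rank + 1). */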
namespace simgrid {
namespace smpi {

int allgather__SMP_NTS(const void *sbuf, int scount,
                       MPI_Datatype stype, void *rbuf,
                       int rcount, MPI_Datatype rtype,
                       MPI_Comm comm)
{
  int src, dst, comm_size, rank;
  comm_size = comm->size();
  rank = comm->rank();
  MPI_Aint rextent, sextent;
  rextent = rtype->get_extent();
  sextent = stype->get_extent();
  int tag = COLL_TAG_ALLGATHER;

  int i, send_offset, recv_offset;
  int intra_rank, inter_rank;
  if (comm->get_leaders_comm() == MPI_COMM_NULL) {
    comm->init_smp();
  }
  int num_core = 1;
  if (comm->is_uniform()) {
    num_core = comm->get_intra_comm()->size();
  }
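  /* Ranks are decomposed assuming a node-contiguous layout:
   * rank = inter_rank * num_core + intra_rank. */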
  intra_rank = rank % num_core;
  inter_rank = rank / num_core;
  int inter_comm_size = (comm_size + num_core - 1) / num_core;
  int num_core_in_current_smp = num_core;
  if (comm_size % num_core)
    throw std::invalid_argument(xbt::string_printf(
        "allgather SMP NTS algorithm can't be used with non multiple of NUM_CORE=%d number of processes!",
        num_core));
  /* for a too small number of processes, use the default implementation */
  if (comm_size <= num_core) {
    XBT_INFO("MPI_allgather_SMP_NTS: comm_size <= num_core, use default MPI_allgather.");
    allgather__default(sbuf, scount, stype, rbuf, rcount, rtype, comm);
    return MPI_SUCCESS;
  }
  // the last SMP node may have fewer running processes than the others
  if (inter_rank == (inter_comm_size - 1)) {
    num_core_in_current_smp = comm_size - (inter_rank * num_core);
  }
  // copy the corresponding message from sbuf to rbuf
  recv_offset = rank * rextent * rcount;
  Request::sendrecv(sbuf, scount, stype, rank, tag,
                    ((char *) rbuf + recv_offset), rcount, rtype, rank, tag, comm,
                    MPI_STATUS_IGNORE);
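  /* the sendrecv-to-self above acts as a datatype-aware local copy of this
   * rank's own block into its slot of rbuf */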
  // intra-node allgather: each rank exchanges its block with every other rank
  // of its node, with partners rotated by i; afterwards every rank of the
  // node holds all node-local blocks
  for (i = 1; i < num_core_in_current_smp; i++) {
    dst = (inter_rank * num_core) + (intra_rank + i) % num_core_in_current_smp;
    src = (inter_rank * num_core) +
          (intra_rank - i + num_core_in_current_smp) % num_core_in_current_smp;
    recv_offset = src * rextent * rcount;

    Request::sendrecv(sbuf, scount, stype, dst, tag,
                      ((char *) rbuf + recv_offset), rcount, rtype, src, tag, comm,
                      MPI_STATUS_IGNORE);
  }
  // INTER-SMP-ALLGATHER
  // Every root of an SMP node posts INTER-Sendrecvs, then does an INTRA-Bcast
  // for each received message
  // Use the logical ring algorithm

  // root of each SMP node
  if (intra_rank == 0) {
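    /* The leader pre-posts one receive per remote node (tag + i orders the
     * ring steps), then forwards each received node-block both to the next
     * leader on the ring and down the intra-node chain via rank + 1. */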
    auto* rrequest_array = new MPI_Request[inter_comm_size - 1];
    auto* srequest_array = new MPI_Request[inter_comm_size - 1];

    src = ((inter_rank - 1 + inter_comm_size) % inter_comm_size) * num_core;
    dst = ((inter_rank + 1) % inter_comm_size) * num_core;
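    /* Pre-posting every receive up front lets each incoming node-block match
     * a buffer immediately and overlaps the successive ring steps. */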
    // post all inter Irecvs
    for (i = 0; i < inter_comm_size - 1; i++) {
      recv_offset = ((inter_rank - i - 1 + inter_comm_size) % inter_comm_size) *
                    num_core * sextent * scount;
      rrequest_array[i] = Request::irecv((char *)rbuf + recv_offset, rcount * num_core,
                                         rtype, src, tag + i, comm);
    }
    // send the first message: the leader seeds the ring with its own node's
    // blocks (send_offset reduces to inter_rank * num_core * sextent * scount)
    send_offset = ((inter_rank + inter_comm_size) % inter_comm_size) *
                  num_core * sextent * scount;
    srequest_array[0] = Request::isend((char *)rbuf + send_offset, scount * num_core,
                                       stype, dst, tag, comm);
    // loop: recv-inter, send-inter, send-intra (linear bcast)
    for (i = 0; i < inter_comm_size - 2; i++) {
      recv_offset = ((inter_rank - i - 1 + inter_comm_size) % inter_comm_size) *
                    num_core * sextent * scount;
      Request::wait(&rrequest_array[i], MPI_STATUS_IGNORE);
      srequest_array[i + 1] = Request::isend((char *)rbuf + recv_offset, scount * num_core,
                                             stype, dst, tag + i + 1, comm);
      if (num_core_in_current_smp > 1) {
        Request::send((char *)rbuf + recv_offset, scount * num_core,
                      stype, (rank + 1), tag + i + 1, comm);
      }
    }
    // receive the last message and forward it intra-node
    // (at this point i == inter_comm_size - 2)
    recv_offset = ((inter_rank - i - 1 + inter_comm_size) % inter_comm_size) *
                  num_core * sextent * scount;
    Request::wait(&rrequest_array[i], MPI_STATUS_IGNORE);
    if (num_core_in_current_smp > 1) {
      Request::send((char *)rbuf + recv_offset, scount * num_core,
                    stype, (rank + 1), tag + i + 1, comm);
    }

    Request::waitall(inter_comm_size - 1, srequest_array, MPI_STATUSES_IGNORE);
    delete[] rrequest_array;
    delete[] srequest_array;
  }
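  /* Non-leader ranks only take part in the intra-node broadcast chain: at
   * each ring step they receive one node-block from rank - 1 and, unless
   * they are the last rank of the node, relay it to rank + 1. */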
  // last rank of each SMP node
  else if (intra_rank == (num_core_in_current_smp - 1)) {
    for (i = 0; i < inter_comm_size - 1; i++) {
      recv_offset = ((inter_rank - i - 1 + inter_comm_size) % inter_comm_size) *
                    num_core * sextent * scount;
      Request::recv((char *) rbuf + recv_offset, (rcount * num_core), rtype,
                    rank - 1, tag + i + 1, comm, MPI_STATUS_IGNORE);
    }
  }
  // intermediate rank of each SMP node
  else {
    for (i = 0; i < inter_comm_size - 1; i++) {
      recv_offset = ((inter_rank - i - 1 + inter_comm_size) % inter_comm_size) *
                    num_core * sextent * scount;
      Request::recv((char *) rbuf + recv_offset, (rcount * num_core), rtype,
                    rank - 1, tag + i + 1, comm, MPI_STATUS_IGNORE);
      Request::send((char *) rbuf + recv_offset, (scount * num_core), stype,
                    (rank + 1), tag + i + 1, comm);
    }
  }

  return MPI_SUCCESS;
}

} // namespace smpi
} // namespace simgrid