/* Copyright (c) 2013-2017. The SimGrid Team.
 * All rights reserved. */

/* This program is free software; you can redistribute it and/or modify it
 * under the terms of the license (GNU LGPL) which comes with this package. */

#include "../colls_private.hpp"
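/* SMP-aware MPI_Allgather in three phases:
 *  1. every rank copies its own block into rbuf;
 *  2. the ranks of each SMP node exchange their blocks, so each rank holds
 *     all node-local data;
 *  3. the node roots circulate the aggregated node blocks on a logical ring,
 *     forwarding every incoming block down an intra-node chain (rank + 1)
 *     so inter- and intra-node communication are pipelined.
 * The collective selector presumably exposes this as the "SMP_NTS" allgather
 * algorithm, matching the class name below. */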
namespace simgrid{
namespace smpi{

int Coll_allgather_SMP_NTS::allgather(void *sbuf, int scount,
                                      MPI_Datatype stype, void *rbuf,
                                      int rcount, MPI_Datatype rtype,
                                      MPI_Comm comm)
{
  int src, dst, comm_size, rank;
  comm_size = comm->size();
  rank = comm->rank();
  MPI_Aint rextent, sextent;
  rextent = rtype->get_extent();
  sextent = stype->get_extent();
  int tag = COLL_TAG_ALLGATHER;

  int i, send_offset, recv_offset;
  int intra_rank, inter_rank;
  if (comm->get_leaders_comm() == MPI_COMM_NULL) {
    comm->init_smp();
  }
  int num_core = 1;
  if (comm->is_uniform()) {
    num_core = comm->get_intra_comm()->size();
  }

  intra_rank = rank % num_core;
  inter_rank = rank / num_core;
  int inter_comm_size = (comm_size + num_core - 1) / num_core;
  int num_core_in_current_smp = num_core;
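  // Example of the decomposition above: with comm_size = 8 and num_core = 4,
  // ranks 0-3 form SMP node 0 and ranks 4-7 form SMP node 1, so intra_rank
  // cycles through 0..3, inter_rank is 0 or 1, and inter_comm_size = 2.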
  if (comm_size % num_core)
    THROWF(arg_error, 0, "allgather SMP NTS algorithm can't be used when the number of processes is not a multiple of NUM_CORE=%d!", num_core);

  /* for too small a number of processes, fall back on the default implementation */
  if (comm_size <= num_core) {
    XBT_WARN("MPI_allgather_SMP_NTS falls back on default MPI_allgather.");
    Coll_allgather_default::allgather(sbuf, scount, stype, rbuf, rcount, rtype, comm);
    return MPI_SUCCESS;
  }
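  // The fallback above covers the case where all processes fit on a single
  // SMP node, for which the inter-node ring below would be degenerate.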
  // the last SMP node may have fewer running processes than the others
  if (inter_rank == (inter_comm_size - 1)) {
    num_core_in_current_smp = comm_size - (inter_rank * num_core);
  }

  // copy the local block from sbuf to its slot in rbuf
  recv_offset = rank * rextent * rcount;
  Request::sendrecv(sbuf, scount, stype, rank, tag,
                    ((char *) rbuf + recv_offset), rcount, rtype, rank, tag, comm,
                    MPI_STATUS_IGNORE);
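  // The self-addressed sendrecv above copies the local block into place while
  // respecting the send and receive datatype descriptions, instead of using a
  // raw memcpy.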
  // intra-node exchange: every rank of the SMP node trades blocks in a
  // circular shift
  for (i = 1; i < num_core_in_current_smp; i++) {
    dst = (inter_rank * num_core) + (intra_rank + i) % num_core_in_current_smp;
    src = (inter_rank * num_core) + (intra_rank - i + num_core_in_current_smp) %
          num_core_in_current_smp;
    recv_offset = src * rextent * rcount;

    Request::sendrecv(sbuf, scount, stype, dst, tag,
                      ((char *) rbuf + recv_offset), rcount, rtype, src, tag, comm,
                      MPI_STATUS_IGNORE);
  }
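  // After this loop every rank of the node (not only the root) holds all
  // num_core_in_current_smp local blocks; the inter-node phase below only has
  // to circulate the other nodes' aggregated blocks.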
  // INTER-SMP-ALLGATHER
  // Every root of each SMP node posts the inter-node sendrecvs, then does an
  // intra-node broadcast of each received block.
  // The inter-node exchange uses a logical ring algorithm.
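  // Ring step i delivers the aggregated block of node
  // (inter_rank - i - 1) mod inter_comm_size; using tag + i per step keeps
  // messages from different pipeline stages from matching each other.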
  // root of each SMP node
  if (intra_rank == 0) {
    MPI_Request *rrequest_array = xbt_new(MPI_Request, inter_comm_size - 1);
    MPI_Request *srequest_array = xbt_new(MPI_Request, inter_comm_size - 1);

    // ring neighbours are the roots of the previous and the next SMP node
    src = ((inter_rank - 1 + inter_comm_size) % inter_comm_size) * num_core;
    dst = ((inter_rank + 1) % inter_comm_size) * num_core;

    // post all inter-node Irecv up front
    for (i = 0; i < inter_comm_size - 1; i++) {
      recv_offset =
          ((inter_rank - i - 1 + inter_comm_size) % inter_comm_size) * num_core * sextent * scount;
      rrequest_array[i] = Request::irecv((char *)rbuf + recv_offset, rcount * num_core,
                                         rtype, src, tag + i, comm);
    }

    // send the first message
    send_offset =
        ((inter_rank + inter_comm_size) % inter_comm_size) * num_core * sextent * scount;
    srequest_array[0] = Request::isend((char *)rbuf + send_offset, scount * num_core,
                                       stype, dst, tag, comm);
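    // The first block injected into the ring is this node's own aggregated
    // data: (inter_rank + inter_comm_size) % inter_comm_size is just inter_rank.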
    // pipeline loop: recv-inter, send-inter, send-intra (linear broadcast)
    for (i = 0; i < inter_comm_size - 2; i++) {
      recv_offset =
          ((inter_rank - i - 1 + inter_comm_size) % inter_comm_size) * num_core * sextent * scount;
      Request::wait(&rrequest_array[i], MPI_STATUS_IGNORE);
      srequest_array[i + 1] = Request::isend((char *)rbuf + recv_offset, scount * num_core,
                                             stype, dst, tag + i + 1, comm);
      if (num_core_in_current_smp > 1) {
        Request::send((char *)rbuf + recv_offset, scount * num_core,
                      stype, (rank + 1), tag + i + 1, comm);
      }
    }
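    // The send to rank + 1 above feeds a linear chain inside the node, so the
    // intra-node broadcast of one block overlaps with the ring transfer of
    // the next one.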
    // receive the last message and do only the intra-node forwarding
    recv_offset =
        ((inter_rank - i - 1 + inter_comm_size) % inter_comm_size) * num_core * sextent * scount;
    Request::wait(&rrequest_array[i], MPI_STATUS_IGNORE);
    if (num_core_in_current_smp > 1) {
      Request::send((char *)rbuf + recv_offset, scount * num_core,
                    stype, (rank + 1), tag + i + 1, comm);
    }

    // all irecvs were waited on above; only the isends remain outstanding
    Request::waitall(inter_comm_size - 1, srequest_array, MPI_STATUSES_IGNORE);
    xbt_free(rrequest_array);
    xbt_free(srequest_array);
  }
  // last rank of each SMP node: tail of the intra-node chain, receive only
  else if (intra_rank == (num_core_in_current_smp - 1)) {
    for (i = 0; i < inter_comm_size - 1; i++) {
      recv_offset =
          ((inter_rank - i - 1 + inter_comm_size) % inter_comm_size) * num_core * sextent * scount;
      Request::recv((char *) rbuf + recv_offset, (rcount * num_core), rtype,
                    rank - 1, tag + i + 1, comm, MPI_STATUS_IGNORE);
    }
  }
  // intermediate rank of each SMP node: receive from rank - 1, relay to rank + 1
  else {
    for (i = 0; i < inter_comm_size - 1; i++) {
      recv_offset =
          ((inter_rank - i - 1 + inter_comm_size) % inter_comm_size) * num_core * sextent * scount;
      Request::recv((char *) rbuf + recv_offset, (rcount * num_core), rtype,
                    rank - 1, tag + i + 1, comm, MPI_STATUS_IGNORE);
      Request::send((char *) rbuf + recv_offset, (scount * num_core), stype,
                    (rank + 1), tag + i + 1, comm);
    }
  }

  return MPI_SUCCESS;
}

}
}