/* Copyright (c) 2013-2022. The SimGrid Team.
 * All rights reserved.                                                     */

/* This program is free software; you can redistribute it and/or modify it
 * under the terms of the license (GNU LGPL) which comes with this package. */

/*
 * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
 *                         University Research and Technology
 *                         Corporation. All rights reserved.
 * Copyright (c) 2004-2009 The University of Tennessee and The University
 *                         of Tennessee Research Foundation. All rights
 *                         reserved.
 * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
 *                         University of Stuttgart. All rights reserved.
 * Copyright (c) 2004-2005 The Regents of the University of California.
 *                         All rights reserved.
 *
 * Additional copyrights may follow
 */
/* -*- Mode: C; c-basic-offset:4 ; -*- */
/* Copyright (c) 2001-2014, The Ohio State University. All rights
 * reserved.
 *
 * This file is part of the MVAPICH2 software package developed by the
 * team members of The Ohio State University's Network-Based Computing
 * Laboratory (NBCL), headed by Professor Dhabaleswar K. (DK) Panda.
 *
 * For detailed copyright and licensing information, please refer to the
 * copyright file COPYRIGHT in the top level MVAPICH2 directory.
 */
/*
 * (C) 2001 by Argonne National Laboratory.
 *     See COPYRIGHT in top-level directory.
 */
#include "../colls_private.hpp"

extern int (*MV2_Bcast_function)(void* buffer, int count, MPI_Datatype datatype, int root, MPI_Comm comm_ptr);

extern int (*MV2_Bcast_intra_node_function)(void* buffer, int count, MPI_Datatype datatype, int root,
                                            MPI_Comm comm_ptr);
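/* These function pointers are filled in at runtime by SMPI's MVAPICH2-style selector;
 * the implementations below fall back to bcast__mpich whenever they are still unset. */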
extern int zcpy_knomial_factor;
extern int mv2_pipelined_zcpy_knomial_factor;
extern int bcast_segment_size;
extern int mv2_inter_node_knomial_factor;
extern int mv2_intra_node_knomial_factor;
extern int mv2_bcast_two_level_system_size;
#define INTRA_NODE_ROOT 0
#define MPIR_Pipelined_Bcast_Zcpy_MV2 bcast__mpich
#define MPIR_Pipelined_Bcast_MV2 bcast__mpich
#define MPIR_Bcast_binomial_MV2 bcast__binomial_tree
#define MPIR_Bcast_scatter_ring_allgather_shm_MV2 bcast__scatter_LR_allgather
#define MPIR_Bcast_scatter_doubling_allgather_MV2 bcast__scatter_rdb_allgather
#define MPIR_Bcast_scatter_ring_allgather_MV2 bcast__scatter_LR_allgather
#define MPIR_Shmem_Bcast_MV2 bcast__mpich
#define MPIR_Bcast_tune_inter_node_helper_MV2 bcast__mvapich2_inter_node
#define MPIR_Bcast_inter_node_helper_MV2 bcast__mvapich2_inter_node
#define MPIR_Knomial_Bcast_intra_node_MV2 bcast__mvapich2_knomial_intra_node
#define MPIR_Bcast_intra_MV2 bcast__mvapich2_intra_node
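/* The MPIR_* names above map the original MVAPICH2 kernels, which SimGrid does not ship,
 * onto SMPI's existing broadcast implementations (bcast__mpich, bcast__binomial_tree,
 * the scatter+allgather variants) and onto the MVAPICH2-specific helpers defined further
 * down in this file. */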
#define mv2_bcast_two_level_system_size 64
#define mv2_bcast_short_msg 16384
#define mv2_bcast_large_msg 512*1024
#define mv2_knomial_intra_node_threshold 131072
#define mv2_scatter_rd_inter_leader_bcast 1
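/* Tuning constants inherited from MVAPICH2: the two-level (leader + intra-node) scheme is
 * always used on communicators with more than 64 ranks, and on smaller ones only for
 * messages strictly between 16 KiB and 512 KiB. Within a node, the shared-memory broadcast
 * handles payloads up to 128 KiB and the k-nomial tree takes over beyond that threshold
 * (see bcast__mvapich2_intra_node below). */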
namespace simgrid::smpi {
int bcast__mvapich2_inter_node(void* buffer, int count, MPI_Datatype datatype, int root, MPI_Comm comm)
{
    int rank;
    int mpi_errno = MPI_SUCCESS;
    MPI_Comm shmem_comm, leader_comm;
    int local_rank, local_size, global_rank = -1;
    int leader_root, leader_of_root;

    rank = comm->rank();
    //comm_size = comm->size();

    if (MV2_Bcast_function == nullptr) {
        MV2_Bcast_function = bcast__mpich;
    }

    if (MV2_Bcast_intra_node_function == nullptr) {
        MV2_Bcast_intra_node_function = bcast__mpich;
    }

    if (comm->get_leaders_comm() == MPI_COMM_NULL) {
        comm->init_smp();
    }

    shmem_comm = comm->get_intra_comm();
    local_rank = shmem_comm->rank();
    local_size = shmem_comm->size();

    leader_comm = comm->get_leaders_comm();

    if ((local_rank == 0) && (local_size > 1)) {
        global_rank = leader_comm->rank();
    }

    int* leaders_map = comm->get_leaders_map();
    leader_of_root   = comm->group()->rank(leaders_map[root]);
    leader_root      = leader_comm->group()->rank(leaders_map[root]);
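    /* Step 1: bring the payload to the leader of the root's node. If the root is not the
     * leader of its own node, it sends the buffer to that leader, which posts the matching
     * receive on the global communicator. */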
    if (local_size > 1) {
        if ((local_rank == 0) && (root != rank) && (leader_root == global_rank)) {
            Request::recv(buffer, count, datatype, root, COLL_TAG_BCAST, comm, MPI_STATUS_IGNORE);
        }
        if ((local_rank != 0) && (root == rank)) {
            Request::send(buffer, count, datatype, leader_of_root, COLL_TAG_BCAST, comm);
        }
    }
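    /* Step 2: broadcast across nodes, among the per-node leaders only. Hardware multicast is
     * used when compiled in and available; otherwise the leader of the root's node acts as
     * root of a broadcast on leader_comm through MV2_Bcast_function. */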
#if defined(_MCST_SUPPORT_)
    if (comm_ptr->ch.is_mcast_ok) {
        mpi_errno = MPIR_Mcast_inter_node_MV2(buffer, count, datatype, root, comm_ptr, errflag);
        if (mpi_errno == MPI_SUCCESS) {
            return mpi_errno;
        }
    }
#endif
/*
    if (local_rank == 0) {
        leader_comm = comm->get_leaders_comm();
        root = leader_root;
    }
    if (MV2_Bcast_function == &MPIR_Pipelined_Bcast_MV2) {
        mpi_errno = MPIR_Pipelined_Bcast_MV2(buffer, count, datatype, root, comm);
    } else if (MV2_Bcast_function == &MPIR_Bcast_scatter_ring_allgather_shm_MV2) {
        mpi_errno = MPIR_Bcast_scatter_ring_allgather_shm_MV2(buffer, count, datatype, root, comm);
    } else */ {
        if (local_rank == 0) {
            /* if (MV2_Bcast_function == &MPIR_Knomial_Bcast_inter_node_wrapper_MV2) {
                mpi_errno = MPIR_Knomial_Bcast_inter_node_wrapper_MV2(buffer, count, datatype, root, comm);
            } else { */
            mpi_errno = MV2_Bcast_function(buffer, count, datatype, leader_root, leader_comm);
            /* } */
        }
    }

    return mpi_errno;
}
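/* Intra-node k-nomial broadcast: ranks are renumbered relative to the root; every rank first
 * locates its parent in the tree (receive phase), then forwards the buffer to at most k-1
 * children per remaining level (send phase), with k = mv2_intra_node_knomial_factor.
 *
 * Illustrative layout, assuming k = 4 and 16 ranks on the node with root 0:
 *   level 1 (mask = 4): rank 0 sends to ranks 4, 8 and 12
 *   level 2 (mask = 1): ranks 0, 4, 8 and 12 each send to their three successors (e.g. 4 -> 5, 6, 7)
 * Every rank receives the buffer exactly once and the tree has depth log_4(16) = 2. */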
int bcast__mvapich2_knomial_intra_node(void* buffer, int count, MPI_Datatype datatype, int root, MPI_Comm comm)
{
    int local_size = 0, rank;
    int mpi_errno = MPI_SUCCESS;
    int src, dst, mask, relative_rank;
    int k;

    if (MV2_Bcast_function == nullptr) {
        MV2_Bcast_function = bcast__mpich;
    }

    if (MV2_Bcast_intra_node_function == nullptr) {
        MV2_Bcast_intra_node_function = bcast__mpich;
    }

    if (comm->get_leaders_comm() == MPI_COMM_NULL) {
        comm->init_smp();
    }

    local_size = comm->size();
    rank       = comm->rank();

    auto* reqarray = new MPI_Request[2 * mv2_intra_node_knomial_factor];
    auto* starray  = new MPI_Status[2 * mv2_intra_node_knomial_factor];

    /* intra-node k-nomial bcast */
    if (local_size > 1) {
        relative_rank = (rank >= root) ? rank - root : rank - root + local_size;

        /* Receive phase: walk up the mask levels until this rank's parent is found */
        mask = 0x1;
        while (mask < local_size) {
            if (relative_rank % (mv2_intra_node_knomial_factor * mask)) {
                src = relative_rank / (mv2_intra_node_knomial_factor * mask) *
                          (mv2_intra_node_knomial_factor * mask) + root;
                if (src >= local_size) {
                    src -= local_size;
                }
                Request::recv(buffer, count, datatype, src, COLL_TAG_BCAST, comm, MPI_STATUS_IGNORE);
                break;
            }
            mask *= mv2_intra_node_knomial_factor;
        }
        mask /= mv2_intra_node_knomial_factor;
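        /* Send phase: each remaining mask value is one level of the subtree below this rank.
         * Forward the buffer to at most (k-1) children per level with non-blocking sends and
         * wait for them before moving down to the next level. */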
        while (mask > 0) {
            int reqs = 0;
            for (k = 1; k < mv2_intra_node_knomial_factor; k++) {
                if (relative_rank + mask * k < local_size) {
                    dst = rank + mask * k;
                    if (dst >= local_size) {
                        dst -= local_size;
                    }
                    reqarray[reqs++] = Request::isend(buffer, count, datatype, dst, COLL_TAG_BCAST, comm);
                }
            }
            Request::waitall(reqs, reqarray, starray);
            mask /= mv2_intra_node_knomial_factor;
        }
    }

    delete[] reqarray;
    delete[] starray;
    return mpi_errno;
}
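/* Top-level SMP broadcast (mapped from MPIR_Bcast_intra_MV2): selects between the two-level
 * leader-based scheme implemented above and the flat binomial / scatter+allgather algorithms,
 * based on the communicator size and the message size in bytes. */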
int bcast__mvapich2_intra_node(void* buffer, int count, MPI_Datatype datatype, int root, MPI_Comm comm)
{
    int mpi_errno = MPI_SUCCESS;
    int comm_size;
    bool two_level_bcast = true;
    size_t nbytes = 0;
    size_t type_size;
    bool is_homogeneous, is_contig;
    unsigned char* tmp_buf = nullptr;
    MPI_Comm shmem_comm;

    if (count == 0)
        return MPI_SUCCESS;

    if (MV2_Bcast_function == nullptr) {
        MV2_Bcast_function = bcast__mpich;
    }

    if (MV2_Bcast_intra_node_function == nullptr) {
        MV2_Bcast_intra_node_function = bcast__mpich;
    }

    if (comm->get_leaders_comm() == MPI_COMM_NULL) {
        comm->init_smp();
    }

    comm_size = comm->size();
    // rank = comm->rank();

/*  if (HANDLE_GET_KIND(datatype) == HANDLE_KIND_BUILTIN) */
    is_contig = true;
/*  else {
        MPID_Datatype_get_ptr(datatype, dtp);
        is_contig = dtp->is_contig;
    }
*/
    is_homogeneous = true;
#ifdef MPID_HAS_HETERO
    if (comm_ptr->is_hetero)
        is_homogeneous = false;
#endif
    /* MPI_Type_size() might not give the accurate size of the packed
     * datatype for heterogeneous systems (because of padding, encoding,
     * etc). On the other hand, MPI_Pack_size() can become very
     * expensive, depending on the implementation, especially for
     * heterogeneous systems. We want to use MPI_Type_size() wherever
     * possible, and MPI_Pack_size() in other places.
     */
    //if (is_homogeneous) {
        type_size = datatype->size();
    //}
    /* else {
        MPIR_Pack_size_impl(1, datatype, &type_size);
    } */
    nbytes = (size_t)count * type_size;
    if (comm_size <= mv2_bcast_two_level_system_size) {
        if (nbytes > mv2_bcast_short_msg && nbytes < mv2_bcast_large_msg) {
            two_level_bcast = true;
        } else {
            two_level_bcast = false;
        }
    }
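    /* Example with the constants above, on a communicator of at most 64 ranks: broadcasting
     * 8192 MPI_DOUBLEs (65536 bytes) falls in the 16 KiB - 512 KiB window, so the two-level
     * path is taken and the intra-node phase uses the shared-memory broadcast (65536 <= 131072).
     * Broadcasting 131072 MPI_DOUBLEs (1 MiB) exceeds 512 KiB, so two_level_bcast is false and
     * the scatter + ring-allgather algorithm is used instead. */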
    if (two_level_bcast
#if defined(_MCST_SUPPORT_)
        || comm_ptr->ch.is_mcast_ok
#endif
       ) {

        if (not is_contig || not is_homogeneous) {
            tmp_buf = smpi_get_tmp_sendbuffer(nbytes);

            /* TODO: Pipeline the packing and communication */
            /* if (rank == root) {
                   mpi_errno = MPIR_Pack_impl(buffer, count, datatype, tmp_buf, nbytes, &position);
                   if (mpi_errno)
                       MPIU_ERR_POP(mpi_errno);
               } */
        }

        shmem_comm = comm->get_intra_comm();
        if (not is_contig || not is_homogeneous) {
            mpi_errno = MPIR_Bcast_inter_node_helper_MV2(tmp_buf, nbytes, MPI_BYTE, root, comm);
        } else {
            mpi_errno = MPIR_Bcast_inter_node_helper_MV2(buffer, count, datatype, root, comm);
        }
        /* We are now done with the inter-node phase */
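        /* Intra-node phase: propagate the leaders' copy inside each node, either through the
         * shared-memory broadcast (payloads up to mv2_knomial_intra_node_threshold bytes) or
         * through the k-nomial tree implemented above for larger ones. */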
        if (nbytes <= mv2_knomial_intra_node_threshold) {
            if (not is_contig || not is_homogeneous) {
                mpi_errno = MPIR_Shmem_Bcast_MV2(tmp_buf, nbytes, MPI_BYTE, root, shmem_comm);
            } else {
                mpi_errno = MPIR_Shmem_Bcast_MV2(buffer, count, datatype, root, shmem_comm);
            }
        } else {
            if (not is_contig || not is_homogeneous) {
                mpi_errno = MPIR_Knomial_Bcast_intra_node_MV2(tmp_buf, nbytes, MPI_BYTE, INTRA_NODE_ROOT, shmem_comm);
            } else {
                mpi_errno = MPIR_Knomial_Bcast_intra_node_MV2(buffer, count, datatype, INTRA_NODE_ROOT, shmem_comm);
            }
        }
    } else {
        if (nbytes <= mv2_bcast_short_msg) {
            mpi_errno = MPIR_Bcast_binomial_MV2(buffer, count, datatype, root, comm);
        } else {
            if (mv2_scatter_rd_inter_leader_bcast) {
                mpi_errno = MPIR_Bcast_scatter_ring_allgather_MV2(buffer, count, datatype, root, comm);
            } else {
                mpi_errno = MPIR_Bcast_scatter_doubling_allgather_MV2(buffer, count, datatype, root, comm);
            }
        }
    }

    return mpi_errno;
}
} // namespace simgrid::smpi
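/* Usage sketch (assuming SimGrid's standard SMPI configuration knobs; the exact algorithm
 * names are registered elsewhere and may differ between releases):
 *
 *   smpirun --cfg=smpi/coll-selector:mvapich2 ... ./my_mpi_app        # use the MVAPICH2 tuning tables
 *   smpirun --cfg=smpi/bcast:mvapich2_intra_node ... ./my_mpi_app     # force this SMP broadcast
 *
 * MPI_Bcast() calls may then be served by bcast__mvapich2_intra_node(), which in turn relies on
 * bcast__mvapich2_inter_node() and bcast__mvapich2_knomial_intra_node(). */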