/* Copyright (c) 2013-2023. The SimGrid Team.
 * All rights reserved. */

/* This program is free software; you can redistribute it and/or modify it
 * under the terms of the license (GNU LGPL) which comes with this package. */
/*
 * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
 *                         University Research and Technology
 *                         Corporation. All rights reserved.
 * Copyright (c) 2004-2009 The University of Tennessee and The University
 *                         of Tennessee Research Foundation. All rights
 *                         reserved.
 * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
 *                         University of Stuttgart. All rights reserved.
 * Copyright (c) 2004-2005 The Regents of the University of California.
 *                         All rights reserved.
 *
 * Additional copyrights may follow
 */
/* -*- Mode: C; c-basic-offset:4 ; -*- */
/* Copyright (c) 2001-2014, The Ohio State University. All rights
 * reserved.
 *
 * This file is part of the MVAPICH2 software package developed by the
 * team members of The Ohio State University's Network-Based Computing
 * Laboratory (NBCL), headed by Professor Dhabaleswar K. (DK) Panda.
 *
 * For detailed copyright and licensing information, please refer to the
 * copyright file COPYRIGHT in the top level MVAPICH2 directory.
 */

/*
 * (C) 2001 by Argonne National Laboratory.
 *     See COPYRIGHT in top-level directory.
 */
#include "../colls_private.hpp"
extern int (*MV2_Bcast_function) (void *buffer, int count, MPI_Datatype datatype,
                                  int root, MPI_Comm comm_ptr);

extern int (*MV2_Bcast_intra_node_function) (void *buffer, int count, MPI_Datatype datatype,
                                             int root, MPI_Comm comm_ptr);
extern int zcpy_knomial_factor;
extern int mv2_pipelined_zcpy_knomial_factor;
extern int bcast_segment_size;
extern int mv2_inter_node_knomial_factor;
extern int mv2_intra_node_knomial_factor;
#define INTRA_NODE_ROOT 0
#define MPIR_Pipelined_Bcast_Zcpy_MV2 bcast__mpich
#define MPIR_Pipelined_Bcast_MV2 bcast__mpich
#define MPIR_Bcast_binomial_MV2 bcast__binomial_tree
#define MPIR_Bcast_scatter_ring_allgather_shm_MV2 bcast__scatter_LR_allgather
#define MPIR_Bcast_scatter_doubling_allgather_MV2 bcast__scatter_rdb_allgather
#define MPIR_Bcast_scatter_ring_allgather_MV2 bcast__scatter_LR_allgather
#define MPIR_Shmem_Bcast_MV2 bcast__mpich
#define MPIR_Bcast_tune_inter_node_helper_MV2 bcast__mvapich2_inter_node
#define MPIR_Bcast_inter_node_helper_MV2 bcast__mvapich2_inter_node
#define MPIR_Knomial_Bcast_intra_node_MV2 bcast__mvapich2_knomial_intra_node
#define MPIR_Bcast_intra_MV2 bcast__mvapich2_intra_node
#define mv2_bcast_two_level_system_size 64
#define mv2_bcast_short_msg 16384
#define mv2_bcast_large_msg 512*1024
#define mv2_knomial_intra_node_threshold 131072
#define mv2_scatter_rd_inter_leader_bcast 1
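
/* MVAPICH2-style SMP-aware broadcast, mapped onto SimGrid's SMPI collectives (a rough
 * overview of the three functions below):
 *   - bcast__mvapich2_intra_node() implements the two-level scheme: depending on the
 *     communicator and message sizes, it either broadcasts across nodes first and then
 *     inside each node, or falls back to a flat algorithm;
 *   - bcast__mvapich2_inter_node() handles the phase between node leaders;
 *   - bcast__mvapich2_knomial_intra_node() spreads the data inside a node along a
 *     k-nomial tree of radix mv2_intra_node_knomial_factor.
 * The MPIR_* macros above alias the MVAPICH2 names to SMPI implementations so that the
 * original control flow can be kept mostly unchanged. */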
namespace simgrid::smpi {
int bcast__mvapich2_inter_node(void *buffer,
                               int count,
                               MPI_Datatype datatype,
                               int root,
                               MPI_Comm comm)
{
    int rank;
    int mpi_errno = MPI_SUCCESS;
    MPI_Comm shmem_comm, leader_comm;
    int local_rank, local_size, global_rank = -1;
    int leader_root, leader_of_root;
    rank = comm->rank();
    //comm_size = comm->size();
    if (MV2_Bcast_function == nullptr) {
      MV2_Bcast_function = bcast__mpich;
    }

    if (MV2_Bcast_intra_node_function == nullptr) {
      MV2_Bcast_intra_node_function = bcast__mpich;
    }
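
    /* The per-node (intra) communicator and the leader communicator are built lazily,
     * the first time an SMP-aware collective needs them. */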
    if (comm->get_leaders_comm() == MPI_COMM_NULL) {
      comm->init_smp();
    }
    shmem_comm = comm->get_intra_comm();
    local_rank = shmem_comm->rank();
    local_size = shmem_comm->size();

    leader_comm = comm->get_leaders_comm();

    if ((local_rank == 0) && (local_size > 1)) {
      global_rank = leader_comm->rank();
    }

    int* leaders_map = comm->get_leaders_map();
    leader_of_root = comm->group()->rank(leaders_map[root]);
    leader_root = leader_comm->group()->rank(leaders_map[root]);
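
    /* If the root is not the leader of its own node, it first relays the message to that
     * leader, so that the inter-leader broadcast can start from a leader process. */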
    if (local_size > 1) {
        if ((local_rank == 0) && (root != rank) && (leader_root == global_rank)) {
            Request::recv(buffer, count, datatype, root,
                          COLL_TAG_BCAST, comm, MPI_STATUS_IGNORE);
        }
        if ((local_rank != 0) && (root == rank)) {
            Request::send(buffer, count, datatype,
                          leader_of_root, COLL_TAG_BCAST, comm);
        }
    }
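
    /* Inter-leader phase: unless hardware multicast is available, broadcast among the
     * node leaders with the algorithm currently selected in MV2_Bcast_function
     * (pipelined, scatter+allgather, or the generic one). */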
#if defined(_MCST_SUPPORT_)
    if (comm_ptr->ch.is_mcast_ok) {
        mpi_errno = MPIR_Mcast_inter_node_MV2(buffer, count, datatype, root, comm_ptr,
                                              errflag);
        if (mpi_errno == MPI_SUCCESS)
            return mpi_errno;
    }
#endif /* defined(_MCST_SUPPORT_) */
    if (local_rank == 0) {
        leader_comm = comm->get_leaders_comm();
        root = leader_root;
    }
    if (MV2_Bcast_function == &MPIR_Pipelined_Bcast_MV2) {
        mpi_errno = MPIR_Pipelined_Bcast_MV2(buffer, count, datatype,
                                             root, comm);
    } else if (MV2_Bcast_function == &MPIR_Bcast_scatter_ring_allgather_shm_MV2) {
        mpi_errno = MPIR_Bcast_scatter_ring_allgather_shm_MV2(buffer, count,
                                                              datatype, root, comm);
    } else {
        if (local_rank == 0) {
            /* if (MV2_Bcast_function == &MPIR_Knomial_Bcast_inter_node_wrapper_MV2) {
                   mpi_errno = MPIR_Knomial_Bcast_inter_node_wrapper_MV2(buffer, count,
                                                                         datatype, root, comm);
               } else */
            mpi_errno = MV2_Bcast_function(buffer, count, datatype,
                                           leader_root, leader_comm);
        }
    }
    return mpi_errno;
}
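
/* Intra-node broadcast along a k-nomial tree rooted at `root`: every process first waits
 * for the message from its parent, then forwards it to up to
 * (mv2_intra_node_knomial_factor - 1) children per tree level. */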
int bcast__mvapich2_knomial_intra_node(void *buffer,
                                       int count,
                                       MPI_Datatype datatype,
                                       int root, MPI_Comm comm)
{
    int local_size = 0, rank;
    int mpi_errno = MPI_SUCCESS;
    int src, dst, mask, relative_rank;
    int k;
    if (MV2_Bcast_function == nullptr) {
      MV2_Bcast_function = bcast__mpich;
    }
    if (MV2_Bcast_intra_node_function == nullptr) {
      MV2_Bcast_intra_node_function = bcast__mpich;
    }
    if (comm->get_leaders_comm() == MPI_COMM_NULL) {
      comm->init_smp();
    }

    local_size = comm->size();
    rank = comm->rank();
    auto* reqarray = new MPI_Request[2 * mv2_intra_node_knomial_factor];
    auto* starray  = new MPI_Status[2 * mv2_intra_node_knomial_factor];
    /* intra-node k-nomial bcast */
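    /* Illustration of the k-nomial tree (assuming mv2_intra_node_knomial_factor = 4,
     * 16 local ranks and root 0): in the first round rank 0 sends to ranks 4, 8 and 12;
     * in the second round each of 0, 4, 8 and 12 sends to its three direct neighbours
     * (e.g. 4 sends to 5, 6 and 7), so the whole node is covered in ceil(log_4(16)) = 2
     * rounds instead of the 4 rounds a binomial tree would need. */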
    if (local_size > 1) {
        relative_rank = (rank >= root) ? rank - root : rank - root + local_size;
        mask = 0x1;

        /* Receive phase: climb the levels until this rank's parent is found. */
        while (mask < local_size) {
            if (relative_rank % (mv2_intra_node_knomial_factor * mask)) {
                src = relative_rank / (mv2_intra_node_knomial_factor * mask) *
                    (mv2_intra_node_knomial_factor * mask) + root;
                if (src >= local_size) {
                    src -= local_size;
                }
                Request::recv(buffer, count, datatype, src,
                              COLL_TAG_BCAST, comm,
                              MPI_STATUS_IGNORE);
                break;
            }
            mask *= mv2_intra_node_knomial_factor;
        }
        mask /= mv2_intra_node_knomial_factor;
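
        /* Send phase: walk back down the tree; at each level post non-blocking sends to
         * up to (k-1) children and wait for them before moving to the next level. */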
        while (mask > 0) {
            int reqs = 0;
            for (k = 1; k < mv2_intra_node_knomial_factor; k++) {
                if (relative_rank + mask * k < local_size) {
                    dst = rank + mask * k;
                    if (dst >= local_size) {
                        dst -= local_size;
                    }
                    reqarray[reqs++] = Request::isend(buffer, count, datatype, dst,
                                                      COLL_TAG_BCAST, comm);
                }
            }
            Request::waitall(reqs, reqarray, starray);
            mask /= mv2_intra_node_knomial_factor;
        }
    }

    delete[] reqarray;
    delete[] starray;
    return mpi_errno;
}
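
/* Two-level broadcast driver: from the communicator size and the message size in bytes,
 * decide between the two-level (inter-node then intra-node) scheme and a flat algorithm. */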
int bcast__mvapich2_intra_node(void *buffer,
                               int count,
                               MPI_Datatype datatype,
                               int root, MPI_Comm comm)
{
    int mpi_errno = MPI_SUCCESS;
    int comm_size;
    bool two_level_bcast = true;
    size_t nbytes = 0;
    bool is_homogeneous, is_contig;
    MPI_Aint type_size;
    unsigned char* tmp_buf = nullptr;
    MPI_Comm shmem_comm;
    if (MV2_Bcast_function == nullptr) {
      MV2_Bcast_function = bcast__mpich;
    }
    if (MV2_Bcast_intra_node_function == nullptr) {
      MV2_Bcast_intra_node_function = bcast__mpich;
    }
    if (comm->get_leaders_comm() == MPI_COMM_NULL) {
      comm->init_smp();
    }

    comm_size = comm->size();
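
    /* In SMPI the payload is handled as a plain contiguous buffer and the simulated
     * platform is treated as homogeneous, so the MPICH datatype/heterogeneity checks
     * below reduce to constants; the original code is kept as comments for reference. */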
    // rank = comm->rank();
/*  if (HANDLE_GET_KIND(datatype) == HANDLE_KIND_BUILTIN)*/
        is_contig = true;
/*  else {
        MPID_Datatype_get_ptr(datatype, dtp);
        is_contig = dtp->is_contig;
    }*/

    is_homogeneous = true;
#ifdef MPID_HAS_HETERO
    if (comm_ptr->is_hetero)
        is_homogeneous = false;
#endif
    /* MPI_Type_size() might not give the accurate size of the packed
     * datatype for heterogeneous systems (because of padding, encoding,
     * etc). On the other hand, MPI_Pack_size() can become very
     * expensive, depending on the implementation, especially for
     * heterogeneous systems. We want to use MPI_Type_size() wherever
     * possible, and MPI_Pack_size() in other places. */
    //if (is_homogeneous) {
        type_size = datatype->size();
    //} else {
    /*    MPIR_Pack_size_impl(1, datatype, &type_size);*/
    //}

    nbytes = (size_t) (count) * (type_size);
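
    /* For instance (illustrative values): broadcasting 4096 MPI_DOUBLE elements gives
     * nbytes = 4096 * 8 = 32768, which lies between mv2_bcast_short_msg (16384) and
     * mv2_bcast_large_msg (524288). On a communicator of at most
     * mv2_bcast_two_level_system_size (64) ranks the two-level scheme is then kept, while
     * messages outside that range fall back to a flat algorithm; communicators larger
     * than 64 ranks always keep the two-level scheme. */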
    if (comm_size <= mv2_bcast_two_level_system_size) {
        if (nbytes > mv2_bcast_short_msg && nbytes < mv2_bcast_large_msg) {
            two_level_bcast = true;
        } else {
            two_level_bcast = false;
        }
    }

    if (two_level_bcast
#if defined(_MCST_SUPPORT_)
        || comm_ptr->ch.is_mcast_ok
#endif
       ) {
        if (not is_contig || not is_homogeneous) {
            tmp_buf = smpi_get_tmp_sendbuffer(nbytes);

            /* TODO: Pipeline the packing and communication */
            /* if (rank == root) {*/
            /*     MPIR_Pack_impl(buffer, count, datatype, tmp_buf, nbytes, &position);*/
            /*     MPIU_ERR_POP(mpi_errno);*/
            /* }*/
        }
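
        /* First phase of the two-level scheme: move the data across nodes, i.e. between
         * the node leaders, with the inter-node helper selected above. */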
        shmem_comm = comm->get_intra_comm();
        if (not is_contig || not is_homogeneous) {
            mpi_errno = MPIR_Bcast_inter_node_helper_MV2(tmp_buf, nbytes, MPI_BYTE, root, comm);
        } else {
            mpi_errno = MPIR_Bcast_inter_node_helper_MV2(buffer, count, datatype, root,
                                                         comm);
        }

        /* We are now done with the inter-node phase */
        if (nbytes <= mv2_knomial_intra_node_threshold) {
            if (not is_contig || not is_homogeneous) {
                mpi_errno = MPIR_Shmem_Bcast_MV2(tmp_buf, nbytes, MPI_BYTE, root, shmem_comm);
            } else {
                mpi_errno = MPIR_Shmem_Bcast_MV2(buffer, count, datatype,
                                                 root, shmem_comm);
            }
        } else {
            if (not is_contig || not is_homogeneous) {
                mpi_errno = MPIR_Knomial_Bcast_intra_node_MV2(tmp_buf, nbytes, MPI_BYTE, INTRA_NODE_ROOT, shmem_comm);
            } else {
                mpi_errno = MPIR_Knomial_Bcast_intra_node_MV2(buffer, count, datatype,
                                                              INTRA_NODE_ROOT, shmem_comm);
            }
        }
    } else {
        if (nbytes <= mv2_bcast_short_msg) {
            mpi_errno = MPIR_Bcast_binomial_MV2(buffer, count, datatype, root, comm);
        } else {
            if (mv2_scatter_rd_inter_leader_bcast) {
                mpi_errno = MPIR_Bcast_scatter_ring_allgather_MV2(buffer, count, datatype,
                                                                  root, comm);
            } else {
                mpi_errno = MPIR_Bcast_scatter_doubling_allgather_MV2(buffer, count, datatype,
                                                                      root, comm);
            }
        }
    }

    return mpi_errno;
}

} // namespace simgrid::smpi