1 /* Copyright (c) 2013-2014. The SimGrid Team.
2 * All rights reserved. */
4 /* This program is free software; you can redistribute it and/or modify it
5 * under the terms of the license (GNU LGPL) which comes with this package. */
7 #include "colls_private.h"
/* Allgather "loosely lr": ranks are viewed as a 2-D grid of
 * (inter = node index, intra = core index on the node).  A logical-ring
 * exchange between nodes is overlapped with a complete exchange among the
 * cores of each node.
 * NOTE(review): this listing is an incomplete dump — the embedded original
 * line numbers jump (8-12, 16-18, 23, ... are absent), so the tail of the
 * signature, several local declarations (comm, comm_size, rank, src, dst,
 * src_seg, dst_seg, status), various closing braces, and the function
 * epilogue are not visible here.  Comments below only describe what the
 * visible lines establish. */
13 int smpi_coll_tuned_allgather_loosely_lr(void *sbuf, int scount,
14 MPI_Datatype stype, void *rbuf,
15 int rcount, MPI_Datatype rtype,
/* NOTE(review): remainder of the parameter list (presumably
 * `MPI_Comm comm)`) and the opening brace are on lines 16-18, not shown. */
19 int tag = COLL_TAG_ALLGATHER;
20 int i, j, send_offset, recv_offset;
21 int intra_rank, inter_rank, inter_comm_size, intra_comm_size;
22 int inter_dst, inter_src;
24 comm_size = smpi_comm_size(comm);
/* Core count of the simulated host; used as the intra-node group size. */
26 int num_core = simcall_host_get_core(SIMIX_host_self());
27 // do we use the default one or the number of cores in the platform ?
28 // if the number of cores is one, the platform may be simulated with 1 node = 1 core
29 if (num_core == 1) num_core = NUM_CORE;
/* The algorithm requires the process count to be a multiple of the
 * per-node core count; otherwise bail out with an argument error. */
31 if(comm_size%num_core)
32 THROWF(arg_error,0, "allgather loosely lr algorithm can't be used with non multiple of NUM_CORE=%d number of processes ! ",num_core);
34 rank = smpi_comm_rank(comm);
35 MPI_Aint rextent, sextent;
36 rextent = smpi_datatype_get_extent(rtype);
37 sextent = smpi_datatype_get_extent(stype);
38 MPI_Request inter_rrequest;
/* NOTE(review): fixed-capacity request arrays — assumes fewer than 128
 * outstanding requests accumulate (no bounds check on the *_count
 * indices); confirm against the callers' maximum num_core / ring size. */
39 MPI_Request rrequest_array[128];
40 MPI_Request srequest_array[128];
41 MPI_Request inter_srequest_array[128];
44 int rrequest_count = 0;
45 int srequest_count = 0;
46 int inter_srequest_count = 0;
/* Map the flat rank onto the (node, core) grid. */
50 intra_rank = rank % num_core;
51 inter_rank = rank / num_core;
52 inter_comm_size = (comm_size + num_core - 1) / num_core;
53 intra_comm_size = num_core;
57 //copy corresponding message from sbuf to rbuf
/* Self send-receive places this rank's own contribution into its slot of
 * rbuf (offset in bytes: rank * extent * count). */
58 recv_offset = rank * rextent * rcount;
59 smpi_mpi_sendrecv(sbuf, scount, stype, rank, tag,
60 (char *)rbuf + recv_offset, rcount, rtype, rank, tag, comm, &status);
63 int inter_send_offset, inter_recv_offset;
67 inter_srequest_count = 0;
/* One ring round per node: each round forwards a segment along the
 * inter-node ring while the cores of a node exchange segments among
 * themselves. */
69 for (i = 0; i < inter_comm_size; i++) {
71 // inter_communication
/* Ring neighbours: the rank with the same intra position on the
 * next/previous node (offset by one full node's worth of ranks). */
73 inter_dst = (rank + intra_comm_size) % comm_size;
74 inter_src = (rank - intra_comm_size + comm_size) % comm_size;
/* NOTE(review): the assignment targets of the two segment-index
 * expressions below (presumably `dst_seg =` on line 75/76 and `src_seg =`
 * on line 79/80) are not visible in this dump. */
77 ((inter_rank - 1 - i +
78 inter_comm_size) % inter_comm_size) * intra_comm_size + intra_rank;
81 inter_comm_size) % inter_comm_size) * intra_comm_size + intra_rank;
/* Byte offsets of the segments moved along the ring this round. */
83 inter_send_offset = dst_seg * sextent * scount;
84 inter_recv_offset = src_seg * rextent * rcount;
86 for (j = 0; j < intra_comm_size; j++) {
88 // inter communication
/* Only the core whose turn it is (intra_rank == j) drives the ring step;
 * the final round needs no further inter-node transfer. */
89 if (intra_rank == j) {
90 if (i != inter_comm_size - 1) {
92 inter_rrequest = smpi_mpi_irecv((char *)rbuf + inter_recv_offset, rcount, rtype,
93 inter_src, tag, comm);
94 inter_srequest_array[inter_srequest_count++] = smpi_mpi_isend((char *)rbuf + inter_send_offset, scount, stype,
95 inter_dst, tag, comm);
/* Intra-node partner: core j on this same node (same peer for send and
 * receive). */
99 src = inter_rank * intra_comm_size + j;
100 dst = inter_rank * intra_comm_size + j;
/* NOTE(review): assignment targets for these intra-exchange segment
 * indices (lines 101-103 / 105-106, presumably `dst_seg =` / `src_seg =`)
 * are not visible in this dump. */
104 inter_comm_size) % inter_comm_size) * intra_comm_size + j;
107 inter_comm_size) % inter_comm_size) * intra_comm_size + intra_rank;
109 send_offset = dst_seg * sextent * scount;
110 recv_offset = src_seg * rextent * rcount;
/* A core never exchanges with itself inside the node. */
113 if (j != intra_rank) {
115 rrequest_array[rrequest_count++] = smpi_mpi_irecv((char *)rbuf + recv_offset, rcount, rtype, src, tag, comm);
116 srequest_array[srequest_count++] = smpi_mpi_isend((char *)rbuf + send_offset, scount, stype, dst, tag, comm);
122 // wait for inter communication to finish for these rounds (# of round equals num_core)
/* Block on the ring receive posted this round (none in the last round). */
123 if (i != inter_comm_size - 1) {
124 smpi_mpi_wait(&inter_rrequest, &status);
/* Drain every outstanding intra- and inter-node request before the
 * collective completes.
 * NOTE(review): the return statement and closing brace lie past the end
 * of this listing (after original line 131). */
129 smpi_mpi_waitall(rrequest_count, rrequest_array, MPI_STATUSES_IGNORE);
130 smpi_mpi_waitall(srequest_count, srequest_array, MPI_STATUSES_IGNORE);
131 smpi_mpi_waitall(inter_srequest_count, inter_srequest_array, MPI_STATUSES_IGNORE);