/* Copyright (c) 2013-2023. The SimGrid Team.
 * All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
 * FIXME: uh, check smpi_pmpi again, but this routine is called for > 12, not
 * less...
 **/
-namespace simgrid{
-namespace smpi{
-int Coll_alltoallv_bruck::alltoallv(const void *sendbuf, const int *sendcounts, const int *senddisps,
- MPI_Datatype sendtype, void *recvbuf,
- const int *recvcounts,const int *recvdisps, MPI_Datatype recvtype,
- MPI_Comm comm)
+namespace simgrid::smpi {
+int alltoallv__bruck(const void *sendbuf, const int *sendcounts, const int *senddisps,
+ MPI_Datatype sendtype, void *recvbuf,
+ const int *recvcounts,const int *recvdisps, MPI_Datatype recvtype,
+ MPI_Comm comm)
{
int system_tag = COLL_TAG_ALLTOALLV;
int i, rank, size, err, count;
int ii, ss, dst;
/* post only bblock isends/irecvs at a time as suggested by Tony Ladd */
for (ii = 0; ii < size; ii += bblock) {
- MPI_Request* requests = new MPI_Request[2 * bblock];
+ auto* requests = new MPI_Request[2 * bblock];
ss = size - ii < bblock ? size - ii : bblock;
count = 0;
count++;
}
/* Wait for them all. */
- // Colls::startall(count, requests);
+ // colls::startall(count, requests);
XBT_DEBUG("<%d> wait for %d requests", rank, count);
Request::waitall(count, requests, MPI_STATUSES_IGNORE);
delete[] requests;
}
return MPI_SUCCESS;
}
-}
-}
+} // namespace simgrid::smpi