-/* Copyright (c) 2013-2017. The SimGrid Team.
+/* Copyright (c) 2013-2023. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
-#include "../colls_private.h"
+#include "../colls_private.hpp"
//#include <star-reduction.c>
int reduce_NTSL_segment_size_in_byte = 8192; // pipeline segment size in bytes; presumably divided by the datatype extent to obtain `segment` (elements per step) — that conversion sits in a hunk not visible in this fragment
-/* Non-topology-specific pipelined linear-bcast function 
+/* Non-topology-specific pipelined linear reduce function
   0->1, 1->2 ,2->3, ....., ->last node : in a pipeline fashion
*/
-namespace simgrid{
-namespace smpi{
-int Coll_reduce_NTSL::reduce(void *buf, void *rbuf, int count,
-                     MPI_Datatype datatype, MPI_Op op, int root,
-                     MPI_Comm comm)
+namespace simgrid::smpi {
// NOTE(review): this chunk is a diff fragment with omitted unchanged hunks —
// `segment`, `pipe_length`, `from`/`to`, the rank/size queries, the extent
// computation, and the per-segment reduction calls are all declared/performed
// in lines not shown here, so the control flow below is incomplete as viewed.
// Pipelined "non-topology-specific linear" reduce: ranks form a chain and the
// buffer is processed one segment at a time so communication overlaps with
// the per-segment combine (the combine itself is in an omitted hunk).
//   buf      - send buffer (read-only; `const` added by this diff)
//   rbuf     - destination buffer (significant at the root)
//   count    - number of `datatype` elements in buf
//   op       - reduction operation (visibly used only in the remainder call)
//   root     - rank that receives the final result
// Returns MPI_SUCCESS.
+int reduce__NTSL(const void *buf, void *rbuf, int count,
+                 MPI_Datatype datatype, MPI_Op op, int root,
+                 MPI_Comm comm)
{
  int tag = COLL_TAG_REDUCE;
  MPI_Status status;
-  MPI_Request *send_request_array;
-  MPI_Request *recv_request_array;
-  MPI_Status *send_status_array;
-  MPI_Status *recv_status_array;
  int rank, size;
  int i;
  MPI_Aint extent;
  /* use for buffer offset for sending and receiving data = segment size in byte */
// NOTE(review): `segment` (elements per pipeline step) and `extent` are
// initialised in omitted hunks; `increment` is the byte stride between segments.
  int increment = segment * extent;
-  /* if the input size is not divisible by segment size => 
+  /* if the input size is not divisible by segment size =>
     the small remainder will be done with native implementation */
  int remainder = count % segment;
// NOTE(review): the lines below, up to the closing `*/`, are the tail of a
// commented-out non-pipelined fallback — dead code in the compiled file.
      Request::send(buf,count,datatype,0,tag,comm);
    }
    else if (rank == 0) {
-      Request::recv(buf,count,datatype,root,tag,comm,&status);
+      Request::recv(buf,count,datatype,root,tag,comm,&status);
    }
  }
  */
// Scratch buffer for incoming segments; released via smpi_free_tmp_buffer below.
-  char *tmp_buf;
-  tmp_buf = (char *) smpi_get_tmp_sendbuffer(count * extent);
+  unsigned char* tmp_buf = smpi_get_tmp_sendbuffer(count * extent);
// Self send/receive copies buf into rbuf so rbuf starts with this rank's data.
  Request::sendrecv(buf, count, datatype, rank, tag, rbuf, count, datatype, rank,
               tag, comm, &status);
  /* pipeline */
// NOTE(review): the matching `if` of this `else` (the small-message,
// non-pipelined case, presumably `count <= segment`) is in an omitted hunk.
  else {
-    send_request_array =
-        (MPI_Request *) xbt_malloc((size + pipe_length) * sizeof(MPI_Request));
-    recv_request_array =
-        (MPI_Request *) xbt_malloc((size + pipe_length) * sizeof(MPI_Request));
-    send_status_array =
-        (MPI_Status *) xbt_malloc((size + pipe_length) * sizeof(MPI_Status));
-    recv_status_array =
-        (MPI_Status *) xbt_malloc((size + pipe_length) * sizeof(MPI_Status));
+    auto* send_request_array = new MPI_Request[size + pipe_length];
+    auto* recv_request_array = new MPI_Request[size + pipe_length];
+    auto* send_status_array = new MPI_Status[size + pipe_length];
+    auto* recv_status_array = new MPI_Status[size + pipe_length];
// NOTE(review): raw new[]/delete[] leaks if anything between them throws;
// std::vector<MPI_Request>/std::vector<MPI_Status> would be exception-safe
// with no interface change — consider for a follow-up patch.
    /* root recv data */
    if (rank == root) {
// Post every segment receive up front, then wait for each in order.
      for (i = 0; i < pipe_length; i++) {
-        recv_request_array[i] = Request::irecv((char *) tmp_buf + (i * increment), segment, datatype, from,
-                  (tag + i), comm);
+        recv_request_array[i] = Request::irecv(tmp_buf + (i * increment), segment, datatype, from, (tag + i), comm);
      }
      for (i = 0; i < pipe_length; i++) {
        Request::wait(&recv_request_array[i], &status);
// NOTE(review): the root's per-segment combine (applying `op` from tmp_buf
// into rbuf) and this branch's closing braces sit in omitted hunks.
    /* intermediate nodes relay (receive, reduce, then send) data */
    else {
      for (i = 0; i < pipe_length; i++) {
-        recv_request_array[i] = Request::irecv((char *) tmp_buf + (i * increment), segment, datatype, from,
-                  (tag + i), comm);
+        recv_request_array[i] = Request::irecv(tmp_buf + (i * increment), segment, datatype, from, (tag + i), comm);
      }
      for (i = 0; i < pipe_length; i++) {
        Request::wait(&recv_request_array[i], &status);
// NOTE(review): the per-segment reduce+isend is omitted here; the waitall
// below drains the isend requests posted by that omitted code.
      Request::waitall((pipe_length), send_request_array, send_status_array);
    }
-    free(send_request_array);
-    free(recv_request_array);
-    free(send_status_array);
-    free(recv_status_array);
+    delete[] send_request_array;
+    delete[] recv_request_array;
+    delete[] send_status_array;
+    delete[] recv_status_array;
  } /* end pipeline */
// The trailing count % segment elements are delegated to the default reduce.
-  /* when count is not divisible by block size, use default BCAST for the remainder */
  if ((remainder != 0) && (count > segment)) {
-    XBT_WARN("MPI_reduce_NTSL use default MPI_reduce.");
-    Coll_reduce_default::reduce((char *)buf + (pipe_length * increment),
-               (char *)rbuf + (pipe_length * increment), remainder, datatype, op, root,
-               comm);
// NOTE(review): this diff demotes the fallback message from XBT_WARN to
// XBT_INFO and rewrites its text; sibling collectives keep XBT_WARN for the
// same situation — confirm the log-level change is intentional.
+    XBT_INFO("MPI_reduce_NTSL: count is not divisible by block size, use default MPI_reduce for remainder.");
+    reduce__default((char *)buf + (pipe_length * increment),
+                    (char *)rbuf + (pipe_length * increment), remainder, datatype, op, root,
+                    comm);
  }
-  free(tmp_buf);
+  smpi_free_tmp_buffer(tmp_buf);
  return MPI_SUCCESS;
}
-}
-}
+} // namespace simgrid::smpi