X-Git-Url: http://bilbo.iut-bm.univ-fcomte.fr/pub/gitweb/simgrid.git/blobdiff_plain/13fb4af932a02ea0bd4293d1e55ac071de326f80..9afa0d0db01da2ae64e48fb594cc87c186dde192:/src/smpi/colls/bcast/bcast-arrival-pattern-aware-wait.cpp

diff --git a/src/smpi/colls/bcast/bcast-arrival-pattern-aware-wait.cpp b/src/smpi/colls/bcast/bcast-arrival-pattern-aware-wait.cpp
index 3256363a12..445596b8ce 100644
--- a/src/smpi/colls/bcast/bcast-arrival-pattern-aware-wait.cpp
+++ b/src/smpi/colls/bcast/bcast-arrival-pattern-aware-wait.cpp
@@ -1,10 +1,10 @@
-/* Copyright (c) 2013-2017. The SimGrid Team.
+/* Copyright (c) 2013-2023. The SimGrid Team.
  * All rights reserved.                                                     */
 
 /* This program is free software; you can redistribute it and/or modify it
  * under the terms of the license (GNU LGPL) which comes with this package. */
 
-#include "../colls_private.h"
+#include "../colls_private.hpp"
 
 int bcast_arrival_pattern_aware_wait_segment_size_in_byte = 8192;
 
@@ -15,20 +15,14 @@ int bcast_arrival_pattern_aware_wait_segment_size_in_byte = 8192;
 #ifndef BCAST_ARRIVAL_PATTERN_AWARE_MAX_NODE
 #define BCAST_ARRIVAL_PATTERN_AWARE_MAX_NODE 128
 #endif
-namespace simgrid{
-namespace smpi{
+namespace simgrid::smpi {
 /* Non-topology-specific pipelined linear-bcast function */
-int Coll_bcast_arrival_pattern_aware_wait::bcast(void *buf, int count,
-                                                 MPI_Datatype datatype,
-                                                 int root, MPI_Comm comm)
+int bcast__arrival_pattern_aware_wait(void *buf, int count,
+                                      MPI_Datatype datatype,
+                                      int root, MPI_Comm comm)
 {
   MPI_Status status;
   MPI_Request request;
-  MPI_Request *send_request_array;
-  MPI_Request *recv_request_array;
-  MPI_Status *send_status_array;
-  MPI_Status *recv_status_array;
-
   MPI_Status temp_status_array[BCAST_ARRIVAL_PATTERN_AWARE_MAX_NODE];
@@ -98,25 +92,18 @@ int Coll_bcast_arrival_pattern_aware_wait::bcast(void *buf, int count,
   /* start pipeline bcast */
-  send_request_array =
-      (MPI_Request *) xbt_malloc((size + pipe_length) * sizeof(MPI_Request));
-  recv_request_array =
-      (MPI_Request *) xbt_malloc((size + pipe_length) * sizeof(MPI_Request));
-  send_status_array =
-      (MPI_Status *) xbt_malloc((size + pipe_length) * sizeof(MPI_Status));
-  recv_status_array =
-      (MPI_Status *) xbt_malloc((size + pipe_length) * sizeof(MPI_Status));
+  auto* send_request_array = new MPI_Request[size + pipe_length];
+  auto* recv_request_array = new MPI_Request[size + pipe_length];
+  auto* send_status_array = new MPI_Status[size + pipe_length];
+  auto* recv_status_array = new MPI_Status[size + pipe_length];
 
   /* root */
   if (rank == 0) {
     sent_count = 0;
-    int iteration = 0;
 
     for (i = 0; i < BCAST_ARRIVAL_PATTERN_AWARE_MAX_NODE; i++)
       will_send[i] = 0;
     while (sent_count < (size - 1)) {
-      iteration++;
-
       /* loop k times to let more processes arrive before start sending data */
       for (k = 0; k < 3; k++) {
         for (i = 1; i < size; i++) {
@@ -239,20 +226,18 @@ int Coll_bcast_arrival_pattern_aware_wait::bcast(void *buf, int count,
     }
   }
 
-  free(send_request_array);
-  free(recv_request_array);
-  free(send_status_array);
-  free(recv_status_array);
+  delete[] send_request_array;
+  delete[] recv_request_array;
+  delete[] send_status_array;
+  delete[] recv_status_array;
   /* end pipeline */
 
-  /* when count is not divisible by block size, use default BCAST for the remainder */
   if ((remainder != 0) && (count > segment)) {
-    XBT_WARN("MPI_bcast_arrival_pattern_aware_wait use default MPI_bcast.");
-    Colls::bcast((char *)buf + (pipe_length * increment), remainder, datatype, root, comm);
+    XBT_INFO("MPI_bcast_arrival_pattern_aware_wait: count is not divisible by block size, use default MPI_bcast for remainder.");
+    colls::bcast((char*)buf + (pipe_length * increment), remainder, datatype, root, comm);
   }
 
   return MPI_SUCCESS;
 }
 
-}
-}
+} // namespace simgrid::smpi