From c40f6bc2301d8b7039f7ced0cd40b2bf6e3879ed Mon Sep 17 00:00:00 2001
From: Augustin Degomme
Date: Mon, 21 Mar 2022 00:36:15 +0100
Subject: [PATCH] Remove spurious barrier calls in comm creation and win
 creation

The standard allows these barriers, but they do not seem to be mandatory,
and they slow down the model checker.
---
 src/smpi/mpi/smpi_comm.cpp                          |  1 -
 src/smpi/mpi/smpi_win.cpp                           |  3 ---
 .../coll-allreduce-with-leaks.tesh                  | 14 +++++++-------
 3 files changed, 7 insertions(+), 11 deletions(-)

diff --git a/src/smpi/mpi/smpi_comm.cpp b/src/smpi/mpi/smpi_comm.cpp
index 84f604dba8..7e19385778 100644
--- a/src/smpi/mpi/smpi_comm.cpp
+++ b/src/smpi/mpi/smpi_comm.cpp
@@ -55,7 +55,6 @@ Comm::Comm(MPI_Group group, MPI_Topology topo, bool smp, int in_id)
     colls::bcast(&id, 1, MPI_INT, 0, this);
     XBT_DEBUG("Communicator %p has id %d", this, id);
     id_=id;//only set here, as we don't want to change it in the middle of the bcast
-    colls::barrier(this);
   }
 }
 
diff --git a/src/smpi/mpi/smpi_win.cpp b/src/smpi/mpi/smpi_win.cpp
index 0a34e11871..8eb022d210 100644
--- a/src/smpi/mpi/smpi_win.cpp
+++ b/src/smpi/mpi/smpi_win.cpp
@@ -70,9 +70,6 @@ Win::Win(void* base, MPI_Aint size, int disp_unit, MPI_Info info, MPI_Comm comm,
     colls::bcast(&bar_ptr, sizeof(s4u::Barrier*), MPI_BYTE, 0, comm);
     if (rank_ != 0)
       bar_ = s4u::BarrierPtr(bar_ptr);
-    bar_->wait();
-  }else{
-    colls::barrier(comm);
   }
   this->add_f();
 }
diff --git a/teshsuite/smpi/coll-allreduce-with-leaks/coll-allreduce-with-leaks.tesh b/teshsuite/smpi/coll-allreduce-with-leaks/coll-allreduce-with-leaks.tesh
index 25c5561e01..8c55e68d8a 100644
--- a/teshsuite/smpi/coll-allreduce-with-leaks/coll-allreduce-with-leaks.tesh
+++ b/teshsuite/smpi/coll-allreduce-with-leaks/coll-allreduce-with-leaks.tesh
@@ -18,13 +18,13 @@ $ $VALGRIND_NO_LEAK_CHECK ${bindir:=.}/../../../smpi_script/bin/smpirun -map -ho
 > [0.000000] [smpi/INFO] [rank 13] -> Ginette
 > [0.000000] [smpi/INFO] [rank 14] -> Ginette
 > [0.000000] [smpi/INFO] [rank 15] -> Ginette
-> [0.023780] [smpi_utils/INFO] Probable memory leaks in your code: SMPI detected 32 unfreed MPI handles:
-> [0.023780] [smpi_utils/INFO] 16 leaked handles of type MPI_Comm at coll-allreduce-with-leaks.c:23
-> [0.023780] [smpi_utils/INFO] 16 leaked handles of type MPI_Group at coll-allreduce-with-leaks.c:23
-> [0.023780] [smpi_utils/INFO] Probable memory leaks in your code: SMPI detected 32 unfreed buffers:
-> [0.023780] [smpi_utils/INFO] coll-allreduce-with-leaks.c:28: leaked allocations of total size 1504, called 16 times, with minimum size 64 and maximum size 124
-> [0.023780] [smpi_utils/INFO] coll-allreduce-with-leaks.c:27: leaked allocations of total size 1024, called 16 times, each with size 64
-> [0.023780] [smpi_utils/INFO] Memory Usage: Simulated application allocated 2528 bytes during its lifetime through malloc/calloc calls.
+> [0.015765] [smpi_utils/INFO] Probable memory leaks in your code: SMPI detected 32 unfreed MPI handles:
+> [0.015765] [smpi_utils/INFO] 16 leaked handles of type MPI_Comm at coll-allreduce-with-leaks.c:23
+> [0.015765] [smpi_utils/INFO] 16 leaked handles of type MPI_Group at coll-allreduce-with-leaks.c:23
+> [0.015765] [smpi_utils/INFO] Probable memory leaks in your code: SMPI detected 32 unfreed buffers:
+> [0.015765] [smpi_utils/INFO] coll-allreduce-with-leaks.c:28: leaked allocations of total size 1504, called 16 times, with minimum size 64 and maximum size 124
+> [0.015765] [smpi_utils/INFO] coll-allreduce-with-leaks.c:27: leaked allocations of total size 1024, called 16 times, each with size 64
+> [0.015765] [smpi_utils/INFO] Memory Usage: Simulated application allocated 2528 bytes during its lifetime through malloc/calloc calls.
 > Largest allocation at once from a single process was 124 bytes, at coll-allreduce-with-leaks.c:28. It was called 1 times during the whole simulation.
 > If this is too much, consider sharing allocations for computation buffers.
 > This can be done automatically by setting --cfg=smpi/auto-shared-malloc-thresh to the minimum size wanted size (this can alter execution if data content is necessary)
-- 
2.20.1
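
For context on the rationale: the MPI standard only requires communicator and
window creation to be collective, not synchronizing (MPI_Barrier is the only
collective that is guaranteed to synchronize). The sketch below is
illustrative only; it is not part of the patch and uses plain MPI calls
rather than SMPI internals. It shows how an application that actually needs a
rendezvous at these points asks for one explicitly:

/* Illustrative sketch, not part of the patch: a correct MPI application
 * never relies on communicator or window creation acting as a barrier,
 * so it requests synchronization explicitly where it needs it. */
#include <mpi.h>

int main(int argc, char* argv[])
{
  MPI_Init(&argc, &argv);
  int rank;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  /* Communicator creation is collective but not necessarily synchronizing:
   * ranks may leave MPI_Comm_dup at different times. */
  MPI_Comm dup;
  MPI_Comm_dup(MPI_COMM_WORLD, &dup);
  MPI_Barrier(dup); /* explicit barrier, only if the program needs one */

  /* RMA access epochs are opened and closed with MPI_Win_fence (or
   * post/start/complete/wait), so a barrier hidden inside MPI_Win_create
   * is redundant for a correct program. */
  int buf = rank;
  MPI_Win win;
  MPI_Win_create(&buf, sizeof(int), sizeof(int), MPI_INFO_NULL, dup, &win);
  MPI_Win_fence(0, win); /* opens the first access epoch */
  /* ... MPI_Put / MPI_Get on win ... */
  MPI_Win_fence(0, win); /* closes the epoch and synchronizes */

  MPI_Win_free(&win);
  MPI_Comm_free(&dup);
  MPI_Finalize();
  return 0;
}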