X-Git-Url: http://bilbo.iut-bm.univ-fcomte.fr/pub/gitweb/simgrid.git/blobdiff_plain/9335baa2b23f940577448a84168eb0b50d3dd966..bfe580c9acb43ca1ca658f9111d4cd6f5951bbdc:/src/smpi/colls/allgather/allgather-rhv.cpp

diff --git a/src/smpi/colls/allgather/allgather-rhv.cpp b/src/smpi/colls/allgather/allgather-rhv.cpp
index e0d180c7f0..c2a4e5f51a 100644
--- a/src/smpi/colls/allgather/allgather-rhv.cpp
+++ b/src/smpi/colls/allgather/allgather-rhv.cpp
@@ -1,22 +1,20 @@
-/* Copyright (c) 2013-2017. The SimGrid Team.
+/* Copyright (c) 2013-2023. The SimGrid Team.
  * All rights reserved.                                                     */
 
 /* This program is free software; you can redistribute it and/or modify it
  * under the terms of the license (GNU LGPL) which comes with this package. */
 
-#include "../colls_private.h"
-
-namespace simgrid{
-namespace smpi{
+#include "../colls_private.hpp"
 
+namespace simgrid::smpi {
 
 // now only work with power of two processes
 
 int
-Coll_allgather_rhv::allgather(void *sbuf, int send_count,
-                              MPI_Datatype send_type, void *rbuf,
-                              int recv_count, MPI_Datatype recv_type,
-                              MPI_Comm comm)
+allgather__rhv(const void *sbuf, int send_count,
+               MPI_Datatype send_type, void *rbuf,
+               int recv_count, MPI_Datatype recv_type,
+               MPI_Comm comm)
 {
   MPI_Status status;
   MPI_Aint s_extent, r_extent;
@@ -28,11 +26,11 @@ Coll_allgather_rhv::allgather(void *sbuf, int send_count,
   unsigned int mask;
   int curr_count;
 
-  // get size of the communicator, followed by rank 
+  // get size of the communicator, followed by rank
   unsigned int num_procs = comm->size();
 
   if((num_procs&(num_procs-1)))
-    THROWF(arg_error,0, "allgather rhv algorithm can't be used with non power of two number of processes ! ");
+    throw std::invalid_argument("allgather rhv algorithm can't be used with non power of two number of processes!");
 
   unsigned int rank = comm->rank();
 
@@ -45,10 +43,10 @@ Coll_allgather_rhv::allgather(void *sbuf, int send_count,
   recv_chunk = r_extent * recv_count;
 
   if (send_chunk != recv_chunk) {
-    XBT_WARN("MPI_allgather_rhv use default MPI_allgather.");
-    Coll_allgather_default::allgather(sbuf, send_count, send_type, rbuf, recv_count,
-                                      recv_type, comm);
-    return MPI_SUCCESS;
+    XBT_INFO("MPI_allgather_rhv: send_chunk != recv_chunk, use default MPI_allgather.");
+    allgather__default(sbuf, send_count, send_type, rbuf, recv_count,
+                       recv_type, comm);
+    return MPI_SUCCESS;
   }
 
   // compute starting offset location to perform local copy
@@ -63,8 +61,6 @@ Coll_allgather_rhv::allgather(void *sbuf, int send_count,
     size /= 2;
   }
 
-  // printf("node %d base_offset %d\n",rank,base_offset);
-
   //perform a remote copy
   dst = base_offset;
 
@@ -75,7 +71,6 @@ Coll_allgather_rhv::allgather(void *sbuf, int send_count,
   mask >>= 1;
   i = 1;
 
-  int phase = 0;
   curr_count = recv_count;
   while (mask >= 1) {
     // destination pair for both send and recv
@@ -92,22 +87,15 @@ Coll_allgather_rhv::allgather(void *sbuf, int send_count,
 
     send_offset = send_base_offset * recv_chunk;
     recv_offset = recv_base_offset * recv_chunk;
 
-    // printf("node %d send to %d in phase %d s_offset = %d r_offset = %d count = %d\n",rank,dst,phase, send_base_offset, recv_base_offset, curr_count);
-
-    Request::sendrecv((char *)rbuf + send_offset, curr_count, recv_type, dst, tag,
-                      (char *)rbuf + recv_offset, curr_count, recv_type, dst, tag,
-                      comm, &status);
-
+    Request::sendrecv((char*)rbuf + send_offset, curr_count, recv_type, dst, tag, (char*)rbuf + recv_offset, curr_count,
+                      recv_type, dst, tag, comm, &status);
 
     curr_count *= 2;
     i *= 2;
     mask >>= 1;
-    phase++;
   }
 
   return MPI_SUCCESS;
 }
-
-}
-}
+} // namespace simgrid::smpi
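
Note on the algorithm (not part of the patch): the hunks above leave the rhv logic itself untouched. The routine first computes a bit-reversed base_offset per rank, then runs log2(P) sendrecv phases in which the number of exchanged blocks doubles; that structure is why the power-of-two guard (now thrown as std::invalid_argument) exists. The standalone C++ sketch below reproduces the offset computation and a recursive-doubling phase schedule. It is an illustration under stated assumptions, not SimGrid code: P is an assumed process count, the rank ^ mask pairing is the textbook recursive-doubling schedule (the real code derives partners from the reordered offsets), and printf stands in for Request::sendrecv.

#include <cstdio>
#include <stdexcept>

int main() {
  const unsigned P = 8; // assumed process count; must be a power of two

  // Same guard the patch converts from THROWF to std::invalid_argument.
  if (P & (P - 1))
    throw std::invalid_argument("rhv needs a power-of-two process count");

  for (unsigned rank = 0; rank < P; ++rank) {
    // Bit-reversed starting block, mirroring the base_offset loop visible
    // in the diff context: bit i of rank contributes P / 2^(i+1) blocks.
    unsigned size = P / 2, base_offset = 0;
    for (unsigned mask = 1; mask < P; mask <<= 1) {
      if (rank & mask)
        base_offset += size;
      size /= 2;
    }
    std::printf("rank %u starts at block %u;", rank, base_offset);

    // log2(P) phases; the exchanged block count doubles each phase,
    // matching curr_count *= 2 in the patched loop.
    unsigned curr_count = 1;
    for (unsigned mask = P / 2; mask >= 1; mask >>= 1) {
      std::printf(" [peer %u, %u block(s)]", rank ^ mask, curr_count);
      curr_count *= 2;
    }
    std::printf("\n");
  }
  return 0;
}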