AND (Algorithmique Numérique Distribuée)

Public GIT Repository
smpi: many classes died tonight, but that will save kittens in the long term.
author    Martin Quinson <martin.quinson@ens-rennes.fr>
          Sat, 16 Nov 2019 01:57:58 +0000 (02:57 +0100)
committer Martin Quinson <martin.quinson@ens-rennes.fr>
          Sat, 16 Nov 2019 01:58:02 +0000 (02:58 +0100)
smpi::Coll was a class with 11 static methods (one per MPI function
that has several implementations: allgather, allgatherv, etc.) and
nothing else.
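
For reference, here is a hedged sketch of what that interface amounted to; the
method list is inferred from the 11 algorithm directories under src/smpi/colls/
touched below, and the signatures (mirroring the MPI C API, as in the hunks
further down) are assumptions, not a copy of the removed header:

  namespace simgrid {
  namespace smpi {
  // Illustrative reconstruction only: one static method per collective that
  // has several implementations, and nothing else in the class.
  class Coll {
  public:
    static int allgather(const void* sendbuf, int sendcount, MPI_Datatype sendtype,
                         void* recvbuf, int recvcount, MPI_Datatype recvtype, MPI_Comm comm);
    static int allgatherv(const void* sendbuf, int sendcount, MPI_Datatype sendtype,
                          void* recvbuf, const int* recvcounts, const int* displs,
                          MPI_Datatype recvtype, MPI_Comm comm);
    static int allreduce(const void* sendbuf, void* recvbuf, int count,
                         MPI_Datatype datatype, MPI_Op op, MPI_Comm comm);
    // ... alltoall, alltoallv, barrier, bcast, gather, reduce, reduce_scatter, scatter
  };
  } // namespace smpi
  } // namespace simgrid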

That class was derived for each implementation of a given MPI
function, and the resulting class overloaded only one of the
methods while the others were ignored. There were well over 100 such
child classes.

The overloaded methods were static in their class and were always
called as plain functions (the class was never instantiated).

All this was written with some macros to reduce the burden.

Instead, we now use regular functions in the smpi namespace.
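
Concretely, the change follows the pattern visible in the hunks below. A
minimal before/after sketch for the 2dmesh allgather algorithm (the names and
signatures come from the first hunks; the body of the old child class is a
reconstruction for illustration):

  // Before: a never-instantiated child class per algorithm, overloading a
  // single static method and always called through its qualified name.
  class Coll_allgather_2dmesh : public Coll {
  public:
    static int allgather(const void* send_buff, int send_count, MPI_Datatype send_type,
                         void* recv_buff, int recv_count, MPI_Datatype recv_type, MPI_Comm comm);
  };
  // call site: Coll_allgather_2dmesh::allgather(sbuf, scount, stype, rbuf, rcount, rtype, comm);

  // After: a regular function in the simgrid::smpi namespace.
  namespace simgrid {
  namespace smpi {
  int allgather__2dmesh(const void* send_buff, int send_count, MPI_Datatype send_type,
                        void* recv_buff, int recv_count, MPI_Datatype recv_type, MPI_Comm comm);
  } // namespace smpi
  } // namespace simgrid
  // call site: allgather__2dmesh(sbuf, scount, stype, rbuf, rcount, rtype, comm);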

108 files changed:
include/smpi/forward.hpp
src/smpi/colls/allgather/allgather-2dmesh.cpp
src/smpi/colls/allgather/allgather-3dmesh.cpp
src/smpi/colls/allgather/allgather-GB.cpp
src/smpi/colls/allgather/allgather-NTSLR-NB.cpp
src/smpi/colls/allgather/allgather-NTSLR.cpp
src/smpi/colls/allgather/allgather-SMP-NTS.cpp
src/smpi/colls/allgather/allgather-bruck.cpp
src/smpi/colls/allgather/allgather-loosely-lr.cpp
src/smpi/colls/allgather/allgather-mvapich-smp.cpp
src/smpi/colls/allgather/allgather-ompi-neighborexchange.cpp
src/smpi/colls/allgather/allgather-pair.cpp
src/smpi/colls/allgather/allgather-rdb.cpp
src/smpi/colls/allgather/allgather-rhv.cpp
src/smpi/colls/allgather/allgather-ring.cpp
src/smpi/colls/allgather/allgather-smp-simple.cpp
src/smpi/colls/allgather/allgather-spreading-simple.cpp
src/smpi/colls/allgatherv/allgatherv-GB.cpp
src/smpi/colls/allgatherv/allgatherv-mpich-rdb.cpp
src/smpi/colls/allgatherv/allgatherv-mpich-ring.cpp
src/smpi/colls/allgatherv/allgatherv-ompi-bruck.cpp
src/smpi/colls/allgatherv/allgatherv-ompi-neighborexchange.cpp
src/smpi/colls/allgatherv/allgatherv-pair.cpp
src/smpi/colls/allgatherv/allgatherv-ring.cpp
src/smpi/colls/allreduce/allreduce-lr.cpp
src/smpi/colls/allreduce/allreduce-mvapich-rs.cpp
src/smpi/colls/allreduce/allreduce-mvapich-two-level.cpp
src/smpi/colls/allreduce/allreduce-ompi-ring-segmented.cpp
src/smpi/colls/allreduce/allreduce-rab-rdb.cpp
src/smpi/colls/allreduce/allreduce-rab1.cpp
src/smpi/colls/allreduce/allreduce-rab2.cpp
src/smpi/colls/allreduce/allreduce-rdb.cpp
src/smpi/colls/allreduce/allreduce-redbcast.cpp
src/smpi/colls/allreduce/allreduce-smp-binomial-pipeline.cpp
src/smpi/colls/allreduce/allreduce-smp-binomial.cpp
src/smpi/colls/allreduce/allreduce-smp-rdb.cpp
src/smpi/colls/allreduce/allreduce-smp-rsag-lr.cpp
src/smpi/colls/allreduce/allreduce-smp-rsag-rab.cpp
src/smpi/colls/allreduce/allreduce-smp-rsag.cpp
src/smpi/colls/alltoall/alltoall-2dmesh.cpp
src/smpi/colls/alltoall/alltoall-3dmesh.cpp
src/smpi/colls/alltoall/alltoall-basic-linear.cpp
src/smpi/colls/alltoall/alltoall-bruck.cpp
src/smpi/colls/alltoall/alltoall-mvapich-scatter-dest.cpp
src/smpi/colls/alltoall/alltoall-pair-light-barrier.cpp
src/smpi/colls/alltoall/alltoall-pair-mpi-barrier.cpp
src/smpi/colls/alltoall/alltoall-pair-one-barrier.cpp
src/smpi/colls/alltoall/alltoall-pair.cpp
src/smpi/colls/alltoall/alltoall-rdb.cpp
src/smpi/colls/alltoall/alltoall-ring-light-barrier.cpp
src/smpi/colls/alltoall/alltoall-ring-mpi-barrier.cpp
src/smpi/colls/alltoall/alltoall-ring-one-barrier.cpp
src/smpi/colls/alltoall/alltoall-ring.cpp
src/smpi/colls/alltoallv/alltoallv-bruck.cpp
src/smpi/colls/alltoallv/alltoallv-ompi-basic-linear.cpp
src/smpi/colls/alltoallv/alltoallv-pair-light-barrier.cpp
src/smpi/colls/alltoallv/alltoallv-pair-mpi-barrier.cpp
src/smpi/colls/alltoallv/alltoallv-pair-one-barrier.cpp
src/smpi/colls/alltoallv/alltoallv-pair.cpp
src/smpi/colls/alltoallv/alltoallv-ring-light-barrier.cpp
src/smpi/colls/alltoallv/alltoallv-ring-mpi-barrier.cpp
src/smpi/colls/alltoallv/alltoallv-ring-one-barrier.cpp
src/smpi/colls/alltoallv/alltoallv-ring.cpp
src/smpi/colls/barrier/barrier-mpich-smp.cpp
src/smpi/colls/barrier/barrier-mvapich2-pair.cpp
src/smpi/colls/barrier/barrier-ompi.cpp
src/smpi/colls/bcast/bcast-NTSB.cpp
src/smpi/colls/bcast/bcast-NTSL-Isend.cpp
src/smpi/colls/bcast/bcast-NTSL.cpp
src/smpi/colls/bcast/bcast-SMP-binary.cpp
src/smpi/colls/bcast/bcast-SMP-binomial.cpp
src/smpi/colls/bcast/bcast-SMP-linear.cpp
src/smpi/colls/bcast/bcast-arrival-pattern-aware-wait.cpp
src/smpi/colls/bcast/bcast-arrival-pattern-aware.cpp
src/smpi/colls/bcast/bcast-arrival-scatter.cpp
src/smpi/colls/bcast/bcast-binomial-tree.cpp
src/smpi/colls/bcast/bcast-flattree-pipeline.cpp
src/smpi/colls/bcast/bcast-flattree.cpp
src/smpi/colls/bcast/bcast-mvapich-smp.cpp
src/smpi/colls/bcast/bcast-ompi-pipeline.cpp
src/smpi/colls/bcast/bcast-ompi-split-bintree.cpp
src/smpi/colls/bcast/bcast-scatter-LR-allgather.cpp
src/smpi/colls/bcast/bcast-scatter-rdb-allgather.cpp
src/smpi/colls/gather/gather-mvapich.cpp
src/smpi/colls/gather/gather-ompi.cpp
src/smpi/colls/reduce/reduce-NTSL.cpp
src/smpi/colls/reduce/reduce-arrival-pattern-aware.cpp
src/smpi/colls/reduce/reduce-binomial.cpp
src/smpi/colls/reduce/reduce-flat-tree.cpp
src/smpi/colls/reduce/reduce-mvapich-knomial.cpp
src/smpi/colls/reduce/reduce-mvapich-two-level.cpp
src/smpi/colls/reduce/reduce-ompi.cpp
src/smpi/colls/reduce/reduce-rab.cpp
src/smpi/colls/reduce/reduce-scatter-gather.cpp
src/smpi/colls/reduce_scatter/reduce_scatter-mpich.cpp
src/smpi/colls/reduce_scatter/reduce_scatter-ompi.cpp
src/smpi/colls/scatter/scatter-mvapich-two-level.cpp
src/smpi/colls/scatter/scatter-ompi.cpp
src/smpi/colls/smpi_automatic_selector.cpp
src/smpi/colls/smpi_coll.cpp
src/smpi/colls/smpi_default_selector.cpp
src/smpi/colls/smpi_intel_mpi_selector.cpp
src/smpi/colls/smpi_mpich_selector.cpp
src/smpi/colls/smpi_mvapich2_selector.cpp
src/smpi/colls/smpi_mvapich2_selector_stampede.hpp
src/smpi/colls/smpi_openmpi_selector.cpp
src/smpi/include/smpi_coll.hpp
src/smpi/mpi/smpi_comm.cpp

index fd92dd6..b9e0f29 100644 (file)
@@ -13,7 +13,6 @@
 namespace simgrid {
 namespace smpi {
 
-class Coll;
 class Colls;
 class Comm;
 class Datatype;
index 0bf9bbc..0a774ad 100644 (file)
@@ -111,9 +111,9 @@ namespace smpi{
 
 
 int
-Coll_allgather_2dmesh::allgather(const void *send_buff, int send_count, MPI_Datatype
-                                 send_type, void *recv_buff, int recv_count,
-                                 MPI_Datatype recv_type, MPI_Comm comm)
+allgather__2dmesh(const void *send_buff, int send_count, MPI_Datatype
+                  send_type, void *recv_buff, int recv_count,
+                  MPI_Datatype recv_type, MPI_Comm comm)
 {
   MPI_Aint extent;
 
index fa370fe..02e5d12 100644 (file)
@@ -96,10 +96,10 @@ namespace simgrid{
 namespace smpi{
 
 
-int Coll_allgather_3dmesh::allgather(const void *send_buff, int send_count,
-                                     MPI_Datatype send_type, void *recv_buff,
-                                     int recv_count, MPI_Datatype recv_type,
-                                     MPI_Comm comm)
+int allgather__3dmesh(const void *send_buff, int send_count,
+                      MPI_Datatype send_type, void *recv_buff,
+                      int recv_count, MPI_Datatype recv_type,
+                      MPI_Comm comm)
 {
   MPI_Aint extent;
 
index 26ded0b..a0a320a 100644 (file)
@@ -9,10 +9,10 @@
 namespace simgrid{
 namespace smpi{
 // Allgather - gather/bcast algorithm
-int Coll_allgather_GB::allgather(const void *send_buff, int send_count,
-                                 MPI_Datatype send_type, void *recv_buff,
-                                 int recv_count, MPI_Datatype recv_type,
-                                 MPI_Comm comm)
+int allgather__GB(const void *send_buff, int send_count,
+                  MPI_Datatype send_type, void *recv_buff,
+                  int recv_count, MPI_Datatype recv_type,
+                  MPI_Comm comm)
 {
   int num_procs;
   num_procs = comm->size();
index 7ffaed9..d1cdb55 100644 (file)
@@ -11,9 +11,9 @@ namespace smpi{
 
 // Allgather-Non-Topology-Specific-Logical-Ring algorithm
 int
-Coll_allgather_NTSLR_NB::allgather(const void *sbuf, int scount, MPI_Datatype stype,
-                                   void *rbuf, int rcount, MPI_Datatype rtype,
-                                   MPI_Comm comm)
+allgather__NTSLR_NB(const void *sbuf, int scount, MPI_Datatype stype,
+                    void *rbuf, int rcount, MPI_Datatype rtype,
+                    MPI_Comm comm)
 {
   MPI_Aint rextent, sextent;
   MPI_Status status, status2;
@@ -31,7 +31,7 @@ Coll_allgather_NTSLR_NB::allgather(const void *sbuf, int scount, MPI_Datatype st
   // irregular case use default MPI fucntions
   if (scount * sextent != rcount * rextent) {
     XBT_WARN("MPI_allgather_NTSLR_NB use default MPI_allgather.");
-    Coll_allgather_default::allgather(sbuf, scount, stype, rbuf, rcount, rtype, comm);
+    allgather__default(sbuf, scount, stype, rbuf, rcount, rtype, comm);
     return MPI_SUCCESS;
   }
 
index 8c65ed8..c7a5380 100644 (file)
@@ -11,9 +11,9 @@ namespace smpi{
 
 // Allgather-Non-Topology-Specific-Logical-Ring algorithm
 int
-Coll_allgather_NTSLR::allgather(const void *sbuf, int scount, MPI_Datatype stype,
-                                void *rbuf, int rcount, MPI_Datatype rtype,
-                                MPI_Comm comm)
+allgather__NTSLR(const void *sbuf, int scount, MPI_Datatype stype,
+                 void *rbuf, int rcount, MPI_Datatype rtype,
+                 MPI_Comm comm)
 {
   MPI_Aint rextent, sextent;
   MPI_Status status;
@@ -29,7 +29,7 @@ Coll_allgather_NTSLR::allgather(const void *sbuf, int scount, MPI_Datatype stype
   // irregular case use default MPI fucntions
   if (scount * sextent != rcount * rextent) {
     XBT_WARN("MPI_allgather_NTSLR use default MPI_allgather.");
-    Coll_allgather_default::allgather(sbuf, scount, stype, rbuf, rcount, rtype, comm);
+    allgather__default(sbuf, scount, stype, rbuf, rcount, rtype, comm);
     return MPI_SUCCESS;
   }
 
index cb1acff..afaabc3 100644 (file)
@@ -10,10 +10,10 @@ namespace simgrid{
 namespace smpi{
 
 
-int Coll_allgather_SMP_NTS::allgather(const void *sbuf, int scount,
-                                      MPI_Datatype stype, void *rbuf,
-                                      int rcount, MPI_Datatype rtype,
-                                      MPI_Comm comm)
+int allgather__SMP_NTS(const void *sbuf, int scount,
+                       MPI_Datatype stype, void *rbuf,
+                       int rcount, MPI_Datatype rtype,
+                       MPI_Comm comm)
 {
   int src, dst, comm_size, rank;
   comm_size = comm->size();
@@ -47,7 +47,7 @@ int Coll_allgather_SMP_NTS::allgather(const void *sbuf, int scount,
   /* for too small number of processes, use default implementation */
   if (comm_size <= num_core) {
     XBT_WARN("MPI_allgather_SMP_NTS use default MPI_allgather.");
-    Coll_allgather_default::allgather(sbuf, scount, stype, rbuf, rcount, rtype, comm);
+    allgather__default(sbuf, scount, stype, rbuf, rcount, rtype, comm);
     return MPI_SUCCESS;
   }
 
index b2c49f7..0c3706e 100644 (file)
@@ -72,10 +72,10 @@ namespace smpi{
 
 
 
-int Coll_allgather_bruck::allgather(const void *send_buff, int send_count,
-                                    MPI_Datatype send_type, void *recv_buff,
-                                    int recv_count, MPI_Datatype recv_type,
-                                    MPI_Comm comm)
+int allgather__bruck(const void *send_buff, int send_count,
+                     MPI_Datatype send_type, void *recv_buff,
+                     int recv_count, MPI_Datatype recv_type,
+                     MPI_Comm comm)
 {
   // MPI variables
   MPI_Status status;
index 5596db8..2cbbe65 100644 (file)
@@ -10,10 +10,10 @@ namespace simgrid{
 namespace smpi{
 
 
-int Coll_allgather_loosely_lr::allgather(const void *sbuf, int scount,
-                                         MPI_Datatype stype, void *rbuf,
-                                         int rcount, MPI_Datatype rtype,
-                                         MPI_Comm comm)
+int allgather__loosely_lr(const void *sbuf, int scount,
+                          MPI_Datatype stype, void *rbuf,
+                          int rcount, MPI_Datatype rtype,
+                          MPI_Comm comm)
 {
   int comm_size, rank;
   int tag = COLL_TAG_ALLGATHER;
index 1a73c48..b53f6f9 100644 (file)
@@ -38,7 +38,7 @@
 namespace simgrid{
 namespace smpi{
 
-int Coll_allgather_mvapich2_smp::allgather(const void *sendbuf,int sendcnt, MPI_Datatype sendtype,
+int allgather__mvapich2_smp(const void *sendbuf,int sendcnt, MPI_Datatype sendtype,
                             void *recvbuf, int recvcnt,MPI_Datatype recvtype,
                             MPI_Comm  comm)
 {
@@ -138,7 +138,7 @@ int Coll_allgather_mvapich2_smp::allgather(const void *sendbuf,int sendcnt, MPI_
 
 
 
-            mpi_errno = Coll_allgather_mpich::allgather(sendtmpbuf,
+            mpi_errno = allgather__mpich(sendtmpbuf,
                                                (recvcnt*local_size),
                                                recvtype,
                                                recvbuf, (recvcnt*local_size), recvtype,
index 0b0a0b2..ad4f6a2 100644 (file)
@@ -68,7 +68,7 @@ namespace simgrid{
 namespace smpi{
 
 int
-Coll_allgather_ompi_neighborexchange::allgather(const void *sbuf, int scount,
+allgather__ompi_neighborexchange(const void *sbuf, int scount,
                                                  MPI_Datatype sdtype,
                                                  void* rbuf, int rcount,
                                                  MPI_Datatype rdtype,
@@ -90,9 +90,9 @@ Coll_allgather_ompi_neighborexchange::allgather(const void *sbuf, int scount,
       XBT_DEBUG(
                    "coll:tuned:allgather_intra_neighborexchange WARNING: odd size %d, switching to ring algorithm",
                    size);
-      return Coll_allgather_ring::allgather(sbuf, scount, sdtype,
-                                                  rbuf, rcount, rdtype,
-                                                  comm);
+      return allgather__ring(sbuf, scount, sdtype,
+                             rbuf, rcount, rdtype,
+                             comm);
    }
 
    XBT_DEBUG(
index 5505bd5..35372f7 100644 (file)
@@ -70,10 +70,10 @@ namespace smpi{
 
 
 int
-Coll_allgather_pair::allgather(const void *send_buff, int send_count,
-                               MPI_Datatype send_type, void *recv_buff,
-                               int recv_count, MPI_Datatype recv_type,
-                               MPI_Comm comm)
+allgather__pair(const void *send_buff, int send_count,
+                MPI_Datatype send_type, void *recv_buff,
+                int recv_count, MPI_Datatype recv_type,
+                MPI_Comm comm)
 {
 
   MPI_Aint extent;
index 5811d72..7c62e75 100644 (file)
@@ -10,10 +10,10 @@ namespace simgrid{
 namespace smpi{
 
 int
-Coll_allgather_rdb::allgather(const void *sbuf, int send_count,
-                              MPI_Datatype send_type, void *rbuf,
-                              int recv_count, MPI_Datatype recv_type,
-                              MPI_Comm comm)
+allgather__rdb(const void *sbuf, int send_count,
+               MPI_Datatype send_type, void *rbuf,
+               int recv_count, MPI_Datatype recv_type,
+               MPI_Comm comm)
 {
   // MPI variables
   MPI_Status status;
index 9673350..3fc59a6 100644 (file)
@@ -13,10 +13,10 @@ namespace smpi{
 // now only work with power of two processes
 
 int
-Coll_allgather_rhv::allgather(const void *sbuf, int send_count,
-                              MPI_Datatype send_type, void *rbuf,
-                              int recv_count, MPI_Datatype recv_type,
-                              MPI_Comm comm)
+allgather__rhv(const void *sbuf, int send_count,
+               MPI_Datatype send_type, void *rbuf,
+               int recv_count, MPI_Datatype recv_type,
+               MPI_Comm comm)
 {
   MPI_Status status;
   MPI_Aint s_extent, r_extent;
@@ -46,8 +46,8 @@ Coll_allgather_rhv::allgather(const void *sbuf, int send_count,
 
   if (send_chunk != recv_chunk) {
     XBT_WARN("MPI_allgather_rhv use default MPI_allgather.");
-    Coll_allgather_default::allgather(sbuf, send_count, send_type, rbuf, recv_count,
-                              recv_type, comm);
+    allgather__default(sbuf, send_count, send_type, rbuf, recv_count,
+                       recv_type, comm);
     return MPI_SUCCESS;
   }
 
index fa619db..9716db3 100644 (file)
@@ -69,10 +69,10 @@ namespace smpi{
 
 
 int
-Coll_allgather_ring::allgather(const void *send_buff, int send_count,
-                               MPI_Datatype send_type, void *recv_buff,
-                               int recv_count, MPI_Datatype recv_type,
-                               MPI_Comm comm)
+allgather__ring(const void *send_buff, int send_count,
+                MPI_Datatype send_type, void *recv_buff,
+                int recv_count, MPI_Datatype recv_type,
+                MPI_Comm comm)
 {
 
   MPI_Aint extent;
index c11d643..8163bfb 100644 (file)
@@ -10,10 +10,10 @@ namespace simgrid{
 namespace smpi{
 
 
-int Coll_allgather_smp_simple::allgather(const void *send_buf, int scount,
-                                         MPI_Datatype stype, void *recv_buf,
-                                         int rcount, MPI_Datatype rtype,
-                                         MPI_Comm comm)
+int allgather__smp_simple(const void *send_buf, int scount,
+                          MPI_Datatype stype, void *recv_buf,
+                          int rcount, MPI_Datatype rtype,
+                          MPI_Comm comm)
 {
   int src, dst, comm_size, rank;
   comm_size = comm->size();
index 0536c32..06332d5 100644 (file)
@@ -72,11 +72,11 @@ namespace smpi{
 
 
 int
-Coll_allgather_spreading_simple::allgather(const void *send_buff, int send_count,
-                                           MPI_Datatype send_type,
-                                           void *recv_buff, int recv_count,
-                                           MPI_Datatype recv_type,
-                                           MPI_Comm comm)
+allgather__spreading_simple(const void *send_buff, int send_count,
+                            MPI_Datatype send_type,
+                            void *recv_buff, int recv_count,
+                            MPI_Datatype recv_type,
+                            MPI_Comm comm)
 {
   MPI_Aint extent;
   int i, src, dst, rank, num_procs, num_reqs;
index d3de949..d6f9e51 100644 (file)
@@ -10,10 +10,10 @@ namespace simgrid{
 namespace smpi{
 
 // Allgather - gather/bcast algorithm
-int Coll_allgatherv_GB::allgatherv(const void *send_buff, int send_count,
-                                 MPI_Datatype send_type, void *recv_buff,
-                                 const int *recv_counts, const int *recv_disps, MPI_Datatype recv_type,
-                                 MPI_Comm comm)
+int allgatherv__GB(const void *send_buff, int send_count,
+                   MPI_Datatype send_type, void *recv_buff,
+                   const int *recv_counts, const int *recv_disps, MPI_Datatype recv_type,
+                   MPI_Comm comm)
 {
   Colls::gatherv(send_buff, send_count, send_type, recv_buff, recv_counts, recv_disps, recv_type, 0, comm);
   int num_procs, i, current, max = 0;
index 1434b0e..0087dd7 100644 (file)
@@ -13,7 +13,7 @@
 namespace simgrid{
 namespace smpi{
 
-int Coll_allgatherv_mpich_rdb::allgatherv (
+int allgatherv__mpich_rdb(
   const void *sendbuf,
   int sendcount,
   MPI_Datatype sendtype,
index 155b24f..7855b11 100644 (file)
 namespace simgrid{
 namespace smpi{
 
-int
-Coll_allgatherv_mpich_ring::allgatherv(const void *sendbuf, int sendcount,
-    MPI_Datatype send_type, void *recvbuf,
-    const int *recvcounts, const int *displs, MPI_Datatype recvtype,
-    MPI_Comm comm)
+int allgatherv__mpich_ring(const void *sendbuf, int sendcount,
+                           MPI_Datatype send_type, void *recvbuf,
+                           const int *recvcounts, const int *displs, MPI_Datatype recvtype,
+                           MPI_Comm comm)
 {
 
   char * sbuf = NULL, * rbuf = NULL;
index 91d9977..dae1b2d 100644 (file)
  *         [6]    [6]    [6]    [6]    [6]    [6]    [6]
  */
 
-namespace simgrid{
-namespace smpi{
-
-int Coll_allgatherv_ompi_bruck::allgatherv(const void *sbuf, int scount,
-                                           MPI_Datatype sdtype,
-                                           void *rbuf, const int *rcounts,
-                                           const int *rdispls,
-                                           MPI_Datatype rdtype,
-                                           MPI_Comm comm)
+namespace simgrid {
+namespace smpi {
+
+int allgatherv__ompi_bruck(const void *sbuf, int scount,
+                           MPI_Datatype sdtype,
+                           void *rbuf, const int *rcounts,
+                           const int *rdispls,
+                           MPI_Datatype rdtype,
+                           MPI_Comm comm)
 {
    int sendto, recvfrom, blockcount, i;
    unsigned int distance;
index 74542de..d026ec5 100644 (file)
@@ -69,11 +69,11 @@ namespace simgrid{
 namespace smpi{
 
 int
-Coll_allgatherv_ompi_neighborexchange::allgatherv(const void *sbuf, int scount,
-                                                  MPI_Datatype sdtype,
-                                                  void* rbuf, const int *rcounts, const int *rdispls,
-                                                  MPI_Datatype rdtype,
-                                                  MPI_Comm comm)
+allgatherv__ompi_neighborexchange(const void *sbuf, int scount,
+                                  MPI_Datatype sdtype,
+                                  void* rbuf, const int *rcounts, const int *rdispls,
+                                  MPI_Datatype rdtype,
+                                  MPI_Comm comm)
 {
     int line = -1;
     int rank, size;
@@ -89,10 +89,9 @@ Coll_allgatherv_ompi_neighborexchange::allgatherv(const void *sbuf, int scount,
     rank = comm->rank();
 
     if (size % 2) {
-        XBT_DEBUG(
-                     "coll:tuned:allgatherv_ompi_neighborexchange WARNING: odd size %d, switching to ring algorithm",
+        XBT_DEBUG("allgatherv__ompi_neighborexchange WARNING: odd size %d, switching to ring algorithm",
                      size);
-        return Coll_allgatherv_ring::allgatherv(sbuf, scount, sdtype,
+        return allgatherv__ring(sbuf, scount, sdtype,
                                                      rbuf, rcounts,
                                                      rdispls, rdtype,
                                                      comm);
index f721970..7bf4271 100644 (file)
@@ -69,10 +69,10 @@ namespace simgrid{
 namespace smpi{
 
 int
-Coll_allgatherv_pair::allgatherv(const void *send_buff, int send_count,
-                               MPI_Datatype send_type, void *recv_buff,
-                               const int *recv_counts, const int *recv_disps, MPI_Datatype recv_type,
-                               MPI_Comm comm)
+allgatherv__pair(const void *send_buff, int send_count,
+                 MPI_Datatype send_type, void *recv_buff,
+                 const int *recv_counts, const int *recv_disps, MPI_Datatype recv_type,
+                 MPI_Comm comm)
 {
 
   MPI_Aint extent;
index d6a9bec..851c6c4 100644 (file)
@@ -68,10 +68,10 @@ namespace simgrid{
 namespace smpi{
 
 int
-Coll_allgatherv_ring::allgatherv(const void *send_buff, int send_count,
-                               MPI_Datatype send_type, void *recv_buff,
-                               const int *recv_counts, const int *recv_disps, MPI_Datatype recv_type,
-                               MPI_Comm comm)
+allgatherv__ring(const void *send_buff, int send_count,
+                 MPI_Datatype send_type, void *recv_buff,
+                 const int *recv_counts, const int *recv_disps, MPI_Datatype recv_type,
+                 MPI_Comm comm)
 {
 
   MPI_Aint extent;
index 9df5ace..5cd9bb6 100644 (file)
 */
 
 //#include <star-reduction.c>
-namespace simgrid{
-namespace smpi{
-int
-Coll_allreduce_lr::allreduce(const void *sbuf, void *rbuf, int rcount,
-                             MPI_Datatype dtype, MPI_Op op, MPI_Comm comm)
+namespace simgrid {
+namespace smpi {
+int allreduce__lr(const void *sbuf, void *rbuf, int rcount,
+                  MPI_Datatype dtype, MPI_Op op, MPI_Comm comm)
 {
   int tag = COLL_TAG_ALLREDUCE;
   MPI_Status status;
@@ -40,7 +39,7 @@ Coll_allreduce_lr::allreduce(const void *sbuf, void *rbuf, int rcount,
   /* when communication size is smaller than number of process (not support) */
   if (rcount < size) {
     XBT_WARN("MPI_allreduce_lr use default MPI_allreduce.");
-    Coll_allreduce_default::allreduce(sbuf, rbuf, rcount, dtype, op, comm);
+    allreduce__default(sbuf, rbuf, rcount, dtype, op, comm);
     return MPI_SUCCESS;
   }
 
index 5c29b30..cbfdaa7 100644 (file)
 #include "../colls_private.hpp"
 #include <algorithm>
 
-namespace simgrid{
-namespace smpi{
-int Coll_allreduce_mvapich2_rs::allreduce(const void *sendbuf,
-                            void *recvbuf,
-                            int count,
-                            MPI_Datatype datatype,
-                            MPI_Op op, MPI_Comm comm)
+namespace simgrid {
+namespace smpi {
+int allreduce__mvapich2_rs(const void *sendbuf,
+                           void *recvbuf,
+                           int count,
+                           MPI_Datatype datatype,
+                           MPI_Op op, MPI_Comm comm)
 {
     int mpi_errno = MPI_SUCCESS;
     int newrank = 0;
index 3250168..61ee55d 100644 (file)
@@ -37,8 +37,8 @@
 
 #include "../colls_private.hpp"
 
-#define MPIR_Allreduce_pt2pt_rd_MV2 Coll_allreduce_rdb::allreduce
-#define MPIR_Allreduce_pt2pt_rs_MV2 Coll_allreduce_mvapich2_rs::allreduce
+#define MPIR_Allreduce_pt2pt_rd_MV2 allreduce__rdb
+#define MPIR_Allreduce_pt2pt_rs_MV2 allreduce__mvapich2_rs
 
 extern int (*MV2_Allreducection)(const void *sendbuf,
     void *recvbuf,
@@ -78,7 +78,7 @@ static  int MPIR_Allreduce_reduce_shmem_MV2(const void *sendbuf,
 
 
 /* general two level allreduce helper function */
-int Coll_allreduce_mvapich2_two_level::allreduce(const void *sendbuf,
+int allreduce__mvapich2_two_level(const void *sendbuf,
                              void *recvbuf,
                              int count,
                              MPI_Datatype datatype,
@@ -92,9 +92,9 @@ int Coll_allreduce_mvapich2_two_level::allreduce(const void *sendbuf,
 
     //if not set (use of the algo directly, without mvapich2 selector)
     if(MV2_Allreduce_intra_function==NULL)
-      MV2_Allreduce_intra_function = Coll_allreduce_mpich::allreduce;
+      MV2_Allreduce_intra_function = allreduce__mpich;
     if(MV2_Allreducection==NULL)
-      MV2_Allreducection = Coll_allreduce_rdb::allreduce;
+      MV2_Allreducection = allreduce__rdb;
 
     if(comm->get_leaders_comm()==MPI_COMM_NULL){
       comm->init_smp();
index 48ce160..dfa142a 100644 (file)
 
 #include "../colls_private.hpp"
 
-namespace simgrid{
-namespace smpi{
-int
-Coll_allreduce_ompi_ring_segmented::allreduce(const void *sbuf, void *rbuf, int count,
-                                               MPI_Datatype dtype,
-                                               MPI_Op op,
-                                               MPI_Comm comm)
+namespace simgrid {
+namespace smpi {
+int allreduce__ompi_ring_segmented(const void *sbuf, void *rbuf, int count,
+                                   MPI_Datatype dtype,
+                                   MPI_Op op,
+                                   MPI_Comm comm)
 {
    int ret = MPI_SUCCESS;
    int line;
@@ -205,8 +204,7 @@ Coll_allreduce_ompi_ring_segmented::allreduce(const void *sbuf, void *rbuf, int
    /* Special case for count less than size * segcount - use regular ring */
    if (count < size * segcount) {
       XBT_DEBUG( "coll:tuned:allreduce_ring_segmented rank %d/%d, count %d, switching to regular ring", rank, size, count);
-      return (Coll_allreduce_lr::allreduce(sbuf, rbuf, count, dtype, op,
-                                                   comm));
+      return (allreduce__lr(sbuf, rbuf, count, dtype, op, comm));
    }
 
    /* Determine the number of phases of the algorithm */
index d9f4d2c..209fbf4 100644 (file)
@@ -7,9 +7,9 @@
 #include "../colls_private.hpp"
 namespace simgrid{
 namespace smpi{
-int Coll_allreduce_rab_rdb::allreduce(const void *sbuff, void *rbuff, int count,
-                                      MPI_Datatype dtype, MPI_Op op,
-                                      MPI_Comm comm)
+int allreduce__rab_rdb(const void *sbuff, void *rbuff, int count,
+                       MPI_Datatype dtype, MPI_Op op,
+                       MPI_Comm comm)
 {
   int tag = COLL_TAG_ALLREDUCE;
   unsigned int mask, pof2, i, recv_idx, last_idx, send_idx, send_cnt;
index 34be1cd..355b6fe 100644 (file)
@@ -9,9 +9,9 @@
 namespace simgrid{
 namespace smpi{
 // NP pow of 2 for now
-int Coll_allreduce_rab1::allreduce(const void *sbuff, void *rbuff,
-                                   int count, MPI_Datatype dtype,
-                                   MPI_Op op, MPI_Comm comm)
+int allreduce__rab1(const void *sbuff, void *rbuff,
+                    int count, MPI_Datatype dtype,
+                    MPI_Op op, MPI_Comm comm)
 {
   MPI_Status status;
   MPI_Aint extent;
index ae9f3a2..b060f47 100644 (file)
@@ -10,9 +10,9 @@
 namespace simgrid{
 namespace smpi{
 // this requires that count >= NP
-int Coll_allreduce_rab2::allreduce(const void *sbuff, void *rbuff,
-                                   int count, MPI_Datatype dtype,
-                                   MPI_Op op, MPI_Comm comm)
+int allreduce__rab2(const void *sbuff, void *rbuff,
+                    int count, MPI_Datatype dtype,
+                    MPI_Op op, MPI_Comm comm)
 {
   MPI_Aint s_extent;
   int i, rank, nprocs;
index ba19511..e0859ce 100644 (file)
@@ -8,8 +8,8 @@
 //#include <star-reduction.c>
 namespace simgrid{
 namespace smpi{
-int Coll_allreduce_rdb::allreduce(const void *sbuff, void *rbuff, int count,
-                                  MPI_Datatype dtype, MPI_Op op, MPI_Comm comm)
+int allreduce__rdb(const void *sbuff, void *rbuff, int count,
+                   MPI_Datatype dtype, MPI_Op op, MPI_Comm comm)
 {
   int nprocs, rank, tag = COLL_TAG_ALLREDUCE;
   int mask, dst, pof2, newrank, rem, newdst;
index 1f5863c..8c967ce 100644 (file)
@@ -7,9 +7,9 @@
 #include "../colls_private.hpp"
 namespace simgrid{
 namespace smpi{
-int Coll_allreduce_redbcast::allreduce(const void *buf, void *buf2, int count,
-                                       MPI_Datatype datatype, MPI_Op op,
-                                       MPI_Comm comm)
+int allreduce__redbcast(const void *buf, void *buf2, int count,
+                        MPI_Datatype datatype, MPI_Op op,
+                        MPI_Comm comm)
 {
   Colls::reduce(buf, buf2, count, datatype, op, 0, comm);
   Colls::bcast(buf2, count, datatype, 0, comm);
index eb85fbd..e772a91 100644 (file)
@@ -38,10 +38,10 @@ This fucntion performs all-reduce operation as follow. ** in a pipeline fashion
 */
 namespace simgrid{
 namespace smpi{
-int Coll_allreduce_smp_binomial_pipeline::allreduce(const void *send_buf,
-                                                    void *recv_buf, int count,
-                                                    MPI_Datatype dtype,
-                                                    MPI_Op op, MPI_Comm comm)
+int allreduce__smp_binomial_pipeline(const void *send_buf,
+                                     void *recv_buf, int count,
+                                     MPI_Datatype dtype,
+                                     MPI_Op op, MPI_Comm comm)
 {
   int comm_size, rank;
   int tag = COLL_TAG_ALLREDUCE;
index cea1643..81e6b9d 100644 (file)
@@ -28,9 +28,9 @@ This fucntion performs all-reduce operation as follow.
 */
 namespace simgrid{
 namespace smpi{
-int Coll_allreduce_smp_binomial::allreduce(const void *send_buf, void *recv_buf,
-                                           int count, MPI_Datatype dtype,
-                                           MPI_Op op, MPI_Comm comm)
+int allreduce__smp_binomial(const void *send_buf, void *recv_buf,
+                            int count, MPI_Datatype dtype,
+                            MPI_Op op, MPI_Comm comm)
 {
   int comm_size, rank;
   int tag = COLL_TAG_ALLREDUCE;
index 12f6448..eb6c76a 100644 (file)
@@ -27,9 +27,9 @@ This fucntion performs all-reduce operation as follow.
 */
 namespace simgrid{
 namespace smpi{
-int Coll_allreduce_smp_rdb::allreduce(const void *send_buf, void *recv_buf, int count,
-                                      MPI_Datatype dtype, MPI_Op op,
-                                      MPI_Comm comm)
+int allreduce__smp_rdb(const void *send_buf, void *recv_buf, int count,
+                       MPI_Datatype dtype, MPI_Op op,
+                       MPI_Comm comm)
 {
   int comm_size, rank;
   int tag = COLL_TAG_ALLREDUCE;
index fd5718f..caf0808 100644 (file)
@@ -16,9 +16,9 @@ This fucntion performs all-reduce operation as follow.
 */
 namespace simgrid{
 namespace smpi{
-int Coll_allreduce_smp_rsag_lr::allreduce(const void *send_buf, void *recv_buf,
-                                          int count, MPI_Datatype dtype,
-                                          MPI_Op op, MPI_Comm comm)
+int allreduce__smp_rsag_lr(const void *send_buf, void *recv_buf,
+                           int count, MPI_Datatype dtype,
+                           MPI_Op op, MPI_Comm comm)
 {
   int comm_size, rank;
   int tag = COLL_TAG_ALLREDUCE;
index b4bfacc..2b40358 100644 (file)
@@ -20,9 +20,9 @@ This fucntion performs all-reduce operation as follow.
 */
 namespace simgrid{
 namespace smpi{
-int Coll_allreduce_smp_rsag_rab::allreduce(const void *sbuf, void *rbuf, int count,
-                                           MPI_Datatype dtype, MPI_Op op,
-                                           MPI_Comm comm)
+int allreduce__smp_rsag_rab(const void *sbuf, void *rbuf, int count,
+                            MPI_Datatype dtype, MPI_Op op,
+                            MPI_Comm comm)
 {
   int comm_size, rank;
   int tag = COLL_TAG_ALLREDUCE;
index 06d5e41..c1316e8 100644 (file)
@@ -15,9 +15,9 @@ This fucntion performs all-reduce operation as follow.
 */
 namespace simgrid{
 namespace smpi{
-int Coll_allreduce_smp_rsag::allreduce(const void *send_buf, void *recv_buf,
-                                       int count, MPI_Datatype dtype, MPI_Op op,
-                                       MPI_Comm comm)
+int allreduce__smp_rsag(const void *send_buf, void *recv_buf,
+                        int count, MPI_Datatype dtype, MPI_Op op,
+                        MPI_Comm comm)
 {
   int comm_size, rank;
   int tag = COLL_TAG_ALLREDUCE;
index 918a242..7da2954 100644 (file)
@@ -55,10 +55,10 @@ static int alltoall_check_is_2dmesh(int num, int *i, int *j)
 namespace simgrid{
 namespace smpi{
 
-int Coll_alltoall_2dmesh::alltoall(const void *send_buff, int send_count,
-                                    MPI_Datatype send_type,
-                                    void *recv_buff, int recv_count,
-                                    MPI_Datatype recv_type, MPI_Comm comm)
+int alltoall__2dmesh(const void *send_buff, int send_count,
+                     MPI_Datatype send_type,
+                     void *recv_buff, int recv_count,
+                     MPI_Datatype recv_type, MPI_Comm comm)
 {
   MPI_Status s;
   MPI_Aint extent;
index 4bf3d39..485d7b1 100644 (file)
@@ -47,10 +47,10 @@ static int alltoall_check_is_3dmesh(int num, int *i, int *j, int *k)
 }
 namespace simgrid{
 namespace smpi{
-int Coll_alltoall_3dmesh::alltoall(const void *send_buff, int send_count,
-                                    MPI_Datatype send_type,
-                                    void *recv_buff, int recv_count,
-                                    MPI_Datatype recv_type, MPI_Comm comm)
+int alltoall__3dmesh(const void *send_buff, int send_count,
+                     MPI_Datatype send_type,
+                     void *recv_buff, int recv_count,
+                     MPI_Datatype recv_type, MPI_Comm comm)
 {
   MPI_Aint extent;
   MPI_Status status;
index c8aeff2..799bccc 100644 (file)
@@ -13,8 +13,8 @@ namespace simgrid{
 namespace smpi{
 
 
-int Coll_alltoall_basic_linear::alltoall(const void *sendbuf, int sendcount, MPI_Datatype sendtype,
-                                          void *recvbuf, int recvcount, MPI_Datatype recvtype, MPI_Comm comm)
+int alltoall__basic_linear(const void *sendbuf, int sendcount, MPI_Datatype sendtype,
+                           void *recvbuf, int recvcount, MPI_Datatype recvtype, MPI_Comm comm)
 {
   int system_tag = COLL_TAG_ALLTOALL;
   int i;
index 568ee25..0f001cd 100644 (file)
@@ -32,10 +32,10 @@ namespace smpi{
 
 
 int
-Coll_alltoall_bruck::alltoall(const void *send_buff, int send_count,
-                               MPI_Datatype send_type, void *recv_buff,
-                               int recv_count, MPI_Datatype recv_type,
-                               MPI_Comm comm)
+alltoall__bruck(const void *send_buff, int send_count,
+                MPI_Datatype send_type, void *recv_buff,
+                int recv_count, MPI_Datatype recv_type,
+                MPI_Comm comm)
 {
   MPI_Status status;
   MPI_Aint extent;
index e4dc8a0..0930b20 100644 (file)
 #include "../colls_private.hpp"
 namespace simgrid{
 namespace smpi{
-int Coll_alltoall_mvapich2_scatter_dest::alltoall(
-                            const void *sendbuf,
-                            int sendcount,
-                            MPI_Datatype sendtype,
-                            void *recvbuf,
-                            int recvcount,
-                            MPI_Datatype recvtype,
-                            MPI_Comm comm)
+int alltoall__mvapich2_scatter_dest(const void *sendbuf,
+                                    int sendcount,
+                                    MPI_Datatype sendtype,
+                                    void *recvbuf,
+                                    int recvcount,
+                                    MPI_Datatype recvtype,
+                                    MPI_Comm comm)
 {
     int          comm_size, i, j;
     MPI_Aint     sendtype_extent = 0, recvtype_extent = 0;
index bcaaaf2..26b629d 100644 (file)
 namespace simgrid{
 namespace smpi{
 int
-Coll_alltoall_pair_light_barrier::alltoall(const void *send_buff, int send_count,
-                                            MPI_Datatype send_type,
-                                            void *recv_buff, int recv_count,
-                                            MPI_Datatype recv_type,
-                                            MPI_Comm comm)
+alltoall__pair_light_barrier(const void *send_buff, int send_count,
+                             MPI_Datatype send_type,
+                             void *recv_buff, int recv_count,
+                             MPI_Datatype recv_type,
+                             MPI_Comm comm)
 {
   MPI_Aint send_chunk, recv_chunk;
   MPI_Status s;
index 2dcf9e3..b113714 100644 (file)
 namespace simgrid{
 namespace smpi{
 int
-Coll_alltoall_pair_mpi_barrier::alltoall(const void *send_buff, int send_count,
-                                          MPI_Datatype send_type,
-                                          void *recv_buff, int recv_count,
-                                          MPI_Datatype recv_type, MPI_Comm comm)
+alltoall__pair_mpi_barrier(const void *send_buff, int send_count,
+                           MPI_Datatype send_type,
+                           void *recv_buff, int recv_count,
+                           MPI_Datatype recv_type, MPI_Comm comm)
 {
   MPI_Status s;
   MPI_Aint send_chunk, recv_chunk;
index a440cd8..79c5133 100644 (file)
 namespace simgrid{
 namespace smpi{
 int
-Coll_alltoall_pair_one_barrier::alltoall(const void *send_buff, int send_count,
-                                          MPI_Datatype send_type,
-                                          void *recv_buff, int recv_count,
-                                          MPI_Datatype recv_type, MPI_Comm comm)
+alltoall__pair_one_barrier(const void *send_buff, int send_count,
+                           MPI_Datatype send_type,
+                           void *recv_buff, int recv_count,
+                           MPI_Datatype recv_type, MPI_Comm comm)
 {
 
   MPI_Aint send_chunk, recv_chunk;
index 25ba861..6b53837 100644 (file)
@@ -30,9 +30,9 @@
  ****************************************************************************/
 namespace simgrid{
 namespace smpi{
-int Coll_alltoall_pair_rma::alltoall(const void *send_buff, int send_count, MPI_Datatype send_type,
-                  void *recv_buff, int recv_count, MPI_Datatype recv_type,
-                  MPI_Comm comm)
+int alltoall__pair_rma(const void *send_buff, int send_count, MPI_Datatype send_type,
+                       void *recv_buff, int recv_count, MPI_Datatype recv_type,
+                       MPI_Comm comm)
 {
 
   MPI_Aint send_chunk, recv_chunk;
@@ -64,10 +64,10 @@ int Coll_alltoall_pair_rma::alltoall(const void *send_buff, int send_count, MPI_
 }
 
 
-int Coll_alltoall_pair::alltoall(const void *send_buff, int send_count,
-                                  MPI_Datatype send_type,
-                                  void *recv_buff, int recv_count,
-                                  MPI_Datatype recv_type, MPI_Comm comm)
+int alltoall__pair(const void *send_buff, int send_count,
+                   MPI_Datatype send_type,
+                   void *recv_buff, int recv_count,
+                   MPI_Datatype recv_type, MPI_Comm comm)
 {
 
   MPI_Aint send_chunk, recv_chunk;
index 942288e..f073888 100644 (file)
  ****************************************************************************/
 namespace simgrid{
 namespace smpi{
-int Coll_alltoall_rdb::alltoall(const void *send_buff, int send_count,
-                                 MPI_Datatype send_type,
-                                 void *recv_buff, int recv_count,
-                                 MPI_Datatype recv_type, MPI_Comm comm)
+int alltoall__rdb(const void *send_buff, int send_count,
+                  MPI_Datatype send_type,
+                  void *recv_buff, int recv_count,
+                  MPI_Datatype recv_type, MPI_Comm comm)
 {
   /* MPI variables */
   MPI_Status status;
index 39fbde1..9c50d21 100644 (file)
 namespace simgrid{
 namespace smpi{
 int
-Coll_alltoall_ring_light_barrier::alltoall(const void *send_buff, int send_count,
-                                            MPI_Datatype send_type,
-                                            void *recv_buff, int recv_count,
-                                            MPI_Datatype recv_type,
-                                            MPI_Comm comm)
+alltoall__ring_light_barrier(const void *send_buff, int send_count,
+                             MPI_Datatype send_type,
+                             void *recv_buff, int recv_count,
+                             MPI_Datatype recv_type,
+                             MPI_Comm comm)
 {
   MPI_Aint send_chunk, recv_chunk;
   MPI_Status s;
index 2c61768..1d72cc6 100644 (file)
 namespace simgrid{
 namespace smpi{
 int
-Coll_alltoall_ring_mpi_barrier::alltoall(const void *send_buff, int send_count,
-                                          MPI_Datatype send_type,
-                                          void *recv_buff, int recv_count,
-                                          MPI_Datatype recv_type, MPI_Comm comm)
+alltoall__ring_mpi_barrier(const void *send_buff, int send_count,
+                           MPI_Datatype send_type,
+                           void *recv_buff, int recv_count,
+                           MPI_Datatype recv_type, MPI_Comm comm)
 {
   MPI_Status s;
   MPI_Aint send_chunk, recv_chunk;
index c52d5c5..b184b18 100644 (file)
 namespace simgrid{
 namespace smpi{
 int
-Coll_alltoall_ring_one_barrier::alltoall(const void *send_buff, int send_count,
-                                          MPI_Datatype send_type,
-                                          void *recv_buff, int recv_count,
-                                          MPI_Datatype recv_type, MPI_Comm comm)
+alltoall__ring_one_barrier(const void *send_buff, int send_count,
+                           MPI_Datatype send_type,
+                           void *recv_buff, int recv_count,
+                           MPI_Datatype recv_type, MPI_Comm comm)
 {
   MPI_Status s;
   MPI_Aint send_chunk, recv_chunk;
index 6d51492..7068328 100644 (file)
  * Author: Ahmad Faraj
 
  ****************************************************************************/
-namespace simgrid{
-namespace smpi{
-int
-Coll_alltoall_ring::alltoall(const void *send_buff, int send_count,
-                              MPI_Datatype send_type, void *recv_buff,
-                              int recv_count, MPI_Datatype recv_type,
-                              MPI_Comm comm)
+namespace simgrid {
+namespace smpi {
+int alltoall__ring(const void *send_buff, int send_count,
+                   MPI_Datatype send_type, void *recv_buff,
+                   int recv_count, MPI_Datatype recv_type,
+                   MPI_Comm comm)
 {
   MPI_Status s;
   MPI_Aint send_chunk, recv_chunk;
index 8b3794f..d32380b 100644 (file)
  **/
 namespace simgrid{
 namespace smpi{
-int Coll_alltoallv_bruck::alltoallv(const void *sendbuf, const int *sendcounts, const int *senddisps,
-                                   MPI_Datatype sendtype, void *recvbuf,
-                                   const int *recvcounts,const int *recvdisps, MPI_Datatype recvtype,
-                                   MPI_Comm comm)
+int alltoallv__bruck(const void *sendbuf, const int *sendcounts, const int *senddisps,
+                     MPI_Datatype sendtype, void *recvbuf,
+                     const int *recvcounts,const int *recvdisps, MPI_Datatype recvtype,
+                     MPI_Comm comm)
 {
   int system_tag = COLL_TAG_ALLTOALLV;
   int i, rank, size, err, count;
index de140a3..d317ed5 100644 (file)
  */
 namespace simgrid{
 namespace smpi{
-int
-Coll_alltoallv_ompi_basic_linear::alltoallv(const void *sbuf, const int *scounts, const int *sdisps,
-                                            MPI_Datatype sdtype,
-                                            void *rbuf, const int *rcounts, const int *rdisps,
-                                            MPI_Datatype rdtype,
-                                            MPI_Comm comm)
+int alltoallv__ompi_basic_linear(const void *sbuf, const int *scounts, const int *sdisps,
+                                 MPI_Datatype sdtype,
+                                 void *rbuf, const int *rcounts, const int *rdisps,
+                                 MPI_Datatype rdtype,
+                                 MPI_Comm comm)
 {
     int i, size, rank;
     char *psnd, *prcv;
index 448ca1a..655c4b1 100644 (file)
  * Author: Ahmad Faraj
 
  ****************************************************************************/
-namespace simgrid{
-namespace smpi{
-int
-Coll_alltoallv_pair_light_barrier::alltoallv(const void *send_buff, const int *send_counts, const int *send_disps,
-                                            MPI_Datatype send_type,
-                                            void *recv_buff, const int *recv_counts, const int *recv_disps,
-                                            MPI_Datatype recv_type,
-                                            MPI_Comm comm)
+namespace simgrid {
+namespace smpi {
+int alltoallv__pair_light_barrier(const void *send_buff, const int *send_counts, const int *send_disps,
+                                  MPI_Datatype send_type,
+                                  void *recv_buff, const int *recv_counts, const int *recv_disps,
+                                  MPI_Datatype recv_type,
+                                  MPI_Comm comm)
 {
   MPI_Aint send_chunk, recv_chunk;
   MPI_Status s;
index b63b487..68d9852 100644 (file)
  * Author: Ahmad Faraj
 
  ****************************************************************************/
-namespace simgrid{
-namespace smpi{
-int
-Coll_alltoallv_pair_mpi_barrier::alltoallv(const void *send_buff, const int *send_counts, const int *send_disps,
-                                          MPI_Datatype send_type,
-                                          void *recv_buff, const int *recv_counts, const int *recv_disps,
-                                          MPI_Datatype recv_type, MPI_Comm comm)
+namespace simgrid {
+namespace smpi {
+int alltoallv__pair_mpi_barrier(const void *send_buff, const int *send_counts, const int *send_disps,
+                                MPI_Datatype send_type,
+                                void *recv_buff, const int *recv_counts, const int *recv_disps,
+                                MPI_Datatype recv_type, MPI_Comm comm)
 {
   MPI_Status s;
   MPI_Aint send_chunk, recv_chunk;
index 3a78afe..7ef7a10 100644 (file)
  ****************************************************************************/
 namespace simgrid{
 namespace smpi{
-int
-Coll_alltoallv_pair_one_barrier::alltoallv(const void *send_buff, const int *send_counts, const int *send_disps,
-                                          MPI_Datatype send_type,
-                                          void *recv_buff,  const int *recv_counts, const int *recv_disps,                                                                                  MPI_Datatype recv_type, MPI_Comm comm)
+int alltoallv__pair_one_barrier(const void *send_buff, const int *send_counts, const int *send_disps,
+                                MPI_Datatype send_type,
+                                void *recv_buff,  const int *recv_counts, const int *recv_disps,                                                                                  MPI_Datatype recv_type, MPI_Comm comm)
 {
 
   MPI_Aint send_chunk, recv_chunk;
index 1492073..ff7ed2a 100644 (file)
  * Author: Ahmad Faraj
 
  ****************************************************************************/
-namespace simgrid{
-namespace smpi{
-int Coll_alltoallv_pair::alltoallv(const void *send_buff, const int *send_counts, const int *send_disps,
-                                  MPI_Datatype send_type,
-                                  void *recv_buff, const int *recv_counts, const int *recv_disps,
-                                  MPI_Datatype recv_type, MPI_Comm comm)
+namespace simgrid {
+namespace smpi {
+int alltoallv__pair(const void *send_buff, const int *send_counts, const int *send_disps,
+                    MPI_Datatype send_type,
+                    void *recv_buff, const int *recv_counts, const int *recv_disps,
+                    MPI_Datatype recv_type, MPI_Comm comm)
 {
 
   MPI_Aint send_chunk, recv_chunk;
index 87ccfdc..05d0ada 100644 (file)
  ****************************************************************************/
 namespace simgrid{
 namespace smpi{
-int
-Coll_alltoallv_ring_light_barrier::alltoallv(const void *send_buff, const int *send_counts, const int *send_disps,
-                                            MPI_Datatype send_type,
-                                            void *recv_buff, const int *recv_counts, const int *recv_disps,
-                                            MPI_Datatype recv_type,
-                                            MPI_Comm comm)
+int alltoallv__ring_light_barrier(const void *send_buff, const int *send_counts, const int *send_disps,
+                                  MPI_Datatype send_type,
+                                  void *recv_buff, const int *recv_counts, const int *recv_disps,
+                                  MPI_Datatype recv_type,
+                                  MPI_Comm comm)
 {
   MPI_Aint send_chunk, recv_chunk;
   MPI_Status s;
index 69202b4..44a32f4 100644 (file)
  ****************************************************************************/
 namespace simgrid{
 namespace smpi{
-int
-Coll_alltoallv_ring_mpi_barrier::alltoallv(const void *send_buff, const int *send_counts, const int *send_disps,
-                                          MPI_Datatype send_type,
-                                          void *recv_buff, const int *recv_counts, const int *recv_disps,
-                                          MPI_Datatype recv_type, MPI_Comm comm)
+int alltoallv__ring_mpi_barrier(const void *send_buff, const int *send_counts, const int *send_disps,
+                                MPI_Datatype send_type,
+                                void *recv_buff, const int *recv_counts, const int *recv_disps,
+                                MPI_Datatype recv_type, MPI_Comm comm)
 {
   MPI_Status s;
   MPI_Aint send_chunk, recv_chunk;
index 09e712d..f265fba 100644 (file)
  ****************************************************************************/
 namespace simgrid{
 namespace smpi{
-int
-Coll_alltoallv_ring_one_barrier::alltoallv(const void *send_buff, const int *send_counts, const int *send_disps,
-                                          MPI_Datatype send_type,
-                                          void *recv_buff, const int *recv_counts, const int *recv_disps,
-                                          MPI_Datatype recv_type, MPI_Comm comm)
+int alltoallv__ring_one_barrier(const void *send_buff, const int *send_counts, const int *send_disps,
+                                MPI_Datatype send_type,
+                                void *recv_buff, const int *recv_counts, const int *recv_disps,
+                                MPI_Datatype recv_type, MPI_Comm comm)
 {
   MPI_Status s;
   MPI_Aint send_chunk, recv_chunk;
index fbb0e53..53969d3 100644 (file)
@@ -27,9 +27,9 @@
  ****************************************************************************/
 namespace simgrid{
 namespace smpi{
-int Coll_alltoallv_ring::alltoallv(const void* send_buff, const int* send_counts, const int* send_disps, MPI_Datatype send_type,
-                                   void* recv_buff, const int* recv_counts, const int* recv_disps, MPI_Datatype recv_type,
-                                   MPI_Comm comm)
+int alltoallv__ring(const void* send_buff, const int* send_counts, const int* send_disps, MPI_Datatype send_type,
+                    void* recv_buff, const int* recv_counts, const int* recv_disps, MPI_Datatype recv_type,
+                    MPI_Comm comm)
 {
   MPI_Status s;
   MPI_Aint send_chunk, recv_chunk;
index cf37e01..a0e3027 100644 (file)
@@ -17,7 +17,7 @@
 
 namespace simgrid{
 namespace smpi{
-int Coll_barrier_mpich_smp::barrier(MPI_Comm comm)
+int barrier__mpich_smp(MPI_Comm comm)
 {
     int mpi_errno = MPI_SUCCESS;
     int mpi_errno_ret = MPI_SUCCESS;
@@ -32,7 +32,7 @@ int Coll_barrier_mpich_smp::barrier(MPI_Comm comm)
     local_rank = shmem_comm->rank();
     /* do the intranode barrier on all nodes */
     if (shmem_comm != NULL) {
-        mpi_errno = Coll_barrier_mpich::barrier(shmem_comm);
+        mpi_errno = barrier__mpich(shmem_comm);
         if (mpi_errno) {
           mpi_errno_ret+=mpi_errno;
         }
@@ -41,7 +41,7 @@ int Coll_barrier_mpich_smp::barrier(MPI_Comm comm)
     leader_comm = comm->get_leaders_comm();
     /* do the barrier across roots of all nodes */
     if (leader_comm != NULL && local_rank == 0) {
-        mpi_errno = Coll_barrier_mpich::barrier(leader_comm);
+        mpi_errno = barrier__mpich(leader_comm);
         if (mpi_errno) {
           mpi_errno_ret+=mpi_errno;
         }
@@ -52,7 +52,7 @@ int Coll_barrier_mpich_smp::barrier(MPI_Comm comm)
      * anything) */
     if (shmem_comm != NULL) {
         int i = 0;
-        mpi_errno = Coll_bcast_mpich::bcast(&i, 1, MPI_BYTE, 0, shmem_comm);
+        mpi_errno = bcast__mpich(&i, 1, MPI_BYTE, 0, shmem_comm);
         if (mpi_errno) {
           mpi_errno_ret+=mpi_errno;
         }
index 9d9c782..e877c7d 100644 (file)
@@ -43,7 +43,7 @@
 #include "../colls_private.hpp"
 namespace simgrid{
 namespace smpi{
-int Coll_barrier_mvapich2_pair::barrier(MPI_Comm comm)
+int barrier__mvapich2_pair(MPI_Comm comm)
 {
 
     int size, rank;
index e87e5fd..802481f 100644 (file)
@@ -42,9 +42,9 @@
  * synchronous guarantee made by last ring of sends are synchronous
  *
  */
-namespace simgrid{
-namespace smpi{
-int Coll_barrier_ompi_doublering::barrier(MPI_Comm comm)
+namespace simgrid {
+namespace smpi {
+int barrier__ompi_doublering(MPI_Comm comm)
 {
     int rank, size;
     int left, right;
@@ -104,7 +104,7 @@ int Coll_barrier_ompi_doublering::barrier(MPI_Comm comm)
  * To make synchronous, uses sync sends and sync sendrecvs
  */
 
-int Coll_barrier_ompi_recursivedoubling::barrier(MPI_Comm comm)
+int barrier__ompi_recursivedoubling(MPI_Comm comm)
 {
     int rank, size, adjsize;
     int mask, remote;
@@ -178,7 +178,7 @@ int Coll_barrier_ompi_recursivedoubling::barrier(MPI_Comm comm)
  * To make synchronous, uses sync sends and sync sendrecvs
  */
 
-int Coll_barrier_ompi_bruck::barrier(MPI_Comm comm)
+int barrier__ompi_bruck(MPI_Comm comm)
 {
     int rank, size;
     int distance, to, from;
@@ -210,7 +210,7 @@ int Coll_barrier_ompi_bruck::barrier(MPI_Comm comm)
  * To make synchronous, uses sync sends and sync sendrecvs
  */
 /* special case for two processes */
-int Coll_barrier_ompi_two_procs::barrier(MPI_Comm comm)
+int barrier__ompi_two_procs(MPI_Comm comm)
 {
     int remote;
 
@@ -242,7 +242,7 @@ int Coll_barrier_ompi_two_procs::barrier(MPI_Comm comm)
 
 /* copied function (with appropriate renaming) starts here */
 
-int Coll_barrier_ompi_basic_linear::barrier(MPI_Comm comm)
+int barrier__ompi_basic_linear(MPI_Comm comm)
 {
     int i;
     int size = comm->size();
@@ -292,7 +292,7 @@ int Coll_barrier_ompi_basic_linear::barrier(MPI_Comm comm)
  * Another recursive doubling type algorithm, but in this case
  * we go up the tree and back down the tree.
  */
-int Coll_barrier_ompi_tree::barrier(MPI_Comm comm)
+int barrier__ompi_tree(MPI_Comm comm)
 {
     int rank, size, depth;
     int jump, partner;
index 8decb7b..632eda4 100644 (file)
@@ -9,8 +9,8 @@
 int bcast_NTSB_segment_size_in_byte = 8192;
 namespace simgrid{
 namespace smpi{
-int Coll_bcast_NTSB::bcast(void *buf, int count, MPI_Datatype datatype,
-                               int root, MPI_Comm comm)
+int bcast__NTSB(void *buf, int count, MPI_Datatype datatype,
+                int root, MPI_Comm comm)
 {
   int tag = COLL_TAG_BCAST;
   MPI_Status status;
index 57857c0..0ceb9d8 100644 (file)
@@ -13,8 +13,8 @@ static int bcast_NTSL_segment_size_in_byte = 8192;
 */
 namespace simgrid{
 namespace smpi{
-int Coll_bcast_NTSL_Isend::bcast(void *buf, int count, MPI_Datatype datatype,
-                               int root, MPI_Comm comm)
+int bcast__NTSL_Isend(void *buf, int count, MPI_Datatype datatype,
+                      int root, MPI_Comm comm)
 {
   int tag = COLL_TAG_BCAST;
   MPI_Status status;
index 97e55c8..115abd9 100644 (file)
@@ -13,8 +13,8 @@ static int bcast_NTSL_segment_size_in_byte = 8192;
 */
 namespace simgrid{
 namespace smpi{
-int Coll_bcast_NTSL::bcast(void *buf, int count, MPI_Datatype datatype,
-                               int root, MPI_Comm comm)
+int bcast__NTSL(void *buf, int count, MPI_Datatype datatype,
+                int root, MPI_Comm comm)
 {
   int tag = COLL_TAG_BCAST;
   MPI_Status status;
index 3ce7918..259b90d 100644 (file)
@@ -9,9 +9,9 @@
 int bcast_SMP_binary_segment_byte = 8192;
 namespace simgrid{
 namespace smpi{
-int Coll_bcast_SMP_binary::bcast(void *buf, int count,
-                                     MPI_Datatype datatype, int root,
-                                     MPI_Comm comm)
+int bcast__SMP_binary(void *buf, int count,
+                      MPI_Datatype datatype, int root,
+                      MPI_Comm comm)
 {
   int tag = COLL_TAG_BCAST;
   MPI_Status status;
@@ -31,8 +31,7 @@ int Coll_bcast_SMP_binary::bcast(void *buf, int count,
     host_num_core = comm->get_intra_comm()->size();
   }else{
     //implementation buggy in this case
-    return Coll_bcast_mpich::bcast( buf , count, datatype,
-              root, comm);
+    return bcast__mpich(buf , count, datatype, root, comm);
   }
 
   int segment = bcast_SMP_binary_segment_byte / extent;
index d5b1481..986bf92 100644 (file)
@@ -7,9 +7,9 @@
 #include "../colls_private.hpp"
 namespace simgrid{
 namespace smpi{
-int Coll_bcast_SMP_binomial::bcast(void *buf, int count,
-                                       MPI_Datatype datatype, int root,
-                                       MPI_Comm comm)
+int bcast__SMP_binomial(void *buf, int count,
+                        MPI_Datatype datatype, int root,
+                        MPI_Comm comm)
 {
   int mask = 1;
   int size;
@@ -28,8 +28,7 @@ int Coll_bcast_SMP_binomial::bcast(void *buf, int count,
     num_core = comm->get_intra_comm()->size();
   }else{
     //implementation buggy in this case
-    return Coll_bcast_mpich::bcast( buf , count, datatype,
-              root, comm);
+    return bcast__mpich(buf, count, datatype, root, comm);
   }
 
   int to_intra, to_inter;
index a8c6bec..ea576fe 100644 (file)
@@ -9,9 +9,9 @@
 int bcast_SMP_linear_segment_byte = 8192;
 namespace simgrid{
 namespace smpi{
-int Coll_bcast_SMP_linear::bcast(void *buf, int count,
-                                     MPI_Datatype datatype, int root,
-                                     MPI_Comm comm)
+int bcast__SMP_linear(void *buf, int count,
+                      MPI_Datatype datatype, int root,
+                      MPI_Comm comm)
 {
   int tag = COLL_TAG_BCAST;
   MPI_Status status;
@@ -31,8 +31,7 @@ int Coll_bcast_SMP_linear::bcast(void *buf, int count,
     num_core = comm->get_intra_comm()->size();
   }else{
     //implementation buggy in this case
-    return Coll_bcast_mpich::bcast( buf , count, datatype,
-              root, comm);
+    return bcast__mpich(buf, count, datatype, root, comm);
   }
 
   int segment = bcast_SMP_linear_segment_byte / extent;
@@ -52,7 +51,7 @@ int Coll_bcast_SMP_linear::bcast(void *buf, int count,
   // call native when MPI communication size is too small
   if (size <= num_core) {
     XBT_WARN("MPI_bcast_SMP_linear use default MPI_bcast.");
-    Coll_bcast_default::bcast(buf, count, datatype, root, comm);
+    bcast__default(buf, count, datatype, root, comm);
     return MPI_SUCCESS;
   }
   // if root is not zero send to rank zero first
index 46e6125..6f1dc20 100644 (file)
@@ -18,9 +18,9 @@ int bcast_arrival_pattern_aware_wait_segment_size_in_byte = 8192;
 namespace simgrid{
 namespace smpi{
 /* Non-topology-specific pipelined linear-bcast function */
-int Coll_bcast_arrival_pattern_aware_wait::bcast(void *buf, int count,
-                                                     MPI_Datatype datatype,
-                                                     int root, MPI_Comm comm)
+int bcast__arrival_pattern_aware_wait(void *buf, int count,
+                                      MPI_Datatype datatype,
+                                      int root, MPI_Comm comm)
 {
   MPI_Status status;
   MPI_Request request;
index 1552b72..fab160a 100644 (file)
@@ -14,9 +14,9 @@ static int bcast_NTSL_segment_size_in_byte = 8192;
 namespace simgrid{
 namespace smpi{
 /* Non-topology-specific pipelined linear-bcast function */
-int Coll_bcast_arrival_pattern_aware::bcast(void *buf, int count,
-                                                MPI_Datatype datatype, int root,
-                                                MPI_Comm comm)
+int bcast__arrival_pattern_aware(void *buf, int count,
+                                 MPI_Datatype datatype, int root,
+                                 MPI_Comm comm)
 {
   int tag = -COLL_TAG_BCAST;
   MPI_Status status;
index 7891c76..0e13cfa 100644 (file)
@@ -16,9 +16,9 @@
 namespace simgrid{
 namespace smpi{
 /* Non-topology-specific pipelined linear-bcast function */
-int Coll_bcast_arrival_scatter::bcast(void *buf, int count,
-                                          MPI_Datatype datatype, int root,
-                                          MPI_Comm comm)
+int bcast__arrival_scatter(void *buf, int count,
+                           MPI_Datatype datatype, int root,
+                           MPI_Comm comm)
 {
   int tag = -COLL_TAG_BCAST;//in order to use ANY_TAG, make this one positive
   int header_tag = -10;
index 251ed58..29d5a65 100644 (file)
@@ -67,12 +67,11 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  * Author: MPIH / modified by Ahmad Faraj
 
  ****************************************************************************/
-namespace simgrid{
-namespace smpi{
-int
-Coll_bcast_binomial_tree::bcast(void *buff, int count,
-                                    MPI_Datatype data_type, int root,
-                                    MPI_Comm comm)
+namespace simgrid {
+namespace smpi {
+int bcast__binomial_tree(void *buff, int count,
+                         MPI_Datatype data_type, int root,
+                         MPI_Comm comm)
 {
   int src, dst, rank, num_procs, mask, relative_rank;
   int tag = COLL_TAG_BCAST;
index 07e4384..2899d05 100644 (file)
@@ -7,12 +7,11 @@
 #include "../colls_private.hpp"
 
 int flattree_segment_in_byte = 8192;
-namespace simgrid{
-namespace smpi{
-int
-Coll_bcast_flattree_pipeline::bcast(void *buff, int count,
-                                        MPI_Datatype data_type, int root,
-                                        MPI_Comm comm)
+namespace simgrid {
+namespace smpi {
+int bcast__flattree_pipeline(void *buff, int count,
+                             MPI_Datatype data_type, int root,
+                             MPI_Comm comm)
 {
   int i, j, rank, num_procs;
   int tag = COLL_TAG_BCAST;
@@ -26,7 +25,7 @@ Coll_bcast_flattree_pipeline::bcast(void *buff, int count,
   int increment = segment * extent;
   if (pipe_length==0) {
     XBT_WARN("MPI_bcast_flattree_pipeline use default MPI_bcast_flattree.");
-    return Coll_bcast_flattree::bcast(buff, count, data_type, root, comm);
+    return bcast__flattree(buff, count, data_type, root, comm);
   }
   rank = comm->rank();
   num_procs = comm->size();
index 175c875..5c73a9f 100644 (file)
@@ -5,11 +5,10 @@
  * under the terms of the license (GNU LGPL) which comes with this package. */
 
 #include "../colls_private.hpp"
-namespace simgrid{
-namespace smpi{
-int
-Coll_bcast_flattree::bcast(void *buff, int count, MPI_Datatype data_type,
-                               int root, MPI_Comm comm)
+namespace simgrid {
+namespace smpi {
+int bcast__flattree(void *buff, int count, MPI_Datatype data_type,
+                    int root, MPI_Comm comm)
 {
   int i, rank, num_procs;
   int tag = COLL_TAG_BCAST;
index 8abda57..70239f6 100644 (file)
@@ -51,17 +51,17 @@ extern int mv2_intra_node_knomial_factor;
 extern int mv2_bcast_two_level_system_size;
 #define INTRA_NODE_ROOT 0
 
-#define MPIR_Pipelined_Bcast_Zcpy_MV2 Coll_bcast_mpich::bcast
-#define MPIR_Pipelined_Bcast_MV2 Coll_bcast_mpich::bcast
-#define MPIR_Bcast_binomial_MV2 Coll_bcast_binomial_tree::bcast
-#define MPIR_Bcast_scatter_ring_allgather_shm_MV2 Coll_bcast_scatter_LR_allgather::bcast
-#define MPIR_Bcast_scatter_doubling_allgather_MV2 Coll_bcast_scatter_rdb_allgather::bcast
-#define MPIR_Bcast_scatter_ring_allgather_MV2 Coll_bcast_scatter_LR_allgather::bcast
-#define MPIR_Shmem_Bcast_MV2 Coll_bcast_mpich::bcast
-#define MPIR_Bcast_tune_inter_node_helper_MV2 Coll_bcast_mvapich2_inter_node::bcast
-#define MPIR_Bcast_inter_node_helper_MV2 Coll_bcast_mvapich2_inter_node::bcast
-#define MPIR_Knomial_Bcast_intra_node_MV2 Coll_bcast_mvapich2_knomial_intra_node::bcast
-#define MPIR_Bcast_intra_MV2 Coll_bcast_mvapich2_intra_node::bcast
+#define MPIR_Pipelined_Bcast_Zcpy_MV2 bcast__mpich
+#define MPIR_Pipelined_Bcast_MV2 bcast__mpich
+#define MPIR_Bcast_binomial_MV2 bcast__binomial_tree
+#define MPIR_Bcast_scatter_ring_allgather_shm_MV2 bcast__scatter_LR_allgather
+#define MPIR_Bcast_scatter_doubling_allgather_MV2 bcast__scatter_rdb_allgather
+#define MPIR_Bcast_scatter_ring_allgather_MV2 bcast__scatter_LR_allgather
+#define MPIR_Shmem_Bcast_MV2 bcast__mpich
+#define MPIR_Bcast_tune_inter_node_helper_MV2 bcast__mvapich2_inter_node
+#define MPIR_Bcast_inter_node_helper_MV2 bcast__mvapich2_inter_node
+#define MPIR_Knomial_Bcast_intra_node_MV2 bcast__mvapich2_knomial_intra_node
+#define MPIR_Bcast_intra_MV2 bcast__mvapich2_intra_node
 
 extern int zcpy_knomial_factor;
 extern int mv2_pipelined_zcpy_knomial_factor;
@@ -73,13 +73,13 @@ extern int mv2_intra_node_knomial_factor;
 #define mv2_bcast_large_msg            512*1024
 #define mv2_knomial_intra_node_threshold 131072
 #define mv2_scatter_rd_inter_leader_bcast 1
-namespace simgrid{
-namespace smpi{
-int Coll_bcast_mvapich2_inter_node::bcast(void *buffer,
-                                                 int count,
-                                                 MPI_Datatype datatype,
-                                                 int root,
-                                                 MPI_Comm  comm)
+namespace simgrid {
+namespace smpi {
+int bcast__mvapich2_inter_node(void *buffer,
+                               int count,
+                               MPI_Datatype datatype,
+                               int root,
+                               MPI_Comm  comm)
 {
     int rank;
     int mpi_errno = MPI_SUCCESS;
@@ -93,11 +93,11 @@ int Coll_bcast_mvapich2_inter_node::bcast(void *buffer,
 
 
     if (MV2_Bcast_function==NULL){
-      MV2_Bcast_function=Coll_bcast_mpich::bcast;
+      MV2_Bcast_function = bcast__mpich;
     }
 
     if (MV2_Bcast_intra_node_function==NULL){
-      MV2_Bcast_intra_node_function= Coll_bcast_mpich::bcast;
+      MV2_Bcast_intra_node_function = bcast__mpich;
     }
 
     if(comm->get_leaders_comm()==MPI_COMM_NULL){
@@ -170,21 +170,21 @@ int Coll_bcast_mvapich2_inter_node::bcast(void *buffer,
 }
 
 
-int Coll_bcast_mvapich2_knomial_intra_node::bcast(void *buffer,
-                                      int count,
-                                      MPI_Datatype datatype,
-                                      int root, MPI_Comm  comm)
+int bcast__mvapich2_knomial_intra_node(void *buffer,
+                                       int count,
+                                       MPI_Datatype datatype,
+                                       int root, MPI_Comm  comm)
 {
     int local_size = 0, rank;
     int mpi_errno = MPI_SUCCESS;
     int src, dst, mask, relative_rank;
     int k;
     if (MV2_Bcast_function==NULL){
-      MV2_Bcast_function=Coll_bcast_mpich::bcast;
+      MV2_Bcast_function = bcast__mpich;
     }
 
     if (MV2_Bcast_intra_node_function==NULL){
-      MV2_Bcast_intra_node_function= Coll_bcast_mpich::bcast;
+      MV2_Bcast_intra_node_function = bcast__mpich;
     }
 
     if(comm->get_leaders_comm()==MPI_COMM_NULL){
@@ -243,10 +243,10 @@ int Coll_bcast_mvapich2_knomial_intra_node::bcast(void *buffer,
 }
 
 
-int Coll_bcast_mvapich2_intra_node::bcast(void *buffer,
-                         int count,
-                         MPI_Datatype datatype,
-                         int root, MPI_Comm  comm)
+int bcast__mvapich2_intra_node(void *buffer,
+                               int count,
+                               MPI_Datatype datatype,
+                               int root, MPI_Comm  comm)
 {
     int mpi_errno = MPI_SUCCESS;
     int comm_size;
@@ -260,11 +260,11 @@ int Coll_bcast_mvapich2_intra_node::bcast(void *buffer,
     if (count == 0)
         return MPI_SUCCESS;
     if (MV2_Bcast_function==NULL){
-      MV2_Bcast_function=Coll_bcast_mpich::bcast;
+      MV2_Bcast_function = bcast__mpich;
     }
 
     if (MV2_Bcast_intra_node_function==NULL){
-      MV2_Bcast_intra_node_function= Coll_bcast_mpich::bcast;
+      MV2_Bcast_intra_node_function = bcast__mpich;
     }
 
     if(comm->get_leaders_comm()==MPI_COMM_NULL){
index 7a5cbeb..393b714 100644 (file)
 
 namespace simgrid{
 namespace smpi{
-int Coll_bcast_ompi_pipeline::bcast( void* buffer,
-                                      int original_count,
-                                      MPI_Datatype datatype,
-                                      int root,
-                                      MPI_Comm comm)
+int bcast__ompi_pipeline( void* buffer,
+                          int original_count,
+                          MPI_Datatype datatype,
+                          int root,
+                          MPI_Comm comm)
 {
     int count_by_segment = original_count;
     size_t type_size;
index be50cd2..b4e7de5 100644 (file)
 #include "../coll_tuned_topo.hpp"
 #include "../colls_private.hpp"
 #define MAXTREEFANOUT 32
-namespace simgrid{
-namespace smpi{
-
-int
-Coll_bcast_ompi_split_bintree::bcast ( void* buffer,
-                                            int count,
-                                            MPI_Datatype datatype,
-                                            int root,
-                                            MPI_Comm comm)
+namespace simgrid {
+namespace smpi {
+
+int bcast__ompi_split_bintree( void* buffer,
+                               int count,
+                               MPI_Datatype datatype,
+                               int root,
+                               MPI_Comm comm)
 {
     unsigned int segsize ;
     int rank, size;
@@ -136,8 +135,7 @@ Coll_bcast_ompi_split_bintree::bcast ( void* buffer,
         (segsize > counts[0] * type_size) ||
         (segsize > counts[1] * type_size) ) {
         /* call linear version here ! */
-        return (Coll_bcast_SMP_linear::bcast ( buffer, count, datatype,
-                                                    root, comm));
+        return bcast__SMP_linear( buffer, count, datatype, root, comm);
     }
     type_extent = datatype->get_extent();
 
index 41844ce..5ac3dc6 100644 (file)
@@ -67,12 +67,11 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  * Author: MPIH / modified by Ahmad Faraj
 
  ****************************************************************************/
-namespace simgrid{
-namespace smpi{
-int
-Coll_bcast_scatter_LR_allgather::bcast(void *buff, int count,
-                                           MPI_Datatype data_type, int root,
-                                           MPI_Comm comm)
+namespace simgrid {
+namespace smpi {
+int bcast__scatter_LR_allgather(void *buff, int count,
+                                MPI_Datatype data_type, int root,
+                                MPI_Comm comm)
 {
   MPI_Aint extent;
   MPI_Status status;
index d8cb946..9cf489b 100644 (file)
@@ -102,8 +102,7 @@ static int scatter_for_bcast(
 }
 
 
-int
-Coll_bcast_scatter_rdb_allgather::bcast (
+int bcast__scatter_rdb_allgather(
     void *buffer,
     int count,
     MPI_Datatype datatype,
index adeb3ce..c5ca478 100644 (file)
@@ -38,9 +38,9 @@
 #include "../colls_private.hpp"
 #include <algorithm>
 
-#define MPIR_Gather_MV2_Direct Coll_gather_ompi_basic_linear::gather
-#define MPIR_Gather_MV2_two_level_Direct Coll_gather_ompi_basic_linear::gather
-#define MPIR_Gather_intra Coll_gather_mpich::gather
+#define MPIR_Gather_MV2_Direct gather__ompi_basic_linear
+#define MPIR_Gather_MV2_two_level_Direct gather__ompi_basic_linear
+#define MPIR_Gather_intra gather__mpich
 typedef int (*MV2_Gather_function_ptr) (const void *sendbuf,
     int sendcnt,
     MPI_Datatype sendtype,
@@ -127,14 +127,14 @@ static int MPIR_pt_pt_intra_gather( const void *sendbuf, int sendcnt, MPI_Dataty
 
 
 
-int Coll_gather_mvapich2_two_level::gather(const void *sendbuf,
-                                            int sendcnt,
-                                            MPI_Datatype sendtype,
-                                            void *recvbuf,
-                                            int recvcnt,
-                                            MPI_Datatype recvtype,
-                                            int root,
-                                            MPI_Comm comm)
+int gather__mvapich2_two_level(const void *sendbuf,
+                               int sendcnt,
+                               MPI_Datatype sendtype,
+                               void *recvbuf,
+                               int recvcnt,
+                               MPI_Datatype recvtype,
+                               int root,
+                               MPI_Comm comm)
 {
   unsigned char* leader_gather_buf = NULL;
   int comm_size, rank;
@@ -151,7 +151,7 @@ int Coll_gather_mvapich2_two_level::gather(const void *sendbuf,
 
   // if not set (use of the algo directly, without mvapich2 selector)
   if (MV2_Gather_intra_node_function == NULL)
-    MV2_Gather_intra_node_function = Coll_gather_mpich::gather;
+    MV2_Gather_intra_node_function = gather__mpich;
 
   if (comm->get_leaders_comm() == MPI_COMM_NULL) {
     comm->init_smp();
index 812ec45..36fbe6b 100644 (file)
 #include "../coll_tuned_topo.hpp"
 #include "../colls_private.hpp"
 
-namespace simgrid{
-namespace smpi{
+namespace simgrid {
+namespace smpi {
 
-int Coll_gather_ompi_binomial::gather(const void* sbuf, int scount, MPI_Datatype sdtype, void* rbuf, int rcount,
-                                      MPI_Datatype rdtype, int root, MPI_Comm comm)
+int gather__ompi_binomial(const void* sbuf, int scount, MPI_Datatype sdtype, void* rbuf, int rcount,
+                          MPI_Datatype rdtype, int root, MPI_Comm comm)
 {
     int line = -1;
     int i;
@@ -202,12 +202,12 @@ int Coll_gather_ompi_binomial::gather(const void* sbuf, int scount, MPI_Datatype
  *  Accepts:  - same arguments as MPI_Gather(), first segment size
  *  Returns:  - MPI_SUCCESS or error code
  */
-int Coll_gather_ompi_linear_sync::gather(const void *sbuf, int scount,
-                                         MPI_Datatype sdtype,
-                                         void *rbuf, int rcount,
-                                         MPI_Datatype rdtype,
-                                         int root,
-                                         MPI_Comm comm)
+int gather__ompi_linear_sync(const void *sbuf, int scount,
+                             MPI_Datatype sdtype,
+                             void *rbuf, int rcount,
+                             MPI_Datatype rdtype,
+                             int root,
+                             MPI_Comm comm)
 {
     int i;
     int ret, line;
@@ -355,8 +355,8 @@ int Coll_gather_ompi_linear_sync::gather(const void *sbuf, int scount,
  *  Accepts:  - same arguments as MPI_Gather()
  *  Returns:  - MPI_SUCCESS or error code
  */
-int Coll_gather_ompi_basic_linear::gather(const void* sbuf, int scount, MPI_Datatype sdtype, void* rbuf, int rcount,
-                                          MPI_Datatype rdtype, int root, MPI_Comm comm)
+int gather__ompi_basic_linear(const void* sbuf, int scount, MPI_Datatype sdtype, void* rbuf, int rcount,
+                              MPI_Datatype rdtype, int root, MPI_Comm comm)
 {
     int i;
     int err;
index b341762..a3f4e60 100644 (file)
@@ -12,11 +12,11 @@ int reduce_NTSL_segment_size_in_byte = 8192;
 /* Non-topology-specific pipelined linear-bcast function
    0->1, 1->2 ,2->3, ....., ->last node : in a pipeline fashion
 */
-namespace simgrid{
-namespace smpi{
-int Coll_reduce_NTSL::reduce(const void *buf, void *rbuf, int count,
-                                MPI_Datatype datatype, MPI_Op op, int root,
-                                MPI_Comm comm)
+namespace simgrid {
+namespace smpi {
+int reduce__NTSL(const void *buf, void *rbuf, int count,
+                 MPI_Datatype datatype, MPI_Op op, int root,
+                 MPI_Comm comm)
 {
   int tag = COLL_TAG_REDUCE;
   MPI_Status status;
@@ -133,9 +133,9 @@ int Coll_reduce_NTSL::reduce(const void *buf, void *rbuf, int count,
   /* when count is not divisible by block size, use default BCAST for the remainder */
   if ((remainder != 0) && (count > segment)) {
     XBT_WARN("MPI_reduce_NTSL use default MPI_reduce.");
-    Coll_reduce_default::reduce((char *)buf + (pipe_length * increment),
-               (char *)rbuf + (pipe_length * increment), remainder, datatype, op, root,
-               comm);
+    reduce__default((char *)buf + (pipe_length * increment),
+                    (char *)rbuf + (pipe_length * increment), remainder, datatype, op, root,
+                    comm);
   }
 
   smpi_free_tmp_buffer(tmp_buf);
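
As the fallback above shows, the pipelined reduce only streams whole segments and hands whatever is left over to reduce__default. The segmentation arithmetic behind pipe_length and remainder is plain integer division against the 8192-byte threshold declared above; a worked example with made-up values:

/* Illustrative arithmetic only; the concrete values are invented. */
int count              = 3000;                 /* elements to reduce                   */
int extent             = 8;                    /* bytes per element, e.g. MPI_DOUBLE   */
int segment_size_bytes = 8192;                 /* reduce_NTSL_segment_size_in_byte     */
int segment     = segment_size_bytes / extent; /* 1024 elements per segment            */
int pipe_length = count / segment;             /* 2 full segments go through the pipe  */
int remainder   = count % segment;             /* 952 elements go to reduce__default   */
int increment   = segment * extent;            /* 8192-byte stride between segments    */
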
index 1ff8bd4..63c4245 100644 (file)
@@ -19,11 +19,11 @@ int reduce_arrival_pattern_aware_segment_size_in_byte = 8192;
 namespace simgrid{
 namespace smpi{
 /* Non-topology-specific pipelined linear-reduce function */
-int Coll_reduce_arrival_pattern_aware::reduce(const void *buf, void *rbuf,
-                                                 int count,
-                                                 MPI_Datatype datatype,
-                                                 MPI_Op op, int root,
-                                                 MPI_Comm comm)
+int reduce__arrival_pattern_aware(const void *buf, void *rbuf,
+                                  int count,
+                                  MPI_Datatype datatype,
+                                  MPI_Op op, int root,
+                                  MPI_Comm comm)
 {
   int rank = comm->rank();
   int tag = -COLL_TAG_REDUCE;
@@ -332,8 +332,8 @@ int Coll_reduce_arrival_pattern_aware::reduce(const void *buf, void *rbuf,
 
   /* when count is not divisible by block size, use default BCAST for the remainder */
   if ((remainder != 0) && (count > segment)) {
-    Coll_reduce_default::reduce((char*)buf + (pipe_length * increment), (char*)rbuf + (pipe_length * increment),
-                                remainder, datatype, op, root, comm);
+    reduce__default((char*)buf + (pipe_length * increment), (char*)rbuf + (pipe_length * increment),
+                    remainder, datatype, op, root, comm);
   }
 
   smpi_free_tmp_buffer(tmp_buf);
index 3525231..a47f49a 100644 (file)
@@ -10,9 +10,9 @@
 //#include <star-reduction.c>
 namespace simgrid{
 namespace smpi{
-int Coll_reduce_binomial::reduce(const void *sendbuf, void *recvbuf, int count,
-                                    MPI_Datatype datatype, MPI_Op op, int root,
-                                    MPI_Comm comm)
+int reduce__binomial(const void *sendbuf, void *recvbuf, int count,
+                     MPI_Datatype datatype, MPI_Op op, int root,
+                     MPI_Comm comm)
 {
   MPI_Status status;
   int comm_size, rank;
index e9ffc8a..2eaffae 100644 (file)
@@ -6,12 +6,11 @@
 
 #include "../colls_private.hpp"
 //#include <star-reduction.c>
-namespace simgrid{
-namespace smpi{
-int
-Coll_reduce_flat_tree::reduce(const void *sbuf, void *rbuf, int count,
-                                 MPI_Datatype dtype, MPI_Op op,
-                                 int root, MPI_Comm comm)
+namespace simgrid {
+namespace smpi {
+int reduce__flat_tree(const void *sbuf, void *rbuf, int count,
+                      MPI_Datatype dtype, MPI_Op op,
+                      int root, MPI_Comm comm)
 {
   int i, tag = COLL_TAG_REDUCE;
   int size;
index 57069b0..866ab8a 100644 (file)
@@ -116,9 +116,9 @@ static int MPIR_Reduce_knomial_trace(int root, int reduce_knomial_factor,
     return 0;
 }
 
-namespace simgrid{
-namespace smpi{
-int Coll_reduce_mvapich2_knomial::reduce (
+namespace simgrid {
+namespace smpi {
+int reduce__mvapich2_knomial(
         const void *sendbuf,
         void *recvbuf,
         int count,
index 4d47d23..f3017a8 100644 (file)
 #define SHMEM_COLL_BLOCK_SIZE (local_size * mv2_g_shmem_coll_max_msg_size)
 #define mv2_use_knomial_reduce 1
 
-#define MPIR_Reduce_inter_knomial_wrapper_MV2 Coll_reduce_mvapich2_knomial::reduce
-#define MPIR_Reduce_intra_knomial_wrapper_MV2 Coll_reduce_mvapich2_knomial::reduce
-#define MPIR_Reduce_binomial_MV2 Coll_reduce_binomial::reduce
-#define MPIR_Reduce_redscat_gather_MV2 Coll_reduce_scatter_gather::reduce
-#define MPIR_Reduce_shmem_MV2 Coll_reduce_ompi_basic_linear::reduce
+#define MPIR_Reduce_inter_knomial_wrapper_MV2 reduce__mvapich2_knomial
+#define MPIR_Reduce_intra_knomial_wrapper_MV2 reduce__mvapich2_knomial
+#define MPIR_Reduce_binomial_MV2 reduce__binomial
+#define MPIR_Reduce_redscat_gather_MV2 reduce__scatter_gather
+#define MPIR_Reduce_shmem_MV2 reduce__ompi_basic_linear
 
 extern int (*MV2_Reduce_function)( const void *sendbuf,
     void *recvbuf,
@@ -73,15 +73,15 @@ static int (*reduce_fn)(const void *sendbuf,
                              int count,
                              MPI_Datatype datatype,
                              MPI_Op op, int root, MPI_Comm  comm);
-namespace simgrid{
-namespace smpi{
-int Coll_reduce_mvapich2_two_level::reduce( const void *sendbuf,
-                                     void *recvbuf,
-                                     int count,
-                                     MPI_Datatype datatype,
-                                     MPI_Op op,
-                                     int root,
-                                     MPI_Comm comm)
+namespace simgrid {
+namespace smpi {
+int reduce__mvapich2_two_level( const void *sendbuf,
+                                void *recvbuf,
+                                int count,
+                                MPI_Datatype datatype,
+                                MPI_Op op,
+                                int root,
+                                MPI_Comm comm)
 {
     int mpi_errno = MPI_SUCCESS;
     int my_rank, total_size, local_rank, local_size;
@@ -96,9 +96,9 @@ int Coll_reduce_mvapich2_two_level::reduce( const void *sendbuf,
 
     //if not set (use of the algo directly, without mvapich2 selector)
     if(MV2_Reduce_function==NULL)
-      MV2_Reduce_function=Coll_reduce_mpich::reduce;
+      MV2_Reduce_function = reduce__mpich;
     if(MV2_Reduce_intra_function==NULL)
-      MV2_Reduce_intra_function=Coll_reduce_mpich::reduce;
+      MV2_Reduce_intra_function = reduce__mpich;
 
     if(comm->get_leaders_comm()==MPI_COMM_NULL){
       comm->init_smp();
index dd72d7b..3f3cdb6 100644 (file)
@@ -328,11 +328,11 @@ int smpi_coll_tuned_ompi_reduce_generic(const void* sendbuf, void* recvbuf, int
 */
 
 
-int Coll_reduce_ompi_chain::reduce(const void *sendbuf, void *recvbuf, int count,
-                                        MPI_Datatype datatype,
-                                        MPI_Op  op, int root,
-                                        MPI_Comm  comm
-                                        )
+int reduce__ompi_chain(const void *sendbuf, void *recvbuf, int count,
+                       MPI_Datatype datatype,
+                       MPI_Op  op, int root,
+                       MPI_Comm  comm
+                       )
 {
     uint32_t segsize=64*1024;
     int segcount = count;
@@ -356,10 +356,10 @@ int Coll_reduce_ompi_chain::reduce(const void *sendbuf, void *recvbuf, int count
 }
 
 
-int Coll_reduce_ompi_pipeline::reduce(const void *sendbuf, void *recvbuf,
-                                           int count, MPI_Datatype datatype,
-                                           MPI_Op  op, int root,
-                                           MPI_Comm  comm  )
+int reduce__ompi_pipeline(const void *sendbuf, void *recvbuf,
+                          int count, MPI_Datatype datatype,
+                          MPI_Op  op, int root,
+                          MPI_Comm  comm  )
 {
 
     uint32_t segsize;
@@ -400,10 +400,10 @@ int Coll_reduce_ompi_pipeline::reduce(const void *sendbuf, void *recvbuf,
                                            segcount, 0);
 }
 
-int Coll_reduce_ompi_binary::reduce(const void *sendbuf, void *recvbuf,
-                                         int count, MPI_Datatype datatype,
-                                         MPI_Op  op, int root,
-                                         MPI_Comm  comm)
+int reduce__ompi_binary(const void *sendbuf, void *recvbuf,
+                        int count, MPI_Datatype datatype,
+                        MPI_Op  op, int root,
+                        MPI_Comm  comm)
 {
     uint32_t segsize;
     int segcount = count;
@@ -430,10 +430,10 @@ int Coll_reduce_ompi_binary::reduce(const void *sendbuf, void *recvbuf,
                                            segcount, 0);
 }
 
-int Coll_reduce_ompi_binomial::reduce(const void *sendbuf, void *recvbuf,
-                                           int count, MPI_Datatype datatype,
-                                           MPI_Op  op, int root,
-                                           MPI_Comm  comm)
+int reduce__ompi_binomial(const void *sendbuf, void *recvbuf,
+                          int count, MPI_Datatype datatype,
+                          MPI_Op  op, int root,
+                          MPI_Comm  comm)
 {
 
     uint32_t segsize=0;
@@ -477,11 +477,11 @@ int Coll_reduce_ompi_binomial::reduce(const void *sendbuf, void *recvbuf,
  * Accepts:       same as MPI_Reduce()
  * Returns:       MPI_SUCCESS or error code
  */
-int Coll_reduce_ompi_in_order_binary::reduce(const void *sendbuf, void *recvbuf,
-                                                  int count,
-                                                  MPI_Datatype datatype,
-                                                  MPI_Op  op, int root,
-                                                  MPI_Comm  comm)
+int reduce__ompi_in_order_binary(const void *sendbuf, void *recvbuf,
+                                 int count,
+                                 MPI_Datatype datatype,
+                                 MPI_Op  op, int root,
+                                 MPI_Comm  comm)
 {
     uint32_t segsize=0;
     int ret;
@@ -586,12 +586,11 @@ int Coll_reduce_ompi_in_order_binary::reduce(const void *sendbuf, void *recvbuf,
  *  Returns:    - MPI_SUCCESS or error code
  */
 
-int
-Coll_reduce_ompi_basic_linear::reduce(const void *sbuf, void *rbuf, int count,
-                                          MPI_Datatype dtype,
-                                          MPI_Op op,
-                                          int root,
-                                          MPI_Comm comm)
+int reduce__ompi_basic_linear(const void *sbuf, void *rbuf, int count,
+                              MPI_Datatype dtype,
+                              MPI_Op op,
+                              int root,
+                              MPI_Comm comm)
 {
     int i, rank, size;
     ptrdiff_t true_extent, lb, extent;
index ef73930..4e8cdb0 100644 (file)
@@ -949,12 +949,12 @@ static int MPI_I_anyReduce(const void* Sendbuf, void* Recvbuf, int count, MPI_Da
 #endif /*REDUCE_LIMITS*/
 
 
-int Coll_reduce_rab::reduce(const void* Sendbuf, void* Recvbuf, int count, MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm)
+int reduce__rab(const void* Sendbuf, void* Recvbuf, int count, MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm)
 {
   return( MPI_I_anyReduce(Sendbuf, Recvbuf, count, datatype, op, root, comm, 0) );
 }
 
-int Coll_allreduce_rab::allreduce(const void* Sendbuf, void* Recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
+int allreduce__rab(const void* Sendbuf, void* Recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
 {
   return( MPI_I_anyReduce(Sendbuf, Recvbuf, count, datatype, op,   -1, comm, 1) );
 }
index 2c2eead..fa74378 100644 (file)
@@ -12,9 +12,9 @@
  */
 namespace simgrid{
 namespace smpi{
-int Coll_reduce_scatter_gather::reduce(const void *sendbuf, void *recvbuf,
-                                          int count, MPI_Datatype datatype,
-                                          MPI_Op op, int root, MPI_Comm comm)
+int reduce__scatter_gather(const void *sendbuf, void *recvbuf,
+                           int count, MPI_Datatype datatype,
+                           MPI_Op op, int root, MPI_Comm comm)
 {
   MPI_Status status;
   int comm_size, rank, pof2, rem, newrank;
index 3679390..2f75f1f 100644 (file)
@@ -24,8 +24,8 @@ static inline int MPIU_Mirror_permutation(unsigned int x, int bits)
 namespace simgrid{
 namespace smpi{
 
-int Coll_reduce_scatter_mpich_pair::reduce_scatter(const void *sendbuf, void *recvbuf, const int recvcounts[],
-                              MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
+int reduce_scatter__mpich_pair(const void *sendbuf, void *recvbuf, const int recvcounts[],
+                               MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
 {
     int   rank, comm_size, i;
     MPI_Aint extent, true_extent, true_lb;
@@ -144,8 +144,8 @@ int Coll_reduce_scatter_mpich_pair::reduce_scatter(const void *sendbuf, void *re
 }
 
 
-int Coll_reduce_scatter_mpich_noncomm::reduce_scatter(const void *sendbuf, void *recvbuf, const int recvcounts[],
-                              MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
+int reduce_scatter__mpich_noncomm(const void *sendbuf, void *recvbuf, const int recvcounts[],
+                                  MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
 {
     int mpi_errno = MPI_SUCCESS;
     int comm_size = comm->size() ;
@@ -264,7 +264,7 @@ int Coll_reduce_scatter_mpich_noncomm::reduce_scatter(const void *sendbuf, void
 
 
 
-int Coll_reduce_scatter_mpich_rdb::reduce_scatter(const void *sendbuf, void *recvbuf, const int recvcounts[],
+int reduce_scatter__mpich_rdb(const void *sendbuf, void *recvbuf, const int recvcounts[],
                               MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
 {
     int   rank, comm_size, i;
index 93ae0dc..79f77d7 100644 (file)
  *  Returns:    - MPI_SUCCESS or error code
  *  Limitation: - Works only for commutative operations.
  */
-namespace simgrid{
-namespace smpi{
-int
-Coll_reduce_scatter_ompi_basic_recursivehalving::reduce_scatter(const void *sbuf,
-                                                            void *rbuf,
-                                                            const int *rcounts,
-                                                            MPI_Datatype dtype,
-                                                            MPI_Op op,
-                                                            MPI_Comm comm
-                                                            )
+namespace simgrid {
+namespace smpi {
+int reduce_scatter__ompi_basic_recursivehalving(const void *sbuf,
+                                                void *rbuf,
+                                                const int *rcounts,
+                                                MPI_Datatype dtype,
+                                                MPI_Op op,
+                                                MPI_Comm comm
+                                                )
 {
     int i, rank, size, count, err = MPI_SUCCESS;
     int tmp_size = 1, remain = 0, tmp_rank;
@@ -348,12 +347,11 @@ Coll_reduce_scatter_ompi_basic_recursivehalving::reduce_scatter(const void *sbuf
  *    DONE :)
  *
  */
-int
-Coll_reduce_scatter_ompi_ring::reduce_scatter(const void *sbuf, void *rbuf, const int *rcounts,
-                                          MPI_Datatype dtype,
-                                          MPI_Op op,
-                                          MPI_Comm comm
-                                          )
+int reduce_scatter__ompi_ring(const void *sbuf, void *rbuf, const int *rcounts,
+                              MPI_Datatype dtype,
+                              MPI_Op op,
+                              MPI_Comm comm
+                              )
 {
     int ret, line, rank, size, i, k, recv_from, send_to, total_count, max_block_count;
     int inbi;
index 35e57e4..ec056fa 100644 (file)
@@ -36,8 +36,8 @@
  */
 #include "../colls_private.hpp"
 
-#define MPIR_Scatter_MV2_Binomial Coll_scatter_ompi_binomial::scatter
-#define MPIR_Scatter_MV2_Direct Coll_scatter_ompi_basic_linear::scatter
+#define MPIR_Scatter_MV2_Binomial scatter__ompi_binomial
+#define MPIR_Scatter_MV2_Direct scatter__ompi_basic_linear
 
 extern int (*MV2_Scatter_intra_function) (const void *sendbuf, int sendcount, MPI_Datatype sendtype,
     void *recvbuf, int recvcount, MPI_Datatype recvtype,
@@ -46,13 +46,13 @@ extern int (*MV2_Scatter_intra_function) (const void *sendbuf, int sendcount, MP
 namespace simgrid{
 namespace smpi{
 
-int Coll_scatter_mvapich2_two_level_direct::scatter(const void *sendbuf,
-                                      int sendcnt,
-                                      MPI_Datatype sendtype,
-                                      void *recvbuf,
-                                      int recvcnt,
-                                      MPI_Datatype recvtype,
-                                      int root, MPI_Comm  comm)
+int scatter__mvapich2_two_level_direct(const void *sendbuf,
+                                       int sendcnt,
+                                       MPI_Datatype sendtype,
+                                       void *recvbuf,
+                                       int recvcnt,
+                                       MPI_Datatype recvtype,
+                                       int root, MPI_Comm  comm)
 {
     int comm_size, rank;
     int local_rank, local_size;
@@ -66,7 +66,7 @@ int Coll_scatter_mvapich2_two_level_direct::scatter(const void *sendbuf,
     MPI_Comm shmem_comm, leader_comm;
     //if not set (use of the algo directly, without mvapich2 selector)
     if(MV2_Scatter_intra_function==NULL)
-      MV2_Scatter_intra_function=Coll_scatter_mpich::scatter;
+      MV2_Scatter_intra_function = scatter__mpich;
 
     if(comm->get_leaders_comm()==MPI_COMM_NULL){
       comm->init_smp();
@@ -223,13 +223,13 @@ int Coll_scatter_mvapich2_two_level_direct::scatter(const void *sendbuf,
 }
 
 
-int Coll_scatter_mvapich2_two_level_binomial::scatter(const void *sendbuf,
-                                        int sendcnt,
-                                        MPI_Datatype sendtype,
-                                        void *recvbuf,
-                                        int recvcnt,
-                                        MPI_Datatype recvtype,
-                                        int root, MPI_Comm comm)
+int scatter__mvapich2_two_level_binomial(const void *sendbuf,
+                                         int sendcnt,
+                                         MPI_Datatype sendtype,
+                                         void *recvbuf,
+                                         int recvcnt,
+                                         MPI_Datatype recvtype,
+                                         int root, MPI_Comm comm)
 {
     int comm_size, rank;
     int local_rank, local_size;
@@ -245,7 +245,7 @@ int Coll_scatter_mvapich2_two_level_binomial::scatter(const void *sendbuf,
 
     //if not set (use of the algo directly, without mvapich2 selector)
     if(MV2_Scatter_intra_function==NULL)
-      MV2_Scatter_intra_function=Coll_scatter_mpich::scatter;
+      MV2_Scatter_intra_function = scatter__mpich;
 
     if(comm->get_leaders_comm()==MPI_COMM_NULL){
       comm->init_smp();
index 3138569..50d24eb 100644 (file)
@@ -25,8 +25,8 @@
 namespace simgrid{
 namespace smpi{
 
-int Coll_scatter_ompi_binomial::scatter(const void* sbuf, int scount, MPI_Datatype sdtype, void* rbuf, int rcount,
-                                        MPI_Datatype rdtype, int root, MPI_Comm comm)
+int scatter__ompi_binomial(const void* sbuf, int scount, MPI_Datatype sdtype, void* rbuf, int rcount,
+                           MPI_Datatype rdtype, int root, MPI_Comm comm)
 {
     int line = -1;
     int i;
@@ -194,8 +194,8 @@ int Coll_scatter_ompi_binomial::scatter(const void* sbuf, int scount, MPI_Dataty
  *  Accepts:  - same arguments as MPI_Scatter()
  *  Returns:  - MPI_SUCCESS or error code
  */
-int Coll_scatter_ompi_basic_linear::scatter(const void* sbuf, int scount, MPI_Datatype sdtype, void* rbuf, int rcount,
-                                            MPI_Datatype rdtype, int root, MPI_Comm comm)
+int scatter__ompi_basic_linear(const void* sbuf, int scount, MPI_Datatype sdtype, void* rbuf, int rcount,
+                               MPI_Datatype rdtype, int root, MPI_Comm comm)
 {
     int i, rank, size, err;
     char *ptmp;
index 2772069..f01efcf 100644 (file)
@@ -23,7 +23,7 @@
   }
 
 #define AUTOMATIC_COLL_BENCH(cat, ret, args, args2)                                                                    \
-  ret _XBT_CONCAT3(Coll_, cat, _automatic)::cat(COLL_UNPAREN args)                                                     \
+  ret _XBT_CONCAT2(cat, __automatic)(COLL_UNPAREN args)                                                                \
   {                                                                                                                    \
     double time1, time2, time_min = DBL_MAX;                                                                           \
     int min_coll = -1, global_coll = -1;                                                                               \
@@ -34,7 +34,7 @@
         continue;                                                                                                      \
       if (Colls::_XBT_CONCAT3(mpi_coll_, cat, _description)[i].name == "default")                                      \
         continue;                                                                                                      \
-      Coll_barrier_default::barrier(comm);                                                                             \
+      barrier__default(comm);                                                                                          \
       TRACE_AUTO_COLL(cat)                                                                                             \
       time1 = SIMIX_get_clock();                                                                                       \
       try {                                                                                                            \
@@ -44,7 +44,7 @@
       }                                                                                                                \
       time2   = SIMIX_get_clock();                                                                                     \
       buf_out = time2 - time1;                                                                                         \
-      Coll_reduce_default::reduce((void*)&buf_out, (void*)&buf_in, 1, MPI_DOUBLE, MPI_MAX, 0, comm);                   \
+      reduce__default((void*)&buf_out, (void*)&buf_in, 1, MPI_DOUBLE, MPI_MAX, 0, comm);                               \
       if (time2 - time1 < time_min) {                                                                                  \
         min_coll = i;                                                                                                  \
         time_min = time2 - time1;                                                                                      \
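
The AUTOMATIC_COLL_BENCH macro above runs every registered implementation of a collective, skips the "automatic" and "default" entries, and keeps the index of the fastest one (the per-run maximum across ranks is gathered with reduce__default). Stripped of the macro and tracing machinery, the selection loop boils down to the following sketch; the Candidate struct and std::chrono timing are stand-ins for the SMPI description table and SIMIX_get_clock():

/* Illustrative sketch of the "time each candidate, keep the fastest" loop. */
#include <chrono>
#include <functional>
#include <limits>
#include <string>
#include <vector>

struct Candidate {
  std::string name;
  std::function<void()> run; /* would invoke one implementation of the collective */
};

static int pick_fastest(const std::vector<Candidate>& candidates)
{
  double time_min = std::numeric_limits<double>::max();
  int min_coll    = -1;
  for (int i = 0; i < static_cast<int>(candidates.size()); i++) {
    if (candidates[i].name == "automatic" || candidates[i].name == "default")
      continue; /* same skips as in the macro above */
    auto t1 = std::chrono::steady_clock::now();
    candidates[i].run();
    auto t2 = std::chrono::steady_clock::now();
    double elapsed = std::chrono::duration<double>(t2 - t1).count();
    if (elapsed < time_min) {
      min_coll = i;
      time_min = elapsed;
    }
  }
  return min_coll; /* index of the fastest implementation, or -1 if none ran */
}
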
index 2978f86..fdc25ee 100644 (file)
@@ -13,7 +13,7 @@
 #include "smpi_request.hpp"
 #include "xbt/config.hpp"
 
-XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_coll, smpi, "Logging specific to SMPI (coll)");
+XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_coll, smpi, "Logging specific to SMPI collectives.");
 
 #define COLL_SETTER(cat, ret, args, args2)                                                                             \
   int(*Colls::cat) args;                                                                                               \
@@ -81,7 +81,20 @@ int Colls::find_coll_description(s_mpi_coll_description_t* table, const std::str
   return -1;
 }
 
-COLL_APPLY(COLL_SETTER,COLL_GATHER_SIG,"");
+int(*Colls::gather) (const void *send_buff, int send_count, MPI_Datatype send_type,
+                     void *recv_buff, int recv_count, MPI_Datatype recv_type,
+                     int root, MPI_Comm comm);
+void Colls::set_gather(const std::string& name)
+{
+  int id = find_coll_description(mpi_coll_gather_description, name, "gather");
+  gather = reinterpret_cast<int(*)(const void *send_buff, int send_count, MPI_Datatype send_type,
+                                   void *recv_buff, int recv_count, MPI_Datatype recv_type,
+                                   int root, MPI_Comm comm)>(mpi_coll_gather_description[id].coll);
+  if (gather == nullptr)
+    xbt_die("Collective gather set to nullptr!");
+}
+
+//COLL_APPLY(COLL_SETTER,COLL_GATHER_SIG,"");
 COLL_APPLY(COLL_SETTER,COLL_ALLGATHER_SIG,"");
 COLL_APPLY(COLL_SETTER,COLL_ALLGATHERV_SIG,"");
 COLL_APPLY(COLL_SETTER,COLL_REDUCE_SIG,"");
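
The expanded setter above replaces a single COLL_APPLY(COLL_SETTER, COLL_GATHER_SIG, "") invocation with its hand-written equivalent: find_coll_description() looks the requested implementation up by name in mpi_coll_gather_description, and the stored pointer is reinterpret_cast back to the gather signature. The same mechanism, shrunk to a toy example with an invented one-argument "collective" (none of the names below are SMPI code):

/* Toy illustration of a name -> implementation registry with a type-erased
 * function pointer that is cast back to the real signature by the setter. */
#include <cstdio>
#include <string>
#include <vector>

using generic_fn = void (*)();               /* erased type stored in the table */
using barrier_fn = int (*)(int comm_handle); /* toy "collective" signature      */

struct coll_description {
  std::string name;
  generic_fn coll;
};

static int barrier_linear(int) { std::puts("linear"); return 0; }
static int barrier_tree(int)   { std::puts("tree");   return 0; }

static std::vector<coll_description> barrier_table = {
    {"linear", reinterpret_cast<generic_fn>(&barrier_linear)},
    {"tree",   reinterpret_cast<generic_fn>(&barrier_tree)},
};

static barrier_fn selected_barrier = nullptr;

static void set_barrier(const std::string& name)
{
  for (auto const& d : barrier_table)
    if (d.name == name) {
      /* cast back to the original signature before it is ever called */
      selected_barrier = reinterpret_cast<barrier_fn>(d.coll);
      return;
    }
}

After set_barrier("tree"), a call through selected_barrier(0) dispatches to barrier_tree, much as Colls::gather ends up pointing at one of the gather__* functions once set_gather() has run.
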
index 6ae0e42..4b00cd1 100644 (file)
 namespace simgrid{
 namespace smpi{
 
-int Coll_bcast_default::bcast(void *buf, int count, MPI_Datatype datatype, int root, MPI_Comm comm)
+int bcast__default(void *buf, int count, MPI_Datatype datatype, int root, MPI_Comm comm)
 {
-  return Coll_bcast_binomial_tree::bcast(buf, count, datatype, root, comm);
+  return bcast__binomial_tree(buf, count, datatype, root, comm);
 }
 
-int Coll_barrier_default::barrier(MPI_Comm comm)
+int barrier__default(MPI_Comm comm)
 {
-  return Coll_barrier_ompi_basic_linear::barrier(comm);
+  return barrier__ompi_basic_linear(comm);
 }
 
 
-int Coll_gather_default::gather(const void *sendbuf, int sendcount, MPI_Datatype sendtype,
+int gather__default(const void *sendbuf, int sendcount, MPI_Datatype sendtype,
                      void *recvbuf, int recvcount, MPI_Datatype recvtype, int root, MPI_Comm comm)
 {
   MPI_Request request;
@@ -30,8 +30,8 @@ int Coll_gather_default::gather(const void *sendbuf, int sendcount, MPI_Datatype
   return Request::wait(&request, MPI_STATUS_IGNORE);
 }
 
-int Coll_reduce_scatter_default::reduce_scatter(const void *sendbuf, void *recvbuf, const int *recvcounts, MPI_Datatype datatype, MPI_Op op,
-                             MPI_Comm comm)
+int reduce_scatter__default(const void *sendbuf, void *recvbuf, const int *recvcounts, MPI_Datatype datatype, MPI_Op op,
+                            MPI_Comm comm)
 {
   int rank = comm->rank();
 
@@ -45,7 +45,7 @@ int Coll_reduce_scatter_default::reduce_scatter(const void *sendbuf, void *recvb
   }
   unsigned char* tmpbuf = smpi_get_tmp_sendbuffer(count * datatype->get_extent());
 
-  int ret = Coll_reduce_default::reduce(sendbuf, tmpbuf, count, datatype, op, 0, comm);
+  int ret = reduce__default(sendbuf, tmpbuf, count, datatype, op, 0, comm);
   if(ret==MPI_SUCCESS)
     ret = Colls::scatterv(tmpbuf, recvcounts, displs, datatype, recvbuf, recvcounts[rank], datatype, 0, comm);
   delete[] displs;
@@ -54,16 +54,16 @@ int Coll_reduce_scatter_default::reduce_scatter(const void *sendbuf, void *recvb
 }
 
 
-int Coll_allgather_default::allgather(const void *sendbuf, int sendcount, MPI_Datatype sendtype,
-                        void *recvbuf,int recvcount, MPI_Datatype recvtype, MPI_Comm comm)
+int allgather__default(const void *sendbuf, int sendcount, MPI_Datatype sendtype,
+                       void *recvbuf,int recvcount, MPI_Datatype recvtype, MPI_Comm comm)
 {
   MPI_Request request;
   Colls::iallgather(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, comm, &request);
   return Request::wait(&request, MPI_STATUS_IGNORE);
 }
 
-int Coll_allgatherv_default::allgatherv(const void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf,
-                         const int *recvcounts, const int *displs, MPI_Datatype recvtype, MPI_Comm comm)
+int allgatherv__default(const void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf,
+                        const int *recvcounts, const int *displs, MPI_Datatype recvtype, MPI_Comm comm)
 {
   MPI_Request request;
   Colls::iallgatherv(sendbuf, sendcount, sendtype, recvbuf, recvcounts, displs, recvtype, comm, &request, 0);
@@ -78,45 +78,45 @@ int Coll_allgatherv_default::allgatherv(const void *sendbuf, int sendcount, MPI_
   return MPI_SUCCESS;
 }
 
-int Coll_scatter_default::scatter(const void *sendbuf, int sendcount, MPI_Datatype sendtype,
-                      void *recvbuf, int recvcount, MPI_Datatype recvtype, int root, MPI_Comm comm)
+int scatter__default(const void *sendbuf, int sendcount, MPI_Datatype sendtype,
+                     void *recvbuf, int recvcount, MPI_Datatype recvtype, int root, MPI_Comm comm)
 {
   MPI_Request request;
   Colls::iscatter(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, root, comm, &request, 0);
   return Request::wait(&request, MPI_STATUS_IGNORE);
 }
 
-int Coll_reduce_default::reduce(const void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, int root,
-                     MPI_Comm comm)
+int reduce__default(const void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, int root,
+                    MPI_Comm comm)
 {
   //non commutative case, use a working algo from openmpi
   if (op != MPI_OP_NULL && (datatype->flags() & DT_FLAG_DERIVED || not op->is_commutative())) {
-    return Coll_reduce_ompi_basic_linear::reduce(sendbuf, recvbuf, count, datatype, op, root, comm);
+    return reduce__ompi_basic_linear(sendbuf, recvbuf, count, datatype, op, root, comm);
   }
   MPI_Request request;
   Colls::ireduce(sendbuf, recvbuf, count, datatype, op, root, comm, &request, 0);
   return Request::wait(&request, MPI_STATUS_IGNORE);
 }
 
-int Coll_allreduce_default::allreduce(const void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
+int allreduce__default(const void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
 {
   //FIXME: have mpi_ireduce and iallreduce handle derived datatypes correctly
   if(datatype->flags() & DT_FLAG_DERIVED)
-    return Coll_allreduce_ompi::allreduce(sendbuf, recvbuf, count, datatype, op, comm);
+    return allreduce__ompi(sendbuf, recvbuf, count, datatype, op, comm);
   int ret;
-  ret = Coll_reduce_default::reduce(sendbuf, recvbuf, count, datatype, op, 0, comm);
+  ret = reduce__default(sendbuf, recvbuf, count, datatype, op, 0, comm);
   if(ret==MPI_SUCCESS)
-    ret = Coll_bcast_default::bcast(recvbuf, count, datatype, 0, comm);
+    ret = bcast__default(recvbuf, count, datatype, 0, comm);
   return ret;
 }
 
-int Coll_alltoall_default::alltoall(const void *sbuf, int scount, MPI_Datatype sdtype, void* rbuf, int rcount, MPI_Datatype rdtype, MPI_Comm comm)
+int alltoall__default(const void *sbuf, int scount, MPI_Datatype sdtype, void* rbuf, int rcount, MPI_Datatype rdtype, MPI_Comm comm)
 {
-  return Coll_alltoall_ompi::alltoall(sbuf, scount, sdtype, rbuf, rcount, rdtype, comm);
+  return alltoall__ompi(sbuf, scount, sdtype, rbuf, rcount, rdtype, comm);
 }
 
-int Coll_alltoallv_default::alltoallv(const void *sendbuf, const int *sendcounts, const int *senddisps, MPI_Datatype sendtype,
-                              void *recvbuf, const int *recvcounts, const int *recvdisps, MPI_Datatype recvtype, MPI_Comm comm)
+int alltoallv__default(const void *sendbuf, const int *sendcounts, const int *senddisps, MPI_Datatype sendtype,
+                       void *recvbuf, const int *recvcounts, const int *recvdisps, MPI_Datatype recvtype, MPI_Comm comm)
 {
   MPI_Request request;
   Colls::ialltoallv(sendbuf, sendcounts, senddisps, sendtype, recvbuf, recvcounts, recvdisps, recvtype, comm, &request, 0);
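
Several of the __default implementations above are plain compositions of other collectives: reduce_scatter__default reduces everything to rank 0 and then scatters the pieces, and allreduce__default reduces to rank 0 and then broadcasts the result. The latter composition, restated with standard MPI calls so the sketch stands on its own (an illustration of the idea, not the SMPI code path):

/* Sketch of the reduce-then-broadcast composition used by allreduce__default. */
#include <mpi.h>

static int allreduce_via_reduce_bcast(const void* sendbuf, void* recvbuf, int count,
                                      MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
{
  int ret = MPI_Reduce(sendbuf, recvbuf, count, datatype, op, 0, comm);
  if (ret == MPI_SUCCESS)
    ret = MPI_Bcast(recvbuf, count, datatype, 0, comm); /* everyone gets rank 0's result */
  return ret;
}
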
index a126d72..7a87309 100644 (file)
@@ -55,14 +55,14 @@ int (*intel_allreduce_functions_table[])(const void *sendbuf,
       int count,
       MPI_Datatype datatype,
       MPI_Op op, MPI_Comm comm) ={
-      Coll_allreduce_rdb::allreduce,
-      Coll_allreduce_rab1::allreduce,
-      Coll_allreduce_redbcast::allreduce,
-      Coll_allreduce_mvapich2_two_level::allreduce,
-      Coll_allreduce_smp_binomial::allreduce,
-      Coll_allreduce_mvapich2_two_level::allreduce,
-      Coll_allreduce_ompi_ring_segmented::allreduce,
-      Coll_allreduce_ompi_ring_segmented::allreduce
+      allreduce__rdb,
+      allreduce__rab1,
+      allreduce__redbcast,
+      allreduce__mvapich2_two_level,
+      allreduce__smp_binomial,
+      allreduce__mvapich2_two_level,
+      allreduce__ompi_ring_segmented,
+      allreduce__ompi_ring_segmented
 };
 
 intel_tuning_table_element intel_allreduce_table[] =
@@ -639,10 +639,10 @@ int (*intel_alltoall_functions_table[])(const void *sbuf, int scount,
                                              void* rbuf, int rcount,
                                              MPI_Datatype rdtype,
                                              MPI_Comm comm) ={
-      Coll_alltoall_bruck::alltoall,
-      Coll_alltoall_mvapich2_scatter_dest::alltoall,
-      Coll_alltoall_pair::alltoall,
-      Coll_alltoall_mvapich2::alltoall//Plum is proprietary ? (and super efficient)
+      alltoall__bruck,
+      alltoall__mvapich2_scatter_dest,
+      alltoall__pair,
+      alltoall__mvapich2//Plum is proprietary ? (and super efficient)
 };
 
 /*I_MPI_ADJUST_BARRIER
@@ -659,15 +659,15 @@ MPI_Barrier
 */
 static int intel_barrier_gather_scatter(MPI_Comm comm){
     //our default barrier performs a antibcast/bcast
-    Coll_barrier_default::barrier(comm);
+    barrier__default(comm);
     return MPI_SUCCESS;
 }
 
 int (*intel_barrier_functions_table[])(MPI_Comm comm) ={
-      Coll_barrier_ompi_basic_linear::barrier,
-      Coll_barrier_ompi_recursivedoubling::barrier,
-      Coll_barrier_ompi_basic_linear::barrier,
-      Coll_barrier_ompi_recursivedoubling::barrier,
+      barrier__ompi_basic_linear,
+      barrier__ompi_recursivedoubling,
+      barrier__ompi_basic_linear,
+      barrier__ompi_recursivedoubling,
       intel_barrier_gather_scatter,
       intel_barrier_gather_scatter
 };
@@ -801,15 +801,15 @@ MPI_Bcast
 int (*intel_bcast_functions_table[])(void *buff, int count,
                                           MPI_Datatype datatype, int root,
                                           MPI_Comm  comm) ={
-      Coll_bcast_binomial_tree::bcast,
-      //Coll_bcast_scatter_rdb_allgather::bcast,
-      Coll_bcast_NTSL::bcast,
-      Coll_bcast_NTSL::bcast,
-      Coll_bcast_SMP_binomial::bcast,
-      //Coll_bcast_scatter_rdb_allgather::bcast,
-      Coll_bcast_NTSL::bcast,
-      Coll_bcast_SMP_linear::bcast,
-      Coll_bcast_mvapich2::bcast,//we don't know shumilin's algo'
+      bcast__binomial_tree,
+      //bcast__scatter_rdb_allgather,
+      bcast__NTSL,
+      bcast__NTSL,
+      bcast__SMP_binomial,
+      //bcast__scatter_rdb_allgather,
+      bcast__NTSL,
+      bcast__SMP_linear,
+      bcast__mvapich2,//we don't know shumilin's algo'
 };
 
 intel_tuning_table_element intel_bcast_table[] =
@@ -971,12 +971,12 @@ int (*intel_reduce_functions_table[])(const void *sendbuf, void *recvbuf,
                                             int count, MPI_Datatype  datatype,
                                             MPI_Op   op, int root,
                                             MPI_Comm   comm) ={
-      Coll_reduce_mvapich2::reduce,
-      Coll_reduce_binomial::reduce,
-      Coll_reduce_mvapich2::reduce,
-      Coll_reduce_mvapich2_two_level::reduce,
-      Coll_reduce_rab::reduce,
-      Coll_reduce_rab::reduce
+      reduce__mvapich2,
+      reduce__binomial,
+      reduce__mvapich2,
+      reduce__mvapich2_two_level,
+      reduce__rab,
+      reduce__rab
 };
 
 intel_tuning_table_element intel_reduce_table[] =
@@ -1061,7 +1061,7 @@ static  int intel_reduce_scatter_reduce_scatterv(const void *sbuf, void *rbuf,
                                                     MPI_Op  op,
                                                     MPI_Comm  comm)
 {
-  Coll_reduce_scatter_default::reduce_scatter(sbuf, rbuf, rcounts,dtype, op,comm);
+  reduce_scatter__default(sbuf, rbuf, rcounts,dtype, op,comm);
   return MPI_SUCCESS;
 }
 
@@ -1072,9 +1072,9 @@ static  int  intel_reduce_scatter_recursivehalving(const void *sbuf, void *rbuf,
                                                     MPI_Comm  comm)
 {
   if(op==MPI_OP_NULL || op->is_commutative())
-    return Coll_reduce_scatter_ompi_basic_recursivehalving::reduce_scatter(sbuf, rbuf, rcounts,dtype, op,comm);
+    return reduce_scatter__ompi_basic_recursivehalving(sbuf, rbuf, rcounts,dtype, op,comm);
   else
-    return Coll_reduce_scatter_mvapich2::reduce_scatter(sbuf, rbuf, rcounts,dtype, op,comm);
+    return reduce_scatter__mvapich2(sbuf, rbuf, rcounts,dtype, op,comm);
 }
 
 int (*intel_reduce_scatter_functions_table[])( const void *sbuf, void *rbuf,
@@ -1084,8 +1084,8 @@ int (*intel_reduce_scatter_functions_table[])( const void *sbuf, void *rbuf,
                                                     MPI_Comm  comm
                                                     ) ={
       intel_reduce_scatter_recursivehalving,
-      Coll_reduce_scatter_mpich_pair::reduce_scatter,
-      Coll_reduce_scatter_mpich_rdb::reduce_scatter,
+      reduce_scatter__mpich_pair,
+      reduce_scatter__mpich_rdb,
       intel_reduce_scatter_reduce_scatterv,
       intel_reduce_scatter_reduce_scatterv
 };
@@ -1493,10 +1493,10 @@ int (*intel_allgather_functions_table[])(const void *sbuf, int scount,
                                               MPI_Datatype rdtype,
                                               MPI_Comm  comm
                                                     ) ={
-      Coll_allgather_rdb::allgather,
-      Coll_allgather_bruck::allgather,
-      Coll_allgather_ring::allgather,
-      Coll_allgather_GB::allgather
+      allgather__rdb,
+      allgather__bruck,
+      allgather__ring,
+      allgather__GB
 };
 
 intel_tuning_table_element intel_allgather_table[] =
@@ -1663,10 +1663,10 @@ int (*intel_allgatherv_functions_table[])(const void *sbuf, int scount,
                                                MPI_Datatype rdtype,
                                                MPI_Comm  comm
                                                     ) ={
-      Coll_allgatherv_mpich_rdb::allgatherv,
-      Coll_allgatherv_ompi_bruck::allgatherv,
-      Coll_allgatherv_ring::allgatherv,
-      Coll_allgatherv_GB::allgatherv
+      allgatherv__mpich_rdb,
+      allgatherv__ompi_bruck,
+      allgatherv__ring,
+      allgatherv__GB
 };
 
 intel_tuning_table_element intel_allgatherv_table[] =
@@ -1874,9 +1874,9 @@ int (*intel_gather_functions_table[])(const void *sbuf, int scount,
                                            int root,
                                            MPI_Comm  comm
                                                     ) ={
-      Coll_gather_ompi_binomial::gather,
-      Coll_gather_ompi_binomial::gather,
-      Coll_gather_mvapich2::gather
+      gather__ompi_binomial,
+      gather__ompi_binomial,
+      gather__mvapich2
 };
 
 intel_tuning_table_element intel_gather_table[] =
@@ -1977,9 +1977,9 @@ int (*intel_scatter_functions_table[])(const void *sbuf, int scount,
                                             MPI_Datatype rdtype,
                                             int root, MPI_Comm  comm
                                                     ) ={
-      Coll_scatter_ompi_binomial::scatter,
-      Coll_scatter_ompi_binomial::scatter,
-      Coll_scatter_mvapich2::scatter
+      scatter__ompi_binomial,
+      scatter__ompi_binomial,
+      scatter__mvapich2
 };
 
 intel_tuning_table_element intel_scatter_table[] =
@@ -2151,8 +2151,8 @@ int (*intel_alltoallv_functions_table[])(const void *sbuf, const int *scounts, c
                                               MPI_Datatype rdtype,
                                               MPI_Comm  comm
                                                     ) ={
-      Coll_alltoallv_ompi_basic_linear::alltoallv,
-      Coll_alltoallv_bruck::alltoallv
+      alltoallv__ompi_basic_linear,
+      alltoallv__bruck
 };
 
 intel_tuning_table_element intel_alltoallv_table[] =
@@ -2263,7 +2263,7 @@ intel_tuning_table_element intel_alltoallv_table[] =
   size_t block_dsize = 1;
 
 #define IMPI_COLL_SELECT(cat, ret, args, args2)                                                                        \
-  ret _XBT_CONCAT3(Coll_, cat, _impi)::cat(COLL_UNPAREN args)                                                          \
+  ret _XBT_CONCAT2(cat, __impi)(COLL_UNPAREN args)                                                          \
   {                                                                                                                    \
     int comm_size = comm->size();                                                                                      \
     int i         = 0;                                                                                                 \
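
The IMPI_COLL_SELECT hunk above replaces a three-way paste that produced a class-qualified method name (Coll_<cat>_impi::<cat>) with a two-way paste that produces a flat function name (<cat>__impi). A stand-alone illustration of that token pasting; CONCAT2 is a local stand-in for the XBT helper and the signature is simplified:

    #include <cstdio>

    // Local stand-ins for the XBT concatenation helpers used by IMPI_COLL_SELECT.
    #define CONCAT2_IMPL(a, b) a##b
    #define CONCAT2(a, b) CONCAT2_IMPL(a, b)

    // Expanding DEFINE_IMPI(bcast) defines a function literally named bcast__impi.
    #define DEFINE_IMPI(cat)                                          \
      int CONCAT2(cat, __impi)(int count)                             \
      {                                                               \
        std::printf(#cat "__impi called with count=%d\n", count);     \
        return 0;                                                     \
      }

    DEFINE_IMPI(bcast)  // defines bcast__impi(int)
    DEFINE_IMPI(reduce) // defines reduce__impi(int)

    int main()
    {
      bcast__impi(8);
      reduce__impi(16);
      return 0;
    }
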
index 11de914..3c0f27a 100644
@@ -58,7 +58,7 @@
 */
 namespace simgrid{
 namespace smpi{
-int Coll_allreduce_mpich::allreduce(const void *sbuf, void *rbuf, int count,
+int allreduce__mpich(const void *sbuf, void *rbuf, int count,
                         MPI_Datatype dtype, MPI_Op op, MPI_Comm comm)
 {
     size_t dsize, block_dsize;
@@ -74,7 +74,7 @@ int Coll_allreduce_mpich::allreduce(const void *sbuf, void *rbuf, int count,
         comm->init_smp();
       }
       if(op->is_commutative())
-        return Coll_allreduce_mvapich2_two_level::allreduce (sbuf, rbuf,count, dtype, op, comm);
+        return allreduce__mvapich2_two_level(sbuf, rbuf,count, dtype, op, comm);
     }
 
     /* find nearest power-of-two less than or equal to comm_size */
@@ -84,10 +84,10 @@ int Coll_allreduce_mpich::allreduce(const void *sbuf, void *rbuf, int count,
 
     if (block_dsize > large_message && count >= pof2 && (op==MPI_OP_NULL || op->is_commutative())) {
       //for long messages
-       return Coll_allreduce_rab_rdb::allreduce (sbuf, rbuf, count, dtype, op, comm);
+       return allreduce__rab_rdb(sbuf, rbuf, count, dtype, op, comm);
     }else {
       //for short ones and count < pof2
-      return Coll_allreduce_rdb::allreduce (sbuf, rbuf, count, dtype, op, comm);
+      return allreduce__rdb(sbuf, rbuf, count, dtype, op, comm);
     }
 }
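
For readers unfamiliar with these selectors: allreduce__mpich above picks an implementation from the payload size and the nearest power of two below the communicator size. A compact sketch of that style of decision; the threshold and the algorithm names are illustrative, not the SMPI values:

    #include <cstddef>
    #include <cstdio>

    enum class AllreduceAlgo { RecursiveDoubling, ReduceScatterAllgather };

    static AllreduceAlgo choose_allreduce(std::size_t block_bytes, int count, int comm_size,
                                          bool op_is_commutative)
    {
      const std::size_t large_message = 80 * 1024; // illustrative cut-off

      int pof2 = 1; // nearest power of two <= comm_size
      while (pof2 <= comm_size)
        pof2 <<= 1;
      pof2 >>= 1;

      if (block_bytes > large_message && count >= pof2 && op_is_commutative)
        return AllreduceAlgo::ReduceScatterAllgather; // long messages
      return AllreduceAlgo::RecursiveDoubling;        // short messages or count < pof2
    }

    int main()
    {
      bool long_msg = choose_allreduce(1 << 20, 1 << 18, 48, true) ==
                      AllreduceAlgo::ReduceScatterAllgather;
      std::printf("1MiB on 48 ranks -> %s\n",
                  long_msg ? "reduce-scatter/allgather" : "recursive doubling");
      return 0;
    }
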
 
@@ -138,11 +138,11 @@ int Coll_allreduce_mpich::allreduce(const void *sbuf, void *rbuf, int count,
    End Algorithm: MPI_Alltoall
 */
 
-int Coll_alltoall_mpich::alltoall(const void *sbuf, int scount,
-                                             MPI_Datatype sdtype,
-                                             void* rbuf, int rcount,
-                                             MPI_Datatype rdtype,
-                                             MPI_Comm comm)
+int alltoall__mpich(const void *sbuf, int scount,
+                    MPI_Datatype sdtype,
+                    void* rbuf, int rcount,
+                    MPI_Datatype rdtype,
+                    MPI_Comm comm)
 {
     int communicator_size;
     size_t dsize, block_dsize;
@@ -168,42 +168,42 @@ int Coll_alltoall_mpich::alltoall(const void *sbuf, int scount,
     block_dsize = dsize * scount;
 
     if ((block_dsize < short_size) && (communicator_size >= 8)) {
-        return Coll_alltoall_bruck::alltoall(sbuf, scount, sdtype,
-                                                    rbuf, rcount, rdtype,
-                                                    comm);
+        return alltoall__bruck(sbuf, scount, sdtype,
+                               rbuf, rcount, rdtype,
+                               comm);
 
     } else if (block_dsize < medium_size) {
-        return Coll_alltoall_mvapich2_scatter_dest::alltoall(sbuf, scount, sdtype,
-                                                           rbuf, rcount, rdtype,
-                                                           comm);
+        return alltoall__mvapich2_scatter_dest(sbuf, scount, sdtype,
+                                               rbuf, rcount, rdtype,
+                                               comm);
     }else if (communicator_size%2){
-        return Coll_alltoall_pair::alltoall(sbuf, scount, sdtype,
-                                                           rbuf, rcount, rdtype,
-                                                           comm);
+        return alltoall__pair(sbuf, scount, sdtype,
+                              rbuf, rcount, rdtype,
+                              comm);
     }
 
-    return Coll_alltoall_ring::alltoall (sbuf, scount, sdtype,
-                                                    rbuf, rcount, rdtype,
-                                                    comm);
+    return alltoall__ring(sbuf, scount, sdtype,
+                          rbuf, rcount, rdtype,
+                          comm);
 }
 
-int Coll_alltoallv_mpich::alltoallv(const void *sbuf, const int *scounts, const int *sdisps,
-                                              MPI_Datatype sdtype,
-                                              void *rbuf, const int *rcounts, const int *rdisps,
-                                              MPI_Datatype rdtype,
-                                              MPI_Comm  comm
-                                              )
+int alltoallv__mpich(const void *sbuf, const int *scounts, const int *sdisps,
+                     MPI_Datatype sdtype,
+                     void *rbuf, const int *rcounts, const int *rdisps,
+                     MPI_Datatype rdtype,
+                     MPI_Comm  comm
+                     )
 {
     /* For starters, just keep the original algorithm. */
-    return Coll_alltoallv_bruck::alltoallv(sbuf, scounts, sdisps, sdtype,
-                                                        rbuf, rcounts, rdisps,rdtype,
-                                                        comm);
+    return alltoallv__bruck(sbuf, scounts, sdisps, sdtype,
+                            rbuf, rcounts, rdisps,rdtype,
+                            comm);
 }
 
 
-int Coll_barrier_mpich::barrier(MPI_Comm  comm)
+int barrier__mpich(MPI_Comm  comm)
 {
-    return Coll_barrier_ompi_bruck::barrier(comm);
+    return barrier__ompi_bruck(comm);
 }
 
 /* This is the default implementation of broadcast. The algorithm is:
@@ -249,9 +249,9 @@ int Coll_barrier_mpich::barrier(MPI_Comm  comm)
 */
 
 
-int Coll_bcast_mpich::bcast(void *buff, int count,
-                                          MPI_Datatype datatype, int root,
-                                          MPI_Comm  comm
+int bcast__mpich(void *buff, int count,
+                 MPI_Datatype datatype, int root,
+                 MPI_Comm  comm
                                           )
 {
     /* Decision function based on MX results for
@@ -268,7 +268,7 @@ int Coll_bcast_mpich::bcast(void *buff, int count,
         comm->init_smp();
       }
       if(comm->is_uniform())
-        return Coll_bcast_SMP_binomial::bcast(buff, count, datatype, root, comm);
+        return bcast__SMP_binomial(buff, count, datatype, root, comm);
     }
 
     communicator_size = comm->size();
@@ -281,18 +281,15 @@ int Coll_bcast_mpich::bcast(void *buff, int count,
        single-element broadcasts */
     if ((message_size < small_message_size) || (communicator_size <= 8)) {
         /* Binomial without segmentation */
-        return  Coll_bcast_binomial_tree::bcast (buff, count, datatype,
-                                                      root, comm);
+        return  bcast__binomial_tree(buff, count, datatype, root, comm);
 
     } else if (message_size < intermediate_message_size && !(communicator_size%2)) {
         // SplittedBinary with 1KB segments
-        return Coll_bcast_scatter_rdb_allgather::bcast(buff, count, datatype,
-                                                         root, comm);
+        return bcast__scatter_rdb_allgather(buff, count, datatype, root, comm);
 
     }
      //Handle large message sizes
-     return Coll_bcast_scatter_LR_allgather::bcast (buff, count, datatype,
-                                                     root, comm);
+     return bcast__scatter_LR_allgather(buff, count, datatype, root, comm);
 
 }
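
bcast__mpich above is a two-stage decision: an SMP-aware shortcut when the communicator spans several uniform nodes, then flat size-based thresholds. A condensed sketch of that shape; the boolean inputs and the byte thresholds are placeholders for the real comm queries and cut-offs:

    #include <cstddef>
    #include <cstdio>

    enum class BcastAlgo { SmpBinomial, Binomial, ScatterRdbAllgather, ScatterLrAllgather };

    // Placeholder inputs standing in for the SMP queries and the size thresholds
    // used by the real selector.
    static BcastAlgo choose_bcast(bool smp_enabled, bool multi_node, bool uniform_nodes,
                                  int comm_size, std::size_t message_bytes)
    {
      if (smp_enabled && multi_node && uniform_nodes)
        return BcastAlgo::SmpBinomial; // hierarchy-aware path

      const std::size_t small_message        = 12 * 1024;  // illustrative
      const std::size_t intermediate_message = 512 * 1024; // illustrative

      if (message_bytes < small_message || comm_size <= 8)
        return BcastAlgo::Binomial;                        // tiny payloads
      if (message_bytes < intermediate_message && comm_size % 2 == 0)
        return BcastAlgo::ScatterRdbAllgather;             // medium payloads
      return BcastAlgo::ScatterLrAllgather;                // large payloads
    }

    int main()
    {
      BcastAlgo a = choose_bcast(true, true, true, 64, 1 << 20);
      std::printf("SMP path chosen: %s\n", a == BcastAlgo::SmpBinomial ? "yes" : "no");
      return 0;
    }
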
 
@@ -353,7 +350,7 @@ int Coll_bcast_mpich::bcast(void *buff, int count,
 */
 
 
-int Coll_reduce_mpich::reduce(const void *sendbuf, void *recvbuf,
+int reduce__mpich(const void *sendbuf, void *recvbuf,
                                             int count, MPI_Datatype  datatype,
                                             MPI_Op   op, int root,
                                             MPI_Comm   comm
@@ -367,7 +364,7 @@ int Coll_reduce_mpich::reduce(const void *sendbuf, void *recvbuf,
         comm->init_smp();
       }
       if (op->is_commutative() == 1)
-        return Coll_reduce_mvapich2_two_level::reduce(sendbuf, recvbuf, count, datatype, op, root, comm);
+        return reduce__mvapich2_two_level(sendbuf, recvbuf, count, datatype, op, root, comm);
     }
 
     communicator_size = comm->size();
@@ -381,9 +378,9 @@ int Coll_reduce_mpich::reduce(const void *sendbuf, void *recvbuf,
     pof2 >>= 1;
 
     if ((count < pof2) || (message_size < 2048) || (op != MPI_OP_NULL && not op->is_commutative())) {
-      return Coll_reduce_binomial::reduce(sendbuf, recvbuf, count, datatype, op, root, comm);
+      return reduce__binomial(sendbuf, recvbuf, count, datatype, op, root, comm);
     }
-        return Coll_reduce_scatter_gather::reduce(sendbuf, recvbuf, count, datatype, op, root, comm);
+    return reduce__scatter_gather(sendbuf, recvbuf, count, datatype, op, root, comm);
 }
 
 
@@ -436,12 +433,12 @@ int Coll_reduce_mpich::reduce(const void *sendbuf, void *recvbuf,
 */
 
 
-int Coll_reduce_scatter_mpich::reduce_scatter(const void *sbuf, void *rbuf,
-                                                    const int *rcounts,
-                                                    MPI_Datatype dtype,
-                                                    MPI_Op  op,
-                                                    MPI_Comm  comm
-                                                    )
+int reduce_scatter__mpich(const void *sbuf, void *rbuf,
+                          const int *rcounts,
+                          MPI_Datatype dtype,
+                          MPI_Op  op,
+                          MPI_Comm  comm
+                          )
 {
     int comm_size, i;
     size_t total_message_size;
@@ -458,9 +455,7 @@ int Coll_reduce_scatter_mpich::reduce_scatter(const void *sbuf, void *rbuf,
     }
 
     if( (op==MPI_OP_NULL || op->is_commutative()) &&  total_message_size > 524288) {
-        return Coll_reduce_scatter_mpich_pair::reduce_scatter (sbuf, rbuf, rcounts,
-                                                                    dtype, op,
-                                                                    comm);
+        return reduce_scatter__mpich_pair(sbuf, rbuf, rcounts, dtype, op, comm);
     } else if ((op != MPI_OP_NULL && not op->is_commutative())) {
       int is_block_regular = 1;
       for (i = 0; i < (comm_size - 1); ++i) {
@@ -477,12 +472,12 @@ int Coll_reduce_scatter_mpich::reduce_scatter(const void *sbuf, void *rbuf,
 
       if (pof2 == comm_size && is_block_regular) {
         /* noncommutative, pof2 size, and block regular */
-        return Coll_reduce_scatter_mpich_noncomm::reduce_scatter(sbuf, rbuf, rcounts, dtype, op, comm);
+        return reduce_scatter__mpich_noncomm(sbuf, rbuf, rcounts, dtype, op, comm);
       }
 
-      return Coll_reduce_scatter_mpich_rdb::reduce_scatter(sbuf, rbuf, rcounts, dtype, op, comm);
+      return reduce_scatter__mpich_rdb(sbuf, rbuf, rcounts, dtype, op, comm);
     }else{
-       return Coll_reduce_scatter_mpich_rdb::reduce_scatter(sbuf, rbuf, rcounts, dtype, op, comm);
+       return reduce_scatter__mpich_rdb(sbuf, rbuf, rcounts, dtype, op, comm);
     }
 }
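
The non-commutative branch above only takes the special-cased algorithm when every rank receives the same count and the communicator size is a power of two. A small sketch of those two predicates, with illustrative names:

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    // True when every rank is assigned the same receive count.
    static bool is_block_regular(const std::vector<int>& rcounts)
    {
      for (std::size_t i = 0; i + 1 < rcounts.size(); ++i)
        if (rcounts[i] != rcounts[i + 1])
          return false;
      return true;
    }

    // True when n (> 0) is a power of two.
    static bool is_power_of_two(int n)
    {
      return n > 0 && (n & (n - 1)) == 0;
    }

    int main()
    {
      std::vector<int> rcounts = {16, 16, 16, 16};
      bool op_is_commutative = false; // pretend the reduction op is non-commutative
      bool special_case = !op_is_commutative && is_block_regular(rcounts) &&
                          is_power_of_two(static_cast<int>(rcounts.size()));
      std::printf("non-commutative, block-regular, pof2 path taken: %s\n",
                  special_case ? "yes" : "no");
      return 0;
    }
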
 
@@ -532,12 +527,12 @@ int Coll_reduce_scatter_mpich::reduce_scatter(const void *sbuf, void *rbuf,
    End Algorithm: MPI_Allgather
 */
 
-int Coll_allgather_mpich::allgather(const void *sbuf, int scount,
-                                              MPI_Datatype sdtype,
-                                              void* rbuf, int rcount,
-                                              MPI_Datatype rdtype,
-                                              MPI_Comm  comm
-                                              )
+int allgather__mpich(const void *sbuf, int scount,
+                     MPI_Datatype sdtype,
+                     void* rbuf, int rcount,
+                     MPI_Datatype rdtype,
+                     MPI_Comm  comm
+                     )
 {
     int communicator_size, pow2_size;
     size_t dsize, total_dsize;
@@ -560,17 +555,11 @@ int Coll_allgather_mpich::allgather(const void *sbuf, int scount,
        - for everything else use ring.
     */
     if ((pow2_size == communicator_size) && (total_dsize < 524288)) {
-        return Coll_allgather_rdb::allgather(sbuf, scount, sdtype,
-                                                                 rbuf, rcount, rdtype,
-                                                                 comm);
+        return allgather__rdb(sbuf, scount, sdtype, rbuf, rcount, rdtype, comm);
     } else if (total_dsize <= 81920) {
-        return Coll_allgather_bruck::allgather(sbuf, scount, sdtype,
-                                                     rbuf, rcount, rdtype,
-                                                     comm);
+        return allgather__bruck(sbuf, scount, sdtype, rbuf, rcount, rdtype, comm);
     }
-    return Coll_allgather_ring::allgather(sbuf, scount, sdtype,
-                                                rbuf, rcount, rdtype,
-                                                comm);
+    return allgather__ring(sbuf, scount, sdtype, rbuf, rcount, rdtype, comm);
 }
 
 
@@ -610,13 +599,13 @@ int Coll_allgather_mpich::allgather(const void *sbuf, int scount,
 
    End Algorithm: MPI_Allgatherv
 */
-int Coll_allgatherv_mpich::allgatherv(const void *sbuf, int scount,
-                                               MPI_Datatype sdtype,
-                                               void* rbuf, const int *rcounts,
-                                               const int *rdispls,
-                                               MPI_Datatype rdtype,
-                                               MPI_Comm  comm
-                                               )
+int allgatherv__mpich(const void *sbuf, int scount,
+                      MPI_Datatype sdtype,
+                      void* rbuf, const int *rcounts,
+                      const int *rdispls,
+                      MPI_Datatype rdtype,
+                      MPI_Comm  comm
+                      )
 {
     int communicator_size, pow2_size,i;
     size_t total_dsize;
@@ -633,17 +622,11 @@ int Coll_allgatherv_mpich::allgatherv(const void *sbuf, int scount,
     for (pow2_size  = 1; pow2_size < communicator_size; pow2_size <<=1);
 
     if ((pow2_size == communicator_size) && (total_dsize < 524288)) {
-        return Coll_allgatherv_mpich_rdb::allgatherv(sbuf, scount, sdtype,
-                                                                 rbuf, rcounts, rdispls, rdtype,
-                                                                 comm);
+        return allgatherv__mpich_rdb(sbuf, scount, sdtype, rbuf, rcounts, rdispls, rdtype, comm);
     } else if (total_dsize <= 81920) {
-        return Coll_allgatherv_ompi_bruck::allgatherv(sbuf, scount, sdtype,
-                                                     rbuf, rcounts, rdispls, rdtype,
-                                                     comm);
+        return allgatherv__ompi_bruck(sbuf, scount, sdtype, rbuf, rcounts, rdispls, rdtype, comm);
     }
-    return Coll_allgatherv_mpich_ring::allgatherv(sbuf, scount, sdtype,
-                                                rbuf, rcounts, rdispls, rdtype,
-                                                comm);
+    return allgatherv__mpich_ring(sbuf, scount, sdtype, rbuf, rcounts, rdispls, rdtype, comm);
 }
 
 /* This is the default implementation of gather. The algorithm is:
@@ -668,17 +651,17 @@ int Coll_allgatherv_mpich::allgatherv(const void *sbuf, int scount,
    End Algorithm: MPI_Gather
 */
 
-int Coll_gather_mpich::gather(const void *sbuf, int scount,
-                                           MPI_Datatype sdtype,
-                                           void* rbuf, int rcount,
-                                           MPI_Datatype rdtype,
-                                           int root,
-                                           MPI_Comm  comm
-                                           )
+int gather__mpich(const void *sbuf, int scount,
+                  MPI_Datatype sdtype,
+                  void* rbuf, int rcount,
+                  MPI_Datatype rdtype,
+                  int root,
+                  MPI_Comm  comm
+                  )
 {
-        return Coll_gather_ompi_binomial::gather (sbuf, scount, sdtype,
-                                                      rbuf, rcount, rdtype,
-                                                      root, comm);
+    return gather__ompi_binomial(sbuf, scount, sdtype,
+                                 rbuf, rcount, rdtype,
+                                 root, comm);
 }
 
 /* This is the default implementation of scatter. The algorithm is:
@@ -703,12 +686,12 @@ int Coll_gather_mpich::gather(const void *sbuf, int scount,
 */
 
 
-int Coll_scatter_mpich::scatter(const void *sbuf, int scount,
-                                            MPI_Datatype sdtype,
-                                            void* rbuf, int rcount,
-                                            MPI_Datatype rdtype,
-                                            int root, MPI_Comm  comm
-                                            )
+int scatter__mpich(const void *sbuf, int scount,
+                   MPI_Datatype sdtype,
+                   void* rbuf, int rcount,
+                   MPI_Datatype rdtype,
+                   int root, MPI_Comm  comm
+                   )
 {
   std::unique_ptr<unsigned char[]> tmp_buf;
   if(comm->rank()!=root){
@@ -717,7 +700,7 @@ int Coll_scatter_mpich::scatter(const void *sbuf, int scount,
     scount = rcount;
     sdtype = rdtype;
   }
-  return Coll_scatter_ompi_binomial::scatter(sbuf, scount, sdtype, rbuf, rcount, rdtype, root, comm);
+  return scatter__ompi_binomial(sbuf, scount, sdtype, rbuf, rcount, rdtype, root, comm);
 }
 }
 }
index d7840cf..2128f77 100644
 
 #include "smpi_mvapich2_selector_stampede.hpp"
 
-namespace simgrid{
-namespace smpi{
+namespace simgrid {
+namespace smpi {
 
 
-int Coll_alltoall_mvapich2::alltoall( const void *sendbuf, int sendcount,
-    MPI_Datatype sendtype,
-    void* recvbuf, int recvcount,
-    MPI_Datatype recvtype,
-    MPI_Comm comm)
+int alltoall__mvapich2( const void *sendbuf, int sendcount,
+                        MPI_Datatype sendtype,
+                        void* recvbuf, int recvcount,
+                        MPI_Datatype recvtype,
+                        MPI_Comm comm)
 {
 
   if(mv2_alltoall_table_ppn_conf==NULL)
@@ -78,7 +78,7 @@ int Coll_alltoall_mvapich2::alltoall( const void *sendbuf, int sendcount,
   return (mpi_errno);
 }
 
-int Coll_allgather_mvapich2::allgather(const void *sendbuf, int sendcount, MPI_Datatype sendtype,
+int allgather__mvapich2(const void *sendbuf, int sendcount, MPI_Datatype sendtype,
     void *recvbuf, int recvcount, MPI_Datatype recvtype,
     MPI_Comm comm)
 {
@@ -157,7 +157,7 @@ int Coll_allgather_mvapich2::allgather(const void *sendbuf, int sendcount, MPI_D
                             recvbuf, recvcount, recvtype,
                             comm);
       }else{
-      mpi_errno = Coll_allgather_mpich::allgather(sendbuf, sendcount, sendtype,
+      mpi_errno = allgather__mpich(sendbuf, sendcount, sendtype,
                             recvbuf, recvcount, recvtype,
                             comm);
       }
@@ -179,7 +179,7 @@ int Coll_allgather_mvapich2::allgather(const void *sendbuf, int sendcount, MPI_D
   return mpi_errno;
 }
 
-int Coll_gather_mvapich2::gather(const void *sendbuf,
+int gather__mvapich2(const void *sendbuf,
     int sendcnt,
     MPI_Datatype sendtype,
     void *recvbuf,
@@ -245,7 +245,7 @@ int Coll_gather_mvapich2::gather(const void *sendbuf,
 
     } else {
   // Indeed, direct (non SMP-aware)gather is MPICH one
-  mpi_errno = Coll_gather_mpich::gather(sendbuf, sendcnt, sendtype,
+  mpi_errno = gather__mpich(sendbuf, sendcnt, sendtype,
       recvbuf, recvcnt, recvtype,
       root, comm);
   }
@@ -253,7 +253,7 @@ int Coll_gather_mvapich2::gather(const void *sendbuf,
   return mpi_errno;
 }
 
-int Coll_allgatherv_mvapich2::allgatherv(const void *sendbuf, int sendcount, MPI_Datatype sendtype,
+int allgatherv__mvapich2(const void *sendbuf, int sendcount, MPI_Datatype sendtype,
     void *recvbuf, const int *recvcounts, const int *displs,
     MPI_Datatype recvtype, MPI_Comm  comm )
 {
@@ -315,7 +315,7 @@ int Coll_allgatherv_mvapich2::allgatherv(const void *sendbuf, int sendcount, MPI
 
 
 
-int Coll_allreduce_mvapich2::allreduce(const void *sendbuf,
+int allreduce__mvapich2(const void *sendbuf,
     void *recvbuf,
     int count,
     MPI_Datatype datatype,
@@ -434,7 +434,7 @@ int Coll_allreduce_mvapich2::allreduce(const void *sendbuf,
 }
 
 
-int Coll_alltoallv_mvapich2::alltoallv(const void *sbuf, const int *scounts, const int *sdisps,
+int alltoallv__mvapich2(const void *sbuf, const int *scounts, const int *sdisps,
     MPI_Datatype sdtype,
     void *rbuf, const int *rcounts, const int *rdisps,
     MPI_Datatype rdtype,
@@ -443,28 +443,28 @@ int Coll_alltoallv_mvapich2::alltoallv(const void *sbuf, const int *scounts, con
 {
 
   if (sbuf == MPI_IN_PLACE) {
-      return Coll_alltoallv_ompi_basic_linear::alltoallv(sbuf, scounts, sdisps, sdtype,
-          rbuf, rcounts, rdisps,rdtype,
-          comm);
+      return alltoallv__ompi_basic_linear(sbuf, scounts, sdisps, sdtype,
+                                          rbuf, rcounts, rdisps, rdtype,
+                                          comm);
   } else     /* For starters, just keep the original algorithm. */
-  return Coll_alltoallv_ring::alltoallv(sbuf, scounts, sdisps, sdtype,
-      rbuf, rcounts, rdisps,rdtype,
-      comm);
+  return alltoallv__ring(sbuf, scounts, sdisps, sdtype,
+                         rbuf, rcounts, rdisps, rdtype,
+                         comm);
 }
 
 
-int Coll_barrier_mvapich2::barrier(MPI_Comm  comm)
+int barrier__mvapich2(MPI_Comm  comm)
 {
-  return Coll_barrier_mvapich2_pair::barrier(comm);
+  return barrier__mvapich2_pair(comm);
 }
 
 
 
 
-int Coll_bcast_mvapich2::bcast(void *buffer,
-    int count,
-    MPI_Datatype datatype,
-    int root, MPI_Comm comm)
+int bcast__mvapich2(void *buffer,
+                    int count,
+                    MPI_Datatype datatype,
+                    int root, MPI_Comm comm)
 {
     int mpi_errno = MPI_SUCCESS;
     int comm_size/*, rank*/;
@@ -656,7 +656,7 @@ int Coll_bcast_mvapich2::bcast(void *buffer,
 
 
 
-int Coll_reduce_mvapich2::reduce(const void *sendbuf,
+int reduce__mvapich2(const void *sendbuf,
     void *recvbuf,
     int count,
     MPI_Datatype datatype,
@@ -774,7 +774,7 @@ int Coll_reduce_mvapich2::reduce(const void *sendbuf,
 }
 
 
-int Coll_reduce_scatter_mvapich2::reduce_scatter(const void *sendbuf, void *recvbuf, const int *recvcnts,
+int reduce_scatter__mvapich2(const void *sendbuf, void *recvbuf, const int *recvcnts,
     MPI_Datatype datatype, MPI_Op op,
     MPI_Comm comm)
 {
@@ -838,9 +838,9 @@ int Coll_reduce_scatter_mvapich2::reduce_scatter(const void *sendbuf, void *recv
               recvcnts, datatype,
               op, comm);
       }
-      mpi_errno =  Coll_reduce_scatter_mpich_rdb::reduce_scatter(sendbuf, recvbuf,
-          recvcnts, datatype,
-          op, comm);
+      mpi_errno =  reduce_scatter__mpich_rdb(sendbuf, recvbuf,
+                                             recvcnts, datatype,
+                                             op, comm);
   }
   delete[] disps;
   return mpi_errno;
@@ -849,7 +849,7 @@ int Coll_reduce_scatter_mvapich2::reduce_scatter(const void *sendbuf, void *recv
 
 
 
-int Coll_scatter_mvapich2::scatter(const void *sendbuf,
+int scatter__mvapich2(const void *sendbuf,
     int sendcnt,
     MPI_Datatype sendtype,
     void *recvbuf,
index 9762cf2..25df8f1 100644
@@ -42,11 +42,11 @@ int mv2_alltoall_num_ppn_conf                             = 1;
 int* mv2_size_alltoall_tuning_table                       = NULL;
 mv2_alltoall_tuning_table** mv2_alltoall_thresholds_table = NULL;
 
-#define MPIR_Alltoall_bruck_MV2 simgrid::smpi::Coll_alltoall_bruck::alltoall
-#define MPIR_Alltoall_RD_MV2 simgrid::smpi::Coll_alltoall_rdb::alltoall
-#define MPIR_Alltoall_Scatter_dest_MV2 simgrid::smpi::Coll_alltoall_mvapich2_scatter_dest::alltoall
-#define MPIR_Alltoall_pairwise_MV2 simgrid::smpi::Coll_alltoall_pair::alltoall
-#define MPIR_Alltoall_inplace_MV2 simgrid::smpi::Coll_alltoall_ring::alltoall
+#define MPIR_Alltoall_bruck_MV2 simgrid::smpi::alltoall__bruck
+#define MPIR_Alltoall_RD_MV2 simgrid::smpi::alltoall__rdb
+#define MPIR_Alltoall_Scatter_dest_MV2 simgrid::smpi::alltoall__mvapich2_scatter_dest
+#define MPIR_Alltoall_pairwise_MV2 simgrid::smpi::alltoall__pair
+#define MPIR_Alltoall_inplace_MV2 simgrid::smpi::alltoall__ring
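
The MV2 selector keeps the upstream MPIR_* identifiers and #defines them onto the flat smpi functions, so the imported MVAPICH2 tuning code needs no edits. A self-contained sketch of that aliasing pattern, with made-up names:

    #include <cstdio>

    namespace simgrid { namespace smpi {
    // Stand-in for one of the flat collective functions referenced above.
    int alltoall__bruck_demo(int count) { std::printf("bruck alltoall, count=%d\n", count); return 0; }
    }} // namespace simgrid::smpi

    // The imported tuning code keeps using its upstream name; the macro maps it
    // onto the SimGrid implementation without editing that code.
    #define MPIR_Alltoall_bruck_DEMO simgrid::smpi::alltoall__bruck_demo

    int main()
    {
      return MPIR_Alltoall_bruck_DEMO(256);
    }
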
 
 static void init_mv2_alltoall_tables_stampede()
 {
@@ -358,10 +358,10 @@ static int MPIR_Allgather_RD_Allgather_Comm_MV2(const void* sendbuf, int sendcou
   return 0;
 }
 
-#define MPIR_Allgather_Bruck_MV2 simgrid::smpi::Coll_allgather_bruck::allgather
-#define MPIR_Allgather_RD_MV2 simgrid::smpi::Coll_allgather_rdb::allgather
-#define MPIR_Allgather_Ring_MV2 simgrid::smpi::Coll_allgather_ring::allgather
-#define MPIR_2lvl_Allgather_MV2 simgrid::smpi::Coll_allgather_mvapich2_smp::allgather
+#define MPIR_Allgather_Bruck_MV2 simgrid::smpi::allgather__bruck
+#define MPIR_Allgather_RD_MV2 simgrid::smpi::allgather__rdb
+#define MPIR_Allgather_Ring_MV2 simgrid::smpi::allgather__ring
+#define MPIR_2lvl_Allgather_MV2 simgrid::smpi::allgather__mvapich2_smp
 
 static void init_mv2_allgather_tables_stampede()
 {
@@ -583,9 +583,9 @@ typedef int (*MV2_Gather_function_ptr)(const void* sendbuf, int sendcnt, MPI_Dat
 MV2_Gather_function_ptr MV2_Gather_inter_leader_function = NULL;
 MV2_Gather_function_ptr MV2_Gather_intra_node_function   = NULL;
 
-#define MPIR_Gather_MV2_Direct simgrid::smpi::Coll_gather_ompi_basic_linear::gather
-#define MPIR_Gather_MV2_two_level_Direct simgrid::smpi::Coll_gather_mvapich2_two_level::gather
-#define MPIR_Gather_intra simgrid::smpi::Coll_gather_mpich::gather
+#define MPIR_Gather_MV2_Direct simgrid::smpi::gather__ompi_basic_linear
+#define MPIR_Gather_MV2_two_level_Direct simgrid::smpi::gather__mvapich2_two_level
+#define MPIR_Gather_intra simgrid::smpi::gather__mpich
 
 static void init_mv2_gather_tables_stampede()
 {
@@ -668,9 +668,9 @@ int (*MV2_Allgatherv_function)(const void* sendbuf, int sendcount, MPI_Datatype
 int mv2_size_allgatherv_tuning_table                         = 0;
 mv2_allgatherv_tuning_table* mv2_allgatherv_thresholds_table = NULL;
 
-#define MPIR_Allgatherv_Rec_Doubling_MV2 simgrid::smpi::Coll_allgatherv_mpich_rdb::allgatherv
-#define MPIR_Allgatherv_Bruck_MV2 simgrid::smpi::Coll_allgatherv_ompi_bruck::allgatherv
-#define MPIR_Allgatherv_Ring_MV2 simgrid::smpi::Coll_allgatherv_mpich_ring::allgatherv
+#define MPIR_Allgatherv_Rec_Doubling_MV2 simgrid::smpi::allgatherv__mpich_rdb
+#define MPIR_Allgatherv_Bruck_MV2 simgrid::smpi::allgatherv__ompi_bruck
+#define MPIR_Allgatherv_Ring_MV2 simgrid::smpi::allgatherv__mpich_ring
 
 static void init_mv2_allgatherv_tables_stampede()
 {
@@ -780,9 +780,9 @@ static int MPIR_Allreduce_reduce_shmem_MV2(const void* sendbuf, void* recvbuf, i
   return MPI_SUCCESS;
 }
 
-#define MPIR_Allreduce_pt2pt_rd_MV2 simgrid::smpi::Coll_allreduce_rdb::allreduce
-#define MPIR_Allreduce_pt2pt_rs_MV2 simgrid::smpi::Coll_allreduce_mvapich2_rs::allreduce
-#define MPIR_Allreduce_two_level_MV2 simgrid::smpi::Coll_allreduce_mvapich2_two_level::allreduce
+#define MPIR_Allreduce_pt2pt_rd_MV2 simgrid::smpi::allreduce__rdb
+#define MPIR_Allreduce_pt2pt_rs_MV2 simgrid::smpi::allreduce__mvapich2_rs
+#define MPIR_Allreduce_two_level_MV2 simgrid::smpi::allreduce__mvapich2_two_level
 
 static void init_mv2_allreduce_tables_stampede()
 {
@@ -955,17 +955,17 @@ int mv2_intra_node_knomial_factor     = 4;
 
 #define INTRA_NODE_ROOT 0
 
-#define MPIR_Pipelined_Bcast_Zcpy_MV2 simgrid::smpi::Coll_bcast_mpich::bcast
-#define MPIR_Pipelined_Bcast_MV2 simgrid::smpi::Coll_bcast_mpich::bcast
-#define MPIR_Bcast_binomial_MV2 simgrid::smpi::Coll_bcast_binomial_tree::bcast
-#define MPIR_Bcast_scatter_ring_allgather_shm_MV2 simgrid::smpi::Coll_bcast_scatter_LR_allgather::bcast
-#define MPIR_Bcast_scatter_doubling_allgather_MV2 simgrid::smpi::Coll_bcast_scatter_rdb_allgather::bcast
-#define MPIR_Bcast_scatter_ring_allgather_MV2 simgrid::smpi::Coll_bcast_scatter_LR_allgather::bcast
-#define MPIR_Shmem_Bcast_MV2 simgrid::smpi::Coll_bcast_mpich::bcast
-#define MPIR_Bcast_tune_inter_node_helper_MV2 simgrid::smpi::Coll_bcast_mvapich2_inter_node::bcast
-#define MPIR_Bcast_inter_node_helper_MV2 simgrid::smpi::Coll_bcast_mvapich2_inter_node::bcast
-#define MPIR_Knomial_Bcast_intra_node_MV2 simgrid::smpi::Coll_bcast_mvapich2_knomial_intra_node::bcast
-#define MPIR_Bcast_intra_MV2 simgrid::smpi::Coll_bcast_mvapich2_intra_node::bcast
+#define MPIR_Pipelined_Bcast_Zcpy_MV2 simgrid::smpi::bcast__mpich
+#define MPIR_Pipelined_Bcast_MV2 simgrid::smpi::bcast__mpich
+#define MPIR_Bcast_binomial_MV2 simgrid::smpi::bcast__binomial_tree
+#define MPIR_Bcast_scatter_ring_allgather_shm_MV2 simgrid::smpi::bcast__scatter_LR_allgather
+#define MPIR_Bcast_scatter_doubling_allgather_MV2 simgrid::smpi::bcast__scatter_rdb_allgather
+#define MPIR_Bcast_scatter_ring_allgather_MV2 simgrid::smpi::bcast__scatter_LR_allgather
+#define MPIR_Shmem_Bcast_MV2 simgrid::smpi::bcast__mpich
+#define MPIR_Bcast_tune_inter_node_helper_MV2 simgrid::smpi::bcast__mvapich2_inter_node
+#define MPIR_Bcast_inter_node_helper_MV2 simgrid::smpi::bcast__mvapich2_inter_node
+#define MPIR_Knomial_Bcast_intra_node_MV2 simgrid::smpi::bcast__mvapich2_knomial_intra_node
+#define MPIR_Bcast_intra_MV2 simgrid::smpi::bcast__mvapich2_intra_node
 
 static void init_mv2_bcast_tables_stampede()
 {
@@ -1176,12 +1176,12 @@ int (*MV2_Reduce_function)(const void* sendbuf, void* recvbuf, int count, MPI_Da
 int (*MV2_Reduce_intra_function)(const void* sendbuf, void* recvbuf, int count, MPI_Datatype datatype, MPI_Op op, int root,
                                  MPI_Comm comm_ptr) = NULL;
 
-#define MPIR_Reduce_inter_knomial_wrapper_MV2 simgrid::smpi::Coll_reduce_mvapich2_knomial::reduce
-#define MPIR_Reduce_intra_knomial_wrapper_MV2 simgrid::smpi::Coll_reduce_mvapich2_knomial::reduce
-#define MPIR_Reduce_binomial_MV2 simgrid::smpi::Coll_reduce_binomial::reduce
-#define MPIR_Reduce_redscat_gather_MV2 simgrid::smpi::Coll_reduce_scatter_gather::reduce
-#define MPIR_Reduce_shmem_MV2 simgrid::smpi::Coll_reduce_ompi_basic_linear::reduce
-#define MPIR_Reduce_two_level_helper_MV2 simgrid::smpi::Coll_reduce_mvapich2_two_level::reduce
+#define MPIR_Reduce_inter_knomial_wrapper_MV2 simgrid::smpi::reduce__mvapich2_knomial
+#define MPIR_Reduce_intra_knomial_wrapper_MV2 simgrid::smpi::reduce__mvapich2_knomial
+#define MPIR_Reduce_binomial_MV2 simgrid::smpi::reduce__binomial
+#define MPIR_Reduce_redscat_gather_MV2 simgrid::smpi::reduce__scatter_gather
+#define MPIR_Reduce_shmem_MV2 simgrid::smpi::reduce__ompi_basic_linear
+#define MPIR_Reduce_two_level_helper_MV2 simgrid::smpi::reduce__mvapich2_two_level
 
 static void init_mv2_reduce_tables_stampede()
 {
@@ -1400,13 +1400,12 @@ int (*MV2_Red_scat_function)(const void* sendbuf, void* recvbuf, const int* recv
 static int MPIR_Reduce_Scatter_Basic_MV2(const void* sendbuf, void* recvbuf, const int* recvcnts, MPI_Datatype datatype, MPI_Op op,
                                          MPI_Comm comm)
 {
-  simgrid::smpi::Coll_reduce_scatter_default::reduce_scatter(sendbuf, recvbuf, recvcnts, datatype, op, comm);
+  simgrid::smpi::reduce_scatter__default(sendbuf, recvbuf, recvcnts, datatype, op, comm);
   return MPI_SUCCESS;
 }
-#define MPIR_Reduce_scatter_non_comm_MV2 simgrid::smpi::Coll_reduce_scatter_mpich_noncomm::reduce_scatter
-#define MPIR_Reduce_scatter_Rec_Halving_MV2                                                                            \
-  simgrid::smpi::Coll_reduce_scatter_ompi_basic_recursivehalving::reduce_scatter
-#define MPIR_Reduce_scatter_Pair_Wise_MV2 simgrid::smpi::Coll_reduce_scatter_mpich_pair::reduce_scatter
+#define MPIR_Reduce_scatter_non_comm_MV2 simgrid::smpi::reduce_scatter__mpich_noncomm
+#define MPIR_Reduce_scatter_Rec_Halving_MV2 simgrid::smpi::reduce_scatter__ompi_basic_recursivehalving
+#define MPIR_Reduce_scatter_Pair_Wise_MV2 simgrid::smpi::reduce_scatter__mpich_pair
 
 static void init_mv2_reduce_scatter_tables_stampede()
 {
@@ -1504,10 +1503,10 @@ int MPIR_Scatter_mcst_wrap_MV2(const void* sendbuf, int sendcnt, MPI_Datatype se
   return 0;
 }
 
-#define MPIR_Scatter_MV2_Binomial simgrid::smpi::Coll_scatter_ompi_binomial::scatter
-#define MPIR_Scatter_MV2_Direct simgrid::smpi::Coll_scatter_ompi_basic_linear::scatter
-#define MPIR_Scatter_MV2_two_level_Binomial simgrid::smpi::Coll_scatter_mvapich2_two_level_binomial::scatter
-#define MPIR_Scatter_MV2_two_level_Direct simgrid::smpi::Coll_scatter_mvapich2_two_level_direct::scatter
+#define MPIR_Scatter_MV2_Binomial simgrid::smpi::scatter__ompi_binomial
+#define MPIR_Scatter_MV2_Direct simgrid::smpi::scatter__ompi_basic_linear
+#define MPIR_Scatter_MV2_two_level_Binomial simgrid::smpi::scatter__mvapich2_two_level_binomial
+#define MPIR_Scatter_MV2_two_level_Direct simgrid::smpi::scatter__mvapich2_two_level_direct
 
 static void init_mv2_scatter_tables_stampede()
 {
index 3461709..46eca5e 100644
@@ -8,11 +8,11 @@
 
 #include "colls_private.hpp"
 
-namespace simgrid{
-namespace smpi{
+namespace simgrid {
+namespace smpi {
 
-int Coll_allreduce_ompi::allreduce(const void *sbuf, void *rbuf, int count,
-                        MPI_Datatype dtype, MPI_Op op, MPI_Comm comm)
+int allreduce__ompi(const void *sbuf, void *rbuf, int count,
+                    MPI_Datatype dtype, MPI_Op op, MPI_Comm comm)
 {
     size_t dsize, block_dsize;
     int comm_size = comm->size();
@@ -29,9 +29,7 @@ int Coll_allreduce_ompi::allreduce(const void *sbuf, void *rbuf, int count,
     block_dsize = dsize * count;
 
     if (block_dsize < intermediate_message) {
-        return (Coll_allreduce_rdb::allreduce (sbuf, rbuf,
-                                                                   count, dtype,
-                                                                   op, comm));
+        return allreduce__rdb(sbuf, rbuf, count, dtype, op, comm);
     }
 
     if( ((op==MPI_OP_NULL) || op->is_commutative()) && (count > comm_size) ) {
@@ -39,27 +37,22 @@ int Coll_allreduce_ompi::allreduce(const void *sbuf, void *rbuf, int count,
         if ((comm_size * segment_size >= block_dsize)) {
             //FIXME: ok, these are not the right algorithms, try to find closer ones
             // lr is a good match for allreduce_ring (difference is mainly the use of sendrecv)
-            return Coll_allreduce_lr::allreduce(sbuf, rbuf, count, dtype,
-                                              op, comm);
+            return allreduce__lr(sbuf, rbuf, count, dtype, op, comm);
         } else {
-           return (Coll_allreduce_ompi_ring_segmented::allreduce (sbuf, rbuf,
-                                                                    count, dtype,
-                                                                    op, comm
-                                                                    /*segment_size*/));
+           return allreduce__ompi_ring_segmented(sbuf, rbuf, count, dtype, op, comm /*segment_size*/);
         }
     }
 
-    return (Coll_allreduce_redbcast::allreduce(sbuf, rbuf, count,
-                                                            dtype, op, comm));
+    return allreduce__redbcast(sbuf, rbuf, count, dtype, op, comm);
 }
 
 
 
-int Coll_alltoall_ompi::alltoall(const void *sbuf, int scount,
-                                             MPI_Datatype sdtype,
-                                             void* rbuf, int rcount,
-                                             MPI_Datatype rdtype,
-                                             MPI_Comm comm)
+int alltoall__ompi(const void *sbuf, int scount,
+                   MPI_Datatype sdtype,
+                   void* rbuf, int rcount,
+                   MPI_Datatype rdtype,
+                   MPI_Comm comm)
 {
     int communicator_size;
     size_t dsize, block_dsize;
@@ -73,40 +66,36 @@ int Coll_alltoall_ompi::alltoall(const void *sbuf, int scount,
     block_dsize = dsize * scount;
 
     if ((block_dsize < 200) && (communicator_size > 12)) {
-        return Coll_alltoall_bruck::alltoall(sbuf, scount, sdtype,
-                                                    rbuf, rcount, rdtype,
-                                                    comm);
+        return alltoall__bruck(sbuf, scount, sdtype,
+                               rbuf, rcount, rdtype, comm);
 
     } else if (block_dsize < 3000) {
-        return Coll_alltoall_basic_linear::alltoall(sbuf, scount, sdtype,
-                                                           rbuf, rcount, rdtype,
-                                                           comm);
+        return alltoall__basic_linear(sbuf, scount, sdtype,
+                                      rbuf, rcount, rdtype, comm);
     }
 
-    return Coll_alltoall_ring::alltoall (sbuf, scount, sdtype,
-                                                    rbuf, rcount, rdtype,
-                                                    comm);
+    return alltoall__ring(sbuf, scount, sdtype,
+                          rbuf, rcount, rdtype, comm);
 }
 
-int Coll_alltoallv_ompi::alltoallv(const void *sbuf, const int *scounts, const int *sdisps,
-                                              MPI_Datatype sdtype,
-                                              void *rbuf, const int *rcounts, const int *rdisps,
-                                              MPI_Datatype rdtype,
-                                              MPI_Comm  comm
-                                              )
+int alltoallv__ompi(const void *sbuf, const int *scounts, const int *sdisps,
+                    MPI_Datatype sdtype,
+                    void *rbuf, const int *rcounts, const int *rdisps,
+                    MPI_Datatype rdtype,
+                    MPI_Comm  comm
+                    )
 {
     /* For starters, just keep the original algorithm. */
-    return Coll_alltoallv_ring::alltoallv(sbuf, scounts, sdisps, sdtype,
-                                                        rbuf, rcounts, rdisps,rdtype,
-                                                        comm);
+    return alltoallv__ring(sbuf, scounts, sdisps, sdtype,
+                           rbuf, rcounts, rdisps,rdtype,
+                           comm);
 }
 
-
-int Coll_barrier_ompi::barrier(MPI_Comm  comm)
+int barrier__ompi(MPI_Comm  comm)
 {    int communicator_size = comm->size();
 
     if( 2 == communicator_size )
-        return Coll_barrier_ompi_two_procs::barrier(comm);
+        return barrier__ompi_two_procs(comm);
 /*     * Basic optimisation. If we have a power of 2 number of nodes*/
 /*     * the use the recursive doubling algorithm, otherwise*/
 /*     * bruck is the one we want.*/
@@ -115,18 +104,15 @@ int Coll_barrier_ompi::barrier(MPI_Comm  comm)
         for( ; communicator_size > 0; communicator_size >>= 1 ) {
             if( communicator_size & 0x1 ) {
                 if( has_one )
-                    return Coll_barrier_ompi_bruck::barrier(comm);
+                    return barrier__ompi_bruck(comm);
                 has_one = 1;
             }
         }
     }
-    return Coll_barrier_ompi_recursivedoubling::barrier(comm);
+    return barrier__ompi_recursivedoubling(comm);
 }
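
barrier__ompi above walks the bits of the communicator size: finding a second set bit means the size is not a power of two, so Bruck is chosen instead of recursive doubling. A stand-alone version of that bit scan:

    #include <cstdio>

    // Returns true when n (> 0) has exactly one bit set, i.e. is a power of two,
    // written as the same bit walk used by the selector above.
    static bool single_bit_set(int n)
    {
      bool has_one = false;
      for (; n > 0; n >>= 1) {
        if (n & 0x1) {
          if (has_one)
            return false; // second set bit found: not a power of two
          has_one = true;
        }
      }
      return has_one;
    }

    int main()
    {
      std::printf("16 ranks -> %s\n", single_bit_set(16) ? "recursive doubling" : "bruck");
      std::printf("24 ranks -> %s\n", single_bit_set(24) ? "recursive doubling" : "bruck");
      return 0;
    }
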
 
-int Coll_bcast_ompi::bcast(void *buff, int count,
-                                          MPI_Datatype datatype, int root,
-                                          MPI_Comm  comm
-                                          )
+int bcast__ompi(void *buff, int count, MPI_Datatype datatype, int root, MPI_Comm  comm)
 {
     /* Decision function based on MX results for
        messages up to 36MB and communicator sizes up to 64 nodes */
@@ -153,82 +139,73 @@ int Coll_bcast_ompi::bcast(void *buff, int count,
        single-element broadcasts */
     if ((message_size < small_message_size) || (count <= 1)) {
         /* Binomial without segmentation */
-        return  Coll_bcast_binomial_tree::bcast (buff, count, datatype,
-                                                      root, comm);
+        return bcast__binomial_tree(buff, count, datatype, root, comm);
 
     } else if (message_size < intermediate_message_size) {
         // SplittedBinary with 1KB segments
-        return Coll_bcast_ompi_split_bintree::bcast(buff, count, datatype,
-                                                         root, comm);
+        return bcast__ompi_split_bintree(buff, count, datatype, root, comm);
 
     }
      //Handle large message sizes
     else if (communicator_size < (a_p128 * message_size + b_p128)) {
         //Pipeline with 128KB segments
         //segsize = 1024  << 7;
-        return Coll_bcast_ompi_pipeline::bcast (buff, count, datatype,
-                                                     root, comm);
+        return bcast__ompi_pipeline(buff, count, datatype, root, comm);
 
 
     } else if (communicator_size < 13) {
         // Split Binary with 8KB segments
-        return Coll_bcast_ompi_split_bintree::bcast(buff, count, datatype,
-                                                         root, comm);
+        return bcast__ompi_split_bintree(buff, count, datatype, root, comm);
 
     } else if (communicator_size < (a_p64 * message_size + b_p64)) {
         // Pipeline with 64KB segments
         //segsize = 1024 << 6;
-        return Coll_bcast_ompi_pipeline::bcast (buff, count, datatype,
-                                                     root, comm);
+        return bcast__ompi_pipeline(buff, count, datatype, root, comm);
 
 
     } else if (communicator_size < (a_p16 * message_size + b_p16)) {
         //Pipeline with 16KB segments
         //segsize = 1024 << 4;
-        return Coll_bcast_ompi_pipeline::bcast (buff, count, datatype,
-                                                     root, comm);
+        return bcast__ompi_pipeline(buff, count, datatype, root, comm);
 
 
     }
     /* Pipeline with 8KB segments */
     //segsize = 1024 << 3;
-    return Coll_bcast_flattree_pipeline::bcast (buff, count, datatype,
-                                                 root, comm
-                                                 /*segsize*/);
+    return bcast__flattree_pipeline(buff, count, datatype, root, comm /*segsize*/);
 #if 0
     /* this is based on gige measurements */
 
     if (communicator_size  < 4) {
-        return Coll_bcast_intra_basic_linear::bcast (buff, count, datatype, root, comm, module);
+        return bcast__intra_basic_linear(buff, count, datatype, root, comm, module);
     }
     if (communicator_size == 4) {
         if (message_size < 524288) segsize = 0;
         else segsize = 16384;
-        return Coll_bcast_intra_bintree::bcast (buff, count, datatype, root, comm, module, segsize);
+        return bcast__intra_bintree(buff, count, datatype, root, comm, module, segsize);
     }
     if (communicator_size <= 8 && message_size < 4096) {
-        return Coll_bcast_intra_basic_linear::bcast (buff, count, datatype, root, comm, module);
+        return bcast__intra_basic_linear(buff, count, datatype, root, comm, module);
     }
     if (communicator_size > 8 && message_size >= 32768 && message_size < 524288) {
         segsize = 16384;
-        return  Coll_bcast_intra_bintree::bcast (buff, count, datatype, root, comm, module, segsize);
+        return  bcast__intra_bintree(buff, count, datatype, root, comm, module, segsize);
     }
     if (message_size >= 524288) {
         segsize = 16384;
-        return Coll_bcast_intra_pipeline::bcast (buff, count, datatype, root, comm, module, segsize);
+        return bcast__intra_pipeline(buff, count, datatype, root, comm, module, segsize);
     }
     segsize = 0;
     /* once tested can swap this back in */
-    /* return Coll_bcast_intra_bmtree::bcast (buff, count, datatype, root, comm, segsize); */
-    return Coll_bcast_intra_bintree::bcast (buff, count, datatype, root, comm, module, segsize);
+    /* return bcast__intra_bmtree(buff, count, datatype, root, comm, segsize); */
+    return bcast__intra_bintree(buff, count, datatype, root, comm, module, segsize);
 #endif  /* 0 */
 }
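
Several OMPI-derived choices above compare the communicator size against a linear function of the message size (comm_size < a * message_size + b), with coefficients fitted from measurements. A toy evaluation of one such boundary; the coefficients below are invented, only the shape of the test matches:

    #include <cstdio>

    // Toy crossover test in the style of the checks above: below the line
    // (comm_size < a * message_size + b) one algorithm wins, above it another.
    static const char* pick_segment_size(int comm_size, double message_size)
    {
      const double a_p128 = 1.6e-6; // invented coefficients; the real ones are
      const double b_p128 = 8.7;    // fitted from benchmark measurements
      if (comm_size < a_p128 * message_size + b_p128)
        return "pipeline, 128KB segments";
      return "try the next boundary";
    }

    int main()
    {
      std::printf("32 ranks, %.0f bytes -> %s\n", 64e6, pick_segment_size(32, 64e6));
      std::printf("32 ranks, %.0f bytes -> %s\n", 1e6,  pick_segment_size(32, 1e6));
      return 0;
    }
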
 
-int Coll_reduce_ompi::reduce(const void *sendbuf, void *recvbuf,
-                                            int count, MPI_Datatype  datatype,
-                                            MPI_Op   op, int root,
-                                            MPI_Comm   comm
-                                            )
+int reduce__ompi(const void *sendbuf, void *recvbuf,
+                 int count, MPI_Datatype  datatype,
+                 MPI_Op   op, int root,
+                 MPI_Comm   comm)
 {
     int communicator_size=0;
     //int segsize = 0;
@@ -257,35 +234,34 @@ int Coll_reduce_ompi::reduce(const void *sendbuf, void *recvbuf,
      */
     if ((op != MPI_OP_NULL) && not op->is_commutative()) {
       if ((communicator_size < 12) && (message_size < 2048)) {
-        return Coll_reduce_ompi_basic_linear::reduce(sendbuf, recvbuf, count, datatype, op, root, comm /*, module*/);
+        return reduce__ompi_basic_linear(sendbuf, recvbuf, count, datatype, op, root, comm /*, module*/);
       }
-      return Coll_reduce_ompi_in_order_binary::reduce(sendbuf, recvbuf, count, datatype, op, root, comm /*, module,
+      return reduce__ompi_in_order_binary(sendbuf, recvbuf, count, datatype, op, root, comm /*, module,
                                                              0, max_requests*/);
     }
 
     if ((communicator_size < 8) && (message_size < 512)){
         /* Linear_0K */
-        return Coll_reduce_ompi_basic_linear::reduce (sendbuf, recvbuf, count, datatype, op, root, comm);
+        return reduce__ompi_basic_linear(sendbuf, recvbuf, count, datatype, op, root, comm);
     } else if (((communicator_size < 8) && (message_size < 20480)) ||
                (message_size < 2048) || (count <= 1)) {
         /* Binomial_0K */
         //segsize = 0;
-        return Coll_reduce_ompi_binomial::reduce(sendbuf, recvbuf, count, datatype, op, root, comm/*, module,
-                                                     segsize, max_requests*/);
+        return reduce__ompi_binomial(sendbuf, recvbuf, count, datatype, op, root, comm/*, module, segsize, max_requests*/);
     } else if (communicator_size > (a1 * message_size + b1)) {
         // Binomial_1K
         //segsize = 1024;
-        return Coll_reduce_ompi_binomial::reduce(sendbuf, recvbuf, count, datatype, op, root, comm/*, module,
+        return reduce__ompi_binomial(sendbuf, recvbuf, count, datatype, op, root, comm/*, module,
                                                      segsize, max_requests*/);
     } else if (communicator_size > (a2 * message_size + b2)) {
         // Pipeline_1K
         //segsize = 1024;
-        return Coll_reduce_ompi_pipeline::reduce (sendbuf, recvbuf, count, datatype, op, root, comm/*, module,
+        return reduce__ompi_pipeline(sendbuf, recvbuf, count, datatype, op, root, comm/*, module,
                                                       segsize, max_requests*/);
     } else if (communicator_size > (a3 * message_size + b3)) {
         // Binary_32K
         //segsize = 32*1024;
-        return Coll_reduce_ompi_binary::reduce( sendbuf, recvbuf, count, datatype, op, root,
+        return reduce__ompi_binary( sendbuf, recvbuf, count, datatype, op, root,
                                                     comm/*, module, segsize, max_requests*/);
     }
 //    if (communicator_size > (a4 * message_size + b4)) {
@@ -295,7 +271,7 @@ int Coll_reduce_ompi::reduce(const void *sendbuf, void *recvbuf,
         // Pipeline_64K
 //        segsize = 64*1024;
 //    }
-    return Coll_reduce_ompi_pipeline::reduce (sendbuf, recvbuf, count, datatype, op, root, comm/*, module,
+    return reduce__ompi_pipeline(sendbuf, recvbuf, count, datatype, op, root, comm/*, module,
                                                   segsize, max_requests*/);
 
 #if 0
@@ -305,8 +281,8 @@ int Coll_reduce_ompi::reduce(const void *sendbuf, void *recvbuf,
         fanout = communicator_size - 1;
         /* when linear implemented or taken from basic put here, right now using chain as a linear system */
         /* it is implemented and I shouldn't be calling a chain with a fanout bigger than MAXTREEFANOUT from topo.h! */
-        return Coll_reduce_intra_basic_linear::reduce (sendbuf, recvbuf, count, datatype, op, root, comm, module);
-        /*        return Coll_reduce_intra_chain::reduce (sendbuf, recvbuf, count, datatype, op, root, comm, segsize, fanout); */
+        return reduce__intra_basic_linear(sendbuf, recvbuf, count, datatype, op, root, comm, module);
+        /*        return reduce__intra_chain(sendbuf, recvbuf, count, datatype, op, root, comm, segsize, fanout); */
     }
     if (message_size < 524288) {
         if (message_size <= 65536 ) {
@@ -318,21 +294,21 @@ int Coll_reduce_ompi::reduce(const void *sendbuf, void *recvbuf,
         }
         /* later swap this for a binary tree */
         /*         fanout = 2; */
-        return Coll_reduce_intra_chain::reduce (sendbuf, recvbuf, count, datatype, op, root, comm, module,
-                                                   segsize, fanout, max_requests);
+        return reduce__intra_chain(sendbuf, recvbuf, count, datatype, op, root, comm, module,
+                                   segsize, fanout, max_requests);
     }
     segsize = 1024;
-    return Coll_reduce_intra_pipeline::reduce (sendbuf, recvbuf, count, datatype, op, root, comm, module,
-                                                  segsize, max_requests);
+    return reduce__intra_pipeline(sendbuf, recvbuf, count, datatype, op, root, comm, module,
+                                  segsize, max_requests);
 #endif  /* 0 */
 }
 
-int Coll_reduce_scatter_ompi::reduce_scatter(const void *sbuf, void *rbuf,
-                                                    const int *rcounts,
-                                                    MPI_Datatype dtype,
-                                                    MPI_Op  op,
-                                                    MPI_Comm  comm
-                                                    )
+int reduce_scatter__ompi(const void *sbuf, void *rbuf,
+                         const int *rcounts,
+                         MPI_Datatype dtype,
+                         MPI_Op  op,
+                         MPI_Comm  comm
+                         )
 {
     int comm_size, i, pow2;
     size_t total_message_size, dsize;
@@ -342,7 +318,7 @@ int Coll_reduce_scatter_ompi::reduce_scatter(const void *sbuf, void *rbuf,
     const size_t large_message_size = 256 * 1024;
     int zerocounts = 0;
 
-    XBT_DEBUG("Coll_reduce_scatter_ompi::reduce_scatter");
+    XBT_DEBUG("reduce_scatter__ompi");
 
     comm_size = comm->size();
     // We need data size for decision function
@@ -356,7 +332,7 @@ int Coll_reduce_scatter_ompi::reduce_scatter(const void *sbuf, void *rbuf,
     }
 
     if (((op != MPI_OP_NULL) && not op->is_commutative()) || (zerocounts)) {
-      Coll_reduce_scatter_default::reduce_scatter(sbuf, rbuf, rcounts, dtype, op, comm);
+      reduce_scatter__default(sbuf, rbuf, rcounts, dtype, op, comm);
       return MPI_SUCCESS;
     }
 
@@ -368,25 +344,17 @@ int Coll_reduce_scatter_ompi::reduce_scatter(const void *sbuf, void *rbuf,
     if ((total_message_size <= small_message_size) ||
         ((total_message_size <= large_message_size) && (pow2 == comm_size)) ||
         (comm_size >= a * total_message_size + b)) {
-        return
-            Coll_reduce_scatter_ompi_basic_recursivehalving::reduce_scatter(sbuf, rbuf, rcounts,
-                                                                        dtype, op,
-                                                                        comm);
+        return reduce_scatter__ompi_basic_recursivehalving(sbuf, rbuf, rcounts, dtype, op, comm);
     }
-    return Coll_reduce_scatter_ompi_ring::reduce_scatter(sbuf, rbuf, rcounts,
-                                                     dtype, op,
-                                                     comm);
-
-
-
+    return reduce_scatter__ompi_ring(sbuf, rbuf, rcounts, dtype, op, comm);
 }
 
-int Coll_allgather_ompi::allgather(const void *sbuf, int scount,
-                                              MPI_Datatype sdtype,
-                                              void* rbuf, int rcount,
-                                              MPI_Datatype rdtype,
-                                              MPI_Comm  comm
-                                              )
+int allgather__ompi(const void *sbuf, int scount,
+                    MPI_Datatype sdtype,
+                    void* rbuf, int rcount,
+                    MPI_Datatype rdtype,
+                    MPI_Comm  comm
+                    )
 {
     int communicator_size, pow2_size;
     size_t dsize, total_dsize;
@@ -395,9 +363,9 @@ int Coll_allgather_ompi::allgather(const void *sbuf, int scount,
 
     /* Special case for 2 processes */
     if (communicator_size == 2) {
-        return Coll_allgather_pair::allgather (sbuf, scount, sdtype,
-                                                          rbuf, rcount, rdtype,
-                                                          comm/*, module*/);
+        return allgather__pair(sbuf, scount, sdtype,
+                               rbuf, rcount, rdtype,
+                               comm/*, module*/);
     }
 
     /* Determine complete data size */
@@ -416,23 +384,23 @@ int Coll_allgather_ompi::allgather(const void *sbuf, int scount,
     */
     if (total_dsize < 50000) {
         if (pow2_size == communicator_size) {
-            return Coll_allgather_rdb::allgather(sbuf, scount, sdtype,
-                                                                     rbuf, rcount, rdtype,
-                                                                     comm);
+            return allgather__rdb(sbuf, scount, sdtype,
+                                  rbuf, rcount, rdtype,
+                                  comm);
         } else {
-            return Coll_allgather_bruck::allgather(sbuf, scount, sdtype,
-                                                         rbuf, rcount, rdtype,
-                                                         comm);
+            return allgather__bruck(sbuf, scount, sdtype,
+                                    rbuf, rcount, rdtype,
+                                    comm);
         }
     } else {
         if (communicator_size % 2) {
-            return Coll_allgather_ring::allgather(sbuf, scount, sdtype,
-                                                        rbuf, rcount, rdtype,
-                                                        comm);
+            return allgather__ring(sbuf, scount, sdtype,
+                                   rbuf, rcount, rdtype,
+                                   comm);
         } else {
-            return  Coll_allgather_ompi_neighborexchange::allgather(sbuf, scount, sdtype,
-                                                                     rbuf, rcount, rdtype,
-                                                                     comm);
+            return allgather__ompi_neighborexchange(sbuf, scount, sdtype,
+                                                    rbuf, rcount, rdtype,
+                                                    comm);
         }
     }
 
@@ -447,27 +415,27 @@ int Coll_allgather_ompi::allgather(const void *sbuf, int scount,
        - for everything else use ring.
     */
     if ((pow2_size == communicator_size) && (total_dsize < 524288)) {
-        return Coll_allgather_rdb::allgather(sbuf, scount, sdtype,
-                                                                 rbuf, rcount, rdtype,
-                                                                 comm);
+        return allgather__rdb(sbuf, scount, sdtype,
+                              rbuf, rcount, rdtype,
+                              comm);
     } else if (total_dsize <= 81920) {
-        return Coll_allgather_bruck::allgather(sbuf, scount, sdtype,
-                                                     rbuf, rcount, rdtype,
-                                                     comm);
+        return allgather__bruck(sbuf, scount, sdtype,
+                                rbuf, rcount, rdtype,
+                                comm);
     }
-    return Coll_allgather_ring::allgather(sbuf, scount, sdtype,
-                                                rbuf, rcount, rdtype,
-                                                comm);
+    return allgather__ring(sbuf, scount, sdtype,
+                           rbuf, rcount, rdtype,
+                           comm);
 #endif  /* defined(USE_MPICH2_DECISION) */
 }
 
-int Coll_allgatherv_ompi::allgatherv(const void *sbuf, int scount,
-                                               MPI_Datatype sdtype,
-                                               void* rbuf, const int *rcounts,
-                                               const int *rdispls,
-                                               MPI_Datatype rdtype,
-                                               MPI_Comm  comm
-                                               )
+int allgatherv__ompi(const void *sbuf, int scount,
+                     MPI_Datatype sdtype,
+                     void* rbuf, const int *rcounts,
+                     const int *rdispls,
+                     MPI_Datatype rdtype,
+                     MPI_Comm  comm
+                     )
 {
     int i;
     int communicator_size;
@@ -477,9 +445,9 @@ int Coll_allgatherv_ompi::allgatherv(const void *sbuf, int scount,
 
     /* Special case for 2 processes */
     if (communicator_size == 2) {
-        return Coll_allgatherv_pair::allgatherv(sbuf, scount, sdtype,
-                                                           rbuf, rcounts, rdispls, rdtype,
-                                                           comm);
+        return allgatherv__pair(sbuf, scount, sdtype,
+                                rbuf, rcounts, rdispls, rdtype,
+                                comm);
     }
 
     /* Determine complete data size */
@@ -491,30 +459,30 @@ int Coll_allgatherv_ompi::allgatherv(const void *sbuf, int scount,
 
     /* Decision based on allgather decision.   */
     if (total_dsize < 50000) {
-        return Coll_allgatherv_ompi_bruck::allgatherv(sbuf, scount, sdtype,
-                                                      rbuf, rcounts, rdispls, rdtype,
-                                                      comm);
+        return allgatherv__ompi_bruck(sbuf, scount, sdtype,
+                                      rbuf, rcounts, rdispls, rdtype,
+                                      comm);
 
     } else {
         if (communicator_size % 2) {
-            return Coll_allgatherv_ring::allgatherv(sbuf, scount, sdtype,
-                                                         rbuf, rcounts, rdispls, rdtype,
-                                                         comm);
+            return allgatherv__ring(sbuf, scount, sdtype,
+                                    rbuf, rcounts, rdispls, rdtype,
+                                    comm);
         } else {
-            return  Coll_allgatherv_ompi_neighborexchange::allgatherv(sbuf, scount, sdtype,
-                                                                      rbuf, rcounts, rdispls, rdtype,
-                                                                      comm);
+            return  allgatherv__ompi_neighborexchange(sbuf, scount, sdtype,
+                                                      rbuf, rcounts, rdispls, rdtype,
+                                                      comm);
         }
     }
 }
 
-int Coll_gather_ompi::gather(const void *sbuf, int scount,
-                                           MPI_Datatype sdtype,
-                                           void* rbuf, int rcount,
-                                           MPI_Datatype rdtype,
-                                           int root,
-                                           MPI_Comm  comm
-                                           )
+int gather__ompi(const void *sbuf, int scount,
+                 MPI_Datatype sdtype,
+                 void* rbuf, int rcount,
+                 MPI_Datatype rdtype,
+                 int root,
+                 MPI_Comm  comm
+                 )
 {
     //const int large_segment_size = 32768;
     //const int small_segment_size = 1024;
@@ -549,31 +517,31 @@ int Coll_gather_ompi::gather(const void *sbuf, int scount,
 /*                                                         root, comm);*/
 
 /*    } else*/ if (block_size > intermediate_block_size) {
-        return Coll_gather_ompi_linear_sync::gather (sbuf, scount, sdtype,
-                                                         rbuf, rcount, rdtype,
-                                                         root, comm);
+        return gather__ompi_linear_sync(sbuf, scount, sdtype,
+                                        rbuf, rcount, rdtype,
+                                        root, comm);
 
     } else if ((communicator_size > large_communicator_size) ||
                ((communicator_size > small_communicator_size) &&
                 (block_size < small_block_size))) {
-        return Coll_gather_ompi_binomial::gather (sbuf, scount, sdtype,
-                                                      rbuf, rcount, rdtype,
-                                                      root, comm);
+        return gather__ompi_binomial(sbuf, scount, sdtype,
+                                     rbuf, rcount, rdtype,
+                                     root, comm);
 
     }
     // Otherwise, use basic linear
-    return Coll_gather_ompi_basic_linear::gather (sbuf, scount, sdtype,
-                                                      rbuf, rcount, rdtype,
-                                                      root, comm);
+    return gather__ompi_basic_linear(sbuf, scount, sdtype,
+                                     rbuf, rcount, rdtype,
+                                     root, comm);
 }
 
 
-int Coll_scatter_ompi::scatter(const void *sbuf, int scount,
-                                            MPI_Datatype sdtype,
-                                            void* rbuf, int rcount,
-                                            MPI_Datatype rdtype,
-                                            int root, MPI_Comm  comm
-                                            )
+int scatter__ompi(const void *sbuf, int scount,
+                  MPI_Datatype sdtype,
+                  void* rbuf, int rcount,
+                  MPI_Datatype rdtype,
+                  int root, MPI_Comm  comm
+                  )
 {
     const size_t small_block_size = 300;
     const int small_comm_size = 10;
@@ -602,11 +570,11 @@ int Coll_scatter_ompi::scatter(const void *sbuf, int scount,
         scount = rcount;
         sdtype = rdtype;
       }
-      return Coll_scatter_ompi_binomial::scatter(sbuf, scount, sdtype, rbuf, rcount, rdtype, root, comm);
+      return scatter__ompi_binomial(sbuf, scount, sdtype, rbuf, rcount, rdtype, root, comm);
     }
-    return Coll_scatter_ompi_basic_linear::scatter (sbuf, scount, sdtype,
-                                                       rbuf, rcount, rdtype,
-                                                       root, comm);
+    return scatter__ompi_basic_linear(sbuf, scount, sdtype,
+                                      rbuf, rcount, rdtype,
+                                      root, comm);
 }
 
 }
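
The pattern shown in this selector file repeats throughout the commit: every call of the form Coll_<cat>_<impl>::<cat>(...) becomes a plain call to <cat>__<impl>(...), while the size and communicator-count thresholds in the decision functions stay untouched. A minimal, self-contained sketch of that calling convention (hypothetical names and simplified signatures, not SimGrid's actual API):

    #include <cstddef>
    #include <cstdio>

    namespace demo {
    // Two stand-in implementations of one collective, now plain functions.
    int reduce__binomial(int count) { std::printf("binomial, count=%d\n", count); return 0; }
    int reduce__pipeline(int count) { std::printf("pipeline, count=%d\n", count); return 0; }

    // Decision function: pick an implementation from the message size,
    // mirroring the threshold style of the reduce__ompi selector above.
    int reduce__selector(int count, std::size_t message_size) {
      if (message_size < 64 * 1024)
        return reduce__binomial(count);
      return reduce__pipeline(count);
    }
    } // namespace demo

    int main() { return demo::reduce__selector(1, 128 * 1024); }
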
index 537510b..0264478 100644
 #define COLL_DESCRIPTION(cat, ret, args, name)                                                                         \
   {                                                                                                                    \
     _XBT_STRINGIFY(name)                                                                                               \
-    , _XBT_STRINGIFY(cat) " " _XBT_STRINGIFY(name) " collective", (void*)_XBT_CONCAT4(Coll_, cat, _, name)::cat        \
+    , _XBT_STRINGIFY(cat) " " _XBT_STRINGIFY(name) " collective", (void*)_XBT_CONCAT3(cat, __, name)        \
   }
 
 #define COLL_PROTO(cat, ret, args, name)                                                                               \
-  class _XBT_CONCAT4(Coll_, cat, _, name) : public Coll {                                                              \
-  public:                                                                                                              \
-    static ret cat(COLL_UNPAREN args);                                                                                 \
-  };
+  ret _XBT_CONCAT3(cat, __, name) args;
 
 #define COLL_UNPAREN(...)  __VA_ARGS__
 
@@ -96,6 +93,10 @@ public:
   static void set_collectives();
 
   // for each collective type, create the set_* prototype, the description array and the function pointer
+//  static void set_gather(const std::string& name);
+//  static s_mpi_coll_description_t mpi_coll_gather_description[];
+//  static int(*gather)(const void *send_buff, int send_count, MPI_Datatype send_type, void *recv_buff, int recv_count, MPI_Datatype recv_type,
+//                      int root, MPI_Comm comm);
   COLL_APPLY(COLL_DEFS, COLL_GATHER_SIG, "")
   COLL_APPLY(COLL_DEFS, COLL_ALLGATHER_SIG, "")
   COLL_APPLY(COLL_DEFS, COLL_ALLGATHERV_SIG, "")
@@ -160,22 +161,6 @@ public:
   static void (*smpi_coll_cleanup_callback)();
 };
 
-class Coll {
-public:
-  // for each collective type, create a function member
-  COLL_APPLY(COLL_SIG, COLL_GATHER_SIG, "")
-  COLL_APPLY(COLL_SIG, COLL_ALLGATHER_SIG, "")
-  COLL_APPLY(COLL_SIG, COLL_ALLGATHERV_SIG, "")
-  COLL_APPLY(COLL_SIG, COLL_REDUCE_SIG, "")
-  COLL_APPLY(COLL_SIG, COLL_ALLREDUCE_SIG, "")
-  COLL_APPLY(COLL_SIG, COLL_REDUCE_SCATTER_SIG, "")
-  COLL_APPLY(COLL_SIG, COLL_SCATTER_SIG, "")
-  COLL_APPLY(COLL_SIG, COLL_BARRIER_SIG, "")
-  COLL_APPLY(COLL_SIG, COLL_BCAST_SIG, "")
-  COLL_APPLY(COLL_SIG, COLL_ALLTOALL_SIG, "")
-  COLL_APPLY(COLL_SIG, COLL_ALLTOALLV_SIG, "")
-};
-
 /*************
  * GATHER *
  *************/
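
The COLL_PROTO and COLL_DESCRIPTION changes in this header are what make the mass rename mechanical: COLL_PROTO now emits a free-function prototype named <cat>__<name> instead of generating a one-method class, and COLL_DESCRIPTION stores a pointer to that function. A minimal sketch of the expansion (CONCAT3, STRINGIFY and coll_description are simplified stand-ins for _XBT_CONCAT3, _XBT_STRINGIFY and s_mpi_coll_description_t; the signatures are reduced to a single int parameter):

    #include <cstdio>

    // Token-pasting and stringizing helpers (stand-ins for the xbt macros).
    #define CONCAT3_(a, b, c) a##b##c
    #define CONCAT3(a, b, c) CONCAT3_(a, b, c)
    #define STRINGIFY_(x) #x
    #define STRINGIFY(x) STRINGIFY_(x)

    // New-style prototype macro: a plain function declaration, no class generated.
    #define COLL_PROTO(cat, ret, args, name) ret CONCAT3(cat, __, name) args;

    // New-style description entry: name, description string, pointer to the function.
    #define COLL_DESCRIPTION(cat, ret, args, name)                         \
      { STRINGIFY(name), STRINGIFY(cat) " " STRINGIFY(name) " collective", \
        (void*)CONCAT3(cat, __, name) }

    struct coll_description {  // simplified stand-in for s_mpi_coll_description_t
      const char* name;
      const char* description;
      void* fn;
    };

    COLL_PROTO(gather, int, (int count), demo)  // declares: int gather__demo(int count);

    int gather__demo(int count) { return count; }

    static coll_description gather_descriptions[] = {COLL_DESCRIPTION(gather, int, (int), demo)};

    int main() {
      std::printf("%s: %s\n", gather_descriptions[0].name, gather_descriptions[0].description);
      return gather_descriptions[0].fn != nullptr ? 0 : 1;
    }
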
index be9d0fa..fdd8b69 100644
@@ -250,7 +250,7 @@ MPI_Comm Comm::split(int color, int key)
   } else {
     recvbuf = nullptr;
   }
-  Coll_gather_default::gather(sendbuf, 2, MPI_INT, recvbuf, 2, MPI_INT, 0, this);
+  gather__default(sendbuf, 2, MPI_INT, recvbuf, 2, MPI_INT, 0, this);
   xbt_free(sendbuf);
   /* Do the actual job */
   if (myrank == 0) {
@@ -393,7 +393,7 @@ void Comm::init_smp(){
   std::fill_n(leaders_map, comm_size, 0);
   std::fill_n(leader_list, comm_size, -1);
 
-  Coll_allgather_ring::allgather(&leader, 1, MPI_INT , leaders_map, 1, MPI_INT, this);
+  allgather__ring(&leader, 1, MPI_INT , leaders_map, 1, MPI_INT, this);
 
   if (smpi_privatize_global_variables == SmpiPrivStrategies::MMAP) {
     // we need to switch as the called function may silently touch global variables
@@ -451,7 +451,7 @@ void Comm::init_smp(){
   if(comm_intra->rank()==0) {
     int is_uniform       = 1;
     int* non_uniform_map = xbt_new0(int,leader_group_size);
-    Coll_allgather_ring::allgather(&my_local_size, 1, MPI_INT,
+    allgather__ring(&my_local_size, 1, MPI_INT,
         non_uniform_map, 1, MPI_INT, leader_comm);
     for(i=0; i < leader_group_size; i++) {
       if(non_uniform_map[0] != non_uniform_map[i]) {
@@ -466,7 +466,7 @@ void Comm::init_smp(){
     }
     is_uniform_=is_uniform;
   }
-  Coll_bcast_scatter_LR_allgather::bcast(&(is_uniform_),1, MPI_INT, 0, comm_intra );
+  bcast__scatter_LR_allgather(&(is_uniform_),1, MPI_INT, 0, comm_intra );
 
   if (smpi_privatize_global_variables == SmpiPrivStrategies::MMAP) {
     // we need to switch as the called function may silently touch global variables
@@ -485,7 +485,7 @@ void Comm::init_smp(){
   }
 
   int global_blocked;
-  Coll_allreduce_default::allreduce(&is_blocked, &(global_blocked), 1, MPI_INT, MPI_LAND, this);
+  allreduce__default(&is_blocked, &(global_blocked), 1, MPI_INT, MPI_LAND, this);
 
   if(MPI_COMM_WORLD==MPI_COMM_UNINITIALIZED || this==MPI_COMM_WORLD){
     if(this->rank()==0){