AND (Algorithmique Numérique Distribuée)

Public GIT Repository
Add simple example using smpi_execute_* calls inside MPI code to simulate computation.
author    Augustin Degomme <adegomme@gmail.com>
Tue, 2 Mar 2021 22:36:56 +0000 (23:36 +0100)
committer Augustin Degomme <adegomme@gmail.com>
Tue, 2 Mar 2021 22:37:13 +0000 (23:37 +0100)
This could double as a basic MPI example, as we are missing one, I think.

MANIFEST.in
examples/smpi/CMakeLists.txt
examples/smpi/simple-execute/simple-execute.c [new file with mode: 0644]
examples/smpi/simple-execute/simple-execute.tesh [new file with mode: 0644]
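In a nutshell, the pattern the new example demonstrates (a minimal sketch, assuming SMPI's <mpi.h> pulls in the smpi_execute_* declarations, as it does when building with smpicc; the complete file is in the diff below):

#include <mpi.h>

int main(int argc, char* argv[])
{
  MPI_Init(&argc, &argv);

  /* ... exchange messages with MPI_Send/MPI_Recv ... */

  /* Block this rank for 5 simulated seconds of computation */
  smpi_execute_benched(5.0);

  /* Block this rank for the time needed to process 762.96 Mflops
     at the speed of the simulated host it runs on */
  smpi_execute_flops_benched(762960000);

  MPI_Finalize();
  return 0;
}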

diff --git a/MANIFEST.in b/MANIFEST.in
index ca71a3a..f85de4f 100644
@@ -631,6 +631,8 @@ include examples/smpi/replay_multiple_manual_deploy/workload_compute_consecutive
 include examples/smpi/replay_multiple_manual_deploy/workload_compute_simple
 include examples/smpi/replay_multiple_manual_deploy/workload_mixed2_same_time
 include examples/smpi/replay_multiple_manual_deploy/workload_mixed2_same_time_and_resources
+include examples/smpi/simple-execute/simple-execute.c
+include examples/smpi/simple-execute/simple-execute.tesh
 include examples/smpi/smpi_s4u_masterworker/deployment_masterworker_mailbox_smpi.xml
 include examples/smpi/smpi_s4u_masterworker/masterworker_mailbox_smpi.cpp
 include examples/smpi/smpi_s4u_masterworker/s4u_smpi.tesh
diff --git a/examples/smpi/CMakeLists.txt b/examples/smpi/CMakeLists.txt
index 0e8feaa..cbe0b34 100644
@@ -5,7 +5,7 @@ if(enable_smpi)
 
   file(MAKE_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}/mc/")
 
-  foreach(x replay ampi_test trace trace_simple trace_call_location energy gemm)
+  foreach(x replay ampi_test trace trace_simple trace_call_location energy gemm simple-execute)
     add_executable       (smpi_${x} EXCLUDE_FROM_ALL ${CMAKE_CURRENT_SOURCE_DIR}/${x}/${x})
     target_link_libraries(smpi_${x} simgrid)
     set_target_properties(smpi_${x} PROPERTIES RUNTIME_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/${x})
@@ -28,7 +28,7 @@ endif()
 foreach(x ampi_test replay)
   set(examples_src  ${examples_src}  ${CMAKE_CURRENT_SOURCE_DIR}/${x}/${x}.cpp)
 endforeach()
-foreach(x trace trace_simple trace_call_location energy gemm)
+foreach(x trace trace_simple trace_call_location energy gemm simple-execute)
   set(examples_src  ${examples_src}  ${CMAKE_CURRENT_SOURCE_DIR}/${x}/${x}.c)
 endforeach()
 foreach(x bugged1 bugged2 bugged1_liveness only_send_deterministic mutual_exclusion non_termination1
@@ -39,6 +39,7 @@ endforeach()
 set(examples_src  ${examples_src}                                                                          PARENT_SCOPE)
 set(tesh_files    ${tesh_files}    ${CMAKE_CURRENT_SOURCE_DIR}/energy/energy.tesh
                                    ${CMAKE_CURRENT_SOURCE_DIR}/trace/trace.tesh
+                                   ${CMAKE_CURRENT_SOURCE_DIR}/simple-execute/simple-execute.tesh
                                    ${CMAKE_CURRENT_SOURCE_DIR}/gemm/gemm.tesh
                                    ${CMAKE_CURRENT_SOURCE_DIR}/trace_simple/trace_simple.tesh
                                    ${CMAKE_CURRENT_SOURCE_DIR}/trace_call_location/trace_call_location.tesh
@@ -79,6 +80,7 @@ if(enable_smpi)
   endif()
 
   ADD_TESH(smpi-tracing        --setenv bindir=${CMAKE_BINARY_DIR}/examples/smpi/trace --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/smpi/trace --setenv platfdir=${CMAKE_HOME_DIRECTORY}/examples/platforms --cd ${CMAKE_BINARY_DIR}/examples/smpi/trace ${CMAKE_HOME_DIRECTORY}/examples/smpi/trace/trace.tesh)
+  ADD_TESH(smpi-simple-execute       --setenv bindir=${CMAKE_BINARY_DIR}/examples/smpi/simple-execute --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/smpi/simple-execute --setenv platfdir=${CMAKE_HOME_DIRECTORY}/examples/platforms --cd ${CMAKE_BINARY_DIR}/examples/smpi/simple-execute ${CMAKE_HOME_DIRECTORY}/examples/smpi/simple-execute/simple-execute.tesh)
   ADD_TESH(smpi-tracing-simple --setenv bindir=${CMAKE_BINARY_DIR}/examples/smpi/trace_simple --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/smpi/trace_simple --setenv platfdir=${CMAKE_HOME_DIRECTORY}/examples/platforms --cd ${CMAKE_BINARY_DIR}/examples/smpi/trace_simple ${CMAKE_HOME_DIRECTORY}/examples/smpi/trace_simple/trace_simple.tesh)
   ADD_TESH(smpi-tracing-call-location --setenv bindir=${CMAKE_BINARY_DIR}/examples/smpi/trace_call_location --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/smpi --setenv platfdir=${CMAKE_HOME_DIRECTORY}/examples/platforms --cd ${CMAKE_BINARY_DIR}/examples/smpi/trace_call_location ${CMAKE_HOME_DIRECTORY}/examples/smpi/trace_call_location/trace_call_location.tesh)
   ADD_TESH(smpi-replay         --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/smpi --setenv platfdir=${CMAKE_HOME_DIRECTORY}/examples/platforms --cd ${CMAKE_BINARY_DIR}/examples/smpi ${CMAKE_HOME_DIRECTORY}/examples/smpi/replay/replay.tesh)
diff --git a/examples/smpi/simple-execute/simple-execute.c b/examples/smpi/simple-execute/simple-execute.c
new file mode 100644
index 0000000..1b5fb81
--- /dev/null
+++ b/examples/smpi/simple-execute/simple-execute.c
@@ -0,0 +1,71 @@
+/* A simple ping-pong example testing MPI_Send and MPI_Recv, with injected computation */
+
+/* Copyright (c) 2009-2021. The SimGrid Team.
+ * All rights reserved.                                                     */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
+#include <stdio.h>
+#include <stdlib.h> /* for exit() */
+#include <mpi.h>
+
+/* This test performs a simple ping-pong between 2 processes.
+   Each process injects computation with smpi_execute_benched or smpi_execute_flops_benched. */
+
+int main(int argc, char *argv[])
+{
+  const int tag1 = 42; /* Message tags */
+  const int tag2 = 43;
+  int size;
+  int rank;
+  int msg = 99;
+  MPI_Status status;
+  int err = MPI_Init(&argc, &argv); /* Initialize MPI */
+
+  if (err != MPI_SUCCESS) {
+    printf("MPI initialization failed!\n");
+    exit(1);
+  }
+  MPI_Comm_size(MPI_COMM_WORLD, &size);   /* Get the number of processes */
+  MPI_Comm_rank(MPI_COMM_WORLD, &rank);   /* Get the rank of this process */
+  if (size != 2) {
+    printf("run this program with exactly 2 processes (-np 2)\n");
+    MPI_Finalize();
+    exit(0);
+  }
+  if (0 == rank) {
+    printf("\n    *** Ping-pong test (MPI_Send/MPI_Recv) ***\n\n");
+  }
+
+  /* start ping-pong tests between several pairs */
+  if ( rank == 0) {
+    int dst = 1;
+    printf("[%d] About to send 1st message '%d' to process [%d]\n", rank, msg, dst);
+    MPI_Send(&msg, 1, MPI_INT, dst, tag1, MPI_COMM_WORLD);
+
+    /* Inject five seconds of fake computation time. This is a public example, not
+       SimGrid-internal code, so the _benched flavour is preferred: it protects
+       against accidentally skipping the injection. smpi_execute_benched is mostly
+       equivalent to sleep (intercepted by SMPI), differing only in energy accounting. */
+    smpi_execute_benched(5.0);
+
+    MPI_Recv(&msg, 1, MPI_INT, dst, tag2, MPI_COMM_WORLD, &status);     /* Receive a message */
+    printf("[%d] Received reply message '%d' from process [%d]\n", rank, msg, dst);
+  } else {
+    int src = 0;
+    MPI_Recv(&msg, 1, MPI_INT, src, tag1, MPI_COMM_WORLD, &status);     /* Receive a message */
+    printf("[%d] Received 1st message '%d' from process [%d]\n", rank, msg, src);
+    msg++;
+
+    /* Inject 762.96 Mflops of computation: host Jupiter computes 76.296 Mf per second, so this amounts to 10 s. */
+    /* As above, the _benched flavour is preferred in this public example, as it protects against accidental skips. */
+    smpi_execute_flops_benched(762960000);
+
+    printf("[%d] After a nap, increment message's value to  '%d'\n", rank, msg);
+    printf("[%d] About to send back message '%d' to process [%d]\n", rank, msg, src);
+    MPI_Send(&msg, 1, MPI_INT, src, tag2, MPI_COMM_WORLD);
+  }
+  MPI_Finalize();
+  return 0;
+}
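For reference, a sketch of the injection entry points used above, as declared in SimGrid's public <smpi/smpi.h> at the time of this commit (signatures may vary across versions):

/* Inject simulated time (seconds) or work (flops) on the current host. */
void smpi_execute(double duration);            /* SimGrid-internal use       */
void smpi_execute_flops(double flops);         /* SimGrid-internal use       */
void smpi_execute_benched(double duration);    /* safe from application code */
void smpi_execute_flops_benched(double flops); /* safe from application code */

The _benched flavours suspend SMPI's benchmarking of user code around the injection, which is why they are the ones to call from application code, as the comments in the example explain.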
diff --git a/examples/smpi/simple-execute/simple-execute.tesh b/examples/smpi/simple-execute/simple-execute.tesh
new file mode 100644
index 0000000..143fbf5
--- /dev/null
+++ b/examples/smpi/simple-execute/simple-execute.tesh
@@ -0,0 +1,39 @@
+p Simple ping-pong test with smpi_execute_* calls to demonstrate SMPI computation injection
+! output sort
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ${srcdir:=.}/../hostfile -platform ${platfdir:=.}/small_platform.xml -np 2 ${bindir:=.}/smpi_simple-execute -s --log=smpi_config.thres:warning --log=xbt_cfg.thres:warning --cfg=smpi/simulate-computation:no --log=smpi_mpi.thres:verbose --log=smpi_bench.thres:debug --log=no_loc
+> [rank 0] -> Tremblay
+> [rank 1] -> Jupiter
+> [Tremblay:0:(1) 0.000000] [smpi_mpi/VERBOSE] SMPI - Entering MPI_Init
+> [Jupiter:1:(2) 0.000000] [smpi_mpi/VERBOSE] SMPI - Entering MPI_Init
+> [Tremblay:0:(1) 0.000000] [smpi_mpi/VERBOSE] SMPI - Leaving MPI_Init
+> [Tremblay:0:(1) 0.000000] [smpi_mpi/VERBOSE] SMPI - Entering MPI_Comm_size
+> [Tremblay:0:(1) 0.000000] [smpi_mpi/VERBOSE] SMPI - Leaving MPI_Comm_size
+> [Tremblay:0:(1) 0.000000] [smpi_mpi/VERBOSE] SMPI - Entering MPI_Comm_rank
+> [Tremblay:0:(1) 0.000000] [smpi_mpi/VERBOSE] SMPI - Leaving MPI_Comm_rank
+> [Tremblay:0:(1) 0.000000] [smpi_mpi/VERBOSE] SMPI - Entering MPI_Send
+> [Jupiter:1:(2) 0.000000] [smpi_mpi/VERBOSE] SMPI - Leaving MPI_Init
+> [Jupiter:1:(2) 0.000000] [smpi_mpi/VERBOSE] SMPI - Entering MPI_Comm_size
+> [Jupiter:1:(2) 0.000000] [smpi_mpi/VERBOSE] SMPI - Leaving MPI_Comm_size
+> [Jupiter:1:(2) 0.000000] [smpi_mpi/VERBOSE] SMPI - Entering MPI_Comm_rank
+> [Jupiter:1:(2) 0.000000] [smpi_mpi/VERBOSE] SMPI - Leaving MPI_Comm_rank
+> [Jupiter:1:(2) 0.000000] [smpi_mpi/VERBOSE] SMPI - Entering MPI_Recv
+> [Tremblay:0:(1) 0.000000] [smpi_mpi/VERBOSE] SMPI - Leaving MPI_Send
+> [Tremblay:0:(1) 0.000000] [smpi_bench/DEBUG] Handle real computation time: 490475000.000000 flops
+> [Jupiter:1:(2) 0.002948] [smpi_mpi/VERBOSE] SMPI - Leaving MPI_Recv
+> [Jupiter:1:(2) 0.002948] [smpi_bench/DEBUG] Handle real computation time: 762960000.000000 flops
+> [Tremblay:0:(1) 5.000000] [smpi_mpi/VERBOSE] SMPI - Entering MPI_Recv
+> [Jupiter:1:(2) 10.002948] [smpi_mpi/VERBOSE] SMPI - Entering MPI_Send
+> [Jupiter:1:(2) 10.002948] [smpi_mpi/VERBOSE] SMPI - Leaving MPI_Send
+> [Jupiter:1:(2) 10.002948] [smpi_mpi/VERBOSE] SMPI - Entering MPI_Finalize
+> [Jupiter:1:(2) 10.002948] [smpi_mpi/VERBOSE] SMPI - Leaving MPI_Finalize
+> [Tremblay:0:(1) 10.005896] [smpi_mpi/VERBOSE] SMPI - Leaving MPI_Recv
+> [Tremblay:0:(1) 10.005896] [smpi_mpi/VERBOSE] SMPI - Entering MPI_Finalize
+> [Tremblay:0:(1) 10.005896] [smpi_mpi/VERBOSE] SMPI - Leaving MPI_Finalize
+> 
+>     *** Ping-pong test (MPI_Send/MPI_Recv) ***
+> 
+> [0] About to send 1st message '99' to process [1]
+> [1] Received 1st message '99' from process [0]
+> [1] After a nap, incremented the message's value to '100'
+> [1] About to send back message '100' to process [0]
+> [0] Received reply message '100' from process [1]
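A quick sanity check of the timestamps above: Jupiter receives the first message after 0.002948 s of network time, then injects 762960000 flops / 76.296 Mf per second = 10 s of computation, so it sends the reply at t = 10.002948. Tremblay's 5 s injection is logged as 5 x 98095000 = 490475000 flops (Tremblay's speed in small_platform.xml), so it posts its MPI_Recv at t = 5.000000 and completes it one network hop later, at t = 10.005896.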