// Fast path: when the whole send buffer is a single shared block covering
// exactly buff_size bytes, the copy is skipped entirely (early return).
if(smpi_is_shared(buff, src_private_blocks, &src_offset)) {
src_private_blocks = shift_and_frame_private_blocks(src_private_blocks, src_offset, buff_size);
if (src_private_blocks.size()==1 && (src_private_blocks[0].second - src_private_blocks[0].first)==buff_size){//simple shared malloc ... return.
- XBT_DEBUG("Sender %p is shared. Let's ignore it.", buff);
+ XBT_VERB("Sender is shared. Let's ignore it.");
smpi_cleanup_comm_after_copy(comm, buff);
return;
}
// Fast path: when the whole receive buffer is a single shared block covering
// exactly buff_size bytes, the copy is skipped entirely (early return),
// mirroring the sender-side check above.
if (smpi_is_shared((char*)comm->dst_buff_, dst_private_blocks, &dst_offset)) {
dst_private_blocks = shift_and_frame_private_blocks(dst_private_blocks, dst_offset, buff_size);
// Fix copy-paste bug: test the receiver's blocks (dst_private_blocks), not
// the sender's (src_private_blocks), otherwise this branch triggers on the
// sender's layout even when the receive buffer is not fully shared.
if (dst_private_blocks.size()==1 && (dst_private_blocks[0].second - dst_private_blocks[0].first)==buff_size){//simple shared malloc ... return.
- XBT_DEBUG("Receiver %p is shared. Let's ignore it.", (char*)comm->dst_buff_);
+ XBT_VERB("Receiver is shared. Let's ignore it.");
smpi_cleanup_comm_after_copy(comm, buff);
return;
}
if(smpi_is_shared(sendbuf,private_blocks,&offset)
&& (private_blocks.size()==1
&& (private_blocks[0].second - private_blocks[0].first)==(unsigned long)(sendcount * sendtype->get_extent()))){
+ XBT_VERB("sendbuf is shared. Ignoring copies");
return 0;
}
if(smpi_is_shared(recvbuf,private_blocks,&offset)
&& (private_blocks.size()==1
&& (private_blocks[0].second - private_blocks[0].first)==(unsigned long)(recvcount * recvtype->get_extent()))){
+ XBT_VERB("recvbuf is shared. Ignoring copies");
return 0;
}
foreach(x coll-allgather coll-allgatherv coll-allreduce coll-alltoall coll-alltoallv coll-barrier coll-bcast
coll-gather coll-reduce coll-reduce-scatter coll-scatter macro-sample pt2pt-dsend pt2pt-pingpong
type-hvector type-indexed type-struct type-vector bug-17132 gh-139 timers privatization
- io-simple io-simple-at io-all io-all-at io-shared io-ordered)
+ io-simple io-simple-at io-all io-all-at io-shared io-ordered topo-cart-sub)
add_executable (${x} EXCLUDE_FROM_ALL ${x}/${x}.c)
target_link_libraries(${x} simgrid)
set_target_properties(${x} PROPERTIES RUNTIME_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/${x})
coll-gather coll-reduce coll-reduce-scatter coll-scatter macro-sample pt2pt-dsend pt2pt-pingpong
type-hvector type-indexed type-struct type-vector bug-17132 gh-139 timers privatization
macro-shared auto-shared macro-partial-shared macro-partial-shared-communication
- io-simple io-simple-at io-all io-all-at io-shared io-ordered)
+ io-simple io-simple-at io-all io-all-at io-shared io-ordered topo-cart-sub)
set(tesh_files ${tesh_files} ${CMAKE_CURRENT_SOURCE_DIR}/${x}/${x}.tesh)
set(teshsuite_src ${teshsuite_src} ${CMAKE_CURRENT_SOURCE_DIR}/${x}/${x}.c)
endforeach()
# Register one tesh integration test per example; each test runs under all
# four context factories (thread, ucontext, raw, boost).
foreach(x coll-allgather coll-allgatherv coll-allreduce coll-alltoall coll-alltoallv coll-barrier coll-bcast
coll-gather coll-reduce coll-reduce-scatter coll-scatter macro-sample pt2pt-dsend pt2pt-pingpong
- type-hvector type-indexed type-struct type-vector bug-17132 timers io-simple io-simple-at io-all io-all-at io-shared io-ordered)
+ type-hvector type-indexed type-struct type-vector bug-17132 timers io-simple io-simple-at io-all io-all-at io-shared io-ordered topo-cart-sub)
ADD_TESH_FACTORIES(tesh-smpi-${x} "thread;ucontext;raw;boost" --setenv platfdir=${CMAKE_HOME_DIRECTORY}/examples/platforms --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/platforms --setenv bindir=${CMAKE_BINARY_DIR}/teshsuite/smpi/${x} --cd ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/${x} ${x}.tesh)
endforeach()
printf("[%d] After change, the value in the shared buffer is: %" PRIu64"\n", rank, *buf);
+ //try to send/receive shared data, to check if we skip the copies correctly.
+ if(rank==0)
+ MPI_Send(buf, 1, MPI_AINT, 1, 100, MPI_COMM_WORLD);
+ else if (rank ==1)
+ MPI_Recv(buf, 1, MPI_AINT, 0, 100, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
+
+  //same thing with an MPI_IN_PLACE collective, to check that the copies are also skipped in that case
+ if (rank == 0)
+ MPI_Scatter(buf, 1, MPI_AINT, MPI_IN_PLACE, -1, MPI_DATATYPE_NULL, 0, MPI_COMM_WORLD);
+ else
+ MPI_Scatter(NULL, -1, MPI_DATATYPE_NULL, buf, 1, MPI_AINT, 0, MPI_COMM_WORLD);
SMPI_SHARED_FREE(buf);
MPI_Finalize();
p Test compute
! output sort
! timeout 5
-$ ${bindir:=.}/../../../smpi_script/bin/smpirun -hostfile ../hostfile -platform ../../../examples/platforms/small_platform.xml -np 4 ${bindir:=.}/macro-shared --log=smpi_kernel.thres:warning --log=xbt_cfg.thres:warning
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -hostfile ../hostfile -platform ../../../examples/platforms/small_platform.xml -np 4 ${bindir:=.}/macro-shared --log=smpi_kernel.thres:warning --log=xbt_cfg.thres:warning --log=smpi_kernel.thres:verbose "--log=root.fmt:(%P@%h)%e%m%n"
+> (maestro@) You did not set the power of the host running the simulation. The timings will certainly not be accurate. Use the option "--cfg=smpi/host-speed:<flops>" to set its value. Check https://simgrid.org/doc/latest/Configuring_SimGrid.html#automatic-benchmarking-of-smpi-code for more information.
+> (maestro@) Receiver is shared. Let's ignore it.
+> (maestro@) Receiver is shared. Let's ignore it.
+> (maestro@) Receiver is shared. Let's ignore it.
+> (maestro@) Receiver is shared. Let's ignore it.
> [0] After change, the value in the shared buffer is: 16053117601147974045
> [0] The value in the shared buffer is: 4
> [1] After change, the value in the shared buffer is: 16053117601147974045
--- /dev/null
+/* Copyright (c) 2019. Jonathan Borne.
+*/
+/* Copyright (c) 2009-2019. The SimGrid Team.
+ * All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <mpi.h>
+
+#define DIM 2
+#define Q 2
+/* Where DIM is the dimension of the Grid (2D) */
+/* and Q is the number of processes per dimension */
+#define N 3
+/* Local matrices size N*N */
+
+/* Functional test for MPI_Cart_create / MPI_Cart_coords / MPI_Cart_sub:
+ each rank fills an NxN matrix with its world rank on the diagonal, then
+ the column-0 process of each grid row broadcasts its matrix along the
+ row communicator obtained from MPI_Cart_sub. */
+int main(int argc, char **argv){
+ /* Nb of nodes in the grid:
+ initialized by MPI_Comm_size according to commandline -np value */
+ int nbNodes;
+
+ /* Communicators */
+ MPI_Comm gridComm, lineComm;
+ /* Current process ranks */
+ int rank, gridSize, myGridRank, myLineRank, myColRank;
+ /* coords: used to get myLineRank and myColRank
+ initialized by MPI_Cart_coords
+ */
+ int coords[DIM];
+ /* dims: Integer array of size ndims specifying the number
+ of processes in each dimension.
+ Here every entry is preset to Q, so MPI_Dims_create (which would
+ fill in entries left at 0) is not needed.
+ */
+ int dims[DIM];
+ for(int i=0; i<DIM; i++){
+ dims[i] = Q;
+ }
+ /* periods:
+ Logical array of size ndims specifying whether the grid is
+ periodic (true) or not (false) in each dimension. */
+ int periods[DIM];
+ for(int i=0; i<DIM; i++){
+ periods[i] = 1;
+ }
+ /* reorder: do not allow rank reordering when creating the grid comm */
+ int reorder = 0;
+ /* remainDims[]: used to set which dimension is kept in subcommunicators */
+ int remainDim[DIM];
+
+ /* Local Matrix */
+ int *A = (int *)malloc(N * N * sizeof(int));
+
+ /* Init */
+ MPI_Init(&argc, &argv);
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &nbNodes);
+
+ printf("rank %d: Alive \n", rank);
+
+ MPI_Barrier(MPI_COMM_WORLD);
+
+ /* Create the DIM-dimensional grid communicator (dims[] is already
+ set to Q processes per dimension above) */
+ MPI_Cart_create(MPI_COMM_WORLD, DIM, dims, periods, reorder,
+ &gridComm);
+
+ if(gridComm == MPI_COMM_NULL) printf("error grid NULLCOMM \n");
+
+ MPI_Comm_rank(gridComm, &myGridRank);
+ MPI_Comm_size(gridComm, &gridSize);
+ MPI_Cart_coords(gridComm, myGridRank, DIM, coords);
+ myLineRank = coords[0];
+ myColRank = coords[1];
+
+ MPI_Barrier(MPI_COMM_WORLD);
+
+ /* Create a line communicator for current process:
+ remainDim = {0,1} drops dimension 0 and keeps dimension 1, so each
+ sub-communicator groups the processes of one grid row. */
+ remainDim[0] = 0;
+ remainDim[1] = 1;
+ MPI_Cart_sub(gridComm, remainDim , &lineComm);
+
+ /* Check if lineComm was initialized */
+ if(lineComm == MPI_COMM_NULL) printf("(%d,%d): ERR (lineComm == NULLCOMM)\n",
+ myLineRank, myColRank);
+
+ /* A Initialization: world rank on the diagonal, zero elsewhere */
+ for(int i=0; i<N; i++){
+ for(int j=0; j<N; j++){
+ *(A + (i*N) + j) = i==j?rank:0;
+ }
+ }
+
+ MPI_Barrier(MPI_COMM_WORLD);
+
+ /* Broadcast A from rank 0 of each lineComm (the column-0 process of the row) */
+ int root = 0;
+ MPI_Bcast(A, N*N, MPI_INT, root, lineComm);
+
+ /* Print A */
+ printf("process:(%d,%d) \n", myLineRank,
+ myColRank);
+
+ printf("-------------------\n");
+ for(int i=0; i<N; i++){
+ for(int j=0; j<N; j++){
+ printf("%d ", *(A + (i*N) + j));
+ }
+ printf("\n");
+ }
+ printf("-------------------\n");
+ free(A);
+ MPI_Barrier (MPI_COMM_WORLD);
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+p Test cart_sub
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile -platform ../../../examples/platforms/small_platform.xml -np 4 ${bindir:=.}/topo-cart-sub -q --log=smpi_kernel.thres:warning --log=xbt_cfg.thres:warning
+> [rank 0] -> Tremblay
+> [rank 1] -> Jupiter
+> [rank 2] -> Fafard
+> [rank 3] -> Ginette
+> rank 0: Alive
+> rank 1: Alive
+> rank 2: Alive
+> rank 3: Alive
+> process:(0,0)
+> -------------------
+> 0 0 0
+> 0 0 0
+> 0 0 0
+> -------------------
+> process:(1,0)
+> -------------------
+> 2 0 0
+> 0 2 0
+> 0 0 2
+> -------------------
+> process:(0,1)
+> -------------------
+> 0 0 0
+> 0 0 0
+> 0 0 0
+> -------------------
+> process:(1,1)
+> -------------------
+> 2 0 0
+> 0 2 0
+> 0 0 2
+> -------------------
+
get_python(){
found=$(grep -c "Compile Python bindings .....: ON" ./consoleText)
if [ $found != 0 ]; then
- echo "✔"
+ grep -m 1 "Found PythonInterp" ./consoleText| sed "s/.*-- Found PythonInterp.*found suitable version \"\([a-zA-Z0-9\.]*\)\",.*/\1/g"
else
echo ""
fi