# teshsuite/xbt
ADD_TEST(xbt-log-large ${TESH_COMMAND} ${TESH_OPTION} --cd ${CMAKE_BINARY_DIR}/teshsuite ${CMAKE_HOME_DIRECTORY}/teshsuite/xbt/log_large_test.tesh)
ADD_TEST(xbt-log-parallel ${TESH_COMMAND} ${TESH_OPTION} --cd ${CMAKE_BINARY_DIR}/teshsuite ${CMAKE_HOME_DIRECTORY}/teshsuite/xbt/parallel_log_crashtest.tesh)
- IF(HAVE_MMAP)
+ IF(HAVE_MMALLOC)
IF(${ARCH_32_BITS})
ADD_TEST(xbt-mmalloc-32 ${TESH_COMMAND} ${TESH_OPTION} --cd ${CMAKE_BINARY_DIR}/teshsuite ${CMAKE_HOME_DIRECTORY}/teshsuite/xbt/mmalloc_32.tesh)
ELSE()
)
if(HAVE_thread_storage_run)
- set(HAVE_THREAD_LOCAL_STORAGE 0)
-else()
set(HAVE_THREAD_LOCAL_STORAGE 1)
+else()
+ set(HAVE_THREAD_LOCAL_STORAGE 0)
endif()
# Our usage of mmap is Linux-specific (flag MAP_ANONYMOUS), but kFreeBSD uses a GNU libc
IF(NOT "${CMAKE_SYSTEM}" MATCHES "Linux" AND NOT "${CMAKE_SYSTEM}" MATCHES "kFreeBSD" AND NOT "${CMAKE_SYSTEM}" MATCHES "GNU" AND NOT "${CMAKE_SYSTEM}" MATCHES "Darwin")
SET(HAVE_MMAP 0)
message(STATUS "Warning: MMAP is thought as non functional on this architecture (${CMAKE_SYSTEM})")
-ENDIF(NOT "${CMAKE_SYSTEM}" MATCHES "Linux" AND NOT "${CMAKE_SYSTEM}" MATCHES "kFreeBSD" AND NOT "${CMAKE_SYSTEM}" MATCHES "GNU" AND NOT "${CMAKE_SYSTEM}" MATCHES "Darwin")
+ENDIF()
+
+if(HAVE_MMAP AND HAVE_THREAD_LOCAL_STORAGE)
+ SET(HAVE_MMALLOC 1)
+else()
+ SET(HAVE_MMALLOC 0)
+endif()
if(WIN32) #THOSE FILES AND FUNCTIONS ARE NOT DETECTED, BUT THEY SHOULD BE...
set(HAVE_UCONTEXT_H 1)
SET(MALLOCATOR_IS_WANTED 0)
endif()
-if(enable_model-checking AND HAVE_MMAP)
+if(enable_model-checking AND HAVE_MMALLOC)
SET(HAVE_MC 1)
SET(MMALLOC_WANT_OVERRIDE_LEGACY 1)
include(FindLibunwind)
src/xbt_modinter.h
)
-if(HAVE_MMAP)
+if(HAVE_MMALLOC)
set(XBT_SRC
${XBT_SRC}
src/xbt/mmalloc/mm.c
message("HAVE_ASPRINTF ...............: ${HAVE_ASPRINTF}")
message("HAVE_VASPRINTF ..............: ${HAVE_VASPRINTF}")
message("HAVE_MMAP ...................: ${HAVE_MMAP}")
+ message("HAVE_THREAD_LOCAL_STORAGE ...: ${HAVE_THREAD_LOCAL_STORAGE}")
+ message("HAVE_MMALLOC ................: ${HAVE_MMALLOC}")
message("")
message("CONTEXT_THREADS .............: ${CONTEXT_THREADS}")
message("CONTEXT_UCONTEXT ............: ${CONTEXT_UCONTEXT}")
/* This file is AUTOMATICALLY GENERATED by Cmake. Edit the following
template instead buildtools/Cmake/src/internal_config.h.in */
-/* Copyright (c) 2004-2013. The SimGrid Team.
+/* Copyright (c) 2004-2014. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
/* Define to 1 if you have the <execinfo.h> header file. */
#cmakedefine HAVE_EXECINFO_H @HAVE_EXECINFO_H@
-/* Define to 1 if you have the `mmap' function. */
-#cmakedefine HAVE_MMAP @HAVE_MMAP@
+/* Define to 1 if mmalloc is compiled in. */
+#cmakedefine HAVE_MMALLOC @HAVE_MMALLOC@
/* Define to 1 if you have the `getdtablesize' function. */
#cmakedefine HAVE_GETDTABLESIZE @HAVE_GETDTABLESIZE@
- \ref SURF_resources
- \ref SURF_build_api
- \ref SURF_interface
- - \ref SURF_routing
+ - \ref SURF_routing_interface
- \ref SURF_cpu_interface
- \ref SURF_network_interface
- \ref SURF_storage_interface
/* platf.h - Public interface to the SimGrid platforms */
-/* Copyright (c) 2004-2013. The SimGrid Team.
+/* Copyright (c) 2004-2014. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
static inline char* sg_storage_name(sg_storage_t storage) {
return storage->key;
}
-/* Type for any simgrid size */
+/** @ingroup m_datatypes_management_details
+ * @brief Type for any simgrid size
+ */
typedef unsigned long long sg_size_t;
/*
/* simgrid_config.h - Results of the configure made visible to user code */
-/* Copyright (c) 2009-2013. The SimGrid Team.
+/* Copyright (c) 2009-2014. The SimGrid Team.
All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
#endif
-/* Define to 1 if you have the `mmap' function. */
-#cmakedefine HAVE_MMAP @HAVE_MMAP@
+/* Define to 1 if mmalloc is compiled in. */
+#cmakedefine HAVE_MMALLOC @HAVE_MMALLOC@
/* Get the config */
#undef SIMGRID_NEED_ASPRINTF
-/* Copyright (c) 2004-2013. The SimGrid Team.
+/* Copyright (c) 2004-2014. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
*
* @param resource [description]
* @param cpu [description]
- * @param long [description]
+ * @param mask [description]
*/
XBT_PUBLIC(void) surf_vm_workstation_set_affinity(surf_resource_t resource, surf_resource_t cpu, unsigned long mask);
* @brief Get the workstation power peak
* @details [long description]
*
- * @param resource The surf workstation
+ * @param host The surf workstation
* @return The power peak
*/
XBT_PUBLIC(double) surf_workstation_get_current_power_peak(surf_resource_t host);
* @brief [brief description]
* @details [long description]
*
- * @param resource [description]
+ * @param host [description]
* @param pstate_index [description]
*
* @return [description]
* @brief [brief description]
* @details [long description]
*
- * @param resource [description]
+ * @param host [description]
* @return [description]
*/
XBT_PUBLIC(int) surf_workstation_get_nb_pstates(surf_resource_t host);
* @brief [brief description]
* @details [long description]
*
- * @param resource [description]
+ * @param host [description]
* @param pstate_index [description]
*/
XBT_PUBLIC(void) surf_workstation_set_power_peak_at(surf_resource_t host, int pstate_index);
/**
* @brief Get the consumed energy (in joules) of a workstation
*
- * @param resource The surf workstation
+ * @param host The surf workstation
* @return The consumed energy
*/
XBT_PUBLIC(double) surf_workstation_get_consumed_energy(surf_resource_t host);
*
* @param action The surf cpu action
* @param cpu [description]
- * @param long [description]
+ * @param mask [description]
*/
XBT_PUBLIC(void) surf_cpu_action_set_affinity(surf_action_t action, surf_resource_t cpu, unsigned long mask);
-/* Copyright (c) 2004-2013. The SimGrid Team.
+/* Copyright (c) 2004-2014. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
/** \ingroup msg_storage_management
* \brief Returns the free space size of a storage element
* \param name the name of a storage
- * \return the free space size of the storage element (as a sg_size_t)
+ * \return the free space size of the storage element (as a #sg_size_t)
*/
sg_size_t MSG_storage_get_free_size(const char* name){
return simcall_storage_get_free_size(name);
/** \ingroup msg_storage_management
* \brief Returns the used space size of a storage element
* \param name the name of a storage
- * \return the used space size of the storage element (as a sg_size_t)
+ * \return the used space size of the storage element (as a #sg_size_t)
*/
sg_size_t MSG_storage_get_used_size(const char* name){
return simcall_storage_get_used_size(name);
#include "smx_private.h"
#include "xbt/fifo.h"
#include "xbt/xbt_os_thread.h"
-#include "../mc/mc_private.h"
+#ifdef HAVE_MC
+#include "mc/mc_private.h"
+#endif
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(simix_smurf, simix,
"Logging specific to SIMIX (SMURF)");
int i, send_offset, recv_offset;
int intra_rank, inter_rank;
- intra_rank = rank % NUM_CORE;
- inter_rank = rank / NUM_CORE;
- int inter_comm_size = (comm_size + NUM_CORE - 1) / NUM_CORE;
- int num_core_in_current_smp = NUM_CORE;
- if(comm_size%NUM_CORE)
- THROWF(arg_error,0, "allgather SMP NTS algorithm can't be used with non multiple of NUM_CORE=%d number of processes ! ", NUM_CORE);
+ int num_core = simcall_host_get_core(SIMIX_host_self());
+ // do we use the default one or the number of cores in the platform ?
+ // if the number of cores is one, the platform may be simulated with 1 node = 1 core
+ if (num_core == 1) num_core = NUM_CORE;
+
+
+ intra_rank = rank % num_core;
+ inter_rank = rank / num_core;
+ int inter_comm_size = (comm_size + num_core - 1) / num_core;
+ int num_core_in_current_smp = num_core;
+
+ if(comm_size%num_core)
+ THROWF(arg_error,0, "allgather SMP NTS algorithm can't be used with non multiple of NUM_CORE=%d number of processes ! ", num_core);
/* for too small number of processes, use default implementation */
- if (comm_size <= NUM_CORE) {
+ if (comm_size <= num_core) {
XBT_WARN("MPI_allgather_SMP_NTS use default MPI_allgather.");
smpi_mpi_allgather(sbuf, scount, stype, rbuf, rcount, rtype, comm);
return MPI_SUCCESS;
// the last SMP node may have fewer number of running processes than all others
if (inter_rank == (inter_comm_size - 1)) {
- num_core_in_current_smp = comm_size - (inter_rank * NUM_CORE);
+ num_core_in_current_smp = comm_size - (inter_rank * num_core);
}
//copy corresponding message from sbuf to rbuf
recv_offset = rank * rextent * rcount;
for (i = 1; i < num_core_in_current_smp; i++) {
dst =
- (inter_rank * NUM_CORE) + (intra_rank + i) % (num_core_in_current_smp);
+ (inter_rank * num_core) + (intra_rank + i) % (num_core_in_current_smp);
src =
- (inter_rank * NUM_CORE) + (intra_rank - i +
+ (inter_rank * num_core) + (intra_rank - i +
num_core_in_current_smp) %
(num_core_in_current_smp);
recv_offset = src * rextent * rcount;
MPI_Request *rrequest_array = xbt_new(MPI_Request, inter_comm_size - 1);
MPI_Request *srequest_array = xbt_new(MPI_Request, inter_comm_size - 1);
- src = ((inter_rank - 1 + inter_comm_size) % inter_comm_size) * NUM_CORE;
- dst = ((inter_rank + 1) % inter_comm_size) * NUM_CORE;
+ src = ((inter_rank - 1 + inter_comm_size) % inter_comm_size) * num_core;
+ dst = ((inter_rank + 1) % inter_comm_size) * num_core;
// post all inter Irecv
for (i = 0; i < inter_comm_size - 1; i++) {
recv_offset =
((inter_rank - i - 1 +
- inter_comm_size) % inter_comm_size) * NUM_CORE * sextent * scount;
- rrequest_array[i] = smpi_mpi_irecv((char *)rbuf + recv_offset, rcount * NUM_CORE,
+ inter_comm_size) % inter_comm_size) * num_core * sextent * scount;
+ rrequest_array[i] = smpi_mpi_irecv((char *)rbuf + recv_offset, rcount * num_core,
rtype, src, tag + i, comm);
}
// send first message
send_offset =
((inter_rank +
- inter_comm_size) % inter_comm_size) * NUM_CORE * sextent * scount;
- srequest_array[0] = smpi_mpi_isend((char *)rbuf + send_offset, scount * NUM_CORE,
+ inter_comm_size) % inter_comm_size) * num_core * sextent * scount;
+ srequest_array[0] = smpi_mpi_isend((char *)rbuf + send_offset, scount * num_core,
stype, dst, tag, comm);
// loop : recv-inter , send-inter, send-intra (linear-bcast)
for (i = 0; i < inter_comm_size - 2; i++) {
recv_offset =
((inter_rank - i - 1 +
- inter_comm_size) % inter_comm_size) * NUM_CORE * sextent * scount;
+ inter_comm_size) % inter_comm_size) * num_core * sextent * scount;
smpi_mpi_wait(&rrequest_array[i], MPI_STATUS_IGNORE);
- srequest_array[i + 1] = smpi_mpi_isend((char *)rbuf + recv_offset, scount * NUM_CORE,
+ srequest_array[i + 1] = smpi_mpi_isend((char *)rbuf + recv_offset, scount * num_core,
stype, dst, tag + i + 1, comm);
if (num_core_in_current_smp > 1) {
- smpi_mpi_send((char *)rbuf + recv_offset, scount * NUM_CORE,
+ smpi_mpi_send((char *)rbuf + recv_offset, scount * num_core,
stype, (rank + 1), tag + i + 1, comm);
}
}
// recv last message and send_intra
recv_offset =
((inter_rank - i - 1 +
- inter_comm_size) % inter_comm_size) * NUM_CORE * sextent * scount;
- //recv_offset = ((inter_rank + 1) % inter_comm_size) * NUM_CORE * sextent * scount;
+ inter_comm_size) % inter_comm_size) * num_core * sextent * scount;
+ //recv_offset = ((inter_rank + 1) % inter_comm_size) * num_core * sextent * scount;
//i=inter_comm_size-2;
smpi_mpi_wait(&rrequest_array[i], MPI_STATUS_IGNORE);
if (num_core_in_current_smp > 1) {
- smpi_mpi_send((char *)rbuf + recv_offset, scount * NUM_CORE,
+ smpi_mpi_send((char *)rbuf + recv_offset, scount * num_core,
stype, (rank + 1), tag + i + 1, comm);
}
for (i = 0; i < inter_comm_size - 1; i++) {
recv_offset =
((inter_rank - i - 1 +
- inter_comm_size) % inter_comm_size) * NUM_CORE * sextent * scount;
- smpi_mpi_recv((char *) rbuf + recv_offset, (rcount * NUM_CORE), rtype,
+ inter_comm_size) % inter_comm_size) * num_core * sextent * scount;
+ smpi_mpi_recv((char *) rbuf + recv_offset, (rcount * num_core), rtype,
rank - 1, tag + i + 1, comm, MPI_STATUS_IGNORE);
}
}
for (i = 0; i < inter_comm_size - 1; i++) {
recv_offset =
((inter_rank - i - 1 +
- inter_comm_size) % inter_comm_size) * NUM_CORE * sextent * scount;
- smpi_mpi_recv((char *) rbuf + recv_offset, (rcount * NUM_CORE), rtype,
+ inter_comm_size) % inter_comm_size) * num_core * sextent * scount;
+ smpi_mpi_recv((char *) rbuf + recv_offset, (rcount * num_core), rtype,
rank - 1, tag + i + 1, comm, MPI_STATUS_IGNORE);
- smpi_mpi_send((char *) rbuf + recv_offset, (scount * NUM_CORE), stype,
+ smpi_mpi_send((char *) rbuf + recv_offset, (scount * num_core), stype,
(rank + 1), tag + i + 1, comm);
}
}
comm_size = smpi_comm_size(comm);
- if(comm_size%4)
- THROWF(arg_error,0, "allgather loosely lr algorithm can't be used with non multiple of NUM_CORE=4 number of processes ! ");
+ int num_core = simcall_host_get_core(SIMIX_host_self());
+ // do we use the default one or the number of cores in the platform ?
+ // if the number of cores is one, the platform may be simulated with 1 node = 1 core
+ if (num_core == 1) num_core = NUM_CORE;
+
+ if(comm_size%num_core)
+ THROWF(arg_error,0, "allgather loosely lr algorithm can't be used with non multiple of NUM_CORE=%d number of processes ! ",num_core);
rank = smpi_comm_rank(comm);
MPI_Aint rextent, sextent;
MPI_Status status;
- intra_rank = rank % NUM_CORE;
- inter_rank = rank / NUM_CORE;
- inter_comm_size = (comm_size + NUM_CORE - 1) / NUM_CORE;
- intra_comm_size = NUM_CORE;
+ intra_rank = rank % num_core;
+ inter_rank = rank / num_core;
+ inter_comm_size = (comm_size + num_core - 1) / num_core;
+ intra_comm_size = num_core;
int src_seg, dst_seg;
} // intra loop
- // wait for inter communication to finish for these rounds (# of round equals NUM_CORE)
+ // wait for inter communication to finish for these rounds (# of round equals num_core)
if (i != inter_comm_size - 1) {
smpi_mpi_wait(&inter_rrequest, &status);
}
int src, dst, comm_size, rank;
comm_size = smpi_comm_size(comm);
- if(comm_size%NUM_CORE)
- THROWF(arg_error,0, "allgather SMP simple algorithm can't be used with non multiple of NUM_CORE=%d number of processes ! ", NUM_CORE);
+ int num_core = simcall_host_get_core(SIMIX_host_self());
+ // do we use the default one or the number of cores in the platform ?
+ // if the number of cores is one, the platform may be simulated with 1 node = 1 core
+ if (num_core == 1) num_core = NUM_CORE;
+
+ if(comm_size%num_core)
+ THROWF(arg_error,0, "allgather SMP simple algorithm can't be used with non multiple of NUM_CORE=%d number of processes ! ", num_core);
rank = smpi_comm_rank(comm);
MPI_Aint rextent, sextent;
MPI_Status status;
int i, send_offset, recv_offset;
int intra_rank, inter_rank;
- int num_core = NUM_CORE;
intra_rank = rank % num_core;
inter_rank = rank / num_core;
int inter_comm_size = (comm_size + num_core - 1) / num_core;
int tag = COLL_TAG_ALLREDUCE;
int mask, src, dst;
MPI_Status status;
- int num_core = NUM_CORE;
+ int num_core = simcall_host_get_core(SIMIX_host_self());
+ // do we use the default one or the number of cores in the platform ?
+ // if the number of cores is one, the platform may be simulated with 1 node = 1 core
+ if (num_core == 1) num_core = NUM_CORE;
comm_size = smpi_comm_size(comm);
rank = smpi_comm_rank(comm);
void *tmp_buf;
int tag = COLL_TAG_ALLREDUCE;
int mask, src, dst;
- int num_core = NUM_CORE;
+
+
+ int num_core = simcall_host_get_core(SIMIX_host_self());
+ // do we use the default one or the number of cores in the platform ?
+ // if the number of cores is one, the platform may be simulated with 1 node = 1 core
+ if (num_core == 1) num_core = NUM_CORE;
MPI_Status status;
comm_size=smpi_comm_size(comm);
int tag = COLL_TAG_ALLREDUCE;
int mask, src, dst;
MPI_Status status;
- int num_core = NUM_CORE;
+ int num_core = simcall_host_get_core(SIMIX_host_self());
+ // do we use the default one or the number of cores in the platform ?
+ // if the number of cores is one, the platform may be simulated with 1 node = 1 core
+ if (num_core == 1) num_core = NUM_CORE;
/*
#ifdef MPICH2_REDUCTION
MPI_User_function * uop = MPIR_Op_table[op % 16 - 1];
int tag = COLL_TAG_ALLREDUCE;
int mask, src, dst;
MPI_Status status;
- int num_core = NUM_CORE;
+ int num_core = simcall_host_get_core(SIMIX_host_self());
+ // do we use the default one or the number of cores in the platform ?
+ // if the number of cores is one, the platform may be simulated with 1 node = 1 core
+ if (num_core == 1) num_core = NUM_CORE;
/*
#ifdef MPICH2_REDUCTION
MPI_User_function * uop = MPIR_Op_table[op % 16 - 1];
int tag = COLL_TAG_ALLREDUCE;
int mask, src, dst;
MPI_Status status;
- int num_core = NUM_CORE;
+ int num_core = simcall_host_get_core(SIMIX_host_self());
+ // do we use the default one or the number of cores in the platform ?
+ // if the number of cores is one, the platform may be simulated with 1 node = 1 core
+ if (num_core == 1) num_core = NUM_CORE;
comm_size = smpi_comm_size(comm);
int tag = COLL_TAG_ALLREDUCE;
int mask, src, dst;
MPI_Status status;
- int num_core = NUM_CORE;
+ int num_core = simcall_host_get_core(SIMIX_host_self());
+ // do we use the default one or the number of cores in the platform ?
+ // if the number of cores is one, the platform may be simulated with 1 node = 1 core
+ if (num_core == 1) num_core = NUM_CORE;
/*
#ifdef MPICH2_REDUCTION
MPI_User_function * uop = MPIR_Op_table[op % 16 - 1];
rank = smpi_comm_rank(comm);
size = smpi_comm_size(comm);
+ int host_num_core = simcall_host_get_core(SIMIX_host_self());
+ // do we use the default one or the number of cores in the platform ?
+ // if the number of cores is one, the platform may be simulated with 1 node = 1 core
+ if (host_num_core == 1) host_num_core = NUM_CORE;
- if(size%NUM_CORE)
- THROWF(arg_error,0, "bcast SMP binary can't be used with non multiple of NUM_CORE=%d number of processes ! ",NUM_CORE);
+ if(size%host_num_core)
+ THROWF(arg_error,0, "bcast SMP binary can't be used with non multiple of NUM_CORE=%d number of processes ! ",host_num_core);
int segment = bcast_SMP_binary_segment_byte / extent;
int pipe_length = count / segment;
int remainder = count % segment;
- int to_intra_left = (rank / NUM_CORE) * NUM_CORE + (rank % NUM_CORE) * 2 + 1;
- int to_intra_right = (rank / NUM_CORE) * NUM_CORE + (rank % NUM_CORE) * 2 + 2;
- int to_inter_left = ((rank / NUM_CORE) * 2 + 1) * NUM_CORE;
- int to_inter_right = ((rank / NUM_CORE) * 2 + 2) * NUM_CORE;
- int from_inter = (((rank / NUM_CORE) - 1) / 2) * NUM_CORE;
- int from_intra = (rank / NUM_CORE) * NUM_CORE + ((rank % NUM_CORE) - 1) / 2;
+ int to_intra_left = (rank / host_num_core) * host_num_core + (rank % host_num_core) * 2 + 1;
+ int to_intra_right = (rank / host_num_core) * host_num_core + (rank % host_num_core) * 2 + 2;
+ int to_inter_left = ((rank / host_num_core) * 2 + 1) * host_num_core;
+ int to_inter_right = ((rank / host_num_core) * 2 + 2) * host_num_core;
+ int from_inter = (((rank / host_num_core) - 1) / 2) * host_num_core;
+ int from_intra = (rank / host_num_core) * host_num_core + ((rank % host_num_core) - 1) / 2;
int increment = segment * extent;
- int base = (rank / NUM_CORE) * NUM_CORE;
- int num_core = NUM_CORE;
- if (((rank / NUM_CORE) * NUM_CORE) == ((size / NUM_CORE) * NUM_CORE))
- num_core = size - (rank / NUM_CORE) * NUM_CORE;
+ int base = (rank / host_num_core) * host_num_core;
+ int num_core = host_num_core;
+ if (((rank / host_num_core) * host_num_core) == ((size / host_num_core) * host_num_core))
+ num_core = size - (rank / host_num_core) * host_num_core;
// if root is not zero send to rank zero first
if (root != 0) {
// when a message is smaller than a block size => no pipeline
if (count <= segment) {
// case ROOT-of-each-SMP
- if (rank % NUM_CORE == 0) {
+ if (rank % host_num_core == 0) {
// case ROOT
if (rank == 0) {
//printf("node %d left %d right %d\n",rank,to_inter_left,to_inter_right);
(MPI_Status *) xbt_malloc((size + pipe_length) * sizeof(MPI_Status));
// case ROOT-of-each-SMP
- if (rank % NUM_CORE == 0) {
+ if (rank % host_num_core == 0) {
// case ROOT
if (rank == 0) {
for (i = 0; i < pipe_length; i++) {
size = smpi_comm_size(comm);
rank = smpi_comm_rank(comm);
- if(size%NUM_CORE)
- THROWF(arg_error,0, "bcast SMP binomial can't be used with non multiple of NUM_CORE=%d number of processes ! ",NUM_CORE);
+ int num_core = simcall_host_get_core(SIMIX_host_self());
+ // do we use the default one or the number of cores in the platform ?
+ // if the number of cores is one, the platform may be simulated with 1 node = 1 core
+ if (num_core == 1) num_core = NUM_CORE;
+
+ if(size%num_core)
+ THROWF(arg_error,0, "bcast SMP binomial can't be used with non multiple of NUM_CORE=%d number of processes ! ",num_core);
int to_intra, to_inter;
int from_intra, from_inter;
- int inter_rank = rank / NUM_CORE;
- int inter_size = (size - 1) / NUM_CORE + 1;
- int intra_rank = rank % NUM_CORE;
- int intra_size = NUM_CORE;
- if (((rank / NUM_CORE) * NUM_CORE) == ((size / NUM_CORE) * NUM_CORE))
- intra_size = size - (rank / NUM_CORE) * NUM_CORE;
+ int inter_rank = rank / num_core;
+ int inter_size = (size - 1) / num_core + 1;
+ int intra_rank = rank % num_core;
+ int intra_size = num_core;
+ if (((rank / num_core) * num_core) == ((size / num_core) * num_core))
+ intra_size = size - (rank / num_core) * num_core;
// if root is not zero send to rank zero first
if (root != 0) {
mask = 1;
while (mask < inter_size) {
if (inter_rank & mask) {
- from_inter = (inter_rank - mask) * NUM_CORE;
+ from_inter = (inter_rank - mask) * num_core;
//printf("Node %d recv from node %d when mask is %d\n", rank, from_inter, mask);
smpi_mpi_recv(buf, count, datatype, from_inter, tag, comm, &status);
break;
while (mask > 0) {
if (inter_rank < inter_size) {
- to_inter = (inter_rank + mask) * NUM_CORE;
+ to_inter = (inter_rank + mask) * num_core;
if (to_inter < size) {
//printf("Node %d send to node %d when mask is %d\n", rank, to_inter, mask);
smpi_mpi_send(buf, count, datatype, to_inter, tag, comm);
}
// SECOND STEP every root-of-each-SMP send to all children with binomial tree
// base is a rank of root-of-each-SMP
- int base = (rank / NUM_CORE) * NUM_CORE;
+ int base = (rank / num_core) * num_core;
mask = 1;
while (mask < intra_size) {
if (intra_rank & mask) {
rank = smpi_comm_rank(comm);
size = smpi_comm_size(comm);
+ int num_core = simcall_host_get_core(SIMIX_host_self());
+ // do we use the default one or the number of cores in the platform ?
+ // if the number of cores is one, the platform may be simulated with 1 node = 1 core
+ if (num_core == 1) num_core = NUM_CORE;
- if(size%NUM_CORE)
- THROWF(arg_error,0, "bcast SMP linear can't be used with non multiple of NUM_CORE=%d number of processes ! ",NUM_CORE);
+ if(size%num_core)
+ THROWF(arg_error,0, "bcast SMP linear can't be used with non multiple of num_core=%d number of processes!",num_core);
int segment = bcast_SMP_linear_segment_byte / extent;
int pipe_length = count / segment;
/* leader of each SMP do inter-communication
and act as a root for intra-communication */
- int to_inter = (rank + NUM_CORE) % size;
+ int to_inter = (rank + num_core) % size;
int to_intra = (rank + 1) % size;
- int from_inter = (rank - NUM_CORE + size) % size;
+ int from_inter = (rank - num_core + size) % size;
int from_intra = (rank + size - 1) % size;
// call native when MPI communication size is too small
- if (size <= NUM_CORE) {
+ if (size <= num_core) {
XBT_WARN("MPI_bcast_SMP_linear use default MPI_bcast.");
smpi_mpi_bcast(buf, count, datatype, root, comm);
return MPI_SUCCESS;
smpi_mpi_send(buf, count, datatype, to_intra, tag, comm);
}
// case last ROOT of each SMP
- else if (rank == (((size - 1) / NUM_CORE) * NUM_CORE)) {
+ else if (rank == (((size - 1) / num_core) * num_core)) {
request = smpi_mpi_irecv(buf, count, datatype, from_inter, tag, comm);
smpi_mpi_wait(&request, &status);
smpi_mpi_send(buf, count, datatype, to_intra, tag, comm);
}
// case intermediate ROOT of each SMP
- else if (rank % NUM_CORE == 0) {
+ else if (rank % num_core == 0) {
request = smpi_mpi_irecv(buf, count, datatype, from_inter, tag, comm);
smpi_mpi_wait(&request, &status);
smpi_mpi_send(buf, count, datatype, to_inter, tag, comm);
smpi_mpi_send(buf, count, datatype, to_intra, tag, comm);
}
// case last non-ROOT of each SMP
- else if (((rank + 1) % NUM_CORE == 0) || (rank == (size - 1))) {
+ else if (((rank + 1) % num_core == 0) || (rank == (size - 1))) {
request = smpi_mpi_irecv(buf, count, datatype, from_intra, tag, comm);
smpi_mpi_wait(&request, &status);
}
(MPI_Status *) xbt_malloc((size + pipe_length) * sizeof(MPI_Status));
// case ROOT of each SMP
- if (rank % NUM_CORE == 0) {
+ if (rank % num_core == 0) {
// case real root
if (rank == 0) {
for (i = 0; i < pipe_length; i++) {
}
}
// case last ROOT of each SMP
- else if (rank == (((size - 1) / NUM_CORE) * NUM_CORE)) {
+ else if (rank == (((size - 1) / num_core) * num_core)) {
for (i = 0; i < pipe_length; i++) {
request_array[i] = smpi_mpi_irecv((char *) buf + (i * increment), segment, datatype,
from_inter, (tag + i), comm);
}
}
} else { // case last non-ROOT of each SMP
- if (((rank + 1) % NUM_CORE == 0) || (rank == (size - 1))) {
+ if (((rank + 1) % num_core == 0) || (rank == (size - 1))) {
for (i = 0; i < pipe_length; i++) {
request_array[i] = smpi_mpi_irecv((char *) buf + (i * increment), segment, datatype,
from_intra, (tag + i), comm);
#include "colls_private.h"
+#ifdef HAVE_MC
#include "mc/mc_private.h"
+#endif
#include <float.h>
//attempt to do a quick autotuning version of the collective,
ret smpi_coll_tuned_ ## cat ## _ ## automatic(COLL_UNPAREN args)\
{\
double time1, time2, time_min=DBL_MAX;\
- int min_coll=-1, global_coll=-1;\
- int i;\
+ volatile int min_coll=-1, global_coll=-1;\
+ volatile int i;\
xbt_ex_t ex;\
double buf_in, buf_out, max_min=DBL_MAX;\
for (i = 0; mpi_coll_##cat##_description[i].name; i++){\
mpi_coll_##cat##_description[i].coll) args2 ;\
}\
CATCH(ex) {\
+ xbt_ex_free(ex);\
continue;\
}\
time2 = SIMIX_get_clock();\
shift 2
;;
+ "-machinefile")
+ HOSTFILE="$2"
+ if [ ! -f "${HOSTFILE}" ]; then
+ echo "[$0] ** error: the file '${HOSTFILE}' does not exist. Aborting."
+ exit 1
+ fi
+ shift 2
+ ;;
+
"-ext")
EXTOPT="$2"
shift 2
##-----------------------------------
-# Basic checks on the provided arguments
-if [ -z "${EXEC}" ] ; then
- echo "You must provide a program to execute."
- usage
- exit 1
-fi
if [ -z "${HOSTFILE}" ] && [ -z "${PLATFORM}" ] ; then
echo "No hostfile nor platform specified."
HOSTFILE="$(mktemp tmphostXXXXXX)"
perl -ne 'print "$1\n" if /.*<host.*?id="(.*?)".*?\/>.*/' ${PLATFORM} > ${HOSTFILE}
fi
+UNROLLEDHOSTFILETMP=0
+
+# Detect whether hostfile lines end with a ":num_processes" suffix
+multiple_processes=`grep -c ":" $HOSTFILE`
+if [ "${multiple_processes}" -gt 0 ] ; then
+ UNROLLEDHOSTFILETMP=1
+ UNROLLEDHOSTFILE="$(mktemp tmphostXXXXXX)"
+ perl -ne ' do{ for ( 1 .. $2 ) { print "$1\n" } } if /(.*?):(\d+).*/' ${HOSTFILE} > ${UNROLLEDHOSTFILE}
+ if [ ${HOSTFILETMP} = 1 ] ; then
+ rm ${HOSTFILE}
+ HOSTFILETMP=0
+ fi
+ HOSTFILE=$UNROLLEDHOSTFILE
+fi
+
# Don't use wc -l to compute it to avoid issues with trailing \n at EOF
hostfile_procs=`grep -c "[a-zA-Z0-9]" $HOSTFILE`
if [ ${HOSTFILETMP} = 1 ] ; then
echo "Generated hostfile ${HOSTFILE} keeped."
fi
+ if [ ${UNROLLEDHOSTFILETMP} = 1 ] ; then
+ echo "Generated unrolled hostfile ${UNROLLEDHOSTFILE} keeped."
+ fi
fi
${EXEC} ${TRACEOPTIONS} ${SIMOPTS} ${PLATFORMTMP} ${APPLICATIONTMP}
status=$?
if [ ${HOSTFILETMP} = 1 ] ; then
rm ${HOSTFILE}
fi
+ if [ ${UNROLLEDHOSTFILETMP} = 1 ] ; then
+ rm ${UNROLLEDHOSTFILE}
+ fi
rm ${APPLICATIONTMP}
fi
-/* Copyright (c) 2004-2013. The SimGrid Team.
+/* Copyright (c) 2004-2014. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
/** @ingroup SURF_callbacks
* @brief Callbacks handler which emit the callbacks after Cpu creation *
- * @detail Callback functions have the following signature: `void(CpuPtr)`
+ * @details Callback functions have the following signature: `void(CpuPtr)`
*/
extern surf_callback(void, CpuPtr) cpuCreatedCallbacks;
/** @ingroup SURF_callbacks
* @brief Callbacks handler which emit the callbacks after Cpu destruction *
- * @detail Callback functions have the following signature: `void(CpuPtr)`
+ * @details Callback functions have the following signature: `void(CpuPtr)`
*/
extern surf_callback(void, CpuPtr) cpuDestructedCallbacks;
/** @ingroup SURF_callbacks
* @brief Callbacks handler which emit the callbacks after Cpu State changed *
- * @detail Callback functions have the following signature: `void(CpuActionPtr)`
+ * @details Callback functions have the following signature: `void(CpuActionPtr)`
*/
extern surf_callback(void, CpuPtr) cpuStateChangedCallbacks;
/** @ingroup SURF_callbacks
* @brief Callbacks handler which emit the callbacks after CpuAction State changed *
- * @detail Callback functions have the following signature: `void(CpuActionPtr)`
+ * @details Callback functions have the following signature: `void(CpuActionPtr)`
*/
extern surf_callback(void, CpuActionPtr) cpuActionStateChangedCallbacks;
* @details [TODO]
*
* @param cpu [TODO]
- * @param long [TODO]
+ * @param mask [TODO]
*/
virtual void setAffinity(CpuPtr cpu, unsigned long mask);
-/* Copyright (c) 2004-2013. The SimGrid Team.
+/* Copyright (c) 2004-2014. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
/** @ingroup SURF_callbacks
* @brief Callbacks handler which emit the callbacks after NetworkLink creation *
- * @detail Callback functions have the following signature: `void(NetworkLinkPtr)`
+ * @details Callback functions have the following signature: `void(NetworkLinkPtr)`
*/
extern surf_callback(void, NetworkLinkPtr) networkLinkCreatedCallbacks;
/** @ingroup SURF_callbacks
* @brief Callbacks handler which emit the callbacks after NetworkLink destruction *
- * @detail Callback functions have the following signature: `void(NetworkLinkPtr)`
+ * @details Callback functions have the following signature: `void(NetworkLinkPtr)`
*/
extern surf_callback(void, NetworkLinkPtr) networkLinkDestructedCallbacks;
/** @ingroup SURF_callbacks
* @brief Callbacks handler which emit the callbacks after NetworkLink State changed *
- * @detail Callback functions have the following signature: `void(NetworkLinkActionPtr)`
+ * @details Callback functions have the following signature: `void(NetworkLinkActionPtr)`
*/
extern surf_callback(void, NetworkLinkPtr) networkLinkStateChangedCallbacks;
/** @ingroup SURF_callbacks
* @brief Callbacks handler which emit the callbacks after NetworkAction State changed *
- * @detail Callback functions have the following signature: `void(NetworkActionPtr)`
+ * @details Callback functions have the following signature: `void(NetworkActionPtr)`
*/
extern surf_callback(void, NetworkActionPtr) networkActionStateChangedCallbacks;
-/* Copyright (c) 2004-2013. The SimGrid Team.
+/* Copyright (c) 2004-2014. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
/** @ingroup SURF_callbacks
* @brief Callbacks handler which emit the callbacks after Storage creation *
- * @detail Callback functions have the following signature: `void(StoragePtr)`
+ * @details Callback functions have the following signature: `void(StoragePtr)`
*/
extern surf_callback(void, StoragePtr) storageCreatedCallbacks;
/** @ingroup SURF_callbacks
* @brief Callbacks handler which emit the callbacks after Storage destruction *
- * @detail Callback functions have the following signature: `void(StoragePtr)`
+ * @details Callback functions have the following signature: `void(StoragePtr)`
*/
extern surf_callback(void, StoragePtr) storageDestructedCallbacks;
/** @ingroup SURF_callbacks
* @brief Callbacks handler which emit the callbacks after Storage State changed *
- * @detail Callback functions have the following signature: `void(StorageActionPtr)`
+ * @details Callback functions have the following signature: `void(StorageActionPtr)`
*/
extern surf_callback(void, StoragePtr) storageStateChangedCallbacks;
/** @ingroup SURF_callbacks
* @brief Callbacks handler which emit the callbacks after StorageAction State changed *
- * @detail Callback functions have the following signature: `void(StorageActionPtr)`
+ * @details Callback functions have the following signature: `void(StorageActionPtr)`
*/
extern surf_callback(void, StorageActionPtr) storageActionStateChangedCallbacks;
* @param model StorageModel associated to this Storage
* @param name The name of the Storage
* @param props Dictionary of properties associated to this Storage
- * @param constraint The lmm constraint associated to this Storage if it is part of a LMM component
- * @param model [description]
- * @param name [description]
- * @param props [description]
* @param type_id [description]
* @param content_name [description]
* @param content_type [description]
* @param size [description]
*/
Storage(ModelPtr model, const char *name, xbt_dict_t props,
- const char* type_id, char *content_name, char *content_type, sg_size_t size);
+ const char* type_id, char *content_name, char *content_type,
+ sg_size_t size);
/**
* @brief Storage constructor
* @param model StorageModel associated to this Storage
* @param name The name of the Storage
* @param props Dictionary of properties associated to this Storage
- * @param constraint The lmm constraint associated to this Storage if it is part of a LMM component
* @param maxminSystem [description]
* @param bread [description]
* @param bwrite [description]
* @param size [description]
*/
Storage(ModelPtr model, const char *name, xbt_dict_t props,
- lmm_system_t maxminSystem, double bread, double bwrite, double bconnection,
- const char* type_id, char *content_name, char *content_type, sg_size_t size);
+ lmm_system_t maxminSystem, double bread, double bwrite,
+ double bconnection,
+ const char* type_id, char *content_name, char *content_type,
+ sg_size_t size);
/**
* @brief Storage destructor
{}
Resource::Resource(surf_model_t model, const char *name, xbt_dict_t props)
- : m_stateCurrent(SURF_RESOURCE_ON)
- , p_name(xbt_strdup(name)), p_properties(props), p_model(model)
- , m_running(true)
+ : p_name(xbt_strdup(name)), p_properties(props), p_model(model)
+ , m_running(true), m_stateCurrent(SURF_RESOURCE_ON)
{}
Resource::Resource(surf_model_t model, const char *name, xbt_dict_t props, lmm_constraint_t constraint)
- : m_stateCurrent(SURF_RESOURCE_ON)
- , p_name(xbt_strdup(name)), p_properties(props), p_model(model)
- , m_running(true), p_constraint(constraint)
+ : p_name(xbt_strdup(name)), p_properties(props), p_model(model)
+ , m_running(true), m_stateCurrent(SURF_RESOURCE_ON), p_constraint(constraint)
{}
Resource::Resource(surf_model_t model, const char *name, xbt_dict_t props, e_surf_resource_state_t stateInit)
- : m_stateCurrent(stateInit)
- , p_name(xbt_strdup(name)), p_properties(props), p_model(model)
- , m_running(true)
+ : p_name(xbt_strdup(name)), p_properties(props), p_model(model)
+ , m_running(true), m_stateCurrent(stateInit)
{}
Resource::~Resource() {
* @brief share the resources
* @details Share the resources between the actions
*
- * @param
+ * @param now [TODO]
* @return the date of the next action will finish
*/
virtual double shareResources(double now);
AS.id = cluster->id;
if(cluster->topology == SURF_CLUSTER_TORUS){
- XBT_DEBUG("<AS id=\"%s\"\trouting=\"Torus_Cluster\">", cluster->id);
- AS.routing = A_surfxml_AS_routing_Cluster___torus;
- sg_platf_new_AS_begin(&AS);
- ((AsClusterTorusPtr)current_routing)->parse_specific_arguments(cluster);
+ XBT_DEBUG("<AS id=\"%s\"\trouting=\"Torus_Cluster\">", cluster->id);
+ AS.routing = A_surfxml_AS_routing_Cluster___torus;
+ sg_platf_new_AS_begin(&AS);
+ ((AsClusterTorusPtr)current_routing)->parse_specific_arguments(cluster);
}else{
XBT_DEBUG("<AS id=\"%s\"\trouting=\"Cluster\">", cluster->id);
AS.routing = A_surfxml_AS_routing_Cluster;
-/* Copyright (c) 2009, 2010, 2011, 2013. The SimGrid Team.
+/* Copyright (c) 2009-2011, 2013-2014. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
if (link.policy == SURF_LINK_FULLDUPLEX) {
char *tmp_link = bprintf("%s_UP", link_id);
- info.link_up =
- xbt_lib_get_or_null(link_lib, tmp_link, SURF_LINK_LEVEL);
- free(tmp_link);
- tmp_link = bprintf("%s_DOWN", link_id);
- info.link_down =
- xbt_lib_get_or_null(link_lib, tmp_link, SURF_LINK_LEVEL);
- free(tmp_link);
- } else {
- info.link_up = xbt_lib_get_or_null(link_lib, link_id, SURF_LINK_LEVEL);
- info.link_down = info.link_up;
- }
- xbt_dynar_set(p_linkUpDownList, position,
- &info);
-
+ info.link_up = xbt_lib_get_or_null(link_lib, tmp_link, SURF_LINK_LEVEL);
+ xbt_free(tmp_link);
+ tmp_link = bprintf("%s_DOWN", link_id);
+ info.link_down = xbt_lib_get_or_null(link_lib, tmp_link, SURF_LINK_LEVEL);
+ xbt_free(tmp_link);
+ } else {
+ info.link_up = xbt_lib_get_or_null(link_lib, link_id, SURF_LINK_LEVEL);
+ info.link_down = info.link_up;
+ }
+ xbt_dynar_set(p_linkUpDownList, position, &info);
+ xbt_free(link_id);
}
int AsCluster::parsePU(RoutingEdgePtr elm) {
p_dimensions = NULL;
}
+/* Creation routing model functions */
+AsClusterTorus::~AsClusterTorus()
+{
+ if(p_dimensions) xbt_dynar_free(&p_dimensions);
+}
void AsClusterTorus::create_links_for_node(sg_platf_cluster_cbarg_t cluster, int id, int rank, int position){
unsigned int iter;
char *groups;
- p_dimensions = xbt_str_split(cluster->topo_parameters, ",");
+ xbt_dynar_t dimensions = xbt_str_split(cluster->topo_parameters, ",");
- if (!xbt_dynar_is_empty(p_dimensions)) {
+ if (!xbt_dynar_is_empty(dimensions)) {
+ p_dimensions= xbt_dynar_new(sizeof(int), NULL);
/**
* We are in a torus cluster
* Parse attribute dimensions="dim1,dim2,dim3,...,dimN"
     * and save it in a dynarray.
* Additionally, we need to know how many ranks we have in total
*/
- xbt_dynar_foreach(p_dimensions, iter, groups) {
- int tmp = surf_parse_get_int(xbt_dynar_get_as(p_dimensions, iter, char *));
+ xbt_dynar_foreach(dimensions, iter, groups) {
+ int tmp = surf_parse_get_int(xbt_dynar_get_as(dimensions, iter, char *));
xbt_dynar_set_as(p_dimensions, iter, int, tmp);
}
p_nb_links_per_node = xbt_dynar_length(p_dimensions);
}
+ xbt_dynar_free(&dimensions);
}
void AsClusterTorus::getRouteAndLatency(RoutingEdgePtr src, RoutingEdgePtr dst, sg_platf_route_cbarg_t route, double *lat){
assert(linkOffset >= 0);
}
- XBT_DEBUG("torus_get_route_and_latency - current_node: %lu, next_node: %lu, linkOffset is %lu",
+ XBT_DEBUG("torus_get_route_and_latency - current_node: %i, next_node: %u, linkOffset is %i",
current_node, next_node, linkOffset);
break;
class AsClusterTorus: public AsCluster {
public:
AsClusterTorus();
+ virtual ~AsClusterTorus();
virtual void create_links_for_node(sg_platf_cluster_cbarg_t cluster, int id, int rank, int position);
virtual void getRouteAndLatency(RoutingEdgePtr src, RoutingEdgePtr dst, sg_platf_route_cbarg_t into, double *latency);
void parse_specific_arguments(sg_platf_cluster_cbarg_t cluster);
-/* Copyright (c) 2004-2013. The SimGrid Team.
+/* Copyright (c) 2004-2014. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
/** @ingroup SURF_callbacks
* @brief Callbacks handler which emit the callbacks after WorkstationVM creation *
- * @detail Callback functions have the following signature: `void(WorkstationVMPtr)`
+ * @details Callback functions have the following signature: `void(WorkstationVMPtr)`
*/
extern surf_callback(void, WorkstationVMPtr) workstationVMCreatedCallbacks;
/** @ingroup SURF_callbacks
* @brief Callbacks handler which emit the callbacks after WorkstationVM destruction *
- * @detail Callback functions have the following signature: `void(WorkstationVMPtr)`
+ * @details Callback functions have the following signature: `void(WorkstationVMPtr)`
*/
extern surf_callback(void, WorkstationVMPtr) workstationVMDestructedCallbacks;
/** @ingroup SURF_callbacks
* @brief Callbacks handler which emit the callbacks after WorkstationVM State changed *
- * @detail Callback functions have the following signature: `void(WorkstationVMActionPtr)`
+ * @details Callback functions have the following signature: `void(WorkstationVMActionPtr)`
*/
extern surf_callback(void, WorkstationVMPtr) workstationVMStateChangedCallbacks;
-/* Copyright (c) 2004-2013. The SimGrid Team.
+/* Copyright (c) 2004-2014. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
/** @ingroup SURF_callbacks
* @brief Callbacks handler which emit the callbacks after Workstation creation *
- * @detail Callback functions have the following signature: `void(WorkstationPtr)`
+ * @details Callback functions have the following signature: `void(WorkstationPtr)`
*/
extern surf_callback(void, WorkstationPtr) workstationCreatedCallbacks;
/** @ingroup SURF_callbacks
* @brief Callbacks handler which emit the callbacks after Workstation destruction *
- * @detail Callback functions have the following signature: `void(WorkstationPtr)`
+ * @details Callback functions have the following signature: `void(WorkstationPtr)`
*/
extern surf_callback(void, WorkstationPtr) workstationDestructedCallbacks;
/** @ingroup SURF_callbacks
* @brief Callbacks handler which emit the callbacks after Workstation State changed *
- * @detail Callback functions have the following signature: `void(WorkstationActionPtr)`
+ * @details Callback functions have the following signature: `void(WorkstationActionPtr)`
*/
extern surf_callback(void, WorkstationPtr) workstationStateChangedCallbacks;
/** @ingroup SURF_callbacks
* @brief Callbacks handler which emit the callbacks after WorkstationAction State changed *
- * @detail Callback functions have the following signature: `void(WorkstationActionPtr)`
+ * @details Callback functions have the following signature: `void(WorkstationActionPtr)`
*/
extern surf_callback(void, WorkstationActionPtr) workstationActionStateChangedCallbacks;
* @brief List directory contents of a path
* @details [long description]
*
+ * @param mount [description]
* @param path The path to the directory
* @return The StorageAction corresponding to the ls action
*/
-/* Copyright (c) 2007-2010, 2013. The SimGrid Team.
+/* Copyright (c) 2007-2010, 2013-2014. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
m_powerCurrent = power_initial;
if (power_trace)
- p_power.event = tmgr_history_add_trace(history, power_trace, 0.0, 0, static_cast<ResourcePtr>(this));
+ p_power.event = tmgr_history_add_trace(history, power_trace, 0.0, 0,
+ static_cast<ResourcePtr>(this));
+ else
+ p_power.event = NULL;
setState(state_initial);
if (state_trace)
/* log - a generic logging facility in the spirit of log4j */
-/* Copyright (c) 2004-2013. The SimGrid Team.
+/* Copyright (c) 2004-2014. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
XBT_LOG_CONNECT(xbt);
XBT_LOG_CONNECT(graphxml_parse);
XBT_LOG_CONNECT(log);
-#if HAVE_MMAP
+#if HAVE_MMALLOC
XBT_LOG_CONNECT(mm_diff);
#endif
XBT_LOG_CONNECT(module);
XBT_LOG_CONNECT(surf_route);
XBT_LOG_CONNECT(surf_routing_generic);
XBT_LOG_CONNECT(surf_route_cluster);
+ XBT_LOG_CONNECT(surf_route_cluster_torus);
XBT_LOG_CONNECT(surf_route_dijkstra);
XBT_LOG_CONNECT(surf_route_floyd);
XBT_LOG_CONNECT(surf_route_full);
/* module handling */
-/* Copyright (c) 2006-2013. The SimGrid Team.
+/* Copyright (c) 2006-2014. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
#include "xbt/misc.h"
-#include "simgrid_config.h" /*HAVE_MMAP _XBT_WIN32 */
-#include "internal_config.h" /* MMALLOC_WANT_OVERRIDE_LEGACY */
+#include "simgrid_config.h" /* _XBT_WIN32 */
+#include "internal_config.h" /* MMALLOC_WANT_OVERRIDE_LEGACY */
#include "xbt/sysdep.h"
#include "xbt/log.h"
-/* Copyright (c) 2009-2010, 2012. The SimGrid Team.
+/* Copyright (c) 2009-2010, 2012, 2014. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
int main(int argc, char *argv[])
{
- int i;
+ int i, n;
double d;
MPI_Init(&argc, &argv);
+ n = argc > 1 ? atoi(argv[1]) : 0;
d = 2.0;
- for (i = 0; i < atoi(argv[1]); i++) {
+ for (i = 0; i < n; i++) {
if (d < 10000) {
d = d * d;
} else {
p Test compute
-! setenv LD_LIBRARY_PATH=../../lib
-! output sort
! timeout 5
-$ ../../smpi_script/bin/smpirun -platform ${srcdir:=.}/../../examples/msg/small_platform_with_routers.xml -hostfile ${srcdir:=.}/hostfile -np 2 ./compute 0
-> 0 2.000000
-> 0 2.000000
-> [0.000000] [surf_config/INFO] Switching workstation model to compound since you changed the network and/or cpu model(s)
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'maxmin/precision' to '1e-9'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/model' to 'SMPI'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/TCP_gamma' to '4194304'
+$ ../../smpi_script/bin/smpirun -platform ${srcdir:=.}/../../examples/msg/small_platform_with_routers.xml -hostfile ${srcdir:=.}/hostfile -np 3 --log=root.thres:warning ./compute 7
+> 7 16.000000
+> 7 16.000000
+> 7 16.000000
+
+p Test compute only once
+! timeout 5
+$ ../../smpi_script/bin/smpirun -platform ${srcdir:=.}/../../examples/msg/small_platform_with_routers.xml -hostfile ${srcdir:=.}/hostfile -np 3 --log=root.thres:warning ./compute2 7
+> 7 16.000000
p Test compute and bench
-! setenv LD_LIBRARY_PATH=../../lib
! output sort
! timeout 45
-$ ../../smpi_script/bin/smpirun -platform ${srcdir:=.}/../../examples/msg/small_platform_with_routers.xml -hostfile ${srcdir:=.}/hostfile -np 2 ./compute3
-> [0.000000] [surf_config/INFO] Switching workstation model to compound since you changed the network and/or cpu model(s)
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'maxmin/precision' to '1e-9'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/model' to 'SMPI'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/TCP_gamma' to '4194304'
-> [0] The result of the computation is: 65536.000000
-> [1] The result of the computation is: 2.000000
-> [rank:0] Run the first computation. It's globally benched, and I want no more than 3 benchmarks (thres<0)
-> [rank:0] Run the first computation. It's globally benched, and I want no more than 3 benchmarks (thres<0)
-> [rank:0] Run the first (locally benched) computation. It's locally benched, and I want the standard error to go below 0.1 second (count is not >0)
-> [rank:0] Run the first (locally benched) computation. It's locally benched, and I want the standard error to go below 0.1 second (count is not >0)
-> [rank:1] Run the first computation. It's globally benched, and I want no more than 3 benchmarks (thres<0)
-> [rank:1] Run the first (locally benched) computation. It's locally benched, and I want the standard error to go below 0.1 second (count is not >0)
-> [rank:1] Run the first (locally benched) computation. It's locally benched, and I want the standard error to go below 0.1 second (count is not >0)
-
+$ ../../smpi_script/bin/smpirun -platform ${srcdir:=.}/../../examples/msg/small_platform_with_routers.xml -hostfile ${srcdir:=.}/hostfile -np 3 --log=root.thres:warning ./compute3 quiet
+> (0) Run the first computation. It's globally benched, and I want no more than 4 benchmarks (thres<0)
+> (0) Run the first computation. It's globally benched, and I want no more than 4 benchmarks (thres<0)
+> (0) Run the first computation. It's globally benched, and I want no more than 4 benchmarks (thres<0)
+> (0) Run the first computation. It's globally benched, and I want no more than 4 benchmarks (thres<0)
+> (1) [rank:0] Run the first (locally benched) computation. It's locally benched, and I want the standard error to go below 0.1 second (count is not >0)
+> (1) [rank:0] Run the first (locally benched) computation. It's locally benched, and I want the standard error to go below 0.1 second (count is not >0)
+> (1) [rank:1] Run the first (locally benched) computation. It's locally benched, and I want the standard error to go below 0.1 second (count is not >0)
+> (1) [rank:1] Run the first (locally benched) computation. It's locally benched, and I want the standard error to go below 0.1 second (count is not >0)
+> (1) [rank:2] Run the first (locally benched) computation. It's locally benched, and I want the standard error to go below 0.1 second (count is not >0)
+> (1) [rank:2] Run the first (locally benched) computation. It's locally benched, and I want the standard error to go below 0.1 second (count is not >0)
+> (2) [rank:0] Done.
+> (2) [rank:1] Done.
+> (2) [rank:2] Done.
-/* Copyright (c) 2009-2010, 2012. The SimGrid Team.
+/* Copyright (c) 2009-2010, 2012, 2014. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
int main(int argc, char *argv[])
{
- int i;
+ int i, n;
double d;
MPI_Init(&argc, &argv);
+ n = argc > 1 ? atoi(argv[1]) : 0;
d = 2.0;
-/* SMPI_DO_ONCE */ {
- for (i = 0; i < atoi(argv[1]); i++) {
+ /* Run it only once across the whole set of processes */
+ SMPI_SAMPLE_GLOBAL(1, -1) {
+ for (i = 0; i < n; i++) {
if (d < 10000) {
d = d * d;
} else {
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
-/* This example should be instructive to learn about SMPI_SAMPLE_LOCAL and
+/* This example should be instructive to learn about SMPI_SAMPLE_LOCAL and
SMPI_SAMPLE_GLOBAL macros for execution sampling */
#include <stdio.h>
#include <mpi.h>
+static double compute(double d0)
+{
+ double d = d0;
+ int j;
+ for (j = 0; j < 100 * 1000 * 1000; j++) { /* 100 kflop */
+ if (d < 100000) {
+ d = d * d;
+ } else {
+ d = 2;
+ }
+ }
+ return d;
+}
+
int main(int argc, char *argv[])
{
- int i,j;
+ int verbose;
+ int i, n;
double d;
MPI_Init(&argc, &argv);
+ verbose = argc <= 1;
+ MPI_Comm_size(MPI_COMM_WORLD, &n);
d = 2.0;
- for (i=0;i<5;i++) {
- SMPI_SAMPLE_GLOBAL(3,-1) { // I want no more than 3 benchs (thres<0)
- fprintf(stderr,"[rank:%d] Run the first computation. It's globally benched, and I want no more than 3 benchmarks (thres<0)\n", smpi_process_index());
-
- for (j=0;j<100*1000*1000;j++) { // 100 kflop
- if (d < 100000) {
- d = d * d;
- } else {
- d = 2;
- }
- }
+ for (i = 0; i < 5; i++) {
+    /* I want no more than n + 1 benchmarks (thres < 0) */
+ SMPI_SAMPLE_GLOBAL(n + 1, -1) {
+ if (verbose)
+ fprintf(stderr, "(%12.6f) [rank:%d]", MPI_Wtime(), smpi_process_index());
+ else
+ fprintf(stderr, "(0)");
+ fprintf(stderr, " Run the first computation. It's globally benched, "
+ "and I want no more than %d benchmarks (thres<0)\n", n + 1);
+ d = compute(2.0);
}
}
- for (i=0;i<5;i++) {
- SMPI_SAMPLE_LOCAL(0, 0.1) { // I want the standard error to go below 0.1 second. Two tests at least will be run (count is not >0)
- fprintf(stderr,"[rank:%d] Run the first (locally benched) computation. It's locally benched, and I want the standard error to go below 0.1 second (count is not >0)\n", smpi_process_index());
- for (j=0;j<100*1000*1000;j++) { // 100 kflop
- if (d < 100000) {
- d = d * d;
- } else {
- d = 2;
- }
- }
+ n = 0;
+ for (i = 0; i < 5; i++) {
+ /* I want the standard error to go below 0.1 second.
+ * Two tests at least will be run (count is not > 0) */
+ SMPI_SAMPLE_LOCAL(0, 0.1) {
+ if (verbose || n++ < 2) {
+ if (verbose)
+ fprintf(stderr, "(%12.6f)", MPI_Wtime());
+ else
+ fprintf(stderr, "(1)");
+ fprintf(stderr,
+ " [rank:%d] Run the first (locally benched) computation. "
+ "It's locally benched, and I want the standard error to go "
+ "below 0.1 second (count is not >0)\n", smpi_process_index());
+ }
+ d = compute(d);
}
}
-
- fprintf(stderr,"[%d] The result of the computation is: %f\n", smpi_process_index(), d);
+
+ if (verbose)
+ fprintf(stderr, "(%12.6f) [rank:%d] The result of the computation is: %f\n",
+ MPI_Wtime(), smpi_process_index(), d);
+ else
+ fprintf(stderr, "(2) [rank:%d] Done.\n", smpi_process_index());
MPI_Finalize();
return 0;
include_directories("${CMAKE_HOME_DIRECTORY}/include/smpi")
include_directories("${CMAKE_CURRENT_SOURCE_DIR}/../include/")
- add_executable(attr2type attr2type.c)
+# add_executable(attr2type attr2type.c)
add_executable(attrend2 attrend2.c)
- add_executable(attrend attrend.c)
- add_executable(attrerr attrerr.c)
- add_executable(attrerrcomm attrerrcomm.c)
- add_executable(attrerrtype attrerrtype.c)
- add_executable(attric attric.c)
- add_executable(attrorder attrorder.c)
- add_executable(attrordercomm attrordercomm.c)
- add_executable(attrordertype attrordertype.c)
- add_executable(attrt attrt.c)
- add_executable(baseattr2 baseattr2.c)
- add_executable(baseattrcomm baseattrcomm.c)
- add_executable(fkeyval fkeyval.c)
- add_executable(fkeyvalcomm fkeyvalcomm.c)
- add_executable(fkeyvaltype fkeyvaltype.c)
- add_executable(keyval_double_free keyval_double_free.c)
+# add_executable(attrend attrend.c)
+# add_executable(attrerr attrerr.c)
+# add_executable(attrerrcomm attrerrcomm.c)
+# add_executable(attrerrtype attrerrtype.c)
+# add_executable(attric attric.c)
+# add_executable(attrorder attrorder.c)
+# add_executable(attrordercomm attrordercomm.c)
+# add_executable(attrordertype attrordertype.c)
+# add_executable(attrt attrt.c)
+# add_executable(baseattr2 baseattr2.c)
+# add_executable(baseattrcomm baseattrcomm.c)
+# add_executable(fkeyval fkeyval.c)
+# add_executable(fkeyvalcomm fkeyvalcomm.c)
+# add_executable(fkeyvaltype fkeyvaltype.c)
+# add_executable(keyval_double_free keyval_double_free.c)
- target_link_libraries(attr2type simgrid mtest_c)
+# target_link_libraries(attr2type simgrid mtest_c)
target_link_libraries(attrend2 simgrid mtest_c)
- target_link_libraries(attrend simgrid mtest_c)
- target_link_libraries(attrerr simgrid mtest_c)
- target_link_libraries(attrerrcomm simgrid mtest_c)
- target_link_libraries(attrerrtype simgrid mtest_c)
- target_link_libraries(attric simgrid mtest_c)
- target_link_libraries(attrorder simgrid mtest_c)
- target_link_libraries(attrordercomm simgrid mtest_c)
- target_link_libraries(attrordertype simgrid mtest_c)
- target_link_libraries(attrt simgrid mtest_c)
- target_link_libraries(baseattr2 simgrid mtest_c)
- target_link_libraries(baseattrcomm simgrid mtest_c)
- target_link_libraries(fkeyval simgrid mtest_c)
- target_link_libraries(fkeyvalcomm simgrid mtest_c)
- target_link_libraries(fkeyvaltype simgrid mtest_c)
- target_link_libraries(keyval_double_free simgrid mtest_c)
+# target_link_libraries(attrend simgrid mtest_c)
+# target_link_libraries(attrerr simgrid mtest_c)
+# target_link_libraries(attrerrcomm simgrid mtest_c)
+# target_link_libraries(attrerrtype simgrid mtest_c)
+# target_link_libraries(attric simgrid mtest_c)
+# target_link_libraries(attrorder simgrid mtest_c)
+# target_link_libraries(attrordercomm simgrid mtest_c)
+# target_link_libraries(attrordertype simgrid mtest_c)
+# target_link_libraries(attrt simgrid mtest_c)
+# target_link_libraries(baseattr2 simgrid mtest_c)
+# target_link_libraries(baseattrcomm simgrid mtest_c)
+# target_link_libraries(fkeyval simgrid mtest_c)
+# target_link_libraries(fkeyvalcomm simgrid mtest_c)
+# target_link_libraries(fkeyvaltype simgrid mtest_c)
+# target_link_libraries(keyval_double_free simgrid mtest_c)
endif()
add_executable(alltoall1 alltoall1.c)
add_executable(alltoallv0 alltoallv0.c)
add_executable(alltoallv alltoallv.c)
- add_executable(alltoallw1 alltoallw1.c)
- add_executable(alltoallw2 alltoallw2.c)
- add_executable(alltoallw_zeros alltoallw_zeros.c)
- add_executable(bcast2 bcast2.c)
- add_executable(bcast3 bcast3.c)
+# add_executable(alltoallw1 alltoallw1.c)
+# add_executable(alltoallw2 alltoallw2.c)
+# add_executable(alltoallw_zeros alltoallw_zeros.c)
+# add_executable(bcast2 bcast2.c)
+# add_executable(bcast3 bcast3.c)
add_executable(bcasttest bcasttest.c)
add_executable(bcastzerotype bcastzerotype.c)
add_executable(coll10 coll10.c)
add_executable(exscan2 exscan2.c)
add_executable(exscan exscan.c)
add_executable(gather2 gather2.c)
- add_executable(gather2_save gather2_save.c)
+# add_executable(gather2_save gather2_save.c)
add_executable(gather gather.c)
add_executable(iallred iallred.c)
add_executable(ibarrier ibarrier.c)
- add_executable(icallgather icallgather.c)
- add_executable(icallgatherv icallgatherv.c)
- add_executable(icallreduce icallreduce.c)
- add_executable(icalltoall icalltoall.c)
- add_executable(icalltoallv icalltoallv.c)
- add_executable(icalltoallw icalltoallw.c)
- add_executable(icbarrier icbarrier.c)
- add_executable(icbcast icbcast.c)
- add_executable(icgather icgather.c)
- add_executable(icgatherv icgatherv.c)
- add_executable(icreduce icreduce.c)
- add_executable(icscatter icscatter.c)
- add_executable(icscatterv icscatterv.c)
+# add_executable(icallgather icallgather.c)
+# add_executable(icallgatherv icallgatherv.c)
+# add_executable(icallreduce icallreduce.c)
+# add_executable(icalltoall icalltoall.c)
+# add_executable(icalltoallv icalltoallv.c)
+# add_executable(icalltoallw icalltoallw.c)
+# add_executable(icbarrier icbarrier.c)
+# add_executable(icbcast icbcast.c)
+# add_executable(icgather icgather.c)
+# add_executable(icgatherv icgatherv.c)
+# add_executable(icreduce icreduce.c)
+# add_executable(icscatter icscatter.c)
+# add_executable(icscatterv icscatterv.c)
add_executable(longuser longuser.c)
add_executable(nonblocking2 nonblocking2.c)
add_executable(nonblocking3 nonblocking3.c)
add_executable(nonblocking nonblocking.c)
- add_executable(opband opband.c)
- add_executable(opbor opbor.c)
- add_executable(opbxor opbxor.c)
+# add_executable(opband opband.c)
+# add_executable(opbor opbor.c)
+# add_executable(opbxor opbxor.c)
add_executable(op_commutative op_commutative.c)
- add_executable(opland opland.c)
- add_executable(oplor oplor.c)
- add_executable(oplxor oplxor.c)
- add_executable(opmax opmax.c)
- add_executable(opmaxloc opmaxloc.c)
- add_executable(opmin opmin.c)
- add_executable(opminloc opminloc.c)
- add_executable(opprod opprod.c)
- add_executable(opsum opsum.c)
+# add_executable(opland opland.c)
+# add_executable(oplor oplor.c)
+# add_executable(oplxor oplxor.c)
+# add_executable(opmax opmax.c)
+# add_executable(opmaxloc opmaxloc.c)
+# add_executable(opmin opmin.c)
+# add_executable(opminloc opminloc.c)
+# add_executable(opprod opprod.c)
+# add_executable(opsum opsum.c)
add_executable(red3 red3.c)
add_executable(red4 red4.c)
add_executable(redscat2 redscat2.c)
add_executable(red_scat_block2 red_scat_block2.c)
add_executable(red_scat_block red_scat_block.c)
add_executable(redscat redscat.c)
- add_executable(redscatinter redscatinter.c)
+# add_executable(redscatinter redscatinter.c)
add_executable(reduce_mpich reduce.c)
add_executable(reduce_local reduce_local.c)
add_executable(scantst scantst.c)
add_executable(scatter2 scatter2.c)
add_executable(scatter3 scatter3.c)
add_executable(scattern scattern.c)
- add_executable(scatterv scatterv.c)
- add_executable(uoplong uoplong.c)
+# add_executable(scatterv scatterv.c)
+# add_executable(uoplong uoplong.c)
target_link_libraries(allgather2 simgrid mtest_c)
target_link_libraries(allgather3 simgrid mtest_c)
target_link_libraries(alltoall1 simgrid mtest_c)
target_link_libraries(alltoallv0 simgrid mtest_c)
target_link_libraries(alltoallv simgrid mtest_c)
- target_link_libraries(alltoallw1 simgrid mtest_c)
- target_link_libraries(alltoallw2 simgrid mtest_c)
- target_link_libraries(alltoallw_zeros simgrid mtest_c)
- target_link_libraries(bcast2 simgrid mtest_c)
- target_link_libraries(bcast3 simgrid mtest_c)
+# target_link_libraries(alltoallw1 simgrid mtest_c)
+# target_link_libraries(alltoallw2 simgrid mtest_c)
+# target_link_libraries(alltoallw_zeros simgrid mtest_c)
+# target_link_libraries(bcast2 simgrid mtest_c)
+# target_link_libraries(bcast3 simgrid mtest_c)
target_link_libraries(bcasttest simgrid mtest_c)
target_link_libraries(bcastzerotype simgrid mtest_c)
target_link_libraries(coll10 simgrid mtest_c)
target_link_libraries(exscan2 simgrid mtest_c)
target_link_libraries(exscan simgrid mtest_c)
target_link_libraries(gather2 simgrid mtest_c)
- target_link_libraries(gather2_save simgrid mtest_c)
+# target_link_libraries(gather2_save simgrid mtest_c)
target_link_libraries(gather simgrid mtest_c)
target_link_libraries(iallred simgrid mtest_c)
target_link_libraries(ibarrier simgrid mtest_c)
- target_link_libraries(icallgather simgrid mtest_c)
- target_link_libraries(icallgatherv simgrid mtest_c)
- target_link_libraries(icallreduce simgrid mtest_c)
- target_link_libraries(icalltoall simgrid mtest_c)
- target_link_libraries(icalltoallv simgrid mtest_c)
- target_link_libraries(icalltoallw simgrid mtest_c)
- target_link_libraries(icbarrier simgrid mtest_c)
- target_link_libraries(icbcast simgrid mtest_c)
- target_link_libraries(icgather simgrid mtest_c)
- target_link_libraries(icgatherv simgrid mtest_c)
- target_link_libraries(icreduce simgrid mtest_c)
- target_link_libraries(icscatter simgrid mtest_c)
- target_link_libraries(icscatterv simgrid mtest_c)
+# target_link_libraries(icallgather simgrid mtest_c)
+# target_link_libraries(icallgatherv simgrid mtest_c)
+# target_link_libraries(icallreduce simgrid mtest_c)
+# target_link_libraries(icalltoall simgrid mtest_c)
+# target_link_libraries(icalltoallv simgrid mtest_c)
+# target_link_libraries(icalltoallw simgrid mtest_c)
+# target_link_libraries(icbarrier simgrid mtest_c)
+# target_link_libraries(icbcast simgrid mtest_c)
+# target_link_libraries(icgather simgrid mtest_c)
+# target_link_libraries(icgatherv simgrid mtest_c)
+# target_link_libraries(icreduce simgrid mtest_c)
+# target_link_libraries(icscatter simgrid mtest_c)
+# target_link_libraries(icscatterv simgrid mtest_c)
target_link_libraries(longuser simgrid mtest_c)
target_link_libraries(nonblocking2 simgrid mtest_c)
target_link_libraries(nonblocking3 simgrid mtest_c)
target_link_libraries(nonblocking simgrid mtest_c)
- target_link_libraries(opband simgrid mtest_c)
- target_link_libraries(opbor simgrid mtest_c)
- target_link_libraries(opbxor simgrid mtest_c)
+# target_link_libraries(opband simgrid mtest_c)
+# target_link_libraries(opbor simgrid mtest_c)
+# target_link_libraries(opbxor simgrid mtest_c)
target_link_libraries(op_commutative simgrid mtest_c)
- target_link_libraries(opland simgrid mtest_c)
- target_link_libraries(oplor simgrid mtest_c)
- target_link_libraries(oplxor simgrid mtest_c)
- target_link_libraries(opmax simgrid mtest_c)
- target_link_libraries(opmaxloc simgrid mtest_c)
- target_link_libraries(opmin simgrid mtest_c)
- target_link_libraries(opminloc simgrid mtest_c)
- target_link_libraries(opprod simgrid mtest_c)
- target_link_libraries(opsum simgrid mtest_c)
+# target_link_libraries(opland simgrid mtest_c)
+# target_link_libraries(oplor simgrid mtest_c)
+# target_link_libraries(oplxor simgrid mtest_c)
+# target_link_libraries(opmax simgrid mtest_c)
+# target_link_libraries(opmaxloc simgrid mtest_c)
+# target_link_libraries(opmin simgrid mtest_c)
+# target_link_libraries(opminloc simgrid mtest_c)
+# target_link_libraries(opprod simgrid mtest_c)
+# target_link_libraries(opsum simgrid mtest_c)
target_link_libraries(red3 simgrid mtest_c)
target_link_libraries(red4 simgrid mtest_c)
target_link_libraries(redscat2 simgrid mtest_c)
target_link_libraries(red_scat_block2 simgrid mtest_c)
target_link_libraries(red_scat_block simgrid mtest_c)
target_link_libraries(redscat simgrid mtest_c)
- target_link_libraries(redscatinter simgrid mtest_c)
+# target_link_libraries(redscatinter simgrid mtest_c)
target_link_libraries(reduce_mpich simgrid mtest_c)
target_link_libraries(reduce_local simgrid mtest_c)
target_link_libraries(scantst simgrid mtest_c)
target_link_libraries(scatter2 simgrid mtest_c)
target_link_libraries(scatter3 simgrid mtest_c)
target_link_libraries(scattern simgrid mtest_c)
- target_link_libraries(scatterv simgrid mtest_c)
- target_link_libraries(uoplong simgrid mtest_c)
+# target_link_libraries(scatterv simgrid mtest_c)
+# target_link_libraries(uoplong simgrid mtest_c)
endif()
add_executable(comm_group_rand comm_group_rand.c)
# add_executable(comm_idup comm_idup.c)
add_executable(comm_info comm_info.c)
- add_executable(commname commname.c)
+# add_executable(commname commname.c)
add_executable(ctxalloc ctxalloc.c)
add_executable(ctxsplit ctxsplit.c)
add_executable(dup dup.c)
- add_executable(dupic dupic.c)
+# add_executable(dupic dupic.c)
add_executable(dup_with_info dup_with_info.c)
- add_executable(ic1 ic1.c)
- add_executable(ic2 ic2.c)
- add_executable(iccreate iccreate.c)
- add_executable(icgroup icgroup.c)
- add_executable(icm icm.c)
- add_executable(icsplit icsplit.c)
- add_executable(probe-intercomm probe-intercomm.c)
+# add_executable(ic1 ic1.c)
+# add_executable(ic2 ic2.c)
+# add_executable(iccreate iccreate.c)
+# add_executable(icgroup icgroup.c)
+# add_executable(icm icm.c)
+# add_executable(icsplit icsplit.c)
+# add_executable(probe-intercomm probe-intercomm.c)
target_link_libraries(cmfree simgrid mtest_c)
target_link_libraries(cmsplit2 simgrid mtest_c)
target_link_libraries(comm_group_rand simgrid mtest_c)
# target_link_libraries(comm_idup simgrid mtest_c)
target_link_libraries(comm_info simgrid mtest_c)
- target_link_libraries(commname simgrid mtest_c)
+# target_link_libraries(commname simgrid mtest_c)
target_link_libraries(ctxalloc simgrid mtest_c)
target_link_libraries(ctxsplit simgrid mtest_c)
target_link_libraries(dup simgrid mtest_c)
- target_link_libraries(dupic simgrid mtest_c)
+# target_link_libraries(dupic simgrid mtest_c)
target_link_libraries(dup_with_info simgrid mtest_c)
- target_link_libraries(ic1 simgrid mtest_c)
- target_link_libraries(ic2 simgrid mtest_c)
- target_link_libraries(iccreate simgrid mtest_c)
- target_link_libraries(icgroup simgrid mtest_c)
- target_link_libraries(icm simgrid mtest_c)
- target_link_libraries(icsplit simgrid mtest_c)
- target_link_libraries(probe-intercomm simgrid mtest_c)
+# target_link_libraries(ic1 simgrid mtest_c)
+# target_link_libraries(ic2 simgrid mtest_c)
+# target_link_libraries(iccreate simgrid mtest_c)
+# target_link_libraries(icgroup simgrid mtest_c)
+# target_link_libraries(icm simgrid mtest_c)
+# target_link_libraries(icsplit simgrid mtest_c)
+# target_link_libraries(probe-intercomm simgrid mtest_c)
endif()
include_directories("${CMAKE_HOME_DIRECTORY}/include/smpi")
include_directories("${CMAKE_CURRENT_SOURCE_DIR}/../include/")
- add_executable(blockindexed-misc blockindexed-misc.c)
+# add_executable(blockindexed-misc blockindexed-misc.c)
add_executable(blockindexed-zero-count blockindexed-zero-count.c)
- add_executable(contents contents.c)
- add_executable(contigstruct contigstruct.c)
+# add_executable(contents contents.c)
+# add_executable(contigstruct contigstruct.c)
add_executable(contig-zero-count contig-zero-count.c)
add_executable(cxx-types cxx-types.c)
- add_executable(darray-cyclic darray-cyclic.c)
- add_executable(darray-pack darray-pack.c)
+# add_executable(darray-cyclic darray-cyclic.c)
+# add_executable(darray-pack darray-pack.c)
add_executable(gaddress gaddress.c)
- add_executable(get-elements get-elements.c)
- add_executable(get-elements-pairtype get-elements-pairtype.c)
- add_executable(getpartelm getpartelm.c)
+# add_executable(get-elements get-elements.c)
+# add_executable(get-elements-pairtype get-elements-pairtype.c)
+# add_executable(getpartelm getpartelm.c)
add_executable(hindexed_block hindexed_block.c)
add_executable(hindexed_block_contents hindexed_block_contents.c)
- add_executable(hindexed-zeros hindexed-zeros.c)
- add_executable(indexed-misc indexed-misc.c)
- add_executable(large-count large-count.c)
- add_executable(lbub lbub.c)
- add_executable(localpack localpack.c)
+# add_executable(hindexed-zeros hindexed-zeros.c)
+# add_executable(indexed-misc indexed-misc.c)
+# add_executable(large-count large-count.c)
+# add_executable(lbub lbub.c)
+# add_executable(localpack localpack.c)
add_executable(longdouble longdouble.c)
- add_executable(lots-of-types lots-of-types.c)
- add_executable(pairtype-pack pairtype-pack.c)
- add_executable(pairtype-size-extent pairtype-size-extent.c)
+# add_executable(lots-of-types lots-of-types.c)
+# add_executable(pairtype-pack pairtype-pack.c)
+# add_executable(pairtype-size-extent pairtype-size-extent.c)
add_executable(simple-commit simple-commit.c)
- add_executable(simple-pack simple-pack.c)
- add_executable(simple-pack-external simple-pack-external.c)
- add_executable(simple-resized simple-resized.c)
+# add_executable(simple-pack simple-pack.c)
+# add_executable(simple-pack-external simple-pack-external.c)
+# add_executable(simple-resized simple-resized.c)
add_executable(simple-size-extent simple-size-extent.c)
- add_executable(sizedtypes sizedtypes.c)
- add_executable(slice-pack slice-pack.c)
- add_executable(slice-pack-external slice-pack-external.c)
+# add_executable(sizedtypes sizedtypes.c)
+# add_executable(slice-pack slice-pack.c)
+# add_executable(slice-pack-external slice-pack-external.c)
add_executable(struct-derived-zeros struct-derived-zeros.c)
- add_executable(struct-empty-el struct-empty-el.c)
+# add_executable(struct-empty-el struct-empty-el.c)
add_executable(struct-ezhov struct-ezhov.c)
- add_executable(struct-no-real-types struct-no-real-types.c)
- add_executable(struct-pack struct-pack.c)
+# add_executable(struct-no-real-types struct-no-real-types.c)
+# add_executable(struct-pack struct-pack.c)
add_executable(struct-verydeep struct-verydeep.c)
add_executable(struct-zero-count struct-zero-count.c)
- add_executable(subarray subarray.c)
- add_executable(subarray-pack subarray-pack.c)
+# add_executable(subarray subarray.c)
+# add_executable(subarray-pack subarray-pack.c)
add_executable(tfree tfree.c)
- add_executable(tmatchsize tmatchsize.c)
- add_executable(transpose-pack transpose-pack.c)
- add_executable(tresized2 tresized2.c)
- add_executable(tresized tresized.c)
- add_executable(triangular-pack triangular-pack.c)
+# add_executable(tmatchsize tmatchsize.c)
+# add_executable(transpose-pack transpose-pack.c)
+# add_executable(tresized2 tresized2.c)
+# add_executable(tresized tresized.c)
+# add_executable(triangular-pack triangular-pack.c)
add_executable(typecommit typecommit.c)
- add_executable(typefree typefree.c)
+# add_executable(typefree typefree.c)
add_executable(typelb typelb.c)
- add_executable(typename typename.c)
- add_executable(unpack unpack.c)
- add_executable(unusual-noncontigs unusual-noncontigs.c)
- add_executable(zero-blklen-vector zero-blklen-vector.c)
- add_executable(zeroblks zeroblks.c)
+# add_executable(typename typename.c)
+# add_executable(unpack unpack.c)
+# add_executable(unusual-noncontigs unusual-noncontigs.c)
+# add_executable(zero-blklen-vector zero-blklen-vector.c)
+# add_executable(zeroblks zeroblks.c)
add_executable(zeroparms zeroparms.c)
- target_link_libraries(blockindexed-misc simgrid mtest_c)
+# target_link_libraries(blockindexed-misc simgrid mtest_c)
target_link_libraries(blockindexed-zero-count simgrid mtest_c)
- target_link_libraries(contents simgrid mtest_c)
- target_link_libraries(contigstruct simgrid mtest_c)
+# target_link_libraries(contents simgrid mtest_c)
+# target_link_libraries(contigstruct simgrid mtest_c)
target_link_libraries(contig-zero-count simgrid mtest_c)
target_link_libraries(cxx-types simgrid mtest_c)
- target_link_libraries(darray-cyclic simgrid mtest_c)
- target_link_libraries(darray-pack simgrid mtest_c)
+# target_link_libraries(darray-cyclic simgrid mtest_c)
+# target_link_libraries(darray-pack simgrid mtest_c)
target_link_libraries(gaddress simgrid mtest_c)
- target_link_libraries(get-elements simgrid mtest_c)
- target_link_libraries(get-elements-pairtype simgrid mtest_c)
- target_link_libraries(getpartelm simgrid mtest_c)
+# target_link_libraries(get-elements simgrid mtest_c)
+# target_link_libraries(get-elements-pairtype simgrid mtest_c)
+# target_link_libraries(getpartelm simgrid mtest_c)
target_link_libraries(hindexed_block simgrid mtest_c)
target_link_libraries(hindexed_block_contents simgrid mtest_c)
- target_link_libraries(hindexed-zeros simgrid mtest_c)
- target_link_libraries(indexed-misc simgrid mtest_c)
- target_link_libraries(large-count simgrid mtest_c)
- target_link_libraries(lbub simgrid mtest_c)
- target_link_libraries(localpack simgrid mtest_c)
+# target_link_libraries(hindexed-zeros simgrid mtest_c)
+# target_link_libraries(indexed-misc simgrid mtest_c)
+# target_link_libraries(large-count simgrid mtest_c)
+# target_link_libraries(lbub simgrid mtest_c)
+# target_link_libraries(localpack simgrid mtest_c)
target_link_libraries(longdouble simgrid mtest_c)
- target_link_libraries(lots-of-types simgrid mtest_c)
- target_link_libraries(pairtype-pack simgrid mtest_c)
- target_link_libraries(pairtype-size-extent simgrid mtest_c)
+# target_link_libraries(lots-of-types simgrid mtest_c)
+# target_link_libraries(pairtype-pack simgrid mtest_c)
+# target_link_libraries(pairtype-size-extent simgrid mtest_c)
target_link_libraries(simple-commit simgrid mtest_c)
- target_link_libraries(simple-pack simgrid mtest_c)
- target_link_libraries(simple-pack-external simgrid mtest_c)
- target_link_libraries(simple-resized simgrid mtest_c)
+# target_link_libraries(simple-pack simgrid mtest_c)
+# target_link_libraries(simple-pack-external simgrid mtest_c)
+# target_link_libraries(simple-resized simgrid mtest_c)
target_link_libraries(simple-size-extent simgrid mtest_c)
- target_link_libraries(sizedtypes simgrid mtest_c)
- target_link_libraries(slice-pack simgrid mtest_c)
- target_link_libraries(slice-pack-external simgrid mtest_c)
+# target_link_libraries(sizedtypes simgrid mtest_c)
+# target_link_libraries(slice-pack simgrid mtest_c)
+# target_link_libraries(slice-pack-external simgrid mtest_c)
target_link_libraries(struct-derived-zeros simgrid mtest_c)
- target_link_libraries(struct-empty-el simgrid mtest_c)
+# target_link_libraries(struct-empty-el simgrid mtest_c)
target_link_libraries(struct-ezhov simgrid mtest_c)
- target_link_libraries(struct-no-real-types simgrid mtest_c)
- target_link_libraries(struct-pack simgrid mtest_c)
+# target_link_libraries(struct-no-real-types simgrid mtest_c)
+# target_link_libraries(struct-pack simgrid mtest_c)
target_link_libraries(struct-verydeep simgrid mtest_c)
target_link_libraries(struct-zero-count simgrid mtest_c)
- target_link_libraries(subarray simgrid mtest_c)
- target_link_libraries(subarray-pack simgrid mtest_c)
+# target_link_libraries(subarray simgrid mtest_c)
+# target_link_libraries(subarray-pack simgrid mtest_c)
target_link_libraries(tfree simgrid mtest_c)
- target_link_libraries(tmatchsize simgrid mtest_c)
- target_link_libraries(transpose-pack simgrid mtest_c)
- target_link_libraries(tresized2 simgrid mtest_c)
- target_link_libraries(tresized simgrid mtest_c)
- target_link_libraries(triangular-pack simgrid mtest_c)
+# target_link_libraries(tmatchsize simgrid mtest_c)
+# target_link_libraries(transpose-pack simgrid mtest_c)
+# target_link_libraries(tresized2 simgrid mtest_c)
+# target_link_libraries(tresized simgrid mtest_c)
+# target_link_libraries(triangular-pack simgrid mtest_c)
target_link_libraries(typecommit simgrid mtest_c)
- target_link_libraries(typefree simgrid mtest_c)
+# target_link_libraries(typefree simgrid mtest_c)
target_link_libraries(typelb simgrid mtest_c)
- target_link_libraries(typename simgrid mtest_c)
- target_link_libraries(unpack simgrid mtest_c)
- target_link_libraries(unusual-noncontigs simgrid mtest_c)
- target_link_libraries(zero-blklen-vector simgrid mtest_c)
- target_link_libraries(zeroblks simgrid mtest_c)
+# target_link_libraries(typename simgrid mtest_c)
+# target_link_libraries(unpack simgrid mtest_c)
+# target_link_libraries(unusual-noncontigs simgrid mtest_c)
+# target_link_libraries(zero-blklen-vector simgrid mtest_c)
+# target_link_libraries(zeroblks simgrid mtest_c)
target_link_libraries(zeroparms simgrid mtest_c)
endif()
#blockindexed-misc 1
#needs MPI_Pack, MPI_unpack, MPI_Pack_size
#indexed-misc 1
-#nees MPI_Type_create_subarray
+#needs MPI_Type_create_subarray
#subarray-pack 1
#subarray 2
-#nees MPI_Type_create_darray
+#needs MPI_Type_create_darray
#darray-pack 1
#darray-pack 9
# darray-pack 72
include_directories("${CMAKE_HOME_DIRECTORY}/include/smpi")
include_directories("${CMAKE_CURRENT_SOURCE_DIR}/../include/")
- add_executable(allredint8f allredint8f.f)
- add_executable(allredopttf allredopttf.f)
+# add_executable(allredint8f allredint8f.f)
+# add_executable(allredopttf allredopttf.f)
add_executable(alltoallvf alltoallvf.f)
- add_executable(alltoallwf alltoallwf.f)
+# add_executable(alltoallwf alltoallwf.f)
add_executable(exscanf exscanf.f)
add_executable(inplacef inplacef.f)
# add_executable(nonblockingf nonblockingf.f)
add_executable(uallreducef uallreducef.f)
add_executable(vw_inplacef vw_inplacef.f)
- target_link_libraries(allredint8f simgrid mtest_f77)
- target_link_libraries(allredopttf simgrid mtest_f77)
+# target_link_libraries(allredint8f simgrid mtest_f77)
+# target_link_libraries(allredopttf simgrid mtest_f77)
target_link_libraries(alltoallvf simgrid mtest_f77)
- target_link_libraries(alltoallwf simgrid mtest_f77)
+# target_link_libraries(alltoallwf simgrid mtest_f77)
target_link_libraries(exscanf simgrid mtest_f77)
target_link_libraries(inplacef simgrid mtest_f77)
# target_link_libraries(nonblockingf simgrid mtest_f77)
integer count, datatype
integer i
-! if (datatype .ne. MPI_INTEGER) then
-! write(6,*) 'Invalid datatype passed to user_op()'
-! return
-! endif
+ if (.false.) then
+ if (datatype .ne. MPI_INTEGER) then
+ write(6,*) 'Invalid datatype passed to user_op()'
+ return
+ endif
+ endif
do i=1, count
cout(i) = cin(i) + cout(i)
include_directories("${CMAKE_CURRENT_SOURCE_DIR}/../include/")
# add_executable(commerrf commerrf.f)
- add_executable(commnamef commnamef.f)
+# add_executable(commnamef commnamef.f)
# target_link_libraries(commerrf simgrid mtest_f77)
- target_link_libraries(commnamef simgrid mtest_f77)
+# target_link_libraries(commnamef simgrid mtest_f77)
endif()
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/typeaints.h.in ${CMAKE_CURRENT_SOURCE_DIR}/typeaints.h @ONLY)
- add_executable(allctypesf allctypesf.f)
+# add_executable(allctypesf allctypesf.f)
add_executable(gaddressf gaddressf.f)
- add_executable(hindex1f hindex1f.f)
- add_executable(hindexed_blockf hindexed_blockf.f)
- add_executable(packef packef.f)
- add_executable(typecntsf typecntsf.f)
- add_executable(typem2f typem2f.f)
- add_executable(typename3f typename3f.f)
- add_executable(typenamef typenamef.f)
- add_executable(typesnamef typesnamef.f)
- add_executable(typesubf typesubf.f)
+# add_executable(hindex1f hindex1f.f)
+# add_executable(hindexed_blockf hindexed_blockf.f)
+# add_executable(packef packef.f)
+# add_executable(typecntsf typecntsf.f)
+# add_executable(typem2f typem2f.f)
+# add_executable(typename3f typename3f.f)
+# add_executable(typenamef typenamef.f)
+# add_executable(typesnamef typesnamef.f)
+# add_executable(typesubf typesubf.f)
- target_link_libraries(allctypesf simgrid mtest_f77)
+# target_link_libraries(allctypesf simgrid mtest_f77)
target_link_libraries(gaddressf simgrid mtest_f77)
- target_link_libraries(hindex1f simgrid mtest_f77)
- target_link_libraries(hindexed_blockf simgrid mtest_f77)
- target_link_libraries(packef simgrid mtest_f77)
- target_link_libraries(typecntsf simgrid mtest_f77)
- target_link_libraries(typem2f simgrid mtest_f77)
- target_link_libraries(typename3f simgrid mtest_f77)
- target_link_libraries(typenamef simgrid mtest_f77)
- target_link_libraries(typesnamef simgrid mtest_f77)
- target_link_libraries(typesubf simgrid mtest_f77)
+# target_link_libraries(hindex1f simgrid mtest_f77)
+# target_link_libraries(hindexed_blockf simgrid mtest_f77)
+# target_link_libraries(packef simgrid mtest_f77)
+# target_link_libraries(typecntsf simgrid mtest_f77)
+# target_link_libraries(typem2f simgrid mtest_f77)
+# target_link_libraries(typename3f simgrid mtest_f77)
+# target_link_libraries(typenamef simgrid mtest_f77)
+# target_link_libraries(typesnamef simgrid mtest_f77)
+# target_link_libraries(typesubf simgrid mtest_f77)
endif()
include_directories("${CMAKE_CURRENT_SOURCE_DIR}/../include/")
add_executable(allpairf allpairf.f)
- add_executable(greqf greqf.f dummyf.f)
+# add_executable(greqf greqf.f dummyf.f)
#add_executable(mprobef mprobef.f)
- add_executable(statusesf statusesf.f)
+# add_executable(statusesf statusesf.f)
target_link_libraries(allpairf simgrid mtest_f77)
- target_link_libraries(greqf simgrid mtest_f77)
+# target_link_libraries(greqf simgrid mtest_f77)
#target_link_libraries(mprobef simgrid mtest_f77)
- target_link_libraries(statusesf simgrid mtest_f77)
+# target_link_libraries(statusesf simgrid mtest_f77)
endif()
set(EXECUTABLE_OUTPUT_PATH "${CMAKE_CURRENT_BINARY_DIR}")
include_directories("${CMAKE_HOME_DIRECTORY}/include/smpi")
- add_executable(allredint8f90 allredint8f90.f90)
- add_executable(allredopttf90 allredopttf90.f90)
+# add_executable(allredint8f90 allredint8f90.f90)
+# add_executable(allredopttf90 allredopttf90.f90)
add_executable(alltoallvf90 alltoallvf90.f90)
- add_executable(alltoallwf90 alltoallwf90.f90)
+# add_executable(alltoallwf90 alltoallwf90.f90)
add_executable(exscanf90 exscanf90.f90)
add_executable(inplacef90 inplacef90.f90)
# add_executable(nonblockingf90 nonblockingf90.f90)
add_executable(split_typef90 split_typef90.f90)
add_executable(uallreducef90 uallreducef90.f90)
add_executable(vw_inplacef90 vw_inplacef90.f90)
- target_link_libraries(allredint8f90 simgrid mtest_f90)
- target_link_libraries(allredopttf90 simgrid mtest_f90)
+# target_link_libraries(allredint8f90 simgrid mtest_f90)
+# target_link_libraries(allredopttf90 simgrid mtest_f90)
target_link_libraries(alltoallvf90 simgrid mtest_f90)
- target_link_libraries(alltoallwf90 simgrid mtest_f90)
+# target_link_libraries(alltoallwf90 simgrid mtest_f90)
target_link_libraries(exscanf90 simgrid mtest_f90)
target_link_libraries(inplacef90 simgrid mtest_f90)
# target_link_libraries(nonblockingf90 simgrid mtest_f90)
integer count, datatype
integer i
-! if (datatype .ne. MPI_INTEGER) then
-! write(6,*) 'Invalid datatype passed to user_op()'
-! return
-! endif
+ if (.false.) then
+ if (datatype .ne. MPI_INTEGER) then
+ write(6,*) 'Invalid datatype passed to user_op()'
+ return
+ endif
+ endif
do i=1, count
cout(i) = cin(i) + cout(i)
set(EXECUTABLE_OUTPUT_PATH "${CMAKE_CURRENT_BINARY_DIR}")
include_directories("${CMAKE_HOME_DIRECTORY}/include/smpi")
- add_executable(allctypesf90 allctypesf90.f90)
+# add_executable(allctypesf90 allctypesf90.f90)
# add_executable(createf90 createf90.f90)
add_executable(gaddressf90 gaddressf90.f90)
# add_executable(get_elem_d get_elem_d.f90)
# add_executable(get_elem_u get_elem_u.f90)
- add_executable(hindex1f90 hindex1f90.f90)
- add_executable(hindexed_blockf90 hindexed_blockf90.f90)
+# add_executable(hindex1f90 hindex1f90.f90)
+# add_executable(hindexed_blockf90 hindexed_blockf90.f90)
add_executable(indtype indtype.f90)
add_executable(kinds kinds.f90)
- add_executable(packef90 packef90.f90)
+# add_executable(packef90 packef90.f90)
# add_executable(sizeof sizeof.f90)
# add_executable(structf structf.f90)
# add_executable(trf90 trf90.f90)
- add_executable(typecntsf90 typecntsf90.f90)
- add_executable(typem2f90 typem2f90.f90)
- add_executable(typename3f90 typename3f90.f90)
- add_executable(typenamef90 typenamef90.f90)
- add_executable(typesnamef90 typesnamef90.f90)
- add_executable(typesubf90 typesubf90.f90)
- target_link_libraries(allctypesf90 simgrid mtest_f90)
+# add_executable(typecntsf90 typecntsf90.f90)
+# add_executable(typem2f90 typem2f90.f90)
+# add_executable(typename3f90 typename3f90.f90)
+# add_executable(typenamef90 typenamef90.f90)
+# add_executable(typesnamef90 typesnamef90.f90)
+# add_executable(typesubf90 typesubf90.f90)
+# target_link_libraries(allctypesf90 simgrid mtest_f90)
# target_link_libraries(createf90 simgrid mtest_f90)
target_link_libraries(gaddressf90 simgrid mtest_f90)
# target_link_libraries(get_elem_d simgrid mtest_f90)
# target_link_libraries(get_elem_u simgrid mtest_f90)
- target_link_libraries(hindex1f90 simgrid mtest_f90)
- target_link_libraries(hindexed_blockf90 simgrid mtest_f90)
+# target_link_libraries(hindex1f90 simgrid mtest_f90)
+# target_link_libraries(hindexed_blockf90 simgrid mtest_f90)
target_link_libraries(indtype simgrid mtest_f90)
target_link_libraries(kinds simgrid mtest_f90)
- target_link_libraries(packef90 simgrid mtest_f90)
+# target_link_libraries(packef90 simgrid mtest_f90)
# target_link_libraries(sizeof simgrid mtest_f90)
# target_link_libraries(structf simgrid mtest_f90)
# target_link_libraries(trf90 simgrid mtest_f90)
- target_link_libraries(typecntsf90 simgrid mtest_f90)
- target_link_libraries(typem2f90 simgrid mtest_f90)
- target_link_libraries(typename3f90 simgrid mtest_f90)
- target_link_libraries(typenamef90 simgrid mtest_f90)
- target_link_libraries(typesnamef90 simgrid mtest_f90)
- target_link_libraries(typesubf90 simgrid mtest_f90)
+# target_link_libraries(typecntsf90 simgrid mtest_f90)
+# target_link_libraries(typem2f90 simgrid mtest_f90)
+# target_link_libraries(typename3f90 simgrid mtest_f90)
+# target_link_libraries(typenamef90 simgrid mtest_f90)
+# target_link_libraries(typesnamef90 simgrid mtest_f90)
+# target_link_libraries(typesubf90 simgrid mtest_f90)
endif()
set(EXECUTABLE_OUTPUT_PATH "${CMAKE_CURRENT_BINARY_DIR}")
include_directories("${CMAKE_HOME_DIRECTORY}/include/smpi")
- add_executable(allpairf90 allpairf90.f90)
- add_executable(greqf90 greqf90.f90 dummyf90.f90)
+# add_executable(allpairf90 allpairf90.f90)
+# add_executable(greqf90 greqf90.f90 dummyf90.f90)
# add_executable(mprobef90 mprobef90.f90)
add_executable(statusesf90 statusesf90.f90)
- target_link_libraries(allpairf90 simgrid mtest_f90)
- target_link_libraries(greqf90 simgrid mtest_f90)
+# target_link_libraries(allpairf90 simgrid mtest_f90)
+# target_link_libraries(greqf90 simgrid mtest_f90)
# target_link_libraries(mprobef90 simgrid mtest_f90)
target_link_libraries(statusesf90 simgrid mtest_f90)
include_directories("${CMAKE_CURRENT_SOURCE_DIR}/../include/")
add_executable(groupcreate groupcreate.c)
- add_executable(groupnullincl groupnullincl.c)
+# add_executable(groupnullincl groupnullincl.c)
add_executable(grouptest2 grouptest2.c)
add_executable(grouptest grouptest.c)
add_executable(gtranks gtranks.c)
- add_executable(gtranksperf gtranksperf.c)
+# add_executable(gtranksperf gtranksperf.c)
target_link_libraries(groupcreate simgrid mtest_c)
- target_link_libraries(groupnullincl simgrid mtest_c)
+# target_link_libraries(groupnullincl simgrid mtest_c)
target_link_libraries(grouptest2 simgrid mtest_c)
target_link_libraries(grouptest simgrid mtest_c)
target_link_libraries(gtranks simgrid mtest_c)
- target_link_libraries(gtranksperf simgrid mtest_c)
+# target_link_libraries(gtranksperf simgrid mtest_c)
endif()
include_directories("${CMAKE_HOME_DIRECTORY}/include/smpi")
include_directories("${CMAKE_CURRENT_SOURCE_DIR}/../include/")
- add_executable(attrself attrself.c)
+# add_executable(attrself attrself.c)
add_executable(exitst1 exitst1.c)
add_executable(exitst2 exitst2.c)
- add_executable(exitst3 exitst3.c)
+# add_executable(exitst3 exitst3.c)
add_executable(finalized finalized.c)
add_executable(initstat initstat.c)
add_executable(library_version library_version.c)
- add_executable(timeout timeout.c)
+# add_executable(timeout timeout.c)
add_executable(version version.c)
- target_link_libraries(attrself simgrid mtest_c)
+# target_link_libraries(attrself simgrid mtest_c)
target_link_libraries(exitst1 simgrid mtest_c)
target_link_libraries(exitst2 simgrid mtest_c)
- target_link_libraries(exitst3 simgrid mtest_c)
+# target_link_libraries(exitst3 simgrid mtest_c)
target_link_libraries(finalized simgrid mtest_c)
target_link_libraries(initstat simgrid mtest_c)
target_link_libraries(library_version simgrid mtest_c)
- target_link_libraries(timeout simgrid mtest_c)
+# target_link_libraries(timeout simgrid mtest_c)
target_link_libraries(version simgrid mtest_c)
endif()
add_executable(anyall anyall.c)
add_executable(bottom bottom.c)
- add_executable(bsend1 bsend1.c)
- add_executable(bsend2 bsend2.c)
- add_executable(bsend3 bsend3.c)
- add_executable(bsend4 bsend4.c)
- add_executable(bsend5 bsend5.c)
- add_executable(bsendalign bsendalign.c)
- add_executable(bsendfrag bsendfrag.c)
- add_executable(bsendpending bsendpending.c)
- add_executable(cancelrecv cancelrecv.c)
+# add_executable(bsend1 bsend1.c)
+# add_executable(bsend2 bsend2.c)
+# add_executable(bsend3 bsend3.c)
+# add_executable(bsend4 bsend4.c)
+# add_executable(bsend5 bsend5.c)
+# add_executable(bsendalign bsendalign.c)
+# add_executable(bsendfrag bsendfrag.c)
+# add_executable(bsendpending bsendpending.c)
+# add_executable(cancelrecv cancelrecv.c)
add_executable(eagerdt eagerdt.c)
- add_executable(greq1 greq1.c)
- add_executable(icsend icsend.c)
+# add_executable(greq1 greq1.c)
+# add_executable(icsend icsend.c)
add_executable(inactivereq inactivereq.c)
add_executable(isendself isendself.c)
add_executable(isendselfprobe isendselfprobe.c)
- add_executable(large_message large_message.c)
+# add_executable(large_message large_message.c)
add_executable(mprobe mprobe.c)
- add_executable(pingping pingping.c)
+# add_executable(pingping pingping.c)
add_executable(probenull probenull.c)
add_executable(probe-unexp probe-unexp.c)
- add_executable(pscancel pscancel.c)
- add_executable(rcancel rcancel.c)
- add_executable(rqfreeb rqfreeb.c)
- add_executable(rqstatus rqstatus.c)
- add_executable(scancel2 scancel2.c)
- add_executable(scancel scancel.c)
+# add_executable(pscancel pscancel.c)
+# add_executable(rcancel rcancel.c)
+# add_executable(rqfreeb rqfreeb.c)
+# add_executable(rqstatus rqstatus.c)
+# add_executable(scancel2 scancel2.c)
+# add_executable(scancel scancel.c)
add_executable(sendall sendall.c)
add_executable(sendflood sendflood.c)
- add_executable(sendrecv1 sendrecv1.c)
+# add_executable(sendrecv1 sendrecv1.c)
add_executable(sendrecv2 sendrecv2.c)
add_executable(sendrecv3 sendrecv3.c)
- add_executable(sendself sendself.c)
+# add_executable(sendself sendself.c)
add_executable(waitany-null waitany-null.c)
add_executable(waittestnull waittestnull.c)
target_link_libraries(anyall simgrid mtest_c)
target_link_libraries(bottom simgrid mtest_c)
- target_link_libraries(bsend1 simgrid mtest_c)
- target_link_libraries(bsend2 simgrid mtest_c)
- target_link_libraries(bsend3 simgrid mtest_c)
- target_link_libraries(bsend4 simgrid mtest_c)
- target_link_libraries(bsend5 simgrid mtest_c)
- target_link_libraries(bsendalign simgrid mtest_c)
- target_link_libraries(bsendfrag simgrid mtest_c)
- target_link_libraries(bsendpending simgrid mtest_c)
- target_link_libraries(cancelrecv simgrid mtest_c)
+# target_link_libraries(bsend1 simgrid mtest_c)
+# target_link_libraries(bsend2 simgrid mtest_c)
+# target_link_libraries(bsend3 simgrid mtest_c)
+# target_link_libraries(bsend4 simgrid mtest_c)
+# target_link_libraries(bsend5 simgrid mtest_c)
+# target_link_libraries(bsendalign simgrid mtest_c)
+# target_link_libraries(bsendfrag simgrid mtest_c)
+# target_link_libraries(bsendpending simgrid mtest_c)
+# target_link_libraries(cancelrecv simgrid mtest_c)
target_link_libraries(eagerdt simgrid mtest_c)
- target_link_libraries(greq1 simgrid mtest_c)
- target_link_libraries(icsend simgrid mtest_c)
+# target_link_libraries(greq1 simgrid mtest_c)
+# target_link_libraries(icsend simgrid mtest_c)
target_link_libraries(inactivereq simgrid mtest_c)
target_link_libraries(isendself simgrid mtest_c)
target_link_libraries(isendselfprobe simgrid mtest_c)
- target_link_libraries(large_message simgrid mtest_c)
+# target_link_libraries(large_message simgrid mtest_c)
target_link_libraries(mprobe simgrid mtest_c)
- target_link_libraries(pingping simgrid mtest_c)
+# target_link_libraries(pingping simgrid mtest_c)
target_link_libraries(probenull simgrid mtest_c)
target_link_libraries(probe-unexp simgrid mtest_c)
- target_link_libraries(pscancel simgrid mtest_c)
- target_link_libraries(rcancel simgrid mtest_c)
- target_link_libraries(rqfreeb simgrid mtest_c)
- target_link_libraries(rqstatus simgrid mtest_c)
- target_link_libraries(scancel2 simgrid mtest_c)
- target_link_libraries(scancel simgrid mtest_c)
+# target_link_libraries(pscancel simgrid mtest_c)
+# target_link_libraries(rcancel simgrid mtest_c)
+# target_link_libraries(rqfreeb simgrid mtest_c)
+# target_link_libraries(rqstatus simgrid mtest_c)
+# target_link_libraries(scancel2 simgrid mtest_c)
+# target_link_libraries(scancel simgrid mtest_c)
target_link_libraries(sendall simgrid mtest_c)
target_link_libraries(sendflood simgrid mtest_c)
- target_link_libraries(sendrecv1 simgrid mtest_c)
+# target_link_libraries(sendrecv1 simgrid mtest_c)
target_link_libraries(sendrecv2 simgrid mtest_c)
target_link_libraries(sendrecv3 simgrid mtest_c)
- target_link_libraries(sendself simgrid mtest_c)
+# target_link_libraries(sendself simgrid mtest_c)
target_link_libraries(waitany-null simgrid mtest_c)
target_link_libraries(waittestnull simgrid mtest_c)
add_executable(log_large_test log_large_test.c)
add_executable(parallel_log_crashtest parallel_log_crashtest.c)
-if(HAVE_MMAP)
+if(HAVE_MMALLOC)
add_executable(mmalloc_test mmalloc_test.c)
endif()
add_executable(parmap_test parmap_test.c)
### Add definitions for compile
target_link_libraries(log_large_test simgrid)
target_link_libraries(parallel_log_crashtest simgrid)
-if(HAVE_MMAP)
+if(HAVE_MMALLOC)
target_link_libraries(mmalloc_test simgrid)
endif()
target_link_libraries(parmap_test simgrid)