ADD_TEST(smpi-mpich3-coll-ompi-thread ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich3-test/coll perl ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/runtests -mpiexec=${CMAKE_BINARY_DIR}/smpi_script/bin/smpirun -srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/coll -tests=testlist -execarg=--cfg=contexts/factory:thread -execarg=--cfg=smpi/coll_selector:ompi -execarg=--cfg=smpi/send_is_detached_thres:0 -execarg=--cfg=smpi/privatize_global_variables:yes)
ADD_TEST(smpi-mpich3-coll-mpich-thread ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich3-test/coll perl ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/runtests -mpiexec=${CMAKE_BINARY_DIR}/smpi_script/bin/smpirun -srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/coll -tests=testlist -execarg=--cfg=contexts/factory:thread -execarg=--cfg=smpi/coll_selector:mpich -execarg=--cfg=smpi/privatize_global_variables:yes)
set_tests_properties(smpi-mpich3-coll-thread smpi-mpich3-coll-ompi-thread smpi-mpich3-coll-mpich-thread PROPERTIES PASS_REGULAR_EXPRESSION "tests passed!")
+
+ ADD_TEST(smpi-mpich3-topo-raw ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich3-test/topo perl ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/runtests -mpiexec=${CMAKE_BINARY_DIR}/smpi_script/bin/smpirun -srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/topo -tests=testlist -execarg=--cfg=contexts/factory:raw)
+ set_tests_properties(smpi-mpich3-topo-raw PROPERTIES PASS_REGULAR_EXPRESSION "tests passed!")
if(CONTEXT_UCONTEXT)
ADD_TEST(smpi-mpich3-coll-ompi-ucontext ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich3-test/coll perl ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/runtests -mpiexec=${CMAKE_BINARY_DIR}/smpi_script/bin/smpirun -srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/coll -tests=testlist -execarg=--cfg=contexts/factory:ucontext -execarg=--cfg=smpi/coll_selector:ompi -execarg=--cfg=smpi/send_is_detached_thres:0 -execarg=--cfg=smpi/privatize_global_variables:yes)
set_tests_properties(smpi-mpich3-coll-ompi-ucontext PROPERTIES PASS_REGULAR_EXPRESSION "tests passed!")
src/smpi/smpi_mpi_dt.c
src/smpi/smpi_pmpi.c
src/smpi/smpi_replay.c
+ src/smpi/smpi_topo.c
src/smpi/colls/smpi_openmpi_selector.c
src/smpi/colls/smpi_mpich_selector.c
src/smpi/colls/colls_global.c
#define MPI_ERR_PENDING 14
#define MPI_ERR_BUFFER 15
#define MPI_ERR_NAME 16
+#define MPI_ERR_DIMS 17
+#define MPI_ERR_TOPOLOGY 18
+#define MPI_ERR_NO_MEM 19
#define MPI_ERRCODES_IGNORE (int *)0
#define MPI_IDENT 0
#define MPI_SIMILAR 1
XBT_PUBLIC_DATA( MPI_Op ) MPI_BOR;
XBT_PUBLIC_DATA( MPI_Op ) MPI_BXOR;
+struct s_smpi_mpi_topology;
+typedef struct s_smpi_mpi_topology *MPI_Topology;
+
struct s_smpi_mpi_group;
typedef struct s_smpi_mpi_group *MPI_Group;
int smpi_process_initialized(void);
void smpi_process_mark_as_initialized(void);
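+/* Cartesian topology support (MPI_Cart_*): creation, coordinate/rank
+ * conversion, shifting and sub-grids; implemented in src/smpi/smpi_topo.c. */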
+void smpi_topo_destroy(MPI_Topology topo);
+MPI_Topology smpi_topo_create(int ndims);
+int smpi_mpi_cart_create(MPI_Comm comm_old, int ndims, int dims[],
+                         int periods[], int reorder, MPI_Comm *comm_cart);
+int smpi_mpi_cart_shift(MPI_Comm comm, int direction, int disp,
+ int *rank_source, int *rank_dest);
+int smpi_mpi_cart_rank(MPI_Comm comm, int* coords, int* rank);
+int smpi_mpi_cart_get(MPI_Comm comm, int maxdims, int* dims, int* periods, int* coords);
+int smpi_mpi_cart_coords(MPI_Comm comm, int rank, int maxdims,
+ int coords[]);
+int smpi_mpi_cartdim_get(MPI_Comm comm, int *ndims);
+int smpi_mpi_dims_create(int nnodes, int ndims, int dims[]);
+int smpi_mpi_cart_sub(MPI_Comm comm, const int remain_dims[], MPI_Comm *newcomm);
smpi_process_data_t smpi_process_data(void);
smpi_process_data_t smpi_process_remote_data(int index);
int smpi_group_size(MPI_Group group);
int smpi_group_compare(MPI_Group group1, MPI_Group group2);
-MPI_Comm smpi_comm_new(MPI_Group group);
+MPI_Topology smpi_comm_topo(MPI_Comm comm);
+MPI_Comm smpi_comm_new(MPI_Group group, MPI_Topology topo);
void smpi_comm_destroy(MPI_Comm comm);
MPI_Group smpi_comm_group(MPI_Comm comm);
int smpi_comm_size(MPI_Comm comm);
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_comm, smpi,
"Logging specific to SMPI (comm)");
+
+
+/* Support for cartesian topology was added, but there are 2 other types of
+ * topology: graph and dist graph. In order to support them, the MPI_Topology
+ * field should be replaced by a union, discriminated by the MPIR_Topo_type
+ * field. */
+
typedef struct s_smpi_mpi_communicator {
MPI_Group group;
+ MPIR_Topo_type topoType;
+ MPI_Topology topo; // to be replaced by an union
int refcount;
} s_smpi_mpi_communicator_t;
return 1;
}
-MPI_Comm smpi_comm_new(MPI_Group group)
+MPI_Comm smpi_comm_new(MPI_Group group, MPI_Topology topo)
{
MPI_Comm comm;

comm = xbt_new(s_smpi_mpi_communicator_t, 1);
comm->group = group;
smpi_group_use(comm->group);
comm->refcount=1;
+ comm->topo = topo;
return comm;
}
void smpi_comm_destroy(MPI_Comm comm)
{
smpi_group_unuse(comm->group);
+ smpi_topo_destroy(comm->topo); // there's no use count on topos
smpi_comm_unuse(comm);
}
return comm->group;
}
+MPI_Topology smpi_comm_topo(MPI_Comm comm) {
+ if (comm != MPI_COMM_NULL)
+ return comm->topo;
+ return NULL;
+}
+
int smpi_comm_size(MPI_Comm comm)
{
return smpi_group_size(smpi_comm_group(comm));
}
} /* otherwise, exit with group_out == NULL */
}
- return group_out ? smpi_comm_new(group_out) : MPI_COMM_NULL;
+ return group_out ? smpi_comm_new(group_out, NULL) : MPI_COMM_NULL;
}
void smpi_comm_use(MPI_Comm comm){
  comm->refcount++;
}

void smpi_comm_unuse(MPI_Comm comm){
  comm->refcount--;
  if(comm->refcount==0)
    xbt_free(comm);
}
+
smpi_process_data_t data = smpi_process_data();
if(data->comm_self==MPI_COMM_NULL){
MPI_Group group = smpi_group_new(1);
- data->comm_self = smpi_comm_new(group);
+ data->comm_self = smpi_comm_new(group, NULL);
smpi_group_set_mapping(group, smpi_process_index(), 0);
}
process_data[i]->sampling = 0;
}
group = smpi_group_new(process_count);
- MPI_COMM_WORLD = smpi_comm_new(group);
+ MPI_COMM_WORLD = smpi_comm_new(group, NULL);
MPI_UNIVERSE_SIZE = smpi_comm_size(MPI_COMM_WORLD);
for (i = 0; i < process_count; i++) {
smpi_group_set_mapping(group, i, i);
} else if (newcomm == NULL) {
retval = MPI_ERR_ARG;
} else {
- *newcomm = smpi_comm_new(smpi_comm_group(comm));
+ *newcomm = smpi_comm_new(smpi_comm_group(comm), smpi_comm_topo(comm));
retval = MPI_SUCCESS;
}
return retval;
retval = MPI_SUCCESS;
}else{
- *newcomm = smpi_comm_new(group);
+ *newcomm = smpi_comm_new(group, NULL);
retval = MPI_SUCCESS;
}
return retval;
return MPI_SUCCESS;
}
-/* The following calls are not yet implemented and will fail at runtime. */
-/* Once implemented, please move them above this notice. */
+/* The topo part of MPI_COMM_WORLD should always be NULL. When other topologies
+ * are implemented, we should not only check whether the topology is NULL, but
+ * also whether it is of the right type (hence the need for a MPIR_Topo_type
+ * field, with the MPI_Topology field becoming a union). */
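+/* A hypothetical sketch of that check, once the topoType field is in use:
+ *   if (comm->topoType != MPI_CART) return MPI_ERR_TOPOLOGY;
+ * (MPI_CART is the standard MPI topology-type constant.) */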
-#define NOT_YET_IMPLEMENTED {\
- XBT_WARN("Not yet implemented : %s. Please contact the Simgrid team if support is needed", __FUNCTION__);\
- return MPI_SUCCESS;\
- }
+int PMPI_Cart_create(MPI_Comm comm_old, int ndims, int* dims, int* periodic, int reorder, MPI_Comm* comm_cart) {
+ int retval = 0;
+ smpi_bench_end();
+  if (comm_old == MPI_COMM_NULL) {
+    retval = MPI_ERR_COMM;
+  } else if (ndims < 0 ||
+             (ndims > 0 && (dims == NULL || periodic == NULL)) ||
+             comm_cart == NULL) {
+    retval = MPI_ERR_ARG;
+  } else {
+    retval = smpi_mpi_cart_create(comm_old, ndims, dims, periodic, reorder, comm_cart);
+  }
+ smpi_bench_begin();
-int PMPI_Type_dup(MPI_Datatype datatype, MPI_Datatype *newtype){
- NOT_YET_IMPLEMENTED
+ return retval;
}
-int PMPI_Type_set_name(MPI_Datatype datatype, char * name)
-{
- NOT_YET_IMPLEMENTED
+int PMPI_Cart_rank(MPI_Comm comm, int* coords, int* rank) {
+ if(comm == MPI_COMM_NULL || smpi_comm_topo(comm) == NULL) {
+ return MPI_ERR_TOPOLOGY;
+ }
+  if (coords == NULL || rank == NULL) {
+    return MPI_ERR_ARG;
+  }
+ return smpi_mpi_cart_rank(comm, coords, rank);
}
-int PMPI_Type_get_name(MPI_Datatype datatype, char * name, int* len)
-{
- NOT_YET_IMPLEMENTED
+int PMPI_Cart_shift(MPI_Comm comm, int direction, int displ, int* source, int* dest) {
+ if(comm == MPI_COMM_NULL || smpi_comm_topo(comm) == NULL) {
+ return MPI_ERR_TOPOLOGY;
+ }
+ if (source == NULL || dest == NULL || direction < 0 ) {
+ return MPI_ERR_ARG;
+ }
+ return smpi_mpi_cart_shift(comm, direction, displ, source, dest);
}
-int PMPI_Pack_size(int incount, MPI_Datatype datatype, MPI_Comm comm, int* size) {
- NOT_YET_IMPLEMENTED
+int PMPI_Cart_coords(MPI_Comm comm, int rank, int maxdims, int* coords) {
+ if(comm == MPI_COMM_NULL || smpi_comm_topo(comm) == NULL) {
+ return MPI_ERR_TOPOLOGY;
+ }
+ if (rank < 0 || rank >= smpi_comm_size(comm)) {
+ return MPI_ERR_RANK;
+ }
+  if (maxdims <= 0 || coords == NULL) {
+    return MPI_ERR_ARG;
+  }
+ return smpi_mpi_cart_coords(comm, rank, maxdims, coords);
}
-int PMPI_Cart_coords(MPI_Comm comm, int rank, int maxdims, int* coords) {
- NOT_YET_IMPLEMENTED
+int PMPI_Cart_get(MPI_Comm comm, int maxdims, int* dims, int* periods, int* coords) {
+  if(comm == MPI_COMM_NULL || smpi_comm_topo(comm) == NULL) {
+ return MPI_ERR_TOPOLOGY;
+ }
+ if(maxdims <= 0 || dims == NULL || periods == NULL || coords == NULL) {
+ return MPI_ERR_ARG;
+ }
+ return smpi_mpi_cart_get(comm, maxdims, dims, periods, coords);
}
-int PMPI_Cart_create(MPI_Comm comm_old, int ndims, int* dims, int* periods, int reorder, MPI_Comm* comm_cart) {
- NOT_YET_IMPLEMENTED
+int PMPI_Cartdim_get(MPI_Comm comm, int* ndims) {
+ if (comm == MPI_COMM_NULL || smpi_comm_topo(comm) == NULL) {
+ return MPI_ERR_TOPOLOGY;
+ }
+ if (ndims == NULL) {
+ return MPI_ERR_ARG;
+ }
+ return smpi_mpi_cartdim_get(comm, ndims);
}
-int PMPI_Cart_get(MPI_Comm comm, int maxdims, int* dims, int* periods, int* coords) {
- NOT_YET_IMPLEMENTED
+int PMPI_Dims_create(int nnodes, int ndims, int* dims) {
+ if(dims == NULL) {
+ return MPI_ERR_ARG;
+ }
+ if (ndims < 1 || nnodes < 1) {
+ return MPI_ERR_DIMS;
+ }
+
+ return smpi_mpi_dims_create(nnodes, ndims, dims);
}
-int PMPI_Cart_map(MPI_Comm comm_old, int ndims, int* dims, int* periods, int* newrank) {
- NOT_YET_IMPLEMENTED
+int PMPI_Cart_sub(MPI_Comm comm, int* remain_dims, MPI_Comm* comm_new) {
+ if(comm == MPI_COMM_NULL || smpi_comm_topo(comm) == NULL) {
+ return MPI_ERR_TOPOLOGY;
+ }
+ if (comm_new == NULL) {
+ return MPI_ERR_ARG;
+ }
+ return smpi_mpi_cart_sub(comm, remain_dims, comm_new);
}
-int PMPI_Cart_rank(MPI_Comm comm, int* coords, int* rank) {
- NOT_YET_IMPLEMENTED
+
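+/* Typical use of the calls above (a sketch; `grid`, `source` and `dest` are
+ * illustrative names, and `size` is the size of MPI_COMM_WORLD):
+ *   int dims[2] = {0, 0}, periods[2] = {1, 1}, source, dest;
+ *   MPI_Comm grid;
+ *   MPI_Dims_create(size, 2, dims);
+ *   MPI_Cart_create(MPI_COMM_WORLD, 2, dims, periods, 0, &grid);
+ *   MPI_Cart_shift(grid, 0, 1, &source, &dest);
+ */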
+/* The following calls are not yet implemented and will fail at runtime. */
+/* Once implemented, please move them above this notice. */
+
+#define NOT_YET_IMPLEMENTED { \
+  XBT_WARN("Not yet implemented: %s. Please contact the SimGrid team if support is needed", __FUNCTION__); \
+ return MPI_SUCCESS; \
+ }
+
+
+int PMPI_Type_dup(MPI_Datatype datatype, MPI_Datatype *newtype){
+ NOT_YET_IMPLEMENTED
}
-int PMPI_Cart_shift(MPI_Comm comm, int direction, int displ, int* source, int* dest) {
- NOT_YET_IMPLEMENTED
+int PMPI_Type_set_name(MPI_Datatype datatype, char * name)
+{
+ NOT_YET_IMPLEMENTED
}
-int PMPI_Cart_sub(MPI_Comm comm, int* remain_dims, MPI_Comm* comm_new) {
- NOT_YET_IMPLEMENTED
+int PMPI_Type_get_name(MPI_Datatype datatype, char * name, int* len)
+{
+ NOT_YET_IMPLEMENTED
}
-int PMPI_Cartdim_get(MPI_Comm comm, int* ndims) {
- NOT_YET_IMPLEMENTED
+int PMPI_Pack_size(int incount, MPI_Datatype datatype, MPI_Comm comm, int* size) {
+ NOT_YET_IMPLEMENTED
+}
+
+
+int PMPI_Cart_map(MPI_Comm comm_old, int ndims, int* dims, int* periods, int* newrank) {
+ NOT_YET_IMPLEMENTED
}
+
int PMPI_Graph_create(MPI_Comm comm_old, int nnodes, int* index, int* edges, int reorder, MPI_Comm* comm_graph) {
- NOT_YET_IMPLEMENTED
+ NOT_YET_IMPLEMENTED
}
int PMPI_Graph_get(MPI_Comm comm, int maxindex, int maxedges, int* index, int* edges) {
- NOT_YET_IMPLEMENTED
+ NOT_YET_IMPLEMENTED
}
int PMPI_Graph_map(MPI_Comm comm_old, int nnodes, int* index, int* edges, int* newrank) {
- NOT_YET_IMPLEMENTED
+ NOT_YET_IMPLEMENTED
}
int PMPI_Graph_neighbors(MPI_Comm comm, int rank, int maxneighbors, int* neighbors) {
- NOT_YET_IMPLEMENTED
+ NOT_YET_IMPLEMENTED
}
int PMPI_Graph_neighbors_count(MPI_Comm comm, int rank, int* nneighbors) {
- NOT_YET_IMPLEMENTED
+ NOT_YET_IMPLEMENTED
}
int PMPI_Graphdims_get(MPI_Comm comm, int* nnodes, int* nedges) {
- NOT_YET_IMPLEMENTED
+ NOT_YET_IMPLEMENTED
}
int PMPI_Topo_test(MPI_Comm comm, int* top_type) {
- NOT_YET_IMPLEMENTED
+ NOT_YET_IMPLEMENTED
}
int PMPI_Errhandler_create(MPI_Handler_function* function, MPI_Errhandler* errhandler) {
- NOT_YET_IMPLEMENTED
+ NOT_YET_IMPLEMENTED
}
int PMPI_Errhandler_free(MPI_Errhandler* errhandler) {
- NOT_YET_IMPLEMENTED
+ NOT_YET_IMPLEMENTED
}
int PMPI_Errhandler_get(MPI_Comm comm, MPI_Errhandler* errhandler) {
- NOT_YET_IMPLEMENTED
+ NOT_YET_IMPLEMENTED
}
int PMPI_Error_string(int errorcode, char* string, int* resultlen) {
- NOT_YET_IMPLEMENTED
+ NOT_YET_IMPLEMENTED
}
int PMPI_Errhandler_set(MPI_Comm comm, MPI_Errhandler errhandler) {
- NOT_YET_IMPLEMENTED
+ NOT_YET_IMPLEMENTED
}
int PMPI_Comm_set_errhandler(MPI_Comm comm, MPI_Errhandler errhandler) {
- NOT_YET_IMPLEMENTED
+ NOT_YET_IMPLEMENTED
}
int PMPI_Comm_get_errhandler(MPI_Comm comm, MPI_Errhandler* errhandler) {
- NOT_YET_IMPLEMENTED
+ NOT_YET_IMPLEMENTED
}
int PMPI_Cancel(MPI_Request* request) {
- NOT_YET_IMPLEMENTED
+ NOT_YET_IMPLEMENTED
}
int PMPI_Buffer_attach(void* buffer, int size) {
- NOT_YET_IMPLEMENTED
+ NOT_YET_IMPLEMENTED
}
int PMPI_Buffer_detach(void* buffer, int* size) {
- NOT_YET_IMPLEMENTED
+ NOT_YET_IMPLEMENTED
}
int PMPI_Comm_test_inter(MPI_Comm comm, int* flag) {
- NOT_YET_IMPLEMENTED
+ NOT_YET_IMPLEMENTED
}
int PMPI_Comm_get_attr (MPI_Comm comm, int comm_keyval, void *attribute_val, int *flag)
{
- NOT_YET_IMPLEMENTED
+ NOT_YET_IMPLEMENTED
}
int PMPI_Comm_set_attr (MPI_Comm comm, int comm_keyval, void *attribute_val)
{
- NOT_YET_IMPLEMENTED
+ NOT_YET_IMPLEMENTED
}
int PMPI_Comm_delete_attr (MPI_Comm comm, int comm_keyval)
{
- NOT_YET_IMPLEMENTED
+ NOT_YET_IMPLEMENTED
}
int PMPI_Comm_create_keyval(MPI_Comm_copy_attr_function* copy_fn, MPI_Comm_delete_attr_function* delete_fn, int* keyval, void* extra_state)
{
- NOT_YET_IMPLEMENTED
+ NOT_YET_IMPLEMENTED
}
int PMPI_Comm_free_keyval(int* keyval) {
- NOT_YET_IMPLEMENTED
+ NOT_YET_IMPLEMENTED
}
int PMPI_Pcontrol(const int level )
{
- NOT_YET_IMPLEMENTED
+ NOT_YET_IMPLEMENTED
}
int PMPI_Unpack(void* inbuf, int insize, int* position, void* outbuf, int outcount, MPI_Datatype type, MPI_Comm comm) {
- NOT_YET_IMPLEMENTED
+ NOT_YET_IMPLEMENTED
}
int PMPI_Type_get_attr (MPI_Datatype type, int type_keyval, void *attribute_val, int* flag)
}
int PMPI_Intercomm_create(MPI_Comm local_comm, int local_leader, MPI_Comm peer_comm, int remote_leader, int tag, MPI_Comm* comm_out) {
- NOT_YET_IMPLEMENTED
+ NOT_YET_IMPLEMENTED
}
int PMPI_Intercomm_merge(MPI_Comm comm, int high, MPI_Comm* comm_out) {
- NOT_YET_IMPLEMENTED
+ NOT_YET_IMPLEMENTED
}
int PMPI_Bsend(void* buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm) {
- NOT_YET_IMPLEMENTED
+ NOT_YET_IMPLEMENTED
}
int PMPI_Bsend_init(void* buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm, MPI_Request* request) {
- NOT_YET_IMPLEMENTED
+ NOT_YET_IMPLEMENTED
}
int PMPI_Ibsend(void* buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm, MPI_Request* request) {
- NOT_YET_IMPLEMENTED
+ NOT_YET_IMPLEMENTED
}
int PMPI_Comm_remote_group(MPI_Comm comm, MPI_Group* group) {
- NOT_YET_IMPLEMENTED
+ NOT_YET_IMPLEMENTED
}
int PMPI_Comm_remote_size(MPI_Comm comm, int* size) {
- NOT_YET_IMPLEMENTED
+ NOT_YET_IMPLEMENTED
}
int PMPI_Attr_delete(MPI_Comm comm, int keyval) {
- NOT_YET_IMPLEMENTED
+ NOT_YET_IMPLEMENTED
}
int PMPI_Attr_get(MPI_Comm comm, int keyval, void* attr_value, int* flag) {
- NOT_YET_IMPLEMENTED
+ NOT_YET_IMPLEMENTED
}
int PMPI_Attr_put(MPI_Comm comm, int keyval, void* attr_value) {
- NOT_YET_IMPLEMENTED
+ NOT_YET_IMPLEMENTED
}
int PMPI_Rsend(void* buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm) {
- NOT_YET_IMPLEMENTED
+ NOT_YET_IMPLEMENTED
}
int PMPI_Rsend_init(void* buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm, MPI_Request* request) {
- NOT_YET_IMPLEMENTED
+ NOT_YET_IMPLEMENTED
}
int PMPI_Irsend(void* buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm, MPI_Request* request) {
- NOT_YET_IMPLEMENTED
+ NOT_YET_IMPLEMENTED
}
int PMPI_Keyval_create(MPI_Copy_function* copy_fn, MPI_Delete_function* delete_fn, int* keyval, void* extra_state) {
- NOT_YET_IMPLEMENTED
+ NOT_YET_IMPLEMENTED
}
int PMPI_Keyval_free(int* keyval) {
- NOT_YET_IMPLEMENTED
+ NOT_YET_IMPLEMENTED
}
int PMPI_Test_cancelled(MPI_Status* status, int* flag) {
- NOT_YET_IMPLEMENTED
+ NOT_YET_IMPLEMENTED
}
int PMPI_Pack(void* inbuf, int incount, MPI_Datatype type, void* outbuf, int outcount, int* position, MPI_Comm comm) {
- NOT_YET_IMPLEMENTED
+ NOT_YET_IMPLEMENTED
}
int PMPI_Pack_external_size(char *datarep, int incount, MPI_Datatype datatype, MPI_Aint *size){
}
int PMPI_Get_elements(MPI_Status* status, MPI_Datatype datatype, int* elements) {
- NOT_YET_IMPLEMENTED
-}
-
-int PMPI_Dims_create(int nnodes, int ndims, int* dims) {
- NOT_YET_IMPLEMENTED
+ NOT_YET_IMPLEMENTED
}
int PMPI_Win_fence( int assert, MPI_Win win){
- NOT_YET_IMPLEMENTED
+ NOT_YET_IMPLEMENTED
}
int PMPI_Win_free( MPI_Win* win){
- NOT_YET_IMPLEMENTED
+ NOT_YET_IMPLEMENTED
}
int PMPI_Win_create( void *base, MPI_Aint size, int disp_unit, MPI_Info info, MPI_Comm comm, MPI_Win *win){
}
int PMPI_Get( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank,
- MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Win win){
+ MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Win win){
NOT_YET_IMPLEMENTED
}
int PMPI_Type_get_envelope( MPI_Datatype datatype, int *num_integers,
- int *num_addresses, int *num_datatypes, int *combiner){
+ int *num_addresses, int *num_datatypes, int *combiner){
NOT_YET_IMPLEMENTED
}
int PMPI_Type_get_contents(MPI_Datatype datatype, int max_integers, int max_addresses,
- int max_datatypes, int* array_of_integers, MPI_Aint* array_of_addresses,
- MPI_Datatype* array_of_datatypes){
+ int max_datatypes, int* array_of_integers, MPI_Aint* array_of_addresses,
+ MPI_Datatype* array_of_datatypes){
NOT_YET_IMPLEMENTED
}
}
int PMPI_Alltoallw( void *sendbuf, int *sendcnts, int *sdispls, MPI_Datatype *sendtypes,
- void *recvbuf, int *recvcnts, int *rdispls, MPI_Datatype *recvtypes,
- MPI_Comm comm){
+ void *recvbuf, int *recvcnts, int *rdispls, MPI_Datatype *recvtypes,
+ MPI_Comm comm){
NOT_YET_IMPLEMENTED
}
}
int PMPI_Comm_spawn_multiple( int count, char **array_of_commands, char*** array_of_argv,
- int* array_of_maxprocs, MPI_Info* array_of_info, int root,
- MPI_Comm comm, MPI_Comm *intercomm, int* array_of_errcodes){
+ int* array_of_maxprocs, MPI_Info* array_of_info, int root,
+ MPI_Comm comm, MPI_Comm *intercomm, int* array_of_errcodes){
NOT_YET_IMPLEMENTED
}
--- /dev/null
+++ b/src/smpi/smpi_topo.c
+#include "xbt/sysdep.h"
+#include "smpi/smpi.h"
+#include "private.h"
+
+#include <math.h>
+#include <string.h> /* memcpy */
+
+typedef struct s_smpi_mpi_topology {
+  int nnodes;     // total number of processes in the grid
+  int ndims;      // number of dimensions
+  int *dims;      // extent of each dimension
+  int *periodic;  // per-dimension periodicity flags
+  int *position;  // cartesian coordinates of the local process
+} s_smpi_mpi_topology_t;
+
+void smpi_topo_destroy(MPI_Topology topo) {
+ if (topo) {
+ if(topo->dims) {
+ free(topo->dims);
+ }
+ if(topo->periodic) {
+ free(topo->periodic);
+ }
+ if(topo->position) {
+ free(topo->position);
+ }
+ free(topo);
+ }
+}
+
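+/* Allocate a topology descriptor for ndims dimensions. dims, periodic and
+ * position are left uninitialized; smpi_mpi_cart_create fills them in. */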
+MPI_Topology smpi_topo_create(int ndims) {
+ MPI_Topology topo = xbt_malloc(sizeof(*topo));
+ topo->nnodes = 0;
+ topo->ndims = ndims;
+ topo->dims = xbt_malloc(ndims * sizeof(*topo->dims));
+ topo->periodic = xbt_malloc(ndims * sizeof(*topo->periodic));
+ topo->position = xbt_malloc(ndims * sizeof(*topo->position));
+ return topo;
+}
+
+/* reorder is ignored: ranks are kept as in comm_old. The consequences of a
+ * naive reordering are unclear, and its benefit here is dubious anyway. */
+int smpi_mpi_cart_create(MPI_Comm comm_old, int ndims, int dims[],
+ int periods[], int reorder, MPI_Comm *comm_cart) {
+ int retval = MPI_SUCCESS;
+ int i;
+ MPI_Topology topo;
+ MPI_Group newGroup, oldGroup;
+ int rank, nranks, newSize;
+
+  rank = smpi_comm_rank(comm_old);
+
+ newSize = 1;
+  if (ndims != 0) {
+    for (i = 0 ; i < ndims ; i++) {
+      newSize *= dims[i];
+    }
+    if (rank >= newSize) {
+      *comm_cart = MPI_COMM_NULL;
+      return retval;
+    }
+    // create the topology only once this rank is known to belong to the new grid
+    topo = smpi_topo_create(ndims);
+ oldGroup = smpi_comm_group(comm_old);
+ newGroup = smpi_group_new(newSize);
+ for (i = 0 ; i < newSize ; i++) {
+ smpi_group_set_mapping(newGroup, smpi_group_index(oldGroup, i), i);
+ }
+
+ topo->nnodes = newSize;
+
+ memcpy(topo->dims, dims, ndims * sizeof(*topo->dims));
+ memcpy(topo->periodic, periods, ndims * sizeof(*topo->periodic));
+
+    /* Compute this process's coordinates; this duplicates the logic of
+     * smpi_mpi_cart_coords. dims and periodic were already copied above. */
+    nranks = newSize;
+    for (i = 0; i < ndims; i++) {
+      nranks = nranks / dims[i]; // nranks stays >= 1 as long as all dims[i] are positive
+      topo->position[i] = rank / nranks;
+      rank = rank % nranks;
+    }
+
+ *comm_cart = smpi_comm_new(newGroup, topo);
+ }
+ else {
+ if (rank == 0) {
+ topo = smpi_topo_create(ndims);
+ *comm_cart = smpi_comm_new(smpi_comm_group(MPI_COMM_SELF), topo);
+ }
+ else {
+ *comm_cart = MPI_COMM_NULL;
+ }
+ }
+ return retval;
+}
+
+int smpi_mpi_cart_sub(MPI_Comm comm, const int remain_dims[], MPI_Comm *newcomm) {
+ MPI_Topology oldTopo = smpi_comm_topo(comm);
+ int oldNDims = oldTopo->ndims;
+  int i, j = 0, newNDims, *newDims = NULL, *newPeriodic = NULL, retval;
+
+ if (remain_dims == NULL && oldNDims != 0) {
+ return MPI_ERR_ARG;
+ }
+ newNDims = 0;
+ for (i = 0 ; i < oldNDims ; i++) {
+ if (remain_dims[i]) newNDims++;
+ }
+
+ if (newNDims > 0) {
+ newDims = malloc(newNDims * sizeof(*newDims));
+ newPeriodic = malloc(newNDims * sizeof(*newPeriodic));
+
+    /* Copy the kept dimensions; iterating up to oldNDims cannot overrun. */
+    for (i = 0 ; i < oldNDims ; i++) {
+      if (remain_dims[i]) {
+        newDims[j] = oldTopo->dims[i];
+        newPeriodic[j] = oldTopo->periodic[i];
+        j++;
+      }
+    }
+  }
+  retval = smpi_mpi_cart_create(comm, newNDims, newDims, newPeriodic, 0, newcomm);
+  free(newDims);     // smpi_mpi_cart_create copied their contents into the new topology
+  free(newPeriodic);
+  return retval;
+}
+
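+/* Ranks are laid out in row-major order: e.g. with dims = {2, 3}, rank 4 has
+ * coordinates {1, 1}. */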
+int smpi_mpi_cart_coords(MPI_Comm comm, int rank, int maxdims,
+ int coords[]) {
+ int nnodes;
+ int i;
+ MPI_Topology topo = smpi_comm_topo(comm);
+
+  nnodes = topo->nnodes;
+  for (i = 0; i < topo->ndims && i < maxdims; i++) {
+    nnodes = nnodes / topo->dims[i];
+    coords[i] = rank / nnodes;
+    rank = rank % nnodes;
+  }
+ return MPI_SUCCESS;
+}
+
+int smpi_mpi_cart_get(MPI_Comm comm, int maxdims, int* dims, int* periods, int* coords) {
+  MPI_Topology topo = smpi_comm_topo(comm);
+  int ndims = topo->ndims < maxdims ? topo->ndims : maxdims; // don't read past the topology
+  int i;
+  for (i = 0 ; i < ndims ; i++) {
+    dims[i] = topo->dims[i];
+    periods[i] = topo->periodic[i];
+    coords[i] = topo->position[i];
+  }
+ return MPI_SUCCESS;
+}
+
+int smpi_mpi_cart_rank(MPI_Comm comm, int* coords, int* rank) {
+ MPI_Topology topo = smpi_comm_topo(comm);
+ int ndims = topo->ndims;
+ int multiplier, coord,i;
+ *rank = 0;
+ multiplier = 1;
+
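+  /* Mixed-radix conversion: rank = sum_i coords[i] * prod_{j>i} dims[j],
+   * accumulated from the last dimension backwards with a running multiplier. */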
+ for ( i=ndims-1; i >=0; i-- ) {
+ coord = coords[i];
+
+    /* Should we validate all arguments up front, or check as we go (as is
+     * done here)? */
+ if (coord >= topo->dims[i]) {
+ if ( topo->periodic[i] ) {
+ coord = coord % topo->dims[i];
+ }
+ else {
+        // out-of-range coordinate on a non-periodic dimension: error out
+ *rank = -1;
+ return MPI_ERR_ARG;
+ }
+ }
+ else if (coord < 0) {
+ if(topo->periodic[i]) {
+ coord = coord % topo->dims[i];
+ if (coord) coord = topo->dims[i] + coord;
+ }
+ else {
+ *rank = -1;
+ return MPI_ERR_ARG;
+ }
+ }
+
+ *rank += multiplier * coord;
+ multiplier *= topo->dims[i];
+ }
+ return MPI_SUCCESS;
+}
+
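+/* Compute the neighbours of the calling process along `direction`: rank_dest
+ * lies disp steps in the positive direction, rank_source disp steps in the
+ * negative one. Neighbours beyond a non-periodic boundary are MPI_PROC_NULL. */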
+int smpi_mpi_cart_shift(MPI_Comm comm, int direction, int disp,
+ int *rank_source, int *rank_dest) {
+  MPI_Topology topo = smpi_comm_topo(comm);
+
+  if (topo->ndims == 0) {
+    return MPI_ERR_ARG;
+  }
+  if (direction < 0 || direction >= topo->ndims) {
+    return MPI_ERR_DIMS;
+  }
+
+  int position[topo->ndims];
+
+ smpi_mpi_cart_coords(comm, smpi_comm_rank(comm), topo->ndims, position);
+ position[direction] += disp;
+
+ if(position[direction] < 0 || position[direction] >= topo->dims[direction]) {
+ if(topo->periodic[direction]) {
+ position[direction] %= topo->dims[direction];
+ smpi_mpi_cart_rank(comm, position, rank_dest);
+ }
+ else {
+ *rank_dest = MPI_PROC_NULL;
+ }
+ }
+ else {
+ smpi_mpi_cart_rank(comm, position, rank_dest);
+ }
+
+ position[direction] = topo->position[direction] - disp;
+ if(position[direction] < 0 || position[direction] >= topo->dims[direction]) {
+ if(topo->periodic[direction]) {
+ position[direction] %= topo->dims[direction];
+ smpi_mpi_cart_rank(comm, position, rank_source);
+ }
+ else {
+ *rank_source = MPI_PROC_NULL;
+ }
+ }
+ else {
+ smpi_mpi_cart_rank(comm, position, rank_source);
+ }
+
+ return MPI_SUCCESS;
+}
+
+int smpi_mpi_cartdim_get(MPI_Comm comm, int *ndims) {
+ MPI_Topology topo = smpi_comm_topo(comm);
+
+ *ndims = topo->ndims;
+ return MPI_SUCCESS;
+}
+
+
+
+// Everything below has been taken from Open MPI, but could easily be rewritten.
+
+/*
+ * Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
+ * University Research and Technology
+ * Corporation. All rights reserved.
+ * Copyright (c) 2004-2005 The University of Tennessee and The University
+ * of Tennessee Research Foundation. All rights
+ * reserved.
+ * Copyright (c) 2004-2014 High Performance Computing Center Stuttgart,
+ * University of Stuttgart. All rights reserved.
+ * Copyright (c) 2004-2005 The Regents of the University of California.
+ * All rights reserved.
+ * Copyright (c) 2012 Los Alamos National Security, LLC. All rights
+ * reserved.
+ * Copyright (c) 2014 Intel, Inc. All rights reserved
+ * $COPYRIGHT$
+ *
+ * Additional copyrights may follow
+ *
+ * $HEADER$
+ */
+
+
+/* static functions */
+static int assignnodes(int ndim, int nfactor, int *pfacts,int **pdims);
+static int getfactors(int num, int *nfactors, int **factors);
+
+/*
+ * This is a utility function, no need to have anything in the lower
+ * layer for this at all
+ */
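+/* Example: nnodes = 12, ndims = 2, dims = {0, 0} is filled in as {4, 3}. */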
+int smpi_mpi_dims_create(int nnodes, int ndims, int dims[])
+{
+ int i;
+ int freeprocs;
+ int freedims;
+ int nfactors;
+ int *factors;
+ int *procs;
+ int *p;
+ int err;
+
+ /* Get # of free-to-be-assigned processes and # of free dimensions */
+ freeprocs = nnodes;
+ freedims = 0;
+ for (i = 0, p = dims; i < ndims; ++i,++p) {
+ if (*p == 0) {
+ ++freedims;
+ } else if ((*p < 0) || ((nnodes % *p) != 0)) {
+ return MPI_ERR_DIMS;
+
+ } else {
+ freeprocs /= *p;
+ }
+ }
+
+ if (freedims == 0) {
+ if (freeprocs == 1) {
+ return MPI_SUCCESS;
+ }
+ return MPI_ERR_DIMS;
+ }
+
+ if (freeprocs == 1) {
+ for (i = 0; i < ndims; ++i, ++dims) {
+ if (*dims == 0) {
+ *dims = 1;
+ }
+ }
+ return MPI_SUCCESS;
+ }
+
+ /* Factor the number of free processes */
+ if (MPI_SUCCESS != (err = getfactors(freeprocs, &nfactors, &factors))) {
+ return err;
+ }
+
+ /* Assign free processes to free dimensions */
+  if (MPI_SUCCESS != (err = assignnodes(freedims, nfactors, factors, &procs))) {
+    free((char *) factors); // don't leak the factor array on failure
+    return err;
+  }
+
+ /* Return assignment results */
+ p = procs;
+ for (i = 0; i < ndims; ++i, ++dims) {
+ if (*dims == 0) {
+ *dims = *p++;
+ }
+ }
+
+ free((char *) factors);
+ free((char *) procs);
+
+ /* all done */
+ return MPI_SUCCESS;
+}
+
+/*
+ * assignnodes
+ *
+ * Function: - assign processes to dimensions
+ * - get "best-balanced" grid
+ * - greedy bin-packing algorithm used
+ * - sort dimensions in decreasing order
+ * - dimensions array dynamically allocated
+ * Accepts: - # of dimensions
+ * - # of prime factors
+ * - array of prime factors
+ * - ptr to array of dimensions (returned value)
+ * Returns: - 0 or ERROR
+ */
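+/* Example: factors {2, 2, 3} over ndim = 2 bins: 3 -> {3,1}, then 2 -> {3,2},
+ * then 2 -> {3,4}; sorted decreasingly this gives {4, 3}. */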
+static int
+assignnodes(int ndim, int nfactor, int *pfacts, int **pdims)
+{
+ int *bins;
+ int i, j;
+ int n;
+ int f;
+ int *p;
+ int *pmin;
+
+ if (0 >= ndim) {
+ return MPI_ERR_DIMS;
+ }
+
+ /* Allocate and initialize the bins */
+ bins = (int *) malloc((unsigned) ndim * sizeof(int));
+ if (NULL == bins) {
+ return MPI_ERR_NO_MEM;
+ }
+ *pdims = bins;
+
+ for (i = 0, p = bins; i < ndim; ++i, ++p) {
+ *p = 1;
+ }
+
+ /* Loop assigning factors from the highest to the lowest */
+ for (j = nfactor - 1; j >= 0; --j) {
+ f = pfacts[j];
+ /* Assign a factor to the smallest bin */
+ pmin = bins;
+ for (i = 1, p = pmin + 1; i < ndim; ++i, ++p) {
+ if (*p < *pmin) {
+ pmin = p;
+ }
+ }
+ *pmin *= f;
+ }
+
+ /* Sort dimensions in decreasing order (O(n^2) for now) */
+ for (i = 0, pmin = bins; i < ndim - 1; ++i, ++pmin) {
+ for (j = i + 1, p = pmin + 1; j < ndim; ++j, ++p) {
+ if (*p > *pmin) {
+ n = *p;
+ *p = *pmin;
+ *pmin = n;
+ }
+ }
+ }
+
+ return MPI_SUCCESS;
+}
+
+/*
+ * getfactors
+ *
+ * Function: - factorize a number
+ * Accepts: - number
+ * - # prime factors
+ * - array of prime factors
+ * Returns: - MPI_SUCCESS or ERROR
+ */
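+/* Example: getfactors(12, &n, &f) sets n = 3 and f = {2, 2, 3}. */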
+static int
+getfactors(int num, int *nfactors, int **factors) {
+ int size;
+ int d;
+ int i;
+ int sqrtnum;
+
+ if(num < 2) {
+ (*nfactors) = 0;
+ (*factors) = NULL;
+ return MPI_SUCCESS;
+ }
+ /* Allocate the array of prime factors which cannot exceed log_2(num) entries */
+ sqrtnum = ceil(sqrt(num));
+ size = ceil(log(num) / log(2));
+  *factors = (int *) malloc((unsigned) size * sizeof(int));
+  if (NULL == *factors) {
+    return MPI_ERR_NO_MEM;
+  }
+
+ i = 0;
+  /* determine all occurrences of factor 2 */
+  while((num % 2) == 0) {
+    num /= 2;
+    (*factors)[i++] = 2;
+  }
+  /* determine all occurrences of odd prime factors up to sqrt(num) */
+  for(d = 3; (num > 1) && (d <= sqrtnum); d += 2) {
+ while((num % d) == 0) {
+ num /= d;
+ (*factors)[i++] = d;
+ }
+ }
+ /* as we looped only up to sqrt(num) one factor > sqrt(num) may be left over */
+ if(num != 1) {
+ (*factors)[i++] = num;
+ }
+ (*nfactors) = i;
+ return MPI_SUCCESS;
+}
+
pt2pt
#
#spawn
-#topo
+topo
#perf
#io
f77
include_directories("${CMAKE_CURRENT_SOURCE_DIR}/../include/")
-# add_executable(cartcreates cartcreates.c)
-# add_executable(cartmap1 cartmap1.c)
-# add_executable(cartshift1 cartshift1.c)
-# add_executable(cartsuball cartsuball.c)
-# add_executable(cartzero cartzero.c)
+add_executable(cartcreates cartcreates.c)
+ #add_executable(cartmap1 cartmap1.c)
+add_executable(cartshift1 cartshift1.c)
+add_executable(cartsuball cartsuball.c)
+add_executable(cartzero cartzero.c)
# add_executable(dgraph_unwgt dgraph_unwgt.c)
-# add_executable(dims1 dims1.c)
-# add_executable(dims2 dims2.c)
+#add_executable(dims1 dims1.c)
+#add_executable(dims2 dims2.c)
# add_executable(distgraph1 distgraph1.c)
# add_executable(graphcr2 graphcr2.c)
# add_executable(graphcr graphcr.c)
# add_executable(graphmap1 graphmap1.c)
-# add_executable(neighb_coll neighb_coll.c)
+#add_executable(neighb_coll neighb_coll.c)
# add_executable(topodup topodup.c)
-# add_executable(topotest topotest.c)
+#add_executable(topotest topotest.c)
-# target_link_libraries(cartcreates simgrid mtest_c)
-# target_link_libraries(cartmap1 simgrid mtest_c)
-# target_link_libraries(cartshift1 simgrid mtest_c)
-# target_link_libraries(cartsuball simgrid mtest_c)
-# target_link_libraries(cartzero simgrid mtest_c)
+target_link_libraries(cartcreates simgrid mtest_c)
+# target_link_libraries(cartmap1 simgrid mtest_c)
+target_link_libraries(cartshift1 simgrid mtest_c)
+target_link_libraries(cartsuball simgrid mtest_c)
+target_link_libraries(cartzero simgrid mtest_c)
# target_link_libraries(dgraph_unwgt simgrid mtest_c)
# target_link_libraries(dims1 simgrid mtest_c)
# target_link_libraries(dims2 simgrid mtest_c)
#need Cart implem
#cartmap1 4
-#cartzero 4
-#cartshift1 4
-#cartsuball 4
-#cartcreates 4
+cartzero 4
+cartshift1 4
+cartsuball 4
+cartcreates 4
#need MPI_Dims_create
#dims1 4
#dims2 1