ADD_TEST(test-smpi-mpich3-attr-raw ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich3-test/attr perl ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/runtests -mpiexec=${CMAKE_BINARY_DIR}/smpi_script/bin/smpirun -srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/attr -tests=testlist -execarg=--cfg=contexts/factory:raw)
ADD_TEST(test-smpi-mpich3-comm-raw ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich3-test/comm perl ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/runtests -mpiexec=${CMAKE_BINARY_DIR}/smpi_script/bin/smpirun -srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/comm -tests=testlist -execarg=--cfg=contexts/factory:raw)
ADD_TEST(test-smpi-mpich3-init-raw ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich3-test/init perl ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/runtests -mpiexec=${CMAKE_BINARY_DIR}/smpi_script/bin/smpirun -srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/init -tests=testlist -execarg=--cfg=contexts/factory:raw)
- ADD_TEST(test-smpi-mpich3-datatype-raw ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich3-test/datatype perl ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/runtests -mpiexec=${CMAKE_BINARY_DIR}/smpi_script/bin/smpirun -srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/datatype -tests=testlist -execarg=--cfg=contexts/factory:raw)
+ ADD_TEST(test-smpi-mpich3-datatype-raw ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich3-test/datatype perl ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/runtests -mpiexec=${CMAKE_BINARY_DIR}/smpi_script/bin/smpirun -srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/datatype -tests=testlist -execarg=--cfg=contexts/factory:raw -execarg=--cfg=smpi/privatize_global_variables:yes)
ADD_TEST(test-smpi-mpich3-group-raw ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich3-test/group perl ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/runtests -mpiexec=${CMAKE_BINARY_DIR}/smpi_script/bin/smpirun -srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/group -tests=testlist -execarg=--cfg=contexts/factory:raw)
ADD_TEST(test-smpi-mpich3-pt2pt-raw ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich3-test/pt2pt perl ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/runtests -mpiexec=${CMAKE_BINARY_DIR}/smpi_script/bin/smpirun -srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/pt2pt -tests=testlist -execarg=--cfg=contexts/factory:raw -execarg=--cfg=smpi/privatize_global_variables:yes)
ADD_TEST(test-smpi-mpich3-topo-raw ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich3-test/topo perl ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/runtests -mpiexec=${CMAKE_BINARY_DIR}/smpi_script/bin/smpirun -srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/topo -tests=testlist -execarg=--cfg=contexts/factory:raw)
${CMAKE_CURRENT_SOURCE_DIR}/checktests
${CMAKE_CURRENT_SOURCE_DIR}/generate_report
${CMAKE_CURRENT_SOURCE_DIR}/util/mtest.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/util/dtypes.c
${CMAKE_CURRENT_SOURCE_DIR}/util/mtest_manual.c
${CMAKE_CURRENT_SOURCE_DIR}/f77/testlist
${CMAKE_CURRENT_SOURCE_DIR}/f90/testlist
${CMAKE_CURRENT_SOURCE_DIR}/include/mpitestconf.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/include/dtypes.h
${CMAKE_CURRENT_SOURCE_DIR}/include/mpitest.h
PARENT_SCOPE)
add_executable(longuser longuser.c)
# add_executable(nonblocking2 nonblocking2.c)
# add_executable(nonblocking3 nonblocking3.c)
-# add_executable(nonblocking4 nonblocking3.c)
+# add_executable(nonblocking4 nonblocking4.c)
# add_executable(nonblocking nonblocking.c)
# add_executable(opband opband.c)
# add_executable(opbor opbor.c)
${CMAKE_CURRENT_SOURCE_DIR}/nonblocking2.c
${CMAKE_CURRENT_SOURCE_DIR}/nonblocking3.c
${CMAKE_CURRENT_SOURCE_DIR}/nonblocking.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/nonblocking4.c
${CMAKE_CURRENT_SOURCE_DIR}/opband.c
${CMAKE_CURRENT_SOURCE_DIR}/opbor.c
${CMAKE_CURRENT_SOURCE_DIR}/opbxor.c
add_executable(comm_group_half comm_group_half.c)
add_executable(comm_group_rand comm_group_rand.c)
# add_executable(comm_idup comm_idup.c)
+ # add_executable(comm_idup_mul comm_idup_mul.c)
+ # add_executable(comm_idup_overlap comm_idup_overlap.c)
add_executable(comm_info comm_info.c)
# add_executable(commname commname.c)
add_executable(ctxalloc ctxalloc.c)
${CMAKE_CURRENT_SOURCE_DIR}/comm_group_half.c
${CMAKE_CURRENT_SOURCE_DIR}/comm_group_rand.c
${CMAKE_CURRENT_SOURCE_DIR}/comm_idup.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/comm_idup_overlap.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/comm_idup_mul.c
${CMAKE_CURRENT_SOURCE_DIR}/comm_info.c
${CMAKE_CURRENT_SOURCE_DIR}/commname.c
${CMAKE_CURRENT_SOURCE_DIR}/ctxalloc.c
MPI_Comm_split( MPI_COMM_WORLD, 0, size - rank, &tmpComm[i] );
}
- MPI_Sendrecv( 0, 0, MPI_INT, source, 1,
- 0, 0, MPI_INT, source, 1, MPI_COMM_WORLD, &status );
+ MPI_Sendrecv( NULL, 0, MPI_INT, source, 1,
+ NULL, 0, MPI_INT, source, 1, MPI_COMM_WORLD, &status );
MPI_Wait( &req, &status );
for (i=0; i<NELM; i++) {
MPI_Comm_split( MPI_COMM_WORLD, 0, size - rank, &tmpComm[i] );
}
/* Synchronize with the receiver */
- MPI_Sendrecv( 0, 0, MPI_INT, dest, 1,
- 0, 0, MPI_INT, dest, 1, MPI_COMM_WORLD, &status );
+ MPI_Sendrecv( NULL, 0, MPI_INT, dest, 1,
+ NULL, 0, MPI_INT, dest, 1, MPI_COMM_WORLD, &status );
MPI_Send( buf, NELM, MPI_INT, dest, 0, comm );
free( buf );
}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2013 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+
+/*
+ * Test creating multiple communicators with MPI_Comm_idup.
+ */
+
+#include <stdio.h>
+#include <mpi.h>
+
+#define NUM_ITER 2
+
+int main(int argc, char **argv)
+{
+ int i, rank;
+ MPI_Comm comms[NUM_ITER];
+ MPI_Request req[NUM_ITER];
+
+ MPI_Init(&argc, &argv);
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+
+ /* Start all NUM_ITER non-blocking duplications before completing
+ any of them; the implementation must let them progress concurrently. */
+ for (i = 0; i < NUM_ITER; i++)
+ MPI_Comm_idup(MPI_COMM_WORLD, &comms[i], &req[i]);
+
+ MPI_Waitall(NUM_ITER, req, MPI_STATUSES_IGNORE);
+
+ for (i = 0; i < NUM_ITER; i++)
+ MPI_Comm_free(&comms[i]);
+
+ /* The test harness greps for this exact string to detect success. */
+ if (rank == 0)
+ printf(" No Errors\n");
+
+ MPI_Finalize();
+
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2013 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+
+#include <stdio.h>
+#include <mpi.h>
+
+int main(int argc, char **argv)
+{
+ int i, rank, size, color;
+ MPI_Group group;
+ MPI_Comm primary[2], secondary[2], tmp;
+ MPI_Request req[2];
+
+ MPI_Init(&argc, &argv);
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &size);
+
+ /* Each pair of processes creates a communicator */
+ /* Round i: ranks i and (i+1)%size split into a 2-process communicator;
+ every other rank joins a throwaway communicator that is freed at once.
+ After all rounds each rank is in two pairwise comms: primary[0]
+ (with its successor) and secondary[0] (with its predecessor). */
+ for (i = 0; i < size; i++) {
+ if (rank == i)
+ MPI_Comm_split(MPI_COMM_WORLD, 0, 0, &primary[0]);
+ else if (rank == (i + 1) % size)
+ MPI_Comm_split(MPI_COMM_WORLD, 0, 0, &secondary[0]);
+ else {
+ MPI_Comm_split(MPI_COMM_WORLD, 1, 0, &tmp);
+ MPI_Comm_free(&tmp);
+ }
+ MPI_Barrier(MPI_COMM_WORLD);
+ }
+
+ /* Each pair dups the communicator such that the dups are
+ * overlapping. If this were done with MPI_Comm_dup, this should
+ * deadlock. */
+ MPI_Comm_idup(primary[0], &primary[1], &req[0]);
+ MPI_Comm_idup(secondary[0], &secondary[1], &req[1]);
+ MPI_Waitall(2, req, MPI_STATUSES_IGNORE);
+
+ for (i = 0; i < 2; i++) {
+ MPI_Comm_free(&primary[i]);
+ MPI_Comm_free(&secondary[i]);
+ }
+
+ /* The test harness greps for this exact string to detect success. */
+ if (rank == 0)
+ printf(" No Errors\n");
+
+ MPI_Finalize();
+
+ return 0;
+}
}
/* Can we communicate with this new communicator? */
+ dest = MPI_PROC_NULL;
if (rank == 0) {
dest = size - 1;
a = rank;
MPI_Barrier(comm);
/* Can we communicate with this new communicator? */
+ dest = MPI_PROC_NULL;
if (rank == 0) {
dest = size - 1;
a = rank;
comm_idup 2 mpiversion=3.0
comm_idup 4 mpiversion=3.0
comm_idup 9 mpiversion=3.0
+comm_idup_mul 2 mpiversion=3.0
+comm_idup_overlap 2 mpiversion=3.0
dup_with_info 2 mpiversion=3.0
dup_with_info 4 mpiversion=3.0
dup_with_info 9 mpiversion=3.0
add_executable(cxx-types cxx-types.c)
# add_executable(darray-cyclic darray-cyclic.c)
# add_executable(darray-pack darray-pack.c)
+ add_executable(dataalign dataalign.c)
add_executable(gaddress gaddress.c)
# add_executable(get-elements get-elements.c)
# add_executable(get-elements-pairtype get-elements-pairtype.c)
# add_executable(getpartelm getpartelm.c)
+# add_executable(get-struct get-struct.c)
add_executable(hindexed_block hindexed_block.c)
add_executable(hindexed_block_contents hindexed_block_contents.c)
# add_executable(hindexed-zeros hindexed-zeros.c)
+# add_executable(hvecblklen hvecblklen.c)
# add_executable(indexed-misc indexed-misc.c)
# add_executable(large-count large-count.c)
+# add_executable(large_type large_type.c)
+# add_executable(large_type_sendrec large_type_sendrec.c)
# add_executable(lbub lbub.c)
# add_executable(localpack localpack.c)
add_executable(longdouble longdouble.c)
# add_executable(lots-of-types lots-of-types.c)
# add_executable(pairtype-pack pairtype-pack.c)
# add_executable(pairtype-size-extent pairtype-size-extent.c)
+# add_executable(segtest segtest.c)
+ add_executable(sendrecvt2 ${CMAKE_CURRENT_SOURCE_DIR}/../util/dtypes.c sendrecvt2.c)
+ add_executable(sendrecvt4 ${CMAKE_CURRENT_SOURCE_DIR}/../util/dtypes.c sendrecvt4.c)
add_executable(simple-commit simple-commit.c)
# add_executable(simple-pack simple-pack.c)
# add_executable(simple-pack-external simple-pack-external.c)
add_executable(struct-ezhov struct-ezhov.c)
# add_executable(struct-no-real-types struct-no-real-types.c)
# add_executable(struct-pack struct-pack.c)
+# add_executable(structpack2 structpack2.c)
add_executable(struct-verydeep struct-verydeep.c)
add_executable(struct-zero-count struct-zero-count.c)
# add_executable(subarray subarray.c)
add_executable(typename typename.c)
# add_executable(unpack unpack.c)
# add_executable(unusual-noncontigs unusual-noncontigs.c)
+# add_executable(vecblklen vecblklen.c)
# add_executable(zero-blklen-vector zero-blklen-vector.c)
# add_executable(zeroblks zeroblks.c)
add_executable(zeroparms zeroparms.c)
target_link_libraries(cxx-types simgrid mtest_c)
# target_link_libraries(darray-cyclic simgrid mtest_c)
# target_link_libraries(darray-pack simgrid mtest_c)
+ target_link_libraries(dataalign simgrid mtest_c)
target_link_libraries(gaddress simgrid mtest_c)
# target_link_libraries(get-elements simgrid mtest_c)
# target_link_libraries(get-elements-pairtype simgrid mtest_c)
# target_link_libraries(getpartelm simgrid mtest_c)
+# target_link_libraries(get-struct simgrid mtest_c)
target_link_libraries(hindexed_block simgrid mtest_c)
target_link_libraries(hindexed_block_contents simgrid mtest_c)
# target_link_libraries(hindexed-zeros simgrid mtest_c)
+# target_link_libraries(hvecblklen simgrid mtest_c)
# target_link_libraries(indexed-misc simgrid mtest_c)
# target_link_libraries(large-count simgrid mtest_c)
+# target_link_libraries(large_type simgrid mtest_c)
+# target_link_libraries(large_type_sendrec simgrid mtest_c)
# target_link_libraries(lbub simgrid mtest_c)
# target_link_libraries(localpack simgrid mtest_c)
target_link_libraries(longdouble simgrid mtest_c)
# target_link_libraries(lots-of-types simgrid mtest_c)
# target_link_libraries(pairtype-pack simgrid mtest_c)
# target_link_libraries(pairtype-size-extent simgrid mtest_c)
+# target_link_libraries(segtest simgrid mtest_c)
+ target_link_libraries(sendrecvt2 simgrid mtest_c)
+ target_link_libraries(sendrecvt4 simgrid mtest_c)
target_link_libraries(simple-commit simgrid mtest_c)
# target_link_libraries(simple-pack simgrid mtest_c)
# target_link_libraries(simple-pack-external simgrid mtest_c)
target_link_libraries(struct-ezhov simgrid mtest_c)
# target_link_libraries(struct-no-real-types simgrid mtest_c)
# target_link_libraries(struct-pack simgrid mtest_c)
+# target_link_libraries(structpack2 simgrid mtest_c)
target_link_libraries(struct-verydeep simgrid mtest_c)
target_link_libraries(struct-zero-count simgrid mtest_c)
# target_link_libraries(subarray simgrid mtest_c)
target_link_libraries(typename simgrid mtest_c)
# target_link_libraries(unpack simgrid mtest_c)
# target_link_libraries(unusual-noncontigs simgrid mtest_c)
+# target_link_libraries(vecblklen simgrid mtest_c)
# target_link_libraries(zero-blklen-vector simgrid mtest_c)
# target_link_libraries(zeroblks simgrid mtest_c)
target_link_libraries(zeroparms simgrid mtest_c)
${CMAKE_CURRENT_SOURCE_DIR}/cxx-types.c
${CMAKE_CURRENT_SOURCE_DIR}/darray-cyclic.c
${CMAKE_CURRENT_SOURCE_DIR}/darray-pack.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/dataalign.c
${CMAKE_CURRENT_SOURCE_DIR}/gaddress.c
${CMAKE_CURRENT_SOURCE_DIR}/get-elements.c
${CMAKE_CURRENT_SOURCE_DIR}/get-elements-pairtype.c
${CMAKE_CURRENT_SOURCE_DIR}/getpartelm.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/get-struct.c
${CMAKE_CURRENT_SOURCE_DIR}/hindexed_block.c
${CMAKE_CURRENT_SOURCE_DIR}/hindexed_block_contents.c
${CMAKE_CURRENT_SOURCE_DIR}/hindexed-zeros.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/hvecblklen.c
${CMAKE_CURRENT_SOURCE_DIR}/indexed-misc.c
${CMAKE_CURRENT_SOURCE_DIR}/large-count.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/large_type.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/large_type_sendrec.c
${CMAKE_CURRENT_SOURCE_DIR}/lbub.c
${CMAKE_CURRENT_SOURCE_DIR}/localpack.c
${CMAKE_CURRENT_SOURCE_DIR}/longdouble.c
${CMAKE_CURRENT_SOURCE_DIR}/lots-of-types.c
${CMAKE_CURRENT_SOURCE_DIR}/pairtype-pack.c
${CMAKE_CURRENT_SOURCE_DIR}/pairtype-size-extent.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/segtest.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/sendrecvt2.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/sendrecvt4.c
${CMAKE_CURRENT_SOURCE_DIR}/simple-commit.c
${CMAKE_CURRENT_SOURCE_DIR}/simple-pack.c
${CMAKE_CURRENT_SOURCE_DIR}/simple-pack-external.c
${CMAKE_CURRENT_SOURCE_DIR}/struct-ezhov.c
${CMAKE_CURRENT_SOURCE_DIR}/struct-no-real-types.c
${CMAKE_CURRENT_SOURCE_DIR}/struct-pack.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/structpack2.c
${CMAKE_CURRENT_SOURCE_DIR}/struct-verydeep.c
${CMAKE_CURRENT_SOURCE_DIR}/struct-zero-count.c
${CMAKE_CURRENT_SOURCE_DIR}/subarray.c
${CMAKE_CURRENT_SOURCE_DIR}/typename.c
${CMAKE_CURRENT_SOURCE_DIR}/unpack.c
${CMAKE_CURRENT_SOURCE_DIR}/unusual-noncontigs.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/vecblklen.c
${CMAKE_CURRENT_SOURCE_DIR}/zero-blklen-vector.c
${CMAKE_CURRENT_SOURCE_DIR}/zeroblks.c
${CMAKE_CURRENT_SOURCE_DIR}/zeroparms.c
if ((i == rank) && (array[i] != rank)) {
errs++;
- if (verbose) fprintf(stderr, "array[%d] = %d; should be %d\n",
- i, array[i], rank);
+ if (verbose) fprintf(stderr, "[2d array rank=%d]:array[%d] = %d; should be %d\n",
+ rank, i, array[i], rank);
}
else if ((i != rank) && (array[i] != 0)) {
errs++;
- if (verbose) fprintf(stderr, "array[%d] = %d; should be %d\n",
- i, array[i], 0);
+ if (verbose) fprintf(stderr, "[2d array rank=%d]:array[%d] = %d; should be %d\n",
+ rank, i, array[i], 0);
}
}
MPI_Type_free(&darray);
for (i=0; i < 4*rank; i++) {
if (array[i] != 0) {
errs++;
- if (verbose) fprintf(stderr, "array[%d] = %d; should be %d\n",
- i, array[i], 0);
+ if (verbose) fprintf(stderr, "[4d array rank=%d]:array[%d] = %d; should be %d\n",
+ rank, i, array[i], 0);
}
}
for (i=4*rank; i < 4*rank + 4; i++) {
if (array[i] != i) {
errs++;
- if (verbose) fprintf(stderr, "array[%d] = %d; should be %d\n",
- i, array[i], i);
+ if (verbose) fprintf(stderr, "[4d array rank=%d]:array[%d] = %d; should be %d\n",
+ rank, i, array[i], i);
}
}
for (i=4*rank+4; i < 72; i++) {
if (array[i] != 0) {
errs++;
- if (verbose) fprintf(stderr, "array[%d] = %d; should be %d\n",
- i, array[i], 0);
+ if (verbose) fprintf(stderr, "[4d array rank=%d]:array[%d] = %d; should be %d\n",
+ rank, i, array[i], 0);
}
}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2014 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+/* The next is for isprint */
+#include <ctype.h>
+#include "mpitest.h"
+
+/* Checks that an MPI struct datatype built from {int, char} matches the
+ C compiler's padded struct layout, then exchanges an array of such
+ structs between rank pairs (rank <-> rank^1) and verifies the data.
+ NOTE(review): rank ^ 1 is only a valid peer when the number of
+ processes is even — assumed guaranteed by the testlist; confirm. */
+int main( int argc, char *argv[])
+{
+ struct a { int i;
+ char c;
+ } s[10], s1[10];
+ int j;
+ int errs = 0;
+ int rank, size, tsize;
+ MPI_Aint text;
+ int blens[2];
+ MPI_Aint disps[2];
+ MPI_Datatype bases[2];
+ MPI_Datatype str, con;
+ MPI_Status status;
+
+ MTest_Init( &argc, &argv );
+
+ MPI_Comm_rank( MPI_COMM_WORLD, &rank );
+ MPI_Comm_size( MPI_COMM_WORLD, &size );
+
+ /* Fill the send buffer with rank-dependent values so the receiver
+ can recompute the expected contents from status.MPI_SOURCE. */
+ for( j = 0; j < 10; j ++ ) {
+ s[j].i = j + rank;
+ s[j].c = j + rank + 'a';
+ }
+
+ /* Struct type: one int at offset 0, one char at offset sizeof(int);
+ its extent should include the compiler's trailing padding. */
+ blens[0] = blens[1] = 1;
+ disps[0] = 0; disps[1] = sizeof(int);
+ bases[0] = MPI_INT; bases[1] = MPI_CHAR;
+ MPI_Type_struct( 2, blens, disps, bases, &str );
+ MPI_Type_commit( &str );
+ MPI_Type_contiguous( 10, str, &con );
+ MPI_Type_commit( &con );
+ MPI_Type_size( con, &tsize );
+ MPI_Type_extent( con, &text );
+
+ MTestPrintfMsg( 0, "Size of MPI array is %d, extent is %d\n",
+ tsize, text );
+
+ /* The following block of code is only for verbose-level output */
+ {
+ void * p1, *p2;
+ p1 = s;
+ p2 = &(s[10].i); /* This statement may fail on some systems */
+ MTestPrintfMsg( 0,
+ "C array starts at %p and ends at %p for a length of %d\n",
+ s, &(s[9].c), (char *)p2-(char *)p1 );
+ }
+
+ MPI_Type_extent( str, &text );
+ MPI_Type_size( str, &tsize );
+ MTestPrintfMsg( 0, "Size of MPI struct is %d, extent is %d\n",
+ tsize, (int)text );
+ /* NOTE(review): %d with sizeof(struct a) (a size_t) relies on the
+ implicit promotion matching int width — harmless here, but %zu or a
+ cast would be strictly correct. */
+ MTestPrintfMsg( 0, "Size of C struct is %d\n", sizeof(struct a) );
+ /* The MPI extent must equal the C struct size (including padding);
+ otherwise the contiguous type would not tile the array correctly. */
+ if (text != sizeof(struct a)) {
+ fprintf( stderr,
+ "Extent of struct a (%d) does not match sizeof (%d)\n",
+ (int)text, (int)sizeof(struct a) );
+ errs++;
+ }
+
+ /* Pairwise exchange: rank <-> rank^1 (send then recv is safe for
+ this small message on any reasonable implementation). */
+ MPI_Send( s, 1, con, rank ^ 1, 0, MPI_COMM_WORLD );
+ MPI_Recv( s1, 1, con, rank ^ 1, 0, MPI_COMM_WORLD, &status );
+
+ for( j = 0; j < 10; j++ ) {
+ MTestPrintfMsg( 0, "%d Sent: %d %c, Got: %d %c\n", rank,
+ s[j].i, s[j].c, s1[j].i, s1[j].c );
+ if ( s1[j].i != j + status.MPI_SOURCE ) {
+ errs++;
+ fprintf( stderr, "Got s[%d].i = %d; expected %d\n", j, s1[j].i,
+ j + status.MPI_SOURCE );
+ }
+ if ( s1[j].c != 'a' + j + status.MPI_SOURCE ) {
+ errs++;
+ /* If the character is not a printing character,
+ this can generate a file that diff, for example,
+ believes is a binary file */
+ if (isprint( (int)(s1[j].c) )) {
+ fprintf( stderr, "Got s[%d].c = %c; expected %c\n",
+ j, s1[j].c, j + status.MPI_SOURCE + 'a');
+ }
+ else {
+ fprintf( stderr, "Got s[%d].c = %x; expected %c\n",
+ j, (int)s1[j].c, j + status.MPI_SOURCE + 'a');
+ }
+ }
+ }
+
+ MPI_Type_free( &str );
+ MPI_Type_free( &con );
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
MPI_Type_create_struct(3, blks, disps, types, &stype);
MPI_Type_commit(&stype);
- err = MPI_Sendrecv(&foo, 1, stype, 0, 0,
- &bar, 2, MPI_DOUBLE_INT, 0, 0,
+ err = MPI_Sendrecv((const void *) &foo, 1, stype, 0, 0,
+ (void *) &bar, 2, MPI_DOUBLE_INT, 0, 0,
MPI_COMM_SELF, &recvstatus);
if (err != MPI_SUCCESS) {
errs++;
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2014 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include "mpitest.h"
+#include <stdio.h>
+#include <string.h>
+
+/* Communicating a datatype built out of structs
+ * This test was motivated by the failure of an example program for
+ * RMA involving simple operations on a struct that included a struct
+ *
+ * The observed failure was a SEGV in the MPI_Get
+ *
+ *
+ */
+#define MAX_KEY_SIZE 64
+#define MAX_VALUE_SIZE 256
+/* Remote pointer: displacement + owning rank (lptr is a local-only cache
+ and is deliberately excluded from the MPI datatype below). */
+typedef struct {
+ MPI_Aint disp;
+ int rank;
+ void *lptr;
+} Rptr;
+typedef struct {
+ Rptr next;
+ char key[MAX_KEY_SIZE], value[MAX_VALUE_SIZE];
+} ListElm;
+Rptr nullDptr = {0,-1,0};
+
+/* Bitmask of sub-tests to run; -1 (all bits set) runs everything. */
+int testCases = -1;
+#define BYTE_ONLY 0x1
+#define TWO_STRUCT 0x2
+int isOneLevel = 0;
+
+int main(int argc, char **argv)
+{
+ int errors=0;
+ Rptr headDptr;
+ ListElm *headLptr=0;
+ int i, wrank;
+ MPI_Datatype dptrType, listelmType;
+ MPI_Win listwin;
+
+ MTest_Init(&argc, &argv);
+ MPI_Comm_rank(MPI_COMM_WORLD,&wrank);
+
+ /* Optional flags restrict which transfer styles are exercised. */
+ for (i=1; i<argc; i++) {
+ if (strcmp(argv[i], "-byteonly") == 0) {
+ testCases = BYTE_ONLY;
+ }
+ else if (strcmp(argv[i], "-twostruct") == 0) {
+ testCases = TWO_STRUCT;
+ }
+ else if (strcmp(argv[i], "-onelevel") == 0) {
+ isOneLevel = 1;
+ }
+ else {
+ printf("Unrecognized argument %s\n", argv[i] );
+ }
+ }
+
+ /* Create the datatypes that we will use to move the data */
+ {
+ int blens[3];
+ MPI_Aint displ[3];
+ MPI_Datatype dtypes[3];
+ ListElm sampleElm;
+
+ /* dptrType covers only {disp, rank}; offsets are measured from a
+ real instance so compiler padding is accounted for. */
+ blens[0] = 1; blens[1] = 1;
+ MPI_Get_address( &nullDptr.disp, &displ[0] );
+ MPI_Get_address( &nullDptr.rank, &displ[1] );
+ displ[1] = displ[1] - displ[0];
+ displ[0] = 0;
+ dtypes[0] = MPI_AINT;
+ dtypes[1] = MPI_INT;
+ MPI_Type_create_struct(2, blens, displ, dtypes, &dptrType);
+ MPI_Type_commit(&dptrType);
+
+ /* -onelevel flattens the nested struct into raw bytes, so the
+ resulting listelmType has no struct-of-struct nesting. */
+ if (isOneLevel) {
+ blens[0] = sizeof(nullDptr); dtypes[0] = MPI_BYTE;
+ }
+ else {
+ blens[0] = 1; dtypes[0] = dptrType;
+ }
+ blens[1] = MAX_KEY_SIZE; dtypes[1] = MPI_CHAR;
+ blens[2] = MAX_VALUE_SIZE; dtypes[2] = MPI_CHAR;
+ MPI_Get_address(&sampleElm.next,&displ[0]);
+ MPI_Get_address(&sampleElm.key[0],&displ[1]);
+ MPI_Get_address(&sampleElm.value[0],&displ[2]);
+ displ[2] -= displ[0];
+ displ[1] -= displ[0];
+ displ[0] = 0;
+ for (i=0; i<3; i++) {
+ MTestPrintfMsg(0,"%d:count=%d,disp=%ld\n",i, blens[i], displ[i]);
+ }
+ MPI_Type_create_struct(3, blens, displ, dtypes, &listelmType);
+ MPI_Type_commit(&listelmType);
+ }
+
+ /* Dynamic window: memory is attached after creation and addressed
+ by absolute displacements obtained with MPI_Get_address. */
+ MPI_Win_create_dynamic(MPI_INFO_NULL, MPI_COMM_WORLD, &listwin);
+
+ headDptr.rank = 0;
+ if (wrank == 0) {
+ /* Create 1 list element (the head) and initialize it */
+ MPI_Alloc_mem(sizeof(ListElm), MPI_INFO_NULL, &headLptr);
+ MPI_Get_address(headLptr, &headDptr.disp);
+ headLptr->next.rank = -1;
+ headLptr->next.disp = (MPI_Aint)MPI_BOTTOM;
+ headLptr->next.lptr = 0;
+ strncpy(headLptr->key,"key1",MAX_KEY_SIZE);
+ strncpy(headLptr->value,"value1",MAX_VALUE_SIZE);
+ MPI_Win_attach(listwin, headLptr, sizeof(ListElm));
+ }
+ /* Publish the head element's displacement to all ranks. */
+ MPI_Bcast(&headDptr.disp, 1, MPI_AINT, 0, MPI_COMM_WORLD);
+
+ MPI_Barrier(MPI_COMM_WORLD);
+
+ if (wrank == 1) {
+ ListElm headcopy;
+
+ MPI_Win_lock_all(0, listwin);
+ /* Get head element with simple get of BYTES */
+ if (testCases & BYTE_ONLY) {
+ /* Poison the fields so a no-op Get is detectable. */
+ headcopy.next.rank=100;
+ headcopy.next.disp=0xefefefef;
+ MPI_Get(&headcopy, sizeof(ListElm), MPI_BYTE,
+ headDptr.rank, headDptr.disp,
+ sizeof(ListElm), MPI_BYTE, listwin);
+ MPI_Win_flush(headDptr.rank, listwin);
+ /* NOTE(review): '&&' reports an error only when BOTH fields are
+ wrong; '||' may have been intended. Kept as-is to match the
+ imported upstream test — confirm against MPICH. */
+ if (headcopy.next.rank != -1 &&
+ headcopy.next.disp != (MPI_Aint)MPI_BOTTOM) {
+ errors++;
+ printf("MPI_BYTE: headcopy contains incorrect next:<%d,%ld>\n",
+ headcopy.next.rank, (long)headcopy.next.disp);
+ }
+ }
+
+ if (testCases & TWO_STRUCT) {
+ headcopy.next.rank=100;
+ headcopy.next.disp=0xefefefef;
+ /* Get head element using struct of struct type. This is
+ not an identical get to the simple BYTE one above but should
+ work */
+ MPI_Get(&headcopy, 1, listelmType, headDptr.rank, headDptr.disp,
+ 1, listelmType, listwin);
+ MPI_Win_flush(headDptr.rank, listwin);
+ /* NOTE(review): same '&&' vs '||' question as above. */
+ if (headcopy.next.rank != -1 &&
+ headcopy.next.disp != (MPI_Aint)MPI_BOTTOM) {
+ errors++;
+ printf("ListelmType: headcopy contains incorrect next:<%d,%ld>\n",
+ headcopy.next.rank, (long)headcopy.next.disp);
+ }
+ }
+
+ MPI_Win_unlock_all(listwin);
+ }
+ MPI_Barrier(MPI_COMM_WORLD);
+ if (wrank == 0) {
+ MPI_Win_detach(listwin,headLptr);
+ MPI_Free_mem(headLptr);
+ }
+ MPI_Win_free(&listwin);
+ MPI_Type_free(&dptrType);
+ MPI_Type_free(&listelmType);
+
+ MTest_Finalize( errors );
+ MPI_Finalize();
+ return MTestReturnValue( errors );
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2014 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include "mpitest.h"
+
+/* Inspired by the Intel MPI_Type_hvector_blklen test.
+ Added to include a test of a dataloop optimization that failed.
+*/
+int main( int argc, char *argv[] )
+{
+ MPI_Datatype ot, ot2, newtype;
+ int position, psize, insize, outsize;
+ /* NOTE(review): buffers are 'signed char *' but the mallocs below cast
+ to '(char *)' — a pointer-signedness mismatch that most compilers
+ only warn about; kept as-is to match the upstream test. */
+ signed char *inbuf=0, *outbuf=0, *pbuf=0, *p;
+ int i, j, k;
+ int errs = 0;
+ int veccount=16, stride=16;
+
+ MTest_Init( &argc, &argv );
+ /*
+ * Create a type with some padding
+ */
+ /* 59 significant bytes resized to a 64-byte extent: each element
+ carries a 5-byte gap at the end. */
+ MPI_Type_contiguous( 59, MPI_CHAR, &ot );
+ MPI_Type_create_resized( ot, 0, 64, &ot2 );
+ /*
+ Use a vector type with a block size equal to the stride - thus
+ tiling the target memory with copies of old type. This is not
+ a contiguous copy since oldtype has a gap at the end.
+ */
+ MPI_Type_hvector( veccount, stride, stride*64, ot2, &newtype );
+ MPI_Type_commit( &newtype );
+
+ insize = veccount * stride * 64;
+ outsize = insize;
+ inbuf = (char *)malloc( insize );
+ outbuf = (char *)malloc( outsize );
+ /* outbuf is pre-filled with -1 so untouched gap bytes are detectable
+ after the unpack. */
+ for (i=0; i<outsize; i++) {
+ inbuf[i] = i % 64;
+ outbuf[i] = -1;
+ }
+
+ MPI_Pack_size( 1, newtype, MPI_COMM_WORLD, &psize );
+ pbuf = (char *)malloc( psize );
+
+ /* Round-trip: pack one newtype from inbuf, then unpack into outbuf.
+ Only the 59 data bytes of each 64-byte tile should be copied. */
+ position = 0;
+ MPI_Pack( inbuf, 1, newtype, pbuf, psize, &position, MPI_COMM_WORLD );
+ psize = position;
+ position = 0;
+ MPI_Unpack( pbuf, psize, &position, outbuf, 1, newtype, MPI_COMM_WORLD );
+
+
+ /* Check the output */
+ p = outbuf;
+ for (i=0; i<veccount; i++) {
+ for (j=0; j<stride; j++) {
+ /* First 59 bytes of each tile must hold the packed data... */
+ for (k=0; k<59; k++) {
+ if (*p != k % 64) {
+ errs++;
+ fprintf( stderr, "[%d,%d,%d]expected %d but saw %d\n",
+ i, j, k, (k%64), *p );
+ }
+ p++;
+ }
+ /* ...and the 5 gap bytes must remain at the -1 fill value. */
+ for (k=59; k<64; k++) {
+ if (*p != -1) {
+ errs++;
+ fprintf( stderr, "[%d,%d,%d]expected -1 but saw %d\n",
+ i, j, k, *p );
+ }
+ p++;
+ }
+ }
+ }
+
+ free( pbuf );
+ free( inbuf );
+ free( outbuf );
+
+ MPI_Type_free( &ot );
+ MPI_Type_free( &ot2 );
+ MPI_Type_free( &newtype );
+ MTest_Finalize( errs );
+ MPI_Finalize();
+
+ return 0;
+}
#include <stdlib.h>
#include <mpi.h>
-#define equals(a, b) ((long long)(a) == (long long)(b))
-
/* assert-like macro that bumps the err count and emits a message */
#define check(x_) \
do { \
} \
} while (0)
-/* Abort when using unimplemented functions. Currently, it should not happen,
- * since sizeof(MPI_Count) == sizeof(int), but it avoids compile errors about
- * undefined functions. */
-#define err_unimpl(func) do { \
- fprintf(stderr, "ERROR: %s is not implemented\n", #func); \
- abort(); \
- } while (0)
-
-#define MPI_Type_size_x(a,b) err_unimpl(MPI_Type_size_x)
-#define MPI_Type_get_extent_x(a,b,c) err_unimpl(MPI_Type_get_extent_x)
-#define MPI_Type_get_true_extent_x(a,b,c) err_unimpl(MPI_Type_get_true_extent_x)
-#define MPI_Get_elements_x(a,b,c) err_unimpl(MPI_Get_elements_x)
-#define MPI_Status_set_elements_x(a,b,c) err_unimpl(MPI_Status_set_elements_x)
-
int main(int argc, char *argv[])
{
int errs = 0;
int size, elements, count;
MPI_Aint lb, extent;
MPI_Count size_x, lb_x, extent_x, elements_x;
- double imx4i_true_extent;
+ MPI_Count imx4i_true_extent;
MPI_Datatype imax_contig = MPI_DATATYPE_NULL;
MPI_Datatype four_ints = MPI_DATATYPE_NULL;
MPI_Datatype imx4i = MPI_DATATYPE_NULL;
/* MPI_Type_size */
MPI_Type_size(imax_contig, &size);
- check(equals(size, INT_MAX));
+ check(size == INT_MAX);
MPI_Type_size(four_ints, &size);
- check(equals(size, 4*sizeof(int)));
+ check(size == 4*sizeof(int));
MPI_Type_size(imx4i, &size);
- check(equals(size, MPI_UNDEFINED)); /* should overflow an int */
+ check(size == MPI_UNDEFINED); /* should overflow an int */
MPI_Type_size(imx4i_rsz, &size);
- check(equals(size, MPI_UNDEFINED)); /* should overflow an int */
+ check(size == MPI_UNDEFINED); /* should overflow an int */
/* MPI_Type_size_x */
MPI_Type_size_x(imax_contig, &size_x);
- check(equals(size_x, INT_MAX));
+ check(size_x == INT_MAX);
MPI_Type_size_x(four_ints, &size_x);
- check(equals(size_x, 4*sizeof(int)));
+ check(size_x == 4*sizeof(int));
MPI_Type_size_x(imx4i, &size_x);
- check(equals(size_x, 4LL*sizeof(int)*(INT_MAX/2))); /* should overflow an int */
+ check(size_x == 4LL*sizeof(int)*(INT_MAX/2)); /* should overflow an int */
MPI_Type_size_x(imx4i_rsz, &size_x);
- check(equals(size_x, 4LL*sizeof(int)*(INT_MAX/2))); /* should overflow an int */
+ check(size_x == 4LL*sizeof(int)*(INT_MAX/2)); /* should overflow an int */
/* MPI_Type_get_extent */
MPI_Type_get_extent(imax_contig, &lb, &extent);
- check(equals(lb, 0));
- check(equals(extent, INT_MAX));
+ check(lb == 0);
+ check(extent == INT_MAX);
MPI_Type_get_extent(four_ints, &lb, &extent);
- check(equals(lb, 0));
- check(equals(extent, 4*sizeof(int)));
+ check(lb == 0);
+ check(extent == 4*sizeof(int));
MPI_Type_get_extent(imx4i, &lb, &extent);
- check(equals(lb, 0));
+ check(lb == 0);
if (sizeof(MPI_Aint) == sizeof(int))
- check(equals(extent, MPI_UNDEFINED));
+ check(extent == MPI_UNDEFINED);
else
- check(equals(extent, imx4i_true_extent));
+ check(extent == imx4i_true_extent);
MPI_Type_get_extent(imx4i_rsz, &lb, &extent);
- check(equals(lb, INT_MAX));
- check(equals(extent, -1024));
+ check(lb == INT_MAX);
+ check(extent == -1024);
/* MPI_Type_get_extent_x */
MPI_Type_get_extent_x(imax_contig, &lb_x, &extent_x);
- check(equals(lb_x, 0));
- check(equals(extent_x, INT_MAX));
+ check(lb_x == 0);
+ check(extent_x == INT_MAX);
MPI_Type_get_extent_x(four_ints, &lb_x, &extent_x);
- check(equals(lb_x, 0));
- check(equals(extent_x, 4*sizeof(int)));
+ check(lb_x == 0);
+ check(extent_x == 4*sizeof(int));
MPI_Type_get_extent_x(imx4i, &lb_x, &extent_x);
- check(equals(lb_x, 0));
- check(equals(extent_x, imx4i_true_extent));
+ check(lb_x == 0);
+ check(extent_x == imx4i_true_extent);
MPI_Type_get_extent_x(imx4i_rsz, &lb_x, &extent_x);
- check(equals(lb_x, INT_MAX));
- check(equals(extent_x, -1024));
+ check(lb_x == INT_MAX);
+ check(extent_x == -1024);
/* MPI_Type_get_true_extent */
MPI_Type_get_true_extent(imax_contig, &lb, &extent);
- check(equals(lb, 0));
- check(equals(extent, INT_MAX));
+ check(lb == 0);
+ check(extent == INT_MAX);
MPI_Type_get_true_extent(four_ints, &lb, &extent);
- check(equals(lb, 0));
- check(equals(extent, 4*sizeof(int)));
+ check(lb == 0);
+ check(extent == 4*sizeof(int));
MPI_Type_get_true_extent(imx4i, &lb, &extent);
- check(equals(lb, 0));
+ check(lb == 0);
if (sizeof(MPI_Aint) == sizeof(int))
- check(equals(extent, MPI_UNDEFINED));
+ check(extent == MPI_UNDEFINED);
else
- check(equals(extent, imx4i_true_extent));
+ check(extent == imx4i_true_extent);
MPI_Type_get_true_extent(imx4i_rsz, &lb, &extent);
- check(equals(lb, 0));
+ check(lb == 0);
if (sizeof(MPI_Aint) == sizeof(int))
- check(equals(extent, MPI_UNDEFINED));
+ check(extent == MPI_UNDEFINED);
else
- check(equals(extent, imx4i_true_extent));
+ check(extent == imx4i_true_extent);
/* MPI_Type_get_true_extent_x */
MPI_Type_get_true_extent_x(imax_contig, &lb_x, &extent_x);
- check(equals(lb_x, 0));
- check(equals(extent_x, INT_MAX));
+ check(lb_x == 0);
+ check(extent_x == INT_MAX);
MPI_Type_get_true_extent_x(four_ints, &lb_x, &extent_x);
- check(equals(lb_x, 0));
- check(equals(extent_x, 4*sizeof(int)));
+ check(lb_x == 0);
+ check(extent_x == 4*sizeof(int));
MPI_Type_get_true_extent_x(imx4i, &lb_x, &extent_x);
- check(equals(lb_x, 0));
- check(equals(extent_x, imx4i_true_extent));
+ check(lb_x == 0);
+ check(extent_x == imx4i_true_extent);
MPI_Type_get_true_extent_x(imx4i_rsz, &lb_x, &extent_x);
- check(equals(lb_x, 0));
- check(equals(extent_x, imx4i_true_extent));
+ check(lb_x == 0);
+ check(extent_x == imx4i_true_extent);
/* MPI_{Status_set_elements,Get_elements}{,_x} */
MPI_Get_elements(&status, MPI_INT, &elements);
MPI_Get_elements_x(&status, MPI_INT, &elements_x);
MPI_Get_count(&status, MPI_INT, &count);
- check(equals(elements, 10));
- check(equals(elements_x, 10));
- check(equals(count, 10));
+ check(elements == 10);
+ check(elements_x == 10);
+ check(count == 10);
/* set_x simple */
MPI_Status_set_elements_x(&status, MPI_INT, 10);
MPI_Get_elements(&status, MPI_INT, &elements);
MPI_Get_elements_x(&status, MPI_INT, &elements_x);
MPI_Get_count(&status, MPI_INT, &count);
- check(equals(elements, 10));
- check(equals(elements_x, 10));
- check(equals(count, 10));
+ check(elements == 10);
+ check(elements_x == 10);
+ check(count == 10);
/* Sets elements corresponding to count=1 of the given MPI datatype, using
* set_elements and set_elements_x. Checks expected values are returned by
MPI_Get_elements(&status, (type_), &elements); \
MPI_Get_elements_x(&status, (type_), &elements_x); \
MPI_Get_count(&status, (type_), &count); \
- check(equals(elements, (elts_))); \
- check(equals(elements_x, (elts_))); \
- check(equals(count, 1)); \
+ check(elements == (elts_)); \
+ check(elements_x == (elts_)); \
+ check(count == 1); \
} \
\
elements = elements_x = count = 0xfeedface; \
MPI_Get_elements_x(&status, (type_), &elements_x); \
MPI_Get_count(&status, (type_), &count); \
if ((elts_) > INT_MAX) { \
- check(equals(elements, MPI_UNDEFINED)); \
+ check(elements == MPI_UNDEFINED); \
} \
else { \
- check(equals(elements, (elts_))); \
+ check(elements == (elts_)); \
} \
- check(equals(elements_x, (elts_))); \
- check(equals(count, 1)); \
+ check(elements_x == (elts_)); \
+ check(count == 1); \
} while (0) \
check_set_elements(imax_contig, INT_MAX);
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2013 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <mpi.h>
+
+/* Build a datatype describing 'nbytes' bytes out of 1 MiB chunks plus a
+ * trailing remainder, expressed as a struct of {chunks, leftover bytes}.
+ * Returns MPI_DATATYPE_NULL when the remainder path needs displacements
+ * past what a 32-bit MPI_Aint can address. */
+static MPI_Datatype make_largexfer_type_struct(MPI_Offset nbytes)
+{
+ int typechunk_size = 1024*1024; /* in bytes: TODO: figure out how big a
+ chunk is really needed */
+ int chunk_count;
+ int remainder=0;
+ MPI_Datatype memtype, chunktype;
+
+ /* need to cook up a new datatype to accommodate large datatypes */
+ /* first pass: chunks of 1 MiB plus an additional remainder. Does require
+ * 8 byte MPI_Aint, which should have been checked for earlier */
+
+ chunk_count = nbytes/typechunk_size;
+ remainder = nbytes % typechunk_size;
+ MPI_Type_contiguous(typechunk_size, MPI_BYTE, &chunktype);
+ MPI_Type_commit(&chunktype);
+
+ /* a zero remainder means we can just count contigs */
+ if (remainder == 0) {
+ MPI_Type_contiguous(chunk_count, chunktype, &memtype);
+ MPI_Type_free(&chunktype);
+ } else {
+ if (sizeof(MPI_Aint) <= sizeof(int)) {
+ /* bug fix: the committed chunk type was leaked on this
+ * early-out path; release it before bailing */
+ MPI_Type_free(&chunktype);
+ return MPI_DATATYPE_NULL;
+ }
+ /* struct type: some number of chunks plus remaining bytes tacked
+ * on at end */
+ int lens[] = {chunk_count, remainder};
+ MPI_Aint disp[] = {0, (MPI_Aint) typechunk_size * (MPI_Aint)chunk_count};
+ MPI_Datatype types[] = {chunktype, MPI_BYTE};
+
+ MPI_Type_struct(2, lens, disp, types, &memtype);
+ MPI_Type_free(&chunktype);
+ }
+ MPI_Type_commit(&memtype);
+ return memtype;
+}
+/* Same total size as the struct variant, but described as one hindexed
+ * type: (count-1) full 1 MiB blocks plus a short final block. Requires
+ * an 8-byte MPI_Aint; returns MPI_DATATYPE_NULL otherwise. */
+static MPI_Datatype make_largexfer_type_hindexed(MPI_Offset nbytes)
+{
+ int i, count;
+ int chunk_size = 1024*1024;
+ int *blocklens;
+ MPI_Aint *disp;
+ MPI_Datatype memtype;
+
+ /* need to cook up a new datatype to accommodate large datatypes */
+ /* Does require 8 byte MPI_Aint, which should have been checked for earlier
+ */
+
+ if (sizeof(MPI_Aint) <= sizeof(int)) {
+ return MPI_DATATYPE_NULL;
+ }
+
+ /* ceiling division */
+ count = 1 + ((nbytes -1) / chunk_size );
+
+ blocklens = calloc(count, sizeof(int));
+ disp = calloc(count, sizeof(MPI_Aint));
+
+ for (i=0; i<(count-1); i++) {
+ blocklens[i] = chunk_size;
+ disp[i] = (MPI_Aint)chunk_size*i;
+ }
+ /* after the loop i == count-1, so this is "whatever is left over" */
+ blocklens[count-1] = nbytes-((MPI_Aint)chunk_size*i);
+ disp[count-1] = (MPI_Aint)chunk_size*(count-1);
+
+ MPI_Type_create_hindexed(count, blocklens, disp, MPI_BYTE, &memtype);
+ MPI_Type_commit(&memtype);
+
+ /* bug fix: MPI copies the layout arrays at type-creation time, so the
+ * caller may free them immediately; these were leaked on every call */
+ free(blocklens);
+ free(disp);
+
+ return memtype;
+}
+
+
+/* Verify the size and true extent MPI reports for 'type' against the
+ * byte count it was built from. Returns the number of failed checks. */
+int testtype(MPI_Datatype type, MPI_Offset expected) {
+ MPI_Count size, lb, extent;
+ int mismatches = 0;
+
+ MPI_Type_size_x(type, &size);
+ if (size < 0) {
+ printf("ERROR: type size apparently overflowed integer\n");
+ mismatches += 1;
+ }
+ if (size != expected) {
+ printf("reported type size %lld does not match expected %lld\n",
+ size, expected);
+ mismatches += 1;
+ }
+
+ MPI_Type_get_true_extent_x(type, &lb, &extent);
+ if (lb != 0) {
+ printf("ERROR: type should have lb of 0, reported %lld\n", lb);
+ mismatches += 1;
+ }
+ if (extent != size) {
+ printf("ERROR: extent should match size, not %lld\n", extent);
+ mismatches += 1;
+ }
+
+ return mismatches;
+}
+
+
+int main(int argc, char **argv)
+{
+
+ int nerrors=0, i;
+ int rank, size;
+ MPI_Init(&argc, &argv);
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &size);
+
+#define NR_TYPES 3
+ /* The last two sizes exceed INT_MAX (2346319872 > 2^31-1) on purpose
+ * so the MPI-3 *_x large-count queries are exercised; the first one
+ * is chunk-aligned and takes the pure-contig code path. */
+ MPI_Offset expected_sizes[NR_TYPES] = {1024UL*1024UL*2400UL,
+ 2346319872,
+ 2346319872};
+ MPI_Datatype types[NR_TYPES];
+
+ /* a contig type, itself large, but does not need 8 byte aints */
+ types[0] = make_largexfer_type_struct(expected_sizes[0]);
+ /* struct with addresses out past 2 GiB */
+ types[1] = make_largexfer_type_struct(expected_sizes[1]);
+ /* similar, but with hindexed type */
+ types[2] = make_largexfer_type_hindexed(expected_sizes[2]);
+
+ /* the builders return MPI_DATATYPE_NULL when MPI_Aint is too narrow,
+ * so such cases are skipped rather than counted as failures */
+ for (i=0; i<NR_TYPES; i++) {
+ if (types[i] != MPI_DATATYPE_NULL) {
+ nerrors += testtype(types[i], expected_sizes[i]);
+ MPI_Type_free(&(types[i]));
+ }
+ }
+
+ MPI_Finalize();
+ /* rank was cached above, so printing after MPI_Finalize is fine */
+ if (rank == 0) {
+ if (nerrors) {
+ printf("found %d errors\n", nerrors);
+ } else {
+ printf(" No errors\n");
+ }
+ }
+
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2013 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+/* Defines INT32_MAX; note that is for fixed-width int32_t, not plain int
+ (for which we use INT_MAX from limits.h below). */
+#include <stdint.h>
+
+/* Defines INT_MAX */
+#include <limits.h>
+
+#include <mpi.h>
+
+#include <assert.h>
+/* Print a human-readable description of 'errorcode' and abort the job.
+ * Return codes of the introspection calls themselves are deliberately
+ * ignored: if MPI is in a really sorry state, all of them might fail. */
+static void verbose_abort(int errorcode)
+{
+ int rank, errorclass, resultlen;
+ char errorstring[MPI_MAX_ERROR_STRING] = {0}; /* zeroing is optional */
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+ MPI_Error_class(errorcode, &errorclass);
+ MPI_Error_string(errorcode, errorstring, &resultlen);
+
+ fprintf(stderr, "%d: MPI failed (%d: %s) \n", rank, errorclass, errorstring);
+ fflush(stderr); /* almost certainly redundant with the following... */
+
+ MPI_Abort(MPI_COMM_WORLD, errorclass);
+}
+#define MPI_ASSERT(rc) \
+ do { if ((rc)!=MPI_SUCCESS) verbose_abort(rc); } while (0)
+
+int Type_contiguous_x(MPI_Count count, MPI_Datatype oldtype,
+ MPI_Datatype * newtype);
+
+#define BIGMPI_MAX INT_MAX
+
+/*
+ * Synopsis
+ *
+ * int Type_contiguous_x(MPI_Count count,
+ * MPI_Datatype oldtype,
+ * MPI_Datatype * newtype)
+ *
+ * Input Parameters
+ *
+ * count replication count (nonnegative integer)
+ * oldtype old datatype (handle)
+ *
+ * Output Parameter
+ *
+ * newtype new datatype (handle)
+ *
+ */
+int Type_contiguous_x(MPI_Count count, MPI_Datatype oldtype, MPI_Datatype * newtype)
+{
+ /* split the count into c full INT_MAX-element chunks plus r leftovers */
+ MPI_Count c = count/BIGMPI_MAX;
+ MPI_Count r = count%BIGMPI_MAX;
+
+ MPI_Datatype chunk;
+ MPI_ASSERT(MPI_Type_contiguous(BIGMPI_MAX, oldtype, &chunk));
+
+ MPI_Datatype chunks;
+ MPI_ASSERT(MPI_Type_contiguous(c, chunk, &chunks));
+
+ MPI_Datatype remainder;
+ MPI_ASSERT(MPI_Type_contiguous(r, oldtype, &remainder));
+
+ int typesize;
+ MPI_ASSERT(MPI_Type_size(oldtype, &typesize));
+
+ MPI_Aint remdisp = (MPI_Aint)c*BIGMPI_MAX*typesize; /* must explicit-cast to avoid overflow */
+ int array_of_blocklengths[2] = {1,1};
+ MPI_Aint array_of_displacements[2] = {0,remdisp};
+ MPI_Datatype array_of_types[2] = {chunks,remainder};
+
+ /* glue the chunked part and the remainder into a single struct type;
+ * committing here means the caller can use *newtype immediately */
+ MPI_ASSERT(MPI_Type_create_struct(2, array_of_blocklengths, array_of_displacements, array_of_types, newtype));
+ MPI_ASSERT(MPI_Type_commit(newtype));
+
+ /* the intermediate types are no longer needed once newtype is built */
+ MPI_ASSERT(MPI_Type_free(&chunk));
+ MPI_ASSERT(MPI_Type_free(&chunks));
+ MPI_ASSERT(MPI_Type_free(&remainder));
+
+ return MPI_SUCCESS;
+}
+
+
+int main(int argc, char * argv[])
+{
+ int provided;
+ size_t i;
+ MPI_Count j;
+ MPI_ASSERT(MPI_Init_thread(&argc, &argv, MPI_THREAD_SINGLE, &provided));
+
+ int rank, size;
+ MPI_ASSERT(MPI_Comm_rank(MPI_COMM_WORLD, &rank));
+ MPI_ASSERT(MPI_Comm_size(MPI_COMM_WORLD, &size));
+
+ /* default is 2^32 chars, i.e. a transfer count that overflows an int */
+ int logn = (argc>1) ? atoi(argv[1]) : 32;
+ size_t count = (size_t)1<<logn; /* explicit cast required */
+
+ MPI_Datatype bigtype;
+ MPI_ASSERT(Type_contiguous_x( (MPI_Count)count, MPI_CHAR, &bigtype));
+ MPI_ASSERT(MPI_Type_commit(&bigtype));
+
+ MPI_Request requests[2];
+ MPI_Status statuses[2];
+
+ char * rbuf = NULL;
+ char * sbuf = NULL;
+
+ /* last rank receives; prefill with 'a' so unwritten bytes are visible */
+ if (rank==(size-1)) {
+ rbuf = malloc( count * sizeof(char)); assert(rbuf!=NULL);
+ for (i=0; i<count; i++)
+ rbuf[i] = 'a';
+
+ MPI_ASSERT(MPI_Irecv(rbuf, 1, bigtype, 0, 0, MPI_COMM_WORLD, &(requests[1]) ));
+ }
+ if (rank==0) {
+ sbuf = malloc( count * sizeof(char)); assert(sbuf!=NULL);
+ for (i=0; i<count; i++)
+ sbuf[i] = 'z';
+
+ MPI_ASSERT(MPI_Isend(sbuf, 1, bigtype, size-1, 0, MPI_COMM_WORLD, &(requests[0]) ));
+ }
+
+ MPI_Count ocount[2];
+
+ /* with one process both requests live on this rank, so wait on both;
+ * otherwise each side waits only on its own request */
+ if (size==1) {
+ MPI_ASSERT(MPI_Waitall(2, requests, statuses));
+ MPI_ASSERT(MPI_Get_elements_x( &(statuses[1]), MPI_CHAR, &(ocount[1])));
+ }
+ else {
+ if (rank==(size-1)) {
+ MPI_ASSERT(MPI_Wait( &(requests[1]), &(statuses[1]) ));
+ MPI_ASSERT(MPI_Get_elements_x( &(statuses[1]), MPI_CHAR, &(ocount[1]) ));
+ } else if (rank==0) {
+ MPI_ASSERT(MPI_Wait( &(requests[0]), &(statuses[0]) ));
+ /* No valid fields in status from a send request (MPI-3 p53,
+ line 1-5) */
+ }
+ }
+
+ /* correctness check */
+ if (rank==(size-1)) {
+ MPI_Count errors = 0;
+ for (j=0; j<count; j++)
+ errors += ( rbuf[j] != 'z' );
+ if (errors == 0) {
+ printf(" No Errors\n");
+ } else {
+ printf("errors = %lld \n", errors);
+ }
+ }
+
+ if (rbuf) free(rbuf);
+ if (sbuf) free(sbuf);
+
+ MPI_ASSERT(MPI_Type_free(&bigtype));
+
+ MPI_ASSERT(MPI_Finalize());
+
+ return 0;
+}
parse_args(argc, argv);
for (i=0; pairtypes[i].atype != (MPI_Datatype) -1; i++) {
- int atype_size, ptype_size, stype_size, handbuilt_extent=0;
+ int atype_size, ptype_size, stype_size, handbuilt_extent;
MPI_Aint ptype_extent, stype_extent, dummy_lb;
types[0] = pairtypes[i].atype;
if (verbose) fprintf(stderr,
"extent of %s (%d) does not match extent of either hand-built MPI struct (%d) or equivalent C struct (%d)\n",
- pairtypes[i].name, (int) stype_extent,
- (int) ptype_extent,
+ pairtypes[i].name, (int) ptype_extent,
+ (int) stype_extent,
handbuilt_extent);
}
MPI_Type_free( &stype );
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include "mpiimpl.h"
+#include <stdio.h>
+
+/*
+ * Simple segment test, including timing code
+ */
+
+/*
+ * Build datatype structures
+ *
+ * Contiguous
+ * n = 1, 4, 16, 64, 128, 512, 2048, 8196 ints
+ * Vector
+ * blocksize = 1, 4, 64 ints
+ * stride = 1, 64, 127
+ * Block Indexed
+ * blocksize = 1, 4 ints
+ * offsets = i*24 for i = 0 to n, n = 0, 64, 512
+ * Indexed
+ * blocksizes = 1, 2, 4, 3, 7, 5, 6
+ * offsets = i*24 for i = 0 to n, n = 0, 4, 7, 64, 512
+ * (Wrap blocksizes to match offsets)
+ *
+ * Also need a few nested datatypes, such as vector of vectors
+ * Do the versions in Using MPI
+ *
+ */
+
+/*
+ * Routines to create dataloops for basic dataloops
+ */
+/*
+ * Contig
+ */
+/* Allocate and fill a final (leaf) contiguous dataloop of 'count'
+ * elements. NOTE(review): MPIU_Malloc is presumably the project's
+ * malloc wrapper — allocation failure handling not visible here. */
+MPID_Dataloop *MPID_Dataloop_init_contig( int count )
+{
+ MPID_Dataloop *loop = (MPID_Dataloop *)MPIU_Malloc( sizeof(MPID_Dataloop) );
+
+ loop->kind = MPID_DTYPE_CONTIG | DATALOOP_FINAL_MASK;
+ loop->loop_params.c_t.count = count;
+ loop->loop_params.c_t.dataloop = 0;
+ loop->extent = count;
+ loop->handle = 0;
+ return loop;
+}
+
+/*
+ * Vector
+ */
+/* Allocate and fill a final (leaf) strided-vector dataloop. */
+MPID_Dataloop *MPID_Dataloop_init_vector( int count, int blocksize,
+ int stride )
+{
+ MPID_Dataloop *loop = (MPID_Dataloop *)MPIU_Malloc( sizeof(MPID_Dataloop) );
+
+ loop->kind = MPID_DTYPE_VECTOR | DATALOOP_FINAL_MASK;
+ loop->loop_params.v_t.count = count;
+ loop->loop_params.v_t.blocksize = blocksize;
+ loop->loop_params.v_t.stride = stride;
+ loop->loop_params.v_t.dataloop = 0;
+ /* last block starts at (count-1)*stride and runs for blocksize */
+ loop->extent = (count-1)*stride + blocksize;
+ loop->handle = 0;
+ return loop;
+}
+
+/*
+ * Block indexed
+ */
+/* Allocate and fill a final (leaf) block-indexed dataloop: 'count'
+ * blocks of 'blocksize' at the given byte offsets. The offsets array
+ * is copied; the extent is the end of the farthest-reaching block. */
+MPID_Dataloop *MPID_Dataloop_init_blockindexed( int count, int blocksize,
+ MPI_Aint *offset )
+{
+ MPID_Dataloop *bi;
+ MPI_Aint extent = 0; /* bug fix: was read uninitialized in the max
+ scan below (sibling init_indexed zeroes it) */
+ int i;
+
+ bi = (MPID_Dataloop *)MPIU_Malloc( sizeof(MPID_Dataloop) );
+ bi->kind = MPID_DTYPE_BLOCKINDEXED | DATALOOP_FINAL_MASK;
+ bi->loop_params.bi_t.count = count;
+ bi->loop_params.bi_t.blocksize = blocksize;
+ bi->loop_params.bi_t.offset =
+ (MPI_Aint *)MPIU_Malloc( sizeof(MPI_Aint) * count );
+ for (i=0; i<count; i++) {
+ bi->loop_params.bi_t.offset[i] = offset[i];
+ if (offset[i] + blocksize > extent)
+ extent = offset[i] + blocksize;
+ }
+ bi->loop_params.bi_t.dataloop = 0;
+ bi->extent = extent;
+ bi->handle = 0;
+
+ return bi;
+}
+
+/*
+ * Indexed
+ */
+/* Allocate and fill a final (leaf) indexed dataloop with per-block
+ * sizes and byte offsets; both arrays are copied. */
+MPID_Dataloop *MPID_Dataloop_init_indexed( int count, int *blocksize,
+ MPI_Aint *offset )
+{
+ MPI_Aint extent = 0;
+ int i;
+ MPID_Dataloop *loop = (MPID_Dataloop *)MPIU_Malloc( sizeof(MPID_Dataloop) );
+
+ loop->kind = MPID_DTYPE_INDEXED | DATALOOP_FINAL_MASK;
+ loop->loop_params.i_t.count = count;
+ loop->loop_params.i_t.blocksize = (int *)MPIU_Malloc( sizeof(int) * count );
+ loop->loop_params.i_t.offset =
+ (MPI_Aint *)MPIU_Malloc( sizeof(MPI_Aint) * count );
+ for (i=0; i<count; i++) {
+ loop->loop_params.i_t.offset[i] = offset[i];
+ loop->loop_params.i_t.blocksize[i] = blocksize[i];
+ /* the extent reaches to the end of the farthest block */
+ if (offset[i] + blocksize[i] > extent)
+ extent = offset[i] + blocksize[i];
+ }
+ loop->loop_params.i_t.dataloop = 0;
+ loop->extent = extent;
+ loop->handle = 0;
+
+ return loop;
+}
+
+int main( int argc, char **argv )
+{
+ /* MPID_Dataloop *vecloop; */
+ MPI_Datatype vectype;
+ /* blocksize/stride are in BYTES (4-byte block every 28 bytes), which
+ * matches the 1-int-per-7-int MPI vector type built below */
+ int count=200, blocksize=4, stride = 7*4;
+ char *src_buf, *dest_buf;
+ int i,j,k;
+ double r1, r2;
+
+ MPI_Init( &argc, &argv );
+
+/* vecloop = MPID_Dataloop_init_vector( count, blocksize, stride ); */
+
+ MPI_Type_vector( count, 1, 7, MPI_INT, &vectype );
+
+ /* Initialize the data */
+ /* fill the whole strided region with sentinels, then overwrite each
+ * block with recognizable increasing values */
+ src_buf = (char *)MPIU_Malloc( (count - 1) * stride + blocksize );
+ for (i=0; i<(count-1)*stride+blocksize; i++)
+ src_buf[i] = -i;
+ for (i=0; i<count; i++) {
+ for (j=0; j<blocksize; j++)
+ src_buf[i*stride+j] = i*blocksize + j;
+ }
+ dest_buf = (char *)MPIU_Malloc( count*blocksize );
+ for (i=0; i<count*blocksize; i++) {
+ dest_buf[i] = -i;
+ }
+ /* time 100 MPI_Pack calls of the vector type */
+ r1 = MPI_Wtime();
+ for (i=0; i<100; i++) {
+ int position = 0;
+ /*MPID_Segment_pack( vecloop, src_buf, dest_buf );*/
+ MPI_Pack( src_buf, count, vectype, dest_buf, count*blocksize,
+ &position, MPI_COMM_WORLD );
+ }
+ r2 = MPI_Wtime();
+ printf( "Timer for vector pack is %e\n", (r2-r1)/100 );
+ /* packed data should be the dense sequence 0,1,2,... of block bytes */
+ for (i=0; i<count*blocksize; i++) {
+ if (dest_buf[i] != (char)i) {
+ printf( "Error at location %d\n", i );
+ }
+ }
+ /* same packing done by hand, byte at a time, for comparison */
+ r1 = MPI_Wtime();
+ for (k=0; k<100; k++) {
+ char *dest=dest_buf, *src=src_buf;
+ for (i=0; i<count; i++) {
+ for (j=0; j<blocksize; j++)
+ *dest++ = src[j];
+ src+= stride;
+ }
+ }
+ r2 = MPI_Wtime();
+ printf( "Timer for hand vector pack is %e\n", (r2-r1)/100 );
+
+ /* and again int at a time (>>2 converts byte sizes to int counts) */
+ r1 = MPI_Wtime();
+ for (k=0; k<100; k++) {
+ int *dest=(int*)dest_buf, *src=(int*)src_buf;
+ int bsize = blocksize >> 2;
+ int istride = stride >> 2;
+ if (bsize == 1) {
+ for (i=0; i<count; i++) {
+ *dest++ = *src;
+ src+= istride;
+ }
+ }
+ else {
+ for (i=0; i<count; i++) {
+ for (j=0; j<bsize; j++)
+ *dest++ = src[j];
+ src+= istride;
+ }
+ }
+ }
+ r2 = MPI_Wtime();
+ printf( "Timer for hand vector pack (int) is %e\n", (r2-r1)/100 );
+
+ MPI_Finalize();
+ return 0;
+}
+
+/*
+ * Nested vector.
+ * The y-z subface is
+ * Type_vector( ey-sy+1, 1, nx, MPI_DOUBLE, &newx1 );
+ * Type_hvector( ez-sz+1, 1, nx*ny*sizeof(double), newx1, &newx );
+ * This gives a(i,sy:ey,sz:ez) of a(nx,ny,nz) (in Fortran notation)
+ */
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2014 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include "mpitest.h"
+#include <stdio.h>
+#include <string.h>
+#include "dtypes.h"
+
+/*
+ This program is derived from one in the MPICH-1 test suite. It
+ tests a wide variety of basic and derived datatypes.
+ */
+int main( int argc, char **argv)
+{
+ MPI_Datatype *types;
+ void **inbufs, **outbufs;
+ int *counts, *bytesize, ntype;
+ MPI_Comm comm;
+ int rank, np, partner, tag, count;
+ int i, j, k, err, world_rank, errloc;
+ MPI_Status status;
+ char *obuf;
+ char myname[MPI_MAX_OBJECT_NAME];
+ int mynamelen;
+
+ MTest_Init( &argc, &argv );
+
+ /*
+ * Check for -basiconly to select only the simple datatypes
+ */
+ for (i=1; i<argc; i++) {
+ if (!argv[i]) break;
+ if (strcmp( argv[i], "-basiconly" ) == 0) {
+ MTestDatatype2BasicOnly();
+ }
+ }
+
+ MTestDatatype2Allocate( &types, &inbufs, &outbufs, &counts, &bytesize,
+ &ntype );
+ MTestDatatype2Generate( types, inbufs, outbufs, counts, bytesize, &ntype );
+
+ MPI_Comm_rank( MPI_COMM_WORLD, &world_rank );
+
+ /* Test over a wide range of datatypes and communicators */
+ err = 0;
+ tag = 0;
+ while (MTestGetIntracomm( &comm, 2 )) {
+ if (comm == MPI_COMM_NULL) continue;
+ MPI_Comm_rank( comm, &rank );
+ MPI_Comm_size( comm, &np );
+ if (np < 2) continue;
+ if (world_rank == 0)
+ MTestPrintfMsg( 10, "Testing communicator number %s\n",
+ MTestGetIntracommName() );
+
+ tag++;
+ /* rank 0 sends each generated type to the last rank, which checks
+ * the received count, the source, and the payload bytes */
+ for (j=0; j<ntype; j++) {
+ MPI_Type_get_name( types[j], myname, &mynamelen );
+ if (world_rank == 0)
+ MTestPrintfMsg( 10, "Testing type %s\n", myname );
+ if (rank == 0) {
+ partner = np - 1;
+ MPI_Send( inbufs[j], counts[j], types[j], partner, tag, comm );
+ }
+ else if (rank == np-1) {
+ partner = 0;
+ /* zero the receive buffer so stale data cannot mask errors */
+ obuf = outbufs[j];
+ for (k=0; k<bytesize[j]; k++)
+ obuf[k] = 0;
+ MPI_Recv( outbufs[j], counts[j], types[j], partner, tag,
+ comm, &status );
+ /* Test correct */
+ MPI_Get_count( &status, types[j], &count );
+ if (count != counts[j]) {
+ fprintf( stderr,
+ "Error in counts (got %d expected %d) with type %s\n",
+ count, counts[j], myname );
+ err++;
+ }
+ if (status.MPI_SOURCE != partner) {
+ fprintf( stderr,
+ "Error in source (got %d expected %d) with type %s\n",
+ status.MPI_SOURCE, partner, myname );
+ err++;
+ }
+ /* MTestDatatype2Check returns 1+offset of first bad byte,
+ * or 0 when the buffers agree */
+ if ((errloc = MTestDatatype2Check( inbufs[j], outbufs[j],
+ bytesize[j] ))) {
+ char *p1, *p2;
+ fprintf( stderr,
+ "Error in data with type %s (type %d on %d) at byte %d\n",
+ myname, j, world_rank, errloc - 1 );
+ p1 = (char *)inbufs[j];
+ p2 = (char *)outbufs[j];
+ fprintf( stderr,
+ "Got %x expected %x\n", p1[errloc-1], p2[errloc-1] );
+ err++;
+ }
+ }
+ }
+ MTestFreeComm( &comm );
+ }
+
+ MTestDatatype2Free( types, inbufs, outbufs, counts, bytesize, ntype );
+ MTest_Finalize( err );
+ MPI_Finalize();
+ return MTestReturnValue( err );
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2014 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include "mpitest.h"
+#include <stdio.h>
+#include "dtypes.h"
+
+
+/*
+ This program is derived from one in the MPICH-1 test suite
+
+ This version sends and receives EVERYTHING from MPI_BOTTOM, by putting
+ the data into a structure.
+ */
+int main( int argc, char **argv )
+{
+ MPI_Datatype *types;
+ void **inbufs, **outbufs;
+ int *counts, *bytesize, ntype;
+ MPI_Comm comm;
+ int rank, np, partner, tag, count;
+ int j, k, err, world_rank, errloc;
+ MPI_Status status;
+ char *obuf;
+ MPI_Datatype offsettype;
+ int blen;
+ MPI_Aint displ, extent, natural_extent;
+ char myname[MPI_MAX_OBJECT_NAME];
+ int mynamelen;
+
+ MTest_Init( &argc, &argv );
+
+ MTestDatatype2Allocate( &types, &inbufs, &outbufs, &counts, &bytesize,
+ &ntype );
+ MTestDatatype2Generate( types, inbufs, outbufs, counts, bytesize, &ntype );
+
+ MPI_Comm_rank( MPI_COMM_WORLD, &world_rank );
+
+ /* Test over a wide range of datatypes and communicators */
+ err = 0;
+ tag = 0;
+ while (MTestGetIntracomm( &comm, 2 )) {
+ if (comm == MPI_COMM_NULL) continue;
+ MPI_Comm_rank( comm, &rank );
+ MPI_Comm_size( comm, &np );
+ if (np < 2) continue;
+ tag++;
+ for (j=0; j<ntype; j++) {
+ MPI_Type_get_name( types[j], myname, &mynamelen );
+ if (world_rank == 0)
+ MTestPrintfMsg( 10, "Testing type %s\n", myname );
+ if (rank == 0) {
+ /* wrap types[j] in a struct whose displacement is the
+ * buffer's absolute address, so the transfer can start
+ * from MPI_BOTTOM */
+ MPI_Get_address( inbufs[j], &displ );
+ blen = 1;
+ MPI_Type_create_struct( 1, &blen, &displ, types + j,
+ &offsettype );
+ MPI_Type_commit( &offsettype );
+ /* Warning: if the type has an explicit MPI_UB, then using a
+ simple shift of the offset won't work. For now, we skip
+ types whose extents are negative; the correct solution is
+ to add, where required, an explicit MPI_UB */
+ MPI_Type_extent( offsettype, &extent );
+ if (extent < 0) {
+ if (world_rank == 0)
+ MTestPrintfMsg( 10,
+ "... skipping (appears to have explicit MPI_UB\n" );
+ MPI_Type_free( &offsettype );
+ continue;
+ }
+ MPI_Type_extent( types[j], &natural_extent );
+ if (natural_extent != extent) {
+ MPI_Type_free( &offsettype );
+ continue;
+ }
+ partner = np - 1;
+ MPI_Send( MPI_BOTTOM, counts[j], offsettype, partner, tag,
+ comm );
+ MPI_Type_free( &offsettype );
+ }
+ else if (rank == np-1) {
+ partner = 0;
+ /* zero the receive buffer so stale data cannot mask errors */
+ obuf = outbufs[j];
+ for (k=0; k<bytesize[j]; k++)
+ obuf[k] = 0;
+ /* mirror the sender: absolute-address struct received from
+ * MPI_BOTTOM, with the same skip conditions so both sides
+ * stay in step */
+ MPI_Get_address( outbufs[j], &displ );
+ blen = 1;
+ MPI_Type_create_struct( 1, &blen, &displ, types + j,
+ &offsettype );
+ MPI_Type_commit( &offsettype );
+ /* Warning: if the type has an explicit MPI_UB, then using a
+ simple shift of the offset won't work. For now, we skip
+ types whose extents are negative; the correct solution is
+ to add, where required, an explicit MPI_UB */
+ MPI_Type_extent( offsettype, &extent );
+ if (extent < 0) {
+ MPI_Type_free( &offsettype );
+ continue;
+ }
+ MPI_Type_extent( types[j], &natural_extent );
+ if (natural_extent != extent) {
+ MPI_Type_free( &offsettype );
+ continue;
+ }
+ MPI_Recv( MPI_BOTTOM, counts[j], offsettype,
+ partner, tag, comm, &status );
+ /* Test for correctness */
+ MPI_Get_count( &status, types[j], &count );
+ if (count != counts[j]) {
+ fprintf( stderr,
+ "Error in counts (got %d expected %d) with type %s\n",
+ count, counts[j], myname );
+ err++;
+ }
+ if (status.MPI_SOURCE != partner) {
+ fprintf( stderr,
+ "Error in source (got %d expected %d) with type %s\n",
+ status.MPI_SOURCE, partner, myname );
+ err++;
+ }
+ if ((errloc = MTestDatatype2Check( inbufs[j], outbufs[j],
+ bytesize[j] ))) {
+ fprintf( stderr,
+ "Error in data with type %s (type %d on %d) at byte %d\n",
+ myname, j, world_rank, errloc - 1 );
+ if (err < 10) {
+ /* Give details on only the first 10 errors */
+ unsigned char *in_p = (unsigned char *)inbufs[j],
+ *out_p = (unsigned char *)outbufs[j];
+ int jj;
+ jj = errloc - 1;
+ jj &= 0xfffffffc; /* lop off a few bits */
+ in_p += jj;
+ out_p += jj;
+ fprintf( stderr, "%02x%02x%02x%02x should be %02x%02x%02x%02x\n",
+ out_p[0], out_p[1], out_p[2], out_p[3],
+ in_p[0], in_p[1], in_p[2], in_p[3] );
+ }
+ err++;
+ }
+ MPI_Type_free( &offsettype );
+ }
+ }
+ MTestFreeComm( &comm );
+ }
+
+ MTestDatatype2Free( types, inbufs, outbufs, counts, bytesize, ntype );
+ MTest_Finalize( err );
+ MPI_Finalize();
+ return MTestReturnValue( err );
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2014 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include "mpitest.h"
+#include <stdlib.h>
+#include <stdio.h>
+/* The next is for isprint */
+#include <ctype.h>
+
+int main( int argc, char *argv[])
+{
+ struct a { int i;
+ char c;
+ } s[10], s1[10];
+ int j;
+ int errs = 0, toterrs;
+ int rank, size, tsize;
+ MPI_Aint text;
+ int blens[2];
+ MPI_Aint disps[2];
+ MPI_Datatype bases[2];
+ MPI_Datatype str, con;
+ char *buffer;
+ int bufsize, position, insize;
+
+ MTest_Init( &argc, &argv );
+
+ MPI_Comm_rank( MPI_COMM_WORLD, &rank );
+ MPI_Comm_size( MPI_COMM_WORLD, &size );
+
+ /* fill the source array with rank-dependent values so any pack/unpack
+ * corruption is detectable below */
+ for( j = 0; j < 10; j ++ ) {
+ s[j].i = j + rank;
+ s[j].c = j + rank + 'a';
+ }
+
+ /* build an MPI struct type matching 'struct a', then a contig of 10 */
+ blens[0] = blens[1] = 1;
+ disps[0] = 0; disps[1] = sizeof(int);
+ bases[0] = MPI_INT; bases[1] = MPI_CHAR;
+ MPI_Type_struct( 2, blens, disps, bases, &str );
+ MPI_Type_commit( &str );
+ MPI_Type_contiguous( 10, str, &con );
+ MPI_Type_commit( &con );
+ MPI_Type_size( con, &tsize );
+ MPI_Type_extent( con, &text );
+
+#ifdef DEBUG
+ printf("Size of MPI array is %d, extent is %d\n", tsize, text );
+#endif
+
+#ifdef DEBUG
+ {
+ void * p1, *p2;
+ p1 = s;
+ p2 = &(s[10].i); /* This statement may fail on some systems */
+ printf("C array starts at %p and ends at %p for a length of %d\n",
+ s, &(s[9].c), (char *)p2-(char *)p1 );
+ }
+#endif
+ /* the MPI struct's extent must equal the C struct's size (including
+ * padding) for the contig of 10 to tile memory correctly */
+ MPI_Type_extent( str, &text );
+#ifdef DEBUG
+ MPI_Type_size( str, &tsize );
+ printf("Size of MPI struct is %d, extent is %d\n", tsize, (int)text );
+ printf("Size of C struct is %d\n", sizeof(struct a) );
+#endif
+ if (text != sizeof(struct a)) {
+ printf( "Extent of struct a (%d) does not match sizeof (%d)\n",
+ (int)text, (int)sizeof(struct a) );
+ errs++;
+ }
+
+ /* round-trip: pack s, then unpack into s1 and compare */
+ MPI_Pack_size(1, con, MPI_COMM_WORLD, &bufsize);
+ buffer = (char *) malloc(bufsize);
+
+ position = 0;
+ MPI_Pack(s,1,con,buffer,bufsize,&position,MPI_COMM_WORLD);
+ insize = position;
+ position = 0;
+ MPI_Unpack(buffer,insize,&position,s1,1,con,MPI_COMM_WORLD );
+
+ for( j = 0; j < 10; j++ ) {
+#ifdef DEBUG
+ printf("%d Sent: %d %c, Got: %d %c\n", rank,
+ s[j].i, s[j].c, s1[j].i, s1[j].c );
+#endif
+ if ( s1[j].i != j + rank ) {
+ errs++;
+ printf( "Got s[%d].i = %d (%x); expected %d\n", j, s1[j].i,
+ s1[j].i, j + rank );
+ }
+ if ( s1[j].c != 'a' + j + rank ) {
+ errs++;
+ /* If the character is not a printing character,
+ this can generate an file that diff, for example,
+ believes is a binary file */
+ if (isprint( (int)(s1[j].c) )) {
+ printf( "Got s[%d].c = %c; expected %c\n", j, s1[j].c,
+ j + rank + 'a');
+ }
+ else {
+ printf( "Got s[%d].c = %x; expected %c\n", j, (int)s1[j].c,
+ j + rank + 'a');
+ }
+ }
+ }
+
+ MPI_Type_free( &str );
+ MPI_Type_free( &con );
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
#transpose-pack 1
#slice-pack 1
#struct-pack 1
+#structpack2 1
typecommit 1
typename 1
typefree 1
#needs MPI_Type_create_resized
tresized 2
#tresized2 2
+sendrecvt2 2
+sendrecvt4 2
#needs MPI_Type_match_size
#tmatchsize 1
tfree 2
hindexed_block 1 mpiversion=3.0
hindexed_block_contents 1 mpiversion=3.0
longdouble 1
+#We still have some alignment issues
+#dataalign 2
+#Needs MPI_Win_create_dynamic MPI_Win_attach MPI_Win_flush
+#get-struct 2
#large-count 1 mpiversion=3.0 xfail=ticket1767
+#large_type 1 mpiversion=3.0
+#large_type_sendrec 2 arg=31 mpiversion=3.0
+#large_type_sendrec 2 arg=32 mpiversion=3.0 timeLimit=360
cxx-types 1 mpiversion=3.0
MPI_Type_commit( &tmpType[i] );
}
- MPI_Sendrecv( 0, 0, MPI_INT, source, 1,
- 0, 0, MPI_INT, source, 1, comm, &status );
+ MPI_Sendrecv( NULL, 0, MPI_INT, source, 1,
+ NULL, 0, MPI_INT, source, 1, comm, &status );
MPI_Wait( &req, &status );
for (i=0; i<VEC_NELM; i++) {
buf = (int *)malloc( VEC_NELM * sizeof(int) );
for (i=0; i<VEC_NELM; i++) buf[i] = i;
/* Synchronize with the receiver */
- MPI_Sendrecv( 0, 0, MPI_INT, dest, 1,
- 0, 0, MPI_INT, dest, 1, comm, &status );
+ MPI_Sendrecv( NULL, 0, MPI_INT, dest, 1,
+ NULL, 0, MPI_INT, dest, 1, comm, &status );
MPI_Send( buf, VEC_NELM, MPI_INT, dest, 0, comm );
free( buf );
}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2014 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include "mpitest.h"
+
+/* Inspired by the Intel MPI_Type_vector_blklen test.
+ Added to include a test of a dataloop optimization that failed.
+*/
+int main( int argc, char *argv[] )
+{
+    MPI_Datatype ot, ot2, newtype;
+    int position, psize, insize, outsize;
+    signed char *inbuf=0, *outbuf=0, *pbuf=0, *p;
+    int i, j, k;
+    int errs = 0;
+    int veccount=16, stride=16;
+
+    MTest_Init( &argc, &argv );
+    /*
+     * Create a type with some padding: 59 significant bytes inside a
+     * 64-byte extent, leaving a 5-byte trailing gap per element.
+     */
+    MPI_Type_contiguous( 59, MPI_CHAR, &ot );
+    MPI_Type_create_resized( ot, 0, 64, &ot2 );
+    /*
+      Use a vector type with a block size equal to the stride - thus
+      tiling the target memory with copies of old type.  This is not
+      a contiguous copy since oldtype has a gap at the end.
+    */
+    MPI_Type_vector( veccount, stride, stride, ot2, &newtype );
+    MPI_Type_commit( &newtype );
+
+    insize = veccount * stride * 64;
+    outsize = insize;
+    /* Cast to the declared pointer type (signed char *); the previous
+       (char *) casts were incompatible-pointer assignments, since char
+       and signed char are distinct types in C. */
+    inbuf = (signed char *)malloc( insize );
+    outbuf = (signed char *)malloc( outsize );
+    /* insize == outsize, so one loop initializes both buffers.
+       outbuf is filled with the -1 sentinel: after unpack, the gap
+       bytes of each element must still hold it. */
+    for (i=0; i<outsize; i++) {
+        inbuf[i] = i % 64;
+        outbuf[i] = -1;
+    }
+
+    MPI_Pack_size( 1, newtype, MPI_COMM_WORLD, &psize );
+    pbuf = (signed char *)malloc( psize );
+
+    position = 0;
+    MPI_Pack( inbuf, 1, newtype, pbuf, psize, &position, MPI_COMM_WORLD );
+    psize = position;
+    position = 0;
+    MPI_Unpack( pbuf, psize, &position, outbuf, 1, newtype, MPI_COMM_WORLD );
+
+
+    /* Check the output: every 64-byte element must carry 59 data bytes
+       (values 0..58) followed by 5 untouched sentinel (-1) bytes. */
+    p = outbuf;
+    for (i=0; i<veccount; i++) {
+        for (j=0; j<stride; j++) {
+            for (k=0; k<59; k++) {
+                if (*p != k % 64) {
+                    errs++;
+                    fprintf( stderr, "[%d,%d,%d]expected %d but saw %d\n",
+                             i, j, k, (k%64), *p );
+                }
+                p++;
+            }
+            for (k=59; k<64; k++) {
+                if (*p != -1) {
+                    errs++;
+                    fprintf( stderr, "[%d,%d,%d]expected -1 but saw %d\n",
+                             i, j, k, *p );
+                }
+                p++;
+            }
+        }
+    }
+
+    free( pbuf );
+    free( inbuf );
+    free( outbuf );
+
+    MPI_Type_free( &ot );
+    MPI_Type_free( &ot2 );
+    MPI_Type_free( &newtype );
+    MTest_Finalize( errs );
+    MPI_Finalize();
+
+    return 0;
+}
--- /dev/null
+#ifndef MPITEST_DTYPES
+#define MPITEST_DTYPES
+
+/* Fill caller-allocated arrays with test datatypes, initialized input
+   buffers, zeroed output buffers, send counts, and byte sizes.
+   On input *n is the array capacity; on output, the number defined. */
+void MTestDatatype2Generate ( MPI_Datatype *, void **, void **, int *, int *,
+                              int * );
+/* Allocate the parallel arrays expected by MTestDatatype2Generate and
+   set *n to their capacity. */
+void MTestDatatype2Allocate ( MPI_Datatype **, void ***, void ***,
+                              int **, int **, int * );
+/* Compare two buffers byte-by-byte; returns 0 on match, otherwise
+   1 + the index of the first differing byte. */
+int MTestDatatype2Check ( void *, void *, int );
+/* Same as MTestDatatype2Check but prints a diagnostic to stderr. */
+int MTestDatatype2CheckAndPrint ( void *, void *, int, char *, int );
+/* Free the buffers and the derived (non-predefined) datatypes created
+   by MTestDatatype2Generate, then the arrays themselves. */
+void MTestDatatype2Free ( MPI_Datatype *, void **, void **,
+                          int *, int *, int );
+/* Restrict subsequent MTestDatatype2Generate calls to predefined types. */
+void MTestDatatype2BasicOnly( void );
+#endif
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2014 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include "mpitestconf.h"
+#include "mpitest.h"
+#include "dtypes.h"
+#if defined(HAVE_STDIO_H) || defined(STDC_HEADERS)
+#include <stdio.h>
+#endif
+#if defined(HAVE_STDLIB_H) || defined(STDC_HEADERS)
+#include <stdlib.h>
+#endif
+#if defined(HAVE_STRING_H) || defined(STDC_HEADERS)
+#include <string.h>
+#endif
+#ifdef HAVE_STDARG_H
+#include <stdarg.h>
+#endif
+
+/* This file contains code to generate a variety of MPI datatypes for testing
+ the various MPI routines.
+
+ To simplify the test code, this generates an array of datatypes, buffers with
+ data and buffers with no data (0 bits) for use in send and receive
+ routines of various types.
+
+ In addition, this doesn't even test all of the possibilities. For example,
+ there is currently no test of sending more than one item defined with
+ MPI_Type_contiguous .
+
+ Note also that this test assumes that the sending and receive types are
+ the same. MPI requires only that the type signatures match, which is
+ a weaker requirement.
+
+ This code was drawn from the MPICH-1 test suite and modified to fit the
+ new MPICH test suite. It provides an alternative set of datatype tests
+ to the ones in mtest.c.
+
+ */
+
+/* Change this to test only the basic, predefined types */
+static int basic_only = 0;
+
+/*
+   Array types, inbufs, outbufs, and counts are allocated by the
+   CALLER.  n on input is the maximum number; on output, it is the
+   number defined.
+
+   See MTestDatatype2Allocate below for a routine to allocate these arrays.
+
+   We may want to add a routine to call to check that the proper data
+   has been received.
+ */
+
+/*
+   Add a predefined MPI type to the tests.  _count instances of the
+   type will be sent.  The input buffer is initialized to 0,1,2,...;
+   the output buffer to all zeros.  All macros below share this shape:
+   they bail out (setting *n) when slot cnt exceeds the capacity *n,
+   and advance cnt by one on success.
+*/
+#define SETUPBASICTYPE(_mpitype,_ctype,_count) { \
+    int i; _ctype *a; \
+    if (cnt > *n) {*n = cnt; return; } \
+    types[cnt] = _mpitype; \
+    inbufs[cnt] = (void *)calloc( _count,sizeof(_ctype) ); \
+    outbufs[cnt] = (void *)malloc( sizeof(_ctype) * (_count) ); \
+    a = (_ctype *)inbufs[cnt]; for (i=0; i<(_count); i++) a[i] = i; \
+    a = (_ctype *)outbufs[cnt]; for (i=0; i<(_count); i++) a[i] = 0; \
+    counts[cnt] = _count; bytesize[cnt] = sizeof(_ctype) * (_count); cnt++; }
+
+/*
+   Add a contiguous version of a predefined type.  Send one instance of
+   the type which contains _count copies of the predefined type.
+ */
+#define SETUPCONTIGTYPE(_mpitype,_ctype,_count) { \
+    int i; _ctype *a; char*myname; \
+    char _basename[MPI_MAX_OBJECT_NAME]; int _basenamelen;\
+    if (cnt > *n) {*n = cnt; return; }\
+    MPI_Type_contiguous( _count, _mpitype, types + cnt );\
+    MPI_Type_commit( types + cnt );\
+    inbufs[cnt] = (void *)calloc( _count, sizeof(_ctype) ); \
+    outbufs[cnt] = (void *)malloc( sizeof(_ctype) * (_count) ); \
+    a = (_ctype *)inbufs[cnt]; for (i=0; i<(_count); i++) a[i] = i; \
+    a = (_ctype *)outbufs[cnt]; for (i=0; i<(_count); i++) a[i] = 0; \
+    myname = (char *)malloc(100);\
+    MPI_Type_get_name( _mpitype, _basename, &_basenamelen ); \
+    snprintf( myname, 100, "Contig type %s", _basename ); \
+    MPI_Type_set_name( types[cnt], myname ); \
+    free( myname ); \
+    counts[cnt] = 1; bytesize[cnt] = sizeof(_ctype) * (_count); cnt++; }
+
+/*
+   Create a vector with _count elements, separated by stride _stride,
+   of _mpitype.  Each block has a single element.  Only element 0 of
+   each stride is significant; the rest of the buffer is zeroed.
+ */
+#define SETUPVECTORTYPE(_mpitype,_ctype,_count,_stride,_name) { \
+    int i; _ctype *a; char *myname; \
+    char _basename[MPI_MAX_OBJECT_NAME]; int _basenamelen;\
+    if (cnt > *n) {*n = cnt; return; }\
+    MPI_Type_vector( _count, 1, _stride, _mpitype, types + cnt ); \
+    MPI_Type_commit( types + cnt );\
+    inbufs[cnt] = (void *)calloc( sizeof(_ctype) * (_count) * (_stride),1); \
+    outbufs[cnt] = (void *)calloc( sizeof(_ctype) * (_count) * (_stride),1); \
+    a = (_ctype *)inbufs[cnt]; for (i=0; i<(_count); i++) a[i*(_stride)] = i; \
+    a = (_ctype *)outbufs[cnt]; for (i=0; i<(_count); i++) a[i*(_stride)] = 0; \
+    myname = (char *)malloc(100);\
+    MPI_Type_get_name( _mpitype, _basename, &_basenamelen ); \
+    snprintf( myname, 100, "Vector type %s", _basename ); \
+    MPI_Type_set_name( types[cnt], myname ); \
+    free( myname ); \
+    counts[cnt] = 1; bytesize[cnt] = sizeof(_ctype) * (_count) * (_stride) ;\
+    cnt++; }
+
+/* This indexed type is setup like a contiguous type (lens all 1,
+   disp[i] == i).
+   Note that systems may try to convert this to contiguous, so we'll
+   eventually need a test that has holes in it */
+#define SETUPINDEXTYPE(_mpitype,_ctype,_count,_name) { \
+    int i; int *lens, *disp; _ctype *a; char *myname; \
+    char _basename[MPI_MAX_OBJECT_NAME]; int _basenamelen;\
+    if (cnt > *n) {*n = cnt; return; }\
+    lens = (int *)malloc( (_count) * sizeof(int) ); \
+    disp = (int *)malloc( (_count) * sizeof(int) ); \
+    for (i=0; i<(_count); i++) { lens[i] = 1; disp[i] = i; } \
+    MPI_Type_indexed( (_count), lens, disp, _mpitype, types + cnt );\
+    free( lens ); free( disp ); \
+    MPI_Type_commit( types + cnt );\
+    inbufs[cnt] = (void *)calloc( (_count), sizeof(_ctype) ); \
+    outbufs[cnt] = (void *)malloc( sizeof(_ctype) * (_count) ); \
+    a = (_ctype *)inbufs[cnt]; for (i=0; i<(_count); i++) a[i] = i; \
+    a = (_ctype *)outbufs[cnt]; for (i=0; i<(_count); i++) a[i] = 0; \
+    myname = (char *)malloc(100);\
+    MPI_Type_get_name( _mpitype, _basename, &_basenamelen ); \
+    snprintf( myname, 100, "Index type %s", _basename ); \
+    MPI_Type_set_name( types[cnt], myname ); \
+    free( myname ); \
+    counts[cnt] = 1; bytesize[cnt] = sizeof(_ctype) * (_count); cnt++; }
+
+/* This defines a structure of two basic members; by choosing things like
+   (char, double), various packing and alignment tests can be made.
+   NOTE(review): MPI_UB is deprecated (removed in MPI-3); presumably kept
+   to match the upstream MPICH test -- confirm the target MPI level. */
+#define SETUPSTRUCT2TYPE(_mpitype1,_ctype1,_mpitype2,_ctype2,_count,_tname) { \
+    int i; char *myname; \
+    MPI_Datatype b[3]; int cnts[3]; \
+    struct name { _ctype1 a1; _ctype2 a2; } *a, samp; \
+    MPI_Aint disp[3]; \
+    if (cnt > *n) {*n = cnt; return; } \
+    b[0] = _mpitype1; b[1] = _mpitype2; b[2] = MPI_UB; \
+    cnts[0] = 1; cnts[1] = 1; cnts[2] = 1; \
+    MPI_Get_address( &(samp.a2), &disp[1] ); \
+    MPI_Get_address( &(samp.a1), &disp[0] ); \
+    MPI_Get_address( &(samp) + 1, &disp[2] ); \
+    disp[1] = disp[1] - disp[0]; disp[2] = disp[2] - disp[0]; disp[0] = 0; \
+    MPI_Type_create_struct( 3, cnts, disp, b, types + cnt ); \
+    MPI_Type_commit( types + cnt ); \
+    inbufs[cnt] = (void *)calloc( sizeof(struct name) * (_count),1); \
+    outbufs[cnt] = (void *)calloc( sizeof(struct name) * (_count),1); \
+    a = (struct name *)inbufs[cnt]; for (i=0; i<(_count); i++) { a[i].a1 = i; \
+        a[i].a2 = i; } \
+    a = (struct name *)outbufs[cnt]; for (i=0; i<(_count); i++) { a[i].a1 = 0; \
+        a[i].a2 = 0; } \
+    myname = (char *)malloc(100); \
+    snprintf( myname, 100, "Struct type %s", _tname ); \
+    MPI_Type_set_name( types[cnt], myname ); \
+    free( myname ); \
+    counts[cnt] = (_count); bytesize[cnt] = sizeof(struct name) * (_count);cnt++; }
+
+/* This accomplished the same effect as VECTOR, but allow a count of > 1
+   (MPI_UB sets the extent so consecutive counts stride the buffer) */
+#define SETUPSTRUCTTYPEUB(_mpitype,_ctype,_count,_stride) { \
+    int i; _ctype *a; char *myname; \
+    int blens[2]; MPI_Aint disps[2]; MPI_Datatype mtypes[2]; \
+    char _basename[MPI_MAX_OBJECT_NAME]; int _basenamelen;\
+    if (cnt > *n) {*n = cnt; return; } \
+    blens[0] = 1; blens[1] = 1; disps[0] = 0; \
+    disps[1] = (_stride) * sizeof(_ctype); \
+    mtypes[0] = _mpitype; mtypes[1] = MPI_UB; \
+    MPI_Type_create_struct( 2, blens, disps, mtypes, types + cnt ); \
+    MPI_Type_commit( types + cnt ); \
+    inbufs[cnt] = (void *)calloc( sizeof(_ctype) * (_count) * (_stride),1);\
+    outbufs[cnt] = (void *)calloc( sizeof(_ctype) * (_count) * (_stride),1);\
+    a = (_ctype *)inbufs[cnt]; for (i=0; i<(_count); i++) a[i*(_stride)] = i; \
+    a = (_ctype *)outbufs[cnt]; for (i=0; i<(_count); i++) a[i*(_stride)] = 0; \
+    myname = (char *)malloc(100); \
+    MPI_Type_get_name( _mpitype, _basename, &_basenamelen ); \
+    snprintf( myname, 100, "Struct (MPI_UB) type %s", _basename ); \
+    MPI_Type_set_name( types[cnt], myname ); \
+    free( myname ); \
+    counts[cnt] = (_count); \
+    bytesize[cnt] = sizeof(_ctype) * (_count) * (_stride);\
+    cnt++; }
+
+/*
+ * Set whether only the basic types should be generated
+ */
+void MTestDatatype2BasicOnly( void )
+{
+    /* Once set there is no way to re-enable derived types. */
+    basic_only = 1;
+}
+
+/* Number of predefined ("basic") types generated; set here and read by
+   MTestDatatype2Free so predefined handles are never freed. */
+static int nbasic_types = 0;
+/* On input, n is the size of the various buffers.  On output,
+   it is the number of available types.
+   Each SETUP* macro fills slot cnt of types/inbufs/outbufs/counts/
+   bytesize and advances cnt; on overflow it sets *n and returns.
+ */
+void MTestDatatype2Generate( MPI_Datatype *types, void **inbufs, void **outbufs,
+                             int *counts, int *bytesize, int *n )
+{
+    int cnt = 0;      /* Number of defined types */
+    int typecnt = 10; /* Number of instances to send in most cases */
+    int stride = 9;   /* Number of elements in vector to stride */
+
+    /* First, generate an element of each basic type */
+    SETUPBASICTYPE(MPI_CHAR,char,typecnt);
+    SETUPBASICTYPE(MPI_SHORT,short,typecnt);
+    SETUPBASICTYPE(MPI_INT,int,typecnt);
+    SETUPBASICTYPE(MPI_LONG,long,typecnt);
+    SETUPBASICTYPE(MPI_UNSIGNED_CHAR,unsigned char,typecnt);
+    SETUPBASICTYPE(MPI_UNSIGNED_SHORT,unsigned short,typecnt);
+    SETUPBASICTYPE(MPI_UNSIGNED,unsigned,typecnt);
+    SETUPBASICTYPE(MPI_UNSIGNED_LONG,unsigned long,typecnt);
+    SETUPBASICTYPE(MPI_FLOAT,float,typecnt);
+    SETUPBASICTYPE(MPI_DOUBLE,double,typecnt);
+    SETUPBASICTYPE(MPI_BYTE,char,typecnt);
+#ifdef HAVE_LONG_LONG_INT
+    SETUPBASICTYPE(MPI_LONG_LONG_INT,long long,typecnt);
+#endif
+#ifdef HAVE_LONG_DOUBLE
+    SETUPBASICTYPE(MPI_LONG_DOUBLE,long double,typecnt);
+#endif
+    nbasic_types = cnt;
+
+    if (basic_only) {
+        *n = cnt;
+        return;
+    }
+    /* Generate contiguous data items */
+    SETUPCONTIGTYPE(MPI_CHAR,char,typecnt);
+    SETUPCONTIGTYPE(MPI_SHORT,short,typecnt);
+    SETUPCONTIGTYPE(MPI_INT,int,typecnt);
+    SETUPCONTIGTYPE(MPI_LONG,long,typecnt);
+    SETUPCONTIGTYPE(MPI_UNSIGNED_CHAR,unsigned char,typecnt);
+    SETUPCONTIGTYPE(MPI_UNSIGNED_SHORT,unsigned short,typecnt);
+    SETUPCONTIGTYPE(MPI_UNSIGNED,unsigned,typecnt);
+    SETUPCONTIGTYPE(MPI_UNSIGNED_LONG,unsigned long,typecnt);
+    SETUPCONTIGTYPE(MPI_FLOAT,float,typecnt);
+    SETUPCONTIGTYPE(MPI_DOUBLE,double,typecnt);
+    SETUPCONTIGTYPE(MPI_BYTE,char,typecnt);
+#ifdef HAVE_LONG_LONG_INT
+    SETUPCONTIGTYPE(MPI_LONG_LONG_INT,long long,typecnt);
+#endif
+#ifdef HAVE_LONG_DOUBLE
+    SETUPCONTIGTYPE(MPI_LONG_DOUBLE,long double,typecnt);
+#endif
+
+    /* Generate vector items */
+    SETUPVECTORTYPE(MPI_CHAR,char,typecnt,stride,"MPI_CHAR");
+    SETUPVECTORTYPE(MPI_SHORT,short,typecnt,stride,"MPI_SHORT");
+    SETUPVECTORTYPE(MPI_INT,int,typecnt,stride,"MPI_INT");
+    SETUPVECTORTYPE(MPI_LONG,long,typecnt,stride,"MPI_LONG");
+    SETUPVECTORTYPE(MPI_UNSIGNED_CHAR,unsigned char,typecnt,stride,"MPI_UNSIGNED_CHAR");
+    SETUPVECTORTYPE(MPI_UNSIGNED_SHORT,unsigned short,typecnt,stride,"MPI_UNSIGNED_SHORT");
+    SETUPVECTORTYPE(MPI_UNSIGNED,unsigned,typecnt,stride,"MPI_UNSIGNED");
+    SETUPVECTORTYPE(MPI_UNSIGNED_LONG,unsigned long,typecnt,stride,"MPI_UNSIGNED_LONG");
+    SETUPVECTORTYPE(MPI_FLOAT,float,typecnt,stride,"MPI_FLOAT");
+    SETUPVECTORTYPE(MPI_DOUBLE,double,typecnt,stride,"MPI_DOUBLE");
+    SETUPVECTORTYPE(MPI_BYTE,char,typecnt,stride,"MPI_BYTE");
+#ifdef HAVE_LONG_LONG_INT
+    SETUPVECTORTYPE(MPI_LONG_LONG_INT,long long,typecnt,stride,"MPI_LONG_LONG_INT");
+#endif
+#ifdef HAVE_LONG_DOUBLE
+    SETUPVECTORTYPE(MPI_LONG_DOUBLE,long double,typecnt,stride,"MPI_LONG_DOUBLE");
+#endif
+
+    /* Generate indexed items */
+    SETUPINDEXTYPE(MPI_CHAR,char,typecnt,"MPI_CHAR");
+    SETUPINDEXTYPE(MPI_SHORT,short,typecnt,"MPI_SHORT");
+    SETUPINDEXTYPE(MPI_INT,int,typecnt,"MPI_INT");
+    SETUPINDEXTYPE(MPI_LONG,long,typecnt,"MPI_LONG");
+    SETUPINDEXTYPE(MPI_UNSIGNED_CHAR,unsigned char,typecnt,"MPI_UNSIGNED_CHAR");
+    SETUPINDEXTYPE(MPI_UNSIGNED_SHORT,unsigned short,typecnt,"MPI_UNSIGNED_SHORT");
+    SETUPINDEXTYPE(MPI_UNSIGNED,unsigned,typecnt,"MPI_UNSIGNED");
+    SETUPINDEXTYPE(MPI_UNSIGNED_LONG,unsigned long,typecnt,"MPI_UNSIGNED_LONG");
+    SETUPINDEXTYPE(MPI_FLOAT,float,typecnt,"MPI_FLOAT");
+    SETUPINDEXTYPE(MPI_DOUBLE,double,typecnt,"MPI_DOUBLE");
+    SETUPINDEXTYPE(MPI_BYTE,char,typecnt,"MPI_BYTE");
+#ifdef HAVE_LONG_LONG_INT
+    SETUPINDEXTYPE(MPI_LONG_LONG_INT,long long,typecnt,"MPI_LONG_LONG_INT");
+#endif
+#ifdef HAVE_LONG_DOUBLE
+    SETUPINDEXTYPE(MPI_LONG_DOUBLE,long double,typecnt,"MPI_LONG_DOUBLE");
+#endif
+
+    /* Generate struct items (mixed members exercise alignment/padding) */
+    SETUPSTRUCT2TYPE(MPI_CHAR,char,MPI_DOUBLE,double,typecnt,"char-double");
+    SETUPSTRUCT2TYPE(MPI_DOUBLE,double,MPI_CHAR,char,typecnt,"double-char");
+    SETUPSTRUCT2TYPE(MPI_UNSIGNED,unsigned,MPI_DOUBLE,double,typecnt,"unsigned-double");
+    SETUPSTRUCT2TYPE(MPI_FLOAT,float,MPI_LONG,long,typecnt,"float-long");
+    SETUPSTRUCT2TYPE(MPI_UNSIGNED_CHAR,unsigned char,MPI_CHAR,char,typecnt,
+                     "unsigned char-char");
+    SETUPSTRUCT2TYPE(MPI_UNSIGNED_SHORT,unsigned short,MPI_DOUBLE,double,
+                     typecnt,"unsigned short-double");
+
+    /* Generate struct using MPI_UB */
+    SETUPSTRUCTTYPEUB(MPI_CHAR,char,typecnt,stride);
+    SETUPSTRUCTTYPEUB(MPI_SHORT,short,typecnt,stride);
+    SETUPSTRUCTTYPEUB(MPI_INT,int,typecnt,stride);
+    SETUPSTRUCTTYPEUB(MPI_LONG,long,typecnt,stride);
+    SETUPSTRUCTTYPEUB(MPI_UNSIGNED_CHAR,unsigned char,typecnt,stride);
+    SETUPSTRUCTTYPEUB(MPI_UNSIGNED_SHORT,unsigned short,typecnt,stride);
+    SETUPSTRUCTTYPEUB(MPI_UNSIGNED,unsigned,typecnt,stride);
+    SETUPSTRUCTTYPEUB(MPI_UNSIGNED_LONG,unsigned long,typecnt,stride);
+    SETUPSTRUCTTYPEUB(MPI_FLOAT,float,typecnt,stride);
+    SETUPSTRUCTTYPEUB(MPI_DOUBLE,double,typecnt,stride);
+    SETUPSTRUCTTYPEUB(MPI_BYTE,char,typecnt,stride);
+
+    /* 60 different entries to this point + 4 for long long and
+       4 for long double */
+    *n = cnt;
+}
+
+/*
+   MAX_TEST should be 1 + actual max (allows us to check that it was,
+   indeed, large enough)
+ */
+#define MAX_TEST 70
+/* Allocate the five parallel arrays used with MTestDatatype2Generate
+   and set *n to their capacity (MAX_TEST).
+   NOTE(review): malloc results are not checked here; a failed
+   allocation surfaces as a crash in the caller. */
+void MTestDatatype2Allocate( MPI_Datatype **types, void ***inbufs,
+                             void ***outbufs,
+                             int **counts, int **bytesize, int *n )
+{
+    *types = (MPI_Datatype *)malloc( MAX_TEST * sizeof(MPI_Datatype) );
+    *inbufs = (void **) malloc( MAX_TEST * sizeof(void *) );
+    *outbufs = (void **) malloc( MAX_TEST * sizeof(void *) );
+    *counts = (int *) malloc( MAX_TEST * sizeof(int) );
+    *bytesize = (int *) malloc( MAX_TEST * sizeof(int) );
+    *n = MAX_TEST;
+}
+
+/* Compare the first size_bytes bytes of inbuf and outbuf.
+   Returns 0 when they match, otherwise 1 + the index of the first
+   mismatching byte (1-based so that 0 can mean "no error"). */
+int MTestDatatype2Check( void *inbuf, void *outbuf, int size_bytes )
+{
+    char *in = (char *)inbuf, *out = (char *)outbuf;
+    int i;
+    for (i=0; i<size_bytes; i++) {
+        if (in[i] != out[i]) {
+            return i + 1;
+        }
+    }
+    return 0;
+}
+
+/*
+ * This is a version of CheckData that prints error messages.
+ * Returns MTestDatatype2Check's result (0 on match, else the 1-based
+ * error location); on mismatch, reports the type name/number, the rank,
+ * and the offending byte pair on stderr.
+ */
+int MTestDatatype2CheckAndPrint( void *inbuf, void *outbuf, int size_bytes,
+                                 char *typename, int typenum )
+{
+    int errloc, world_rank;
+
+    /* The assignment inside the condition is intentional */
+    if ((errloc = MTestDatatype2Check( inbuf, outbuf, size_bytes ))) {
+        char *p1, *p2;
+        MPI_Comm_rank( MPI_COMM_WORLD, &world_rank );
+        fprintf( stderr,
+                 "Error in data with type %s (type %d on %d) at byte %d of %d\n",
+                 typename, typenum, world_rank, errloc - 1, size_bytes );
+        p1 = (char *)inbuf;
+        p2 = (char *)outbuf;
+        /* NOTE(review): %x on a promoted (possibly signed) char may print
+           sign-extended values; cosmetic only -- confirm if it matters */
+        fprintf( stderr,
+                 "Got %x expected %x\n", p2[errloc-1], p1[errloc-1] );
+    }
+    return errloc;
+}
+
+/* Release the per-slot buffers and the derived datatypes created by
+   MTestDatatype2Generate, then the inbufs/outbufs/counts/bytesize
+   arrays from MTestDatatype2Allocate.
+   NOTE(review): the types array itself is never freed here, so callers
+   leak it -- confirm whether that is intentional. */
+void MTestDatatype2Free( MPI_Datatype *types, void **inbufs, void **outbufs,
+                         int *counts, int *bytesize, int n )
+{
+    int i;
+    for (i=0; i<n; i++) {
+        if (inbufs[i])
+            free( inbufs[i] );
+        if (outbufs[i])
+            free( outbufs[i] );
+        /* Only if not basic: predefined type handles must not be freed */
+        if (i >= nbasic_types)
+            MPI_Type_free( types + i );
+    }
+    free( inbufs );
+    free( outbufs );
+    free( counts );
+    free( bytesize );
+}