#include <stdio.h>
#include <string.h>
#include <assert.h>
-/* USE_STRICT_MPI may be defined in mpitestconf.h */
-#include "mpitestconf.h"
#ifdef HAVE_UNISTD_H
#include <unistd.h>
/* we sleep with probability 1/CHANCE_OF_SLEEP */
#define CHANCE_OF_SLEEP (1000)
/* JITTER_DELAY is denominated in microseconds (us) */
-#define JITTER_DELAY (50000) /* 0.05 seconds */
+#define JITTER_DELAY (50000) /* 0.05 seconds */
/* NUM_COMMS is the number of communicators on which ops will be posted */
#define NUM_COMMS (4)
} \
} while (0)
-/* Since MPICH is currently the only NBC implementation in existence, just use
- * this quick-and-dirty #ifdef to decide whether to test the nonblocking
- * collectives. Eventually we can add a configure option or configure test, or
- * the MPI-3 standard will be released and these can be gated on a MPI_VERSION
- * check */
-#if !defined(USE_STRICT_MPI) && defined(MPICH)
-#define TEST_NBC_ROUTINES 1
-#endif
-
-#if defined(TEST_NBC_ROUTINES)
/* Intended to act like "rand_r", but we can be sure that it will exist and be
* consistent across all of comm world. Returns a number in the range
* [0,GEN_PRN_MAX] */
* a=279470273, good primitive root of m from "TABLES OF LINEAR
* CONGRUENTIAL GENERATORS OF DIFFERENT SIZES AND GOOD
 * LATTICE STRUCTURE", by Pierre L'Ecuyer */
- return (279470273UL * (unsigned long)x) % 4294967291UL;
+ return (279470273UL * (unsigned long) x) % 4294967291UL;
}
/* given a random unsigned int value "rndval_" from gen_prn, this evaluates to a
((unsigned int)((min_) + ((rndval_) * (1.0 / (GEN_PRN_MAX+1.0)) * ((max_) - (min_)))))
-static void sum_fn(void *invec, void *inoutvec, int *len, MPI_Datatype *datatype)
+static void sum_fn(void *invec, void *inoutvec, int *len, MPI_Datatype * datatype)
{
int i;
int *in = invec;
/* used to keep track of buffers that should be freed after the corresponding
* operation has completed */
struct laundry {
- int case_num; /* which test case initiated this req/laundry */
+ int case_num; /* which test case initiated this req/laundry */
MPI_Comm comm;
int *buf;
int *recvbuf;
{
l->case_num = -1;
l->comm = MPI_COMM_NULL;
- if (l->buf) free(l->buf);
- if (l->recvbuf) free(l->recvbuf);
- if (l->sendcounts) free(l->sendcounts);
- if (l->recvcounts) free(l->recvcounts);
- if (l->sdispls) free(l->sdispls);
- if (l->rdispls) free(l->rdispls);
- if (l->sendtypes) free(l->sendtypes);
- if (l->recvtypes) free(l->recvtypes);
+ if (l->buf)
+ free(l->buf);
+ if (l->recvbuf)
+ free(l->recvbuf);
+ if (l->sendcounts)
+ free(l->sendcounts);
+ if (l->recvcounts)
+ free(l->recvcounts);
+ if (l->sdispls)
+ free(l->sdispls);
+ if (l->rdispls)
+ free(l->rdispls);
+ if (l->sendtypes)
+ free(l->sendtypes);
+ if (l->recvtypes)
+ free(l->recvtypes);
}
/* Starts a "random" operation on "comm" corresponding to "rndnum" and returns
 * in (*req) a request handle corresponding to that operation. This call should
* be considered collective over comm (with a consistent value for "rndnum"),
* even though the operation may only be a point-to-point request. */
-static void start_random_nonblocking(MPI_Comm comm, unsigned int rndnum, MPI_Request *req, struct laundry *l)
+static void start_random_nonblocking(MPI_Comm comm, unsigned int rndnum, MPI_Request * req,
+ struct laundry *l)
{
int i, j;
int rank, size;
int *rdispls = NULL;
int *sendtypes = NULL;
int *recvtypes = NULL;
- char *buf_alias = NULL;
+ signed char *buf_alias = NULL;
MPI_Comm_rank(comm, &rank);
MPI_Comm_size(comm, &size);
l->case_num = -1;
l->comm = comm;
- l->buf = buf = malloc(COUNT*size*sizeof(int));
- l->recvbuf = recvbuf = malloc(COUNT*size*sizeof(int));
- l->sendcounts = sendcounts = malloc(size*sizeof(int));
- l->recvcounts = recvcounts = malloc(size*sizeof(int));
- l->sdispls = sdispls = malloc(size*sizeof(int));
- l->rdispls = rdispls = malloc(size*sizeof(int));
- l->sendtypes = sendtypes = malloc(size*sizeof(MPI_Datatype));
- l->recvtypes = recvtypes = malloc(size*sizeof(MPI_Datatype));
+ l->buf = buf = malloc(COUNT * size * sizeof(int));
+ l->recvbuf = recvbuf = malloc(COUNT * size * sizeof(int));
+ l->sendcounts = sendcounts = malloc(size * sizeof(int));
+ l->recvcounts = recvcounts = malloc(size * sizeof(int));
+ l->sdispls = sdispls = malloc(size * sizeof(int));
+ l->rdispls = rdispls = malloc(size * sizeof(int));
+ l->sendtypes = sendtypes = malloc(size * sizeof(MPI_Datatype));
+ l->recvtypes = recvtypes = malloc(size * sizeof(MPI_Datatype));
#define NUM_CASES (21)
l->case_num = rand_range(rndnum, 0, NUM_CASES);
switch (l->case_num) {
- case 0: /* MPI_Ibcast */
- for (i = 0; i < COUNT; ++i) {
- if (rank == 0) {
- buf[i] = i;
- }
- else {
- buf[i] = 0xdeadbeef;
- }
- }
- MPI_Ibcast(buf, COUNT, MPI_INT, 0, comm, req);
- break;
-
- case 1: /* MPI_Ibcast (again, but designed to stress scatter/allgather impls) */
- /* FIXME fiddle with PRIME and buffer allocation s.t. PRIME is much larger (1021?) */
- buf_alias = (char *)buf;
- my_assert(COUNT*size*sizeof(int) > PRIME); /* sanity */
- for (i = 0; i < PRIME; ++i) {
- if (rank == 0)
- buf_alias[i] = i;
- else
- buf_alias[i] = 0xdb;
+ case 0: /* MPI_Ibcast */
+ for (i = 0; i < COUNT; ++i) {
+ if (rank == 0) {
+ buf[i] = i;
}
- for (i = PRIME; i < COUNT * size * sizeof(int); ++i) {
- buf_alias[i] = 0xbf;
+ else {
+ buf[i] = 0xdeadbeef;
}
- MPI_Ibcast(buf, PRIME, MPI_SIGNED_CHAR, 0, comm, req);
- break;
-
- case 2: /* MPI_Ibarrier */
- MPI_Ibarrier(comm, req);
- break;
+ }
+ MPI_Ibcast(buf, COUNT, MPI_INT, 0, comm, req);
+ break;
+
+ case 1: /* MPI_Ibcast (again, but designed to stress scatter/allgather impls) */
+ /* FIXME fiddle with PRIME and buffer allocation s.t. PRIME is much larger (1021?) */
+ buf_alias = (signed char *) buf;
+ my_assert(COUNT * size * sizeof(int) > PRIME); /* sanity */
+ for (i = 0; i < PRIME; ++i) {
+ if (rank == 0)
+ buf_alias[i] = i;
+ else
+ buf_alias[i] = 0xdb;
+ }
+ for (i = PRIME; i < COUNT * size * sizeof(int); ++i) {
+ buf_alias[i] = 0xbf;
+ }
+ MPI_Ibcast(buf_alias, PRIME, MPI_SIGNED_CHAR, 0, comm, req);
+ break;
- case 3: /* MPI_Ireduce */
- for (i = 0; i < COUNT; ++i) {
- buf[i] = rank + i;
- recvbuf[i] = 0xdeadbeef;
- }
- MPI_Ireduce(buf, recvbuf, COUNT, MPI_INT, MPI_SUM, 0, comm, req);
- break;
+ case 2: /* MPI_Ibarrier */
+ MPI_Ibarrier(comm, req);
+ break;
- case 4: /* same again, use a user op and free it before the wait */
- {
- MPI_Op op = MPI_OP_NULL;
- MPI_Op_create(sum_fn, /*commute=*/1, &op);
- for (i = 0; i < COUNT; ++i) {
- buf[i] = rank + i;
- recvbuf[i] = 0xdeadbeef;
- }
- MPI_Ireduce(buf, recvbuf, COUNT, MPI_INT, op, 0, comm, req);
- MPI_Op_free(&op);
- }
- break;
+ case 3: /* MPI_Ireduce */
+ for (i = 0; i < COUNT; ++i) {
+ buf[i] = rank + i;
+ recvbuf[i] = 0xdeadbeef;
+ }
+ MPI_Ireduce(buf, recvbuf, COUNT, MPI_INT, MPI_SUM, 0, comm, req);
+ break;
- case 5: /* MPI_Iallreduce */
+ case 4: /* same again, use a user op and free it before the wait */
+ {
+ MPI_Op op = MPI_OP_NULL;
+ MPI_Op_create(sum_fn, /*commute= */ 1, &op);
for (i = 0; i < COUNT; ++i) {
buf[i] = rank + i;
recvbuf[i] = 0xdeadbeef;
}
- MPI_Iallreduce(buf, recvbuf, COUNT, MPI_INT, MPI_SUM, comm, req);
- break;
+ MPI_Ireduce(buf, recvbuf, COUNT, MPI_INT, op, 0, comm, req);
+ MPI_Op_free(&op);
+ }
+ break;
- case 6: /* MPI_Ialltoallv (a weak test, neither irregular nor sparse) */
- for (i = 0; i < size; ++i) {
- sendcounts[i] = COUNT;
- recvcounts[i] = COUNT;
- sdispls[i] = COUNT * i;
- rdispls[i] = COUNT * i;
- for (j = 0; j < COUNT; ++j) {
- buf[i*COUNT+j] = rank + (i * j);
- recvbuf[i*COUNT+j] = 0xdeadbeef;
- }
+ case 5: /* MPI_Iallreduce */
+ for (i = 0; i < COUNT; ++i) {
+ buf[i] = rank + i;
+ recvbuf[i] = 0xdeadbeef;
+ }
+ MPI_Iallreduce(buf, recvbuf, COUNT, MPI_INT, MPI_SUM, comm, req);
+ break;
+
+ case 6: /* MPI_Ialltoallv (a weak test, neither irregular nor sparse) */
+ for (i = 0; i < size; ++i) {
+ sendcounts[i] = COUNT;
+ recvcounts[i] = COUNT;
+ sdispls[i] = COUNT * i;
+ rdispls[i] = COUNT * i;
+ for (j = 0; j < COUNT; ++j) {
+ buf[i * COUNT + j] = rank + (i * j);
+ recvbuf[i * COUNT + j] = 0xdeadbeef;
}
- MPI_Ialltoallv(buf, sendcounts, sdispls, MPI_INT, recvbuf, recvcounts, rdispls, MPI_INT, comm, req);
- break;
-
- case 7: /* MPI_Igather */
- for (i = 0; i < size*COUNT; ++i) {
+ }
+ MPI_Ialltoallv(buf, sendcounts, sdispls, MPI_INT, recvbuf, recvcounts, rdispls, MPI_INT,
+ comm, req);
+ break;
+
+ case 7: /* MPI_Igather */
+ for (i = 0; i < size * COUNT; ++i) {
+ buf[i] = rank + i;
+ recvbuf[i] = 0xdeadbeef;
+ }
+ MPI_Igather(buf, COUNT, MPI_INT, recvbuf, COUNT, MPI_INT, 0, comm, req);
+ break;
+
+ case 8: /* same test again, just use a dup'ed datatype and free it before the wait */
+ {
+ MPI_Datatype type = MPI_DATATYPE_NULL;
+ MPI_Type_dup(MPI_INT, &type);
+ for (i = 0; i < size * COUNT; ++i) {
buf[i] = rank + i;
recvbuf[i] = 0xdeadbeef;
}
- MPI_Igather(buf, COUNT, MPI_INT, recvbuf, COUNT, MPI_INT, 0, comm, req);
- break;
-
- case 8: /* same test again, just use a dup'ed datatype and free it before the wait */
- {
- MPI_Datatype type = MPI_DATATYPE_NULL;
- MPI_Type_dup(MPI_INT, &type);
- for (i = 0; i < size*COUNT; ++i) {
- buf[i] = rank + i;
- recvbuf[i] = 0xdeadbeef;
- }
- MPI_Igather(buf, COUNT, MPI_INT, recvbuf, COUNT, type, 0, comm, req);
- MPI_Type_free(&type); /* should cause implementations that don't refcount
- correctly to blow up or hang in the wait */
- }
- break;
-
- case 9: /* MPI_Iscatter */
- for (i = 0; i < size; ++i) {
- for (j = 0; j < COUNT; ++j) {
- if (rank == 0)
- buf[i*COUNT+j] = i + j;
- else
- buf[i*COUNT+j] = 0xdeadbeef;
- recvbuf[i*COUNT+j] = 0xdeadbeef;
- }
- }
- MPI_Iscatter(buf, COUNT, MPI_INT, recvbuf, COUNT, MPI_INT, 0, comm, req);
- break;
-
- case 10: /* MPI_Iscatterv */
- for (i = 0; i < size; ++i) {
- /* weak test, just test the regular case where all counts are equal */
- sendcounts[i] = COUNT;
- sdispls[i] = i * COUNT;
- for (j = 0; j < COUNT; ++j) {
- if (rank == 0)
- buf[i*COUNT+j] = i + j;
- else
- buf[i*COUNT+j] = 0xdeadbeef;
- recvbuf[i*COUNT+j] = 0xdeadbeef;
- }
- }
- MPI_Iscatterv(buf, sendcounts, sdispls, MPI_INT, recvbuf, COUNT, MPI_INT, 0, comm, req);
- break;
+ MPI_Igather(buf, COUNT, MPI_INT, recvbuf, COUNT, type, 0, comm, req);
+ MPI_Type_free(&type); /* should cause implementations that don't refcount
+ * correctly to blow up or hang in the wait */
+ }
+ break;
- case 11: /* MPI_Ireduce_scatter */
- for (i = 0; i < size; ++i) {
- recvcounts[i] = COUNT;
- for (j = 0; j < COUNT; ++j) {
- buf[i*COUNT+j] = rank + i;
- recvbuf[i*COUNT+j] = 0xdeadbeef;
- }
+ case 9: /* MPI_Iscatter */
+ for (i = 0; i < size; ++i) {
+ for (j = 0; j < COUNT; ++j) {
+ if (rank == 0)
+ buf[i * COUNT + j] = i + j;
+ else
+ buf[i * COUNT + j] = 0xdeadbeef;
+ recvbuf[i * COUNT + j] = 0xdeadbeef;
}
- MPI_Ireduce_scatter(buf, recvbuf, recvcounts, MPI_INT, MPI_SUM, comm, req);
- break;
-
- case 12: /* MPI_Ireduce_scatter_block */
- for (i = 0; i < size; ++i) {
- for (j = 0; j < COUNT; ++j) {
- buf[i*COUNT+j] = rank + i;
- recvbuf[i*COUNT+j] = 0xdeadbeef;
- }
+ }
+ MPI_Iscatter(buf, COUNT, MPI_INT, recvbuf, COUNT, MPI_INT, 0, comm, req);
+ break;
+
+ case 10: /* MPI_Iscatterv */
+ for (i = 0; i < size; ++i) {
+ /* weak test, just test the regular case where all counts are equal */
+ sendcounts[i] = COUNT;
+ sdispls[i] = i * COUNT;
+ for (j = 0; j < COUNT; ++j) {
+ if (rank == 0)
+ buf[i * COUNT + j] = i + j;
+ else
+ buf[i * COUNT + j] = 0xdeadbeef;
+ recvbuf[i * COUNT + j] = 0xdeadbeef;
}
- MPI_Ireduce_scatter_block(buf, recvbuf, COUNT, MPI_INT, MPI_SUM, comm, req);
- break;
+ }
+ MPI_Iscatterv(buf, sendcounts, sdispls, MPI_INT, recvbuf, COUNT, MPI_INT, 0, comm, req);
+ break;
- case 13: /* MPI_Igatherv */
- for (i = 0; i < size*COUNT; ++i) {
- buf[i] = 0xdeadbeef;
- recvbuf[i] = 0xdeadbeef;
- }
- for (i = 0; i < COUNT; ++i) {
- buf[i] = rank + i;
- }
- for (i = 0; i < size; ++i) {
- recvcounts[i] = COUNT;
- rdispls[i] = i * COUNT;
+ case 11: /* MPI_Ireduce_scatter */
+ for (i = 0; i < size; ++i) {
+ recvcounts[i] = COUNT;
+ for (j = 0; j < COUNT; ++j) {
+ buf[i * COUNT + j] = rank + i;
+ recvbuf[i * COUNT + j] = 0xdeadbeef;
}
- MPI_Igatherv(buf, COUNT, MPI_INT, recvbuf, recvcounts, rdispls, MPI_INT, 0, comm, req);
- break;
+ }
+ MPI_Ireduce_scatter(buf, recvbuf, recvcounts, MPI_INT, MPI_SUM, comm, req);
+ break;
- case 14: /* MPI_Ialltoall */
- for (i = 0; i < size; ++i) {
- for (j = 0; j < COUNT; ++j) {
- buf[i*COUNT+j] = rank + (i * j);
- recvbuf[i*COUNT+j] = 0xdeadbeef;
- }
+ case 12: /* MPI_Ireduce_scatter_block */
+ for (i = 0; i < size; ++i) {
+ for (j = 0; j < COUNT; ++j) {
+ buf[i * COUNT + j] = rank + i;
+ recvbuf[i * COUNT + j] = 0xdeadbeef;
}
- MPI_Ialltoall(buf, COUNT, MPI_INT, recvbuf, COUNT, MPI_INT, comm, req);
- break;
+ }
+ MPI_Ireduce_scatter_block(buf, recvbuf, COUNT, MPI_INT, MPI_SUM, comm, req);
+ break;
- case 15: /* MPI_Iallgather */
- for (i = 0; i < size*COUNT; ++i) {
- buf[i] = rank + i;
- recvbuf[i] = 0xdeadbeef;
- }
- MPI_Iallgather(buf, COUNT, MPI_INT, recvbuf, COUNT, MPI_INT, comm, req);
- break;
+ case 13: /* MPI_Igatherv */
+ for (i = 0; i < size * COUNT; ++i) {
+ buf[i] = 0xdeadbeef;
+ recvbuf[i] = 0xdeadbeef;
+ }
+ for (i = 0; i < COUNT; ++i) {
+ buf[i] = rank + i;
+ }
+ for (i = 0; i < size; ++i) {
+ recvcounts[i] = COUNT;
+ rdispls[i] = i * COUNT;
+ }
+ MPI_Igatherv(buf, COUNT, MPI_INT, recvbuf, recvcounts, rdispls, MPI_INT, 0, comm, req);
+ break;
- case 16: /* MPI_Iallgatherv */
- for (i = 0; i < size; ++i) {
- for (j = 0; j < COUNT; ++j) {
- recvbuf[i*COUNT+j] = 0xdeadbeef;
- }
- recvcounts[i] = COUNT;
- rdispls[i] = i * COUNT;
+ case 14: /* MPI_Ialltoall */
+ for (i = 0; i < size; ++i) {
+ for (j = 0; j < COUNT; ++j) {
+ buf[i * COUNT + j] = rank + (i * j);
+ recvbuf[i * COUNT + j] = 0xdeadbeef;
}
- for (i = 0; i < COUNT; ++i)
- buf[i] = rank + i;
- MPI_Iallgatherv(buf, COUNT, MPI_INT, recvbuf, recvcounts, rdispls, MPI_INT, comm, req);
- break;
+ }
+ MPI_Ialltoall(buf, COUNT, MPI_INT, recvbuf, COUNT, MPI_INT, comm, req);
+ break;
- case 17: /* MPI_Iscan */
- for (i = 0; i < COUNT; ++i) {
- buf[i] = rank + i;
- recvbuf[i] = 0xdeadbeef;
- }
- MPI_Iscan(buf, recvbuf, COUNT, MPI_INT, MPI_SUM, comm, req);
- break;
+ case 15: /* MPI_Iallgather */
+ for (i = 0; i < size * COUNT; ++i) {
+ buf[i] = rank + i;
+ recvbuf[i] = 0xdeadbeef;
+ }
+ MPI_Iallgather(buf, COUNT, MPI_INT, recvbuf, COUNT, MPI_INT, comm, req);
+ break;
- case 18: /* MPI_Iexscan */
- for (i = 0; i < COUNT; ++i) {
- buf[i] = rank + i;
- recvbuf[i] = 0xdeadbeef;
+ case 16: /* MPI_Iallgatherv */
+ for (i = 0; i < size; ++i) {
+ for (j = 0; j < COUNT; ++j) {
+ recvbuf[i * COUNT + j] = 0xdeadbeef;
}
- MPI_Iexscan(buf, recvbuf, COUNT, MPI_INT, MPI_SUM, comm, req);
- break;
+ recvcounts[i] = COUNT;
+ rdispls[i] = i * COUNT;
+ }
+ for (i = 0; i < COUNT; ++i)
+ buf[i] = rank + i;
+ MPI_Iallgatherv(buf, COUNT, MPI_INT, recvbuf, recvcounts, rdispls, MPI_INT, comm, req);
+ break;
+
+ case 17: /* MPI_Iscan */
+ for (i = 0; i < COUNT; ++i) {
+ buf[i] = rank + i;
+ recvbuf[i] = 0xdeadbeef;
+ }
+ MPI_Iscan(buf, recvbuf, COUNT, MPI_INT, MPI_SUM, comm, req);
+ break;
- case 19: /* MPI_Ialltoallw (a weak test, neither irregular nor sparse) */
- for (i = 0; i < size; ++i) {
- sendcounts[i] = COUNT;
- recvcounts[i] = COUNT;
- sdispls[i] = COUNT * i * sizeof(int);
- rdispls[i] = COUNT * i * sizeof(int);
- sendtypes[i] = MPI_INT;
- recvtypes[i] = MPI_INT;
- for (j = 0; j < COUNT; ++j) {
- buf[i*COUNT+j] = rank + (i * j);
- recvbuf[i*COUNT+j] = 0xdeadbeef;
- }
+ case 18: /* MPI_Iexscan */
+ for (i = 0; i < COUNT; ++i) {
+ buf[i] = rank + i;
+ recvbuf[i] = 0xdeadbeef;
+ }
+ MPI_Iexscan(buf, recvbuf, COUNT, MPI_INT, MPI_SUM, comm, req);
+ break;
+
+ case 19: /* MPI_Ialltoallw (a weak test, neither irregular nor sparse) */
+ for (i = 0; i < size; ++i) {
+ sendcounts[i] = COUNT;
+ recvcounts[i] = COUNT;
+ sdispls[i] = COUNT * i * sizeof(int);
+ rdispls[i] = COUNT * i * sizeof(int);
+ sendtypes[i] = MPI_INT;
+ recvtypes[i] = MPI_INT;
+ for (j = 0; j < COUNT; ++j) {
+ buf[i * COUNT + j] = rank + (i * j);
+ recvbuf[i * COUNT + j] = 0xdeadbeef;
}
- MPI_Ialltoallw(buf, sendcounts, sdispls, sendtypes, recvbuf, recvcounts, rdispls, recvtypes, comm, req);
- break;
+ }
+ MPI_Ialltoallw(buf, sendcounts, sdispls, sendtypes, recvbuf, recvcounts, rdispls, recvtypes,
+ comm, req);
+ break;
- case 20: /* basic pt2pt MPI_Isend/MPI_Irecv pairing */
- /* even ranks send to odd ranks, but only if we have a full pair */
- if ((rank % 2 != 0) || (rank != size-1)) {
- for (j = 0; j < COUNT; ++j) {
- buf[j] = j;
- recvbuf[j] = 0xdeadbeef;
- }
- if (rank % 2 == 0)
- MPI_Isend(buf, COUNT, MPI_INT, rank+1, 5, comm, req);
- else
- MPI_Irecv(recvbuf, COUNT, MPI_INT, rank-1, 5, comm, req);
+ case 20: /* basic pt2pt MPI_Isend/MPI_Irecv pairing */
+ /* even ranks send to odd ranks, but only if we have a full pair */
+ if ((rank % 2 != 0) || (rank != size - 1)) {
+ for (j = 0; j < COUNT; ++j) {
+ buf[j] = j;
+ recvbuf[j] = 0xdeadbeef;
}
- break;
+ if (rank % 2 == 0)
+ MPI_Isend(buf, COUNT, MPI_INT, rank + 1, 5, comm, req);
+ else
+ MPI_Irecv(recvbuf, COUNT, MPI_INT, rank - 1, 5, comm, req);
+ }
+ break;
- default:
- fprintf(stderr, "unexpected value for l->case_num=%d)\n", (l->case_num));
- MPI_Abort(comm, 1);
- exit(1);
- break;
+ default:
+ fprintf(stderr, "unexpected value for l->case_num=%d)\n", (l->case_num));
+ MPI_Abort(comm, 1);
+ break;
}
}
{
int i, j;
int rank, size;
- MPI_Comm comm = l->comm;
- int *buf = l->buf;
- int *recvbuf = l->recvbuf;
+ MPI_Comm comm = l->comm;
+ int *buf = l->buf;
+ int *recvbuf = l->recvbuf;
int *sendcounts = l->sendcounts;
int *recvcounts = l->recvcounts;
- int *sdispls = l->sdispls;
- int *rdispls = l->rdispls;
- int *sendtypes = l->sendtypes;
- int *recvtypes = l->recvtypes;
- char *buf_alias = (char *)buf;
+ int *sdispls = l->sdispls;
+ int *rdispls = l->rdispls;
+ int *sendtypes = l->sendtypes;
+ int *recvtypes = l->recvtypes;
+ char *buf_alias = (char *) buf;
MPI_Comm_rank(comm, &rank);
MPI_Comm_size(comm, &size);
/* these cases all correspond to cases in start_random_nonblocking */
switch (l->case_num) {
- case 0: /* MPI_Ibcast */
- for (i = 0; i < COUNT; ++i) {
- if (buf[i] != i)
- printf("buf[%d]=%d i=%d\n", i, buf[i], i);
- my_assert(buf[i] == i);
- }
- break;
+ case 0: /* MPI_Ibcast */
+ for (i = 0; i < COUNT; ++i) {
+ if (buf[i] != i)
+ printf("buf[%d]=%d i=%d\n", i, buf[i], i);
+ my_assert(buf[i] == i);
+ }
+ break;
- case 1: /* MPI_Ibcast (again, but designed to stress scatter/allgather impls) */
- for (i = 0; i < PRIME; ++i) {
- if (buf_alias[i] != i)
- printf("buf_alias[%d]=%d i=%d\n", i, buf_alias[i], i);
- my_assert(buf_alias[i] == i);
- }
- break;
+ case 1: /* MPI_Ibcast (again, but designed to stress scatter/allgather impls) */
+ for (i = 0; i < PRIME; ++i) {
+ if (buf_alias[i] != i)
+ printf("buf_alias[%d]=%d i=%d\n", i, buf_alias[i], i);
+ my_assert(buf_alias[i] == i);
+ }
+ break;
- case 2: /* MPI_Ibarrier */
- /* nothing to check */
- break;
+ case 2: /* MPI_Ibarrier */
+ /* nothing to check */
+ break;
- case 3: /* MPI_Ireduce */
- if (rank == 0) {
- for (i = 0; i < COUNT; ++i) {
- if (recvbuf[i] != ((size * (size-1) / 2) + (i * size)))
- printf("got recvbuf[%d]=%d, expected %d\n", i, recvbuf[i], ((size * (size-1) / 2) + (i * size)));
- my_assert(recvbuf[i] == ((size * (size-1) / 2) + (i * size)));
- }
+ case 3: /* MPI_Ireduce */
+ if (rank == 0) {
+ for (i = 0; i < COUNT; ++i) {
+ if (recvbuf[i] != ((size * (size - 1) / 2) + (i * size)))
+ printf("got recvbuf[%d]=%d, expected %d\n", i, recvbuf[i],
+ ((size * (size - 1) / 2) + (i * size)));
+ my_assert(recvbuf[i] == ((size * (size - 1) / 2) + (i * size)));
}
- break;
+ }
+ break;
- case 4: /* same again, use a user op and free it before the wait */
- if (rank == 0) {
- for (i = 0; i < COUNT; ++i) {
- if (recvbuf[i] != ((size * (size-1) / 2) + (i * size)))
- printf("got recvbuf[%d]=%d, expected %d\n", i, recvbuf[i], ((size * (size-1) / 2) + (i * size)));
- my_assert(recvbuf[i] == ((size * (size-1) / 2) + (i * size)));
- }
+ case 4: /* same again, use a user op and free it before the wait */
+ if (rank == 0) {
+ for (i = 0; i < COUNT; ++i) {
+ if (recvbuf[i] != ((size * (size - 1) / 2) + (i * size)))
+ printf("got recvbuf[%d]=%d, expected %d\n", i, recvbuf[i],
+ ((size * (size - 1) / 2) + (i * size)));
+ my_assert(recvbuf[i] == ((size * (size - 1) / 2) + (i * size)));
}
- break;
+ }
+ break;
+
+ case 5: /* MPI_Iallreduce */
+ for (i = 0; i < COUNT; ++i) {
+ if (recvbuf[i] != ((size * (size - 1) / 2) + (i * size)))
+ printf("got recvbuf[%d]=%d, expected %d\n", i, recvbuf[i],
+ ((size * (size - 1) / 2) + (i * size)));
+ my_assert(recvbuf[i] == ((size * (size - 1) / 2) + (i * size)));
+ }
+ break;
- case 5: /* MPI_Iallreduce */
- for (i = 0; i < COUNT; ++i) {
- if (recvbuf[i] != ((size * (size-1) / 2) + (i * size)))
- printf("got recvbuf[%d]=%d, expected %d\n", i, recvbuf[i], ((size * (size-1) / 2) + (i * size)));
- my_assert(recvbuf[i] == ((size * (size-1) / 2) + (i * size)));
+ case 6: /* MPI_Ialltoallv (a weak test, neither irregular nor sparse) */
+ for (i = 0; i < size; ++i) {
+ for (j = 0; j < COUNT; ++j) {
+ /*printf("recvbuf[%d*COUNT+%d]=%d, expecting %d\n", i, j, recvbuf[i*COUNT+j], (i + (rank * j))); */
+ my_assert(recvbuf[i * COUNT + j] == (i + (rank * j)));
}
- break;
+ }
+ break;
- case 6: /* MPI_Ialltoallv (a weak test, neither irregular nor sparse) */
+ case 7: /* MPI_Igather */
+ if (rank == 0) {
for (i = 0; i < size; ++i) {
for (j = 0; j < COUNT; ++j) {
- /*printf("recvbuf[%d*COUNT+%d]=%d, expecting %d\n", i, j, recvbuf[i*COUNT+j], (i + (rank * j)));*/
- my_assert(recvbuf[i*COUNT+j] == (i + (rank * j)));
- }
- }
- break;
-
- case 7: /* MPI_Igather */
- if (rank == 0) {
- for (i = 0; i < size; ++i) {
- for (j = 0; j < COUNT; ++j) {
- my_assert(recvbuf[i*COUNT+j] == i + j);
- }
+ my_assert(recvbuf[i * COUNT + j] == i + j);
}
}
- else {
- for (i = 0; i < size*COUNT; ++i) {
- my_assert(recvbuf[i] == 0xdeadbeef);
- }
+ }
+ else {
+ for (i = 0; i < size * COUNT; ++i) {
+ my_assert(recvbuf[i] == 0xdeadbeef);
}
- break;
+ }
+ break;
- case 8: /* same test again, just use a dup'ed datatype and free it before the wait */
- if (rank == 0) {
- for (i = 0; i < size; ++i) {
- for (j = 0; j < COUNT; ++j) {
- my_assert(recvbuf[i*COUNT+j] == i + j);
- }
+ case 8: /* same test again, just use a dup'ed datatype and free it before the wait */
+ if (rank == 0) {
+ for (i = 0; i < size; ++i) {
+ for (j = 0; j < COUNT; ++j) {
+ my_assert(recvbuf[i * COUNT + j] == i + j);
}
}
- else {
- for (i = 0; i < size*COUNT; ++i) {
- my_assert(recvbuf[i] == 0xdeadbeef);
- }
+ }
+ else {
+ for (i = 0; i < size * COUNT; ++i) {
+ my_assert(recvbuf[i] == 0xdeadbeef);
}
- break;
+ }
+ break;
- case 9: /* MPI_Iscatter */
- for (j = 0; j < COUNT; ++j) {
- my_assert(recvbuf[j] == rank + j);
- }
- if (rank != 0) {
- for (i = 0; i < size*COUNT; ++i) {
- /* check we didn't corrupt the sendbuf somehow */
- my_assert(buf[i] == 0xdeadbeef);
- }
+ case 9: /* MPI_Iscatter */
+ for (j = 0; j < COUNT; ++j) {
+ my_assert(recvbuf[j] == rank + j);
+ }
+ if (rank != 0) {
+ for (i = 0; i < size * COUNT; ++i) {
+ /* check we didn't corrupt the sendbuf somehow */
+ my_assert(buf[i] == 0xdeadbeef);
}
- break;
+ }
+ break;
- case 10: /* MPI_Iscatterv */
- for (j = 0; j < COUNT; ++j) {
- my_assert(recvbuf[j] == rank + j);
- }
- if (rank != 0) {
- for (i = 0; i < size*COUNT; ++i) {
- /* check we didn't corrupt the sendbuf somehow */
- my_assert(buf[i] == 0xdeadbeef);
- }
- }
- for (i = 1; i < size; ++i) {
- for (j = 0; j < COUNT; ++j) {
- /* check we didn't corrupt the rest of the recvbuf */
- my_assert(recvbuf[i*COUNT+j] == 0xdeadbeef);
- }
+ case 10: /* MPI_Iscatterv */
+ for (j = 0; j < COUNT; ++j) {
+ my_assert(recvbuf[j] == rank + j);
+ }
+ if (rank != 0) {
+ for (i = 0; i < size * COUNT; ++i) {
+ /* check we didn't corrupt the sendbuf somehow */
+ my_assert(buf[i] == 0xdeadbeef);
}
- break;
-
- case 11: /* MPI_Ireduce_scatter */
+ }
+ for (i = 1; i < size; ++i) {
for (j = 0; j < COUNT; ++j) {
- my_assert(recvbuf[j] == (size * rank + ((size - 1) * size) / 2));
- }
- for (i = 1; i < size; ++i) {
- for (j = 0; j < COUNT; ++j) {
- /* check we didn't corrupt the rest of the recvbuf */
- my_assert(recvbuf[i*COUNT+j] == 0xdeadbeef);
- }
+ /* check we didn't corrupt the rest of the recvbuf */
+ my_assert(recvbuf[i * COUNT + j] == 0xdeadbeef);
}
- break;
+ }
+ break;
- case 12: /* MPI_Ireduce_scatter_block */
+ case 11: /* MPI_Ireduce_scatter */
+ for (j = 0; j < COUNT; ++j) {
+ my_assert(recvbuf[j] == (size * rank + ((size - 1) * size) / 2));
+ }
+ for (i = 1; i < size; ++i) {
for (j = 0; j < COUNT; ++j) {
- my_assert(recvbuf[j] == (size * rank + ((size - 1) * size) / 2));
- }
- for (i = 1; i < size; ++i) {
- for (j = 0; j < COUNT; ++j) {
- /* check we didn't corrupt the rest of the recvbuf */
- my_assert(recvbuf[i*COUNT+j] == 0xdeadbeef);
- }
+ /* check we didn't corrupt the rest of the recvbuf */
+ my_assert(recvbuf[i * COUNT + j] == 0xdeadbeef);
}
- break;
+ }
+ break;
- case 13: /* MPI_Igatherv */
- if (rank == 0) {
- for (i = 0; i < size; ++i) {
- for (j = 0; j < COUNT; ++j) {
- my_assert(recvbuf[i*COUNT+j] == i + j);
- }
- }
- }
- else {
- for (i = 0; i < size*COUNT; ++i) {
- my_assert(recvbuf[i] == 0xdeadbeef);
- }
+ case 12: /* MPI_Ireduce_scatter_block */
+ for (j = 0; j < COUNT; ++j) {
+ my_assert(recvbuf[j] == (size * rank + ((size - 1) * size) / 2));
+ }
+ for (i = 1; i < size; ++i) {
+ for (j = 0; j < COUNT; ++j) {
+ /* check we didn't corrupt the rest of the recvbuf */
+ my_assert(recvbuf[i * COUNT + j] == 0xdeadbeef);
}
- break;
+ }
+ break;
- case 14: /* MPI_Ialltoall */
+ case 13: /* MPI_Igatherv */
+ if (rank == 0) {
for (i = 0; i < size; ++i) {
for (j = 0; j < COUNT; ++j) {
- /*printf("recvbuf[%d*COUNT+%d]=%d, expecting %d\n", i, j, recvbuf[i*COUNT+j], (i + (i * j)));*/
- my_assert(recvbuf[i*COUNT+j] == (i + (rank * j)));
+ my_assert(recvbuf[i * COUNT + j] == i + j);
}
}
- break;
-
- case 15: /* MPI_Iallgather */
- for (i = 0; i < size; ++i) {
- for (j = 0; j < COUNT; ++j) {
- my_assert(recvbuf[i*COUNT+j] == i + j);
- }
+ }
+ else {
+ for (i = 0; i < size * COUNT; ++i) {
+ my_assert(recvbuf[i] == 0xdeadbeef);
}
- break;
+ }
+ break;
- case 16: /* MPI_Iallgatherv */
- for (i = 0; i < size; ++i) {
- for (j = 0; j < COUNT; ++j) {
- my_assert(recvbuf[i*COUNT+j] == i + j);
- }
+ case 14: /* MPI_Ialltoall */
+ for (i = 0; i < size; ++i) {
+ for (j = 0; j < COUNT; ++j) {
+ /*printf("recvbuf[%d*COUNT+%d]=%d, expecting %d\n", i, j, recvbuf[i*COUNT+j], (i + (i * j))); */
+ my_assert(recvbuf[i * COUNT + j] == (i + (rank * j)));
}
- break;
+ }
+ break;
- case 17: /* MPI_Iscan */
- for (i = 0; i < COUNT; ++i) {
- my_assert(recvbuf[i] == ((rank * (rank+1) / 2) + (i * (rank + 1))));
+ case 15: /* MPI_Iallgather */
+ for (i = 0; i < size; ++i) {
+ for (j = 0; j < COUNT; ++j) {
+ my_assert(recvbuf[i * COUNT + j] == i + j);
}
- break;
+ }
+ break;
- case 18: /* MPI_Iexscan */
- for (i = 0; i < COUNT; ++i) {
- if (rank == 0)
- my_assert(recvbuf[i] == 0xdeadbeef);
- else
- my_assert(recvbuf[i] == ((rank * (rank+1) / 2) + (i * (rank + 1)) - (rank + i)));
+ case 16: /* MPI_Iallgatherv */
+ for (i = 0; i < size; ++i) {
+ for (j = 0; j < COUNT; ++j) {
+ my_assert(recvbuf[i * COUNT + j] == i + j);
}
- break;
+ }
+ break;
- case 19: /* MPI_Ialltoallw (a weak test, neither irregular nor sparse) */
- for (i = 0; i < size; ++i) {
- for (j = 0; j < COUNT; ++j) {
- /*printf("recvbuf[%d*COUNT+%d]=%d, expecting %d\n", i, j, recvbuf[i*COUNT+j], (i + (rank * j)));*/
- my_assert(recvbuf[i*COUNT+j] == (i + (rank * j)));
- }
+ case 17: /* MPI_Iscan */
+ for (i = 0; i < COUNT; ++i) {
+ my_assert(recvbuf[i] == ((rank * (rank + 1) / 2) + (i * (rank + 1))));
+ }
+ break;
+
+ case 18: /* MPI_Iexscan */
+ for (i = 0; i < COUNT; ++i) {
+ if (rank == 0)
+ my_assert(recvbuf[i] == 0xdeadbeef);
+ else
+ my_assert(recvbuf[i] == ((rank * (rank + 1) / 2) + (i * (rank + 1)) - (rank + i)));
+ }
+ break;
+
+ case 19: /* MPI_Ialltoallw (a weak test, neither irregular nor sparse) */
+ for (i = 0; i < size; ++i) {
+ for (j = 0; j < COUNT; ++j) {
+ /*printf("recvbuf[%d*COUNT+%d]=%d, expecting %d\n", i, j, recvbuf[i*COUNT+j], (i + (rank * j))); */
+ my_assert(recvbuf[i * COUNT + j] == (i + (rank * j)));
}
- break;
+ }
+ break;
- case 20: /* basic pt2pt MPI_Isend/MPI_Irecv pairing */
- /* even ranks send to odd ranks, but only if we have a full pair */
- if ((rank % 2 != 0) || (rank != size-1)) {
- for (j = 0; j < COUNT; ++j) {
- /* only odd procs did a recv */
- if (rank % 2 == 0) {
- my_assert(recvbuf[j] == 0xdeadbeef);
- }
- else {
- if (recvbuf[j] != j) printf("recvbuf[%d]=%d j=%d\n", j, recvbuf[j], j);
- my_assert(recvbuf[j] == j);
- }
+ case 20: /* basic pt2pt MPI_Isend/MPI_Irecv pairing */
+ /* even ranks send to odd ranks, but only if we have a full pair */
+ if ((rank % 2 != 0) || (rank != size - 1)) {
+ for (j = 0; j < COUNT; ++j) {
+ /* only odd procs did a recv */
+ if (rank % 2 == 0) {
+ my_assert(recvbuf[j] == 0xdeadbeef);
+ }
+ else {
+ if (recvbuf[j] != j)
+ printf("recvbuf[%d]=%d j=%d\n", j, recvbuf[j], j);
+ my_assert(recvbuf[j] == j);
}
}
- break;
+ }
+ break;
- default:
- printf("invalid case_num (%d) detected\n", l->case_num);
- assert(0);
- break;
+ default:
+ printf("invalid case_num (%d) detected\n", l->case_num);
+ assert(0);
+ break;
}
}
+
#undef NUM_CASES
-static void complete_something_somehow(unsigned int rndnum, int numreqs, MPI_Request reqs[], int *outcount, int indices[])
+static void complete_something_somehow(unsigned int rndnum, int numreqs, MPI_Request reqs[],
+ int *outcount, int indices[])
{
int i, idx, flag;
#define COMPLETION_CASES (8)
switch (rand_range(rndnum, 0, COMPLETION_CASES)) {
- case 0:
- MPI_Waitall(numreqs, reqs, MPI_STATUSES_IGNORE);
- *outcount = numreqs;
- for (i = 0; i < numreqs; ++i) {
- indices[i] = i;
- }
- break;
-
- case 1:
- MPI_Testsome(numreqs, reqs, outcount, indices, MPI_STATUS_IGNORE);
- if (*outcount == MPI_UNDEFINED) {
- *outcount = 0;
- }
- break;
-
- case 2:
- MPI_Waitsome(numreqs, reqs, outcount, indices, MPI_STATUS_IGNORE);
- if (*outcount == MPI_UNDEFINED) {
- *outcount = 0;
- }
- break;
-
- case 3:
- MPI_Waitany(numreqs, reqs, &idx, MPI_STATUS_IGNORE);
- if (idx == MPI_UNDEFINED) {
- *outcount = 0;
- }
- else {
- *outcount = 1;
- indices[0] = idx;
- }
- break;
+ case 0:
+ MPI_Waitall(numreqs, reqs, MPI_STATUSES_IGNORE);
+ *outcount = numreqs;
+ for (i = 0; i < numreqs; ++i) {
+ indices[i] = i;
+ }
+ break;
- case 4:
- MPI_Testany(numreqs, reqs, &idx, &flag, MPI_STATUS_IGNORE);
- if (idx == MPI_UNDEFINED) {
- *outcount = 0;
- }
- else {
- *outcount = 1;
- indices[0] = idx;
- }
- break;
+ case 1:
+ MPI_Testsome(numreqs, reqs, outcount, indices, MPI_STATUS_IGNORE);
+ if (*outcount == MPI_UNDEFINED) {
+ *outcount = 0;
+ }
+ break;
- case 5:
- MPI_Testall(numreqs, reqs, &flag, MPI_STATUSES_IGNORE);
- if (flag) {
- *outcount = numreqs;
- for (i = 0; i < numreqs; ++i) {
- indices[i] = i;
- }
- }
- else {
- *outcount = 0;
- }
- break;
+ case 2:
+ MPI_Waitsome(numreqs, reqs, outcount, indices, MPI_STATUS_IGNORE);
+ if (*outcount == MPI_UNDEFINED) {
+ *outcount = 0;
+ }
+ break;
- case 6:
- /* select a new random index and wait on it */
- rndnum = gen_prn(rndnum);
- idx = rand_range(rndnum, 0, numreqs);
- MPI_Wait(&reqs[idx], MPI_STATUS_IGNORE);
+ case 3:
+ MPI_Waitany(numreqs, reqs, &idx, MPI_STATUS_IGNORE);
+ if (idx == MPI_UNDEFINED) {
+ *outcount = 0;
+ }
+ else {
*outcount = 1;
indices[0] = idx;
- break;
-
- case 7:
- /* select a new random index and wait on it */
- rndnum = gen_prn(rndnum);
- idx = rand_range(rndnum, 0, numreqs);
- MPI_Test(&reqs[idx], &flag, MPI_STATUS_IGNORE);
- *outcount = (flag ? 1 : 0);
+ }
+ break;
+
+ case 4:
+ MPI_Testany(numreqs, reqs, &idx, &flag, MPI_STATUS_IGNORE);
+ if (idx == MPI_UNDEFINED) {
+ *outcount = 0;
+ }
+ else {
+ *outcount = 1;
indices[0] = idx;
- break;
+ }
+ break;
- default:
- assert(0);
- break;
+ case 5:
+ MPI_Testall(numreqs, reqs, &flag, MPI_STATUSES_IGNORE);
+ if (flag) {
+ *outcount = numreqs;
+ for (i = 0; i < numreqs; ++i) {
+ indices[i] = i;
+ }
+ }
+ else {
+ *outcount = 0;
+ }
+ break;
+
+ case 6:
+ /* select a new random index and wait on it */
+ rndnum = gen_prn(rndnum);
+ idx = rand_range(rndnum, 0, numreqs);
+ MPI_Wait(&reqs[idx], MPI_STATUS_IGNORE);
+ *outcount = 1;
+ indices[0] = idx;
+ break;
+
+ case 7:
+ /* select a new random index and wait on it */
+ rndnum = gen_prn(rndnum);
+ idx = rand_range(rndnum, 0, numreqs);
+ MPI_Test(&reqs[idx], &flag, MPI_STATUS_IGNORE);
+ *outcount = (flag ? 1 : 0);
+ indices[0] = idx;
+ break;
+
+ default:
+ assert(0);
+ break;
}
#undef COMPLETION_CASES
}
-#endif /* defined(TEST_NBC_ROUTINES) */
int main(int argc, char **argv)
{
- int wrank, wsize;
-#if defined(TEST_NBC_ROUTINES)
int i, num_posted, num_completed;
+ int wrank, wsize;
unsigned int seed = 0x10bc;
unsigned int post_seq, complete_seq;
struct laundry larr[WINDOW];
int indices[WINDOW];
MPI_Comm comms[NUM_COMMS];
MPI_Comm comm;
-#endif
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &wrank);
MPI_Comm_size(MPI_COMM_WORLD, &wsize);
-#if defined(TEST_NBC_ROUTINES)
-
/* it is critical that all processes in the communicator start with a
* consistent value for "post_seq" */
post_seq = complete_seq = gen_prn(seed);
/* "randomly" and infrequently introduce some jitter into the system */
if (0 == rand_range(gen_prn(complete_seq + wrank), 0, CHANCE_OF_SLEEP)) {
- usleep(JITTER_DELAY); /* take a short nap */
+ usleep(JITTER_DELAY); /* take a short nap */
}
}
MPI_Comm_free(&comms[i]);
}
-#endif /* defined(TEST_NBC_ROUTINES) */
-
if (wrank == 0) {
if (errs)
printf("found %d errors\n", errs);
return 0;
}
-