1 /* -*- Mode: C; c-basic-offset:4 ; -*- */
2 /*
3 * (C) 2001 by Argonne National Laboratory.
4 * See COPYRIGHT in top-level directory.
5 */
7 /* One-Sided MPI 2-D Strided Accumulate Test
8 *
9 * Author: James Dinan <dinan@mcs.anl.gov>
10 * Date : December, 2010
11 *
12 * This code performs N strided put operations followed by get operations into
13 * a 2d patch of a shared array. The array has dimensions [X, Y] and the
14 * subarray has dimensions [SUB_X, SUB_Y] and begins at index [0, 0]. The
15 * input and output buffers are specified using an MPI indexed type.
16 */
/* Test driver: each rank performs ITERATIONS strided MPI_Get_accumulate
 * operations (MPI_REPLACE, then MPI_NO_OP) on a 2-D patch of its peer's
 * window, then locally verifies its own window contents.
 *
 * NOTE(review): this extract elides some lines of the original file
 * (e.g. the MPI_Win buf_win declaration, loop-closing braces, the
 * errors = 0 initialization before verification, and the errors++
 * statements inside the SQUELCH branches) -- confirm against the full
 * file before editing; as shown, errors would be read uninitialized. */
31 int main(int argc, char **argv)
33 int i, j, rank, nranks, peer, bufsize, errors;
34 double *win_buf, *src_buf, *dst_buf;
/* Initialize the MTest test framework. */
37 MTest_Init(&argc, &argv);
39 MPI_Comm_rank(MPI_COMM_WORLD, &rank);
40 MPI_Comm_size(MPI_COMM_WORLD, &nranks);
/* One XDIM x YDIM array of doubles for the window and for each of the
 * source/destination buffers. */
42 bufsize = XDIM * YDIM * sizeof(double);
43 MPI_Alloc_mem(bufsize, MPI_INFO_NULL, &win_buf);
44 MPI_Alloc_mem(bufsize, MPI_INFO_NULL, &src_buf);
45 MPI_Alloc_mem(bufsize, MPI_INFO_NULL, &dst_buf);
/* Window starts at -1.0 everywhere; each rank's source data is rank + 1. */
47 for (i = 0; i < XDIM * YDIM; i++) {
48 *(win_buf + i) = -1.0;
49 *(src_buf + i) = 1.0 + rank;
/* Expose win_buf with a displacement unit of 1 byte. */
52 MPI_Win_create(win_buf, bufsize, 1, MPI_INFO_NULL, MPI_COMM_WORLD, &buf_win);
/* Each rank targets its next-higher neighbor, wrapping around. */
54 peer = (rank + 1) % nranks;
56 /* Perform ITERATIONS strided accumulate operations */
58 for (i = 0; i < ITERATIONS; i++) {
59 int idx_rem[SUB_YDIM];
60 int blk_len[SUB_YDIM];
61 MPI_Datatype src_type, dst_type;
/* Describe a SUB_XDIM x SUB_YDIM patch at [0, 0] of the row-major
 * XDIM-wide array: one block of SUB_XDIM doubles per row, rows
 * starting XDIM elements apart. */
63 for (j = 0; j < SUB_YDIM; j++) {
64 idx_rem[j] = j * XDIM;
65 blk_len[j] = SUB_XDIM;
68 MPI_Type_indexed(SUB_YDIM, blk_len, idx_rem, MPI_DOUBLE, &src_type);
69 MPI_Type_indexed(SUB_YDIM, blk_len, idx_rem, MPI_DOUBLE, &dst_type);
71 MPI_Type_commit(&src_type);
72 MPI_Type_commit(&dst_type);
/* MPI_REPLACE epoch: overwrite the peer's patch with src_buf (put-like),
 * fetching the previous contents into dst_buf. */
75 MPI_Win_lock(MPI_LOCK_EXCLUSIVE, peer, 0, buf_win);
76 MPI_Get_accumulate(src_buf, 1, src_type, dst_buf, 1, src_type, peer, 0,
77 1, dst_type, MPI_REPLACE, buf_win);
78 MPI_Win_unlock(peer, buf_win);
/* MPI_NO_OP epoch: fetch the peer's patch into dst_buf without
 * modifying the target (get-like). */
81 MPI_Win_lock(MPI_LOCK_EXCLUSIVE, peer, 0, buf_win);
82 MPI_Get_accumulate(src_buf, 1, src_type, dst_buf, 1, src_type, peer, 0,
83 1, dst_type, MPI_NO_OP, buf_win);
84 MPI_Win_unlock(peer, buf_win);
86 MPI_Type_free(&src_type);
87 MPI_Type_free(&dst_type);
/* Wait for every rank to finish its RMA epochs before verifying. */
90 MPI_Barrier(MPI_COMM_WORLD);
92 /* Verify that the results are correct */
/* Lock our own window so the direct loads from win_buf below are
 * well-defined with respect to the RMA traffic. */
94 MPI_Win_lock(MPI_LOCK_EXCLUSIVE, rank, 0, buf_win);
/* Inside the patch: our lower neighbor replaced it with (its rank + 1). */
96 for (i = 0; i < SUB_XDIM; i++) {
97 for (j = 0; j < SUB_YDIM; j++) {
98 const double actual = *(win_buf + i + j * XDIM);
99 const double expected = (1.0 + ((rank + nranks - 1) % nranks));
100 if (fabs(actual - expected) > 1.0e-10) {
101 SQUELCH(printf("%d: Data validation failed at [%d, %d] expected=%f actual=%f\n",
102 rank, j, i, expected, actual););
/* Right of the patch (same rows): must still hold the initial -1.0. */
108 for (i = SUB_XDIM; i < XDIM; i++) {
109 for (j = 0; j < SUB_YDIM; j++) {
110 const double actual = *(win_buf + i + j * XDIM);
111 const double expected = -1.0;
112 if (fabs(actual - expected) > 1.0e-10) {
113 SQUELCH(printf("%d: Data validation failed at [%d, %d] expected=%f actual=%f\n",
114 rank, j, i, expected, actual););
/* Below the patch (all columns): must still hold the initial -1.0. */
120 for (i = 0; i < XDIM; i++) {
121 for (j = SUB_YDIM; j < YDIM; j++) {
122 const double actual = *(win_buf + i + j * XDIM);
123 const double expected = -1.0;
124 if (fabs(actual - expected) > 1.0e-10) {
125 SQUELCH(printf("%d: Data validation failed at [%d, %d] expected=%f actual=%f\n",
126 rank, j, i, expected, actual););
132 MPI_Win_unlock(rank, buf_win);
/* Release the window and all MPI-allocated buffers. */
134 MPI_Win_free(&buf_win);
135 MPI_Free_mem(win_buf);
136 MPI_Free_mem(src_buf);
137 MPI_Free_mem(dst_buf);
/* Report the local error count to the test framework. */
139 MTest_Finalize(errors);
141 return MTestReturnValue(errors);