/* -*- Mode: C; c-basic-offset:4 ; -*- */
/*
 * (C) 2011 by Argonne National Laboratory.
 * See COPYRIGHT in top-level directory.
 */

/* One-Sided MPI 2-D Strided Accumulate Test
 *
 * Author: James Dinan <dinan@mcs.anl.gov>
 * Date  : November, 2012
 *
 * This code performs N strided put operations followed by get operations into
 * a 2d patch of a shared array. The array has dimensions [X, Y] and the
 * subarray has dimensions [SUB_X, SUB_Y] and begins at index [0, 0]. The
 * input and output buffers are specified using an MPI indexed type.
 */
31 int main(int argc, char **argv)
33 int rank, nranks, rank_world, nranks_world;
34 int i, j, peer, bufsize, errors;
35 double *win_buf, *src_buf, *dst_buf;
39 MTest_Init(&argc, &argv);
41 MPI_Comm_rank(MPI_COMM_WORLD, &rank_world);
42 MPI_Comm_size(MPI_COMM_WORLD, &nranks_world);
44 MPI_Comm_split_type(MPI_COMM_WORLD, MPI_COMM_TYPE_SHARED, rank_world, MPI_INFO_NULL, &shr_comm);
46 MPI_Comm_rank(shr_comm, &rank);
47 MPI_Comm_size(shr_comm, &nranks);
49 bufsize = XDIM * YDIM * sizeof(double);
50 MPI_Alloc_mem(bufsize, MPI_INFO_NULL, &src_buf);
51 MPI_Alloc_mem(bufsize, MPI_INFO_NULL, &dst_buf);
53 MPI_Win_allocate_shared(bufsize, 1, MPI_INFO_NULL, shr_comm, &win_buf, &buf_win);
55 MPI_Win_fence(0, buf_win);
57 for (i = 0; i < XDIM * YDIM; i++) {
58 *(win_buf + i) = -1.0;
59 *(src_buf + i) = 1.0 + rank;
62 MPI_Win_fence(0, buf_win);
64 peer = (rank + 1) % nranks;
66 /* Perform ITERATIONS strided accumulate operations */
68 for (i = 0; i < ITERATIONS; i++) {
69 int idx_rem[SUB_YDIM];
70 int blk_len[SUB_YDIM];
71 MPI_Datatype src_type, dst_type;
73 for (j = 0; j < SUB_YDIM; j++) {
74 idx_rem[j] = j * XDIM;
75 blk_len[j] = SUB_XDIM;
78 MPI_Type_indexed(SUB_YDIM, blk_len, idx_rem, MPI_DOUBLE, &src_type);
79 MPI_Type_indexed(SUB_YDIM, blk_len, idx_rem, MPI_DOUBLE, &dst_type);
81 MPI_Type_commit(&src_type);
82 MPI_Type_commit(&dst_type);
85 MPI_Win_lock(MPI_LOCK_EXCLUSIVE, peer, 0, buf_win);
86 MPI_Get_accumulate(src_buf, 1, src_type, dst_buf, 1, src_type, peer, 0,
87 1, dst_type, MPI_REPLACE, buf_win);
88 MPI_Win_unlock(peer, buf_win);
91 MPI_Win_lock(MPI_LOCK_EXCLUSIVE, peer, 0, buf_win);
92 MPI_Get_accumulate(src_buf, 1, src_type, dst_buf, 1, src_type, peer, 0,
93 1, dst_type, MPI_NO_OP, buf_win);
94 MPI_Win_unlock(peer, buf_win);
96 MPI_Type_free(&src_type);
97 MPI_Type_free(&dst_type);
100 MPI_Barrier(MPI_COMM_WORLD);
102 /* Verify that the results are correct */
104 MPI_Win_lock(MPI_LOCK_EXCLUSIVE, rank, 0, buf_win);
106 for (i = 0; i < SUB_XDIM; i++) {
107 for (j = 0; j < SUB_YDIM; j++) {
108 const double actual = *(win_buf + i + j * XDIM);
109 const double expected = (1.0 + ((rank + nranks - 1) % nranks));
110 if (fabs(actual - expected) > 1.0e-10) {
111 SQUELCH(printf("%d: Data validation failed at [%d, %d] expected=%f actual=%f\n",
112 rank, j, i, expected, actual););
118 for (i = SUB_XDIM; i < XDIM; i++) {
119 for (j = 0; j < SUB_YDIM; j++) {
120 const double actual = *(win_buf + i + j * XDIM);
121 const double expected = -1.0;
122 if (fabs(actual - expected) > 1.0e-10) {
123 SQUELCH(printf("%d: Data validation failed at [%d, %d] expected=%f actual=%f\n",
124 rank, j, i, expected, actual););
130 for (i = 0; i < XDIM; i++) {
131 for (j = SUB_YDIM; j < YDIM; j++) {
132 const double actual = *(win_buf + i + j * XDIM);
133 const double expected = -1.0;
134 if (fabs(actual - expected) > 1.0e-10) {
135 SQUELCH(printf("%d: Data validation failed at [%d, %d] expected=%f actual=%f\n",
136 rank, j, i, expected, actual););
142 MPI_Win_unlock(rank, buf_win);
144 MPI_Win_free(&buf_win);
145 MPI_Free_mem(src_buf);
146 MPI_Free_mem(dst_buf);
147 MPI_Comm_free(&shr_comm);
149 MTest_Finalize(errors);
151 return MTestReturnValue(errors);