1 /* -*- Mode: C; c-basic-offset:4 ; -*- */
3 This is like ictest.c, but it creates communicators that are valid only
4 at the "leaders"; other members of the local communicator are NOT
5 in the remote communicator. This is done by creating two communicators:
6 0, + odd rank and even rank. Only 0 is in both communicators.
8 This test originally tested the part of the standard that allowed the
9 leader to be in both groups. This has been disallowed. This test was
10 recently changed to operate correctly under the new definition.
12 Note that it generates unordered printf output, and is not suitable for
21 int main( int argc, char **argv )
23 int size, rank, key, lrank, rsize, result, remLeader = 0;
26 MPI_Comm mySecondComm;
27 MPI_Comm evenComm, oddComm, remComm;
28 int errors = 0, sum_errors;
32 MPI_Init ( &argc, &argv );
33 MPI_Comm_rank ( MPI_COMM_WORLD, &rank);
34 MPI_Comm_size ( MPI_COMM_WORLD, &size);
36 /* Only works for 2 or more processes */
38 MPI_Comm merge1, merge2, merge3, merge4;
40 /* Generate membership key in the range [0,1] */
42 /* Create the even communicator */
43 MPI_Comm_split ( MPI_COMM_WORLD, key, rank, &evenComm );
45 /* Odd rank communicator discarded */
46 MPI_Comm_free( &evenComm );
49 /* Create the odd communicator */
50 MPI_Comm_split ( MPI_COMM_WORLD, key, rank, &oddComm );
52 /* Even rank communicator discarded */
53 MPI_Comm_free( &oddComm );
56 /* Create the odd + 0 communicator */
57 if (rank == 0) key = 1;
58 MPI_Comm_split( MPI_COMM_WORLD, key, rank, &remComm );
60 /* Even rank communicator discarded */
61 MPI_Comm_free( &remComm );
64 MPI_Comm_rank( remComm, &lrank );
66 printf( "[%d] lrank in remComm is %d (color = %d, key=%d)\n",
67 rank, lrank, rank, key );
69 remLeader = (lrank == 0) ? 1 : 0;
71 /* Now, choose the local and remote communicators */
80 /* Check that the leader is who we think he is */
81 MPI_Comm_rank( myComm, &lrank );
83 printf( "[%d] local rank is %d\n", rank, lrank );
87 MPI_Comm_rank( myComm, &trank );
89 printf( "[%d] Comm split improperly ordered group (myComm)\n",
94 MPI_Comm_rank( remComm, &trank );
96 printf( "[%d] Comm split improperly ordered group (remComm)\n",
102 /* Perform the intercomm create and test it */
103 /* local leader is first process in local_comm, i.e., has rank 0 */
104 /* remote leader is process 0 (if odd) or 1 (if even) in remComm */
105 MPI_Intercomm_create (myComm, 0, remComm, remLeader, 1, &myFirstComm );
108 printf( "[%d] through intercom create\n", rank );
111 MPI_Barrier( MPI_COMM_WORLD );
113 printf( "[%d] through barrier at end of intercom create\n", rank );
118 /* Try to dup this communicator */
119 MPI_Comm_dup ( myFirstComm, &mySecondComm );
123 printf( "[%d] through comm dup\n", rank );
126 MPI_Barrier( MPI_COMM_WORLD );
128 printf( "[%d] through barrier at end of comm dup\n", rank );
133 /* Each member shares data with his "partner". Note that process 0 in
134 MPI_COMM_WORLD is sending to itself, since it is process 0 in both
136 MPI_Comm_rank( mySecondComm, &lrank );
137 MPI_Comm_remote_size( mySecondComm, &rsize );
140 printf( "[%d] lrank in secondcomm is %d and remote size is %d\n",
141 rank, lrank, rsize );
145 /* Send key * size + rank in communicator */
149 myval = key * size + lrank;
152 printf( "[%d] exchanging %d with %d in intercomm\n",
153 rank, myval, lrank );
156 MPI_Sendrecv (&myval, 1, MPI_INT, lrank, 0,
157 &hisval, 1, MPI_INT, lrank, 0, mySecondComm, &status);
158 if (hisval != (lrank + (!key)*size)) {
159 printf( "[%d] expected %d but got %d\n", rank, lrank + (!key)*size,
166 printf("[%d] Failed!\n",rank);
170 /* Key is 1 for oddComm, 0 for evenComm (note both contain 0 in WORLD) */
171 MPI_Intercomm_merge ( mySecondComm, key, &merge1 );
172 MPI_Intercomm_merge ( mySecondComm, (key+1)%2, &merge2 );
173 MPI_Intercomm_merge ( mySecondComm, 0, &merge3 );
174 MPI_Intercomm_merge ( mySecondComm, 1, &merge4 );
176 MPI_Comm_compare( merge1, MPI_COMM_WORLD, &result );
177 if (result != MPI_SIMILAR && size > 2) {
178 printf( "[%d] comparision with merge1 failed\n", rank );
182 /* Free communicators */
183 MPI_Comm_free( &myComm );
184 /* remComm may have been freed above */
185 if (remComm != MPI_COMM_NULL)
186 MPI_Comm_free( &remComm );
187 MPI_Comm_free( &myFirstComm );
188 MPI_Comm_free( &mySecondComm );
189 MPI_Comm_free( &merge1 );
190 MPI_Comm_free( &merge2 );
191 MPI_Comm_free( &merge3 );
192 MPI_Comm_free( &merge4 );
195 printf("[%d] Failed - at least 2 nodes must be used\n",rank);
198 MPI_Barrier( MPI_COMM_WORLD );
199 MPI_Allreduce( &errors, &sum_errors, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD );
200 if (sum_errors > 0) {
201 printf( "%d errors on process %d\n", errors, rank );
203 else if (rank == 0) {
204 printf( " No Errors\n" );