/* Creator: Bronis R. de Supinski (bronis@llnl.gov) Thu Nov 30 2000 */
/* no-error-derived-comms.c -- do some MPI calls without any errors */

#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include "mpi.h"

/* each RUN_* definition enables one of the constructors exercised below */
#define RUN_COMM_DUP
#define RUN_COMM_CREATE
#define RUN_COMM_SPLIT
#define RUN_INTERCOMM_CREATE
#define RUN_CART_CREATE
#define RUN_GRAPH_CREATE
#define RUN_CART_SUB
#define RUN_INTERCOMM_MERGE

#define buf_size 128
#define TWOD 2
#define GRAPH_SZ 4

#define DCOMM_CALL_COUNT 7 /* MPI_Cart_create; MPI_Cart_sub;
                              MPI_Comm_create; MPI_Comm_dup;
                              MPI_Comm_split; MPI_Graph_create;
                              and MPI_Intercomm_merge; store
                              MPI_Intercomm_create separately... */

#define INTERCOMM_CREATE_TAG 666
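
/* note: the tag value is arbitrary; both sides of each MPI_Intercomm_create
   call below just have to pass the same tag so the leader handshake on the
   peer communicator matches up */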

int
main (int argc, char **argv)
{
  int nprocs = -1;
  int rank = -1;
  int i, j;
  int *granks;
  int namelen = 128;
  char processor_name[128];
  int buf[buf_size];
  MPI_Status status;
  MPI_Comm temp;
  MPI_Comm intercomm = MPI_COMM_NULL;
  MPI_Comm dcomms[DCOMM_CALL_COUNT];
  MPI_Group world_group, dgroup;
  int intersize, dnprocs[DCOMM_CALL_COUNT], drank[DCOMM_CALL_COUNT];
  int dims[TWOD], periods[TWOD], remain_dims[TWOD];
  int graph_index[] = { 2, 3, 4, 6 };
  int graph_edges[] = { 1, 3, 0, 3, 0, 2 };
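
  /* graph_index[i] is the cumulative neighbor count through node i and
     graph_edges lists each node's neighbors in turn, so the four nodes
     have neighbor lists {1,3}, {0}, {3}, and {0,2} -- the example graph
     from MPI: The Complete Reference */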

  MPI_Init (&argc, &argv);
  MPI_Comm_size (MPI_COMM_WORLD, &nprocs);
  MPI_Comm_rank (MPI_COMM_WORLD, &rank);
  MPI_Get_processor_name (processor_name, &namelen);
  printf ("(%d) is alive on %s\n", rank, processor_name);

  MPI_Barrier (MPI_COMM_WORLD);

  if (nprocs < 3) {
    /* probably want number to be higher... */
    printf ("not enough tasks\n");
  }
  else {
    if (DCOMM_CALL_COUNT > 0) {
#ifdef RUN_COMM_DUP
      /* create all of the derived communicators... */
      /* simplest is created by MPI_Comm_dup... */
      MPI_Comm_dup (MPI_COMM_WORLD, &dcomms[0]);
#else
      dcomms[0] = MPI_COMM_NULL;
#endif
    }
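
    /* MPI_Comm_create needs an explicit group; the membership used here is
       a reconstruction (the even world ranks -- the original list was
       elided), and any subset would do; ranks outside the group are handed
       back MPI_COMM_NULL */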
    if (DCOMM_CALL_COUNT > 1) {
#ifdef RUN_COMM_CREATE
      /* use subset of MPI_COMM_WORLD group for MPI_Comm_create... */
      MPI_Comm_group (MPI_COMM_WORLD, &world_group);
      granks = (int *) malloc (sizeof(int) * (nprocs/2));
      for (i = 0; i < nprocs/2; i++)
        granks[i] = 2 * i;      /* reconstructed subset: the even ranks */
      MPI_Group_incl (world_group, nprocs/2, granks, &dgroup);
      MPI_Comm_create (MPI_COMM_WORLD, dgroup, &dcomms[1]);
      MPI_Group_free (&world_group);
      MPI_Group_free (&dgroup);
      free (granks);
#else
      dcomms[1] = MPI_COMM_NULL;
#endif
    }
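
    /* the color (rank % 3) partitions MPI_COMM_WORLD into three disjoint
       communicators, and the key (nprocs - rank) sorts each one by
       descending world rank, so local rank 0 is the highest world rank
       in its third */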
    if (DCOMM_CALL_COUNT > 2) {
#ifdef RUN_COMM_SPLIT
      /* split into thirds with inverted ranks... */
      MPI_Comm_split (MPI_COMM_WORLD, rank % 3, nprocs - rank, &dcomms[2]);
#else
      dcomms[2] = MPI_COMM_NULL;
#endif
    }
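
    /* build an intercommunicator between the rank%3 == 1 and rank%3 == 2
       thirds: each side passes its own third as the local comm (leader 0)
       and MPI_COMM_WORLD as the peer comm; the remote leader argument is
       the world rank of the other third's local rank 0, which -- because
       the split inverted the rank order -- is the highest world rank in
       that third, computed by the conditional expression below */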
#ifdef RUN_INTERCOMM_CREATE
    if ((DCOMM_CALL_COUNT < 3) || (dcomms[2] == MPI_COMM_NULL)) {
      MPI_Comm_split (MPI_COMM_WORLD, rank % 3, nprocs - rank, &temp);
    }
    else {
      temp = dcomms[2];
    }
    if (rank % 3) {
      MPI_Intercomm_create (temp, 0, MPI_COMM_WORLD,
                            (((nprocs % 3) == 2) && ((rank % 3) == 2)) ?
                            nprocs - 1 : nprocs - (rank % 3) - (nprocs % 3),
                            INTERCOMM_CREATE_TAG, &intercomm);
    }
    if ((DCOMM_CALL_COUNT < 3) || (dcomms[2] == MPI_COMM_NULL)) {
      MPI_Comm_free (&temp);
    }
#endif
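
    /* the torus uses at most 2 * (nprocs/2) processes; with reorder set,
       any process left out of the grid receives MPI_COMM_NULL, which is
       why dcomms[3] is checked before use later on */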
    if (DCOMM_CALL_COUNT > 3) {
#ifdef RUN_CART_CREATE
      /* create a 2 X nprocs/2 torus topology, allow reordering */
      dims[0] = 2;
      dims[1] = nprocs/2;
      periods[0] = periods[1] = 1;
      MPI_Cart_create (MPI_COMM_WORLD, TWOD, dims, periods, 1, &dcomms[3]);
#else
      dcomms[3] = MPI_COMM_NULL;
#endif
    }
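
    /* like the torus, the graph only places GRAPH_SZ processes; the rest
       receive MPI_COMM_NULL */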
    if (DCOMM_CALL_COUNT > 4) {
#ifdef RUN_GRAPH_CREATE
      /* create the graph on p.268 of MPI: The Complete Reference... */
      MPI_Graph_create (MPI_COMM_WORLD, GRAPH_SZ,
                        graph_index, graph_edges, 1, &dcomms[4]);
#else
      dcomms[4] = MPI_COMM_NULL;
#endif
    }
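
    /* MPI_Cart_sub with remain_dims = {0, 1} drops the first dimension of
       the 2 X nprocs/2 torus, slicing it into two independent rings of
       nprocs/2 processes each */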
    if (DCOMM_CALL_COUNT > 5) {
#ifdef RUN_CART_SUB
#ifndef RUN_CART_CREATE
      /* need to make cartesian communicator temporarily... */
      /* create a 2 X nprocs/2 torus topology, allow reordering */
      dims[0] = 2;
      dims[1] = nprocs/2;
      periods[0] = periods[1] = 1;
      MPI_Cart_create (MPI_COMM_WORLD, TWOD, dims, periods, 1, &dcomms[3]);
#endif
      if (dcomms[3] != MPI_COMM_NULL) {
        /* create two 1 X nprocs/2 subtopologies... */
        remain_dims[0] = 0;
        remain_dims[1] = 1;
        MPI_Cart_sub (dcomms[3], remain_dims, &dcomms[5]);
#ifndef RUN_CART_CREATE
        /* free up temporarily created cartesian communicator... */
        MPI_Comm_free (&dcomms[3]);
#endif
      }
      else {
        dcomms[5] = MPI_COMM_NULL;
      }
#else
      dcomms[5] = MPI_COMM_NULL;
#endif
    }
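
    /* MPI_Intercomm_merge turns the two sides of the intercommunicator into
       one intracommunicator; the high argument only orders the groups, so
       the rank%3 == 1 third (high true) gets the upper ranks */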
    if (DCOMM_CALL_COUNT > 6) {
#ifdef RUN_INTERCOMM_MERGE
#ifndef RUN_INTERCOMM_CREATE
#ifndef RUN_COMM_SPLIT
      /* need to make split communicator temporarily... */
      /* split into thirds with inverted ranks... */
      MPI_Comm_split (MPI_COMM_WORLD, rank % 3, nprocs - rank, &dcomms[2]);
#endif
#endif
      /* create an intercommunicator and merge it... */
      if (rank % 3) {
#ifndef RUN_INTERCOMM_CREATE
        MPI_Intercomm_create (dcomms[2], 0, MPI_COMM_WORLD,
                              (((nprocs % 3) == 2) && ((rank % 3) == 2)) ?
                              nprocs - 1 : nprocs - (rank % 3) - (nprocs % 3),
                              INTERCOMM_CREATE_TAG, &intercomm);
#endif
        MPI_Intercomm_merge (intercomm, ((rank % 3) == 1), &dcomms[6]);

#ifndef RUN_INTERCOMM_CREATE
        /* we are done with intercomm... */
        MPI_Comm_free (&intercomm);
#endif
      }
      else {
        dcomms[6] = MPI_COMM_NULL;
      }

#ifndef RUN_INTERCOMM_CREATE
#ifndef RUN_COMM_SPLIT
      /* free up temporarily created split communicator... */
      if (dcomms[2] != MPI_COMM_NULL)
        MPI_Comm_free (&dcomms[2]);
#endif
#endif
#else
      dcomms[6] = MPI_COMM_NULL;
#endif
    }
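
    /* cache all sizes and ranks once; communicators this process does not
       belong to read back as size 0 / rank -1 so the loops below skip them */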
    /* get all of the sizes and ranks... */
    for (i = 0; i < DCOMM_CALL_COUNT; i++) {
      if (dcomms[i] != MPI_COMM_NULL) {
        MPI_Comm_size (dcomms[i], &dnprocs[i]);
        MPI_Comm_rank (dcomms[i], &drank[i]);
      }
      else {
        dnprocs[i] = 0;
        drank[i] = -1;
      }
    }

#ifdef RUN_INTERCOMM_CREATE
    /* get the intercomm remote size... */
    if (intercomm != MPI_COMM_NULL)
      MPI_Comm_remote_size (intercomm, &intersize);
#endif
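
    /* intersize counts the processes in the remote group and is only set on
       members of the intercommunicator (rank % 3 != 0) */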

    /* do some point to point on all of the dcomms... */
    for (i = 0; i < DCOMM_CALL_COUNT; i++) {
      if (dnprocs[i] > 1) {
        if (drank[i] == 0) {
          for (j = 1; j < dnprocs[i]; j++) {
            MPI_Recv (buf, buf_size, MPI_INT, j, 0, dcomms[i], &status);
          }
        }
        else {
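          /* note memset fills bytes, so each int becomes 0x01010101, not 1;
             the payload value is irrelevant to the test */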
          memset (buf, 1, buf_size*sizeof(int));
          MPI_Send (buf, buf_size, MPI_INT, 0, 0, dcomms[i]);
        }
      }
    }
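
    /* on an intercommunicator, source and destination ranks refer to the
       remote group, so each rank%3 == 1 process receives one message from
       every one of the intersize processes in the rank%3 == 2 third */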
#ifdef RUN_INTERCOMM_CREATE
    /* do some point to point on the intercomm... */
    if ((rank % 3) == 1) {
      for (j = 0; j < intersize; j++) {
        MPI_Recv (buf, buf_size, MPI_INT, j, 0, intercomm, &status);
      }
    }
    else if ((rank % 3) == 2) {
      for (j = 0; j < intersize; j++) {
        memset (buf, 1, buf_size*sizeof(int));
        MPI_Send (buf, buf_size, MPI_INT, j, 0, intercomm);
      }
    }
#endif

    /* do a bcast on all of the dcomms... */
    for (i = 0; i < DCOMM_CALL_COUNT; i++) {
      /* IBM's implementation gets an error with a bcast over MPI_COMM_NULL... */
      if (dcomms[i] != MPI_COMM_NULL)
        MPI_Bcast (buf, buf_size, MPI_INT, 0, dcomms[i]);
    }
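
    /* same fan-in as before, but the root posts MPI_ANY_SOURCE receives, so
       messages are matched in whatever order they arrive */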
    /* use any source receives... */
    for (i = 0; i < DCOMM_CALL_COUNT; i++) {
      if (dnprocs[i] > 1) {
        if (drank[i] == 0) {
          for (j = 1; j < dnprocs[i]; j++) {
            MPI_Recv (buf, buf_size, MPI_INT,
                      MPI_ANY_SOURCE, 0, dcomms[i], &status);
          }
        }
        else {
          memset (buf, 1, buf_size*sizeof(int));
          MPI_Send (buf, buf_size, MPI_INT, 0, 0, dcomms[i]);
        }
      }
    }

#ifdef RUN_INTERCOMM_CREATE
    /* do any source receives on the intercomm... */
    if ((rank % 3) == 1) {
      for (j = 0; j < intersize; j++) {
        MPI_Recv (buf, buf_size, MPI_INT,
                  MPI_ANY_SOURCE, 0, intercomm, &status);
      }
    }
    else if ((rank % 3) == 2) {
      for (j = 0; j < intersize; j++) {
        memset (buf, 1, buf_size*sizeof(int));
        MPI_Send (buf, buf_size, MPI_INT, j, 0, intercomm);
      }
    }
#endif

    /* do a barrier on all of the dcomms... */
    for (i = 0; i < DCOMM_CALL_COUNT; i++) {
      /* IBM's implementation gets an error with a barrier over MPI_COMM_NULL... */
      if (dcomms[i] != MPI_COMM_NULL)
        MPI_Barrier (dcomms[i]);
    }

    /* free all of the derived communicators... */
    for (i = 0; i < DCOMM_CALL_COUNT; i++) {
      /* freeing MPI_COMM_NULL is explicitly defined as erroneous... */
      if (dcomms[i] != MPI_COMM_NULL)
        MPI_Comm_free (&dcomms[i]);
    }

#ifdef RUN_INTERCOMM_CREATE
    /* we are done with intercomm; the rank%3 == 0 third never created one... */
    if (intercomm != MPI_COMM_NULL)
      MPI_Comm_free (&intercomm);
#endif
  }

  MPI_Barrier (MPI_COMM_WORLD);

  MPI_Finalize ();
  printf ("(%d) Finished normally\n", rank);

  return 0;
}