1 /* pmm - parallel matrix multiplication "double diffusion" */
3 /* Copyright (c) 2006-2015. The SimGrid Team.
4 * All rights reserved. */
6 /* This program is free software; you can redistribute it and/or modify it
7 * under the terms of the license (GNU LGPL) which comes with this package. */
9 #include "simgrid/msg.h"
10 #include "xbt/matrix.h"
12 #include "xbt/xbt_os_time.h"
14 /** @addtogroup MSG_examples
16 * - <b>pmm/msg_pmm.c</b>: Parallel Matrix Multiplication is a little application. This is something that most MPI
17 * developers have written during their class, here implemented using MSG instead of MPI.
20 XBT_LOG_NEW_DEFAULT_CATEGORY(msg_pmm, "Messages specific for this msg example");
22 /* This example should always be executed using a deployment of
23 * GRID_SIZE * GRID_SIZE nodes. */
#define GRID_SIZE 3          /* Modify to adjust the grid's size */
#define NODE_MATRIX_SIZE 300 /* Amount of work done by each node */
/* Expression-like macros are fully parenthesized (CERT PRE01-C): the
 * previous unparenthesized forms broke under surrounding operators,
 * e.g. `2 * NEIGHBOURS_COUNT` expanded to `2 * 3 - 1` == 5, not 4. */
#define GRID_NUM_NODES (GRID_SIZE * GRID_SIZE)
#define MATRIX_SIZE (NODE_MATRIX_SIZE * GRID_SIZE)
#define MAILBOX_NAME_SIZE 10 /* large enough for a decimal node id + NUL */
#define NEIGHBOURS_COUNT (GRID_SIZE - 1)
33 * The job sent to every node
/* The work unit sent to each grid node: its grid coordinates, the peer
 * nodes on the same row/column, and (presumably) its sub-matrices of A
 * and B — NOTE(review): the row/col/A/B fields referenced elsewhere in
 * this file are not visible in this view; confirm against the full file. */
typedef struct s_node_job{
  /* IDs of the GRID_SIZE-1 other nodes sharing this node's grid row
   * and column (filled by create_jobs, used as broadcast targets). */
  int nodes_in_row[NEIGHBOURS_COUNT];
  int nodes_in_col[NEIGHBOURS_COUNT];
} s_node_job_t, *node_job_t;
45 * Structure for recovering results
47 typedef struct s_result {
51 } s_result_t, *result_t;
/* Forward declarations: `node` is the per-process entry point registered
 * with MSG; the static helpers implement job creation/distribution (node 0)
 * and matrix exchange/result collection. */
int node(int argc, char **argv);
static void create_jobs(xbt_matrix_t A, xbt_matrix_t B, node_job_t *jobs);
static void broadcast_jobs(node_job_t *jobs);
static node_job_t wait_job(int selfid);
static void broadcast_matrix(xbt_matrix_t M, int num_nodes, int *nodes);
static void get_sub_matrix(xbt_matrix_t *sM, int selfid);
static void receive_results(result_t *results);
static void task_cleanup(void *arg);
/* Per-process entry point. argv[1] is this node's id in the
 * GRID_SIZE x GRID_SIZE grid; the node's mailbox name is its decimal id.
 * Node 0 builds the full A/B/C matrices, distributes the jobs, then
 * gathers and reassembles the results; every node (0 included) runs the
 * block-multiplication main loop on its own sub-matrices.
 * NOTE(review): several lines (braces, `int myid`/`int k` declarations,
 * and the `if (myid == 0)` guards) are missing from this view of the
 * file; comments on the hidden control flow are best-effort. */
int node(int argc, char **argv)
  char my_mbox[MAILBOX_NAME_SIZE];
  node_job_t myjob, jobs[GRID_NUM_NODES];
  xbt_matrix_t A, B, C, sA, sB, sC;

  xbt_assert(argc != 1, "Wrong number of arguments for this node");

  /* Initialize the node's data-structures */
  myid = xbt_str_parse_int(argv[1], "Invalid ID received as first node parameter: %s");
  snprintf(my_mbox, MAILBOX_NAME_SIZE - 1, "%d", myid); /* mailbox name == node id */
  sC = xbt_matrix_double_new_zeros(NODE_MATRIX_SIZE, NODE_MATRIX_SIZE); /* local result accumulator */

  /* Create the matrices to multiply and one to store the result */
  /* (presumably node 0 only — the guarding `if` is not visible here; confirm) */
  A = xbt_matrix_double_new_id(MATRIX_SIZE, MATRIX_SIZE);
  B = xbt_matrix_double_new_seq(MATRIX_SIZE, MATRIX_SIZE);
  C = xbt_matrix_double_new_zeros(MATRIX_SIZE, MATRIX_SIZE);

  /* Create the nodes' jobs */
  create_jobs(A, B, jobs);

  /* Get own job first */

  /* Broadcast the rest of the jobs to the other nodes */
  broadcast_jobs(jobs + 1);

  A = B = C = NULL; /* Avoid warning at compilation */
  myjob = wait_job(myid); /* blocks until this node's job arrives on its mailbox */

  /* Multiplication main-loop: classic SUMMA-style iteration — at step k the
   * diagonal-of-step owners broadcast their sA/sB blocks along their row/col,
   * everyone accumulates a partial product into sC. */
  XBT_VERB("Start Multiplication's Main-loop");
  for(k=0; k < GRID_SIZE; k++){
    /* this node is the step-k broadcaster for its row (guard `if` hidden in this view) */
    XBT_VERB("Broadcast sA(%d,%d) to row %d", myjob->row, k, myjob->row);
    broadcast_matrix(myjob->A, NEIGHBOURS_COUNT, myjob->nodes_in_row);

    /* likewise for its column */
    XBT_VERB("Broadcast sB(%d,%d) to col %d", k, myjob->col, myjob->col);
    broadcast_matrix(myjob->B, NEIGHBOURS_COUNT, myjob->nodes_in_col);

    if(myjob->row == k && myjob->col == k){
      /* owns both blocks this step: no receive needed */
      xbt_matrix_double_addmult(myjob->A, myjob->B, sC);
    }else if(myjob->row == k){
      /* owns sB's row block; receive sA from the column broadcaster */
      get_sub_matrix(&sA, myid);
      xbt_matrix_double_addmult(sA, myjob->B, sC);
    }else if(myjob->col == k){
      /* owns sA's column block; receive sB */
      get_sub_matrix(&sB, myid);
      xbt_matrix_double_addmult(myjob->A, sB, sC);
      /* else branch (receive both) — opening `}else{` not visible in this view */
      get_sub_matrix(&sA, myid);
      get_sub_matrix(&sB, myid);
      xbt_matrix_double_addmult(sA, sB, sC);

  /* Node 0: gather the results and reconstruct the final matrix */
    result_t results[GRID_NUM_NODES] = {0};

    XBT_VERB("Multiplication done.");

    /* Get the result from the nodes in the GRID */
    receive_results(results);

    /* First add our results */
    xbt_matrix_copy_values(C, sC, NODE_MATRIX_SIZE, NODE_MATRIX_SIZE, 0, 0, 0, 0, NULL);

    /* Reconstruct the rest of the result matrix: each sub-result is copied
     * at the (row, col) block offset it was computed for */
    for (node = 1; node < GRID_NUM_NODES; node++){
      xbt_matrix_copy_values(C, results[node]->sC, NODE_MATRIX_SIZE, NODE_MATRIX_SIZE,
                             NODE_MATRIX_SIZE * results[node]->row, NODE_MATRIX_SIZE * results[node]->col,
      xbt_matrix_free(results[node]->sC);
      xbt_free(results[node]);

    //xbt_matrix_dump(C, "C:res", 0, xbt_matrix_dump_display_double);

  /* The rest: return the result to node 0 */
    XBT_VERB("Multiplication done. Send the sub-result.");

    result = xbt_new0(s_result_t, 1);
    result->row = myjob->row;
    result->col = myjob->col;
    /* deep-copy sC so it survives independently of this node's cleanup */
    result->sC = xbt_matrix_new_sub(sC, NODE_MATRIX_SIZE, NODE_MATRIX_SIZE, 0, 0, NULL);
    task = MSG_task_create("result",100,100,result);
    MSG_task_send(task, "0"); /* node 0's mailbox is named "0" */

  /* Clean up and finish*/
  xbt_matrix_free(myjob->A);
  xbt_matrix_free(myjob->B);
180 * Broadcast the jobs to the nodes of the grid (except to node 0)
/* Send one job task to each node 1..GRID_NUM_NODES-1 (node 0 keeps jobs[0]
 * for itself, hence the caller passes `jobs + 1`). All sends are started
 * asynchronously, then awaited together.
 * NOTE(review): the opening brace and `int node;`/task declarations are
 * missing from this view. */
static void broadcast_jobs(node_job_t *jobs)
  char node_mbox[MAILBOX_NAME_SIZE];
  msg_comm_t comms[GRID_NUM_NODES - 1] = {0};

  XBT_VERB("Broadcast Jobs");
  for (node = 1; node < GRID_NUM_NODES; node++){
    task = MSG_task_create("Job", 100, 100, jobs[node-1]); /* jobs is already offset by 1 */
    snprintf(node_mbox, MAILBOX_NAME_SIZE - 1, "%d", node); /* mailbox name == target node id */
    comms[node-1] = MSG_task_isend(task, node_mbox);

  /* wait for every asynchronous send to complete, then release the comms */
  MSG_comm_waitall(comms, GRID_NUM_NODES-1, -1);
  for (node = 1; node < GRID_NUM_NODES; node++)
    MSG_comm_destroy(comms[node - 1]);
/* Block until a job task arrives on this node's own mailbox (named after
 * `selfid`), extract the node_job_t payload, destroy the task wrapper and
 * return the job. Ownership of the job passes to the caller.
 * NOTE(review): the opening brace, `node_job_t job;`/`msg_error_t err;`
 * declarations and the `return job;` line are missing from this view. */
static node_job_t wait_job(int selfid)
  msg_task_t task = NULL;
  char self_mbox[MAILBOX_NAME_SIZE];

  snprintf(self_mbox, MAILBOX_NAME_SIZE - 1, "%d", selfid);
  err = MSG_task_receive(&task, self_mbox);
  xbt_assert(err == MSG_OK, "Error while receiving from %s (%d)", self_mbox, (int)err);
  job = (node_job_t)MSG_task_get_data(task);
  MSG_task_destroy(task); /* only the wrapper; the job payload lives on */
  XBT_VERB("Got Job (%d,%d)", job->row, job->col);
/* Send a private copy of matrix M to each of the `num_nodes` mailboxes in
 * `nodes`. Uses detached sends (dsend): the sender does not wait, and
 * task_cleanup reclaims the task+copy if the send fails/cancels.
 * NOTE(review): opening brace, loop-variable/task/sM declarations and the
 * closing brace are missing from this view. */
static void broadcast_matrix(xbt_matrix_t M, int num_nodes, int *nodes)
  char node_mbox[MAILBOX_NAME_SIZE];

  for(node=0; node < num_nodes; node++){
    snprintf(node_mbox, MAILBOX_NAME_SIZE - 1, "%d", nodes[node]);
    /* each receiver gets its own copy, so it can free it independently */
    sM = xbt_matrix_new_sub(M, NODE_MATRIX_SIZE, NODE_MATRIX_SIZE, 0, 0, NULL);
    task = MSG_task_create("sub-matrix", 100, 100, sM);
    MSG_task_dsend(task, node_mbox, task_cleanup); /* fire-and-forget */
    XBT_DEBUG("sub-matrix sent to %s", node_mbox);
/* Receive one sub-matrix on this node's own mailbox and hand it to the
 * caller through *sM; the caller takes ownership of the matrix.
 * NOTE(review): the `if (err != MSG_OK)` guard before xbt_die, plus the
 * opening/closing braces and `msg_error_t err;` declaration, are missing
 * from this view of the file — xbt_die is presumably conditional. */
static void get_sub_matrix(xbt_matrix_t *sM, int selfid)
  msg_task_t task = NULL;
  char node_mbox[MAILBOX_NAME_SIZE];

  XBT_VERB("Get sub-matrix");

  snprintf(node_mbox, MAILBOX_NAME_SIZE - 1, "%d", selfid);
  err = MSG_task_receive(&task, node_mbox);
    xbt_die("Error while receiving from %s (%d)", node_mbox, (int)err);
  *sM = (xbt_matrix_t)MSG_task_get_data(task);
  MSG_task_destroy(task); /* wrapper only; *sM stays valid */
/* Cleanup callback for detached sends (MSG_task_dsend): frees the task and
 * its attached matrix payload when the communication is dropped/cancelled.
 * NOTE(review): the `xbt_matrix_free(m);` line expected between the two
 * statements below is missing from this view of the file. */
static void task_cleanup(void *arg){
  msg_task_t task = (msg_task_t)arg;
  xbt_matrix_t m = (xbt_matrix_t)MSG_task_get_data(task);
  MSG_task_destroy(task);
/* Simulation driver: loads the platform from argv[1], spawns the 9
 * (GRID_SIZE * GRID_SIZE) "node" processes on hosts node-0..node-8, runs
 * the simulation and returns nonzero on failure.
 * NOTE(review): the `#endif` lines matching each `#ifdef BENCH_THIS_CODE`,
 * the `argvF[2] = NULL;` argv terminator and the closing braces are
 * missing from this view of the file. */
int main(int argc, char *argv[])
#ifdef BENCH_THIS_CODE
  xbt_os_cputimer_t timer = xbt_os_timer_new(); /* optional CPU-time benchmark */

  MSG_init(&argc, argv);

  MSG_create_environment(argv[1]); /* platform file */

  MSG_function_register("node", node);
  /* hard-coded 9 == GRID_NUM_NODES processes; each gets its id as argv[1] */
  for(int i = 0 ; i< 9; i++) {
    char *hostname = bprintf("node-%d.acme.org", i);
    char **argvF = xbt_new(char *, 3);
    argvF[0] = xbt_strdup("node");
    argvF[1] = bprintf("%d", i);
    MSG_process_create_with_arguments("node", node, NULL, MSG_host_by_name(hostname), 2, argvF);

#ifdef BENCH_THIS_CODE
  xbt_os_cputimer_start(timer);
  msg_error_t res = MSG_main(); /* run the simulation to completion */
#ifdef BENCH_THIS_CODE
  xbt_os_cputimer_stop(timer);
  XBT_CRITICAL("Simulated time: %g", MSG_get_clock());

  return res != MSG_OK; /* 0 on success */
/* Split A and B into GRID_SIZE x GRID_SIZE jobs, one per node: each job
 * records its (row, col) grid position, the ids of its row/col neighbours,
 * and its NODE_MATRIX_SIZE-square sub-blocks of A and B.
 * NOTE(review): the `k++;` counters inside the neighbour loops, the
 * `jobs[node]->A = ` / `->B = ` assignment left-hand sides, and the
 * row/col rollover body are missing from this view of the file. */
static void create_jobs(xbt_matrix_t A, xbt_matrix_t B, node_job_t *jobs)
  int node, j, k, row = 0, col = 0;

  for (node = 0; node < GRID_NUM_NODES; node++){
    XBT_VERB("Create job %d", node);
    jobs[node] = xbt_new0(s_node_job_t, 1);
    jobs[node]->row = row;
    jobs[node]->col = col;

    /* Compute who are the nodes in the same row and column */
    /* than the node receiving this job */
    for (j = 0, k = 0; j < GRID_SIZE; j++) {
      if (node != (GRID_SIZE * row) + j) { /* skip the node itself */
        jobs[node]->nodes_in_row[k] = (GRID_SIZE * row) + j;

    for (j = 0, k = 0; j < GRID_SIZE; j++) {
      if (node != (GRID_SIZE * j) + col) { /* skip the node itself */
        jobs[node]->nodes_in_col[k] = (GRID_SIZE * j) + col;

    /* Assign a sub matrix of A and B to the job */
    /* block offset = grid position * block size */
      xbt_matrix_new_sub(A, NODE_MATRIX_SIZE, NODE_MATRIX_SIZE, NODE_MATRIX_SIZE * row, NODE_MATRIX_SIZE * col, NULL);
      xbt_matrix_new_sub(B, NODE_MATRIX_SIZE, NODE_MATRIX_SIZE, NODE_MATRIX_SIZE * row, NODE_MATRIX_SIZE * col, NULL);

    /* advance row-major through the grid */
    if (++col >= GRID_SIZE){
/* Node 0 helper: asynchronously receive one result task from each of the
 * other GRID_NUM_NODES-1 nodes on mailbox "0", wait for all of them, then
 * unpack the result_t payloads into `results[1..]` (caller frees them).
 * NOTE(review): the `int node;` declaration and the closing braces fall
 * outside this view of the file. */
static void receive_results(result_t *results){
  msg_comm_t comms[GRID_NUM_NODES-1] = {0};
  msg_task_t tasks[GRID_NUM_NODES-1] = {0};

  XBT_VERB("Receive Results.");

  /* Get the result from the nodes in the GRID */
  for (node = 1; node < GRID_NUM_NODES; node++){
    comms[node-1] = MSG_task_irecv(&tasks[node-1], "0"); /* all senders target mailbox "0" */

  MSG_comm_waitall(comms, GRID_NUM_NODES - 1, -1); /* -1 == no timeout */
  for (node = 1; node < GRID_NUM_NODES; node++)
    MSG_comm_destroy(comms[node - 1]);

  /* Reconstruct the result matrix */
  /* NOTE(review): arrival order is arbitrary; results[] is indexed by
   * arrival slot here, while the payload carries the true (row, col). */
  for (node = 1; node < GRID_NUM_NODES; node++){
    results[node] = (result_t)MSG_task_get_data(tasks[node-1]);
    MSG_task_destroy(tasks[node-1]);