/* Copyright (c) 2007-2015. The SimGrid Team.
 * All rights reserved. */

/* This program is free software; you can redistribute it and/or modify it
 * under the terms of the license (GNU LGPL) which comes with this package. */
#include <stdio.h>
#include <string.h>

#include "simgrid/msg.h"
8 XBT_LOG_NEW_DEFAULT_CATEGORY(msg_test, "Messages specific for this msg example");
10 static int worker_main(int argc, char *argv[])
12 msg_task_t task = MSG_process_get_data(MSG_process_self());
13 MSG_task_execute(task);
15 XBT_INFO("task %p bye", task);
22 double prev_computation_amount;
26 static void task_data_init_clock(struct task_data *t)
28 t->prev_computation_amount = MSG_task_get_flops_amount(t->task);
29 t->prev_clock = MSG_get_clock();
32 static void task_data_get_clock(struct task_data *t)
34 double now_computation_amount = MSG_task_get_flops_amount(t->task);
35 double now_clock = MSG_get_clock();
37 double done = t->prev_computation_amount - now_computation_amount;
38 double duration = now_clock - t->prev_clock;
40 XBT_INFO("%s: %f fops/s", MSG_task_get_name(t->task), done / duration);
42 t->prev_computation_amount = now_computation_amount;
43 t->prev_clock = now_clock;
46 static void test_pm_pin(void)
48 xbt_dynar_t hosts_dynar = MSG_hosts_as_dynar();
49 msg_host_t pm0 = xbt_dynar_get_as(hosts_dynar, 0, msg_host_t);
50 msg_host_t pm1 = xbt_dynar_get_as(hosts_dynar, 1, msg_host_t);
51 msg_host_t pm2 = xbt_dynar_get_as(hosts_dynar, 2, msg_host_t);
58 t1.task = MSG_task_create("Task1", 1e16, 0, NULL);
59 t2.task = MSG_task_create("Task2", 1e16, 0, NULL);
60 t3.task = MSG_task_create("Task3", 1e16, 0, NULL);
61 t4.task = MSG_task_create("Task4", 1e16, 0, NULL);
63 MSG_process_create("worker1", worker_main, t1.task, pm1);
64 MSG_process_create("worker2", worker_main, t2.task, pm1);
65 MSG_process_create("worker3", worker_main, t3.task, pm1);
66 MSG_process_create("worker4", worker_main, t4.task, pm1);
68 XBT_INFO("## 1. start 4 tasks on PM1 (2 cores)");
69 task_data_init_clock(&t1);
70 task_data_init_clock(&t2);
71 task_data_init_clock(&t3);
72 task_data_init_clock(&t4);
74 MSG_process_sleep(10);
75 task_data_get_clock(&t1);
76 task_data_get_clock(&t2);
77 task_data_get_clock(&t3);
78 task_data_get_clock(&t4);
80 XBT_INFO("## 2. pin all tasks to CPU0");
81 MSG_task_set_affinity(t1.task, pm1, 0x01);
82 MSG_task_set_affinity(t2.task, pm1, 0x01);
83 MSG_task_set_affinity(t3.task, pm1, 0x01);
84 MSG_task_set_affinity(t4.task, pm1, 0x01);
86 MSG_process_sleep(10);
87 task_data_get_clock(&t1);
88 task_data_get_clock(&t2);
89 task_data_get_clock(&t3);
90 task_data_get_clock(&t4);
92 XBT_INFO("## 3. clear the affinity of task4");
93 MSG_task_set_affinity(t4.task, pm1, 0);
95 MSG_process_sleep(10);
96 task_data_get_clock(&t1);
97 task_data_get_clock(&t2);
98 task_data_get_clock(&t3);
99 task_data_get_clock(&t4);
101 XBT_INFO("## 4. clear the affinity of task3");
102 MSG_task_set_affinity(t3.task, pm1, 0);
104 MSG_process_sleep(10);
105 task_data_get_clock(&t1);
106 task_data_get_clock(&t2);
107 task_data_get_clock(&t3);
108 task_data_get_clock(&t4);
110 XBT_INFO("## 5. clear the affinity of task2");
111 MSG_task_set_affinity(t2.task, pm1, 0);
113 MSG_process_sleep(10);
114 task_data_get_clock(&t1);
115 task_data_get_clock(&t2);
116 task_data_get_clock(&t3);
117 task_data_get_clock(&t4);
119 XBT_INFO("## 6. pin all tasks to CPU0 of another PM (no effect now)");
120 MSG_task_set_affinity(t1.task, pm0, 0);
121 MSG_task_set_affinity(t2.task, pm0, 0);
122 MSG_task_set_affinity(t3.task, pm2, 0);
123 MSG_task_set_affinity(t4.task, pm2, 0);
125 MSG_process_sleep(10);
126 task_data_get_clock(&t1);
127 task_data_get_clock(&t2);
128 task_data_get_clock(&t3);
129 task_data_get_clock(&t4);
131 MSG_task_cancel(t1.task);
132 MSG_task_cancel(t2.task);
133 MSG_task_cancel(t3.task);
134 MSG_task_cancel(t4.task);
135 MSG_process_sleep(10);
136 MSG_task_destroy(t1.task);
137 MSG_task_destroy(t2.task);
138 MSG_task_destroy(t3.task);
139 MSG_task_destroy(t4.task);
142 static void test_vm_pin(void)
144 xbt_dynar_t hosts_dynar = MSG_hosts_as_dynar();
145 msg_host_t pm0 = xbt_dynar_get_as(hosts_dynar, 0, msg_host_t); // 1 cores
146 msg_host_t pm1 = xbt_dynar_get_as(hosts_dynar, 1, msg_host_t); // 2 cores
147 msg_host_t pm2 = xbt_dynar_get_as(hosts_dynar, 2, msg_host_t); // 4 cores
149 /* set up VMs on PM2 (4 cores) */
150 msg_vm_t vm0 = MSG_vm_create_core(pm2, "VM0");
151 msg_vm_t vm1 = MSG_vm_create_core(pm2, "VM1");
152 msg_vm_t vm2 = MSG_vm_create_core(pm2, "VM2");
153 msg_vm_t vm3 = MSG_vm_create_core(pm2, "VM3");
155 s_vm_params_t params;
156 memset(¶ms, 0, sizeof(params));
157 params.ramsize = 1L * 1024 * 1024;
158 params.skip_stage1 = 1;
159 params.skip_stage2 = 1;
160 //params.mig_speed = 1L * 1024 * 1024;
161 MSG_host_set_params(vm0, ¶ms);
162 MSG_host_set_params(vm1, ¶ms);
163 MSG_host_set_params(vm2, ¶ms);
164 MSG_host_set_params(vm3, ¶ms);
171 /* set up tasks and processes */
177 t0.task = MSG_task_create("Task0", 1e16, 0, NULL);
178 t1.task = MSG_task_create("Task1", 1e16, 0, NULL);
179 t2.task = MSG_task_create("Task2", 1e16, 0, NULL);
180 t3.task = MSG_task_create("Task3", 1e16, 0, NULL);
182 MSG_process_create("worker0", worker_main, t0.task, vm0);
183 MSG_process_create("worker1", worker_main, t1.task, vm1);
184 MSG_process_create("worker2", worker_main, t2.task, vm2);
185 MSG_process_create("worker3", worker_main, t3.task, vm3);
187 /* start experiments */
188 XBT_INFO("## 1. start 4 VMs on PM2 (4 cores)");
189 task_data_init_clock(&t0);
190 task_data_init_clock(&t1);
191 task_data_init_clock(&t2);
192 task_data_init_clock(&t3);
194 MSG_process_sleep(10);
195 task_data_get_clock(&t0);
196 task_data_get_clock(&t1);
197 task_data_get_clock(&t2);
198 task_data_get_clock(&t3);
200 XBT_INFO("## 2. pin all VMs to CPU0 of PM2");
201 MSG_vm_set_affinity(vm0, pm2, 0x01);
202 MSG_vm_set_affinity(vm1, pm2, 0x01);
203 MSG_vm_set_affinity(vm2, pm2, 0x01);
204 MSG_vm_set_affinity(vm3, pm2, 0x01);
206 MSG_process_sleep(10);
207 task_data_get_clock(&t0);
208 task_data_get_clock(&t1);
209 task_data_get_clock(&t2);
210 task_data_get_clock(&t3);
212 XBT_INFO("## 3. pin all VMs to CPU0 of PM1 (no effect at now)");
213 /* Because VMs are on PM2, the below operations do not effect computation now. */
214 MSG_vm_set_affinity(vm0, pm1, 0x01);
215 MSG_vm_set_affinity(vm1, pm1, 0x01);
216 MSG_vm_set_affinity(vm2, pm1, 0x01);
217 MSG_vm_set_affinity(vm3, pm1, 0x01);
219 MSG_process_sleep(10);
220 task_data_get_clock(&t0);
221 task_data_get_clock(&t1);
222 task_data_get_clock(&t2);
223 task_data_get_clock(&t3);
225 XBT_INFO("## 4. unpin VM0, and pin VM2 and VM3 to CPU1 of PM2");
226 MSG_vm_set_affinity(vm0, pm2, 0x00);
227 MSG_vm_set_affinity(vm2, pm2, 0x02);
228 MSG_vm_set_affinity(vm3, pm2, 0x02);
230 MSG_process_sleep(10);
231 task_data_get_clock(&t0);
232 task_data_get_clock(&t1);
233 task_data_get_clock(&t2);
234 task_data_get_clock(&t3);
236 XBT_INFO("## 5. migrate all VMs to PM0 (only 1 CPU core)");
237 MSG_vm_migrate(vm0, pm0);
238 MSG_vm_migrate(vm1, pm0);
239 MSG_vm_migrate(vm2, pm0);
240 MSG_vm_migrate(vm3, pm0);
242 MSG_process_sleep(10);
243 task_data_get_clock(&t0);
244 task_data_get_clock(&t1);
245 task_data_get_clock(&t2);
246 task_data_get_clock(&t3);
248 MSG_process_sleep(10);
249 task_data_get_clock(&t0);
250 task_data_get_clock(&t1);
251 task_data_get_clock(&t2);
252 task_data_get_clock(&t3);
254 XBT_INFO("## 6. migrate all VMs to PM1 (2 CPU cores, with affinity settings)");
255 MSG_vm_migrate(vm0, pm1);
256 MSG_vm_migrate(vm1, pm1);
257 MSG_vm_migrate(vm2, pm1);
258 MSG_vm_migrate(vm3, pm1);
260 MSG_process_sleep(10);
261 task_data_get_clock(&t0);
262 task_data_get_clock(&t1);
263 task_data_get_clock(&t2);
264 task_data_get_clock(&t3);
266 MSG_process_sleep(10);
267 task_data_get_clock(&t0);
268 task_data_get_clock(&t1);
269 task_data_get_clock(&t2);
270 task_data_get_clock(&t3);
273 XBT_INFO("## 7. clear affinity settings on PM1");
274 MSG_vm_set_affinity(vm0, pm1, 0);
275 MSG_vm_set_affinity(vm1, pm1, 0);
276 MSG_vm_set_affinity(vm2, pm1, 0);
277 MSG_vm_set_affinity(vm3, pm1, 0);
279 MSG_process_sleep(10);
280 task_data_get_clock(&t0);
281 task_data_get_clock(&t1);
282 task_data_get_clock(&t2);
283 task_data_get_clock(&t3);
285 MSG_process_sleep(10);
286 task_data_get_clock(&t0);
287 task_data_get_clock(&t1);
288 task_data_get_clock(&t2);
289 task_data_get_clock(&t3);
291 /* clean up everything */
292 MSG_task_cancel(t0.task);
293 MSG_task_cancel(t1.task);
294 MSG_task_cancel(t2.task);
295 MSG_task_cancel(t3.task);
296 MSG_process_sleep(10);
297 MSG_task_destroy(t0.task);
298 MSG_task_destroy(t1.task);
299 MSG_task_destroy(t2.task);
300 MSG_task_destroy(t3.task);
/* Master process: run the PM affinity experiment, then the VM one.
 * NOTE(review): the calls to the two test functions were lost in extraction
 * and are reconstructed here — they are defined above and otherwise unused. */
static int master_main(int argc, char *argv[])
{
  XBT_INFO("=== Test PM (set affinity) ===");
  test_pm_pin();

  XBT_INFO("=== Test VM (set affinity) ===");
  test_vm_pin();

  return 0;
}
319 int main(int argc, char *argv[])
321 /* Get the arguments */
322 MSG_init(&argc, argv);
324 /* load the platform file */
326 printf("Usage: %s examples/msg/cloud/multicore_plat.xml\n", argv[0]);
330 MSG_create_environment(argv[1]);
332 xbt_dynar_t hosts_dynar = MSG_hosts_as_dynar();
333 msg_host_t pm0 = xbt_dynar_get_as(hosts_dynar, 0, msg_host_t);
334 msg_host_t pm1 = xbt_dynar_get_as(hosts_dynar, 1, msg_host_t);
335 msg_host_t pm2 = xbt_dynar_get_as(hosts_dynar, 2, msg_host_t);
337 XBT_INFO("%s: %d core(s), %f flops/s per each", MSG_host_get_name(pm0), MSG_host_get_core_number(pm0),
338 MSG_host_get_speed(pm0));
339 XBT_INFO("%s: %d core(s), %f flops/s per each", MSG_host_get_name(pm1), MSG_host_get_core_number(pm1),
340 MSG_host_get_speed(pm1));
341 XBT_INFO("%s: %d core(s), %f flops/s per each", MSG_host_get_name(pm2), MSG_host_get_core_number(pm2),
342 MSG_host_get_speed(pm2));
344 MSG_process_create("master", master_main, NULL, pm0);
346 int res = MSG_main();
347 XBT_INFO("Bye (simulation time %g)", MSG_get_clock());
349 return !(res == MSG_OK);