/* Copyright (c) 2007-2015. The SimGrid Team.
 * All rights reserved. */

/* This program is free software; you can redistribute it and/or modify it
 * under the terms of the license (GNU LGPL) which comes with this package. */
#include <stdio.h>
#include <string.h>

#include "simgrid/msg.h"
8 XBT_LOG_NEW_DEFAULT_CATEGORY(msg_test, "Messages specific for this msg example");
10 static int worker_main(int argc, char *argv[])
12 msg_task_t task = MSG_process_get_data(MSG_process_self());
13 MSG_task_execute(task);
15 XBT_INFO("task %p bye", task);
22 double prev_computation_amount;
26 static void task_data_init_clock(struct task_data *t)
28 t->prev_computation_amount = MSG_task_get_flops_amount(t->task);
29 t->prev_clock = MSG_get_clock();
32 static void task_data_get_clock(struct task_data *t)
34 double now_computation_amount = MSG_task_get_flops_amount(t->task);
35 double now_clock = MSG_get_clock();
37 double done = t->prev_computation_amount - now_computation_amount;
38 double duration = now_clock - t->prev_clock;
40 XBT_INFO("%s: %f fops/s", MSG_task_get_name(t->task), done / duration);
42 t->prev_computation_amount = now_computation_amount;
43 t->prev_clock = now_clock;
46 static void test_pm_pin(void)
48 xbt_dynar_t hosts_dynar = MSG_hosts_as_dynar();
49 msg_host_t pm0 = xbt_dynar_get_as(hosts_dynar, 0, msg_host_t);
50 msg_host_t pm1 = xbt_dynar_get_as(hosts_dynar, 1, msg_host_t);
51 msg_host_t pm2 = xbt_dynar_get_as(hosts_dynar, 2, msg_host_t);
52 xbt_dynar_free(&hosts_dynar);
59 t1.task = MSG_task_create("Task1", 1e16, 0, NULL);
60 t2.task = MSG_task_create("Task2", 1e16, 0, NULL);
61 t3.task = MSG_task_create("Task3", 1e16, 0, NULL);
62 t4.task = MSG_task_create("Task4", 1e16, 0, NULL);
64 MSG_process_create("worker1", worker_main, t1.task, pm2);
65 MSG_process_create("worker2", worker_main, t2.task, pm2);
66 MSG_process_create("worker3", worker_main, t3.task, pm2);
67 MSG_process_create("worker4", worker_main, t4.task, pm2);
69 XBT_INFO("## 1. start 4 tasks on PM2 (2 cores)");
70 task_data_init_clock(&t1);
71 task_data_init_clock(&t2);
72 task_data_init_clock(&t3);
73 task_data_init_clock(&t4);
75 MSG_process_sleep(10);
76 task_data_get_clock(&t1);
77 task_data_get_clock(&t2);
78 task_data_get_clock(&t3);
79 task_data_get_clock(&t4);
81 XBT_INFO("## 2. pin all tasks to CPU0");
82 MSG_task_set_affinity(t1.task, pm2, 0x01);
83 MSG_task_set_affinity(t2.task, pm2, 0x01);
84 MSG_task_set_affinity(t3.task, pm2, 0x01);
85 MSG_task_set_affinity(t4.task, pm2, 0x01);
87 MSG_process_sleep(10);
88 task_data_get_clock(&t1);
89 task_data_get_clock(&t2);
90 task_data_get_clock(&t3);
91 task_data_get_clock(&t4);
93 XBT_INFO("## 3. clear the affinity of task4");
94 MSG_task_set_affinity(t4.task, pm2, 0);
96 MSG_process_sleep(10);
97 task_data_get_clock(&t1);
98 task_data_get_clock(&t2);
99 task_data_get_clock(&t3);
100 task_data_get_clock(&t4);
102 XBT_INFO("## 4. clear the affinity of task3");
103 MSG_task_set_affinity(t3.task, pm2, 0);
105 MSG_process_sleep(10);
106 task_data_get_clock(&t1);
107 task_data_get_clock(&t2);
108 task_data_get_clock(&t3);
109 task_data_get_clock(&t4);
111 XBT_INFO("## 5. clear the affinity of task2");
112 MSG_task_set_affinity(t2.task, pm2, 0);
114 MSG_process_sleep(10);
115 task_data_get_clock(&t1);
116 task_data_get_clock(&t2);
117 task_data_get_clock(&t3);
118 task_data_get_clock(&t4);
120 XBT_INFO("## 6. pin all tasks to CPU0 of another PM (no effect now)");
121 MSG_task_set_affinity(t1.task, pm0, 0);
122 MSG_task_set_affinity(t2.task, pm0, 0);
123 MSG_task_set_affinity(t3.task, pm1, 0);
124 MSG_task_set_affinity(t4.task, pm1, 0);
126 MSG_process_sleep(10);
127 task_data_get_clock(&t1);
128 task_data_get_clock(&t2);
129 task_data_get_clock(&t3);
130 task_data_get_clock(&t4);
132 MSG_task_cancel(t1.task);
133 MSG_task_cancel(t2.task);
134 MSG_task_cancel(t3.task);
135 MSG_task_cancel(t4.task);
136 MSG_process_sleep(10);
137 MSG_task_destroy(t1.task);
138 MSG_task_destroy(t2.task);
139 MSG_task_destroy(t3.task);
140 MSG_task_destroy(t4.task);
143 static void test_vm_pin(void)
145 xbt_dynar_t hosts_dynar = MSG_hosts_as_dynar();
146 msg_host_t pm0 = xbt_dynar_get_as(hosts_dynar, 0, msg_host_t); // 1 cores
147 msg_host_t pm1 = xbt_dynar_get_as(hosts_dynar, 1, msg_host_t); // 2 cores
148 msg_host_t pm2 = xbt_dynar_get_as(hosts_dynar, 2, msg_host_t); // 4 cores
149 xbt_dynar_free(&hosts_dynar);
151 /* set up VMs on PM1 (4 cores) */
152 msg_vm_t vm0 = MSG_vm_create_core(pm1, "VM0");
153 msg_vm_t vm1 = MSG_vm_create_core(pm1, "VM1");
154 msg_vm_t vm2 = MSG_vm_create_core(pm1, "VM2");
155 msg_vm_t vm3 = MSG_vm_create_core(pm1, "VM3");
157 s_vm_params_t params;
158 memset(¶ms, 0, sizeof(params));
159 params.ramsize = 1L * 1024 * 1024;
160 params.skip_stage1 = 1;
161 params.skip_stage2 = 1;
162 //params.mig_speed = 1L * 1024 * 1024;
163 MSG_host_set_params(vm0, ¶ms);
164 MSG_host_set_params(vm1, ¶ms);
165 MSG_host_set_params(vm2, ¶ms);
166 MSG_host_set_params(vm3, ¶ms);
173 /* set up tasks and processes */
179 t0.task = MSG_task_create("Task0", 1e16, 0, NULL);
180 t1.task = MSG_task_create("Task1", 1e16, 0, NULL);
181 t2.task = MSG_task_create("Task2", 1e16, 0, NULL);
182 t3.task = MSG_task_create("Task3", 1e16, 0, NULL);
184 MSG_process_create("worker0", worker_main, t0.task, vm0);
185 MSG_process_create("worker1", worker_main, t1.task, vm1);
186 MSG_process_create("worker2", worker_main, t2.task, vm2);
187 MSG_process_create("worker3", worker_main, t3.task, vm3);
189 /* start experiments */
190 XBT_INFO("## 1. start 4 VMs on PM1 (4 cores)");
191 task_data_init_clock(&t0);
192 task_data_init_clock(&t1);
193 task_data_init_clock(&t2);
194 task_data_init_clock(&t3);
196 MSG_process_sleep(10);
197 task_data_get_clock(&t0);
198 task_data_get_clock(&t1);
199 task_data_get_clock(&t2);
200 task_data_get_clock(&t3);
202 XBT_INFO("## 2. pin all VMs to CPU0 of PM1");
203 MSG_vm_set_affinity(vm0, pm1, 0x01);
204 MSG_vm_set_affinity(vm1, pm1, 0x01);
205 MSG_vm_set_affinity(vm2, pm1, 0x01);
206 MSG_vm_set_affinity(vm3, pm1, 0x01);
208 MSG_process_sleep(10);
209 task_data_get_clock(&t0);
210 task_data_get_clock(&t1);
211 task_data_get_clock(&t2);
212 task_data_get_clock(&t3);
214 XBT_INFO("## 3. pin all VMs to CPU0 of PM2(no effect at now)");
215 /* Because VMs are on PM2, the below operations do not effect computation now. */
216 MSG_vm_set_affinity(vm0, pm2, 0x01);
217 MSG_vm_set_affinity(vm1, pm2, 0x01);
218 MSG_vm_set_affinity(vm2, pm2, 0x01);
219 MSG_vm_set_affinity(vm3, pm2, 0x01);
221 MSG_process_sleep(10);
222 task_data_get_clock(&t0);
223 task_data_get_clock(&t1);
224 task_data_get_clock(&t2);
225 task_data_get_clock(&t3);
227 XBT_INFO("## 4. unpin VM0, and pin VM2 and VM3 to CPU1 of PM1");
228 MSG_vm_set_affinity(vm0, pm1, 0x00);
229 MSG_vm_set_affinity(vm2, pm1, 0x02);
230 MSG_vm_set_affinity(vm3, pm1, 0x02);
232 MSG_process_sleep(10);
233 task_data_get_clock(&t0);
234 task_data_get_clock(&t1);
235 task_data_get_clock(&t2);
236 task_data_get_clock(&t3);
238 XBT_INFO("## 5. migrate all VMs to PM0 (only 1 CPU core)");
239 MSG_vm_migrate(vm0, pm0);
240 MSG_vm_migrate(vm1, pm0);
241 MSG_vm_migrate(vm2, pm0);
242 MSG_vm_migrate(vm3, pm0);
244 MSG_process_sleep(10);
245 task_data_get_clock(&t0);
246 task_data_get_clock(&t1);
247 task_data_get_clock(&t2);
248 task_data_get_clock(&t3);
250 MSG_process_sleep(10);
251 task_data_get_clock(&t0);
252 task_data_get_clock(&t1);
253 task_data_get_clock(&t2);
254 task_data_get_clock(&t3);
256 XBT_INFO("## 6. migrate all VMs to PM2 (2 CPU cores, with affinity settings)");
257 MSG_vm_migrate(vm0, pm2);
258 MSG_vm_migrate(vm1, pm2);
259 MSG_vm_migrate(vm2, pm2);
260 MSG_vm_migrate(vm3, pm2);
262 MSG_process_sleep(10);
263 task_data_get_clock(&t0);
264 task_data_get_clock(&t1);
265 task_data_get_clock(&t2);
266 task_data_get_clock(&t3);
268 MSG_process_sleep(10);
269 task_data_get_clock(&t0);
270 task_data_get_clock(&t1);
271 task_data_get_clock(&t2);
272 task_data_get_clock(&t3);
275 XBT_INFO("## 7. clear affinity settings on PM1");
276 MSG_vm_set_affinity(vm0, pm2, 0);
277 MSG_vm_set_affinity(vm1, pm2, 0);
278 MSG_vm_set_affinity(vm2, pm2, 0);
279 MSG_vm_set_affinity(vm3, pm2, 0);
281 MSG_process_sleep(10);
282 task_data_get_clock(&t0);
283 task_data_get_clock(&t1);
284 task_data_get_clock(&t2);
285 task_data_get_clock(&t3);
287 MSG_process_sleep(10);
288 task_data_get_clock(&t0);
289 task_data_get_clock(&t1);
290 task_data_get_clock(&t2);
291 task_data_get_clock(&t3);
293 /* clean up everything */
294 MSG_task_cancel(t0.task);
295 MSG_task_cancel(t1.task);
296 MSG_task_cancel(t2.task);
297 MSG_task_cancel(t3.task);
298 MSG_process_sleep(10);
299 MSG_task_destroy(t0.task);
300 MSG_task_destroy(t1.task);
301 MSG_task_destroy(t2.task);
302 MSG_task_destroy(t3.task);
/* Driver process: runs the PM affinity test, then the VM affinity test.
 * (Calls reconstructed to match the banners; confirm against upstream.) */
static int master_main(int argc, char *argv[])
{
  XBT_INFO("=== Test PM (set affinity) ===");
  test_pm_pin();

  XBT_INFO("=== Test VM (set affinity) ===");
  test_vm_pin();

  return 0;
}
321 int main(int argc, char *argv[])
323 /* Get the arguments */
324 MSG_init(&argc, argv);
326 /* load the platform file */
328 printf("Usage: %s examples/msg/cloud/multicore_plat.xml\n", argv[0]);
332 MSG_create_environment(argv[1]);
334 xbt_dynar_t hosts_dynar = MSG_hosts_as_dynar();
335 msg_host_t pm0 = xbt_dynar_get_as(hosts_dynar, 0, msg_host_t);
336 msg_host_t pm1 = xbt_dynar_get_as(hosts_dynar, 1, msg_host_t);
337 msg_host_t pm2 = xbt_dynar_get_as(hosts_dynar, 2, msg_host_t);
338 xbt_dynar_free(&hosts_dynar);
340 XBT_INFO("%s: %d core(s), %f flops/s per each", MSG_host_get_name(pm0), MSG_host_get_core_number(pm0),
341 MSG_host_get_speed(pm0));
342 XBT_INFO("%s: %d core(s), %f flops/s per each", MSG_host_get_name(pm1), MSG_host_get_core_number(pm1),
343 MSG_host_get_speed(pm1));
344 XBT_INFO("%s: %d core(s), %f flops/s per each", MSG_host_get_name(pm2), MSG_host_get_core_number(pm2),
345 MSG_host_get_speed(pm2));
347 MSG_process_create("master", master_main, NULL, pm0);
349 int res = MSG_main();
350 XBT_INFO("Bye (simulation time %g)", MSG_get_clock());
352 return !(res == MSG_OK);