+ xbt_free(mbox);
+ // XBT_INFO("shutdown done");
+}
+
+/* Ask the per-transfer overhead process to burn 'computation' flops on
+ * behalf of comm_task, simulating the CPU cost of pushing its data.
+ * Blocks until the overhead process has received the request. */
+static void request_overhead(msg_task_t comm_task, double computation)
+{
+  /* The overhead process listens on a mailbox keyed by the transfer
+   * task's name. */
+  char *mbox = bprintf("__mb_task_tx_overhead_%s", MSG_task_get_name(comm_task));
+  msg_task_t micro_task = MSG_task_create("micro", computation, 0, NULL);
+
+  msg_error_t send_result = MSG_task_send(micro_task, mbox);
+  xbt_assert(send_result == MSG_OK);
+
+  xbt_free(mbox);
+}
+
+/* alpha is the CPU overhead ratio (floating-point operations per byte).
+ *
+ * When the actual migration traffic was 32 MB/s, we observed that the CPU
+ * utilization of the main thread of the Qemu process was 10%. Hence,
+ *   alpha = 0.1 * C / (32 * 1024 * 1024)
+ * where C is the CPU capacity of the PM in flops/s.
+ */
+/* Send comm_task's payload through 'mbox' in fixed-size chunks, modeling
+ * both a bandwidth cap (mig_speed, in bytes/s; <= 0 disables the cap) and
+ * the sender-side CPU cost of the transfer (alpha flops per byte), which
+ * is delegated to the overhead process spawned here. */
+static void task_send_bounded_with_cpu_overhead(msg_task_t comm_task, char *mbox, double mig_speed, double alpha)
+{
+  const double chunk_size = 1024 * 1024 * 10;
+  double bytes_left = MSG_task_get_data_size(comm_task);
+
+  start_overhead_process(comm_task);
+
+  while (bytes_left > 0) {
+    /* Ship at most one chunk per iteration. */
+    double data_size = (bytes_left < chunk_size) ? bytes_left : chunk_size;
+    bytes_left -= data_size;
+
+    double clock_sta = MSG_get_clock();
+
+    /* Charge the CPU cost of this chunk to the overhead process, then
+     * send the chunk itself as a zero-flop micro task. */
+    char *mtask_name = bprintf("__micro_%s", MSG_task_get_name(comm_task));
+    msg_task_t mtask = MSG_task_create(mtask_name, 0, data_size, NULL);
+
+    request_overhead(comm_task, data_size * alpha);
+
+    msg_error_t ret = MSG_task_send(mtask, mbox);
+    xbt_assert(ret == MSG_OK);
+
+    xbt_free(mtask_name);
+
+    double clock_end = MSG_get_clock();
+
+    if (mig_speed > 0) {
+      /* Enforce the bandwidth cap:
+       *   (max bandwidth) > data_size / ((elapsed time) + time_to_sleep)
+       * hence
+       *   time_to_sleep > data_size / (max bandwidth) - (elapsed time)
+       * A non-positive value means the send already took long enough,
+       * so no micro sleep is needed. */
+      double time_to_sleep = data_size / mig_speed - (clock_end - clock_sta);
+      if (time_to_sleep > 0)
+        MSG_process_sleep(time_to_sleep);
+    }
+  }
+
+  shutdown_overhead_process(comm_task);
+}
+
+
+#if 0
+/* NOTE(review): dead code kept for reference; it will NOT compile if
+ * re-enabled: 'need_exit' and 'overhead' are never declared here, and
+ * 'comp_task_name' is declared inside the loop but freed after it
+ * (out of scope). As written it would also leak one name buffer per
+ * iteration. Fix declarations and free inside the loop before reviving. */
+static void make_cpu_overhead_of_data_transfer(msg_task_t comm_task, double init_comm_size)
+{
+ double prev_remaining = init_comm_size;
+
+ for (;;) {
+ double remaining = MSG_task_get_remaining_communication(comm_task);
+ if (remaining == 0)
+ need_exit = 1;
+
+ /* Flops to burn are proportional to the bytes sent since last poll. */
+ double sent = prev_remaining - remaining;
+ double comp_size = sent * overhead;
+
+
+ char *comp_task_name = bprintf("__sender_overhead%s", MSG_task_get_name(comm_task));
+ msg_task_t comp_task = MSG_task_create(comp_task_name, comp_size, 0, NULL);
+ MSG_task_execute(comp_task);
+ MSG_task_destroy(comp_task);
+
+ if (need_exit)
+ break;
+
+ prev_remaining = remaining;
+
+ }
+
+ xbt_free(comp_task_name);
+}
+#endif
+
+// #define USE_MICRO_TASK 1
+
+#if 0
+// const double alpha = 0.1L * 1.0E8 / (32L * 1024 * 1024);
+// const double alpha = 0.25L * 1.0E8 / (85L * 1024 * 1024);
+// const double alpha = 0.20L * 1.0E8 / (85L * 1024 * 1024);
+// const double alpha = 0.25L * 1.0E8 / (85L * 1024 * 1024);
+// const double alpha = 0.32L * 1.0E8 / (24L * 1024 * 1024); // makes super good values for 32 mbytes/s
+//const double alpha = 0.32L * 1.0E8 / (32L * 1024 * 1024);
+// const double alpha = 0.56L * 1.0E8 / (80L * 1024 * 1024);
+////const double alpha = 0.20L * 1.0E8 / (80L * 1024 * 1024);
+// const double alpha = 0.56L * 1.0E8 / (90L * 1024 * 1024);
+// const double alpha = 0.66L * 1.0E8 / (90L * 1024 * 1024);
+// const double alpha = 0.20L * 1.0E8 / (80L * 1024 * 1024);
+
+/* CPU 22% when 80Mbyte/s */
+const double alpha = 0.22L * 1.0E8 / (80L * 1024 * 1024);
+#endif
+
+
+static void send_migration_data(const char *vm_name, const char *src_pm_name, const char *dst_pm_name,
+ sg_size_t size, char *mbox, int stage, int stage2_round, double mig_speed, double xfer_cpu_overhead)
+{
+ char *task_name = get_mig_task_name(vm_name, src_pm_name, dst_pm_name, stage);
+ msg_task_t task = MSG_task_create(task_name, 0, size, NULL);
+
+ /* TODO: clean up */
+
+ double clock_sta = MSG_get_clock();
+
+#ifdef USE_MICRO_TASK
+
+ task_send_bounded_with_cpu_overhead(task, mbox, mig_speed, xfer_cpu_overhead);
+
+#else
+ msg_error_t ret;
+ if (mig_speed > 0)
+ ret = MSG_task_send_bounded(task, mbox, mig_speed);
+ else
+ ret = MSG_task_send(task, mbox);
+ xbt_assert(ret == MSG_OK);
+#endif
+
+ double clock_end = MSG_get_clock();
+ double duration = clock_end - clock_sta;
+ double actual_speed = size / duration;
+#ifdef USE_MICRO_TASK
+ double cpu_utilization = size * xfer_cpu_overhead / duration / 1.0E8;
+#else
+ double cpu_utilization = 0;
+#endif
+
+
+
+
+ if (stage == 2){
+ XBT_DEBUG("mig-stage%d.%d: sent %llu duration %f actual_speed %f (target %f) cpu %f", stage, stage2_round, size, duration, actual_speed, mig_speed, cpu_utilization);}
+ else{
+ XBT_DEBUG("mig-stage%d: sent %llu duration %f actual_speed %f (target %f) cpu %f", stage, size, duration, actual_speed, mig_speed, cpu_utilization);
+ }
+