X-Git-Url: http://bilbo.iut-bm.univ-fcomte.fr/pub/gitweb/simgrid.git/blobdiff_plain/0d30bd16eb15415a787a20b2c7ece468500c2487..9dc2163bdad52675b0891b2fd9cc00131eb8e0d0:/src/msg/msg_vm.c

diff --git a/src/msg/msg_vm.c b/src/msg/msg_vm.c
index e423f9ea88..bcb2318b61 100644
--- a/src/msg/msg_vm.c
+++ b/src/msg/msg_vm.c
@@ -328,7 +328,8 @@ static int migration_rx_fun(int argc, char *argv[])
   const char *src_pm_name = argv[2];
   const char *dst_pm_name = argv[3];
   msg_vm_t vm = MSG_get_host_by_name(vm_name);
-  msg_vm_t dst_pm = MSG_get_host_by_name(dst_pm_name);
+  msg_host_t src_pm = MSG_get_host_by_name(src_pm_name);
+  msg_host_t dst_pm = MSG_get_host_by_name(dst_pm_name);
 
   s_ws_params_t params;
 
@@ -362,9 +363,21 @@ static int migration_rx_fun(int argc, char *argv[])
   }
 
+  /* remove the current affinity setting */
+  simcall_vm_set_affinity(vm, src_pm, 0);
+
   simcall_vm_migrate(vm, dst_pm);
   simcall_vm_resume(vm);
 
+  /* install the affinity setting of the VM on the destination pm */
+  {
+    msg_host_priv_t priv = msg_host_resource_priv(vm);
+
+    unsigned long affinity_mask = (unsigned long) xbt_dict_get_or_null_ext(priv->affinity_mask_db, (char *) dst_pm, sizeof(msg_host_t));
+    simcall_vm_set_affinity(vm, dst_pm, affinity_mask);
+    XBT_INFO("set affinity(0x%04lx@%s) for %s", affinity_mask, MSG_host_get_name(dst_pm), MSG_host_get_name(vm));
+  }
+
   {
     char *task_name = get_mig_task_name(vm_name, src_pm_name, dst_pm_name, 4);
@@ -455,7 +468,8 @@ static double lookup_computed_flop_counts(msg_vm_t vm, int stage_for_fancy_debug
   dirty_page_t dp = NULL;
   xbt_dict_foreach(priv->dp_objs, cursor, key, dp) {
     double remaining = MSG_task_get_remaining_computation(dp->task);
-    double clock = MSG_get_clock();
+
+    double clock = MSG_get_clock();
 
     // total += calc_updated_pages(key, vm, dp, remaining, clock);
     total += get_computed(key, vm, dp, remaining, clock);
@@ -762,7 +776,7 @@ static void make_cpu_overhead_of_data_transfer(msg_task_t comm_task, double init
 }
 #endif
 
-#define USE_MICRO_TASK 1
+// #define USE_MICRO_TASK 1
 
 #if 0
 // const double alpha = 0.1L * 1.0E8 / (32L * 1024 * 1024);
@@ -861,7 +875,8 @@ static double send_stage1(msg_host_t vm, const char *src_pm_name, const char *ds
   const char *vm_name = MSG_host_get_name(vm);
   char *mbox = get_mig_mbox_src_dst(vm_name, src_pm_name, dst_pm_name);
 
-  const long chunksize = 1024 * 1024 * 100;
+  // const long chunksize = 1024 * 1024 * 100;
+  const long chunksize = 1024L * 1024 * 100000;
   long remaining = ramsize;
 
   double computed_total = 0;
@@ -877,12 +892,12 @@ static double send_stage1(msg_host_t vm, const char *src_pm_name, const char *ds
     double computed = lookup_computed_flop_counts(vm, 1, 0);
     computed_total += computed;
 
-    {
-      double updated_size = get_updated_size(computed, dp_rate, dp_cap);
+    // {
+    //   double updated_size = get_updated_size(computed, dp_rate, dp_cap);
 
-      double overhead = dpt_cpu_overhead * updated_size;
-      launch_deferred_exec_process(vm, overhead, 10000);
-    }
+    //   double overhead = dpt_cpu_overhead * updated_size;
+    //   launch_deferred_exec_process(vm, overhead, 10000);
+    // }
   }
 
   return computed_total;
@@ -890,6 +905,14 @@ static double send_stage1(msg_host_t vm, const char *src_pm_name, const char *ds
 
 
 
+static double get_threshold_value(double bandwidth, double max_downtime)
+{
+  /* This value assumes the network link is 1Gbps. */
+  // double threshold = max_downtime * 125 * 1024 * 1024;
+  double threshold = max_downtime * bandwidth;
+
+  return threshold;
+}
 
 static int migration_tx_fun(int argc, char *argv[])
 {
@@ -922,8 +945,7 @@ static int migration_tx_fun(int argc, char *argv[])
     max_downtime = 0.03;
   }
 
-  /* This value assumes the network link is 1Gbps. */
-  double threshold = max_downtime * 125 * 1024 * 1024;
+  double threshold = 0.00001; /* TODO: cleanup */
 
   /* setting up parameters has done */
 
@@ -943,8 +965,15 @@ static int migration_tx_fun(int argc, char *argv[])
     // send_migration_data(vm_name, src_pm_name, dst_pm_name, ramsize, mbox, 1, 0, mig_speed, xfer_cpu_overhead);
 
     /* send ramsize, but split it */
+    double clock_prev_send = MSG_get_clock();
+
     computed_during_stage1 = send_stage1(vm, src_pm_name, dst_pm_name, ramsize, mig_speed, xfer_cpu_overhead, dp_rate, dp_cap, dpt_cpu_overhead);
     remaining_size -= ramsize;
+
+    double clock_post_send = MSG_get_clock();
+    double bandwidth = ramsize / (clock_post_send - clock_prev_send);
+    threshold = get_threshold_value(bandwidth, max_downtime);
+    XBT_INFO("actual bandwidth %f, threshold %f", bandwidth / 1024 / 1024, threshold);
   }
 
 
@@ -974,27 +1003,40 @@ static int migration_tx_fun(int argc, char *argv[])
         stage2_round, updated_size, computed_during_stage1, dp_rate, dp_cap);
 
 
-    if (stage2_round != 0) {
-      /* during stage1, we have already created overhead tasks */
-      double overhead = dpt_cpu_overhead * updated_size;
-      XBT_DEBUG("updated %f overhead %f", updated_size, overhead);
-      launch_deferred_exec_process(vm, overhead, 10000);
-    }
+    // if (stage2_round != 0) {
+    //   /* during stage1, we have already created overhead tasks */
+    //   double overhead = dpt_cpu_overhead * updated_size;
+    //   XBT_DEBUG("updated %f overhead %f", updated_size, overhead);
+    //   launch_deferred_exec_process(vm, overhead, 10000);
+    // }
 
     {
      remaining_size += updated_size;
-      XBT_DEBUG("mig-stage2.%d: remaining_size %f (%s threshold %f)", stage2_round,
+      XBT_INFO("mig-stage2.%d: remaining_size %f (%s threshold %f)", stage2_round,
          remaining_size, (remaining_size < threshold) ? "<" : ">", threshold);
 
       if (remaining_size < threshold)
        break;
     }
 
+    double clock_prev_send = MSG_get_clock();
+
     send_migration_data(vm_name, src_pm_name, dst_pm_name, updated_size, mbox, 2, stage2_round, mig_speed, xfer_cpu_overhead);
 
+    double clock_post_send = MSG_get_clock();
+
+    double bandwidth = updated_size / (clock_post_send - clock_prev_send);
+    threshold = get_threshold_value(bandwidth, max_downtime);
+    XBT_INFO("actual bandwidth %f, threshold %f", bandwidth / 1024 / 1024, threshold);
+
+
+
+
+
+
     remaining_size -= updated_size;
     stage2_round += 1;
   }
@@ -1221,9 +1263,35 @@ msg_host_t MSG_vm_get_pm(msg_vm_t vm)
  *
  *
  * 2.
- * Note that bound == 0 means no bound (i.e., unlimited).
+ * Note that bound == 0 means no bound (i.e., unlimited). However, if a host
+ * has multiple CPU cores, the CPU share of a computation task (or a VM) never
+ * exceeds the capacity of a single CPU core.
  */
 void MSG_vm_set_bound(msg_vm_t vm, double bound)
 {
 	return simcall_vm_set_bound(vm, bound);
 }
+
+
+/** @brief Set the CPU affinity of a given VM.
+ * @ingroup msg_VMs
+ *
+ * This function changes the CPU affinity of a given VM. Usage is the same as
+ * for MSG_task_set_affinity(); see that function for details.
+ */
+void MSG_vm_set_affinity(msg_vm_t vm, msg_host_t pm, unsigned long mask)
+{
+  msg_host_priv_t priv = msg_host_resource_priv(vm);
+
+  if (mask == 0)
+    xbt_dict_remove_ext(priv->affinity_mask_db, (char *) pm, sizeof(pm));
+  else
+    xbt_dict_set_ext(priv->affinity_mask_db, (char *) pm, sizeof(pm), (void *) mask, NULL);
+
+  msg_host_t pm_now = MSG_vm_get_pm(vm);
+  if (pm_now == pm) {
+    XBT_INFO("set affinity(0x%04lx@%s) for %s", mask, MSG_host_get_name(pm), MSG_host_get_name(vm));
+    simcall_vm_set_affinity(vm, pm, mask);
+  } else
+    XBT_INFO("set affinity(0x%04lx@%s) for %s (not active now)", mask, MSG_host_get_name(pm), MSG_host_get_name(vm));
+}
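Note on the threshold change above: the old stop condition hard-coded a 1 Gbps link (max_downtime * 125 * 1024 * 1024 bytes, roughly 3.9 MB for the default 30 ms downtime), whereas the new code times each transfer and recomputes the threshold from the bandwidth actually observed, via get_threshold_value(). A minimal standalone sketch of the arithmetic; the 62.5 MB/s measured rate is illustrative, not a figure from the source:

#include <stdio.h>

/* same formula as get_threshold_value() in the patch above */
static double get_threshold_value(double bandwidth, double max_downtime)
{
  return max_downtime * bandwidth;
}

int main(void)
{
  double max_downtime = 0.03; /* 30 ms, the default set in migration_tx_fun() */

  /* old code: fixed 125 * 2^20 bytes/s, the code's approximation of 1 Gbps */
  double old_threshold = max_downtime * 125 * 1024 * 1024;

  /* new code: derived from the measured rate, here 62.5 MB/s (illustrative) */
  double new_threshold = get_threshold_value(62.5e6, max_downtime);

  printf("old %.0f bytes, new %.0f bytes\n", old_threshold, new_threshold);
  /* prints: old 3932160 bytes, new 1875000 bytes */
  return 0;
}

The precopy loop in migration_tx_fun() therefore stops as soon as the remaining dirty data can be sent within max_downtime at the rate the link has actually been delivering, rather than at a rate it was assumed to deliver.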
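The new MSG_vm_set_affinity() mirrors MSG_task_set_affinity() from the caller's point of view. A hypothetical usage sketch, assuming it runs inside an MSG process after MSG_init() and MSG_create_environment(); the names "PM0" and "VM0" are invented, and the include path may be simgrid/msg.h in later releases:

#include "msg/msg.h"

static void pin_vm_example(void)
{
  msg_host_t pm0 = MSG_get_host_by_name("PM0"); /* physical host from the platform file */
  msg_vm_t vm0 = MSG_vm_create_core(pm0, "VM0");

  /* restrict VM0 to cores 0 and 1 of PM0: bit i of the mask corresponds to core i */
  MSG_vm_set_affinity(vm0, pm0, 0x03);
  MSG_vm_start(vm0);

  /* ... run tasks inside the VM ... */

  MSG_vm_set_affinity(vm0, pm0, 0); /* a mask of 0 removes the setting */

  MSG_vm_shutdown(vm0);
  MSG_vm_destroy(vm0);
}

Because affinity_mask_db keys the masks by physical machine, a mask can be registered even for a machine the VM is not currently running on; migration_rx_fun() above then installs it once the VM arrives there.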
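The extended documentation of MSG_vm_set_bound() can be read the same way; a tiny sketch, with an illustrative 1e9 figure:

/* cap vm0 at 1 Gflop/s, later lift the cap; even with bound == 0
 * (unlimited), a VM's CPU share stays within one core's capacity
 * on a multicore host, per the note above */
static void bound_example(msg_vm_t vm0)
{
  MSG_vm_set_bound(vm0, 1e9);
  /* ... */
  MSG_vm_set_bound(vm0, 0);
}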