+ if (opt::log_rate >= 0) {
+ XBT_INFO("Initial load: %g", real_load);
+ XBT_VERB("Initial expected load: %g", expected_load);
+ }
+ XBT_VERB("Starting...");
+ mutex.acquire();
+ lb_thread->start();
+ while (lb_iter <= opt::comp_iter_delay)
+ cond.wait(mutex);
+ mutex.release();
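+ // honor the start date requested for the computations, if any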
+ double sleep_duration = opt::comp_time_delay - MSG_get_clock();
+ if (sleep_duration > 0.0)
+ MSG_process_sleep(sleep_duration);
+ compute_loop();
+ lb_thread->wait();
+ XBT_VERB("Done.");
+ return 0;
+}
+
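+// Load-balancing loop: exchanges control messages with the neighbors,
+// and periodically runs the load-balancing algorithm.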
+void process::load_balance_loop()
+{
+ using std::placeholders::_1;
+
+ double next_iter_after_date = MSG_get_clock() + opt::min_lb_iter_duration;
+ while (still_running()) {
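+ // let the main thread progress once the first comp_iter_delay
+ // lb iterations have been done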
+ if (lb_iter == opt::comp_iter_delay) {
+ mutex.acquire();
+ ++lb_iter;
+ cond.signal();
+ mutex.release();
+ } else {
+ ++lb_iter;
+ }
+
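+ // receive control messages, without blocking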
+ ctrl_receive(0.0);
+
+ mutex.acquire();
+ if (!opt::bookkeeping)
+ expected_load = real_load - get_sum_of_to_send();
+ // nothing to do here with opt::bookkeeping: expected_load is
+ // kept up to date elsewhere
+
+ if (opt::log_rate && lb_iter % opt::log_rate == 0) {
+ XBT_INFO("(%u:%u:%u) current load: %g",
+ lb_iter, comp_iter, all_comp_iter, real_load);
+ XBT_VERB("... expected load: %g", expected_load);
+ }
+
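+ // run the load-balancing algorithm if some load remains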
+ if (expected_load > 0.0)
+ load_balance();
+
+ print_loads(true, xbt_log_priority_debug);
+
+ // send control information to the neighbors
+ comm.ctrl_flush(false);
+ std::for_each(neigh.begin(), neigh.end(),
+ std::bind(&process::ctrl_send, this, _1));
+ prev_load_broadcast = expected_load;
+ mutex.release();
+
+ sleep_until_date(next_iter_after_date, opt::min_lb_iter_duration);
+ }
+
+ XBT_VERB("Going to finalize for %s...", __func__);
+ XBT_DEBUG("send CTRL_CLOSE to %zu neighbor%s",
+ neigh.size(), ESSE(neigh.size()));
+ std::for_each(neigh.begin(), neigh.end(),
+ std::bind(&process::ctrl_close, this, _1));
+ while (ctrl_close_pending) {
+ comm.ctrl_flush(false);
+ XBT_DEBUG("waiting for %d CTRL_CLOSE", ctrl_close_pending);
+ ctrl_receive(-1.0);
+ }
+ comm.ctrl_flush(true);
+}
+
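+// Computation loop: exchanges real load with the neighbors, and
+// simulates the computation of the local load.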
+void process::compute_loop()
+{
+ using std::placeholders::_1;
+
+ double next_iter_after_date = MSG_get_clock() + opt::min_comp_iter_duration;
+ while (still_running()) {
+ // receive (do not block if there is something to compute)
+ data_receive(real_load > 0.0 ? 0.0 : opt::min_comp_iter_duration);
+
+ // absorb received load, then send data to the neighbors
+ comm.data_flush(false);
+ mutex.acquire();
+ real_load += received_load;
+ received_load = 0.0;
+ std::for_each(neigh.begin(), neigh.end(),
+ std::bind(&process::data_send, this, _1));
+ mutex.release();
+
+ ++all_comp_iter;
+ if (real_load == 0.0)
+ continue;
+
+ convergence_check();
+
+ // compute
+ ++comp_iter;
+ double flops = opt::comp_cost(real_load);
+ m_task_t task = MSG_task_create("computation", flops, 0.0, NULL);
+ TRACE_msg_set_task_category(task, TRACE_CAT_COMP);
+ XBT_DEBUG("compute %g flop%s", flops, ESSE(flops));
+ MSG_task_execute(task);
+ add_comp_amount(flops);
+ MSG_task_destroy(task);
+
+ sleep_until_date(next_iter_after_date, opt::min_comp_iter_duration);
+ }
+
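+ // finalize: close the data channels, and drain the pending messages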
+ XBT_VERB("Going to finalize for %s...", __func__);
+ finalizing = true;
+ XBT_DEBUG("send DATA_CLOSE to %zu neighbor%s",
+ neigh.size(), ESSE(neigh.size()));
+ std::for_each(neigh.begin(), neigh.end(),
+ std::bind(&process::data_close, this, _1));
+ while (data_close_pending) {
+ comm.data_flush(false);
+ XBT_DEBUG("waiting for %d DATA_CLOSE", data_close_pending);
+ data_receive(-1.0);
+ }
+ real_load += received_load;
+ received_load = 0.0;
+ total_load_running -= real_load;
+ convergence_check();
+ comm.data_flush(true);
+}
+
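+// Records the time of convergence, i.e. when the local load gets within
+// load_diff_threshold of the global average; resets it on divergence.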
+void process::convergence_check()
+{
+ double load_diff = std::fabs(real_load - total_load_average);
+ bool converged = load_diff <= load_diff_threshold;
+
+ if (convergence >= 0.0) {
+ if (!converged) {
+ XBT_VERB("current load has diverged: %g (%.4g%%)",
+ real_load, 100.0 * load_diff / total_load_average);
+ convergence = -1.0;
+ }
+ } else {
+ if (converged) {
+ XBT_VERB("current load has converged: %g (%.4g%%)",
+ real_load, 100.0 * load_diff / total_load_average);
+ convergence = MSG_get_clock();
+ }
+ }
+}
+
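+// Tells whether the process should keep running; once false has been
+// returned, the answer does not change anymore.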
+bool process::still_running()
+{
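+ // fixme: 'static' makes last_status shared between all process
+ // instances; shouldn't it be a member variable?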
+ static bool last_status = true;
+
+ if (!last_status) {
+ /* nop */
+
+ } else if (opt::exit_request) {
+ XBT_VERB("Global exit requested");
+ last_status = false;
+
+ } else if (opt::time_limit && MSG_get_clock() >= opt::time_limit) {
+ XBT_VERB("Reached time limit: %g/%g", MSG_get_clock(), opt::time_limit);
+ last_status = false;
+
+ } else if (opt::lb_maxiter && lb_iter >= opt::lb_maxiter) {
+ XBT_VERB("Reached lb_maxiter: %u/%u", lb_iter, opt::lb_maxiter);
+ last_status = false;
+
+ } else if (opt::comp_maxiter && comp_iter >= opt::comp_maxiter) {
+ XBT_VERB("Reached comp_maxiter: %u/%u", comp_iter, opt::comp_maxiter);
+ last_status = false;
+
+ } else if (opt::exit_on_close && close_received) {
+ XBT_VERB("Close received");
+ last_status = false;
+
+ } else if (real_load == 0.0 && !data_close_pending) {
+ XBT_VERB("I'm a poor lonesome process, and I have no load...");
+ last_status = false;
+
+ } else if (100.0 * total_load_running / total_load_init <=
+ opt::load_ratio_threshold) {
+ // fixme: this check should be implemented with a distributed
+ // algorithm, and not a shared global variable!
+ XBT_VERB("No more load to balance in system.");
+ last_status = false;
+ }
+
+ return last_status;
+}
+
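+// Returns the sum of the load amounts that are waiting to be sent to
+// the neighbors.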
+double process::get_sum_of_to_send() const
+{
+ using std::placeholders::_1;
+ using std::placeholders::_2;
+
+ return std::accumulate(neigh.begin(), neigh.end(), 0.0,
+ std::bind(std::plus<double>(), _1,
+ std::bind(&neighbor::get_to_send, _2)));
+}