+// Globals shared by all process instances: total load still carried by
+// running processes vs. total load at exit time.  NOTE(review): presumably
+// used for an end-of-run conservation check — confirm against the full file
+// (see the "shared global variable" fixme later in this patch).
double process::total_load_running = 0.0;
double process::total_load_exit = 0.0;
+namespace {
+
+ // Sleep until the simulated clock reaches `date`, then advance `date`
+ // to (now + duration) for the next call.  Lets a loop enforce a
+ // minimum delay between iterations.  If `date` is already in the
+ // past, no sleep occurs but `date` is still updated.
+ void sleep_until_date(double& date, double duration = 0.0)
+ {
+ double sleep_duration = date - MSG_get_clock();
+ if (sleep_duration > 0.0)
+ MSG_process_sleep(sleep_duration);
+ date = MSG_get_clock() + duration;
+ }
+
+}
+
process::process(int argc, char* argv[])
{
if (argc < 2 || !(std::istringstream(argv[1]) >> real_load))
comp_iter = lb_iter = 0;
- compute_thread = new_msg_thread("compute",
- std::tr1::bind(&process::compute_loop,
- this));
+ // The helper thread now runs the load-balancing loop ("loba");
+ // the main MSG process runs compute_loop() itself — see run().
+ lb_thread = new_msg_thread("loba",
+ std::tr1::bind(&process::load_balance_loop,
+ this));
e_xbt_log_priority_t logp = xbt_log_priority_verbose;
if (!LOG_ISENABLED(logp))
process::~process()
{
- delete compute_thread;
+ delete lb_thread;
total_load_exit += real_load;
+ // Skip the final report entirely when result logging is disabled.
+ if (opt::log_rate < 0)
+ return;
if (opt::bookkeeping) {
XBT_INFO("Final load after %d:%d iterations: %g ; expected: %g",
lb_iter, comp_iter, real_load, expected_load);
} else {
- XBT_INFO("Final load after %d iterations: %g",
- lb_iter, real_load);
- if (lb_iter != comp_iter)
- XBT_WARN("lb_iter (%d) and comp_iter (%d) differ!",
- lb_iter, comp_iter);
+ // With separate lb/compute loops, lb_iter and comp_iter may
+ // legitimately differ: report both instead of warning.
+ XBT_INFO("Final load after %d:%d iterations: %g",
+ lb_iter, comp_iter, real_load);
}
XBT_VERB("Total computation for this process: %g", comp);
}
int process::run()
{
- XBT_INFO("Initial load: %g", real_load);
+ // Honor the log_rate switch, consistent with the destructor's report.
+ if (opt::log_rate >= 0)
+ XBT_INFO("Initial load: %g", real_load);
XBT_VERB("Starting...");
- compute_thread->start();
- load_balance_loop();
- compute_thread->wait();
+ // Roles are swapped w.r.t. the old code: the helper thread does the
+ // load balancing while this (main) thread does the computation.
+ lb_thread->start();
+ compute_loop();
+ lb_thread->wait();
XBT_VERB("Done.");
return 0;
}
using std::tr1::bind;
using std::tr1::placeholders::_1;
+ // NOTE(review): hunk interior — presumably load_balance_loop(); the
+ // signature is outside this hunk.  Arm the minimum-iteration timer.
+ double next_iter_after_date = MSG_get_clock() + opt::min_lb_iter_duration;
while (still_running()) {
++lb_iter;
// send
std::for_each(neigh.begin(), neigh.end(),
bind(&process::ctrl_send, this, _1));
+ prev_load_broadcast = get_load();
- // block on receiving unless there is something to compute or
- // to send
- ctrl_receive(opt::min_lb_iter_duration);
+ // Enforce the minimum iteration duration with an explicit sleep,
+ // then poll control messages without blocking (timeout 0).
+ sleep_until_date(next_iter_after_date, opt::min_lb_iter_duration);
+ ctrl_receive(0.0);
comm.ctrl_flush(false);
}
using std::tr1::bind;
using std::tr1::placeholders::_1;
- double next_iter_after_date = 0.0;
+ // NOTE(review): hunk interior — presumably compute_loop().  Start the
+ // minimum-iteration timer at now + min duration instead of 0.
+ double next_iter_after_date = MSG_get_clock() + opt::min_comp_iter_duration;
while (still_running()) {
// receive
- double sleep_duration = real_load
- ? std::max(MSG_get_clock() - next_iter_after_date, 0.0)
- : opt::min_comp_iter_duration;
- data_receive(sleep_duration);
+ // With load on hand, just poll (timeout 0) so computation can
+ // proceed; when idle, block up to the minimum iteration duration
+ // waiting for incoming load.
+ if (real_load > 0.0)
+ data_receive(0.0);
+ else
+ data_receive(opt::min_comp_iter_duration);
comm.data_flush(false);
comp += flops;
MSG_task_destroy(task);
- next_iter_after_date = MSG_get_clock() + opt::min_comp_iter_duration;
+ // Pace the loop: sleep off the remainder of the minimum duration.
+ sleep_until_date(next_iter_after_date, opt::min_comp_iter_duration);
}
XBT_VERB("Going to finalize for %s...", __func__);
+ // last send, for not losing load scheduled to be sent
+ std::for_each(neigh.begin(), neigh.end(),
+ bind(&process::data_send, this, _1));
+ finalizing = true;
+ total_load_running -= real_load;
XBT_DEBUG("send DATA_CLOSE to %zu neighbor%s",
neigh.size(), ESSE(neigh.size()));
std::for_each(neigh.begin(), neigh.end(),
opt::load_ratio_threshold) {
// fixme: this check should be implemented with a distributed
// algorithm, and not a shared global variable!
- // fixme: should this chunk be moved before call to receive() ?
XBT_VERB("No more load to balance in system.");
last_status = false;
}
} else {
+ // NOTE(review): presumably the bookkeeping branch — the amount to
+ // send was pre-computed on the neighbor record; confirm against
+ // the full function (outside this hunk).
load_to_send = nb.get_to_send();
nb.set_to_send(0.0);
+ // do not update real_load here
}
if (load_to_send > 0.0)
comm.data_send(nb.get_data_mbox(),