#include <algorithm>
+#include <atomic>
#include <cmath>
-#include <functional>
#include <iterator>
#include <numeric>
#include <stdexcept>
#include <sstream>
#include <xbt/log.h>
-#include <xbt/time.h>
XBT_LOG_EXTERNAL_DEFAULT_CATEGORY(proc);
#include "process.h"
+mutex_t *process::proc_mutex;
+
double process::total_load_init = 0.0;
double process::total_load_running = 0.0;
double process::total_load_exit = 0.0;
int process::process_counter = 0;
double process::total_load_average;
+double process::average_load_ratio;
double process::load_diff_threshold;
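+// Number of processes that have not yet durably converged; global
+// convergence is reached when this counter drops to zero.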
+std::atomic<int> process::convergence_counter(0);
+
namespace {
void sleep_until_date(double& date, double duration)
pneigh.reserve(neigh.size());
for (unsigned i = 0 ; i < neigh.size() ; i++) {
neighbor* ptr = &neigh[i];
- m_host_t host = MSG_get_host_by_name(ptr->get_name());
+ msg_host_t host = MSG_get_host_by_name(ptr->get_name());
pneigh.push_back(ptr);
rev_neigh.insert(std::make_pair(host, ptr));
}
- // Note: there should not be race condition with the current
- // version of Simgrid, when updating the global variables.
-
prev_load_broadcast = -1; // force sending of load on first send_all()
expected_load = real_load;
- total_load_running += real_load;
- total_load_init += real_load;
received_load = 0.0;
+ idle_duration = 0.0;
convergence = -1.0;
+ proc_mutex->acquire();
process_counter++;
+ convergence_counter++;
+ total_load_init += real_load;
+ total_load_running += real_load;
total_load_average = total_load_running / process_counter;
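+    // A non-negative avg_load_ratio option is used directly as a
+    // percentage; a negative value encodes an absolute margin of
+    // process_counter / -opt::avg_load_ratio load units, converted here
+    // into an equivalent percentage.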
+ if (opt::avg_load_ratio >= 0.0)
+ average_load_ratio = opt::avg_load_ratio;
+ else
+ average_load_ratio = 100.0 *
+ (process_counter / -opt::avg_load_ratio) / total_load_average;
load_diff_threshold = (opt::load_ratio_threshold +
- opt::avg_load_ratio * total_load_average) / 100.0;
+ average_load_ratio * total_load_average) / 100.0;
+ proc_mutex->release();
ctrl_close_pending = data_close_pending = neigh.size();
close_received = false;
all_comp_iter = comp_iter = lb_iter = 0;
- lb_thread = new_msg_thread("loba",
- std::bind(&process::load_balance_loop, this));
+ lb_thread = new_msg_thread("loba", [this]() { this->load_balance_loop(); });
e_xbt_log_priority_t logp = xbt_log_priority_verbose;
if (!LOG_ISENABLED(logp))
oss << ESSE(neigh.size()) << ": ";
std::transform(neigh.begin(), neigh.end() - 1,
std::ostream_iterator<const char*>(oss, ", "),
- std::mem_fn(&neighbor::get_name));
+                       [](const neighbor& n) { return n.get_name(); });
oss << neigh.back().get_name();
}
XBT_LOG(logp, "Got %s.", oss.str().c_str());
process::~process()
{
delete lb_thread;
+ proc_mutex->acquire();
total_load_exit += real_load;
+ proc_mutex->release();
xbt_assert(received_load == 0.0,
"received_load is %g, but should be 0.0 !", received_load);
if (opt::log_rate < 0)
lb_iter, comp_iter, all_comp_iter, real_load);
if (convergence >= 0.0)
XBT_INFO("Convergence within %g%% was achieved at time %g",
- opt::avg_load_ratio, convergence);
+ average_load_ratio, convergence);
else
XBT_INFO("Convergence within %g%% was not achieved",
- opt::avg_load_ratio);
+ average_load_ratio);
XBT_VERB("Expected load was: %g", expected_load);
XBT_VERB("Total computation for this process: %g", get_comp_amount());
print_loads(true, xbt_log_priority_debug);
}
+double process::get_iter_deviation() const
+{
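+    // Deviation between the achieved iteration count and the number of
+    // iterations that the computed amount would represent at the
+    // average load.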
+ double average_cost = opt::comp_cost(total_load_average); // fixme: get locked?
+ // Do not count idle periods
+ double comp_iter_opt = acc.comp_amount / average_cost;
+/*
+    // Add iterations that could have been achieved while being idle
+ // (kept for documentation)
+ double self_speed = MSG_get_host_speed(MSG_host_self());
+ double average_duration = average_cost / self_speed;
+ comp_iter_opt += idle_duration / average_duration;
+*/
+ return comp_iter - comp_iter_opt;
+}
+
int process::run()
{
if (opt::log_rate >= 0) {
void process::load_balance_loop()
{
- using std::placeholders::_1;
-
double next_iter_after_date = MSG_get_clock() + opt::min_lb_iter_duration;
while (still_running()) {
if (lb_iter == opt::comp_iter_delay) {
// send
comm.ctrl_flush(false);
- std::for_each(neigh.begin(), neigh.end(),
- std::bind(&process::ctrl_send, this, _1));
+ for (neighbor& n : neigh)
+ ctrl_send(n);
prev_load_broadcast = expected_load;
mutex.release();
XBT_VERB("Going to finalize for %s...", __func__);
XBT_DEBUG("send CTRL_CLOSE to %zu neighbor%s",
neigh.size(), ESSE(neigh.size()));
- std::for_each(neigh.begin(), neigh.end(),
- std::bind(&process::ctrl_close, this, _1));
+ for (neighbor& n : neigh)
+ ctrl_close(n);
while (ctrl_close_pending) {
comm.ctrl_flush(false);
XBT_DEBUG("waiting for %d CTRL_CLOSE", ctrl_close_pending);
void process::compute_loop()
{
- using std::placeholders::_1;
-
double next_iter_after_date = MSG_get_clock() + opt::min_comp_iter_duration;
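+    // Date when this process last stopped computing, used to accumulate
+    // idle_duration.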
+ double idle_since_date = 0.0;
while (still_running()) {
+ // receive
+ // if there is something to compute, do not block
+ // else, block the duration of an *lb* iteration
+ data_receive(real_load > 0.0 ? 0.0 : opt::min_lb_iter_duration);
- do {
- // receive
- // if there is something to compute, do not block
- // else, block the duration of an *lb* iteration
- data_receive(real_load > 0.0 ? 0.0 : opt::min_lb_iter_duration);
-
- // send
- comm.data_flush(false);
- mutex.acquire();
- real_load += received_load;
- received_load = 0.0;
- std::for_each(neigh.begin(), neigh.end(),
- std::bind(&process::data_send, this, _1));
- mutex.release();
-
- ++all_comp_iter;
+ // send
+ comm.data_flush(false);
+ mutex.acquire();
+ real_load += received_load;
+ received_load = 0.0;
+ for (neighbor& n : neigh)
+ data_send(n);
+ mutex.release();
- } while (real_load == 0.0);
+ ++all_comp_iter;
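+        // nothing to compute: skip the convergence check and computation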
+ if (real_load == 0.0)
+ continue;
convergence_check();
// compute
+ idle_duration += MSG_get_clock() - idle_since_date;
++comp_iter;
double flops = opt::comp_cost(real_load);
- m_task_t task = MSG_task_create("computation", flops, 0.0, NULL);
- TRACE_msg_set_task_category(task, TRACE_CAT_COMP);
+ msg_task_t task = MSG_task_create("computation", flops, 0.0, NULL);
+ // MSG_task_set_category(task, TRACE_CAT_COMP);
XBT_DEBUG("compute %g flop%s", flops, ESSE(flops));
MSG_task_execute(task);
add_comp_amount(flops);
MSG_task_destroy(task);
+ idle_since_date = MSG_get_clock();
+
sleep_until_date(next_iter_after_date, opt::min_comp_iter_duration);
}
XBT_VERB("Going to finalize for %s...", __func__);
+ // Note: idle duration is not counted during finalization
finalizing = true;
XBT_DEBUG("send DATA_CLOSE to %zu neighbor%s",
neigh.size(), ESSE(neigh.size()));
- std::for_each(neigh.begin(), neigh.end(),
- std::bind(&process::data_close, this, _1));
+ for (neighbor& n : neigh)
+ data_close(n);
while (data_close_pending) {
comm.data_flush(false);
XBT_DEBUG("waiting for %d DATA_CLOSE", data_close_pending);
}
real_load += received_load;
received_load = 0.0;
+ proc_mutex->acquire();
total_load_running -= real_load;
+ proc_mutex->release();
convergence_check();
comm.data_flush(true);
}
void process::convergence_check()
{
- double load_diff = std::fabs(real_load - total_load_average);
+ double average = total_load_average; // fixme: get locked?
+ double load_diff = std::fabs(real_load - average);
bool converged = load_diff <= load_diff_threshold;
- if (convergence >= 0.0) {
- if (!converged) {
- XBT_VERB("current load has diverged: %g (%.4g%%)",
- real_load, 100.0 * load_diff / total_load_average);
- convergence = -1.0;
- }
- } else {
- if (converged) {
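+    // A process leaves convergence_counter only after staying converged
+    // for opt::exit_on_convergence consecutive checks, and re-enters it
+    // if its load diverges afterwards.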
+ if (converged) {
+ if (convergence < 0) {
XBT_VERB("current load has converged: %g (%.4g%%)",
- real_load, 100.0 * load_diff / total_load_average);
+ real_load, 100.0 * load_diff / average);
convergence = MSG_get_clock();
+ local_convergence_counter = opt::exit_on_convergence;
+ }
+ if (local_convergence_counter > 0 && --local_convergence_counter == 0)
+ --convergence_counter;
+ } else {
+ if (convergence >= 0.0) {
+ XBT_VERB("current load has diverged: %g (%.4g%%)",
+ real_load, 100.0 * load_diff / average);
+ convergence = -1.0;
+ if (local_convergence_counter == 0)
+ ++convergence_counter;
}
}
}
XBT_VERB("Reached comp_maxiter: %d/%d", comp_iter, opt::comp_maxiter);
last_status = false;
+ } else if (opt::exit_on_convergence && convergence_counter == 0) {
+ XBT_VERB("Global convergence detected");
+ last_status = false;
+
} else if (opt::exit_on_close && close_received) {
XBT_VERB("Close received");
last_status = false;
last_status = false;
} else if (100.0 * total_load_running / total_load_init <=
- opt::load_ratio_threshold) {
+ opt::load_ratio_threshold) { // fixme: get locked?
// fixme: this check should be implemented with a distributed
// algorithm, and not a shared global variable!
XBT_VERB("No more load to balance in system.");
double process::get_sum_of_to_send() const
{
- using std::placeholders::_1;
- using std::placeholders::_2;
-
return std::accumulate(neigh.begin(), neigh.end(), 0.0,
- std::bind(std::plus<double>(), _1,
- std::bind(&neighbor::get_to_send, _2)));
+                           [](double x, const neighbor& n) {
+                               return x + n.get_to_send();
+ });
}
void process::load_balance()
else
excess_load = real_load;
- double balance = nb.get_debt() - nb.get_credit();
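+    // Deduct the neighbor's credit from its debt only when some credit
+    // is actually pending.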
+ double balance;
+ if (nb.get_credit() > 0.0)
+ balance = nb.get_debt() - nb.get_credit();
+ else
+ balance = nb.get_debt();
load_to_send = std::min(excess_load,
std::max(0.0, balance));
void process::ctrl_receive(double timeout)
{
message* msg;
- m_host_t from;
+ msg_host_t from;
XBT_DEBUG("%sblocking receive on ctrl (%g)", "\0non-" + !timeout, timeout);
while (ctrl_close_pending && comm.ctrl_recv(msg, from, timeout)) {
void process::data_receive(double timeout)
{
message* msg;
- m_host_t from;
+ msg_host_t from;
XBT_DEBUG("%sblocking receive on data (%g)", "\0non-" + !timeout, timeout);
while (data_close_pending && comm.data_recv(msg, from, timeout)) {
}
}
-void process::handle_message(message* msg, m_host_t from)
+void process::handle_message(message* msg, msg_host_t from)
{
switch (msg->get_type()) {
case message::CTRL: {
delete msg;
}
-#define print_loads_generic(vec, verbose, logp, cat) \
- if (_XBT_LOG_ISENABLEDV((*cat), logp)) { \
- using std::placeholders::_1; \
- XBT_XCLOG(cat, logp, "My load: %g (real); %g (expected). " \
- "Neighbor loads:", real_load, expected_load); \
- std::for_each(vec.begin(), vec.end(), \
- std::bind(&neighbor::print, _1, verbose, logp, cat)); \
- } else ((void)0)
-
void process::print_loads(bool verbose,
e_xbt_log_priority_t logp,
xbt_log_category_t cat) const
{
- print_loads_generic(neigh, verbose, logp, cat);
+ if (!_XBT_LOG_ISENABLEDV((*cat), logp))
+ return;
+ XBT_XCLOG(cat, logp, "My load: %g (real); %g (expected). Neighbor loads:",
+ real_load, expected_load);
+ for (const neighbor& n : neigh)
+ n.print(verbose, logp, cat);
}
void process::print_loads_p(bool verbose,
e_xbt_log_priority_t logp,
xbt_log_category_t cat) const
{
- print_loads_generic(pneigh, verbose, logp, cat);
+ if (!_XBT_LOG_ISENABLEDV((*cat), logp))
+ return;
+ XBT_XCLOG(cat, logp, "My load: %g (real); %g (expected). Neighbor loads:",
+ real_load, expected_load);
+ for (const neighbor* n : pneigh)
+ n->print(verbose, logp, cat);
}
-#undef print_loads_generic