[loba.git] / process.cpp
#include <algorithm>
#include <cmath>
#include <functional>
#include <iterator>
#include <numeric>
#include <stdexcept>
#include <sstream>
#include <xbt/log.h>
#include <xbt/time.h>

XBT_LOG_EXTERNAL_DEFAULT_CATEGORY(proc);

#include "misc.h"
#include "options.h"
#include "tracing.h"

#include "process.h"

double process::total_load_init = 0.0;
double process::total_load_running = 0.0;
double process::total_load_exit = 0.0;

int process::process_counter = 0;
double process::total_load_average;
double process::load_diff_threshold;

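// Helper local to this file: sleep until `date', then push the date
// forward by `duration'.  Used to enforce the minimum duration of the
// load-balancing and computation iterations.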
namespace {

    void sleep_until_date(double& date, double duration)
    {
        double sleep_duration = date - MSG_get_clock();
        if (sleep_duration > 0.0)
            MSG_process_sleep(sleep_duration);
        date = MSG_get_clock() + duration;
    }

}

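// Constructor.  argv[1] gives the initial load; the remaining
// arguments are the names of the neighbor hosts.  Also updates the
// global counters used to derive the average load and the convergence
// threshold.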
process::process(int argc, char* argv[])
{
    if (argc < 2 || !(std::istringstream(argv[1]) >> real_load))
        throw std::invalid_argument("bad or missing initial load parameter");

    double iload = std::trunc(real_load);
    if (opt::integer_transfer && real_load != iload) {
        XBT_WARN("Initial load %g is not an integer.  Truncating it.",
                 real_load);
        real_load = iload;
    }

    neigh.assign(argv + 2, argv + argc);

    pneigh.reserve(neigh.size());
    for (unsigned i = 0 ; i < neigh.size() ; i++) {
        neighbor* ptr = &neigh[i];
        m_host_t host = MSG_get_host_by_name(ptr->get_name());
        pneigh.push_back(ptr);
        rev_neigh.insert(std::make_pair(host, ptr));
    }

    // Note: with the current version of Simgrid, there should be no
    // race condition when updating the global variables.

    prev_load_broadcast = -1;   // force sending of load on first send_all()
    expected_load = real_load;
    total_load_running += real_load;
    total_load_init += real_load;
    received_load = 0.0;

    idle_duration = 0.0;
    convergence = -1.0;

    process_counter++;
    total_load_average = total_load_running / process_counter;
    load_diff_threshold = (opt::load_ratio_threshold +
                           opt::avg_load_ratio * total_load_average) / 100.0;

    ctrl_close_pending = data_close_pending = neigh.size();
    close_received = false;
    finalizing = false;

    all_comp_iter = comp_iter = lb_iter = 0;

    lb_thread = new_msg_thread("loba",
                               std::bind(&process::load_balance_loop, this));

    e_xbt_log_priority_t logp = xbt_log_priority_verbose;
    if (!LOG_ISENABLED(logp))
        return;
    std::ostringstream oss;
    oss << neigh.size() << " neighbor";
    if (!neigh.empty()) {
        oss << ESSE(neigh.size()) << ": ";
        std::transform(neigh.begin(), neigh.end() - 1,
                       std::ostream_iterator<const char*>(oss, ", "),
                       std::mem_fn(&neighbor::get_name));
        oss << neigh.back().get_name();
    }
    XBT_LOG(logp, "Got %s.", oss.str().c_str());
    print_loads(false, logp);
}

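// Destructor.  Accounts the remaining load in total_load_exit and, if
// logging is enabled, reports the final load, the convergence time,
// and the total amount of computation done by this process.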
process::~process()
{
    delete lb_thread;
    total_load_exit += real_load;
    xbt_assert(received_load == 0.0,
               "received_load is %g, but should be 0.0 !", received_load);
    if (opt::log_rate < 0)
        return;
    XBT_INFO("Final load after %d:%d:%d iterations: %g",
             lb_iter, comp_iter, all_comp_iter, real_load);
    if (convergence >= 0.0)
        XBT_INFO("Convergence within %g%% was achieved at time %g",
                 opt::avg_load_ratio, convergence);
    else
        XBT_INFO("Convergence within %g%% was not achieved",
                 opt::avg_load_ratio);
    XBT_VERB("Expected load was: %g", expected_load);
    XBT_VERB("Total computation for this process: %g", get_comp_amount());
    print_loads(true, xbt_log_priority_debug);
}

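// Deviation between the number of computation iterations actually done
// and the number of iterations that the accumulated computation amount
// would represent if every iteration had carried the average load.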
double process::get_iter_deviation() const
{
    double average_cost = opt::comp_cost(total_load_average);
    // Do not count idle periods
    double comp_iter_opt = acc.comp_amount / average_cost;
/*
    // Add iterations that could have been achieved while being idle
    // (kept for documentation)
    double self_speed = MSG_get_host_speed(MSG_host_self());
    double average_duration = average_cost / self_speed;
    comp_iter_opt += idle_duration / average_duration;
*/
    return comp_iter - comp_iter_opt;
}

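// Main body of the MSG process: start the load-balancing thread, wait
// until it has passed opt::comp_iter_delay iterations and the clock
// has reached opt::comp_time_delay, then run the computation loop and
// finally join the load-balancing thread.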
int process::run()
{
    if (opt::log_rate >= 0) {
        XBT_INFO("Initial load: %g", real_load);
        XBT_VERB("Initial expected load: %g", expected_load);
    }
    XBT_VERB("Starting...");
    mutex.acquire();
    lb_thread->start();
    while (lb_iter <= opt::comp_iter_delay)
        cond.wait(mutex);
    mutex.release();
    double sleep_duration = opt::comp_time_delay - MSG_get_clock();
    if (sleep_duration > 0.0)
        MSG_process_sleep(sleep_duration);
    compute_loop();
    lb_thread->wait();
    XBT_VERB("Done.");
    return 0;
}

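// Load-balancing loop, run in its own thread.  Each iteration drains
// pending control messages, lets load_balance() decide what should be
// sent, and broadcasts the expected load to the neighbors.  On exit,
// CTRL_CLOSE is sent to every neighbor and the matching CTRL_CLOSE
// messages are awaited before the control channel is flushed.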
void process::load_balance_loop()
{
    using std::placeholders::_1;

    double next_iter_after_date = MSG_get_clock() + opt::min_lb_iter_duration;
    while (still_running()) {
        if (lb_iter == opt::comp_iter_delay) {
            mutex.acquire();
            ++lb_iter;
            cond.signal();
            mutex.release();
        } else {
            ++lb_iter;
        }

        ctrl_receive(0.0);

        mutex.acquire();
        if (!opt::bookkeeping)
            expected_load = real_load - get_sum_of_to_send();
        // with opt::bookkeeping, there is nothing to do here

        if (opt::log_rate && lb_iter % opt::log_rate == 0) {
            XBT_INFO("(%u:%u:%u) current load: %g",
                     lb_iter, comp_iter, all_comp_iter, real_load);
            XBT_VERB("... expected load: %g", expected_load);
        }

        if (expected_load > 0.0)
            load_balance();

        print_loads(true, xbt_log_priority_debug);

        // send
        comm.ctrl_flush(false);
        std::for_each(neigh.begin(), neigh.end(),
                      std::bind(&process::ctrl_send, this, _1));
        prev_load_broadcast = expected_load;
        mutex.release();

        sleep_until_date(next_iter_after_date, opt::min_lb_iter_duration);
    }

    XBT_VERB("Going to finalize for %s...", __func__);
    XBT_DEBUG("send CTRL_CLOSE to %zu neighbor%s",
              neigh.size(), ESSE(neigh.size()));
    std::for_each(neigh.begin(), neigh.end(),
                  std::bind(&process::ctrl_close, this, _1));
    while (ctrl_close_pending) {
        comm.ctrl_flush(false);
        XBT_DEBUG("waiting for %d CTRL_CLOSE", ctrl_close_pending);
        ctrl_receive(-1.0);
    }
    comm.ctrl_flush(true);
}

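// Computation loop.  Each iteration receives pending data messages
// (blocking only when there is no load to compute on), performs the
// data transfers scheduled by the load-balancing loop, and executes a
// computation task whose cost depends on the current load.  On exit,
// DATA_CLOSE is sent to every neighbor and remaining DATA messages are
// drained before the final accounting.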
void process::compute_loop()
{
    using std::placeholders::_1;

    double next_iter_after_date = MSG_get_clock() + opt::min_comp_iter_duration;
    double idle_since_date = 0.0;
    while (still_running()) {
        // receive
        // if there is something to compute, do not block
        // else, block for the duration of an *lb* iteration
        data_receive(real_load > 0.0 ? 0.0 : opt::min_lb_iter_duration);

        // send
        comm.data_flush(false);
        mutex.acquire();
        real_load += received_load;
        received_load = 0.0;
        std::for_each(neigh.begin(), neigh.end(),
                      std::bind(&process::data_send, this, _1));
        mutex.release();

        ++all_comp_iter;
        if (real_load == 0.0)
            continue;

        convergence_check();

        // compute
        idle_duration += MSG_get_clock() - idle_since_date;
        ++comp_iter;
        double flops = opt::comp_cost(real_load);
        m_task_t task = MSG_task_create("computation", flops, 0.0, NULL);
        TRACE_msg_set_task_category(task, TRACE_CAT_COMP);
        XBT_DEBUG("compute %g flop%s", flops, ESSE(flops));
        MSG_task_execute(task);
        add_comp_amount(flops);
        MSG_task_destroy(task);

        idle_since_date = MSG_get_clock();

        sleep_until_date(next_iter_after_date, opt::min_comp_iter_duration);
    }

    XBT_VERB("Going to finalize for %s...", __func__);
    // Note: idle duration is not counted during finalization
    finalizing = true;
    XBT_DEBUG("send DATA_CLOSE to %zu neighbor%s",
              neigh.size(), ESSE(neigh.size()));
    std::for_each(neigh.begin(), neigh.end(),
                  std::bind(&process::data_close, this, _1));
    while (data_close_pending) {
        comm.data_flush(false);
        XBT_DEBUG("waiting for %d DATA_CLOSE", data_close_pending);
        data_receive(-1.0);
    }
    real_load += received_load;
    received_load = 0.0;
    total_load_running -= real_load;
    convergence_check();
    comm.data_flush(true);
}

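// Record the date at which the local load first falls within
// load_diff_threshold of the average load, and reset it whenever the
// load diverges again.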
void process::convergence_check()
{
    double load_diff = std::fabs(real_load - total_load_average);
    bool converged = load_diff <= load_diff_threshold;

    if (convergence >= 0.0) {
        if (!converged) {
            XBT_VERB("current load has diverged: %g (%.4g%%)",
                     real_load, 100.0 * load_diff / total_load_average);
            convergence = -1.0;
        }
    } else {
        if (converged) {
            XBT_VERB("current load has converged: %g (%.4g%%)",
                     real_load, 100.0 * load_diff / total_load_average);
            convergence = MSG_get_clock();
        }
    }
}

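// Termination test shared by both loops.  Once a stop condition has
// been observed, the result sticks to false (static last_status).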
bool process::still_running()
{
    static bool last_status = true;

    if (!last_status) {
        /* nop */

    } else if (opt::exit_request) {
        XBT_VERB("Global exit requested");
        last_status = false;

    } else if (opt::time_limit && MSG_get_clock() >= opt::time_limit) {
        XBT_VERB("Reached time limit: %g/%g", MSG_get_clock(), opt::time_limit);
        last_status = false;

    } else if (opt::lb_maxiter && lb_iter >= opt::lb_maxiter) {
        XBT_VERB("Reached lb_maxiter: %d/%d", lb_iter, opt::lb_maxiter);
        last_status = false;

    } else if (opt::comp_maxiter && comp_iter >= opt::comp_maxiter) {
        XBT_VERB("Reached comp_maxiter: %d/%d", comp_iter, opt::comp_maxiter);
        last_status = false;

    } else if (opt::exit_on_close && close_received) {
        XBT_VERB("Close received");
        last_status = false;

    } else if (real_load == 0.0 && !data_close_pending) {
        XBT_VERB("I'm a poor lonesome process, and I have no load...");
        last_status = false;

    } else if (100.0 * total_load_running / total_load_init <=
               opt::load_ratio_threshold) {
        // fixme: this check should be implemented with a distributed
        // algorithm, and not a shared global variable!
        XBT_VERB("No more load to balance in the system.");
        last_status = false;
    }

    return last_status;
}

double process::get_sum_of_to_send() const
{
    using std::placeholders::_1;
    using std::placeholders::_2;

    return std::accumulate(neigh.begin(), neigh.end(), 0.0,
                           std::bind(std::plus<double>(), _1,
                                     std::bind(&neighbor::get_to_send, _2)));
}

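// Default load-balancing strategy: a no-op.  Presumably meant to be
// overridden by the actual strategies, which are expected to call
// send() to schedule load transfers towards the neighbors.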
void process::load_balance()
{
    if (lb_iter == 1)           // warn only once
        XBT_WARN("process::load_balance() is a no-op!");
}

void process::send(neighbor& nb, double amount)
{
    expected_load -= amount;
    nb.set_to_send(nb.get_to_send() + amount);
    nb.set_load(nb.get_load() + amount);
}

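// Send a CTRL message to a neighbor, carrying the expected load and,
// in bookkeeping mode, the debt newly contracted towards it.  Nothing
// is sent when the load did not change since the previous broadcast
// and there is no debt to signal.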
void process::ctrl_send(neighbor& nb)
{
    double info_to_send = expected_load;
    double debt_to_send;
    if (opt::bookkeeping) {     // bookkeeping
        debt_to_send = nb.get_to_send();
        if (debt_to_send > 0.0) {
            nb.set_to_send(0.0);
            nb.set_debt(nb.get_debt() + debt_to_send);
        }
    } else {                    // !bookkeeping
        debt_to_send = 0.0;
    }
    if (info_to_send != prev_load_broadcast || debt_to_send > 0.0) {
        message* msg = new message(message::CTRL, info_to_send, debt_to_send);
        add_ctrl_send_mesg(msg->get_size());
        comm.ctrl_send(nb.get_ctrl_mbox(), msg);
    }
}

double process::compute_load_to_send(double desired)
{
    if (opt::integer_transfer)
        desired = std::floor(desired);
    return desired >= opt::min_transfer_amount ? desired : 0.0;
}

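// Send load to a neighbor.  In bookkeeping mode the amount is bounded
// by the neighbor's outstanding debt and by the load we can actually
// part with; otherwise it is whatever was scheduled for this neighbor.
// When opt::max_transfer_amount is set, larger transfers are split
// into several DATA messages.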
void process::data_send(neighbor& nb)
{
    double load_to_send;
    if (opt::bookkeeping) {     // bookkeeping
        double excess_load;     // load amount we are able to send
        if (opt::egocentric)
            excess_load = std::max(0.0, real_load - expected_load);
        else
            excess_load = real_load;

        double balance = nb.get_debt() - nb.get_credit();
        load_to_send = std::min(excess_load,
                                std::max(0.0, balance));

        // adjust load to send (rounding, truncation, etc.)
        load_to_send = compute_load_to_send(load_to_send);
        if (load_to_send > 0.0)
            nb.set_debt(nb.get_debt() - load_to_send);
    } else {                    // !bookkeeping
        load_to_send = compute_load_to_send(nb.get_to_send());
        if (load_to_send > 0.0)
            nb.set_to_send(nb.get_to_send() - load_to_send);
    }
    real_load -= load_to_send;
    while (load_to_send > 0.0) {
        double amount;
        if (opt::max_transfer_amount)
            amount = std::min(load_to_send, opt::max_transfer_amount);
        else
            amount = load_to_send;
        message* msg = new message(message::DATA, amount);
        add_data_send_mesg(msg->get_size());
        comm.data_send(nb.get_data_mbox(), msg);
        load_to_send -= amount;
    }
}

void process::ctrl_close(neighbor& nb)
{
    comm.ctrl_send(nb.get_ctrl_mbox(), new message(message::CTRL_CLOSE, 0.0));
}

void process::data_close(neighbor& nb)
{
    comm.data_send(nb.get_data_mbox(), new message(message::DATA_CLOSE, 0.0));
}

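// Receive loops for the control and data mailboxes.  After the first
// message has been handled, the timeout drops to 0.0 so that the
// remaining pending messages are drained without blocking.  In the
// debug trace, "\0non-" + !timeout evaluates to "non-" when timeout is
// zero, and to the empty string otherwise.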
void process::ctrl_receive(double timeout)
{
    message* msg;
    m_host_t from;

    XBT_DEBUG("%sblocking receive on ctrl (%g)", "\0non-" + !timeout, timeout);
    while (ctrl_close_pending && comm.ctrl_recv(msg, from, timeout)) {
        if (msg->get_type() != message::CTRL_CLOSE)
            add_ctrl_recv_mesg(msg->get_size());
        handle_message(msg, from);
        timeout = 0.0;
    }
}

void process::data_receive(double timeout)
{
    message* msg;
    m_host_t from;

    XBT_DEBUG("%sblocking receive on data (%g)", "\0non-" + !timeout, timeout);
    while (data_close_pending && comm.data_recv(msg, from, timeout)) {
        if (msg->get_type() != message::DATA_CLOSE)
            add_data_recv_mesg(msg->get_size());
        handle_message(msg, from);
        timeout = 0.0;
    }
}

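// Dispatch a received message.  CTRL messages update the neighbor's
// advertised load (and, in bookkeeping mode, its credit and our
// expected load); DATA messages accumulate into received_load; CLOSE
// messages decrement the corresponding close counters.  The message is
// deleted here.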
void process::handle_message(message* msg, m_host_t from)
{
    switch (msg->get_type()) {
    case message::CTRL: {
        neighbor* n = rev_neigh[from];
        n->set_load(msg->get_amount() + n->get_to_send());
        if (opt::bookkeeping) {
            double credit = msg->get_credit();
            expected_load += credit;
            n->set_credit(n->get_credit() + credit);
        }
        break;
    }
    case message::DATA: {
        neighbor* n = rev_neigh[from];
        double ld = msg->get_amount();
        received_load += ld;
        n->set_credit(n->get_credit() - ld);
        break;
    }
    case message::CTRL_CLOSE:
        ctrl_close_pending--;
        close_received = true;
        break;
    case message::DATA_CLOSE:
        data_close_pending--;
        close_received = true;
        break;
    }
    delete msg;
}

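// Log the current load together with the loads of the neighbors, using
// either the neighbor list (print_loads) or the pointer list
// (print_loads_p).  Implemented as a macro so that both containers can
// be handled by the same code.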
#define print_loads_generic(vec, verbose, logp, cat)                    \
    if (_XBT_LOG_ISENABLEDV((*cat), logp)) {                            \
        using std::placeholders::_1;                                    \
        XBT_XCLOG(cat, logp, "My load: %g (real); %g (expected).  "     \
                  "Neighbor loads:", real_load, expected_load);         \
        std::for_each(vec.begin(), vec.end(),                           \
                      std::bind(&neighbor::print, _1, verbose, logp, cat)); \
    } else ((void)0)

void process::print_loads(bool verbose,
                          e_xbt_log_priority_t logp,
                          xbt_log_category_t cat) const
{
    print_loads_generic(neigh, verbose, logp, cat);
}

void process::print_loads_p(bool verbose,
                            e_xbt_log_priority_t logp,
                            xbt_log_category_t cat) const
{
    print_loads_generic(pneigh, verbose, logp, cat);
}

#undef print_loads_generic

// Local variables:
// mode: c++
// End: