/* Copyright (c) 2007, 2008, 2009, 2010. The SimGrid Team.
 * All rights reserved. */

/* This program is free software; you can redistribute it and/or modify it
 * under the terms of the license (GNU LGPL) which comes with this package. */
 * Modelling proportional fairness using the Lagrange optimization
 * approach. For a detailed description see:
 * "ssh://username@scm.gforge.inria.fr/svn/memo/people/pvelho/lagrange/ppf.ps".
#include "xbt/sysdep.h"
#include "maxmin_private.h"

XBT_LOG_NEW_DEFAULT_SUBCATEGORY(surf_lagrange, surf,
                                "Logging specific to SURF (lagrange)");
XBT_LOG_NEW_SUBCATEGORY(surf_lagrange_dichotomy, surf_lagrange,
                        "Logging specific to SURF (lagrange dichotomy)");

#define SHOW_EXPR(expr) XBT_CDEBUG(surf_lagrange,#expr " = %g",expr);
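/* Default protocol functions: the utility function f, its first derivative
 * (fp) and the inverse of that derivative (fpi). They are installed through
 * lmm_set_default_protocol_function() below. */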
double (*func_f_def) (lmm_variable_t, double);
double (*func_fp_def) (lmm_variable_t, double);
double (*func_fpi_def) (lmm_variable_t, double);
 * Local prototypes to implement the Lagrangian optimization with optimal step, also called dichotomy.

// Solves proportional fairness using a Lagrange optimization with a dichotomy step
void lagrange_solve(lmm_system_t sys);
// Computes the result of the dichotomy from an initial value, init, for a given variable or constraint
static double dichotomy(double init, double diff(double, void *),
                        void *var_cnst, double min_error);
// Computes the value of the differential of constraint param_cnst applied to lambda
static double partial_diff_lambda(double lambda, void *param_cnst);
static int __check_feasible(xbt_swag_t cnst_list, xbt_swag_t var_list,
  xbt_swag_t elem_list = NULL;
  lmm_element_t elem = NULL;
  lmm_constraint_t cnst = NULL;
  lmm_variable_t var = NULL;
  xbt_swag_foreach(cnst, cnst_list) {
    elem_list = &(cnst->element_set);
    xbt_swag_foreach(elem, elem_list) {
    if (double_positive(tmp - cnst->bound)) {
          ("The link (%p) is over-used. Expected less than %f and got %f",
           cnst, cnst->bound, tmp);
71 ("Checking feasability for constraint (%p): sat = %f, lambda = %f ",
72 cnst, tmp - cnst->bound, cnst->lambda);
75 xbt_swag_foreach(var, var_list) {
80 XBT_DEBUG("Checking feasability for variable (%p): sat = %f mu = %f", var,
81 var->value - var->bound, var->mu);
    if (double_positive(var->value - var->bound)) {
          ("The variable (%p) is too large. Expected less than %f and got %f",
           var, var->bound, var->value);
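/* Computes the primal value of a variable from the current dual variables:
 * rho_i = (f_i')^{-1}(sum of the lambdas of the constraints it uses), i.e. the
 * point where the Lagrangian is stationary with respect to rho_i. */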
static double new_value(lmm_variable_t var)
  for (i = 0; i < var->cnsts_number; i++) {
    tmp += (var->cnsts[i].constraint)->lambda;
  XBT_DEBUG("\t Working on var (%p). cost = %e; Weight = %e", var, tmp,
  // Uses the inverse of the partial differential of f ((f')^{-1})
  return var->func_fpi(var, tmp);
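/* Computes the new mu for a bounded variable: mu_i = f_i'(bound_i) - sigma_i,
 * where sigma_i is the sum of the lambdas of the constraints the variable uses. */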
static double new_mu(lmm_variable_t var)
  double sigma_i = 0.0;
  for (j = 0; j < var->cnsts_number; j++) {
    sigma_i += (var->cnsts[j].constraint)->lambda;
  mu_i = var->func_fp(var, var->bound) - sigma_i;
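/* Evaluates the Lagrange dual objective at the current (lambda, mu): for each
 * variable, f_i(rho_i) - sigma_i * rho_i with rho_i = (f_i')^{-1}(sigma_i),
 * plus mu_i * bound_i for bounded variables, plus lambda_j * bound_j summed
 * over the constraints. */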
static double dual_objective(xbt_swag_t var_list, xbt_swag_t cnst_list)
  lmm_constraint_t cnst = NULL;
  lmm_variable_t var = NULL;
  xbt_swag_foreach(var, var_list) {
    double sigma_i = 0.0;
    for (j = 0; j < var->cnsts_number; j++)
      sigma_i += (var->cnsts[j].constraint)->lambda;
    XBT_DEBUG("var %p : sigma_i = %1.20f", var, sigma_i);
    obj += var->func_f(var, var->func_fpi(var, sigma_i)) -
        sigma_i * var->func_fpi(var, sigma_i);
    obj += var->mu * var->bound;
  xbt_swag_foreach(cnst, cnst_list)
    obj += cnst->lambda * cnst->bound;
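/* Main entry point: solves the proportional-fairness problem through its
 * Lagrangian dual. Each iteration improves the dual objective by updating
 * every mu_i in closed form (new_mu) and every lambda_j by a one-dimensional
 * dichotomy on the partial derivative of the dual (partial_diff_lambda), then
 * recomputes the primal values (new_value). It stops once the values change by
 * less than epsilon_min_error and are feasible, or after max_iterations. */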
void lagrange_solve(lmm_system_t sys)
   * Lagrange Variables.
  int max_iterations = 100;
  double epsilon_min_error = MAXMIN_PRECISION;
  double dichotomy_min_error = 1e-14;
  double overall_modification = 1;
   * Variables to manipulate the data structure used to model max-min
   * fairness. See the documentation for more details.
  xbt_swag_t cnst_list = NULL;
  lmm_constraint_t cnst = NULL;
  xbt_swag_t var_list = NULL;
  lmm_variable_t var = NULL;
   * Auxiliary variables.
188 XBT_DEBUG("Iterative method configuration snapshot =====>");
189 XBT_DEBUG("#### Maximum number of iterations : %d", max_iterations);
190 XBT_DEBUG("#### Minimum error tolerated : %e",
192 XBT_DEBUG("#### Minimum error tolerated (dichotomy) : %e",
193 dichotomy_min_error);
195 if (XBT_LOG_ISENABLED(surf_lagrange, xbt_log_priority_debug)) {
199 if (!(sys->modified))
206 cnst_list = &(sys->active_constraint_set);
207 xbt_swag_foreach(cnst, cnst_list) {
209 cnst->new_lambda = 2.0;
210 XBT_DEBUG("#### cnst(%p)->lambda : %e", cnst, cnst->lambda);
   * Initialize the var_list with only the active variables.
   * Associate an index with each variable in the swag. Initialize mu.
217 var_list = &(sys->variable_set);
219 xbt_swag_foreach(var, var_list) {
224 if (var->bound < 0.0) {
225 XBT_DEBUG("#### NOTE var(%d) is a boundless variable", i);
227 var->value = new_value(var);
231 var->value = new_value(var);
233 XBT_DEBUG("#### var(%p) ->weight : %e", var, var->weight);
234 XBT_DEBUG("#### var(%p) ->mu : %e", var, var->mu);
235 XBT_DEBUG("#### var(%p) ->weight: %e", var, var->weight);
236 XBT_DEBUG("#### var(%p) ->bound: %e", var, var->bound);
    for (i = 0; i < var->cnsts_number; i++) {
      if (var->cnsts[i].value == 0.0)
    if (nb == var->cnsts_number)
   * Compute dual objective.
  obj = dual_objective(var_list, cnst_list);
   * Iterate until the overall modification drops below the tolerated error or
   * the maximum number of iterations is reached.
  while (overall_modification > epsilon_min_error
         && iteration < max_iterations) {
    /* int dual_updated=0; */
    XBT_DEBUG("************** ITERATION %d **************", iteration);
    XBT_DEBUG("-------------- Gradient Descent ----------");
     * Improve the value of mu_i
    xbt_swag_foreach(var, var_list) {
      if (var->bound >= 0) {
        XBT_DEBUG("Working on var (%p)", var);
        var->new_mu = new_mu(var);
        /* dual_updated += (fabs(var->new_mu-var->mu)>dichotomy_min_error); */
        /* XBT_DEBUG("dual_updated (%d) : %1.20f",dual_updated,fabs(var->new_mu-var->mu)); */
        XBT_DEBUG("Updating mu : var->mu (%p) : %1.20f -> %1.20f", var,
                  var->mu, var->new_mu);
        var->mu = var->new_mu;
    new_obj = dual_objective(var_list, cnst_list);
    XBT_DEBUG("Improvement for Objective (%g -> %g) : %g", obj, new_obj,
    xbt_assert(obj - new_obj >= -epsilon_min_error,
               "Our gradient sucks! (%1.20f)", obj - new_obj);
     * Improve the value of lambda_i
    xbt_swag_foreach(cnst, cnst_list) {
      XBT_DEBUG("Working on cnst (%p)", cnst);
          dichotomy(cnst->lambda, partial_diff_lambda, cnst,
                    dichotomy_min_error);
      /* dual_updated += (fabs(cnst->new_lambda-cnst->lambda)>dichotomy_min_error); */
      /* XBT_DEBUG("dual_updated (%d) : %1.20f",dual_updated,fabs(cnst->new_lambda-cnst->lambda)); */
      XBT_DEBUG("Updating lambda : cnst->lambda (%p) : %1.20f -> %1.20f",
                cnst, cnst->lambda, cnst->new_lambda);
      cnst->lambda = cnst->new_lambda;
    new_obj = dual_objective(var_list, cnst_list);
    XBT_DEBUG("Improvement for Objective (%g -> %g) : %g", obj, new_obj,
    xbt_assert(obj - new_obj >= -epsilon_min_error,
               "Our gradient sucks! (%1.20f)", obj - new_obj);
     * Now computes the values of each variable (\rho) based on
     * the values of \lambda and \mu.
    XBT_DEBUG("-------------- Check convergence ----------");
    overall_modification = 0;
    xbt_swag_foreach(var, var_list) {
      if (var->weight <= 0)
        tmp = new_value(var);
        overall_modification =
            MAX(overall_modification, fabs(var->value - tmp));
        XBT_DEBUG("New value of var (%p) = %e, overall_modification = %e",
                  var, var->value, overall_modification);
329 XBT_DEBUG("-------------- Check feasability ----------");
    if (!__check_feasible(cnst_list, var_list, 0))
      overall_modification = 1.0;
    XBT_DEBUG("Iteration %d: overall_modification : %f", iteration,
              overall_modification);
    /* if(!dual_updated) { */
    /*   XBT_WARN("Could not improve the convergence at iteration %d. Drop it!",iteration); */
  __check_feasible(cnst_list, var_list, 1);
  if (overall_modification <= epsilon_min_error) {
    XBT_DEBUG("The method converges in %d iterations.", iteration);
  if (iteration >= max_iterations) {
347 ("Method reach %d iterations, which is the maximum number of iterations allowed.",
350 /* XBT_INFO("Method converged after %d iterations", iteration); */
352 if (XBT_LOG_ISENABLED(surf_lagrange, xbt_log_priority_debug)) {
 * Returns a double value corresponding to the result of a dichotomy process with
 * respect to a given variable/constraint (\mu in the case of a variable or \lambda in
 * the case of a constraint) and an initial value init.
 * @param init initial value for \mu or \lambda
 * @param diff a function that computes the differential of the dual with respect to \mu or \lambda
 * @param var_cnst a pointer to a variable or constraint
 * @param min_error the minimum tolerated error
 * @return a double corresponding to the result of the dichotomy process
static double dichotomy(double init, double diff(double, void *),
                        void *var_cnst, double min_error)
  double overall_error;
  double min_diff, max_diff, middle_diff;
  min_diff = max_diff = middle_diff = 0.0;
  if ((diff_0 = diff(1e-16, var_cnst)) >= 0) {
    XBT_CDEBUG(surf_lagrange_dichotomy, "returning 0.0 (diff = %e)", diff_0);
  min_diff = diff(min, var_cnst);
  max_diff = diff(max, var_cnst);
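  /* Bracket the root: shift or widen [min, max] until the differential changes
   * sign on the interval (diff(min) < 0 < diff(max)), then bisect it until the
   * remaining error drops below min_error. */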
  while (overall_error > min_error) {
    XBT_CDEBUG(surf_lagrange_dichotomy,
               "[min, max] = [%1.20f, %1.20f] || diffmin, diffmax = %1.20f, %1.20f",
               min, max, min_diff, max_diff);
    if (min_diff > 0 && max_diff > 0) {
        XBT_CDEBUG(surf_lagrange_dichotomy, "Decreasing min");
        min_diff = diff(min, var_cnst);
        XBT_CDEBUG(surf_lagrange_dichotomy, "Decreasing max");
    } else if (min_diff < 0 && max_diff < 0) {
        XBT_CDEBUG(surf_lagrange_dichotomy, "Increasing max");
        max_diff = diff(max, var_cnst);
        XBT_CDEBUG(surf_lagrange_dichotomy, "Increasing min");
    } else if (min_diff < 0 && max_diff > 0) {
      middle = (max + min) / 2.0;
      XBT_CDEBUG(surf_lagrange_dichotomy, "Trying (max+min)/2 : %1.20f",
      if ((min == middle) || (max == middle)) {
        XBT_CWARN(surf_lagrange_dichotomy,
                  "Cannot improve the convergence! min=max=middle=%1.20f, diff = %1.20f."
                  " Reaching the 'double' limits. Maybe scaling your function would help ([%1.20f,%1.20f]).",
                  min, max - min, min_diff, max_diff);
      middle_diff = diff(middle, var_cnst);
      if (middle_diff < 0) {
        XBT_CDEBUG(surf_lagrange_dichotomy, "Increasing min");
        overall_error = max_diff - middle_diff;
        min_diff = middle_diff;
        /* SHOW_EXPR(overall_error); */
      } else if (middle_diff > 0) {
        XBT_CDEBUG(surf_lagrange_dichotomy, "Decreasing max");
        overall_error = max_diff - middle_diff;
        max_diff = middle_diff;
        /* SHOW_EXPR(overall_error); */
        /* SHOW_EXPR(overall_error); */
    } else if (min_diff == 0) {
      /* SHOW_EXPR(overall_error); */
    } else if (max_diff == 0) {
      /* SHOW_EXPR(overall_error); */
    } else if (min_diff > 0 && max_diff < 0) {
      XBT_CWARN(surf_lagrange_dichotomy,
                "The impossible happened, partial_diff(min) > 0 && partial_diff(max) < 0");
      XBT_CWARN(surf_lagrange_dichotomy,
                "diffmin (%1.20f) or diffmax (%1.20f) are something I don't know, taking no action.",
  XBT_CDEBUG(surf_lagrange_dichotomy, "returning %e", (min + max) / 2.0);
  return ((min + max) / 2.0);
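/* Partial derivative of the dual objective with respect to the lambda of
 * constraint param_cnst, evaluated at the trial value lambda: for every
 * variable using this constraint, subtract (f_i')^{-1}(sigma_i), where sigma_i
 * sums the lambdas seen by the variable with this constraint's current lambda
 * replaced by the trial value. */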
static double partial_diff_lambda(double lambda, void *param_cnst)
  xbt_swag_t elem_list = NULL;
  lmm_element_t elem = NULL;
  lmm_variable_t var = NULL;
  lmm_constraint_t cnst = (lmm_constraint_t) param_cnst;
  double sigma_i = 0.0;
  elem_list = &(cnst->element_set);
  XBT_CDEBUG(surf_lagrange_dichotomy, "Computing diff of cnst (%p)", cnst);
  xbt_swag_foreach(elem, elem_list) {
    var = elem->variable;
    if (var->weight <= 0)
    XBT_CDEBUG(surf_lagrange_dichotomy, "Computing sigma_i for var (%p)",
    // Initialize the summation variable
    for (j = 0; j < var->cnsts_number; j++) {
      sigma_i += (var->cnsts[j].constraint)->lambda;
    // Add mu_i if this flow has an associated RTT constraint
    // Replace the value of cnst->lambda by the value of parameter lambda
    sigma_i = (sigma_i - cnst->lambda) + lambda;
    diff += -var->func_fpi(var, sigma_i);
  XBT_CDEBUG(surf_lagrange_dichotomy,
             "d D/d lambda for cnst (%p) at %1.20f = %1.20f", cnst, lambda,
/** \brief Set the default protocol functions (f, f', and (f')^{-1}).
 * \param func_fpi inverse of the partial differential of f (f prime inverse, (f')^{-1})
 * Set the default functions to the ones passed as parameters. This is polymorphism in pure C; enjoy the roots of programming.
void lmm_set_default_protocol_function(double (*func_f) (lmm_variable_t var, double x),
                                       double (*func_fp) (lmm_variable_t var, double x),
                                       double (*func_fpi) (lmm_variable_t var, double x))
  func_fp_def = func_fp;
  func_fpi_def = func_fpi;
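/* Usage sketch (not part of this excerpt): a caller would install one of the
 * triples defined below, e.g.
 *   lmm_set_default_protocol_function(func_vegas_f, func_vegas_fp, func_vegas_fpi);
 * or, equivalently, the func_reno_* / func_reno2_* functions. */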
/**************** Vegas and Reno functions *************************/
 * NOTE for Reno: all functions consider the network
 * coefficient (alpha) equal to 1.
 * For Vegas: $f(x) = \alpha D_f \ln(x)$
 * Therefore: $fp(x) = \frac{\alpha D_f}{x}$
 * Therefore: $fpi(x) = \frac{\alpha D_f}{x}$
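 * (fp is an involution: solving \alpha D_f / y = x for y gives y = \alpha D_f / x,
 *  so fpi has the same closed form as fp.)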
#define VEGAS_SCALING 1000.0
double func_vegas_f(lmm_variable_t var, double x)
  xbt_assert(x > 0.0, "Don't call me with stupid values! (%1.20f)", x);
  return VEGAS_SCALING * var->weight * log(x);
double func_vegas_fp(lmm_variable_t var, double x)
  xbt_assert(x > 0.0, "Don't call me with stupid values! (%1.20f)", x);
  return VEGAS_SCALING * var->weight / x;
double func_vegas_fpi(lmm_variable_t var, double x)
  xbt_assert(x > 0.0, "Don't call me with stupid values! (%1.20f)", x);
  return var->weight / (x / VEGAS_SCALING);
 * For Reno: $f(x) = \frac{\sqrt{3/2}}{D_f} \arctan(\sqrt{3/2} D_f x)$
 * Therefore: $fp(x) = \frac{3}{3 D_f^2 x^2 + 2}$
 * Therefore: $fpi(x) = \sqrt{\frac{1}{{D_f}^2 x} - \frac{2}{3{D_f}^2}}$
#define RENO_SCALING 1.0
double func_reno_f(lmm_variable_t var, double x)
  xbt_assert(var->weight > 0.0, "Don't call me with stupid values!");
  return RENO_SCALING * sqrt(3.0 / 2.0) / var->weight *
      atan(sqrt(3.0 / 2.0) * var->weight * x);
double func_reno_fp(lmm_variable_t var, double x)
  return RENO_SCALING * 3.0 / (3.0 * var->weight * var->weight * x * x +
double func_reno_fpi(lmm_variable_t var, double x)
  xbt_assert(var->weight > 0.0, "Don't call me with stupid values!");
  xbt_assert(x > 0.0, "Don't call me with stupid values!");
      1.0 / (var->weight * var->weight * (x / RENO_SCALING)) -
      2.0 / (3.0 * var->weight * var->weight);
  /* xbt_assert(res_fpi>0.0,"Don't call me with stupid values!"); */
  return sqrt(res_fpi);
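/* Derivation of the Reno fpi: solving fp(y) = 3/(3 D_f^2 y^2 + 2) = x for y
 * gives y^2 = 1/(D_f^2 x) - 2/(3 D_f^2), which is the res_fpi computed above. */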
/* Implementing new Reno-2
 * For Reno-2: $f(x) = U_f(x_f) = \frac{2}{D_f} \ln(2 + x D_f)$
 * Therefore: $fp(x) = \frac{2}{D_f x + 2}$
 * Therefore: $fpi(x) = \frac{2 D_f}{x} - 4$
#define RENO2_SCALING 1.0
double func_reno2_f(lmm_variable_t var, double x)
  xbt_assert(var->weight > 0.0, "Don't call me with stupid values!");
  return RENO2_SCALING * (1.0 / var->weight) * log((x * var->weight) /
                                                   (2.0 * x * var->weight +
double func_reno2_fp(lmm_variable_t var, double x)
  return RENO2_SCALING * 3.0 / (var->weight * x *
                                (2.0 * var->weight * x + 3.0));
double func_reno2_fpi(lmm_variable_t var, double x)
  xbt_assert(x > 0.0, "Don't call me with stupid values!");
  tmp = x * var->weight * var->weight;
  res_fpi = tmp * (9.0 * x + 24.0);
  res_fpi = RENO2_SCALING * (-3.0 * tmp + sqrt(res_fpi)) / (4.0 * tmp);