From e02082ad2d87032e8cff4fa8cc48682c85efc624 Mon Sep 17 00:00:00 2001
From: couchot
Date: Mon, 9 Dec 2013 14:12:54 +0100
Subject: [PATCH] a few typos

---
 IWCMC14/argmin.tex                   | 12 ++++++------
 IWCMC14/convexity.tex                | 20 ++++++++++----------
 exp_controle_asynchrone/simulMWSN.py |  2 +-
 3 files changed, 17 insertions(+), 17 deletions(-)

diff --git a/IWCMC14/argmin.tex b/IWCMC14/argmin.tex
index 5677856..9a20c42 100644
--- a/IWCMC14/argmin.tex
+++ b/IWCMC14/argmin.tex
@@ -89,14 +89,14 @@ $$
 \end{table*}
 
-This improvment has been evaluated on a set of experiments.
-For 10 tresholds $t$, such that $1E-5 \le t \le 1E-3$, we have
-executed 10 times the aproach detailled before either with the new
+This improvement has been evaluated on a set of experiments.
+For 10 thresholds $t$, such that $1E-5 \le t \le 1E-3$, we have
+executed 10 times the approach detailed before, either with the new
 gradient calculus or with the original argmin one.
 The Table~\ref{Table:argmin:time} summarizes the averages of these
-excution times, given in seconds. We remark time spent with the gradient
+execution times, given in seconds. We remark that the time spent with the gradient
 approach is about 37 times smaller than the one of the argmin one.
-Among implementations of argmin aproaches, we have retained
+Among implementations of argmin approaches, we have retained
 the COBYLA one since it does not require any gradient to be executed.
 
 \begin{table*}
@@ -104,7 +104,7 @@ the COBYLA one since it does not require any gradient to be executed.
 $$
 \begin{array}{|l|l|l|l|l|l|l|l|l|l|l|}
 \hline
-\textrm{Convergence Treshold} &
+\textrm{Convergence Threshold} &
 10^{-5} &
 1.67.10^{-5} &
 2.78.10^{-5} &
diff --git a/IWCMC14/convexity.tex b/IWCMC14/convexity.tex
index ff4656d..de0ddbc 100644
--- a/IWCMC14/convexity.tex
+++ b/IWCMC14/convexity.tex
@@ -22,7 +22,7 @@ in equation~(\ref{eq:obj2}) by
 + \delta_p\sum_{h \in V }P_{sh}^{\frac{8}{3}}.
 \label{eq:obj2p}
 \end{equation}
-In this equation we have first introduced new regularisation factors
+In this equation we have first introduced new regularization factors
 (namely $\delta_x$, $\delta_r$, and $\delta_p$)
 instead of the sole $\delta$.
 This allows to further separately study the influence of each factor.
@@ -46,27 +46,27 @@ Provided $p^{5/3}$ is replaced by $P$, we have a quadratic function
 which is strictly convex, for any value of $\lambda_h$ since the
 discriminant is positive.
 
-This proposed enhacement has been evaluated as follows:
-10 tresholds $t$, such that $1E-5 \le t \le 1E-3$, have
+This proposed enhancement has been evaluated as follows:
+10 thresholds $t$, such that $1E-5 \le t \le 1E-3$, have
 been selected and for each of them,
 10 random configurations have been generated.
 For each one, we store the number of
 iterations which is sufficient to make the dual
-function variation smaller than this given treshold with
-the two approaches: either the original one ore the
-one which is convex garantee.
+function variation smaller than this given threshold with
+the two approaches: either the original one or the
+one with the convexity guarantee.
 The Figure~\ref{Fig:convex} summarizes the average number of convergence
-iterations for each tresholdvalue. As we can see, even if this new
+iterations for each threshold value. As we can see, even if this new
 enhanced method introduces new calculus,
-it only slows few down the algorithm and garantee the convexity,
+it only slightly slows down the algorithm and guarantees the convexity,
 and thus the convergence.
-
+Notice that the encoding power has been arbitrarily limited to 10 W.
 
 \begin{figure*}
 \begin{center}
 \includegraphics[scale=0.5]{convex.png}
 \end{center}
-\caption{Original Vs Convex Garantee Approaches}\label{Fig:convex}
+\caption{Original vs. Convex Guarantee Approaches}\label{Fig:convex}
 \end{figure*}
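The argmin.tex hunk above compares a hand-written gradient update against the
derivative-free COBYLA solver. A minimal sketch of such a timing comparison,
assuming a stand-in convex objective, since the paper's actual dual function is
not part of this patch; f, grad_f, the step size, and the iteration budget below
are all illustrative, and only scipy.optimize.minimize with method='COBYLA' is
the real API being referred to:

    import time
    import numpy as np
    from scipy.optimize import minimize

    def f(x):
        # Stand-in strictly convex objective; the paper's dual function
        # is defined in the .tex sources, not in this patch.
        return np.sum(x**2) + np.sum(x**4)

    def grad_f(x):
        # Exact gradient of the stand-in objective above.
        return 2*x + 4*x**3

    x0 = np.ones(10)

    # Gradient approach: plain descent until the step drops below a threshold.
    start = time.perf_counter()
    x = x0.copy()
    for _ in range(10000):
        step = 0.01 * grad_f(x)
        x = x - step
        if np.linalg.norm(step) < 1e-5:
            break
    t_gradient = time.perf_counter() - start

    # Argmin approach: COBYLA only ever evaluates f, never grad_f,
    # which is why it was retained when no gradient is available.
    start = time.perf_counter()
    res = minimize(f, x0, method='COBYLA', tol=1e-5)
    t_argmin = time.perf_counter() - start

    print(t_gradient, t_argmin)

The roughly 37x gap reported in the hunk is specific to the paper's objective
and experimental protocol; this toy only illustrates the two kinds of solver.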
diff --git a/exp_controle_asynchrone/simulMWSN.py b/exp_controle_asynchrone/simulMWSN.py
index 45326b6..8e286a3 100644
--- a/exp_controle_asynchrone/simulMWSN.py
+++ b/exp_controle_asynchrone/simulMWSN.py
@@ -377,7 +377,7 @@ def maj(k,maj_theta,mxg,idxexp,comppsh=False):
             t= float(2*v[h]*mt.log(float(sigma2)/D))/(3*gamma*la[h])
             rep = mt.pow(t,float(3)/5)
         else :
-            rep = 10
+            rep = 1000
     else :
         t= float(-3*la[h]+mt.sqrt(9*(la[h]**2)+64*delta*v[h]*mt.log(float(sigma2)/D)/gamma))/(16*delta)
         rep = mt.pow(t,float(3)/5)
-- 
2.39.5
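The simulMWSN.py hunk raises a fallback value of rep from 10 to 1000. For
readers without the full script, here is a self-contained sketch of the update
step this hunk touches; the branch conditions, parameter names, and sample
values are assumptions reconstructed from the context lines, not the script's
actual code:

    import math as mt

    def power_update(vh, lah, gamma, delta, sigma2, D):
        # Returns the updated encoding power p = t**(3/5) for one sensor h.
        if delta == 0:
            if lah > 0:  # assumed guard; the real test lies outside the hunk
                # Closed form when the regularization factor delta is absent.
                t = float(2*vh*mt.log(float(sigma2)/D))/(3*gamma*lah)
                return mt.pow(t, float(3)/5)
            # Fallback cap, raised from 10 to 1000 by this patch.
            return 1000
        # delta > 0: positive root of the induced quadratic in t.
        t = float(-3*lah + mt.sqrt(9*(lah**2)
                  + 64*delta*vh*mt.log(float(sigma2)/D)/gamma))/(16*delta)
        return mt.pow(t, float(3)/5)

    # Illustrative values only; the real script draws them from the simulation.
    print(power_update(vh=5.0, lah=0.1, gamma=1.0, delta=0.0, sigma2=2.0, D=1.0))
    print(power_update(vh=5.0, lah=0.1, gamma=1.0, delta=0.01, sigma2=2.0, D=1.0))

The t**(3/5) form is consistent with recovering p from the substitution
P = p**(5/3) discussed in convexity.tex; only the guarded fallback value is
changed by this patch.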