From: couchot Date: Mon, 9 Dec 2013 13:03:52 +0000 (+0100) Subject: ajout comparaison convexite X-Git-Url: https://bilbo.iut-bm.univ-fcomte.fr/and/gitweb/desynchronisation-controle.git/commitdiff_plain/fe438aa764b5d1ad7ea61512cffbb3761eff99b7?ds=inline ajout comparaison convexite --- diff --git a/IWCMC14/argmin.tex b/IWCMC14/argmin.tex index 6aae1e5..5677856 100644 --- a/IWCMC14/argmin.tex +++ b/IWCMC14/argmin.tex @@ -89,4 +89,61 @@ $$ \end{table*} -This improvement \ No newline at end of file +This improvement has been evaluated on a set of experiments. +For 10 thresholds $t$, such that $10^{-5} \le t \le 10^{-3}$, we have +executed 10 times the approach detailed before either with the new +gradient calculus or with the original argmin one. +Table~\ref{Table:argmin:time} summarizes the averages of these +execution times, given in seconds. We remark that the time spent with the gradient +approach is about 37 times smaller than that of the argmin one. +Among implementations of argmin approaches, we have retained +the COBYLA one since it does not require any gradient to be executed.
+ +\begin{table*} +\begin{scriptsize} +$$ +\begin{array}{|l|l|l|l|l|l|l|l|l|l|l|} +\hline +\textrm{Convergence Threshold} & +10^{-5} & +1.67 \cdot 10^{-5} & +2.78 \cdot 10^{-5} & +4.64 \cdot 10^{-5} & +7.74 \cdot 10^{-5} & +1.29 \cdot 10^{-4} & +2.15 \cdot 10^{-4} & +3.59 \cdot 10^{-4} & +5.99 \cdot 10^{-4} & +0.001 \\ +\hline +\textrm{Gradient Calculus} & +56.29 & +29.17 & +37.05 & +6.05 & +5.47 & +3.82 & +1.91 & +2.37 & +0.87 & +0.65 \\ +\hline +\textrm{Argmin Method} & +2224.27 & +1158.37 & +1125.21& +216.82& +162.26& +126.99& +58.044& +74.204& +23.99& +15.85\\ +\hline +\end{array} +$$ +\caption{Convergence Times for Gradient and Argmin Methods}\label{Table:argmin:time} +\end{scriptsize} +\end{table*} + + \ No newline at end of file diff --git a/IWCMC14/convex.png b/IWCMC14/convex.png new file mode 100644 index 0000000..2279c17 Binary files /dev/null and b/IWCMC14/convex.png differ diff --git a/IWCMC14/convexity.tex b/IWCMC14/convexity.tex index 172ca37..ff4656d 100644 --- a/IWCMC14/convexity.tex +++ b/IWCMC14/convexity.tex @@ -46,4 +46,27 @@ Provided $p^{5/3}$ is replaced by $P$, we have a quadratic function which is strictly convex, for any value of $\lambda_h$ since the discriminant is positive. - \ No newline at end of file +This proposed enhancement has been evaluated as follows: +10 thresholds $t$, such that $10^{-5} \le t \le 10^{-3}$, have +been selected and for each of them, +10 random configurations have been generated. +For each one, we store the +number of iterations which is sufficient to make the dual +function variation smaller than this given threshold with +the two approaches: either the original one or the +one which guarantees convexity. + +Figure~\ref{Fig:convex} summarizes the average number of convergence +iterations for each threshold value. As we can see, even if this new +enhanced method introduces new calculus, +it only slightly slows down the algorithm and guarantees the convexity, +and thus the convergence.
+ +\begin{figure*} +\begin{center} +\includegraphics[scale=0.5]{convex.png} +\end{center} +\caption{Original Vs Convex Garantee Approaches}\label{Fig:convex} +\end{figure*} + + diff --git a/exp_controle_asynchrone/simulMWSN.py b/exp_controle_asynchrone/simulMWSN.py index 7ca4bd5..45326b6 100644 --- a/exp_controle_asynchrone/simulMWSN.py +++ b/exp_controle_asynchrone/simulMWSN.py @@ -12,7 +12,7 @@ import cv2 as cv2 import datetime as dt errorq = 0.1 -errorD = 1E-1 +errorD = 1E-2 epsilon = 1E-10 vrate = 0.8 p = 0.7 @@ -268,7 +268,7 @@ def maj_theta(k): -def maj(k,maj_theta,mxg,idxexp): +def maj(k,maj_theta,mxg,idxexp,comppsh=False): # initialisation des operateurs lagrangiens global u, v, la, w, theta , q, Ps, Rh, eta, x, valeurFonctionDuale @@ -348,7 +348,7 @@ def maj(k,maj_theta,mxg,idxexp): n1 = dt.datetime.now() c = -float(sum([a[i][l]*w[l] for l in L]) - la[i]*Bi)/(2*amplifieur) n2 = dt.datetime.now() - cp = armin(f_q,q[i],POS,tuple([i])) + #cp = armin(f_q,q[i],POS,tuple([i])) n3 = dt.datetime.now() tq += (n2-n1).microseconds tqa += (n3-n2).microseconds @@ -371,10 +371,21 @@ def maj(k,maj_theta,mxg,idxexp): for h in V: if not ASYNC or random() < taux_succes: n1 = dt.datetime.now() - t= float(-3*la[h]+mt.sqrt(9*(la[h]**2)+64*delta*v[h]*mt.log(float(sigma2)/D)/gamma))/(16*delta) - rep = mt.pow(t,float(3)/5) + #calcul nouvelle solution + if comppsh : + if la[h] != 0 : + t= float(2*v[h]*mt.log(float(sigma2)/D))/(3*gamma*la[h]) + rep = mt.pow(t,float(3)/5) + else : + rep = 10 + else : + t= float(-3*la[h]+mt.sqrt(9*(la[h]**2)+64*delta*v[h]*mt.log(float(sigma2)/D)/gamma))/(16*delta) + rep = mt.pow(t,float(3)/5) + + + n2 = dt.datetime.now() - cp = armin(f_Ps,Ps[h],POS,tuple([h])) + #cp = armin(f_Ps,Ps[h],POS,tuple([h])) n3 = dt.datetime.now() tp += (n2-n1).microseconds tpa += (n3-n2).microseconds @@ -402,7 +413,7 @@ def maj(k,maj_theta,mxg,idxexp): n1 = dt.datetime.now() rep = float(v[h])/(2*delta) n2 = dt.datetime.now() - cp = armin(f_Rh,Rh[h],POS_NUL,tuple([h])) + #cp 
= armin(f_Rh,Rh[h],POS_NUL,tuple([h])) n3 = dt.datetime.now() tr += (n2-n1).microseconds tra += (n3-n2).microseconds @@ -425,7 +436,7 @@ def maj(k,maj_theta,mxg,idxexp): n1 = dt.datetime.now() rep = -float(cs[l]*sum([la[i]*aplus[i][l] for i in N]) +cr*sum([la[i]*amoins[i][l] for i in N])+sum([u[(h,i)]*a[i][l] for i in N]))/(2*delta) n2 = dt.datetime.now() - cp = armin(f_x,x[(h,l)],POS_NUL,tuple([h,l])) + #cp = armin(f_x,x[(h,l)],POS_NUL,tuple([h,l])) n3 = dt.datetime.now() tx += (n2-n1).microseconds txa += (n3-n2).microseconds @@ -612,7 +623,8 @@ def __evalue_maj_theta__(nbexp,out=False): #print liste_arret #print (min(m),max(m),float(sum(m))/nbexp,m),m - return (liste_arret,dur,dura) + #return (liste_arret,dur,dura) + return (liste_arret,dur) def __une_seule_exp__(fichier_donees): @@ -686,7 +698,146 @@ print listederes + +def __comparePsh_et_Psh_avec8_3__(nbexp,out=False): + print "tttttttttttt" + global u, v, la, w, theta , q, Ps, Rh, eta, x, valeurFonctionDuale + res = {} + m,mp = [],[] + itermax = 100000 + + def __maj_theta(k): + mem = [] + om = omega/(mt.pow(k,0.75)) + return om + liste_arret=[] + for idxexp in range(nbexp): + mxg = 0 + if not(out): + initialisation() + else : + initialisation_() + + k = 1 + arret = False + sm = 0 + err, ar = 0,0 + dur,dura=0,0 + + # duppliquee + while k < itermax and not arret : + (u,v,la,w,theta,eta,q,Ps,Rh,x,valeurFonctionDuale,ar,mxg,smax,ct,cta)=maj(k,__maj_theta,mxg,idxexp) + err = (max(q.values()) - min(q.values()))/min(q.values()) + dur += ct + dura += cta + arret = err < errorq or ar < errorD + k+=1 + variation = "+" if smax > sm else "-" + if out : print variation, + if k%100 ==0 : + print "k:", k, + "erreur sur q", + errorq, "erreur sur F", ar, "et q:", q + + if out : print "maxg=", mxg + mem = [deepcopy(q),deepcopy(Ps),deepcopy(Rh),deepcopy(eta), + deepcopy(x),deepcopy(u),deepcopy(v),deepcopy(la),deepcopy(w)] + """ + if k%4500 == 0 : + + #print "#########\n",mem,"\#########\n" + if k%4600 == 0 : + #print 
"#########m\n",mem,"\#########\n" + """ + + + if smax - sm > 500: + print "variation trop grande" + print "init" + print init + exit + sm = smax + + if k == itermax: + print "nbre d'iteration trop grand" + print "init" + print init + sy.exit(1) + else : + liste_arret += [(err, ar,k,errorD)] + + print "###############" + print k," avec optym" + print "###############" + m += [k] + ##fin de duplication + + + + #remise a zero + q,Ps,Rh,eta,x,u,v,la,w=init[0],init[1],init[2],init[3],init[4],init[5],init[6],init[7],init[8] + k = 1 + arret = False + sm = 0 + err, ar = 0,0 + dur,dura=0,0 + # duppliquee + while k < itermax and not arret : + (u,v,la,w,theta,eta,q,Ps,Rh,x,valeurFonctionDuale,ar,mxg,smax,ct,cta)=maj(k,__maj_theta,mxg,idxexp,True) + err = (max(q.values()) - min(q.values()))/min(q.values()) + dur += ct + dura += cta + arret = err < errorq or ar < errorD + k+=1 + variation = "+" if smax > sm else "-" + if out : print variation, + if k%100 ==0 : + print "k:", k, + "erreur sur q", + errorq, "erreur sur F", ar, "et q:", q + + if out : print "maxg=", mxg + mem = [deepcopy(q),deepcopy(Ps),deepcopy(Rh),deepcopy(eta), + deepcopy(x),deepcopy(u),deepcopy(v),deepcopy(la),deepcopy(w)] + """ + if k%4500 == 0 : + + #print "#########\n",mem,"\#########\n" + if k%4600 == 0 : + #print "#########m\n",mem,"\#########\n" + """ + + + if smax - sm > 500: + print "variation trop grande" + print "init" + print init + exit + sm = smax + + if k == itermax: + print "nbre d'iteration trop grand" + print "init" + print init + sy.exit(1) + else : + liste_arret += [(err, ar,k,errorD)] + + print "###############" + print k,"sans optym" + print "###############" + mp += [k] + + + #print liste_arret + #print (min(m),max(m),float(sum(m))/nbexp,m),m + #return (liste_arret,dur,dura) + return (m,mp) + + + # pour evaluer argmin +""" listederes=[] k=0 for k in list(np.logspace(-5, -3, num=10)): @@ -695,6 +846,19 @@ for k in list(np.logspace(-5, -3, num=10)): listederes += 
[(k,__evalue_maj_theta__(3))] k+=1 print listederes +""" + +# pour evaluer psh +listederes=[] +for k in list(np.logspace(-5, -3, num=10)): + print "Precision",k + errorD = k + listederes += [(k,__comparePsh_et_Psh_avec8_3__(10))] + k+=1 +print listederes + + +