From 9e6c76e39059d7f227f7ed48ad71195374a2f2a5 Mon Sep 17 00:00:00 2001 From: Michel Salomon Date: Sat, 8 Oct 2011 11:09:57 +0200 Subject: [PATCH] Conclusion restant a raccourcir + point 5 du reviewer 1 a revoir --- authors_response.tex | 127 +++++++++ chaos-paper.bib | 12 +- main.tex | 652 +++++++++++++++++++++---------------------- 3 files changed, 446 insertions(+), 345 deletions(-) create mode 100644 authors_response.tex diff --git a/authors_response.tex b/authors_response.tex new file mode 100644 index 0000000..f6bcb54 --- /dev/null +++ b/authors_response.tex @@ -0,0 +1,127 @@ + +\documentclass{article} +\usepackage[T1]{fontenc} +\usepackage[latin1]{inputenc} +\usepackage{amsmath} +\usepackage{dsfont} +\renewcommand{\labelenumii}{\labelenumi\arabic{enumii}} + +\begin{document} + +\begin{flushright} +\today +\end{flushright}% + +\vspace{-0.5cm}\hspace{-2cm}Computer Science Laboratory, LIFC + +\hspace{-2cm}University of Franche-Comt\'e + +\hspace{-2cm}25030 Besan\c{c}on, France. + +\bigskip + +\begin{center} +Detailed changes and addressed issues in the revision of the paper + +"Recurrent Neural Networks and Chaos: Construction, \\ +Evaluation, and Prediction Ability" \\ +renamed in \\ +"Chaotic Neural Networks: Construction, Evaluation, and Prediction Ability" + +by Jacques M. Bahi, Jean-Fran\c{c}ois Couchot, Christophe Guyeux, and Michel Salomon + +\bigskip +\end{center} + +Please, find below the detailed changes and issues we addressed in the +revision of our above mentioned paper that we resubmit. + +All the remarks and recommendations of the three reviewers have been +considered and have led to modifications in the paper. + +\begin{itemize} +\item Concerning the remarks of reviewer No. 1: +\end{itemize} + +\begin{enumerate} +\item Please explain the topic "Recurrent Neural Networks and Chaos: + Construction, Evaluation, and Prediction Ability" which makes me + confused. Does the paper discuss the construction, evaluation, and + prediction ability of chaotic neural networks? And in the paper, the + chaotic neural networks are mainly done research on. + + {\it Our response} + + To be completed +\item Please explain the Line 242( "than chaotic iterations Ff with + initial condition............" ) and Line 298( "investigate, when + comparing neural networks and Devaney's chaos"). + + {\it Our response} + + To be completed +\item Section VI analyzes the suitability of artificial neural + networks for predicting chaotic behaviors. So, I think section VI + don't correspond to the topic. And homoplastically, lines 30-32("the + learning, with neural networks having a feedforward structure, of + chaotic behaviors represented by data sets obtained from chaotic + maps, is far more difficult than non chaotic behaviors") refer to + the learning of neural networks having a feedforward structure. But + lines 402-405 refer to all the network topologies. + + {\it Our response} + + To be completed +\item Please explain the meanings of the percentage in TABLE I and + TABLE II. + + {\it Our response} + + To be completed +\item Except for prediction success rates, in order to reflect the + prediction ability, please add the analysis of the prediction errors + for data sequence and diagram it. + + {\it Our response} + + To be completed +\end{enumerate} + +\begin{itemize} +\item Concerning the remarks of reviewer No. 2: +\end{itemize} + +\begin{enumerate} +\item On the basis of the result botained by Guyeux in Ref[12], the + authors dealed with chaotic neural networks for various fields of + appli1cation. 
Firstly, the authors described how to build a neural + network that can be trained to learn a given chaotic map function, + then found a condition that allow to check whether the iterations + induced by a function are chaotic or not, and thus if a chaotic map + is obtained. As the authors said that this work is different from + most of prviews works, this manuscript gave a rigorous mathematical + proof for chaos of chaotic neural networks. This is a very + interesting work. On the other hand, I think, the conclusion is too + long, and some Definitions such as Definition 1-Definition 5 are + wellknow Definition, it's no necessary to be presented any more. + + {\it Our response} + +\end{enumerate} + +We are very grateful to the reviewers who, by their recommendations, +allowed us to improve our paper. + +%TCIMACRO{% +%\TeXButton{Flushright}{\begin{flushright} +%Best regards\\ +%The authors +%\end{flushright}}}% +%BeginExpansion +\begin{flushright} +Best regards\\ +The authors +\end{flushright}% +%EndExpansion + +\end{document} diff --git a/chaos-paper.bib b/chaos-paper.bib index 54fd84d..120e1fe 100644 --- a/chaos-paper.bib +++ b/chaos-paper.bib @@ -23,15 +23,13 @@ @String{TESC = {Turkish Journal of Electrical Engineering and Computer Sciences}} %% 1989 - Cybenko -@article{DBLP:journals/jpdc/Cybenko89, +@article{Cybenko89, author = {George Cybenko}, - title = {Dynamic Load Balancing for Distributed Memory Multiprocessors}, - journal = {J. Parallel Distrib. Comput.}, - volume = {7}, - number = {2}, + title = {Approximation by Superpositions of a Sigmoidal function}, + journal = {Mathematics of Control, Signals and Systems}, + volume = {2}, year = {1989}, - pages = {279-301}, - bibsource = {DBLP, http://dblp.uni-trier.de} + pages = {303-314} } %% 1989 - Hornik diff --git a/main.tex b/main.tex index 78454a7..de92eaa 100644 --- a/main.tex +++ b/main.tex @@ -39,9 +39,10 @@ preprint,% \begin{document} -\title[Recurrent Neural Networks and Chaos]{Recurrent Neural Networks -and Chaos: Construction, Evaluation, \\ -and Prediction Ability} +\title[Neural Networks and Chaos]{Neural Networks and Chaos: +Construction, Evaluation of Chaotic Networks \\ +and Prediction of Chaos with Multilayer Feedforward Networks +} \author{Jacques M. Bahi} \author{Jean-Fran\c{c}ois Couchot} @@ -59,10 +60,7 @@ IUT de Belfort-Montb\'eliard, BP 527, \\ \newcommand{\CG}[1]{\begin{color}{red}\textit{#1}\end{color}} \newcommand{\JFC}[1]{\begin{color}{blue}\textit{#1}\end{color}} - - - - +\newcommand{\MS}[1]{\begin{color}{green}\textit{#1}\end{color}} \begin{abstract} %% Text of abstract @@ -70,24 +68,23 @@ Many research works deal with chaotic neural networks for various fields of application. Unfortunately, up to now these networks are usually claimed to be chaotic without any mathematical proof. The purpose of this paper is to establish, based on a rigorous theoretical -framework, an equivalence between chaotic iterations according to -Devaney and a particular class of neural -networks. On the one hand we show how to build such a network, on the -other hand we provide a method to check if a neural network is a -chaotic one. Finally, the ability of classical feedforward multilayer -perceptrons to learn sets of data obtained from a dynamical -system is regarded. Various Boolean functions are iterated on finite -states. Iterations of some of them are proven to be chaotic - as it is defined by -Devaney. 
In that context, important differences occur in the training -process, establishing with various neural networks that chaotic -behaviors are far more difficult to learn. +framework, an equivalence between chaotic iterations according to +Devaney and a particular class of neural networks. On the one hand we +show how to build such a network, on the other hand we provide a +method to check if a neural network is a chaotic one. Finally, the +ability of classical feedforward multilayer perceptrons to learn sets +of data obtained from a dynamical system is regarded. Various Boolean +functions are iterated on finite states. Iterations of some of them +are proven to be chaotic as it is defined by Devaney. In that +context, important differences occur in the training process, +establishing with various neural networks that chaotic behaviors are +far more difficult to learn. \end{abstract} %%\pacs{Valid PACS appear here}% PACS, the Physics and Astronomy - % Classification Scheme. +% Classification Scheme. %%\keywords{Suggested keywords}%Use showkeys class option if keyword - %display desired +%display desired \maketitle \begin{quotation} @@ -97,23 +94,23 @@ appealing properties of deterministic chaos (unpredictability, sensitivity, and so on). However, such networks are often claimed chaotic without any rigorous mathematical proof. Therefore, in this work a theoretical framework based on the Devaney's definition of -chaos is introduced. Starting with a relationship between discrete +chaos is introduced. Starting with a relationship between discrete iterations and Devaney's chaos, we firstly show how to build a -recurrent neural networks that is equivalent to a chaotic map and +recurrent neural network that is equivalent to a chaotic map and secondly a way to check whether an already available network, is -chaotic or not. We also study different topological properties of +chaotic or not. We also study different topological properties of these truly chaotic neural networks. Finally, we show that the -learning, with neural networks having a feedforward structure, of -chaotic behaviors represented by data sets obtained from chaotic maps, -is far more difficult than non chaotic behaviors. +learning, with neural networks having a classical feedforward +structure, of chaotic behaviors represented by data sets obtained from +chaotic maps, is far more difficult than non chaotic behaviors. \end{quotation} %% Main text \section{Introduction} \label{S1} -Several research works have proposed or run chaotic neural networks -these last years. The complex dynamics of such a networks leads to +Several research works have proposed or used chaotic neural networks +these last years. The complex dynamics of such a network leads to various potential application areas: associative memories~\cite{Crook2007267} and digital security tools like hash functions~\cite{Xiao10}, digital @@ -122,11 +119,11 @@ schemes~\cite{Lian20091296}. In the former case, the background idea is to control chaotic dynamics in order to store patterns, with the key advantage of offering a large storage capacity. For the latter case, the use of chaotic dynamics is motivated by their -unpredictability and random-like behaviors. Thus, investigating new -concepts is crucial in this field, because new threats are constantly -emerging. As an illustrative example, the former standard in hash -functions, namely the SHA-1 algorithm, has been recently weakened -after flaws were discovered. +unpredictability and random-like behaviors. 
Indeed, investigating new +concepts is crucial for the computer security field, because new +threats are constantly emerging. As an illustrative example, the +former standard in hash functions, namely the SHA-1 algorithm, has +been recently weakened after flaws were discovered. Chaotic neural networks have been built with different approaches. In the context of associative memory, chaotic neurons like the nonlinear @@ -136,50 +133,46 @@ which is usually assessed through the computation of the Lyapunov exponent. An alternative approach is to consider a well-known neural network architecture: the MultiLayer Perceptron (MLP). These networks are suitable to model nonlinear relationships between data, due to -their universal approximator capacity. -\JFC{Michel, peux-tu donner une ref la dessus} -Thus, this kind of networks can -be trained to model a physical phenomenon known to be chaotic such as -Chua's circuit \cite{dalkiran10}. Sometimes, a neural network which -is build by combining transfer functions and initial conditions that are both -chaotic, is itself claimed to be chaotic +their universal approximator capacity +\cite{Cybenko89,DBLP:journals/nn/HornikSW89}. Thus, this kind of +networks can be trained to model a physical phenomenon known to be +chaotic such as Chua's circuit \cite{dalkiran10}. Sometimes, a neural +network which is build by combining transfer functions and initial +conditions that are both chaotic, is itself claimed to be chaotic \cite{springerlink:10.1007/s00521-010-0432-2}. What all of these chaotic neural networks have in common is that they are claimed to be chaotic despite a lack of any rigorous mathematical -proof. The first contribution of this paper is to fill this gap, -using a theoretical framework based on the Devaney's definition of chaos -\cite{Devaney}. This mathematical theory of chaos provides both +proof. The first contribution of this paper is to fill this gap, +using a theoretical framework based on the Devaney's definition of +chaos \cite{Devaney}. This mathematical theory of chaos provides both qualitative and quantitative tools to evaluate the complex behavior of a dynamical system: ergodicity, expansivity, and so on. More precisely, in this paper, which is an extension of a previous work -\cite{bgs11:ip}, we establish the equivalence between chaotic -iterations and a class of globally recurrent MLP. -The investigation the converse problem is the second contribution: -we indeed study the ability for -classical MultiLayer Perceptrons to learn a particular family of -discrete chaotic dynamical systems. This family -is defined by a Boolean vector, an update function, and a -sequence giving which component to update at each iteration. It has -been previously established that such dynamical systems is -chaotically iterated (as it is defined by Devaney) when the chosen function has -a strongly connected iterations graph. In this document, we -experiment several MLPs and try to learn some iterations of this kind. -We show that non-chaotic iterations can be learned, whereas it is -far more difficult for chaotic ones. That is to say, we have -discovered at least one family of problems with a reasonable size, -such that artificial neural networks should not be applied -due to their inability to learn chaotic behaviors in this context. +\cite{bgs11:ip}, we establish the equivalence between chaotic +iterations and a class of globally recurrent MLP. 
The second
+contribution is a study of the converse problem: we investigate the
+ability of classical multilayer perceptrons to learn a particular
+family of discrete chaotic dynamical systems. This family is defined
+by a Boolean vector, an update function, and a sequence defining which
+component to update at each iteration. It has been previously
+established that such dynamical systems are chaotically iterated (as
+it is defined by Devaney) when the chosen function has a strongly
+connected iterations graph. In this document, we experiment with
+several MLPs and try to learn some iterations of this kind. We show
+that non-chaotic iterations can be learned, whereas it is far more
+difficult for chaotic ones. That is to say, we have discovered at
+least one family of problems with a reasonable size, such that
+artificial neural networks should not be applied due to their
+inability to learn chaotic behaviors in this context.
 
 The remainder of this research work is organized as follows. The next
-section is devoted to the basics of Devaney's
-chaos. Section~\ref{S2} formally describes how to build a neural
-network that operates chaotically. Section~\ref{S3} is
-devoted to the dual case of checking whether an existing neural network
-is chaotic or not.
-Topological properties of chaotic neural networks
-are discussed in Sect.~\ref{S4}. The
-Section~\ref{section:translation} shows how to translate such
+section is devoted to the basics of Devaney's chaos. Section~\ref{S2}
+formally describes how to build a neural network that operates
+chaotically. Section~\ref{S3} is devoted to the dual case of checking
+whether an existing neural network is chaotic or not. Topological
+properties of chaotic neural networks are discussed in Sect.~\ref{S4}.
+Section~\ref{section:translation} shows how to translate such
 iterations into an Artificial Neural Network (ANN), in order to
 evaluate the capability for this latter to learn chaotic behaviors.
 This ability is studied in Sect.~\ref{section:experiments}, where
@@ -192,18 +185,18 @@ is summed up and intended future work is exposed.
 
 \section{Chaotic Iterations according to Devaney}
 
 In this section, the well-established notion of Devaney's mathematical
-chaos is firstly recalled. Preservation of the unpredictability of
+chaos is firstly recalled. Preservation of the unpredictability of
 such a dynamical system when implemented on a computer is obtained by
-using some discrete iterations called ``asynchronous iterations'', which
-are thus introduced. The result establishing the link between such
-iterations and Devaney's chaos is finally presented at the end of this
-section.
+using some discrete iterations called ``asynchronous iterations'',
+which are thus introduced. The result establishing the link between
+such iterations and Devaney's chaos is finally presented at the end of
+this section.
 
 In what follows and for any function $f$, $f^n$ means the composition
-$f \circ f \circ \hdots \circ f$ ($n$ times) and an \emph{iteration}
-of a \emph{dynamical system} is the step that consists in
-updating the global state $x^t$ at time $t$ with respect to a function $f$
-s.t. $x^{t+1} = f(x^t)$.
+$f \circ f \circ \hdots \circ f$ ($n$ times) and an {\bf iteration} of
+a {\bf dynamical system} is the step that consists in updating the
+global state $x^t$ at time $t$ with respect to a function $f$
+s.t. $x^{t+1} = f(x^t)$. 
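To make this notation concrete, the following minimal sketch (plain Python, our own illustration rather than material from the submitted sources) computes the $n$-fold composition $f^n$ and the orbit generated by the rule $x^{t+1} = f(x^t)$; the example map is the vectorial negation that is introduced later in the paper.

\begin{verbatim}
def compose(f, n):
    # Return the n-fold composition f^n = f o f o ... o f (n times).
    def fn(x):
        for _ in range(n):
            x = f(x)
        return x
    return fn

def orbit(f, x0, steps):
    # Successive states x^0, x^1 = f(x^0), ..., x^steps.
    xs = [x0]
    for _ in range(steps):
        xs.append(f(xs[-1]))
    return xs

# Example: the vectorial negation on two Boolean components.
f = lambda x: tuple(1 - xi for xi in x)
print(orbit(f, (0, 1), 3))    # [(0, 1), (1, 0), (0, 1), (1, 0)]
print(compose(f, 2)((0, 1)))  # f^2 is the identity here: (0, 1)
\end{verbatim}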
\subsection{Devaney's chaotic dynamical systems} @@ -223,7 +216,7 @@ mathematically this kind of unpredictability is also referred to as deterministic chaos. For example, many weather forecast models exist, but they give only suitable predictions for about a week, because they are initialized with conditions that reflect only a partial knowledge -of the current weather. Even the differences are initially small, +of the current weather. Even if the differences are initially small, they are amplified in the course of time, and thus make difficult a long-term prediction. In fact, in a chaotic system, an approximation of the current state is a quite useless indicator for predicting @@ -232,49 +225,48 @@ future states. From mathematical point of view, deterministic chaos has been thoroughly studied these last decades, with different research works that have provide various definitions of chaos. Among these -definitions, the one given by Devaney~\cite{Devaney} is -well-established. This definition consists of three conditions: +definitions, the one given by Devaney~\cite{Devaney} is +well-established. This definition consists of three conditions: topological transitivity, density of periodic points, and sensitive point dependence on initial conditions. -Topological transitivity is checked when, for any point, any +{\bf Topological transitivity} is checked when, for any point, any neighborhood of its future evolution eventually overlap with any other -given region. This property implies that a dynamical system -cannot be broken into simpler subsystems. -Intuitively, its complexity does not allow any simplification. -On the contrary, a dense set of periodic points is an -element of regularity that a chaotic dynamical system has to exhibit. - -However, chaos need some regularity to ``counteracts'' -the effects of transitivity. +given region. This property implies that a dynamical system cannot be +broken into simpler subsystems. Intuitively, its complexity does not +allow any simplification. On the contrary, a dense set of periodic +points is an element of regularity that a chaotic dynamical system has +to exhibit. + +However, chaos needs some regularity to ``counteracts'' the effects of +transitivity. %\begin{definition} \label{def3} -We recall that a point $x$ is {\emph{periodic point}} for $f$ of +We recall that a point $x$ is {\bf periodic point} for $f$ of period~$n \in \mathds{N}^{\ast}$ if $f^{n}(x)=x$. %\end{definition} Then, the map %\begin{definition} \label{def4} -$f$ is {\emph{ regular}} on $(\mathcal{X},\tau)$ if the set of - periodic points for $f$ is dense in $\mathcal{X}$ (for any $x \in - \mathcal{X}$, we can find at least one periodic point in any of its - neighborhood). +$f$ is {\bf regular} on the topological space $(\mathcal{X},\tau)$ if +the set of periodic points for $f$ is dense in $\mathcal{X}$ (for any +$x \in \mathcal{X}$, we can find at least one periodic point in any of +its neighborhood). %\end{definition} - Thus, - due to these two properties, two points close to each other can behave - in a completely different manner, leading to unpredictability for the - whole system. - -Let we recall that $f$ -has {\emph{ sensitive dependence on initial conditions}} if there -exists $\delta >0$ such that, for any $x\in \mathcal{X}$ and any -neighborhood $V$ of $x$, there exist $y\in V$ and $n > 0$ such that -$d\left(f^{n}(x), f^{n}(y)\right) >\delta $. The value $\delta$ is called the - {\emph{constant of sensitivity}} of $f$. 
- -Finally, The dynamical system that iterates $f$ is {\emph{ chaotic according to Devaney}} on $(\mathcal{X},\tau)$ if $f$ is regular, topologically transitive, -and has sensitive dependence to its initial conditions. -In what follows, iterations are said to be \emph{chaotic according Devaney} -when corresponding dynamical system is chaotic according Devaney. - +Thus, due to these two properties, two points close to each other can +behave in a completely different manner, leading to unpredictability +for the whole system. + +Let us recall that $f$ has {\bf sensitive dependence on initial + conditions} if there exists $\delta >0$ such that, for any $x\in +\mathcal{X}$ and any neighborhood $V$ of $x$, there exist $y\in V$ and +$n > 0$ such that $d\left(f^{n}(x), f^{n}(y)\right) >\delta $. The +value $\delta$ is called the {\bf constant of sensitivity} of $f$. + +Finally, The dynamical system that iterates $f$ is {\bf chaotic + according to Devaney} on $(\mathcal{X},\tau)$ if $f$ is regular, +topologically transitive, and has sensitive dependence to its initial +conditions. In what follows, iterations are said to be chaotic +according Devaney when corresponding dynamical system is chaotic +according Devaney. %Let us notice that for a metric space the last condition follows from %the two first ones~\cite{Banks92}. @@ -283,21 +275,19 @@ when corresponding dynamical system is chaotic according Devaney. %This section presents some basics on topological chaotic iterations. Let us firstly discuss about the domain of iteration. As far as we -know, no result rules that the chaotic behavior of a dynamical system -that has been theoretically proven on $\R$ remains valid on the -floating-point -numbers, which is the implementation domain. Thus, to avoid loss of -chaos this work presents an alternative, that is to iterate Boolean -maps: results that are theoretically obtained in that domain are -preserved in implementations. +know, no result rules that the chaotic behavior of a dynamical system +that has been theoretically proven on $\R$ remains valid on the +floating-point numbers, which is the implementation domain. Thus, to +avoid loss of chaos this work presents an alternative, that is to +iterate Boolean maps: results that are theoretically obtained in that +domain are preserved in implementations. Let us denote by $\llbracket a ; b \rrbracket$ the following interval -of integers: $\{a, a+1, \hdots, b\}$, where $a~<~b$. -In that section, a system -under consideration iteratively modifies a collection of -$n$~components. Each component $i \in \llbracket 1; n \rrbracket$ -takes its value $x_i$ among the domain $\Bool=\{0,1\}$. -A \emph{configuration} of the system at discrete time $t$ is the vector +of integers: $\{a, a+1, \hdots, b\}$, where $a~<~b$. In this section, +a {\it system} under consideration iteratively modifies a collection +of $n$~components. Each component $i \in \llbracket 1; n \rrbracket$ +takes its value $x_i$ among the domain $\Bool=\{0,1\}$. A {\it + configuration} of the system at discrete time $t$ is the vector %\begin{equation*} $x^{t}=(x_1^{t},\ldots,x_{n}^{t}) \in \Bool^n$. %\end{equation*} @@ -309,22 +299,21 @@ $f(x)=(f_1(x),\ldots,f_n(x))$. % Notice that $f^k$ denotes the % $k-$th composition $f\circ \ldots \circ f$ of the function $f$. -Let be given a configuration $x$. In what follows +Let be given a configuration $x$. 
In what follows $N(i,x)=(x_1,\ldots,\overline{x_i},\ldots,x_n)$ is the configuration obtained by switching the $i-$th component of $x$ ($\overline{x_i}$ is indeed the negation of $x_i$). Intuitively, $x$ and $N(i,x)$ are neighbors. The discrete iterations of $f$ are represented by the -oriented \emph{graph of iterations} $\Gamma(f)$. In such a graph, +oriented {\it graph of iterations} $\Gamma(f)$. In such a graph, vertices are configurations of $\Bool^n$ and there is an arc labeled -$i$ from $x$ to $N(i,x)$ if and only if $f_i(x)$ is $N(i,x)$. - -In the sequel, the \emph{strategy} $S=(S^{t})^{t \in \Nats}$ is the -sequence defining which component to update at time $t$ and $S^{t}$ -denotes its $t-$th term. -This iteration scheme that only modifies one element at each iteration -is classically referred as \emph{asynchronous iterations}. -More precisely, we have here for any $i$, $1\le i \le n$, -$$ +$i$ from $x$ to $N(i,x)$ if and only if $f_i(x)$ is $N(i,x)$. + +In the sequel, the {\it strategy} $S=(S^{t})^{t \in \Nats}$ is the +sequence defining which component to update at time $t$ and $S^{t}$ +denotes its $t-$th term. This iteration scheme that only modifies one +element at each iteration is classically referred as {\it asynchronous + iterations}. More precisely, we have for any $i$, $1\le i \le n$, +\begin{equation} \left\{ \begin{array}{l} x^{0} \in \Bool^n \\ x^{t+1}_i = \left\{ @@ -334,21 +323,21 @@ x^{t+1}_i = \left\{ \end{array} \right. \end{array} \right. -$$ +\end{equation} -Next section shows the link between asynchronous iterations and -Devaney's Chaos. +Next section shows the link between asynchronous iterations and +Devaney's chaos. \subsection{On the link between asynchronous iterations and Devaney's Chaos} In this subsection we recall the link we have established between -asynchronous iterations and Devaney's chaos. The theoretical framework is -fully described in \cite{guyeux09}. +asynchronous iterations and Devaney's chaos. The theoretical +framework is fully described in \cite{guyeux09}. -We introduce the function $F_{f}$ that is -defined for any given application $f:\Bool^{n} \to \Bool^{n}$ by -$F_{f}: \llbracket1;n\rrbracket\times \mathds{B}^{n} \rightarrow +We introduce the function $F_{f}$ that is defined for any given +application $f:\Bool^{n} \to \Bool^{n}$ by $F_{f}: +\llbracket1;n\rrbracket\times \mathds{B}^{n} \rightarrow \mathds{B}^{n}$, s.t. \begin{equation} \label{eq:CIs} @@ -361,9 +350,8 @@ $F_{f}: \llbracket1;n\rrbracket\times \mathds{B}^{n} \rightarrow \right. \end{equation} -\noindent With such a notation, configurations -asynchronously obtained are defined for times -$t=0,1,2,\ldots$ by: +\noindent With such a notation, asynchronously obtained configurations +are defined for times \linebreak $t=0,1,2,\ldots$ by: \begin{equation}\label{eq:sync} \left\{\begin{array}{l} x^{0}\in \mathds{B}^{n} \textrm{ and}\\ @@ -386,19 +374,13 @@ X^{k+1}& = & G_{f}(X^{k})\\ \label{eq:Gf} \end{equation} where $\sigma$ is the function that removes the first term of the -strategy ({\it i.e.},~$S^0$). -This definition allows to links asynchronous iterations with -classical iterations of a dynamical system. - - -%means that only one -%component of the system is updated at an iteration: the $S^t$-th -%element. But it can be extended by considering subsets for $S^t$. +strategy ({\it i.e.},~$S^0$). This definition allows to links +asynchronous iterations with classical iterations of a dynamical +system. Note that it can be extended by considering subsets for $S^t$. 
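As a complement to the formal definitions above, the following minimal sketch (plain Python, an assumption of ours and not part of the submitted sources; components are indexed from $0$ instead of $1$) shows how the asynchronous iterations unfold: at each time step only the component designated by the current strategy term is updated, exactly as in $x^{t+1} = F_f(S^t, x^t)$.

\begin{verbatim}
def F(f, s, x):
    # F_f(s, x): update only component s of x according to f.
    y = list(x)
    y[s] = f(x)[s]
    return tuple(y)

def asynchronous_orbit(f, x0, strategy, steps):
    # Iterate x^{t+1} = F_f(S^t, x^t), consuming the strategy term by term.
    x = tuple(x0)
    states = [x]
    for t in range(steps):
        x = F(f, strategy[t], x)
        states.append(x)
    return states

# Example with the vectorial negation f_0 on n = 4 components.
f0 = lambda x: tuple(1 - xi for xi in x)
print(asynchronous_orbit(f0, (0, 1, 0, 1), [0, 2, 1, 3, 0], 5))
\end{verbatim}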
 
-To study topological properties of these iterations, we are then left to
-introduce a {\emph{ distance}} $d$ between two points $(S,x)$ and
-$(\check{S},\check{x})\in \mathcal{X} = \llbracket1;n\rrbracket^\Nats.
+To study topological properties of these iterations, we are then left
+to introduce a {\bf distance} $d$ between two points $(S,x)$ and
+$(\check{S},\check{x})\in \mathcal{X} = \llbracket1;n\rrbracket^\Nats
 \times \Bool^{n}$. It is defined by
 \begin{equation}
 d((S,x);(\check{S},\check{x}))=d_{e}(x,\check{x})+d_{s}(S,\check{S})
@@ -415,42 +397,42 @@ d_{s}(S,\check{S})=\frac{9}{2n}\sum_{t=0}^{\infty
 }\frac{|S^{t}-\check{S}^{t}|}{10^{t+1}} \in [0 ; 1] \enspace .
 \end{equation}
 
-Notice that the more two systems have different components,
-the larger the distance between them is. Secondly, two systems with
+This distance is defined to reflect the following
+information. Firstly, the more two systems have different components,
+the larger the distance between them. Secondly, two systems with
 similar components and strategies, which have the same starting terms,
-must induce only a small distance. The proposed distance fulfill
+must induce only a small distance. The proposed distance fulfills
 these requirements: on the one hand its floor value reflects the
 difference between the cells, on the other hand its fractional part
 measures the difference between the strategies.
 
 The relation between $\Gamma(f)$ and $G_f$ is clear: there exists a
 path from $x$ to $x'$ in $\Gamma(f)$ if and only if there exists a
-strategy $s$ such that iterations of $G_f$ from the
-initial point $(s,x)$ reaches the configuration $x'$. Using this
-link, Guyeux~\cite{GuyeuxThese10} has proven that,
+strategy $s$ such that iterations of $G_f$ from the initial point
+$(s,x)$ reach the configuration $x'$. Using this link,
+Guyeux~\cite{GuyeuxThese10} has proven that,
 \begin{theorem}%[Characterization of $\mathcal{C}$]
 \label{Th:Caracterisation des IC chaotiques}
 Let $f:\Bool^n\to\Bool^n$. Iterations of $G_f$ are chaotic according
 to Devaney if and only if $\Gamma(f)$ is strongly connected.
 \end{theorem}
 
-Checking if a graph is strongly connected is not difficult
-(by the Tarjan's algorithm for instance).
-Let be given a strategy $S$ and a function $f$ such that
-$\Gamma(f)$ is strongly connected.
-In that case, iterations of the function $G_f$ as defined in
-Eq.~(\ref{eq:Gf}) are chaotic according to Devaney.
+Checking if a graph is strongly connected is not difficult (by
+Tarjan's algorithm, for instance). Let a strategy $S$ and a function
+$f$ be given such that $\Gamma(f)$ is strongly connected. In that
+case, iterations of the function $G_f$ as defined in Eq.~(\ref{eq:Gf})
+are chaotic according to Devaney.
 
-Let us then define two function $f_0$ and $f_1$ both in
-$\Bool^n\to\Bool^n$ that are used all along this article.
-The former is the vectorial negation, \textit{i.e.},
-$f_{0}(x_{1},\dots,x_{n}) =(\overline{x_{1}},\dots,\overline{x_{n}})$.
-The latter is $f_1\left(x_1,\dots,x_n\right)=\left(
-\overline{x_1},x_1,x_2,\dots,x_{n-1}\right)$.
-It is not hard to see that $\Gamma(f_0)$ and $\Gamma(f_1)$ are
-both strongly connected, then iterations of $G_{f_0}$ and of
-$G_{f_1}$ are chaotic according to Devaney.
+Let us then define two functions $f_0$ and $f_1$, both in
+$\Bool^n\to\Bool^n$, that are used all along this paper. The former is
+the vectorial negation, \textit{i.e.}, $f_{0}(x_{1},\dots,x_{n})
+=(\overline{x_{1}},\dots,\overline{x_{n}})$. 
The latter is +$f_1\left(x_1,\dots,x_n\right)=\left( +\overline{x_1},x_1,x_2,\dots,x_{n-1}\right)$. It is not hard to check +that $\Gamma(f_0)$ and $\Gamma(f_1)$ are both strongly connected, then +iterations of $G_{f_0}$ and of $G_{f_1}$ are chaotic according to +Devaney. With this material, we are now able to build a first chaotic neural network, as defined in the Devaney's formulation. @@ -458,17 +440,15 @@ network, as defined in the Devaney's formulation. \section{A chaotic neural network in the sense of Devaney} \label{S2} -Firstly, let us build a -multilayer perceptron neural network modeling -$F_{f_0}:\llbracket 1; n \rrbracket \times \mathds{B}^n \to -\mathds{B}^n$ associated to the vectorial negation. -More precisely, for all inputs -$(s,x) \in \llbracket 1;n\rrbracket \times \mathds{B}^n$, -the output layer produces $F_{f_0}(s,x)$. It is then possible to -link the output layer and the input one, in order to model the -dependence between two successive iterations. As a result we obtain a -global recurrent neural network that behaves as follows (see -Fig.~\ref{Fig:perceptron}). +Let us build a multilayer perceptron neural network modeling +$F_{f_0}:\llbracket 1; n \rrbracket \times \mathds{B}^n \to +\mathds{B}^n$ associated to the vectorial negation. More precisely, +for all inputs $(s,x) \in \llbracket 1;n\rrbracket \times +\mathds{B}^n$, the output layer will produce $F_{f_0}(s,x)$. It is +then possible to link the output layer and the input one, in order to +model the dependence between two successive iterations. As a result +we obtain a global recurrent neural network that behaves as follows +(see Fig.~\ref{Fig:perceptron}). \begin{itemize} \item The network is initialized with the input vector @@ -481,8 +461,8 @@ Fig.~\ref{Fig:perceptron}). state of the system $x^t \in \mathds{B}^n$ received from the output layer and the initial term of the sequence $(S^t)^{t \in \Nats}$ ($S^0 \in \llbracket 1;n\rrbracket$) are used to compute the new - output. This new output, which represents the new state of the - dynamical system, satisfies: + output vector. This new vector, which represents the new state of + the dynamical system, satisfies: \begin{equation} x^{t+1}=F_{f_0}(S^0, x^t) \in \mathds{B}^n \enspace . \end{equation} @@ -498,8 +478,8 @@ Fig.~\ref{Fig:perceptron}). The behavior of the neural network is such that when the initial state is $x^0~\in~\mathds{B}^n$ and a sequence $(S^t)^{t \in \Nats}$ is given as outside input, -\JFC{en dire davantage sur l'outside world} - then the sequence of successive published +\JFC{en dire davantage sur l'outside world} %% TO BE UPDATED +then the sequence of successive published output vectors $\left(x^t\right)^{t \in \mathds{N}^{\ast}}$ is exactly the one produced by the chaotic iterations formally described in Eq.~(\ref{eq:Gf}). It means that mathematically if we use similar @@ -525,24 +505,23 @@ chaotic neural network by using $f_1$ instead of $f_0$. \label{S3} We focus now on the case where a neural network is already available, -and for which we want to know if it is chaotic. Typically, in -many research papers neural network are usually claimed to be chaotic +and for which we want to know if it is chaotic. Typically, in many +research papers neural network are usually claimed to be chaotic without any convincing mathematical proof. We propose an approach to overcome this drawback for a particular category of multilayer perceptrons defined below, and for the Devaney's formulation of chaos. 
In spite of this restriction, we think that this approach can be -extended to a large variety of neural networks. +extended to a large variety of neural networks. - -We consider a multilayer perceptron of the following form: inputs -are $n$ binary digits and one integer value, while outputs are $n$ -bits. Moreover, each binary output is connected with a feedback -connection to an input one. +We consider a multilayer perceptron of the following form: inputs are +$n$ binary digits and one integer value, while outputs are $n$ bits. +Moreover, each binary output is connected with a feedback connection +to an input one. \begin{itemize} -\item During initialization, the network is seeded with $n$~bits denoted - $\left(x^0_1,\dots,x^0_n\right)$ and an integer value $S^0$ that - belongs to $\llbracket1;n\rrbracket$. +\item During initialization, the network is seeded with $n$~bits + denoted $\left(x^0_1,\dots,x^0_n\right)$ and an integer value $S^0$ + that belongs to $\llbracket1;n\rrbracket$. \item At iteration~$t$, the last output vector $\left(x^t_1,\dots,x^t_n\right)$ defines the $n$~bits used to compute the new output one $\left(x^{t+1}_1,\dots,x^{t+1}_n\right)$. @@ -556,15 +535,11 @@ proven to be chaotic through the following process. Firstly, we denote by $F: \llbracket 1;n \rrbracket \times \mathds{B}^n \rightarrow \mathds{B}^n$ the function that maps the value $\left(s,\left(x_1,\dots,x_n\right)\right) \in \llbracket 1;n -\rrbracket \times \mathds{B}^n$ -\JFC{ici, cela devait etre $S^t$ et pas $s$, nn ?} - into the value +\rrbracket \times \mathds{B}^n$ into the value $\left(y_1,\dots,y_n\right) \in \mathds{B}^n$, where $\left(y_1,\dots,y_n\right)$ is the response of the neural network after the initialization of its input layer with -$\left(s,\left(x_1,\dots, x_n\right)\right)$. -\JFC{ici, cela devait etre $S^t$ et pas $s$, nn ?} -Secondly, we define $f: +$\left(s,\left(x_1,\dots, x_n\right)\right)$. Secondly, we define $f: \mathds{B}^n \rightarrow \mathds{B}^n$ such that $f\left(x_1,x_2,\dots,x_n\right)$ is equal to \begin{equation} @@ -581,8 +556,8 @@ same output vectors than the chaotic iterations of $F_f$ with initial condition $\left(S,(x_1^0,\dots, x_n^0)\right) \in \llbracket 1;n \rrbracket^{\mathds{N}} \times \mathds{B}^n$. -Theoretically speaking, such iterations of $F_f$ are thus a formal model of -these kind of recurrent neural networks. In the rest of this +Theoretically speaking, such iterations of $F_f$ are thus a formal +model of these kind of recurrent neural networks. In the rest of this paper, we will call such multilayer perceptrons CI-MLP($f$), which stands for ``Chaotic Iterations based MultiLayer Perceptron''. @@ -596,10 +571,6 @@ like topology to establish, for example, their convergence or, contrarily, their unpredictable behavior. An example of such a study is given in the next section. -\JFC{Ce qui suit est davantage qu'un exemple.Il faudrait -motiver davantage, non?} - - \section{Topological properties of chaotic neural networks} \label{S4} @@ -607,38 +578,42 @@ Let us first recall two fundamental definitions from the mathematical theory of chaos. \begin{definition} \label{def8} -A function $f$ is said to be {\emph{ expansive}} if $\exists +A function $f$ is said to be {\bf expansive} if $\exists \varepsilon>0$, $\forall x \neq y$, $\exists n \in \mathds{N}$ such that $d\left(f^n(x),f^n(y)\right) \geq \varepsilon$. 
\end{definition} +\noindent In other words, a small error on any initial condition is +always amplified until $\varepsilon$, which denotes the constant of +expansivity of $f$. + \begin{definition} \label{def9} -A discrete dynamical system is said to be {\emph{ topologically mixing}} +A discrete dynamical system is said to be {\bf topologically mixing} if and only if, for any pair of disjoint open sets $U$,$V \neq -\emptyset$, we can find some $n_0 \in \mathds{N}$ such that for any $n$, -$n\geq n_0$, we have $f^n(U) \cap V \neq \emptyset$. +\emptyset$, we can find some $n_0 \in \mathds{N}$ such that for any +$n$, $n\geq n_0$, we have $f^n(U) \cap V \neq \emptyset$. \end{definition} -\JFC{Donner un sens à ces definitions} - -It has been proven in Ref.~\cite{gfb10:ip}, that chaotic iterations -are expansive and topologically mixing when $f$ is the -vectorial negation $f_0$. -Consequently, these properties are inherited by the CI-MLP($f_0$) -recurrent neural network previously presented, which induce a greater -unpredictability. Any difference on the initial value of the input -layer is in particular magnified up to be equal to the expansivity -constant. +\noindent Topologically mixing means that the dynamical system evolves +in time such that any given region of its topological space might +overlap with any other region. -Let us then focus on the consequences for a neural network to be chaotic -according to Devaney's definition. Intuitively, the topological -transitivity property implies indecomposability, which is formally defined -as follows: +It has been proven in Ref.~\cite{gfb10:ip} that chaotic iterations are +expansive and topologically mixing when $f$ is the vectorial negation +$f_0$. Consequently, these properties are inherited by the +CI-MLP($f_0$) recurrent neural network previously presented, which +induce a greater unpredictability. Any difference on the initial +value of the input layer is in particular magnified up to be equal to +the expansivity constant. +Let us then focus on the consequences for a neural network to be +chaotic according to Devaney's definition. Intuitively, the +topological transitivity property implies indecomposability, which is +formally defined as follows: \begin{definition} \label{def10} -A dynamical system $\left( \mathcal{X}, f\right)$ is -{\emph{not decomposable}} if it is not the union of two closed sets $A, B +A dynamical system $\left( \mathcal{X}, f\right)$ is {\bf not + decomposable} if it is not the union of two closed sets $A, B \subset \mathcal{X}$ such that $f(A) \subset A, f(B) \subset B$. \end{definition} @@ -648,25 +623,27 @@ strongly connected. Moreover, under this hypothesis CI-MLPs($f$) are strongly transitive: \begin{definition} \label{def11} -A dynamical system $\left( \mathcal{X}, f\right)$ is {\emph{ strongly -transitive}} if $\forall x,y \in \mathcal{X}$, $\forall r>0$, $\exists -z \in \mathcal{X}$, $d(z,x)~\leq~r \Rightarrow \exists n \in +A dynamical system $\left( \mathcal{X}, f\right)$ is {\bf strongly + transitive} if $\forall x,y \in \mathcal{X}$, $\forall r>0$, +$\exists z \in \mathcal{X}$, $d(z,x)~\leq~r \Rightarrow \exists n \in \mathds{N}^{\ast}$, $f^n(z)=y$. \end{definition} -According to this definition, for all pairs of points $(x, y)$ in the -phase space, a point $z$ can be found in the neighborhood of $x$ such -that one of its iterates $f^n(z)$ is $y$. Indeed, this result has been -established during the proof of the transitivity presented in -Ref.~\cite{guyeux09}. 
Among other things, the strong transitivity -leads to the fact that without the knowledge of the initial input -layer, all outputs are possible. Additionally, no point of the output -space can be discarded when studying CI-MLPs: this space is -intrinsically complicated and it cannot be decomposed or simplified. + +\noindent According to this definition, for all pairs of points $(x, +y)$ in the phase space, a point $z$ can be found in the neighborhood +of $x$ such that one of its iterates $f^n(z)$ is $y$. Indeed, this +result has been established during the proof of the transitivity +presented in Ref.~\cite{guyeux09}. Among other things, the strong +transitivity leads to the fact that without the knowledge of the +initial input layer, all outputs are possible. Additionally, no point +of the output space can be discarded when studying CI-MLPs: this space +is intrinsically complicated and it cannot be decomposed or +simplified. Furthermore, those recurrent neural networks exhibit the instability property: \begin{definition} -A dynamical system $\left( \mathcal{X}, f\right)$ is \emph{unstable} +A dynamical system $\left( \mathcal{X}, f\right)$ is {\bf unstable} if for all $x \in \mathcal{X}$, the orbit $\gamma_x:n \in \mathds{N} \longmapsto f^n(x)$ is unstable, that means: $\exists \varepsilon > @@ -674,17 +651,20 @@ all $x \in \mathcal{X}$, the orbit $\gamma_x:n \in \mathds{N} \mathds{N}$, such that $d(x,y)<\delta$ and $d\left(\gamma_x(n),\gamma_y(n)\right) \geq \varepsilon$. \end{definition} -This property, which is implied by the sensitive point dependence on -initial conditions, leads to the fact that in all neighborhoods of any -point $x$, there are points that can be apart by $\varepsilon$ in the -future through iterations of the CI-MLP($f$). Thus, we can claim that -the behavior of these MLPs is unstable when $\Gamma (f)$ is strongly -connected. + +\noindent This property, which is implied by the sensitive point +dependence on initial conditions, leads to the fact that in all +neighborhoods of any point $x$, there are points that can be apart by +$\varepsilon$ in the future through iterations of the +CI-MLP($f$). Thus, we can claim that the behavior of these MLPs is +unstable when $\Gamma (f)$ is strongly connected. Let us now consider a compact metric space $(M, d)$ and $f: M \rightarrow M$ a continuous map. For each natural number $n$, a new metric $d_n$ is defined on $M$ by -$$d_n(x,y)=\max\{d(f^i(x),f^i(y)): 0\leq i 0$ and $n \geqslant 1$, two points of $M$ are $\varepsilon$-close with respect to this metric if their first $n$ @@ -698,7 +678,7 @@ at least $\varepsilon$ apart in the metric $d_n$. Denote by $H(n, \varepsilon)$ the maximum cardinality of an $(n, \varepsilon)$-separated set, \begin{definition} -The \emph{topological entropy} of the map $f$ is defined by (see e.g., +The {\bf topological entropy} of the map $f$ is defined by (see e.g., Ref.~\cite{Adler65} or Ref.~\cite{Bowen}) $$h(f)=\lim_{\varepsilon\to 0} \left(\limsup_{n\to \infty} \frac{1}{n}\log H(n,\varepsilon)\right) \enspace .$$ @@ -712,32 +692,25 @@ of $(\mathcal{X},G_{f_0})$ is infinite. \begin{figure} \centering - \includegraphics[scale=0.625]{scheme} + \includegraphics[scale=0.5]{scheme} \caption{Summary of addressed neural networks and chaos problems} \label{Fig:scheme} \end{figure} -The Figure~\ref{Fig:scheme} is a summary of addressed neural networks and chaos problems. -Section~\ref{S2} has explained how to construct a truly chaotic neural -networks $A$ for instance. 
-Section~\ref{S3} has shown how to check whether a given MLP -$A$ or $C$ is chaotic or not in the sens of Devaney. -%, and how to study its topological behavior. -The last thing to investigate, when comparing -neural networks and Devaney's chaos, is to determine whether -an artificial neural network $A$ is able to learn or predict some chaotic -behaviors of $B$, as it is defined in the Devaney's formulation (when they -are not specifically constructed for this purpose). This statement is -studied in the next section. - - - - - - - -\section{Suitability of Artificial Neural Networks -for Predicting Chaotic Behaviors} +Figure~\ref{Fig:scheme} is a summary of addressed neural networks and +chaos problems. In Section~\ref{S2} we have explained how to +construct a truly chaotic neural networks, $A$ for +instance. Section~\ref{S3} has shown how to check whether a given MLP +$A$ or $C$ is chaotic or not in the sens of Devaney, and how to study +its topological behavior. The last thing to investigate, when +comparing neural networks and Devaney's chaos, is to determine whether +an artificial neural network $C$ is able to learn or predict some +chaotic behaviors of $B$, as it is defined in the Devaney's +formulation (when they are not specifically constructed for this +purpose). This statement is studied in the next section. + +\section{Suitability of Feedforward Neural Networks +for Predicting Chaotic and Non-chaotic Behaviors} In the context of computer science different topic areas have an interest in chaos, as for steganographic @@ -764,13 +737,13 @@ chaotic behaviors. The problem of deciding whether classical feedforward ANNs are suitable to approximate topological chaotic iterations may then be -reduced to evaluate ANNs on iterations of functions with Strongly -Connected Component (SCC)~graph of iterations. To compare with -non-chaotic iterations, the experiments detailed in the following -sections are carried out using both kinds of function (chaotic and -non-chaotic). Let us emphasize on the difference between this kind of -neural networks and the Chaotic Iterations based MultiLayer -Perceptron. +reduced to evaluate such neural networks on iterations of functions +with Strongly Connected Component (SCC)~graph of iterations. To +compare with non-chaotic iterations, the experiments detailed in the +following sections are carried out using both kinds of function +(chaotic and non-chaotic). Let us emphasize on the difference between +this kind of neural networks and the Chaotic Iterations based +multilayer peceptron. We are then left to compute two disjoint function sets that contain either functions with topological chaos properties or not, depending @@ -789,16 +762,11 @@ $$[0, 0, 2, 3, 13, 13, 6, 3, 8, 9, 10, 11, 8, 13, 14, $1110$: it is obtained as the binary value of the fourth element in the second list (namely~14). It is not hard to verify that $\Gamma(f)$ is not SCC (\textit{e.g.}, $f(1111)$ is $1111$) whereas -$\Gamma(g)$ is. Next section shows how to translate iterations of -such functions into a model amenable to be learned by an ANN. - -This section presents how (not) chaotic iterations of $G_f$ are -translated into another model more suited to artificial neural -networks. -\JFC{détailler le more suited} -Formally, input and output vectors are pairs~$((S^t)^{t \in -\Nats},x)$ and $\left(\sigma((S^t)^{t \in \Nats}),F_{f}(S^0,x)\right)$ -as defined in~Eq.~(\ref{eq:Gf}). +$\Gamma(g)$ is. 
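Since Theorem~\ref{Th:Caracterisation des IC chaotiques} reduces the chaos question to the strong connectivity of $\Gamma(f)$, this check is easy to automate. The sketch below (plain Python, our own illustration under the assumption that $f$ is given by the list of its images written in decimal, with $0$-based bit indexing) builds the arcs $x \rightarrow N(i,x)$ whenever $f_i(x)$ differs from $x_i$, and tests strong connectivity with a double reachability search; it is shown on the vectorial negation, whose iteration graph is strongly connected.

\begin{verbatim}
def gamma_edges(images, n):
    # Arcs of Gamma(f): x -> N(i,x) whenever f_i(x) != x_i.
    edges = {x: set() for x in range(2 ** n)}
    for x in range(2 ** n):
        for i in range(n):
            if ((images[x] >> i) & 1) != ((x >> i) & 1):
                edges[x].add(x ^ (1 << i))
    return edges

def reachable(edges, start):
    # Set of vertices reachable from start by a depth-first search.
    seen, stack = {start}, [start]
    while stack:
        for y in edges[stack.pop()]:
            if y not in seen:
                seen.add(y)
                stack.append(y)
    return seen

def is_strongly_connected(images, n):
    # Strongly connected iff every vertex is reachable from vertex 0
    # in the graph and in its reversal.
    edges = gamma_edges(images, n)
    size = 2 ** n
    if len(reachable(edges, 0)) != size:
        return False
    reverse = {x: {y for y in range(size) if x in edges[y]}
               for x in range(size)}
    return len(reachable(reverse, 0)) == size

# The vectorial negation f_0 on 4 bits: its graph is strongly
# connected, hence its asynchronous iterations are chaotic
# according to Devaney.
negation = [x ^ 0b1111 for x in range(16)]
print(is_strongly_connected(negation, 4))   # True
\end{verbatim}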
The remaining of this section shows how to translate +iterations of such functions into a model amenable to be learned by an +ANN. Formally, input and output vectors are pairs~$((S^t)^{t \in + \Nats},x)$ and $\left(\sigma((S^t)^{t \in + \Nats}),F_{f}(S^0,x)\right)$ as defined in~Eq.~(\ref{eq:Gf}). Firstly, let us focus on how to memorize configurations. Two distinct translations are proposed. In the first case, we take one input in @@ -808,15 +776,15 @@ configuration as natural number could consist in labeling each configuration with its translation into decimal numeral system. However, such a representation induces too many changes between a configuration labeled by a power of two and its direct previous -configuration: for instance, 16~(10000) and 15~(01111) are closed in a +configuration: for instance, 16~(10000) and 15~(01111) are close in a decimal ordering, but their Hamming distance is 5. This is why Gray codes~\cite{Gray47} have been preferred. -Let us secondly detail how to deal with strategies. Obviously, it is not -possible to translate in a finite way an infinite strategy, even if -both $(S^t)^{t \in \Nats}$ and $\sigma((S^t)^{t \in \Nats})$ belong to -$\{1,\ldots,n\}^{\Nats}$. Input strategies are then reduced to have a -length of size $l \in \llbracket 2,k\rrbracket$, where $k$ is a +Secondly, let us detail how to deal with strategies. Obviously, it is +not possible to translate in a finite way an infinite strategy, even +if both $(S^t)^{t \in \Nats}$ and $\sigma((S^t)^{t \in \Nats})$ belong +to $\{1,\ldots,n\}^{\Nats}$. Input strategies are then reduced to +have a length of size $l \in \llbracket 2,k\rrbracket$, where $k$ is a parameter of the evaluation. Notice that $l$ is greater than or equal to $2$ since we do not want the shift $\sigma$~function to return an empty strategy. Strategies are memorized as natural numbers expressed @@ -859,15 +827,16 @@ $3$~terms we obtain 2304~input-output pairs. \label{section:experiments} To study if chaotic iterations can be predicted, we choose to train -the MultiLayer Perceptron. As stated before, this kind of network is -in particular well-known for its universal approximation -property. Furthermore, MLPs have been already considered for chaotic -time series prediction. For example, in~\cite{dalkiran10} the authors -have shown that a feedforward MLP with two hidden layers, and trained -with Bayesian Regulation back-propagation, can learn successfully the -dynamics of Chua's circuit. - -In these experiments we consider MLPs having one hidden layer of +the multiLayer perceptron. As stated before, this kind of network is +in particular well-known for its universal approximation property +\cite{Cybenko89,DBLP:journals/nn/HornikSW89}. Furthermore, MLPs have +been already considered for chaotic time series prediction. For +example, in~\cite{dalkiran10} the authors have shown that a +feedforward MLP with two hidden layers, and trained with Bayesian +Regulation back-propagation, can learn successfully the dynamics of +Chua's circuit. + +In these experiments we consider MLPs having one hidden layer of sigmoidal neurons and output neurons with a linear activation function. They are trained using the Limited-memory Broyden-Fletcher-Goldfarb-Shanno quasi-newton algorithm in combination @@ -892,16 +861,18 @@ trainings of two data sets, one of them describing chaotic iterations, are compared. Thereafter we give, for the different learning setups and data sets, -the mean prediction success rate obtained for each output. 
These -values are computed considering 10~trainings with random subsets -construction, weights and biases initialization. Firstly, neural -networks having 10 and 25~hidden neurons are trained, with a maximum -number of epochs that takes its value in $\{125,250,500\}$ (see -Tables~\ref{tab1} and \ref{tab2}). Secondly, we refine the second -coding scheme by splitting the output vector such that each output is -learned by a specific neural network (Table~\ref{tab3}). In this last -case, we increase the size of the hidden layer up to 40~neurons, and -we consider larger number of epochs. +the mean prediction success rate obtained for each output. A such rate +represent the percentage of input-output pairs belonging to the test +subset for which the corresponding output value was correctly +predicted. These values are computed considering 10~trainings with +random subsets construction, weights and biases initialization. +Firstly, neural networks having 10 and 25~hidden neurons are trained, +with a maximum number of epochs that takes its value in +$\{125,250,500\}$ (see Tables~\ref{tab1} and \ref{tab2}). Secondly, +we refine the second coding scheme by splitting the output vector such +that each output is learned by a specific neural network +(Table~\ref{tab3}). In this last case, we increase the size of the +hidden layer up to 40~neurons and we consider larger number of epochs. \begin{table}[htbp!] \caption{Prediction success rates for configurations expressed as boolean vectors.} @@ -961,15 +932,17 @@ is observed (from 36.10\% for 10~neurons and 125~epochs to 70.97\% for 25~neurons and 500~epochs). We also notice that the learning of outputs~(2) and~(3) is more difficult. Conversely, for the non-chaotic case the simplest training setup is enough to predict -configurations. For all network topologies and all outputs the -obtained results for the non-chaotic case outperform the chaotic -ones. Finally, the rates for the strategies show that the different -networks are unable to learn them. +configurations. For all those feedforward network topologies and all +outputs the obtained results for the non-chaotic case outperform the +chaotic ones. Finally, the rates for the strategies show that the +different networks are unable to learn them. -For the second coding scheme (\textit{i.e.}, with Gray Codes) -Table~\ref{tab2} shows that any network -learns about five times more non-chaotic configurations than chaotic -ones. As in the previous scheme, the strategies cannot be predicted. +\newpage + +For the second coding scheme (\textit{i.e.}, with Gray Codes) +Table~\ref{tab2} shows that any network learns about five times more +non-chaotic configurations than chaotic ones. As in the previous +scheme, the strategies cannot be predicted. Let us now compare the two coding schemes. Firstly, the second scheme disturbs the learning process. In fact in this scheme the @@ -1016,13 +989,12 @@ usually unknown. Hence, the first coding scheme cannot be used systematically. Therefore, we provide a refinement of the second scheme: each output is learned by a different ANN. Table~\ref{tab3} presents the results for this approach. In any case, whatever the -network topologies, the maximum epoch number and the kind of -iterations, the configuration success rate is slightly improved. -Moreover, the strategies predictions rates reach almost 12\%, whereas -in Table~\ref{tab2} they never exceed 1.5\%. Despite of this -improvement, a long term prediction of chaotic iterations still -appear to be -an open issue. 
+considered feedforward network topologies, the maximum epoch number +and the kind of iterations, the configuration success rate is slightly +improved. Moreover, the strategies predictions rates reach almost +12\%, whereas in Table~\ref{tab2} they never exceed 1.5\%. Despite of +this improvement, a long term prediction of chaotic iterations still +appear to be an open issue. \begin{table} \caption{Prediction success rates for split outputs.} @@ -1092,6 +1064,10 @@ Chaotic/non chaotic & \multicolumn{3}{c|}{Output = Strategy} \\ \end{tabular} \end{table} +% +% TO BE COMPLETED +% + \section{Conclusion} In this paper, we have established an equivalence between chaotic -- 2.39.5