1 \documentclass{article}
2 %\usepackage{prentcsmacro}
5 \usepackage[latin1]{inputenc}
6 \usepackage[T1]{fontenc}
7 \usepackage[english]{babel}
8 \usepackage{amsmath,amssymb,latexsym,eufrak,euscript}
9 \usepackage{subfigure,pstricks,pst-node,pst-coil}
13 \usepackage{pgflibrarysnakes}
17 \usetikzlibrary{arrows}
18 \usetikzlibrary{automata}
19 \usetikzlibrary{snakes}
20 \usetikzlibrary{shapes}
22 %% \setlength{\oddsidemargin}{15mm}
23 %% \setlength{\evensidemargin}{15mm} \setlength{\textwidth}{140mm}
24 %% \setlength{\textheight}{219mm} \setlength{\topmargin}{5mm}
25 \newtheorem{theorem}{Theorem}
26 %\newtheorem{definition}[theorem]{Definition}
27 % %\newtheorem{defis}[thm]{D\'efinitions}
28 \newtheorem{example}[theorem]{Example}
29 % %\newtheorem{Exes}[thm]{Exemples}
30 \newtheorem{lemma}[theorem]{Lemma}
31 \newtheorem{proposition}[theorem]{Proposition}
32 \newtheorem{construction}[theorem]{Construction}
33 \newtheorem{corollary}[theorem]{Corollary}
34 % \newtheorem{algor}[thm]{Algorithm}
35 %\newtheorem{propdef}[thm]{Proposition-D\'efinition}
36 \newcommand{\mlabel}[1]{\label{#1}\marginpar{\fbox{#1}}}
37 \newcommand{\flsup}[1]{\stackrel{#1}{\longrightarrow}}
39 \newcommand{\stirlingtwo}[2]{\genfrac{\lbrace}{\rbrace}{0pt}{}{#1}{#2}}
40 \newcommand{\stirlingone}[2]{\genfrac{\lbrack}{\rbrack}{0pt}{}{#1}{#2}}
45 \newline \vspace{-0.1em}
46 \begin{quote}\begin{rm}}
47 {\end{rm}\end{quote}\end{algor}\vspace{-1.5em}\vspace{2em}}
48 %\null \hfill $\diamondsuit$ \par\medskip \vspace{1em}}
54 %\null \hfill $\triangledown$ \par\medskip}
55 %\null \hfill $\triangledown$ \par\medskip \vspace{1em}}
59 \newenvironment{proof}
60 { \noindent {\sc Proof.\/} }
61 {\null \hfill $\Box$ \par\medskip \vspace{1em}}
64 \newcommand {\pb}[3] {
85 \newcommand {\tv}[1] {\lVert #1 \rVert_{\rm TV}}
88 \def \sp {\hspace*{0.8cm}} % NOTE(review): this overwrites TeX's \sp primitive (math superscript); consider renaming the macro
95 \def \T {\mathfrak{T}}
102 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
103 \section{Mathematical Background}
105 Let $G$ be a finite group. For any subset $S$ of $G$ we denote by
106 $\langle S\rangle$ the subgroup of $G$ generated by $S$.
107 If $\langle S\rangle=G$, we say that $S$ is a generating set of $G$.
111 Let $\pi$, $\mu$ be two distributions on the same set $\Omega$. The total
112 variation distance between $\pi$ and $\mu$ is denoted $\tv{\pi-\mu}$ and is
114 $$\tv{\pi-\mu}=\max_{A\subset \Omega} |\pi(A)-\mu(A)|.$$ It is known that
115 $$\tv{\pi-\mu}=\frac{1}{2}\sum_{x\in\Omega}|\pi(x)-\mu(x)|.$$ Moreover, if
116 $\nu$ is a distribution on $\Omega$, one has
117 $$\tv{\pi-\mu}\leq \tv{\pi-\nu}+\tv{\nu-\mu}$$ (triangle inequality).
119 Let $P$ be the matrix of a Markov chain on $\Omega$. $P(x,\cdot)$ is the
120 distribution induced by the $x$-th row of $P$. If the Markov chain induced by
121 $P$ has a stationary distribution $\pi$, then we define
122 $$d(t)=\max_{x\in\Omega}\tv{P^t(x,\cdot)-\pi},$$
125 $$t_{\rm mix}(\varepsilon)=\min\{t \mid d(t)\leq \varepsilon\}.$$
128 $$t_{\rm mix}(\varepsilon)\leq \lceil\log_2(\varepsilon^{-1})\rceil t_{\rm mix}(\frac{1}{4})$$
130 It is known that $d(t+1)\leq d(t)$.
132 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
133 \section{PRNG and random walk on Cayley graphs}
136 Let $S$ be a generating set of $\mathbb{B}^N$ such that if $s\in S$, then
137 $-s\in S$. Let $\nu$ be a distribution on $S$ such that
138 $\nu(s)=\nu(-s)$. The matrix $P_S^\nu$, or just $P^\nu$, or just $P$,
139 is the matrix defined by: $P^\nu(x,y)=\nu(y-x)$ if $x-y\in S$ and $0$
140 otherwise. $P_S^\nu$ is the $\nu$-random walk on the $S$-Cayley graph
143 A general result on random walks states that the uniform distribution
144 is stationary for $P$. Moreover, if $\nu(s)>0$ for each $s$, then
145 this is the limit distribution.
147 Let $\mathcal{P}$ be a finite subset of $\mathbb{N}$ and $\mu$ a distribution
148 on $\mathcal{P}$. Set
149 $$ P_{\mathcal{P},\mu}=\sum_{k\in\mathcal{P}}\mu(k)P^k.$$
152 With the above notation, $P_{\mathcal{P},\mu}$ is the matrix of the Markov
153 chain corresponding to the PRNG defined by Christophe, where $S$ corresponds
154 to the boolean functions and $\mu$ is the probability of choosing elements
159 For instance let $e_i$ be the vector of $\mathbb{B}^N$ whose $i$-th
160 component is $1$ and all other components are null. Let $e_0=0$ and
161 $S=\{e_i\mid 0\leq i\leq N\}$. Choosing $\nu(e_i)=\dfrac{1}{N+1}$, we
162 obtain the random walk defined by the {\em bit negation} of the paper
163 by Christophe and JEF.
164 The associated matrix will be denoted $P_1$.
165 Choosing $\mathcal{P}=\{10,11\}$ and
166 $\mu(10)=\mu(11)=\dfrac{1}{2}$ provides the PRNG with steps of lengths
167 10 or 11 with the same probability.
172 With the same notation, choosing the same $S$, but
173 $\nu(e_i)=\dfrac{1}{2N}$ if $i \geq 1$ and $\nu(e_0)=1/2$ leads to
174 the classical lazy random walk on $\mathbb{B}^N$ (also known as the
175 lazy random walk on the hypercube or as the Ehrenfest Urn Model).
176 The associated matrix will be denoted $P_2$.
180 Choosing $S=G$ and the uniform distribution for $\nu$ corresponds to
181 the xor approach of the paper with Raphael.
186 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
189 The main result is that if the minimal element of $\mathcal{P}$ is
190 greater than or equal to the mixing time of $P$, then the PRNG provides a distribution
191 whose distance to the uniform distribution is at most $\varepsilon$.
193 Let $t_P(\varepsilon)$ be the $\varepsilon$ mixing time for $P$.
194 Without loss of generality we assume that if $k\in \mathcal{P}$, then
198 Let $k_0=\min \{ k\mid k\in \mathcal{P}\}$.
200 t_P(\varepsilon)$, and if $\nu(s)>0$ for all $s\in S$, then one has
201 $\tv{P_{\mathcal{P},\mu}(x,\cdot)-\pi}\leq \varepsilon$, where $\pi$
202 is the uniform distribution.
206 The fact that $\nu(s)>0$ for all $s\in S$ ensures that the uniform
207 distribution is the limit of the Markov chain induced by $P$
208 (classical results on random walks).
212 \tv{P_{\mathcal{P},\mu}(x,\cdot)-\pi}&=\tv{\sum_{k\in\mathcal{P}}\mu(k)P^k(x,\cdot)-\pi}\\
213 &=\frac{1}{2}\sum_{y\in\mathbb{B}^N}|\sum_{k\in\mathcal{P}}\mu(k)P^k(x,y)-\dfrac{1}{2^N}|\\
214 &=\frac{1}{2}\sum_{y\in\mathbb{B}^N}|\sum_{k\in\mathcal{P}}\mu(k)P^k(x,y)-\dfrac{1}{2^N}\sum_{k\in\mathcal{P}}\mu(k)|\\
215 &=\frac{1}{2}\sum_{y\in\mathbb{B}^N}|\sum_{k\in\mathcal{P}}\mu(k)(P^k(x,y)-\dfrac{1}{2^N})|\\
216 &\leq \frac{1}{2}\sum_{y\in\mathbb{B}^N}\sum_{k\in\mathcal{P}}\mu(k)|P^k(x,y)-\dfrac{1}{2^N}|\\
217 &= \sum_{k\in\mathcal{P}}\mu(k)\left(\frac{1}{2}\sum_{y\in\mathbb{B}^N}|P^k(x,y)-\dfrac{1}{2^N}|\right)\\
218 &= \sum_{k\in\mathcal{P}}\mu(k)\tv{P^k(x,\cdot)-\pi}\\
219 &\leq \sum_{k\in\mathcal{P}}\mu(k)\varepsilon\\
225 Therefore it suffices to study the mixing time of $P$.
230 \section{Mixing time of $P_1$}
232 See the Ehrenfest Urn Model. One can prove that for $P_1$,
234 $$t_{\rm mix}(\varepsilon)\leq N \log N+ \log (\frac{1}{\varepsilon})N.$$
236 Better results exist; see~\cite[page 83, page 267]{mixing}.
239 \section{Mixing time of $P_2$}
241 In practice one can compute eigenvalues and use \cite[page 155]{mixing}.
243 There are theoretical results~\cite[pages 321--322]{rwfg} and~\cite{WilsonHyper}.
249 Experiments for computing mixing time for $P_2$.
251 Experiments for other $P$ (built by hand).
253 Which $\varepsilon$ makes it possible to pass statistical tests for our PRNGs. Other tests can be performed.
255 %%%%%%%%%%%%%%%%%%%%%%
258 Look at \cite{rwfg} for theoretical results. Explore random random walk on the hypercube.
261 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
262 \bibliographystyle{alpha}
263 \bibliography{markovbib}