\documentclass[sunil1,ChapterTOCs]{sunil}
\usepackage[utf8]{inputenc}
-\usepackage{amssymb}
+%\usepackage{amssymb}
%usepackage{amsbsy}
\usepackage{amsfonts,amssymb}
\usepackage{amsmath}
\usepackage{epstopdf}
\usepackage{url}
\usepackage{multirow}
-\usepackage{layouts}
+%\usepackage{layouts}
\usepackage{comment}
\usepackage{xcolor}
\usepackage{upquote}
\usepackage{moreverb}
\usepackage{commath}
\usepackage{numprint}
+\usepackage{placeins}
%\usepackage{lmodern}
%% \usepackage{listings}
%% \usepackage{subfigure}
\newcommand{\mbf}{\mathbf}
\newcommand{\mc}{\mathcal}
-%newcommand{\bs}{\boldsymbol}
\newcommand{\N}{\mathcal{N}}
\newcommand{\B}{\mathcal{B}}
+\newcommand{\bs}{\boldsymbol}
+
+
+
+\let\OLDlstinputlisting\lstinputlisting
+\renewcommand{\lstinputlisting}[2][]{\FloatBarrier\OLDlstinputlisting[#1]{#2}\FloatBarrier}
\DeclareMathOperator*{\argmax}{arg\,max}
\makeindex
-%\includeonly{Chapters/chapter5/ch5}
+%\includeonly{Chapters/chapter10/ch10}
\begin{document}
+
+
+
%%bibliography style to adopt
\bibliographyunit[\chapter]
\defaultbibliographystyle{plain}
\part{Optimization}
\include{Chapters/chapter8/ch8}
\include{Chapters/chapter9/ch9}
-\include{Chapters/chapter10/ch10} %revoir ce chapitre
+\include{Chapters/chapter10/ch10}
\part{Numerical applications}
\include{Chapters/chapter7/ch7}
}
@Article{GILL1989,
-author = {P.E. Gill et al.},
+author = {P. E. Gill et al.},
title = {A practical anti-cycling procedure for linearly constrained optimization},
journal = {Mathematical Programming},
year = {1989},
@Misc{VOLKOV2010,
author = {V. Volkov},
title = {Better Performance at Lower Occupancy},
-howpublished = {http://www.cs.berkeley.edu/~volkov/},
-year = {2010}
+note = {Presentation at the {GPU} {T}echnology {C}onference 2010 (GTC 2010)},
+howpublished = {http://www.eecs.berkeley.edu/$\sim$volkov},
}
@TechReport{MODEL,
@Misc{NETBLIB,
author = {J. Dongarra and E. Grosse},
-title = {The {NETLIB} Repository},
+title = {The {NETLIB} {R}epository at {UTK} and {ORNL}},
+institution = {The University of Tennessee and Oak Ridge National Lab},
howpublished = {http://www.netlib.org},
-year = {2010}
}
@Misc{APMTECH,
}
@Book{HILLIER,
-author = {F.S Hillier and G.J. Lierberman},
+author = {F. S Hillier and G. J. Lierberman},
title = {Introduction to Operations Research},
year = {2010},
edition = {9}
author = {X. Meyer},
title = {{\'E}tude et impl{\'e}mentation de l'algorithme standard du simplexe sur {GPU}s},
school = {University of {Geneva}},
-year = {2011},
+year = {2011}
+}
+
+@InProceedings{MEYER2011,
+author = {X. Meyer and P. Albuquerque and B. Chopard},
+title = {A multi-{GPU} implementation and performance model for the standard simplex method},
+booktitle = {Proc. of the 1st Int'l Symp. and 10th Balkan Conf. on Operational Research, BalcOR},
+year = {2011}
+}
@Article{LALAMI11,
author = {M. E. Lalami{,} D. El-Baz and V. Boyer},
-title = {Multi GPU Implementation of the Simplex Algorithm},
+title = {Multi-{GPU} Implementation of the Simplex Algorithm},
journal = {IEEE International Conference on High Performance Computing and Communications},
year = {2011},
pages = {179-186}
@Article{JUNG08,
author = {J. H. Jung and D. P. O'Leary},
-title = {Implementing an interior point method for linear programs on a CPU-GPU system},
+title = {Implementing an interior point method for linear programs on a {CPU-GPU} system},
journal = {Electronic Transactions on Numerical Analysis},
year = {2008},
pages = {174-189},
@Article{ELSTER09,
author = {D. G. Spampinato{,} A. C. Elster and T. Natvig},
-title = {Modeling Multi-GPU Systems in Parallel Computing: From Multicores and GPU's to Petascale},
+title = {Modeling Multi-{GPU} Systems in Parallel Computing: From Multicores and {GPU}'s to Petascale},
journal = {Advances in Parallel Computing},
year = {2010},
pages = {562-569},
@book{dantzig1953product,
title={The Product Form for the Inverse in the Simplex Method},
- author={Dantzig, G. B. and Orchard-Hays, W. and RAND Corp. Santa Monica Calif.},
+ author={G. B. Dantzig and W. Orchard-Hays},
+ institution={RAND Corp.},
+ address={Santa Monica, California},
url={http://books.google.ch/books?id=XLuttgAACAAJ},
year={1953},
publisher={Defense Technical Information Center}
@article{Marchand:2002:CPI:772382.772395,
- author = {Marchand, Hugues and Martin, Alexander and Weismantel, Robert and Wolsey, Laurence},
+ author = {Marchand, H. and Martin, A. and Weismantel, R. and Wolsey, L.},
title = {Cutting planes in integer and mixed integer programming},
journal = {Discrete Appl. Math.},
issue_date = {15 November 2002},
volume={43},
issue={1},
doi={10.1007/BF02025534},
-title={A fast LU update for linear programming},
+title={A fast {LU} update for linear programming},
url={http://dx.doi.org/10.1007/BF02025534},
publisher={Baltzer Science Publishers, Baarn/Kluwer Academic Publishers},
-author={Suhl, L. M. and Suhl, U. H.},
+author={L. M. Suhl and U. H. Suhl},
pages={33-47},
language={English}
}
title={Experiments in mixed-integer linear programming},
url={http://dx.doi.org/10.1007/BF01584074},
publisher={Springer-Verlag},
-author={Benichou, M. and Gauthier, J. M. and Girodet, P. and Hentges, G. and Ribiere, G. and Vincent, O.},
+author={M. Benichou et al.},
pages={76-94},
language={English}
}
@article{Achterberg05,
- author = {Achterberg, T. and Koch, T. and Martin, A.},
+ author = {T. Achterberg{,} T. Koch and A. Martin},
title = {Branching rules revisited},
journal = {Oper. Res. Lett.},
issue_date = {January, 2005},
number = {5},
pages = {736-773}
}
+
+@article{LEMKE54,
+author    = {Lemke, C. E.},
+title     = {The dual method of solving the linear programming problem},
+journal   = {Naval Research Logistics Quarterly},
+volume    = {1},
+number    = {1},
+pages     = {36--47},
+year      = {1954},
+publisher = {Wiley Subscription Services, Inc., A Wiley Company},
+issn      = {1931-9193},
+doi       = {10.1002/nav.3800010107},
+url       = {http://dx.doi.org/10.1002/nav.3800010107}
+}
+
+
+@article{BARTELS69,
+author     = {R. H. Bartels and G. H. Golub},
+title      = {The simplex method of linear programming using {LU} decomposition},
+journal    = {Commun. ACM},
+volume     = {12},
+number     = {5},
+month      = may,
+year       = {1969},
+pages      = {266--268},
+numpages   = {3},
+issn       = {0001-0782},
+doi        = {10.1145/362946.362974},
+url        = {http://doi.acm.org/10.1145/362946.362974},
+acmid      = {362974},
+issue_date = {May 1969},
+publisher  = {ACM},
+address    = {New York, NY, USA},
+keywords   = {LU decomposition, computational stability, linear programming, round-off errors, simplex method}
+}
+
+@article{BRAYTON70,
+  title={Some results on sparse matrices},
+  author={R. K. Brayton and F. G. Gustavson and R. A. Willoughby},
+  journal={Mathematics of Computation},
+  volume={24},
+  number={112},
+  pages={937--954},
+  year={1970}
+}
\chapterauthor{Bastien Chopard}{Department of Computer Science, University of Geneva}
%\chapter{Linear programming on a GPU: a study case based on the simplex method and the branch-cut-and bound algorithm}
-\chapter{Linear programming on a GPU: a~study~case}
+\chapter{Linear programming on a GPU: a case study}
\section{Introduction}
\label{chXXX:sec:intro}
The simplex method is a well-known optimization algorithm for solving linear programming (LP) models in the field of operations research. It is part of software often employed by businesses for finding solutions to problems such as airline scheduling. The original standard simplex method was proposed by Dantzig in 1947~\cite{VCLP}. A more efficient method, named the revised simplex, was later developed. Nowadays its sequential implementation can be found in almost all commercial LP solvers. But the ever-increasing complexity and size of industrial LP problems drive the demand for more computational power. Indeed, current implementations of the revised simplex strive to produce the expected results, if any. In this context, parallelization is the natural idea to investigate~\cite{HALL}. Already in 1996, Thomadakis et al.~\cite{THOMAD} implemented the standard method on a massively parallel computer and obtained an increase in performance when solving dense or large problems.
We then update the constraints matrix $\mbf{A}$, the bound $\mbf{b}$ and the cost $\mbf{c}$ using Gaussian elimination.
More precisely, denoting $\mbf{\tilde{I}_m}$, $\mbf{\tilde{A}_\N}$, $\mbf{\tilde{c}_\B}$ and $\mbf{\tilde{c}_\N}$ the resulting elements after the exchange, Gaussian elimination then transforms the tableau
+\begin{center}
\begin{tabular}{|c|c||c|}
$\mbf{\tilde{A}_\N}$ & $\mbf{\tilde{I}_m}$ & $\mbf{b}$ \\
\hline
$\mbf{\tilde{c}^T_\N}$ & $\mbf{\tilde{c}^T_\B}$ & $z$
\end{tabular}
+\end{center}
+\noindent
into a tableau with updated values for $\mbf{A_\N}$, $\mbf{c_\N}$ and $\mbf{b}$
-
+\begin{center}
\begin{tabular}{|c|c||c|}
$\mbf{A_\N}$ & $\mbf{I_m}$ & $\mbf{b}$ \\
\hline
$\mbf{c^T_\N}$ & $\mbf{c^T_\B}$ & $z-tc_e$
\end{tabular}
-
+\end{center}
The latter is obtained by first dividing the $\ell$-th row by $a_{\ell e}$;
the resulting row multiplied by $a_{ie}$ ($i\not=\ell$), resp. by $c_e$, is then added to the $i$-th row, resp. the last row.
Hence, $\mbf{c_\B=0}$.
\label{chXXX:algo:simplex-rev}
\textit{// Find entering variable $x_e,\, e\in \B$}\;
$\bs\tau \leftarrow \mbf{(B^{-1})^T}\mbf{c_\B} $ \hspace*{5mm} \textit{// cublasDgemv}\;
- $\bs\upsilon \leftarrow \mbf{c_\N} - \mbf{N^T} \bs\tau $ \hspace*{4mm} \textit{// cublasDgemv}
+ $\bs\upsilon \leftarrow \mbf{c_\N} - \mbf{N^T} \bs\tau $ \hspace*{4mm} \textit{// cublasDgemv}\;
$e \leftarrow argmax \left( \bs\upsilon / \sqrt{\bs\gamma} \right) $\;
\If { $e < 0$ } {\Return \texttt{optima\_found} }
\textit{// Find leaving variable $x_\ell,\, \ell\in \N$} \;
The test environment is composed of a CPU server (2 Intel Xeon X5650, 2.67GHz, with 96GB DDR3 RAM) and a GPU computing system (NVIDIA Tesla M2090) with the 4.2 CUDA Toolkit. This system connects 4 GPUs to the server. Each GPU has 6GB GDDR5 graphics memory and 512 cores (1.3GHz).
\begin{table}[!h]
+\begin{center}
\begin{tabular}{|l|r|r|r|r|}
\hline
\multicolumn{1}{|l|}{Problem name} & \multicolumn{1}{l|}{Rows} & \multicolumn{1}{l|}{Cols} & \multicolumn{1}{l|}{NNZ} & \multicolumn{1}{l|}{C-to-R} \\ \hline
\end{tabular}
\caption{NETLIB problems solved in less than 1 second}
\label{chXXX:tab:small}
+\end{center}
\end{table}
Let us begin by discussing the performances on problems solved in less than one second. The name, size, number of non-zero elements (NNZ) and columns to rows ratio (C-to-R) of each problem are reported in table~\ref{chXXX:tab:small}. The performances shown in figure~\ref{chXXX:fig:FastSolve} corroborate our previous observations. On these problems the \texttt{Revised-sparse} method doesn't outperform the \texttt{Standard} one. This can be explained by two factors: the added communications (kernel calls) for the revised method, and the small size and density of the problems. It is likely that sparse operations on a GPU require larger amounts of data to become more efficient than their dense counterparts.
The problems shown in table~\ref{chXXX:tab:medium} are solved in less than 4 seconds. As we can see in figure~\ref{chXXX:fig:NormalSolve}, the expected trend for the \texttt{Revised-base} and the \texttt{Revised-opti} methods is now confirmed. Let us presently focus on the \texttt{Standard} and \texttt{Revised-sparse} methods. Some of the problems, in particular \textit{czprob.mps} and \textit{nesm.mps}, are solved in a comparable amount of time. The performance gain of the \texttt{Revised-sparse} is related to the C-to-R (columns to rows) ratio of these problems, displaying respectively a 3.8 and a 4.4 ratio.
\begin{table}[!h]
+\begin{center}
\begin{tabular}{|l|r|r|r|r|}
\hline
\multicolumn{1}{|l|}{Problem name} & \multicolumn{1}{l|}{Rows} & \multicolumn{1}{l|}{Cols} & \multicolumn{1}{l|}{NNZ} & \multicolumn{1}{l|}{C-to-R} \\ \hline
\end{tabular}
\caption{NETLIB problems solved in the range of 1 to 4 seconds}
\label{chXXX:tab:medium}
+\end{center}
\end{table}
\begin{figure}[!h]
Finally, the biggest problems, and slowest to solve, are given in table~\ref{chXXX:tab:big}. A new tendency can be observed in figure~\ref{chXXX:fig:SlowSolve}. The \texttt{Revised-sparse} method is the fastest on most of the problems. The performances are still close between the best two methods on problems having a C-to-R ratio close to 2 like \textit{bnl2.mps}, \textit{pilot.mps} or \textit{greenbeb.mps}. However, when this ratio is greater, the \texttt{Revised-sparse} can be nearly twice as fast as the standard method. This is noticeable on \textit{80bau3b.mps} (4.5) and \textit{fit2p.mps} (4.3). Although the C-to-R ratio of \textit{d6cube.mps} (14.9) exceeds the ones previously mentioned, the \texttt{Revised-sparse} method doesn't show an impressive performance, probably due to the small number of rows and the density of this problem, which thus does not fully benefit from the lower complexity of sparse operations.
\begin{table}[!h]
+\begin{center}
\begin{tabular}{|l|r|r|r|r|}
\hline
\multicolumn{1}{|l|}{Problem name} & \multicolumn{1}{l|}{Rows} & \multicolumn{1}{l|}{Cols} & \multicolumn{1}{l|}{NNZ} & \multicolumn{1}{l|}{C-to-R} \\ \hline
\end{tabular}
\caption{NETLIB problems solved in more than 5 seconds}
\label{chXXX:tab:big}
+\end{center}
\end{table}
\begin{figure}[!h]
\newlabel{fig:sap_examples}{{4.1}{33}}
\newlabel{lst:medianGeneric}{{4.1}{34}}
\@writefile{lol}{\contentsline {lstlisting}{\numberline {4.1}Generic CUDA kernel achieving median filtering}{34}}
-\@writefile{lof}{\contentsline {figure}{\numberline {4.2}{\ignorespaces Exemple of 5x5 median filtering\relax }}{35}}
-\newlabel{fig:median_1}{{4.2}{35}}
\newlabel{algoMedianGeneric}{{2}{35}}
\newlabel{algoMedianGeneric:memcpyH2D}{{1}{35}}
\newlabel{algoMedianGeneric:cptstart}{{3}{35}}
\newlabel{algoMedianGeneric:memcpyD2H}{{7}{35}}
\@writefile{loa}{\contentsline {algocf}{\numberline {2}{\ignorespaces generic n$\times $n median filter\relax }}{35}}
\@writefile{toc}{\contentsline {section}{\numberline {4.3}NVidia GPU tuning recipes}{35}}
+\@writefile{lof}{\contentsline {figure}{\numberline {4.2}{\ignorespaces Exemple of 5x5 median filtering\relax }}{35}}
+\newlabel{fig:median_1}{{4.2}{35}}
\@writefile{lof}{\contentsline {figure}{\numberline {4.3}{\ignorespaces Illustration of window overlapping in 5x5 median filtering\relax }}{36}}
\newlabel{fig:median_overlap}{{4.3}{36}}
\@writefile{lot}{\contentsline {table}{\numberline {4.1}{\ignorespaces Performance results of \texttt {kernel medianR}. \relax }}{36}}
\@writefile{lof}{\contentsline {figure}{\numberline {4.5}{\ignorespaces Forgetful selection with the minimal element register count. Illustration for 3$\times $3 pixel window represented in a row and supposed sorted.\relax }}{39}}
\newlabel{fig:forgetful_selection}{{4.5}{39}}
\@writefile{toc}{\contentsline {subsubsection}{\numberline {4.4.2.1}Reducing register count }{39}}
-\newlabel{lst:medianForget1pix3}{{4.3}{40}}
-\@writefile{lol}{\contentsline {lstlisting}{\numberline {4.3}3$\times $3 median filter kernel using the minimum register count of 6 to find the median value by forgetful selection method. The optimal thread block size is 128 on GTX280 and 256 on C2070.}{40}}
\@writefile{lof}{\contentsline {figure}{\numberline {4.6}{\ignorespaces Determination of the Median value by the forgetful selection process, applied to a $3\times 3$ neighborhood window.\relax }}{41}}
\newlabel{fig:forgetful3}{{4.6}{41}}
+\newlabel{lst:medianForget1pix3}{{4.3}{41}}
+\@writefile{lol}{\contentsline {lstlisting}{\numberline {4.3}3$\times $3 median filter kernel using the minimum register count of 6 to find the median value by forgetful selection method. The optimal thread block size is 128 on GTX280 and 256 on C2070.}{41}}
+\@writefile{toc}{\contentsline {subsubsection}{\numberline {4.4.2.2}More data output per thread}{42}}
\@writefile{lof}{\contentsline {figure}{\numberline {4.7}{\ignorespaces Illustration of how window overlapping is used to combine 2 pixel selections in a 3$\times $3 median kernel.\relax }}{42}}
\newlabel{fig:median3_overlap}{{4.7}{42}}
-\@writefile{toc}{\contentsline {subsubsection}{\numberline {4.4.2.2}More data output per thread}{42}}
-\newlabel{lst:medianForget2pix3}{{4.4}{42}}
-\@writefile{lol}{\contentsline {lstlisting}{\numberline {4.4}3$\times $3 median filter kernel processing 2 output pixel values per thread using combined forgetful selection.}{42}}
-\@writefile{lof}{\contentsline {figure}{\numberline {4.8}{\ignorespaces Comparison of pixel throughput on GPU C2070 for the different 3$\times $3 median kernels.\relax }}{43}}
-\newlabel{fig:compMedians2}{{4.8}{43}}
-\@writefile{toc}{\contentsline {section}{\numberline {4.5}A 5$\times $5 and more median filter }{44}}
+\newlabel{lst:medianForget2pix3}{{4.4}{43}}
+\@writefile{lol}{\contentsline {lstlisting}{\numberline {4.4}3$\times $3 median filter kernel processing 2 output pixel values per thread using combined forgetful selection.}{43}}
+\@writefile{toc}{\contentsline {section}{\numberline {4.5}A 5$\times $5 and more median filter }{43}}
+\@writefile{lof}{\contentsline {figure}{\numberline {4.8}{\ignorespaces Comparison of pixel throughput on GPU C2070 for the different 3$\times $3 median kernels.\relax }}{44}}
+\newlabel{fig:compMedians2}{{4.8}{44}}
\newlabel{sec:median5}{{4.5.1}{44}}
\@writefile{toc}{\contentsline {subsection}{\numberline {4.5.1}A register-only 5$\times $5 median filter }{44}}
-\newlabel{lst:medianForget2pix5}{{4.5}{44}}
-\@writefile{lol}{\contentsline {lstlisting}{\numberline {4.5}kernel 5$\times $5 median filter processing 2 output pixel values per thread by a combined forgetfull selection.}{44}}
\@writefile{lof}{\contentsline {figure}{\numberline {4.9}{\ignorespaces Reducing register count in a 5$\times $5 register-only median kernel outputting 2 pixels simultaneously. The first 7 forgetful selection stages are common to both processed center pixels. Only the last 5 selections have to be done separately.\relax }}{45}}
\newlabel{fig:median5overlap}{{4.9}{45}}
\@writefile{lof}{\contentsline {figure}{\numberline {4.10}{\ignorespaces First iteration of the $5\times 5$ selection process, with $k_{25}=14$, which shows how Instruction Level Parallelism is maximized by the use of an incomplete sorting network. Arrows represent the result of the swapping function, with the lowest value at the starting point and the highest value at the end point.\relax }}{45}}
\newlabel{fig:median5overlap}{{4.10}{45}}
-\@writefile{lot}{\contentsline {table}{\numberline {4.2}{\ignorespaces Performance of various 5$\times $5 median kernel implementations, applied on 4096$\times $4096 pixel image with C2070 GPU card.\relax }}{46}}
-\newlabel{tab:median5comp}{{4.2}{46}}
+\newlabel{lst:medianForget2pix5}{{4.5}{46}}
+\@writefile{lol}{\contentsline {lstlisting}{\numberline {4.5}kernel 5$\times $5 median filter processing 2 output pixel values per thread by a combined forgetfull selection.}{46}}
+\@writefile{lot}{\contentsline {table}{\numberline {4.2}{\ignorespaces Performance of various 5$\times $5 median kernel implementations, applied on 4096$\times $4096 pixel image with C2070 GPU card.\relax }}{47}}
+\newlabel{tab:median5comp}{{4.2}{47}}
\@writefile{toc}{\contentsline {subsection}{\numberline {4.5.2}Fast approximated n$\times $n median filter }{47}}
-\newlabel{lst:medianSeparable}{{4.6}{47}}
-\@writefile{lol}{\contentsline {lstlisting}{\numberline {4.6}generic pseudo median kernel.}{47}}
-\newlabel{img:sap_example_ref}{{4.11(a)}{48}}
-\newlabel{sub@img:sap_example_ref}{{(a)}{48}}
-\newlabel{img:sap_example_sep_med3}{{4.11(b)}{48}}
-\newlabel{sub@img:sap_example_sep_med3}{{(b)}{48}}
-\newlabel{img:sap_example_sep_med5}{{4.11(c)}{48}}
-\newlabel{sub@img:sap_example_sep_med5}{{(c)}{48}}
-\newlabel{img:sap_example_sep_med3_it2}{{4.11(d)}{48}}
-\newlabel{sub@img:sap_example_sep_med3_it2}{{(d)}{48}}
-\@writefile{lof}{\contentsline {figure}{\numberline {4.11}{\ignorespaces Exemple of separable median filtering (smoother), applied to salt \& pepper noise reduction.\relax }}{48}}
-\@writefile{lof}{\contentsline {subfigure}{\numberline{(a)}{\ignorespaces {Airplane image, corrupted with by salt and pepper noise of density 0.25}}}{48}}
-\@writefile{lof}{\contentsline {subfigure}{\numberline{(b)}{\ignorespaces {Image denoised by a $3\times 3$ separable smoother}}}{48}}
-\@writefile{lof}{\contentsline {subfigure}{\numberline{(c)}{\ignorespaces {Image denoised by a $5\times 5$ separable smoother}}}{48}}
-\@writefile{lof}{\contentsline {subfigure}{\numberline{(d)}{\ignorespaces {Image background estimation by a $55\times 55$ separable smoother}}}{48}}
-\newlabel{fig:sap_examples2}{{4.11}{48}}
-\@writefile{lot}{\contentsline {table}{\numberline {4.3}{\ignorespaces Measured performance of one generic pseudo-separable median kernel applied to 4096$\times $4096 pixel image with various window sizes.\relax }}{49}}
-\newlabel{tab:medianSeparable}{{4.3}{49}}
-\@writefile{toc}{\contentsline {section}{Bibliography}{50}}
+\@writefile{lot}{\contentsline {table}{\numberline {4.3}{\ignorespaces Measured performance of one generic pseudo-separable median kernel applied to 4096$\times $4096 pixel image with various window sizes.\relax }}{48}}
+\newlabel{tab:medianSeparable}{{4.3}{48}}
+\newlabel{img:sap_example_ref}{{4.11(a)}{49}}
+\newlabel{sub@img:sap_example_ref}{{(a)}{49}}
+\newlabel{img:sap_example_sep_med3}{{4.11(b)}{49}}
+\newlabel{sub@img:sap_example_sep_med3}{{(b)}{49}}
+\newlabel{img:sap_example_sep_med5}{{4.11(c)}{49}}
+\newlabel{sub@img:sap_example_sep_med5}{{(c)}{49}}
+\newlabel{img:sap_example_sep_med3_it2}{{4.11(d)}{49}}
+\newlabel{sub@img:sap_example_sep_med3_it2}{{(d)}{49}}
+\@writefile{lof}{\contentsline {figure}{\numberline {4.11}{\ignorespaces Exemple of separable median filtering (smoother), applied to salt \& pepper noise reduction.\relax }}{49}}
+\@writefile{lof}{\contentsline {subfigure}{\numberline{(a)}{\ignorespaces {Airplane image, corrupted with by salt and pepper noise of density 0.25}}}{49}}
+\@writefile{lof}{\contentsline {subfigure}{\numberline{(b)}{\ignorespaces {Image denoised by a $3\times 3$ separable smoother}}}{49}}
+\@writefile{lof}{\contentsline {subfigure}{\numberline{(c)}{\ignorespaces {Image denoised by a $5\times 5$ separable smoother}}}{49}}
+\@writefile{lof}{\contentsline {subfigure}{\numberline{(d)}{\ignorespaces {Image background estimation by a $55\times 55$ separable smoother}}}{49}}
+\newlabel{fig:sap_examples2}{{4.11}{49}}
+\newlabel{lst:medianSeparable}{{4.6}{50}}
+\@writefile{lol}{\contentsline {lstlisting}{\numberline {4.6}generic pseudo median kernel.}{50}}
+\@writefile{toc}{\contentsline {section}{Bibliography}{51}}
\@setckpt{Chapters/chapter3/ch3}{
\setcounter{page}{52}
\setcounter{equation}{0}