From bc21a4e60ae5e4ef525ce6933905824c6597401d Mon Sep 17 00:00:00 2001 From: couturie Date: Tue, 26 Feb 2013 22:51:13 +0100 Subject: [PATCH] new --- BookGPU/BookGPU.tex | 3 +- BookGPU/Chapters/chapter12/biblio12.bib | 50 +- BookGPU/Chapters/chapter12/ch12.aux | 119 ++- BookGPU/Chapters/chapter12/ch12.tex | 960 ++++++++++-------- BookGPU/Chapters/chapter13/biblio13.bib | 56 +- BookGPU/Chapters/chapter13/ch13.aux | 115 ++- BookGPU/Chapters/chapter13/ch13.tex | 953 ++++++++++------- .../Chapters/chapter13/figures/cluster.eps | 94 +- .../Chapters/chapter13/figures/cluster.pdf | Bin 13688 -> 14158 bytes 9 files changed, 1371 insertions(+), 979 deletions(-) diff --git a/BookGPU/BookGPU.tex b/BookGPU/BookGPU.tex index f4f8323..b58372d 100755 --- a/BookGPU/BookGPU.tex +++ b/BookGPU/BookGPU.tex @@ -45,7 +45,8 @@ \include{Chapters/chapter1/preamble} \include{Chapters/chapter5/preamble} - +\newcommand{\scalprod}[2]% +{\ensuremath{\langle #1 \, , #2 \rangle}} \makeatletter diff --git a/BookGPU/Chapters/chapter12/biblio12.bib b/BookGPU/Chapters/chapter12/biblio12.bib index cfb5778..d3cac8f 100644 --- a/BookGPU/Chapters/chapter12/biblio12.bib +++ b/BookGPU/Chapters/chapter12/biblio12.bib @@ -1,6 +1,6 @@ -@article{ref1, +@article{ch12:ref1, title = {Iterative methods for sparse linear systems}, -author = {Saad, Y.}, +author = {Saad, Yousef}, journal = {Society for Industrial and Applied Mathematics, 2nd edition}, volume = {}, number = {}, @@ -8,9 +8,9 @@ pages = {}, year = {2003}, } -@article{ref2, +@article{ch12:ref2, title = {Methods of conjugate gradients for solving linear systems}, -author = {Hestenes, M. R. and Stiefel, E.}, +author = {Hestenes, Maqnus R. and Stiefel, Eduard}, journal = {Journal of Research of the National Bureau of Standards}, volume = {49}, number = {6}, @@ -18,9 +18,9 @@ pages = {409--436}, year = {1952}, } -@article{ref3, +@article{ch12:ref3, title = {{GMRES}: a generalized minimal residual algorithm for solving nonsymmetric linear systems}, -author = {Saad, Y. and Schultz, M. H.}, +author = {Saad, Yousef and Schultz, Martin H.}, journal = {SIAM Journal on Scientific and Statistical Computing}, volume = {7}, number = {3}, @@ -28,9 +28,9 @@ pages = {856--869}, year = {1986}, } -@article{ref4, +@article{ch12:ref4, title = {Solution of sparse indefinite systems of linear equations}, -author = {Paige, C. C. and Saunders, M. A.}, +author = {Paige, Chris C. and Saunders, Michael A.}, journal = {SIAM Journal on Numerical Analysis}, volume = {12}, number = {4}, @@ -38,9 +38,9 @@ pages = {617--629}, year = {1975}, } -@article{ref5, +@article{ch12:ref5, title = {The principle of minimized iteration in the solution of the matrix eigenvalue problem}, -author = {Arnoldi, W. E.}, +author = {Arnoldi, Walter E.}, journal = {Quarterly of Applied Mathematics}, volume = {9}, number = {17}, @@ -48,7 +48,7 @@ pages = {17--29}, year = {1951}, } -@article{ref6, +@article{ch12:ref6, title = {{CUDA} Toolkit 4.2 {CUBLAS} Library}, author = {NVIDIA Corporation}, journal = {}, @@ -59,9 +59,9 @@ note = {\url{http://developer.download.nvidia.com/compute/DevZone/docs/html/CUDA year = {2012}, } -@article{ref7, +@article{ch12:ref7, title = {Efficient sparse matrix-vector multiplication on {CUDA}}, -author = {Bell, N. 
and Garland, M.}, +author = {Bell, Nathan and Garland, Michael}, journal = {NVIDIA Technical Report NVR-2008-004, NVIDIA Corporation}, volume = {}, number = {}, @@ -69,7 +69,7 @@ pages = {}, year = {2008}, } -@article{ref8, +@article{ch12:ref8, title = {{CUSP} {L}ibrary}, author = {}, journal = {}, @@ -80,7 +80,7 @@ note = {\url{http://code.google.com/p/cusp-library/}}, year = {}, } -@article{ref9, +@article{ch12:ref9, title = {{NVIDIA} {CUDA} {C} programming guide}, author = {NVIDIA Corporation}, journal = {}, @@ -91,7 +91,7 @@ note = {Version 4.2}, year = {2012}, } -@article{ref10, +@article{ch12:ref10, title = {The university of {F}lorida sparse matrix collection}, author = {Davis, T. and Hu, Y.}, journal = {}, @@ -102,9 +102,9 @@ note = {\url{http://www.cise.ufl.edu/research/sparse/matrices/list_by_id.html}}, year = {1997}, } -@article{ref11, +@article{ch12:ref11, title = {Hypergraph partitioning based decomposition for parallel sparse matrix-vector multiplication}, -author = {Catalyurek, U. and Aykanat, C.}, +author = {Catalyurek, Umit V. and Aykanat, Cevdet}, journal = {{IEEE} {T}ransactions on {P}arallel and {D}istributed {S}ystems}, volume = {10}, number = {7}, @@ -113,9 +113,9 @@ note = {}, year = {1999}, } -@article{ref12, +@article{ch12:ref12, title = {{hMETIS}: A hypergraph partitioning package}, -author = {Karypis, G. and Kumar, V.}, +author = {Karypis, George and Kumar, Vipin}, journal = {}, volume = {}, number = {}, @@ -124,9 +124,9 @@ note = {}, year = {1998}, } -@article{ref13, +@article{ch12:ref13, title = {{PaToH}: partitioning tool for hypergraphs}, -author = {Catalyurek, U. and Aykanat, C.}, +author = {Catalyurek, Umit V. and Aykanat, Cevdet}, journal = {}, volume = {}, number = {}, @@ -135,9 +135,9 @@ note = {}, year = {1999}, } -@article{ref14, +@article{ch12:ref14, title = {Parallel hypergraph partitioning for scientific computing}, -author = {Devine, K.D. and Boman, E.G. and Heaphy, R.T. and Bisseling, R.H and Catalyurek, U.V.}, +author = {Devine, Karen D. and Boman, Erik G. and Heaphy, Robert T. and Bisseling, Rob H. 
and Catalyurek, Umit V.}, journal = {In Proceedings of the 20th international conference on Parallel and distributed processing, IPDPS’06}, volume = {}, number = {}, @@ -146,7 +146,7 @@ note = {}, year = {2006}, } -@article{ref15, +@article{ch12:ref15, title = {{PHG} - parallel hypergraph and graph partitioning with {Z}oltan}, author = {}, journal = {}, diff --git a/BookGPU/Chapters/chapter12/ch12.aux b/BookGPU/Chapters/chapter12/ch12.aux index 0cc343d..26d263a 100644 --- a/BookGPU/Chapters/chapter12/ch12.aux +++ b/BookGPU/Chapters/chapter12/ch12.aux @@ -1,97 +1,106 @@ \relax -\@writefile{toc}{\author{}{}} +\@writefile{toc}{\author{Lilia Ziane Khodja}{}} +\@writefile{toc}{\author{Rapha\IeC {\"e}l Couturier}{}} +\@writefile{toc}{\author{Jacques Bahi}{}} \@writefile{loa}{\addvspace {10\p@ }} \@writefile{toc}{\contentsline {chapter}{\numberline {11}Solving sparse linear systems with GMRES and CG methods on GPU clusters}{249}} \@writefile{lof}{\addvspace {10\p@ }} \@writefile{lot}{\addvspace {10\p@ }} +\newlabel{ch12}{{11}{249}} \@writefile{toc}{\contentsline {section}{\numberline {11.1}Introduction}{249}} -\newlabel{sec:01}{{11.1}{249}} +\newlabel{ch12:sec:01}{{11.1}{249}} \@writefile{toc}{\contentsline {section}{\numberline {11.2}Krylov iterative methods}{250}} -\newlabel{sec:02}{{11.2}{250}} -\newlabel{eq:01}{{11.1}{250}} -\newlabel{eq:02}{{11.2}{250}} -\newlabel{eq:03}{{11.3}{250}} -\newlabel{eq:11}{{11.4}{251}} +\newlabel{ch12:sec:02}{{11.2}{250}} +\newlabel{ch12:eq:01}{{11.1}{250}} +\newlabel{ch12:eq:02}{{11.2}{250}} +\newlabel{ch12:eq:03}{{11.3}{250}} +\newlabel{ch12:eq:11}{{11.4}{251}} \@writefile{toc}{\contentsline {subsection}{\numberline {11.2.1}CG method}{251}} -\newlabel{sec:02.01}{{11.2.1}{251}} -\newlabel{eq:04}{{11.5}{251}} -\newlabel{eq:05}{{11.6}{251}} -\newlabel{eq:06}{{11.7}{251}} -\newlabel{eq:07}{{11.8}{251}} -\newlabel{eq:08}{{11.9}{251}} -\newlabel{eq:09}{{11.10}{251}} +\newlabel{ch12:sec:02.01}{{11.2.1}{251}} +\newlabel{ch12:eq:04}{{11.5}{251}} +\newlabel{ch12:eq:05}{{11.6}{251}} +\newlabel{ch12:eq:06}{{11.7}{251}} +\newlabel{ch12:eq:07}{{11.8}{251}} +\newlabel{ch12:eq:08}{{11.9}{251}} +\newlabel{ch12:eq:09}{{11.10}{251}} \@writefile{loa}{\contentsline {algocf}{\numberline {9}{\ignorespaces Left-preconditioned CG method\relax }}{252}} -\newlabel{alg:01}{{9}{252}} -\newlabel{eq:10}{{11.11}{252}} +\newlabel{ch12:alg:01}{{9}{252}} +\newlabel{ch12:eq:10}{{11.11}{252}} \@writefile{toc}{\contentsline {subsection}{\numberline {11.2.2}GMRES method}{253}} -\newlabel{sec:02.02}{{11.2.2}{253}} -\newlabel{eq:12}{{11.12}{253}} -\newlabel{eq:13}{{11.13}{253}} -\newlabel{eq:14}{{11.14}{253}} -\newlabel{eq:15}{{11.15}{253}} -\newlabel{eq:16}{{11.16}{253}} -\newlabel{eq:17}{{11.17}{253}} -\newlabel{eq:18}{{11.18}{253}} -\newlabel{eq:19}{{11.19}{253}} +\newlabel{ch12:sec:02.02}{{11.2.2}{253}} +\newlabel{ch12:eq:12}{{11.12}{253}} +\newlabel{ch12:eq:13}{{11.13}{253}} +\newlabel{ch12:eq:14}{{11.14}{253}} +\newlabel{ch12:eq:15}{{11.15}{253}} +\newlabel{ch12:eq:16}{{11.16}{253}} +\newlabel{ch12:eq:17}{{11.17}{253}} +\newlabel{ch12:eq:18}{{11.18}{253}} +\newlabel{ch12:eq:19}{{11.19}{253}} \@writefile{loa}{\contentsline {algocf}{\numberline {10}{\ignorespaces Left-preconditioned GMRES method with restarts\relax }}{254}} -\newlabel{alg:02}{{10}{254}} +\newlabel{ch12:alg:02}{{10}{254}} \@writefile{toc}{\contentsline {section}{\numberline {11.3}Parallel implementation on a GPU cluster}{255}} -\newlabel{sec:03}{{11.3}{255}} +\newlabel{ch12:sec:03}{{11.3}{255}} \@writefile{toc}{\contentsline 
{subsection}{\numberline {11.3.1}Data partitioning}{255}} -\newlabel{sec:03.01}{{11.3.1}{255}} +\newlabel{ch12:sec:03.01}{{11.3.1}{255}} \@writefile{lof}{\contentsline {figure}{\numberline {11.1}{\ignorespaces A data partitioning of the sparse matrix $A$, the solution vector $x$ and the right-hand side $b$ into four portions.\relax }}{256}} -\newlabel{fig:01}{{11.1}{256}} +\newlabel{ch12:fig:01}{{11.1}{256}} \@writefile{toc}{\contentsline {subsection}{\numberline {11.3.2}GPU computing}{256}} -\newlabel{sec:03.02}{{11.3.2}{256}} +\newlabel{ch12:sec:03.02}{{11.3.2}{256}} \@writefile{toc}{\contentsline {subsection}{\numberline {11.3.3}Data communications}{257}} -\newlabel{sec:03.03}{{11.3.3}{257}} +\newlabel{ch12:sec:03.03}{{11.3.3}{257}} \@writefile{lof}{\contentsline {figure}{\numberline {11.2}{\ignorespaces Data exchanges between \textit {Node 1} and its neighbors \textit {Node 0}, \textit {Node 2} and \textit {Node 3}.\relax }}{258}} -\newlabel{fig:02}{{11.2}{258}} +\newlabel{ch12:fig:02}{{11.2}{258}} \@writefile{lof}{\contentsline {figure}{\numberline {11.3}{\ignorespaces Columns reordering of a sparse sub-matrix.\relax }}{259}} -\newlabel{fig:03}{{11.3}{259}} +\newlabel{ch12:fig:03}{{11.3}{259}} \@writefile{lof}{\contentsline {figure}{\numberline {11.4}{\ignorespaces General scheme of the GPU cluster of tests composed of six machines, each with two GPUs.\relax }}{260}} -\newlabel{fig:04}{{11.4}{260}} +\newlabel{ch12:fig:04}{{11.4}{260}} \@writefile{toc}{\contentsline {section}{\numberline {11.4}Experimental results}{260}} -\newlabel{sec:04}{{11.4}{260}} +\newlabel{ch12:sec:04}{{11.4}{260}} \@writefile{lof}{\contentsline {figure}{\numberline {11.5}{\ignorespaces Sketches of sparse matrices chosen from the Davis's collection.\relax }}{261}} -\newlabel{fig:05}{{11.5}{261}} +\newlabel{ch12:fig:05}{{11.5}{261}} \@writefile{lot}{\contentsline {table}{\numberline {11.1}{\ignorespaces Main characteristics of sparse matrices chosen from the Davis's collection.\relax }}{262}} -\newlabel{tab:01}{{11.1}{262}} +\newlabel{ch12:tab:01}{{11.1}{262}} \@writefile{lot}{\contentsline {table}{\numberline {11.2}{\ignorespaces Performances of the parallel CG method on a cluster of 24 CPU cores vs. on a cluster of 12 GPUs.\relax }}{262}} -\newlabel{tab:02}{{11.2}{262}} +\newlabel{ch12:tab:02}{{11.2}{262}} \@writefile{lot}{\contentsline {table}{\numberline {11.3}{\ignorespaces Performances of the parallel GMRES method on a cluster 24 CPU cores vs. on cluster of 12 GPUs.\relax }}{263}} -\newlabel{tab:03}{{11.3}{263}} -\newlabel{eq:20}{{11.20}{263}} +\newlabel{ch12:tab:03}{{11.3}{263}} +\newlabel{ch12:eq:20}{{11.20}{263}} \@writefile{lof}{\contentsline {figure}{\numberline {11.6}{\ignorespaces Parallel generation of a large sparse matrix by four computing nodes.\relax }}{264}} -\newlabel{fig:06}{{11.6}{264}} +\newlabel{ch12:fig:06}{{11.6}{264}} \@writefile{lot}{\contentsline {table}{\numberline {11.4}{\ignorespaces Main characteristics of sparse banded matrices generated from those of the Davis's collection.\relax }}{265}} -\newlabel{tab:04}{{11.4}{265}} +\newlabel{ch12:tab:04}{{11.4}{265}} \@writefile{lot}{\contentsline {table}{\numberline {11.5}{\ignorespaces Performances of the parallel CG method for solving linear systems associated to sparse banded matrices on a cluster of 24 CPU cores vs. 
on a cluster of 12 GPUs.\relax }}{265}} -\newlabel{tab:05}{{11.5}{265}} +\newlabel{ch12:tab:05}{{11.5}{265}} \@writefile{toc}{\contentsline {section}{\numberline {11.5}Hypergraph partitioning}{265}} -\newlabel{sec:05}{{11.5}{265}} +\newlabel{ch12:sec:05}{{11.5}{265}} \@writefile{lot}{\contentsline {table}{\numberline {11.6}{\ignorespaces Performances of the parallel GMRES method for solving linear systems associated to sparse banded matrices on a cluster of 24 CPU cores vs. on a cluster of 12 GPUs.\relax }}{266}} -\newlabel{tab:06}{{11.6}{266}} +\newlabel{ch12:tab:06}{{11.6}{266}} \@writefile{lot}{\contentsline {table}{\numberline {11.7}{\ignorespaces Main characteristics of sparse five-bands matrices generated from those of the Davis's collection.\relax }}{266}} -\newlabel{tab:07}{{11.7}{266}} +\newlabel{ch12:tab:07}{{11.7}{266}} \@writefile{lof}{\contentsline {figure}{\numberline {11.7}{\ignorespaces Parallel generation of a large sparse five-bands matrix by four computing nodes.\relax }}{267}} -\newlabel{fig:07}{{11.7}{267}} +\newlabel{ch12:fig:07}{{11.7}{267}} \@writefile{lot}{\contentsline {table}{\numberline {11.8}{\ignorespaces Performances of parallel CG solver for solving linear systems associated to sparse five-bands matrices on a cluster of 24 CPU cores vs. on a cluster of 12 GPUs\relax }}{267}} -\newlabel{tab:08}{{11.8}{267}} +\newlabel{ch12:tab:08}{{11.8}{267}} \@writefile{lot}{\contentsline {table}{\numberline {11.9}{\ignorespaces Performances of parallel GMRES solver for solving linear systems associated to sparse five-bands matrices on a cluster of 24 CPU cores vs. on a cluster of 12 GPUs\relax }}{268}} -\newlabel{tab:09}{{11.9}{268}} +\newlabel{ch12:tab:09}{{11.9}{268}} \@writefile{lof}{\contentsline {figure}{\numberline {11.8}{\ignorespaces An example of the hypergraph partitioning of a sparse matrix decomposed between three computing nodes.\relax }}{269}} -\newlabel{fig:08}{{11.8}{269}} +\newlabel{ch12:fig:08}{{11.8}{269}} \@writefile{lot}{\contentsline {table}{\numberline {11.10}{\ignorespaces Performances of the parallel CG solver using hypergraph partitioning for solving linear systems associated to sparse five-bands matrices on a cluster of 24 CPU cores vs. on a cluster of 12 GPU.\relax }}{270}} -\newlabel{tab:10}{{11.10}{270}} +\newlabel{ch12:tab:10}{{11.10}{270}} \@writefile{lot}{\contentsline {table}{\numberline {11.11}{\ignorespaces Performances of the parallel GMRES solver using hypergraph partitioning for solving linear systems associated to sparse five-bands matrices on a cluster of 24 CPU cores vs. 
on a cluster of 12 GPU.\relax }}{271}} -\newlabel{tab:11}{{11.11}{271}} +\newlabel{ch12:tab:11}{{11.11}{271}} \@writefile{lot}{\contentsline {table}{\numberline {11.12}{\ignorespaces The total communication volume between 12 GPU computing nodes without and with the hypergraph partitioning method.\relax }}{272}} -\newlabel{tab:12}{{11.12}{272}} +\newlabel{ch12:tab:12}{{11.12}{272}} +\newlabel{ch12:fig:09.01}{{11.9(a)}{273}} +\newlabel{sub@ch12:fig:09.01}{{(a)}{273}} +\newlabel{ch12:fig:09.02}{{11.9(b)}{273}} +\newlabel{sub@ch12:fig:09.02}{{(b)}{273}} \@writefile{lof}{\contentsline {figure}{\numberline {11.9}{\ignorespaces Weak-scaling of the parallel CG and GMRES solvers on a GPU cluster for solving large sparse linear systems.\relax }}{273}} -\newlabel{fig:09}{{11.9}{273}} +\@writefile{lof}{\contentsline {subfigure}{\numberline{(a)}{\ignorespaces {Sparse band matrices}}}{273}} +\@writefile{lof}{\contentsline {subfigure}{\numberline{(b)}{\ignorespaces {Sparse five-bands matrices}}}{273}} +\newlabel{ch12:fig:09}{{11.9}{273}} \@writefile{toc}{\contentsline {section}{\numberline {11.6}Conclusion}{273}} -\newlabel{sec:06}{{11.6}{273}} +\newlabel{ch12:sec:06}{{11.6}{273}} \@writefile{toc}{\contentsline {section}{Bibliography}{274}} \@setckpt{Chapters/chapter12/ch12}{ \setcounter{page}{276} diff --git a/BookGPU/Chapters/chapter12/ch12.tex b/BookGPU/Chapters/chapter12/ch12.tex index 7cd99f4..eaa4f9c 100755 --- a/BookGPU/Chapters/chapter12/ch12.tex +++ b/BookGPU/Chapters/chapter12/ch12.tex @@ -4,138 +4,158 @@ %% %% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\chapterauthor{}{} +%\chapterauthor{}{} +\chapterauthor{Lilia Ziane Khodja}{Femto-ST Institute, University of Franche-Comte, France} +\chapterauthor{Raphaël Couturier}{Femto-ST Institute, University of Franche-Comte, France} +\chapterauthor{Jacques Bahi}{Femto-ST Institute, University of Franche-Comte, France} + \chapter{Solving sparse linear systems with GMRES and CG methods on GPU clusters} +\label{ch12} %%--------------------------%% %% SECTION 1 %% %%--------------------------%% \section{Introduction} -\label{sec:01} -The sparse linear systems are used to model many scientific and industrial problems, such as the environmental simulations or -the industrial processing of the complex or non-Newtonian fluids. Moreover, the resolution of these problems often involves the -solving of such linear systems which is considered as the most expensive process in terms of time execution and memory space. -Therefore, solving sparse linear systems must be as efficient as possible in order to deal with problems of ever increasing size. - -There are, in the jargon of numerical analysis, different methods of solving sparse linear systems that we can classify in two -classes: the direct and iterative methods. However, the iterative methods are often more suitable than their counterpart, direct -methods, for solving large sparse linear systems. Indeed, they are less memory consuming and easier to parallelize on parallel -computers than direct methods. Different computing platforms, sequential and parallel computers, are used for solving sparse -linear systems with iterative solutions. Nowadays, graphics processing units (GPUs) have become attractive for solving these -linear systems, due to their computing power and their ability to compute faster than traditional CPUs. - -In Section~\ref{sec:02}, we describe the general principle of two well-known iterative methods: the conjugate gradient method and -the generalized minimal residual method. 
In Section~\ref{sec:03}, we give the main key points of the parallel implementation of both -methods on a cluster of GPUs. Then, in Section~\ref{sec:04}, we present the experimental results obtained on a CPU cluster and on -a GPU cluster, for solving sparse linear systems associated to matrices of different structures. Finally, in Section~\ref{sec:05}, -we apply the hypergraph partitioning technique to reduce the total communication volume between the computing nodes and, thus, to -improve the execution times of the parallel algorithms of both iterative methods. +\label{ch12:sec:01} +The sparse linear systems are used to model many scientific and industrial problems, +such as the environmental simulations or the industrial processing of the complex or +non-Newtonian fluids. Moreover, the resolution of these problems often involves the +solving of such linear systems which is considered as the most expensive process in +terms of execution time and memory space. Therefore, solving sparse linear systems +must be as efficient as possible in order to deal with problems of ever increasing +size. + +There are, in the jargon of numerical analysis, different methods of solving sparse +linear systems that can be classified in two classes: the direct and iterative methods. +However, the iterative methods are often more suitable than their counterpart, direct +methods, for solving these systems. Indeed, they are less memory consuming and easier +to parallelize on parallel computers than direct methods. Different computing platforms, +sequential and parallel computers, are used for solving sparse linear systems with iterative +solutions. Nowadays, graphics processing units (GPUs) have become attractive for solving +these systems, due to their computing power and their ability to compute faster than +traditional CPUs. + +In Section~\ref{ch12:sec:02}, we describe the general principle of two well-known iterative +methods: the conjugate gradient method and the generalized minimal residual method. In Section~\ref{ch12:sec:03}, +we give the main key points of the parallel implementation of both methods on a cluster of +GPUs. Then, in Section~\ref{ch12:sec:04}, we present the experimental results obtained on a +CPU cluster and on a GPU cluster, for solving sparse linear systems associated to matrices +of different structures. Finally, in Section~\ref{ch12:sec:05}, we apply the hypergraph partitioning +technique to reduce the total communication volume between the computing nodes and, thus, +to improve the execution times of the parallel algorithms of both iterative methods. %%--------------------------%% %% SECTION 2 %% %%--------------------------%% \section{Krylov iterative methods} -\label{sec:02} -Let us consider the following system of $n$ linear equations in $\mathbb{R}$: +\label{ch12:sec:02} +Let us consider the following system of $n$ linear equations\index{Sparse~linear~system} +in $\mathbb{R}$: \begin{equation} Ax=b, -\label{eq:01} +\label{ch12:eq:01} \end{equation} -where $A\in\mathbb{R}^{n\times n}$ is a sparse nonsingular square matrix, $x\in\mathbb{R}^{n}$ is the solution vector, -$b\in\mathbb{R}^{n}$ is the right-hand side and $n\in\mathbb{N}$ is a large integer number. - -The iterative methods for solving the large sparse linear system~(\ref{eq:01}) proceed by successive iterations of a same -block of elementary operations, during which an infinite number of approximate solutions $\{x_k\}_{k\geq 0}$ are computed. 
-Indeed, from an initial guess $x_0$, an iterative method determines at each iteration $k>0$ an approximate solution $x_k$ -which, gradually, converges to the exact solution $x^{*}$ as follows: +where $A\in\mathbb{R}^{n\times n}$ is a sparse nonsingular square matrix, $x\in\mathbb{R}^{n}$ +is the solution vector, $b\in\mathbb{R}^{n}$ is the right-hand side and $n\in\mathbb{N}$ is a +large integer number. + +The iterative methods\index{Iterative~method} for solving the large sparse linear system~(\ref{ch12:eq:01}) +proceed by successive iterations of a same block of elementary operations, during which an +infinite number of approximate solutions $\{x_k\}_{k\geq 0}$ are computed. Indeed, from an +initial guess $x_0$, an iterative method determines at each iteration $k>0$ an approximate +solution $x_k$ which, gradually, converges to the exact solution $x^{*}$ as follows: \begin{equation} x^{*}=\lim\limits_{k\to\infty}x_{k}=A^{-1}b. -\label{eq:02} +\label{ch12:eq:02} \end{equation} -The number of iterations necessary to reach the exact solution $x^{*}$ is not known beforehand and can be infinite. In -practice, an iterative method often finds an approximate solution $\tilde{x}$ after a fixed number of iterations and/or -when a given convergence criterion is satisfied as follows: +The number of iterations necessary to reach the exact solution $x^{*}$ is not known beforehand +and can be infinite. In practice, an iterative method often finds an approximate solution $\tilde{x}$ +after a fixed number of iterations and/or when a given convergence criterion\index{Convergence} +is satisfied as follows: \begin{equation} \|b-A\tilde{x}\| < \varepsilon, -\label{eq:03} +\label{ch12:eq:03} \end{equation} -where $\varepsilon<1$ is the required convergence tolerance threshold. - -Some of the most iterative methods that have proven their efficiency for solving large sparse linear systems are those -called \textit{Krylov sub-space methods}~\cite{ref1}. In the present chapter, we describe two Krylov methods which are -widely used: the conjugate gradient method (CG) and the generalized minimal residual method (GMRES). In practice, the -Krylov sub-space methods are usually used with preconditioners that allow to improve their convergence. So, in what -follows, the CG and GMRES methods are used for solving the left-preconditioned sparse linear system: +where $\varepsilon<1$ is the required convergence tolerance threshold\index{Convergence!Tolerance~threshold}. + +Some of the most iterative methods that have proven their efficiency for solving large sparse +linear systems are those called \textit{Krylov subspace methods}~\cite{ch12:ref1}\index{Iterative~method!Krylov~subspace}. +In the present chapter, we describe two Krylov methods which are widely used: the conjugate +gradient method (CG) and the generalized minimal residual method (GMRES). In practice, the +Krylov subspace methods are usually used with preconditioners that allow to improve their +convergence. So, in what follows, the CG and GMRES methods are used for solving the left-preconditioned\index{Sparse~linear~system!Preconditioned} +sparse linear system: \begin{equation} M^{-1}Ax=M^{-1}b, -\label{eq:11} +\label{ch12:eq:11} \end{equation} where $M$ is the preconditioning matrix. + %%****************%% %%****************%% \subsection{CG method} -\label{sec:02.01} -The conjugate gradient method is initially developed by Hestenes and Stiefel in 1952~\cite{ref2}. It is one of the well -known iterative method for solving large sparse linear systems. 
In addition, it can be adapted for solving nonlinear -equations and optimization problems. However, it can only be applied to problems with positive definite symmetric matrices. - -The main idea of the CG method is the computation of a sequence of approximate solutions $\{x_k\}_{k\geq 0}$ in a Krylov -sub-space of order $k$ as follows: +\label{ch12:sec:02.01} +The conjugate gradient method is initially developed by Hestenes and Stiefel in 1952~\cite{ch12:ref2}. +It is one of the well known iterative method for solving large sparse linear systems. In addition, it +can be adapted for solving nonlinear equations and optimization problems. However, it can only be applied +to problems with positive definite symmetric matrices. + +The main idea of the CG method\index{Iterative~method!CG} is the computation of a sequence of approximate +solutions $\{x_k\}_{k\geq 0}$ in a Krylov subspace\index{Iterative~method!Krylov~subspace} of order $k$ as +follows: \begin{equation} x_k \in x_0 + \mathcal{K}_k(A,r_0), -\label{eq:04} +\label{ch12:eq:04} \end{equation} -such that the Galerkin condition must be satisfied: +such that the Galerkin condition\index{Galerkin~condition} must be satisfied: \begin{equation} r_k \bot \mathcal{K}_k(A,r_0), -\label{eq:05} +\label{ch12:eq:05} \end{equation} -where $x_0$ is the initial guess, $r_k=b-Ax_k$ is the residual of the computed solution $x_k$ and $\mathcal{K}_k$ the Krylov -sub-space of order $k$: \[\mathcal{K}_k(A,r_0) \equiv\text{span}\{r_0, Ar_0, A^2r_0,\ldots, A^{k-1}r_0\}.\] +where $x_0$ is the initial guess, $r_k=b-Ax_k$ is the residual of the computed solution $x_k$ and $\mathcal{K}_k$ +the Krylov subspace of order $k$: \[\mathcal{K}_k(A,r_0) \equiv\text{span}\{r_0, Ar_0, A^2r_0,\ldots, A^{k-1}r_0\}.\] In fact, CG is based on the construction of a sequence $\{p_k\}_{k\in\mathbb{N}}$ of direction vectors in $\mathcal{K}_k$ which are pairwise $A$-conjugate ($A$-orthogonal): \begin{equation} \begin{array}{ll} p_i^T A p_j = 0, & i\neq j. \end{array} -\label{eq:06} +\label{ch12:eq:06} \end{equation} At each iteration $k$, an approximate solution $x_k$ is computed by recurrence as follows: \begin{equation} \begin{array}{ll} x_k = x_{k-1} + \alpha_k p_k, & \alpha_k\in\mathbb{R}. \end{array} -\label{eq:07} +\label{ch12:eq:07} \end{equation} Consequently, the residuals $r_k$ are computed in the same way: \begin{equation} r_k = r_{k-1} - \alpha_k A p_k. -\label{eq:08} +\label{ch12:eq:08} \end{equation} -In the case where all residuals are nonzero, the direction vectors $p_k$ can be determined so that the following recurrence -holds: +In the case where all residuals are nonzero, the direction vectors $p_k$ can be determined so that +the following recurrence holds: \begin{equation} \begin{array}{lll} p_0=r_0, & p_k=r_k+\beta_k p_{k-1}, & \beta_k\in\mathbb{R}. \end{array} -\label{eq:09} +\label{ch12:eq:09} \end{equation} -Moreover, the scalars $\{\alpha_k\}_{k>0}$ are chosen so as to minimize the $A$-norm error $\|x^{*}-x_k\|_A$ over the Krylov -sub-space $\mathcal{K}_{k}$ and the scalars $\{\beta_k\}_{k>0}$ are chosen so as to ensure that the direction vectors are -pairwise $A$-conjugate. 
So, the assumption that matrix $A$ is symmetric and the recurrences~(\ref{eq:08}) and~(\ref{eq:09}) -allow to deduce that: +Moreover, the scalars $\{\alpha_k\}_{k>0}$ are chosen so as to minimize the $A$-norm error $\|x^{*}-x_k\|_A$ +over the Krylov subspace $\mathcal{K}_{k}$ and the scalars $\{\beta_k\}_{k>0}$ are chosen so as to ensure +that the direction vectors are pairwise $A$-conjugate. So, the assumption that matrix $A$ is symmetric and +the recurrences~(\ref{ch12:eq:08}) and~(\ref{ch12:eq:09}) allow to deduce that: \begin{equation} \begin{array}{ll} \alpha_{k}=\frac{r^{T}_{k-1}r_{k-1}}{p_{k}^{T}Ap_{k}}, & \beta_{k}=\frac{r_{k}^{T}r_{k}}{r_{k-1}^{T}r_{k-1}}. \end{array} -\label{eq:10} +\label{ch12:eq:10} \end{equation} \begin{algorithm}[!t] - %\SetLine - %\linesnumbered Choose an initial guess $x_0$\; $r_{0} = b - A x_{0}$\; $convergence$ = false\; @@ -160,62 +180,70 @@ allow to deduce that: } } \caption{Left-preconditioned CG method} -\label{alg:01} +\label{ch12:alg:01} \end{algorithm} -Algorithm~\ref{alg:01} shows the main key points of the preconditioned CG method. It allows to solve the left-preconditioned -sparse linear system~(\ref{eq:11}). In this algorithm, $\varepsilon$ is the convergence tolerance threshold, $maxiter$ is the maximum -number of iterations and $(\cdot,\cdot)$ defines the dot product between two vectors in $\mathbb{R}^{n}$. At every iteration, a direction -vector $p_k$ is determined, so that it is orthogonal to the preconditioned residual $z_k$ and to the direction vectors $\{p_i\}_{i0}$ in a Krylov sub-space $\mathcal{K}_k$ as follows: +\label{ch12:sec:02.02} +The iterative GMRES method is developed by Saad and Schultz in 1986~\cite{ch12:ref3} as a generalization +of the minimum residual method MINRES~\cite{ch12:ref4}\index{Iterative~method!MINRES}. Indeed, GMRES can +be applied for solving symmetric or nonsymmetric linear systems. + +The main principle of the GMRES method\index{Iterative~method!GMRES} is to find an approximation minimizing +at best the residual norm. In fact, GMRES computes a sequence of approximate solutions $\{x_k\}_{k>0}$ in +a Krylov subspace\index{Iterative~method!Krylov~subspace} $\mathcal{K}_k$ as follows: \begin{equation} \begin{array}{ll} x_k \in x_0 + \mathcal{K}_k(A, v_1),& v_1=\frac{r_0}{\|r_0\|_2}, \end{array} -\label{eq:12} +\label{ch12:eq:12} \end{equation} -so that the Petrov-Galerkin condition is satisfied: +so that the Petrov-Galerkin condition\index{Petrov-Galerkin~condition} is satisfied: \begin{equation} \begin{array}{ll} r_k \bot A \mathcal{K}_k(A, v_1). \end{array} -\label{eq:13} +\label{ch12:eq:13} \end{equation} -GMRES uses the Arnoldi process~\cite{ref5} to construct an orthonormal basis $V_k$ for the Krylov sub-space $\mathcal{K}_k$ -and an upper Hessenberg matrix $\bar{H}_k$ of order $(k+1)\times k$: +GMRES uses the Arnoldi process~\cite{ch12:ref5}\index{Iterative~method!Arnoldi~process} to construct an +orthonormal basis $V_k$ for the Krylov subspace $\mathcal{K}_k$ and an upper Hessenberg matrix\index{Hessenberg~matrix} +$\bar{H}_k$ of order $(k+1)\times k$: \begin{equation} \begin{array}{ll} V_k = \{v_1, v_2,\ldots,v_k\}, & \forall k>1, v_k=A^{k-1}v_1, \end{array} -\label{eq:14} +\label{ch12:eq:14} \end{equation} and \begin{equation} V_k A = V_{k+1} \bar{H}_k. 
-\label{eq:15} +\label{ch12:eq:15} \end{equation} -Then, at each iteration $k$, an approximate solution $x_k$ is computed in the Krylov sub-space $\mathcal{K}_k$ spanned by $V_k$ -as follows: +Then, at each iteration $k$, an approximate solution $x_k$ is computed in the Krylov subspace $\mathcal{K}_k$ +spanned by $V_k$ as follows: \begin{equation} \begin{array}{ll} x_k = x_0 + V_k y, & y\in\mathbb{R}^{k}. \end{array} -\label{eq:16} +\label{ch12:eq:16} \end{equation} -From both formulas~(\ref{eq:15}) and~(\ref{eq:16}) and $r_k=b-Ax_k$, we can deduce that: +From both formulas~(\ref{ch12:eq:15}) and~(\ref{ch12:eq:16}) and $r_k=b-Ax_k$, we can deduce that: \begin{equation} \begin{array}{lll} r_{k} & = & b - A (x_{0} + V_{k}y) \\ @@ -223,34 +251,34 @@ From both formulas~(\ref{eq:15}) and~(\ref{eq:16}) and $r_k=b-Ax_k$, we can dedu & = & \beta v_{1} - V_{k+1}\bar{H}_{k}y \\ & = & V_{k+1}(\beta e_{1} - \bar{H}_{k}y), \end{array} -\label{eq:17} +\label{ch12:eq:17} \end{equation} -such that $\beta=\|r_0\|_2$ and $e_1=(1,0,\cdots,0)$ is the first vector of the canonical basis of $\mathbb{R}^k$. So, -the vector $y$ is chosen in $\mathbb{R}^k$ so as to minimize at best the Euclidean norm of the residual $r_k$. Consequently, -a linear least-squares problem of size $k$ is solved: +such that $\beta=\|r_0\|_2$ and $e_1=(1,0,\cdots,0)$ is the first vector of the canonical basis of +$\mathbb{R}^k$. So, the vector $y$ is chosen in $\mathbb{R}^k$ so as to minimize at best the Euclidean +norm of the residual $r_k$. Consequently, a linear least-squares problem of size $k$ is solved: \begin{equation} \underset{y\in\mathbb{R}^{k}}{min}\|r_{k}\|_{2}=\underset{y\in\mathbb{R}^{k}}{min}\|\beta e_{1}-\bar{H}_{k}y\|_{2}. -\label{eq:18} +\label{ch12:eq:18} \end{equation} -The QR factorization of matrix $\bar{H}_k$ is used to compute the solution of this problem by using Givens rotations~\cite{ref1,ref3}, -such that: +The QR factorization of matrix $\bar{H}_k$ is used to compute the solution of this problem by using +Givens rotations~\cite{ch12:ref1,ch12:ref3}, such that: \begin{equation} \begin{array}{lll} \bar{H}_{k}=Q_{k}R_{k}, & Q_{k}\in\mathbb{R}^{(k+1)\times (k+1)}, & R_{k}\in\mathbb{R}^{(k+1)\times k}, \end{array} -\label{eq:19} +\label{ch12:eq:19} \end{equation} where $Q_kQ_k^T=I_k$ and $R_k$ is an upper triangular matrix. -The GMRES method computes an approximate solution with a sufficient precision after, at most, $n$ iterations ($n$ is the size of the -sparse linear system to be solved). However, the GMRES algorithm must construct and store in the memory an orthonormal basis $V_k$ whose -size is proportional to the number of iterations required to achieve the convergence. Then, to avoid a huge memory storage, the GMRES -method must be restarted at each $m$ iterations, such that $m$ is very small ($m\ll n$), and with $x_m$ as the initial guess to the -next iteration. This allows to limit the size of the basis $V$ to $m$ orthogonal vectors. +The GMRES method computes an approximate solution with a sufficient precision after, at most, $n$ +iterations ($n$ is the size of the sparse linear system to be solved). However, the GMRES algorithm +must construct and store in the memory an orthonormal basis $V_k$ whose size is proportional to the +number of iterations required to achieve the convergence. Then, to avoid a huge memory storage, the +GMRES method must be restarted at each $m$ iterations, such that $m$ is very small ($m\ll n$), and +with $x_m$ as the initial guess to the next iteration. 
This allows to limit the size of the basis +$V$ to $m$ orthogonal vectors. \begin{algorithm}[!t] - %\SetLine - %\linesnumbered Choose an initial guess $x_0$\; $convergence$ = false\; $k = 1$\; @@ -281,190 +309,245 @@ next iteration. This allows to limit the size of the basis $V$ to $m$ orthogonal } } \caption{Left-preconditioned GMRES method with restarts} -\label{alg:02} +\label{ch12:alg:02} \end{algorithm} -Algorithm~\ref{alg:02} shows the main key points of the GMRES method with restarts. It solves the left-preconditioned sparse linear -system~(\ref{eq:11}), such that $M$ is the preconditioning matrix. At each iteration $k$, GMRES uses the Arnoldi process (defined -from line~$7$ to line~$17$) to construct a basis $V_m$ of $m$ orthogonal vectors and an upper Hessenberg matrix $\bar{H}_m$ of size -$(m+1)\times m$. Then, it solves the linear least-squares problem of size $m$ to find the vector $y\in\mathbb{R}^{m}$ which minimizes -at best the residual norm (line~$18$). Finally, it computes an approximate solution $x_m$ in the Krylov sub-space spanned by $V_m$ -(line~$19$). The GMRES algorithm is stopped when the residual norm is sufficiently small ($\|r_m\|_2<\varepsilon$) and/or the maximum -number of iterations ($maxiter$) is reached. +Algorithm~\ref{ch12:alg:02} shows the main key points of the GMRES method with restarts. +It solves the left-preconditioned\index{Sparse~linear~system!Preconditioned} sparse linear +system~(\ref{ch12:eq:11}), such that $M$ is the preconditioning matrix. At each iteration +$k$, GMRES uses the Arnoldi process\index{Iterative~method!Arnoldi~process} (defined from +line~$7$ to line~$17$) to construct a basis $V_m$ of $m$ orthogonal vectors and an upper +Hessenberg matrix\index{Hessenberg~matrix} $\bar{H}_m$ of size $(m+1)\times m$. Then, it +solves the linear least-squares problem of size $m$ to find the vector $y\in\mathbb{R}^{m}$ +which minimizes at best the residual norm (line~$18$). Finally, it computes an approximate +solution $x_m$ in the Krylov subspace spanned by $V_m$ (line~$19$). The GMRES algorithm is +stopped when the residual norm is sufficiently small ($\|r_m\|_2<\varepsilon$) and/or the +maximum number of iterations\index{Convergence!Maximum~number~of~iterations} ($maxiter$) +is reached. + %%--------------------------%% %% SECTION 3 %% %%--------------------------%% \section{Parallel implementation on a GPU cluster} -\label{sec:03} -In this section, we present the parallel algorithms of both iterative CG and GMRES methods for GPU clusters. -The implementation is performed on a GPU cluster composed of different computing nodes, such that each node -is a CPU core managed by a MPI process and equipped with a GPU card. The parallelization of these algorithms -is carried out by using the MPI communication routines between the GPU computing nodes and the CUDA programming -environment inside each node. In what follows, the algorithms of the iterative methods are called iterative -solvers. +\label{ch12:sec:03} +In this section, we present the parallel algorithms of both iterative CG\index{Iterative~method!CG} +and GMRES\index{Iterative~method!GMRES} methods for GPU clusters. The implementation is performed on +a GPU cluster composed of different computing nodes, such that each node is a CPU core managed by a +MPI process and equipped with a GPU card. The parallelization of these algorithms is carried out by +using the MPI communication routines between the GPU computing nodes\index{Computing~node} and the +CUDA programming environment inside each node. 
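For illustration, a minimal sketch of this coupling between one MPI process and one GPU per computing node is given below; the mapping of each MPI rank to a local device is an assumption (the chapter does not detail it) and all names are hypothetical.
\begin{verbatim}
/* Hedged sketch: bind one MPI process to one GPU of its machine (assumed
   rank-based mapping, hypothetical names; compile with nvcc + MPI).       */
#include <mpi.h>
#include <cuda_runtime.h>
#include <stdio.h>

int main(int argc, char **argv)
{
  int rank, size, devCount = 0;
  MPI_Init(&argc, &argv);                 /* one MPI process per computing node */
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size);
  cudaGetDeviceCount(&devCount);          /* GPUs visible on this machine       */
  if (devCount > 0)
    cudaSetDevice(rank % devCount);       /* assumed mapping: rank -> local GPU */
  printf("process %d of %d uses GPU %d\n", rank, size,
         devCount > 0 ? rank % devCount : -1);
  /* ...allocate the local data on the selected GPU and run the solver here... */
  MPI_Finalize();
  return 0;
}
\end{verbatim}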
In what follows, the algorithms of the iterative methods +are called iterative solvers. + %%****************%% %%****************%% \subsection{Data partitioning} -\label{sec:03.01} -The parallel solving of the large sparse linear system~(\ref{eq:11}) requires a data partitioning between the computing -nodes of the GPU cluster. Let $p$ denotes the number of the computing nodes on the GPU cluster. The partitioning operation -consists in the decomposition of the vectors and matrices, involved in the iterative solver, in $p$ portions. Indeed, this -operation allows to assign to each computing node $i$: +\label{ch12:sec:03.01} +The parallel solving of the large sparse linear system~(\ref{ch12:eq:11}) requires a data partitioning +between the computing nodes of the GPU cluster. Let $p$ denotes the number of the computing nodes on the +GPU cluster. The partitioning operation consists in the decomposition of the vectors and matrices, involved +in the iterative solver, in $p$ portions. Indeed, this operation allows to assign to each computing node +$i$: \begin{itemize} \item a portion of size $\frac{n}{p}$ elements of each vector, \item a sparse rectangular sub-matrix $A_i$ of size $(\frac{n}{p},n)$ and, \item a square preconditioning sub-matrix $M_i$ of size $(\frac{n}{p},\frac{n}{p})$, \end{itemize} -where $n$ is the size of the sparse linear system to be solved. In the first instance, we perform a naive row-wise partitioning -(decomposition row-by-row) on the data of the sparse linear systems to be solved. Figure~\ref{fig:01} shows an example of a row-wise -data partitioning between four computing nodes of a sparse linear system (sparse matrix $A$, solution vector $x$ and right-hand -side $b$) of size $16$ unknown values. +where $n$ is the size of the sparse linear system to be solved. In the first instance, we perform a naive +row-wise partitioning (decomposition row-by-row) on the data of the sparse linear systems to be solved. +Figure~\ref{ch12:fig:01} shows an example of a row-wise data partitioning between four computing nodes +of a sparse linear system (sparse matrix $A$, solution vector $x$ and right-hand side $b$) of size $16$ +unknown values. \begin{figure} \centerline{\includegraphics[scale=0.35]{Chapters/chapter12/figures/partition}} \caption{A data partitioning of the sparse matrix $A$, the solution vector $x$ and the right-hand side $b$ into four portions.} -\label{fig:01} +\label{ch12:fig:01} \end{figure} + %%****************%% %%****************%% \subsection{GPU computing} -\label{sec:03.02} -After the partitioning operation, all the data involved from this operation must be transferred from the CPU memories to the GPU -memories, in order to be processed by GPUs. We use two functions of the CUBLAS library (CUDA Basic Linear Algebra Subroutines), -developed by Nvidia~\cite{ref6}: \verb+cublasAlloc()+ for the memory allocations on GPUs and \verb+cublasSetVector()+ for the -memory copies from the CPUs to the GPUs. - -An efficient implementation of CG and GMRES solvers on a GPU cluster requires to determine all parts of their codes that can be -executed in parallel and, thus, take advantage of the GPU acceleration. As many Krylov sub-space methods, the CG and GMRES methods -are mainly based on arithmetic operations dealing with vectors or matrices: sparse matrix-vector multiplications, scalar-vector -multiplications, dot products, Euclidean norms, AXPY operations ($y\leftarrow ax+y$ where $x$ and $y$ are vectors and $a$ is a -scalar) and so on. 
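To give the flavour of such vector kernels, a minimal double-precision AXPY kernel ($y\leftarrow ax+y$) is sketched below; it is only an illustration with hypothetical names, since the solvers presented here rely on the corresponding CUBLAS routine for this operation.
\begin{verbatim}
// Illustrative AXPY kernel: y <- a*x + y (hypothetical sketch; the solvers
// actually call the CUBLAS routine cublasDaxpy() for this operation).
__global__ void axpy_kernel(int n, double a, const double *x, double *y)
{
  int i = blockIdx.x * blockDim.x + threadIdx.x;   // one thread per element
  if (i < n)
    y[i] = a * x[i] + y[i];
}

// Example launch for a local vector of size n (block size chosen arbitrarily):
// axpy_kernel<<<(n + 255) / 256, 256>>>(n, a, d_x, d_y);
\end{verbatim}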
These vector operations are often easy to parallelize and they are more efficient on parallel computers when -they work on large vectors. Therefore, all the vector operations used in CG and GMRES solvers must be executed by the GPUs as kernels. - -We use the kernels of the CUBLAS library to compute some vector operations of CG and GMRES solvers. The following kernels of CUBLAS -(dealing with double floating point) are used: \verb+cublasDdot()+ for the dot products, \verb+cublasDnrm2()+ for the Euclidean -norms and \verb+cublasDaxpy()+ for the AXPY operations. For the rest of the data-parallel operations, we code their kernels in CUDA. -In the CG solver, we develop a kernel for the XPAY operation ($y\leftarrow x+ay$) used at line~$12$ in Algorithm~\ref{alg:01}. In the -GMRES solver, we program a kernel for the scalar-vector multiplication (lines~$7$ and~$15$ in Algorithm~\ref{alg:02}), a kernel for -solving the least-squares problem and a kernel for the elements updates of the solution vector $x$. - -The least-squares problem in the GMRES method is solved by performing a QR factorization on the Hessenberg matrix $\bar{H}_m$ with -plane rotations and, then, solving the triangular system by backward substitutions to compute $y$. Consequently, solving the least-squares -problem on the GPU is not interesting. Indeed, the triangular solves are not easy to parallelize and inefficient on GPUs. However, -the least-squares problem to solve in the GMRES method with restarts has, generally, a very small size $m$. Therefore, we develop -an inexpensive kernel which must be executed in sequential by a single CUDA thread. - -The most important operation in CG and GMRES methods is the sparse matrix-vector multiplication (SpMV), because it is often an -expensive operation in terms of execution time and memory space. Moreover, it requires to take care of the storage format of the -sparse matrix in the memory. Indeed, the naive storage, row-by-row or column-by-column, of a sparse matrix can cause a significant -waste of memory space and execution time. In addition, the sparsity nature of the matrix often leads to irregular memory accesses -to read the matrix nonzero values. So, the computation of the SpMV multiplication on GPUs can involve non coalesced accesses to -the global memory, which slows down even more its performances. One of the most efficient compressed storage formats of sparse -matrices on GPUs is HYB format~\cite{ref7}. It is a combination of ELLpack (ELL) and Coordinate (COO) formats. Indeed, it stores -a typical number of nonzero values per row in ELL format and remaining entries of exceptional rows in COO format. It combines -the efficiency of ELL due to the regularity of its memory accesses and the flexibility of COO which is insensitive to the matrix -structure. Consequently, we use the HYB kernel~\cite{ref8} developed by Nvidia to implement the SpMV multiplication of CG and -GMRES methods on GPUs. Moreover, to avoid the non coalesced accesses to the high-latency global memory, we fill the elements of -the iterate vector $x$ in the cached texture memory. +\label{ch12:sec:03.02} +After the partitioning operation, all the data involved from this operation must be +transferred from the CPU memories to the GPU memories, in order to be processed by +GPUs. 
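A minimal sketch of this transfer step for one local vector portion is given below, using the two CUBLAS memory routines named just afterwards; sizes and variable names are hypothetical and error handling is omitted.
\begin{verbatim}
/* Hedged sketch (hypothetical names): allocate a local vector portion of
   size nloc on the GPU and copy it from the CPU with the legacy CUBLAS
   helper routines. cublasInit() is assumed to be called once at startup.  */
#include <cublas.h>

double *to_device(int nloc, const double *h_x)
{
  double *d_x = NULL;
  cublasAlloc(nloc, sizeof(double), (void **)&d_x);       /* device allocation   */
  cublasSetVector(nloc, sizeof(double), h_x, 1, d_x, 1);  /* host-to-device copy */
  return d_x;
}
\end{verbatim}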
We use two functions of the CUBLAS\index{CUBLAS} library (CUDA Basic Linear +Algebra Subroutines), developed by Nvidia~\cite{ch12:ref6}: \verb+cublasAlloc()+ +for the memory allocations on GPUs and \verb+cublasSetVector()+ for the memory +copies from the CPUs to the GPUs. + +An efficient implementation of CG and GMRES solvers on a GPU cluster requires to +determine all parts of their codes that can be executed in parallel and, thus, take +advantage of the GPU acceleration. As many Krylov subspace methods, the CG and GMRES +methods are mainly based on arithmetic operations dealing with vectors or matrices: +sparse matrix-vector multiplications, scalar-vector multiplications, dot products, +Euclidean norms, AXPY operations ($y\leftarrow ax+y$ where $x$ and $y$ are vectors +and $a$ is a scalar) and so on. These vector operations are often easy to parallelize +and they are more efficient on parallel computers when they work on large vectors. +Therefore, all the vector operations used in CG and GMRES solvers must be executed +by the GPUs as kernels. + +We use the kernels of the CUBLAS library to compute some vector operations of CG and +GMRES solvers. The following kernels of CUBLAS (dealing with double floating point) +are used: \verb+cublasDdot()+ for the dot products, \verb+cublasDnrm2()+ for the +Euclidean norms and \verb+cublasDaxpy()+ for the AXPY operations. For the rest of +the data-parallel operations, we code their kernels in CUDA. In the CG solver, we +develop a kernel for the XPAY operation ($y\leftarrow x+ay$) used at line~$12$ in +Algorithm~\ref{ch12:alg:01}. In the GMRES solver, we program a kernel for the scalar-vector +multiplication (lines~$7$ and~$15$ in Algorithm~\ref{ch12:alg:02}), a kernel for +solving the least-squares problem and a kernel for the elements updates of the solution +vector $x$. + +The least-squares problem in the GMRES method is solved by performing a QR factorization +on the Hessenberg matrix\index{Hessenberg~matrix} $\bar{H}_m$ with plane rotations and, +then, solving the triangular system by backward substitutions to compute $y$. Consequently, +solving the least-squares problem on the GPU is not interesting. Indeed, the triangular +solves are not easy to parallelize and inefficient on GPUs. However, the least-squares +problem to solve in the GMRES method with restarts has, generally, a very small size $m$. +Therefore, we develop an inexpensive kernel which must be executed in sequential by a +single CUDA thread. + +The most important operation in CG\index{Iterative~method!CG} and GMRES\index{Iterative~method!GMRES} +methods is the sparse matrix-vector multiplication (SpMV)\index{SpMV~multiplication}, +because it is often an expensive operation in terms of execution time and memory space. +Moreover, it requires to take care of the storage format of the sparse matrix in the +memory. Indeed, the naive storage, row-by-row or column-by-column, of a sparse matrix +can cause a significant waste of memory space and execution time. In addition, the sparsity +nature of the matrix often leads to irregular memory accesses to read the matrix nonzero +values. So, the computation of the SpMV multiplication on GPUs can involve non coalesced +accesses to the global memory, which slows down even more its performances. One of the +most efficient compressed storage formats\index{Compressed~storage~format} of sparse +matrices on GPUs is HYB\index{Compressed~storage~format!HYB} format~\cite{ch12:ref7}. +It is a combination of ELLpack (ELL) and Coordinate (COO) formats. 
Indeed, it stores +a typical number of nonzero values per row in ELL\index{Compressed~storage~format!ELL} +format and remaining entries of exceptional rows in COO format. It combines the efficiency +of ELL due to the regularity of its memory accesses and the flexibility of COO\index{Compressed~storage~format!COO} +which is insensitive to the matrix structure. Consequently, we use the HYB kernel~\cite{ch12:ref8} +developed by Nvidia to implement the SpMV multiplication of CG and GMRES methods on GPUs. +Moreover, to avoid the non coalesced accesses to the high-latency global memory, we fill +the elements of the iterate vector $x$ in the cached texture memory. + %%****************%% %%****************%% \subsection{Data communications} -\label{sec:03.03} -All the computing nodes of the GPU cluster execute in parallel the same iterative solver (Algorithm~\ref{alg:01} or Algorithm~\ref{alg:02}) -adapted to GPUs, but on their own portions of the sparse linear system: $M^{-1}_iA_ix_i=M^{-1}_ib_i$, $0\leq i0$, the fixed point mapping $F_{\gamma}$ of the projected -Richardson method is defined as follows: +For any $U\in E$, let $P_K(U)$ be the projection of $U$ on $K$. For any $\gamma\in\mathbb{R}$, +$\gamma>0$, the fixed point mapping $F_{\gamma}$ of the projected Richardson method\index{Iterative~method!Projected~Richardson} +is defined as follows: \begin{equation} U^{*} = F_{\gamma}(U^{*}) = P_K(U^{*} - \gamma(\mathcal{A}.U^{*} - G)). -\label{eq:10} +\label{ch13:eq:10} \end{equation} -In order to reduce the computation time, the large optimization problem is solved in a numerical way by using a parallel asynchronous algorithm of the projected -Richardson method on the convex set $K$. Particularly, we will consider an asynchronous parallel adaptation of the projected Richardson method~\cite{ref6}. - -Let $\alpha\in\mathbb{N}$ be a positive integer. We consider that the space $E=\displaystyle\prod_{i=1}^{\alpha} E_i$ is a product of $\alpha$ subspaces $E_i$ -where $i\in\{1,\ldots,\alpha\}$. Note that $E_i=\mathbb{R}^{m_i}$, where $\displaystyle\sum_{i=1}^{\alpha} m_{i}=M$, is also a Hilbert space in which $\scalprod{.}{.}_i$ -denotes the scalar product and $|.|_i$ the associated norm, for all $i\in\{1,\ldots,\alpha\}$. Then, for all $u,v\in E$, $\scalprod{u}{v}=\displaystyle\sum_{i=1}^{\alpha}\scalprod{u_i}{v_i}_i$ +In order to reduce the computation time, the large optimization problem is solved in a +numerical way by using a parallel asynchronous algorithm of the projected Richardson method +on the convex set $K$. Particularly, we will consider an asynchronous parallel adaptation +of the projected Richardson method~\cite{ch13:ref6}. + +Let $\alpha\in\mathbb{N}$ be a positive integer. We consider that the space $E=\displaystyle\prod_{i=1}^{\alpha} E_i$ +is a product of $\alpha$ subspaces $E_i$ where $i\in\{1,\ldots,\alpha\}$. Note that $E_i=\mathbb{R}^{m_i}$, +where $\displaystyle\sum_{i=1}^{\alpha} m_{i}=M$, is also a Hilbert space\index{Hilbert~space} +in which $\scalprod{.}{.}_i$ denotes the scalar product and $|.|_i$ the associated norm, for +all $i\in\{1,\ldots,\alpha\}$. Then, for all $u,v\in E$, $\scalprod{u}{v}=\displaystyle\sum_{i=1}^{\alpha}\scalprod{u_i}{v_i}_i$ is the scalar product on $E$. 
-Let $U\in E$, we consider the following decomposition of $U$ and the corresponding decomposition of $F_\gamma$ into $\alpha$ blocks: +Let $U\in E$, we consider the following decomposition of $U$ and the corresponding decomposition +of $F_\gamma$ into $\alpha$ blocks: \begin{equation} \begin{array}{rcl} U & = & (U_1,\ldots,U_{\alpha}), \\ F_{\gamma}(U) & = & (F_{1,\gamma}(U),\ldots,F_{\alpha,\gamma}(U)). \\ \end{array} -\label{eq:11} +\label{ch13:eq:11} \end{equation} -Assume that the convex set $K=\displaystyle\prod_{i=1}^{\alpha}K_{i}$, such that $\forall i\in\{1,\ldots,\alpha\},K_i\subset E_i$ and $K_i$ is a closed convex set. -Let also $G=(G_1,\ldots,G_{\alpha})\in E$ and, for any $U\in E$, $P_K(U)=(P_{K_1}(U_1),\ldots,P_{K_{\alpha}}(U_{\alpha}))$ is the projection of $U$ on $K$ where $\forall i\in\{1,\ldots,\alpha\},P_{K_i}$ -is the projector from $E_i$ onto $K_i$. So, the fixed point mapping of the projected Richardson method~(\ref{eq:10}) can be written in the following way: +Assume that the convex set $K=\displaystyle\prod_{i=1}^{\alpha}K_{i}$, such that $\forall i\in\{1,\ldots,\alpha\},K_i\subset E_i$ +and $K_i$ is a closed convex set. Let also $G=(G_1,\ldots,G_{\alpha})\in E$ and, for any +$U\in E$, $P_K(U)=(P_{K_1}(U_1),\ldots,P_{K_{\alpha}}(U_{\alpha}))$ is the projection of $U$ +on $K$ where $\forall i\in\{1,\ldots,\alpha\},P_{K_i}$ is the projector from $E_i$ onto +$K_i$. So, the fixed point mapping of the projected Richardson method~(\ref{ch13:eq:10})\index{Iterative~method!Projected~Richardson} +can be written in the following way: \begin{equation} \forall U\in E\mbox{,~}\forall i\in\{1,\ldots,\alpha\}\mbox{,~}F_{i,\gamma}(U) = P_{K_i}(U_i - \gamma(\mathcal{A}_i.U - G_i)). -\label{eq:12} +\label{ch13:eq:12} \end{equation} -Note that $\displaystyle\mathcal{A}_i.U= \sum_{j=1}^{\alpha}\mathcal{A}_{i,j}.U_j$, where $\mathcal{A}_{i,j}$ denote block matrices of $\mathcal{A}$. +Note that $\displaystyle\mathcal{A}_i.U= \sum_{j=1}^{\alpha}\mathcal{A}_{i,j}.U_j$, where +$\mathcal{A}_{i,j}$ denote block matrices of $\mathcal{A}$. -The parallel asynchronous iterations of the projected Richardson method for solving the obstacle problem~(\ref{eq:08}) are defined as follows: let $U^0\in E,U^0=(U^0_1,\ldots,U^0_\alpha)$ be -the initial solution, then for all $p\in\mathbb{N}$, the iterate $U^{p+1}=(U^{p+1}_1,\ldots,U^{p+1}_{\alpha})$ is recursively defined by: +The parallel asynchronous iterations of the projected Richardson method for solving the +obstacle problem~(\ref{ch13:eq:08}) are defined as follows: let $U^0\in E,U^0=(U^0_1,\ldots,U^0_\alpha)$ +be the initial solution, then for all $p\in\mathbb{N}$, the iterate $U^{p+1}=(U^{p+1}_1,\ldots,U^{p+1}_{\alpha})$ +is recursively defined by: \begin{equation} U_i^{p+1} = \left\{ @@ -219,7 +276,7 @@ F_{i,\gamma}(U_1^{\rho_1(p)}, \ldots, U_{\alpha}^{\rho_{\alpha}(p)}) \mbox{~if~} U_i^p \mbox{~otherwise}, \\ \end{array} \right. -\label{eq:13} +\label{ch13:eq:13} \end{equation} where \begin{equation} @@ -229,7 +286,7 @@ where \forall i\in\{1,\ldots,\alpha\},\{p \ | \ i \in s(p)\}\mbox{~is denombrable}, \end{array} \right. -\label{eq:14} +\label{ch13:eq:14} \end{equation} and $\forall j\in\{1,\ldots,\alpha\}$, \begin{equation} @@ -239,62 +296,75 @@ and $\forall j\in\{1,\ldots,\alpha\}$, \displaystyle\lim_{p\to\infty}\rho_j(p) = +\infty.\\ \end{array} \right. 
-\label{eq:15} +\label{ch13:eq:15} \end{equation} -The previous asynchronous scheme of the projected Richardson method models computations that are carried out in parallel -without order nor synchronization (according to the behavior of the parallel iterative method) and describes a subdomain -method without overlapping. It is a general model that takes into account all possible situations of parallel computations -and non-blocking message passing. So, the synchronous iterative scheme is defined by: +The previous asynchronous scheme\index{Asynchronous} of the projected Richardson +method models computations that are carried out in parallel without order nor +synchronization (according to the behavior of the parallel iterative method) and +describes a subdomain method without overlapping. It is a general model that takes +into account all possible situations of parallel computations and nonblocking message +passing. So, the synchronous iterative scheme\index{Synchronous} is defined by: \begin{equation} \forall j\in\{1,\ldots,\alpha\} \mbox{,~} \forall p\in\mathbb{N} \mbox{,~} \rho_j(p)=p. -\label{eq:16} +\label{ch13:eq:16} \end{equation} -The values of $s(p)$ and $\rho_j(p)$ are defined dynamically and not explicitly by the parallel asynchronous or synchronous -execution of the algorithm. Particularly, it enables one to consider distributed computations whereby processors compute at -their own pace according to their intrinsic characteristics and computational load. The parallelism between the processors is -well described by the set $s(p)$ which contains at each step $p$ the index of the components relaxed by each processor on a -parallel way while the use of delayed components in~(\ref{eq:13}) permits one to model nondeterministic behavior and does not -imply inefficiency of the considered distributed scheme of computation. Note that, according to~\cite{ref7}, theoretically, -each component of the vector must be relaxed an infinity of time. The choice of the relaxed components to be used in the -computational process may be guided by any criterion and, in particular, a natural criterion is to pick-up the most recently -available values of the components computed by the processors. Furthermore, the asynchronous iterations are implemented by -means of non-blocking MPI communication subroutines (asynchronous communications). - -The important property ensuring the convergence of the parallel projected Richardson method, both synchronous and asynchronous -algorithms, is the fact that $\mathcal{A}$ is an M-matrix. Moreover, the convergence proceeds from a result of~\cite{ref6}. -Indeed, there exists a value $\gamma_0>0$, such that $\forall\gamma\in ]0,\gamma_0[$, the parallel iterations~(\ref{eq:13}), -(\ref{eq:14}) and~(\ref{eq:15}), associated to the fixed point mapping $F_\gamma$~(\ref{eq:12}), converge to the unique solution -$U^{*}$ of the discretized problem. +The values of $s(p)$ and $\rho_j(p)$ are defined dynamically and not explicitly by +the parallel asynchronous or synchronous execution of the algorithm. Particularly, +it enables one to consider distributed computations whereby processors compute at +their own pace according to their intrinsic characteristics and computational load. 
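As a purely illustrative sketch of such nonblocking message passing (this code does not appear in the chapter; buffers, neighbor lists and names are hypothetical), a computing node could post its boundary exchanges without blocking its local relaxations as follows:
\begin{verbatim}
/* Hedged sketch (hypothetical names): nonblocking exchange of boundary
   values with the neighboring nodes, so that local relaxations do not
   wait for remote data in the asynchronous scheme.                        */
#include <mpi.h>

void exchange_boundaries(int nb_neighbors, const int *neighbor, int count,
                         double **send_buf, double **recv_buf,
                         MPI_Request *req)
{
  for (int i = 0; i < nb_neighbors; i++) {
    MPI_Isend(send_buf[i], count, MPI_DOUBLE, neighbor[i], 0,
              MPI_COMM_WORLD, &req[2*i]);
    MPI_Irecv(recv_buf[i], count, MPI_DOUBLE, neighbor[i], 0,
              MPI_COMM_WORLD, &req[2*i+1]);
  }
  /* Asynchronous version: only test completion (MPI_Testall) between two
     relaxations. Synchronous version: call MPI_Waitall(2*nb_neighbors,
     req, MPI_STATUSES_IGNORE) before starting the next iteration.         */
}
\end{verbatim}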
+The parallelism between the processors is well described by the set $s(p)$ which
+contains at each step $p$ the index of the components relaxed by each processor in
+a parallel way, while the use of delayed components in~(\ref{ch13:eq:13}) permits one
+to model nondeterministic behavior and does not imply inefficiency of the considered
+distributed scheme of computation. Note that, according to~\cite{ch13:ref7}, theoretically,
+each component of the vector must be relaxed an infinite number of times. The choice of the
+relaxed components to be used in the computational process may be guided by any criterion
+and, in particular, a natural criterion is to pick up the most recently available
+values of the components computed by the processors. Furthermore, the asynchronous
+iterations are implemented by means of nonblocking MPI communication subroutines\index{MPI~subroutines!Nonblocking}
+(asynchronous communications).
+
+The important property ensuring the convergence of the parallel projected Richardson
+method, for both the synchronous and asynchronous algorithms, is the fact that $\mathcal{A}$
+is an M-matrix. Moreover, the convergence\index{Convergence} proceeds from a result
+of~\cite{ch13:ref6}. Indeed, there exists a value $\gamma_0>0$, such that $\forall\gamma\in ]0,\gamma_0[$,
+the parallel iterations~(\ref{ch13:eq:13}), (\ref{ch13:eq:14}) and~(\ref{ch13:eq:15}),
+associated with the fixed point mapping $F_\gamma$~(\ref{ch13:eq:12}), converge to the
+unique solution $U^{*}$ of the discretized problem.

%%--------------------------%%
%%        SECTION 4         %%
%%--------------------------%%
\section{Parallel implementation on a GPU cluster}
-\label{sec:04}
-In this section, we give the main key points of the parallel implementation of the projected Richardson method, both synchronous
-and asynchronous versions, on a GPU cluster, for solving the nonlinear systems derived from the discretization of large obstacle
-problems. More precisely, each nonlinear system is solved iteratively using the whole cluster. We use a heteregeneous CUDA/MPI
-programming. Indeed, the communication of data, at each iteration between the GPU computing nodes, can be either synchronous
-or asynchronous using the MPI communication subroutines, whereas inside each GPU node, a CUDA parallelization is performed.
+\label{ch13:sec:04}
+In this section, we give the main key points of the parallel implementation of the
+projected Richardson method, both synchronous and asynchronous versions, on a GPU
+cluster, for solving the nonlinear systems derived from the discretization of large
+obstacle problems. More precisely, each nonlinear system is solved iteratively using
+the whole cluster. We use heterogeneous CUDA/MPI programming. Indeed, the communication
+of data, at each iteration between the GPU computing nodes, can be either synchronous
+or asynchronous using the MPI communication subroutines, whereas inside each GPU node,
+a CUDA parallelization is performed.

\begin{figure}[!h]
\centerline{\includegraphics[scale=0.30]{Chapters/chapter13/figures/splitCPU}}
\caption{Data partitioning of a problem to be solved among $S=3\times 4$ computing nodes.}
-\label{fig:01}
+\label{ch13:fig:01}
\end{figure}

-Let $S$ denote the number of computing nodes on the GPU cluster, where a computing node is composed of CPU core holding one MPI
-process and a GPU card. 
So, before starting computations, the obstacle problem of size $(NX\times NY\times NZ)$ is split into $S$ -parallelepipedic sub-problems, each for a node (MPI process, GPU), as is shown in Figure~\ref{fig:01}. Indeed, the $NY$ and $NZ$ -dimensions (according to the $y$ and $z$ axises) of the three-dimensional problem are, respectively, split into $Sy$ and $Sz$ parts, -such that $S=Sy\times Sz$. In this case, each computing node has at most four neighboring nodes. This kind of the data partitioning -reduces the data exchanges at subdomain boundaries compared to a naive $z$-axis-wise partitioning. +Let $S$ denote the number of computing nodes\index{Computing~node} on the GPU cluster, +where a computing node is composed of CPU core holding one MPI process and a GPU card. +So, before starting computations, the obstacle problem of size $(NX\times NY\times NZ)$ +is split into $S$ parallelepipedic sub-problems, each for a node (MPI process, GPU), as +is shown in Figure~\ref{ch13:fig:01}. Indeed, the $NY$ and $NZ$ dimensions (according +to the $y$ and $z$ axises) of the three-dimensional problem are, respectively, split +into $Sy$ and $Sz$ parts, such that $S=Sy\times Sz$. In this case, each computing node +has at most four neighboring nodes. This kind of the data partitioning reduces the data +exchanges at subdomain boundaries compared to a naive $z$-axis-wise partitioning. \begin{algorithm}[!t] -%\SetLine -%\linesnumbered Initialization of the parameters of the sub-problem\; Allocate and fill the data in the global memory GPU\; \For{$i=1$ {\bf to} $NbSteps$}{ @@ -304,30 +374,34 @@ Allocate and fill the data in the global memory GPU\; } Copy the solution $U$ back from GPU memory\; \caption{Parallel solving of the obstacle problem on a GPU cluster} -\label{alg:01} +\label{ch13:alg:01} \end{algorithm} -All the computing nodes of the GPU cluster execute in parallel the same Algorithm~\ref{alg:01} but on different three-dimensional -sub-problems of size $(NX\times ny\times nz)$. This algorithm gives the main key points for solving an obstacle problem defined in -a three-dimensional domain, where $A$ is the discretization matrix, $G$ is the right-hand side and $U$ is the solution vector. After -the initialization step, all the data generated from the partitioning operation are copied from the CPU memories to the GPU global -memories, to be processed on the GPUs. Next, the algorithm uses $NbSteps$ time steps to solve the global obstacle problem. In fact, -it uses a parallel algorithm adapted to GPUs of the projected Richardson iterative method for solving the nonlinear systems of the -obstacle problem. This function is defined by {\it Solve()} in Algorithm~\ref{alg:01}. At every time step, the initial guess $U^0$ -for the iterative algorithm is set to the solution found at the previous time step. Moreover, the right-hand side $G$ is computed -as follows: \[G = \frac{1}{k}.U^{prev} + F\] where $k$ is the time step, $U^{prev}$ is the solution computed in the previous time -step and each element $f(x, y, z)$ of the vector $F$ is computed as follows: +All the computing nodes of the GPU cluster execute in parallel the same Algorithm~\ref{ch13:alg:01} +but on different three-dimensional sub-problems of size $(NX\times ny\times nz)$. +This algorithm gives the main key points for solving an obstacle problem\index{Obstacle~problem} +defined in a three-dimensional domain, where $A$ is the discretization matrix, $G$ +is the right-hand side and $U$ is the solution vector. 
After the initialization step, +all the data generated from the partitioning operation are copied from the CPU memories +to the GPU global memories, to be processed on the GPUs. Next, the algorithm uses $NbSteps$ +time steps to solve the global obstacle problem. In fact, it uses a parallel algorithm +adapted to GPUs of the projected Richardson iterative method for solving the nonlinear +systems\index{Nonlinear} of the obstacle problem. This function is defined by {\it Solve()} +in Algorithm~\ref{ch13:alg:01}. At every time step, the initial guess $U^0$ for the iterative +algorithm is set to the solution found at the previous time step. Moreover, the right-hand +side $G$ is computed as follows: \[G = \frac{1}{k}.U^{prev} + F\] where $k$ is the time step, +$U^{prev}$ is the solution computed in the previous time step and each element $f(x, y, z)$ +of the vector $F$ is computed as follows: \begin{equation} f(x,y,z)=\cos(2\pi x)\cdot\cos(4\pi y)\cdot\cos(6\pi z). -\label{eq:18} +\label{ch13:eq:18} \end{equation} -Finally, the solution $U$ of the obstacle problem is copied back from the GPU global memories to the CPU memories. We use the -communication subroutines of the CUBLAS library~\cite{ref8} (CUDA Basic Linear Algebra Subroutines) for the memory allocations in -the GPU (\verb+cublasAlloc()+) and the data transfers between the CPU and its GPU: \verb+cublasSetVector()+ and \verb+cublasGetVector()+. +Finally, the solution $U$ of the obstacle problem is copied back from the GPU global +memories to the CPU memories. We use the communication subroutines of the CUBLAS library~\cite{ch13:ref8}\index{CUBLAS} +(CUDA Basic Linear Algebra Subroutines) for the memory allocations in the GPU (\verb+cublasAlloc()+) +and the data transfers between the CPU and its GPU: \verb+cublasSetVector()+ and \verb+cublasGetVector()+. \begin{algorithm}[!t] -% \SetLine -% \linesnumbered $p = 0$\; $conv = false$\; $U = U^{0}$\; @@ -341,43 +415,55 @@ the GPU (\verb+cublasAlloc()+) and the data transfers between the CPU and its GP $conv$ = Convergence($error$, $p$, $\varepsilon$, $MaxRelax$)\; } \caption{Parallel iterative solving of the nonlinear systems on a GPU cluster ($Solve()$ function)} -\label{alg:02} +\label{ch13:alg:02} \end{algorithm} -As many other iterative methods, the algorithm of the projected Richardson method is based on algebraic functions operating on vectors -and/or matrices, which are more efficient on parallel computers when they work on large vectors. Its parallel implementation on the GPU -cluster is carried out so that the GPUs execute the vector operations as kernels and the CPUs execute the serial codes, supervise the -kernel executions and the data exchanges with the neighboring nodes and supply the GPUs with data. Algorithm~\ref{alg:02} shows the -main key points of the parallel iterative algorithm (function $Solve()$ in Algorithm~\ref{alg:01}). All the vector operations inside -the main loop ({\bf repeat} ... {\bf until}) are executed by the GPU. We use the following functions of the CUBLAS library: +As many other iterative methods, the algorithm of the projected Richardson +method\index{Iterative~method!Projected~Richardson} is based on algebraic +functions operating on vectors and/or matrices, which are more efficient on +parallel computers when they work on large vectors. 
Its parallel implementation +on the GPU cluster is carried out so that the GPUs execute the vector operations +as kernels and the CPUs execute the serial codes, supervise the kernel executions +and the data exchanges with the neighboring nodes\index{Neighboring~node} and +supply the GPUs with data. Algorithm~\ref{ch13:alg:02} shows the main key points +of the parallel iterative algorithm (function $Solve()$ in Algorithm~\ref{ch13:alg:01}). +All the vector operations inside the main loop ({\bf repeat} ... {\bf until}) +are executed by the GPU. We use the following functions of the CUBLAS library\index{CUBLAS}: \begin{itemize} \item \verb+cublasDaxpy()+ to compute the difference between the solution vectors $U^{p}$ and $U^{p+1}$ computed in two successive relaxations -$p$ and $p+1$ (line~$7$ in Algorithm~\ref{alg:02}), +$p$ and $p+1$ (line~$7$ in Algorithm~\ref{ch13:alg:02}), \item \verb+cublasDnrm2()+ to perform the Euclidean norm (line~$8$) and, \item \verb+cublasDcpy()+ for the data copy of a vector to another one in the GPU memory (lines~$3$ and~$9$). \end{itemize} -The dimensions of the grid and blocks of threads that execute a given kernel depend on the resources of the GPU multiprocessor and the -resource requirements of the kernel. So, if $block$ defines the size of a thread block, which must not exceed the maximum size of a thread -block, then the number of thread blocks in the grid, denoted by $grid$, can be computed according to the size of the local sub-problem -as follows: \[grid = \frac{(NX\times ny\times nz)+block-1}{block}.\] However, when solving very large problems, the size of the thread -grid can exceed the maximum number of thread blocks that can be executed on the GPUs (up-to $65.535$ thread blocks) and, thus, the kernel -will fail to launch. Therefore, for each kernel, we decompose the three-dimensional sub-problem into $nz$ two-dimensional slices of size -($NX\times ny$), as is shown in Figure~\ref{fig:02}. All slices of the same kernel are executed using {\bf for} loop by $NX\times ny$ parallel -threads organized in a two-dimensional grid of two-dimensional thread blocks, as is shown in Listing~\ref{list:01}. Each thread is in charge -of $nz$ discretization points (one from each slice), accessed in the GPU memory with a constant stride $(NX\times ny)$. +The dimensions of the grid and blocks of threads that execute a given kernel +depend on the resources of the GPU multiprocessor and the resource requirements +of the kernel. So, if $block$ defines the size of a thread block, which must +not exceed the maximum size of a thread block, then the number of thread blocks +in the grid, denoted by $grid$, can be computed according to the size of the +local sub-problem as follows: \[grid = \frac{(NX\times ny\times nz)+block-1}{block}.\] +However, when solving very large problems, the size of the thread grid can exceed +the maximum number of thread blocks that can be executed on the GPUs (up-to $65.535$ +thread blocks) and, thus, the kernel will fail to launch. Therefore, for each kernel, +we decompose the three-dimensional sub-problem into $nz$ two-dimensional slices of size +($NX\times ny$), as is shown in Figure~\ref{ch13:fig:02}. All slices of the same kernel +are executed using {\bf for} loop by $NX\times ny$ parallel threads organized in a +two-dimensional grid of two-dimensional thread blocks, as is shown in Listing~\ref{ch13:list:01}. 
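As an illustration of this execution configuration, the following sketch shows one possible
way to organize such a kernel and its launch. It is not the code of Listing~\ref{ch13:list:01};
the names \verb+Example_Kernel()+ and \verb+Launch_Example()+ as well as the kernel body are
hypothetical placeholders, and a thread block organized in two dimensions of sizes $(64,4)$,
i.e. $256$ threads per block, is assumed.
\begin{lstlisting}
// Illustrative sketch only (hypothetical names): a two-dimensional grid of
// two-dimensional thread blocks for a sub-problem of size NX x ny x nz.
#include <cuda_runtime.h>

#define BLOCK_X  64   // assumed block organization: 64 x 4 = 256 threads
#define BLOCK_Y   4

// Each thread handles one (x,y) column, i.e. one element in each of the
// nz slices of size NX x ny, separated by a constant stride NX*ny.
__global__ void Example_Kernel(double *u, int NX, int ny, int nz)
{
  int x = blockIdx.x * blockDim.x + threadIdx.x;
  int y = blockIdx.y * blockDim.y + threadIdx.y;
  if (x < NX && y < ny) {
    int idx = y * NX + x;            // index in the first slice
    for (int z = 0; z < nz; z++) {   // loop over the nz slices
      u[idx] = 0.0;                  // placeholder for the real update
      idx += NX * ny;                // same point in the next slice
    }
  }
}

void Launch_Example(double *u, int NX, int ny, int nz)
{
  dim3 block(BLOCK_X, BLOCK_Y);
  dim3 grid((NX + block.x - 1) / block.x, (ny + block.y - 1) / block.y);
  Example_Kernel<<<grid, block>>>(u, NX, ny, nz);
}
\end{lstlisting}
With this organization, the number of thread blocks does not depend on $nz$, so the limit on
the size of the thread grid mentioned above is no longer an issue.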
+Each thread is in charge of $nz$ discretization points (one from each slice), accessed +in the GPU memory with a constant stride $(NX\times ny)$. \begin{figure} \centerline{\includegraphics[scale=0.30]{Chapters/chapter13/figures/splitGPU}} \caption{Decomposition of a sub-problem in a GPU into $nz$ slices.} -\label{fig:02} +\label{ch13:fig:02} \end{figure} \begin{center} -\lstinputlisting[label=list:01,caption=Skeleton codes of a GPU kernel and a CPU function]{Chapters/chapter13/ex1.cu} +\lstinputlisting[label=ch13:list:01,caption=Skeleton codes of a GPU kernel and a CPU function]{Chapters/chapter13/ex1.cu} \end{center} -The function $Determine\_Bordering\_Vector\_Elements()$ (line~$5$ in Algorithm~\ref{alg:02}) determines the values of the vector -elements shared at the boundaries with neighboring computing nodes. Its main operations are defined as follows: +The function $Determine\_Bordering\_Vector\_Elements()$ (line~$5$ in Algorithm~\ref{ch13:alg:02}) +determines the values of the vector elements shared at the boundaries with neighboring computing +nodes. Its main operations are defined as follows: \begin{enumerate} \item define the values associated to the bordering points needed by the neighbors, \item copy the values associated to the bordering points from the GPU to the CPU, @@ -389,21 +475,30 @@ The first operation of this function is implemented as kernels to be performed b \item a kernel executed by $NX\times nz$ threads to define the values associated to the bordering vector elements along $y$-axis and, \item a kernel executed by $NX\times ny$ threads to define the values associated to the bordering vector elements along $z$-axis. \end{itemize} -As mentioned before, we develop the \emph{synchronous} and \emph{asynchronous} algorithms of the projected Richardson method. Obviously, -in this scope, the synchronous or asynchronous communications refer to the communications between the CPU cores (MPI processes) on the -GPU cluster, in order to exchange the vector elements associated to subdomain boundaries. For the memory copies between a CPU core and -its GPU, we use the synchronous communication routines of the CUBLAS library: \verb+cublasSetVector()+ and \verb+cublasGetVector()+ -in the synchronous algorithm and the asynchronous ones: \verb+cublasSetVectorAsync()+ and \verb+cublasGetVectorAsync()+ in the -asynchronous algorithm. Moreover, we use the communication routines of the MPI library to carry out the data exchanges between the neighboring -nodes. We use the following communication routines: \verb+MPI_Isend()+ and \verb+MPI_Irecv()+ to perform non-blocking sends and receptions, -respectively. For the synchronous algorithm, we use the MPI routine \verb+MPI_Waitall()+ which puts the MPI process of a computing node -in blocking status until all data exchanges with neighboring nodes (sends and receptions) are completed. In contrast, for the asynchronous -algorithms, we use the MPI routine \verb+MPI_Test()+ which tests the completion of a data exchange (send or reception) without putting the -MPI process in blocking status. - -The function $Compute\_New\_Vector\_Elements()$ (line~$6$ in Algorithm~\ref{alg:02}) computes, at each iteration, the new elements -of the iterate vector $U$. Its general code is presented in Listing~\ref{list:01} (CPU function). 
The iterations of the projected -Richardson method, based on those of the Jacobi method, are defined as follows: +As mentioned before, we develop the \emph{synchronous} and \emph{asynchronous} +algorithms of the projected Richardson method. Obviously, in this scope, the +synchronous\index{Synchronous} or asynchronous\index{Asynchronous} communications +refer to the communications between the CPU cores (MPI processes) on the GPU cluster, +in order to exchange the vector elements associated to subdomain boundaries. For +the memory copies between a CPU core and its GPU, we use the synchronous communication +routines of the CUBLAS library\index{CUBLAS}: \verb+cublasSetVector()+ and \verb+cublasGetVector()+ +in the synchronous algorithm and the asynchronous ones: \verb+cublasSetVectorAsync()+ +and \verb+cublasGetVectorAsync()+ in the asynchronous algorithm. Moreover, we +use the communication routines of the MPI library to carry out the data exchanges +between the neighboring nodes. We use the following communication routines: \verb+MPI_Isend()+ +and \verb+MPI_Irecv()+ to perform nonblocking\index{MPI~subroutines!Nonblocking} +sends and receptions, respectively. For the synchronous algorithm, we use the MPI +routine \verb+MPI_Waitall()+ which puts the MPI process of a computing node in +blocking status until all data exchanges with neighboring nodes (sends and receptions) +are completed. In contrast, for the asynchronous algorithms, we use the MPI routine +\verb+MPI_Test()+ which tests the completion of a data exchange (send or reception) +without putting the MPI process in blocking status\index{MPI~subroutines!Blocking}. + +The function $Compute\_New\_Vector\_Elements()$ (line~$6$ in Algorithm~\ref{ch13:alg:02}) +computes, at each iteration, the new elements of the iterate vector $U$. Its general code +is presented in Listing~\ref{ch13:list:01} (CPU function). The iterations of the projected +Richardson method\index{Iterative~method!Projected~Richardson}, based on those of the Jacobi +method\index{Iterative~method!Jacobi}, are defined as follows: \begin{equation} \begin{array}{ll} u^{p+1}(x,y,z) =& \frac{1}{Center}(g(x,y,z) - (Center\cdot u^{p}(x,y,z) + \\ @@ -411,64 +506,88 @@ u^{p+1}(x,y,z) =& \frac{1}{Center}(g(x,y,z) - (Center\cdot u^{p}(x,y,z) + \\ & South\cdot u^{p}(x,y-h,z) + North\cdot u^{p}(x,y+h,z) + \\ & Rear\cdot u^{p}(x,y,z-h) + Front\cdot u^{p}(x,y,z+h))), \end{array} -\label{eq:17} +\label{ch13:eq:17} \end{equation} -where $u^{p}(x,y,z)$ is an element of the iterate vector $U$ computed at the iteration $p$ and $g(x,y,z)$ is a vector element of the -right-hand side $G$. The scalars $Center$, $West$, $East$, $South$, $North$, $Rear$ and $Front$ define constant coefficients of the -block matrix $A$. Figure~\ref{fig:03} shows the positions of these coefficients in a three-dimensional domain. +where $u^{p}(x,y,z)$ is an element of the iterate vector $U$ computed at the +iteration $p$ and $g(x,y,z)$ is a vector element of the right-hand side $G$. +The scalars $Center$, $West$, $East$, $South$, $North$, $Rear$ and $Front$ +define constant coefficients of the block matrix $A$. Figure~\ref{ch13:fig:03} +shows the positions of these coefficients in a three-dimensional domain. 
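For illustration, a minimal sketch of such an update is given below. It is not the code of
Listing~\ref{ch13:list:02}; the names are hypothetical and the exchange of the bordering
elements with the neighboring nodes is omitted. Each thread evaluates~(\ref{ch13:eq:17})
over the seven constant coefficients $Center$, $West$, $East$, $South$, $North$, $Rear$ and
$Front$, then applies the projection on the convex set $K$, written here as the obstacle
constraint $u\geq\phi$.
\begin{lstlisting}
// Illustrative sketch only (hypothetical names): Jacobi-like relaxation of
// the update formula above, followed by the projection enforcing u >= phi.
#include <cuda_runtime.h>

__global__ void Projected_Update(double *unew, const double *u,
                                 const double *g, const double *phi,
                                 double Center, double West, double East,
                                 double South, double North,
                                 double Rear, double Front,
                                 int NX, int ny, int nz)
{
  int x = blockIdx.x * blockDim.x + threadIdx.x;
  int y = blockIdx.y * blockDim.y + threadIdx.y;
  if (x <= 0 || x >= NX - 1 || y <= 0 || y >= ny - 1) return; // interior only
  for (int z = 1; z < nz - 1; z++) {
    int i = (z * ny + y) * NX + x;
    // weighted sum of the current iterate over the seven-point stencil
    double au = Center * u[i]
              + West  * u[i - 1]       + East  * u[i + 1]
              + South * u[i - NX]      + North * u[i + NX]
              + Rear  * u[i - NX * ny] + Front * u[i + NX * ny];
    double v = (g[i] - au) / Center;   // relaxation step
    unew[i] = fmax(v, phi[i]);         // projection: enforce u >= phi
  }
}
\end{lstlisting}
Such a kernel would be launched with the two-dimensional execution configuration sketched
earlier; in the chapter's actual kernels, the reads of $u^{p}$ go through the texture memory,
as explained below.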
\begin{figure} \centerline{\includegraphics[scale=0.35]{Chapters/chapter13/figures/matrix}} \caption{Matrix constant coefficients in a three-dimensional domain.} -\label{fig:03} +\label{ch13:fig:03} \end{figure} -The kernel implementations of the projected Richardson method on GPUs uses a perfect fine-grain multithreading parallelism. Since the -projected Richardson algorithm is implemented as a fixed point method, each kernel is executed by a large number of GPU threads such -that each thread is in charge of the computation of one element of the iterate vector $U$. Moreover, this method uses the vector elements -updates of the Jacobi method, which means that each thread $i$ computes the new value of its element $u_{i}^{p+1}$ independently of the -new values $u_{j}^{p+1}$, where $j\neq i$, of those computed in parallel by other threads at the same iteration $p+1$. Listing~\ref{list:02} -shows the GPU implementations of the main kernels of the projected Richardson method, which are: the matrix-vector multiplication -(\verb+MV_Multiplication()+) and the vector elements updates (\verb+Vector_Updates()+). The codes of these kernels are based on -that presented in Listing~\ref{list:01}. - -\lstinputlisting[label=list:02,caption=GPU kernels of the projected Richardson method]{Chapters/chapter13/ex2.cu} +The kernel implementations of the projected Richardson method on GPUs uses a +perfect fine-grain multithreading parallelism. Since the projected Richardson +algorithm is implemented as a fixed point method, each kernel is executed by +a large number of GPU threads such that each thread is in charge of the computation +of one element of the iterate vector $U$. Moreover, this method uses the vector +elements updates of the Jacobi method, which means that each thread $i$ computes +the new value of its element $u_{i}^{p+1}$ independently of the new values $u_{j}^{p+1}$, +where $j\neq i$, of those computed in parallel by other threads at the same iteration +$p+1$. Listing~\ref{ch13:list:02} shows the GPU implementations of the main kernels +of the projected Richardson method, which are: the matrix-vector multiplication +(\verb+MV_Multiplication()+) and the vector elements updates (\verb+Vector_Updates()+). +The codes of these kernels are based on that presented in Listing~\ref{ch13:list:01}. + +\lstinputlisting[label=ch13:list:02,caption=GPU kernels of the projected Richardson method]{Chapters/chapter13/ex2.cu} \begin{figure} \centerline{\includegraphics[scale=0.3]{Chapters/chapter13/figures/points3D}} \caption{Computation of a vector element with the projected Richardson method.} -\label{fig:04} +\label{ch13:fig:04} \end{figure} -Each kernel is executed by $NX\times ny$ GPU threads so that $nz$ slices of $(NX\times ny)$ vector elements are computed in -a {\bf for} loop. In this case, each thread is in charge of one vector element from each slice (in total $nz$ vector elements -along $z$-axis). We can notice from the formula~(\ref{eq:17}) that the computation of a vector element $u^{p+1}(x,y,z)$, by -a thread at iteration $p+1$, requires seven vector elements computed at the previous iteration $p$: two vector elements in -each dimension plus the vector element at the intersection of the three axises $x$, $y$ and $z$ (see Figure~\ref{fig:04}). -So, to reduce the memory accesses to the high-latency global memory, the vector elements of the current slice can be stored -in the low-latency shared memories of thread blocks, as is described in~\cite{ref9}. 
Nevertheless, the fact that the computation -of a vector element requires only two elements in each dimension does not allow to maximize the data reuse from the shared memories. -The computation of a slice involves in total $(bx+2)\times(by+2)$ accesses to the global memory per thread block, to fill the -required vector elements in the shared memory where $bx$ and $by$ are the dimensions of a thread block. Then, in order to optimize -the memory accesses on GPUs, the elements of the iterate vector $U$ are filled in the cache texture memory (see~\cite{ref10}). -In new GPU generations as Fermi or Kepler, the global memory accesses are always cached in L1 and L2 caches. For example, for -a given kernel, we can favour the use of the L1 cache to that of the shared memory by using the function \verb+cudaFuncSetCacheConfig(Kernel,cudaFuncCachePreferL1)+. -So, the initial access to the global memory loads the vector elements required by the threads of a block into the cache memory -(texture or L1/L2 caches). Then, all the following memory accesses read from this cache memory. In Listing~\ref{list:02}, the -function \verb+fetch_double(v,i)+ is used to read from the texture memory the $i^{th}$ element of the double-precision vector -\verb+v+ (see Listing~\ref{list:03}). Moreover, the seven constant coefficients of matrix $A$ can be stored in the constant memory -but, since they are reused $nz$ times by each thread, it is more interesting to fill them on the low-latency registers of each thread. - -\lstinputlisting[label=list:03,caption=Memory access to the cache texture memory]{Chapters/chapter13/ex3.cu} - -The function $Convergence()$ (line~$11$ in Algorithm~\ref{alg:02}) allows to detect the convergence of the parallel iterative algorithm -and is based on the tolerance threshold $\varepsilon$ and the maximum number of relaxations $MaxRelax$. We take into account the number -of relaxations since that of iterations cannot be computed in the asynchronous case. Indeed, a relaxation is the update~(\ref{eq:13}) of -a local iterate vector $U_i$ according to $F_i$. Then, counting the number of relaxations is possible in both synchronous and asynchronous -cases. On the other hand, an iteration is the update of at least all vector components with $F_i$. - -In the synchronous algorithm, the global convergence is detected when the maximal value of the absolute error, $error$, is sufficiently small -and/or the maximum number of relaxations, $MaxRelax$, is reached, as follows: +Each kernel is executed by $NX\times ny$ GPU threads so that $nz$ slices +of $(NX\times ny)$ vector elements are computed in a {\bf for} loop. In +this case, each thread is in charge of one vector element from each slice +(in total $nz$ vector elements along $z$-axis). We can notice from the +formula~(\ref{ch13:eq:17}) that the computation of a vector element $u^{p+1}(x,y,z)$, +by a thread at iteration $p+1$, requires seven vector elements computed +at the previous iteration $p$: two vector elements in each dimension plus +the vector element at the intersection of the three axises $x$, $y$ and $z$ +(see Figure~\ref{ch13:fig:04}). So, to reduce the memory accesses to the +high-latency global memory, the vector elements of the current slice can +be stored in the low-latency shared memories of thread blocks, as is described +in~\cite{ch13:ref9}. Nevertheless, the fact that the computation of a vector +element requires only two elements in each dimension does not allow to maximize +the data reuse from the shared memories. 
The computation of a slice involves
+in total $(bx+2)\times(by+2)$ accesses to the global memory per thread block,
+to fill the required vector elements in the shared memory, where $bx$ and $by$
+are the dimensions of a thread block. Then, in order to optimize the memory
+accesses on GPUs, the elements of the iterate vector $U$ are loaded into the
+cache texture memory (see~\cite{ch13:ref10}). In new GPU generations such as Fermi
+or Kepler, the global memory accesses are always cached in L1 and L2 caches.
+For example, for a given kernel, we can favour the use of the L1 cache over that
+of the shared memory by using the function \verb+cudaFuncSetCacheConfig(Kernel,cudaFuncCachePreferL1)+.
+So, the initial access to the global memory loads the vector elements required
+by the threads of a block into the cache memory (texture or L1/L2 caches). Then,
+all the following memory accesses read from this cache memory. In Listing~\ref{ch13:list:02},
+the function \verb+fetch_double(v,i)+ is used to read from the texture memory
+the $i^{th}$ element of the double-precision vector \verb+v+ (see Listing~\ref{ch13:list:03}).
+Moreover, the seven constant coefficients of matrix $A$ can be stored in the
+constant memory but, since they are reused $nz$ times by each thread, it is more
+efficient to keep them in the low-latency registers of each thread.
+
+\lstinputlisting[label=ch13:list:03,caption=Memory access to the cache texture memory]{Chapters/chapter13/ex3.cu}
+
+The function $Convergence()$ (line~$11$ in Algorithm~\ref{ch13:alg:02}) is used
+to detect the convergence of the parallel iterative algorithm and is based on
+the tolerance threshold\index{Convergence!Tolerance~threshold} $\varepsilon$
+and the maximum number of relaxations\index{Convergence!Maximum~number~of~relaxations}
+$MaxRelax$. We take into account the number of relaxations since the number of iterations
+cannot be computed in the asynchronous case. Indeed, a relaxation is the update~(\ref{ch13:eq:13})
+of a local iterate vector $U_i$ according to $F_i$. Then, counting the number
+of relaxations is possible in both synchronous and asynchronous cases. On the
+other hand, an iteration is the update of at least all vector components with
+$F_i$.
+
+In the synchronous\index{Synchronous} algorithm, the global convergence is detected
+when the maximal value of the absolute error, $error$, is sufficiently small and/or
+the maximum number of relaxations, $MaxRelax$, is reached, as follows:
$$
\begin{array}{l}
error=\|U^{p}-U^{p+1}\|_{2}; \\
@@ -477,52 +596,90 @@ AllReduce(error,\hspace{0.1cm}maxerror,\hspace{0.1cm}MAX); \\
conv \leftarrow true;
\end{array}
$$
-where the function $AllReduce()$ uses the MPI reduction subroutine \verb+MPI_Allreduce()+ to compute the maximal value, $maxerror$, among the
-local absolute errors, $error$, of all computing nodes and $p$ (in Algorithm~\ref{alg:02}) is used as a counter of the local relaxations carried
-out by a computing node. In the asynchronous algorithms, the global convergence is detected when all computing nodes locally converge. For this,
-we use a token ring architecture around which a boolean token travels, in one direction, from a computing node to another. Starting from node $0$,
-the boolean token is set to $true$ by node $i$ if the local convergence is reached or to $false$ otherwise and, then, it is sent to node $i+1$.
-Finally, the global convergence is detected when node $0$ receives from its neighbor node $S-1$, in the ring architecture, a token set to $true$. 
-In this case, node $0$ sends a stop message (end of parallel solving) to all computing nodes in the cluster. +where the function $AllReduce()$ uses the MPI global reduction subroutine\index{MPI~subroutines!Global} +\verb+MPI_Allreduce()+ to compute the maximal value, $maxerror$, among the local +absolute errors, $error$, of all computing nodes and $p$ (in Algorithm~\ref{ch13:alg:02}) +is used as a counter of the local relaxations carried out by a computing node. In +the asynchronous\index{Asynchronous} algorithms, the global convergence is detected +when all computing nodes locally converge. For this, we use a token ring architecture +around which a boolean token travels, in one direction, from a computing node to another. +Starting from node $0$, the boolean token is set to $true$ by node $i$ if the local +convergence is reached or to $false$ otherwise and, then, it is sent to node $i+1$. +Finally, the global convergence is detected when node $0$ receives from its neighbor +node $S-1$, in the ring architecture, a token set to $true$. In this case, node $0$ +sends a stop message (end of parallel solving) to all computing nodes in the cluster. %%--------------------------%% %% SECTION 5 %% %%--------------------------%% \section{Experimental tests on a GPU cluster} -\label{sec:05} -The GPU cluster of tests, that we used in this chapter, is an $20Gbps$ Infiniband network of six machines. Each machine is a Quad-Core Xeon -E5530 CPU running at $2.4$GHz. It provides a RAM memory of $12$GB with a memory bandwidth of $25.6$GB/s and it is equipped with two Nvidia -Tesla C1060 GPUs. A Tesla GPU contains in total $240$ cores running at $1.3$GHz. It provides $4$GB of global memory with a memory bandwidth -of $102$GB/s, accessible by all its cores and also by the CPU through the PCI-Express 16x Gen 2.0 interface with a throughput of $8$GB/s. -Hence, the memory copy operations between the GPU and the CPU are about $12$ times slower than those of the Tesla GPU memory. We have performed -our simulations on a cluster of $24$ CPU cores and on a cluster of $12$ GPUs. Figure~\ref{fig:05} describes the components of the GPU cluster -of tests. +\label{ch13:sec:05} +The GPU cluster\index{GPU~cluster} of tests, that we used in this chapter, is an $20Gbps$ +Infiniband network of six machines. Each machine is a Quad-Core Xeon E5530 CPU running at +$2.4$GHz. It provides a RAM memory of $12$GB with a memory bandwidth of $25.6$GB/s and it +is equipped with two Nvidia Tesla C1060 GPUs. A Tesla GPU contains in total $240$ cores +running at $1.3$GHz. It provides $4$GB of global memory with a memory bandwidth of $102$GB/s, +accessible by all its cores and also by the CPU through the PCI-Express 16x Gen 2.0 interface +with a throughput of $8$GB/s. Hence, the memory copy operations between the GPU and the CPU +are about $12$ times slower than those of the Tesla GPU memory. We have performed our simulations +on a cluster of $24$ CPU cores and on a cluster of $12$ GPUs. Figure~\ref{ch13:fig:05} describes +the components of the GPU cluster of tests. + +Linux cluster version 2.6.39 OS is installed on CPUs. C programming language is used for +coding the parallel algorithms of the methods on both GPU cluster and CPU cluster. CUDA +version 4.0~\cite{ch13:ref12} is used for programming GPUs, using CUBLAS library~\cite{ch13:ref8} +to deal with vector operations in GPUs and, finally, MPI functions of OpenMPI 1.3.3 are +used to carry out the synchronous and asynchronous communications between CPU cores. 
Indeed, +in our experiments, a computing node is managed by a MPI process and it is composed of +one CPU core and one GPU card. + +All experimental results of the parallel projected Richardson algorithms are obtained +from simulations made in double precision data. The obstacle problems to be solved are +defined in constant three-dimensional domain $\Omega\subset\mathbb{R}^{3}$. The numerical +values of the parameters of the obstacle problems are: $\eta=0.2$, $c=1.1$, $f$ is computed +by formula~(\ref{ch13:eq:18}) and final time $T=0.02$. Moreover, three time steps ($NbSteps=3$) +are computed with $k=0.0066$. As the discretization matrix is constant along the time +steps, the convergence properties of the iterative algorithms do not change. Thus, the +performance characteristics obtained with three time steps will still be valid for more +time steps. The initial function $u(0,x,y,z)$ of the obstacle problem~(\ref{ch13:eq:01}) +is set to $0$, with a constraint $u\geq\phi=0$. The relaxation parameter $\gamma$ used +by the projected Richardson method is computed automatically thanks to the diagonal entries +of the discretization matrix. The formula and its proof can be found in~\cite{ch13:ref11}, +Section~2.3. The convergence tolerance threshold $\varepsilon$ is set to $1e$-$04$ and the +maximum number of relaxations is limited to $10^{6}$ relaxations. Finally, the number of +threads per block is set to $256$ threads, which gives, in general, good performances for +most GPU applications. We have performed some tests for the execution configurations and +we have noticed that the best configuration of the $256$ threads per block is an organization +into two dimensions of sizes $(64,4)$. \begin{figure} \centerline{\includegraphics[scale=0.25]{Chapters/chapter13/figures/cluster}} \caption{GPU cluster of tests composed of 12 computing nodes (six machines, each with two GPUs.} -\label{fig:05} +\label{ch13:fig:05} \end{figure} -Linux cluster version 2.6.39 OS is installed on CPUs. C programming language is used for coding the parallel algorithms of the methods on both -GPU cluster and CPU cluster. CUDA version 4.0~\cite{ref12} is used for programming GPUs, using CUBLAS library~\cite{ref8} to deal with vector -operations in GPUs and, finally, MPI functions of OpenMPI 1.3.3 are used to carry out the synchronous and asynchronous communications between -CPU cores. Indeed, in our experiments, a computing node is managed by a MPI process and it is composed of one CPU core and one GPU card. - -All experimental results of the parallel projected Richardson algorithms are obtained from simulations made in double precision data. The obstacle -problems to be solved are defined in constant three-dimensional domain $\Omega\subset\mathbb{R}^{3}$. The numerical values of the parameters of the -obstacle problems are: $\eta=0.2$, $c=1.1$, $f$ is computed by formula~(\ref{eq:18}) and final time $T=0.02$. Moreover, three time steps ($NbSteps=3$) -are computed with $k=0.0066$. As the discretization matrix is constant along the time steps, the convergence properties of the iterative algorithms -do not change. Thus, the performance characteristics obtained with three time steps will still be valid for more time steps. The initial function -$u(0,x,y,z)$ of the obstacle problem~(\ref{eq:01}) is set to $0$, with a constraint $u\geq\phi=0$. The relaxation parameter $\gamma$ used by the -projected Richardson method is computed automatically thanks to the diagonal entries of the discretization matrix. 
The formula and its proof can be -found in~\cite{ref11}, Section~2.3. The convergence tolerance threshold $\varepsilon$ is set to $1e$-$04$ and the maximum number of relaxations is -limited to $10^{6}$ relaxations. Finally, the number of threads per block is set to $256$ threads, which gives, in general, good performances for -most GPU applications. We have performed some tests for the execution configurations and we have noticed that the best configuration of the $256$ -threads per block is an organization into two dimensions of sizes $(64,4)$. - -\begin{table}[!h] +The performance measures that we took into account are the execution times and the number +of relaxations performed by the parallel iterative algorithms, both synchronous and asynchronous +versions, on the GPU and CPU clusters. These algorithms are used for solving nonlinear systems +derived from the discretization of obstacle problems of sizes $256^{3}$, $512^{3}$, $768^{3}$ +and $800^{3}$. In Table~\ref{ch13:tab:01} and Table~\ref{ch13:tab:02}, we show the performances +of the parallel synchronous and asynchronous algorithms of the projected Richardson method +implemented, respectively, on a cluster of $24$ CPU cores and on a cluster of $12$ GPUs. In +these tables, the execution time defines the time spent by the slowest computing node and the +number of relaxations is computed as the summation of those carried out by all computing nodes. + +In the sixth column of Table~\ref{ch13:tab:01} and in the eighth column of Table~\ref{ch13:tab:02}, +we give the gains in $\%$ obtained by using an asynchronous algorithm compared to a synchronous +one. We can notice that the asynchronous version on CPU and GPU clusters is slightly faster than +the synchronous one for both methods. Indeed, the cluster of tests is composed of local and homogeneous +nodes communicating via low-latency connections. So, in the case of distant and/or heterogeneous +nodes (or even with geographically distant clusters) the asynchronous version would be faster than +the synchronous one. However, the gains obtained on the GPU cluster are better than those obtained +on the CPU cluster. In fact, the computation times are reduced by accelerating the computations on +GPUs while the communication times still unchanged. + +\begin{table} \centering \begin{tabular}{|c|c|c|c|c|c|} \hline @@ -540,105 +697,130 @@ $800^{3}$ & $222,108.09$ & $1,769,232$ & $188,790 \end{tabular} \vspace{0.5cm} \caption{Execution times in seconds of the parallel projected Richardson method implemented on a cluster of 24 CPU cores.} -\label{tab:01} +\label{ch13:tab:01} \end{table} -\begin{table}[!h] +\begin{table} \centering \begin{tabular}{|c|c|c|c|c|c|c|c|} \hline -\multirow{2}{*}{\bf Pb. size} & \multicolumn{3}{c|}{\bf Synchronous} & \multicolumn{3}{c|}{\bf Asynchronous} & \multirow{2}{*}{\bf Gain\%} \\ \cline{2-7} +\multirow{2}{*}{\bf Pb. 
size} & \multicolumn{3}{c|}{\bf Synchronous} & \multicolumn{3}{c|}{\bf Asynchronous} & \multirow{2}{*}{\bf Gain\%} \\ \cline{2-7} - & $\mathbf{T_{gpu}}$ & {\bf \#relax.} & $\mathbf{\tau}$ & $\mathbf{T_{gpu}}$ & {\bf \#relax.} & $\mathbf{\tau}$ & \\ \hline \hline + & $\mathbf{T_{gpu}}$ & {\bf \#relax.} & $\mathbf{\tau}$ & $\mathbf{T_{gpu}}$ & {\bf \#relax.} & $\mathbf{\tau}$ & \\ \hline \hline -$256^{3}$ & $29.67$ & $100,692$ & $19.39$ & $18.00$ & $94,215$ & $29.96$ & $39.33$ \\ \hline \hline +$256^{3}$ & $29.67$ & $100,692$ & $19.39$ & $18.00$ & $94,215$ & $29.96$ & $39.33$ \\\hline \hline -$512^{3}$ & $521.83$ & $381,300$ & $36.89$ & $425.15$ & $347,279$ & $42.89$ & $18.53$ \\ \hline \hline +$512^{3}$ & $521.83$ & $381,300$ & $36.89$ & $425.15$ & $347,279$ & $42.89$ & $18.53$ \\\hline \hline -$768^{3}$ & $4,112.68$ & $831,144$ & $50.13$ & $3,313.87$ & $750,232$ & $55.40$ & $19.42$ \\ \hline \hline +$768^{3}$ & $4,112.68$ & $831,144$ & $50.13$ & $3,313.87$ & $750,232$ & $55.40$ & $19.42$ \\ \hline \hline -$800^{3}$ & $3,950.87$ & $899,088$ & $56.22$ & $3,636.57$ & $834,900$ & $51.91$ & $7.95$ \\ \hline +$800^{3}$ & $3,950.87$ & $899,088$ & $56.22$ & $3,636.57$ & $834,900$ & $51.91$ & $7.95$ \\ \hline \end{tabular} \vspace{0.5cm} \caption{Execution times in seconds of the parallel projected Richardson method implemented on a cluster of 12 GPUs.} -\label{tab:02} +\label{ch13:tab:02} \end{table} -The performance measures that we took into account are the execution times and the number of relaxations performed by the parallel iterative algorithms, -both synchronous and asynchronous versions, on the GPU and CPU clusters. These algorithms are used for solving nonlinear systems derived from the discretization -of obstacle problems of sizes $256^{3}$, $512^{3}$, $768^{3}$ and $800^{3}$. In Table~\ref{tab:01} and Table~\ref{tab:02}, we show the performances -of the parallel synchronous and asynchronous algorithms of the projected Richardson method implemented, respectively, on a cluster of $24$ CPU cores -and on a cluster of $12$ GPUs. In these tables, the execution time defines the time spent by the slowest computing node and the number of relaxations -is computed as the summation of those carried out by all computing nodes. - -In the sixth column of Table~\ref{tab:01} and in the eighth column of Table~\ref{tab:02}, we give the gains in $\%$ obtained by using an -asynchronous algorithm compared to a synchronous one. We can notice that the asynchronous version on CPU and GPU clusters is slightly faster -than the synchronous one for both methods. Indeed, the cluster of tests is composed of local and homogeneous nodes communicating via low-latency -connections. So, in the case of distant and/or heterogeneous nodes (or even with geographically distant clusters) the asynchronous version -would be faster than the synchronous one. However, the gains obtained on the GPU cluster are better than those obtained on the CPU cluster. -In fact, the computation times are reduced by accelerating the computations on GPUs while the communication times still unchanged. - -The fourth and seventh columns of Table~\ref{tab:02} show the relative gains obtained by executing the parallel algorithms on the cluster -of $12$ GPUs instead on the cluster of $24$ CPU cores. 
We compute the relative gain $\tau$ as a ratio between the execution time $T_{cpu}$
-spent on the CPU cluster over that $T_{gpu}$ spent on the GPU cluster: \[\tau=\frac{T_{cpu}}{T_{gpu}}.\] We can see from these ratios that
-solving large obstacle problems is faster on the GPU cluster than on the CPU cluster. Indeed, the GPUs are more efficient than their
-counterpart CPUs to execute large data-parallel operations. In addition, the projected Richardson method is implemented as a fixed point-based
-iteration and uses the Jacobi vector updates that allow a well thread-parallelization on GPUs, such that each GPU thread is in charge
-of one vector component at a time without being dependent on other vector components computed by other threads. Then, this allow to exploit
-at best the high performance computing of the GPUs by using all the GPU resources and avoiding the idle cores.
-
-Finally, the number of relaxations performed by the parallel synchronous algorithm is different in the CPU and GPU versions, because the number
-of computing nodes involved in the GPU cluster and in the CPU cluster is different. In the CPU case, $24$ computing nodes ($24$ CPU cores) are
-considered, whereas in the GPU case, $12$ computing nodes ($12$ GPUs) are considered. As the number of relaxations depends on the domain decomposition,
+The fourth and seventh columns of Table~\ref{ch13:tab:02} show the relative gains
+obtained by executing the parallel algorithms on the cluster of $12$ GPUs instead
+of on the cluster of $24$ CPU cores. We compute the relative gain\index{Relative~gain}
+$\tau$ as the ratio of the execution time $T_{cpu}$ spent on the CPU cluster to
+the time $T_{gpu}$ spent on the GPU cluster: \[\tau=\frac{T_{cpu}}{T_{gpu}}.\] We can see
+from these ratios that solving large obstacle problems is faster on the GPU cluster
+than on the CPU cluster. Indeed, the GPUs are more efficient than their CPU
+counterparts at executing large data-parallel operations. In addition, the projected Richardson
+method is implemented as a fixed point-based iteration and uses the Jacobi vector updates,
+which allow an efficient thread parallelization on GPUs, such that each GPU thread is in charge
+of one vector component at a time without being dependent on other vector components
+computed by other threads. This allows the computing power of the GPUs to be fully
+exploited, by using all the GPU resources and avoiding idle cores.
+
+Finally, the number of relaxations performed by the parallel synchronous algorithm
+is different in the CPU and GPU versions, because the number of computing nodes involved
+in the GPU cluster and in the CPU cluster is different. In the CPU case, $24$ computing
+nodes ($24$ CPU cores) are considered, whereas in the GPU case, $12$ computing nodes
+($12$ GPUs) are considered. As the number of relaxations depends on the domain decomposition,
 it also depends on the number of computing nodes.
+

%%--------------------------%%
%%        SECTION 6         %%
%%--------------------------%%
\section{Red-Black ordering technique}
-\label{sec:06}
-As is well-known, the Jacobi method is characterized by a slow convergence rate compared to some iterative methods (for example Gauss-Seidel method).
-So, in this section, we present some solutions to reduce the execution time and the number of relaxations and, more specifically, to speed up the
-convergence of the parallel projected Richardson method on the GPU cluster. 
We propose to use the point red-black ordering technique to accelerate -the convergence. This technique is often used to increase the parallelism of iterative methods for solving linear systems~\cite{ref13,ref14,ref15}. -We apply it to the projected Richardson method as a compromise between the Jacobi and Gauss-Seidel iterative methods. - -The general principle of the red-black technique is as follows. Let $t$ be the summation of the integer $x$-, $y$- and $z$-coordinates of a vector -element $u(x,y,z)$ on a three-dimensional domain: $t=x+y+z$. As is shown in Figure~\ref{fig:06.01}, the red-black ordering technique consists in the -parallel computing of the red vector elements having even value $t$ by using the values of the black ones then, the parallel computing of the black -vector elements having odd values $t$ by using the new values of the red ones. - -\begin{figure} -\centering - \mbox{\subfigure[Red-black ordering on x, y and z axises]{\includegraphics[width=2.3in]{Chapters/chapter13/figures/rouge-noir}\label{fig:06.01}}\quad - \subfigure[Red-black ordering on y axis]{\includegraphics[width=2.3in]{Chapters/chapter13/figures/rouge-noir-y}\label{fig:06.02}}} -\caption{Red-black ordering for computing the iterate vector elements in a three-dimensional space.} -\end{figure} +\label{ch13:sec:06} +As is well-known, the Jacobi method\index{Iterative~method!Jacobi} is characterized +by a slow convergence\index{Convergence} rate compared to some iterative methods\index{Iterative~method} +(for example Gauss-Seidel method\index{Iterative~method!Gauss-Seidel}). So, in this +section, we present some solutions to reduce the execution time and the number of +relaxations and, more specifically, to speed up the convergence of the parallel +projected Richardson method on the GPU cluster. We propose to use the point red-black +ordering technique\index{Iterative~method!Red-Black~ordering} to accelerate the +convergence. This technique is often used to increase the parallelism of iterative +methods for solving linear systems~\cite{ch13:ref13,ch13:ref14,ch13:ref15}. We +apply it to the projected Richardson method as a compromise between the Jacobi +and Gauss-Seidel iterative methods. + +The general principle of the red-black technique is as follows. Let $t$ be the +summation of the integer $x$-, $y$- and $z$-coordinates of a vector element $u(x,y,z)$ +on a three-dimensional domain: $t=x+y+z$. As is shown in Figure~\ref{ch13:fig:06.01}, +the red-black ordering technique consists in the parallel computing of the red +vector elements having even value $t$ by using the values of the black ones then, +the parallel computing of the black vector elements having odd values $t$ by using +the new values of the red ones. This technique can be implemented on the GPU in two different manners: \begin{itemize} \item among all launched threads ($NX\times ny$ threads), only one thread out of two computes its red or black vector element at a time or, \item all launched threads (on average half of $NX\times ny$ threads) compute the red vector elements first and, then, the black ones. \end{itemize} -However, in both solutions, for each memory transaction, only half of the memory segment addressed by a half-warp is used. So, the computation of the -red and black vector elements leads to use twice the initial number of memory transactions. Then, we apply the point red-black ordering accordingly to -the $y$-coordinate, as is shown in Figure~\ref{fig:06.02}. 
In this case, the vector elements having even $y$-coordinate are computed in parallel using
-the values of those having odd $y$-coordinate and then vice-versa. Moreover, in the GPU implementation of the parallel projected Richardson method (Section~\ref{sec:04}),
-we have shown that a sub-problem of size $(NX\times ny\times nz)$ is decomposed into $nz$ grids of size $(NX\times ny)$. Then, each kernel is executed
-in parallel by $NX\times ny$ GPU threads, so that each thread is in charge of $nz$ vector elements along $z$-axis (one vector element in each grid of
-the sub-problem). So, we propose to use the new values of the vector elements computed in grid $i$ to compute those of the vector elements in grid $i+1$.
-Listing~\ref{list:04} describes the kernel of the matrix-vector multiplication and the kernel of the vector elements updates of the parallel projected
-Richardson method using the red-black ordering technique.
-
-\lstinputlisting[label=list:04,caption=GPU kernels of the projected Richardson method using the red-black technique]{Chapters/chapter13/ex4.cu}
-
-Finally, we exploit the concurrent executions between the host functions and the GPU kernels provided by the GPU hardware and software. In fact, the kernel
-launches are asynchronous (when this environment variable is not disabled on the GPUs), such that the control is returned to the host (MPI process) before
-the GPU has completed the requested task (kernel)~\cite{ref12}.
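The sketch below illustrates this overlap between the host and the GPU under simplifying
assumptions: the kernel is only a stand-in for those of Listing~\ref{list:04}, the function
names are hypothetical, and error checking is omitted. Since the launch returns immediately,
the host can test the pending nonblocking MPI exchanges with \verb+MPI_Test()+ while the GPU
is still computing, and it only synchronizes before reading back the bordering elements.
\begin{lstlisting}
// Illustrative sketch only (hypothetical names): overlapping an asynchronous
// kernel launch with nonblocking MPI completion tests on the host.
#include <mpi.h>
#include <cuda_runtime.h>

// Stand-in for the relaxation kernels of the projected Richardson method.
__global__ void Dummy_Kernel(double *u, int n)
{
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) u[i] += 1.0;
}

void Overlap_Example(double *u_gpu, int n,
                     MPI_Request *requests, int nb_requests)
{
  dim3 block(256);
  dim3 grid((n + block.x - 1) / block.x);

  Dummy_Kernel<<<grid, block>>>(u_gpu, n); // returns before the GPU finishes

  for (int r = 0; r < nb_requests; r++) {  // progress the pending exchanges
    int flag = 0;
    MPI_Test(&requests[r], &flag, MPI_STATUS_IGNORE);
  }

  cudaDeviceSynchronize();                 // wait for the GPU before copying back
}
\end{lstlisting}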
z#5N$u>J#MEBtiR-%EFhq1U>-ZqeH$ErD$(X^G38u59XJLhYqp}l#eaCi<34)*`-Ik zH;V_oHa82rzg7n9ygsv!FAPcjAjh(RG7})-hI8o$c+&Ni8@IuZC&d5skFa{;c>RR*~3XM4O9DbefS!mm%@T5nvEi< zIewSAy86y+GzI59llUYRZPhVw_#y(iEi9n|JWwtUGjotR2M7eUHa7>CfF?J$AU8-51WL#Qa{dQd z1xEgN&Z4*^|2rWbkoLb4*71V+5ZSo71h~1mK_Czt7l@gQi_0UNq_*s zUhen4diA~EeO2#H%^zK-PIsS~>8_gU8Rsj9WF>^rWFkW+>H4L?xj;u~@l~*6{j<1G zario@?@5cMBAemhTO1XYh})o-&DT}OJ3-uvTagy;_9b=dc&KW}bTaQOGGwPy?|C0K zBwlKmMO;r;j>i?%d!OxcKLiE;bkv5aeEH^>4;iYxCXG=Ux``53pRnVnfcJq}jb!WE zWE&9Hd87-8Bv!oIN?v6S=+P(Sc-81$^@}DJDUE2A0Pe_+DVHNhG9y9aHmEAj@GTls zBbfhN`zdyj;TtQM_Jwd`uG4d!TVucaR8`jQ)mPN=_!LJ!N|h^EvRvF%UyXIeAfE1} z!fpA<+kp-dKa%A&19jzcY`*tN(~5f@s8=9-KPDeSQ@K^_$pWsfqB2GazS;1z)ScJW z;mdw7RKgasxc8Onzqe5x`qag~-l`Tw-;>-P8U{rNgi`2Grc>JrF;MQu>1rw~wUUAr z$Qx@tzCPAZQk{^-+s^CIg(T^e2&GoAyrQ*d#hsd=L+mNfS~}Ld8Iw?)5HikQzGoi-32^%B zva{p!U;H;HJy*lbm2St3?|8nKV=k!a&jceWEL!qQUw65&+9qZ;9&^BDH9N+Jx63e! zXu~*SwH5n}m+ast9|g6Nd~_5LX+ve}+FvM}lLsl{UzVqlKc8B52twtmtLtAaB}b35 zA6oY$fyUMFt(R9fscYx7lJ{4!<)}7mXJDNU?E{etSzc*dhY*0nq0N8v2*y)u$m7 zqZ@BXv$L9z-K>xeu7tsC){AVC+jN>!Ps`$`JW7b%&><0ot<#y$um8)~PW`fqQ#=`TtG{GZhgN_yX1 zF)A{|mTtSf{TOb%YgO*yxf^C(x8WE4%r1ndMV9(oRou3h*`y2pF~V^y3LXavITr#t5`AUVgP-J!hlKja9?21z z{(uAA|MrPj4Yep>iPH3Ayb1FRFR3GpROdKv!=C1S4K>1TDb0%bByWR;qR({vSwfi> zTbbLx6zh3>JD-LjxfvToBy2Ld9WKR`OGZ-sLOkYymEnq!@7u_vk)cPfArPy-{m}Ah z$E|e|C(ea8sejpQq>Cl+=PPRWP>}=Ky5b6fCINW7I0j}Yx*yO?JNZQnd2-TDP_29- z3v2O25bD_;an&quM9N!BBpjJ4>w+HimtfQhJF~qNt*8}&GS+^Ay|J$W%Z9u9^^AM6 z4$cLKvUv&uno3?y)Y@vNaB6q6C)N&M^sh>ukMv|EAs1J@Lzjw=cFh+emiWv%ALVN1 zQNth_8ayz7C*`uI2)DRx&Lg`v@T<7Dj4TO=--IM^CJ9hUcSqZ)32<+l2K#c{56#Zk zm=FNHCXdk=pKoN#A9krXSr5=nTnI>liP@CRT()djLC#8pTQ{tlOnOC5&|h-`oACTg zY^Z1#(3_(q8BsyekX&r`FSl-?em(69zw<*cHw1f*r7b;nSBxU!JbTQP^5yCGapDo{ z_77-u^N1Zzg%FdMfT=<5y2G7PtLV-n7fHq1ruYqkq2V35@0;1;C?c(i$>pb*@OPz@ ze{IFycKN28%i!|nvDV9yX7g~}S`(X}jzPsm42k^xJp;|5&cRt;Pr=C9w>yo`akp2% zOGilXT`ei`gH=bx+Rj9 z5$C)Kuz%GT>Pa=ad+O^K1!|U8^kpl0)GWKv^gVNX@yTb}>dWKt$*r=c^W}fh_bb#a zDpohI+09gV)-O1e+}j$@;z{rpcG3SWKwGg{EwQ9q>)IxNl8+J5P$kYIt~m%s3EgoG z0Ys(w2Pr5ojX&6}kc}R0_8ut|pa8VJsAV1`cY)Jwx_v;QTiZ_&-l0?~`uiK3Z`QO@HoQII_M|m4pJa7w`;RoAf1Pjbz>XrVZ$vQcA0KYh$xDkxiF=l%oCxo;hn0}#jkqA3OxFYd7FKQ&OvR?``pzV%%iFZft z7$GVlJupOwy`H9Cd=}PJ_zT0DnYKwyK*Hoy zaS0%uap}7>89rbyy@_x`=$e>^ab^SFTLWf87>Iy!&*GF8=k377N0#-SCo+^q$9rh~ z4s*olMNXCSCi{Mu#6m4RgyN>Zqod=~(D_jbam|`S?x=%>qVvhrkqPYWP(SgaKw7lB zQOJ+rtfgNj_uT<`sQ&zdK53U!c~z0m&gL4Em4mPO(0C9&!8`Q=c$N!OWnqD!U| zsi2^nq&?gW>G-3-@F+Pu;MtnNqQPuoFajaaD)l3h+SQQGT^os1o132Em%5vbKe16*D6*PQ-C`oB zLEW4Bfse+&+KXL8xIHN?F%O)BBurSeLrmawFoe zc8j$;?X{_oe;ioz5d`sOe0x_~ZU24G47>W<9GULLAJ&Vg0q-=2foe=nOk;7ST`owA zygNiGZThalUvbm2=yIwBg?hML>%F=DdH?h7!^0dGI~RS-tNPofuIuQ_!uxS}(>1KL z8hXqsrm(qQLedf8e-X@T&P|Qw*MJ!Fz;m&gFp`7dXQdZe*sX9`J>1?e-;v2p?9q{V z5J6@Z8-wwe;k{Y=i9$&NZXAzOGKns>vm%u~ryJc~El&0pZVvDY3)}BX^D)fS>TC9H zcl9lOUDZBA6=*O1@vYX%cmR~!1YeX_2ynNb9`Lnq?NPlyR-0~6onA2b$b&dio4(a9 z&uDCNRZVbsLF_QC1PA@P$gFDLO#Ih=kgMmm%FuA`Xsfp;?%W^KrP%F8 zFYg#%J>~G6!cJjM3`TbSoPVN+ez@^z+#CqFxy(EIqX3mo?kK0FW*e7p^_c7}U+ zx?5W~;s3QHG%rA;0 zVv4|&MW!%8p1K1(03P*uo`fD{YM!aoCk2Pyq@maxDgk-pQ5g_(t95B%N2}uPBvqb?V2~IIrd&qLf3e0m3 zP370=y|7~NY2bKx?epb2L%^obgYVry;I(_prBOiR=AZiZ;Apv3{vr|jL_|Nru}{Rh zv=AaGKKQ#Y)O66oK;X53Tf2zG@k+O8pI(}egb!ZZu53VWRQfEFr@Ry@1Xd{bXO~+b znho0Ft4dBe-aHSg7625Uid|3qxuCnho3$Y(CeD2F{-M6hH&(~8l-vZJ$^>qf&v^!T zsT{QU9(kCdY&qK_uSO9ILU0RwNlE#toNFlEB~QQGBBf83o5hvh-FJ8$H%SHUoTeLJ z5ib0A71KtMX$$rFnd0B&+j|vTy2$=z`~eYJQnid1P+nkmld3~hHV~vq#aJv7z`mdq z2;mt82nU~K)Hn7Yw~Qy0Bx=`ITP@M#4-`>UUs#lttu_5MZfR2|H$EKXhusZJnwrR*Ua!EOahy(-tNLR 
zh1K=#qp#Md17&*W*d#cAXjpVmbHNs$hfb*c_-RcK+q=d$W(PLzS2(iOq8QMyzOPMd z#VON1ug{9JP45|ZClU7BLeuru<`ii(Jn=Qmi)O`yR9{#z?ppLh{oazj&ysl^+d}&r z{6pM#*WJpC7(SZA@mS#%VNoZHZLEWPfiO(1wdTvSaEh(yxmbxx4w`7^aiOq0dyq02R^|h5`ds)JnTuTt!>fz>$|vBu=4W zm9Ffa1}amWI-h3z0nwcB6kDBd6r?@j$$0vDp7C=<4Bgh@%|m?j71v}q&Jm8QG$cJ* z@|yE)t;U~38IllM(^dlDCJU@v>duQ-F$RGsDpA%CoDT^RWA=+lzSYY7lfiaS3& zxMV_#N5xDQg>RNd{wN=JTV2Jp@X0fD5FHVjQn;i0Zfy~$=fMQL=@VBY<`A*ii zT}8}{dyn?;_*!}pwE3>`)a@4g;gaa{Xr5UgKDaA(r!g%Ya87BIN;ClPP@2=zlN34A zvN;qRdd!Ky?UYfM>Ep7zp{v9q4{S9HpIYU^s=Iday^w7EauUc;lCO zl2i<7pE``OFAM_vbX}%|D0By5xH8-L17SApOiH`y!`i({tPZQ^8uenb=-CnEJW^~)_(09e134v$);8`|(YWqjr%|BDD$ts2bVmzXc1C5D-mh|k^3=e-!2yg!O>_ir8Y4UIGKho9ZT zV$M{~N7hzYXs>VQ?vGBhTzwC1);_pmC5{6WeN^9&8wX}RH;OLtv-eCiDNdDnItAf+ zYdcjrIh)*;H&pu*M*mxN?Ksw|#Q3H#W*$ofd!B$AR{+RG@$O)`qFQ?t)60I;@Wxq* z95L};sUrU>NmG;?>;9-crkI!DsitznfCoqcyj?r_ z`sV!iOw)nY-SjsJS+8_HQ8^@TercK$Q_P&gJH)x*-P=Pu$7dIYBB>-BC_mKBD_Ay_ey;V{m(H8c&=w`PFt|eba~gDFPzj zQGv<4{K#+&QIe0UZ#rRb0wkLUEHN3Qrf9Fm1rIbF)E7BM{>0IX|0(TExi4vH5v!vY zj+G8oQ0)=htP6Fo;-1^KHyd4^G?S$i6}lE`8lpiP&;Xu26C~0NyT>ZEfj@2efQ^`p z^C0vOZ&25&;0?E)vjblim;YF_W?;&s5qr8FVwIzU;`TOd{Wh}l<9qbW+sV4wO&pq< zIuj1K_JzhDJT68+CbdU4bmWXc?s3wUE1r3~q^l2Jwfa zn$+|0DsU`MFq5-MNU`&<)IB>?P>XURvFllV&*bW?QF@N@m>=!V5BvPp)w<+3?USg? zhe;f31&+BV*`fGOU3LRh%8dCpWc;}b1al7OpBtFa*H?;ixV{zthBwMotpKJOTqWoglg}IqMo9#XcVH7Y$*en#Oh9>2!js;3si=d)&J{NWi+k zEmnQ^bJ>PNTpb@h>(;^5sCx-S#>$|5_pG(KymMG`&^&i=vYXf~2NtT&=hC8)TdYP~ zH$z{b-T)%Ki&j)y92uBD8Vuc=CoL(`+{5e&A4FUaQMv($m%K0V*K+us@>gi09}Is1 zM%|)T{}>*BJ~(6;3?vCTHr!_rM*n{GZbMsd8ucn*1^%b)?Wq9HR@pVeGH;yf%(W@+ zo@XWP(Ixn-5vxm)!)KPo+mPy1UqzApHrrwaQx_~XQCsYUM;qOKSmOP1jZn34r=*e= z(+E0K`zUS*D~mYW&U85@<}MjBlKl4B^cTWibxku}+)0*2gW^~c3n=sP%Q~1E?2+qx z@mp52KdW!-2IWy+=Y2mAz-nr-suf%N7RtTx9GtPP8$@h@Twk+>6vF3FHza1svqSGq zJbIq$2yH2(9z><{!uo{@s=lseyGRQ4e73H=hNt8UDXtXm8J7OcaPRgx zN!|R@Jjr?m^$_Um z)}`|tC;D3d)rX_+d7Kj0{PG_2Qahg?U5S{*u52yR^{zKu$5r&9VsFLZnj}WCCUbj* zWQ^A+z^{S+ejf=$=qG2KL@xht4Uc@BB78yO6&r=Y+PQftc{#M?9*U$G8pDDYAX84I znLBj~%seNAzkOP_2n2n_2*1w?JLu8!72Bmw9nI}X^-Y9qa;q623$upqPTKGqAwDK5(mdeXE8mIv zMOg@*`_pE_@t$wlGqD{*0B-O*?J>gigCY4dH+b2uU69v^nI-Okb&iAOgSuP`>SNpV zA)oeI+w>ix+lwmnW3pMcE2C0Ti*=`Z{<4JAdVPkPD+N*YO)6|8{J5SDk4DVy-dglZ z-5~pEf58iEpBHFnIsgn0psF&9QgX+59+yJ3xmD~0k#d!c$R}+xNGC3$t{4)=9Um(F zKD4L;5mW(DOFc5NnCz5pPcf9-Os$mmk%#_<$+oC(esD-Vr>G}q6r29Zl99!S)pw zTbo6A`_|B0QF0$_y}(Tmn<*~%Lpr98=In`gg^=FD@*4{rQ*&gr-2$6?7|lHYmIHrW z-~<2WJN8X-+4peAHN0o|9G<+J$_0(|6{1{Bbz$^5dM-0Qd{70~M< zXXC%%ax=5ou+|xK8hki8X_>j!mkPW69i-%WLhw_9^WLeAG>E^8C>KAc9l9v+XAp5W z<&BaHAX`kB)*8C{Ls(ir%d6LtRoo_aiLzs$r%WK~csXc%>ab%cYv5nn_xE4PK6x~s zG4N)RiLu}#1w{@iHUC=;-RI>DLc9EQx7+TskCU%|xL5hKfnlPC6ifg;M&;%gQg#Xl zlI&lXNKJ!J(LtQ|mvuPS_-2+ta`uS8HyfEg&+H)DI?T7w1llMOyRg4Qcg$FTzCHkjRoSM5@A zgju(OIw1bjoIgC8tF1eIko2nL^-dgTFeV`8e6{mpgxGFz`ImRC;gaT>o*QBiElC)$ z&2x~K%==7fvc|bBxLHrt4p5W)XPE>RKSKY36=p?~ugga`@#i)a2n#Q#k!@?YTp#2~_={~k~n z`tJu45&fqx2=w=zTZJWL
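To make this overlap concrete, the following host-side sketch, written under the same assumptions as the kernel above, launches the two colored updates asynchronously and progresses MPI communications on the host before synchronizing with the GPU; the function name, launch configuration, and MPI requests are illustrative and are not taken from the chapter's code.

\begin{lstlisting}
#include <cuda_runtime.h>
#include <mpi.h>

// Hedged host-side sketch (not the chapter's code): the kernel launches
// return immediately, so the MPI process can progress its communications
// while the GPU computes. All names are placeholders; the MPI requests are
// assumed to be persistent requests created with MPI_Send_init/MPI_Recv_init.
void relaxation_step(double *d_u, const double *d_f,
                     int NX, int ny, int nz,
                     MPI_Request *requests, int nb_requests)
{
  dim3 block(16, 16);
  dim3 grid((NX + block.x - 1) / block.x, (ny + block.y - 1) / block.y);

  // Asynchronous launches: control returns to the host before completion.
  red_black_update<<<grid, block>>>(d_u, d_f, NX, ny, nz, 0); // even y
  red_black_update<<<grid, block>>>(d_u, d_f, NX, ny, nz, 1); // odd y

  // Host work overlapped with the GPU computation.
  MPI_Startall(nb_requests, requests);
  MPI_Waitall(nb_requests, requests, MPI_STATUSES_IGNORE);

  // Explicit synchronization before the host reuses the GPU results.
  cudaDeviceSynchronize();
}
\end{lstlisting}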