From: raphael couturier
Date: Fri, 10 Oct 2014 20:02:48 +0000 (+0200)
Subject: update
X-Git-Url: https://bilbo.iut-bm.univ-fcomte.fr/and/gitweb/GMRES2stage.git/commitdiff_plain/5890d7827b348b48bcfa9f454f2a10f591deef14?ds=sidebyside

update
---

diff --git a/biblio.bib b/biblio.bib
index ec1c7e7..da3ffc3 100644
--- a/biblio.bib
+++ b/biblio.bib
@@ -69,4 +69,60 @@
 title = {The {U}niversity of {F}lorida Sparse Matrix Collection},
 year = {1997},
 note = {Digest, \url{http://www.cise.ufl.edu/research/sparse/matrices/}},
- }
\ No newline at end of file
+ }
+
+
+@article{Saad:1993,
+  author    = {Saad, Youcef},
+  title     = {A Flexible Inner-Outer Preconditioned {GMRES} Algorithm},
+  journal   = {SIAM Journal on Scientific Computing},
+  volume    = {14},
+  number    = {2},
+  pages     = {461--469},
+  month     = mar,
+  year      = {1993},
+  issn      = {1064-8275},
+  doi       = {10.1137/0914028},
+  publisher = {Society for Industrial and Applied Mathematics},
+  address   = {Philadelphia, PA, USA},
+}
+
+@book{bahicontascoutu,
+  author    = {Bahi, J. M. and Contassot-Vivier, S. and Couturier, R.},
+  title     = {{P}arallel Iterative Algorithms: From Sequential to Grid Computing},
+  series    = {Numerical Analysis and Scientific Computing},
+  publisher = {Chapman \& Hall/CRC},
+  year      = {2008},
+}
+
+@article{Nichols:1973:CTS,
+  author  = {Nancy K. Nichols},
+  title   = {On the Convergence of Two-Stage Iterative Processes for Solving Linear Equations},
+  journal = {SIAM Journal on Numerical Analysis},
+  volume  = {10},
+  number  = {3},
+  pages   = {460--469},
+  month   = jun,
+  year    = {1973},
+  issn    = {0036-1429},
+}
\ No newline at end of file
diff --git a/paper.tex b/paper.tex
index 25f1393..126ff34 100644
--- a/paper.tex
+++ b/paper.tex
@@ -439,7 +439,7 @@ GMRES.
 \end{abstract}
 
 \begin{IEEEkeywords}
-Iterative Krylov methods; sparse linear systems; residual minimization; PETSc; %à voir...
+Iterative Krylov methods; sparse linear systems; two-stage iteration; least-squares residual minimization; PETSc
 \end{IEEEkeywords}
 
 
@@ -547,38 +547,42 @@
 %
 % You must have at least 2 lines in the paragraph with the drop letter
 % (should never be an issue)
-Iterative methods have recently become more attractive than direct ones to solve very large
-sparse linear systems. They are more efficient in a parallel
-context, supporting thousands of cores, and they require less memory and arithmetic
-operations than direct methods. This is why new iterative methods are frequently
-proposed or adapted by researchers, and the increasing need to solve very large sparse
-linear systems has triggered the development of such efficient iterative techniques
-suitable for parallel processing.
-
-Most of the successful iterative methods currently available are based on so-called ``Krylov
-subspaces''. They consist in forming a basis of successive matrix
-powers multiplied by an initial vector, which can be for instance the residual. These methods use vectors orthogonality of the Krylov subspace basis in order to solve linear
-systems. The most known iterative Krylov subspace methods are conjugate
-gradient and GMRES ones (Generalized Minimal RESidual).
-
-
-However, iterative methods suffer from scalability problems on parallel
-computing platforms with many processors, due to their need of reduction
-operations, and to collective communications to achieve matrix-vector
-multiplications. The communications on large clusters with thousands of cores
-and large sizes of messages can significantly affect the performances of these
-iterative methods. As a consequence, Krylov subspace iteration methods are often used
-with preconditioners in practice, to increase their convergence and accelerate their
-performances. However, most of the good preconditioners are not scalable on
-large clusters.
-
-In this research work, a two-stage algorithm based on two nested iterations
-called inner-outer iterations is proposed. This algorithm consists in solving the sparse
-linear system iteratively with a small number of inner iterations, and restarting
-the outer step with a new solution minimizing some error functions over some
-previous residuals. This algorithm is iterative and easy to parallelize on large
-clusters. Furthermore, the minimization technique improves its convergence and
-performances.
+Iterative methods have recently become more attractive than direct ones to solve
+very large sparse linear systems~\cite{Saad2003}. They are more efficient in a
+parallel context, supporting thousands of cores, and they require less memory
+and fewer arithmetic operations than direct methods~\cite{bahicontascoutu}. This is
+why new iterative methods are frequently proposed or adapted by researchers, and
+the increasing need to solve very large sparse linear systems has triggered the
+development of efficient iterative techniques suitable for parallel
+processing.
+
+Most of the successful iterative methods currently available are based on
+so-called ``Krylov subspaces''. They consist in forming a basis of successive
+matrix powers multiplied by an initial vector, which can be, for instance, the
+residual. These methods exploit the orthogonality of the Krylov subspace basis
+vectors in order to solve linear systems. The best-known iterative Krylov
+subspace methods are the conjugate gradient and GMRES (Generalized Minimal
+RESidual) methods.
+
+
+However, iterative methods suffer from scalability problems on parallel
+computing platforms with many processors, due to their need for reduction
+operations and for collective communications to perform matrix-vector
+multiplications. The communications on large clusters with thousands of cores
+and large message sizes can significantly affect the performance of these
+iterative methods. As a consequence, Krylov subspace iteration methods are often
+used with preconditioners in practice, to improve their convergence and
+accelerate their performance. However, most of the good preconditioners are
+not scalable on large clusters.
+
+In this research work, a two-stage algorithm based on two nested iterations,
+called inner-outer iterations, is proposed. This algorithm consists in solving
+the sparse linear system iteratively with a small number of inner iterations,
+and restarting the outer step with a new solution that minimizes an error
+function over the previous residuals. For further information on two-stage
+iteration methods, interested readers are invited to
+consult~\cite{Nichols:1973:CTS}. Two-stage algorithms are easy to parallelize on
+large clusters. Furthermore, the least-squares minimization technique improves
+their convergence and performance.
 
 The present article is organized as follows. Related works are presented in
 Section~\ref{sec:02}. Section~\ref{sec:03} details the two-stage algorithm using
@@ -838,13 +842,14 @@ Core(TM) i7-3630QM CPU @ 2.40GHz with the version 3.5.1 of PETSc.
 
 In Table~\ref{tab:02}, some experiments comparing the solving of the linear
 systems obtained with the previous matrices with a GMRES variant and with our
 2-stage algorithm are given. In the second column, it can be noticed that either
-gmres or fgmres is used to solve the linear system. According to the matrices,
-different preconditioner is used. With TSIRM, the same solver and the same
-preconditionner are used. This Table shows that TSIRM can drastically reduce the
-number of iterations to reach the convergence when the number of iterations for
-the normal GMRES is more or less greater than 500. In fact this also depends on
-tow parameters: the number of iterations to stop GMRES and the number of
-iterations to perform the minimization.
+GMRES or FGMRES (Flexible GMRES)~\cite{Saad:1993} is used to solve the linear
+system. Depending on the matrix, a different preconditioner is used. With
+TSIRM, the same solver and the same preconditioner are used. This table shows
+that TSIRM can drastically reduce the number of iterations required to reach
+convergence when standard GMRES needs roughly 500 iterations or more. In fact,
+this also depends on two parameters: the number of iterations after which the
+inner GMRES is stopped, and the number of iterations used to perform the
+minimization.
 
 \begin{table}[htbp]
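The introduction rewritten in this commit describes Krylov subspace methods as forming a basis of successive matrix powers applied to an initial vector, such as the residual, and then exploiting the orthogonality of that basis. A minimal illustration of that construction, via the Arnoldi process used inside GMRES, is the Python/NumPy sketch below; the function name and parameters are illustrative assumptions, not code from this repository.

import numpy as np

def arnoldi_basis(A, r0, m):
    # Build an orthonormal basis of the Krylov subspace
    # K_m(A, r0) = span{r0, A r0, ..., A^(m-1) r0}
    # with modified Gram-Schmidt orthogonalization (Arnoldi).
    n = r0.size
    Q = np.zeros((n, m + 1))          # orthonormal basis vectors
    H = np.zeros((m + 1, m))          # Hessenberg matrix of recurrence coefficients
    Q[:, 0] = r0 / np.linalg.norm(r0)
    for j in range(m):
        w = A @ Q[:, j]               # next matrix power applied to the basis
        for i in range(j + 1):        # orthogonalize against previous vectors
            H[i, j] = Q[:, i] @ w
            w -= H[i, j] * Q[:, i]
        H[j + 1, j] = np.linalg.norm(w)
        if H[j + 1, j] < 1e-12:       # happy breakdown: invariant subspace found
            return Q[:, : j + 1], H[: j + 2, : j + 1]
        Q[:, j + 1] = w / H[j + 1, j]
    return Q, H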
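The same introduction summarizes the proposed two-stage scheme: a small number of inner GMRES iterations, followed by an outer restart from a solution that minimizes the residual over information kept from previous steps. The sketch below is a hypothetical sequential rendering of such an inner-outer iteration in Python/SciPy, not the paper's PETSc implementation of TSIRM; the function name, the storage of the last s iterates, and all parameter defaults are assumptions made for illustration.

import numpy as np
import scipy.sparse as sp
import scipy.sparse.linalg as spla

def two_stage_sketch(A, b, inner_iters=30, s=5, max_outer=40, tol=1e-10):
    # Inner stage: one short GMRES restart cycle, warm-started at x.
    # Outer stage: every s steps, replace x by the combination of the
    # stored iterates that minimizes the residual norm ||b - A x||_2.
    n = A.shape[0]
    x = np.zeros(n)
    S = np.zeros((n, s))              # last s outer iterates
    for k in range(max_outer):
        x, _ = spla.gmres(A, b, x0=x, restart=inner_iters, maxiter=1)
        S[:, k % s] = x
        if (k + 1) % s == 0:
            AS = A @ S                # least-squares step: min_a ||b - A S a||_2
            a, *_ = np.linalg.lstsq(AS, b, rcond=None)
            x = S @ a                 # restart the outer iteration from S a
        if np.linalg.norm(b - A @ x) <= tol * np.linalg.norm(b):
            break
    return x

# Tiny illustrative run on a random diagonally dominant sparse system:
A = sp.random(500, 500, density=0.01, format="csr") + 10.0 * sp.identity(500, format="csr")
b = np.ones(500)
x = two_stage_sketch(A, b)
print("relative residual:", np.linalg.norm(b - A @ x) / np.linalg.norm(b))

As in the paper's description, the inner solver does only a few iterations per outer step, so the outer least-squares minimization is what drives convergence; the choice of s trades memory for the quality of the minimization space.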