title = {The {U}niversity of {F}lorida Sparse Matrix Collection},
year = {1997},
note = {NA Digest, \url{http://www.cise.ufl.edu/research/sparse/matrices/}},
- }
\ No newline at end of file
+ }
+
+
+@article{Saad:1993,
+ author = {Saad, Youcef},
+ title = {A Flexible Inner-outer Preconditioned GMRES Algorithm},
+ journal = {SIAM J. Sci. Comput.},
+ issue_date = {March 1993},
+ volume = {14},
+ number = {2},
+ month = mar,
+ year = {1993},
+ issn = {1064-8275},
+ pages = {461--469},
+ numpages = {9},
+ url = {http://dx.doi.org/10.1137/0914028},
+ doi = {10.1137/0914028},
+ acmid = {160089},
+ publisher = {Society for Industrial and Applied Mathematics},
+ address = {Philadelphia, PA, USA},
+ keywords = {GMRES, Krylov subspace methods, non-Hermitian systems, preconditioned conjugate gradient, variable preconditioners},
+}
+
+@incollection{bahicontascoutu,
+title = {{P}arallel iterative algorithms: from sequential to grid computing},
+author = {Bahi, J.M. and Contassot-Vivier, S. and Couturier, R.},
+booktitle = {Numerical Analysis and Scientific Computing},
+publisher = {Chapman \& Hall/CRC},
+year = {2008},
+}
+
+@Article{Nichols:1973:CTS,
+  author = {Nancy K. Nichols},
+  title = {On the Convergence of Two-Stage Iterative Processes
+           for Solving Linear Equations},
+  journal = {SIAM Journal on Numerical Analysis},
+  volume = {10},
+  number = {3},
+  pages = {460--469},
+  month = jun,
+  year = {1973},
+  issn = {0036-1429},
+}
\ No newline at end of file
\end{abstract}
\begin{IEEEkeywords}
-Iterative Krylov methods; sparse linear systems; residual minimization; PETSc; %à voir...
+Iterative Krylov methods; sparse linear systems; two-stage iteration; least-squares residual minimization; PETSc
\end{IEEEkeywords}
% You must have at least 2 lines in the paragraph with the drop letter
% (should never be an issue)
-Iterative methods have recently become more attractive than direct ones to solve very large
-sparse linear systems. They are more efficient in a parallel
-context, supporting thousands of cores, and they require less memory and arithmetic
-operations than direct methods. This is why new iterative methods are frequently
-proposed or adapted by researchers, and the increasing need to solve very large sparse
-linear systems has triggered the development of such efficient iterative techniques
-suitable for parallel processing.
-
-Most of the successful iterative methods currently available are based on so-called ``Krylov
-subspaces''. They consist in forming a basis of successive matrix
-powers multiplied by an initial vector, which can be for instance the residual. These methods use vectors orthogonality of the Krylov subspace basis in order to solve linear
-systems. The most known iterative Krylov subspace methods are conjugate
-gradient and GMRES ones (Generalized Minimal RESidual).
-
-
-However, iterative methods suffer from scalability problems on parallel
-computing platforms with many processors, due to their need of reduction
-operations, and to collective communications to achieve matrix-vector
+Iterative methods have recently become more attractive than direct ones for
+solving very large sparse linear systems~\cite{Saad2003}. They are more
+efficient in a parallel context, supporting thousands of cores, and they
+require less memory and fewer arithmetic operations than direct
+methods~\cite{bahicontascoutu}. This is why new iterative methods are
+frequently proposed or adapted by researchers, and the increasing need to
+solve very large sparse linear systems has triggered the development of
+efficient iterative techniques suitable for parallel processing.
+
+Most of the successful iterative methods currently available are based on
+so-called ``Krylov subspaces''. They consist in building a basis from
+successive matrix powers applied to an initial vector, which can be for
+instance the residual. These methods exploit the orthogonality of the Krylov
+subspace basis vectors in order to solve linear systems. The best-known
+iterative Krylov subspace methods are the conjugate gradient (CG) and GMRES
+(Generalized Minimal RESidual) methods.
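+Formally, given an initial guess $x_0$ with residual $r_0 = b - A x_0$, these
+methods search for an approximate solution in the affine space
+$x_0 + \mathcal{K}_m(A, r_0)$, where
+\[
+\mathcal{K}_m(A, r_0) = \mathrm{span}\{r_0, A r_0, A^2 r_0, \ldots, A^{m-1} r_0\}
+\]
+is the Krylov subspace of order $m$.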
+
+
+However, iterative methods suffer from scalability problems on parallel
+computing platforms with many processors, due to their need for reduction
+operations and for collective communications to perform matrix-vector
multiplications. The communications on large clusters with thousands of cores
-and large sizes of messages can significantly affect the performances of these
-iterative methods. As a consequence, Krylov subspace iteration methods are often used
-with preconditioners in practice, to increase their convergence and accelerate their
-performances. However, most of the good preconditioners are not scalable on
-large clusters.
-
-In this research work, a two-stage algorithm based on two nested iterations
-called inner-outer iterations is proposed. This algorithm consists in solving the sparse
-linear system iteratively with a small number of inner iterations, and restarting
-the outer step with a new solution minimizing some error functions over some
-previous residuals. This algorithm is iterative and easy to parallelize on large
-clusters. Furthermore, the minimization technique improves its convergence and
-performances.
+and large message sizes can significantly degrade the performance of these
+iterative methods. As a consequence, Krylov subspace iteration methods are
+often used with preconditioners in practice, to speed up their convergence and
+improve their performance. However, most good preconditioners do not scale
+well on large clusters.
+
+In this research work, a two-stage algorithm based on two nested iterations
+called inner-outer iterations is proposed. This algorithm consists in solving
+the sparse linear system iteratively with a small number of inner iterations,
+and in restarting the outer step with a new solution that minimizes an error
+function over some previous residuals. For further information on two-stage
+iteration methods, interested readers are invited to
+consult~\cite{Nichols:1973:CTS}. Two-stage algorithms are easy to parallelize
+on large clusters. Furthermore, the least-squares minimization technique
+improves their convergence and performance.
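+
+As an illustration, a minimal sketch of such a two-stage scheme is given
+below in Python; it uses SciPy's GMRES as the inner solver, and the function
+name \texttt{tsirm} as well as the default parameter values are illustrative
+assumptions of ours, not the actual PETSc implementation evaluated in this
+paper.
+\begin{verbatim}
+import numpy as np
+import scipy.sparse.linalg as spla
+
+def tsirm(A, b, max_inner=30, s=8, max_outer=80, tol=1e-8):
+    # Two-stage iteration: inner GMRES cycles, periodically
+    # restarted from a least-squares combination of the last
+    # s inner solutions (a sketch, not the paper's PETSc code).
+    n = b.shape[0]
+    x = np.zeros(n)
+    S = np.zeros((n, s))  # last s inner solutions, column-wise
+    for k in range(max_outer):
+        # Inner stage: one GMRES cycle of at most max_inner steps.
+        x, _ = spla.gmres(A, b, x0=x, restart=max_inner, maxiter=1)
+        S[:, k % s] = x
+        # Outer stage: every s cycles, minimize ||b - A S a||_2
+        # over a and restart from the minimizer x = S a.
+        if (k + 1) % s == 0:
+            R = A @ S                      # n x s dense matrix
+            a, *_ = np.linalg.lstsq(R, b, rcond=None)
+            x = S @ a
+        if np.linalg.norm(b - A @ x) <= tol * np.linalg.norm(b):
+            break
+    return x
+\end{verbatim}
+The outer least-squares problem has only $s$ columns, so its cost is
+negligible compared with that of the inner sparse solves.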
The present article is organized as follows. Related works are presented in
Section~\ref{sec:02}. Section~\ref{sec:03} details the two-stage algorithm using
In Table~\ref{tab:02}, some experiments comparing the solution of the linear
systems associated with the previous matrices with a GMRES variant and with our
two-stage algorithm are given. In the second column, it can be noticed that either
-gmres or fgmres is used to solve the linear system. According to the matrices,
-different preconditioner is used. With TSIRM, the same solver and the same
-preconditionner are used. This Table shows that TSIRM can drastically reduce the
-number of iterations to reach the convergence when the number of iterations for
-the normal GMRES is more or less greater than 500. In fact this also depends on
-tow parameters: the number of iterations to stop GMRES and the number of
-iterations to perform the minimization.
+GMRES or FGMRES (Flexible GMRES)~\cite{Saad:1993} is used to solve the linear
+system. Depending on the matrix, a different preconditioner is used. With
+TSIRM, the same solver and the same preconditioner are used. This table shows
+that TSIRM can drastically reduce the number of iterations needed to reach
+convergence when the number of iterations of the normal GMRES is roughly
+greater than 500. In fact, this also depends on two parameters: the number of
+iterations after which the inner GMRES is stopped, and the number of
+iterations used to perform the minimization.
\begin{table}[htbp]