% use a multiple column layout for up to two different
% affiliations
-\author{\IEEEauthorblockN{Rapha\"el Couturier\IEEEauthorrefmark{1}, Lilia Ziane Khodja \IEEEauthorrefmark{2}, and Christophe Guyeux\IEEEauthorrefmark{1}}
+\author{\IEEEauthorblockN{Rapha\"el Couturier\IEEEauthorrefmark{1}, Lilia Ziane Khodja\IEEEauthorrefmark{2}, and Christophe Guyeux\IEEEauthorrefmark{1}}
\IEEEauthorblockA{\IEEEauthorrefmark{1} Femto-ST Institute, University of Franche Comte, France\\
Email: \{raphael.couturier,christophe.guyeux\}@univ-fcomte.fr}
\IEEEauthorblockA{\IEEEauthorrefmark{2} INRIA Bordeaux Sud-Ouest, France\\
\begin{algorithmic}[1]
\Input $A$ (sparse matrix), $b$ (right-hand side)
\Output $x$ (solution vector)\vspace{0.2cm}
- \State Set the initial guess $x^0$
+ \State Set the initial guess $x_0$
\For {$k=1,2,3,\ldots$ until convergence (error$<\epsilon_{tsirm}$)} \label{algo:conv}
- \State $x^k=Solve(A,b,x^{k-1},max\_iter_{kryl})$ \label{algo:solve}
+ \State $x_k=Solve(A,b,x_{k-1},max\_iter_{kryl})$ \label{algo:solve}
\State retrieve error
- \State $S_{k \mod s}=x^k$ \label{algo:store}
+ \State $S_{k \mod s}=x_k$ \label{algo:store}
\If {$k \mod s=0$ {\bf and} error$>\epsilon_{kryl}$}
\State $R=AS$ \Comment{compute dense matrix} \label{algo:matrix_mul}
- \State $\alpha=Solve\_Least\_Squares(R,b,max\_iter_{ls})$ \label{algo:}
- \State $x^k=S\alpha$ \Comment{compute new solution}
+ \State $\alpha=Least\_Squares(R,b,max\_iter_{ls})$ \label{algo:ls}
+ \State $x_k=S\alpha$ \Comment{compute new solution}
\EndIf
\EndFor
\end{algorithmic}
equals the restart number of the GMRES-like method. Moreover, a tolerance
threshold must be specified for the solver. In practice, this threshold must be
much smaller than the convergence threshold of the TSIRM algorithm (\emph{i.e.}
-$\epsilon_{tsirm}$). Line~\ref{algo:store}, $S_{k~ mod~ s}=x^k$ consists in copying the
-solution $x_k$ into the column $k~ mod~ s$ of the matrix $S$. After the
+$\epsilon_{tsirm}$). Line~\ref{algo:store}, $S_{k \mod s}=x_k$, consists of copying the
+solution $x_k$ into the column $k \mod s$ of the matrix $S$, where $S$ is a matrix of size $n\times s$ whose column vector $i$ is denoted by $S_i$. After the
minimization, the matrix $S$ is reused with the new values of the residuals. To
solve the minimization problem, an iterative method is used. Two parameters are
required for that: the maximum number of iterations and the threshold to stop
the method; a sequential sketch of the complete algorithm is given after this list.
\end{itemize}
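+
+To make the role of these parameters concrete, the following is a minimal sequential sketch of TSIRM in Python, using SciPy's restarted GMRES as inner solver. It is illustrative only: the dense least-squares solve stands in for the iterative CGLS/LSQR minimization discussed below, and names such as \texttt{max\_outer} are ours, not those of the actual PETSc implementation.
+\begin{verbatim}
+import numpy as np
+from scipy.sparse.linalg import gmres
+
+def tsirm(A, b, s=12, m=30, eps_tsirm=1e-10,
+          eps_kryl=1e-12, max_outer=10000):
+    # x_0: initial guess; S stores the last s iterates column-wise
+    x = np.zeros(b.shape[0])
+    S = np.zeros((b.shape[0], s))
+    for k in range(1, max_outer + 1):
+        # Stage 1: one cycle of GMRES(m), warm-started from x_{k-1}
+        x, _ = gmres(A, b, x0=x, restart=m, maxiter=1)
+        error = np.linalg.norm(b - A @ x)   # retrieve error
+        if error < eps_tsirm:               # outer convergence test
+            break
+        S[:, k % s] = x                     # S_{k mod s} = x_k
+        if k % s == 0 and error > eps_kryl:
+            R = A @ S                       # dense n x s matrix
+            # Stage 2: least-squares minimization of ||b - R alpha||
+            alpha, *_ = np.linalg.lstsq(R, b, rcond=None)
+            x = S @ alpha                   # new solution
+    return x, k
+\end{verbatim}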
-The parallelisation of TSIRM relies on the parallelization of all its
+The parallelization of TSIRM relies on the parallelization of all its
parts. More precisely, except the least-squares step, all the other parts are
straightforward to carry out in parallel. In order to develop a parallel version of
our code, we have chosen to use PETSc~\cite{petsc-web-page}. For
line~\ref{algo:matrix_mul}, the matrix-matrix multiplication is implemented
efficiently since the matrix $A$ is sparse and since the matrix $S$ contains few
-colums in practice. As explained previously, at least two methods seem to be
+columns in practice. As explained previously, at least two methods seem to be
interesting for solving the least-squares minimization: CGLS and LSQR.
In the following we recall the CGLS algorithm. The LSQR method follows more or
less the same principles.
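+
+For reference, a minimal NumPy sketch of CGLS applied to the present minimization $\min_{\alpha}\|b-R\alpha\|_2$ is given below; it is a sequential illustration under our own naming, the parallel solver itself relying on the PETSc implementations.
+\begin{verbatim}
+import numpy as np
+
+def cgls(R, b, max_iter_ls=40, tol=1e-14):
+    # Conjugate gradient on the normal equations
+    # R^T R alpha = R^T b, without forming R^T R explicitly
+    alpha = np.zeros(R.shape[1])
+    r = b - R @ alpha            # residual of the LS problem
+    s = R.T @ r                  # gradient direction
+    p = s.copy()
+    gamma = s @ s
+    for _ in range(max_iter_ls):
+        q = R @ p
+        step = gamma / (q @ q)
+        alpha += step * p
+        r -= step * q
+        s = R.T @ r
+        gamma_new = s @ s
+        if np.sqrt(gamma_new) < tol:   # stationarity reached
+            break
+        p = s + (gamma_new / gamma) * p
+        gamma = gamma_new
+    return alpha
+\end{verbatim}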
\label{sec:04}
Let us recall the following result, see~\cite{Saad86}.
\begin{proposition}
+\label{prop:saad}
Suppose that $A$ is a positive real matrix with symmetric part $M$. Then the residual norm provided at the $m$-th step of GMRES satisfies:
\begin{equation}
||r_m|| \leqslant \left(1-\dfrac{\alpha}{\beta}\right)^{\frac{m}{2}} ||r_0|| ,
\end{equation}
-where $\alpha = \lambda_min(M)^2$ and $\beta = \lambda_max(A^T A)$, which proves
+where $\alpha = \lambda_{\min}(M)^2$ and $\beta = \lambda_{\max}(A^T A)$, which proves
the convergence of GMRES($m$) for all $m$ under that assumption regarding $A$.
\end{proposition}
+We can now claim that,
+\begin{proposition}
+If $A$ is a positive real matrix and GMRES($m$) is used as the inner solver, then the TSIRM algorithm is convergent. Furthermore, if $r_k$ denotes the
+$k$-th residual of TSIRM, then
+we still have:
+\begin{equation}
+||r_k|| \leqslant \left(1-\dfrac{\alpha}{\beta}\right)^{\frac{km}{2}} ||r_0|| ,
+\end{equation}
+where $\alpha$ and $\beta$ are defined as in Proposition~\ref{prop:saad}.
+\end{proposition}
+
+\begin{proof}
+We will prove by a mathematical induction that, for each $k \in \mathbb{N}^\ast$,
+$||r_k|| \leqslant \left(1-\dfrac{\alpha}{\beta}\right)^{\frac{km}{2}} ||r_0||.$
+
+The base case is obvious: for $k=1$, the TSIRM algorithm simply consists of applying GMRES($m$) once, leading to a new residual $r_1$ that satisfies the claimed bound by Proposition~\ref{prop:saad}.
+
+Suppose now that the claim holds for all $j=1, 2, \hdots, k-1$, that is, $\forall j \in \{1,2,\hdots, k-1\}$, $||r_j|| \leqslant \left(1-\dfrac{\alpha}{\beta}\right)^{\frac{jm}{2}} ||r_0||$.
+We will show that the statement holds too for $r_k$. Two situations can occur:
+\begin{itemize}
+\item If $k \mod s \neq 0$, then the TSIRM algorithm consists of executing GMRES($m$) once. In that case, we obtain $||r_k|| \leqslant \left(1-\dfrac{\alpha}{\beta}\right)^{\frac{m}{2}} ||r_{k-1}||\leqslant \left(1-\dfrac{\alpha}{\beta}\right)^{\frac{km}{2}} ||r_0||$ by the inductive hypothesis.
+\item Else, the TSIRM algorithm consists of two stages: a first GMRES($m$) execution leads to a temporary $x_k$ whose residual satisfies $||b-Ax_k|| \leqslant \left(1-\dfrac{\alpha}{\beta}\right)^{\frac{m}{2}} ||r_{k-1}||\leqslant \left(1-\dfrac{\alpha}{\beta}\right)^{\frac{km}{2}} ||r_0||$, and then a least-squares resolution.
+Let $\operatorname{span}(S) = \left \{ \sum_{i=1}^n \lambda_i v_i \Big| n \in \mathbb{N}, v_i \in S, \lambda _i \in \mathbb{R} \right \}$ be the linear span of a set of real vectors $S$. Since the columns of $S$ contain the last $s$ iterates, among which the temporary $x_k$, we have\\
+$\min_{\alpha \in \mathbb{R}^s} ||b-R\alpha ||_2 = \min_{\alpha \in \mathbb{R}^s} ||b-AS\alpha ||_2$
+
+$\begin{array}{ll}
+& = \min_{x \in \operatorname{span}\left(x_{k-s+1}, x_{k-s+2}, \hdots, x_{k} \right)} ||b-Ax ||_2\\
+& \leqslant \min_{x \in \operatorname{span}\left( x_{k} \right)} ||b-Ax ||_2\\
+& = \min_{\lambda \in \mathbb{R}} ||b-\lambda Ax_{k} ||_2\\
+& \leqslant ||b-Ax_{k}||_2 ,
+\end{array}$
+
+so the residual produced by the least-squares stage is bounded by that of the temporary $x_k$, which satisfies the claimed bound; this completes the induction.
+\end{itemize}
+\end{proof}
+
+We can remark that, at each iteration, the residual norm of the TSIRM algorithm
+is thus lower than or equal to that of the restarted GMRES method.
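+
+For illustration, assume a hypothetical ratio $\alpha/\beta = 0.02$ and a restart parameter $m=30$. After $k=10$ outer iterations, the bound guarantees
+\begin{equation}
+||r_{10}|| \leqslant (1-0.02)^{\frac{10 \times 30}{2}} ||r_0|| = 0.98^{150} \, ||r_0|| \approx 4.8 \times 10^{-2} \, ||r_0|| .
+\end{equation}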
%%%*********************************************************
%%%*********************************************************
\hline
\end{tabular}
-\caption{Comparison of (F)GMRES and 2 stage (F)GMRES algorithms in sequential with some matrices, time is expressed in seconds.}
+\caption{Comparison of (F)GMRES and TSIRM with (F)GMRES as inner solver, in sequential, on some matrices; time is expressed in seconds.}
\label{tab:02}
\end{center}
\end{table}
\begin{itemize}
\item ex15 is an example which solves an operator in parallel using a finite
  difference scheme. The diagonal is equal to 4 and the 4 extra-diagonals
- representing the neighbors in each directions is equal to -1. This example is
+ representing the neighbors in each direction are equal to -1. This example is
used in many physical phenomena, for example, heat and fluid flow, wave
- propagation...
+ propagation, etc.
\item ex54 is another example based on a 2D problem discretized with quadrilateral
  finite elements. For this example, the user can define the scaling of the material
- coefficient in embedded circle, it is called $\alpha$.
+ coefficient in an embedded circle, denoted by $\alpha$.
\end{itemize}
-For more technical details on these applications, interested reader are invited
-to read the codes available in the PETSc sources. Those problem have been
-chosen because they are scalable with many cores. We have tested other problem
-but they are not scalable with many cores.
+For more technical details on these applications, interested readers are invited
+to read the codes available in the PETSc sources. Those problems have been
+chosen because they are scalable with many cores, which is not the case for the other problems that we have tested.
In the following, larger experiments are described on two large-scale architectures: Curie and Juqueen... {\bf description...}\\
-{\bf Description of preconditioners}
+{\bf Description of preconditioners}\\
\begin{table*}[htbp]
\begin{center}
\hline
\end{tabular}
-\caption{Comparison of FGMRES and TSIRM with FGMRES for example ex15 of PETSc with two preconditioner (mg and sor) with 25,000 components per core on Juqueen (threshold 1e-3, restart=30, s=12), time is expressed in seconds.}
+\caption{Comparison of FGMRES and TSIRM with FGMRES for example ex15 of PETSc with two preconditioners (mg and sor) with 25,000 components per core on Juqueen (threshold 1e-3, restart=30, s=12), time is expressed in seconds.}
\label{tab:03}
\end{center}
\end{table*}
Table~\ref{tab:03} shows the execution times and the number of iterations of
-example ex15 of PETSc on the Juqueen architecture. Differents number of cores
-are studied rangin from 2,048 upto 16,383. Two preconditioners have been
-tested. For those experiments, the number of components (or unknown of the
-problems) per processor is fixed to 25,000, also called weak scaling. This
+example ex15 of PETSc on the Juqueen architecture. Different numbers of cores
+are studied, ranging from 2,048 up to 16,383. Two preconditioners have been
+tested: {\it mg} and {\it sor}. For those experiments, the number of components (or unknowns of the
+problems) per core is fixed at 25,000, a setting also known as weak scaling. This
number may seem relatively small. In fact, for some applications that need a lot
of memory, the number of components per processor sometimes needs to be
small.
-In this Table, we can notice that TSIRM is always faster than FGMRES. The last
+In Table~\ref{tab:03}, we can notice that TSIRM is always faster than FGMRES. The last
column shows the ratio between FGMRES and the best version of TSIRM according to
the minimization procedure: CGLS or LSQR. Even if we have computed the worst
-case between CGLS and LSQR, it is clear that TSIRM is alsways faster than
-FGMRES. For this example, the multigrid preconditionner is faster than SOR. The
+case between CGLS and LSQR, it is clear that TSIRM is always faster than
+FGMRES. For this example, the multigrid preconditioner is faster than SOR. The
gain of TSIRM over FGMRES is more or less similar for the two
preconditioners. Looking at the number of iterations needed to reach convergence,
it is obvious that TSIRM allows a clear reduction of the number of iterations.
In Figure~\ref{fig:01}, the number of iterations per second corresponding to
-Table~\ref{tab:01} is displayed. It can be noticed that the number of
-iterations per second of FMGRES is constant whereas it decrease with TSIRM with
-both preconditioner. This can be explained by the fact that when the number of
-core increases the time for the minimization step also increases but, generally,
+Table~\ref{tab:03} is displayed. It can be noticed that the number of
+iterations per second of FGMRES is constant whereas it decreases with TSIRM with
+both preconditioners. This can be explained by the fact that when the number of
+cores increases, the time for the least-squares minimization step also increases but, generally,
when the number of cores increases, the number of iterations to reach the
threshold also increases, and, in that case, TSIRM is more efficient at reducing
the number of iterations. So, the overall benefit of using TSIRM is significant.
\begin{tabular}{|r|r|r|r|r|r|r|r|r|}
\hline
- nb. cores & threshold & \multicolumn{2}{c|}{GMRES} & \multicolumn{2}{c|}{TSIRM CGLS} & \multicolumn{2}{c|}{TSIRM LSQR} & best gain \\
+ nb. cores & threshold & \multicolumn{2}{c|}{FGMRES} & \multicolumn{2}{c|}{TSIRM CGLS} & \multicolumn{2}{c|}{TSIRM LSQR} & best gain \\
\cline{3-8}
& & Time & \# Iter. & Time & \# Iter. & Time & \# Iter. & \\\hline \hline
2,048 & 8e-5 & 108.88 & 16,560 & 23.06 & 3,630 & 22.79 & 3,630 & 4.77 \\
\hline
\end{tabular}
-\caption{Comparison of FGMRES and 2 stage FGMRES algorithms for ex54 of Petsc (both with the MG preconditioner) with 25000 components per core on Curie (restart=30, s=12), time is expressed in seconds.}
+\caption{Comparison of FGMRES and TSIRM with FGMRES for ex54 of PETSc (both with the MG preconditioner) with 25,000 components per core on Curie (restart=30, s=12), time is expressed in seconds.}
\label{tab:04}
\end{center}
\end{table*}
\begin{tabular}{|r|r|r|r|r|r|r|r|r|r|r|}
\hline
- nb. cores & \multicolumn{2}{c|}{GMRES} & \multicolumn{2}{c|}{TSIRM CGLS} & \multicolumn{2}{c|}{TSIRM LSQR} & best gain & \multicolumn{3}{c|}{efficiency} \\
+ nb. cores & \multicolumn{2}{c|}{FGMRES} & \multicolumn{2}{c|}{TSIRM CGLS} & \multicolumn{2}{c|}{TSIRM LSQR} & best gain & \multicolumn{3}{c|}{efficiency} \\
\cline{2-7} \cline{9-11}
- & Time & \# Iter. & Time & \# Iter. & Time & \# Iter. & & GMRES & TS CGLS & TS LSQR\\\hline \hline
+ & Time & \# Iter. & Time & \# Iter. & Time & \# Iter. & & FGMRES & TS CGLS & TS LSQR\\\hline \hline
 512 & 3,969.69 & 33,120 & 709.57 & 5,790 & 622.76 & 5,070 & 6.37 & 1 & 1 & 1 \\
 1,024 & 1,530.06 & 25,860 & 290.95 & 4,830 & 307.71 & 5,070 & 5.25 & 1.30 & 1.21 & 1.01 \\
 2,048 & 919.62 & 31,470 & 237.52 & 8,040 & 194.22 & 6,510 & 4.73 & 1.08 & 0.75 & 0.80\\
\hline
\end{tabular}
-\caption{Comparison of FGMRES and 2 stage FGMRES algorithms for ex54 of Petsc (both with the MG preconditioner) with 204,919,225 components on Curie with different number of cores (restart=30, s=12, threshol 5e-5), time is expressed in seconds.}
+\caption{Comparison of FGMRES and TSIRM with FGMRES for ex54 of PETSc (both with the MG preconditioner) with 204,919,225 components on Curie with different numbers of cores (restart=30, s=12, threshold 5e-5), time is expressed in seconds.}
\label{tab:05}
\end{center}
\end{table*}
Future work: \\
- study other kinds of matrices, problems, inner solvers\\
-- test the influence of all the parameters\\
+- test the influence of all parameters\\
- adaptive number of outer iterations to minimize\\
- other methods to minimize the residuals?\\
- implement our solver inside PETSc
%%%*********************************************************
\section*{Acknowledgment}
This paper is partially funded by the Labex ACTION program (contract
-ANR-11-LABX-01-01). We acknowledge PRACE for awarding us access to resource
+ANR-11-LABX-01-01). We acknowledge PRACE for awarding us access to resources
Curie and Juqueen, respectively based in France and Germany.
% that's all folks
\end{document}