acmid = {355989},
publisher = {ACM},
address = {New York, NY, USA},
-}
\ No newline at end of file
+}
+
+@Misc{petsc-web-page,
+ author = {Satish Balay and Shrirang Abhyankar and Mark~F. Adams and Jed Brown and Peter Brune
+ and Kris Buschelman and Victor Eijkhout and William~D. Gropp
+ and Dinesh Kaushik and Matthew~G. Knepley
+ and Lois Curfman McInnes and Karl Rupp and Barry~F. Smith
+ and Hong Zhang},
+ title = {{PETS}c {W}eb page},
+ howpublished = {\url{http://www.mcs.anl.gov/petsc}},
+ year = {2014}
+ }
\end{abstract}
\begin{IEEEkeywords}
-Iterative Krylov methods; sparse linear systems; error minimization; PETSc; %à voir...
+Iterative Krylov methods; sparse linear systems; residual minimization; PETSc; % to be decided...
\end{IEEEkeywords}
\State retrieve error
\State $S_{k \bmod s}=x^k$ \label{algo:store}
\If {$k \bmod s=0$ {\bf and} error $>\epsilon_{kryl}$}
- \State $R=AS$ \Comment{compute dense matrix}
+ \State $R=AS$ \Comment{compute dense matrix} \label{algo:matrix_mul}
    \State Solve least-squares problem $\underset{\alpha\in\mathbb{R}^{s}}{\min}\|b-R\alpha\|_2$ \label{algo:}
\State $x^k=S\alpha$ \Comment{compute new solution}
\EndIf
required for that: the maximum number of iterations and the threshold to stop the
method.
-To summarize, the important parameters of are:
+To summarize, the important parameters of TSARM are:
\begin{itemize}
\item $\epsilon_{kryl}$, the threshold used to stop the Krylov method
\item $max\_iter_{kryl}$, the maximum number of iterations of the Krylov method
\section{Parallelization}
\label{sec:05}
+The parallelization of TSARM relies on the parallelization of all its
+parts. More precisely, all the parts except the least-squares step are
+straightforward to perform in parallel. In order to develop a parallel
+version of our code, we have chosen to use PETSc~\cite{petsc-web-page}. The
+matrix-matrix multiplication of line~\ref{algo:matrix_mul} is implemented
+efficiently since the matrix $A$ is sparse and since the matrix $S$ contains
+few columns in practice. As explained previously, at least two methods seem
+suitable to solve the least-squares minimization: CGLS and LSQR.
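+
+As an illustration, the sketch below shows how the product $R=AS$ can be
+formed with PETSc's \texttt{MatMatMult}. The variable names and the storage
+of $S$ as a dense \texttt{MATDENSE} matrix are assumptions made for this
+example, not a description of our actual implementation.
+\begin{verbatim}
+Mat A, S, R;
+/* A: distributed sparse matrix (MATAIJ),
+   S: distributed dense matrix (MATDENSE),
+   both assembled beforehand. */
+MatMatMult(A, S, MAT_INITIAL_MATRIX,
+           PETSC_DEFAULT, &R);
+/* later calls can reuse the structure of R: */
+MatMatMult(A, S, MAT_REUSE_MATRIX,
+           PETSC_DEFAULT, &R);
+\end{verbatim}
+Since $S$ has only $s$ columns, the cost of this product is comparable to
+that of $s$ sparse matrix-vector multiplications.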
+
+In the following we recall the CGLS algorithm. The LSQR method follows more
+or less the same principle but requires more space to present, so we only explain the parallelization of CGLS, which is similar to that of LSQR.
+
+\begin{algorithm}[t]
+\caption{CGLS}
+\begin{algorithmic}[1]
+ \Input $A$ (matrix), $b$ (right-hand side)
+ \Output $x$ (solution vector)\vspace{0.2cm}
+ \State $r=b-Ax$
+ \State $p=A'r$
+ \State $s=p$
+ \State $g=||s||^2_2$
+  \For {$k=1,2,3,\ldots$ until convergence ($g<\epsilon_{ls}$)} \label{algo2:conv}
+ \State $q=Ap$
+ \State $\alpha=g/||q||^2_2$
+    \State $x=x+\alpha p$
+    \State $r=r-\alpha q$
+    \State $s=A'r$
+    \State $g_{old}=g$
+    \State $g=||s||^2_2$
+    \State $\beta=g/g_{old}$
+    \State $p=s+\beta p$ \Comment{update descent direction}
+  \EndFor
+\end{algorithmic}
+\label{algo:02}
+\end{algorithm}
+
+
+In each iteration of CGLS, there are two matrix-vector multiplications (one
+with $A$ and one with $A'$) and some classical operations: dot products,
+norms, and multiplications and additions of vectors. All these operations
+are easy to implement in PETSc or similar environments.
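+
+For instance, the body of the loop of Algorithm~\ref{algo:02} maps to PETSc
+primitives as sketched below. The variable names are assumptions made for
+this example, and the setup of the matrix and vectors is omitted.
+\begin{verbatim}
+MatMult(A, p, q);            /* q = A p       */
+VecNorm(q, NORM_2, &qn);
+alpha = g / (qn * qn);
+VecAXPY(x, alpha, p);        /* x = x+alpha p */
+VecAXPY(r, -alpha, q);       /* r = r-alpha q */
+MatMultTranspose(A, r, s);   /* s = A' r      */
+g_old = g;
+VecNorm(s, NORM_2, &sn);
+g = sn * sn;
+beta = g / g_old;
+VecAYPX(p, beta, s);         /* p = s+beta p  */
+\end{verbatim}
+The two matrix-vector products typically involve only point-to-point
+communications between a few processes, whereas each norm requires a global
+reduction over all processes.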
+
%%%*********************************************************
%%%*********************************************************
\section{Experiments using PETSc}
future plans: \\
- study other kinds of matrices, problems, inner solvers\\
+- test the influence of all the parameters\\
- adaptive number of outer iterations to minimize\\
- other methods to minimize the residuals?\\
- implement our solver inside PETSc