From: ziane
Date: Wed, 22 Apr 2015 15:10:11 +0000 (+0200)
Subject: ec 04: tw-stage methods, biblio
X-Git-Url: https://bilbo.iut-bm.univ-fcomte.fr/and/gitweb/rce2015.git/commitdiff_plain/834d4524e2a2dc64082fa6dd1db207b703af53ea?ds=inline;hp=663c29e4f7bb8e7d7d9d75ef19a6be1fed40f48a

ec 04: tw-stage methods, biblio
---

diff --git a/biblio.bib b/biblio.bib
new file mode 100644
index 0000000..d81107e
--- /dev/null
+++ b/biblio.bib
@@ -0,0 +1,61 @@
+@article{O'leary85,
+ author = {O'Leary, D.P. and White, R.E.},
+ title = {Multi-splittings of matrices and parallel solution of linear systems},
+ journal = {SIAM Journal on Algebraic and Discrete Methods},
+ year = {1985},
+ volume = {6},
+ pages = {630--640}
+}
+
+@article{White86,
+ author = {White, R.E.},
+ title = {Parallel algorithms for nonlinear problems},
+ journal = {SIAM Journal on Algebraic and Discrete Methods},
+ volume = {7},
+ number = {1},
+ year = {1986},
+ pages = {137--149},
+ publisher = {Society for Industrial and Applied Mathematics},
+ address = {Philadelphia, PA, USA},
+}
+
+@book{bahi07,
+ title = {Parallel Iterative Algorithms: from sequential to grid computing},
+ author = {Bahi, J.M. and Contassot-Vivier, S. and Couturier, R.},
+ publisher = {{Chapman \& Hall/CRC}},
+ series = {Numerical Analysis \& Scientific Computing Series},
+ pages = {240},
+ year = {2007},
+}
+
+@article{bahi97,
+ author = {Bahi, J.M. and Miellou, J.C. and Rhofir, K.},
+ title = {Asynchronous multisplitting methods for nonlinear fixed point problems},
+ journal = {Numerical Algorithms},
+ volume = {15},
+ number = {3--4},
+ pages = {315--345},
+ year = {1997},
+}
+
+@article{Bai99,
+ author = {Bai, Zhong-Zhi and Evans, D.J. and Calinescu, R.C.},
+ title = {A class of asynchronous multisplitting two-stage iterations for large sparse block systems of weakly nonlinear equations},
+ journal = {Journal of Computational and Applied Mathematics},
+ volume = {110},
+ number = {2},
+ pages = {271--286},
+ year = {1999},
+}
+
+@article{saad86,
+ title = {{GMRES}: {A} {G}eneralized {M}inimal {R}esidual algorithm for solving nonsymmetric linear systems},
+ author = {Saad, Y. and Schultz, M.H.},
+ journal = {SIAM Journal on Scientific and Statistical Computing},
+ volume = {7},
+ number = {3},
+ pages = {856--869},
+ year = {1986},
+}
+
+
diff --git a/paper.tex b/paper.tex
index 3120437..375f7c1 100644
--- a/paper.tex
+++ b/paper.tex
@@ -52,7 +52,8 @@
 \algnewcommand\algorithmicoutput{\textbf{Output:}}
 \algnewcommand\Output{\item[\algorithmicoutput]}
-\newcommand{\MI}{\mathit{MaxIter}}
+\newcommand{\TOLG}{\mathit{tol_{gmres}}}
+\newcommand{\MIG}{\mathit{maxit_{gmres}}}
 \usepackage{array}
 \usepackage{color, colortbl}
@@ -117,24 +118,56 @@ where $A$ is a sparse square and nonsingular matrix, $b$ is the right-hand side
 x^{k+1}=\displaystyle\sum^L_{\ell=1} E_\ell M^{-1}_\ell (N_\ell x^k + b),~k=1,2,3,\ldots
 \label{eq:02}
 \end{equation}
-where a collection of $L$ triplets $(M_\ell, N_\ell, E_\ell)$ defines the multisplitting of matrix $A$, such that: the different splittings are defined as $A=M_\ell-N_\ell$ where $M_\ell$ are nonsingular matrices, and $\sum_\ell{E_\ell=I}$ are diagonal nonnegative weighting matrices and $I$ is the identity matrix. The iterations of the multisplitting methods can naturally be computed in parallel such that each processor or a group of processors is responsible for solving one splitting as a linear sub-system
+where a collection of $L$ triplets $(M_\ell, N_\ell, E_\ell)$ defines the multisplitting of matrix $A$~\cite{O'leary85,White86}, such that the different splittings are defined as $A=M_\ell-N_\ell$, where $M_\ell$ are nonsingular matrices, the $E_\ell$ are nonnegative diagonal weighting matrices satisfying $\sum_\ell{E_\ell}=I$, and $I$ is the identity matrix. The iterations of the multisplitting methods can naturally be computed in parallel, such that each processor or cluster of processors is responsible for solving one splitting as a linear sub-system
 \begin{equation}
-M_\ell y_\ell^{k+1} = R_\ell^k,\mbox{~such that~} R_\ell^k = N_\ell x^k_\ell + b,
+M_\ell y_\ell = c_\ell^k,\mbox{~such that~} c_\ell^k = N_\ell x^k + b,
 \label{eq:03}
 \end{equation}
 then the weighting matrices $E_\ell$ are used to compute the solution of the global system~(\ref{eq:01})
 \begin{equation}
-x^{k+1}=\displaystyle\sum^L_{\ell=1} E_\ell y^{k+1}_\ell.
+x^{k+1}=\displaystyle\sum^L_{\ell=1} E_\ell y_\ell.
 \label{eq:04}
 \end{equation}
-The convergence of the multisplitting methods, based on synchronous or asynchronous iterations, is studied by many authors. It is dependent on the condition
+The convergence of the multisplitting methods, based on synchronous or asynchronous iterations, has been studied by many authors, for example in~\cite{O'leary85,bahi97,Bai99,bahi07}. %It is dependent on the condition
+%\begin{equation}
+%\rho(\displaystyle\sum_{\ell=1}^L E_\ell M^{-1}_\ell N_\ell) < 1,
+%\label{eq:05}
+%\end{equation}
+%where $\rho$ is the spectral radius of the square matrix.
+The multisplitting methods are convergent:
+\begin{itemize}
+\item if $A^{-1}>0$ and the splittings of matrix $A$ are weak regular (i.e. $M_\ell^{-1}\geq 0$ and $M_\ell^{-1}N_\ell\geq 0$) when the iterations are synchronous, or
+\item if $A$ is an M-matrix and its splittings are regular (i.e. $M_\ell^{-1}\geq 0$ and $N_\ell\geq 0$) when the iterations are asynchronous.
+\end{itemize}
+The solutions of the different linear sub-systems~(\ref{eq:03}) arising from the multisplitting of matrix $A$ can be computed either exactly with a direct method or approximately with an iterative method. In the latter case, the multisplitting methods are called {\it inner-outer iterative methods} or {\it two-stage multisplitting methods}. Such methods use two nested iterations: the outer iteration of the multisplitting and the inner iteration of the iterative method used to solve the sub-systems.
+
+In this paper we focus on two-stage multisplitting methods, in both their synchronous and asynchronous versions, where the well-known iterative method GMRES ({\it Generalized Minimal RESidual})~\cite{saad86} is used as the inner iteration. Furthermore, our work in this paper is restricted to the block Jacobi splitting method. This approach of multisplitting consists in partitioning the matrix $A$ into $L$ horizontal band matrices of size $\frac{n}{L}\times n$ without overlapping (i.e. the weighting matrices $E_\ell$ contain only zero and one entries). In this case, the iteration of the multisplitting method presented by (\ref{eq:03}) and~(\ref{eq:04}) can be rewritten in the following form
 \begin{equation}
-\rho(\displaystyle\sum_{\ell=1}^L E_\ell M^{-1}_\ell N_\ell) < 1,
+A_{\ell\ell} x_\ell^{k+1} = b_\ell - \displaystyle\sum^{L}_{\substack{m=1\\m\neq\ell}}{A_{\ell m}x^k_m},\mbox{~for~}\ell=1,\ldots,L\mbox{~and~}k=1,2,3,\ldots
 \label{eq:05}
 \end{equation}
-where $\rho$ is the spectral radius of the square matrix. The different linear splittings~(\ref{eq:03}) arising from the multisplitting of matrix $A$can be solved exactly with a direct method or approximated with an iterative method. When the inner method used to solve the linear sub-systems is iterative, the multisplitting method is called {\it inner-outer iterative method} or {\it two-stage multisplitting method}.
-
-In this paper we are focused on two-stage multisplitting methods where the well-known iterative method GMRES is used as an inner iteration.
+where $x_\ell$ are sub-vectors of the solution $x$, $b_\ell$ are sub-vectors of the right-hand side $b$, and $A_{\ell\ell}$ and $A_{\ell m}$ are the diagonal and off-diagonal blocks of matrix $A$, respectively. At each outer iteration $k$, until convergence is reached, each sub-system arising from the block Jacobi multisplitting
+\begin{equation}
+A_{\ell\ell} x_\ell = c_\ell,
+\label{eq:06}
+\end{equation}
+is solved iteratively by a cluster of processors using the GMRES method, independently of the other sub-systems. The right-hand sides $c_\ell=b_\ell-\sum_{m\neq\ell}A_{\ell m}x_m$ are computed using the shared vectors $x_m$. Algorithm~\ref{alg:01} shows the key points of the block Jacobi two-stage method executed by a cluster of processors. In line~\ref{solve}, the linear sub-system~(\ref{eq:06}) is solved in parallel using the GMRES method, where $\MIG$ and $\TOLG$ are the maximum number of iterations and the tolerance threshold of GMRES, respectively.
+
+\begin{algorithm}[t]
+\caption{Block Jacobi two-stage method}
+\begin{algorithmic}[1]
+  \Input $A_\ell$ (sparse matrix), $b_\ell$ (right-hand side)
+  \Output $x_\ell$ (solution vector)\vspace{0.2cm}
+  \State Set the initial guess $x^0$
+  \For {$k=1,2,3,\ldots$ until convergence}
+    \State $c_\ell=b_\ell-\sum_{m\neq\ell}A_{\ell m}x_m^{k-1}$
+    \State $x^k_\ell=Solve(A_{\ell\ell},c_\ell,x^{k-1}_\ell,\MIG,\TOLG)$ \label{solve}
+    \State Send $x_\ell^k$ to neighboring clusters
+    \State Receive $\{x_m^k\}_{m\neq\ell}$ from neighboring clusters
+  \EndFor
+\end{algorithmic}
+\label{alg:01}
+\end{algorithm}
 \subsection{Simulation of two-stage methods using SimGrid framework}
@@ -155,8 +188,7 @@ have been chosen for the study in the paper.
 \textbf{Step 2} : Collect the software materials needed for the
 experimentation. In our case, we have three variants algorithms for the
-resolution of three 3D-Poisson problem: (1) using the classical GMRES
-\textit{(Generalized Minimal RESidual Method)} alias Algo-1 in this
+resolution of the 3D-Poisson problem: (1) using the classical GMRES alias Algo-1 in this
 paper, (2) using the multisplitting method alias Algo-2 and (3) an
 enhanced version of the multisplitting method as Algo-3. 
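As a rough illustration of the "Algo-1" baseline named in Step 2 (classical GMRES applied to the 3D-Poisson problem), the short Python/SciPy sketch below assembles a small 7-point 3D Poisson matrix and solves it with restarted GMRES. This is only an illustrative sketch under assumed settings: the grid size, right-hand side and solver parameters are invented for the example and do not reproduce the experiments of the paper, and SciPy is assumed simply because it ships a ready-made sparse GMRES.

# Hypothetical sketch of the Algo-1 baseline: classical GMRES on a small 3D Poisson system.
import numpy as np
import scipy.sparse as sp
import scipy.sparse.linalg as spla

def poisson3d(m):
    # 7-point finite-difference Laplacian on an m x m x m grid (Dirichlet boundaries),
    # assembled as a Kronecker sum of 1D second-difference matrices.
    T = sp.diags([-1.0, 2.0, -1.0], [-1, 0, 1], shape=(m, m), format="csr")
    I = sp.identity(m, format="csr")
    return (sp.kron(sp.kron(T, I), I)
            + sp.kron(sp.kron(I, T), I)
            + sp.kron(sp.kron(I, I), T)).tocsr()

m = 10                            # illustrative size: n = m^3 = 1000 unknowns
A = poisson3d(m)
b = np.ones(A.shape[0])           # illustrative right-hand side
x, info = spla.gmres(A, b, restart=30, maxiter=1000)   # info == 0 means convergence
print("GMRES info:", info, "residual norm:", np.linalg.norm(b - A @ x))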
In addition, SIMGRID simulator has been chosen to simulate the behaviors of the @@ -571,8 +603,8 @@ The authors would like to thank\dots{} % number - used to balance the columns on the last page % adjust value as needed - may need to be readjusted if % the document is modified later -\bibliographystyle{IEEEtran} -\bibliography{hpccBib} +\bibliographystyle{plain} +\bibliography{biblio} \end{document}
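To give a concrete picture of Algorithm 1 (the block Jacobi two-stage method) introduced in the patch above, here is a minimal sequential sketch in Python/SciPy. It is a toy under explicit assumptions, not the authors' implementation: the $L$ clusters are emulated by an ordinary loop rather than MPI processes simulated with SimGrid, the send/receive steps reduce to array copies of the shared sub-vectors, the test matrix is a small diagonally dominant system rather than the 3D-Poisson benchmark, and the function names and parameter values (block_jacobi_two_stage, inner_gmres, maxit_outer, ...) are illustrative only.

# Hypothetical sequential sketch of Algorithm 1 (block Jacobi two-stage method).
import numpy as np
import scipy.sparse as sp
import scipy.sparse.linalg as spla

def inner_gmres(A, rhs, x0, maxit, tol):
    # Inner GMRES solve; the tolerance keyword is named `rtol` in recent SciPy
    # releases and `tol` in older ones, hence the fallback.
    try:
        x, _ = spla.gmres(A, rhs, x0=x0, rtol=tol, maxiter=maxit, restart=30)
    except TypeError:
        x, _ = spla.gmres(A, rhs, x0=x0, tol=tol, maxiter=maxit, restart=30)
    return x

def block_jacobi_two_stage(A, b, L=4, maxit_outer=500, tol_outer=1e-8,
                           maxit_gmres=50, tol_gmres=1e-6):
    # Outer block Jacobi iteration over L contiguous row bands of A; every
    # diagonal block A_{ll} is solved approximately with GMRES (two-stage method).
    A = sp.csr_matrix(A)
    n = A.shape[0]
    bounds = np.linspace(0, n, L + 1, dtype=int)   # band boundaries, no overlapping
    x = np.zeros(n)
    for k in range(1, maxit_outer + 1):
        x_prev = x.copy()
        for ell in range(L):
            lo, hi = bounds[ell], bounds[ell + 1]
            band = A[lo:hi, :]                     # horizontal band of size (n/L) x n
            diag = band[:, lo:hi]                  # diagonal block A_{ll}
            # c_l = b_l - sum_{m != l} A_{lm} x_m^{k-1}
            c = b[lo:hi] - band @ x_prev + diag @ x_prev[lo:hi]
            # inner solve of A_{ll} x_l = c_l, warm-started with x_l^{k-1} as in Algorithm 1
            x[lo:hi] = inner_gmres(diag, c, x_prev[lo:hi], maxit_gmres, tol_gmres)
        if np.linalg.norm(b - A @ x) <= tol_outer * np.linalg.norm(b):
            return x, k
    return x, maxit_outer

# Small diagonally dominant test system (an M-matrix, so block Jacobi converges).
n = 400
A = sp.diags([-1.0, 4.0, -1.0], [-1, 0, 1], shape=(n, n), format="csr")
b = np.ones(n)
x, iters = block_jacobi_two_stage(A, b, L=4)
print(iters, "outer iterations, final residual:", np.linalg.norm(b - A @ x))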