\algnewcommand\algorithmicoutput{\textbf{Output:}}
\algnewcommand\Output{\item[\algorithmicoutput]}
-\newcommand{\MI}{\mathit{MaxIter}}
+\newcommand{\TOLG}{\mathit{tol_{gmres}}}
+\newcommand{\MIG}{\mathit{maxit_{gmres}}}
\usepackage{array}
\usepackage{color, colortbl}
x^{k+1}=\displaystyle\sum^L_{\ell=1} E_\ell M^{-1}_\ell (N_\ell x^k + b),~k=1,2,3,\ldots
\label{eq:02}
\end{equation}
-where a collection of $L$ triplets $(M_\ell, N_\ell, E_\ell)$ defines the multisplitting of matrix $A$, such that: the different splittings are defined as $A=M_\ell-N_\ell$ where $M_\ell$ are nonsingular matrices, and $\sum_\ell{E_\ell=I}$ are diagonal nonnegative weighting matrices and $I$ is the identity matrix. The iterations of the multisplitting methods can naturally be computed in parallel such that each processor or a group of processors is responsible for solving one splitting as a linear sub-system
+where a collection of $L$ triplets $(M_\ell, N_\ell, E_\ell)$ defines the multisplitting of matrix $A$~\cite{O'leary85,White86}, such that the different splittings are defined as $A=M_\ell-N_\ell$, where the $M_\ell$ are nonsingular matrices and the $E_\ell$ are diagonal nonnegative weighting matrices satisfying $\sum_\ell{E_\ell}=I$, with $I$ the identity matrix. The iterations of the multisplitting methods can naturally be computed in parallel, such that each processor or cluster of processors is responsible for solving one splitting as a linear sub-system
\begin{equation}
-M_\ell y_\ell^{k+1} = R_\ell^k,\mbox{~such that~} R_\ell^k = N_\ell x^k_\ell + b,
+M_\ell y_\ell = c_\ell^k,\mbox{~such that~} c_\ell^k = N_\ell x^k + b,
\label{eq:03}
\end{equation}
then the weighting matrices $E_\ell$ are used to compute the solution of the global system~(\ref{eq:01})
\begin{equation}
-x^{k+1}=\displaystyle\sum^L_{\ell=1} E_\ell y^{k+1}_\ell.
+x^{k+1}=\displaystyle\sum^L_{\ell=1} E_\ell y_\ell.
\label{eq:04}
\end{equation}
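+
+For illustration only, the synchronous iteration~(\ref{eq:02})--(\ref{eq:04}) can be sketched in a few lines of Python/NumPy. The splitting built below (a toy Jacobi-type multisplitting with $0/1$ weighting matrices) and the function name are hypothetical; this sketch is not the implementation evaluated in this paper.
+\begin{verbatim}
+import numpy as np
+
+def multisplitting(A, b, splittings, weights, x0, tol=1e-8, maxit=200):
+    # Synchronous multisplitting: x^{k+1} = sum_l E_l M_l^{-1} (N_l x^k + b)
+    x = x0.copy()
+    for _ in range(maxit):
+        # each y_l could be computed by a different cluster of processors
+        y = [np.linalg.solve(M, N @ x + b) for (M, N) in splittings]
+        x = sum(E @ y_l for (E, y_l) in zip(weights, y))
+        if np.linalg.norm(b - A @ x) <= tol * np.linalg.norm(b):
+            break
+    return x
+
+# toy example: L = 2 splittings of a diagonally dominant matrix (Jacobi-type)
+n, L = 6, 2
+A = 4.0 * np.eye(n) - np.eye(n, k=1) - np.eye(n, k=-1)
+b = np.ones(n)
+parts = np.array_split(np.arange(n), L)
+E = [np.diag(np.isin(np.arange(n), p).astype(float)) for p in parts]  # sum E_l = I
+M = [np.diag(np.diag(A)) for _ in range(L)]  # nonsingular M_l
+N = [M_l - A for M_l in M]                   # A = M_l - N_l
+x = multisplitting(A, b, list(zip(M, N)), E, np.zeros(n))
+\end{verbatim}
+Here the sub-systems are solved exactly with a direct solver; replacing these exact solves by a few iterations of an inner iterative method such as GMRES leads to the two-stage variant discussed below.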
-The convergence of the multisplitting methods, based on synchronous or asynchronous iterations, is studied by many authors. It is dependent on the condition
+The convergence of the multisplitting methods, based on synchronous or asynchronous iterations, has been studied by many authors; see for example~\cite{O'leary85,bahi97,Bai99,bahi07}. %It is dependent on the condition
+%\begin{equation}
+%\rho(\displaystyle\sum_{\ell=1}^L E_\ell M^{-1}_\ell N_\ell) < 1,
+%\label{eq:05}
+%\end{equation}
+%where $\rho$ is the spectral radius of the square matrix.
+The multisplitting methods are convergent:
+\begin{itemize}
+\item if $A^{-1}>0$ and the splittings of matrix $A$ are weak regular (i.e. $M_\ell^{-1}\geq 0$ and $M_\ell^{-1}N_\ell\geq 0$) when the iterations are synchronous, or
+\item if $A$ is an M-matrix and its splittings are regular (i.e. $M_\ell^{-1}\geq 0$ and $N_\ell\geq 0$) when the iterations are asynchronous.
+\end{itemize}
+The solutions of the different linear sub-systems~(\ref{eq:03}) arising from the multisplitting of matrix $A$ can be computed either exactly, with a direct method, or approximately, with an iterative method. In the latter case, the multisplitting methods are called {\it inner-outer iterative methods} or {\it two-stage multisplitting methods}. Such methods use two nested iterations: the outer iteration and the inner iteration (that of the inner solver).
+
+In this paper we focus on two-stage multisplitting methods, in both their synchronous and asynchronous versions, in which the well-known iterative method GMRES ({\it Generalized Minimal RESidual})~\cite{saad86} is used as the inner iteration. Furthermore, our work in this paper is restricted to the block Jacobi splitting method. This multisplitting approach consists in partitioning the matrix $A$ into $L$ horizontal band matrices of size $\frac{n}{L}\times n$ without overlapping (i.e. the weighting matrices $E_\ell$ have only zero and one entries). In this case, the iterations of the multisplitting method given by (\ref{eq:03}) and~(\ref{eq:04}) can be rewritten in the following form
\begin{equation}
-\rho(\displaystyle\sum_{\ell=1}^L E_\ell M^{-1}_\ell N_\ell) < 1,
+A_{\ell\ell} x_\ell^{k+1} = b_\ell - \displaystyle\sum^{L}_{\substack{m=1\\m\neq\ell}}{A_{\ell m}x^k_m},\mbox{~for~}\ell=1,\ldots,L\mbox{~and~}k=1,2,3,\ldots
\label{eq:05}
\end{equation}
-where $\rho$ is the spectral radius of the square matrix. The different linear splittings~(\ref{eq:03}) arising from the multisplitting of matrix $A$can be solved exactly with a direct method or approximated with an iterative method. When the inner method used to solve the linear sub-systems is iterative, the multisplitting method is called {\it inner-outer iterative method} or {\it two-stage multisplitting method}.
-
-In this paper we are focused on two-stage multisplitting methods where the well-known iterative method GMRES is used as an inner iteration.
+where the $x_\ell$ are sub-vectors of the solution $x$, the $b_\ell$ are sub-vectors of the right-hand side $b$, and $A_{\ell\ell}$ and $A_{\ell m}$ are the diagonal and off-diagonal blocks of matrix $A$, respectively. At each outer iteration $k$, until convergence is reached, each sub-system arising from the block Jacobi multisplitting
+\begin{equation}
+A_{\ell\ell} x_\ell = c_\ell,
+\label{eq:06}
+\end{equation}
+is solved iteratively by a cluster of processors using the GMRES method, independently of the other sub-systems. The right-hand sides $c_\ell=b_\ell-\sum_{m\neq\ell}A_{\ell m}x_m$ are computed using the shared vectors $x_m$. Algorithm~\ref{alg:01} shows the key points of the block Jacobi two-stage method executed by a cluster of processors. In line~\ref{solve}, the linear sub-system~(\ref{eq:06}) is solved in parallel using the GMRES method, where $\MIG$ and $\TOLG$ are the maximum number of iterations and the tolerance threshold of GMRES, respectively.
+
+\begin{algorithm}[t]
+\caption{Block Jacobi two-stage method}
+\begin{algorithmic}[1]
+ \Input $A_\ell$ (sparse matrix), $b_\ell$ (right-hand side)
+ \Output $x_\ell$ (solution vector)\vspace{0.2cm}
+ \State Set the initial guess $x^0$
+ \For {$k=1,2,3,\ldots$ until convergence}
+ \State $c_\ell=b_\ell-\sum_{m\neq\ell}A_{\ell m}x_m^{k-1}$
+ \State $x^k_\ell=Solve(A_{\ell\ell},c_\ell,x^{k-1}_\ell,\MIG,\TOLG)$ \label{solve}
+ \State Send $x_\ell^k$ to neighboring clusters
+ \State Receive $\{x_m^k\}_{m\neq\ell}$ from neighboring clusters
+ \EndFor
+\end{algorithmic}
+\label{alg:01}
+\end{algorithm}
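+
+As a complement to Algorithm~\ref{alg:01}, the following Python/SciPy fragment is a minimal sequential sketch of the same logic, in which the loop over the $L$ row bands emulates the $L$ clusters; the function name and its parameters are ours, and this sketch is not the parallel MPI implementation evaluated in this paper.
+\begin{verbatim}
+import numpy as np
+import scipy.sparse as sp
+import scipy.sparse.linalg as spla
+
+def two_stage_block_jacobi(A, b, L, tol=1e-8, maxit=100,
+                           tol_gmres=1e-10, maxit_gmres=50):
+    # Sequential sketch of the block Jacobi two-stage method; in the real
+    # method each row band is handled by one cluster of processors.
+    n = A.shape[0]
+    parts = np.array_split(np.arange(n), L)   # L row bands, no overlapping
+    x = np.zeros(n)
+    for _ in range(maxit):                    # outer iterations
+        x_new = x.copy()
+        for rows in parts:
+            A_ll = A[rows][:, rows]           # diagonal block A_{ll}
+            # c_l = b_l - sum_{m != l} A_{lm} x_m (previous outer iterate)
+            c = b[rows] - A[rows, :] @ x + A_ll @ x[rows]
+            y, _ = spla.gmres(A_ll, c, x0=x[rows],
+                              rtol=tol_gmres,  # 'tol=' in older SciPy
+                              maxiter=maxit_gmres)
+            x_new[rows] = y                   # approximate solve of A_ll x_l = c_l
+        if np.linalg.norm(b - A @ x_new) <= tol * np.linalg.norm(b):
+            return x_new
+        x = x_new
+    return x
+
+# toy usage: a sparse diagonally dominant test matrix split into L = 4 bands
+n = 400
+A = sp.diags([-1.0, 4.0, -1.0], [-1, 0, 1], shape=(n, n), format="csr")
+b = np.ones(n)
+x = two_stage_block_jacobi(A, b, L=4)
+\end{verbatim}
+The asynchronous variant would differ only in the data exchanges: each cluster would use the most recently received sub-vectors $x_m$ instead of waiting for all sub-vectors of the previous outer iteration.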
\subsection{Simulation of two-stage methods using SimGrid framework}
\textbf{Step 2} : Collect the software materials needed for the
experimentation. In our case, we have three algorithm variants for the
-resolution of three 3D-Poisson problem: (1) using the classical GMRES
-\textit{(Generalized Minimal RESidual Method)} alias Algo-1 in this
+resolution of the 3D-Poisson problem: (1) using the classical GMRES method, alias Algo-1 in this
paper, (2) using the multisplitting method, alias Algo-2, and (3) an
enhanced version of the multisplitting method, alias Algo-3. In addition,
the SimGrid simulator has been chosen to simulate the behaviors of the
The authors would like to thank\dots{}
-% trigger a \newpage just before the given reference
-% number - used to balance the columns on the last page
-% adjust value as needed - may need to be readjusted if
-% the document is modified later
-\bibliographystyle{IEEEtran}
-\bibliography{hpccBib}
+\bibliographystyle{wileyj}
+\bibliography{biblio}
\end{document}