1 \documentclass[times]{cpeauth}
5 %\usepackage[dvips,colorlinks,bookmarksopen,bookmarksnumbered,citecolor=red,urlcolor=red]{hyperref}
7 %\newcommand\BibTeX{{\rmfamily B\kern-.05em \textsc{i\kern-.025em b}\kern-.08em
8 %T\kern-.1667em\lower.7ex\hbox{E}\kern-.125emX}}
16 \usepackage[T1]{fontenc}
17 \usepackage[utf8]{inputenc}
18 \usepackage{amsfonts,amssymb}
20 \usepackage{algorithm}
21 \usepackage{algpseudocode}
24 \usepackage[american]{babel}
% Package for intra-document links (tagged PDF)
% and for the correct display of URLs (command \url{http://example.com})
27 %\usepackage{hyperref}
30 \DeclareUrlCommand\email{\urlstyle{same}}
32 \usepackage[autolanguage,np]{numprint}
34 \renewcommand*\npunitcommand[1]{\text{#1}}
35 \npthousandthpartsep{}}
38 \usepackage[textsize=footnotesize]{todonotes}
40 \newcommand{\AG}[2][inline]{%
41 \todo[color=green!50,#1]{\sffamily\textbf{AG:} #2}\xspace}
42 \newcommand{\RC}[2][inline]{%
43 \todo[color=red!10,#1]{\sffamily\textbf{RC:} #2}\xspace}
44 \newcommand{\LZK}[2][inline]{%
45 \todo[color=blue!10,#1]{\sffamily\textbf{LZK:} #2}\xspace}
46 \newcommand{\RCE}[2][inline]{%
47 \todo[color=yellow!10,#1]{\sffamily\textbf{RCE:} #2}\xspace}
49 \algnewcommand\algorithmicinput{\textbf{Input:}}
50 \algnewcommand\Input{\item[\algorithmicinput]}
52 \algnewcommand\algorithmicoutput{\textbf{Output:}}
53 \algnewcommand\Output{\item[\algorithmicoutput]}
55 \newcommand{\TOLG}{\mathit{tol_{gmres}}}
56 \newcommand{\MIG}{\mathit{maxit_{gmres}}}
57 \newcommand{\TOLM}{\mathit{tol_{multi}}}
58 \newcommand{\MIM}{\mathit{maxit_{multi}}}
59 \newcommand{\TOLC}{\mathit{tol_{cgls}}}
60 \newcommand{\MIC}{\mathit{maxit_{cgls}}}
63 \usepackage{color, colortbl}
64 \newcolumntype{M}[1]{>{\centering\arraybackslash}m{#1}}
65 \newcolumntype{Z}[1]{>{\raggedleft}m{#1}}
67 \newcolumntype{g}{>{\columncolor{Gray}}c}
68 \definecolor{Gray}{gray}{0.9}
\RCE{Title to be confirmed.}
74 \title{Comparative performance analysis of simulated grid-enabled numerical iterative algorithms}
75 %\itshape{\journalnamelc}\footnotemark[2]}
77 \author{ Charles Emile Ramamonjisoa and
80 Lilia Ziane Khodja and
86 Femto-ST Institute - DISC Department\\
87 Université de Franche-Comté\\
89 Email: \email{{raphael.couturier,arnaud.giersch,david.laiymani,charles.ramamonjisoa}@univ-fcomte.fr}
92 %% Lilia Ziane Khodja: Department of Aerospace \& Mechanical Engineering\\ Non Linear Computational Mechanics\\ University of Liege\\ Liege, Belgium. Email: l.zianekhodja@ulg.ac.be
98 \keywords{Algorithm; distributed; iterative; asynchronous; simulation; simgrid; performance}
102 \section{Introduction}
104 \section{The asynchronous iteration model}
108 %%%%%%%%%%%%%%%%%%%%%%%%%
109 %%%%%%%%%%%%%%%%%%%%%%%%%
111 \section{Two-stage multisplitting methods}
113 \subsection{Synchronous and asynchronous two-stage methods for sparse linear systems}
In this paper we focus on two-stage multisplitting methods in both their synchronous and asynchronous versions~\cite{Frommer92,Szyld92,Bru95}. These iterative methods are based on multisplitting methods~\cite{O'leary85,White86,Alefeld97} and use two nested iterations: the outer iteration and the inner iteration. Let us consider the following sparse linear system of $n$ equations in $\mathbb{R}$
where $A$ is a sparse, square and nonsingular matrix, $b$ is the right-hand side and $x$ is the solution of the system. Our work in this paper is restricted to the block Jacobi splitting method. This multisplitting approach consists in partitioning the matrix $A$ into $L$ horizontal band matrices of size $\frac{n}{L}\times n$ without overlapping (i.e. the sub-vectors $\{x_\ell\}_{1\leq\ell\leq L}$ are disjoint). The two-stage multisplitting methods solve the linear system~(\ref{eq:01}) iteratively as follows:
122 x_\ell^{k+1} = A_{\ell\ell}^{-1}(b_\ell - \displaystyle\sum^{L}_{\substack{m=1\\m\neq\ell}}{A_{\ell m}x^k_m}),\mbox{~for~}\ell=1,\ldots,L\mbox{~and~}k=1,2,3,\ldots
where $x_\ell$ are sub-vectors of the solution $x$, $b_\ell$ are the sub-vectors of the right-hand side $b$, and $A_{\ell\ell}$ and $A_{\ell m}$ are the diagonal and off-diagonal blocks of matrix $A$, respectively. The iterations of these methods can naturally be computed in parallel, such that each processor or cluster of processors is responsible for solving one splitting as a linear sub-system:
127 A_{\ell\ell} x_\ell = c_\ell,\mbox{~for~}\ell=1,\ldots,L,
where the right-hand sides $c_\ell=b_\ell-\sum_{m\neq\ell}A_{\ell m}x_m$ are computed using the shared vectors $x_m$. In this paper, we use the well-known iterative method GMRES ({\it Generalized Minimal RESidual})~\cite{saad86} as an inner iteration to approximate the solutions of the different splittings arising from the block Jacobi multisplitting of matrix $A$. The algorithm in Figure~\ref{alg:01} shows the key points of our block Jacobi two-stage method executed by a cluster of processors. In line~\ref{solve}, the linear sub-system~(\ref{eq:03}) is solved in parallel using the GMRES method, where $\MIG$ and $\TOLG$ are the maximum number of inner iterations and the tolerance threshold for GMRES, respectively. The convergence of two-stage multisplitting methods, based on synchronous or asynchronous iterations, has been studied by many authors, see for example~\cite{Bru95,bahi07}.
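To make this computation concrete, the following sketch (in C, using a hypothetical CSR storage of the local band of $A$; an illustration under our assumptions, not the authors' implementation) shows how a cluster can assemble its right-hand side $c_\ell$ from the shared sub-vectors:

\begin{verbatim}
/* Sketch: compute c_l = b_l - sum_{m != l} A_lm x_m for one cluster.
 * Hypothetical CSR layout; x_global gathers the shared sub-vectors x_m. */
typedef struct {
    int     n_loc;     /* number of local rows (n / L)            */
    int    *row_ptr;   /* CSR row pointers, size n_loc + 1        */
    int    *col_idx;   /* global column indices of the nonzeros   */
    double *val;       /* nonzero values                          */
    int     first_col; /* first global column of the block A_ll   */
} band_matrix;

void update_rhs(const band_matrix *A, const double *b_l,
                const double *x_global, double *c_l)
{
    for (int i = 0; i < A->n_loc; i++) {
        double s = 0.0;
        for (int p = A->row_ptr[i]; p < A->row_ptr[i + 1]; p++) {
            int j = A->col_idx[p];
            /* skip the diagonal block A_ll: it stays on the left-hand side */
            if (j < A->first_col || j >= A->first_col + A->n_loc)
                s += A->val[p] * x_global[j];
        }
        c_l[i] = b_l[i] - s;
    }
}
\end{verbatim}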
133 %\begin{algorithm}[t]
134 %\caption{Block Jacobi two-stage multisplitting method}
135 \begin{algorithmic}[1]
136 \Input $A_\ell$ (sparse matrix), $b_\ell$ (right-hand side)
137 \Output $x_\ell$ (solution vector)\vspace{0.2cm}
138 \State Set the initial guess $x^0$
139 \For {$k=1,2,3,\ldots$ until convergence}
140 \State $c_\ell=b_\ell-\sum_{m\neq\ell}A_{\ell m}x_m^{k-1}$
141 \State $x^k_\ell=Solve_{gmres}(A_{\ell\ell},c_\ell,x^{k-1}_\ell,\MIG,\TOLG)$\label{solve}
142 \State Send $x_\ell^k$ to neighboring clusters\label{send}
143 \State Receive $\{x_m^k\}_{m\neq\ell}$ from neighboring clusters\label{recv}
146 \caption{Block Jacobi two-stage multisplitting method}
In this paper, we propose two algorithms of two-stage multisplitting methods. The first algorithm is based on the asynchronous model, which allows communications to be overlapped by computations and reduces the idle times resulting from synchronizations. So in the asynchronous mode, our two-stage algorithm uses asynchronous outer iterations and asynchronous communications between clusters. The communications (i.e. lines~\ref{send} and~\ref{recv} in Figure~\ref{alg:01}) are performed by message passing using MPI non-blocking communication routines. The convergence of the asynchronous iterations is detected when all clusters have locally converged:
153 k\geq\MIM\mbox{~or~}\|x_\ell^{k+1}-x_\ell^k\|_{\infty }\leq\TOLM,
156 where $\MIM$ is the maximum number of outer iterations and $\TOLM$ is the tolerance threshold for the two-stage algorithm.
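As an illustration of lines~\ref{send} and~\ref{recv} in Figure~\ref{alg:01} and of the convergence test above, the sketch below (in C with MPI; the variable names and buffer layouts are hypothetical, not the authors' code) performs the non-blocking exchange and the local convergence detection:

\begin{verbatim}
#include <mpi.h>
#include <math.h>

/* ||u - v||_inf over a local sub-vector of size n */
static double inf_norm_diff(const double *u, const double *v, int n)
{
    double m = 0.0;
    for (int i = 0; i < n; i++) {
        double d = fabs(u[i] - v[i]);
        if (d > m) m = d;
    }
    return m;
}

/* End of one outer iteration: exchange x_new with the neighboring
 * clusters and return the local convergence flag.  recv_buf[i] stores
 * the sub-vector coming from neighbor i. */
int exchange_and_test(const double *x_old, const double *x_new, int n,
                      const int *neighbors, int n_neigh, double **recv_buf,
                      MPI_Request *reqs, MPI_Comm comm,
                      int k, int maxit_multi, double tol_multi)
{
    int r = 0, flag;
    for (int i = 0; i < n_neigh; i++)
        MPI_Isend((void *)x_new, n, MPI_DOUBLE, neighbors[i], 0, comm,
                  &reqs[r++]);
    for (int i = 0; i < n_neigh; i++)
        MPI_Irecv(recv_buf[i], n, MPI_DOUBLE, neighbors[i], 0, comm,
                  &reqs[r++]);
    /* asynchronous mode: test pending requests instead of waiting, so the
       next inner solve restarts with the most recently received data */
    MPI_Testall(r, reqs, &flag, MPI_STATUSES_IGNORE);
    /* local convergence: k >= maxit_multi or ||x^{k+1}-x^k||_inf <= tol */
    return (k >= maxit_multi) || (inf_norm_diff(x_new, x_old, n) <= tol_multi);
}
\end{verbatim}

In synchronous mode, the \texttt{MPI\_Testall} call would simply be replaced by \texttt{MPI\_Waitall}.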
The second two-stage algorithm is based on synchronous outer iterations. We propose to use a Krylov iteration based on residual minimization to accelerate the slow convergence of the multisplitting methods. In this case, an $n\times s$ matrix $S$ is set using the solutions issued from the inner iteration:
160 S=[x^1,x^2,\ldots,x^s],~s\ll n.
Every $s$ outer iterations, the algorithm computes a new approximation $\tilde{x}=S\alpha$ which minimizes the residual:
165 \min_{\alpha\in\mathbb{R}^s}{\|b-AS\alpha\|_2}.
The algorithm in Figure~\ref{alg:02} includes the residual minimization procedure, and the outer iteration is restarted with a new approximation $\tilde{x}$ every $s$ iterations. The least-squares problem~(\ref{eq:06}) is solved in parallel by all clusters using the CGLS method~\cite{Hestenes52}, where $\MIC$ is the maximum number of iterations and $\TOLC$ is the tolerance threshold for this method (line~\ref{cgls} in Figure~\ref{alg:02}).
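Recall that a minimizer of the least-squares problem~(\ref{eq:06}) is characterized by the normal equations
\[
(AS)^T AS\,\alpha=(AS)^T b,
\]
which CGLS solves implicitly by applying the conjugate gradient method to the least-squares formulation, thus avoiding the explicit formation of the ill-conditioned matrix $(AS)^T AS$.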
171 %\begin{algorithm}[t]
172 %\caption{Krylov two-stage method using block Jacobi multisplitting}
173 \begin{algorithmic}[1]
174 \Input $A_\ell$ (sparse matrix), $b_\ell$ (right-hand side)
175 \Output $x_\ell$ (solution vector)\vspace{0.2cm}
176 \State Set the initial guess $x^0$
177 \For {$k=1,2,3,\ldots$ until convergence}
178 \State $c_\ell=b_\ell-\sum_{m\neq\ell}A_{\ell m}x_m^{k-1}$
179 \State $x^k_\ell=Solve_{gmres}(A_{\ell\ell},c_\ell,x^{k-1}_\ell,\MIG,\TOLG)$
\State $S_{\ell,k \bmod s}=x_\ell^k$
182 \State $\alpha = Solve_{cgls}(AS,b,\MIC,\TOLC)$\label{cgls}
\State $\tilde{x}_\ell=S_\ell\alpha$
\State Send $\tilde{x}_\ell$ to neighboring clusters
186 \State Send $x_\ell^k$ to neighboring clusters
188 \State Receive $\{x_m^k\}_{m\neq\ell}$ from neighboring clusters
191 \caption{Krylov two-stage method using block Jacobi multisplitting}
196 \subsection{Simulation of two-stage methods using SimGrid framework}
One of our objectives when simulating the application in SimGrid is, as in real life, to get accurate results (solutions of the problem) but also to ensure the reproducibility of the tests under the same conditions. According to our experience, very few modifications are required to adapt an MPI program to run in the SimGrid simulator using SMPI (Simulated MPI). The first modification is to include the SMPI libraries and related header files (smpi.h). The second and most important modification is to eliminate all global variables, either by moving them into local subroutines or by using the SimGrid selector called ``runtime automatic switching'' (smpi/privatize\_global\_variables). Indeed, global variables can generate side effects at runtime between the threads running in the same process, which SimGrid uses to simulate the grid environment. The last modification to the MPI program, needed in some cases, is to review the sequence of MPI\_Isend, MPI\_Irecv and MPI\_Waitall instructions, which might otherwise cause an infinite loop.
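As an illustration, the toy program below (hypothetical, not our solver) shows the kind of change involved: the SMPI header is included, and a formerly global variable is made local and passed explicitly, so that the simulated MPI processes, which run as threads of a single process under SMPI, no longer share it.

\begin{verbatim}
/* Before: "double residual;" at file scope would be shared by all the
 * simulated MPI ranks, since SMPI folds them into one process. */
#include <mpi.h>
#include <smpi.h>   /* SMPI header mentioned above */
#include <stdio.h>

static void solver_step(double *residual) /* state passed explicitly */
{
    *residual *= 0.5;  /* stand-in for one iteration of a real solver */
}

int main(int argc, char *argv[])
{
    MPI_Init(&argc, &argv);
    double residual = 1.0;  /* formerly global, now local to each rank */
    for (int k = 0; k < 10; k++)
        solver_step(&residual);
    printf("final residual: %g\n", residual);
    MPI_Finalize();
    return 0;
}
\end{verbatim}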
202 \paragraph{SIMGRID Simulator parameters}
205 \item HOSTFILE: Hosts description file.
\item PLATFORM: file describing the platform architecture: clusters (CPU power,
\dots{}), intra-cluster network description, inter-cluster network (bandwidth $bw$,
latency $lat$, \dots{}).
\item ARCHI: grid computational description (number of clusters, number of
nodes/processors for each cluster).
214 In addition, the following arguments are given to the programs at runtime:
217 \item Maximum number of inner and outer iterations;
218 \item Inner and outer precisions;
219 \item Matrix size (NX, NY and NZ);
220 \item Matrix diagonal value = 6.0;
221 \item Execution Mode: synchronous or asynchronous.
Finally, note that the two solver algorithms have been executed with the SimGrid selector \texttt{--cfg=smpi/running\_power}, which determines the computational power (here 19 GFlops) of the simulator host machine.
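For instance, a typical invocation (with hypothetical file and program names, the power being given in flops) looks like:

\begin{verbatim}
smpirun -np 32 -hostfile hostfile.txt -platform platform.xml \
        --cfg=smpi/running_power:19000000000 ./solver <runtime arguments>
\end{verbatim}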
226 %%%%%%%%%%%%%%%%%%%%%%%%%
227 %%%%%%%%%%%%%%%%%%%%%%%%%
\section{Experimental Results and Comments}
\subsection{Study setup and methodology}
To conduct our study, we have put in place the following methodology,
which can be reused with any grid-enabled application.
\textbf{Step 1}: Choose with the end users the class of algorithms or
the application to be tested. Numerical parallel iterative algorithms
have been chosen for the study in this paper.
\textbf{Step 2}: Collect the software materials needed for the
experimentation. In our case, we have three variants of algorithms for the
resolution of a 3D Poisson problem: (1) the classical GMRES, referred to as
Algo-1 in this paper, (2) the multisplitting method, referred to as Algo-2,
and (3) an enhanced version of the multisplitting method, referred to as
Algo-3. In addition, the SimGrid simulator has been chosen to simulate the
behavior of the distributed applications. SimGrid runs on the Mesocentre
datacenter of the University of Franche-Comté but also in a virtual environment.
\textbf{Step 3}: Fix the criteria which will be used for the subsequent
comparison and analysis of the results. Within the scope of this study, we
retain on the one hand the algorithm execution mode (synchronous or
asynchronous) and on the other hand the execution time and the number of
iterations of the application before convergence is obtained.
\textbf{Step 4}: Set up the different grid testbed environments which
will be simulated in the simulator tool to run the program. The following
architectures have been configured in SimGrid: 2x16 (that is, a grid
containing 2 clusters with 16 hosts (processors/cores) each), 4x8, 4x16,
8x8 and 2x50. The network has been designed to operate with a bandwidth of
10 Gbit/s (resp. 1 Gbit/s) and a latency of 8E-6 s (resp. 5E-5 s) for the
intra-cluster links (resp. the inter-cluster backbone links).
\textbf{Step 5}: Conduct extensive and comprehensive testing within these
configurations, varying the key parameters, especially the CPU power
capacity, the network parameters and the size of the input matrix. Note
that some parameters, like some program input arguments, should remain
invariant to allow comparison.
\textbf{Step 6}: Collect and analyze the output results.
\subsection{Factors impacting distributed application performance in a grid environment}
From our previous experience of running distributed applications in a
computational grid, many factors have been identified as having an impact
on program behavior and performance in this specific environment. First of
all, the architecture of the grid itself can obviously influence the
performance results of the program. Theoretically, the performance gain
should be significant when the number of clusters and/or the number of
nodes (processors/cores) in each individual cluster increases.
Another important factor impacting the overall performance of the
application is the network configuration. Two main network parameters can
drastically modify the program results: (i) the network bandwidth ($bw$, in
bit/s), also known as the ``data-carrying capacity'' of the network,
defined as the maximum amount of data that can pass from one point to
another per unit of time; (ii) the network latency ($lat$, in seconds),
defined as the delay between the start of a data transmission at the source
and the end of its reception at the destination. Besides the network
characteristics, another impacting factor is the application-dependent
volume of data exchanged between the nodes in a cluster and between distant
clusters. Large volumes of data can be in transit between clusters and
nodes during code execution.
In a grid environment, it is common to distinguish, on the one hand, the
``intra-network'', which refers to the links between nodes within a
cluster, and on the other hand, the ``inter-network'', which is the
backbone link between clusters. By design, these two networks operate at
different speeds. The intra-network generally behaves like a high-speed
local network with a high bandwidth and a very low latency. In contrast,
the inter-network connects clusters, sometimes via heterogeneous network
components through the Internet, at a lower speed. The network between
distant clusters might be a bottleneck for the global performance of the
application.
\subsection{Comparing GMRES and multisplitting algorithms in synchronous mode}
Within the scope of this paper, our first objective is to demonstrate that
Algo-2 (the multisplitting method) shows better performance on a grid
architecture than Algo-1 (classical GMRES), both running in
\textbf{\textit{synchronous mode}}. Better algorithm performance here means
a smaller number of iterations and a shorter execution time before reaching
convergence. For a systematic study, the experiments should show that, for
various grid parameter values, the simulator confirms the targeted
outcomes, particularly for poor and slow networks, focusing on the impact
of the communication performance on the chosen class of algorithms.
The following paragraphs present the test conditions, the output results and our comments.
\textit{3.a Executing the algorithms on various computational grid
architectures, scaling up the input matrix size}
334 \begin{tabular}{r c }
Grid & 2x16, 4x8, 4x16 and 8x8\\ %\hline
Network & N2: bw=1 Gbit/s, lat=5E-05 s \\ %\hline
Input matrix size & $N_{x}\times N_{y}\times N_{z}$ = $150\times 150\times 150$ and\\ %\hline
 & $N_{x}\times N_{y}\times N_{z}$ = $170\times 170\times 170$ \\ \hline
Table 1: Clusters x nodes with $N_{x}$=150 or $N_{x}$=170 \\
%\RCE{I wanted to include the data tables, but I think that would be useless and would overload the paper.}
The results in Figure~1 show that the number of iterations of classical
GMRES does not vary for a given input matrix size; this is not the case
for the multisplitting method.
354 %\begin{wrapfigure}{l}{100mm}
357 \includegraphics[width=100mm]{cluster_x_nodes_nx_150_and_nx_170.pdf}
\caption{Clusters x nodes with $N_{x}$=150 and $N_{x}$=170}
Except for the 8x8 cluster configuration, the execution time difference
between the two algorithms is significant when comparing different grid
architectures, even with the same total number of processors (for example
2x16 and 4x8, both totaling 32 processors). The experiment shows the low
sensitivity of the multisplitting method (compared with classical GMRES)
when scaling up to larger input matrix sizes.
\textit{\\3.b Running on various computational grid architectures\\}
375 \begin{tabular}{r c }
Grid & 2x16, 4x8\\ %\hline
Network & N1: bw=10 Gbit/s, lat=8E-06 s \\ %\hline
 & N2: bw=1 Gbit/s, lat=5E-05 s \\
Input matrix size & $N_{x}\times N_{y}\times N_{z}$ = $150\times 150\times 150$\\ \hline \\
Table 2: Clusters x nodes -- networks N1 vs. N2 \\
388 %\begin{wrapfigure}{l}{100mm}
391 \includegraphics[width=100mm]{cluster_x_nodes_n1_x_n2.pdf}
392 \caption{Cluster x Nodes N1 x N2}
The experiments compare the behavior of the algorithms running first on a
high-speed inter-cluster network (N1) and then on a lower-performance
network (N2). Figure~2 shows that end users will benefit from using a grid
architecture like 4x16 or 8x8 to reduce the execution time of both
algorithms: the performance was increased by a factor of 2. The results
also show that when the network speed drops, the difference between the
execution times can exceed 25\%.
\textit{\\3.c Network latency impact on performance\\}
409 \begin{tabular}{r c }
Grid & 2x16\\ %\hline
Network & N1: bw=1 Gbit/s \\ %\hline
Input matrix size & $N_{x}\times N_{y}\times N_{z}$ = $150\times 150\times 150$\\ \hline\\
Table 3: Network latency impact \\
424 \includegraphics[width=100mm]{network_latency_impact_on_execution_time.pdf}
425 \caption{Network latency impact on execution time}
According to the results in Figure~3, a degradation of the network latency
from $8\cdot10^{-6}$ to $6\cdot10^{-5}$ s implies an execution time
increase of more than 75\% (resp. 82\%) for the classical GMRES (resp.
multisplitting) algorithm. In addition, it appears that the multisplitting
method tolerates the network latency variation better, with a smaller rate
of increase. Consequently, in the worst case (lat=$6\cdot10^{-5}$ s), the
execution time for GMRES is almost double that of the multisplitting
method, even though the performance was of the same order of magnitude
with a latency of $8\cdot10^{-6}$ s.
\textit{\\3.d Network bandwidth impact on performance\\}
444 \begin{tabular}{r c }
Grid & 2x16\\ %\hline
Network & N1: bw=1 Gbit/s, lat=5E-05 s \\ %\hline
Input matrix size & $N_{x}\times N_{y}\times N_{z}$ = $150\times 150\times 150$\\ \hline
Table 4: Network bandwidth impact \\
458 \includegraphics[width=100mm]{network_bandwith_impact_on_execution_time.pdf}
\caption{Network bandwidth impact on execution time}
The results show that increasing the network bandwidth improves performance
by reducing the execution time of both algorithms. However, here again the
multisplitting method performs better in the considered bandwidth interval,
with a gain of 40\%, compared with only around 24\% for classical GMRES.
\textit{\\3.e Input matrix size impact on performance\\}
475 \begin{tabular}{r c }
Network & N2: bw=1 Gbit/s, lat=5E-05 s \\ %\hline
Input matrix size & $N_{x}$ = from 40 to 200\\ \hline
Table 5: Input matrix size impact\\
488 \includegraphics[width=100mm]{pb_size_impact_on_execution_time.pdf}
\caption{Problem size impact on execution time}
In this experiment, the input matrix size varies from $N_x=N_y=N_z=40$ to
$200$ side elements, that is from $40^{3}$ = \np{64000} to $200^{3}$ =
\np{8000000} points. Obviously, as shown in Figure~5, the execution time
needed for convergence increases with the input matrix size for both
algorithms. But the interesting results here are (i) the drastic increase
(300 times) of the number of iterations needed before convergence for the
classical GMRES algorithm when the matrix size goes beyond $N_x$=150, and
(ii) the fact that the classical GMRES execution time almost doubles from
$N_x$=140 compared with the convergence time of the multisplitting method.
These findings may greatly help end users to set up the best and optimal
targeted environment for the application deployment when focusing on
problem size scale-up. Note that the same test has been done with the 2x16
grid, leading to the same conclusion.
507 \textit{\\3.f CPU Power impact on performance\\}
511 \begin{tabular}{r c }
Grid & 2x16\\ %\hline
Network & N2: bw=1 Gbit/s, lat=5E-05 s \\ %\hline
Input matrix size & $N_{x}\times N_{y}\times N_{z}$ = $150\times 150\times 150$\\ \hline
Table 6: CPU power impact \\
524 \includegraphics[width=100mm]{cpu_power_impact_on_execution_time.pdf}
525 \caption{CPU Power impact on execution time}
Using the flexibility of the SimGrid simulator, we have tried to determine
the impact of the CPU power of the cluster nodes on the algorithms'
performance, varying this power from 1 to 19 GFlops. The outputs depicted
in Figure~6 confirm the performance gain, around 95\% for both methods,
when adding more powerful CPUs. Note that the execution time axis of the
figure is on a logarithmic scale.
\subsection{Comparing GMRES in native synchronous mode with the
multisplitting algorithm in asynchronous mode}
The previous paragraphs put in evidence the interest of simulating the
behavior of the application before any deployment in a real environment.
We have focused the study on analyzing the performance when varying the
key factors impacting the results. Along the same line, the study compared
the performance of the two proposed methods in \textbf{synchronous mode}.
In this section, with the same methodology as before, the goal is to
demonstrate the efficiency of the multisplitting method in
\textbf{asynchronous mode} compared with classical GMRES remaining in
synchronous mode.
Note that the main interest of using the asynchronous mode for data
exchange is that, in contrast with the synchronous mode, the current
computation does not have to wait after a communication operation, such as
sending data between nodes. Each processor can continue its local
calculation without waiting for the end of the communication. Thus, the
asynchronous mode may theoretically reduce the overall execution time and
improve the algorithm's performance.
As stated above, the SimGrid simulator has been used to demonstrate the
efficiency of the multisplitting method in asynchronous mode and to find
the best combination of grid resources (CPU, network, input matrix size,
\ldots) yielding the highest ``relative gain'' in comparison with the
classical GMRES time.
The test conditions are summarized in the table below: \\
568 \begin{tabular}{r c }
Grid & 2x50, totaling 100 processors\\ %\hline
Processor power & 1 GFlops to 1.5 GFlops\\
Intra-network & bw=1.25 Gbit/s, lat=5E-05 s \\ %\hline
Inter-network & bw=5 Mbit/s, lat=2E-02 s\\
Input matrix size & $N_{x}$ = from 62 to 150\\ %\hline
Residual error precision & $10^{-5}$ to $10^{-9}$\\ \hline \\
Again, comprehensive and extensive tests have been conducted, varying the
CPU power and the network parameters (bandwidth and latency) in the
simulator tool with different problem sizes. Relative gains greater than 1
between the two algorithms have been captured after each step of the test.
Table~I below records the best grid configurations, for which the execution
and convergence time of the multisplitting method is more than 2.5 times
lower than that of classical GMRES. The finding of this experimentation is
the tolerance of the multisplitting method to the low-speed networks
usually encountered between distant clusters on the Internet.
590 % use the same column width for the following three tables
591 \newlength{\mytablew}\settowidth{\mytablew}{\footnotesize\np{E-11}}
592 \newenvironment{mytable}[1]{% #1: number of columns for data
593 \renewcommand{\arraystretch}{1.3}%
594 \begin{tabular}{|>{\bfseries}r%
595 |*{#1}{>{\centering\arraybackslash}p{\mytablew}|}}}{%
\caption{Relative gain of the multisplitting algorithm compared with classical GMRES}
607 & 5 & 5 & 5 & 5 & 5 \\
610 & 20 & 20 & 20 & 20 & 20 \\
613 & 1 & 1 & 1 & 1.5 & 1.5 \\
616 & 62 & 62 & 62 & 100 & 100 \\
619 & \np{E-5} & \np{E-8} & \np{E-9} & \np{E-11} & \np{E-11} \\
622 & 2.52 & 2.55 & 2.52 & 2.57 & 2.54 \\
631 & 50 & 50 & 50 & 50 & 50 \\
634 & 20 & 20 & 20 & 20 & 20 \\
637 & 1.5 & 1.5 & 1 & 1.5 & 1.5 \\
640 & 110 & 120 & 130 & 140 & 150 \\
643 & \np{E-11} & \np{E-11} & \np{E-11} & \np{E-11} & \np{E-11}\\
646 & 2.53 & 2.51 & 2.58 & 2.55 & 2.54 \\
655 \section*{Acknowledgment}
658 The authors would like to thank\dots{}
661 \bibliographystyle{wileyj}
662 \bibliography{biblio}
670 %%% ispell-local-dictionary: "american"