From: raphael couturier
Date: Sun, 14 Dec 2014 09:43:10 +0000 (+0100)
Subject: rename a figure file
X-Git-Url: https://bilbo.iut-bm.univ-fcomte.fr/and/gitweb/Krylov_multi.git/commitdiff_plain/fe4f163b23c01b1cb74425a37c7c571946a69dbe?ds=sidebyside;hp=93d523a29282f24946a4550b80d133269f3130f2

rename a figure file
---

diff --git a/biblio.bib b/biblio.bib
index 6f6fd4c..492d5ce 100644
--- a/biblio.bib
+++ b/biblio.bib
@@ -126,3 +126,24 @@
 year = {1986},
 }
 
+
+
+@article{zkcgb+14:ij,
+inhal = {no},
+domainehal = {INFO:INFO_DC, INFO:INFO_CR, INFO:INFO_MO},
+equipe = {and},
+classement = {ACLI},
+impact-factor = {0.917},
+isi-acro = {J SUPERCOMPUT},
+author = {Ziane Khodja, L. and Couturier, R. and Giersch, A. and Bahi, J.},
+title = {Parallel sparse linear solver with {GMRES} method using minimization techniques of communications for {GPU} clusters},
+journal = {The Journal of Supercomputing},
+pages = {200--224},
+volume = {69},
+number = {1},
+doi = {10.1007/s11227-014-1143-8},
+url = {http://dx.doi.org/10.1007/s11227-014-1143-8},
+publisher = {Springer},
+year = {2014},
+
+}
\ No newline at end of file
diff --git a/krylov_multi_reviewed.tex b/krylov_multi_reviewed.tex
index 02d1aca..7934501 100644
--- a/krylov_multi_reviewed.tex
+++ b/krylov_multi_reviewed.tex
@@ -84,16 +84,16 @@ thousands of cores are used.
 
 Traditional parallel iterative solvers are based on fine-grain computations
 that frequently require data exchanges between computing nodes and have global
-synchronizations that penalize the scalability. Particularly, they are more
-penalized on large scale architectures or on distributed platforms composed of
-distant clusters interconnected by a high-latency network. It is therefore
-imperative to develop coarse-grain based algorithms to reduce the communications
-in the parallel iterative solvers. Two possible solutions consists either in
-using asynchronous iterative methods~\cite{ref18} or in using multisplitting
-algorithms. In this paper, we will reconsider the use of a multisplitting
-method. In opposition to traditional multisplitting method that suffer from slow
-convergence, as proposed in~\cite{huang1993krylov}, the use of a minimization
-process can drastically improve the convergence.\\
+synchronizations that penalize the scalability~\cite{zkcgb+14:ij}. In particular,
+they are more penalized on large-scale architectures or on distributed platforms
+composed of distant clusters interconnected by a high-latency network. It is
+therefore imperative to develop coarse-grained algorithms to reduce the
+communications in the parallel iterative solvers. Two possible solutions
+consist either in using asynchronous iterative methods~\cite{ref18} or in using
+multisplitting algorithms. In this paper, we will reconsider the use of a
+multisplitting method. In contrast to traditional multisplitting methods that
+suffer from slow convergence, as proposed in~\cite{huang1993krylov}, the use of
+a minimization process can drastically improve the convergence.\\
 
 
 %%% AJOUTE************************
@@ -380,7 +380,7 @@ respectively. The size of the Krylov subspace basis $S$ is fixed to 10 vectors.
 \begin{figure}[htbp]
 \centering
 \begin{tabular}{c}
-\includegraphics[width=0.8\textwidth]{weak_scaling_280k} \\ \includegraphics[width=0.8\textwidth]{weak_scaling_280K}\\
+\includegraphics[width=0.8\textwidth]{weak_scaling_280k} \\ \includegraphics[width=0.8\textwidth]{weak_scaling_280K2}\\
 \end{tabular}
 \caption{Weak scaling with 3 blocks of 4 cores each to solve a 3D Poisson problem with approximately 280K components per core}
 \label{fig:002}
@@ -476,7 +476,7 @@ to 115. So it is not different from GMRES.
 
 \begin{figure}[htbp]
 \centering
 \includegraphics[width=0.7\textwidth]{nb_iter_sec}
-\caption{Number of iterations per second with the same parameters as in Table~\ref{tab1} (weak scaling) with only 2 blocks of cores}
+\caption{Number of iterations per second with the same parameters as in Table~\ref{tab1} (weak scaling) with only blocks of cores}
 \label{fig:01}
 \end{figure}
diff --git a/weak_scaling_280K.pdf b/weak_scaling_280K2.pdf
similarity index 100%
rename from weak_scaling_280K.pdf
rename to weak_scaling_280K2.pdf
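
Reading note on the paragraph revised in krylov_multi_reviewed.tex above: the "minimization process" it credits with restoring fast convergence is, in the usual Krylov multisplitting formulation, a small least-squares solve over the Krylov basis $S$ whose size the second hunk fixes to 10 vectors. A minimal LaTeX sketch of that step, assuming the standard formulation (the symbols $s$, $\alpha$, and $\tilde{x}$ are illustrative and not taken from the commit):

% Sketch only: after s multisplitting iterates have been gathered as the
% columns of the basis S, a global least-squares problem picks the
% combination of those iterates that best approximates the solution of
% Ax = b; the result S*alpha restarts the outer multisplitting iteration.
\begin{equation*}
  \alpha^{*} = \operatorname*{arg\,min}_{\alpha \in \mathbb{R}^{s}}
               \left\| b - A S \alpha \right\|_{2},
  \qquad
  \tilde{x} = S \alpha^{*}.
\end{equation*}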