\usepackage[utf8]{inputenc}
\usepackage{amsfonts,amssymb}
\usepackage{amsmath}
%\usepackage{algorithm}
\usepackage{algpseudocode}
%\usepackage{amsthm}
\usepackage{graphicx}
\section{SimGrid}
\AG{Describe SimGrid~\cite{casanova+legrand+quinson.2008.simgrid,SimGrid} (Arnaud)}
%%% brief history?
%%% programming interfaces: MSG, SimDAG, SMPI
%%% platforms
%%% validation?
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Simulation of the multisplitting method}
In the multisplitting method, the sparse linear system $Ax=b$ is split into $L$ row blocks, so that each sub-system
\begin{equation}
  \left\{
    \begin{array}{l}
      A_{ll} X_l = Y_l \mbox{,~such that} \\
      Y_l = B_l - \displaystyle\sum_{\substack{m=1 \\ m\neq l}}^{L} A_{lm} X_m
    \end{array}
  \right.
  \label{eq:4.1}
\end{equation}
is solved independently by a cluster and communications are required to update the right-hand side sub-vector $Y_l$, such that the sub-vectors $X_m$ represent the data dependencies between the clusters. As each sub-system (\ref{eq:4.1}) is solved in parallel by a cluster of processors, our multisplitting method uses an iterative method as the inner solver, which is easier to parallelize and more scalable than a direct method. In this work, we use the parallel GMRES method~\cite{ref1}, which is one of the most widely used iterative methods.
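As an illustration of these data dependencies (this block form is just a rewriting of the splitting above, not an additional assumption), the system $Ax=b$ partitioned into $L$ row blocks reads
\begin{equation*}
  \begin{pmatrix}
    A_{11} & A_{12} & \cdots & A_{1L} \\
    A_{21} & A_{22} & \cdots & A_{2L} \\
    \vdots & \vdots & \ddots & \vdots \\
    A_{L1} & A_{L2} & \cdots & A_{LL}
  \end{pmatrix}
  \begin{pmatrix}
    X_1 \\ X_2 \\ \vdots \\ X_L
  \end{pmatrix}
  =
  \begin{pmatrix}
    B_1 \\ B_2 \\ \vdots \\ B_L
  \end{pmatrix},
\end{equation*}
so that the $l$-th row block yields $A_{ll}X_l = B_l - \sum_{m\neq l} A_{lm}X_m = Y_l$, i.e.\ sub-system~(\ref{eq:4.1}).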
\begin{figure}
  %%% IEEE instructions forbid the use of an algorithm environment here,
  %%% use a figure instead
\begin{algorithmic}[1]
\Input $A_l$ (sparse sub-matrix), $B_l$ (right-hand side sub-vector)
\Output $X_l$ (solution sub-vector)\vspace{0.2cm}
\State Load $A_l$, $B_l$
\State Set the initial guess $x^0$
\For {$k=0,1,2,\ldots$ until the global convergence}
\State Restart the outer iteration with $x^0=x^k$
\State Inner iteration: \Call{InnerSolver}{$x^0$, $k+1$}
\State Send the shared elements of $X_l^{k+1}$ to the neighboring clusters
\State Receive the shared elements in $\{X_m^{k+1}\}_{m\neq l}$
\EndFor
\Statex
\Function{InnerSolver}{$x^0$, $k$}
\State Compute the local right-hand side: $Y_l = B_l - \sum_{m\neq l} A_{lm} X_m$
\State Solve the sub-system $A_{ll} X_l^k = Y_l$ with the parallel GMRES method
\State \Return $X_l^k$
\EndFunction
\end{algorithmic}
\caption{A multisplitting solver with GMRES method}
\label{algo:01}
\end{figure}
The algorithm given in Figure~\ref{algo:01} shows the key points of the
multisplitting method used to solve a large sparse linear system. It is based
on an outer-inner iteration scheme, where the parallel synchronous GMRES method
is used to solve the inner iterations, and it is executed in parallel by each
cluster of processors. For all $l,m\in\{1,\ldots,L\}$, the matrices and vectors
with the subscript $l$ represent the local data of cluster $l$, while
$\{A_{lm}\}_{m\neq l}$ are the off-diagonal blocks of the sparse matrix $A$ and
$\{X_m\}_{m\neq l}$ contain the elements of the solution $x$ shared with the
neighboring clusters. At every outer iteration $k$, asynchronous communications
are performed between the processors of the local cluster and those of the
distant clusters (lines~6 and~7 in Figure~\ref{algo:01}). The shared elements
of the solution $x$ are exchanged by message passing, using MPI non-blocking
communication routines.
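As a minimal sketch of this exchange (the function and variable names below, such as \texttt{exchange\_shared\_elements}, \texttt{send\_buf} and \texttt{neighbors}, are hypothetical and not taken from the actual solver code), the shared elements of $X_l$ could be posted with non-blocking sends and receives and completed before the next outer iteration:
\begin{verbatim}
/* Hypothetical sketch: exchange of the shared elements of X_l
 * (lines 6 and 7 of the algorithm), assuming one contiguous
 * buffer per neighboring cluster. */
#include <mpi.h>

void exchange_shared_elements(double **send_buf, double **recv_buf,
                              const int *sizes, const int *neighbors,
                              int nb_neighbors, MPI_Comm comm)
{
    MPI_Request reqs[2 * nb_neighbors];  /* C99 variable-length array */

    for (int i = 0; i < nb_neighbors; i++) {
        /* Post the non-blocking send and receive for neighbor i */
        MPI_Isend(send_buf[i], sizes[i], MPI_DOUBLE, neighbors[i], 0,
                  comm, &reqs[2 * i]);
        MPI_Irecv(recv_buf[i], sizes[i], MPI_DOUBLE, neighbors[i], 0,
                  comm, &reqs[2 * i + 1]);
    }
    /* Completion check before the next outer iteration; in an
     * asynchronous variant this could be replaced by MPI_Test. */
    MPI_Waitall(2 * nb_neighbors, reqs, MPI_STATUSES_IGNORE);
}
\end{verbatim}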
\bibliographystyle{IEEEtran}
\bibliography{IEEEabrv,hpccBib}
\end{document}