From 2367662d760a73d98ca68d37a3912c211972afe3 Mon Sep 17 00:00:00 2001
From: lilia
Date: Fri, 11 Apr 2014 00:06:45 +0200
Subject: [PATCH] 11-04-2014

---
 hpcc.tex    | 54 ++++++++++++++++++++++++++++++++++++++++++++++++++---
 hpccBib.bib | 10 ++++++++++
 2 files changed, 61 insertions(+), 3 deletions(-)
 create mode 100644 hpccBib.bib

diff --git a/hpcc.tex b/hpcc.tex
index b4d0be3..968b235 100644
--- a/hpcc.tex
+++ b/hpcc.tex
@@ -320,6 +320,8 @@
 \usepackage[T1]{fontenc}
 \usepackage[utf8]{inputenc}
+\usepackage{amsfonts,amssymb}
+\usepackage{amsmath}
 %\usepackage{amsmath}
 %\usepackage{amsthm}
 %\usepackage{amsfonts}
@@ -409,9 +411,55 @@ Décrire le modèle asynchrone. Je m'en charge (DL)
 Décrire SimGrid (Arnaud)
-\section{Simulation of the multi-splitting method}
-Décrire le problème (algo) traité ainsi que le processus d'adaptation à SimGrid.
+
+
+
+
+
+%%%%%
+\section{Simulation of the multisplitting method}
+%Décrire le problème (algo) traité ainsi que le processus d'adaptation à SimGrid.
+Let $Ax=b$ be a large sparse system of $n$ linear equations over $\mathbb{R}$, where $A$ is a sparse, square and nonsingular matrix, $x$ is the solution vector and $b$ is the right-hand side vector. We use a multisplitting method based on the block Jacobi partitioning to solve this linear system on a large-scale platform composed of $L$ clusters of processors. In this case, we apply a row-by-row splitting without overlapping
+\[
+\left(\begin{array}{ccc}
+A_{11} & \cdots & A_{1L} \\
+\vdots & \ddots & \vdots\\
+A_{L1} & \cdots & A_{LL}
+\end{array} \right)
+\times
+\left(\begin{array}{c}
+X_1 \\
+\vdots\\
+X_L
+\end{array} \right)
+=
+\left(\begin{array}{c}
+B_1 \\
+\vdots\\
+B_L
+\end{array} \right)\]
+in such a way that successive rows of matrix $A$ and both vectors $x$ and $b$ are assigned to one cluster, where for all $l,i\in\{1,\ldots,L\}$, $A_{li}$ is a rectangular block of $A$ of size $n_l\times n_i$, $X_l$ and $B_l$ are sub-vectors of $x$ and $b$, respectively, each of size $n_l$, and $\sum_{l} n_l=\sum_{i} n_i=n$.
+
+The multisplitting method proceeds by iterations to solve the linear system in parallel on the $L$ clusters of processors, in such a way that each sub-system
+\begin{equation}
+\left\{
+\begin{array}{l}
+A_{ll}X_l = Y_l \mbox{,~such that}\\
+Y_l = B_l - \displaystyle\sum_{i=1,i\neq l}^{L}A_{li}X_i,
+\end{array}
+\right.
+\label{eq:4.1}
+\end{equation}
+is solved independently by a cluster, and communications are required to update the right-hand side sub-vectors $Y_l$, since the sub-vectors $X_i$ represent the data dependencies between the clusters. As each sub-system (\ref{eq:4.1}) is solved in parallel by a cluster of processors, our multisplitting method uses an iterative method as an inner solver, which is easier to parallelize and more scalable than a direct method. In this work, we use the parallel GMRES method~\cite{ref1}, one of the most widely used iterative methods for solving sparse, nonsymmetric linear systems.
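+
+The listing below is a minimal sketch of the outer multisplitting loop executed by one cluster $l$; it is only an illustration under assumptions, not the implementation evaluated in this work. The helpers \texttt{inner\_gmres\_solve}, \texttt{block\_spmv\_sub}, \texttt{exchange\_subvectors} and \texttt{global\_residual} are hypothetical names standing for the inner GMRES solver on the diagonal block, the block sparse matrix--vector product, the inter-cluster exchange of the sub-vectors $X_i$ and the convergence test.
+\begin{verbatim}
+/* Hypothetical helpers (assumed for this sketch, not the paper's code) */
+void inner_gmres_solve(const double *A_ll, double *x_l, const double *y_l);
+void block_spmv_sub(double *y_l, const double *A_li,
+                    const double *x_i);            /* y_l -= A_li * x_i */
+void exchange_subvectors(double **x_blocks, int l, int L);
+double global_residual(double **x_blocks);
+
+/* Outer multisplitting loop run by cluster l among L clusters (sketch) */
+void multisplitting(double **A_row, double **x_blocks, const double *b_l,
+                    double *y_l, int n_l, int l, int L,
+                    double tol, int max_iter)
+{
+  int iter = 0;
+  do {
+    /* Local right-hand side: Y_l = B_l - sum_{i != l} A_li X_i */
+    for (int k = 0; k < n_l; k++) y_l[k] = b_l[k];
+    for (int i = 0; i < L; i++)
+      if (i != l) block_spmv_sub(y_l, A_row[i], x_blocks[i]);
+    /* Solve the local sub-system A_ll X_l = Y_l with the inner solver */
+    inner_gmres_solve(A_row[l], x_blocks[l], y_l);
+    /* Update the data dependencies between the clusters */
+    exchange_subvectors(x_blocks, l, L);
+  } while (global_residual(x_blocks) > tol && ++iter < max_iter);
+}
+\end{verbatim}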
+%%%%%
+
+
+
+
+
+
+
 \section{Experimental results}
@@ -650,7 +698,7 @@ The authors would like to thank...
 % http://www.michaelshell.org/tex/ieeetran/bibtex/
 \bibliographystyle{IEEEtran}
 % argument is your BibTeX string definitions and bibliography database(s)
-\bibliography{bib/hpccBib}
+\bibliography{hpccBib}
 %
 % manually copy in the resultant .bbl file
 % set second argument of \begin to the number of references
diff --git a/hpccBib.bib b/hpccBib.bib
new file mode 100644
index 0000000..310aef0
--- /dev/null
+++ b/hpccBib.bib
@@ -0,0 +1,10 @@
+@article{ref1,
+title = {{GMRES}: {A} {G}eneralized {M}inimal {R}esidual {A}lgorithm for {S}olving {N}onsymmetric {L}inear {S}ystems},
+author = {Saad, Y. and Schultz, M. H.},
+journal = {SIAM Journal on Scientific and Statistical Computing},
+volume = {7},
+number = {3},
+pages = {856--869},
+year = {1986}
+}
+
-- 
2.39.5