From 6984fef9a0c912c9bc10b004ed7c8b50d6ff188e Mon Sep 17 00:00:00 2001
From: Arnaud Giersch
Date: Tue, 22 Apr 2014 13:57:04 +0200
Subject: [PATCH] Use labels to reference line numbers of algorithm.

---
 hpcc.tex | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/hpcc.tex b/hpcc.tex
index 92d275c..3dd67ef 100644
--- a/hpcc.tex
+++ b/hpcc.tex
@@ -277,8 +277,8 @@ is solved independently by a cluster and communications are required to update t
 \For {$k=0,1,2,\ldots$ until the global convergence}
 \State Restart outer iteration with $x^0=x^k$
 \State Inner iteration: \Call{InnerSolver}{$x^0$, $k+1$}
-\State Send shared elements of $X_l^{k+1}$ to neighboring clusters
-\State Receive shared elements in $\{X_m^{k+1}\}_{m\neq l}$
+\State\label{algo:01:send} Send shared elements of $X_l^{k+1}$ to neighboring clusters
+\State\label{algo:01:recv} Receive shared elements in $\{X_m^{k+1}\}_{m\neq l}$
 \EndFor
 \Statex

@@ -303,9 +303,9 @@ $\{A_{lm}\}_{m\neq l}$ are off-diagonal matrices of sparse matrix $A$ and
 $\{X_m\}_{m\neq l}$ contain vector elements of solution $x$ shared with
 neighboring clusters. At every outer iteration $k$, asynchronous
 communications are performed between processors of the local cluster and those of distant
-clusters (lines $6$ and $7$ in Figure~\ref{algo:01}). The shared vector
-elements of the solution $x$ are exchanged by message passing using MPI
-non-blocking communication routines.
+clusters (lines~\ref{algo:01:send} and~\ref{algo:01:recv} in
+Figure~\ref{algo:01}). The shared vector elements of the solution $x$ are
+exchanged by message passing using MPI non-blocking communication routines.

 \begin{figure}[!t]
 \centering
-- 
2.39.5
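
For context, the two newly labeled steps exchange the shared elements of $X_l^{k+1}$ with neighboring clusters, which the patched text says is done with MPI non-blocking communication routines. Below is a minimal C sketch of one such exchange; the function name exchange_shared, the buffer arguments, and the assumption of a single neighbor rank per call are illustrative, not code from hpcc.tex.

    /* Minimal sketch, not from hpcc.tex: non-blocking exchange of the
     * shared solution elements with one neighboring cluster.  All names
     * (exchange_shared, send_buf, recv_buf, neighbor) are illustrative. */
    #include <mpi.h>

    void exchange_shared(double *send_buf, double *recv_buf, int n_shared,
                         int neighbor, MPI_Comm comm)
    {
        MPI_Request reqs[2];

        /* Post the receive first so a matching buffer is available as
         * soon as the neighbor's message arrives. */
        MPI_Irecv(recv_buf, n_shared, MPI_DOUBLE, neighbor, 0, comm, &reqs[0]);
        MPI_Isend(send_buf, n_shared, MPI_DOUBLE, neighbor, 0, comm, &reqs[1]);

        /* A fully asynchronous solver would overlap communication with
         * computation (e.g. polling via MPI_Test); waiting on both requests
         * here keeps the sketch simple. */
        MPI_Waitall(2, reqs, MPI_STATUSES_IGNORE);
    }

In the loop of Figure~\ref{algo:01}, a call like this would be issued once per neighboring cluster at every outer iteration $k$, with send_buf holding the local cluster's shared elements of $X_l^{k+1}$.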