From: couturie Date: Tue, 1 Sep 2015 16:06:39 +0000 (+0200) Subject: 1ere version du journal X-Git-Url: https://bilbo.iut-bm.univ-fcomte.fr/and/gitweb/GMRES2stage.git/commitdiff_plain/df562f71fc6dcfdbb4e0b77f138977ca7219df6f 1ere version du journal --- diff --git a/IJHPCN/biblio.bib b/IJHPCN/biblio.bib new file mode 100644 index 0000000..c3ab7d4 --- /dev/null +++ b/IJHPCN/biblio.bib @@ -0,0 +1,223 @@ +@article{Saad86, + author = {Saad, Y. and Schultz, M.H.}, + title = {{GMRES} : a Generalized Minimal Residual Algorithm for Solving Nonsymmetric Linear Systems}, + journal = {SIAM J. Sci. Stat. Comput.}, + year = {1986}, + volume = {7}, + number = {3}, + pages = {856--869}, + publisher = {Society for Industrial and Applied Mathematics}, + address = {Philadelphia, PA, USA}, + } + + +@book{Saad2003, + author = {Saad, Y.}, + title = {Iterative Methods for Sparse Linear Systems}, + year = {2003}, + isbn = {0898715342}, + edition = {2nd}, + publisher = {Society for Industrial and Applied Mathematics}, + address = {Philadelphia, PA, USA}, +} + + +@article{Hestenes52, + author = {Hestenes, M. R. and Stiefel, E.}, + journal = {Journal of research of the National Bureau of Standards}, + pages = {409--436}, + timestamp = {2008-10-07T16:03:39.000+0200}, + title = {Methods of conjugate gradients for solving linear systems}, + volume = 49, + year = 1952 +} + +@article{Paige82, + author = {Paige, Christopher C. and Saunders, Michael A.}, + title = {{LSQR:} An Algorithm for Sparse Linear Equations and Sparse Least Squares}, + journal = {ACM Trans. Math. Softw.}, + issue_date = {March 1982}, + volume = {8}, + number = {1}, + month = mar, + year = {1982}, + issn = {0098-3500}, + pages = {43--71}, + numpages = {29}, + url = {http://doi.acm.org/10.1145/355984.355989}, + doi = {10.1145/355984.355989}, + acmid = {355989}, + publisher = {ACM}, + address = {New York, NY, USA}, +} + +@Misc{petsc-web-page, + author = {Satish Balay and Shrirang Abhyankar and Mark~F. Adams and Jed Brown and Peter Brune + and Kris Buschelman and Victor Eijkhout and William~D. Gropp + and Dinesh Kaushik and Matthew~G. Knepley + and Lois Curfman McInnes and Karl Rupp and Barry~F. Smith + and Hong Zhang}, + title = {{PETS}c {W}eb page}, + url = {http://www.mcs.anl.gov/petsc}, + howpublished = {\url{http://www.mcs.anl.gov/petsc}}, + year = {2014} + } + + +@misc{Dav97, + author = {Davis, T. and Hu, Y.}, + title = {The {U}niversity of {F}lorida Sparse Matrix Collection}, + year = {1997}, + note = {Digest, \url{http://www.cise.ufl.edu/research/sparse/matrices/}}, + } + + +@article{Saad:1993, + author = {Saad, Y.}, + title = {A Flexible Inner-outer Preconditioned GMRES Algorithm}, + journal = {SIAM J. Sci. Comput.}, + issue_date = {March 1993}, + volume = {14}, + number = {2}, + month = mar, + year = {1993}, + issn = {1064-8275}, + pages = {461--469}, + numpages = {9}, + url = {http://dx.doi.org/10.1137/0914028}, + doi = {10.1137/0914028}, + acmid = {160089}, + publisher = {Society for Industrial and Applied Mathematics}, + address = {Philadelphia, PA, USA}, + keywords = {GMRES, Krylov subspace methods, non-Hermitian systems, preconditioned conjugate gradient, variable preconditioners}, +} + +@incollection{bahicontascoutu, +title = {{P}arallel iterative algorithms: from sequential to grid computing}, +author = {Bahi, J.M. and Contassot-Vivier, S. and Couturier, R.}, +booktitle = {Numerical Analysis and Scientific Computing}, +publisher = {Chapman \& Hall/CRC}, +year = {2008}, +} + +@Article{Nichols:1973:CTS, + author = "Nancy K. 
Nichols", + title = "On the Convergence of Two-Stage Iterative Processes + for Solving Linear Equations", + journal = "SIAM Journal on Numerical Analysis", + volume = "10", + number = "3", + pages = "460--469", + month = jun, + year = "1973", + CODEN = "SJNAAM", + ISSN = "0036-1429 (print), 1095-7170 (electronic)", + issn-l = "0036-1429", + bibdate = "Fri Oct 16 06:57:22 MDT 1998", + bibsource = "http://www.math.utah.edu/pub/tex/bib/siamjnumeranal.bib; + JSTOR database", + acknowledgement = "Nelson H. F. Beebe, University of Utah, Department + of Mathematics, 110 LCB, 155 S 1400 E RM 233, Salt Lake + City, UT 84112-0090, USA, Tel: +1 801 581 5254, FAX: +1 + 801 581 4148, e-mail: \path|beebe@math.utah.edu|, + \path|beebe@acm.org|, \path|beebe@computer.org| + (Internet), URL: + \path|http://www.math.utah.edu/~beebe/|", + fjournal = "SIAM Journal on Numerical Analysis", + journal-url = "http://epubs.siam.org/sinum", +} + +@article{Meijerink77, + author = {Meijerink, J.A. and Vorst, H.A. Van der}, + title = {An Iterative Solution Method for Linear Systems of Which the Coefficient Matrix is a Symmetric {M}-Matrix}, + journal = {Mathematics of Computation}, + year = {1977}, + volume = {31}, + number = {137}, + pages = {148--162}, + publisher = {American Mathematical Society}, + } + +@techreport{Huang89, + author = {Huang, Y. and Van der Vorst, H.A.}, + title = {Some Observations on the Convergence Behavior of {GMRES}}, + institution = {Delft Univ. Technology}, + type = { Report 89--09}, + year = {1989}, +} + +@article {Vorst94, +author = {Van der Vorst, H. A. and Vuik, C.}, +title = {{GMRESR}: a family of nested {GMRES} methods}, +journal = {Numerical Linear Algebra with Applications}, +volume = {1}, +number = {4}, +publisher = {John Wiley & Sons, Ltd}, +issn = {1099-1506}, +url = {http://dx.doi.org/10.1002/nla.1680010404}, +doi = {10.1002/nla.1680010404}, +pages = {369--386}, +year = {1994}, +} + +@inproceedings{Yamazaki2014, + author = {Yamazaki, Ichitaro and Anzt, Hartwig and Tomov, Stanimire and Hoemmen, Mark and Dongarra, Jack}, + title = {Improving the Performance of CA-GMRES on Multicores with Multiple GPUs}, + booktitle = {Proceedings of the 2014 IEEE 28th International Parallel and Distributed Processing Symposium}, + series = {IPDPS '14}, + year = {2014}, + isbn = {978-1-4799-3800-1}, + pages = {382--391}, + numpages = {10}, + url = {http://dx.doi.org/10.1109/IPDPS.2014.48}, + doi = {10.1109/IPDPS.2014.48}, + acmid = {2650529}, + publisher = {IEEE Computer Society}, + address = {Washington, DC, USA}, +} + +@techreport{Hoemmen2010, + author = {Hoemmen, Mark}, + title = {Communication-avoiding Krylov subspace methods}, + institution = {University of California, Berkeley}, + type = {Ph.D. 
thesis}, + url = {http://www.cs.berkeley.edu/~mhoemmen/pubs/thesis.pdf}, + year = {2010}, +} + +@inproceedings{Mohiyuddin2009, + author = {Mohiyuddin, Marghoob and Hoemmen, Mark and Demmel, James and Yelick, Katherine}, + title = {Minimizing Communication in Sparse Matrix Solvers}, + booktitle = {Proceedings of the Conference on High Performance Computing Networking, Storage and Analysis}, + series = {SC '09}, + year = {2009}, + isbn = {978-1-60558-744-8}, + location = {Portland, Oregon}, + pages = {36:1--36:12}, + articleno = {36}, + numpages = {12}, + url = {http://doi.acm.org/10.1145/1654059.1654096}, + doi = {10.1145/1654059.1654096}, + acmid = {1654096}, + publisher = {ACM}, + address = {New York, NY, USA}, +} + + + + +@article{cz15:ij, +inhal = {no}, +domainehal = {INFO:INFO_DC, INFO:INFO_CR, INFO:INFO_MO}, +equipe = {and}, +classement = {ACLI}, +impact-factor ={0.841}, +isi-acro = {J SUPERCOMPUT}, +author = {Couturier, Rapha\"el and Ziane Khodja, Lilia}, +title = {A scalable multisplitting algorithm to solve large sparse linear systems}, +journal = {The journal of Supercomputing}, +note={{O}nline version, 10.1007/s11227-014-1367-7}, +publisher = {Springer}, +year = 2015, + +} \ No newline at end of file diff --git a/IJHPCN/doublecol-new.cls b/IJHPCN/doublecol-new.cls new file mode 100644 index 0000000..013e82e --- /dev/null +++ b/IJHPCN/doublecol-new.cls @@ -0,0 +1,1579 @@ +\NeedsTeXFormat{LaTeX2e}[1995/12/01] \ProvidesClass{singlecol} + [1998/05/05 v1.3y + Standard LaTeX document class] + +%\def\clsdir{C:/Inderscience/IJBRA-sample} +\RequirePackage{epsf,endnotes,ifthen,dcolumn,graphics,leqno,calc,lastpage,color} + +\usepackage{amsmath,amsfonts,amssymb,bm,latexsym} + +%\usepackage[LY1]{fontenc} +% +%\edef\@tempa{\rmdefault} +%\def\@tempb {cmr} +%\ifx\@tempa\@tempb +% \renewcommand*\sfdefault{phv} +% \renewcommand*\rmdefault{ptm} +% \renewcommand*\ttdefault{fftt} +% \renewcommand*\bfdefault{b} +%\fi + +\DeclareOldFontCommand{\RMbi}{\rmfamily\bfseries\itshape}{\mathbi} + \let\bi=\RMbi + +\newcommand\@ptsize{} +\newif\if@restonecol +\newif\if@titlepage +\@titlepagefalse +\if@compatibility\else +\DeclareOption{a4paper} + {\setlength\paperheight {297mm}% + \setlength\paperwidth {210mm}} +\DeclareOption{a5paper} + {\setlength\paperheight {210mm}% + \setlength\paperwidth {148mm}} +\DeclareOption{b5paper} + {\setlength\paperheight {250mm}% + \setlength\paperwidth {176mm}} +\DeclareOption{letterpaper} + {\setlength\paperheight {11in}% + \setlength\paperwidth {8.5in}} +\DeclareOption{legalpaper} + {\setlength\paperheight {14in}% + \setlength\paperwidth {8.5in}} +\DeclareOption{executivepaper} + {\setlength\paperheight {9.5in}% + \setlength\paperwidth {6.5in}} +\DeclareOption{landscape} + {\setlength\@tempdima {\paperheight}% + \setlength\paperheight {\paperwidth}% + \setlength\paperwidth {\@tempdima}} +\fi +\if@compatibility + \renewcommand\@ptsize{0} +\else +\DeclareOption{10pt}{\renewcommand\@ptsize{0}} +\fi +\DeclareOption{11pt}{\renewcommand\@ptsize{1}} +\DeclareOption{12pt}{\renewcommand\@ptsize{2}} +\if@compatibility\else +\DeclareOption{oneside}{\@twosidefalse \@mparswitchfalse} +\fi +\DeclareOption{twoside}{\@twosidetrue \@mparswitchtrue} +\DeclareOption{draft}{\setlength\overfullrule{5pt}% + \AtBeginDocument{\PagePositionCropYes + {7pc}{5.3pc}{5.9pc}{7pt} + {7in}{10in}{-1in}{-1in}}} +\DeclareOption{boxdraft}{\setlength\overfullrule{5pt}% + \AtBeginDocument{\PagePositionBoxYes + {7pc}{5.3pc}{5.9pc}{7pt} + {7in}{10in}{-1in}{-1in}}} +\DeclareOption{final}{\setlength\overfullrule{0pt}% + 
\AtBeginDocument{\PagePositionCropYes + {4pc}{4pc}{3.77pc}{7pt} + {8.26in}{11.68in}{-1in}{-1in}}} +\DeclareOption{finalcro}{\setlength\overfullrule{0pt}% + \AtBeginDocument{\PagePositionCropYes + {7pc}{5.3pc}{5.9pc}{7pt} + {7in}{10in}{-1in}{-1in}}} +\DeclareOption{titlepage}{\@titlepagetrue} +\if@compatibility\else +\DeclareOption{notitlepage}{\@titlepagefalse} +\fi +\if@compatibility\else +\DeclareOption{onecolumn}{\@twocolumnfalse} +\fi +\DeclareOption{twocolumn}{\@twocolumntrue} +\DeclareOption{leqno}{\input{leqno.clo}} +\DeclareOption{fleqn}{\input{fleqn.clo}} +\DeclareOption{openbib}{% + \AtEndOfPackage{% + \renewcommand\@openbib@code{% + \advance\leftmargin\bibindent + \itemindent -\bibindent + \listparindent \itemindent + \parsep \z@ + }% + \renewcommand\newblock{\par}}% +} +\ExecuteOptions{letterpaper,10pt,twoside,onecolumn,final} +\ProcessOptions +\input{size1\@ptsize.clo} +\setlength\lineskip{1\p@} +\setlength\normallineskip{1\p@} +\renewcommand\baselinestretch{} +\setlength\parskip{0\p@ \@plus \p@} +\@lowpenalty 51 +\@medpenalty 151 +\@highpenalty 301 +\setcounter{topnumber}{2} +\renewcommand\topfraction{.7} +\setcounter{bottomnumber}{1} +\renewcommand\bottomfraction{.3} +\setcounter{totalnumber}{3} +\renewcommand\textfraction{.2} +\renewcommand\floatpagefraction{.5} +\setcounter{dbltopnumber}{2} +\renewcommand\dbltopfraction{.7} +\renewcommand\dblfloatpagefraction{.5} + +%\DeclareFontFamily{LY1}{fftt}{}% +%\DeclareFontShape{LY1}{fftt}{m}{n}{<->jmcrxr} +% {\hyphenchar\font=-1}% +%\DeclareFontShape{LY1}{fftt}{m}{it}{<->jmcrxi} +% {\hyphenchar\font=-1}% +%\DeclareFontShape{LY1}{fftt}{b}{n}{<->jmcrxb} +% {\hyphenchar\font=-1}% +%\DeclareFontShape{LY1}{fftt}{b}{it}{<->jmcrxw} +% {\hyphenchar\font=-1}% + +\def\SEVEN{\fontsize{7}{9}\selectfont} +\def\EGT{\fontsize{8}{10}\selectfont} +\def\NINE{\fontsize{9}{10}\selectfont} +\def\TEN{\fontsize{10}{12}\selectfont} +\def\ELEVEN{\fontsize{11}{13}\selectfont} +\def\TWELVE{\fontsize{12}{14}\selectfont} +\def\THIRTEEN{\fontsize{13}{15}\selectfont} +\def\FOURTEEN{\fontsize{14}{16}\selectfont} +\def\SIXTEEN{\fontsize{16}{22}\selectfont} + +\newdimen\markwidth +\newdimen\blth +\newdimen\marklength +\newdimen\markdistance +\newdimen\trimwidth +\newdimen\trimheight +\newdimen\auxaaa +\newdimen\auxbbb +\newdimen\auxccc +\newdimen\auxddd + +\long\def\koo#1#2#3{\vbox to0pt{\hsize0pt\kern #2\hbox to0pt{\kern +#1{#3}\hss}\vss}} +\long\def\zeroCC#1{\vbox to0pt{\vss\hbox to0pt{\hss #1\hss}\vss}} +\long\def\zeroLC#1{\vbox to0pt{\vss\hbox to0pt{\hss #1\hss}}} +\long\def\zeroUC#1{\vbox to0pt{\hbox to0pt{\hss #1\hss}\vss}} +\long\def\zeroLR#1{\vbox to0pt{\vss\hbox to0pt{\hss #1}}} +\long\def\zeroLL#1{\vbox to0pt{\vss\hbox to0pt{#1\hss}}} +\long\def\zeroUR#1{\vbox to0pt{\hbox to0pt{\hss #1}\vss}} +\long\def\zeroUL#1{\vbox to0pt{\hbox to0pt{#1\hss}\vss}} +\long\def\zeroCL#1{\vbox to0pt{\vss\hbox to0pt{#1\hss}\vss}} +\long\def\zeroCR#1{\vbox to0pt{\vss\hbox to0pt{\hss #1}\vss}} + +\newcolumntype{d}[1]{D{.}{.}{#1}} + +%%%%%%%%%%%%%%%%%%% Numbers supplied down here! +\markwidth=.5pt %0.4 truept % Thickness of the mark line +\blth=0.2pt +\marklength=20 truept % Length of the mark line +\markdistance=3 truept % Distance of the real corner to the beg. 
of the mark +%%% \trimwidth=\textwidth+\oddsidemargin+\evensidemargin + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\auxaaa\marklength\advance\auxaaa\markdistance +\def\oddevenswitch{\ifodd\c@page \auxbbb=\oddsidemargin + \else\auxbbb=\evensidemargin\fi}% + +%%% First reset all page position parameters depending on the job: +% \PagePositionCropYes - should be just for testing +% \PagePositionCropNo - should be the default behavior +% \PagePositionBoxYes - gives the page box for testing + +\def\PagePositionCropYes#1#2#3#4#5#6#7#8{% + \oddsidemargin=#1% % [3] inner margin / gutter + \evensidemargin=#2% % [4] outside margin + \topmargin=#3% % [5] \rhsink = \topmargin + \headheight (=12pt here) + \headheight=#4% % [6] VERY IMPORTANT! (see above) + \trimwidth=#5% % [7] + \trimheight=#6% % [8] + \hoffset=#7% % = -1in to move the area in the upper left corner + \voffset=#8% % = -1in to move the area in the upper left corner +%%% + \def\CROPMARKS{\oddevenswitch + \hbox to0pt{\kern-\auxbbb + \vbox to0bp{\kern-\topmargin\kern-\headheight + \hbox to\trimwidth{% + \koo{-\auxaaa}{-\markwidth}{\VRHDW{\markwidth}{0pt}{\marklength}}% + \koo{-\markwidth}{-\auxaaa}{\VRHDW{0pt}{\marklength}{\markwidth}}% + \hfill + \koo{\markdistance}{-\markwidth}{\VRHDW{\markwidth}{0pt}{\marklength}}% + \koo{0pt}{-\auxaaa}{\VRHDW{0pt}{\marklength}{\markwidth}}}% + \nointerlineskip\vskip\trimheight + \nointerlineskip + \hbox to\trimwidth{% + \koo{-\auxaaa}{0pt}{\VRHDW{0pt}{\markwidth}{\marklength}}% + \koo{-\markwidth}{\markdistance}{\VRHDW{0pt}{\marklength}{\markwidth}}% + \hfill + \koo{\markdistance}{0pt}{\VRHDW{0pt}{\markwidth}{\marklength}}% + \koo{0pt}{\markdistance}{\VRHDW{0pt}{\marklength}{\markwidth}}}% + \vss}\hss}}% +}% + +\def\PagePositionCropNo#1#2#3#4#5#6#7#8{% + \oddsidemargin=#1% % [3] inner margin / gutter + \evensidemargin=#2% % [4] outside margin + \topmargin=#3% % [5] \rhsink = \topmargin + \headheight (=12pt here) + \headheight=#4% % [6] VERY IMPORTANT! (see above) + \trimwidth=#5% % [7] + \trimheight=#6% % [8] + \hoffset=#7% % = -1in to move the area in the upper left corner + \voffset=#8% % = -1in to move the area in the upper left corner +%%% +\def\CROPMARKS{}% +}% + +\def\PagePositionBoxYes#1#2#3#4#5#6#7#8{% + \oddsidemargin=#1% % [3] inner margin / gutter + \evensidemargin=#2% % [4] outside margin + \topmargin=#3% % [5] \rhsink = \topmargin + \headheight (=12pt here) + \headheight=#4% % [6] VERY IMPORTANT! 
(see above) + \trimwidth=#5% % [7] + \trimheight=#6% % [8] + \hoffset=#7% % = -1in to move the area in the upper left corner + \voffset=#8% % = -1in to move the area in the upper left corner +%%% + \def\CROPMARKS{\oddevenswitch + \hbox to0pt{\kern-\auxbbb + \vbox to0bp{\kern-\topmargin\kern-\headheight + \hbox to\trimwidth{% + \auxccc=\trimwidth \advance\auxccc by 2\blth + \auxddd=\trimheight \advance\auxddd by 2\blth + \koo{-\blth}{0pt}{\zeroLL{\VRHDW{\blth}{0pt}{\auxccc}}}% + \koo{-\blth}{-\blth}{\zeroUL{\VRHDW{\auxddd}{0pt}{\blth}}}% + \koo{\trimwidth}{-\blth}{\zeroUL{\VRHDW{\auxddd}{0pt}{\blth}}}% + \koo{-\blth}{\trimheight}{\zeroUL{\VRHDW{\blth}{0pt}{\auxccc}}}\hss}% + \vss}\hss}}% +}% + +\def\LRH#1{\gdef\setLRH{#1}}\def\setLRH{}% +\def\RRH#1{\gdef\setRRH{#1}}\def\setRRH{}% + +\LRH{author} +\RRH{short title} + +\def\ps@headings{\let\@mkboth\markboth + \def\@oddfoot{\setoddFOOT}% + \def\@evenfoot{\setevenFOOT}% + \def\@evenhead{\setevenRH}% + \def\@oddhead{\setoddRH}% + \def\sectionmark##1{}% + \def\subsectionmark##1{}}% + +%%%%%%%%%%%%%%%%%%% Standard settings: + +\def\setoddRH{\VRHDW{\headheight}{0pt}{0pt}\CROPMARKS% + \raisebox{0pt}[\headheight][0pt]{% +% \marginnumbering + \begin{tabular}{@{}c@{}}\\[-15pt]% +% \TEN\hskip -1pt\today\\[27pt]% + \hbox to \textwidth{\hspace*{36pt}{\TEN\it\spaceskip0.41em\setRRH}\hfill{\TEN\thepage}} + \end{tabular} + }}% + +\def\setevenRH{\VRHDW{\headheight}{0pt}{0pt}\CROPMARKS% + \raisebox{0pt}[\headheight][0pt]{% +% \marginnumbering + \begin{tabular}{@{}c@{}}\\[-15pt]% +% \TEN\hskip -1pt\today \\[30pt]% + \hbox to \textwidth{\spaceskip0.41em{\TEN\thepage}\hspace*{30pt}\TEN\it% + \setLRH}% + \end{tabular} + }}% + +\def\setevenFOOT{\hfil} +\def\setoddFOOT{\hfil} + +\let\savesetevenRH=\setevenRH +\let\savesetoddRH=\setoddRH +\let\savesetoddFOOT=\setoddFOOT +\let\savesetevenFOOT=\setevenFOOT + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\ps@plain +\def\ps@plain{\let\@mkboth\@gobbletwo + \def\@oddhead{\it \CROPMARKS\TEN \theJOURNALNAME, Vol. \theVOL, No. \theISSUE, \thePUBYEAR\hfill{\rm \thepage}}% + \let\@evenhead=\@oddhead + \def\@oddfoot{\NINE Copyright \copyright\ 2009 Inderscience Enterprises Ltd.\hfill}% + \def\@evenfoot{\NINE Copyright \copyright\ 2009 Inderscience Enterprises Ltd.\hfill}} + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\ps@empty +\def\ps@empty{\let\@mkboth\@gobbletwo + \def\@oddhead{\hfil}% + \let\@evenhead=\@oddhead + \def\@oddfoot{\hfil}% + \let\@evenfoot=\@oddfoot} + +\def\jname{ETC} + +\def\boxtext{\begin{tabular}{l} +\\[-190pt] +\hbox to \textwidth{\hfill{\parbox{16pc} + {\EGT + \begin{tabular}{c} + \jname\ \artname\quad Pe:\ \penumber/LE/CP/DISK (LaTeX)\\ + \today\quad \printtime\quad Ist Proof + \end{tabular} + } +}\enskip} +\end{tabular}} + +\def\DS{\displaystyle} + +\def\KLUWERLOGO{\epsffile[0 0 16 18]{aglogo3.eps}}% + +\def\setfirstoddFOOT{\hfil} +\def\setfirstevenFOOT{\hfil} + +\def\JOURNALNAME#1{\gdef\theJOURNALNAME{#1}}% +\def\VOL#1{\gdef\theVOL{#1}}% +\VOL{0}% +\def\PAGES#1{\gdef\thePAGES{#1}}% +\PAGES{\thepage--\pageref{LastPage}}% +\def\PUBYEAR#1{\gdef\thePUBYEAR{#1}}% +\PUBYEAR{2004}% +\def\ISSUE#1{\gdef\theISSUE{#1}}% +\ISSUE{00}% + +\JOURNALNAME{\TEN{\it Int. J. Signal and Imaging Systems Engineering}}% +\def\jtitlefont{\fontsize{16}{22}\selectfont\rm} + +\def\Dnum#1{\gdef\theDnum{#1}}% +\Dnum{Please enter the Dekker No}% + +\def\CLline{{\CROPMARKS\vskip -36pt \fontsize{9}{12}\selectfont +\TEN\hskip -1pt\\ +\NINE{\theJOURNALNAME\\[9pt] +%Vol. \theVOL, No. \theISSUE, pp. 
\thePAGES, \thePUBYEAR} +}\endgraf}\vspace*{38pt}}% + +\def\BottomCatch{% +\vskip -10pt +\thispagestyle{empty}% +\begin{table}[b]% +\NINE\begin{tabular*}{\textwidth}{@{\extracolsep{\fill}}lcr@{}}% +\\[-12pt] +Copyright \copyright\ 2008 Inderscience Enterprises Ltd. & &% +\end{tabular*}% +\vskip -15pt% +\end{table}% +}% + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%SSS +%%%%%% \Trivlist, \TRivlist, \Center, \Flushleft, \Flushright +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\def\Trivlist{\parskip0pt\topsep=0pt\parsep0pt\partopsep0pt\@nmbrlistfalse + \@trivlist \labelwidth\z@ \leftmargin\z@ + \itemindent\z@ \def\makelabel##1{##1}} + +\def\endTrivlist{\if@newlist\@noitemerr\fi + \if@inlabel\indent\fi + \ifhmode\unskip \par\fi + \if@noparlist \else + \ifdim\lastskip >\z@ \@tempskipa\lastskip \vskip -\lastskip + \advance\@tempskipa\parskip \advance\@tempskipa -\@outerparskip + \vskip\@tempskipa + \fi\@endparenv\fi} + +\def\TRivlist{\parskip0pt\topsep=12pt plus 5pt minus 5pt + \parsep0pt\partopsep0pt\@nmbrlistfalse + \@trivlist \labelwidth\z@ + \leftmargin 0pt \rightmargin 0pt + \itemindent\z@ \def\makelabel##1{##1}} + +\def\endTRivlist{\if@newlist\@noitemerr\fi + \if@inlabel\indent\fi + \ifhmode\unskip \par\fi + \if@noparlist \else + \ifdim\lastskip >\z@ \@tempskipa\lastskip \vskip -\lastskip + \advance\@tempskipa\parskip \advance\@tempskipa -\@outerparskip + \vskip\@tempskipa + \fi\@endparenv\fi} + +\def\Center{\Trivlist \centering\item[]} +\let\endCenter=\endTrivlist +\def\Flushleft{\Trivlist \raggedright\item[]} +\let\endFlushleft=\endTrivlist +\def\Flushright{\Trivlist \raggedleft\item[]} +\let\endFlushright=\endTrivlist + +\newcommand{\up}{\HD{9}{0}}% for tables +\newcommand{\down}{\HD{0}{6}}% + +%%%%%%%%%%koo.sty +\def\VRHDW#1#2#3{\vrule height #1 depth #2 width #3} +\def\HRHDW#1#2#3{\hrule height #1 depth #2 width #3} +\def\HD#1#2{\vrule height #1pt depth #2pt width 0pt\relax} +\def\halmos{\VRHDW{8pt}{0pt}{4pt}}% + +\newcommand\abstractname{\NINE Abstract:} +%\newcommand\keywordname{Keywords:\null} + +\newbox\t@abstract +\newbox\t@receivedname +\newbox\temptbox +\newdimen\tempdime + +\def\authorA#1{\gdef\@authorA{\bf #1}}%{\authfont #1}} +\def\authorB#1{\gdef\@authorB{\bf #1}}%{\authfont #1}} +\def\authorC#1{\gdef\@authorC{\bf #1}}%{\authfont #1}} +\def\authorD#1{\gdef\@authorD{\bf #1}}%{\authfont #1}} + +\def\affA#1{\gdef\@affA{\TEN #1}} +\def\affB#1{\gdef\@affB{\TEN #1}} +\def\affC#1{\gdef\@affC{\TEN #1}} +\def\affD#1{\gdef\@affD{\TEN #1}} + +\def\HISTORY#1{\gdef\@HISTORY{\NINE #1}} +\def\ADDITIONAL#1{\gdef\@ADDITIONAL{\NINE #1}} +\def\PRESENTED#1{\gdef\@PRESENTED{\NINE #1}} +\def\DEDICATED#1{\gdef\@DEDICATED{{\TWELVE\it #1}}} + +\def\REF#1{\gdef\@REF{\NINE #1}} +\def\BIO#1{\gdef\@BIO{\NINE #1}} +\def\JEL#1{\gdef\@JEL{\NINE #1}} +\def\PACS#1{\gdef\@PACS{\NINE #1}} +\def\KEYWORD#1{\gdef\@KEYWORD{\NINE #1}} + +%% +\def\sechead#1{\def\@sechead{#1}} +\def\docHeading#1{\def\@docHeading{#1}} +\def\subtitle#1{\def\@subtitle{\uppercase{#1}}} + +\def\address#1{\def\@address{#1}} +\def\Date#1{\def\@Date{Accepted~#1}} +\def\abstract{\@ifnextchar[{\@abstract}{\@abstract[]}} + +\def\@abstract[#1]{% + \global\setbox\t@abstract=\vbox\bgroup% + \parindent10pt\leftskip 12.75pc%\rightskip 3pc% + \noindent{\bf\TEN \abstractname}\ %\vskip 12.8pt + \noindent\NINE% + \ignorespaces}% +\def\endabstract{\egroup} + +\newcommand\bioname{\NINE Biographical notes:} +\newbox\t@bio +\def\bio{\@ifnextchar[{\@bio}{\@bio[]}} +\def\@bio[#1]{% + \global\setbox\t@bio=\vbox\bgroup% + \parindent10pt\leftskip 
12.75pc%\rightskip 3pc% + \noindent{\bf\TEN \bioname}\ %\vskip 12.8pt + \noindent\NINE% + \ignorespaces}% +\def\endbio{\egroup} + +%\font\titfont=ARIALBD at 14pt +%\font\authfont=ARIAL at 14pt + +\newcommand\maketitle{\par + \begingroup + \gdef\thefootnote{\alph{footnote}} + \def\@makefnmark{\hbox + to 0pt{$^{\@thefnmark}$\hss}} + \long\def\@makefntext##1{\noindent + \ifnum\c@footnote=1\hbox{*\,}\else + \hbox{\@textsuperscript{\normalfont\@thefnmark}\,}\fi + \foot ##1}% +% \ifnum \col@number=\@ne +% \@maketitle +% \else +% \@maketitle + \twocolumn[\@maketitle]% +% \fi + \thispagestyle{plain} + \@thanks + \endgroup + \setcounter{footnote}{0}% + \global\let\maketitle\relax\global\let\@maketitle\relax +} + +\long\gdef\ARTICLEOPENING#1{\long\gdef\@maketitle{#1}}% +\ARTICLEOPENING{}% + +%FFF +\def\@maketitle{% +% \begin{Center}% + \let \footnote \thanks% + {% + \hyphenpenalty 10000% + \begin{Flushleft} + \vskip-1pc + \leftskip 12.75pc plus 20pc + \rule{30pc}{1.5pt}\\%[1pt] + \baselineskip16pt plus .2pt minus .2pt + \HD{0}{0}\FOURTEEN%\titfont + {\bf \@title}\\[-8pt] + \rule{30pc}{1.5pt}\HD{0}{6}% + \end{Flushleft} + \par}% +%%%authorA %GGG + \ifx\@authorA\undefined% + \else% + \begin{Flushleft}% + \baselineskip16pt plus .25pt minus .24pt + \leftskip 12.75pc plus 20pc %\rightskip 3pc plus 20pc + \HD{19}{0}\FOURTEEN\@authorA\HD{0}{0} + \end{Flushleft}% + \fi\par% + \ifx\@affA\undefined% + \else + \begin{Flushleft}% + \HD{15}{0}% + \leftskip 12.75pc plus 0pt %\rightskip 3pc plus 0pt + \@affA\HD{0}{12}% + \end{Flushleft}% + \fi% +%%% + \ifx\@authorB\undefined% + \else% + \begin{Flushleft}% + \leftskip 12.75pc plus 20pc %\rightskip 3pc plus 20pc + \HD{10}{0}\FOURTEEN\@authorB\HD{0}{0} + \end{Flushleft} + \fi% + \ifx\@affB\undefined% + \else \begin{Flushleft}\HD{15}{0}% + \leftskip 12.75pc plus 0pc %\rightskip 3pc plus 20pc + \@affB\HD{0}{12}% + \end{Flushleft}%% + \fi% +%%% +%%% + \ifx\@authorC\undefined% + \else% + \begin{Flushleft}% + \leftskip 12.75pc plus 20pc %\rightskip 3pc plus 20pc + \HD{10}{0}\FOURTEEN\@authorC\HD{0}{0} + \end{Flushleft} + \fi% + \ifx\@affC\undefined% + \else \begin{Flushleft}\HD{15}{0}% + \leftskip 12.75pc plus 0pc %\rightskip 3pc plus 20pc + \@affC\HD{0}{12}% + \end{Flushleft}%% + \fi% +%%% +%%% + \ifx\@authorD\undefined% + \else% + \begin{Flushleft}% + \leftskip 12.75pc plus 20pc %\rightskip 3pc plus 20pc + \HD{10}{0}\FOURTEEN\@authorD\HD{0}{0} + \end{Flushleft} + \fi% + \ifx\@affD\undefined% + \else \begin{Flushleft}\HD{15}{0}% + \leftskip 12.75pc plus 0pc %\rightskip 3pc plus 20pc + \@affD\HD{0}{12}% + \end{Flushleft}%% + \fi% +%%% + \ifx\@Date\undefined\else{\footnotesize% + \@Date% + \par}\vskip24pt\fi% + \HD{3}{0}% + \ifx\@ADDITIONAL\undefined% + \else% + \@ADDITIONAL \HD{0}{20}\par% + \fi% + \ifx\@PRESENTED\undefined% + \else% + \@PRESENTED\HD{0}{20}\par% + \fi% + \ifx\@HISTORY\undefined% + \else% + \@HISTORY \HD{9}{0}\par% + \fi% + \HD{0}{0}% +% \tableofcontents + \noindent\leftskip 12.75pc%\rightskip 3pc% + {\unvbox\t@abstract}% +% \end{Center}% + \ifx\@KEYWORD\undefined% + \else% + \noindent\leftskip 12.75pc%\rightskip 3pc% + \HD{19}{0}\NINE{\bf Keywords:}\ \@KEYWORD \HD{0}{0}\par% + \fi + \ifx\@REF\undefined% + \else% + \noindent\leftskip 12.75pc%\rightskip 3pc% + \HD{21}{0}\NINE{\bf Reference}\ \@REF \HD{0}{0}\par% + \fi% + \HD{12}{0}{\unvbox\t@bio}% +%%bio + \noindent\leftskip 0pc%\rightskip 2pc + \HD{18.5}{0}\rule{42.5pc}{1.5pt} + \vskip 20pt plus 3pt minus 2pt% + \relax% +}% + + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%-b-THEOREM + +\RequirePackage{theorem} + 
+\gdef\th@TH{% + \def\theorem@headerfont{\normalfont\mdseries\bf}% + \theorempreskipamount=12pt plus 1pt minus 1pt% + \theorempostskipamount=12pt plus 1pt minus 1pt% + \def\@begintheorem##1##2{\normalfont\it% + \item[\noindent\hskip\labelsep\theorem@headerfont\bf ##1\ ##2:]\enskip}% + \def\@opargbegintheorem##1##2##3{\normalfont\it% + \item[\noindent\hskip\labelsep\theorem@headerfont{\bf ##3:}]\enskip}} + +%This is defined to get Rule Case, definition, Example etc. +\gdef\th@THrm{% + \def\theorem@headerfont{\normalfont\mdseries\bf}% + \theorempreskipamount=12pt plus 1pt minus 1pt% + \theorempostskipamount=12pt plus 1pt minus 1pt% + \labelsep 0.5em %%10pt + \def\@begintheorem##1##2{\normalfont\rm + \item[\noindent\hskip\labelsep\theorem@headerfont{\bf ##1\ ##2:}]\enskip}% + \def\@opargbegintheorem##1##2##3{\normalfont\rm + \item[\noindent\hskip\labelsep\theorem@headerfont {{\bf ##3:}}]\enskip}} + +\gdef\th@THhit{% + \def\theorem@headerfont{\normalfont\mdseries\scshape}% + \theorempreskipamount=12pt plus 1pt minus 1pt% + \theorempostskipamount=12pt plus 1pt minus 1pt% + \labelsep 0.5em %%10pt + \def\@begintheorem##1##2{\it + \item[\noindent\hskip\labelsep\theorem@headerfont {\bf ##1}\ {\bf ##2:}]\enskip}% + \def\@opargbegintheorem##1##2##3{\normalfont\it + \item[\noindent\hskip\labelsep\theorem@headerfont {\bf ##3:}]\enskip}} + +\gdef\th@THkeyall{% + \def\theorem@headerfont{\normalfont\mdseries\scshape}% + \theorempreskipamount=12pt plus 6pt minus 3pt% + \theorempostskipamount=12pt plus 6pt minus 3pt% + \labelsep 0.5em %%10pt + \def\@begintheorem##1##2{\normalfont\it + \item[\indent\hskip\labelsep \theorem@headerfont ##1.]}% + \def\@opargbegintheorem##1##2##3{\normalfont\it + \item[\indent]{\theorem@headerfont {##3}\hskip\labelsep}\ignorespaces}} + +\gdef\th@EX{% + \def\theorem@headerfont{\normalfont\mdseries\scshape}% + \theorempreskipamount=0pt\theorempostskipamount=0pt + \labelsep 0.5em %%10pt + \def\@begintheorem##1##2{\normalfont\upshape + \item[\indent\hskip\labelsep \theorem@headerfont ##1\ ##2.]}% + \def\@opargbegintheorem##1##2##3{\normalfont\upshape + \item[\indent\hskip\labelsep \theorem@headerfont ##1\ ##2\ {##3.}]}} + +\gdef\th@EXkey{% + \def\theorem@headerfont{\normalfont\mdseries\scshape}% + \theorempreskipamount=13pt\theorempostskipamount=13pt + \labelsep 0.5em %%10pt + \def\@begintheorem##1##2{\normalfont\upshape + \item[\indent\hskip\labelsep \theorem@headerfont ##1\ ##2.]}% + \def\@opargbegintheorem##1##2##3{\normalfont\upshape + \item[\indent\hskip\labelsep \theorem@headerfont {##3}]}} + +\gdef\th@EXkeyall{% + \def\theorem@headerfont{\normalfont\mdseries\scshape}% + \theorempreskipamount=0pt\theorempostskipamount=0pt + \labelsep 0.5em %%10pt + \def\@begintheorem##1##2{\normalfont\upshape + \item[\indent\hskip\labelsep \theorem@headerfont ##1\ ##2.]}% + \def\@opargbegintheorem##1##2##3{\normalfont\upshape + \item[\indent\hskip\labelsep]{\theorem@headerfont {##3}}}} + +\def\problem#1{\noindent{\bf Problem #1.\/}\enskip\rm} +\def\endproblem{\hfill$\square$\vskip 10pt} + +\def\theo#1{\vskip 10pt \noindent{\bi Theorem #1.\/}\enskip\it} +\def\endtheo{\hfill$\square$\vskip 12pt} +%% Use for proper proofs that end with extra space (regardless of the use +%% or non-use of \qed (=the black box) +\def\THTrivlist{\parskip0pt\topsep=1.2em plus 0.3em minus 0.2em + \parsep0pt\partopsep0pt\@nmbrlistfalse + \@trivlist \labelwidth\z@ \leftmargin\z@ + \itemindent\z@ \def\makelabel##1{##1}} + +\let\endTHTrivlist=\endlist + +\def\proof#1{\THTrivlist %%\addvspace{0pt} + 
\item[\hspace*{0pt}\hskip\labelsep{\it #1}:\enskip]\ignorespaces} +\def\endproof{\hfill $\square$\endTHTrivlist} + +\def\endSQproof{\hfill\endTHTrivlist} +% this is to remove square from end of Proof + +\def\thproof#1{\vskip-\lastskip\par{ #1.\/}\enskip\rm} +\def\endthproof{\hfill$\square$\vskip 12pt} + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%-e-THEOREM + +%%% BIBLIOGRAPHY + +\def\@biblabel#1{[#1]} + \def\@cite#1{[#1]} + +%%%%%%%%%%%%%%%%%BIBLIOGRAPHY %%%%%%%%%%%%BBBBBB + +\def\thebibliography#1{% + \par{\section*{References and Notes}}% + \list{}{\settowidth\labelwidth{#1} + \setlength{\topsep}{0pt} + \labelsep 0pt\labelwidth 0pt + \leftmargin 8pt + \itemindent-8pt + \itemsep 5pt + \def\bibitem{\item \NINE + \baselineskip10pt plus 0.3pt minus 0.25pt\relax}} + \def\newblock{\hskip .11em plus .33em minus .07em} + \sloppy\clubpenalty4000\widowpenalty4000 + \sfcode`\.=1000\relax} + +\def\endthebibliography{\endlist} + +\def\notesname{Notes}% <------ JK +\def\enoteheading{\section*{\notesname}% + \leavevmode\par} + +\def\enoteformat{\rightskip\z@ \leftskip6pt \parindent=0em + \leavevmode\llap{\hbox{$^{\@theenmark}$}}} + +\def\enotesize{\footnotesize} + +\def\NUMBIB{\def\@biblabel##1{\bf ##1}% +\def\thebibliography##1{% + \par{\section*{References and Notes\HD{0}{0}}}% + \list + {\@biblabel{\arabic{enumiv}}}{\NINE + \baselineskip 10pt plus 0.3pt minus 0.25pt\relax + \settowidth\labelwidth{\@biblabel{##1}}% + \leftmargin\labelwidth + \advance\leftmargin\labelsep + \usecounter{enumiv}% + \let\p@enumiv\@empty + \labelsep 8pt\labelwidth 13.5pt + \itemsep 2pt + \parskip4pt\topsep4pt + \def\theenumiv{\arabic{enumiv}}}% + \def\newblock{\hskip .11em plus.33em minus.07em}% + \sloppy\clubpenalty4000\widowpenalty4000 + \sfcode`\.=1000\relax + \parsep0pt}} + +\clubpenalty 10000 +\widowpenalty 10000 +\predisplaypenalty=10000 +\setcounter{secnumdepth}{3} +\newcounter {part} +\newcounter {section} +\newcounter {subsection}[section] +\newcounter {subsubsection}[subsection] +\newcounter {paragraph}[subsubsection] +\newcounter {subparagraph}[paragraph] +\renewcommand \thepart {\@Roman\c@part} +\renewcommand \thesection {\@arabic\c@section} +\renewcommand\thesubsection {\thesection.\@arabic\c@subsection} +\renewcommand\thesubsubsection{\thesubsection .\@arabic\c@subsubsection} +\renewcommand\theparagraph {\thesubsubsection.\@arabic\c@paragraph} +\renewcommand\thesubparagraph {\theparagraph.\@arabic\c@subparagraph} +\newcommand\part{\par + \addvspace{4ex}% + \@afterindentfalse + \secdef\@part\@spart} + +\def\@part[#1]#2{% + \ifnum \c@secnumdepth >\m@ne + \refstepcounter{part}% + \addcontentsline{toc}{part}{\thepart\hspace{1em}#1}% + \else + \addcontentsline{toc}{part}{#1}% + \fi + {\parindent \z@ \raggedright + \interlinepenalty \@M + \normalfont + \ifnum \c@secnumdepth >\m@ne + \Large\bfseries \partname~\thepart + \par\nobreak + \fi + \huge \bfseries #2% + \markboth{}{}\par}% + \nobreak + \vskip 3ex + \@afterheading} +\def\@spart#1{% + {\parindent \z@ \raggedright + \interlinepenalty \@M + \normalfont + \huge \bfseries #1\par}% + \nobreak + \vskip 3ex + \@afterheading} + +\def\@startsection#1#2#3#4#5#6{\if@noskipsec \leavevmode \fi + \par \@tempskipa #4\relax + \@afterindentfalse + \ifdim \@tempskipa <\z@ \@tempskipa -\@tempskipa \@afterindentfalse\fi + \if@nobreak \everypar{}\else + \addpenalty{\@secpenalty}\addvspace{\@tempskipa}\fi \@ifstar + {\@ssect{#3}{#4}{#5}{#6}}{\@dblarg{\@sect{#1}{#2}{#3}{#4}{#5}{#6}}}} + +\def\@sect#1#2#3#4#5#6[#7]#8{% + \ifnum #2>\c@secnumdepth + \let\@svsec\@empty + \else + 
\refstepcounter{#1}% + \edef\@svsec{\csname the#1\endcsname\hskip 9.3pt}% + \fi + \@tempskipa #5\relax + \ifdim \@tempskipa>\z@ + \begingroup #6{\relax + \parindent0pt + \@hangfrom{\hskip #3\relax\@svsec}{\interlinepenalty \@M #8\par}}% + \endgroup + \csname #1mark\endcsname{#7}\addcontentsline + {toc}{#1}{\ifnum #2>8 %%%\c@secnumdepth + \else + \protect\numberline{\csname the#1\endcsname}\fi + #7}\else + \def\@svsechd{#6\hskip #3\relax %% \relax added 2 May 90 + \@svsec #8.\csname #1mark\endcsname + {#7}\addcontentsline + {toc}{#1}{\ifnum #2>\c@secnumdepth \else + \protect\numberline{\csname the#1\endcsname}\fi + #7}}\fi + \@xsect{#5}} + +\def\@xsect#1{\@tempskipa #1\relax + \ifdim \@tempskipa>\z@ + \par \nobreak + \vskip \@tempskipa + \@afterheading + \else \global\@nobreakfalse \global\@noskipsectrue + \everypar{\if@noskipsec \global\@noskipsecfalse + \clubpenalty\@M \hskip -\parindent + \begingroup \@svsechd \endgroup \unskip + \hskip -#1\relax % relax added 14 Jan 91 + \else \clubpenalty \@clubpenalty + \everypar{}\fi}\fi\ignorespaces} + +\def\@ssect#1#2#3#4#5{\@tempskipa #3\relax + \ifdim \@tempskipa>\z@ + \begingroup #4{\@hangfrom{\hskip #1}{\interlinepenalty \@M #5\par}}\endgroup + \else \def\@svsechd{#4\hskip #1\relax #5}\fi + \@xsect{#3}} + +\def\section{\@startsection {section}{1}{\z@} + {24pt plus 4pt minus 4pt} + {12pt} + {\reset@font\hyphenpenalty9999\ELEVEN\bfseries\hangindent1.33pc}} + +\def\subsection{\@startsection{subsection}{2}{\z@} + {12pt plus 4pt minus 4pt} + {10pt} + {\reset@font\hyphenpenalty9999\ELEVEN\it\raggedright\hangindent1.95pc}} + +\def\subsubsection{\@startsection{subsubsection}{3}{\z@} + {12pt plus 4pt minus 4pt} + {8pt} + {\reset@font\hyphenpenalty9999\ELEVEN\it\hangindent1.95pc}} + +\def\paragraph{\@startsection{paragraph}{4}{0pt} + {12pt plus 4pt minus 2pt} + {-8pt} + {\reset@font\hyphenpenalty9999\it\hangindent1.95pc}} + +\def\subparagraph{\@startsection{subparagraph}{5}{18pt} + {13pt plus 4pt minus 2pt} + {-10pt} + {\reset@font\TEN\SECRR\bf }} +\if@twocolumn + \setlength\leftmargini {2em} +\else + \setlength\leftmargini {2.5em} +\fi +\leftmargin \leftmargini +\setlength\leftmarginii {2.2em} +\setlength\leftmarginiii {1.87em} +\setlength\leftmarginiv {1.7em} +\if@twocolumn + \setlength\leftmarginv {.5em} + \setlength\leftmarginvi {.5em} +\else + \setlength\leftmarginv {1em} + \setlength\leftmarginvi {1em} +\fi +\setlength \labelsep {.5em} +\setlength \labelwidth{\leftmargini} +\addtolength\labelwidth{-\labelsep} +\@beginparpenalty -\@lowpenalty +\@endparpenalty -\@lowpenalty +\@itempenalty -\@lowpenalty +\renewcommand\theenumi{\@arabic\c@enumi} +\renewcommand\theenumii{\@alph\c@enumii} +\renewcommand\theenumiii{\@roman\c@enumiii} +\renewcommand\theenumiv{\@Alph\c@enumiv} +\newcommand\labelenumi{\theenumi.} +\newcommand\labelenumii{(\theenumii)} +\newcommand\labelenumiii{\theenumiii.} +\newcommand\labelenumiv{\theenumiv.} +\renewcommand\p@enumii{\theenumi} +\renewcommand\p@enumiii{\theenumi(\theenumii)} +\renewcommand\p@enumiv{\p@enumiii\theenumiii} +\newcommand\labelitemi{\textbullet} +\newcommand\labelitemii{\normalfont\bfseries \textendash} +\newcommand\labelitemiii{\textasteriskcentered} +\newcommand\labelitemiv{\textperiodcentered} +\newenvironment{description} + {\list{}{\labelwidth\z@ \itemindent-\leftmargin + \let\makelabel\descriptionlabel}} + {\endlist} +\newcommand*\descriptionlabel[1]{\hspace\labelsep + \normalfont\bfseries #1} +\newenvironment{verse} + {\let\\\@centercr + \list{}{\itemsep \z@ + \itemindent -1.5em% + 
\listparindent\itemindent + \rightmargin \leftmargin + \advance\leftmargin 1.5em}% + \item\relax} + {\endlist} +\newenvironment{quotation} + {\list{}{\listparindent 1.5em% + \itemindent \listparindent + \rightmargin \leftmargin + \parsep \z@ \@plus\p@}% + \item\relax} + {\endlist} +\newenvironment{quote} + {\list{}{\NINE\rightmargin 14pt\leftmargin 14pt}% + \item\relax} + {\endlist} +\if@compatibility +\newenvironment{titlepage} + {% + \if@twocolumn + \@restonecoltrue\onecolumn + \else + \@restonecolfalse\newpage + \fi + \thispagestyle{empty}% + \setcounter{page}\z@ + }% + {\if@restonecol\twocolumn \else \newpage \fi + } +\else +\newenvironment{titlepage} + {% + \if@twocolumn + \@restonecoltrue\onecolumn + \else + \@restonecolfalse\newpage + \fi + \thispagestyle{empty}% + \setcounter{page}\@ne + }% + {\if@restonecol\twocolumn \else \newpage \fi + \if@twoside\else + \setcounter{page}\@ne + \fi + } +\fi +\newcommand\appendix{\par + \setcounter{section}{0}% + \setcounter{subsection}{0}% + \renewcommand\thesection{\@Alph\c@section}} +\setlength\arraycolsep{1\p@} +\setlength\tabcolsep{6\p@} +\setlength\arrayrulewidth{.4\p@} +\setlength\doublerulesep{2\p@} +\setlength\tabbingsep{\labelsep} +\skip\@mpfootins = \skip\footins +\setlength\fboxsep{3\p@} +\setlength\fboxrule{.4\p@} +\newdimen\mathindent +\mathindent=9pt +\renewcommand \theequation {\@arabic\c@equation} + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%EQUATIONS +% They are indented from left 2 picas and have on the right eqno as "[23]" +%\def\EQtrivlist{\parskip0pt\topsep=10pt plus 5pt minus 6pt +% \parsep0pt\partopsep0pt\@nmbrlistfalse +% \@trivlist \labelwidth\z@ \leftmargin\z@\listparindent\z@ +% \itemindent\z@ \def\makelabel##1{##1}}% +%\def\endEQtrivlist{\if@newlist\@noitemerr\fi +% \if@inlabel\indent\fi +% \ifhmode\unskip \par\fi +% \if@noparlist \else +% \ifdim\lastskip >\z@ \@tempskipa\lastskip \vskip -\lastskip +% \advance\@tempskipa\parskip \advance\@tempskipa -\@outerparskip +% \vskip\@tempskipa +% \fi\@endparenv\fi}% +% +%%%%%%%%%%%%%%%%%%%%%%-B-fleqn.sty-(modified) +\def\[{\relax\ifmmode\@badmath\else + \begin{trivlist}% + \@beginparpenalty\predisplaypenalty + \@endparpenalty\postdisplaypenalty + \item[]\leavevmode + \hbox to\linewidth\bgroup $\m@th\displaystyle + \hskip\mathindent\bgroup\fi} + +\def\]{\relax\ifmmode \egroup $\hfil\displaywidth\linewidth + \egroup \end{trivlist}\else \@badmath \fi\noindent\ignorespaces} + +\def\equation{\@beginparpenalty\predisplaypenalty + \@endparpenalty\postdisplaypenalty + \refstepcounter{equation}\trivlist \item[]\leavevmode + \hbox to\linewidth\bgroup $\m@th\displaystyle\hskip\mathindent} + +\def\endequation{$\hfil\displaywidth\linewidth\@eqnnum\egroup\endtrivlist} + +\def\VVV{\@beginparpenalty\predisplaypenalty + \@endparpenalty\postdisplaypenalty + \EQtrivlist \item[]\leavevmode + \hbox to\linewidth\bgroup $\m@th\displaystyle\hskip\mathindent} + +\def\endVVV{$\hfil\displaywidth\linewidth\egroup\endEQtrivlist} + +\def\eqnarray{\stepcounter{equation}\let\@currentlabel=\theequation + \global\@eqnswtrue + \global\@eqcnt\z@\tabskip\mathindent\let\\=\@eqncr + \abovedisplayskip 12pt plus 5pt minus 5pt + \belowdisplayskip\abovedisplayskip + \belowdisplayshortskip\abovedisplayskip + \abovedisplayshortskip\abovedisplayskip + $$\m@th\halign to\linewidth\bgroup\@eqnsel + \hskip\@centering + $\displaystyle\tabskip\z@ + {##}$&\global\@eqcnt\@ne \hfil$\displaystyle{{}##{}}$\hfil + &\global\@eqcnt\tw@ $\displaystyle{##}$\hfil + \tabskip\@centering&\llap{##}\tabskip\z@\cr} + 
+\def\endeqnarray{\@@eqncr\egroup + \global\advance\c@equation\m@ne$$\global\@ignoretrue} + +\newdimen\mathindent +\mathindent=9pt %%% \leftmargini + +%%%%%%%%%%%%%%%%%%%%-E-fleqn.sty-(modified) + + +\newcounter{figure} +\renewcommand \thefigure {\@arabic\c@figure} +\def\fps@figure{tbp} +\def\ftype@figure{1} +\def\ext@figure{lof} +\def\fnum@figure{\bf\figurename~\thefigure} +\newenvironment{figure} + {\@float{figure}} + {\end@float} +\newenvironment{figure*} + {\@dblfloat{figure}} + {\end@dblfloat} +\newcounter{table} +\renewcommand\thetable{\@arabic\c@table} +\def\fps@table{tbp} +\def\ftype@table{2} +\def\ext@table{lot} +\def\fnum@table{\bf\tablename~\thetable} +\newenvironment{table} + {\@float{table}} + {\end@float} +\newenvironment{table*} + {\@dblfloat{table}} + {\end@dblfloat} +\newlength\abovecaptionskip +\newlength\belowcaptionskip +\setlength\abovecaptionskip{10\p@} +\setlength\belowcaptionskip{0\p@} +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\@makecaption!!! +%%%%%%%%%%%%%%%%%%%%%%%%%%%ragu +%\long\def\@makecaption#1#2{\parindent0pt +%\ifx\@captype\@tabtxt +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%TABLE +%\NINE +%\par\noindent\hbox to\textwidth{\hfil\parbox{\tabw}{\noindent % +% {\tabnamefont #1.}\quad\hangindent3.3pc #2\HD{0}{18}\endgraf}\hfil} +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +%\else %% if it is a figure! +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%FIGURE +%\NINE +%\par\noindent{\fignamefont #1\HD{7}{0}}\quad +%\hangindent3.2pc\raggedright #2\vspace{6pt} \fi} + +\long\def\@makecaption#1#2{\parindent0pt +\ifx\@captype\@tabtxt +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%TABLE +\NINE +\par\noindent\hbox to\textwidth{\hfil\parbox{\tabw}{\noindent % + {\tabnamefont #1.}\quad\raggedright\hangindent3.3pc #2\HD{0}{18}\endgraf}\hfil} +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\else %% if it is a figure! 
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%FIGURE +\NINE\par\noindent{\fignamefont #1\HD{7}{0}}\quad +\hangindent3.29pc\raggedright #2\vspace{6pt} \fi} + +\DeclareOldFontCommand{\rm}{\normalfont\rmfamily}{\mathrm} +\DeclareOldFontCommand{\sf}{\normalfont\sffamily}{\mathsf} +\DeclareOldFontCommand{\tt}{\normalfont\ttfamily}{\mathtt} +\DeclareOldFontCommand{\bf}{\normalfont\bfseries}{\mathbf} +\DeclareOldFontCommand{\it}{\normalfont\itshape}{\mathit} +\DeclareOldFontCommand{\sl}{\normalfont\slshape}{\@nomath\sl} +\DeclareOldFontCommand{\sc}{\normalfont\scshape}{\@nomath\sc} +\DeclareRobustCommand*\cal{\@fontswitch\relax\mathcal} +\DeclareRobustCommand*\mit{\@fontswitch\relax\mathnormal} +\newcommand\@pnumwidth{1.55em} +\newcommand\@tocrmarg{2.55em} +\newcommand\@dotsep{4.5} +\setcounter{tocdepth}{3} +\newcommand\tableofcontents{% + \section*{\contentsname + \@mkboth{% + \MakeUppercase\contentsname}{\MakeUppercase\contentsname}}% + \@starttoc{toc}% + } +\newcommand*\l@part[2]{% + \ifnum \c@tocdepth >-2\relax + \addpenalty\@secpenalty + \addvspace{2.25em \@plus\p@}% + \begingroup + \parindent \z@ \rightskip \@pnumwidth + \parfillskip -\@pnumwidth + {\leavevmode + \large \bfseries #1\hfil \hb@xt@\@pnumwidth{\hss #2}}\par + \nobreak + \if@compatibility + \global\@nobreaktrue + \everypar{\global\@nobreakfalse\everypar{}}% + \fi + \endgroup + \fi} +\newcommand*\l@section[2]{% + \ifnum \c@tocdepth >\z@ + \addpenalty\@secpenalty + \addvspace{1.0em \@plus\p@}% + \setlength\@tempdima{1.5em}% + \begingroup + \parindent \z@ \rightskip \@pnumwidth + \parfillskip -\@pnumwidth + \leavevmode \bfseries + \advance\leftskip\@tempdima + \hskip -\leftskip + #1\nobreak\hfil \nobreak\hb@xt@\@pnumwidth{\hss #2}\par + \endgroup + \fi} +\newcommand*\l@subsection{\@dottedtocline{2}{1.5em}{2.3em}} +\newcommand*\l@subsubsection{\@dottedtocline{3}{3.8em}{3.2em}} +\newcommand*\l@paragraph{\@dottedtocline{4}{7.0em}{4.1em}} +\newcommand*\l@subparagraph{\@dottedtocline{5}{10em}{5em}} +\newcommand\listoffigures{% + \section*{\listfigurename + \@mkboth{\MakeUppercase\listfigurename}% + {\MakeUppercase\listfigurename}}% + \@starttoc{lof}% + } +\newcommand*\l@figure{\@dottedtocline{1}{1.5em}{2.3em}} +\newcommand\listoftables{% + \section*{\listtablename + \@mkboth{% + \MakeUppercase\listtablename}{\MakeUppercase\listtablename}}% + \@starttoc{lot}% + } +\let\l@table\l@figure + +\newenvironment{NLh}[1]{% + \setbox\boxH=\hbox{{\rm #1}}% + \raggedright + \begin{list}{\rm{\arabic{enumii}.}}{% + \usecounter{enumii}% + \topsep=10pt plus .5pt minus .5pt% + \partopsep=0pt% + \itemsep=6pt% + \parsep=0pt% + \labelsep=14pt% + \labelwidth=\wd\boxH% + \leftmargin=\labelwidth% + \advance\leftmargin\labelsep% + \rightmargin=0pt% + \listparindent=0em% + \itemindent 0pt\ignorespaces}\mathindent=0pt}{\end{list}}% + +\def\LM{\leavevmode } +\newenvironment{BL}{% + \begin{list}{\LM\hbox{$\bullet$}}{% +\raggedright + \topsep=10pt plus 4pt minus 4pt + \partopsep=0pt + \itemsep=6pt %2pt + \parsep=0pt + \labelsep=14pt + \labelwidth=10pt + \rightmargin=0pt + \listparindent=0pt + \itemindent=0pt %=\leftmargin + \leftmargin=20pt}\mathindent=0pt}{\end{list}}% + +\floatsep 16pt plus 4pt minus 4pt +\textfloatsep 16pt plus 4pt minus 4pt +\intextsep 14pt plus 4pt minus 4pt +\@fptop 0pt plus 1fil \@fpsep 8pt plus +2fil \@fpbot 0pt plus 1fil + +\newenvironment{inFig} +{\vskip 14pt\noindent\hskip -0pt} +{\vskip 10pt} + +\newif\if@ttirotate \@ttirotatefalse +\def\ttirotate{\global\@ttirotatetrue} +\def\endttirotate{\global\@ttirotatefalse} + +%------ Figures 
intro +\newcommand\listfigurename{List of Figures} +\def\thefigure{\arabic{figure}} +\def\fignamefont{\NINE} +\def\tabnamefont{\NINE} + +%------ Tables intro +\newbox\@tempboxb +\newcommand\tablename{Table} +\newcommand\listtablename{List of Tables} +\renewcommand\thetable{\arabic{table}} +\def\fps@table{tbp}\def\ftype@table{2}\def\ext@table{lot} +\def\fnum@table{\tabnamefont\bf\tablename\ \thetable} +\def\table{\let\@makecaption\@tablecaption\@float{table}} +\let\endtable\end@float +\@namedef{table*}{\let\@makecaption\@tablecaption\@dblfloat{table}} +\@namedef{endtable*}{\end@dblfloat} + +\long\def\@tablecaption#1#2{% +% Set TN followed by the caption see the length + \setbox\@tempboxa + \hbox{\NINE{\it #1}\quad #2}% +% Set the caption only to examine its existence and length if any + \setbox\@tempboxb + \hbox{\NINE#2}% +% If no caption, TN centers with no period after it + \ifdim\wd\@tempboxb < 3pt + \begin{Center}\NINE + \HD{8}{0}{\it #1}\HD{0}{6}\end{Center} + \else +% If TN+caption shorter than the table + \ifdim\wd\@tempboxa < \tempdime + %\begin{Center} + \NINE \HD{8}{0}{\it #1}\qquad #2\HD{0}{6}%\end{Center} + \else +% If TN+caption longer than the table + \noindent\NINE\HD{8}{0}{\bf #1}\quad + \raggedright\hangindent3.05pc #2\HD{0}{6}\endgraf + \fi\fi} + +%------TABLE + +\newbox\boxH +\newbox\boxHrunin +\newbox\temptbox +\newbox\temptboxA + +\newdimen\tempdime +\newdimen\tempdimeA +\newdimen\rboxh +\newdimen\rboxw +\def\EGT{\footnotesize} + +\newcommand{\TABLE}[3]{% + \setbox\temptboxA=\hbox{{\NINE#1}}% + \setbox\temptbox=\hbox{{\EGT#2}}% + \tempdime\wd\temptbox% + \tempdimeA\wd\temptboxA% + \@TABLE{\hangindent3.1pc {#1}\vs{2}}{#2}{#3}{\tempdime}} + +\newcommand{\@TABLE}[4]{% +\long\def\testtabnote{#3}% +\long\def\itis@empty{}% +\ifx\testtabnote\itis@empty + \long\def\tabnote##1{\relax}% +\else + \long\def\tabnote##1{\par{\NINE \vspace*{4pt} ##1\endgraf}}% +\fi +\if@ttirotate + \setbox4=\hbox to #4 %%\textheight + {\vbox to \textwidth{\hsize #4 %%%\textheight + \vss\overfullrule=0pt + \begin{minipage}[c]{#4}% + \caption{#1}% + {\NINE#2} + \tabnote{\hspace*{3pc}#3}% + \end{minipage}% + \vss}} +\else + \setbox4=\hbox to \hsize{\hss + % Table should be indented by 2pc + % from left margin + \begin{minipage}[t]{#4}% + \caption{#1}% + {\NINE#2}% + \tabnote{\hspace*{3pc}#3}% + \end{minipage}\hss} +\fi +\if@ttirotate + \setbox5=\zeroCC{\box4}% + \par\noindent\hspace*{0.5\textwidth}% + \vrule width 0pt height 0.5\textheight depth 0.499\textheight + \rotatebox{90}{\box5}% +\else + \noindent\box4% +\fi +} + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%-b-figures-and-tables + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%BBB +%%% FIGURES AND TABLES + +\setcounter{topnumber}{3} +\def\topfraction{0.99} +\setcounter{bottomnumber}{2} +\def\bottomfraction{.99} +\setcounter{totalnumber}{5} +\def\textfraction{.08} +\def\floatpagefraction{.55} +\setcounter{dbltopnumber}{3} +\def\dbltopfraction{.9} +\def\dblfloatpagefraction{.5} + +% FIGURE + +\def\fps@figure{tbp} +\def\ftype@figure{1} +\def\ext@figure{lof} +%\def\fnum@figure{\bf Figure\ \thefigure} +\def\figure{\@float{figure}} +\let\endfigure\end@float +\@namedef{figure*}{\@dblfloat{figure}} +\@namedef{endfigure*}{\end@dblfloat} + +\newenvironment{theindex} + {\if@twocolumn + \@restonecolfalse + \else + \@restonecoltrue + \fi + \columnseprule \z@ + \columnsep 35\p@ + \twocolumn[\section*{\indexname}]% + \@mkboth{\MakeUppercase\indexname}% + {\MakeUppercase\indexname}% + \thispagestyle{plain}\parindent\z@ + \parskip\z@ \@plus .3\p@\relax + \let\item\@idxitem} + 
{\if@restonecol\onecolumn\else\clearpage\fi} +\newcommand\@idxitem{\par\hangindent 40\p@} +\newcommand\subitem{\@idxitem \hspace*{20\p@}} +\newcommand\subsubitem{\@idxitem \hspace*{30\p@}} +\newcommand\indexspace{\par \vskip 10\p@ \@plus5\p@ \@minus3\p@\relax} +\renewcommand\footnoterule{% + \kern-3\p@ + \hrule\@width.4\columnwidth + \kern2.6\p@} +\newcommand\@makefntext[1]{% + \parindent 1em% + \noindent + \hb@xt@1.8em{\hss\@makefnmark}#1} + +\renewcommand{\footnote}{\endnote} +\def\enotesize{\NINE} + +\newcommand\contentsname{Contents} +\newcommand\refname{References} +\newcommand\indexname{Index} +\newcommand\figurename{Figure} +\newcommand\partname{Part} +\newcommand\appendixname{Appendix} +\def\p#1{\phantom{#1}} +\def\vvp{\vadjust{\vfill\pagebreak}} +\def\vvsp#1{\vadjust{\vspace{#1pt}\pagebreak}} +\def\vvSP#1{\HD{0}{#1}\pagebreak} +\def\tra#1{\spaceskip 0.#1em plus 0.15em minus 0.09em\relax}% +\def\vs#1{\vadjust{\vspace{#1pt}}} +\def\mathtight{ +\thickmuskip2mu \thinmuskip1mu \medmuskip=1.5mu\relax} + +%\font\bb=qp3tir at 10pt +%\def\sst{\mbox{\bb\char"019}} +%\def\aet{\mbox{\bb\char"01A}} +%\def\oet{\mbox{\bb\char"01B}} +%\def\oot{\mbox{\bb\char"01C}} +%\def\AEt{\mbox{\bb\char"01D}} +%\def\OEt{\mbox{\bb\char"01E}} +%\def\OOt{\mbox{\bb\char"01F}} +% +%\font\bs=qp3tir at 9pt +%\def\ssr{\mbox{\bs\char"019}} +%\def\aer{\mbox{\bs\char"01A}} +%\def\oer{\mbox{\bs\char"01B}} +%\def\oor{\mbox{\bs\char"01C}} +%\def\AEr{\mbox{\bs\char"01D}} +%\def\OEr{\mbox{\bs\char"01E}} +%\def\OOr{\mbox{\bs\char"01F}} +% +%\font\gs=r1-tii at 10pt +%\def\UPalpha{\mbox{\gs\char"0C1}} +%\def\UPbeta{\mbox{\gs\char"0C2}} +%\def\UPgamma{\mbox{\gs\char"0C3}} +%\def\UPdelta{\mbox{\gs\char"0C4}} +%\def\UPepsilon{\mbox{\gs\char"0C5}} +%\def\UPzeta{\mbox{\gs\char"0C6}} +%\def\UPeta{\mbox{\gs\char"0C7}} +%\def\UPtheta{\mbox{\gs\char"0C8}} +%\def\UPiota{\mbox{\gs\char"0C9}} +%\def\UPkappa{\mbox{\gs\char"0CA}} +%\def\UPlambda{\mbox{\gs\char"0CB}} +%\def\UPmu{\mbox{\gs\char"0CC}} +%\def\UPnu{\mbox{\gs\char"0CD}} +%\def\UPxi{\mbox{\gs\char"0CE}} +%\def\UPpi{\mbox{\gs\char"0CF}} +%\def\UPrho{\mbox{\gs\char"0D0}} +%\def\UPsigma{\mbox{\gs\char"0D1}} +%\def\UPtau{\mbox{\gs\char"0D2}} +%\def\UPupsilon{\mbox{\gs\char"0D3}} +%\def\UPphi{\mbox{\gs\char"0D4}} +%\def\UPchi{\mbox{\gs\char"0D5}} +%\def\UPpsi{\mbox{\gs\char"0D6}} +%\def\UPomega{\mbox{\gs\char"0D7}} + +%%For Table rule + +\def\toprule{\noalign{\ifnum0=`}\fi \hrule \@height 2.0pt \hrule +\@height 2pt \@width 0pt \futurelet\@tempa\@xhline} + +\def\midrule{\noalign{\ifnum0=`}\fi \hrule \@height 2pt \@width 0pt +\hrule \@height 0.7pt \hrule \@height 2pt \@width 0pt +\futurelet\@tempa\@xhline} + +\def\botrule{\noalign{\ifnum0=`}\fi \hrule +\@height 2pt \@width 0pt \hrule \@height 2.0pt +\futurelet\@tempa\@xhline} + +\def\hline{\noalign{\ifnum0=`}\fi\hrule \@height .5pt \futurelet + \reserved@a\@xhline} +\def\@xhline{\ifx\reserved@a\hline\vskip\doublerulesep + \vskip-0.5pt\fi\ifnum0=`{\fi}} + +% Thick 1 pt table rules +\def\Hline{\noalign{\ifnum0=`}\fi\hrule \@height .7pt \futurelet + \reserved@a\@xHLINE} +\def\@xHline{\ifx\reserved@a\Hline\vskip0.5pt\vskip\doublerulesep + \vskip-1pt\fi\ifnum0=`{\fi}} + +% Thick 2 pt table rules +\def\HLINE{\noalign{\ifnum0=`}\fi\hrule \@height 2.0pt \futurelet + \reserved@a\@xHLINE} +\def\@xHLINE{\ifx\reserved@a\HLINE\vskip1pt\vskip\doublerulesep + \vskip-1pt\fi\ifnum0=`{\fi}} +\topmargin12pt +\headheight12pt +\headsep 14pt +\topskip=4pt %% \headsep + \topskip s/b here 27pt (17+10) +\footskip 36pt +\textheight=704pt %% = 46 x 12 + 10 = 47 
lines of text 10/12 +\textwidth=42.5pc +\columnsep 22pt +\columnseprule 0pt + +\def\ps@kumaresh{\let\@mkboth\markboth + \def\@oddfoot{\setoddFOOT}% + \def\@evenfoot{\setevenFOOT}% + \def\@evenhead{{\VRHDW{\headheight}{0pt}{0pt}\CROPMARKS% + \raisebox{0pt}[\headheight][0pt]{% +%\hspace*{-25pt} \marginnumbering\hspace*{18pt} + \begin{tabular}{@{}c@{}}\\[-15pt]% + \hbox to + \textwidth{\spaceskip0.41em{%\TEN\thepage + }\hfill\centerline{%\TEN\it\setLRH + }} + \end{tabular} + }}}% + \def\@oddhead{{\VRHDW{\headheight}{0pt}{0pt}\CROPMARKS% + \raisebox{0pt}[\headheight][0pt]{% +%\hspace*{-25pt} \marginnumbering\hspace*{18pt} + \begin{tabular}{@{}c@{}}\\[-15pt]% + \hbox to \textwidth{\hfill{%\TEN\it\spaceskip0.41em\setRRH + }\hfill{%\TEN\thepage + }} + \end{tabular} + }}}% + \def\sectionmark##1{}% + \def\subsectionmark##1{}} + +\newcommand{\querypage}[1]{\def\testquerypage{#1}% +\newpage\thispagestyle{kumaresh}% +\begin{center} +\HD{5}{0}\fontsize{14}{16}\selectfont\bf\MakeUppercase\@title\HD{3}{30}\par +{\TEN \setLRH}\vskip1pc\par +{\TEN Author Queries} +\vskip1pc +\end{center} +\hangindent3.7pc\Q} + +\def\Q#1{\noindent \hangindent3.7pc\h{AQ#1 Au:}} + +\def\mgpl#1{\marginpar{{\hspace*{-1.8pc}}\EGT #1}} +\def\mgpr#1{\marginpar{{\hspace*{.8pc}\EGT #1}}} + + +%\def\mgp#1{\marginpar{\kern-6pt{\EGT #1}}} +\def\x{\end{document}} +\def\h{\hbox} +\def\p#1{\phantom{#1}} +\def\vvp{\vadjust{\vfill\pagebreak}} +\def\vvsp#1{\vadjust{\vspace{#1pt}\pagebreak}} +\def\vvSP#1{\HD{0}{#1}\pagebreak} +\def\tra#1{\spaceskip 0.#1em plus 0.15em minus 0.09em\relax}% +\def\vs#1{\vadjust{\vspace{#1pt}}} +\def\mathtight{ +\thickmuskip2mu \thinmuskip1mu \medmuskip=1.5mu\relax} + +\def\today{\ifcase\month\or + January\or February\or March\or April\or May\or June\or + July\or August\or September\or October\or November\or December\fi + \space\number\day, \number\year} +\setlength\columnsep{22\p@} +\setlength\columnseprule{0\p@} +\setlength\parindent{14\p@} +\pagestyle{headings} + +\clubpenalty 10000 \widowpenalty 10000 \predisplaypenalty=10000 +\interdisplaylinepenalty=100 \displaywidowpenalty 1500 +\postdisplaypenalty 0 %%%500 +\interlinepenalty 0 +\brokenpenalty 500 % Plain default = 100 +\adjdemerits=100 % Plain default = 10000 +\linepenalty=10 \doublehyphendemerits=10000 \finalhyphendemerits=5000 +\hyphenpenalty=10 \exhyphenpenalty=50 +\binoppenalty=150 % 700 +\relpenalty=100 % 500 +\interfootnotelinepenalty=100 +%\nulldelimiterspace=1.2pt +\lefthyphenmin=15 +\righthyphenmin=15 +\tolerance=9999 +\emergencystretch=1.6pc +\frenchspacing + +\thinmuskip=3mu +\medmuskip=4mu% plus 1mu minus 1mu +\thickmuskip=5mu% plus 3mu +\def\enotesize{\NINE} + +\pagenumbering{arabic} +\if@twoside +\else + \raggedbottom +\fi +\if@twocolumn + \twocolumn + \sloppy + \flushbottom +\else + \onecolumn +\fi +\endinput +%% +%% End of file `doublecol.cls'. 
diff --git a/IJHPCN/nb_iter_sec_ex15_juqueen.pdf b/IJHPCN/nb_iter_sec_ex15_juqueen.pdf new file mode 100644 index 0000000..748cda4 Binary files /dev/null and b/IJHPCN/nb_iter_sec_ex15_juqueen.pdf differ diff --git a/IJHPCN/nb_iter_sec_ex54_curie.pdf b/IJHPCN/nb_iter_sec_ex54_curie.pdf new file mode 100644 index 0000000..d588709 Binary files /dev/null and b/IJHPCN/nb_iter_sec_ex54_curie.pdf differ diff --git a/IJHPCN/paper.tex b/IJHPCN/paper.tex new file mode 100644 index 0000000..0c88f29 --- /dev/null +++ b/IJHPCN/paper.tex @@ -0,0 +1,860 @@ +%%%%%%%%%%%%%%%%%%%%%% +\documentclass{doublecol-new} +%%%%%%%%%%%%%%%%%%%%%% + +\usepackage{natbib,stfloats} +\usepackage{mathrsfs} +\usepackage[utf8]{inputenc} +\usepackage[T1]{fontenc} +\usepackage{algorithm} +\usepackage{algpseudocode} +\usepackage{amsmath} +\usepackage{amssymb} +\usepackage{multirow} +\usepackage{graphicx} +\usepackage{url} + +\def\newblock{\hskip .11em plus .33em minus .07em} + +\theoremstyle{TH}{ +\newtheorem{lemma}{Lemma} +\newtheorem{theorem}[lemma]{Theorem} +\newtheorem{corrolary}[lemma]{Corrolary} +\newtheorem{conjecture}[lemma]{Conjecture} +\newtheorem{proposition}[lemma]{Proposition} +\newtheorem{claim}[lemma]{Claim} +\newtheorem{stheorem}[lemma]{Wrong Theorem} +%\newtheorem{algorithm}{Algorithm} +} + +\theoremstyle{THrm}{ +\newtheorem{definition}{Definition}[section] +\newtheorem{question}{Question}[section] +\newtheorem{remark}{Remark} +\newtheorem{scheme}{Scheme} +} + +\theoremstyle{THhit}{ +\newtheorem{case}{Case}[section] +} +\algnewcommand\algorithmicinput{\textbf{Input:}} +\algnewcommand\Input{\item[\algorithmicinput]} + +\algnewcommand\algorithmicoutput{\textbf{Output:}} +\algnewcommand\Output{\item[\algorithmicoutput]} + + + + +\makeatletter +\def\theequation{\arabic{equation}} + +%\JOURNALNAME{\TEN{\it Int. J. System Control and Information +%Processing, +%Vol. \theVOL, No. \theISSUE, \thePUBYEAR\hfill\thepage}}% +% +%\def\BottomCatch{% +%\vskip -10pt +%\thispagestyle{empty}% +%\begin{table}[b]% +%\NINE\begin{tabular*}{\textwidth}{@{\extracolsep{\fill}}lcr@{}}% +%\\[-12pt] +%Copyright \copyright\ 2012 Inderscience Enterprises Ltd. & &% +%\end{tabular*}% +%\vskip -30pt% +%%%\vskip -35pt% +%\end{table}% +%} +\makeatother + +%%%%%%%%%%%%%%%%% +\begin{document}% +%%%%%%%%%%%%%%%%% + +\setcounter{page}{1} + +\LRH{F. Wang et~al.} + +\RRH{Metadata Based Management and Sharing of Distributed Biomedical +Data} + +\VOL{x} + +\ISSUE{x} + +\PUBYEAR{xxxx} + +\BottomCatch + +\PUBYEAR{2012} + +\subtitle{} + +\title{TSIRM: A Two-Stage Iteration with least-squares Residual Minimization algorithm to solve large sparse linear and non linear systems} + +% +\authorA{Rapha\"el Couturier} +% +\affA{Femto-ST Institute, University of Bourgogne Franche-Comte, France\\ + E-mail: raphael.couturier@univ-fcomte.fr} +% +% +\authorB{Lilia Ziane Khodja} +\affB{LTAS-Mécanique numérique non linéaire, University of Liege, Belgium \\ + E-mail: l.zianekhodja@ulg.ac.be} + +\authorC{Christophe Guyeux} +\affC{Femto-ST Institute, University of Bourgogne Franche-Comte, France\\ + E-mail: christophe.guyeux@univ-fcomte.fr} + + +\begin{abstract} +In this article, a two-stage iterative algorithm is proposed to improve the +convergence of Krylov based iterative methods, typically those of GMRES +variants. The principle of the proposed approach is to build an external +iteration over the Krylov method, and to frequently store its current residual +(at each GMRES restart for instance). 
After a given number of outer iterations,
+a least-squares minimization step is applied to the matrix composed of the saved
+residuals, in order to compute a better solution and to perform new iterations if
+required. It is proven that the proposal has the same convergence properties
+as the inner embedded method itself. Experiments using up to 16,384 cores
+also show that the proposed algorithm runs around 5 to 7 times faster than
+GMRES.
+\end{abstract}
+
+\KEYWORD{Iterative Krylov methods; sparse linear and nonlinear systems; two-stage iteration; least-squares residual minimization; PETSc.}
+
+\begin{bio}
+% Author biographies to be added.
+\end{bio}
+
+
+\maketitle
+
+
+\section{Introduction}
+
+Iterative methods have recently become more attractive than direct ones to solve
+very large sparse linear systems~\cite{Saad2003}. They are more efficient in a
+parallel context, supporting thousands of cores, and they require less memory
+and arithmetic operations than direct methods~\cite{bahicontascoutu}. This is
+why new iterative methods are frequently proposed or adapted by researchers, and
+the increasing need to solve very large sparse linear systems has triggered the
+development of such efficient iterative techniques suitable for parallel
+processing.
+
+Most of the successful iterative methods currently available are based on
+so-called ``Krylov subspaces''. They consist in forming a basis of successive
+matrix powers multiplied by an initial vector, which can be for instance the
+residual. These methods use the orthogonality of the vectors of the Krylov
+subspace basis in order to solve linear systems. The best known iterative Krylov
+subspace methods are the conjugate gradient and the GMRES (Generalized Minimal
+RESidual) methods.
+
+
+However, iterative methods suffer from scalability problems on parallel
+computing platforms with many processors, due to their need for reduction
+operations and for collective communications to perform matrix-vector
+multiplications.
The communications on large clusters with thousands of cores
+and large message sizes can significantly affect the performance of these
+iterative methods. As a consequence, Krylov subspace iteration methods are often
+used with preconditioners in practice, to improve their convergence and
+accelerate their performance. However, most of the good preconditioners are
+not scalable on large clusters.
+
+In this research work, a two-stage algorithm based on two nested iterations
+called inner-outer iterations is proposed. This algorithm consists in solving
+the sparse linear system iteratively with a small number of inner iterations,
+and restarting the outer step with a new solution minimizing some error
+functions over some previous residuals. For further information on two-stage
+iteration methods, interested readers are invited to
+consult~\cite{Nichols:1973:CTS}. Two-stage algorithms are easy to parallelize on
+large clusters. Furthermore, the least-squares minimization technique improves
+its convergence and performance.
+
+The present article is organized as follows. Related works are presented in
+Section~\ref{sec:02}. Section~\ref{sec:03} details the two-stage algorithm using
+a least-squares residual minimization, while Section~\ref{sec:04} provides
+convergence results regarding this method. Section~\ref{sec:05} shows some
+experimental results obtained on large clusters using routines of the PETSc
+toolkit. This research work ends with a conclusion section, in which the proposal
+is summarized and intended perspectives are provided.
+
+
+
+%%%*********************************************************
+%%%*********************************************************
+
+
+
+%%%*********************************************************
+%%%*********************************************************
+\section{Related works}
+\label{sec:02}
+Krylov subspace iteration methods have increasingly become key
+techniques for solving linear and nonlinear systems, or eigenvalue problems,
+especially with the increasing development of
+preconditioners~\cite{Saad2003,Meijerink77}. One reason for the popularity of
+these methods is their generality, simplicity, and efficiency in solving systems
+of equations arising from very large and complex problems.
+
+GMRES is one of the most widely used Krylov iterative methods for solving sparse
+and large linear systems. It has been developed by Saad and
+Schultz~\cite{Saad86} as a generalized method to deal with nonsymmetric and
+non-Hermitian problems, as well as indefinite symmetric problems. In its original
+version called full GMRES, this algorithm minimizes the residual over the
+current Krylov subspace until convergence in at most $n$ iterations, where $n$
+is the size of the sparse matrix. Full GMRES is however too expensive in the
+case of large matrices, since the cost of its orthogonalization process grows
+quadratically with the number of iterations. For that reason,
+GMRES is restarted in practice after every $m\ll n$ iterations, to avoid the
+storage of a large orthonormal basis. However, the convergence behavior of the
+restarted GMRES, called GMRES($m$), in many cases depends quite critically on
+the value of $m$~\cite{Huang89}. Therefore, in most cases, a preconditioning
+technique is applied to the restarted GMRES method in order to improve its
+convergence.
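+
+To make the role of the restart parameter $m$ more concrete, the following short
+NumPy sketch implements a minimal, unpreconditioned GMRES($m$): an Arnoldi basis
+of at most $m$ vectors is built, the small least-squares problem on the
+Hessenberg matrix is solved, and the process restarts from the updated iterate.
+It is only an illustration of the restart mechanism, not the PETSc
+implementation used in the experiments of Section~\ref{sec:05}.
+\begin{verbatim}
+import numpy as np
+
+def gmres_restarted(A, b, x0=None, m=30,
+                    max_restarts=50, tol=1e-10):
+    # A only needs to support A @ v (dense array,
+    # sparse matrix or LinearOperator).
+    n = b.shape[0]
+    x = np.zeros(n) if x0 is None else x0.copy()
+    for _ in range(max_restarts):
+        r = b - A @ x
+        beta = np.linalg.norm(r)
+        if beta < tol:             # converged
+            break
+        Q = np.zeros((n, m + 1))   # Arnoldi basis
+        H = np.zeros((m + 1, m))   # Hessenberg matrix
+        Q[:, 0] = r / beta
+        for j in range(m):
+            w = A @ Q[:, j]
+            for i in range(j + 1):  # modified Gram-Schmidt
+                H[i, j] = Q[:, i] @ w
+                w = w - H[i, j] * Q[:, i]
+            H[j + 1, j] = np.linalg.norm(w)
+            if H[j + 1, j] < 1e-14:  # happy breakdown
+                break
+            Q[:, j + 1] = w / H[j + 1, j]
+        k = j + 1
+        e1 = np.zeros(k + 1)
+        e1[0] = beta
+        # y minimizes ||beta*e1 - H y||_2, the GMRES residual
+        y, *_ = np.linalg.lstsq(H[:k + 1, :k], e1,
+                                rcond=None)
+        x = x + Q[:, :k] @ y
+    return x
+\end{verbatim}
+Storing only $m+1$ basis vectors is precisely what keeps the memory and
+orthogonalization costs of GMRES($m$) bounded, at the price of the convergence
+issues recalled above.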
+ +To enhance the robustness of Krylov iterative solvers, some techniques have been +proposed allowing the use of different preconditioners, if necessary, within the +iteration itself instead of restarting. Those techniques may lead to +considerable savings in CPU time and memory requirements. Van der Vorst +in~\cite{Vorst94} has for instance proposed variants of the GMRES algorithm in +which a different preconditioner is applied in each iteration, leading to the +so-called GMRESR family of nested methods. In fact, the GMRES method is +effectively preconditioned with other iterative schemes (or GMRES itself), where +the iterations of the GMRES method are called outer iterations while the +iterations of the preconditioning process is referred to as inner iterations. +Saad in~\cite{Saad:1993} has proposed Flexible GMRES (FGMRES) which is another +variant of the GMRES algorithm using a variable preconditioner. In FGMRES the +search directions are preconditioned whereas in GMRESR the residuals are +preconditioned. However, in practice, good preconditioners are those based on +direct methods, as ILU preconditioners, which are not easy to parallelize and +suffer from the scalability problems on large clusters of thousands of cores. + +Recently, communication-avoiding methods have been developed to reduce the +communication overheads in Krylov subspace iterative solvers. On modern computer +architectures, communications between processors are much slower than +floating-point arithmetic operations on a given +processor. Communication-avoiding techniques reduce either communications +between processors or data movements between levels of the memory hierarchy, by +reformulating the communication-bound kernels (more frequently SpMV kernels) and +the orthogonalization operations within the Krylov iterative solver. Different +works have studied the communication-avoiding techniques for the GMRES method, +so-called CA-GMRES, on multicore processors and multi-GPU +machines~\cite{Mohiyuddin2009,Hoemmen2010,Yamazaki2014}. + +Compared to all these works and to all the other works on Krylov iterative +methods, the originality of our work is to build a second iteration over a +Krylov iterative method and to minimize the residuals with a least-squares +method after a given number of outer iterations. + +%%%********************************************************* +%%%********************************************************* + + + +%%%********************************************************* +%%%********************************************************* +\section{TSIRM: Two-stage iteration with least-squares residuals minimization algorithm} +\label{sec:03} +A two-stage algorithm is proposed to solve large sparse linear systems of the +form $Ax=b$, where $A\in\mathbb{R}^{n\times n}$ is a sparse and square +nonsingular matrix, $x\in\mathbb{R}^n$ is the solution vector, and +$b\in\mathbb{R}^n$ is the right-hand side. As explained previously, the +algorithm is implemented as an inner-outer iteration solver based on iterative +Krylov methods. The main key-points of the proposed solver are given in +Algorithm~\ref{algo:01}. It can be summarized as follows: the inner solver is a +Krylov based one. In order to accelerate its convergence, the outer solver +periodically applies a least-squares minimization on the residuals computed by +the inner one. 
+
+At each outer iteration, the sparse linear system $Ax=b$ is partially solved
+using only $m$ iterations of an iterative method, the latter being initialized
+with the last obtained approximation. The GMRES method~\cite{Saad86}, or any of
+its variants, can potentially be used as the inner solver. The current
+approximation of the Krylov method is then stored inside an $n \times s$ matrix
+$S$, which is composed of the $s$ last solutions that have been computed during
+the inner iterations phase. In the remainder, the $i$-th column vector of $S$
+will be denoted by $S_i$.
+
+Every $s$ iterations, another kind of minimization step is applied in order to
+compute a new solution $x$. For that, the residuals of $Ax=b$ corresponding to
+the solutions stored in $S$ are computed with $(b-AS)$. The minimization of the
+residuals is obtained by
+\begin{equation}
+   \underset{\alpha\in\mathbb{R}^{s}}{\min}\|b-R\alpha\|_2
+\label{eq:01}
+\end{equation}
+with $R=AS$. The new solution $x$ is then computed with $x=S\alpha$.
+
+
+In practice, $R$ is a dense rectangular matrix belonging to $\mathbb{R}^{n\times
+  s}$, with $s\ll n$. In order to minimize~\eqref{eq:01}, a least-squares
+method such as CGLS~\cite{Hestenes52} or LSQR~\cite{Paige82} is used. Note
+that these methods are more appropriate than a direct method in a
+parallel context. CGLS has recently been used to improve the performance of
+multisplitting algorithms~\cite{cz15:ij}.
+
+
+
+\begin{algorithm}[t]
+\caption{TSIRM}
+\begin{algorithmic}[1]
+  \Input $A$ (sparse matrix), $b$ (right-hand side)
+  \Output $x$ (solution vector)\vspace{0.2cm}
+  \State Set the initial guess $x_0$
+  \For {$k=1,2,3,\ldots$ until convergence ($error<\epsilon_{tsirm}$)} \label{algo:conv}
+    \State $[x_k,error]=Solve(A,b,x_{k-1},max\_iter_{kryl})$   \label{algo:solve}
+    \State $S_{k \mod s}=x_k$ \label{algo:store} \Comment{update column ($k \mod s$) of $S$}
+    \If {$k \mod s=0$ {\bf and} $error>\epsilon_{kryl}$}
+      \State $R=AS$ \Comment{compute dense matrix} \label{algo:matrix_mul}
+      \State $\alpha=Least\_Squares(R,b,max\_iter_{ls})$ \label{algo:ls}
+      \State $x_k=S\alpha$  \Comment{compute new solution}
+    \EndIf
+  \EndFor
+\end{algorithmic}
+\label{algo:01}
+\end{algorithm}
+
+Algorithm~\ref{algo:01} summarizes the principle of the proposed method. The
+outer iteration is inside the \emph{for} loop. At line~\ref{algo:solve}, the
+Krylov method is called for a maximum of $max\_iter_{kryl}$ iterations. In
+practice, we suggest setting this parameter equal to the restart number of the
+GMRES-like method. Moreover, a tolerance threshold must be specified for the
+solver. In practice, this threshold must be much smaller than the convergence
+threshold of the TSIRM algorithm (\emph{i.e.}, $\epsilon_{tsirm}$). We also
+consider that after the call to the $Solve$ function, we obtain the vector $x_k$
+and the $error$, which is defined by $||Ax_k-b||_2$.
+
+  At line~\ref{algo:store}, $S_{k \mod s}=x_k$ consists in copying the solution
+  $x_k$ into the column $k \mod s$ of $S$. After the minimization, the matrix
+  $S$ is reused with the new values of the residuals. To solve the minimization
+  problem, an iterative method is used. Two parameters are required for that:
+  the maximum number of iterations ($max\_iter_{ls}$) and the threshold to stop
+  the method ($\epsilon_{ls}$).
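+
+As an illustration of Algorithm~\ref{algo:01}, the following Python sketch
+reproduces its structure with NumPy/SciPy building blocks: any restarted Krylov
+routine can play the role of the $Solve$ function (for instance a single cycle
+of the GMRES($m$) sketch given in Section~\ref{sec:02}, or a PETSc KSP accessed
+through petsc4py), and SciPy's LSQR stands in for the least-squares step of
+line~\ref{algo:ls}. It is a toy sequential version meant to clarify the data
+flow, not the parallel PETSc implementation evaluated in Section~\ref{sec:05}.
+\begin{verbatim}
+import numpy as np
+from scipy.sparse.linalg import lsqr  # plays the role
+                                      # of CGLS/LSQR
+
+def tsirm(A, b, inner_solve, s=8,
+          eps_tsirm=1e-10, max_outer=10000):
+    # inner_solve(A, b, x0) performs a few iterations of
+    # a restarted Krylov method and returns the new
+    # approximation (the Solve function of Algorithm 1).
+    n = b.shape[0]
+    x = np.zeros(n)
+    S = np.zeros((n, s))         # last s approximations
+    for k in range(1, max_outer + 1):
+        x = inner_solve(A, b, x)       # line 3
+        S[:, k % s] = x                # line 4
+        error = np.linalg.norm(b - A @ x)
+        if error < eps_tsirm:          # convergence test
+            break
+        if k % s == 0:                 # minimization step
+            R = A @ S                  # dense n x s matrix
+            alpha = lsqr(R, b)[0]      # min ||b - R alpha||
+            x = S @ alpha              # new solution
+    return x
+
+# Possible use, with one GMRES(m) cycle as inner solver:
+# x = tsirm(A, b, lambda A, b, x0:
+#           gmres_restarted(A, b, x0=x0, m=30,
+#                           max_restarts=1))
+\end{verbatim}
+The test on $\epsilon_{kryl}$ of Algorithm~\ref{algo:01} is omitted in this
+sketch for brevity.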
+
+Let us summarize the most important parameters of TSIRM:
+\begin{itemize}
+\item $\epsilon_{tsirm}$: the threshold that stops the TSIRM method;
+\item $max\_iter_{kryl}$: the maximum number of iterations for the Krylov method;
+\item $s$: the number of outer iterations before applying the minimization step;
+\item $max\_iter_{ls}$: the maximum number of iterations for the iterative least-squares method;
+\item $\epsilon_{ls}$: the threshold used to stop the least-squares method.
+\end{itemize}
+
+
+The parallelization of TSIRM relies on the parallelization of all its
+parts. More precisely, except for the least-squares step, all the other parts
+are straightforward to carry out in parallel. In order to develop a parallel
+version of our code, we have chosen to use PETSc~\cite{petsc-web-page}. At
+line~\ref{algo:matrix_mul}, the matrix-matrix multiplication is easy to
+implement and efficient since the matrix $A$ is sparse and the matrix $S$
+contains few columns in practice. As explained previously, at least two methods
+seem interesting to solve the least-squares minimization: the CGLS and the LSQR
+methods.
+
+In Algorithm~\ref{algo:02} we recall the CGLS algorithm. The LSQR method follows
+more or less the same principle but it is longer to describe, so we briefly
+explain the parallelization of CGLS, which is similar to that of LSQR.
+
+\begin{algorithm}[t]
+\caption{CGLS}
+\begin{algorithmic}[1]
+  \Input $A$ (matrix), $b$ (right-hand side)
+  \Output $x$ (solution vector)\vspace{0.2cm}
+  \State Let $x_0$ be an initial approximation
+  \State $r_0=b-Ax_0$
+  \State $p_1=A^Tr_0$
+  \State $s_0=p_1$
+  \State $\gamma=||s_0||^2_2$
+  \For {$k=1,2,3,\ldots$ until convergence ($\gamma<\epsilon_{ls}$)} \label{algo2:conv}
+    \State $q_k=Ap_k$
+    \State $\alpha_k=\gamma/||q_k||^2_2$
+    \State $x_k=x_{k-1}+\alpha_kp_k$
+    \State $r_k=r_{k-1}-\alpha_kq_k$
+    \State $s_k=A^Tr_k$
+    \State $\gamma_{old}=\gamma$
+    \State $\gamma=||s_k||^2_2$
+    \State $\beta_k=\gamma/\gamma_{old}$
+    \State $p_{k+1}=s_k+\beta_kp_k$
+  \EndFor
+\end{algorithmic}
+\label{algo:02}
+\end{algorithm}
+
+
+In each iteration of CGLS, there are two matrix-vector multiplications and some
+classical operations: dot product, norm, multiplication, and addition on
+vectors. All these operations are easy to implement in PETSc or similar
+environments. It should be noticed that LSQR follows the same principle; it is a
+little bit longer but it performs more or less the same operations.
+
+
+%%%*********************************************************
+%%%*********************************************************
+
+\section{Convergence results}
+\label{sec:04}
+
+
+We can now claim the following:
+\begin{proposition}
+\label{prop:saad}
+If $A$ is either a positive definite or a positive matrix and GMRES($m$) is used as a solver, then the TSIRM algorithm is convergent.
+
+Furthermore, let $r_k$ be the
+$k$-th residual of TSIRM, then
+we have the following bounds:
+\begin{itemize}
+\item when $A$ is positive:
+\begin{equation}
+||r_k|| \leqslant \left(1-\dfrac{\alpha}{\beta}\right)^{\frac{km}{2}} ||r_0|| ,
+\end{equation}
+where $M$ is the symmetric part of $A$, $\alpha = \lambda_{min}(M)^2$ and $\beta = \lambda_{max}(A^T A)$;
+\item when $A$ is positive definite:
+\begin{equation}
+\|r_k\| \leq \left( 1-\frac{\lambda_{\mathrm{min}}^2(1/2(A^T + A))}{ \lambda_{\mathrm{max}}(A^T A)} \right)^{km/2} \|r_0\|.
+\end{equation}
+\end{itemize}
+%In the general case, where A is not positive definite, we have
+%$\|r_n\| \le \inf_{p \in P_n} \|p(A)\| \le \kappa_2(V) \inf_{p \in P_n} \max_{\lambda \in \sigma(A)} |p(\lambda)| \|r_0\|, .$
+\end{proposition}
+
+\begin{proof}
+Let us first recall that the residual is under control when the GMRES algorithm is applied to a positive definite matrix, as it is bounded as follows:
+\begin{equation*}
+\|r_k\| \leq \left( 1-\frac{\lambda_{\mathrm{min}}^2(1/2(A^T + A))}{ \lambda_{\mathrm{max}}(A^T A)} \right)^{k/2} \|r_0\| .
+\end{equation*}
+Additionally, when $A$ is a positive real matrix with symmetric part $M$, the residual norm provided at the $m$-th step of GMRES satisfies:
+\begin{equation*}
+||r_m|| \leqslant \left(1-\dfrac{\alpha}{\beta}\right)^{\frac{m}{2}} ||r_0|| ,
+\end{equation*}
+where $\alpha$ and $\beta$ are defined as in Proposition~\ref{prop:saad}, which proves
+the convergence of GMRES($m$) for all $m$ under such assumptions regarding $A$.
+These well-known results can be found, \emph{e.g.}, in~\cite{Saad86}.
+
+We will now prove by mathematical induction that, for each $k \in \mathbb{N}^\ast$,
+$||r_k|| \leqslant \left(1-\dfrac{\alpha}{\beta}\right)^{\frac{km}{2}} ||r_0||$ when $A$ is positive, and $\|r_k\| \leq \left( 1-\frac{\lambda_{\mathrm{min}}^2(1/2(A^T + A))}{ \lambda_{\mathrm{max}}(A^T A)} \right)^{km/2} \|r_0\|$ when $A$ is positive definite.
+
+The base case is obvious: for $k=1$, the TSIRM algorithm simply consists in applying GMRES($m$) once, leading to a new residual $r_1$ that satisfies the claimed bound due to the results recalled above.
+
+Suppose now that the claim holds for all $j=1, 2, \hdots, k-1$, that is, $\forall j \in \{1,2,\hdots, k-1\}$, $||r_j|| \leqslant \left(1-\dfrac{\alpha}{\beta}\right)^{\frac{jm}{2}} ||r_0||$ in the positive case, and $\|r_j\| \leq \left( 1-\frac{\lambda_{\mathrm{min}}^2(1/2(A^T + A))}{ \lambda_{\mathrm{max}}(A^T A)} \right)^{jm/2} \|r_0\|$ in the positive definite one.
+We will show that the statement also holds for $r_k$. Two situations can occur:
+\begin{itemize}
+\item If $k \not\equiv 0 ~(\textrm{mod}\ s)$, then the TSIRM algorithm consists in executing GMRES($m$) once. In that case and by using the inductive hypothesis, we obtain either $||r_k|| \leqslant \left(1-\dfrac{\alpha}{\beta}\right)^{\frac{m}{2}} ||r_{k-1}||\leqslant \left(1-\dfrac{\alpha}{\beta}\right)^{\frac{km}{2}} ||r_0||$ if $A$ is positive, or $\|r_k\| \leqslant \left( 1-\frac{\lambda_{\mathrm{min}}^2(1/2(A^T + A))}{ \lambda_{\mathrm{max}}(A^T A)} \right)^{m/2} \|r_{k-1}\|$ $\leqslant$ $\left( 1-\frac{\lambda_{\mathrm{min}}^2(1/2(A^T + A))}{ \lambda_{\mathrm{max}}(A^T A)} \right)^{km/2} \|r_{0}\|$ in the positive definite case.
+\item Else, the TSIRM algorithm consists in two stages: a first GMRES($m$) execution leads to a temporary $x_k$ whose residual satisfies:
+\begin{itemize}
+\item $||r_k|| \leqslant \left(1-\dfrac{\alpha}{\beta}\right)^{\frac{m}{2}} ||r_{k-1}||\leqslant \left(1-\dfrac{\alpha}{\beta}\right)^{\frac{km}{2}} ||r_0||$ in the positive case,
+\item $\|r_k\| \leqslant \left( 1-\frac{\lambda_{\mathrm{min}}^2(1/2(A^T + A))}{ \lambda_{\mathrm{max}}(A^T A)} \right)^{m/2} \|r_{k-1}\|$ $\leqslant$ $\left( 1-\frac{\lambda_{\mathrm{min}}^2(1/2(A^T + A))}{ \lambda_{\mathrm{max}}(A^T A)} \right)^{km/2} \|r_{0}\|$ in the positive definite one,
+\end{itemize}
+and a least-squares resolution.
+Let $\operatorname{span}(S) = \left \{ {\sum_{i=1}^k \lambda_i v_i \Big| k \in \mathbb{N}, v_i \in S, \lambda _i \in \mathbb{R}} \right \}$ be the linear span of a set of real vectors $S$. So,\\
+$\min_{\alpha \in \mathbb{R}^s} ||b-R\alpha ||_2 = \min_{\alpha \in \mathbb{R}^s} ||b-AS\alpha ||_2$
+
+$\begin{array}{ll}
+& = \min_{x \in span\left(S_{k-s+1}, S_{k-s+2}, \hdots, S_{k} \right)} ||b-Ax ||_2\\
+& = \min_{x \in span\left(x_{k-s+1}, x_{k-s+2}, \hdots, x_{k} \right)} ||b-Ax ||_2\\
+&  \leqslant \min_{x \in span\left( x_{k} \right)} ||b-Ax ||_2\\
+&  \leqslant \min_{\lambda \in \mathbb{R}} ||b-\lambda Ax_{k} ||_2\\
+&  \leqslant ||b-Ax_{k}||_2\\
+&  = ||r_k||_2\\
+&  \leqslant \left(1-\dfrac{\alpha}{\beta}\right)^{\frac{km}{2}} ||r_0||, \textrm{ if $A$ is positive,}\\
+& \leqslant  \left( 1-\frac{\lambda_{\mathrm{min}}^2(1/2(A^T + A))}{ \lambda_{\mathrm{max}}(A^T A)} \right)^{km/2} \|r_{0}\|, \textrm{ if $A$ is}\\
+& \textrm{positive definite,}
+\end{array}$
+\end{itemize}
+which concludes the induction and the proof.
+\end{proof}
+
+Remark that a similar proposition can be formulated each time
+the given solver satisfies an inequality of the form $||r_n|| \leqslant \mu^n ||r_0||$,
+with $|\mu|<1$. Furthermore, it is \emph{a priori} possible in some particular cases
+regarding $A$,
+that the proposed TSIRM converges while GMRES($m$) does not.
+
+%%%*********************************************************
+%%%*********************************************************
+\section{Experiments using PETSc}
+\label{sec:05}
+
+
+In order to see the behavior of our approach when considering only one processor,
+a first comparison between GMRES or FGMRES and the new algorithm detailed
+previously has been carried out. The matrices that have been used, with their
+characteristics (names, fields, numbers of rows, and numbers of nonzero
+coefficients), are detailed in Table~\ref{tab:01}. These matrices, which arise
+from real-world applications, have been extracted from the Davis collection of
+the University of Florida~\cite{Dav97}.
+
+\begin{table}[htbp]
+\begin{center}
+\begin{tabular}{|c|c|r|r|}
+\hline
+Matrix name                 & Field                 &\# Rows   & \# Nonzeros   \\\hline \hline
+crashbasis         & Optimization          & 160,000  & 1,750,416     \\
+parabolic\_fem     & Comput. fluid dynamics  & 525,825 & 2,100,225 \\
+epb3               & Thermal problem       & 84,617   & 463,625       \\
+atmosmodj          & Comput. fluid dynamics  & 1,270,432 & 8,814,880 \\
+bfwa398            & Electromagnetics problem & 398      & 3,678         \\
+torso3             & 2D/3D problem         & 259,156  & 4,429,042     \\
+\hline
+
+\end{tabular}
+\caption{Main characteristics of the sparse matrices chosen from the Davis collection}
+\label{tab:01}
+\end{center}
+\end{table}
+The chosen parameters are detailed below.
+GMRES is stopped every 30
+iterations (\emph{i.e.}, $max\_iter_{kryl}=30$), which is the default
+setting of the GMRES restart parameter. The parameter $s$ has been set to 8. CGLS
+minimizes the least-squares problem with parameters
+$\epsilon_{ls}=1e-40$ and $max\_iter_{ls}=20$. The external precision is set to
+$\epsilon_{tsirm}=1e-10$. These experiments have been performed on an Intel(R)
+Core(TM) i7-3630QM CPU @ 2.40GHz with version 3.5.1 of PETSc.
+
+
+Experiments comparing
+a GMRES variant with TSIRM for the resolution of linear systems are given in Table~\ref{tab:02}.
+The second column describes whether GMRES or FGMRES has been used for solving the linear systems.
+Different preconditioners have been used according to the matrices. With TSIRM, the same
+solver and the same preconditioner are used.
This table shows that TSIRM can +drastically reduce the number of iterations needed to reach the convergence, when the +number of iterations for the normal GMRES is more or less greater than 500. In +fact this also depends on two parameters: the number of iterations before stopping GMRES +and the number of iterations to perform the minimization. + + +\begin{table}[htbp] +\begin{center} +\begin{tabular}{|c|c|r|r|r|r|} +\hline + + \multirow{2}{*}{Matrix name} & Solver / & \multicolumn{2}{c|}{GMRES} & \multicolumn{2}{c|}{TSIRM CGLS} \\ +\cline{3-6} + & precond & Time & \# Iter. & Time & \# Iter. \\\hline \hline + +crashbasis & gmres / none & 15.65 & 518 & 14.12 & 450 \\ +parabolic\_fem & gmres / ilu & 1009.94 & 7573 & 401.52 & 2970 \\ +epb3 & fgmres / sor & 8.67 & 600 & 8.21 & 540 \\ +atmosmodj & fgmres / sor & 104.23 & 451 & 88.97 & 366 \\ +bfwa398 & gmres / none & 1.42 & 9612 & 0.28 & 1650 \\ +torso3 & fgmres / sor & 37.70 & 565 & 34.97 & 510 \\ +\hline + +\end{tabular} +\caption{Comparison between sequential standalone (F)GMRES and TSIRM with (F)GMRES (time in seconds).} +\label{tab:02} +\end{center} +\end{table} + + + + + +In order to perform larger experiments, we have tested some example applications +of PETSc. These applications are available in the \emph{ksp} part, which is +suited for scalable linear equations solvers: +\begin{itemize} +\item ex15 is an example that solves in parallel an operator using a finite + difference scheme. The diagonal is equal to 4 and 4 extra-diagonals + representing the neighbors in each directions are equal to -1. This example is + used in many physical phenomena, for example, heat and fluid flow, wave + propagation, etc. +\item ex54 is another example based on a 2D problem discretized with quadrilateral + finite elements. In this example, the user can define the scaling of material + coefficient in embedded circle called $\alpha$. +\end{itemize} +For more technical details on these applications, interested readers are invited +to read the codes available in the PETSc sources. These problems have been +chosen because they are scalable with many cores. + +In the following, larger experiments are described on two large scale +architectures: Curie and Juqueen. Both these architectures are supercomputers +respectively composed of 80,640 cores for Curie and 458,752 cores for +Juqueen. Those machines are respectively hosted by GENCI in France and Jülich +Supercomputing Center in Germany. They belong with other similar architectures +to the PRACE initiative (Partnership for Advanced Computing in Europe), which +aims at proposing high performance supercomputing architecture to enhance +research in Europe. The Curie architecture is composed of Intel E5-2680 +processors at 2.7 GHz with 2Gb memory by core. The Juqueen architecture, +for its part, is +composed by IBM PowerPC A2 at 1.6 GHz with 1Gb memory per core. Both those +architectures are equipped with a dedicated high speed network. + + +In many situations, using preconditioners is essential in order to find the +solution of a linear system. There are many preconditioners available in PETSc. +However, for parallel applications, all the preconditioners based on matrix factorization +are not available. In our experiments, we have tested different kinds of +preconditioners, but as it is not the subject of this paper, we will not +present results with many preconditioners. In practice, we have chosen to use a +multigrid (mg) and successive over-relaxation (sor). 
For further details on the
+preconditioners in PETSc, readers are referred to~\cite{petsc-web-page}.
+
+
+
+\begin{table*}[htbp]
+\begin{center}
+\begin{tabular}{|r|r|r|r|r|r|r|r|r|}
+\hline
+
+  nb. cores & precond   & \multicolumn{2}{c|}{FGMRES} & \multicolumn{2}{c|}{TSIRM CGLS} &  \multicolumn{2}{c|}{TSIRM LSQR} & best gain \\
+\cline{3-8}
+             &            & Time  & \# Iter.  & Time  & \# Iter. & Time  & \# Iter. & \\\hline \hline
+  2,048      & mg         & 403.49   & 18,210    & 73.89  & 3,060   & 77.84  & 3,270  & 5.46 \\
+  2,048      & sor        & 745.37   & 57,060    & 87.31  & 6,150   & 104.21 & 7,230  & 8.53 \\
+  4,096      & mg         & 562.25   & 25,170    & 97.23  & 3,990   & 89.71  & 3,630  & 6.27 \\
+  4,096      & sor        & 912.12   & 70,194    & 145.57 & 9,750   & 168.97 & 10,980 & 6.26 \\
+  8,192      & mg         & 917.02   & 40,290    & 148.81 & 5,730   & 143.03 & 5,280  & 6.41 \\
+  8,192      & sor        & 1,404.53 & 106,530   & 212.55 & 12,990  & 180.97 & 10,470 & 7.76 \\
+  16,384     & mg         & 1,430.56 & 63,930    & 237.17 & 8,310   & 244.26 & 7,950  & 6.03 \\
+  16,384     & sor        & 2,852.14 & 216,240   & 418.46 & 21,690  & 505.26 & 23,970 & 6.82 \\
+\hline
+
+\end{tabular}
+\caption{Comparison of FGMRES and TSIRM (with FGMRES as inner solver) for example ex15 of PETSc with two preconditioners (mg and sor), with 25,000 components per core on Juqueen ($\epsilon_{tsirm}=1e-3$, $max\_iter_{kryl}=30$, $s=12$, $max\_iter_{ls}=15$, $\epsilon_{ls}=1e-40$); time is expressed in seconds.}
+\label{tab:03}
+\end{center}
+\end{table*}
+
+Table~\ref{tab:03} shows the execution times and the numbers of iterations of
+example ex15 of PETSc on the Juqueen architecture. Different numbers of cores
+are studied, ranging from 2,048 up to 16,384, with the two preconditioners {\it
+  mg} and {\it sor}. For those experiments, the number of components (or
+unknowns of the problem) per core is fixed at 25,000, a setting also called weak
+scaling. This number can seem relatively small. In fact, for some applications
+that need a lot of memory, the number of components per processor sometimes
+needs to be small. The other parameters for this application are described in
+the legend of this table.
+
+
+
+In Table~\ref{tab:03}, we can notice that TSIRM is always faster than
+FGMRES. The last column shows the ratio between FGMRES and the best version of
+TSIRM according to the minimization procedure: CGLS or LSQR. Even if we have
+computed the worst case between CGLS and LSQR, it is clear that TSIRM is always
+faster than FGMRES. For this example, the multigrid preconditioner is faster
+than SOR. The gain between TSIRM and FGMRES is more or less similar for the two
+preconditioners. Looking at the number of iterations needed to reach
+convergence, it is obvious that TSIRM allows the reduction of the number of
+iterations. It should be noticed that for TSIRM, in those experiments, only the
+iterations of the Krylov solver are taken into account. The iterations of CGLS
+or LSQR were not recorded, even though they are time-consuming. In general,
+every $max\_iter_{kryl} \times s$ Krylov iterations (which corresponds to
+$30 \times 12$ here), there are $max\_iter_{ls}$ iterations of the least-squares
+method (which corresponds to 15 here).
+
+\begin{figure}[htbp]
+\centering
+  \includegraphics[width=0.5\textwidth]{nb_iter_sec_ex15_juqueen}
+\caption{Number of iterations per second with ex15 and the same parameters as in Table~\ref{tab:03} (weak scaling)}
+\label{fig:01}
+\end{figure}
+
+
+In Figure~\ref{fig:01}, the number of iterations per second corresponding to
+Table~\ref{tab:03} is displayed. It can be noticed that the number of
+iterations per second of FGMRES is constant whereas it decreases with TSIRM with
+both preconditioners.
This can be explained by the fact that, when the number of
+cores increases, the time for the least-squares minimization step also
+increases. However, when the number of cores increases, the number of
+iterations needed to reach the threshold also increases, and, in that case,
+TSIRM is more efficient at reducing the number of iterations. So, the overall
+benefit of using TSIRM remains interesting.
+
+
+
+
+
+
+\begin{table*}[htbp]
+\begin{center}
+\begin{tabular}{|r|r|r|r|r|r|r|r|r|}
+\hline
+
+  nb. cores & $\epsilon_{tsirm}$  & \multicolumn{2}{c|}{FGMRES} & \multicolumn{2}{c|}{TSIRM CGLS} &  \multicolumn{2}{c|}{TSIRM LSQR} & best gain \\
+\cline{3-8}
+             &            & Time  & \# Iter.  & Time  & \# Iter. & Time  & \# Iter. & \\\hline \hline
+  2,048     & 8e-5     & 108.88 & 16,560  & 23.06  & 3,630  & 22.79  & 3,630   & 4.77 \\
+  2,048     & 6e-5     & 194.01 & 30,270  & 35.50  & 5,430  & 27.74  & 4,350   & 6.99 \\
+  4,096     & 7e-5     & 160.59 & 22,530  & 35.15  & 5,130  & 29.21  & 4,350   & 5.49 \\
+  4,096     & 6e-5     & 249.27 & 35,520  & 52.13  & 7,950  & 39.24  & 5,790   & 6.35 \\
+  8,192     & 6e-5     & 149.54 & 17,280  & 28.68  & 3,810  & 29.05  & 3,990   & 5.21 \\
+  8,192     & 5e-5     & 785.04 & 109,590 & 76.07  & 10,470 & 69.42  & 9,030   & 11.30 \\
+  16,384    & 4e-5     & 718.61 & 86,400  & 98.98  & 10,830 & 131.86 & 14,790  & 7.26 \\
+\hline
+
+\end{tabular}
+\caption{Comparison of FGMRES and TSIRM (with FGMRES as inner solver) for example ex54 of PETSc (both with the MG preconditioner) with 25,000 components per core on Curie ($max\_iter_{kryl}=30$, $s=12$, $max\_iter_{ls}=15$, $\epsilon_{ls}=1e-40$); time is expressed in seconds.}
+\label{tab:04}
+\end{center}
+\end{table*}
+
+
+In Table~\ref{tab:04}, some experiments with example ex54 on the Curie
+architecture are reported. For this application, we fixed $\alpha=0.6$. As can
+be seen in that table, the size of the problem has a strong influence on the
+number of iterations needed to reach convergence. That is why we have preferred
+to change the threshold. If we set it to $1e-3$ as with the previous
+application, only one iteration is necessary to reach convergence. So
+Table~\ref{tab:04} shows the results of different executions with different
+numbers of cores and different thresholds. As with the previous example, we can
+observe that TSIRM is faster than FGMRES. The ratio greatly depends on the
+number of iterations needed by FGMRES to reach the threshold. The greater the
+number of iterations needed to reach convergence is, the better the ratio
+between our algorithm and FGMRES is. This experiment is also a weak scaling
+experiment, with approximately $25,000$ components per core. It can also be
+observed that the difference between CGLS and LSQR is not significant. Both can
+be good, but it seems impossible to know in advance which one will be the best.
+
+Table~\ref{tab:05} shows a strong scaling experiment with example ex54 on the
+Curie architecture. In this case, the number of unknowns is fixed at
+$204,919,225$ and the number of cores ranges from $512$ to $8,192$ in powers of
+two. The threshold is fixed at $5e-5$ and only the $mg$ preconditioner has been
+tested. Here again we can see that TSIRM is faster than FGMRES. The efficiency
+of each algorithm is reported. It can be noticed that the efficiency of FGMRES
+is better than the TSIRM one except with $8,192$ cores, and that its efficiency
+is greater than one whereas the efficiency of TSIRM is lower than one.
+Nevertheless, TSIRM with either version of the least-squares method is always
+faster than FGMRES.
With $8,192$ cores, while the number of iterations
+increases considerably for FGMRES, it increases only slightly for TSIRM.
+
+In Figure~\ref{fig:02} we report the number of iterations per second for the
+experiments reported in Table~\ref{tab:05}. This figure highlights that the
+number of iterations per second is more or less the same for FGMRES and TSIRM,
+with a little advantage for FGMRES. This can be explained by the fact that, as
+previously mentioned, the iterations of the least-squares steps are not taken
+into account for TSIRM.
+
+\begin{table*}[htbp]
+\begin{center}
+\begin{tabular}{|r|r|r|r|r|r|r|r|r|r|r|}
+\hline
+
+  nb. cores   & \multicolumn{2}{c|}{FGMRES} & \multicolumn{2}{c|}{TSIRM CGLS} &  \multicolumn{2}{c|}{TSIRM LSQR} & best gain & \multicolumn{3}{c|}{efficiency} \\
+\cline{2-7} \cline{9-11}
+                &  Time  & \# Iter.  & Time  & \# Iter. & Time  & \# Iter. &   & FGMRES & TS CGLS & TS LSQR\\\hline \hline
+   512       & 3,969.69 & 33,120 & 709.57  & 5,790  & 622.76 & 5,070  & 6.37  &   1    &   1    &   1 \\
+   1024      & 1,530.06  & 25,860 & 290.95  & 4,830  & 307.71 & 5,070 & 5.25   &  1.30  &  1.21  &  1.01 \\
+   2048      & 919.62    & 31,470 & 237.52  & 8,040  & 194.22 & 6,510 & 4.73   &  1.08  &  .75   &  .80\\
+   4096      & 405.60    & 28,380 & 111.67  & 7,590  & 91.72  & 6,510 & 4.42   &  1.22  &  .79   &  .84 \\
+   8192      & 785.04    & 109,590 & 76.07  & 10,470 & 69.42  & 9,030 & 11.30   &   .32  &  .58   &  .56 \\
+
+\hline
+
+\end{tabular}
+\caption{Comparison of FGMRES and TSIRM for example ex54 of PETSc (both with the MG preconditioner) with 204,919,225 components on Curie with different numbers of cores ($\epsilon_{tsirm}=5e-5$, $max\_iter_{kryl}=30$, $s=12$, $max\_iter_{ls}=15$, $\epsilon_{ls}=1e-40$); time is expressed in seconds.}
+\label{tab:05}
+\end{center}
+\end{table*}
+
+\begin{figure}[htbp]
+\centering
+  \includegraphics[width=0.5\textwidth]{nb_iter_sec_ex54_curie}
+\caption{Number of iterations per second with ex54 and the same parameters as in Table~\ref{tab:05} (strong scaling)}
+\label{fig:02}
+\end{figure}
+
+
+Concerning the experiments, some other remarks are worth making.
+\begin{itemize}
+\item We have tested other examples of PETSc (ex29, ex45, ex49). For all these
+  examples, we have also obtained similar gains between GMRES and TSIRM, but
+  those examples are not scalable with many cores. In general, we had some
+  problems with more than $4,096$ cores.
+\item We have tested many iterative solvers available in PETSc. In fact, it is
+  possible to use most of them with TSIRM. From our point of view, the condition
+  to use a solver inside TSIRM is that the solver must have a restart
+  feature. More precisely, the solver must support being stopped and restarted
+  without degrading its convergence. That is why, with GMRES, we stop it when it
+  is naturally restarted (\emph{i.e.}, every $m$ iterations, $m$ being the
+  restart parameter). The Conjugate Gradient (CG) method and all its variants do
+  not have a ``restarted'' version in PETSc, so they are not efficient in this
+  context. They will converge with TSIRM, but not quickly: if we compare a
+  normal CG with a CG that is stopped and restarted every 16 iterations (for
+  example), the normal CG will be far more efficient. Some restarted versions of
+  CG and of its variants exist and may be interesting to study in future work.
+\end{itemize}
+%%%*********************************************************
+%%%*********************************************************
+
+
+
+%%%*********************************************************
+%%%*********************************************************
+\section{Conclusion}
+\label{sec:06}
+%%%*********************************************************
+%%%*********************************************************
+
+A new two-stage iterative algorithm, TSIRM, has been proposed in this article,
+in order to accelerate the convergence of Krylov iterative methods.
+Our TSIRM proposal acts as a merger between Krylov-based solvers and
+a least-squares minimization step.
+The convergence of the method has been proven in some situations, while
+experiments using up to 16,384 cores have been conducted to verify that TSIRM
+runs 5 to 7 times faster than GMRES.
+
+
+For future work, the authors' intention is to investigate other kinds of
+matrices, problems, and inner solvers. In particular, the possibility
+of obtaining convergence of TSIRM in situations where GMRES diverges will be
+investigated. The influence of all the parameters must also be
+tested, and other methods to minimize the residuals must be considered. The
+number of outer iterations between two minimization steps should become adaptive
+to improve the overall performance of the proposal. Finally, this solver will be
+implemented inside PETSc, which would be of interest as it would allow us to test
+all the nonlinear examples and to compare our algorithm with the other algorithms
+implemented in PETSc.
+
+
+
+
+%%%*********************************************************
+%%%*********************************************************
+\section*{Acknowledgment}
+This paper is partially funded by the Labex ACTION program (contract
+ANR-11-LABX-01-01). We acknowledge PRACE for awarding us access to the Curie and
+Juqueen resources, respectively based in France and Germany.
+
+
+
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\bibliographystyle{unsrt}
+\bibliography{biblio}
+
+\end{document}