diff --git a/BookGPU/Chapters/chapter6/ch6.aux b/BookGPU/Chapters/chapter6/ch6.aux
index eec3fee85499025ccd9c44f0e38add3b6c8a901c..2be0894822ca03e6e2ca469306377311e09a80ed 100644
--- a/BookGPU/Chapters/chapter6/ch6.aux
+++ b/BookGPU/Chapters/chapter6/ch6.aux
 \@writefile{toc}{\author{Stephane Vialle}{}}
 \@writefile{toc}{\author{Jens Gustedt}{}}
 \@writefile{loa}{\addvspace {10\p@ }}
-\@writefile{toc}{\contentsline {chapter}{\numberline {5}Development methodologies for GPU and cluster of GPUs}{49}}
+\@writefile{toc}{\contentsline {chapter}{\numberline {6}Development methodologies for GPU and cluster of GPUs}{83}}
 \@writefile{lof}{\addvspace {10\p@ }}
 \@writefile{lot}{\addvspace {10\p@ }}
-\@writefile{toc}{\contentsline {section}{\numberline {5.1}Introduction}{50}}
-\newlabel{ch6:intro}{{5.1}{50}}
-\@writefile{toc}{\contentsline {section}{\numberline {5.2}General scheme of synchronous code with computation/communication overlapping in GPU clusters}{50}}
-\newlabel{ch6:part1}{{5.2}{50}}
-\@writefile{toc}{\contentsline {subsection}{\numberline {5.2.1}Synchronous parallel algorithms on GPU clusters}{50}}
-\@writefile{lof}{\contentsline {figure}{\numberline {5.1}{\ignorespaces Native overlap of internode CPU communications with GPU computations.\relax }}{52}}
-\newlabel{fig:ch6p1overlapnative}{{5.1}{52}}
-\@writefile{toc}{\contentsline {subsection}{\numberline {5.2.2}Native overlap of CPU communications and GPU computations}{52}}
-\newlabel{algo:ch6p1overlapnative}{{5.1}{53}}
-\@writefile{lol}{\contentsline {lstlisting}{\numberline {5.1}Generic scheme implicitly overlapping MPI communications with CUDA GPU computations}{53}}
-\@writefile{lof}{\contentsline {figure}{\numberline {5.2}{\ignorespaces Overlap of internode CPU communications with a sequence of CPU/GPU data transfers and GPU computations.\relax }}{54}}
-\newlabel{fig:ch6p1overlapseqsequence}{{5.2}{54}}
-\@writefile{toc}{\contentsline {subsection}{\numberline {5.2.3}Overlapping with sequences of transfers and computations}{54}}
-\newlabel{algo:ch6p1overlapseqsequence}{{5.2}{55}}
-\@writefile{lol}{\contentsline {lstlisting}{\numberline {5.2}Generic scheme explicitly overlapping MPI communications with sequences of CUDA CPU/GPU transfers and CUDA GPU computations}{55}}
-\@writefile{lof}{\contentsline {figure}{\numberline {5.3}{\ignorespaces Overlap of internode CPU communications with a streamed sequence of CPU/GPU data transfers and GPU computations.\relax }}{56}}
-\newlabel{fig:ch6p1overlapstreamsequence}{{5.3}{56}}
-\newlabel{algo:ch6p1overlapstreamsequence}{{5.3}{57}}
-\@writefile{lol}{\contentsline {lstlisting}{\numberline {5.3}Generic scheme explicitly overlapping MPI communications with streamed sequences of CUDA CPU/GPU transfers and CUDA GPU computations}{57}}
-\@writefile{lof}{\contentsline {figure}{\numberline {5.4}{\ignorespaces Complete overlap of internode CPU communications, CPU/GPU data transfers and GPU computations, interleaving computation-communication iterations\relax }}{59}}
-\newlabel{fig:ch6p1overlapinterleaved}{{5.4}{59}}
-\@writefile{toc}{\contentsline {subsection}{\numberline {5.2.4}Interleaved communications-transfers-computations overlapping}{59}}
-\newlabel{algo:ch6p1overlapinterleaved}{{5.4}{60}}
-\@writefile{lol}{\contentsline {lstlisting}{\numberline {5.4}Generic scheme explicitly overlapping MPI communications, CUDA CPU/GPU transfers and CUDA GPU computations, interleaving computation-communication iterations}{60}}
-\@writefile{toc}{\contentsline {subsection}{\numberline {5.2.5}Experimental validation}{62}}
-\newlabel{ch6:p1expes}{{5.2.5}{62}}
-\newlabel{ch6:p1block-cyclic}{{5.2.5}{62}}
-\@writefile{lof}{\contentsline {figure}{\numberline {5.5}{\ignorespaces Experimental performances of different synchronous algorithms computing a dense matrix product\relax }}{63}}
-\newlabel{fig:ch6p1syncexpematrixprod}{{5.5}{63}}
-\@writefile{toc}{\contentsline {section}{\numberline {5.3}General scheme of asynchronous parallel code with computation/communication overlapping}{64}}
-\newlabel{ch6:part2}{{5.3}{64}}
-\@writefile{loa}{\contentsline {algorithm}{\numberline {3}{\ignorespaces Synchronous iterative scheme\relax }}{64}}
-\newlabel{algo:ch6p2sync}{{3}{64}}
-\@writefile{loa}{\contentsline {algorithm}{\numberline {4}{\ignorespaces Asynchronous iterative scheme\relax }}{64}}
-\newlabel{algo:ch6p2async}{{4}{64}}
-\@writefile{toc}{\contentsline {subsection}{\numberline {5.3.1}A basic asynchronous scheme}{66}}
-\newlabel{ch6:p2BasicAsync}{{5.3.1}{66}}
-\newlabel{algo:ch6p2BasicAsync}{{5.5}{66}}
-\@writefile{lol}{\contentsline {lstlisting}{\numberline {5.5}Initialization of the basic asynchronous scheme}{66}}
-\newlabel{algo:ch6p2BasicAsyncComp}{{5.6}{67}}
-\@writefile{lol}{\contentsline {lstlisting}{\numberline {5.6}Computing function in the basic asynchronous scheme}{67}}
-\newlabel{algo:ch6p2BasicAsyncSendings}{{5.7}{68}}
-\@writefile{lol}{\contentsline {lstlisting}{\numberline {5.7}Sending function in the basic asynchronous scheme}{68}}
-\newlabel{algo:ch6p2BasicAsyncReceptions}{{5.8}{69}}
-\@writefile{lol}{\contentsline {lstlisting}{\numberline {5.8}Reception function in the basic asynchronous scheme}{69}}
-\@writefile{toc}{\contentsline {subsection}{\numberline {5.3.2}Synchronization of the asynchronous scheme}{70}}
-\newlabel{ch6:p2SsyncOverAsync}{{5.3.2}{70}}
-\newlabel{algo:ch6p2Sync}{{5.9}{71}}
-\@writefile{lol}{\contentsline {lstlisting}{\numberline {5.9}Initialization of the synchronized scheme}{71}}
-\newlabel{algo:ch6p2SyncComp}{{5.10}{72}}
-\@writefile{lol}{\contentsline {lstlisting}{\numberline {5.10}Computing function in the synchronized scheme}{72}}
-\newlabel{algo:ch6p2SyncReceptions}{{5.11}{73}}
-\@writefile{lol}{\contentsline {lstlisting}{\numberline {5.11}Reception function in the synchronized scheme}{73}}
-\@writefile{toc}{\contentsline {subsection}{\numberline {5.3.3}Asynchronous scheme using MPI, OpenMP and CUDA}{74}}
-\newlabel{ch6:p2GPUAsync}{{5.3.3}{74}}
-\newlabel{algo:ch6p2AsyncSyncComp}{{5.12}{76}}
-\@writefile{lol}{\contentsline {lstlisting}{\numberline {5.12}Computing function in the final asynchronous scheme}{76}}
-\newlabel{algo:ch6p2syncGPU}{{5.13}{77}}
-\@writefile{lol}{\contentsline {lstlisting}{\numberline {5.13}Computing function in the final asynchronous scheme}{77}}
-\newlabel{algo:ch6p2FullOverAsyncMain}{{5.14}{79}}
-\@writefile{lol}{\contentsline {lstlisting}{\numberline {5.14}Initialization of the main process of complete overlap with asynchronism}{79}}
-\newlabel{algo:ch6p2FullOverAsyncComp1}{{5.15}{80}}
-\@writefile{lol}{\contentsline {lstlisting}{\numberline {5.15}Computing function in the final asynchronous scheme with CPU/GPU overlap}{80}}
-\newlabel{algo:ch6p2FullOverAsyncComp2}{{5.16}{81}}
-\@writefile{lol}{\contentsline {lstlisting}{\numberline {5.16}Auxiliary computing function in the final asynchronous scheme with CPU/GPU overlap}{81}}
-\@writefile{toc}{\contentsline {subsection}{\numberline {5.3.4}Experimental validation}{82}}
-\newlabel{sec:ch6p2expes}{{5.3.4}{82}}
-\@writefile{lof}{\contentsline {figure}{\numberline {5.6}{\ignorespaces Computation times of the test application in synchronous and asynchronous modes.\relax }}{83}}
-\newlabel{fig:ch6p2syncasync}{{5.6}{83}}
-\@writefile{lof}{\contentsline {figure}{\numberline {5.7}{\ignorespaces Computation times with or without overlap of Jacobian updatings in asynchronous mode.\relax }}{84}}
-\newlabel{fig:ch6p2aux}{{5.7}{84}}
-\@writefile{toc}{\contentsline {section}{\numberline {5.4}Perspective: A unifying programming model}{85}}
-\newlabel{sec:ch6p3unify}{{5.4}{85}}
-\@writefile{toc}{\contentsline {subsection}{\numberline {5.4.1}Resources}{85}}
-\newlabel{sec:ch6p3resources}{{5.4.1}{85}}
-\newlabel{algo:ch6p3ORWLresources}{{5.17}{86}}
-\@writefile{lol}{\contentsline {lstlisting}{\numberline {5.17}Declaration of ORWL resources for a block-cyclic matrix multiplication}{86}}
-\@writefile{toc}{\contentsline {subsection}{\numberline {5.4.2}Control}{86}}
-\newlabel{sec:ch6p3ORWLcontrol}{{5.4.2}{86}}
-\@writefile{toc}{\contentsline {subsection}{\numberline {5.4.3}Example: block-cyclic matrix multiplication (MM)}{87}}
-\newlabel{sec:ch6p3ORWLMM}{{5.4.3}{87}}
-\newlabel{algo:ch6p3ORWLBCCMM}{{5.18}{87}}
-\@writefile{lol}{\contentsline {lstlisting}{\numberline {5.18}Block-cyclic matrix multiplication, high level per task view}{87}}
-\newlabel{algo:ch6p3ORWLlcopy}{{5.19}{88}}
-\@writefile{lol}{\contentsline {lstlisting}{\numberline {5.19}An iterative local copy operation}{88}}
-\newlabel{algo:ch6p3ORWLrcopy}{{5.20}{88}}
-\@writefile{lol}{\contentsline {lstlisting}{\numberline {5.20}An iterative remote copy operation as part of a block cyclic matrix multiplication task}{88}}
-\newlabel{algo:ch6p3ORWLtrans}{{5.21}{88}}
-\@writefile{lol}{\contentsline {lstlisting}{\numberline {5.21}An iterative GPU transfer and compute operation as part of a block cyclic matrix multiplication task}{88}}
-\newlabel{algo:ch6p3ORWLdecl}{{5.22}{89}}
-\@writefile{lol}{\contentsline {lstlisting}{\numberline {5.22}Dynamic declaration of handles to represent the resources}{89}}
-\newlabel{algo:ch6p3ORWLinit}{{5.23}{90}}
-\@writefile{lol}{\contentsline {lstlisting}{\numberline {5.23}Dynamic initialization of access mode and priorities}{90}}
-\@writefile{toc}{\contentsline {subsection}{\numberline {5.4.4}Tasks and operations}{90}}
-\newlabel{sec:ch6p3tasks}{{5.4.4}{90}}
-\@writefile{toc}{\contentsline {section}{\numberline {5.5}Conclusion}{91}}
-\newlabel{ch6:conclu}{{5.5}{91}}
-\@writefile{toc}{\contentsline {section}{\numberline {5.6}Glossary}{91}}
-\@writefile{toc}{\contentsline {section}{Bibliography}{92}}
+\@writefile{toc}{\contentsline {section}{\numberline {6.1}Introduction}{84}}
+\newlabel{ch6:intro}{{6.1}{84}}
+\@writefile{toc}{\contentsline {section}{\numberline {6.2}General scheme of synchronous code with computation/communication overlapping in GPU clusters}{84}}
+\newlabel{ch6:part1}{{6.2}{84}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {6.2.1}Synchronous parallel algorithms on GPU clusters}{84}}
+\@writefile{lof}{\contentsline {figure}{\numberline {6.1}{\ignorespaces Native overlap of internode CPU communications with GPU computations.\relax }}{86}}
+\newlabel{fig:ch6p1overlapnative}{{6.1}{86}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {6.2.2}Native overlap of CPU communications and GPU computations}{86}}
+\newlabel{algo:ch6p1overlapnative}{{6.1}{87}}
+\@writefile{lol}{\contentsline {lstlisting}{\numberline {6.1}Generic scheme implicitly overlapping MPI communications with CUDA GPU computations}{87}}
+\@writefile{lof}{\contentsline {figure}{\numberline {6.2}{\ignorespaces Overlap of internode CPU communications with a sequence of CPU/GPU data transfers and GPU computations.\relax }}{88}}
+\newlabel{fig:ch6p1overlapseqsequence}{{6.2}{88}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {6.2.3}Overlapping with sequences of transfers and computations}{88}}
+\newlabel{algo:ch6p1overlapseqsequence}{{6.2}{89}}
+\@writefile{lol}{\contentsline {lstlisting}{\numberline {6.2}Generic scheme explicitly overlapping MPI communications with sequences of CUDA CPU/GPU transfers and CUDA GPU computations}{89}}
+\@writefile{lof}{\contentsline {figure}{\numberline {6.3}{\ignorespaces Overlap of internode CPU communications with a streamed sequence of CPU/GPU data transfers and GPU computations.\relax }}{90}}
+\newlabel{fig:ch6p1overlapstreamsequence}{{6.3}{90}}
+\newlabel{algo:ch6p1overlapstreamsequence}{{6.3}{91}}
+\@writefile{lol}{\contentsline {lstlisting}{\numberline {6.3}Generic scheme explicitly overlapping MPI communications with streamed sequences of CUDA CPU/GPU transfers and CUDA GPU computations}{91}}
+\@writefile{lof}{\contentsline {figure}{\numberline {6.4}{\ignorespaces Complete overlap of internode CPU communications, CPU/GPU data transfers and GPU computations, interleaving computation-communication iterations\relax }}{93}}
+\newlabel{fig:ch6p1overlapinterleaved}{{6.4}{93}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {6.2.4}Interleaved communications-transfers-computations overlapping}{93}}
+\newlabel{algo:ch6p1overlapinterleaved}{{6.4}{94}}
+\@writefile{lol}{\contentsline {lstlisting}{\numberline {6.4}Generic scheme explicitly overlapping MPI communications, CUDA CPU/GPU transfers and CUDA GPU computations, interleaving computation-communication iterations}{94}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {6.2.5}Experimental validation}{96}}
+\newlabel{ch6:p1expes}{{6.2.5}{96}}
+\newlabel{ch6:p1block-cyclic}{{6.2.5}{96}}
+\@writefile{lof}{\contentsline {figure}{\numberline {6.5}{\ignorespaces Experimental performances of different synchronous algorithms computing a dense matrix product\relax }}{97}}
+\newlabel{fig:ch6p1syncexpematrixprod}{{6.5}{97}}
+\@writefile{toc}{\contentsline {section}{\numberline {6.3}General scheme of asynchronous parallel code with computation/communication overlapping}{98}}
+\newlabel{ch6:part2}{{6.3}{98}}
+\@writefile{loa}{\contentsline {algocf}{\numberline {3}{\ignorespaces Synchronous iterative scheme\relax }}{98}}
+\newlabel{algo:ch6p2sync}{{3}{98}}
+\@writefile{loa}{\contentsline {algocf}{\numberline {4}{\ignorespaces Asynchronous iterative scheme\relax }}{98}}
+\newlabel{algo:ch6p2async}{{4}{98}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {6.3.1}A basic asynchronous scheme}{99}}
+\newlabel{ch6:p2BasicAsync}{{6.3.1}{99}}
+\newlabel{algo:ch6p2BasicAsync}{{6.5}{100}}
+\@writefile{lol}{\contentsline {lstlisting}{\numberline {6.5}Initialization of the basic asynchronous scheme}{100}}
+\newlabel{algo:ch6p2BasicAsyncComp}{{6.6}{101}}
+\@writefile{lol}{\contentsline {lstlisting}{\numberline {6.6}Computing function in the basic asynchronous scheme}{101}}
+\newlabel{algo:ch6p2BasicAsyncSendings}{{6.7}{102}}
+\@writefile{lol}{\contentsline {lstlisting}{\numberline {6.7}Sending function in the basic asynchronous scheme}{102}}
+\newlabel{algo:ch6p2BasicAsyncReceptions}{{6.8}{103}}
+\@writefile{lol}{\contentsline {lstlisting}{\numberline {6.8}Reception function in the basic asynchronous scheme}{103}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {6.3.2}Synchronization of the asynchronous scheme}{104}}
+\newlabel{ch6:p2SsyncOverAsync}{{6.3.2}{104}}
+\newlabel{algo:ch6p2Sync}{{6.9}{105}}
+\@writefile{lol}{\contentsline {lstlisting}{\numberline {6.9}Initialization of the synchronized scheme}{105}}
+\newlabel{algo:ch6p2SyncComp}{{6.10}{106}}
+\@writefile{lol}{\contentsline {lstlisting}{\numberline {6.10}Computing function in the synchronized scheme}{106}}
+\newlabel{algo:ch6p2SyncReceptions}{{6.11}{107}}
+\@writefile{lol}{\contentsline {lstlisting}{\numberline {6.11}Reception function in the synchronized scheme}{107}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {6.3.3}Asynchronous scheme using MPI, OpenMP and CUDA}{108}}
+\newlabel{ch6:p2GPUAsync}{{6.3.3}{108}}
+\newlabel{algo:ch6p2AsyncSyncComp}{{6.12}{109}}
+\@writefile{lol}{\contentsline {lstlisting}{\numberline {6.12}Computing function in the final asynchronous scheme}{109}}
+\newlabel{algo:ch6p2syncGPU}{{6.13}{111}}
+\@writefile{lol}{\contentsline {lstlisting}{\numberline {6.13}Computing function in the final asynchronous scheme}{111}}
+\newlabel{algo:ch6p2FullOverAsyncMain}{{6.14}{113}}
+\@writefile{lol}{\contentsline {lstlisting}{\numberline {6.14}Initialization of the main process of complete overlap with asynchronism}{113}}
+\newlabel{algo:ch6p2FullOverAsyncComp1}{{6.15}{114}}
+\@writefile{lol}{\contentsline {lstlisting}{\numberline {6.15}Computing function in the final asynchronous scheme with CPU/GPU overlap}{114}}
+\newlabel{algo:ch6p2FullOverAsyncComp2}{{6.16}{115}}
+\@writefile{lol}{\contentsline {lstlisting}{\numberline {6.16}Auxiliary computing function in the final asynchronous scheme with CPU/GPU overlap}{115}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {6.3.4}Experimental validation}{116}}
+\newlabel{sec:ch6p2expes}{{6.3.4}{116}}
+\@writefile{lof}{\contentsline {figure}{\numberline {6.6}{\ignorespaces Computation times of the test application in synchronous and asynchronous modes.\relax }}{117}}
+\newlabel{fig:ch6p2syncasync}{{6.6}{117}}
+\@writefile{lof}{\contentsline {figure}{\numberline {6.7}{\ignorespaces Computation times with or without overlap of Jacobian updatings in asynchronous mode.\relax }}{118}}
+\newlabel{fig:ch6p2aux}{{6.7}{118}}
+\@writefile{toc}{\contentsline {section}{\numberline {6.4}Perspective: A unifying programming model}{119}}
+\newlabel{sec:ch6p3unify}{{6.4}{119}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {6.4.1}Resources}{119}}
+\newlabel{sec:ch6p3resources}{{6.4.1}{119}}
+\newlabel{algo:ch6p3ORWLresources}{{6.17}{120}}
+\@writefile{lol}{\contentsline {lstlisting}{\numberline {6.17}Declaration of ORWL resources for a block-cyclic matrix multiplication}{120}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {6.4.2}Control}{120}}
+\newlabel{sec:ch6p3ORWLcontrol}{{6.4.2}{120}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {6.4.3}Example: block-cyclic matrix multiplication (MM)}{121}}
+\newlabel{sec:ch6p3ORWLMM}{{6.4.3}{121}}
+\newlabel{algo:ch6p3ORWLBCCMM}{{6.18}{121}}
+\@writefile{lol}{\contentsline {lstlisting}{\numberline {6.18}Block-cyclic matrix multiplication, high level per task view}{121}}
+\newlabel{algo:ch6p3ORWLlcopy}{{6.19}{122}}
+\@writefile{lol}{\contentsline {lstlisting}{\numberline {6.19}An iterative local copy operation}{122}}
+\newlabel{algo:ch6p3ORWLrcopy}{{6.20}{122}}
+\@writefile{lol}{\contentsline {lstlisting}{\numberline {6.20}An iterative remote copy operation as part of a block cyclic matrix multiplication task}{122}}
+\newlabel{algo:ch6p3ORWLtrans}{{6.21}{122}}
+\@writefile{lol}{\contentsline {lstlisting}{\numberline {6.21}An iterative GPU transfer and compute operation as part of a block cyclic matrix multiplication task}{122}}
+\newlabel{algo:ch6p3ORWLdecl}{{6.22}{123}}
+\@writefile{lol}{\contentsline {lstlisting}{\numberline {6.22}Dynamic declaration of handles to represent the resources}{123}}
+\newlabel{algo:ch6p3ORWLinit}{{6.23}{124}}
+\@writefile{lol}{\contentsline {lstlisting}{\numberline {6.23}Dynamic initialization of access mode and priorities}{124}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {6.4.4}Tasks and operations}{124}}
+\newlabel{sec:ch6p3tasks}{{6.4.4}{124}}
+\@writefile{toc}{\contentsline {section}{\numberline {6.5}Conclusion}{125}}
+\newlabel{ch6:conclu}{{6.5}{125}}
+\@writefile{toc}{\contentsline {section}{\numberline {6.6}Glossary}{125}}
+\@writefile{toc}{\contentsline {section}{Bibliography}{126}}
 \@setckpt{Chapters/chapter6/ch6}{
-\setcounter{page}{94}
+\setcounter{page}{128}
 \setcounter{equation}{0}
 \setcounter{enumi}{4}
 \setcounter{enumii}{0}
 \setcounter{footnote}{0}
 \setcounter{mpfootnote}{0}
 \setcounter{part}{1}
-\setcounter{chapter}{5}
+\setcounter{chapter}{6}
 \setcounter{section}{6}
 \setcounter{subsection}{0}
 \setcounter{subsubsection}{0}
 \setcounter{figure}{7}
 \setcounter{table}{0}
 \setcounter{numauthors}{0}
-\setcounter{parentequation}{0}
+\setcounter{parentequation}{8}
 \setcounter{subfigure}{0}
 \setcounter{lofdepth}{1}
 \setcounter{subtable}{0}
 \setcounter{lotdepth}{1}
 \setcounter{lstnumber}{17}
 \setcounter{ContinuedFloat}{0}
-\setcounter{float@type}{16}
-\setcounter{algorithm}{4}
-\setcounter{ALC@unique}{0}
-\setcounter{ALC@line}{0}
-\setcounter{ALC@rem}{0}
-\setcounter{ALC@depth}{0}
 \setcounter{AlgoLine}{0}
-\setcounter{algocfline}{0}
-\setcounter{algocfproc}{0}
-\setcounter{algocf}{0}
+\setcounter{algocfline}{4}
+\setcounter{algocfproc}{4}
+\setcounter{algocf}{4}
 \setcounter{proposition}{0}
+\setcounter{theorem}{0}
+\setcounter{exercise}{0}
+\setcounter{example}{0}
+\setcounter{definition}{0}
 \setcounter{proof}{0}
 \setcounter{lstlisting}{23}
 }