X-Git-Url: https://bilbo.iut-bm.univ-fcomte.fr/and/gitweb/book_gpu.git/blobdiff_plain/17bff40b83bcdcc39769f9e59c70ffae1c525b72..0d39f3bfb1736ae41805f75a779e0bb01f4f5139:/BookGPU/Chapters/chapter6/PartieAsync.tex?ds=sidebyside

diff --git a/BookGPU/Chapters/chapter6/PartieAsync.tex b/BookGPU/Chapters/chapter6/PartieAsync.tex
index 9edbe8d..1623f8c 100644
--- a/BookGPU/Chapters/chapter6/PartieAsync.tex
+++ b/BookGPU/Chapters/chapter6/PartieAsync.tex
@@ -139,7 +139,7 @@ communication libraries such as MPI are not systematically performed in
 parallel the computations~\cite{ChVCV13,Hoefler08a}.
 So, the logical and classical way to implement such an overlap is to use three
 threads: one for computing, one for sending, and one for receiving. Moreover, since
-the communication is performed by threads, blocking synchronous communications\index{MPI!communication!blocking}\index{MPI!communication!synchronous}
+the communication is performed by threads, blocking synchronous communications\index{MPI!blocking}\index{MPI!synchronous}
 can be used without deteriorating the overall performance.
 
 In this basic version, the termination\index{termination} of the global process is performed
@@ -648,7 +648,7 @@ while(!Finished){
     case tagState: // Management of local state messages
       // Actual reception of the message
       MPI_Recv(&recvdState, 1, MPI_CHAR, status.MPI_SOURCE, tagState, MPI_COMM_WORLD, &status);
-      // Updates of numbers of stabilized nodes and received state msgs
+      // Updates of numbers of stabilized nodes and recvd state msgs
       nbOtherCVs += recvdState;
       nbStateMsg++;
       // Unlocking of the computing thread when states of all other
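
Note: the hunks above touch a passage that explains how communications are overlapped with computations by using three dedicated threads (one computing, one sending, one receiving), so that blocking synchronous MPI calls can be used without hurting overall performance, plus a fragment of the receiving thread's message loop. The sketch below is not the chapter's actual code: it is a minimal, self-contained reconstruction of such a receiving thread in C with MPI and pthreads. Only the identifiers Finished, tagState, recvdState, nbOtherCVs, nbStateMsg and status come from the excerpt; the tag values, the MPI_Probe dispatch, the condition-variable unlocking (lockCV/condCV), the tagEnd shutdown message and the demo main are assumptions made for illustration.

  /* Hypothetical sketch of a receiving thread handling state messages,
     under the assumptions listed above. */
  #include <mpi.h>
  #include <pthread.h>
  #include <stdio.h>

  #define tagState 1   /* state messages (tag values are assumptions) */
  #define tagEnd   2   /* local message used here to stop the receiving thread */

  static int nbOtherCVs = 0;  /* number of other nodes reported as stabilized */
  static int nbStateMsg = 0;  /* number of state messages received so far */
  static int nbProcs    = 1;  /* total number of MPI processes */
  static int Finished   = 0;  /* raised when the receiving thread must stop */

  static pthread_mutex_t lockCV = PTHREAD_MUTEX_INITIALIZER;
  static pthread_cond_t  condCV = PTHREAD_COND_INITIALIZER;

  /* Receiving thread: blocking receptions are harmless because they run in
     their own thread, concurrently with the computations. The other message
     kinds of the full application are omitted from this sketch. */
  static void *receivingThread(void *arg)
  {
    MPI_Status status;
    char recvdState;
    (void)arg;

    while (!Finished) {
      /* Wait for any incoming message and dispatch on its tag */
      MPI_Probe(MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
      switch (status.MPI_TAG) {
        case tagState:   /* management of local state messages */
          /* Actual reception of the message */
          MPI_Recv(&recvdState, 1, MPI_CHAR, status.MPI_SOURCE, tagState,
                   MPI_COMM_WORLD, &status);
          pthread_mutex_lock(&lockCV);
          nbOtherCVs += recvdState;   /* update number of stabilized nodes */
          nbStateMsg++;               /* and number of received state msgs */
          /* Unlock the computing thread once the states of all other nodes
             have been received */
          if (nbStateMsg == nbProcs - 1)
            pthread_cond_signal(&condCV);
          pthread_mutex_unlock(&lockCV);
          break;
        case tagEnd:     /* local end message sent by the computing thread */
          MPI_Recv(&recvdState, 1, MPI_CHAR, status.MPI_SOURCE, tagEnd,
                   MPI_COMM_WORLD, &status);
          Finished = 1;
          break;
      }
    }
    return NULL;
  }

  int main(int argc, char **argv)
  {
    int provided, rank, p;
    char myState = 1, end = 0;
    pthread_t recvThread;

    MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &provided);
    if (provided < MPI_THREAD_MULTIPLE)
      MPI_Abort(MPI_COMM_WORLD, 1);
    MPI_Comm_size(MPI_COMM_WORLD, &nbProcs);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    pthread_create(&recvThread, NULL, receivingThread, NULL);

    /* The computing thread (main here) would iterate until local convergence;
       this demo immediately reports a stabilized state (1) to every other node
       with blocking synchronous sends, which never delay the receptions since
       those are handled by the dedicated thread. */
    for (p = 0; p < nbProcs; p++)
      if (p != rank)
        MPI_Ssend(&myState, 1, MPI_CHAR, p, tagState, MPI_COMM_WORLD);

    /* Wait until the states of all other nodes have been received */
    pthread_mutex_lock(&lockCV);
    while (nbStateMsg < nbProcs - 1)
      pthread_cond_wait(&condCV, &lockCV);
    pthread_mutex_unlock(&lockCV);

    /* Stop the receiving thread with a message to ourselves */
    MPI_Send(&end, 1, MPI_CHAR, rank, tagEnd, MPI_COMM_WORLD);
    pthread_join(recvThread, NULL);

    printf("Process %d: %d other node(s) stabilized\n", rank, nbOtherCVs);
    MPI_Finalize();
    return 0;
  }

Built with something like "mpicc overlap.c -o overlap -pthread" (file name illustrative) and run on several processes, each process performs its blocking synchronous sends in the main thread while the dedicated receiving thread consumes incoming messages, which is the property the surrounding paragraph relies on.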