/* Copyright (c) 2010-2019. The SimGrid Team.
 * All rights reserved. */

/* This program is free software; you can redistribute it and/or modify it
 * under the terms of the license (GNU LGPL) which comes with this package. */
#ifndef SMPI_FILE_HPP_INCLUDED
#define SMPI_FILE_HPP_INCLUDED
#include "simgrid/plugins/file_system.h"
#include "smpi_comm.hpp"
#include "smpi_coll.hpp"
#include "smpi_datatype.hpp"
#include "smpi_errhandler.hpp"
#include "smpi_info.hpp"
#include <algorithm>
#include <utility>
#include <vector>
XBT_LOG_EXTERNAL_CATEGORY(smpi_pmpi);

namespace simgrid {
namespace smpi {

class File {
  MPI_Comm comm_;
  int flags_;
  simgrid::s4u::File* file_;
  MPI_Info info_;
  MPI_Offset* shared_file_pointer_;
  s4u::MutexPtr shared_mutex_;
  MPI_Errhandler errhandler_;
  MPI_Datatype etype_;
  MPI_Datatype filetype_;
  std::string datarep_;
 public:
  File(MPI_Comm comm, const char* filename, int amode, MPI_Info info);
  File(const File&) = delete;
  File& operator=(const File&) = delete;
  ~File();
  int size();
  int get_position(MPI_Offset* offset);
  int get_position_shared(MPI_Offset* offset);
  int flags();
  MPI_Comm comm();
  int sync();
  int seek(MPI_Offset offset, int whence);
  int seek_shared(MPI_Offset offset, int whence);
  int set_view(MPI_Offset disp, MPI_Datatype etype, MPI_Datatype filetype, const char* datarep, MPI_Info info);
  int get_view(MPI_Offset* disp, MPI_Datatype* etype, MPI_Datatype* filetype, char* datarep);
  MPI_Info info();
  void set_info(MPI_Info info);
  static int read(MPI_File fh, void* buf, int count, MPI_Datatype datatype, MPI_Status* status);
  static int read_shared(MPI_File fh, void* buf, int count, MPI_Datatype datatype, MPI_Status* status);
  static int read_ordered(MPI_File fh, void* buf, int count, MPI_Datatype datatype, MPI_Status* status);
  static int write(MPI_File fh, void* buf, int count, MPI_Datatype datatype, MPI_Status* status);
  static int write_shared(MPI_File fh, const void* buf, int count, MPI_Datatype datatype, MPI_Status* status);
  static int write_ordered(MPI_File fh, const void* buf, int count, MPI_Datatype datatype, MPI_Status* status);
  template <int (*T)(MPI_File, void*, int, MPI_Datatype, MPI_Status*)>
  int op_all(void* buf, int count, MPI_Datatype datatype, MPI_Status* status);
  static int close(MPI_File* fh);
  static int del(const char* filename, MPI_Info info);
  MPI_Errhandler errhandler();
  void set_errhandler(MPI_Errhandler errhandler);
};
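/* op_all is instantiated with one of the static read/write helpers above as its
 * template parameter; an MPI_File_read_all-style call would be dispatched roughly as
 * (illustrative sketch, not a verbatim call site):
 *   fh->op_all<simgrid::smpi::File::read>(buf, count, datatype, status);
 */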
/* Read_all, Write_all : loosely based on */
/* @article{Thakur:1996:ETM:245875.245879, */
/*   author = {Thakur, Rajeev and Choudhary, Alok}, */
/*   title = {An Extended Two-phase Method for Accessing Sections of Out-of-core Arrays}, */
/*   journal = {Sci. Program.}, */
/*   issue_date = {Winter 1996}, */
/*   pages = {301--317}, */
/* } */
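/* Overview of the extended two-phase scheme implemented below (descriptive summary
 * added for clarity): each rank first gathers the byte range every other rank wants
 * to access. If the union of the ranges is contiguous and the datatype itself is
 * contiguous, each rank simply performs its own operation. Otherwise the aggregate
 * range is split into one chunk per rank: each rank performs a single contiguous
 * file access covering its chunk (phase 1), then the data is redistributed to its
 * real owners with an all-to-all exchange (phase 2). */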
template <int (*T)(MPI_File, void*, int, MPI_Datatype, MPI_Status*)>
int File::op_all(void* buf, int count, MPI_Datatype datatype, MPI_Status* status) {
  //get min and max offsets from everyone.
  int size = comm_->size();
  int rank = comm_->rank();
  MPI_Offset min_offset = file_->tell();
  MPI_Offset max_offset = min_offset + count * datatype->size();//cheating: as we don't care about the exact data location, we can skip the extent
  MPI_Offset* min_offsets = new MPI_Offset[size];
  MPI_Offset* max_offsets = new MPI_Offset[size];
  simgrid::smpi::colls::allgather(&min_offset, 1, MPI_OFFSET, min_offsets, 1, MPI_OFFSET, comm_);
  simgrid::smpi::colls::allgather(&max_offset, 1, MPI_OFFSET, max_offsets, 1, MPI_OFFSET, comm_);
  MPI_Offset min = min_offset;
  MPI_Offset max = max_offset;
  MPI_Offset tot = 0;
  int empty = 1;
  for (int i = 0; i < size; i++) {
    if (min_offsets[i] != max_offsets[i])
      empty = 0;
    tot += (max_offsets[i] - min_offsets[i]);
    if (min_offsets[i] < min)
      min = min_offsets[i];
    if (max_offsets[i] > max)
      max = max_offsets[i];
  }
  XBT_CDEBUG(smpi_pmpi, "my offsets to read : %lld:%lld, global min and max %lld:%lld", min_offset, max_offset, min, max);
  if (empty == 1) {
    //no process has anything to do: bail out early
    delete[] min_offsets;
    delete[] max_offsets;
    if (status != MPI_STATUS_IGNORE)
      status->count = 0;
    return MPI_SUCCESS;
  }
  MPI_Offset total = max - min;
  if (total == tot && (datatype->flags() & DT_FLAG_CONTIGUOUS)) {
    //contiguous case: just have each process perform its own read/write
    delete[] min_offsets;
    delete[] max_offsets;
    if (status != MPI_STATUS_IGNORE)
      status->count = count * datatype->size();
    return T(this, buf, count, datatype, status);
  }
  //Interleaved case: how much do I need to read, and to whom should I send it?
  MPI_Offset my_chunk_start = (max - min + 1) / size * rank;
  MPI_Offset my_chunk_end = (max - min + 1) / size * (rank + 1);
  XBT_CDEBUG(smpi_pmpi, "my chunks to read : %lld:%lld", my_chunk_start, my_chunk_end);
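  /* (descriptive note) the aggregate range is split into `size` equal chunks and
   * rank i is responsible for the i-th chunk, so each byte of the range is accessed
   * by exactly one process during phase 1. */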
  int* send_sizes = new int[size];
  int* recv_sizes = new int[size];
  int* send_disps = new int[size];
  int* recv_disps = new int[size];
  int total_sent = 0;
  for (int i = 0; i < size; i++) {
    send_sizes[i] = 0;
    send_disps[i] = 0;//cheat to avoid issues when send>recv, as we use the recv buffer
    if ((my_chunk_start >= min_offsets[i] && my_chunk_start < max_offsets[i]) ||
        (my_chunk_end <= max_offsets[i] && my_chunk_end > min_offsets[i])) {
      send_sizes[i] = std::min(max_offsets[i] - 1, my_chunk_end - 1) - std::max(min_offsets[i], my_chunk_start);
      //store the min and max offsets to actually read
      min_offset = std::min(min_offset, min_offsets[i]);
      total_sent += send_sizes[i];
      XBT_CDEBUG(smpi_pmpi, "will have to send %d bytes to %d", send_sizes[i], i);
    }
  }
  min_offset = std::max(min_offset, my_chunk_start);
  //merge the ranges of every process
  std::vector<std::pair<MPI_Offset, MPI_Offset>> ranges;
  for (int i = 0; i < size; ++i)
    ranges.push_back(std::make_pair(min_offsets[i], max_offsets[i]));
  std::sort(ranges.begin(), ranges.end());
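  /* (descriptive note) classic interval merge: with the ranges sorted by start
   * offset, one left-to-right sweep coalesces overlapping ranges into the maximal
   * disjoint chunks that actually need to be accessed, skipping any holes between
   * them. */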
  std::vector<std::pair<MPI_Offset, MPI_Offset>> chunks;
  chunks.push_back(ranges[0]);

  unsigned int nchunks = 0;
  unsigned int i = 1;
  while (i < ranges.size()) {
    if (ranges[i].second > chunks[nchunks].second) {
      // else the range is fully included in the current chunk - ignore it
      if (ranges[i].first > chunks[nchunks].second) {
        //disjoint: start a new chunk
        chunks.push_back(ranges[i]);
        nchunks++;
      } else {
        //overlapping: extend the current chunk
        chunks[nchunks].second = ranges[i].second;
      }
    }
    i++;
  }
  //what do I need to read?
  MPI_Offset totreads = 0;
  for (i = 0; i < chunks.size(); i++) {
    if (chunks[i].second < my_chunk_start)
      continue;
    else if (chunks[i].first > my_chunk_end)
      continue;
    else
      totreads += std::min(chunks[i].second, my_chunk_end - 1) - std::max(chunks[i].first, my_chunk_start);
  }
  XBT_CDEBUG(smpi_pmpi, "will have to access %lld from my chunk", totreads);
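  /* (descriptive note) phase 1: a single contiguous file access covering this rank's
   * chunk, staged through a temporary buffer since most of these bytes belong to
   * other processes. */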
  unsigned char* sendbuf = smpi_get_tmp_sendbuffer(total_sent);

  if (totreads > 0) {
    seek(min_offset, MPI_SEEK_SET);
    T(this, sendbuf, totreads / datatype->size(), datatype, status);
  }
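  /* (descriptive note) phase 2: ranks exchange the per-peer byte counts with an
   * all-to-all so that receive displacements can be computed, then redistribute the
   * payload itself with an all-to-all-v directly into the user buffer. */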
  simgrid::smpi::colls::alltoall(send_sizes, 1, MPI_INT, recv_sizes, 1, MPI_INT, comm_);
  int total_recv = 0;
  for (int i = 0; i < size; i++) {
    recv_disps[i] = total_recv;
    total_recv += recv_sizes[i];
  }
  //Set the buf value to avoid copying dumb data
  simgrid::smpi::colls::alltoallv(sendbuf, send_sizes, send_disps, MPI_BYTE, buf, recv_sizes, recv_disps, MPI_BYTE,
                                  comm_);
  if (status != MPI_STATUS_IGNORE)
    status->count = count * datatype->size();
  smpi_free_tmp_buffer(sendbuf);
  delete[] send_sizes;
  delete[] recv_sizes;
  delete[] send_disps;
  delete[] recv_disps;
  delete[] min_offsets;
  delete[] max_offsets;
  return MPI_SUCCESS;
}

} // namespace smpi
} // namespace simgrid

#endif