/* Copyright (c) 2010-2023. The SimGrid Team.
 * All rights reserved. */

/* This program is free software; you can redistribute it and/or modify it
 * under the terms of the license (GNU LGPL) which comes with this package. */
#ifndef SMPI_FILE_HPP_INCLUDED
#define SMPI_FILE_HPP_INCLUDED

#include "simgrid/plugins/file_system.h"
#include "smpi_comm.hpp"
#include "smpi_coll.hpp"
#include "smpi_datatype.hpp"
#include "smpi_errhandler.hpp"
#include "smpi_info.hpp"

XBT_LOG_EXTERNAL_CATEGORY(smpi_io);
namespace simgrid::smpi {

class File : public F2C {
  MPI_Comm comm_;
  simgrid::s4u::File* file_;
  MPI_Offset* shared_file_pointer_;
  s4u::MutexPtr shared_mutex_;
  MPI_Errhandler errhandler_;
  MPI_Datatype filetype_;

public:
  File(MPI_Comm comm, const char* filename, int amode, MPI_Info info);
  File(const File&) = delete;
  File& operator=(const File&) = delete;

  int get_position(MPI_Offset* offset) const;
  int get_position_shared(MPI_Offset* offset) const;
  MPI_Datatype etype() const;
  MPI_Comm comm() const;
  std::string name() const override { return file_ ? "MPI_File: " + std::string(file_->get_path()) : "MPI_File"; }
  int seek(MPI_Offset offset, int whence);
  int seek_shared(MPI_Offset offset, int whence);
  int set_view(MPI_Offset disp, MPI_Datatype etype, MPI_Datatype filetype, const char* datarep, const Info* info);
  int get_view(MPI_Offset* disp, MPI_Datatype* etype, MPI_Datatype* filetype, char* datarep) const;
  void set_info(MPI_Info info);
  void set_size(int size);
  static int read(MPI_File fh, void* buf, int count, const Datatype* datatype, MPI_Status* status);
  static int read_shared(MPI_File fh, void* buf, int count, const Datatype* datatype, MPI_Status* status);
  static int read_ordered(MPI_File fh, void* buf, int count, const Datatype* datatype, MPI_Status* status);
  static int write(MPI_File fh, void* buf, int count, const Datatype* datatype, MPI_Status* status);
  static int write_shared(MPI_File fh, const void* buf, int count, const Datatype* datatype, MPI_Status* status);
  static int write_ordered(MPI_File fh, const void* buf, int count, const Datatype* datatype, MPI_Status* status);
  template <int (*T)(MPI_File, void*, int, const Datatype*, MPI_Status*)>
  int op_all(void* buf, int count, const Datatype* datatype, MPI_Status* status);
  static int close(MPI_File* fh);
  static int del(const char* filename, const Info* info);
  MPI_Errhandler errhandler();
  void set_errhandler(MPI_Errhandler errhandler);
  void set_atomicity(bool a);
  bool get_atomicity() const;
  static File* f2c(int id);
};
/* Read_all, Write_all : loosely based on */
/* @article{Thakur:1996:ETM:245875.245879,*/
/* author = {Thakur, Rajeev and Choudhary, Alok},*/
/* title = {An Extended Two-phase Method for Accessing Sections of Out-of-core Arrays},*/
/* journal = {Sci. Program.},*/
/* issue_date = {Winter 1996},*/
/* pages = {301--317},*/
/* }*/
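/* Sketch of the collective path implemented by op_all() below (a simplified take on that
 * extended two-phase method):
 *  - every rank announces the byte range it wants to access (allgather of min/max offsets);
 *  - if the union of the ranges is contiguous, each rank simply performs its own independent I/O;
 *  - otherwise the global range is split evenly across the ranks, each rank accesses its slice of
 *    the file, and an alltoallv redistributes the bytes to the ranks that requested them. */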
template <int (*T)(MPI_File, void*, int, const Datatype*, MPI_Status*)>
int File::op_all(void* buf, int count, const Datatype* datatype, MPI_Status* status)
{
  // get min and max offsets from everyone.
  int size = comm_->size();
  int rank = comm_->rank();
  MPI_Offset min_offset = file_->tell();
  MPI_Offset max_offset =
      min_offset +
      count * datatype->get_extent(); // cheating, as we don't care about exact data location, we can skip extent
  std::vector<MPI_Offset> min_offsets(size);
  std::vector<MPI_Offset> max_offsets(size);
  simgrid::smpi::colls::allgather(&min_offset, 1, MPI_OFFSET, min_offsets.data(), 1, MPI_OFFSET, comm_);
  simgrid::smpi::colls::allgather(&max_offset, 1, MPI_OFFSET, max_offsets.data(), 1, MPI_OFFSET, comm_);
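  // Every rank now knows, for each peer i, the byte range [min_offsets[i], max_offsets[i]) that i wants to access.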
  MPI_Offset min = min_offset;
  MPI_Offset max = max_offset;
  MPI_Offset tot = 0;
  int empty = 1;
  for (int i = 0; i < size; i++) {
    if (min_offsets[i] != max_offsets[i])
      empty = 0;
    tot += (max_offsets[i] - min_offsets[i]);
    if (min_offsets[i] < min)
      min = min_offsets[i];
    if (max_offsets[i] > max)
      max = max_offsets[i];
  }

  XBT_CDEBUG(smpi_io, "my offsets to read : %lld:%lld, global min and max %lld:%lld", min_offset, max_offset, min,
             max);
  // nothing to do: every rank announced an empty range
  if (empty == 1) {
    if (status != MPI_STATUS_IGNORE)
      status->count = 0;
    return MPI_SUCCESS;
  }
  XBT_CDEBUG(smpi_io, "min:max : %lld:%lld, tot %lld contig %u", min, max, tot,
             (datatype->flags() & DT_FLAG_CONTIGUOUS));
  if (size == 1 || (max - min == tot && (datatype->flags() & DT_FLAG_CONTIGUOUS))) {
    // contiguous. Just have each proc perform its read
    if (status != MPI_STATUS_IGNORE)
      status->count = count * datatype->size();
    int ret = T(this, buf, count, datatype, status);
    seek(max_offset, MPI_SEEK_SET);
    return ret;
  }
  // Interleaved case: how much do I need to read, and to whom do I send it?
  MPI_Offset my_chunk_start = min + (max - min + 1) / size * rank;
  MPI_Offset my_chunk_end   = min + ((max - min + 1) / size * (rank + 1)) + 1;
  XBT_CDEBUG(smpi_io, "my chunks to read : %lld:%lld", my_chunk_start, my_chunk_end);
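  // The global byte range [min, max] is split into `size` even slices; this rank handles slice `rank`:
  // it performs the file access for that slice and forwards each peer the bytes that peer requested.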
  std::vector<int> send_sizes(size);
  std::vector<int> recv_sizes(size);
  std::vector<int> send_disps(size);
  std::vector<int> recv_disps(size);
  int total_sent = 0;
  for (int i = 0; i < size; i++) {
    recv_sizes[i] = 0;
    send_disps[i] = 0; // cheat to avoid issues when send>recv as we use recv buffer
    if ((my_chunk_start >= min_offsets[i] && my_chunk_start < max_offsets[i]) ||
        ((my_chunk_end <= max_offsets[i]) && my_chunk_end > min_offsets[i])) {
      send_sizes[i] = (std::min(max_offsets[i], my_chunk_end) - std::max(min_offsets[i], my_chunk_start));
      // we want to send only useful data, so let's pretend we pack it
      send_sizes[i] = send_sizes[i] / datatype->get_extent() * datatype->size();
      // store min and max offset to actually read
      min_offset = std::min(min_offset, min_offsets[i]);
      total_sent += send_sizes[i];
      XBT_CDEBUG(smpi_io, "will have to send %d bytes to %d", send_sizes[i], i);
    }
  }
  min_offset = std::max(min_offset, my_chunk_start);
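  // The ranges announced by the ranks may overlap or leave holes: merge them into disjoint chunks
  // so that only bytes some rank actually asked for are counted in what I must access.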
  // merge the ranges of every process
  std::vector<std::pair<MPI_Offset, MPI_Offset>> ranges;
  for (int i = 0; i < size; ++i)
    ranges.emplace_back(min_offsets[i], max_offsets[i]);
  std::sort(ranges.begin(), ranges.end());
  std::vector<std::pair<MPI_Offset, MPI_Offset>> chunks;
  chunks.push_back(ranges[0]);

  unsigned int nchunks = 0;
  for (unsigned i = 1; i < ranges.size(); i++) {
    if (ranges[i].second > chunks[nchunks].second) {
      // else range included - ignore
      if (ranges[i].first > chunks[nchunks].second) {
        // new disjoint range
        chunks.push_back(ranges[i]);
        nchunks++;
      } else {
        // merge overlapping ranges
        chunks[nchunks].second = ranges[i].second;
      }
    }
  }
  // what do I need to read ?
  MPI_Offset totreads = 0;
  for (auto const& [chunk_start, chunk_end] : chunks) {
    if (chunk_end < my_chunk_start)
      continue; // chunk entirely before my slice
    else if (chunk_start > my_chunk_end)
      continue; // chunk entirely after my slice
    else
      totreads += (std::min(chunk_end, my_chunk_end) - std::max(chunk_start, my_chunk_start));
  }
  XBT_CDEBUG(smpi_io, "will have to access %lld from my chunk", totreads);
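  // Phase 1: perform the actual file access on my contiguous slice, through a temporary buffer.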
  unsigned char* sendbuf = smpi_get_tmp_sendbuffer(total_sent);

  if (totreads > 0) {
    seek(min_offset, MPI_SEEK_SET);
    T(this, sendbuf, totreads / datatype->get_extent(), datatype, status);
    seek(max_offset, MPI_SEEK_SET);
  }
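  // Phase 2: exchange the per-peer byte counts, then redistribute the data so each rank ends up with the bytes it asked for.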
  simgrid::smpi::colls::alltoall(send_sizes.data(), 1, MPI_INT, recv_sizes.data(), 1, MPI_INT, comm_);
  int total_recv = 0;
  for (int i = 0; i < size; i++) {
    recv_disps[i] = total_recv;
    total_recv += recv_sizes[i];
  }
  // Set buf value to avoid copying dumb data
  simgrid::smpi::colls::alltoallv(sendbuf, send_sizes.data(), send_disps.data(), MPI_BYTE, buf, recv_sizes.data(),
                                  recv_disps.data(), MPI_BYTE, comm_);
  if (status != MPI_STATUS_IGNORE)
    status->count = count * datatype->size();
  smpi_free_tmp_buffer(sendbuf);
  return MPI_SUCCESS;
}
} // namespace simgrid::smpi

#endif