// SPDX-License-Identifier: GPL-2.0-or-later
/* Direct I/O support.
 *
 * Copyright (C) 2023 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/sched/mm.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/netfs.h>
#include "internal.h"

static void netfs_prepare_dio_read_iterator(struct netfs_io_subrequest *subreq)
{
	struct netfs_io_request *rreq = subreq->rreq;
	size_t rsize;

	rsize = umin(subreq->len, rreq->io_streams[0].sreq_max_len);
	subreq->len = rsize;

	if (unlikely(rreq->io_streams[0].sreq_max_segs)) {
		size_t limit = netfs_limit_iter(&rreq->buffer.iter, 0, rsize,
						rreq->io_streams[0].sreq_max_segs);

		if (limit < rsize) {
			subreq->len = limit;
			trace_netfs_sreq(subreq, netfs_sreq_trace_limited);
		}
	}

	trace_netfs_sreq(subreq, netfs_sreq_trace_prepare);

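	/* Carve the next slice off the request's buffer iterator: the
	 * subrequest gets a copy truncated to subreq->len and the master
	 * iterator is advanced past that slice, ready for the next subrequest.
	 */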
	subreq->io_iter = rreq->buffer.iter;
	iov_iter_truncate(&subreq->io_iter, subreq->len);
	iov_iter_advance(&rreq->buffer.iter, subreq->len);
}

/*
 * Perform a read to a buffer from the server, slicing up the region to be read
 * according to the network rsize.
 */
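/*
 * For example, assuming an rsize of 1MiB and no segment limit, a 3MiB
 * request is issued as three 1MiB subrequests covering 0-1M, 1M-2M and
 * 2M-3M, each carrying its own slice of the destination iterator.
 */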
static int netfs_dispatch_unbuffered_reads(struct netfs_io_request *rreq)
{
	struct netfs_io_stream *stream = &rreq->io_streams[0];
	unsigned long long start = rreq->start;
	ssize_t size = rreq->len;
	int ret = 0;

	do {
		struct netfs_io_subrequest *subreq;
		ssize_t slice;

		subreq = netfs_alloc_subrequest(rreq);
		if (!subreq) {
			ret = -ENOMEM;
			break;
		}

		subreq->source = NETFS_DOWNLOAD_FROM_SERVER;
		subreq->start = start;
		subreq->len = size;

		__set_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags);

		spin_lock(&rreq->lock);
		list_add_tail(&subreq->rreq_link, &stream->subrequests);
		if (list_is_first(&subreq->rreq_link, &stream->subrequests)) {
			stream->front = subreq;
			if (!stream->active) {
				stream->collected_to = stream->front->start;
				/* Store list pointers before active flag */
				smp_store_release(&stream->active, true);
			}
		}
		trace_netfs_sreq(subreq, netfs_sreq_trace_added);
		spin_unlock(&rreq->lock);

		netfs_stat(&netfs_n_rh_download);
		if (rreq->netfs_ops->prepare_read) {
			ret = rreq->netfs_ops->prepare_read(subreq);
			if (ret < 0) {
				netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_cancel);
				break;
			}
		}

		netfs_prepare_dio_read_iterator(subreq);
		slice = subreq->len;
		size -= slice;
		start += slice;
		rreq->submitted += slice;
		if (size <= 0) {
			smp_wmb(); /* Write lists before ALL_QUEUED. */
			set_bit(NETFS_RREQ_ALL_QUEUED, &rreq->flags);
		}

		rreq->netfs_ops->issue_read(subreq);

		if (test_bit(NETFS_RREQ_PAUSE, &rreq->flags))
			netfs_wait_for_pause(rreq);
		if (test_bit(NETFS_RREQ_FAILED, &rreq->flags))
			break;
		if (test_bit(NETFS_RREQ_BLOCKED, &rreq->flags) &&
		    test_bit(NETFS_RREQ_NONBLOCK, &rreq->flags))
			break;
		cond_resched();
	} while (size > 0);

	if (unlikely(size > 0)) {
		smp_wmb(); /* Write lists before ALL_QUEUED. */
		set_bit(NETFS_RREQ_ALL_QUEUED, &rreq->flags);
		netfs_wake_read_collector(rreq);
	}

	return ret;
}

/*
 * Perform a read to an application buffer, bypassing the pagecache and the
 * local disk cache.
 */
static ssize_t netfs_unbuffered_read(struct netfs_io_request *rreq, bool sync)
{
	ssize_t ret;

	_enter("R=%x %llx-%llx",
	       rreq->debug_id, rreq->start, rreq->start + rreq->len - 1);

	if (rreq->len == 0) {
		pr_err("Zero-sized read [R=%x]\n", rreq->debug_id);
		return -EIO;
	}

	// TODO: Use bounce buffer if requested

	inode_dio_begin(rreq->inode);

	ret = netfs_dispatch_unbuffered_reads(rreq);

	if (!rreq->submitted) {
		netfs_put_request(rreq, false, netfs_rreq_trace_put_no_submit);
		inode_dio_end(rreq->inode);
		ret = 0;
		goto out;
	}

	if (sync)
		ret = netfs_wait_for_read(rreq);
	else
		ret = -EIOCBQUEUED;
out:
	_leave(" = %zd", ret);
	return ret;
}

/**
 * netfs_unbuffered_read_iter_locked - Perform an unbuffered or direct I/O read
 * @iocb: The I/O control descriptor describing the read
 * @iter: The output buffer (also specifies read length)
 *
 * Perform an unbuffered I/O or direct I/O from the file in @iocb to the
 * output buffer. No use is made of the pagecache.
 *
 * The caller must hold any appropriate locks.
 */
ssize_t netfs_unbuffered_read_iter_locked(struct kiocb *iocb, struct iov_iter *iter)
{
	struct netfs_io_request *rreq;
	ssize_t ret;
	size_t orig_count = iov_iter_count(iter);
	bool sync = is_sync_kiocb(iocb);

	_enter("");

	if (!orig_count)
		return 0; /* Don't update atime */

	ret = kiocb_write_and_wait(iocb, orig_count);
	if (ret < 0)
		return ret;
	file_accessed(iocb->ki_filp);

	rreq = netfs_alloc_request(iocb->ki_filp->f_mapping, iocb->ki_filp,
				   iocb->ki_pos, orig_count,
				   NETFS_DIO_READ);
	if (IS_ERR(rreq))
		return PTR_ERR(rreq);

	netfs_stat(&netfs_n_rh_dio_read);
	trace_netfs_read(rreq, rreq->start, rreq->len, netfs_read_trace_dio_read);

	/* If this is an async op, we have to keep track of the destination
	 * buffer for ourselves as the caller's iterator will be trashed when
	 * we return.
	 *
	 * In such a case, extract an iterator to represent as much of the
	 * output buffer as we can manage. Note that the extraction might not
	 * be able to allocate a sufficiently large bvec array and may shorten
	 * the request.
	 */
	if (user_backed_iter(iter)) {
		ret = netfs_extract_user_iter(iter, rreq->len, &rreq->buffer.iter, 0);
		if (ret < 0)
			goto out;
		rreq->direct_bv = (struct bio_vec *)rreq->buffer.iter.bvec;
		rreq->direct_bv_count = ret;
		rreq->direct_bv_unpin = iov_iter_extract_will_pin(iter);
		rreq->len = iov_iter_count(&rreq->buffer.iter);
	} else {
		rreq->buffer.iter = *iter;
		rreq->len = orig_count;
		rreq->direct_bv_unpin = false;
		iov_iter_advance(iter, orig_count);
	}

	// TODO: Set up bounce buffer if needed

	if (!sync) {
		rreq->iocb = iocb;
		__set_bit(NETFS_RREQ_OFFLOAD_COLLECTION, &rreq->flags);
	}

	ret = netfs_unbuffered_read(rreq, sync);
	if (ret < 0)
		goto out; /* May be -EIOCBQUEUED */
	if (sync) {
		// TODO: Copy from bounce buffer
		iocb->ki_pos += rreq->transferred;
		ret = rreq->transferred;
	}

out:
	netfs_put_request(rreq, false, netfs_rreq_trace_put_return);
	if (ret > 0)
		orig_count -= ret;
	return ret;
}
EXPORT_SYMBOL(netfs_unbuffered_read_iter_locked);
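
/*
 * Illustrative sketch only, not taken from this file: a network filesystem
 * that does its own locking around direct reads might call the locked
 * variant along these lines, taking whatever filesystem-specific locks it
 * needs first (the example_fs_* name is hypothetical):
 *
 *	static ssize_t example_fs_direct_read_iter(struct kiocb *iocb,
 *						   struct iov_iter *to)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		inode_lock_shared(inode);
 *		ret = netfs_unbuffered_read_iter_locked(iocb, to);
 *		inode_unlock_shared(inode);
 *		return ret;
 *	}
 */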

/**
 * netfs_unbuffered_read_iter - Perform an unbuffered or direct I/O read
 * @iocb: The I/O control descriptor describing the read
 * @iter: The output buffer (also specifies read length)
 *
 * Perform an unbuffered I/O or direct I/O from the file in @iocb to the
 * output buffer. No use is made of the pagecache.
 */
ssize_t netfs_unbuffered_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	if (!iter->count)
		return 0; /* Don't update atime */

	ret = netfs_start_io_direct(inode);
	if (ret == 0) {
		ret = netfs_unbuffered_read_iter_locked(iocb, iter);
		netfs_end_io_direct(inode);
	}
	return ret;
}
EXPORT_SYMBOL(netfs_unbuffered_read_iter);
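
/*
 * Illustrative sketch only, not taken from this file: a netfslib-based
 * filesystem's ->read_iter typically picks this path when the caller asked
 * for direct I/O and otherwise falls back to the buffered helper,
 * netfs_file_read_iter() (the example_fs_* name is hypothetical):
 *
 *	static ssize_t example_fs_read_iter(struct kiocb *iocb,
 *					    struct iov_iter *to)
 *	{
 *		if (iocb->ki_flags & IOCB_DIRECT)
 *			return netfs_unbuffered_read_iter(iocb, to);
 *		return netfs_file_read_iter(iocb, to);
 *	}
 */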