// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/nospec.h>
#include <linux/hugetlb.h>
#include <linux/compat.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "openclose.h"
#include "rsrc.h"
#include "memmap.h"
#include "register.h"

struct io_rsrc_update {
	struct file			*file;
	u64				arg;
	u32				nr_args;
	u32				offset;
};

static struct io_rsrc_node *io_sqe_buffer_register(struct io_ring_ctx *ctx,
			struct iovec *iov, struct page **last_hpage);

/* only define max */
#define IORING_MAX_FIXED_FILES	(1U << 20)
#define IORING_MAX_REG_BUFFERS	(1U << 14)

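/*
 * Charge @nr_pages against the user's RLIMIT_MEMLOCK allowance, failing
 * with -ENOMEM if the new total would exceed the limit.
 */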
int __io_account_mem(struct user_struct *user, unsigned long nr_pages)
{
	unsigned long page_limit, cur_pages, new_pages;

	if (!nr_pages)
		return 0;

	/* Don't allow more pages than we can safely lock */
	page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	cur_pages = atomic_long_read(&user->locked_vm);
	do {
		new_pages = cur_pages + nr_pages;
		if (new_pages > page_limit)
			return -ENOMEM;
	} while (!atomic_long_try_cmpxchg(&user->locked_vm,
					  &cur_pages, new_pages));
	return 0;
}

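/*
 * Undo pinned-memory accounting for @nr_pages against both the user's
 * locked_vm counter and the mm's pinned_vm counter, where present.
 */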
static void io_unaccount_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
{
	if (ctx->user)
		__io_unaccount_mem(ctx->user, nr_pages);

	if (ctx->mm_account)
		atomic64_sub(nr_pages, &ctx->mm_account->pinned_vm);
}

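/*
 * Account @nr_pages of pinned memory against both the user's locked_vm
 * counter and the mm's pinned_vm counter, where present.
 */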
static int io_account_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
{
	int ret;

	if (ctx->user) {
		ret = __io_account_mem(ctx->user, nr_pages);
		if (ret)
			return ret;
	}

	if (ctx->mm_account)
		atomic64_add(nr_pages, &ctx->mm_account->pinned_vm);

	return 0;
}

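/*
 * Basic sanity checks on a buffer about to be registered: reject NULL or
 * zero-length iovecs (except the all-zero sparse entry), cap the length at
 * 1GB, and make sure the address range doesn't wrap.
 */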
static int io_buffer_validate(struct iovec *iov)
{
	unsigned long tmp, acct_len = iov->iov_len + (PAGE_SIZE - 1);

	/*
	 * Don't impose further limits on the size and buffer
	 * constraints here, we'll return -EINVAL later when IO is
	 * submitted if they are wrong.
	 */
	if (!iov->iov_base)
		return iov->iov_len ? -EFAULT : 0;
	if (!iov->iov_len)
		return -EFAULT;

	/* arbitrary limit, but we need something */
	if (iov->iov_len > SZ_1G)
		return -EFAULT;

	if (check_add_overflow((unsigned long)iov->iov_base, acct_len, &tmp))
		return -EOVERFLOW;

	return 0;
}

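/*
 * Drop a reference to the mapped buffer backing @node. On the final
 * reference, unpin the user pages and release the accounted memory.
 */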
static void io_buffer_unmap(struct io_ring_ctx *ctx, struct io_rsrc_node *node)
{
	unsigned int i;

	if (node->buf) {
		struct io_mapped_ubuf *imu = node->buf;

		if (!refcount_dec_and_test(&imu->refs))
			return;
		for (i = 0; i < imu->nr_bvecs; i++)
			unpin_user_page(imu->bvec[i].bv_page);
		if (imu->acct_pages)
			io_unaccount_mem(ctx, imu->acct_pages);
		kvfree(imu);
	}
}

struct io_rsrc_node *io_rsrc_node_alloc(int type)
{
	struct io_rsrc_node *node;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (node) {
		node->type = type;
		node->refs = 1;
	}
	return node;
}

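/*
 * Zero every node's tag so that tearing the table down afterwards doesn't
 * post tag CQEs for entries that are being discarded.
 */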
static void io_clear_table_tags(struct io_rsrc_data *data)
{
	int i;

	for (i = 0; i < data->nr; i++) {
		struct io_rsrc_node *node = data->nodes[i];

		if (node)
			node->tag = 0;
	}
}

__cold void io_rsrc_data_free(struct io_ring_ctx *ctx, struct io_rsrc_data *data)
{
	if (!data->nr)
		return;
	while (data->nr--) {
		if (data->nodes[data->nr])
			io_put_rsrc_node(ctx, data->nodes[data->nr]);
	}
	kvfree(data->nodes);
	data->nodes = NULL;
	data->nr = 0;
}

__cold int io_rsrc_data_alloc(struct io_rsrc_data *data, unsigned nr)
{
	data->nodes = kvmalloc_array(nr, sizeof(struct io_rsrc_node *),
				     GFP_KERNEL_ACCOUNT | __GFP_ZERO);
	if (data->nodes) {
		data->nr = nr;
		return 0;
	}
	return -ENOMEM;
}

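/*
 * Update a range of registered file slots: for each entry, drop whatever is
 * currently installed and, unless the update says -1 or SKIP, install the
 * new file with an optional tag. Returns the number of slots updated, or an
 * error if nothing was updated.
 */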
static int __io_sqe_files_update(struct io_ring_ctx *ctx,
				 struct io_uring_rsrc_update2 *up,
				 unsigned nr_args)
{
	u64 __user *tags = u64_to_user_ptr(up->tags);
	__s32 __user *fds = u64_to_user_ptr(up->data);
	int fd, i, err = 0;
	unsigned int done;

	if (!ctx->file_table.data.nr)
		return -ENXIO;
	if (up->offset + nr_args > ctx->file_table.data.nr)
		return -EINVAL;

	for (done = 0; done < nr_args; done++) {
		u64 tag = 0;

		if ((tags && copy_from_user(&tag, &tags[done], sizeof(tag))) ||
		    copy_from_user(&fd, &fds[done], sizeof(fd))) {
			err = -EFAULT;
			break;
		}
		if ((fd == IORING_REGISTER_FILES_SKIP || fd == -1) && tag) {
			err = -EINVAL;
			break;
		}
		if (fd == IORING_REGISTER_FILES_SKIP)
			continue;

		i = up->offset + done;
		if (io_reset_rsrc_node(ctx, &ctx->file_table.data, i))
			io_file_bitmap_clear(&ctx->file_table, i);

		if (fd != -1) {
			struct file *file = fget(fd);
			struct io_rsrc_node *node;

			if (!file) {
				err = -EBADF;
				break;
			}
			/*
			 * Don't allow io_uring instances to be registered.
			 */
			if (io_is_uring_fops(file)) {
				fput(file);
				err = -EBADF;
				break;
			}
			node = io_rsrc_node_alloc(IORING_RSRC_FILE);
			if (!node) {
				err = -ENOMEM;
				fput(file);
				break;
			}
			ctx->file_table.data.nodes[i] = node;
			if (tag)
				node->tag = tag;
			io_fixed_file_set(node, file);
			io_file_bitmap_set(&ctx->file_table, i);
		}
	}
	return done ? done : err;
}

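/*
 * Update a range of registered buffer slots: each new iovec is validated,
 * pinned and registered, replacing whatever node currently occupies the
 * slot. Returns the number of slots updated, or an error if nothing was
 * updated.
 */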
static int __io_sqe_buffers_update(struct io_ring_ctx *ctx,
				   struct io_uring_rsrc_update2 *up,
				   unsigned int nr_args)
{
	u64 __user *tags = u64_to_user_ptr(up->tags);
	struct iovec fast_iov, *iov;
	struct page *last_hpage = NULL;
	struct iovec __user *uvec;
	u64 user_data = up->data;
	__u32 done;
	int i, err;

	if (!ctx->buf_table.nr)
		return -ENXIO;
	if (up->offset + nr_args > ctx->buf_table.nr)
		return -EINVAL;

	for (done = 0; done < nr_args; done++) {
		struct io_rsrc_node *node;
		u64 tag = 0;

		uvec = u64_to_user_ptr(user_data);
		iov = iovec_from_user(uvec, 1, 1, &fast_iov, ctx->compat);
		if (IS_ERR(iov)) {
			err = PTR_ERR(iov);
			break;
		}
		if (tags && copy_from_user(&tag, &tags[done], sizeof(tag))) {
			err = -EFAULT;
			break;
		}
		err = io_buffer_validate(iov);
		if (err)
			break;
		node = io_sqe_buffer_register(ctx, iov, &last_hpage);
		if (IS_ERR(node)) {
			err = PTR_ERR(node);
			break;
		}
		if (tag) {
			if (!node) {
				err = -EINVAL;
				break;
			}
			node->tag = tag;
		}
		i = array_index_nospec(up->offset + done, ctx->buf_table.nr);
		io_reset_rsrc_node(ctx, &ctx->buf_table, i);
		ctx->buf_table.nodes[i] = node;
		if (ctx->compat)
			user_data += sizeof(struct compat_iovec);
		else
			user_data += sizeof(struct iovec);
	}
	return done ? done : err;
}

static int __io_register_rsrc_update(struct io_ring_ctx *ctx, unsigned type,
				     struct io_uring_rsrc_update2 *up,
				     unsigned nr_args)
{
	__u32 tmp;

	lockdep_assert_held(&ctx->uring_lock);

	if (check_add_overflow(up->offset, nr_args, &tmp))
		return -EOVERFLOW;

	switch (type) {
	case IORING_RSRC_FILE:
		return __io_sqe_files_update(ctx, up, nr_args);
	case IORING_RSRC_BUFFER:
		return __io_sqe_buffers_update(ctx, up, nr_args);
	}
	return -EINVAL;
}

int io_register_files_update(struct io_ring_ctx *ctx, void __user *arg,
			     unsigned nr_args)
{
	struct io_uring_rsrc_update2 up;

	if (!nr_args)
		return -EINVAL;
	memset(&up, 0, sizeof(up));
	if (copy_from_user(&up, arg, sizeof(struct io_uring_rsrc_update)))
		return -EFAULT;
	if (up.resv || up.resv2)
		return -EINVAL;
	return __io_register_rsrc_update(ctx, IORING_RSRC_FILE, &up, nr_args);
}

int io_register_rsrc_update(struct io_ring_ctx *ctx, void __user *arg,
			    unsigned size, unsigned type)
{
	struct io_uring_rsrc_update2 up;

	if (size != sizeof(up))
		return -EINVAL;
	if (copy_from_user(&up, arg, sizeof(up)))
		return -EFAULT;
	if (!up.nr || up.resv || up.resv2)
		return -EINVAL;
	return __io_register_rsrc_update(ctx, type, &up, up.nr);
}

__cold int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg,
			    unsigned int size, unsigned int type)
{
	struct io_uring_rsrc_register rr;

	/* keep it extendible */
	if (size != sizeof(rr))
		return -EINVAL;

	memset(&rr, 0, sizeof(rr));
	if (copy_from_user(&rr, arg, size))
		return -EFAULT;
	if (!rr.nr || rr.resv2)
		return -EINVAL;
	if (rr.flags & ~IORING_RSRC_REGISTER_SPARSE)
		return -EINVAL;

	switch (type) {
	case IORING_RSRC_FILE:
		if (rr.flags & IORING_RSRC_REGISTER_SPARSE && rr.data)
			break;
		return io_sqe_files_register(ctx, u64_to_user_ptr(rr.data),
					     rr.nr, u64_to_user_ptr(rr.tags));
	case IORING_RSRC_BUFFER:
		if (rr.flags & IORING_RSRC_REGISTER_SPARSE && rr.data)
			break;
		return io_sqe_buffers_register(ctx, u64_to_user_ptr(rr.data),
					       rr.nr, u64_to_user_ptr(rr.tags));
	}
	return -EINVAL;
}

int io_files_update_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);

	if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
		return -EINVAL;
	if (sqe->rw_flags || sqe->splice_fd_in)
		return -EINVAL;

	up->offset = READ_ONCE(sqe->off);
	up->nr_args = READ_ONCE(sqe->len);
	if (!up->nr_args)
		return -EINVAL;
	up->arg = READ_ONCE(sqe->addr);
	return 0;
}

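/*
 * IORING_OP_FILES_UPDATE with offset == IORING_FILE_INDEX_ALLOC: install
 * each file into any free fixed slot and write the allocated index back to
 * the user's fd array.
 */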
static int io_files_update_with_index_alloc(struct io_kiocb *req,
					    unsigned int issue_flags)
{
	struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);
	__s32 __user *fds = u64_to_user_ptr(up->arg);
	unsigned int done;
	struct file *file;
	int ret, fd;

	if (!req->ctx->file_table.data.nr)
		return -ENXIO;

	for (done = 0; done < up->nr_args; done++) {
		if (copy_from_user(&fd, &fds[done], sizeof(fd))) {
			ret = -EFAULT;
			break;
		}

		file = fget(fd);
		if (!file) {
			ret = -EBADF;
			break;
		}
		ret = io_fixed_fd_install(req, issue_flags, file,
					  IORING_FILE_INDEX_ALLOC);
		if (ret < 0)
			break;
		if (copy_to_user(&fds[done], &ret, sizeof(ret))) {
			__io_close_fixed(req->ctx, issue_flags, ret);
			ret = -EFAULT;
			break;
		}
	}

	if (done)
		return done;
	return ret;
}

int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_uring_rsrc_update2 up2;
	int ret;

	up2.offset = up->offset;
	up2.data = up->arg;
	up2.nr = 0;
	up2.tags = 0;
	up2.resv = 0;
	up2.resv2 = 0;

	if (up->offset == IORING_FILE_INDEX_ALLOC) {
		ret = io_files_update_with_index_alloc(req, issue_flags);
	} else {
		io_ring_submit_lock(ctx, issue_flags);
		ret = __io_register_rsrc_update(ctx, IORING_RSRC_FILE,
						&up2, up->nr_args);
		io_ring_submit_unlock(ctx, issue_flags);
	}

	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

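/*
 * Final teardown of a resource node: post the tag CQE if one was attached,
 * then drop the file or buffer the node holds and free the node itself.
 */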
void io_free_rsrc_node(struct io_ring_ctx *ctx, struct io_rsrc_node *node)
{
	if (node->tag)
		io_post_aux_cqe(ctx, node->tag, 0, 0);

	switch (node->type) {
	case IORING_RSRC_FILE:
		if (io_slot_file(node))
			fput(io_slot_file(node));
		break;
	case IORING_RSRC_BUFFER:
		if (node->buf)
			io_buffer_unmap(ctx, node);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

	kfree(node);
}

int io_sqe_files_unregister(struct io_ring_ctx *ctx)
{
	if (!ctx->file_table.data.nr)
		return -ENXIO;

	io_free_file_tables(ctx, &ctx->file_table);
	io_file_table_set_alloc_range(ctx, 0, 0);
	return 0;
}

int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
			  unsigned nr_args, u64 __user *tags)
{
	__s32 __user *fds = (__s32 __user *) arg;
	struct file *file;
	int fd, ret;
	unsigned i;

	if (ctx->file_table.data.nr)
		return -EBUSY;
	if (!nr_args)
		return -EINVAL;
	if (nr_args > IORING_MAX_FIXED_FILES)
		return -EMFILE;
	if (nr_args > rlimit(RLIMIT_NOFILE))
		return -EMFILE;
	if (!io_alloc_file_tables(ctx, &ctx->file_table, nr_args))
		return -ENOMEM;

	for (i = 0; i < nr_args; i++) {
		struct io_rsrc_node *node;
		u64 tag = 0;

		ret = -EFAULT;
		if (tags && copy_from_user(&tag, &tags[i], sizeof(tag)))
			goto fail;
		if (fds && copy_from_user(&fd, &fds[i], sizeof(fd)))
			goto fail;
		/* allow sparse sets */
		if (!fds || fd == -1) {
			ret = -EINVAL;
			if (tag)
				goto fail;
			continue;
		}

		file = fget(fd);
		ret = -EBADF;
		if (unlikely(!file))
			goto fail;

		/*
		 * Don't allow io_uring instances to be registered.
		 */
		if (io_is_uring_fops(file)) {
			fput(file);
			goto fail;
		}
		ret = -ENOMEM;
		node = io_rsrc_node_alloc(IORING_RSRC_FILE);
		if (!node) {
			fput(file);
			goto fail;
		}
		if (tag)
			node->tag = tag;
		ctx->file_table.data.nodes[i] = node;
		io_fixed_file_set(node, file);
		io_file_bitmap_set(&ctx->file_table, i);
	}

	/* default it to the whole table */
	io_file_table_set_alloc_range(ctx, 0, ctx->file_table.data.nr);
	return 0;
fail:
	io_clear_table_tags(&ctx->file_table.data);
	io_sqe_files_unregister(ctx);
	return ret;
}

int io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
{
	if (!ctx->buf_table.nr)
		return -ENXIO;
	io_rsrc_data_free(ctx, &ctx->buf_table);
	return 0;
}

/*
 * Not super efficient, but this is only done at registration time. And we
 * do cache the last compound head, so generally we'll only do a full search
 * if we don't match that one.
 *
 * We check if the given compound head page has already been accounted, to
 * avoid double accounting it. This allows us to account the full size of the
 * page, not just the constituent pages of a huge page.
 */
static bool headpage_already_acct(struct io_ring_ctx *ctx, struct page **pages,
				  int nr_pages, struct page *hpage)
{
	int i, j;

	/* check current page array */
	for (i = 0; i < nr_pages; i++) {
		if (!PageCompound(pages[i]))
			continue;
		if (compound_head(pages[i]) == hpage)
			return true;
	}

	/* check previously registered pages */
	for (i = 0; i < ctx->buf_table.nr; i++) {
		struct io_rsrc_node *node = ctx->buf_table.nodes[i];
		struct io_mapped_ubuf *imu;

		if (!node)
			continue;
		imu = node->buf;
		for (j = 0; j < imu->nr_bvecs; j++) {
			if (!PageCompound(imu->bvec[j].bv_page))
				continue;
			if (compound_head(imu->bvec[j].bv_page) == hpage)
				return true;
		}
	}

	return false;
}

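/*
 * Work out how many pages to charge for this buffer. Normal pages count
 * individually; a compound (huge) page is charged once for its full size,
 * and only if its head page hasn't been accounted already.
 */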
static int io_buffer_account_pin(struct io_ring_ctx *ctx, struct page **pages,
				 int nr_pages, struct io_mapped_ubuf *imu,
				 struct page **last_hpage)
{
	int i, ret;

	imu->acct_pages = 0;
	for (i = 0; i < nr_pages; i++) {
		if (!PageCompound(pages[i])) {
			imu->acct_pages++;
		} else {
			struct page *hpage;

			hpage = compound_head(pages[i]);
			if (hpage == *last_hpage)
				continue;
			*last_hpage = hpage;
			if (headpage_already_acct(ctx, pages, i, hpage))
				continue;
			imu->acct_pages += page_size(hpage) >> PAGE_SHIFT;
		}
	}

	if (!imu->acct_pages)
		return 0;

	ret = io_account_mem(ctx, imu->acct_pages);
	if (ret)
		imu->acct_pages = 0;
	return ret;
}

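/*
 * Replace the per-page array with one entry per folio, dropping the extra
 * pins taken on the non-head pages of each folio. This lets a huge-page
 * backed buffer be described with far fewer bvec entries.
 */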
static bool io_coalesce_buffer(struct page ***pages, int *nr_pages,
			       struct io_imu_folio_data *data)
{
	struct page **page_array = *pages, **new_array = NULL;
	int nr_pages_left = *nr_pages, i, j;
	int nr_folios = data->nr_folios;

	/* Store head pages only */
	new_array = kvmalloc_array(nr_folios, sizeof(struct page *),
				   GFP_KERNEL);
	if (!new_array)
		return false;

	new_array[0] = compound_head(page_array[0]);
	/*
	 * The pages are bound to the folio, so this doesn't actually unpin
	 * them; it drops all but one reference, and that last reference is
	 * usually put down by io_buffer_unmap().
	 * Note: needs a better helper.
	 */
	if (data->nr_pages_head > 1)
		unpin_user_pages(&page_array[1], data->nr_pages_head - 1);

	j = data->nr_pages_head;
	nr_pages_left -= data->nr_pages_head;
	for (i = 1; i < nr_folios; i++) {
		unsigned int nr_unpin;

		new_array[i] = page_array[j];
		nr_unpin = min_t(unsigned int, nr_pages_left - 1,
				 data->nr_pages_mid - 1);
		if (nr_unpin)
			unpin_user_pages(&page_array[j+1], nr_unpin);
		j += data->nr_pages_mid;
		nr_pages_left -= data->nr_pages_mid;
	}
	kvfree(page_array);
	*pages = new_array;
	*nr_pages = nr_folios;
	return true;
}

bool io_check_coalesce_buffer(struct page **page_array, int nr_pages,
			      struct io_imu_folio_data *data)
{
	struct folio *folio = page_folio(page_array[0]);
	unsigned int count = 1, nr_folios = 1;
	int i;

	data->nr_pages_mid = folio_nr_pages(folio);
	data->folio_shift = folio_shift(folio);

	/*
	 * Check if pages are contiguous inside a folio, and all folios have
	 * the same page count except for the head and tail.
	 */
	for (i = 1; i < nr_pages; i++) {
		if (page_folio(page_array[i]) == folio &&
		    page_array[i] == page_array[i-1] + 1) {
			count++;
			continue;
		}

		if (nr_folios == 1) {
			if (folio_page_idx(folio, page_array[i-1]) !=
			    data->nr_pages_mid - 1)
				return false;

			data->nr_pages_head = count;
		} else if (count != data->nr_pages_mid) {
			return false;
		}

		folio = page_folio(page_array[i]);
		if (folio_size(folio) != (1UL << data->folio_shift) ||
		    folio_page_idx(folio, page_array[i]) != 0)
			return false;

		count = 1;
		nr_folios++;
	}
	if (nr_folios == 1)
		data->nr_pages_head = count;

	data->nr_folios = nr_folios;
	return true;
}

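/*
 * Pin the user pages backing @iov, account them, and build an
 * io_mapped_ubuf describing the buffer. Returns NULL for a sparse (NULL
 * base) entry, an ERR_PTR() on failure, or the new node on success.
 */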
static struct io_rsrc_node *io_sqe_buffer_register(struct io_ring_ctx *ctx,
						   struct iovec *iov,
						   struct page **last_hpage)
{
	struct io_mapped_ubuf *imu = NULL;
	struct page **pages = NULL;
	struct io_rsrc_node *node;
	unsigned long off;
	size_t size;
	int ret, nr_pages, i;
	struct io_imu_folio_data data;
	bool coalesced = false;

	if (!iov->iov_base)
		return NULL;

	node = io_rsrc_node_alloc(IORING_RSRC_BUFFER);
	if (!node)
		return ERR_PTR(-ENOMEM);
	node->buf = NULL;

	ret = -ENOMEM;
	pages = io_pin_pages((unsigned long) iov->iov_base, iov->iov_len,
			     &nr_pages);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		pages = NULL;
		goto done;
	}

	/* If it's huge page(s), try to coalesce them into fewer bvec entries */
	if (nr_pages > 1 && io_check_coalesce_buffer(pages, nr_pages, &data)) {
		if (data.nr_pages_mid != 1)
			coalesced = io_coalesce_buffer(&pages, &nr_pages, &data);
	}

	imu = kvmalloc(struct_size(imu, bvec, nr_pages), GFP_KERNEL);
	if (!imu)
		goto done;

	ret = io_buffer_account_pin(ctx, pages, nr_pages, imu, last_hpage);
	if (ret) {
		unpin_user_pages(pages, nr_pages);
		goto done;
	}

	size = iov->iov_len;
	/* store original address for later verification */
	imu->ubuf = (unsigned long) iov->iov_base;
	imu->len = iov->iov_len;
	imu->nr_bvecs = nr_pages;
	imu->folio_shift = PAGE_SHIFT;
	if (coalesced)
		imu->folio_shift = data.folio_shift;
	refcount_set(&imu->refs, 1);
	off = (unsigned long) iov->iov_base & ((1UL << imu->folio_shift) - 1);
	node->buf = imu;
	ret = 0;

	for (i = 0; i < nr_pages; i++) {
		size_t vec_len;

		vec_len = min_t(size_t, size, (1UL << imu->folio_shift) - off);
		bvec_set_page(&imu->bvec[i], pages[i], vec_len, off);
		off = 0;
		size -= vec_len;
	}
done:
	if (ret) {
		kvfree(imu);
		if (node)
			io_put_rsrc_node(ctx, node);
		node = ERR_PTR(ret);
	}
	kvfree(pages);
	return node;
}

int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
			    unsigned int nr_args, u64 __user *tags)
{
	struct page *last_hpage = NULL;
	struct io_rsrc_data data;
	struct iovec fast_iov, *iov = &fast_iov;
	const struct iovec __user *uvec;
	int i, ret;

	BUILD_BUG_ON(IORING_MAX_REG_BUFFERS >= (1u << 16));

	if (ctx->buf_table.nr)
		return -EBUSY;
	if (!nr_args || nr_args > IORING_MAX_REG_BUFFERS)
		return -EINVAL;
	ret = io_rsrc_data_alloc(&data, nr_args);
	if (ret)
		return ret;

	if (!arg)
		memset(iov, 0, sizeof(*iov));

	for (i = 0; i < nr_args; i++) {
		struct io_rsrc_node *node;
		u64 tag = 0;

		if (arg) {
			uvec = (struct iovec __user *) arg;
			iov = iovec_from_user(uvec, 1, 1, &fast_iov, ctx->compat);
			if (IS_ERR(iov)) {
				ret = PTR_ERR(iov);
				break;
			}
			ret = io_buffer_validate(iov);
			if (ret)
				break;
			if (ctx->compat)
				arg += sizeof(struct compat_iovec);
			else
				arg += sizeof(struct iovec);
		}

		if (tags) {
			if (copy_from_user(&tag, &tags[i], sizeof(tag))) {
				ret = -EFAULT;
				break;
			}
		}

		node = io_sqe_buffer_register(ctx, iov, &last_hpage);
		if (IS_ERR(node)) {
			ret = PTR_ERR(node);
			break;
		}
		if (tag) {
			if (!node) {
				ret = -EINVAL;
				break;
			}
			node->tag = tag;
		}
		data.nodes[i] = node;
	}

	ctx->buf_table = data;
	if (ret) {
		io_clear_table_tags(&ctx->buf_table);
		io_sqe_buffers_unregister(ctx);
	}
	return ret;
}

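/*
 * Set up @iter as a bvec iterator over the registered buffer @imu for the
 * [buf_addr, buf_addr + len) range, after checking that the range lies
 * entirely within the registered mapping.
 */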
int io_import_fixed(int ddir, struct iov_iter *iter,
		    struct io_mapped_ubuf *imu,
		    u64 buf_addr, size_t len)
{
	u64 buf_end;
	size_t offset;

	if (WARN_ON_ONCE(!imu))
		return -EFAULT;
	if (unlikely(check_add_overflow(buf_addr, (u64)len, &buf_end)))
		return -EFAULT;
	/* not inside the mapped region */
	if (unlikely(buf_addr < imu->ubuf || buf_end > (imu->ubuf + imu->len)))
		return -EFAULT;

	/*
	 * Might not be a start of buffer, set size appropriately
	 * and advance us to the beginning.
	 */
	offset = buf_addr - imu->ubuf;
	iov_iter_bvec(iter, ddir, imu->bvec, imu->nr_bvecs, len);

	if (offset) {
		/*
		 * Don't use iov_iter_advance() here, as it's really slow when
		 * using the latter parts of a big fixed buffer - it iterates
		 * over each segment manually. We can cheat a bit here, because
		 * we know that:
		 *
		 * 1) it's a BVEC iter, we set it up
		 * 2) all bvecs are the same in size, except potentially the
		 *    first and last bvec
		 *
		 * So just find our index, and adjust the iterator afterwards.
		 * If the offset is within the first bvec, simply set
		 * iov_offset and we're done. Otherwise skip whole segments,
		 * which is easy because every segment past the first (which
		 * may not be folio_size aligned) covers a full folio.
		 */
		const struct bio_vec *bvec = imu->bvec;

		if (offset < bvec->bv_len) {
			iter->iov_offset = offset;
		} else {
			unsigned long seg_skip;

			/* skip first vec */
			offset -= bvec->bv_len;
			seg_skip = 1 + (offset >> imu->folio_shift);

			iter->bvec += seg_skip;
			iter->nr_segs -= seg_skip;
			iter->iov_offset = offset & ((1UL << imu->folio_shift) - 1);
		}
	}

	return 0;
}

/* Lock two rings at once. The rings must be different! */
static void lock_two_rings(struct io_ring_ctx *ctx1, struct io_ring_ctx *ctx2)
{
	if (ctx1 > ctx2)
		swap(ctx1, ctx2);
	mutex_lock(&ctx1->uring_lock);
	mutex_lock_nested(&ctx2->uring_lock, SINGLE_DEPTH_NESTING);
}

/* Both rings are locked by the caller. */
static int io_clone_buffers(struct io_ring_ctx *ctx, struct io_ring_ctx *src_ctx,
			    struct io_uring_clone_buffers *arg)
{
	struct io_rsrc_data data;
	int i, ret, off, nr;
	unsigned int nbufs;

	lockdep_assert_held(&ctx->uring_lock);
	lockdep_assert_held(&src_ctx->uring_lock);

	/*
	 * Accounting state is shared between the two rings; that only works if
	 * both rings are accounted towards the same counters.
	 */
	if (ctx->user != src_ctx->user || ctx->mm_account != src_ctx->mm_account)
		return -EINVAL;

	/* if offsets are given, must have nr specified too */
	if (!arg->nr && (arg->dst_off || arg->src_off))
		return -EINVAL;
	/* not allowed unless REPLACE is set */
	if (ctx->buf_table.nr && !(arg->flags & IORING_REGISTER_DST_REPLACE))
		return -EBUSY;

	nbufs = src_ctx->buf_table.nr;
	if (!arg->nr)
		arg->nr = nbufs;
	else if (arg->nr > nbufs)
		return -EINVAL;
	else if (arg->nr > IORING_MAX_REG_BUFFERS)
		return -EINVAL;
	if (check_add_overflow(arg->nr, arg->dst_off, &nbufs))
		return -EOVERFLOW;

	ret = io_rsrc_data_alloc(&data, max(nbufs, ctx->buf_table.nr));
	if (ret)
		return ret;

	/* Fill entries in data from dst that won't overlap with src */
	for (i = 0; i < min(arg->dst_off, ctx->buf_table.nr); i++) {
		struct io_rsrc_node *src_node = ctx->buf_table.nodes[i];

		if (src_node) {
			data.nodes[i] = src_node;
			src_node->refs++;
		}
	}

	ret = -ENXIO;
	nbufs = src_ctx->buf_table.nr;
	if (!nbufs)
		goto out_free;
	ret = -EINVAL;
	if (!arg->nr)
		arg->nr = nbufs;
	else if (arg->nr > nbufs)
		goto out_free;
	ret = -EOVERFLOW;
	if (check_add_overflow(arg->nr, arg->src_off, &off))
		goto out_free;
	if (off > nbufs)
		goto out_free;

	off = arg->dst_off;
	i = arg->src_off;
	nr = arg->nr;
	while (nr--) {
		struct io_rsrc_node *dst_node, *src_node;

		src_node = io_rsrc_node_lookup(&src_ctx->buf_table, i);
		if (!src_node) {
			dst_node = NULL;
		} else {
			dst_node = io_rsrc_node_alloc(IORING_RSRC_BUFFER);
			if (!dst_node) {
				ret = -ENOMEM;
				goto out_free;
			}

			refcount_inc(&src_node->buf->refs);
			dst_node->buf = src_node->buf;
		}
		data.nodes[off++] = dst_node;
		i++;
	}

	/*
	 * If asked for replace, put the old table. data->nodes[] holds both
	 * old and new nodes at this point.
	 */
	if (arg->flags & IORING_REGISTER_DST_REPLACE)
		io_rsrc_data_free(ctx, &ctx->buf_table);

	/*
	 * ctx->buf_table must be empty now - either the contents are being
	 * replaced and we just freed the table, or the contents are being
	 * copied to a ring that does not have buffers yet (checked at function
	 * entry).
	 */
	WARN_ON_ONCE(ctx->buf_table.nr);
	ctx->buf_table = data;
	return 0;

out_free:
	io_rsrc_data_free(ctx, &data);
	return ret;
}

/*
 * Copy the registered buffers from the source ring whose file descriptor
 * is given in src_fd to the current ring. This is identical to registering
 * the buffers with ctx, except faster as mappings already exist.
 *
 * Since the memory is already accounted once, don't account it again.
 */
int io_register_clone_buffers(struct io_ring_ctx *ctx, void __user *arg)
{
	struct io_uring_clone_buffers buf;
	struct io_ring_ctx *src_ctx;
	bool registered_src;
	struct file *file;
	int ret;

	if (copy_from_user(&buf, arg, sizeof(buf)))
		return -EFAULT;
	if (buf.flags & ~(IORING_REGISTER_SRC_REGISTERED|IORING_REGISTER_DST_REPLACE))
		return -EINVAL;
	if (!(buf.flags & IORING_REGISTER_DST_REPLACE) && ctx->buf_table.nr)
		return -EBUSY;
	if (memchr_inv(buf.pad, 0, sizeof(buf.pad)))
		return -EINVAL;

	registered_src = (buf.flags & IORING_REGISTER_SRC_REGISTERED) != 0;
	file = io_uring_register_get_file(buf.src_fd, registered_src);
	if (IS_ERR(file))
		return PTR_ERR(file);

	src_ctx = file->private_data;
	if (src_ctx != ctx) {
		mutex_unlock(&ctx->uring_lock);
		lock_two_rings(ctx, src_ctx);
	}

	ret = io_clone_buffers(ctx, src_ctx, &buf);

	if (src_ctx != ctx)
		mutex_unlock(&src_ctx->uring_lock);

	fput(file);
	return ret;
}