Lines Matching full:req

37 static bool io_file_supports_nowait(struct io_kiocb *req, __poll_t mask)  in io_file_supports_nowait()  argument
40 if (req->flags & REQ_F_SUPPORT_NOWAIT) in io_file_supports_nowait()
43 if (io_file_can_poll(req)) { in io_file_supports_nowait()
46 return vfs_poll(req->file, &pt) & mask; in io_file_supports_nowait()
71 static int io_iov_buffer_select_prep(struct io_kiocb *req) in io_iov_buffer_select_prep() argument
75 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); in io_iov_buffer_select_prep()
81 if (req->ctx->compat) in io_iov_buffer_select_prep()
92 static int __io_import_iovec(int ddir, struct io_kiocb *req, in __io_import_iovec() argument
96 const struct io_issue_def *def = &io_issue_defs[req->opcode]; in __io_import_iovec()
97 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); in __io_import_iovec()
106 if (!def->vectored || req->flags & REQ_F_BUFFER_SELECT) { in __io_import_iovec()
107 if (io_do_buffer_select(req)) { in __io_import_iovec()
108 buf = io_buffer_select(req, &sqe_len, issue_flags); in __io_import_iovec()
126 req->ctx->compat); in __io_import_iovec()
130 req->flags |= REQ_F_NEED_CLEANUP; in __io_import_iovec()
138 static inline int io_import_iovec(int rw, struct io_kiocb *req, in io_import_iovec() argument
144 ret = __io_import_iovec(rw, req, io, issue_flags); in io_import_iovec()
152 static void io_rw_recycle(struct io_kiocb *req, unsigned int issue_flags) in io_rw_recycle() argument
154 struct io_async_rw *rw = req->async_data; in io_rw_recycle()
160 if (io_alloc_cache_put(&req->ctx->rw_cache, rw)) { in io_rw_recycle()
161 req->async_data = NULL; in io_rw_recycle()
162 req->flags &= ~REQ_F_ASYNC_DATA; in io_rw_recycle()
166 static void io_req_rw_cleanup(struct io_kiocb *req, unsigned int issue_flags) in io_req_rw_cleanup() argument
195 if (!(req->flags & (REQ_F_REISSUE | REQ_F_REFCOUNT))) { in io_req_rw_cleanup()
196 req->flags &= ~REQ_F_NEED_CLEANUP; in io_req_rw_cleanup()
197 io_rw_recycle(req, issue_flags); in io_req_rw_cleanup()
201 static int io_rw_alloc_async(struct io_kiocb *req) in io_rw_alloc_async() argument
203 struct io_ring_ctx *ctx = req->ctx; in io_rw_alloc_async()
206 rw = io_uring_alloc_async_data(&ctx->rw_cache, req); in io_rw_alloc_async()
210 req->flags |= REQ_F_NEED_CLEANUP; in io_rw_alloc_async()
215 static int io_prep_rw_setup(struct io_kiocb *req, int ddir, bool do_import) in io_prep_rw_setup() argument
219 if (io_rw_alloc_async(req)) in io_prep_rw_setup()
222 if (!do_import || io_do_buffer_select(req)) in io_prep_rw_setup()
225 rw = req->async_data; in io_prep_rw_setup()
226 return io_import_iovec(ddir, req, rw, 0); in io_prep_rw_setup()
243 static int io_prep_rw_pi(struct io_kiocb *req, struct io_rw *rw, int ddir, in io_prep_rw_pi() argument
257 io = req->async_data; in io_prep_rw_pi()
265 req->flags |= REQ_F_HAS_METADATA; in io_prep_rw_pi()
270 static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe, in io_prep_rw() argument
273 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); in io_prep_rw()
280 req->buf_index = READ_ONCE(sqe->buf_index); in io_prep_rw()
295 if (req->ctx->flags & IORING_SETUP_IOPOLL) in io_prep_rw()
303 ret = io_prep_rw_setup(req, ddir, do_import); in io_prep_rw()
317 ret = io_prep_rw_pi(req, rw, ddir, attr_ptr, attr_type_mask); in io_prep_rw()
322 int io_prep_read(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_prep_read() argument
324 return io_prep_rw(req, sqe, ITER_DEST, true); in io_prep_read()
327 int io_prep_write(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_prep_write() argument
329 return io_prep_rw(req, sqe, ITER_SOURCE, true); in io_prep_write()
332 static int io_prep_rwv(struct io_kiocb *req, const struct io_uring_sqe *sqe, in io_prep_rwv() argument
335 const bool do_import = !(req->flags & REQ_F_BUFFER_SELECT); in io_prep_rwv()
338 ret = io_prep_rw(req, sqe, ddir, do_import); in io_prep_rwv()
348 return io_iov_buffer_select_prep(req); in io_prep_rwv()
351 int io_prep_readv(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_prep_readv() argument
353 return io_prep_rwv(req, sqe, ITER_DEST); in io_prep_readv()
356 int io_prep_writev(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_prep_writev() argument
358 return io_prep_rwv(req, sqe, ITER_SOURCE); in io_prep_writev()
361 static int io_prep_rw_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe, in io_prep_rw_fixed() argument
364 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); in io_prep_rw_fixed()
365 struct io_ring_ctx *ctx = req->ctx; in io_prep_rw_fixed()
370 ret = io_prep_rw(req, sqe, ddir, false); in io_prep_rw_fixed()
374 node = io_rsrc_node_lookup(&ctx->buf_table, req->buf_index); in io_prep_rw_fixed()
377 io_req_assign_buf_node(req, node); in io_prep_rw_fixed()
379 io = req->async_data; in io_prep_rw_fixed()
385 int io_prep_read_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_prep_read_fixed() argument
387 return io_prep_rw_fixed(req, sqe, ITER_DEST); in io_prep_read_fixed()
390 int io_prep_write_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_prep_write_fixed() argument
392 return io_prep_rw_fixed(req, sqe, ITER_SOURCE); in io_prep_write_fixed()
399 int io_read_mshot_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_read_mshot_prep() argument
401 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); in io_read_mshot_prep()
405 if (!(req->flags & REQ_F_BUFFER_SELECT)) in io_read_mshot_prep()
408 ret = io_prep_rw(req, sqe, ITER_DEST, false); in io_read_mshot_prep()
415 req->flags |= REQ_F_APOLL_MULTISHOT; in io_read_mshot_prep()
419 void io_readv_writev_cleanup(struct io_kiocb *req) in io_readv_writev_cleanup() argument
421 lockdep_assert_held(&req->ctx->uring_lock); in io_readv_writev_cleanup()
422 io_rw_recycle(req, 0); in io_readv_writev_cleanup()
425 static inline loff_t *io_kiocb_update_pos(struct io_kiocb *req) in io_kiocb_update_pos() argument
427 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); in io_kiocb_update_pos()
432 if (!(req->file->f_mode & FMODE_STREAM)) { in io_kiocb_update_pos()
433 req->flags |= REQ_F_CUR_POS; in io_kiocb_update_pos()
434 rw->kiocb.ki_pos = req->file->f_pos; in io_kiocb_update_pos()
442 static bool io_rw_should_reissue(struct io_kiocb *req) in io_rw_should_reissue() argument
445 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); in io_rw_should_reissue()
446 umode_t mode = file_inode(req->file)->i_mode; in io_rw_should_reissue()
447 struct io_async_rw *io = req->async_data; in io_rw_should_reissue()
448 struct io_ring_ctx *ctx = req->ctx; in io_rw_should_reissue()
452 if ((req->flags & REQ_F_NOWAIT) || (io_wq_current_is_worker() && in io_rw_should_reissue()
471 static void io_req_end_write(struct io_kiocb *req) in io_req_end_write() argument
473 if (req->flags & REQ_F_ISREG) { in io_req_end_write()
474 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); in io_req_end_write()
484 static void io_req_io_end(struct io_kiocb *req) in io_req_io_end() argument
486 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); in io_req_io_end()
489 io_req_end_write(req); in io_req_io_end()
490 fsnotify_modify(req->file); in io_req_io_end()
492 fsnotify_access(req->file); in io_req_io_end()
496 static void __io_complete_rw_common(struct io_kiocb *req, long res) in __io_complete_rw_common() argument
498 if (res == req->cqe.res) in __io_complete_rw_common()
500 if (res == -EAGAIN && io_rw_should_reissue(req)) { in __io_complete_rw_common()
501 req->flags |= REQ_F_REISSUE | REQ_F_BL_NO_RECYCLE; in __io_complete_rw_common()
503 req_set_fail(req); in __io_complete_rw_common()
504 req->cqe.res = res; in __io_complete_rw_common()
508 static inline int io_fixup_rw_res(struct io_kiocb *req, long res) in io_fixup_rw_res() argument
510 struct io_async_rw *io = req->async_data; in io_fixup_rw_res()
513 if (req_has_async_data(req) && io->bytes_done > 0) { in io_fixup_rw_res()
522 void io_req_rw_complete(struct io_kiocb *req, struct io_tw_state *ts) in io_req_rw_complete() argument
524 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); in io_req_rw_complete()
530 io_req_set_res(req, io_fixup_rw_res(req, res), 0); in io_req_rw_complete()
533 io_req_io_end(req); in io_req_rw_complete()
535 if (req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)) in io_req_rw_complete()
536 req->cqe.flags |= io_put_kbuf(req, req->cqe.res, 0); in io_req_rw_complete()
538 io_req_rw_cleanup(req, 0); in io_req_rw_complete()
539 io_req_task_complete(req, ts); in io_req_rw_complete()
545 struct io_kiocb *req = cmd_to_io_kiocb(rw); in io_complete_rw() local
548 __io_complete_rw_common(req, res); in io_complete_rw()
549 io_req_set_res(req, io_fixup_rw_res(req, res), 0); in io_complete_rw()
551 req->io_task_work.func = io_req_rw_complete; in io_complete_rw()
552 __io_req_task_work_add(req, IOU_F_TWQ_LAZY_WAKE); in io_complete_rw()
558 struct io_kiocb *req = cmd_to_io_kiocb(rw); in io_complete_rw_iopoll() local
561 io_req_end_write(req); in io_complete_rw_iopoll()
562 if (unlikely(res != req->cqe.res)) { in io_complete_rw_iopoll()
563 if (res == -EAGAIN && io_rw_should_reissue(req)) in io_complete_rw_iopoll()
564 req->flags |= REQ_F_REISSUE | REQ_F_BL_NO_RECYCLE; in io_complete_rw_iopoll()
566 req->cqe.res = res; in io_complete_rw_iopoll()
570 smp_store_release(&req->iopoll_completed, 1); in io_complete_rw_iopoll()
573 static inline void io_rw_done(struct io_kiocb *req, ssize_t ret) in io_rw_done() argument
575 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); in io_rw_done()
598 if (req->ctx->flags & IORING_SETUP_IOPOLL) in io_rw_done()
604 static int kiocb_done(struct io_kiocb *req, ssize_t ret, in kiocb_done() argument
607 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); in kiocb_done()
608 unsigned final_ret = io_fixup_rw_res(req, ret); in kiocb_done()
610 if (ret >= 0 && req->flags & REQ_F_CUR_POS) in kiocb_done()
611 req->file->f_pos = rw->kiocb.ki_pos; in kiocb_done()
612 if (ret >= 0 && !(req->ctx->flags & IORING_SETUP_IOPOLL)) { in kiocb_done()
613 __io_complete_rw_common(req, ret); in kiocb_done()
618 io_req_io_end(req); in kiocb_done()
619 io_req_set_res(req, final_ret, io_put_kbuf(req, ret, issue_flags)); in kiocb_done()
620 io_req_rw_cleanup(req, issue_flags); in kiocb_done()
623 io_rw_done(req, ret); in kiocb_done()
714 struct io_kiocb *req = wait->private; in io_async_buf_func() local
715 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); in io_async_buf_func()
725 io_req_task_queue(req); in io_async_buf_func()
741 static bool io_rw_should_retry(struct io_kiocb *req) in io_rw_should_retry() argument
743 struct io_async_rw *io = req->async_data; in io_rw_should_retry()
745 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); in io_rw_should_retry()
752 if (req->flags & (REQ_F_NOWAIT | REQ_F_HAS_METADATA)) in io_rw_should_retry()
763 if (io_file_can_poll(req) || in io_rw_should_retry()
764 !(req->file->f_op->fop_flags & FOP_BUFFER_RASYNC)) in io_rw_should_retry()
768 wait->wait.private = req; in io_rw_should_retry()
789 static bool need_complete_io(struct io_kiocb *req) in need_complete_io() argument
791 return req->flags & REQ_F_ISREG || in need_complete_io()
792 S_ISBLK(file_inode(req->file)->i_mode); in need_complete_io()
795 static int io_rw_init_file(struct io_kiocb *req, fmode_t mode, int rw_type) in io_rw_init_file() argument
797 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); in io_rw_init_file()
799 struct io_ring_ctx *ctx = req->ctx; in io_rw_init_file()
800 struct file *file = req->file; in io_rw_init_file()
806 if (!(req->flags & REQ_F_FIXED_FILE)) in io_rw_init_file()
807 req->flags |= io_file_get_flags(file); in io_rw_init_file()
821 ((file->f_flags & O_NONBLOCK && !(req->flags & REQ_F_SUPPORT_NOWAIT)))) in io_rw_init_file()
822 req->flags |= REQ_F_NOWAIT; in io_rw_init_file()
829 req->iopoll_completed = 0; in io_rw_init_file()
831 /* make sure every req only blocks once */ in io_rw_init_file()
832 req->flags &= ~REQ_F_IOPOLL_STATE; in io_rw_init_file()
833 req->iopoll_start = ktime_get_ns(); in io_rw_init_file()
840 if (req->flags & REQ_F_HAS_METADATA) { in io_rw_init_file()
841 struct io_async_rw *io = req->async_data; in io_rw_init_file()
847 if (!(req->file->f_flags & O_DIRECT)) in io_rw_init_file()
856 static int __io_read(struct io_kiocb *req, unsigned int issue_flags) in __io_read() argument
859 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); in __io_read()
860 struct io_async_rw *io = req->async_data; in __io_read()
865 if (io_do_buffer_select(req)) { in __io_read()
866 ret = io_import_iovec(ITER_DEST, req, io, issue_flags); in __io_read()
870 ret = io_rw_init_file(req, FMODE_READ, READ); in __io_read()
873 req->cqe.res = iov_iter_count(&io->iter); in __io_read()
877 if (unlikely(!io_file_supports_nowait(req, EPOLLIN))) in __io_read()
885 ppos = io_kiocb_update_pos(req); in __io_read()
887 ret = rw_verify_area(READ, req->file, ppos, req->cqe.res); in __io_read()
903 if (io_file_can_poll(req)) in __io_read()
906 if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL)) in __io_read()
909 if (req->flags & REQ_F_NOWAIT) in __io_read()
914 } else if (ret == req->cqe.res || ret <= 0 || !force_nonblock || in __io_read()
915 (req->flags & REQ_F_NOWAIT) || !need_complete_io(req) || in __io_read()
942 if (!io_rw_should_retry(req)) { in __io_read()
947 req->cqe.res = iov_iter_count(&io->iter); in __io_read()
966 int io_read(struct io_kiocb *req, unsigned int issue_flags) in io_read() argument
970 ret = __io_read(req, issue_flags); in io_read()
972 return kiocb_done(req, ret, issue_flags); in io_read()
977 int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags) in io_read_mshot() argument
979 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); in io_read_mshot()
986 if (!io_file_can_poll(req)) in io_read_mshot()
991 ret = __io_read(req, issue_flags); in io_read_mshot()
1002 if (io_kbuf_recycle(req, issue_flags)) in io_read_mshot()
1008 io_kbuf_recycle(req, issue_flags); in io_read_mshot()
1010 req_set_fail(req); in io_read_mshot()
1011 } else if (!(req->flags & REQ_F_APOLL_MULTISHOT)) { in io_read_mshot()
1012 cflags = io_put_kbuf(req, ret, issue_flags); in io_read_mshot()
1020 cflags = io_put_kbuf(req, ret, issue_flags); in io_read_mshot()
1023 if (io_req_post_cqe(req, ret, cflags | IORING_CQE_F_MORE)) { in io_read_mshot()
1030 io_poll_multishot_retry(req); in io_read_mshot()
1041 io_req_set_res(req, ret, cflags); in io_read_mshot()
1042 io_req_rw_cleanup(req, issue_flags); in io_read_mshot()
1048 static bool io_kiocb_start_write(struct io_kiocb *req, struct kiocb *kiocb) in io_kiocb_start_write() argument
1053 if (!(req->flags & REQ_F_ISREG)) in io_kiocb_start_write()
1067 int io_write(struct io_kiocb *req, unsigned int issue_flags) in io_write() argument
1070 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); in io_write()
1071 struct io_async_rw *io = req->async_data; in io_write()
1076 ret = io_rw_init_file(req, FMODE_WRITE, WRITE); in io_write()
1079 req->cqe.res = iov_iter_count(&io->iter); in io_write()
1083 if (unlikely(!io_file_supports_nowait(req, EPOLLOUT))) in io_write()
1088 !(req->file->f_op->fop_flags & FOP_BUFFER_WASYNC) && in io_write()
1089 (req->flags & REQ_F_ISREG)) in io_write()
1098 ppos = io_kiocb_update_pos(req); in io_write()
1100 ret = rw_verify_area(WRITE, req->file, ppos, req->cqe.res); in io_write()
1104 if (unlikely(!io_kiocb_start_write(req, kiocb))) in io_write()
1108 if (likely(req->file->f_op->write_iter)) in io_write()
1109 ret2 = req->file->f_op->write_iter(kiocb, &io->iter); in io_write()
1110 else if (req->file->f_op->write) in io_write()
1122 if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT)) in io_write()
1126 if (ret2 == -EAGAIN && (req->ctx->flags & IORING_SETUP_IOPOLL)) in io_write()
1129 if (ret2 != req->cqe.res && ret2 >= 0 && need_complete_io(req)) { in io_write()
1130 trace_io_uring_short_write(req->ctx, kiocb->ki_pos - ret2, in io_write()
1131 req->cqe.res, ret2); in io_write()
1142 io_req_end_write(req); in io_write()
1146 return kiocb_done(req, ret2, issue_flags); in io_write()
1152 io_req_end_write(req); in io_write()
1157 void io_rw_fail(struct io_kiocb *req) in io_rw_fail() argument
1161 res = io_fixup_rw_res(req, req->cqe.res); in io_rw_fail()
1162 io_req_set_res(req, res, req->cqe.flags); in io_rw_fail()
1165 static int io_uring_classic_poll(struct io_kiocb *req, struct io_comp_batch *iob, in io_uring_classic_poll() argument
1168 struct file *file = req->file; in io_uring_classic_poll()
1170 if (req->opcode == IORING_OP_URING_CMD) { in io_uring_classic_poll()
1173 ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd); in io_uring_classic_poll()
1176 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); in io_uring_classic_poll()
1182 static u64 io_hybrid_iopoll_delay(struct io_ring_ctx *ctx, struct io_kiocb *req) in io_hybrid_iopoll_delay() argument
1189 if (req->flags & REQ_F_IOPOLL_STATE) in io_hybrid_iopoll_delay()
1199 req->flags |= REQ_F_IOPOLL_STATE; in io_hybrid_iopoll_delay()
1216 static int io_uring_hybrid_poll(struct io_kiocb *req, in io_uring_hybrid_poll() argument
1219 struct io_ring_ctx *ctx = req->ctx; in io_uring_hybrid_poll()
1223 sleep_time = io_hybrid_iopoll_delay(ctx, req); in io_uring_hybrid_poll()
1224 ret = io_uring_classic_poll(req, iob, poll_flags); in io_uring_hybrid_poll()
1225 runtime = ktime_get_ns() - req->iopoll_start - sleep_time; in io_uring_hybrid_poll()
1252 struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list); in io_do_iopoll() local
1260 if (READ_ONCE(req->iopoll_completed)) in io_do_iopoll()
1264 ret = io_uring_hybrid_poll(req, &iob, poll_flags); in io_do_iopoll()
1266 ret = io_uring_classic_poll(req, &iob, poll_flags); in io_do_iopoll()
1273 /* iopoll may have completed current req */ in io_do_iopoll()
1275 READ_ONCE(req->iopoll_completed)) in io_do_iopoll()
1286 struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list); in io_do_iopoll() local
1289 if (!smp_load_acquire(&req->iopoll_completed)) in io_do_iopoll()
1292 req->cqe.flags = io_put_kbuf(req, req->cqe.res, 0); in io_do_iopoll()
1293 if (req->opcode != IORING_OP_URING_CMD) in io_do_iopoll()
1294 io_req_rw_cleanup(req, 0); in io_do_iopoll()
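
For context, a minimal userspace sketch follows. It is an illustrative example using the public liburing API and is not part of the kernel file searched above: it submits a single IORING_OP_READ, the opcode whose kernel-side handlers io_prep_read() and io_read() appear in the matches listed here, then reaps the resulting completion.

/*
 * Hedged sketch: drive one IORING_OP_READ through io_uring and print the
 * number of bytes read from the CQE. Error handling is abbreviated; the
 * file path is only an example.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <liburing.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	char buf[4096];
	int fd, ret;

	if (io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	fd = open("/etc/hostname", O_RDONLY);
	if (fd < 0)
		return 1;

	/* Queue a read of up to 4096 bytes from offset 0. */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_read(sqe, fd, buf, sizeof(buf), 0);
	io_uring_submit(&ring);

	/* Wait for the completion posted by the kernel's read path. */
	ret = io_uring_wait_cqe(&ring, &cqe);
	if (!ret && cqe->res >= 0)
		printf("read %d bytes\n", cqe->res);
	io_uring_cqe_seen(&ring, cqe);

	close(fd);
	io_uring_queue_exit(&ring);
	return 0;
}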