Lines matching full:req in the Linux kernel's io_uring/poll.c (the leading number on each entry is its line in poll.c)
36 struct io_kiocb *req; member
73 static bool io_poll_get_ownership_slowpath(struct io_kiocb *req) in io_poll_get_ownership_slowpath() argument
82 v = atomic_fetch_or(IO_POLL_RETRY_FLAG, &req->poll_refs); in io_poll_get_ownership_slowpath()
85 return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK); in io_poll_get_ownership_slowpath()
94 static inline bool io_poll_get_ownership(struct io_kiocb *req) in io_poll_get_ownership() argument
96 if (unlikely(atomic_read(&req->poll_refs) >= IO_POLL_REF_BIAS)) in io_poll_get_ownership()
97 return io_poll_get_ownership_slowpath(req); in io_poll_get_ownership()
98 return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK); in io_poll_get_ownership()
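The three helpers above implement the request's single-owner scheme: poll_refs packs a reference count into its low bits plus control flags in the high bits, and whichever path bumps the count from zero to one owns the request until all references are dropped. Below is a minimal user-space model of that scheme in C11 atomics; the concrete bit layout (mask, retry flag, bias) is an assumption of the sketch, not copied from the kernel header.

#include <stdatomic.h>
#include <stdbool.h>

#define IO_POLL_REF_MASK   0x3fffffff  /* low bits: reference count (assumed layout) */
#define IO_POLL_RETRY_FLAG 0x40000000  /* "events may have changed, re-check" (assumed) */
#define IO_POLL_REF_BIAS   128         /* threshold for the slowpath (assumed) */

/* Whoever bumps the count from zero owns the request until the count drops. */
bool poll_get_ownership(atomic_int *poll_refs)
{
	if (atomic_load(poll_refs) >= IO_POLL_REF_BIAS) {
		/*
		 * Slowpath: the count is already heavily elevated and taking
		 * ownership is unlikely. Park a retry flag instead of yet
		 * another reference, then make one last attempt.
		 */
		if (atomic_fetch_or(poll_refs, IO_POLL_RETRY_FLAG) & IO_POLL_REF_MASK)
			return false;
	}
	return !(atomic_fetch_add(poll_refs, 1) & IO_POLL_REF_MASK);
}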
101 static void io_poll_mark_cancelled(struct io_kiocb *req) in io_poll_mark_cancelled() argument
103 atomic_or(IO_POLL_CANCEL_FLAG, &req->poll_refs); in io_poll_mark_cancelled()
106 static struct io_poll *io_poll_get_double(struct io_kiocb *req) in io_poll_get_double() argument
109 if (req->opcode == IORING_OP_POLL_ADD) in io_poll_get_double()
110 return req->async_data; in io_poll_get_double()
111 return req->apoll->double_poll; in io_poll_get_double()
114 static struct io_poll *io_poll_get_single(struct io_kiocb *req) in io_poll_get_single() argument
116 if (req->opcode == IORING_OP_POLL_ADD) in io_poll_get_single()
117 return io_kiocb_to_cmd(req, struct io_poll); in io_poll_get_single()
118 return &req->apoll->poll; in io_poll_get_single()
121 static void io_poll_req_insert(struct io_kiocb *req) in io_poll_req_insert() argument
123 struct io_hash_table *table = &req->ctx->cancel_table; in io_poll_req_insert()
124 u32 index = hash_long(req->cqe.user_data, table->hash_bits); in io_poll_req_insert()
126 lockdep_assert_held(&req->ctx->uring_lock); in io_poll_req_insert()
128 hlist_add_head(&req->hash_node, &table->hbs[index].list); in io_poll_req_insert()
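io_poll_req_insert() keys the cancellation table by CQE user_data: a later IORING_OP_POLL_REMOVE or async cancel hashes the same value and only has to walk one short hlist bucket (see io_poll_find() further down). The kernel's hash_long() is a multiplicative hash; a stand-in for that step, using the common 64-bit golden-ratio multiplier rather than the kernel's exact constant:

#include <stdint.h>

/* Map a 64-bit user_data onto one of 2^hash_bits buckets. */
static uint32_t cancel_hash(uint64_t user_data, unsigned int hash_bits)
{
	return (uint32_t)((user_data * 0x9e3779b97f4a7c15ULL) >> (64 - hash_bits));
}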
153 static void io_poll_remove_entries(struct io_kiocb *req) in io_poll_remove_entries() argument
159 if (!(req->flags & (REQ_F_SINGLE_POLL | REQ_F_DOUBLE_POLL))) in io_poll_remove_entries()
178 if (req->flags & REQ_F_SINGLE_POLL) in io_poll_remove_entries()
179 io_poll_remove_entry(io_poll_get_single(req)); in io_poll_remove_entries()
180 if (req->flags & REQ_F_DOUBLE_POLL) in io_poll_remove_entries()
181 io_poll_remove_entry(io_poll_get_double(req)); in io_poll_remove_entries()
193 static void __io_poll_execute(struct io_kiocb *req, int mask) in __io_poll_execute() argument
197 io_req_set_res(req, mask, 0); in __io_poll_execute()
198 req->io_task_work.func = io_poll_task_func; in __io_poll_execute()
200 trace_io_uring_task_add(req, mask); in __io_poll_execute()
202 if (!(req->flags & REQ_F_POLL_NO_LAZY)) in __io_poll_execute()
204 __io_req_task_work_add(req, flags); in __io_poll_execute()
207 static inline void io_poll_execute(struct io_kiocb *req, int res) in io_poll_execute() argument
209 if (io_poll_get_ownership(req)) in io_poll_execute()
210 __io_poll_execute(req, res); in io_poll_execute()
220 * req->cqe.res. IOU_POLL_REMOVE_POLL_USE_RES indicates to remove multishot
221 * poll and that the result is stored in req->cqe.
223 static int io_poll_check_events(struct io_kiocb *req, struct io_tw_state *ts) in io_poll_check_events() argument
231 v = atomic_read(&req->poll_refs); in io_poll_check_events()
245 req->cqe.res = 0; in io_poll_check_events()
248 req->cqe.res = 0; in io_poll_check_events()
254 atomic_andnot(IO_POLL_RETRY_FLAG, &req->poll_refs); in io_poll_check_events()
260 if (!req->cqe.res) { in io_poll_check_events()
261 struct poll_table_struct pt = { ._key = req->apoll_events }; in io_poll_check_events()
262 req->cqe.res = vfs_poll(req->file, &pt) & req->apoll_events; in io_poll_check_events()
269 if (unlikely(!req->cqe.res)) { in io_poll_check_events()
271 if (!(req->apoll_events & EPOLLONESHOT)) in io_poll_check_events()
276 if (unlikely(req->cqe.res & EPOLLERR)) in io_poll_check_events()
277 req_set_fail(req); in io_poll_check_events()
278 if (req->apoll_events & EPOLLONESHOT) in io_poll_check_events()
282 if (!(req->flags & REQ_F_APOLL_MULTISHOT)) { in io_poll_check_events()
283 __poll_t mask = mangle_poll(req->cqe.res & in io_poll_check_events()
284 req->apoll_events); in io_poll_check_events()
286 if (!io_req_post_cqe(req, mask, IORING_CQE_F_MORE)) { in io_poll_check_events()
287 io_req_set_res(req, mask, 0); in io_poll_check_events()
291 int ret = io_poll_issue(req, ts); in io_poll_check_events()
301 req->cqe.res = 0; in io_poll_check_events()
308 } while (atomic_sub_return(v, &req->poll_refs) & IO_POLL_REF_MASK); in io_poll_check_events()
310 io_napi_add(req); in io_poll_check_events()
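Two details of io_poll_check_events() are worth unpacking. First, the return contract described by the comment fragment at lines 220-221: in recent kernels the return codes form a small enum along these lines (only IOU_POLL_REMOVE_POLL_USE_RES is named in the fragment above; the other names and all values here are recalled from poll.c and should be treated as assumptions):

enum {
	IOU_POLL_DONE = 0,                /* done, completion mask is in req->cqe.res */
	IOU_POLL_NO_ACTION = 1,           /* spurious wakeup, keep the poll armed */
	IOU_POLL_REMOVE_POLL_USE_RES = 2, /* drop multishot poll, result already in req->cqe */
	IOU_POLL_REISSUE = 3,             /* re-issue the underlying request */
};

Second, the loop condition at line 308 is the other half of the ownership protocol: the owner sampled poll_refs == v before processing (line 231), and atomic_sub_return(v, ...) both releases those references and reveals whether new wakeups arrived meanwhile. If reference bits remain, some waker lost the ownership race and walked away, so the owner must iterate and handle that event too. Modeled with the same assumed bit layout as the earlier sketch:

#include <stdatomic.h>

#define IO_POLL_REF_MASK 0x3fffffff	/* as in the earlier sketch (assumed) */

void poll_owner_loop(atomic_int *poll_refs)
{
	int v;

	do {
		v = atomic_load(poll_refs);	/* wakeups accounted for so far */
		/* ... check events, post CQEs, maybe re-arm ... */
	} while ((atomic_fetch_sub(poll_refs, v) - v) & IO_POLL_REF_MASK);
}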
314 void io_poll_task_func(struct io_kiocb *req, struct io_tw_state *ts) in io_poll_task_func() argument
318 ret = io_poll_check_events(req, ts); in io_poll_task_func()
320 io_kbuf_recycle(req, 0); in io_poll_task_func()
323 io_kbuf_recycle(req, 0); in io_poll_task_func()
324 __io_poll_execute(req, 0); in io_poll_task_func()
327 io_poll_remove_entries(req); in io_poll_task_func()
329 hash_del(&req->hash_node); in io_poll_task_func()
331 if (req->opcode == IORING_OP_POLL_ADD) { in io_poll_task_func()
335 poll = io_kiocb_to_cmd(req, struct io_poll); in io_poll_task_func()
336 req->cqe.res = mangle_poll(req->cqe.res & poll->events); in io_poll_task_func()
338 io_req_task_submit(req, ts); in io_poll_task_func()
341 req->cqe.res = ret; in io_poll_task_func()
342 req_set_fail(req); in io_poll_task_func()
345 io_req_set_res(req, req->cqe.res, 0); in io_poll_task_func()
346 io_req_task_complete(req, ts); in io_poll_task_func()
348 io_tw_lock(req->ctx, ts); in io_poll_task_func()
351 io_req_task_complete(req, ts); in io_poll_task_func()
353 io_req_task_submit(req, ts); in io_poll_task_func()
355 io_req_defer_failed(req, ret); in io_poll_task_func()
359 static void io_poll_cancel_req(struct io_kiocb *req) in io_poll_cancel_req() argument
361 io_poll_mark_cancelled(req); in io_poll_cancel_req()
363 io_poll_execute(req, 0); in io_poll_cancel_req()
368 static __cold int io_pollfree_wake(struct io_kiocb *req, struct io_poll *poll) in io_pollfree_wake() argument
370 io_poll_mark_cancelled(req); in io_pollfree_wake()
372 io_poll_execute(req, 0); in io_pollfree_wake()
385 * as req->head is NULL'ed out, the request can be in io_pollfree_wake()
396 struct io_kiocb *req = wqe_to_req(wait); in io_poll_wake() local
401 return io_pollfree_wake(req, poll); in io_poll_wake()
407 if (io_poll_get_ownership(req)) { in io_poll_wake()
421 req->flags &= ~REQ_F_DOUBLE_POLL; in io_poll_wake()
423 req->flags &= ~REQ_F_SINGLE_POLL; in io_poll_wake()
425 __io_poll_execute(req, mask); in io_poll_wake()
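Putting lines 396-425 together: the wait-queue callback first discards wakeups whose key has no overlap with the requested events, then lets exactly one contender through the ownership gate; losers simply return, trusting the current owner's drop loop (line 308) to notice their reference and re-check events. A simplified model, reusing poll_get_ownership() from the first sketch (schedule_task_work() is a hypothetical stand-in for __io_poll_execute()):

#include <stdatomic.h>
#include <stdbool.h>

extern bool poll_get_ownership(atomic_int *poll_refs);	/* first sketch */

static void schedule_task_work(int mask)	/* hypothetical stand-in */
{
	(void)mask;	/* would set io_task_work.func and queue it, as at lines 198-204 */
}

int poll_wake_model(atomic_int *poll_refs, int key, int wanted_events)
{
	int mask = key & wanted_events;

	if (key && !mask)
		return 0;	/* wakeup is for events this request didn't ask for */
	if (poll_get_ownership(poll_refs))
		schedule_task_work(mask);	/* only the race winner queues work */
	return 1;
}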
431 static bool io_poll_double_prepare(struct io_kiocb *req) in io_poll_double_prepare() argument
434 struct io_poll *poll = io_poll_get_single(req); in io_poll_double_prepare()
440 * poll arm might not hold ownership and so race for req->flags with in io_poll_double_prepare()
447 req->flags |= REQ_F_DOUBLE_POLL; in io_poll_double_prepare()
448 if (req->opcode == IORING_OP_POLL_ADD) in io_poll_double_prepare()
449 req->flags |= REQ_F_ASYNC_DATA; in io_poll_double_prepare()
460 struct io_kiocb *req = pt->req; in __io_queue_proc() local
461 unsigned long wqe_private = (unsigned long) req; in __io_queue_proc()
491 if (!io_poll_double_prepare(req)) { in __io_queue_proc()
499 req->flags |= REQ_F_SINGLE_POLL; in __io_queue_proc()
517 struct io_poll *poll = io_kiocb_to_cmd(pt->req, struct io_poll); in io_poll_queue_proc()
520 (struct io_poll **) &pt->req->async_data); in io_poll_queue_proc()
523 static bool io_poll_can_finish_inline(struct io_kiocb *req, in io_poll_can_finish_inline() argument
526 return pt->owning || io_poll_get_ownership(req); in io_poll_can_finish_inline()
529 static void io_poll_add_hash(struct io_kiocb *req, unsigned int issue_flags) in io_poll_add_hash() argument
531 struct io_ring_ctx *ctx = req->ctx; in io_poll_add_hash()
534 io_poll_req_insert(req); in io_poll_add_hash()
544 static int __io_arm_poll_handler(struct io_kiocb *req, in __io_arm_poll_handler() argument
549 INIT_HLIST_NODE(&req->hash_node); in __io_arm_poll_handler()
551 poll->file = req->file; in __io_arm_poll_handler()
552 req->apoll_events = poll->events; in __io_arm_poll_handler()
555 ipt->req = req; in __io_arm_poll_handler()
570 atomic_set(&req->poll_refs, (int)ipt->owning); in __io_arm_poll_handler()
580 req->flags |= REQ_F_POLL_NO_LAZY; in __io_arm_poll_handler()
582 mask = vfs_poll(req->file, &ipt->pt) & poll->events; in __io_arm_poll_handler()
585 io_poll_remove_entries(req); in __io_arm_poll_handler()
587 if (!io_poll_can_finish_inline(req, ipt)) { in __io_arm_poll_handler()
588 io_poll_mark_cancelled(req); in __io_arm_poll_handler()
599 if (!io_poll_can_finish_inline(req, ipt)) { in __io_arm_poll_handler()
600 io_poll_add_hash(req, issue_flags); in __io_arm_poll_handler()
603 io_poll_remove_entries(req); in __io_arm_poll_handler()
605 /* no one else has access to the req, forget about the ref */ in __io_arm_poll_handler()
609 io_poll_add_hash(req, issue_flags); in __io_arm_poll_handler()
612 io_poll_can_finish_inline(req, ipt)) { in __io_arm_poll_handler()
613 __io_poll_execute(req, mask); in __io_arm_poll_handler()
616 io_napi_add(req); in __io_arm_poll_handler()
623 if (atomic_cmpxchg(&req->poll_refs, 1, 0) != 1) in __io_arm_poll_handler()
624 __io_poll_execute(req, 0); in __io_arm_poll_handler()
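Lines 544-624 are the arming sequence: poll_refs starts out at 1 when the armer needs exclusive ownership (line 570), vfs_poll() registers the wait entries and reports already-pending events (line 582), and the cmpxchg at line 623 is the handover. Swapping 1 -> 0 releases the request to future wakeups; a failed swap means a wakeup already raced in and parked extra references, so the armer keeps ownership and runs the event check itself (line 624). The release step in C11 atomics:

#include <stdatomic.h>
#include <stdbool.h>

/*
 * Drop the armer's initial reference. Returns false if a wakeup raced
 * in (poll_refs != 1), in which case the caller still owns the request
 * and must process events itself.
 */
bool poll_release_ownership(atomic_int *poll_refs)
{
	int expected = 1;

	return atomic_compare_exchange_strong(poll_refs, &expected, 0);
}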
633 struct async_poll *apoll = pt->req->apoll; in io_async_queue_proc()
646 static struct async_poll *io_req_alloc_apoll(struct io_kiocb *req, in io_req_alloc_apoll() argument
649 struct io_ring_ctx *ctx = req->ctx; in io_req_alloc_apoll()
652 if (req->flags & REQ_F_POLLED) { in io_req_alloc_apoll()
653 apoll = req->apoll; in io_req_alloc_apoll()
665 req->apoll = apoll; in io_req_alloc_apoll()
671 int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags) in io_arm_poll_handler() argument
673 const struct io_issue_def *def = &io_issue_defs[req->opcode]; in io_arm_poll_handler()
681 if (!io_file_can_poll(req)) in io_arm_poll_handler()
683 if (!(req->flags & REQ_F_APOLL_MULTISHOT)) in io_arm_poll_handler()
690 if (req->flags & REQ_F_CLEAR_POLLIN) in io_arm_poll_handler()
698 apoll = io_req_alloc_apoll(req, issue_flags); in io_arm_poll_handler()
701 req->flags &= ~(REQ_F_SINGLE_POLL | REQ_F_DOUBLE_POLL); in io_arm_poll_handler()
702 req->flags |= REQ_F_POLLED; in io_arm_poll_handler()
705 io_kbuf_recycle(req, issue_flags); in io_arm_poll_handler()
707 ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask, issue_flags); in io_arm_poll_handler()
710 trace_io_uring_poll_arm(req, mask, apoll->poll.events); in io_arm_poll_handler()
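io_req_alloc_apoll() leans on REQ_F_POLLED: a request that was poll-armed once before reuses its existing async_poll allocation instead of allocating again (the kernel additionally keeps a per-ctx cache for first-time arms). A simplified user-space model; every name here except REQ_F_POLLED and the function it mirrors is a stand-in invented for the sketch:

#include <stdlib.h>

#define REQ_F_POLLED 0x1	/* assumed flag bit, for the sketch only */

struct async_poll { void *double_poll; };			/* stand-in type */
struct req_model { unsigned int flags; struct async_poll *apoll; };

static struct async_poll *alloc_apoll_model(struct req_model *req)
{
	struct async_poll *apoll;

	if (req->flags & REQ_F_POLLED) {
		apoll = req->apoll;		/* armed before: recycle it */
		free(apoll->double_poll);	/* but drop any stale double poll */
	} else {
		apoll = malloc(sizeof(*apoll));	/* kernel tries a per-ctx cache first */
		if (!apoll)
			return NULL;
	}
	apoll->double_poll = NULL;
	req->apoll = apoll;
	return apoll;
}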
722 struct io_kiocb *req; in io_poll_remove_all() local
731 hlist_for_each_entry_safe(req, tmp, &hb->list, hash_node) { in io_poll_remove_all()
732 if (io_match_task_safe(req, tctx, cancel_all)) { in io_poll_remove_all()
733 hlist_del_init(&req->hash_node); in io_poll_remove_all()
734 io_poll_cancel_req(req); in io_poll_remove_all()
745 struct io_kiocb *req; in io_poll_find() local
749 hlist_for_each_entry(req, &hb->list, hash_node) { in io_poll_find()
750 if (cd->data != req->cqe.user_data) in io_poll_find()
752 if (poll_only && req->opcode != IORING_OP_POLL_ADD) in io_poll_find()
755 if (io_cancel_match_sequence(req, cd->seq)) in io_poll_find()
758 return req; in io_poll_find()
767 struct io_kiocb *req; in io_poll_file_find() local
773 hlist_for_each_entry(req, &hb->list, hash_node) { in io_poll_file_find()
774 if (io_cancel_req_match(req, cd)) in io_poll_file_find()
775 return req; in io_poll_file_find()
781 static int io_poll_disarm(struct io_kiocb *req) in io_poll_disarm() argument
783 if (!req) in io_poll_disarm()
785 if (!io_poll_get_ownership(req)) in io_poll_disarm()
787 io_poll_remove_entries(req); in io_poll_disarm()
788 hash_del(&req->hash_node); in io_poll_disarm()
794 struct io_kiocb *req; in __io_poll_cancel() local
798 req = io_poll_file_find(ctx, cd); in __io_poll_cancel()
800 req = io_poll_find(ctx, false, cd); in __io_poll_cancel()
802 if (req) { in __io_poll_cancel()
803 io_poll_cancel_req(req); in __io_poll_cancel()
837 int io_poll_remove_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_poll_remove_prep() argument
839 struct io_poll_update *upd = io_kiocb_to_cmd(req, struct io_poll_update); in io_poll_remove_prep()
867 int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_poll_add_prep() argument
869 struct io_poll *poll = io_kiocb_to_cmd(req, struct io_poll); in io_poll_add_prep()
877 if ((flags & IORING_POLL_ADD_MULTI) && (req->flags & REQ_F_CQE_SKIP)) in io_poll_add_prep()
884 int io_poll_add(struct io_kiocb *req, unsigned int issue_flags) in io_poll_add() argument
886 struct io_poll *poll = io_kiocb_to_cmd(req, struct io_poll); in io_poll_add()
892 ret = __io_arm_poll_handler(req, poll, &ipt, poll->events, issue_flags); in io_poll_add()
894 io_req_set_res(req, ipt.result_mask, 0); in io_poll_add()
900 int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags) in io_poll_remove() argument
902 struct io_poll_update *poll_update = io_kiocb_to_cmd(req, struct io_poll_update); in io_poll_remove()
903 struct io_ring_ctx *ctx = req->ctx; in io_poll_remove()
945 req_set_fail(req); in io_poll_remove()
949 io_req_set_res(req, ret, 0); in io_poll_remove()
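Seen from user space, this file boils down to arming (IORING_OP_POLL_ADD, optionally multishot) and tearing down (IORING_OP_POLL_REMOVE). A minimal sketch with liburing, assuming liburing 2.x (where io_uring_prep_poll_remove() takes a __u64 user_data) and a kernel with multishot poll support; error handling is trimmed:

#include <liburing.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int fds[2];

	if (pipe(fds) || io_uring_queue_init(8, &ring, 0))
		return 1;

	/* Arm a multishot poll: one CQE per readiness event, CQE_F_MORE set */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_poll_multishot(sqe, fds[0], POLLIN);
	sqe->user_data = 0xcafe;
	io_uring_submit(&ring);

	write(fds[1], "x", 1);		/* fires io_poll_wake() in the kernel */

	io_uring_wait_cqe(&ring, &cqe);
	printf("mask=0x%x more=%d\n", (unsigned int)cqe->res,
	       !!(cqe->flags & IORING_CQE_F_MORE));
	io_uring_cqe_seen(&ring, cqe);

	/* Tear it down: io_poll_find() + io_poll_disarm() on the kernel side */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_poll_remove(sqe, 0xcafe);
	io_uring_submit(&ring);
	io_uring_wait_cqe(&ring, &cqe);	/* res is 0 on success, -ENOENT if not found */
	io_uring_cqe_seen(&ring, cqe);

	io_uring_queue_exit(&ring);
	return 0;
}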