io_uring/kbuf.h (Linux kernel): lines matching "req"

void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
			      unsigned int issue_flags);
int io_buffers_select(struct io_kiocb *req, struct buf_sel_arg *arg,
		      unsigned int issue_flags);
int io_buffers_peek(struct io_kiocb *req, struct buf_sel_arg *arg);

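io_buffer_select() is the kernel-internal entry point that picks one provided buffer at issue time when an SQE carries IOSQE_BUFFER_SELECT; io_buffers_select() and io_buffers_peek() gather several buffers in one go for bundled send/recv. A minimal userspace sketch of what drives this path, assuming liburing and a buffer group 0 registered in advance (queue_selected_recv() is a hypothetical helper name, not a library call):

#include <liburing.h>

/* Arm a recv and let the kernel pick the buffer from group 0. */
static int queue_selected_recv(struct io_uring *ring, int sockfd)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	if (!sqe)
		return -1;
	/* NULL buffer, zero length: the kernel supplies a buffer instead */
	io_uring_prep_recv(sqe, sockfd, NULL, 0, 0);
	sqe->flags |= IOSQE_BUFFER_SELECT;
	sqe->buf_group = 0;	/* bgid of the group provided earlier */
	return io_uring_submit(ring);
}
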
int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags);

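This prep/issue pair implements IORING_OP_REMOVE_BUFFERS, which pulls still-unconsumed buffers back out of a classic provided group. A hedged liburing sketch (remove_group_buffers() is an illustrative name; the CQE res carries the number of buffers actually removed):

#include <liburing.h>

static int remove_group_buffers(struct io_uring *ring, int nr, int bgid)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct io_uring_cqe *cqe;
	int ret;

	if (!sqe)
		return -1;
	io_uring_prep_remove_buffers(sqe, nr, bgid);
	io_uring_submit(ring);
	io_uring_wait_cqe(ring, &cqe);
	ret = cqe->res;		/* count removed, or -errno */
	io_uring_cqe_seen(ring, cqe);
	return ret;
}
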
int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags);

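This pair implements IORING_OP_PROVIDE_BUFFERS, the classic (pre-buffer-ring) way of handing buffers to the kernel. A minimal sketch, assuming eight 4 KiB buffers in group 0 with buffer ids starting at 0 (provide_group() is a hypothetical helper):

#include <liburing.h>
#include <stdlib.h>

static int provide_group(struct io_uring *ring, void **base_out)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct io_uring_cqe *cqe;
	void *base;
	int ret;

	if (!sqe || posix_memalign(&base, 4096, 8 * 4096))
		return -1;
	/* addr, per-buffer len, nr of buffers, bgid, starting bid */
	io_uring_prep_provide_buffers(sqe, base, 4096, 8, 0, 0);
	io_uring_submit(ring);
	io_uring_wait_cqe(ring, &cqe);
	ret = cqe->res;		/* negative on error */
	io_uring_cqe_seen(ring, cqe);
	*base_out = base;
	return ret;
}
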
void __io_put_kbuf(struct io_kiocb *req, int len, unsigned issue_flags);

bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags);

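Both out-of-line helpers serve the legacy (non-ring) path, where provided buffers live on kernel-side lists: __io_put_kbuf() hands a consumed buffer back to the per-context pool under whatever locking the issue context requires, and io_kbuf_recycle_legacy() returns an unconsumed buffer to its group so a later request can select it again.
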
static inline bool io_kbuf_recycle_ring(struct io_kiocb *req)
{
	if (req->buf_list) {
		req->buf_index = req->buf_list->bgid;
		req->flags &= ~(REQ_F_BUFFER_RING|REQ_F_BUFFERS_COMMIT);
		return true;
	}
	return false;
}

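Recycling a ring-provided buffer is O(1): nothing was committed, bl->head never moved, so the buffer effectively never left the ring and clearing the two flags is enough. Rewriting buf_index matters because selection overwrote it with the buffer id, while a retried request needs it to hold the group id again.
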
static inline bool io_do_buffer_select(struct io_kiocb *req)
{
	if (!(req->flags & REQ_F_BUFFER_SELECT))
		return false;
	return !(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING));
}

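io_do_buffer_select() is the gate opcode handlers check at issue time: select a buffer only if the SQE asked for it (REQ_F_BUFFER_SELECT) and no buffer is attached yet, neither a legacy one nor a ring one.
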
static inline bool io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
{
	if (req->flags & REQ_F_BL_NO_RECYCLE)
		return false;
	if (req->flags & REQ_F_BUFFER_SELECTED)
		return io_kbuf_recycle_legacy(req, issue_flags);
	if (req->flags & REQ_F_BUFFER_RING)
		return io_kbuf_recycle_ring(req);
	return false;
}

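Recycling is refused outright once REQ_F_BL_NO_RECYCLE is set: the request already moved some data into the buffer (a short send or recv that will be retried), and re-offering that buffer would hand the next consumer partially written data.
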
static inline bool io_kbuf_commit(struct io_kiocb *req,
				  struct io_buffer_list *bl, int len, int nr)
{
	if (unlikely(!(req->flags & REQ_F_BUFFERS_COMMIT)))
		return true;

	req->flags &= ~REQ_F_BUFFERS_COMMIT;
	/* incremental-ring partial consume elided here */
	bl->head += nr;
	return true;
}

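Commit is the point where ring entries are truly consumed: bl->head advances by nr. For incremental groups a single buffer can be consumed piecemeal across several completions, and the elided branch returns false while the head buffer still has bytes left, which callers translate into IORING_CQE_F_BUF_MORE.
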
static inline bool __io_put_kbuf_ring(struct io_kiocb *req, int len, int nr)
{
	struct io_buffer_list *bl = req->buf_list;
	bool ret = true;

	if (bl) {
		ret = io_kbuf_commit(req, bl, len, nr);
		req->buf_index = bl->bgid;
	}
	req->flags &= ~REQ_F_BUFFER_RING;
	return ret;
}

static inline void __io_put_kbuf_list(struct io_kiocb *req, int len,
				      struct list_head *list)
{
	if (req->flags & REQ_F_BUFFER_RING) {
		__io_put_kbuf_ring(req, len, 1);
	} else {
		req->buf_index = req->kbuf->bgid;
		list_add(&req->kbuf->list, list);
		req->flags &= ~REQ_F_BUFFER_SELECTED;
	}
}

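For a ring buffer this defers to __io_put_kbuf_ring(); for a legacy buffer it moves req->kbuf onto whatever list the caller passed (io_kbuf_drop() below passes the context's io_buffers_comp list) and clears REQ_F_BUFFER_SELECTED. Both branches restore buf_index to the group id.
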
static inline void io_kbuf_drop(struct io_kiocb *req)
{
	lockdep_assert_held(&req->ctx->completion_lock);

	if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)))
		return;

	/* len == 0 is fine here, non-ring will always drop all of it */
	__io_put_kbuf_list(req, 0, &req->ctx->io_buffers_comp);
}

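This is the teardown path: it runs under the completion lock (hence the lockdep assert) when a request dies with a buffer still attached, and simply hands the buffer back with len 0, since no transferred data needs accounting.
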
static inline unsigned int __io_put_kbufs(struct io_kiocb *req, int len,
					  int nbufs, unsigned issue_flags)
{
	unsigned int ret;

	if (!(req->flags & (REQ_F_BUFFER_RING | REQ_F_BUFFER_SELECTED)))
		return 0;

	ret = IORING_CQE_F_BUFFER | (req->buf_index << IORING_CQE_BUFFER_SHIFT);
	if (req->flags & REQ_F_BUFFER_RING) {
		if (!__io_put_kbuf_ring(req, len, nbufs))
			ret |= IORING_CQE_F_BUF_MORE;
	} else {
		__io_put_kbuf(req, len, issue_flags);
	}
	return ret;
}

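The returned value is OR-ed into cqe->flags, which is how userspace learns which buffer a completion used. A decoding sketch (handle_cqe() is illustrative; IORING_CQE_F_BUF_MORE is only posted by kernels that support incremental buffer consumption):

#include <liburing.h>
#include <stdio.h>

static void handle_cqe(struct io_uring_cqe *cqe)
{
	if (cqe->flags & IORING_CQE_F_BUFFER) {
		unsigned int bid = cqe->flags >> IORING_CQE_BUFFER_SHIFT;

		printf("res=%d bid=%u%s\n", cqe->res, bid,
		       (cqe->flags & IORING_CQE_F_BUF_MORE) ?
		       " (buffer still owned by the kernel)" : "");
	}
}
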
static inline unsigned int io_put_kbuf(struct io_kiocb *req, int len,
				       unsigned issue_flags)
{
	return __io_put_kbufs(req, len, 1, issue_flags);
}

static inline unsigned int io_put_kbufs(struct io_kiocb *req, int len,
					int nbufs, unsigned issue_flags)
{
	return __io_put_kbufs(req, len, nbufs, issue_flags);
}

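The two wrappers differ only in the buffer count they commit: io_put_kbuf() is the common single-buffer completion, while io_put_kbufs() serves bundle operations that consumed several ring entries in one request.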