Lines matching "+full:reg +full:-offset" in io_uring/tctx.c (Linux kernel), grouped below by the enclosing function. The number on each line is its line number in the file.

    1  // SPDX-License-Identifier: GPL-2.0

io_init_wq_offload():
   22  mutex_lock(&ctx->uring_lock);
   23  hash = ctx->hash_map;
   27  mutex_unlock(&ctx->uring_lock);
   28  return ERR_PTR(-ENOMEM);
   30  refcount_set(&hash->refs, 1);
   31  init_waitqueue_head(&hash->wait);
   32  ctx->hash_map = hash;
   34  mutex_unlock(&ctx->uring_lock);
   42  concurrency = min(ctx->sq_entries, 4 * num_online_cpus());

__io_uring_free():
   49  struct io_uring_task *tctx = tsk->io_uring;
   59  xa_for_each(&tctx->xa, index, node) {
   63  WARN_ON_ONCE(tctx->io_wq);
   64  WARN_ON_ONCE(tctx->cached_refs);
   66  percpu_counter_destroy(&tctx->inflight);
   68  tsk->io_uring = NULL;

io_uring_alloc_task_context():
   79  return -ENOMEM;
   81  ret = percpu_counter_init(&tctx->inflight, 0, GFP_KERNEL);
   87  tctx->io_wq = io_init_wq_offload(ctx, task);
   88  if (IS_ERR(tctx->io_wq)) {
   89  ret = PTR_ERR(tctx->io_wq);
   90  percpu_counter_destroy(&tctx->inflight);
   95  tctx->task = task;
   96  xa_init(&tctx->xa);
   97  init_waitqueue_head(&tctx->wait);
   98  atomic_set(&tctx->in_cancel, 0);
   99  atomic_set(&tctx->inflight_tracked, 0);
  100  task->io_uring = tctx;
  101  init_llist_head(&tctx->task_list);
  102  init_task_work(&tctx->task_work, tctx_task_work);

__io_uring_add_tctx_node():
  108  struct io_uring_task *tctx = current->io_uring;
  117  tctx = current->io_uring;
  118  if (ctx->iowq_limits_set) {
  119  unsigned int limits[2] = { ctx->iowq_limits[0],
  120                             ctx->iowq_limits[1], };
  122  ret = io_wq_max_workers(tctx->io_wq, limits);
  127  if (!xa_load(&tctx->xa, (unsigned long)ctx)) {
  130  return -ENOMEM;
  131  node->ctx = ctx;
  132  node->task = current;
  134  ret = xa_err(xa_store(&tctx->xa, (unsigned long)ctx,
  141  mutex_lock(&ctx->uring_lock);
  142  list_add(&node->ctx_node, &ctx->tctx_list);
  143  mutex_unlock(&ctx->uring_lock);

__io_uring_add_tctx_node_from_submit():
  152  if (ctx->flags & IORING_SETUP_SINGLE_ISSUER
  153      && ctx->submitter_task != current)
  154  return -EEXIST;
  160  current->io_uring->last = ctx;
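
The -EEXIST at line 154 is how IORING_SETUP_SINGLE_ISSUER is enforced: once a
submitter task is bound to a ring, submission from any other task is rejected.
As a minimal sketch (not from this file; the helper name is made up), a ring
opts into this behavior at setup time:

    #include <linux/io_uring.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /*
     * Sketch: create a ring that only one task may submit to.
     * Submitting from any other task then fails with EEXIST, per the
     * ctx->submitter_task != current check shown above.
     */
    static int setup_single_issuer_ring(unsigned int entries)
    {
            struct io_uring_params p;

            memset(&p, 0, sizeof(p));
            p.flags = IORING_SETUP_SINGLE_ISSUER;
            return syscall(__NR_io_uring_setup, entries, &p);
    }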

io_uring_del_tctx_node():
  165   * Remove this io_uring_file -> task mapping.
  169  struct io_uring_task *tctx = current->io_uring;
  174  node = xa_erase(&tctx->xa, index);
  178  WARN_ON_ONCE(current != node->task);
  179  WARN_ON_ONCE(list_empty(&node->ctx_node));
  181  mutex_lock(&node->ctx->uring_lock);
  182  list_del(&node->ctx_node);
  183  mutex_unlock(&node->ctx->uring_lock);
  185  if (tctx->last == node->ctx)
  186  tctx->last = NULL;

io_uring_clean_tctx():
  192  struct io_wq *wq = tctx->io_wq;
  196  xa_for_each(&tctx->xa, index, node) {
  206  tctx->io_wq = NULL;

io_uring_unreg_ringfd():
  212  struct io_uring_task *tctx = current->io_uring;
  216  if (tctx->registered_rings[i]) {
  217  fput(tctx->registered_rings[i]);
  218  tctx->registered_rings[i] = NULL;

io_ring_add_registered_file():
  226  int offset;
  227  for (offset = start; offset < end; offset++) {
  228  offset = array_index_nospec(offset, IO_RINGFD_REG_MAX);
  229  if (tctx->registered_rings[offset])
  232  tctx->registered_rings[offset] = file;
  233  return offset;
  235  return -EBUSY;

io_ring_add_registered_fd():
  242  int offset;
  246  return -EBADF;
  249  return -EOPNOTSUPP;
  251  offset = io_ring_add_registered_file(tctx, file, start, end);
  252  if (offset < 0)
  254  return offset;

io_ringfd_register():
  260   * with ->data set to the ring_fd, and ->offset given for the desired
  261   * index. If no index is desired, application may set ->offset == -1U
  269  struct io_uring_rsrc_update reg;
  274  return -EINVAL;
  276  mutex_unlock(&ctx->uring_lock);
  278  mutex_lock(&ctx->uring_lock);
  282  tctx = current->io_uring;
  286  if (copy_from_user(&reg, &arg[i], sizeof(reg))) {
  287  ret = -EFAULT;
  291  if (reg.resv) {
  292  ret = -EINVAL;
  296  if (reg.offset == -1U) {
  300  if (reg.offset >= IO_RINGFD_REG_MAX) {
  301  ret = -EINVAL;
  304  start = reg.offset;
  308  ret = io_ring_add_registered_fd(tctx, reg.data, start, end);
  312  reg.offset = ret;
  313  if (copy_to_user(&arg[i], &reg, sizeof(reg))) {
  314  fput(tctx->registered_rings[reg.offset]);
  315  tctx->registered_rings[reg.offset] = NULL;
  316  ret = -EFAULT;
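
The doc comment at lines 260-261 spells out the userspace contract: each
struct io_uring_rsrc_update entry carries the ring fd in ->data and the
desired slot in ->offset, with ->offset == -1U meaning "any free slot". A
minimal sketch of that call, assuming a ring fd from io_uring_setup() (raw
syscall used directly; the helper name is illustrative):

    #include <linux/io_uring.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /*
     * Sketch: register ring_fd via IORING_REGISTER_RING_FDS so later
     * io_uring_enter() calls can pass the returned index together with
     * IORING_ENTER_REGISTERED_RING instead of the real fd.
     */
    static int register_ring_fd(int ring_fd)
    {
            struct io_uring_rsrc_update upd = {
                    .offset = -1U,      /* no preferred index: kernel picks a slot */
                    .data   = ring_fd,  /* ->data carries the fd to register */
            };

            if (syscall(__NR_io_uring_register, ring_fd,
                        IORING_REGISTER_RING_FDS, &upd, 1) < 0)
                    return -1;
            /* on success the kernel wrote the chosen slot back into ->offset */
            return upd.offset;
    }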

io_ringfd_unregister():
  328  struct io_uring_task *tctx = current->io_uring;
  329  struct io_uring_rsrc_update reg;
  333  return -EINVAL;
  338  if (copy_from_user(&reg, &arg[i], sizeof(reg))) {
  339  ret = -EFAULT;
  342  if (reg.resv || reg.data || reg.offset >= IO_RINGFD_REG_MAX) {
  343  ret = -EINVAL;
  347  reg.offset = array_index_nospec(reg.offset, IO_RINGFD_REG_MAX);
  348  if (tctx->registered_rings[reg.offset]) {
  349  fput(tctx->registered_rings[reg.offset]);
  350  tctx->registered_rings[reg.offset] = NULL;
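
The matching unregister call, again as a hedged sketch: ->offset names the
slot to clear, and ->data and ->resv must be zero to pass the validation at
line 342:

    /* Sketch: release a slot taken by register_ring_fd() above. */
    static int unregister_ring_fd(int ring_fd, unsigned int index)
    {
            struct io_uring_rsrc_update upd = {
                    .offset = index,  /* slot to drop; ->data stays 0 */
            };

            return syscall(__NR_io_uring_register, ring_fd,
                           IORING_UNREGISTER_RING_FDS, &upd, 1);
    }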