Lines Matching +full:fiq +full:- +full:device

1 // SPDX-License-Identifier: GPL-2.0
4 * Copyright (c) 2023-2024 DataDirect Networks.
17 "Enable userspace communication through io-uring");
39 pdu->ent = ring_ent; in uring_cmd_set_ring_ent()
47 return pdu->ent; in uring_cmd_to_ring_ent()
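The two helpers above stash the ring-entry pointer in the io_uring command's private PDU area on submission and read it back when the command completes. A minimal userspace sketch of that stash/recover pattern, with a hypothetical fixed-size pdu[] buffer standing in for the real io_uring_cmd PDU accessors:

#include <assert.h>
#include <string.h>

struct ring_ent { int id; };        /* opaque entry the command belongs to */

struct cmd {                        /* stand-in for struct io_uring_cmd */
    unsigned char pdu[32];          /* hypothetical private-data (PDU) area */
};

struct cmd_pdu {                    /* what the driver keeps inside the PDU */
    struct ring_ent *ent;
};

static void cmd_set_ring_ent(struct cmd *cmd, struct ring_ent *ent)
{
    struct cmd_pdu *pdu = (struct cmd_pdu *)cmd->pdu;

    pdu->ent = ent;                 /* remember the owning entry at submit time */
}

static struct ring_ent *cmd_to_ring_ent(struct cmd *cmd)
{
    struct cmd_pdu *pdu = (struct cmd_pdu *)cmd->pdu;

    return pdu->ent;                /* recover it when the command comes back */
}

int main(void)
{
    struct cmd cmd;
    struct ring_ent ent = { .id = 1 };

    memset(&cmd, 0, sizeof(cmd));
    cmd_set_ring_ent(&cmd, &ent);
    assert(cmd_to_ring_ent(&cmd) == &ent);
    return 0;
}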
52 struct fuse_ring *ring = queue->ring; in fuse_uring_flush_bg()
53 struct fuse_conn *fc = ring->fc; in fuse_uring_flush_bg()
55 lockdep_assert_held(&queue->lock); in fuse_uring_flush_bg()
56 lockdep_assert_held(&fc->bg_lock); in fuse_uring_flush_bg()
61 * eliminates the need for remote queue wake-ups when global in fuse_uring_flush_bg()
64 while ((fc->active_background < fc->max_background || in fuse_uring_flush_bg()
65 !queue->active_background) && in fuse_uring_flush_bg()
66 (!list_empty(&queue->fuse_req_bg_queue))) { in fuse_uring_flush_bg()
69 req = list_first_entry(&queue->fuse_req_bg_queue, in fuse_uring_flush_bg()
71 fc->active_background++; in fuse_uring_flush_bg()
72 queue->active_background++; in fuse_uring_flush_bg()
74 list_move_tail(&req->list, &queue->fuse_req_queue); in fuse_uring_flush_bg()
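fuse_uring_flush_bg() moves background requests onto the queue's main request list for as long as the connection-wide budget (fc->max_background) allows, or while this queue has nothing active at all, so every queue keeps at least one request in flight. A standalone sketch of that credit check, using plain counters instead of the kernel's intrusive lists; MAX_BG and the queue fields are simplified assumptions:

#include <stdio.h>

#define MAX_BG 3                    /* stand-in for fc->max_background */

struct conn  { int active_background; };
struct queue {
    int active_background;
    int bg_pending;                 /* requests waiting on the bg queue */
    int queued;                     /* requests moved to the main queue */
};

/* Move bg requests while the global budget allows, or while the queue is idle. */
static void flush_bg(struct conn *fc, struct queue *q)
{
    while ((fc->active_background < MAX_BG || !q->active_background) &&
           q->bg_pending > 0) {
        fc->active_background++;
        q->active_background++;
        q->bg_pending--;
        q->queued++;
    }
}

int main(void)
{
    struct conn fc = { .active_background = 0 };
    struct queue q = { .bg_pending = 5 };

    flush_bg(&fc, &q);
    printf("queued=%d still pending=%d\n", q.queued, q.bg_pending);
    return 0;
}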
81 struct fuse_ring_queue *queue = ent->queue; in fuse_uring_req_end()
82 struct fuse_ring *ring = queue->ring; in fuse_uring_req_end()
83 struct fuse_conn *fc = ring->fc; in fuse_uring_req_end()
85 lockdep_assert_not_held(&queue->lock); in fuse_uring_req_end()
86 spin_lock(&queue->lock); in fuse_uring_req_end()
87 ent->fuse_req = NULL; in fuse_uring_req_end()
88 if (test_bit(FR_BACKGROUND, &req->flags)) { in fuse_uring_req_end()
89 queue->active_background--; in fuse_uring_req_end()
90 spin_lock(&fc->bg_lock); in fuse_uring_req_end()
92 spin_unlock(&fc->bg_lock); in fuse_uring_req_end()
95 spin_unlock(&queue->lock); in fuse_uring_req_end()
98 req->out.h.error = error; in fuse_uring_req_end()
100 clear_bit(FR_SENT, &req->flags); in fuse_uring_req_end()
110 spin_lock(&queue->lock); in fuse_uring_abort_end_queue_requests()
111 list_for_each_entry(req, &queue->fuse_req_queue, list) in fuse_uring_abort_end_queue_requests()
112 clear_bit(FR_PENDING, &req->flags); in fuse_uring_abort_end_queue_requests()
113 list_splice_init(&queue->fuse_req_queue, &req_list); in fuse_uring_abort_end_queue_requests()
114 spin_unlock(&queue->lock); in fuse_uring_abort_end_queue_requests()
116 /* must not hold queue lock to avoid order issues with fi->lock */ in fuse_uring_abort_end_queue_requests()
124 struct fuse_conn *fc = ring->fc; in fuse_uring_abort_end_requests()
126 for (qid = 0; qid < ring->nr_queues; qid++) { in fuse_uring_abort_end_requests()
127 queue = READ_ONCE(ring->queues[qid]); in fuse_uring_abort_end_requests()
131 queue->stopped = true; in fuse_uring_abort_end_requests()
133 WARN_ON_ONCE(ring->fc->max_background != UINT_MAX); in fuse_uring_abort_end_requests()
134 spin_lock(&queue->lock); in fuse_uring_abort_end_requests()
135 spin_lock(&fc->bg_lock); in fuse_uring_abort_end_requests()
137 spin_unlock(&fc->bg_lock); in fuse_uring_abort_end_requests()
138 spin_unlock(&queue->lock); in fuse_uring_abort_end_requests()
145 struct fuse_ring *ring = fc->ring; in fuse_uring_destruct()
151 for (qid = 0; qid < ring->nr_queues; qid++) { in fuse_uring_destruct()
152 struct fuse_ring_queue *queue = ring->queues[qid]; in fuse_uring_destruct()
158 WARN_ON(!list_empty(&queue->ent_avail_queue)); in fuse_uring_destruct()
159 WARN_ON(!list_empty(&queue->ent_w_req_queue)); in fuse_uring_destruct()
160 WARN_ON(!list_empty(&queue->ent_commit_queue)); in fuse_uring_destruct()
161 WARN_ON(!list_empty(&queue->ent_in_userspace)); in fuse_uring_destruct()
163 list_for_each_entry_safe(ent, next, &queue->ent_released, in fuse_uring_destruct()
165 list_del_init(&ent->list); in fuse_uring_destruct()
169 kfree(queue->fpq.processing); in fuse_uring_destruct()
171 ring->queues[qid] = NULL; in fuse_uring_destruct()
174 kfree(ring->queues); in fuse_uring_destruct()
176 fc->ring = NULL; in fuse_uring_destruct()
189 ring = kzalloc(sizeof(*fc->ring), GFP_KERNEL_ACCOUNT); in fuse_uring_create()
193 ring->queues = kcalloc(nr_queues, sizeof(struct fuse_ring_queue *), in fuse_uring_create()
195 if (!ring->queues) in fuse_uring_create()
198 max_payload_size = max(FUSE_MIN_READ_BUFFER, fc->max_write); in fuse_uring_create()
199 max_payload_size = max(max_payload_size, fc->max_pages * PAGE_SIZE); in fuse_uring_create()
201 spin_lock(&fc->lock); in fuse_uring_create()
202 if (fc->ring) { in fuse_uring_create()
204 spin_unlock(&fc->lock); in fuse_uring_create()
205 res = fc->ring; in fuse_uring_create()
209 init_waitqueue_head(&ring->stop_waitq); in fuse_uring_create()
211 ring->nr_queues = nr_queues; in fuse_uring_create()
212 ring->fc = fc; in fuse_uring_create()
213 ring->max_payload_sz = max_payload_size; in fuse_uring_create()
214 atomic_set(&ring->queue_refs, 0); in fuse_uring_create()
215 smp_store_release(&fc->ring, ring); in fuse_uring_create()
217 spin_unlock(&fc->lock); in fuse_uring_create()
221 kfree(ring->queues); in fuse_uring_create()
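fuse_uring_create() allocates the ring and its queue-pointer array without holding fc->lock, then takes the lock only to check whether another thread already published a ring; the loser frees its copy and returns the winner's (the kernel publishes with smp_store_release() under fc->lock). A minimal pthread sketch of that allocate-then-publish pattern; the names are illustrative, not the kernel API:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct ring { int nr_queues; };

static struct ring *published;      /* stands in for fc->ring */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static struct ring *ring_create(int nr_queues)
{
    struct ring *ring = calloc(1, sizeof(*ring));   /* allocate unlocked */

    if (!ring)
        return NULL;
    ring->nr_queues = nr_queues;

    pthread_mutex_lock(&lock);
    if (published) {                /* someone else won the race */
        struct ring *winner = published;

        pthread_mutex_unlock(&lock);
        free(ring);                 /* drop our copy, reuse theirs */
        return winner;
    }
    published = ring;               /* publish under the lock */
    pthread_mutex_unlock(&lock);
    return ring;
}

int main(void)
{
    struct ring *r1 = ring_create(4);
    struct ring *r2 = ring_create(8);

    printf("same ring reused: %s\n", r1 == r2 ? "yes" : "no");
    free(published);
    return 0;
}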
229 struct fuse_conn *fc = ring->fc; in fuse_uring_create_queue()
242 queue->qid = qid; in fuse_uring_create_queue()
243 queue->ring = ring; in fuse_uring_create_queue()
244 spin_lock_init(&queue->lock); in fuse_uring_create_queue()
246 INIT_LIST_HEAD(&queue->ent_avail_queue); in fuse_uring_create_queue()
247 INIT_LIST_HEAD(&queue->ent_commit_queue); in fuse_uring_create_queue()
248 INIT_LIST_HEAD(&queue->ent_w_req_queue); in fuse_uring_create_queue()
249 INIT_LIST_HEAD(&queue->ent_in_userspace); in fuse_uring_create_queue()
250 INIT_LIST_HEAD(&queue->fuse_req_queue); in fuse_uring_create_queue()
251 INIT_LIST_HEAD(&queue->fuse_req_bg_queue); in fuse_uring_create_queue()
252 INIT_LIST_HEAD(&queue->ent_released); in fuse_uring_create_queue()
254 queue->fpq.processing = pq; in fuse_uring_create_queue()
255 fuse_pqueue_init(&queue->fpq); in fuse_uring_create_queue()
257 spin_lock(&fc->lock); in fuse_uring_create_queue()
258 if (ring->queues[qid]) { in fuse_uring_create_queue()
259 spin_unlock(&fc->lock); in fuse_uring_create_queue()
260 kfree(queue->fpq.processing); in fuse_uring_create_queue()
262 return ring->queues[qid]; in fuse_uring_create_queue()
268 WRITE_ONCE(ring->queues[qid], queue); in fuse_uring_create_queue()
269 spin_unlock(&fc->lock); in fuse_uring_create_queue()
276 clear_bit(FR_SENT, &req->flags); in fuse_uring_stop_fuse_req_end()
277 req->out.h.error = -ECONNABORTED; in fuse_uring_stop_fuse_req_end()
289 struct fuse_ring_queue *queue = ent->queue; in fuse_uring_entry_teardown()
291 spin_lock(&queue->lock); in fuse_uring_entry_teardown()
292 cmd = ent->cmd; in fuse_uring_entry_teardown()
293 ent->cmd = NULL; in fuse_uring_entry_teardown()
294 req = ent->fuse_req; in fuse_uring_entry_teardown()
295 ent->fuse_req = NULL; in fuse_uring_entry_teardown()
297 /* remove entry from queue->fpq->processing */ in fuse_uring_entry_teardown()
298 list_del_init(&req->list); in fuse_uring_entry_teardown()
303 * pointer access of entries through IO_URING_F_CANCEL - there is a risk in fuse_uring_entry_teardown()
307 list_move(&ent->list, &queue->ent_released); in fuse_uring_entry_teardown()
308 ent->state = FRRS_RELEASED; in fuse_uring_entry_teardown()
309 spin_unlock(&queue->lock); in fuse_uring_entry_teardown()
312 io_uring_cmd_done(cmd, -ENOTCONN, 0, IO_URING_F_UNLOCKED); in fuse_uring_entry_teardown()
322 struct fuse_ring *ring = queue->ring; in fuse_uring_stop_list_entries()
327 spin_lock(&queue->lock); in fuse_uring_stop_list_entries()
329 if (ent->state != exp_state) { in fuse_uring_stop_list_entries()
331 queue->qid, ent->state, exp_state); in fuse_uring_stop_list_entries()
335 ent->state = FRRS_TEARDOWN; in fuse_uring_stop_list_entries()
336 list_move(&ent->list, &to_teardown); in fuse_uring_stop_list_entries()
338 spin_unlock(&queue->lock); in fuse_uring_stop_list_entries()
343 queue_refs = atomic_dec_return(&ring->queue_refs); in fuse_uring_stop_list_entries()
350 fuse_uring_stop_list_entries(&queue->ent_in_userspace, queue, in fuse_uring_teardown_entries()
352 fuse_uring_stop_list_entries(&queue->ent_avail_queue, queue, in fuse_uring_teardown_entries()
364 for (qid = 0; qid < ring->nr_queues; qid++) { in fuse_uring_log_ent_state()
365 struct fuse_ring_queue *queue = ring->queues[qid]; in fuse_uring_log_ent_state()
370 spin_lock(&queue->lock); in fuse_uring_log_ent_state()
375 list_for_each_entry(ent, &queue->ent_w_req_queue, list) { in fuse_uring_log_ent_state()
376 pr_info(" ent-req-queue ring=%p qid=%d ent=%p state=%d\n", in fuse_uring_log_ent_state()
377 ring, qid, ent, ent->state); in fuse_uring_log_ent_state()
379 list_for_each_entry(ent, &queue->ent_commit_queue, list) { in fuse_uring_log_ent_state()
380 pr_info(" ent-commit-queue ring=%p qid=%d ent=%p state=%d\n", in fuse_uring_log_ent_state()
381 ring, qid, ent, ent->state); in fuse_uring_log_ent_state()
383 spin_unlock(&queue->lock); in fuse_uring_log_ent_state()
385 ring->stop_debug_log = 1; in fuse_uring_log_ent_state()
395 for (qid = 0; qid < ring->nr_queues; qid++) { in fuse_uring_async_stop_queues()
396 struct fuse_ring_queue *queue = READ_ONCE(ring->queues[qid]); in fuse_uring_async_stop_queues()
407 * or on the way to userspace - we could handle that with conditions in in fuse_uring_async_stop_queues()
411 if (atomic_read(&ring->queue_refs) > 0) { in fuse_uring_async_stop_queues()
413 ring->teardown_time + FUSE_URING_TEARDOWN_TIMEOUT)) in fuse_uring_async_stop_queues()
416 schedule_delayed_work(&ring->async_teardown_work, in fuse_uring_async_stop_queues()
419 wake_up_all(&ring->stop_waitq); in fuse_uring_async_stop_queues()
430 for (qid = 0; qid < ring->nr_queues; qid++) { in fuse_uring_stop_queues()
431 struct fuse_ring_queue *queue = READ_ONCE(ring->queues[qid]); in fuse_uring_stop_queues()
439 if (atomic_read(&ring->queue_refs) > 0) { in fuse_uring_stop_queues()
440 ring->teardown_time = jiffies; in fuse_uring_stop_queues()
441 INIT_DELAYED_WORK(&ring->async_teardown_work, in fuse_uring_stop_queues()
443 schedule_delayed_work(&ring->async_teardown_work, in fuse_uring_stop_queues()
446 wake_up_all(&ring->stop_waitq); in fuse_uring_stop_queues()
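fuse_uring_stop_queues() tears down every queue, and if entries are still referenced it records teardown_time and arms delayed work; the async worker keeps re-scheduling itself until queue_refs reaches zero or FUSE_URING_TEARDOWN_TIMEOUT expires, at which point the remaining entry states are logged. A simplified single-threaded sketch of that retry-until-timeout loop; the one-second deadline and the fake_progress() helper are illustrative assumptions:

#include <stdatomic.h>
#include <stdio.h>
#include <time.h>

static atomic_int queue_refs = 3;   /* entries still in flight */

/* Pretend completions trickle in while we wait. */
static void fake_progress(void)
{
    if (atomic_load(&queue_refs) > 0)
        atomic_fetch_sub(&queue_refs, 1);
}

int main(void)
{
    time_t deadline = time(NULL) + 1;   /* illustrative 1s timeout */
    struct timespec tick = { .tv_sec = 0, .tv_nsec = 100 * 1000 * 1000 };

    while (atomic_load(&queue_refs) > 0) {
        if (time(NULL) > deadline) {
            fprintf(stderr, "teardown timed out, refs=%d\n",
                    atomic_load(&queue_refs));
            break;                      /* kernel would log entry states here */
        }
        nanosleep(&tick, NULL);         /* kernel: re-arm the delayed work */
        fake_progress();
    }
    printf("teardown done, refs=%d\n", atomic_load(&queue_refs));
    return 0;
}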
464 * direct access on ent - it must not be destructed as long as in fuse_uring_cancel()
467 queue = ent->queue; in fuse_uring_cancel()
468 spin_lock(&queue->lock); in fuse_uring_cancel()
469 if (ent->state == FRRS_AVAILABLE) { in fuse_uring_cancel()
470 ent->state = FRRS_USERSPACE; in fuse_uring_cancel()
471 list_move(&ent->list, &queue->ent_in_userspace); in fuse_uring_cancel()
473 ent->cmd = NULL; in fuse_uring_cancel()
475 spin_unlock(&queue->lock); in fuse_uring_cancel()
479 io_uring_cmd_done(cmd, -ENOTCONN, 0, issue_flags); in fuse_uring_cancel()
499 err = -EINVAL; in fuse_uring_out_header_has_err()
500 if (oh->unique == 0) { in fuse_uring_out_header_has_err()
501 /* Not supported through io-uring yet */ in fuse_uring_out_header_has_err()
502 pr_warn_once("notify through fuse-io-uring not supported\n"); in fuse_uring_out_header_has_err()
506 if (oh->error <= -ERESTARTSYS || oh->error > 0) in fuse_uring_out_header_has_err()
509 if (oh->error) { in fuse_uring_out_header_has_err()
510 err = oh->error; in fuse_uring_out_header_has_err()
514 err = -ENOENT; in fuse_uring_out_header_has_err()
515 if ((oh->unique & ~FUSE_INT_REQ_BIT) != req->in.h.unique) { in fuse_uring_out_header_has_err()
517 req->in.h.unique, in fuse_uring_out_header_has_err()
518 oh->unique & ~FUSE_INT_REQ_BIT); in fuse_uring_out_header_has_err()
524 * XXX: Not supported through fuse-io-uring yet, it should not even in fuse_uring_out_header_has_err()
525 * find the request - should not happen. in fuse_uring_out_header_has_err()
527 WARN_ON_ONCE(oh->unique & FUSE_INT_REQ_BIT); in fuse_uring_out_header_has_err()
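fuse_uring_out_header_has_err() validates the reply header written by the server: unique == 0 (a notify) is rejected as unsupported over io-uring, the error value must be a plausible negative errno, and the reply's unique id, with the interrupt bit masked off, has to match the request it claims to answer. A hedged userspace sketch of the same checks; the out_header layout, the ERESTARTSYS value, and INT_REQ_BIT are simplified assumptions:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#ifndef ERESTARTSYS
#define ERESTARTSYS 512             /* kernel-internal bound, assumed value */
#endif
#define INT_REQ_BIT 1ULL            /* assumed: low bit marks interrupt requests */

struct out_header {                 /* simplified reply header */
    uint64_t unique;
    int32_t  error;
};

static int out_header_has_err(const struct out_header *oh, uint64_t req_unique)
{
    if (oh->unique == 0)
        return -EINVAL;             /* notify: not supported over io-uring */

    if (oh->error <= -ERESTARTSYS || oh->error > 0)
        return -EINVAL;             /* not a sane errno from userspace */

    if (oh->error)
        return oh->error;           /* legitimate error reply */

    if ((oh->unique & ~INT_REQ_BIT) != req_unique)
        return -ENOENT;             /* reply does not match the request */

    return 0;
}

int main(void)
{
    struct out_header ok  = { .unique = 42, .error = 0 };
    struct out_header bad = { .unique = 43, .error = 0 };

    printf("ok=%d mismatch=%d\n",
           out_header_has_err(&ok, 42), out_header_has_err(&bad, 42));
    return 0;
}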
539 struct fuse_args *args = req->args; in fuse_uring_copy_from_ring()
544 err = copy_from_user(&ring_in_out, &ent->headers->ring_ent_in_out, in fuse_uring_copy_from_ring()
547 return -EFAULT; in fuse_uring_copy_from_ring()
549 err = import_ubuf(ITER_SOURCE, ent->payload, ring->max_payload_sz, in fuse_uring_copy_from_ring()
568 struct fuse_args *args = req->args; in fuse_uring_args_to_ring()
569 struct fuse_in_arg *in_args = args->in_args; in fuse_uring_args_to_ring()
570 int num_args = args->in_numargs; in fuse_uring_args_to_ring()
575 .commit_id = req->in.h.unique, in fuse_uring_args_to_ring()
578 err = import_ubuf(ITER_DEST, ent->payload, ring->max_payload_sz, &iter); in fuse_uring_args_to_ring()
593 if (args->in_args[0].size > 0) { in fuse_uring_args_to_ring()
594 err = copy_to_user(&ent->headers->op_in, in_args->value, in fuse_uring_args_to_ring()
595 in_args->size); in fuse_uring_args_to_ring()
599 return -EFAULT; in fuse_uring_args_to_ring()
603 num_args--; in fuse_uring_args_to_ring()
607 err = fuse_copy_args(&cs, num_args, args->in_pages, in fuse_uring_args_to_ring()
615 err = copy_to_user(&ent->headers->ring_ent_in_out, &ent_in_out, in fuse_uring_args_to_ring()
617 return err ? -EFAULT : 0; in fuse_uring_args_to_ring()
623 struct fuse_ring_queue *queue = ent->queue; in fuse_uring_copy_to_ring()
624 struct fuse_ring *ring = queue->ring; in fuse_uring_copy_to_ring()
627 err = -EIO; in fuse_uring_copy_to_ring()
628 if (WARN_ON(ent->state != FRRS_FUSE_REQ)) { in fuse_uring_copy_to_ring()
629 pr_err("qid=%d ring-req=%p invalid state %d on send\n", in fuse_uring_copy_to_ring()
630 queue->qid, ent, ent->state); in fuse_uring_copy_to_ring()
634 err = -EINVAL; in fuse_uring_copy_to_ring()
635 if (WARN_ON(req->in.h.unique == 0)) in fuse_uring_copy_to_ring()
646 err = copy_to_user(&ent->headers->in_out, &req->in.h, in fuse_uring_copy_to_ring()
647 sizeof(req->in.h)); in fuse_uring_copy_to_ring()
649 err = -EFAULT; in fuse_uring_copy_to_ring()
663 set_bit(FR_SENT, &req->flags); in fuse_uring_prepare_send()
679 struct fuse_ring_queue *queue = ent->queue; in fuse_uring_send_next_to_ring()
687 spin_lock(&queue->lock); in fuse_uring_send_next_to_ring()
688 cmd = ent->cmd; in fuse_uring_send_next_to_ring()
689 ent->cmd = NULL; in fuse_uring_send_next_to_ring()
690 ent->state = FRRS_USERSPACE; in fuse_uring_send_next_to_ring()
691 list_move(&ent->list, &queue->ent_in_userspace); in fuse_uring_send_next_to_ring()
692 spin_unlock(&queue->lock); in fuse_uring_send_next_to_ring()
704 WARN_ON_ONCE(!ent->cmd); in fuse_uring_ent_avail()
705 list_move(&ent->list, &queue->ent_avail_queue); in fuse_uring_ent_avail()
706 ent->state = FRRS_AVAILABLE; in fuse_uring_ent_avail()
713 struct fuse_ring_queue *queue = ent->queue; in fuse_uring_add_to_pq()
714 struct fuse_pqueue *fpq = &queue->fpq; in fuse_uring_add_to_pq()
717 req->ring_entry = ent; in fuse_uring_add_to_pq()
718 hash = fuse_req_hash(req->in.h.unique); in fuse_uring_add_to_pq()
719 list_move_tail(&req->list, &fpq->processing[hash]); in fuse_uring_add_to_pq()
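fuse_uring_add_to_pq() links the in-flight request into the processing table, hashed by its unique id, so that a later commit (see fuse_uring_commit_fetch() below) can look it up by commit_id. A small standalone sketch of that bucket placement and lookup; the 256-bucket table and the modulo hash are illustrative stand-ins for fuse_req_hash():

#include <stdint.h>
#include <stdio.h>

#define NBUCKETS 256                /* illustrative bucket count */

struct req {
    uint64_t unique;
    struct req *next;               /* simple chaining, not a list_head */
};

static struct req *processing[NBUCKETS];

static unsigned int req_hash(uint64_t unique)
{
    return unique % NBUCKETS;       /* stand-in for fuse_req_hash() */
}

static void add_to_pq(struct req *req)
{
    unsigned int hash = req_hash(req->unique);

    req->next = processing[hash];
    processing[hash] = req;
}

/* What a commit by commit_id has to do: find the request it answers. */
static struct req *find_by_commit_id(uint64_t commit_id)
{
    struct req *req = processing[req_hash(commit_id)];

    while (req && req->unique != commit_id)
        req = req->next;
    return req;
}

int main(void)
{
    struct req a = { .unique = 2 }, b = { .unique = 258 };

    add_to_pq(&a);
    add_to_pq(&b);                  /* collides with 'a' in bucket 2 */
    printf("found 258: %s\n", find_by_commit_id(258) == &b ? "yes" : "no");
    return 0;
}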
728 struct fuse_ring_queue *queue = ent->queue; in fuse_uring_add_req_to_ring_ent()
730 lockdep_assert_held(&queue->lock); in fuse_uring_add_req_to_ring_ent()
732 if (WARN_ON_ONCE(ent->state != FRRS_AVAILABLE && in fuse_uring_add_req_to_ring_ent()
733 ent->state != FRRS_COMMIT)) { in fuse_uring_add_req_to_ring_ent()
734 pr_warn("%s qid=%d state=%d\n", __func__, ent->queue->qid, in fuse_uring_add_req_to_ring_ent()
735 ent->state); in fuse_uring_add_req_to_ring_ent()
738 clear_bit(FR_PENDING, &req->flags); in fuse_uring_add_req_to_ring_ent()
739 ent->fuse_req = req; in fuse_uring_add_req_to_ring_ent()
740 ent->state = FRRS_FUSE_REQ; in fuse_uring_add_req_to_ring_ent()
741 list_move(&ent->list, &queue->ent_w_req_queue); in fuse_uring_add_req_to_ring_ent()
747 __must_hold(&queue->lock) in fuse_uring_ent_assign_req()
750 struct fuse_ring_queue *queue = ent->queue; in fuse_uring_ent_assign_req()
751 struct list_head *req_queue = &queue->fuse_req_queue; in fuse_uring_ent_assign_req()
753 lockdep_assert_held(&queue->lock); in fuse_uring_ent_assign_req()
771 struct fuse_ring *ring = ent->queue->ring; in fuse_uring_commit()
772 struct fuse_conn *fc = ring->fc; in fuse_uring_commit()
775 err = copy_from_user(&req->out.h, &ent->headers->in_out, in fuse_uring_commit()
776 sizeof(req->out.h)); in fuse_uring_commit()
778 req->out.h.error = -EFAULT; in fuse_uring_commit()
782 err = fuse_uring_out_header_has_err(&req->out.h, req, fc); in fuse_uring_commit()
784 /* req->out.h.error already set */ in fuse_uring_commit()
804 spin_lock(&queue->lock); in fuse_uring_next_fuse_req()
807 spin_unlock(&queue->lock); in fuse_uring_next_fuse_req()
818 struct fuse_ring_queue *queue = ent->queue; in fuse_ring_ent_set_commit()
820 lockdep_assert_held(&queue->lock); in fuse_ring_ent_set_commit()
822 if (WARN_ON_ONCE(ent->state != FRRS_USERSPACE)) in fuse_ring_ent_set_commit()
823 return -EIO; in fuse_ring_ent_set_commit()
825 ent->state = FRRS_COMMIT; in fuse_ring_ent_set_commit()
826 list_move(&ent->list, &queue->ent_commit_queue); in fuse_ring_ent_set_commit()
835 const struct fuse_uring_cmd_req *cmd_req = io_uring_sqe_cmd(cmd->sqe); in fuse_uring_commit_fetch()
838 struct fuse_ring *ring = fc->ring; in fuse_uring_commit_fetch()
840 uint64_t commit_id = READ_ONCE(cmd_req->commit_id); in fuse_uring_commit_fetch()
841 unsigned int qid = READ_ONCE(cmd_req->qid); in fuse_uring_commit_fetch()
845 err = -ENOTCONN; in fuse_uring_commit_fetch()
849 if (qid >= ring->nr_queues) in fuse_uring_commit_fetch()
850 return -EINVAL; in fuse_uring_commit_fetch()
852 queue = ring->queues[qid]; in fuse_uring_commit_fetch()
855 fpq = &queue->fpq; in fuse_uring_commit_fetch()
857 if (!READ_ONCE(fc->connected) || READ_ONCE(queue->stopped)) in fuse_uring_commit_fetch()
860 spin_lock(&queue->lock); in fuse_uring_commit_fetch()
867 err = -ENOENT; in fuse_uring_commit_fetch()
869 pr_info("qid=%d commit_id %llu not found\n", queue->qid, in fuse_uring_commit_fetch()
871 spin_unlock(&queue->lock); in fuse_uring_commit_fetch()
874 list_del_init(&req->list); in fuse_uring_commit_fetch()
875 ent = req->ring_entry; in fuse_uring_commit_fetch()
876 req->ring_entry = NULL; in fuse_uring_commit_fetch()
881 queue->qid, commit_id, ent->state); in fuse_uring_commit_fetch()
882 spin_unlock(&queue->lock); in fuse_uring_commit_fetch()
883 req->out.h.error = err; in fuse_uring_commit_fetch()
884 clear_bit(FR_SENT, &req->flags); in fuse_uring_commit_fetch()
889 ent->cmd = cmd; in fuse_uring_commit_fetch()
890 spin_unlock(&queue->lock); in fuse_uring_commit_fetch()
898 * fuse requests would otherwise not get processed - committing in fuse_uring_commit_fetch()
912 for (qid = 0; qid < ring->nr_queues && ready; qid++) { in is_ring_ready()
916 queue = ring->queues[qid]; in is_ring_ready()
922 spin_lock(&queue->lock); in is_ring_ready()
923 if (list_empty(&queue->ent_avail_queue)) in is_ring_ready()
925 spin_unlock(&queue->lock); in is_ring_ready()
938 struct fuse_ring_queue *queue = ent->queue; in fuse_uring_do_register()
939 struct fuse_ring *ring = queue->ring; in fuse_uring_do_register()
940 struct fuse_conn *fc = ring->fc; in fuse_uring_do_register()
941 struct fuse_iqueue *fiq = &fc->iq; in fuse_uring_do_register() local
945 spin_lock(&queue->lock); in fuse_uring_do_register()
946 ent->cmd = cmd; in fuse_uring_do_register()
948 spin_unlock(&queue->lock); in fuse_uring_do_register()
950 if (!ring->ready) { in fuse_uring_do_register()
951 bool ready = is_ring_ready(ring, queue->qid); in fuse_uring_do_register()
954 WRITE_ONCE(fiq->ops, &fuse_io_uring_ops); in fuse_uring_do_register()
955 WRITE_ONCE(ring->ready, true); in fuse_uring_do_register()
956 wake_up_all(&fc->blocked_waitq); in fuse_uring_do_register()
962 * sqe->addr is a ptr to an iovec array, iov[0] has the headers, iov[1]
968 struct iovec __user *uiov = u64_to_user_ptr(READ_ONCE(sqe->addr)); in fuse_uring_get_iovec_from_sqe()
972 if (sqe->len != FUSE_URING_IOV_SEGS) in fuse_uring_get_iovec_from_sqe()
973 return -EINVAL; in fuse_uring_get_iovec_from_sqe()
991 struct fuse_ring *ring = queue->ring; in fuse_uring_create_ring_ent()
997 err = fuse_uring_get_iovec_from_sqe(cmd->sqe, iov); in fuse_uring_create_ring_ent()
1004 err = -EINVAL; in fuse_uring_create_ring_ent()
1011 if (payload_size < ring->max_payload_sz) { in fuse_uring_create_ring_ent()
1017 err = -ENOMEM; in fuse_uring_create_ring_ent()
1022 INIT_LIST_HEAD(&ent->list); in fuse_uring_create_ring_ent()
1024 ent->queue = queue; in fuse_uring_create_ring_ent()
1025 ent->headers = iov[0].iov_base; in fuse_uring_create_ring_ent()
1026 ent->payload = iov[1].iov_base; in fuse_uring_create_ring_ent()
1028 atomic_inc(&ring->queue_refs); in fuse_uring_create_ring_ent()
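For registration, sqe->addr points at a two-element iovec: iov[0] describes the header area the kernel copies request headers into, iov[1] the payload buffer, which fuse_uring_create_ring_ent() rejects if it is smaller than ring->max_payload_sz. A hedged userspace sketch of laying out those two buffers; the header and payload sizes are assumptions for illustration, not values taken from the uapi headers:

#include <stdio.h>
#include <stdlib.h>
#include <sys/uio.h>

#define URING_IOV_SEGS   2              /* header segment + payload segment */
#define HEADER_BUF_SIZE  4096           /* assumed; real size comes from the uapi struct */
#define PAYLOAD_BUF_SIZE (1024 * 1024)  /* assumed to be >= ring->max_payload_sz */

int main(void)
{
    void *headers = malloc(HEADER_BUF_SIZE);
    void *payload = malloc(PAYLOAD_BUF_SIZE);
    struct iovec iov[URING_IOV_SEGS];

    if (!headers || !payload)
        return 1;

    iov[0].iov_base = headers;          /* kernel writes request headers here */
    iov[0].iov_len  = HEADER_BUF_SIZE;
    iov[1].iov_base = payload;          /* request/reply payload lives here */
    iov[1].iov_len  = PAYLOAD_BUF_SIZE;

    /*
     * A register SQE would then carry sqe->addr = (unsigned long)iov and
     * sqe->len = URING_IOV_SEGS; a payload segment smaller than the ring's
     * max_payload_sz is rejected (see the -EINVAL branch above).
     */
    printf("iov[0]=%zu bytes, iov[1]=%zu bytes\n",
           iov[0].iov_len, iov[1].iov_len);
    free(headers);
    free(payload);
    return 0;
}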
1039 const struct fuse_uring_cmd_req *cmd_req = io_uring_sqe_cmd(cmd->sqe); in fuse_uring_register()
1040 struct fuse_ring *ring = smp_load_acquire(&fc->ring); in fuse_uring_register()
1044 unsigned int qid = READ_ONCE(cmd_req->qid); in fuse_uring_register()
1046 err = -ENOMEM; in fuse_uring_register()
1053 if (qid >= ring->nr_queues) { in fuse_uring_register()
1055 return -EINVAL; in fuse_uring_register()
1058 queue = ring->queues[qid]; in fuse_uring_register()
1087 u32 cmd_op = cmd->cmd_op; in fuse_uring_cmd()
1097 return -EINVAL; in fuse_uring_cmd()
1099 fud = fuse_get_dev(cmd->file); in fuse_uring_cmd()
1101 pr_info_ratelimited("No fuse device found\n"); in fuse_uring_cmd()
1102 return -ENOTCONN; in fuse_uring_cmd()
1104 fc = fud->fc; in fuse_uring_cmd()
1106 /* Once a connection has io-uring enabled on it, it can't be disabled */ in fuse_uring_cmd()
1107 if (!enable_uring && !fc->io_uring) { in fuse_uring_cmd()
1108 pr_info_ratelimited("fuse-io-uring is disabled\n"); in fuse_uring_cmd()
1109 return -EOPNOTSUPP; in fuse_uring_cmd()
1112 if (fc->aborted) in fuse_uring_cmd()
1113 return -ECONNABORTED; in fuse_uring_cmd()
1114 if (!fc->connected) in fuse_uring_cmd()
1115 return -ENOTCONN; in fuse_uring_cmd()
1121 if (!fc->initialized) in fuse_uring_cmd()
1122 return -EAGAIN; in fuse_uring_cmd()
1130 fc->io_uring = 0; in fuse_uring_cmd()
1131 wake_up_all(&fc->blocked_waitq); in fuse_uring_cmd()
1144 return -EINVAL; in fuse_uring_cmd()
1147 return -EIOCBQUEUED; in fuse_uring_cmd()
1153 struct fuse_ring_queue *queue = ent->queue; in fuse_uring_send()
1155 spin_lock(&queue->lock); in fuse_uring_send()
1156 ent->state = FRRS_USERSPACE; in fuse_uring_send()
1157 list_move(&ent->list, &queue->ent_in_userspace); in fuse_uring_send()
1158 ent->cmd = NULL; in fuse_uring_send()
1159 spin_unlock(&queue->lock); in fuse_uring_send()
1165 * This prepares and sends the ring request in fuse-uring task context.
1166 * User buffers are not mapped yet - the application does not have permission
1167 * to write to it - this has to be executed in ring task context.
1173 struct fuse_ring_queue *queue = ent->queue; in fuse_uring_send_in_task()
1177 err = fuse_uring_prepare_send(ent, ent->fuse_req); in fuse_uring_send_in_task()
1183 err = -ECANCELED; in fuse_uring_send_in_task()
1196 if (WARN_ONCE(qid >= ring->nr_queues, in fuse_uring_task_to_queue()
1198 ring->nr_queues)) in fuse_uring_task_to_queue()
1201 queue = ring->queues[qid]; in fuse_uring_task_to_queue()
1209 struct io_uring_cmd *cmd = ent->cmd; in fuse_uring_dispatch_ent()
1216 void fuse_uring_queue_fuse_req(struct fuse_iqueue *fiq, struct fuse_req *req) in fuse_uring_queue_fuse_req() argument
1218 struct fuse_conn *fc = req->fm->fc; in fuse_uring_queue_fuse_req()
1219 struct fuse_ring *ring = fc->ring; in fuse_uring_queue_fuse_req()
1224 err = -EINVAL; in fuse_uring_queue_fuse_req()
1229 if (req->in.h.opcode != FUSE_NOTIFY_REPLY) in fuse_uring_queue_fuse_req()
1230 req->in.h.unique = fuse_get_unique(fiq); in fuse_uring_queue_fuse_req()
1232 spin_lock(&queue->lock); in fuse_uring_queue_fuse_req()
1233 err = -ENOTCONN; in fuse_uring_queue_fuse_req()
1234 if (unlikely(queue->stopped)) in fuse_uring_queue_fuse_req()
1237 set_bit(FR_URING, &req->flags); in fuse_uring_queue_fuse_req()
1238 req->ring_queue = queue; in fuse_uring_queue_fuse_req()
1239 ent = list_first_entry_or_null(&queue->ent_avail_queue, in fuse_uring_queue_fuse_req()
1244 list_add_tail(&req->list, &queue->fuse_req_queue); in fuse_uring_queue_fuse_req()
1245 spin_unlock(&queue->lock); in fuse_uring_queue_fuse_req()
1253 spin_unlock(&queue->lock); in fuse_uring_queue_fuse_req()
1255 req->out.h.error = err; in fuse_uring_queue_fuse_req()
1256 clear_bit(FR_PENDING, &req->flags); in fuse_uring_queue_fuse_req()
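fuse_uring_queue_fuse_req() assigns the request a fresh unique id (unless it is a notify reply), appends it to the queue's request list under the queue lock, and dispatches it immediately if an idle ring entry is parked on ent_avail_queue; otherwise the request waits until a server thread comes back with the next commit-and-fetch. A compact, single-threaded sketch of that "use an idle entry if present, else just enqueue" decision:

#include <stdio.h>

struct entry { int busy; };

struct queue {
    struct entry workers[2];        /* ring entries registered by the server */
    int pending;                    /* requests waiting with no idle entry */
};

/* Returns the entry that will carry the request, or NULL if it must wait. */
static struct entry *queue_request(struct queue *q)
{
    for (int i = 0; i < 2; i++) {
        if (!q->workers[i].busy) {      /* like the ent_avail_queue head */
            q->workers[i].busy = 1;     /* FRRS_FUSE_REQ: dispatch right away */
            return &q->workers[i];
        }
    }
    q->pending++;                       /* park on fuse_req_queue instead */
    return NULL;
}

int main(void)
{
    struct queue q = { 0 };

    for (int i = 0; i < 3; i++)
        printf("req %d: %s\n", i,
               queue_request(&q) ? "dispatched" : "queued");
    return 0;
}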
1262 struct fuse_conn *fc = req->fm->fc; in fuse_uring_queue_bq_req()
1263 struct fuse_ring *ring = fc->ring; in fuse_uring_queue_bq_req()
1271 spin_lock(&queue->lock); in fuse_uring_queue_bq_req()
1272 if (unlikely(queue->stopped)) { in fuse_uring_queue_bq_req()
1273 spin_unlock(&queue->lock); in fuse_uring_queue_bq_req()
1277 set_bit(FR_URING, &req->flags); in fuse_uring_queue_bq_req()
1278 req->ring_queue = queue; in fuse_uring_queue_bq_req()
1279 list_add_tail(&req->list, &queue->fuse_req_bg_queue); in fuse_uring_queue_bq_req()
1281 ent = list_first_entry_or_null(&queue->ent_avail_queue, in fuse_uring_queue_bq_req()
1283 spin_lock(&fc->bg_lock); in fuse_uring_queue_bq_req()
1284 fc->num_background++; in fuse_uring_queue_bq_req()
1285 if (fc->num_background == fc->max_background) in fuse_uring_queue_bq_req()
1286 fc->blocked = 1; in fuse_uring_queue_bq_req()
1288 spin_unlock(&fc->bg_lock); in fuse_uring_queue_bq_req()
1295 req = list_first_entry_or_null(&queue->fuse_req_queue, struct fuse_req, in fuse_uring_queue_bq_req()
1299 spin_unlock(&queue->lock); in fuse_uring_queue_bq_req()
1303 spin_unlock(&queue->lock); in fuse_uring_queue_bq_req()
1311 struct fuse_ring_queue *queue = req->ring_queue; in fuse_uring_remove_pending_req()
1313 return fuse_remove_pending_req(req, &queue->lock); in fuse_uring_remove_pending_req()
1317 /* should be sent over io-uring as an enhancement */
1321 * could be sent over io-uring, but interrupts should be rare,