Lines Matching full:queue
50 static void fuse_uring_flush_bg(struct fuse_ring_queue *queue) in fuse_uring_flush_bg() argument
52 struct fuse_ring *ring = queue->ring; in fuse_uring_flush_bg()
55 lockdep_assert_held(&queue->lock); in fuse_uring_flush_bg()
59 * Allow one bg request per queue, ignoring global fc limits. in fuse_uring_flush_bg()
60 * This prevents a single queue from consuming all resources and in fuse_uring_flush_bg()
61 * eliminates the need for remote queue wake-ups when global in fuse_uring_flush_bg()
62 * limits are met but this queue has no more waiting requests. in fuse_uring_flush_bg()
65 !queue->active_background) && in fuse_uring_flush_bg()
66 (!list_empty(&queue->fuse_req_bg_queue))) { in fuse_uring_flush_bg()
69 req = list_first_entry(&queue->fuse_req_bg_queue, in fuse_uring_flush_bg()
72 queue->active_background++; in fuse_uring_flush_bg()
74 list_move_tail(&req->list, &queue->fuse_req_queue); in fuse_uring_flush_bg()
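The matches above cover only the lines containing "queue". Read together, fuse_uring_flush_bg() moves background requests from the per-queue background list onto the active request list while a per-queue/global limit check holds. A minimal sketch of the full helper follows; the elided lines (the fc local, the lockdep assertion on the connection's bg_lock, and the global-limit half of the loop condition) are reconstructed from context and from the standard fuse_conn background accounting, so details may differ from the actual source.

static void fuse_uring_flush_bg(struct fuse_ring_queue *queue)
{
        struct fuse_ring *ring = queue->ring;
        struct fuse_conn *fc = ring->fc;          /* assumed field */

        lockdep_assert_held(&queue->lock);
        lockdep_assert_held(&fc->bg_lock);        /* assumed second lock */

        /*
         * One bg request per queue is always allowed, ignoring the
         * global fc limit (see the comment in the matches above).
         */
        while ((fc->active_background < fc->max_background ||
                !queue->active_background) &&
               !list_empty(&queue->fuse_req_bg_queue)) {
                struct fuse_req *req;

                req = list_first_entry(&queue->fuse_req_bg_queue,
                                       struct fuse_req, list);
                fc->active_background++;          /* assumed accounting */
                queue->active_background++;

                list_move_tail(&req->list, &queue->fuse_req_queue);
        }
}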
81 struct fuse_ring_queue *queue = ent->queue; in fuse_uring_req_end() local
82 struct fuse_ring *ring = queue->ring; in fuse_uring_req_end()
85 lockdep_assert_not_held(&queue->lock); in fuse_uring_req_end()
86 spin_lock(&queue->lock); in fuse_uring_req_end()
89 queue->active_background--; in fuse_uring_req_end()
91 fuse_uring_flush_bg(queue); in fuse_uring_req_end()
95 spin_unlock(&queue->lock); in fuse_uring_req_end()
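The fuse_uring_req_end() matches show the locking discipline around background accounting: the queue lock must not be held on entry, is taken for the decrement and re-flush, and is dropped before the request is ended. A sketch of that critical section; the FR_BACKGROUND guard and the nested fc->bg_lock are assumptions inferred from fuse_uring_flush_bg()'s lockdep requirements.

        /* Inside fuse_uring_req_end(); ent, req, queue, ring in scope. */
        lockdep_assert_not_held(&queue->lock);
        spin_lock(&queue->lock);
        ent->fuse_req = NULL;                        /* assumed: detach req */
        if (test_bit(FR_BACKGROUND, &req->flags)) {  /* assumed guard */
                queue->active_background--;
                spin_lock(&ring->fc->bg_lock);       /* assumed nesting */
                fuse_uring_flush_bg(queue);          /* refill from bg list */
                spin_unlock(&ring->fc->bg_lock);
        }
        spin_unlock(&queue->lock);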
104 /* Abort all list-queued requests on the given ring queue */
105 static void fuse_uring_abort_end_queue_requests(struct fuse_ring_queue *queue) in fuse_uring_abort_end_queue_requests() argument
110 spin_lock(&queue->lock); in fuse_uring_abort_end_queue_requests()
111 list_for_each_entry(req, &queue->fuse_req_queue, list) in fuse_uring_abort_end_queue_requests()
113 list_splice_init(&queue->fuse_req_queue, &req_list); in fuse_uring_abort_end_queue_requests()
114 spin_unlock(&queue->lock); in fuse_uring_abort_end_queue_requests()
116 /* must not hold queue lock to avoid order issues with fi->lock */ in fuse_uring_abort_end_queue_requests()
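fuse_uring_abort_end_queue_requests() follows the classic splice-under-lock pattern: collect everything from queue->fuse_req_queue onto a private list while holding the queue lock, then end the requests after dropping it, since ending a request takes fi->lock and the queue lock must not nest around it. A sketch with the per-request loop body and the final call reconstructed; fuse_dev_end_requests() is assumed to be the existing fuse helper that ends a list of requests.

static void fuse_uring_abort_end_queue_requests(struct fuse_ring_queue *queue)
{
        struct fuse_req *req;
        LIST_HEAD(req_list);

        spin_lock(&queue->lock);
        list_for_each_entry(req, &queue->fuse_req_queue, list)
                clear_bit(FR_PENDING, &req->flags);  /* assumed loop body */
        list_splice_init(&queue->fuse_req_queue, &req_list);
        spin_unlock(&queue->lock);

        /* must not hold queue lock to avoid order issues with fi->lock */
        fuse_dev_end_requests(&req_list);            /* assumed helper */
}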
123 struct fuse_ring_queue *queue; in fuse_uring_abort_end_requests() local
127 queue = READ_ONCE(ring->queues[qid]); in fuse_uring_abort_end_requests()
128 if (!queue) in fuse_uring_abort_end_requests()
131 queue->stopped = true; in fuse_uring_abort_end_requests()
134 spin_lock(&queue->lock); in fuse_uring_abort_end_requests()
136 fuse_uring_flush_bg(queue); in fuse_uring_abort_end_requests()
138 spin_unlock(&queue->lock); in fuse_uring_abort_end_requests()
139 fuse_uring_abort_end_queue_requests(queue); in fuse_uring_abort_end_requests()
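fuse_uring_abort_end_requests() walks every qid, marks each queue stopped, flushes its background list one last time under the queue lock, and then aborts what remains. A sketch of the loop; the fc->bg_lock bracket around the flush is an assumption carried over from fuse_uring_flush_bg()'s locking requirements.

void fuse_uring_abort_end_requests(struct fuse_ring *ring)
{
        struct fuse_conn *fc = ring->fc;   /* assumed field */
        int qid;

        for (qid = 0; qid < ring->nr_queues; qid++) {
                struct fuse_ring_queue *queue = READ_ONCE(ring->queues[qid]);

                if (!queue)
                        continue;

                queue->stopped = true;

                spin_lock(&queue->lock);
                spin_lock(&fc->bg_lock);   /* assumed */
                fuse_uring_flush_bg(queue);
                spin_unlock(&fc->bg_lock);
                spin_unlock(&queue->lock);
                fuse_uring_abort_end_queue_requests(queue);
        }
}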
152 struct fuse_ring_queue *queue = ring->queues[qid]; in fuse_uring_destruct() local
155 if (!queue) in fuse_uring_destruct()
158 WARN_ON(!list_empty(&queue->ent_avail_queue)); in fuse_uring_destruct()
159 WARN_ON(!list_empty(&queue->ent_w_req_queue)); in fuse_uring_destruct()
160 WARN_ON(!list_empty(&queue->ent_commit_queue)); in fuse_uring_destruct()
161 WARN_ON(!list_empty(&queue->ent_in_userspace)); in fuse_uring_destruct()
163 list_for_each_entry_safe(ent, next, &queue->ent_released, in fuse_uring_destruct()
169 kfree(queue->fpq.processing); in fuse_uring_destruct()
170 kfree(queue); in fuse_uring_destruct()
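On teardown, every entry list except ent_released must already be empty (hence the WARN_ONs), and the released entries, the fpq processing hash, and the queue itself are freed. A sketch of the elided tail of the per-queue destruction body; the loop body and the slot reset are inferred.

        /* Inside the per-queue loop of fuse_uring_destruct(). */
        list_for_each_entry_safe(ent, next, &queue->ent_released, list) {
                list_del_init(&ent->list);   /* assumed loop body */
                kfree(ent);                  /* assumed: entries freed here */
        }

        kfree(queue->fpq.processing);        /* pq hash from queue creation */
        kfree(queue);
        ring->queues[qid] = NULL;            /* assumed slot reset */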
230 struct fuse_ring_queue *queue; in fuse_uring_create_queue() local
233 queue = kzalloc(sizeof(*queue), GFP_KERNEL_ACCOUNT); in fuse_uring_create_queue()
234 if (!queue) in fuse_uring_create_queue()
238 kfree(queue); in fuse_uring_create_queue()
242 queue->qid = qid; in fuse_uring_create_queue()
243 queue->ring = ring; in fuse_uring_create_queue()
244 spin_lock_init(&queue->lock); in fuse_uring_create_queue()
246 INIT_LIST_HEAD(&queue->ent_avail_queue); in fuse_uring_create_queue()
247 INIT_LIST_HEAD(&queue->ent_commit_queue); in fuse_uring_create_queue()
248 INIT_LIST_HEAD(&queue->ent_w_req_queue); in fuse_uring_create_queue()
249 INIT_LIST_HEAD(&queue->ent_in_userspace); in fuse_uring_create_queue()
250 INIT_LIST_HEAD(&queue->fuse_req_queue); in fuse_uring_create_queue()
251 INIT_LIST_HEAD(&queue->fuse_req_bg_queue); in fuse_uring_create_queue()
252 INIT_LIST_HEAD(&queue->ent_released); in fuse_uring_create_queue()
254 queue->fpq.processing = pq; in fuse_uring_create_queue()
255 fuse_pqueue_init(&queue->fpq); in fuse_uring_create_queue()
260 kfree(queue->fpq.processing); in fuse_uring_create_queue()
261 kfree(queue); in fuse_uring_create_queue()
268 WRITE_ONCE(ring->queues[qid], queue); in fuse_uring_create_queue()
271 return queue; in fuse_uring_create_queue()
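Queue creation allocates the structure and the fpq processing hash, initializes the lock and all seven lists, and only then publishes the pointer with WRITE_ONCE(), so the lockless READ_ONCE(ring->queues[qid]) readers elsewhere in the listing never observe a half-initialized queue. A sketch; the pq allocation, the duplicate-creation check, and the lock serializing the publish are reconstructed from the visible error paths and are assumptions.

static struct fuse_ring_queue *fuse_uring_create_queue(struct fuse_ring *ring,
                                                       int qid)
{
        struct fuse_conn *fc = ring->fc;     /* assumed field */
        struct fuse_ring_queue *queue;
        struct list_head *pq;

        queue = kzalloc(sizeof(*queue), GFP_KERNEL_ACCOUNT);
        if (!queue)
                return NULL;
        pq = kcalloc(FUSE_PQ_HASH_SIZE, sizeof(*pq), GFP_KERNEL); /* assumed */
        if (!pq) {
                kfree(queue);
                return NULL;
        }

        queue->qid = qid;
        queue->ring = ring;
        spin_lock_init(&queue->lock);

        INIT_LIST_HEAD(&queue->ent_avail_queue);
        INIT_LIST_HEAD(&queue->ent_commit_queue);
        INIT_LIST_HEAD(&queue->ent_w_req_queue);
        INIT_LIST_HEAD(&queue->ent_in_userspace);
        INIT_LIST_HEAD(&queue->fuse_req_queue);
        INIT_LIST_HEAD(&queue->fuse_req_bg_queue);
        INIT_LIST_HEAD(&queue->ent_released);

        queue->fpq.processing = pq;
        fuse_pqueue_init(&queue->fpq);

        spin_lock(&fc->lock);                /* assumed serializing lock */
        if (ring->queues[qid]) {             /* lost a creation race */
                spin_unlock(&fc->lock);
                kfree(queue->fpq.processing);
                kfree(queue);
                return ring->queues[qid];
        }

        /* publish only after full initialization */
        WRITE_ONCE(ring->queues[qid], queue);
        spin_unlock(&fc->lock);

        return queue;
}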
289 struct fuse_ring_queue *queue = ent->queue; in fuse_uring_entry_teardown() local
291 spin_lock(&queue->lock); in fuse_uring_entry_teardown()
297 /* remove entry from queue->fpq->processing */ in fuse_uring_entry_teardown()
307 list_move(&ent->list, &queue->ent_released); in fuse_uring_entry_teardown()
309 spin_unlock(&queue->lock); in fuse_uring_entry_teardown()
319 struct fuse_ring_queue *queue, in fuse_uring_stop_list_entries() argument
322 struct fuse_ring *ring = queue->ring; in fuse_uring_stop_list_entries()
327 spin_lock(&queue->lock); in fuse_uring_stop_list_entries()
331 queue->qid, ent->state, exp_state); in fuse_uring_stop_list_entries()
338 spin_unlock(&queue->lock); in fuse_uring_stop_list_entries()
340 /* no queue lock to avoid lock order issues */ in fuse_uring_stop_list_entries()
348 static void fuse_uring_teardown_entries(struct fuse_ring_queue *queue) in fuse_uring_teardown_entries() argument
350 fuse_uring_stop_list_entries(&queue->ent_in_userspace, queue, in fuse_uring_teardown_entries()
352 fuse_uring_stop_list_entries(&queue->ent_avail_queue, queue, in fuse_uring_teardown_entries()
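fuse_uring_teardown_entries() stops the two entry states that can survive normal operation, entries parked in userspace and entries waiting on the available list, each via fuse_uring_stop_list_entries() with the matching expected state. A sketch of the list-stopping helper; the FRRS_* state names and the move-then-teardown split are reconstructed from the matched warning and the "no queue lock" comment, so they may differ from the source.

static void fuse_uring_stop_list_entries(struct list_head *head,
                                         struct fuse_ring_queue *queue,
                                         enum fuse_ring_req_state exp_state)
{
        struct fuse_ring_ent *ent, *next;
        LIST_HEAD(to_teardown);

        spin_lock(&queue->lock);
        list_for_each_entry_safe(ent, next, head, list) {
                if (ent->state != exp_state) {
                        pr_warn("entry teardown qid=%d state=%d expected=%d",
                                queue->qid, ent->state, exp_state);
                        continue;
                }
                ent->state = FRRS_TEARDOWN;          /* assumed state name */
                list_move(&ent->list, &to_teardown);
        }
        spin_unlock(&queue->lock);

        /* no queue lock to avoid lock order issues */
        list_for_each_entry_safe(ent, next, &to_teardown, list)
                fuse_uring_entry_teardown(ent);
}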
365 struct fuse_ring_queue *queue = ring->queues[qid]; in fuse_uring_log_ent_state() local
367 if (!queue) in fuse_uring_log_ent_state()
370 spin_lock(&queue->lock); in fuse_uring_log_ent_state()
372 * Log entries from the intermediate queue, the other queues in fuse_uring_log_ent_state()
375 list_for_each_entry(ent, &queue->ent_w_req_queue, list) { in fuse_uring_log_ent_state()
376 pr_info(" ent-req-queue ring=%p qid=%d ent=%p state=%d\n", in fuse_uring_log_ent_state()
379 list_for_each_entry(ent, &queue->ent_commit_queue, list) { in fuse_uring_log_ent_state()
380 pr_info(" ent-commit-queue ring=%p qid=%d ent=%p state=%d\n", in fuse_uring_log_ent_state()
383 spin_unlock(&queue->lock); in fuse_uring_log_ent_state()
396 struct fuse_ring_queue *queue = READ_ONCE(ring->queues[qid]); in fuse_uring_async_stop_queues() local
398 if (!queue) in fuse_uring_async_stop_queues()
401 fuse_uring_teardown_entries(queue); in fuse_uring_async_stop_queues()
409 * If there are still queue references left in fuse_uring_async_stop_queues()
431 struct fuse_ring_queue *queue = READ_ONCE(ring->queues[qid]); in fuse_uring_stop_queues() local
433 if (!queue) in fuse_uring_stop_queues()
436 fuse_uring_teardown_entries(queue); in fuse_uring_stop_queues()
460 struct fuse_ring_queue *queue; in fuse_uring_cancel() local
467 queue = ent->queue; in fuse_uring_cancel()
468 spin_lock(&queue->lock); in fuse_uring_cancel()
471 list_move(&ent->list, &queue->ent_in_userspace); in fuse_uring_cancel()
475 spin_unlock(&queue->lock); in fuse_uring_cancel()
478 /* no queue lock to avoid lock order issues */ in fuse_uring_cancel()
623 struct fuse_ring_queue *queue = ent->queue; in fuse_uring_copy_to_ring() local
624 struct fuse_ring *ring = queue->ring; in fuse_uring_copy_to_ring()
630 queue->qid, ent, ent->state); in fuse_uring_copy_to_ring()
679 struct fuse_ring_queue *queue = ent->queue; in fuse_uring_send_next_to_ring() local
687 spin_lock(&queue->lock); in fuse_uring_send_next_to_ring()
691 list_move(&ent->list, &queue->ent_in_userspace); in fuse_uring_send_next_to_ring()
692 spin_unlock(&queue->lock); in fuse_uring_send_next_to_ring()
702 struct fuse_ring_queue *queue) in fuse_uring_ent_avail() argument
705 list_move(&ent->list, &queue->ent_avail_queue); in fuse_uring_ent_avail()
713 struct fuse_ring_queue *queue = ent->queue; in fuse_uring_add_to_pq() local
714 struct fuse_pqueue *fpq = &queue->fpq; in fuse_uring_add_to_pq()
723 * Assign a fuse request to the given ring entry
728 struct fuse_ring_queue *queue = ent->queue; in fuse_uring_add_req_to_ring_ent() local
730 lockdep_assert_held(&queue->lock); in fuse_uring_add_req_to_ring_ent()
734 pr_warn("%s qid=%d state=%d\n", __func__, ent->queue->qid, in fuse_uring_add_req_to_ring_ent()
741 list_move(&ent->list, &queue->ent_w_req_queue); in fuse_uring_add_req_to_ring_ent()
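Assigning a request to a ring entry happens strictly under the queue lock: the entry's state is sanity-checked, the request is attached, and the entry migrates from the available list to ent_w_req_queue. A sketch with the elided attachment lines filled in; the FRRS_* state names are assumptions, the rest matches the fragments above.

static void fuse_uring_add_req_to_ring_ent(struct fuse_ring_ent *ent,
                                           struct fuse_req *req)
{
        struct fuse_ring_queue *queue = ent->queue;

        lockdep_assert_held(&queue->lock);

        if (ent->state != FRRS_AVAILABLE &&       /* assumed states */
            ent->state != FRRS_COMMIT)
                pr_warn("%s qid=%d state=%d\n", __func__, ent->queue->qid,
                        ent->state);

        clear_bit(FR_PENDING, &req->flags);  /* req now owned by the ring */
        ent->fuse_req = req;                 /* assumed back-pointer */
        ent->state = FRRS_FUSE_REQ;          /* assumed state name */
        list_move(&ent->list, &queue->ent_w_req_queue);
        fuse_uring_add_to_pq(ent, req);
}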
747 __must_hold(&queue->lock) in fuse_uring_ent_assign_req()
750 struct fuse_ring_queue *queue = ent->queue; in fuse_uring_ent_assign_req() local
751 struct list_head *req_queue = &queue->fuse_req_queue; in fuse_uring_ent_assign_req()
753 lockdep_assert_held(&queue->lock); in fuse_uring_ent_assign_req()
771 struct fuse_ring *ring = ent->queue->ring; in fuse_uring_commit()
797 struct fuse_ring_queue *queue, in fuse_uring_next_fuse_req() argument
804 spin_lock(&queue->lock); in fuse_uring_next_fuse_req()
805 fuse_uring_ent_avail(ent, queue); in fuse_uring_next_fuse_req()
807 spin_unlock(&queue->lock); in fuse_uring_next_fuse_req()
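After a commit completes, the entry is recycled: put it back on the available list, try to assign the next queued request within the same lock hold, and only send outside the lock. A sketch; the retry-on-send-failure loop is inferred from the lock/unlock bracket in the matches and may not match the source exactly.

static void fuse_uring_next_fuse_req(struct fuse_ring_ent *ent,
                                     struct fuse_ring_queue *queue,
                                     unsigned int issue_flags)
{
        bool has_next;
        int err;

retry:
        spin_lock(&queue->lock);
        fuse_uring_ent_avail(ent, queue);
        has_next = fuse_uring_ent_assign_req(ent);
        spin_unlock(&queue->lock);

        if (has_next) {
                err = fuse_uring_send_next_to_ring(ent, issue_flags);
                if (err)
                        goto retry;   /* assumed: entry was parked again */
        }
}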
818 struct fuse_ring_queue *queue = ent->queue; in fuse_ring_ent_set_commit() local
820 lockdep_assert_held(&queue->lock); in fuse_ring_ent_set_commit()
826 list_move(&ent->list, &queue->ent_commit_queue); in fuse_ring_ent_set_commit()
839 struct fuse_ring_queue *queue; in fuse_uring_commit_fetch() local
852 queue = ring->queues[qid]; in fuse_uring_commit_fetch()
853 if (!queue) in fuse_uring_commit_fetch()
855 fpq = &queue->fpq; in fuse_uring_commit_fetch()
857 if (!READ_ONCE(fc->connected) || READ_ONCE(queue->stopped)) in fuse_uring_commit_fetch()
860 spin_lock(&queue->lock); in fuse_uring_commit_fetch()
869 pr_info("qid=%d commit_id %llu not found\n", queue->qid, in fuse_uring_commit_fetch()
871 spin_unlock(&queue->lock); in fuse_uring_commit_fetch()
881 queue->qid, commit_id, ent->state); in fuse_uring_commit_fetch()
882 spin_unlock(&queue->lock); in fuse_uring_commit_fetch()
890 spin_unlock(&queue->lock); in fuse_uring_commit_fetch()
892 /* without the queue lock, as other locks are taken */ in fuse_uring_commit_fetch()
902 fuse_uring_next_fuse_req(ent, queue, issue_flags); in fuse_uring_commit_fetch()
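The commit path validates qid and connection state, looks the entry up by commit_id under the queue lock, moves it to the commit state, and then does the actual copy-out and next-request fetch with the lock dropped (the "other locks are taken" comment). A condensed sketch; fuse_request_find() against queue->fpq and the req->ring_entry back-pointer are assumptions about how commit_id resolves to an entry.

        /* Condensed from fuse_uring_commit_fetch(); ring, fc, qid and
         * commit_id (read from the SQE payload) are elided locals. */
        struct fuse_ring_queue *queue;
        struct fuse_ring_ent *ent;
        struct fuse_pqueue *fpq;
        struct fuse_req *req;
        int err;

        if (qid >= ring->nr_queues)
                return -EINVAL;
        queue = ring->queues[qid];
        if (!queue)
                return -ENOTCONN;
        fpq = &queue->fpq;

        if (!READ_ONCE(fc->connected) || READ_ONCE(queue->stopped))
                return -ENOTCONN;

        spin_lock(&queue->lock);
        req = fuse_request_find(fpq, commit_id);     /* assumed lookup */
        if (!req) {
                pr_info("qid=%d commit_id %llu not found\n", queue->qid,
                        commit_id);
                spin_unlock(&queue->lock);
                return -ENOENT;
        }
        list_del_init(&req->list);
        ent = req->ring_entry;                       /* assumed back-pointer */

        err = fuse_ring_ent_set_commit(ent);
        if (err) {
                pr_info_ratelimited("qid=%d commit_id %llu state %d",
                                    queue->qid, commit_id, ent->state);
                spin_unlock(&queue->lock);
                return err;
        }
        spin_unlock(&queue->lock);

        /* without the queue lock, as other locks are taken */
        fuse_uring_commit(ent, req, issue_flags);
        fuse_uring_next_fuse_req(ent, queue, issue_flags);
        return 0;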
909 struct fuse_ring_queue *queue; in is_ring_ready() local
916 queue = ring->queues[qid]; in is_ring_ready()
917 if (!queue) { in is_ring_ready()
922 spin_lock(&queue->lock); in is_ring_ready()
923 if (list_empty(&queue->ent_avail_queue)) in is_ring_ready()
925 spin_unlock(&queue->lock); in is_ring_ready()
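A ring counts as ready only once every queue exists and has at least one available entry; the check peeks at each queue's ent_avail_queue under its lock. A sketch with the surrounding loop and early-outs reconstructed; skipping current_qid is grounded in the is_ring_ready(ring, queue->qid) call visible below.

static bool is_ring_ready(struct fuse_ring *ring, int current_qid)
{
        struct fuse_ring_queue *queue;
        bool ready = true;
        int qid;

        for (qid = 0; qid < ring->nr_queues && ready; qid++) {
                if (qid == current_qid)   /* caller's queue just got one */
                        continue;

                queue = ring->queues[qid];
                if (!queue) {
                        ready = false;
                        break;
                }

                spin_lock(&queue->lock);
                if (list_empty(&queue->ent_avail_queue))
                        ready = false;
                spin_unlock(&queue->lock);
        }

        return ready;
}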
938 struct fuse_ring_queue *queue = ent->queue; in fuse_uring_do_register() local
939 struct fuse_ring *ring = queue->ring; in fuse_uring_do_register()
945 spin_lock(&queue->lock); in fuse_uring_do_register()
947 fuse_uring_ent_avail(ent, queue); in fuse_uring_do_register()
948 spin_unlock(&queue->lock); in fuse_uring_do_register()
951 bool ready = is_ring_ready(ring, queue->qid); in fuse_uring_do_register()
989 struct fuse_ring_queue *queue) in fuse_uring_create_ring_ent() argument
991 struct fuse_ring *ring = queue->ring; in fuse_uring_create_ring_ent()
1024 ent->queue = queue; in fuse_uring_create_ring_ent()
1034 * entry as "ready to get fuse requests" on the queue
1041 struct fuse_ring_queue *queue; in fuse_uring_register() local
1058 queue = ring->queues[qid]; in fuse_uring_register()
1059 if (!queue) { in fuse_uring_register()
1060 queue = fuse_uring_create_queue(ring, qid); in fuse_uring_register()
1061 if (!queue) in fuse_uring_register()
1066 * The created queue above does not need to be destructed in in fuse_uring_register()
1070 ent = fuse_uring_create_ring_ent(cmd, queue); in fuse_uring_register()
1153 struct fuse_ring_queue *queue = ent->queue; in fuse_uring_send() local
1155 spin_lock(&queue->lock); in fuse_uring_send()
1157 list_move(&ent->list, &queue->ent_in_userspace); in fuse_uring_send()
1159 spin_unlock(&queue->lock); in fuse_uring_send()
1173 struct fuse_ring_queue *queue = ent->queue; in fuse_uring_send_in_task() local
1179 fuse_uring_next_fuse_req(ent, queue, issue_flags); in fuse_uring_send_in_task()
1192 struct fuse_ring_queue *queue; in fuse_uring_task_to_queue() local
1201 queue = ring->queues[qid]; in fuse_uring_task_to_queue()
1202 WARN_ONCE(!queue, "Missing queue for qid %d\n", qid); in fuse_uring_task_to_queue()
1204 return queue; in fuse_uring_task_to_queue()
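Requests are steered to a queue by the submitting task's CPU, which is what makes the per-queue background accounting in fuse_uring_flush_bg() meaningful. A sketch; the task_cpu(current) mapping and the bounds clamp are reconstructed around the matched WARN_ONCE and are assumptions.

static struct fuse_ring_queue *fuse_uring_task_to_queue(struct fuse_ring *ring)
{
        struct fuse_ring_queue *queue;
        unsigned int qid;

        qid = task_cpu(current);          /* assumed CPU->qid mapping */
        if (WARN_ONCE(qid >= ring->nr_queues,
                      "core %u exceeds nr queues\n", qid))
                qid = 0;                  /* assumed fallback */

        queue = ring->queues[qid];
        WARN_ONCE(!queue, "Missing queue for qid %d\n", qid);

        return queue;
}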
1215 /* queue a fuse request and send it if a ring entry is available */
1220 struct fuse_ring_queue *queue; in fuse_uring_queue_fuse_req() local
1225 queue = fuse_uring_task_to_queue(ring); in fuse_uring_queue_fuse_req()
1226 if (!queue) in fuse_uring_queue_fuse_req()
1232 spin_lock(&queue->lock); in fuse_uring_queue_fuse_req()
1234 if (unlikely(queue->stopped)) in fuse_uring_queue_fuse_req()
1238 req->ring_queue = queue; in fuse_uring_queue_fuse_req()
1239 ent = list_first_entry_or_null(&queue->ent_avail_queue, in fuse_uring_queue_fuse_req()
1244 list_add_tail(&req->list, &queue->fuse_req_queue); in fuse_uring_queue_fuse_req()
1245 spin_unlock(&queue->lock); in fuse_uring_queue_fuse_req()
1253 spin_unlock(&queue->lock); in fuse_uring_queue_fuse_req()
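Foreground queueing is a single lock hold: pick the queue for this task, bail if it is stopped, then either hand the request straight to the first available ring entry or park it on fuse_req_queue for the next entry that frees up; the send to userspace happens after the unlock. A sketch with the error path and dispatch call reconstructed; fuse_uring_dispatch_ent() is a hypothetical name for the elided send step.

void fuse_uring_queue_fuse_req(struct fuse_iqueue *fiq, struct fuse_req *req)
{
        struct fuse_ring *ring = req->fm->fc->ring;  /* assumed plumbing */
        struct fuse_ring_queue *queue;
        struct fuse_ring_ent *ent = NULL;
        int err = -EINVAL;

        queue = fuse_uring_task_to_queue(ring);
        if (!queue)
                goto err;

        if (req->in.h.opcode != FUSE_NOTIFY_REPLY)
                req->in.h.unique = fuse_get_unique(fiq);

        spin_lock(&queue->lock);
        err = -ENOTCONN;
        if (unlikely(queue->stopped))
                goto err_unlock;

        req->ring_queue = queue;
        ent = list_first_entry_or_null(&queue->ent_avail_queue,
                                       struct fuse_ring_ent, list);
        if (ent)
                fuse_uring_add_req_to_ring_ent(ent, req);
        else
                list_add_tail(&req->list, &queue->fuse_req_queue);
        spin_unlock(&queue->lock);

        if (ent)
                fuse_uring_dispatch_ent(ent);   /* hypothetical helper */
        return;

err_unlock:
        spin_unlock(&queue->lock);
err:
        req->out.h.error = err;
        fuse_request_end(req);
}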
1264 struct fuse_ring_queue *queue; in fuse_uring_queue_bq_req() local
1267 queue = fuse_uring_task_to_queue(ring); in fuse_uring_queue_bq_req()
1268 if (!queue) in fuse_uring_queue_bq_req()
1271 spin_lock(&queue->lock); in fuse_uring_queue_bq_req()
1272 if (unlikely(queue->stopped)) { in fuse_uring_queue_bq_req()
1273 spin_unlock(&queue->lock); in fuse_uring_queue_bq_req()
1278 req->ring_queue = queue; in fuse_uring_queue_bq_req()
1279 list_add_tail(&req->list, &queue->fuse_req_bg_queue); in fuse_uring_queue_bq_req()
1281 ent = list_first_entry_or_null(&queue->ent_avail_queue, in fuse_uring_queue_bq_req()
1287 fuse_uring_flush_bg(queue); in fuse_uring_queue_bq_req()
1292 * in the queue that need to be handled first. Or no further req in fuse_uring_queue_bq_req()
1295 req = list_first_entry_or_null(&queue->fuse_req_queue, struct fuse_req, in fuse_uring_queue_bq_req()
1299 spin_unlock(&queue->lock); in fuse_uring_queue_bq_req()
1303 spin_unlock(&queue->lock); in fuse_uring_queue_bq_req()
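Background queueing differs in one important way: the request always lands on fuse_req_bg_queue first, fuse_uring_flush_bg() decides how many may proceed, and an available entry is only used if the head of fuse_req_queue is now sendable, which is why the code re-reads list_first_entry_or_null() after the flush. A sketch; the fc->bg_lock bracket and num_background accounting are assumptions mirroring the legacy fuse background path, and fuse_uring_dispatch_ent() is again hypothetical.

bool fuse_uring_queue_bq_req(struct fuse_req *req)
{
        struct fuse_conn *fc = req->fm->fc;
        struct fuse_ring *ring = fc->ring;
        struct fuse_ring_queue *queue;
        struct fuse_ring_ent *ent;

        queue = fuse_uring_task_to_queue(ring);
        if (!queue)
                return false;

        spin_lock(&queue->lock);
        if (unlikely(queue->stopped)) {
                spin_unlock(&queue->lock);
                return false;
        }

        req->ring_queue = queue;
        list_add_tail(&req->list, &queue->fuse_req_bg_queue);

        ent = list_first_entry_or_null(&queue->ent_avail_queue,
                                       struct fuse_ring_ent, list);
        spin_lock(&fc->bg_lock);          /* assumed, as in legacy fuse */
        fc->num_background++;             /* assumed accounting */
        fuse_uring_flush_bg(queue);
        spin_unlock(&fc->bg_lock);

        /* flush limits may leave older bg requests ahead of this one */
        req = list_first_entry_or_null(&queue->fuse_req_queue,
                                       struct fuse_req, list);
        if (ent && req) {
                fuse_uring_add_req_to_ring_ent(ent, req);
                spin_unlock(&queue->lock);
                fuse_uring_dispatch_ent(ent);   /* hypothetical helper */
        } else {
                spin_unlock(&queue->lock);
        }

        return true;
}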
1311 struct fuse_ring_queue *queue = req->ring_queue; in fuse_uring_remove_pending_req() local
1313 return fuse_remove_pending_req(req, &queue->lock); in fuse_uring_remove_pending_req()