Lines Matching full:queue
51 static void fuse_uring_flush_bg(struct fuse_ring_queue *queue) in fuse_uring_flush_bg() argument
53 struct fuse_ring *ring = queue->ring; in fuse_uring_flush_bg()
56 lockdep_assert_held(&queue->lock); in fuse_uring_flush_bg()
60 * Allow one bg request per queue, ignoring global fc limits. in fuse_uring_flush_bg()
61 * This prevents a single queue from consuming all resources and in fuse_uring_flush_bg()
62 * eliminates the need for remote queue wake-ups when global in fuse_uring_flush_bg()
63 * limits are met but this queue has no more waiting requests. in fuse_uring_flush_bg()
66 !queue->active_background) && in fuse_uring_flush_bg()
67 (!list_empty(&queue->fuse_req_bg_queue))) { in fuse_uring_flush_bg()
70 req = list_first_entry(&queue->fuse_req_bg_queue, in fuse_uring_flush_bg()
73 queue->active_background++; in fuse_uring_flush_bg()
75 list_move_tail(&req->list, &queue->fuse_req_queue); in fuse_uring_flush_bg()
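
The loop at lines 66-75, read with the comment above it, encodes a fairness rule: a background request may bypass the global limit as long as its own queue has none active, so every queue keeps making progress. A minimal userspace sketch of that accounting (hypothetical simplified types; singly linked lists stand in for list_head, plain ints for the fc counters, locking elided):

#include <stdio.h>

struct req { struct req *next; };

struct bg_queue {
    struct req *bg_head, *bg_tail;      /* models fuse_req_bg_queue */
    struct req *fg_head, *fg_tail;      /* models fuse_req_queue */
    int active_background;              /* models queue->active_background */
};

static int fc_active_background;        /* models fc->active_background */
static const int fc_max_background = 12;

static struct req *pop(struct req **head, struct req **tail)
{
    struct req *r = *head;

    if (r) {
        *head = r->next;
        if (!*head)
            *tail = NULL;
    }
    return r;
}

static void push(struct req **head, struct req **tail, struct req *r)
{
    r->next = NULL;
    if (*tail)
        (*tail)->next = r;
    else
        *head = r;
    *tail = r;
}

/* Move waiting bg requests to the fg list: freely below the global
 * limit, and at least one per queue even above it. */
static void flush_bg(struct bg_queue *q)
{
    while ((fc_active_background < fc_max_background ||
            !q->active_background) && q->bg_head) {
        struct req *r = pop(&q->bg_head, &q->bg_tail);

        fc_active_background++;
        q->active_background++;
        push(&q->fg_head, &q->fg_tail, r);
    }
}

int main(void)
{
    struct bg_queue q = {0};
    struct req r1 = {0}, r2 = {0};

    push(&q.bg_head, &q.bg_tail, &r1);
    push(&q.bg_head, &q.bg_tail, &r2);
    flush_bg(&q);
    printf("active=%d global=%d\n", q.active_background,
           fc_active_background);
    return 0;
}

Even with fc_max_background already reached, the loop still moves exactly one request for a queue whose active_background is zero, which is what removes the need for the cross-queue wake-ups the comment mentions.
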
82 struct fuse_ring_queue *queue = ent->queue; in fuse_uring_req_end() local
83 struct fuse_ring *ring = queue->ring; in fuse_uring_req_end()
86 lockdep_assert_not_held(&queue->lock); in fuse_uring_req_end()
87 spin_lock(&queue->lock); in fuse_uring_req_end()
90 queue->active_background--; in fuse_uring_req_end()
92 fuse_uring_flush_bg(queue); in fuse_uring_req_end()
96 spin_unlock(&queue->lock); in fuse_uring_req_end()
105 /* Abort all list-queued requests on the given ring queue */
106 static void fuse_uring_abort_end_queue_requests(struct fuse_ring_queue *queue) in fuse_uring_abort_end_queue_requests() argument
111 spin_lock(&queue->lock); in fuse_uring_abort_end_queue_requests()
112 list_for_each_entry(req, &queue->fuse_req_queue, list) in fuse_uring_abort_end_queue_requests()
114 list_splice_init(&queue->fuse_req_queue, &req_list); in fuse_uring_abort_end_queue_requests()
115 spin_unlock(&queue->lock); in fuse_uring_abort_end_queue_requests()
117 /* must not hold queue lock to avoid order issues with fi->lock */ in fuse_uring_abort_end_queue_requests()
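
The comment at line 117 names a classic pattern: detach everything while the queue lock is held, then complete each request with the lock dropped, so locks taken during completion (fi->lock in fuse) never nest inside the queue lock. A hedged userspace sketch, with a pthread mutex standing in for the spinlock:

#include <pthread.h>

struct req { struct req *next; };

struct queue {
    pthread_mutex_t lock;
    struct req *pending;    /* models fuse_req_queue */
};

static void end_req(struct req *r)
{
    (void)r;    /* may take per-request locks safely: queue lock not held */
}

static void abort_all(struct queue *q)
{
    struct req *list;

    pthread_mutex_lock(&q->lock);
    list = q->pending;      /* models list_splice_init(): steal the list */
    q->pending = NULL;
    pthread_mutex_unlock(&q->lock);

    while (list) {          /* walk the now-private list, no lock held */
        struct req *r = list;

        list = r->next;
        end_req(r);
    }
}

The same ordering rule reappears at lines 383 and 521 ("no queue lock to avoid lock order issues").
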
124 struct fuse_ring_queue *queue; in fuse_uring_abort_end_requests() local
128 queue = READ_ONCE(ring->queues[qid]); in fuse_uring_abort_end_requests()
129 if (!queue) in fuse_uring_abort_end_requests()
132 queue->stopped = true; in fuse_uring_abort_end_requests()
135 spin_lock(&queue->lock); in fuse_uring_abort_end_requests()
137 fuse_uring_flush_bg(queue); in fuse_uring_abort_end_requests()
139 spin_unlock(&queue->lock); in fuse_uring_abort_end_requests()
140 fuse_uring_abort_end_queue_requests(queue); in fuse_uring_abort_end_requests()
162 struct fuse_ring_queue *queue; in fuse_uring_request_expired() local
169 queue = READ_ONCE(ring->queues[qid]); in fuse_uring_request_expired()
170 if (!queue) in fuse_uring_request_expired()
173 spin_lock(&queue->lock); in fuse_uring_request_expired()
174 if (fuse_request_expired(fc, &queue->fuse_req_queue) || in fuse_uring_request_expired()
175 fuse_request_expired(fc, &queue->fuse_req_bg_queue) || in fuse_uring_request_expired()
176 ent_list_request_expired(fc, &queue->ent_w_req_queue) || in fuse_uring_request_expired()
177 ent_list_request_expired(fc, &queue->ent_in_userspace)) { in fuse_uring_request_expired()
178 spin_unlock(&queue->lock); in fuse_uring_request_expired()
181 spin_unlock(&queue->lock); in fuse_uring_request_expired()
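
fuse_uring_request_expired() walks every queue and, under that queue's lock, probes all four pending lists; the first expired request ends the scan. A simplified model (stand-in types, one timestamp per queue instead of four lists):

#include <pthread.h>
#include <stdbool.h>
#include <time.h>

struct queue {
    pthread_mutex_t lock;
    time_t oldest_start;    /* 0 when nothing is pending */
};

static bool expired(time_t start, time_t timeout)
{
    return start && time(NULL) - start > timeout;
}

static bool any_request_expired(struct queue **queues, int nr_queues,
                                time_t timeout)
{
    for (int qid = 0; qid < nr_queues; qid++) {
        struct queue *queue = queues[qid];
        bool hit;

        if (!queue)     /* queues are created lazily; skip missing ones */
            continue;

        pthread_mutex_lock(&queue->lock);
        hit = expired(queue->oldest_start, timeout);
        pthread_mutex_unlock(&queue->lock);

        if (hit)
            return true;
    }
    return false;
}
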
196 struct fuse_ring_queue *queue = ring->queues[qid]; in fuse_uring_destruct() local
199 if (!queue) in fuse_uring_destruct()
202 WARN_ON(!list_empty(&queue->ent_avail_queue)); in fuse_uring_destruct()
203 WARN_ON(!list_empty(&queue->ent_w_req_queue)); in fuse_uring_destruct()
204 WARN_ON(!list_empty(&queue->ent_commit_queue)); in fuse_uring_destruct()
205 WARN_ON(!list_empty(&queue->ent_in_userspace)); in fuse_uring_destruct()
207 list_for_each_entry_safe(ent, next, &queue->ent_released, in fuse_uring_destruct()
213 kfree(queue->fpq.processing); in fuse_uring_destruct()
214 kfree(queue); in fuse_uring_destruct()
273 struct fuse_ring_queue *queue; in fuse_uring_create_queue() local
276 queue = kzalloc(sizeof(*queue), GFP_KERNEL_ACCOUNT); in fuse_uring_create_queue()
277 if (!queue) in fuse_uring_create_queue()
281 kfree(queue); in fuse_uring_create_queue()
285 queue->qid = qid; in fuse_uring_create_queue()
286 queue->ring = ring; in fuse_uring_create_queue()
287 spin_lock_init(&queue->lock); in fuse_uring_create_queue()
289 INIT_LIST_HEAD(&queue->ent_avail_queue); in fuse_uring_create_queue()
290 INIT_LIST_HEAD(&queue->ent_commit_queue); in fuse_uring_create_queue()
291 INIT_LIST_HEAD(&queue->ent_w_req_queue); in fuse_uring_create_queue()
292 INIT_LIST_HEAD(&queue->ent_in_userspace); in fuse_uring_create_queue()
293 INIT_LIST_HEAD(&queue->fuse_req_queue); in fuse_uring_create_queue()
294 INIT_LIST_HEAD(&queue->fuse_req_bg_queue); in fuse_uring_create_queue()
295 INIT_LIST_HEAD(&queue->ent_released); in fuse_uring_create_queue()
297 queue->fpq.processing = pq; in fuse_uring_create_queue()
298 fuse_pqueue_init(&queue->fpq); in fuse_uring_create_queue()
303 kfree(queue->fpq.processing); in fuse_uring_create_queue()
304 kfree(queue); in fuse_uring_create_queue()
311 WRITE_ONCE(ring->queues[qid], queue); in fuse_uring_create_queue()
314 return queue; in fuse_uring_create_queue()
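
Note the order in fuse_uring_create_queue(): every field is initialized before the queue is published with WRITE_ONCE() at line 311, and lookups pair that with READ_ONCE() plus a NULL check (lines 128-129, 169-170). A userspace analogue using C11 atomics (hedged: release/acquire is my stand-in here; the kernel relies on WRITE_ONCE/READ_ONCE plus its own synchronization):

#include <stdatomic.h>
#include <stdlib.h>

#define MAX_QUEUES 64

struct queue { int qid; /* ... all other fields ... */ };

static _Atomic(struct queue *) queues[MAX_QUEUES];

static struct queue *create_queue(int qid)
{
    struct queue *q = calloc(1, sizeof(*q));

    if (!q)
        return NULL;
    q->qid = qid;   /* finish all initialization first ... */
    /* ... then publish: readers never see a half-built queue */
    atomic_store_explicit(&queues[qid], q, memory_order_release);
    return q;
}

static struct queue *get_queue(int qid)
{
    /* may return NULL: queues are created lazily on first register */
    return atomic_load_explicit(&queues[qid], memory_order_acquire);
}
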
332 struct fuse_ring_queue *queue = ent->queue; in fuse_uring_entry_teardown() local
334 spin_lock(&queue->lock); in fuse_uring_entry_teardown()
340 /* remove entry from queue->fpq.processing */ in fuse_uring_entry_teardown()
350 list_move(&ent->list, &queue->ent_released); in fuse_uring_entry_teardown()
352 spin_unlock(&queue->lock); in fuse_uring_entry_teardown()
362 struct fuse_ring_queue *queue, in fuse_uring_stop_list_entries() argument
365 struct fuse_ring *ring = queue->ring; in fuse_uring_stop_list_entries()
370 spin_lock(&queue->lock); in fuse_uring_stop_list_entries()
374 queue->qid, ent->state, exp_state); in fuse_uring_stop_list_entries()
381 spin_unlock(&queue->lock); in fuse_uring_stop_list_entries()
383 /* no queue lock to avoid lock order issues */ in fuse_uring_stop_list_entries()
391 static void fuse_uring_teardown_entries(struct fuse_ring_queue *queue) in fuse_uring_teardown_entries() argument
393 fuse_uring_stop_list_entries(&queue->ent_in_userspace, queue, in fuse_uring_teardown_entries()
395 fuse_uring_stop_list_entries(&queue->ent_avail_queue, queue, in fuse_uring_teardown_entries()
408 struct fuse_ring_queue *queue = ring->queues[qid]; in fuse_uring_log_ent_state() local
410 if (!queue) in fuse_uring_log_ent_state()
413 spin_lock(&queue->lock); in fuse_uring_log_ent_state()
415 * Log entries from the intermediate queue, the other queues in fuse_uring_log_ent_state()
418 list_for_each_entry(ent, &queue->ent_w_req_queue, list) { in fuse_uring_log_ent_state()
419 pr_info(" ent-req-queue ring=%p qid=%d ent=%p state=%d\n", in fuse_uring_log_ent_state()
422 list_for_each_entry(ent, &queue->ent_commit_queue, list) { in fuse_uring_log_ent_state()
423 pr_info(" ent-commit-queue ring=%p qid=%d ent=%p state=%d\n", in fuse_uring_log_ent_state()
426 spin_unlock(&queue->lock); in fuse_uring_log_ent_state()
439 struct fuse_ring_queue *queue = READ_ONCE(ring->queues[qid]); in fuse_uring_async_stop_queues() local
441 if (!queue) in fuse_uring_async_stop_queues()
444 fuse_uring_teardown_entries(queue); in fuse_uring_async_stop_queues()
452 * If there are still queue references left in fuse_uring_async_stop_queues()
474 struct fuse_ring_queue *queue = READ_ONCE(ring->queues[qid]); in fuse_uring_stop_queues() local
476 if (!queue) in fuse_uring_stop_queues()
479 fuse_uring_teardown_entries(queue); in fuse_uring_stop_queues()
503 struct fuse_ring_queue *queue; in fuse_uring_cancel() local
510 queue = ent->queue; in fuse_uring_cancel()
511 spin_lock(&queue->lock); in fuse_uring_cancel()
514 list_move_tail(&ent->list, &queue->ent_in_userspace); in fuse_uring_cancel()
518 spin_unlock(&queue->lock); in fuse_uring_cancel()
521 /* no queue lock to avoid lock order issues */ in fuse_uring_cancel()
666 struct fuse_ring_queue *queue = ent->queue; in fuse_uring_copy_to_ring() local
667 struct fuse_ring *ring = queue->ring; in fuse_uring_copy_to_ring()
673 queue->qid, ent, ent->state); in fuse_uring_copy_to_ring()
722 struct fuse_ring_queue *queue = ent->queue; in fuse_uring_send_next_to_ring() local
730 spin_lock(&queue->lock); in fuse_uring_send_next_to_ring()
734 list_move_tail(&ent->list, &queue->ent_in_userspace); in fuse_uring_send_next_to_ring()
735 spin_unlock(&queue->lock); in fuse_uring_send_next_to_ring()
745 struct fuse_ring_queue *queue) in fuse_uring_ent_avail() argument
748 list_move(&ent->list, &queue->ent_avail_queue); in fuse_uring_ent_avail()
756 struct fuse_ring_queue *queue = ent->queue; in fuse_uring_add_to_pq() local
757 struct fuse_pqueue *fpq = &queue->fpq; in fuse_uring_add_to_pq()
766 * Assign a fuse request to the given ring entry
771 struct fuse_ring_queue *queue = ent->queue; in fuse_uring_add_req_to_ring_ent() local
773 lockdep_assert_held(&queue->lock); in fuse_uring_add_req_to_ring_ent()
777 pr_warn("%s qid=%d state=%d\n", __func__, ent->queue->qid, in fuse_uring_add_req_to_ring_ent()
784 list_move_tail(&ent->list, &queue->ent_w_req_queue); in fuse_uring_add_req_to_ring_ent()
790 __must_hold(&queue->lock) in fuse_uring_ent_assign_req()
793 struct fuse_ring_queue *queue = ent->queue; in fuse_uring_ent_assign_req() local
794 struct list_head *req_queue = &queue->fuse_req_queue; in fuse_uring_ent_assign_req()
796 lockdep_assert_held(&queue->lock); in fuse_uring_ent_assign_req()
814 struct fuse_ring *ring = ent->queue->ring; in fuse_uring_commit()
840 struct fuse_ring_queue *queue, in fuse_uring_next_fuse_req() argument
847 spin_lock(&queue->lock); in fuse_uring_next_fuse_req()
848 fuse_uring_ent_avail(ent, queue); in fuse_uring_next_fuse_req()
850 spin_unlock(&queue->lock); in fuse_uring_next_fuse_req()
861 struct fuse_ring_queue *queue = ent->queue; in fuse_ring_ent_set_commit() local
863 lockdep_assert_held(&queue->lock); in fuse_ring_ent_set_commit()
869 list_move(&ent->list, &queue->ent_commit_queue); in fuse_ring_ent_set_commit()
882 struct fuse_ring_queue *queue; in fuse_uring_commit_fetch() local
895 queue = ring->queues[qid]; in fuse_uring_commit_fetch()
896 if (!queue) in fuse_uring_commit_fetch()
898 fpq = &queue->fpq; in fuse_uring_commit_fetch()
900 if (!READ_ONCE(fc->connected) || READ_ONCE(queue->stopped)) in fuse_uring_commit_fetch()
903 spin_lock(&queue->lock); in fuse_uring_commit_fetch()
912 pr_info("qid=%d commit_id %llu not found\n", queue->qid, in fuse_uring_commit_fetch()
914 spin_unlock(&queue->lock); in fuse_uring_commit_fetch()
924 queue->qid, commit_id, ent->state); in fuse_uring_commit_fetch()
925 spin_unlock(&queue->lock); in fuse_uring_commit_fetch()
933 spin_unlock(&queue->lock); in fuse_uring_commit_fetch()
935 /* without the queue lock, as other locks are taken */ in fuse_uring_commit_fetch()
945 fuse_uring_next_fuse_req(ent, queue, issue_flags); in fuse_uring_commit_fetch()
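
The commit path above is deliberately defensive: the commit id supplied by the server must resolve to an entry, and that entry must actually be in the "in userspace" state, before any reply data is touched (the two failure messages at lines 912 and 924). A flat-array sketch of those checks (hypothetical; the real code hashes commit ids into queue->fpq.processing):

#include <stddef.h>

enum ent_state { ENT_AVAILABLE, ENT_IN_USERSPACE, ENT_COMMIT };

struct ring_ent {
    unsigned long long commit_id;
    enum ent_state state;
};

static struct ring_ent *find_ent(struct ring_ent *tbl, size_t n,
                                 unsigned long long commit_id)
{
    for (size_t i = 0; i < n; i++)
        if (tbl[i].commit_id == commit_id)
            return &tbl[i];
    return NULL;
}

static int commit_fetch(struct ring_ent *tbl, size_t n,
                        unsigned long long commit_id)
{
    struct ring_ent *ent = find_ent(tbl, n, commit_id);

    if (!ent)
        return -1;                      /* "commit_id not found" */
    if (ent->state != ENT_IN_USERSPACE)
        return -1;                      /* unexpected entry state */
    ent->state = ENT_COMMIT;            /* models the ent_commit_queue move */
    return 0;
}
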
952 struct fuse_ring_queue *queue; in is_ring_ready() local
959 queue = ring->queues[qid]; in is_ring_ready()
960 if (!queue) { in is_ring_ready()
965 spin_lock(&queue->lock); in is_ring_ready()
966 if (list_empty(&queue->ent_avail_queue)) in is_ring_ready()
968 spin_unlock(&queue->lock); in is_ring_ready()
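
is_ring_ready() treats the ring as usable only once every queue exists and holds at least one available entry. A locking-elided sketch of that all-queues check (the real function also special-cases the qid currently registering):

#include <stdbool.h>

struct queue { int nr_avail; };     /* size of ent_avail_queue */

static bool is_ring_ready(struct queue *const *queues, int nr_queues)
{
    for (int qid = 0; qid < nr_queues; qid++) {
        const struct queue *queue = queues[qid];

        if (!queue || queue->nr_avail == 0)
            return false;   /* a queue is missing or has no free entry */
    }
    return true;
}
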
981 struct fuse_ring_queue *queue = ent->queue; in fuse_uring_do_register() local
982 struct fuse_ring *ring = queue->ring; in fuse_uring_do_register()
988 spin_lock(&queue->lock); in fuse_uring_do_register()
990 fuse_uring_ent_avail(ent, queue); in fuse_uring_do_register()
991 spin_unlock(&queue->lock); in fuse_uring_do_register()
994 bool ready = is_ring_ready(ring, queue->qid); in fuse_uring_do_register()
1032 struct fuse_ring_queue *queue) in fuse_uring_create_ring_ent() argument
1034 struct fuse_ring *ring = queue->ring; in fuse_uring_create_ring_ent()
1067 ent->queue = queue; in fuse_uring_create_ring_ent()
1077 * entry as "ready to get fuse requests" on the queue
1084 struct fuse_ring_queue *queue; in fuse_uring_register() local
1101 queue = ring->queues[qid]; in fuse_uring_register()
1102 if (!queue) { in fuse_uring_register()
1103 queue = fuse_uring_create_queue(ring, qid); in fuse_uring_register()
1104 if (!queue) in fuse_uring_register()
1109 * The created queue above does not need to be destructed in in fuse_uring_register()
1113 ent = fuse_uring_create_ring_ent(cmd, queue); in fuse_uring_register()
1196 struct fuse_ring_queue *queue = ent->queue; in fuse_uring_send() local
1198 spin_lock(&queue->lock); in fuse_uring_send()
1200 list_move_tail(&ent->list, &queue->ent_in_userspace); in fuse_uring_send()
1202 spin_unlock(&queue->lock); in fuse_uring_send()
1216 struct fuse_ring_queue *queue = ent->queue; in fuse_uring_send_in_task() local
1222 fuse_uring_next_fuse_req(ent, queue, issue_flags); in fuse_uring_send_in_task()
1235 struct fuse_ring_queue *queue; in fuse_uring_task_to_queue() local
1244 queue = ring->queues[qid]; in fuse_uring_task_to_queue()
1245 WARN_ONCE(!queue, "Missing queue for qid %d\n", qid); in fuse_uring_task_to_queue()
1247 return queue; in fuse_uring_task_to_queue()
1258 /* queue a fuse request and send it if a ring entry is available */
1263 struct fuse_ring_queue *queue; in fuse_uring_queue_fuse_req() local
1268 queue = fuse_uring_task_to_queue(ring); in fuse_uring_queue_fuse_req()
1269 if (!queue) in fuse_uring_queue_fuse_req()
1274 spin_lock(&queue->lock); in fuse_uring_queue_fuse_req()
1276 if (unlikely(queue->stopped)) in fuse_uring_queue_fuse_req()
1280 req->ring_queue = queue; in fuse_uring_queue_fuse_req()
1281 ent = list_first_entry_or_null(&queue->ent_avail_queue, in fuse_uring_queue_fuse_req()
1286 list_add_tail(&req->list, &queue->fuse_req_queue); in fuse_uring_queue_fuse_req()
1287 spin_unlock(&queue->lock); in fuse_uring_queue_fuse_req()
1295 spin_unlock(&queue->lock); in fuse_uring_queue_fuse_req()
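
Lines 1274-1295 show the foreground dispatch decision: under the queue lock, either claim the first available ring entry or park the request on fuse_req_queue; the actual send to userspace happens only after the lock is dropped. A pthread-based sketch (hypothetical helpers; LIFO lists instead of tail queues):

#include <pthread.h>
#include <stddef.h>

struct req { struct req *next; };
struct ent { struct ent *next; };

struct queue {
    pthread_mutex_t lock;
    int stopped;
    struct ent *avail;      /* models ent_avail_queue */
    struct req *pending;    /* models fuse_req_queue (LIFO here) */
};

static void send_via_ent(struct ent *e, struct req *r)
{
    (void)e; (void)r;       /* copy to the ring buffer, complete the CQE */
}

static int queue_req(struct queue *q, struct req *r)
{
    struct ent *e;

    pthread_mutex_lock(&q->lock);
    if (q->stopped) {
        pthread_mutex_unlock(&q->lock);
        return -1;          /* models the queue->stopped bail-out */
    }
    e = q->avail;
    if (e)
        q->avail = e->next;     /* claim an idle ring entry */
    else {
        r->next = q->pending;   /* no entry: park until one frees up */
        q->pending = r;
    }
    pthread_mutex_unlock(&q->lock);

    if (e)
        send_via_ent(e, r);     /* send without holding the queue lock */
    return 0;
}
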
1306 struct fuse_ring_queue *queue; in fuse_uring_queue_bq_req() local
1309 queue = fuse_uring_task_to_queue(ring); in fuse_uring_queue_bq_req()
1310 if (!queue) in fuse_uring_queue_bq_req()
1313 spin_lock(&queue->lock); in fuse_uring_queue_bq_req()
1314 if (unlikely(queue->stopped)) { in fuse_uring_queue_bq_req()
1315 spin_unlock(&queue->lock); in fuse_uring_queue_bq_req()
1320 req->ring_queue = queue; in fuse_uring_queue_bq_req()
1321 list_add_tail(&req->list, &queue->fuse_req_bg_queue); in fuse_uring_queue_bq_req()
1323 ent = list_first_entry_or_null(&queue->ent_avail_queue, in fuse_uring_queue_bq_req()
1329 fuse_uring_flush_bg(queue); in fuse_uring_queue_bq_req()
1334 * in the queue that need to be handled first. Or no further req in fuse_uring_queue_bq_req()
1337 req = list_first_entry_or_null(&queue->fuse_req_queue, struct fuse_req, in fuse_uring_queue_bq_req()
1341 spin_unlock(&queue->lock); in fuse_uring_queue_bq_req()
1345 spin_unlock(&queue->lock); in fuse_uring_queue_bq_req()
1353 struct fuse_ring_queue *queue = req->ring_queue; in fuse_uring_remove_pending_req() local
1355 return fuse_remove_pending_req(req, &queue->lock); in fuse_uring_remove_pending_req()