1 // SPDX-License-Identifier: GPL-2.0
4 * Copyright (c) 2023-2024 DataDirect Networks.
18 "Enable userspace communication through io-uring");
40 pdu->ent = ring_ent; in uring_cmd_set_ring_ent()
48 return pdu->ent; in uring_cmd_to_ring_ent()
53 struct fuse_ring *ring = queue->ring; in fuse_uring_flush_bg()
54 struct fuse_conn *fc = ring->fc; in fuse_uring_flush_bg()
56 lockdep_assert_held(&queue->lock); in fuse_uring_flush_bg()
57 lockdep_assert_held(&fc->bg_lock); in fuse_uring_flush_bg()
62 * eliminates the need for remote queue wake-ups when global in fuse_uring_flush_bg()
65 while ((fc->active_background < fc->max_background || in fuse_uring_flush_bg()
66 !queue->active_background) && in fuse_uring_flush_bg()
67 (!list_empty(&queue->fuse_req_bg_queue))) { in fuse_uring_flush_bg()
70 req = list_first_entry(&queue->fuse_req_bg_queue, in fuse_uring_flush_bg()
72 fc->active_background++; in fuse_uring_flush_bg()
73 queue->active_background++; in fuse_uring_flush_bg()
75 list_move_tail(&req->list, &queue->fuse_req_queue); in fuse_uring_flush_bg()
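/*
 * Summary: background requests are promoted from the per-queue
 * fuse_req_bg_queue to the regular fuse_req_queue for as long as either the
 * connection-wide active_background count is below max_background or this
 * queue has no active background request yet. The per-queue allowance is what
 * avoids remote queue wake-ups once the global limit is lifted.
 */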
82 struct fuse_ring_queue *queue = ent->queue; in fuse_uring_req_end()
83 struct fuse_ring *ring = queue->ring; in fuse_uring_req_end()
84 struct fuse_conn *fc = ring->fc; in fuse_uring_req_end()
86 lockdep_assert_not_held(&queue->lock); in fuse_uring_req_end()
87 spin_lock(&queue->lock); in fuse_uring_req_end()
88 ent->fuse_req = NULL; in fuse_uring_req_end()
89 if (test_bit(FR_BACKGROUND, &req->flags)) { in fuse_uring_req_end()
90 queue->active_background--; in fuse_uring_req_end()
91 spin_lock(&fc->bg_lock); in fuse_uring_req_end()
93 spin_unlock(&fc->bg_lock); in fuse_uring_req_end()
96 spin_unlock(&queue->lock); in fuse_uring_req_end()
99 req->out.h.error = error; in fuse_uring_req_end()
101 clear_bit(FR_SENT, &req->flags); in fuse_uring_req_end()
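/*
 * Summary: the entry is detached from its request under queue->lock; for
 * background requests the per-queue counter and, under fc->bg_lock, the
 * connection-wide accounting are updated and the background queue is flushed
 * again. The request itself is only finished after queue->lock has been
 * dropped (see the lockdep_assert_not_held() above), with req->out.h.error
 * set and FR_SENT cleared.
 */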
111 spin_lock(&queue->lock); in fuse_uring_abort_end_queue_requests()
112 list_for_each_entry(req, &queue->fuse_req_queue, list) in fuse_uring_abort_end_queue_requests()
113 clear_bit(FR_PENDING, &req->flags); in fuse_uring_abort_end_queue_requests()
114 list_splice_init(&queue->fuse_req_queue, &req_list); in fuse_uring_abort_end_queue_requests()
115 spin_unlock(&queue->lock); in fuse_uring_abort_end_queue_requests()
117 /* must not hold queue lock to avoid order issues with fi->lock */ in fuse_uring_abort_end_queue_requests()
125 struct fuse_conn *fc = ring->fc; in fuse_uring_abort_end_requests()
127 for (qid = 0; qid < ring->nr_queues; qid++) { in fuse_uring_abort_end_requests()
128 queue = READ_ONCE(ring->queues[qid]); in fuse_uring_abort_end_requests()
132 queue->stopped = true; in fuse_uring_abort_end_requests()
134 WARN_ON_ONCE(ring->fc->max_background != UINT_MAX); in fuse_uring_abort_end_requests()
135 spin_lock(&queue->lock); in fuse_uring_abort_end_requests()
136 spin_lock(&fc->bg_lock); in fuse_uring_abort_end_requests()
138 spin_unlock(&fc->bg_lock); in fuse_uring_abort_end_requests()
139 spin_unlock(&queue->lock); in fuse_uring_abort_end_requests()
153 req = ent->fuse_req; in ent_list_request_expired()
155 return time_is_before_jiffies(req->create_time + in ent_list_request_expired()
156 fc->timeout.req_timeout); in ent_list_request_expired()
161 struct fuse_ring *ring = fc->ring; in fuse_uring_request_expired()
168 for (qid = 0; qid < ring->nr_queues; qid++) { in fuse_uring_request_expired()
169 queue = READ_ONCE(ring->queues[qid]); in fuse_uring_request_expired()
173 spin_lock(&queue->lock); in fuse_uring_request_expired()
174 if (fuse_request_expired(fc, &queue->fuse_req_queue) || in fuse_uring_request_expired()
175 fuse_request_expired(fc, &queue->fuse_req_bg_queue) || in fuse_uring_request_expired()
176 ent_list_request_expired(fc, &queue->ent_w_req_queue) || in fuse_uring_request_expired()
177 ent_list_request_expired(fc, &queue->ent_in_userspace)) { in fuse_uring_request_expired()
178 spin_unlock(&queue->lock); in fuse_uring_request_expired()
181 spin_unlock(&queue->lock); in fuse_uring_request_expired()
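/*
 * Summary: the timeout check walks every queue and, under queue->lock,
 * inspects the pending list, the background list and the two entry lists that
 * can still carry a request (ent_w_req_queue and ent_in_userspace); the first
 * expired request found ends the scan and reports a timeout.
 */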
189 struct fuse_ring *ring = fc->ring; in fuse_uring_destruct()
195 for (qid = 0; qid < ring->nr_queues; qid++) { in fuse_uring_destruct()
196 struct fuse_ring_queue *queue = ring->queues[qid]; in fuse_uring_destruct()
202 WARN_ON(!list_empty(&queue->ent_avail_queue)); in fuse_uring_destruct()
203 WARN_ON(!list_empty(&queue->ent_w_req_queue)); in fuse_uring_destruct()
204 WARN_ON(!list_empty(&queue->ent_commit_queue)); in fuse_uring_destruct()
205 WARN_ON(!list_empty(&queue->ent_in_userspace)); in fuse_uring_destruct()
207 list_for_each_entry_safe(ent, next, &queue->ent_released, in fuse_uring_destruct()
209 list_del_init(&ent->list); in fuse_uring_destruct()
213 kfree(queue->fpq.processing); in fuse_uring_destruct()
215 ring->queues[qid] = NULL; in fuse_uring_destruct()
218 kfree(ring->queues); in fuse_uring_destruct()
220 fc->ring = NULL; in fuse_uring_destruct()
233 ring = kzalloc(sizeof(*fc->ring), GFP_KERNEL_ACCOUNT); in fuse_uring_create()
237 ring->queues = kcalloc(nr_queues, sizeof(struct fuse_ring_queue *), in fuse_uring_create()
239 if (!ring->queues) in fuse_uring_create()
242 max_payload_size = max(FUSE_MIN_READ_BUFFER, fc->max_write); in fuse_uring_create()
243 max_payload_size = max(max_payload_size, fc->max_pages * PAGE_SIZE); in fuse_uring_create()
245 spin_lock(&fc->lock); in fuse_uring_create()
246 if (fc->ring) { in fuse_uring_create()
248 spin_unlock(&fc->lock); in fuse_uring_create()
249 res = fc->ring; in fuse_uring_create()
253 init_waitqueue_head(&ring->stop_waitq); in fuse_uring_create()
255 ring->nr_queues = nr_queues; in fuse_uring_create()
256 ring->fc = fc; in fuse_uring_create()
257 ring->max_payload_sz = max_payload_size; in fuse_uring_create()
258 smp_store_release(&fc->ring, ring); in fuse_uring_create()
260 spin_unlock(&fc->lock); in fuse_uring_create()
264 kfree(ring->queues); in fuse_uring_create()
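/*
 * Summary: ring creation is racy by design - the allocation happens without
 * fc->lock and a concurrently published ring wins, with the fresh allocation
 * freed on the way out. max_payload_sz is sized for the largest of
 * FUSE_MIN_READ_BUFFER, fc->max_write and fc->max_pages * PAGE_SIZE.
 * Publishing fc->ring with smp_store_release() pairs with the
 * smp_load_acquire() of fc->ring in fuse_uring_register() below.
 */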
272 struct fuse_conn *fc = ring->fc; in fuse_uring_create_queue()
285 queue->qid = qid; in fuse_uring_create_queue()
286 queue->ring = ring; in fuse_uring_create_queue()
287 spin_lock_init(&queue->lock); in fuse_uring_create_queue()
289 INIT_LIST_HEAD(&queue->ent_avail_queue); in fuse_uring_create_queue()
290 INIT_LIST_HEAD(&queue->ent_commit_queue); in fuse_uring_create_queue()
291 INIT_LIST_HEAD(&queue->ent_w_req_queue); in fuse_uring_create_queue()
292 INIT_LIST_HEAD(&queue->ent_in_userspace); in fuse_uring_create_queue()
293 INIT_LIST_HEAD(&queue->fuse_req_queue); in fuse_uring_create_queue()
294 INIT_LIST_HEAD(&queue->fuse_req_bg_queue); in fuse_uring_create_queue()
295 INIT_LIST_HEAD(&queue->ent_released); in fuse_uring_create_queue()
297 queue->fpq.processing = pq; in fuse_uring_create_queue()
298 fuse_pqueue_init(&queue->fpq); in fuse_uring_create_queue()
300 spin_lock(&fc->lock); in fuse_uring_create_queue()
301 if (ring->queues[qid]) { in fuse_uring_create_queue()
302 spin_unlock(&fc->lock); in fuse_uring_create_queue()
303 kfree(queue->fpq.processing); in fuse_uring_create_queue()
305 return ring->queues[qid]; in fuse_uring_create_queue()
311 WRITE_ONCE(ring->queues[qid], queue); in fuse_uring_create_queue()
312 spin_unlock(&fc->lock); in fuse_uring_create_queue()
319 clear_bit(FR_SENT, &req->flags); in fuse_uring_stop_fuse_req_end()
320 req->out.h.error = -ECONNABORTED; in fuse_uring_stop_fuse_req_end()
332 struct fuse_ring_queue *queue = ent->queue; in fuse_uring_entry_teardown()
334 spin_lock(&queue->lock); in fuse_uring_entry_teardown()
335 cmd = ent->cmd; in fuse_uring_entry_teardown()
336 ent->cmd = NULL; in fuse_uring_entry_teardown()
337 req = ent->fuse_req; in fuse_uring_entry_teardown()
338 ent->fuse_req = NULL; in fuse_uring_entry_teardown()
340 /* remove entry from queue->fpq->processing */ in fuse_uring_entry_teardown()
341 list_del_init(&req->list); in fuse_uring_entry_teardown()
346 * pointer access of entries through IO_URING_F_CANCEL - there is a risk in fuse_uring_entry_teardown()
350 list_move(&ent->list, &queue->ent_released); in fuse_uring_entry_teardown()
351 ent->state = FRRS_RELEASED; in fuse_uring_entry_teardown()
352 spin_unlock(&queue->lock); in fuse_uring_entry_teardown()
355 io_uring_cmd_done(cmd, -ENOTCONN, IO_URING_F_UNLOCKED); in fuse_uring_entry_teardown()
365 struct fuse_ring *ring = queue->ring; in fuse_uring_stop_list_entries()
370 spin_lock(&queue->lock); in fuse_uring_stop_list_entries()
372 if (ent->state != exp_state) { in fuse_uring_stop_list_entries()
374 queue->qid, ent->state, exp_state); in fuse_uring_stop_list_entries()
378 ent->state = FRRS_TEARDOWN; in fuse_uring_stop_list_entries()
379 list_move(&ent->list, &to_teardown); in fuse_uring_stop_list_entries()
381 spin_unlock(&queue->lock); in fuse_uring_stop_list_entries()
386 queue_refs = atomic_dec_return(&ring->queue_refs); in fuse_uring_stop_list_entries()
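/*
 * Summary: teardown detaches both the io_uring command and any fuse request
 * from the entry under queue->lock. The entry is parked on ent_released in
 * FRRS_RELEASED state instead of being freed, because IO_URING_F_CANCEL may
 * still dereference it. Outside the lock the command is completed with
 * -ENOTCONN, a detached request is ended with -ECONNABORTED (see
 * fuse_uring_stop_fuse_req_end()), and one ring->queue_refs reference is
 * dropped per torn-down entry.
 */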
393 fuse_uring_stop_list_entries(&queue->ent_in_userspace, queue, in fuse_uring_teardown_entries()
395 fuse_uring_stop_list_entries(&queue->ent_avail_queue, queue, in fuse_uring_teardown_entries()
407 for (qid = 0; qid < ring->nr_queues; qid++) { in fuse_uring_log_ent_state()
408 struct fuse_ring_queue *queue = ring->queues[qid]; in fuse_uring_log_ent_state()
413 spin_lock(&queue->lock); in fuse_uring_log_ent_state()
418 list_for_each_entry(ent, &queue->ent_w_req_queue, list) { in fuse_uring_log_ent_state()
419 pr_info(" ent-req-queue ring=%p qid=%d ent=%p state=%d\n", in fuse_uring_log_ent_state()
420 ring, qid, ent, ent->state); in fuse_uring_log_ent_state()
422 list_for_each_entry(ent, &queue->ent_commit_queue, list) { in fuse_uring_log_ent_state()
423 pr_info(" ent-commit-queue ring=%p qid=%d ent=%p state=%d\n", in fuse_uring_log_ent_state()
424 ring, qid, ent, ent->state); in fuse_uring_log_ent_state()
426 spin_unlock(&queue->lock); in fuse_uring_log_ent_state()
428 ring->stop_debug_log = 1; in fuse_uring_log_ent_state()
438 for (qid = 0; qid < ring->nr_queues; qid++) { in fuse_uring_async_stop_queues()
439 struct fuse_ring_queue *queue = READ_ONCE(ring->queues[qid]); in fuse_uring_async_stop_queues()
450 * or on the way to userspace - we could handle that with conditions in in fuse_uring_async_stop_queues()
454 if (atomic_read(&ring->queue_refs) > 0) { in fuse_uring_async_stop_queues()
456 ring->teardown_time + FUSE_URING_TEARDOWN_TIMEOUT)) in fuse_uring_async_stop_queues()
459 schedule_delayed_work(&ring->async_teardown_work, in fuse_uring_async_stop_queues()
462 wake_up_all(&ring->stop_waitq); in fuse_uring_async_stop_queues()
473 for (qid = 0; qid < ring->nr_queues; qid++) { in fuse_uring_stop_queues()
474 struct fuse_ring_queue *queue = READ_ONCE(ring->queues[qid]); in fuse_uring_stop_queues()
482 if (atomic_read(&ring->queue_refs) > 0) { in fuse_uring_stop_queues()
483 ring->teardown_time = jiffies; in fuse_uring_stop_queues()
484 INIT_DELAYED_WORK(&ring->async_teardown_work, in fuse_uring_stop_queues()
486 schedule_delayed_work(&ring->async_teardown_work, in fuse_uring_stop_queues()
489 wake_up_all(&ring->stop_waitq); in fuse_uring_stop_queues()
507 * direct access on ent - it must not be destructed as long as in fuse_uring_cancel()
510 queue = ent->queue; in fuse_uring_cancel()
511 spin_lock(&queue->lock); in fuse_uring_cancel()
512 if (ent->state == FRRS_AVAILABLE) { in fuse_uring_cancel()
513 ent->state = FRRS_USERSPACE; in fuse_uring_cancel()
514 list_move_tail(&ent->list, &queue->ent_in_userspace); in fuse_uring_cancel()
516 ent->cmd = NULL; in fuse_uring_cancel()
518 spin_unlock(&queue->lock); in fuse_uring_cancel()
522 io_uring_cmd_done(cmd, -ENOTCONN, issue_flags); in fuse_uring_cancel()
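/*
 * Summary: cancellation only acts on entries still idle in FRRS_AVAILABLE
 * state; those are moved to ent_in_userspace, detached from the command, and
 * the command is completed with -ENOTCONN outside the lock. Entries in any
 * other state keep their SQE and are left to the regular completion or
 * teardown paths.
 */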
542 err = -EINVAL; in fuse_uring_out_header_has_err()
543 if (oh->unique == 0) { in fuse_uring_out_header_has_err()
544 /* Not supported through io-uring yet */ in fuse_uring_out_header_has_err()
545 pr_warn_once("notify through fuse-io-uring not supported\n"); in fuse_uring_out_header_has_err()
549 if (oh->error <= -ERESTARTSYS || oh->error > 0) in fuse_uring_out_header_has_err()
552 if (oh->error) { in fuse_uring_out_header_has_err()
553 err = oh->error; in fuse_uring_out_header_has_err()
557 err = -ENOENT; in fuse_uring_out_header_has_err()
558 if ((oh->unique & ~FUSE_INT_REQ_BIT) != req->in.h.unique) { in fuse_uring_out_header_has_err()
560 req->in.h.unique, in fuse_uring_out_header_has_err()
561 oh->unique & ~FUSE_INT_REQ_BIT); in fuse_uring_out_header_has_err()
567 * XXX: Not supported through fuse-io-uring yet, it should not even in fuse_uring_out_header_has_err()
568 * find the request - should not happen. in fuse_uring_out_header_has_err()
570 WARN_ON_ONCE(oh->unique & FUSE_INT_REQ_BIT); in fuse_uring_out_header_has_err()
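/*
 * Summary of the checks above: a zero unique id would be a notify reply,
 * which fuse-io-uring does not support yet; error values outside of
 * (-ERESTARTSYS, 0] are rejected; any other non-zero error is propagated; a
 * unique id that does not match the request (ignoring FUSE_INT_REQ_BIT)
 * yields -ENOENT; and an answer to an interrupt request (FUSE_INT_REQ_BIT
 * set) is unexpected and only triggers a one-time warning.
 */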
582 struct fuse_args *args = req->args; in fuse_uring_copy_from_ring()
587 err = copy_from_user(&ring_in_out, &ent->headers->ring_ent_in_out, in fuse_uring_copy_from_ring()
590 return -EFAULT; in fuse_uring_copy_from_ring()
592 err = import_ubuf(ITER_SOURCE, ent->payload, ring->max_payload_sz, in fuse_uring_copy_from_ring()
611 struct fuse_args *args = req->args; in fuse_uring_args_to_ring()
612 struct fuse_in_arg *in_args = args->in_args; in fuse_uring_args_to_ring()
613 int num_args = args->in_numargs; in fuse_uring_args_to_ring()
618 .commit_id = req->in.h.unique, in fuse_uring_args_to_ring()
621 err = import_ubuf(ITER_DEST, ent->payload, ring->max_payload_sz, &iter); in fuse_uring_args_to_ring()
636 if (args->in_args[0].size > 0) { in fuse_uring_args_to_ring()
637 err = copy_to_user(&ent->headers->op_in, in_args->value, in fuse_uring_args_to_ring()
638 in_args->size); in fuse_uring_args_to_ring()
642 return -EFAULT; in fuse_uring_args_to_ring()
646 num_args--; in fuse_uring_args_to_ring()
650 err = fuse_copy_args(&cs, num_args, args->in_pages, in fuse_uring_args_to_ring()
658 err = copy_to_user(&ent->headers->ring_ent_in_out, &ent_in_out, in fuse_uring_args_to_ring()
660 return err ? -EFAULT : 0; in fuse_uring_args_to_ring()
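/*
 * Summary: the first in-arg (the opcode-specific header, when present) is
 * copied to the user-visible headers->op_in area, the remaining in-args are
 * serialized into the payload buffer via fuse_copy_args(), and
 * headers->ring_ent_in_out receives the commit_id (the request's unique id),
 * which the server must echo back in its COMMIT_AND_FETCH command.
 */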
666 struct fuse_ring_queue *queue = ent->queue; in fuse_uring_copy_to_ring()
667 struct fuse_ring *ring = queue->ring; in fuse_uring_copy_to_ring()
670 err = -EIO; in fuse_uring_copy_to_ring()
671 if (WARN_ON(ent->state != FRRS_FUSE_REQ)) { in fuse_uring_copy_to_ring()
672 pr_err("qid=%d ring-req=%p invalid state %d on send\n", in fuse_uring_copy_to_ring()
673 queue->qid, ent, ent->state); in fuse_uring_copy_to_ring()
677 err = -EINVAL; in fuse_uring_copy_to_ring()
678 if (WARN_ON(req->in.h.unique == 0)) in fuse_uring_copy_to_ring()
689 err = copy_to_user(&ent->headers->in_out, &req->in.h, in fuse_uring_copy_to_ring()
690 sizeof(req->in.h)); in fuse_uring_copy_to_ring()
692 err = -EFAULT; in fuse_uring_copy_to_ring()
706 set_bit(FR_SENT, &req->flags); in fuse_uring_prepare_send()
722 struct fuse_ring_queue *queue = ent->queue; in fuse_uring_send_next_to_ring()
730 spin_lock(&queue->lock); in fuse_uring_send_next_to_ring()
731 cmd = ent->cmd; in fuse_uring_send_next_to_ring()
732 ent->cmd = NULL; in fuse_uring_send_next_to_ring()
733 ent->state = FRRS_USERSPACE; in fuse_uring_send_next_to_ring()
734 list_move_tail(&ent->list, &queue->ent_in_userspace); in fuse_uring_send_next_to_ring()
735 spin_unlock(&queue->lock); in fuse_uring_send_next_to_ring()
747 WARN_ON_ONCE(!ent->cmd); in fuse_uring_ent_avail()
748 list_move(&ent->list, &queue->ent_avail_queue); in fuse_uring_ent_avail()
749 ent->state = FRRS_AVAILABLE; in fuse_uring_ent_avail()
756 struct fuse_ring_queue *queue = ent->queue; in fuse_uring_add_to_pq()
757 struct fuse_pqueue *fpq = &queue->fpq; in fuse_uring_add_to_pq()
760 req->ring_entry = ent; in fuse_uring_add_to_pq()
761 hash = fuse_req_hash(req->in.h.unique); in fuse_uring_add_to_pq()
762 list_move_tail(&req->list, &fpq->processing[hash]); in fuse_uring_add_to_pq()
771 struct fuse_ring_queue *queue = ent->queue; in fuse_uring_add_req_to_ring_ent()
773 lockdep_assert_held(&queue->lock); in fuse_uring_add_req_to_ring_ent()
775 if (WARN_ON_ONCE(ent->state != FRRS_AVAILABLE && in fuse_uring_add_req_to_ring_ent()
776 ent->state != FRRS_COMMIT)) { in fuse_uring_add_req_to_ring_ent()
777 pr_warn("%s qid=%d state=%d\n", __func__, ent->queue->qid, in fuse_uring_add_req_to_ring_ent()
778 ent->state); in fuse_uring_add_req_to_ring_ent()
781 clear_bit(FR_PENDING, &req->flags); in fuse_uring_add_req_to_ring_ent()
782 ent->fuse_req = req; in fuse_uring_add_req_to_ring_ent()
783 ent->state = FRRS_FUSE_REQ; in fuse_uring_add_req_to_ring_ent()
784 list_move_tail(&ent->list, &queue->ent_w_req_queue); in fuse_uring_add_req_to_ring_ent()
790 __must_hold(&queue->lock) in fuse_uring_ent_assign_req()
793 struct fuse_ring_queue *queue = ent->queue; in fuse_uring_ent_assign_req()
794 struct list_head *req_queue = &queue->fuse_req_queue; in fuse_uring_ent_assign_req()
796 lockdep_assert_held(&queue->lock); in fuse_uring_ent_assign_req()
814 struct fuse_ring *ring = ent->queue->ring; in fuse_uring_commit()
815 struct fuse_conn *fc = ring->fc; in fuse_uring_commit()
818 err = copy_from_user(&req->out.h, &ent->headers->in_out, in fuse_uring_commit()
819 sizeof(req->out.h)); in fuse_uring_commit()
821 req->out.h.error = -EFAULT; in fuse_uring_commit()
825 err = fuse_uring_out_header_has_err(&req->out.h, req, fc); in fuse_uring_commit()
827 /* req->out.h.error already set */ in fuse_uring_commit()
847 spin_lock(&queue->lock); in fuse_uring_next_fuse_req()
850 spin_unlock(&queue->lock); in fuse_uring_next_fuse_req()
861 struct fuse_ring_queue *queue = ent->queue; in fuse_ring_ent_set_commit()
863 lockdep_assert_held(&queue->lock); in fuse_ring_ent_set_commit()
865 if (WARN_ON_ONCE(ent->state != FRRS_USERSPACE)) in fuse_ring_ent_set_commit()
866 return -EIO; in fuse_ring_ent_set_commit()
868 ent->state = FRRS_COMMIT; in fuse_ring_ent_set_commit()
869 list_move(&ent->list, &queue->ent_commit_queue); in fuse_ring_ent_set_commit()
878 const struct fuse_uring_cmd_req *cmd_req = io_uring_sqe_cmd(cmd->sqe); in fuse_uring_commit_fetch()
881 struct fuse_ring *ring = fc->ring; in fuse_uring_commit_fetch()
883 uint64_t commit_id = READ_ONCE(cmd_req->commit_id); in fuse_uring_commit_fetch()
884 unsigned int qid = READ_ONCE(cmd_req->qid); in fuse_uring_commit_fetch()
888 err = -ENOTCONN; in fuse_uring_commit_fetch()
892 if (qid >= ring->nr_queues) in fuse_uring_commit_fetch()
893 return -EINVAL; in fuse_uring_commit_fetch()
895 queue = ring->queues[qid]; in fuse_uring_commit_fetch()
898 fpq = &queue->fpq; in fuse_uring_commit_fetch()
900 if (!READ_ONCE(fc->connected) || READ_ONCE(queue->stopped)) in fuse_uring_commit_fetch()
903 spin_lock(&queue->lock); in fuse_uring_commit_fetch()
910 err = -ENOENT; in fuse_uring_commit_fetch()
912 pr_info("qid=%d commit_id %llu not found\n", queue->qid, in fuse_uring_commit_fetch()
914 spin_unlock(&queue->lock); in fuse_uring_commit_fetch()
917 list_del_init(&req->list); in fuse_uring_commit_fetch()
918 ent = req->ring_entry; in fuse_uring_commit_fetch()
919 req->ring_entry = NULL; in fuse_uring_commit_fetch()
924 queue->qid, commit_id, ent->state); in fuse_uring_commit_fetch()
925 spin_unlock(&queue->lock); in fuse_uring_commit_fetch()
926 req->out.h.error = err; in fuse_uring_commit_fetch()
927 clear_bit(FR_SENT, &req->flags); in fuse_uring_commit_fetch()
932 ent->cmd = cmd; in fuse_uring_commit_fetch()
933 spin_unlock(&queue->lock); in fuse_uring_commit_fetch()
941 * fuse requests would otherwise not get processed - committing in fuse_uring_commit_fetch()
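/*
 * Userspace view (a minimal sketch, not part of this file): after writing the
 * reply into its registered header/payload buffers, the server submits an
 * IORING_OP_URING_CMD SQE with cmd_op FUSE_IO_URING_CMD_COMMIT_AND_FETCH,
 * carrying the qid and the commit_id it received with the request. This
 * assumes liburing, a ring created with IORING_SETUP_SQE128, and that
 * <linux/fuse.h> provides FUSE_IO_URING_CMD_COMMIT_AND_FETCH and
 * struct fuse_uring_cmd_req; fuse_server_commit_and_fetch() is a made-up
 * helper name and ring setup/teardown is omitted.
 */
#include <liburing.h>
#include <linux/fuse.h>
#include <stdint.h>
#include <string.h>
#include <sys/uio.h>

static int fuse_server_commit_and_fetch(struct io_uring *uring, int fuse_dev_fd,
					unsigned int qid, uint64_t commit_id,
					struct iovec iov[2] /* [0] headers, [1] payload */)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(uring);
	struct fuse_uring_cmd_req *creq;

	if (!sqe)
		return -1;

	memset(sqe, 0, 2 * sizeof(*sqe));	/* SQE128 ring: each slot is 128 bytes */
	sqe->opcode = IORING_OP_URING_CMD;
	sqe->fd = fuse_dev_fd;			/* fd of the fuse device for this connection */
	sqe->cmd_op = FUSE_IO_URING_CMD_COMMIT_AND_FETCH;
	sqe->addr = (uint64_t)(uintptr_t)iov;	/* same iovec pair as at registration */
	sqe->len = 2;				/* FUSE_URING_IOV_SEGS */

	creq = (struct fuse_uring_cmd_req *)sqe->cmd;
	creq->qid = qid;
	creq->commit_id = commit_id;		/* from ring_ent_in_out of the answered request */

	return io_uring_submit(uring);
}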
955 for (qid = 0; qid < ring->nr_queues && ready; qid++) { in is_ring_ready()
959 queue = ring->queues[qid]; in is_ring_ready()
965 spin_lock(&queue->lock); in is_ring_ready()
966 if (list_empty(&queue->ent_avail_queue)) in is_ring_ready()
968 spin_unlock(&queue->lock); in is_ring_ready()
981 struct fuse_ring_queue *queue = ent->queue; in fuse_uring_do_register()
982 struct fuse_ring *ring = queue->ring; in fuse_uring_do_register()
983 struct fuse_conn *fc = ring->fc; in fuse_uring_do_register()
984 struct fuse_iqueue *fiq = &fc->iq; in fuse_uring_do_register()
988 spin_lock(&queue->lock); in fuse_uring_do_register()
989 ent->cmd = cmd; in fuse_uring_do_register()
991 spin_unlock(&queue->lock); in fuse_uring_do_register()
993 if (!ring->ready) { in fuse_uring_do_register()
994 bool ready = is_ring_ready(ring, queue->qid); in fuse_uring_do_register()
997 WRITE_ONCE(fiq->ops, &fuse_io_uring_ops); in fuse_uring_do_register()
998 WRITE_ONCE(ring->ready, true); in fuse_uring_do_register()
999 wake_up_all(&fc->blocked_waitq); in fuse_uring_do_register()
1005 * sqe->addr is a ptr to an iovec array, iov[0] has the headers, iov[1]
1011 struct iovec __user *uiov = u64_to_user_ptr(READ_ONCE(sqe->addr)); in fuse_uring_get_iovec_from_sqe()
1015 if (sqe->len != FUSE_URING_IOV_SEGS) in fuse_uring_get_iovec_from_sqe()
1016 return -EINVAL; in fuse_uring_get_iovec_from_sqe()
1034 struct fuse_ring *ring = queue->ring; in fuse_uring_create_ring_ent()
1040 err = fuse_uring_get_iovec_from_sqe(cmd->sqe, iov); in fuse_uring_create_ring_ent()
1047 err = -EINVAL; in fuse_uring_create_ring_ent()
1054 if (payload_size < ring->max_payload_sz) { in fuse_uring_create_ring_ent()
1060 err = -ENOMEM; in fuse_uring_create_ring_ent()
1065 INIT_LIST_HEAD(&ent->list); in fuse_uring_create_ring_ent()
1067 ent->queue = queue; in fuse_uring_create_ring_ent()
1068 ent->headers = iov[0].iov_base; in fuse_uring_create_ring_ent()
1069 ent->payload = iov[1].iov_base; in fuse_uring_create_ring_ent()
1071 atomic_inc(&ring->queue_refs); in fuse_uring_create_ring_ent()
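/*
 * Userspace view (a minimal sketch, not part of this file): the two buffers
 * validated above are supplied by the server as a two-element iovec array;
 * sqe->addr points at that array and sqe->len must be FUSE_URING_IOV_SEGS.
 * iov[0] holds the request/reply headers, iov[1] the payload, which must be
 * at least max_payload_sz bytes. It is assumed that <linux/fuse.h> provides
 * struct fuse_uring_req_header; fuse_server_alloc_ent_buffers() is a made-up
 * helper name.
 */
#include <stdlib.h>
#include <sys/uio.h>
#include <linux/fuse.h>

static int fuse_server_alloc_ent_buffers(struct iovec iov[2], size_t max_payload_sz)
{
	iov[0].iov_base = calloc(1, sizeof(struct fuse_uring_req_header));
	iov[0].iov_len  = sizeof(struct fuse_uring_req_header);
	iov[1].iov_base = malloc(max_payload_sz);	/* >= ring->max_payload_sz */
	iov[1].iov_len  = max_payload_sz;

	if (!iov[0].iov_base || !iov[1].iov_base) {
		free(iov[0].iov_base);
		free(iov[1].iov_base);
		return -1;
	}
	return 0;
}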
1082 const struct fuse_uring_cmd_req *cmd_req = io_uring_sqe_cmd(cmd->sqe); in fuse_uring_register()
1083 struct fuse_ring *ring = smp_load_acquire(&fc->ring); in fuse_uring_register()
1087 unsigned int qid = READ_ONCE(cmd_req->qid); in fuse_uring_register()
1089 err = -ENOMEM; in fuse_uring_register()
1096 if (qid >= ring->nr_queues) { in fuse_uring_register()
1098 return -EINVAL; in fuse_uring_register()
1101 queue = ring->queues[qid]; in fuse_uring_register()
1130 u32 cmd_op = cmd->cmd_op; in fuse_uring_cmd()
1140 return -EINVAL; in fuse_uring_cmd()
1142 fud = fuse_get_dev(cmd->file); in fuse_uring_cmd()
1144 pr_info_ratelimited("No fuse device found\n"); in fuse_uring_cmd()
1147 fc = fud->fc; in fuse_uring_cmd()
1149 /* Once a connection has io-uring enabled on it, it can't be disabled */ in fuse_uring_cmd()
1150 if (!enable_uring && !fc->io_uring) { in fuse_uring_cmd()
1151 pr_info_ratelimited("fuse-io-uring is disabled\n"); in fuse_uring_cmd()
1152 return -EOPNOTSUPP; in fuse_uring_cmd()
1155 if (fc->aborted) in fuse_uring_cmd()
1156 return -ECONNABORTED; in fuse_uring_cmd()
1157 if (!fc->connected) in fuse_uring_cmd()
1158 return -ENOTCONN; in fuse_uring_cmd()
1164 if (!fc->initialized) in fuse_uring_cmd()
1165 return -EAGAIN; in fuse_uring_cmd()
1173 fc->io_uring = 0; in fuse_uring_cmd()
1174 wake_up_all(&fc->blocked_waitq); in fuse_uring_cmd()
1187 return -EINVAL; in fuse_uring_cmd()
1190 return -EIOCBQUEUED; in fuse_uring_cmd()
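/*
 * Summary: the ->uring_cmd() handler validates the calling fuse device and
 * connection state (the enable_uring module parameter, fc->aborted,
 * fc->connected, fc->initialized) before dispatching on cmd->cmd_op; a failed
 * registration clears fc->io_uring and wakes blocked waiters. It returns
 * -EIOCBQUEUED because the command is completed asynchronously via
 * io_uring_cmd_done().
 */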
1196 struct fuse_ring_queue *queue = ent->queue; in fuse_uring_send()
1198 spin_lock(&queue->lock); in fuse_uring_send()
1199 ent->state = FRRS_USERSPACE; in fuse_uring_send()
1200 list_move_tail(&ent->list, &queue->ent_in_userspace); in fuse_uring_send()
1201 ent->cmd = NULL; in fuse_uring_send()
1202 spin_unlock(&queue->lock); in fuse_uring_send()
1208 * This prepares and sends the ring request in fuse-uring task context.
1209 * User buffers are not mapped yet - the application does not have permission
1210 * to write to them - this has to be executed in ring task context.
1216 struct fuse_ring_queue *queue = ent->queue; in fuse_uring_send_in_task()
1220 err = fuse_uring_prepare_send(ent, ent->fuse_req); in fuse_uring_send_in_task()
1226 err = -ECANCELED; in fuse_uring_send_in_task()
1239 if (WARN_ONCE(qid >= ring->nr_queues, in fuse_uring_task_to_queue()
1241 ring->nr_queues)) in fuse_uring_task_to_queue()
1244 queue = ring->queues[qid]; in fuse_uring_task_to_queue()
1252 struct io_uring_cmd *cmd = ent->cmd; in fuse_uring_dispatch_ent()
1259 void fuse_uring_queue_fuse_req(struct fuse_iqueue *fiq, struct fuse_req *req) in fuse_uring_queue_fuse_req()
1261 struct fuse_conn *fc = req->fm->fc; in fuse_uring_queue_fuse_req()
1262 struct fuse_ring *ring = fc->ring; in fuse_uring_queue_fuse_req()
1267 err = -EINVAL; in fuse_uring_queue_fuse_req()
1272 fuse_request_assign_unique(fiq, req); in fuse_uring_queue_fuse_req()
1274 spin_lock(&queue->lock); in fuse_uring_queue_fuse_req()
1275 err = -ENOTCONN; in fuse_uring_queue_fuse_req()
1276 if (unlikely(queue->stopped)) in fuse_uring_queue_fuse_req()
1279 set_bit(FR_URING, &req->flags); in fuse_uring_queue_fuse_req()
1280 req->ring_queue = queue; in fuse_uring_queue_fuse_req()
1281 ent = list_first_entry_or_null(&queue->ent_avail_queue, in fuse_uring_queue_fuse_req()
1286 list_add_tail(&req->list, &queue->fuse_req_queue); in fuse_uring_queue_fuse_req()
1287 spin_unlock(&queue->lock); in fuse_uring_queue_fuse_req()
1295 spin_unlock(&queue->lock); in fuse_uring_queue_fuse_req()
1297 req->out.h.error = err; in fuse_uring_queue_fuse_req()
1298 clear_bit(FR_PENDING, &req->flags); in fuse_uring_queue_fuse_req()
1304 struct fuse_conn *fc = req->fm->fc; in fuse_uring_queue_bq_req()
1305 struct fuse_ring *ring = fc->ring; in fuse_uring_queue_bq_req()
1313 spin_lock(&queue->lock); in fuse_uring_queue_bq_req()
1314 if (unlikely(queue->stopped)) { in fuse_uring_queue_bq_req()
1315 spin_unlock(&queue->lock); in fuse_uring_queue_bq_req()
1319 set_bit(FR_URING, &req->flags); in fuse_uring_queue_bq_req()
1320 req->ring_queue = queue; in fuse_uring_queue_bq_req()
1321 list_add_tail(&req->list, &queue->fuse_req_bg_queue); in fuse_uring_queue_bq_req()
1323 ent = list_first_entry_or_null(&queue->ent_avail_queue, in fuse_uring_queue_bq_req()
1325 spin_lock(&fc->bg_lock); in fuse_uring_queue_bq_req()
1326 fc->num_background++; in fuse_uring_queue_bq_req()
1327 if (fc->num_background == fc->max_background) in fuse_uring_queue_bq_req()
1328 fc->blocked = 1; in fuse_uring_queue_bq_req()
1330 spin_unlock(&fc->bg_lock); in fuse_uring_queue_bq_req()
1337 req = list_first_entry_or_null(&queue->fuse_req_queue, struct fuse_req, in fuse_uring_queue_bq_req()
1341 spin_unlock(&queue->lock); in fuse_uring_queue_bq_req()
1345 spin_unlock(&queue->lock); in fuse_uring_queue_bq_req()
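/*
 * Summary: background requests are appended to fuse_req_bg_queue and
 * accounted under fc->bg_lock (num_background, and fc->blocked once
 * max_background is reached). After the background list has been flushed
 * into the regular queue, the head of fuse_req_queue is re-read so that it
 * can be dispatched right away if an idle entry is available on
 * ent_avail_queue.
 */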
1353 struct fuse_ring_queue *queue = req->ring_queue; in fuse_uring_remove_pending_req()
1355 return fuse_remove_pending_req(req, &queue->lock); in fuse_uring_remove_pending_req()
1359 /* should be sent over io-uring as enhancement */
1363 * could be sent over io-uring, but interrupts should be rare,