// SPDX-License-Identifier: GPL-2.0
#ifndef IOU_TW_H
#define IOU_TW_H

#include <linux/sched.h>
#include <linux/percpu-refcount.h>
#include <linux/lockdep.h>
#include <linux/resume_user_mode.h>
#include <linux/task_work.h>
#include <linux/io_uring_types.h>

#define IO_LOCAL_TW_DEFAULT_MAX		20

/*
 * No waiters. It's larger than any valid value of the tw counter
 * so that tests against ->cq_wait_nr would fail and skip wake_up().
 */
#define IO_CQ_WAKE_INIT			(-1U)
/* Forced wakeup if there is a waiter regardless of ->cq_wait_nr */
#define IO_CQ_WAKE_FORCE		(IO_CQ_WAKE_INIT >> 1)

/*
 * Terminate the request if any of these conditions is true:
 *
 * 1) It's being executed by the original task, but that task is marked
 *    with PF_EXITING as it's exiting.
 * 2) PF_KTHREAD is set, in which case the invoker of the task_work is
 *    our fallback task_work.
 * 3) The ring has been closed and is going away.
 */
static inline bool io_should_terminate_tw(struct io_ring_ctx *ctx)
{
	return (current->flags & (PF_EXITING | PF_KTHREAD)) ||
	       percpu_ref_is_dying(&ctx->refs);
}

void io_req_task_work_add_remote(struct io_kiocb *req, unsigned flags);
struct llist_node *io_handle_tw_list(struct llist_node *node, unsigned int *count,
				     unsigned int max_entries);
void tctx_task_work(struct callback_head *cb);
int io_run_local_work(struct io_ring_ctx *ctx, int min_events, int max_events);
int io_run_task_work_sig(struct io_ring_ctx *ctx);

__cold void io_fallback_req_func(struct work_struct *work);
__cold void io_move_task_work_from_local(struct io_ring_ctx *ctx);
int io_run_local_work_locked(struct io_ring_ctx *ctx, int min_events);

void io_req_local_work_add(struct io_kiocb *req, unsigned flags);
void io_req_normal_work_add(struct io_kiocb *req);
struct llist_node *tctx_task_work_run(struct io_uring_task *tctx, unsigned int max_entries,
				      unsigned int *count);

static inline void __io_req_task_work_add(struct io_kiocb *req, unsigned flags)
{
	if (req->ctx->flags & IORING_SETUP_DEFER_TASKRUN)
		io_req_local_work_add(req, flags);
	else
		io_req_normal_work_add(req);
}

static inline void io_req_task_work_add(struct io_kiocb *req)
{
	__io_req_task_work_add(req, 0);
}
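
/*
 * Illustrative sketch only, not an existing io_uring helper: a completion
 * path typically sets the request's task_work callback and then queues it
 * with io_req_task_work_add(), which routes it to the ring-local list for
 * IORING_SETUP_DEFER_TASKRUN rings and to generic task_work otherwise.
 * The helper name below is hypothetical, and the callback type is assumed
 * to match io_task_work.func as used with io_tw_token_t in this header.
 */
static inline void io_tw_example_queue(struct io_kiocb *req,
				       void (*cb)(struct io_kiocb *req, io_tw_token_t tw))
{
	/* run later in task context by the task_work runner */
	req->io_task_work.func = cb;
	/* dispatch based on how the ring was set up */
	io_req_task_work_add(req);
}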

static inline int io_run_task_work(void)
{
	bool ret = false;

	/*
	 * Always check-and-clear the task_work notification signal. With how
	 * signaling works for task_work, we can find it set with nothing to
	 * run. We need to clear it for that case, like get_signal() does.
	 */
	if (test_thread_flag(TIF_NOTIFY_SIGNAL))
		clear_notify_signal();
	/*
	 * PF_IO_WORKER never returns to userspace, so check here if we have
	 * notify work that needs processing.
	 */
	if (current->flags & PF_IO_WORKER) {
		if (test_thread_flag(TIF_NOTIFY_RESUME)) {
			__set_current_state(TASK_RUNNING);
			resume_user_mode_work(NULL);
		}
		if (current->io_uring) {
			unsigned int count = 0;

			__set_current_state(TASK_RUNNING);
			tctx_task_work_run(current->io_uring, UINT_MAX, &count);
			if (count)
				ret = true;
		}
	}
	if (task_work_pending(current)) {
		__set_current_state(TASK_RUNNING);
		task_work_run();
		ret = true;
	}

	return ret;
}

static inline bool io_local_work_pending(struct io_ring_ctx *ctx)
{
	return !llist_empty(&ctx->work_llist) || !llist_empty(&ctx->retry_llist);
}

static inline bool io_task_work_pending(struct io_ring_ctx *ctx)
{
	return task_work_pending(current) || io_local_work_pending(ctx);
}

static inline void io_tw_lock(struct io_ring_ctx *ctx, io_tw_token_t tw)
{
	lockdep_assert_held(&ctx->uring_lock);
}

static inline bool io_allowed_defer_tw_run(struct io_ring_ctx *ctx)
{
	return likely(ctx->submitter_task == current);
}

static inline bool io_allowed_run_tw(struct io_ring_ctx *ctx)
{
	return likely(!(ctx->flags & IORING_SETUP_DEFER_TASKRUN) ||
		      ctx->submitter_task == current);
}

#endif
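
#if 0	/* illustrative sketch only, compiled out */
/*
 * A minimal example of how a waiter might combine the helpers above: run
 * generic task_work and, when this task is allowed to, the ring-local work
 * list, until nothing is left pending or no progress can be made. The loop
 * shape and the event counts passed to io_run_local_work() are assumptions
 * for illustration, not an existing kernel code path.
 */
static void io_example_drain_tw(struct io_ring_ctx *ctx)
{
	while (io_task_work_pending(ctx)) {
		bool ran = io_run_task_work();

		/* ring-local work is only runnable from the submitter task */
		if (io_allowed_run_tw(ctx) && io_local_work_pending(ctx))
			ran |= io_run_local_work(ctx, 1, IO_LOCAL_TW_DEFAULT_MAX) > 0;
		/* nothing this task can make progress on */
		if (!ran)
			break;
	}
}
#endif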