Lines matching refs: sqd — every reference to struct io_sq_data *sqd in the io_uring SQPOLL code. Each match shows the original source line number, the code, and the enclosing function; "argument" and "local" mark the lines where the identifier is declared.
33 void io_sq_thread_unpark(struct io_sq_data *sqd) in io_sq_thread_unpark() argument
34 __releases(&sqd->lock) in io_sq_thread_unpark()
36 WARN_ON_ONCE(sqpoll_task_locked(sqd) == current); in io_sq_thread_unpark()
42 clear_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state); in io_sq_thread_unpark()
43 if (atomic_dec_return(&sqd->park_pending)) in io_sq_thread_unpark()
44 set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state); in io_sq_thread_unpark()
45 mutex_unlock(&sqd->lock); in io_sq_thread_unpark()
46 wake_up(&sqd->wait); in io_sq_thread_unpark()
49 void io_sq_thread_park(struct io_sq_data *sqd) in io_sq_thread_park() argument
50 __acquires(&sqd->lock) in io_sq_thread_park()
54 atomic_inc(&sqd->park_pending); in io_sq_thread_park()
55 set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state); in io_sq_thread_park()
56 mutex_lock(&sqd->lock); in io_sq_thread_park()
58 tsk = sqpoll_task_locked(sqd); in io_sq_thread_park()
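The park/unpark pair above is the mechanism the rest of this file leans on: a parker bumps park_pending, sets IO_SQ_THREAD_SHOULD_PARK, and takes sqd->lock; unpark clears the bit but re-arms it when atomic_dec_return() says other parkers are still queued, then drops the lock and wakes the thread. Below is a minimal userspace model of that dec-and-rearm logic using pthreads and C11 atomics; struct sq_data, sq_park, and sq_unpark are my names, not the kernel's.

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>

    struct sq_data {
        pthread_mutex_t lock;
        atomic_int park_pending;
        atomic_bool should_park;    /* stands in for IO_SQ_THREAD_SHOULD_PARK */
    };

    /* Parker side: queue a park request, then take the lock. */
    void sq_park(struct sq_data *sqd)
    {
        atomic_fetch_add(&sqd->park_pending, 1);
        atomic_store(&sqd->should_park, true);
        pthread_mutex_lock(&sqd->lock);
    }

    /* Drop our request; re-arm the flag if other parkers are still queued. */
    void sq_unpark(struct sq_data *sqd)
    {
        atomic_store(&sqd->should_park, false);
        if (atomic_fetch_sub(&sqd->park_pending, 1) - 1)
            atomic_store(&sqd->should_park, true);
        pthread_mutex_unlock(&sqd->lock);
        /* the kernel additionally wakes the thread: wake_up(&sqd->wait) */
    }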
65 void io_sq_thread_stop(struct io_sq_data *sqd) in io_sq_thread_stop() argument
69 WARN_ON_ONCE(test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state)); in io_sq_thread_stop()
71 set_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state); in io_sq_thread_stop()
72 mutex_lock(&sqd->lock); in io_sq_thread_stop()
73 tsk = sqpoll_task_locked(sqd); in io_sq_thread_stop()
78 mutex_unlock(&sqd->lock); in io_sq_thread_stop()
79 wait_for_completion(&sqd->exited); in io_sq_thread_stop()
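io_sq_thread_stop() sets IO_SQ_THREAD_SHOULD_STOP, pokes the thread under sqd->lock (the signalling at the elided lines stays elided here), and then blocks on sqd->exited until io_sq_thread() calls complete() on its way out. A sketch of that completion primitive built from a mutex, a condvar, and a flag, assuming userspace pthreads rather than the kernel's struct completion:

    #include <pthread.h>
    #include <stdbool.h>

    /* Userspace stand-in for the kernel's struct completion. */
    struct completion {
        pthread_mutex_t lock;
        pthread_cond_t cond;
        bool done;
    };

    void complete(struct completion *c)
    {
        pthread_mutex_lock(&c->lock);
        c->done = true;                     /* one-shot: stays set for waiters */
        pthread_cond_broadcast(&c->cond);
        pthread_mutex_unlock(&c->lock);
    }

    void wait_for_completion(struct completion *c)
    {
        pthread_mutex_lock(&c->lock);
        while (!c->done)
            pthread_cond_wait(&c->cond, &c->lock);
        pthread_mutex_unlock(&c->lock);
    }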
82 void io_put_sq_data(struct io_sq_data *sqd) in io_put_sq_data() argument
84 if (refcount_dec_and_test(&sqd->refs)) { in io_put_sq_data()
85 WARN_ON_ONCE(atomic_read(&sqd->park_pending)); in io_put_sq_data()
87 io_sq_thread_stop(sqd); in io_put_sq_data()
88 kfree(sqd); in io_put_sq_data()
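io_put_sq_data() is the usual last-reference pattern: only the caller whose decrement hits zero tears the object down, stopping the thread before freeing. The same shape with C11 atomics; names are mine and the thread-stop step is reduced to a comment.

    #include <stdatomic.h>
    #include <stdlib.h>

    struct sq_data { atomic_int refs; /* ... */ };

    /* Tear down only on the last reference, like refcount_dec_and_test(). */
    void put_sq_data(struct sq_data *sqd)
    {
        if (atomic_fetch_sub_explicit(&sqd->refs, 1,
                                      memory_order_acq_rel) == 1) {
            /* last ref: the kernel stops the poller thread first */
            free(sqd);
        }
    }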
92 static __cold void io_sqd_update_thread_idle(struct io_sq_data *sqd) in io_sqd_update_thread_idle() argument
97 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) in io_sqd_update_thread_idle()
99 sqd->sq_thread_idle = sq_thread_idle; in io_sqd_update_thread_idle()
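The elided body (line 98) folds each attached ring's idle setting into the one value stored at line 99; as far as I can tell, mainline takes the maximum across the list, so the shared thread idles long enough for the most patient ring. Sketched with a plain array standing in for walking sqd->ctx_list:

    #include <stddef.h>

    /* Fold per-ring idle settings into one shared value (assumed: max). */
    unsigned int update_thread_idle(const unsigned int *ctx_idle, size_t nr)
    {
        unsigned int idle = 0;

        for (size_t i = 0; i < nr; i++)
            if (ctx_idle[i] > idle)
                idle = ctx_idle[i];
        return idle;
    }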
104 struct io_sq_data *sqd = ctx->sq_data; in io_sq_thread_finish() local
106 if (sqd) { in io_sq_thread_finish()
107 io_sq_thread_park(sqd); in io_sq_thread_finish()
109 io_sqd_update_thread_idle(sqd); in io_sq_thread_finish()
110 io_sq_thread_unpark(sqd); in io_sq_thread_finish()
112 io_put_sq_data(sqd); in io_sq_thread_finish()
120 struct io_sq_data *sqd; in io_attach_sq_data() local
129 sqd = ctx_attach->sq_data; in io_attach_sq_data()
130 if (!sqd) in io_attach_sq_data()
132 if (sqd->task_tgid != current->tgid) in io_attach_sq_data()
135 refcount_inc(&sqd->refs); in io_attach_sq_data()
136 return sqd; in io_attach_sq_data()
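Attaching only shares an sq_data within one thread group: the task_tgid check refuses cross-process attach with -EPERM, and a ring with no sq_data hits the elided error path. A userspace approximation that swaps the kernel's ERR_PTR() convention for NULL-plus-errno; attach_sq_data and its struct are mine.

    #include <errno.h>
    #include <stdatomic.h>
    #include <stddef.h>
    #include <unistd.h>

    struct sq_data {
        atomic_int refs;
        pid_t task_tgid;
    };

    /* Share an existing sq_data only inside the owner's thread group. */
    struct sq_data *attach_sq_data(struct sq_data *existing)
    {
        if (!existing) {
            errno = ENXIO;          /* ring to attach to has no sq_data */
            return NULL;
        }
        if (existing->task_tgid != getpid()) {
            errno = EPERM;          /* cross-process attach is refused */
            return NULL;
        }
        atomic_fetch_add(&existing->refs, 1);
        return existing;
    }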
142 struct io_sq_data *sqd; in io_get_sq_data() local
146 sqd = io_attach_sq_data(p); in io_get_sq_data()
147 if (!IS_ERR(sqd)) { in io_get_sq_data()
149 return sqd; in io_get_sq_data()
152 if (PTR_ERR(sqd) != -EPERM) in io_get_sq_data()
153 return sqd; in io_get_sq_data()
156 sqd = kzalloc_obj(*sqd); in io_get_sq_data()
157 if (!sqd) in io_get_sq_data()
160 atomic_set(&sqd->park_pending, 0); in io_get_sq_data()
161 refcount_set(&sqd->refs, 1); in io_get_sq_data()
162 INIT_LIST_HEAD(&sqd->ctx_list); in io_get_sq_data()
163 mutex_init(&sqd->lock); in io_get_sq_data()
164 init_waitqueue_head(&sqd->wait); in io_get_sq_data()
165 init_completion(&sqd->exited); in io_get_sq_data()
166 return sqd; in io_get_sq_data()
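When attach fails with anything other than -EPERM the error is propagated; otherwise a fresh sq_data is allocated and its fields initialized as in lines 160-165. A userspace mirror of that constructor, with pthread primitives standing in for the kernel mutex, waitqueue, and completion:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdlib.h>

    /* Mirrors the fields set up at lines 160-165; the ctx list and the
     * exited completion are omitted for brevity. */
    struct sq_data {
        atomic_int park_pending;
        atomic_int refs;
        pthread_mutex_t lock;           /* sqd->lock */
        pthread_cond_t wait;            /* sqd->wait, as a condvar */
    };

    struct sq_data *get_sq_data(void)
    {
        struct sq_data *sqd = calloc(1, sizeof(*sqd));

        if (!sqd)
            return NULL;
        atomic_init(&sqd->park_pending, 0);
        atomic_init(&sqd->refs, 1);
        pthread_mutex_init(&sqd->lock, NULL);
        pthread_cond_init(&sqd->wait, NULL);
        return sqd;
    }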
169 static inline bool io_sqd_events_pending(struct io_sq_data *sqd) in io_sqd_events_pending() argument
171 return READ_ONCE(sqd->state); in io_sqd_events_pending()
188 static void io_sq_update_worktime(struct io_sq_data *sqd, struct io_sq_time *ist) in io_sq_update_worktime() argument
193 sqd->work_time += io_sq_cpu_usec(current) - ist->usec; in io_sq_update_worktime()
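Work-time accounting charges only the CPU actually consumed between the start of a work span (ist->usec) and its end, via io_sq_cpu_usec(). A sketch of the same bookkeeping using CLOCK_THREAD_CPUTIME_ID, which is my assumption for a userspace equivalent, not what the kernel helper does internally:

    #define _POSIX_C_SOURCE 200809L
    #include <stdint.h>
    #include <time.h>

    /* Thread CPU time in microseconds; stand-in for io_sq_cpu_usec(). */
    static uint64_t thread_cpu_usec(void)
    {
        struct timespec ts;

        clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts);
        return (uint64_t)ts.tv_sec * 1000000 + (uint64_t)ts.tv_nsec / 1000;
    }

    struct sq_time { uint64_t usec; };      /* like struct io_sq_time */

    void work_span_begin(struct sq_time *ist)
    {
        ist->usec = thread_cpu_usec();
    }

    /* CPU burned in the span; the caller folds it into sqd->work_time. */
    uint64_t work_span_end(const struct sq_time *ist)
    {
        return thread_cpu_usec() - ist->usec;
    }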
204 static int __io_sq_thread(struct io_ring_ctx *ctx, struct io_sq_data *sqd, in __io_sq_thread() argument
245 static bool io_sqd_handle_event(struct io_sq_data *sqd) in io_sqd_handle_event() argument
250 if (test_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state) || in io_sqd_handle_event()
252 mutex_unlock(&sqd->lock); in io_sqd_handle_event()
255 wait_event(sqd->wait, !atomic_read(&sqd->park_pending)); in io_sqd_handle_event()
256 mutex_lock(&sqd->lock); in io_sqd_handle_event()
257 sqd->sq_cpu = raw_smp_processor_id(); in io_sqd_handle_event()
259 return did_sig || test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state); in io_sqd_handle_event()
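io_sqd_handle_event() is the thread's side of the park handshake: drop sqd->lock so parkers can do their work, sleep on sqd->wait until park_pending drains to zero, retake the lock, refresh sq_cpu (parkers may have migrated the thread), and report whether a stop was requested. A loose pthread model with the signal handling reduced to a comment; names are mine.

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>

    struct sq_data {
        pthread_mutex_t lock;
        pthread_cond_t wait;
        atomic_int park_pending;
        atomic_bool should_stop;
    };

    /* Thread side of the handshake: let parkers in, wait them out. */
    bool sqd_handle_event(struct sq_data *sqd)
    {
        pthread_mutex_unlock(&sqd->lock);
        /* the kernel checks for pending signals here (did_sig) */
        pthread_mutex_lock(&sqd->lock);
        while (atomic_load(&sqd->park_pending))
            pthread_cond_wait(&sqd->wait, &sqd->lock);
        /* kernel also refreshes sqd->sq_cpu after being parked/migrated */
        return atomic_load(&sqd->should_stop);
    }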
296 struct io_sq_data *sqd = data; in io_sq_thread() local
304 mutex_lock(&sqd->lock); in io_sq_thread()
305 rcu_assign_pointer(sqd->thread, NULL); in io_sq_thread()
307 mutex_unlock(&sqd->lock); in io_sq_thread()
311 snprintf(buf, sizeof(buf), "iou-sqp-%d", sqd->task_pid); in io_sq_thread()
315 sqd->task_pid = current->pid; in io_sq_thread()
317 if (sqd->sq_cpu != -1) { in io_sq_thread()
318 set_cpus_allowed_ptr(current, cpumask_of(sqd->sq_cpu)); in io_sq_thread()
321 sqd->sq_cpu = raw_smp_processor_id(); in io_sq_thread()
332 mutex_lock(&sqd->lock); in io_sq_thread()
337 if (io_sqd_events_pending(sqd) || signal_pending(current)) { in io_sq_thread()
338 if (io_sqd_handle_event(sqd)) in io_sq_thread()
340 timeout = jiffies + sqd->sq_thread_idle; in io_sq_thread()
343 cap_entries = !list_is_singular(&sqd->ctx_list); in io_sq_thread()
344 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) { in io_sq_thread()
345 int ret = __io_sq_thread(ctx, sqd, cap_entries, &ist); in io_sq_thread()
353 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) { in io_sq_thread()
360 io_sq_update_worktime(sqd, &ist); in io_sq_thread()
364 timeout = jiffies + sqd->sq_thread_idle; in io_sq_thread()
366 mutex_unlock(&sqd->lock); in io_sq_thread()
368 mutex_lock(&sqd->lock); in io_sq_thread()
369 sqd->sq_cpu = raw_smp_processor_id(); in io_sq_thread()
374 prepare_to_wait(&sqd->wait, &wait, TASK_INTERRUPTIBLE); in io_sq_thread()
375 if (!io_sqd_events_pending(sqd) && !io_sq_tw_pending(retry_list)) { in io_sq_thread()
378 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) { in io_sq_thread()
400 mutex_unlock(&sqd->lock); in io_sq_thread()
402 mutex_lock(&sqd->lock); in io_sq_thread()
403 sqd->sq_cpu = raw_smp_processor_id(); in io_sq_thread()
405 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) in io_sq_thread()
410 finish_wait(&sqd->wait, &wait); in io_sq_thread()
411 timeout = jiffies + sqd->sq_thread_idle; in io_sq_thread()
417 io_uring_cancel_generic(true, sqd); in io_sq_thread()
418 rcu_assign_pointer(sqd->thread, NULL); in io_sq_thread()
420 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) in io_sq_thread()
423 mutex_unlock(&sqd->lock); in io_sq_thread()
425 complete(&sqd->exited); in io_sq_thread()
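Lines 296-425 are the poller itself: name the thread, optionally pin it, then loop under sqd->lock submitting from every attached ring, re-arming the idle deadline whenever work turns up, yielding through the busy-poll phase, and finally sleeping on sqd->wait once the idle period elapses; on the way out it cancels, clears sqd->thread, and completes sqd->exited. The skeleton below is a deliberately loose userspace model of that control flow, with the park/stop event handling and the per-ring submit pass stubbed out (run_ctx_list() is hypothetical):

    #include <pthread.h>
    #include <sched.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <time.h>

    struct sq_data {
        pthread_mutex_t lock;
        pthread_cond_t wait;
        atomic_bool should_stop;
        long idle_ns;                   /* plays sq_thread_idle; < 1 second */
    };

    /* Hypothetical submit pass over all attached rings; true if any work. */
    static bool run_ctx_list(struct sq_data *sqd)
    {
        (void)sqd;
        return false;
    }

    static void arm_deadline(struct timespec *d, long idle_ns)
    {
        clock_gettime(CLOCK_REALTIME, d);
        d->tv_nsec += idle_ns;
        if (d->tv_nsec >= 1000000000L) {
            d->tv_sec++;
            d->tv_nsec -= 1000000000L;
        }
    }

    static bool past(const struct timespec *d)
    {
        struct timespec now;

        clock_gettime(CLOCK_REALTIME, &now);
        return now.tv_sec > d->tv_sec ||
               (now.tv_sec == d->tv_sec && now.tv_nsec >= d->tv_nsec);
    }

    void *sq_thread(void *data)
    {
        struct sq_data *sqd = data;
        struct timespec deadline;

        pthread_mutex_lock(&sqd->lock);
        arm_deadline(&deadline, sqd->idle_ns);
        while (!atomic_load(&sqd->should_stop)) {
            /* park/stop event handling (previous sketch) elided */
            if (run_ctx_list(sqd)) {
                arm_deadline(&deadline, sqd->idle_ns);  /* work: reset idle */
                continue;
            }
            if (!past(&deadline)) {
                /* busy-poll phase: mirrors the unlock/cond_resched()/lock
                 * dance at lines 366-369 */
                pthread_mutex_unlock(&sqd->lock);
                sched_yield();
                pthread_mutex_lock(&sqd->lock);
                continue;
            }
            /* idle period elapsed with no work: sleep until a wakeup */
            pthread_cond_wait(&sqd->wait, &sqd->lock);
            arm_deadline(&deadline, sqd->idle_ns);
        }
        pthread_mutex_unlock(&sqd->lock);
        /* kernel: cancel requests, clear sqd->thread, complete(&sqd->exited) */
        return NULL;
    }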
462 struct io_sq_data *sqd; in io_sq_offload_create() local
469 sqd = io_get_sq_data(p, &attached); in io_sq_offload_create()
470 if (IS_ERR(sqd)) { in io_sq_offload_create()
471 ret = PTR_ERR(sqd); in io_sq_offload_create()
476 ctx->sq_data = sqd; in io_sq_offload_create()
481 io_sq_thread_park(sqd); in io_sq_offload_create()
482 list_add(&ctx->sqd_list, &sqd->ctx_list); in io_sq_offload_create()
483 io_sqd_update_thread_idle(sqd); in io_sq_offload_create()
485 ret = (attached && !sqd->thread) ? -ENXIO : 0; in io_sq_offload_create()
486 io_sq_thread_unpark(sqd); in io_sq_offload_create()
510 sqd->sq_cpu = cpu; in io_sq_offload_create()
512 sqd->sq_cpu = -1; in io_sq_offload_create()
515 sqd->task_pid = current->pid; in io_sq_offload_create()
516 sqd->task_tgid = current->tgid; in io_sq_offload_create()
517 tsk = create_io_thread(io_sq_thread, sqd, NUMA_NO_NODE); in io_sq_offload_create()
523 mutex_lock(&sqd->lock); in io_sq_offload_create()
524 rcu_assign_pointer(sqd->thread, tsk); in io_sq_offload_create()
525 mutex_unlock(&sqd->lock); in io_sq_offload_create()
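Setup resolves or allocates the sq_data, links the ctx into sqd->ctx_list under park, records the requested CPU (or -1 when no pinning was asked for), and only publishes the newly created task through rcu_assign_pointer() under sqd->lock, so readers never observe a half-constructed thread. The same create-then-publish order with pthreads; offload_create is my name and it assumes the sq_thread() from the previous sketch.

    #include <pthread.h>
    #include <unistd.h>

    struct sq_data {
        pthread_mutex_t lock;
        pthread_t thread;               /* published like sqd->thread */
        int sq_cpu;                     /* -1 = no pinning requested */
        pid_t task_pid;
    };

    void *sq_thread(void *data);        /* the worker from the loop sketch */

    /* Create the poller first, publish the handle under the lock second. */
    int offload_create(struct sq_data *sqd, int cpu)
    {
        pthread_t tsk;

        sqd->sq_cpu = cpu;              /* -1 when SQ_AFF was not set */
        sqd->task_pid = getpid();
        if (pthread_create(&tsk, NULL, sq_thread, sqd))
            return -1;

        pthread_mutex_lock(&sqd->lock);
        sqd->thread = tsk;              /* cf. rcu_assign_pointer() */
        pthread_mutex_unlock(&sqd->lock);
        return 0;
    }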
548 struct io_sq_data *sqd = ctx->sq_data; in io_sqpoll_wq_cpu_affinity() local
551 if (sqd) { in io_sqpoll_wq_cpu_affinity()
554 io_sq_thread_park(sqd); in io_sqpoll_wq_cpu_affinity()
556 tsk = sqpoll_task_locked(sqd); in io_sqpoll_wq_cpu_affinity()
559 io_sq_thread_unpark(sqd); in io_sqpoll_wq_cpu_affinity()
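Changing the poller's affinity follows the same discipline as every other mutation: park first, fetch the task under the lock, apply the mask, unpark. A glibc-specific sketch of the mask-application step only, using pthread_setaffinity_np() as an assumed stand-in for the kernel's elided set-affinity call:

    #define _GNU_SOURCE
    #include <pthread.h>
    #include <sched.h>

    /* Apply a one-CPU mask to the poller; call it only between park and
     * unpark so the update cannot race with the thread's own work. */
    int set_sqpoll_affinity(pthread_t tsk, int cpu)
    {
        cpu_set_t mask;

        CPU_ZERO(&mask);
        CPU_SET(cpu, &mask);
        return pthread_setaffinity_np(tsk, sizeof(mask), &mask);
    }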