// SPDX-License-Identifier: GPL-2.0
/*
 * Non-contiguous excerpts from the io_uring SQPOLL implementation
 * (io_uring/sqpoll.c). Elided lines are marked "...".
 */

/* io_sq_thread_unpark() — __releases(&sqd->lock) */
	/* clear first: a conditional clear_bit() would race with new parkers */
	clear_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
	if (atomic_dec_return(&sqd->park_pending))
		set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
	mutex_unlock(&sqd->lock);
	wake_up(&sqd->wait);

/* io_sq_thread_park() — __acquires(&sqd->lock) */
	atomic_inc(&sqd->park_pending);
	set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
	mutex_lock(&sqd->lock);

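/*
 * Added sketch: park/unpark bracket external modification of SQPOLL state.
 * io_sq_thread_park() asks the thread to pause and returns with sqd->lock
 * held; io_sq_thread_unpark() releases the lock and lets the thread resume.
 * The helper below is hypothetical, for illustration only:
 */
static void example_modify_sqd(struct io_sq_data *sqd)
{
	io_sq_thread_park(sqd);		/* returns with sqd->lock held */
	/* ... safely modify sqd or its attached contexts here ... */
	io_sq_thread_unpark(sqd);	/* drops sqd->lock, wakes the thread */
}
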
/* io_sq_thread_stop() */
	WARN_ON_ONCE(test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state));

	set_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
	mutex_lock(&sqd->lock);
	...	/* wake the thread if it is still running */
	mutex_unlock(&sqd->lock);
	wait_for_completion(&sqd->exited);

/* io_put_sq_data() — the final put stops the thread and frees sqd */
	if (refcount_dec_and_test(&sqd->refs)) {
		WARN_ON_ONCE(atomic_read(&sqd->park_pending));
		...

/* io_sqd_update_thread_idle() — idle period is the max over all attached rings */
	list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
		sq_thread_idle = max(sq_thread_idle, ctx->sq_thread_idle);
	sqd->sq_thread_idle = sq_thread_idle;

/* io_sq_thread_finish() — detach a ring from its SQ thread */
	struct io_sq_data *sqd = ctx->sq_data;
	...
	list_del_init(&ctx->sqd_list);
	...
	ctx->sq_data = NULL;

/* io_attach_sq_data() — share an existing SQ thread via IORING_SETUP_ATTACH_WQ */
	CLASS(fd, f)(p->wq_fd);

	if (fd_empty(f))
		return ERR_PTR(-ENXIO);
	if (!io_is_uring_fops(fd_file(f)))
		return ERR_PTR(-EINVAL);

	ctx_attach = fd_file(f)->private_data;
	sqd = ctx_attach->sq_data;
	if (!sqd)
		return ERR_PTR(-EINVAL);
	if (sqd->task_tgid != current->tgid)
		return ERR_PTR(-EPERM);

	refcount_inc(&sqd->refs);
	return sqd;

/* io_get_sq_data() — attach to an existing sqd or allocate a fresh one */
	if (p->flags & IORING_SETUP_ATTACH_WQ) {
		sqd = io_attach_sq_data(p);
		...
		/* only fall through to create a new sqd on -EPERM */
		if (PTR_ERR(sqd) != -EPERM)
			return sqd;
	}

	sqd = kzalloc(sizeof(*sqd), GFP_KERNEL);
	if (!sqd)
		return ERR_PTR(-ENOMEM);

	atomic_set(&sqd->park_pending, 0);
	refcount_set(&sqd->refs, 1);
	INIT_LIST_HEAD(&sqd->ctx_list);
	mutex_init(&sqd->lock);
	init_waitqueue_head(&sqd->wait);
	init_completion(&sqd->exited);

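/*
 * Added illustration: from userspace, sharing one SQ thread between two
 * rings uses IORING_SETUP_ATTACH_WQ with wq_fd naming the first ring.
 * Minimal sketch with a raw syscall wrapper (glibc provides none); error
 * handling elided, queue depth of 128 is an arbitrary example:
 */
#include <linux/io_uring.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int sys_io_uring_setup(unsigned entries, struct io_uring_params *p)
{
	return (int)syscall(__NR_io_uring_setup, entries, p);
}

/* create a second SQPOLL ring that shares the first ring's SQ thread */
static int attach_ring(int first_ring_fd)
{
	struct io_uring_params p;

	memset(&p, 0, sizeof(p));
	p.flags = IORING_SETUP_SQPOLL | IORING_SETUP_ATTACH_WQ;
	p.wq_fd = first_ring_fd;
	return sys_io_uring_setup(128, &p);
}
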
/* io_sqd_events_pending() */
	return READ_ONCE(sqd->state);

/* member of the worktime-accounting struct (io_sq_time): */
	bool started;

/* io_sq_update_worktime() — close a timing interval, accumulate CPU usec */
	if (!ist->started)
		return;
	ist->started = false;
	sqd->work_time += io_sq_cpu_usec(current) - ist->usec;

/* io_sq_start_worktime() — open a timing interval (idempotent) */
	if (ist->started)
		return;
	ist->started = true;
	ist->usec = io_sq_cpu_usec(current);

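/*
 * Added sketch: the two helpers above bracket periods of real work so that
 * sqd->work_time counts only CPU time spent doing ring work, not idle
 * spinning. Hypothetical caller shape, assuming the member shown earlier
 * belongs to struct io_sq_time:
 */
static void example_do_ring_work(struct io_sq_data *sqd)
{
	struct io_sq_time ist = { };

	io_sq_start_worktime(&ist);		/* no-op if already started */
	/* ... submit SQEs, reap completions ... */
	io_sq_update_worktime(sqd, &ist);	/* adds the elapsed CPU usec once */
}
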
/* __io_sq_thread() — submit pending SQEs for one ring */
	/* if we're handling multiple rings, cap submit size for fairness */
	...
	if (to_submit || !wq_list_empty(&ctx->iopoll_list)) {
		const struct cred *creds = NULL;

		if (ctx->sq_creds != current_cred())
			creds = override_creds(ctx->sq_creds);

		mutex_lock(&ctx->uring_lock);
		if (!wq_list_empty(&ctx->iopoll_list))
			io_do_iopoll(ctx, true);
		/* don't submit if the ring is dying or still R_DISABLED */
		if (to_submit && likely(!percpu_ref_is_dying(&ctx->refs)) &&
		    !(ctx->flags & IORING_SETUP_R_DISABLED))
			ret = io_submit_sqes(ctx, to_submit);
		mutex_unlock(&ctx->uring_lock);

		/* wake writers blocked waiting for SQ ring space */
		if (to_submit && wq_has_sleeper(&ctx->sqo_sq_wait))
			wake_up(&ctx->sqo_sq_wait);
		...
	}

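/*
 * Added sketch: the override_creds()/revert_creds() pairing used above is
 * the standard kernel pattern for temporarily assuming another credential
 * set; hypothetical helper shown purely for illustration:
 */
static void example_with_creds(const struct cred *new_creds)
{
	const struct cred *old = override_creds(new_creds);

	/* ... work that must run under new_creds ... */

	revert_creds(old);	/* restore the caller's credentials */
}
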
/* io_sqd_handle_event() — handle park requests and signals from the loop */
	if (test_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state) ||
	    signal_pending(current)) {
		mutex_unlock(&sqd->lock);
		...
		/* stay parked until every parker has called unpark */
		wait_event(sqd->wait, !atomic_read(&sqd->park_pending));
		mutex_lock(&sqd->lock);
		sqd->sq_cpu = raw_smp_processor_id();
	}
	return did_sig || test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);

/* io_sq_tw() — run deferred task_work, bounded by max_entries */
	struct io_uring_task *tctx = current->io_uring;
	...
	max_entries -= count;

/* io_sq_tw_pending() — is there task_work left to run? */
	struct io_uring_task *tctx = current->io_uring;

	return retry_list || !llist_empty(&tctx->task_list);

/* io_sq_thread() — main loop of the "iou-sqp-<pid>" kernel thread */
	/* offload context creation failed, just exit */
	if (!current->io_uring) {
		mutex_lock(&sqd->lock);
		rcu_assign_pointer(sqd->thread, NULL);
		mutex_unlock(&sqd->lock);
		goto err_out;
	}
	snprintf(buf, sizeof(buf), "iou-sqp-%d", sqd->task_pid);
	set_task_comm(current, buf);
	/* reset to our pid after we've set task_comm, for fdinfo */
	sqd->task_pid = current->pid;
	if (sqd->sq_cpu != -1) {
		set_cpus_allowed_ptr(current, cpumask_of(sqd->sq_cpu));
	} else {
		set_cpus_allowed_ptr(current, cpu_online_mask);
		sqd->sq_cpu = raw_smp_processor_id();
	}
	...
	mutex_lock(&sqd->lock);
	while (1) {
		bool cap_entries, sqt_spin = false;

		if (io_sqd_events_pending(sqd) || signal_pending(current)) {
			if (io_sqd_handle_event(sqd))
				break;
			timeout = jiffies + sqd->sq_thread_idle;
		}
		/* one submission pass over every attached ring */
		cap_entries = !list_is_singular(&sqd->ctx_list);
		list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
			int ret = __io_sq_thread(ctx, cap_entries);

			if (!sqt_spin && (ret > 0 || !wq_list_empty(&ctx->iopoll_list)))
				sqt_spin = true;
		}
		...
		list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
			...	/* NAPI busy-poll pass */
		}
		/* spin until the idle period expires, then prepare to sleep */
		if (sqt_spin || !time_after(jiffies, timeout)) {
			if (sqt_spin) {
				...
				timeout = jiffies + sqd->sq_thread_idle;
			}
			if (unlikely(need_resched())) {
				mutex_unlock(&sqd->lock);
				cond_resched();
				mutex_lock(&sqd->lock);
				sqd->sq_cpu = raw_smp_processor_id();
			}
			continue;
		}
		prepare_to_wait(&sqd->wait, &wait, TASK_INTERRUPTIBLE);
		...
		list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
			/* ask userspace for IORING_ENTER_SQ_WAKEUP wakeups */
			atomic_or(IORING_SQ_NEED_WAKEUP, &ctx->rings->sq_flags);
			if ((ctx->flags & IORING_SETUP_IOPOLL) &&
			    !wq_list_empty(&ctx->iopoll_list)) {
				...	/* iopoll work pending: don't sleep */
			}
			...
		}
		...
			mutex_unlock(&sqd->lock);
			schedule();
			mutex_lock(&sqd->lock);
			sqd->sq_cpu = raw_smp_processor_id();
		...
		list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
			atomic_andnot(IORING_SQ_NEED_WAKEUP, &ctx->rings->sq_flags);

		finish_wait(&sqd->wait, &wait);
		timeout = jiffies + sqd->sq_thread_idle;
	}
	...
	rcu_assign_pointer(sqd->thread, NULL);
	/* final wakeup so submitters notice the thread is gone */
	list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
		atomic_or(IORING_SQ_NEED_WAKEUP, &ctx->rings->sq_flags);
	...
	mutex_unlock(&sqd->lock);
err_out:
	complete(&sqd->exited);

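/*
 * Added illustration: IORING_SQ_NEED_WAKEUP, set above, is the
 * userspace-visible half of the protocol. Once the SQ thread has gone to
 * sleep, the submitter must notice the flag in the mmap'd SQ ring and wake
 * the thread via io_uring_enter(). Sketch with a raw syscall; sq_flags_ptr
 * is assumed to point at the mmap'd SQ-flags word (setup elided):
 */
#include <linux/io_uring.h>
#include <sys/syscall.h>
#include <unistd.h>

static void sq_wakeup_if_needed(int ring_fd, const unsigned *sq_flags_ptr)
{
	/* real code should use an acquire load; plain read for brevity */
	if (*sq_flags_ptr & IORING_SQ_NEED_WAKEUP)
		syscall(__NR_io_uring_enter, ring_fd, 0, 0,
			IORING_ENTER_SQ_WAKEUP, NULL, 0);
}
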
/* io_sqpoll_wait_sq() — block a submitter until the SQ ring has room */
	prepare_to_wait(&ctx->sqo_sq_wait, &wait, TASK_INTERRUPTIBLE);
	...
	finish_wait(&ctx->sqo_sq_wait, &wait);

/* io_sq_offload_create() — set up SQPOLL for a new ring */
	/* retain compatibility: an invalid attach attempt must still fail */
	if ((ctx->flags & (IORING_SETUP_ATTACH_WQ | IORING_SETUP_SQPOLL)) ==
				IORING_SETUP_ATTACH_WQ) {
		CLASS(fd, f)(p->wq_fd);
		if (fd_empty(f))
			return -ENXIO;
		if (!io_is_uring_fops(fd_file(f)))
			return -EINVAL;
	}
	if (ctx->flags & IORING_SETUP_SQPOLL) {
		...
		ctx->sq_creds = get_current_cred();
		ctx->sq_data = sqd;
		ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle);
		if (!ctx->sq_thread_idle)
			ctx->sq_thread_idle = HZ;

		io_sq_thread_park(sqd);
		list_add(&ctx->sqd_list, &sqd->ctx_list);
		io_sqd_update_thread_idle(sqd);
		/* don't attach to a dying SQPOLL thread, would be racy */
		ret = (attached && !sqd->thread) ? -ENXIO : 0;
		io_sq_thread_unpark(sqd);
		...
		if (p->flags & IORING_SETUP_SQ_AFF) {
			int cpu = p->sq_thread_cpu;

			ret = -EINVAL;
			if (cpu >= nr_cpu_ids || !cpu_online(cpu))
				goto err_sqpoll;
			ret = -ENOMEM;
			...	/* allocate a cpumask */
			ret = -EINVAL;
			...	/* cpu must also be in the task's cpuset */
			sqd->sq_cpu = cpu;
		} else {
			sqd->sq_cpu = -1;
		}
		sqd->task_pid = current->pid;
		sqd->task_tgid = current->tgid;
		...
		mutex_lock(&sqd->lock);
		rcu_assign_pointer(sqd->thread, tsk);
		mutex_unlock(&sqd->lock);
		...
	} else if (p->flags & IORING_SETUP_SQ_AFF) {
		/* can't have SQ_AFF without SQPOLL */
		ret = -EINVAL;
		goto err;
	}
	...
err_sqpoll:
	complete(&ctx->sq_data->exited);

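/*
 * Added illustration: the userspace side of the setup handled above —
 * requesting SQPOLL pinned to CPU 1 with a 2000 ms idle period. The CPU
 * number, idle period, and queue depth are arbitrary example values:
 */
#include <linux/io_uring.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int setup_sqpoll_ring(void)
{
	struct io_uring_params p;

	memset(&p, 0, sizeof(p));
	p.flags = IORING_SETUP_SQPOLL | IORING_SETUP_SQ_AFF;
	p.sq_thread_cpu = 1;		/* must be online and in our cpuset */
	p.sq_thread_idle = 2000;	/* in ms; 0 falls back to the 1 s default */
	return (int)syscall(__NR_io_uring_setup, 128, &p);
}
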
/* io_sqpoll_wq_cpu_affinity() — update the SQ thread's io-wq affinity */
	struct io_sq_data *sqd = ctx->sq_data;
	int ret = -EINVAL;
	...	/* parked: set affinity only if the thread is still alive */
	ret = io_wq_cpu_affinity(tsk->io_uring, mask);
	...
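
/*
 * Added illustration: userspace reaches the function above through
 * io_uring_register() with IORING_REGISTER_IOWQ_AFF, passing a CPU mask
 * and its size in bytes:
 */
#include <linux/io_uring.h>
#include <sched.h>
#include <sys/syscall.h>
#include <unistd.h>

static int set_iowq_affinity(int ring_fd, int cpu)
{
	cpu_set_t mask;

	CPU_ZERO(&mask);
	CPU_SET(cpu, &mask);
	return (int)syscall(__NR_io_uring_register, ring_fd,
			    IORING_REGISTER_IOWQ_AFF, &mask, sizeof(mask));
}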