// SPDX-License-Identifier: GPL-2.0
/*
 * Contains the core associated with submission side polling of the SQ
 * ring, offloading submissions from the application to a kernel thread.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/audit.h>
#include <linux/security.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "napi.h"
#include "sqpoll.h"

#define IORING_SQPOLL_CAP_ENTRIES_VALUE 8
#define IORING_TW_CAP_ENTRIES_VALUE 8

enum {
	IO_SQ_THREAD_SHOULD_STOP = 0,
	IO_SQ_THREAD_SHOULD_PARK,
};

void io_sq_thread_unpark(struct io_sq_data *sqd)
	__releases(&sqd->lock)
{
	WARN_ON_ONCE(sqd->thread == current);

	/*
	 * Do the dance but not conditional clear_bit() because it'd race with
	 * other threads incrementing park_pending and setting the bit.
	 */
	clear_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
	if (atomic_dec_return(&sqd->park_pending))
		set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
	mutex_unlock(&sqd->lock);
}

void io_sq_thread_park(struct io_sq_data *sqd)
	__acquires(&sqd->lock)
{
	WARN_ON_ONCE(sqd->thread == current);

	atomic_inc(&sqd->park_pending);
	set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
	mutex_lock(&sqd->lock);
	if (sqd->thread)
		wake_up_process(sqd->thread);
}

void io_sq_thread_stop(struct io_sq_data *sqd)
{
	WARN_ON_ONCE(sqd->thread == current);
	WARN_ON_ONCE(test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state));

	set_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
	mutex_lock(&sqd->lock);
	if (sqd->thread)
		wake_up_process(sqd->thread);
	mutex_unlock(&sqd->lock);
	wait_for_completion(&sqd->exited);
}

void io_put_sq_data(struct io_sq_data *sqd)
{
	if (refcount_dec_and_test(&sqd->refs)) {
		WARN_ON_ONCE(atomic_read(&sqd->park_pending));

		io_sq_thread_stop(sqd);
		kfree(sqd);
	}
}

static __cold void io_sqd_update_thread_idle(struct io_sq_data *sqd)
{
	struct io_ring_ctx *ctx;
	unsigned sq_thread_idle = 0;

	list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
		sq_thread_idle = max(sq_thread_idle, ctx->sq_thread_idle);
	sqd->sq_thread_idle = sq_thread_idle;
}

void io_sq_thread_finish(struct io_ring_ctx *ctx)
{
	struct io_sq_data *sqd = ctx->sq_data;

	if (sqd) {
		io_sq_thread_park(sqd);
		list_del_init(&ctx->sqd_list);
		io_sqd_update_thread_idle(sqd);
		io_sq_thread_unpark(sqd);

		io_put_sq_data(sqd);
		ctx->sq_data = NULL;
	}
}

static struct io_sq_data *io_attach_sq_data(struct io_uring_params *p)
{
	struct io_ring_ctx *ctx_attach;
	struct io_sq_data *sqd;
	struct fd f;

	f = fdget(p->wq_fd);
	if (!f.file)
		return ERR_PTR(-ENXIO);
	if (!io_is_uring_fops(f.file)) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	ctx_attach = f.file->private_data;
	sqd = ctx_attach->sq_data;
	if (!sqd) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}
	if (sqd->task_tgid != current->tgid) {
		fdput(f);
		return ERR_PTR(-EPERM);
	}

	refcount_inc(&sqd->refs);
	fdput(f);
	return sqd;
}

static struct io_sq_data *io_get_sq_data(struct io_uring_params *p,
					 bool *attached)
{
	struct io_sq_data *sqd;

	*attached = false;
	if (p->flags & IORING_SETUP_ATTACH_WQ) {
		sqd = io_attach_sq_data(p);
		if (!IS_ERR(sqd)) {
			*attached = true;
			return sqd;
		}
		/* fall through for EPERM case, setup new sqd/task */
		if (PTR_ERR(sqd) != -EPERM)
			return sqd;
	}

	sqd = kzalloc(sizeof(*sqd), GFP_KERNEL);
	if (!sqd)
		return ERR_PTR(-ENOMEM);

	atomic_set(&sqd->park_pending, 0);
	refcount_set(&sqd->refs, 1);
	INIT_LIST_HEAD(&sqd->ctx_list);
	mutex_init(&sqd->lock);
	init_waitqueue_head(&sqd->wait);
	init_completion(&sqd->exited);
	return sqd;
}
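
/*
 * Illustrative note (not part of the kernel build): io_attach_sq_data() and
 * io_get_sq_data() above back the IORING_SETUP_ATTACH_WQ path, where a second
 * ring shares the SQPOLL thread of an existing ring instead of spawning its
 * own. A minimal userspace sketch of that usage, assuming liburing's
 * io_uring_queue_init_params() and omitting error handling, might look like:
 *
 *	struct io_uring_params p1 = { .flags = IORING_SETUP_SQPOLL,
 *				      .sq_thread_idle = 2000 };
 *	struct io_uring ring1, ring2;
 *
 *	io_uring_queue_init_params(64, &ring1, &p1);
 *
 *	struct io_uring_params p2 = { .flags = IORING_SETUP_SQPOLL |
 *					       IORING_SETUP_ATTACH_WQ,
 *				      .wq_fd = ring1.ring_fd };
 *
 *	io_uring_queue_init_params(64, &ring2, &p2);
 *
 * The attach only succeeds if the target ring belongs to the same thread
 * group (see the task_tgid check above); on -EPERM a new sqd/thread is set
 * up for the new ring instead.
 */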

static inline bool io_sqd_events_pending(struct io_sq_data *sqd)
{
	return READ_ONCE(sqd->state);
}

static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries)
{
	unsigned int to_submit;
	int ret = 0;

	to_submit = io_sqring_entries(ctx);
	/* if we're handling multiple rings, cap submit size for fairness */
	if (cap_entries && to_submit > IORING_SQPOLL_CAP_ENTRIES_VALUE)
		to_submit = IORING_SQPOLL_CAP_ENTRIES_VALUE;

	if (!wq_list_empty(&ctx->iopoll_list) || to_submit) {
		const struct cred *creds = NULL;

		if (ctx->sq_creds != current_cred())
			creds = override_creds(ctx->sq_creds);

		mutex_lock(&ctx->uring_lock);
		if (!wq_list_empty(&ctx->iopoll_list))
			io_do_iopoll(ctx, true);

		/*
		 * Don't submit if refs are dying, good for io_uring_register(),
		 * but also it is relied upon by io_ring_exit_work()
		 */
		if (to_submit && likely(!percpu_ref_is_dying(&ctx->refs)) &&
		    !(ctx->flags & IORING_SETUP_R_DISABLED))
			ret = io_submit_sqes(ctx, to_submit);
		mutex_unlock(&ctx->uring_lock);

		if (io_napi(ctx))
			ret += io_napi_sqpoll_busy_poll(ctx);

		if (to_submit && wq_has_sleeper(&ctx->sqo_sq_wait))
			wake_up(&ctx->sqo_sq_wait);
		if (creds)
			revert_creds(creds);
	}

	return ret;
}

static bool io_sqd_handle_event(struct io_sq_data *sqd)
{
	bool did_sig = false;
	struct ksignal ksig;

	if (test_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state) ||
	    signal_pending(current)) {
		mutex_unlock(&sqd->lock);
		if (signal_pending(current))
			did_sig = get_signal(&ksig);
		cond_resched();
		mutex_lock(&sqd->lock);
		sqd->sq_cpu = raw_smp_processor_id();
	}
	return did_sig || test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
}

/*
 * Run task_work, processing the retry_list first. The retry_list holds
 * entries that we passed on in the previous run, if we had more task_work
 * than we were asked to process. Newly queued task_work isn't run until the
 * retry list has been fully processed.
 */
static unsigned int io_sq_tw(struct llist_node **retry_list, int max_entries)
{
	struct io_uring_task *tctx = current->io_uring;
	unsigned int count = 0;

	if (*retry_list) {
		*retry_list = io_handle_tw_list(*retry_list, &count, max_entries);
		if (count >= max_entries)
			return count;
		max_entries -= count;
	}

	*retry_list = tctx_task_work_run(tctx, max_entries, &count);
	return count;
}

static bool io_sq_tw_pending(struct llist_node *retry_list)
{
	struct io_uring_task *tctx = current->io_uring;

	return retry_list || !llist_empty(&tctx->task_list);
}

static void io_sq_update_worktime(struct io_sq_data *sqd, struct rusage *start)
{
	struct rusage end;

	getrusage(current, RUSAGE_SELF, &end);
	end.ru_stime.tv_sec -= start->ru_stime.tv_sec;
	end.ru_stime.tv_usec -= start->ru_stime.tv_usec;

	sqd->work_time += end.ru_stime.tv_usec + end.ru_stime.tv_sec * 1000000;
}

static int io_sq_thread(void *data)
{
	struct llist_node *retry_list = NULL;
	struct io_sq_data *sqd = data;
	struct io_ring_ctx *ctx;
	struct rusage start;
	unsigned long timeout = 0;
	char buf[TASK_COMM_LEN];
	DEFINE_WAIT(wait);

	/* offload context creation failed, just exit */
	if (!current->io_uring)
		goto err_out;

	snprintf(buf, sizeof(buf), "iou-sqp-%d", sqd->task_pid);
	set_task_comm(current, buf);

	/* reset to our pid after we've set task_comm, for fdinfo */
	sqd->task_pid = current->pid;

	if (sqd->sq_cpu != -1) {
		set_cpus_allowed_ptr(current, cpumask_of(sqd->sq_cpu));
	} else {
		set_cpus_allowed_ptr(current, cpu_online_mask);
		sqd->sq_cpu = raw_smp_processor_id();
	}

	/*
	 * Force audit context to get setup, in case we do prep side async
	 * operations that would trigger an audit call before any issue side
	 * audit has been done.
	 */
	audit_uring_entry(IORING_OP_NOP);
	audit_uring_exit(true, 0);

	mutex_lock(&sqd->lock);
	while (1) {
		bool cap_entries, sqt_spin = false;

		if (io_sqd_events_pending(sqd) || signal_pending(current)) {
			if (io_sqd_handle_event(sqd))
				break;
			timeout = jiffies + sqd->sq_thread_idle;
		}

		cap_entries = !list_is_singular(&sqd->ctx_list);
		getrusage(current, RUSAGE_SELF, &start);
		list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
			int ret = __io_sq_thread(ctx, cap_entries);

			if (!sqt_spin && (ret > 0 || !wq_list_empty(&ctx->iopoll_list)))
				sqt_spin = true;
		}
		if (io_sq_tw(&retry_list, IORING_TW_CAP_ENTRIES_VALUE))
			sqt_spin = true;

		if (sqt_spin || !time_after(jiffies, timeout)) {
			if (sqt_spin) {
				io_sq_update_worktime(sqd, &start);
				timeout = jiffies + sqd->sq_thread_idle;
			}
			if (unlikely(need_resched())) {
				mutex_unlock(&sqd->lock);
				cond_resched();
				mutex_lock(&sqd->lock);
				sqd->sq_cpu = raw_smp_processor_id();
			}
			continue;
		}

		prepare_to_wait(&sqd->wait, &wait, TASK_INTERRUPTIBLE);
		if (!io_sqd_events_pending(sqd) && !io_sq_tw_pending(retry_list)) {
			bool needs_sched = true;

			list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
				atomic_or(IORING_SQ_NEED_WAKEUP,
					  &ctx->rings->sq_flags);
				if ((ctx->flags & IORING_SETUP_IOPOLL) &&
				    !wq_list_empty(&ctx->iopoll_list)) {
					needs_sched = false;
					break;
				}

				/*
				 * Ensure the store of the wakeup flag is not
				 * reordered with the load of the SQ tail
				 */
				smp_mb__after_atomic();

				if (io_sqring_entries(ctx)) {
					needs_sched = false;
					break;
				}
			}

			if (needs_sched) {
				mutex_unlock(&sqd->lock);
				schedule();
				mutex_lock(&sqd->lock);
				sqd->sq_cpu = raw_smp_processor_id();
			}
			list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
				atomic_andnot(IORING_SQ_NEED_WAKEUP,
					      &ctx->rings->sq_flags);
		}

		finish_wait(&sqd->wait, &wait);
		timeout = jiffies + sqd->sq_thread_idle;
	}

	if (retry_list)
		io_sq_tw(&retry_list, UINT_MAX);

	io_uring_cancel_generic(true, sqd);
	sqd->thread = NULL;
	list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
		atomic_or(IORING_SQ_NEED_WAKEUP, &ctx->rings->sq_flags);
	io_run_task_work();
	mutex_unlock(&sqd->lock);
err_out:
	complete(&sqd->exited);
	do_exit(0);
}

void io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
{
	DEFINE_WAIT(wait);

	do {
		if (!io_sqring_full(ctx))
			break;
		prepare_to_wait(&ctx->sqo_sq_wait, &wait, TASK_INTERRUPTIBLE);

		if (!io_sqring_full(ctx))
			break;
		schedule();
	} while (!signal_pending(current));

	finish_wait(&ctx->sqo_sq_wait, &wait);
}

__cold int io_sq_offload_create(struct io_ring_ctx *ctx,
				struct io_uring_params *p)
{
	int ret;

	/* Retain compatibility with failing for an invalid attach attempt */
	if ((ctx->flags & (IORING_SETUP_ATTACH_WQ | IORING_SETUP_SQPOLL)) ==
				IORING_SETUP_ATTACH_WQ) {
		struct fd f;

		f = fdget(p->wq_fd);
		if (!f.file)
			return -ENXIO;
		if (!io_is_uring_fops(f.file)) {
			fdput(f);
			return -EINVAL;
		}
		fdput(f);
	}
	if (ctx->flags & IORING_SETUP_SQPOLL) {
		struct task_struct *tsk;
		struct io_sq_data *sqd;
		bool attached;

		ret = security_uring_sqpoll();
		if (ret)
			return ret;

		sqd = io_get_sq_data(p, &attached);
		if (IS_ERR(sqd)) {
			ret = PTR_ERR(sqd);
			goto err;
		}

		ctx->sq_creds = get_current_cred();
		ctx->sq_data = sqd;
		ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle);
		if (!ctx->sq_thread_idle)
			ctx->sq_thread_idle = HZ;

		io_sq_thread_park(sqd);
		list_add(&ctx->sqd_list, &sqd->ctx_list);
		io_sqd_update_thread_idle(sqd);
		/* don't attach to a dying SQPOLL thread, would be racy */
		ret = (attached && !sqd->thread) ? -ENXIO : 0;
		io_sq_thread_unpark(sqd);

		if (ret < 0)
			goto err;
		if (attached)
			return 0;

		if (p->flags & IORING_SETUP_SQ_AFF) {
			int cpu = p->sq_thread_cpu;

			ret = -EINVAL;
			if (cpu >= nr_cpu_ids || !cpu_online(cpu))
				goto err_sqpoll;
			sqd->sq_cpu = cpu;
		} else {
			sqd->sq_cpu = -1;
		}

		sqd->task_pid = current->pid;
		sqd->task_tgid = current->tgid;
		tsk = create_io_thread(io_sq_thread, sqd, NUMA_NO_NODE);
		if (IS_ERR(tsk)) {
			ret = PTR_ERR(tsk);
			goto err_sqpoll;
		}

		sqd->thread = tsk;
		ret = io_uring_alloc_task_context(tsk, ctx);
		wake_up_new_task(tsk);
		if (ret)
			goto err;
	} else if (p->flags & IORING_SETUP_SQ_AFF) {
		/* Can't have SQ_AFF without SQPOLL */
		ret = -EINVAL;
		goto err;
	}

	return 0;
err_sqpoll:
	complete(&ctx->sq_data->exited);
err:
	io_sq_thread_finish(ctx);
	return ret;
}

__cold int io_sqpoll_wq_cpu_affinity(struct io_ring_ctx *ctx,
				     cpumask_var_t mask)
{
	struct io_sq_data *sqd = ctx->sq_data;
	int ret = -EINVAL;

	if (sqd) {
		io_sq_thread_park(sqd);
		/* Don't set affinity for a dying thread */
		if (sqd->thread)
			ret = io_wq_cpu_affinity(sqd->thread->io_uring, mask);
		io_sq_thread_unpark(sqd);
	}

	return ret;
}
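
/*
 * Illustrative note (not part of the kernel build): the thread created by
 * io_sq_offload_create() busy-polls the SQ ring, and once sq_thread_idle
 * (defaulting to one second) passes without work it sets
 * IORING_SQ_NEED_WAKEUP in the SQ ring flags before sleeping. Userspace must
 * then kick it with io_uring_enter(2) and IORING_ENTER_SQ_WAKEUP. A sketch of
 * the raw protocol, assuming error handling is omitted:
 *
 *	// after writing new SQEs and publishing the new SQ tail:
 *	if (*sq_flags & IORING_SQ_NEED_WAKEUP)	// single atomic load
 *		io_uring_enter(ring_fd, to_submit, 0,
 *			       IORING_ENTER_SQ_WAKEUP, NULL);
 *
 * A full memory barrier is needed between the tail store and the flags load,
 * mirroring the smp_mb__after_atomic() in io_sq_thread() above; liburing's
 * io_uring_submit() performs both the barrier and the wakeup check
 * internally. Pinning the thread via IORING_SETUP_SQ_AFF/sq_thread_cpu and
 * moving its io-wq workers via io_sqpoll_wq_cpu_affinity() (the
 * IORING_REGISTER_IOWQ_AFF path for SQPOLL rings) are optional tuning knobs.
 */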