// SPDX-License-Identifier: GPL-2.0
/*
 * Contains the core logic associated with submission-side polling of the
 * SQ ring, offloading submissions from the application to a kernel thread.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/audit.h>
#include <linux/security.h>
#include <linux/cpuset.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "napi.h"
#include "sqpoll.h"

#define IORING_SQPOLL_CAP_ENTRIES_VALUE	8
#define IORING_TW_CAP_ENTRIES_VALUE	8

enum {
	IO_SQ_THREAD_SHOULD_STOP = 0,
	IO_SQ_THREAD_SHOULD_PARK,
};

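/*
 * Drop a park request: clear the PARK bit and, if other parkers are still
 * pending, re-set it so the thread remains parked for them. Releases
 * sqd->lock, taken by the matching io_sq_thread_park().
 */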
void io_sq_thread_unpark(struct io_sq_data *sqd)
	__releases(&sqd->lock)
{
	WARN_ON_ONCE(sqd->thread == current);

	/*
	 * Do the dance, but don't do a conditional clear_bit(), because it'd
	 * race with other threads incrementing park_pending and setting the
	 * bit.
	 */
	clear_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
	if (atomic_dec_return(&sqd->park_pending))
		set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
	mutex_unlock(&sqd->lock);
}

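/*
 * Park the SQPOLL thread: record the pending park, set the PARK bit, wake
 * the thread so it notices the request, and return with sqd->lock held.
 * Must be paired with io_sq_thread_unpark().
 */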
void io_sq_thread_park(struct io_sq_data *sqd)
	__acquires(&sqd->lock)
{
	WARN_ON_ONCE(data_race(sqd->thread) == current);

	atomic_inc(&sqd->park_pending);
	set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
	mutex_lock(&sqd->lock);
	if (sqd->thread)
		wake_up_process(sqd->thread);
}

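/*
 * Ask the SQPOLL thread to exit: set the STOP bit, wake the thread, and
 * wait for it to complete its exit path.
 */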
void io_sq_thread_stop(struct io_sq_data *sqd)
{
	WARN_ON_ONCE(sqd->thread == current);
	WARN_ON_ONCE(test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state));

	set_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
	mutex_lock(&sqd->lock);
	if (sqd->thread)
		wake_up_process(sqd->thread);
	mutex_unlock(&sqd->lock);
	wait_for_completion(&sqd->exited);
}

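/*
 * Drop a reference to the sq_data; on the final put, stop the SQPOLL
 * thread and free the structure.
 */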
void io_put_sq_data(struct io_sq_data *sqd)
{
	if (refcount_dec_and_test(&sqd->refs)) {
		WARN_ON_ONCE(atomic_read(&sqd->park_pending));

		io_sq_thread_stop(sqd);
		kfree(sqd);
	}
}

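/* Recompute the idle period as the max of all attached rings' settings. */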
static __cold void io_sqd_update_thread_idle(struct io_sq_data *sqd)
{
	struct io_ring_ctx *ctx;
	unsigned sq_thread_idle = 0;

	list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
		sq_thread_idle = max(sq_thread_idle, ctx->sq_thread_idle);
	sqd->sq_thread_idle = sq_thread_idle;
}

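/*
 * Detach a ring from its sq_data: unlink it from the ctx list under park,
 * refresh the idle period, and drop the ring's reference to the sq_data.
 */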
void io_sq_thread_finish(struct io_ring_ctx *ctx)
{
	struct io_sq_data *sqd = ctx->sq_data;

	if (sqd) {
		io_sq_thread_park(sqd);
		list_del_init(&ctx->sqd_list);
		io_sqd_update_thread_idle(sqd);
		io_sq_thread_unpark(sqd);

		io_put_sq_data(sqd);
		ctx->sq_data = NULL;
	}
}

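/*
 * Look up the sq_data of an existing ring (identified by p->wq_fd) so a
 * new ring can share its SQPOLL thread. Only rings owned by the same
 * thread group may attach. Returns the sq_data with a reference taken,
 * or an ERR_PTR on failure.
 */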
static struct io_sq_data *io_attach_sq_data(struct io_uring_params *p)
{
	struct io_ring_ctx *ctx_attach;
	struct io_sq_data *sqd;
	struct fd f;

	f = fdget(p->wq_fd);
	if (!f.file)
		return ERR_PTR(-ENXIO);
	if (!io_is_uring_fops(f.file)) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	ctx_attach = f.file->private_data;
	sqd = ctx_attach->sq_data;
	if (!sqd) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}
	if (sqd->task_tgid != current->tgid) {
		fdput(f);
		return ERR_PTR(-EPERM);
	}

	refcount_inc(&sqd->refs);
	fdput(f);
	return sqd;
}

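/*
 * Get an sq_data for a new ring: attach to an existing one if
 * IORING_SETUP_ATTACH_WQ was requested (and permitted), otherwise allocate
 * and initialize a fresh one. *attached reports which path was taken.
 */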
static struct io_sq_data *io_get_sq_data(struct io_uring_params *p,
					 bool *attached)
{
	struct io_sq_data *sqd;

	*attached = false;
	if (p->flags & IORING_SETUP_ATTACH_WQ) {
		sqd = io_attach_sq_data(p);
		if (!IS_ERR(sqd)) {
			*attached = true;
			return sqd;
		}
		/* fall through for the EPERM case, set up a new sqd/task */
		if (PTR_ERR(sqd) != -EPERM)
			return sqd;
	}

	sqd = kzalloc(sizeof(*sqd), GFP_KERNEL);
	if (!sqd)
		return ERR_PTR(-ENOMEM);

	atomic_set(&sqd->park_pending, 0);
	refcount_set(&sqd->refs, 1);
	INIT_LIST_HEAD(&sqd->ctx_list);
	mutex_init(&sqd->lock);
	init_waitqueue_head(&sqd->wait);
	init_completion(&sqd->exited);
	return sqd;
}

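/* True if a stop or park request has been raised against the thread. */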
static inline bool io_sqd_events_pending(struct io_sq_data *sqd)
{
	return READ_ONCE(sqd->state);
}

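/*
 * Do one round of work for a single ring: reap IOPOLL completions, submit
 * any pending SQ entries under the ring's credentials, run NAPI busy
 * polling if enabled, and wake waiters blocked on SQ ring space. The
 * return value reflects how much work was done, which the caller uses to
 * decide whether the thread should keep spinning.
 */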
static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries)
{
	unsigned int to_submit;
	int ret = 0;

	to_submit = io_sqring_entries(ctx);
	/* if we're handling multiple rings, cap submit size for fairness */
	if (cap_entries && to_submit > IORING_SQPOLL_CAP_ENTRIES_VALUE)
		to_submit = IORING_SQPOLL_CAP_ENTRIES_VALUE;

	if (to_submit || !wq_list_empty(&ctx->iopoll_list)) {
		const struct cred *creds = NULL;

		if (ctx->sq_creds != current_cred())
			creds = override_creds(ctx->sq_creds);

		mutex_lock(&ctx->uring_lock);
		if (!wq_list_empty(&ctx->iopoll_list))
			io_do_iopoll(ctx, true);

		/*
		 * Don't submit if refs are dying. That's good for
		 * io_uring_register(), but io_ring_exit_work() also relies
		 * on it.
		 */
		if (to_submit && likely(!percpu_ref_is_dying(&ctx->refs)) &&
		    !(ctx->flags & IORING_SETUP_R_DISABLED))
			ret = io_submit_sqes(ctx, to_submit);
		mutex_unlock(&ctx->uring_lock);

		if (io_napi(ctx))
			ret += io_napi_sqpoll_busy_poll(ctx);

		if (to_submit && wq_has_sleeper(&ctx->sqo_sq_wait))
			wake_up(&ctx->sqo_sq_wait);
		if (creds)
			revert_creds(creds);
	}

	return ret;
}

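/*
 * Handle a pending park request or signal. Drops and reacquires sqd->lock
 * so a parking caller can run while holding it. Returns true if the thread
 * should exit, i.e. a fatal signal arrived or a stop was requested.
 */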
static bool io_sqd_handle_event(struct io_sq_data *sqd)
{
	bool did_sig = false;
	struct ksignal ksig;

	if (test_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state) ||
	    signal_pending(current)) {
		mutex_unlock(&sqd->lock);
		if (signal_pending(current))
			did_sig = get_signal(&ksig);
		cond_resched();
		mutex_lock(&sqd->lock);
		sqd->sq_cpu = raw_smp_processor_id();
	}
	return did_sig || test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
}

/*
 * Run task_work, processing the retry_list first. The retry_list holds
 * entries that we passed on in the previous run, if we had more task_work
 * than we were asked to process. Newly queued task_work isn't run until the
 * retry list has been fully processed.
 */
static unsigned int io_sq_tw(struct llist_node **retry_list, int max_entries)
{
	struct io_uring_task *tctx = current->io_uring;
	unsigned int count = 0;

	if (*retry_list) {
		*retry_list = io_handle_tw_list(*retry_list, &count, max_entries);
		if (count >= max_entries)
			goto out;
		max_entries -= count;
	}
	*retry_list = tctx_task_work_run(tctx, max_entries, &count);
out:
	if (task_work_pending(current))
		task_work_run();
	return count;
}

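/* True if any task_work is pending, on the retry list or freshly queued. */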
static bool io_sq_tw_pending(struct llist_node *retry_list)
{
	struct io_uring_task *tctx = current->io_uring;

	return retry_list || !llist_empty(&tctx->task_list);
}

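/*
 * Account the system CPU time consumed since *start into sqd->work_time,
 * in microseconds.
 */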
static void io_sq_update_worktime(struct io_sq_data *sqd, struct rusage *start)
{
	struct rusage end;

	getrusage(current, RUSAGE_SELF, &end);
	end.ru_stime.tv_sec -= start->ru_stime.tv_sec;
	end.ru_stime.tv_usec -= start->ru_stime.tv_usec;

	sqd->work_time += end.ru_stime.tv_usec + end.ru_stime.tv_sec * 1000000;
}

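/*
 * Main loop of the SQPOLL kernel thread. Repeatedly submits SQ entries and
 * runs task_work for every attached ring, spinning while there is work and
 * going to sleep (after flagging IORING_SQ_NEED_WAKEUP) once the idle
 * period expires with nothing to do.
 */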
static int io_sq_thread(void *data)
{
	struct llist_node *retry_list = NULL;
	struct io_sq_data *sqd = data;
	struct io_ring_ctx *ctx;
	struct rusage start;
	unsigned long timeout = 0;
	char buf[TASK_COMM_LEN];
	DEFINE_WAIT(wait);

	/* offload context creation failed, just exit */
	if (!current->io_uring)
		goto err_out;

	snprintf(buf, sizeof(buf), "iou-sqp-%d", sqd->task_pid);
	set_task_comm(current, buf);

	/* reset to our pid after we've set task_comm, for fdinfo */
	sqd->task_pid = current->pid;

	if (sqd->sq_cpu != -1) {
		set_cpus_allowed_ptr(current, cpumask_of(sqd->sq_cpu));
	} else {
		set_cpus_allowed_ptr(current, cpu_online_mask);
		sqd->sq_cpu = raw_smp_processor_id();
	}

	/*
	 * Force audit context to get set up, in case we do prep side async
	 * operations that would trigger an audit call before any issue side
	 * audit has been done.
	 */
	audit_uring_entry(IORING_OP_NOP);
	audit_uring_exit(true, 0);

	mutex_lock(&sqd->lock);
	while (1) {
		bool cap_entries, sqt_spin = false;

		if (io_sqd_events_pending(sqd) || signal_pending(current)) {
			if (io_sqd_handle_event(sqd))
				break;
			timeout = jiffies + sqd->sq_thread_idle;
		}

		cap_entries = !list_is_singular(&sqd->ctx_list);
		getrusage(current, RUSAGE_SELF, &start);
		list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
			int ret = __io_sq_thread(ctx, cap_entries);

			if (!sqt_spin && (ret > 0 || !wq_list_empty(&ctx->iopoll_list)))
				sqt_spin = true;
		}
		if (io_sq_tw(&retry_list, IORING_TW_CAP_ENTRIES_VALUE))
			sqt_spin = true;

		if (sqt_spin || !time_after(jiffies, timeout)) {
			if (sqt_spin) {
				io_sq_update_worktime(sqd, &start);
				timeout = jiffies + sqd->sq_thread_idle;
			}
			if (unlikely(need_resched())) {
				mutex_unlock(&sqd->lock);
				cond_resched();
				mutex_lock(&sqd->lock);
				sqd->sq_cpu = raw_smp_processor_id();
			}
			continue;
		}

		prepare_to_wait(&sqd->wait, &wait, TASK_INTERRUPTIBLE);
		if (!io_sqd_events_pending(sqd) && !io_sq_tw_pending(retry_list)) {
			bool needs_sched = true;

			list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
				atomic_or(IORING_SQ_NEED_WAKEUP,
					  &ctx->rings->sq_flags);
				if ((ctx->flags & IORING_SETUP_IOPOLL) &&
				    !wq_list_empty(&ctx->iopoll_list)) {
					needs_sched = false;
					break;
				}

				/*
				 * Ensure the store of the wakeup flag is not
				 * reordered with the load of the SQ tail
				 */
				smp_mb__after_atomic();

				if (io_sqring_entries(ctx)) {
					needs_sched = false;
					break;
				}
			}

			if (needs_sched) {
				mutex_unlock(&sqd->lock);
				schedule();
				mutex_lock(&sqd->lock);
				sqd->sq_cpu = raw_smp_processor_id();
			}
			list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
				atomic_andnot(IORING_SQ_NEED_WAKEUP,
					      &ctx->rings->sq_flags);
		}

		finish_wait(&sqd->wait, &wait);
		timeout = jiffies + sqd->sq_thread_idle;
	}

	if (retry_list)
		io_sq_tw(&retry_list, UINT_MAX);

	io_uring_cancel_generic(true, sqd);
	sqd->thread = NULL;
	list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
		atomic_or(IORING_SQ_NEED_WAKEUP, &ctx->rings->sq_flags);
	io_run_task_work();
	mutex_unlock(&sqd->lock);
err_out:
	complete(&sqd->exited);
	do_exit(0);
}

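/*
 * Wait until the SQ ring has room for more submissions, or a signal is
 * pending; used on the io_uring_enter() side when the ring is full.
 */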
void io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
{
	DEFINE_WAIT(wait);

	do {
		if (!io_sqring_full(ctx))
			break;
		prepare_to_wait(&ctx->sqo_sq_wait, &wait, TASK_INTERRUPTIBLE);

		if (!io_sqring_full(ctx))
			break;
		schedule();
	} while (!signal_pending(current));

	finish_wait(&ctx->sqo_sq_wait, &wait);
}

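/*
 * Set up SQPOLL offload for a new ring: get (or attach to) an sq_data,
 * apply the idle and CPU affinity parameters, and create the SQPOLL kernel
 * thread if we didn't attach to an existing one.
 */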
__cold int io_sq_offload_create(struct io_ring_ctx *ctx,
				struct io_uring_params *p)
{
	int ret;

	/* Retain compatibility with failing for an invalid attach attempt */
	if ((ctx->flags & (IORING_SETUP_ATTACH_WQ | IORING_SETUP_SQPOLL)) ==
				IORING_SETUP_ATTACH_WQ) {
		struct fd f;

		f = fdget(p->wq_fd);
		if (!f.file)
			return -ENXIO;
		if (!io_is_uring_fops(f.file)) {
			fdput(f);
			return -EINVAL;
		}
		fdput(f);
	}
	if (ctx->flags & IORING_SETUP_SQPOLL) {
		struct task_struct *tsk;
		struct io_sq_data *sqd;
		bool attached;

		ret = security_uring_sqpoll();
		if (ret)
			return ret;

		sqd = io_get_sq_data(p, &attached);
		if (IS_ERR(sqd)) {
			ret = PTR_ERR(sqd);
			goto err;
		}

		ctx->sq_creds = get_current_cred();
		ctx->sq_data = sqd;
		ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle);
		if (!ctx->sq_thread_idle)
			ctx->sq_thread_idle = HZ;

		io_sq_thread_park(sqd);
		list_add(&ctx->sqd_list, &sqd->ctx_list);
		io_sqd_update_thread_idle(sqd);
		/* don't attach to a dying SQPOLL thread, would be racy */
		ret = (attached && !sqd->thread) ? -ENXIO : 0;
		io_sq_thread_unpark(sqd);

		if (ret < 0)
			goto err;
		if (attached)
			return 0;

		if (p->flags & IORING_SETUP_SQ_AFF) {
			struct cpumask allowed_mask;
			int cpu = p->sq_thread_cpu;

			ret = -EINVAL;
			cpuset_cpus_allowed(current, &allowed_mask);
			if (!cpumask_test_cpu(cpu, &allowed_mask))
				goto err_sqpoll;
			sqd->sq_cpu = cpu;
		} else {
			sqd->sq_cpu = -1;
		}

		sqd->task_pid = current->pid;
		sqd->task_tgid = current->tgid;
		tsk = create_io_thread(io_sq_thread, sqd, NUMA_NO_NODE);
		if (IS_ERR(tsk)) {
			ret = PTR_ERR(tsk);
			goto err_sqpoll;
		}

		sqd->thread = tsk;
		ret = io_uring_alloc_task_context(tsk, ctx);
		wake_up_new_task(tsk);
		if (ret)
			goto err;
	} else if (p->flags & IORING_SETUP_SQ_AFF) {
		/* Can't have SQ_AFF without SQPOLL */
		ret = -EINVAL;
		goto err;
	}

	return 0;
err_sqpoll:
	complete(&ctx->sq_data->exited);
err:
	io_sq_thread_finish(ctx);
	return ret;
}

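/*
 * Update the CPU affinity of the SQPOLL thread's io-wq workers. The thread
 * is parked while the new mask is applied.
 */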
__cold int io_sqpoll_wq_cpu_affinity(struct io_ring_ctx *ctx,
				     cpumask_var_t mask)
{
	struct io_sq_data *sqd = ctx->sq_data;
	int ret = -EINVAL;

	if (sqd) {
		io_sq_thread_park(sqd);
		/* Don't set affinity for a dying thread */
		if (sqd->thread)
			ret = io_wq_cpu_affinity(sqd->thread->io_uring, mask);
		io_sq_thread_unpark(sqd);
	}

	return ret;
}