xref: /linux/io_uring/io_uring.c (revision ee7226b2ae3beff5d8feffa94e5fd06af6965e52)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Shared application/kernel submission and completion ring pairs, for
4  * supporting fast/efficient IO.
5  *
6  * A note on the read/write ordering memory barriers that are matched between
7  * the application and kernel side.
8  *
9  * After the application reads the CQ ring tail, it must use an
10  * appropriate smp_rmb() to pair with the smp_wmb() the kernel uses
11  * before writing the tail (using smp_load_acquire to read the tail will
12  * do). It also needs a smp_mb() before updating CQ head (ordering the
13  * entry load(s) with the head store), pairing with an implicit barrier
14  * through a control-dependency in io_get_cqe (smp_store_release to
15  * store head will do). Failure to do so could lead to reading invalid
16  * CQ entries.
17  *
18  * Likewise, the application must use an appropriate smp_wmb() before
19  * writing the SQ tail (ordering SQ entry stores with the tail store),
20  * which pairs with smp_load_acquire in io_get_sqring (smp_store_release
21  * to store the tail will do). And it needs a barrier ordering the SQ
22  * head load before writing new SQ entries (smp_load_acquire to read
23  * head will do).
24  *
25  * When using the SQ poll thread (IORING_SETUP_SQPOLL), the application
26  * needs to check the SQ flags for IORING_SQ_NEED_WAKEUP *after*
27  * updating the SQ tail; a full memory barrier smp_mb() is needed
28  * between.
29  *
30  * Also see the examples in the liburing library:
31  *
32  *	git://git.kernel.org/pub/scm/linux/kernel/git/axboe/liburing.git
33  *
34  * io_uring also uses READ/WRITE_ONCE() for _any_ store or load that happens
35  * from data shared between the kernel and application. This is done both
36  * for ordering purposes and to ensure that once a value is loaded from
37  * data that the application could potentially modify, it remains stable.
38  *
39  * Copyright (C) 2018-2019 Jens Axboe
40  * Copyright (c) 2018-2019 Christoph Hellwig
41  */
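/*
 * A minimal sketch of the pairing described above, from the application's
 * point of view. Pointer names (cq_khead, cq_ktail, sq_ktail, sq_kflags,
 * ring masks, cqes/sqes arrays) are assumed liburing-style mappings of the
 * mmap'ed rings; handle_cqe() and fill_sqe() are hypothetical helpers, and
 * io_uring_enter() stands in for the raw syscall. See liburing for the
 * real, complete versions.
 *
 *	// Reaping completions:
 *	unsigned head = *cq_khead;
 *	unsigned tail = smp_load_acquire(cq_ktail);	// pairs with the kernel's tail store
 *	while (head != tail)
 *		handle_cqe(&cqes[head++ & *cq_ring_mask]);
 *	smp_store_release(cq_khead, head);		// orders CQE loads before the head store
 *
 *	// Submitting a request:
 *	unsigned stail = *sq_ktail;
 *	fill_sqe(&sqes[stail & *sq_ring_mask]);
 *	smp_store_release(sq_ktail, stail + 1);		// orders SQE stores before the tail store
 *	smp_mb();					// tail store vs. NEED_WAKEUP flags load (SQPOLL)
 *	if (READ_ONCE(*sq_kflags) & IORING_SQ_NEED_WAKEUP)
 *		io_uring_enter(ring_fd, 0, 0, IORING_ENTER_SQ_WAKEUP, NULL);
 */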
42 #include <linux/kernel.h>
43 #include <linux/errno.h>
44 #include <linux/syscalls.h>
45 #include <linux/refcount.h>
46 #include <linux/bits.h>
47 
48 #include <linux/sched/signal.h>
49 #include <linux/fs.h>
50 #include <linux/mm.h>
51 #include <linux/percpu.h>
52 #include <linux/slab.h>
53 #include <linux/anon_inodes.h>
54 #include <linux/uaccess.h>
55 #include <linux/nospec.h>
56 #include <linux/task_work.h>
57 #include <linux/io_uring.h>
58 #include <linux/io_uring/cmd.h>
59 #include <linux/audit.h>
60 #include <linux/security.h>
61 #include <linux/jump_label.h>
62 
63 #define CREATE_TRACE_POINTS
64 #include <trace/events/io_uring.h>
65 
66 #include <uapi/linux/io_uring.h>
67 
68 #include "io-wq.h"
69 
70 #include "filetable.h"
71 #include "io_uring.h"
72 #include "opdef.h"
73 #include "refs.h"
74 #include "tctx.h"
75 #include "register.h"
76 #include "sqpoll.h"
77 #include "fdinfo.h"
78 #include "kbuf.h"
79 #include "rsrc.h"
80 #include "cancel.h"
81 #include "net.h"
82 #include "notif.h"
83 #include "waitid.h"
84 #include "futex.h"
85 #include "napi.h"
86 #include "uring_cmd.h"
87 #include "msg_ring.h"
88 #include "memmap.h"
89 #include "zcrx.h"
90 #include "bpf-ops.h"
91 
92 #include "timeout.h"
93 #include "poll.h"
94 #include "rw.h"
95 #include "alloc_cache.h"
96 #include "eventfd.h"
97 #include "wait.h"
98 #include "bpf_filter.h"
99 #include "loop.h"
100 
101 #define SQE_COMMON_FLAGS (IOSQE_FIXED_FILE | IOSQE_IO_LINK | \
102 			  IOSQE_IO_HARDLINK | IOSQE_ASYNC)
103 
104 #define IO_REQ_LINK_FLAGS (REQ_F_LINK | REQ_F_HARDLINK)
105 
106 #define IO_REQ_CLEAN_FLAGS (REQ_F_BUFFER_SELECTED | REQ_F_NEED_CLEANUP | \
107 				REQ_F_INFLIGHT | REQ_F_CREDS | REQ_F_ASYNC_DATA)
108 
109 #define IO_REQ_CLEAN_SLOW_FLAGS (REQ_F_REFCOUNT | IO_REQ_LINK_FLAGS | \
110 				 REQ_F_REISSUE | REQ_F_POLLED | \
111 				 IO_REQ_CLEAN_FLAGS)
112 
113 #define IO_TCTX_REFS_CACHE_NR	(1U << 10)
114 
115 #define IO_COMPL_BATCH			32
116 #define IO_REQ_ALLOC_BATCH		8
117 
118 /* requests with any of those set should undergo io_disarm_next() */
119 #define IO_DISARM_MASK (REQ_F_ARM_LTIMEOUT | REQ_F_LINK_TIMEOUT | REQ_F_FAIL)
120 
121 static void io_queue_sqe(struct io_kiocb *req, unsigned int extra_flags);
122 static void __io_req_caches_free(struct io_ring_ctx *ctx);
123 
124 static __read_mostly DEFINE_STATIC_KEY_DEFERRED_FALSE(io_key_has_sqarray, HZ);
125 
126 struct kmem_cache *req_cachep;
127 static struct workqueue_struct *iou_wq __ro_after_init;
128 
129 static int __read_mostly sysctl_io_uring_disabled;
130 static int __read_mostly sysctl_io_uring_group = -1;
131 
132 #ifdef CONFIG_SYSCTL
133 static const struct ctl_table kernel_io_uring_disabled_table[] = {
134 	{
135 		.procname	= "io_uring_disabled",
136 		.data		= &sysctl_io_uring_disabled,
137 		.maxlen		= sizeof(sysctl_io_uring_disabled),
138 		.mode		= 0644,
139 		.proc_handler	= proc_dointvec_minmax,
140 		.extra1		= SYSCTL_ZERO,
141 		.extra2		= SYSCTL_TWO,
142 	},
143 	{
144 		.procname	= "io_uring_group",
145 		.data		= &sysctl_io_uring_group,
146 		.maxlen		= sizeof(gid_t),
147 		.mode		= 0644,
148 		.proc_handler	= proc_dointvec,
149 	},
150 };
151 #endif
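/*
 * Usage sketch for the sysctls registered above (semantics per
 * Documentation/admin-guide/sysctl/kernel.rst): 0 leaves io_uring enabled
 * for everyone, 1 restricts ring creation to processes with CAP_SYS_ADMIN
 * or in the group named by io_uring_group, and 2 disables it entirely.
 *
 *	echo 2 > /proc/sys/kernel/io_uring_disabled
 *	echo 1000 > /proc/sys/kernel/io_uring_group
 */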
152 
153 static void io_poison_cached_req(struct io_kiocb *req)
154 {
155 	req->ctx = IO_URING_PTR_POISON;
156 	req->tctx = IO_URING_PTR_POISON;
157 	req->file = IO_URING_PTR_POISON;
158 	req->creds = IO_URING_PTR_POISON;
159 	req->io_task_work.func = IO_URING_PTR_POISON;
160 	req->apoll = IO_URING_PTR_POISON;
161 }
162 
163 static void io_poison_req(struct io_kiocb *req)
164 {
165 	io_poison_cached_req(req);
166 	req->async_data = IO_URING_PTR_POISON;
167 	req->kbuf = IO_URING_PTR_POISON;
168 	req->comp_list.next = IO_URING_PTR_POISON;
169 	req->file_node = IO_URING_PTR_POISON;
170 	req->link = IO_URING_PTR_POISON;
171 }
172 
173 static inline void req_fail_link_node(struct io_kiocb *req, int res)
174 {
175 	req_set_fail(req);
176 	io_req_set_res(req, res, 0);
177 }
178 
179 static inline void io_req_add_to_cache(struct io_kiocb *req, struct io_ring_ctx *ctx)
180 {
181 	if (IS_ENABLED(CONFIG_KASAN))
182 		io_poison_cached_req(req);
183 	wq_stack_add_head(&req->comp_list, &ctx->submit_state.free_list);
184 }
185 
186 static __cold void io_ring_ctx_ref_free(struct percpu_ref *ref)
187 {
188 	struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);
189 
190 	complete(&ctx->ref_comp);
191 }
192 
193 static int io_alloc_hash_table(struct io_hash_table *table, unsigned bits)
194 {
195 	unsigned int hash_buckets;
196 	int i;
197 
198 	do {
199 		hash_buckets = 1U << bits;
200 		table->hbs = kvmalloc_objs(table->hbs[0], hash_buckets,
201 					   GFP_KERNEL_ACCOUNT);
202 		if (table->hbs)
203 			break;
204 		if (bits == 1)
205 			return -ENOMEM;
206 		bits--;
207 	} while (1);
208 
209 	table->hash_bits = bits;
210 	for (i = 0; i < hash_buckets; i++)
211 		INIT_HLIST_HEAD(&table->hbs[i].list);
212 	return 0;
213 }
214 
215 static void io_free_alloc_caches(struct io_ring_ctx *ctx)
216 {
217 	io_alloc_cache_free(&ctx->apoll_cache, kfree);
218 	io_alloc_cache_free(&ctx->netmsg_cache, io_netmsg_cache_free);
219 	io_alloc_cache_free(&ctx->rw_cache, io_rw_cache_free);
220 	io_alloc_cache_free(&ctx->cmd_cache, io_cmd_cache_free);
221 	io_futex_cache_free(ctx);
222 	io_rsrc_cache_free(ctx);
223 }
224 
225 static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
226 {
227 	struct io_ring_ctx *ctx;
228 	int hash_bits;
229 	bool ret;
230 
231 	ctx = kzalloc_obj(*ctx);
232 	if (!ctx)
233 		return NULL;
234 
235 	xa_init(&ctx->io_bl_xa);
236 
237 	/*
238 	 * Use 5 bits less than the max CQ entries; that should give us around
239 	 * 32 entries per hash list if totally full and uniformly spread, but
240 	 * don't keep too many buckets, to avoid overconsuming memory.
241 	 */
242 	hash_bits = ilog2(p->cq_entries) - 5;
243 	hash_bits = clamp(hash_bits, 1, 8);
244 	if (io_alloc_hash_table(&ctx->cancel_table, hash_bits))
245 		goto err;
246 	if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free,
247 			    0, GFP_KERNEL))
248 		goto err;
249 
250 	ctx->flags = p->flags;
251 	ctx->hybrid_poll_time = LLONG_MAX;
252 	atomic_set(&ctx->cq_wait_nr, IO_CQ_WAKE_INIT);
253 	init_waitqueue_head(&ctx->sqo_sq_wait);
254 	INIT_LIST_HEAD(&ctx->sqd_list);
255 	INIT_LIST_HEAD(&ctx->cq_overflow_list);
256 	ret = io_alloc_cache_init(&ctx->apoll_cache, IO_POLL_ALLOC_CACHE_MAX,
257 			    sizeof(struct async_poll), 0);
258 	ret |= io_alloc_cache_init(&ctx->netmsg_cache, IO_ALLOC_CACHE_MAX,
259 			    sizeof(struct io_async_msghdr),
260 			    offsetof(struct io_async_msghdr, clear));
261 	ret |= io_alloc_cache_init(&ctx->rw_cache, IO_ALLOC_CACHE_MAX,
262 			    sizeof(struct io_async_rw),
263 			    offsetof(struct io_async_rw, clear));
264 	ret |= io_alloc_cache_init(&ctx->cmd_cache, IO_ALLOC_CACHE_MAX,
265 			    sizeof(struct io_async_cmd),
266 			    sizeof(struct io_async_cmd));
267 	ret |= io_futex_cache_init(ctx);
268 	ret |= io_rsrc_cache_init(ctx);
269 	if (ret)
270 		goto free_ref;
271 	init_completion(&ctx->ref_comp);
272 	xa_init_flags(&ctx->personalities, XA_FLAGS_ALLOC1);
273 	mutex_init(&ctx->uring_lock);
274 	init_waitqueue_head(&ctx->cq_wait);
275 	init_waitqueue_head(&ctx->poll_wq);
276 	spin_lock_init(&ctx->completion_lock);
277 	raw_spin_lock_init(&ctx->timeout_lock);
278 	INIT_LIST_HEAD(&ctx->iopoll_list);
279 	INIT_LIST_HEAD(&ctx->defer_list);
280 	INIT_LIST_HEAD(&ctx->timeout_list);
281 	INIT_LIST_HEAD(&ctx->ltimeout_list);
282 	init_llist_head(&ctx->work_llist);
283 	INIT_LIST_HEAD(&ctx->tctx_list);
284 	mutex_init(&ctx->tctx_lock);
285 	ctx->submit_state.free_list.next = NULL;
286 	INIT_HLIST_HEAD(&ctx->waitid_list);
287 	xa_init_flags(&ctx->zcrx_ctxs, XA_FLAGS_ALLOC);
288 #ifdef CONFIG_FUTEX
289 	INIT_HLIST_HEAD(&ctx->futex_list);
290 #endif
291 	INIT_DELAYED_WORK(&ctx->fallback_work, io_fallback_req_func);
292 	INIT_WQ_LIST(&ctx->submit_state.compl_reqs);
293 	INIT_HLIST_HEAD(&ctx->cancelable_uring_cmd);
294 	io_napi_init(ctx);
295 	mutex_init(&ctx->mmap_lock);
296 
297 	return ctx;
298 
299 free_ref:
300 	percpu_ref_exit(&ctx->refs);
301 err:
302 	io_free_alloc_caches(ctx);
303 	kvfree(ctx->cancel_table.hbs);
304 	xa_destroy(&ctx->io_bl_xa);
305 	kfree(ctx);
306 	return NULL;
307 }
308 
309 static void io_clean_op(struct io_kiocb *req)
310 {
311 	if (unlikely(req->flags & REQ_F_BUFFER_SELECTED))
312 		io_kbuf_drop_legacy(req);
313 
314 	if (req->flags & REQ_F_NEED_CLEANUP) {
315 		const struct io_cold_def *def = &io_cold_defs[req->opcode];
316 
317 		if (def->cleanup)
318 			def->cleanup(req);
319 	}
320 	if (req->flags & REQ_F_INFLIGHT)
321 		atomic_dec(&req->tctx->inflight_tracked);
322 	if (req->flags & REQ_F_CREDS)
323 		put_cred(req->creds);
324 	if (req->flags & REQ_F_ASYNC_DATA) {
325 		kfree(req->async_data);
326 		req->async_data = NULL;
327 	}
328 	req->flags &= ~IO_REQ_CLEAN_FLAGS;
329 }
330 
331 /*
332  * Mark the request as inflight, so that file cancelation will find it.
333  * Can be used if the file is an io_uring instance, or if the request itself
334  * relies on ->mm being alive for the duration of the request.
335  */
336 inline void io_req_track_inflight(struct io_kiocb *req)
337 {
338 	if (!(req->flags & REQ_F_INFLIGHT)) {
339 		req->flags |= REQ_F_INFLIGHT;
340 		atomic_inc(&req->tctx->inflight_tracked);
341 	}
342 }
343 
344 static struct io_kiocb *__io_prep_linked_timeout(struct io_kiocb *req)
345 {
346 	if (WARN_ON_ONCE(!req->link))
347 		return NULL;
348 
349 	req->flags &= ~REQ_F_ARM_LTIMEOUT;
350 	req->flags |= REQ_F_LINK_TIMEOUT;
351 
352 	/* linked timeouts should have two refs once prep'ed */
353 	io_req_set_refcount(req);
354 	__io_req_set_refcount(req->link, 2);
355 	return req->link;
356 }
357 
358 static void io_prep_async_work(struct io_kiocb *req)
359 {
360 	const struct io_issue_def *def = &io_issue_defs[req->opcode];
361 
362 	if (!(req->flags & REQ_F_CREDS)) {
363 		req->flags |= REQ_F_CREDS;
364 		req->creds = get_current_cred();
365 	}
366 
367 	req->work.list.next = NULL;
368 	atomic_set(&req->work.flags, 0);
369 	if (req->flags & REQ_F_FORCE_ASYNC)
370 		atomic_or(IO_WQ_WORK_CONCURRENT, &req->work.flags);
371 
372 	if (req->file && !(req->flags & REQ_F_FIXED_FILE))
373 		req->flags |= io_file_get_flags(req->file);
374 
375 	if (req->file && (req->flags & REQ_F_ISREG)) {
376 		bool should_hash = def->hash_reg_file;
377 
378 		/* don't serialize this request if the fs doesn't need it */
379 		if (should_hash && (req->file->f_flags & O_DIRECT) &&
380 		    (req->file->f_op->fop_flags & FOP_DIO_PARALLEL_WRITE))
381 			should_hash = false;
382 		if (should_hash || (req->flags & REQ_F_IOPOLL))
383 			io_wq_hash_work(&req->work, file_inode(req->file));
384 	} else if (!req->file || !S_ISBLK(file_inode(req->file)->i_mode)) {
385 		if (def->unbound_nonreg_file)
386 			atomic_or(IO_WQ_WORK_UNBOUND, &req->work.flags);
387 	}
388 }
389 
390 static void io_prep_async_link(struct io_kiocb *req)
391 {
392 	struct io_kiocb *cur;
393 
394 	if (req->flags & REQ_F_LINK_TIMEOUT) {
395 		struct io_ring_ctx *ctx = req->ctx;
396 
397 		raw_spin_lock_irq(&ctx->timeout_lock);
398 		io_for_each_link(cur, req)
399 			io_prep_async_work(cur);
400 		raw_spin_unlock_irq(&ctx->timeout_lock);
401 	} else {
402 		io_for_each_link(cur, req)
403 			io_prep_async_work(cur);
404 	}
405 }
406 
407 static void io_queue_iowq(struct io_kiocb *req)
408 {
409 	struct io_uring_task *tctx = req->tctx;
410 
411 	BUG_ON(!tctx);
412 
413 	if ((current->flags & PF_KTHREAD) || !tctx->io_wq) {
414 		io_req_task_queue_fail(req, -ECANCELED);
415 		return;
416 	}
417 
418 	/* init ->work of the whole link before punting */
419 	io_prep_async_link(req);
420 
421 	/*
422 	 * Not expected to happen, but if we do have a bug where this _can_
423 	 * happen, catch it here and ensure the request is marked as
424 	 * canceled. That will make io-wq go through the usual work cancel
425 	 * procedure rather than attempt to run this request (or create a new
426 	 * worker for it).
427 	 */
428 	if (WARN_ON_ONCE(!same_thread_group(tctx->task, current)))
429 		atomic_or(IO_WQ_WORK_CANCEL, &req->work.flags);
430 
431 	trace_io_uring_queue_async_work(req, io_wq_is_hashed(&req->work));
432 	io_wq_enqueue(tctx->io_wq, &req->work);
433 }
434 
435 static void io_req_queue_iowq_tw(struct io_tw_req tw_req, io_tw_token_t tw)
436 {
437 	io_queue_iowq(tw_req.req);
438 }
439 
440 void io_req_queue_iowq(struct io_kiocb *req)
441 {
442 	req->io_task_work.func = io_req_queue_iowq_tw;
443 	io_req_task_work_add(req);
444 }
445 
446 unsigned io_linked_nr(struct io_kiocb *req)
447 {
448 	struct io_kiocb *tmp;
449 	unsigned nr = 0;
450 
451 	io_for_each_link(tmp, req)
452 		nr++;
453 	return nr;
454 }
455 
456 static __cold noinline void io_queue_deferred(struct io_ring_ctx *ctx)
457 {
458 	bool drain_seen = false, first = true;
459 
460 	lockdep_assert_held(&ctx->uring_lock);
461 	__io_req_caches_free(ctx);
462 
463 	while (!list_empty(&ctx->defer_list)) {
464 		struct io_defer_entry *de = list_first_entry(&ctx->defer_list,
465 						struct io_defer_entry, list);
466 
467 		drain_seen |= de->req->flags & REQ_F_IO_DRAIN;
468 		if ((drain_seen || first) && ctx->nr_req_allocated != ctx->nr_drained)
469 			return;
470 
471 		list_del_init(&de->list);
472 		ctx->nr_drained -= io_linked_nr(de->req);
473 		io_req_task_queue(de->req);
474 		kfree(de);
475 		first = false;
476 	}
477 }
478 
479 void __io_commit_cqring_flush(struct io_ring_ctx *ctx)
480 {
481 	if (ctx->int_flags & IO_RING_F_POLL_ACTIVATED)
482 		io_poll_wq_wake(ctx);
483 	if (ctx->int_flags & IO_RING_F_OFF_TIMEOUT_USED)
484 		io_flush_timeouts(ctx);
485 	if (ctx->int_flags & IO_RING_F_HAS_EVFD)
486 		io_eventfd_signal(ctx, true);
487 }
488 
489 static inline void __io_cq_lock(struct io_ring_ctx *ctx)
490 {
491 	if (!(ctx->int_flags & IO_RING_F_LOCKLESS_CQ))
492 		spin_lock(&ctx->completion_lock);
493 }
494 
495 static inline void io_cq_lock(struct io_ring_ctx *ctx)
496 	__acquires(ctx->completion_lock)
497 {
498 	spin_lock(&ctx->completion_lock);
499 }
500 
501 static inline void __io_cq_unlock_post(struct io_ring_ctx *ctx)
502 {
503 	io_commit_cqring(ctx);
504 	if (!(ctx->int_flags & IO_RING_F_TASK_COMPLETE)) {
505 		if (!(ctx->int_flags & IO_RING_F_LOCKLESS_CQ))
506 			spin_unlock(&ctx->completion_lock);
507 		/* IOPOLL rings only need to wake up if it's also SQPOLL */
508 		if (!(ctx->int_flags & IO_RING_F_SYSCALL_IOPOLL))
509 			io_cqring_wake(ctx);
510 	}
511 	io_commit_cqring_flush(ctx);
512 }
513 
514 static void io_cq_unlock_post(struct io_ring_ctx *ctx)
515 	__releases(ctx->completion_lock)
516 {
517 	io_commit_cqring(ctx);
518 	spin_unlock(&ctx->completion_lock);
519 	io_cqring_wake(ctx);
520 	io_commit_cqring_flush(ctx);
521 }
522 
523 static void __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool dying)
524 {
525 	lockdep_assert_held(&ctx->uring_lock);
526 
527 	/* don't abort if we're dying, entries must get freed */
528 	if (!dying && __io_cqring_events(ctx) == ctx->cq_entries)
529 		return;
530 
531 	io_cq_lock(ctx);
532 	while (!list_empty(&ctx->cq_overflow_list)) {
533 		size_t cqe_size = sizeof(struct io_uring_cqe);
534 		struct io_uring_cqe *cqe;
535 		struct io_overflow_cqe *ocqe;
536 		bool is_cqe32 = false;
537 
538 		ocqe = list_first_entry(&ctx->cq_overflow_list,
539 					struct io_overflow_cqe, list);
540 		if (ocqe->cqe.flags & IORING_CQE_F_32 ||
541 		    ctx->flags & IORING_SETUP_CQE32) {
542 			is_cqe32 = true;
543 			cqe_size <<= 1;
544 		}
545 		if (ctx->flags & IORING_SETUP_CQE32)
546 			is_cqe32 = false;
547 
548 		if (!dying) {
549 			if (!io_get_cqe_overflow(ctx, &cqe, true, is_cqe32))
550 				break;
551 			memcpy(cqe, &ocqe->cqe, cqe_size);
552 		}
553 		list_del(&ocqe->list);
554 		kfree(ocqe);
555 
556 		/*
557 		 * For silly syzbot cases that deliberately overflow by huge
558 		 * amounts, check if we need to resched and drop and
559 		 * reacquire the locks if so. Nothing real would ever hit this.
560 		 * Ideally we'd have a non-posting unlock for this, but hard
561 		 * to care for a non-real case.
562 		 */
563 		if (need_resched()) {
564 			ctx->cqe_sentinel = ctx->cqe_cached;
565 			io_cq_unlock_post(ctx);
566 			mutex_unlock(&ctx->uring_lock);
567 			cond_resched();
568 			mutex_lock(&ctx->uring_lock);
569 			io_cq_lock(ctx);
570 		}
571 	}
572 
573 	if (list_empty(&ctx->cq_overflow_list)) {
574 		clear_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq);
575 		atomic_andnot(IORING_SQ_CQ_OVERFLOW, &ctx->rings->sq_flags);
576 	}
577 	io_cq_unlock_post(ctx);
578 }
579 
580 static void io_cqring_overflow_kill(struct io_ring_ctx *ctx)
581 {
582 	if (ctx->rings)
583 		__io_cqring_overflow_flush(ctx, true);
584 }
585 
586 void io_cqring_do_overflow_flush(struct io_ring_ctx *ctx)
587 {
588 	mutex_lock(&ctx->uring_lock);
589 	__io_cqring_overflow_flush(ctx, false);
590 	mutex_unlock(&ctx->uring_lock);
591 }
592 
593 void io_cqring_overflow_flush_locked(struct io_ring_ctx *ctx)
594 {
595 	__io_cqring_overflow_flush(ctx, false);
596 }
597 
598 /* must be called somewhat shortly after putting a request */
599 static inline void io_put_task(struct io_kiocb *req)
600 {
601 	struct io_uring_task *tctx = req->tctx;
602 
603 	if (likely(tctx->task == current)) {
604 		tctx->cached_refs++;
605 	} else {
606 		percpu_counter_sub(&tctx->inflight, 1);
607 		if (unlikely(atomic_read(&tctx->in_cancel)))
608 			wake_up(&tctx->wait);
609 		put_task_struct(tctx->task);
610 	}
611 }
612 
613 void io_task_refs_refill(struct io_uring_task *tctx)
614 {
615 	unsigned int refill = -tctx->cached_refs + IO_TCTX_REFS_CACHE_NR;
616 
617 	percpu_counter_add(&tctx->inflight, refill);
618 	refcount_add(refill, &current->usage);
619 	tctx->cached_refs += refill;
620 }
621 
622 __cold void io_uring_drop_tctx_refs(struct task_struct *task)
623 {
624 	struct io_uring_task *tctx = task->io_uring;
625 	unsigned int refs = tctx->cached_refs;
626 
627 	if (refs) {
628 		tctx->cached_refs = 0;
629 		percpu_counter_sub(&tctx->inflight, refs);
630 		put_task_struct_many(task, refs);
631 	}
632 }
633 
634 static __cold bool io_cqring_add_overflow(struct io_ring_ctx *ctx,
635 					  struct io_overflow_cqe *ocqe)
636 {
637 	lockdep_assert_held(&ctx->completion_lock);
638 
639 	if (!ocqe) {
640 		struct io_rings *r = ctx->rings;
641 
642 		/*
643 		 * If we're in ring overflow flush mode, or in task cancel mode,
644 		 * or cannot allocate an overflow entry, then we need to drop it
645 		 * on the floor.
646 		 */
647 		WRITE_ONCE(r->cq_overflow, READ_ONCE(r->cq_overflow) + 1);
648 		set_bit(IO_CHECK_CQ_DROPPED_BIT, &ctx->check_cq);
649 		return false;
650 	}
651 	if (list_empty(&ctx->cq_overflow_list)) {
652 		set_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq);
653 		atomic_or(IORING_SQ_CQ_OVERFLOW, &ctx->rings->sq_flags);
654 
655 	}
656 	list_add_tail(&ocqe->list, &ctx->cq_overflow_list);
657 	return true;
658 }
659 
660 static struct io_overflow_cqe *io_alloc_ocqe(struct io_ring_ctx *ctx,
661 					     struct io_cqe *cqe,
662 					     struct io_big_cqe *big_cqe, gfp_t gfp)
663 {
664 	struct io_overflow_cqe *ocqe;
665 	size_t ocq_size = sizeof(struct io_overflow_cqe);
666 	bool is_cqe32 = false;
667 
668 	if (cqe->flags & IORING_CQE_F_32 || ctx->flags & IORING_SETUP_CQE32) {
669 		is_cqe32 = true;
670 		ocq_size += sizeof(struct io_uring_cqe);
671 	}
672 
673 	ocqe = kzalloc(ocq_size, gfp | __GFP_ACCOUNT);
674 	trace_io_uring_cqe_overflow(ctx, cqe->user_data, cqe->res, cqe->flags, ocqe);
675 	if (ocqe) {
676 		ocqe->cqe.user_data = cqe->user_data;
677 		ocqe->cqe.res = cqe->res;
678 		ocqe->cqe.flags = cqe->flags;
679 		if (is_cqe32 && big_cqe) {
680 			ocqe->cqe.big_cqe[0] = big_cqe->extra1;
681 			ocqe->cqe.big_cqe[1] = big_cqe->extra2;
682 		}
683 	}
684 	if (big_cqe)
685 		big_cqe->extra1 = big_cqe->extra2 = 0;
686 	return ocqe;
687 }
688 
689 /*
690  * Compute queued CQEs for free-space calculation, clamped to cq_entries.
691  */
692 static unsigned int io_cqring_queued(struct io_ring_ctx *ctx)
693 {
694 	struct io_rings *rings = io_get_rings(ctx);
695 	int diff;
696 
697 	diff = (int)(ctx->cached_cq_tail - READ_ONCE(rings->cq.head));
698 	if (diff >= 0)
699 		return min((unsigned int)diff, ctx->cq_entries);
700 	return 0;
701 }
702 
703 /*
704  * Fill an empty dummy CQE, in case alignment is off for posting a 32b CQE
705  * because the ring is a single 16b entry away from wrapping.
706  */
707 static bool io_fill_nop_cqe(struct io_ring_ctx *ctx, unsigned int off)
708 {
709 	if (io_cqring_queued(ctx) < ctx->cq_entries) {
710 		struct io_uring_cqe *cqe = &ctx->rings->cqes[off];
711 
712 		cqe->user_data = 0;
713 		cqe->res = 0;
714 		cqe->flags = IORING_CQE_F_SKIP;
715 		ctx->cached_cq_tail++;
716 		return true;
717 	}
718 	return false;
719 }
720 
721 /*
722  * writes to the cq entry need to come after reading head; the
723  * control dependency is enough as we're using WRITE_ONCE to
724  * fill the cq entry
725  */
726 bool io_cqe_cache_refill(struct io_ring_ctx *ctx, bool overflow, bool cqe32)
727 {
728 	struct io_rings *rings = ctx->rings;
729 	unsigned int off = ctx->cached_cq_tail & (ctx->cq_entries - 1);
730 	unsigned int free, len;
731 
732 	/*
733 	 * Posting into the CQ when there are pending overflowed CQEs may break
734 	 * ordering guarantees, which will affect links, F_MORE users and more.
735 	 * Force overflow the completion.
736 	 */
737 	if (!overflow && (ctx->check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT)))
738 		return false;
739 
740 	/*
741 	 * Post dummy CQE if a 32b CQE is needed and there's only room for a
742 	 * 16b CQE before the ring wraps.
743 	 */
744 	if (cqe32 && off + 1 == ctx->cq_entries) {
745 		if (!io_fill_nop_cqe(ctx, off))
746 			return false;
747 		off = 0;
748 	}
749 
750 	free = ctx->cq_entries - io_cqring_queued(ctx);
751 	/* we need a contiguous range, limit based on the current array offset */
752 	len = min(free, ctx->cq_entries - off);
753 	if (len < (cqe32 + 1))
754 		return false;
755 
756 	if (ctx->flags & IORING_SETUP_CQE32) {
757 		off <<= 1;
758 		len <<= 1;
759 	}
760 
761 	ctx->cqe_cached = &rings->cqes[off];
762 	ctx->cqe_sentinel = ctx->cqe_cached + len;
763 	return true;
764 }
765 
766 static bool io_fill_cqe_aux32(struct io_ring_ctx *ctx,
767 			      struct io_uring_cqe src_cqe[2])
768 {
769 	struct io_uring_cqe *cqe;
770 
771 	if (WARN_ON_ONCE(!(ctx->flags & (IORING_SETUP_CQE32|IORING_SETUP_CQE_MIXED))))
772 		return false;
773 	if (unlikely(!io_get_cqe(ctx, &cqe, true)))
774 		return false;
775 
776 	memcpy(cqe, src_cqe, 2 * sizeof(*cqe));
777 	trace_io_uring_complete(ctx, NULL, cqe);
778 	return true;
779 }
780 
781 static bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res,
782 			      u32 cflags)
783 {
784 	bool cqe32 = cflags & IORING_CQE_F_32;
785 	struct io_uring_cqe *cqe;
786 
787 	if (likely(io_get_cqe(ctx, &cqe, cqe32))) {
788 		WRITE_ONCE(cqe->user_data, user_data);
789 		WRITE_ONCE(cqe->res, res);
790 		WRITE_ONCE(cqe->flags, cflags);
791 
792 		if (cqe32) {
793 			WRITE_ONCE(cqe->big_cqe[0], 0);
794 			WRITE_ONCE(cqe->big_cqe[1], 0);
795 		}
796 
797 		trace_io_uring_complete(ctx, NULL, cqe);
798 		return true;
799 	}
800 	return false;
801 }
802 
803 static inline struct io_cqe io_init_cqe(u64 user_data, s32 res, u32 cflags)
804 {
805 	return (struct io_cqe) { .user_data = user_data, .res = res, .flags = cflags };
806 }
807 
808 static __cold void io_cqe_overflow(struct io_ring_ctx *ctx, struct io_cqe *cqe,
809 				   struct io_big_cqe *big_cqe)
810 {
811 	struct io_overflow_cqe *ocqe;
812 
813 	ocqe = io_alloc_ocqe(ctx, cqe, big_cqe, GFP_KERNEL);
814 	spin_lock(&ctx->completion_lock);
815 	io_cqring_add_overflow(ctx, ocqe);
816 	spin_unlock(&ctx->completion_lock);
817 }
818 
819 static __cold bool io_cqe_overflow_locked(struct io_ring_ctx *ctx,
820 					  struct io_cqe *cqe,
821 					  struct io_big_cqe *big_cqe)
822 {
823 	struct io_overflow_cqe *ocqe;
824 
825 	ocqe = io_alloc_ocqe(ctx, cqe, big_cqe, GFP_NOWAIT);
826 	return io_cqring_add_overflow(ctx, ocqe);
827 }
828 
829 bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags)
830 {
831 	bool filled;
832 
833 	io_cq_lock(ctx);
834 	filled = io_fill_cqe_aux(ctx, user_data, res, cflags);
835 	if (unlikely(!filled)) {
836 		struct io_cqe cqe = io_init_cqe(user_data, res, cflags);
837 
838 		filled = io_cqe_overflow_locked(ctx, &cqe, NULL);
839 	}
840 	io_cq_unlock_post(ctx);
841 	return filled;
842 }
843 
844 /*
845  * Must be called from inline task_work so we know a flush will happen later,
846  * and obviously with ctx->uring_lock held (tw always has that).
847  */
848 void io_add_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags)
849 {
850 	lockdep_assert_held(&ctx->uring_lock);
851 	lockdep_assert(ctx->int_flags & IO_RING_F_LOCKLESS_CQ);
852 
853 	if (!io_fill_cqe_aux(ctx, user_data, res, cflags)) {
854 		struct io_cqe cqe = io_init_cqe(user_data, res, cflags);
855 
856 		io_cqe_overflow(ctx, &cqe, NULL);
857 	}
858 	ctx->submit_state.cq_flush = true;
859 }
860 
861 /*
862  * A helper for multishot requests posting additional CQEs.
863  * Should only be used from a task_work including IO_URING_F_MULTISHOT.
864  */
865 bool io_req_post_cqe(struct io_kiocb *req, s32 res, u32 cflags)
866 {
867 	struct io_ring_ctx *ctx = req->ctx;
868 	bool posted;
869 
870 	/*
871 	 * If multishot has already posted deferred completions, ensure that
872 	 * those are flushed first before posting this one. If not, CQEs
873 	 * could get reordered.
874 	 */
875 	if (!wq_list_empty(&ctx->submit_state.compl_reqs))
876 		__io_submit_flush_completions(ctx);
877 
878 	lockdep_assert(!io_wq_current_is_worker());
879 	lockdep_assert_held(&ctx->uring_lock);
880 
881 	if (!(ctx->int_flags & IO_RING_F_LOCKLESS_CQ)) {
882 		spin_lock(&ctx->completion_lock);
883 		posted = io_fill_cqe_aux(ctx, req->cqe.user_data, res, cflags);
884 		spin_unlock(&ctx->completion_lock);
885 	} else {
886 		posted = io_fill_cqe_aux(ctx, req->cqe.user_data, res, cflags);
887 	}
888 
889 	ctx->submit_state.cq_flush = true;
890 	return posted;
891 }
892 
893 /*
894  * A helper for multishot requests posting additional CQEs.
895  * Should only be used from a task_work including IO_URING_F_MULTISHOT.
896  */
897 bool io_req_post_cqe32(struct io_kiocb *req, struct io_uring_cqe cqe[2])
898 {
899 	struct io_ring_ctx *ctx = req->ctx;
900 	bool posted;
901 
902 	lockdep_assert(!io_wq_current_is_worker());
903 	lockdep_assert_held(&ctx->uring_lock);
904 
905 	cqe[0].user_data = req->cqe.user_data;
906 	if (!(ctx->int_flags & IO_RING_F_LOCKLESS_CQ)) {
907 		spin_lock(&ctx->completion_lock);
908 		posted = io_fill_cqe_aux32(ctx, cqe);
909 		spin_unlock(&ctx->completion_lock);
910 	} else {
911 		posted = io_fill_cqe_aux32(ctx, cqe);
912 	}
913 
914 	ctx->submit_state.cq_flush = true;
915 	return posted;
916 }
917 
918 static void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags)
919 {
920 	struct io_ring_ctx *ctx = req->ctx;
921 	bool completed = true;
922 
923 	/*
924 	 * All execution paths but io-wq use the deferred completions by
925 	 * passing IO_URING_F_COMPLETE_DEFER and thus should not end up here.
926 	 */
927 	if (WARN_ON_ONCE(!(issue_flags & IO_URING_F_IOWQ)))
928 		return;
929 
930 	/*
931 	 * Handle special CQ sync cases via task_work. DEFER_TASKRUN requires
932 	 * the submitter task context, IOPOLL protects with uring_lock.
933 	 */
934 	if ((ctx->int_flags & IO_RING_F_LOCKLESS_CQ) || (req->flags & REQ_F_REISSUE)) {
935 defer_complete:
936 		req->io_task_work.func = io_req_task_complete;
937 		io_req_task_work_add(req);
938 		return;
939 	}
940 
941 	io_cq_lock(ctx);
942 	if (!(req->flags & REQ_F_CQE_SKIP))
943 		completed = io_fill_cqe_req(ctx, req);
944 	io_cq_unlock_post(ctx);
945 
946 	if (!completed)
947 		goto defer_complete;
948 
949 	/*
950 	 * We don't free the request here because we know it's called from
951 	 * io-wq only, which holds a reference, so it cannot be the last put.
952 	 */
953 	req_ref_put(req);
954 }
955 
956 void io_req_defer_failed(struct io_kiocb *req, s32 res)
957 	__must_hold(&ctx->uring_lock)
958 {
959 	const struct io_cold_def *def = &io_cold_defs[req->opcode];
960 
961 	lockdep_assert_held(&req->ctx->uring_lock);
962 
963 	req_set_fail(req);
964 	io_req_set_res(req, res, io_put_kbuf(req, res, NULL));
965 	if (def->fail)
966 		def->fail(req);
967 	io_req_complete_defer(req);
968 }
969 
970 /*
971  * A request might get retired back into the request caches even before opcode
972  * handlers and io_issue_sqe() are done with it, e.g. inline completion path.
973  * Because of that, io_alloc_req() should be called only under ->uring_lock
974  * and with extra caution to not get a request that is still worked on.
975  */
976 __cold bool __io_alloc_req_refill(struct io_ring_ctx *ctx)
977 	__must_hold(&ctx->uring_lock)
978 {
979 	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO;
980 	void *reqs[IO_REQ_ALLOC_BATCH];
981 	int ret;
982 
983 	ret = kmem_cache_alloc_bulk(req_cachep, gfp, ARRAY_SIZE(reqs), reqs);
984 
985 	/*
986 	 * Bulk alloc is all-or-nothing. If we fail to get a batch,
987 	 * retry single alloc to be on the safe side.
988 	 */
989 	if (unlikely(ret <= 0)) {
990 		reqs[0] = kmem_cache_alloc(req_cachep, gfp);
991 		if (!reqs[0])
992 			return false;
993 		ret = 1;
994 	}
995 
996 	percpu_ref_get_many(&ctx->refs, ret);
997 	ctx->nr_req_allocated += ret;
998 
999 	while (ret--) {
1000 		struct io_kiocb *req = reqs[ret];
1001 
1002 		io_req_add_to_cache(req, ctx);
1003 	}
1004 	return true;
1005 }
1006 
1007 __cold void io_free_req(struct io_kiocb *req)
1008 {
1009 	/* refs were already put, restore them for io_req_task_complete() */
1010 	req->flags &= ~REQ_F_REFCOUNT;
1011 	/* we only want to free it, don't post CQEs */
1012 	req->flags |= REQ_F_CQE_SKIP;
1013 	req->io_task_work.func = io_req_task_complete;
1014 	io_req_task_work_add(req);
1015 }
1016 
1017 static void __io_req_find_next_prep(struct io_kiocb *req)
1018 {
1019 	struct io_ring_ctx *ctx = req->ctx;
1020 
1021 	spin_lock(&ctx->completion_lock);
1022 	io_disarm_next(req);
1023 	spin_unlock(&ctx->completion_lock);
1024 }
1025 
1026 static inline struct io_kiocb *io_req_find_next(struct io_kiocb *req)
1027 {
1028 	struct io_kiocb *nxt;
1029 
1030 	/*
1031 	 * If LINK is set, we have dependent requests in this chain. If we
1032 	 * didn't fail this request, queue the first one up, moving any other
1033 	 * dependencies to the next request. In case of failure, fail the rest
1034 	 * of the chain.
1035 	 */
1036 	if (unlikely(req->flags & IO_DISARM_MASK))
1037 		__io_req_find_next_prep(req);
1038 	nxt = req->link;
1039 	req->link = NULL;
1040 	return nxt;
1041 }
1042 
1043 static void io_req_task_cancel(struct io_tw_req tw_req, io_tw_token_t tw)
1044 {
1045 	struct io_kiocb *req = tw_req.req;
1046 
1047 	io_tw_lock(req->ctx, tw);
1048 	io_req_defer_failed(req, req->cqe.res);
1049 }
1050 
1051 void io_req_task_submit(struct io_tw_req tw_req, io_tw_token_t tw)
1052 {
1053 	struct io_kiocb *req = tw_req.req;
1054 	struct io_ring_ctx *ctx = req->ctx;
1055 
1056 	io_tw_lock(ctx, tw);
1057 	if (unlikely(tw.cancel))
1058 		io_req_defer_failed(req, -EFAULT);
1059 	else if (req->flags & REQ_F_FORCE_ASYNC)
1060 		io_queue_iowq(req);
1061 	else
1062 		io_queue_sqe(req, 0);
1063 }
1064 
1065 void io_req_task_queue_fail(struct io_kiocb *req, int ret)
1066 {
1067 	io_req_set_res(req, ret, 0);
1068 	req->io_task_work.func = io_req_task_cancel;
1069 	io_req_task_work_add(req);
1070 }
1071 
1072 void io_req_task_queue(struct io_kiocb *req)
1073 {
1074 	req->io_task_work.func = io_req_task_submit;
1075 	io_req_task_work_add(req);
1076 }
1077 
1078 void io_queue_next(struct io_kiocb *req)
1079 {
1080 	struct io_kiocb *nxt = io_req_find_next(req);
1081 
1082 	if (nxt)
1083 		io_req_task_queue(nxt);
1084 }
1085 
1086 static inline void io_req_put_rsrc_nodes(struct io_kiocb *req)
1087 {
1088 	struct io_ring_ctx *ctx = req->ctx;
1089 
1090 	if (req->file_node) {
1091 		io_put_rsrc_node(ctx, req->file_node);
1092 		req->file_node = NULL;
1093 	}
1094 	if (req->flags & REQ_F_BUF_NODE)
1095 		io_put_rsrc_node(ctx, req->buf_node);
1096 }
1097 
1098 static void io_free_batch_list(struct io_ring_ctx *ctx,
1099 			       struct io_wq_work_node *node)
1100 	__must_hold(&ctx->uring_lock)
1101 {
1102 	do {
1103 		struct io_kiocb *req = container_of(node, struct io_kiocb,
1104 						    comp_list);
1105 
1106 		if (unlikely(req->flags & IO_REQ_CLEAN_SLOW_FLAGS)) {
1107 			if (req->flags & REQ_F_REISSUE) {
1108 				node = req->comp_list.next;
1109 				req->flags &= ~REQ_F_REISSUE;
1110 				io_queue_iowq(req);
1111 				continue;
1112 			}
1113 			if (req->flags & REQ_F_REFCOUNT) {
1114 				node = req->comp_list.next;
1115 				if (!req_ref_put_and_test(req))
1116 					continue;
1117 			}
1118 			if ((req->flags & REQ_F_POLLED) && req->apoll) {
1119 				struct async_poll *apoll = req->apoll;
1120 
1121 				if (apoll->double_poll)
1122 					kfree(apoll->double_poll);
1123 				io_cache_free(&ctx->apoll_cache, apoll);
1124 				req->flags &= ~REQ_F_POLLED;
1125 			}
1126 			if (req->flags & IO_REQ_LINK_FLAGS)
1127 				io_queue_next(req);
1128 			if (unlikely(req->flags & IO_REQ_CLEAN_FLAGS))
1129 				io_clean_op(req);
1130 		}
1131 		io_put_file(req);
1132 		io_req_put_rsrc_nodes(req);
1133 		io_put_task(req);
1134 
1135 		node = req->comp_list.next;
1136 		io_req_add_to_cache(req, ctx);
1137 	} while (node);
1138 }
1139 
1140 void __io_submit_flush_completions(struct io_ring_ctx *ctx)
1141 	__must_hold(&ctx->uring_lock)
1142 {
1143 	struct io_submit_state *state = &ctx->submit_state;
1144 	struct io_wq_work_node *node;
1145 
1146 	__io_cq_lock(ctx);
1147 	__wq_list_for_each(node, &state->compl_reqs) {
1148 		struct io_kiocb *req = container_of(node, struct io_kiocb,
1149 					    comp_list);
1150 
1151 		/*
1152 		 * Requests marked with REQUEUE should not post a CQE, they
1153 		 * will go through the io-wq retry machinery and post one
1154 		 * later.
1155 		 */
1156 		if (!(req->flags & (REQ_F_CQE_SKIP | REQ_F_REISSUE)) &&
1157 		    unlikely(!io_fill_cqe_req(ctx, req))) {
1158 			if (ctx->int_flags & IO_RING_F_LOCKLESS_CQ)
1159 				io_cqe_overflow(ctx, &req->cqe, &req->big_cqe);
1160 			else
1161 				io_cqe_overflow_locked(ctx, &req->cqe, &req->big_cqe);
1162 		}
1163 	}
1164 	__io_cq_unlock_post(ctx);
1165 
1166 	if (!wq_list_empty(&state->compl_reqs)) {
1167 		io_free_batch_list(ctx, state->compl_reqs.first);
1168 		INIT_WQ_LIST(&state->compl_reqs);
1169 	}
1170 
1171 	if (unlikely(ctx->int_flags & IO_RING_F_DRAIN_ACTIVE))
1172 		io_queue_deferred(ctx);
1173 
1174 	ctx->submit_state.cq_flush = false;
1175 }
1176 
1177 /*
1178  * We can't just wait for polled events to come to us, we have to actively
1179  * find and complete them.
1180  */
1181 __cold void io_iopoll_try_reap_events(struct io_ring_ctx *ctx)
1182 {
1183 	if (!(ctx->flags & IORING_SETUP_IOPOLL))
1184 		return;
1185 
1186 	mutex_lock(&ctx->uring_lock);
1187 	while (!list_empty(&ctx->iopoll_list)) {
1188 		/* let it sleep and repeat later if we can't complete a request */
1189 		if (io_do_iopoll(ctx, true) == 0)
1190 			break;
1191 		/*
1192 		 * Ensure we allow local-to-the-cpu processing to take place,
1193 		 * in this case we need to ensure that we reap all events.
1194 		 * Also let task_work, etc. progress by releasing the mutex
1195 		 */
1196 		if (need_resched()) {
1197 			mutex_unlock(&ctx->uring_lock);
1198 			cond_resched();
1199 			mutex_lock(&ctx->uring_lock);
1200 		}
1201 	}
1202 	mutex_unlock(&ctx->uring_lock);
1203 
1204 	if (ctx->flags & IORING_SETUP_DEFER_TASKRUN)
1205 		io_move_task_work_from_local(ctx);
1206 }
1207 
1208 static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned int min_events)
1209 {
1210 	unsigned long check_cq;
1211 
1212 	min_events = min(min_events, ctx->cq_entries);
1213 
1214 	lockdep_assert_held(&ctx->uring_lock);
1215 
1216 	if (!io_allowed_run_tw(ctx))
1217 		return -EEXIST;
1218 
1219 	check_cq = READ_ONCE(ctx->check_cq);
1220 	if (unlikely(check_cq)) {
1221 		if (check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT))
1222 			__io_cqring_overflow_flush(ctx, false);
1223 		/*
1224 		 * Similarly do not spin if we have not informed the user of any
1225 		 * dropped CQE.
1226 		 */
1227 		if (check_cq & BIT(IO_CHECK_CQ_DROPPED_BIT))
1228 			return -EBADR;
1229 	}
1230 	/*
1231 	 * Don't enter poll loop if we already have events pending.
1232 	 * If we do, we can potentially be spinning for commands that
1233 	 * already triggered a CQE (eg in error).
1234 	 */
1235 	if (io_cqring_events(ctx))
1236 		return 0;
1237 
1238 	do {
1239 		int ret = 0;
1240 
1241 		/*
1242 		 * If a submit got punted to a workqueue, we can have the
1243 		 * application entering polling for a command before it gets
1244 		 * issued. That app will hold the uring_lock for the duration
1245 		 * of the poll right here, so we need to take a breather every
1246 		 * now and then to ensure that the issue has a chance to add
1247 		 * the poll to the issued list. Otherwise we can spin here
1248 		 * forever, while the workqueue is stuck trying to acquire the
1249 		 * very same mutex.
1250 		 */
1251 		if (list_empty(&ctx->iopoll_list) || io_task_work_pending(ctx)) {
1252 			(void) io_run_local_work_locked(ctx, min_events);
1253 
1254 			if (task_work_pending(current) || list_empty(&ctx->iopoll_list)) {
1255 				mutex_unlock(&ctx->uring_lock);
1256 				io_run_task_work();
1257 				mutex_lock(&ctx->uring_lock);
1258 			}
1259 			/* some requests don't go through iopoll_list */
1260 			if (list_empty(&ctx->iopoll_list))
1261 				break;
1262 		}
1263 		ret = io_do_iopoll(ctx, !min_events);
1264 		if (unlikely(ret < 0))
1265 			return ret;
1266 
1267 		if (task_sigpending(current))
1268 			return -EINTR;
1269 		if (need_resched())
1270 			break;
1271 	} while (io_cqring_events(ctx) < min_events);
1272 
1273 	return 0;
1274 }
1275 
1276 void io_req_task_complete(struct io_tw_req tw_req, io_tw_token_t tw)
1277 {
1278 	io_req_complete_defer(tw_req.req);
1279 }
1280 
1281 /*
1282  * After the iocb has been issued, it's safe to be found on the poll list.
1283  * Adding the kiocb to the list AFTER submission ensures that we don't
1284  * find it from a io_do_iopoll() thread before the issuer is done
1285  * accessing the kiocb cookie.
1286  */
1287 static void io_iopoll_req_issued(struct io_kiocb *req, unsigned int issue_flags)
1288 {
1289 	struct io_ring_ctx *ctx = req->ctx;
1290 	const bool needs_lock = issue_flags & IO_URING_F_UNLOCKED;
1291 
1292 	/* workqueue context doesn't hold uring_lock, grab it now */
1293 	if (unlikely(needs_lock))
1294 		mutex_lock(&ctx->uring_lock);
1295 
1296 	/*
1297 	 * Track whether we have multiple files in our lists. This will impact
1298 	 * how we do polling eventually, not spinning if we're on potentially
1299 	 * different devices.
1300 	 */
1301 	if (list_empty(&ctx->iopoll_list)) {
1302 		ctx->poll_multi_queue = false;
1303 	} else if (!ctx->poll_multi_queue) {
1304 		struct io_kiocb *list_req;
1305 
1306 		list_req = list_first_entry(&ctx->iopoll_list, struct io_kiocb, iopoll_node);
1307 		if (list_req->file != req->file)
1308 			ctx->poll_multi_queue = true;
1309 	}
1310 
1311 	list_add_tail(&req->iopoll_node, &ctx->iopoll_list);
1312 
1313 	if (unlikely(needs_lock)) {
1314 		/*
1315 		 * If IORING_SETUP_SQPOLL is enabled, SQEs are either handled
1316 		 * in the SQ thread task context or in an io worker task context.
1317 		 * If the current task context is the SQ thread, we don't need to
1318 		 * check whether we should wake up the SQ thread.
1319 		 */
1320 		if ((ctx->flags & IORING_SETUP_SQPOLL) &&
1321 		    wq_has_sleeper(&ctx->sq_data->wait))
1322 			wake_up(&ctx->sq_data->wait);
1323 
1324 		mutex_unlock(&ctx->uring_lock);
1325 	}
1326 }
1327 
1328 io_req_flags_t io_file_get_flags(struct file *file)
1329 {
1330 	io_req_flags_t res = 0;
1331 
1332 	BUILD_BUG_ON(REQ_F_ISREG_BIT != REQ_F_SUPPORT_NOWAIT_BIT + 1);
1333 
1334 	if (S_ISREG(file_inode(file)->i_mode))
1335 		res |= REQ_F_ISREG;
1336 	if ((file->f_flags & O_NONBLOCK) || (file->f_mode & FMODE_NOWAIT))
1337 		res |= REQ_F_SUPPORT_NOWAIT;
1338 	return res;
1339 }
1340 
1341 static __cold void io_drain_req(struct io_kiocb *req)
1342 	__must_hold(&ctx->uring_lock)
1343 {
1344 	struct io_ring_ctx *ctx = req->ctx;
1345 	bool drain = req->flags & IOSQE_IO_DRAIN;
1346 	struct io_defer_entry *de;
1347 
1348 	de = kmalloc_obj(*de, GFP_KERNEL_ACCOUNT);
1349 	if (!de) {
1350 		io_req_defer_failed(req, -ENOMEM);
1351 		return;
1352 	}
1353 
1354 	io_prep_async_link(req);
1355 	trace_io_uring_defer(req);
1356 	de->req = req;
1357 
1358 	ctx->nr_drained += io_linked_nr(req);
1359 	list_add_tail(&de->list, &ctx->defer_list);
1360 	io_queue_deferred(ctx);
1361 	if (!drain && list_empty(&ctx->defer_list))
1362 		ctx->int_flags &= ~IO_RING_F_DRAIN_ACTIVE;
1363 }
1364 
1365 static bool io_assign_file(struct io_kiocb *req, const struct io_issue_def *def,
1366 			   unsigned int issue_flags)
1367 {
1368 	if (req->file || !def->needs_file)
1369 		return true;
1370 
1371 	if (req->flags & REQ_F_FIXED_FILE)
1372 		req->file = io_file_get_fixed(req, req->cqe.fd, issue_flags);
1373 	else
1374 		req->file = io_file_get_normal(req, req->cqe.fd);
1375 
1376 	return !!req->file;
1377 }
1378 
1379 #define REQ_ISSUE_SLOW_FLAGS	(REQ_F_CREDS | REQ_F_ARM_LTIMEOUT)
1380 
1381 static inline int __io_issue_sqe(struct io_kiocb *req,
1382 				 unsigned int issue_flags,
1383 				 const struct io_issue_def *def)
1384 {
1385 	const struct cred *creds = NULL;
1386 	struct io_kiocb *link = NULL;
1387 	int ret;
1388 
1389 	if (unlikely(req->flags & REQ_ISSUE_SLOW_FLAGS)) {
1390 		if ((req->flags & REQ_F_CREDS) && req->creds != current_cred())
1391 			creds = override_creds(req->creds);
1392 		if (req->flags & REQ_F_ARM_LTIMEOUT)
1393 			link = __io_prep_linked_timeout(req);
1394 	}
1395 
1396 	if (!def->audit_skip)
1397 		audit_uring_entry(req->opcode);
1398 
1399 	ret = def->issue(req, issue_flags);
1400 
1401 	if (!def->audit_skip)
1402 		audit_uring_exit(!ret, ret);
1403 
1404 	if (unlikely(creds || link)) {
1405 		if (creds)
1406 			revert_creds(creds);
1407 		if (link)
1408 			io_queue_linked_timeout(link);
1409 	}
1410 
1411 	return ret;
1412 }
1413 
1414 static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
1415 {
1416 	const struct io_issue_def *def = &io_issue_defs[req->opcode];
1417 	int ret;
1418 
1419 	if (unlikely(!io_assign_file(req, def, issue_flags)))
1420 		return -EBADF;
1421 
1422 	ret = __io_issue_sqe(req, issue_flags, def);
1423 
1424 	if (ret == IOU_COMPLETE) {
1425 		if (issue_flags & IO_URING_F_COMPLETE_DEFER)
1426 			io_req_complete_defer(req);
1427 		else
1428 			io_req_complete_post(req, issue_flags);
1429 
1430 		return 0;
1431 	}
1432 
1433 	if (ret == IOU_ISSUE_SKIP_COMPLETE) {
1434 		ret = 0;
1435 
1436 		if (req->flags & REQ_F_IOPOLL)
1437 			io_iopoll_req_issued(req, issue_flags);
1438 	}
1439 	return ret;
1440 }
1441 
1442 int io_poll_issue(struct io_kiocb *req, io_tw_token_t tw)
1443 {
1444 	const unsigned int issue_flags = IO_URING_F_NONBLOCK |
1445 					 IO_URING_F_MULTISHOT |
1446 					 IO_URING_F_COMPLETE_DEFER;
1447 	int ret;
1448 
1449 	io_tw_lock(req->ctx, tw);
1450 
1451 	WARN_ON_ONCE(!req->file);
1452 	if (WARN_ON_ONCE(req->flags & REQ_F_IOPOLL))
1453 		return -EFAULT;
1454 
1455 	ret = __io_issue_sqe(req, issue_flags, &io_issue_defs[req->opcode]);
1456 
1457 	WARN_ON_ONCE(ret == IOU_ISSUE_SKIP_COMPLETE);
1458 	return ret;
1459 }
1460 
1461 struct io_wq_work *io_wq_free_work(struct io_wq_work *work)
1462 {
1463 	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
1464 	struct io_kiocb *nxt = NULL;
1465 
1466 	if (req_ref_put_and_test_atomic(req)) {
1467 		if (req->flags & IO_REQ_LINK_FLAGS) {
1468 			struct io_ring_ctx *ctx = req->ctx;
1469 
1470 			mutex_lock(&ctx->uring_lock);
1471 			nxt = io_req_find_next(req);
1472 			mutex_unlock(&ctx->uring_lock);
1473 		}
1474 		io_free_req(req);
1475 	}
1476 	return nxt ? &nxt->work : NULL;
1477 }
1478 
1479 void io_wq_submit_work(struct io_wq_work *work)
1480 {
1481 	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
1482 	const struct io_issue_def *def = &io_issue_defs[req->opcode];
1483 	unsigned int issue_flags = IO_URING_F_UNLOCKED | IO_URING_F_IOWQ;
1484 	bool needs_poll = false;
1485 	int ret = 0, err = -ECANCELED;
1486 
1487 	/* one will be dropped by io_wq_free_work() after returning to io-wq */
1488 	if (!(req->flags & REQ_F_REFCOUNT))
1489 		__io_req_set_refcount(req, 2);
1490 	else
1491 		req_ref_get(req);
1492 
1493 	/* either cancelled or io-wq is dying, so don't touch tctx->iowq */
1494 	if (atomic_read(&work->flags) & IO_WQ_WORK_CANCEL) {
1495 fail:
1496 		io_req_task_queue_fail(req, err);
1497 		return;
1498 	}
1499 	if (!io_assign_file(req, def, issue_flags)) {
1500 		err = -EBADF;
1501 		atomic_or(IO_WQ_WORK_CANCEL, &work->flags);
1502 		goto fail;
1503 	}
1504 
1505 	/*
1506 	 * If DEFER_TASKRUN is set, it's only allowed to post CQEs from the
1507 	 * submitter task context. Final request completions are handed to the
1508 	 * right context; however, this is not the case for auxiliary CQEs,
1509 	 * which are the main means of operation for multishot requests.
1510 	 * Don't allow any multishot execution from io-wq. It's more restrictive
1511 	 * than necessary and also cleaner.
1512 	 */
1513 	if (req->flags & (REQ_F_MULTISHOT|REQ_F_APOLL_MULTISHOT)) {
1514 		err = -EBADFD;
1515 		if (!io_file_can_poll(req))
1516 			goto fail;
1517 		if (req->file->f_flags & O_NONBLOCK ||
1518 		    req->file->f_mode & FMODE_NOWAIT) {
1519 			err = -ECANCELED;
1520 			if (io_arm_poll_handler(req, issue_flags) != IO_APOLL_OK)
1521 				goto fail;
1522 			return;
1523 		} else {
1524 			req->flags &= ~(REQ_F_APOLL_MULTISHOT|REQ_F_MULTISHOT);
1525 		}
1526 	}
1527 
1528 	if (req->flags & REQ_F_FORCE_ASYNC) {
1529 		bool opcode_poll = def->pollin || def->pollout;
1530 
1531 		if (opcode_poll && io_file_can_poll(req)) {
1532 			needs_poll = true;
1533 			issue_flags |= IO_URING_F_NONBLOCK;
1534 		}
1535 	}
1536 
1537 	do {
1538 		ret = io_issue_sqe(req, issue_flags);
1539 		if (ret != -EAGAIN)
1540 			break;
1541 
1542 		/*
1543 		 * If REQ_F_NOWAIT is set, then don't wait or retry with
1544 		 * poll. -EAGAIN is final for that case.
1545 		 */
1546 		if (req->flags & REQ_F_NOWAIT)
1547 			break;
1548 
1549 		/*
1550 		 * We can get EAGAIN for iopolled IO even though we're
1551 		 * forcing a sync submission from here, since we can't
1552 		 * wait for request slots on the block side.
1553 		 */
1554 		if (!needs_poll) {
1555 			if (!(req->flags & REQ_F_IOPOLL))
1556 				break;
1557 			if (io_wq_worker_stopped())
1558 				break;
1559 			cond_resched();
1560 			continue;
1561 		}
1562 
1563 		if (io_arm_poll_handler(req, issue_flags) == IO_APOLL_OK)
1564 			return;
1565 		/* aborted or ready, in either case retry blocking */
1566 		needs_poll = false;
1567 		issue_flags &= ~IO_URING_F_NONBLOCK;
1568 	} while (1);
1569 
1570 	/* avoid locking problems by failing it from a clean context */
1571 	if (ret)
1572 		io_req_task_queue_fail(req, ret);
1573 }
1574 
1575 inline struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
1576 				      unsigned int issue_flags)
1577 {
1578 	struct io_ring_ctx *ctx = req->ctx;
1579 	struct io_rsrc_node *node;
1580 	struct file *file = NULL;
1581 
1582 	io_ring_submit_lock(ctx, issue_flags);
1583 	node = io_rsrc_node_lookup(&ctx->file_table.data, fd);
1584 	if (node) {
1585 		node->refs++;
1586 		req->file_node = node;
1587 		req->flags |= io_slot_flags(node);
1588 		file = io_slot_file(node);
1589 	}
1590 	io_ring_submit_unlock(ctx, issue_flags);
1591 	return file;
1592 }
1593 
1594 struct file *io_file_get_normal(struct io_kiocb *req, int fd)
1595 {
1596 	struct file *file = fget(fd);
1597 
1598 	trace_io_uring_file_get(req, fd);
1599 
1600 	/* we don't allow fixed io_uring files */
1601 	if (file && io_is_uring_fops(file))
1602 		io_req_track_inflight(req);
1603 	return file;
1604 }
1605 
1606 static int io_req_sqe_copy(struct io_kiocb *req, unsigned int issue_flags)
1607 {
1608 	const struct io_cold_def *def = &io_cold_defs[req->opcode];
1609 
1610 	if (req->flags & REQ_F_SQE_COPIED)
1611 		return 0;
1612 	req->flags |= REQ_F_SQE_COPIED;
1613 	if (!def->sqe_copy)
1614 		return 0;
1615 	if (WARN_ON_ONCE(!(issue_flags & IO_URING_F_INLINE)))
1616 		return -EFAULT;
1617 	def->sqe_copy(req);
1618 	return 0;
1619 }
1620 
1621 static void io_queue_async(struct io_kiocb *req, unsigned int issue_flags, int ret)
1622 	__must_hold(&req->ctx->uring_lock)
1623 {
1624 	if (ret != -EAGAIN || (req->flags & REQ_F_NOWAIT)) {
1625 fail:
1626 		io_req_defer_failed(req, ret);
1627 		return;
1628 	}
1629 
1630 	ret = io_req_sqe_copy(req, issue_flags);
1631 	if (unlikely(ret))
1632 		goto fail;
1633 
1634 	switch (io_arm_poll_handler(req, 0)) {
1635 	case IO_APOLL_READY:
1636 		io_req_task_queue(req);
1637 		break;
1638 	case IO_APOLL_ABORTED:
1639 		io_queue_iowq(req);
1640 		break;
1641 	case IO_APOLL_OK:
1642 		break;
1643 	}
1644 }
1645 
1646 static inline void io_queue_sqe(struct io_kiocb *req, unsigned int extra_flags)
1647 	__must_hold(&req->ctx->uring_lock)
1648 {
1649 	unsigned int issue_flags = IO_URING_F_NONBLOCK |
1650 				   IO_URING_F_COMPLETE_DEFER | extra_flags;
1651 	int ret;
1652 
1653 	ret = io_issue_sqe(req, issue_flags);
1654 
1655 	/*
1656 	 * We async punt it if the file wasn't marked NOWAIT, or if the file
1657 	 * doesn't support non-blocking read/write attempts
1658 	 */
1659 	if (unlikely(ret))
1660 		io_queue_async(req, issue_flags, ret);
1661 }
1662 
1663 static void io_queue_sqe_fallback(struct io_kiocb *req)
1664 	__must_hold(&req->ctx->uring_lock)
1665 {
1666 	if (unlikely(req->flags & REQ_F_FAIL)) {
1667 		/*
1668 		 * We don't submit; fail them all. For that, replace hardlinks
1669 		 * with normal links. An extra REQ_F_LINK is tolerated.
1670 		 */
1671 		req->flags &= ~REQ_F_HARDLINK;
1672 		req->flags |= REQ_F_LINK;
1673 		io_req_defer_failed(req, req->cqe.res);
1674 	} else {
1675 		/* can't fail with IO_URING_F_INLINE */
1676 		io_req_sqe_copy(req, IO_URING_F_INLINE);
1677 		if (unlikely(req->ctx->int_flags & IO_RING_F_DRAIN_ACTIVE))
1678 			io_drain_req(req);
1679 		else
1680 			io_queue_iowq(req);
1681 	}
1682 }
1683 
1684 /*
1685  * Check SQE restrictions (opcode and flags).
1686  *
1687  * Returns 'true' if SQE is allowed, 'false' otherwise.
1688  */
1689 static inline bool io_check_restriction(struct io_ring_ctx *ctx,
1690 					struct io_kiocb *req,
1691 					unsigned int sqe_flags)
1692 {
1693 	if (!(ctx->int_flags & IO_RING_F_OP_RESTRICTED))
1694 		return true;
1695 	if (!test_bit(req->opcode, ctx->restrictions.sqe_op))
1696 		return false;
1697 
1698 	if ((sqe_flags & ctx->restrictions.sqe_flags_required) !=
1699 	    ctx->restrictions.sqe_flags_required)
1700 		return false;
1701 
1702 	if (sqe_flags & ~(ctx->restrictions.sqe_flags_allowed |
1703 			  ctx->restrictions.sqe_flags_required))
1704 		return false;
1705 
1706 	return true;
1707 }
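
/*
 * Illustrative sketch (userspace, not kernel code): restrictions are
 * normally installed on a ring created with IORING_SETUP_R_DISABLED and
 * only then enabled, e.g. through a raw io_uring_register(2) wrapper
 * (error handling omitted, 'ring_fd' assumed):
 *
 *	struct io_uring_restriction res[2] = {
 *		{ .opcode = IORING_RESTRICTION_SQE_OP,
 *		  .sqe_op = IORING_OP_READ },
 *		{ .opcode = IORING_RESTRICTION_SQE_FLAGS_ALLOWED,
 *		  .sqe_flags = IOSQE_FIXED_FILE },
 *	};
 *	io_uring_register(ring_fd, IORING_REGISTER_RESTRICTIONS, res, 2);
 *	io_uring_register(ring_fd, IORING_REGISTER_ENABLE_RINGS, NULL, 0);
 *
 * With that in place, io_check_restriction() rejects any SQE whose opcode
 * or flags fall outside the registered sets with -EACCES.
 */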
1708 
1709 static void io_init_drain(struct io_ring_ctx *ctx)
1710 {
1711 	struct io_kiocb *head = ctx->submit_state.link.head;
1712 
1713 	ctx->int_flags |= IO_RING_F_DRAIN_ACTIVE;
1714 	if (head) {
1715 		/*
1716 		 * If we need to drain a request in the middle of a link, drain
1717 		 * the head request and the next request/link after the current
1718 		 * link. Since links execute sequentially, REQ_F_IO_DRAIN is
1719 		 * effectively maintained for every request of our link.
1721 		 */
1722 		head->flags |= REQ_F_IO_DRAIN | REQ_F_FORCE_ASYNC;
1723 		ctx->int_flags |= IO_RING_F_DRAIN_NEXT;
1724 	}
1725 }
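
/*
 * Example of the ordering drain provides (illustrative): for a submission
 * sequence of
 *
 *	write A, write B (IOSQE_IO_DRAIN), write C
 *
 * B is not started until A has completed, and C is not started until B has
 * completed. The flags set above keep that guarantee intact even when the
 * drain request lands in the middle of a link.
 */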
1726 
1727 static __cold int io_init_fail_req(struct io_kiocb *req, int err)
1728 {
1729 	/* ensure per-opcode data is cleared if we fail before prep */
1730 	memset(&req->cmd.data, 0, sizeof(req->cmd.data));
1731 	return err;
1732 }
1733 
1734 static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
1735 		       const struct io_uring_sqe *sqe, unsigned int *left)
1736 	__must_hold(&ctx->uring_lock)
1737 {
1738 	const struct io_issue_def *def;
1739 	unsigned int sqe_flags;
1740 	int personality;
1741 	u8 opcode;
1742 
1743 	req->ctx = ctx;
1744 	req->opcode = opcode = READ_ONCE(sqe->opcode);
1745 	/* same numerical values as the corresponding REQ_F_* flags, safe to copy */
1746 	sqe_flags = READ_ONCE(sqe->flags);
1747 	req->flags = (__force io_req_flags_t) sqe_flags;
1748 	req->cqe.user_data = READ_ONCE(sqe->user_data);
1749 	req->file = NULL;
1750 	req->tctx = current->io_uring;
1751 	req->cancel_seq_set = false;
1752 	req->async_data = NULL;
1753 
1754 	if (unlikely(opcode >= IORING_OP_LAST)) {
1755 		req->opcode = 0;
1756 		return io_init_fail_req(req, -EINVAL);
1757 	}
1758 	opcode = array_index_nospec(opcode, IORING_OP_LAST);
1759 
1760 	def = &io_issue_defs[opcode];
1761 	if (def->is_128 && !(ctx->flags & IORING_SETUP_SQE128)) {
1762 		/*
1763 		 * A 128b op on a non-128b SQ requires mixed SQE support as
1764 		 * well as 2 contiguous entries.
1765 		 */
1766 		if (!(ctx->flags & IORING_SETUP_SQE_MIXED) || *left < 2 ||
1767 		    (unsigned)(sqe - ctx->sq_sqes) >= ctx->sq_entries - 1)
1768 			return io_init_fail_req(req, -EINVAL);
1769 		/*
1770 		 * A 128b operation on a mixed SQ uses two entries, so we have
1771 		 * to increment the head and cached refs, and decrement what's
1772 		 * left.
1773 		 */
1774 		current->io_uring->cached_refs++;
1775 		ctx->cached_sq_head++;
1776 		(*left)--;
1777 	}
1778 
1779 	if (unlikely(sqe_flags & ~SQE_COMMON_FLAGS)) {
1780 		/* enforce forwards compatibility on users */
1781 		if (sqe_flags & ~SQE_VALID_FLAGS)
1782 			return io_init_fail_req(req, -EINVAL);
1783 		if (sqe_flags & IOSQE_BUFFER_SELECT) {
1784 			if (!def->buffer_select)
1785 				return io_init_fail_req(req, -EOPNOTSUPP);
1786 			req->buf_index = READ_ONCE(sqe->buf_group);
1787 		}
1788 		if (sqe_flags & IOSQE_CQE_SKIP_SUCCESS)
1789 			ctx->int_flags |= IO_RING_F_DRAIN_DISABLED;
1790 		if (sqe_flags & IOSQE_IO_DRAIN) {
1791 			if (ctx->int_flags & IO_RING_F_DRAIN_DISABLED)
1792 				return io_init_fail_req(req, -EOPNOTSUPP);
1793 			io_init_drain(ctx);
1794 		}
1795 	}
1796 	if (unlikely(ctx->int_flags & (IO_RING_F_OP_RESTRICTED | IO_RING_F_DRAIN_ACTIVE | IO_RING_F_DRAIN_NEXT))) {
1797 		if (!io_check_restriction(ctx, req, sqe_flags))
1798 			return io_init_fail_req(req, -EACCES);
1799 		/* knock it to the slow queue path, will be drained there */
1800 		if (ctx->int_flags & IO_RING_F_DRAIN_ACTIVE)
1801 			req->flags |= REQ_F_FORCE_ASYNC;
1802 		/* if there is no link, we're at "next" request and need to drain */
1803 		if (unlikely(ctx->int_flags & IO_RING_F_DRAIN_NEXT) && !ctx->submit_state.link.head) {
1804 			ctx->int_flags &= ~IO_RING_F_DRAIN_NEXT;
1805 			ctx->int_flags |= IO_RING_F_DRAIN_ACTIVE;
1806 			req->flags |= REQ_F_IO_DRAIN | REQ_F_FORCE_ASYNC;
1807 		}
1808 	}
1809 
1810 	if (!def->ioprio && sqe->ioprio)
1811 		return io_init_fail_req(req, -EINVAL);
1812 	if (!def->iopoll && (ctx->flags & IORING_SETUP_IOPOLL))
1813 		return io_init_fail_req(req, -EINVAL);
1814 
1815 	if (def->needs_file) {
1816 		struct io_submit_state *state = &ctx->submit_state;
1817 
1818 		req->cqe.fd = READ_ONCE(sqe->fd);
1819 
1820 		/*
1821 		 * Plug now if we have more than 2 IOs left after this, and the
1822 		 * target is potentially a read/write to block-based storage.
1823 		 */
1824 		if (state->need_plug && def->plug) {
1825 			state->plug_started = true;
1826 			state->need_plug = false;
1827 			blk_start_plug_nr_ios(&state->plug, state->submit_nr);
1828 		}
1829 	}
1830 
1831 	personality = READ_ONCE(sqe->personality);
1832 	if (personality) {
1833 		int ret;
1834 
1835 		req->creds = xa_load(&ctx->personalities, personality);
1836 		if (!req->creds)
1837 			return io_init_fail_req(req, -EINVAL);
1838 		get_cred(req->creds);
1839 		ret = security_uring_override_creds(req->creds);
1840 		if (ret) {
1841 			put_cred(req->creds);
1842 			return io_init_fail_req(req, ret);
1843 		}
1844 		req->flags |= REQ_F_CREDS;
1845 	}
1846 
1847 	return def->prep(req, sqe);
1848 }
1849 
1850 static __cold int io_submit_fail_init(const struct io_uring_sqe *sqe,
1851 				      struct io_kiocb *req, int ret)
1852 {
1853 	struct io_ring_ctx *ctx = req->ctx;
1854 	struct io_submit_link *link = &ctx->submit_state.link;
1855 	struct io_kiocb *head = link->head;
1856 
1857 	trace_io_uring_req_failed(sqe, req, ret);
1858 
1859 	/*
1860 	 * Avoid breaking links in the middle as it renders links with SQPOLL
1861 	 * unusable. Instead of failing eagerly, continue assembling the link if
1862 	 * applicable and mark the head with REQ_F_FAIL. The link flushing code
1863 	 * should find the flag and handle the rest.
1864 	 */
1865 	req_fail_link_node(req, ret);
1866 	if (head && !(head->flags & REQ_F_FAIL))
1867 		req_fail_link_node(head, -ECANCELED);
1868 
1869 	if (!(req->flags & IO_REQ_LINK_FLAGS)) {
1870 		if (head) {
1871 			link->last->link = req;
1872 			link->head = NULL;
1873 			req = head;
1874 		}
1875 		io_queue_sqe_fallback(req);
1876 		return ret;
1877 	}
1878 
1879 	if (head)
1880 		link->last->link = req;
1881 	else
1882 		link->head = req;
1883 	link->last = req;
1884 	return 0;
1885 }
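
/*
 * Illustrative outcome (roughly, not a normative description): if SQE B of
 * a linked chain A -> B -> C fails in io_init_req(), B keeps its own error
 * code while A and C complete with -ECANCELED once the assembled chain is
 * flushed, so userspace still sees one CQE per submitted SQE.
 */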
1886 
1887 static inline int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
1888 			 const struct io_uring_sqe *sqe, unsigned int *left)
1889 	__must_hold(&ctx->uring_lock)
1890 {
1891 	struct io_submit_link *link = &ctx->submit_state.link;
1892 	int ret;
1893 
1894 	ret = io_init_req(ctx, req, sqe, left);
1895 	if (unlikely(ret))
1896 		return io_submit_fail_init(sqe, req, ret);
1897 
1898 	if (unlikely(ctx->bpf_filters)) {
1899 		ret = io_uring_run_bpf_filters(ctx->bpf_filters, req);
1900 		if (ret)
1901 			return io_submit_fail_init(sqe, req, ret);
1902 	}
1903 
1904 	trace_io_uring_submit_req(req);
1905 
1906 	/*
1907 	 * If we already have a head request, queue this one for async
1908 	 * submittal once the head completes. If we don't have a head but
1909 	 * IOSQE_IO_LINK is set in the sqe, start a new head. This one will be
1910 	 * submitted sync once the chain is complete. If none of those
1911 	 * conditions are true (normal request), then just queue it.
1912 	 */
1913 	if (unlikely(link->head)) {
1914 		trace_io_uring_link(req, link->last);
1915 		io_req_sqe_copy(req, IO_URING_F_INLINE);
1916 		link->last->link = req;
1917 		link->last = req;
1918 
1919 		if (req->flags & IO_REQ_LINK_FLAGS)
1920 			return 0;
1921 		/* last request of the link, flush it */
1922 		req = link->head;
1923 		link->head = NULL;
1924 		if (req->flags & (REQ_F_FORCE_ASYNC | REQ_F_FAIL))
1925 			goto fallback;
1926 
1927 	} else if (unlikely(req->flags & (IO_REQ_LINK_FLAGS |
1928 					  REQ_F_FORCE_ASYNC | REQ_F_FAIL))) {
1929 		if (req->flags & IO_REQ_LINK_FLAGS) {
1930 			link->head = req;
1931 			link->last = req;
1932 		} else {
1933 fallback:
1934 			io_queue_sqe_fallback(req);
1935 		}
1936 		return 0;
1937 	}
1938 
1939 	io_queue_sqe(req, IO_URING_F_INLINE);
1940 	return 0;
1941 }
1942 
1943 /*
1944  * Batched submission is done, ensure local IO is flushed out.
1945  */
1946 static void io_submit_state_end(struct io_ring_ctx *ctx)
1947 {
1948 	struct io_submit_state *state = &ctx->submit_state;
1949 
1950 	if (unlikely(state->link.head))
1951 		io_queue_sqe_fallback(state->link.head);
1952 	/* flush only after queuing links as they can generate completions */
1953 	io_submit_flush_completions(ctx);
1954 	if (state->plug_started)
1955 		blk_finish_plug(&state->plug);
1956 }
1957 
1958 /*
1959  * Start submission side cache.
1960  */
1961 static void io_submit_state_start(struct io_submit_state *state,
1962 				  unsigned int max_ios)
1963 {
1964 	state->plug_started = false;
1965 	state->need_plug = max_ios > 2;
1966 	state->submit_nr = max_ios;
1967 	/* set only head, no need to init link_last in advance */
1968 	state->link.head = NULL;
1969 }
1970 
1971 static void io_commit_sqring(struct io_ring_ctx *ctx)
1972 {
1973 	struct io_rings *rings = ctx->rings;
1974 
1975 	if (ctx->flags & IORING_SETUP_SQ_REWIND) {
1976 		ctx->cached_sq_head = 0;
1977 	} else {
1978 		/*
1979 		 * Ensure any loads from the SQEs are done at this point,
1980 		 * since once we write the new head, the application could
1981 		 * write new data to them.
1982 		 */
1983 		smp_store_release(&rings->sq.head, ctx->cached_sq_head);
1984 	}
1985 }
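
/*
 * Illustrative pairing (userspace side, not kernel code): the
 * smp_store_release() above is what tells the application its SQE slots
 * are free again. A submitter checking for room does the matching acquire
 * load, e.g. with liburing-style helpers (names assumed):
 *
 *	unsigned head = io_uring_smp_load_acquire(sq->khead);
 *	if (sq->sqe_tail - head < *sq->kring_entries)
 *		; // at least one SQE slot is free to fill
 *
 * Only after observing the released head may slot contents be overwritten;
 * the kernel is done loading from them at that point.
 */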
1986 
1987 /*
1988  * Fetch an sqe, if one is available. Note this returns a pointer to memory
1989  * that is mapped by userspace. This means that care needs to be taken to
1990  * ensure that reads are stable, as we cannot rely on userspace always
1991  * being a good citizen. If members of the sqe are validated and then later
1992  * used, it's important that those reads are done through READ_ONCE() to
1993  * prevent a re-load down the line.
1994  */
1995 static bool io_get_sqe(struct io_ring_ctx *ctx, const struct io_uring_sqe **sqe)
1996 {
1997 	unsigned mask = ctx->sq_entries - 1;
1998 	unsigned head = ctx->cached_sq_head++ & mask;
1999 
2000 	if (static_branch_unlikely(&io_key_has_sqarray.key) &&
2001 	    (!(ctx->flags & IORING_SETUP_NO_SQARRAY))) {
2002 		head = READ_ONCE(ctx->sq_array[head]);
2003 		if (unlikely(head >= ctx->sq_entries)) {
2004 			WRITE_ONCE(ctx->rings->sq_dropped,
2005 				   READ_ONCE(ctx->rings->sq_dropped) + 1);
2006 			return false;
2007 		}
2008 		head = array_index_nospec(head, ctx->sq_entries);
2009 	}
2010 
2011 	/*
2012 	 * The cached sq head (or cq tail) serves two purposes:
2013 	 *
2014 	 * 1) allows us to batch the cost of the user-visible head
2015 	 *    updates.
2016 	 * 2) allows the kernel side to track the head on its own, even
2017 	 *    though the application is the one updating it.
2018 	 */
2019 
2020 	/* double the index for 128-byte SQEs, as each one spans two slots */
2021 	if (ctx->flags & IORING_SETUP_SQE128)
2022 		head <<= 1;
2023 	*sqe = &ctx->sq_sqes[head];
2024 	return true;
2025 }
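
/*
 * Worked example of the indexing above (illustrative): with sq_entries = 8
 * and an SQ array in use, cached_sq_head = 10 masks to ring slot 2, and
 * sq_array[2] then names the SQE the application actually filled, say 5.
 * With IORING_SETUP_SQE128 that index is doubled, since each big SQE spans
 * two 64-byte slots in sq_sqes[].
 */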
2026 
2027 int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
2028 	__must_hold(&ctx->uring_lock)
2029 {
2030 	unsigned int entries;
2031 	unsigned int left;
2032 	int ret;
2033 
2034 	if (ctx->flags & IORING_SETUP_SQ_REWIND)
2035 		entries = ctx->sq_entries;
2036 	else
2037 		entries = __io_sqring_entries(ctx);
2038 
2039 	entries = min(nr, entries);
2040 	if (unlikely(!entries))
2041 		return 0;
2042 
2043 	ret = left = entries;
2044 	io_get_task_refs(left);
2045 	io_submit_state_start(&ctx->submit_state, left);
2046 
2047 	do {
2048 		const struct io_uring_sqe *sqe;
2049 		struct io_kiocb *req;
2050 
2051 		if (unlikely(!io_alloc_req(ctx, &req)))
2052 			break;
2053 		if (unlikely(!io_get_sqe(ctx, &sqe))) {
2054 			io_req_add_to_cache(req, ctx);
2055 			break;
2056 		}
2057 
2058 		/*
2059 		 * Continue submitting even for sqe failure if the
2060 		 * ring was set up with IORING_SETUP_SUBMIT_ALL
2061 		 */
2062 		if (unlikely(io_submit_sqe(ctx, req, sqe, &left)) &&
2063 		    !(ctx->flags & IORING_SETUP_SUBMIT_ALL)) {
2064 			left--;
2065 			break;
2066 		}
2067 	} while (--left);
2068 
2069 	if (unlikely(left)) {
2070 		ret -= left;
2071 		/* try again if it submitted nothing and can't allocate a req */
2072 		if (!ret && io_req_cache_empty(ctx))
2073 			ret = -EAGAIN;
2074 		current->io_uring->cached_refs += left;
2075 	}
2076 
2077 	io_submit_state_end(ctx);
2078 	/* Commit SQ ring head once we've consumed and submitted all SQEs */
2079 	io_commit_sqring(ctx);
2080 	return ret;
2081 }
2082 
2083 static void io_rings_free(struct io_ring_ctx *ctx)
2084 {
2085 	io_free_region(ctx->user, &ctx->sq_region);
2086 	io_free_region(ctx->user, &ctx->ring_region);
2087 	ctx->rings = NULL;
2088 	RCU_INIT_POINTER(ctx->rings_rcu, NULL);
2089 	ctx->sq_sqes = NULL;
2090 }
2091 
2092 static int rings_size(unsigned int flags, unsigned int sq_entries,
2093 		      unsigned int cq_entries, struct io_rings_layout *rl)
2094 {
2095 	struct io_rings *rings;
2096 	size_t sqe_size;
2097 	size_t off;
2098 
2099 	if (flags & IORING_SETUP_CQE_MIXED) {
2100 		if (cq_entries < 2)
2101 			return -EOVERFLOW;
2102 	}
2103 	if (flags & IORING_SETUP_SQE_MIXED) {
2104 		if (sq_entries < 2)
2105 			return -EOVERFLOW;
2106 	}
2107 
2108 	rl->sq_array_offset = SIZE_MAX;
2109 
2110 	sqe_size = sizeof(struct io_uring_sqe);
2111 	if (flags & IORING_SETUP_SQE128)
2112 		sqe_size *= 2;
2113 
2114 	rl->sq_size = array_size(sqe_size, sq_entries);
2115 	if (rl->sq_size == SIZE_MAX)
2116 		return -EOVERFLOW;
2117 
2118 	off = struct_size(rings, cqes, cq_entries);
2119 	if (flags & IORING_SETUP_CQE32)
2120 		off = size_mul(off, 2);
2121 	if (off == SIZE_MAX)
2122 		return -EOVERFLOW;
2123 
2124 #ifdef CONFIG_SMP
2125 	off = ALIGN(off, SMP_CACHE_BYTES);
2126 	if (off == 0)
2127 		return -EOVERFLOW;
2128 #endif
2129 
2130 	if (!(flags & IORING_SETUP_NO_SQARRAY)) {
2131 		size_t sq_array_size;
2132 
2133 		rl->sq_array_offset = off;
2134 
2135 		sq_array_size = array_size(sizeof(u32), sq_entries);
2136 		off = size_add(off, sq_array_size);
2137 		if (off == SIZE_MAX)
2138 			return -EOVERFLOW;
2139 	}
2140 
2141 	rl->rings_size = off;
2142 	return 0;
2143 }
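
/*
 * Worked sizing example (illustrative; exact numbers depend on the struct
 * layout): for sq_entries = 8, cq_entries = 16 and none of the
 * SQE128/CQE32/NO_SQARRAY flags,
 *
 *	rl->sq_size    = 8 * 64 bytes of SQEs, mapped separately
 *	rl->rings_size = sizeof(struct io_rings) + 16 * 16 bytes of CQEs,
 *			 cache-line aligned, plus 8 * 4 bytes of SQ array
 */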
2144 
2145 static __cold void __io_req_caches_free(struct io_ring_ctx *ctx)
2146 {
2147 	struct io_kiocb *req;
2148 	int nr = 0;
2149 
2150 	while (!io_req_cache_empty(ctx)) {
2151 		req = io_extract_req(ctx);
2152 		io_poison_req(req);
2153 		kmem_cache_free(req_cachep, req);
2154 		nr++;
2155 	}
2156 	if (nr) {
2157 		ctx->nr_req_allocated -= nr;
2158 		percpu_ref_put_many(&ctx->refs, nr);
2159 	}
2160 }
2161 
2162 static __cold void io_req_caches_free(struct io_ring_ctx *ctx)
2163 {
2164 	guard(mutex)(&ctx->uring_lock);
2165 	__io_req_caches_free(ctx);
2166 }
2167 
2168 static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
2169 {
2170 	io_unregister_bpf_ops(ctx);
2171 	io_sq_thread_finish(ctx);
2172 
2173 	mutex_lock(&ctx->uring_lock);
2174 	io_sqe_buffers_unregister(ctx);
2175 	io_sqe_files_unregister(ctx);
2176 	io_unregister_zcrx(ctx);
2177 	io_cqring_overflow_kill(ctx);
2178 	io_eventfd_unregister(ctx);
2179 	io_free_alloc_caches(ctx);
2180 	io_destroy_buffers(ctx);
2181 	io_free_region(ctx->user, &ctx->param_region);
2182 	mutex_unlock(&ctx->uring_lock);
2183 	if (ctx->sq_creds)
2184 		put_cred(ctx->sq_creds);
2185 	if (ctx->submitter_task)
2186 		put_task_struct(ctx->submitter_task);
2187 
2188 	WARN_ON_ONCE(!list_empty(&ctx->ltimeout_list));
2189 
2190 	if (ctx->mm_account) {
2191 		mmdrop(ctx->mm_account);
2192 		ctx->mm_account = NULL;
2193 	}
2194 	io_rings_free(ctx);
2195 
2196 	if (!(ctx->flags & IORING_SETUP_NO_SQARRAY))
2197 		static_branch_slow_dec_deferred(&io_key_has_sqarray);
2198 
2199 	percpu_ref_exit(&ctx->refs);
2200 	free_uid(ctx->user);
2201 	io_req_caches_free(ctx);
2202 
2203 	if (ctx->restrictions.bpf_filters) {
2204 		WARN_ON_ONCE(ctx->bpf_filters !=
2205 			     ctx->restrictions.bpf_filters->filters);
2206 	} else {
2207 		WARN_ON_ONCE(ctx->bpf_filters);
2208 	}
2209 	io_put_bpf_filters(&ctx->restrictions);
2210 
2211 	WARN_ON_ONCE(ctx->nr_req_allocated);
2212 
2213 	if (ctx->hash_map)
2214 		io_wq_put_hash(ctx->hash_map);
2215 	io_napi_free(ctx);
2216 	kvfree(ctx->cancel_table.hbs);
2217 	xa_destroy(&ctx->io_bl_xa);
2218 	kfree(ctx);
2219 }
2220 
2221 static __cold void io_activate_pollwq_cb(struct callback_head *cb)
2222 {
2223 	struct io_ring_ctx *ctx = container_of(cb, struct io_ring_ctx,
2224 					       poll_wq_task_work);
2225 
2226 	mutex_lock(&ctx->uring_lock);
2227 	ctx->int_flags |= IO_RING_F_POLL_ACTIVATED;
2228 	mutex_unlock(&ctx->uring_lock);
2229 
2230 	/*
2231 	 * Wakeups for events between the start of polling and activation
2232 	 * might have been lost due to loose synchronisation.
2233 	 */
2234 	wake_up_all(&ctx->poll_wq);
2235 	percpu_ref_put(&ctx->refs);
2236 }
2237 
2238 __cold void io_activate_pollwq(struct io_ring_ctx *ctx)
2239 {
2240 	spin_lock(&ctx->completion_lock);
2241 	/* already activated or in progress */
2242 	if ((ctx->int_flags & IO_RING_F_POLL_ACTIVATED) || ctx->poll_wq_task_work.func)
2243 		goto out;
2244 	if (WARN_ON_ONCE(!(ctx->int_flags & IO_RING_F_TASK_COMPLETE)))
2245 		goto out;
2246 	if (!ctx->submitter_task)
2247 		goto out;
2248 	/*
2249 	 * With ->submitter_task set, only the submitter task completes requests;
2250 	 * we only need to sync with it, which is done by injecting a task_work.
2251 	 */
2252 	init_task_work(&ctx->poll_wq_task_work, io_activate_pollwq_cb);
2253 	percpu_ref_get(&ctx->refs);
2254 	if (task_work_add(ctx->submitter_task, &ctx->poll_wq_task_work, TWA_SIGNAL))
2255 		percpu_ref_put(&ctx->refs);
2256 out:
2257 	spin_unlock(&ctx->completion_lock);
2258 }
2259 
2260 static __poll_t io_uring_poll(struct file *file, poll_table *wait)
2261 {
2262 	struct io_ring_ctx *ctx = file->private_data;
2263 	__poll_t mask = 0;
2264 
2265 	if (unlikely(!(data_race(ctx->int_flags) & IO_RING_F_POLL_ACTIVATED)))
2266 		io_activate_pollwq(ctx);
2267 	/*
2268 	 * provides mb() which pairs with barrier from wq_has_sleeper
2269 	 * call in io_commit_cqring
2270 	 */
2271 	poll_wait(file, &ctx->poll_wq, wait);
2272 
2273 	rcu_read_lock();
2274 
2275 	if (!__io_sqring_full(ctx))
2276 		mask |= EPOLLOUT | EPOLLWRNORM;
2277 
2278 	/*
2279 	 * Don't flush cqring overflow list here, just do a simple check.
2280 	 * Otherwise there could possibly be an ABBA deadlock:
2281 	 *      CPU0                    CPU1
2282 	 *      ----                    ----
2283 	 * lock(&ctx->uring_lock);
2284 	 *                              lock(&ep->mtx);
2285 	 *                              lock(&ctx->uring_lock);
2286 	 * lock(&ep->mtx);
2287 	 *
2288 	 * Users may get EPOLLIN while seeing nothing in the CQ ring; this
2289 	 * pushes them to do the flush.
2290 	 */
2291 
2292 	if (__io_cqring_events_user(ctx) || io_has_work(ctx))
2293 		mask |= EPOLLIN | EPOLLRDNORM;
2294 
2295 	rcu_read_unlock();
2296 	return mask;
2297 }
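
/*
 * Illustrative use (userspace, not kernel code): the ring fd can be fed to
 * poll/epoll. Per the comment above, EPOLLIN only means "completions or
 * pending work may exist", so a woken waiter should still enter the kernel
 * or peek the CQ ring to flush and reap:
 *
 *	struct epoll_event ev = { .events = EPOLLIN, .data.fd = ring_fd };
 *	epoll_ctl(epfd, EPOLL_CTL_ADD, ring_fd, &ev);
 *	// on wakeup: io_uring_enter(ring_fd, 0, 0, IORING_ENTER_GETEVENTS, ...)
 */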
2298 
2299 struct io_tctx_exit {
2300 	struct callback_head		task_work;
2301 	struct completion		completion;
2302 	struct io_ring_ctx		*ctx;
2303 };
2304 
2305 static __cold void io_tctx_exit_cb(struct callback_head *cb)
2306 {
2307 	struct io_uring_task *tctx = current->io_uring;
2308 	struct io_tctx_exit *work;
2309 
2310 	work = container_of(cb, struct io_tctx_exit, task_work);
2311 	/*
2312 	 * When @in_cancel, we're in cancellation and it's racy to remove the
2313 	 * node. It'll be removed by the end of cancellation, just ignore it.
2314 	 * tctx can be NULL if the queueing of this task_work raced with
2315 	 * work cancelation off the exec path.
2316 	 */
2317 	if (tctx && !atomic_read(&tctx->in_cancel))
2318 		io_uring_del_tctx_node((unsigned long)work->ctx);
2319 	complete(&work->completion);
2320 }
2321 
2322 static __cold void io_ring_exit_work(struct work_struct *work)
2323 {
2324 	struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx, exit_work);
2325 	unsigned long timeout = jiffies + IO_URING_EXIT_WAIT_MAX;
2326 	unsigned long interval = HZ / 20;
2327 	struct io_tctx_exit exit;
2328 	struct io_tctx_node *node;
2329 	int ret;
2330 
2331 	mutex_lock(&ctx->uring_lock);
2332 	io_terminate_zcrx(ctx);
2333 	mutex_unlock(&ctx->uring_lock);
2334 
2335 	/*
2336 	 * If we're doing polled IO and end up having requests being
2337 	 * submitted async (out-of-line), then completions can come in while
2338 	 * we're waiting for refs to drop. We need to reap these manually,
2339 	 * as nobody else will be looking for them.
2340 	 */
2341 	do {
2342 		if (test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq)) {
2343 			mutex_lock(&ctx->uring_lock);
2344 			io_cqring_overflow_kill(ctx);
2345 			mutex_unlock(&ctx->uring_lock);
2346 		}
2347 
2348 		/* The SQPOLL thread never reaches this path */
2349 		do {
2350 			if (ctx->flags & IORING_SETUP_DEFER_TASKRUN)
2351 				io_move_task_work_from_local(ctx);
2352 			cond_resched();
2353 		} while (io_uring_try_cancel_requests(ctx, NULL, true, false));
2354 
2355 		if (ctx->sq_data) {
2356 			struct io_sq_data *sqd = ctx->sq_data;
2357 			struct task_struct *tsk;
2358 
2359 			io_sq_thread_park(sqd);
2360 			tsk = sqpoll_task_locked(sqd);
2361 			if (tsk && tsk->io_uring && tsk->io_uring->io_wq)
2362 				io_wq_cancel_cb(tsk->io_uring->io_wq,
2363 						io_cancel_ctx_cb, ctx, true);
2364 			io_sq_thread_unpark(sqd);
2365 		}
2366 
2367 		io_req_caches_free(ctx);
2368 
2369 		if (WARN_ON_ONCE(time_after(jiffies, timeout))) {
2370 			/* there is little hope left, don't run it too often */
2371 			interval = HZ * 60;
2372 		}
2373 		/*
2374 		 * This is really an uninterruptible wait, as it has to run to
2375 		 * completion. But it's also run from a kworker, which doesn't
2376 		 * take signals, so it's fine to make it interruptible. This
2377 		 * avoids scenarios where we knowingly can wait much longer
2378 		 * on completions, for example if someone does a SIGSTOP on
2379 		 * a task that needs to finish task_work to make this loop
2380 		 * complete. That's a synthetic situation that should not
2381 		 * cause a stuck task backtrace, and hence a potential panic
2382 		 * on stuck tasks if that is enabled.
2383 		 */
2384 	} while (!wait_for_completion_interruptible_timeout(&ctx->ref_comp, interval));
2385 
2386 	init_completion(&exit.completion);
2387 	init_task_work(&exit.task_work, io_tctx_exit_cb);
2388 	exit.ctx = ctx;
2389 
2390 	mutex_lock(&ctx->uring_lock);
2391 	mutex_lock(&ctx->tctx_lock);
2392 	while (!list_empty(&ctx->tctx_list)) {
2393 		WARN_ON_ONCE(time_after(jiffies, timeout));
2394 
2395 		node = list_first_entry(&ctx->tctx_list, struct io_tctx_node,
2396 					ctx_node);
2397 		/* don't spin on a single task if cancellation failed */
2398 		list_rotate_left(&ctx->tctx_list);
2399 		ret = task_work_add(node->task, &exit.task_work, TWA_SIGNAL);
2400 		if (WARN_ON_ONCE(ret))
2401 			continue;
2402 
2403 		mutex_unlock(&ctx->tctx_lock);
2404 		mutex_unlock(&ctx->uring_lock);
2405 		/*
2406 		 * See comment above for
2407 		 * wait_for_completion_interruptible_timeout() on why this
2408 		 * wait is marked as interruptible.
2409 		 */
2410 		wait_for_completion_interruptible(&exit.completion);
2411 		mutex_lock(&ctx->uring_lock);
2412 		mutex_lock(&ctx->tctx_lock);
2413 	}
2414 	mutex_unlock(&ctx->tctx_lock);
2415 	mutex_unlock(&ctx->uring_lock);
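	/* drain any concurrent completion_lock sections before teardown proceeds */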
2416 	spin_lock(&ctx->completion_lock);
2417 	spin_unlock(&ctx->completion_lock);
2418 
2419 	/* pairs with RCU read section in io_req_local_work_add() */
2420 	if (ctx->flags & IORING_SETUP_DEFER_TASKRUN)
2421 		synchronize_rcu();
2422 
2423 	io_ring_ctx_free(ctx);
2424 }
2425 
2426 static __cold void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
2427 {
2428 	unsigned long index;
2429 	struct cred *creds;
2430 
2431 	mutex_lock(&ctx->uring_lock);
2432 	percpu_ref_kill(&ctx->refs);
2433 	xa_for_each(&ctx->personalities, index, creds)
2434 		io_unregister_personality(ctx, index);
2435 	mutex_unlock(&ctx->uring_lock);
2436 
2437 	flush_delayed_work(&ctx->fallback_work);
2438 
2439 	INIT_WORK(&ctx->exit_work, io_ring_exit_work);
2440 	/*
2441 	 * Use the dedicated unbound iou_wq to avoid spawning tons of event
2442 	 * kworkers if we're exiting a ton of rings at the same time. It just
2443 	 * adds noise and overhead, there's no discernible change in runtime
2444 	 * over using a per-cpu workqueue.
2445 	 */
2446 	queue_work(iou_wq, &ctx->exit_work);
2447 }
2448 
2449 static int io_uring_release(struct inode *inode, struct file *file)
2450 {
2451 	struct io_ring_ctx *ctx = file->private_data;
2452 
2453 	file->private_data = NULL;
2454 	io_ring_ctx_wait_and_kill(ctx);
2455 	return 0;
2456 }
2457 
2458 static struct io_uring_reg_wait *io_get_ext_arg_reg(struct io_ring_ctx *ctx,
2459 			const struct io_uring_getevents_arg __user *uarg)
2460 {
2461 	unsigned long size = sizeof(struct io_uring_reg_wait);
2462 	unsigned long offset = (uintptr_t)uarg;
2463 	unsigned long end;
2464 
2465 	if (unlikely(offset % sizeof(long)))
2466 		return ERR_PTR(-EFAULT);
2467 
2468 	/* also protects from NULL ->cq_wait_arg as the size would be 0 */
2469 	if (unlikely(check_add_overflow(offset, size, &end) ||
2470 		     end > ctx->cq_wait_size))
2471 		return ERR_PTR(-EFAULT);
2472 
2473 	offset = array_index_nospec(offset, ctx->cq_wait_size - size);
2474 	return ctx->cq_wait_arg + offset;
2475 }
2476 
2477 static int io_validate_ext_arg(struct io_ring_ctx *ctx, unsigned flags,
2478 			       const void __user *argp, size_t argsz)
2479 {
2480 	struct io_uring_getevents_arg arg;
2481 
2482 	if (!(flags & IORING_ENTER_EXT_ARG))
2483 		return 0;
2484 	if (flags & IORING_ENTER_EXT_ARG_REG)
2485 		return -EINVAL;
2486 	if (argsz != sizeof(arg))
2487 		return -EINVAL;
2488 	if (copy_from_user(&arg, argp, sizeof(arg)))
2489 		return -EFAULT;
2490 	return 0;
2491 }
2492 
2493 static int io_get_ext_arg(struct io_ring_ctx *ctx, unsigned flags,
2494 			  const void __user *argp, struct ext_arg *ext_arg)
2495 {
2496 	const struct io_uring_getevents_arg __user *uarg = argp;
2497 	struct io_uring_getevents_arg arg;
2498 
2499 	ext_arg->iowait = !(flags & IORING_ENTER_NO_IOWAIT);
2500 
2501 	/*
2502 	 * If EXT_ARG isn't set, then we have no timespec and the argp pointer
2503 	 * is just a pointer to the sigset_t.
2504 	 */
2505 	if (!(flags & IORING_ENTER_EXT_ARG)) {
2506 		ext_arg->sig = (const sigset_t __user *) argp;
2507 		return 0;
2508 	}
2509 
2510 	if (flags & IORING_ENTER_EXT_ARG_REG) {
2511 		struct io_uring_reg_wait *w;
2512 
2513 		if (ext_arg->argsz != sizeof(struct io_uring_reg_wait))
2514 			return -EINVAL;
2515 		w = io_get_ext_arg_reg(ctx, argp);
2516 		if (IS_ERR(w))
2517 			return PTR_ERR(w);
2518 
2519 		if (w->flags & ~IORING_REG_WAIT_TS)
2520 			return -EINVAL;
2521 		ext_arg->min_time = READ_ONCE(w->min_wait_usec) * NSEC_PER_USEC;
2522 		ext_arg->sig = u64_to_user_ptr(READ_ONCE(w->sigmask));
2523 		ext_arg->argsz = READ_ONCE(w->sigmask_sz);
2524 		if (w->flags & IORING_REG_WAIT_TS) {
2525 			ext_arg->ts.tv_sec = READ_ONCE(w->ts.tv_sec);
2526 			ext_arg->ts.tv_nsec = READ_ONCE(w->ts.tv_nsec);
2527 			ext_arg->ts_set = true;
2528 		}
2529 		return 0;
2530 	}
2531 
2532 	/*
2533 	 * EXT_ARG is set - ensure we agree on its size, and copy in the
2534 	 * timespec and sigset_t pointers if everything checks out.
2535 	 */
2536 	if (ext_arg->argsz != sizeof(arg))
2537 		return -EINVAL;
2538 #ifdef CONFIG_64BIT
2539 	if (!user_access_begin(uarg, sizeof(*uarg)))
2540 		return -EFAULT;
2541 	unsafe_get_user(arg.sigmask, &uarg->sigmask, uaccess_end);
2542 	unsafe_get_user(arg.sigmask_sz, &uarg->sigmask_sz, uaccess_end);
2543 	unsafe_get_user(arg.min_wait_usec, &uarg->min_wait_usec, uaccess_end);
2544 	unsafe_get_user(arg.ts, &uarg->ts, uaccess_end);
2545 	user_access_end();
2546 #else
2547 	if (copy_from_user(&arg, uarg, sizeof(arg)))
2548 		return -EFAULT;
2549 #endif
2550 	ext_arg->min_time = arg.min_wait_usec * NSEC_PER_USEC;
2551 	ext_arg->sig = u64_to_user_ptr(arg.sigmask);
2552 	ext_arg->argsz = arg.sigmask_sz;
2553 	if (arg.ts) {
2554 		if (get_timespec64(&ext_arg->ts, u64_to_user_ptr(arg.ts)))
2555 			return -EFAULT;
2556 		ext_arg->ts_set = true;
2557 	}
2558 	return 0;
2559 #ifdef CONFIG_64BIT
2560 uaccess_end:
2561 	user_access_end();
2562 	return -EFAULT;
2563 #endif
2564 }
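
/*
 * Illustrative userspace encoding (not kernel code): with
 * IORING_ENTER_EXT_ARG, argp carries a struct io_uring_getevents_arg
 * instead of a bare sigset_t pointer, so a timeout and sigmask travel
 * together:
 *
 *	struct __kernel_timespec ts = { .tv_sec = 1 };
 *	struct io_uring_getevents_arg arg = {
 *		.sigmask    = (__u64)(uintptr_t)&mask,
 *		.sigmask_sz = _NSIG / 8,
 *		.ts         = (__u64)(uintptr_t)&ts,
 *	};
 *
 * passed with argsz == sizeof(arg); the parsing above rejects any size
 * mismatch with -EINVAL.
 */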
2565 
2566 /*
2567  * Given an 'fd' value, return the ctx associated with it. If 'registered' is
2568  * true, then the registered index is used. Otherwise, the normal fd table.
2569  * Caller must call fput() on the returned file if it isn't a registered file,
2570  * unless it's an ERR_PTR.
2571  */
2572 struct file *io_uring_ctx_get_file(unsigned int fd, bool registered)
2573 {
2574 	struct file *file;
2575 
2576 	if (registered) {
2577 		/*
2578 		 * Ring fd has been registered via IORING_REGISTER_RING_FDS, we
2579 		 * need only dereference our task private array to find it.
2580 		 */
2581 		struct io_uring_task *tctx = current->io_uring;
2582 
2583 		if (unlikely(!tctx || fd >= IO_RINGFD_REG_MAX))
2584 			return ERR_PTR(-EINVAL);
2585 		fd = array_index_nospec(fd, IO_RINGFD_REG_MAX);
2586 		file = tctx->registered_rings[fd];
2587 	} else {
2588 		file = fget(fd);
2589 	}
2590 
2591 	if (unlikely(!file))
2592 		return ERR_PTR(-EBADF);
2593 	if (io_is_uring_fops(file))
2594 		return file;
2595 	if (!registered)
2596 		fput(file);
2597 	return ERR_PTR(-EOPNOTSUPP);
2598 }
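
/*
 * Illustrative sketch (userspace, not kernel code): the registered branch
 * above is what IORING_REGISTER_RING_FDS enables, sparing one fget()/fput()
 * pair per io_uring_enter(2) call:
 *
 *	struct io_uring_rsrc_update up = {
 *		.offset = -1U,	// let the kernel pick a slot
 *		.data	= ring_fd,
 *	};
 *	io_uring_register(ring_fd, IORING_REGISTER_RING_FDS, &up, 1);
 *	// then pass up.offset as 'fd' with IORING_ENTER_REGISTERED_RING
 */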
2599 
2600 
2601 SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
2602 		u32, min_complete, u32, flags, const void __user *, argp,
2603 		size_t, argsz)
2604 {
2605 	struct io_ring_ctx *ctx;
2606 	struct file *file;
2607 	long ret;
2608 
2609 	if (unlikely(flags & ~IORING_ENTER_FLAGS))
2610 		return -EINVAL;
2611 
2612 	file = io_uring_ctx_get_file(fd, flags & IORING_ENTER_REGISTERED_RING);
2613 	if (IS_ERR(file))
2614 		return PTR_ERR(file);
2615 	ctx = file->private_data;
2616 	ret = -EBADFD;
2617 	/*
2618 	 * Keep IORING_SETUP_R_DISABLED check before submitter_task load
2619 	 * in io_uring_add_tctx_node() -> __io_uring_add_tctx_node_from_submit()
2620 	 */
2621 	if (unlikely(smp_load_acquire(&ctx->flags) & IORING_SETUP_R_DISABLED))
2622 		goto out;
2623 
2624 	if (io_has_loop_ops(ctx)) {
2625 		ret = io_run_loop(ctx);
2626 		goto out;
2627 	}
2628 
2629 	/*
2630 	 * For SQ polling, the thread will do all submissions and completions.
2631 	 * Just return the requested submit count, and wake the thread if
2632 	 * we were asked to.
2633 	 */
2634 	ret = 0;
2635 	if (ctx->flags & IORING_SETUP_SQPOLL) {
2636 		if (unlikely(ctx->sq_data->thread == NULL)) {
2637 			ret = -EOWNERDEAD;
2638 			goto out;
2639 		}
2640 		if (flags & IORING_ENTER_SQ_WAKEUP)
2641 			wake_up(&ctx->sq_data->wait);
2642 		if (flags & IORING_ENTER_SQ_WAIT)
2643 			io_sqpoll_wait_sq(ctx);
2644 
2645 		ret = to_submit;
2646 	} else if (to_submit) {
2647 		ret = io_uring_add_tctx_node(ctx);
2648 		if (unlikely(ret))
2649 			goto out;
2650 
2651 		mutex_lock(&ctx->uring_lock);
2652 		ret = io_submit_sqes(ctx, to_submit);
2653 		if (ret != to_submit) {
2654 			mutex_unlock(&ctx->uring_lock);
2655 			goto out;
2656 		}
2657 		if (flags & IORING_ENTER_GETEVENTS) {
2658 			if (ctx->int_flags & IO_RING_F_SYSCALL_IOPOLL)
2659 				goto iopoll_locked;
2660 			/*
2661 			 * Ignore errors, we'll soon call io_cqring_wait() and
2662 			 * it should handle ownership problems if any.
2663 			 */
2664 			if (ctx->flags & IORING_SETUP_DEFER_TASKRUN)
2665 				(void)io_run_local_work_locked(ctx, min_complete);
2666 		}
2667 		mutex_unlock(&ctx->uring_lock);
2668 	}
2669 
2670 	if (flags & IORING_ENTER_GETEVENTS) {
2671 		int ret2;
2672 
2673 		if (ctx->int_flags & IO_RING_F_SYSCALL_IOPOLL) {
2674 			/*
2675 			 * We disallow the app entering submit/complete with
2676 			 * polling, but we still need to lock the ring to
2677 			 * prevent racing with polled issue that got punted to
2678 			 * a workqueue.
2679 			 */
2680 			mutex_lock(&ctx->uring_lock);
2681 iopoll_locked:
2682 			ret2 = io_validate_ext_arg(ctx, flags, argp, argsz);
2683 			if (likely(!ret2))
2684 				ret2 = io_iopoll_check(ctx, min_complete);
2685 			mutex_unlock(&ctx->uring_lock);
2686 		} else {
2687 			struct ext_arg ext_arg = { .argsz = argsz };
2688 
2689 			ret2 = io_get_ext_arg(ctx, flags, argp, &ext_arg);
2690 			if (likely(!ret2))
2691 				ret2 = io_cqring_wait(ctx, min_complete, flags,
2692 						      &ext_arg);
2693 		}
2694 
2695 		if (!ret) {
2696 			ret = ret2;
2697 
2698 			/*
2699 			 * EBADR indicates that one or more CQEs were dropped.
2700 			 * Once the user has been informed we can clear the bit
2701 			 * as they are obviously ok with those drops.
2702 			 */
2703 			if (unlikely(ret2 == -EBADR))
2704 				clear_bit(IO_CHECK_CQ_DROPPED_BIT,
2705 					  &ctx->check_cq);
2706 		}
2707 	}
2708 out:
2709 	if (!(flags & IORING_ENTER_REGISTERED_RING))
2710 		fput(file);
2711 	return ret;
2712 }
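
/*
 * Minimal call pattern (illustrative): submit everything queued and wait
 * for one completion in a single trip, using the raw syscall directly:
 *
 *	int ret = syscall(__NR_io_uring_enter, ring_fd, to_submit, 1,
 *			  IORING_ENTER_GETEVENTS, NULL, 0);
 *
 * On an SQPOLL ring the same call reduces to the wakeup/wait handling
 * above and reports to_submit back, since the poller thread owns
 * submission.
 */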
2713 
2714 static const struct file_operations io_uring_fops = {
2715 	.release	= io_uring_release,
2716 	.mmap		= io_uring_mmap,
2717 	.get_unmapped_area = io_uring_get_unmapped_area,
2718 #ifndef CONFIG_MMU
2719 	.mmap_capabilities = io_uring_nommu_mmap_capabilities,
2720 #endif
2721 	.poll		= io_uring_poll,
2722 #ifdef CONFIG_PROC_FS
2723 	.show_fdinfo	= io_uring_show_fdinfo,
2724 #endif
2725 };
2726 
2727 bool io_is_uring_fops(struct file *file)
2728 {
2729 	return file->f_op == &io_uring_fops;
2730 }
2731 
2732 static __cold int io_allocate_scq_urings(struct io_ring_ctx *ctx,
2733 					 struct io_ctx_config *config)
2734 {
2735 	struct io_uring_params *p = &config->p;
2736 	struct io_rings_layout *rl = &config->layout;
2737 	struct io_uring_region_desc rd;
2738 	struct io_rings *rings;
2739 	int ret;
2740 
2741 	/* make sure these are sane, as we already accounted them */
2742 	ctx->sq_entries = p->sq_entries;
2743 	ctx->cq_entries = p->cq_entries;
2744 
2745 	memset(&rd, 0, sizeof(rd));
2746 	rd.size = PAGE_ALIGN(rl->rings_size);
2747 	if (ctx->flags & IORING_SETUP_NO_MMAP) {
2748 		rd.user_addr = p->cq_off.user_addr;
2749 		rd.flags |= IORING_MEM_REGION_TYPE_USER;
2750 	}
2751 	ret = io_create_region(ctx, &ctx->ring_region, &rd, IORING_OFF_CQ_RING);
2752 	if (ret)
2753 		return ret;
2754 	ctx->rings = rings = io_region_get_ptr(&ctx->ring_region);
2755 	rcu_assign_pointer(ctx->rings_rcu, rings);
2756 	if (!(ctx->flags & IORING_SETUP_NO_SQARRAY))
2757 		ctx->sq_array = (u32 *)((char *)rings + rl->sq_array_offset);
2758 
2759 	memset(&rd, 0, sizeof(rd));
2760 	rd.size = PAGE_ALIGN(rl->sq_size);
2761 	if (ctx->flags & IORING_SETUP_NO_MMAP) {
2762 		rd.user_addr = p->sq_off.user_addr;
2763 		rd.flags |= IORING_MEM_REGION_TYPE_USER;
2764 	}
2765 	ret = io_create_region(ctx, &ctx->sq_region, &rd, IORING_OFF_SQES);
2766 	if (ret) {
2767 		io_rings_free(ctx);
2768 		return ret;
2769 	}
2770 	ctx->sq_sqes = io_region_get_ptr(&ctx->sq_region);
2771 
2772 	memset(rings, 0, sizeof(*rings));
2773 	WRITE_ONCE(rings->sq_ring_mask, ctx->sq_entries - 1);
2774 	WRITE_ONCE(rings->cq_ring_mask, ctx->cq_entries - 1);
2775 	WRITE_ONCE(rings->sq_ring_entries, ctx->sq_entries);
2776 	WRITE_ONCE(rings->cq_ring_entries, ctx->cq_entries);
2777 	return 0;
2778 }
2779 
2780 static int io_uring_install_fd(struct file *file)
2781 {
2782 	int fd;
2783 
2784 	fd = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
2785 	if (fd < 0)
2786 		return fd;
2787 	fd_install(fd, file);
2788 	return fd;
2789 }
2790 
2791 /*
2792  * Allocate an anonymous fd; this is what constitutes the application
2793  * visible backing of an io_uring instance. The application mmaps this
2794  * fd to gain access to the SQ/CQ ring details.
2795  */
2796 static struct file *io_uring_get_file(struct io_ring_ctx *ctx)
2797 {
2798 	/* Create a new inode so that the LSM can block the creation.  */
2799 	return anon_inode_create_getfile("[io_uring]", &io_uring_fops, ctx,
2800 					 O_RDWR | O_CLOEXEC, NULL);
2801 }
2802 
2803 static int io_uring_sanitise_params(struct io_uring_params *p)
2804 {
2805 	unsigned flags = p->flags;
2806 
2807 	if (flags & ~IORING_SETUP_FLAGS)
2808 		return -EINVAL;
2809 
2810 	if (flags & IORING_SETUP_SQ_REWIND) {
2811 		if ((flags & IORING_SETUP_SQPOLL) ||
2812 		    !(flags & IORING_SETUP_NO_SQARRAY))
2813 			return -EINVAL;
2814 	}
2815 
2816 	/* There is no way to mmap rings without a real fd */
2817 	if ((flags & IORING_SETUP_REGISTERED_FD_ONLY) &&
2818 	    !(flags & IORING_SETUP_NO_MMAP))
2819 		return -EINVAL;
2820 
2821 	if (flags & IORING_SETUP_SQPOLL) {
2822 		/* IPI related flags don't make sense with SQPOLL */
2823 		if (flags & (IORING_SETUP_COOP_TASKRUN |
2824 			     IORING_SETUP_TASKRUN_FLAG |
2825 			     IORING_SETUP_DEFER_TASKRUN))
2826 			return -EINVAL;
2827 	}
2828 
2829 	if (flags & IORING_SETUP_TASKRUN_FLAG) {
2830 		if (!(flags & (IORING_SETUP_COOP_TASKRUN |
2831 			       IORING_SETUP_DEFER_TASKRUN)))
2832 			return -EINVAL;
2833 	}
2834 
2835 	/* HYBRID_IOPOLL only valid with IOPOLL */
2836 	if ((flags & IORING_SETUP_HYBRID_IOPOLL) && !(flags & IORING_SETUP_IOPOLL))
2837 		return -EINVAL;
2838 
2839 	/*
2840 	 * For DEFER_TASKRUN we require the completion task to be the same as
2841 	 * the submission task. This implies that there is only one submitter.
2842 	 */
2843 	if ((flags & IORING_SETUP_DEFER_TASKRUN) &&
2844 	    !(flags & IORING_SETUP_SINGLE_ISSUER))
2845 		return -EINVAL;
2846 
2847 	/*
2848 	 * Nonsensical to ask for both CQE32 and mixed CQE support; posting
2849 	 * 16b CQEs on a ring set up with CQE32 isn't supported.
2850 	 */
2851 	if ((flags & (IORING_SETUP_CQE32|IORING_SETUP_CQE_MIXED)) ==
2852 	    (IORING_SETUP_CQE32|IORING_SETUP_CQE_MIXED))
2853 		return -EINVAL;
2854 	/*
2855 	 * Nonsensical to ask for both SQE128 and mixed SQE support; submitting
2856 	 * 64b SQEs on a ring set up with SQE128 isn't supported.
2857 	 */
2858 	if ((flags & (IORING_SETUP_SQE128|IORING_SETUP_SQE_MIXED)) ==
2859 	    (IORING_SETUP_SQE128|IORING_SETUP_SQE_MIXED))
2860 		return -EINVAL;
2861 
2862 	return 0;
2863 }
2864 
2865 static int io_uring_fill_params(struct io_uring_params *p)
2866 {
2867 	unsigned entries = p->sq_entries;
2868 
2869 	if (!entries)
2870 		return -EINVAL;
2871 	if (entries > IORING_MAX_ENTRIES) {
2872 		if (!(p->flags & IORING_SETUP_CLAMP))
2873 			return -EINVAL;
2874 		entries = IORING_MAX_ENTRIES;
2875 	}
2876 
2877 	/*
2878 	 * Use twice as many entries for the CQ ring. It's possible for the
2879 	 * application to drive a higher depth than the size of the SQ ring,
2880 	 * since the sqes are only used at submission time. This allows for
2881 	 * some flexibility in overcommitting. If the application has
2882 	 * set IORING_SETUP_CQSIZE, it will have passed in the desired number
2883 	 * of CQ ring entries manually.
2884 	 */
2885 	p->sq_entries = roundup_pow_of_two(entries);
2886 	if (p->flags & IORING_SETUP_CQSIZE) {
2887 		/*
2888 		 * If IORING_SETUP_CQSIZE is set, we do the same roundup
2889 		 * to a power-of-two, if it isn't already. We do NOT impose
2890 		 * any cq vs sq ring sizing.
2891 		 */
2892 		if (!p->cq_entries)
2893 			return -EINVAL;
2894 		if (p->cq_entries > IORING_MAX_CQ_ENTRIES) {
2895 			if (!(p->flags & IORING_SETUP_CLAMP))
2896 				return -EINVAL;
2897 			p->cq_entries = IORING_MAX_CQ_ENTRIES;
2898 		}
2899 		p->cq_entries = roundup_pow_of_two(p->cq_entries);
2900 		if (p->cq_entries < p->sq_entries)
2901 			return -EINVAL;
2902 	} else {
2903 		p->cq_entries = 2 * p->sq_entries;
2904 	}
2905 
2906 	return 0;
2907 }
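
/*
 * Worked example (illustrative): a setup request with entries = 100 and no
 * IORING_SETUP_CQSIZE rounds the SQ ring up to 128 and sizes the CQ ring
 * at 2 * 128 = 256 entries. With IORING_SETUP_CQSIZE and cq_entries = 100,
 * the CQ ring becomes 128 instead, and would be rejected above if it
 * rounded to fewer entries than sq_entries.
 */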
2908 
2909 int io_prepare_config(struct io_ctx_config *config)
2910 {
2911 	struct io_uring_params *p = &config->p;
2912 	int ret;
2913 
2914 	ret = io_uring_sanitise_params(p);
2915 	if (ret)
2916 		return ret;
2917 
2918 	ret = io_uring_fill_params(p);
2919 	if (ret)
2920 		return ret;
2921 
2922 	ret = rings_size(p->flags, p->sq_entries, p->cq_entries,
2923 			 &config->layout);
2924 	if (ret)
2925 		return ret;
2926 
2927 	p->sq_off.head = offsetof(struct io_rings, sq.head);
2928 	p->sq_off.tail = offsetof(struct io_rings, sq.tail);
2929 	p->sq_off.ring_mask = offsetof(struct io_rings, sq_ring_mask);
2930 	p->sq_off.ring_entries = offsetof(struct io_rings, sq_ring_entries);
2931 	p->sq_off.flags = offsetof(struct io_rings, sq_flags);
2932 	p->sq_off.dropped = offsetof(struct io_rings, sq_dropped);
2933 	p->sq_off.resv1 = 0;
2934 	if (!(p->flags & IORING_SETUP_NO_MMAP))
2935 		p->sq_off.user_addr = 0;
2936 
2937 	p->cq_off.head = offsetof(struct io_rings, cq.head);
2938 	p->cq_off.tail = offsetof(struct io_rings, cq.tail);
2939 	p->cq_off.ring_mask = offsetof(struct io_rings, cq_ring_mask);
2940 	p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries);
2941 	p->cq_off.overflow = offsetof(struct io_rings, cq_overflow);
2942 	p->cq_off.cqes = offsetof(struct io_rings, cqes);
2943 	p->cq_off.flags = offsetof(struct io_rings, cq_flags);
2944 	p->cq_off.resv1 = 0;
2945 	if (!(p->flags & IORING_SETUP_NO_MMAP))
2946 		p->cq_off.user_addr = 0;
2947 	if (!(p->flags & IORING_SETUP_NO_SQARRAY))
2948 		p->sq_off.array = config->layout.sq_array_offset;
2949 
2950 	return 0;
2951 }
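
/*
 * Illustrative consumer (userspace, not kernel code): the offsets filled
 * in above are combined with mmap(2) by the application to locate the
 * rings:
 *
 *	char *sq_ptr = mmap(NULL, sq_ring_sz, PROT_READ | PROT_WRITE,
 *			    MAP_SHARED | MAP_POPULATE, ring_fd,
 *			    IORING_OFF_SQ_RING);
 *	unsigned *sq_head = (unsigned *)(sq_ptr + p.sq_off.head);
 *	unsigned *sq_tail = (unsigned *)(sq_ptr + p.sq_off.tail);
 *
 * with the SQE array mapped separately at IORING_OFF_SQES.
 */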
2952 
2953 void io_restriction_clone(struct io_restriction *dst, struct io_restriction *src)
2954 {
2955 	memcpy(&dst->register_op, &src->register_op, sizeof(dst->register_op));
2956 	memcpy(&dst->sqe_op, &src->sqe_op, sizeof(dst->sqe_op));
2957 	dst->sqe_flags_allowed = src->sqe_flags_allowed;
2958 	dst->sqe_flags_required = src->sqe_flags_required;
2959 	dst->op_registered = src->op_registered;
2960 	dst->reg_registered = src->reg_registered;
2961 
2962 	io_bpf_filter_clone(dst, src);
2963 }
2964 
2965 static void io_ctx_restriction_clone(struct io_ring_ctx *ctx,
2966 				     struct io_restriction *src)
2967 {
2968 	struct io_restriction *dst = &ctx->restrictions;
2969 
2970 	io_restriction_clone(dst, src);
2971 	if (dst->bpf_filters)
2972 		WRITE_ONCE(ctx->bpf_filters, dst->bpf_filters->filters);
2973 	if (dst->op_registered)
2974 		ctx->int_flags |= IO_RING_F_OP_RESTRICTED;
2975 	if (dst->reg_registered)
2976 		ctx->int_flags |= IO_RING_F_REG_RESTRICTED;
2977 }
2978 
2979 static __cold int io_uring_create(struct io_ctx_config *config)
2980 {
2981 	struct io_uring_params *p = &config->p;
2982 	struct io_ring_ctx *ctx;
2983 	struct io_uring_task *tctx;
2984 	struct file *file;
2985 	int ret;
2986 
2987 	ret = io_prepare_config(config);
2988 	if (ret)
2989 		return ret;
2990 
2991 	ctx = io_ring_ctx_alloc(p);
2992 	if (!ctx)
2993 		return -ENOMEM;
2994 
2995 	ctx->clockid = CLOCK_MONOTONIC;
2996 	ctx->clock_offset = 0;
2997 
2998 	if (!(ctx->flags & IORING_SETUP_NO_SQARRAY))
2999 		static_branch_deferred_inc(&io_key_has_sqarray);
3000 
3001 	if ((ctx->flags & IORING_SETUP_DEFER_TASKRUN) &&
3002 	    !(ctx->flags & IORING_SETUP_IOPOLL))
3003 		ctx->int_flags |= IO_RING_F_TASK_COMPLETE;
3004 
3005 	if ((ctx->int_flags & IO_RING_F_TASK_COMPLETE) ||
3006 	    (ctx->flags & IORING_SETUP_IOPOLL))
3007 		ctx->int_flags |= IO_RING_F_LOCKLESS_CQ;
3008 
3009 	/*
3010 	 * lazy poll_wq activation relies on IO_RING_F_TASK_COMPLETE for
3011 	 * synchronisation purposes, see io_activate_pollwq()
3012 	 */
3013 	if (!(ctx->int_flags & IO_RING_F_TASK_COMPLETE))
3014 		ctx->int_flags |= IO_RING_F_POLL_ACTIVATED;
3015 
3016 	/*
3017 	 * When SETUP_IOPOLL and SETUP_SQPOLL are both enabled, userspace
3018 	 * applications don't need to poll for completion events themselves;
3019 	 * they can rely on io_sq_thread to do the polling work, which
3020 	 * reduces cpu usage and uring_lock contention.
3021 	 */
3022 	if (ctx->flags & IORING_SETUP_IOPOLL &&
3023 	    !(ctx->flags & IORING_SETUP_SQPOLL))
3024 		ctx->int_flags |= IO_RING_F_SYSCALL_IOPOLL;
3025 
3026 	if (in_compat_syscall())
3027 		ctx->int_flags |= IO_RING_F_COMPAT;
3028 	if (!ns_capable_noaudit(&init_user_ns, CAP_IPC_LOCK))
3029 		ctx->user = get_uid(current_user());
3030 
3031 	/*
3032 	 * For SQPOLL, we just need a wakeup, always. For !SQPOLL, if
3033 	 * COOP_TASKRUN is set, then IPIs are never needed by the app.
3034 	 */
3035 	if (ctx->flags & (IORING_SETUP_SQPOLL|IORING_SETUP_COOP_TASKRUN))
3036 		ctx->notify_method = TWA_SIGNAL_NO_IPI;
3037 	else
3038 		ctx->notify_method = TWA_SIGNAL;
3039 
3040 	/*
3041 	 * If the current task has restrictions enabled, then copy them to
3042 	 * our newly created ring and mark it as registered.
3043 	 */
3044 	if (current->io_uring_restrict)
3045 		io_ctx_restriction_clone(ctx, current->io_uring_restrict);
3046 
3047 	/*
3048 	 * This is just grabbed for accounting purposes. When a process exits,
3049 	 * the mm is exited and dropped before the files, hence we need to hang
3050 	 * on to this mm purely for the purposes of being able to unaccount
3051 	 * memory (locked/pinned vm). It's not used for anything else.
3052 	 */
3053 	mmgrab(current->mm);
3054 	ctx->mm_account = current->mm;
3055 
3056 	ret = io_allocate_scq_urings(ctx, config);
3057 	if (ret)
3058 		goto err;
3059 
3060 	ret = io_sq_offload_create(ctx, p);
3061 	if (ret)
3062 		goto err;
3063 
3064 	p->features = IORING_FEAT_FLAGS;
3065 
3066 	if (copy_to_user(config->uptr, p, sizeof(*p))) {
3067 		ret = -EFAULT;
3068 		goto err;
3069 	}
3070 
3071 	if (ctx->flags & IORING_SETUP_SINGLE_ISSUER
3072 	    && !(ctx->flags & IORING_SETUP_R_DISABLED))
3073 		ctx->submitter_task = get_task_struct(current);
3074 
3075 	file = io_uring_get_file(ctx);
3076 	if (IS_ERR(file)) {
3077 		ret = PTR_ERR(file);
3078 		goto err;
3079 	}
3080 
3081 	ret = __io_uring_add_tctx_node(ctx);
3082 	if (ret)
3083 		goto err_fput;
3084 	tctx = current->io_uring;
3085 
3086 	/*
3087 	 * Install ring fd as the very last thing, so we don't risk someone
3088 	 * having closed it before we finish setup
3089 	 */
3090 	if (p->flags & IORING_SETUP_REGISTERED_FD_ONLY)
3091 		ret = io_ring_add_registered_file(tctx, file, 0, IO_RINGFD_REG_MAX);
3092 	else
3093 		ret = io_uring_install_fd(file);
3094 	if (ret < 0)
3095 		goto err_fput;
3096 
3097 	trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
3098 	return ret;
3099 err:
3100 	io_ring_ctx_wait_and_kill(ctx);
3101 	return ret;
3102 err_fput:
3103 	fput(file);
3104 	return ret;
3105 }
3106 
3107 /*
3108  * Sets up an io_uring context, and returns the fd. The application asks for a
3109  * ring size; we return the actual sq/cq ring sizes (among other things) in the
3110  * params structure passed in.
3111  */
3112 static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
3113 {
3114 	struct io_ctx_config config;
3115 
3116 	memset(&config, 0, sizeof(config));
3117 
3118 	if (copy_from_user(&config.p, params, sizeof(config.p)))
3119 		return -EFAULT;
3120 
3121 	if (!mem_is_zero(&config.p.resv, sizeof(config.p.resv)))
3122 		return -EINVAL;
3123 
3124 	config.p.sq_entries = entries;
3125 	config.uptr = params;
3126 	return io_uring_create(&config);
3127 }
3128 
3129 static inline int io_uring_allowed(void)
3130 {
3131 	int disabled = READ_ONCE(sysctl_io_uring_disabled);
3132 	kgid_t io_uring_group;
3133 
3134 	if (disabled == 2)
3135 		return -EPERM;
3136 
3137 	if (disabled == 0 || capable(CAP_SYS_ADMIN))
3138 		goto allowed_lsm;
3139 
3140 	io_uring_group = make_kgid(&init_user_ns, sysctl_io_uring_group);
3141 	if (!gid_valid(io_uring_group))
3142 		return -EPERM;
3143 
3144 	if (!in_group_p(io_uring_group))
3145 		return -EPERM;
3146 
3147 allowed_lsm:
3148 	return security_uring_allowed();
3149 }
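
/*
 * Illustrative policy knobs (administrative, not kernel code): the checks
 * above map to the kernel.io_uring_disabled sysctl (0 = enabled, 1 = only
 * CAP_SYS_ADMIN or members of kernel.io_uring_group, 2 = disabled), e.g.:
 *
 *	# sysctl -w kernel.io_uring_disabled=1
 *	# sysctl -w kernel.io_uring_group=1066
 */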
3150 
3151 SYSCALL_DEFINE2(io_uring_setup, u32, entries,
3152 		struct io_uring_params __user *, params)
3153 {
3154 	int ret;
3155 
3156 	ret = io_uring_allowed();
3157 	if (ret)
3158 		return ret;
3159 
3160 	return io_uring_setup(entries, params);
3161 }
3162 
3163 static int __init io_uring_init(void)
3164 {
3165 	struct kmem_cache_args kmem_args = {
3166 		.useroffset = offsetof(struct io_kiocb, cmd.data),
3167 		.usersize = sizeof_field(struct io_kiocb, cmd.data),
3168 		.freeptr_offset = offsetof(struct io_kiocb, work),
3169 		.use_freeptr_offset = true,
3170 	};
3171 
3172 #define __BUILD_BUG_VERIFY_OFFSET_SIZE(stype, eoffset, esize, ename) do { \
3173 	BUILD_BUG_ON(offsetof(stype, ename) != eoffset); \
3174 	BUILD_BUG_ON(sizeof_field(stype, ename) != esize); \
3175 } while (0)
3176 
3177 #define BUILD_BUG_SQE_ELEM(eoffset, etype, ename) \
3178 	__BUILD_BUG_VERIFY_OFFSET_SIZE(struct io_uring_sqe, eoffset, sizeof(etype), ename)
3179 #define BUILD_BUG_SQE_ELEM_SIZE(eoffset, esize, ename) \
3180 	__BUILD_BUG_VERIFY_OFFSET_SIZE(struct io_uring_sqe, eoffset, esize, ename)
3181 	BUILD_BUG_ON(sizeof(struct io_uring_sqe) != 64);
3182 	BUILD_BUG_SQE_ELEM(0,  __u8,   opcode);
3183 	BUILD_BUG_SQE_ELEM(1,  __u8,   flags);
3184 	BUILD_BUG_SQE_ELEM(2,  __u16,  ioprio);
3185 	BUILD_BUG_SQE_ELEM(4,  __s32,  fd);
3186 	BUILD_BUG_SQE_ELEM(8,  __u64,  off);
3187 	BUILD_BUG_SQE_ELEM(8,  __u64,  addr2);
3188 	BUILD_BUG_SQE_ELEM(8,  __u32,  cmd_op);
3189 	BUILD_BUG_SQE_ELEM(12, __u32, __pad1);
3190 	BUILD_BUG_SQE_ELEM(16, __u64,  addr);
3191 	BUILD_BUG_SQE_ELEM(16, __u64,  splice_off_in);
3192 	BUILD_BUG_SQE_ELEM(24, __u32,  len);
3193 	BUILD_BUG_SQE_ELEM(28,     __kernel_rwf_t, rw_flags);
3194 	BUILD_BUG_SQE_ELEM(28, /* compat */   int, rw_flags);
3195 	BUILD_BUG_SQE_ELEM(28, /* compat */ __u32, rw_flags);
3196 	BUILD_BUG_SQE_ELEM(28, __u32,  fsync_flags);
3197 	BUILD_BUG_SQE_ELEM(28, /* compat */ __u16,  poll_events);
3198 	BUILD_BUG_SQE_ELEM(28, __u32,  poll32_events);
3199 	BUILD_BUG_SQE_ELEM(28, __u32,  sync_range_flags);
3200 	BUILD_BUG_SQE_ELEM(28, __u32,  msg_flags);
3201 	BUILD_BUG_SQE_ELEM(28, __u32,  timeout_flags);
3202 	BUILD_BUG_SQE_ELEM(28, __u32,  accept_flags);
3203 	BUILD_BUG_SQE_ELEM(28, __u32,  cancel_flags);
3204 	BUILD_BUG_SQE_ELEM(28, __u32,  open_flags);
3205 	BUILD_BUG_SQE_ELEM(28, __u32,  statx_flags);
3206 	BUILD_BUG_SQE_ELEM(28, __u32,  fadvise_advice);
3207 	BUILD_BUG_SQE_ELEM(28, __u32,  splice_flags);
3208 	BUILD_BUG_SQE_ELEM(28, __u32,  rename_flags);
3209 	BUILD_BUG_SQE_ELEM(28, __u32,  unlink_flags);
3210 	BUILD_BUG_SQE_ELEM(28, __u32,  hardlink_flags);
3211 	BUILD_BUG_SQE_ELEM(28, __u32,  xattr_flags);
3212 	BUILD_BUG_SQE_ELEM(28, __u32,  msg_ring_flags);
3213 	BUILD_BUG_SQE_ELEM(32, __u64,  user_data);
3214 	BUILD_BUG_SQE_ELEM(40, __u16,  buf_index);
3215 	BUILD_BUG_SQE_ELEM(40, __u16,  buf_group);
3216 	BUILD_BUG_SQE_ELEM(42, __u16,  personality);
3217 	BUILD_BUG_SQE_ELEM(44, __s32,  splice_fd_in);
3218 	BUILD_BUG_SQE_ELEM(44, __u32,  file_index);
3219 	BUILD_BUG_SQE_ELEM(44, __u16,  addr_len);
3220 	BUILD_BUG_SQE_ELEM(44, __u8,   write_stream);
3221 	BUILD_BUG_SQE_ELEM(45, __u8,   __pad4[0]);
3222 	BUILD_BUG_SQE_ELEM(46, __u16,  __pad3[0]);
3223 	BUILD_BUG_SQE_ELEM(48, __u64,  addr3);
3224 	BUILD_BUG_SQE_ELEM_SIZE(48, 0, cmd);
3225 	BUILD_BUG_SQE_ELEM(48, __u64, attr_ptr);
3226 	BUILD_BUG_SQE_ELEM(56, __u64, attr_type_mask);
3227 	BUILD_BUG_SQE_ELEM(56, __u64,  __pad2);
3228 
3229 	BUILD_BUG_ON(sizeof(struct io_uring_files_update) !=
3230 		     sizeof(struct io_uring_rsrc_update));
3231 	BUILD_BUG_ON(sizeof(struct io_uring_rsrc_update) >
3232 		     sizeof(struct io_uring_rsrc_update2));
3233 
3234 	/* ->buf_index is u16 */
3235 	BUILD_BUG_ON(offsetof(struct io_uring_buf_ring, bufs) != 0);
3236 	BUILD_BUG_ON(offsetof(struct io_uring_buf, resv) !=
3237 		     offsetof(struct io_uring_buf_ring, tail));
3238 
3239 	/* should fit into one byte */
3240 	BUILD_BUG_ON(SQE_VALID_FLAGS >= (1 << 8));
3241 	BUILD_BUG_ON(SQE_COMMON_FLAGS >= (1 << 8));
3242 	BUILD_BUG_ON((SQE_VALID_FLAGS | SQE_COMMON_FLAGS) != SQE_VALID_FLAGS);
3243 
3244 	BUILD_BUG_ON(__REQ_F_LAST_BIT > 8 * sizeof_field(struct io_kiocb, flags));
3245 
3246 	BUILD_BUG_ON(sizeof(atomic_t) != sizeof(u32));
3247 
3248 	/* top 8 bits are for internal use */
3249 	BUILD_BUG_ON((IORING_URING_CMD_MASK & 0xff000000) != 0);
3250 
3251 	io_uring_optable_init();
3252 
3253 	/* imu->dir is u8 */
3254 	BUILD_BUG_ON((IO_IMU_DEST | IO_IMU_SOURCE) > U8_MAX);
3255 
3256 	/*
3257 	 * Allow user copy in the per-command field, which starts after the
3258 	 * file in io_kiocb and until the opcode field. The openat2 handling
3259 	 * requires copying in user memory into the io_kiocb object in that
3260 	 * range, and HARDENED_USERCOPY will complain if we haven't
3261 	 * correctly annotated this range.
3262 	 */
3263 	req_cachep = kmem_cache_create("io_kiocb", sizeof(struct io_kiocb), &kmem_args,
3264 				SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT |
3265 				SLAB_TYPESAFE_BY_RCU);
3266 
3267 	iou_wq = alloc_workqueue("iou_exit", WQ_UNBOUND, 64);
3268 	BUG_ON(!iou_wq);
3269 
3270 #ifdef CONFIG_SYSCTL
3271 	register_sysctl_init("kernel", kernel_io_uring_disabled_table);
3272 #endif
3273 
3274 	return 0;
3275 };
3276 __initcall(io_uring_init);
3277