xref: /linux/io_uring/io_uring.c (revision 665db14d0712ac27f6a0081510bd811efb3faa3c)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Shared application/kernel submission and completion ring pairs, for
4  * supporting fast/efficient IO.
5  *
6  * A note on the read/write ordering memory barriers that are matched between
7  * the application and kernel side.
8  *
9  * After the application reads the CQ ring tail, it must use an
10  * appropriate smp_rmb() to pair with the smp_wmb() the kernel uses
11  * before writing the tail (using smp_load_acquire to read the tail will
12  * do). It also needs a smp_mb() before updating CQ head (ordering the
13  * entry load(s) with the head store), pairing with an implicit barrier
14  * through a control-dependency in io_get_cqe (smp_store_release to
15  * store head will do). Failure to do so could lead to reading invalid
16  * CQ entries.
17  *
18  * Likewise, the application must use an appropriate smp_wmb() before
19  * writing the SQ tail (ordering SQ entry stores with the tail store),
20  * which pairs with smp_load_acquire in io_get_sqring (smp_store_release
21  * to store the tail will do). And it needs a barrier ordering the SQ
22  * head load before writing new SQ entries (smp_load_acquire to read
23  * head will do).
24  *
25  * When using the SQ poll thread (IORING_SETUP_SQPOLL), the application
26  * needs to check the SQ flags for IORING_SQ_NEED_WAKEUP *after*
27  * updating the SQ tail; a full memory barrier smp_mb() is needed
28  * between.
29  *
30  * Also see the examples in the liburing library:
31  *
32  *	git://git.kernel.dk/liburing
33  *
34  * io_uring also uses READ/WRITE_ONCE() for _any_ store or load that happens
35  * from data shared between the kernel and application. This is done both
36  * for ordering purposes, but also to ensure that once a value is loaded from
37  * data that the application could potentially modify, it remains stable.
38  *
39  * Copyright (C) 2018-2019 Jens Axboe
40  * Copyright (c) 2018-2019 Christoph Hellwig
41  */
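/*
 * For illustration only, a minimal userspace sketch of the CQ side of the
 * protocol described above, assuming the rings were already mmap'ed and the
 * cq_khead/cq_ktail/cq_ring_mask/cqes pointers were derived from the offsets
 * in struct io_uring_params (as liburing does), and that load_acquire() /
 * store_release() stand in for whatever acquire/release primitives userspace
 * has available (C11 atomics, liburing's barrier helpers, etc.):
 *
 *	unsigned head = *cq_khead;
 *	unsigned tail = load_acquire(cq_ktail);	// pairs with the kernel's tail store
 *	while (head != tail) {
 *		struct io_uring_cqe *cqe = &cqes[head & cq_ring_mask];
 *		consume(cqe);			// read the CQE fields
 *		head++;
 *	}
 *	store_release(cq_khead, head);		// orders CQE loads before the head store
 *
 * The SQ side is symmetric: load-acquire the SQ head before reusing entries,
 * fill the SQE(s), then store-release the new SQ tail. See liburing for the
 * real implementation.
 */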
42 #include <linux/kernel.h>
43 #include <linux/init.h>
44 #include <linux/errno.h>
45 #include <linux/syscalls.h>
46 #include <net/compat.h>
47 #include <linux/refcount.h>
48 #include <linux/uio.h>
49 #include <linux/bits.h>
50 
51 #include <linux/sched/signal.h>
52 #include <linux/fs.h>
53 #include <linux/file.h>
54 #include <linux/fdtable.h>
55 #include <linux/mm.h>
56 #include <linux/mman.h>
57 #include <linux/percpu.h>
58 #include <linux/slab.h>
59 #include <linux/bvec.h>
60 #include <linux/net.h>
61 #include <net/sock.h>
62 #include <linux/anon_inodes.h>
63 #include <linux/sched/mm.h>
64 #include <linux/uaccess.h>
65 #include <linux/nospec.h>
66 #include <linux/fsnotify.h>
67 #include <linux/fadvise.h>
68 #include <linux/task_work.h>
69 #include <linux/io_uring.h>
70 #include <linux/io_uring/cmd.h>
71 #include <linux/audit.h>
72 #include <linux/security.h>
73 #include <asm/shmparam.h>
74 
75 #define CREATE_TRACE_POINTS
76 #include <trace/events/io_uring.h>
77 
78 #include <uapi/linux/io_uring.h>
79 
80 #include "io-wq.h"
81 
82 #include "io_uring.h"
83 #include "opdef.h"
84 #include "refs.h"
85 #include "tctx.h"
86 #include "register.h"
87 #include "sqpoll.h"
88 #include "fdinfo.h"
89 #include "kbuf.h"
90 #include "rsrc.h"
91 #include "cancel.h"
92 #include "net.h"
93 #include "notif.h"
94 #include "waitid.h"
95 #include "futex.h"
96 #include "napi.h"
97 #include "uring_cmd.h"
98 #include "msg_ring.h"
99 #include "memmap.h"
100 
101 #include "timeout.h"
102 #include "poll.h"
103 #include "rw.h"
104 #include "alloc_cache.h"
105 #include "eventfd.h"
106 
107 #define IORING_MAX_ENTRIES	32768
108 #define IORING_MAX_CQ_ENTRIES	(2 * IORING_MAX_ENTRIES)
109 
110 #define SQE_COMMON_FLAGS (IOSQE_FIXED_FILE | IOSQE_IO_LINK | \
111 			  IOSQE_IO_HARDLINK | IOSQE_ASYNC)
112 
113 #define SQE_VALID_FLAGS	(SQE_COMMON_FLAGS | IOSQE_BUFFER_SELECT | \
114 			IOSQE_IO_DRAIN | IOSQE_CQE_SKIP_SUCCESS)
115 
116 #define IO_REQ_CLEAN_FLAGS (REQ_F_BUFFER_SELECTED | REQ_F_NEED_CLEANUP | \
117 				REQ_F_POLLED | REQ_F_INFLIGHT | REQ_F_CREDS | \
118 				REQ_F_ASYNC_DATA)
119 
120 #define IO_REQ_CLEAN_SLOW_FLAGS (REQ_F_REFCOUNT | REQ_F_LINK | REQ_F_HARDLINK |\
121 				 IO_REQ_CLEAN_FLAGS)
122 
123 #define IO_TCTX_REFS_CACHE_NR	(1U << 10)
124 
125 #define IO_COMPL_BATCH			32
126 #define IO_REQ_ALLOC_BATCH		8
127 
128 struct io_defer_entry {
129 	struct list_head	list;
130 	struct io_kiocb		*req;
131 	u32			seq;
132 };
133 
134 /* requests with any of those set should undergo io_disarm_next() */
135 #define IO_DISARM_MASK (REQ_F_ARM_LTIMEOUT | REQ_F_LINK_TIMEOUT | REQ_F_FAIL)
136 #define IO_REQ_LINK_FLAGS (REQ_F_LINK | REQ_F_HARDLINK)
137 
138 /*
139  * No waiters. It's larger than any valid value of the tw counter
140  * so that tests against ->cq_wait_nr would fail and skip wake_up().
141  */
142 #define IO_CQ_WAKE_INIT		(-1U)
143 /* Forced wake up if there is a waiter regardless of ->cq_wait_nr */
144 #define IO_CQ_WAKE_FORCE	(IO_CQ_WAKE_INIT >> 1)
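/*
 * A worked example of how these values interact with the waiter check in
 * io_req_local_work_add() below: with no waiters, ->cq_wait_nr holds
 * IO_CQ_WAKE_INIT (UINT_MAX), so any nr_tw, including IO_CQ_WAKE_FORCE,
 * satisfies "nr_tw < nr_wait" and the wakeup is skipped. Once a waiter
 * stores a small wait count (say 8), a lazy add with nr_tw == 3 still skips
 * the wakeup, while a forced add uses IO_CQ_WAKE_FORCE, which exceeds any
 * legal CQE count and therefore always triggers it.
 */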
145 
146 static bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
147 					 struct task_struct *task,
148 					 bool cancel_all);
149 
150 static void io_queue_sqe(struct io_kiocb *req);
151 
152 struct kmem_cache *req_cachep;
153 static struct workqueue_struct *iou_wq __ro_after_init;
154 
155 static int __read_mostly sysctl_io_uring_disabled;
156 static int __read_mostly sysctl_io_uring_group = -1;
157 
158 #ifdef CONFIG_SYSCTL
159 static struct ctl_table kernel_io_uring_disabled_table[] = {
160 	{
161 		.procname	= "io_uring_disabled",
162 		.data		= &sysctl_io_uring_disabled,
163 		.maxlen		= sizeof(sysctl_io_uring_disabled),
164 		.mode		= 0644,
165 		.proc_handler	= proc_dointvec_minmax,
166 		.extra1		= SYSCTL_ZERO,
167 		.extra2		= SYSCTL_TWO,
168 	},
169 	{
170 		.procname	= "io_uring_group",
171 		.data		= &sysctl_io_uring_group,
172 		.maxlen		= sizeof(gid_t),
173 		.mode		= 0644,
174 		.proc_handler	= proc_dointvec,
175 	},
176 };
177 #endif
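/*
 * These knobs are exposed as kernel.io_uring_disabled and kernel.io_uring_group
 * (see Documentation/admin-guide/sysctl/kernel.rst): 0 leaves io_uring enabled
 * for everyone, 1 restricts ring creation to CAP_SYS_ADMIN or members of
 * io_uring_group, and 2 disables ring creation entirely. For example:
 *
 *	# sysctl -w kernel.io_uring_group=1001
 *	# sysctl -w kernel.io_uring_disabled=1
 */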
178 
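/* CQEs pending from the kernel's point of view, using the private cached tail */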
179 static inline unsigned int __io_cqring_events(struct io_ring_ctx *ctx)
180 {
181 	return ctx->cached_cq_tail - READ_ONCE(ctx->rings->cq.head);
182 }
183 
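/* CQEs pending as visible to userspace: head and tail both read from the shared ring */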
184 static inline unsigned int __io_cqring_events_user(struct io_ring_ctx *ctx)
185 {
186 	return READ_ONCE(ctx->rings->cq.tail) - READ_ONCE(ctx->rings->cq.head);
187 }
188 
189 static bool io_match_linked(struct io_kiocb *head)
190 {
191 	struct io_kiocb *req;
192 
193 	io_for_each_link(req, head) {
194 		if (req->flags & REQ_F_INFLIGHT)
195 			return true;
196 	}
197 	return false;
198 }
199 
200 /*
201  * As io_match_task() but protected against racing with linked timeouts.
202  * User must not hold timeout_lock.
203  */
204 bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
205 			bool cancel_all)
206 {
207 	bool matched;
208 
209 	if (task && head->task != task)
210 		return false;
211 	if (cancel_all)
212 		return true;
213 
214 	if (head->flags & REQ_F_LINK_TIMEOUT) {
215 		struct io_ring_ctx *ctx = head->ctx;
216 
217 		/* protect against races with linked timeouts */
218 		spin_lock_irq(&ctx->timeout_lock);
219 		matched = io_match_linked(head);
220 		spin_unlock_irq(&ctx->timeout_lock);
221 	} else {
222 		matched = io_match_linked(head);
223 	}
224 	return matched;
225 }
226 
227 static inline void req_fail_link_node(struct io_kiocb *req, int res)
228 {
229 	req_set_fail(req);
230 	io_req_set_res(req, res, 0);
231 }
232 
233 static inline void io_req_add_to_cache(struct io_kiocb *req, struct io_ring_ctx *ctx)
234 {
235 	wq_stack_add_head(&req->comp_list, &ctx->submit_state.free_list);
236 }
237 
238 static __cold void io_ring_ctx_ref_free(struct percpu_ref *ref)
239 {
240 	struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);
241 
242 	complete(&ctx->ref_comp);
243 }
244 
245 static __cold void io_fallback_req_func(struct work_struct *work)
246 {
247 	struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx,
248 						fallback_work.work);
249 	struct llist_node *node = llist_del_all(&ctx->fallback_llist);
250 	struct io_kiocb *req, *tmp;
251 	struct io_tw_state ts = {};
252 
253 	percpu_ref_get(&ctx->refs);
254 	mutex_lock(&ctx->uring_lock);
255 	llist_for_each_entry_safe(req, tmp, node, io_task_work.node)
256 		req->io_task_work.func(req, &ts);
257 	io_submit_flush_completions(ctx);
258 	mutex_unlock(&ctx->uring_lock);
259 	percpu_ref_put(&ctx->refs);
260 }
261 
262 static int io_alloc_hash_table(struct io_hash_table *table, unsigned bits)
263 {
264 	unsigned hash_buckets = 1U << bits;
265 	size_t hash_size = hash_buckets * sizeof(table->hbs[0]);
266 
267 	table->hbs = kmalloc(hash_size, GFP_KERNEL);
268 	if (!table->hbs)
269 		return -ENOMEM;
270 
271 	table->hash_bits = bits;
272 	init_hash_table(table, hash_buckets);
273 	return 0;
274 }
275 
276 static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
277 {
278 	struct io_ring_ctx *ctx;
279 	int hash_bits;
280 	bool ret;
281 
282 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
283 	if (!ctx)
284 		return NULL;
285 
286 	xa_init(&ctx->io_bl_xa);
287 
288 	/*
289 	 * Use 5 bits less than the max cq entries; that should give us around
290 	 * 32 entries per hash list if totally full and uniformly spread, while
291 	 * capping the number of buckets so we don't overconsume memory.
292 	 */
293 	hash_bits = ilog2(p->cq_entries) - 5;
294 	hash_bits = clamp(hash_bits, 1, 8);
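	/*
	 * For example, cq_entries == 4096 gives ilog2(4096) - 5 == 7, i.e.
	 * 128 buckets and roughly 4096 / 128 == 32 entries per bucket when
	 * the CQ is completely full.
	 */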
295 	if (io_alloc_hash_table(&ctx->cancel_table, hash_bits))
296 		goto err;
297 	if (io_alloc_hash_table(&ctx->cancel_table_locked, hash_bits))
298 		goto err;
299 	if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free,
300 			    0, GFP_KERNEL))
301 		goto err;
302 
303 	ctx->flags = p->flags;
304 	atomic_set(&ctx->cq_wait_nr, IO_CQ_WAKE_INIT);
305 	init_waitqueue_head(&ctx->sqo_sq_wait);
306 	INIT_LIST_HEAD(&ctx->sqd_list);
307 	INIT_LIST_HEAD(&ctx->cq_overflow_list);
308 	INIT_LIST_HEAD(&ctx->io_buffers_cache);
309 	ret = io_alloc_cache_init(&ctx->rsrc_node_cache, IO_NODE_ALLOC_CACHE_MAX,
310 			    sizeof(struct io_rsrc_node));
311 	ret |= io_alloc_cache_init(&ctx->apoll_cache, IO_POLL_ALLOC_CACHE_MAX,
312 			    sizeof(struct async_poll));
313 	ret |= io_alloc_cache_init(&ctx->netmsg_cache, IO_ALLOC_CACHE_MAX,
314 			    sizeof(struct io_async_msghdr));
315 	ret |= io_alloc_cache_init(&ctx->rw_cache, IO_ALLOC_CACHE_MAX,
316 			    sizeof(struct io_async_rw));
317 	ret |= io_alloc_cache_init(&ctx->uring_cache, IO_ALLOC_CACHE_MAX,
318 			    sizeof(struct uring_cache));
319 	spin_lock_init(&ctx->msg_lock);
320 	ret |= io_alloc_cache_init(&ctx->msg_cache, IO_ALLOC_CACHE_MAX,
321 			    sizeof(struct io_kiocb));
322 	ret |= io_futex_cache_init(ctx);
323 	if (ret)
324 		goto err;
325 	init_completion(&ctx->ref_comp);
326 	xa_init_flags(&ctx->personalities, XA_FLAGS_ALLOC1);
327 	mutex_init(&ctx->uring_lock);
328 	init_waitqueue_head(&ctx->cq_wait);
329 	init_waitqueue_head(&ctx->poll_wq);
330 	init_waitqueue_head(&ctx->rsrc_quiesce_wq);
331 	spin_lock_init(&ctx->completion_lock);
332 	spin_lock_init(&ctx->timeout_lock);
333 	INIT_WQ_LIST(&ctx->iopoll_list);
334 	INIT_LIST_HEAD(&ctx->io_buffers_comp);
335 	INIT_LIST_HEAD(&ctx->defer_list);
336 	INIT_LIST_HEAD(&ctx->timeout_list);
337 	INIT_LIST_HEAD(&ctx->ltimeout_list);
338 	INIT_LIST_HEAD(&ctx->rsrc_ref_list);
339 	init_llist_head(&ctx->work_llist);
340 	INIT_LIST_HEAD(&ctx->tctx_list);
341 	ctx->submit_state.free_list.next = NULL;
342 	INIT_HLIST_HEAD(&ctx->waitid_list);
343 #ifdef CONFIG_FUTEX
344 	INIT_HLIST_HEAD(&ctx->futex_list);
345 #endif
346 	INIT_DELAYED_WORK(&ctx->fallback_work, io_fallback_req_func);
347 	INIT_WQ_LIST(&ctx->submit_state.compl_reqs);
348 	INIT_HLIST_HEAD(&ctx->cancelable_uring_cmd);
349 	io_napi_init(ctx);
350 
351 	return ctx;
352 err:
353 	io_alloc_cache_free(&ctx->rsrc_node_cache, kfree);
354 	io_alloc_cache_free(&ctx->apoll_cache, kfree);
355 	io_alloc_cache_free(&ctx->netmsg_cache, io_netmsg_cache_free);
356 	io_alloc_cache_free(&ctx->rw_cache, io_rw_cache_free);
357 	io_alloc_cache_free(&ctx->uring_cache, kfree);
358 	io_alloc_cache_free(&ctx->msg_cache, io_msg_cache_free);
359 	io_futex_cache_free(ctx);
360 	kfree(ctx->cancel_table.hbs);
361 	kfree(ctx->cancel_table_locked.hbs);
362 	xa_destroy(&ctx->io_bl_xa);
363 	kfree(ctx);
364 	return NULL;
365 }
366 
367 static void io_account_cq_overflow(struct io_ring_ctx *ctx)
368 {
369 	struct io_rings *r = ctx->rings;
370 
371 	WRITE_ONCE(r->cq_overflow, READ_ONCE(r->cq_overflow) + 1);
372 	ctx->cq_extra--;
373 }
374 
375 static bool req_need_defer(struct io_kiocb *req, u32 seq)
376 {
377 	if (unlikely(req->flags & REQ_F_IO_DRAIN)) {
378 		struct io_ring_ctx *ctx = req->ctx;
379 
380 		return seq + READ_ONCE(ctx->cq_extra) != ctx->cached_cq_tail;
381 	}
382 
383 	return false;
384 }
385 
386 static void io_clean_op(struct io_kiocb *req)
387 {
388 	if (req->flags & REQ_F_BUFFER_SELECTED) {
389 		spin_lock(&req->ctx->completion_lock);
390 		io_kbuf_drop(req);
391 		spin_unlock(&req->ctx->completion_lock);
392 	}
393 
394 	if (req->flags & REQ_F_NEED_CLEANUP) {
395 		const struct io_cold_def *def = &io_cold_defs[req->opcode];
396 
397 		if (def->cleanup)
398 			def->cleanup(req);
399 	}
400 	if ((req->flags & REQ_F_POLLED) && req->apoll) {
401 		kfree(req->apoll->double_poll);
402 		kfree(req->apoll);
403 		req->apoll = NULL;
404 	}
405 	if (req->flags & REQ_F_INFLIGHT) {
406 		struct io_uring_task *tctx = req->task->io_uring;
407 
408 		atomic_dec(&tctx->inflight_tracked);
409 	}
410 	if (req->flags & REQ_F_CREDS)
411 		put_cred(req->creds);
412 	if (req->flags & REQ_F_ASYNC_DATA) {
413 		kfree(req->async_data);
414 		req->async_data = NULL;
415 	}
416 	req->flags &= ~IO_REQ_CLEAN_FLAGS;
417 }
418 
419 static inline void io_req_track_inflight(struct io_kiocb *req)
420 {
421 	if (!(req->flags & REQ_F_INFLIGHT)) {
422 		req->flags |= REQ_F_INFLIGHT;
423 		atomic_inc(&req->task->io_uring->inflight_tracked);
424 	}
425 }
426 
427 static struct io_kiocb *__io_prep_linked_timeout(struct io_kiocb *req)
428 {
429 	if (WARN_ON_ONCE(!req->link))
430 		return NULL;
431 
432 	req->flags &= ~REQ_F_ARM_LTIMEOUT;
433 	req->flags |= REQ_F_LINK_TIMEOUT;
434 
435 	/* linked timeouts should have two refs once prep'ed */
436 	io_req_set_refcount(req);
437 	__io_req_set_refcount(req->link, 2);
438 	return req->link;
439 }
440 
441 static inline struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
442 {
443 	if (likely(!(req->flags & REQ_F_ARM_LTIMEOUT)))
444 		return NULL;
445 	return __io_prep_linked_timeout(req);
446 }
447 
448 static noinline void __io_arm_ltimeout(struct io_kiocb *req)
449 {
450 	io_queue_linked_timeout(__io_prep_linked_timeout(req));
451 }
452 
453 static inline void io_arm_ltimeout(struct io_kiocb *req)
454 {
455 	if (unlikely(req->flags & REQ_F_ARM_LTIMEOUT))
456 		__io_arm_ltimeout(req);
457 }
458 
459 static void io_prep_async_work(struct io_kiocb *req)
460 {
461 	const struct io_issue_def *def = &io_issue_defs[req->opcode];
462 	struct io_ring_ctx *ctx = req->ctx;
463 
464 	if (!(req->flags & REQ_F_CREDS)) {
465 		req->flags |= REQ_F_CREDS;
466 		req->creds = get_current_cred();
467 	}
468 
469 	req->work.list.next = NULL;
470 	atomic_set(&req->work.flags, 0);
471 	if (req->flags & REQ_F_FORCE_ASYNC)
472 		atomic_or(IO_WQ_WORK_CONCURRENT, &req->work.flags);
473 
474 	if (req->file && !(req->flags & REQ_F_FIXED_FILE))
475 		req->flags |= io_file_get_flags(req->file);
476 
477 	if (req->file && (req->flags & REQ_F_ISREG)) {
478 		bool should_hash = def->hash_reg_file;
479 
480 		/* don't serialize this request if the fs doesn't need it */
481 		if (should_hash && (req->file->f_flags & O_DIRECT) &&
482 		    (req->file->f_op->fop_flags & FOP_DIO_PARALLEL_WRITE))
483 			should_hash = false;
484 		if (should_hash || (ctx->flags & IORING_SETUP_IOPOLL))
485 			io_wq_hash_work(&req->work, file_inode(req->file));
486 	} else if (!req->file || !S_ISBLK(file_inode(req->file)->i_mode)) {
487 		if (def->unbound_nonreg_file)
488 			atomic_or(IO_WQ_WORK_UNBOUND, &req->work.flags);
489 	}
490 }
491 
492 static void io_prep_async_link(struct io_kiocb *req)
493 {
494 	struct io_kiocb *cur;
495 
496 	if (req->flags & REQ_F_LINK_TIMEOUT) {
497 		struct io_ring_ctx *ctx = req->ctx;
498 
499 		spin_lock_irq(&ctx->timeout_lock);
500 		io_for_each_link(cur, req)
501 			io_prep_async_work(cur);
502 		spin_unlock_irq(&ctx->timeout_lock);
503 	} else {
504 		io_for_each_link(cur, req)
505 			io_prep_async_work(cur);
506 	}
507 }
508 
509 static void io_queue_iowq(struct io_kiocb *req)
510 {
511 	struct io_kiocb *link = io_prep_linked_timeout(req);
512 	struct io_uring_task *tctx = req->task->io_uring;
513 
514 	BUG_ON(!tctx);
515 	BUG_ON(!tctx->io_wq);
516 
517 	/* init ->work of the whole link before punting */
518 	io_prep_async_link(req);
519 
520 	/*
521 	 * Not expected to happen, but if we do have a bug where this _can_
522 	 * happen, catch it here and ensure the request is marked as
523 	 * canceled. That will make io-wq go through the usual work cancel
524 	 * procedure rather than attempt to run this request (or create a new
525 	 * worker for it).
526 	 */
527 	if (WARN_ON_ONCE(!same_thread_group(req->task, current)))
528 		atomic_or(IO_WQ_WORK_CANCEL, &req->work.flags);
529 
530 	trace_io_uring_queue_async_work(req, io_wq_is_hashed(&req->work));
531 	io_wq_enqueue(tctx->io_wq, &req->work);
532 	if (link)
533 		io_queue_linked_timeout(link);
534 }
535 
536 static void io_req_queue_iowq_tw(struct io_kiocb *req, struct io_tw_state *ts)
537 {
538 	io_queue_iowq(req);
539 }
540 
541 void io_req_queue_iowq(struct io_kiocb *req)
542 {
543 	req->io_task_work.func = io_req_queue_iowq_tw;
544 	io_req_task_work_add(req);
545 }
546 
547 static __cold void io_queue_deferred(struct io_ring_ctx *ctx)
548 {
549 	while (!list_empty(&ctx->defer_list)) {
550 		struct io_defer_entry *de = list_first_entry(&ctx->defer_list,
551 						struct io_defer_entry, list);
552 
553 		if (req_need_defer(de->req, de->seq))
554 			break;
555 		list_del_init(&de->list);
556 		io_req_task_queue(de->req);
557 		kfree(de);
558 	}
559 }
560 
561 void __io_commit_cqring_flush(struct io_ring_ctx *ctx)
562 {
563 	if (ctx->poll_activated)
564 		io_poll_wq_wake(ctx);
565 	if (ctx->off_timeout_used)
566 		io_flush_timeouts(ctx);
567 	if (ctx->drain_active) {
568 		spin_lock(&ctx->completion_lock);
569 		io_queue_deferred(ctx);
570 		spin_unlock(&ctx->completion_lock);
571 	}
572 	if (ctx->has_evfd)
573 		io_eventfd_flush_signal(ctx);
574 }
575 
576 static inline void __io_cq_lock(struct io_ring_ctx *ctx)
577 {
578 	if (!ctx->lockless_cq)
579 		spin_lock(&ctx->completion_lock);
580 }
581 
582 static inline void io_cq_lock(struct io_ring_ctx *ctx)
583 	__acquires(ctx->completion_lock)
584 {
585 	spin_lock(&ctx->completion_lock);
586 }
587 
588 static inline void __io_cq_unlock_post(struct io_ring_ctx *ctx)
589 {
590 	io_commit_cqring(ctx);
591 	if (!ctx->task_complete) {
592 		if (!ctx->lockless_cq)
593 			spin_unlock(&ctx->completion_lock);
594 		/* IOPOLL rings only need to wake up if it's also SQPOLL */
595 		if (!ctx->syscall_iopoll)
596 			io_cqring_wake(ctx);
597 	}
598 	io_commit_cqring_flush(ctx);
599 }
600 
601 static void io_cq_unlock_post(struct io_ring_ctx *ctx)
602 	__releases(ctx->completion_lock)
603 {
604 	io_commit_cqring(ctx);
605 	spin_unlock(&ctx->completion_lock);
606 	io_cqring_wake(ctx);
607 	io_commit_cqring_flush(ctx);
608 }
609 
610 static void __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool dying)
611 {
612 	size_t cqe_size = sizeof(struct io_uring_cqe);
613 
614 	lockdep_assert_held(&ctx->uring_lock);
615 
616 	/* don't abort if we're dying, entries must get freed */
617 	if (!dying && __io_cqring_events(ctx) == ctx->cq_entries)
618 		return;
619 
620 	if (ctx->flags & IORING_SETUP_CQE32)
621 		cqe_size <<= 1;
622 
623 	io_cq_lock(ctx);
624 	while (!list_empty(&ctx->cq_overflow_list)) {
625 		struct io_uring_cqe *cqe;
626 		struct io_overflow_cqe *ocqe;
627 
628 		ocqe = list_first_entry(&ctx->cq_overflow_list,
629 					struct io_overflow_cqe, list);
630 
631 		if (!dying) {
632 			if (!io_get_cqe_overflow(ctx, &cqe, true))
633 				break;
634 			memcpy(cqe, &ocqe->cqe, cqe_size);
635 		}
636 		list_del(&ocqe->list);
637 		kfree(ocqe);
638 	}
639 
640 	if (list_empty(&ctx->cq_overflow_list)) {
641 		clear_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq);
642 		atomic_andnot(IORING_SQ_CQ_OVERFLOW, &ctx->rings->sq_flags);
643 	}
644 	io_cq_unlock_post(ctx);
645 }
646 
647 static void io_cqring_overflow_kill(struct io_ring_ctx *ctx)
648 {
649 	if (ctx->rings)
650 		__io_cqring_overflow_flush(ctx, true);
651 }
652 
653 static void io_cqring_do_overflow_flush(struct io_ring_ctx *ctx)
654 {
655 	mutex_lock(&ctx->uring_lock);
656 	__io_cqring_overflow_flush(ctx, false);
657 	mutex_unlock(&ctx->uring_lock);
658 }
659 
660 /* can be called by any task */
661 static void io_put_task_remote(struct task_struct *task)
662 {
663 	struct io_uring_task *tctx = task->io_uring;
664 
665 	percpu_counter_sub(&tctx->inflight, 1);
666 	if (unlikely(atomic_read(&tctx->in_cancel)))
667 		wake_up(&tctx->wait);
668 	put_task_struct(task);
669 }
670 
671 /* used by a task to put its own references */
672 static void io_put_task_local(struct task_struct *task)
673 {
674 	task->io_uring->cached_refs++;
675 }
676 
677 /* must be called shortly after putting a request */
678 static inline void io_put_task(struct task_struct *task)
679 {
680 	if (likely(task == current))
681 		io_put_task_local(task);
682 	else
683 		io_put_task_remote(task);
684 }
685 
686 void io_task_refs_refill(struct io_uring_task *tctx)
687 {
688 	unsigned int refill = -tctx->cached_refs + IO_TCTX_REFS_CACHE_NR;
689 
690 	percpu_counter_add(&tctx->inflight, refill);
691 	refcount_add(refill, &current->usage);
692 	tctx->cached_refs += refill;
693 }
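/*
 * Worked example of the refill above: if io_get_task_refs() drove
 * ->cached_refs down to -3, refill == 3 + IO_TCTX_REFS_CACHE_NR == 1027,
 * so the inflight and task usage counts grow by 1027 and ->cached_refs
 * ends up back at exactly IO_TCTX_REFS_CACHE_NR (1024).
 */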
694 
695 static __cold void io_uring_drop_tctx_refs(struct task_struct *task)
696 {
697 	struct io_uring_task *tctx = task->io_uring;
698 	unsigned int refs = tctx->cached_refs;
699 
700 	if (refs) {
701 		tctx->cached_refs = 0;
702 		percpu_counter_sub(&tctx->inflight, refs);
703 		put_task_struct_many(task, refs);
704 	}
705 }
706 
707 static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data,
708 				     s32 res, u32 cflags, u64 extra1, u64 extra2)
709 {
710 	struct io_overflow_cqe *ocqe;
711 	size_t ocq_size = sizeof(struct io_overflow_cqe);
712 	bool is_cqe32 = (ctx->flags & IORING_SETUP_CQE32);
713 
714 	lockdep_assert_held(&ctx->completion_lock);
715 
716 	if (is_cqe32)
717 		ocq_size += sizeof(struct io_uring_cqe);
718 
719 	ocqe = kmalloc(ocq_size, GFP_ATOMIC | __GFP_ACCOUNT);
720 	trace_io_uring_cqe_overflow(ctx, user_data, res, cflags, ocqe);
721 	if (!ocqe) {
722 		/*
723 		 * If we're in ring overflow flush mode, or in task cancel mode,
724 		 * or cannot allocate an overflow entry, then we need to drop it
725 		 * on the floor.
726 		 */
727 		io_account_cq_overflow(ctx);
728 		set_bit(IO_CHECK_CQ_DROPPED_BIT, &ctx->check_cq);
729 		return false;
730 	}
731 	if (list_empty(&ctx->cq_overflow_list)) {
732 		set_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq);
733 		atomic_or(IORING_SQ_CQ_OVERFLOW, &ctx->rings->sq_flags);
734 
735 	}
736 	ocqe->cqe.user_data = user_data;
737 	ocqe->cqe.res = res;
738 	ocqe->cqe.flags = cflags;
739 	if (is_cqe32) {
740 		ocqe->cqe.big_cqe[0] = extra1;
741 		ocqe->cqe.big_cqe[1] = extra2;
742 	}
743 	list_add_tail(&ocqe->list, &ctx->cq_overflow_list);
744 	return true;
745 }
746 
747 static void io_req_cqe_overflow(struct io_kiocb *req)
748 {
749 	io_cqring_event_overflow(req->ctx, req->cqe.user_data,
750 				req->cqe.res, req->cqe.flags,
751 				req->big_cqe.extra1, req->big_cqe.extra2);
752 	memset(&req->big_cqe, 0, sizeof(req->big_cqe));
753 }
754 
755 /*
756  * writes to the cq entry need to come after reading head; the
757  * control dependency is enough as we're using WRITE_ONCE to
758  * fill the cq entry
759  */
760 bool io_cqe_cache_refill(struct io_ring_ctx *ctx, bool overflow)
761 {
762 	struct io_rings *rings = ctx->rings;
763 	unsigned int off = ctx->cached_cq_tail & (ctx->cq_entries - 1);
764 	unsigned int free, queued, len;
765 
766 	/*
767 	 * Posting into the CQ when there are pending overflowed CQEs may break
768 	 * ordering guarantees, which will affect links, F_MORE users and more.
769 	 * Force overflow the completion.
770 	 */
771 	if (!overflow && (ctx->check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT)))
772 		return false;
773 
774 	/* userspace may cheat by modifying the tail, so be safe and take the min */
775 	queued = min(__io_cqring_events(ctx), ctx->cq_entries);
776 	free = ctx->cq_entries - queued;
777 	/* we need a contiguous range, limit based on the current array offset */
778 	len = min(free, ctx->cq_entries - off);
779 	if (!len)
780 		return false;
781 
782 	if (ctx->flags & IORING_SETUP_CQE32) {
783 		off <<= 1;
784 		len <<= 1;
785 	}
786 
787 	ctx->cqe_cached = &rings->cqes[off];
788 	ctx->cqe_sentinel = ctx->cqe_cached + len;
789 	return true;
790 }
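/*
 * A worked example of the range computation above (single-sized CQEs):
 * with cq_entries == 8, cached_cq_tail == 6 and cq.head == 3, we get
 * off == 6, queued == 3, free == 5 and len == min(5, 8 - 6) == 2, so the
 * cache covers &cqes[6] and &cqes[7]; the next refill wraps back to 0.
 */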
791 
792 static bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res,
793 			      u32 cflags)
794 {
795 	struct io_uring_cqe *cqe;
796 
797 	ctx->cq_extra++;
798 
799 	/*
800 	 * If we can't get a cq entry, userspace overflowed the
801 	 * submission (by quite a lot). Increment the overflow count in
802 	 * the ring.
803 	 */
804 	if (likely(io_get_cqe(ctx, &cqe))) {
805 		trace_io_uring_complete(ctx, NULL, user_data, res, cflags, 0, 0);
806 
807 		WRITE_ONCE(cqe->user_data, user_data);
808 		WRITE_ONCE(cqe->res, res);
809 		WRITE_ONCE(cqe->flags, cflags);
810 
811 		if (ctx->flags & IORING_SETUP_CQE32) {
812 			WRITE_ONCE(cqe->big_cqe[0], 0);
813 			WRITE_ONCE(cqe->big_cqe[1], 0);
814 		}
815 		return true;
816 	}
817 	return false;
818 }
819 
820 static bool __io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res,
821 			      u32 cflags)
822 {
823 	bool filled;
824 
825 	filled = io_fill_cqe_aux(ctx, user_data, res, cflags);
826 	if (!filled)
827 		filled = io_cqring_event_overflow(ctx, user_data, res, cflags, 0, 0);
828 
829 	return filled;
830 }
831 
832 bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags)
833 {
834 	bool filled;
835 
836 	io_cq_lock(ctx);
837 	filled = __io_post_aux_cqe(ctx, user_data, res, cflags);
838 	io_cq_unlock_post(ctx);
839 	return filled;
840 }
841 
842 /*
843  * Must be called from inline task_work so we know a flush will happen later,
844  * and obviously with ctx->uring_lock held (tw always has that).
845  */
846 void io_add_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags)
847 {
848 	if (!io_fill_cqe_aux(ctx, user_data, res, cflags)) {
849 		spin_lock(&ctx->completion_lock);
850 		io_cqring_event_overflow(ctx, user_data, res, cflags, 0, 0);
851 		spin_unlock(&ctx->completion_lock);
852 	}
853 	ctx->submit_state.cq_flush = true;
854 }
855 
856 /*
857  * A helper for multishot requests posting additional CQEs.
858  * Should only be used from a task_work including IO_URING_F_MULTISHOT.
859  */
860 bool io_req_post_cqe(struct io_kiocb *req, s32 res, u32 cflags)
861 {
862 	struct io_ring_ctx *ctx = req->ctx;
863 	bool posted;
864 
865 	lockdep_assert(!io_wq_current_is_worker());
866 	lockdep_assert_held(&ctx->uring_lock);
867 
868 	__io_cq_lock(ctx);
869 	posted = io_fill_cqe_aux(ctx, req->cqe.user_data, res, cflags);
870 	ctx->submit_state.cq_flush = true;
871 	__io_cq_unlock_post(ctx);
872 	return posted;
873 }
874 
875 static void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags)
876 {
877 	struct io_ring_ctx *ctx = req->ctx;
878 
879 	/*
880 	 * All execution paths but io-wq use the deferred completions by
881 	 * passing IO_URING_F_COMPLETE_DEFER and thus should not end up here.
882 	 */
883 	if (WARN_ON_ONCE(!(issue_flags & IO_URING_F_IOWQ)))
884 		return;
885 
886 	/*
887 	 * Handle special CQ sync cases via task_work. DEFER_TASKRUN requires
888 	 * the submitter task context, IOPOLL protects with uring_lock.
889 	 */
890 	if (ctx->task_complete || (ctx->flags & IORING_SETUP_IOPOLL)) {
891 		req->io_task_work.func = io_req_task_complete;
892 		io_req_task_work_add(req);
893 		return;
894 	}
895 
896 	io_cq_lock(ctx);
897 	if (!(req->flags & REQ_F_CQE_SKIP)) {
898 		if (!io_fill_cqe_req(ctx, req))
899 			io_req_cqe_overflow(req);
900 	}
901 	io_cq_unlock_post(ctx);
902 
903 	/*
904 	 * We don't free the request here because we know it's called from
905 	 * io-wq only, which holds a reference, so it cannot be the last put.
906 	 */
907 	req_ref_put(req);
908 }
909 
910 void io_req_defer_failed(struct io_kiocb *req, s32 res)
911 	__must_hold(&ctx->uring_lock)
912 {
913 	const struct io_cold_def *def = &io_cold_defs[req->opcode];
914 
915 	lockdep_assert_held(&req->ctx->uring_lock);
916 
917 	req_set_fail(req);
918 	io_req_set_res(req, res, io_put_kbuf(req, res, IO_URING_F_UNLOCKED));
919 	if (def->fail)
920 		def->fail(req);
921 	io_req_complete_defer(req);
922 }
923 
924 /*
925  * Don't initialise the fields below on every allocation, but do that in
926  * advance and keep them valid across allocations.
927  */
928 static void io_preinit_req(struct io_kiocb *req, struct io_ring_ctx *ctx)
929 {
930 	req->ctx = ctx;
931 	req->link = NULL;
932 	req->async_data = NULL;
933 	/* not necessary, but safer to zero */
934 	memset(&req->cqe, 0, sizeof(req->cqe));
935 	memset(&req->big_cqe, 0, sizeof(req->big_cqe));
936 }
937 
938 /*
939  * A request might get retired back into the request caches even before opcode
940  * handlers and io_issue_sqe() are done with it, e.g. inline completion path.
941  * Because of that, io_alloc_req() should be called only under ->uring_lock
942  * and with extra caution to not get a request that is still worked on.
943  */
944 __cold bool __io_alloc_req_refill(struct io_ring_ctx *ctx)
945 	__must_hold(&ctx->uring_lock)
946 {
947 	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
948 	void *reqs[IO_REQ_ALLOC_BATCH];
949 	int ret;
950 
951 	ret = kmem_cache_alloc_bulk(req_cachep, gfp, ARRAY_SIZE(reqs), reqs);
952 
953 	/*
954 	 * Bulk alloc is all-or-nothing. If we fail to get a batch,
955 	 * retry single alloc to be on the safe side.
956 	 */
957 	if (unlikely(ret <= 0)) {
958 		reqs[0] = kmem_cache_alloc(req_cachep, gfp);
959 		if (!reqs[0])
960 			return false;
961 		ret = 1;
962 	}
963 
964 	percpu_ref_get_many(&ctx->refs, ret);
965 	while (ret--) {
966 		struct io_kiocb *req = reqs[ret];
967 
968 		io_preinit_req(req, ctx);
969 		io_req_add_to_cache(req, ctx);
970 	}
971 	return true;
972 }
973 
974 __cold void io_free_req(struct io_kiocb *req)
975 {
976 	/* refs were already put, restore them for io_req_task_complete() */
977 	req->flags &= ~REQ_F_REFCOUNT;
978 	/* we only want to free it, don't post CQEs */
979 	req->flags |= REQ_F_CQE_SKIP;
980 	req->io_task_work.func = io_req_task_complete;
981 	io_req_task_work_add(req);
982 }
983 
984 static void __io_req_find_next_prep(struct io_kiocb *req)
985 {
986 	struct io_ring_ctx *ctx = req->ctx;
987 
988 	spin_lock(&ctx->completion_lock);
989 	io_disarm_next(req);
990 	spin_unlock(&ctx->completion_lock);
991 }
992 
993 static inline struct io_kiocb *io_req_find_next(struct io_kiocb *req)
994 {
995 	struct io_kiocb *nxt;
996 
997 	/*
998 	 * If LINK is set, we have dependent requests in this chain. If we
999 	 * didn't fail this request, queue the first one up, moving any other
1000 	 * dependencies to the next request. In case of failure, fail the rest
1001 	 * of the chain.
1002 	 */
1003 	if (unlikely(req->flags & IO_DISARM_MASK))
1004 		__io_req_find_next_prep(req);
1005 	nxt = req->link;
1006 	req->link = NULL;
1007 	return nxt;
1008 }
1009 
1010 static void ctx_flush_and_put(struct io_ring_ctx *ctx, struct io_tw_state *ts)
1011 {
1012 	if (!ctx)
1013 		return;
1014 	if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
1015 		atomic_andnot(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
1016 
1017 	io_submit_flush_completions(ctx);
1018 	mutex_unlock(&ctx->uring_lock);
1019 	percpu_ref_put(&ctx->refs);
1020 }
1021 
1022 /*
1023  * Run queued task_work, returning the number of entries processed in *count.
1024  * If more entries than max_entries are available, stop processing once this
1025  * is reached and return the rest of the list.
1026  */
1027 struct llist_node *io_handle_tw_list(struct llist_node *node,
1028 				     unsigned int *count,
1029 				     unsigned int max_entries)
1030 {
1031 	struct io_ring_ctx *ctx = NULL;
1032 	struct io_tw_state ts = { };
1033 
1034 	do {
1035 		struct llist_node *next = node->next;
1036 		struct io_kiocb *req = container_of(node, struct io_kiocb,
1037 						    io_task_work.node);
1038 
1039 		if (req->ctx != ctx) {
1040 			ctx_flush_and_put(ctx, &ts);
1041 			ctx = req->ctx;
1042 			mutex_lock(&ctx->uring_lock);
1043 			percpu_ref_get(&ctx->refs);
1044 		}
1045 		INDIRECT_CALL_2(req->io_task_work.func,
1046 				io_poll_task_func, io_req_rw_complete,
1047 				req, &ts);
1048 		node = next;
1049 		(*count)++;
1050 		if (unlikely(need_resched())) {
1051 			ctx_flush_and_put(ctx, &ts);
1052 			ctx = NULL;
1053 			cond_resched();
1054 		}
1055 	} while (node && *count < max_entries);
1056 
1057 	ctx_flush_and_put(ctx, &ts);
1058 	return node;
1059 }
1060 
1061 /**
1062  * io_llist_xchg - swap all entries in a lock-less list
1063  * @head:	the head of the lock-less list whose entries are swapped out
1064  * @new:	the entry to install as the new head of the list
1065  *
1066  * If the list is empty, return NULL; otherwise return a pointer to the first entry.
1067  * The order of entries returned is from the newest to the oldest added one.
1068  */
1069 static inline struct llist_node *io_llist_xchg(struct llist_head *head,
1070 					       struct llist_node *new)
1071 {
1072 	return xchg(&head->first, new);
1073 }
1074 
1075 static __cold void io_fallback_tw(struct io_uring_task *tctx, bool sync)
1076 {
1077 	struct llist_node *node = llist_del_all(&tctx->task_list);
1078 	struct io_ring_ctx *last_ctx = NULL;
1079 	struct io_kiocb *req;
1080 
1081 	while (node) {
1082 		req = container_of(node, struct io_kiocb, io_task_work.node);
1083 		node = node->next;
1084 		if (sync && last_ctx != req->ctx) {
1085 			if (last_ctx) {
1086 				flush_delayed_work(&last_ctx->fallback_work);
1087 				percpu_ref_put(&last_ctx->refs);
1088 			}
1089 			last_ctx = req->ctx;
1090 			percpu_ref_get(&last_ctx->refs);
1091 		}
1092 		if (llist_add(&req->io_task_work.node,
1093 			      &req->ctx->fallback_llist))
1094 			schedule_delayed_work(&req->ctx->fallback_work, 1);
1095 	}
1096 
1097 	if (last_ctx) {
1098 		flush_delayed_work(&last_ctx->fallback_work);
1099 		percpu_ref_put(&last_ctx->refs);
1100 	}
1101 }
1102 
1103 struct llist_node *tctx_task_work_run(struct io_uring_task *tctx,
1104 				      unsigned int max_entries,
1105 				      unsigned int *count)
1106 {
1107 	struct llist_node *node;
1108 
1109 	if (unlikely(current->flags & PF_EXITING)) {
1110 		io_fallback_tw(tctx, true);
1111 		return NULL;
1112 	}
1113 
1114 	node = llist_del_all(&tctx->task_list);
1115 	if (node) {
1116 		node = llist_reverse_order(node);
1117 		node = io_handle_tw_list(node, count, max_entries);
1118 	}
1119 
1120 	/* relaxed read is enough as only the task itself sets ->in_cancel */
1121 	if (unlikely(atomic_read(&tctx->in_cancel)))
1122 		io_uring_drop_tctx_refs(current);
1123 
1124 	trace_io_uring_task_work_run(tctx, *count);
1125 	return node;
1126 }
1127 
1128 void tctx_task_work(struct callback_head *cb)
1129 {
1130 	struct io_uring_task *tctx;
1131 	struct llist_node *ret;
1132 	unsigned int count = 0;
1133 
1134 	tctx = container_of(cb, struct io_uring_task, task_work);
1135 	ret = tctx_task_work_run(tctx, UINT_MAX, &count);
1136 	/* can't happen */
1137 	WARN_ON_ONCE(ret);
1138 }
1139 
1140 static inline void io_req_local_work_add(struct io_kiocb *req,
1141 					 struct io_ring_ctx *ctx,
1142 					 unsigned flags)
1143 {
1144 	unsigned nr_wait, nr_tw, nr_tw_prev;
1145 	struct llist_node *head;
1146 
1147 	/* See comment above IO_CQ_WAKE_INIT */
1148 	BUILD_BUG_ON(IO_CQ_WAKE_FORCE <= IORING_MAX_CQ_ENTRIES);
1149 
1150 	/*
1151 	 * We don't know how many requests are in the link and whether
1152 	 * they can even be queued lazily, so fall back to non-lazy.
1153 	 */
1154 	if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK))
1155 		flags &= ~IOU_F_TWQ_LAZY_WAKE;
1156 
1157 	guard(rcu)();
1158 
1159 	head = READ_ONCE(ctx->work_llist.first);
1160 	do {
1161 		nr_tw_prev = 0;
1162 		if (head) {
1163 			struct io_kiocb *first_req = container_of(head,
1164 							struct io_kiocb,
1165 							io_task_work.node);
1166 			/*
1167 			 * Might be executed at any moment, rely on
1168 			 * SLAB_TYPESAFE_BY_RCU to keep it alive.
1169 			 */
1170 			nr_tw_prev = READ_ONCE(first_req->nr_tw);
1171 		}
1172 
1173 		/*
1174 		 * Theoretically, it can overflow, but that's fine as one of
1175 		 * the previous adds should've tried to wake the task.
1176 		 */
1177 		nr_tw = nr_tw_prev + 1;
1178 		if (!(flags & IOU_F_TWQ_LAZY_WAKE))
1179 			nr_tw = IO_CQ_WAKE_FORCE;
1180 
1181 		req->nr_tw = nr_tw;
1182 		req->io_task_work.node.next = head;
1183 	} while (!try_cmpxchg(&ctx->work_llist.first, &head,
1184 			      &req->io_task_work.node));
1185 
1186 	/*
1187 	 * cmpxchg implies a full barrier, which pairs with the barrier
1188 	 * in set_current_state() on the io_cqring_wait() side. It's used
1189 	 * to ensure that either we see updated ->cq_wait_nr, or waiters
1190 	 * going to sleep will observe the work added to the list, which
1191 	 * is similar to the wait/wake task state sync.
1192 	 */
1193 
1194 	if (!head) {
1195 		if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
1196 			atomic_or(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
1197 		if (ctx->has_evfd)
1198 			io_eventfd_signal(ctx);
1199 	}
1200 
1201 	nr_wait = atomic_read(&ctx->cq_wait_nr);
1202 	/* not enough or no one is waiting */
1203 	if (nr_tw < nr_wait)
1204 		return;
1205 	/* the previous add has already woken it up */
1206 	if (nr_tw_prev >= nr_wait)
1207 		return;
1208 	wake_up_state(ctx->submitter_task, TASK_INTERRUPTIBLE);
1209 }
1210 
1211 static void io_req_normal_work_add(struct io_kiocb *req)
1212 {
1213 	struct io_uring_task *tctx = req->task->io_uring;
1214 	struct io_ring_ctx *ctx = req->ctx;
1215 
1216 	/* task_work already pending, we're done */
1217 	if (!llist_add(&req->io_task_work.node, &tctx->task_list))
1218 		return;
1219 
1220 	if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
1221 		atomic_or(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
1222 
1223 	/* SQPOLL doesn't need the task_work added, it'll run it itself */
1224 	if (ctx->flags & IORING_SETUP_SQPOLL) {
1225 		struct io_sq_data *sqd = ctx->sq_data;
1226 
1227 		if (sqd->thread)
1228 			__set_notify_signal(sqd->thread);
1229 		return;
1230 	}
1231 
1232 	if (likely(!task_work_add(req->task, &tctx->task_work, ctx->notify_method)))
1233 		return;
1234 
1235 	io_fallback_tw(tctx, false);
1236 }
1237 
1238 void __io_req_task_work_add(struct io_kiocb *req, unsigned flags)
1239 {
1240 	if (req->ctx->flags & IORING_SETUP_DEFER_TASKRUN)
1241 		io_req_local_work_add(req, req->ctx, flags);
1242 	else
1243 		io_req_normal_work_add(req);
1244 }
1245 
1246 void io_req_task_work_add_remote(struct io_kiocb *req, struct io_ring_ctx *ctx,
1247 				 unsigned flags)
1248 {
1249 	if (WARN_ON_ONCE(!(ctx->flags & IORING_SETUP_DEFER_TASKRUN)))
1250 		return;
1251 	io_req_local_work_add(req, ctx, flags);
1252 }
1253 
1254 static void __cold io_move_task_work_from_local(struct io_ring_ctx *ctx)
1255 {
1256 	struct llist_node *node;
1257 
1258 	node = llist_del_all(&ctx->work_llist);
1259 	while (node) {
1260 		struct io_kiocb *req = container_of(node, struct io_kiocb,
1261 						    io_task_work.node);
1262 
1263 		node = node->next;
1264 		io_req_normal_work_add(req);
1265 	}
1266 }
1267 
1268 static bool io_run_local_work_continue(struct io_ring_ctx *ctx, int events,
1269 				       int min_events)
1270 {
1271 	if (llist_empty(&ctx->work_llist))
1272 		return false;
1273 	if (events < min_events)
1274 		return true;
1275 	if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
1276 		atomic_or(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
1277 	return false;
1278 }
1279 
1280 static int __io_run_local_work(struct io_ring_ctx *ctx, struct io_tw_state *ts,
1281 			       int min_events)
1282 {
1283 	struct llist_node *node;
1284 	unsigned int loops = 0;
1285 	int ret = 0;
1286 
1287 	if (WARN_ON_ONCE(ctx->submitter_task != current))
1288 		return -EEXIST;
1289 	if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
1290 		atomic_andnot(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
1291 again:
1292 	/*
1293 	 * llists are in reverse order, flip it back the right way before
1294 	 * running the pending items.
1295 	 */
1296 	node = llist_reverse_order(io_llist_xchg(&ctx->work_llist, NULL));
1297 	while (node) {
1298 		struct llist_node *next = node->next;
1299 		struct io_kiocb *req = container_of(node, struct io_kiocb,
1300 						    io_task_work.node);
1301 		INDIRECT_CALL_2(req->io_task_work.func,
1302 				io_poll_task_func, io_req_rw_complete,
1303 				req, ts);
1304 		ret++;
1305 		node = next;
1306 	}
1307 	loops++;
1308 
1309 	if (io_run_local_work_continue(ctx, ret, min_events))
1310 		goto again;
1311 	io_submit_flush_completions(ctx);
1312 	if (io_run_local_work_continue(ctx, ret, min_events))
1313 		goto again;
1314 
1315 	trace_io_uring_local_work_run(ctx, ret, loops);
1316 	return ret;
1317 }
1318 
1319 static inline int io_run_local_work_locked(struct io_ring_ctx *ctx,
1320 					   int min_events)
1321 {
1322 	struct io_tw_state ts = {};
1323 
1324 	if (llist_empty(&ctx->work_llist))
1325 		return 0;
1326 	return __io_run_local_work(ctx, &ts, min_events);
1327 }
1328 
1329 static int io_run_local_work(struct io_ring_ctx *ctx, int min_events)
1330 {
1331 	struct io_tw_state ts = {};
1332 	int ret;
1333 
1334 	mutex_lock(&ctx->uring_lock);
1335 	ret = __io_run_local_work(ctx, &ts, min_events);
1336 	mutex_unlock(&ctx->uring_lock);
1337 	return ret;
1338 }
1339 
1340 static void io_req_task_cancel(struct io_kiocb *req, struct io_tw_state *ts)
1341 {
1342 	io_tw_lock(req->ctx, ts);
1343 	io_req_defer_failed(req, req->cqe.res);
1344 }
1345 
1346 void io_req_task_submit(struct io_kiocb *req, struct io_tw_state *ts)
1347 {
1348 	io_tw_lock(req->ctx, ts);
1349 	/* req->task == current here, checking PF_EXITING is safe */
1350 	if (unlikely(req->task->flags & PF_EXITING))
1351 		io_req_defer_failed(req, -EFAULT);
1352 	else if (req->flags & REQ_F_FORCE_ASYNC)
1353 		io_queue_iowq(req);
1354 	else
1355 		io_queue_sqe(req);
1356 }
1357 
1358 void io_req_task_queue_fail(struct io_kiocb *req, int ret)
1359 {
1360 	io_req_set_res(req, ret, 0);
1361 	req->io_task_work.func = io_req_task_cancel;
1362 	io_req_task_work_add(req);
1363 }
1364 
1365 void io_req_task_queue(struct io_kiocb *req)
1366 {
1367 	req->io_task_work.func = io_req_task_submit;
1368 	io_req_task_work_add(req);
1369 }
1370 
1371 void io_queue_next(struct io_kiocb *req)
1372 {
1373 	struct io_kiocb *nxt = io_req_find_next(req);
1374 
1375 	if (nxt)
1376 		io_req_task_queue(nxt);
1377 }
1378 
1379 static void io_free_batch_list(struct io_ring_ctx *ctx,
1380 			       struct io_wq_work_node *node)
1381 	__must_hold(&ctx->uring_lock)
1382 {
1383 	do {
1384 		struct io_kiocb *req = container_of(node, struct io_kiocb,
1385 						    comp_list);
1386 
1387 		if (unlikely(req->flags & IO_REQ_CLEAN_SLOW_FLAGS)) {
1388 			if (req->flags & REQ_F_REFCOUNT) {
1389 				node = req->comp_list.next;
1390 				if (!req_ref_put_and_test(req))
1391 					continue;
1392 			}
1393 			if ((req->flags & REQ_F_POLLED) && req->apoll) {
1394 				struct async_poll *apoll = req->apoll;
1395 
1396 				if (apoll->double_poll)
1397 					kfree(apoll->double_poll);
1398 				if (!io_alloc_cache_put(&ctx->apoll_cache, apoll))
1399 					kfree(apoll);
1400 				req->flags &= ~REQ_F_POLLED;
1401 			}
1402 			if (req->flags & IO_REQ_LINK_FLAGS)
1403 				io_queue_next(req);
1404 			if (unlikely(req->flags & IO_REQ_CLEAN_FLAGS))
1405 				io_clean_op(req);
1406 		}
1407 		io_put_file(req);
1408 		io_put_rsrc_node(ctx, req->rsrc_node);
1409 		io_put_task(req->task);
1410 
1411 		node = req->comp_list.next;
1412 		io_req_add_to_cache(req, ctx);
1413 	} while (node);
1414 }
1415 
1416 void __io_submit_flush_completions(struct io_ring_ctx *ctx)
1417 	__must_hold(&ctx->uring_lock)
1418 {
1419 	struct io_submit_state *state = &ctx->submit_state;
1420 	struct io_wq_work_node *node;
1421 
1422 	__io_cq_lock(ctx);
1423 	__wq_list_for_each(node, &state->compl_reqs) {
1424 		struct io_kiocb *req = container_of(node, struct io_kiocb,
1425 					    comp_list);
1426 
1427 		if (!(req->flags & REQ_F_CQE_SKIP) &&
1428 		    unlikely(!io_fill_cqe_req(ctx, req))) {
1429 			if (ctx->lockless_cq) {
1430 				spin_lock(&ctx->completion_lock);
1431 				io_req_cqe_overflow(req);
1432 				spin_unlock(&ctx->completion_lock);
1433 			} else {
1434 				io_req_cqe_overflow(req);
1435 			}
1436 		}
1437 	}
1438 	__io_cq_unlock_post(ctx);
1439 
1440 	if (!wq_list_empty(&state->compl_reqs)) {
1441 		io_free_batch_list(ctx, state->compl_reqs.first);
1442 		INIT_WQ_LIST(&state->compl_reqs);
1443 	}
1444 	ctx->submit_state.cq_flush = false;
1445 }
1446 
1447 static unsigned io_cqring_events(struct io_ring_ctx *ctx)
1448 {
1449 	/* See comment at the top of this file */
1450 	smp_rmb();
1451 	return __io_cqring_events(ctx);
1452 }
1453 
1454 /*
1455  * We can't just wait for polled events to come to us, we have to actively
1456  * find and complete them.
1457  */
1458 static __cold void io_iopoll_try_reap_events(struct io_ring_ctx *ctx)
1459 {
1460 	if (!(ctx->flags & IORING_SETUP_IOPOLL))
1461 		return;
1462 
1463 	mutex_lock(&ctx->uring_lock);
1464 	while (!wq_list_empty(&ctx->iopoll_list)) {
1465 		/* let it sleep and repeat later if we can't complete a request */
1466 		if (io_do_iopoll(ctx, true) == 0)
1467 			break;
1468 		/*
1469 		 * Ensure we allow local-to-the-cpu processing to take place,
1470 		 * in this case we need to ensure that we reap all events.
1471 		 * Also let task_work, etc. to progress by releasing the mutex
1472 		 */
1473 		if (need_resched()) {
1474 			mutex_unlock(&ctx->uring_lock);
1475 			cond_resched();
1476 			mutex_lock(&ctx->uring_lock);
1477 		}
1478 	}
1479 	mutex_unlock(&ctx->uring_lock);
1480 }
1481 
1482 static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
1483 {
1484 	unsigned int nr_events = 0;
1485 	unsigned long check_cq;
1486 
1487 	lockdep_assert_held(&ctx->uring_lock);
1488 
1489 	if (!io_allowed_run_tw(ctx))
1490 		return -EEXIST;
1491 
1492 	check_cq = READ_ONCE(ctx->check_cq);
1493 	if (unlikely(check_cq)) {
1494 		if (check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT))
1495 			__io_cqring_overflow_flush(ctx, false);
1496 		/*
1497 		 * Similarly do not spin if we have not informed the user of any
1498 		 * dropped CQE.
1499 		 */
1500 		if (check_cq & BIT(IO_CHECK_CQ_DROPPED_BIT))
1501 			return -EBADR;
1502 	}
1503 	/*
1504 	 * Don't enter poll loop if we already have events pending.
1505 	 * If we do, we can potentially be spinning for commands that
1506 	 * already triggered a CQE (eg in error).
1507 	 */
1508 	if (io_cqring_events(ctx))
1509 		return 0;
1510 
1511 	do {
1512 		int ret = 0;
1513 
1514 		/*
1515 		 * If a submit got punted to a workqueue, we can have the
1516 		 * application entering polling for a command before it gets
1517 		 * issued. That app will hold the uring_lock for the duration
1518 		 * of the poll right here, so we need to take a breather every
1519 		 * now and then to ensure that the issue has a chance to add
1520 		 * the poll to the issued list. Otherwise we can spin here
1521 		 * forever, while the workqueue is stuck trying to acquire the
1522 		 * very same mutex.
1523 		 */
1524 		if (wq_list_empty(&ctx->iopoll_list) ||
1525 		    io_task_work_pending(ctx)) {
1526 			u32 tail = ctx->cached_cq_tail;
1527 
1528 			(void) io_run_local_work_locked(ctx, min);
1529 
1530 			if (task_work_pending(current) ||
1531 			    wq_list_empty(&ctx->iopoll_list)) {
1532 				mutex_unlock(&ctx->uring_lock);
1533 				io_run_task_work();
1534 				mutex_lock(&ctx->uring_lock);
1535 			}
1536 			/* some requests don't go through iopoll_list */
1537 			if (tail != ctx->cached_cq_tail ||
1538 			    wq_list_empty(&ctx->iopoll_list))
1539 				break;
1540 		}
1541 		ret = io_do_iopoll(ctx, !min);
1542 		if (unlikely(ret < 0))
1543 			return ret;
1544 
1545 		if (task_sigpending(current))
1546 			return -EINTR;
1547 		if (need_resched())
1548 			break;
1549 
1550 		nr_events += ret;
1551 	} while (nr_events < min);
1552 
1553 	return 0;
1554 }
1555 
1556 void io_req_task_complete(struct io_kiocb *req, struct io_tw_state *ts)
1557 {
1558 	io_req_complete_defer(req);
1559 }
1560 
1561 /*
1562  * After the iocb has been issued, it's safe to be found on the poll list.
1563  * Adding the kiocb to the list AFTER submission ensures that we don't
1564  * find it from an io_do_iopoll() thread before the issuer is done
1565  * accessing the kiocb cookie.
1566  */
1567 static void io_iopoll_req_issued(struct io_kiocb *req, unsigned int issue_flags)
1568 {
1569 	struct io_ring_ctx *ctx = req->ctx;
1570 	const bool needs_lock = issue_flags & IO_URING_F_UNLOCKED;
1571 
1572 	/* workqueue context doesn't hold uring_lock, grab it now */
1573 	if (unlikely(needs_lock))
1574 		mutex_lock(&ctx->uring_lock);
1575 
1576 	/*
1577 	 * Track whether we have multiple files in our lists. This will impact
1578 	 * how we do polling eventually, not spinning if we're on potentially
1579 	 * different devices.
1580 	 */
1581 	if (wq_list_empty(&ctx->iopoll_list)) {
1582 		ctx->poll_multi_queue = false;
1583 	} else if (!ctx->poll_multi_queue) {
1584 		struct io_kiocb *list_req;
1585 
1586 		list_req = container_of(ctx->iopoll_list.first, struct io_kiocb,
1587 					comp_list);
1588 		if (list_req->file != req->file)
1589 			ctx->poll_multi_queue = true;
1590 	}
1591 
1592 	/*
1593 	 * For fast devices, IO may have already completed. If it has, add
1594 	 * it to the front so we find it first.
1595 	 */
1596 	if (READ_ONCE(req->iopoll_completed))
1597 		wq_list_add_head(&req->comp_list, &ctx->iopoll_list);
1598 	else
1599 		wq_list_add_tail(&req->comp_list, &ctx->iopoll_list);
1600 
1601 	if (unlikely(needs_lock)) {
1602 		/*
1603 		 * If IORING_SETUP_SQPOLL is enabled, sqes are handled either
1604 		 * in the sq thread task context or in an io worker task context.
1605 		 * If the current task context is the sq thread, we don't need
1606 		 * to check whether we should wake up the sq thread.
1607 		 */
1608 		if ((ctx->flags & IORING_SETUP_SQPOLL) &&
1609 		    wq_has_sleeper(&ctx->sq_data->wait))
1610 			wake_up(&ctx->sq_data->wait);
1611 
1612 		mutex_unlock(&ctx->uring_lock);
1613 	}
1614 }
1615 
1616 io_req_flags_t io_file_get_flags(struct file *file)
1617 {
1618 	io_req_flags_t res = 0;
1619 
1620 	if (S_ISREG(file_inode(file)->i_mode))
1621 		res |= REQ_F_ISREG;
1622 	if ((file->f_flags & O_NONBLOCK) || (file->f_mode & FMODE_NOWAIT))
1623 		res |= REQ_F_SUPPORT_NOWAIT;
1624 	return res;
1625 }
1626 
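/*
 * Note the inverted return convention below: false means the async data was
 * allocated and REQ_F_ASYNC_DATA set, true means the allocation failed.
 */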
1627 bool io_alloc_async_data(struct io_kiocb *req)
1628 {
1629 	const struct io_issue_def *def = &io_issue_defs[req->opcode];
1630 
1631 	WARN_ON_ONCE(!def->async_size);
1632 	req->async_data = kmalloc(def->async_size, GFP_KERNEL);
1633 	if (req->async_data) {
1634 		req->flags |= REQ_F_ASYNC_DATA;
1635 		return false;
1636 	}
1637 	return true;
1638 }
1639 
1640 static u32 io_get_sequence(struct io_kiocb *req)
1641 {
1642 	u32 seq = req->ctx->cached_sq_head;
1643 	struct io_kiocb *cur;
1644 
1645 	/* need original cached_sq_head, but it was increased for each req */
1646 	io_for_each_link(cur, req)
1647 		seq--;
1648 	return seq;
1649 }
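/*
 * Example: if a drain request with two linked requests (three SQEs total)
 * was picked up when ->cached_sq_head was 10, the head counter is now 13;
 * the loop above decrements seq three times, back to 10, the position the
 * chain started at.
 */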
1650 
1651 static __cold void io_drain_req(struct io_kiocb *req)
1652 	__must_hold(&ctx->uring_lock)
1653 {
1654 	struct io_ring_ctx *ctx = req->ctx;
1655 	struct io_defer_entry *de;
1656 	int ret;
1657 	u32 seq = io_get_sequence(req);
1658 
1659 	/* Still need defer if there is pending req in defer list. */
1660 	spin_lock(&ctx->completion_lock);
1661 	if (!req_need_defer(req, seq) && list_empty_careful(&ctx->defer_list)) {
1662 		spin_unlock(&ctx->completion_lock);
1663 queue:
1664 		ctx->drain_active = false;
1665 		io_req_task_queue(req);
1666 		return;
1667 	}
1668 	spin_unlock(&ctx->completion_lock);
1669 
1670 	io_prep_async_link(req);
1671 	de = kmalloc(sizeof(*de), GFP_KERNEL);
1672 	if (!de) {
1673 		ret = -ENOMEM;
1674 		io_req_defer_failed(req, ret);
1675 		return;
1676 	}
1677 
1678 	spin_lock(&ctx->completion_lock);
1679 	if (!req_need_defer(req, seq) && list_empty(&ctx->defer_list)) {
1680 		spin_unlock(&ctx->completion_lock);
1681 		kfree(de);
1682 		goto queue;
1683 	}
1684 
1685 	trace_io_uring_defer(req);
1686 	de->req = req;
1687 	de->seq = seq;
1688 	list_add_tail(&de->list, &ctx->defer_list);
1689 	spin_unlock(&ctx->completion_lock);
1690 }
1691 
1692 static bool io_assign_file(struct io_kiocb *req, const struct io_issue_def *def,
1693 			   unsigned int issue_flags)
1694 {
1695 	if (req->file || !def->needs_file)
1696 		return true;
1697 
1698 	if (req->flags & REQ_F_FIXED_FILE)
1699 		req->file = io_file_get_fixed(req, req->cqe.fd, issue_flags);
1700 	else
1701 		req->file = io_file_get_normal(req, req->cqe.fd);
1702 
1703 	return !!req->file;
1704 }
1705 
1706 static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
1707 {
1708 	const struct io_issue_def *def = &io_issue_defs[req->opcode];
1709 	const struct cred *creds = NULL;
1710 	int ret;
1711 
1712 	if (unlikely(!io_assign_file(req, def, issue_flags)))
1713 		return -EBADF;
1714 
1715 	if (unlikely((req->flags & REQ_F_CREDS) && req->creds != current_cred()))
1716 		creds = override_creds(req->creds);
1717 
1718 	if (!def->audit_skip)
1719 		audit_uring_entry(req->opcode);
1720 
1721 	ret = def->issue(req, issue_flags);
1722 
1723 	if (!def->audit_skip)
1724 		audit_uring_exit(!ret, ret);
1725 
1726 	if (creds)
1727 		revert_creds(creds);
1728 
1729 	if (ret == IOU_OK) {
1730 		if (issue_flags & IO_URING_F_COMPLETE_DEFER)
1731 			io_req_complete_defer(req);
1732 		else
1733 			io_req_complete_post(req, issue_flags);
1734 
1735 		return 0;
1736 	}
1737 
1738 	if (ret == IOU_ISSUE_SKIP_COMPLETE) {
1739 		ret = 0;
1740 		io_arm_ltimeout(req);
1741 
1742 		/* If the op doesn't have a file, we're not polling for it */
1743 		if ((req->ctx->flags & IORING_SETUP_IOPOLL) && def->iopoll_queue)
1744 			io_iopoll_req_issued(req, issue_flags);
1745 	}
1746 	return ret;
1747 }
1748 
1749 int io_poll_issue(struct io_kiocb *req, struct io_tw_state *ts)
1750 {
1751 	io_tw_lock(req->ctx, ts);
1752 	return io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_MULTISHOT|
1753 				 IO_URING_F_COMPLETE_DEFER);
1754 }
1755 
1756 struct io_wq_work *io_wq_free_work(struct io_wq_work *work)
1757 {
1758 	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
1759 	struct io_kiocb *nxt = NULL;
1760 
1761 	if (req_ref_put_and_test(req)) {
1762 		if (req->flags & IO_REQ_LINK_FLAGS)
1763 			nxt = io_req_find_next(req);
1764 		io_free_req(req);
1765 	}
1766 	return nxt ? &nxt->work : NULL;
1767 }
1768 
1769 void io_wq_submit_work(struct io_wq_work *work)
1770 {
1771 	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
1772 	const struct io_issue_def *def = &io_issue_defs[req->opcode];
1773 	unsigned int issue_flags = IO_URING_F_UNLOCKED | IO_URING_F_IOWQ;
1774 	bool needs_poll = false;
1775 	int ret = 0, err = -ECANCELED;
1776 
1777 	/* one will be dropped by ->io_wq_free_work() after returning to io-wq */
1778 	if (!(req->flags & REQ_F_REFCOUNT))
1779 		__io_req_set_refcount(req, 2);
1780 	else
1781 		req_ref_get(req);
1782 
1783 	io_arm_ltimeout(req);
1784 
1785 	/* either cancelled or io-wq is dying, so don't touch tctx->iowq */
1786 	if (atomic_read(&work->flags) & IO_WQ_WORK_CANCEL) {
1787 fail:
1788 		io_req_task_queue_fail(req, err);
1789 		return;
1790 	}
1791 	if (!io_assign_file(req, def, issue_flags)) {
1792 		err = -EBADF;
1793 		atomic_or(IO_WQ_WORK_CANCEL, &work->flags);
1794 		goto fail;
1795 	}
1796 
1797 	/*
1798 	 * If DEFER_TASKRUN is set, it's only allowed to post CQEs from the
1799 	 * submitter task context. Final request completions are handed to the
1800 	 * right context, however this is not the case for auxiliary CQEs,
1801 	 * which are the main means of operation for multishot requests.
1802 	 * Don't allow any multishot execution from io-wq. It's more restrictive
1803 	 * than necessary and also cleaner.
1804 	 */
1805 	if (req->flags & REQ_F_APOLL_MULTISHOT) {
1806 		err = -EBADFD;
1807 		if (!io_file_can_poll(req))
1808 			goto fail;
1809 		if (req->file->f_flags & O_NONBLOCK ||
1810 		    req->file->f_mode & FMODE_NOWAIT) {
1811 			err = -ECANCELED;
1812 			if (io_arm_poll_handler(req, issue_flags) != IO_APOLL_OK)
1813 				goto fail;
1814 			return;
1815 		} else {
1816 			req->flags &= ~REQ_F_APOLL_MULTISHOT;
1817 		}
1818 	}
1819 
1820 	if (req->flags & REQ_F_FORCE_ASYNC) {
1821 		bool opcode_poll = def->pollin || def->pollout;
1822 
1823 		if (opcode_poll && io_file_can_poll(req)) {
1824 			needs_poll = true;
1825 			issue_flags |= IO_URING_F_NONBLOCK;
1826 		}
1827 	}
1828 
1829 	do {
1830 		ret = io_issue_sqe(req, issue_flags);
1831 		if (ret != -EAGAIN)
1832 			break;
1833 
1834 		/*
1835 		 * If REQ_F_NOWAIT is set, then don't wait or retry with
1836 		 * poll. -EAGAIN is final for that case.
1837 		 */
1838 		if (req->flags & REQ_F_NOWAIT)
1839 			break;
1840 
1841 		/*
1842 		 * We can get EAGAIN for iopolled IO even though we're
1843 		 * forcing a sync submission from here, since we can't
1844 		 * wait for request slots on the block side.
1845 		 */
1846 		if (!needs_poll) {
1847 			if (!(req->ctx->flags & IORING_SETUP_IOPOLL))
1848 				break;
1849 			if (io_wq_worker_stopped())
1850 				break;
1851 			cond_resched();
1852 			continue;
1853 		}
1854 
1855 		if (io_arm_poll_handler(req, issue_flags) == IO_APOLL_OK)
1856 			return;
1857 		/* aborted or ready, in either case retry blocking */
1858 		needs_poll = false;
1859 		issue_flags &= ~IO_URING_F_NONBLOCK;
1860 	} while (1);
1861 
1862 	/* avoid locking problems by failing it from a clean context */
1863 	if (ret)
1864 		io_req_task_queue_fail(req, ret);
1865 }
1866 
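/*
 * Look up a file in the ctx's fixed (registered) file table. Grabs the
 * submission lock as dictated by issue_flags and attaches the ctx's rsrc
 * node to the request so the slot stays stable while the request runs.
 */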
1867 inline struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
1868 				      unsigned int issue_flags)
1869 {
1870 	struct io_ring_ctx *ctx = req->ctx;
1871 	struct io_fixed_file *slot;
1872 	struct file *file = NULL;
1873 
1874 	io_ring_submit_lock(ctx, issue_flags);
1875 
1876 	if (unlikely((unsigned int)fd >= ctx->nr_user_files))
1877 		goto out;
1878 	fd = array_index_nospec(fd, ctx->nr_user_files);
1879 	slot = io_fixed_file_slot(&ctx->file_table, fd);
1880 	if (!req->rsrc_node)
1881 		__io_req_set_rsrc_node(req, ctx);
1882 	req->flags |= io_slot_flags(slot);
1883 	file = io_slot_file(slot);
1884 out:
1885 	io_ring_submit_unlock(ctx, issue_flags);
1886 	return file;
1887 }
1888 
1889 struct file *io_file_get_normal(struct io_kiocb *req, int fd)
1890 {
1891 	struct file *file = fget(fd);
1892 
1893 	trace_io_uring_file_get(req, fd);
1894 
1895 	/* we don't allow fixed io_uring files */
1896 	if (file && io_is_uring_fops(file))
1897 		io_req_track_inflight(req);
1898 	return file;
1899 }
1900 
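/*
 * Slow path for a request that could not be issued inline: fail it for
 * anything but -EAGAIN (or if REQ_F_NOWAIT is set); otherwise arm poll to
 * retry when the file is ready, punting to io-wq if arming was aborted.
 */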
1901 static void io_queue_async(struct io_kiocb *req, int ret)
1902 	__must_hold(&req->ctx->uring_lock)
1903 {
1904 	struct io_kiocb *linked_timeout;
1905 
1906 	if (ret != -EAGAIN || (req->flags & REQ_F_NOWAIT)) {
1907 		io_req_defer_failed(req, ret);
1908 		return;
1909 	}
1910 
1911 	linked_timeout = io_prep_linked_timeout(req);
1912 
1913 	switch (io_arm_poll_handler(req, 0)) {
1914 	case IO_APOLL_READY:
1915 		io_kbuf_recycle(req, 0);
1916 		io_req_task_queue(req);
1917 		break;
1918 	case IO_APOLL_ABORTED:
1919 		io_kbuf_recycle(req, 0);
1920 		io_queue_iowq(req);
1921 		break;
1922 	case IO_APOLL_OK:
1923 		break;
1924 	}
1925 
1926 	if (linked_timeout)
1927 		io_queue_linked_timeout(linked_timeout);
1928 }
1929 
1930 static inline void io_queue_sqe(struct io_kiocb *req)
1931 	__must_hold(&req->ctx->uring_lock)
1932 {
1933 	int ret;
1934 
1935 	ret = io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_COMPLETE_DEFER);
1936 
1937 	/*
1938 	 * We async punt it if the file wasn't marked NOWAIT, or if the file
1939 	 * doesn't support non-blocking read/write attempts
1940 	 */
1941 	if (unlikely(ret))
1942 		io_queue_async(req, ret);
1943 }
1944 
1945 static void io_queue_sqe_fallback(struct io_kiocb *req)
1946 	__must_hold(&req->ctx->uring_lock)
1947 {
1948 	if (unlikely(req->flags & REQ_F_FAIL)) {
1949 		/*
1950 		 * We don't submit; fail them all. For that, replace hardlinks
1951 		 * with normal links. An extra REQ_F_LINK is tolerated.
1952 		 */
1953 		req->flags &= ~REQ_F_HARDLINK;
1954 		req->flags |= REQ_F_LINK;
1955 		io_req_defer_failed(req, req->cqe.res);
1956 	} else {
1957 		if (unlikely(req->ctx->drain_active))
1958 			io_drain_req(req);
1959 		else
1960 			io_queue_iowq(req);
1961 	}
1962 }
1963 
1964 /*
1965  * Check SQE restrictions (opcode and flags).
1966  *
1967  * Returns 'true' if SQE is allowed, 'false' otherwise.
1968  */
1969 static inline bool io_check_restriction(struct io_ring_ctx *ctx,
1970 					struct io_kiocb *req,
1971 					unsigned int sqe_flags)
1972 {
1973 	if (!test_bit(req->opcode, ctx->restrictions.sqe_op))
1974 		return false;
1975 
1976 	if ((sqe_flags & ctx->restrictions.sqe_flags_required) !=
1977 	    ctx->restrictions.sqe_flags_required)
1978 		return false;
1979 
1980 	if (sqe_flags & ~(ctx->restrictions.sqe_flags_allowed |
1981 			  ctx->restrictions.sqe_flags_required))
1982 		return false;
1983 
1984 	return true;
1985 }
1986 
1987 static void io_init_req_drain(struct io_kiocb *req)
1988 {
1989 	struct io_ring_ctx *ctx = req->ctx;
1990 	struct io_kiocb *head = ctx->submit_state.link.head;
1991 
1992 	ctx->drain_active = true;
1993 	if (head) {
1994 		/*
1995 		 * If we need to drain a request in the middle of a link, drain
1996 		 * the head request and the next request/link after the current
1997 		 * link. Since links execute sequentially, REQ_F_IO_DRAIN
1998 		 * will be maintained for every request of our
1999 		 * link.
2000 		 */
2001 		head->flags |= REQ_F_IO_DRAIN | REQ_F_FORCE_ASYNC;
2002 		ctx->drain_next = true;
2003 	}
2004 }
2005 
2006 static __cold int io_init_fail_req(struct io_kiocb *req, int err)
2007 {
2008 	/* ensure per-opcode data is cleared if we fail before prep */
2009 	memset(&req->cmd.data, 0, sizeof(req->cmd.data));
2010 	return err;
2011 }
2012 
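/*
 * Initialise a request from its SQE: validate the opcode and flags,
 * enforce any registered restrictions, set up drain/link/plug state and
 * personality credentials, then hand off to the opcode's ->prep() handler.
 */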
2013 static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
2014 		       const struct io_uring_sqe *sqe)
2015 	__must_hold(&ctx->uring_lock)
2016 {
2017 	const struct io_issue_def *def;
2018 	unsigned int sqe_flags;
2019 	int personality;
2020 	u8 opcode;
2021 
2022 	/* req is partially pre-initialised, see io_preinit_req() */
2023 	req->opcode = opcode = READ_ONCE(sqe->opcode);
2024 	/* same numerical values with corresponding REQ_F_*, safe to copy */
2025 	sqe_flags = READ_ONCE(sqe->flags);
2026 	req->flags = (io_req_flags_t) sqe_flags;
2027 	req->cqe.user_data = READ_ONCE(sqe->user_data);
2028 	req->file = NULL;
2029 	req->rsrc_node = NULL;
2030 	req->task = current;
2031 	req->cancel_seq_set = false;
2032 
2033 	if (unlikely(opcode >= IORING_OP_LAST)) {
2034 		req->opcode = 0;
2035 		return io_init_fail_req(req, -EINVAL);
2036 	}
2037 	def = &io_issue_defs[opcode];
2038 	if (unlikely(sqe_flags & ~SQE_COMMON_FLAGS)) {
2039 		/* enforce forwards compatibility on users */
2040 		if (sqe_flags & ~SQE_VALID_FLAGS)
2041 			return io_init_fail_req(req, -EINVAL);
2042 		if (sqe_flags & IOSQE_BUFFER_SELECT) {
2043 			if (!def->buffer_select)
2044 				return io_init_fail_req(req, -EOPNOTSUPP);
2045 			req->buf_index = READ_ONCE(sqe->buf_group);
2046 		}
2047 		if (sqe_flags & IOSQE_CQE_SKIP_SUCCESS)
2048 			ctx->drain_disabled = true;
2049 		if (sqe_flags & IOSQE_IO_DRAIN) {
2050 			if (ctx->drain_disabled)
2051 				return io_init_fail_req(req, -EOPNOTSUPP);
2052 			io_init_req_drain(req);
2053 		}
2054 	}
2055 	if (unlikely(ctx->restricted || ctx->drain_active || ctx->drain_next)) {
2056 		if (ctx->restricted && !io_check_restriction(ctx, req, sqe_flags))
2057 			return io_init_fail_req(req, -EACCES);
2058 		/* knock it to the slow queue path, will be drained there */
2059 		if (ctx->drain_active)
2060 			req->flags |= REQ_F_FORCE_ASYNC;
2061 		/* if there is no link, we're at "next" request and need to drain */
2062 		if (unlikely(ctx->drain_next) && !ctx->submit_state.link.head) {
2063 			ctx->drain_next = false;
2064 			ctx->drain_active = true;
2065 			req->flags |= REQ_F_IO_DRAIN | REQ_F_FORCE_ASYNC;
2066 		}
2067 	}
2068 
2069 	if (!def->ioprio && sqe->ioprio)
2070 		return io_init_fail_req(req, -EINVAL);
2071 	if (!def->iopoll && (ctx->flags & IORING_SETUP_IOPOLL))
2072 		return io_init_fail_req(req, -EINVAL);
2073 
2074 	if (def->needs_file) {
2075 		struct io_submit_state *state = &ctx->submit_state;
2076 
2077 		req->cqe.fd = READ_ONCE(sqe->fd);
2078 
2079 		/*
2080 		 * Plug now if we have more than 2 IO left after this, and the
2081 		 * target is potentially a read/write to block based storage.
2082 		 */
2083 		if (state->need_plug && def->plug) {
2084 			state->plug_started = true;
2085 			state->need_plug = false;
2086 			blk_start_plug_nr_ios(&state->plug, state->submit_nr);
2087 		}
2088 	}
2089 
2090 	personality = READ_ONCE(sqe->personality);
2091 	if (personality) {
2092 		int ret;
2093 
2094 		req->creds = xa_load(&ctx->personalities, personality);
2095 		if (!req->creds)
2096 			return io_init_fail_req(req, -EINVAL);
2097 		get_cred(req->creds);
2098 		ret = security_uring_override_creds(req->creds);
2099 		if (ret) {
2100 			put_cred(req->creds);
2101 			return io_init_fail_req(req, ret);
2102 		}
2103 		req->flags |= REQ_F_CREDS;
2104 	}
2105 
2106 	return def->prep(req, sqe);
2107 }
2108 
2109 static __cold int io_submit_fail_init(const struct io_uring_sqe *sqe,
2110 				      struct io_kiocb *req, int ret)
2111 {
2112 	struct io_ring_ctx *ctx = req->ctx;
2113 	struct io_submit_link *link = &ctx->submit_state.link;
2114 	struct io_kiocb *head = link->head;
2115 
2116 	trace_io_uring_req_failed(sqe, req, ret);
2117 
2118 	/*
2119 	 * Avoid breaking links in the middle as it renders links with SQPOLL
2120 	 * unusable. Instead of failing eagerly, continue assembling the link if
2121 	 * applicable and mark the head with REQ_F_FAIL. The link flushing code
2122 	 * should find the flag and handle the rest.
2123 	 */
2124 	req_fail_link_node(req, ret);
2125 	if (head && !(head->flags & REQ_F_FAIL))
2126 		req_fail_link_node(head, -ECANCELED);
2127 
2128 	if (!(req->flags & IO_REQ_LINK_FLAGS)) {
2129 		if (head) {
2130 			link->last->link = req;
2131 			link->head = NULL;
2132 			req = head;
2133 		}
2134 		io_queue_sqe_fallback(req);
2135 		return ret;
2136 	}
2137 
2138 	if (head)
2139 		link->last->link = req;
2140 	else
2141 		link->head = req;
2142 	link->last = req;
2143 	return 0;
2144 }
2145 
2146 static inline int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
2147 			 const struct io_uring_sqe *sqe)
2148 	__must_hold(&ctx->uring_lock)
2149 {
2150 	struct io_submit_link *link = &ctx->submit_state.link;
2151 	int ret;
2152 
2153 	ret = io_init_req(ctx, req, sqe);
2154 	if (unlikely(ret))
2155 		return io_submit_fail_init(sqe, req, ret);
2156 
2157 	trace_io_uring_submit_req(req);
2158 
2159 	/*
2160 	 * If we already have a head request, queue this one for async
2161 	 * submittal once the head completes. If we don't have a head but
2162 	 * IOSQE_IO_LINK is set in the sqe, start a new head. This one will be
2163 	 * submitted sync once the chain is complete. If none of those
2164 	 * conditions are true (normal request), then just queue it.
2165 	 */
2166 	if (unlikely(link->head)) {
2167 		trace_io_uring_link(req, link->head);
2168 		link->last->link = req;
2169 		link->last = req;
2170 
2171 		if (req->flags & IO_REQ_LINK_FLAGS)
2172 			return 0;
2173 		/* last request of the link, flush it */
2174 		req = link->head;
2175 		link->head = NULL;
2176 		if (req->flags & (REQ_F_FORCE_ASYNC | REQ_F_FAIL))
2177 			goto fallback;
2178 
2179 	} else if (unlikely(req->flags & (IO_REQ_LINK_FLAGS |
2180 					  REQ_F_FORCE_ASYNC | REQ_F_FAIL))) {
2181 		if (req->flags & IO_REQ_LINK_FLAGS) {
2182 			link->head = req;
2183 			link->last = req;
2184 		} else {
2185 fallback:
2186 			io_queue_sqe_fallback(req);
2187 		}
2188 		return 0;
2189 	}
2190 
2191 	io_queue_sqe(req);
2192 	return 0;
2193 }
2194 
2195 /*
2196  * Batched submission is done, ensure local IO is flushed out.
2197  */
2198 static void io_submit_state_end(struct io_ring_ctx *ctx)
2199 {
2200 	struct io_submit_state *state = &ctx->submit_state;
2201 
2202 	if (unlikely(state->link.head))
2203 		io_queue_sqe_fallback(state->link.head);
2204 	/* flush only after queuing links as they can generate completions */
2205 	io_submit_flush_completions(ctx);
2206 	if (state->plug_started)
2207 		blk_finish_plug(&state->plug);
2208 }
2209 
2210 /*
2211  * Start submission side cache.
2212  */
2213 static void io_submit_state_start(struct io_submit_state *state,
2214 				  unsigned int max_ios)
2215 {
2216 	state->plug_started = false;
2217 	state->need_plug = max_ios > 2;
2218 	state->submit_nr = max_ios;
2219 	/* set only head, no need to init link_last in advance */
2220 	state->link.head = NULL;
2221 }
2222 
2223 static void io_commit_sqring(struct io_ring_ctx *ctx)
2224 {
2225 	struct io_rings *rings = ctx->rings;
2226 
2227 	/*
2228 	 * Ensure any loads from the SQEs are done at this point,
2229 	 * since once we write the new head, the application could
2230 	 * write new data to them.
2231 	 */
2232 	smp_store_release(&rings->sq.head, ctx->cached_sq_head);
2233 }
2234 
2235 /*
2236  * Fetch an sqe, if one is available. Note this returns a pointer to memory
2237  * that is mapped by userspace. This means that care needs to be taken to
2238  * ensure that reads are stable, as we cannot rely on userspace always
2239  * being a good citizen. If members of the sqe are validated and then later
2240  * used, it's important that those reads are done through READ_ONCE() to
2241  * prevent a re-load down the line.
2242  */
2243 static bool io_get_sqe(struct io_ring_ctx *ctx, const struct io_uring_sqe **sqe)
2244 {
2245 	unsigned mask = ctx->sq_entries - 1;
2246 	unsigned head = ctx->cached_sq_head++ & mask;
2247 
2248 	if (!(ctx->flags & IORING_SETUP_NO_SQARRAY)) {
2249 		head = READ_ONCE(ctx->sq_array[head]);
2250 		if (unlikely(head >= ctx->sq_entries)) {
2251 			/* drop invalid entries */
2252 			spin_lock(&ctx->completion_lock);
2253 			ctx->cq_extra--;
2254 			spin_unlock(&ctx->completion_lock);
2255 			WRITE_ONCE(ctx->rings->sq_dropped,
2256 				   READ_ONCE(ctx->rings->sq_dropped) + 1);
2257 			return false;
2258 		}
2259 	}
2260 
2261 	/*
2262 	 * The cached sq head (or cq tail) serves two purposes:
2263 	 *
2264 	 * 1) allows us to batch the cost of updating the user visible
2265 	 *    head.
2266 	 * 2) allows the kernel side to track the head on its own, even
2267 	 *    though the application is the one updating it.
2268 	 */
2269 
2270 	/* double index for 128-byte SQEs, twice as long */
2271 	if (ctx->flags & IORING_SETUP_SQE128)
2272 		head <<= 1;
2273 	*sqe = &ctx->sq_sqes[head];
2274 	return true;
2275 }
2276 
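/*
 * Consume up to @nr SQEs from the SQ ring and submit them. Returns how
 * many entries were consumed, which may be fewer than requested if
 * request allocation fails or an SQE error stops the batch, or -EAGAIN
 * if nothing was submitted and no request could be allocated.
 */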
2277 int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
2278 	__must_hold(&ctx->uring_lock)
2279 {
2280 	unsigned int entries = io_sqring_entries(ctx);
2281 	unsigned int left;
2282 	int ret;
2283 
2284 	if (unlikely(!entries))
2285 		return 0;
2286 	/* make sure SQ entry isn't read before tail */
2287 	ret = left = min(nr, entries);
2288 	io_get_task_refs(left);
2289 	io_submit_state_start(&ctx->submit_state, left);
2290 
2291 	do {
2292 		const struct io_uring_sqe *sqe;
2293 		struct io_kiocb *req;
2294 
2295 		if (unlikely(!io_alloc_req(ctx, &req)))
2296 			break;
2297 		if (unlikely(!io_get_sqe(ctx, &sqe))) {
2298 			io_req_add_to_cache(req, ctx);
2299 			break;
2300 		}
2301 
2302 		/*
2303 		 * Continue submitting even for sqe failure if the
2304 		 * ring was set up with IORING_SETUP_SUBMIT_ALL
2305 		 */
2306 		if (unlikely(io_submit_sqe(ctx, req, sqe)) &&
2307 		    !(ctx->flags & IORING_SETUP_SUBMIT_ALL)) {
2308 			left--;
2309 			break;
2310 		}
2311 	} while (--left);
2312 
2313 	if (unlikely(left)) {
2314 		ret -= left;
2315 		/* try again if it submitted nothing and can't allocate a req */
2316 		if (!ret && io_req_cache_empty(ctx))
2317 			ret = -EAGAIN;
2318 		current->io_uring->cached_refs += left;
2319 	}
2320 
2321 	io_submit_state_end(ctx);
2322 	/* Commit SQ ring head once we've consumed and submitted all SQEs */
2323 	io_commit_sqring(ctx);
2324 	return ret;
2325 }
2326 
2327 static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
2328 			    int wake_flags, void *key)
2329 {
2330 	struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue, wq);
2331 
2332 	/*
2333 	 * Cannot safely flush overflowed CQEs from here, ensure we wake up
2334 	 * the task, and the next invocation will do it.
2335 	 */
2336 	if (io_should_wake(iowq) || io_has_work(iowq->ctx))
2337 		return autoremove_wake_function(curr, mode, wake_flags, key);
2338 	return -1;
2339 }
2340 
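/*
 * Run any pending local or generic task_work. Returns 0 if work was run
 * (or none was pending), -EINTR if a signal is pending instead.
 */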
2341 int io_run_task_work_sig(struct io_ring_ctx *ctx)
2342 {
2343 	if (!llist_empty(&ctx->work_llist)) {
2344 		__set_current_state(TASK_RUNNING);
2345 		if (io_run_local_work(ctx, INT_MAX) > 0)
2346 			return 0;
2347 	}
2348 	if (io_run_task_work() > 0)
2349 		return 0;
2350 	if (task_sigpending(current))
2351 		return -EINTR;
2352 	return 0;
2353 }
2354 
2355 static bool current_pending_io(void)
2356 {
2357 	struct io_uring_task *tctx = current->io_uring;
2358 
2359 	if (!tctx)
2360 		return false;
2361 	return percpu_counter_read_positive(&tctx->inflight);
2362 }
2363 
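/* The normal waiting timeout has expired: flag it and wake the waiter. */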
2364 static enum hrtimer_restart io_cqring_timer_wakeup(struct hrtimer *timer)
2365 {
2366 	struct io_wait_queue *iowq = container_of(timer, struct io_wait_queue, t);
2367 
2368 	WRITE_ONCE(iowq->hit_timeout, 1);
2369 	iowq->min_timeout = 0;
2370 	wake_up_process(iowq->wq.private);
2371 	return HRTIMER_NORESTART;
2372 }
2373 
2374 /*
2375  * Handle the min_timeout portion. If we saw any timeouts, events, or have work,
2376  * wake up. If not, and we have a normal timeout, switch to that and keep
2377  * sleeping.
2378  */
2379 static enum hrtimer_restart io_cqring_min_timer_wakeup(struct hrtimer *timer)
2380 {
2381 	struct io_wait_queue *iowq = container_of(timer, struct io_wait_queue, t);
2382 	struct io_ring_ctx *ctx = iowq->ctx;
2383 
2384 	/* no general timeout, or it's shorter than (or equal to) the min timeout, done */
2385 	if (iowq->timeout == KTIME_MAX ||
2386 	    ktime_compare(iowq->min_timeout, iowq->timeout) >= 0)
2387 		goto out_wake;
2388 	/* work we may need to run, wake function will see if we need to wake */
2389 	if (io_has_work(ctx))
2390 		goto out_wake;
2391 	/* got events since we started waiting, min timeout is done */
2392 	if (iowq->cq_min_tail != READ_ONCE(ctx->rings->cq.tail))
2393 		goto out_wake;
2394 	/* if we have any events and min timeout expired, we're done */
2395 	if (io_cqring_events(ctx))
2396 		goto out_wake;
2397 
2398 	/*
2399 	 * If using deferred task_work running and the application is waiting on
2400 	 * more than one request, ensure we reset it now that we are switching
2401 	 * to normal sleeps. Any request completion post min_wait should wake
2402 	 * the task and return.
2403 	 */
2404 	if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) {
2405 		atomic_set(&ctx->cq_wait_nr, 1);
2406 		smp_mb();
2407 		if (!llist_empty(&ctx->work_llist))
2408 			goto out_wake;
2409 	}
2410 
2411 	iowq->t.function = io_cqring_timer_wakeup;
2412 	hrtimer_set_expires(timer, iowq->timeout);
2413 	return HRTIMER_RESTART;
2414 out_wake:
2415 	return io_cqring_timer_wakeup(timer);
2416 }
2417 
2418 static int io_cqring_schedule_timeout(struct io_wait_queue *iowq,
2419 				      clockid_t clock_id, ktime_t start_time)
2420 {
2421 	ktime_t timeout;
2422 
2423 	hrtimer_init_on_stack(&iowq->t, clock_id, HRTIMER_MODE_ABS);
2424 	if (iowq->min_timeout) {
2425 		timeout = ktime_add_ns(iowq->min_timeout, start_time);
2426 		iowq->t.function = io_cqring_min_timer_wakeup;
2427 	} else {
2428 		timeout = iowq->timeout;
2429 		iowq->t.function = io_cqring_timer_wakeup;
2430 	}
2431 
2432 	hrtimer_set_expires_range_ns(&iowq->t, timeout, 0);
2433 	hrtimer_start_expires(&iowq->t, HRTIMER_MODE_ABS);
2434 
2435 	if (!READ_ONCE(iowq->hit_timeout))
2436 		schedule();
2437 
2438 	hrtimer_cancel(&iowq->t);
2439 	destroy_hrtimer_on_stack(&iowq->t);
2440 	__set_current_state(TASK_RUNNING);
2441 
2442 	return READ_ONCE(iowq->hit_timeout) ? -ETIME : 0;
2443 }
2444 
2445 static int __io_cqring_wait_schedule(struct io_ring_ctx *ctx,
2446 				     struct io_wait_queue *iowq,
2447 				     ktime_t start_time)
2448 {
2449 	int ret = 0;
2450 
2451 	/*
2452 	 * Mark us as being in io_wait if we have pending requests, so cpufreq
2453 	 * can take into account that the task is waiting for IO - turns out
2454 	 * to be important for low QD IO.
2455 	 */
2456 	if (current_pending_io())
2457 		current->in_iowait = 1;
2458 	if (iowq->timeout != KTIME_MAX || iowq->min_timeout)
2459 		ret = io_cqring_schedule_timeout(iowq, ctx->clockid, start_time);
2460 	else
2461 		schedule();
2462 	current->in_iowait = 0;
2463 	return ret;
2464 }
2465 
2466 /* If this returns > 0, the caller should retry */
2467 static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
2468 					  struct io_wait_queue *iowq,
2469 					  ktime_t start_time)
2470 {
2471 	if (unlikely(READ_ONCE(ctx->check_cq)))
2472 		return 1;
2473 	if (unlikely(!llist_empty(&ctx->work_llist)))
2474 		return 1;
2475 	if (unlikely(test_thread_flag(TIF_NOTIFY_SIGNAL)))
2476 		return 1;
2477 	if (unlikely(task_sigpending(current)))
2478 		return -EINTR;
2479 	if (unlikely(io_should_wake(iowq)))
2480 		return 0;
2481 
2482 	return __io_cqring_wait_schedule(ctx, iowq, start_time);
2483 }
2484 
2485 struct ext_arg {
2486 	size_t argsz;
2487 	struct __kernel_timespec __user *ts;
2488 	const sigset_t __user *sig;
2489 	ktime_t min_time;
2490 };
2491 
2492 /*
2493  * Wait until events become available, if we don't already have some. The
2494  * application must reap them itself, as they reside on the shared cq ring.
2495  */
2496 static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, u32 flags,
2497 			  struct ext_arg *ext_arg)
2498 {
2499 	struct io_wait_queue iowq;
2500 	struct io_rings *rings = ctx->rings;
2501 	ktime_t start_time;
2502 	int ret;
2503 
2504 	if (!io_allowed_run_tw(ctx))
2505 		return -EEXIST;
2506 	if (!llist_empty(&ctx->work_llist))
2507 		io_run_local_work(ctx, min_events);
2508 	io_run_task_work();
2509 
2510 	if (unlikely(test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq)))
2511 		io_cqring_do_overflow_flush(ctx);
2512 	if (__io_cqring_events_user(ctx) >= min_events)
2513 		return 0;
2514 
2515 	init_waitqueue_func_entry(&iowq.wq, io_wake_function);
2516 	iowq.wq.private = current;
2517 	INIT_LIST_HEAD(&iowq.wq.entry);
2518 	iowq.ctx = ctx;
2519 	iowq.cq_tail = READ_ONCE(ctx->rings->cq.head) + min_events;
2520 	iowq.cq_min_tail = READ_ONCE(ctx->rings->cq.tail);
2521 	iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
2522 	iowq.hit_timeout = 0;
2523 	iowq.min_timeout = ext_arg->min_time;
2524 	iowq.timeout = KTIME_MAX;
2525 	start_time = io_get_time(ctx);
2526 
2527 	if (ext_arg->ts) {
2528 		struct timespec64 ts;
2529 
2530 		if (get_timespec64(&ts, ext_arg->ts))
2531 			return -EFAULT;
2532 
2533 		iowq.timeout = timespec64_to_ktime(ts);
2534 		if (!(flags & IORING_ENTER_ABS_TIMER))
2535 			iowq.timeout = ktime_add(iowq.timeout, start_time);
2536 	}
2537 
2538 	if (ext_arg->sig) {
2539 #ifdef CONFIG_COMPAT
2540 		if (in_compat_syscall())
2541 			ret = set_compat_user_sigmask((const compat_sigset_t __user *)ext_arg->sig,
2542 						      ext_arg->argsz);
2543 		else
2544 #endif
2545 			ret = set_user_sigmask(ext_arg->sig, ext_arg->argsz);
2546 
2547 		if (ret)
2548 			return ret;
2549 	}
2550 
2551 	io_napi_busy_loop(ctx, &iowq);
2552 
2553 	trace_io_uring_cqring_wait(ctx, min_events);
2554 	do {
2555 		unsigned long check_cq;
2556 		int nr_wait;
2557 
2558 		/* if min timeout has been hit, don't reset wait count */
2559 		if (!iowq.hit_timeout)
2560 			nr_wait = (int) iowq.cq_tail -
2561 					READ_ONCE(ctx->rings->cq.tail);
2562 		else
2563 			nr_wait = 1;
2564 
2565 		if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) {
2566 			atomic_set(&ctx->cq_wait_nr, nr_wait);
2567 			set_current_state(TASK_INTERRUPTIBLE);
2568 		} else {
2569 			prepare_to_wait_exclusive(&ctx->cq_wait, &iowq.wq,
2570 							TASK_INTERRUPTIBLE);
2571 		}
2572 
2573 		ret = io_cqring_wait_schedule(ctx, &iowq, start_time);
2574 		__set_current_state(TASK_RUNNING);
2575 		atomic_set(&ctx->cq_wait_nr, IO_CQ_WAKE_INIT);
2576 
2577 		/*
2578 		 * Run task_work after scheduling and before io_should_wake().
2579 		 * If we got woken because of task_work being processed, run it
2580 		 * now rather than let the caller do another wait loop.
2581 		 */
2582 		io_run_task_work();
2583 		if (!llist_empty(&ctx->work_llist))
2584 			io_run_local_work(ctx, nr_wait);
2585 
2586 		/*
2587 		 * Non-local task_work will be run on exit to userspace, but
2588 		 * if we're using DEFER_TASKRUN, then we could have waited
2589 		 * with a timeout for a number of requests. If the timeout
2590 		 * hits, we could have some requests ready to process. Ensure
2591 		 * this break is _after_ we have run task_work, to avoid
2592 		 * deferring running potentially pending requests until the
2593 		 * next time we wait for events.
2594 		 */
2595 		if (ret < 0)
2596 			break;
2597 
2598 		check_cq = READ_ONCE(ctx->check_cq);
2599 		if (unlikely(check_cq)) {
2600 			/* let the caller flush overflows, retry */
2601 			if (check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT))
2602 				io_cqring_do_overflow_flush(ctx);
2603 			if (check_cq & BIT(IO_CHECK_CQ_DROPPED_BIT)) {
2604 				ret = -EBADR;
2605 				break;
2606 			}
2607 		}
2608 
2609 		if (io_should_wake(&iowq)) {
2610 			ret = 0;
2611 			break;
2612 		}
2613 		cond_resched();
2614 	} while (1);
2615 
2616 	if (!(ctx->flags & IORING_SETUP_DEFER_TASKRUN))
2617 		finish_wait(&ctx->cq_wait, &iowq.wq);
2618 	restore_saved_sigmask_unless(ret == -EINTR);
2619 
2620 	return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
2621 }
2622 
2623 static void *io_rings_map(struct io_ring_ctx *ctx, unsigned long uaddr,
2624 			  size_t size)
2625 {
2626 	return __io_uaddr_map(&ctx->ring_pages, &ctx->n_ring_pages, uaddr,
2627 				size);
2628 }
2629 
2630 static void *io_sqes_map(struct io_ring_ctx *ctx, unsigned long uaddr,
2631 			 size_t size)
2632 {
2633 	return __io_uaddr_map(&ctx->sqe_pages, &ctx->n_sqe_pages, uaddr,
2634 				size);
2635 }
2636 
2637 static void io_rings_free(struct io_ring_ctx *ctx)
2638 {
2639 	if (!(ctx->flags & IORING_SETUP_NO_MMAP)) {
2640 		io_pages_unmap(ctx->rings, &ctx->ring_pages, &ctx->n_ring_pages,
2641 				true);
2642 		io_pages_unmap(ctx->sq_sqes, &ctx->sqe_pages, &ctx->n_sqe_pages,
2643 				true);
2644 	} else {
2645 		io_pages_free(&ctx->ring_pages, ctx->n_ring_pages);
2646 		ctx->n_ring_pages = 0;
2647 		io_pages_free(&ctx->sqe_pages, ctx->n_sqe_pages);
2648 		ctx->n_sqe_pages = 0;
2649 		vunmap(ctx->rings);
2650 		vunmap(ctx->sq_sqes);
2651 	}
2652 
2653 	ctx->rings = NULL;
2654 	ctx->sq_sqes = NULL;
2655 }
2656 
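/*
 * Compute the size of the rings allocation: the io_rings struct plus the
 * CQE array (doubled for CQE32), optionally followed by the SQ index
 * array. *sq_offset is set to where that array starts, or SIZE_MAX if
 * IORING_SETUP_NO_SQARRAY means there is none.
 */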
2657 static unsigned long rings_size(struct io_ring_ctx *ctx, unsigned int sq_entries,
2658 				unsigned int cq_entries, size_t *sq_offset)
2659 {
2660 	struct io_rings *rings;
2661 	size_t off, sq_array_size;
2662 
2663 	off = struct_size(rings, cqes, cq_entries);
2664 	if (off == SIZE_MAX)
2665 		return SIZE_MAX;
2666 	if (ctx->flags & IORING_SETUP_CQE32) {
2667 		if (check_shl_overflow(off, 1, &off))
2668 			return SIZE_MAX;
2669 	}
2670 
2671 #ifdef CONFIG_SMP
2672 	off = ALIGN(off, SMP_CACHE_BYTES);
2673 	if (off == 0)
2674 		return SIZE_MAX;
2675 #endif
2676 
2677 	if (ctx->flags & IORING_SETUP_NO_SQARRAY) {
2678 		*sq_offset = SIZE_MAX;
2679 		return off;
2680 	}
2681 
2682 	*sq_offset = off;
2683 
2684 	sq_array_size = array_size(sizeof(u32), sq_entries);
2685 	if (sq_array_size == SIZE_MAX)
2686 		return SIZE_MAX;
2687 
2688 	if (check_add_overflow(off, sq_array_size, &off))
2689 		return SIZE_MAX;
2690 
2691 	return off;
2692 }
2693 
2694 static void io_req_caches_free(struct io_ring_ctx *ctx)
2695 {
2696 	struct io_kiocb *req;
2697 	int nr = 0;
2698 
2699 	mutex_lock(&ctx->uring_lock);
2700 
2701 	while (!io_req_cache_empty(ctx)) {
2702 		req = io_extract_req(ctx);
2703 		kmem_cache_free(req_cachep, req);
2704 		nr++;
2705 	}
2706 	if (nr)
2707 		percpu_ref_put_many(&ctx->refs, nr);
2708 	mutex_unlock(&ctx->uring_lock);
2709 }
2710 
2711 static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
2712 {
2713 	io_sq_thread_finish(ctx);
2714 	/* __io_rsrc_put_work() may need uring_lock to progress, wait w/o it */
2715 	if (WARN_ON_ONCE(!list_empty(&ctx->rsrc_ref_list)))
2716 		return;
2717 
2718 	mutex_lock(&ctx->uring_lock);
2719 	if (ctx->buf_data)
2720 		__io_sqe_buffers_unregister(ctx);
2721 	if (ctx->file_data)
2722 		__io_sqe_files_unregister(ctx);
2723 	io_cqring_overflow_kill(ctx);
2724 	io_eventfd_unregister(ctx);
2725 	io_alloc_cache_free(&ctx->apoll_cache, kfree);
2726 	io_alloc_cache_free(&ctx->netmsg_cache, io_netmsg_cache_free);
2727 	io_alloc_cache_free(&ctx->rw_cache, io_rw_cache_free);
2728 	io_alloc_cache_free(&ctx->uring_cache, kfree);
2729 	io_alloc_cache_free(&ctx->msg_cache, io_msg_cache_free);
2730 	io_futex_cache_free(ctx);
2731 	io_destroy_buffers(ctx);
2732 	mutex_unlock(&ctx->uring_lock);
2733 	if (ctx->sq_creds)
2734 		put_cred(ctx->sq_creds);
2735 	if (ctx->submitter_task)
2736 		put_task_struct(ctx->submitter_task);
2737 
2738 	/* there are no registered resources left, nobody uses it */
2739 	if (ctx->rsrc_node)
2740 		io_rsrc_node_destroy(ctx, ctx->rsrc_node);
2741 
2742 	WARN_ON_ONCE(!list_empty(&ctx->rsrc_ref_list));
2743 	WARN_ON_ONCE(!list_empty(&ctx->ltimeout_list));
2744 
2745 	io_alloc_cache_free(&ctx->rsrc_node_cache, kfree);
2746 	if (ctx->mm_account) {
2747 		mmdrop(ctx->mm_account);
2748 		ctx->mm_account = NULL;
2749 	}
2750 	io_rings_free(ctx);
2751 
2752 	percpu_ref_exit(&ctx->refs);
2753 	free_uid(ctx->user);
2754 	io_req_caches_free(ctx);
2755 	if (ctx->hash_map)
2756 		io_wq_put_hash(ctx->hash_map);
2757 	io_napi_free(ctx);
2758 	kfree(ctx->cancel_table.hbs);
2759 	kfree(ctx->cancel_table_locked.hbs);
2760 	xa_destroy(&ctx->io_bl_xa);
2761 	kfree(ctx);
2762 }
2763 
2764 static __cold void io_activate_pollwq_cb(struct callback_head *cb)
2765 {
2766 	struct io_ring_ctx *ctx = container_of(cb, struct io_ring_ctx,
2767 					       poll_wq_task_work);
2768 
2769 	mutex_lock(&ctx->uring_lock);
2770 	ctx->poll_activated = true;
2771 	mutex_unlock(&ctx->uring_lock);
2772 
2773 	/*
2774 	 * Wake ups for some events between start of polling and activation
2775 	 * might've been lost due to loose synchronisation.
2776 	 */
2777 	wake_up_all(&ctx->poll_wq);
2778 	percpu_ref_put(&ctx->refs);
2779 }
2780 
2781 __cold void io_activate_pollwq(struct io_ring_ctx *ctx)
2782 {
2783 	spin_lock(&ctx->completion_lock);
2784 	/* already activated or in progress */
2785 	if (ctx->poll_activated || ctx->poll_wq_task_work.func)
2786 		goto out;
2787 	if (WARN_ON_ONCE(!ctx->task_complete))
2788 		goto out;
2789 	if (!ctx->submitter_task)
2790 		goto out;
2791 	/*
2792 	 * With ->submitter_task, only the submitter task completes requests; we
2793 	 * only need to sync with it, which is done by injecting a task_work.
2794 	 */
2795 	init_task_work(&ctx->poll_wq_task_work, io_activate_pollwq_cb);
2796 	percpu_ref_get(&ctx->refs);
2797 	if (task_work_add(ctx->submitter_task, &ctx->poll_wq_task_work, TWA_SIGNAL))
2798 		percpu_ref_put(&ctx->refs);
2799 out:
2800 	spin_unlock(&ctx->completion_lock);
2801 }
2802 
2803 static __poll_t io_uring_poll(struct file *file, poll_table *wait)
2804 {
2805 	struct io_ring_ctx *ctx = file->private_data;
2806 	__poll_t mask = 0;
2807 
2808 	if (unlikely(!ctx->poll_activated))
2809 		io_activate_pollwq(ctx);
2810 
2811 	poll_wait(file, &ctx->poll_wq, wait);
2812 	/*
2813 	 * synchronizes with barrier from wq_has_sleeper call in
2814 	 * io_commit_cqring
2815 	 */
2816 	smp_rmb();
2817 	if (!io_sqring_full(ctx))
2818 		mask |= EPOLLOUT | EPOLLWRNORM;
2819 
2820 	/*
2821 	 * Don't flush cqring overflow list here, just do a simple check.
2822 	 * Otherwise there could possibly be an ABBA deadlock:
2823 	 *      CPU0                    CPU1
2824 	 *      ----                    ----
2825 	 * lock(&ctx->uring_lock);
2826 	 *                              lock(&ep->mtx);
2827 	 *                              lock(&ctx->uring_lock);
2828 	 * lock(&ep->mtx);
2829 	 *
2830 	 * Users may get EPOLLIN while seeing nothing in the cqring; this
2831 	 * pushes them to do the flush.
2832 	 */
2833 
2834 	if (__io_cqring_events_user(ctx) || io_has_work(ctx))
2835 		mask |= EPOLLIN | EPOLLRDNORM;
2836 
2837 	return mask;
2838 }
2839 
2840 struct io_tctx_exit {
2841 	struct callback_head		task_work;
2842 	struct completion		completion;
2843 	struct io_ring_ctx		*ctx;
2844 };
2845 
2846 static __cold void io_tctx_exit_cb(struct callback_head *cb)
2847 {
2848 	struct io_uring_task *tctx = current->io_uring;
2849 	struct io_tctx_exit *work;
2850 
2851 	work = container_of(cb, struct io_tctx_exit, task_work);
2852 	/*
2853 	 * When @in_cancel, we're in cancellation and it's racy to remove the
2854 	 * node. It'll be removed by the end of cancellation, just ignore it.
2855 	 * tctx can be NULL if the queueing of this task_work raced with
2856 	 * work cancelation off the exec path.
2857 	 */
2858 	if (tctx && !atomic_read(&tctx->in_cancel))
2859 		io_uring_del_tctx_node((unsigned long)work->ctx);
2860 	complete(&work->completion);
2861 }
2862 
2863 static __cold bool io_cancel_ctx_cb(struct io_wq_work *work, void *data)
2864 {
2865 	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
2866 
2867 	return req->ctx == data;
2868 }
2869 
2870 static __cold void io_ring_exit_work(struct work_struct *work)
2871 {
2872 	struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx, exit_work);
2873 	unsigned long timeout = jiffies + HZ * 60 * 5;
2874 	unsigned long interval = HZ / 20;
2875 	struct io_tctx_exit exit;
2876 	struct io_tctx_node *node;
2877 	int ret;
2878 
2879 	/*
2880 	 * If we're doing polled IO and end up having requests being
2881 	 * submitted async (out-of-line), then completions can come in while
2882 	 * we're waiting for refs to drop. We need to reap these manually,
2883 	 * as nobody else will be looking for them.
2884 	 */
2885 	do {
2886 		if (test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq)) {
2887 			mutex_lock(&ctx->uring_lock);
2888 			io_cqring_overflow_kill(ctx);
2889 			mutex_unlock(&ctx->uring_lock);
2890 		}
2891 
2892 		if (ctx->flags & IORING_SETUP_DEFER_TASKRUN)
2893 			io_move_task_work_from_local(ctx);
2894 
2895 		while (io_uring_try_cancel_requests(ctx, NULL, true))
2896 			cond_resched();
2897 
2898 		if (ctx->sq_data) {
2899 			struct io_sq_data *sqd = ctx->sq_data;
2900 			struct task_struct *tsk;
2901 
2902 			io_sq_thread_park(sqd);
2903 			tsk = sqd->thread;
2904 			if (tsk && tsk->io_uring && tsk->io_uring->io_wq)
2905 				io_wq_cancel_cb(tsk->io_uring->io_wq,
2906 						io_cancel_ctx_cb, ctx, true);
2907 			io_sq_thread_unpark(sqd);
2908 		}
2909 
2910 		io_req_caches_free(ctx);
2911 
2912 		if (WARN_ON_ONCE(time_after(jiffies, timeout))) {
2913 			/* there is little hope left, don't run it too often */
2914 			interval = HZ * 60;
2915 		}
2916 		/*
2917 		 * This is really an uninterruptible wait, as it has to be
2918 		 * complete. But it's also run from a kworker, which doesn't
2919 		 * take signals, so it's fine to make it interruptible. This
2920 		 * avoids scenarios where we know we can wait much longer
2921 		 * on completions, for example if someone does a SIGSTOP on
2922 		 * a task that needs to finish task_work to make this loop
2923 		 * complete. That's a synthetic situation that should not
2924 		 * cause a stuck task backtrace, and hence a potential panic
2925 		 * on stuck tasks if that is enabled.
2926 		 */
2927 	} while (!wait_for_completion_interruptible_timeout(&ctx->ref_comp, interval));
2928 
2929 	init_completion(&exit.completion);
2930 	init_task_work(&exit.task_work, io_tctx_exit_cb);
2931 	exit.ctx = ctx;
2932 
2933 	mutex_lock(&ctx->uring_lock);
2934 	while (!list_empty(&ctx->tctx_list)) {
2935 		WARN_ON_ONCE(time_after(jiffies, timeout));
2936 
2937 		node = list_first_entry(&ctx->tctx_list, struct io_tctx_node,
2938 					ctx_node);
2939 		/* don't spin on a single task if cancellation failed */
2940 		list_rotate_left(&ctx->tctx_list);
2941 		ret = task_work_add(node->task, &exit.task_work, TWA_SIGNAL);
2942 		if (WARN_ON_ONCE(ret))
2943 			continue;
2944 
2945 		mutex_unlock(&ctx->uring_lock);
2946 		/*
2947 		 * See comment above for
2948 		 * wait_for_completion_interruptible_timeout() on why this
2949 		 * wait is marked as interruptible.
2950 		 */
2951 		wait_for_completion_interruptible(&exit.completion);
2952 		mutex_lock(&ctx->uring_lock);
2953 	}
2954 	mutex_unlock(&ctx->uring_lock);
2955 	spin_lock(&ctx->completion_lock);
2956 	spin_unlock(&ctx->completion_lock);
2957 
2958 	/* pairs with RCU read section in io_req_local_work_add() */
2959 	if (ctx->flags & IORING_SETUP_DEFER_TASKRUN)
2960 		synchronize_rcu();
2961 
2962 	io_ring_ctx_free(ctx);
2963 }
2964 
2965 static __cold void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
2966 {
2967 	unsigned long index;
2968 	struct creds *creds;
2969 
2970 	mutex_lock(&ctx->uring_lock);
2971 	percpu_ref_kill(&ctx->refs);
2972 	xa_for_each(&ctx->personalities, index, creds)
2973 		io_unregister_personality(ctx, index);
2974 	mutex_unlock(&ctx->uring_lock);
2975 
2976 	flush_delayed_work(&ctx->fallback_work);
2977 
2978 	INIT_WORK(&ctx->exit_work, io_ring_exit_work);
2979 	/*
2980 	 * Use system_unbound_wq to avoid spawning tons of event kworkers
2981 	 * if we're exiting a ton of rings at the same time. It just adds
2982 	 * noise and overhead, there's no discernible change in runtime
2983 	 * over using system_wq.
2984 	 */
2985 	queue_work(iou_wq, &ctx->exit_work);
2986 }
2987 
2988 static int io_uring_release(struct inode *inode, struct file *file)
2989 {
2990 	struct io_ring_ctx *ctx = file->private_data;
2991 
2992 	file->private_data = NULL;
2993 	io_ring_ctx_wait_and_kill(ctx);
2994 	return 0;
2995 }
2996 
2997 struct io_task_cancel {
2998 	struct task_struct *task;
2999 	bool all;
3000 };
3001 
3002 static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
3003 {
3004 	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
3005 	struct io_task_cancel *cancel = data;
3006 
3007 	return io_match_task_safe(req, cancel->task, cancel->all);
3008 }
3009 
3010 static __cold bool io_cancel_defer_files(struct io_ring_ctx *ctx,
3011 					 struct task_struct *task,
3012 					 bool cancel_all)
3013 {
3014 	struct io_defer_entry *de;
3015 	LIST_HEAD(list);
3016 
3017 	spin_lock(&ctx->completion_lock);
3018 	list_for_each_entry_reverse(de, &ctx->defer_list, list) {
3019 		if (io_match_task_safe(de->req, task, cancel_all)) {
3020 			list_cut_position(&list, &ctx->defer_list, &de->list);
3021 			break;
3022 		}
3023 	}
3024 	spin_unlock(&ctx->completion_lock);
3025 	if (list_empty(&list))
3026 		return false;
3027 
3028 	while (!list_empty(&list)) {
3029 		de = list_first_entry(&list, struct io_defer_entry, list);
3030 		list_del_init(&de->list);
3031 		io_req_task_queue_fail(de->req, -ECANCELED);
3032 		kfree(de);
3033 	}
3034 	return true;
3035 }
3036 
3037 static __cold bool io_uring_try_cancel_iowq(struct io_ring_ctx *ctx)
3038 {
3039 	struct io_tctx_node *node;
3040 	enum io_wq_cancel cret;
3041 	bool ret = false;
3042 
3043 	mutex_lock(&ctx->uring_lock);
3044 	list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
3045 		struct io_uring_task *tctx = node->task->io_uring;
3046 
3047 		/*
3048 		 * io_wq will stay alive while we hold uring_lock, because it's
3049 		 * killed after ctx nodes, which requires taking the lock.
3050 		 */
3051 		if (!tctx || !tctx->io_wq)
3052 			continue;
3053 		cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_ctx_cb, ctx, true);
3054 		ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
3055 	}
3056 	mutex_unlock(&ctx->uring_lock);
3057 
3058 	return ret;
3059 }
3060 
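/*
 * One cancellation pass over this ring for requests matching @task (or
 * all requests if @task is NULL): io-wq work, deferred, poll, waitid,
 * futex, uring_cmd and timeout requests. Returns true if anything was
 * cancelled or run, in which case callers typically loop and retry.
 */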
3061 static __cold bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
3062 						struct task_struct *task,
3063 						bool cancel_all)
3064 {
3065 	struct io_task_cancel cancel = { .task = task, .all = cancel_all, };
3066 	struct io_uring_task *tctx = task ? task->io_uring : NULL;
3067 	enum io_wq_cancel cret;
3068 	bool ret = false;
3069 
3070 	/* set it so io_req_local_work_add() will wake us up */
3071 	if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) {
3072 		atomic_set(&ctx->cq_wait_nr, 1);
3073 		smp_mb();
3074 	}
3075 
3076 	/* failed during ring init, it couldn't have issued any requests */
3077 	if (!ctx->rings)
3078 		return false;
3079 
3080 	if (!task) {
3081 		ret |= io_uring_try_cancel_iowq(ctx);
3082 	} else if (tctx && tctx->io_wq) {
3083 		/*
3084 		 * Cancels requests of all rings, not only @ctx, but
3085 		 * it's fine as the task is in exit/exec.
3086 		 */
3087 		cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_task_cb,
3088 				       &cancel, true);
3089 		ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
3090 	}
3091 
3092 	/* SQPOLL thread does its own polling */
3093 	if ((!(ctx->flags & IORING_SETUP_SQPOLL) && cancel_all) ||
3094 	    (ctx->sq_data && ctx->sq_data->thread == current)) {
3095 		while (!wq_list_empty(&ctx->iopoll_list)) {
3096 			io_iopoll_try_reap_events(ctx);
3097 			ret = true;
3098 			cond_resched();
3099 		}
3100 	}
3101 
3102 	if ((ctx->flags & IORING_SETUP_DEFER_TASKRUN) &&
3103 	    io_allowed_defer_tw_run(ctx))
3104 		ret |= io_run_local_work(ctx, INT_MAX) > 0;
3105 	ret |= io_cancel_defer_files(ctx, task, cancel_all);
3106 	mutex_lock(&ctx->uring_lock);
3107 	ret |= io_poll_remove_all(ctx, task, cancel_all);
3108 	ret |= io_waitid_remove_all(ctx, task, cancel_all);
3109 	ret |= io_futex_remove_all(ctx, task, cancel_all);
3110 	ret |= io_uring_try_cancel_uring_cmd(ctx, task, cancel_all);
3111 	mutex_unlock(&ctx->uring_lock);
3112 	ret |= io_kill_timeouts(ctx, task, cancel_all);
3113 	if (task)
3114 		ret |= io_run_task_work() > 0;
3115 	else
3116 		ret |= flush_delayed_work(&ctx->fallback_work);
3117 	return ret;
3118 }
3119 
3120 static s64 tctx_inflight(struct io_uring_task *tctx, bool tracked)
3121 {
3122 	if (tracked)
3123 		return atomic_read(&tctx->inflight_tracked);
3124 	return percpu_counter_sum(&tctx->inflight);
3125 }
3126 
3127 /*
3128  * Find any io_uring ctx that this task has registered or done IO on, and cancel
3129  * requests. @sqd should be not-null IFF it's an SQPOLL thread cancellation.
3130  */
3131 __cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd)
3132 {
3133 	struct io_uring_task *tctx = current->io_uring;
3134 	struct io_ring_ctx *ctx;
3135 	struct io_tctx_node *node;
3136 	unsigned long index;
3137 	s64 inflight;
3138 	DEFINE_WAIT(wait);
3139 
3140 	WARN_ON_ONCE(sqd && sqd->thread != current);
3141 
3142 	if (!current->io_uring)
3143 		return;
3144 	if (tctx->io_wq)
3145 		io_wq_exit_start(tctx->io_wq);
3146 
3147 	atomic_inc(&tctx->in_cancel);
3148 	do {
3149 		bool loop = false;
3150 
3151 		io_uring_drop_tctx_refs(current);
3152 		if (!tctx_inflight(tctx, !cancel_all))
3153 			break;
3154 
3155 		/* read completions before cancelations */
3156 		inflight = tctx_inflight(tctx, false);
3157 		if (!inflight)
3158 			break;
3159 
3160 		if (!sqd) {
3161 			xa_for_each(&tctx->xa, index, node) {
3162 				/* sqpoll task will cancel all its requests */
3163 				if (node->ctx->sq_data)
3164 					continue;
3165 				loop |= io_uring_try_cancel_requests(node->ctx,
3166 							current, cancel_all);
3167 			}
3168 		} else {
3169 			list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
3170 				loop |= io_uring_try_cancel_requests(ctx,
3171 								     current,
3172 								     cancel_all);
3173 		}
3174 
3175 		if (loop) {
3176 			cond_resched();
3177 			continue;
3178 		}
3179 
3180 		prepare_to_wait(&tctx->wait, &wait, TASK_INTERRUPTIBLE);
3181 		io_run_task_work();
3182 		io_uring_drop_tctx_refs(current);
3183 		xa_for_each(&tctx->xa, index, node) {
3184 			if (!llist_empty(&node->ctx->work_llist)) {
3185 				WARN_ON_ONCE(node->ctx->submitter_task &&
3186 					     node->ctx->submitter_task != current);
3187 				goto end_wait;
3188 			}
3189 		}
3190 		/*
3191 		 * If we've seen completions, retry without waiting. This
3192 		 * avoids a race where a completion comes in before we did
3193 		 * prepare_to_wait().
3194 		 */
3195 		if (inflight == tctx_inflight(tctx, !cancel_all))
3196 			schedule();
3197 end_wait:
3198 		finish_wait(&tctx->wait, &wait);
3199 	} while (1);
3200 
3201 	io_uring_clean_tctx(tctx);
3202 	if (cancel_all) {
3203 		/*
3204 		 * We shouldn't run task_works after cancel, so just leave
3205 		 * ->in_cancel set for normal exit.
3206 		 */
3207 		atomic_dec(&tctx->in_cancel);
3208 		/* for exec all current's requests should be gone, kill tctx */
3209 		__io_uring_free(current);
3210 	}
3211 }
3212 
3213 void __io_uring_cancel(bool cancel_all)
3214 {
3215 	io_uring_cancel_generic(cancel_all, NULL);
3216 }
3217 
3218 static int io_validate_ext_arg(unsigned flags, const void __user *argp, size_t argsz)
3219 {
3220 	if (flags & IORING_ENTER_EXT_ARG) {
3221 		struct io_uring_getevents_arg arg;
3222 
3223 		if (argsz != sizeof(arg))
3224 			return -EINVAL;
3225 		if (copy_from_user(&arg, argp, sizeof(arg)))
3226 			return -EFAULT;
3227 	}
3228 	return 0;
3229 }
3230 
3231 static int io_get_ext_arg(unsigned flags, const void __user *argp,
3232 			  struct ext_arg *ext_arg)
3233 {
3234 	struct io_uring_getevents_arg arg;
3235 
3236 	/*
3237 	 * If EXT_ARG isn't set, then we have no timespec and the argp pointer
3238 	 * is just a pointer to the sigset_t.
3239 	 */
3240 	if (!(flags & IORING_ENTER_EXT_ARG)) {
3241 		ext_arg->sig = (const sigset_t __user *) argp;
3242 		ext_arg->ts = NULL;
3243 		return 0;
3244 	}
3245 
3246 	/*
3247 	 * EXT_ARG is set - ensure we agree on the size of it and copy in our
3248 	 * timespec and sigset_t pointers if good.
3249 	 */
3250 	if (ext_arg->argsz != sizeof(arg))
3251 		return -EINVAL;
3252 	if (copy_from_user(&arg, argp, sizeof(arg)))
3253 		return -EFAULT;
3254 	ext_arg->min_time = arg.min_wait_usec * NSEC_PER_USEC;
3255 	ext_arg->sig = u64_to_user_ptr(arg.sigmask);
3256 	ext_arg->argsz = arg.sigmask_sz;
3257 	ext_arg->ts = u64_to_user_ptr(arg.ts);
3258 	return 0;
3259 }
3260 
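/*
 * io_uring_enter() - submit new SQEs and/or wait for completions on the
 * ring referenced by @fd (or a registered ring index). For SQPOLL rings
 * the poller thread does the submission, so this only wakes or waits on
 * it as requested.
 */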
3261 SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
3262 		u32, min_complete, u32, flags, const void __user *, argp,
3263 		size_t, argsz)
3264 {
3265 	struct io_ring_ctx *ctx;
3266 	struct file *file;
3267 	long ret;
3268 
3269 	if (unlikely(flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP |
3270 			       IORING_ENTER_SQ_WAIT | IORING_ENTER_EXT_ARG |
3271 			       IORING_ENTER_REGISTERED_RING |
3272 			       IORING_ENTER_ABS_TIMER)))
3273 		return -EINVAL;
3274 
3275 	/*
3276 	 * Ring fd has been registered via IORING_REGISTER_RING_FDS, we
3277 	 * need only dereference our task private array to find it.
3278 	 */
3279 	if (flags & IORING_ENTER_REGISTERED_RING) {
3280 		struct io_uring_task *tctx = current->io_uring;
3281 
3282 		if (unlikely(!tctx || fd >= IO_RINGFD_REG_MAX))
3283 			return -EINVAL;
3284 		fd = array_index_nospec(fd, IO_RINGFD_REG_MAX);
3285 		file = tctx->registered_rings[fd];
3286 		if (unlikely(!file))
3287 			return -EBADF;
3288 	} else {
3289 		file = fget(fd);
3290 		if (unlikely(!file))
3291 			return -EBADF;
3292 		ret = -EOPNOTSUPP;
3293 		if (unlikely(!io_is_uring_fops(file)))
3294 			goto out;
3295 	}
3296 
3297 	ctx = file->private_data;
3298 	ret = -EBADFD;
3299 	if (unlikely(ctx->flags & IORING_SETUP_R_DISABLED))
3300 		goto out;
3301 
3302 	/*
3303 	 * For SQ polling, the thread will do all submissions and completions.
3304 	 * Just return the requested submit count, and wake the thread if
3305 	 * we were asked to.
3306 	 */
3307 	ret = 0;
3308 	if (ctx->flags & IORING_SETUP_SQPOLL) {
3309 		if (unlikely(ctx->sq_data->thread == NULL)) {
3310 			ret = -EOWNERDEAD;
3311 			goto out;
3312 		}
3313 		if (flags & IORING_ENTER_SQ_WAKEUP)
3314 			wake_up(&ctx->sq_data->wait);
3315 		if (flags & IORING_ENTER_SQ_WAIT)
3316 			io_sqpoll_wait_sq(ctx);
3317 
3318 		ret = to_submit;
3319 	} else if (to_submit) {
3320 		ret = io_uring_add_tctx_node(ctx);
3321 		if (unlikely(ret))
3322 			goto out;
3323 
3324 		mutex_lock(&ctx->uring_lock);
3325 		ret = io_submit_sqes(ctx, to_submit);
3326 		if (ret != to_submit) {
3327 			mutex_unlock(&ctx->uring_lock);
3328 			goto out;
3329 		}
3330 		if (flags & IORING_ENTER_GETEVENTS) {
3331 			if (ctx->syscall_iopoll)
3332 				goto iopoll_locked;
3333 			/*
3334 			 * Ignore errors, we'll soon call io_cqring_wait() and
3335 			 * it should handle ownership problems if any.
3336 			 */
3337 			if (ctx->flags & IORING_SETUP_DEFER_TASKRUN)
3338 				(void)io_run_local_work_locked(ctx, min_complete);
3339 		}
3340 		mutex_unlock(&ctx->uring_lock);
3341 	}
3342 
3343 	if (flags & IORING_ENTER_GETEVENTS) {
3344 		int ret2;
3345 
3346 		if (ctx->syscall_iopoll) {
3347 			/*
3348 			 * We disallow the app entering submit/complete with
3349 			 * polling, but we still need to lock the ring to
3350 			 * prevent racing with polled issue that got punted to
3351 			 * a workqueue.
3352 			 */
3353 			mutex_lock(&ctx->uring_lock);
3354 iopoll_locked:
3355 			ret2 = io_validate_ext_arg(flags, argp, argsz);
3356 			if (likely(!ret2)) {
3357 				min_complete = min(min_complete,
3358 						   ctx->cq_entries);
3359 				ret2 = io_iopoll_check(ctx, min_complete);
3360 			}
3361 			mutex_unlock(&ctx->uring_lock);
3362 		} else {
3363 			struct ext_arg ext_arg = { .argsz = argsz };
3364 
3365 			ret2 = io_get_ext_arg(flags, argp, &ext_arg);
3366 			if (likely(!ret2)) {
3367 				min_complete = min(min_complete,
3368 						   ctx->cq_entries);
3369 				ret2 = io_cqring_wait(ctx, min_complete, flags,
3370 						      &ext_arg);
3371 			}
3372 		}
3373 
3374 		if (!ret) {
3375 			ret = ret2;
3376 
3377 			/*
3378 			 * EBADR indicates that one or more CQE were dropped.
3379 			 * Once the user has been informed we can clear the bit
3380 			 * as they are obviously ok with those drops.
3381 			 */
3382 			if (unlikely(ret2 == -EBADR))
3383 				clear_bit(IO_CHECK_CQ_DROPPED_BIT,
3384 					  &ctx->check_cq);
3385 		}
3386 	}
3387 out:
3388 	if (!(flags & IORING_ENTER_REGISTERED_RING))
3389 		fput(file);
3390 	return ret;
3391 }
3392 
3393 static const struct file_operations io_uring_fops = {
3394 	.release	= io_uring_release,
3395 	.mmap		= io_uring_mmap,
3396 	.get_unmapped_area = io_uring_get_unmapped_area,
3397 #ifndef CONFIG_MMU
3398 	.mmap_capabilities = io_uring_nommu_mmap_capabilities,
3399 #endif
3400 	.poll		= io_uring_poll,
3401 #ifdef CONFIG_PROC_FS
3402 	.show_fdinfo	= io_uring_show_fdinfo,
3403 #endif
3404 };
3405 
3406 bool io_is_uring_fops(struct file *file)
3407 {
3408 	return file->f_op == &io_uring_fops;
3409 }
3410 
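/*
 * Allocate (or, with IORING_SETUP_NO_MMAP, map from user memory) the
 * SQ/CQ rings and the SQE array, and fill in the ring masks and sizes.
 */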
3411 static __cold int io_allocate_scq_urings(struct io_ring_ctx *ctx,
3412 					 struct io_uring_params *p)
3413 {
3414 	struct io_rings *rings;
3415 	size_t size, sq_array_offset;
3416 	void *ptr;
3417 
3418 	/* make sure these are sane, as we already accounted them */
3419 	ctx->sq_entries = p->sq_entries;
3420 	ctx->cq_entries = p->cq_entries;
3421 
3422 	size = rings_size(ctx, p->sq_entries, p->cq_entries, &sq_array_offset);
3423 	if (size == SIZE_MAX)
3424 		return -EOVERFLOW;
3425 
3426 	if (!(ctx->flags & IORING_SETUP_NO_MMAP))
3427 		rings = io_pages_map(&ctx->ring_pages, &ctx->n_ring_pages, size);
3428 	else
3429 		rings = io_rings_map(ctx, p->cq_off.user_addr, size);
3430 
3431 	if (IS_ERR(rings))
3432 		return PTR_ERR(rings);
3433 
3434 	ctx->rings = rings;
3435 	if (!(ctx->flags & IORING_SETUP_NO_SQARRAY))
3436 		ctx->sq_array = (u32 *)((char *)rings + sq_array_offset);
3437 	rings->sq_ring_mask = p->sq_entries - 1;
3438 	rings->cq_ring_mask = p->cq_entries - 1;
3439 	rings->sq_ring_entries = p->sq_entries;
3440 	rings->cq_ring_entries = p->cq_entries;
3441 
3442 	if (p->flags & IORING_SETUP_SQE128)
3443 		size = array_size(2 * sizeof(struct io_uring_sqe), p->sq_entries);
3444 	else
3445 		size = array_size(sizeof(struct io_uring_sqe), p->sq_entries);
3446 	if (size == SIZE_MAX) {
3447 		io_rings_free(ctx);
3448 		return -EOVERFLOW;
3449 	}
3450 
3451 	if (!(ctx->flags & IORING_SETUP_NO_MMAP))
3452 		ptr = io_pages_map(&ctx->sqe_pages, &ctx->n_sqe_pages, size);
3453 	else
3454 		ptr = io_sqes_map(ctx, p->sq_off.user_addr, size);
3455 
3456 	if (IS_ERR(ptr)) {
3457 		io_rings_free(ctx);
3458 		return PTR_ERR(ptr);
3459 	}
3460 
3461 	ctx->sq_sqes = ptr;
3462 	return 0;
3463 }
3464 
3465 static int io_uring_install_fd(struct file *file)
3466 {
3467 	int fd;
3468 
3469 	fd = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
3470 	if (fd < 0)
3471 		return fd;
3472 	fd_install(fd, file);
3473 	return fd;
3474 }
3475 
3476 /*
3477  * Allocate an anonymous fd, this is what constitutes the application
3478  * visible backing of an io_uring instance. The application mmaps this
3479  * fd to gain access to the SQ/CQ ring details.
3480  */
3481 static struct file *io_uring_get_file(struct io_ring_ctx *ctx)
3482 {
3483 	/* Create a new inode so that the LSM can block the creation.  */
3484 	return anon_inode_create_getfile("[io_uring]", &io_uring_fops, ctx,
3485 					 O_RDWR | O_CLOEXEC, NULL);
3486 }
3487 
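/*
 * Set up a new io_uring instance: validate and round up the ring sizes,
 * allocate the ctx and rings, create the SQPOLL thread if requested, and
 * hand a ring fd (or registered ring index) back to the application.
 */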
3488 static __cold int io_uring_create(unsigned entries, struct io_uring_params *p,
3489 				  struct io_uring_params __user *params)
3490 {
3491 	struct io_ring_ctx *ctx;
3492 	struct io_uring_task *tctx;
3493 	struct file *file;
3494 	int ret;
3495 
3496 	if (!entries)
3497 		return -EINVAL;
3498 	if (entries > IORING_MAX_ENTRIES) {
3499 		if (!(p->flags & IORING_SETUP_CLAMP))
3500 			return -EINVAL;
3501 		entries = IORING_MAX_ENTRIES;
3502 	}
3503 
3504 	if ((p->flags & IORING_SETUP_REGISTERED_FD_ONLY)
3505 	    && !(p->flags & IORING_SETUP_NO_MMAP))
3506 		return -EINVAL;
3507 
3508 	/*
3509 	 * Use twice as many entries for the CQ ring. It's possible for the
3510 	 * application to drive a higher depth than the size of the SQ ring,
3511 	 * since the sqes are only used at submission time. This allows for
3512 	 * some flexibility in overcommitting a bit. If the application has
3513 	 * set IORING_SETUP_CQSIZE, it will have passed in the desired number
3514 	 * of CQ ring entries manually.
3515 	 */
3516 	p->sq_entries = roundup_pow_of_two(entries);
3517 	if (p->flags & IORING_SETUP_CQSIZE) {
3518 		/*
3519 		 * If IORING_SETUP_CQSIZE is set, we do the same roundup
3520 		 * to a power-of-two, if it isn't already. We do NOT impose
3521 		 * any cq vs sq ring sizing.
3522 		 */
3523 		if (!p->cq_entries)
3524 			return -EINVAL;
3525 		if (p->cq_entries > IORING_MAX_CQ_ENTRIES) {
3526 			if (!(p->flags & IORING_SETUP_CLAMP))
3527 				return -EINVAL;
3528 			p->cq_entries = IORING_MAX_CQ_ENTRIES;
3529 		}
3530 		p->cq_entries = roundup_pow_of_two(p->cq_entries);
3531 		if (p->cq_entries < p->sq_entries)
3532 			return -EINVAL;
3533 	} else {
3534 		p->cq_entries = 2 * p->sq_entries;
3535 	}
3536 
3537 	ctx = io_ring_ctx_alloc(p);
3538 	if (!ctx)
3539 		return -ENOMEM;
3540 
3541 	ctx->clockid = CLOCK_MONOTONIC;
3542 	ctx->clock_offset = 0;
3543 
3544 	if ((ctx->flags & IORING_SETUP_DEFER_TASKRUN) &&
3545 	    !(ctx->flags & IORING_SETUP_IOPOLL) &&
3546 	    !(ctx->flags & IORING_SETUP_SQPOLL))
3547 		ctx->task_complete = true;
3548 
3549 	if (ctx->task_complete || (ctx->flags & IORING_SETUP_IOPOLL))
3550 		ctx->lockless_cq = true;
3551 
3552 	/*
3553 	 * lazy poll_wq activation relies on ->task_complete for synchronisation
3554 	 * purposes, see io_activate_pollwq()
3555 	 */
3556 	if (!ctx->task_complete)
3557 		ctx->poll_activated = true;
3558 
3559 	/*
3560 	 * When SETUP_IOPOLL and SETUP_SQPOLL are both enabled, user
3561 	 * space applications don't need to do io completion events
3562 	 * polling again, they can rely on io_sq_thread to do polling
3563 	 * work, which can reduce cpu usage and uring_lock contention.
3564 	 */
3565 	if (ctx->flags & IORING_SETUP_IOPOLL &&
3566 	    !(ctx->flags & IORING_SETUP_SQPOLL))
3567 		ctx->syscall_iopoll = 1;
3568 
3569 	ctx->compat = in_compat_syscall();
3570 	if (!ns_capable_noaudit(&init_user_ns, CAP_IPC_LOCK))
3571 		ctx->user = get_uid(current_user());
3572 
3573 	/*
3574 	 * For SQPOLL, we just need a wakeup, always. For !SQPOLL, if
3575 	 * COOP_TASKRUN is set, then IPIs are never needed by the app.
3576 	 */
3577 	ret = -EINVAL;
3578 	if (ctx->flags & IORING_SETUP_SQPOLL) {
3579 		/* IPI related flags don't make sense with SQPOLL */
3580 		if (ctx->flags & (IORING_SETUP_COOP_TASKRUN |
3581 				  IORING_SETUP_TASKRUN_FLAG |
3582 				  IORING_SETUP_DEFER_TASKRUN))
3583 			goto err;
3584 		ctx->notify_method = TWA_SIGNAL_NO_IPI;
3585 	} else if (ctx->flags & IORING_SETUP_COOP_TASKRUN) {
3586 		ctx->notify_method = TWA_SIGNAL_NO_IPI;
3587 	} else {
3588 		if (ctx->flags & IORING_SETUP_TASKRUN_FLAG &&
3589 		    !(ctx->flags & IORING_SETUP_DEFER_TASKRUN))
3590 			goto err;
3591 		ctx->notify_method = TWA_SIGNAL;
3592 	}
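	/*
	 * Net effect of the checks above: SQPOLL and COOP_TASKRUN both
	 * select TWA_SIGNAL_NO_IPI, everything else gets TWA_SIGNAL, and
	 * IORING_SETUP_TASKRUN_FLAG is only accepted alongside
	 * COOP_TASKRUN or DEFER_TASKRUN.
	 */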
3593 
3594 	/*
3595 	 * For DEFER_TASKRUN we require the completion task to be the same as the
3596 	 * submission task. This implies that there is only one submitter, so enforce
3597 	 * that.
3598 	 */
3599 	if (ctx->flags & IORING_SETUP_DEFER_TASKRUN &&
3600 	    !(ctx->flags & IORING_SETUP_SINGLE_ISSUER)) {
3601 		goto err;
3602 	}
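	/*
	 * Hedged userspace sketch of a flag combination that passes the
	 * check above (illustrative only, not part of this file):
	 *
	 *	struct io_uring_params p = {
	 *		.flags = IORING_SETUP_SINGLE_ISSUER |
	 *			 IORING_SETUP_DEFER_TASKRUN,
	 *	};
	 *	int ring_fd = syscall(__NR_io_uring_setup, 64, &p);
	 */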
3603 
3604 	/*
3605 	 * This is just grabbed for accounting purposes. When a process exits,
3606 	 * the mm is exited and dropped before the files, hence we need to hang
3607 	 * on to this mm purely for the purposes of being able to unaccount
3608 	 * memory (locked/pinned vm). It's not used for anything else.
3609 	 */
3610 	mmgrab(current->mm);
3611 	ctx->mm_account = current->mm;
3612 
3613 	ret = io_allocate_scq_urings(ctx, p);
3614 	if (ret)
3615 		goto err;
3616 
3617 	ret = io_sq_offload_create(ctx, p);
3618 	if (ret)
3619 		goto err;
3620 
3621 	ret = io_rsrc_init(ctx);
3622 	if (ret)
3623 		goto err;
3624 
3625 	p->sq_off.head = offsetof(struct io_rings, sq.head);
3626 	p->sq_off.tail = offsetof(struct io_rings, sq.tail);
3627 	p->sq_off.ring_mask = offsetof(struct io_rings, sq_ring_mask);
3628 	p->sq_off.ring_entries = offsetof(struct io_rings, sq_ring_entries);
3629 	p->sq_off.flags = offsetof(struct io_rings, sq_flags);
3630 	p->sq_off.dropped = offsetof(struct io_rings, sq_dropped);
3631 	if (!(ctx->flags & IORING_SETUP_NO_SQARRAY))
3632 		p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings;
3633 	p->sq_off.resv1 = 0;
3634 	if (!(ctx->flags & IORING_SETUP_NO_MMAP))
3635 		p->sq_off.user_addr = 0;
3636 
3637 	p->cq_off.head = offsetof(struct io_rings, cq.head);
3638 	p->cq_off.tail = offsetof(struct io_rings, cq.tail);
3639 	p->cq_off.ring_mask = offsetof(struct io_rings, cq_ring_mask);
3640 	p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries);
3641 	p->cq_off.overflow = offsetof(struct io_rings, cq_overflow);
3642 	p->cq_off.cqes = offsetof(struct io_rings, cqes);
3643 	p->cq_off.flags = offsetof(struct io_rings, cq_flags);
3644 	p->cq_off.resv1 = 0;
3645 	if (!(ctx->flags & IORING_SETUP_NO_MMAP))
3646 		p->cq_off.user_addr = 0;
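	/*
	 * Hedged userspace sketch of how these offsets are consumed
	 * (mirrors what liburing does; ring_fd and sq_ring_sz are assumed
	 * to be obtained by the caller):
	 *
	 *	char *sq = mmap(NULL, sq_ring_sz, PROT_READ | PROT_WRITE,
	 *			MAP_SHARED | MAP_POPULATE, ring_fd,
	 *			IORING_OFF_SQ_RING);
	 *	unsigned *sq_tail = (unsigned *)(sq + p.sq_off.tail);
	 *	unsigned *sq_mask = (unsigned *)(sq + p.sq_off.ring_mask);
	 */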
3647 
3648 	p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP |
3649 			IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS |
3650 			IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL |
3651 			IORING_FEAT_POLL_32BITS | IORING_FEAT_SQPOLL_NONFIXED |
3652 			IORING_FEAT_EXT_ARG | IORING_FEAT_NATIVE_WORKERS |
3653 			IORING_FEAT_RSRC_TAGS | IORING_FEAT_CQE_SKIP |
3654 			IORING_FEAT_LINKED_FILE | IORING_FEAT_REG_REG_RING |
3655 			IORING_FEAT_RECVSEND_BUNDLE | IORING_FEAT_MIN_TIMEOUT;
3656 
3657 	if (copy_to_user(params, p, sizeof(*p))) {
3658 		ret = -EFAULT;
3659 		goto err;
3660 	}
3661 
3662 	if ((ctx->flags & IORING_SETUP_SINGLE_ISSUER) &&
3663 	    !(ctx->flags & IORING_SETUP_R_DISABLED))
3664 		WRITE_ONCE(ctx->submitter_task, get_task_struct(current));
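	/*
	 * For IORING_SETUP_R_DISABLED rings the submitter task is instead
	 * recorded when the ring is enabled via
	 * io_uring_register(IORING_REGISTER_ENABLE_RINGS).
	 */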
3665 
3666 	file = io_uring_get_file(ctx);
3667 	if (IS_ERR(file)) {
3668 		ret = PTR_ERR(file);
3669 		goto err;
3670 	}
3671 
3672 	ret = __io_uring_add_tctx_node(ctx);
3673 	if (ret)
3674 		goto err_fput;
3675 	tctx = current->io_uring;
3676 
3677 	/*
3678 	 * Install ring fd as the very last thing, so we don't risk someone
3679 	 * having closed it before we finish setup
3680 	 */
3681 	if (p->flags & IORING_SETUP_REGISTERED_FD_ONLY)
3682 		ret = io_ring_add_registered_file(tctx, file, 0, IO_RINGFD_REG_MAX);
3683 	else
3684 		ret = io_uring_install_fd(file);
3685 	if (ret < 0)
3686 		goto err_fput;
3687 
3688 	trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
3689 	return ret;
3690 err:
3691 	io_ring_ctx_wait_and_kill(ctx);
3692 	return ret;
3693 err_fput:
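	/*
	 * Once the ring file exists it owns the ctx: dropping the final
	 * file reference tears the ctx down via ->release(), so a plain
	 * fput() is sufficient on this path.
	 */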
3694 	fput(file);
3695 	return ret;
3696 }
3697 
3698 /*
3699  * Sets up an io_uring context and returns the fd. The application asks for
3700  * a ring size; we return the actual sq/cq ring sizes (among other things)
3701  * in the params structure passed in.
3702  */
3703 static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
3704 {
3705 	struct io_uring_params p;
3706 	int i;
3707 
3708 	if (copy_from_user(&p, params, sizeof(p)))
3709 		return -EFAULT;
3710 	for (i = 0; i < ARRAY_SIZE(p.resv); i++) {
3711 		if (p.resv[i])
3712 			return -EINVAL;
3713 	}
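	/*
	 * Illustrative note for callers: since every reserved field must
	 * be zero, userspace should zero the whole struct first, e.g.
	 * memset(&p, 0, sizeof(p)), before setting any flags.
	 */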
3714 
3715 	if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL |
3716 			IORING_SETUP_SQ_AFF | IORING_SETUP_CQSIZE |
3717 			IORING_SETUP_CLAMP | IORING_SETUP_ATTACH_WQ |
3718 			IORING_SETUP_R_DISABLED | IORING_SETUP_SUBMIT_ALL |
3719 			IORING_SETUP_COOP_TASKRUN | IORING_SETUP_TASKRUN_FLAG |
3720 			IORING_SETUP_SQE128 | IORING_SETUP_CQE32 |
3721 			IORING_SETUP_SINGLE_ISSUER | IORING_SETUP_DEFER_TASKRUN |
3722 			IORING_SETUP_NO_MMAP | IORING_SETUP_REGISTERED_FD_ONLY |
3723 			IORING_SETUP_NO_SQARRAY))
3724 		return -EINVAL;
3725 
3726 	return io_uring_create(entries, &p, params);
3727 }
3728 
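/*
 * Gate ring creation on the kernel.io_uring_disabled sysctl: 0 allows
 * everyone, 1 restricts creation to processes with CAP_SYS_ADMIN or
 * membership in the group named by kernel.io_uring_group, and 2
 * disables io_uring creation entirely.
 */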
3729 static inline bool io_uring_allowed(void)
3730 {
3731 	int disabled = READ_ONCE(sysctl_io_uring_disabled);
3732 	kgid_t io_uring_group;
3733 
3734 	if (disabled == 2)
3735 		return false;
3736 
3737 	if (disabled == 0 || capable(CAP_SYS_ADMIN))
3738 		return true;
3739 
3740 	io_uring_group = make_kgid(&init_user_ns, sysctl_io_uring_group);
3741 	if (!gid_valid(io_uring_group))
3742 		return false;
3743 
3744 	return in_group_p(io_uring_group);
3745 }
3746 
3747 SYSCALL_DEFINE2(io_uring_setup, u32, entries,
3748 		struct io_uring_params __user *, params)
3749 {
3750 	if (!io_uring_allowed())
3751 		return -EPERM;
3752 
3753 	return io_uring_setup(entries, params);
3754 }
3755 
3756 static int __init io_uring_init(void)
3757 {
3758 	struct kmem_cache_args kmem_args = {
3759 		.useroffset = offsetof(struct io_kiocb, cmd.data),
3760 		.usersize = sizeof_field(struct io_kiocb, cmd.data),
3761 	};
3762 
3763 #define __BUILD_BUG_VERIFY_OFFSET_SIZE(stype, eoffset, esize, ename) do { \
3764 	BUILD_BUG_ON(offsetof(stype, ename) != eoffset); \
3765 	BUILD_BUG_ON(sizeof_field(stype, ename) != esize); \
3766 } while (0)
3767 
3768 #define BUILD_BUG_SQE_ELEM(eoffset, etype, ename) \
3769 	__BUILD_BUG_VERIFY_OFFSET_SIZE(struct io_uring_sqe, eoffset, sizeof(etype), ename)
3770 #define BUILD_BUG_SQE_ELEM_SIZE(eoffset, esize, ename) \
3771 	__BUILD_BUG_VERIFY_OFFSET_SIZE(struct io_uring_sqe, eoffset, esize, ename)
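	/*
	 * The checks below pin the UAPI layout of struct io_uring_sqe:
	 * accidentally reordering or resizing a field fails the build
	 * instead of silently breaking userspace.
	 */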
3772 	BUILD_BUG_ON(sizeof(struct io_uring_sqe) != 64);
3773 	BUILD_BUG_SQE_ELEM(0,  __u8,   opcode);
3774 	BUILD_BUG_SQE_ELEM(1,  __u8,   flags);
3775 	BUILD_BUG_SQE_ELEM(2,  __u16,  ioprio);
3776 	BUILD_BUG_SQE_ELEM(4,  __s32,  fd);
3777 	BUILD_BUG_SQE_ELEM(8,  __u64,  off);
3778 	BUILD_BUG_SQE_ELEM(8,  __u64,  addr2);
3779 	BUILD_BUG_SQE_ELEM(8,  __u32,  cmd_op);
3780 	BUILD_BUG_SQE_ELEM(12, __u32, __pad1);
3781 	BUILD_BUG_SQE_ELEM(16, __u64,  addr);
3782 	BUILD_BUG_SQE_ELEM(16, __u64,  splice_off_in);
3783 	BUILD_BUG_SQE_ELEM(24, __u32,  len);
3784 	BUILD_BUG_SQE_ELEM(28,     __kernel_rwf_t, rw_flags);
3785 	BUILD_BUG_SQE_ELEM(28, /* compat */   int, rw_flags);
3786 	BUILD_BUG_SQE_ELEM(28, /* compat */ __u32, rw_flags);
3787 	BUILD_BUG_SQE_ELEM(28, __u32,  fsync_flags);
3788 	BUILD_BUG_SQE_ELEM(28, /* compat */ __u16,  poll_events);
3789 	BUILD_BUG_SQE_ELEM(28, __u32,  poll32_events);
3790 	BUILD_BUG_SQE_ELEM(28, __u32,  sync_range_flags);
3791 	BUILD_BUG_SQE_ELEM(28, __u32,  msg_flags);
3792 	BUILD_BUG_SQE_ELEM(28, __u32,  timeout_flags);
3793 	BUILD_BUG_SQE_ELEM(28, __u32,  accept_flags);
3794 	BUILD_BUG_SQE_ELEM(28, __u32,  cancel_flags);
3795 	BUILD_BUG_SQE_ELEM(28, __u32,  open_flags);
3796 	BUILD_BUG_SQE_ELEM(28, __u32,  statx_flags);
3797 	BUILD_BUG_SQE_ELEM(28, __u32,  fadvise_advice);
3798 	BUILD_BUG_SQE_ELEM(28, __u32,  splice_flags);
3799 	BUILD_BUG_SQE_ELEM(28, __u32,  rename_flags);
3800 	BUILD_BUG_SQE_ELEM(28, __u32,  unlink_flags);
3801 	BUILD_BUG_SQE_ELEM(28, __u32,  hardlink_flags);
3802 	BUILD_BUG_SQE_ELEM(28, __u32,  xattr_flags);
3803 	BUILD_BUG_SQE_ELEM(28, __u32,  msg_ring_flags);
3804 	BUILD_BUG_SQE_ELEM(32, __u64,  user_data);
3805 	BUILD_BUG_SQE_ELEM(40, __u16,  buf_index);
3806 	BUILD_BUG_SQE_ELEM(40, __u16,  buf_group);
3807 	BUILD_BUG_SQE_ELEM(42, __u16,  personality);
3808 	BUILD_BUG_SQE_ELEM(44, __s32,  splice_fd_in);
3809 	BUILD_BUG_SQE_ELEM(44, __u32,  file_index);
3810 	BUILD_BUG_SQE_ELEM(44, __u16,  addr_len);
3811 	BUILD_BUG_SQE_ELEM(46, __u16,  __pad3[0]);
3812 	BUILD_BUG_SQE_ELEM(48, __u64,  addr3);
3813 	BUILD_BUG_SQE_ELEM_SIZE(48, 0, cmd);
3814 	BUILD_BUG_SQE_ELEM(56, __u64,  __pad2);
3815 
3816 	BUILD_BUG_ON(sizeof(struct io_uring_files_update) !=
3817 		     sizeof(struct io_uring_rsrc_update));
3818 	BUILD_BUG_ON(sizeof(struct io_uring_rsrc_update) >
3819 		     sizeof(struct io_uring_rsrc_update2));
3820 
3821 	/* ->buf_index is u16 */
3822 	BUILD_BUG_ON(offsetof(struct io_uring_buf_ring, bufs) != 0);
3823 	BUILD_BUG_ON(offsetof(struct io_uring_buf, resv) !=
3824 		     offsetof(struct io_uring_buf_ring, tail));
3825 
3826 	/* should fit into one byte */
3827 	BUILD_BUG_ON(SQE_VALID_FLAGS >= (1 << 8));
3828 	BUILD_BUG_ON(SQE_COMMON_FLAGS >= (1 << 8));
3829 	BUILD_BUG_ON((SQE_VALID_FLAGS | SQE_COMMON_FLAGS) != SQE_VALID_FLAGS);
3830 
3831 	BUILD_BUG_ON(__REQ_F_LAST_BIT > 8 * sizeof_field(struct io_kiocb, flags));
3832 
3833 	BUILD_BUG_ON(sizeof(atomic_t) != sizeof(u32));
3834 
3835 	/* top 8 bits are for internal use */
3836 	BUILD_BUG_ON((IORING_URING_CMD_MASK & 0xff000000) != 0);
3837 
3838 	io_uring_optable_init();
3839 
3840 	/*
3841 	 * Allow user copy in the per-command field, which starts after the
3842 	 * file in io_kiocb and runs until the opcode field. The openat2
3843 	 * handling requires copying user memory into the io_kiocb in that
3844 	 * range, and HARDENED_USERCOPY will complain if we haven't
3845 	 * correctly annotated this range.
3846 	 */
3847 	req_cachep = kmem_cache_create("io_kiocb", sizeof(struct io_kiocb), &kmem_args,
3848 				SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT |
3849 				SLAB_TYPESAFE_BY_RCU);
3850 	io_buf_cachep = KMEM_CACHE(io_buffer,
3851 					  SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT);
3852 
3853 	iou_wq = alloc_workqueue("iou_exit", WQ_UNBOUND, 64);
3854 
3855 #ifdef CONFIG_SYSCTL
3856 	register_sysctl_init("kernel", kernel_io_uring_disabled_table);
3857 #endif
3858 
3859 	return 0;
3860 }
3861 __initcall(io_uring_init);
3862