xref: /linux/fs/aio.c (revision 7a5f1cd22d47f8ca4b760b6334378ae42c1bd24b)
1 /*
2  *	An async IO implementation for Linux
3  *	Written by Benjamin LaHaise <bcrl@kvack.org>
4  *
5  *	Implements an efficient asynchronous io interface.
6  *
7  *	Copyright 2000, 2001, 2002 Red Hat, Inc.  All Rights Reserved.
8  *	Copyright 2018 Christoph Hellwig.
9  *
10  *	See ../COPYING for licensing terms.
11  */
12 #define pr_fmt(fmt) "%s: " fmt, __func__
13 
14 #include <linux/kernel.h>
15 #include <linux/init.h>
16 #include <linux/errno.h>
17 #include <linux/time.h>
18 #include <linux/aio_abi.h>
19 #include <linux/export.h>
20 #include <linux/syscalls.h>
21 #include <linux/backing-dev.h>
22 #include <linux/refcount.h>
23 #include <linux/uio.h>
24 
25 #include <linux/sched/signal.h>
26 #include <linux/fs.h>
27 #include <linux/file.h>
28 #include <linux/mm.h>
29 #include <linux/mman.h>
30 #include <linux/percpu.h>
31 #include <linux/slab.h>
32 #include <linux/timer.h>
33 #include <linux/aio.h>
34 #include <linux/highmem.h>
35 #include <linux/workqueue.h>
36 #include <linux/security.h>
37 #include <linux/eventfd.h>
38 #include <linux/blkdev.h>
39 #include <linux/compat.h>
40 #include <linux/migrate.h>
41 #include <linux/ramfs.h>
42 #include <linux/percpu-refcount.h>
43 #include <linux/mount.h>
44 #include <linux/pseudo_fs.h>
45 
46 #include <linux/uaccess.h>
47 #include <linux/nospec.h>
48 
49 #include "internal.h"
50 
51 #define KIOCB_KEY		0
52 
53 #define AIO_RING_MAGIC			0xa10a10a1
54 #define AIO_RING_COMPAT_FEATURES	1
55 #define AIO_RING_INCOMPAT_FEATURES	0
56 struct aio_ring {
57 	unsigned	id;	/* kernel internal index number */
58 	unsigned	nr;	/* number of io_events */
59 	unsigned	head;	/* Written to by userland or under ring_lock
60 				 * mutex by aio_read_events_ring(). */
61 	unsigned	tail;
62 
63 	unsigned	magic;
64 	unsigned	compat_features;
65 	unsigned	incompat_features;
66 	unsigned	header_length;	/* size of aio_ring */
67 
68 
69 	struct io_event		io_events[];
70 }; /* 128 bytes + ring size */
71 
72 /*
73  * Plugging is meant to work with larger batches of IOs. If we don't
74  * have more than the below, then don't bother setting up a plug.
75  */
76 #define AIO_PLUG_THRESHOLD	2
77 
78 #define AIO_RING_PAGES	8
79 
80 struct kioctx_table {
81 	struct rcu_head		rcu;
82 	unsigned		nr;
83 	struct kioctx __rcu	*table[] __counted_by(nr);
84 };
85 
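/*
 * Per-cpu cache of free request slots.  Slots move between this cache and
 * the global ctx->reqs_available counter in batches of ctx->req_batch
 * (see __get_reqs_available() and put_reqs_available()).
 */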
86 struct kioctx_cpu {
87 	unsigned		reqs_available;
88 };
89 
90 struct ctx_rq_wait {
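/* Used by io_destroy() and exit_aio() to wait for in-flight requests. */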
91 	struct completion comp;
92 	atomic_t count;
93 };
94 
95 struct kioctx {
96 	struct percpu_ref	users;
97 	atomic_t		dead;
98 
99 	struct percpu_ref	reqs;
100 
101 	unsigned long		user_id;
102 
103 	struct kioctx_cpu __percpu *cpu;
104 
105 	/*
106 	 * For percpu reqs_available, number of slots we move to/from global
107 	 * counter at a time:
108 	 */
109 	unsigned		req_batch;
110 	/*
111 	 * This is what userspace passed to io_setup(); it's not used for
112 	 * anything but counting against the global max_reqs quota.
113 	 *
114 	 * The real limit is nr_events - 1, which will be larger (see
115 	 * aio_setup_ring())
116 	 */
117 	unsigned		max_reqs;
118 
119 	/* Size of ringbuffer, in units of struct io_event */
120 	unsigned		nr_events;
121 
122 	unsigned long		mmap_base;
123 	unsigned long		mmap_size;
124 
125 	struct folio		**ring_folios;
126 	long			nr_pages;
127 
128 	struct rcu_work		free_rwork;	/* see free_ioctx() */
129 
130 	/*
131 	 * signals when all in-flight requests are done
132 	 */
133 	struct ctx_rq_wait	*rq_wait;
134 
135 	struct {
136 		/*
137 		 * This counts the number of available slots in the ringbuffer,
138 		 * so we avoid overflowing it: it's decremented (if positive)
139 		 * when allocating a kiocb and incremented when the resulting
140 		 * io_event is pulled off the ringbuffer.
141 		 *
142 		 * We batch accesses to it with a percpu version.
143 		 */
144 		atomic_t	reqs_available;
145 	} ____cacheline_aligned_in_smp;
146 
147 	struct {
148 		spinlock_t	ctx_lock;
149 		struct list_head active_reqs;	/* used for cancellation */
150 	} ____cacheline_aligned_in_smp;
151 
152 	struct {
153 		struct mutex	ring_lock;
154 		wait_queue_head_t wait;
155 	} ____cacheline_aligned_in_smp;
156 
157 	struct {
158 		unsigned	tail;
159 		unsigned	completed_events;
160 		spinlock_t	completion_lock;
161 	} ____cacheline_aligned_in_smp;
162 
163 	struct folio		*internal_folios[AIO_RING_PAGES];
164 	struct file		*aio_ring_file;
165 
166 	unsigned		id;
167 };
168 
169 /*
170  * First field must be the file pointer in all the
171  * iocb unions! See also 'struct kiocb' in <linux/fs.h>
172  */
173 struct fsync_iocb {
174 	struct file		*file;
175 	struct work_struct	work;
176 	bool			datasync;
177 	struct cred		*creds;
178 };
179 
180 struct poll_iocb {
181 	struct file		*file;
182 	struct wait_queue_head	*head;
183 	__poll_t		events;
184 	bool			cancelled;
185 	bool			work_scheduled;
186 	bool			work_need_resched;
187 	struct wait_queue_entry	wait;
188 	struct work_struct	work;
189 };
190 
191 /*
192  * NOTE! Each of the iocb union members has the file pointer
193  * as the first entry in their struct definition. So you can
194  * access the file pointer through any of the sub-structs,
195  * or directly as just 'ki_filp' in this struct.
196  */
197 struct aio_kiocb {
198 	union {
199 		struct file		*ki_filp;
200 		struct kiocb		rw;
201 		struct fsync_iocb	fsync;
202 		struct poll_iocb	poll;
203 	};
204 
205 	struct kioctx		*ki_ctx;
206 	kiocb_cancel_fn		*ki_cancel;
207 
208 	struct io_event		ki_res;
209 
210 	struct list_head	ki_list;	/* the aio core uses this
211 						 * for cancellation */
212 	refcount_t		ki_refcnt;
213 
214 	/*
215 	 * If the aio_resfd field of the userspace iocb is not zero,
216 	 * this is the underlying eventfd context to deliver events to.
217 	 */
218 	struct eventfd_ctx	*ki_eventfd;
219 };
220 
221 struct aio_inode_info {
222 	struct inode vfs_inode;
223 	spinlock_t migrate_lock;
224 	struct kioctx *ctx;
225 };
226 
227 static inline struct aio_inode_info *AIO_I(struct inode *inode)
228 {
229 	return container_of(inode, struct aio_inode_info, vfs_inode);
230 }
231 
232 /*------ sysctl variables----*/
233 static DEFINE_SPINLOCK(aio_nr_lock);
234 static unsigned long aio_nr;		/* current system wide number of aio requests */
235 static unsigned long aio_max_nr = 0x10000; /* system wide maximum number of aio requests */
236 /*----end sysctl variables---*/
237 #ifdef CONFIG_SYSCTL
238 static const struct ctl_table aio_sysctls[] = {
239 	{
240 		.procname	= "aio-nr",
241 		.data		= &aio_nr,
242 		.maxlen		= sizeof(aio_nr),
243 		.mode		= 0444,
244 		.proc_handler	= proc_doulongvec_minmax,
245 	},
246 	{
247 		.procname	= "aio-max-nr",
248 		.data		= &aio_max_nr,
249 		.maxlen		= sizeof(aio_max_nr),
250 		.mode		= 0644,
251 		.proc_handler	= proc_doulongvec_minmax,
252 	},
253 };
254 
255 static void __init aio_sysctl_init(void)
256 {
257 	register_sysctl_init("fs", aio_sysctls);
258 }
259 #else
260 #define aio_sysctl_init() do { } while (0)
261 #endif
262 
263 static struct kmem_cache	*kiocb_cachep;
264 static struct kmem_cache	*kioctx_cachep;
265 static struct kmem_cache	*aio_inode_cachep;
266 
267 static struct vfsmount *aio_mnt;
268 
269 static const struct file_operations aio_ring_fops;
270 static const struct address_space_operations aio_ctx_aops;
271 
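/*
 * Create the anonymous inode and file backing the aio ring buffer.  Its
 * address_space supplies the ring folios, which are mapped into userspace
 * and may later be migrated via aio_migrate_folio().
 */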
272 static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages)
273 {
274 	struct file *file;
275 	struct inode *inode = alloc_anon_inode(aio_mnt->mnt_sb);
276 
277 	if (IS_ERR(inode))
278 		return ERR_CAST(inode);
279 
280 	inode->i_mapping->a_ops = &aio_ctx_aops;
281 	AIO_I(inode)->ctx = ctx;
282 	inode->i_size = PAGE_SIZE * nr_pages;
283 
284 	file = alloc_file_pseudo(inode, aio_mnt, "[aio]",
285 				O_RDWR, &aio_ring_fops);
286 	if (IS_ERR(file))
287 		iput(inode);
288 	return file;
289 }
290 
291 static struct inode *aio_alloc_inode(struct super_block *sb)
292 {
293 	struct aio_inode_info *ai;
294 
295 	ai = alloc_inode_sb(sb, aio_inode_cachep, GFP_KERNEL);
296 	if (!ai)
297 		return NULL;
298 	ai->ctx = NULL;
299 
300 	return &ai->vfs_inode;
301 }
302 
303 static void aio_free_inode(struct inode *inode)
304 {
305 	kmem_cache_free(aio_inode_cachep, AIO_I(inode));
306 }
307 
308 static const struct super_operations aio_super_operations = {
309 	.alloc_inode	= aio_alloc_inode,
310 	.free_inode	= aio_free_inode,
311 	.statfs		= simple_statfs,
312 };
313 
314 static int aio_init_fs_context(struct fs_context *fc)
315 {
316 	struct pseudo_fs_context *pfc;
317 
318 	pfc = init_pseudo(fc, AIO_RING_MAGIC);
319 	if (!pfc)
320 		return -ENOMEM;
321 	fc->s_iflags |= SB_I_NOEXEC;
322 	pfc->ops = &aio_super_operations;
323 	return 0;
324 }
325 
326 static void init_once(void *obj)
327 {
328 	struct aio_inode_info *ai = obj;
329 
330 	inode_init_once(&ai->vfs_inode);
331 	spin_lock_init(&ai->migrate_lock);
332 }
333 
334 /* aio_setup
335  *	Creates the slab caches used by the aio routines, panic on
336  *	failure as this is done early during the boot sequence.
337  */
338 static int __init aio_setup(void)
339 {
340 	static struct file_system_type aio_fs = {
341 		.name		= "aio",
342 		.init_fs_context = aio_init_fs_context,
343 		.kill_sb	= kill_anon_super,
344 	};
345 
346 	aio_inode_cachep = kmem_cache_create("aio_inode_cache",
347 				sizeof(struct aio_inode_info), 0,
348 				(SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_ACCOUNT),
349 				init_once);
350 	aio_mnt = kern_mount(&aio_fs);
351 	if (IS_ERR(aio_mnt))
352 		panic("Failed to create aio fs mount.");
353 
354 	kiocb_cachep = KMEM_CACHE(aio_kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
355 	kioctx_cachep = KMEM_CACHE(kioctx, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
356 	aio_sysctl_init();
357 	return 0;
358 }
359 __initcall(aio_setup);
360 
361 static void put_aio_ring_file(struct kioctx *ctx)
362 {
363 	struct file *aio_ring_file = ctx->aio_ring_file;
364 
365 	if (aio_ring_file) {
366 		struct inode *inode = file_inode(aio_ring_file);
367 
368 		truncate_setsize(inode, 0);
369 
370 		/* Prevent further access to the kioctx from migratepages */
371 		spin_lock(&AIO_I(inode)->migrate_lock);
372 		AIO_I(inode)->ctx = NULL;
373 		ctx->aio_ring_file = NULL;
374 		spin_unlock(&AIO_I(inode)->migrate_lock);
375 
376 		fput(aio_ring_file);
377 	}
378 }
379 
380 static void aio_free_ring(struct kioctx *ctx)
381 {
382 	int i;
383 
384 	/* Disconnect the kioctx from the ring file.  This prevents future
385 	 * accesses to the kioctx from page migration.
386 	 */
387 	put_aio_ring_file(ctx);
388 
389 	for (i = 0; i < ctx->nr_pages; i++) {
390 		struct folio *folio = ctx->ring_folios[i];
391 
392 		if (!folio)
393 			continue;
394 
395 		pr_debug("pid(%d) [%d] folio->count=%d\n", current->pid, i,
396 			 folio_ref_count(folio));
397 		ctx->ring_folios[i] = NULL;
398 		folio_put(folio);
399 	}
400 
401 	if (ctx->ring_folios && ctx->ring_folios != ctx->internal_folios) {
402 		kfree(ctx->ring_folios);
403 		ctx->ring_folios = NULL;
404 	}
405 }
406 
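/*
 * Userspace moved the ring mapping with mremap(): find the owning kioctx
 * in mm->ioctx_table and point ctx->user_id/ctx->mmap_base at the new
 * address.
 */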
407 static int aio_ring_mremap(struct vm_area_struct *vma)
408 {
409 	struct file *file = vma->vm_file;
410 	struct mm_struct *mm = vma->vm_mm;
411 	struct kioctx_table *table;
412 	int i, res = -EINVAL;
413 
414 	spin_lock(&mm->ioctx_lock);
415 	rcu_read_lock();
416 	table = rcu_dereference(mm->ioctx_table);
417 	if (!table)
418 		goto out_unlock;
419 
420 	for (i = 0; i < table->nr; i++) {
421 		struct kioctx *ctx;
422 
423 		ctx = rcu_dereference(table->table[i]);
424 		if (ctx && ctx->aio_ring_file == file) {
425 			if (!atomic_read(&ctx->dead) &&
426 			    (ctx->mmap_size == (vma->vm_end - vma->vm_start))) {
427 				ctx->user_id = ctx->mmap_base = vma->vm_start;
428 				res = 0;
429 			}
430 			break;
431 		}
432 	}
433 
434 out_unlock:
435 	rcu_read_unlock();
436 	spin_unlock(&mm->ioctx_lock);
437 	return res;
438 }
439 
440 static const struct vm_operations_struct aio_ring_vm_ops = {
441 	.mremap		= aio_ring_mremap,
442 #if IS_ENABLED(CONFIG_MMU)
443 	.fault		= filemap_fault,
444 	.map_pages	= filemap_map_pages,
445 	.page_mkwrite	= filemap_page_mkwrite,
446 #endif
447 };
448 
449 static int aio_ring_mmap_prepare(struct vm_area_desc *desc)
450 {
451 	vma_desc_set_flags(desc, VMA_DONTEXPAND_BIT, VMA_DONTCOPY_BIT);
452 	desc->vm_ops = &aio_ring_vm_ops;
453 	return 0;
454 }
455 
456 static const struct file_operations aio_ring_fops = {
457 	.mmap_prepare = aio_ring_mmap_prepare,
458 };
459 
460 #if IS_ENABLED(CONFIG_MIGRATION)
461 static int aio_migrate_folio(struct address_space *mapping, struct folio *dst,
462 			struct folio *src, enum migrate_mode mode)
463 {
464 	struct kioctx *ctx;
465 	struct aio_inode_info *ai = AIO_I(mapping->host);
466 	unsigned long flags;
467 	pgoff_t idx;
468 	int rc = 0;
469 
470 	/* ai->migrate_lock here protects against the kioctx teardown.  */
471 	spin_lock(&ai->migrate_lock);
472 	ctx = ai->ctx;
473 	if (!ctx) {
474 		rc = -EINVAL;
475 		goto out;
476 	}
477 
478 	/* The ring_lock mutex.  This prevents aio_read_events() from writing
479 	 * to the ring's head, and prevents page migration from mucking with
480 	 * a partially initialized kioctx.
481 	 */
482 	if (!mutex_trylock(&ctx->ring_lock)) {
483 		rc = -EAGAIN;
484 		goto out;
485 	}
486 
487 	idx = src->index;
488 	if (idx < (pgoff_t)ctx->nr_pages) {
489 		/* Make sure the old folio hasn't already been changed */
490 		if (ctx->ring_folios[idx] != src)
491 			rc = -EAGAIN;
492 	} else
493 		rc = -EINVAL;
494 
495 	if (rc != 0)
496 		goto out_unlock;
497 
498 	/* Writeback must be complete */
499 	BUG_ON(folio_test_writeback(src));
500 	folio_get(dst);
501 
502 	rc = folio_migrate_mapping(mapping, dst, src, 1);
503 	if (rc) {
504 		folio_put(dst);
505 		goto out_unlock;
506 	}
507 
508 	/* Take completion_lock to prevent other writes to the ring buffer
509 	 * while the old folio is copied to the new.  This prevents new
510 	 * events from being lost.
511 	 */
512 	spin_lock_irqsave(&ctx->completion_lock, flags);
513 	folio_copy(dst, src);
514 	folio_migrate_flags(dst, src);
515 	BUG_ON(ctx->ring_folios[idx] != src);
516 	ctx->ring_folios[idx] = dst;
517 	spin_unlock_irqrestore(&ctx->completion_lock, flags);
518 
519 	/* The old folio is no longer accessible. */
520 	folio_put(src);
521 
522 out_unlock:
523 	mutex_unlock(&ctx->ring_lock);
524 out:
525 	spin_unlock(&ai->migrate_lock);
526 	return rc;
527 }
528 #else
529 #define aio_migrate_folio NULL
530 #endif
531 
532 static const struct address_space_operations aio_ctx_aops = {
533 	.dirty_folio	= noop_dirty_folio,
534 	.migrate_folio	= aio_migrate_folio,
535 };
536 
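/*
 * Allocate the ring file and its folios and map the ring into the caller's
 * address space.  Called with ctx->ring_lock held; nr_events is rounded up
 * so the allocated pages are filled completely.
 */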
537 static int aio_setup_ring(struct kioctx *ctx, unsigned int nr_events)
538 {
539 	struct aio_ring *ring;
540 	struct mm_struct *mm = current->mm;
541 	unsigned long size, unused;
542 	int nr_pages;
543 	int i;
544 	struct file *file;
545 
546 	/* Compensate for the ring buffer's head/tail overlap entry */
547 	nr_events += 2;	/* 1 is required, 2 for good luck */
548 
549 	size = sizeof(struct aio_ring);
550 	size += sizeof(struct io_event) * nr_events;
551 
552 	nr_pages = PFN_UP(size);
553 	if (nr_pages < 0)
554 		return -EINVAL;
555 
556 	file = aio_private_file(ctx, nr_pages);
557 	if (IS_ERR(file)) {
558 		ctx->aio_ring_file = NULL;
559 		return -ENOMEM;
560 	}
561 
562 	ctx->aio_ring_file = file;
563 	nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring))
564 			/ sizeof(struct io_event);
565 
566 	ctx->ring_folios = ctx->internal_folios;
567 	if (nr_pages > AIO_RING_PAGES) {
568 		ctx->ring_folios = kzalloc_objs(struct folio *, nr_pages);
569 		if (!ctx->ring_folios) {
570 			put_aio_ring_file(ctx);
571 			return -ENOMEM;
572 		}
573 	}
574 
575 	for (i = 0; i < nr_pages; i++) {
576 		struct folio *folio;
577 
578 		folio = __filemap_get_folio(file->f_mapping, i,
579 					    FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
580 					    GFP_USER | __GFP_ZERO);
581 		if (IS_ERR(folio))
582 			break;
583 
584 		pr_debug("pid(%d) [%d] folio->count=%d\n", current->pid, i,
585 			 folio_ref_count(folio));
586 		folio_end_read(folio, true);
587 
588 		ctx->ring_folios[i] = folio;
589 	}
590 	ctx->nr_pages = i;
591 
592 	if (unlikely(i != nr_pages)) {
593 		aio_free_ring(ctx);
594 		return -ENOMEM;
595 	}
596 
597 	ctx->mmap_size = nr_pages * PAGE_SIZE;
598 	pr_debug("attempting mmap of %lu bytes\n", ctx->mmap_size);
599 
600 	if (mmap_write_lock_killable(mm)) {
601 		ctx->mmap_size = 0;
602 		aio_free_ring(ctx);
603 		return -EINTR;
604 	}
605 
606 	ctx->mmap_base = do_mmap(ctx->aio_ring_file, 0, ctx->mmap_size,
607 				 PROT_READ | PROT_WRITE,
608 				 MAP_SHARED, 0, 0, &unused, NULL);
609 	mmap_write_unlock(mm);
610 	if (IS_ERR((void *)ctx->mmap_base)) {
611 		ctx->mmap_size = 0;
612 		aio_free_ring(ctx);
613 		return -ENOMEM;
614 	}
615 
616 	pr_debug("mmap address: 0x%08lx\n", ctx->mmap_base);
617 
618 	ctx->user_id = ctx->mmap_base;
619 	ctx->nr_events = nr_events; /* trusted copy */
620 
621 	ring = folio_address(ctx->ring_folios[0]);
622 	ring->nr = nr_events;	/* user copy */
623 	ring->id = ~0U;
624 	ring->head = ring->tail = 0;
625 	ring->magic = AIO_RING_MAGIC;
626 	ring->compat_features = AIO_RING_COMPAT_FEATURES;
627 	ring->incompat_features = AIO_RING_INCOMPAT_FEATURES;
628 	ring->header_length = sizeof(struct aio_ring);
629 	flush_dcache_folio(ctx->ring_folios[0]);
630 
631 	return 0;
632 }
633 
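/*
 * Ring layout: the first page starts with the struct aio_ring header and
 * holds AIO_EVENTS_FIRST_PAGE io_events; every later page holds
 * AIO_EVENTS_PER_PAGE.  AIO_EVENTS_OFFSET biases an event index so that
 * dividing by AIO_EVENTS_PER_PAGE selects the right folio and the
 * remainder selects the slot within it.
 */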
634 #define AIO_EVENTS_PER_PAGE	(PAGE_SIZE / sizeof(struct io_event))
635 #define AIO_EVENTS_FIRST_PAGE	((PAGE_SIZE - sizeof(struct aio_ring)) / sizeof(struct io_event))
636 #define AIO_EVENTS_OFFSET	(AIO_EVENTS_PER_PAGE - AIO_EVENTS_FIRST_PAGE)
637 
638 void kiocb_set_cancel_fn(struct kiocb *iocb, kiocb_cancel_fn *cancel)
639 {
640 	struct aio_kiocb *req;
641 	struct kioctx *ctx;
642 	unsigned long flags;
643 
644 	/*
645 	 * kiocb didn't come from aio or is neither a read nor a write, hence
646 	 * ignore it.
647 	 */
648 	if (!(iocb->ki_flags & IOCB_AIO_RW))
649 		return;
650 
651 	req = container_of(iocb, struct aio_kiocb, rw);
652 
653 	if (WARN_ON_ONCE(!list_empty(&req->ki_list)))
654 		return;
655 
656 	ctx = req->ki_ctx;
657 
658 	spin_lock_irqsave(&ctx->ctx_lock, flags);
659 	list_add_tail(&req->ki_list, &ctx->active_reqs);
660 	req->ki_cancel = cancel;
661 	spin_unlock_irqrestore(&ctx->ctx_lock, flags);
662 }
663 EXPORT_SYMBOL(kiocb_set_cancel_fn);
664 
665 /*
666  * free_ioctx() should be RCU delayed to synchronize against the RCU
667  * protected lookup_ioctx() and also needs process context to call
668  * aio_free_ring().  Use rcu_work.
669  */
670 static void free_ioctx(struct work_struct *work)
671 {
672 	struct kioctx *ctx = container_of(to_rcu_work(work), struct kioctx,
673 					  free_rwork);
674 	pr_debug("freeing %p\n", ctx);
675 
676 	aio_free_ring(ctx);
677 	free_percpu(ctx->cpu);
678 	percpu_ref_exit(&ctx->reqs);
679 	percpu_ref_exit(&ctx->users);
680 	kmem_cache_free(kioctx_cachep, ctx);
681 }
682 
683 static void free_ioctx_reqs(struct percpu_ref *ref)
684 {
685 	struct kioctx *ctx = container_of(ref, struct kioctx, reqs);
686 
687 	/* At this point we know that there are no in-flight requests */
688 	if (ctx->rq_wait && atomic_dec_and_test(&ctx->rq_wait->count))
689 		complete(&ctx->rq_wait->comp);
690 
691 	/* Synchronize against RCU protected table->table[] dereferences */
692 	INIT_RCU_WORK(&ctx->free_rwork, free_ioctx);
693 	queue_rcu_work(system_percpu_wq, &ctx->free_rwork);
694 }
695 
696 /*
697  * When this function runs, the kioctx has been removed from the "hash table"
698  * and ctx->users has dropped to 0, so we know no more kiocbs can be submitted -
699  * now it's safe to cancel any that need to be.
700  */
701 static void free_ioctx_users(struct percpu_ref *ref)
702 {
703 	struct kioctx *ctx = container_of(ref, struct kioctx, users);
704 	struct aio_kiocb *req;
705 
706 	spin_lock_irq(&ctx->ctx_lock);
707 
708 	while (!list_empty(&ctx->active_reqs)) {
709 		req = list_first_entry(&ctx->active_reqs,
710 				       struct aio_kiocb, ki_list);
711 		req->ki_cancel(&req->rw);
712 		list_del_init(&req->ki_list);
713 	}
714 
715 	spin_unlock_irq(&ctx->ctx_lock);
716 
717 	percpu_ref_kill(&ctx->reqs);
718 	percpu_ref_put(&ctx->reqs);
719 }
720 
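/*
 * Publish a new kioctx in mm->ioctx_table, growing the RCU-managed table
 * when no free slot exists, and store the table index in the ring header
 * so lookup_ioctx() can find the context again.
 */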
721 static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
722 {
723 	unsigned i, new_nr;
724 	struct kioctx_table *table, *old;
725 	struct aio_ring *ring;
726 
727 	spin_lock(&mm->ioctx_lock);
728 	table = rcu_dereference_raw(mm->ioctx_table);
729 
730 	while (1) {
731 		if (table)
732 			for (i = 0; i < table->nr; i++)
733 				if (!rcu_access_pointer(table->table[i])) {
734 					ctx->id = i;
735 					rcu_assign_pointer(table->table[i], ctx);
736 					spin_unlock(&mm->ioctx_lock);
737 
738 					/* While kioctx setup is in progress,
739 					 * we are protected from page migration
740 					 * changing ring_folios by ->ring_lock.
741 					 */
742 					ring = folio_address(ctx->ring_folios[0]);
743 					ring->id = ctx->id;
744 					return 0;
745 				}
746 
747 		new_nr = (table ? table->nr : 1) * 4;
748 		spin_unlock(&mm->ioctx_lock);
749 
750 		table = kzalloc_flex(*table, table, new_nr);
751 		if (!table)
752 			return -ENOMEM;
753 
754 		table->nr = new_nr;
755 
756 		spin_lock(&mm->ioctx_lock);
757 		old = rcu_dereference_raw(mm->ioctx_table);
758 
759 		if (!old) {
760 			rcu_assign_pointer(mm->ioctx_table, table);
761 		} else if (table->nr > old->nr) {
762 			memcpy(table->table, old->table,
763 			       old->nr * sizeof(struct kioctx *));
764 
765 			rcu_assign_pointer(mm->ioctx_table, table);
766 			kfree_rcu(old, rcu);
767 		} else {
768 			kfree(table);
769 			table = old;
770 		}
771 	}
772 }
773 
774 static void aio_nr_sub(unsigned nr)
775 {
776 	spin_lock(&aio_nr_lock);
777 	if (WARN_ON(aio_nr - nr > aio_nr))
778 		aio_nr = 0;
779 	else
780 		aio_nr -= nr;
781 	spin_unlock(&aio_nr_lock);
782 }
783 
784 /* ioctx_alloc
785  *	Allocates and initializes an ioctx.  Returns an ERR_PTR if it failed.
786  */
787 static struct kioctx *ioctx_alloc(unsigned nr_events)
788 {
789 	struct mm_struct *mm = current->mm;
790 	struct kioctx *ctx;
791 	int err = -ENOMEM;
792 
793 	/*
794 	 * Store the original nr_events -- what userspace passed to io_setup(),
795 	 * for counting against the global limit -- before it changes.
796 	 */
797 	unsigned int max_reqs = nr_events;
798 
799 	/*
800 	 * We keep track of the number of available ringbuffer slots, to prevent
801 	 * overflow (reqs_available), and we also use percpu counters for this.
802 	 *
803 	 * Since up to half the slots might be on other cpus' percpu counters
804 	 * and unavailable, double nr_events so userspace sees what it
805 	 * expected; additionally, we move req_batch slots to/from the percpu
806 	 * counters at a time, so make sure that isn't 0:
807 	 */
808 	nr_events = max(nr_events, num_possible_cpus() * 4);
809 	nr_events *= 2;
810 
811 	/* Prevent overflows */
812 	if (nr_events > (0x10000000U / sizeof(struct io_event))) {
813 		pr_debug("ENOMEM: nr_events too high\n");
814 		return ERR_PTR(-EINVAL);
815 	}
816 
817 	if (!nr_events || (unsigned long)max_reqs > aio_max_nr)
818 		return ERR_PTR(-EAGAIN);
819 
820 	ctx = kmem_cache_zalloc(kioctx_cachep, GFP_KERNEL);
821 	if (!ctx)
822 		return ERR_PTR(-ENOMEM);
823 
824 	ctx->max_reqs = max_reqs;
825 
826 	spin_lock_init(&ctx->ctx_lock);
827 	spin_lock_init(&ctx->completion_lock);
828 	mutex_init(&ctx->ring_lock);
829 	/* Protect against page migration throughout kioctx setup by keeping
830 	 * the ring_lock mutex held until setup is complete. */
831 	mutex_lock(&ctx->ring_lock);
832 	init_waitqueue_head(&ctx->wait);
833 
834 	INIT_LIST_HEAD(&ctx->active_reqs);
835 
836 	if (percpu_ref_init(&ctx->users, free_ioctx_users, 0, GFP_KERNEL))
837 		goto err;
838 
839 	if (percpu_ref_init(&ctx->reqs, free_ioctx_reqs, 0, GFP_KERNEL))
840 		goto err;
841 
842 	ctx->cpu = alloc_percpu(struct kioctx_cpu);
843 	if (!ctx->cpu)
844 		goto err;
845 
846 	err = aio_setup_ring(ctx, nr_events);
847 	if (err < 0)
848 		goto err;
849 
850 	atomic_set(&ctx->reqs_available, ctx->nr_events - 1);
851 	ctx->req_batch = (ctx->nr_events - 1) / (num_possible_cpus() * 4);
852 	if (ctx->req_batch < 1)
853 		ctx->req_batch = 1;
854 
855 	/* limit the number of system wide aios */
856 	spin_lock(&aio_nr_lock);
857 	if (aio_nr + ctx->max_reqs > aio_max_nr ||
858 	    aio_nr + ctx->max_reqs < aio_nr) {
859 		spin_unlock(&aio_nr_lock);
860 		err = -EAGAIN;
861 		goto err_ctx;
862 	}
863 	aio_nr += ctx->max_reqs;
864 	spin_unlock(&aio_nr_lock);
865 
866 	percpu_ref_get(&ctx->users);	/* io_setup() will drop this ref */
867 	percpu_ref_get(&ctx->reqs);	/* free_ioctx_users() will drop this */
868 
869 	err = ioctx_add_table(ctx, mm);
870 	if (err)
871 		goto err_cleanup;
872 
873 	/* Release the ring_lock mutex now that all setup is complete. */
874 	mutex_unlock(&ctx->ring_lock);
875 
876 	pr_debug("allocated ioctx %p[%ld]: mm=%p mask=0x%x\n",
877 		 ctx, ctx->user_id, mm, ctx->nr_events);
878 	return ctx;
879 
880 err_cleanup:
881 	aio_nr_sub(ctx->max_reqs);
882 err_ctx:
883 	atomic_set(&ctx->dead, 1);
884 	if (ctx->mmap_size)
885 		vm_munmap(ctx->mmap_base, ctx->mmap_size);
886 	aio_free_ring(ctx);
887 err:
888 	mutex_unlock(&ctx->ring_lock);
889 	free_percpu(ctx->cpu);
890 	percpu_ref_exit(&ctx->reqs);
891 	percpu_ref_exit(&ctx->users);
892 	kmem_cache_free(kioctx_cachep, ctx);
893 	pr_debug("error allocating ioctx %d\n", err);
894 	return ERR_PTR(err);
895 }
896 
897 /* kill_ioctx
898  *	Cancels all outstanding aio requests on an aio context.  Used
899  *	when the processes owning a context have all exited to encourage
900  *	the rapid destruction of the kioctx.
901  */
902 static int kill_ioctx(struct mm_struct *mm, struct kioctx *ctx,
903 		      struct ctx_rq_wait *wait)
904 {
905 	struct kioctx_table *table;
906 
907 	spin_lock(&mm->ioctx_lock);
908 	if (atomic_xchg(&ctx->dead, 1)) {
909 		spin_unlock(&mm->ioctx_lock);
910 		return -EINVAL;
911 	}
912 
913 	table = rcu_dereference_raw(mm->ioctx_table);
914 	WARN_ON(ctx != rcu_access_pointer(table->table[ctx->id]));
915 	RCU_INIT_POINTER(table->table[ctx->id], NULL);
916 	spin_unlock(&mm->ioctx_lock);
917 
918 	/* free_ioctx_reqs() will do the necessary RCU synchronization */
919 	wake_up_all(&ctx->wait);
920 
921 	/*
922 	 * It'd be more correct to do this in free_ioctx(), after all
923 	 * the outstanding kiocbs have finished - but by then io_destroy
924 	 * has already returned, so io_setup() could potentially return
925 	 * -EAGAIN with no ioctxs actually in use (as far as userspace
926 	 *  could tell).
927 	 */
928 	aio_nr_sub(ctx->max_reqs);
929 
930 	if (ctx->mmap_size)
931 		vm_munmap(ctx->mmap_base, ctx->mmap_size);
932 
933 	ctx->rq_wait = wait;
934 	percpu_ref_kill(&ctx->users);
935 	return 0;
936 }
937 
938 /*
939  * exit_aio: called when the last user of mm goes away.  At this point, there is
940  * no way for any new requests to be submitted or any of the io_* syscalls to be
941  * called on the context.
942  *
943  * There may be outstanding kiocbs, but free_ioctx() will explicitly wait on
944  * them.
945  */
946 void exit_aio(struct mm_struct *mm)
947 {
948 	struct kioctx_table *table = rcu_dereference_raw(mm->ioctx_table);
949 	struct ctx_rq_wait wait;
950 	int i, skipped;
951 
952 	if (!table)
953 		return;
954 
955 	atomic_set(&wait.count, table->nr);
956 	init_completion(&wait.comp);
957 
958 	skipped = 0;
959 	for (i = 0; i < table->nr; ++i) {
960 		struct kioctx *ctx =
961 			rcu_dereference_protected(table->table[i], true);
962 
963 		if (!ctx) {
964 			skipped++;
965 			continue;
966 		}
967 
968 		/*
969 		 * We don't need to bother with munmap() here - exit_mmap(mm)
970 		 * is coming and it'll unmap everything. And we simply can't,
971 		 * this is not necessarily our ->mm.
972 		 * Since kill_ioctx() uses non-zero ->mmap_size as indicator
973 		 * that it needs to unmap the area, just set it to 0.
974 		 */
975 		ctx->mmap_size = 0;
976 		kill_ioctx(mm, ctx, &wait);
977 	}
978 
979 	if (!atomic_sub_and_test(skipped, &wait.count)) {
980 		/* Wait until all IO for the context is done. */
981 		wait_for_completion(&wait.comp);
982 	}
983 
984 	RCU_INIT_POINTER(mm->ioctx_table, NULL);
985 	kfree(table);
986 }
987 
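/*
 * Return completed request slots to this cpu's cache; whenever the cache
 * holds at least two batches, spill batches back to the global
 * reqs_available counter.
 */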
988 static void put_reqs_available(struct kioctx *ctx, unsigned nr)
989 {
990 	struct kioctx_cpu *kcpu;
991 	unsigned long flags;
992 
993 	local_irq_save(flags);
994 	kcpu = this_cpu_ptr(ctx->cpu);
995 	kcpu->reqs_available += nr;
996 
997 	while (kcpu->reqs_available >= ctx->req_batch * 2) {
998 		kcpu->reqs_available -= ctx->req_batch;
999 		atomic_add(ctx->req_batch, &ctx->reqs_available);
1000 	}
1001 
1002 	local_irq_restore(flags);
1003 }
1004 
1005 static bool __get_reqs_available(struct kioctx *ctx)
1006 {
1007 	struct kioctx_cpu *kcpu;
1008 	bool ret = false;
1009 	unsigned long flags;
1010 
1011 	local_irq_save(flags);
1012 	kcpu = this_cpu_ptr(ctx->cpu);
1013 	if (!kcpu->reqs_available) {
1014 		int avail = atomic_read(&ctx->reqs_available);
1015 
1016 		do {
1017 			if (avail < ctx->req_batch)
1018 				goto out;
1019 		} while (!atomic_try_cmpxchg(&ctx->reqs_available,
1020 					     &avail, avail - ctx->req_batch));
1021 
1022 		kcpu->reqs_available += ctx->req_batch;
1023 	}
1024 
1025 	ret = true;
1026 	kcpu->reqs_available--;
1027 out:
1028 	local_irq_restore(flags);
1029 	return ret;
1030 }
1031 
1032 /* refill_reqs_available
1033  *	Updates the reqs_available reference counts used for tracking the
1034  *	number of free slots in the completion ring.  This can be called
1035  *	from aio_complete() (to optimistically update reqs_available) or
1036  *	from aio_get_req() (the "we're out of events" case).  It must be
1037  *	called holding ctx->completion_lock.
1038  */
1039 static void refill_reqs_available(struct kioctx *ctx, unsigned head,
1040                                   unsigned tail)
1041 {
1042 	unsigned events_in_ring, completed;
1043 
1044 	/* Clamp head since userland can write to it. */
1045 	head %= ctx->nr_events;
1046 	if (head <= tail)
1047 		events_in_ring = tail - head;
1048 	else
1049 		events_in_ring = ctx->nr_events - (head - tail);
1050 
1051 	completed = ctx->completed_events;
1052 	if (events_in_ring < completed)
1053 		completed -= events_in_ring;
1054 	else
1055 		completed = 0;
1056 
1057 	if (!completed)
1058 		return;
1059 
1060 	ctx->completed_events -= completed;
1061 	put_reqs_available(ctx, completed);
1062 }
1063 
1064 /* user_refill_reqs_available
1065  *	Called to refill reqs_available when aio_get_req() runs out of
1066  *	space in the completion ring.
1067  */
1068 static void user_refill_reqs_available(struct kioctx *ctx)
1069 {
1070 	spin_lock_irq(&ctx->completion_lock);
1071 	if (ctx->completed_events) {
1072 		struct aio_ring *ring;
1073 		unsigned head;
1074 
1075 		/* Access of ring->head may race with aio_read_events_ring()
1076 		 * here, but that's okay: whether we read the old version or
1077 		 * the new version, either will be valid.  The important
1078 		 * part is that head cannot pass tail since we prevent
1079 		 * aio_complete() from updating tail by holding
1080 		 * ctx->completion_lock.  Even if head is invalid, the check
1081 		 * against ctx->completed_events below will make sure we do the
1082 		 * safe/right thing.
1083 		 */
1084 		ring = folio_address(ctx->ring_folios[0]);
1085 		head = ring->head;
1086 
1087 		refill_reqs_available(ctx, head, ctx->tail);
1088 	}
1089 
1090 	spin_unlock_irq(&ctx->completion_lock);
1091 }
1092 
1093 static bool get_reqs_available(struct kioctx *ctx)
1094 {
1095 	if (__get_reqs_available(ctx))
1096 		return true;
1097 	user_refill_reqs_available(ctx);
1098 	return __get_reqs_available(ctx);
1099 }
1100 
1101 /* aio_get_req
1102  *	Allocate a slot for an aio request.
1103  * Returns NULL if no requests are free.
1104  *
1105  * The refcount is initialized to 2 - one for the async op completion,
1106  * one for the synchronous code that does this.
1107  */
1108 static inline struct aio_kiocb *aio_get_req(struct kioctx *ctx)
1109 {
1110 	struct aio_kiocb *req;
1111 
1112 	req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL);
1113 	if (unlikely(!req))
1114 		return NULL;
1115 
1116 	if (unlikely(!get_reqs_available(ctx))) {
1117 		kmem_cache_free(kiocb_cachep, req);
1118 		return NULL;
1119 	}
1120 
1121 	percpu_ref_get(&ctx->reqs);
1122 	req->ki_ctx = ctx;
1123 	INIT_LIST_HEAD(&req->ki_list);
1124 	refcount_set(&req->ki_refcnt, 2);
1125 	req->ki_eventfd = NULL;
1126 	return req;
1127 }
1128 
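/*
 * Map a userspace aio_context_t back to its kioctx: read the ring id from
 * the start of the user mapping, index mm->ioctx_table under RCU, and take
 * a ctx->users reference on success.
 */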
1129 static struct kioctx *lookup_ioctx(unsigned long ctx_id)
1130 {
1131 	struct aio_ring __user *ring  = (void __user *)ctx_id;
1132 	struct mm_struct *mm = current->mm;
1133 	struct kioctx *ctx, *ret = NULL;
1134 	struct kioctx_table *table;
1135 	unsigned id;
1136 
1137 	if (get_user(id, &ring->id))
1138 		return NULL;
1139 
1140 	rcu_read_lock();
1141 	table = rcu_dereference(mm->ioctx_table);
1142 
1143 	if (!table || id >= table->nr)
1144 		goto out;
1145 
1146 	id = array_index_nospec(id, table->nr);
1147 	ctx = rcu_dereference(table->table[id]);
1148 	if (ctx && ctx->user_id == ctx_id) {
1149 		if (percpu_ref_tryget_live(&ctx->users))
1150 			ret = ctx;
1151 	}
1152 out:
1153 	rcu_read_unlock();
1154 	return ret;
1155 }
1156 
1157 static inline void iocb_destroy(struct aio_kiocb *iocb)
1158 {
1159 	if (iocb->ki_eventfd)
1160 		eventfd_ctx_put(iocb->ki_eventfd);
1161 	if (iocb->ki_filp)
1162 		fput(iocb->ki_filp);
1163 	percpu_ref_put(&iocb->ki_ctx->reqs);
1164 	kmem_cache_free(kiocb_cachep, iocb);
1165 }
1166 
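/*
 * Per-waiter entry on ctx->wait; min_nr lets aio_complete() wake only
 * those read_events() sleepers whose event threshold has been reached.
 */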
1167 struct aio_waiter {
1168 	struct wait_queue_entry	w;
1169 	size_t			min_nr;
1170 };
1171 
1172 /* aio_complete
1173  *	Called when the io request on the given iocb is complete.
1174  */
1175 static void aio_complete(struct aio_kiocb *iocb)
1176 {
1177 	struct kioctx	*ctx = iocb->ki_ctx;
1178 	struct aio_ring	*ring;
1179 	struct io_event	*ev_page, *event;
1180 	unsigned tail, pos, head, avail;
1181 	unsigned long	flags;
1182 
1183 	/*
1184 	 * Add a completion event to the ring buffer. Must be done holding
1185 	 * ctx->completion_lock to prevent other code from messing with the tail
1186 	 * pointer since we might be called from irq context.
1187 	 */
1188 	spin_lock_irqsave(&ctx->completion_lock, flags);
1189 
1190 	tail = ctx->tail;
1191 	pos = tail + AIO_EVENTS_OFFSET;
1192 
1193 	if (++tail >= ctx->nr_events)
1194 		tail = 0;
1195 
1196 	ev_page = folio_address(ctx->ring_folios[pos / AIO_EVENTS_PER_PAGE]);
1197 	event = ev_page + pos % AIO_EVENTS_PER_PAGE;
1198 
1199 	*event = iocb->ki_res;
1200 
1201 	flush_dcache_folio(ctx->ring_folios[pos / AIO_EVENTS_PER_PAGE]);
1202 
1203 	pr_debug("%p[%u]: %p: %p %Lx %Lx %Lx\n", ctx, tail, iocb,
1204 		 (void __user *)(unsigned long)iocb->ki_res.obj,
1205 		 iocb->ki_res.data, iocb->ki_res.res, iocb->ki_res.res2);
1206 
1207 	/* after flagging the request as done, we
1208 	 * must never even look at it again
1209 	 */
1210 	smp_wmb();	/* make event visible before updating tail */
1211 
1212 	ctx->tail = tail;
1213 
1214 	ring = folio_address(ctx->ring_folios[0]);
1215 	head = ring->head;
1216 	ring->tail = tail;
1217 	flush_dcache_folio(ctx->ring_folios[0]);
1218 
1219 	ctx->completed_events++;
1220 	if (ctx->completed_events > 1)
1221 		refill_reqs_available(ctx, head, tail);
1222 
1223 	avail = tail > head
1224 		? tail - head
1225 		: tail + ctx->nr_events - head;
1226 	spin_unlock_irqrestore(&ctx->completion_lock, flags);
1227 
1228 	pr_debug("added to ring %p at [%u]\n", iocb, tail);
1229 
1230 	/*
1231 	 * Check if the user asked us to deliver the result through an
1232 	 * eventfd. The eventfd_signal() function is safe to call
1233 	 * from IRQ context.
1234 	 */
1235 	if (iocb->ki_eventfd)
1236 		eventfd_signal(iocb->ki_eventfd);
1237 
1238 	/*
1239 	 * We have to order our ring_info tail store above and test
1240 	 * of the wait list below outside the wait lock.  This is
1241 	 * like in wake_up_bit() where clearing a bit has to be
1242 	 * ordered with the unlocked test.
1243 	 */
1244 	smp_mb();
1245 
1246 	if (waitqueue_active(&ctx->wait)) {
1247 		struct aio_waiter *curr, *next;
1248 		unsigned long flags;
1249 
1250 		spin_lock_irqsave(&ctx->wait.lock, flags);
1251 		list_for_each_entry_safe(curr, next, &ctx->wait.head, w.entry)
1252 			if (avail >= curr->min_nr) {
1253 				wake_up_process(curr->w.private);
1254 				list_del_init_careful(&curr->w.entry);
1255 			}
1256 		spin_unlock_irqrestore(&ctx->wait.lock, flags);
1257 	}
1258 }
1259 
1260 static inline void iocb_put(struct aio_kiocb *iocb)
1261 {
1262 	if (refcount_dec_and_test(&iocb->ki_refcnt)) {
1263 		aio_complete(iocb);
1264 		iocb_destroy(iocb);
1265 	}
1266 }
1267 
1268 /* aio_read_events_ring
1269  *	Pull an event off of the ioctx's event ring.  Returns the number of
1270  *	events fetched
1271  */
1272 static long aio_read_events_ring(struct kioctx *ctx,
1273 				 struct io_event __user *event, long nr)
1274 {
1275 	struct aio_ring *ring;
1276 	unsigned head, tail, pos;
1277 	long ret = 0;
1278 	int copy_ret;
1279 
1280 	/*
1281 	 * The mutex can block and wake us up and that will cause
1282 	 * wait_event_interruptible_hrtimeout() to schedule without sleeping
1283 	 * and repeat. This should be rare enough that it doesn't cause
1284 	 * performance issues. See the comment in read_events() for more detail.
1285 	 */
1286 	sched_annotate_sleep();
1287 	mutex_lock(&ctx->ring_lock);
1288 
1289 	/* Access to ->ring_folios here is protected by ctx->ring_lock. */
1290 	ring = folio_address(ctx->ring_folios[0]);
1291 	head = ring->head;
1292 	tail = ring->tail;
1293 
1294 	/*
1295 	 * Ensure that once we've read the current tail pointer, that
1296 	 * we also see the events that were stored up to the tail.
1297 	 */
1298 	smp_rmb();
1299 
1300 	pr_debug("h%u t%u m%u\n", head, tail, ctx->nr_events);
1301 
1302 	if (head == tail)
1303 		goto out;
1304 
1305 	head %= ctx->nr_events;
1306 	tail %= ctx->nr_events;
1307 
1308 	while (ret < nr) {
1309 		long avail;
1310 		struct io_event *ev;
1311 		struct folio *folio;
1312 
1313 		avail = (head <= tail ?  tail : ctx->nr_events) - head;
1314 		if (head == tail)
1315 			break;
1316 
1317 		pos = head + AIO_EVENTS_OFFSET;
1318 		folio = ctx->ring_folios[pos / AIO_EVENTS_PER_PAGE];
1319 		pos %= AIO_EVENTS_PER_PAGE;
1320 
1321 		avail = min(avail, nr - ret);
1322 		avail = min_t(long, avail, AIO_EVENTS_PER_PAGE - pos);
1323 
1324 		ev = folio_address(folio);
1325 		copy_ret = copy_to_user(event + ret, ev + pos,
1326 					sizeof(*ev) * avail);
1327 
1328 		if (unlikely(copy_ret)) {
1329 			ret = -EFAULT;
1330 			goto out;
1331 		}
1332 
1333 		ret += avail;
1334 		head += avail;
1335 		head %= ctx->nr_events;
1336 	}
1337 
1338 	ring = folio_address(ctx->ring_folios[0]);
1339 	ring->head = head;
1340 	flush_dcache_folio(ctx->ring_folios[0]);
1341 
1342 	pr_debug("%li  h%u t%u\n", ret, head, tail);
1343 out:
1344 	mutex_unlock(&ctx->ring_lock);
1345 
1346 	return ret;
1347 }
1348 
1349 static bool aio_read_events(struct kioctx *ctx, long min_nr, long nr,
1350 			    struct io_event __user *event, long *i)
1351 {
1352 	long ret = aio_read_events_ring(ctx, event + *i, nr - *i);
1353 
1354 	if (ret > 0)
1355 		*i += ret;
1356 
1357 	if (unlikely(atomic_read(&ctx->dead)))
1358 		ret = -EINVAL;
1359 
1360 	if (!*i)
1361 		*i = ret;
1362 
1363 	return ret < 0 || *i >= min_nr;
1364 }
1365 
1366 static long read_events(struct kioctx *ctx, long min_nr, long nr,
1367 			struct io_event __user *event,
1368 			ktime_t until)
1369 {
1370 	struct hrtimer_sleeper	t;
1371 	struct aio_waiter	w;
1372 	long ret = 0, ret2 = 0;
1373 
1374 	/*
1375 	 * Note that aio_read_events() is being called as the conditional - i.e.
1376 	 * we're calling it after prepare_to_wait() has set task state to
1377 	 * TASK_INTERRUPTIBLE.
1378 	 *
1379 	 * But aio_read_events() can block, and if it blocks it's going to flip
1380 	 * the task state back to TASK_RUNNING.
1381 	 *
1382 	 * This should be ok, provided it doesn't flip the state back to
1383 	 * TASK_RUNNING and return 0 too much - that causes us to spin. That
1384 	 * will only happen if the mutex_lock() call blocks, and we then find
1385 	 * the ringbuffer empty. So in practice we should be ok, but it's
1386 	 * something to be aware of when touching this code.
1387 	 */
1388 	aio_read_events(ctx, min_nr, nr, event, &ret);
1389 	if (until == 0 || ret < 0 || ret >= min_nr)
1390 		return ret;
1391 
1392 	hrtimer_setup_sleeper_on_stack(&t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1393 	if (until != KTIME_MAX) {
1394 		hrtimer_set_expires_range_ns(&t.timer, until, current->timer_slack_ns);
1395 		hrtimer_sleeper_start_expires(&t, HRTIMER_MODE_REL);
1396 	}
1397 
1398 	init_wait(&w.w);
1399 
1400 	while (1) {
1401 		unsigned long nr_got = ret;
1402 
1403 		w.min_nr = min_nr - ret;
1404 
1405 		ret2 = prepare_to_wait_event(&ctx->wait, &w.w, TASK_INTERRUPTIBLE);
1406 		if (!ret2 && !t.task)
1407 			ret2 = -ETIME;
1408 
1409 		if (aio_read_events(ctx, min_nr, nr, event, &ret) || ret2)
1410 			break;
1411 
1412 		if (nr_got == ret)
1413 			schedule();
1414 	}
1415 
1416 	finish_wait(&ctx->wait, &w.w);
1417 	hrtimer_cancel(&t.timer);
1418 	destroy_hrtimer_on_stack(&t.timer);
1419 
1420 	return ret;
1421 }
1422 
1423 /* sys_io_setup:
1424  *	Create an aio_context capable of receiving at least nr_events.
1425  *	ctxp must not point to an aio_context that already exists, and
1426  *	must be initialized to 0 prior to the call.  On successful
1427  *	creation of the aio_context, *ctxp is filled in with the resulting
1428  *	handle.  May fail with -EINVAL if *ctxp is not initialized or
1429  *	if the specified nr_events exceeds internal limits.  May fail
1430  *	with -EAGAIN if the specified nr_events exceeds the user's limit
1431  *	of available events.  May fail with -ENOMEM if insufficient kernel
1432  *	resources are available.  May fail with -EFAULT if an invalid
1433  *	pointer is passed for ctxp.  Will fail with -ENOSYS if not
1434  *	implemented.
1435  */
1436 SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp)
1437 {
1438 	struct kioctx *ioctx = NULL;
1439 	unsigned long ctx;
1440 	long ret;
1441 
1442 	ret = get_user(ctx, ctxp);
1443 	if (unlikely(ret))
1444 		goto out;
1445 
1446 	ret = -EINVAL;
1447 	if (unlikely(ctx || nr_events == 0)) {
1448 		pr_debug("EINVAL: ctx %lu nr_events %u\n",
1449 		         ctx, nr_events);
1450 		goto out;
1451 	}
1452 
1453 	ioctx = ioctx_alloc(nr_events);
1454 	ret = PTR_ERR(ioctx);
1455 	if (!IS_ERR(ioctx)) {
1456 		ret = put_user(ioctx->user_id, ctxp);
1457 		if (ret)
1458 			kill_ioctx(current->mm, ioctx, NULL);
1459 		percpu_ref_put(&ioctx->users);
1460 	}
1461 
1462 out:
1463 	return ret;
1464 }
1465 
1466 #ifdef CONFIG_COMPAT
1467 COMPAT_SYSCALL_DEFINE2(io_setup, unsigned, nr_events, u32 __user *, ctx32p)
1468 {
1469 	struct kioctx *ioctx = NULL;
1470 	unsigned long ctx;
1471 	long ret;
1472 
1473 	ret = get_user(ctx, ctx32p);
1474 	if (unlikely(ret))
1475 		goto out;
1476 
1477 	ret = -EINVAL;
1478 	if (unlikely(ctx || nr_events == 0)) {
1479 		pr_debug("EINVAL: ctx %lu nr_events %u\n",
1480 		         ctx, nr_events);
1481 		goto out;
1482 	}
1483 
1484 	ioctx = ioctx_alloc(nr_events);
1485 	ret = PTR_ERR(ioctx);
1486 	if (!IS_ERR(ioctx)) {
1487 		/* truncating is ok because it's a user address */
1488 		ret = put_user((u32)ioctx->user_id, ctx32p);
1489 		if (ret)
1490 			kill_ioctx(current->mm, ioctx, NULL);
1491 		percpu_ref_put(&ioctx->users);
1492 	}
1493 
1494 out:
1495 	return ret;
1496 }
1497 #endif
1498 
1499 /* sys_io_destroy:
1500  *	Destroy the aio_context specified.  May cancel any outstanding
1501  *	AIOs and block on completion.  Will fail with -ENOSYS if not
1502  *	implemented.  May fail with -EINVAL if the context pointed to
1503  *	is invalid.
1504  */
1505 SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
1506 {
1507 	struct kioctx *ioctx = lookup_ioctx(ctx);
1508 	if (likely(NULL != ioctx)) {
1509 		struct ctx_rq_wait wait;
1510 		int ret;
1511 
1512 		init_completion(&wait.comp);
1513 		atomic_set(&wait.count, 1);
1514 
1515 		/* Pass requests_done to kill_ioctx() where it can be set
1516 		 * in a thread-safe way. If we try to set it here then we have
1517 		 * a race condition if two io_destroy() calls run simultaneously.
1518 		 */
1519 		ret = kill_ioctx(current->mm, ioctx, &wait);
1520 		percpu_ref_put(&ioctx->users);
1521 
1522 		/* Wait until all IO for the context is done. Otherwise the kernel
1523 		 * keeps using user-space buffers even if the user thinks the context
1524 		 * is destroyed.
1525 		 */
1526 		if (!ret)
1527 			wait_for_completion(&wait.comp);
1528 
1529 		return ret;
1530 	}
1531 	pr_debug("EINVAL: invalid context id\n");
1532 	return -EINVAL;
1533 }
1534 
1535 static void aio_remove_iocb(struct aio_kiocb *iocb)
1536 {
1537 	struct kioctx *ctx = iocb->ki_ctx;
1538 	unsigned long flags;
1539 
1540 	spin_lock_irqsave(&ctx->ctx_lock, flags);
1541 	list_del(&iocb->ki_list);
1542 	spin_unlock_irqrestore(&ctx->ctx_lock, flags);
1543 }
1544 
1545 static void aio_complete_rw(struct kiocb *kiocb, long res)
1546 {
1547 	struct aio_kiocb *iocb = container_of(kiocb, struct aio_kiocb, rw);
1548 
1549 	if (!list_empty_careful(&iocb->ki_list))
1550 		aio_remove_iocb(iocb);
1551 
1552 	if (kiocb->ki_flags & IOCB_WRITE) {
1553 		struct inode *inode = file_inode(kiocb->ki_filp);
1554 
1555 		if (S_ISREG(inode->i_mode))
1556 			kiocb_end_write(kiocb);
1557 	}
1558 
1559 	iocb->ki_res.res = res;
1560 	iocb->ki_res.res2 = 0;
1561 	iocb_put(iocb);
1562 }
1563 
1564 static int aio_prep_rw(struct kiocb *req, const struct iocb *iocb, int rw_type)
1565 {
1566 	int ret;
1567 
1568 	req->ki_write_stream = 0;
1569 	req->ki_complete = aio_complete_rw;
1570 	req->private = NULL;
1571 	req->ki_pos = iocb->aio_offset;
1572 	req->ki_flags = req->ki_filp->f_iocb_flags | IOCB_AIO_RW;
1573 	if (iocb->aio_flags & IOCB_FLAG_RESFD)
1574 		req->ki_flags |= IOCB_EVENTFD;
1575 	if (iocb->aio_flags & IOCB_FLAG_IOPRIO) {
1576 		/*
1577 		 * If the IOCB_FLAG_IOPRIO flag of aio_flags is set, then
1578 		 * aio_reqprio is interpreted as an I/O scheduling
1579 		 * class and priority.
1580 		 */
1581 		ret = ioprio_check_cap(iocb->aio_reqprio);
1582 		if (ret) {
1583 			pr_debug("aio ioprio check cap error: %d\n", ret);
1584 			return ret;
1585 		}
1586 
1587 		req->ki_ioprio = iocb->aio_reqprio;
1588 	} else
1589 		req->ki_ioprio = get_current_ioprio();
1590 
1591 	ret = kiocb_set_rw_flags(req, iocb->aio_rw_flags, rw_type);
1592 	if (unlikely(ret))
1593 		return ret;
1594 
1595 	req->ki_flags &= ~IOCB_HIPRI; /* no one is going to poll for this I/O */
1596 	return 0;
1597 }
1598 
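/*
 * Build the iov_iter for a read or write: a single user buffer for plain
 * iocbs, or a user iovec (with compat layout handling) for the vectored
 * variants.
 */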
1599 static ssize_t aio_setup_rw(int rw, const struct iocb *iocb,
1600 		struct iovec **iovec, bool vectored, bool compat,
1601 		struct iov_iter *iter)
1602 {
1603 	void __user *buf = (void __user *)(uintptr_t)iocb->aio_buf;
1604 	size_t len = iocb->aio_nbytes;
1605 
1606 	if (!vectored) {
1607 		ssize_t ret = import_ubuf(rw, buf, len, iter);
1608 		*iovec = NULL;
1609 		return ret;
1610 	}
1611 
1612 	return __import_iovec(rw, buf, len, UIO_FASTIOV, iovec, iter, compat);
1613 }
1614 
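/*
 * Handle the ->read_iter()/->write_iter() return value: -EIOCBQUEUED means
 * the request will complete asynchronously; restart error codes become
 * -EINTR since the syscall cannot be restarted; anything else completes
 * the iocb here.
 */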
1615 static inline void aio_rw_done(struct kiocb *req, ssize_t ret)
1616 {
1617 	switch (ret) {
1618 	case -EIOCBQUEUED:
1619 		break;
1620 	case -ERESTARTSYS:
1621 	case -ERESTARTNOINTR:
1622 	case -ERESTARTNOHAND:
1623 	case -ERESTART_RESTARTBLOCK:
1624 		/*
1625 		 * There's no easy way to restart the syscall since other AIO's
1626 		 * may be already running. Just fail this IO with EINTR.
1627 		 */
1628 		ret = -EINTR;
1629 		fallthrough;
1630 	default:
1631 		req->ki_complete(req, ret);
1632 	}
1633 }
1634 
1635 static int aio_read(struct kiocb *req, const struct iocb *iocb,
1636 			bool vectored, bool compat)
1637 {
1638 	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
1639 	struct iov_iter iter;
1640 	struct file *file;
1641 	int ret;
1642 
1643 	ret = aio_prep_rw(req, iocb, READ);
1644 	if (ret)
1645 		return ret;
1646 	file = req->ki_filp;
1647 	if (unlikely(!(file->f_mode & FMODE_READ)))
1648 		return -EBADF;
1649 	if (unlikely(!file->f_op->read_iter))
1650 		return -EINVAL;
1651 
1652 	ret = aio_setup_rw(ITER_DEST, iocb, &iovec, vectored, compat, &iter);
1653 	if (ret < 0)
1654 		return ret;
1655 	ret = rw_verify_area(READ, file, &req->ki_pos, iov_iter_count(&iter));
1656 	if (!ret)
1657 		aio_rw_done(req, file->f_op->read_iter(req, &iter));
1658 	kfree(iovec);
1659 	return ret;
1660 }
1661 
1662 static int aio_write(struct kiocb *req, const struct iocb *iocb,
1663 			 bool vectored, bool compat)
1664 {
1665 	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
1666 	struct iov_iter iter;
1667 	struct file *file;
1668 	int ret;
1669 
1670 	ret = aio_prep_rw(req, iocb, WRITE);
1671 	if (ret)
1672 		return ret;
1673 	file = req->ki_filp;
1674 
1675 	if (unlikely(!(file->f_mode & FMODE_WRITE)))
1676 		return -EBADF;
1677 	if (unlikely(!file->f_op->write_iter))
1678 		return -EINVAL;
1679 
1680 	ret = aio_setup_rw(ITER_SOURCE, iocb, &iovec, vectored, compat, &iter);
1681 	if (ret < 0)
1682 		return ret;
1683 	ret = rw_verify_area(WRITE, file, &req->ki_pos, iov_iter_count(&iter));
1684 	if (!ret) {
1685 		if (S_ISREG(file_inode(file)->i_mode))
1686 			kiocb_start_write(req);
1687 		req->ki_flags |= IOCB_WRITE;
1688 		aio_rw_done(req, file->f_op->write_iter(req, &iter));
1689 	}
1690 	kfree(iovec);
1691 	return ret;
1692 }
1693 
1694 static void aio_fsync_work(struct work_struct *work)
1695 {
1696 	struct aio_kiocb *iocb = container_of(work, struct aio_kiocb, fsync.work);
1697 
1698 	scoped_with_creds(iocb->fsync.creds)
1699 		iocb->ki_res.res = vfs_fsync(iocb->fsync.file, iocb->fsync.datasync);
1700 
1701 	put_cred(iocb->fsync.creds);
1702 	iocb_put(iocb);
1703 }
1704 
1705 static int aio_fsync(struct fsync_iocb *req, const struct iocb *iocb,
1706 		     bool datasync)
1707 {
1708 	if (unlikely(iocb->aio_buf || iocb->aio_offset || iocb->aio_nbytes ||
1709 			iocb->aio_rw_flags))
1710 		return -EINVAL;
1711 
1712 	if (unlikely(!req->file->f_op->fsync))
1713 		return -EINVAL;
1714 
1715 	req->creds = prepare_creds();
1716 	if (!req->creds)
1717 		return -ENOMEM;
1718 
1719 	req->datasync = datasync;
1720 	INIT_WORK(&req->work, aio_fsync_work);
1721 	schedule_work(&req->work);
1722 	return 0;
1723 }
1724 
1725 static void aio_poll_put_work(struct work_struct *work)
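/*
 * Deferred iocb_put() for aio_poll_wake(): used when an eventfd must be
 * signalled but the wakeup context does not allow it, so the final put
 * (and thus the eventfd signal) happens from a workqueue instead.
 */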
1726 {
1727 	struct poll_iocb *req = container_of(work, struct poll_iocb, work);
1728 	struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll);
1729 
1730 	iocb_put(iocb);
1731 }
1732 
1733 /*
1734  * Safely lock the waitqueue which the request is on, synchronizing with the
1735  * case where the ->poll() provider decides to free its waitqueue early.
1736  *
1737  * Returns true on success, meaning that req->head->lock was locked, req->wait
1738  * is on req->head, and an RCU read lock was taken.  Returns false if the
1739  * request was already removed from its waitqueue (which might no longer exist).
1740  */
1741 static bool poll_iocb_lock_wq(struct poll_iocb *req)
1742 {
1743 	wait_queue_head_t *head;
1744 
1745 	/*
1746 	 * While we hold the waitqueue lock and the waitqueue is nonempty,
1747 	 * wake_up_pollfree() will wait for us.  However, taking the waitqueue
1748 	 * lock in the first place can race with the waitqueue being freed.
1749 	 *
1750 	 * We solve this as eventpoll does: by taking advantage of the fact that
1751 	 * all users of wake_up_pollfree() will RCU-delay the actual free.  If
1752 	 * we enter rcu_read_lock() and see that the pointer to the queue is
1753 	 * non-NULL, we can then lock it without the memory being freed out from
1754 	 * under us, then check whether the request is still on the queue.
1755 	 *
1756 	 * Keep holding rcu_read_lock() as long as we hold the queue lock, in
1757 	 * case the caller deletes the entry from the queue, leaving it empty.
1758 	 * In that case, only RCU prevents the queue memory from being freed.
1759 	 */
1760 	rcu_read_lock();
1761 	head = smp_load_acquire(&req->head);
1762 	if (head) {
1763 		spin_lock(&head->lock);
1764 		if (!list_empty(&req->wait.entry))
1765 			return true;
1766 		spin_unlock(&head->lock);
1767 	}
1768 	rcu_read_unlock();
1769 	return false;
1770 }
1771 
1772 static void poll_iocb_unlock_wq(struct poll_iocb *req)
1773 {
1774 	spin_unlock(&req->head->lock);
1775 	rcu_read_unlock();
1776 }
1777 
1778 static void aio_poll_complete_work(struct work_struct *work)
1779 {
1780 	struct poll_iocb *req = container_of(work, struct poll_iocb, work);
1781 	struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll);
1782 	struct poll_table_struct pt = { ._key = req->events };
1783 	struct kioctx *ctx = iocb->ki_ctx;
1784 	__poll_t mask = 0;
1785 
1786 	if (!READ_ONCE(req->cancelled))
1787 		mask = vfs_poll(req->file, &pt) & req->events;
1788 
1789 	/*
1790 	 * Note that ->ki_cancel callers also delete iocb from active_reqs after
1791 	 * calling ->ki_cancel.  We need the ctx_lock roundtrip here to
1792 	 * synchronize with them.  In the cancellation case the list_del_init
1793 	 * itself is not actually needed, but harmless so we keep it in to
1794 	 * avoid further branches in the fast path.
1795 	 */
1796 	spin_lock_irq(&ctx->ctx_lock);
1797 	if (poll_iocb_lock_wq(req)) {
1798 		if (!mask && !READ_ONCE(req->cancelled)) {
1799 			/*
1800 			 * The request isn't actually ready to be completed yet.
1801 			 * Reschedule completion if another wakeup came in.
1802 			 */
1803 			if (req->work_need_resched) {
1804 				schedule_work(&req->work);
1805 				req->work_need_resched = false;
1806 			} else {
1807 				req->work_scheduled = false;
1808 			}
1809 			poll_iocb_unlock_wq(req);
1810 			spin_unlock_irq(&ctx->ctx_lock);
1811 			return;
1812 		}
1813 		list_del_init(&req->wait.entry);
1814 		poll_iocb_unlock_wq(req);
1815 	} /* else, POLLFREE has freed the waitqueue, so we must complete */
1816 	list_del_init(&iocb->ki_list);
1817 	iocb->ki_res.res = mangle_poll(mask);
1818 	spin_unlock_irq(&ctx->ctx_lock);
1819 
1820 	iocb_put(iocb);
1821 }
1822 
1823 /* assumes we are called with irqs disabled */
1824 static int aio_poll_cancel(struct kiocb *iocb)
1825 {
1826 	struct aio_kiocb *aiocb = container_of(iocb, struct aio_kiocb, rw);
1827 	struct poll_iocb *req = &aiocb->poll;
1828 
1829 	if (poll_iocb_lock_wq(req)) {
1830 		WRITE_ONCE(req->cancelled, true);
1831 		if (!req->work_scheduled) {
1832 			schedule_work(&aiocb->poll.work);
1833 			req->work_scheduled = true;
1834 		}
1835 		poll_iocb_unlock_wq(req);
1836 	} /* else, the request was force-cancelled by POLLFREE already */
1837 
1838 	return 0;
1839 }
1840 
1841 static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
1842 		void *key)
1843 {
1844 	struct poll_iocb *req = container_of(wait, struct poll_iocb, wait);
1845 	struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll);
1846 	__poll_t mask = key_to_poll(key);
1847 	unsigned long flags;
1848 
1849 	/* for instances that support it, check for an event match first: */
1850 	if (mask && !(mask & req->events))
1851 		return 0;
1852 
1853 	/*
1854 	 * Complete the request inline if possible.  This requires that three
1855 	 * conditions be met:
1856 	 *   1. An event mask must have been passed.  If a plain wakeup was done
1857 	 *	instead, then mask == 0 and we have to call vfs_poll() to get
1858 	 *	the events, so inline completion isn't possible.
1859 	 *   2. The completion work must not have already been scheduled.
1860 	 *   3. ctx_lock must not be busy.  We have to use trylock because we
1861 	 *	already hold the waitqueue lock, so this inverts the normal
1862 	 *	locking order.  Use irqsave/irqrestore because not all
1863 	 *	filesystems (e.g. fuse) call this function with IRQs disabled,
1864 	 *	yet IRQs have to be disabled before ctx_lock is obtained.
1865 	 */
1866 	if (mask && !req->work_scheduled &&
1867 	    spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) {
1868 		struct kioctx *ctx = iocb->ki_ctx;
1869 
1870 		list_del_init(&req->wait.entry);
1871 		list_del(&iocb->ki_list);
1872 		iocb->ki_res.res = mangle_poll(mask);
1873 		if (iocb->ki_eventfd && !eventfd_signal_allowed()) {
1874 			iocb = NULL;
1875 			INIT_WORK(&req->work, aio_poll_put_work);
1876 			schedule_work(&req->work);
1877 		}
1878 		spin_unlock_irqrestore(&ctx->ctx_lock, flags);
1879 		if (iocb)
1880 			iocb_put(iocb);
1881 	} else {
1882 		/*
1883 		 * Schedule the completion work if needed.  If it was already
1884 		 * scheduled, record that another wakeup came in.
1885 		 *
1886 		 * Don't remove the request from the waitqueue here, as it might
1887 		 * not actually be complete yet (we won't know until vfs_poll()
1888 		 * is called), and we must not miss any wakeups.  POLLFREE is an
1889 		 * exception to this; see below.
1890 		 */
1891 		if (req->work_scheduled) {
1892 			req->work_need_resched = true;
1893 		} else {
1894 			schedule_work(&req->work);
1895 			req->work_scheduled = true;
1896 		}
1897 
1898 		/*
1899 		 * If the waitqueue is being freed early but we can't complete
1900 		 * the request inline, we have to tear down the request as best
1901 		 * we can.  That means immediately removing the request from its
1902 		 * waitqueue and preventing all further accesses to the
1903 		 * waitqueue via the request.  We also need to schedule the
1904 		 * completion work (done above).  Also mark the request as
1905 		 * cancelled, to potentially skip an unneeded call to ->poll().
1906 		 */
1907 		if (mask & POLLFREE) {
1908 			WRITE_ONCE(req->cancelled, true);
1909 			list_del_init(&req->wait.entry);
1910 
1911 			/*
1912 			 * Careful: this *must* be the last step, since as soon
1913 			 * as req->head is NULL'ed out, the request can be
1914 			 * completed and freed, since aio_poll_complete_work()
1915 			 * will no longer need to take the waitqueue lock.
1916 			 */
1917 			smp_store_release(&req->head, NULL);
1918 		}
1919 	}
1920 	return 1;
1921 }
1922 
1923 struct aio_poll_table {
1924 	struct poll_table_struct	pt;
1925 	struct aio_kiocb		*iocb;
1926 	bool				queued;
1927 	int				error;
1928 };
1929 
1930 static void
1931 aio_poll_queue_proc(struct file *file, struct wait_queue_head *head,
1932 		struct poll_table_struct *p)
1933 {
1934 	struct aio_poll_table *pt = container_of(p, struct aio_poll_table, pt);
1935 
1936 	/* multiple wait queues per file are not supported */
1937 	if (unlikely(pt->queued)) {
1938 		pt->error = -EINVAL;
1939 		return;
1940 	}
1941 
1942 	pt->queued = true;
1943 	pt->error = 0;
1944 	pt->iocb->poll.head = head;
1945 	add_wait_queue(head, &pt->iocb->poll.wait);
1946 }
1947 
1948 static int aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
1949 {
1950 	struct kioctx *ctx = aiocb->ki_ctx;
1951 	struct poll_iocb *req = &aiocb->poll;
1952 	struct aio_poll_table apt;
1953 	bool cancel = false;
1954 	__poll_t mask;
1955 
1956 	/* reject any unknown events outside the normal event mask. */
1957 	if ((u16)iocb->aio_buf != iocb->aio_buf)
1958 		return -EINVAL;
1959 	/* reject fields that are not defined for poll */
1960 	if (iocb->aio_offset || iocb->aio_nbytes || iocb->aio_rw_flags)
1961 		return -EINVAL;
1962 
1963 	INIT_WORK(&req->work, aio_poll_complete_work);
1964 	req->events = demangle_poll(iocb->aio_buf) | EPOLLERR | EPOLLHUP;
1965 
1966 	req->head = NULL;
1967 	req->cancelled = false;
1968 	req->work_scheduled = false;
1969 	req->work_need_resched = false;
1970 
1971 	apt.pt._qproc = aio_poll_queue_proc;
1972 	apt.pt._key = req->events;
1973 	apt.iocb = aiocb;
1974 	apt.queued = false;
1975 	apt.error = -EINVAL; /* same as no support for IOCB_CMD_POLL */
1976 
1977 	/* initialize the list so that we can do list_empty checks */
1978 	INIT_LIST_HEAD(&req->wait.entry);
1979 	init_waitqueue_func_entry(&req->wait, aio_poll_wake);
1980 
1981 	mask = vfs_poll(req->file, &apt.pt) & req->events;
1982 	spin_lock_irq(&ctx->ctx_lock);
1983 	if (likely(apt.queued)) {
1984 		bool on_queue = poll_iocb_lock_wq(req);
1985 
1986 		if (!on_queue || req->work_scheduled) {
1987 			/*
1988 			 * aio_poll_wake() already either scheduled the async
1989 			 * completion work, or completed the request inline.
1990 			 */
1991 			if (apt.error) /* unsupported case: multiple queues */
1992 				cancel = true;
1993 			apt.error = 0;
1994 			mask = 0;
1995 		}
1996 		if (mask || apt.error) {
1997 			/* Steal to complete synchronously. */
1998 			list_del_init(&req->wait.entry);
1999 		} else if (cancel) {
2000 			/* Cancel if possible (may be too late though). */
2001 			WRITE_ONCE(req->cancelled, true);
2002 		} else if (on_queue) {
2003 			/*
2004 			 * Actually waiting for an event, so add the request to
2005 			 * active_reqs so that it can be cancelled if needed.
2006 			 */
2007 			list_add_tail(&aiocb->ki_list, &ctx->active_reqs);
2008 			aiocb->ki_cancel = aio_poll_cancel;
2009 		}
2010 		if (on_queue)
2011 			poll_iocb_unlock_wq(req);
2012 	}
2013 	if (mask) { /* no async, we'd stolen it */
2014 		aiocb->ki_res.res = mangle_poll(mask);
2015 		apt.error = 0;
2016 	}
2017 	spin_unlock_irq(&ctx->ctx_lock);
2018 	if (mask)
2019 		iocb_put(aiocb);
2020 	return apt.error;
2021 }
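
/*
 * Example (userspace sketch, not part of this file): submitting an
 * IOCB_CMD_POLL request via the raw syscall, assuming <linux/aio_abi.h>,
 * <poll.h>, <sys/syscall.h> and an io_setup()-created context; "ctx" and
 * "fd" are illustrative names.  aio_buf carries the requested poll mask
 * (demangle_poll() above converts it to EPOLL* bits), and aio_offset,
 * aio_nbytes and aio_rw_flags must be zero.  The completion's
 * io_event.res holds the ready mask, re-encoded by mangle_poll().
 *
 *	struct iocb cb = { 0 }, *cbs[1] = { &cb };
 *	cb.aio_lio_opcode = IOCB_CMD_POLL;
 *	cb.aio_fildes = fd;
 *	cb.aio_buf = POLLIN;
 *	if (syscall(__NR_io_submit, ctx, 1, cbs) != 1)
 *		perror("io_submit");
 */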
2022 
2023 static int __io_submit_one(struct kioctx *ctx, const struct iocb *iocb,
2024 			   struct iocb __user *user_iocb, struct aio_kiocb *req,
2025 			   bool compat)
2026 {
2027 	req->ki_filp = fget(iocb->aio_fildes);
2028 	if (unlikely(!req->ki_filp))
2029 		return -EBADF;
2030 
2031 	if (iocb->aio_flags & IOCB_FLAG_RESFD) {
2032 		struct eventfd_ctx *eventfd;
2033 		/*
2034 		 * If the IOCB_FLAG_RESFD flag of aio_flags is set, get an
2035 		 * instance of the file* now. The file descriptor must be
2036 		 * an eventfd() fd, and will be signaled for each completed
2037 		 * event using the eventfd_signal() function.
2038 		 */
2039 		eventfd = eventfd_ctx_fdget(iocb->aio_resfd);
2040 		if (IS_ERR(eventfd))
2041 			return PTR_ERR(eventfd);
2042 
2043 		req->ki_eventfd = eventfd;
2044 	}
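
	/*
	 * Userspace side of IOCB_FLAG_RESFD, as a sketch ("cb" is an
	 * illustrative struct iocb and "efd" an eventfd(2) descriptor,
	 * neither defined in this file):
	 *
	 *	cb.aio_flags |= IOCB_FLAG_RESFD;
	 *	cb.aio_resfd = efd;
	 *
	 * Each completion then also signals the eventfd (adding 1 to its
	 * counter), so the submitter can poll or epoll on efd instead of
	 * blocking in io_getevents().
	 */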
2045 
2046 	if (unlikely(put_user(KIOCB_KEY, &user_iocb->aio_key))) {
2047 		pr_debug("EFAULT: aio_key\n");
2048 		return -EFAULT;
2049 	}
2050 
2051 	req->ki_res.obj = (u64)(unsigned long)user_iocb;
2052 	req->ki_res.data = iocb->aio_data;
2053 	req->ki_res.res = 0;
2054 	req->ki_res.res2 = 0;
2055 
2056 	switch (iocb->aio_lio_opcode) {
2057 	case IOCB_CMD_PREAD:
2058 		return aio_read(&req->rw, iocb, false, compat);
2059 	case IOCB_CMD_PWRITE:
2060 		return aio_write(&req->rw, iocb, false, compat);
2061 	case IOCB_CMD_PREADV:
2062 		return aio_read(&req->rw, iocb, true, compat);
2063 	case IOCB_CMD_PWRITEV:
2064 		return aio_write(&req->rw, iocb, true, compat);
2065 	case IOCB_CMD_FSYNC:
2066 		return aio_fsync(&req->fsync, iocb, false);
2067 	case IOCB_CMD_FDSYNC:
2068 		return aio_fsync(&req->fsync, iocb, true);
2069 	case IOCB_CMD_POLL:
2070 		return aio_poll(req, iocb);
2071 	default:
2072 		pr_debug("invalid aio operation %d\n", iocb->aio_lio_opcode);
2073 		return -EINVAL;
2074 	}
2075 }
2076 
2077 static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
2078 			 bool compat)
2079 {
2080 	struct aio_kiocb *req;
2081 	struct iocb iocb;
2082 	int err;
2083 
2084 	if (unlikely(copy_from_user(&iocb, user_iocb, sizeof(iocb))))
2085 		return -EFAULT;
2086 
2087 	/* enforce forwards compatibility on users */
2088 	if (unlikely(iocb.aio_reserved2)) {
2089 		pr_debug("EINVAL: reserved2 field set\n");
2090 		return -EINVAL;
2091 	}
2092 
2093 	/* prevent overflows */
2094 	if (unlikely(
2095 	    (iocb.aio_buf != (unsigned long)iocb.aio_buf) ||
2096 	    (iocb.aio_nbytes != (size_t)iocb.aio_nbytes) ||
2097 	    ((ssize_t)iocb.aio_nbytes < 0)
2098 	   )) {
2099 		pr_debug("EINVAL: overflow check\n");
2100 		return -EINVAL;
2101 	}
2102 
2103 	req = aio_get_req(ctx);
2104 	if (unlikely(!req))
2105 		return -EAGAIN;
2106 
2107 	err = __io_submit_one(ctx, &iocb, user_iocb, req, compat);
2108 
2109 	/* Done with the synchronous reference */
2110 	iocb_put(req);
2111 
2112 	/*
2113 	 * If err is 0, we've either done aio_complete() ourselves or have
2114 	 * arranged for that to be done asynchronously.  Anything non-zero
2115 	 * means that we need to destroy req ourselves.
2116 	 */
2117 	if (unlikely(err)) {
2118 		iocb_destroy(req);
2119 		put_reqs_available(ctx, 1);
2120 	}
2121 	return err;
2122 }
2123 
2124 /* sys_io_submit:
2125  *	Queue the nr iocbs pointed to by iocbpp for processing.  Returns
2126  *	the number of iocbs queued.  May return -EINVAL if the aio_context
2127  *	specified by ctx_id is invalid, if nr is < 0, if the iocb at
2128  *	*iocbpp[0] is not properly initialized, or if the operation specified
2129  *	is invalid for the file descriptor in the iocb.  May fail with
2130  *	-EFAULT if any of the data structures point to invalid data.  May
2131  *	fail with -EBADF if the file descriptor specified in the first
2132  *	iocb is invalid.  May fail with -EAGAIN if insufficient resources
2133  *	are available to queue any iocbs.  Will return 0 if nr is 0.  Will
2134  *	fail with -ENOSYS if not implemented.
2135  */
2136 SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
2137 		struct iocb __user * __user *, iocbpp)
2138 {
2139 	struct kioctx *ctx;
2140 	long ret = 0;
2141 	int i = 0;
2142 	struct blk_plug plug;
2143 
2144 	if (unlikely(nr < 0))
2145 		return -EINVAL;
2146 
2147 	ctx = lookup_ioctx(ctx_id);
2148 	if (unlikely(!ctx)) {
2149 		pr_debug("EINVAL: invalid context id\n");
2150 		return -EINVAL;
2151 	}
2152 
2153 	if (nr > ctx->nr_events)
2154 		nr = ctx->nr_events;
2155 
2156 	if (nr > AIO_PLUG_THRESHOLD)
2157 		blk_start_plug(&plug);
2158 	for (i = 0; i < nr; i++) {
2159 		struct iocb __user *user_iocb;
2160 
2161 		if (unlikely(get_user(user_iocb, iocbpp + i))) {
2162 			ret = -EFAULT;
2163 			break;
2164 		}
2165 
2166 		ret = io_submit_one(ctx, user_iocb, false);
2167 		if (ret)
2168 			break;
2169 	}
2170 	if (nr > AIO_PLUG_THRESHOLD)
2171 		blk_finish_plug(&plug);
2172 
2173 	percpu_ref_put(&ctx->users);
2174 	return i ? i : ret;
2175 }
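
/*
 * Usage sketch for io_submit(2) (illustrative, not part of this file):
 * queue a single 4 KiB read, assuming <linux/aio_abi.h>, raw syscalls and
 * an io_setup()-created context; "ctx", "fd" and "buf" are placeholder
 * names.  aio_data is an opaque cookie echoed back in io_event.data.
 *
 *	struct iocb cb = { 0 }, *cbs[1] = { &cb };
 *	cb.aio_data = 0x1234;
 *	cb.aio_lio_opcode = IOCB_CMD_PREAD;
 *	cb.aio_fildes = fd;
 *	cb.aio_buf = (__u64)(unsigned long)buf;
 *	cb.aio_nbytes = 4096;
 *	cb.aio_offset = 0;
 *	if (syscall(__NR_io_submit, ctx, 1, cbs) != 1)
 *		perror("io_submit");
 */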
2176 
2177 #ifdef CONFIG_COMPAT
2178 COMPAT_SYSCALL_DEFINE3(io_submit, compat_aio_context_t, ctx_id,
2179 		       int, nr, compat_uptr_t __user *, iocbpp)
2180 {
2181 	struct kioctx *ctx;
2182 	long ret = 0;
2183 	int i = 0;
2184 	struct blk_plug plug;
2185 
2186 	if (unlikely(nr < 0))
2187 		return -EINVAL;
2188 
2189 	ctx = lookup_ioctx(ctx_id);
2190 	if (unlikely(!ctx)) {
2191 		pr_debug("EINVAL: invalid context id\n");
2192 		return -EINVAL;
2193 	}
2194 
2195 	if (nr > ctx->nr_events)
2196 		nr = ctx->nr_events;
2197 
2198 	if (nr > AIO_PLUG_THRESHOLD)
2199 		blk_start_plug(&plug);
2200 	for (i = 0; i < nr; i++) {
2201 		compat_uptr_t user_iocb;
2202 
2203 		if (unlikely(get_user(user_iocb, iocbpp + i))) {
2204 			ret = -EFAULT;
2205 			break;
2206 		}
2207 
2208 		ret = io_submit_one(ctx, compat_ptr(user_iocb), true);
2209 		if (ret)
2210 			break;
2211 	}
2212 	if (nr > AIO_PLUG_THRESHOLD)
2213 		blk_finish_plug(&plug);
2214 
2215 	percpu_ref_put(&ctx->users);
2216 	return i ? i : ret;
2217 }
2218 #endif
2219 
2220 /* sys_io_cancel:
2221  *	Attempts to cancel an iocb previously passed to io_submit().  If
2222  *	cancellation is successfully initiated, -EINPROGRESS is returned;
2223  *	the completion event is always delivered through the ring buffer,
2224  *	and the result argument is ignored.  May fail with -EFAULT if any
2225  *	of the data structures pointed to are invalid.  May fail with
2226  *	-EINVAL if the aio_context specified by ctx_id is invalid or if no
2227  *	matching iocb is found.  May fail with -EAGAIN if the iocb specified
2228  *	was not cancelled.  Will fail with -ENOSYS if not implemented.
2229  */
2230 SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
2231 		struct io_event __user *, result)
2232 {
2233 	struct kioctx *ctx;
2234 	struct aio_kiocb *kiocb;
2235 	int ret = -EINVAL;
2236 	u32 key;
2237 	u64 obj = (u64)(unsigned long)iocb;
2238 
2239 	if (unlikely(get_user(key, &iocb->aio_key)))
2240 		return -EFAULT;
2241 	if (unlikely(key != KIOCB_KEY))
2242 		return -EINVAL;
2243 
2244 	ctx = lookup_ioctx(ctx_id);
2245 	if (unlikely(!ctx))
2246 		return -EINVAL;
2247 
2248 	spin_lock_irq(&ctx->ctx_lock);
2249 	list_for_each_entry(kiocb, &ctx->active_reqs, ki_list) {
2250 		if (kiocb->ki_res.obj == obj) {
2251 			ret = kiocb->ki_cancel(&kiocb->rw);
2252 			list_del_init(&kiocb->ki_list);
2253 			break;
2254 		}
2255 	}
2256 	spin_unlock_irq(&ctx->ctx_lock);
2257 
2258 	if (!ret) {
2259 		/*
2260 		 * The result argument is no longer used - the io_event is
2261 		 * always delivered via the ring buffer. -EINPROGRESS indicates
2262 		 * cancellation is in progress.
2263 		 */
2264 		ret = -EINPROGRESS;
2265 	}
2266 
2267 	percpu_ref_put(&ctx->users);
2268 
2269 	return ret;
2270 }
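
/*
 * Usage sketch for io_cancel(2) (illustrative): "ctx" and "cb" are
 * placeholder names, and "cb" must be the same userspace iocb pointer that
 * was passed to io_submit().  With this implementation a successfully
 * initiated cancellation reports -EINPROGRESS and the completion event
 * still arrives via io_getevents(); the result argument is not written to.
 * reap_events() below is a placeholder for whatever loop reaps completions.
 *
 *	struct io_event ev;
 *	if (syscall(__NR_io_cancel, ctx, &cb, &ev) == -1 &&
 *	    errno == EINPROGRESS)
 *		reap_events(ctx);
 */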
2271 
2272 static long do_io_getevents(aio_context_t ctx_id,
2273 		long min_nr,
2274 		long nr,
2275 		struct io_event __user *events,
2276 		struct timespec64 *ts)
2277 {
2278 	ktime_t until = ts ? timespec64_to_ktime(*ts) : KTIME_MAX;
2279 	struct kioctx *ioctx = lookup_ioctx(ctx_id);
2280 	long ret = -EINVAL;
2281 
2282 	if (likely(ioctx)) {
2283 		if (likely(min_nr <= nr && min_nr >= 0))
2284 			ret = read_events(ioctx, min_nr, nr, events, until);
2285 		percpu_ref_put(&ioctx->users);
2286 	}
2287 
2288 	return ret;
2289 }
2290 
2291 /* io_getevents:
2292  *	Attempts to read at least min_nr events and up to nr events from
2293  *	the completion queue for the aio_context specified by ctx_id. If
2294  *	it succeeds, the number of read events is returned. May fail with
2295  *	-EINVAL if ctx_id is invalid, if min_nr or nr is out of range, or
2296  *	if the timeout is invalid.  May fail with -EFAULT
2297  *	if any of the memory specified is invalid.  May return 0 or
2298  *	< min_nr if the timeout specified by timeout has elapsed
2299  *	before sufficient events are available, where timeout == NULL
2300  *	specifies an infinite timeout. Note that the timeout pointed to by
2301  *	timeout is relative.  Will fail with -ENOSYS if not implemented.
2302  */
2303 #ifdef CONFIG_64BIT
2304 
2305 SYSCALL_DEFINE5(io_getevents, aio_context_t, ctx_id,
2306 		long, min_nr,
2307 		long, nr,
2308 		struct io_event __user *, events,
2309 		struct __kernel_timespec __user *, timeout)
2310 {
2311 	struct timespec64	ts;
2312 	int			ret;
2313 
2314 	if (timeout && unlikely(get_timespec64(&ts, timeout)))
2315 		return -EFAULT;
2316 
2317 	ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &ts : NULL);
2318 	if (!ret && signal_pending(current))
2319 		ret = -EINTR;
2320 	return ret;
2321 }
2322 
2323 #endif
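
/*
 * Usage sketch for io_getevents(2) on a 64-bit ABI (illustrative): wait up
 * to one second for between 1 and 64 completions on an io_setup()-created
 * context "ctx"; handle() is a placeholder for the caller's completion
 * handler.  Each io_event carries the iocb pointer in .obj, the aio_data
 * cookie in .data and the operation's result in .res.
 *
 *	struct io_event evs[64];
 *	struct timespec ts = { .tv_sec = 1 };
 *	int i, n = syscall(__NR_io_getevents, ctx, 1, 64, evs, &ts);
 *	for (i = 0; i < n; i++)
 *		handle(evs[i].data, evs[i].res);
 */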
2324 
2325 struct __aio_sigset {
2326 	const sigset_t __user	*sigmask;
2327 	size_t		sigsetsize;
2328 };
2329 
2330 SYSCALL_DEFINE6(io_pgetevents,
2331 		aio_context_t, ctx_id,
2332 		long, min_nr,
2333 		long, nr,
2334 		struct io_event __user *, events,
2335 		struct __kernel_timespec __user *, timeout,
2336 		const struct __aio_sigset __user *, usig)
2337 {
2338 	struct __aio_sigset	ksig = { NULL, };
2339 	struct timespec64	ts;
2340 	bool interrupted;
2341 	int ret;
2342 
2343 	if (timeout && unlikely(get_timespec64(&ts, timeout)))
2344 		return -EFAULT;
2345 
2346 	if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
2347 		return -EFAULT;
2348 
2349 	ret = set_user_sigmask(ksig.sigmask, ksig.sigsetsize);
2350 	if (ret)
2351 		return ret;
2352 
2353 	ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &ts : NULL);
2354 
2355 	interrupted = signal_pending(current);
2356 	restore_saved_sigmask_unless(interrupted);
2357 	if (interrupted && !ret)
2358 		ret = -ERESTARTNOHAND;
2359 
2360 	return ret;
2361 }
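
/*
 * Usage sketch for io_pgetevents(2) (illustrative): atomically apply a
 * signal mask for the duration of the wait, like ppoll(2)/pselect(2).
 * "ctx" and "evs" are placeholder names; the sigsetsize must match the
 * kernel's sigset size (8 bytes on common 64-bit ABIs), not glibc's much
 * larger sizeof(sigset_t).
 *
 *	sigset_t mask;
 *	sigfillset(&mask);
 *	sigdelset(&mask, SIGINT);
 *	struct __aio_sigset as = { &mask, 8 };
 *	int n = syscall(__NR_io_pgetevents, ctx, 1, 64, evs, NULL, &as);
 */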
2362 
2363 #if defined(CONFIG_COMPAT_32BIT_TIME) && !defined(CONFIG_64BIT)
2364 
2365 SYSCALL_DEFINE6(io_pgetevents_time32,
2366 		aio_context_t, ctx_id,
2367 		long, min_nr,
2368 		long, nr,
2369 		struct io_event __user *, events,
2370 		struct old_timespec32 __user *, timeout,
2371 		const struct __aio_sigset __user *, usig)
2372 {
2373 	struct __aio_sigset	ksig = { NULL, };
2374 	struct timespec64	ts;
2375 	bool interrupted;
2376 	int ret;
2377 
2378 	if (timeout && unlikely(get_old_timespec32(&ts, timeout)))
2379 		return -EFAULT;
2380 
2381 	if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
2382 		return -EFAULT;
2383 
2385 	ret = set_user_sigmask(ksig.sigmask, ksig.sigsetsize);
2386 	if (ret)
2387 		return ret;
2388 
2389 	ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &ts : NULL);
2390 
2391 	interrupted = signal_pending(current);
2392 	restore_saved_sigmask_unless(interrupted);
2393 	if (interrupted && !ret)
2394 		ret = -ERESTARTNOHAND;
2395 
2396 	return ret;
2397 }
2398 
2399 #endif
2400 
2401 #if defined(CONFIG_COMPAT_32BIT_TIME)
2402 
2403 SYSCALL_DEFINE5(io_getevents_time32, __u32, ctx_id,
2404 		__s32, min_nr,
2405 		__s32, nr,
2406 		struct io_event __user *, events,
2407 		struct old_timespec32 __user *, timeout)
2408 {
2409 	struct timespec64 t;
2410 	int ret;
2411 
2412 	if (timeout && get_old_timespec32(&t, timeout))
2413 		return -EFAULT;
2414 
2415 	ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &t : NULL);
2416 	if (!ret && signal_pending(current))
2417 		ret = -EINTR;
2418 	return ret;
2419 }
2420 
2421 #endif
2422 
2423 #ifdef CONFIG_COMPAT
2424 
2425 struct __compat_aio_sigset {
2426 	compat_uptr_t		sigmask;
2427 	compat_size_t		sigsetsize;
2428 };
2429 
2430 #if defined(CONFIG_COMPAT_32BIT_TIME)
2431 
2432 COMPAT_SYSCALL_DEFINE6(io_pgetevents,
2433 		compat_aio_context_t, ctx_id,
2434 		compat_long_t, min_nr,
2435 		compat_long_t, nr,
2436 		struct io_event __user *, events,
2437 		struct old_timespec32 __user *, timeout,
2438 		const struct __compat_aio_sigset __user *, usig)
2439 {
2440 	struct __compat_aio_sigset ksig = { 0, };
2441 	struct timespec64 t;
2442 	bool interrupted;
2443 	int ret;
2444 
2445 	if (timeout && get_old_timespec32(&t, timeout))
2446 		return -EFAULT;
2447 
2448 	if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
2449 		return -EFAULT;
2450 
2451 	ret = set_compat_user_sigmask(compat_ptr(ksig.sigmask), ksig.sigsetsize);
2452 	if (ret)
2453 		return ret;
2454 
2455 	ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &t : NULL);
2456 
2457 	interrupted = signal_pending(current);
2458 	restore_saved_sigmask_unless(interrupted);
2459 	if (interrupted && !ret)
2460 		ret = -ERESTARTNOHAND;
2461 
2462 	return ret;
2463 }
2464 
2465 #endif
2466 
2467 COMPAT_SYSCALL_DEFINE6(io_pgetevents_time64,
2468 		compat_aio_context_t, ctx_id,
2469 		compat_long_t, min_nr,
2470 		compat_long_t, nr,
2471 		struct io_event __user *, events,
2472 		struct __kernel_timespec __user *, timeout,
2473 		const struct __compat_aio_sigset __user *, usig)
2474 {
2475 	struct __compat_aio_sigset ksig = { 0, };
2476 	struct timespec64 t;
2477 	bool interrupted;
2478 	int ret;
2479 
2480 	if (timeout && get_timespec64(&t, timeout))
2481 		return -EFAULT;
2482 
2483 	if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
2484 		return -EFAULT;
2485 
2486 	ret = set_compat_user_sigmask(compat_ptr(ksig.sigmask), ksig.sigsetsize);
2487 	if (ret)
2488 		return ret;
2489 
2490 	ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &t : NULL);
2491 
2492 	interrupted = signal_pending(current);
2493 	restore_saved_sigmask_unless(interrupted);
2494 	if (interrupted && !ret)
2495 		ret = -ERESTARTNOHAND;
2496 
2497 	return ret;
2498 }
2499 #endif
2500