1 /* 2 * An async IO implementation for Linux 3 * Written by Benjamin LaHaise <bcrl@kvack.org> 4 * 5 * Implements an efficient asynchronous io interface. 6 * 7 * Copyright 2000, 2001, 2002 Red Hat, Inc. All Rights Reserved. 8 * Copyright 2018 Christoph Hellwig. 9 * 10 * See ../COPYING for licensing terms. 11 */ 12 #define pr_fmt(fmt) "%s: " fmt, __func__ 13 14 #include <linux/kernel.h> 15 #include <linux/init.h> 16 #include <linux/errno.h> 17 #include <linux/time.h> 18 #include <linux/aio_abi.h> 19 #include <linux/export.h> 20 #include <linux/syscalls.h> 21 #include <linux/backing-dev.h> 22 #include <linux/refcount.h> 23 #include <linux/uio.h> 24 25 #include <linux/sched/signal.h> 26 #include <linux/fs.h> 27 #include <linux/file.h> 28 #include <linux/mm.h> 29 #include <linux/mman.h> 30 #include <linux/percpu.h> 31 #include <linux/slab.h> 32 #include <linux/timer.h> 33 #include <linux/aio.h> 34 #include <linux/highmem.h> 35 #include <linux/workqueue.h> 36 #include <linux/security.h> 37 #include <linux/eventfd.h> 38 #include <linux/blkdev.h> 39 #include <linux/compat.h> 40 #include <linux/migrate.h> 41 #include <linux/ramfs.h> 42 #include <linux/percpu-refcount.h> 43 #include <linux/mount.h> 44 #include <linux/pseudo_fs.h> 45 46 #include <linux/uaccess.h> 47 #include <linux/nospec.h> 48 49 #include "internal.h" 50 51 #define KIOCB_KEY 0 52 53 #define AIO_RING_MAGIC 0xa10a10a1 54 #define AIO_RING_COMPAT_FEATURES 1 55 #define AIO_RING_INCOMPAT_FEATURES 0 56 struct aio_ring { 57 unsigned id; /* kernel internal index number */ 58 unsigned nr; /* number of io_events */ 59 unsigned head; /* Written to by userland or under ring_lock 60 * mutex by aio_read_events_ring(). */ 61 unsigned tail; 62 63 unsigned magic; 64 unsigned compat_features; 65 unsigned incompat_features; 66 unsigned header_length; /* size of aio_ring */ 67 68 69 struct io_event io_events[]; 70 }; /* 128 bytes + ring size */ 71 72 /* 73 * Plugging is meant to work with larger batches of IOs. If we don't 74 * have more than the below, then don't bother setting up a plug. 75 */ 76 #define AIO_PLUG_THRESHOLD 2 77 78 #define AIO_RING_PAGES 8 79 80 struct kioctx_table { 81 struct rcu_head rcu; 82 unsigned nr; 83 struct kioctx __rcu *table[] __counted_by(nr); 84 }; 85 86 struct kioctx_cpu { 87 unsigned reqs_available; 88 }; 89 90 struct ctx_rq_wait { 91 struct completion comp; 92 atomic_t count; 93 }; 94 95 struct kioctx { 96 struct percpu_ref users; 97 atomic_t dead; 98 99 struct percpu_ref reqs; 100 101 unsigned long user_id; 102 103 struct __percpu kioctx_cpu *cpu; 104 105 /* 106 * For percpu reqs_available, number of slots we move to/from global 107 * counter at a time: 108 */ 109 unsigned req_batch; 110 /* 111 * This is what userspace passed to io_setup(), it's not used for 112 * anything but counting against the global max_reqs quota. 
113 * 114 * The real limit is nr_events - 1, which will be larger (see 115 * aio_setup_ring()) 116 */ 117 unsigned max_reqs; 118 119 /* Size of ringbuffer, in units of struct io_event */ 120 unsigned nr_events; 121 122 unsigned long mmap_base; 123 unsigned long mmap_size; 124 125 struct folio **ring_folios; 126 long nr_pages; 127 128 struct rcu_work free_rwork; /* see free_ioctx() */ 129 130 /* 131 * signals when all in-flight requests are done 132 */ 133 struct ctx_rq_wait *rq_wait; 134 135 struct { 136 /* 137 * This counts the number of available slots in the ringbuffer, 138 * so we avoid overflowing it: it's decremented (if positive) 139 * when allocating a kiocb and incremented when the resulting 140 * io_event is pulled off the ringbuffer. 141 * 142 * We batch accesses to it with a percpu version. 143 */ 144 atomic_t reqs_available; 145 } ____cacheline_aligned_in_smp; 146 147 struct { 148 spinlock_t ctx_lock; 149 struct list_head active_reqs; /* used for cancellation */ 150 } ____cacheline_aligned_in_smp; 151 152 struct { 153 struct mutex ring_lock; 154 wait_queue_head_t wait; 155 } ____cacheline_aligned_in_smp; 156 157 struct { 158 unsigned tail; 159 unsigned completed_events; 160 spinlock_t completion_lock; 161 } ____cacheline_aligned_in_smp; 162 163 struct folio *internal_folios[AIO_RING_PAGES]; 164 struct file *aio_ring_file; 165 166 unsigned id; 167 }; 168 169 /* 170 * First field must be the file pointer in all the 171 * iocb unions! See also 'struct kiocb' in <linux/fs.h> 172 */ 173 struct fsync_iocb { 174 struct file *file; 175 struct work_struct work; 176 bool datasync; 177 struct cred *creds; 178 }; 179 180 struct poll_iocb { 181 struct file *file; 182 struct wait_queue_head *head; 183 __poll_t events; 184 bool cancelled; 185 bool work_scheduled; 186 bool work_need_resched; 187 struct wait_queue_entry wait; 188 struct work_struct work; 189 }; 190 191 /* 192 * NOTE! Each of the iocb union members has the file pointer 193 * as the first entry in their struct definition. So you can 194 * access the file pointer through any of the sub-structs, 195 * or directly as just 'ki_filp' in this struct. 196 */ 197 struct aio_kiocb { 198 union { 199 struct file *ki_filp; 200 struct kiocb rw; 201 struct fsync_iocb fsync; 202 struct poll_iocb poll; 203 }; 204 205 struct kioctx *ki_ctx; 206 kiocb_cancel_fn *ki_cancel; 207 208 struct io_event ki_res; 209 210 struct list_head ki_list; /* the aio core uses this 211 * for cancellation */ 212 refcount_t ki_refcnt; 213 214 /* 215 * If the aio_resfd field of the userspace iocb is not zero, 216 * this is the underlying eventfd context to deliver events to. 
217 */ 218 struct eventfd_ctx *ki_eventfd; 219 }; 220 221 /*------ sysctl variables----*/ 222 static DEFINE_SPINLOCK(aio_nr_lock); 223 static unsigned long aio_nr; /* current system wide number of aio requests */ 224 static unsigned long aio_max_nr = 0x10000; /* system wide maximum number of aio requests */ 225 /*----end sysctl variables---*/ 226 #ifdef CONFIG_SYSCTL 227 static struct ctl_table aio_sysctls[] = { 228 { 229 .procname = "aio-nr", 230 .data = &aio_nr, 231 .maxlen = sizeof(aio_nr), 232 .mode = 0444, 233 .proc_handler = proc_doulongvec_minmax, 234 }, 235 { 236 .procname = "aio-max-nr", 237 .data = &aio_max_nr, 238 .maxlen = sizeof(aio_max_nr), 239 .mode = 0644, 240 .proc_handler = proc_doulongvec_minmax, 241 }, 242 }; 243 244 static void __init aio_sysctl_init(void) 245 { 246 register_sysctl_init("fs", aio_sysctls); 247 } 248 #else 249 #define aio_sysctl_init() do { } while (0) 250 #endif 251 252 static struct kmem_cache *kiocb_cachep; 253 static struct kmem_cache *kioctx_cachep; 254 255 static struct vfsmount *aio_mnt; 256 257 static const struct file_operations aio_ring_fops; 258 static const struct address_space_operations aio_ctx_aops; 259 260 static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages) 261 { 262 struct file *file; 263 struct inode *inode = alloc_anon_inode(aio_mnt->mnt_sb); 264 if (IS_ERR(inode)) 265 return ERR_CAST(inode); 266 267 inode->i_mapping->a_ops = &aio_ctx_aops; 268 inode->i_mapping->i_private_data = ctx; 269 inode->i_size = PAGE_SIZE * nr_pages; 270 271 file = alloc_file_pseudo(inode, aio_mnt, "[aio]", 272 O_RDWR, &aio_ring_fops); 273 if (IS_ERR(file)) 274 iput(inode); 275 return file; 276 } 277 278 static int aio_init_fs_context(struct fs_context *fc) 279 { 280 if (!init_pseudo(fc, AIO_RING_MAGIC)) 281 return -ENOMEM; 282 fc->s_iflags |= SB_I_NOEXEC; 283 return 0; 284 } 285 286 /* aio_setup 287 * Creates the slab caches used by the aio routines, panic on 288 * failure as this is done early during the boot sequence. 289 */ 290 static int __init aio_setup(void) 291 { 292 static struct file_system_type aio_fs = { 293 .name = "aio", 294 .init_fs_context = aio_init_fs_context, 295 .kill_sb = kill_anon_super, 296 }; 297 aio_mnt = kern_mount(&aio_fs); 298 if (IS_ERR(aio_mnt)) 299 panic("Failed to create aio fs mount."); 300 301 kiocb_cachep = KMEM_CACHE(aio_kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC); 302 kioctx_cachep = KMEM_CACHE(kioctx,SLAB_HWCACHE_ALIGN|SLAB_PANIC); 303 aio_sysctl_init(); 304 return 0; 305 } 306 __initcall(aio_setup); 307 308 static void put_aio_ring_file(struct kioctx *ctx) 309 { 310 struct file *aio_ring_file = ctx->aio_ring_file; 311 struct address_space *i_mapping; 312 313 if (aio_ring_file) { 314 truncate_setsize(file_inode(aio_ring_file), 0); 315 316 /* Prevent further access to the kioctx from migratepages */ 317 i_mapping = aio_ring_file->f_mapping; 318 spin_lock(&i_mapping->i_private_lock); 319 i_mapping->i_private_data = NULL; 320 ctx->aio_ring_file = NULL; 321 spin_unlock(&i_mapping->i_private_lock); 322 323 fput(aio_ring_file); 324 } 325 } 326 327 static void aio_free_ring(struct kioctx *ctx) 328 { 329 int i; 330 331 /* Disconnect the kiotx from the ring file. This prevents future 332 * accesses to the kioctx from page migration. 
333 */ 334 put_aio_ring_file(ctx); 335 336 for (i = 0; i < ctx->nr_pages; i++) { 337 struct folio *folio = ctx->ring_folios[i]; 338 339 if (!folio) 340 continue; 341 342 pr_debug("pid(%d) [%d] folio->count=%d\n", current->pid, i, 343 folio_ref_count(folio)); 344 ctx->ring_folios[i] = NULL; 345 folio_put(folio); 346 } 347 348 if (ctx->ring_folios && ctx->ring_folios != ctx->internal_folios) { 349 kfree(ctx->ring_folios); 350 ctx->ring_folios = NULL; 351 } 352 } 353 354 static int aio_ring_mremap(struct vm_area_struct *vma) 355 { 356 struct file *file = vma->vm_file; 357 struct mm_struct *mm = vma->vm_mm; 358 struct kioctx_table *table; 359 int i, res = -EINVAL; 360 361 spin_lock(&mm->ioctx_lock); 362 rcu_read_lock(); 363 table = rcu_dereference(mm->ioctx_table); 364 if (!table) 365 goto out_unlock; 366 367 for (i = 0; i < table->nr; i++) { 368 struct kioctx *ctx; 369 370 ctx = rcu_dereference(table->table[i]); 371 if (ctx && ctx->aio_ring_file == file) { 372 if (!atomic_read(&ctx->dead)) { 373 ctx->user_id = ctx->mmap_base = vma->vm_start; 374 res = 0; 375 } 376 break; 377 } 378 } 379 380 out_unlock: 381 rcu_read_unlock(); 382 spin_unlock(&mm->ioctx_lock); 383 return res; 384 } 385 386 static const struct vm_operations_struct aio_ring_vm_ops = { 387 .mremap = aio_ring_mremap, 388 #if IS_ENABLED(CONFIG_MMU) 389 .fault = filemap_fault, 390 .map_pages = filemap_map_pages, 391 .page_mkwrite = filemap_page_mkwrite, 392 #endif 393 }; 394 395 static int aio_ring_mmap(struct file *file, struct vm_area_struct *vma) 396 { 397 vm_flags_set(vma, VM_DONTEXPAND); 398 vma->vm_ops = &aio_ring_vm_ops; 399 return 0; 400 } 401 402 static const struct file_operations aio_ring_fops = { 403 .mmap = aio_ring_mmap, 404 }; 405 406 #if IS_ENABLED(CONFIG_MIGRATION) 407 static int aio_migrate_folio(struct address_space *mapping, struct folio *dst, 408 struct folio *src, enum migrate_mode mode) 409 { 410 struct kioctx *ctx; 411 unsigned long flags; 412 pgoff_t idx; 413 int rc; 414 415 /* 416 * We cannot support the _NO_COPY case here, because copy needs to 417 * happen under the ctx->completion_lock. That does not work with the 418 * migration workflow of MIGRATE_SYNC_NO_COPY. 419 */ 420 if (mode == MIGRATE_SYNC_NO_COPY) 421 return -EINVAL; 422 423 rc = 0; 424 425 /* mapping->i_private_lock here protects against the kioctx teardown. */ 426 spin_lock(&mapping->i_private_lock); 427 ctx = mapping->i_private_data; 428 if (!ctx) { 429 rc = -EINVAL; 430 goto out; 431 } 432 433 /* The ring_lock mutex. The prevents aio_read_events() from writing 434 * to the ring's head, and prevents page migration from mucking in 435 * a partially initialized kiotx. 436 */ 437 if (!mutex_trylock(&ctx->ring_lock)) { 438 rc = -EAGAIN; 439 goto out; 440 } 441 442 idx = src->index; 443 if (idx < (pgoff_t)ctx->nr_pages) { 444 /* Make sure the old folio hasn't already been changed */ 445 if (ctx->ring_folios[idx] != src) 446 rc = -EAGAIN; 447 } else 448 rc = -EINVAL; 449 450 if (rc != 0) 451 goto out_unlock; 452 453 /* Writeback must be complete */ 454 BUG_ON(folio_test_writeback(src)); 455 folio_get(dst); 456 457 rc = folio_migrate_mapping(mapping, dst, src, 1); 458 if (rc != MIGRATEPAGE_SUCCESS) { 459 folio_put(dst); 460 goto out_unlock; 461 } 462 463 /* Take completion_lock to prevent other writes to the ring buffer 464 * while the old folio is copied to the new. This prevents new 465 * events from being lost. 
466 */ 467 spin_lock_irqsave(&ctx->completion_lock, flags); 468 folio_migrate_copy(dst, src); 469 BUG_ON(ctx->ring_folios[idx] != src); 470 ctx->ring_folios[idx] = dst; 471 spin_unlock_irqrestore(&ctx->completion_lock, flags); 472 473 /* The old folio is no longer accessible. */ 474 folio_put(src); 475 476 out_unlock: 477 mutex_unlock(&ctx->ring_lock); 478 out: 479 spin_unlock(&mapping->i_private_lock); 480 return rc; 481 } 482 #else 483 #define aio_migrate_folio NULL 484 #endif 485 486 static const struct address_space_operations aio_ctx_aops = { 487 .dirty_folio = noop_dirty_folio, 488 .migrate_folio = aio_migrate_folio, 489 }; 490 491 static int aio_setup_ring(struct kioctx *ctx, unsigned int nr_events) 492 { 493 struct aio_ring *ring; 494 struct mm_struct *mm = current->mm; 495 unsigned long size, unused; 496 int nr_pages; 497 int i; 498 struct file *file; 499 500 /* Compensate for the ring buffer's head/tail overlap entry */ 501 nr_events += 2; /* 1 is required, 2 for good luck */ 502 503 size = sizeof(struct aio_ring); 504 size += sizeof(struct io_event) * nr_events; 505 506 nr_pages = PFN_UP(size); 507 if (nr_pages < 0) 508 return -EINVAL; 509 510 file = aio_private_file(ctx, nr_pages); 511 if (IS_ERR(file)) { 512 ctx->aio_ring_file = NULL; 513 return -ENOMEM; 514 } 515 516 ctx->aio_ring_file = file; 517 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) 518 / sizeof(struct io_event); 519 520 ctx->ring_folios = ctx->internal_folios; 521 if (nr_pages > AIO_RING_PAGES) { 522 ctx->ring_folios = kcalloc(nr_pages, sizeof(struct folio *), 523 GFP_KERNEL); 524 if (!ctx->ring_folios) { 525 put_aio_ring_file(ctx); 526 return -ENOMEM; 527 } 528 } 529 530 for (i = 0; i < nr_pages; i++) { 531 struct folio *folio; 532 533 folio = __filemap_get_folio(file->f_mapping, i, 534 FGP_LOCK | FGP_ACCESSED | FGP_CREAT, 535 GFP_USER | __GFP_ZERO); 536 if (IS_ERR(folio)) 537 break; 538 539 pr_debug("pid(%d) [%d] folio->count=%d\n", current->pid, i, 540 folio_ref_count(folio)); 541 folio_end_read(folio, true); 542 543 ctx->ring_folios[i] = folio; 544 } 545 ctx->nr_pages = i; 546 547 if (unlikely(i != nr_pages)) { 548 aio_free_ring(ctx); 549 return -ENOMEM; 550 } 551 552 ctx->mmap_size = nr_pages * PAGE_SIZE; 553 pr_debug("attempting mmap of %lu bytes\n", ctx->mmap_size); 554 555 if (mmap_write_lock_killable(mm)) { 556 ctx->mmap_size = 0; 557 aio_free_ring(ctx); 558 return -EINTR; 559 } 560 561 ctx->mmap_base = do_mmap(ctx->aio_ring_file, 0, ctx->mmap_size, 562 PROT_READ | PROT_WRITE, 563 MAP_SHARED, 0, 0, &unused, NULL); 564 mmap_write_unlock(mm); 565 if (IS_ERR((void *)ctx->mmap_base)) { 566 ctx->mmap_size = 0; 567 aio_free_ring(ctx); 568 return -ENOMEM; 569 } 570 571 pr_debug("mmap address: 0x%08lx\n", ctx->mmap_base); 572 573 ctx->user_id = ctx->mmap_base; 574 ctx->nr_events = nr_events; /* trusted copy */ 575 576 ring = folio_address(ctx->ring_folios[0]); 577 ring->nr = nr_events; /* user copy */ 578 ring->id = ~0U; 579 ring->head = ring->tail = 0; 580 ring->magic = AIO_RING_MAGIC; 581 ring->compat_features = AIO_RING_COMPAT_FEATURES; 582 ring->incompat_features = AIO_RING_INCOMPAT_FEATURES; 583 ring->header_length = sizeof(struct aio_ring); 584 flush_dcache_folio(ctx->ring_folios[0]); 585 586 return 0; 587 } 588 589 #define AIO_EVENTS_PER_PAGE (PAGE_SIZE / sizeof(struct io_event)) 590 #define AIO_EVENTS_FIRST_PAGE ((PAGE_SIZE - sizeof(struct aio_ring)) / sizeof(struct io_event)) 591 #define AIO_EVENTS_OFFSET (AIO_EVENTS_PER_PAGE - AIO_EVENTS_FIRST_PAGE) 592 593 void kiocb_set_cancel_fn(struct 
kiocb *iocb, kiocb_cancel_fn *cancel) 594 { 595 struct aio_kiocb *req; 596 struct kioctx *ctx; 597 unsigned long flags; 598 599 /* 600 * kiocb didn't come from aio or is neither a read nor a write, hence 601 * ignore it. 602 */ 603 if (!(iocb->ki_flags & IOCB_AIO_RW)) 604 return; 605 606 req = container_of(iocb, struct aio_kiocb, rw); 607 608 if (WARN_ON_ONCE(!list_empty(&req->ki_list))) 609 return; 610 611 ctx = req->ki_ctx; 612 613 spin_lock_irqsave(&ctx->ctx_lock, flags); 614 list_add_tail(&req->ki_list, &ctx->active_reqs); 615 req->ki_cancel = cancel; 616 spin_unlock_irqrestore(&ctx->ctx_lock, flags); 617 } 618 EXPORT_SYMBOL(kiocb_set_cancel_fn); 619 620 /* 621 * free_ioctx() should be RCU delayed to synchronize against the RCU 622 * protected lookup_ioctx() and also needs process context to call 623 * aio_free_ring(). Use rcu_work. 624 */ 625 static void free_ioctx(struct work_struct *work) 626 { 627 struct kioctx *ctx = container_of(to_rcu_work(work), struct kioctx, 628 free_rwork); 629 pr_debug("freeing %p\n", ctx); 630 631 aio_free_ring(ctx); 632 free_percpu(ctx->cpu); 633 percpu_ref_exit(&ctx->reqs); 634 percpu_ref_exit(&ctx->users); 635 kmem_cache_free(kioctx_cachep, ctx); 636 } 637 638 static void free_ioctx_reqs(struct percpu_ref *ref) 639 { 640 struct kioctx *ctx = container_of(ref, struct kioctx, reqs); 641 642 /* At this point we know that there are no any in-flight requests */ 643 if (ctx->rq_wait && atomic_dec_and_test(&ctx->rq_wait->count)) 644 complete(&ctx->rq_wait->comp); 645 646 /* Synchronize against RCU protected table->table[] dereferences */ 647 INIT_RCU_WORK(&ctx->free_rwork, free_ioctx); 648 queue_rcu_work(system_wq, &ctx->free_rwork); 649 } 650 651 /* 652 * When this function runs, the kioctx has been removed from the "hash table" 653 * and ctx->users has dropped to 0, so we know no more kiocbs can be submitted - 654 * now it's safe to cancel any that need to be. 655 */ 656 static void free_ioctx_users(struct percpu_ref *ref) 657 { 658 struct kioctx *ctx = container_of(ref, struct kioctx, users); 659 struct aio_kiocb *req; 660 661 spin_lock_irq(&ctx->ctx_lock); 662 663 while (!list_empty(&ctx->active_reqs)) { 664 req = list_first_entry(&ctx->active_reqs, 665 struct aio_kiocb, ki_list); 666 req->ki_cancel(&req->rw); 667 list_del_init(&req->ki_list); 668 } 669 670 spin_unlock_irq(&ctx->ctx_lock); 671 672 percpu_ref_kill(&ctx->reqs); 673 percpu_ref_put(&ctx->reqs); 674 } 675 676 static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm) 677 { 678 unsigned i, new_nr; 679 struct kioctx_table *table, *old; 680 struct aio_ring *ring; 681 682 spin_lock(&mm->ioctx_lock); 683 table = rcu_dereference_raw(mm->ioctx_table); 684 685 while (1) { 686 if (table) 687 for (i = 0; i < table->nr; i++) 688 if (!rcu_access_pointer(table->table[i])) { 689 ctx->id = i; 690 rcu_assign_pointer(table->table[i], ctx); 691 spin_unlock(&mm->ioctx_lock); 692 693 /* While kioctx setup is in progress, 694 * we are protected from page migration 695 * changes ring_folios by ->ring_lock. 696 */ 697 ring = folio_address(ctx->ring_folios[0]); 698 ring->id = ctx->id; 699 return 0; 700 } 701 702 new_nr = (table ? 
table->nr : 1) * 4; 703 spin_unlock(&mm->ioctx_lock); 704 705 table = kzalloc(struct_size(table, table, new_nr), GFP_KERNEL); 706 if (!table) 707 return -ENOMEM; 708 709 table->nr = new_nr; 710 711 spin_lock(&mm->ioctx_lock); 712 old = rcu_dereference_raw(mm->ioctx_table); 713 714 if (!old) { 715 rcu_assign_pointer(mm->ioctx_table, table); 716 } else if (table->nr > old->nr) { 717 memcpy(table->table, old->table, 718 old->nr * sizeof(struct kioctx *)); 719 720 rcu_assign_pointer(mm->ioctx_table, table); 721 kfree_rcu(old, rcu); 722 } else { 723 kfree(table); 724 table = old; 725 } 726 } 727 } 728 729 static void aio_nr_sub(unsigned nr) 730 { 731 spin_lock(&aio_nr_lock); 732 if (WARN_ON(aio_nr - nr > aio_nr)) 733 aio_nr = 0; 734 else 735 aio_nr -= nr; 736 spin_unlock(&aio_nr_lock); 737 } 738 739 /* ioctx_alloc 740 * Allocates and initializes an ioctx. Returns an ERR_PTR if it failed. 741 */ 742 static struct kioctx *ioctx_alloc(unsigned nr_events) 743 { 744 struct mm_struct *mm = current->mm; 745 struct kioctx *ctx; 746 int err = -ENOMEM; 747 748 /* 749 * Store the original nr_events -- what userspace passed to io_setup(), 750 * for counting against the global limit -- before it changes. 751 */ 752 unsigned int max_reqs = nr_events; 753 754 /* 755 * We keep track of the number of available ringbuffer slots, to prevent 756 * overflow (reqs_available), and we also use percpu counters for this. 757 * 758 * So since up to half the slots might be on other cpu's percpu counters 759 * and unavailable, double nr_events so userspace sees what they 760 * expected: additionally, we move req_batch slots to/from percpu 761 * counters at a time, so make sure that isn't 0: 762 */ 763 nr_events = max(nr_events, num_possible_cpus() * 4); 764 nr_events *= 2; 765 766 /* Prevent overflows */ 767 if (nr_events > (0x10000000U / sizeof(struct io_event))) { 768 pr_debug("ENOMEM: nr_events too high\n"); 769 return ERR_PTR(-EINVAL); 770 } 771 772 if (!nr_events || (unsigned long)max_reqs > aio_max_nr) 773 return ERR_PTR(-EAGAIN); 774 775 ctx = kmem_cache_zalloc(kioctx_cachep, GFP_KERNEL); 776 if (!ctx) 777 return ERR_PTR(-ENOMEM); 778 779 ctx->max_reqs = max_reqs; 780 781 spin_lock_init(&ctx->ctx_lock); 782 spin_lock_init(&ctx->completion_lock); 783 mutex_init(&ctx->ring_lock); 784 /* Protect against page migration throughout kiotx setup by keeping 785 * the ring_lock mutex held until setup is complete. 
*/ 786 mutex_lock(&ctx->ring_lock); 787 init_waitqueue_head(&ctx->wait); 788 789 INIT_LIST_HEAD(&ctx->active_reqs); 790 791 if (percpu_ref_init(&ctx->users, free_ioctx_users, 0, GFP_KERNEL)) 792 goto err; 793 794 if (percpu_ref_init(&ctx->reqs, free_ioctx_reqs, 0, GFP_KERNEL)) 795 goto err; 796 797 ctx->cpu = alloc_percpu(struct kioctx_cpu); 798 if (!ctx->cpu) 799 goto err; 800 801 err = aio_setup_ring(ctx, nr_events); 802 if (err < 0) 803 goto err; 804 805 atomic_set(&ctx->reqs_available, ctx->nr_events - 1); 806 ctx->req_batch = (ctx->nr_events - 1) / (num_possible_cpus() * 4); 807 if (ctx->req_batch < 1) 808 ctx->req_batch = 1; 809 810 /* limit the number of system wide aios */ 811 spin_lock(&aio_nr_lock); 812 if (aio_nr + ctx->max_reqs > aio_max_nr || 813 aio_nr + ctx->max_reqs < aio_nr) { 814 spin_unlock(&aio_nr_lock); 815 err = -EAGAIN; 816 goto err_ctx; 817 } 818 aio_nr += ctx->max_reqs; 819 spin_unlock(&aio_nr_lock); 820 821 percpu_ref_get(&ctx->users); /* io_setup() will drop this ref */ 822 percpu_ref_get(&ctx->reqs); /* free_ioctx_users() will drop this */ 823 824 err = ioctx_add_table(ctx, mm); 825 if (err) 826 goto err_cleanup; 827 828 /* Release the ring_lock mutex now that all setup is complete. */ 829 mutex_unlock(&ctx->ring_lock); 830 831 pr_debug("allocated ioctx %p[%ld]: mm=%p mask=0x%x\n", 832 ctx, ctx->user_id, mm, ctx->nr_events); 833 return ctx; 834 835 err_cleanup: 836 aio_nr_sub(ctx->max_reqs); 837 err_ctx: 838 atomic_set(&ctx->dead, 1); 839 if (ctx->mmap_size) 840 vm_munmap(ctx->mmap_base, ctx->mmap_size); 841 aio_free_ring(ctx); 842 err: 843 mutex_unlock(&ctx->ring_lock); 844 free_percpu(ctx->cpu); 845 percpu_ref_exit(&ctx->reqs); 846 percpu_ref_exit(&ctx->users); 847 kmem_cache_free(kioctx_cachep, ctx); 848 pr_debug("error allocating ioctx %d\n", err); 849 return ERR_PTR(err); 850 } 851 852 /* kill_ioctx 853 * Cancels all outstanding aio requests on an aio context. Used 854 * when the processes owning a context have all exited to encourage 855 * the rapid destruction of the kioctx. 856 */ 857 static int kill_ioctx(struct mm_struct *mm, struct kioctx *ctx, 858 struct ctx_rq_wait *wait) 859 { 860 struct kioctx_table *table; 861 862 spin_lock(&mm->ioctx_lock); 863 if (atomic_xchg(&ctx->dead, 1)) { 864 spin_unlock(&mm->ioctx_lock); 865 return -EINVAL; 866 } 867 868 table = rcu_dereference_raw(mm->ioctx_table); 869 WARN_ON(ctx != rcu_access_pointer(table->table[ctx->id])); 870 RCU_INIT_POINTER(table->table[ctx->id], NULL); 871 spin_unlock(&mm->ioctx_lock); 872 873 /* free_ioctx_reqs() will do the necessary RCU synchronization */ 874 wake_up_all(&ctx->wait); 875 876 /* 877 * It'd be more correct to do this in free_ioctx(), after all 878 * the outstanding kiocbs have finished - but by then io_destroy 879 * has already returned, so io_setup() could potentially return 880 * -EAGAIN with no ioctxs actually in use (as far as userspace 881 * could tell). 882 */ 883 aio_nr_sub(ctx->max_reqs); 884 885 if (ctx->mmap_size) 886 vm_munmap(ctx->mmap_base, ctx->mmap_size); 887 888 ctx->rq_wait = wait; 889 percpu_ref_kill(&ctx->users); 890 return 0; 891 } 892 893 /* 894 * exit_aio: called when the last user of mm goes away. At this point, there is 895 * no way for any new requests to be submited or any of the io_* syscalls to be 896 * called on the context. 897 * 898 * There may be outstanding kiocbs, but free_ioctx() will explicitly wait on 899 * them. 
900 */ 901 void exit_aio(struct mm_struct *mm) 902 { 903 struct kioctx_table *table = rcu_dereference_raw(mm->ioctx_table); 904 struct ctx_rq_wait wait; 905 int i, skipped; 906 907 if (!table) 908 return; 909 910 atomic_set(&wait.count, table->nr); 911 init_completion(&wait.comp); 912 913 skipped = 0; 914 for (i = 0; i < table->nr; ++i) { 915 struct kioctx *ctx = 916 rcu_dereference_protected(table->table[i], true); 917 918 if (!ctx) { 919 skipped++; 920 continue; 921 } 922 923 /* 924 * We don't need to bother with munmap() here - exit_mmap(mm) 925 * is coming and it'll unmap everything. And we simply can't, 926 * this is not necessarily our ->mm. 927 * Since kill_ioctx() uses non-zero ->mmap_size as indicator 928 * that it needs to unmap the area, just set it to 0. 929 */ 930 ctx->mmap_size = 0; 931 kill_ioctx(mm, ctx, &wait); 932 } 933 934 if (!atomic_sub_and_test(skipped, &wait.count)) { 935 /* Wait until all IO for the context are done. */ 936 wait_for_completion(&wait.comp); 937 } 938 939 RCU_INIT_POINTER(mm->ioctx_table, NULL); 940 kfree(table); 941 } 942 943 static void put_reqs_available(struct kioctx *ctx, unsigned nr) 944 { 945 struct kioctx_cpu *kcpu; 946 unsigned long flags; 947 948 local_irq_save(flags); 949 kcpu = this_cpu_ptr(ctx->cpu); 950 kcpu->reqs_available += nr; 951 952 while (kcpu->reqs_available >= ctx->req_batch * 2) { 953 kcpu->reqs_available -= ctx->req_batch; 954 atomic_add(ctx->req_batch, &ctx->reqs_available); 955 } 956 957 local_irq_restore(flags); 958 } 959 960 static bool __get_reqs_available(struct kioctx *ctx) 961 { 962 struct kioctx_cpu *kcpu; 963 bool ret = false; 964 unsigned long flags; 965 966 local_irq_save(flags); 967 kcpu = this_cpu_ptr(ctx->cpu); 968 if (!kcpu->reqs_available) { 969 int avail = atomic_read(&ctx->reqs_available); 970 971 do { 972 if (avail < ctx->req_batch) 973 goto out; 974 } while (!atomic_try_cmpxchg(&ctx->reqs_available, 975 &avail, avail - ctx->req_batch)); 976 977 kcpu->reqs_available += ctx->req_batch; 978 } 979 980 ret = true; 981 kcpu->reqs_available--; 982 out: 983 local_irq_restore(flags); 984 return ret; 985 } 986 987 /* refill_reqs_available 988 * Updates the reqs_available reference counts used for tracking the 989 * number of free slots in the completion ring. This can be called 990 * from aio_complete() (to optimistically update reqs_available) or 991 * from aio_get_req() (the we're out of events case). It must be 992 * called holding ctx->completion_lock. 993 */ 994 static void refill_reqs_available(struct kioctx *ctx, unsigned head, 995 unsigned tail) 996 { 997 unsigned events_in_ring, completed; 998 999 /* Clamp head since userland can write to it. */ 1000 head %= ctx->nr_events; 1001 if (head <= tail) 1002 events_in_ring = tail - head; 1003 else 1004 events_in_ring = ctx->nr_events - (head - tail); 1005 1006 completed = ctx->completed_events; 1007 if (events_in_ring < completed) 1008 completed -= events_in_ring; 1009 else 1010 completed = 0; 1011 1012 if (!completed) 1013 return; 1014 1015 ctx->completed_events -= completed; 1016 put_reqs_available(ctx, completed); 1017 } 1018 1019 /* user_refill_reqs_available 1020 * Called to refill reqs_available when aio_get_req() encounters an 1021 * out of space in the completion ring. 
1022 */ 1023 static void user_refill_reqs_available(struct kioctx *ctx) 1024 { 1025 spin_lock_irq(&ctx->completion_lock); 1026 if (ctx->completed_events) { 1027 struct aio_ring *ring; 1028 unsigned head; 1029 1030 /* Access of ring->head may race with aio_read_events_ring() 1031 * here, but that's okay since whether we read the old version 1032 * or the new version, and either will be valid. The important 1033 * part is that head cannot pass tail since we prevent 1034 * aio_complete() from updating tail by holding 1035 * ctx->completion_lock. Even if head is invalid, the check 1036 * against ctx->completed_events below will make sure we do the 1037 * safe/right thing. 1038 */ 1039 ring = folio_address(ctx->ring_folios[0]); 1040 head = ring->head; 1041 1042 refill_reqs_available(ctx, head, ctx->tail); 1043 } 1044 1045 spin_unlock_irq(&ctx->completion_lock); 1046 } 1047 1048 static bool get_reqs_available(struct kioctx *ctx) 1049 { 1050 if (__get_reqs_available(ctx)) 1051 return true; 1052 user_refill_reqs_available(ctx); 1053 return __get_reqs_available(ctx); 1054 } 1055 1056 /* aio_get_req 1057 * Allocate a slot for an aio request. 1058 * Returns NULL if no requests are free. 1059 * 1060 * The refcount is initialized to 2 - one for the async op completion, 1061 * one for the synchronous code that does this. 1062 */ 1063 static inline struct aio_kiocb *aio_get_req(struct kioctx *ctx) 1064 { 1065 struct aio_kiocb *req; 1066 1067 req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL); 1068 if (unlikely(!req)) 1069 return NULL; 1070 1071 if (unlikely(!get_reqs_available(ctx))) { 1072 kmem_cache_free(kiocb_cachep, req); 1073 return NULL; 1074 } 1075 1076 percpu_ref_get(&ctx->reqs); 1077 req->ki_ctx = ctx; 1078 INIT_LIST_HEAD(&req->ki_list); 1079 refcount_set(&req->ki_refcnt, 2); 1080 req->ki_eventfd = NULL; 1081 return req; 1082 } 1083 1084 static struct kioctx *lookup_ioctx(unsigned long ctx_id) 1085 { 1086 struct aio_ring __user *ring = (void __user *)ctx_id; 1087 struct mm_struct *mm = current->mm; 1088 struct kioctx *ctx, *ret = NULL; 1089 struct kioctx_table *table; 1090 unsigned id; 1091 1092 if (get_user(id, &ring->id)) 1093 return NULL; 1094 1095 rcu_read_lock(); 1096 table = rcu_dereference(mm->ioctx_table); 1097 1098 if (!table || id >= table->nr) 1099 goto out; 1100 1101 id = array_index_nospec(id, table->nr); 1102 ctx = rcu_dereference(table->table[id]); 1103 if (ctx && ctx->user_id == ctx_id) { 1104 if (percpu_ref_tryget_live(&ctx->users)) 1105 ret = ctx; 1106 } 1107 out: 1108 rcu_read_unlock(); 1109 return ret; 1110 } 1111 1112 static inline void iocb_destroy(struct aio_kiocb *iocb) 1113 { 1114 if (iocb->ki_eventfd) 1115 eventfd_ctx_put(iocb->ki_eventfd); 1116 if (iocb->ki_filp) 1117 fput(iocb->ki_filp); 1118 percpu_ref_put(&iocb->ki_ctx->reqs); 1119 kmem_cache_free(kiocb_cachep, iocb); 1120 } 1121 1122 struct aio_waiter { 1123 struct wait_queue_entry w; 1124 size_t min_nr; 1125 }; 1126 1127 /* aio_complete 1128 * Called when the io request on the given iocb is complete. 1129 */ 1130 static void aio_complete(struct aio_kiocb *iocb) 1131 { 1132 struct kioctx *ctx = iocb->ki_ctx; 1133 struct aio_ring *ring; 1134 struct io_event *ev_page, *event; 1135 unsigned tail, pos, head, avail; 1136 unsigned long flags; 1137 1138 /* 1139 * Add a completion event to the ring buffer. Must be done holding 1140 * ctx->completion_lock to prevent other code from messing with the tail 1141 * pointer since we might be called from irq context. 
1142 */ 1143 spin_lock_irqsave(&ctx->completion_lock, flags); 1144 1145 tail = ctx->tail; 1146 pos = tail + AIO_EVENTS_OFFSET; 1147 1148 if (++tail >= ctx->nr_events) 1149 tail = 0; 1150 1151 ev_page = folio_address(ctx->ring_folios[pos / AIO_EVENTS_PER_PAGE]); 1152 event = ev_page + pos % AIO_EVENTS_PER_PAGE; 1153 1154 *event = iocb->ki_res; 1155 1156 flush_dcache_folio(ctx->ring_folios[pos / AIO_EVENTS_PER_PAGE]); 1157 1158 pr_debug("%p[%u]: %p: %p %Lx %Lx %Lx\n", ctx, tail, iocb, 1159 (void __user *)(unsigned long)iocb->ki_res.obj, 1160 iocb->ki_res.data, iocb->ki_res.res, iocb->ki_res.res2); 1161 1162 /* after flagging the request as done, we 1163 * must never even look at it again 1164 */ 1165 smp_wmb(); /* make event visible before updating tail */ 1166 1167 ctx->tail = tail; 1168 1169 ring = folio_address(ctx->ring_folios[0]); 1170 head = ring->head; 1171 ring->tail = tail; 1172 flush_dcache_folio(ctx->ring_folios[0]); 1173 1174 ctx->completed_events++; 1175 if (ctx->completed_events > 1) 1176 refill_reqs_available(ctx, head, tail); 1177 1178 avail = tail > head 1179 ? tail - head 1180 : tail + ctx->nr_events - head; 1181 spin_unlock_irqrestore(&ctx->completion_lock, flags); 1182 1183 pr_debug("added to ring %p at [%u]\n", iocb, tail); 1184 1185 /* 1186 * Check if the user asked us to deliver the result through an 1187 * eventfd. The eventfd_signal() function is safe to be called 1188 * from IRQ context. 1189 */ 1190 if (iocb->ki_eventfd) 1191 eventfd_signal(iocb->ki_eventfd); 1192 1193 /* 1194 * We have to order our ring_info tail store above and test 1195 * of the wait list below outside the wait lock. This is 1196 * like in wake_up_bit() where clearing a bit has to be 1197 * ordered with the unlocked test. 1198 */ 1199 smp_mb(); 1200 1201 if (waitqueue_active(&ctx->wait)) { 1202 struct aio_waiter *curr, *next; 1203 unsigned long flags; 1204 1205 spin_lock_irqsave(&ctx->wait.lock, flags); 1206 list_for_each_entry_safe(curr, next, &ctx->wait.head, w.entry) 1207 if (avail >= curr->min_nr) { 1208 wake_up_process(curr->w.private); 1209 list_del_init_careful(&curr->w.entry); 1210 } 1211 spin_unlock_irqrestore(&ctx->wait.lock, flags); 1212 } 1213 } 1214 1215 static inline void iocb_put(struct aio_kiocb *iocb) 1216 { 1217 if (refcount_dec_and_test(&iocb->ki_refcnt)) { 1218 aio_complete(iocb); 1219 iocb_destroy(iocb); 1220 } 1221 } 1222 1223 /* aio_read_events_ring 1224 * Pull an event off of the ioctx's event ring. Returns the number of 1225 * events fetched 1226 */ 1227 static long aio_read_events_ring(struct kioctx *ctx, 1228 struct io_event __user *event, long nr) 1229 { 1230 struct aio_ring *ring; 1231 unsigned head, tail, pos; 1232 long ret = 0; 1233 int copy_ret; 1234 1235 /* 1236 * The mutex can block and wake us up and that will cause 1237 * wait_event_interruptible_hrtimeout() to schedule without sleeping 1238 * and repeat. This should be rare enough that it doesn't cause 1239 * peformance issues. See the comment in read_events() for more detail. 1240 */ 1241 sched_annotate_sleep(); 1242 mutex_lock(&ctx->ring_lock); 1243 1244 /* Access to ->ring_folios here is protected by ctx->ring_lock. */ 1245 ring = folio_address(ctx->ring_folios[0]); 1246 head = ring->head; 1247 tail = ring->tail; 1248 1249 /* 1250 * Ensure that once we've read the current tail pointer, that 1251 * we also see the events that were stored up to the tail. 
1252 */ 1253 smp_rmb(); 1254 1255 pr_debug("h%u t%u m%u\n", head, tail, ctx->nr_events); 1256 1257 if (head == tail) 1258 goto out; 1259 1260 head %= ctx->nr_events; 1261 tail %= ctx->nr_events; 1262 1263 while (ret < nr) { 1264 long avail; 1265 struct io_event *ev; 1266 struct folio *folio; 1267 1268 avail = (head <= tail ? tail : ctx->nr_events) - head; 1269 if (head == tail) 1270 break; 1271 1272 pos = head + AIO_EVENTS_OFFSET; 1273 folio = ctx->ring_folios[pos / AIO_EVENTS_PER_PAGE]; 1274 pos %= AIO_EVENTS_PER_PAGE; 1275 1276 avail = min(avail, nr - ret); 1277 avail = min_t(long, avail, AIO_EVENTS_PER_PAGE - pos); 1278 1279 ev = folio_address(folio); 1280 copy_ret = copy_to_user(event + ret, ev + pos, 1281 sizeof(*ev) * avail); 1282 1283 if (unlikely(copy_ret)) { 1284 ret = -EFAULT; 1285 goto out; 1286 } 1287 1288 ret += avail; 1289 head += avail; 1290 head %= ctx->nr_events; 1291 } 1292 1293 ring = folio_address(ctx->ring_folios[0]); 1294 ring->head = head; 1295 flush_dcache_folio(ctx->ring_folios[0]); 1296 1297 pr_debug("%li h%u t%u\n", ret, head, tail); 1298 out: 1299 mutex_unlock(&ctx->ring_lock); 1300 1301 return ret; 1302 } 1303 1304 static bool aio_read_events(struct kioctx *ctx, long min_nr, long nr, 1305 struct io_event __user *event, long *i) 1306 { 1307 long ret = aio_read_events_ring(ctx, event + *i, nr - *i); 1308 1309 if (ret > 0) 1310 *i += ret; 1311 1312 if (unlikely(atomic_read(&ctx->dead))) 1313 ret = -EINVAL; 1314 1315 if (!*i) 1316 *i = ret; 1317 1318 return ret < 0 || *i >= min_nr; 1319 } 1320 1321 static long read_events(struct kioctx *ctx, long min_nr, long nr, 1322 struct io_event __user *event, 1323 ktime_t until) 1324 { 1325 struct hrtimer_sleeper t; 1326 struct aio_waiter w; 1327 long ret = 0, ret2 = 0; 1328 1329 /* 1330 * Note that aio_read_events() is being called as the conditional - i.e. 1331 * we're calling it after prepare_to_wait() has set task state to 1332 * TASK_INTERRUPTIBLE. 1333 * 1334 * But aio_read_events() can block, and if it blocks it's going to flip 1335 * the task state back to TASK_RUNNING. 1336 * 1337 * This should be ok, provided it doesn't flip the state back to 1338 * TASK_RUNNING and return 0 too much - that causes us to spin. That 1339 * will only happen if the mutex_lock() call blocks, and we then find 1340 * the ringbuffer empty. So in practice we should be ok, but it's 1341 * something to be aware of when touching this code. 1342 */ 1343 aio_read_events(ctx, min_nr, nr, event, &ret); 1344 if (until == 0 || ret < 0 || ret >= min_nr) 1345 return ret; 1346 1347 hrtimer_init_sleeper_on_stack(&t, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 1348 if (until != KTIME_MAX) { 1349 hrtimer_set_expires_range_ns(&t.timer, until, current->timer_slack_ns); 1350 hrtimer_sleeper_start_expires(&t, HRTIMER_MODE_REL); 1351 } 1352 1353 init_wait(&w.w); 1354 1355 while (1) { 1356 unsigned long nr_got = ret; 1357 1358 w.min_nr = min_nr - ret; 1359 1360 ret2 = prepare_to_wait_event(&ctx->wait, &w.w, TASK_INTERRUPTIBLE); 1361 if (!ret2 && !t.task) 1362 ret2 = -ETIME; 1363 1364 if (aio_read_events(ctx, min_nr, nr, event, &ret) || ret2) 1365 break; 1366 1367 if (nr_got == ret) 1368 schedule(); 1369 } 1370 1371 finish_wait(&ctx->wait, &w.w); 1372 hrtimer_cancel(&t.timer); 1373 destroy_hrtimer_on_stack(&t.timer); 1374 1375 return ret; 1376 } 1377 1378 /* sys_io_setup: 1379 * Create an aio_context capable of receiving at least nr_events. 1380 * ctxp must not point to an aio_context that already exists, and 1381 * must be initialized to 0 prior to the call. 
On successful 1382 * creation of the aio_context, *ctxp is filled in with the resulting 1383 * handle. May fail with -EINVAL if *ctxp is not initialized, 1384 * if the specified nr_events exceeds internal limits. May fail 1385 * with -EAGAIN if the specified nr_events exceeds the user's limit 1386 * of available events. May fail with -ENOMEM if insufficient kernel 1387 * resources are available. May fail with -EFAULT if an invalid 1388 * pointer is passed for ctxp. Will fail with -ENOSYS if not 1389 * implemented. 1390 */ 1391 SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp) 1392 { 1393 struct kioctx *ioctx = NULL; 1394 unsigned long ctx; 1395 long ret; 1396 1397 ret = get_user(ctx, ctxp); 1398 if (unlikely(ret)) 1399 goto out; 1400 1401 ret = -EINVAL; 1402 if (unlikely(ctx || nr_events == 0)) { 1403 pr_debug("EINVAL: ctx %lu nr_events %u\n", 1404 ctx, nr_events); 1405 goto out; 1406 } 1407 1408 ioctx = ioctx_alloc(nr_events); 1409 ret = PTR_ERR(ioctx); 1410 if (!IS_ERR(ioctx)) { 1411 ret = put_user(ioctx->user_id, ctxp); 1412 if (ret) 1413 kill_ioctx(current->mm, ioctx, NULL); 1414 percpu_ref_put(&ioctx->users); 1415 } 1416 1417 out: 1418 return ret; 1419 } 1420 1421 #ifdef CONFIG_COMPAT 1422 COMPAT_SYSCALL_DEFINE2(io_setup, unsigned, nr_events, u32 __user *, ctx32p) 1423 { 1424 struct kioctx *ioctx = NULL; 1425 unsigned long ctx; 1426 long ret; 1427 1428 ret = get_user(ctx, ctx32p); 1429 if (unlikely(ret)) 1430 goto out; 1431 1432 ret = -EINVAL; 1433 if (unlikely(ctx || nr_events == 0)) { 1434 pr_debug("EINVAL: ctx %lu nr_events %u\n", 1435 ctx, nr_events); 1436 goto out; 1437 } 1438 1439 ioctx = ioctx_alloc(nr_events); 1440 ret = PTR_ERR(ioctx); 1441 if (!IS_ERR(ioctx)) { 1442 /* truncating is ok because it's a user address */ 1443 ret = put_user((u32)ioctx->user_id, ctx32p); 1444 if (ret) 1445 kill_ioctx(current->mm, ioctx, NULL); 1446 percpu_ref_put(&ioctx->users); 1447 } 1448 1449 out: 1450 return ret; 1451 } 1452 #endif 1453 1454 /* sys_io_destroy: 1455 * Destroy the aio_context specified. May cancel any outstanding 1456 * AIOs and block on completion. Will fail with -ENOSYS if not 1457 * implemented. May fail with -EINVAL if the context pointed to 1458 * is invalid. 1459 */ 1460 SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx) 1461 { 1462 struct kioctx *ioctx = lookup_ioctx(ctx); 1463 if (likely(NULL != ioctx)) { 1464 struct ctx_rq_wait wait; 1465 int ret; 1466 1467 init_completion(&wait.comp); 1468 atomic_set(&wait.count, 1); 1469 1470 /* Pass requests_done to kill_ioctx() where it can be set 1471 * in a thread-safe way. If we try to set it here then we have 1472 * a race condition if two io_destroy() called simultaneously. 1473 */ 1474 ret = kill_ioctx(current->mm, ioctx, &wait); 1475 percpu_ref_put(&ioctx->users); 1476 1477 /* Wait until all IO for the context are done. Otherwise kernel 1478 * keep using user-space buffers even if user thinks the context 1479 * is destroyed. 
1480 */ 1481 if (!ret) 1482 wait_for_completion(&wait.comp); 1483 1484 return ret; 1485 } 1486 pr_debug("EINVAL: invalid context id\n"); 1487 return -EINVAL; 1488 } 1489 1490 static void aio_remove_iocb(struct aio_kiocb *iocb) 1491 { 1492 struct kioctx *ctx = iocb->ki_ctx; 1493 unsigned long flags; 1494 1495 spin_lock_irqsave(&ctx->ctx_lock, flags); 1496 list_del(&iocb->ki_list); 1497 spin_unlock_irqrestore(&ctx->ctx_lock, flags); 1498 } 1499 1500 static void aio_complete_rw(struct kiocb *kiocb, long res) 1501 { 1502 struct aio_kiocb *iocb = container_of(kiocb, struct aio_kiocb, rw); 1503 1504 if (!list_empty_careful(&iocb->ki_list)) 1505 aio_remove_iocb(iocb); 1506 1507 if (kiocb->ki_flags & IOCB_WRITE) { 1508 struct inode *inode = file_inode(kiocb->ki_filp); 1509 1510 if (S_ISREG(inode->i_mode)) 1511 kiocb_end_write(kiocb); 1512 } 1513 1514 iocb->ki_res.res = res; 1515 iocb->ki_res.res2 = 0; 1516 iocb_put(iocb); 1517 } 1518 1519 static int aio_prep_rw(struct kiocb *req, const struct iocb *iocb) 1520 { 1521 int ret; 1522 1523 req->ki_complete = aio_complete_rw; 1524 req->private = NULL; 1525 req->ki_pos = iocb->aio_offset; 1526 req->ki_flags = req->ki_filp->f_iocb_flags | IOCB_AIO_RW; 1527 if (iocb->aio_flags & IOCB_FLAG_RESFD) 1528 req->ki_flags |= IOCB_EVENTFD; 1529 if (iocb->aio_flags & IOCB_FLAG_IOPRIO) { 1530 /* 1531 * If the IOCB_FLAG_IOPRIO flag of aio_flags is set, then 1532 * aio_reqprio is interpreted as an I/O scheduling 1533 * class and priority. 1534 */ 1535 ret = ioprio_check_cap(iocb->aio_reqprio); 1536 if (ret) { 1537 pr_debug("aio ioprio check cap error: %d\n", ret); 1538 return ret; 1539 } 1540 1541 req->ki_ioprio = iocb->aio_reqprio; 1542 } else 1543 req->ki_ioprio = get_current_ioprio(); 1544 1545 ret = kiocb_set_rw_flags(req, iocb->aio_rw_flags); 1546 if (unlikely(ret)) 1547 return ret; 1548 1549 req->ki_flags &= ~IOCB_HIPRI; /* no one is going to poll for this I/O */ 1550 return 0; 1551 } 1552 1553 static ssize_t aio_setup_rw(int rw, const struct iocb *iocb, 1554 struct iovec **iovec, bool vectored, bool compat, 1555 struct iov_iter *iter) 1556 { 1557 void __user *buf = (void __user *)(uintptr_t)iocb->aio_buf; 1558 size_t len = iocb->aio_nbytes; 1559 1560 if (!vectored) { 1561 ssize_t ret = import_ubuf(rw, buf, len, iter); 1562 *iovec = NULL; 1563 return ret; 1564 } 1565 1566 return __import_iovec(rw, buf, len, UIO_FASTIOV, iovec, iter, compat); 1567 } 1568 1569 static inline void aio_rw_done(struct kiocb *req, ssize_t ret) 1570 { 1571 switch (ret) { 1572 case -EIOCBQUEUED: 1573 break; 1574 case -ERESTARTSYS: 1575 case -ERESTARTNOINTR: 1576 case -ERESTARTNOHAND: 1577 case -ERESTART_RESTARTBLOCK: 1578 /* 1579 * There's no easy way to restart the syscall since other AIO's 1580 * may be already running. Just fail this IO with EINTR. 
1581 */ 1582 ret = -EINTR; 1583 fallthrough; 1584 default: 1585 req->ki_complete(req, ret); 1586 } 1587 } 1588 1589 static int aio_read(struct kiocb *req, const struct iocb *iocb, 1590 bool vectored, bool compat) 1591 { 1592 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs; 1593 struct iov_iter iter; 1594 struct file *file; 1595 int ret; 1596 1597 ret = aio_prep_rw(req, iocb); 1598 if (ret) 1599 return ret; 1600 file = req->ki_filp; 1601 if (unlikely(!(file->f_mode & FMODE_READ))) 1602 return -EBADF; 1603 if (unlikely(!file->f_op->read_iter)) 1604 return -EINVAL; 1605 1606 ret = aio_setup_rw(ITER_DEST, iocb, &iovec, vectored, compat, &iter); 1607 if (ret < 0) 1608 return ret; 1609 ret = rw_verify_area(READ, file, &req->ki_pos, iov_iter_count(&iter)); 1610 if (!ret) 1611 aio_rw_done(req, file->f_op->read_iter(req, &iter)); 1612 kfree(iovec); 1613 return ret; 1614 } 1615 1616 static int aio_write(struct kiocb *req, const struct iocb *iocb, 1617 bool vectored, bool compat) 1618 { 1619 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs; 1620 struct iov_iter iter; 1621 struct file *file; 1622 int ret; 1623 1624 ret = aio_prep_rw(req, iocb); 1625 if (ret) 1626 return ret; 1627 file = req->ki_filp; 1628 1629 if (unlikely(!(file->f_mode & FMODE_WRITE))) 1630 return -EBADF; 1631 if (unlikely(!file->f_op->write_iter)) 1632 return -EINVAL; 1633 1634 ret = aio_setup_rw(ITER_SOURCE, iocb, &iovec, vectored, compat, &iter); 1635 if (ret < 0) 1636 return ret; 1637 ret = rw_verify_area(WRITE, file, &req->ki_pos, iov_iter_count(&iter)); 1638 if (!ret) { 1639 if (S_ISREG(file_inode(file)->i_mode)) 1640 kiocb_start_write(req); 1641 req->ki_flags |= IOCB_WRITE; 1642 aio_rw_done(req, file->f_op->write_iter(req, &iter)); 1643 } 1644 kfree(iovec); 1645 return ret; 1646 } 1647 1648 static void aio_fsync_work(struct work_struct *work) 1649 { 1650 struct aio_kiocb *iocb = container_of(work, struct aio_kiocb, fsync.work); 1651 const struct cred *old_cred = override_creds(iocb->fsync.creds); 1652 1653 iocb->ki_res.res = vfs_fsync(iocb->fsync.file, iocb->fsync.datasync); 1654 revert_creds(old_cred); 1655 put_cred(iocb->fsync.creds); 1656 iocb_put(iocb); 1657 } 1658 1659 static int aio_fsync(struct fsync_iocb *req, const struct iocb *iocb, 1660 bool datasync) 1661 { 1662 if (unlikely(iocb->aio_buf || iocb->aio_offset || iocb->aio_nbytes || 1663 iocb->aio_rw_flags)) 1664 return -EINVAL; 1665 1666 if (unlikely(!req->file->f_op->fsync)) 1667 return -EINVAL; 1668 1669 req->creds = prepare_creds(); 1670 if (!req->creds) 1671 return -ENOMEM; 1672 1673 req->datasync = datasync; 1674 INIT_WORK(&req->work, aio_fsync_work); 1675 schedule_work(&req->work); 1676 return 0; 1677 } 1678 1679 static void aio_poll_put_work(struct work_struct *work) 1680 { 1681 struct poll_iocb *req = container_of(work, struct poll_iocb, work); 1682 struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll); 1683 1684 iocb_put(iocb); 1685 } 1686 1687 /* 1688 * Safely lock the waitqueue which the request is on, synchronizing with the 1689 * case where the ->poll() provider decides to free its waitqueue early. 1690 * 1691 * Returns true on success, meaning that req->head->lock was locked, req->wait 1692 * is on req->head, and an RCU read lock was taken. Returns false if the 1693 * request was already removed from its waitqueue (which might no longer exist). 
1694 */ 1695 static bool poll_iocb_lock_wq(struct poll_iocb *req) 1696 { 1697 wait_queue_head_t *head; 1698 1699 /* 1700 * While we hold the waitqueue lock and the waitqueue is nonempty, 1701 * wake_up_pollfree() will wait for us. However, taking the waitqueue 1702 * lock in the first place can race with the waitqueue being freed. 1703 * 1704 * We solve this as eventpoll does: by taking advantage of the fact that 1705 * all users of wake_up_pollfree() will RCU-delay the actual free. If 1706 * we enter rcu_read_lock() and see that the pointer to the queue is 1707 * non-NULL, we can then lock it without the memory being freed out from 1708 * under us, then check whether the request is still on the queue. 1709 * 1710 * Keep holding rcu_read_lock() as long as we hold the queue lock, in 1711 * case the caller deletes the entry from the queue, leaving it empty. 1712 * In that case, only RCU prevents the queue memory from being freed. 1713 */ 1714 rcu_read_lock(); 1715 head = smp_load_acquire(&req->head); 1716 if (head) { 1717 spin_lock(&head->lock); 1718 if (!list_empty(&req->wait.entry)) 1719 return true; 1720 spin_unlock(&head->lock); 1721 } 1722 rcu_read_unlock(); 1723 return false; 1724 } 1725 1726 static void poll_iocb_unlock_wq(struct poll_iocb *req) 1727 { 1728 spin_unlock(&req->head->lock); 1729 rcu_read_unlock(); 1730 } 1731 1732 static void aio_poll_complete_work(struct work_struct *work) 1733 { 1734 struct poll_iocb *req = container_of(work, struct poll_iocb, work); 1735 struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll); 1736 struct poll_table_struct pt = { ._key = req->events }; 1737 struct kioctx *ctx = iocb->ki_ctx; 1738 __poll_t mask = 0; 1739 1740 if (!READ_ONCE(req->cancelled)) 1741 mask = vfs_poll(req->file, &pt) & req->events; 1742 1743 /* 1744 * Note that ->ki_cancel callers also delete iocb from active_reqs after 1745 * calling ->ki_cancel. We need the ctx_lock roundtrip here to 1746 * synchronize with them. In the cancellation case the list_del_init 1747 * itself is not actually needed, but harmless so we keep it in to 1748 * avoid further branches in the fast path. 1749 */ 1750 spin_lock_irq(&ctx->ctx_lock); 1751 if (poll_iocb_lock_wq(req)) { 1752 if (!mask && !READ_ONCE(req->cancelled)) { 1753 /* 1754 * The request isn't actually ready to be completed yet. 1755 * Reschedule completion if another wakeup came in. 
1756 */ 1757 if (req->work_need_resched) { 1758 schedule_work(&req->work); 1759 req->work_need_resched = false; 1760 } else { 1761 req->work_scheduled = false; 1762 } 1763 poll_iocb_unlock_wq(req); 1764 spin_unlock_irq(&ctx->ctx_lock); 1765 return; 1766 } 1767 list_del_init(&req->wait.entry); 1768 poll_iocb_unlock_wq(req); 1769 } /* else, POLLFREE has freed the waitqueue, so we must complete */ 1770 list_del_init(&iocb->ki_list); 1771 iocb->ki_res.res = mangle_poll(mask); 1772 spin_unlock_irq(&ctx->ctx_lock); 1773 1774 iocb_put(iocb); 1775 } 1776 1777 /* assumes we are called with irqs disabled */ 1778 static int aio_poll_cancel(struct kiocb *iocb) 1779 { 1780 struct aio_kiocb *aiocb = container_of(iocb, struct aio_kiocb, rw); 1781 struct poll_iocb *req = &aiocb->poll; 1782 1783 if (poll_iocb_lock_wq(req)) { 1784 WRITE_ONCE(req->cancelled, true); 1785 if (!req->work_scheduled) { 1786 schedule_work(&aiocb->poll.work); 1787 req->work_scheduled = true; 1788 } 1789 poll_iocb_unlock_wq(req); 1790 } /* else, the request was force-cancelled by POLLFREE already */ 1791 1792 return 0; 1793 } 1794 1795 static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync, 1796 void *key) 1797 { 1798 struct poll_iocb *req = container_of(wait, struct poll_iocb, wait); 1799 struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll); 1800 __poll_t mask = key_to_poll(key); 1801 unsigned long flags; 1802 1803 /* for instances that support it check for an event match first: */ 1804 if (mask && !(mask & req->events)) 1805 return 0; 1806 1807 /* 1808 * Complete the request inline if possible. This requires that three 1809 * conditions be met: 1810 * 1. An event mask must have been passed. If a plain wakeup was done 1811 * instead, then mask == 0 and we have to call vfs_poll() to get 1812 * the events, so inline completion isn't possible. 1813 * 2. The completion work must not have already been scheduled. 1814 * 3. ctx_lock must not be busy. We have to use trylock because we 1815 * already hold the waitqueue lock, so this inverts the normal 1816 * locking order. Use irqsave/irqrestore because not all 1817 * filesystems (e.g. fuse) call this function with IRQs disabled, 1818 * yet IRQs have to be disabled before ctx_lock is obtained. 1819 */ 1820 if (mask && !req->work_scheduled && 1821 spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) { 1822 struct kioctx *ctx = iocb->ki_ctx; 1823 1824 list_del_init(&req->wait.entry); 1825 list_del(&iocb->ki_list); 1826 iocb->ki_res.res = mangle_poll(mask); 1827 if (iocb->ki_eventfd && !eventfd_signal_allowed()) { 1828 iocb = NULL; 1829 INIT_WORK(&req->work, aio_poll_put_work); 1830 schedule_work(&req->work); 1831 } 1832 spin_unlock_irqrestore(&ctx->ctx_lock, flags); 1833 if (iocb) 1834 iocb_put(iocb); 1835 } else { 1836 /* 1837 * Schedule the completion work if needed. If it was already 1838 * scheduled, record that another wakeup came in. 1839 * 1840 * Don't remove the request from the waitqueue here, as it might 1841 * not actually be complete yet (we won't know until vfs_poll() 1842 * is called), and we must not miss any wakeups. POLLFREE is an 1843 * exception to this; see below. 1844 */ 1845 if (req->work_scheduled) { 1846 req->work_need_resched = true; 1847 } else { 1848 schedule_work(&req->work); 1849 req->work_scheduled = true; 1850 } 1851 1852 /* 1853 * If the waitqueue is being freed early but we can't complete 1854 * the request inline, we have to tear down the request as best 1855 * we can. 
That means immediately removing the request from its 1856 * waitqueue and preventing all further accesses to the 1857 * waitqueue via the request. We also need to schedule the 1858 * completion work (done above). Also mark the request as 1859 * cancelled, to potentially skip an unneeded call to ->poll(). 1860 */ 1861 if (mask & POLLFREE) { 1862 WRITE_ONCE(req->cancelled, true); 1863 list_del_init(&req->wait.entry); 1864 1865 /* 1866 * Careful: this *must* be the last step, since as soon 1867 * as req->head is NULL'ed out, the request can be 1868 * completed and freed, since aio_poll_complete_work() 1869 * will no longer need to take the waitqueue lock. 1870 */ 1871 smp_store_release(&req->head, NULL); 1872 } 1873 } 1874 return 1; 1875 } 1876 1877 struct aio_poll_table { 1878 struct poll_table_struct pt; 1879 struct aio_kiocb *iocb; 1880 bool queued; 1881 int error; 1882 }; 1883 1884 static void 1885 aio_poll_queue_proc(struct file *file, struct wait_queue_head *head, 1886 struct poll_table_struct *p) 1887 { 1888 struct aio_poll_table *pt = container_of(p, struct aio_poll_table, pt); 1889 1890 /* multiple wait queues per file are not supported */ 1891 if (unlikely(pt->queued)) { 1892 pt->error = -EINVAL; 1893 return; 1894 } 1895 1896 pt->queued = true; 1897 pt->error = 0; 1898 pt->iocb->poll.head = head; 1899 add_wait_queue(head, &pt->iocb->poll.wait); 1900 } 1901 1902 static int aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb) 1903 { 1904 struct kioctx *ctx = aiocb->ki_ctx; 1905 struct poll_iocb *req = &aiocb->poll; 1906 struct aio_poll_table apt; 1907 bool cancel = false; 1908 __poll_t mask; 1909 1910 /* reject any unknown events outside the normal event mask. */ 1911 if ((u16)iocb->aio_buf != iocb->aio_buf) 1912 return -EINVAL; 1913 /* reject fields that are not defined for poll */ 1914 if (iocb->aio_offset || iocb->aio_nbytes || iocb->aio_rw_flags) 1915 return -EINVAL; 1916 1917 INIT_WORK(&req->work, aio_poll_complete_work); 1918 req->events = demangle_poll(iocb->aio_buf) | EPOLLERR | EPOLLHUP; 1919 1920 req->head = NULL; 1921 req->cancelled = false; 1922 req->work_scheduled = false; 1923 req->work_need_resched = false; 1924 1925 apt.pt._qproc = aio_poll_queue_proc; 1926 apt.pt._key = req->events; 1927 apt.iocb = aiocb; 1928 apt.queued = false; 1929 apt.error = -EINVAL; /* same as no support for IOCB_CMD_POLL */ 1930 1931 /* initialized the list so that we can do list_empty checks */ 1932 INIT_LIST_HEAD(&req->wait.entry); 1933 init_waitqueue_func_entry(&req->wait, aio_poll_wake); 1934 1935 mask = vfs_poll(req->file, &apt.pt) & req->events; 1936 spin_lock_irq(&ctx->ctx_lock); 1937 if (likely(apt.queued)) { 1938 bool on_queue = poll_iocb_lock_wq(req); 1939 1940 if (!on_queue || req->work_scheduled) { 1941 /* 1942 * aio_poll_wake() already either scheduled the async 1943 * completion work, or completed the request inline. 1944 */ 1945 if (apt.error) /* unsupported case: multiple queues */ 1946 cancel = true; 1947 apt.error = 0; 1948 mask = 0; 1949 } 1950 if (mask || apt.error) { 1951 /* Steal to complete synchronously. */ 1952 list_del_init(&req->wait.entry); 1953 } else if (cancel) { 1954 /* Cancel if possible (may be too late though). */ 1955 WRITE_ONCE(req->cancelled, true); 1956 } else if (on_queue) { 1957 /* 1958 * Actually waiting for an event, so add the request to 1959 * active_reqs so that it can be cancelled if needed. 
1960 */ 1961 list_add_tail(&aiocb->ki_list, &ctx->active_reqs); 1962 aiocb->ki_cancel = aio_poll_cancel; 1963 } 1964 if (on_queue) 1965 poll_iocb_unlock_wq(req); 1966 } 1967 if (mask) { /* no async, we'd stolen it */ 1968 aiocb->ki_res.res = mangle_poll(mask); 1969 apt.error = 0; 1970 } 1971 spin_unlock_irq(&ctx->ctx_lock); 1972 if (mask) 1973 iocb_put(aiocb); 1974 return apt.error; 1975 } 1976 1977 static int __io_submit_one(struct kioctx *ctx, const struct iocb *iocb, 1978 struct iocb __user *user_iocb, struct aio_kiocb *req, 1979 bool compat) 1980 { 1981 req->ki_filp = fget(iocb->aio_fildes); 1982 if (unlikely(!req->ki_filp)) 1983 return -EBADF; 1984 1985 if (iocb->aio_flags & IOCB_FLAG_RESFD) { 1986 struct eventfd_ctx *eventfd; 1987 /* 1988 * If the IOCB_FLAG_RESFD flag of aio_flags is set, get an 1989 * instance of the file* now. The file descriptor must be 1990 * an eventfd() fd, and will be signaled for each completed 1991 * event using the eventfd_signal() function. 1992 */ 1993 eventfd = eventfd_ctx_fdget(iocb->aio_resfd); 1994 if (IS_ERR(eventfd)) 1995 return PTR_ERR(eventfd); 1996 1997 req->ki_eventfd = eventfd; 1998 } 1999 2000 if (unlikely(put_user(KIOCB_KEY, &user_iocb->aio_key))) { 2001 pr_debug("EFAULT: aio_key\n"); 2002 return -EFAULT; 2003 } 2004 2005 req->ki_res.obj = (u64)(unsigned long)user_iocb; 2006 req->ki_res.data = iocb->aio_data; 2007 req->ki_res.res = 0; 2008 req->ki_res.res2 = 0; 2009 2010 switch (iocb->aio_lio_opcode) { 2011 case IOCB_CMD_PREAD: 2012 return aio_read(&req->rw, iocb, false, compat); 2013 case IOCB_CMD_PWRITE: 2014 return aio_write(&req->rw, iocb, false, compat); 2015 case IOCB_CMD_PREADV: 2016 return aio_read(&req->rw, iocb, true, compat); 2017 case IOCB_CMD_PWRITEV: 2018 return aio_write(&req->rw, iocb, true, compat); 2019 case IOCB_CMD_FSYNC: 2020 return aio_fsync(&req->fsync, iocb, false); 2021 case IOCB_CMD_FDSYNC: 2022 return aio_fsync(&req->fsync, iocb, true); 2023 case IOCB_CMD_POLL: 2024 return aio_poll(req, iocb); 2025 default: 2026 pr_debug("invalid aio operation %d\n", iocb->aio_lio_opcode); 2027 return -EINVAL; 2028 } 2029 } 2030 2031 static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, 2032 bool compat) 2033 { 2034 struct aio_kiocb *req; 2035 struct iocb iocb; 2036 int err; 2037 2038 if (unlikely(copy_from_user(&iocb, user_iocb, sizeof(iocb)))) 2039 return -EFAULT; 2040 2041 /* enforce forwards compatibility on users */ 2042 if (unlikely(iocb.aio_reserved2)) { 2043 pr_debug("EINVAL: reserve field set\n"); 2044 return -EINVAL; 2045 } 2046 2047 /* prevent overflows */ 2048 if (unlikely( 2049 (iocb.aio_buf != (unsigned long)iocb.aio_buf) || 2050 (iocb.aio_nbytes != (size_t)iocb.aio_nbytes) || 2051 ((ssize_t)iocb.aio_nbytes < 0) 2052 )) { 2053 pr_debug("EINVAL: overflow check\n"); 2054 return -EINVAL; 2055 } 2056 2057 req = aio_get_req(ctx); 2058 if (unlikely(!req)) 2059 return -EAGAIN; 2060 2061 err = __io_submit_one(ctx, &iocb, user_iocb, req, compat); 2062 2063 /* Done with the synchronous reference */ 2064 iocb_put(req); 2065 2066 /* 2067 * If err is 0, we'd either done aio_complete() ourselves or have 2068 * arranged for that to be done asynchronously. Anything non-zero 2069 * means that we need to destroy req ourselves. 2070 */ 2071 if (unlikely(err)) { 2072 iocb_destroy(req); 2073 put_reqs_available(ctx, 1); 2074 } 2075 return err; 2076 } 2077 2078 /* sys_io_submit: 2079 * Queue the nr iocbs pointed to by iocbpp for processing. Returns 2080 * the number of iocbs queued. 

/* sys_io_submit:
 *	Queue the nr iocbs pointed to by iocbpp for processing.  Returns
 *	the number of iocbs queued.  May return -EINVAL if the aio_context
 *	specified by ctx_id is invalid, if nr is < 0, if the iocb at
 *	*iocbpp[0] is not properly initialized, if the operation specified
 *	is invalid for the file descriptor in the iocb.  May fail with
 *	-EFAULT if any of the data structures point to invalid data.  May
 *	fail with -EBADF if the file descriptor specified in the first
 *	iocb is invalid.  May fail with -EAGAIN if insufficient resources
 *	are available to queue any iocbs.  Will return 0 if nr is 0.  Will
 *	fail with -ENOSYS if not implemented.
 */
SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
		struct iocb __user * __user *, iocbpp)
{
	struct kioctx *ctx;
	long ret = 0;
	int i = 0;
	struct blk_plug plug;

	if (unlikely(nr < 0))
		return -EINVAL;

	ctx = lookup_ioctx(ctx_id);
	if (unlikely(!ctx)) {
		pr_debug("EINVAL: invalid context id\n");
		return -EINVAL;
	}

	if (nr > ctx->nr_events)
		nr = ctx->nr_events;

	if (nr > AIO_PLUG_THRESHOLD)
		blk_start_plug(&plug);
	for (i = 0; i < nr; i++) {
		struct iocb __user *user_iocb;

		if (unlikely(get_user(user_iocb, iocbpp + i))) {
			ret = -EFAULT;
			break;
		}

		ret = io_submit_one(ctx, user_iocb, false);
		if (ret)
			break;
	}
	if (nr > AIO_PLUG_THRESHOLD)
		blk_finish_plug(&plug);

	percpu_ref_put(&ctx->users);
	return i ? i : ret;
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE3(io_submit, compat_aio_context_t, ctx_id,
		       int, nr, compat_uptr_t __user *, iocbpp)
{
	struct kioctx *ctx;
	long ret = 0;
	int i = 0;
	struct blk_plug plug;

	if (unlikely(nr < 0))
		return -EINVAL;

	ctx = lookup_ioctx(ctx_id);
	if (unlikely(!ctx)) {
		pr_debug("EINVAL: invalid context id\n");
		return -EINVAL;
	}

	if (nr > ctx->nr_events)
		nr = ctx->nr_events;

	if (nr > AIO_PLUG_THRESHOLD)
		blk_start_plug(&plug);
	for (i = 0; i < nr; i++) {
		compat_uptr_t user_iocb;

		if (unlikely(get_user(user_iocb, iocbpp + i))) {
			ret = -EFAULT;
			break;
		}

		ret = io_submit_one(ctx, compat_ptr(user_iocb), true);
		if (ret)
			break;
	}
	if (nr > AIO_PLUG_THRESHOLD)
		blk_finish_plug(&plug);

	percpu_ref_put(&ctx->users);
	return i ? i : ret;
}
#endif
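
/*
 * The submission semantics documented above are easiest to see from
 * userspace.  An illustrative sequence using raw syscalls (fd, buf and len
 * are placeholders; real code must check every return value):
 *
 *	aio_context_t ctx = 0;
 *	struct iocb cb = {
 *		.aio_lio_opcode	= IOCB_CMD_PWRITE,
 *		.aio_fildes	= fd,
 *		.aio_buf	= (__u64)(unsigned long)buf,
 *		.aio_nbytes	= len,
 *		.aio_offset	= 0,
 *		.aio_data	= 0x42,		// echoed back in io_event.data
 *	};
 *	struct iocb *cbs[] = { &cb };
 *
 *	syscall(__NR_io_setup, 128, &ctx);	// ctx must start out as 0
 *	long n = syscall(__NR_io_submit, ctx, 1, cbs);
 *	// n is the number of iocbs queued, or a negative errno if the
 *	// first iocb could not be queued (see "return i ? i : ret" above).
 */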

/* sys_io_cancel:
 *	Attempts to cancel an iocb previously passed to io_submit().  If
 *	the cancellation is started, -EINPROGRESS is returned and the
 *	iocb's completion event is delivered through the ring buffer as
 *	usual; the result argument is no longer used.  May fail with
 *	-EFAULT if any of the data structures pointed to are invalid.
 *	May fail with -EINVAL if the aio_context specified by ctx_id is
 *	invalid.  May fail with -EAGAIN if the iocb specified was not
 *	cancelled.  Will fail with -ENOSYS if not implemented.
 */
SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
		struct io_event __user *, result)
{
	struct kioctx *ctx;
	struct aio_kiocb *kiocb;
	int ret = -EINVAL;
	u32 key;
	u64 obj = (u64)(unsigned long)iocb;

	if (unlikely(get_user(key, &iocb->aio_key)))
		return -EFAULT;
	if (unlikely(key != KIOCB_KEY))
		return -EINVAL;

	ctx = lookup_ioctx(ctx_id);
	if (unlikely(!ctx))
		return -EINVAL;

	spin_lock_irq(&ctx->ctx_lock);
	/* TODO: use a hash or array, this sucks. */
	list_for_each_entry(kiocb, &ctx->active_reqs, ki_list) {
		if (kiocb->ki_res.obj == obj) {
			ret = kiocb->ki_cancel(&kiocb->rw);
			list_del_init(&kiocb->ki_list);
			break;
		}
	}
	spin_unlock_irq(&ctx->ctx_lock);

	if (!ret) {
		/*
		 * The result argument is no longer used - the io_event is
		 * always delivered via the ring buffer.  -EINPROGRESS indicates
		 * that cancellation is in progress:
		 */
		ret = -EINPROGRESS;
	}

	percpu_ref_put(&ctx->users);

	return ret;
}
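
/*
 * Note that io_cancel() matches requests by the userspace iocb pointer
 * (ki_res.obj above), so the iocb passed here must be the very same one
 * that was handed to io_submit().  Requests that never registered a cancel
 * callback are not on ->active_reqs and simply fall through with -EINVAL.
 * A rough userspace sketch, reusing ctx and cb from the submission example
 * above:
 *
 *	struct io_event ev;	// currently ignored by the kernel
 *	int ret = syscall(__NR_io_cancel, ctx, &cb, &ev);
 *	// -EINPROGRESS: cancellation started, the final io_event still
 *	//		 arrives via io_getevents()
 *	// -EINVAL:	 no matching in-flight request was found
 */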

static long do_io_getevents(aio_context_t ctx_id,
		long min_nr,
		long nr,
		struct io_event __user *events,
		struct timespec64 *ts)
{
	ktime_t until = ts ? timespec64_to_ktime(*ts) : KTIME_MAX;
	struct kioctx *ioctx = lookup_ioctx(ctx_id);
	long ret = -EINVAL;

	if (likely(ioctx)) {
		if (likely(min_nr <= nr && min_nr >= 0))
			ret = read_events(ioctx, min_nr, nr, events, until);
		percpu_ref_put(&ioctx->users);
	}

	return ret;
}

/* io_getevents:
 *	Attempts to read at least min_nr events and up to nr events from
 *	the completion queue for the aio_context specified by ctx_id.  If
 *	it succeeds, the number of read events is returned.  May fail with
 *	-EINVAL if ctx_id is invalid, if min_nr is out of range, if nr is
 *	out of range, if timeout is out of range.  May fail with -EFAULT
 *	if any of the memory specified is invalid.  May return 0 or
 *	< min_nr if the timeout specified by timeout has elapsed
 *	before sufficient events are available, where timeout == NULL
 *	specifies an infinite timeout.  Note that the timeout pointed to by
 *	timeout is relative.  Will fail with -ENOSYS if not implemented.
 */
#ifdef CONFIG_64BIT

SYSCALL_DEFINE5(io_getevents, aio_context_t, ctx_id,
		long, min_nr,
		long, nr,
		struct io_event __user *, events,
		struct __kernel_timespec __user *, timeout)
{
	struct timespec64	ts;
	int			ret;

	if (timeout && unlikely(get_timespec64(&ts, timeout)))
		return -EFAULT;

	ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &ts : NULL);
	if (!ret && signal_pending(current))
		ret = -EINTR;
	return ret;
}

#endif

struct __aio_sigset {
	const sigset_t __user	*sigmask;
	size_t			sigsetsize;
};

SYSCALL_DEFINE6(io_pgetevents,
		aio_context_t, ctx_id,
		long, min_nr,
		long, nr,
		struct io_event __user *, events,
		struct __kernel_timespec __user *, timeout,
		const struct __aio_sigset __user *, usig)
{
	struct __aio_sigset	ksig = { NULL, };
	struct timespec64	ts;
	bool interrupted;
	int ret;

	if (timeout && unlikely(get_timespec64(&ts, timeout)))
		return -EFAULT;

	if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
		return -EFAULT;

	ret = set_user_sigmask(ksig.sigmask, ksig.sigsetsize);
	if (ret)
		return ret;

	ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &ts : NULL);

	interrupted = signal_pending(current);
	restore_saved_sigmask_unless(interrupted);
	if (interrupted && !ret)
		ret = -ERESTARTNOHAND;

	return ret;
}
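
/*
 * Reaping completions from userspace, for reference (64-bit ABI; ctx is a
 * placeholder and error handling is omitted).  The timeout is relative, and
 * a NULL timeout means wait indefinitely:
 *
 *	struct io_event evs[8];
 *	struct timespec ts = { .tv_sec = 1 };	// wait at most one second
 *
 *	long got = syscall(__NR_io_getevents, ctx, 1, 8, evs, &ts);
 *	// got is the number of events copied out; for each one, .obj and
 *	// .data echo the submitted iocb pointer and its aio_data, and .res
 *	// holds the operation's result (byte count or negative errno).
 *
 * io_pgetevents() is the pselect()-style variant: it installs the signal
 * mask described by the __aio_sigset argument for the duration of the wait,
 * in the same way pselect(2) handles its sigmask.
 */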

#if defined(CONFIG_COMPAT_32BIT_TIME) && !defined(CONFIG_64BIT)

SYSCALL_DEFINE6(io_pgetevents_time32,
		aio_context_t, ctx_id,
		long, min_nr,
		long, nr,
		struct io_event __user *, events,
		struct old_timespec32 __user *, timeout,
		const struct __aio_sigset __user *, usig)
{
	struct __aio_sigset	ksig = { NULL, };
	struct timespec64	ts;
	bool interrupted;
	int ret;

	if (timeout && unlikely(get_old_timespec32(&ts, timeout)))
		return -EFAULT;

	if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
		return -EFAULT;

	ret = set_user_sigmask(ksig.sigmask, ksig.sigsetsize);
	if (ret)
		return ret;

	ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &ts : NULL);

	interrupted = signal_pending(current);
	restore_saved_sigmask_unless(interrupted);
	if (interrupted && !ret)
		ret = -ERESTARTNOHAND;

	return ret;
}

#endif

#if defined(CONFIG_COMPAT_32BIT_TIME)

SYSCALL_DEFINE5(io_getevents_time32, __u32, ctx_id,
		__s32, min_nr,
		__s32, nr,
		struct io_event __user *, events,
		struct old_timespec32 __user *, timeout)
{
	struct timespec64 t;
	int ret;

	if (timeout && get_old_timespec32(&t, timeout))
		return -EFAULT;

	ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &t : NULL);
	if (!ret && signal_pending(current))
		ret = -EINTR;
	return ret;
}

#endif

#ifdef CONFIG_COMPAT

struct __compat_aio_sigset {
	compat_uptr_t		sigmask;
	compat_size_t		sigsetsize;
};

#if defined(CONFIG_COMPAT_32BIT_TIME)

COMPAT_SYSCALL_DEFINE6(io_pgetevents,
		       compat_aio_context_t, ctx_id,
		       compat_long_t, min_nr,
		       compat_long_t, nr,
		       struct io_event __user *, events,
		       struct old_timespec32 __user *, timeout,
		       const struct __compat_aio_sigset __user *, usig)
{
	struct __compat_aio_sigset ksig = { 0, };
	struct timespec64 t;
	bool interrupted;
	int ret;

	if (timeout && get_old_timespec32(&t, timeout))
		return -EFAULT;

	if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
		return -EFAULT;

	ret = set_compat_user_sigmask(compat_ptr(ksig.sigmask), ksig.sigsetsize);
	if (ret)
		return ret;

	ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &t : NULL);

	interrupted = signal_pending(current);
	restore_saved_sigmask_unless(interrupted);
	if (interrupted && !ret)
		ret = -ERESTARTNOHAND;

	return ret;
}

#endif

COMPAT_SYSCALL_DEFINE6(io_pgetevents_time64,
		       compat_aio_context_t, ctx_id,
		       compat_long_t, min_nr,
		       compat_long_t, nr,
		       struct io_event __user *, events,
		       struct __kernel_timespec __user *, timeout,
		       const struct __compat_aio_sigset __user *, usig)
{
	struct __compat_aio_sigset ksig = { 0, };
	struct timespec64 t;
	bool interrupted;
	int ret;

	if (timeout && get_timespec64(&t, timeout))
		return -EFAULT;

	if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
		return -EFAULT;

	ret = set_compat_user_sigmask(compat_ptr(ksig.sigmask), ksig.sigsetsize);
	if (ret)
		return ret;

	ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &t : NULL);

	interrupted = signal_pending(current);
	restore_saved_sigmask_unless(interrupted);
	if (interrupted && !ret)
		ret = -ERESTARTNOHAND;

	return ret;
}
#endif