/*
 *	An async IO implementation for Linux
 *	Written by Benjamin LaHaise <bcrl@kvack.org>
 *
 *	Implements an efficient asynchronous io interface.
 *
 *	Copyright 2000, 2001, 2002 Red Hat, Inc.  All Rights Reserved.
 *	Copyright 2018 Christoph Hellwig.
 *
 *	See ../COPYING for licensing terms.
 */
#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/time.h>
#include <linux/aio_abi.h>
#include <linux/export.h>
#include <linux/syscalls.h>
#include <linux/backing-dev.h>
#include <linux/refcount.h>
#include <linux/uio.h>

#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/aio.h>
#include <linux/highmem.h>
#include <linux/workqueue.h>
#include <linux/security.h>
#include <linux/eventfd.h>
#include <linux/blkdev.h>
#include <linux/compat.h>
#include <linux/migrate.h>
#include <linux/ramfs.h>
#include <linux/percpu-refcount.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>

#include <linux/uaccess.h>
#include <linux/nospec.h>

#include "internal.h"

#define KIOCB_KEY		0

#define AIO_RING_MAGIC			0xa10a10a1
#define AIO_RING_COMPAT_FEATURES	1
#define AIO_RING_INCOMPAT_FEATURES	0
struct aio_ring {
	unsigned	id;	/* kernel internal index number */
	unsigned	nr;	/* number of io_events */
	unsigned	head;	/* Written to by userland or under ring_lock
				 * mutex by aio_read_events_ring(). */
	unsigned	tail;

	unsigned	magic;
	unsigned	compat_features;
	unsigned	incompat_features;
	unsigned	header_length;	/* size of aio_ring */


	struct io_event		io_events[];
}; /* 128 bytes + ring size */
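/*
 * Illustrative sketch (not part of this file): userspace can consume events
 * straight from the mmap()ed ring, mirroring what aio_read_events_ring()
 * does below.  "ring" is the address returned in the aio_context_t, and
 * consume_event() is a hypothetical helper.  A real consumer also needs a
 * read barrier between loading ->tail and reading the events, pairing with
 * the smp_wmb() in aio_complete():
 *
 *	struct aio_ring *ring = (struct aio_ring *)ctx_id;
 *	unsigned head = ring->head, tail = ring->tail;
 *
 *	while (head != tail) {
 *		consume_event(&ring->io_events[head]);
 *		head = (head + 1) % ring->nr;
 *	}
 *	ring->head = head;
 */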
/*
 * Plugging is meant to work with larger batches of IOs. If we don't
 * have more than the below, then don't bother setting up a plug.
 */
#define AIO_PLUG_THRESHOLD	2

#define AIO_RING_PAGES	8

struct kioctx_table {
	struct rcu_head		rcu;
	unsigned		nr;
	struct kioctx __rcu	*table[] __counted_by(nr);
};

struct kioctx_cpu {
	unsigned		reqs_available;
};

struct ctx_rq_wait {
	struct completion comp;
	atomic_t count;
};

struct kioctx {
	struct percpu_ref	users;
	atomic_t		dead;

	struct percpu_ref	reqs;

	unsigned long		user_id;

	struct kioctx_cpu __percpu *cpu;

	/*
	 * For percpu reqs_available, number of slots we move to/from global
	 * counter at a time:
	 */
	unsigned		req_batch;
	/*
	 * This is what userspace passed to io_setup(), it's not used for
	 * anything but counting against the global max_reqs quota.
	 *
	 * The real limit is nr_events - 1, which will be larger (see
	 * aio_setup_ring())
	 */
	unsigned		max_reqs;

	/* Size of ringbuffer, in units of struct io_event */
	unsigned		nr_events;

	unsigned long		mmap_base;
	unsigned long		mmap_size;

	struct page		**ring_pages;
	long			nr_pages;

	struct rcu_work		free_rwork;	/* see free_ioctx() */

	/*
	 * signals when all in-flight requests are done
	 */
	struct ctx_rq_wait	*rq_wait;

	struct {
		/*
		 * This counts the number of available slots in the ringbuffer,
		 * so we avoid overflowing it: it's decremented (if positive)
		 * when allocating a kiocb and incremented when the resulting
		 * io_event is pulled off the ringbuffer.
		 *
		 * We batch accesses to it with a percpu version.
		 */
		atomic_t	reqs_available;
	} ____cacheline_aligned_in_smp;

	struct {
		spinlock_t	ctx_lock;
		struct list_head active_reqs;	/* used for cancellation */
	} ____cacheline_aligned_in_smp;

	struct {
		struct mutex	ring_lock;
		wait_queue_head_t wait;
	} ____cacheline_aligned_in_smp;

	struct {
		unsigned	tail;
		unsigned	completed_events;
		spinlock_t	completion_lock;
	} ____cacheline_aligned_in_smp;

	struct page		*internal_pages[AIO_RING_PAGES];
	struct file		*aio_ring_file;

	unsigned		id;
};

/*
 * First field must be the file pointer in all the
 * iocb unions! See also 'struct kiocb' in <linux/fs.h>
 */
struct fsync_iocb {
	struct file		*file;
	struct work_struct	work;
	bool			datasync;
	struct cred		*creds;
};

struct poll_iocb {
	struct file		*file;
	struct wait_queue_head	*head;
	__poll_t		events;
	bool			cancelled;
	bool			work_scheduled;
	bool			work_need_resched;
	struct wait_queue_entry	wait;
	struct work_struct	work;
};

/*
 * NOTE! Each of the iocb union members has the file pointer
 * as the first entry in their struct definition. So you can
 * access the file pointer through any of the sub-structs,
 * or directly as just 'ki_filp' in this struct.
 */
struct aio_kiocb {
	union {
		struct file		*ki_filp;
		struct kiocb		rw;
		struct fsync_iocb	fsync;
		struct poll_iocb	poll;
	};

	struct kioctx		*ki_ctx;
	kiocb_cancel_fn		*ki_cancel;

	struct io_event		ki_res;

	struct list_head	ki_list;	/* the aio core uses this
						 * for cancellation */
	refcount_t		ki_refcnt;

	/*
	 * If the aio_resfd field of the userspace iocb is not zero,
	 * this is the underlying eventfd context to deliver events to.
	 */
	struct eventfd_ctx	*ki_eventfd;
};
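/*
 * Illustrative sketch (not part of this file): the "first member is the
 * file pointer" invariant above could be spelled as compile-time checks,
 * e.g. in aio_setup().  These BUILD_BUG_ON()s are an assumption added for
 * illustration, not something the original source carries:
 *
 *	BUILD_BUG_ON(offsetof(struct aio_kiocb, rw.ki_filp) !=
 *		     offsetof(struct aio_kiocb, ki_filp));
 *	BUILD_BUG_ON(offsetof(struct aio_kiocb, fsync.file) !=
 *		     offsetof(struct aio_kiocb, ki_filp));
 *	BUILD_BUG_ON(offsetof(struct aio_kiocb, poll.file) !=
 *		     offsetof(struct aio_kiocb, ki_filp));
 */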
/*------ sysctl variables----*/
static DEFINE_SPINLOCK(aio_nr_lock);
static unsigned long aio_nr;		/* current system wide number of aio requests */
static unsigned long aio_max_nr = 0x10000; /* system wide maximum number of aio requests */
/*----end sysctl variables---*/
#ifdef CONFIG_SYSCTL
static struct ctl_table aio_sysctls[] = {
	{
		.procname	= "aio-nr",
		.data		= &aio_nr,
		.maxlen		= sizeof(aio_nr),
		.mode		= 0444,
		.proc_handler	= proc_doulongvec_minmax,
	},
	{
		.procname	= "aio-max-nr",
		.data		= &aio_max_nr,
		.maxlen		= sizeof(aio_max_nr),
		.mode		= 0644,
		.proc_handler	= proc_doulongvec_minmax,
	},
	{}
};

static void __init aio_sysctl_init(void)
{
	register_sysctl_init("fs", aio_sysctls);
}
#else
#define aio_sysctl_init() do { } while (0)
#endif
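/*
 * Usage note (added for illustration): these are registered under "fs", so
 * they appear as /proc/sys/fs/aio-nr (read-only, mode 0444) and
 * /proc/sys/fs/aio-max-nr (mode 0644).  An administrator can raise the
 * system-wide request limit with, e.g.:
 *
 *	sysctl fs.aio-max-nr=1048576
 *
 * The value above is only an example.
 */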
static struct kmem_cache	*kiocb_cachep;
static struct kmem_cache	*kioctx_cachep;

static struct vfsmount *aio_mnt;

static const struct file_operations aio_ring_fops;
static const struct address_space_operations aio_ctx_aops;

static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages)
{
	struct file *file;
	struct inode *inode = alloc_anon_inode(aio_mnt->mnt_sb);
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	inode->i_mapping->a_ops = &aio_ctx_aops;
	inode->i_mapping->i_private_data = ctx;
	inode->i_size = PAGE_SIZE * nr_pages;

	file = alloc_file_pseudo(inode, aio_mnt, "[aio]",
				 O_RDWR, &aio_ring_fops);
	if (IS_ERR(file))
		iput(inode);
	return file;
}

static int aio_init_fs_context(struct fs_context *fc)
{
	if (!init_pseudo(fc, AIO_RING_MAGIC))
		return -ENOMEM;
	fc->s_iflags |= SB_I_NOEXEC;
	return 0;
}

/* aio_setup
 *	Creates the slab caches used by the aio routines, panic on
 *	failure as this is done early during the boot sequence.
 */
static int __init aio_setup(void)
{
	static struct file_system_type aio_fs = {
		.name		= "aio",
		.init_fs_context = aio_init_fs_context,
		.kill_sb	= kill_anon_super,
	};
	aio_mnt = kern_mount(&aio_fs);
	if (IS_ERR(aio_mnt))
		panic("Failed to create aio fs mount.");

	kiocb_cachep = KMEM_CACHE(aio_kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
	kioctx_cachep = KMEM_CACHE(kioctx, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
	aio_sysctl_init();
	return 0;
}
__initcall(aio_setup);

static void put_aio_ring_file(struct kioctx *ctx)
{
	struct file *aio_ring_file = ctx->aio_ring_file;
	struct address_space *i_mapping;

	if (aio_ring_file) {
		truncate_setsize(file_inode(aio_ring_file), 0);

		/* Prevent further access to the kioctx from migratepages */
		i_mapping = aio_ring_file->f_mapping;
		spin_lock(&i_mapping->i_private_lock);
		i_mapping->i_private_data = NULL;
		ctx->aio_ring_file = NULL;
		spin_unlock(&i_mapping->i_private_lock);

		fput(aio_ring_file);
	}
}

static void aio_free_ring(struct kioctx *ctx)
{
	int i;

	/* Disconnect the kioctx from the ring file.  This prevents future
	 * accesses to the kioctx from page migration.
	 */
	put_aio_ring_file(ctx);

	for (i = 0; i < ctx->nr_pages; i++) {
		struct page *page;
		pr_debug("pid(%d) [%d] page->count=%d\n", current->pid, i,
			 page_count(ctx->ring_pages[i]));
		page = ctx->ring_pages[i];
		if (!page)
			continue;
		ctx->ring_pages[i] = NULL;
		put_page(page);
	}

	if (ctx->ring_pages && ctx->ring_pages != ctx->internal_pages) {
		kfree(ctx->ring_pages);
		ctx->ring_pages = NULL;
	}
}

static int aio_ring_mremap(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct mm_struct *mm = vma->vm_mm;
	struct kioctx_table *table;
	int i, res = -EINVAL;

	spin_lock(&mm->ioctx_lock);
	rcu_read_lock();
	table = rcu_dereference(mm->ioctx_table);
	if (!table)
		goto out_unlock;

	for (i = 0; i < table->nr; i++) {
		struct kioctx *ctx;

		ctx = rcu_dereference(table->table[i]);
		if (ctx && ctx->aio_ring_file == file) {
			if (!atomic_read(&ctx->dead)) {
				ctx->user_id = ctx->mmap_base = vma->vm_start;
				res = 0;
			}
			break;
		}
	}

out_unlock:
	rcu_read_unlock();
	spin_unlock(&mm->ioctx_lock);
	return res;
}

static const struct vm_operations_struct aio_ring_vm_ops = {
	.mremap		= aio_ring_mremap,
#if IS_ENABLED(CONFIG_MMU)
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= filemap_page_mkwrite,
#endif
};

static int aio_ring_mmap(struct file *file, struct vm_area_struct *vma)
{
	vm_flags_set(vma, VM_DONTEXPAND);
	vma->vm_ops = &aio_ring_vm_ops;
	return 0;
}

static const struct file_operations aio_ring_fops = {
	.mmap = aio_ring_mmap,
};

#if IS_ENABLED(CONFIG_MIGRATION)
static int aio_migrate_folio(struct address_space *mapping, struct folio *dst,
			     struct folio *src, enum migrate_mode mode)
{
	struct kioctx *ctx;
	unsigned long flags;
	pgoff_t idx;
	int rc;

	/*
	 * We cannot support the _NO_COPY case here, because copy needs to
	 * happen under the ctx->completion_lock. That does not work with the
	 * migration workflow of MIGRATE_SYNC_NO_COPY.
	 */
	if (mode == MIGRATE_SYNC_NO_COPY)
		return -EINVAL;

	rc = 0;

	/* mapping->i_private_lock here protects against the kioctx teardown. */
	spin_lock(&mapping->i_private_lock);
	ctx = mapping->i_private_data;
	if (!ctx) {
		rc = -EINVAL;
		goto out;
	}

	/* The ring_lock mutex.  This prevents aio_read_events() from writing
	 * to the ring's head, and prevents page migration from mucking in
	 * a partially initialized kioctx.
	 */
	if (!mutex_trylock(&ctx->ring_lock)) {
		rc = -EAGAIN;
		goto out;
	}

	idx = src->index;
	if (idx < (pgoff_t)ctx->nr_pages) {
		/* Make sure the old folio hasn't already been changed */
		if (ctx->ring_pages[idx] != &src->page)
			rc = -EAGAIN;
	} else
		rc = -EINVAL;

	if (rc != 0)
		goto out_unlock;

	/* Writeback must be complete */
	BUG_ON(folio_test_writeback(src));
	folio_get(dst);

	rc = folio_migrate_mapping(mapping, dst, src, 1);
	if (rc != MIGRATEPAGE_SUCCESS) {
		folio_put(dst);
		goto out_unlock;
	}

	/* Take completion_lock to prevent other writes to the ring buffer
	 * while the old folio is copied to the new.  This prevents new
	 * events from being lost.
	 */
	spin_lock_irqsave(&ctx->completion_lock, flags);
	folio_migrate_copy(dst, src);
	BUG_ON(ctx->ring_pages[idx] != &src->page);
	ctx->ring_pages[idx] = &dst->page;
	spin_unlock_irqrestore(&ctx->completion_lock, flags);

	/* The old folio is no longer accessible. */
	folio_put(src);

out_unlock:
	mutex_unlock(&ctx->ring_lock);
out:
	spin_unlock(&mapping->i_private_lock);
	return rc;
}
#else
#define aio_migrate_folio NULL
#endif

static const struct address_space_operations aio_ctx_aops = {
	.dirty_folio	= noop_dirty_folio,
	.migrate_folio	= aio_migrate_folio,
};
static int aio_setup_ring(struct kioctx *ctx, unsigned int nr_events)
{
	struct aio_ring *ring;
	struct mm_struct *mm = current->mm;
	unsigned long size, unused;
	int nr_pages;
	int i;
	struct file *file;

	/* Compensate for the ring buffer's head/tail overlap entry */
	nr_events += 2;	/* 1 is required, 2 for good luck */

	size = sizeof(struct aio_ring);
	size += sizeof(struct io_event) * nr_events;

	nr_pages = PFN_UP(size);
	if (nr_pages < 0)
		return -EINVAL;

	file = aio_private_file(ctx, nr_pages);
	if (IS_ERR(file)) {
		ctx->aio_ring_file = NULL;
		return -ENOMEM;
	}

	ctx->aio_ring_file = file;
	nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring))
			/ sizeof(struct io_event);

	ctx->ring_pages = ctx->internal_pages;
	if (nr_pages > AIO_RING_PAGES) {
		ctx->ring_pages = kcalloc(nr_pages, sizeof(struct page *),
					  GFP_KERNEL);
		if (!ctx->ring_pages) {
			put_aio_ring_file(ctx);
			return -ENOMEM;
		}
	}

	for (i = 0; i < nr_pages; i++) {
		struct page *page;
		page = find_or_create_page(file->f_mapping,
					   i, GFP_USER | __GFP_ZERO);
		if (!page)
			break;
		pr_debug("pid(%d) page[%d]->count=%d\n",
			 current->pid, i, page_count(page));
		SetPageUptodate(page);
		unlock_page(page);

		ctx->ring_pages[i] = page;
	}
	ctx->nr_pages = i;

	if (unlikely(i != nr_pages)) {
		aio_free_ring(ctx);
		return -ENOMEM;
	}

	ctx->mmap_size = nr_pages * PAGE_SIZE;
	pr_debug("attempting mmap of %lu bytes\n", ctx->mmap_size);

	if (mmap_write_lock_killable(mm)) {
		ctx->mmap_size = 0;
		aio_free_ring(ctx);
		return -EINTR;
	}

	ctx->mmap_base = do_mmap(ctx->aio_ring_file, 0, ctx->mmap_size,
				 PROT_READ | PROT_WRITE,
				 MAP_SHARED, 0, 0, &unused, NULL);
	mmap_write_unlock(mm);
	if (IS_ERR((void *)ctx->mmap_base)) {
		ctx->mmap_size = 0;
		aio_free_ring(ctx);
		return -ENOMEM;
	}

	pr_debug("mmap address: 0x%08lx\n", ctx->mmap_base);

	ctx->user_id = ctx->mmap_base;
	ctx->nr_events = nr_events; /* trusted copy */

	ring = page_address(ctx->ring_pages[0]);
	ring->nr = nr_events;	/* user copy */
	ring->id = ~0U;
	ring->head = ring->tail = 0;
	ring->magic = AIO_RING_MAGIC;
	ring->compat_features = AIO_RING_COMPAT_FEATURES;
	ring->incompat_features = AIO_RING_INCOMPAT_FEATURES;
	ring->header_length = sizeof(struct aio_ring);
	flush_dcache_page(ctx->ring_pages[0]);

	return 0;
}

#define AIO_EVENTS_PER_PAGE	(PAGE_SIZE / sizeof(struct io_event))
#define AIO_EVENTS_FIRST_PAGE	((PAGE_SIZE - sizeof(struct aio_ring)) / sizeof(struct io_event))
#define AIO_EVENTS_OFFSET	(AIO_EVENTS_PER_PAGE - AIO_EVENTS_FIRST_PAGE)
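/*
 * Worked example (illustrative, assuming 4 KiB pages, 32-byte io_events and
 * that the eight unsigned fields of struct aio_ring pack to 32 bytes):
 * AIO_EVENTS_PER_PAGE is 128, the first page loses the header so
 * AIO_EVENTS_FIRST_PAGE is 127, and AIO_EVENTS_OFFSET is 1.  Adding the
 * offset to an event index makes the page/offset arithmetic uniform across
 * the first and later pages.  Under the same assumptions,
 * aio_setup_ring(ctx, 128) bumps the request to 130 events, which needs
 * two pages, and the ring is then re-sized upward to everything those
 * pages can hold: (2 * 4096 - 32) / 32 = 255 events.
 */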
void kiocb_set_cancel_fn(struct kiocb *iocb, kiocb_cancel_fn *cancel)
{
	struct aio_kiocb *req = container_of(iocb, struct aio_kiocb, rw);
	struct kioctx *ctx = req->ki_ctx;
	unsigned long flags;

	if (WARN_ON_ONCE(!list_empty(&req->ki_list)))
		return;

	spin_lock_irqsave(&ctx->ctx_lock, flags);
	list_add_tail(&req->ki_list, &ctx->active_reqs);
	req->ki_cancel = cancel;
	spin_unlock_irqrestore(&ctx->ctx_lock, flags);
}
EXPORT_SYMBOL(kiocb_set_cancel_fn);
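/*
 * Illustrative sketch (not part of this file): a driver that queues an
 * async request can make it cancellable before returning -EIOCBQUEUED from
 * its ->read_iter()/->write_iter().  my_cancel() and my_hw_abort() are
 * hypothetical names:
 *
 *	static int my_cancel(struct kiocb *iocb)
 *	{
 *		my_hw_abort(iocb->private);
 *		return 0;
 *	}
 *
 * then, in the driver's ->read_iter(), for a kiocb it will complete
 * asynchronously:
 *
 *	kiocb_set_cancel_fn(iocb, my_cancel);
 *	return -EIOCBQUEUED;
 */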
/*
 * free_ioctx() should be RCU delayed to synchronize against the RCU
 * protected lookup_ioctx() and also needs process context to call
 * aio_free_ring().  Use rcu_work.
 */
static void free_ioctx(struct work_struct *work)
{
	struct kioctx *ctx = container_of(to_rcu_work(work), struct kioctx,
					  free_rwork);
	pr_debug("freeing %p\n", ctx);

	aio_free_ring(ctx);
	free_percpu(ctx->cpu);
	percpu_ref_exit(&ctx->reqs);
	percpu_ref_exit(&ctx->users);
	kmem_cache_free(kioctx_cachep, ctx);
}

static void free_ioctx_reqs(struct percpu_ref *ref)
{
	struct kioctx *ctx = container_of(ref, struct kioctx, reqs);

	/* At this point we know that there are no in-flight requests */
	if (ctx->rq_wait && atomic_dec_and_test(&ctx->rq_wait->count))
		complete(&ctx->rq_wait->comp);

	/* Synchronize against RCU protected table->table[] dereferences */
	INIT_RCU_WORK(&ctx->free_rwork, free_ioctx);
	queue_rcu_work(system_wq, &ctx->free_rwork);
}

/*
 * When this function runs, the kioctx has been removed from the "hash table"
 * and ctx->users has dropped to 0, so we know no more kiocbs can be submitted -
 * now it's safe to cancel any that need to be.
 */
static void free_ioctx_users(struct percpu_ref *ref)
{
	struct kioctx *ctx = container_of(ref, struct kioctx, users);
	struct aio_kiocb *req;

	spin_lock_irq(&ctx->ctx_lock);

	while (!list_empty(&ctx->active_reqs)) {
		req = list_first_entry(&ctx->active_reqs,
				       struct aio_kiocb, ki_list);
		req->ki_cancel(&req->rw);
		list_del_init(&req->ki_list);
	}

	spin_unlock_irq(&ctx->ctx_lock);

	percpu_ref_kill(&ctx->reqs);
	percpu_ref_put(&ctx->reqs);
}

static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
{
	unsigned i, new_nr;
	struct kioctx_table *table, *old;
	struct aio_ring *ring;

	spin_lock(&mm->ioctx_lock);
	table = rcu_dereference_raw(mm->ioctx_table);

	while (1) {
		if (table)
			for (i = 0; i < table->nr; i++)
				if (!rcu_access_pointer(table->table[i])) {
					ctx->id = i;
					rcu_assign_pointer(table->table[i], ctx);
					spin_unlock(&mm->ioctx_lock);

					/* While kioctx setup is in progress,
					 * we are protected from page migration
					 * changing ring_pages by ->ring_lock.
					 */
					ring = page_address(ctx->ring_pages[0]);
					ring->id = ctx->id;
					return 0;
				}

		new_nr = (table ? table->nr : 1) * 4;
		spin_unlock(&mm->ioctx_lock);

		table = kzalloc(struct_size(table, table, new_nr), GFP_KERNEL);
		if (!table)
			return -ENOMEM;

		table->nr = new_nr;

		spin_lock(&mm->ioctx_lock);
		old = rcu_dereference_raw(mm->ioctx_table);

		if (!old) {
			rcu_assign_pointer(mm->ioctx_table, table);
		} else if (table->nr > old->nr) {
			memcpy(table->table, old->table,
			       old->nr * sizeof(struct kioctx *));

			rcu_assign_pointer(mm->ioctx_table, table);
			kfree_rcu(old, rcu);
		} else {
			kfree(table);
			table = old;
		}
	}
}

static void aio_nr_sub(unsigned nr)
{
	spin_lock(&aio_nr_lock);
	if (WARN_ON(aio_nr - nr > aio_nr))
		aio_nr = 0;
	else
		aio_nr -= nr;
	spin_unlock(&aio_nr_lock);
}
/* ioctx_alloc
 *	Allocates and initializes an ioctx.  Returns an ERR_PTR if it failed.
 */
static struct kioctx *ioctx_alloc(unsigned nr_events)
{
	struct mm_struct *mm = current->mm;
	struct kioctx *ctx;
	int err = -ENOMEM;

	/*
	 * Store the original nr_events -- what userspace passed to io_setup(),
	 * for counting against the global limit -- before it changes.
	 */
	unsigned int max_reqs = nr_events;

	/*
	 * We keep track of the number of available ringbuffer slots, to prevent
	 * overflow (reqs_available), and we also use percpu counters for this.
	 *
	 * Since up to half the slots might be on other cpus' percpu counters
	 * and thus unavailable, double nr_events so userspace sees what it
	 * expected.  Additionally, we move req_batch slots to/from the percpu
	 * counters at a time, so make sure that isn't 0:
	 */
	nr_events = max(nr_events, num_possible_cpus() * 4);
	nr_events *= 2;

	/* Prevent overflows */
	if (nr_events > (0x10000000U / sizeof(struct io_event))) {
		pr_debug("ENOMEM: nr_events too high\n");
		return ERR_PTR(-EINVAL);
	}

	if (!nr_events || (unsigned long)max_reqs > aio_max_nr)
		return ERR_PTR(-EAGAIN);

	ctx = kmem_cache_zalloc(kioctx_cachep, GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	ctx->max_reqs = max_reqs;

	spin_lock_init(&ctx->ctx_lock);
	spin_lock_init(&ctx->completion_lock);
	mutex_init(&ctx->ring_lock);
	/* Protect against page migration throughout kioctx setup by keeping
	 * the ring_lock mutex held until setup is complete. */
	mutex_lock(&ctx->ring_lock);
	init_waitqueue_head(&ctx->wait);

	INIT_LIST_HEAD(&ctx->active_reqs);

	if (percpu_ref_init(&ctx->users, free_ioctx_users, 0, GFP_KERNEL))
		goto err;

	if (percpu_ref_init(&ctx->reqs, free_ioctx_reqs, 0, GFP_KERNEL))
		goto err;

	ctx->cpu = alloc_percpu(struct kioctx_cpu);
	if (!ctx->cpu)
		goto err;

	err = aio_setup_ring(ctx, nr_events);
	if (err < 0)
		goto err;

	atomic_set(&ctx->reqs_available, ctx->nr_events - 1);
	ctx->req_batch = (ctx->nr_events - 1) / (num_possible_cpus() * 4);
	if (ctx->req_batch < 1)
		ctx->req_batch = 1;

	/* limit the number of system wide aios */
	spin_lock(&aio_nr_lock);
	if (aio_nr + ctx->max_reqs > aio_max_nr ||
	    aio_nr + ctx->max_reqs < aio_nr) {
		spin_unlock(&aio_nr_lock);
		err = -EAGAIN;
		goto err_ctx;
	}
	aio_nr += ctx->max_reqs;
	spin_unlock(&aio_nr_lock);

	percpu_ref_get(&ctx->users);	/* io_setup() will drop this ref */
	percpu_ref_get(&ctx->reqs);	/* free_ioctx_users() will drop this */

	err = ioctx_add_table(ctx, mm);
	if (err)
		goto err_cleanup;

	/* Release the ring_lock mutex now that all setup is complete. */
	mutex_unlock(&ctx->ring_lock);

	pr_debug("allocated ioctx %p[%ld]: mm=%p mask=0x%x\n",
		 ctx, ctx->user_id, mm, ctx->nr_events);
	return ctx;

err_cleanup:
	aio_nr_sub(ctx->max_reqs);
err_ctx:
	atomic_set(&ctx->dead, 1);
	if (ctx->mmap_size)
		vm_munmap(ctx->mmap_base, ctx->mmap_size);
	aio_free_ring(ctx);
err:
	mutex_unlock(&ctx->ring_lock);
	free_percpu(ctx->cpu);
	percpu_ref_exit(&ctx->reqs);
	percpu_ref_exit(&ctx->users);
	kmem_cache_free(kioctx_cachep, ctx);
	pr_debug("error allocating ioctx %d\n", err);
	return ERR_PTR(err);
}
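/*
 * Worked example (illustrative): on an 8-CPU box, io_setup(128, &ctx) gives
 * max_reqs = 128, then nr_events = max(128, 8 * 4) * 2 = 256 is requested
 * from aio_setup_ring(), which rounds the ring up to whole pages.  Slots
 * then move between the global and percpu counters req_batch =
 * (ctx->nr_events - 1) / 32 at a time, and only max_reqs = 128 counts
 * against the aio_max_nr quota.
 */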
/* kill_ioctx
 *	Cancels all outstanding aio requests on an aio context.  Used
 *	when the processes owning a context have all exited to encourage
 *	the rapid destruction of the kioctx.
 */
static int kill_ioctx(struct mm_struct *mm, struct kioctx *ctx,
		      struct ctx_rq_wait *wait)
{
	struct kioctx_table *table;

	spin_lock(&mm->ioctx_lock);
	if (atomic_xchg(&ctx->dead, 1)) {
		spin_unlock(&mm->ioctx_lock);
		return -EINVAL;
	}

	table = rcu_dereference_raw(mm->ioctx_table);
	WARN_ON(ctx != rcu_access_pointer(table->table[ctx->id]));
	RCU_INIT_POINTER(table->table[ctx->id], NULL);
	spin_unlock(&mm->ioctx_lock);

	/* free_ioctx_reqs() will do the necessary RCU synchronization */
	wake_up_all(&ctx->wait);

	/*
	 * It'd be more correct to do this in free_ioctx(), after all
	 * the outstanding kiocbs have finished - but by then io_destroy
	 * has already returned, so io_setup() could potentially return
	 * -EAGAIN with no ioctxs actually in use (as far as userspace
	 * could tell).
	 */
	aio_nr_sub(ctx->max_reqs);

	if (ctx->mmap_size)
		vm_munmap(ctx->mmap_base, ctx->mmap_size);

	ctx->rq_wait = wait;
	percpu_ref_kill(&ctx->users);
	return 0;
}

/*
 * exit_aio: called when the last user of mm goes away.  At this point, there is
 * no way for any new requests to be submitted or any of the io_* syscalls to be
 * called on the context.
 *
 * There may be outstanding kiocbs, but free_ioctx() will explicitly wait on
 * them.
 */
void exit_aio(struct mm_struct *mm)
{
	struct kioctx_table *table = rcu_dereference_raw(mm->ioctx_table);
	struct ctx_rq_wait wait;
	int i, skipped;

	if (!table)
		return;

	atomic_set(&wait.count, table->nr);
	init_completion(&wait.comp);

	skipped = 0;
	for (i = 0; i < table->nr; ++i) {
		struct kioctx *ctx =
			rcu_dereference_protected(table->table[i], true);

		if (!ctx) {
			skipped++;
			continue;
		}

		/*
		 * We don't need to bother with munmap() here - exit_mmap(mm)
		 * is coming and it'll unmap everything. And we simply can't,
		 * this is not necessarily our ->mm.
		 * Since kill_ioctx() uses non-zero ->mmap_size as indicator
		 * that it needs to unmap the area, just set it to 0.
		 */
		ctx->mmap_size = 0;
		kill_ioctx(mm, ctx, &wait);
	}

	if (!atomic_sub_and_test(skipped, &wait.count)) {
		/* Wait until all IO for the context is done. */
		wait_for_completion(&wait.comp);
	}

	RCU_INIT_POINTER(mm->ioctx_table, NULL);
	kfree(table);
}

static void put_reqs_available(struct kioctx *ctx, unsigned nr)
{
	struct kioctx_cpu *kcpu;
	unsigned long flags;

	local_irq_save(flags);
	kcpu = this_cpu_ptr(ctx->cpu);
	kcpu->reqs_available += nr;

	while (kcpu->reqs_available >= ctx->req_batch * 2) {
		kcpu->reqs_available -= ctx->req_batch;
		atomic_add(ctx->req_batch, &ctx->reqs_available);
	}

	local_irq_restore(flags);
}

static bool __get_reqs_available(struct kioctx *ctx)
{
	struct kioctx_cpu *kcpu;
	bool ret = false;
	unsigned long flags;

	local_irq_save(flags);
	kcpu = this_cpu_ptr(ctx->cpu);
	if (!kcpu->reqs_available) {
		int avail = atomic_read(&ctx->reqs_available);

		do {
			if (avail < ctx->req_batch)
				goto out;
		} while (!atomic_try_cmpxchg(&ctx->reqs_available,
					     &avail, avail - ctx->req_batch));

		kcpu->reqs_available += ctx->req_batch;
	}

	ret = true;
	kcpu->reqs_available--;
out:
	local_irq_restore(flags);
	return ret;
}

/* refill_reqs_available
 *	Updates the reqs_available reference counts used for tracking the
 *	number of free slots in the completion ring.  This can be called
 *	from aio_complete() (to optimistically update reqs_available) or
 *	from aio_get_req() (the "we're out of events" case).  It must be
 *	called holding ctx->completion_lock.
 */
static void refill_reqs_available(struct kioctx *ctx, unsigned head,
				  unsigned tail)
{
	unsigned events_in_ring, completed;

	/* Clamp head since userland can write to it. */
	head %= ctx->nr_events;
	if (head <= tail)
		events_in_ring = tail - head;
	else
		events_in_ring = ctx->nr_events - (head - tail);

	completed = ctx->completed_events;
	if (events_in_ring < completed)
		completed -= events_in_ring;
	else
		completed = 0;

	if (!completed)
		return;

	ctx->completed_events -= completed;
	put_reqs_available(ctx, completed);
}
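/*
 * Worked example (illustrative): with nr_events = 256, head = 250 and
 * tail = 10, the ring holds 256 - (250 - 10) = 16 events.  If
 * ctx->completed_events is 20, only 20 - 16 = 4 slots are known to have
 * been consumed and can be handed back via put_reqs_available().
 */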
/* user_refill_reqs_available
 *	Called to refill reqs_available when aio_get_req() runs out of
 *	space in the completion ring.
 */
static void user_refill_reqs_available(struct kioctx *ctx)
{
	spin_lock_irq(&ctx->completion_lock);
	if (ctx->completed_events) {
		struct aio_ring *ring;
		unsigned head;

		/* Access of ring->head may race with aio_read_events_ring()
		 * here, but that's okay: whether we read the old version or
		 * the new one, either will be valid.  The important part is
		 * that head cannot pass tail since we prevent
		 * aio_complete() from updating tail by holding
		 * ctx->completion_lock.  Even if head is invalid, the check
		 * against ctx->completed_events below will make sure we do the
		 * safe/right thing.
		 */
		ring = page_address(ctx->ring_pages[0]);
		head = ring->head;

		refill_reqs_available(ctx, head, ctx->tail);
	}

	spin_unlock_irq(&ctx->completion_lock);
}

static bool get_reqs_available(struct kioctx *ctx)
{
	if (__get_reqs_available(ctx))
		return true;
	user_refill_reqs_available(ctx);
	return __get_reqs_available(ctx);
}

/* aio_get_req
 *	Allocate a slot for an aio request.
 *	Returns NULL if no requests are free.
 *
 * The refcount is initialized to 2 - one for the async op completion,
 * one for the synchronous code that does this.
 */
static inline struct aio_kiocb *aio_get_req(struct kioctx *ctx)
{
	struct aio_kiocb *req;

	req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL);
	if (unlikely(!req))
		return NULL;

	if (unlikely(!get_reqs_available(ctx))) {
		kmem_cache_free(kiocb_cachep, req);
		return NULL;
	}

	percpu_ref_get(&ctx->reqs);
	req->ki_ctx = ctx;
	INIT_LIST_HEAD(&req->ki_list);
	refcount_set(&req->ki_refcnt, 2);
	req->ki_eventfd = NULL;
	return req;
}
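/*
 * Illustrative note (added commentary): the two references taken above are
 * both dropped via iocb_put().  io_submit_one() drops the submission-side
 * reference once it is done touching the request, and the completion path
 * (e.g. aio_complete_rw()) drops the other; whichever put runs last
 * triggers aio_complete() and iocb_destroy().
 */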
static struct kioctx *lookup_ioctx(unsigned long ctx_id)
{
	struct aio_ring __user *ring = (void __user *)ctx_id;
	struct mm_struct *mm = current->mm;
	struct kioctx *ctx, *ret = NULL;
	struct kioctx_table *table;
	unsigned id;

	if (get_user(id, &ring->id))
		return NULL;

	rcu_read_lock();
	table = rcu_dereference(mm->ioctx_table);

	if (!table || id >= table->nr)
		goto out;

	id = array_index_nospec(id, table->nr);
	ctx = rcu_dereference(table->table[id]);
	if (ctx && ctx->user_id == ctx_id) {
		if (percpu_ref_tryget_live(&ctx->users))
			ret = ctx;
	}
out:
	rcu_read_unlock();
	return ret;
}

static inline void iocb_destroy(struct aio_kiocb *iocb)
{
	if (iocb->ki_eventfd)
		eventfd_ctx_put(iocb->ki_eventfd);
	if (iocb->ki_filp)
		fput(iocb->ki_filp);
	percpu_ref_put(&iocb->ki_ctx->reqs);
	kmem_cache_free(kiocb_cachep, iocb);
}

struct aio_waiter {
	struct wait_queue_entry	w;
	size_t			min_nr;
};

/* aio_complete
 *	Called when the io request on the given iocb is complete.
 */
static void aio_complete(struct aio_kiocb *iocb)
{
	struct kioctx	*ctx = iocb->ki_ctx;
	struct aio_ring	*ring;
	struct io_event	*ev_page, *event;
	unsigned tail, pos, head, avail;
	unsigned long	flags;

	/*
	 * Add a completion event to the ring buffer. Must be done holding
	 * ctx->completion_lock to prevent other code from messing with the tail
	 * pointer since we might be called from irq context.
	 */
	spin_lock_irqsave(&ctx->completion_lock, flags);

	tail = ctx->tail;
	pos = tail + AIO_EVENTS_OFFSET;

	if (++tail >= ctx->nr_events)
		tail = 0;

	ev_page = page_address(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
	event = ev_page + pos % AIO_EVENTS_PER_PAGE;

	*event = iocb->ki_res;

	flush_dcache_page(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);

	pr_debug("%p[%u]: %p: %p %Lx %Lx %Lx\n", ctx, tail, iocb,
		 (void __user *)(unsigned long)iocb->ki_res.obj,
		 iocb->ki_res.data, iocb->ki_res.res, iocb->ki_res.res2);

	/* after flagging the request as done, we
	 * must never even look at it again
	 */
	smp_wmb();	/* make event visible before updating tail */

	ctx->tail = tail;

	ring = page_address(ctx->ring_pages[0]);
	head = ring->head;
	ring->tail = tail;
	flush_dcache_page(ctx->ring_pages[0]);

	ctx->completed_events++;
	if (ctx->completed_events > 1)
		refill_reqs_available(ctx, head, tail);

	avail = tail > head
		? tail - head
		: tail + ctx->nr_events - head;
	spin_unlock_irqrestore(&ctx->completion_lock, flags);

	pr_debug("added to ring %p at [%u]\n", iocb, tail);

	/*
	 * Check if the user asked us to deliver the result through an
	 * eventfd. The eventfd_signal() function is safe to be called
	 * from IRQ context.
	 */
	if (iocb->ki_eventfd)
		eventfd_signal(iocb->ki_eventfd);

	/*
	 * We have to order our ring_info tail store above and test
	 * of the wait list below outside the wait lock.  This is
	 * like in wake_up_bit() where clearing a bit has to be
	 * ordered with the unlocked test.
	 */
	smp_mb();

	if (waitqueue_active(&ctx->wait)) {
		struct aio_waiter *curr, *next;
		unsigned long flags;

		spin_lock_irqsave(&ctx->wait.lock, flags);
		list_for_each_entry_safe(curr, next, &ctx->wait.head, w.entry)
			if (avail >= curr->min_nr) {
				list_del_init_careful(&curr->w.entry);
				wake_up_process(curr->w.private);
			}
		spin_unlock_irqrestore(&ctx->wait.lock, flags);
	}
}

static inline void iocb_put(struct aio_kiocb *iocb)
{
	if (refcount_dec_and_test(&iocb->ki_refcnt)) {
		aio_complete(iocb);
		iocb_destroy(iocb);
	}
}
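/*
 * Illustrative note on ordering (added commentary): the smp_wmb() in
 * aio_complete() above publishes the event before publishing the new tail,
 * and pairs with the smp_rmb() in aio_read_events_ring() below, which reads
 * the tail before reading the events.  The same pairing protects any
 * userspace consumer that follows the ring protocol.
 */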
/* aio_read_events_ring
 *	Pull an event off of the ioctx's event ring.  Returns the number of
 *	events fetched
 */
static long aio_read_events_ring(struct kioctx *ctx,
				 struct io_event __user *event, long nr)
{
	struct aio_ring *ring;
	unsigned head, tail, pos;
	long ret = 0;
	int copy_ret;

	/*
	 * The mutex can block and wake us up and that will cause
	 * wait_event_interruptible_hrtimeout() to schedule without sleeping
	 * and repeat. This should be rare enough that it doesn't cause
	 * performance issues. See the comment in read_events() for more detail.
	 */
	sched_annotate_sleep();
	mutex_lock(&ctx->ring_lock);

	/* Access to ->ring_pages here is protected by ctx->ring_lock. */
	ring = page_address(ctx->ring_pages[0]);
	head = ring->head;
	tail = ring->tail;

	/*
	 * Ensure that once we've read the current tail pointer, that
	 * we also see the events that were stored up to the tail.
	 */
	smp_rmb();

	pr_debug("h%u t%u m%u\n", head, tail, ctx->nr_events);

	if (head == tail)
		goto out;

	head %= ctx->nr_events;
	tail %= ctx->nr_events;

	while (ret < nr) {
		long avail;
		struct io_event *ev;
		struct page *page;

		avail = (head <= tail ? tail : ctx->nr_events) - head;
		if (head == tail)
			break;

		pos = head + AIO_EVENTS_OFFSET;
		page = ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE];
		pos %= AIO_EVENTS_PER_PAGE;

		avail = min(avail, nr - ret);
		avail = min_t(long, avail, AIO_EVENTS_PER_PAGE - pos);

		ev = page_address(page);
		copy_ret = copy_to_user(event + ret, ev + pos,
					sizeof(*ev) * avail);

		if (unlikely(copy_ret)) {
			ret = -EFAULT;
			goto out;
		}

		ret += avail;
		head += avail;
		head %= ctx->nr_events;
	}

	ring = page_address(ctx->ring_pages[0]);
	ring->head = head;
	flush_dcache_page(ctx->ring_pages[0]);

	pr_debug("%li  h%u t%u\n", ret, head, tail);
out:
	mutex_unlock(&ctx->ring_lock);

	return ret;
}

static bool aio_read_events(struct kioctx *ctx, long min_nr, long nr,
			    struct io_event __user *event, long *i)
{
	long ret = aio_read_events_ring(ctx, event + *i, nr - *i);

	if (ret > 0)
		*i += ret;

	if (unlikely(atomic_read(&ctx->dead)))
		ret = -EINVAL;

	if (!*i)
		*i = ret;

	return ret < 0 || *i >= min_nr;
}

static long read_events(struct kioctx *ctx, long min_nr, long nr,
			struct io_event __user *event,
			ktime_t until)
{
	struct hrtimer_sleeper	t;
	struct aio_waiter	w;
	long ret = 0, ret2 = 0;

	/*
	 * Note that aio_read_events() is being called as the conditional - i.e.
	 * we're calling it after prepare_to_wait() has set task state to
	 * TASK_INTERRUPTIBLE.
	 *
	 * But aio_read_events() can block, and if it blocks it's going to flip
	 * the task state back to TASK_RUNNING.
	 *
	 * This should be ok, provided it doesn't flip the state back to
	 * TASK_RUNNING and return 0 too much - that causes us to spin. That
	 * will only happen if the mutex_lock() call blocks, and we then find
	 * the ringbuffer empty. So in practice we should be ok, but it's
	 * something to be aware of when touching this code.
	 */
	aio_read_events(ctx, min_nr, nr, event, &ret);
	if (until == 0 || ret < 0 || ret >= min_nr)
		return ret;

	hrtimer_init_sleeper_on_stack(&t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	if (until != KTIME_MAX) {
		hrtimer_set_expires_range_ns(&t.timer, until,
					     current->timer_slack_ns);
		hrtimer_sleeper_start_expires(&t, HRTIMER_MODE_REL);
	}

	init_wait(&w.w);

	while (1) {
		unsigned long nr_got = ret;

		w.min_nr = min_nr - ret;

		ret2 = prepare_to_wait_event(&ctx->wait, &w.w,
					     TASK_INTERRUPTIBLE);
		if (!ret2 && !t.task)
			ret2 = -ETIME;

		if (aio_read_events(ctx, min_nr, nr, event, &ret) || ret2)
			break;

		if (nr_got == ret)
			schedule();
	}

	finish_wait(&ctx->wait, &w.w);
	hrtimer_cancel(&t.timer);
	destroy_hrtimer_on_stack(&t.timer);

	return ret;
}

/* sys_io_setup:
 *	Create an aio_context capable of receiving at least nr_events.
 *	ctxp must not point to an aio_context that already exists, and
 *	must be initialized to 0 prior to the call.  On successful
 *	creation of the aio_context, *ctxp is filled in with the resulting
 *	handle.  May fail with -EINVAL if *ctxp is not initialized, or
 *	if the specified nr_events exceeds internal limits.  May fail
 *	with -EAGAIN if the specified nr_events exceeds the user's limit
 *	of available events.  May fail with -ENOMEM if insufficient kernel
 *	resources are available.  May fail with -EFAULT if an invalid
 *	pointer is passed for ctxp.  Will fail with -ENOSYS if not
 *	implemented.
 */
SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp)
{
	struct kioctx *ioctx = NULL;
	unsigned long ctx;
	long ret;

	ret = get_user(ctx, ctxp);
	if (unlikely(ret))
		goto out;

	ret = -EINVAL;
	if (unlikely(ctx || nr_events == 0)) {
		pr_debug("EINVAL: ctx %lu nr_events %u\n",
			 ctx, nr_events);
		goto out;
	}

	ioctx = ioctx_alloc(nr_events);
	ret = PTR_ERR(ioctx);
	if (!IS_ERR(ioctx)) {
		ret = put_user(ioctx->user_id, ctxp);
		if (ret)
			kill_ioctx(current->mm, ioctx, NULL);
		percpu_ref_put(&ioctx->users);
	}

out:
	return ret;
}
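/*
 * Illustrative userspace sketch (not part of this file), using raw
 * syscalls since glibc does not wrap these:
 *
 *	#include <linux/aio_abi.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	aio_context_t ctx = 0;	(must be zeroed before the call)
 *	if (syscall(SYS_io_setup, 128, &ctx) < 0)
 *		perror("io_setup");
 *	...
 *	syscall(SYS_io_destroy, ctx);
 */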
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(io_setup, unsigned, nr_events, u32 __user *, ctx32p)
{
	struct kioctx *ioctx = NULL;
	unsigned long ctx;
	long ret;

	ret = get_user(ctx, ctx32p);
	if (unlikely(ret))
		goto out;

	ret = -EINVAL;
	if (unlikely(ctx || nr_events == 0)) {
		pr_debug("EINVAL: ctx %lu nr_events %u\n",
			 ctx, nr_events);
		goto out;
	}

	ioctx = ioctx_alloc(nr_events);
	ret = PTR_ERR(ioctx);
	if (!IS_ERR(ioctx)) {
		/* truncating is ok because it's a user address */
		ret = put_user((u32)ioctx->user_id, ctx32p);
		if (ret)
			kill_ioctx(current->mm, ioctx, NULL);
		percpu_ref_put(&ioctx->users);
	}

out:
	return ret;
}
#endif

/* sys_io_destroy:
 *	Destroy the aio_context specified.  May cancel any outstanding
 *	AIOs and block on completion.  Will fail with -ENOSYS if not
 *	implemented.  May fail with -EINVAL if the context pointed to
 *	is invalid.
 */
SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
{
	struct kioctx *ioctx = lookup_ioctx(ctx);
	if (likely(NULL != ioctx)) {
		struct ctx_rq_wait wait;
		int ret;

		init_completion(&wait.comp);
		atomic_set(&wait.count, 1);

		/* Pass requests_done to kill_ioctx() where it can be set
		 * in a thread-safe way. If we tried to set it here we would
		 * have a race condition if two io_destroy() calls ran
		 * simultaneously.
		 */
		ret = kill_ioctx(current->mm, ioctx, &wait);
		percpu_ref_put(&ioctx->users);

		/* Wait until all IO for the context is done. Otherwise the
		 * kernel keeps using user-space buffers even if the user
		 * thinks the context is destroyed.
		 */
		if (!ret)
			wait_for_completion(&wait.comp);

		return ret;
	}
	pr_debug("EINVAL: invalid context id\n");
	return -EINVAL;
}
static void aio_remove_iocb(struct aio_kiocb *iocb)
{
	struct kioctx *ctx = iocb->ki_ctx;
	unsigned long flags;

	spin_lock_irqsave(&ctx->ctx_lock, flags);
	list_del(&iocb->ki_list);
	spin_unlock_irqrestore(&ctx->ctx_lock, flags);
}

static void aio_complete_rw(struct kiocb *kiocb, long res)
{
	struct aio_kiocb *iocb = container_of(kiocb, struct aio_kiocb, rw);

	if (!list_empty_careful(&iocb->ki_list))
		aio_remove_iocb(iocb);

	if (kiocb->ki_flags & IOCB_WRITE) {
		struct inode *inode = file_inode(kiocb->ki_filp);

		if (S_ISREG(inode->i_mode))
			kiocb_end_write(kiocb);
	}

	iocb->ki_res.res = res;
	iocb->ki_res.res2 = 0;
	iocb_put(iocb);
}

static int aio_prep_rw(struct kiocb *req, const struct iocb *iocb)
{
	int ret;

	req->ki_complete = aio_complete_rw;
	req->private = NULL;
	req->ki_pos = iocb->aio_offset;
	req->ki_flags = req->ki_filp->f_iocb_flags;
	if (iocb->aio_flags & IOCB_FLAG_RESFD)
		req->ki_flags |= IOCB_EVENTFD;
	if (iocb->aio_flags & IOCB_FLAG_IOPRIO) {
		/*
		 * If the IOCB_FLAG_IOPRIO flag of aio_flags is set, then
		 * aio_reqprio is interpreted as an I/O scheduling
		 * class and priority.
		 */
		ret = ioprio_check_cap(iocb->aio_reqprio);
		if (ret) {
			pr_debug("aio ioprio check cap error: %d\n", ret);
			return ret;
		}

		req->ki_ioprio = iocb->aio_reqprio;
	} else
		req->ki_ioprio = get_current_ioprio();

	ret = kiocb_set_rw_flags(req, iocb->aio_rw_flags);
	if (unlikely(ret))
		return ret;

	req->ki_flags &= ~IOCB_HIPRI; /* no one is going to poll for this I/O */
	return 0;
}

static ssize_t aio_setup_rw(int rw, const struct iocb *iocb,
		struct iovec **iovec, bool vectored, bool compat,
		struct iov_iter *iter)
{
	void __user *buf = (void __user *)(uintptr_t)iocb->aio_buf;
	size_t len = iocb->aio_nbytes;

	if (!vectored) {
		ssize_t ret = import_single_range(rw, buf, len, *iovec, iter);
		*iovec = NULL;
		return ret;
	}

	return __import_iovec(rw, buf, len, UIO_FASTIOV, iovec, iter, compat);
}

static inline void aio_rw_done(struct kiocb *req, ssize_t ret)
{
	switch (ret) {
	case -EIOCBQUEUED:
		break;
	case -ERESTARTSYS:
	case -ERESTARTNOINTR:
	case -ERESTARTNOHAND:
	case -ERESTART_RESTARTBLOCK:
		/*
		 * There's no easy way to restart the syscall since other AIOs
		 * may already be running. Just fail this IO with EINTR.
		 */
		ret = -EINTR;
		fallthrough;
	default:
		req->ki_complete(req, ret);
	}
}
static int aio_read(struct kiocb *req, const struct iocb *iocb,
			bool vectored, bool compat)
{
	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
	struct iov_iter iter;
	struct file *file;
	int ret;

	ret = aio_prep_rw(req, iocb);
	if (ret)
		return ret;
	file = req->ki_filp;
	if (unlikely(!(file->f_mode & FMODE_READ)))
		return -EBADF;
	if (unlikely(!file->f_op->read_iter))
		return -EINVAL;

	ret = aio_setup_rw(ITER_DEST, iocb, &iovec, vectored, compat, &iter);
	if (ret < 0)
		return ret;
	ret = rw_verify_area(READ, file, &req->ki_pos, iov_iter_count(&iter));
	if (!ret)
		aio_rw_done(req, call_read_iter(file, req, &iter));
	kfree(iovec);
	return ret;
}

static int aio_write(struct kiocb *req, const struct iocb *iocb,
			 bool vectored, bool compat)
{
	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
	struct iov_iter iter;
	struct file *file;
	int ret;

	ret = aio_prep_rw(req, iocb);
	if (ret)
		return ret;
	file = req->ki_filp;

	if (unlikely(!(file->f_mode & FMODE_WRITE)))
		return -EBADF;
	if (unlikely(!file->f_op->write_iter))
		return -EINVAL;

	ret = aio_setup_rw(ITER_SOURCE, iocb, &iovec, vectored, compat, &iter);
	if (ret < 0)
		return ret;
	ret = rw_verify_area(WRITE, file, &req->ki_pos, iov_iter_count(&iter));
	if (!ret) {
		if (S_ISREG(file_inode(file)->i_mode))
			kiocb_start_write(req);
		req->ki_flags |= IOCB_WRITE;
		aio_rw_done(req, call_write_iter(file, req, &iter));
	}
	kfree(iovec);
	return ret;
}

static void aio_fsync_work(struct work_struct *work)
{
	struct aio_kiocb *iocb = container_of(work, struct aio_kiocb, fsync.work);
	const struct cred *old_cred = override_creds(iocb->fsync.creds);

	iocb->ki_res.res = vfs_fsync(iocb->fsync.file, iocb->fsync.datasync);
	revert_creds(old_cred);
	put_cred(iocb->fsync.creds);
	iocb_put(iocb);
}

static int aio_fsync(struct fsync_iocb *req, const struct iocb *iocb,
		     bool datasync)
{
	if (unlikely(iocb->aio_buf || iocb->aio_offset || iocb->aio_nbytes ||
			iocb->aio_rw_flags))
		return -EINVAL;

	if (unlikely(!req->file->f_op->fsync))
		return -EINVAL;

	req->creds = prepare_creds();
	if (!req->creds)
		return -ENOMEM;

	req->datasync = datasync;
	INIT_WORK(&req->work, aio_fsync_work);
	schedule_work(&req->work);
	return 0;
}

static void aio_poll_put_work(struct work_struct *work)
{
	struct poll_iocb *req = container_of(work, struct poll_iocb, work);
	struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll);

	iocb_put(iocb);
}

/*
 * Safely lock the waitqueue which the request is on, synchronizing with the
 * case where the ->poll() provider decides to free its waitqueue early.
 *
 * Returns true on success, meaning that req->head->lock was locked, req->wait
 * is on req->head, and an RCU read lock was taken.  Returns false if the
 * request was already removed from its waitqueue (which might no longer exist).
 */
static bool poll_iocb_lock_wq(struct poll_iocb *req)
{
	wait_queue_head_t *head;

	/*
	 * While we hold the waitqueue lock and the waitqueue is nonempty,
	 * wake_up_pollfree() will wait for us.  However, taking the waitqueue
	 * lock in the first place can race with the waitqueue being freed.
	 *
	 * We solve this as eventpoll does: by taking advantage of the fact that
	 * all users of wake_up_pollfree() will RCU-delay the actual free.  If
	 * we enter rcu_read_lock() and see that the pointer to the queue is
	 * non-NULL, we can then lock it without the memory being freed out from
	 * under us, then check whether the request is still on the queue.
	 *
	 * Keep holding rcu_read_lock() as long as we hold the queue lock, in
	 * case the caller deletes the entry from the queue, leaving it empty.
	 * In that case, only RCU prevents the queue memory from being freed.
	 */
	rcu_read_lock();
	head = smp_load_acquire(&req->head);
	if (head) {
		spin_lock(&head->lock);
		if (!list_empty(&req->wait.entry))
			return true;
		spin_unlock(&head->lock);
	}
	rcu_read_unlock();
	return false;
}

static void poll_iocb_unlock_wq(struct poll_iocb *req)
{
	spin_unlock(&req->head->lock);
	rcu_read_unlock();
}

static void aio_poll_complete_work(struct work_struct *work)
{
	struct poll_iocb *req = container_of(work, struct poll_iocb, work);
	struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll);
	struct poll_table_struct pt = { ._key = req->events };
	struct kioctx *ctx = iocb->ki_ctx;
	__poll_t mask = 0;

	if (!READ_ONCE(req->cancelled))
		mask = vfs_poll(req->file, &pt) & req->events;

	/*
	 * Note that ->ki_cancel callers also delete iocb from active_reqs after
	 * calling ->ki_cancel.  We need the ctx_lock roundtrip here to
	 * synchronize with them.  In the cancellation case the list_del_init
	 * itself is not actually needed, but harmless so we keep it in to
	 * avoid further branches in the fast path.
	 */
	spin_lock_irq(&ctx->ctx_lock);
	if (poll_iocb_lock_wq(req)) {
		if (!mask && !READ_ONCE(req->cancelled)) {
			/*
			 * The request isn't actually ready to be completed yet.
			 * Reschedule completion if another wakeup came in.
			 */
			if (req->work_need_resched) {
				schedule_work(&req->work);
				req->work_need_resched = false;
			} else {
				req->work_scheduled = false;
			}
			poll_iocb_unlock_wq(req);
			spin_unlock_irq(&ctx->ctx_lock);
			return;
		}
		list_del_init(&req->wait.entry);
		poll_iocb_unlock_wq(req);
	} /* else, POLLFREE has freed the waitqueue, so we must complete */
	list_del_init(&iocb->ki_list);
	iocb->ki_res.res = mangle_poll(mask);
	spin_unlock_irq(&ctx->ctx_lock);

	iocb_put(iocb);
}

/* assumes we are called with irqs disabled */
static int aio_poll_cancel(struct kiocb *iocb)
{
	struct aio_kiocb *aiocb = container_of(iocb, struct aio_kiocb, rw);
	struct poll_iocb *req = &aiocb->poll;

	if (poll_iocb_lock_wq(req)) {
		WRITE_ONCE(req->cancelled, true);
		if (!req->work_scheduled) {
			schedule_work(&aiocb->poll.work);
			req->work_scheduled = true;
		}
		poll_iocb_unlock_wq(req);
	} /* else, the request was force-cancelled by POLLFREE already */

	return 0;
}

static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
		void *key)
{
	struct poll_iocb *req = container_of(wait, struct poll_iocb, wait);
	struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll);
	__poll_t mask = key_to_poll(key);
	unsigned long flags;

	/* for instances that support it check for an event match first: */
	if (mask && !(mask & req->events))
		return 0;

	/*
	 * Complete the request inline if possible.  This requires that three
	 * conditions be met:
	 *   1. An event mask must have been passed.  If a plain wakeup was done
	 *	instead, then mask == 0 and we have to call vfs_poll() to get
	 *	the events, so inline completion isn't possible.
	 *   2. The completion work must not have already been scheduled.
	 *   3. ctx_lock must not be busy.  We have to use trylock because we
	 *	already hold the waitqueue lock, so this inverts the normal
	 *	locking order.  Use irqsave/irqrestore because not all
	 *	filesystems (e.g. fuse) call this function with IRQs disabled,
	 *	yet IRQs have to be disabled before ctx_lock is obtained.
	 */
	if (mask && !req->work_scheduled &&
	    spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) {
		struct kioctx *ctx = iocb->ki_ctx;

		list_del_init(&req->wait.entry);
		list_del(&iocb->ki_list);
		iocb->ki_res.res = mangle_poll(mask);
		if (iocb->ki_eventfd && !eventfd_signal_allowed()) {
			iocb = NULL;
			INIT_WORK(&req->work, aio_poll_put_work);
			schedule_work(&req->work);
		}
		spin_unlock_irqrestore(&ctx->ctx_lock, flags);
		if (iocb)
			iocb_put(iocb);
	} else {
		/*
		 * Schedule the completion work if needed.  If it was already
		 * scheduled, record that another wakeup came in.
		 *
		 * Don't remove the request from the waitqueue here, as it might
		 * not actually be complete yet (we won't know until vfs_poll()
		 * is called), and we must not miss any wakeups.  POLLFREE is an
		 * exception to this; see below.
		 */
		if (req->work_scheduled) {
			req->work_need_resched = true;
		} else {
			schedule_work(&req->work);
			req->work_scheduled = true;
		}

		/*
		 * If the waitqueue is being freed early but we can't complete
		 * the request inline, we have to tear down the request as best
		 * we can.
		 * That means immediately removing the request from its
		 * waitqueue and preventing all further accesses to the
		 * waitqueue via the request.  We also need to schedule the
		 * completion work (done above).  Also mark the request as
		 * cancelled, to potentially skip an unneeded call to ->poll().
		 */
		if (mask & POLLFREE) {
			WRITE_ONCE(req->cancelled, true);
			list_del_init(&req->wait.entry);

			/*
			 * Careful: this *must* be the last step, since as soon
			 * as req->head is NULL'ed out, the request can be
			 * completed and freed, since aio_poll_complete_work()
			 * will no longer need to take the waitqueue lock.
			 */
			smp_store_release(&req->head, NULL);
		}
	}
	return 1;
}

struct aio_poll_table {
	struct poll_table_struct	pt;
	struct aio_kiocb		*iocb;
	bool				queued;
	int				error;
};

static void
aio_poll_queue_proc(struct file *file, struct wait_queue_head *head,
		struct poll_table_struct *p)
{
	struct aio_poll_table *pt = container_of(p, struct aio_poll_table, pt);

	/* multiple wait queues per file are not supported */
	if (unlikely(pt->queued)) {
		pt->error = -EINVAL;
		return;
	}

	pt->queued = true;
	pt->error = 0;
	pt->iocb->poll.head = head;
	add_wait_queue(head, &pt->iocb->poll.wait);
}

static int aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
{
	struct kioctx *ctx = aiocb->ki_ctx;
	struct poll_iocb *req = &aiocb->poll;
	struct aio_poll_table apt;
	bool cancel = false;
	__poll_t mask;

	/* reject any unknown events outside the normal event mask. */
	if ((u16)iocb->aio_buf != iocb->aio_buf)
		return -EINVAL;
	/* reject fields that are not defined for poll */
	if (iocb->aio_offset || iocb->aio_nbytes || iocb->aio_rw_flags)
		return -EINVAL;

	INIT_WORK(&req->work, aio_poll_complete_work);
	req->events = demangle_poll(iocb->aio_buf) | EPOLLERR | EPOLLHUP;

	req->head = NULL;
	req->cancelled = false;
	req->work_scheduled = false;
	req->work_need_resched = false;

	apt.pt._qproc = aio_poll_queue_proc;
	apt.pt._key = req->events;
	apt.iocb = aiocb;
	apt.queued = false;
	apt.error = -EINVAL; /* same as no support for IOCB_CMD_POLL */

	/* initialize the list so that we can do list_empty checks */
	INIT_LIST_HEAD(&req->wait.entry);
	init_waitqueue_func_entry(&req->wait, aio_poll_wake);

	mask = vfs_poll(req->file, &apt.pt) & req->events;
	spin_lock_irq(&ctx->ctx_lock);
	if (likely(apt.queued)) {
		bool on_queue = poll_iocb_lock_wq(req);

		if (!on_queue || req->work_scheduled) {
			/*
			 * aio_poll_wake() already either scheduled the async
			 * completion work, or completed the request inline.
			 */
			if (apt.error) /* unsupported case: multiple queues */
				cancel = true;
			apt.error = 0;
			mask = 0;
		}
		if (mask || apt.error) {
			/* Steal to complete synchronously. */
			list_del_init(&req->wait.entry);
		} else if (cancel) {
			/* Cancel if possible (may be too late though). */
			WRITE_ONCE(req->cancelled, true);
		} else if (on_queue) {
			/*
			 * Actually waiting for an event, so add the request to
			 * active_reqs so that it can be cancelled if needed.
			 */
			list_add_tail(&aiocb->ki_list, &ctx->active_reqs);
			aiocb->ki_cancel = aio_poll_cancel;
		}
		if (on_queue)
			poll_iocb_unlock_wq(req);
	}
	if (mask) { /* no async, we'd stolen it */
		aiocb->ki_res.res = mangle_poll(mask);
		apt.error = 0;
	}
	spin_unlock_irq(&ctx->ctx_lock);
	if (mask)
		iocb_put(aiocb);
	return apt.error;
}

static int __io_submit_one(struct kioctx *ctx, const struct iocb *iocb,
			   struct iocb __user *user_iocb, struct aio_kiocb *req,
			   bool compat)
{
	req->ki_filp = fget(iocb->aio_fildes);
	if (unlikely(!req->ki_filp))
		return -EBADF;

	if (iocb->aio_flags & IOCB_FLAG_RESFD) {
		struct eventfd_ctx *eventfd;
		/*
		 * If the IOCB_FLAG_RESFD flag of aio_flags is set, get an
		 * instance of the file* now. The file descriptor must be
		 * an eventfd() fd, and will be signaled for each completed
		 * event using the eventfd_signal() function.
		 */
		eventfd = eventfd_ctx_fdget(iocb->aio_resfd);
		if (IS_ERR(eventfd))
			return PTR_ERR(eventfd);

		req->ki_eventfd = eventfd;
	}

	if (unlikely(put_user(KIOCB_KEY, &user_iocb->aio_key))) {
		pr_debug("EFAULT: aio_key\n");
		return -EFAULT;
	}

	req->ki_res.obj = (u64)(unsigned long)user_iocb;
	req->ki_res.data = iocb->aio_data;
	req->ki_res.res = 0;
	req->ki_res.res2 = 0;

	switch (iocb->aio_lio_opcode) {
	case IOCB_CMD_PREAD:
		return aio_read(&req->rw, iocb, false, compat);
	case IOCB_CMD_PWRITE:
		return aio_write(&req->rw, iocb, false, compat);
	case IOCB_CMD_PREADV:
		return aio_read(&req->rw, iocb, true, compat);
	case IOCB_CMD_PWRITEV:
		return aio_write(&req->rw, iocb, true, compat);
	case IOCB_CMD_FSYNC:
		return aio_fsync(&req->fsync, iocb, false);
	case IOCB_CMD_FDSYNC:
		return aio_fsync(&req->fsync, iocb, true);
	case IOCB_CMD_POLL:
		return aio_poll(req, iocb);
	default:
		pr_debug("invalid aio operation %d\n", iocb->aio_lio_opcode);
		return -EINVAL;
	}
}

static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
			 bool compat)
{
	struct aio_kiocb *req;
	struct iocb iocb;
	int err;

	if (unlikely(copy_from_user(&iocb, user_iocb, sizeof(iocb))))
		return -EFAULT;

	/* enforce forwards compatibility on users */
	if (unlikely(iocb.aio_reserved2)) {
		pr_debug("EINVAL: reserve field set\n");
		return -EINVAL;
	}

	/* prevent overflows */
	if (unlikely(
	    (iocb.aio_buf != (unsigned long)iocb.aio_buf) ||
	    (iocb.aio_nbytes != (size_t)iocb.aio_nbytes) ||
	    ((ssize_t)iocb.aio_nbytes < 0)
	   )) {
		pr_debug("EINVAL: overflow check\n");
		return -EINVAL;
	}

	req = aio_get_req(ctx);
	if (unlikely(!req))
		return -EAGAIN;

	err = __io_submit_one(ctx, &iocb, user_iocb, req, compat);

	/* Done with the synchronous reference */
	iocb_put(req);

	/*
	 * If err is 0, we'd either done aio_complete() ourselves or have
	 * arranged for that to be done asynchronously.  Anything non-zero
	 * means that we need to destroy req ourselves.
	 */
	if (unlikely(err)) {
		iocb_destroy(req);
		put_reqs_available(ctx, 1);
	}
	return err;
}

/* sys_io_submit:
 *	Queue the nr iocbs pointed to by iocbpp for processing.  Returns
 *	the number of iocbs queued.  May return -EINVAL if the aio_context
 *	specified by ctx_id is invalid, if nr is < 0, if the iocb at
 *	*iocbpp[0] is not properly initialized, or if the operation specified
 *	is invalid for the file descriptor in the iocb.  May fail with
 *	-EFAULT if any of the data structures point to invalid data.  May
 *	fail with -EBADF if the file descriptor specified in the first
 *	iocb is invalid.  May fail with -EAGAIN if insufficient resources
 *	are available to queue any iocbs.  Will return 0 if nr is 0.  Will
 *	fail with -ENOSYS if not implemented.
 */
SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
		struct iocb __user * __user *, iocbpp)
{
	struct kioctx *ctx;
	long ret = 0;
	int i = 0;
	struct blk_plug plug;

	if (unlikely(nr < 0))
		return -EINVAL;

	ctx = lookup_ioctx(ctx_id);
	if (unlikely(!ctx)) {
		pr_debug("EINVAL: invalid context id\n");
		return -EINVAL;
	}

	if (nr > ctx->nr_events)
		nr = ctx->nr_events;

	if (nr > AIO_PLUG_THRESHOLD)
		blk_start_plug(&plug);
	for (i = 0; i < nr; i++) {
		struct iocb __user *user_iocb;

		if (unlikely(get_user(user_iocb, iocbpp + i))) {
			ret = -EFAULT;
			break;
		}

		ret = io_submit_one(ctx, user_iocb, false);
		if (ret)
			break;
	}
	if (nr > AIO_PLUG_THRESHOLD)
		blk_finish_plug(&plug);

	percpu_ref_put(&ctx->users);
	return i ? i : ret;
}
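/*
 * Illustrative userspace sketch (not part of this file): submitting one
 * IOCB_CMD_PREAD against an already set up ctx and open fd, again via raw
 * syscalls.  "fd" and "buf" are assumed to exist:
 *
 *	struct iocb cb = {
 *		.aio_lio_opcode	= IOCB_CMD_PREAD,
 *		.aio_fildes	= fd,
 *		.aio_buf	= (__u64)(unsigned long)buf,
 *		.aio_nbytes	= sizeof(buf),
 *		.aio_offset	= 0,
 *	};
 *	struct iocb *cbs[1] = { &cb };
 *
 *	if (syscall(SYS_io_submit, ctx, 1, cbs) != 1)
 *		perror("io_submit");
 */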
/* sys_io_submit:
 *	Queue the nr iocbs pointed to by iocbpp for processing.  Returns
 *	the number of iocbs queued.  May return -EINVAL if the aio_context
 *	specified by ctx_id is invalid, if nr is < 0, if the iocb at
 *	*iocbpp[0] is not properly initialized, or if the operation specified
 *	is invalid for the file descriptor in the iocb.  May fail with
 *	-EFAULT if any of the data structures point to invalid data.  May
 *	fail with -EBADF if the file descriptor specified in the first
 *	iocb is invalid.  May fail with -EAGAIN if insufficient resources
 *	are available to queue any iocbs.  Will return 0 if nr is 0.  Will
 *	fail with -ENOSYS if not implemented.
 */
SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
		struct iocb __user * __user *, iocbpp)
{
	struct kioctx *ctx;
	long ret = 0;
	int i = 0;
	struct blk_plug plug;

	if (unlikely(nr < 0))
		return -EINVAL;

	ctx = lookup_ioctx(ctx_id);
	if (unlikely(!ctx)) {
		pr_debug("EINVAL: invalid context id\n");
		return -EINVAL;
	}

	if (nr > ctx->nr_events)
		nr = ctx->nr_events;

	if (nr > AIO_PLUG_THRESHOLD)
		blk_start_plug(&plug);
	for (i = 0; i < nr; i++) {
		struct iocb __user *user_iocb;

		if (unlikely(get_user(user_iocb, iocbpp + i))) {
			ret = -EFAULT;
			break;
		}

		ret = io_submit_one(ctx, user_iocb, false);
		if (ret)
			break;
	}
	if (nr > AIO_PLUG_THRESHOLD)
		blk_finish_plug(&plug);

	percpu_ref_put(&ctx->users);
	return i ? i : ret;
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE3(io_submit, compat_aio_context_t, ctx_id,
		       int, nr, compat_uptr_t __user *, iocbpp)
{
	struct kioctx *ctx;
	long ret = 0;
	int i = 0;
	struct blk_plug plug;

	if (unlikely(nr < 0))
		return -EINVAL;

	ctx = lookup_ioctx(ctx_id);
	if (unlikely(!ctx)) {
		pr_debug("EINVAL: invalid context id\n");
		return -EINVAL;
	}

	if (nr > ctx->nr_events)
		nr = ctx->nr_events;

	if (nr > AIO_PLUG_THRESHOLD)
		blk_start_plug(&plug);
	for (i = 0; i < nr; i++) {
		compat_uptr_t user_iocb;

		if (unlikely(get_user(user_iocb, iocbpp + i))) {
			ret = -EFAULT;
			break;
		}

		ret = io_submit_one(ctx, compat_ptr(user_iocb), true);
		if (ret)
			break;
	}
	if (nr > AIO_PLUG_THRESHOLD)
		blk_finish_plug(&plug);

	percpu_ref_put(&ctx->users);
	return i ? i : ret;
}
#endif
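/*
 * Illustrative sketch (editor's addition, not part of this file's build):
 * submitting a batch from userspace through the raw syscall, since glibc
 * does not wrap io_submit(2).  Note the double indirection: iocbpp is an
 * array of pointers to iocbs, matching the get_user() loop above.  "cb0"
 * and "cb1" are assumed, already-initialized iocbs.
 *
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	struct iocb *cbs[2] = { &cb0, &cb1 };
 *	long n = syscall(SYS_io_submit, ctx, 2, cbs);
 *	// n == 2 on full success; on a partial batch, n is the count
 *	// actually queued, and the failing iocb's error is reported only
 *	// when it was the first one (see "return i ? i : ret" above).
 */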
/* sys_io_cancel:
 *	Attempts to cancel an iocb previously passed to io_submit().  If
 *	the request is found on the context's list of active requests, its
 *	cancel callback is invoked and -EINPROGRESS is returned; the
 *	completion event is still delivered through the ring buffer, and
 *	the result argument is no longer used.  May fail with -EFAULT if
 *	any of the data structures pointed to are invalid.  May fail with
 *	-EINVAL if the aio_context specified by ctx_id is invalid.  May
 *	fail with -EAGAIN if the iocb specified was not cancelled.  Will
 *	fail with -ENOSYS if not implemented.
 */
SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
		struct io_event __user *, result)
{
	struct kioctx *ctx;
	struct aio_kiocb *kiocb;
	int ret = -EINVAL;
	u32 key;
	u64 obj = (u64)(unsigned long)iocb;

	if (unlikely(get_user(key, &iocb->aio_key)))
		return -EFAULT;
	if (unlikely(key != KIOCB_KEY))
		return -EINVAL;

	ctx = lookup_ioctx(ctx_id);
	if (unlikely(!ctx))
		return -EINVAL;

	spin_lock_irq(&ctx->ctx_lock);
	/* TODO: use a hash or array, this sucks. */
	list_for_each_entry(kiocb, &ctx->active_reqs, ki_list) {
		if (kiocb->ki_res.obj == obj) {
			ret = kiocb->ki_cancel(&kiocb->rw);
			list_del_init(&kiocb->ki_list);
			break;
		}
	}
	spin_unlock_irq(&ctx->ctx_lock);

	if (!ret) {
		/*
		 * The result argument is no longer used - the io_event is
		 * always delivered via the ring buffer.  -EINPROGRESS
		 * indicates that cancellation is in progress.
		 */
		ret = -EINPROGRESS;
	}

	percpu_ref_put(&ctx->users);

	return ret;
}

static long do_io_getevents(aio_context_t ctx_id,
		long min_nr,
		long nr,
		struct io_event __user *events,
		struct timespec64 *ts)
{
	ktime_t until = ts ? timespec64_to_ktime(*ts) : KTIME_MAX;
	struct kioctx *ioctx = lookup_ioctx(ctx_id);
	long ret = -EINVAL;

	if (likely(ioctx)) {
		if (likely(min_nr <= nr && min_nr >= 0))
			ret = read_events(ioctx, min_nr, nr, events, until);
		percpu_ref_put(&ioctx->users);
	}

	return ret;
}
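/*
 * Illustrative sketch (editor's addition, not part of this file's build):
 * cancelling an in-flight request.  Only requests still on active_reqs
 * (e.g. a pending IOCB_CMD_POLL) can be cancelled, and the completion
 * event still arrives via io_getevents(), so the sketch reaps it after
 * the cancel.  The glibc syscall() wrapper maps -EINPROGRESS to
 * errno == EINPROGRESS.
 *
 *	struct io_event ev;
 *
 *	long r = syscall(SYS_io_cancel, ctx, &cb, &ev);
 *	if (r == -1 && errno == EINPROGRESS) {
 *		// cancellation accepted; reap the event normally
 *		syscall(SYS_io_getevents, ctx, 1, 1, &ev, NULL);
 *	}
 */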
/* io_getevents:
 *	Attempts to read at least min_nr events and up to nr events from
 *	the completion queue for the aio_context specified by ctx_id.  If
 *	it succeeds, the number of read events is returned.  May fail with
 *	-EINVAL if ctx_id is invalid, if min_nr is out of range, if nr is
 *	out of range, or if timeout is out of range.  May fail with -EFAULT
 *	if any of the memory specified is invalid.  May return 0 or
 *	< min_nr if the timeout specified by timeout has elapsed
 *	before sufficient events are available, where timeout == NULL
 *	specifies an infinite timeout.  Note that the timeout pointed to by
 *	timeout is relative.  Will fail with -ENOSYS if not implemented.
 */
#ifdef CONFIG_64BIT

SYSCALL_DEFINE5(io_getevents, aio_context_t, ctx_id,
		long, min_nr,
		long, nr,
		struct io_event __user *, events,
		struct __kernel_timespec __user *, timeout)
{
	struct timespec64	ts;
	int			ret;

	if (timeout && unlikely(get_timespec64(&ts, timeout)))
		return -EFAULT;

	ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &ts : NULL);
	if (!ret && signal_pending(current))
		ret = -EINTR;
	return ret;
}

#endif

struct __aio_sigset {
	const sigset_t __user	*sigmask;
	size_t			sigsetsize;
};

SYSCALL_DEFINE6(io_pgetevents,
		aio_context_t, ctx_id,
		long, min_nr,
		long, nr,
		struct io_event __user *, events,
		struct __kernel_timespec __user *, timeout,
		const struct __aio_sigset __user *, usig)
{
	struct __aio_sigset	ksig = { NULL, };
	struct timespec64	ts;
	bool interrupted;
	int ret;

	if (timeout && unlikely(get_timespec64(&ts, timeout)))
		return -EFAULT;

	if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
		return -EFAULT;

	ret = set_user_sigmask(ksig.sigmask, ksig.sigsetsize);
	if (ret)
		return ret;

	ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &ts : NULL);

	interrupted = signal_pending(current);
	restore_saved_sigmask_unless(interrupted);
	if (interrupted && !ret)
		ret = -ERESTARTNOHAND;

	return ret;
}
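/*
 * Illustrative sketch (editor's addition, not part of this file's build):
 * reaping completions with io_pgetevents(), which atomically installs a
 * signal mask for the duration of the wait, analogous to ppoll(2).  The
 * layout mirrors struct __aio_sigset above; the sigsetsize of 8 is the
 * kernel sigset size (an assumption valid on x86-64), and on 64-bit
 * struct timespec matches __kernel_timespec.
 *
 *	struct io_event evs[8];
 *	struct timespec ts = { .tv_sec = 1 };	// relative timeout
 *	sigset_t mask;
 *
 *	sigemptyset(&mask);
 *	sigaddset(&mask, SIGUSR1);		// block SIGUSR1 while waiting
 *	struct __aio_sigset sig = { &mask, 8 };
 *	long n = syscall(SYS_io_pgetevents, ctx, 1, 8, evs, &ts, &sig);
 */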
#if defined(CONFIG_COMPAT_32BIT_TIME) && !defined(CONFIG_64BIT)

SYSCALL_DEFINE6(io_pgetevents_time32,
		aio_context_t, ctx_id,
		long, min_nr,
		long, nr,
		struct io_event __user *, events,
		struct old_timespec32 __user *, timeout,
		const struct __aio_sigset __user *, usig)
{
	struct __aio_sigset	ksig = { NULL, };
	struct timespec64	ts;
	bool interrupted;
	int ret;

	if (timeout && unlikely(get_old_timespec32(&ts, timeout)))
		return -EFAULT;

	if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
		return -EFAULT;

	ret = set_user_sigmask(ksig.sigmask, ksig.sigsetsize);
	if (ret)
		return ret;

	ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &ts : NULL);

	interrupted = signal_pending(current);
	restore_saved_sigmask_unless(interrupted);
	if (interrupted && !ret)
		ret = -ERESTARTNOHAND;

	return ret;
}

#endif

#if defined(CONFIG_COMPAT_32BIT_TIME)

SYSCALL_DEFINE5(io_getevents_time32, __u32, ctx_id,
		__s32, min_nr,
		__s32, nr,
		struct io_event __user *, events,
		struct old_timespec32 __user *, timeout)
{
	struct timespec64 t;
	int ret;

	if (timeout && get_old_timespec32(&t, timeout))
		return -EFAULT;

	ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &t : NULL);
	if (!ret && signal_pending(current))
		ret = -EINTR;
	return ret;
}

#endif

#ifdef CONFIG_COMPAT

struct __compat_aio_sigset {
	compat_uptr_t		sigmask;
	compat_size_t		sigsetsize;
};

#if defined(CONFIG_COMPAT_32BIT_TIME)

COMPAT_SYSCALL_DEFINE6(io_pgetevents,
		       compat_aio_context_t, ctx_id,
		       compat_long_t, min_nr,
		       compat_long_t, nr,
		       struct io_event __user *, events,
		       struct old_timespec32 __user *, timeout,
		       const struct __compat_aio_sigset __user *, usig)
{
	struct __compat_aio_sigset ksig = { 0, };
	struct timespec64 t;
	bool interrupted;
	int ret;

	if (timeout && get_old_timespec32(&t, timeout))
		return -EFAULT;

	if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
		return -EFAULT;

	ret = set_compat_user_sigmask(compat_ptr(ksig.sigmask), ksig.sigsetsize);
	if (ret)
		return ret;

	ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &t : NULL);

	interrupted = signal_pending(current);
	restore_saved_sigmask_unless(interrupted);
	if (interrupted && !ret)
		ret = -ERESTARTNOHAND;

	return ret;
}

#endif

COMPAT_SYSCALL_DEFINE6(io_pgetevents_time64,
		       compat_aio_context_t, ctx_id,
		       compat_long_t, min_nr,
		       compat_long_t, nr,
		       struct io_event __user *, events,
		       struct __kernel_timespec __user *, timeout,
		       const struct __compat_aio_sigset __user *, usig)
{
	struct __compat_aio_sigset ksig = { 0, };
	struct timespec64 t;
	bool interrupted;
	int ret;

	if (timeout && get_timespec64(&t, timeout))
		return -EFAULT;

	if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
		return -EFAULT;

	ret = set_compat_user_sigmask(compat_ptr(ksig.sigmask), ksig.sigsetsize);
	if (ret)
		return ret;

	ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &t : NULL);

	interrupted = signal_pending(current);
	restore_saved_sigmask_unless(interrupted);
	if (interrupted && !ret)
		ret = -ERESTARTNOHAND;

	return ret;
}
#endif
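/*
 * Illustrative end-to-end sketch (editor's addition, not part of this
 * file's build): the full lifecycle the syscalls above implement.  "fd"
 * and "buf" are assumed; error handling is omitted.
 *
 *	aio_context_t ctx = 0;
 *	struct iocb cb = { 0 }, *cbs[1] = { &cb };
 *	struct io_event ev;
 *
 *	syscall(SYS_io_setup, 32, &ctx);	// nr_events, see io_setup(2)
 *	cb.aio_lio_opcode = IOCB_CMD_PREAD;
 *	cb.aio_fildes = fd;
 *	cb.aio_buf = (__u64)(unsigned long)buf;
 *	cb.aio_nbytes = 4096;
 *	syscall(SYS_io_submit, ctx, 1, cbs);
 *	syscall(SYS_io_getevents, ctx, 1, 1, &ev, NULL);	// blocks
 *	syscall(SYS_io_destroy, ctx);
 */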