/*
 *	An async IO implementation for Linux
 *	Written by Benjamin LaHaise <bcrl@kvack.org>
 *
 *	Implements an efficient asynchronous io interface.
 *
 *	Copyright 2000, 2001, 2002 Red Hat, Inc.  All Rights Reserved.
 *
 *	See ../COPYING for licensing terms.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/time.h>
#include <linux/aio_abi.h>
#include <linux/module.h>
#include <linux/syscalls.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>

#define DEBUG 0

#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/mmu_context.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/aio.h>
#include <linux/highmem.h>
#include <linux/workqueue.h>
#include <linux/security.h>
#include <linux/eventfd.h>
#include <linux/blkdev.h>
#include <linux/compat.h>

#include <asm/kmap_types.h>
#include <asm/uaccess.h>

#if DEBUG > 1
#define dprintk		printk
#else
#define dprintk(x...)	do { ; } while (0)
#endif

/*------ sysctl variables----*/
static DEFINE_SPINLOCK(aio_nr_lock);
unsigned long aio_nr;		/* current system wide number of aio requests */
unsigned long aio_max_nr = 0x10000; /* system wide maximum number of aio requests */
/*----end sysctl variables---*/

static struct kmem_cache	*kiocb_cachep;
static struct kmem_cache	*kioctx_cachep;

static struct workqueue_struct *aio_wq;

/* Used for rare fput completion. */
static void aio_fput_routine(struct work_struct *);
static DECLARE_WORK(fput_work, aio_fput_routine);

static DEFINE_SPINLOCK(fput_lock);
static LIST_HEAD(fput_head);

static void aio_kick_handler(struct work_struct *);
static void aio_queue_work(struct kioctx *);

/* aio_setup
 *	Creates the slab caches used by the aio routines, panic on
 *	failure as this is done early during the boot sequence.
 */
static int __init aio_setup(void)
{
	kiocb_cachep = KMEM_CACHE(kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
	kioctx_cachep = KMEM_CACHE(kioctx, SLAB_HWCACHE_ALIGN|SLAB_PANIC);

	aio_wq = alloc_workqueue("aio", 0, 1);	/* used to limit concurrency */
	BUG_ON(!aio_wq);

	pr_debug("aio_setup: sizeof(struct page) = %d\n", (int)sizeof(struct page));

	return 0;
}
__initcall(aio_setup);

static void aio_free_ring(struct kioctx *ctx)
{
	struct aio_ring_info *info = &ctx->ring_info;
	long i;

	for (i=0; i<info->nr_pages; i++)
		put_page(info->ring_pages[i]);

	if (info->mmap_size) {
		down_write(&ctx->mm->mmap_sem);
		do_munmap(ctx->mm, info->mmap_base, info->mmap_size);
		up_write(&ctx->mm->mmap_sem);
	}

	if (info->ring_pages && info->ring_pages != info->internal_pages)
		kfree(info->ring_pages);
	info->ring_pages = NULL;
	info->nr = 0;
}

static int aio_setup_ring(struct kioctx *ctx)
{
	struct aio_ring *ring;
	struct aio_ring_info *info = &ctx->ring_info;
	unsigned nr_events = ctx->max_reqs;
	unsigned long size;
	int nr_pages;

	/* Compensate for the ring buffer's head/tail overlap entry */
	nr_events += 2;	/* 1 is required, 2 for good luck */

	size = sizeof(struct aio_ring);
	size += sizeof(struct io_event) * nr_events;
	nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;

	if (nr_pages < 0)
		return -EINVAL;

	nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);

	info->nr = 0;
	info->ring_pages = info->internal_pages;
	if (nr_pages > AIO_RING_PAGES) {
		info->ring_pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
		if (!info->ring_pages)
			return -ENOMEM;
	}

	info->mmap_size = nr_pages * PAGE_SIZE;
	dprintk("attempting mmap of %lu bytes\n", info->mmap_size);
	down_write(&ctx->mm->mmap_sem);
	info->mmap_base = do_mmap(NULL, 0, info->mmap_size,
				  PROT_READ|PROT_WRITE, MAP_ANONYMOUS|MAP_PRIVATE,
				  0);
	if (IS_ERR((void *)info->mmap_base)) {
		up_write(&ctx->mm->mmap_sem);
		info->mmap_size = 0;
		aio_free_ring(ctx);
		return -EAGAIN;
	}

	dprintk("mmap address: 0x%08lx\n", info->mmap_base);
	info->nr_pages = get_user_pages(current, ctx->mm,
					info->mmap_base, nr_pages,
					1, 0, info->ring_pages, NULL);
	up_write(&ctx->mm->mmap_sem);

	if (unlikely(info->nr_pages != nr_pages)) {
		aio_free_ring(ctx);
		return -EAGAIN;
	}

	ctx->user_id = info->mmap_base;

	info->nr = nr_events;		/* trusted copy */

	ring = kmap_atomic(info->ring_pages[0], KM_USER0);
	ring->nr = nr_events;	/* user copy */
	ring->id = ctx->user_id;
	ring->head = ring->tail = 0;
	ring->magic = AIO_RING_MAGIC;
	ring->compat_features = AIO_RING_COMPAT_FEATURES;
	ring->incompat_features = AIO_RING_INCOMPAT_FEATURES;
	ring->header_length = sizeof(struct aio_ring);
	kunmap_atomic(ring, KM_USER0);

	return 0;
}


/* aio_ring_event: returns a pointer to the event at the given index from
 * kmap_atomic(, km).
 * Release the pointer with put_aio_ring_event();
 */
#define AIO_EVENTS_PER_PAGE	(PAGE_SIZE / sizeof(struct io_event))
#define AIO_EVENTS_FIRST_PAGE	((PAGE_SIZE - sizeof(struct aio_ring)) / sizeof(struct io_event))
#define AIO_EVENTS_OFFSET	(AIO_EVENTS_PER_PAGE - AIO_EVENTS_FIRST_PAGE)

#define aio_ring_event(info, nr, km) ({					\
	unsigned pos = (nr) + AIO_EVENTS_OFFSET;			\
	struct io_event *__event;					\
	__event = kmap_atomic(						\
			(info)->ring_pages[pos / AIO_EVENTS_PER_PAGE], km); \
	__event += pos % AIO_EVENTS_PER_PAGE;				\
	__event;							\
})

#define put_aio_ring_event(event, km) do {	\
	struct io_event *__event = (event);	\
	(void)__event;				\
	kunmap_atomic((void *)((unsigned long)__event & PAGE_MASK), km); \
} while(0)

static void ctx_rcu_free(struct rcu_head *head)
{
	struct kioctx *ctx = container_of(head, struct kioctx, rcu_head);
	unsigned nr_events = ctx->max_reqs;

	kmem_cache_free(kioctx_cachep, ctx);

	if (nr_events) {
		spin_lock(&aio_nr_lock);
		BUG_ON(aio_nr - nr_events > aio_nr);
		aio_nr -= nr_events;
		spin_unlock(&aio_nr_lock);
	}
}

/* __put_ioctx
 *	Called when the last user of an aio context has gone away,
 *	and the struct needs to be freed.
 */
static void __put_ioctx(struct kioctx *ctx)
{
	BUG_ON(ctx->reqs_active);

	cancel_delayed_work(&ctx->wq);
	cancel_work_sync(&ctx->wq.work);
	aio_free_ring(ctx);
	mmdrop(ctx->mm);
	ctx->mm = NULL;
	pr_debug("__put_ioctx: freeing %p\n", ctx);
	call_rcu(&ctx->rcu_head, ctx_rcu_free);
}

static inline void get_ioctx(struct kioctx *kioctx)
{
	BUG_ON(atomic_read(&kioctx->users) <= 0);
	atomic_inc(&kioctx->users);
}

static inline int try_get_ioctx(struct kioctx *kioctx)
{
	return atomic_inc_not_zero(&kioctx->users);
}

static inline void put_ioctx(struct kioctx *kioctx)
{
	BUG_ON(atomic_read(&kioctx->users) <= 0);
	if (unlikely(atomic_dec_and_test(&kioctx->users)))
		__put_ioctx(kioctx);
}

/* ioctx_alloc
 *	Allocates and initializes an ioctx.  Returns an ERR_PTR if it failed.
 */
static struct kioctx *ioctx_alloc(unsigned nr_events)
{
	struct mm_struct *mm;
	struct kioctx *ctx;
	int did_sync = 0;

	/* Prevent overflows */
	if ((nr_events > (0x10000000U / sizeof(struct io_event))) ||
	    (nr_events > (0x10000000U / sizeof(struct kiocb)))) {
		pr_debug("ENOMEM: nr_events too high\n");
		return ERR_PTR(-EINVAL);
	}

	if ((unsigned long)nr_events > aio_max_nr)
		return ERR_PTR(-EAGAIN);

	ctx = kmem_cache_zalloc(kioctx_cachep, GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	ctx->max_reqs = nr_events;
	mm = ctx->mm = current->mm;
	atomic_inc(&mm->mm_count);

	atomic_set(&ctx->users, 1);
	spin_lock_init(&ctx->ctx_lock);
	spin_lock_init(&ctx->ring_info.ring_lock);
	init_waitqueue_head(&ctx->wait);

	INIT_LIST_HEAD(&ctx->active_reqs);
	INIT_LIST_HEAD(&ctx->run_list);
	INIT_DELAYED_WORK(&ctx->wq, aio_kick_handler);

	if (aio_setup_ring(ctx) < 0)
		goto out_freectx;

	/* limit the number of system wide aios */
	do {
		spin_lock_bh(&aio_nr_lock);
		if (aio_nr + nr_events > aio_max_nr ||
		    aio_nr + nr_events < aio_nr)
			ctx->max_reqs = 0;
		else
			aio_nr += ctx->max_reqs;
		spin_unlock_bh(&aio_nr_lock);
		if (ctx->max_reqs || did_sync)
			break;

		/* wait for rcu callbacks to have completed before giving up */
		synchronize_rcu();
		did_sync = 1;
		ctx->max_reqs = nr_events;
	} while (1);

	if (ctx->max_reqs == 0)
		goto out_cleanup;

	/* now link into global list. */
	spin_lock(&mm->ioctx_lock);
	hlist_add_head_rcu(&ctx->list, &mm->ioctx_list);
	spin_unlock(&mm->ioctx_lock);

	dprintk("aio: allocated ioctx %p[%ld]: mm=%p mask=0x%x\n",
		ctx, ctx->user_id, current->mm, ctx->ring_info.nr);
	return ctx;

out_cleanup:
	__put_ioctx(ctx);
	return ERR_PTR(-EAGAIN);

out_freectx:
	mmdrop(mm);
	kmem_cache_free(kioctx_cachep, ctx);
	ctx = ERR_PTR(-ENOMEM);

	dprintk("aio: error allocating ioctx %p\n", ctx);
	return ctx;
}

/* aio_cancel_all
 *	Cancels all outstanding aio requests on an aio context.  Used
 *	when the processes owning a context have all exited to encourage
 *	the rapid destruction of the kioctx.
 */
static void aio_cancel_all(struct kioctx *ctx)
{
	int (*cancel)(struct kiocb *, struct io_event *);
	struct io_event res;
	spin_lock_irq(&ctx->ctx_lock);
	ctx->dead = 1;
	while (!list_empty(&ctx->active_reqs)) {
		struct list_head *pos = ctx->active_reqs.next;
		struct kiocb *iocb = list_kiocb(pos);
		list_del_init(&iocb->ki_list);
		cancel = iocb->ki_cancel;
		kiocbSetCancelled(iocb);
		if (cancel) {
			iocb->ki_users++;
			spin_unlock_irq(&ctx->ctx_lock);
			cancel(iocb, &res);
			spin_lock_irq(&ctx->ctx_lock);
		}
	}
	spin_unlock_irq(&ctx->ctx_lock);
}

static void wait_for_all_aios(struct kioctx *ctx)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);

	spin_lock_irq(&ctx->ctx_lock);
	if (!ctx->reqs_active)
		goto out;

	add_wait_queue(&ctx->wait, &wait);
	set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	while (ctx->reqs_active) {
		spin_unlock_irq(&ctx->ctx_lock);
		io_schedule();
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
		spin_lock_irq(&ctx->ctx_lock);
	}
	__set_task_state(tsk, TASK_RUNNING);
	remove_wait_queue(&ctx->wait, &wait);

out:
	spin_unlock_irq(&ctx->ctx_lock);
}

/* wait_on_sync_kiocb:
 *	Waits on the given sync kiocb to complete.
 */
ssize_t wait_on_sync_kiocb(struct kiocb *iocb)
{
	while (iocb->ki_users) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!iocb->ki_users)
			break;
		io_schedule();
	}
	__set_current_state(TASK_RUNNING);
	return iocb->ki_user_data;
}
EXPORT_SYMBOL(wait_on_sync_kiocb);

/* exit_aio: called when the last user of mm goes away.  At this point,
 * there is no way for any new requests to be submitted or any of the
 * io_* syscalls to be called on the context.  However, there may be
 * outstanding requests which hold references to the context; as they
 * go away, they will call put_ioctx and release any pinned memory
 * associated with the request (held via struct page * references).
 */
void exit_aio(struct mm_struct *mm)
{
	struct kioctx *ctx;

	while (!hlist_empty(&mm->ioctx_list)) {
		ctx = hlist_entry(mm->ioctx_list.first, struct kioctx, list);
		hlist_del_rcu(&ctx->list);

		aio_cancel_all(ctx);

		wait_for_all_aios(ctx);
		/*
		 * Ensure we don't leave the ctx on the aio_wq
		 */
		cancel_work_sync(&ctx->wq.work);

		if (1 != atomic_read(&ctx->users))
			printk(KERN_DEBUG
				"exit_aio:ioctx still alive: %d %d %d\n",
				atomic_read(&ctx->users), ctx->dead,
				ctx->reqs_active);
		put_ioctx(ctx);
	}
}

/* aio_get_req
 *	Allocate a slot for an aio request.  Increments the users count
 *	of the kioctx so that the kioctx stays around until all requests are
 *	complete.  Returns NULL if no requests are free.
 *
 *	Returns with kiocb->users set to 2.  The io submit code path holds
 *	an extra reference while submitting the i/o.
 *	This prevents races between the aio code path referencing the
 *	req (after submitting it) and aio_complete() freeing the req.
 */
static struct kiocb *__aio_get_req(struct kioctx *ctx)
{
	struct kiocb *req = NULL;

	req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL);
	if (unlikely(!req))
		return NULL;

	req->ki_flags = 0;
	req->ki_users = 2;
	req->ki_key = 0;
	req->ki_ctx = ctx;
	req->ki_cancel = NULL;
	req->ki_retry = NULL;
	req->ki_dtor = NULL;
	req->private = NULL;
	req->ki_iovec = NULL;
	INIT_LIST_HEAD(&req->ki_run_list);
	req->ki_eventfd = NULL;

	return req;
}

/*
 * struct kiocb's are allocated in batches to reduce the number of
 * times the ctx lock is acquired and released.
 */
#define KIOCB_BATCH_SIZE	32L
struct kiocb_batch {
	struct list_head head;
	long count; /* number of requests left to allocate */
};

static void kiocb_batch_init(struct kiocb_batch *batch, long total)
{
	INIT_LIST_HEAD(&batch->head);
	batch->count = total;
}

static void kiocb_batch_free(struct kioctx *ctx, struct kiocb_batch *batch)
{
	struct kiocb *req, *n;

	if (list_empty(&batch->head))
		return;

	spin_lock_irq(&ctx->ctx_lock);
	list_for_each_entry_safe(req, n, &batch->head, ki_batch) {
		list_del(&req->ki_batch);
		list_del(&req->ki_list);
		kmem_cache_free(kiocb_cachep, req);
		ctx->reqs_active--;
	}
	if (unlikely(!ctx->reqs_active && ctx->dead))
		wake_up_all(&ctx->wait);
	spin_unlock_irq(&ctx->ctx_lock);
}

/*
 * Allocate a batch of kiocbs.  This avoids taking and dropping the
 * context lock a lot during setup.
 */
static int kiocb_batch_refill(struct kioctx *ctx, struct kiocb_batch *batch)
{
	unsigned short allocated, to_alloc;
	long avail;
	bool called_fput = false;
	struct kiocb *req, *n;
	struct aio_ring *ring;

	to_alloc = min(batch->count, KIOCB_BATCH_SIZE);
	for (allocated = 0; allocated < to_alloc; allocated++) {
		req = __aio_get_req(ctx);
		if (!req)
			/* allocation failed, go with what we've got */
			break;
		list_add(&req->ki_batch, &batch->head);
	}

	if (allocated == 0)
		goto out;

retry:
	spin_lock_irq(&ctx->ctx_lock);
	ring = kmap_atomic(ctx->ring_info.ring_pages[0]);

	avail = aio_ring_avail(&ctx->ring_info, ring) - ctx->reqs_active;
	BUG_ON(avail < 0);
	if (avail == 0 && !called_fput) {
		/*
		 * Handle a potential starvation case.  It is possible that
		 * we hold the last reference on a struct file, causing us
		 * to delay the final fput to non-irq context.  In this case,
		 * ctx->reqs_active is artificially high.  Calling the fput
		 * routine here may free up a slot in the event completion
		 * ring, allowing this allocation to succeed.
		 */
		kunmap_atomic(ring);
		spin_unlock_irq(&ctx->ctx_lock);
		aio_fput_routine(NULL);
		called_fput = true;
		goto retry;
	}

	if (avail < allocated) {
		/* Trim back the number of requests. */
		list_for_each_entry_safe(req, n, &batch->head, ki_batch) {
			list_del(&req->ki_batch);
			kmem_cache_free(kiocb_cachep, req);
			if (--allocated <= avail)
				break;
		}
	}

	batch->count -= allocated;
	list_for_each_entry(req, &batch->head, ki_batch) {
		list_add(&req->ki_list, &ctx->active_reqs);
		ctx->reqs_active++;
	}

	kunmap_atomic(ring);
	spin_unlock_irq(&ctx->ctx_lock);

out:
	return allocated;
}

static inline struct kiocb *aio_get_req(struct kioctx *ctx,
					struct kiocb_batch *batch)
{
	struct kiocb *req;

	if (list_empty(&batch->head))
		if (kiocb_batch_refill(ctx, batch) == 0)
			return NULL;
	req = list_first_entry(&batch->head, struct kiocb, ki_batch);
	list_del(&req->ki_batch);
	return req;
}

static inline void really_put_req(struct kioctx *ctx, struct kiocb *req)
{
	assert_spin_locked(&ctx->ctx_lock);

	if (req->ki_eventfd != NULL)
		eventfd_ctx_put(req->ki_eventfd);
	if (req->ki_dtor)
		req->ki_dtor(req);
	if (req->ki_iovec != &req->ki_inline_vec)
		kfree(req->ki_iovec);
	kmem_cache_free(kiocb_cachep, req);
	ctx->reqs_active--;

	if (unlikely(!ctx->reqs_active && ctx->dead))
		wake_up_all(&ctx->wait);
}

static void aio_fput_routine(struct work_struct *data)
{
	spin_lock_irq(&fput_lock);
	while (likely(!list_empty(&fput_head))) {
		struct kiocb *req = list_kiocb(fput_head.next);
		struct kioctx *ctx = req->ki_ctx;

		list_del(&req->ki_list);
		spin_unlock_irq(&fput_lock);

		/* Complete the fput(s) */
		if (req->ki_filp != NULL)
			fput(req->ki_filp);

		/* Link the iocb into the context's free list */
		spin_lock_irq(&ctx->ctx_lock);
		really_put_req(ctx, req);
		spin_unlock_irq(&ctx->ctx_lock);

		put_ioctx(ctx);
		spin_lock_irq(&fput_lock);
	}
	spin_unlock_irq(&fput_lock);
}

/* __aio_put_req
 *	Returns true if this put was the last user of the request.
 */
static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
{
	dprintk(KERN_DEBUG "aio_put(%p): f_count=%ld\n",
		req, atomic_long_read(&req->ki_filp->f_count));

	assert_spin_locked(&ctx->ctx_lock);

	req->ki_users--;
	BUG_ON(req->ki_users < 0);
	if (likely(req->ki_users))
		return 0;
	list_del(&req->ki_list);		/* remove from active_reqs */
	req->ki_cancel = NULL;
	req->ki_retry = NULL;

	/*
	 * Try to optimize the aio and eventfd file* puts, by avoiding to
	 * schedule work in case it is not final fput() time. In normal cases,
	 * we would not be holding the last reference to the file*, so
	 * this function will be executed w/out any aio kthread wakeup.
	 */
	if (unlikely(!fput_atomic(req->ki_filp))) {
		get_ioctx(ctx);
		spin_lock(&fput_lock);
		list_add(&req->ki_list, &fput_head);
		spin_unlock(&fput_lock);
		schedule_work(&fput_work);
	} else {
		req->ki_filp = NULL;
		really_put_req(ctx, req);
	}
	return 1;
}

/* aio_put_req
 *	Returns true if this put was the last user of the kiocb,
 *	false if the request is still in use.
 */
int aio_put_req(struct kiocb *req)
{
	struct kioctx *ctx = req->ki_ctx;
	int ret;
	spin_lock_irq(&ctx->ctx_lock);
	ret = __aio_put_req(ctx, req);
	spin_unlock_irq(&ctx->ctx_lock);
	return ret;
}
EXPORT_SYMBOL(aio_put_req);

static struct kioctx *lookup_ioctx(unsigned long ctx_id)
{
	struct mm_struct *mm = current->mm;
	struct kioctx *ctx, *ret = NULL;
	struct hlist_node *n;

	rcu_read_lock();

	hlist_for_each_entry_rcu(ctx, n, &mm->ioctx_list, list) {
		/*
		 * RCU protects us against accessing freed memory but
		 * we have to be careful not to get a reference when the
		 * reference count already dropped to 0 (ctx->dead test
		 * is unreliable because of races).
		 */
		if (ctx->user_id == ctx_id && !ctx->dead && try_get_ioctx(ctx)) {
			ret = ctx;
			break;
		}
	}

	rcu_read_unlock();
	return ret;
}

/*
 * Queue up a kiocb to be retried. Assumes that the kiocb
 * has already been marked as kicked, and places it on
 * the retry run list for the corresponding ioctx, if it
 * isn't already queued. Returns 1 if it actually queued
 * the kiocb (to tell the caller to activate the work
 * queue to process it), or 0, if it found that it was
 * already queued.
 */
static inline int __queue_kicked_iocb(struct kiocb *iocb)
{
	struct kioctx *ctx = iocb->ki_ctx;

	assert_spin_locked(&ctx->ctx_lock);

	if (list_empty(&iocb->ki_run_list)) {
		list_add_tail(&iocb->ki_run_list,
			&ctx->run_list);
		return 1;
	}
	return 0;
}

/* aio_run_iocb
 *	This is the core aio execution routine. It is
 *	invoked both for initial i/o submission and
 *	subsequent retries via the aio_kick_handler.
 *	Expects to be invoked with iocb->ki_ctx->lock
 *	already held. The lock is released and reacquired
 *	as needed during processing.
 *
 *	Calls the iocb retry method (already setup for the
 *	iocb on initial submission) for operation specific
 *	handling, but takes care of most of common retry
 *	execution details for a given iocb. The retry method
 *	needs to be non-blocking as far as possible, to avoid
 *	holding up other iocbs waiting to be serviced by the
 *	retry kernel thread.
 *
 *	The trickier parts in this code have to do with
 *	ensuring that only one retry instance is in progress
 *	for a given iocb at any time. Providing that guarantee
 *	simplifies the coding of individual aio operations as
 *	it avoids various potential races.
 */
static ssize_t aio_run_iocb(struct kiocb *iocb)
{
	struct kioctx *ctx = iocb->ki_ctx;
	ssize_t (*retry)(struct kiocb *);
	ssize_t ret;

	if (!(retry = iocb->ki_retry)) {
		printk("aio_run_iocb: iocb->ki_retry = NULL\n");
		return 0;
	}

	/*
	 * We don't want the next retry iteration for this
	 * operation to start until this one has returned and
	 * updated the iocb state. However, wait_queue functions
	 * can trigger a kick_iocb from interrupt context in the
	 * meantime, indicating that data is available for the next
	 * iteration. We want to remember that and enable the
	 * next retry iteration _after_ we are through with
	 * this one.
	 *
	 * So, in order to be able to register a "kick", but
	 * prevent it from being queued now, we clear the kick
	 * flag, but make the kick code *think* that the iocb is
	 * still on the run list until we are actually done.
	 * When we are done with this iteration, we check if
	 * the iocb was kicked in the meantime and if so, queue
	 * it up afresh.
	 */

	kiocbClearKicked(iocb);

	/*
	 * This is so that aio_complete knows it doesn't need to
	 * pull the iocb off the run list (We can't just call
	 * INIT_LIST_HEAD because we don't want a kick_iocb to
	 * queue this on the run list yet)
	 */
	iocb->ki_run_list.next = iocb->ki_run_list.prev = NULL;
	spin_unlock_irq(&ctx->ctx_lock);

	/* Quit retrying if the i/o has been cancelled */
	if (kiocbIsCancelled(iocb)) {
		ret = -EINTR;
		aio_complete(iocb, ret, 0);
		/* must not access the iocb after this */
		goto out;
	}

	/*
	 * Now we are all set to call the retry method in async
	 * context.
	 */
	ret = retry(iocb);

	if (ret != -EIOCBRETRY && ret != -EIOCBQUEUED) {
		/*
		 * There's no easy way to restart the syscall since other AIO's
		 * may be already running. Just fail this IO with EINTR.
		 */
		if (unlikely(ret == -ERESTARTSYS || ret == -ERESTARTNOINTR ||
			     ret == -ERESTARTNOHAND || ret == -ERESTART_RESTARTBLOCK))
			ret = -EINTR;
		aio_complete(iocb, ret, 0);
	}
out:
	spin_lock_irq(&ctx->ctx_lock);

	if (-EIOCBRETRY == ret) {
		/*
		 * OK, now that we are done with this iteration
		 * and know that there is more left to go,
		 * this is where we let go so that a subsequent
		 * "kick" can start the next iteration
		 */

		/* will make __queue_kicked_iocb succeed from here on */
		INIT_LIST_HEAD(&iocb->ki_run_list);
		/* we must queue the next iteration ourselves, if it
		 * has already been kicked */
		if (kiocbIsKicked(iocb)) {
			__queue_kicked_iocb(iocb);

			/*
			 * __queue_kicked_iocb will always return 1 here, because
			 * iocb->ki_run_list is empty at this point so it should
			 * be safe to unconditionally queue the context into the
			 * work queue.
			 */
			aio_queue_work(ctx);
		}
	}
	return ret;
}

/*
 * __aio_run_iocbs:
 *	Process all pending retries queued on the ioctx
 *	run list.
 *	Assumes it is operating within the aio issuer's mm
 *	context.
 */
static int __aio_run_iocbs(struct kioctx *ctx)
{
	struct kiocb *iocb;
	struct list_head run_list;

	assert_spin_locked(&ctx->ctx_lock);

	list_replace_init(&ctx->run_list, &run_list);
	while (!list_empty(&run_list)) {
		iocb = list_entry(run_list.next, struct kiocb,
			ki_run_list);
		list_del(&iocb->ki_run_list);
		/*
		 * Hold an extra reference while retrying i/o.
		 */
		iocb->ki_users++;	/* grab extra reference */
		aio_run_iocb(iocb);
		__aio_put_req(ctx, iocb);
	}
	if (!list_empty(&ctx->run_list))
		return 1;
	return 0;
}

static void aio_queue_work(struct kioctx * ctx)
{
	unsigned long timeout;
	/*
	 * if someone is waiting, get the work started right
	 * away, otherwise, use a longer delay
	 */
	smp_mb();
	if (waitqueue_active(&ctx->wait))
		timeout = 1;
	else
		timeout = HZ/10;
	queue_delayed_work(aio_wq, &ctx->wq, timeout);
}

/*
 * aio_run_all_iocbs:
 *	Process all pending retries queued on the ioctx
 *	run list, and keep running them until the list
 *	stays empty.
 *	Assumes it is operating within the aio issuer's mm context.
 */
static inline void aio_run_all_iocbs(struct kioctx *ctx)
{
	spin_lock_irq(&ctx->ctx_lock);
	while (__aio_run_iocbs(ctx))
		;
	spin_unlock_irq(&ctx->ctx_lock);
}

/*
 * aio_kick_handler:
 *	Work queue handler triggered to process pending
 *	retries on an ioctx. Takes on the aio issuer's
 *	mm context before running the iocbs, so that
 *	copy_xxx_user operates on the issuer's address
 *	space.
 *	Run on aiod's context.
 */
static void aio_kick_handler(struct work_struct *work)
{
	struct kioctx *ctx = container_of(work, struct kioctx, wq.work);
	mm_segment_t oldfs = get_fs();
	struct mm_struct *mm;
	int requeue;

	set_fs(USER_DS);
	use_mm(ctx->mm);
	spin_lock_irq(&ctx->ctx_lock);
	requeue = __aio_run_iocbs(ctx);
	mm = ctx->mm;
	spin_unlock_irq(&ctx->ctx_lock);
	unuse_mm(mm);
	set_fs(oldfs);
	/*
	 * we're in a worker thread already, don't use queue_delayed_work,
	 */
	if (requeue)
		queue_delayed_work(aio_wq, &ctx->wq, 0);
}


/*
 * Called by kick_iocb to queue the kiocb for retry
 * and if required activate the aio work queue to process
 * it
 */
static void try_queue_kicked_iocb(struct kiocb *iocb)
{
	struct kioctx *ctx = iocb->ki_ctx;
	unsigned long flags;
	int run = 0;

	spin_lock_irqsave(&ctx->ctx_lock, flags);
	/* set this inside the lock so that we can't race with aio_run_iocb()
	 * testing it and putting the iocb on the run list under the lock */
	if (!kiocbTryKick(iocb))
		run = __queue_kicked_iocb(iocb);
	spin_unlock_irqrestore(&ctx->ctx_lock, flags);
	if (run)
		aio_queue_work(ctx);
}

/*
 * kick_iocb:
 *	Called typically from a wait queue callback context
 *	to trigger a retry of the iocb.
 *	The retry is usually executed by aio workqueue
 *	threads (See aio_kick_handler).
 */
void kick_iocb(struct kiocb *iocb)
{
	/* sync iocbs are easy: they can only ever be executing from a
	 * single context. */
	if (is_sync_kiocb(iocb)) {
		kiocbSetKicked(iocb);
		wake_up_process(iocb->ki_obj.tsk);
		return;
	}

	try_queue_kicked_iocb(iocb);
}
EXPORT_SYMBOL(kick_iocb);

/* aio_complete
 *	Called when the io request on the given iocb is complete.
 *	Returns true if this is the last user of the request.  The
 *	only other user of the request can be the cancellation code.
 */
int aio_complete(struct kiocb *iocb, long res, long res2)
{
	struct kioctx *ctx = iocb->ki_ctx;
	struct aio_ring_info *info;
	struct aio_ring *ring;
	struct io_event *event;
	unsigned long flags;
	unsigned long tail;
	int ret;

	/*
	 * Special case handling for sync iocbs:
	 *  - events go directly into the iocb for fast handling
	 *  - the sync task with the iocb in its stack holds the single iocb
	 *    ref, no other paths have a way to get another ref
	 *  - the sync task helpfully left a reference to itself in the iocb
	 */
	if (is_sync_kiocb(iocb)) {
		BUG_ON(iocb->ki_users != 1);
		iocb->ki_user_data = res;
		iocb->ki_users = 0;
		wake_up_process(iocb->ki_obj.tsk);
		return 1;
	}

	info = &ctx->ring_info;

	/* add a completion event to the ring buffer.
	 * must be done holding ctx->ctx_lock to prevent
	 * other code from messing with the tail
	 * pointer since we might be called from irq
	 * context.
	 */
	spin_lock_irqsave(&ctx->ctx_lock, flags);

	if (iocb->ki_run_list.prev && !list_empty(&iocb->ki_run_list))
		list_del_init(&iocb->ki_run_list);

	/*
	 * cancelled requests don't get events, userland was given one
	 * when the event got cancelled.
	 */
	if (kiocbIsCancelled(iocb))
		goto put_rq;

	ring = kmap_atomic(info->ring_pages[0], KM_IRQ1);

	tail = info->tail;
	event = aio_ring_event(info, tail, KM_IRQ0);
	if (++tail >= info->nr)
		tail = 0;

	event->obj = (u64)(unsigned long)iocb->ki_obj.user;
	event->data = iocb->ki_user_data;
	event->res = res;
	event->res2 = res2;

	dprintk("aio_complete: %p[%lu]: %p: %p %Lx %lx %lx\n",
		ctx, tail, iocb, iocb->ki_obj.user, iocb->ki_user_data,
		res, res2);

	/* after flagging the request as done, we
	 * must never even look at it again
	 */
	smp_wmb();	/* make event visible before updating tail */

	info->tail = tail;
	ring->tail = tail;

	put_aio_ring_event(event, KM_IRQ0);
	kunmap_atomic(ring, KM_IRQ1);

	pr_debug("added to ring %p at [%lu]\n", iocb, tail);

	/*
	 * Check if the user asked us to deliver the result through an
	 * eventfd. The eventfd_signal() function is safe to be called
	 * from IRQ context.
	 */
	if (iocb->ki_eventfd != NULL)
		eventfd_signal(iocb->ki_eventfd, 1);

put_rq:
	/* everything turned out well, dispose of the aiocb. */
	ret = __aio_put_req(ctx, iocb);

	/*
	 * We have to order our ring_info tail store above and test
	 * of the wait list below outside the wait lock.  This is
	 * like in wake_up_bit() where clearing a bit has to be
	 * ordered with the unlocked test.
	 */
	smp_mb();

	if (waitqueue_active(&ctx->wait))
		wake_up(&ctx->wait);

	spin_unlock_irqrestore(&ctx->ctx_lock, flags);
	return ret;
}
EXPORT_SYMBOL(aio_complete);

/* aio_read_evt
 *	Pull an event off of the ioctx's event ring.  Returns the number of
 *	events fetched (0 or 1 ;-)
 *	FIXME: make this use cmpxchg.
 *	TODO: make the ringbuffer user mmap()able (requires FIXME).
 */
static int aio_read_evt(struct kioctx *ioctx, struct io_event *ent)
{
	struct aio_ring_info *info = &ioctx->ring_info;
	struct aio_ring *ring;
	unsigned long head;
	int ret = 0;

	ring = kmap_atomic(info->ring_pages[0], KM_USER0);
	dprintk("in aio_read_evt h%lu t%lu m%lu\n",
		(unsigned long)ring->head, (unsigned long)ring->tail,
		(unsigned long)ring->nr);

	if (ring->head == ring->tail)
		goto out;

	spin_lock(&info->ring_lock);

	head = ring->head % info->nr;
	if (head != ring->tail) {
		struct io_event *evp = aio_ring_event(info, head, KM_USER1);
		*ent = *evp;
		head = (head + 1) % info->nr;
		smp_mb(); /* finish reading the event before updating the head */
		ring->head = head;
		ret = 1;
		put_aio_ring_event(evp, KM_USER1);
	}
	spin_unlock(&info->ring_lock);

out:
	kunmap_atomic(ring, KM_USER0);
	dprintk("leaving aio_read_evt: %d h%lu t%lu\n", ret,
		(unsigned long)ring->head, (unsigned long)ring->tail);
	return ret;
}

struct aio_timeout {
	struct timer_list	timer;
	int			timed_out;
	struct task_struct	*p;
};

static void timeout_func(unsigned long data)
{
	struct aio_timeout *to = (struct aio_timeout *)data;

	to->timed_out = 1;
	wake_up_process(to->p);
}

static inline void init_timeout(struct aio_timeout *to)
{
	setup_timer_on_stack(&to->timer, timeout_func, (unsigned long) to);
	to->timed_out = 0;
	to->p = current;
}

static inline void set_timeout(long start_jiffies, struct aio_timeout *to,
			       const struct timespec *ts)
{
	to->timer.expires = start_jiffies + timespec_to_jiffies(ts);
	if (time_after(to->timer.expires, jiffies))
		add_timer(&to->timer);
	else
		to->timed_out = 1;
}

static inline void clear_timeout(struct aio_timeout *to)
{
	del_singleshot_timer_sync(&to->timer);
}

static int read_events(struct kioctx *ctx,
			long min_nr, long nr,
			struct io_event __user *event,
			struct timespec __user *timeout)
{
	long			start_jiffies = jiffies;
	struct task_struct	*tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);
	int			ret;
	int			i = 0;
	struct io_event		ent;
	struct aio_timeout	to;
	int			retry = 0;

	/* needed to zero any padding within an entry (there shouldn't be
	 * any, but C is fun!)
	 */
	memset(&ent, 0, sizeof(ent));
retry:
	ret = 0;
	while (likely(i < nr)) {
		ret = aio_read_evt(ctx, &ent);
		if (unlikely(ret <= 0))
			break;

		dprintk("read event: %Lx %Lx %Lx %Lx\n",
			ent.data, ent.obj, ent.res, ent.res2);

		/* Could we split the check in two? */
		ret = -EFAULT;
		if (unlikely(copy_to_user(event, &ent, sizeof(ent)))) {
			dprintk("aio: lost an event due to EFAULT.\n");
			break;
		}
		ret = 0;

		/* Good, event copied to userland, update counts. */
		event++;
		i++;
	}

	if (min_nr <= i)
		return i;
	if (ret)
		return ret;

	/* End fast path */

	/* racey check, but it gets redone */
	if (!retry && unlikely(!list_empty(&ctx->run_list))) {
		retry = 1;
		aio_run_all_iocbs(ctx);
		goto retry;
	}

	init_timeout(&to);
	if (timeout) {
		struct timespec	ts;
		ret = -EFAULT;
		if (unlikely(copy_from_user(&ts, timeout, sizeof(ts))))
			goto out;

		set_timeout(start_jiffies, &to, &ts);
	}

	while (likely(i < nr)) {
		add_wait_queue_exclusive(&ctx->wait, &wait);
		do {
			set_task_state(tsk, TASK_INTERRUPTIBLE);
			ret = aio_read_evt(ctx, &ent);
			if (ret)
				break;
			if (min_nr <= i)
				break;
			if (unlikely(ctx->dead)) {
				ret = -EINVAL;
				break;
			}
			if (to.timed_out)	/* Only check after read evt */
				break;
			/* Try to only show up in io wait if there are ops
			 * in flight */
			if (ctx->reqs_active)
				io_schedule();
			else
				schedule();
			if (signal_pending(tsk)) {
				ret = -EINTR;
				break;
			}
			/*ret = aio_read_evt(ctx, &ent);*/
		} while (1);

		set_task_state(tsk, TASK_RUNNING);
		remove_wait_queue(&ctx->wait, &wait);

		if (unlikely(ret <= 0))
			break;

		ret = -EFAULT;
		if (unlikely(copy_to_user(event, &ent, sizeof(ent)))) {
			dprintk("aio: lost an event due to EFAULT.\n");
			break;
		}

		/* Good, event copied to userland, update counts. */
		event++;
		i++;
	}

	if (timeout)
		clear_timeout(&to);
out:
	destroy_timer_on_stack(&to.timer);
	return i ? i : ret;
}

/* Take an ioctx and remove it from the list of ioctx's.  Protects
 * against races with itself via ->dead.
 */
static void io_destroy(struct kioctx *ioctx)
{
	struct mm_struct *mm = current->mm;
	int was_dead;

	/* delete the entry from the list if someone else hasn't already */
	spin_lock(&mm->ioctx_lock);
	was_dead = ioctx->dead;
	ioctx->dead = 1;
	hlist_del_rcu(&ioctx->list);
	spin_unlock(&mm->ioctx_lock);

	dprintk("aio_release(%p)\n", ioctx);
	if (likely(!was_dead))
		put_ioctx(ioctx);	/* twice for the list */

	aio_cancel_all(ioctx);
	wait_for_all_aios(ioctx);

	/*
	 * Wake up any waiters.  The setting of ctx->dead must be seen
	 * by other CPUs at this point.  Right now, we rely on the
	 * locking done by the above calls to ensure this consistency.
	 */
	wake_up_all(&ioctx->wait);
	put_ioctx(ioctx);	/* once for the lookup */
}

/* sys_io_setup:
 *	Create an aio_context capable of receiving at least nr_events.
 *	ctxp must not point to an aio_context that already exists, and
 *	must be initialized to 0 prior to the call.  On successful
 *	creation of the aio_context, *ctxp is filled in with the resulting
 *	handle.  May fail with -EINVAL if *ctxp is not initialized,
 *	if the specified nr_events exceeds internal limits.  May fail
 *	with -EAGAIN if the specified nr_events exceeds the user's limit
 *	of available events.  May fail with -ENOMEM if insufficient kernel
 *	resources are available.  May fail with -EFAULT if an invalid
 *	pointer is passed for ctxp.  Will fail with -ENOSYS if not
 *	implemented.
 */
SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp)
{
	struct kioctx *ioctx = NULL;
	unsigned long ctx;
	long ret;

	ret = get_user(ctx, ctxp);
	if (unlikely(ret))
		goto out;

	ret = -EINVAL;
	if (unlikely(ctx || nr_events == 0)) {
		pr_debug("EINVAL: io_setup: ctx %lu nr_events %u\n",
		         ctx, nr_events);
		goto out;
	}

	ioctx = ioctx_alloc(nr_events);
	ret = PTR_ERR(ioctx);
	if (!IS_ERR(ioctx)) {
		ret = put_user(ioctx->user_id, ctxp);
		if (!ret)
			return 0;

		get_ioctx(ioctx); /* io_destroy() expects us to hold a ref */
		io_destroy(ioctx);
	}

out:
	return ret;
}

/* sys_io_destroy:
 *	Destroy the aio_context specified.  May cancel any outstanding
 *	AIOs and block on completion.  Will fail with -ENOSYS if not
 *	implemented.  May fail with -EINVAL if the context pointed to
 *	is invalid.
 */
SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
{
	struct kioctx *ioctx = lookup_ioctx(ctx);
	if (likely(NULL != ioctx)) {
		io_destroy(ioctx);
		return 0;
	}
	pr_debug("EINVAL: io_destroy: invalid context id\n");
	return -EINVAL;
}

static void aio_advance_iovec(struct kiocb *iocb, ssize_t ret)
{
	struct iovec *iov = &iocb->ki_iovec[iocb->ki_cur_seg];

	BUG_ON(ret <= 0);

	while (iocb->ki_cur_seg < iocb->ki_nr_segs && ret > 0) {
		ssize_t this = min((ssize_t)iov->iov_len, ret);
		iov->iov_base += this;
		iov->iov_len -= this;
		iocb->ki_left -= this;
		ret -= this;
		if (iov->iov_len == 0) {
			iocb->ki_cur_seg++;
			iov++;
		}
	}

	/* the caller should not have done more io than what fit in
	 * the remaining iovecs */
	BUG_ON(ret > 0 && iocb->ki_left == 0);
}

static ssize_t aio_rw_vect_retry(struct kiocb *iocb)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	ssize_t (*rw_op)(struct kiocb *, const struct iovec *,
			 unsigned long, loff_t);
	ssize_t ret = 0;
	unsigned short opcode;

	if ((iocb->ki_opcode == IOCB_CMD_PREADV) ||
	    (iocb->ki_opcode == IOCB_CMD_PREAD)) {
		rw_op = file->f_op->aio_read;
		opcode = IOCB_CMD_PREADV;
	} else {
		rw_op = file->f_op->aio_write;
		opcode = IOCB_CMD_PWRITEV;
	}

	/* This matches the pread()/pwrite() logic */
	if (iocb->ki_pos < 0)
		return -EINVAL;

	do {
		ret = rw_op(iocb, &iocb->ki_iovec[iocb->ki_cur_seg],
			    iocb->ki_nr_segs - iocb->ki_cur_seg,
			    iocb->ki_pos);
		if (ret > 0)
			aio_advance_iovec(iocb, ret);

	/* retry all partial writes.  retry partial reads as long as its a
	 * regular file. */
	} while (ret > 0 && iocb->ki_left > 0 &&
		 (opcode == IOCB_CMD_PWRITEV ||
		  (!S_ISFIFO(inode->i_mode) && !S_ISSOCK(inode->i_mode))));

	/* This means we must have transferred all that we could */
	/* No need to retry anymore */
	if ((ret == 0) || (iocb->ki_left == 0))
		ret = iocb->ki_nbytes - iocb->ki_left;

	/* If we managed to write some out we return that, rather than
	 * the eventual error. */
	if (opcode == IOCB_CMD_PWRITEV
	    && ret < 0 && ret != -EIOCBQUEUED && ret != -EIOCBRETRY
	    && iocb->ki_nbytes - iocb->ki_left)
		ret = iocb->ki_nbytes - iocb->ki_left;

	return ret;
}

static ssize_t aio_fdsync(struct kiocb *iocb)
{
	struct file *file = iocb->ki_filp;
	ssize_t ret = -EINVAL;

	if (file->f_op->aio_fsync)
		ret = file->f_op->aio_fsync(iocb, 1);
	return ret;
}

static ssize_t aio_fsync(struct kiocb *iocb)
{
	struct file *file = iocb->ki_filp;
	ssize_t ret = -EINVAL;

	if (file->f_op->aio_fsync)
		ret = file->f_op->aio_fsync(iocb, 0);
	return ret;
}

static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
{
	ssize_t ret;

#ifdef CONFIG_COMPAT
	if (compat)
		ret = compat_rw_copy_check_uvector(type,
				(struct compat_iovec __user *)kiocb->ki_buf,
				kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
				&kiocb->ki_iovec, 1);
	else
#endif
		ret = rw_copy_check_uvector(type,
				(struct iovec __user *)kiocb->ki_buf,
				kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
				&kiocb->ki_iovec, 1);
	if (ret < 0)
		goto out;

	kiocb->ki_nr_segs = kiocb->ki_nbytes;
	kiocb->ki_cur_seg = 0;
	/* ki_nbytes/left now reflect bytes instead of segs */
	kiocb->ki_nbytes = ret;
	kiocb->ki_left = ret;

	ret = 0;
out:
	return ret;
}

static ssize_t aio_setup_single_vector(struct kiocb *kiocb)
{
	kiocb->ki_iovec = &kiocb->ki_inline_vec;
	kiocb->ki_iovec->iov_base = kiocb->ki_buf;
	kiocb->ki_iovec->iov_len = kiocb->ki_left;
	kiocb->ki_nr_segs = 1;
	kiocb->ki_cur_seg = 0;
	return 0;
}

/*
 * aio_setup_iocb:
 *	Performs the initial checks and aio retry method
 *	setup for the kiocb at the time of io submission.
 */
static ssize_t aio_setup_iocb(struct kiocb *kiocb, bool compat)
{
	struct file *file = kiocb->ki_filp;
	ssize_t ret = 0;

	switch (kiocb->ki_opcode) {
	case IOCB_CMD_PREAD:
		ret = -EBADF;
		if (unlikely(!(file->f_mode & FMODE_READ)))
			break;
		ret = -EFAULT;
		if (unlikely(!access_ok(VERIFY_WRITE, kiocb->ki_buf,
			kiocb->ki_left)))
			break;
		ret = security_file_permission(file, MAY_READ);
		if (unlikely(ret))
			break;
		ret = aio_setup_single_vector(kiocb);
		if (ret)
			break;
		ret = -EINVAL;
		if (file->f_op->aio_read)
			kiocb->ki_retry = aio_rw_vect_retry;
		break;
	case IOCB_CMD_PWRITE:
		ret = -EBADF;
		if (unlikely(!(file->f_mode & FMODE_WRITE)))
			break;
		ret = -EFAULT;
		if (unlikely(!access_ok(VERIFY_READ, kiocb->ki_buf,
			kiocb->ki_left)))
			break;
		ret = security_file_permission(file, MAY_WRITE);
		if (unlikely(ret))
			break;
		ret = aio_setup_single_vector(kiocb);
		if (ret)
			break;
		ret = -EINVAL;
		if (file->f_op->aio_write)
			kiocb->ki_retry = aio_rw_vect_retry;
		break;
	case IOCB_CMD_PREADV:
		ret = -EBADF;
		if (unlikely(!(file->f_mode & FMODE_READ)))
			break;
		ret = security_file_permission(file, MAY_READ);
		if (unlikely(ret))
			break;
		ret = aio_setup_vectored_rw(READ, kiocb, compat);
		if (ret)
			break;
		ret = -EINVAL;
		if (file->f_op->aio_read)
			kiocb->ki_retry = aio_rw_vect_retry;
		break;
	case IOCB_CMD_PWRITEV:
		ret = -EBADF;
		if (unlikely(!(file->f_mode & FMODE_WRITE)))
			break;
		ret = security_file_permission(file, MAY_WRITE);
		if (unlikely(ret))
			break;
		ret = aio_setup_vectored_rw(WRITE, kiocb, compat);
		if (ret)
			break;
		ret = -EINVAL;
		if (file->f_op->aio_write)
			kiocb->ki_retry = aio_rw_vect_retry;
		break;
	case IOCB_CMD_FDSYNC:
		ret = -EINVAL;
		if (file->f_op->aio_fsync)
			kiocb->ki_retry = aio_fdsync;
		break;
	case IOCB_CMD_FSYNC:
		ret = -EINVAL;
		if (file->f_op->aio_fsync)
			kiocb->ki_retry = aio_fsync;
		break;
	default:
		dprintk("EINVAL: io_submit: no operation provided\n");
		ret = -EINVAL;
	}

	if (!kiocb->ki_retry)
		return ret;

	return 0;
}

static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
			 struct iocb *iocb, struct kiocb_batch *batch,
			 bool compat)
{
	struct kiocb *req;
	struct file *file;
	ssize_t ret;

	/* enforce forwards compatibility on users */
	if (unlikely(iocb->aio_reserved1 || iocb->aio_reserved2)) {
		pr_debug("EINVAL: io_submit: reserve field set\n");
		return -EINVAL;
	}

	/* prevent overflows */
	if (unlikely(
	    (iocb->aio_buf != (unsigned long)iocb->aio_buf) ||
	    (iocb->aio_nbytes != (size_t)iocb->aio_nbytes) ||
	    ((ssize_t)iocb->aio_nbytes < 0)
	   )) {
		pr_debug("EINVAL: io_submit: overflow check\n");
		return -EINVAL;
	}

	file = fget(iocb->aio_fildes);
	if (unlikely(!file))
		return -EBADF;

	req = aio_get_req(ctx, batch);  /* returns with 2 references to req */
	if (unlikely(!req)) {
		fput(file);
		return -EAGAIN;
	}
	req->ki_filp = file;
	if (iocb->aio_flags & IOCB_FLAG_RESFD) {
		/*
		 * If the IOCB_FLAG_RESFD flag of aio_flags is set, get an
		 * instance of the file* now.
		 * The file descriptor must be an eventfd() fd, and will be
		 * signaled for each completed event using the eventfd_signal()
		 * function.
		 */
		req->ki_eventfd = eventfd_ctx_fdget((int) iocb->aio_resfd);
		if (IS_ERR(req->ki_eventfd)) {
			ret = PTR_ERR(req->ki_eventfd);
			req->ki_eventfd = NULL;
			goto out_put_req;
		}
	}

	ret = put_user(req->ki_key, &user_iocb->aio_key);
	if (unlikely(ret)) {
		dprintk("EFAULT: aio_key\n");
		goto out_put_req;
	}

	req->ki_obj.user = user_iocb;
	req->ki_user_data = iocb->aio_data;
	req->ki_pos = iocb->aio_offset;

	req->ki_buf = (char __user *)(unsigned long)iocb->aio_buf;
	req->ki_left = req->ki_nbytes = iocb->aio_nbytes;
	req->ki_opcode = iocb->aio_lio_opcode;

	ret = aio_setup_iocb(req, compat);

	if (ret)
		goto out_put_req;

	spin_lock_irq(&ctx->ctx_lock);
	/*
	 * We could have raced with io_destroy() and are currently holding a
	 * reference to ctx which should be destroyed. We cannot submit IO
	 * since ctx gets freed as soon as io_submit() puts its reference.  The
	 * check here is reliable: io_destroy() sets ctx->dead before waiting
	 * for outstanding IO and the barrier between these two is realized by
	 * unlock of mm->ioctx_lock and lock of ctx->ctx_lock.  Analogously we
	 * increment ctx->reqs_active before checking for ctx->dead and the
	 * barrier is realized by unlock and lock of ctx->ctx_lock. Thus if we
	 * don't see ctx->dead set here, io_destroy() waits for our IO to
	 * finish.
	 */
	if (ctx->dead) {
		spin_unlock_irq(&ctx->ctx_lock);
		ret = -EINVAL;
		goto out_put_req;
	}
	aio_run_iocb(req);
	if (!list_empty(&ctx->run_list)) {
		/* drain the run list */
		while (__aio_run_iocbs(ctx))
			;
	}
	spin_unlock_irq(&ctx->ctx_lock);

	aio_put_req(req);	/* drop extra ref to req */
	return 0;

out_put_req:
	aio_put_req(req);	/* drop extra ref to req */
	aio_put_req(req);	/* drop i/o ref to req */
	return ret;
}

long do_io_submit(aio_context_t ctx_id, long nr,
		  struct iocb __user *__user *iocbpp, bool compat)
{
	struct kioctx *ctx;
	long ret = 0;
	int i = 0;
	struct blk_plug plug;
	struct kiocb_batch batch;

	if (unlikely(nr < 0))
		return -EINVAL;

	if (unlikely(nr > LONG_MAX/sizeof(*iocbpp)))
		nr = LONG_MAX/sizeof(*iocbpp);

	if (unlikely(!access_ok(VERIFY_READ, iocbpp, (nr*sizeof(*iocbpp)))))
		return -EFAULT;

	ctx = lookup_ioctx(ctx_id);
	if (unlikely(!ctx)) {
		pr_debug("EINVAL: io_submit: invalid context id\n");
		return -EINVAL;
	}

	kiocb_batch_init(&batch, nr);

	blk_start_plug(&plug);

	/*
	 * AKPM: should this return a partial result if some of the IOs were
	 * successfully submitted?
	 */
	for (i=0; i<nr; i++) {
		struct iocb __user *user_iocb;
		struct iocb tmp;

		if (unlikely(__get_user(user_iocb, iocbpp + i))) {
			ret = -EFAULT;
			break;
		}

		if (unlikely(copy_from_user(&tmp, user_iocb, sizeof(tmp)))) {
			ret = -EFAULT;
			break;
		}

		ret = io_submit_one(ctx, user_iocb, &tmp, &batch, compat);
		if (ret)
			break;
	}
	blk_finish_plug(&plug);

	kiocb_batch_free(ctx, &batch);
	put_ioctx(ctx);
	return i ? i : ret;
}

/* sys_io_submit:
 *	Queue the nr iocbs pointed to by iocbpp for processing.  Returns
 *	the number of iocbs queued.  May return -EINVAL if the aio_context
 *	specified by ctx_id is invalid, if nr is < 0, if the iocb at
 *	*iocbpp[0] is not properly initialized, if the operation specified
 *	is invalid for the file descriptor in the iocb.  May fail with
 *	-EFAULT if any of the data structures point to invalid data.  May
 *	fail with -EBADF if the file descriptor specified in the first
 *	iocb is invalid.  May fail with -EAGAIN if insufficient resources
 *	are available to queue any iocbs.  Will return 0 if nr is 0.  Will
 *	fail with -ENOSYS if not implemented.
 */
SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
		struct iocb __user * __user *, iocbpp)
{
	return do_io_submit(ctx_id, nr, iocbpp, 0);
}

/* lookup_kiocb
 *	Finds a given iocb for cancellation.
 */
static struct kiocb *lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb,
				  u32 key)
{
	struct list_head *pos;

	assert_spin_locked(&ctx->ctx_lock);

	/* TODO: use a hash or array, this sucks. */
	list_for_each(pos, &ctx->active_reqs) {
		struct kiocb *kiocb = list_kiocb(pos);
		if (kiocb->ki_obj.user == iocb && kiocb->ki_key == key)
			return kiocb;
	}
	return NULL;
}

/* sys_io_cancel:
 *	Attempts to cancel an iocb previously passed to io_submit.  If
 *	the operation is successfully cancelled, the resulting event is
 *	copied into the memory pointed to by result without being placed
 *	into the completion queue and 0 is returned.  May fail with
 *	-EFAULT if any of the data structures pointed to are invalid.
 *	May fail with -EINVAL if aio_context specified by ctx_id is
 *	invalid.  May fail with -EAGAIN if the iocb specified was not
 *	cancelled.  Will fail with -ENOSYS if not implemented.
 */
SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
		struct io_event __user *, result)
{
	int (*cancel)(struct kiocb *iocb, struct io_event *res);
	struct kioctx *ctx;
	struct kiocb *kiocb;
	u32 key;
	int ret;

	ret = get_user(key, &iocb->aio_key);
	if (unlikely(ret))
		return -EFAULT;

	ctx = lookup_ioctx(ctx_id);
	if (unlikely(!ctx))
		return -EINVAL;

	spin_lock_irq(&ctx->ctx_lock);
	ret = -EAGAIN;
	kiocb = lookup_kiocb(ctx, iocb, key);
	if (kiocb && kiocb->ki_cancel) {
		cancel = kiocb->ki_cancel;
		kiocb->ki_users++;
		kiocbSetCancelled(kiocb);
	} else
		cancel = NULL;
	spin_unlock_irq(&ctx->ctx_lock);

	if (NULL != cancel) {
		struct io_event tmp;
		pr_debug("calling cancel\n");
		memset(&tmp, 0, sizeof(tmp));
		tmp.obj = (u64)(unsigned long)kiocb->ki_obj.user;
		tmp.data = kiocb->ki_user_data;
		ret = cancel(kiocb, &tmp);
		if (!ret) {
			/* Cancellation succeeded -- copy the result
			 * into the user's buffer.
			 */
			if (copy_to_user(result, &tmp, sizeof(tmp)))
				ret = -EFAULT;
		}
	} else
		ret = -EINVAL;

	put_ioctx(ctx);

	return ret;
}

/* io_getevents:
 *	Attempts to read at least min_nr events and up to nr events from
 *	the completion queue for the aio_context specified by ctx_id. If
 *	it succeeds, the number of read events is returned.
 *	May fail with -EINVAL if ctx_id is invalid, if min_nr is out of range,
 *	if nr is out of range, if timeout is out of range.  May fail with
 *	-EFAULT if any of the memory specified is invalid.  May return 0 or
 *	< min_nr if the timeout specified by timeout has elapsed before
 *	sufficient events are available, where timeout == NULL specifies
 *	an infinite timeout.  Note that the timeout pointed to by timeout
 *	is relative and will be updated if not NULL and the operation blocks.
 *	Will fail with -ENOSYS if not implemented.
 */
SYSCALL_DEFINE5(io_getevents, aio_context_t, ctx_id,
		long, min_nr,
		long, nr,
		struct io_event __user *, events,
		struct timespec __user *, timeout)
{
	struct kioctx *ioctx = lookup_ioctx(ctx_id);
	long ret = -EINVAL;

	if (likely(ioctx)) {
		if (likely(min_nr <= nr && min_nr >= 0))
			ret = read_events(ioctx, min_nr, nr, events, timeout);
		put_ioctx(ioctx);
	}

	asmlinkage_protect(5, ret, ctx_id, min_nr, nr, events, timeout);
	return ret;
}