/*
 * An async IO implementation for Linux
 * Written by Benjamin LaHaise <bcrl@kvack.org>
 *
 * Implements an efficient asynchronous io interface.
 *
 * Copyright 2000, 2001, 2002 Red Hat, Inc.  All Rights Reserved.
 *
 * See ../COPYING for licensing terms.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/time.h>
#include <linux/aio_abi.h>
#include <linux/module.h>
#include <linux/syscalls.h>
#include <linux/uio.h>

#define DEBUG 0

#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/aio.h>
#include <linux/highmem.h>
#include <linux/workqueue.h>
#include <linux/security.h>
#include <linux/eventfd.h>

#include <asm/kmap_types.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>

#if DEBUG > 1
#define dprintk		printk
#else
#define dprintk(x...)	do { ; } while (0)
#endif

/*------ sysctl variables----*/
static DEFINE_SPINLOCK(aio_nr_lock);
unsigned long aio_nr;		/* current system wide number of aio requests */
unsigned long aio_max_nr = 0x10000; /* system wide maximum number of aio requests */
/*----end sysctl variables---*/

static struct kmem_cache	*kiocb_cachep;
static struct kmem_cache	*kioctx_cachep;

static struct workqueue_struct *aio_wq;

/* Used for rare fput completion. */
static void aio_fput_routine(struct work_struct *);
static DECLARE_WORK(fput_work, aio_fput_routine);

static DEFINE_SPINLOCK(fput_lock);
static LIST_HEAD(fput_head);

static void aio_kick_handler(struct work_struct *);
static void aio_queue_work(struct kioctx *);

/* aio_setup
 *	Creates the slab caches used by the aio routines, panic on
 *	failure as this is done early during the boot sequence.
 */
static int __init aio_setup(void)
{
	kiocb_cachep = KMEM_CACHE(kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
	kioctx_cachep = KMEM_CACHE(kioctx, SLAB_HWCACHE_ALIGN|SLAB_PANIC);

	aio_wq = create_workqueue("aio");

	pr_debug("aio_setup: sizeof(struct page) = %d\n", (int)sizeof(struct page));

	return 0;
}

static void aio_free_ring(struct kioctx *ctx)
{
	struct aio_ring_info *info = &ctx->ring_info;
	long i;

	for (i=0; i<info->nr_pages; i++)
		put_page(info->ring_pages[i]);

	if (info->mmap_size) {
		down_write(&ctx->mm->mmap_sem);
		do_munmap(ctx->mm, info->mmap_base, info->mmap_size);
		up_write(&ctx->mm->mmap_sem);
	}

	if (info->ring_pages && info->ring_pages != info->internal_pages)
		kfree(info->ring_pages);
	info->ring_pages = NULL;
	info->nr = 0;
}

static int aio_setup_ring(struct kioctx *ctx)
{
	struct aio_ring *ring;
	struct aio_ring_info *info = &ctx->ring_info;
	unsigned nr_events = ctx->max_reqs;
	unsigned long size;
	int nr_pages;

	/* Compensate for the ring buffer's head/tail overlap entry */
	nr_events += 2;	/* 1 is required, 2 for good luck */

	size = sizeof(struct aio_ring);
	size += sizeof(struct io_event) * nr_events;
	nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;

	if (nr_pages < 0)
		return -EINVAL;

	nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);

	info->nr = 0;
	info->ring_pages = info->internal_pages;
	if (nr_pages > AIO_RING_PAGES) {
		info->ring_pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
		if (!info->ring_pages)
			return -ENOMEM;
	}

	info->mmap_size = nr_pages * PAGE_SIZE;
	dprintk("attempting mmap of %lu bytes\n", info->mmap_size);
	down_write(&ctx->mm->mmap_sem);
	info->mmap_base = do_mmap(NULL, 0, info->mmap_size,
				  PROT_READ|PROT_WRITE, MAP_ANONYMOUS|MAP_PRIVATE,
				  0);
	if (IS_ERR((void *)info->mmap_base)) {
		up_write(&ctx->mm->mmap_sem);
		info->mmap_size = 0;
		aio_free_ring(ctx);
		return -EAGAIN;
	}

	dprintk("mmap address: 0x%08lx\n", info->mmap_base);
	info->nr_pages = get_user_pages(current, ctx->mm,
					info->mmap_base, nr_pages,
					1, 0, info->ring_pages, NULL);
	up_write(&ctx->mm->mmap_sem);

	if (unlikely(info->nr_pages != nr_pages)) {
		aio_free_ring(ctx);
		return -EAGAIN;
	}

	ctx->user_id = info->mmap_base;

	info->nr = nr_events;		/* trusted copy */

	ring = kmap_atomic(info->ring_pages[0], KM_USER0);
	ring->nr = nr_events;	/* user copy */
	ring->id = ctx->user_id;
	ring->head = ring->tail = 0;
	ring->magic = AIO_RING_MAGIC;
	ring->compat_features = AIO_RING_COMPAT_FEATURES;
	ring->incompat_features = AIO_RING_INCOMPAT_FEATURES;
	ring->header_length = sizeof(struct aio_ring);
	kunmap_atomic(ring, KM_USER0);

	return 0;
}


/* aio_ring_event: returns a pointer to the event at the given index from
 * kmap_atomic(, km).
 * Release the pointer with put_aio_ring_event();
 */
#define AIO_EVENTS_PER_PAGE	(PAGE_SIZE / sizeof(struct io_event))
#define AIO_EVENTS_FIRST_PAGE	((PAGE_SIZE - sizeof(struct aio_ring)) / sizeof(struct io_event))
#define AIO_EVENTS_OFFSET	(AIO_EVENTS_PER_PAGE - AIO_EVENTS_FIRST_PAGE)

#define aio_ring_event(info, nr, km) ({					\
	unsigned pos = (nr) + AIO_EVENTS_OFFSET;			\
	struct io_event *__event;					\
	__event = kmap_atomic(						\
			(info)->ring_pages[pos / AIO_EVENTS_PER_PAGE], km); \
	__event += pos % AIO_EVENTS_PER_PAGE;				\
	__event;							\
})

#define put_aio_ring_event(event, km) do {	\
	struct io_event *__event = (event);	\
	(void)__event;				\
	kunmap_atomic((void *)((unsigned long)__event & PAGE_MASK), km); \
} while(0)

static void ctx_rcu_free(struct rcu_head *head)
{
	struct kioctx *ctx = container_of(head, struct kioctx, rcu_head);
	unsigned nr_events = ctx->max_reqs;

	kmem_cache_free(kioctx_cachep, ctx);

	if (nr_events) {
		spin_lock(&aio_nr_lock);
		BUG_ON(aio_nr - nr_events > aio_nr);
		aio_nr -= nr_events;
		spin_unlock(&aio_nr_lock);
	}
}

/* __put_ioctx
 *	Called when the last user of an aio context has gone away,
 *	and the struct needs to be freed.
 */
static void __put_ioctx(struct kioctx *ctx)
{
	BUG_ON(ctx->reqs_active);

	cancel_delayed_work(&ctx->wq);
	cancel_work_sync(&ctx->wq.work);
	aio_free_ring(ctx);
	mmdrop(ctx->mm);
	ctx->mm = NULL;
	pr_debug("__put_ioctx: freeing %p\n", ctx);
	call_rcu(&ctx->rcu_head, ctx_rcu_free);
}

#define get_ioctx(kioctx) do {						\
	BUG_ON(atomic_read(&(kioctx)->users) <= 0);			\
	atomic_inc(&(kioctx)->users);					\
} while (0)
#define put_ioctx(kioctx) do {						\
	BUG_ON(atomic_read(&(kioctx)->users) <= 0);			\
	if (unlikely(atomic_dec_and_test(&(kioctx)->users)))		\
		__put_ioctx(kioctx);					\
} while (0)

/* ioctx_alloc
 *	Allocates and initializes an ioctx.  Returns an ERR_PTR if it failed.
 */
static struct kioctx *ioctx_alloc(unsigned nr_events)
{
	struct mm_struct *mm;
	struct kioctx *ctx;
	int did_sync = 0;

	/* Prevent overflows */
	if ((nr_events > (0x10000000U / sizeof(struct io_event))) ||
	    (nr_events > (0x10000000U / sizeof(struct kiocb)))) {
		pr_debug("ENOMEM: nr_events too high\n");
		return ERR_PTR(-EINVAL);
	}

	if ((unsigned long)nr_events > aio_max_nr)
		return ERR_PTR(-EAGAIN);

	ctx = kmem_cache_zalloc(kioctx_cachep, GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	ctx->max_reqs = nr_events;
	mm = ctx->mm = current->mm;
	atomic_inc(&mm->mm_count);

	atomic_set(&ctx->users, 1);
	spin_lock_init(&ctx->ctx_lock);
	spin_lock_init(&ctx->ring_info.ring_lock);
	init_waitqueue_head(&ctx->wait);

	INIT_LIST_HEAD(&ctx->active_reqs);
	INIT_LIST_HEAD(&ctx->run_list);
	INIT_DELAYED_WORK(&ctx->wq, aio_kick_handler);

	if (aio_setup_ring(ctx) < 0)
		goto out_freectx;

	/* limit the number of system wide aios */
	do {
		spin_lock_bh(&aio_nr_lock);
		if (aio_nr + nr_events > aio_max_nr ||
		    aio_nr + nr_events < aio_nr)
			ctx->max_reqs = 0;
		else
			aio_nr += ctx->max_reqs;
		spin_unlock_bh(&aio_nr_lock);
		if (ctx->max_reqs || did_sync)
			break;

		/* wait for rcu callbacks to have completed before giving up */
		synchronize_rcu();
		did_sync = 1;
		ctx->max_reqs = nr_events;
	} while (1);

	if (ctx->max_reqs == 0)
		goto out_cleanup;

	/* now link into global list. */
	spin_lock(&mm->ioctx_lock);
	hlist_add_head_rcu(&ctx->list, &mm->ioctx_list);
	spin_unlock(&mm->ioctx_lock);

	dprintk("aio: allocated ioctx %p[%ld]: mm=%p mask=0x%x\n",
		ctx, ctx->user_id, current->mm, ctx->ring_info.nr);
	return ctx;

out_cleanup:
	__put_ioctx(ctx);
	return ERR_PTR(-EAGAIN);

out_freectx:
	mmdrop(mm);
	kmem_cache_free(kioctx_cachep, ctx);
	ctx = ERR_PTR(-ENOMEM);

	dprintk("aio: error allocating ioctx %p\n", ctx);
	return ctx;
}

/* aio_cancel_all
 *	Cancels all outstanding aio requests on an aio context.  Used
 *	when the processes owning a context have all exited to encourage
 *	the rapid destruction of the kioctx.
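 *
 *	(Note: each iocb's ki_cancel method is invoked below with ctx_lock
 *	temporarily dropped; the ki_users reference taken just beforehand
 *	keeps the iocb from being freed underneath the cancel callback.)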
 */
static void aio_cancel_all(struct kioctx *ctx)
{
	int (*cancel)(struct kiocb *, struct io_event *);
	struct io_event res;
	spin_lock_irq(&ctx->ctx_lock);
	ctx->dead = 1;
	while (!list_empty(&ctx->active_reqs)) {
		struct list_head *pos = ctx->active_reqs.next;
		struct kiocb *iocb = list_kiocb(pos);
		list_del_init(&iocb->ki_list);
		cancel = iocb->ki_cancel;
		kiocbSetCancelled(iocb);
		if (cancel) {
			iocb->ki_users++;
			spin_unlock_irq(&ctx->ctx_lock);
			cancel(iocb, &res);
			spin_lock_irq(&ctx->ctx_lock);
		}
	}
	spin_unlock_irq(&ctx->ctx_lock);
}

static void wait_for_all_aios(struct kioctx *ctx)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);

	spin_lock_irq(&ctx->ctx_lock);
	if (!ctx->reqs_active)
		goto out;

	add_wait_queue(&ctx->wait, &wait);
	set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	while (ctx->reqs_active) {
		spin_unlock_irq(&ctx->ctx_lock);
		io_schedule();
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
		spin_lock_irq(&ctx->ctx_lock);
	}
	__set_task_state(tsk, TASK_RUNNING);
	remove_wait_queue(&ctx->wait, &wait);

out:
	spin_unlock_irq(&ctx->ctx_lock);
}

/* wait_on_sync_kiocb:
 *	Waits on the given sync kiocb to complete.
 */
ssize_t wait_on_sync_kiocb(struct kiocb *iocb)
{
	while (iocb->ki_users) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!iocb->ki_users)
			break;
		io_schedule();
	}
	__set_current_state(TASK_RUNNING);
	return iocb->ki_user_data;
}

/* exit_aio: called when the last user of mm goes away.  At this point,
 * there is no way for any new requests to be submitted or any of the
 * io_* syscalls to be called on the context.  However, there may be
 * outstanding requests which hold references to the context; as they
 * go away, they will call put_ioctx and release any pinned memory
 * associated with the request (held via struct page * references).
 */
void exit_aio(struct mm_struct *mm)
{
	struct kioctx *ctx;

	while (!hlist_empty(&mm->ioctx_list)) {
		ctx = hlist_entry(mm->ioctx_list.first, struct kioctx, list);
		hlist_del_rcu(&ctx->list);

		aio_cancel_all(ctx);

		wait_for_all_aios(ctx);
		/*
		 * Ensure we don't leave the ctx on the aio_wq
		 */
		cancel_work_sync(&ctx->wq.work);

		if (1 != atomic_read(&ctx->users))
			printk(KERN_DEBUG
				"exit_aio:ioctx still alive: %d %d %d\n",
				atomic_read(&ctx->users), ctx->dead,
				ctx->reqs_active);
		put_ioctx(ctx);
	}
}

/* aio_get_req
 *	Allocate a slot for an aio request.  Increments the users count
 *	of the kioctx so that the kioctx stays around until all requests are
 *	complete.  Returns NULL if no requests are free.
 *
 *	Returns with kiocb->users set to 2.  The io submit code path holds
 *	an extra reference while submitting the i/o.
 *	This prevents races between the aio code path referencing the
 *	req (after submitting it) and aio_complete() freeing the req.
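 *
 *	(Lifecycle note: of those two references, the submission path drops
 *	its extra one in io_submit_one() via aio_put_req(), and the other is
 *	dropped through __aio_put_req() when aio_complete() runs for the
 *	finished or cancelled request.)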
 */
static struct kiocb *__aio_get_req(struct kioctx *ctx)
{
	struct kiocb *req = NULL;
	struct aio_ring *ring;
	int okay = 0;

	req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL);
	if (unlikely(!req))
		return NULL;

	req->ki_flags = 0;
	req->ki_users = 2;
	req->ki_key = 0;
	req->ki_ctx = ctx;
	req->ki_cancel = NULL;
	req->ki_retry = NULL;
	req->ki_dtor = NULL;
	req->private = NULL;
	req->ki_iovec = NULL;
	INIT_LIST_HEAD(&req->ki_run_list);
	req->ki_eventfd = NULL;

	/* Check if the completion queue has enough free space to
	 * accept an event from this io.
	 */
	spin_lock_irq(&ctx->ctx_lock);
	ring = kmap_atomic(ctx->ring_info.ring_pages[0], KM_USER0);
	if (ctx->reqs_active < aio_ring_avail(&ctx->ring_info, ring)) {
		list_add(&req->ki_list, &ctx->active_reqs);
		ctx->reqs_active++;
		okay = 1;
	}
	kunmap_atomic(ring, KM_USER0);
	spin_unlock_irq(&ctx->ctx_lock);

	if (!okay) {
		kmem_cache_free(kiocb_cachep, req);
		req = NULL;
	}

	return req;
}

static inline struct kiocb *aio_get_req(struct kioctx *ctx)
{
	struct kiocb *req;
	/* Handle a potential starvation case -- should be exceedingly rare as
	 * requests will be stuck on fput_head only if the aio_fput_routine is
	 * delayed and the requests were the last user of the struct file.
	 */
	req = __aio_get_req(ctx);
	if (unlikely(NULL == req)) {
		aio_fput_routine(NULL);
		req = __aio_get_req(ctx);
	}
	return req;
}

static inline void really_put_req(struct kioctx *ctx, struct kiocb *req)
{
	assert_spin_locked(&ctx->ctx_lock);

	if (req->ki_dtor)
		req->ki_dtor(req);
	if (req->ki_iovec != &req->ki_inline_vec)
		kfree(req->ki_iovec);
	kmem_cache_free(kiocb_cachep, req);
	ctx->reqs_active--;

	if (unlikely(!ctx->reqs_active && ctx->dead))
		wake_up(&ctx->wait);
}

static void aio_fput_routine(struct work_struct *data)
{
	spin_lock_irq(&fput_lock);
	while (likely(!list_empty(&fput_head))) {
		struct kiocb *req = list_kiocb(fput_head.next);
		struct kioctx *ctx = req->ki_ctx;

		list_del(&req->ki_list);
		spin_unlock_irq(&fput_lock);

		/* Complete the fput(s) */
		if (req->ki_filp != NULL)
			__fput(req->ki_filp);
		if (req->ki_eventfd != NULL)
			__fput(req->ki_eventfd);

		/* Link the iocb into the context's free list */
		spin_lock_irq(&ctx->ctx_lock);
		really_put_req(ctx, req);
		spin_unlock_irq(&ctx->ctx_lock);

		put_ioctx(ctx);
		spin_lock_irq(&fput_lock);
	}
	spin_unlock_irq(&fput_lock);
}

/* __aio_put_req
 *	Returns true if this put was the last user of the request.
 */
static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
{
	int schedule_putreq = 0;

	dprintk(KERN_DEBUG "aio_put(%p): f_count=%ld\n",
		req, atomic_long_read(&req->ki_filp->f_count));

	assert_spin_locked(&ctx->ctx_lock);

	req->ki_users--;
	BUG_ON(req->ki_users < 0);
	if (likely(req->ki_users))
		return 0;
	list_del(&req->ki_list);		/* remove from active_reqs */
	req->ki_cancel = NULL;
	req->ki_retry = NULL;

	/*
	 * Try to optimize the aio and eventfd file* puts, by avoiding to
	 * schedule work in case it is not __fput() time. In normal cases,
	 * we would not be holding the last reference to the file*, so
	 * this function will be executed w/out any aio kthread wakeup.
	 */
	if (unlikely(atomic_long_dec_and_test(&req->ki_filp->f_count)))
		schedule_putreq++;
	else
		req->ki_filp = NULL;
	if (req->ki_eventfd != NULL) {
		if (unlikely(atomic_long_dec_and_test(&req->ki_eventfd->f_count)))
			schedule_putreq++;
		else
			req->ki_eventfd = NULL;
	}
	if (unlikely(schedule_putreq)) {
		get_ioctx(ctx);
		spin_lock(&fput_lock);
		list_add(&req->ki_list, &fput_head);
		spin_unlock(&fput_lock);
		queue_work(aio_wq, &fput_work);
	} else
		really_put_req(ctx, req);
	return 1;
}

/* aio_put_req
 *	Returns true if this put was the last user of the kiocb,
 *	false if the request is still in use.
 */
int aio_put_req(struct kiocb *req)
{
	struct kioctx *ctx = req->ki_ctx;
	int ret;
	spin_lock_irq(&ctx->ctx_lock);
	ret = __aio_put_req(ctx, req);
	spin_unlock_irq(&ctx->ctx_lock);
	return ret;
}

static struct kioctx *lookup_ioctx(unsigned long ctx_id)
{
	struct mm_struct *mm = current->mm;
	struct kioctx *ctx, *ret = NULL;
	struct hlist_node *n;

	rcu_read_lock();

	hlist_for_each_entry_rcu(ctx, n, &mm->ioctx_list, list) {
		if (ctx->user_id == ctx_id && !ctx->dead) {
			get_ioctx(ctx);
			ret = ctx;
			break;
		}
	}

	rcu_read_unlock();
	return ret;
}

/*
 * use_mm
 *	Makes the calling kernel thread take on the specified
 *	mm context.
 *	Called by the retry thread to execute retries within the
 *	iocb issuer's mm context, so that copy_from/to_user
 *	operations work seamlessly for aio.
 *	(Note: this routine is intended to be called only
 *	from a kernel thread context)
 */
static void use_mm(struct mm_struct *mm)
{
	struct mm_struct *active_mm;
	struct task_struct *tsk = current;

	task_lock(tsk);
	active_mm = tsk->active_mm;
	atomic_inc(&mm->mm_count);
	tsk->mm = mm;
	tsk->active_mm = mm;
	switch_mm(active_mm, mm, tsk);
	task_unlock(tsk);

	mmdrop(active_mm);
}

/*
 * unuse_mm
 *	Reverses the effect of use_mm, i.e. releases the
 *	specified mm context which was earlier taken on
 *	by the calling kernel thread
 *	(Note: this routine is intended to be called only
 *	from a kernel thread context)
 */
static void unuse_mm(struct mm_struct *mm)
{
	struct task_struct *tsk = current;

	task_lock(tsk);
	tsk->mm = NULL;
	/* active_mm is still 'mm' */
	enter_lazy_tlb(mm, tsk);
	task_unlock(tsk);
}

/*
 * Queue up a kiocb to be retried. Assumes that the kiocb
 * has already been marked as kicked, and places it on
 * the retry run list for the corresponding ioctx, if it
 * isn't already queued. Returns 1 if it actually queued
 * the kiocb (to tell the caller to activate the work
 * queue to process it), or 0, if it found that it was
 * already queued.
 */
static inline int __queue_kicked_iocb(struct kiocb *iocb)
{
	struct kioctx *ctx = iocb->ki_ctx;

	assert_spin_locked(&ctx->ctx_lock);

	if (list_empty(&iocb->ki_run_list)) {
		list_add_tail(&iocb->ki_run_list,
			&ctx->run_list);
		return 1;
	}
	return 0;
}

/* aio_run_iocb
 *	This is the core aio execution routine. It is
 *	invoked both for initial i/o submission and
 *	subsequent retries via the aio_kick_handler.
 *	Expects to be invoked with iocb->ki_ctx->lock
 *	already held. The lock is released and reacquired
 *	as needed during processing.
 *
 *	Calls the iocb retry method (already setup for the
 *	iocb on initial submission) for operation specific
 *	handling, but takes care of most of common retry
 *	execution details for a given iocb. The retry method
 *	needs to be non-blocking as far as possible, to avoid
 *	holding up other iocbs waiting to be serviced by the
 *	retry kernel thread.
 *
 *	The trickier parts in this code have to do with
 *	ensuring that only one retry instance is in progress
 *	for a given iocb at any time. Providing that guarantee
 *	simplifies the coding of individual aio operations as
 *	it avoids various potential races.
 */
static ssize_t aio_run_iocb(struct kiocb *iocb)
{
	struct kioctx *ctx = iocb->ki_ctx;
	ssize_t (*retry)(struct kiocb *);
	ssize_t ret;

	if (!(retry = iocb->ki_retry)) {
		printk("aio_run_iocb: iocb->ki_retry = NULL\n");
		return 0;
	}

	/*
	 * We don't want the next retry iteration for this
	 * operation to start until this one has returned and
	 * updated the iocb state. However, wait_queue functions
	 * can trigger a kick_iocb from interrupt context in the
	 * meantime, indicating that data is available for the next
	 * iteration. We want to remember that and enable the
	 * next retry iteration _after_ we are through with
	 * this one.
	 *
	 * So, in order to be able to register a "kick", but
	 * prevent it from being queued now, we clear the kick
	 * flag, but make the kick code *think* that the iocb is
	 * still on the run list until we are actually done.
	 * When we are done with this iteration, we check if
	 * the iocb was kicked in the meantime and if so, queue
	 * it up afresh.
	 */

	kiocbClearKicked(iocb);

	/*
	 * This is so that aio_complete knows it doesn't need to
	 * pull the iocb off the run list (We can't just call
	 * INIT_LIST_HEAD because we don't want a kick_iocb to
	 * queue this on the run list yet)
	 */
	iocb->ki_run_list.next = iocb->ki_run_list.prev = NULL;
	spin_unlock_irq(&ctx->ctx_lock);

	/* Quit retrying if the i/o has been cancelled */
	if (kiocbIsCancelled(iocb)) {
		ret = -EINTR;
		aio_complete(iocb, ret, 0);
		/* must not access the iocb after this */
		goto out;
	}

	/*
	 * Now we are all set to call the retry method in async
	 * context.
	 */
	ret = retry(iocb);

	if (ret != -EIOCBRETRY && ret != -EIOCBQUEUED) {
		BUG_ON(!list_empty(&iocb->ki_wait.task_list));
		aio_complete(iocb, ret, 0);
	}
out:
	spin_lock_irq(&ctx->ctx_lock);

	if (-EIOCBRETRY == ret) {
		/*
		 * OK, now that we are done with this iteration
		 * and know that there is more left to go,
		 * this is where we let go so that a subsequent
		 * "kick" can start the next iteration
		 */

		/* will make __queue_kicked_iocb succeed from here on */
		INIT_LIST_HEAD(&iocb->ki_run_list);
		/* we must queue the next iteration ourselves, if it
		 * has already been kicked */
		if (kiocbIsKicked(iocb)) {
			__queue_kicked_iocb(iocb);

			/*
			 * __queue_kicked_iocb will always return 1 here, because
			 * iocb->ki_run_list is empty at this point so it should
			 * be safe to unconditionally queue the context into the
			 * work queue.
			 */
			aio_queue_work(ctx);
		}
	}
	return ret;
}

/*
 * __aio_run_iocbs:
 *	Process all pending retries queued on the ioctx
 *	run list.
 *	Assumes it is operating within the aio issuer's mm
 *	context.
 */
static int __aio_run_iocbs(struct kioctx *ctx)
{
	struct kiocb *iocb;
	struct list_head run_list;

	assert_spin_locked(&ctx->ctx_lock);

	list_replace_init(&ctx->run_list, &run_list);
	while (!list_empty(&run_list)) {
		iocb = list_entry(run_list.next, struct kiocb,
			ki_run_list);
		list_del(&iocb->ki_run_list);
		/*
		 * Hold an extra reference while retrying i/o.
		 */
		iocb->ki_users++;	/* grab extra reference */
		aio_run_iocb(iocb);
		__aio_put_req(ctx, iocb);
	}
	if (!list_empty(&ctx->run_list))
		return 1;
	return 0;
}

static void aio_queue_work(struct kioctx * ctx)
{
	unsigned long timeout;
	/*
	 * if someone is waiting, get the work started right
	 * away, otherwise, use a longer delay
	 */
	smp_mb();
	if (waitqueue_active(&ctx->wait))
		timeout = 1;
	else
		timeout = HZ/10;
	queue_delayed_work(aio_wq, &ctx->wq, timeout);
}


/*
 * aio_run_iocbs:
 *	Process all pending retries queued on the ioctx
 *	run list.
 *	Assumes it is operating within the aio issuer's mm
 *	context.
 */
static inline void aio_run_iocbs(struct kioctx *ctx)
{
	int requeue;

	spin_lock_irq(&ctx->ctx_lock);

	requeue = __aio_run_iocbs(ctx);
	spin_unlock_irq(&ctx->ctx_lock);
	if (requeue)
		aio_queue_work(ctx);
}

/*
 * just like aio_run_iocbs, but keeps running them until
 * the list stays empty
 */
static inline void aio_run_all_iocbs(struct kioctx *ctx)
{
	spin_lock_irq(&ctx->ctx_lock);
	while (__aio_run_iocbs(ctx))
		;
	spin_unlock_irq(&ctx->ctx_lock);
}

/*
 * aio_kick_handler:
 *	Work queue handler triggered to process pending
 *	retries on an ioctx. Takes on the aio issuer's
 *	mm context before running the iocbs, so that
 *	copy_xxx_user operates on the issuer's address
 *	space.
 *	Run on aiod's context.
 */
static void aio_kick_handler(struct work_struct *work)
{
	struct kioctx *ctx = container_of(work, struct kioctx, wq.work);
	mm_segment_t oldfs = get_fs();
	struct mm_struct *mm;
	int requeue;

	set_fs(USER_DS);
	use_mm(ctx->mm);
	spin_lock_irq(&ctx->ctx_lock);
	requeue = __aio_run_iocbs(ctx);
	mm = ctx->mm;
	spin_unlock_irq(&ctx->ctx_lock);
	unuse_mm(mm);
	set_fs(oldfs);
	/*
	 * we're in a worker thread already, don't use queue_delayed_work,
	 */
	if (requeue)
		queue_delayed_work(aio_wq, &ctx->wq, 0);
}


/*
 * Called by kick_iocb to queue the kiocb for retry
 * and if required activate the aio work queue to process
 * it
 */
static void try_queue_kicked_iocb(struct kiocb *iocb)
{
	struct kioctx *ctx = iocb->ki_ctx;
	unsigned long flags;
	int run = 0;

	/* We're supposed to be the only path putting the iocb back on the run
	 * list.  If we find that the iocb is *back* on a wait queue already
	 * then retry has happened before we could queue the iocb.  This also
	 * means that the retry could have completed and freed our iocb, no
	 * good.
	 */
	BUG_ON((!list_empty(&iocb->ki_wait.task_list)));

	spin_lock_irqsave(&ctx->ctx_lock, flags);
	/* set this inside the lock so that we can't race with aio_run_iocb()
	 * testing it and putting the iocb on the run list under the lock */
	if (!kiocbTryKick(iocb))
		run = __queue_kicked_iocb(iocb);
	spin_unlock_irqrestore(&ctx->ctx_lock, flags);
	if (run)
		aio_queue_work(ctx);
}

/*
 * kick_iocb:
 *	Called typically from a wait queue callback context
 *	(aio_wake_function) to trigger a retry of the iocb.
 *	The retry is usually executed by aio workqueue
 *	threads (See aio_kick_handler).
 */
void kick_iocb(struct kiocb *iocb)
{
	/* sync iocbs are easy: they can only ever be executing from a
	 * single context. */
	if (is_sync_kiocb(iocb)) {
		kiocbSetKicked(iocb);
		wake_up_process(iocb->ki_obj.tsk);
		return;
	}

	try_queue_kicked_iocb(iocb);
}
EXPORT_SYMBOL(kick_iocb);

/* aio_complete
 *	Called when the io request on the given iocb is complete.
 *	Returns true if this is the last user of the request.  The
 *	only other user of the request can be the cancellation code.
 */
int aio_complete(struct kiocb *iocb, long res, long res2)
{
	struct kioctx *ctx = iocb->ki_ctx;
	struct aio_ring_info *info;
	struct aio_ring *ring;
	struct io_event *event;
	unsigned long flags;
	unsigned long tail;
	int ret;

	/*
	 * Special case handling for sync iocbs:
	 *  - events go directly into the iocb for fast handling
	 *  - the sync task with the iocb in its stack holds the single iocb
	 *    ref, no other paths have a way to get another ref
	 *  - the sync task helpfully left a reference to itself in the iocb
	 */
	if (is_sync_kiocb(iocb)) {
		BUG_ON(iocb->ki_users != 1);
		iocb->ki_user_data = res;
		iocb->ki_users = 0;
		wake_up_process(iocb->ki_obj.tsk);
		return 1;
	}

	info = &ctx->ring_info;

	/* add a completion event to the ring buffer.
	 * must be done holding ctx->ctx_lock to prevent
	 * other code from messing with the tail
	 * pointer since we might be called from irq
	 * context.
	 */
	spin_lock_irqsave(&ctx->ctx_lock, flags);

	if (iocb->ki_run_list.prev && !list_empty(&iocb->ki_run_list))
		list_del_init(&iocb->ki_run_list);

	/*
	 * cancelled requests don't get events, userland was given one
	 * when the event got cancelled.
	 */
	if (kiocbIsCancelled(iocb))
		goto put_rq;

	ring = kmap_atomic(info->ring_pages[0], KM_IRQ1);

	tail = info->tail;
	event = aio_ring_event(info, tail, KM_IRQ0);
	if (++tail >= info->nr)
		tail = 0;

	event->obj = (u64)(unsigned long)iocb->ki_obj.user;
	event->data = iocb->ki_user_data;
	event->res = res;
	event->res2 = res2;

	dprintk("aio_complete: %p[%lu]: %p: %p %Lx %lx %lx\n",
		ctx, tail, iocb, iocb->ki_obj.user, iocb->ki_user_data,
		res, res2);

	/* after flagging the request as done, we
	 * must never even look at it again
	 */
	smp_wmb();	/* make event visible before updating tail */

	info->tail = tail;
	ring->tail = tail;

	put_aio_ring_event(event, KM_IRQ0);
	kunmap_atomic(ring, KM_IRQ1);

	pr_debug("added to ring %p at [%lu]\n", iocb, tail);

	/*
	 * Check if the user asked us to deliver the result through an
	 * eventfd. The eventfd_signal() function is safe to be called
	 * from IRQ context.
	 */
	if (iocb->ki_eventfd != NULL)
		eventfd_signal(iocb->ki_eventfd, 1);

put_rq:
	/* everything turned out well, dispose of the aiocb. */
	ret = __aio_put_req(ctx, iocb);

	/*
	 * We have to order our ring_info tail store above and test
	 * of the wait list below outside the wait lock.  This is
	 * like in wake_up_bit() where clearing a bit has to be
	 * ordered with the unlocked test.
	 */
	smp_mb();

	if (waitqueue_active(&ctx->wait))
		wake_up(&ctx->wait);

	spin_unlock_irqrestore(&ctx->ctx_lock, flags);
	return ret;
}

/* aio_read_evt
 *	Pull an event off of the ioctx's event ring.  Returns the number of
 *	events fetched (0 or 1 ;-)
 *	FIXME: make this use cmpxchg.
 *	TODO: make the ringbuffer user mmap()able (requires FIXME).
 */
static int aio_read_evt(struct kioctx *ioctx, struct io_event *ent)
{
	struct aio_ring_info *info = &ioctx->ring_info;
	struct aio_ring *ring;
	unsigned long head;
	int ret = 0;

	ring = kmap_atomic(info->ring_pages[0], KM_USER0);
	dprintk("in aio_read_evt h%lu t%lu m%lu\n",
		 (unsigned long)ring->head, (unsigned long)ring->tail,
		 (unsigned long)ring->nr);

	if (ring->head == ring->tail)
		goto out;

	spin_lock(&info->ring_lock);

	head = ring->head % info->nr;
	if (head != ring->tail) {
		struct io_event *evp = aio_ring_event(info, head, KM_USER1);
		*ent = *evp;
		head = (head + 1) % info->nr;
		smp_mb(); /* finish reading the event before updating the head */
		ring->head = head;
		ret = 1;
		put_aio_ring_event(evp, KM_USER1);
	}
	spin_unlock(&info->ring_lock);

out:
	kunmap_atomic(ring, KM_USER0);
	dprintk("leaving aio_read_evt: %d  h%lu t%lu\n", ret,
		 (unsigned long)ring->head, (unsigned long)ring->tail);
	return ret;
}

struct aio_timeout {
	struct timer_list	timer;
	int			timed_out;
	struct task_struct	*p;
};

static void timeout_func(unsigned long data)
{
	struct aio_timeout *to = (struct aio_timeout *)data;

	to->timed_out = 1;
	wake_up_process(to->p);
}

static inline void init_timeout(struct aio_timeout *to)
{
	setup_timer_on_stack(&to->timer, timeout_func, (unsigned long) to);
	to->timed_out = 0;
	to->p = current;
}

static inline void set_timeout(long start_jiffies, struct aio_timeout *to,
			       const struct timespec *ts)
{
	to->timer.expires = start_jiffies + timespec_to_jiffies(ts);
	if (time_after(to->timer.expires, jiffies))
		add_timer(&to->timer);
	else
		to->timed_out = 1;
}

static inline void clear_timeout(struct aio_timeout *to)
{
	del_singleshot_timer_sync(&to->timer);
}

static int read_events(struct kioctx *ctx,
			long min_nr, long nr,
			struct io_event __user *event,
			struct timespec __user *timeout)
{
	long start_jiffies = jiffies;
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);
	int ret;
	int i = 0;
	struct io_event ent;
	struct aio_timeout to;
	int retry = 0;

	/* needed to zero any padding within an entry (there shouldn't be
	 * any, but C is fun!)
	 */
	memset(&ent, 0, sizeof(ent));
retry:
	ret = 0;
	while (likely(i < nr)) {
		ret = aio_read_evt(ctx, &ent);
		if (unlikely(ret <= 0))
			break;

		dprintk("read event: %Lx %Lx %Lx %Lx\n",
			ent.data, ent.obj, ent.res, ent.res2);

		/* Could we split the check in two? */
		ret = -EFAULT;
		if (unlikely(copy_to_user(event, &ent, sizeof(ent)))) {
			dprintk("aio: lost an event due to EFAULT.\n");
			break;
		}
		ret = 0;

		/* Good, event copied to userland, update counts. */
		event ++;
		i ++;
	}

	if (min_nr <= i)
		return i;
	if (ret)
		return ret;

	/* End fast path */

	/* racey check, but it gets redone */
	if (!retry && unlikely(!list_empty(&ctx->run_list))) {
		retry = 1;
		aio_run_all_iocbs(ctx);
		goto retry;
	}

	init_timeout(&to);
	if (timeout) {
		struct timespec	ts;
		ret = -EFAULT;
		if (unlikely(copy_from_user(&ts, timeout, sizeof(ts))))
			goto out;

		set_timeout(start_jiffies, &to, &ts);
	}

	while (likely(i < nr)) {
		add_wait_queue_exclusive(&ctx->wait, &wait);
		do {
			set_task_state(tsk, TASK_INTERRUPTIBLE);
			ret = aio_read_evt(ctx, &ent);
			if (ret)
				break;
			if (min_nr <= i)
				break;
			if (unlikely(ctx->dead)) {
				ret = -EINVAL;
				break;
			}
			if (to.timed_out)	/* Only check after read evt */
				break;
			/* Try to only show up in io wait if there are ops
			 * in flight */
			if (ctx->reqs_active)
				io_schedule();
			else
				schedule();
			if (signal_pending(tsk)) {
				ret = -EINTR;
				break;
			}
			/*ret = aio_read_evt(ctx, &ent);*/
		} while (1) ;

		set_task_state(tsk, TASK_RUNNING);
		remove_wait_queue(&ctx->wait, &wait);

		if (unlikely(ret <= 0))
			break;

		ret = -EFAULT;
		if (unlikely(copy_to_user(event, &ent, sizeof(ent)))) {
			dprintk("aio: lost an event due to EFAULT.\n");
			break;
		}

		/* Good, event copied to userland, update counts. */
		event ++;
		i ++;
	}

	if (timeout)
		clear_timeout(&to);
out:
	destroy_timer_on_stack(&to.timer);
	return i ? i : ret;
}

/* Take an ioctx and remove it from the list of ioctx's.  Protects
 * against races with itself via ->dead.
 */
static void io_destroy(struct kioctx *ioctx)
{
	struct mm_struct *mm = current->mm;
	int was_dead;

	/* delete the entry from the list if someone else hasn't already */
	spin_lock(&mm->ioctx_lock);
	was_dead = ioctx->dead;
	ioctx->dead = 1;
	hlist_del_rcu(&ioctx->list);
	spin_unlock(&mm->ioctx_lock);

	dprintk("aio_release(%p)\n", ioctx);
	if (likely(!was_dead))
		put_ioctx(ioctx);	/* twice for the list */

	aio_cancel_all(ioctx);
	wait_for_all_aios(ioctx);

	/*
	 * Wake up any waiters.  The setting of ctx->dead must be seen
	 * by other CPUs at this point.  Right now, we rely on the
	 * locking done by the above calls to ensure this consistency.
	 */
	wake_up(&ioctx->wait);
	put_ioctx(ioctx);	/* once for the lookup */
}

/* sys_io_setup:
 *	Create an aio_context capable of receiving at least nr_events.
 *	ctxp must not point to an aio_context that already exists, and
 *	must be initialized to 0 prior to the call.  On successful
 *	creation of the aio_context, *ctxp is filled in with the resulting
 *	handle.
 *	May fail with -EINVAL if *ctxp is not initialized,
 *	or if the specified nr_events exceeds internal limits.  May fail
 *	with -EAGAIN if the specified nr_events exceeds the user's limit
 *	of available events.  May fail with -ENOMEM if insufficient kernel
 *	resources are available.  May fail with -EFAULT if an invalid
 *	pointer is passed for ctxp.  Will fail with -ENOSYS if not
 *	implemented.
 */
SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp)
{
	struct kioctx *ioctx = NULL;
	unsigned long ctx;
	long ret;

	ret = get_user(ctx, ctxp);
	if (unlikely(ret))
		goto out;

	ret = -EINVAL;
	if (unlikely(ctx || nr_events == 0)) {
		pr_debug("EINVAL: io_setup: ctx %lu nr_events %u\n",
			 ctx, nr_events);
		goto out;
	}

	ioctx = ioctx_alloc(nr_events);
	ret = PTR_ERR(ioctx);
	if (!IS_ERR(ioctx)) {
		ret = put_user(ioctx->user_id, ctxp);
		if (!ret)
			return 0;

		get_ioctx(ioctx); /* io_destroy() expects us to hold a ref */
		io_destroy(ioctx);
	}

out:
	return ret;
}

/* sys_io_destroy:
 *	Destroy the aio_context specified.  May cancel any outstanding
 *	AIOs and block on completion.  Will fail with -ENOSYS if not
 *	implemented.  May fail with -EFAULT if the context pointed to
 *	is invalid.
 */
SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
{
	struct kioctx *ioctx = lookup_ioctx(ctx);
	if (likely(NULL != ioctx)) {
		io_destroy(ioctx);
		return 0;
	}
	pr_debug("EINVAL: io_destroy: invalid context id\n");
	return -EINVAL;
}

static void aio_advance_iovec(struct kiocb *iocb, ssize_t ret)
{
	struct iovec *iov = &iocb->ki_iovec[iocb->ki_cur_seg];

	BUG_ON(ret <= 0);

	while (iocb->ki_cur_seg < iocb->ki_nr_segs && ret > 0) {
		ssize_t this = min((ssize_t)iov->iov_len, ret);
		iov->iov_base += this;
		iov->iov_len -= this;
		iocb->ki_left -= this;
		ret -= this;
		if (iov->iov_len == 0) {
			iocb->ki_cur_seg++;
			iov++;
		}
	}

	/* the caller should not have done more io than what fit in
	 * the remaining iovecs */
	BUG_ON(ret > 0 && iocb->ki_left == 0);
}

static ssize_t aio_rw_vect_retry(struct kiocb *iocb)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	ssize_t (*rw_op)(struct kiocb *, const struct iovec *,
			 unsigned long, loff_t);
	ssize_t ret = 0;
	unsigned short opcode;

	if ((iocb->ki_opcode == IOCB_CMD_PREADV) ||
		(iocb->ki_opcode == IOCB_CMD_PREAD)) {
		rw_op = file->f_op->aio_read;
		opcode = IOCB_CMD_PREADV;
	} else {
		rw_op = file->f_op->aio_write;
		opcode = IOCB_CMD_PWRITEV;
	}

	/* This matches the pread()/pwrite() logic */
	if (iocb->ki_pos < 0)
		return -EINVAL;

	do {
		ret = rw_op(iocb, &iocb->ki_iovec[iocb->ki_cur_seg],
			    iocb->ki_nr_segs - iocb->ki_cur_seg,
			    iocb->ki_pos);
		if (ret > 0)
			aio_advance_iovec(iocb, ret);

	/* retry all partial writes.  retry partial reads as long as it's a
	 * regular file.
	 */
	} while (ret > 0 && iocb->ki_left > 0 &&
		 (opcode == IOCB_CMD_PWRITEV ||
		  (!S_ISFIFO(inode->i_mode) && !S_ISSOCK(inode->i_mode))));

	/* This means we must have transferred all that we could */
	/* No need to retry anymore */
	if ((ret == 0) || (iocb->ki_left == 0))
		ret = iocb->ki_nbytes - iocb->ki_left;

	/* If we managed to write some out we return that, rather than
	 * the eventual error. */
	if (opcode == IOCB_CMD_PWRITEV
	    && ret < 0 && ret != -EIOCBQUEUED && ret != -EIOCBRETRY
	    && iocb->ki_nbytes - iocb->ki_left)
		ret = iocb->ki_nbytes - iocb->ki_left;

	return ret;
}

static ssize_t aio_fdsync(struct kiocb *iocb)
{
	struct file *file = iocb->ki_filp;
	ssize_t ret = -EINVAL;

	if (file->f_op->aio_fsync)
		ret = file->f_op->aio_fsync(iocb, 1);
	return ret;
}

static ssize_t aio_fsync(struct kiocb *iocb)
{
	struct file *file = iocb->ki_filp;
	ssize_t ret = -EINVAL;

	if (file->f_op->aio_fsync)
		ret = file->f_op->aio_fsync(iocb, 0);
	return ret;
}

static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb)
{
	ssize_t ret;

	ret = rw_copy_check_uvector(type, (struct iovec __user *)kiocb->ki_buf,
				    kiocb->ki_nbytes, 1,
				    &kiocb->ki_inline_vec, &kiocb->ki_iovec);
	if (ret < 0)
		goto out;

	kiocb->ki_nr_segs = kiocb->ki_nbytes;
	kiocb->ki_cur_seg = 0;
	/* ki_nbytes/left now reflect bytes instead of segs */
	kiocb->ki_nbytes = ret;
	kiocb->ki_left = ret;

	ret = 0;
out:
	return ret;
}

static ssize_t aio_setup_single_vector(struct kiocb *kiocb)
{
	kiocb->ki_iovec = &kiocb->ki_inline_vec;
	kiocb->ki_iovec->iov_base = kiocb->ki_buf;
	kiocb->ki_iovec->iov_len = kiocb->ki_left;
	kiocb->ki_nr_segs = 1;
	kiocb->ki_cur_seg = 0;
	return 0;
}

/*
 * aio_setup_iocb:
 *	Performs the initial checks and aio retry method
 *	setup for the kiocb at the time of io submission.
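 *
 *	(Summary of the mapping below: IOCB_CMD_PREAD, IOCB_CMD_PWRITE,
 *	IOCB_CMD_PREADV and IOCB_CMD_PWRITEV get aio_rw_vect_retry as their
 *	retry method, IOCB_CMD_FDSYNC gets aio_fdsync and IOCB_CMD_FSYNC
 *	gets aio_fsync; any other opcode, or a file lacking the matching
 *	f_op method, fails with -EINVAL.)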
 */
static ssize_t aio_setup_iocb(struct kiocb *kiocb)
{
	struct file *file = kiocb->ki_filp;
	ssize_t ret = 0;

	switch (kiocb->ki_opcode) {
	case IOCB_CMD_PREAD:
		ret = -EBADF;
		if (unlikely(!(file->f_mode & FMODE_READ)))
			break;
		ret = -EFAULT;
		if (unlikely(!access_ok(VERIFY_WRITE, kiocb->ki_buf,
			kiocb->ki_left)))
			break;
		ret = security_file_permission(file, MAY_READ);
		if (unlikely(ret))
			break;
		ret = aio_setup_single_vector(kiocb);
		if (ret)
			break;
		ret = -EINVAL;
		if (file->f_op->aio_read)
			kiocb->ki_retry = aio_rw_vect_retry;
		break;
	case IOCB_CMD_PWRITE:
		ret = -EBADF;
		if (unlikely(!(file->f_mode & FMODE_WRITE)))
			break;
		ret = -EFAULT;
		if (unlikely(!access_ok(VERIFY_READ, kiocb->ki_buf,
			kiocb->ki_left)))
			break;
		ret = security_file_permission(file, MAY_WRITE);
		if (unlikely(ret))
			break;
		ret = aio_setup_single_vector(kiocb);
		if (ret)
			break;
		ret = -EINVAL;
		if (file->f_op->aio_write)
			kiocb->ki_retry = aio_rw_vect_retry;
		break;
	case IOCB_CMD_PREADV:
		ret = -EBADF;
		if (unlikely(!(file->f_mode & FMODE_READ)))
			break;
		ret = security_file_permission(file, MAY_READ);
		if (unlikely(ret))
			break;
		ret = aio_setup_vectored_rw(READ, kiocb);
		if (ret)
			break;
		ret = -EINVAL;
		if (file->f_op->aio_read)
			kiocb->ki_retry = aio_rw_vect_retry;
		break;
	case IOCB_CMD_PWRITEV:
		ret = -EBADF;
		if (unlikely(!(file->f_mode & FMODE_WRITE)))
			break;
		ret = security_file_permission(file, MAY_WRITE);
		if (unlikely(ret))
			break;
		ret = aio_setup_vectored_rw(WRITE, kiocb);
		if (ret)
			break;
		ret = -EINVAL;
		if (file->f_op->aio_write)
			kiocb->ki_retry = aio_rw_vect_retry;
		break;
	case IOCB_CMD_FDSYNC:
		ret = -EINVAL;
		if (file->f_op->aio_fsync)
			kiocb->ki_retry = aio_fdsync;
		break;
	case IOCB_CMD_FSYNC:
		ret = -EINVAL;
		if (file->f_op->aio_fsync)
			kiocb->ki_retry = aio_fsync;
		break;
	default:
		dprintk("EINVAL: io_submit: no operation provided\n");
		ret = -EINVAL;
	}

	if (!kiocb->ki_retry)
		return ret;

	return 0;
}

/*
 * aio_wake_function:
 *	wait queue callback function for aio notification.
 *	Simply triggers a retry of the operation via kick_iocb.
 *
 * This callback is specified in the wait queue entry in
 * a kiocb.
 *
 * Note:
 * This routine is executed with the wait queue lock held.
 * Since kick_iocb acquires iocb->ctx->ctx_lock, it nests
 * the ioctx lock inside the wait queue lock. This is safe
 * because this callback isn't used for wait queues which
 * are nested inside ioctx lock (i.e. ctx->wait)
 */
static int aio_wake_function(wait_queue_t *wait, unsigned mode,
			     int sync, void *key)
{
	struct kiocb *iocb = container_of(wait, struct kiocb, ki_wait);

	list_del_init(&wait->task_list);
	kick_iocb(iocb);
	return 1;
}

static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
			 struct iocb *iocb)
{
	struct kiocb *req;
	struct file *file;
	ssize_t ret;

	/* enforce forwards compatibility on users */
	if (unlikely(iocb->aio_reserved1 || iocb->aio_reserved2)) {
		pr_debug("EINVAL: io_submit: reserve field set\n");
		return -EINVAL;
	}

	/* prevent overflows */
	if (unlikely(
	    (iocb->aio_buf != (unsigned long)iocb->aio_buf) ||
	    (iocb->aio_nbytes != (size_t)iocb->aio_nbytes) ||
	    ((ssize_t)iocb->aio_nbytes < 0)
	   )) {
		pr_debug("EINVAL: io_submit: overflow check\n");
		return -EINVAL;
	}

	file = fget(iocb->aio_fildes);
	if (unlikely(!file))
		return -EBADF;

	req = aio_get_req(ctx);		/* returns with 2 references to req */
	if (unlikely(!req)) {
		fput(file);
		return -EAGAIN;
	}
	req->ki_filp = file;
	if (iocb->aio_flags & IOCB_FLAG_RESFD) {
		/*
		 * If the IOCB_FLAG_RESFD flag of aio_flags is set, get an
		 * instance of the file* now. The file descriptor must be
		 * an eventfd() fd, and will be signaled for each completed
		 * event using the eventfd_signal() function.
		 */
		req->ki_eventfd = eventfd_fget((int) iocb->aio_resfd);
		if (IS_ERR(req->ki_eventfd)) {
			ret = PTR_ERR(req->ki_eventfd);
			req->ki_eventfd = NULL;
			goto out_put_req;
		}
	}

	ret = put_user(req->ki_key, &user_iocb->aio_key);
	if (unlikely(ret)) {
		dprintk("EFAULT: aio_key\n");
		goto out_put_req;
	}

	req->ki_obj.user = user_iocb;
	req->ki_user_data = iocb->aio_data;
	req->ki_pos = iocb->aio_offset;

	req->ki_buf = (char __user *)(unsigned long)iocb->aio_buf;
	req->ki_left = req->ki_nbytes = iocb->aio_nbytes;
	req->ki_opcode = iocb->aio_lio_opcode;
	init_waitqueue_func_entry(&req->ki_wait, aio_wake_function);
	INIT_LIST_HEAD(&req->ki_wait.task_list);

	ret = aio_setup_iocb(req);

	if (ret)
		goto out_put_req;

	spin_lock_irq(&ctx->ctx_lock);
	aio_run_iocb(req);
	if (!list_empty(&ctx->run_list)) {
		/* drain the run list */
		while (__aio_run_iocbs(ctx))
			;
	}
	spin_unlock_irq(&ctx->ctx_lock);
	aio_put_req(req);	/* drop extra ref to req */
	return 0;

out_put_req:
	aio_put_req(req);	/* drop extra ref to req */
	aio_put_req(req);	/* drop i/o ref to req */
	return ret;
}

/* sys_io_submit:
 *	Queue the nr iocbs pointed to by iocbpp for processing.  Returns
 *	the number of iocbs queued.  May return -EINVAL if the aio_context
 *	specified by ctx_id is invalid, if nr is < 0, if the iocb at
 *	*iocbpp[0] is not properly initialized, if the operation specified
 *	is invalid for the file descriptor in the iocb.  May fail with
 *	-EFAULT if any of the data structures point to invalid data.  May
 *	fail with -EBADF if the file descriptor specified in the first
 *	iocb is invalid.  May fail with -EAGAIN if insufficient resources
 *	are available to queue any iocbs.  Will return 0 if nr is 0.  Will
 *	fail with -ENOSYS if not implemented.
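 *
 *	For illustration only (a hypothetical userspace sketch, not part of
 *	this file's kernel code): a single 4 KiB read could be submitted as
 *
 *		struct iocb cb = { 0 }, *cbs[1] = { &cb };
 *		cb.aio_lio_opcode = IOCB_CMD_PREAD;
 *		cb.aio_fildes     = fd;
 *		cb.aio_buf        = (__u64)(unsigned long)buf;
 *		cb.aio_nbytes     = 4096;
 *		cb.aio_offset     = 0;
 *		ret = syscall(__NR_io_submit, ctx, 1, cbs);
 *
 *	where ctx was previously obtained from io_setup() and the completion
 *	is later reaped with io_getevents().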
 */
SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
		struct iocb __user * __user *, iocbpp)
{
	struct kioctx *ctx;
	long ret = 0;
	int i;

	if (unlikely(nr < 0))
		return -EINVAL;

	if (unlikely(!access_ok(VERIFY_READ, iocbpp, (nr*sizeof(*iocbpp)))))
		return -EFAULT;

	ctx = lookup_ioctx(ctx_id);
	if (unlikely(!ctx)) {
		pr_debug("EINVAL: io_submit: invalid context id\n");
		return -EINVAL;
	}

	/*
	 * AKPM: should this return a partial result if some of the IOs were
	 * successfully submitted?
	 */
	for (i=0; i<nr; i++) {
		struct iocb __user *user_iocb;
		struct iocb tmp;

		if (unlikely(__get_user(user_iocb, iocbpp + i))) {
			ret = -EFAULT;
			break;
		}

		if (unlikely(copy_from_user(&tmp, user_iocb, sizeof(tmp)))) {
			ret = -EFAULT;
			break;
		}

		ret = io_submit_one(ctx, user_iocb, &tmp);
		if (ret)
			break;
	}

	put_ioctx(ctx);
	return i ? i : ret;
}

/* lookup_kiocb
 *	Finds a given iocb for cancellation.
 */
static struct kiocb *lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb,
				  u32 key)
{
	struct list_head *pos;

	assert_spin_locked(&ctx->ctx_lock);

	/* TODO: use a hash or array, this sucks. */
	list_for_each(pos, &ctx->active_reqs) {
		struct kiocb *kiocb = list_kiocb(pos);
		if (kiocb->ki_obj.user == iocb && kiocb->ki_key == key)
			return kiocb;
	}
	return NULL;
}

/* sys_io_cancel:
 *	Attempts to cancel an iocb previously passed to io_submit.  If
 *	the operation is successfully cancelled, the resulting event is
 *	copied into the memory pointed to by result without being placed
 *	into the completion queue and 0 is returned.  May fail with
 *	-EFAULT if any of the data structures pointed to are invalid.
 *	May fail with -EINVAL if aio_context specified by ctx_id is
 *	invalid.  May fail with -EAGAIN if the iocb specified was not
 *	cancelled.  Will fail with -ENOSYS if not implemented.
 */
SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
		struct io_event __user *, result)
{
	int (*cancel)(struct kiocb *iocb, struct io_event *res);
	struct kioctx *ctx;
	struct kiocb *kiocb;
	u32 key;
	int ret;

	ret = get_user(key, &iocb->aio_key);
	if (unlikely(ret))
		return -EFAULT;

	ctx = lookup_ioctx(ctx_id);
	if (unlikely(!ctx))
		return -EINVAL;

	spin_lock_irq(&ctx->ctx_lock);
	ret = -EAGAIN;
	kiocb = lookup_kiocb(ctx, iocb, key);
	if (kiocb && kiocb->ki_cancel) {
		cancel = kiocb->ki_cancel;
		kiocb->ki_users ++;
		kiocbSetCancelled(kiocb);
	} else
		cancel = NULL;
	spin_unlock_irq(&ctx->ctx_lock);

	if (NULL != cancel) {
		struct io_event tmp;
		pr_debug("calling cancel\n");
		memset(&tmp, 0, sizeof(tmp));
		tmp.obj = (u64)(unsigned long)kiocb->ki_obj.user;
		tmp.data = kiocb->ki_user_data;
		ret = cancel(kiocb, &tmp);
		if (!ret) {
			/* Cancellation succeeded -- copy the result
			 * into the user's buffer.
1796 */ 1797 if (copy_to_user(result, &tmp, sizeof(tmp))) 1798 ret = -EFAULT; 1799 } 1800 } else 1801 ret = -EINVAL; 1802 1803 put_ioctx(ctx); 1804 1805 return ret; 1806 } 1807 1808 /* io_getevents: 1809 * Attempts to read at least min_nr events and up to nr events from 1810 * the completion queue for the aio_context specified by ctx_id. May 1811 * fail with -EINVAL if ctx_id is invalid, if min_nr is out of range, 1812 * if nr is out of range, if when is out of range. May fail with 1813 * -EFAULT if any of the memory specified to is invalid. May return 1814 * 0 or < min_nr if no events are available and the timeout specified 1815 * by when has elapsed, where when == NULL specifies an infinite 1816 * timeout. Note that the timeout pointed to by when is relative and 1817 * will be updated if not NULL and the operation blocks. Will fail 1818 * with -ENOSYS if not implemented. 1819 */ 1820 SYSCALL_DEFINE5(io_getevents, aio_context_t, ctx_id, 1821 long, min_nr, 1822 long, nr, 1823 struct io_event __user *, events, 1824 struct timespec __user *, timeout) 1825 { 1826 struct kioctx *ioctx = lookup_ioctx(ctx_id); 1827 long ret = -EINVAL; 1828 1829 if (likely(ioctx)) { 1830 if (likely(min_nr <= nr && min_nr >= 0 && nr >= 0)) 1831 ret = read_events(ioctx, min_nr, nr, events, timeout); 1832 put_ioctx(ioctx); 1833 } 1834 1835 asmlinkage_protect(5, ret, ctx_id, min_nr, nr, events, timeout); 1836 return ret; 1837 } 1838 1839 __initcall(aio_setup); 1840 1841 EXPORT_SYMBOL(aio_complete); 1842 EXPORT_SYMBOL(aio_put_req); 1843 EXPORT_SYMBOL(wait_on_sync_kiocb); 1844