/*
 *	An async IO implementation for Linux
 *	Written by Benjamin LaHaise <bcrl@kvack.org>
 *
 *	Implements an efficient asynchronous io interface.
 *
 *	Copyright 2000, 2001, 2002 Red Hat, Inc.  All Rights Reserved.
 *
 *	See ../COPYING for licensing terms.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/time.h>
#include <linux/aio_abi.h>
#include <linux/module.h>
#include <linux/syscalls.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>

#define DEBUG 0

#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/mmu_context.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/aio.h>
#include <linux/highmem.h>
#include <linux/workqueue.h>
#include <linux/security.h>
#include <linux/eventfd.h>
#include <linux/blkdev.h>
#include <linux/mempool.h>
#include <linux/hash.h>
#include <linux/compat.h>

#include <asm/kmap_types.h>
#include <asm/uaccess.h>

#if DEBUG > 1
#define dprintk		printk
#else
#define dprintk(x...)	do { ; } while (0)
#endif

/*------ sysctl variables----*/
static DEFINE_SPINLOCK(aio_nr_lock);
unsigned long aio_nr;		/* current system wide number of aio requests */
unsigned long aio_max_nr = 0x10000; /* system wide maximum number of aio requests */
/*----end sysctl variables---*/

static struct kmem_cache	*kiocb_cachep;
static struct kmem_cache	*kioctx_cachep;

static struct workqueue_struct *aio_wq;

/* Used for rare fput completion. */
static void aio_fput_routine(struct work_struct *);
static DECLARE_WORK(fput_work, aio_fput_routine);

static DEFINE_SPINLOCK(fput_lock);
static LIST_HEAD(fput_head);

#define AIO_BATCH_HASH_BITS	3 /* allocated on-stack, so don't go crazy */
#define AIO_BATCH_HASH_SIZE	(1 << AIO_BATCH_HASH_BITS)
struct aio_batch_entry {
	struct hlist_node list;
	struct address_space *mapping;
};
mempool_t *abe_pool;

static void aio_kick_handler(struct work_struct *);
static void aio_queue_work(struct kioctx *);

/* aio_setup
 *	Creates the slab caches used by the aio routines, panic on
 *	failure as this is done early during the boot sequence.
 */
static int __init aio_setup(void)
{
	kiocb_cachep = KMEM_CACHE(kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
	kioctx_cachep = KMEM_CACHE(kioctx,SLAB_HWCACHE_ALIGN|SLAB_PANIC);

	aio_wq = create_workqueue("aio");
	abe_pool = mempool_create_kmalloc_pool(1, sizeof(struct aio_batch_entry));
	BUG_ON(!abe_pool);

	pr_debug("aio_setup: sizeof(struct page) = %d\n", (int)sizeof(struct page));

	return 0;
}
__initcall(aio_setup);

static void aio_free_ring(struct kioctx *ctx)
{
	struct aio_ring_info *info = &ctx->ring_info;
	long i;

	for (i=0; i<info->nr_pages; i++)
		put_page(info->ring_pages[i]);

	if (info->mmap_size) {
		down_write(&ctx->mm->mmap_sem);
		do_munmap(ctx->mm, info->mmap_base, info->mmap_size);
		up_write(&ctx->mm->mmap_sem);
	}

	if (info->ring_pages && info->ring_pages != info->internal_pages)
		kfree(info->ring_pages);
	info->ring_pages = NULL;
	info->nr = 0;
}

static int aio_setup_ring(struct kioctx *ctx)
{
	struct aio_ring *ring;
	struct aio_ring_info *info = &ctx->ring_info;
	unsigned nr_events = ctx->max_reqs;
	unsigned long size;
	int nr_pages;

	/* Compensate for the ring buffer's head/tail overlap entry */
	nr_events += 2;	/* 1 is required, 2 for good luck */

	size = sizeof(struct aio_ring);
	size += sizeof(struct io_event) * nr_events;
	nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;

	if (nr_pages < 0)
		return -EINVAL;

	nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);

	info->nr = 0;
	info->ring_pages = info->internal_pages;
	if (nr_pages > AIO_RING_PAGES) {
		info->ring_pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
		if (!info->ring_pages)
			return -ENOMEM;
	}

	info->mmap_size = nr_pages * PAGE_SIZE;
	dprintk("attempting mmap of %lu bytes\n", info->mmap_size);
	down_write(&ctx->mm->mmap_sem);
	info->mmap_base = do_mmap(NULL, 0, info->mmap_size,
				  PROT_READ|PROT_WRITE, MAP_ANONYMOUS|MAP_PRIVATE,
				  0);
	if (IS_ERR((void *)info->mmap_base)) {
		up_write(&ctx->mm->mmap_sem);
		info->mmap_size = 0;
		aio_free_ring(ctx);
		return -EAGAIN;
	}

	dprintk("mmap address: 0x%08lx\n", info->mmap_base);
	info->nr_pages = get_user_pages(current, ctx->mm,
					info->mmap_base, nr_pages,
					1, 0, info->ring_pages, NULL);
	up_write(&ctx->mm->mmap_sem);

	if (unlikely(info->nr_pages != nr_pages)) {
		aio_free_ring(ctx);
		return -EAGAIN;
	}

	ctx->user_id = info->mmap_base;

	info->nr = nr_events;		/* trusted copy */

	ring = kmap_atomic(info->ring_pages[0], KM_USER0);
	ring->nr = nr_events;	/* user copy */
	ring->id = ctx->user_id;
	ring->head = ring->tail = 0;
	ring->magic = AIO_RING_MAGIC;
	ring->compat_features = AIO_RING_COMPAT_FEATURES;
	ring->incompat_features = AIO_RING_INCOMPAT_FEATURES;
	ring->header_length = sizeof(struct aio_ring);
	kunmap_atomic(ring, KM_USER0);

	return 0;
}


/* aio_ring_event: returns a pointer to the event at the given index from
 * kmap_atomic(, km).  Release the pointer with put_aio_ring_event();
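 *
 * As a worked example (a sketch only, assuming 4 KiB pages and the usual
 * 32-byte struct io_event and 32-byte struct aio_ring header):
 * AIO_EVENTS_PER_PAGE is 128, AIO_EVENTS_FIRST_PAGE is 127 (the header
 * steals one slot on the first page) and AIO_EVENTS_OFFSET is 1, so
 * event nr 126 is the last event on ring page 0 and event nr 127 maps
 * to offset 0 of ring page 1.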
 */
#define AIO_EVENTS_PER_PAGE	(PAGE_SIZE / sizeof(struct io_event))
#define AIO_EVENTS_FIRST_PAGE	((PAGE_SIZE - sizeof(struct aio_ring)) / sizeof(struct io_event))
#define AIO_EVENTS_OFFSET	(AIO_EVENTS_PER_PAGE - AIO_EVENTS_FIRST_PAGE)

#define aio_ring_event(info, nr, km) ({					\
	unsigned pos = (nr) + AIO_EVENTS_OFFSET;			\
	struct io_event *__event;					\
	__event = kmap_atomic(						\
			(info)->ring_pages[pos / AIO_EVENTS_PER_PAGE], km); \
	__event += pos % AIO_EVENTS_PER_PAGE;				\
	__event;							\
})

#define put_aio_ring_event(event, km) do {	\
	struct io_event *__event = (event);	\
	(void)__event;				\
	kunmap_atomic((void *)((unsigned long)__event & PAGE_MASK), km); \
} while(0)

static void ctx_rcu_free(struct rcu_head *head)
{
	struct kioctx *ctx = container_of(head, struct kioctx, rcu_head);
	unsigned nr_events = ctx->max_reqs;

	kmem_cache_free(kioctx_cachep, ctx);

	if (nr_events) {
		spin_lock(&aio_nr_lock);
		BUG_ON(aio_nr - nr_events > aio_nr);
		aio_nr -= nr_events;
		spin_unlock(&aio_nr_lock);
	}
}

/* __put_ioctx
 *	Called when the last user of an aio context has gone away,
 *	and the struct needs to be freed.
 */
static void __put_ioctx(struct kioctx *ctx)
{
	BUG_ON(ctx->reqs_active);

	cancel_delayed_work(&ctx->wq);
	cancel_work_sync(&ctx->wq.work);
	aio_free_ring(ctx);
	mmdrop(ctx->mm);
	ctx->mm = NULL;
	pr_debug("__put_ioctx: freeing %p\n", ctx);
	call_rcu(&ctx->rcu_head, ctx_rcu_free);
}

#define get_ioctx(kioctx) do {						\
	BUG_ON(atomic_read(&(kioctx)->users) <= 0);			\
	atomic_inc(&(kioctx)->users);					\
} while (0)
#define put_ioctx(kioctx) do {						\
	BUG_ON(atomic_read(&(kioctx)->users) <= 0);			\
	if (unlikely(atomic_dec_and_test(&(kioctx)->users)))		\
		__put_ioctx(kioctx);					\
} while (0)

/* ioctx_alloc
 *	Allocates and initializes an ioctx.  Returns an ERR_PTR if it failed.
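 *
 *	Callers are expected to check the result with IS_ERR()/PTR_ERR();
 *	a minimal sketch of the expected calling pattern (compare
 *	sys_io_setup() below):
 *
 *		struct kioctx *ioctx = ioctx_alloc(nr_events);
 *		if (IS_ERR(ioctx))
 *			return PTR_ERR(ioctx);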
 */
static struct kioctx *ioctx_alloc(unsigned nr_events)
{
	struct mm_struct *mm;
	struct kioctx *ctx;
	int did_sync = 0;

	/* Prevent overflows */
	if ((nr_events > (0x10000000U / sizeof(struct io_event))) ||
	    (nr_events > (0x10000000U / sizeof(struct kiocb)))) {
		pr_debug("ENOMEM: nr_events too high\n");
		return ERR_PTR(-EINVAL);
	}

	if ((unsigned long)nr_events > aio_max_nr)
		return ERR_PTR(-EAGAIN);

	ctx = kmem_cache_zalloc(kioctx_cachep, GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	ctx->max_reqs = nr_events;
	mm = ctx->mm = current->mm;
	atomic_inc(&mm->mm_count);

	atomic_set(&ctx->users, 1);
	spin_lock_init(&ctx->ctx_lock);
	spin_lock_init(&ctx->ring_info.ring_lock);
	init_waitqueue_head(&ctx->wait);

	INIT_LIST_HEAD(&ctx->active_reqs);
	INIT_LIST_HEAD(&ctx->run_list);
	INIT_DELAYED_WORK(&ctx->wq, aio_kick_handler);

	if (aio_setup_ring(ctx) < 0)
		goto out_freectx;

	/* limit the number of system wide aios */
	do {
		spin_lock_bh(&aio_nr_lock);
		if (aio_nr + nr_events > aio_max_nr ||
		    aio_nr + nr_events < aio_nr)
			ctx->max_reqs = 0;
		else
			aio_nr += ctx->max_reqs;
		spin_unlock_bh(&aio_nr_lock);
		if (ctx->max_reqs || did_sync)
			break;

		/* wait for rcu callbacks to have completed before giving up */
		synchronize_rcu();
		did_sync = 1;
		ctx->max_reqs = nr_events;
	} while (1);

	if (ctx->max_reqs == 0)
		goto out_cleanup;

	/* now link into global list. */
	spin_lock(&mm->ioctx_lock);
	hlist_add_head_rcu(&ctx->list, &mm->ioctx_list);
	spin_unlock(&mm->ioctx_lock);

	dprintk("aio: allocated ioctx %p[%ld]: mm=%p mask=0x%x\n",
		ctx, ctx->user_id, current->mm, ctx->ring_info.nr);
	return ctx;

out_cleanup:
	__put_ioctx(ctx);
	return ERR_PTR(-EAGAIN);

out_freectx:
	mmdrop(mm);
	kmem_cache_free(kioctx_cachep, ctx);
	ctx = ERR_PTR(-ENOMEM);

	dprintk("aio: error allocating ioctx %p\n", ctx);
	return ctx;
}

/* aio_cancel_all
 *	Cancels all outstanding aio requests on an aio context.  Used
 *	when the processes owning a context have all exited to encourage
 *	the rapid destruction of the kioctx.
 */
static void aio_cancel_all(struct kioctx *ctx)
{
	int (*cancel)(struct kiocb *, struct io_event *);
	struct io_event res;
	spin_lock_irq(&ctx->ctx_lock);
	ctx->dead = 1;
	while (!list_empty(&ctx->active_reqs)) {
		struct list_head *pos = ctx->active_reqs.next;
		struct kiocb *iocb = list_kiocb(pos);
		list_del_init(&iocb->ki_list);
		cancel = iocb->ki_cancel;
		kiocbSetCancelled(iocb);
		if (cancel) {
			iocb->ki_users++;
			spin_unlock_irq(&ctx->ctx_lock);
			cancel(iocb, &res);
			spin_lock_irq(&ctx->ctx_lock);
		}
	}
	spin_unlock_irq(&ctx->ctx_lock);
}

static void wait_for_all_aios(struct kioctx *ctx)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);

	spin_lock_irq(&ctx->ctx_lock);
	if (!ctx->reqs_active)
		goto out;

	add_wait_queue(&ctx->wait, &wait);
	set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	while (ctx->reqs_active) {
		spin_unlock_irq(&ctx->ctx_lock);
		io_schedule();
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
		spin_lock_irq(&ctx->ctx_lock);
	}
	__set_task_state(tsk, TASK_RUNNING);
	remove_wait_queue(&ctx->wait, &wait);

out:
	spin_unlock_irq(&ctx->ctx_lock);
}

/* wait_on_sync_kiocb:
 *	Waits on the given sync kiocb to complete.
 */
ssize_t wait_on_sync_kiocb(struct kiocb *iocb)
{
	while (iocb->ki_users) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!iocb->ki_users)
			break;
		io_schedule();
	}
	__set_current_state(TASK_RUNNING);
	return iocb->ki_user_data;
}
EXPORT_SYMBOL(wait_on_sync_kiocb);

/* exit_aio: called when the last user of mm goes away.  At this point,
 * there is no way for any new requests to be submitted or any of the
 * io_* syscalls to be called on the context.  However, there may be
 * outstanding requests which hold references to the context; as they
 * go away, they will call put_ioctx and release any pinned memory
 * associated with the request (held via struct page * references).
 */
void exit_aio(struct mm_struct *mm)
{
	struct kioctx *ctx;

	while (!hlist_empty(&mm->ioctx_list)) {
		ctx = hlist_entry(mm->ioctx_list.first, struct kioctx, list);
		hlist_del_rcu(&ctx->list);

		aio_cancel_all(ctx);

		wait_for_all_aios(ctx);
		/*
		 * Ensure we don't leave the ctx on the aio_wq
		 */
		cancel_work_sync(&ctx->wq.work);

		if (1 != atomic_read(&ctx->users))
			printk(KERN_DEBUG
				"exit_aio:ioctx still alive: %d %d %d\n",
				atomic_read(&ctx->users), ctx->dead,
				ctx->reqs_active);
		put_ioctx(ctx);
	}
}

/* aio_get_req
 *	Allocate a slot for an aio request.  Increments the users count
 *	of the kioctx so that the kioctx stays around until all requests are
 *	complete.  Returns NULL if no requests are free.
 *
 *	Returns with kiocb->users set to 2.  The io submit code path holds
 *	an extra reference while submitting the i/o.
 *	This prevents races between the aio code path referencing the
 *	req (after submitting it) and aio_complete() freeing the req.
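 *
 *	A rough sketch of the resulting reference counting for an ordinary
 *	async request, in one possible ordering (see io_submit_one() and
 *	aio_complete() below):
 *
 *		aio_get_req()		ki_users == 2
 *		aio_put_req()		ki_users == 1	(io_submit_one drops
 *							 its submission ref)
 *		aio_complete()		ki_users == 0	(really_put_req frees
 *							 the kiocb)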
 */
static struct kiocb *__aio_get_req(struct kioctx *ctx)
{
	struct kiocb *req = NULL;
	struct aio_ring *ring;
	int okay = 0;

	req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL);
	if (unlikely(!req))
		return NULL;

	req->ki_flags = 0;
	req->ki_users = 2;
	req->ki_key = 0;
	req->ki_ctx = ctx;
	req->ki_cancel = NULL;
	req->ki_retry = NULL;
	req->ki_dtor = NULL;
	req->private = NULL;
	req->ki_iovec = NULL;
	INIT_LIST_HEAD(&req->ki_run_list);
	req->ki_eventfd = NULL;

	/* Check if the completion queue has enough free space to
	 * accept an event from this io.
	 */
	spin_lock_irq(&ctx->ctx_lock);
	ring = kmap_atomic(ctx->ring_info.ring_pages[0], KM_USER0);
	if (ctx->reqs_active < aio_ring_avail(&ctx->ring_info, ring)) {
		list_add(&req->ki_list, &ctx->active_reqs);
		ctx->reqs_active++;
		okay = 1;
	}
	kunmap_atomic(ring, KM_USER0);
	spin_unlock_irq(&ctx->ctx_lock);

	if (!okay) {
		kmem_cache_free(kiocb_cachep, req);
		req = NULL;
	}

	return req;
}

static inline struct kiocb *aio_get_req(struct kioctx *ctx)
{
	struct kiocb *req;
	/* Handle a potential starvation case -- should be exceedingly rare as
	 * requests will be stuck on fput_head only if the aio_fput_routine is
	 * delayed and the requests were the last user of the struct file.
	 */
	req = __aio_get_req(ctx);
	if (unlikely(NULL == req)) {
		aio_fput_routine(NULL);
		req = __aio_get_req(ctx);
	}
	return req;
}

static inline void really_put_req(struct kioctx *ctx, struct kiocb *req)
{
	assert_spin_locked(&ctx->ctx_lock);

	if (req->ki_eventfd != NULL)
		eventfd_ctx_put(req->ki_eventfd);
	if (req->ki_dtor)
		req->ki_dtor(req);
	if (req->ki_iovec != &req->ki_inline_vec)
		kfree(req->ki_iovec);
	kmem_cache_free(kiocb_cachep, req);
	ctx->reqs_active--;

	if (unlikely(!ctx->reqs_active && ctx->dead))
		wake_up(&ctx->wait);
}

static void aio_fput_routine(struct work_struct *data)
{
	spin_lock_irq(&fput_lock);
	while (likely(!list_empty(&fput_head))) {
		struct kiocb *req = list_kiocb(fput_head.next);
		struct kioctx *ctx = req->ki_ctx;

		list_del(&req->ki_list);
		spin_unlock_irq(&fput_lock);

		/* Complete the fput(s) */
		if (req->ki_filp != NULL)
			fput(req->ki_filp);

		/* Link the iocb into the context's free list */
		spin_lock_irq(&ctx->ctx_lock);
		really_put_req(ctx, req);
		spin_unlock_irq(&ctx->ctx_lock);

		put_ioctx(ctx);
		spin_lock_irq(&fput_lock);
	}
	spin_unlock_irq(&fput_lock);
}

/* __aio_put_req
 *	Returns true if this put was the last user of the request.
 */
static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
{
	dprintk(KERN_DEBUG "aio_put(%p): f_count=%ld\n",
		req, atomic_long_read(&req->ki_filp->f_count));

	assert_spin_locked(&ctx->ctx_lock);

	req->ki_users--;
	BUG_ON(req->ki_users < 0);
	if (likely(req->ki_users))
		return 0;
	list_del(&req->ki_list);		/* remove from active_reqs */
	req->ki_cancel = NULL;
	req->ki_retry = NULL;

	/*
	 * Try to optimize the aio and eventfd file* puts, by avoiding to
	 * schedule work in case it is not final fput() time. In normal cases,
	 * we would not be holding the last reference to the file*, so
	 * this function will be executed w/out any aio kthread wakeup.
	 */
	if (unlikely(!fput_atomic(req->ki_filp))) {
		get_ioctx(ctx);
		spin_lock(&fput_lock);
		list_add(&req->ki_list, &fput_head);
		spin_unlock(&fput_lock);
		queue_work(aio_wq, &fput_work);
	} else {
		req->ki_filp = NULL;
		really_put_req(ctx, req);
	}
	return 1;
}

/* aio_put_req
 *	Returns true if this put was the last user of the kiocb,
 *	false if the request is still in use.
 */
int aio_put_req(struct kiocb *req)
{
	struct kioctx *ctx = req->ki_ctx;
	int ret;
	spin_lock_irq(&ctx->ctx_lock);
	ret = __aio_put_req(ctx, req);
	spin_unlock_irq(&ctx->ctx_lock);
	return ret;
}
EXPORT_SYMBOL(aio_put_req);

static struct kioctx *lookup_ioctx(unsigned long ctx_id)
{
	struct mm_struct *mm = current->mm;
	struct kioctx *ctx, *ret = NULL;
	struct hlist_node *n;

	rcu_read_lock();

	hlist_for_each_entry_rcu(ctx, n, &mm->ioctx_list, list) {
		if (ctx->user_id == ctx_id && !ctx->dead) {
			get_ioctx(ctx);
			ret = ctx;
			break;
		}
	}

	rcu_read_unlock();
	return ret;
}

/*
 * Queue up a kiocb to be retried. Assumes that the kiocb
 * has already been marked as kicked, and places it on
 * the retry run list for the corresponding ioctx, if it
 * isn't already queued. Returns 1 if it actually queued
 * the kiocb (to tell the caller to activate the work
 * queue to process it), or 0, if it found that it was
 * already queued.
 */
static inline int __queue_kicked_iocb(struct kiocb *iocb)
{
	struct kioctx *ctx = iocb->ki_ctx;

	assert_spin_locked(&ctx->ctx_lock);

	if (list_empty(&iocb->ki_run_list)) {
		list_add_tail(&iocb->ki_run_list,
			&ctx->run_list);
		return 1;
	}
	return 0;
}

/* aio_run_iocb
 *	This is the core aio execution routine. It is
 *	invoked both for initial i/o submission and
 *	subsequent retries via the aio_kick_handler.
 *	Expects to be invoked with iocb->ki_ctx->lock
 *	already held. The lock is released and reacquired
 *	as needed during processing.
 *
 *	Calls the iocb retry method (already setup for the
 *	iocb on initial submission) for operation specific
 *	handling, but takes care of most of common retry
 *	execution details for a given iocb. The retry method
 *	needs to be non-blocking as far as possible, to avoid
 *	holding up other iocbs waiting to be serviced by the
 *	retry kernel thread.
 *
 *	The trickier parts in this code have to do with
 *	ensuring that only one retry instance is in progress
 *	for a given iocb at any time. Providing that guarantee
 *	simplifies the coding of individual aio operations as
 *	it avoids various potential races.
 */
static ssize_t aio_run_iocb(struct kiocb *iocb)
{
	struct kioctx *ctx = iocb->ki_ctx;
	ssize_t (*retry)(struct kiocb *);
	ssize_t ret;

	if (!(retry = iocb->ki_retry)) {
		printk("aio_run_iocb: iocb->ki_retry = NULL\n");
		return 0;
	}

	/*
	 * We don't want the next retry iteration for this
	 * operation to start until this one has returned and
	 * updated the iocb state. However, wait_queue functions
	 * can trigger a kick_iocb from interrupt context in the
	 * meantime, indicating that data is available for the next
	 * iteration. We want to remember that and enable the
	 * next retry iteration _after_ we are through with
	 * this one.
	 *
	 * So, in order to be able to register a "kick", but
	 * prevent it from being queued now, we clear the kick
	 * flag, but make the kick code *think* that the iocb is
	 * still on the run list until we are actually done.
	 * When we are done with this iteration, we check if
	 * the iocb was kicked in the meantime and if so, queue
	 * it up afresh.
	 */

	kiocbClearKicked(iocb);

	/*
	 * This is so that aio_complete knows it doesn't need to
	 * pull the iocb off the run list (We can't just call
	 * INIT_LIST_HEAD because we don't want a kick_iocb to
	 * queue this on the run list yet)
	 */
	iocb->ki_run_list.next = iocb->ki_run_list.prev = NULL;
	spin_unlock_irq(&ctx->ctx_lock);

	/* Quit retrying if the i/o has been cancelled */
	if (kiocbIsCancelled(iocb)) {
		ret = -EINTR;
		aio_complete(iocb, ret, 0);
		/* must not access the iocb after this */
		goto out;
	}

	/*
	 * Now we are all set to call the retry method in async
	 * context.
	 */
	ret = retry(iocb);

	if (ret != -EIOCBRETRY && ret != -EIOCBQUEUED)
		aio_complete(iocb, ret, 0);
out:
	spin_lock_irq(&ctx->ctx_lock);

	if (-EIOCBRETRY == ret) {
		/*
		 * OK, now that we are done with this iteration
		 * and know that there is more left to go,
		 * this is where we let go so that a subsequent
		 * "kick" can start the next iteration
		 */

		/* will make __queue_kicked_iocb succeed from here on */
		INIT_LIST_HEAD(&iocb->ki_run_list);
		/* we must queue the next iteration ourselves, if it
		 * has already been kicked */
		if (kiocbIsKicked(iocb)) {
			__queue_kicked_iocb(iocb);

			/*
			 * __queue_kicked_iocb will always return 1 here, because
			 * iocb->ki_run_list is empty at this point so it should
			 * be safe to unconditionally queue the context into the
			 * work queue.
			 */
			aio_queue_work(ctx);
		}
	}
	return ret;
}

/*
 * __aio_run_iocbs:
 * 	Process all pending retries queued on the ioctx
 * 	run list.
 * Assumes it is operating within the aio issuer's mm
 * context.
 */
static int __aio_run_iocbs(struct kioctx *ctx)
{
	struct kiocb *iocb;
	struct list_head run_list;

	assert_spin_locked(&ctx->ctx_lock);

	list_replace_init(&ctx->run_list, &run_list);
	while (!list_empty(&run_list)) {
		iocb = list_entry(run_list.next, struct kiocb,
			ki_run_list);
		list_del(&iocb->ki_run_list);
		/*
		 * Hold an extra reference while retrying i/o.
		 */
		iocb->ki_users++;	/* grab extra reference */
		aio_run_iocb(iocb);
		__aio_put_req(ctx, iocb);
	}
	if (!list_empty(&ctx->run_list))
		return 1;
	return 0;
}

static void aio_queue_work(struct kioctx * ctx)
{
	unsigned long timeout;
	/*
	 * if someone is waiting, get the work started right
	 * away, otherwise, use a longer delay
	 */
	smp_mb();
	if (waitqueue_active(&ctx->wait))
		timeout = 1;
	else
		timeout = HZ/10;
	queue_delayed_work(aio_wq, &ctx->wq, timeout);
}


/*
 * aio_run_iocbs:
 * 	Process all pending retries queued on the ioctx
 * 	run list.
 * Assumes it is operating within the aio issuer's mm
 * context.
 */
static inline void aio_run_iocbs(struct kioctx *ctx)
{
	int requeue;

	spin_lock_irq(&ctx->ctx_lock);

	requeue = __aio_run_iocbs(ctx);
	spin_unlock_irq(&ctx->ctx_lock);
	if (requeue)
		aio_queue_work(ctx);
}

/*
 * just like aio_run_iocbs, but keeps running them until
 * the list stays empty
 */
static inline void aio_run_all_iocbs(struct kioctx *ctx)
{
	spin_lock_irq(&ctx->ctx_lock);
	while (__aio_run_iocbs(ctx))
		;
	spin_unlock_irq(&ctx->ctx_lock);
}

/*
 * aio_kick_handler:
 * 	Work queue handler triggered to process pending
 * 	retries on an ioctx. Takes on the aio issuer's
 *	mm context before running the iocbs, so that
 *	copy_xxx_user operates on the issuer's address
 *	space.
 * Run on aiod's context.
 */
static void aio_kick_handler(struct work_struct *work)
{
	struct kioctx *ctx = container_of(work, struct kioctx, wq.work);
	mm_segment_t oldfs = get_fs();
	struct mm_struct *mm;
	int requeue;

	set_fs(USER_DS);
	use_mm(ctx->mm);
	spin_lock_irq(&ctx->ctx_lock);
	requeue =__aio_run_iocbs(ctx);
	mm = ctx->mm;
	spin_unlock_irq(&ctx->ctx_lock);
	unuse_mm(mm);
	set_fs(oldfs);
	/*
	 * we're in a worker thread already, don't use queue_delayed_work,
	 */
	if (requeue)
		queue_delayed_work(aio_wq, &ctx->wq, 0);
}


/*
 * Called by kick_iocb to queue the kiocb for retry
 * and if required activate the aio work queue to process
 * it
 */
static void try_queue_kicked_iocb(struct kiocb *iocb)
{
	struct kioctx	*ctx = iocb->ki_ctx;
	unsigned long flags;
	int run = 0;

	spin_lock_irqsave(&ctx->ctx_lock, flags);
	/* set this inside the lock so that we can't race with aio_run_iocb()
	 * testing it and putting the iocb on the run list under the lock */
	if (!kiocbTryKick(iocb))
		run = __queue_kicked_iocb(iocb);
	spin_unlock_irqrestore(&ctx->ctx_lock, flags);
	if (run)
		aio_queue_work(ctx);
}

/*
 * kick_iocb:
 *	Called typically from a wait queue callback context
 *	to trigger a retry of the iocb.
 *	The retry is usually executed by aio workqueue
 *	threads (See aio_kick_handler).
 */
void kick_iocb(struct kiocb *iocb)
{
	/* sync iocbs are easy: they can only ever be executing from a
	 * single context. */
	if (is_sync_kiocb(iocb)) {
		kiocbSetKicked(iocb);
		wake_up_process(iocb->ki_obj.tsk);
		return;
	}

	try_queue_kicked_iocb(iocb);
}
EXPORT_SYMBOL(kick_iocb);

/* aio_complete
 *	Called when the io request on the given iocb is complete.
 *	Returns true if this is the last user of the request.  The
 *	only other user of the request can be the cancellation code.
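 *
 *	A minimal sketch of the expected calling convention (the lower-level
 *	driver here is purely illustrative): an f_op->aio_read() method that
 *	queues the i/o returns -EIOCBQUEUED and later, from its completion
 *	path, reports the result with
 *
 *		aio_complete(iocb, nr_bytes_transferred, 0);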
 */
int aio_complete(struct kiocb *iocb, long res, long res2)
{
	struct kioctx	*ctx = iocb->ki_ctx;
	struct aio_ring_info	*info;
	struct aio_ring	*ring;
	struct io_event	*event;
	unsigned long	flags;
	unsigned long	tail;
	int		ret;

	/*
	 * Special case handling for sync iocbs:
	 *  - events go directly into the iocb for fast handling
	 *  - the sync task with the iocb in its stack holds the single iocb
	 *    ref, no other paths have a way to get another ref
	 *  - the sync task helpfully left a reference to itself in the iocb
	 */
	if (is_sync_kiocb(iocb)) {
		BUG_ON(iocb->ki_users != 1);
		iocb->ki_user_data = res;
		iocb->ki_users = 0;
		wake_up_process(iocb->ki_obj.tsk);
		return 1;
	}

	info = &ctx->ring_info;

	/* add a completion event to the ring buffer.
	 * must be done holding ctx->ctx_lock to prevent
	 * other code from messing with the tail
	 * pointer since we might be called from irq
	 * context.
	 */
	spin_lock_irqsave(&ctx->ctx_lock, flags);

	if (iocb->ki_run_list.prev && !list_empty(&iocb->ki_run_list))
		list_del_init(&iocb->ki_run_list);

	/*
	 * cancelled requests don't get events, userland was given one
	 * when the event got cancelled.
	 */
	if (kiocbIsCancelled(iocb))
		goto put_rq;

	ring = kmap_atomic(info->ring_pages[0], KM_IRQ1);

	tail = info->tail;
	event = aio_ring_event(info, tail, KM_IRQ0);
	if (++tail >= info->nr)
		tail = 0;

	event->obj = (u64)(unsigned long)iocb->ki_obj.user;
	event->data = iocb->ki_user_data;
	event->res = res;
	event->res2 = res2;

	dprintk("aio_complete: %p[%lu]: %p: %p %Lx %lx %lx\n",
		ctx, tail, iocb, iocb->ki_obj.user, iocb->ki_user_data,
		res, res2);

	/* after flagging the request as done, we
	 * must never even look at it again
	 */
	smp_wmb();	/* make event visible before updating tail */

	info->tail = tail;
	ring->tail = tail;

	put_aio_ring_event(event, KM_IRQ0);
	kunmap_atomic(ring, KM_IRQ1);

	pr_debug("added to ring %p at [%lu]\n", iocb, tail);

	/*
	 * Check if the user asked us to deliver the result through an
	 * eventfd. The eventfd_signal() function is safe to be called
	 * from IRQ context.
	 */
	if (iocb->ki_eventfd != NULL)
		eventfd_signal(iocb->ki_eventfd, 1);

put_rq:
	/* everything turned out well, dispose of the aiocb. */
	ret = __aio_put_req(ctx, iocb);

	/*
	 * We have to order our ring_info tail store above and test
	 * of the wait list below outside the wait lock.  This is
	 * like in wake_up_bit() where clearing a bit has to be
	 * ordered with the unlocked test.
	 */
	smp_mb();

	if (waitqueue_active(&ctx->wait))
		wake_up(&ctx->wait);

	spin_unlock_irqrestore(&ctx->ctx_lock, flags);
	return ret;
}
EXPORT_SYMBOL(aio_complete);

/* aio_read_evt
 *	Pull an event off of the ioctx's event ring.  Returns the number of
 *	events fetched (0 or 1 ;-)
 *	FIXME: make this use cmpxchg.
 *	TODO: make the ringbuffer user mmap()able (requires FIXME).
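 *
 *	A small worked example of the head/tail arithmetic used below
 *	(values are illustrative only): with info->nr == 128, head == 126
 *	and tail == 2, the pending events are at indices 126, 127, 0 and 1;
 *	consuming one event copies index 126 out and advances head to 127.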
 */
static int aio_read_evt(struct kioctx *ioctx, struct io_event *ent)
{
	struct aio_ring_info *info = &ioctx->ring_info;
	struct aio_ring *ring;
	unsigned long head;
	int ret = 0;

	ring = kmap_atomic(info->ring_pages[0], KM_USER0);
	dprintk("in aio_read_evt h%lu t%lu m%lu\n",
		 (unsigned long)ring->head, (unsigned long)ring->tail,
		 (unsigned long)ring->nr);

	if (ring->head == ring->tail)
		goto out;

	spin_lock(&info->ring_lock);

	head = ring->head % info->nr;
	if (head != ring->tail) {
		struct io_event *evp = aio_ring_event(info, head, KM_USER1);
		*ent = *evp;
		head = (head + 1) % info->nr;
		smp_mb(); /* finish reading the event before updating the head */
		ring->head = head;
		ret = 1;
		put_aio_ring_event(evp, KM_USER1);
	}
	spin_unlock(&info->ring_lock);

out:
	kunmap_atomic(ring, KM_USER0);
	dprintk("leaving aio_read_evt: %d  h%lu t%lu\n", ret,
		 (unsigned long)ring->head, (unsigned long)ring->tail);
	return ret;
}

struct aio_timeout {
	struct timer_list	timer;
	int			timed_out;
	struct task_struct	*p;
};

static void timeout_func(unsigned long data)
{
	struct aio_timeout *to = (struct aio_timeout *)data;

	to->timed_out = 1;
	wake_up_process(to->p);
}

static inline void init_timeout(struct aio_timeout *to)
{
	setup_timer_on_stack(&to->timer, timeout_func, (unsigned long) to);
	to->timed_out = 0;
	to->p = current;
}

static inline void set_timeout(long start_jiffies, struct aio_timeout *to,
			       const struct timespec *ts)
{
	to->timer.expires = start_jiffies + timespec_to_jiffies(ts);
	if (time_after(to->timer.expires, jiffies))
		add_timer(&to->timer);
	else
		to->timed_out = 1;
}

static inline void clear_timeout(struct aio_timeout *to)
{
	del_singleshot_timer_sync(&to->timer);
}

static int read_events(struct kioctx *ctx,
			long min_nr, long nr,
			struct io_event __user *event,
			struct timespec __user *timeout)
{
	long			start_jiffies = jiffies;
	struct task_struct	*tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);
	int			ret;
	int			i = 0;
	struct io_event		ent;
	struct aio_timeout	to;
	int			retry = 0;

	/* needed to zero any padding within an entry (there shouldn't be
	 * any, but C is fun!)
	 */
	memset(&ent, 0, sizeof(ent));
retry:
	ret = 0;
	while (likely(i < nr)) {
		ret = aio_read_evt(ctx, &ent);
		if (unlikely(ret <= 0))
			break;

		dprintk("read event: %Lx %Lx %Lx %Lx\n",
			ent.data, ent.obj, ent.res, ent.res2);

		/* Could we split the check in two? */
		ret = -EFAULT;
		if (unlikely(copy_to_user(event, &ent, sizeof(ent)))) {
			dprintk("aio: lost an event due to EFAULT.\n");
			break;
		}
		ret = 0;

		/* Good, event copied to userland, update counts. */
		event ++;
		i ++;
	}

	if (min_nr <= i)
		return i;
	if (ret)
		return ret;

	/* End fast path */

	/* racey check, but it gets redone */
	if (!retry && unlikely(!list_empty(&ctx->run_list))) {
		retry = 1;
		aio_run_all_iocbs(ctx);
		goto retry;
	}

	init_timeout(&to);
	if (timeout) {
		struct timespec	ts;
		ret = -EFAULT;
		if (unlikely(copy_from_user(&ts, timeout, sizeof(ts))))
			goto out;

		set_timeout(start_jiffies, &to, &ts);
	}

	while (likely(i < nr)) {
		add_wait_queue_exclusive(&ctx->wait, &wait);
		do {
			set_task_state(tsk, TASK_INTERRUPTIBLE);
			ret = aio_read_evt(ctx, &ent);
			if (ret)
				break;
			if (min_nr <= i)
				break;
			if (unlikely(ctx->dead)) {
				ret = -EINVAL;
				break;
			}
			if (to.timed_out)	/* Only check after read evt */
				break;
			/* Try to only show up in io wait if there are ops
			 * in flight */
			if (ctx->reqs_active)
				io_schedule();
			else
				schedule();
			if (signal_pending(tsk)) {
				ret = -EINTR;
				break;
			}
			/*ret = aio_read_evt(ctx, &ent);*/
		} while (1) ;

		set_task_state(tsk, TASK_RUNNING);
		remove_wait_queue(&ctx->wait, &wait);

		if (unlikely(ret <= 0))
			break;

		ret = -EFAULT;
		if (unlikely(copy_to_user(event, &ent, sizeof(ent)))) {
			dprintk("aio: lost an event due to EFAULT.\n");
			break;
		}

		/* Good, event copied to userland, update counts. */
		event ++;
		i ++;
	}

	if (timeout)
		clear_timeout(&to);
out:
	destroy_timer_on_stack(&to.timer);
	return i ? i : ret;
}

/* Take an ioctx and remove it from the list of ioctx's.  Protects
 * against races with itself via ->dead.
 */
static void io_destroy(struct kioctx *ioctx)
{
	struct mm_struct *mm = current->mm;
	int was_dead;

	/* delete the entry from the list if someone else hasn't already */
	spin_lock(&mm->ioctx_lock);
	was_dead = ioctx->dead;
	ioctx->dead = 1;
	hlist_del_rcu(&ioctx->list);
	spin_unlock(&mm->ioctx_lock);

	dprintk("aio_release(%p)\n", ioctx);
	if (likely(!was_dead))
		put_ioctx(ioctx);	/* twice for the list */

	aio_cancel_all(ioctx);
	wait_for_all_aios(ioctx);

	/*
	 * Wake up any waiters.  The setting of ctx->dead must be seen
	 * by other CPUs at this point.  Right now, we rely on the
	 * locking done by the above calls to ensure this consistency.
	 */
	wake_up(&ioctx->wait);
	put_ioctx(ioctx);	/* once for the lookup */
}

/* sys_io_setup:
 *	Create an aio_context capable of receiving at least nr_events.
 *	ctxp must not point to an aio_context that already exists, and
 *	must be initialized to 0 prior to the call.  On successful
 *	creation of the aio_context, *ctxp is filled in with the resulting
 *	handle.  May fail with -EINVAL if *ctxp is not initialized, or if
 *	the specified nr_events exceeds internal limits.  May fail
 *	with -EAGAIN if the specified nr_events exceeds the user's limit
 *	of available events.  May fail with -ENOMEM if insufficient kernel
 *	resources are available.  May fail with -EFAULT if an invalid
 *	pointer is passed for ctxp.  Will fail with -ENOSYS if not
 *	implemented.
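 *
 *	A minimal sketch of the expected call from userland (error handling
 *	trimmed; the raw syscall form shown here is only one way to reach
 *	this entry point, libaio's io_setup() wraps the same call):
 *
 *		aio_context_t ctx = 0;		(must be zeroed beforehand)
 *		if (syscall(__NR_io_setup, 128, &ctx) < 0)
 *			perror("io_setup");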
 */
SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp)
{
	struct kioctx *ioctx = NULL;
	unsigned long ctx;
	long ret;

	ret = get_user(ctx, ctxp);
	if (unlikely(ret))
		goto out;

	ret = -EINVAL;
	if (unlikely(ctx || nr_events == 0)) {
		pr_debug("EINVAL: io_setup: ctx %lu nr_events %u\n",
		         ctx, nr_events);
		goto out;
	}

	ioctx = ioctx_alloc(nr_events);
	ret = PTR_ERR(ioctx);
	if (!IS_ERR(ioctx)) {
		ret = put_user(ioctx->user_id, ctxp);
		if (!ret)
			return 0;

		get_ioctx(ioctx); /* io_destroy() expects us to hold a ref */
		io_destroy(ioctx);
	}

out:
	return ret;
}

/* sys_io_destroy:
 *	Destroy the aio_context specified.  May cancel any outstanding
 *	AIOs and block on completion.  Will fail with -ENOSYS if not
 *	implemented.  May fail with -EINVAL if the context pointed to
 *	is invalid.
 */
SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
{
	struct kioctx *ioctx = lookup_ioctx(ctx);
	if (likely(NULL != ioctx)) {
		io_destroy(ioctx);
		return 0;
	}
	pr_debug("EINVAL: io_destroy: invalid context id\n");
	return -EINVAL;
}

static void aio_advance_iovec(struct kiocb *iocb, ssize_t ret)
{
	struct iovec *iov = &iocb->ki_iovec[iocb->ki_cur_seg];

	BUG_ON(ret <= 0);

	while (iocb->ki_cur_seg < iocb->ki_nr_segs && ret > 0) {
		ssize_t this = min((ssize_t)iov->iov_len, ret);
		iov->iov_base += this;
		iov->iov_len -= this;
		iocb->ki_left -= this;
		ret -= this;
		if (iov->iov_len == 0) {
			iocb->ki_cur_seg++;
			iov++;
		}
	}

	/* the caller should not have done more io than what fit in
	 * the remaining iovecs */
	BUG_ON(ret > 0 && iocb->ki_left == 0);
}

static ssize_t aio_rw_vect_retry(struct kiocb *iocb)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	ssize_t (*rw_op)(struct kiocb *, const struct iovec *,
			 unsigned long, loff_t);
	ssize_t ret = 0;
	unsigned short opcode;

	if ((iocb->ki_opcode == IOCB_CMD_PREADV) ||
		(iocb->ki_opcode == IOCB_CMD_PREAD)) {
		rw_op = file->f_op->aio_read;
		opcode = IOCB_CMD_PREADV;
	} else {
		rw_op = file->f_op->aio_write;
		opcode = IOCB_CMD_PWRITEV;
	}

	/* This matches the pread()/pwrite() logic */
	if (iocb->ki_pos < 0)
		return -EINVAL;

	do {
		ret = rw_op(iocb, &iocb->ki_iovec[iocb->ki_cur_seg],
			    iocb->ki_nr_segs - iocb->ki_cur_seg,
			    iocb->ki_pos);
		if (ret > 0)
			aio_advance_iovec(iocb, ret);

	/* retry all partial writes.  retry partial reads as long as it's a
	 * regular file. */
	} while (ret > 0 && iocb->ki_left > 0 &&
		 (opcode == IOCB_CMD_PWRITEV ||
		  (!S_ISFIFO(inode->i_mode) && !S_ISSOCK(inode->i_mode))));

	/* This means we must have transferred all that we could */
	/* No need to retry anymore */
	if ((ret == 0) || (iocb->ki_left == 0))
		ret = iocb->ki_nbytes - iocb->ki_left;

	/* If we managed to write some out we return that, rather than
	 * the eventual error.
	 */
	if (opcode == IOCB_CMD_PWRITEV
	    && ret < 0 && ret != -EIOCBQUEUED && ret != -EIOCBRETRY
	    && iocb->ki_nbytes - iocb->ki_left)
		ret = iocb->ki_nbytes - iocb->ki_left;

	return ret;
}

static ssize_t aio_fdsync(struct kiocb *iocb)
{
	struct file *file = iocb->ki_filp;
	ssize_t ret = -EINVAL;

	if (file->f_op->aio_fsync)
		ret = file->f_op->aio_fsync(iocb, 1);
	return ret;
}

static ssize_t aio_fsync(struct kiocb *iocb)
{
	struct file *file = iocb->ki_filp;
	ssize_t ret = -EINVAL;

	if (file->f_op->aio_fsync)
		ret = file->f_op->aio_fsync(iocb, 0);
	return ret;
}

static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
{
	ssize_t ret;

#ifdef CONFIG_COMPAT
	if (compat)
		ret = compat_rw_copy_check_uvector(type,
				(struct compat_iovec __user *)kiocb->ki_buf,
				kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
				&kiocb->ki_iovec);
	else
#endif
		ret = rw_copy_check_uvector(type,
				(struct iovec __user *)kiocb->ki_buf,
				kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
				&kiocb->ki_iovec);
	if (ret < 0)
		goto out;

	kiocb->ki_nr_segs = kiocb->ki_nbytes;
	kiocb->ki_cur_seg = 0;
	/* ki_nbytes/left now reflect bytes instead of segs */
	kiocb->ki_nbytes = ret;
	kiocb->ki_left = ret;

	ret = 0;
out:
	return ret;
}

static ssize_t aio_setup_single_vector(struct kiocb *kiocb)
{
	kiocb->ki_iovec = &kiocb->ki_inline_vec;
	kiocb->ki_iovec->iov_base = kiocb->ki_buf;
	kiocb->ki_iovec->iov_len = kiocb->ki_left;
	kiocb->ki_nr_segs = 1;
	kiocb->ki_cur_seg = 0;
	return 0;
}

/*
 * aio_setup_iocb:
 *	Performs the initial checks and aio retry method
 *	setup for the kiocb at the time of io submission.
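 *
 *	For reference, a sketch of the userland struct iocb that ends up
 *	here for a plain read (field names from <linux/aio_abi.h>; the
 *	buffer and fd are illustrative):
 *
 *		struct iocb cb = { 0 };
 *		cb.aio_lio_opcode = IOCB_CMD_PREAD;
 *		cb.aio_fildes     = fd;
 *		cb.aio_buf        = (__u64)(unsigned long)buf;
 *		cb.aio_nbytes     = 4096;
 *		cb.aio_offset     = 0;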
 */
static ssize_t aio_setup_iocb(struct kiocb *kiocb, bool compat)
{
	struct file *file = kiocb->ki_filp;
	ssize_t ret = 0;

	switch (kiocb->ki_opcode) {
	case IOCB_CMD_PREAD:
		ret = -EBADF;
		if (unlikely(!(file->f_mode & FMODE_READ)))
			break;
		ret = -EFAULT;
		if (unlikely(!access_ok(VERIFY_WRITE, kiocb->ki_buf,
			kiocb->ki_left)))
			break;
		ret = security_file_permission(file, MAY_READ);
		if (unlikely(ret))
			break;
		ret = aio_setup_single_vector(kiocb);
		if (ret)
			break;
		ret = -EINVAL;
		if (file->f_op->aio_read)
			kiocb->ki_retry = aio_rw_vect_retry;
		break;
	case IOCB_CMD_PWRITE:
		ret = -EBADF;
		if (unlikely(!(file->f_mode & FMODE_WRITE)))
			break;
		ret = -EFAULT;
		if (unlikely(!access_ok(VERIFY_READ, kiocb->ki_buf,
			kiocb->ki_left)))
			break;
		ret = security_file_permission(file, MAY_WRITE);
		if (unlikely(ret))
			break;
		ret = aio_setup_single_vector(kiocb);
		if (ret)
			break;
		ret = -EINVAL;
		if (file->f_op->aio_write)
			kiocb->ki_retry = aio_rw_vect_retry;
		break;
	case IOCB_CMD_PREADV:
		ret = -EBADF;
		if (unlikely(!(file->f_mode & FMODE_READ)))
			break;
		ret = security_file_permission(file, MAY_READ);
		if (unlikely(ret))
			break;
		ret = aio_setup_vectored_rw(READ, kiocb, compat);
		if (ret)
			break;
		ret = -EINVAL;
		if (file->f_op->aio_read)
			kiocb->ki_retry = aio_rw_vect_retry;
		break;
	case IOCB_CMD_PWRITEV:
		ret = -EBADF;
		if (unlikely(!(file->f_mode & FMODE_WRITE)))
			break;
		ret = security_file_permission(file, MAY_WRITE);
		if (unlikely(ret))
			break;
		ret = aio_setup_vectored_rw(WRITE, kiocb, compat);
		if (ret)
			break;
		ret = -EINVAL;
		if (file->f_op->aio_write)
			kiocb->ki_retry = aio_rw_vect_retry;
		break;
	case IOCB_CMD_FDSYNC:
		ret = -EINVAL;
		if (file->f_op->aio_fsync)
			kiocb->ki_retry = aio_fdsync;
		break;
	case IOCB_CMD_FSYNC:
		ret = -EINVAL;
		if (file->f_op->aio_fsync)
			kiocb->ki_retry = aio_fsync;
		break;
	default:
		dprintk("EINVAL: io_submit: no operation provided\n");
		ret = -EINVAL;
	}

	if (!kiocb->ki_retry)
		return ret;

	return 0;
}

static void aio_batch_add(struct address_space *mapping,
			  struct hlist_head *batch_hash)
{
	struct aio_batch_entry *abe;
	struct hlist_node *pos;
	unsigned bucket;

	bucket = hash_ptr(mapping, AIO_BATCH_HASH_BITS);
	hlist_for_each_entry(abe, pos, &batch_hash[bucket], list) {
		if (abe->mapping == mapping)
			return;
	}

	abe = mempool_alloc(abe_pool, GFP_KERNEL);
	BUG_ON(!igrab(mapping->host));
	abe->mapping = mapping;
	hlist_add_head(&abe->list, &batch_hash[bucket]);
	return;
}

static void aio_batch_free(struct hlist_head *batch_hash)
{
	struct aio_batch_entry *abe;
	struct hlist_node *pos, *n;
	int i;

	for (i = 0; i < AIO_BATCH_HASH_SIZE; i++) {
		hlist_for_each_entry_safe(abe, pos, n, &batch_hash[i], list) {
			blk_run_address_space(abe->mapping);
			iput(abe->mapping->host);
			hlist_del(&abe->list);
			mempool_free(abe, abe_pool);
		}
	}
}

static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
			 struct iocb *iocb, struct hlist_head *batch_hash,
			 bool compat)
{
	struct kiocb *req;
	struct file *file;
	ssize_t ret;

	/* enforce forwards compatibility on users */
	if (unlikely(iocb->aio_reserved1 || iocb->aio_reserved2)) {
		pr_debug("EINVAL: io_submit: reserve field set\n");
		return -EINVAL;
	}

	/* prevent overflows */
	if (unlikely(
	    (iocb->aio_buf != (unsigned long)iocb->aio_buf) ||
	    (iocb->aio_nbytes != (size_t)iocb->aio_nbytes) ||
	    ((ssize_t)iocb->aio_nbytes < 0)
	   )) {
		pr_debug("EINVAL: io_submit: overflow check\n");
		return -EINVAL;
	}

	file = fget(iocb->aio_fildes);
	if (unlikely(!file))
		return -EBADF;

	req = aio_get_req(ctx);		/* returns with 2 references to req */
	if (unlikely(!req)) {
		fput(file);
		return -EAGAIN;
	}
	req->ki_filp = file;
	if (iocb->aio_flags & IOCB_FLAG_RESFD) {
		/*
		 * If the IOCB_FLAG_RESFD flag of aio_flags is set, get an
		 * instance of the file* now. The file descriptor must be
		 * an eventfd() fd, and will be signaled for each completed
		 * event using the eventfd_signal() function.
		 */
		req->ki_eventfd = eventfd_ctx_fdget((int) iocb->aio_resfd);
		if (IS_ERR(req->ki_eventfd)) {
			ret = PTR_ERR(req->ki_eventfd);
			req->ki_eventfd = NULL;
			goto out_put_req;
		}
	}

	ret = put_user(req->ki_key, &user_iocb->aio_key);
	if (unlikely(ret)) {
		dprintk("EFAULT: aio_key\n");
		goto out_put_req;
	}

	req->ki_obj.user = user_iocb;
	req->ki_user_data = iocb->aio_data;
	req->ki_pos = iocb->aio_offset;

	req->ki_buf = (char __user *)(unsigned long)iocb->aio_buf;
	req->ki_left = req->ki_nbytes = iocb->aio_nbytes;
	req->ki_opcode = iocb->aio_lio_opcode;

	ret = aio_setup_iocb(req, compat);

	if (ret)
		goto out_put_req;

	spin_lock_irq(&ctx->ctx_lock);
	aio_run_iocb(req);
	if (!list_empty(&ctx->run_list)) {
		/* drain the run list */
		while (__aio_run_iocbs(ctx))
			;
	}
	spin_unlock_irq(&ctx->ctx_lock);
	if (req->ki_opcode == IOCB_CMD_PREAD ||
	    req->ki_opcode == IOCB_CMD_PREADV ||
	    req->ki_opcode == IOCB_CMD_PWRITE ||
	    req->ki_opcode == IOCB_CMD_PWRITEV)
		aio_batch_add(file->f_mapping, batch_hash);

	aio_put_req(req);	/* drop extra ref to req */
	return 0;

out_put_req:
	aio_put_req(req);	/* drop extra ref to req */
	aio_put_req(req);	/* drop i/o ref to req */
	return ret;
}

long do_io_submit(aio_context_t ctx_id, long nr,
		  struct iocb __user *__user *iocbpp, bool compat)
{
	struct kioctx *ctx;
	long ret = 0;
	int i;
	struct hlist_head batch_hash[AIO_BATCH_HASH_SIZE] = { { 0, }, };

	if (unlikely(nr < 0))
		return -EINVAL;

	if (unlikely(!access_ok(VERIFY_READ, iocbpp, (nr*sizeof(*iocbpp)))))
		return -EFAULT;

	ctx = lookup_ioctx(ctx_id);
	if (unlikely(!ctx)) {
		pr_debug("EINVAL: io_submit: invalid context id\n");
		return -EINVAL;
	}

	/*
	 * AKPM: should this return a partial result if some of the IOs were
	 * successfully submitted?
	 */
	for (i=0; i<nr; i++) {
		struct iocb __user *user_iocb;
		struct iocb tmp;

		if (unlikely(__get_user(user_iocb, iocbpp + i))) {
			ret = -EFAULT;
			break;
		}

		if (unlikely(copy_from_user(&tmp, user_iocb, sizeof(tmp)))) {
			ret = -EFAULT;
			break;
		}

		ret = io_submit_one(ctx, user_iocb, &tmp, batch_hash, compat);
		if (ret)
			break;
	}
	aio_batch_free(batch_hash);

	put_ioctx(ctx);
	return i ? i : ret;
}

/* sys_io_submit:
 *	Queue the nr iocbs pointed to by iocbpp for processing.  Returns
 *	the number of iocbs queued.  May return -EINVAL if the aio_context
 *	specified by ctx_id is invalid, if nr is < 0, if the iocb at
 *	*iocbpp[0] is not properly initialized, if the operation specified
 *	is invalid for the file descriptor in the iocb.  May fail with
 *	-EFAULT if any of the data structures point to invalid data.  May
 *	fail with -EBADF if the file descriptor specified in the first
 *	iocb is invalid.  May fail with -EAGAIN if insufficient resources
 *	are available to queue any iocbs.  Will return 0 if nr is 0.  Will
 *	fail with -ENOSYS if not implemented.
 */
SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
		struct iocb __user * __user *, iocbpp)
{
	return do_io_submit(ctx_id, nr, iocbpp, 0);
}

/* lookup_kiocb
 *	Finds a given iocb for cancellation.
 */
static struct kiocb *lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb,
				  u32 key)
{
	struct list_head *pos;

	assert_spin_locked(&ctx->ctx_lock);

	/* TODO: use a hash or array, this sucks. */
	list_for_each(pos, &ctx->active_reqs) {
		struct kiocb *kiocb = list_kiocb(pos);
		if (kiocb->ki_obj.user == iocb && kiocb->ki_key == key)
			return kiocb;
	}
	return NULL;
}

/* sys_io_cancel:
 *	Attempts to cancel an iocb previously passed to io_submit.  If
 *	the operation is successfully cancelled, the resulting event is
 *	copied into the memory pointed to by result without being placed
 *	into the completion queue and 0 is returned.  May fail with
 *	-EFAULT if any of the data structures pointed to are invalid.
 *	May fail with -EINVAL if aio_context specified by ctx_id is
 *	invalid.  May fail with -EAGAIN if the iocb specified was not
 *	cancelled.  Will fail with -ENOSYS if not implemented.
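 *
 *	A minimal sketch of the call from userland (illustrative only; cb
 *	must be the same struct iocb that was passed to io_submit):
 *
 *		struct io_event ev;
 *		if (syscall(__NR_io_cancel, ctx, &cb, &ev) < 0)
 *			perror("io_cancel");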
 */
SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
		struct io_event __user *, result)
{
	int (*cancel)(struct kiocb *iocb, struct io_event *res);
	struct kioctx *ctx;
	struct kiocb *kiocb;
	u32 key;
	int ret;

	ret = get_user(key, &iocb->aio_key);
	if (unlikely(ret))
		return -EFAULT;

	ctx = lookup_ioctx(ctx_id);
	if (unlikely(!ctx))
		return -EINVAL;

	spin_lock_irq(&ctx->ctx_lock);
	ret = -EAGAIN;
	kiocb = lookup_kiocb(ctx, iocb, key);
	if (kiocb && kiocb->ki_cancel) {
		cancel = kiocb->ki_cancel;
		kiocb->ki_users ++;
		kiocbSetCancelled(kiocb);
	} else
		cancel = NULL;
	spin_unlock_irq(&ctx->ctx_lock);

	if (NULL != cancel) {
		struct io_event tmp;
		pr_debug("calling cancel\n");
		memset(&tmp, 0, sizeof(tmp));
		tmp.obj = (u64)(unsigned long)kiocb->ki_obj.user;
		tmp.data = kiocb->ki_user_data;
		ret = cancel(kiocb, &tmp);
		if (!ret) {
			/* Cancellation succeeded -- copy the result
			 * into the user's buffer.
			 */
			if (copy_to_user(result, &tmp, sizeof(tmp)))
				ret = -EFAULT;
		}
	} else
		ret = -EINVAL;

	put_ioctx(ctx);

	return ret;
}

/* io_getevents:
 *	Attempts to read at least min_nr events and up to nr events from
 *	the completion queue for the aio_context specified by ctx_id. If
 *	it succeeds, the number of read events is returned. May fail with
 *	-EINVAL if ctx_id is invalid, if min_nr is out of range, if nr is
 *	out of range, if timeout is out of range.  May fail with -EFAULT
 *	if any of the memory specified is invalid.  May return 0 or
 *	< min_nr if the timeout specified by timeout has elapsed
 *	before sufficient events are available, where timeout == NULL
 *	specifies an infinite timeout. Note that the timeout pointed to by
 *	timeout is relative and will be updated if not NULL and the
 *	operation blocks. Will fail with -ENOSYS if not implemented.
 */
SYSCALL_DEFINE5(io_getevents, aio_context_t, ctx_id,
		long, min_nr,
		long, nr,
		struct io_event __user *, events,
		struct timespec __user *, timeout)
{
	struct kioctx *ioctx = lookup_ioctx(ctx_id);
	long ret = -EINVAL;

	if (likely(ioctx)) {
		if (likely(min_nr <= nr && min_nr >= 0 && nr >= 0))
			ret = read_events(ioctx, min_nr, nr, events, timeout);
		put_ioctx(ioctx);
	}

	asmlinkage_protect(5, ret, ctx_id, min_nr, nr, events, timeout);
	return ret;
}