/*
 * fs/direct-io.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * O_DIRECT
 *
 * 04Jul2002	Andrew Morton
 *		Initial version
 * 11Sep2002	janetinc@us.ibm.com
 *		added readv/writev support.
 * 29Oct2002	Andrew Morton
 *		rewrote bio_add_page() support.
 * 30Oct2002	pbadari@us.ibm.com
 *		added support for non-aligned IO.
 * 06Nov2002	pbadari@us.ibm.com
 *		added asynchronous IO support.
 * 21Jul2003	nathans@sgi.com
 *		added IO completion notifier.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/bio.h>
#include <linux/wait.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/rwsem.h>
#include <linux/uio.h>
#include <linux/atomic.h>
#include <linux/prefetch.h>

/*
 * How many user pages to map in one call to get_user_pages().  This determines
 * the size of a structure in the slab cache
 */
#define DIO_PAGES	64

/*
 * This code generally works in units of "dio_blocks".  A dio_block is
 * somewhere between the hard sector size and the filesystem block size.  It
 * is determined on a per-invocation basis.  When talking to the filesystem
 * we need to convert dio_blocks to fs_blocks by scaling the dio_block quantity
 * down by dio->blkfactor.  Similarly, fs-blocksize quantities are converted
 * to dio_block quantities by shifting left by blkfactor.
 *
 * If blkfactor is zero then the user's request was aligned to the filesystem's
 * blocksize.
 */
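/*
 * Worked example of the dio_block <-> fs_block conversion (illustrative
 * numbers only): with 512-byte dio_blocks (blkbits == 9) on a filesystem
 * with 4096-byte blocks (i_blkbits == 12), blkfactor == 3.  A request
 * beginning 2560 bytes into the file starts at dio_block 5, which lies in
 * fs_block 5 >> 3 == 0; one fs_block spans 1 << 3 == 8 dio_blocks.
 */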
/* dio_state only used in the submission path */

struct dio_submit {
	struct bio *bio;		/* bio under assembly */
	unsigned blkbits;		/* doesn't change */
	unsigned blkfactor;		/* When we're using an alignment which
					   is finer than the filesystem's soft
					   blocksize, this specifies how much
					   finer.  blkfactor=2 means 1/4-block
					   alignment.  Does not change */
	unsigned start_zero_done;	/* flag: sub-blocksize zeroing has
					   been performed at the start of a
					   write */
	int pages_in_io;		/* approximate total IO pages */
	size_t	size;			/* total request size (doesn't change)*/
	sector_t block_in_file;		/* Current offset into the underlying
					   file in dio_block units. */
	unsigned blocks_available;	/* At block_in_file.  changes */
	int reap_counter;		/* rate limit reaping */
	sector_t final_block_in_request;/* doesn't change */
	unsigned first_block_in_page;	/* doesn't change, used only once */
	int boundary;			/* prev block is at a boundary */
	get_block_t *get_block;		/* block mapping function */
	dio_submit_t *submit_io;	/* IO submission function */

	loff_t logical_offset_in_bio;	/* current first logical block in bio */
	sector_t final_block_in_bio;	/* current final block in bio + 1 */
	sector_t next_block_for_io;	/* next block to be put under IO,
					   in dio_blocks units */

	/*
	 * Deferred addition of a page to the dio.  These variables are
	 * private to dio_send_cur_page(), submit_page_section() and
	 * dio_bio_add_page().
	 */
	struct page *cur_page;		/* The page */
	unsigned cur_page_offset;	/* Offset into it, in bytes */
	unsigned cur_page_len;		/* Nr of bytes at cur_page_offset */
	sector_t cur_page_block;	/* Where it starts */
	loff_t cur_page_fs_offset;	/* Offset in file */

	/*
	 * Page fetching state.  These variables belong to dio_refill_pages().
	 */
	int curr_page;			/* changes */
	int total_pages;		/* doesn't change */
	unsigned long curr_user_address;/* changes */

	/*
	 * Page queue.  These variables belong to dio_refill_pages() and
	 * dio_get_page().
	 */
	unsigned head;			/* next page to process */
	unsigned tail;			/* last valid page + 1 */
};

/* dio_state communicated between submission path and end_io */
struct dio {
	int flags;			/* doesn't change */
	int rw;
	struct inode *inode;
	loff_t i_size;			/* i_size when submitted */
	dio_iodone_t *end_io;		/* IO completion function */

	void *private;			/* copy from map_bh.b_private */

	/* BIO completion state */
	spinlock_t bio_lock;		/* protects BIO fields below */
	int page_errors;		/* errno from get_user_pages() */
	int is_async;			/* is IO async ? */
	int io_error;			/* IO error in completion path */
	unsigned long refcount;		/* submission path and bios */
	struct bio *bio_list;		/* singly linked via bi_private */
	struct task_struct *waiter;	/* waiting task (NULL if none) */

	/* AIO related stuff */
	struct kiocb *iocb;		/* kiocb */
	ssize_t result;			/* IO result */

	/*
	 * pages[] (and any fields placed after it) are not zeroed out at
	 * allocation time.  Don't add new fields after pages[] unless you
	 * wish that they not be zeroed.
	 */
	struct page *pages[DIO_PAGES];	/* page buffer */
} ____cacheline_aligned_in_smp;

static struct kmem_cache *dio_cache __read_mostly;

/*
 * How many pages are in the queue?
 */
static inline unsigned dio_pages_present(struct dio_submit *sdio)
{
	return sdio->tail - sdio->head;
}

/*
 * Go grab and pin some userspace pages.  Typically we'll get 64 at a time.
 */
static inline int dio_refill_pages(struct dio *dio, struct dio_submit *sdio)
{
	int ret;
	int nr_pages;

	nr_pages = min(sdio->total_pages - sdio->curr_page, DIO_PAGES);
	ret = get_user_pages_fast(
		sdio->curr_user_address,	/* Where from? */
		nr_pages,			/* How many pages? */
		dio->rw == READ,		/* Write to memory? */
		&dio->pages[0]);		/* Put results here */

	if (ret < 0 && sdio->blocks_available && (dio->rw & WRITE)) {
		struct page *page = ZERO_PAGE(0);
		/*
		 * A memory fault, but the filesystem has some outstanding
		 * mapped blocks.  We need to use those blocks up to avoid
		 * leaking stale data in the file.
		 */
		if (dio->page_errors == 0)
			dio->page_errors = ret;
		page_cache_get(page);
		dio->pages[0] = page;
		sdio->head = 0;
		sdio->tail = 1;
		ret = 0;
		goto out;
	}

	if (ret >= 0) {
		sdio->curr_user_address += ret * PAGE_SIZE;
		sdio->curr_page += ret;
		sdio->head = 0;
		sdio->tail = ret;
		ret = 0;
	}
out:
	return ret;
}
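/*
 * Page-queue bookkeeping, by example (illustrative): after a successful
 * refill of a full 64-page batch, head == 0 and tail == 64, so
 * dio_pages_present() reports 64 queued pages.  Each dio_get_page() call
 * below then consumes pages[head++] until the queue empties and the next
 * batch is fetched.
 */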
/*
 * Get another userspace page.  Returns an ERR_PTR on error.  Pages are
 * buffered inside the dio so that we can call get_user_pages() against a
 * decent number of pages, less frequently.  This also provides nicer use
 * of the L1 cache.
 */
static inline struct page *dio_get_page(struct dio *dio,
		struct dio_submit *sdio)
{
	if (dio_pages_present(sdio) == 0) {
		int ret;

		ret = dio_refill_pages(dio, sdio);
		if (ret)
			return ERR_PTR(ret);
		BUG_ON(dio_pages_present(sdio) == 0);
	}
	return dio->pages[sdio->head++];
}

/**
 * dio_complete() - called when all DIO BIO I/O has been completed
 * @offset: the byte offset in the file of the completed operation
 *
 * This releases locks as dictated by the locking type, lets interested parties
 * know that a DIO operation has completed, and calculates the resulting return
 * code for the operation.
 *
 * It lets the filesystem know if it registered an interest earlier via
 * get_block.  Pass the private field of the map buffer_head so that
 * filesystems can use it to hold additional state between get_block calls and
 * dio_complete.
 */
static ssize_t dio_complete(struct dio *dio, loff_t offset, ssize_t ret, bool is_async)
{
	ssize_t transferred = 0;

	/*
	 * AIO submission can race with bio completion to get here while
	 * expecting to have the last io completed by bio completion.
	 * In that case -EIOCBQUEUED is in fact not an error we want
	 * to preserve through this call.
	 */
	if (ret == -EIOCBQUEUED)
		ret = 0;

	if (dio->result) {
		transferred = dio->result;

		/* Check for short read case */
		if ((dio->rw == READ) && ((offset + transferred) > dio->i_size))
			transferred = dio->i_size - offset;
	}

	if (ret == 0)
		ret = dio->page_errors;
	if (ret == 0)
		ret = dio->io_error;
	if (ret == 0)
		ret = transferred;

	if (dio->end_io && dio->result) {
		dio->end_io(dio->iocb, offset, transferred,
			    dio->private, ret, is_async);
	} else {
		inode_dio_done(dio->inode);
		if (is_async)
			aio_complete(dio->iocb, ret, 0);
	}

	return ret;
}

static int dio_bio_complete(struct dio *dio, struct bio *bio);
/*
 * Asynchronous IO callback.
 */
static void dio_bio_end_aio(struct bio *bio, int error)
{
	struct dio *dio = bio->bi_private;
	unsigned long remaining;
	unsigned long flags;

	/* cleanup the bio */
	dio_bio_complete(dio, bio);

	spin_lock_irqsave(&dio->bio_lock, flags);
	remaining = --dio->refcount;
	if (remaining == 1 && dio->waiter)
		wake_up_process(dio->waiter);
	spin_unlock_irqrestore(&dio->bio_lock, flags);

	if (remaining == 0) {
		dio_complete(dio, dio->iocb->ki_pos, 0, true);
		kmem_cache_free(dio_cache, dio);
	}
}
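/*
 * Refcount lifecycle, by example (illustrative): the submission path holds
 * one reference for the duration of the request, and dio_bio_submit() takes
 * one per in-flight bio, so a request carved into three bios peaks at
 * refcount == 4.  For sync IO the submitter waits until only its own ref
 * remains and drops it last, via drop_refcount(); for AIO, whichever of the
 * submitter or the final dio_bio_end_aio() takes the count to zero runs
 * dio_complete() and frees the dio.
 */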
/*
 * The BIO completion handler simply queues the BIO up for the process-context
 * handler.
 *
 * During I/O bi_private points at the dio.  After I/O, bi_private is used to
 * implement a singly-linked list of completed BIOs, at dio->bio_list.
 */
static void dio_bio_end_io(struct bio *bio, int error)
{
	struct dio *dio = bio->bi_private;
	unsigned long flags;

	spin_lock_irqsave(&dio->bio_lock, flags);
	bio->bi_private = dio->bio_list;
	dio->bio_list = bio;
	if (--dio->refcount == 1 && dio->waiter)
		wake_up_process(dio->waiter);
	spin_unlock_irqrestore(&dio->bio_lock, flags);
}
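/*
 * List-threading example (illustrative): if bios A, B and C complete in
 * that order, dio->bio_list points at C, C->bi_private at B, B->bi_private
 * at A, and A->bi_private is the NULL that bio_list started out as.  The
 * process-context handler therefore pops completed bios in LIFO order.
 */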
/**
 * dio_end_io - handle the end io action for the given bio
 * @bio: The direct io bio that's being completed
 * @error: Error if there was one
 *
 * This is meant to be called by any filesystem that uses its own dio_submit_t
 * so that the DIO specific endio actions are dealt with after the filesystem
 * has done its completion work.
 */
void dio_end_io(struct bio *bio, int error)
{
	struct dio *dio = bio->bi_private;

	if (dio->is_async)
		dio_bio_end_aio(bio, error);
	else
		dio_bio_end_io(bio, error);
}
EXPORT_SYMBOL_GPL(dio_end_io);

static inline void
dio_bio_alloc(struct dio *dio, struct dio_submit *sdio,
	      struct block_device *bdev,
	      sector_t first_sector, int nr_vecs)
{
	struct bio *bio;

	/*
	 * bio_alloc() is guaranteed to return a bio when called with
	 * __GFP_WAIT and we request a valid number of vectors.
	 */
	bio = bio_alloc(GFP_KERNEL, nr_vecs);

	bio->bi_bdev = bdev;
	bio->bi_sector = first_sector;
	if (dio->is_async)
		bio->bi_end_io = dio_bio_end_aio;
	else
		bio->bi_end_io = dio_bio_end_io;

	sdio->bio = bio;
	sdio->logical_offset_in_bio = sdio->cur_page_fs_offset;
}

/*
 * In the AIO read case we speculatively dirty the pages before starting IO.
 * During IO completion, any of these pages which happen to have been written
 * back will be redirtied by bio_check_pages_dirty().
 *
 * bios hold a dio reference between submit_bio and ->end_io.
 */
static inline void dio_bio_submit(struct dio *dio, struct dio_submit *sdio)
{
	struct bio *bio = sdio->bio;
	unsigned long flags;

	bio->bi_private = dio;

	spin_lock_irqsave(&dio->bio_lock, flags);
	dio->refcount++;
	spin_unlock_irqrestore(&dio->bio_lock, flags);

	if (dio->is_async && dio->rw == READ)
		bio_set_pages_dirty(bio);

	if (sdio->submit_io)
		sdio->submit_io(dio->rw, bio, dio->inode,
				sdio->logical_offset_in_bio);
	else
		submit_bio(dio->rw, bio);

	sdio->bio = NULL;
	sdio->boundary = 0;
	sdio->logical_offset_in_bio = 0;
}

/*
 * Release any resources in case of a failure
 */
static inline void dio_cleanup(struct dio *dio, struct dio_submit *sdio)
{
	while (dio_pages_present(sdio))
		page_cache_release(dio_get_page(dio, sdio));
}

/*
 * Wait for the next BIO to complete.  Remove it and return it.  NULL is
 * returned once all BIOs have been completed.  This must only be called once
 * all bios have been issued so that dio->refcount can only decrease.  This
 * requires that the caller hold a reference on the dio.
 */
static struct bio *dio_await_one(struct dio *dio)
{
	unsigned long flags;
	struct bio *bio = NULL;

	spin_lock_irqsave(&dio->bio_lock, flags);

	/*
	 * Wait as long as the list is empty and there are bios in flight.  bio
	 * completion drops the count, maybe adds to the list, and wakes while
	 * holding the bio_lock so we don't need set_current_state()'s barrier
	 * and can call it after testing our condition.
	 */
	while (dio->refcount > 1 && dio->bio_list == NULL) {
		__set_current_state(TASK_UNINTERRUPTIBLE);
		dio->waiter = current;
		spin_unlock_irqrestore(&dio->bio_lock, flags);
		io_schedule();
		/* wake up sets us TASK_RUNNING */
		spin_lock_irqsave(&dio->bio_lock, flags);
		dio->waiter = NULL;
	}
	if (dio->bio_list) {
		bio = dio->bio_list;
		dio->bio_list = bio->bi_private;
	}
	spin_unlock_irqrestore(&dio->bio_lock, flags);
	return bio;
}

/*
 * Process one completed BIO.  No locks are held.
 */
static int dio_bio_complete(struct dio *dio, struct bio *bio)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec;
	int page_no;

	if (!uptodate)
		dio->io_error = -EIO;

	if (dio->is_async && dio->rw == READ) {
		bio_check_pages_dirty(bio);	/* transfers ownership */
	} else {
		for (page_no = 0; page_no < bio->bi_vcnt; page_no++) {
			struct page *page = bvec[page_no].bv_page;

			if (dio->rw == READ && !PageCompound(page))
				set_page_dirty_lock(page);
			page_cache_release(page);
		}
		bio_put(bio);
	}
	return uptodate ? 0 : -EIO;
}

/*
 * Wait on and process all in-flight BIOs.  This must only be called once
 * all bios have been issued so that the refcount can only decrease.
 * This just waits for all bios to make it through dio_bio_complete.  IO
 * errors are propagated through dio->io_error and should be propagated via
 * dio_complete().
 */
static void dio_await_completion(struct dio *dio)
{
	struct bio *bio;
	do {
		bio = dio_await_one(dio);
		if (bio)
			dio_bio_complete(dio, bio);
	} while (bio);
}

/*
 * A really large O_DIRECT read or write can generate a lot of BIOs.  So
 * to keep the memory consumption sane we periodically reap any completed BIOs
 * during the BIO generation phase.
 *
 * This also helps to limit the peak amount of pinned userspace memory.
 */
static inline int dio_bio_reap(struct dio *dio, struct dio_submit *sdio)
{
	int ret = 0;

	if (sdio->reap_counter++ >= 64) {
		while (dio->bio_list) {
			unsigned long flags;
			struct bio *bio;
			int ret2;

			spin_lock_irqsave(&dio->bio_lock, flags);
			bio = dio->bio_list;
			dio->bio_list = bio->bi_private;
			spin_unlock_irqrestore(&dio->bio_lock, flags);
			ret2 = dio_bio_complete(dio, bio);
			if (ret == 0)
				ret = ret2;
		}
		sdio->reap_counter = 0;
	}
	return ret;
}
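/*
 * Reaping in practice (illustrative): dio_bio_reap() is called from
 * dio_new_bio(), so roughly every 64th bio we assemble triggers a drain of
 * the completed-bio list.  For a multi-gigabyte O_DIRECT request this
 * bounds the number of completed-but-unreaped bios, and with them the
 * number of user pages they keep pinned.
 */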
/*
 * Call into the fs to map some more disk blocks.  We record the current number
 * of available blocks at sdio->blocks_available.  These are in units of the
 * fs blocksize, (1 << inode->i_blkbits).
 *
 * The fs is allowed to map lots of blocks at once.  If it wants to do that,
 * it uses the passed inode-relative block number as the file offset, as usual.
 *
 * get_block() is passed the number of i_blkbits-sized blocks which direct_io
 * has remaining to do.  The fs should not map more than this number of blocks.
 *
 * If the fs has mapped a lot of blocks, it should populate bh->b_size to
 * indicate how much contiguous disk space has been made available at
 * bh->b_blocknr.
 *
 * If *any* of the mapped blocks are new, then the fs must set buffer_new().
 * This isn't very efficient...
 *
 * In the case of filesystem holes: the fs may return an arbitrarily-large
 * hole by returning an appropriate value in b_size and by clearing
 * buffer_mapped().  However the direct-io code will only process holes one
 * block at a time - it will repeatedly call get_block() as it walks the hole.
 */
static int get_more_blocks(struct dio *dio, struct dio_submit *sdio,
			   struct buffer_head *map_bh)
{
	int ret;
	sector_t fs_startblk;	/* Into file, in filesystem-sized blocks */
	sector_t fs_endblk;	/* Into file, in filesystem-sized blocks */
	unsigned long fs_count;	/* Number of filesystem-sized blocks */
	int create;
	unsigned int i_blkbits = sdio->blkbits + sdio->blkfactor;

	/*
	 * If there was a memory error and we've overwritten all the
	 * mapped blocks then we can now return that memory error
	 */
	ret = dio->page_errors;
	if (ret == 0) {
		BUG_ON(sdio->block_in_file >= sdio->final_block_in_request);
		fs_startblk = sdio->block_in_file >> sdio->blkfactor;
		fs_endblk = (sdio->final_block_in_request - 1) >>
					sdio->blkfactor;
		fs_count = fs_endblk - fs_startblk + 1;

		map_bh->b_state = 0;
		map_bh->b_size = fs_count << i_blkbits;

		/*
		 * For writes inside i_size on a DIO_SKIP_HOLES filesystem we
		 * forbid block creations: only overwrites are permitted.
		 * We will return early to the caller once we see an
		 * unmapped buffer head returned, and the caller will fall
		 * back to buffered I/O.
		 *
		 * Otherwise the decision is left to the get_blocks method,
		 * which may decide to handle it or also return an unmapped
		 * buffer head.
		 */
		create = dio->rw & WRITE;
		if (dio->flags & DIO_SKIP_HOLES) {
			if (sdio->block_in_file < (i_size_read(dio->inode) >>
							sdio->blkbits))
				create = 0;
		}

		ret = (*sdio->get_block)(dio->inode, fs_startblk,
						map_bh, create);

		/* Store for completion */
		dio->private = map_bh->b_private;
	}
	return ret;
}

/*
 * There is no bio.  Make one now.
 */
static inline int dio_new_bio(struct dio *dio, struct dio_submit *sdio,
		sector_t start_sector, struct buffer_head *map_bh)
{
	sector_t sector;
	int ret, nr_pages;

	ret = dio_bio_reap(dio, sdio);
	if (ret)
		goto out;
	sector = start_sector << (sdio->blkbits - 9);
	nr_pages = min(sdio->pages_in_io, bio_get_nr_vecs(map_bh->b_bdev));
	nr_pages = min(nr_pages, BIO_MAX_PAGES);
	BUG_ON(nr_pages <= 0);
	dio_bio_alloc(dio, sdio, map_bh->b_bdev, sector, nr_pages);
	sdio->boundary = 0;
out:
	return ret;
}
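/*
 * Mapping example (illustrative numbers): on a filesystem with 4096-byte
 * blocks, a get_block() call with fs_startblk == 10 and b_size primed to
 * 16384 may come back with buffer_mapped() set, b_blocknr == 200 and
 * b_size == 16384, meaning file blocks 10-13 live contiguously at disk
 * blocks 200-203.  do_direct_IO() then has b_size >> blkbits dio_blocks
 * available before get_more_blocks() needs to be called again.
 */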
/*
 * Attempt to put the current chunk of 'cur_page' into the current BIO.  If
 * that was successful then update final_block_in_bio and take a ref against
 * the just-added page.
 *
 * Return zero on success.  Non-zero means the caller needs to start a new BIO.
 */
static inline int dio_bio_add_page(struct dio_submit *sdio)
{
	int ret;

	ret = bio_add_page(sdio->bio, sdio->cur_page,
			sdio->cur_page_len, sdio->cur_page_offset);
	if (ret == sdio->cur_page_len) {
		/*
		 * Decrement count only, if we are done with this page
		 */
		if ((sdio->cur_page_len + sdio->cur_page_offset) == PAGE_SIZE)
			sdio->pages_in_io--;
		page_cache_get(sdio->cur_page);
		sdio->final_block_in_bio = sdio->cur_page_block +
			(sdio->cur_page_len >> sdio->blkbits);
		ret = 0;
	} else {
		ret = 1;
	}
	return ret;
}

/*
 * Put cur_page under IO.  The section of cur_page which is described by
 * cur_page_offset,cur_page_len is put into a BIO.  The section of cur_page
 * starts on-disk at cur_page_block.
 *
 * We take a ref against the page here (on behalf of its presence in the bio).
 *
 * The caller of this function is responsible for removing cur_page from the
 * dio, and for dropping the refcount which came from that presence.
 */
static inline int dio_send_cur_page(struct dio *dio, struct dio_submit *sdio,
		struct buffer_head *map_bh)
{
	int ret = 0;

	if (sdio->bio) {
		loff_t cur_offset = sdio->cur_page_fs_offset;
		loff_t bio_next_offset = sdio->logical_offset_in_bio +
			sdio->bio->bi_size;

		/*
		 * See whether this new request is contiguous with the old.
		 *
		 * Btrfs cannot handle having logically non-contiguous requests
		 * submitted.  For example if you have
		 *
		 * Logical:  [0-4095][HOLE][8192-12287]
		 * Physical: [0-4095]      [4096-8191]
		 *
		 * We cannot submit those pages together as one BIO.  So if our
		 * current logical offset in the file does not equal what would
		 * be the next logical offset in the bio, submit the bio we
		 * have.
		 */
		if (sdio->final_block_in_bio != sdio->cur_page_block ||
		    cur_offset != bio_next_offset)
			dio_bio_submit(dio, sdio);
	}

	if (sdio->bio == NULL) {
		ret = dio_new_bio(dio, sdio, sdio->cur_page_block, map_bh);
		if (ret)
			goto out;
	}

	if (dio_bio_add_page(sdio) != 0) {
		dio_bio_submit(dio, sdio);
		ret = dio_new_bio(dio, sdio, sdio->cur_page_block, map_bh);
		if (ret == 0) {
			ret = dio_bio_add_page(sdio);
			BUG_ON(ret != 0);
		}
	}
out:
	return ret;
}

/*
 * An autonomous function to put a chunk of a page under deferred IO.
 *
 * The caller doesn't actually know (or care) whether this piece of page is in
 * a BIO, or is under IO or whatever.  We just take care of all possible
 * situations here.  The separation between the logic of do_direct_IO() and
 * that of submit_page_section() is important for clarity.  Please don't break
 * it.
 *
 * The chunk of page starts on-disk at blocknr.
 *
 * We perform deferred IO, by recording the last-submitted page inside our
 * private part of the dio structure.  If possible, we just expand the IO
 * across that page here.
 *
 * If that doesn't work out then we put the old page into the bio and add this
 * page to the dio instead.
 */
static inline int
submit_page_section(struct dio *dio, struct dio_submit *sdio, struct page *page,
		    unsigned offset, unsigned len, sector_t blocknr,
		    struct buffer_head *map_bh)
{
	int ret = 0;

	if (dio->rw & WRITE) {
		/*
		 * Read accounting is performed in submit_bio()
		 */
		task_io_account_write(len);
	}

	/*
	 * Can we just grow the current page's presence in the dio?
	 */
	if (sdio->cur_page == page &&
	    sdio->cur_page_offset + sdio->cur_page_len == offset &&
	    sdio->cur_page_block +
	    (sdio->cur_page_len >> sdio->blkbits) == blocknr) {
		sdio->cur_page_len += len;
		goto out;
	}

	/*
	 * If there's a deferred page already there then send it.
	 */
	if (sdio->cur_page) {
		ret = dio_send_cur_page(dio, sdio, map_bh);
		page_cache_release(sdio->cur_page);
		sdio->cur_page = NULL;
		if (ret)
			return ret;
	}

	page_cache_get(page);		/* It is in dio */
	sdio->cur_page = page;
	sdio->cur_page_offset = offset;
	sdio->cur_page_len = len;
	sdio->cur_page_block = blocknr;
	sdio->cur_page_fs_offset = sdio->block_in_file << sdio->blkbits;
out:
	/*
	 * If sdio->boundary then we want to schedule the IO now to
	 * avoid metadata seeks.
	 */
	if (sdio->boundary) {
		ret = dio_send_cur_page(dio, sdio, map_bh);
		dio_bio_submit(dio, sdio);
		page_cache_release(sdio->cur_page);
		sdio->cur_page = NULL;
	}
	return ret;
}
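/*
 * Deferred-merge example (illustrative): suppose do_direct_IO() hands us
 * the same page four times, as 1024-byte chunks at offsets 0, 1024, 2048
 * and 3072 that map to consecutive dio_blocks.  The first call records the
 * chunk in cur_page_*; the next three merely grow cur_page_len to 4096.
 * The accumulated run is only pushed into a bio, via dio_send_cur_page(),
 * when a non-contiguous chunk or a boundary block arrives, or when the
 * request ends.
 */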
/*
 * Clean any dirty buffers in the blockdev mapping which alias newly-created
 * file blocks.  Only called for S_ISREG files - blockdevs do not set
 * buffer_new.
 */
static void clean_blockdev_aliases(struct dio *dio, struct buffer_head *map_bh)
{
	unsigned i;
	unsigned nblocks;

	nblocks = map_bh->b_size >> dio->inode->i_blkbits;

	for (i = 0; i < nblocks; i++) {
		unmap_underlying_metadata(map_bh->b_bdev,
					  map_bh->b_blocknr + i);
	}
}

/*
 * If we are not writing the entire block and get_block() allocated
 * the block for us, we need to fill in the unused portion of the
 * block with zeros.  This happens only if user-buffer, fileoffset or
 * io length is not filesystem block-size multiple.
 *
 * `end' is zero if we're doing the start of the IO, 1 at the end of the
 * IO.
 */
static inline void dio_zero_block(struct dio *dio, struct dio_submit *sdio,
		int end, struct buffer_head *map_bh)
{
	unsigned dio_blocks_per_fs_block;
	unsigned this_chunk_blocks;	/* In dio_blocks */
	unsigned this_chunk_bytes;
	struct page *page;

	sdio->start_zero_done = 1;
	if (!sdio->blkfactor || !buffer_new(map_bh))
		return;

	dio_blocks_per_fs_block = 1 << sdio->blkfactor;
	this_chunk_blocks = sdio->block_in_file & (dio_blocks_per_fs_block - 1);

	if (!this_chunk_blocks)
		return;

	/*
	 * We need to zero out part of an fs block.  It is either at the
	 * beginning or the end of the fs block.
	 */
	if (end)
		this_chunk_blocks = dio_blocks_per_fs_block - this_chunk_blocks;

	this_chunk_bytes = this_chunk_blocks << sdio->blkbits;

	page = ZERO_PAGE(0);
	if (submit_page_section(dio, sdio, page, 0, this_chunk_bytes,
				sdio->next_block_for_io, map_bh))
		return;

	sdio->next_block_for_io += this_chunk_blocks;
}
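/*
 * Zeroing example (illustrative): with blkfactor == 3 (512-byte dio_blocks
 * inside 4096-byte fs blocks), a write that starts at dio_block 5 of a
 * newly allocated fs block leaves dio_blocks 0-4 unwritten, so the start
 * pass (end == 0) zeroes those 5 blocks (2560 bytes) from the ZERO_PAGE.
 * If the write also ends 5 dio_blocks into its final, newly allocated fs
 * block, the end pass (end == 1) zeroes the trailing 8 - 5 == 3 blocks.
 */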
/*
 * Walk the user pages, and the file, mapping blocks to disk and generating
 * a sequence of (page,offset,len,block) mappings.  These mappings are injected
 * into submit_page_section(), which takes care of the next stage of submission.
 *
 * Direct IO against a blockdev is different from direct IO against a file
 * because we can happily perform page-sized but 512-byte aligned IOs.  It is
 * important that blockdev IO be able to have fine alignment and large sizes.
 *
 * So what we do is to permit the ->get_block function to populate bh.b_size
 * with the size of IO which is permitted at this offset and this i_blkbits.
 *
 * For best results, the blockdev should be set up with 512-byte i_blkbits and
 * it should set b_size to PAGE_SIZE or more inside get_block().  This gives
 * fine alignment but still allows this function to work in PAGE_SIZE units.
 */
static int do_direct_IO(struct dio *dio, struct dio_submit *sdio,
			struct buffer_head *map_bh)
{
	const unsigned blkbits = sdio->blkbits;
	const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
	struct page *page;
	unsigned block_in_page;
	int ret = 0;

	/* The I/O can start at any block offset within the first page */
	block_in_page = sdio->first_block_in_page;

	while (sdio->block_in_file < sdio->final_block_in_request) {
		page = dio_get_page(dio, sdio);
		if (IS_ERR(page)) {
			ret = PTR_ERR(page);
			goto out;
		}

		while (block_in_page < blocks_per_page) {
			unsigned offset_in_page = block_in_page << blkbits;
			unsigned this_chunk_bytes;	/* # of bytes mapped */
			unsigned this_chunk_blocks;	/* # of blocks */
			unsigned u;

			if (sdio->blocks_available == 0) {
				/*
				 * Need to go and map some more disk
				 */
				unsigned long blkmask;
				unsigned long dio_remainder;

				ret = get_more_blocks(dio, sdio, map_bh);
				if (ret) {
					page_cache_release(page);
					goto out;
				}
				if (!buffer_mapped(map_bh))
					goto do_holes;

				sdio->blocks_available =
						map_bh->b_size >> sdio->blkbits;
				sdio->next_block_for_io =
					map_bh->b_blocknr << sdio->blkfactor;
				if (buffer_new(map_bh))
					clean_blockdev_aliases(dio, map_bh);

				if (!sdio->blkfactor)
					goto do_holes;

				blkmask = (1 << sdio->blkfactor) - 1;
				dio_remainder = (sdio->block_in_file & blkmask);

				/*
				 * If we are at the start of IO and that IO
				 * starts partway into a fs-block,
				 * dio_remainder will be non-zero.  If the IO
				 * is a read then we can simply advance the IO
				 * cursor to the first block which is to be
				 * read.  But if the IO is a write and the
				 * block was newly allocated we cannot do that;
				 * the start of the fs block must be zeroed out
				 * on-disk
				 */
				if (!buffer_new(map_bh))
					sdio->next_block_for_io += dio_remainder;
				sdio->blocks_available -= dio_remainder;
			}
do_holes:
			/* Handle holes */
			if (!buffer_mapped(map_bh)) {
				loff_t i_size_aligned;

				/* AKPM: eargh, -ENOTBLK is a hack */
				if (dio->rw & WRITE) {
					page_cache_release(page);
					return -ENOTBLK;
				}

				/*
				 * Be sure to account for a partial block as the
				 * last block in the file
				 */
				i_size_aligned = ALIGN(i_size_read(dio->inode),
							1 << blkbits);
				if (sdio->block_in_file >=
						i_size_aligned >> blkbits) {
					/* We hit eof */
					page_cache_release(page);
					goto out;
				}
				zero_user(page, block_in_page << blkbits,
						1 << blkbits);
				sdio->block_in_file++;
				block_in_page++;
				goto next_block;
			}

			/*
			 * If we're performing IO which has an alignment which
			 * is finer than the underlying fs, go check to see if
			 * we must zero out the start of this block.
			 */
			if (unlikely(sdio->blkfactor && !sdio->start_zero_done))
				dio_zero_block(dio, sdio, 0, map_bh);

			/*
			 * Work out, in this_chunk_blocks, how much disk we
			 * can add to this page
			 */
			this_chunk_blocks = sdio->blocks_available;
			u = (PAGE_SIZE - offset_in_page) >> blkbits;
			if (this_chunk_blocks > u)
				this_chunk_blocks = u;
			u = sdio->final_block_in_request - sdio->block_in_file;
			if (this_chunk_blocks > u)
				this_chunk_blocks = u;
			this_chunk_bytes = this_chunk_blocks << blkbits;
			BUG_ON(this_chunk_bytes == 0);

			if (this_chunk_blocks == sdio->blocks_available)
				sdio->boundary = buffer_boundary(map_bh);
			ret = submit_page_section(dio, sdio, page,
						  offset_in_page,
						  this_chunk_bytes,
						  sdio->next_block_for_io,
						  map_bh);
			if (ret) {
				page_cache_release(page);
				goto out;
			}
			sdio->next_block_for_io += this_chunk_blocks;

			sdio->block_in_file += this_chunk_blocks;
			block_in_page += this_chunk_blocks;
			sdio->blocks_available -= this_chunk_blocks;
next_block:
			BUG_ON(sdio->block_in_file > sdio->final_block_in_request);
			if (sdio->block_in_file == sdio->final_block_in_request)
				break;
		}

		/* Drop the ref which was taken in get_user_pages() */
		page_cache_release(page);
		block_in_page = 0;
	}
out:
	return ret;
}
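/*
 * Chunking example (illustrative): for a page-aligned 16 KB read with
 * 512-byte dio_blocks, blocks_per_page == 8 and do_direct_IO() walks four
 * user pages.  On each pass this_chunk_blocks is clamped three ways: by
 * sdio->blocks_available (what get_block() mapped), by the room left in
 * the current page, and by what remains of the request; the chunk is then
 * handed to submit_page_section() and all the cursors advance in lock step.
 */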
static inline int drop_refcount(struct dio *dio)
{
	int ret2;
	unsigned long flags;

	/*
	 * Sync will always be dropping the final ref and completing the
	 * operation.  AIO can if it was a broken operation described above or
	 * in fact if all the bios race to complete before we get here.  In
	 * that case dio_complete() translates the EIOCBQUEUED into the proper
	 * return code that the caller will hand to aio_complete().
	 *
	 * This is managed by the bio_lock instead of being an atomic_t so that
	 * completion paths can drop their ref and use the remaining count to
	 * decide to wake the submission path atomically.
	 */
	spin_lock_irqsave(&dio->bio_lock, flags);
	ret2 = --dio->refcount;
	spin_unlock_irqrestore(&dio->bio_lock, flags);
	return ret2;
}

/*
 * This is a library function for use by filesystem drivers.
 *
 * The locking rules are governed by the flags parameter:
 *  - if the flags value contains DIO_LOCKING we use a fancy locking
 *    scheme for dumb filesystems.
 *    For writes this function is called under i_mutex and returns with
 *    i_mutex held, for reads, i_mutex is not held on entry, but it is
 *    taken and dropped again before returning.
 *  - if the flags value does NOT contain DIO_LOCKING we don't use any
 *    internal locking but rather rely on the filesystem to synchronize
 *    direct I/O reads/writes versus each other and truncate.
 *
 * To help with locking against truncate we increment the i_dio_count
 * counter before starting direct I/O, and decrement it once we are done.
 * Truncate can wait for it to reach zero to provide exclusion.  It is
 * expected that filesystems provide exclusion between new direct I/O
 * and truncates.  For DIO_LOCKING filesystems this is done by i_mutex,
 * but other filesystems need to take care of this on their own.
 *
 * NOTE: if you pass "sdio" to anything by pointer make sure that function
 * is always inlined.  Otherwise gcc is unable to split the structure into
 * individual fields and will generate much worse code.  This is important
 * for the whole file.
 */
static inline ssize_t
do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
	struct block_device *bdev, const struct iovec *iov, loff_t offset,
	unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io,
	dio_submit_t submit_io, int flags)
{
	int seg;
	size_t size;
	unsigned long addr;
	unsigned i_blkbits = ACCESS_ONCE(inode->i_blkbits);
	unsigned blkbits = i_blkbits;
	unsigned blocksize_mask = (1 << blkbits) - 1;
	ssize_t retval = -EINVAL;
	loff_t end = offset;
	struct dio *dio;
	struct dio_submit sdio = { 0, };
	unsigned long user_addr;
	size_t bytes;
	struct buffer_head map_bh = { 0, };
	struct blk_plug plug;

	if (rw & WRITE)
		rw = WRITE_ODIRECT;

	/*
	 * Avoid references to bdev if not absolutely needed to give
	 * the early prefetch in the caller enough time.
	 */

	if (offset & blocksize_mask) {
		if (bdev)
			blkbits = blksize_bits(bdev_logical_block_size(bdev));
		blocksize_mask = (1 << blkbits) - 1;
		if (offset & blocksize_mask)
			goto out;
	}

	/* Check the memory alignment.  Blocks cannot straddle pages */
	for (seg = 0; seg < nr_segs; seg++) {
		addr = (unsigned long)iov[seg].iov_base;
		size = iov[seg].iov_len;
		end += size;
		if (unlikely((addr & blocksize_mask) ||
			     (size & blocksize_mask))) {
			if (bdev)
				blkbits = blksize_bits(
					 bdev_logical_block_size(bdev));
			blocksize_mask = (1 << blkbits) - 1;
			if ((addr & blocksize_mask) || (size & blocksize_mask))
				goto out;
		}
	}
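	/*
	 * Alignment example (illustrative): a 1024-byte write at file
	 * offset 512 on a filesystem with 4096-byte blocks fails the first
	 * mask test above, but if the underlying device has 512-byte
	 * logical blocks, blkbits drops to 9, the retest passes and the
	 * request proceeds with sdio.blkfactor == i_blkbits - blkbits == 3;
	 * sub-block zeroing is then handled via dio_zero_block().
	 */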
	/* watch out for a 0 len io from a tricksy fs */
	if (rw == READ && end == offset)
		return 0;

	dio = kmem_cache_alloc(dio_cache, GFP_KERNEL);
	retval = -ENOMEM;
	if (!dio)
		goto out;
	/*
	 * Believe it or not, zeroing out the page array caused a .5%
	 * performance regression in a database benchmark.  So, we take
	 * care to only zero out what's needed.
	 */
	memset(dio, 0, offsetof(struct dio, pages));

	dio->flags = flags;
	if (dio->flags & DIO_LOCKING) {
		if (rw == READ) {
			struct address_space *mapping =
					iocb->ki_filp->f_mapping;

			/* dropped again before this function returns */
			mutex_lock(&inode->i_mutex);

			retval = filemap_write_and_wait_range(mapping, offset,
							      end - 1);
			if (retval) {
				mutex_unlock(&inode->i_mutex);
				kmem_cache_free(dio_cache, dio);
				goto out;
			}
		}
	}

	/*
	 * Will be decremented at I/O completion time.
	 */
	atomic_inc(&inode->i_dio_count);

	/*
	 * For file extending writes updating i_size before data
	 * writeouts complete can expose uninitialized blocks.  So
	 * even for AIO, we need to wait for i/o to complete before
	 * returning in this case.
	 */
	dio->is_async = !is_sync_kiocb(iocb) && !((rw & WRITE) &&
		(end > i_size_read(inode)));

	retval = 0;

	dio->inode = inode;
	dio->rw = rw;
	sdio.blkbits = blkbits;
	sdio.blkfactor = i_blkbits - blkbits;
	sdio.block_in_file = offset >> blkbits;

	sdio.get_block = get_block;
	dio->end_io = end_io;
	sdio.submit_io = submit_io;
	sdio.final_block_in_bio = -1;
	sdio.next_block_for_io = -1;

	dio->iocb = iocb;
	dio->i_size = i_size_read(inode);

	spin_lock_init(&dio->bio_lock);
	dio->refcount = 1;

	/*
	 * In case of non-aligned buffers, we may need 2 more
	 * pages since we need to zero out first and last block.
	 */
	if (unlikely(sdio.blkfactor))
		sdio.pages_in_io = 2;

	for (seg = 0; seg < nr_segs; seg++) {
		user_addr = (unsigned long)iov[seg].iov_base;
		sdio.pages_in_io +=
			((user_addr + iov[seg].iov_len + PAGE_SIZE-1) /
				PAGE_SIZE - user_addr / PAGE_SIZE);
	}
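	/*
	 * pages_in_io estimate, by example (illustrative): with 4096-byte
	 * pages, a 10000-byte segment whose buffer starts 100 bytes into a
	 * page contributes (100 + 10000 + 4095)/4096 - 100/4096 ==
	 * 3 - 0 == 3 pages.
	 */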
	blk_start_plug(&plug);

	for (seg = 0; seg < nr_segs; seg++) {
		user_addr = (unsigned long)iov[seg].iov_base;
		sdio.size += bytes = iov[seg].iov_len;

		/* Index into the first page of the first block */
		sdio.first_block_in_page = (user_addr & ~PAGE_MASK) >> blkbits;
		sdio.final_block_in_request = sdio.block_in_file +
						(bytes >> blkbits);
		/* Page fetching state */
		sdio.head = 0;
		sdio.tail = 0;
		sdio.curr_page = 0;

		sdio.total_pages = 0;
		if (user_addr & (PAGE_SIZE-1)) {
			sdio.total_pages++;
			bytes -= PAGE_SIZE - (user_addr & (PAGE_SIZE - 1));
		}
		sdio.total_pages += (bytes + PAGE_SIZE - 1) / PAGE_SIZE;
		sdio.curr_user_address = user_addr;

		retval = do_direct_IO(dio, &sdio, &map_bh);

		dio->result += iov[seg].iov_len -
			((sdio.final_block_in_request - sdio.block_in_file) <<
					blkbits);

		if (retval) {
			dio_cleanup(dio, &sdio);
			break;
		}
	} /* end iovec loop */

	if (retval == -ENOTBLK) {
		/*
		 * The remaining part of the request will be
		 * handled by buffered I/O when we return
		 */
		retval = 0;
	}
	/*
	 * There may be some unwritten disk at the end of a part-written
	 * fs-block-sized block.  Go zero that now.
	 */
	dio_zero_block(dio, &sdio, 1, &map_bh);

	if (sdio.cur_page) {
		ssize_t ret2;

		ret2 = dio_send_cur_page(dio, &sdio, &map_bh);
		if (retval == 0)
			retval = ret2;
		page_cache_release(sdio.cur_page);
		sdio.cur_page = NULL;
	}
	if (sdio.bio)
		dio_bio_submit(dio, &sdio);

	blk_finish_plug(&plug);

	/*
	 * It is possible that we return short IO due to end of file.
	 * In that case, we need to release all the pages we got hold of.
	 */
	dio_cleanup(dio, &sdio);

	/*
	 * All block lookups have been performed.  For READ requests
	 * we can let i_mutex go now that it has achieved its purpose
	 * of protecting us from looking up uninitialized blocks.
	 */
	if (rw == READ && (dio->flags & DIO_LOCKING))
		mutex_unlock(&dio->inode->i_mutex);

	/*
	 * The only time we want to leave bios in flight is when a successful
	 * partial aio read or full aio write has been set up.  In that case
	 * bio completion will call aio_complete.  The only time it's safe to
	 * call aio_complete is when we return -EIOCBQUEUED, so we key on that.
	 * This had *better* be the only place that raises -EIOCBQUEUED.
	 */
	BUG_ON(retval == -EIOCBQUEUED);
	if (dio->is_async && retval == 0 && dio->result &&
	    ((rw == READ) || (dio->result == sdio.size)))
		retval = -EIOCBQUEUED;

	if (retval != -EIOCBQUEUED)
		dio_await_completion(dio);

	if (drop_refcount(dio) == 0) {
		retval = dio_complete(dio, offset, retval, false);
		kmem_cache_free(dio_cache, dio);
	} else
		BUG_ON(retval != -EIOCBQUEUED);

out:
	return retval;
}

ssize_t
__blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
	struct block_device *bdev, const struct iovec *iov, loff_t offset,
	unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io,
	dio_submit_t submit_io, int flags)
{
	/*
	 * The block device state is needed in the end to finally
	 * submit everything.  Since it's likely to be cache cold
	 * prefetch it here as first thing to hide some of the
	 * latency.
	 *
	 * Attempt to prefetch the pieces we likely need later.
	 */
	prefetch(&bdev->bd_disk->part_tbl);
	prefetch(bdev->bd_queue);
	prefetch((char *)bdev->bd_queue + SMP_CACHE_BYTES);

	return do_blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset,
				     nr_segs, get_block, end_io,
				     submit_io, flags);
}

EXPORT_SYMBOL(__blockdev_direct_IO);

static __init int dio_init(void)
{
	dio_cache = KMEM_CACHE(dio, SLAB_PANIC);
	return 0;
}
module_init(dio_init)
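/*
 * Typical usage (a sketch, not part of this file): a filesystem's
 * ->direct_IO method usually reaches this code through the
 * blockdev_direct_IO() wrapper in <linux/fs.h>, roughly as:
 *
 *	static ssize_t foo_direct_IO(int rw, struct kiocb *iocb,
 *			const struct iovec *iov, loff_t offset,
 *			unsigned long nr_segs)
 *	{
 *		struct inode *inode = iocb->ki_filp->f_mapping->host;
 *
 *		return blockdev_direct_IO(rw, iocb, inode, iov, offset,
 *					  nr_segs, foo_get_block);
 *	}
 *
 * where foo_direct_IO/foo_get_block are hypothetical names and the wrapper
 * supplies inode->i_sb->s_bdev plus the DIO_LOCKING|DIO_SKIP_HOLES flags.
 */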