/*
 *  linux/fs/ext4/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Goal-directed block allocation by Stephen Tweedie
 *      (sct@redhat.com), 1993, 1998
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *      David S. Miller (davem@caip.rutgers.edu), 1995
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *      (jj@sunsite.ms.mff.cuni.cz)
 *
 *  Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/workqueue.h>

#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "ext4_extents.h"

#include <trace/events/ext4.h>

#define MPAGE_DA_EXTENT_TAIL 0x01

static inline int ext4_begin_ordered_truncate(struct inode *inode,
                                              loff_t new_size)
{
        return jbd2_journal_begin_ordered_truncate(
                                        EXT4_SB(inode->i_sb)->s_journal,
                                        &EXT4_I(inode)->jinode,
                                        new_size);
}

static void ext4_invalidatepage(struct page *page, unsigned long offset);

/*
 * Test whether an inode is a fast symlink.
 */
static int ext4_inode_is_fast_symlink(struct inode *inode)
{
        int ea_blocks = EXT4_I(inode)->i_file_acl ?
                (inode->i_sb->s_blocksize >> 9) : 0;

        return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
}

/*
 * The ext4 forget function must perform a revoke if we are freeing data
 * which has been journaled.  Metadata (e.g. indirect blocks) must be
 * revoked in all cases.
 *
 * "bh" may be NULL: a metadata block may have been freed from memory
 * but there may still be a record of it in the journal, and that record
 * still needs to be revoked.
 *
 * If the handle isn't valid we're not journaling, but we still need to
 * call into ext4_journal_revoke() to put the buffer head.
 */
int ext4_forget(handle_t *handle, int is_metadata, struct inode *inode,
                struct buffer_head *bh, ext4_fsblk_t blocknr)
{
        int err;

        might_sleep();

        BUFFER_TRACE(bh, "enter");

        jbd_debug(4, "forgetting bh %p: is_metadata = %d, mode %o, "
                  "data mode %x\n",
                  bh, is_metadata, inode->i_mode,
                  test_opt(inode->i_sb, DATA_FLAGS));

        /* Never use the revoke function if we are doing full data
         * journaling: there is no need to, and a V1 superblock won't
         * support it.  Otherwise, only skip the revoke on un-journaled
         * data blocks. */

        if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA ||
            (!is_metadata && !ext4_should_journal_data(inode))) {
                if (bh) {
                        BUFFER_TRACE(bh, "call jbd2_journal_forget");
                        return ext4_journal_forget(handle, bh);
                }
                return 0;
        }

        /*
         * data!=journal && (is_metadata || should_journal_data(inode))
         */
        BUFFER_TRACE(bh, "call ext4_journal_revoke");
        err = ext4_journal_revoke(handle, blocknr, bh);
        if (err)
                ext4_abort(inode->i_sb, __func__,
                           "error %d when attempting revoke", err);
        BUFFER_TRACE(bh, "exit");
        return err;
}
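
/*
 * Illustrative, compile-guarded sketch (not part of the original file):
 * freeing a single metadata block under a running handle.  Metadata must
 * always take the revoke path, and ext4_forget() makes that choice for
 * the caller.  The caller and its error handling are hypothetical.
 */
#if 0
static void example_free_one_meta_block(handle_t *handle, struct inode *inode,
                                        struct buffer_head *bh,
                                        ext4_fsblk_t blocknr)
{
        /* is_metadata == 1: ext4_forget() will revoke, not just forget */
        int err = ext4_forget(handle, 1, inode, bh, blocknr);

        if (!err)
                ext4_free_blocks(handle, inode, blocknr, 1, 1 /* metadata */);
}
#endif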

/*
 * Work out how many blocks we need to proceed with the next chunk of a
 * truncate transaction.
 */
static unsigned long blocks_for_truncate(struct inode *inode)
{
        ext4_lblk_t needed;

        needed = inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9);

        /* Give ourselves just enough room to cope with inodes in which
         * i_blocks is corrupt: we've seen disk corruptions in the past
         * which resulted in random data in an inode which looked enough
         * like a regular file for ext4 to try to delete it.  Things
         * will go a bit crazy if that happens, but at least we should
         * try not to panic the whole kernel. */
        if (needed < 2)
                needed = 2;

        /* But we need to bound the transaction so we don't overflow the
         * journal. */
        if (needed > EXT4_MAX_TRANS_DATA)
                needed = EXT4_MAX_TRANS_DATA;

        return EXT4_DATA_TRANS_BLOCKS(inode->i_sb) + needed;
}

/*
 * Truncate transactions can be complex and absolutely huge.  So we need to
 * be able to restart the transaction at a convenient checkpoint to make
 * sure we don't overflow the journal.
 *
 * start_transaction gets us a new handle for a truncate transaction,
 * and extend_transaction tries to extend the existing one a bit.  If
 * extend fails, we need to propagate the failure up and restart the
 * transaction in the top-level truncate loop. --sct
 */
static handle_t *start_transaction(struct inode *inode)
{
        handle_t *result;

        result = ext4_journal_start(inode, blocks_for_truncate(inode));
        if (!IS_ERR(result))
                return result;

        ext4_std_error(inode->i_sb, PTR_ERR(result));
        return result;
}

/*
 * Try to extend this transaction for the purposes of truncation.
 *
 * Returns 0 if we managed to create more room.  If we can't create more
 * room, and the transaction must be restarted, we return 1.
 */
static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
{
        if (!ext4_handle_valid(handle))
                return 0;
        if (ext4_handle_has_enough_credits(handle, EXT4_RESERVE_TRANS_BLOCKS+1))
                return 0;
        if (!ext4_journal_extend(handle, blocks_for_truncate(inode)))
                return 0;
        return 1;
}

/*
 * Restart the transaction associated with *handle.  This does a commit,
 * so before we call here everything must be consistently dirtied against
 * this transaction.
 */
int ext4_truncate_restart_trans(handle_t *handle, struct inode *inode,
                                int nblocks)
{
        int ret;

        /*
         * Drop i_data_sem to avoid a deadlock with ext4_get_blocks.  At
         * this moment, get_block can be called only for blocks inside
         * i_size, since the page cache has already been dropped and
         * writes are blocked by i_mutex.  So we can safely drop the
         * i_data_sem here.
         */
        BUG_ON(EXT4_JOURNAL(inode) == NULL);
        jbd_debug(2, "restarting handle %p\n", handle);
        up_write(&EXT4_I(inode)->i_data_sem);
        ret = ext4_journal_restart(handle, blocks_for_truncate(inode));
        down_write(&EXT4_I(inode)->i_data_sem);
        ext4_discard_preallocations(inode);

        return ret;
}
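
/*
 * Illustrative, compile-guarded sketch (not part of the original file):
 * how the three helpers above are meant to cooperate in a truncate-style
 * loop.  The worker and its termination predicate are hypothetical; the
 * real loop lives in ext4_truncate() and friends, and real callers hold
 * i_data_sem across the restart.
 */
#if 0
static void example_truncate_loop(struct inode *inode)
{
        handle_t *handle = start_transaction(inode);

        if (IS_ERR(handle))
                return;
        while (example_more_chunks_left(inode)) {       /* hypothetical */
                if (try_to_extend_transaction(handle, inode))
                        /* Could not extend: everything must already be
                         * consistently dirtied before we restart. */
                        ext4_truncate_restart_trans(handle, inode,
                                        blocks_for_truncate(inode));
                example_free_next_chunk(handle, inode); /* hypothetical */
        }
        ext4_journal_stop(handle);
}
#endif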

/*
 * Called at the last iput() if i_nlink is zero.
 */
void ext4_delete_inode(struct inode *inode)
{
        handle_t *handle;
        int err;

        if (ext4_should_order_data(inode))
                ext4_begin_ordered_truncate(inode, 0);
        truncate_inode_pages(&inode->i_data, 0);

        if (is_bad_inode(inode))
                goto no_delete;

        handle = ext4_journal_start(inode, blocks_for_truncate(inode)+3);
        if (IS_ERR(handle)) {
                ext4_std_error(inode->i_sb, PTR_ERR(handle));
                /*
                 * If we're going to skip the normal cleanup, we still need to
                 * make sure that the in-core orphan linked list is properly
                 * cleaned up.
                 */
                ext4_orphan_del(NULL, inode);
                goto no_delete;
        }

        if (IS_SYNC(inode))
                ext4_handle_sync(handle);
        inode->i_size = 0;
        err = ext4_mark_inode_dirty(handle, inode);
        if (err) {
                ext4_warning(inode->i_sb, __func__,
                             "couldn't mark inode dirty (err %d)", err);
                goto stop_handle;
        }
        if (inode->i_blocks)
                ext4_truncate(inode);

        /*
         * ext4_ext_truncate() doesn't reserve any slop when it
         * restarts journal transactions; therefore there may not be
         * enough credits left in the handle to remove the inode from
         * the orphan list and set the dtime field.
         */
        if (!ext4_handle_has_enough_credits(handle, 3)) {
                err = ext4_journal_extend(handle, 3);
                if (err > 0)
                        err = ext4_journal_restart(handle, 3);
                if (err != 0) {
                        ext4_warning(inode->i_sb, __func__,
                                     "couldn't extend journal (err %d)", err);
                stop_handle:
                        ext4_journal_stop(handle);
                        goto no_delete;
                }
        }

        /*
         * Kill off the orphan record which ext4_truncate created.
         * AKPM: I think this can be inside the above `if'.
         * Note that ext4_orphan_del() has to be able to cope with the
         * deletion of a non-existent orphan - this is because we don't
         * know if ext4_truncate() actually created an orphan record.
         * (Well, we could do this if we need to, but heck - it works)
         */
        ext4_orphan_del(handle, inode);
        EXT4_I(inode)->i_dtime = get_seconds();

        /*
         * One subtle ordering requirement: if anything has gone wrong
         * (transaction abort, IO errors, whatever), then we can still
         * do these next steps (the fs will already have been marked as
         * having errors), but we can't free the inode if the mark_dirty
         * fails.
         */
        if (ext4_mark_inode_dirty(handle, inode))
                /* If that failed, just do the required in-core inode clear. */
                clear_inode(inode);
        else
                ext4_free_inode(handle, inode);
        ext4_journal_stop(handle);
        return;
no_delete:
        clear_inode(inode);     /* We must guarantee clearing of inode... */
}

typedef struct {
        __le32  *p;
        __le32  key;
        struct buffer_head *bh;
} Indirect;

static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
{
        p->key = *(p->p = v);
        p->bh = bh;
}

/**
 *      ext4_block_to_path - parse the block number into array of offsets
 *      @inode: inode in question (we are only interested in its superblock)
 *      @i_block: block number to be parsed
 *      @offsets: array to store the offsets in
 *      @boundary: set this non-zero if the referred-to block is likely to be
 *              followed (on disk) by an indirect block.
 *
 *      To store the locations of a file's data, ext4 uses a data structure
 *      common to UNIX filesystems - a tree of pointers anchored in the
 *      inode, with data blocks at the leaves and indirect blocks in the
 *      intermediate nodes.  This function translates the block number into
 *      a path in that tree - the return value is the path length and
 *      @offsets[n] is the offset of the pointer to the (n+1)th node in the
 *      nth one.  If @block is out of range (negative or too large), a
 *      warning is printed and zero is returned.
 *
 *      Note: the function doesn't find the node addresses, so no IO is
 *      needed.  All we need to know is the capacity of the indirect blocks
 *      (taken from the inode->i_sb).
 */

/*
 * Portability note: the last comparison (check that we fit into triple
 * indirect block) is spelled differently, because otherwise on an
 * architecture with 32-bit longs and 8Kb pages we might get into trouble
 * if our filesystem had 8Kb blocks.  We might use long long, but that would
 * kill us on x86.  Oh, well, at least the sign propagation does not matter -
 * i_block would have to be negative in the very beginning, so we would not
 * get there at all.
 */

static int ext4_block_to_path(struct inode *inode,
                              ext4_lblk_t i_block,
                              ext4_lblk_t offsets[4], int *boundary)
{
        int ptrs = EXT4_ADDR_PER_BLOCK(inode->i_sb);
        int ptrs_bits = EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb);
        const long direct_blocks = EXT4_NDIR_BLOCKS,
                indirect_blocks = ptrs,
                double_blocks = (1 << (ptrs_bits * 2));
        int n = 0;
        int final = 0;

        if (i_block < direct_blocks) {
                offsets[n++] = i_block;
                final = direct_blocks;
        } else if ((i_block -= direct_blocks) < indirect_blocks) {
                offsets[n++] = EXT4_IND_BLOCK;
                offsets[n++] = i_block;
                final = ptrs;
        } else if ((i_block -= indirect_blocks) < double_blocks) {
                offsets[n++] = EXT4_DIND_BLOCK;
                offsets[n++] = i_block >> ptrs_bits;
                offsets[n++] = i_block & (ptrs - 1);
                final = ptrs;
        } else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
                offsets[n++] = EXT4_TIND_BLOCK;
                offsets[n++] = i_block >> (ptrs_bits * 2);
                offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
                offsets[n++] = i_block & (ptrs - 1);
                final = ptrs;
        } else {
                ext4_warning(inode->i_sb, "ext4_block_to_path",
                             "block %lu > max in inode %lu",
                             i_block + direct_blocks +
                             indirect_blocks + double_blocks, inode->i_ino);
        }
        if (boundary)
                *boundary = final - 1 - (i_block & (ptrs - 1));
        return n;
}
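
/*
 * Worked example (illustrative; assumes a 4KB block size, so
 * EXT4_ADDR_PER_BLOCK == 1024 and ptrs_bits == 10):
 *
 *      i_block = 5      -> offsets = { 5 }                    depth 1 (direct)
 *      i_block = 200    -> offsets = { EXT4_IND_BLOCK, 188 }  depth 2
 *                          (200 - 12 = 188)
 *      i_block = 100000 -> offsets = { EXT4_DIND_BLOCK, 96, 660 }  depth 3
 *                          (100000 - 12 - 1024 = 98964 = 96 * 1024 + 660)
 *
 * These hypothetical values just instantiate the shifts and masks used in
 * ext4_block_to_path() and can be verified by hand.
 */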

static int __ext4_check_blockref(const char *function, struct inode *inode,
                                 __le32 *p, unsigned int max)
{
        __le32 *bref = p;
        unsigned int blk;

        while (bref < p+max) {
                blk = le32_to_cpu(*bref++);
                if (blk &&
                    unlikely(!ext4_data_block_valid(EXT4_SB(inode->i_sb),
                                                    blk, 1))) {
                        ext4_error(inode->i_sb, function,
                                   "invalid block reference %u "
                                   "in inode #%lu", blk, inode->i_ino);
                        return -EIO;
                }
        }
        return 0;
}


#define ext4_check_indirect_blockref(inode, bh)                         \
        __ext4_check_blockref(__func__, inode, (__le32 *)(bh)->b_data,  \
                              EXT4_ADDR_PER_BLOCK((inode)->i_sb))

#define ext4_check_inode_blockref(inode)                                \
        __ext4_check_blockref(__func__, inode, EXT4_I(inode)->i_data,   \
                              EXT4_NDIR_BLOCKS)

/**
 *      ext4_get_branch - read the chain of indirect blocks leading to data
 *      @inode: inode in question
 *      @depth: depth of the chain (1 - direct pointer, etc.)
 *      @offsets: offsets of pointers in inode/indirect blocks
 *      @chain: place to store the result
 *      @err: here we store the error value
 *
 *      Function fills the array of triples <key, p, bh> and returns %NULL
 *      if everything went OK or the pointer to the last filled triple
 *      (incomplete one) otherwise.  Upon the return chain[i].key contains
 *      the number of (i+1)-th block in the chain (as it is stored in memory,
 *      i.e. little-endian 32-bit), chain[i].p contains the address of that
 *      number (it points into struct inode for i==0 and into the bh->b_data
 *      for i>0) and chain[i].bh points to the buffer_head of i-th indirect
 *      block for i>0 and NULL for i==0.  In other words, it holds the block
 *      numbers of the chain, the addresses they were taken from (and where
 *      we can verify that the chain did not change) and the buffer_heads
 *      hosting these numbers.
 *
 *      Function stops when it stumbles upon a zero pointer (absent block)
 *              (pointer to last triple returned, *@err == 0)
 *      or when it gets an IO error reading an indirect block
 *              (ditto, *@err == -EIO)
 *      or when it reads all @depth-1 indirect blocks successfully and finds
 *      the whole chain, all the way to the data (returns %NULL, *err == 0).
 *
 *      Needs to be called with
 *      down_read(&EXT4_I(inode)->i_data_sem)
 */
static Indirect *ext4_get_branch(struct inode *inode, int depth,
                                 ext4_lblk_t *offsets,
                                 Indirect chain[4], int *err)
{
        struct super_block *sb = inode->i_sb;
        Indirect *p = chain;
        struct buffer_head *bh;

        *err = 0;
        /* i_data is not going away, no lock needed */
        add_chain(chain, NULL, EXT4_I(inode)->i_data + *offsets);
        if (!p->key)
                goto no_block;
        while (--depth) {
                bh = sb_getblk(sb, le32_to_cpu(p->key));
                if (unlikely(!bh))
                        goto failure;

                if (!bh_uptodate_or_lock(bh)) {
                        if (bh_submit_read(bh) < 0) {
                                put_bh(bh);
                                goto failure;
                        }
                        /* validate block references */
                        if (ext4_check_indirect_blockref(inode, bh)) {
                                put_bh(bh);
                                goto failure;
                        }
                }

                add_chain(++p, bh, (__le32 *)bh->b_data + *++offsets);
                /* Reader: end */
                if (!p->key)
                        goto no_block;
        }
        return NULL;

failure:
        *err = -EIO;
no_block:
        return p;
}

/**
 *      ext4_find_near - find a place for allocation with sufficient locality
 *      @inode: owner
 *      @ind: descriptor of indirect block.
 *
 *      This function returns the preferred place for block allocation.
 *      It is used when the heuristic for sequential allocation fails.
 *      Rules are:
 *        + if there is a block to the left of our position - allocate near it.
 *        + if pointer will live in indirect block - allocate near that block.
 *        + if pointer will live in inode - allocate in the same
 *          cylinder group.
 *
 *      In the latter case we colour the starting block by the caller's PID to
 *      prevent it from clashing with concurrent allocations for a different
 *      inode in the same block group.  The PID is used here so that
 *      functionally related files will be close-by on-disk.
 *
 *      Caller must make sure that @ind is valid and will stay that way.
 */
static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind)
{
        struct ext4_inode_info *ei = EXT4_I(inode);
        __le32 *start = ind->bh ? (__le32 *) ind->bh->b_data : ei->i_data;
        __le32 *p;
        ext4_fsblk_t bg_start;
        ext4_fsblk_t last_block;
        ext4_grpblk_t colour;
        ext4_group_t block_group;
        int flex_size = ext4_flex_bg_size(EXT4_SB(inode->i_sb));

        /* Try to find previous block */
        for (p = ind->p - 1; p >= start; p--) {
                if (*p)
                        return le32_to_cpu(*p);
        }

        /* No such thing, so let's try location of indirect block */
        if (ind->bh)
                return ind->bh->b_blocknr;

        /*
         * Is it going to be referred to from the inode itself?  OK, just
         * put it into the same cylinder group then.
         */
        block_group = ei->i_block_group;
        if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) {
                block_group &= ~(flex_size-1);
                if (S_ISREG(inode->i_mode))
                        block_group++;
        }
        bg_start = ext4_group_first_block_no(inode->i_sb, block_group);
        last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;

        /*
         * If we are doing delayed allocation, we don't need to take the
         * colour into account.
         */
        if (test_opt(inode->i_sb, DELALLOC))
                return bg_start;

        if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
                colour = (current->pid % 16) *
                        (EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
        else
                colour = (current->pid % 16) * ((last_block - bg_start) / 16);
        return bg_start + colour;
}
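
/*
 * Worked example (illustrative; assumes 32768 blocks per group): a caller
 * with the hypothetical PID 4321 lands in slice 4321 % 16 == 1 of 16, so
 * colour = 1 * (32768 / 16) = 2048 and the search starts 2048 blocks past
 * the start of the group.  Any two processes whose PIDs differ modulo 16
 * get disjoint starting offsets.
 */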

/**
 *      ext4_find_goal - find a preferred place for allocation.
 *      @inode: owner
 *      @block:  block we want
 *      @partial: pointer to the last triple within a chain
 *
 *      Normally this function finds the preferred place for block allocation
 *      and returns it.
 *      Because this is only used for non-extent files, we limit the block nr
 *      to 32 bits.
 */
static ext4_fsblk_t ext4_find_goal(struct inode *inode, ext4_lblk_t block,
                                   Indirect *partial)
{
        ext4_fsblk_t goal;

        /*
         * XXX need to get goal block from mballoc's data structures
         */

        goal = ext4_find_near(inode, partial);
        goal = goal & EXT4_MAX_BLOCK_FILE_PHYS;
        return goal;
}

/**
 *      ext4_blks_to_allocate: Look up the block map and count the number
 *      of direct blocks that need to be allocated for the given branch.
 *
 *      @branch: chain of indirect blocks
 *      @k: number of blocks needed for indirect blocks
 *      @blks: number of data blocks to be mapped.
 *      @blocks_to_boundary:  the offset in the indirect block
 *
 *      return the total number of blocks to be allocated, including the
 *      direct and indirect blocks.
 */
static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned int blks,
                                 int blocks_to_boundary)
{
        unsigned int count = 0;

        /*
         * Simple case: the [t,d]indirect block(s) have not been allocated
         * yet, so it is clear that the blocks on that path have not been
         * allocated either.
         */
        if (k > 0) {
                /* right now we don't handle cross boundary allocation */
                if (blks < blocks_to_boundary + 1)
                        count += blks;
                else
                        count += blocks_to_boundary + 1;
                return count;
        }

        count++;
        while (count < blks && count <= blocks_to_boundary &&
               le32_to_cpu(*(branch[0].p + count)) == 0) {
                count++;
        }
        return count;
}
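
/*
 * Worked example (illustrative): a request for blks = 10 data blocks that
 * also needs a new indirect block (k > 0) with blocks_to_boundary = 3 is
 * trimmed to blocks_to_boundary + 1 = 4 blocks, because cross-boundary
 * allocation is not handled here.  With k == 0, counting instead stops
 * early at the first already-mapped slot after branch[0].p.
 */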

/**
 *      ext4_alloc_blocks: multiple allocate blocks needed for a branch
 *      @indirect_blks: the number of blocks that need to be allocated for
 *                      the indirect blocks
 *
 *      @new_blocks: on return it will store the new block numbers for
 *      the indirect blocks (if needed) and the first direct block,
 *      @blks:  on return it will store the total number of allocated
 *              direct blocks
 */
static int ext4_alloc_blocks(handle_t *handle, struct inode *inode,
                             ext4_lblk_t iblock, ext4_fsblk_t goal,
                             int indirect_blks, int blks,
                             ext4_fsblk_t new_blocks[4], int *err)
{
        struct ext4_allocation_request ar;
        int target, i;
        unsigned long count = 0, blk_allocated = 0;
        int index = 0;
        ext4_fsblk_t current_block = 0;
        int ret = 0;

        /*
         * Here we try to allocate the requested multiple blocks at once,
         * on a best-effort basis.
         * To build a branch, we should allocate blocks for
         * the indirect blocks (if not allocated yet), and at least
         * the first direct block of this branch.  That's the minimum
         * number of blocks we need to allocate (required).
         */
        /* first we try to allocate the indirect blocks */
        target = indirect_blks;
        while (target > 0) {
                count = target;
                /* allocating blocks for indirect blocks and direct blocks */
                current_block = ext4_new_meta_blocks(handle, inode,
                                                     goal, &count, err);
                if (*err)
                        goto failed_out;

                BUG_ON(current_block + count > EXT4_MAX_BLOCK_FILE_PHYS);

                target -= count;
                /* allocate blocks for indirect blocks */
                while (index < indirect_blks && count) {
                        new_blocks[index++] = current_block++;
                        count--;
                }
                if (count > 0) {
                        /*
                         * save the new block number
                         * for the first direct block
                         */
                        new_blocks[index] = current_block;
                        printk(KERN_INFO "%s returned more blocks than "
                               "requested\n", __func__);
                        WARN_ON(1);
                        break;
                }
        }

        target = blks - count;
        blk_allocated = count;
        if (!target)
                goto allocated;
        /* Now allocate data blocks */
        memset(&ar, 0, sizeof(ar));
        ar.inode = inode;
        ar.goal = goal;
        ar.len = target;
        ar.logical = iblock;
        if (S_ISREG(inode->i_mode))
                /* enable in-core preallocation only for regular files */
                ar.flags = EXT4_MB_HINT_DATA;

        current_block = ext4_mb_new_blocks(handle, &ar, err);
        BUG_ON(current_block + ar.len > EXT4_MAX_BLOCK_FILE_PHYS);

        if (*err && (target == blks)) {
                /*
                 * if the allocation failed and we didn't allocate
                 * any blocks before
                 */
                goto failed_out;
        }
        if (!*err) {
                if (target == blks) {
                        /*
                         * save the new block number
                         * for the first direct block
                         */
                        new_blocks[index] = current_block;
                }
                blk_allocated += ar.len;
        }
allocated:
        /* total number of blocks allocated for direct blocks */
        ret = blk_allocated;
        *err = 0;
        return ret;
failed_out:
        for (i = 0; i < index; i++)
                ext4_free_blocks(handle, inode, new_blocks[i], 1, 0);
        return ret;
}

/**
 *      ext4_alloc_branch - allocate and set up a chain of blocks.
 *      @inode: owner
 *      @indirect_blks: number of allocated indirect blocks
 *      @blks: number of allocated direct blocks
 *      @offsets: offsets (in the blocks) to store the pointers to next.
 *      @branch: place to store the chain in.
 *
 *      This function allocates blocks, zeroes out all but the last one,
 *      links them into chain and (if we are synchronous) writes them to disk.
 *      In other words, it prepares a branch that can be spliced onto the
 *      inode.  It stores the information about that chain in the branch[],
 *      in the same format as ext4_get_branch() would do.  We are calling it
 *      after we had read the existing part of chain and partial points to
 *      the last triple of that (one with zero ->key).  Upon the exit we have
 *      the same picture as after the successful ext4_get_block(), except
 *      that in one place chain is disconnected - *branch->p is still zero
 *      (we did not set the last link), but branch->key contains the number
 *      that should be placed into *branch->p to fill that gap.
 *
 *      If allocation fails we free all blocks we've allocated (and forget
 *      their buffer_heads) and return the error value from the failed
 *      ext4_alloc_block() (normally -ENOSPC).  Otherwise we set the chain
 *      as described above and return 0.
 */
static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
                             ext4_lblk_t iblock, int indirect_blks,
                             int *blks, ext4_fsblk_t goal,
                             ext4_lblk_t *offsets, Indirect *branch)
{
        int blocksize = inode->i_sb->s_blocksize;
        int i, n = 0;
        int err = 0;
        struct buffer_head *bh;
        int num;
        ext4_fsblk_t new_blocks[4];
        ext4_fsblk_t current_block;

        num = ext4_alloc_blocks(handle, inode, iblock, goal, indirect_blks,
                                *blks, new_blocks, &err);
        if (err)
                return err;

        branch[0].key = cpu_to_le32(new_blocks[0]);
        /*
         * metadata blocks and data blocks are allocated.
         */
        for (n = 1; n <= indirect_blks; n++) {
                /*
                 * Get buffer_head for parent block, zero it out
                 * and set the pointer to new one, then send
                 * parent to disk.
                 */
                bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
                branch[n].bh = bh;
                lock_buffer(bh);
                BUFFER_TRACE(bh, "call get_create_access");
                err = ext4_journal_get_create_access(handle, bh);
                if (err) {
                        /* Don't brelse(bh) here; it's done in
                         * ext4_journal_forget() below */
                        unlock_buffer(bh);
                        goto failed;
                }

                memset(bh->b_data, 0, blocksize);
                branch[n].p = (__le32 *) bh->b_data + offsets[n];
                branch[n].key = cpu_to_le32(new_blocks[n]);
                *branch[n].p = branch[n].key;
                if (n == indirect_blks) {
                        current_block = new_blocks[n];
                        /*
                         * End of chain, update the last new metablock of
                         * the chain to point to the newly allocated
                         * data block numbers
                         */
                        for (i = 1; i < num; i++)
                                *(branch[n].p + i) = cpu_to_le32(++current_block);
                }
                BUFFER_TRACE(bh, "marking uptodate");
                set_buffer_uptodate(bh);
                unlock_buffer(bh);

                BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
                err = ext4_handle_dirty_metadata(handle, inode, bh);
                if (err)
                        goto failed;
        }
        *blks = num;
        return err;
failed:
        /* Allocation failed, free what we already allocated */
        for (i = 1; i <= n; i++) {
                BUFFER_TRACE(branch[i].bh, "call jbd2_journal_forget");
                ext4_journal_forget(handle, branch[i].bh);
        }
        for (i = 0; i < indirect_blks; i++)
                ext4_free_blocks(handle, inode, new_blocks[i], 1, 0);

        ext4_free_blocks(handle, inode, new_blocks[i], num, 0);

        return err;
}
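
/*
 * Illustrative picture (not from the original file): with two new indirect
 * levels, ext4_alloc_branch() leaves the tree as
 *
 *   *partial->p == 0  -X->  [new_blocks[0]]  -->  [new_blocks[1]]  --> data
 *
 * where -X-> is the single unwritten pointer.  branch[0].key holds
 * new_blocks[0]; ext4_splice_branch() stores it through partial->p, making
 * the whole branch reachable at once.
 */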

/**
 *      ext4_splice_branch - splice the allocated branch onto inode.
 *      @inode: owner
 *      @block: (logical) number of block we are adding
 *      @chain: chain of indirect blocks (with a missing link - see
 *              ext4_alloc_branch)
 *      @where: location of missing link
 *      @num:   number of indirect blocks we are adding
 *      @blks:  number of direct blocks we are adding
 *
 *      This function fills the missing link and does all housekeeping needed
 *      in inode (->i_blocks, etc.).  In case of success we end up with the
 *      full chain to the new block and return 0.
 */
static int ext4_splice_branch(handle_t *handle, struct inode *inode,
                              ext4_lblk_t block, Indirect *where, int num,
                              int blks)
{
        int i;
        int err = 0;
        ext4_fsblk_t current_block;

        /*
         * If we're splicing into a [td]indirect block (as opposed to the
         * inode) then we need to get write access to the [td]indirect block
         * before the splice.
         */
        if (where->bh) {
                BUFFER_TRACE(where->bh, "get_write_access");
                err = ext4_journal_get_write_access(handle, where->bh);
                if (err)
                        goto err_out;
        }
        /* That's it */

        *where->p = where->key;

        /*
         * Update the host buffer_head or inode to point to the
         * just-allocated direct blocks
         */
        if (num == 0 && blks > 1) {
                current_block = le32_to_cpu(where->key) + 1;
                for (i = 1; i < blks; i++)
                        *(where->p + i) = cpu_to_le32(current_block++);
        }

        /* We are done with atomic stuff, now do the rest of housekeeping */
        /* had we spliced it onto an indirect block? */
        if (where->bh) {
                /*
                 * If we spliced it onto an indirect block, we haven't
                 * altered the inode.  Note however that if it is being
                 * spliced onto an indirect block at the very end of the
                 * file (the file is growing) then we *will* alter the
                 * inode to reflect the new i_size.  But that is not done
                 * here - it is done in
                 * generic_commit_write->__mark_inode_dirty->ext4_dirty_inode.
                 */
                jbd_debug(5, "splicing indirect only\n");
                BUFFER_TRACE(where->bh, "call ext4_handle_dirty_metadata");
                err = ext4_handle_dirty_metadata(handle, inode, where->bh);
                if (err)
                        goto err_out;
        } else {
                /*
                 * OK, we spliced it into the inode itself on a direct block.
                 */
                ext4_mark_inode_dirty(handle, inode);
                jbd_debug(5, "splicing direct\n");
        }
        return err;

err_out:
        for (i = 1; i <= num; i++) {
                BUFFER_TRACE(where[i].bh, "call jbd2_journal_forget");
                ext4_journal_forget(handle, where[i].bh);
                ext4_free_blocks(handle, inode,
                                 le32_to_cpu(where[i-1].key), 1, 0);
        }
        ext4_free_blocks(handle, inode, le32_to_cpu(where[num].key), blks, 0);

        return err;
}

/*
 * The ext4_ind_get_blocks() function handles non-extents inodes
 * (i.e., using the traditional indirect/double-indirect i_blocks
 * scheme) for ext4_get_blocks().
 *
 * Allocation strategy is simple: if we have to allocate something, we will
 * have to go the whole way to leaf.  So let's do it before attaching anything
 * to tree, set linkage between the newborn blocks, write them if sync is
 * required, recheck the path, free and repeat if check fails, otherwise
 * set the last missing link (that will protect us from any truncate-generated
 * removals - all blocks on the path are immune now) and possibly force the
 * write on the parent block.
 * That has a nice additional property: no special recovery from the failed
 * allocations is needed - we simply release blocks and do not touch anything
 * reachable from inode.
 *
 * `handle' can be NULL if create == 0.
 *
 * return > 0, # of blocks mapped or allocated.
 * return = 0, if plain lookup failed.
 * return < 0, error case.
 *
 * The ext4_ind_get_blocks() function should be called with
 * down_write(&EXT4_I(inode)->i_data_sem) if allocating filesystem
 * blocks (i.e., flags has EXT4_GET_BLOCKS_CREATE set) or
 * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system
 * blocks.
 */
static int ext4_ind_get_blocks(handle_t *handle, struct inode *inode,
                               ext4_lblk_t iblock, unsigned int maxblocks,
                               struct buffer_head *bh_result,
                               int flags)
{
        int err = -EIO;
        ext4_lblk_t offsets[4];
        Indirect chain[4];
        Indirect *partial;
        ext4_fsblk_t goal;
        int indirect_blks;
        int blocks_to_boundary = 0;
        int depth;
        int count = 0;
        ext4_fsblk_t first_block = 0;

        J_ASSERT(!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL));
        J_ASSERT(handle != NULL || (flags & EXT4_GET_BLOCKS_CREATE) == 0);
        depth = ext4_block_to_path(inode, iblock, offsets,
                                   &blocks_to_boundary);

        if (depth == 0)
                goto out;

        partial = ext4_get_branch(inode, depth, offsets, chain, &err);

        /* Simplest case - block found, no allocation needed */
        if (!partial) {
                first_block = le32_to_cpu(chain[depth - 1].key);
                clear_buffer_new(bh_result);
                count++;
                /* map more blocks */
                while (count < maxblocks && count <= blocks_to_boundary) {
                        ext4_fsblk_t blk;

                        blk = le32_to_cpu(*(chain[depth-1].p + count));

                        if (blk == first_block + count)
                                count++;
                        else
                                break;
                }
                goto got_it;
        }

        /* Next simple case - plain lookup or failed read of indirect block */
        if ((flags & EXT4_GET_BLOCKS_CREATE) == 0 || err == -EIO)
                goto cleanup;

        /*
         * Okay, we need to do block allocation.
         */
        goal = ext4_find_goal(inode, iblock, partial);

        /* the number of blocks we need to allocate for [d,t]indirect blocks */
        indirect_blks = (chain + depth) - partial - 1;

        /*
         * Next look up the indirect map to count the total number of
         * direct blocks to allocate for this branch.
         */
        count = ext4_blks_to_allocate(partial, indirect_blks,
                                      maxblocks, blocks_to_boundary);
        /*
         * Block out ext4_truncate while we alter the tree
         */
        err = ext4_alloc_branch(handle, inode, iblock, indirect_blks,
                                &count, goal,
                                offsets + (partial - chain), partial);

        /*
         * The ext4_splice_branch call will free and forget any buffers
         * on the new chain if there is a failure, but that risks using
         * up transaction credits, especially for bitmaps where the
         * credits cannot be returned.  Can we handle this somehow?  We
         * may need to return -EAGAIN upwards in the worst case. --sct
         */
        if (!err)
                err = ext4_splice_branch(handle, inode, iblock,
                                         partial, indirect_blks, count);
        else
                goto cleanup;

        set_buffer_new(bh_result);
got_it:
        map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
        if (count > blocks_to_boundary)
                set_buffer_boundary(bh_result);
        err = count;
        /* Clean up and exit */
        partial = chain + depth - 1;    /* the whole chain */
cleanup:
        while (partial > chain) {
                BUFFER_TRACE(partial->bh, "call brelse");
                brelse(partial->bh);
                partial--;
        }
        BUFFER_TRACE(bh_result, "returned");
out:
        return err;
}
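
/*
 * Worked example (illustrative): for a depth-3 lookup where
 * ext4_get_branch() stopped at the double-indirect level, partial points
 * at chain + 1, so indirect_blks = (chain + 3) - (chain + 1) - 1 = 1: one
 * new indirect block plus the direct blocks counted by
 * ext4_blks_to_allocate() must be allocated before the branch can be
 * spliced in.
 */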

qsize_t ext4_get_reserved_space(struct inode *inode)
{
        unsigned long long total;

        spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
        total = EXT4_I(inode)->i_reserved_data_blocks +
                EXT4_I(inode)->i_reserved_meta_blocks;
        spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);

        return total;
}

/*
 * Calculate the number of metadata blocks that need to be reserved
 * to allocate @blocks for a non-extent file
 */
static int ext4_indirect_calc_metadata_amount(struct inode *inode, int blocks)
{
        int icap = EXT4_ADDR_PER_BLOCK(inode->i_sb);
        int ind_blks, dind_blks, tind_blks;

        /* number of new indirect blocks needed */
        ind_blks = (blocks + icap - 1) / icap;

        dind_blks = (ind_blks + icap - 1) / icap;

        tind_blks = 1;

        return ind_blks + dind_blks + tind_blks;
}

/*
 * Calculate the number of metadata blocks that need to be reserved
 * to allocate the given number of blocks
 */
static int ext4_calc_metadata_amount(struct inode *inode, int blocks)
{
        if (!blocks)
                return 0;

        if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)
                return ext4_ext_calc_metadata_amount(inode, blocks);

        return ext4_indirect_calc_metadata_amount(inode, blocks);
}
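
/*
 * Worked example (illustrative; assumes a 4KB block size, icap == 1024):
 * reserving for blocks = 1000 data blocks of a non-extent file gives
 * ind_blks = ceil(1000/1024) = 1, dind_blks = ceil(1/1024) = 1 and
 * tind_blks = 1, i.e. 3 metadata blocks - a deliberately pessimistic upper
 * bound, since the worst case assumes none of the indirect levels exist yet.
 */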

static void ext4_da_update_reserve_space(struct inode *inode, int used)
{
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
        int total, mdb, mdb_free;

        spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
        /* recalculate the number of metablocks that still need to be
         * reserved */
        total = EXT4_I(inode)->i_reserved_data_blocks - used;
        mdb = ext4_calc_metadata_amount(inode, total);

        /* figure out how many metablocks to release */
        BUG_ON(mdb > EXT4_I(inode)->i_reserved_meta_blocks);
        mdb_free = EXT4_I(inode)->i_reserved_meta_blocks - mdb;

        if (mdb_free) {
                /* Account for allocated meta_blocks */
                mdb_free -= EXT4_I(inode)->i_allocated_meta_blocks;

                /* update fs dirty blocks counter */
                percpu_counter_sub(&sbi->s_dirtyblocks_counter, mdb_free);
                EXT4_I(inode)->i_allocated_meta_blocks = 0;
                EXT4_I(inode)->i_reserved_meta_blocks = mdb;
        }

        /* update per-inode reservations */
        BUG_ON(used > EXT4_I(inode)->i_reserved_data_blocks);
        EXT4_I(inode)->i_reserved_data_blocks -= used;
        spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);

        /*
         * free the over-booked quota for metadata blocks
         */
        if (mdb_free)
                vfs_dq_release_reservation_block(inode, mdb_free);

        /*
         * If we have done all the pending block allocations and if
         * there aren't any writers on the inode, we can discard the
         * inode's preallocations.
         */
        if (!total && (atomic_read(&inode->i_writecount) == 0))
                ext4_discard_preallocations(inode);
}

static int check_block_validity(struct inode *inode, const char *msg,
                                sector_t logical, sector_t phys, int len)
{
        if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), phys, len)) {
                ext4_error(inode->i_sb, msg,
                           "inode #%lu logical block %llu mapped to %llu "
                           "(size %d)", inode->i_ino,
                           (unsigned long long) logical,
                           (unsigned long long) phys, len);
                return -EIO;
        }
        return 0;
}

/*
 * Return the number of contiguous dirty pages in a given inode
 * starting at page frame idx.
 */
static pgoff_t ext4_num_dirty_pages(struct inode *inode, pgoff_t idx,
                                    unsigned int max_pages)
{
        struct address_space *mapping = inode->i_mapping;
        pgoff_t index;
        struct pagevec pvec;
        pgoff_t num = 0;
        int i, nr_pages, done = 0;

        if (max_pages == 0)
                return 0;
        pagevec_init(&pvec, 0);
        while (!done) {
                index = idx;
                nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
                                              PAGECACHE_TAG_DIRTY,
                                              (pgoff_t)PAGEVEC_SIZE);
                if (nr_pages == 0)
                        break;
                for (i = 0; i < nr_pages; i++) {
                        struct page *page = pvec.pages[i];
                        struct buffer_head *bh, *head;

                        lock_page(page);
                        if (unlikely(page->mapping != mapping) ||
                            !PageDirty(page) ||
                            PageWriteback(page) ||
                            page->index != idx) {
                                done = 1;
                                unlock_page(page);
                                break;
                        }
                        if (page_has_buffers(page)) {
                                bh = head = page_buffers(page);
                                do {
                                        if (!buffer_delay(bh) &&
                                            !buffer_unwritten(bh))
                                                done = 1;
                                        bh = bh->b_this_page;
                                } while (!done && (bh != head));
                        }
                        unlock_page(page);
                        if (done)
                                break;
                        idx++;
                        num++;
                        if (num >= max_pages)
                                break;
                }
                pagevec_release(&pvec);
        }
        return num;
}

/*
 * The ext4_get_blocks() function tries to look up the requested blocks,
 * and returns if the blocks are already mapped.
 *
 * Otherwise it takes the write lock of i_data_sem, allocates blocks,
 * stores the allocated blocks in the result buffer head and marks it
 * mapped.
 *
 * If the file is extents based, it will call ext4_ext_get_blocks();
 * otherwise it calls ext4_ind_get_blocks() to handle indirect-mapping
 * based files.
 *
 * On success, it returns the number of blocks being mapped or allocated.
 * If create == 0 and the blocks are pre-allocated and uninitialized,
 * the result buffer head is unmapped.  If create == 1, it will make sure
 * the buffer head is mapped.
 *
 * It returns 0 if a plain look up failed (blocks have not been allocated);
 * in that case, the buffer head is unmapped.
 *
 * It returns the error in case of allocation failure.
 */
int ext4_get_blocks(handle_t *handle, struct inode *inode, sector_t block,
                    unsigned int max_blocks, struct buffer_head *bh,
                    int flags)
{
        int retval;

        clear_buffer_mapped(bh);
        clear_buffer_unwritten(bh);

        ext_debug("ext4_get_blocks(): inode %lu, flag %d, max_blocks %u,"
                  "logical block %lu\n", inode->i_ino, flags, max_blocks,
                  (unsigned long)block);
        /*
         * Try to see if we can get the block without requesting a new
         * file system block.
         */
        down_read((&EXT4_I(inode)->i_data_sem));
        if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) {
                retval = ext4_ext_get_blocks(handle, inode, block, max_blocks,
                                             bh, 0);
        } else {
                retval = ext4_ind_get_blocks(handle, inode, block, max_blocks,
                                             bh, 0);
        }
        up_read((&EXT4_I(inode)->i_data_sem));

        if (retval > 0 && buffer_mapped(bh)) {
                int ret = check_block_validity(inode, "file system corruption",
                                               block, bh->b_blocknr, retval);
                if (ret != 0)
                        return ret;
        }

        /* If it is only a block(s) look up */
        if ((flags & EXT4_GET_BLOCKS_CREATE) == 0)
                return retval;

        /*
         * Return if the blocks have already been allocated.
         *
         * Note that if blocks have been preallocated,
         * ext4_ext_get_blocks() returns with create == 0
         * and the buffer head unmapped.
         */
        if (retval > 0 && buffer_mapped(bh))
                return retval;

        /*
         * When we call get_blocks without the create flag, the
         * BH_Unwritten flag could have gotten set if the blocks
         * requested were part of an uninitialized extent.  We need to
         * clear this flag now that we are committed to convert all or
         * part of the uninitialized extent to be an initialized
         * extent.  This is because we need to avoid the combination
         * of BH_Unwritten and BH_Mapped flags being simultaneously
         * set on the buffer_head.
         */
        clear_buffer_unwritten(bh);

        /*
         * New block allocation and/or writing to an uninitialized extent
         * will possibly result in updating i_data, so we take
         * the write lock of i_data_sem, and call get_blocks()
         * with create == 1 flag.
         */
        down_write((&EXT4_I(inode)->i_data_sem));

        /*
         * if the caller is from delayed allocation writeout path
         * we have already reserved fs blocks for allocation
         * let the underlying get_block() function know to
         * avoid double accounting
         */
        if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
                EXT4_I(inode)->i_delalloc_reserved_flag = 1;
        /*
         * We need to check for EXT4 here because migrate
         * could have changed the inode type in between
         */
        if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) {
                retval = ext4_ext_get_blocks(handle, inode, block, max_blocks,
                                             bh, flags);
        } else {
                retval = ext4_ind_get_blocks(handle, inode, block,
                                             max_blocks, bh, flags);

                if (retval > 0 && buffer_new(bh)) {
                        /*
                         * We allocated new blocks which will result in
                         * i_data's format changing.  Force the migrate
                         * to fail by clearing migrate flags
                         */
                        EXT4_I(inode)->i_state &= ~EXT4_STATE_EXT_MIGRATE;
                }
        }

        if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
                EXT4_I(inode)->i_delalloc_reserved_flag = 0;

        /*
         * Update reserved blocks/metadata blocks after successful
         * block allocation which had been deferred till now.
         */
        if ((retval > 0) && (flags & EXT4_GET_BLOCKS_UPDATE_RESERVE_SPACE))
                ext4_da_update_reserve_space(inode, retval);

        up_write((&EXT4_I(inode)->i_data_sem));
        if (retval > 0 && buffer_mapped(bh)) {
                int ret = check_block_validity(inode, "file system "
                                               "corruption after allocation",
                                               block, bh->b_blocknr, retval);
                if (ret != 0)
                        return ret;
        }
        return retval;
}

/* Maximum number of blocks we map for direct IO at once. */
#define DIO_MAX_BLOCKS 4096

int ext4_get_block(struct inode *inode, sector_t iblock,
                   struct buffer_head *bh_result, int create)
{
        handle_t *handle = ext4_journal_current_handle();
        int ret = 0, started = 0;
        unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
        int dio_credits;

        if (create && !handle) {
                /* Direct IO write... */
                if (max_blocks > DIO_MAX_BLOCKS)
                        max_blocks = DIO_MAX_BLOCKS;
                dio_credits = ext4_chunk_trans_blocks(inode, max_blocks);
                handle = ext4_journal_start(inode, dio_credits);
                if (IS_ERR(handle)) {
                        ret = PTR_ERR(handle);
                        goto out;
                }
                started = 1;
        }

        ret = ext4_get_blocks(handle, inode, iblock, max_blocks, bh_result,
                              create ? EXT4_GET_BLOCKS_CREATE : 0);
        if (ret > 0) {
                bh_result->b_size = (ret << inode->i_blkbits);
                ret = 0;
        }
        if (started)
                ext4_journal_stop(handle);
out:
        return ret;
}
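
/*
 * Illustrative, compile-guarded sketch (not part of the original file):
 * ext4_get_block() has the standard get_block_t shape, so a plain lookup
 * of one block at logical block 7 could look like this (hypothetical
 * caller; create == 0, so no handle is needed).
 */
#if 0
static int example_lookup_block_7(struct inode *inode, ext4_fsblk_t *phys)
{
        struct buffer_head bh = { .b_size = inode->i_sb->s_blocksize };
        int err = ext4_get_block(inode, 7, &bh, 0);

        if (!err && buffer_mapped(&bh))
                *phys = bh.b_blocknr;   /* on-disk block number */
        return err;
}
#endif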

/*
 * `handle' can be NULL if create is zero
 */
struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
                                ext4_lblk_t block, int create, int *errp)
{
        struct buffer_head dummy;
        int fatal = 0, err;
        int flags = 0;

        J_ASSERT(handle != NULL || create == 0);

        dummy.b_state = 0;
        dummy.b_blocknr = -1000;
        buffer_trace_init(&dummy.b_history);
        if (create)
                flags |= EXT4_GET_BLOCKS_CREATE;
        err = ext4_get_blocks(handle, inode, block, 1, &dummy, flags);
        /*
         * ext4_get_blocks() returns the number of blocks mapped, 0 in
         * the case of a HOLE.
         */
        if (err > 0) {
                if (err > 1)
                        WARN_ON(1);
                err = 0;
        }
        *errp = err;
        if (!err && buffer_mapped(&dummy)) {
                struct buffer_head *bh;
                bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
                if (!bh) {
                        *errp = -EIO;
                        goto err;
                }
                if (buffer_new(&dummy)) {
                        J_ASSERT(create != 0);
                        J_ASSERT(handle != NULL);

                        /*
                         * Now that we do not always journal data, we should
                         * keep in mind whether this should always journal the
                         * new buffer as metadata.  For now, regular file
                         * writes use ext4_get_block instead, so it's not a
                         * problem.
                         */
                        lock_buffer(bh);
                        BUFFER_TRACE(bh, "call get_create_access");
                        fatal = ext4_journal_get_create_access(handle, bh);
                        if (!fatal && !buffer_uptodate(bh)) {
                                memset(bh->b_data, 0, inode->i_sb->s_blocksize);
                                set_buffer_uptodate(bh);
                        }
                        unlock_buffer(bh);
                        BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
                        err = ext4_handle_dirty_metadata(handle, inode, bh);
                        if (!fatal)
                                fatal = err;
                } else {
                        BUFFER_TRACE(bh, "not a new buffer");
                }
                if (fatal) {
                        *errp = fatal;
                        brelse(bh);
                        bh = NULL;
                }
                return bh;
        }
err:
        return NULL;
}

struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
                               ext4_lblk_t block, int create, int *err)
{
        struct buffer_head *bh;

        bh = ext4_getblk(handle, inode, block, create, err);
        if (!bh)
                return bh;
        if (buffer_uptodate(bh))
                return bh;
        ll_rw_block(READ_META, 1, &bh);
        wait_on_buffer(bh);
        if (buffer_uptodate(bh))
                return bh;
        put_bh(bh);
        *err = -EIO;
        return NULL;
}

static int walk_page_buffers(handle_t *handle,
                             struct buffer_head *head,
                             unsigned from,
                             unsigned to,
                             int *partial,
                             int (*fn)(handle_t *handle,
                                       struct buffer_head *bh))
{
        struct buffer_head *bh;
        unsigned block_start, block_end;
        unsigned blocksize = head->b_size;
        int err, ret = 0;
        struct buffer_head *next;

        for (bh = head, block_start = 0;
             ret == 0 && (bh != head || !block_start);
             block_start = block_end, bh = next) {
                next = bh->b_this_page;
                block_end = block_start + blocksize;
                if (block_end <= from || block_start >= to) {
                        if (partial && !buffer_uptodate(bh))
                                *partial = 1;
                        continue;
                }
                err = (*fn)(handle, bh);
                if (!ret)
                        ret = err;
        }
        return ret;
}
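
/*
 * Worked example (illustrative; assumes a 4KB page with four 1KB
 * buffer_heads): walk_page_buffers(handle, head, 1024, 3072, NULL, fn)
 * skips the buffers covering [0,1024) and [3072,4096) and applies fn()
 * only to the two buffers overlapping the byte window [1024, 3072).  The
 * walk stops after the first buffer for which fn() returns non-zero, and
 * skipped, not-uptodate buffers are recorded when @partial is supplied.
 */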

/*
 * To preserve ordering, it is essential that the hole instantiation and
 * the data write be encapsulated in a single transaction.  We cannot
 * close off a transaction and start a new one between the ext4_get_block()
 * and the commit_write().  So doing the jbd2_journal_start at the start of
 * prepare_write() is the right place.
 *
 * Also, this function can nest inside ext4_writepage() ->
 * block_write_full_page().  In that case, we *know* that ext4_writepage()
 * has generated enough buffer credits to do the whole page.  So we won't
 * block on the journal in that case, which is good, because the caller may
 * be PF_MEMALLOC.
 *
 * By accident, ext4 can be reentered when a transaction is open via
 * quota file writes.  If we were to commit the transaction while thus
 * reentered, there can be a deadlock - we would be holding a quota
 * lock, and the commit would never complete if another thread had a
 * transaction open and was blocking on the quota lock - a ranking
 * violation.
 *
 * So what we do is to rely on the fact that jbd2_journal_stop/journal_start
 * will _not_ run commit under these circumstances because handle->h_ref
 * is elevated.  We'll still have enough credits for the tiny quotafile
 * write.
 */
static int do_journal_get_write_access(handle_t *handle,
                                       struct buffer_head *bh)
{
        if (!buffer_mapped(bh) || buffer_freed(bh))
                return 0;
        return ext4_journal_get_write_access(handle, bh);
}

static int ext4_write_begin(struct file *file, struct address_space *mapping,
                            loff_t pos, unsigned len, unsigned flags,
                            struct page **pagep, void **fsdata)
{
        struct inode *inode = mapping->host;
        int ret, needed_blocks;
        handle_t *handle;
        int retries = 0;
        struct page *page;
        pgoff_t index;
        unsigned from, to;

        trace_ext4_write_begin(inode, pos, len, flags);
        /*
         * Reserve one block more for addition to orphan list in case
         * we allocate blocks but write fails for some reason
         */
        needed_blocks = ext4_writepage_trans_blocks(inode) + 1;
        index = pos >> PAGE_CACHE_SHIFT;
        from = pos & (PAGE_CACHE_SIZE - 1);
        to = from + len;

retry:
        handle = ext4_journal_start(inode, needed_blocks);
        if (IS_ERR(handle)) {
                ret = PTR_ERR(handle);
                goto out;
        }

        /* We cannot recurse into the filesystem as the transaction is already
         * started */
        flags |= AOP_FLAG_NOFS;

        page = grab_cache_page_write_begin(mapping, index, flags);
        if (!page) {
                ext4_journal_stop(handle);
                ret = -ENOMEM;
                goto out;
        }
        *pagep = page;

        ret = block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
                                ext4_get_block);

        if (!ret && ext4_should_journal_data(inode)) {
                ret = walk_page_buffers(handle, page_buffers(page),
                                        from, to, NULL,
                                        do_journal_get_write_access);
        }

        if (ret) {
                unlock_page(page);
                page_cache_release(page);
                /*
                 * block_write_begin may have instantiated a few blocks
                 * outside i_size.  Trim these off again.  Don't need
                 * i_size_read because we hold i_mutex.
                 *
                 * Add inode to orphan list in case we crash before
                 * truncate finishes
                 */
                if (pos + len > inode->i_size && ext4_can_truncate(inode))
                        ext4_orphan_add(handle, inode);

                ext4_journal_stop(handle);
                if (pos + len > inode->i_size) {
                        ext4_truncate(inode);
                        /*
                         * If truncate failed early the inode might
                         * still be on the orphan list; we need to
                         * make sure the inode is removed from the
                         * orphan list in that case.
                         */
                        if (inode->i_nlink)
                                ext4_orphan_del(NULL, inode);
                }
        }

        if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
                goto retry;
out:
        return ret;
}

/* For write_end() in data=journal mode */
static int write_end_fn(handle_t *handle, struct buffer_head *bh)
{
        if (!buffer_mapped(bh) || buffer_freed(bh))
                return 0;
        set_buffer_uptodate(bh);
        return ext4_handle_dirty_metadata(handle, NULL, bh);
}
1643 * 1644 * But it's important to update i_size while still holding page lock: 1645 * page writeout could otherwise come in and zero beyond i_size. 1646 */ 1647 if (pos + copied > inode->i_size) { 1648 i_size_write(inode, pos + copied); 1649 i_size_changed = 1; 1650 } 1651 1652 if (pos + copied > EXT4_I(inode)->i_disksize) { 1653 /* We need to mark inode dirty even if 1654 * new_i_size is less that inode->i_size 1655 * bu greater than i_disksize.(hint delalloc) 1656 */ 1657 ext4_update_i_disksize(inode, (pos + copied)); 1658 i_size_changed = 1; 1659 } 1660 unlock_page(page); 1661 page_cache_release(page); 1662 1663 /* 1664 * Don't mark the inode dirty under page lock. First, it unnecessarily 1665 * makes the holding time of page lock longer. Second, it forces lock 1666 * ordering of page lock and transaction start for journaling 1667 * filesystems. 1668 */ 1669 if (i_size_changed) 1670 ext4_mark_inode_dirty(handle, inode); 1671 1672 return copied; 1673 } 1674 1675 /* 1676 * We need to pick up the new inode size which generic_commit_write gave us 1677 * `file' can be NULL - eg, when called from page_symlink(). 1678 * 1679 * ext4 never places buffers on inode->i_mapping->private_list. metadata 1680 * buffers are managed internally. 1681 */ 1682 static int ext4_ordered_write_end(struct file *file, 1683 struct address_space *mapping, 1684 loff_t pos, unsigned len, unsigned copied, 1685 struct page *page, void *fsdata) 1686 { 1687 handle_t *handle = ext4_journal_current_handle(); 1688 struct inode *inode = mapping->host; 1689 int ret = 0, ret2; 1690 1691 trace_ext4_ordered_write_end(inode, pos, len, copied); 1692 ret = ext4_jbd2_file_inode(handle, inode); 1693 1694 if (ret == 0) { 1695 ret2 = ext4_generic_write_end(file, mapping, pos, len, copied, 1696 page, fsdata); 1697 copied = ret2; 1698 if (pos + len > inode->i_size && ext4_can_truncate(inode)) 1699 /* if we have allocated more blocks and copied 1700 * less. We will have blocks allocated outside 1701 * inode->i_size. So truncate them 1702 */ 1703 ext4_orphan_add(handle, inode); 1704 if (ret2 < 0) 1705 ret = ret2; 1706 } 1707 ret2 = ext4_journal_stop(handle); 1708 if (!ret) 1709 ret = ret2; 1710 1711 if (pos + len > inode->i_size) { 1712 ext4_truncate(inode); 1713 /* 1714 * If truncate failed early the inode might still be 1715 * on the orphan list; we need to make sure the inode 1716 * is removed from the orphan list in that case. 1717 */ 1718 if (inode->i_nlink) 1719 ext4_orphan_del(NULL, inode); 1720 } 1721 1722 1723 return ret ? ret : copied; 1724 } 1725 1726 static int ext4_writeback_write_end(struct file *file, 1727 struct address_space *mapping, 1728 loff_t pos, unsigned len, unsigned copied, 1729 struct page *page, void *fsdata) 1730 { 1731 handle_t *handle = ext4_journal_current_handle(); 1732 struct inode *inode = mapping->host; 1733 int ret = 0, ret2; 1734 1735 trace_ext4_writeback_write_end(inode, pos, len, copied); 1736 ret2 = ext4_generic_write_end(file, mapping, pos, len, copied, 1737 page, fsdata); 1738 copied = ret2; 1739 if (pos + len > inode->i_size && ext4_can_truncate(inode)) 1740 /* if we have allocated more blocks and copied 1741 * less. We will have blocks allocated outside 1742 * inode->i_size. 

static int ext4_writeback_write_end(struct file *file,
                                    struct address_space *mapping,
                                    loff_t pos, unsigned len, unsigned copied,
                                    struct page *page, void *fsdata)
{
        handle_t *handle = ext4_journal_current_handle();
        struct inode *inode = mapping->host;
        int ret = 0, ret2;

        trace_ext4_writeback_write_end(inode, pos, len, copied);
        ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
                                      page, fsdata);
        copied = ret2;
        if (pos + len > inode->i_size && ext4_can_truncate(inode))
                /* if we have allocated more blocks than we have copied,
                 * we will have blocks allocated outside inode->i_size,
                 * so truncate them
                 */
                ext4_orphan_add(handle, inode);

        if (ret2 < 0)
                ret = ret2;

        ret2 = ext4_journal_stop(handle);
        if (!ret)
                ret = ret2;

        if (pos + len > inode->i_size) {
                ext4_truncate(inode);
                /*
                 * If truncate failed early the inode might still be
                 * on the orphan list; we need to make sure the inode
                 * is removed from the orphan list in that case.
                 */
                if (inode->i_nlink)
                        ext4_orphan_del(NULL, inode);
        }

        return ret ? ret : copied;
}

static int ext4_journalled_write_end(struct file *file,
                                     struct address_space *mapping,
                                     loff_t pos, unsigned len, unsigned copied,
                                     struct page *page, void *fsdata)
{
        handle_t *handle = ext4_journal_current_handle();
        struct inode *inode = mapping->host;
        int ret = 0, ret2;
        int partial = 0;
        unsigned from, to;
        loff_t new_i_size;

        trace_ext4_journalled_write_end(inode, pos, len, copied);
        from = pos & (PAGE_CACHE_SIZE - 1);
        to = from + len;

        if (copied < len) {
                if (!PageUptodate(page))
                        copied = 0;
                page_zero_new_buffers(page, from+copied, to);
        }

        ret = walk_page_buffers(handle, page_buffers(page), from,
                                to, &partial, write_end_fn);
        if (!partial)
                SetPageUptodate(page);
        new_i_size = pos + copied;
        if (new_i_size > inode->i_size)
                i_size_write(inode, pos+copied);
        EXT4_I(inode)->i_state |= EXT4_STATE_JDATA;
        if (new_i_size > EXT4_I(inode)->i_disksize) {
                ext4_update_i_disksize(inode, new_i_size);
                ret2 = ext4_mark_inode_dirty(handle, inode);
                if (!ret)
                        ret = ret2;
        }

        unlock_page(page);
        page_cache_release(page);
        if (pos + len > inode->i_size && ext4_can_truncate(inode))
                /* if we have allocated more blocks than we have copied,
                 * we will have blocks allocated outside inode->i_size,
                 * so truncate them
                 */
                ext4_orphan_add(handle, inode);

        ret2 = ext4_journal_stop(handle);
        if (!ret)
                ret = ret2;
        if (pos + len > inode->i_size) {
                ext4_truncate(inode);
                /*
                 * If truncate failed early the inode might still be
                 * on the orphan list; we need to make sure the inode
                 * is removed from the orphan list in that case.
                 */
                if (inode->i_nlink)
                        ext4_orphan_del(NULL, inode);
        }

        return ret ? ret : copied;
}

static int ext4_da_reserve_space(struct inode *inode, int nrblocks)
{
        int retries = 0;
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
        unsigned long md_needed, mdblocks, total = 0;

        /*
         * recalculate the amount of metadata blocks to reserve
         * in order to allocate nrblocks
         * worst case is one extent per block
         */
repeat:
        spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
        total = EXT4_I(inode)->i_reserved_data_blocks + nrblocks;
        mdblocks = ext4_calc_metadata_amount(inode, total);
        BUG_ON(mdblocks < EXT4_I(inode)->i_reserved_meta_blocks);

        md_needed = mdblocks - EXT4_I(inode)->i_reserved_meta_blocks;
        total = md_needed + nrblocks;

        /*
         * Make quota reservation here to prevent quota overflow
         * later.  Real quota accounting is done at pages writeout
         * time.
         */
        if (vfs_dq_reserve_block(inode, total)) {
                spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
                return -EDQUOT;
        }

        if (ext4_claim_free_blocks(sbi, total)) {
                spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
                vfs_dq_release_reservation_block(inode, total);
                if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
                        yield();
                        goto repeat;
                }
                return -ENOSPC;
        }
        EXT4_I(inode)->i_reserved_data_blocks += nrblocks;
        EXT4_I(inode)->i_reserved_meta_blocks = mdblocks;

        spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
        return 0;       /* success */
}
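
/*
 * Worked example (illustrative; assumes a non-extent file, a 4KB block
 * size and no prior reservation): ext4_da_reserve_space(inode, 100)
 * computes mdblocks = 1 + 1 + 1 = 3 via ext4_calc_metadata_amount(), so
 * md_needed = 3 and the quota and free-block claims are made for
 * total = 103 blocks before i_reserved_data_blocks is bumped by 100.
 */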
1854 */ 1855 if (vfs_dq_reserve_block(inode, total)) { 1856 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); 1857 return -EDQUOT; 1858 } 1859 1860 if (ext4_claim_free_blocks(sbi, total)) { 1861 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); 1862 vfs_dq_release_reservation_block(inode, total); 1863 if (ext4_should_retry_alloc(inode->i_sb, &retries)) { 1864 yield(); 1865 goto repeat; 1866 } 1867 return -ENOSPC; 1868 } 1869 EXT4_I(inode)->i_reserved_data_blocks += nrblocks; 1870 EXT4_I(inode)->i_reserved_meta_blocks = mdblocks; 1871 1872 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); 1873 return 0; /* success */ 1874 } 1875 1876 static void ext4_da_release_space(struct inode *inode, int to_free) 1877 { 1878 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 1879 int total, mdb, mdb_free, release; 1880 1881 if (!to_free) 1882 return; /* Nothing to release, exit */ 1883 1884 spin_lock(&EXT4_I(inode)->i_block_reservation_lock); 1885 1886 if (!EXT4_I(inode)->i_reserved_data_blocks) { 1887 /* 1888 * if there is no reserved blocks, but we try to free some 1889 * then the counter is messed up somewhere. 1890 * but since this function is called from invalidate 1891 * page, it's harmless to return without any action 1892 */ 1893 printk(KERN_INFO "ext4 delalloc try to release %d reserved " 1894 "blocks for inode %lu, but there is no reserved " 1895 "data blocks\n", to_free, inode->i_ino); 1896 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); 1897 return; 1898 } 1899 1900 /* recalculate the number of metablocks still need to be reserved */ 1901 total = EXT4_I(inode)->i_reserved_data_blocks - to_free; 1902 mdb = ext4_calc_metadata_amount(inode, total); 1903 1904 /* figure out how many metablocks to release */ 1905 BUG_ON(mdb > EXT4_I(inode)->i_reserved_meta_blocks); 1906 mdb_free = EXT4_I(inode)->i_reserved_meta_blocks - mdb; 1907 1908 release = to_free + mdb_free; 1909 1910 /* update fs dirty blocks counter for truncate case */ 1911 percpu_counter_sub(&sbi->s_dirtyblocks_counter, release); 1912 1913 /* update per-inode reservations */ 1914 BUG_ON(to_free > EXT4_I(inode)->i_reserved_data_blocks); 1915 EXT4_I(inode)->i_reserved_data_blocks -= to_free; 1916 1917 BUG_ON(mdb > EXT4_I(inode)->i_reserved_meta_blocks); 1918 EXT4_I(inode)->i_reserved_meta_blocks = mdb; 1919 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); 1920 1921 vfs_dq_release_reservation_block(inode, release); 1922 } 1923 1924 static void ext4_da_page_release_reservation(struct page *page, 1925 unsigned long offset) 1926 { 1927 int to_release = 0; 1928 struct buffer_head *head, *bh; 1929 unsigned int curr_off = 0; 1930 1931 head = page_buffers(page); 1932 bh = head; 1933 do { 1934 unsigned int next_off = curr_off + bh->b_size; 1935 1936 if ((offset <= curr_off) && (buffer_delay(bh))) { 1937 to_release++; 1938 clear_buffer_delay(bh); 1939 } 1940 curr_off = next_off; 1941 } while ((bh = bh->b_this_page) != head); 1942 ext4_da_release_space(page->mapping->host, to_release); 1943 } 1944 1945 /* 1946 * Delayed allocation stuff 1947 */ 1948 1949 /* 1950 * mpage_da_submit_io - walks through extent of pages and try to write 1951 * them with writepage() call back 1952 * 1953 * @mpd->inode: inode 1954 * @mpd->first_page: first page of the extent 1955 * @mpd->next_page: page after the last page of the extent 1956 * 1957 * By the time mpage_da_submit_io() is called we expect all blocks 1958 * to be allocated. this may be wrong if allocation failed. 
1959 * 1960 * As pages are already locked by write_cache_pages(), we can't use it 1961 */ 1962 static int mpage_da_submit_io(struct mpage_da_data *mpd) 1963 { 1964 long pages_skipped; 1965 struct pagevec pvec; 1966 unsigned long index, end; 1967 int ret = 0, err, nr_pages, i; 1968 struct inode *inode = mpd->inode; 1969 struct address_space *mapping = inode->i_mapping; 1970 1971 BUG_ON(mpd->next_page <= mpd->first_page); 1972 /* 1973 * We need to start from the first_page to the next_page - 1 1974 * to make sure we also write the mapped dirty buffer_heads. 1975 * If we look at mpd->b_blocknr we would only be looking 1976 * at the currently mapped buffer_heads. 1977 */ 1978 index = mpd->first_page; 1979 end = mpd->next_page - 1; 1980 1981 pagevec_init(&pvec, 0); 1982 while (index <= end) { 1983 nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE); 1984 if (nr_pages == 0) 1985 break; 1986 for (i = 0; i < nr_pages; i++) { 1987 struct page *page = pvec.pages[i]; 1988 1989 index = page->index; 1990 if (index > end) 1991 break; 1992 index++; 1993 1994 BUG_ON(!PageLocked(page)); 1995 BUG_ON(PageWriteback(page)); 1996 1997 pages_skipped = mpd->wbc->pages_skipped; 1998 err = mapping->a_ops->writepage(page, mpd->wbc); 1999 if (!err && (pages_skipped == mpd->wbc->pages_skipped)) 2000 /* 2001 * have successfully written the page 2002 * without skipping the same 2003 */ 2004 mpd->pages_written++; 2005 /* 2006 * In error case, we have to continue because 2007 * remaining pages are still locked 2008 * XXX: unlock and re-dirty them? 2009 */ 2010 if (ret == 0) 2011 ret = err; 2012 } 2013 pagevec_release(&pvec); 2014 } 2015 return ret; 2016 } 2017 2018 /* 2019 * mpage_put_bnr_to_bhs - walk blocks and assign them actual numbers 2020 * 2021 * @mpd->inode - inode to walk through 2022 * @exbh->b_blocknr - first block on a disk 2023 * @exbh->b_size - amount of space in bytes 2024 * @logical - first logical block to start assignment with 2025 * 2026 * the function goes through all passed space and put actual disk 2027 * block numbers into buffer heads, dropping BH_Delay and BH_Unwritten 2028 */ 2029 static void mpage_put_bnr_to_bhs(struct mpage_da_data *mpd, sector_t logical, 2030 struct buffer_head *exbh) 2031 { 2032 struct inode *inode = mpd->inode; 2033 struct address_space *mapping = inode->i_mapping; 2034 int blocks = exbh->b_size >> inode->i_blkbits; 2035 sector_t pblock = exbh->b_blocknr, cur_logical; 2036 struct buffer_head *head, *bh; 2037 pgoff_t index, end; 2038 struct pagevec pvec; 2039 int nr_pages, i; 2040 2041 index = logical >> (PAGE_CACHE_SHIFT - inode->i_blkbits); 2042 end = (logical + blocks - 1) >> (PAGE_CACHE_SHIFT - inode->i_blkbits); 2043 cur_logical = index << (PAGE_CACHE_SHIFT - inode->i_blkbits); 2044 2045 pagevec_init(&pvec, 0); 2046 2047 while (index <= end) { 2048 /* XXX: optimize tail */ 2049 nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE); 2050 if (nr_pages == 0) 2051 break; 2052 for (i = 0; i < nr_pages; i++) { 2053 struct page *page = pvec.pages[i]; 2054 2055 index = page->index; 2056 if (index > end) 2057 break; 2058 index++; 2059 2060 BUG_ON(!PageLocked(page)); 2061 BUG_ON(PageWriteback(page)); 2062 BUG_ON(!page_has_buffers(page)); 2063 2064 bh = page_buffers(page); 2065 head = bh; 2066 2067 /* skip blocks out of the range */ 2068 do { 2069 if (cur_logical >= logical) 2070 break; 2071 cur_logical++; 2072 } while ((bh = bh->b_this_page) != head); 2073 2074 do { 2075 if (cur_logical >= logical + blocks) 2076 break; 2077 2078 if (buffer_delay(bh) || 2079 
buffer_unwritten(bh)) { 2080 2081 BUG_ON(bh->b_bdev != inode->i_sb->s_bdev); 2082 2083 if (buffer_delay(bh)) { 2084 clear_buffer_delay(bh); 2085 bh->b_blocknr = pblock; 2086 } else { 2087 /* 2088 * unwritten already should have 2089 * blocknr assigned. Verify that 2090 */ 2091 clear_buffer_unwritten(bh); 2092 BUG_ON(bh->b_blocknr != pblock); 2093 } 2094 2095 } else if (buffer_mapped(bh)) 2096 BUG_ON(bh->b_blocknr != pblock); 2097 2098 cur_logical++; 2099 pblock++; 2100 } while ((bh = bh->b_this_page) != head); 2101 } 2102 pagevec_release(&pvec); 2103 } 2104 } 2105 2106 2107 /* 2108 * __unmap_underlying_blocks - just a helper function to unmap 2109 * set of blocks described by @bh 2110 */ 2111 static inline void __unmap_underlying_blocks(struct inode *inode, 2112 struct buffer_head *bh) 2113 { 2114 struct block_device *bdev = inode->i_sb->s_bdev; 2115 int blocks, i; 2116 2117 blocks = bh->b_size >> inode->i_blkbits; 2118 for (i = 0; i < blocks; i++) 2119 unmap_underlying_metadata(bdev, bh->b_blocknr + i); 2120 } 2121 2122 static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd, 2123 sector_t logical, long blk_cnt) 2124 { 2125 int nr_pages, i; 2126 pgoff_t index, end; 2127 struct pagevec pvec; 2128 struct inode *inode = mpd->inode; 2129 struct address_space *mapping = inode->i_mapping; 2130 2131 index = logical >> (PAGE_CACHE_SHIFT - inode->i_blkbits); 2132 end = (logical + blk_cnt - 1) >> 2133 (PAGE_CACHE_SHIFT - inode->i_blkbits); 2134 while (index <= end) { 2135 nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE); 2136 if (nr_pages == 0) 2137 break; 2138 for (i = 0; i < nr_pages; i++) { 2139 struct page *page = pvec.pages[i]; 2140 index = page->index; 2141 if (index > end) 2142 break; 2143 index++; 2144 2145 BUG_ON(!PageLocked(page)); 2146 BUG_ON(PageWriteback(page)); 2147 block_invalidatepage(page, 0); 2148 ClearPageUptodate(page); 2149 unlock_page(page); 2150 } 2151 } 2152 return; 2153 } 2154 2155 static void ext4_print_free_blocks(struct inode *inode) 2156 { 2157 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 2158 printk(KERN_CRIT "Total free blocks count %lld\n", 2159 ext4_count_free_blocks(inode->i_sb)); 2160 printk(KERN_CRIT "Free/Dirty block details\n"); 2161 printk(KERN_CRIT "free_blocks=%lld\n", 2162 (long long) percpu_counter_sum(&sbi->s_freeblocks_counter)); 2163 printk(KERN_CRIT "dirty_blocks=%lld\n", 2164 (long long) percpu_counter_sum(&sbi->s_dirtyblocks_counter)); 2165 printk(KERN_CRIT "Block reservation details\n"); 2166 printk(KERN_CRIT "i_reserved_data_blocks=%u\n", 2167 EXT4_I(inode)->i_reserved_data_blocks); 2168 printk(KERN_CRIT "i_reserved_meta_blocks=%u\n", 2169 EXT4_I(inode)->i_reserved_meta_blocks); 2170 return; 2171 } 2172 2173 /* 2174 * mpage_da_map_blocks - go through given space 2175 * 2176 * @mpd - bh describing space 2177 * 2178 * The function skips space we know is already mapped to disk blocks. 
2179 * 2180 */ 2181 static int mpage_da_map_blocks(struct mpage_da_data *mpd) 2182 { 2183 int err, blks, get_blocks_flags; 2184 struct buffer_head new; 2185 sector_t next = mpd->b_blocknr; 2186 unsigned max_blocks = mpd->b_size >> mpd->inode->i_blkbits; 2187 loff_t disksize = EXT4_I(mpd->inode)->i_disksize; 2188 handle_t *handle = NULL; 2189 2190 /* 2191 * We consider only non-mapped and non-allocated blocks 2192 */ 2193 if ((mpd->b_state & (1 << BH_Mapped)) && 2194 !(mpd->b_state & (1 << BH_Delay)) && 2195 !(mpd->b_state & (1 << BH_Unwritten))) 2196 return 0; 2197 2198 /* 2199 * If we didn't accumulate anything to write simply return 2200 */ 2201 if (!mpd->b_size) 2202 return 0; 2203 2204 handle = ext4_journal_current_handle(); 2205 BUG_ON(!handle); 2206 2207 /* 2208 * Call ext4_get_blocks() to allocate any delayed allocation 2209 * blocks, or to convert an uninitialized extent to be 2210 * initialized (in the case where we have written into 2211 * one or more preallocated blocks). 2212 * 2213 * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE to 2214 * indicate that we are on the delayed allocation path. This 2215 * affects functions in many different parts of the allocation 2216 * call path. This flag exists primarily because we don't 2217 * want to change *many* call functions, so ext4_get_blocks() 2218 * will set the magic i_delalloc_reserved_flag once the 2219 * inode's allocation semaphore is taken. 2220 * 2221 * If the blocks in questions were delalloc blocks, set 2222 * EXT4_GET_BLOCKS_DELALLOC_RESERVE so the delalloc accounting 2223 * variables are updated after the blocks have been allocated. 2224 */ 2225 new.b_state = 0; 2226 get_blocks_flags = (EXT4_GET_BLOCKS_CREATE | 2227 EXT4_GET_BLOCKS_DELALLOC_RESERVE); 2228 if (mpd->b_state & (1 << BH_Delay)) 2229 get_blocks_flags |= EXT4_GET_BLOCKS_UPDATE_RESERVE_SPACE; 2230 blks = ext4_get_blocks(handle, mpd->inode, next, max_blocks, 2231 &new, get_blocks_flags); 2232 if (blks < 0) { 2233 err = blks; 2234 /* 2235 * If get block returns with error we simply 2236 * return. Later writepage will redirty the page and 2237 * writepages will find the dirty page again 2238 */ 2239 if (err == -EAGAIN) 2240 return 0; 2241 2242 if (err == -ENOSPC && 2243 ext4_count_free_blocks(mpd->inode->i_sb)) { 2244 mpd->retval = err; 2245 return 0; 2246 } 2247 2248 /* 2249 * get block failure will cause us to loop in 2250 * writepages, because a_ops->writepage won't be able 2251 * to make progress. The page will be redirtied by 2252 * writepage and writepages will again try to write 2253 * the same. 2254 */ 2255 ext4_msg(mpd->inode->i_sb, KERN_CRIT, 2256 "delayed block allocation failed for inode %lu at " 2257 "logical offset %llu with max blocks %zd with " 2258 "error %d\n", mpd->inode->i_ino, 2259 (unsigned long long) next, 2260 mpd->b_size >> mpd->inode->i_blkbits, err); 2261 printk(KERN_CRIT "This should not happen!! 
" 2262 "Data will be lost\n"); 2263 if (err == -ENOSPC) { 2264 ext4_print_free_blocks(mpd->inode); 2265 } 2266 /* invalidate all the pages */ 2267 ext4_da_block_invalidatepages(mpd, next, 2268 mpd->b_size >> mpd->inode->i_blkbits); 2269 return err; 2270 } 2271 BUG_ON(blks == 0); 2272 2273 new.b_size = (blks << mpd->inode->i_blkbits); 2274 2275 if (buffer_new(&new)) 2276 __unmap_underlying_blocks(mpd->inode, &new); 2277 2278 /* 2279 * If blocks are delayed marked, we need to 2280 * put actual blocknr and drop delayed bit 2281 */ 2282 if ((mpd->b_state & (1 << BH_Delay)) || 2283 (mpd->b_state & (1 << BH_Unwritten))) 2284 mpage_put_bnr_to_bhs(mpd, next, &new); 2285 2286 if (ext4_should_order_data(mpd->inode)) { 2287 err = ext4_jbd2_file_inode(handle, mpd->inode); 2288 if (err) 2289 return err; 2290 } 2291 2292 /* 2293 * Update on-disk size along with block allocation. 2294 */ 2295 disksize = ((loff_t) next + blks) << mpd->inode->i_blkbits; 2296 if (disksize > i_size_read(mpd->inode)) 2297 disksize = i_size_read(mpd->inode); 2298 if (disksize > EXT4_I(mpd->inode)->i_disksize) { 2299 ext4_update_i_disksize(mpd->inode, disksize); 2300 return ext4_mark_inode_dirty(handle, mpd->inode); 2301 } 2302 2303 return 0; 2304 } 2305 2306 #define BH_FLAGS ((1 << BH_Uptodate) | (1 << BH_Mapped) | \ 2307 (1 << BH_Delay) | (1 << BH_Unwritten)) 2308 2309 /* 2310 * mpage_add_bh_to_extent - try to add one more block to extent of blocks 2311 * 2312 * @mpd->lbh - extent of blocks 2313 * @logical - logical number of the block in the file 2314 * @bh - bh of the block (used to access block's state) 2315 * 2316 * the function is used to collect contig. blocks in same state 2317 */ 2318 static void mpage_add_bh_to_extent(struct mpage_da_data *mpd, 2319 sector_t logical, size_t b_size, 2320 unsigned long b_state) 2321 { 2322 sector_t next; 2323 int nrblocks = mpd->b_size >> mpd->inode->i_blkbits; 2324 2325 /* check if thereserved journal credits might overflow */ 2326 if (!(EXT4_I(mpd->inode)->i_flags & EXT4_EXTENTS_FL)) { 2327 if (nrblocks >= EXT4_MAX_TRANS_DATA) { 2328 /* 2329 * With non-extent format we are limited by the journal 2330 * credit available. Total credit needed to insert 2331 * nrblocks contiguous blocks is dependent on the 2332 * nrblocks. So limit nrblocks. 2333 */ 2334 goto flush_it; 2335 } else if ((nrblocks + (b_size >> mpd->inode->i_blkbits)) > 2336 EXT4_MAX_TRANS_DATA) { 2337 /* 2338 * Adding the new buffer_head would make it cross the 2339 * allowed limit for which we have journal credit 2340 * reserved. So limit the new bh->b_size 2341 */ 2342 b_size = (EXT4_MAX_TRANS_DATA - nrblocks) << 2343 mpd->inode->i_blkbits; 2344 /* we will do mpage_da_submit_io in the next loop */ 2345 } 2346 } 2347 /* 2348 * First block in the extent 2349 */ 2350 if (mpd->b_size == 0) { 2351 mpd->b_blocknr = logical; 2352 mpd->b_size = b_size; 2353 mpd->b_state = b_state & BH_FLAGS; 2354 return; 2355 } 2356 2357 next = mpd->b_blocknr + nrblocks; 2358 /* 2359 * Can we merge the block to our big extent? 
2360 */ 2361 if (logical == next && (b_state & BH_FLAGS) == mpd->b_state) { 2362 mpd->b_size += b_size; 2363 return; 2364 } 2365 2366 flush_it: 2367 /* 2368 * We couldn't merge the block to our extent, so we 2369 * need to flush current extent and start new one 2370 */ 2371 if (mpage_da_map_blocks(mpd) == 0) 2372 mpage_da_submit_io(mpd); 2373 mpd->io_done = 1; 2374 return; 2375 } 2376 2377 static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh) 2378 { 2379 return (buffer_delay(bh) || buffer_unwritten(bh)) && buffer_dirty(bh); 2380 } 2381 2382 /* 2383 * __mpage_da_writepage - finds extent of pages and blocks 2384 * 2385 * @page: page to consider 2386 * @wbc: not used, we just follow rules 2387 * @data: context 2388 * 2389 * The function finds extents of pages and scan them for all blocks. 2390 */ 2391 static int __mpage_da_writepage(struct page *page, 2392 struct writeback_control *wbc, void *data) 2393 { 2394 struct mpage_da_data *mpd = data; 2395 struct inode *inode = mpd->inode; 2396 struct buffer_head *bh, *head; 2397 sector_t logical; 2398 2399 if (mpd->io_done) { 2400 /* 2401 * Rest of the page in the page_vec 2402 * redirty then and skip then. We will 2403 * try to write them again after 2404 * starting a new transaction 2405 */ 2406 redirty_page_for_writepage(wbc, page); 2407 unlock_page(page); 2408 return MPAGE_DA_EXTENT_TAIL; 2409 } 2410 /* 2411 * Can we merge this page to current extent? 2412 */ 2413 if (mpd->next_page != page->index) { 2414 /* 2415 * Nope, we can't. So, we map non-allocated blocks 2416 * and start IO on them using writepage() 2417 */ 2418 if (mpd->next_page != mpd->first_page) { 2419 if (mpage_da_map_blocks(mpd) == 0) 2420 mpage_da_submit_io(mpd); 2421 /* 2422 * skip rest of the page in the page_vec 2423 */ 2424 mpd->io_done = 1; 2425 redirty_page_for_writepage(wbc, page); 2426 unlock_page(page); 2427 return MPAGE_DA_EXTENT_TAIL; 2428 } 2429 2430 /* 2431 * Start next extent of pages ... 2432 */ 2433 mpd->first_page = page->index; 2434 2435 /* 2436 * ... and blocks 2437 */ 2438 mpd->b_size = 0; 2439 mpd->b_state = 0; 2440 mpd->b_blocknr = 0; 2441 } 2442 2443 mpd->next_page = page->index + 1; 2444 logical = (sector_t) page->index << 2445 (PAGE_CACHE_SHIFT - inode->i_blkbits); 2446 2447 if (!page_has_buffers(page)) { 2448 mpage_add_bh_to_extent(mpd, logical, PAGE_CACHE_SIZE, 2449 (1 << BH_Dirty) | (1 << BH_Uptodate)); 2450 if (mpd->io_done) 2451 return MPAGE_DA_EXTENT_TAIL; 2452 } else { 2453 /* 2454 * Page with regular buffer heads, just add all dirty ones 2455 */ 2456 head = page_buffers(page); 2457 bh = head; 2458 do { 2459 BUG_ON(buffer_locked(bh)); 2460 /* 2461 * We need to try to allocate 2462 * unmapped blocks in the same page. 2463 * Otherwise we won't make progress 2464 * with the page in ext4_writepage 2465 */ 2466 if (ext4_bh_delay_or_unwritten(NULL, bh)) { 2467 mpage_add_bh_to_extent(mpd, logical, 2468 bh->b_size, 2469 bh->b_state); 2470 if (mpd->io_done) 2471 return MPAGE_DA_EXTENT_TAIL; 2472 } else if (buffer_dirty(bh) && (buffer_mapped(bh))) { 2473 /* 2474 * mapped dirty buffer. We need to update 2475 * the b_state because we look at 2476 * b_state in mpage_da_map_blocks. We don't 2477 * update b_size because if we find an 2478 * unmapped buffer_head later we need to 2479 * use the b_state flag of that buffer_head. 
2480 */ 2481 if (mpd->b_size == 0) 2482 mpd->b_state = bh->b_state & BH_FLAGS; 2483 } 2484 logical++; 2485 } while ((bh = bh->b_this_page) != head); 2486 } 2487 2488 return 0; 2489 } 2490 2491 /* 2492 * This is a special get_blocks_t callback which is used by 2493 * ext4_da_write_begin(). It will either return mapped block or 2494 * reserve space for a single block. 2495 * 2496 * For delayed buffer_head we have BH_Mapped, BH_New, BH_Delay set. 2497 * We also have b_blocknr = -1 and b_bdev initialized properly 2498 * 2499 * For unwritten buffer_head we have BH_Mapped, BH_New, BH_Unwritten set. 2500 * We also have b_blocknr = physicalblock mapping unwritten extent and b_bdev 2501 * initialized properly. 2502 */ 2503 static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock, 2504 struct buffer_head *bh_result, int create) 2505 { 2506 int ret = 0; 2507 sector_t invalid_block = ~((sector_t) 0xffff); 2508 2509 if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es)) 2510 invalid_block = ~0; 2511 2512 BUG_ON(create == 0); 2513 BUG_ON(bh_result->b_size != inode->i_sb->s_blocksize); 2514 2515 /* 2516 * first, we need to know whether the block is allocated already 2517 * preallocated blocks are unmapped but should treated 2518 * the same as allocated blocks. 2519 */ 2520 ret = ext4_get_blocks(NULL, inode, iblock, 1, bh_result, 0); 2521 if ((ret == 0) && !buffer_delay(bh_result)) { 2522 /* the block isn't (pre)allocated yet, let's reserve space */ 2523 /* 2524 * XXX: __block_prepare_write() unmaps passed block, 2525 * is it OK? 2526 */ 2527 ret = ext4_da_reserve_space(inode, 1); 2528 if (ret) 2529 /* not enough space to reserve */ 2530 return ret; 2531 2532 map_bh(bh_result, inode->i_sb, invalid_block); 2533 set_buffer_new(bh_result); 2534 set_buffer_delay(bh_result); 2535 } else if (ret > 0) { 2536 bh_result->b_size = (ret << inode->i_blkbits); 2537 if (buffer_unwritten(bh_result)) { 2538 /* A delayed write to unwritten bh should 2539 * be marked new and mapped. Mapped ensures 2540 * that we don't do get_block multiple times 2541 * when we write to the same offset and new 2542 * ensures that we do proper zero out for 2543 * partial write. 2544 */ 2545 set_buffer_new(bh_result); 2546 set_buffer_mapped(bh_result); 2547 } 2548 ret = 0; 2549 } 2550 2551 return ret; 2552 } 2553 2554 /* 2555 * This function is used as a standard get_block_t calback function 2556 * when there is no desire to allocate any blocks. It is used as a 2557 * callback function for block_prepare_write(), nobh_writepage(), and 2558 * block_write_full_page(). These functions should only try to map a 2559 * single block at a time. 2560 * 2561 * Since this function doesn't do block allocations even if the caller 2562 * requests it by passing in create=1, it is critically important that 2563 * any caller checks to make sure that any buffer heads are returned 2564 * by this function are either all already mapped or marked for 2565 * delayed allocation before calling nobh_writepage() or 2566 * block_write_full_page(). Otherwise, b_blocknr could be left 2567 * unitialized, and the page write functions will be taken by 2568 * surprise. 
2569 */ 2570 static int noalloc_get_block_write(struct inode *inode, sector_t iblock, 2571 struct buffer_head *bh_result, int create) 2572 { 2573 int ret = 0; 2574 unsigned max_blocks = bh_result->b_size >> inode->i_blkbits; 2575 2576 BUG_ON(bh_result->b_size != inode->i_sb->s_blocksize); 2577 2578 /* 2579 * we don't want to do block allocation in writepage 2580 * so call get_block_wrap with create = 0 2581 */ 2582 ret = ext4_get_blocks(NULL, inode, iblock, max_blocks, bh_result, 0); 2583 if (ret > 0) { 2584 bh_result->b_size = (ret << inode->i_blkbits); 2585 ret = 0; 2586 } 2587 return ret; 2588 } 2589 2590 static int bget_one(handle_t *handle, struct buffer_head *bh) 2591 { 2592 get_bh(bh); 2593 return 0; 2594 } 2595 2596 static int bput_one(handle_t *handle, struct buffer_head *bh) 2597 { 2598 put_bh(bh); 2599 return 0; 2600 } 2601 2602 static int __ext4_journalled_writepage(struct page *page, 2603 struct writeback_control *wbc, 2604 unsigned int len) 2605 { 2606 struct address_space *mapping = page->mapping; 2607 struct inode *inode = mapping->host; 2608 struct buffer_head *page_bufs; 2609 handle_t *handle = NULL; 2610 int ret = 0; 2611 int err; 2612 2613 page_bufs = page_buffers(page); 2614 BUG_ON(!page_bufs); 2615 walk_page_buffers(handle, page_bufs, 0, len, NULL, bget_one); 2616 /* As soon as we unlock the page, it can go away, but we have 2617 * references to buffers so we are safe */ 2618 unlock_page(page); 2619 2620 handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode)); 2621 if (IS_ERR(handle)) { 2622 ret = PTR_ERR(handle); 2623 goto out; 2624 } 2625 2626 ret = walk_page_buffers(handle, page_bufs, 0, len, NULL, 2627 do_journal_get_write_access); 2628 2629 err = walk_page_buffers(handle, page_bufs, 0, len, NULL, 2630 write_end_fn); 2631 if (ret == 0) 2632 ret = err; 2633 err = ext4_journal_stop(handle); 2634 if (!ret) 2635 ret = err; 2636 2637 walk_page_buffers(handle, page_bufs, 0, len, NULL, bput_one); 2638 EXT4_I(inode)->i_state |= EXT4_STATE_JDATA; 2639 out: 2640 return ret; 2641 } 2642 2643 /* 2644 * Note that we don't need to start a transaction unless we're journaling data 2645 * because we should have holes filled from ext4_page_mkwrite(). We even don't 2646 * need to file the inode to the transaction's list in ordered mode because if 2647 * we are writing back data added by write(), the inode is already there and if 2648 * we are writing back data modified via mmap(), noone guarantees in which 2649 * transaction the data will hit the disk. In case we are journaling data, we 2650 * cannot start transaction directly because transaction start ranks above page 2651 * lock so we have to do some magic. 2652 * 2653 * This function can get called via... 2654 * - ext4_da_writepages after taking page lock (have journal handle) 2655 * - journal_submit_inode_data_buffers (no journal handle) 2656 * - shrink_page_list via pdflush (no journal handle) 2657 * - grab_page_cache when doing write_begin (have journal handle) 2658 * 2659 * We don't do any block allocation in this function. If we have page with 2660 * multiple blocks we need to write those buffer_heads that are mapped. This 2661 * is important for mmaped based write. So if we do with blocksize 1K 2662 * truncate(f, 1024); 2663 * a = mmap(f, 0, 4096); 2664 * a[0] = 'a'; 2665 * truncate(f, 4096); 2666 * we have in the page first buffer_head mapped via page_mkwrite call back 2667 * but other bufer_heads would be unmapped but dirty(dirty done via the 2668 * do_wp_page). So writepage should write the first block. 
If we modify
 * the mmap area beyond 1024 we will again get a page_fault and the
 * page_mkwrite callback will do the block allocation and mark the
 * buffer_heads mapped.
 *
 * We redirty the page if we have any buffer_heads that are either delayed
 * or unwritten in the page.
 *
 * We can get recursively called as shown below.
 *
 *	ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
 *		ext4_writepage()
 *
 * But since we don't do any block allocation we should not deadlock.
 * The page also has its dirty flag cleared, so we don't take the page
 * lock recursively.
 */
static int ext4_writepage(struct page *page,
			  struct writeback_control *wbc)
{
	int ret = 0;
	loff_t size;
	unsigned int len;
	struct buffer_head *page_bufs;
	struct inode *inode = page->mapping->host;

	trace_ext4_writepage(inode, page);
	size = i_size_read(inode);
	if (page->index == size >> PAGE_CACHE_SHIFT)
		len = size & ~PAGE_CACHE_MASK;
	else
		len = PAGE_CACHE_SIZE;

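	/*
	 * Worked example, assuming 4K pages (PAGE_CACHE_SHIFT == 12):
	 * for i_size == 5000, the last page has index 1 (5000 >> 12),
	 * and only len = 5000 & 4095 = 904 bytes of it are written
	 * back; every earlier page is written in full (len = 4096).
	 */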
	if (page_has_buffers(page)) {
		page_bufs = page_buffers(page);
		if (walk_page_buffers(NULL, page_bufs, 0, len, NULL,
					ext4_bh_delay_or_unwritten)) {
			/*
			 * We don't want to do block allocation, so
			 * redirty the page and return.  We may reach
			 * here when we do a journal commit via
			 * journal_submit_inode_data_buffers.  If we
			 * don't have mapped blocks we just ignore them.
			 * We can also reach here via shrink_page_list.
			 */
			redirty_page_for_writepage(wbc, page);
			unlock_page(page);
			return 0;
		}
	} else {
		/*
		 * The test for page_has_buffers() is subtle:
		 * We know the page is dirty but it lost buffers. That means
		 * that at some moment in time after write_begin()/write_end()
		 * has been called all buffers have been clean and thus they
		 * must have been written at least once. So they are all
		 * mapped and we can happily proceed with mapping them
		 * and writing the page.
		 *
		 * Try to initialize the buffer_heads and check whether
		 * all are mapped and non delay. We don't want to
		 * do block allocation here.
		 */
		ret = block_prepare_write(page, 0, len,
					  noalloc_get_block_write);
		if (!ret) {
			page_bufs = page_buffers(page);
			/* check whether all are mapped and non delay */
			if (walk_page_buffers(NULL, page_bufs, 0, len, NULL,
						ext4_bh_delay_or_unwritten)) {
				redirty_page_for_writepage(wbc, page);
				unlock_page(page);
				return 0;
			}
		} else {
			/*
			 * We can't do block allocation here,
			 * so just redirty the page, unlock
			 * and return
			 */
			redirty_page_for_writepage(wbc, page);
			unlock_page(page);
			return 0;
		}
		/* now mark the buffer_heads as dirty and uptodate */
		block_commit_write(page, 0, len);
	}

	if (PageChecked(page) && ext4_should_journal_data(inode)) {
		/*
		 * It's mmapped pagecache.  Add buffers and journal it.  There
		 * doesn't seem much point in redirtying the page here.
		 */
		ClearPageChecked(page);
		return __ext4_journalled_writepage(page, wbc, len);
	}

	if (test_opt(inode->i_sb, NOBH) && ext4_should_writeback_data(inode))
		ret = nobh_writepage(page, noalloc_get_block_write, wbc);
	else
		ret = block_write_full_page(page, noalloc_get_block_write,
					    wbc);

	return ret;
}

/*
 * This is called via ext4_da_writepages() to
 * calculate the total number of credits to reserve to fit
 * a single extent allocation into a single transaction;
 * ext4_da_writepages() will loop calling this before
 * the block allocation.
 */

static int ext4_da_writepages_trans_blocks(struct inode *inode)
{
	int max_blocks = EXT4_I(inode)->i_reserved_data_blocks;

	/*
	 * With non-extent format the journal credit needed to
	 * insert nrblocks contiguous blocks is dependent on the
	 * number of contiguous blocks. So we will limit the
	 * number of contiguous blocks to a sane value
	 */
	if (!(inode->i_flags & EXT4_EXTENTS_FL) &&
	    (max_blocks > EXT4_MAX_TRANS_DATA))
		max_blocks = EXT4_MAX_TRANS_DATA;

	return ext4_chunk_trans_blocks(inode, max_blocks);
}

static int ext4_da_writepages(struct address_space *mapping,
			      struct writeback_control *wbc)
{
	pgoff_t index;
	int range_whole = 0;
	handle_t *handle = NULL;
	struct mpage_da_data mpd;
	struct inode *inode = mapping->host;
	int no_nrwrite_index_update;
	int pages_written = 0;
	long pages_skipped;
	unsigned int max_pages;
	int range_cyclic, cycled = 1, io_done = 0;
	int needed_blocks, ret = 0;
	long desired_nr_to_write, nr_to_writebump = 0;
	loff_t range_start = wbc->range_start;
	struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);

	trace_ext4_da_writepages(inode, wbc);

	/*
	 * No pages to write? This is mainly a kludge to avoid starting
	 * a transaction for special inodes like the journal inode on last
	 * iput() because that could violate lock ordering on umount
	 */
	if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		return 0;

	/*
	 * If the filesystem has aborted, it is read-only, so return
	 * right away instead of dumping stack traces later on that
	 * will obscure the real source of the problem.  We test
	 * EXT4_MF_FS_ABORTED instead of sb->s_flag's MS_RDONLY because
	 * the latter could be true if the filesystem is mounted
	 * read-only, and in that case, ext4_da_writepages should
	 * *never* be called, so if that ever happens, we would want
	 * the stack trace.
	 */
	if (unlikely(sbi->s_mount_flags & EXT4_MF_FS_ABORTED))
		return -EROFS;

	if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
		range_whole = 1;

	range_cyclic = wbc->range_cyclic;
	if (wbc->range_cyclic) {
		index = mapping->writeback_index;
		if (index)
			cycled = 0;
		wbc->range_start = index << PAGE_CACHE_SHIFT;
		wbc->range_end  = LLONG_MAX;
		wbc->range_cyclic = 0;
	} else
		index = wbc->range_start >> PAGE_CACHE_SHIFT;

	/*
	 * This works around two forms of stupidity.  The first is in
	 * the writeback code, which caps the maximum number of pages
	 * written to be 1024 pages.  This is wrong on multiple
	 * levels; different architectures have a different page size,
	 * which changes the maximum amount of data which gets
	 * written.  Secondly, 4 megabytes is way too small.  XFS
	 * forces this value to be 16 megabytes by multiplying
	 * the nr_to_write parameter by four, and then relies on its
	 * allocator to allocate larger extents to make them
	 * contiguous.  Unfortunately this brings us to the second
	 * stupidity, which is that ext4's mballoc code only allocates
	 * at most 2048 blocks.  So we force contiguous writes up to
	 * the number of dirty blocks in the inode, or
	 * sbi->max_writeback_mb_bump, whichever is smaller.
	 */
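	/*
	 * Sizing note: one megabyte is 2^(20 - PAGE_CACHE_SHIFT) pages,
	 * so with 4K pages and an assumed s_max_writeback_mb_bump of
	 * 128 MB this yields max_pages = 128 << 8 = 32768 pages per
	 * writeback pass.
	 */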
	max_pages = sbi->s_max_writeback_mb_bump << (20 - PAGE_CACHE_SHIFT);
	if (!range_cyclic && range_whole)
		desired_nr_to_write = wbc->nr_to_write * 8;
	else
		desired_nr_to_write = ext4_num_dirty_pages(inode, index,
							   max_pages);
	if (desired_nr_to_write > max_pages)
		desired_nr_to_write = max_pages;

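	/*
	 * Example, using the 1024-page writeback cap mentioned above:
	 * a whole-file, non-cyclic writeback with nr_to_write == 1024
	 * was just bumped to a target of 8192 pages (then clamped to
	 * max_pages); the bump is undone again at out_writepages.
	 */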
	if (wbc->nr_to_write < desired_nr_to_write) {
		nr_to_writebump = desired_nr_to_write - wbc->nr_to_write;
		wbc->nr_to_write = desired_nr_to_write;
	}

	mpd.wbc = wbc;
	mpd.inode = mapping->host;

	/*
	 * we don't want write_cache_pages to update
	 * nr_to_write and writeback_index
	 */
	no_nrwrite_index_update = wbc->no_nrwrite_index_update;
	wbc->no_nrwrite_index_update = 1;
	pages_skipped = wbc->pages_skipped;

retry:
	while (!ret && wbc->nr_to_write > 0) {

		/*
		 * we insert one extent at a time, so we need the
		 * credits for a single extent allocation.
		 * journalled mode is currently not supported
		 * by delalloc
		 */
		BUG_ON(ext4_should_journal_data(inode));
		needed_blocks = ext4_da_writepages_trans_blocks(inode);

		/* start a new transaction*/
		handle = ext4_journal_start(inode, needed_blocks);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: "
			       "%ld pages, ino %lu; err %d\n", __func__,
				wbc->nr_to_write, inode->i_ino, ret);
			goto out_writepages;
		}

		/*
		 * Now call __mpage_da_writepage to find the next
		 * contiguous region of logical blocks that need
		 * blocks to be allocated by ext4.  We don't actually
		 * submit the blocks for I/O here, even though
		 * write_cache_pages thinks it will, and will set the
		 * pages as clean for write before calling
		 * __mpage_da_writepage().
		 */
		mpd.b_size = 0;
		mpd.b_state = 0;
		mpd.b_blocknr = 0;
		mpd.first_page = 0;
		mpd.next_page = 0;
		mpd.io_done = 0;
		mpd.pages_written = 0;
		mpd.retval = 0;
		ret = write_cache_pages(mapping, wbc, __mpage_da_writepage,
					&mpd);
		/*
		 * If we have a contiguous extent of pages and we
		 * haven't done the I/O yet, map the blocks and submit
		 * them for I/O.
		 */
		if (!mpd.io_done && mpd.next_page != mpd.first_page) {
			if (mpage_da_map_blocks(&mpd) == 0)
				mpage_da_submit_io(&mpd);
			mpd.io_done = 1;
			ret = MPAGE_DA_EXTENT_TAIL;
		}
		trace_ext4_da_write_pages(inode, &mpd);
		wbc->nr_to_write -= mpd.pages_written;

		ext4_journal_stop(handle);

		if ((mpd.retval == -ENOSPC) && sbi->s_journal) {
			/* commit the transaction which would
			 * free the blocks released in the transaction
			 * and try again
			 */
			jbd2_journal_force_commit_nested(sbi->s_journal);
			wbc->pages_skipped = pages_skipped;
			ret = 0;
		} else if (ret == MPAGE_DA_EXTENT_TAIL) {
			/*
			 * got one extent, now try with the
			 * rest of the pages
			 */
			pages_written += mpd.pages_written;
			wbc->pages_skipped = pages_skipped;
			ret = 0;
			io_done = 1;
		} else if (wbc->nr_to_write)
			/*
			 * There is no more writeout needed,
			 * or we requested a non-blocking writeout
			 * and we found the device congested
			 */
			break;
	}
	if (!io_done && !cycled) {
		cycled = 1;
		index = 0;
		wbc->range_start = index << PAGE_CACHE_SHIFT;
		wbc->range_end = mapping->writeback_index - 1;
		goto retry;
	}
	if (pages_skipped != wbc->pages_skipped)
		ext4_msg(inode->i_sb, KERN_CRIT,
			 "This should not happen leaving %s "
			 "with nr_to_write = %ld ret = %d\n",
			 __func__, wbc->nr_to_write, ret);

	/* Update index */
	index += pages_written;
	wbc->range_cyclic = range_cyclic;
	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		/*
		 * set the writeback_index so that range_cyclic
		 * mode will write it back later
		 */
		mapping->writeback_index = index;

out_writepages:
	if (!no_nrwrite_index_update)
		wbc->no_nrwrite_index_update = 0;
	if (wbc->nr_to_write > nr_to_writebump)
		wbc->nr_to_write -= nr_to_writebump;
	wbc->range_start = range_start;
	trace_ext4_da_writepages_result(inode, wbc, ret, pages_written);
	return ret;
}

#define FALL_BACK_TO_NONDELALLOC 1
static int ext4_nonda_switch(struct super_block *sb)
{
	s64 free_blocks, dirty_blocks;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	/*
	 * Switch to non-delalloc mode if we are running low on free
	 * blocks.  The free-block accounting via percpu counters can
	 * get slightly wrong, with up to percpu_counter_batch blocks
	 * accumulated on each CPU without updating the global counter.
	 * Delalloc needs accurate free-block accounting, so switch to
	 * non-delalloc when we are near the error range.
	 */
	free_blocks  = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
	dirty_blocks = percpu_counter_read_positive(&sbi->s_dirtyblocks_counter);
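	/*
	 * Worked example (illustrative numbers): with free_blocks ==
	 * 1000 and dirty_blocks == 700 we have 2 * 1000 < 3 * 700,
	 * i.e. free space has dropped below 150% of the
	 * delalloc-reserved dirty blocks, so the caller is told to
	 * fall back to non-delalloc mode.
	 */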
	if (2 * free_blocks < 3 * dirty_blocks ||
		free_blocks < (dirty_blocks + EXT4_FREEBLOCKS_WATERMARK)) {
		/*
		 * the free block count is less than 150% of the dirty
		 * blocks, or the free blocks are less than the watermark
		 */
		return 1;
	}
	return 0;
}

static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
			       loff_t pos, unsigned len, unsigned flags,
			       struct page **pagep, void **fsdata)
{
	int ret, retries = 0;
	struct page *page;
	pgoff_t index;
	unsigned from, to;
	struct inode *inode = mapping->host;
	handle_t *handle;

	index = pos >> PAGE_CACHE_SHIFT;
	from = pos & (PAGE_CACHE_SIZE - 1);
	to = from + len;

	if (ext4_nonda_switch(inode->i_sb)) {
		*fsdata = (void *)FALL_BACK_TO_NONDELALLOC;
		return ext4_write_begin(file, mapping, pos,
					len, flags, pagep, fsdata);
	}
	*fsdata = (void *)0;
	trace_ext4_da_write_begin(inode, pos, len, flags);
retry:
	/*
	 * With delayed allocation, we don't log the i_disksize update
	 * if there is delayed block allocation. But we still need
	 * to journal the i_disksize update if the write extends the
	 * file into an already mapped buffer at the end of the file.
	 */
	handle = ext4_journal_start(inode, 1);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out;
	}
	/* We cannot recurse into the filesystem as the transaction is already
	 * started */
	flags |= AOP_FLAG_NOFS;

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		ext4_journal_stop(handle);
		ret = -ENOMEM;
		goto out;
	}
	*pagep = page;

	ret = block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
				ext4_da_get_block_prep);
	if (ret < 0) {
		unlock_page(page);
		ext4_journal_stop(handle);
		page_cache_release(page);
		/*
		 * block_write_begin may have instantiated a few blocks
		 * outside i_size.  Trim these off again. Don't need
		 * i_size_read because we hold i_mutex.
3092 */ 3093 if (pos + len > inode->i_size) 3094 ext4_truncate(inode); 3095 } 3096 3097 if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) 3098 goto retry; 3099 out: 3100 return ret; 3101 } 3102 3103 /* 3104 * Check if we should update i_disksize 3105 * when write to the end of file but not require block allocation 3106 */ 3107 static int ext4_da_should_update_i_disksize(struct page *page, 3108 unsigned long offset) 3109 { 3110 struct buffer_head *bh; 3111 struct inode *inode = page->mapping->host; 3112 unsigned int idx; 3113 int i; 3114 3115 bh = page_buffers(page); 3116 idx = offset >> inode->i_blkbits; 3117 3118 for (i = 0; i < idx; i++) 3119 bh = bh->b_this_page; 3120 3121 if (!buffer_mapped(bh) || (buffer_delay(bh)) || buffer_unwritten(bh)) 3122 return 0; 3123 return 1; 3124 } 3125 3126 static int ext4_da_write_end(struct file *file, 3127 struct address_space *mapping, 3128 loff_t pos, unsigned len, unsigned copied, 3129 struct page *page, void *fsdata) 3130 { 3131 struct inode *inode = mapping->host; 3132 int ret = 0, ret2; 3133 handle_t *handle = ext4_journal_current_handle(); 3134 loff_t new_i_size; 3135 unsigned long start, end; 3136 int write_mode = (int)(unsigned long)fsdata; 3137 3138 if (write_mode == FALL_BACK_TO_NONDELALLOC) { 3139 if (ext4_should_order_data(inode)) { 3140 return ext4_ordered_write_end(file, mapping, pos, 3141 len, copied, page, fsdata); 3142 } else if (ext4_should_writeback_data(inode)) { 3143 return ext4_writeback_write_end(file, mapping, pos, 3144 len, copied, page, fsdata); 3145 } else { 3146 BUG(); 3147 } 3148 } 3149 3150 trace_ext4_da_write_end(inode, pos, len, copied); 3151 start = pos & (PAGE_CACHE_SIZE - 1); 3152 end = start + copied - 1; 3153 3154 /* 3155 * generic_write_end() will run mark_inode_dirty() if i_size 3156 * changes. So let's piggyback the i_disksize mark_inode_dirty 3157 * into that. 3158 */ 3159 3160 new_i_size = pos + copied; 3161 if (new_i_size > EXT4_I(inode)->i_disksize) { 3162 if (ext4_da_should_update_i_disksize(page, end)) { 3163 down_write(&EXT4_I(inode)->i_data_sem); 3164 if (new_i_size > EXT4_I(inode)->i_disksize) { 3165 /* 3166 * Updating i_disksize when extending file 3167 * without needing block allocation 3168 */ 3169 if (ext4_should_order_data(inode)) 3170 ret = ext4_jbd2_file_inode(handle, 3171 inode); 3172 3173 EXT4_I(inode)->i_disksize = new_i_size; 3174 } 3175 up_write(&EXT4_I(inode)->i_data_sem); 3176 /* We need to mark inode dirty even if 3177 * new_i_size is less that inode->i_size 3178 * bu greater than i_disksize.(hint delalloc) 3179 */ 3180 ext4_mark_inode_dirty(handle, inode); 3181 } 3182 } 3183 ret2 = generic_write_end(file, mapping, pos, len, copied, 3184 page, fsdata); 3185 copied = ret2; 3186 if (ret2 < 0) 3187 ret = ret2; 3188 ret2 = ext4_journal_stop(handle); 3189 if (!ret) 3190 ret = ret2; 3191 3192 return ret ? ret : copied; 3193 } 3194 3195 static void ext4_da_invalidatepage(struct page *page, unsigned long offset) 3196 { 3197 /* 3198 * Drop reserved blocks 3199 */ 3200 BUG_ON(!PageLocked(page)); 3201 if (!page_has_buffers(page)) 3202 goto out; 3203 3204 ext4_da_page_release_reservation(page, offset); 3205 3206 out: 3207 ext4_invalidatepage(page, offset); 3208 3209 return; 3210 } 3211 3212 /* 3213 * Force all delayed allocation blocks to be allocated for a given inode. 
3214 */ 3215 int ext4_alloc_da_blocks(struct inode *inode) 3216 { 3217 trace_ext4_alloc_da_blocks(inode); 3218 3219 if (!EXT4_I(inode)->i_reserved_data_blocks && 3220 !EXT4_I(inode)->i_reserved_meta_blocks) 3221 return 0; 3222 3223 /* 3224 * We do something simple for now. The filemap_flush() will 3225 * also start triggering a write of the data blocks, which is 3226 * not strictly speaking necessary (and for users of 3227 * laptop_mode, not even desirable). However, to do otherwise 3228 * would require replicating code paths in: 3229 * 3230 * ext4_da_writepages() -> 3231 * write_cache_pages() ---> (via passed in callback function) 3232 * __mpage_da_writepage() --> 3233 * mpage_add_bh_to_extent() 3234 * mpage_da_map_blocks() 3235 * 3236 * The problem is that write_cache_pages(), located in 3237 * mm/page-writeback.c, marks pages clean in preparation for 3238 * doing I/O, which is not desirable if we're not planning on 3239 * doing I/O at all. 3240 * 3241 * We could call write_cache_pages(), and then redirty all of 3242 * the pages by calling redirty_page_for_writeback() but that 3243 * would be ugly in the extreme. So instead we would need to 3244 * replicate parts of the code in the above functions, 3245 * simplifying them becuase we wouldn't actually intend to 3246 * write out the pages, but rather only collect contiguous 3247 * logical block extents, call the multi-block allocator, and 3248 * then update the buffer heads with the block allocations. 3249 * 3250 * For now, though, we'll cheat by calling filemap_flush(), 3251 * which will map the blocks, and start the I/O, but not 3252 * actually wait for the I/O to complete. 3253 */ 3254 return filemap_flush(inode->i_mapping); 3255 } 3256 3257 /* 3258 * bmap() is special. It gets used by applications such as lilo and by 3259 * the swapper to find the on-disk block of a specific piece of data. 3260 * 3261 * Naturally, this is dangerous if the block concerned is still in the 3262 * journal. If somebody makes a swapfile on an ext4 data-journaling 3263 * filesystem and enables swap, then they may get a nasty shock when the 3264 * data getting swapped to that swapfile suddenly gets overwritten by 3265 * the original zero's written out previously to the journal and 3266 * awaiting writeback in the kernel's buffer cache. 3267 * 3268 * So, if we see any bmap calls here on a modified, data-journaled file, 3269 * take extra steps to flush any blocks which might be in the cache. 3270 */ 3271 static sector_t ext4_bmap(struct address_space *mapping, sector_t block) 3272 { 3273 struct inode *inode = mapping->host; 3274 journal_t *journal; 3275 int err; 3276 3277 if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) && 3278 test_opt(inode->i_sb, DELALLOC)) { 3279 /* 3280 * With delalloc we want to sync the file 3281 * so that we can make sure we allocate 3282 * blocks for file 3283 */ 3284 filemap_write_and_wait(mapping); 3285 } 3286 3287 if (EXT4_JOURNAL(inode) && EXT4_I(inode)->i_state & EXT4_STATE_JDATA) { 3288 /* 3289 * This is a REALLY heavyweight approach, but the use of 3290 * bmap on dirty files is expected to be extremely rare: 3291 * only if we run lilo or swapon on a freshly made file 3292 * do we expect this to happen. 3293 * 3294 * (bmap requires CAP_SYS_RAWIO so this does not 3295 * represent an unprivileged user DOS attack --- we'd be 3296 * in trouble if mortal users could trigger this path at 3297 * will.) 3298 * 3299 * NB. EXT4_STATE_JDATA is not set on files other than 3300 * regular files. 
If somebody wants to bmap a directory 3301 * or symlink and gets confused because the buffer 3302 * hasn't yet been flushed to disk, they deserve 3303 * everything they get. 3304 */ 3305 3306 EXT4_I(inode)->i_state &= ~EXT4_STATE_JDATA; 3307 journal = EXT4_JOURNAL(inode); 3308 jbd2_journal_lock_updates(journal); 3309 err = jbd2_journal_flush(journal); 3310 jbd2_journal_unlock_updates(journal); 3311 3312 if (err) 3313 return 0; 3314 } 3315 3316 return generic_block_bmap(mapping, block, ext4_get_block); 3317 } 3318 3319 static int ext4_readpage(struct file *file, struct page *page) 3320 { 3321 return mpage_readpage(page, ext4_get_block); 3322 } 3323 3324 static int 3325 ext4_readpages(struct file *file, struct address_space *mapping, 3326 struct list_head *pages, unsigned nr_pages) 3327 { 3328 return mpage_readpages(mapping, pages, nr_pages, ext4_get_block); 3329 } 3330 3331 static void ext4_invalidatepage(struct page *page, unsigned long offset) 3332 { 3333 journal_t *journal = EXT4_JOURNAL(page->mapping->host); 3334 3335 /* 3336 * If it's a full truncate we just forget about the pending dirtying 3337 */ 3338 if (offset == 0) 3339 ClearPageChecked(page); 3340 3341 if (journal) 3342 jbd2_journal_invalidatepage(journal, page, offset); 3343 else 3344 block_invalidatepage(page, offset); 3345 } 3346 3347 static int ext4_releasepage(struct page *page, gfp_t wait) 3348 { 3349 journal_t *journal = EXT4_JOURNAL(page->mapping->host); 3350 3351 WARN_ON(PageChecked(page)); 3352 if (!page_has_buffers(page)) 3353 return 0; 3354 if (journal) 3355 return jbd2_journal_try_to_free_buffers(journal, page, wait); 3356 else 3357 return try_to_free_buffers(page); 3358 } 3359 3360 /* 3361 * O_DIRECT for ext3 (or indirect map) based files 3362 * 3363 * If the O_DIRECT write will extend the file then add this inode to the 3364 * orphan list. So recovery will truncate it back to the original size 3365 * if the machine crashes during the write. 3366 * 3367 * If the O_DIRECT write is intantiating holes inside i_size and the machine 3368 * crashes then stale disk data _may_ be exposed inside the file. But current 3369 * VFS code falls back into buffered path in that case so we are safe. 3370 */ 3371 static ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb, 3372 const struct iovec *iov, loff_t offset, 3373 unsigned long nr_segs) 3374 { 3375 struct file *file = iocb->ki_filp; 3376 struct inode *inode = file->f_mapping->host; 3377 struct ext4_inode_info *ei = EXT4_I(inode); 3378 handle_t *handle; 3379 ssize_t ret; 3380 int orphan = 0; 3381 size_t count = iov_length(iov, nr_segs); 3382 int retries = 0; 3383 3384 if (rw == WRITE) { 3385 loff_t final_size = offset + count; 3386 3387 if (final_size > inode->i_size) { 3388 /* Credits for sb + inode write */ 3389 handle = ext4_journal_start(inode, 2); 3390 if (IS_ERR(handle)) { 3391 ret = PTR_ERR(handle); 3392 goto out; 3393 } 3394 ret = ext4_orphan_add(handle, inode); 3395 if (ret) { 3396 ext4_journal_stop(handle); 3397 goto out; 3398 } 3399 orphan = 1; 3400 ei->i_disksize = inode->i_size; 3401 ext4_journal_stop(handle); 3402 } 3403 } 3404 3405 retry: 3406 ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, 3407 offset, nr_segs, 3408 ext4_get_block, NULL); 3409 if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) 3410 goto retry; 3411 3412 if (orphan) { 3413 int err; 3414 3415 /* Credits for sb + inode write */ 3416 handle = ext4_journal_start(inode, 2); 3417 if (IS_ERR(handle)) { 3418 /* This is really bad luck. 
We've written the data
			 * but cannot extend i_size.  Bail out and pretend
			 * the write failed... */
			ret = PTR_ERR(handle);
			goto out;
		}
		if (inode->i_nlink)
			ext4_orphan_del(handle, inode);
		if (ret > 0) {
			loff_t end = offset + ret;
			if (end > inode->i_size) {
				ei->i_disksize = end;
				i_size_write(inode, end);
				/*
				 * We're going to return a positive `ret'
				 * here due to non-zero-length I/O, so there's
				 * no way of reporting error returns from
				 * ext4_mark_inode_dirty() to userspace.  So
				 * ignore it.
				 */
				ext4_mark_inode_dirty(handle, inode);
			}
		}
		err = ext4_journal_stop(handle);
		if (ret == 0)
			ret = err;
	}
out:
	return ret;
}

static int ext4_get_block_dio_write(struct inode *inode, sector_t iblock,
				    struct buffer_head *bh_result, int create)
{
	handle_t *handle = NULL;
	int ret = 0;
	unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
	int dio_credits;

	ext4_debug("ext4_get_block_dio_write: inode %lu, create flag %d\n",
		   inode->i_ino, create);
	/*
	 * The DIO VFS code passes create = 0 for writes to the middle
	 * of the file.  It does this to avoid block allocation for
	 * holes, to prevent exposing stale data when there is a
	 * parallel buffered read (which does not hold the i_mutex
	 * lock) while the direct IO write has not completed.  A DIO
	 * request on a hole therefore finally falls back to buffered
	 * IO for this reason.
	 *
	 * For ext4 extent-based files we support fallocate, which
	 * marks newly allocated extents as uninitialized; so for
	 * holes we can allocate blocks up front, and a parallel
	 * buffered read will zero out the page when it reads a hole
	 * whose DIO write has not yet completed.
	 *
	 * When we come here, we know it's a direct IO write to
	 * the middle of the file (< i_size),
	 * so it's safe to override the create flag from the VFS.
3477 */ 3478 create = EXT4_GET_BLOCKS_DIO_CREATE_EXT; 3479 3480 if (max_blocks > DIO_MAX_BLOCKS) 3481 max_blocks = DIO_MAX_BLOCKS; 3482 dio_credits = ext4_chunk_trans_blocks(inode, max_blocks); 3483 handle = ext4_journal_start(inode, dio_credits); 3484 if (IS_ERR(handle)) { 3485 ret = PTR_ERR(handle); 3486 goto out; 3487 } 3488 ret = ext4_get_blocks(handle, inode, iblock, max_blocks, bh_result, 3489 create); 3490 if (ret > 0) { 3491 bh_result->b_size = (ret << inode->i_blkbits); 3492 ret = 0; 3493 } 3494 ext4_journal_stop(handle); 3495 out: 3496 return ret; 3497 } 3498 3499 static void ext4_free_io_end(ext4_io_end_t *io) 3500 { 3501 BUG_ON(!io); 3502 iput(io->inode); 3503 kfree(io); 3504 } 3505 static void dump_aio_dio_list(struct inode * inode) 3506 { 3507 #ifdef EXT4_DEBUG 3508 struct list_head *cur, *before, *after; 3509 ext4_io_end_t *io, *io0, *io1; 3510 3511 if (list_empty(&EXT4_I(inode)->i_aio_dio_complete_list)){ 3512 ext4_debug("inode %lu aio dio list is empty\n", inode->i_ino); 3513 return; 3514 } 3515 3516 ext4_debug("Dump inode %lu aio_dio_completed_IO list \n", inode->i_ino); 3517 list_for_each_entry(io, &EXT4_I(inode)->i_aio_dio_complete_list, list){ 3518 cur = &io->list; 3519 before = cur->prev; 3520 io0 = container_of(before, ext4_io_end_t, list); 3521 after = cur->next; 3522 io1 = container_of(after, ext4_io_end_t, list); 3523 3524 ext4_debug("io 0x%p from inode %lu,prev 0x%p,next 0x%p\n", 3525 io, inode->i_ino, io0, io1); 3526 } 3527 #endif 3528 } 3529 3530 /* 3531 * check a range of space and convert unwritten extents to written. 3532 */ 3533 static int ext4_end_aio_dio_nolock(ext4_io_end_t *io) 3534 { 3535 struct inode *inode = io->inode; 3536 loff_t offset = io->offset; 3537 size_t size = io->size; 3538 int ret = 0; 3539 3540 ext4_debug("end_aio_dio_onlock: io 0x%p from inode %lu,list->next 0x%p," 3541 "list->prev 0x%p\n", 3542 io, inode->i_ino, io->list.next, io->list.prev); 3543 3544 if (list_empty(&io->list)) 3545 return ret; 3546 3547 if (io->flag != DIO_AIO_UNWRITTEN) 3548 return ret; 3549 3550 if (offset + size <= i_size_read(inode)) 3551 ret = ext4_convert_unwritten_extents(inode, offset, size); 3552 3553 if (ret < 0) { 3554 printk(KERN_EMERG "%s: failed to convert unwritten" 3555 "extents to written extents, error is %d" 3556 " io is still on inode %lu aio dio list\n", 3557 __func__, ret, inode->i_ino); 3558 return ret; 3559 } 3560 3561 /* clear the DIO AIO unwritten flag */ 3562 io->flag = 0; 3563 return ret; 3564 } 3565 /* 3566 * work on completed aio dio IO, to convert unwritten extents to extents 3567 */ 3568 static void ext4_end_aio_dio_work(struct work_struct *work) 3569 { 3570 ext4_io_end_t *io = container_of(work, ext4_io_end_t, work); 3571 struct inode *inode = io->inode; 3572 int ret = 0; 3573 3574 mutex_lock(&inode->i_mutex); 3575 ret = ext4_end_aio_dio_nolock(io); 3576 if (ret >= 0) { 3577 if (!list_empty(&io->list)) 3578 list_del_init(&io->list); 3579 ext4_free_io_end(io); 3580 } 3581 mutex_unlock(&inode->i_mutex); 3582 } 3583 /* 3584 * This function is called from ext4_sync_file(). 3585 * 3586 * When AIO DIO IO is completed, the work to convert unwritten 3587 * extents to written is queued on workqueue but may not get immediately 3588 * scheduled. When fsync is called, we need to ensure the 3589 * conversion is complete before fsync returns. 3590 * The inode keeps track of a list of completed AIO from DIO path 3591 * that might needs to do the conversion. 
This function walks through
 * the list and converts the related unwritten extents to written.
 */
int flush_aio_dio_completed_IO(struct inode *inode)
{
	ext4_io_end_t *io;
	int ret = 0;
	int ret2 = 0;

	if (list_empty(&EXT4_I(inode)->i_aio_dio_complete_list))
		return ret;

	dump_aio_dio_list(inode);
	while (!list_empty(&EXT4_I(inode)->i_aio_dio_complete_list)) {
		io = list_entry(EXT4_I(inode)->i_aio_dio_complete_list.next,
				ext4_io_end_t, list);
		/*
		 * Calling ext4_end_aio_dio_nolock() to convert completed
		 * IO to written.
		 *
		 * When ext4_sync_file() is called, run_queue() may already
		 * be about to flush the work corresponding to this io
		 * structure, and it would be upset to find that the io
		 * structure for its scheduled work had been freed.
		 *
		 * Thus we need to keep the io structure valid here after
		 * the conversion has finished.  The io structure has a
		 * flag to avoid converting it twice, once from fsync and
		 * once from the background workqueue.
		 */
		ret = ext4_end_aio_dio_nolock(io);
		if (ret < 0)
			ret2 = ret;
		else
			list_del_init(&io->list);
	}
	return (ret2 < 0) ? ret2 : 0;
}
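/*
 * A summary of the io_end lifecycle implemented above and below:
 * ext4_init_io_end() allocates the structure and takes a reference on
 * the inode; ext4_get_blocks() may flag it DIO_AIO_UNWRITTEN while the
 * write is in flight; on I/O completion ext4_end_io_dio() queues it on
 * the dio_unwritten_wq workqueue and on the per-inode list; the
 * unwritten-to-written conversion then happens either in
 * ext4_end_aio_dio_work() or, if fsync gets there first, in
 * flush_aio_dio_completed_IO(); ext4_free_io_end() finally drops the
 * inode reference and frees the structure.
 */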
/*
 * For ext4 extent files, ext4 will do direct-io writes to holes,
 * preallocated extents, and writes that extend the file; there is no need
 * to fall back to buffered IO.
 *
 * For holes, we fallocate those blocks and mark them as uninitialized.
 * If those blocks were preallocated, we make sure they are split, but
 * still keep the range to write as uninitialized.
 *
 * The unwritten extents will be converted to written when DIO is completed.
 * For async direct IO, since the IO may still be pending when we return, we
 * set up an end_io callback function, which will do the conversion
 * when the async direct IO is completed.
 *
 * If the O_DIRECT write will extend the file then add this inode to the
 * orphan list, so recovery will truncate it back to the original size
 * if the machine crashes during the write.
 *
 */
3703 static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
3704 				  const struct iovec *iov, loff_t offset,
3705 				  unsigned long nr_segs)
3706 {
3707 	struct file *file = iocb->ki_filp;
3708 	struct inode *inode = file->f_mapping->host;
3709 	ssize_t ret;
3710 	size_t count = iov_length(iov, nr_segs);
3711 
3712 	loff_t final_size = offset + count;
3713 	if (rw == WRITE && final_size <= inode->i_size) {
3714 		/*
3715 		 * We can do direct writes to holes and to fallocated extents.
3716 		 *
3717 		 * Blocks allocated to fill a hole are marked as uninitialized
3718 		 * to prevent a parallel buffered read from exposing the stale
3719 		 * data before DIO completes the data IO.
3720 		 *
3721 		 * As to previously fallocated extents, ext4's get_block will
3722 		 * simply mark the buffer mapped but still keep the extents
3723 		 * uninitialized.
3724 		 *
3725 		 * For the non-AIO case, we will convert those unwritten
3726 		 * extents to written after returning from blockdev_direct_IO.
3727 		 *
3728 		 * For async DIO, the conversion needs to be deferred until
3729 		 * the IO is completed. The ext4 end_io callback function
3730 		 * will be called to take care of the conversion work.
3731 		 * Here, for the async case, we allocate an io_end structure
3732 		 * to hook to the iocb.
3733 		 */
3734 		iocb->private = NULL;
3735 		EXT4_I(inode)->cur_aio_dio = NULL;
3736 		if (!is_sync_kiocb(iocb)) {
3737 			iocb->private = ext4_init_io_end(inode);
3738 			if (!iocb->private)
3739 				return -ENOMEM;
3740 			/*
3741 			 * We save the io structure for the current async
3742 			 * direct IO, so that later ext4_get_blocks() can
3743 			 * flag in the io structure whether there are
3744 			 * unwritten extents that need to be converted
3745 			 * when the IO is completed.
3746 			 */
3747 			EXT4_I(inode)->cur_aio_dio = iocb->private;
3748 		}
3749 
3750 		ret = blockdev_direct_IO(rw, iocb, inode,
3751 					 inode->i_sb->s_bdev, iov,
3752 					 offset, nr_segs,
3753 					 ext4_get_block_dio_write,
3754 					 ext4_end_io_dio);
3755 		if (iocb->private)
3756 			EXT4_I(inode)->cur_aio_dio = NULL;
3757 		/*
3758 		 * The io_end structure takes a reference to the inode;
3759 		 * that structure needs to be destroyed and the
3760 		 * reference to the inode needs to be dropped when the IO
3761 		 * is complete, even for a 0-byte write or a failure.
3762 		 *
3763 		 * In the successful AIO DIO case, the io_end structure will
3764 		 * be destroyed and the reference to the inode will be
3765 		 * dropped after the end_io callback function is called.
3766 		 *
3767 		 * In the 0-byte write or error case, since the
3768 		 * VFS direct IO won't invoke the end_io callback function,
3769 		 * we need to free the io_end structure here.
3770 		 */
3771 		if (ret != -EIOCBQUEUED && ret <= 0 && iocb->private) {
3772 			ext4_free_io_end(iocb->private);
3773 			iocb->private = NULL;
3774 		} else if (ret > 0 && (EXT4_I(inode)->i_state &
3775 				       EXT4_STATE_DIO_UNWRITTEN)) {
3776 			int err;
3777 			/*
3778 			 * For the non-AIO case, since the IO is already
3779 			 * completed, we can do the conversion right here.
3780 			 */
3781 			err = ext4_convert_unwritten_extents(inode,
3782 							     offset, ret);
3783 			if (err < 0)
3784 				ret = err;
3785 			EXT4_I(inode)->i_state &= ~EXT4_STATE_DIO_UNWRITTEN;
3786 		}
3787 		return ret;
3788 	}
3789 
3790 	/* for the case of writing past the end of the file, we fall back to the old way */
3791 	return ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
3792 }
3793 
3794 static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
3795 			      const struct iovec *iov, loff_t offset,
3796 			      unsigned long nr_segs)
3797 {
3798 	struct file *file = iocb->ki_filp;
3799 	struct inode *inode = file->f_mapping->host;
3800 
3801 	if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)
3802 		return ext4_ext_direct_IO(rw, iocb, iov, offset, nr_segs);
3803 
3804 	return ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
3805 }
3806 
3807 /*
3808  * Pages can be marked dirty completely asynchronously from ext4's
3809  * journalling activity: by filemap_sync_pte(), try_to_unmap_one(), etc.
3810  * We cannot do much here because ->set_page_dirty is called under VFS
3811  * locks. The page is not necessarily locked.
3812  *
3813  * We cannot just dirty the page and leave attached buffers clean, because
3814  * the buffers' dirty state is "definitive". We cannot just set the buffers
3815  * dirty or jbddirty because all the journalling code will explode.
3816  *
3817  * So what we do is to mark the page "pending dirty" and next time writepage
3818  * is called, propagate that into the buffers appropriately.
3819  */
3820 static int ext4_journalled_set_page_dirty(struct page *page)
3821 {
3822 	SetPageChecked(page);
3823 	return __set_page_dirty_nobuffers(page);
3824 }
3825 
3826 static const struct address_space_operations ext4_ordered_aops = {
3827 	.readpage		= ext4_readpage,
3828 	.readpages		= ext4_readpages,
3829 	.writepage		= ext4_writepage,
3830 	.sync_page		= block_sync_page,
3831 	.write_begin		= ext4_write_begin,
3832 	.write_end		= ext4_ordered_write_end,
3833 	.bmap			= ext4_bmap,
3834 	.invalidatepage		= ext4_invalidatepage,
3835 	.releasepage		= ext4_releasepage,
3836 	.direct_IO		= ext4_direct_IO,
3837 	.migratepage		= buffer_migrate_page,
3838 	.is_partially_uptodate	= block_is_partially_uptodate,
3839 	.error_remove_page	= generic_error_remove_page,
3840 };
3841 
3842 static const struct address_space_operations ext4_writeback_aops = {
3843 	.readpage		= ext4_readpage,
3844 	.readpages		= ext4_readpages,
3845 	.writepage		= ext4_writepage,
3846 	.sync_page		= block_sync_page,
3847 	.write_begin		= ext4_write_begin,
3848 	.write_end		= ext4_writeback_write_end,
3849 	.bmap			= ext4_bmap,
3850 	.invalidatepage		= ext4_invalidatepage,
3851 	.releasepage		= ext4_releasepage,
3852 	.direct_IO		= ext4_direct_IO,
3853 	.migratepage		= buffer_migrate_page,
3854 	.is_partially_uptodate	= block_is_partially_uptodate,
3855 	.error_remove_page	= generic_error_remove_page,
3856 };
3857 
3858 static const struct address_space_operations ext4_journalled_aops = {
3859 	.readpage		= ext4_readpage,
3860 	.readpages		= ext4_readpages,
3861 	.writepage		= ext4_writepage,
3862 	.sync_page		= block_sync_page,
3863 	.write_begin		= ext4_write_begin,
3864 	.write_end		= ext4_journalled_write_end,
3865 	.set_page_dirty		= ext4_journalled_set_page_dirty,
3866 	.bmap			= ext4_bmap,
3867 	.invalidatepage		= ext4_invalidatepage,
3868 	.releasepage		= ext4_releasepage,
3869 	.is_partially_uptodate	= block_is_partially_uptodate,
3870 	.error_remove_page	= generic_error_remove_page,
3871 };
3872 
3873 static const struct address_space_operations ext4_da_aops = {
3874 	.readpage		= ext4_readpage,
3875 	.readpages		= ext4_readpages,
3876 	.writepage		= ext4_writepage,
3877 	.writepages		= ext4_da_writepages,
3878 	.sync_page		= block_sync_page,
3879 	.write_begin		= ext4_da_write_begin,
3880 	.write_end		= ext4_da_write_end,
3881 	.bmap			= ext4_bmap,
3882 	.invalidatepage		= ext4_da_invalidatepage,
3883 	.releasepage		= ext4_releasepage,
3884 	.direct_IO		= ext4_direct_IO,
3885 	.migratepage		= buffer_migrate_page,
3886 	.is_partially_uptodate	= block_is_partially_uptodate,
3887 	.error_remove_page	= generic_error_remove_page,
3888 };
3889 
3890 void ext4_set_aops(struct inode *inode)
3891 {
3892 	if (ext4_should_order_data(inode) &&
3893 	    test_opt(inode->i_sb, DELALLOC))
3894 		inode->i_mapping->a_ops = &ext4_da_aops;
3895 	else if (ext4_should_order_data(inode))
3896 		inode->i_mapping->a_ops = &ext4_ordered_aops;
3897 	else if (ext4_should_writeback_data(inode) &&
3898 		 test_opt(inode->i_sb, DELALLOC))
3899 		inode->i_mapping->a_ops = &ext4_da_aops;
3900 	else if (ext4_should_writeback_data(inode))
3901 		inode->i_mapping->a_ops = &ext4_writeback_aops;
3902 	else
3903 		inode->i_mapping->a_ops = &ext4_journalled_aops;
3904 }
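/*
 * Quick reference for the selection in ext4_set_aops() above:
 *
 *   data=ordered   + delalloc -> ext4_da_aops
 *   data=ordered              -> ext4_ordered_aops
 *   data=writeback + delalloc -> ext4_da_aops
 *   data=writeback            -> ext4_writeback_aops
 *   data=journal              -> ext4_journalled_aops
 */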
3905 
3906 /*
3907  * ext4_block_truncate_page() zeroes out a mapping from file offset `from'
3908  * up to the end of the block which corresponds to `from'.
3909  * This is required during truncate. We need to physically zero the tail
3910  * end of that block so it doesn't yield old data if the file is later grown.
3911  */
3912 int ext4_block_truncate_page(handle_t *handle,
3913 		struct address_space *mapping, loff_t from)
3914 {
3915 	ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT;
3916 	unsigned offset = from & (PAGE_CACHE_SIZE-1);
3917 	unsigned blocksize, length, pos;
3918 	ext4_lblk_t iblock;
3919 	struct inode *inode = mapping->host;
3920 	struct buffer_head *bh;
3921 	struct page *page;
3922 	int err = 0;
3923 
3924 	page = find_or_create_page(mapping, from >> PAGE_CACHE_SHIFT,
3925 				   mapping_gfp_mask(mapping) & ~__GFP_FS);
3926 	if (!page)
3927 		return -EINVAL;
3928 
3929 	blocksize = inode->i_sb->s_blocksize;
3930 	length = blocksize - (offset & (blocksize - 1));
3931 	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
3932 
3933 	/*
3934 	 * For the "nobh" option, we can only work if we don't need to
3935 	 * read in the page - otherwise we create buffers to do the IO.
3936 	 */
3937 	if (!page_has_buffers(page) && test_opt(inode->i_sb, NOBH) &&
3938 	    ext4_should_writeback_data(inode) && PageUptodate(page)) {
3939 		zero_user(page, offset, length);
3940 		set_page_dirty(page);
3941 		goto unlock;
3942 	}
3943 
3944 	if (!page_has_buffers(page))
3945 		create_empty_buffers(page, blocksize, 0);
3946 
3947 	/* Find the buffer that contains "offset" */
3948 	bh = page_buffers(page);
3949 	pos = blocksize;
3950 	while (offset >= pos) {
3951 		bh = bh->b_this_page;
3952 		iblock++;
3953 		pos += blocksize;
3954 	}
3955 
3956 	err = 0;
3957 	if (buffer_freed(bh)) {
3958 		BUFFER_TRACE(bh, "freed: skip");
3959 		goto unlock;
3960 	}
3961 
3962 	if (!buffer_mapped(bh)) {
3963 		BUFFER_TRACE(bh, "unmapped");
3964 		ext4_get_block(inode, iblock, bh, 0);
3965 		/* unmapped? It's a hole - nothing to do */
3966 		if (!buffer_mapped(bh)) {
3967 			BUFFER_TRACE(bh, "still unmapped");
3968 			goto unlock;
3969 		}
3970 	}
3971 
3972 	/* Ok, it's mapped. Make sure it's up-to-date */
3973 	if (PageUptodate(page))
3974 		set_buffer_uptodate(bh);
3975 
3976 	if (!buffer_uptodate(bh)) {
3977 		err = -EIO;
3978 		ll_rw_block(READ, 1, &bh);
3979 		wait_on_buffer(bh);
3980 		/* Uhhuh. Read error. Complain and punt. */
3981 		if (!buffer_uptodate(bh))
3982 			goto unlock;
3983 	}
3984 
3985 	if (ext4_should_journal_data(inode)) {
3986 		BUFFER_TRACE(bh, "get write access");
3987 		err = ext4_journal_get_write_access(handle, bh);
3988 		if (err)
3989 			goto unlock;
3990 	}
3991 
3992 	zero_user(page, offset, length);
3993 
3994 	BUFFER_TRACE(bh, "zeroed end of block");
3995 
3996 	err = 0;
3997 	if (ext4_should_journal_data(inode)) {
3998 		err = ext4_handle_dirty_metadata(handle, inode, bh);
3999 	} else {
4000 		if (ext4_should_order_data(inode))
4001 			err = ext4_jbd2_file_inode(handle, inode);
4002 		mark_buffer_dirty(bh);
4003 	}
4004 
4005 unlock:
4006 	unlock_page(page);
4007 	page_cache_release(page);
4008 	return err;
4009 }
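/*
 * Worked example for the arithmetic above (assuming 4K pages and a 4K
 * block size, so PAGE_CACHE_SHIFT == s_blocksize_bits == 12): for
 * from = 6000, index = 6000 >> 12 = 1, offset = 6000 & 4095 = 1904 and
 * length = 4096 - (1904 & 4095) = 2192, so zero_user() clears bytes
 * 1904..4095 of page 1 - file offsets 6000..8191, i.e. the tail of the
 * block containing the new end of file.
 */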
4010 
4011 /*
4012  * Probably it should be a library function... search for the first non-zero
4013  * word or memcmp with zero_page, whatever is better for a particular
4014  * architecture. Linus?
4015  */
4016 static inline int all_zeroes(__le32 *p, __le32 *q)
4017 {
4018 	while (p < q)
4019 		if (*p++)
4020 			return 0;
4021 	return 1;
4022 }
4023 
4024 /**
4025  * ext4_find_shared - find the indirect blocks for partial truncation.
4026  * @inode: inode in question
4027  * @depth: depth of the affected branch
4028  * @offsets: offsets of pointers in that branch (see ext4_block_to_path)
4029  * @chain: place to store the pointers to partial indirect blocks
4030  * @top: place to store the (detached) top of the branch
4031  *
4032  * This is a helper function used by ext4_truncate().
4033  *
4034  * When we do truncate() we may have to clean the ends of several
4035  * indirect blocks but leave the blocks themselves alive. A block is
4036  * partially truncated if some data below the new i_size is referred to
4037  * from it (and it is on the path to the first completely truncated
4038  * data block, indeed). We have to free the top of that path along
4039  * with everything to the right of the path. Since no allocation
4040  * past the truncation point is possible until ext4_truncate()
4041  * finishes, we may safely do the latter, but the top of the branch may
4042  * require special attention - pageout below the truncation point
4043  * might try to populate it.
4044  *
4045  * We atomically detach the top of the branch from the tree, store the
4046  * block number of its root in *@top, pointers to buffer_heads of
4047  * partially truncated blocks - in @chain[].bh and pointers to
4048  * their last elements that should not be removed - in
4049  * @chain[].p. The return value is the pointer to the last filled element
4050  * of @chain.
4051  *
4052  * The work left to the caller is the actual freeing of subtrees:
4053  *	a) free the subtree starting from *@top
4054  *	b) free the subtrees whose roots are stored in
4055  *		(@chain[i].p+1 .. end of @chain[i].bh->b_data)
4056  *	c) free the subtrees growing from the inode past the @chain[0].
4057  *		(no partially truncated stuff there). */
4058 
4059 static Indirect *ext4_find_shared(struct inode *inode, int depth,
4060 				  ext4_lblk_t offsets[4], Indirect chain[4],
4061 				  __le32 *top)
4062 {
4063 	Indirect *partial, *p;
4064 	int k, err;
4065 
4066 	*top = 0;
4067 	/* Make k index the deepest non-null offset + 1 */
4068 	for (k = depth; k > 1 && !offsets[k-1]; k--)
4069 		;
4070 	partial = ext4_get_branch(inode, k, offsets, chain, &err);
4071 	/* Writer: pointers */
4072 	if (!partial)
4073 		partial = chain + k-1;
4074 	/*
4075 	 * If the branch acquired continuation since we've looked at it -
4076 	 * fine, it should all survive and (new) top doesn't belong to us.
4077 	 */
4078 	if (!partial->key && *partial->p)
4079 		/* Writer: end */
4080 		goto no_top;
4081 	for (p = partial; (p > chain) && all_zeroes((__le32 *) p->bh->b_data, p->p); p--)
4082 		;
4083 	/*
4084 	 * OK, we've found the last block that must survive. The rest of our
4085 	 * branch should be detached before unlocking. However, if that rest
4086 	 * of branch is all ours and does not grow immediately from the inode
4087 	 * it's easier to cheat and just decrement partial->p.
4088 	 */
4089 	if (p == chain + k - 1 && p > chain) {
4090 		p->p--;
4091 	} else {
4092 		*top = *p->p;
4093 		/* Nope, don't do this in ext4. Must leave the tree intact */
4094 #if 0
4095 		*p->p = 0;
4096 #endif
4097 	}
4098 	/* Writer: end */
4099 
4100 	while (partial > p) {
4101 		brelse(partial->bh);
4102 		partial--;
4103 	}
4104 no_top:
4105 	return partial;
4106 }
4107 
4108 /*
4109  * Zero a number of block pointers in either an inode or an indirect block.
4110  * If we restart the transaction we must again get write access to the
4111  * indirect block for further modification.
4112  *
4113  * We release `count' blocks on disk, but (last - first) may be greater
4114  * than `count' because there can be holes in there.
4115  */
4116 static void ext4_clear_blocks(handle_t *handle, struct inode *inode,
4117 			      struct buffer_head *bh,
4118 			      ext4_fsblk_t block_to_free,
4119 			      unsigned long count, __le32 *first,
4120 			      __le32 *last)
4121 {
4122 	__le32 *p;
4123 	if (try_to_extend_transaction(handle, inode)) {
4124 		if (bh) {
4125 			BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
4126 			ext4_handle_dirty_metadata(handle, inode, bh);
4127 		}
4128 		ext4_mark_inode_dirty(handle, inode);
4129 		ext4_truncate_restart_trans(handle, inode,
4130 					    blocks_for_truncate(inode));
4131 		if (bh) {
4132 			BUFFER_TRACE(bh, "retaking write access");
4133 			ext4_journal_get_write_access(handle, bh);
4134 		}
4135 	}
4136 
4137 	/*
4138 	 * Any buffers which are on the journal will be in memory. We
4139 	 * find them on the hash table so jbd2_journal_revoke() will
4140 	 * run jbd2_journal_forget() on them. We've already detached
4141 	 * each block from the file, so bforget() in
4142 	 * jbd2_journal_forget() should be safe.
4143 	 *
4144 	 * AKPM: turn on bforget in jbd2_journal_forget()!!!
4145 	 */
4146 	for (p = first; p < last; p++) {
4147 		u32 nr = le32_to_cpu(*p);
4148 		if (nr) {
4149 			struct buffer_head *tbh;
4150 
4151 			*p = 0;
4152 			tbh = sb_find_get_block(inode->i_sb, nr);
4153 			ext4_forget(handle, 0, inode, tbh, nr);
4154 		}
4155 	}
4156 
4157 	ext4_free_blocks(handle, inode, block_to_free, count, 0);
4158 }
4159 
4160 /**
4161  * ext4_free_data - free a list of data blocks
4162  * @handle:	handle for this transaction
4163  * @inode:	inode we are dealing with
4164  * @this_bh:	indirect buffer_head which contains *@first and *@last
4165  * @first:	array of block numbers
4166  * @last:	points immediately past the end of array
4167  *
4168  * We are freeing all blocks referred to from that array (numbers are stored
4169  * as little-endian 32-bit) and updating @inode->i_blocks appropriately.
4170  *
4171  * We accumulate contiguous runs of blocks to free. Conveniently, if these
4172  * blocks are contiguous then releasing them at one time will only affect one
4173  * or two bitmap blocks (+ group descriptor(s) and superblock) and we won't
4174  * actually use a lot of journal space.
4175  *
4176  * @this_bh will be %NULL if @first and @last point into the inode's direct
4177  * block pointers.
4178  */
4179 static void ext4_free_data(handle_t *handle, struct inode *inode,
4180 			   struct buffer_head *this_bh,
4181 			   __le32 *first, __le32 *last)
4182 {
4183 	ext4_fsblk_t block_to_free = 0;    /* Starting block # of a run */
4184 	unsigned long count = 0;	   /* Number of blocks in the run */
4185 	__le32 *block_to_free_p = NULL;	   /* Pointer into inode/ind
4186 					      corresponding to
4187 					      block_to_free */
4188 	ext4_fsblk_t nr;		   /* Current block # */
4189 	__le32 *p;			   /* Pointer into inode/ind
4190 					      for current block */
4191 	int err;
4192 
4193 	if (this_bh) {				/* For indirect block */
4194 		BUFFER_TRACE(this_bh, "get_write_access");
4195 		err = ext4_journal_get_write_access(handle, this_bh);
4196 		/* Important: if we can't update the indirect pointers
4197 		 * to the blocks, we can't free them.
		 */
4198 		if (err)
4199 			return;
4200 	}
4201 
4202 	for (p = first; p < last; p++) {
4203 		nr = le32_to_cpu(*p);
4204 		if (nr) {
4205 			/* accumulate blocks to free if they're contiguous */
4206 			if (count == 0) {
4207 				block_to_free = nr;
4208 				block_to_free_p = p;
4209 				count = 1;
4210 			} else if (nr == block_to_free + count) {
4211 				count++;
4212 			} else {
4213 				ext4_clear_blocks(handle, inode, this_bh,
4214 						  block_to_free,
4215 						  count, block_to_free_p, p);
4216 				block_to_free = nr;
4217 				block_to_free_p = p;
4218 				count = 1;
4219 			}
4220 		}
4221 	}
4222 
4223 	if (count > 0)
4224 		ext4_clear_blocks(handle, inode, this_bh, block_to_free,
4225 				  count, block_to_free_p, p);
4226 
4227 	if (this_bh) {
4228 		BUFFER_TRACE(this_bh, "call ext4_handle_dirty_metadata");
4229 
4230 		/*
4231 		 * The buffer head should have an attached journal head at this
4232 		 * point. However, if the data is corrupted and an indirect
4233 		 * block pointed to itself, it would have been detached when
4234 		 * the block was cleared. Check for this instead of OOPSing.
4235 		 */
4236 		if ((EXT4_JOURNAL(inode) == NULL) || bh2jh(this_bh))
4237 			ext4_handle_dirty_metadata(handle, inode, this_bh);
4238 		else
4239 			ext4_error(inode->i_sb, __func__,
4240 				   "circular indirect block detected, "
4241 				   "inode=%lu, block=%llu",
4242 				   inode->i_ino,
4243 				   (unsigned long long) this_bh->b_blocknr);
4244 	}
4245 }
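/*
 * Worked example of the run accumulation above: if the pointer array
 * holds blocks {100, 101, 102, 200, 0, 50}, the loop issues three
 * ext4_clear_blocks() calls - (100, count 3), (200, count 1) and
 * (50, count 1). The zero entry (a hole) is simply skipped, and each
 * contiguous run costs at most one or two bitmap blocks in the journal.
 */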
4246 
4247 /**
4248  * ext4_free_branches - free an array of branches
4249  * @handle:	JBD handle for this transaction
4250  * @inode:	inode we are dealing with
4251  * @parent_bh:	the buffer_head which contains *@first and *@last
4252  * @first:	array of block numbers
4253  * @last:	pointer immediately past the end of array
4254  * @depth:	depth of the branches to free
4255  *
4256  * We are freeing all blocks referred to from these branches (numbers are
4257  * stored as little-endian 32-bit) and updating @inode->i_blocks
4258  * appropriately.
4259  */
4260 static void ext4_free_branches(handle_t *handle, struct inode *inode,
4261 			       struct buffer_head *parent_bh,
4262 			       __le32 *first, __le32 *last, int depth)
4263 {
4264 	ext4_fsblk_t nr;
4265 	__le32 *p;
4266 
4267 	if (ext4_handle_is_aborted(handle))
4268 		return;
4269 
4270 	if (depth--) {
4271 		struct buffer_head *bh;
4272 		int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
4273 		p = last;
4274 		while (--p >= first) {
4275 			nr = le32_to_cpu(*p);
4276 			if (!nr)
4277 				continue;		/* A hole */
4278 
4279 			/* Go read the buffer for the next level down */
4280 			bh = sb_bread(inode->i_sb, nr);
4281 
4282 			/*
4283 			 * A read failure? Report error and clear slot
4284 			 * (should be rare).
4285 			 */
4286 			if (!bh) {
4287 				ext4_error(inode->i_sb, "ext4_free_branches",
4288 					   "Read failure, inode=%lu, block=%llu",
4289 					   inode->i_ino, nr);
4290 				continue;
4291 			}
4292 
4293 			/* This zaps the entire block. Bottom up. */
4294 			BUFFER_TRACE(bh, "free child branches");
4295 			ext4_free_branches(handle, inode, bh,
4296 					   (__le32 *) bh->b_data,
4297 					   (__le32 *) bh->b_data + addr_per_block,
4298 					   depth);
4299 
4300 			/*
4301 			 * We've probably journalled the indirect block several
4302 			 * times during the truncate. But it's no longer
4303 			 * needed and we now drop it from the transaction via
4304 			 * jbd2_journal_revoke().
4305 			 *
4306 			 * That's easy if it's exclusively part of this
4307 			 * transaction. But if it's part of the committing
4308 			 * transaction then jbd2_journal_forget() will simply
4309 			 * brelse() it. That means that if the underlying
4310 			 * block is reallocated in ext4_get_block(),
4311 			 * unmap_underlying_metadata() will find this block
4312 			 * and will try to get rid of it. damn, damn.
4313 			 *
4314 			 * If this block has already been committed to the
4315 			 * journal, a revoke record will be written. And
4316 			 * revoke records must be emitted *before* clearing
4317 			 * this block's bit in the bitmaps.
4318 			 */
4319 			ext4_forget(handle, 1, inode, bh, bh->b_blocknr);
4320 
4321 			/*
4322 			 * Everything below this pointer has been
4323 			 * released. Now let this top-of-subtree go.
4324 			 *
4325 			 * We want the freeing of this indirect block to be
4326 			 * atomic in the journal with the updating of the
4327 			 * bitmap block which owns it. So make some room in
4328 			 * the journal.
4329 			 *
4330 			 * We zero the parent pointer *after* freeing its
4331 			 * pointee in the bitmaps, so if extend_transaction()
4332 			 * for some reason fails to put the bitmap changes and
4333 			 * the release into the same transaction, recovery
4334 			 * will merely complain about releasing a free block,
4335 			 * rather than leaking blocks.
4336 			 */
4337 			if (ext4_handle_is_aborted(handle))
4338 				return;
4339 			if (try_to_extend_transaction(handle, inode)) {
4340 				ext4_mark_inode_dirty(handle, inode);
4341 				ext4_truncate_restart_trans(handle, inode,
4342 					    blocks_for_truncate(inode));
4343 			}
4344 
4345 			ext4_free_blocks(handle, inode, nr, 1, 1);
4346 
4347 			if (parent_bh) {
4348 				/*
4349 				 * The block which we have just freed is
4350 				 * pointed to by an indirect block: journal it
4351 				 */
4352 				BUFFER_TRACE(parent_bh, "get_write_access");
4353 				if (!ext4_journal_get_write_access(handle,
4354 								   parent_bh)) {
4355 					*p = 0;
4356 					BUFFER_TRACE(parent_bh,
4357 						"call ext4_handle_dirty_metadata");
4358 					ext4_handle_dirty_metadata(handle,
4359 								   inode,
4360 								   parent_bh);
4361 				}
4362 			}
4363 		}
4364 	} else {
4365 		/* We have reached the bottom of the tree. */
4366 		BUFFER_TRACE(parent_bh, "free data blocks");
4367 		ext4_free_data(handle, inode, parent_bh, first, last);
4368 	}
4369 }
4370 
4371 int ext4_can_truncate(struct inode *inode)
4372 {
4373 	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
4374 		return 0;
4375 	if (S_ISREG(inode->i_mode))
4376 		return 1;
4377 	if (S_ISDIR(inode->i_mode))
4378 		return 1;
4379 	if (S_ISLNK(inode->i_mode))
4380 		return !ext4_inode_is_fast_symlink(inode);
4381 	return 0;
4382 }
4383 
4384 /*
4385  * ext4_truncate()
4386  *
4387  * We block out ext4_get_block() block instantiations across the entire
4388  * transaction, and VFS/VM ensures that ext4_truncate() cannot run
4389  * simultaneously on behalf of the same inode.
4390  *
4391  * As we work through the truncate and commit bits of it to the journal there
4392  * is one core, guiding principle: the file's tree must always be consistent on
4393  * disk. We must be able to restart the truncate after a crash.
4394  *
4395  * The file's tree may be transiently inconsistent in memory (although it
4396  * probably isn't), but whenever we close off and commit a journal transaction,
4397  * the contents of (the filesystem + the journal) must be consistent and
4398  * restartable. It's pretty simple, really: bottom up, right to left (although
4399  * left-to-right works OK too).
4400  *
4401  * Note that at recovery time, journal replay occurs *before* the restart of
4402  * truncate against the orphan inode list.
4403  *
4404  * The committed inode has the new, desired i_size (which is the same as
4405  * i_disksize in this case). After a crash, ext4_orphan_cleanup() will see
4406  * that this inode's truncate did not complete and it will again call
4407  * ext4_truncate() to have another go. So there will be instantiated blocks
4408  * to the right of the truncation point in a crashed ext4 filesystem.
But 4409 * that's fine - as long as they are linked from the inode, the post-crash 4410 * ext4_truncate() run will find them and release them. 4411 */ 4412 void ext4_truncate(struct inode *inode) 4413 { 4414 handle_t *handle; 4415 struct ext4_inode_info *ei = EXT4_I(inode); 4416 __le32 *i_data = ei->i_data; 4417 int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb); 4418 struct address_space *mapping = inode->i_mapping; 4419 ext4_lblk_t offsets[4]; 4420 Indirect chain[4]; 4421 Indirect *partial; 4422 __le32 nr = 0; 4423 int n; 4424 ext4_lblk_t last_block; 4425 unsigned blocksize = inode->i_sb->s_blocksize; 4426 4427 if (!ext4_can_truncate(inode)) 4428 return; 4429 4430 if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC)) 4431 ei->i_state |= EXT4_STATE_DA_ALLOC_CLOSE; 4432 4433 if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) { 4434 ext4_ext_truncate(inode); 4435 return; 4436 } 4437 4438 handle = start_transaction(inode); 4439 if (IS_ERR(handle)) 4440 return; /* AKPM: return what? */ 4441 4442 last_block = (inode->i_size + blocksize-1) 4443 >> EXT4_BLOCK_SIZE_BITS(inode->i_sb); 4444 4445 if (inode->i_size & (blocksize - 1)) 4446 if (ext4_block_truncate_page(handle, mapping, inode->i_size)) 4447 goto out_stop; 4448 4449 n = ext4_block_to_path(inode, last_block, offsets, NULL); 4450 if (n == 0) 4451 goto out_stop; /* error */ 4452 4453 /* 4454 * OK. This truncate is going to happen. We add the inode to the 4455 * orphan list, so that if this truncate spans multiple transactions, 4456 * and we crash, we will resume the truncate when the filesystem 4457 * recovers. It also marks the inode dirty, to catch the new size. 4458 * 4459 * Implication: the file must always be in a sane, consistent 4460 * truncatable state while each transaction commits. 4461 */ 4462 if (ext4_orphan_add(handle, inode)) 4463 goto out_stop; 4464 4465 /* 4466 * From here we block out all ext4_get_block() callers who want to 4467 * modify the block allocation tree. 4468 */ 4469 down_write(&ei->i_data_sem); 4470 4471 ext4_discard_preallocations(inode); 4472 4473 /* 4474 * The orphan list entry will now protect us from any crash which 4475 * occurs before the truncate completes, so it is now safe to propagate 4476 * the new, shorter inode size (held for now in i_size) into the 4477 * on-disk inode. We do this via i_disksize, which is the value which 4478 * ext4 *really* writes onto the disk inode. 4479 */ 4480 ei->i_disksize = inode->i_size; 4481 4482 if (n == 1) { /* direct blocks */ 4483 ext4_free_data(handle, inode, NULL, i_data+offsets[0], 4484 i_data + EXT4_NDIR_BLOCKS); 4485 goto do_indirects; 4486 } 4487 4488 partial = ext4_find_shared(inode, n, offsets, chain, &nr); 4489 /* Kill the top of shared branch (not detached) */ 4490 if (nr) { 4491 if (partial == chain) { 4492 /* Shared branch grows from the inode */ 4493 ext4_free_branches(handle, inode, NULL, 4494 &nr, &nr+1, (chain+n-1) - partial); 4495 *partial->p = 0; 4496 /* 4497 * We mark the inode dirty prior to restart, 4498 * and prior to stop. No need for it here. 
4499 */ 4500 } else { 4501 /* Shared branch grows from an indirect block */ 4502 BUFFER_TRACE(partial->bh, "get_write_access"); 4503 ext4_free_branches(handle, inode, partial->bh, 4504 partial->p, 4505 partial->p+1, (chain+n-1) - partial); 4506 } 4507 } 4508 /* Clear the ends of indirect blocks on the shared branch */ 4509 while (partial > chain) { 4510 ext4_free_branches(handle, inode, partial->bh, partial->p + 1, 4511 (__le32*)partial->bh->b_data+addr_per_block, 4512 (chain+n-1) - partial); 4513 BUFFER_TRACE(partial->bh, "call brelse"); 4514 brelse(partial->bh); 4515 partial--; 4516 } 4517 do_indirects: 4518 /* Kill the remaining (whole) subtrees */ 4519 switch (offsets[0]) { 4520 default: 4521 nr = i_data[EXT4_IND_BLOCK]; 4522 if (nr) { 4523 ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1); 4524 i_data[EXT4_IND_BLOCK] = 0; 4525 } 4526 case EXT4_IND_BLOCK: 4527 nr = i_data[EXT4_DIND_BLOCK]; 4528 if (nr) { 4529 ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2); 4530 i_data[EXT4_DIND_BLOCK] = 0; 4531 } 4532 case EXT4_DIND_BLOCK: 4533 nr = i_data[EXT4_TIND_BLOCK]; 4534 if (nr) { 4535 ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3); 4536 i_data[EXT4_TIND_BLOCK] = 0; 4537 } 4538 case EXT4_TIND_BLOCK: 4539 ; 4540 } 4541 4542 up_write(&ei->i_data_sem); 4543 inode->i_mtime = inode->i_ctime = ext4_current_time(inode); 4544 ext4_mark_inode_dirty(handle, inode); 4545 4546 /* 4547 * In a multi-transaction truncate, we only make the final transaction 4548 * synchronous 4549 */ 4550 if (IS_SYNC(inode)) 4551 ext4_handle_sync(handle); 4552 out_stop: 4553 /* 4554 * If this was a simple ftruncate(), and the file will remain alive 4555 * then we need to clear up the orphan record which we created above. 4556 * However, if this was a real unlink then we were called by 4557 * ext4_delete_inode(), and we allow that function to clean up the 4558 * orphan info for us. 4559 */ 4560 if (inode->i_nlink) 4561 ext4_orphan_del(handle, inode); 4562 4563 ext4_journal_stop(handle); 4564 } 4565 4566 /* 4567 * ext4_get_inode_loc returns with an extra refcount against the inode's 4568 * underlying buffer_head on success. If 'in_mem' is true, we have all 4569 * data in memory that is needed to recreate the on-disk version of this 4570 * inode. 
4571 */ 4572 static int __ext4_get_inode_loc(struct inode *inode, 4573 struct ext4_iloc *iloc, int in_mem) 4574 { 4575 struct ext4_group_desc *gdp; 4576 struct buffer_head *bh; 4577 struct super_block *sb = inode->i_sb; 4578 ext4_fsblk_t block; 4579 int inodes_per_block, inode_offset; 4580 4581 iloc->bh = NULL; 4582 if (!ext4_valid_inum(sb, inode->i_ino)) 4583 return -EIO; 4584 4585 iloc->block_group = (inode->i_ino - 1) / EXT4_INODES_PER_GROUP(sb); 4586 gdp = ext4_get_group_desc(sb, iloc->block_group, NULL); 4587 if (!gdp) 4588 return -EIO; 4589 4590 /* 4591 * Figure out the offset within the block group inode table 4592 */ 4593 inodes_per_block = (EXT4_BLOCK_SIZE(sb) / EXT4_INODE_SIZE(sb)); 4594 inode_offset = ((inode->i_ino - 1) % 4595 EXT4_INODES_PER_GROUP(sb)); 4596 block = ext4_inode_table(sb, gdp) + (inode_offset / inodes_per_block); 4597 iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb); 4598 4599 bh = sb_getblk(sb, block); 4600 if (!bh) { 4601 ext4_error(sb, "ext4_get_inode_loc", "unable to read " 4602 "inode block - inode=%lu, block=%llu", 4603 inode->i_ino, block); 4604 return -EIO; 4605 } 4606 if (!buffer_uptodate(bh)) { 4607 lock_buffer(bh); 4608 4609 /* 4610 * If the buffer has the write error flag, we have failed 4611 * to write out another inode in the same block. In this 4612 * case, we don't have to read the block because we may 4613 * read the old inode data successfully. 4614 */ 4615 if (buffer_write_io_error(bh) && !buffer_uptodate(bh)) 4616 set_buffer_uptodate(bh); 4617 4618 if (buffer_uptodate(bh)) { 4619 /* someone brought it uptodate while we waited */ 4620 unlock_buffer(bh); 4621 goto has_buffer; 4622 } 4623 4624 /* 4625 * If we have all information of the inode in memory and this 4626 * is the only valid inode in the block, we need not read the 4627 * block. 4628 */ 4629 if (in_mem) { 4630 struct buffer_head *bitmap_bh; 4631 int i, start; 4632 4633 start = inode_offset & ~(inodes_per_block - 1); 4634 4635 /* Is the inode bitmap in cache? */ 4636 bitmap_bh = sb_getblk(sb, ext4_inode_bitmap(sb, gdp)); 4637 if (!bitmap_bh) 4638 goto make_io; 4639 4640 /* 4641 * If the inode bitmap isn't in cache then the 4642 * optimisation may end up performing two reads instead 4643 * of one, so skip it. 4644 */ 4645 if (!buffer_uptodate(bitmap_bh)) { 4646 brelse(bitmap_bh); 4647 goto make_io; 4648 } 4649 for (i = start; i < start + inodes_per_block; i++) { 4650 if (i == inode_offset) 4651 continue; 4652 if (ext4_test_bit(i, bitmap_bh->b_data)) 4653 break; 4654 } 4655 brelse(bitmap_bh); 4656 if (i == start + inodes_per_block) { 4657 /* all other inodes are free, so skip I/O */ 4658 memset(bh->b_data, 0, bh->b_size); 4659 set_buffer_uptodate(bh); 4660 unlock_buffer(bh); 4661 goto has_buffer; 4662 } 4663 } 4664 4665 make_io: 4666 /* 4667 * If we need to do any I/O, try to pre-readahead extra 4668 * blocks from the inode table. 
4669 	 */
4670 	if (EXT4_SB(sb)->s_inode_readahead_blks) {
4671 		ext4_fsblk_t b, end, table;
4672 		unsigned num;
4673 
4674 		table = ext4_inode_table(sb, gdp);
4675 		/* s_inode_readahead_blks is always a power of 2 */
4676 		b = block & ~(EXT4_SB(sb)->s_inode_readahead_blks-1);
4677 		if (table > b)
4678 			b = table;
4679 		end = b + EXT4_SB(sb)->s_inode_readahead_blks;
4680 		num = EXT4_INODES_PER_GROUP(sb);
4681 		if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
4682 				EXT4_FEATURE_RO_COMPAT_GDT_CSUM))
4683 			num -= ext4_itable_unused_count(sb, gdp);
4684 		table += num / inodes_per_block;
4685 		if (end > table)
4686 			end = table;
4687 		while (b <= end)
4688 			sb_breadahead(sb, b++);
4689 	}
4690 
4691 	/*
4692 	 * There are other valid inodes in the buffer, this inode
4693 	 * has in-inode xattrs, or we don't have this inode in memory.
4694 	 * Read the block from disk.
4695 	 */
4696 	get_bh(bh);
4697 	bh->b_end_io = end_buffer_read_sync;
4698 	submit_bh(READ_META, bh);
4699 	wait_on_buffer(bh);
4700 	if (!buffer_uptodate(bh)) {
4701 		ext4_error(sb, __func__,
4702 			   "unable to read inode block - inode=%lu, "
4703 			   "block=%llu", inode->i_ino, block);
4704 		brelse(bh);
4705 		return -EIO;
4706 	}
4707 	}
4708 has_buffer:
4709 	iloc->bh = bh;
4710 	return 0;
4711 }
4712 
4713 int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc)
4714 {
4715 	/* We have all inode data except xattrs in memory here. */
4716 	return __ext4_get_inode_loc(inode, iloc,
4717 		!(EXT4_I(inode)->i_state & EXT4_STATE_XATTR));
4718 }
4719 
4720 void ext4_set_inode_flags(struct inode *inode)
4721 {
4722 	unsigned int flags = EXT4_I(inode)->i_flags;
4723 
4724 	inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
4725 	if (flags & EXT4_SYNC_FL)
4726 		inode->i_flags |= S_SYNC;
4727 	if (flags & EXT4_APPEND_FL)
4728 		inode->i_flags |= S_APPEND;
4729 	if (flags & EXT4_IMMUTABLE_FL)
4730 		inode->i_flags |= S_IMMUTABLE;
4731 	if (flags & EXT4_NOATIME_FL)
4732 		inode->i_flags |= S_NOATIME;
4733 	if (flags & EXT4_DIRSYNC_FL)
4734 		inode->i_flags |= S_DIRSYNC;
4735 }
4736 
4737 /* Propagate flags from i_flags to EXT4_I(inode)->i_flags */
4738 void ext4_get_inode_flags(struct ext4_inode_info *ei)
4739 {
4740 	unsigned int flags = ei->vfs_inode.i_flags;
4741 
4742 	ei->i_flags &= ~(EXT4_SYNC_FL|EXT4_APPEND_FL|
4743 			EXT4_IMMUTABLE_FL|EXT4_NOATIME_FL|EXT4_DIRSYNC_FL);
4744 	if (flags & S_SYNC)
4745 		ei->i_flags |= EXT4_SYNC_FL;
4746 	if (flags & S_APPEND)
4747 		ei->i_flags |= EXT4_APPEND_FL;
4748 	if (flags & S_IMMUTABLE)
4749 		ei->i_flags |= EXT4_IMMUTABLE_FL;
4750 	if (flags & S_NOATIME)
4751 		ei->i_flags |= EXT4_NOATIME_FL;
4752 	if (flags & S_DIRSYNC)
4753 		ei->i_flags |= EXT4_DIRSYNC_FL;
4754 }
4755 
4756 static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode,
4757 				  struct ext4_inode_info *ei)
4758 {
4759 	blkcnt_t i_blocks;
4760 	struct inode *inode = &(ei->vfs_inode);
4761 	struct super_block *sb = inode->i_sb;
4762 
4763 	if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
4764 				EXT4_FEATURE_RO_COMPAT_HUGE_FILE)) {
4765 		/* we are using the combined 48 bit field */
4766 		i_blocks = ((u64)le16_to_cpu(raw_inode->i_blocks_high)) << 32 |
4767 					le32_to_cpu(raw_inode->i_blocks_lo);
4768 		if (ei->i_flags & EXT4_HUGE_FILE_FL) {
4769 			/* i_blocks is stored in file system block size units */
4770 			return i_blocks << (inode->i_blkbits - 9);
4771 		} else {
4772 			return i_blocks;
4773 		}
4774 	} else {
4775 		return le32_to_cpu(raw_inode->i_blocks_lo);
4776 	}
4777 }
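/*
 * Worked example for the decode above (assuming 4K blocks, so
 * i_blkbits == 12): with the HUGE_FILE feature, i_blocks_high = 0 and
 * i_blocks_lo = 10, an inode with EXT4_HUGE_FILE_FL set stores its count
 * in filesystem blocks, so the in-core i_blocks becomes
 * 10 << (12 - 9) = 80 sectors of 512 bytes; without the flag the 48-bit
 * value is already in 512-byte units and is returned as is.
 */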
4778 
4779 struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
4780 {
4781 	struct ext4_iloc iloc;
4782 	struct ext4_inode *raw_inode;
4783 	struct ext4_inode_info *ei;
4784 	struct buffer_head *bh;
4785 	struct inode *inode;
4786 	long ret;
4787 	int block;
4788 
4789 	inode = iget_locked(sb, ino);
4790 	if (!inode)
4791 		return ERR_PTR(-ENOMEM);
4792 	if (!(inode->i_state & I_NEW))
4793 		return inode;
4794 
4795 	ei = EXT4_I(inode);
4796 
4797 	ret = __ext4_get_inode_loc(inode, &iloc, 0);
4798 	if (ret < 0)
4799 		goto bad_inode;
4800 	bh = iloc.bh;
4801 	raw_inode = ext4_raw_inode(&iloc);
4802 	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
4803 	inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
4804 	inode->i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
4805 	if (!(test_opt(inode->i_sb, NO_UID32))) {
4806 		inode->i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
4807 		inode->i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
4808 	}
4809 	inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
4810 
4811 	ei->i_state = 0;
4812 	ei->i_dir_start_lookup = 0;
4813 	ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
4814 	/* We now have enough fields to check if the inode was active or not.
4815 	 * This is needed because nfsd might try to access dead inodes;
4816 	 * the test is the same one that e2fsck uses.
4817 	 * NeilBrown 1999oct15
4818 	 */
4819 	if (inode->i_nlink == 0) {
4820 		if (inode->i_mode == 0 ||
4821 		    !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) {
4822 			/* this inode is deleted */
4823 			brelse(bh);
4824 			ret = -ESTALE;
4825 			goto bad_inode;
4826 		}
4827 		/* The only unlinked inodes we let through here have
4828 		 * valid i_mode and are being read by the orphan
4829 		 * recovery code: that's fine, we're about to complete
4830 		 * the process of deleting those. */
4831 	}
4832 	ei->i_flags = le32_to_cpu(raw_inode->i_flags);
4833 	inode->i_blocks = ext4_inode_blocks(raw_inode, ei);
4834 	ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo);
4835 	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_64BIT))
4836 		ei->i_file_acl |=
4837 			((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
4838 	inode->i_size = ext4_isize(raw_inode);
4839 	ei->i_disksize = inode->i_size;
4840 	inode->i_generation = le32_to_cpu(raw_inode->i_generation);
4841 	ei->i_block_group = iloc.block_group;
4842 	ei->i_last_alloc_group = ~0;
4843 	/*
4844 	 * NOTE! The in-memory inode i_data array is in little-endian order
4845 	 * even on big-endian machines: we do NOT byteswap the block numbers!
4846 	 */
4847 	for (block = 0; block < EXT4_N_BLOCKS; block++)
4848 		ei->i_data[block] = raw_inode->i_block[block];
4849 	INIT_LIST_HEAD(&ei->i_orphan);
4850 
4851 	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
4852 		ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
4853 		if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
4854 		    EXT4_INODE_SIZE(inode->i_sb)) {
4855 			brelse(bh);
4856 			ret = -EIO;
4857 			goto bad_inode;
4858 		}
4859 		if (ei->i_extra_isize == 0) {
4860 			/* The extra space is currently unused. Use it.
*/ 4861 ei->i_extra_isize = sizeof(struct ext4_inode) - 4862 EXT4_GOOD_OLD_INODE_SIZE; 4863 } else { 4864 __le32 *magic = (void *)raw_inode + 4865 EXT4_GOOD_OLD_INODE_SIZE + 4866 ei->i_extra_isize; 4867 if (*magic == cpu_to_le32(EXT4_XATTR_MAGIC)) 4868 ei->i_state |= EXT4_STATE_XATTR; 4869 } 4870 } else 4871 ei->i_extra_isize = 0; 4872 4873 EXT4_INODE_GET_XTIME(i_ctime, inode, raw_inode); 4874 EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode); 4875 EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode); 4876 EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode); 4877 4878 inode->i_version = le32_to_cpu(raw_inode->i_disk_version); 4879 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) { 4880 if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi)) 4881 inode->i_version |= 4882 (__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32; 4883 } 4884 4885 ret = 0; 4886 if (ei->i_file_acl && 4887 ((ei->i_file_acl < 4888 (le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block) + 4889 EXT4_SB(sb)->s_gdb_count)) || 4890 (ei->i_file_acl >= ext4_blocks_count(EXT4_SB(sb)->s_es)))) { 4891 ext4_error(sb, __func__, 4892 "bad extended attribute block %llu in inode #%lu", 4893 ei->i_file_acl, inode->i_ino); 4894 ret = -EIO; 4895 goto bad_inode; 4896 } else if (ei->i_flags & EXT4_EXTENTS_FL) { 4897 if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || 4898 (S_ISLNK(inode->i_mode) && 4899 !ext4_inode_is_fast_symlink(inode))) 4900 /* Validate extent which is part of inode */ 4901 ret = ext4_ext_check_inode(inode); 4902 } else if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || 4903 (S_ISLNK(inode->i_mode) && 4904 !ext4_inode_is_fast_symlink(inode))) { 4905 /* Validate block references which are part of inode */ 4906 ret = ext4_check_inode_blockref(inode); 4907 } 4908 if (ret) { 4909 brelse(bh); 4910 goto bad_inode; 4911 } 4912 4913 if (S_ISREG(inode->i_mode)) { 4914 inode->i_op = &ext4_file_inode_operations; 4915 inode->i_fop = &ext4_file_operations; 4916 ext4_set_aops(inode); 4917 } else if (S_ISDIR(inode->i_mode)) { 4918 inode->i_op = &ext4_dir_inode_operations; 4919 inode->i_fop = &ext4_dir_operations; 4920 } else if (S_ISLNK(inode->i_mode)) { 4921 if (ext4_inode_is_fast_symlink(inode)) { 4922 inode->i_op = &ext4_fast_symlink_inode_operations; 4923 nd_terminate_link(ei->i_data, inode->i_size, 4924 sizeof(ei->i_data) - 1); 4925 } else { 4926 inode->i_op = &ext4_symlink_inode_operations; 4927 ext4_set_aops(inode); 4928 } 4929 } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) || 4930 S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) { 4931 inode->i_op = &ext4_special_inode_operations; 4932 if (raw_inode->i_block[0]) 4933 init_special_inode(inode, inode->i_mode, 4934 old_decode_dev(le32_to_cpu(raw_inode->i_block[0]))); 4935 else 4936 init_special_inode(inode, inode->i_mode, 4937 new_decode_dev(le32_to_cpu(raw_inode->i_block[1]))); 4938 } else { 4939 brelse(bh); 4940 ret = -EIO; 4941 ext4_error(inode->i_sb, __func__, 4942 "bogus i_mode (%o) for inode=%lu", 4943 inode->i_mode, inode->i_ino); 4944 goto bad_inode; 4945 } 4946 brelse(iloc.bh); 4947 ext4_set_inode_flags(inode); 4948 unlock_new_inode(inode); 4949 return inode; 4950 4951 bad_inode: 4952 iget_failed(inode); 4953 return ERR_PTR(ret); 4954 } 4955 4956 static int ext4_inode_blocks_set(handle_t *handle, 4957 struct ext4_inode *raw_inode, 4958 struct ext4_inode_info *ei) 4959 { 4960 struct inode *inode = &(ei->vfs_inode); 4961 u64 i_blocks = inode->i_blocks; 4962 struct super_block *sb = inode->i_sb; 4963 4964 if (i_blocks <= ~0U) { 4965 /* 4966 * i_blocks 
can be represented in a 32 bit variable
4967 		 * as a multiple of 512 bytes
4968 		 */
4969 		raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
4970 		raw_inode->i_blocks_high = 0;
4971 		ei->i_flags &= ~EXT4_HUGE_FILE_FL;
4972 		return 0;
4973 	}
4974 	if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_HUGE_FILE))
4975 		return -EFBIG;
4976 
4977 	if (i_blocks <= 0xffffffffffffULL) {
4978 		/*
4979 		 * i_blocks can be represented in a 48 bit variable
4980 		 * as a multiple of 512 bytes
4981 		 */
4982 		raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
4983 		raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
4984 		ei->i_flags &= ~EXT4_HUGE_FILE_FL;
4985 	} else {
4986 		ei->i_flags |= EXT4_HUGE_FILE_FL;
4987 		/* i_blocks is stored in file system block size units */
4988 		i_blocks = i_blocks >> (inode->i_blkbits - 9);
4989 		raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
4990 		raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
4991 	}
4992 	return 0;
4993 }
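/*
 * Worked example for the encode above: a file using 2^32 sectors of
 * 512 bytes (2 TiB) no longer fits the 32-bit case, so with the
 * HUGE_FILE feature available it is stored as i_blocks_lo = 0 and
 * i_blocks_high = 1, with EXT4_HUGE_FILE_FL clear. Only when i_blocks
 * exceeds 48 bits do we set EXT4_HUGE_FILE_FL and first convert the
 * count down to filesystem-block units.
 */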
4994 
4995 /*
4996  * Post the struct inode info into an on-disk inode location in the
4997  * buffer-cache. This gobbles the caller's reference to the
4998  * buffer_head in the inode location struct.
4999  *
5000  * The caller must have write access to iloc->bh.
5001  */
5002 static int ext4_do_update_inode(handle_t *handle,
5003 				struct inode *inode,
5004 				struct ext4_iloc *iloc)
5005 {
5006 	struct ext4_inode *raw_inode = ext4_raw_inode(iloc);
5007 	struct ext4_inode_info *ei = EXT4_I(inode);
5008 	struct buffer_head *bh = iloc->bh;
5009 	int err = 0, rc, block;
5010 
5011 	/* For fields not tracked in the in-memory inode,
5012 	 * initialise them to zero for new inodes. */
5013 	if (ei->i_state & EXT4_STATE_NEW)
5014 		memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);
5015 
5016 	ext4_get_inode_flags(ei);
5017 	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
5018 	if (!(test_opt(inode->i_sb, NO_UID32))) {
5019 		raw_inode->i_uid_low = cpu_to_le16(low_16_bits(inode->i_uid));
5020 		raw_inode->i_gid_low = cpu_to_le16(low_16_bits(inode->i_gid));
5021 		/*
5022 		 * Fix up interoperability with old kernels. Otherwise, old
5023 		 * inodes get re-used with the upper 16 bits of the uid/gid intact.
5024 		 */
5025 		if (!ei->i_dtime) {
5026 			raw_inode->i_uid_high =
5027 				cpu_to_le16(high_16_bits(inode->i_uid));
5028 			raw_inode->i_gid_high =
5029 				cpu_to_le16(high_16_bits(inode->i_gid));
5030 		} else {
5031 			raw_inode->i_uid_high = 0;
5032 			raw_inode->i_gid_high = 0;
5033 		}
5034 	} else {
5035 		raw_inode->i_uid_low =
5036 			cpu_to_le16(fs_high2lowuid(inode->i_uid));
5037 		raw_inode->i_gid_low =
5038 			cpu_to_le16(fs_high2lowgid(inode->i_gid));
5039 		raw_inode->i_uid_high = 0;
5040 		raw_inode->i_gid_high = 0;
5041 	}
5042 	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
5043 
5044 	EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode);
5045 	EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode);
5046 	EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode);
5047 	EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode);
5048 
5049 	if (ext4_inode_blocks_set(handle, raw_inode, ei))
5050 		goto out_brelse;
5051 	raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
5052 	raw_inode->i_flags = cpu_to_le32(ei->i_flags);
5053 	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
5054 	    cpu_to_le32(EXT4_OS_HURD))
5055 		raw_inode->i_file_acl_high =
5056 			cpu_to_le16(ei->i_file_acl >> 32);
5057 	raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl);
5058 	ext4_isize_set(raw_inode, ei->i_disksize);
5059 	if (ei->i_disksize > 0x7fffffffULL) {
5060 		struct super_block *sb = inode->i_sb;
5061 		if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
5062 				EXT4_FEATURE_RO_COMPAT_LARGE_FILE) ||
5063 		    EXT4_SB(sb)->s_es->s_rev_level ==
5064 				cpu_to_le32(EXT4_GOOD_OLD_REV)) {
5065 			/* If this is the first large file
5066 			 * created, add a flag to the superblock.
5067 			 */
5068 			err = ext4_journal_get_write_access(handle,
5069 					EXT4_SB(sb)->s_sbh);
5070 			if (err)
5071 				goto out_brelse;
5072 			ext4_update_dynamic_rev(sb);
5073 			EXT4_SET_RO_COMPAT_FEATURE(sb,
5074 					EXT4_FEATURE_RO_COMPAT_LARGE_FILE);
5075 			sb->s_dirt = 1;
5076 			ext4_handle_sync(handle);
5077 			err = ext4_handle_dirty_metadata(handle, inode,
5078 					EXT4_SB(sb)->s_sbh);
5079 		}
5080 	}
5081 	raw_inode->i_generation = cpu_to_le32(inode->i_generation);
5082 	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
5083 		if (old_valid_dev(inode->i_rdev)) {
5084 			raw_inode->i_block[0] =
5085 				cpu_to_le32(old_encode_dev(inode->i_rdev));
5086 			raw_inode->i_block[1] = 0;
5087 		} else {
5088 			raw_inode->i_block[0] = 0;
5089 			raw_inode->i_block[1] =
5090 				cpu_to_le32(new_encode_dev(inode->i_rdev));
5091 			raw_inode->i_block[2] = 0;
5092 		}
5093 	} else
5094 		for (block = 0; block < EXT4_N_BLOCKS; block++)
5095 			raw_inode->i_block[block] = ei->i_data[block];
5096 
5097 	raw_inode->i_disk_version = cpu_to_le32(inode->i_version);
5098 	if (ei->i_extra_isize) {
5099 		if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
5100 			raw_inode->i_version_hi =
5101 				cpu_to_le32(inode->i_version >> 32);
5102 		raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize);
5103 	}
5104 
5105 	BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
5106 	rc = ext4_handle_dirty_metadata(handle, inode, bh);
5107 	if (!err)
5108 		err = rc;
5109 	ei->i_state &= ~EXT4_STATE_NEW;
5110 
5111 out_brelse:
5112 	brelse(bh);
5113 	ext4_std_error(inode->i_sb, err);
5114 	return err;
5115 }
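/*
 * Worked example for the uid/gid split above: with 32-bit uids enabled
 * and inode->i_uid = 100000 (0x186a0), the raw inode gets
 * i_uid_low = 0x86a0 and i_uid_high = 0x1. An old kernel that only
 * understands the low 16 bits would see uid 34464, which is why the
 * high halves are zeroed for deleted inodes (i_dtime set): otherwise a
 * re-used inode could inherit stale upper bits.
 */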
5116 
5117 /*
5118  * ext4_write_inode()
5119  *
5120  * We are called from a few places:
5121  *
5122  * - Within generic_file_write() for O_SYNC files.
5123  *   Here, there will be no transaction running. We wait for any running
5124  *   transaction to commit.
5125  *
5126  * - Within sys_sync(), kupdate and such.
5127  *   We wait on commit, if told to.
5128  *
5129  * - Within prune_icache() (PF_MEMALLOC == true)
5130  *   Here we simply return. We can't afford to block kswapd on the
5131  *   journal commit.
5132  *
5133  * In all cases it is actually safe for us to return without doing anything,
5134  * because the inode has been copied into a raw inode buffer in
5135  * ext4_mark_inode_dirty(). This is a correctness thing for O_SYNC and for
5136  * knfsd.
5137  *
5138  * Note that we are absolutely dependent upon all inode dirtiers doing the
5139  * right thing: they *must* call mark_inode_dirty() after dirtying info in
5140  * which we are interested.
5141  *
5142  * It would be a bug for them to not do this. The code:
5143  *
5144  *	mark_inode_dirty(inode)
5145  *	stuff();
5146  *	inode->i_size = expr;
5147  *
5148  * is in error because a kswapd-driven write_inode() could occur while
5149  * `stuff()' is running, and the new i_size will be lost. Plus the inode
5150  * will no longer be on the superblock's dirty inode list.
5151  */
5152 int ext4_write_inode(struct inode *inode, int wait)
5153 {
5154 	int err;
5155 
5156 	if (current->flags & PF_MEMALLOC)
5157 		return 0;
5158 
5159 	if (EXT4_SB(inode->i_sb)->s_journal) {
5160 		if (ext4_journal_current_handle()) {
5161 			jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");
5162 			dump_stack();
5163 			return -EIO;
5164 		}
5165 
5166 		if (!wait)
5167 			return 0;
5168 
5169 		err = ext4_force_commit(inode->i_sb);
5170 	} else {
5171 		struct ext4_iloc iloc;
5172 
5173 		err = ext4_get_inode_loc(inode, &iloc);
5174 		if (err)
5175 			return err;
5176 		if (wait)
5177 			sync_dirty_buffer(iloc.bh);
5178 		if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) {
5179 			ext4_error(inode->i_sb, __func__,
5180 				   "IO error syncing inode, "
5181 				   "inode=%lu, block=%llu",
5182 				   inode->i_ino,
5183 				   (unsigned long long)iloc.bh->b_blocknr);
5184 			err = -EIO;
5185 		}
5186 	}
5187 	return err;
5188 }
5189 
5190 /*
5191  * ext4_setattr()
5192  *
5193  * Called from notify_change.
5194  *
5195  * We want to trap VFS attempts to truncate the file as soon as
5196  * possible. In particular, we want to make sure that when the VFS
5197  * shrinks i_size, we put the inode on the orphan list and modify
5198  * i_disksize immediately, so that during the subsequent flushing of
5199  * dirty pages and freeing of disk blocks, we can guarantee that any
5200  * commit will leave the blocks being flushed in an unused state on
5201  * disk. (On recovery, the inode will get truncated and the blocks will
5202  * be freed, so we have a strong guarantee that no future commit will
5203  * leave these blocks visible to the user.)
5204  *
5205  * Another thing we have to assure is that if we are in ordered mode
5206  * and the inode is still attached to the committing transaction, we
5207  * must start writeout of all the dirty pages which are being truncated.
5208  * This way we are sure that all the data written in the previous
5209  * transaction are already on disk (truncate waits for pages under
5210  * writeback).
5211  *
5212  * Called with inode->i_mutex down.
5213  */
5214 int ext4_setattr(struct dentry *dentry, struct iattr *attr)
5215 {
5216 	struct inode *inode = dentry->d_inode;
5217 	int error, rc = 0;
5218 	const unsigned int ia_valid = attr->ia_valid;
5219 
5220 	error = inode_change_ok(inode, attr);
5221 	if (error)
5222 		return error;
5223 
5224 	if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
5225 	    (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
5226 		handle_t *handle;
5227 
5228 		/* (user+group)*(old+new) structure, inode write (sb,
5229 		 * inode block, ?
 - but truncate inode update has it) */
5230 		handle = ext4_journal_start(inode, 2*(EXT4_QUOTA_INIT_BLOCKS(inode->i_sb)+
5231 					EXT4_QUOTA_DEL_BLOCKS(inode->i_sb))+3);
5232 		if (IS_ERR(handle)) {
5233 			error = PTR_ERR(handle);
5234 			goto err_out;
5235 		}
5236 		error = vfs_dq_transfer(inode, attr) ? -EDQUOT : 0;
5237 		if (error) {
5238 			ext4_journal_stop(handle);
5239 			return error;
5240 		}
5241 		/* Update the corresponding info in the inode so that
5242 		 * everything is in one transaction */
5243 		if (attr->ia_valid & ATTR_UID)
5244 			inode->i_uid = attr->ia_uid;
5245 		if (attr->ia_valid & ATTR_GID)
5246 			inode->i_gid = attr->ia_gid;
5247 		error = ext4_mark_inode_dirty(handle, inode);
5248 		ext4_journal_stop(handle);
5249 	}
5250 
5251 	if (attr->ia_valid & ATTR_SIZE) {
5252 		if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)) {
5253 			struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
5254 
5255 			if (attr->ia_size > sbi->s_bitmap_maxbytes) {
5256 				error = -EFBIG;
5257 				goto err_out;
5258 			}
5259 		}
5260 	}
5261 
5262 	if (S_ISREG(inode->i_mode) &&
5263 	    attr->ia_valid & ATTR_SIZE && attr->ia_size < inode->i_size) {
5264 		handle_t *handle;
5265 
5266 		handle = ext4_journal_start(inode, 3);
5267 		if (IS_ERR(handle)) {
5268 			error = PTR_ERR(handle);
5269 			goto err_out;
5270 		}
5271 
5272 		error = ext4_orphan_add(handle, inode);
5273 		EXT4_I(inode)->i_disksize = attr->ia_size;
5274 		rc = ext4_mark_inode_dirty(handle, inode);
5275 		if (!error)
5276 			error = rc;
5277 		ext4_journal_stop(handle);
5278 
5279 		if (ext4_should_order_data(inode)) {
5280 			error = ext4_begin_ordered_truncate(inode,
5281 							    attr->ia_size);
5282 			if (error) {
5283 				/* Do as much error cleanup as possible */
5284 				handle = ext4_journal_start(inode, 3);
5285 				if (IS_ERR(handle)) {
5286 					ext4_orphan_del(NULL, inode);
5287 					goto err_out;
5288 				}
5289 				ext4_orphan_del(handle, inode);
5290 				ext4_journal_stop(handle);
5291 				goto err_out;
5292 			}
5293 		}
5294 	}
5295 
5296 	rc = inode_setattr(inode, attr);
5297 
5298 	/* If inode_setattr's call to ext4_truncate failed to get a
5299 	 * transaction handle at all, we need to clean up the in-core
5300 	 * orphan list manually. */
5301 	if (inode->i_nlink)
5302 		ext4_orphan_del(NULL, inode);
5303 
5304 	if (!rc && (ia_valid & ATTR_MODE))
5305 		rc = ext4_acl_chmod(inode);
5306 
5307 err_out:
5308 	ext4_std_error(inode->i_sb, error);
5309 	if (!error)
5310 		error = rc;
5311 	return error;
5312 }
5313 
5314 int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry,
5315 		 struct kstat *stat)
5316 {
5317 	struct inode *inode;
5318 	unsigned long delalloc_blocks;
5319 
5320 	inode = dentry->d_inode;
5321 	generic_fillattr(inode, stat);
5322 
5323 	/*
5324 	 * We can't update i_blocks if the block allocation is delayed;
5325 	 * otherwise, in the case of a system crash before the real block
5326 	 * allocation is done, we would have i_blocks inconsistent with
5327 	 * the on-disk file blocks.
5328 	 * We always keep i_blocks updated together with the real
5329 	 * allocation. But so as not to confuse userspace, stat
5330 	 * will return block counts that include the delayed allocation
5331 	 * blocks for this file.
5332 	 */
5333 	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
5334 	delalloc_blocks = EXT4_I(inode)->i_reserved_data_blocks;
5335 	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
5336 
5337 	stat->blocks += (delalloc_blocks << inode->i_sb->s_blocksize_bits)>>9;
5338 	return 0;
5339 }
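/*
 * Worked example for the adjustment above (assuming a 4K block size, so
 * s_blocksize_bits == 12): with 5 delayed-allocation blocks reserved,
 * stat->blocks grows by (5 << 12) >> 9 = 40 sectors, so userspace sees
 * the space the file will consume once the delayed allocation happens.
 */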
5340 
5341 static int ext4_indirect_trans_blocks(struct inode *inode, int nrblocks,
5342 				      int chunk)
5343 {
5344 	int indirects;
5345 
5346 	/* if nrblocks are contiguous */
5347 	if (chunk) {
5348 		/*
5349 		 * With N contiguous data blocks, we need at most
5350 		 * N/EXT4_ADDR_PER_BLOCK(inode->i_sb) indirect blocks,
5351 		 * 2 dindirect blocks, and
5352 		 * 1 tindirect block.
5353 		 */
5354 		indirects = nrblocks / EXT4_ADDR_PER_BLOCK(inode->i_sb);
5355 		return indirects + 3;
5356 	}
5357 	/*
5358 	 * If nrblocks are not contiguous, then, in the worst case, each
5359 	 * block touches an indirect block, and each indirect block touches
5360 	 * a double indirect block, plus a triple indirect block.
5361 	 */
5362 	indirects = nrblocks * 2 + 1;
5363 	return indirects;
5364 }
5365 
5366 static int ext4_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
5367 {
5368 	if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL))
5369 		return ext4_indirect_trans_blocks(inode, nrblocks, chunk);
5370 	return ext4_ext_index_trans_blocks(inode, nrblocks, chunk);
5371 }
5372 
5373 /*
5374  * Account for index blocks, block group bitmaps and block group
5375  * descriptor blocks if we modify data blocks and index blocks. In the
5376  * worst case, the index blocks spread over different block groups.
5377  *
5378  * If the data blocks are discontiguous, they may spread over
5379  * different block groups too. Even if they are contiguous, with flexbg,
5380  * they could still cross a block group boundary.
5381  *
5382  * Also account for superblock, inode, quota and xattr blocks.
5383  */
5384 int ext4_meta_trans_blocks(struct inode *inode, int nrblocks, int chunk)
5385 {
5386 	ext4_group_t groups, ngroups = ext4_get_groups_count(inode->i_sb);
5387 	int gdpblocks;
5388 	int idxblocks;
5389 	int ret = 0;
5390 
5391 	/*
5392 	 * How many index blocks do we need to touch to modify nrblocks?
5393 	 * The "chunk" flag indicates whether nrblocks is
5394 	 * physically contiguous on disk.
5395 	 *
5396 	 * Direct IO and fallocate call get_block to allocate
5397 	 * one single extent at a time, so they can set the "chunk" flag.
5398 	 */
5399 	idxblocks = ext4_index_trans_blocks(inode, nrblocks, chunk);
5400 
5401 	ret = idxblocks;
5402 
5403 	/*
5404 	 * Now let's see how many group bitmaps and group descriptors we
5405 	 * need to account for.
5406 	 */
5407 	groups = idxblocks;
5408 	if (chunk)
5409 		groups += 1;
5410 	else
5411 		groups += nrblocks;
5412 
5413 	gdpblocks = groups;
5414 	if (groups > ngroups)
5415 		groups = ngroups;
5416 	if (groups > EXT4_SB(inode->i_sb)->s_gdb_count)
5417 		gdpblocks = EXT4_SB(inode->i_sb)->s_gdb_count;
5418 
5419 	/* bitmaps and block group descriptor blocks */
5420 	ret += groups + gdpblocks;
5421 
5422 	/* Blocks for the super block, inode, quota and xattr blocks */
5423 	ret += EXT4_META_TRANS_BLOCKS(inode->i_sb);
5424 
5425 	return ret;
5426 }
5427 
5428 /*
5429  * Calculate the total number of credits to reserve so that the
5430  * modification of a single page fits into a single transaction,
5431  * which may include multiple chunks of block allocations.
5432  *
5433  * This could be called via ext4_write_begin().
5434  *
5435  * We need to consider the worst case, where we allocate
5436  * one new block per extent.
5437  */
5438 int ext4_writepage_trans_blocks(struct inode *inode)
5439 {
5440 	int bpp = ext4_journal_blocks_per_page(inode);
5441 	int ret;
5442 
5443 	ret = ext4_meta_trans_blocks(inode, bpp, 0);
5444 
5445 	/* Account for data blocks for journalled mode */
5446 	if (ext4_should_journal_data(inode))
5447 		ret += bpp;
5448 	return ret;
5449 }
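/*
 * Worked example (an indirect-mapped inode, one contiguous chunk of
 * nrblocks = 1, so chunk = 1): ext4_indirect_trans_blocks() returns
 * 1/EXT4_ADDR_PER_BLOCK + 3 = 3 index blocks; groups = 3 + 1 = 4, and
 * before the ngroups/s_gdb_count capping gdpblocks = 4 as well, so
 * ext4_meta_trans_blocks() returns 3 + 4 + 4 +
 * EXT4_META_TRANS_BLOCKS(sb) credits - the figure that
 * ext4_chunk_trans_blocks() below hands to callers such as
 * ext4_get_block_dio_write().
 */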
/*
 * Calculate the total number of credits to reserve to fit
 * the modification of a single page into a single transaction,
 * which may include multiple chunks of block allocations.
 *
 * This could be called via ext4_write_begin()
 *
 * We need to consider the worst case, with
 * one new block per extent.
 */
int ext4_writepage_trans_blocks(struct inode *inode)
{
	int bpp = ext4_journal_blocks_per_page(inode);
	int ret;

	ret = ext4_meta_trans_blocks(inode, bpp, 0);

	/* Account for data blocks for journalled mode */
	if (ext4_should_journal_data(inode))
		ret += bpp;
	return ret;
}

/*
 * Calculate the journal credits for a chunk of data modification.
 *
 * This is called from DIO, fallocate or anyone else that calls
 * ext4_get_blocks() to map/allocate a chunk of contiguous disk blocks.
 *
 * Journal buffers for data blocks are not included here, as DIO
 * and fallocate do not need to journal data buffers.
 */
int ext4_chunk_trans_blocks(struct inode *inode, int nrblocks)
{
	return ext4_meta_trans_blocks(inode, nrblocks, 1);
}

/*
 * The caller must have previously called ext4_reserve_inode_write().
 * Given this, we know that the caller already has write access to iloc->bh.
 */
int ext4_mark_iloc_dirty(handle_t *handle,
			 struct inode *inode, struct ext4_iloc *iloc)
{
	int err = 0;

	if (test_opt(inode->i_sb, I_VERSION))
		inode_inc_iversion(inode);

	/* ext4_do_update_inode() consumes one bh->b_count */
	get_bh(iloc->bh);

	/* ext4_do_update_inode() does jbd2_journal_dirty_metadata */
	err = ext4_do_update_inode(handle, inode, iloc);
	put_bh(iloc->bh);
	return err;
}

/*
 * On success, we end up with an outstanding reference count against
 * iloc->bh.  This _must_ be cleaned up later.
 */
int
ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
			 struct ext4_iloc *iloc)
{
	int err;

	err = ext4_get_inode_loc(inode, iloc);
	if (!err) {
		BUFFER_TRACE(iloc->bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, iloc->bh);
		if (err) {
			brelse(iloc->bh);
			iloc->bh = NULL;
		}
	}
	ext4_std_error(inode->i_sb, err);
	return err;
}

/*
 * Expand an inode by new_extra_isize bytes.
 * Returns 0 on success or negative error number on failure.
 */
static int ext4_expand_extra_isize(struct inode *inode,
				   unsigned int new_extra_isize,
				   struct ext4_iloc iloc,
				   handle_t *handle)
{
	struct ext4_inode *raw_inode;
	struct ext4_xattr_ibody_header *header;
	struct ext4_xattr_entry *entry;

	if (EXT4_I(inode)->i_extra_isize >= new_extra_isize)
		return 0;

	raw_inode = ext4_raw_inode(&iloc);

	header = IHDR(inode, raw_inode);
	entry = IFIRST(header);

	/* No extended attributes present */
	if (!(EXT4_I(inode)->i_state & EXT4_STATE_XATTR) ||
	    header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
		memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0,
		       new_extra_isize);
		EXT4_I(inode)->i_extra_isize = new_extra_isize;
		return 0;
	}

	/* try to expand with EAs present */
	return ext4_expand_extra_isize_ea(inode, new_extra_isize,
					  raw_inode, handle);
}
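/*
 * A sketch (an editor's addition, not from the original source) of the
 * usual calling pattern for the two helpers above, error handling
 * trimmed:
 *
 *	struct ext4_iloc iloc;
 *	int err;
 *
 *	err = ext4_reserve_inode_write(handle, inode, &iloc);
 *	if (!err) {
 *		... update the in-core inode here ...
 *		err = ext4_mark_iloc_dirty(handle, inode, &iloc);
 *	}
 *
 * ext4_reserve_inode_write() takes the bh reference and journal write
 * access; ext4_mark_iloc_dirty() copies the in-core inode into the
 * buffer and drops that reference, so the pair must always balance.
 */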
5552 * 5553 * Is this cheating? Not really. Sure, we haven't written the 5554 * inode out, but prune_icache isn't a user-visible syncing function. 5555 * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync) 5556 * we start and wait on commits. 5557 * 5558 * Is this efficient/effective? Well, we're being nice to the system 5559 * by cleaning up our inodes proactively so they can be reaped 5560 * without I/O. But we are potentially leaving up to five seconds' 5561 * worth of inodes floating about which prune_icache wants us to 5562 * write out. One way to fix that would be to get prune_icache() 5563 * to do a write_super() to free up some memory. It has the desired 5564 * effect. 5565 */ 5566 int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode) 5567 { 5568 struct ext4_iloc iloc; 5569 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 5570 static unsigned int mnt_count; 5571 int err, ret; 5572 5573 might_sleep(); 5574 err = ext4_reserve_inode_write(handle, inode, &iloc); 5575 if (ext4_handle_valid(handle) && 5576 EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize && 5577 !(EXT4_I(inode)->i_state & EXT4_STATE_NO_EXPAND)) { 5578 /* 5579 * We need extra buffer credits since we may write into EA block 5580 * with this same handle. If journal_extend fails, then it will 5581 * only result in a minor loss of functionality for that inode. 5582 * If this is felt to be critical, then e2fsck should be run to 5583 * force a large enough s_min_extra_isize. 5584 */ 5585 if ((jbd2_journal_extend(handle, 5586 EXT4_DATA_TRANS_BLOCKS(inode->i_sb))) == 0) { 5587 ret = ext4_expand_extra_isize(inode, 5588 sbi->s_want_extra_isize, 5589 iloc, handle); 5590 if (ret) { 5591 EXT4_I(inode)->i_state |= EXT4_STATE_NO_EXPAND; 5592 if (mnt_count != 5593 le16_to_cpu(sbi->s_es->s_mnt_count)) { 5594 ext4_warning(inode->i_sb, __func__, 5595 "Unable to expand inode %lu. Delete" 5596 " some EAs or run e2fsck.", 5597 inode->i_ino); 5598 mnt_count = 5599 le16_to_cpu(sbi->s_es->s_mnt_count); 5600 } 5601 } 5602 } 5603 } 5604 if (!err) 5605 err = ext4_mark_iloc_dirty(handle, inode, &iloc); 5606 return err; 5607 } 5608 5609 /* 5610 * ext4_dirty_inode() is called from __mark_inode_dirty() 5611 * 5612 * We're really interested in the case where a file is being extended. 5613 * i_size has been changed by generic_commit_write() and we thus need 5614 * to include the updated inode in the current transaction. 5615 * 5616 * Also, vfs_dq_alloc_block() will always dirty the inode when blocks 5617 * are allocated to the file. 5618 * 5619 * If the inode is marked synchronous, we don't honour that here - doing 5620 * so would cause a commit on atime updates, which we don't bother doing. 5621 * We handle synchronous inodes at the highest possible level. 5622 */ 5623 void ext4_dirty_inode(struct inode *inode) 5624 { 5625 handle_t *handle; 5626 5627 handle = ext4_journal_start(inode, 2); 5628 if (IS_ERR(handle)) 5629 goto out; 5630 5631 ext4_mark_inode_dirty(handle, inode); 5632 5633 ext4_journal_stop(handle); 5634 out: 5635 return; 5636 } 5637 5638 #if 0 5639 /* 5640 * Bind an inode's backing buffer_head into this transaction, to prevent 5641 * it from being flushed to disk early. Unlike 5642 * ext4_reserve_inode_write, this leaves behind no bh reference and 5643 * returns no iloc structure, so the caller needs to repeat the iloc 5644 * lookup to mark the inode dirty later. 
#if 0
/*
 * Bind an inode's backing buffer_head into this transaction, to prevent
 * it from being flushed to disk early.  Unlike
 * ext4_reserve_inode_write, this leaves behind no bh reference and
 * returns no iloc structure, so the caller needs to repeat the iloc
 * lookup to mark the inode dirty later.
 */
static int ext4_pin_inode(handle_t *handle, struct inode *inode)
{
	struct ext4_iloc iloc;

	int err = 0;
	if (handle) {
		err = ext4_get_inode_loc(inode, &iloc);
		if (!err) {
			BUFFER_TRACE(iloc.bh, "get_write_access");
			err = jbd2_journal_get_write_access(handle, iloc.bh);
			if (!err)
				err = ext4_handle_dirty_metadata(handle,
								 inode,
								 iloc.bh);
			brelse(iloc.bh);
		}
	}
	ext4_std_error(inode->i_sb, err);
	return err;
}
#endif

int ext4_change_inode_journal_flag(struct inode *inode, int val)
{
	journal_t *journal;
	handle_t *handle;
	int err;

	/*
	 * We have to be very careful here: changing a data block's
	 * journaling status dynamically is dangerous.  If we write a
	 * data block to the journal, change the status and then delete
	 * that block, we risk forgetting to revoke the old log record
	 * from the journal and so a subsequent replay can corrupt data.
	 * So, first we make sure that the journal is empty and that
	 * nobody is changing anything.
	 */

	journal = EXT4_JOURNAL(inode);
	if (!journal)
		return 0;
	if (is_journal_aborted(journal))
		return -EROFS;

	jbd2_journal_lock_updates(journal);
	jbd2_journal_flush(journal);

	/*
	 * OK, there are no updates running now, and all cached data is
	 * synced to disk.  We are now in a completely consistent state
	 * which doesn't have anything in the journal, and we know that
	 * no filesystem updates are running, so it is safe to modify
	 * the inode's in-core data-journaling state flag now.
	 */

	if (val)
		EXT4_I(inode)->i_flags |= EXT4_JOURNAL_DATA_FL;
	else
		EXT4_I(inode)->i_flags &= ~EXT4_JOURNAL_DATA_FL;
	ext4_set_aops(inode);

	jbd2_journal_unlock_updates(journal);

	/* Finally we can mark the inode as dirty. */

	handle = ext4_journal_start(inode, 1);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	err = ext4_mark_inode_dirty(handle, inode);
	ext4_handle_sync(handle);
	ext4_journal_stop(handle);
	ext4_std_error(inode->i_sb, err);

	return err;
}

static int ext4_bh_unmapped(handle_t *handle, struct buffer_head *bh)
{
	return !buffer_mapped(bh);
}
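/*
 * For context (an added note): the journal-flag change above is driven
 * from the EXT4_IOC_SETFLAGS ioctl path when userspace toggles the
 * per-inode journal-data attribute (e.g. chattr +j/-j).
 */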
int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	loff_t size;
	unsigned long len;
	int ret = -EINVAL;
	void *fsdata;
	struct file *file = vma->vm_file;
	struct inode *inode = file->f_path.dentry->d_inode;
	struct address_space *mapping = inode->i_mapping;

	/*
	 * Get i_alloc_sem to stop truncates messing with the inode.  We
	 * cannot get i_mutex because we are already holding mmap_sem.
	 */
	down_read(&inode->i_alloc_sem);
	size = i_size_read(inode);
	if (page->mapping != mapping || size <= page_offset(page)
	    || !PageUptodate(page)) {
		/* page got truncated from under us? */
		goto out_unlock;
	}
	ret = 0;
	if (PageMappedToDisk(page))
		goto out_unlock;

	if (page->index == size >> PAGE_CACHE_SHIFT)
		len = size & ~PAGE_CACHE_MASK;
	else
		len = PAGE_CACHE_SIZE;

	lock_page(page);
	/*
	 * Return if we have all the buffers mapped.  This avoids the need
	 * to call write_begin/write_end, which does a journal_start/
	 * journal_stop and so can block for a long time.
	 */
	if (page_has_buffers(page)) {
		if (!walk_page_buffers(NULL, page_buffers(page), 0, len, NULL,
				       ext4_bh_unmapped)) {
			unlock_page(page);
			goto out_unlock;
		}
	}
	unlock_page(page);
	/*
	 * OK, we need to fill the hole... Do write_begin/write_end to do
	 * block allocation/reservation.  We are not holding inode->i_mutex
	 * here, which allows parallel write_begin/write_end calls;
	 * lock_page prevents this from happening on the same page, though.
	 */
	ret = mapping->a_ops->write_begin(file, mapping, page_offset(page),
					  len, AOP_FLAG_UNINTERRUPTIBLE,
					  &page, &fsdata);
	if (ret < 0)
		goto out_unlock;
	ret = mapping->a_ops->write_end(file, mapping, page_offset(page),
					len, len, page, fsdata);
	if (ret < 0)
		goto out_unlock;
	ret = 0;
out_unlock:
	if (ret)
		ret = VM_FAULT_SIGBUS;
	up_read(&inode->i_alloc_sem);
	return ret;
}
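/*
 * For reference, a minimal sketch (an editor's addition) of how
 * ext4_page_mkwrite() is hooked up to the VM; the actual table lives in
 * fs/ext4/file.c:
 *
 *	static const struct vm_operations_struct ext4_file_vm_ops = {
 *		.fault		= filemap_fault,
 *		.page_mkwrite	= ext4_page_mkwrite,
 *	};
 *
 * The VM invokes .page_mkwrite on the first write fault to a shared
 * writable mapping, giving ext4 a chance to allocate or reserve blocks
 * before the page is made writable.
 */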