1 /* 2 * linux/fs/ext4/indirect.c 3 * 4 * from 5 * 6 * linux/fs/ext4/inode.c 7 * 8 * Copyright (C) 1992, 1993, 1994, 1995 9 * Remy Card (card@masi.ibp.fr) 10 * Laboratoire MASI - Institut Blaise Pascal 11 * Universite Pierre et Marie Curie (Paris VI) 12 * 13 * from 14 * 15 * linux/fs/minix/inode.c 16 * 17 * Copyright (C) 1991, 1992 Linus Torvalds 18 * 19 * Goal-directed block allocation by Stephen Tweedie 20 * (sct@redhat.com), 1993, 1998 21 */ 22 23 #include "ext4_jbd2.h" 24 #include "truncate.h" 25 #include <linux/uio.h> 26 27 #include <trace/events/ext4.h> 28 29 typedef struct { 30 __le32 *p; 31 __le32 key; 32 struct buffer_head *bh; 33 } Indirect; 34 35 static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v) 36 { 37 p->key = *(p->p = v); 38 p->bh = bh; 39 } 40 41 /** 42 * ext4_block_to_path - parse the block number into array of offsets 43 * @inode: inode in question (we are only interested in its superblock) 44 * @i_block: block number to be parsed 45 * @offsets: array to store the offsets in 46 * @boundary: set this non-zero if the referred-to block is likely to be 47 * followed (on disk) by an indirect block. 48 * 49 * To store the locations of file's data ext4 uses a data structure common 50 * for UNIX filesystems - tree of pointers anchored in the inode, with 51 * data blocks at leaves and indirect blocks in intermediate nodes. 52 * This function translates the block number into path in that tree - 53 * return value is the path length and @offsets[n] is the offset of 54 * pointer to (n+1)th node in the nth one. If @block is out of range 55 * (negative or too large) warning is printed and zero returned. 56 * 57 * Note: function doesn't find node addresses, so no IO is needed. All 58 * we need to know is the capacity of indirect blocks (taken from the 59 * inode->i_sb). 60 */ 61 62 /* 63 * Portability note: the last comparison (check that we fit into triple 64 * indirect block) is spelled differently, because otherwise on an 65 * architecture with 32-bit longs and 8Kb pages we might get into trouble 66 * if our filesystem had 8Kb blocks. We might use long long, but that would 67 * kill us on x86. Oh, well, at least the sign propagation does not matter - 68 * i_block would have to be negative in the very beginning, so we would not 69 * get there at all. 70 */ 71 72 static int ext4_block_to_path(struct inode *inode, 73 ext4_lblk_t i_block, 74 ext4_lblk_t offsets[4], int *boundary) 75 { 76 int ptrs = EXT4_ADDR_PER_BLOCK(inode->i_sb); 77 int ptrs_bits = EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb); 78 const long direct_blocks = EXT4_NDIR_BLOCKS, 79 indirect_blocks = ptrs, 80 double_blocks = (1 << (ptrs_bits * 2)); 81 int n = 0; 82 int final = 0; 83 84 if (i_block < direct_blocks) { 85 offsets[n++] = i_block; 86 final = direct_blocks; 87 } else if ((i_block -= direct_blocks) < indirect_blocks) { 88 offsets[n++] = EXT4_IND_BLOCK; 89 offsets[n++] = i_block; 90 final = ptrs; 91 } else if ((i_block -= indirect_blocks) < double_blocks) { 92 offsets[n++] = EXT4_DIND_BLOCK; 93 offsets[n++] = i_block >> ptrs_bits; 94 offsets[n++] = i_block & (ptrs - 1); 95 final = ptrs; 96 } else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) { 97 offsets[n++] = EXT4_TIND_BLOCK; 98 offsets[n++] = i_block >> (ptrs_bits * 2); 99 offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1); 100 offsets[n++] = i_block & (ptrs - 1); 101 final = ptrs; 102 } else { 103 ext4_warning(inode->i_sb, "block %lu > max in inode %lu", 104 i_block + direct_blocks + 105 indirect_blocks + double_blocks, inode->i_ino); 106 } 107 if (boundary) 108 *boundary = final - 1 - (i_block & (ptrs - 1)); 109 return n; 110 } 111 112 /** 113 * ext4_get_branch - read the chain of indirect blocks leading to data 114 * @inode: inode in question 115 * @depth: depth of the chain (1 - direct pointer, etc.) 116 * @offsets: offsets of pointers in inode/indirect blocks 117 * @chain: place to store the result 118 * @err: here we store the error value 119 * 120 * Function fills the array of triples <key, p, bh> and returns %NULL 121 * if everything went OK or the pointer to the last filled triple 122 * (incomplete one) otherwise. Upon the return chain[i].key contains 123 * the number of (i+1)-th block in the chain (as it is stored in memory, 124 * i.e. little-endian 32-bit), chain[i].p contains the address of that 125 * number (it points into struct inode for i==0 and into the bh->b_data 126 * for i>0) and chain[i].bh points to the buffer_head of i-th indirect 127 * block for i>0 and NULL for i==0. In other words, it holds the block 128 * numbers of the chain, addresses they were taken from (and where we can 129 * verify that chain did not change) and buffer_heads hosting these 130 * numbers. 131 * 132 * Function stops when it stumbles upon zero pointer (absent block) 133 * (pointer to last triple returned, *@err == 0) 134 * or when it gets an IO error reading an indirect block 135 * (ditto, *@err == -EIO) 136 * or when it reads all @depth-1 indirect blocks successfully and finds 137 * the whole chain, all way to the data (returns %NULL, *err == 0). 138 * 139 * Need to be called with 140 * down_read(&EXT4_I(inode)->i_data_sem) 141 */ 142 static Indirect *ext4_get_branch(struct inode *inode, int depth, 143 ext4_lblk_t *offsets, 144 Indirect chain[4], int *err) 145 { 146 struct super_block *sb = inode->i_sb; 147 Indirect *p = chain; 148 struct buffer_head *bh; 149 int ret = -EIO; 150 151 *err = 0; 152 /* i_data is not going away, no lock needed */ 153 add_chain(chain, NULL, EXT4_I(inode)->i_data + *offsets); 154 if (!p->key) 155 goto no_block; 156 while (--depth) { 157 bh = sb_getblk(sb, le32_to_cpu(p->key)); 158 if (unlikely(!bh)) { 159 ret = -ENOMEM; 160 goto failure; 161 } 162 163 if (!bh_uptodate_or_lock(bh)) { 164 if (bh_submit_read(bh) < 0) { 165 put_bh(bh); 166 goto failure; 167 } 168 /* validate block references */ 169 if (ext4_check_indirect_blockref(inode, bh)) { 170 put_bh(bh); 171 goto failure; 172 } 173 } 174 175 add_chain(++p, bh, (__le32 *)bh->b_data + *++offsets); 176 /* Reader: end */ 177 if (!p->key) 178 goto no_block; 179 } 180 return NULL; 181 182 failure: 183 *err = ret; 184 no_block: 185 return p; 186 } 187 188 /** 189 * ext4_find_near - find a place for allocation with sufficient locality 190 * @inode: owner 191 * @ind: descriptor of indirect block. 192 * 193 * This function returns the preferred place for block allocation. 194 * It is used when heuristic for sequential allocation fails. 195 * Rules are: 196 * + if there is a block to the left of our position - allocate near it. 197 * + if pointer will live in indirect block - allocate near that block. 198 * + if pointer will live in inode - allocate in the same 199 * cylinder group. 200 * 201 * In the latter case we colour the starting block by the callers PID to 202 * prevent it from clashing with concurrent allocations for a different inode 203 * in the same block group. The PID is used here so that functionally related 204 * files will be close-by on-disk. 205 * 206 * Caller must make sure that @ind is valid and will stay that way. 207 */ 208 static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind) 209 { 210 struct ext4_inode_info *ei = EXT4_I(inode); 211 __le32 *start = ind->bh ? (__le32 *) ind->bh->b_data : ei->i_data; 212 __le32 *p; 213 214 /* Try to find previous block */ 215 for (p = ind->p - 1; p >= start; p--) { 216 if (*p) 217 return le32_to_cpu(*p); 218 } 219 220 /* No such thing, so let's try location of indirect block */ 221 if (ind->bh) 222 return ind->bh->b_blocknr; 223 224 /* 225 * It is going to be referred to from the inode itself? OK, just put it 226 * into the same cylinder group then. 227 */ 228 return ext4_inode_to_goal_block(inode); 229 } 230 231 /** 232 * ext4_find_goal - find a preferred place for allocation. 233 * @inode: owner 234 * @block: block we want 235 * @partial: pointer to the last triple within a chain 236 * 237 * Normally this function find the preferred place for block allocation, 238 * returns it. 239 * Because this is only used for non-extent files, we limit the block nr 240 * to 32 bits. 241 */ 242 static ext4_fsblk_t ext4_find_goal(struct inode *inode, ext4_lblk_t block, 243 Indirect *partial) 244 { 245 ext4_fsblk_t goal; 246 247 /* 248 * XXX need to get goal block from mballoc's data structures 249 */ 250 251 goal = ext4_find_near(inode, partial); 252 goal = goal & EXT4_MAX_BLOCK_FILE_PHYS; 253 return goal; 254 } 255 256 /** 257 * ext4_blks_to_allocate - Look up the block map and count the number 258 * of direct blocks need to be allocated for the given branch. 259 * 260 * @branch: chain of indirect blocks 261 * @k: number of blocks need for indirect blocks 262 * @blks: number of data blocks to be mapped. 263 * @blocks_to_boundary: the offset in the indirect block 264 * 265 * return the total number of blocks to be allocate, including the 266 * direct and indirect blocks. 267 */ 268 static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned int blks, 269 int blocks_to_boundary) 270 { 271 unsigned int count = 0; 272 273 /* 274 * Simple case, [t,d]Indirect block(s) has not allocated yet 275 * then it's clear blocks on that path have not allocated 276 */ 277 if (k > 0) { 278 /* right now we don't handle cross boundary allocation */ 279 if (blks < blocks_to_boundary + 1) 280 count += blks; 281 else 282 count += blocks_to_boundary + 1; 283 return count; 284 } 285 286 count++; 287 while (count < blks && count <= blocks_to_boundary && 288 le32_to_cpu(*(branch[0].p + count)) == 0) { 289 count++; 290 } 291 return count; 292 } 293 294 /** 295 * ext4_alloc_branch - allocate and set up a chain of blocks. 296 * @handle: handle for this transaction 297 * @inode: owner 298 * @indirect_blks: number of allocated indirect blocks 299 * @blks: number of allocated direct blocks 300 * @goal: preferred place for allocation 301 * @offsets: offsets (in the blocks) to store the pointers to next. 302 * @branch: place to store the chain in. 303 * 304 * This function allocates blocks, zeroes out all but the last one, 305 * links them into chain and (if we are synchronous) writes them to disk. 306 * In other words, it prepares a branch that can be spliced onto the 307 * inode. It stores the information about that chain in the branch[], in 308 * the same format as ext4_get_branch() would do. We are calling it after 309 * we had read the existing part of chain and partial points to the last 310 * triple of that (one with zero ->key). Upon the exit we have the same 311 * picture as after the successful ext4_get_block(), except that in one 312 * place chain is disconnected - *branch->p is still zero (we did not 313 * set the last link), but branch->key contains the number that should 314 * be placed into *branch->p to fill that gap. 315 * 316 * If allocation fails we free all blocks we've allocated (and forget 317 * their buffer_heads) and return the error value the from failed 318 * ext4_alloc_block() (normally -ENOSPC). Otherwise we set the chain 319 * as described above and return 0. 320 */ 321 static int ext4_alloc_branch(handle_t *handle, 322 struct ext4_allocation_request *ar, 323 int indirect_blks, ext4_lblk_t *offsets, 324 Indirect *branch) 325 { 326 struct buffer_head * bh; 327 ext4_fsblk_t b, new_blocks[4]; 328 __le32 *p; 329 int i, j, err, len = 1; 330 331 for (i = 0; i <= indirect_blks; i++) { 332 if (i == indirect_blks) { 333 new_blocks[i] = ext4_mb_new_blocks(handle, ar, &err); 334 } else 335 ar->goal = new_blocks[i] = ext4_new_meta_blocks(handle, 336 ar->inode, ar->goal, 337 ar->flags & EXT4_MB_DELALLOC_RESERVED, 338 NULL, &err); 339 if (err) { 340 i--; 341 goto failed; 342 } 343 branch[i].key = cpu_to_le32(new_blocks[i]); 344 if (i == 0) 345 continue; 346 347 bh = branch[i].bh = sb_getblk(ar->inode->i_sb, new_blocks[i-1]); 348 if (unlikely(!bh)) { 349 err = -ENOMEM; 350 goto failed; 351 } 352 lock_buffer(bh); 353 BUFFER_TRACE(bh, "call get_create_access"); 354 err = ext4_journal_get_create_access(handle, bh); 355 if (err) { 356 unlock_buffer(bh); 357 goto failed; 358 } 359 360 memset(bh->b_data, 0, bh->b_size); 361 p = branch[i].p = (__le32 *) bh->b_data + offsets[i]; 362 b = new_blocks[i]; 363 364 if (i == indirect_blks) 365 len = ar->len; 366 for (j = 0; j < len; j++) 367 *p++ = cpu_to_le32(b++); 368 369 BUFFER_TRACE(bh, "marking uptodate"); 370 set_buffer_uptodate(bh); 371 unlock_buffer(bh); 372 373 BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata"); 374 err = ext4_handle_dirty_metadata(handle, ar->inode, bh); 375 if (err) 376 goto failed; 377 } 378 return 0; 379 failed: 380 for (; i >= 0; i--) { 381 /* 382 * We want to ext4_forget() only freshly allocated indirect 383 * blocks. Buffer for new_blocks[i-1] is at branch[i].bh and 384 * buffer at branch[0].bh is indirect block / inode already 385 * existing before ext4_alloc_branch() was called. 386 */ 387 if (i > 0 && i != indirect_blks && branch[i].bh) 388 ext4_forget(handle, 1, ar->inode, branch[i].bh, 389 branch[i].bh->b_blocknr); 390 ext4_free_blocks(handle, ar->inode, NULL, new_blocks[i], 391 (i == indirect_blks) ? ar->len : 1, 0); 392 } 393 return err; 394 } 395 396 /** 397 * ext4_splice_branch - splice the allocated branch onto inode. 398 * @handle: handle for this transaction 399 * @inode: owner 400 * @block: (logical) number of block we are adding 401 * @chain: chain of indirect blocks (with a missing link - see 402 * ext4_alloc_branch) 403 * @where: location of missing link 404 * @num: number of indirect blocks we are adding 405 * @blks: number of direct blocks we are adding 406 * 407 * This function fills the missing link and does all housekeeping needed in 408 * inode (->i_blocks, etc.). In case of success we end up with the full 409 * chain to new block and return 0. 410 */ 411 static int ext4_splice_branch(handle_t *handle, 412 struct ext4_allocation_request *ar, 413 Indirect *where, int num) 414 { 415 int i; 416 int err = 0; 417 ext4_fsblk_t current_block; 418 419 /* 420 * If we're splicing into a [td]indirect block (as opposed to the 421 * inode) then we need to get write access to the [td]indirect block 422 * before the splice. 423 */ 424 if (where->bh) { 425 BUFFER_TRACE(where->bh, "get_write_access"); 426 err = ext4_journal_get_write_access(handle, where->bh); 427 if (err) 428 goto err_out; 429 } 430 /* That's it */ 431 432 *where->p = where->key; 433 434 /* 435 * Update the host buffer_head or inode to point to more just allocated 436 * direct blocks blocks 437 */ 438 if (num == 0 && ar->len > 1) { 439 current_block = le32_to_cpu(where->key) + 1; 440 for (i = 1; i < ar->len; i++) 441 *(where->p + i) = cpu_to_le32(current_block++); 442 } 443 444 /* We are done with atomic stuff, now do the rest of housekeeping */ 445 /* had we spliced it onto indirect block? */ 446 if (where->bh) { 447 /* 448 * If we spliced it onto an indirect block, we haven't 449 * altered the inode. Note however that if it is being spliced 450 * onto an indirect block at the very end of the file (the 451 * file is growing) then we *will* alter the inode to reflect 452 * the new i_size. But that is not done here - it is done in 453 * generic_commit_write->__mark_inode_dirty->ext4_dirty_inode. 454 */ 455 jbd_debug(5, "splicing indirect only\n"); 456 BUFFER_TRACE(where->bh, "call ext4_handle_dirty_metadata"); 457 err = ext4_handle_dirty_metadata(handle, ar->inode, where->bh); 458 if (err) 459 goto err_out; 460 } else { 461 /* 462 * OK, we spliced it into the inode itself on a direct block. 463 */ 464 ext4_mark_inode_dirty(handle, ar->inode); 465 jbd_debug(5, "splicing direct\n"); 466 } 467 return err; 468 469 err_out: 470 for (i = 1; i <= num; i++) { 471 /* 472 * branch[i].bh is newly allocated, so there is no 473 * need to revoke the block, which is why we don't 474 * need to set EXT4_FREE_BLOCKS_METADATA. 475 */ 476 ext4_free_blocks(handle, ar->inode, where[i].bh, 0, 1, 477 EXT4_FREE_BLOCKS_FORGET); 478 } 479 ext4_free_blocks(handle, ar->inode, NULL, le32_to_cpu(where[num].key), 480 ar->len, 0); 481 482 return err; 483 } 484 485 /* 486 * The ext4_ind_map_blocks() function handles non-extents inodes 487 * (i.e., using the traditional indirect/double-indirect i_blocks 488 * scheme) for ext4_map_blocks(). 489 * 490 * Allocation strategy is simple: if we have to allocate something, we will 491 * have to go the whole way to leaf. So let's do it before attaching anything 492 * to tree, set linkage between the newborn blocks, write them if sync is 493 * required, recheck the path, free and repeat if check fails, otherwise 494 * set the last missing link (that will protect us from any truncate-generated 495 * removals - all blocks on the path are immune now) and possibly force the 496 * write on the parent block. 497 * That has a nice additional property: no special recovery from the failed 498 * allocations is needed - we simply release blocks and do not touch anything 499 * reachable from inode. 500 * 501 * `handle' can be NULL if create == 0. 502 * 503 * return > 0, # of blocks mapped or allocated. 504 * return = 0, if plain lookup failed. 505 * return < 0, error case. 506 * 507 * The ext4_ind_get_blocks() function should be called with 508 * down_write(&EXT4_I(inode)->i_data_sem) if allocating filesystem 509 * blocks (i.e., flags has EXT4_GET_BLOCKS_CREATE set) or 510 * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system 511 * blocks. 512 */ 513 int ext4_ind_map_blocks(handle_t *handle, struct inode *inode, 514 struct ext4_map_blocks *map, 515 int flags) 516 { 517 struct ext4_allocation_request ar; 518 int err = -EIO; 519 ext4_lblk_t offsets[4]; 520 Indirect chain[4]; 521 Indirect *partial; 522 int indirect_blks; 523 int blocks_to_boundary = 0; 524 int depth; 525 int count = 0; 526 ext4_fsblk_t first_block = 0; 527 528 trace_ext4_ind_map_blocks_enter(inode, map->m_lblk, map->m_len, flags); 529 J_ASSERT(!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))); 530 J_ASSERT(handle != NULL || (flags & EXT4_GET_BLOCKS_CREATE) == 0); 531 depth = ext4_block_to_path(inode, map->m_lblk, offsets, 532 &blocks_to_boundary); 533 534 if (depth == 0) 535 goto out; 536 537 partial = ext4_get_branch(inode, depth, offsets, chain, &err); 538 539 /* Simplest case - block found, no allocation needed */ 540 if (!partial) { 541 first_block = le32_to_cpu(chain[depth - 1].key); 542 count++; 543 /*map more blocks*/ 544 while (count < map->m_len && count <= blocks_to_boundary) { 545 ext4_fsblk_t blk; 546 547 blk = le32_to_cpu(*(chain[depth-1].p + count)); 548 549 if (blk == first_block + count) 550 count++; 551 else 552 break; 553 } 554 goto got_it; 555 } 556 557 /* Next simple case - plain lookup or failed read of indirect block */ 558 if ((flags & EXT4_GET_BLOCKS_CREATE) == 0 || err == -EIO) 559 goto cleanup; 560 561 /* 562 * Okay, we need to do block allocation. 563 */ 564 if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, 565 EXT4_FEATURE_RO_COMPAT_BIGALLOC)) { 566 EXT4_ERROR_INODE(inode, "Can't allocate blocks for " 567 "non-extent mapped inodes with bigalloc"); 568 return -EUCLEAN; 569 } 570 571 /* Set up for the direct block allocation */ 572 memset(&ar, 0, sizeof(ar)); 573 ar.inode = inode; 574 ar.logical = map->m_lblk; 575 if (S_ISREG(inode->i_mode)) 576 ar.flags = EXT4_MB_HINT_DATA; 577 if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) 578 ar.flags |= EXT4_MB_DELALLOC_RESERVED; 579 if (flags & EXT4_GET_BLOCKS_METADATA_NOFAIL) 580 ar.flags |= EXT4_MB_USE_RESERVED; 581 582 ar.goal = ext4_find_goal(inode, map->m_lblk, partial); 583 584 /* the number of blocks need to allocate for [d,t]indirect blocks */ 585 indirect_blks = (chain + depth) - partial - 1; 586 587 /* 588 * Next look up the indirect map to count the totoal number of 589 * direct blocks to allocate for this branch. 590 */ 591 ar.len = ext4_blks_to_allocate(partial, indirect_blks, 592 map->m_len, blocks_to_boundary); 593 594 /* 595 * Block out ext4_truncate while we alter the tree 596 */ 597 err = ext4_alloc_branch(handle, &ar, indirect_blks, 598 offsets + (partial - chain), partial); 599 600 /* 601 * The ext4_splice_branch call will free and forget any buffers 602 * on the new chain if there is a failure, but that risks using 603 * up transaction credits, especially for bitmaps where the 604 * credits cannot be returned. Can we handle this somehow? We 605 * may need to return -EAGAIN upwards in the worst case. --sct 606 */ 607 if (!err) 608 err = ext4_splice_branch(handle, &ar, partial, indirect_blks); 609 if (err) 610 goto cleanup; 611 612 map->m_flags |= EXT4_MAP_NEW; 613 614 ext4_update_inode_fsync_trans(handle, inode, 1); 615 count = ar.len; 616 got_it: 617 map->m_flags |= EXT4_MAP_MAPPED; 618 map->m_pblk = le32_to_cpu(chain[depth-1].key); 619 map->m_len = count; 620 if (count > blocks_to_boundary) 621 map->m_flags |= EXT4_MAP_BOUNDARY; 622 err = count; 623 /* Clean up and exit */ 624 partial = chain + depth - 1; /* the whole chain */ 625 cleanup: 626 while (partial > chain) { 627 BUFFER_TRACE(partial->bh, "call brelse"); 628 brelse(partial->bh); 629 partial--; 630 } 631 out: 632 trace_ext4_ind_map_blocks_exit(inode, flags, map, err); 633 return err; 634 } 635 636 /* 637 * O_DIRECT for ext3 (or indirect map) based files 638 * 639 * If the O_DIRECT write will extend the file then add this inode to the 640 * orphan list. So recovery will truncate it back to the original size 641 * if the machine crashes during the write. 642 * 643 * If the O_DIRECT write is intantiating holes inside i_size and the machine 644 * crashes then stale disk data _may_ be exposed inside the file. But current 645 * VFS code falls back into buffered path in that case so we are safe. 646 */ 647 ssize_t ext4_ind_direct_IO(struct kiocb *iocb, struct iov_iter *iter, 648 loff_t offset) 649 { 650 struct file *file = iocb->ki_filp; 651 struct inode *inode = file->f_mapping->host; 652 struct ext4_inode_info *ei = EXT4_I(inode); 653 handle_t *handle; 654 ssize_t ret; 655 int orphan = 0; 656 size_t count = iov_iter_count(iter); 657 int retries = 0; 658 659 if (iov_iter_rw(iter) == WRITE) { 660 loff_t final_size = offset + count; 661 662 if (final_size > inode->i_size) { 663 /* Credits for sb + inode write */ 664 handle = ext4_journal_start(inode, EXT4_HT_INODE, 2); 665 if (IS_ERR(handle)) { 666 ret = PTR_ERR(handle); 667 goto out; 668 } 669 ret = ext4_orphan_add(handle, inode); 670 if (ret) { 671 ext4_journal_stop(handle); 672 goto out; 673 } 674 orphan = 1; 675 ei->i_disksize = inode->i_size; 676 ext4_journal_stop(handle); 677 } 678 } 679 680 retry: 681 if (iov_iter_rw(iter) == READ && ext4_should_dioread_nolock(inode)) { 682 /* 683 * Nolock dioread optimization may be dynamically disabled 684 * via ext4_inode_block_unlocked_dio(). Check inode's state 685 * while holding extra i_dio_count ref. 686 */ 687 inode_dio_begin(inode); 688 smp_mb(); 689 if (unlikely(ext4_test_inode_state(inode, 690 EXT4_STATE_DIOREAD_LOCK))) { 691 inode_dio_end(inode); 692 goto locked; 693 } 694 if (IS_DAX(inode)) 695 ret = dax_do_io(iocb, inode, iter, offset, 696 ext4_get_block, NULL, 0); 697 else 698 ret = __blockdev_direct_IO(iocb, inode, 699 inode->i_sb->s_bdev, iter, 700 offset, ext4_get_block, NULL, 701 NULL, 0); 702 inode_dio_end(inode); 703 } else { 704 locked: 705 if (IS_DAX(inode)) 706 ret = dax_do_io(iocb, inode, iter, offset, 707 ext4_get_block, NULL, DIO_LOCKING); 708 else 709 ret = blockdev_direct_IO(iocb, inode, iter, offset, 710 ext4_get_block); 711 712 if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) { 713 loff_t isize = i_size_read(inode); 714 loff_t end = offset + count; 715 716 if (end > isize) 717 ext4_truncate_failed_write(inode); 718 } 719 } 720 if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) 721 goto retry; 722 723 if (orphan) { 724 int err; 725 726 /* Credits for sb + inode write */ 727 handle = ext4_journal_start(inode, EXT4_HT_INODE, 2); 728 if (IS_ERR(handle)) { 729 /* This is really bad luck. We've written the data 730 * but cannot extend i_size. Bail out and pretend 731 * the write failed... */ 732 ret = PTR_ERR(handle); 733 if (inode->i_nlink) 734 ext4_orphan_del(NULL, inode); 735 736 goto out; 737 } 738 if (inode->i_nlink) 739 ext4_orphan_del(handle, inode); 740 if (ret > 0) { 741 loff_t end = offset + ret; 742 if (end > inode->i_size) { 743 ei->i_disksize = end; 744 i_size_write(inode, end); 745 /* 746 * We're going to return a positive `ret' 747 * here due to non-zero-length I/O, so there's 748 * no way of reporting error returns from 749 * ext4_mark_inode_dirty() to userspace. So 750 * ignore it. 751 */ 752 ext4_mark_inode_dirty(handle, inode); 753 } 754 } 755 err = ext4_journal_stop(handle); 756 if (ret == 0) 757 ret = err; 758 } 759 out: 760 return ret; 761 } 762 763 /* 764 * Calculate the number of metadata blocks need to reserve 765 * to allocate a new block at @lblocks for non extent file based file 766 */ 767 int ext4_ind_calc_metadata_amount(struct inode *inode, sector_t lblock) 768 { 769 struct ext4_inode_info *ei = EXT4_I(inode); 770 sector_t dind_mask = ~((sector_t)EXT4_ADDR_PER_BLOCK(inode->i_sb) - 1); 771 int blk_bits; 772 773 if (lblock < EXT4_NDIR_BLOCKS) 774 return 0; 775 776 lblock -= EXT4_NDIR_BLOCKS; 777 778 if (ei->i_da_metadata_calc_len && 779 (lblock & dind_mask) == ei->i_da_metadata_calc_last_lblock) { 780 ei->i_da_metadata_calc_len++; 781 return 0; 782 } 783 ei->i_da_metadata_calc_last_lblock = lblock & dind_mask; 784 ei->i_da_metadata_calc_len = 1; 785 blk_bits = order_base_2(lblock); 786 return (blk_bits / EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb)) + 1; 787 } 788 789 /* 790 * Calculate number of indirect blocks touched by mapping @nrblocks logically 791 * contiguous blocks 792 */ 793 int ext4_ind_trans_blocks(struct inode *inode, int nrblocks) 794 { 795 /* 796 * With N contiguous data blocks, we need at most 797 * N/EXT4_ADDR_PER_BLOCK(inode->i_sb) + 1 indirect blocks, 798 * 2 dindirect blocks, and 1 tindirect block 799 */ 800 return DIV_ROUND_UP(nrblocks, EXT4_ADDR_PER_BLOCK(inode->i_sb)) + 4; 801 } 802 803 /* 804 * Truncate transactions can be complex and absolutely huge. So we need to 805 * be able to restart the transaction at a conventient checkpoint to make 806 * sure we don't overflow the journal. 807 * 808 * Try to extend this transaction for the purposes of truncation. If 809 * extend fails, we need to propagate the failure up and restart the 810 * transaction in the top-level truncate loop. --sct 811 * 812 * Returns 0 if we managed to create more room. If we can't create more 813 * room, and the transaction must be restarted we return 1. 814 */ 815 static int try_to_extend_transaction(handle_t *handle, struct inode *inode) 816 { 817 if (!ext4_handle_valid(handle)) 818 return 0; 819 if (ext4_handle_has_enough_credits(handle, EXT4_RESERVE_TRANS_BLOCKS+1)) 820 return 0; 821 if (!ext4_journal_extend(handle, ext4_blocks_for_truncate(inode))) 822 return 0; 823 return 1; 824 } 825 826 /* 827 * Probably it should be a library function... search for first non-zero word 828 * or memcmp with zero_page, whatever is better for particular architecture. 829 * Linus? 830 */ 831 static inline int all_zeroes(__le32 *p, __le32 *q) 832 { 833 while (p < q) 834 if (*p++) 835 return 0; 836 return 1; 837 } 838 839 /** 840 * ext4_find_shared - find the indirect blocks for partial truncation. 841 * @inode: inode in question 842 * @depth: depth of the affected branch 843 * @offsets: offsets of pointers in that branch (see ext4_block_to_path) 844 * @chain: place to store the pointers to partial indirect blocks 845 * @top: place to the (detached) top of branch 846 * 847 * This is a helper function used by ext4_truncate(). 848 * 849 * When we do truncate() we may have to clean the ends of several 850 * indirect blocks but leave the blocks themselves alive. Block is 851 * partially truncated if some data below the new i_size is referred 852 * from it (and it is on the path to the first completely truncated 853 * data block, indeed). We have to free the top of that path along 854 * with everything to the right of the path. Since no allocation 855 * past the truncation point is possible until ext4_truncate() 856 * finishes, we may safely do the latter, but top of branch may 857 * require special attention - pageout below the truncation point 858 * might try to populate it. 859 * 860 * We atomically detach the top of branch from the tree, store the 861 * block number of its root in *@top, pointers to buffer_heads of 862 * partially truncated blocks - in @chain[].bh and pointers to 863 * their last elements that should not be removed - in 864 * @chain[].p. Return value is the pointer to last filled element 865 * of @chain. 866 * 867 * The work left to caller to do the actual freeing of subtrees: 868 * a) free the subtree starting from *@top 869 * b) free the subtrees whose roots are stored in 870 * (@chain[i].p+1 .. end of @chain[i].bh->b_data) 871 * c) free the subtrees growing from the inode past the @chain[0]. 872 * (no partially truncated stuff there). */ 873 874 static Indirect *ext4_find_shared(struct inode *inode, int depth, 875 ext4_lblk_t offsets[4], Indirect chain[4], 876 __le32 *top) 877 { 878 Indirect *partial, *p; 879 int k, err; 880 881 *top = 0; 882 /* Make k index the deepest non-null offset + 1 */ 883 for (k = depth; k > 1 && !offsets[k-1]; k--) 884 ; 885 partial = ext4_get_branch(inode, k, offsets, chain, &err); 886 /* Writer: pointers */ 887 if (!partial) 888 partial = chain + k-1; 889 /* 890 * If the branch acquired continuation since we've looked at it - 891 * fine, it should all survive and (new) top doesn't belong to us. 892 */ 893 if (!partial->key && *partial->p) 894 /* Writer: end */ 895 goto no_top; 896 for (p = partial; (p > chain) && all_zeroes((__le32 *) p->bh->b_data, p->p); p--) 897 ; 898 /* 899 * OK, we've found the last block that must survive. The rest of our 900 * branch should be detached before unlocking. However, if that rest 901 * of branch is all ours and does not grow immediately from the inode 902 * it's easier to cheat and just decrement partial->p. 903 */ 904 if (p == chain + k - 1 && p > chain) { 905 p->p--; 906 } else { 907 *top = *p->p; 908 /* Nope, don't do this in ext4. Must leave the tree intact */ 909 #if 0 910 *p->p = 0; 911 #endif 912 } 913 /* Writer: end */ 914 915 while (partial > p) { 916 brelse(partial->bh); 917 partial--; 918 } 919 no_top: 920 return partial; 921 } 922 923 /* 924 * Zero a number of block pointers in either an inode or an indirect block. 925 * If we restart the transaction we must again get write access to the 926 * indirect block for further modification. 927 * 928 * We release `count' blocks on disk, but (last - first) may be greater 929 * than `count' because there can be holes in there. 930 * 931 * Return 0 on success, 1 on invalid block range 932 * and < 0 on fatal error. 933 */ 934 static int ext4_clear_blocks(handle_t *handle, struct inode *inode, 935 struct buffer_head *bh, 936 ext4_fsblk_t block_to_free, 937 unsigned long count, __le32 *first, 938 __le32 *last) 939 { 940 __le32 *p; 941 int flags = EXT4_FREE_BLOCKS_VALIDATED; 942 int err; 943 944 if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) 945 flags |= EXT4_FREE_BLOCKS_FORGET | EXT4_FREE_BLOCKS_METADATA; 946 else if (ext4_should_journal_data(inode)) 947 flags |= EXT4_FREE_BLOCKS_FORGET; 948 949 if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), block_to_free, 950 count)) { 951 EXT4_ERROR_INODE(inode, "attempt to clear invalid " 952 "blocks %llu len %lu", 953 (unsigned long long) block_to_free, count); 954 return 1; 955 } 956 957 if (try_to_extend_transaction(handle, inode)) { 958 if (bh) { 959 BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata"); 960 err = ext4_handle_dirty_metadata(handle, inode, bh); 961 if (unlikely(err)) 962 goto out_err; 963 } 964 err = ext4_mark_inode_dirty(handle, inode); 965 if (unlikely(err)) 966 goto out_err; 967 err = ext4_truncate_restart_trans(handle, inode, 968 ext4_blocks_for_truncate(inode)); 969 if (unlikely(err)) 970 goto out_err; 971 if (bh) { 972 BUFFER_TRACE(bh, "retaking write access"); 973 err = ext4_journal_get_write_access(handle, bh); 974 if (unlikely(err)) 975 goto out_err; 976 } 977 } 978 979 for (p = first; p < last; p++) 980 *p = 0; 981 982 ext4_free_blocks(handle, inode, NULL, block_to_free, count, flags); 983 return 0; 984 out_err: 985 ext4_std_error(inode->i_sb, err); 986 return err; 987 } 988 989 /** 990 * ext4_free_data - free a list of data blocks 991 * @handle: handle for this transaction 992 * @inode: inode we are dealing with 993 * @this_bh: indirect buffer_head which contains *@first and *@last 994 * @first: array of block numbers 995 * @last: points immediately past the end of array 996 * 997 * We are freeing all blocks referred from that array (numbers are stored as 998 * little-endian 32-bit) and updating @inode->i_blocks appropriately. 999 * 1000 * We accumulate contiguous runs of blocks to free. Conveniently, if these 1001 * blocks are contiguous then releasing them at one time will only affect one 1002 * or two bitmap blocks (+ group descriptor(s) and superblock) and we won't 1003 * actually use a lot of journal space. 1004 * 1005 * @this_bh will be %NULL if @first and @last point into the inode's direct 1006 * block pointers. 1007 */ 1008 static void ext4_free_data(handle_t *handle, struct inode *inode, 1009 struct buffer_head *this_bh, 1010 __le32 *first, __le32 *last) 1011 { 1012 ext4_fsblk_t block_to_free = 0; /* Starting block # of a run */ 1013 unsigned long count = 0; /* Number of blocks in the run */ 1014 __le32 *block_to_free_p = NULL; /* Pointer into inode/ind 1015 corresponding to 1016 block_to_free */ 1017 ext4_fsblk_t nr; /* Current block # */ 1018 __le32 *p; /* Pointer into inode/ind 1019 for current block */ 1020 int err = 0; 1021 1022 if (this_bh) { /* For indirect block */ 1023 BUFFER_TRACE(this_bh, "get_write_access"); 1024 err = ext4_journal_get_write_access(handle, this_bh); 1025 /* Important: if we can't update the indirect pointers 1026 * to the blocks, we can't free them. */ 1027 if (err) 1028 return; 1029 } 1030 1031 for (p = first; p < last; p++) { 1032 nr = le32_to_cpu(*p); 1033 if (nr) { 1034 /* accumulate blocks to free if they're contiguous */ 1035 if (count == 0) { 1036 block_to_free = nr; 1037 block_to_free_p = p; 1038 count = 1; 1039 } else if (nr == block_to_free + count) { 1040 count++; 1041 } else { 1042 err = ext4_clear_blocks(handle, inode, this_bh, 1043 block_to_free, count, 1044 block_to_free_p, p); 1045 if (err) 1046 break; 1047 block_to_free = nr; 1048 block_to_free_p = p; 1049 count = 1; 1050 } 1051 } 1052 } 1053 1054 if (!err && count > 0) 1055 err = ext4_clear_blocks(handle, inode, this_bh, block_to_free, 1056 count, block_to_free_p, p); 1057 if (err < 0) 1058 /* fatal error */ 1059 return; 1060 1061 if (this_bh) { 1062 BUFFER_TRACE(this_bh, "call ext4_handle_dirty_metadata"); 1063 1064 /* 1065 * The buffer head should have an attached journal head at this 1066 * point. However, if the data is corrupted and an indirect 1067 * block pointed to itself, it would have been detached when 1068 * the block was cleared. Check for this instead of OOPSing. 1069 */ 1070 if ((EXT4_JOURNAL(inode) == NULL) || bh2jh(this_bh)) 1071 ext4_handle_dirty_metadata(handle, inode, this_bh); 1072 else 1073 EXT4_ERROR_INODE(inode, 1074 "circular indirect block detected at " 1075 "block %llu", 1076 (unsigned long long) this_bh->b_blocknr); 1077 } 1078 } 1079 1080 /** 1081 * ext4_free_branches - free an array of branches 1082 * @handle: JBD handle for this transaction 1083 * @inode: inode we are dealing with 1084 * @parent_bh: the buffer_head which contains *@first and *@last 1085 * @first: array of block numbers 1086 * @last: pointer immediately past the end of array 1087 * @depth: depth of the branches to free 1088 * 1089 * We are freeing all blocks referred from these branches (numbers are 1090 * stored as little-endian 32-bit) and updating @inode->i_blocks 1091 * appropriately. 1092 */ 1093 static void ext4_free_branches(handle_t *handle, struct inode *inode, 1094 struct buffer_head *parent_bh, 1095 __le32 *first, __le32 *last, int depth) 1096 { 1097 ext4_fsblk_t nr; 1098 __le32 *p; 1099 1100 if (ext4_handle_is_aborted(handle)) 1101 return; 1102 1103 if (depth--) { 1104 struct buffer_head *bh; 1105 int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb); 1106 p = last; 1107 while (--p >= first) { 1108 nr = le32_to_cpu(*p); 1109 if (!nr) 1110 continue; /* A hole */ 1111 1112 if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), 1113 nr, 1)) { 1114 EXT4_ERROR_INODE(inode, 1115 "invalid indirect mapped " 1116 "block %lu (level %d)", 1117 (unsigned long) nr, depth); 1118 break; 1119 } 1120 1121 /* Go read the buffer for the next level down */ 1122 bh = sb_bread(inode->i_sb, nr); 1123 1124 /* 1125 * A read failure? Report error and clear slot 1126 * (should be rare). 1127 */ 1128 if (!bh) { 1129 EXT4_ERROR_INODE_BLOCK(inode, nr, 1130 "Read failure"); 1131 continue; 1132 } 1133 1134 /* This zaps the entire block. Bottom up. */ 1135 BUFFER_TRACE(bh, "free child branches"); 1136 ext4_free_branches(handle, inode, bh, 1137 (__le32 *) bh->b_data, 1138 (__le32 *) bh->b_data + addr_per_block, 1139 depth); 1140 brelse(bh); 1141 1142 /* 1143 * Everything below this this pointer has been 1144 * released. Now let this top-of-subtree go. 1145 * 1146 * We want the freeing of this indirect block to be 1147 * atomic in the journal with the updating of the 1148 * bitmap block which owns it. So make some room in 1149 * the journal. 1150 * 1151 * We zero the parent pointer *after* freeing its 1152 * pointee in the bitmaps, so if extend_transaction() 1153 * for some reason fails to put the bitmap changes and 1154 * the release into the same transaction, recovery 1155 * will merely complain about releasing a free block, 1156 * rather than leaking blocks. 1157 */ 1158 if (ext4_handle_is_aborted(handle)) 1159 return; 1160 if (try_to_extend_transaction(handle, inode)) { 1161 ext4_mark_inode_dirty(handle, inode); 1162 ext4_truncate_restart_trans(handle, inode, 1163 ext4_blocks_for_truncate(inode)); 1164 } 1165 1166 /* 1167 * The forget flag here is critical because if 1168 * we are journaling (and not doing data 1169 * journaling), we have to make sure a revoke 1170 * record is written to prevent the journal 1171 * replay from overwriting the (former) 1172 * indirect block if it gets reallocated as a 1173 * data block. This must happen in the same 1174 * transaction where the data blocks are 1175 * actually freed. 1176 */ 1177 ext4_free_blocks(handle, inode, NULL, nr, 1, 1178 EXT4_FREE_BLOCKS_METADATA| 1179 EXT4_FREE_BLOCKS_FORGET); 1180 1181 if (parent_bh) { 1182 /* 1183 * The block which we have just freed is 1184 * pointed to by an indirect block: journal it 1185 */ 1186 BUFFER_TRACE(parent_bh, "get_write_access"); 1187 if (!ext4_journal_get_write_access(handle, 1188 parent_bh)){ 1189 *p = 0; 1190 BUFFER_TRACE(parent_bh, 1191 "call ext4_handle_dirty_metadata"); 1192 ext4_handle_dirty_metadata(handle, 1193 inode, 1194 parent_bh); 1195 } 1196 } 1197 } 1198 } else { 1199 /* We have reached the bottom of the tree. */ 1200 BUFFER_TRACE(parent_bh, "free data blocks"); 1201 ext4_free_data(handle, inode, parent_bh, first, last); 1202 } 1203 } 1204 1205 void ext4_ind_truncate(handle_t *handle, struct inode *inode) 1206 { 1207 struct ext4_inode_info *ei = EXT4_I(inode); 1208 __le32 *i_data = ei->i_data; 1209 int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb); 1210 ext4_lblk_t offsets[4]; 1211 Indirect chain[4]; 1212 Indirect *partial; 1213 __le32 nr = 0; 1214 int n = 0; 1215 ext4_lblk_t last_block, max_block; 1216 unsigned blocksize = inode->i_sb->s_blocksize; 1217 1218 last_block = (inode->i_size + blocksize-1) 1219 >> EXT4_BLOCK_SIZE_BITS(inode->i_sb); 1220 max_block = (EXT4_SB(inode->i_sb)->s_bitmap_maxbytes + blocksize-1) 1221 >> EXT4_BLOCK_SIZE_BITS(inode->i_sb); 1222 1223 if (last_block != max_block) { 1224 n = ext4_block_to_path(inode, last_block, offsets, NULL); 1225 if (n == 0) 1226 return; 1227 } 1228 1229 ext4_es_remove_extent(inode, last_block, EXT_MAX_BLOCKS - last_block); 1230 1231 /* 1232 * The orphan list entry will now protect us from any crash which 1233 * occurs before the truncate completes, so it is now safe to propagate 1234 * the new, shorter inode size (held for now in i_size) into the 1235 * on-disk inode. We do this via i_disksize, which is the value which 1236 * ext4 *really* writes onto the disk inode. 1237 */ 1238 ei->i_disksize = inode->i_size; 1239 1240 if (last_block == max_block) { 1241 /* 1242 * It is unnecessary to free any data blocks if last_block is 1243 * equal to the indirect block limit. 1244 */ 1245 return; 1246 } else if (n == 1) { /* direct blocks */ 1247 ext4_free_data(handle, inode, NULL, i_data+offsets[0], 1248 i_data + EXT4_NDIR_BLOCKS); 1249 goto do_indirects; 1250 } 1251 1252 partial = ext4_find_shared(inode, n, offsets, chain, &nr); 1253 /* Kill the top of shared branch (not detached) */ 1254 if (nr) { 1255 if (partial == chain) { 1256 /* Shared branch grows from the inode */ 1257 ext4_free_branches(handle, inode, NULL, 1258 &nr, &nr+1, (chain+n-1) - partial); 1259 *partial->p = 0; 1260 /* 1261 * We mark the inode dirty prior to restart, 1262 * and prior to stop. No need for it here. 1263 */ 1264 } else { 1265 /* Shared branch grows from an indirect block */ 1266 BUFFER_TRACE(partial->bh, "get_write_access"); 1267 ext4_free_branches(handle, inode, partial->bh, 1268 partial->p, 1269 partial->p+1, (chain+n-1) - partial); 1270 } 1271 } 1272 /* Clear the ends of indirect blocks on the shared branch */ 1273 while (partial > chain) { 1274 ext4_free_branches(handle, inode, partial->bh, partial->p + 1, 1275 (__le32*)partial->bh->b_data+addr_per_block, 1276 (chain+n-1) - partial); 1277 BUFFER_TRACE(partial->bh, "call brelse"); 1278 brelse(partial->bh); 1279 partial--; 1280 } 1281 do_indirects: 1282 /* Kill the remaining (whole) subtrees */ 1283 switch (offsets[0]) { 1284 default: 1285 nr = i_data[EXT4_IND_BLOCK]; 1286 if (nr) { 1287 ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1); 1288 i_data[EXT4_IND_BLOCK] = 0; 1289 } 1290 case EXT4_IND_BLOCK: 1291 nr = i_data[EXT4_DIND_BLOCK]; 1292 if (nr) { 1293 ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2); 1294 i_data[EXT4_DIND_BLOCK] = 0; 1295 } 1296 case EXT4_DIND_BLOCK: 1297 nr = i_data[EXT4_TIND_BLOCK]; 1298 if (nr) { 1299 ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3); 1300 i_data[EXT4_TIND_BLOCK] = 0; 1301 } 1302 case EXT4_TIND_BLOCK: 1303 ; 1304 } 1305 } 1306 1307 /** 1308 * ext4_ind_remove_space - remove space from the range 1309 * @handle: JBD handle for this transaction 1310 * @inode: inode we are dealing with 1311 * @start: First block to remove 1312 * @end: One block after the last block to remove (exclusive) 1313 * 1314 * Free the blocks in the defined range (end is exclusive endpoint of 1315 * range). This is used by ext4_punch_hole(). 1316 */ 1317 int ext4_ind_remove_space(handle_t *handle, struct inode *inode, 1318 ext4_lblk_t start, ext4_lblk_t end) 1319 { 1320 struct ext4_inode_info *ei = EXT4_I(inode); 1321 __le32 *i_data = ei->i_data; 1322 int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb); 1323 ext4_lblk_t offsets[4], offsets2[4]; 1324 Indirect chain[4], chain2[4]; 1325 Indirect *partial, *partial2; 1326 ext4_lblk_t max_block; 1327 __le32 nr = 0, nr2 = 0; 1328 int n = 0, n2 = 0; 1329 unsigned blocksize = inode->i_sb->s_blocksize; 1330 1331 max_block = (EXT4_SB(inode->i_sb)->s_bitmap_maxbytes + blocksize-1) 1332 >> EXT4_BLOCK_SIZE_BITS(inode->i_sb); 1333 if (end >= max_block) 1334 end = max_block; 1335 if ((start >= end) || (start > max_block)) 1336 return 0; 1337 1338 n = ext4_block_to_path(inode, start, offsets, NULL); 1339 n2 = ext4_block_to_path(inode, end, offsets2, NULL); 1340 1341 BUG_ON(n > n2); 1342 1343 if ((n == 1) && (n == n2)) { 1344 /* We're punching only within direct block range */ 1345 ext4_free_data(handle, inode, NULL, i_data + offsets[0], 1346 i_data + offsets2[0]); 1347 return 0; 1348 } else if (n2 > n) { 1349 /* 1350 * Start and end are on a different levels so we're going to 1351 * free partial block at start, and partial block at end of 1352 * the range. If there are some levels in between then 1353 * do_indirects label will take care of that. 1354 */ 1355 1356 if (n == 1) { 1357 /* 1358 * Start is at the direct block level, free 1359 * everything to the end of the level. 1360 */ 1361 ext4_free_data(handle, inode, NULL, i_data + offsets[0], 1362 i_data + EXT4_NDIR_BLOCKS); 1363 goto end_range; 1364 } 1365 1366 1367 partial = ext4_find_shared(inode, n, offsets, chain, &nr); 1368 if (nr) { 1369 if (partial == chain) { 1370 /* Shared branch grows from the inode */ 1371 ext4_free_branches(handle, inode, NULL, 1372 &nr, &nr+1, (chain+n-1) - partial); 1373 *partial->p = 0; 1374 } else { 1375 /* Shared branch grows from an indirect block */ 1376 BUFFER_TRACE(partial->bh, "get_write_access"); 1377 ext4_free_branches(handle, inode, partial->bh, 1378 partial->p, 1379 partial->p+1, (chain+n-1) - partial); 1380 } 1381 } 1382 1383 /* 1384 * Clear the ends of indirect blocks on the shared branch 1385 * at the start of the range 1386 */ 1387 while (partial > chain) { 1388 ext4_free_branches(handle, inode, partial->bh, 1389 partial->p + 1, 1390 (__le32 *)partial->bh->b_data+addr_per_block, 1391 (chain+n-1) - partial); 1392 BUFFER_TRACE(partial->bh, "call brelse"); 1393 brelse(partial->bh); 1394 partial--; 1395 } 1396 1397 end_range: 1398 partial2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2); 1399 if (nr2) { 1400 if (partial2 == chain2) { 1401 /* 1402 * Remember, end is exclusive so here we're at 1403 * the start of the next level we're not going 1404 * to free. Everything was covered by the start 1405 * of the range. 1406 */ 1407 goto do_indirects; 1408 } 1409 } else { 1410 /* 1411 * ext4_find_shared returns Indirect structure which 1412 * points to the last element which should not be 1413 * removed by truncate. But this is end of the range 1414 * in punch_hole so we need to point to the next element 1415 */ 1416 partial2->p++; 1417 } 1418 1419 /* 1420 * Clear the ends of indirect blocks on the shared branch 1421 * at the end of the range 1422 */ 1423 while (partial2 > chain2) { 1424 ext4_free_branches(handle, inode, partial2->bh, 1425 (__le32 *)partial2->bh->b_data, 1426 partial2->p, 1427 (chain2+n2-1) - partial2); 1428 BUFFER_TRACE(partial2->bh, "call brelse"); 1429 brelse(partial2->bh); 1430 partial2--; 1431 } 1432 goto do_indirects; 1433 } 1434 1435 /* Punch happened within the same level (n == n2) */ 1436 partial = ext4_find_shared(inode, n, offsets, chain, &nr); 1437 partial2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2); 1438 1439 /* Free top, but only if partial2 isn't its subtree. */ 1440 if (nr) { 1441 int level = min(partial - chain, partial2 - chain2); 1442 int i; 1443 int subtree = 1; 1444 1445 for (i = 0; i <= level; i++) { 1446 if (offsets[i] != offsets2[i]) { 1447 subtree = 0; 1448 break; 1449 } 1450 } 1451 1452 if (!subtree) { 1453 if (partial == chain) { 1454 /* Shared branch grows from the inode */ 1455 ext4_free_branches(handle, inode, NULL, 1456 &nr, &nr+1, 1457 (chain+n-1) - partial); 1458 *partial->p = 0; 1459 } else { 1460 /* Shared branch grows from an indirect block */ 1461 BUFFER_TRACE(partial->bh, "get_write_access"); 1462 ext4_free_branches(handle, inode, partial->bh, 1463 partial->p, 1464 partial->p+1, 1465 (chain+n-1) - partial); 1466 } 1467 } 1468 } 1469 1470 if (!nr2) { 1471 /* 1472 * ext4_find_shared returns Indirect structure which 1473 * points to the last element which should not be 1474 * removed by truncate. But this is end of the range 1475 * in punch_hole so we need to point to the next element 1476 */ 1477 partial2->p++; 1478 } 1479 1480 while (partial > chain || partial2 > chain2) { 1481 int depth = (chain+n-1) - partial; 1482 int depth2 = (chain2+n2-1) - partial2; 1483 1484 if (partial > chain && partial2 > chain2 && 1485 partial->bh->b_blocknr == partial2->bh->b_blocknr) { 1486 /* 1487 * We've converged on the same block. Clear the range, 1488 * then we're done. 1489 */ 1490 ext4_free_branches(handle, inode, partial->bh, 1491 partial->p + 1, 1492 partial2->p, 1493 (chain+n-1) - partial); 1494 BUFFER_TRACE(partial->bh, "call brelse"); 1495 brelse(partial->bh); 1496 BUFFER_TRACE(partial2->bh, "call brelse"); 1497 brelse(partial2->bh); 1498 return 0; 1499 } 1500 1501 /* 1502 * The start and end partial branches may not be at the same 1503 * level even though the punch happened within one level. So, we 1504 * give them a chance to arrive at the same level, then walk 1505 * them in step with each other until we converge on the same 1506 * block. 1507 */ 1508 if (partial > chain && depth <= depth2) { 1509 ext4_free_branches(handle, inode, partial->bh, 1510 partial->p + 1, 1511 (__le32 *)partial->bh->b_data+addr_per_block, 1512 (chain+n-1) - partial); 1513 BUFFER_TRACE(partial->bh, "call brelse"); 1514 brelse(partial->bh); 1515 partial--; 1516 } 1517 if (partial2 > chain2 && depth2 <= depth) { 1518 ext4_free_branches(handle, inode, partial2->bh, 1519 (__le32 *)partial2->bh->b_data, 1520 partial2->p, 1521 (chain2+n2-1) - partial2); 1522 BUFFER_TRACE(partial2->bh, "call brelse"); 1523 brelse(partial2->bh); 1524 partial2--; 1525 } 1526 } 1527 return 0; 1528 1529 do_indirects: 1530 /* Kill the remaining (whole) subtrees */ 1531 switch (offsets[0]) { 1532 default: 1533 if (++n >= n2) 1534 return 0; 1535 nr = i_data[EXT4_IND_BLOCK]; 1536 if (nr) { 1537 ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1); 1538 i_data[EXT4_IND_BLOCK] = 0; 1539 } 1540 case EXT4_IND_BLOCK: 1541 if (++n >= n2) 1542 return 0; 1543 nr = i_data[EXT4_DIND_BLOCK]; 1544 if (nr) { 1545 ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2); 1546 i_data[EXT4_DIND_BLOCK] = 0; 1547 } 1548 case EXT4_DIND_BLOCK: 1549 if (++n >= n2) 1550 return 0; 1551 nr = i_data[EXT4_TIND_BLOCK]; 1552 if (nr) { 1553 ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3); 1554 i_data[EXT4_TIND_BLOCK] = 0; 1555 } 1556 case EXT4_TIND_BLOCK: 1557 ; 1558 } 1559 return 0; 1560 } 1561