// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 *
 * Architecture independence:
 *   Copyright (c) 2005, Bull S.A.
 *   Written by Pierre Peiffer <pierre.peiffer@bull.net>
 */

/*
 * Extents support for EXT4
 *
 * TODO:
 *   - ext4*_error() should be used in some situations
 *   - analyze all BUG()/BUG_ON(), use -EIO where appropriate
 *   - smart tree reduction
 */

#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/fiemap.h>
#include <linux/backing-dev.h>
#include "ext4_jbd2.h"
#include "ext4_extents.h"
#include "xattr.h"

#include <trace/events/ext4.h>

/*
 * used by extent splitting.
 */
#define EXT4_EXT_MAY_ZEROOUT	0x1  /* safe to zeroout if split fails \
					due to ENOSPC */
#define EXT4_EXT_MARK_UNWRIT1	0x2  /* mark first half unwritten */
#define EXT4_EXT_MARK_UNWRIT2	0x4  /* mark second half unwritten */

#define EXT4_EXT_DATA_VALID1	0x8  /* first half contains valid data */
#define EXT4_EXT_DATA_VALID2	0x10 /* second half contains valid data */

static __le32 ext4_extent_block_csum(struct inode *inode,
				     struct ext4_extent_header *eh)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	__u32 csum;

	csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)eh,
			   EXT4_EXTENT_TAIL_OFFSET(eh));
	return cpu_to_le32(csum);
}

static int ext4_extent_block_csum_verify(struct inode *inode,
					 struct ext4_extent_header *eh)
{
	struct ext4_extent_tail *et;

	if (!ext4_has_metadata_csum(inode->i_sb))
		return 1;

	et = find_ext4_extent_tail(eh);
	if (et->et_checksum != ext4_extent_block_csum(inode, eh))
		return 0;
	return 1;
}

static void ext4_extent_block_csum_set(struct inode *inode,
				       struct ext4_extent_header *eh)
{
	struct ext4_extent_tail *et;

	if (!ext4_has_metadata_csum(inode->i_sb))
		return;

	et = find_ext4_extent_tail(eh);
	et->et_checksum = ext4_extent_block_csum(inode, eh);
}

static int ext4_split_extent(handle_t *handle,
			     struct inode *inode,
			     struct ext4_ext_path **ppath,
			     struct ext4_map_blocks *map,
			     int split_flag,
			     int flags);

static int ext4_split_extent_at(handle_t *handle,
				struct inode *inode,
				struct ext4_ext_path **ppath,
				ext4_lblk_t split,
				int split_flag,
				int flags);

static int ext4_find_delayed_extent(struct inode *inode,
				    struct extent_status *newes);
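/*
 * Make sure @handle has at least @needed buffer credits left, extending the
 * running transaction or restarting it when necessary.  Returns 0 when the
 * handle can be used as-is, -EAGAIN when the transaction was restarted (so
 * the caller must revalidate its path), or a negative error code.
 */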
static int ext4_ext_truncate_extend_restart(handle_t *handle,
					    struct inode *inode,
					    int needed)
{
	int err;

	if (!ext4_handle_valid(handle))
		return 0;
	if (handle->h_buffer_credits >= needed)
		return 0;
	/*
	 * If we need to extend the journal get a few extra blocks
	 * while we're at it for efficiency's sake.
	 */
	needed += 3;
	err = ext4_journal_extend(handle, needed - handle->h_buffer_credits);
	if (err <= 0)
		return err;
	err = ext4_truncate_restart_trans(handle, inode, needed);
	if (err == 0)
		err = -EAGAIN;

	return err;
}

/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 */
static int ext4_ext_get_access(handle_t *handle, struct inode *inode,
			       struct ext4_ext_path *path)
{
	if (path->p_bh) {
		/* path points to block */
		BUFFER_TRACE(path->p_bh, "get_write_access");
		return ext4_journal_get_write_access(handle, path->p_bh);
	}
	/* path points to leaf/index in inode body */
	/* we use in-core data, no need to protect them */
	return 0;
}

/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 *  - EIO
 */
int __ext4_ext_dirty(const char *where, unsigned int line, handle_t *handle,
		     struct inode *inode, struct ext4_ext_path *path)
{
	int err;

	WARN_ON(!rwsem_is_locked(&EXT4_I(inode)->i_data_sem));
	if (path->p_bh) {
		ext4_extent_block_csum_set(inode, ext_block_hdr(path->p_bh));
		/* path points to block */
		err = __ext4_handle_dirty_metadata(where, line, handle,
						   inode, path->p_bh);
	} else {
		/* path points to leaf/index in inode body */
		err = ext4_mark_inode_dirty(handle, inode);
	}
	return err;
}
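/*
 * ext4_ext_find_goal:
 * returns a preferred physical block for allocating logical block @block,
 * derived from the extent found in @path, from the index block holding it,
 * or (failing both) from the inode's block group.
 */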
static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
				       struct ext4_ext_path *path,
				       ext4_lblk_t block)
{
	if (path) {
		int depth = path->p_depth;
		struct ext4_extent *ex;

		/*
		 * Try to predict block placement assuming that we are
		 * filling in a file which will eventually be
		 * non-sparse --- i.e., in the case of libbfd writing
		 * an ELF object sections out-of-order but in a way
		 * that eventually results in a contiguous object or
		 * executable file, or some database extending a table
		 * space file.  However, this is actually somewhat
		 * non-ideal if we are writing a sparse file such as
		 * qemu or KVM writing a raw image file that is going
		 * to stay fairly sparse, since it will end up
		 * fragmenting the file system's free space.  Maybe we
		 * should have some heuristics or some way to allow
		 * userspace to pass a hint to the file system,
		 * especially if the latter case turns out to be
		 * common.
		 */
		ex = path[depth].p_ext;
		if (ex) {
			ext4_fsblk_t ext_pblk = ext4_ext_pblock(ex);
			ext4_lblk_t ext_block = le32_to_cpu(ex->ee_block);

			if (block > ext_block)
				return ext_pblk + (block - ext_block);
			else
				return ext_pblk - (ext_block - block);
		}

		/* it looks like index is empty;
		 * try to find starting block from index itself */
		if (path[depth].p_bh)
			return path[depth].p_bh->b_blocknr;
	}

	/* OK. use inode's group */
	return ext4_inode_to_goal_block(inode);
}

/*
 * Allocation for a metadata block
 */
static ext4_fsblk_t
ext4_ext_new_meta_block(handle_t *handle, struct inode *inode,
			struct ext4_ext_path *path,
			struct ext4_extent *ex, int *err, unsigned int flags)
{
	ext4_fsblk_t goal, newblock;

	goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block));
	newblock = ext4_new_meta_blocks(handle, inode, goal, flags,
					NULL, err);
	return newblock;
}

static inline int ext4_ext_space_block(struct inode *inode, int check)
{
	int size;

	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 6)
		size = 6;
#endif
	return size;
}

static inline int ext4_ext_space_block_idx(struct inode *inode, int check)
{
	int size;

	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 5)
		size = 5;
#endif
	return size;
}

static inline int ext4_ext_space_root(struct inode *inode, int check)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 3)
		size = 3;
#endif
	return size;
}

static inline int ext4_ext_space_root_idx(struct inode *inode, int check)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 4)
		size = 4;
#endif
	return size;
}

static inline int
ext4_force_split_extent_at(handle_t *handle, struct inode *inode,
			   struct ext4_ext_path **ppath, ext4_lblk_t lblk,
			   int nofail)
{
	struct ext4_ext_path *path = *ppath;
	int unwritten = ext4_ext_is_unwritten(path[path->p_depth].p_ext);

	return ext4_split_extent_at(handle, inode, ppath, lblk, unwritten ?
			EXT4_EXT_MARK_UNWRIT1|EXT4_EXT_MARK_UNWRIT2 : 0,
			EXT4_EX_NOCACHE | EXT4_GET_BLOCKS_PRE_IO |
			(nofail ? EXT4_GET_BLOCKS_METADATA_NOFAIL : 0));
}

/*
 * Calculate the number of metadata blocks needed
 * to allocate @blocks
 * Worst case is one block per extent
 */
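/*
 * For example (a sketch, assuming 4 KiB blocks and the 12-byte on-disk
 * extent header and index entry sizes): idxs = (4096 - 12) / 12 = 340, so
 * a contiguous delalloc run needs one new index block roughly every 340
 * leaf blocks, another at every 340^2, and so on.
 */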
int ext4_ext_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	int idxs;

	idxs = ((inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
		/ sizeof(struct ext4_extent_idx));

	/*
	 * If the new delayed allocation block is contiguous with the
	 * previous da block, it can share index blocks with the
	 * previous block, so we only need to allocate a new index
	 * block every idxs leaf blocks.  At idxs**2 blocks, we need
	 * an additional index block, and at idxs**3 blocks, yet
	 * another index block.
	 */
	if (ei->i_da_metadata_calc_len &&
	    ei->i_da_metadata_calc_last_lblock+1 == lblock) {
		int num = 0;

		if ((ei->i_da_metadata_calc_len % idxs) == 0)
			num++;
		if ((ei->i_da_metadata_calc_len % (idxs*idxs)) == 0)
			num++;
		if ((ei->i_da_metadata_calc_len % (idxs*idxs*idxs)) == 0) {
			num++;
			ei->i_da_metadata_calc_len = 0;
		} else
			ei->i_da_metadata_calc_len++;
		ei->i_da_metadata_calc_last_lblock++;
		return num;
	}

	/*
	 * In the worst case we need a new set of index blocks at
	 * every level of the inode's extent tree.
	 */
	ei->i_da_metadata_calc_len = 1;
	ei->i_da_metadata_calc_last_lblock = lblock;
	return ext_depth(inode) + 1;
}

static int
ext4_ext_max_entries(struct inode *inode, int depth)
{
	int max;

	if (depth == ext_depth(inode)) {
		if (depth == 0)
			max = ext4_ext_space_root(inode, 1);
		else
			max = ext4_ext_space_root_idx(inode, 1);
	} else {
		if (depth == 0)
			max = ext4_ext_space_block(inode, 1);
		else
			max = ext4_ext_space_block_idx(inode, 1);
	}

	return max;
}

static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
{
	ext4_fsblk_t block = ext4_ext_pblock(ext);
	int len = ext4_ext_get_actual_len(ext);
	ext4_lblk_t lblock = le32_to_cpu(ext->ee_block);

	/*
	 * We allow neither:
	 *  - zero length
	 *  - overflow/wrap-around
	 */
	if (lblock + len <= lblock)
		return 0;
	return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len);
}

static int ext4_valid_extent_idx(struct inode *inode,
				 struct ext4_extent_idx *ext_idx)
{
	ext4_fsblk_t block = ext4_idx_pblock(ext_idx);

	return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, 1);
}

static int ext4_valid_extent_entries(struct inode *inode,
				     struct ext4_extent_header *eh,
				     int depth)
{
	unsigned short entries;

	if (eh->eh_entries == 0)
		return 1;

	entries = le16_to_cpu(eh->eh_entries);

	if (depth == 0) {
		/* leaf entries */
		struct ext4_extent *ext = EXT_FIRST_EXTENT(eh);
		struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
		ext4_fsblk_t pblock = 0;
		ext4_lblk_t lblock = 0;
		ext4_lblk_t prev = 0;
		int len = 0;

		while (entries) {
			if (!ext4_valid_extent(inode, ext))
				return 0;

			/* Check for overlapping extents */
			lblock = le32_to_cpu(ext->ee_block);
			len = ext4_ext_get_actual_len(ext);
			if ((lblock <= prev) && prev) {
				pblock = ext4_ext_pblock(ext);
				es->s_last_error_block = cpu_to_le64(pblock);
				return 0;
			}
			ext++;
			entries--;
			prev = lblock + len - 1;
		}
	} else {
		struct ext4_extent_idx *ext_idx = EXT_FIRST_INDEX(eh);

		while (entries) {
			if (!ext4_valid_extent_idx(inode, ext_idx))
				return 0;
			ext_idx++;
			entries--;
		}
	}
	return 1;
}
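/*
 * Sanity-check one extent tree node: magic, depth, entry counts, the
 * entries themselves and (for non-root nodes) the block checksum.
 * Returns 0, -EFSCORRUPTED or -EFSBADCRC.
 */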
static int __ext4_ext_check(const char *function, unsigned int line,
			    struct inode *inode, struct ext4_extent_header *eh,
			    int depth, ext4_fsblk_t pblk)
{
	const char *error_msg;
	int max = 0, err = -EFSCORRUPTED;

	if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) {
		error_msg = "invalid magic";
		goto corrupted;
	}
	if (unlikely(le16_to_cpu(eh->eh_depth) != depth)) {
		error_msg = "unexpected eh_depth";
		goto corrupted;
	}
	if (unlikely(eh->eh_max == 0)) {
		error_msg = "invalid eh_max";
		goto corrupted;
	}
	max = ext4_ext_max_entries(inode, depth);
	if (unlikely(le16_to_cpu(eh->eh_max) > max)) {
		error_msg = "too large eh_max";
		goto corrupted;
	}
	if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) {
		error_msg = "invalid eh_entries";
		goto corrupted;
	}
	if (!ext4_valid_extent_entries(inode, eh, depth)) {
		error_msg = "invalid extent entries";
		goto corrupted;
	}
	if (unlikely(depth > 32)) {
		error_msg = "too large eh_depth";
		goto corrupted;
	}
	/* Verify checksum on non-root extent tree nodes */
	if (ext_depth(inode) != depth &&
	    !ext4_extent_block_csum_verify(inode, eh)) {
		error_msg = "extent tree corrupted";
		err = -EFSBADCRC;
		goto corrupted;
	}
	return 0;

corrupted:
	ext4_error_inode(inode, function, line, 0,
			 "pblk %llu bad header/extent: %s - magic %x, "
			 "entries %u, max %u(%u), depth %u(%u)",
			 (unsigned long long) pblk, error_msg,
			 le16_to_cpu(eh->eh_magic),
			 le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max),
			 max, le16_to_cpu(eh->eh_depth), depth);
	return err;
}

#define ext4_ext_check(inode, eh, depth, pblk)			\
	__ext4_ext_check(__func__, __LINE__, (inode), (eh), (depth), (pblk))

int ext4_ext_check_inode(struct inode *inode)
{
	return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode), 0);
}

static struct buffer_head *
__read_extent_tree_block(const char *function, unsigned int line,
			 struct inode *inode, ext4_fsblk_t pblk, int depth,
			 int flags)
{
	struct buffer_head *bh;
	int err;

	bh = sb_getblk_gfp(inode->i_sb, pblk, __GFP_MOVABLE | GFP_NOFS);
	if (unlikely(!bh))
		return ERR_PTR(-ENOMEM);

	if (!bh_uptodate_or_lock(bh)) {
		trace_ext4_ext_load_extent(inode, pblk, _RET_IP_);
		err = bh_submit_read(bh);
		if (err < 0)
			goto errout;
	}
	if (buffer_verified(bh) && !(flags & EXT4_EX_FORCE_CACHE))
		return bh;
	if (!ext4_has_feature_journal(inode->i_sb) ||
	    (inode->i_ino !=
	     le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_journal_inum))) {
		err = __ext4_ext_check(function, line, inode,
				       ext_block_hdr(bh), depth, pblk);
		if (err)
			goto errout;
	}
	set_buffer_verified(bh);
	/*
	 * If this is a leaf block, cache all of its entries
	 */
	if (!(flags & EXT4_EX_NOCACHE) && depth == 0) {
		struct ext4_extent_header *eh = ext_block_hdr(bh);
		struct ext4_extent *ex = EXT_FIRST_EXTENT(eh);
		ext4_lblk_t prev = 0;
		int i;

		for (i = le16_to_cpu(eh->eh_entries); i > 0; i--, ex++) {
			unsigned int status = EXTENT_STATUS_WRITTEN;
			ext4_lblk_t lblk = le32_to_cpu(ex->ee_block);
			int len = ext4_ext_get_actual_len(ex);

			if (prev && (prev != lblk))
				ext4_es_cache_extent(inode, prev,
						     lblk - prev, ~0,
						     EXTENT_STATUS_HOLE);

			if (ext4_ext_is_unwritten(ex))
				status = EXTENT_STATUS_UNWRITTEN;
			ext4_es_cache_extent(inode, lblk, len,
					     ext4_ext_pblock(ex), status);
			prev = lblk + len;
		}
	}
	return bh;
errout:
	put_bh(bh);
	return ERR_PTR(err);

}

#define read_extent_tree_block(inode, pblk, depth, flags)		\
	__read_extent_tree_block(__func__, __LINE__, (inode), (pblk),	\
				 (depth), (flags))

/*
 * This function is called to cache a file's extent information in the
 * extent status tree
 */
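/*
 * The tree is walked iteratively: path[] doubles as an explicit stack,
 * descending through index blocks and popping back up once a leaf (or the
 * end of an index block) is reached.
 */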
int ext4_ext_precache(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_ext_path *path = NULL;
	struct buffer_head *bh;
	int i = 0, depth, ret = 0;

	if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		return 0;	/* not an extent-mapped inode */

	down_read(&ei->i_data_sem);
	depth = ext_depth(inode);

	path = kcalloc(depth + 1, sizeof(struct ext4_ext_path),
		       GFP_NOFS);
	if (path == NULL) {
		up_read(&ei->i_data_sem);
		return -ENOMEM;
	}

	/* Don't cache anything if there are no external extent blocks */
	if (depth == 0)
		goto out;
	path[0].p_hdr = ext_inode_hdr(inode);
	ret = ext4_ext_check(inode, path[0].p_hdr, depth, 0);
	if (ret)
		goto out;
	path[0].p_idx = EXT_FIRST_INDEX(path[0].p_hdr);
	while (i >= 0) {
		/*
		 * If this is a leaf block or we've reached the end of
		 * the index block, go up
		 */
		if ((i == depth) ||
		    path[i].p_idx > EXT_LAST_INDEX(path[i].p_hdr)) {
			brelse(path[i].p_bh);
			path[i].p_bh = NULL;
			i--;
			continue;
		}
		bh = read_extent_tree_block(inode,
					    ext4_idx_pblock(path[i].p_idx++),
					    depth - i - 1,
					    EXT4_EX_FORCE_CACHE);
		if (IS_ERR(bh)) {
			ret = PTR_ERR(bh);
			break;
		}
		i++;
		path[i].p_bh = bh;
		path[i].p_hdr = ext_block_hdr(bh);
		path[i].p_idx = EXT_FIRST_INDEX(path[i].p_hdr);
	}
	ext4_set_inode_state(inode, EXT4_STATE_EXT_PRECACHED);
out:
	up_read(&ei->i_data_sem);
	ext4_ext_drop_refs(path);
	kfree(path);
	return ret;
}

#ifdef EXT_DEBUG
static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
{
	int k, l = path->p_depth;

	ext_debug("path:");
	for (k = 0; k <= l; k++, path++) {
		if (path->p_idx) {
			ext_debug(" %d->%llu", le32_to_cpu(path->p_idx->ei_block),
				  ext4_idx_pblock(path->p_idx));
		} else if (path->p_ext) {
			ext_debug(" %d:[%d]%d:%llu ",
				  le32_to_cpu(path->p_ext->ee_block),
				  ext4_ext_is_unwritten(path->p_ext),
				  ext4_ext_get_actual_len(path->p_ext),
				  ext4_ext_pblock(path->p_ext));
		} else
			ext_debug(" []");
	}
	ext_debug("\n");
}

static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
{
	int depth = ext_depth(inode);
	struct ext4_extent_header *eh;
	struct ext4_extent *ex;
	int i;

	if (!path)
		return;

	eh = path[depth].p_hdr;
	ex = EXT_FIRST_EXTENT(eh);

	ext_debug("Displaying leaf extents for inode %lu\n", inode->i_ino);

	for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) {
		ext_debug("%d:[%d]%d:%llu ", le32_to_cpu(ex->ee_block),
			  ext4_ext_is_unwritten(ex),
			  ext4_ext_get_actual_len(ex), ext4_ext_pblock(ex));
	}
	ext_debug("\n");
}

static void ext4_ext_show_move(struct inode *inode, struct ext4_ext_path *path,
			       ext4_fsblk_t newblock, int level)
{
	int depth = ext_depth(inode);
	struct ext4_extent *ex;

	if (depth != level) {
		struct ext4_extent_idx *idx;

		idx = path[level].p_idx;
		while (idx <= EXT_MAX_INDEX(path[level].p_hdr)) {
			ext_debug("%d: move %d:%llu in new index %llu\n", level,
				  le32_to_cpu(idx->ei_block),
				  ext4_idx_pblock(idx),
				  newblock);
			idx++;
		}

		return;
	}

	ex = path[depth].p_ext;
	while (ex <= EXT_MAX_EXTENT(path[depth].p_hdr)) {
		ext_debug("move %d:%llu:[%d]%d in new leaf %llu\n",
			  le32_to_cpu(ex->ee_block),
			  ext4_ext_pblock(ex),
			  ext4_ext_is_unwritten(ex),
			  ext4_ext_get_actual_len(ex),
			  newblock);
		ex++;
	}
}

#else
#define ext4_ext_show_path(inode, path)
#define ext4_ext_show_leaf(inode, path)
#define ext4_ext_show_move(inode, path, newblock, level)
#endif
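/*
 * ext4_ext_drop_refs:
 * release the buffer heads referenced by @path; a NULL @path is a no-op.
 */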
void ext4_ext_drop_refs(struct ext4_ext_path *path)
{
	int depth, i;

	if (!path)
		return;
	depth = path->p_depth;
	for (i = 0; i <= depth; i++, path++)
		if (path->p_bh) {
			brelse(path->p_bh);
			path->p_bh = NULL;
		}
}

/*
 * ext4_ext_binsearch_idx:
 * binary search for the closest index of the given block
 * the header must be checked before calling this
 */
static void
ext4_ext_binsearch_idx(struct inode *inode,
		       struct ext4_ext_path *path, ext4_lblk_t block)
{
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent_idx *r, *l, *m;


	ext_debug("binsearch for %u(idx): ", block);

	l = EXT_FIRST_INDEX(eh) + 1;
	r = EXT_LAST_INDEX(eh);
	while (l <= r) {
		m = l + (r - l) / 2;
		if (block < le32_to_cpu(m->ei_block))
			r = m - 1;
		else
			l = m + 1;
		ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ei_block),
			  m, le32_to_cpu(m->ei_block),
			  r, le32_to_cpu(r->ei_block));
	}

	path->p_idx = l - 1;
	ext_debug(" -> %u->%lld ", le32_to_cpu(path->p_idx->ei_block),
		  ext4_idx_pblock(path->p_idx));

#ifdef CHECK_BINSEARCH
	{
		struct ext4_extent_idx *chix, *ix;
		int k;

		chix = ix = EXT_FIRST_INDEX(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) {
			if (k != 0 &&
			    le32_to_cpu(ix->ei_block) <= le32_to_cpu(ix[-1].ei_block)) {
				printk(KERN_DEBUG "k=%d, ix=0x%p, "
				       "first=0x%p\n", k,
				       ix, EXT_FIRST_INDEX(eh));
				printk(KERN_DEBUG "%u <= %u\n",
				       le32_to_cpu(ix->ei_block),
				       le32_to_cpu(ix[-1].ei_block));
			}
			BUG_ON(k && le32_to_cpu(ix->ei_block)
			       <= le32_to_cpu(ix[-1].ei_block));
			if (block < le32_to_cpu(ix->ei_block))
				break;
			chix = ix;
		}
		BUG_ON(chix != path->p_idx);
	}
#endif

}

/*
 * ext4_ext_binsearch:
 * binary search for closest extent of the given block
 * the header must be checked before calling this
 */
static void
ext4_ext_binsearch(struct inode *inode,
		   struct ext4_ext_path *path, ext4_lblk_t block)
{
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent *r, *l, *m;

	if (eh->eh_entries == 0) {
		/*
		 * this leaf is empty:
		 * we get such a leaf in split/add case
		 */
		return;
	}

	ext_debug("binsearch for %u: ", block);

	l = EXT_FIRST_EXTENT(eh) + 1;
	r = EXT_LAST_EXTENT(eh);

	while (l <= r) {
		m = l + (r - l) / 2;
		if (block < le32_to_cpu(m->ee_block))
			r = m - 1;
		else
			l = m + 1;
		ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ee_block),
			  m, le32_to_cpu(m->ee_block),
			  r, le32_to_cpu(r->ee_block));
	}

	path->p_ext = l - 1;
	ext_debug(" -> %d:%llu:[%d]%d ",
		  le32_to_cpu(path->p_ext->ee_block),
		  ext4_ext_pblock(path->p_ext),
		  ext4_ext_is_unwritten(path->p_ext),
		  ext4_ext_get_actual_len(path->p_ext));

#ifdef CHECK_BINSEARCH
	{
		struct ext4_extent *chex, *ex;
		int k;

		chex = ex = EXT_FIRST_EXTENT(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) {
			BUG_ON(k && le32_to_cpu(ex->ee_block)
			       <= le32_to_cpu(ex[-1].ee_block));
			if (block < le32_to_cpu(ex->ee_block))
				break;
			chex = ex;
		}
		BUG_ON(chex != path->p_ext);
	}
#endif

}

int ext4_ext_tree_init(handle_t *handle, struct inode *inode)
{
	struct ext4_extent_header *eh;

	eh = ext_inode_hdr(inode);
	eh->eh_depth = 0;
	eh->eh_entries = 0;
	eh->eh_magic = EXT4_EXT_MAGIC;
	eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode, 0));
	ext4_mark_inode_dirty(handle, inode);
	return 0;
}
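/*
 * ext4_find_extent:
 * descend from the root to the leaf covering @block and return the path.
 * On success the caller owns the path and must release it with
 * ext4_ext_drop_refs() + kfree() (or by passing it back via @orig_path);
 * on failure an ERR_PTR is returned and *orig_path, if given, is cleared.
 */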
struct ext4_ext_path *
ext4_find_extent(struct inode *inode, ext4_lblk_t block,
		 struct ext4_ext_path **orig_path, int flags)
{
	struct ext4_extent_header *eh;
	struct buffer_head *bh;
	struct ext4_ext_path *path = orig_path ? *orig_path : NULL;
	short int depth, i, ppos = 0;
	int ret;

	eh = ext_inode_hdr(inode);
	depth = ext_depth(inode);
	if (depth < 0 || depth > EXT4_MAX_EXTENT_DEPTH) {
		EXT4_ERROR_INODE(inode, "inode has invalid extent depth: %d",
				 depth);
		ret = -EFSCORRUPTED;
		goto err;
	}

	if (path) {
		ext4_ext_drop_refs(path);
		if (depth > path[0].p_maxdepth) {
			kfree(path);
			*orig_path = path = NULL;
		}
	}
	if (!path) {
		/* account possible depth increase */
		path = kcalloc(depth + 2, sizeof(struct ext4_ext_path),
			       GFP_NOFS);
		if (unlikely(!path))
			return ERR_PTR(-ENOMEM);
		path[0].p_maxdepth = depth + 1;
	}
	path[0].p_hdr = eh;
	path[0].p_bh = NULL;

	i = depth;
	/* walk through the tree */
	while (i) {
		ext_debug("depth %d: num %d, max %d\n",
			  ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));

		ext4_ext_binsearch_idx(inode, path + ppos, block);
		path[ppos].p_block = ext4_idx_pblock(path[ppos].p_idx);
		path[ppos].p_depth = i;
		path[ppos].p_ext = NULL;

		bh = read_extent_tree_block(inode, path[ppos].p_block, --i,
					    flags);
		if (IS_ERR(bh)) {
			ret = PTR_ERR(bh);
			goto err;
		}

		eh = ext_block_hdr(bh);
		ppos++;
		path[ppos].p_bh = bh;
		path[ppos].p_hdr = eh;
	}

	path[ppos].p_depth = i;
	path[ppos].p_ext = NULL;
	path[ppos].p_idx = NULL;

	/* find extent */
	ext4_ext_binsearch(inode, path + ppos, block);
	/* if not an empty leaf */
	if (path[ppos].p_ext)
		path[ppos].p_block = ext4_ext_pblock(path[ppos].p_ext);

	ext4_ext_show_path(inode, path);

	return path;

err:
	ext4_ext_drop_refs(path);
	kfree(path);
	if (orig_path)
		*orig_path = NULL;
	return ERR_PTR(ret);
}

/*
 * ext4_ext_insert_index:
 * insert new index [@logical;@ptr] into the block at @curp;
 * check where to insert: before @curp or after @curp
 */
static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
				 struct ext4_ext_path *curp,
				 int logical, ext4_fsblk_t ptr)
{
	struct ext4_extent_idx *ix;
	int len, err;

	err = ext4_ext_get_access(handle, inode, curp);
	if (err)
		return err;

	if (unlikely(logical == le32_to_cpu(curp->p_idx->ei_block))) {
		EXT4_ERROR_INODE(inode,
				 "logical %d == ei_block %d!",
				 logical, le32_to_cpu(curp->p_idx->ei_block));
		return -EFSCORRUPTED;
	}

	if (unlikely(le16_to_cpu(curp->p_hdr->eh_entries)
		     >= le16_to_cpu(curp->p_hdr->eh_max))) {
		EXT4_ERROR_INODE(inode,
				 "eh_entries %d >= eh_max %d!",
				 le16_to_cpu(curp->p_hdr->eh_entries),
				 le16_to_cpu(curp->p_hdr->eh_max));
		return -EFSCORRUPTED;
	}

	if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
		/* insert after */
		ext_debug("insert new index %d after: %llu\n", logical, ptr);
		ix = curp->p_idx + 1;
	} else {
		/* insert before */
		ext_debug("insert new index %d before: %llu\n", logical, ptr);
		ix = curp->p_idx;
	}
	len = EXT_LAST_INDEX(curp->p_hdr) - ix + 1;
	BUG_ON(len < 0);
	if (len > 0) {
		ext_debug("insert new index %d: "
			  "move %d indices from 0x%p to 0x%p\n",
			  logical, len, ix, ix + 1);
		memmove(ix + 1, ix, len * sizeof(struct ext4_extent_idx));
	}

	if (unlikely(ix > EXT_MAX_INDEX(curp->p_hdr))) {
		EXT4_ERROR_INODE(inode, "ix > EXT_MAX_INDEX!");
		return -EFSCORRUPTED;
	}

	ix->ei_block = cpu_to_le32(logical);
	ext4_idx_store_pblock(ix, ptr);
	le16_add_cpu(&curp->p_hdr->eh_entries, 1);

	if (unlikely(ix > EXT_LAST_INDEX(curp->p_hdr))) {
		EXT4_ERROR_INODE(inode, "ix > EXT_LAST_INDEX!");
		return -EFSCORRUPTED;
	}

	err = ext4_ext_dirty(handle, inode, curp);
	ext4_std_error(inode->i_sb, err);

	return err;
}

/*
 * ext4_ext_split:
 * inserts new subtree into the path, using free index entry
 * at depth @at:
 * - allocates all needed blocks (new leaf and all intermediate index blocks)
 * - makes decision where to split
 * - moves remaining extents and index entries (right to the split point)
 *   into the newly allocated blocks
 * - initializes subtree
 */
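/*
 * For example (a sketch): with depth 3 and a free index entry at @at = 1,
 * depth - at = 2 blocks are allocated: one new leaf plus one intermediate
 * index block, and finally a new index entry is inserted at depth 1.
 */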
static int ext4_ext_split(handle_t *handle, struct inode *inode,
			  unsigned int flags,
			  struct ext4_ext_path *path,
			  struct ext4_extent *newext, int at)
{
	struct buffer_head *bh = NULL;
	int depth = ext_depth(inode);
	struct ext4_extent_header *neh;
	struct ext4_extent_idx *fidx;
	int i = at, k, m, a;
	ext4_fsblk_t newblock, oldblock;
	__le32 border;
	ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */
	int err = 0;
	size_t ext_size = 0;

	/* make decision: where to split? */
	/* FIXME: now decision is simplest: at current extent */

	/* if current leaf will be split, then we should use
	 * border from split point */
	if (unlikely(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr))) {
		EXT4_ERROR_INODE(inode, "p_ext > EXT_MAX_EXTENT!");
		return -EFSCORRUPTED;
	}
	if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
		border = path[depth].p_ext[1].ee_block;
		ext_debug("leaf will be split."
			  " next leaf starts at %d\n",
			  le32_to_cpu(border));
	} else {
		border = newext->ee_block;
		ext_debug("leaf will be added."
			  " next leaf starts at %d\n",
			  le32_to_cpu(border));
	}

	/*
	 * If error occurs, then we break processing
	 * and mark filesystem read-only. index won't
	 * be inserted and tree will be in consistent
	 * state. Next mount will repair buffers too.
	 */

	/*
	 * Get array to track all allocated blocks.
	 * We need this to handle errors and free blocks
	 * upon them.
	 */
	ablocks = kcalloc(depth, sizeof(ext4_fsblk_t), GFP_NOFS);
	if (!ablocks)
		return -ENOMEM;

	/* allocate all needed blocks */
	ext_debug("allocate %d blocks for indexes/leaf\n", depth - at);
	for (a = 0; a < depth - at; a++) {
		newblock = ext4_ext_new_meta_block(handle, inode, path,
						   newext, &err, flags);
		if (newblock == 0)
			goto cleanup;
		ablocks[a] = newblock;
	}

	/* initialize new leaf */
	newblock = ablocks[--a];
	if (unlikely(newblock == 0)) {
		EXT4_ERROR_INODE(inode, "newblock == 0!");
		err = -EFSCORRUPTED;
		goto cleanup;
	}
	bh = sb_getblk_gfp(inode->i_sb, newblock, __GFP_MOVABLE | GFP_NOFS);
	if (unlikely(!bh)) {
		err = -ENOMEM;
		goto cleanup;
	}
	lock_buffer(bh);

	err = ext4_journal_get_create_access(handle, bh);
	if (err)
		goto cleanup;

	neh = ext_block_hdr(bh);
	neh->eh_entries = 0;
	neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
	neh->eh_magic = EXT4_EXT_MAGIC;
	neh->eh_depth = 0;

	/* move remainder of path[depth] to the new leaf */
	if (unlikely(path[depth].p_hdr->eh_entries !=
		     path[depth].p_hdr->eh_max)) {
		EXT4_ERROR_INODE(inode, "eh_entries %d != eh_max %d!",
				 path[depth].p_hdr->eh_entries,
				 path[depth].p_hdr->eh_max);
		err = -EFSCORRUPTED;
		goto cleanup;
	}
	/* start copy from next extent */
	m = EXT_MAX_EXTENT(path[depth].p_hdr) - path[depth].p_ext++;
	ext4_ext_show_move(inode, path, newblock, depth);
	if (m) {
		struct ext4_extent *ex;
		ex = EXT_FIRST_EXTENT(neh);
		memmove(ex, path[depth].p_ext, sizeof(struct ext4_extent) * m);
		le16_add_cpu(&neh->eh_entries, m);
	}

	/* zero out unused area in the extent block */
	ext_size = sizeof(struct ext4_extent_header) +
		sizeof(struct ext4_extent) * le16_to_cpu(neh->eh_entries);
	memset(bh->b_data + ext_size, 0, inode->i_sb->s_blocksize - ext_size);
	ext4_extent_block_csum_set(inode, neh);
	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	err = ext4_handle_dirty_metadata(handle, inode, bh);
	if (err)
		goto cleanup;
	brelse(bh);
	bh = NULL;

	/* correct old leaf */
	if (m) {
		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			goto cleanup;
		le16_add_cpu(&path[depth].p_hdr->eh_entries, -m);
		err = ext4_ext_dirty(handle, inode, path + depth);
		if (err)
			goto cleanup;

	}

	/* create intermediate indexes */
	k = depth - at - 1;
	if (unlikely(k < 0)) {
		EXT4_ERROR_INODE(inode, "k %d < 0!", k);
		err = -EFSCORRUPTED;
		goto cleanup;
	}
	if (k)
		ext_debug("create %d intermediate indices\n", k);
	/* insert new index into current index block */
	/* current depth stored in i var */
	i = depth - 1;
	while (k--) {
		oldblock = newblock;
		newblock = ablocks[--a];
		bh = sb_getblk(inode->i_sb, newblock);
		if (unlikely(!bh)) {
			err = -ENOMEM;
			goto cleanup;
		}
		lock_buffer(bh);

		err = ext4_journal_get_create_access(handle, bh);
		if (err)
			goto cleanup;

		neh = ext_block_hdr(bh);
		neh->eh_entries = cpu_to_le16(1);
		neh->eh_magic = EXT4_EXT_MAGIC;
		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
		neh->eh_depth = cpu_to_le16(depth - i);
		fidx = EXT_FIRST_INDEX(neh);
		fidx->ei_block = border;
		ext4_idx_store_pblock(fidx, oldblock);

		ext_debug("int.index at %d (block %llu): %u -> %llu\n",
			  i, newblock, le32_to_cpu(border), oldblock);

		/* move remainder of path[i] to the new index block */
		if (unlikely(EXT_MAX_INDEX(path[i].p_hdr) !=
			     EXT_LAST_INDEX(path[i].p_hdr))) {
			EXT4_ERROR_INODE(inode,
					 "EXT_MAX_INDEX != EXT_LAST_INDEX ee_block %d!",
					 le32_to_cpu(path[i].p_ext->ee_block));
			err = -EFSCORRUPTED;
			goto cleanup;
		}
		/* start copy indexes */
		m = EXT_MAX_INDEX(path[i].p_hdr) - path[i].p_idx++;
		ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx,
			  EXT_MAX_INDEX(path[i].p_hdr));
		ext4_ext_show_move(inode, path, newblock, i);
		if (m) {
			memmove(++fidx, path[i].p_idx,
				sizeof(struct ext4_extent_idx) * m);
			le16_add_cpu(&neh->eh_entries, m);
		}
		/* zero out unused area in the extent block */
		ext_size = sizeof(struct ext4_extent_header) +
			(sizeof(struct ext4_extent) * le16_to_cpu(neh->eh_entries));
		memset(bh->b_data + ext_size, 0,
		       inode->i_sb->s_blocksize - ext_size);
		ext4_extent_block_csum_set(inode, neh);
		set_buffer_uptodate(bh);
		unlock_buffer(bh);

		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (err)
			goto cleanup;
		brelse(bh);
		bh = NULL;

		/* correct old index */
		if (m) {
			err = ext4_ext_get_access(handle, inode, path + i);
			if (err)
				goto cleanup;
			le16_add_cpu(&path[i].p_hdr->eh_entries, -m);
			err = ext4_ext_dirty(handle, inode, path + i);
			if (err)
				goto cleanup;
		}

		i--;
	}

	/* insert new index */
	err = ext4_ext_insert_index(handle, inode, path + at,
				    le32_to_cpu(border), newblock);

cleanup:
	if (bh) {
		if (buffer_locked(bh))
			unlock_buffer(bh);
		brelse(bh);
	}

	if (err) {
		/* free all allocated blocks in error case */
		for (i = 0; i < depth; i++) {
			if (!ablocks[i])
				continue;
			ext4_free_blocks(handle, inode, NULL, ablocks[i], 1,
					 EXT4_FREE_BLOCKS_METADATA);
		}
	}
	kfree(ablocks);

	return err;
}

/*
 * ext4_ext_grow_indepth:
 * implements tree growing procedure:
 * - allocates new block
 * - moves top-level data (index block or leaf) into the new block
 * - initializes new top-level, creating index that points to the
 *   just created block
 */
static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
				 unsigned int flags)
{
	struct ext4_extent_header *neh;
	struct buffer_head *bh;
	ext4_fsblk_t newblock, goal = 0;
	struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
	int err = 0;
	size_t ext_size = 0;

	/* Try to prepend new index to old one */
	if (ext_depth(inode))
		goal = ext4_idx_pblock(EXT_FIRST_INDEX(ext_inode_hdr(inode)));
	if (goal > le32_to_cpu(es->s_first_data_block)) {
		flags |= EXT4_MB_HINT_TRY_GOAL;
		goal--;
	} else
		goal = ext4_inode_to_goal_block(inode);
	newblock = ext4_new_meta_blocks(handle, inode, goal, flags,
					NULL, &err);
	if (newblock == 0)
		return err;

	bh = sb_getblk_gfp(inode->i_sb, newblock, __GFP_MOVABLE | GFP_NOFS);
	if (unlikely(!bh))
		return -ENOMEM;
	lock_buffer(bh);

	err = ext4_journal_get_create_access(handle, bh);
	if (err) {
		unlock_buffer(bh);
		goto out;
	}

	ext_size = sizeof(EXT4_I(inode)->i_data);
	/* move top-level index/leaf into new block */
	memmove(bh->b_data, EXT4_I(inode)->i_data, ext_size);
	/* zero out unused area in the extent block */
	memset(bh->b_data + ext_size, 0, inode->i_sb->s_blocksize - ext_size);

	/* set size of new block */
	neh = ext_block_hdr(bh);
	/* old root could have indexes or leaves
	 * so calculate eh_max right way */
	if (ext_depth(inode))
		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
	else
		neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
	neh->eh_magic = EXT4_EXT_MAGIC;
	ext4_extent_block_csum_set(inode, neh);
	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	err = ext4_handle_dirty_metadata(handle, inode, bh);
	if (err)
		goto out;

	/* Update top-level index: num,max,pointer */
	neh = ext_inode_hdr(inode);
	neh->eh_entries = cpu_to_le16(1);
	ext4_idx_store_pblock(EXT_FIRST_INDEX(neh), newblock);
	if (neh->eh_depth == 0) {
		/* Root extent block becomes index block */
		neh->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode, 0));
		EXT_FIRST_INDEX(neh)->ei_block =
			EXT_FIRST_EXTENT(neh)->ee_block;
	}
	ext_debug("new root: num %d(%d), lblock %d, ptr %llu\n",
		  le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max),
		  le32_to_cpu(EXT_FIRST_INDEX(neh)->ei_block),
		  ext4_idx_pblock(EXT_FIRST_INDEX(neh)));

	le16_add_cpu(&neh->eh_depth, 1);
	ext4_mark_inode_dirty(handle, inode);
out:
	brelse(bh);

	return err;
}

/*
 * ext4_ext_create_new_leaf:
 * finds empty index and adds new leaf.
 * if no free index is found, then it requests in-depth growing.
 */
static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
				    unsigned int mb_flags,
				    unsigned int gb_flags,
				    struct ext4_ext_path **ppath,
				    struct ext4_extent *newext)
{
	struct ext4_ext_path *path = *ppath;
	struct ext4_ext_path *curp;
	int depth, i, err = 0;

repeat:
	i = depth = ext_depth(inode);

	/* walk up to the tree and look for free index entry */
	curp = path + depth;
	while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) {
		i--;
		curp--;
	}

	/* we use already allocated block for index block,
	 * so subsequent data blocks should be contiguous */
	if (EXT_HAS_FREE_INDEX(curp)) {
		/* if we found index with free entry, then use that
		 * entry: create all needed subtree and add new leaf */
		err = ext4_ext_split(handle, inode, mb_flags, path, newext, i);
		if (err)
			goto out;

		/* refill path */
		path = ext4_find_extent(inode,
				    (ext4_lblk_t)le32_to_cpu(newext->ee_block),
				    ppath, gb_flags);
		if (IS_ERR(path))
			err = PTR_ERR(path);
	} else {
		/* tree is full, time to grow in depth */
		err = ext4_ext_grow_indepth(handle, inode, mb_flags);
		if (err)
			goto out;

		/* refill path */
		path = ext4_find_extent(inode,
				    (ext4_lblk_t)le32_to_cpu(newext->ee_block),
				    ppath, gb_flags);
		if (IS_ERR(path)) {
			err = PTR_ERR(path);
			goto out;
		}

		/*
		 * only first (depth 0 -> 1) produces free space;
		 * in all other cases we have to split the grown tree
		 */
		depth = ext_depth(inode);
		if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
			/* now we need to split */
			goto repeat;
		}
	}

out:
	return err;
}
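/*
 * Note: of the two neighbour searches below, only the right-hand one may
 * need to read further tree blocks; the left-hand search can be answered
 * entirely from the extents already referenced by @path.
 */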
/*
 * search the closest allocated block to the left for *logical
 * and returns it at @logical, with its physical address at @phys.
 * if *logical is the smallest allocated block, the function
 * returns 0 at @phys
 * return value contains 0 (success) or error code
 */
static int ext4_ext_search_left(struct inode *inode,
				struct ext4_ext_path *path,
				ext4_lblk_t *logical, ext4_fsblk_t *phys)
{
	struct ext4_extent_idx *ix;
	struct ext4_extent *ex;
	int depth, ee_len;

	if (unlikely(path == NULL)) {
		EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
		return -EFSCORRUPTED;
	}
	depth = path->p_depth;
	*phys = 0;

	if (depth == 0 && path->p_ext == NULL)
		return 0;

	/* usually extent in the path covers blocks smaller
	 * than *logical, but it can be that extent is the
	 * first one in the file */

	ex = path[depth].p_ext;
	ee_len = ext4_ext_get_actual_len(ex);
	if (*logical < le32_to_cpu(ex->ee_block)) {
		if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
			EXT4_ERROR_INODE(inode,
					 "EXT_FIRST_EXTENT != ex *logical %d ee_block %d!",
					 *logical, le32_to_cpu(ex->ee_block));
			return -EFSCORRUPTED;
		}
		while (--depth >= 0) {
			ix = path[depth].p_idx;
			if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
				EXT4_ERROR_INODE(inode,
				  "ix (%d) != EXT_FIRST_INDEX (%d) (depth %d)!",
				  ix != NULL ? le32_to_cpu(ix->ei_block) : 0,
				  EXT_FIRST_INDEX(path[depth].p_hdr) != NULL ?
		le32_to_cpu(EXT_FIRST_INDEX(path[depth].p_hdr)->ei_block) : 0,
				  depth);
				return -EFSCORRUPTED;
			}
		}
		return 0;
	}

	if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
		EXT4_ERROR_INODE(inode,
				 "logical %d < ee_block %d + ee_len %d!",
				 *logical, le32_to_cpu(ex->ee_block), ee_len);
		return -EFSCORRUPTED;
	}

	*logical = le32_to_cpu(ex->ee_block) + ee_len - 1;
	*phys = ext4_ext_pblock(ex) + ee_len - 1;
	return 0;
}

/*
 * search the closest allocated block to the right for *logical
 * and returns it at @logical, with its physical address at @phys.
 * if *logical is the largest allocated block, the function
 * returns 0 at @phys
 * return value contains 0 (success) or error code
 */
static int ext4_ext_search_right(struct inode *inode,
				 struct ext4_ext_path *path,
				 ext4_lblk_t *logical, ext4_fsblk_t *phys,
				 struct ext4_extent **ret_ex)
{
	struct buffer_head *bh = NULL;
	struct ext4_extent_header *eh;
	struct ext4_extent_idx *ix;
	struct ext4_extent *ex;
	ext4_fsblk_t block;
	int depth;	/* Note, NOT eh_depth; depth from top of tree */
	int ee_len;

	if (unlikely(path == NULL)) {
		EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
		return -EFSCORRUPTED;
	}
	depth = path->p_depth;
	*phys = 0;

	if (depth == 0 && path->p_ext == NULL)
		return 0;

	/* usually extent in the path covers blocks smaller
	 * than *logical, but it can be that extent is the
	 * first one in the file */

	ex = path[depth].p_ext;
	ee_len = ext4_ext_get_actual_len(ex);
	if (*logical < le32_to_cpu(ex->ee_block)) {
		if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
			EXT4_ERROR_INODE(inode,
					 "first_extent(path[%d].p_hdr) != ex",
					 depth);
			return -EFSCORRUPTED;
		}
		while (--depth >= 0) {
			ix = path[depth].p_idx;
			if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
				EXT4_ERROR_INODE(inode,
						 "ix != EXT_FIRST_INDEX *logical %d!",
						 *logical);
				return -EFSCORRUPTED;
			}
		}
		goto found_extent;
	}

	if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
		EXT4_ERROR_INODE(inode,
				 "logical %d < ee_block %d + ee_len %d!",
				 *logical, le32_to_cpu(ex->ee_block), ee_len);
		return -EFSCORRUPTED;
	}

	if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) {
		/* next allocated block in this leaf */
		ex++;
		goto found_extent;
	}

	/* go up and search for index to the right */
	while (--depth >= 0) {
		ix = path[depth].p_idx;
		if (ix != EXT_LAST_INDEX(path[depth].p_hdr))
			goto got_index;
	}

	/* we've gone up to the root and found no index to the right */
	return 0;

got_index:
	/* we've found index to the right, let's
	 * follow it and find the closest allocated
	 * block to the right */
	ix++;
	block = ext4_idx_pblock(ix);
	while (++depth < path->p_depth) {
		/* subtract from p_depth to get proper eh_depth */
		bh = read_extent_tree_block(inode, block,
					    path->p_depth - depth, 0);
		if (IS_ERR(bh))
			return PTR_ERR(bh);
		eh = ext_block_hdr(bh);
		ix = EXT_FIRST_INDEX(eh);
		block = ext4_idx_pblock(ix);
		put_bh(bh);
	}

	bh = read_extent_tree_block(inode, block, path->p_depth - depth, 0);
	if (IS_ERR(bh))
		return PTR_ERR(bh);
	eh = ext_block_hdr(bh);
	ex = EXT_FIRST_EXTENT(eh);
found_extent:
	*logical = le32_to_cpu(ex->ee_block);
	*phys = ext4_ext_pblock(ex);
	*ret_ex = ex;
	if (bh)
		put_bh(bh);
	return 0;
}

/*
 * ext4_ext_next_allocated_block:
 * returns allocated block in subsequent extent or EXT_MAX_BLOCKS.
 * NOTE: it considers block number from index entry as
 * allocated block. Thus, index entries have to be consistent
 * with leaves.
 */
ext4_lblk_t
ext4_ext_next_allocated_block(struct ext4_ext_path *path)
{
	int depth;

	BUG_ON(path == NULL);
	depth = path->p_depth;

	if (depth == 0 && path->p_ext == NULL)
		return EXT_MAX_BLOCKS;

	while (depth >= 0) {
		if (depth == path->p_depth) {
			/* leaf */
			if (path[depth].p_ext &&
			    path[depth].p_ext !=
			    EXT_LAST_EXTENT(path[depth].p_hdr))
				return le32_to_cpu(path[depth].p_ext[1].ee_block);
		} else {
			/* index */
			if (path[depth].p_idx !=
			    EXT_LAST_INDEX(path[depth].p_hdr))
				return le32_to_cpu(path[depth].p_idx[1].ei_block);
		}
		depth--;
	}

	return EXT_MAX_BLOCKS;
}

/*
 * ext4_ext_next_leaf_block:
 * returns first allocated block from next leaf or EXT_MAX_BLOCKS
 */
static ext4_lblk_t ext4_ext_next_leaf_block(struct ext4_ext_path *path)
{
	int depth;

	BUG_ON(path == NULL);
	depth = path->p_depth;

	/* zero-tree has no leaf blocks at all */
	if (depth == 0)
		return EXT_MAX_BLOCKS;

	/* go to index block */
	depth--;

	while (depth >= 0) {
		if (path[depth].p_idx !=
		    EXT_LAST_INDEX(path[depth].p_hdr))
			return (ext4_lblk_t)
				le32_to_cpu(path[depth].p_idx[1].ei_block);
		depth--;
	}

	return EXT_MAX_BLOCKS;
}
/*
 * ext4_ext_correct_indexes:
 * if leaf gets modified and modified extent is first in the leaf,
 * then we have to correct all indexes above.
 * TODO: do we need to correct tree in all cases?
 */
static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode,
				    struct ext4_ext_path *path)
{
	struct ext4_extent_header *eh;
	int depth = ext_depth(inode);
	struct ext4_extent *ex;
	__le32 border;
	int k, err = 0;

	eh = path[depth].p_hdr;
	ex = path[depth].p_ext;

	if (unlikely(ex == NULL || eh == NULL)) {
		EXT4_ERROR_INODE(inode,
				 "ex %p == NULL or eh %p == NULL", ex, eh);
		return -EFSCORRUPTED;
	}

	if (depth == 0) {
		/* there is no tree at all */
		return 0;
	}

	if (ex != EXT_FIRST_EXTENT(eh)) {
		/* we correct tree if first leaf got modified only */
		return 0;
	}

	/*
	 * TODO: we need correction if border is smaller than current one
	 */
	k = depth - 1;
	border = path[depth].p_ext->ee_block;
	err = ext4_ext_get_access(handle, inode, path + k);
	if (err)
		return err;
	path[k].p_idx->ei_block = border;
	err = ext4_ext_dirty(handle, inode, path + k);
	if (err)
		return err;

	while (k--) {
		/* change all left-side indexes */
		if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
			break;
		err = ext4_ext_get_access(handle, inode, path + k);
		if (err)
			break;
		path[k].p_idx->ei_block = border;
		err = ext4_ext_dirty(handle, inode, path + k);
		if (err)
			break;
	}

	return err;
}

int
ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
			   struct ext4_extent *ex2)
{
	unsigned short ext1_ee_len, ext2_ee_len;

	if (ext4_ext_is_unwritten(ex1) != ext4_ext_is_unwritten(ex2))
		return 0;

	ext1_ee_len = ext4_ext_get_actual_len(ex1);
	ext2_ee_len = ext4_ext_get_actual_len(ex2);

	if (le32_to_cpu(ex1->ee_block) + ext1_ee_len !=
	    le32_to_cpu(ex2->ee_block))
		return 0;

	/*
	 * To allow future support for preallocated extents to be added
	 * as an RO_COMPAT feature, refuse to merge two extents if
	 * this can result in the top bit of ee_len being set.
	 */
	if (ext1_ee_len + ext2_ee_len > EXT_INIT_MAX_LEN)
		return 0;
	/*
	 * The check for IO to unwritten extent is somewhat racy as we
	 * increment i_unwritten / set EXT4_STATE_DIO_UNWRITTEN only after
	 * dropping i_data_sem. But reserved blocks should save us in that
	 * case.
	 */
	if (ext4_ext_is_unwritten(ex1) &&
	    (ext4_test_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN) ||
	     atomic_read(&EXT4_I(inode)->i_unwritten) ||
	     (ext1_ee_len + ext2_ee_len > EXT_UNWRITTEN_MAX_LEN)))
		return 0;
#ifdef AGGRESSIVE_TEST
	if (ext1_ee_len >= 4)
		return 0;
#endif

	if (ext4_ext_pblock(ex1) + ext1_ee_len == ext4_ext_pblock(ex2))
		return 1;
	return 0;
}
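/*
 * In short, ex1 and ex2 are mergeable iff they share the written/unwritten
 * state, are logically and physically contiguous, and the combined length
 * still fits in ee_len (EXT_INIT_MAX_LEN / EXT_UNWRITTEN_MAX_LEN).
 */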
/*
 * This function tries to merge the "ex" extent to the next extent in the tree.
 * It always tries to merge towards right. If you want to merge towards
 * left, pass "ex - 1" as argument instead of "ex".
 * Returns 0 if the extents (ex and ex+1) were _not_ merged and returns
 * 1 if they got merged.
 */
static int ext4_ext_try_to_merge_right(struct inode *inode,
				       struct ext4_ext_path *path,
				       struct ext4_extent *ex)
{
	struct ext4_extent_header *eh;
	unsigned int depth, len;
	int merge_done = 0, unwritten;

	depth = ext_depth(inode);
	BUG_ON(path[depth].p_hdr == NULL);
	eh = path[depth].p_hdr;

	while (ex < EXT_LAST_EXTENT(eh)) {
		if (!ext4_can_extents_be_merged(inode, ex, ex + 1))
			break;
		/* merge with next extent! */
		unwritten = ext4_ext_is_unwritten(ex);
		ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
				+ ext4_ext_get_actual_len(ex + 1));
		if (unwritten)
			ext4_ext_mark_unwritten(ex);

		if (ex + 1 < EXT_LAST_EXTENT(eh)) {
			len = (EXT_LAST_EXTENT(eh) - ex - 1)
				* sizeof(struct ext4_extent);
			memmove(ex + 1, ex + 2, len);
		}
		le16_add_cpu(&eh->eh_entries, -1);
		merge_done = 1;
		WARN_ON(eh->eh_entries == 0);
		if (!eh->eh_entries)
			EXT4_ERROR_INODE(inode, "eh->eh_entries = 0!");
	}

	return merge_done;
}

/*
 * This function does a very simple check to see if we can collapse
 * an extent tree with a single extent tree leaf block into the inode.
 */
static void ext4_ext_try_to_merge_up(handle_t *handle,
				     struct inode *inode,
				     struct ext4_ext_path *path)
{
	size_t s;
	unsigned max_root = ext4_ext_space_root(inode, 0);
	ext4_fsblk_t blk;

	if ((path[0].p_depth != 1) ||
	    (le16_to_cpu(path[0].p_hdr->eh_entries) != 1) ||
	    (le16_to_cpu(path[1].p_hdr->eh_entries) > max_root))
		return;

	/*
	 * We need to modify the block allocation bitmap and the block
	 * group descriptor to release the extent tree block.  If we
	 * can't get the journal credits, give up.
	 */
	if (ext4_journal_extend(handle, 2))
		return;

	/*
	 * Copy the extent data up to the inode
	 */
	blk = ext4_idx_pblock(path[0].p_idx);
	s = le16_to_cpu(path[1].p_hdr->eh_entries) *
		sizeof(struct ext4_extent_idx);
	s += sizeof(struct ext4_extent_header);

	path[1].p_maxdepth = path[0].p_maxdepth;
	memcpy(path[0].p_hdr, path[1].p_hdr, s);
	path[0].p_depth = 0;
	path[0].p_ext = EXT_FIRST_EXTENT(path[0].p_hdr) +
		(path[1].p_ext - EXT_FIRST_EXTENT(path[1].p_hdr));
	path[0].p_hdr->eh_max = cpu_to_le16(max_root);

	brelse(path[1].p_bh);
	ext4_free_blocks(handle, inode, NULL, blk, 1,
			 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
}

/*
 * This function tries to merge the @ex extent to neighbours in the tree:
 * first towards the left, then towards the right, and finally it tries to
 * collapse a single-leaf tree into the inode body.
 */
static void ext4_ext_try_to_merge(handle_t *handle,
				  struct inode *inode,
				  struct ext4_ext_path *path,
				  struct ext4_extent *ex)
{
	struct ext4_extent_header *eh;
	unsigned int depth;
	int merge_done = 0;

	depth = ext_depth(inode);
	BUG_ON(path[depth].p_hdr == NULL);
	eh = path[depth].p_hdr;

	if (ex > EXT_FIRST_EXTENT(eh))
		merge_done = ext4_ext_try_to_merge_right(inode, path, ex - 1);

	if (!merge_done)
		(void) ext4_ext_try_to_merge_right(inode, path, ex);

	ext4_ext_try_to_merge_up(handle, inode, path);
}

/*
 * check if a portion of the "newext" extent overlaps with an
 * existing extent.
 *
 * If there is an overlap discovered, it updates the length of the newext
 * such that there will be no overlap, and then returns 1.
 * If there is no overlap found, it returns 0.
 */
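/*
 * Example (a sketch, ignoring bigalloc cluster rounding by
 * EXT4_LBLK_CMASK): if newext covers logical blocks [100, 150) and an
 * existing extent begins at block 120, ee_len is trimmed to 20 so newext
 * becomes [100, 120), and 1 is returned.
 */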
static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi,
					   struct inode *inode,
					   struct ext4_extent *newext,
					   struct ext4_ext_path *path)
{
	ext4_lblk_t b1, b2;
	unsigned int depth, len1;
	unsigned int ret = 0;

	b1 = le32_to_cpu(newext->ee_block);
	len1 = ext4_ext_get_actual_len(newext);
	depth = ext_depth(inode);
	if (!path[depth].p_ext)
		goto out;
	b2 = EXT4_LBLK_CMASK(sbi, le32_to_cpu(path[depth].p_ext->ee_block));

	/*
	 * get the next allocated block if the extent in the path
	 * is before the requested block(s)
	 */
	if (b2 < b1) {
		b2 = ext4_ext_next_allocated_block(path);
		if (b2 == EXT_MAX_BLOCKS)
			goto out;
		b2 = EXT4_LBLK_CMASK(sbi, b2);
	}

	/* check for wrap through zero on extent logical start block*/
	if (b1 + len1 < b1) {
		len1 = EXT_MAX_BLOCKS - b1;
		newext->ee_len = cpu_to_le16(len1);
		ret = 1;
	}

	/* check for overlap */
	if (b1 + len1 > b2) {
		newext->ee_len = cpu_to_le16(b2 - b1);
		ret = 1;
	}
out:
	return ret;
}

/*
 * ext4_ext_insert_extent:
 * tries to merge requested extent into the existing extent or
 * inserts requested extent as new one into the tree,
 * creating new leaf in the no-space case.
 */
int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
			   struct ext4_ext_path **ppath,
			   struct ext4_extent *newext, int gb_flags)
{
	struct ext4_ext_path *path = *ppath;
	struct ext4_extent_header *eh;
	struct ext4_extent *ex, *fex;
	struct ext4_extent *nearex; /* nearest extent */
	struct ext4_ext_path *npath = NULL;
	int depth, len, err;
	ext4_lblk_t next;
	int mb_flags = 0, unwritten;

	if (gb_flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
		mb_flags |= EXT4_MB_DELALLOC_RESERVED;
	if (unlikely(ext4_ext_get_actual_len(newext) == 0)) {
		EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0");
		return -EFSCORRUPTED;
	}
	depth = ext_depth(inode);
	ex = path[depth].p_ext;
	eh = path[depth].p_hdr;
	if (unlikely(path[depth].p_hdr == NULL)) {
		EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
		return -EFSCORRUPTED;
	}

	/* try to insert block into found extent and return */
	if (ex && !(gb_flags & EXT4_GET_BLOCKS_PRE_IO)) {

		/*
		 * Try to see whether we should rather test the extent on
		 * right from ex, or from the left of ex. This is because
		 * ext4_find_extent() can return either extent on the
		 * left, or on the right from the searched position. This
		 * will make merging more effective.
		 */
1984 */ 1985 if (ex < EXT_LAST_EXTENT(eh) && 1986 (le32_to_cpu(ex->ee_block) + 1987 ext4_ext_get_actual_len(ex) < 1988 le32_to_cpu(newext->ee_block))) { 1989 ex += 1; 1990 goto prepend; 1991 } else if ((ex > EXT_FIRST_EXTENT(eh)) && 1992 (le32_to_cpu(newext->ee_block) + 1993 ext4_ext_get_actual_len(newext) < 1994 le32_to_cpu(ex->ee_block))) 1995 ex -= 1; 1996 1997 /* Try to append newex to the ex */ 1998 if (ext4_can_extents_be_merged(inode, ex, newext)) { 1999 ext_debug("append [%d]%d block to %u:[%d]%d" 2000 "(from %llu)\n", 2001 ext4_ext_is_unwritten(newext), 2002 ext4_ext_get_actual_len(newext), 2003 le32_to_cpu(ex->ee_block), 2004 ext4_ext_is_unwritten(ex), 2005 ext4_ext_get_actual_len(ex), 2006 ext4_ext_pblock(ex)); 2007 err = ext4_ext_get_access(handle, inode, 2008 path + depth); 2009 if (err) 2010 return err; 2011 unwritten = ext4_ext_is_unwritten(ex); 2012 ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex) 2013 + ext4_ext_get_actual_len(newext)); 2014 if (unwritten) 2015 ext4_ext_mark_unwritten(ex); 2016 eh = path[depth].p_hdr; 2017 nearex = ex; 2018 goto merge; 2019 } 2020 2021 prepend: 2022 /* Try to prepend newex to the ex */ 2023 if (ext4_can_extents_be_merged(inode, newext, ex)) { 2024 ext_debug("prepend %u[%d]%d block to %u:[%d]%d" 2025 "(from %llu)\n", 2026 le32_to_cpu(newext->ee_block), 2027 ext4_ext_is_unwritten(newext), 2028 ext4_ext_get_actual_len(newext), 2029 le32_to_cpu(ex->ee_block), 2030 ext4_ext_is_unwritten(ex), 2031 ext4_ext_get_actual_len(ex), 2032 ext4_ext_pblock(ex)); 2033 err = ext4_ext_get_access(handle, inode, 2034 path + depth); 2035 if (err) 2036 return err; 2037 2038 unwritten = ext4_ext_is_unwritten(ex); 2039 ex->ee_block = newext->ee_block; 2040 ext4_ext_store_pblock(ex, ext4_ext_pblock(newext)); 2041 ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex) 2042 + ext4_ext_get_actual_len(newext)); 2043 if (unwritten) 2044 ext4_ext_mark_unwritten(ex); 2045 eh = path[depth].p_hdr; 2046 nearex = ex; 2047 goto merge; 2048 } 2049 } 2050 2051 depth = ext_depth(inode); 2052 eh = path[depth].p_hdr; 2053 if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) 2054 goto has_space; 2055 2056 /* probably next leaf has space for us? */ 2057 fex = EXT_LAST_EXTENT(eh); 2058 next = EXT_MAX_BLOCKS; 2059 if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block)) 2060 next = ext4_ext_next_leaf_block(path); 2061 if (next != EXT_MAX_BLOCKS) { 2062 ext_debug("next leaf block - %u\n", next); 2063 BUG_ON(npath != NULL); 2064 npath = ext4_find_extent(inode, next, NULL, 0); 2065 if (IS_ERR(npath)) 2066 return PTR_ERR(npath); 2067 BUG_ON(npath->p_depth != path->p_depth); 2068 eh = npath[depth].p_hdr; 2069 if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) { 2070 ext_debug("next leaf isn't full(%d)\n", 2071 le16_to_cpu(eh->eh_entries)); 2072 path = npath; 2073 goto has_space; 2074 } 2075 ext_debug("next leaf has no free space(%d,%d)\n", 2076 le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max)); 2077 } 2078 2079 /* 2080 * There is no free space in the found leaf. 2081 * We're gonna add a new leaf in the tree. 
 */
	if (gb_flags & EXT4_GET_BLOCKS_METADATA_NOFAIL)
		mb_flags |= EXT4_MB_USE_RESERVED;
	err = ext4_ext_create_new_leaf(handle, inode, mb_flags, gb_flags,
				       ppath, newext);
	if (err)
		goto cleanup;
	depth = ext_depth(inode);
	eh = path[depth].p_hdr;

has_space:
	nearex = path[depth].p_ext;

	err = ext4_ext_get_access(handle, inode, path + depth);
	if (err)
		goto cleanup;

	if (!nearex) {
		/* there is no extent in this leaf, create first one */
		ext_debug("first extent in the leaf: %u:%llu:[%d]%d\n",
				le32_to_cpu(newext->ee_block),
				ext4_ext_pblock(newext),
				ext4_ext_is_unwritten(newext),
				ext4_ext_get_actual_len(newext));
		nearex = EXT_FIRST_EXTENT(eh);
	} else {
		if (le32_to_cpu(newext->ee_block)
			   > le32_to_cpu(nearex->ee_block)) {
			/* Insert after */
			ext_debug("insert %u:%llu:[%d]%d after: "
					"nearest %p\n",
					le32_to_cpu(newext->ee_block),
					ext4_ext_pblock(newext),
					ext4_ext_is_unwritten(newext),
					ext4_ext_get_actual_len(newext),
					nearex);
			nearex++;
		} else {
			/* Insert before */
			BUG_ON(newext->ee_block == nearex->ee_block);
			ext_debug("insert %u:%llu:[%d]%d before: "
					"nearest %p\n",
					le32_to_cpu(newext->ee_block),
					ext4_ext_pblock(newext),
					ext4_ext_is_unwritten(newext),
					ext4_ext_get_actual_len(newext),
					nearex);
		}
		len = EXT_LAST_EXTENT(eh) - nearex + 1;
		if (len > 0) {
			ext_debug("insert %u:%llu:[%d]%d: "
					"move %d extents from 0x%p to 0x%p\n",
					le32_to_cpu(newext->ee_block),
					ext4_ext_pblock(newext),
					ext4_ext_is_unwritten(newext),
					ext4_ext_get_actual_len(newext),
					len, nearex, nearex + 1);
			memmove(nearex + 1, nearex,
				len * sizeof(struct ext4_extent));
		}
	}

	le16_add_cpu(&eh->eh_entries, 1);
	path[depth].p_ext = nearex;
	nearex->ee_block = newext->ee_block;
	ext4_ext_store_pblock(nearex, ext4_ext_pblock(newext));
	nearex->ee_len = newext->ee_len;

merge:
	/* try to merge extents */
	if (!(gb_flags & EXT4_GET_BLOCKS_PRE_IO))
		ext4_ext_try_to_merge(handle, inode, path, nearex);


	/* time to correct all indexes above */
	err = ext4_ext_correct_indexes(handle, inode, path);
	if (err)
		goto cleanup;

	err = ext4_ext_dirty(handle, inode, path + path->p_depth);

cleanup:
	ext4_ext_drop_refs(npath);
	kfree(npath);
	return err;
}

static int ext4_fill_fiemap_extents(struct inode *inode,
				    ext4_lblk_t block, ext4_lblk_t num,
				    struct fiemap_extent_info *fieinfo)
{
	struct ext4_ext_path *path = NULL;
	struct ext4_extent *ex;
	struct extent_status es;
	ext4_lblk_t next, next_del, start = 0, end = 0;
	ext4_lblk_t last = block + num;
	int exists, depth = 0, err = 0;
	unsigned int flags = 0;
	unsigned char blksize_bits = inode->i_sb->s_blocksize_bits;

	while (block < last && block != EXT_MAX_BLOCKS) {
		num = last - block;
		/* find extent for this block */
		down_read(&EXT4_I(inode)->i_data_sem);

		path = ext4_find_extent(inode, block, &path, 0);
		if (IS_ERR(path)) {
			up_read(&EXT4_I(inode)->i_data_sem);
			err = PTR_ERR(path);
			path = NULL;
			break;
		}

		depth = ext_depth(inode);
		if (unlikely(path[depth].p_hdr == NULL)) {
			up_read(&EXT4_I(inode)->i_data_sem);
			EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
			err = -EFSCORRUPTED;
			break;
		}
		ex = path[depth].p_ext;
		next = ext4_ext_next_allocated_block(path);

		flags = 0;
		exists = 0;
		if (!ex) {
			/* there is no extent yet, so try to allocate
			 * all requested space */
			start = block;
			end = block + num;
		} else if (le32_to_cpu(ex->ee_block) > block) {
			/* need to allocate space before found extent */
			start = block;
			end = le32_to_cpu(ex->ee_block);
			if (block + num < end)
				end = block + num;
		} else if (block >= le32_to_cpu(ex->ee_block)
					+ ext4_ext_get_actual_len(ex)) {
			/* need to allocate space after found extent */
			start = block;
			end = block + num;
			if (end >= next)
				end = next;
		} else if (block >= le32_to_cpu(ex->ee_block)) {
			/*
			 * some part of requested space is covered
			 * by found extent
			 */
			start = block;
			end = le32_to_cpu(ex->ee_block)
				+ ext4_ext_get_actual_len(ex);
			if (block + num < end)
				end = block + num;
			exists = 1;
		} else {
			BUG();
		}
		BUG_ON(end <= start);

		if (!exists) {
			es.es_lblk = start;
			es.es_len = end - start;
			es.es_pblk = 0;
		} else {
			es.es_lblk = le32_to_cpu(ex->ee_block);
			es.es_len = ext4_ext_get_actual_len(ex);
			es.es_pblk = ext4_ext_pblock(ex);
			if (ext4_ext_is_unwritten(ex))
				flags |= FIEMAP_EXTENT_UNWRITTEN;
		}

		/*
		 * Find delayed extent and update es accordingly. We call
		 * it even in !exists case to find out whether es is the
		 * last existing extent or not.
		 */
		next_del = ext4_find_delayed_extent(inode, &es);
		if (!exists && next_del) {
			exists = 1;
			flags |= (FIEMAP_EXTENT_DELALLOC |
				  FIEMAP_EXTENT_UNKNOWN);
		}
		up_read(&EXT4_I(inode)->i_data_sem);

		if (unlikely(es.es_len == 0)) {
			EXT4_ERROR_INODE(inode, "es.es_len == 0");
			err = -EFSCORRUPTED;
			break;
		}

		/*
		 * This is possible iff next == next_del == EXT_MAX_BLOCKS.
		 * We need to check next == EXT_MAX_BLOCKS because an extent
		 * may carry both the unwritten and the delayed status: when
		 * a delayed-allocated range is later allocated by fallocate,
		 * the extent status tree tracks both states in a single
		 * extent.
		 *
		 * So we could return an unwritten and delayed extent whose
		 * block is equal to 'next'.
		 */
		if (next == next_del && next == EXT_MAX_BLOCKS) {
			flags |= FIEMAP_EXTENT_LAST;
			if (unlikely(next_del != EXT_MAX_BLOCKS ||
				     next != EXT_MAX_BLOCKS)) {
				EXT4_ERROR_INODE(inode,
						 "next extent == %u, next "
						 "delalloc extent = %u",
						 next, next_del);
				err = -EFSCORRUPTED;
				break;
			}
		}

		if (exists) {
			err = fiemap_fill_next_extent(fieinfo,
				(__u64)es.es_lblk << blksize_bits,
				(__u64)es.es_pblk << blksize_bits,
				(__u64)es.es_len << blksize_bits,
				flags);
			if (err < 0)
				break;
			if (err == 1) {
				err = 0;
				break;
			}
		}

		block = es.es_lblk + es.es_len;
	}

	ext4_ext_drop_refs(path);
	kfree(path);
	return err;
}

/*
 * ext4_ext_determine_hole - determine hole around given block
 * @inode:	inode we lookup in
 * @path:	path in extent tree to @lblk
 * @lblk:	pointer to logical block around which we want to determine hole
 *
 * Determine hole length (and start if easily possible) around given logical
 * block.
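 *
 * Illustrative example (hypothetical numbers): with a single extent
 * covering logical blocks [100, 110) and a lookup at *lblk == 50, the
 * extent in the path starts beyond the block, so the hole is the gap
 * in front of it:
 *
 *	len = le32_to_cpu(ex->ee_block) - *lblk;	i.e. 100 - 50 = 50
 *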
We don't try too hard to find the beginning of the hole but @path 2326 * actually points to extent before @lblk, we provide it. 2327 * 2328 * The function returns the length of a hole starting at @lblk. We update @lblk 2329 * to the beginning of the hole if we managed to find it. 2330 */ 2331 static ext4_lblk_t ext4_ext_determine_hole(struct inode *inode, 2332 struct ext4_ext_path *path, 2333 ext4_lblk_t *lblk) 2334 { 2335 int depth = ext_depth(inode); 2336 struct ext4_extent *ex; 2337 ext4_lblk_t len; 2338 2339 ex = path[depth].p_ext; 2340 if (ex == NULL) { 2341 /* there is no extent yet, so gap is [0;-] */ 2342 *lblk = 0; 2343 len = EXT_MAX_BLOCKS; 2344 } else if (*lblk < le32_to_cpu(ex->ee_block)) { 2345 len = le32_to_cpu(ex->ee_block) - *lblk; 2346 } else if (*lblk >= le32_to_cpu(ex->ee_block) 2347 + ext4_ext_get_actual_len(ex)) { 2348 ext4_lblk_t next; 2349 2350 *lblk = le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex); 2351 next = ext4_ext_next_allocated_block(path); 2352 BUG_ON(next == *lblk); 2353 len = next - *lblk; 2354 } else { 2355 BUG(); 2356 } 2357 return len; 2358 } 2359 2360 /* 2361 * ext4_ext_put_gap_in_cache: 2362 * calculate boundaries of the gap that the requested block fits into 2363 * and cache this gap 2364 */ 2365 static void 2366 ext4_ext_put_gap_in_cache(struct inode *inode, ext4_lblk_t hole_start, 2367 ext4_lblk_t hole_len) 2368 { 2369 struct extent_status es; 2370 2371 ext4_es_find_extent_range(inode, &ext4_es_is_delayed, hole_start, 2372 hole_start + hole_len - 1, &es); 2373 if (es.es_len) { 2374 /* There's delayed extent containing lblock? */ 2375 if (es.es_lblk <= hole_start) 2376 return; 2377 hole_len = min(es.es_lblk - hole_start, hole_len); 2378 } 2379 ext_debug(" -> %u:%u\n", hole_start, hole_len); 2380 ext4_es_insert_extent(inode, hole_start, hole_len, ~0, 2381 EXTENT_STATUS_HOLE); 2382 } 2383 2384 /* 2385 * ext4_ext_rm_idx: 2386 * removes index from the index block. 2387 */ 2388 static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode, 2389 struct ext4_ext_path *path, int depth) 2390 { 2391 int err; 2392 ext4_fsblk_t leaf; 2393 2394 /* free index block */ 2395 depth--; 2396 path = path + depth; 2397 leaf = ext4_idx_pblock(path->p_idx); 2398 if (unlikely(path->p_hdr->eh_entries == 0)) { 2399 EXT4_ERROR_INODE(inode, "path->p_hdr->eh_entries == 0"); 2400 return -EFSCORRUPTED; 2401 } 2402 err = ext4_ext_get_access(handle, inode, path); 2403 if (err) 2404 return err; 2405 2406 if (path->p_idx != EXT_LAST_INDEX(path->p_hdr)) { 2407 int len = EXT_LAST_INDEX(path->p_hdr) - path->p_idx; 2408 len *= sizeof(struct ext4_extent_idx); 2409 memmove(path->p_idx, path->p_idx + 1, len); 2410 } 2411 2412 le16_add_cpu(&path->p_hdr->eh_entries, -1); 2413 err = ext4_ext_dirty(handle, inode, path); 2414 if (err) 2415 return err; 2416 ext_debug("index is empty, remove it, free block %llu\n", leaf); 2417 trace_ext4_ext_rm_idx(inode, leaf); 2418 2419 ext4_free_blocks(handle, inode, NULL, leaf, 1, 2420 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET); 2421 2422 while (--depth >= 0) { 2423 if (path->p_idx != EXT_FIRST_INDEX(path->p_hdr)) 2424 break; 2425 path--; 2426 err = ext4_ext_get_access(handle, inode, path); 2427 if (err) 2428 break; 2429 path->p_idx->ei_block = (path+1)->p_idx->ei_block; 2430 err = ext4_ext_dirty(handle, inode, path); 2431 if (err) 2432 break; 2433 } 2434 return err; 2435 } 2436 2437 /* 2438 * ext4_ext_calc_credits_for_single_extent: 2439 * This routine returns max. credits that needed to insert an extent 2440 * to the extent tree. 
 * When the actual path is passed, the caller should calculate credits
 * under i_data_sem.
 */
int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks,
						struct ext4_ext_path *path)
{
	if (path) {
		int depth = ext_depth(inode);
		int ret = 0;

		/* probably there is space in leaf? */
		if (le16_to_cpu(path[depth].p_hdr->eh_entries)
				< le16_to_cpu(path[depth].p_hdr->eh_max)) {

			/*
			 * There is some space in the leaf, so there is no
			 * need to account for a leaf block credit.
			 *
			 * bitmaps and block group descriptor blocks
			 * and other metadata blocks still need to be
			 * accounted.
			 */
			/* 1 bitmap, 1 block group descriptor */
			ret = 2 + EXT4_META_TRANS_BLOCKS(inode->i_sb);
			return ret;
		}
	}

	return ext4_chunk_trans_blocks(inode, nrblocks);
}

/*
 * How many index/leaf blocks need to change/allocate to add @extents extents?
 *
 * If we add a single extent, then in the worst case, each tree level
 * index/leaf need to be changed in case of the tree split.
 *
 * If more extents are inserted, they could cause the whole tree split more
 * than once, but this is really rare.
 */
int ext4_ext_index_trans_blocks(struct inode *inode, int extents)
{
	int index;
	int depth;

	/* If we are converting the inline data, only one is needed here. */
	if (ext4_has_inline_data(inode))
		return 1;

	depth = ext_depth(inode);

	if (extents <= 1)
		index = depth * 2;
	else
		index = depth * 3;

	return index;
}

static inline int get_default_free_blocks_flags(struct inode *inode)
{
	if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode) ||
	    ext4_test_inode_flag(inode, EXT4_INODE_EA_INODE))
		return EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET;
	else if (ext4_should_journal_data(inode))
		return EXT4_FREE_BLOCKS_FORGET;
	return 0;
}

/*
 * ext4_rereserve_cluster - increment the reserved cluster count when
 *                          freeing a cluster with a pending reservation
 *
 * @inode - file containing the cluster
 * @lblk - logical block in cluster to be reserved
 *
 * Increments the reserved cluster count and adjusts quota in a bigalloc
 * file system when freeing a partial cluster containing at least one
 * delayed and unwritten block.  A partial cluster meeting that
 * requirement will have a pending reservation.  If so, the
 * RERESERVE_CLUSTER flag is used when calling ext4_free_blocks() to
 * defer reserved and allocated space accounting to a subsequent call
 * to this function.
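 *
 * Illustrative sequence (hypothetical, a bigalloc file system with an
 * 8-block cluster ratio): a partial cluster holding at least one
 * delayed-and-unwritten block is freed with the flag set, and the
 * reserved/allocated space accounting is then redone here rather than
 * inside ext4_free_blocks():
 *
 *	flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER;
 *	ext4_free_blocks(handle, inode, NULL, pclu_blk, 8, flags);
 *	ext4_rereserve_cluster(inode, lblk);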
2524 */ 2525 static void ext4_rereserve_cluster(struct inode *inode, ext4_lblk_t lblk) 2526 { 2527 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 2528 struct ext4_inode_info *ei = EXT4_I(inode); 2529 2530 dquot_reclaim_block(inode, EXT4_C2B(sbi, 1)); 2531 2532 spin_lock(&ei->i_block_reservation_lock); 2533 ei->i_reserved_data_blocks++; 2534 percpu_counter_add(&sbi->s_dirtyclusters_counter, 1); 2535 spin_unlock(&ei->i_block_reservation_lock); 2536 2537 percpu_counter_add(&sbi->s_freeclusters_counter, 1); 2538 ext4_remove_pending(inode, lblk); 2539 } 2540 2541 static int ext4_remove_blocks(handle_t *handle, struct inode *inode, 2542 struct ext4_extent *ex, 2543 struct partial_cluster *partial, 2544 ext4_lblk_t from, ext4_lblk_t to) 2545 { 2546 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 2547 unsigned short ee_len = ext4_ext_get_actual_len(ex); 2548 ext4_fsblk_t last_pblk, pblk; 2549 ext4_lblk_t num; 2550 int flags; 2551 2552 /* only extent tail removal is allowed */ 2553 if (from < le32_to_cpu(ex->ee_block) || 2554 to != le32_to_cpu(ex->ee_block) + ee_len - 1) { 2555 ext4_error(sbi->s_sb, 2556 "strange request: removal(2) %u-%u from %u:%u", 2557 from, to, le32_to_cpu(ex->ee_block), ee_len); 2558 return 0; 2559 } 2560 2561 #ifdef EXTENTS_STATS 2562 spin_lock(&sbi->s_ext_stats_lock); 2563 sbi->s_ext_blocks += ee_len; 2564 sbi->s_ext_extents++; 2565 if (ee_len < sbi->s_ext_min) 2566 sbi->s_ext_min = ee_len; 2567 if (ee_len > sbi->s_ext_max) 2568 sbi->s_ext_max = ee_len; 2569 if (ext_depth(inode) > sbi->s_depth_max) 2570 sbi->s_depth_max = ext_depth(inode); 2571 spin_unlock(&sbi->s_ext_stats_lock); 2572 #endif 2573 2574 trace_ext4_remove_blocks(inode, ex, from, to, partial); 2575 2576 /* 2577 * if we have a partial cluster, and it's different from the 2578 * cluster of the last block in the extent, we free it 2579 */ 2580 last_pblk = ext4_ext_pblock(ex) + ee_len - 1; 2581 2582 if (partial->state != initial && 2583 partial->pclu != EXT4_B2C(sbi, last_pblk)) { 2584 if (partial->state == tofree) { 2585 flags = get_default_free_blocks_flags(inode); 2586 if (ext4_is_pending(inode, partial->lblk)) 2587 flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER; 2588 ext4_free_blocks(handle, inode, NULL, 2589 EXT4_C2B(sbi, partial->pclu), 2590 sbi->s_cluster_ratio, flags); 2591 if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER) 2592 ext4_rereserve_cluster(inode, partial->lblk); 2593 } 2594 partial->state = initial; 2595 } 2596 2597 num = le32_to_cpu(ex->ee_block) + ee_len - from; 2598 pblk = ext4_ext_pblock(ex) + ee_len - num; 2599 2600 /* 2601 * We free the partial cluster at the end of the extent (if any), 2602 * unless the cluster is used by another extent (partial_cluster 2603 * state is nofree). If a partial cluster exists here, it must be 2604 * shared with the last block in the extent. 
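	 *
	 * Illustrative example (hypothetical, bigalloc with an 8-block
	 * cluster ratio, so clusters cover [8, 16), [16, 24), ...):
	 * removing blocks [13, 20) of an extent that ends at block 19
	 * frees the whole cluster containing block 19, since the removed
	 * range covers that cluster's first block (16) and no other
	 * extent claims it (the partial state is not nofree).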
 */
	flags = get_default_free_blocks_flags(inode);

	/* partial, left end cluster aligned, right end unaligned */
	if ((EXT4_LBLK_COFF(sbi, to) != sbi->s_cluster_ratio - 1) &&
	    (EXT4_LBLK_CMASK(sbi, to) >= from) &&
	    (partial->state != nofree)) {
		if (ext4_is_pending(inode, to))
			flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER;
		ext4_free_blocks(handle, inode, NULL,
				 EXT4_PBLK_CMASK(sbi, last_pblk),
				 sbi->s_cluster_ratio, flags);
		if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)
			ext4_rereserve_cluster(inode, to);
		partial->state = initial;
		flags = get_default_free_blocks_flags(inode);
	}

	flags |= EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER;

	/*
	 * For bigalloc file systems, we never free a partial cluster
	 * at the beginning of the extent.  Instead, we check to see if we
	 * need to free it on a subsequent call to ext4_remove_blocks,
	 * or at the end of ext4_ext_rm_leaf or ext4_ext_remove_space.
	 */
	flags |= EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER;
	ext4_free_blocks(handle, inode, NULL, pblk, num, flags);

	/* reset the partial cluster if we've freed past it */
	if (partial->state != initial && partial->pclu != EXT4_B2C(sbi, pblk))
		partial->state = initial;

	/*
	 * If we've freed the entire extent but the beginning is not left
	 * cluster aligned and is not marked as ineligible for freeing we
	 * record the partial cluster at the beginning of the extent.  It
	 * wasn't freed by the preceding ext4_free_blocks() call, and we
	 * need to look farther to the left to determine if it's to be freed
	 * (not shared with another extent).  Else, reset the partial
	 * cluster - we're either done freeing or the beginning of the
	 * extent is left cluster aligned.
	 */
	if (EXT4_LBLK_COFF(sbi, from) && num == ee_len) {
		if (partial->state == initial) {
			partial->pclu = EXT4_B2C(sbi, pblk);
			partial->lblk = from;
			partial->state = tofree;
		}
	} else {
		partial->state = initial;
	}

	return 0;
}

/*
 * ext4_ext_rm_leaf() Removes the extents associated with the
 * blocks appearing between "start" and "end".  Both "start"
 * and "end" must appear in the same extent or EFSCORRUPTED is returned.
 *
 * @handle: The journal handle
 * @inode:  The file's inode
 * @path:   The path to the leaf
 * @partial: The cluster which we'll have to free if all extents
 *           have been released from it.  However, if its state is
 *           nofree, it's a cluster just to the right of the
 *           punched region and it must not be freed.
2673 * @start: The first block to remove 2674 * @end: The last block to remove 2675 */ 2676 static int 2677 ext4_ext_rm_leaf(handle_t *handle, struct inode *inode, 2678 struct ext4_ext_path *path, 2679 struct partial_cluster *partial, 2680 ext4_lblk_t start, ext4_lblk_t end) 2681 { 2682 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 2683 int err = 0, correct_index = 0; 2684 int depth = ext_depth(inode), credits; 2685 struct ext4_extent_header *eh; 2686 ext4_lblk_t a, b; 2687 unsigned num; 2688 ext4_lblk_t ex_ee_block; 2689 unsigned short ex_ee_len; 2690 unsigned unwritten = 0; 2691 struct ext4_extent *ex; 2692 ext4_fsblk_t pblk; 2693 2694 /* the header must be checked already in ext4_ext_remove_space() */ 2695 ext_debug("truncate since %u in leaf to %u\n", start, end); 2696 if (!path[depth].p_hdr) 2697 path[depth].p_hdr = ext_block_hdr(path[depth].p_bh); 2698 eh = path[depth].p_hdr; 2699 if (unlikely(path[depth].p_hdr == NULL)) { 2700 EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth); 2701 return -EFSCORRUPTED; 2702 } 2703 /* find where to start removing */ 2704 ex = path[depth].p_ext; 2705 if (!ex) 2706 ex = EXT_LAST_EXTENT(eh); 2707 2708 ex_ee_block = le32_to_cpu(ex->ee_block); 2709 ex_ee_len = ext4_ext_get_actual_len(ex); 2710 2711 trace_ext4_ext_rm_leaf(inode, start, ex, partial); 2712 2713 while (ex >= EXT_FIRST_EXTENT(eh) && 2714 ex_ee_block + ex_ee_len > start) { 2715 2716 if (ext4_ext_is_unwritten(ex)) 2717 unwritten = 1; 2718 else 2719 unwritten = 0; 2720 2721 ext_debug("remove ext %u:[%d]%d\n", ex_ee_block, 2722 unwritten, ex_ee_len); 2723 path[depth].p_ext = ex; 2724 2725 a = ex_ee_block > start ? ex_ee_block : start; 2726 b = ex_ee_block+ex_ee_len - 1 < end ? 2727 ex_ee_block+ex_ee_len - 1 : end; 2728 2729 ext_debug(" border %u:%u\n", a, b); 2730 2731 /* If this extent is beyond the end of the hole, skip it */ 2732 if (end < ex_ee_block) { 2733 /* 2734 * We're going to skip this extent and move to another, 2735 * so note that its first cluster is in use to avoid 2736 * freeing it when removing blocks. Eventually, the 2737 * right edge of the truncated/punched region will 2738 * be just to the left. 2739 */ 2740 if (sbi->s_cluster_ratio > 1) { 2741 pblk = ext4_ext_pblock(ex); 2742 partial->pclu = EXT4_B2C(sbi, pblk); 2743 partial->state = nofree; 2744 } 2745 ex--; 2746 ex_ee_block = le32_to_cpu(ex->ee_block); 2747 ex_ee_len = ext4_ext_get_actual_len(ex); 2748 continue; 2749 } else if (b != ex_ee_block + ex_ee_len - 1) { 2750 EXT4_ERROR_INODE(inode, 2751 "can not handle truncate %u:%u " 2752 "on extent %u:%u", 2753 start, end, ex_ee_block, 2754 ex_ee_block + ex_ee_len - 1); 2755 err = -EFSCORRUPTED; 2756 goto out; 2757 } else if (a != ex_ee_block) { 2758 /* remove tail of the extent */ 2759 num = a - ex_ee_block; 2760 } else { 2761 /* remove whole extent: excellent! 
*/ 2762 num = 0; 2763 } 2764 /* 2765 * 3 for leaf, sb, and inode plus 2 (bmap and group 2766 * descriptor) for each block group; assume two block 2767 * groups plus ex_ee_len/blocks_per_block_group for 2768 * the worst case 2769 */ 2770 credits = 7 + 2*(ex_ee_len/EXT4_BLOCKS_PER_GROUP(inode->i_sb)); 2771 if (ex == EXT_FIRST_EXTENT(eh)) { 2772 correct_index = 1; 2773 credits += (ext_depth(inode)) + 1; 2774 } 2775 credits += EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb); 2776 2777 err = ext4_ext_truncate_extend_restart(handle, inode, credits); 2778 if (err) 2779 goto out; 2780 2781 err = ext4_ext_get_access(handle, inode, path + depth); 2782 if (err) 2783 goto out; 2784 2785 err = ext4_remove_blocks(handle, inode, ex, partial, a, b); 2786 if (err) 2787 goto out; 2788 2789 if (num == 0) 2790 /* this extent is removed; mark slot entirely unused */ 2791 ext4_ext_store_pblock(ex, 0); 2792 2793 ex->ee_len = cpu_to_le16(num); 2794 /* 2795 * Do not mark unwritten if all the blocks in the 2796 * extent have been removed. 2797 */ 2798 if (unwritten && num) 2799 ext4_ext_mark_unwritten(ex); 2800 /* 2801 * If the extent was completely released, 2802 * we need to remove it from the leaf 2803 */ 2804 if (num == 0) { 2805 if (end != EXT_MAX_BLOCKS - 1) { 2806 /* 2807 * For hole punching, we need to scoot all the 2808 * extents up when an extent is removed so that 2809 * we dont have blank extents in the middle 2810 */ 2811 memmove(ex, ex+1, (EXT_LAST_EXTENT(eh) - ex) * 2812 sizeof(struct ext4_extent)); 2813 2814 /* Now get rid of the one at the end */ 2815 memset(EXT_LAST_EXTENT(eh), 0, 2816 sizeof(struct ext4_extent)); 2817 } 2818 le16_add_cpu(&eh->eh_entries, -1); 2819 } 2820 2821 err = ext4_ext_dirty(handle, inode, path + depth); 2822 if (err) 2823 goto out; 2824 2825 ext_debug("new extent: %u:%u:%llu\n", ex_ee_block, num, 2826 ext4_ext_pblock(ex)); 2827 ex--; 2828 ex_ee_block = le32_to_cpu(ex->ee_block); 2829 ex_ee_len = ext4_ext_get_actual_len(ex); 2830 } 2831 2832 if (correct_index && eh->eh_entries) 2833 err = ext4_ext_correct_indexes(handle, inode, path); 2834 2835 /* 2836 * If there's a partial cluster and at least one extent remains in 2837 * the leaf, free the partial cluster if it isn't shared with the 2838 * current extent. If it is shared with the current extent 2839 * we reset the partial cluster because we've reached the start of the 2840 * truncated/punched region and we're done removing blocks. 
2841 */ 2842 if (partial->state == tofree && ex >= EXT_FIRST_EXTENT(eh)) { 2843 pblk = ext4_ext_pblock(ex) + ex_ee_len - 1; 2844 if (partial->pclu != EXT4_B2C(sbi, pblk)) { 2845 int flags = get_default_free_blocks_flags(inode); 2846 2847 if (ext4_is_pending(inode, partial->lblk)) 2848 flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER; 2849 ext4_free_blocks(handle, inode, NULL, 2850 EXT4_C2B(sbi, partial->pclu), 2851 sbi->s_cluster_ratio, flags); 2852 if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER) 2853 ext4_rereserve_cluster(inode, partial->lblk); 2854 } 2855 partial->state = initial; 2856 } 2857 2858 /* if this leaf is free, then we should 2859 * remove it from index block above */ 2860 if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL) 2861 err = ext4_ext_rm_idx(handle, inode, path, depth); 2862 2863 out: 2864 return err; 2865 } 2866 2867 /* 2868 * ext4_ext_more_to_rm: 2869 * returns 1 if current index has to be freed (even partial) 2870 */ 2871 static int 2872 ext4_ext_more_to_rm(struct ext4_ext_path *path) 2873 { 2874 BUG_ON(path->p_idx == NULL); 2875 2876 if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr)) 2877 return 0; 2878 2879 /* 2880 * if truncate on deeper level happened, it wasn't partial, 2881 * so we have to consider current index for truncation 2882 */ 2883 if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block) 2884 return 0; 2885 return 1; 2886 } 2887 2888 int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start, 2889 ext4_lblk_t end) 2890 { 2891 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 2892 int depth = ext_depth(inode); 2893 struct ext4_ext_path *path = NULL; 2894 struct partial_cluster partial; 2895 handle_t *handle; 2896 int i = 0, err = 0; 2897 2898 partial.pclu = 0; 2899 partial.lblk = 0; 2900 partial.state = initial; 2901 2902 ext_debug("truncate since %u to %u\n", start, end); 2903 2904 /* probably first extent we're gonna free will be last in block */ 2905 handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, depth + 1); 2906 if (IS_ERR(handle)) 2907 return PTR_ERR(handle); 2908 2909 again: 2910 trace_ext4_ext_remove_space(inode, start, end, depth); 2911 2912 /* 2913 * Check if we are removing extents inside the extent tree. If that 2914 * is the case, we are going to punch a hole inside the extent tree 2915 * so we have to check whether we need to split the extent covering 2916 * the last block to remove so we can easily remove the part of it 2917 * in ext4_ext_rm_leaf(). 2918 */ 2919 if (end < EXT_MAX_BLOCKS - 1) { 2920 struct ext4_extent *ex; 2921 ext4_lblk_t ee_block, ex_end, lblk; 2922 ext4_fsblk_t pblk; 2923 2924 /* find extent for or closest extent to this block */ 2925 path = ext4_find_extent(inode, end, NULL, EXT4_EX_NOCACHE); 2926 if (IS_ERR(path)) { 2927 ext4_journal_stop(handle); 2928 return PTR_ERR(path); 2929 } 2930 depth = ext_depth(inode); 2931 /* Leaf not may not exist only if inode has no blocks at all */ 2932 ex = path[depth].p_ext; 2933 if (!ex) { 2934 if (depth) { 2935 EXT4_ERROR_INODE(inode, 2936 "path[%d].p_hdr == NULL", 2937 depth); 2938 err = -EFSCORRUPTED; 2939 } 2940 goto out; 2941 } 2942 2943 ee_block = le32_to_cpu(ex->ee_block); 2944 ex_end = ee_block + ext4_ext_get_actual_len(ex) - 1; 2945 2946 /* 2947 * See if the last block is inside the extent, if so split 2948 * the extent at 'end' block so we can easily remove the 2949 * tail of the first part of the split extent in 2950 * ext4_ext_rm_leaf(). 
 */
		if (end >= ee_block && end < ex_end) {

			/*
			 * If we're going to split the extent, note that
			 * the cluster containing the block after 'end' is
			 * in use to avoid freeing it when removing blocks.
			 */
			if (sbi->s_cluster_ratio > 1) {
				pblk = ext4_ext_pblock(ex) + end - ee_block + 1;
				partial.pclu = EXT4_B2C(sbi, pblk);
				partial.state = nofree;
			}

			/*
			 * Split the extent in two so that 'end' is the last
			 * block in the first new extent. Also we should not
			 * fail removing space due to ENOSPC so try to use
			 * reserved block if that happens.
			 */
			err = ext4_force_split_extent_at(handle, inode, &path,
							 end + 1, 1);
			if (err < 0)
				goto out;

		} else if (sbi->s_cluster_ratio > 1 && end >= ex_end &&
			   partial.state == initial) {
			/*
			 * If we're punching, there's an extent to the right.
			 * If the partial cluster hasn't been set, set it to
			 * that extent's first cluster and its state to nofree
			 * so it won't be freed should it contain blocks to be
			 * removed. If it's already set (tofree/nofree), we're
			 * retrying and keep the original partial cluster info
			 * so a cluster marked tofree as a result of earlier
			 * extent removal is not lost.
			 */
			lblk = ex_end + 1;
			err = ext4_ext_search_right(inode, path, &lblk, &pblk,
						    &ex);
			if (err)
				goto out;
			if (pblk) {
				partial.pclu = EXT4_B2C(sbi, pblk);
				partial.state = nofree;
			}
		}
	}
	/*
	 * We start scanning from right side, freeing all the blocks
	 * after i_size and walking into the tree depth-wise.
	 */
	depth = ext_depth(inode);
	if (path) {
		int k = i = depth;
		while (--k > 0)
			path[k].p_block =
				le16_to_cpu(path[k].p_hdr->eh_entries)+1;
	} else {
		path = kcalloc(depth + 1, sizeof(struct ext4_ext_path),
			       GFP_NOFS);
		if (path == NULL) {
			ext4_journal_stop(handle);
			return -ENOMEM;
		}
		path[0].p_maxdepth = path[0].p_depth = depth;
		path[0].p_hdr = ext_inode_hdr(inode);
		i = 0;

		if (ext4_ext_check(inode, path[0].p_hdr, depth, 0)) {
			err = -EFSCORRUPTED;
			goto out;
		}
	}
	err = 0;

	while (i >= 0 && err == 0) {
		if (i == depth) {
			/* this is leaf block */
			err = ext4_ext_rm_leaf(handle, inode, path,
					       &partial, start, end);
			/* root level has p_bh == NULL, brelse() eats this */
			brelse(path[i].p_bh);
			path[i].p_bh = NULL;
			i--;
			continue;
		}

		/* this is index block */
		if (!path[i].p_hdr) {
			ext_debug("initialize header\n");
			path[i].p_hdr = ext_block_hdr(path[i].p_bh);
		}

		if (!path[i].p_idx) {
			/* this level hasn't been touched yet */
			path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr);
			path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1;
			ext_debug("init index ptr: hdr 0x%p, num %d\n",
				  path[i].p_hdr,
				  le16_to_cpu(path[i].p_hdr->eh_entries));
		} else {
			/* we were already here, see at next index */
			path[i].p_idx--;
		}

		ext_debug("level %d - index, first 0x%p, cur 0x%p\n",
				i, EXT_FIRST_INDEX(path[i].p_hdr),
				path[i].p_idx);
		if (ext4_ext_more_to_rm(path + i)) {
			struct buffer_head *bh;
			/* go to the next level */
			ext_debug("move to level %d (block %llu)\n",
				  i + 1, ext4_idx_pblock(path[i].p_idx));
			memset(path + i + 1, 0, sizeof(*path));
			bh = read_extent_tree_block(inode,
ext4_idx_pblock(path[i].p_idx), depth - i - 1, 3068 EXT4_EX_NOCACHE); 3069 if (IS_ERR(bh)) { 3070 /* should we reset i_size? */ 3071 err = PTR_ERR(bh); 3072 break; 3073 } 3074 /* Yield here to deal with large extent trees. 3075 * Should be a no-op if we did IO above. */ 3076 cond_resched(); 3077 if (WARN_ON(i + 1 > depth)) { 3078 err = -EFSCORRUPTED; 3079 break; 3080 } 3081 path[i + 1].p_bh = bh; 3082 3083 /* save actual number of indexes since this 3084 * number is changed at the next iteration */ 3085 path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries); 3086 i++; 3087 } else { 3088 /* we finished processing this index, go up */ 3089 if (path[i].p_hdr->eh_entries == 0 && i > 0) { 3090 /* index is empty, remove it; 3091 * handle must be already prepared by the 3092 * truncatei_leaf() */ 3093 err = ext4_ext_rm_idx(handle, inode, path, i); 3094 } 3095 /* root level has p_bh == NULL, brelse() eats this */ 3096 brelse(path[i].p_bh); 3097 path[i].p_bh = NULL; 3098 i--; 3099 ext_debug("return to level %d\n", i); 3100 } 3101 } 3102 3103 trace_ext4_ext_remove_space_done(inode, start, end, depth, &partial, 3104 path->p_hdr->eh_entries); 3105 3106 /* 3107 * if there's a partial cluster and we have removed the first extent 3108 * in the file, then we also free the partial cluster, if any 3109 */ 3110 if (partial.state == tofree && err == 0) { 3111 int flags = get_default_free_blocks_flags(inode); 3112 3113 if (ext4_is_pending(inode, partial.lblk)) 3114 flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER; 3115 ext4_free_blocks(handle, inode, NULL, 3116 EXT4_C2B(sbi, partial.pclu), 3117 sbi->s_cluster_ratio, flags); 3118 if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER) 3119 ext4_rereserve_cluster(inode, partial.lblk); 3120 partial.state = initial; 3121 } 3122 3123 /* TODO: flexible tree reduction should be here */ 3124 if (path->p_hdr->eh_entries == 0) { 3125 /* 3126 * truncate to zero freed all the tree, 3127 * so we need to correct eh_depth 3128 */ 3129 err = ext4_ext_get_access(handle, inode, path); 3130 if (err == 0) { 3131 ext_inode_hdr(inode)->eh_depth = 0; 3132 ext_inode_hdr(inode)->eh_max = 3133 cpu_to_le16(ext4_ext_space_root(inode, 0)); 3134 err = ext4_ext_dirty(handle, inode, path); 3135 } 3136 } 3137 out: 3138 ext4_ext_drop_refs(path); 3139 kfree(path); 3140 path = NULL; 3141 if (err == -EAGAIN) 3142 goto again; 3143 ext4_journal_stop(handle); 3144 3145 return err; 3146 } 3147 3148 /* 3149 * called at mount time 3150 */ 3151 void ext4_ext_init(struct super_block *sb) 3152 { 3153 /* 3154 * possible initialization would be here 3155 */ 3156 3157 if (ext4_has_feature_extents(sb)) { 3158 #if defined(AGGRESSIVE_TEST) || defined(CHECK_BINSEARCH) || defined(EXTENTS_STATS) 3159 printk(KERN_INFO "EXT4-fs: file extents enabled" 3160 #ifdef AGGRESSIVE_TEST 3161 ", aggressive tests" 3162 #endif 3163 #ifdef CHECK_BINSEARCH 3164 ", check binsearch" 3165 #endif 3166 #ifdef EXTENTS_STATS 3167 ", stats" 3168 #endif 3169 "\n"); 3170 #endif 3171 #ifdef EXTENTS_STATS 3172 spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock); 3173 EXT4_SB(sb)->s_ext_min = 1 << 30; 3174 EXT4_SB(sb)->s_ext_max = 0; 3175 #endif 3176 } 3177 } 3178 3179 /* 3180 * called at umount time 3181 */ 3182 void ext4_ext_release(struct super_block *sb) 3183 { 3184 if (!ext4_has_feature_extents(sb)) 3185 return; 3186 3187 #ifdef EXTENTS_STATS 3188 if (EXT4_SB(sb)->s_ext_blocks && EXT4_SB(sb)->s_ext_extents) { 3189 struct ext4_sb_info *sbi = EXT4_SB(sb); 3190 printk(KERN_ERR "EXT4-fs: %lu blocks in %lu extents (%lu ave)\n", 3191 sbi->s_ext_blocks, 
			sbi->s_ext_extents,
			sbi->s_ext_blocks / sbi->s_ext_extents);
		printk(KERN_ERR "EXT4-fs: extents: %lu min, %lu max, max depth %lu\n",
			sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max);
	}
#endif
}

static int ext4_zeroout_es(struct inode *inode, struct ext4_extent *ex)
{
	ext4_lblk_t ee_block;
	ext4_fsblk_t ee_pblock;
	unsigned int ee_len;

	ee_block = le32_to_cpu(ex->ee_block);
	ee_len = ext4_ext_get_actual_len(ex);
	ee_pblock = ext4_ext_pblock(ex);

	if (ee_len == 0)
		return 0;

	return ext4_es_insert_extent(inode, ee_block, ee_len, ee_pblock,
				     EXTENT_STATUS_WRITTEN);
}

/* FIXME!! we need to try to merge to left or right after zero-out */
static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
{
	ext4_fsblk_t ee_pblock;
	unsigned int ee_len;

	ee_len = ext4_ext_get_actual_len(ex);
	ee_pblock = ext4_ext_pblock(ex);
	return ext4_issue_zeroout(inode, le32_to_cpu(ex->ee_block), ee_pblock,
				  ee_len);
}

/*
 * ext4_split_extent_at() splits an extent at given block.
 *
 * @handle: the journal handle
 * @inode: the file inode
 * @ppath: the path to the extent
 * @split: the logical block where the extent is split.
 * @split_flag: indicates if the extent could be zeroed out if the split
 *		fails, and the states (initialized or unwritten) of the new
 *		extents.
 * @flags: flags used to insert the new extent into the extent tree.
 *
 *
 * Splits extent [a, b] into two extents [a, @split) and [@split, b], the
 * states of which are determined by split_flag.
 *
 * There are two cases:
 *  a> the extent is split into two extents.
 *  b> no split is needed, and we just change the state of the extent.
 *
 * return 0 on success.
 */
static int ext4_split_extent_at(handle_t *handle,
			     struct inode *inode,
			     struct ext4_ext_path **ppath,
			     ext4_lblk_t split,
			     int split_flag,
			     int flags)
{
	struct ext4_ext_path *path = *ppath;
	ext4_fsblk_t newblock;
	ext4_lblk_t ee_block;
	struct ext4_extent *ex, newex, orig_ex, zero_ex;
	struct ext4_extent *ex2 = NULL;
	unsigned int ee_len, depth;
	int err = 0;

	BUG_ON((split_flag & (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2)) ==
	       (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2));

	ext_debug("ext4_split_extent_at: inode %lu, logical block %llu\n",
		  inode->i_ino, (unsigned long long)split);

	ext4_ext_show_leaf(inode, path);

	depth = ext_depth(inode);
	ex = path[depth].p_ext;
	ee_block = le32_to_cpu(ex->ee_block);
	ee_len = ext4_ext_get_actual_len(ex);
	newblock = split - ee_block + ext4_ext_pblock(ex);

	BUG_ON(split < ee_block || split >= (ee_block + ee_len));
	BUG_ON(!ext4_ext_is_unwritten(ex) &&
	       split_flag & (EXT4_EXT_MAY_ZEROOUT |
			     EXT4_EXT_MARK_UNWRIT1 |
			     EXT4_EXT_MARK_UNWRIT2));

	err = ext4_ext_get_access(handle, inode, path + depth);
	if (err)
		goto out;

	if (split == ee_block) {
		/*
		 * case b: block @split is the block that the extent begins with
		 * then we just change the state of the extent, and splitting
		 * is not needed.
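		 *
		 * For example (hypothetical numbers): splitting an
		 * unwritten extent [100, 200) at block 100 with
		 * EXT4_EXT_MARK_UNWRIT2 clear leaves a single extent
		 * [100, 200) and only flips it to initialized.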
3293 */ 3294 if (split_flag & EXT4_EXT_MARK_UNWRIT2) 3295 ext4_ext_mark_unwritten(ex); 3296 else 3297 ext4_ext_mark_initialized(ex); 3298 3299 if (!(flags & EXT4_GET_BLOCKS_PRE_IO)) 3300 ext4_ext_try_to_merge(handle, inode, path, ex); 3301 3302 err = ext4_ext_dirty(handle, inode, path + path->p_depth); 3303 goto out; 3304 } 3305 3306 /* case a */ 3307 memcpy(&orig_ex, ex, sizeof(orig_ex)); 3308 ex->ee_len = cpu_to_le16(split - ee_block); 3309 if (split_flag & EXT4_EXT_MARK_UNWRIT1) 3310 ext4_ext_mark_unwritten(ex); 3311 3312 /* 3313 * path may lead to new leaf, not to original leaf any more 3314 * after ext4_ext_insert_extent() returns, 3315 */ 3316 err = ext4_ext_dirty(handle, inode, path + depth); 3317 if (err) 3318 goto fix_extent_len; 3319 3320 ex2 = &newex; 3321 ex2->ee_block = cpu_to_le32(split); 3322 ex2->ee_len = cpu_to_le16(ee_len - (split - ee_block)); 3323 ext4_ext_store_pblock(ex2, newblock); 3324 if (split_flag & EXT4_EXT_MARK_UNWRIT2) 3325 ext4_ext_mark_unwritten(ex2); 3326 3327 err = ext4_ext_insert_extent(handle, inode, ppath, &newex, flags); 3328 if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) { 3329 if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) { 3330 if (split_flag & EXT4_EXT_DATA_VALID1) { 3331 err = ext4_ext_zeroout(inode, ex2); 3332 zero_ex.ee_block = ex2->ee_block; 3333 zero_ex.ee_len = cpu_to_le16( 3334 ext4_ext_get_actual_len(ex2)); 3335 ext4_ext_store_pblock(&zero_ex, 3336 ext4_ext_pblock(ex2)); 3337 } else { 3338 err = ext4_ext_zeroout(inode, ex); 3339 zero_ex.ee_block = ex->ee_block; 3340 zero_ex.ee_len = cpu_to_le16( 3341 ext4_ext_get_actual_len(ex)); 3342 ext4_ext_store_pblock(&zero_ex, 3343 ext4_ext_pblock(ex)); 3344 } 3345 } else { 3346 err = ext4_ext_zeroout(inode, &orig_ex); 3347 zero_ex.ee_block = orig_ex.ee_block; 3348 zero_ex.ee_len = cpu_to_le16( 3349 ext4_ext_get_actual_len(&orig_ex)); 3350 ext4_ext_store_pblock(&zero_ex, 3351 ext4_ext_pblock(&orig_ex)); 3352 } 3353 3354 if (err) 3355 goto fix_extent_len; 3356 /* update the extent length and mark as initialized */ 3357 ex->ee_len = cpu_to_le16(ee_len); 3358 ext4_ext_try_to_merge(handle, inode, path, ex); 3359 err = ext4_ext_dirty(handle, inode, path + path->p_depth); 3360 if (err) 3361 goto fix_extent_len; 3362 3363 /* update extent status tree */ 3364 err = ext4_zeroout_es(inode, &zero_ex); 3365 3366 goto out; 3367 } else if (err) 3368 goto fix_extent_len; 3369 3370 out: 3371 ext4_ext_show_leaf(inode, path); 3372 return err; 3373 3374 fix_extent_len: 3375 ex->ee_len = orig_ex.ee_len; 3376 ext4_ext_dirty(handle, inode, path + path->p_depth); 3377 return err; 3378 } 3379 3380 /* 3381 * ext4_split_extents() splits an extent and mark extent which is covered 3382 * by @map as split_flags indicates 3383 * 3384 * It may result in splitting the extent into multiple extents (up to three) 3385 * There are three possibilities: 3386 * a> There is no split required 3387 * b> Splits in two extents: Split is happening at either end of the extent 3388 * c> Splits in three extents: Somone is splitting in middle of the extent 3389 * 3390 */ 3391 static int ext4_split_extent(handle_t *handle, 3392 struct inode *inode, 3393 struct ext4_ext_path **ppath, 3394 struct ext4_map_blocks *map, 3395 int split_flag, 3396 int flags) 3397 { 3398 struct ext4_ext_path *path = *ppath; 3399 ext4_lblk_t ee_block; 3400 struct ext4_extent *ex; 3401 unsigned int ee_len, depth; 3402 int err = 0; 3403 int unwritten; 3404 int split_flag1, flags1; 3405 int allocated = map->m_len; 3406 3407 depth = ext_depth(inode); 
	ex = path[depth].p_ext;
	ee_block = le32_to_cpu(ex->ee_block);
	ee_len = ext4_ext_get_actual_len(ex);
	unwritten = ext4_ext_is_unwritten(ex);

	if (map->m_lblk + map->m_len < ee_block + ee_len) {
		split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT;
		flags1 = flags | EXT4_GET_BLOCKS_PRE_IO;
		if (unwritten)
			split_flag1 |= EXT4_EXT_MARK_UNWRIT1 |
				       EXT4_EXT_MARK_UNWRIT2;
		if (split_flag & EXT4_EXT_DATA_VALID2)
			split_flag1 |= EXT4_EXT_DATA_VALID1;
		err = ext4_split_extent_at(handle, inode, ppath,
				map->m_lblk + map->m_len, split_flag1, flags1);
		if (err)
			goto out;
	} else {
		allocated = ee_len - (map->m_lblk - ee_block);
	}
	/*
	 * Update path is required because previous ext4_split_extent_at() may
	 * result in split of original leaf or extent zeroout.
	 */
	path = ext4_find_extent(inode, map->m_lblk, ppath, 0);
	if (IS_ERR(path))
		return PTR_ERR(path);
	depth = ext_depth(inode);
	ex = path[depth].p_ext;
	if (!ex) {
		EXT4_ERROR_INODE(inode, "unexpected hole at %lu",
				 (unsigned long) map->m_lblk);
		return -EFSCORRUPTED;
	}
	unwritten = ext4_ext_is_unwritten(ex);
	split_flag1 = 0;

	if (map->m_lblk >= ee_block) {
		split_flag1 = split_flag & EXT4_EXT_DATA_VALID2;
		if (unwritten) {
			split_flag1 |= EXT4_EXT_MARK_UNWRIT1;
			split_flag1 |= split_flag & (EXT4_EXT_MAY_ZEROOUT |
						     EXT4_EXT_MARK_UNWRIT2);
		}
		err = ext4_split_extent_at(handle, inode, ppath,
				map->m_lblk, split_flag1, flags);
		if (err)
			goto out;
	}

	ext4_ext_show_leaf(inode, path);
out:
	return err ? err : allocated;
}

/*
 * This function is called by ext4_ext_map_blocks() if someone tries to write
 * to an unwritten extent. It may result in splitting the unwritten
 * extent into multiple extents (up to three - one initialized and two
 * unwritten).
 * There are three possibilities:
 *   a> There is no split required: Entire extent should be initialized
 *   b> Splits in two extents: Write is happening at either end of the extent
 *   c> Splits in three extents: Someone is writing in middle of the extent
 *
 * Pre-conditions:
 *  - The extent pointed to by 'path' is unwritten.
 *  - The extent pointed to by 'path' contains a superset
 *    of the logical span [map->m_lblk, map->m_lblk + map->m_len).
 *
 * Post-conditions on success:
 *  - the returned value is the number of blocks beyond map->m_lblk
 *    that are allocated and initialized.
 *    It is guaranteed to be >= map->m_len.
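 *
 * Illustrative example (hypothetical numbers): writing blocks
 * [120, 130) into an unwritten extent covering [100, 200) splits it
 * into three pieces:
 *
 *	[100, 120) unwritten, [120, 130) initialized, [130, 200) unwritten
 *
 * and the function returns at least 10, the number of blocks past
 * m_lblk that are now allocated and initialized.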
3482 */ 3483 static int ext4_ext_convert_to_initialized(handle_t *handle, 3484 struct inode *inode, 3485 struct ext4_map_blocks *map, 3486 struct ext4_ext_path **ppath, 3487 int flags) 3488 { 3489 struct ext4_ext_path *path = *ppath; 3490 struct ext4_sb_info *sbi; 3491 struct ext4_extent_header *eh; 3492 struct ext4_map_blocks split_map; 3493 struct ext4_extent zero_ex1, zero_ex2; 3494 struct ext4_extent *ex, *abut_ex; 3495 ext4_lblk_t ee_block, eof_block; 3496 unsigned int ee_len, depth, map_len = map->m_len; 3497 int allocated = 0, max_zeroout = 0; 3498 int err = 0; 3499 int split_flag = EXT4_EXT_DATA_VALID2; 3500 3501 ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical" 3502 "block %llu, max_blocks %u\n", inode->i_ino, 3503 (unsigned long long)map->m_lblk, map_len); 3504 3505 sbi = EXT4_SB(inode->i_sb); 3506 eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >> 3507 inode->i_sb->s_blocksize_bits; 3508 if (eof_block < map->m_lblk + map_len) 3509 eof_block = map->m_lblk + map_len; 3510 3511 depth = ext_depth(inode); 3512 eh = path[depth].p_hdr; 3513 ex = path[depth].p_ext; 3514 ee_block = le32_to_cpu(ex->ee_block); 3515 ee_len = ext4_ext_get_actual_len(ex); 3516 zero_ex1.ee_len = 0; 3517 zero_ex2.ee_len = 0; 3518 3519 trace_ext4_ext_convert_to_initialized_enter(inode, map, ex); 3520 3521 /* Pre-conditions */ 3522 BUG_ON(!ext4_ext_is_unwritten(ex)); 3523 BUG_ON(!in_range(map->m_lblk, ee_block, ee_len)); 3524 3525 /* 3526 * Attempt to transfer newly initialized blocks from the currently 3527 * unwritten extent to its neighbor. This is much cheaper 3528 * than an insertion followed by a merge as those involve costly 3529 * memmove() calls. Transferring to the left is the common case in 3530 * steady state for workloads doing fallocate(FALLOC_FL_KEEP_SIZE) 3531 * followed by append writes. 3532 * 3533 * Limitations of the current logic: 3534 * - L1: we do not deal with writes covering the whole extent. 3535 * This would require removing the extent if the transfer 3536 * is possible. 3537 * - L2: we only attempt to merge with an extent stored in the 3538 * same extent tree node. 3539 */ 3540 if ((map->m_lblk == ee_block) && 3541 /* See if we can merge left */ 3542 (map_len < ee_len) && /*L1*/ 3543 (ex > EXT_FIRST_EXTENT(eh))) { /*L2*/ 3544 ext4_lblk_t prev_lblk; 3545 ext4_fsblk_t prev_pblk, ee_pblk; 3546 unsigned int prev_len; 3547 3548 abut_ex = ex - 1; 3549 prev_lblk = le32_to_cpu(abut_ex->ee_block); 3550 prev_len = ext4_ext_get_actual_len(abut_ex); 3551 prev_pblk = ext4_ext_pblock(abut_ex); 3552 ee_pblk = ext4_ext_pblock(ex); 3553 3554 /* 3555 * A transfer of blocks from 'ex' to 'abut_ex' is allowed 3556 * upon those conditions: 3557 * - C1: abut_ex is initialized, 3558 * - C2: abut_ex is logically abutting ex, 3559 * - C3: abut_ex is physically abutting ex, 3560 * - C4: abut_ex can receive the additional blocks without 3561 * overflowing the (initialized) length limit. 
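		 *
		 * Illustrative example (hypothetical numbers): with an
		 * initialized abut_ex covering [90, 100) physically
		 * adjacent to an unwritten ex covering [100, 200), a write
		 * of 4 blocks at 100 shifts the boundary instead of
		 * splitting:
		 *
		 *	abut_ex becomes [90, 104), initialized
		 *	ex becomes [104, 200), still unwritten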
3562 */ 3563 if ((!ext4_ext_is_unwritten(abut_ex)) && /*C1*/ 3564 ((prev_lblk + prev_len) == ee_block) && /*C2*/ 3565 ((prev_pblk + prev_len) == ee_pblk) && /*C3*/ 3566 (prev_len < (EXT_INIT_MAX_LEN - map_len))) { /*C4*/ 3567 err = ext4_ext_get_access(handle, inode, path + depth); 3568 if (err) 3569 goto out; 3570 3571 trace_ext4_ext_convert_to_initialized_fastpath(inode, 3572 map, ex, abut_ex); 3573 3574 /* Shift the start of ex by 'map_len' blocks */ 3575 ex->ee_block = cpu_to_le32(ee_block + map_len); 3576 ext4_ext_store_pblock(ex, ee_pblk + map_len); 3577 ex->ee_len = cpu_to_le16(ee_len - map_len); 3578 ext4_ext_mark_unwritten(ex); /* Restore the flag */ 3579 3580 /* Extend abut_ex by 'map_len' blocks */ 3581 abut_ex->ee_len = cpu_to_le16(prev_len + map_len); 3582 3583 /* Result: number of initialized blocks past m_lblk */ 3584 allocated = map_len; 3585 } 3586 } else if (((map->m_lblk + map_len) == (ee_block + ee_len)) && 3587 (map_len < ee_len) && /*L1*/ 3588 ex < EXT_LAST_EXTENT(eh)) { /*L2*/ 3589 /* See if we can merge right */ 3590 ext4_lblk_t next_lblk; 3591 ext4_fsblk_t next_pblk, ee_pblk; 3592 unsigned int next_len; 3593 3594 abut_ex = ex + 1; 3595 next_lblk = le32_to_cpu(abut_ex->ee_block); 3596 next_len = ext4_ext_get_actual_len(abut_ex); 3597 next_pblk = ext4_ext_pblock(abut_ex); 3598 ee_pblk = ext4_ext_pblock(ex); 3599 3600 /* 3601 * A transfer of blocks from 'ex' to 'abut_ex' is allowed 3602 * upon those conditions: 3603 * - C1: abut_ex is initialized, 3604 * - C2: abut_ex is logically abutting ex, 3605 * - C3: abut_ex is physically abutting ex, 3606 * - C4: abut_ex can receive the additional blocks without 3607 * overflowing the (initialized) length limit. 3608 */ 3609 if ((!ext4_ext_is_unwritten(abut_ex)) && /*C1*/ 3610 ((map->m_lblk + map_len) == next_lblk) && /*C2*/ 3611 ((ee_pblk + ee_len) == next_pblk) && /*C3*/ 3612 (next_len < (EXT_INIT_MAX_LEN - map_len))) { /*C4*/ 3613 err = ext4_ext_get_access(handle, inode, path + depth); 3614 if (err) 3615 goto out; 3616 3617 trace_ext4_ext_convert_to_initialized_fastpath(inode, 3618 map, ex, abut_ex); 3619 3620 /* Shift the start of abut_ex by 'map_len' blocks */ 3621 abut_ex->ee_block = cpu_to_le32(next_lblk - map_len); 3622 ext4_ext_store_pblock(abut_ex, next_pblk - map_len); 3623 ex->ee_len = cpu_to_le16(ee_len - map_len); 3624 ext4_ext_mark_unwritten(ex); /* Restore the flag */ 3625 3626 /* Extend abut_ex by 'map_len' blocks */ 3627 abut_ex->ee_len = cpu_to_le16(next_len + map_len); 3628 3629 /* Result: number of initialized blocks past m_lblk */ 3630 allocated = map_len; 3631 } 3632 } 3633 if (allocated) { 3634 /* Mark the block containing both extents as dirty */ 3635 ext4_ext_dirty(handle, inode, path + depth); 3636 3637 /* Update path to point to the right extent */ 3638 path[depth].p_ext = abut_ex; 3639 goto out; 3640 } else 3641 allocated = ee_len - (map->m_lblk - ee_block); 3642 3643 WARN_ON(map->m_lblk < ee_block); 3644 /* 3645 * It is safe to convert extent to initialized via explicit 3646 * zeroout only if extent is fully inside i_size or new_size. 3647 */ 3648 split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0; 3649 3650 if (EXT4_EXT_MAY_ZEROOUT & split_flag) 3651 max_zeroout = sbi->s_extent_max_zeroout_kb >> 3652 (inode->i_sb->s_blocksize_bits - 10); 3653 3654 if (IS_ENCRYPTED(inode)) 3655 max_zeroout = 0; 3656 3657 /* 3658 * five cases: 3659 * 1. split the extent into three extents. 3660 * 2. split the extent into two extents, zeroout the head of the first 3661 * extent. 3662 * 3. 
split the extent into two extents, zeroout the tail of the second
	 *    extent.
	 * 4. split the extent into two extents without zeroout.
	 * 5. no splitting needed, just possibly zeroout the head and / or the
	 *    tail of the extent.
	 */
	split_map.m_lblk = map->m_lblk;
	split_map.m_len = map->m_len;

	if (max_zeroout && (allocated > split_map.m_len)) {
		if (allocated <= max_zeroout) {
			/* case 3 or 5 */
			zero_ex1.ee_block =
				 cpu_to_le32(split_map.m_lblk +
					     split_map.m_len);
			zero_ex1.ee_len =
				cpu_to_le16(allocated - split_map.m_len);
			ext4_ext_store_pblock(&zero_ex1,
				ext4_ext_pblock(ex) + split_map.m_lblk +
				split_map.m_len - ee_block);
			err = ext4_ext_zeroout(inode, &zero_ex1);
			if (err)
				goto out;
			split_map.m_len = allocated;
		}
		if (split_map.m_lblk - ee_block + split_map.m_len <
								max_zeroout) {
			/* case 2 or 5 */
			if (split_map.m_lblk != ee_block) {
				zero_ex2.ee_block = ex->ee_block;
				zero_ex2.ee_len = cpu_to_le16(split_map.m_lblk -
							      ee_block);
				ext4_ext_store_pblock(&zero_ex2,
						      ext4_ext_pblock(ex));
				err = ext4_ext_zeroout(inode, &zero_ex2);
				if (err)
					goto out;
			}

			split_map.m_len += split_map.m_lblk - ee_block;
			split_map.m_lblk = ee_block;
			allocated = map->m_len;
		}
	}

	err = ext4_split_extent(handle, inode, ppath, &split_map, split_flag,
				flags);
	if (err > 0)
		err = 0;
out:
	/* If we have gotten a failure, don't zero out status tree */
	if (!err) {
		err = ext4_zeroout_es(inode, &zero_ex1);
		if (!err)
			err = ext4_zeroout_es(inode, &zero_ex2);
	}
	return err ? err : allocated;
}

/*
 * This function is called by ext4_ext_map_blocks() from
 * ext4_get_blocks_dio_write() when a DIO write to an unwritten extent
 * is in progress.
 *
 * Writing to an unwritten extent may result in splitting the unwritten
 * extent into multiple initialized/unwritten extents (up to three)
 * There are three possibilities:
 *   a> There is no split required: Entire extent should be unwritten
 *   b> Splits in two extents: Write is happening at either end of the extent
 *   c> Splits in three extents: Someone is writing in middle of the extent
 *
 * This works the same way in the case of initialized -> unwritten conversion.
 *
 * One or more index blocks may be needed if the extent tree grows after
 * the unwritten extent split.  To prevent ENOSPC from occurring at IO
 * completion, we need to split the unwritten extent before the IO is
 * submitted.  The unwritten extent will be split into (at most) three
 * unwritten extents.  After the IO completes, the part that was written
 * will be converted to initialized by the end_io callback function via
 * ext4_convert_unwritten_extents().
 *
 * Returns the size of unwritten extent to be written on success.
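 *
 * Illustrative sequence (hypothetical numbers): a DIO write to blocks
 * [0, 10) of an unwritten extent [0, 100) is handled as
 *
 *	split [0, 100) -> [0, 10) + [10, 100), both still unwritten
 *	submit the IO for [0, 10)
 *	on completion, convert [0, 10) to initialized
 *
 * so the conversion at end_io time does not need to grow the tree.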
 */
static int ext4_split_convert_extents(handle_t *handle,
					struct inode *inode,
					struct ext4_map_blocks *map,
					struct ext4_ext_path **ppath,
					int flags)
{
	struct ext4_ext_path *path = *ppath;
	ext4_lblk_t eof_block;
	ext4_lblk_t ee_block;
	struct ext4_extent *ex;
	unsigned int ee_len;
	int split_flag = 0, depth;

	ext_debug("%s: inode %lu, logical block %llu, max_blocks %u\n",
		  __func__, inode->i_ino,
		  (unsigned long long)map->m_lblk, map->m_len);

	eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
		inode->i_sb->s_blocksize_bits;
	if (eof_block < map->m_lblk + map->m_len)
		eof_block = map->m_lblk + map->m_len;
	/*
	 * It is safe to convert extent to initialized via explicit
	 * zeroout only if extent is fully inside i_size or new_size.
	 */
	depth = ext_depth(inode);
	ex = path[depth].p_ext;
	ee_block = le32_to_cpu(ex->ee_block);
	ee_len = ext4_ext_get_actual_len(ex);

	/* Convert to unwritten */
	if (flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN) {
		split_flag |= EXT4_EXT_DATA_VALID1;
	/* Convert to initialized */
	} else if (flags & EXT4_GET_BLOCKS_CONVERT) {
		split_flag |= ee_block + ee_len <= eof_block ?
			      EXT4_EXT_MAY_ZEROOUT : 0;
		split_flag |= (EXT4_EXT_MARK_UNWRIT2 | EXT4_EXT_DATA_VALID2);
	}
	flags |= EXT4_GET_BLOCKS_PRE_IO;
	return ext4_split_extent(handle, inode, ppath, map, split_flag, flags);
}

static int ext4_convert_unwritten_extents_endio(handle_t *handle,
						struct inode *inode,
						struct ext4_map_blocks *map,
						struct ext4_ext_path **ppath)
{
	struct ext4_ext_path *path = *ppath;
	struct ext4_extent *ex;
	ext4_lblk_t ee_block;
	unsigned int ee_len;
	int depth;
	int err = 0;

	depth = ext_depth(inode);
	ex = path[depth].p_ext;
	ee_block = le32_to_cpu(ex->ee_block);
	ee_len = ext4_ext_get_actual_len(ex);

	ext_debug("ext4_convert_unwritten_extents_endio: inode %lu, logical "
		  "block %llu, max_blocks %u\n", inode->i_ino,
		  (unsigned long long)ee_block, ee_len);

	/* If the extent is larger than requested, it is a clear sign that we
	 * still have some extent state machine issues left.  So extent_split
	 * is still required.
	 * TODO: Once all related issues are fixed, this situation should be
	 * illegal.
3814 */ 3815 if (ee_block != map->m_lblk || ee_len > map->m_len) { 3816 #ifdef EXT4_DEBUG 3817 ext4_warning("Inode (%ld) finished: extent logical block %llu," 3818 " len %u; IO logical block %llu, len %u", 3819 inode->i_ino, (unsigned long long)ee_block, ee_len, 3820 (unsigned long long)map->m_lblk, map->m_len); 3821 #endif 3822 err = ext4_split_convert_extents(handle, inode, map, ppath, 3823 EXT4_GET_BLOCKS_CONVERT); 3824 if (err < 0) 3825 return err; 3826 path = ext4_find_extent(inode, map->m_lblk, ppath, 0); 3827 if (IS_ERR(path)) 3828 return PTR_ERR(path); 3829 depth = ext_depth(inode); 3830 ex = path[depth].p_ext; 3831 } 3832 3833 err = ext4_ext_get_access(handle, inode, path + depth); 3834 if (err) 3835 goto out; 3836 /* first mark the extent as initialized */ 3837 ext4_ext_mark_initialized(ex); 3838 3839 /* note: ext4_ext_correct_indexes() isn't needed here because 3840 * borders are not changed 3841 */ 3842 ext4_ext_try_to_merge(handle, inode, path, ex); 3843 3844 /* Mark modified extent as dirty */ 3845 err = ext4_ext_dirty(handle, inode, path + path->p_depth); 3846 out: 3847 ext4_ext_show_leaf(inode, path); 3848 return err; 3849 } 3850 3851 /* 3852 * Handle EOFBLOCKS_FL flag, clearing it if necessary 3853 */ 3854 static int check_eofblocks_fl(handle_t *handle, struct inode *inode, 3855 ext4_lblk_t lblk, 3856 struct ext4_ext_path *path, 3857 unsigned int len) 3858 { 3859 int i, depth; 3860 struct ext4_extent_header *eh; 3861 struct ext4_extent *last_ex; 3862 3863 if (!ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS)) 3864 return 0; 3865 3866 depth = ext_depth(inode); 3867 eh = path[depth].p_hdr; 3868 3869 /* 3870 * We're going to remove EOFBLOCKS_FL entirely in future so we 3871 * do not care for this case anymore. Simply remove the flag 3872 * if there are no extents. 3873 */ 3874 if (unlikely(!eh->eh_entries)) 3875 goto out; 3876 last_ex = EXT_LAST_EXTENT(eh); 3877 /* 3878 * We should clear the EOFBLOCKS_FL flag if we are writing the 3879 * last block in the last extent in the file. We test this by 3880 * first checking to see if the caller to 3881 * ext4_ext_get_blocks() was interested in the last block (or 3882 * a block beyond the last block) in the current extent. If 3883 * this turns out to be false, we can bail out from this 3884 * function immediately. 3885 */ 3886 if (lblk + len < le32_to_cpu(last_ex->ee_block) + 3887 ext4_ext_get_actual_len(last_ex)) 3888 return 0; 3889 /* 3890 * If the caller does appear to be planning to write at or 3891 * beyond the end of the current extent, we then test to see 3892 * if the current extent is the last extent in the file, by 3893 * checking to make sure it was reached via the rightmost node 3894 * at each level of the tree. 
3895 */ 3896 for (i = depth-1; i >= 0; i--) 3897 if (path[i].p_idx != EXT_LAST_INDEX(path[i].p_hdr)) 3898 return 0; 3899 out: 3900 ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS); 3901 return ext4_mark_inode_dirty(handle, inode); 3902 } 3903 3904 static int 3905 convert_initialized_extent(handle_t *handle, struct inode *inode, 3906 struct ext4_map_blocks *map, 3907 struct ext4_ext_path **ppath, 3908 unsigned int allocated) 3909 { 3910 struct ext4_ext_path *path = *ppath; 3911 struct ext4_extent *ex; 3912 ext4_lblk_t ee_block; 3913 unsigned int ee_len; 3914 int depth; 3915 int err = 0; 3916 3917 /* 3918 * Make sure that the extent is no bigger than we support with 3919 * unwritten extent 3920 */ 3921 if (map->m_len > EXT_UNWRITTEN_MAX_LEN) 3922 map->m_len = EXT_UNWRITTEN_MAX_LEN / 2; 3923 3924 depth = ext_depth(inode); 3925 ex = path[depth].p_ext; 3926 ee_block = le32_to_cpu(ex->ee_block); 3927 ee_len = ext4_ext_get_actual_len(ex); 3928 3929 ext_debug("%s: inode %lu, logical" 3930 "block %llu, max_blocks %u\n", __func__, inode->i_ino, 3931 (unsigned long long)ee_block, ee_len); 3932 3933 if (ee_block != map->m_lblk || ee_len > map->m_len) { 3934 err = ext4_split_convert_extents(handle, inode, map, ppath, 3935 EXT4_GET_BLOCKS_CONVERT_UNWRITTEN); 3936 if (err < 0) 3937 return err; 3938 path = ext4_find_extent(inode, map->m_lblk, ppath, 0); 3939 if (IS_ERR(path)) 3940 return PTR_ERR(path); 3941 depth = ext_depth(inode); 3942 ex = path[depth].p_ext; 3943 if (!ex) { 3944 EXT4_ERROR_INODE(inode, "unexpected hole at %lu", 3945 (unsigned long) map->m_lblk); 3946 return -EFSCORRUPTED; 3947 } 3948 } 3949 3950 err = ext4_ext_get_access(handle, inode, path + depth); 3951 if (err) 3952 return err; 3953 /* first mark the extent as unwritten */ 3954 ext4_ext_mark_unwritten(ex); 3955 3956 /* note: ext4_ext_correct_indexes() isn't needed here because 3957 * borders are not changed 3958 */ 3959 ext4_ext_try_to_merge(handle, inode, path, ex); 3960 3961 /* Mark modified extent as dirty */ 3962 err = ext4_ext_dirty(handle, inode, path + path->p_depth); 3963 if (err) 3964 return err; 3965 ext4_ext_show_leaf(inode, path); 3966 3967 ext4_update_inode_fsync_trans(handle, inode, 1); 3968 err = check_eofblocks_fl(handle, inode, map->m_lblk, path, map->m_len); 3969 if (err) 3970 return err; 3971 map->m_flags |= EXT4_MAP_UNWRITTEN; 3972 if (allocated > map->m_len) 3973 allocated = map->m_len; 3974 map->m_len = allocated; 3975 return allocated; 3976 } 3977 3978 static int 3979 ext4_ext_handle_unwritten_extents(handle_t *handle, struct inode *inode, 3980 struct ext4_map_blocks *map, 3981 struct ext4_ext_path **ppath, int flags, 3982 unsigned int allocated, ext4_fsblk_t newblock) 3983 { 3984 struct ext4_ext_path *path = *ppath; 3985 int ret = 0; 3986 int err = 0; 3987 3988 ext_debug("ext4_ext_handle_unwritten_extents: inode %lu, logical " 3989 "block %llu, max_blocks %u, flags %x, allocated %u\n", 3990 inode->i_ino, (unsigned long long)map->m_lblk, map->m_len, 3991 flags, allocated); 3992 ext4_ext_show_leaf(inode, path); 3993 3994 /* 3995 * When writing into unwritten space, we should not fail to 3996 * allocate metadata blocks for the new extent block if needed. 
3997 */ 3998 flags |= EXT4_GET_BLOCKS_METADATA_NOFAIL; 3999 4000 trace_ext4_ext_handle_unwritten_extents(inode, map, flags, 4001 allocated, newblock); 4002 4003 /* get_block() before submit the IO, split the extent */ 4004 if (flags & EXT4_GET_BLOCKS_PRE_IO) { 4005 ret = ext4_split_convert_extents(handle, inode, map, ppath, 4006 flags | EXT4_GET_BLOCKS_CONVERT); 4007 if (ret <= 0) 4008 goto out; 4009 map->m_flags |= EXT4_MAP_UNWRITTEN; 4010 goto out; 4011 } 4012 /* IO end_io complete, convert the filled extent to written */ 4013 if (flags & EXT4_GET_BLOCKS_CONVERT) { 4014 if (flags & EXT4_GET_BLOCKS_ZERO) { 4015 if (allocated > map->m_len) 4016 allocated = map->m_len; 4017 err = ext4_issue_zeroout(inode, map->m_lblk, newblock, 4018 allocated); 4019 if (err < 0) 4020 goto out2; 4021 } 4022 ret = ext4_convert_unwritten_extents_endio(handle, inode, map, 4023 ppath); 4024 if (ret >= 0) { 4025 ext4_update_inode_fsync_trans(handle, inode, 1); 4026 err = check_eofblocks_fl(handle, inode, map->m_lblk, 4027 path, map->m_len); 4028 } else 4029 err = ret; 4030 map->m_flags |= EXT4_MAP_MAPPED; 4031 map->m_pblk = newblock; 4032 if (allocated > map->m_len) 4033 allocated = map->m_len; 4034 map->m_len = allocated; 4035 goto out2; 4036 } 4037 /* buffered IO case */ 4038 /* 4039 * repeat fallocate creation request 4040 * we already have an unwritten extent 4041 */ 4042 if (flags & EXT4_GET_BLOCKS_UNWRIT_EXT) { 4043 map->m_flags |= EXT4_MAP_UNWRITTEN; 4044 goto map_out; 4045 } 4046 4047 /* buffered READ or buffered write_begin() lookup */ 4048 if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) { 4049 /* 4050 * We have blocks reserved already. We 4051 * return allocated blocks so that delalloc 4052 * won't do block reservation for us. But 4053 * the buffer head will be unmapped so that 4054 * a read from the block returns 0s. 4055 */ 4056 map->m_flags |= EXT4_MAP_UNWRITTEN; 4057 goto out1; 4058 } 4059 4060 /* buffered write, writepage time, convert*/ 4061 ret = ext4_ext_convert_to_initialized(handle, inode, map, ppath, flags); 4062 if (ret >= 0) 4063 ext4_update_inode_fsync_trans(handle, inode, 1); 4064 out: 4065 if (ret <= 0) { 4066 err = ret; 4067 goto out2; 4068 } else 4069 allocated = ret; 4070 map->m_flags |= EXT4_MAP_NEW; 4071 if (allocated > map->m_len) 4072 allocated = map->m_len; 4073 map->m_len = allocated; 4074 4075 map_out: 4076 map->m_flags |= EXT4_MAP_MAPPED; 4077 if ((flags & EXT4_GET_BLOCKS_KEEP_SIZE) == 0) { 4078 err = check_eofblocks_fl(handle, inode, map->m_lblk, path, 4079 map->m_len); 4080 if (err < 0) 4081 goto out2; 4082 } 4083 out1: 4084 if (allocated > map->m_len) 4085 allocated = map->m_len; 4086 ext4_ext_show_leaf(inode, path); 4087 map->m_pblk = newblock; 4088 map->m_len = allocated; 4089 out2: 4090 return err ? err : allocated; 4091 } 4092 4093 /* 4094 * get_implied_cluster_alloc - check to see if the requested 4095 * allocation (in the map structure) overlaps with a cluster already 4096 * allocated in an extent. 4097 * @sb The filesystem superblock structure 4098 * @map The requested lblk->pblk mapping 4099 * @ex The extent structure which might contain an implied 4100 * cluster allocation 4101 * 4102 * This function is called by ext4_ext_map_blocks() after we failed to 4103 * find blocks that were already in the inode's extent tree. Hence, 4104 * we know that the beginning of the requested region cannot overlap 4105 * the extent from the inode's extent tree. There are three cases we 4106 * want to catch. 
The first is this case: 4107 * 4108 * |--- cluster # N--| 4109 * |--- extent ---| |---- requested region ---| 4110 * |==========| 4111 * 4112 * The second case that we need to test for is this one: 4113 * 4114 * |--------- cluster # N ----------------| 4115 * |--- requested region --| |------- extent ----| 4116 * |=======================| 4117 * 4118 * The third case is when the requested region lies between two extents 4119 * within the same cluster: 4120 * |------------- cluster # N-------------| 4121 * |----- ex -----| |---- ex_right ----| 4122 * |------ requested region ------| 4123 * |================| 4124 * 4125 * In each of the above cases, we need to set map->m_pblk and 4126 * map->m_len so that they correspond to the extent labelled as 4127 * "|====|" from cluster #N, since it is already in use for data in 4128 * cluster EXT4_B2C(sbi, map->m_lblk). We will then return 1 to 4129 * signal to ext4_ext_map_blocks() that map->m_pblk should be treated 4130 * as a new "allocated" block region. Otherwise, we will return 0 and 4131 * ext4_ext_map_blocks() will then allocate one or more new clusters 4132 * by calling ext4_mb_new_blocks(). 4133 */ 4134 static int get_implied_cluster_alloc(struct super_block *sb, 4135 struct ext4_map_blocks *map, 4136 struct ext4_extent *ex, 4137 struct ext4_ext_path *path) 4138 { 4139 struct ext4_sb_info *sbi = EXT4_SB(sb); 4140 ext4_lblk_t c_offset = EXT4_LBLK_COFF(sbi, map->m_lblk); 4141 ext4_lblk_t ex_cluster_start, ex_cluster_end; 4142 ext4_lblk_t rr_cluster_start; 4143 ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block); 4144 ext4_fsblk_t ee_start = ext4_ext_pblock(ex); 4145 unsigned short ee_len = ext4_ext_get_actual_len(ex); 4146 4147 /* The extent passed in that we are trying to match */ 4148 ex_cluster_start = EXT4_B2C(sbi, ee_block); 4149 ex_cluster_end = EXT4_B2C(sbi, ee_block + ee_len - 1); 4150 4151 /* The requested region passed into ext4_map_blocks() */ 4152 rr_cluster_start = EXT4_B2C(sbi, map->m_lblk); 4153 4154 if ((rr_cluster_start == ex_cluster_end) || 4155 (rr_cluster_start == ex_cluster_start)) { 4156 if (rr_cluster_start == ex_cluster_end) 4157 ee_start += ee_len - 1; 4158 map->m_pblk = EXT4_PBLK_CMASK(sbi, ee_start) + c_offset; 4159 map->m_len = min(map->m_len, 4160 (unsigned) sbi->s_cluster_ratio - c_offset); 4161 /* 4162 * Check for and handle this case: 4163 * 4164 * |--------- cluster # N-------------| 4165 * |------- extent ----| 4166 * |--- requested region ---| 4167 * |===========| 4168 */ 4169 4170 if (map->m_lblk < ee_block) 4171 map->m_len = min(map->m_len, ee_block - map->m_lblk); 4172 4173 /* 4174 * Check for the case where there is already another allocated 4175 * block to the right of 'ex' but before the end of the cluster. 4176 * 4177 * |------------- cluster # N-------------| 4178 * |----- ex -----| |---- ex_right ----| 4179 * |------ requested region ------| 4180 * |================| 4181 */ 4182 if (map->m_lblk > ee_block) { 4183 ext4_lblk_t next = ext4_ext_next_allocated_block(path); 4184 map->m_len = min(map->m_len, next - map->m_lblk); 4185 } 4186 4187 trace_ext4_get_implied_cluster_alloc_exit(sb, map, 1); 4188 return 1; 4189 } 4190 4191 trace_ext4_get_implied_cluster_alloc_exit(sb, map, 0); 4192 return 0; 4193 } 4194 4195 4196 /* 4197 * Block allocation/map/preallocation routine for extent-based files 4198 * 4199 * 4200 * Needs to be called with 4201 * down_read(&EXT4_I(inode)->i_data_sem) if not allocating a file system block 4202 * (ie, create is zero).
Otherwise down_write(&EXT4_I(inode)->i_data_sem) 4203 * 4204 * return > 0, number of blocks already mapped/allocated 4205 * if create == 0 and these are pre-allocated blocks 4206 * buffer head is unmapped 4207 * otherwise blocks are mapped 4208 * 4209 * return = 0, if plain lookup failed (blocks have not been allocated) 4210 * buffer head is unmapped 4211 * 4212 * return < 0, error case. 4213 */ 4214 int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, 4215 struct ext4_map_blocks *map, int flags) 4216 { 4217 struct ext4_ext_path *path = NULL; 4218 struct ext4_extent newex, *ex, *ex2; 4219 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 4220 ext4_fsblk_t newblock = 0; 4221 int free_on_err = 0, err = 0, depth, ret; 4222 unsigned int allocated = 0, offset = 0; 4223 unsigned int allocated_clusters = 0; 4224 struct ext4_allocation_request ar; 4225 ext4_lblk_t cluster_offset; 4226 bool map_from_cluster = false; 4227 4228 ext_debug("blocks %u/%u requested for inode %lu\n", 4229 map->m_lblk, map->m_len, inode->i_ino); 4230 trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags); 4231 4232 /* find the extent for this block */ 4233 path = ext4_find_extent(inode, map->m_lblk, NULL, 0); 4234 if (IS_ERR(path)) { 4235 err = PTR_ERR(path); 4236 path = NULL; 4237 goto out2; 4238 } 4239 4240 depth = ext_depth(inode); 4241 4242 /* 4243 * a consistent leaf must not be empty; 4244 * this situation is possible, though, _during_ tree modification; 4245 * this is why the assert can't be put in ext4_find_extent() 4246 */ 4247 if (unlikely(path[depth].p_ext == NULL && depth != 0)) { 4248 EXT4_ERROR_INODE(inode, "bad extent address " 4249 "lblock: %lu, depth: %d pblock %lld", 4250 (unsigned long) map->m_lblk, depth, 4251 path[depth].p_block); 4252 err = -EFSCORRUPTED; 4253 goto out2; 4254 } 4255 4256 ex = path[depth].p_ext; 4257 if (ex) { 4258 ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block); 4259 ext4_fsblk_t ee_start = ext4_ext_pblock(ex); 4260 unsigned short ee_len; 4261 4262 4263 /* 4264 * unwritten extents are treated as holes, except that 4265 * we split out initialized portions during a write. 4266 */ 4267 ee_len = ext4_ext_get_actual_len(ex); 4268 4269 trace_ext4_ext_show_extent(inode, ee_block, ee_start, ee_len); 4270 4271 /* if the found extent covers the block, simply return it */ 4272 if (in_range(map->m_lblk, ee_block, ee_len)) { 4273 newblock = map->m_lblk - ee_block + ee_start; 4274 /* number of remaining blocks in the extent */ 4275 allocated = ee_len - (map->m_lblk - ee_block); 4276 ext_debug("%u fit into %u:%d -> %llu\n", map->m_lblk, 4277 ee_block, ee_len, newblock); 4278 4279 /* 4280 * If the extent is initialized, check whether the 4281 * caller wants to convert it to unwritten.
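 *
 * (A hedged caller-side note: ext4_zero_range() below reaches this
 * point with EXT4_GET_BLOCKS_CONVERT_UNWRITTEN set, so an extent
 * that is already initialized is handed to
 * convert_initialized_extent() rather than being returned as a
 * plain lookup hit.)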
4282 */ 4283 if ((!ext4_ext_is_unwritten(ex)) && 4284 (flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN)) { 4285 allocated = convert_initialized_extent( 4286 handle, inode, map, &path, 4287 allocated); 4288 goto out2; 4289 } else if (!ext4_ext_is_unwritten(ex)) 4290 goto out; 4291 4292 ret = ext4_ext_handle_unwritten_extents( 4293 handle, inode, map, &path, flags, 4294 allocated, newblock); 4295 if (ret < 0) 4296 err = ret; 4297 else 4298 allocated = ret; 4299 goto out2; 4300 } 4301 } 4302 4303 /* 4304 * requested block isn't allocated yet; 4305 * we couldn't try to create block if create flag is zero 4306 */ 4307 if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) { 4308 ext4_lblk_t hole_start, hole_len; 4309 4310 hole_start = map->m_lblk; 4311 hole_len = ext4_ext_determine_hole(inode, path, &hole_start); 4312 /* 4313 * put just found gap into cache to speed up 4314 * subsequent requests 4315 */ 4316 ext4_ext_put_gap_in_cache(inode, hole_start, hole_len); 4317 4318 /* Update hole_len to reflect hole size after map->m_lblk */ 4319 if (hole_start != map->m_lblk) 4320 hole_len -= map->m_lblk - hole_start; 4321 map->m_pblk = 0; 4322 map->m_len = min_t(unsigned int, map->m_len, hole_len); 4323 4324 goto out2; 4325 } 4326 4327 /* 4328 * Okay, we need to do block allocation. 4329 */ 4330 newex.ee_block = cpu_to_le32(map->m_lblk); 4331 cluster_offset = EXT4_LBLK_COFF(sbi, map->m_lblk); 4332 4333 /* 4334 * If we are doing bigalloc, check to see if the extent returned 4335 * by ext4_find_extent() implies a cluster we can use. 4336 */ 4337 if (cluster_offset && ex && 4338 get_implied_cluster_alloc(inode->i_sb, map, ex, path)) { 4339 ar.len = allocated = map->m_len; 4340 newblock = map->m_pblk; 4341 map_from_cluster = true; 4342 goto got_allocated_blocks; 4343 } 4344 4345 /* find neighbour allocated blocks */ 4346 ar.lleft = map->m_lblk; 4347 err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft); 4348 if (err) 4349 goto out2; 4350 ar.lright = map->m_lblk; 4351 ex2 = NULL; 4352 err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright, &ex2); 4353 if (err) 4354 goto out2; 4355 4356 /* Check if the extent after searching to the right implies a 4357 * cluster we can use. */ 4358 if ((sbi->s_cluster_ratio > 1) && ex2 && 4359 get_implied_cluster_alloc(inode->i_sb, map, ex2, path)) { 4360 ar.len = allocated = map->m_len; 4361 newblock = map->m_pblk; 4362 map_from_cluster = true; 4363 goto got_allocated_blocks; 4364 } 4365 4366 /* 4367 * See if request is beyond maximum number of blocks we can have in 4368 * a single extent. For an initialized extent this limit is 4369 * EXT_INIT_MAX_LEN and for an unwritten extent this limit is 4370 * EXT_UNWRITTEN_MAX_LEN. 
4371 */ 4372 if (map->m_len > EXT_INIT_MAX_LEN && 4373 !(flags & EXT4_GET_BLOCKS_UNWRIT_EXT)) 4374 map->m_len = EXT_INIT_MAX_LEN; 4375 else if (map->m_len > EXT_UNWRITTEN_MAX_LEN && 4376 (flags & EXT4_GET_BLOCKS_UNWRIT_EXT)) 4377 map->m_len = EXT_UNWRITTEN_MAX_LEN; 4378 4379 /* Check if we can really insert (m_lblk)::(m_lblk + m_len) extent */ 4380 newex.ee_len = cpu_to_le16(map->m_len); 4381 err = ext4_ext_check_overlap(sbi, inode, &newex, path); 4382 if (err) 4383 allocated = ext4_ext_get_actual_len(&newex); 4384 else 4385 allocated = map->m_len; 4386 4387 /* allocate new block */ 4388 ar.inode = inode; 4389 ar.goal = ext4_ext_find_goal(inode, path, map->m_lblk); 4390 ar.logical = map->m_lblk; 4391 /* 4392 * We calculate the offset from the beginning of the cluster 4393 * for the logical block number, since when we allocate a 4394 * physical cluster, the physical block should start at the 4395 * same offset from the beginning of the cluster. This is 4396 * needed so that future calls to get_implied_cluster_alloc() 4397 * work correctly. 4398 */ 4399 offset = EXT4_LBLK_COFF(sbi, map->m_lblk); 4400 ar.len = EXT4_NUM_B2C(sbi, offset+allocated); 4401 ar.goal -= offset; 4402 ar.logical -= offset; 4403 if (S_ISREG(inode->i_mode)) 4404 ar.flags = EXT4_MB_HINT_DATA; 4405 else 4406 /* disable in-core preallocation for non-regular files */ 4407 ar.flags = 0; 4408 if (flags & EXT4_GET_BLOCKS_NO_NORMALIZE) 4409 ar.flags |= EXT4_MB_HINT_NOPREALLOC; 4410 if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) 4411 ar.flags |= EXT4_MB_DELALLOC_RESERVED; 4412 if (flags & EXT4_GET_BLOCKS_METADATA_NOFAIL) 4413 ar.flags |= EXT4_MB_USE_RESERVED; 4414 newblock = ext4_mb_new_blocks(handle, &ar, &err); 4415 if (!newblock) 4416 goto out2; 4417 ext_debug("allocate new block: goal %llu, found %llu/%u\n", 4418 ar.goal, newblock, allocated); 4419 free_on_err = 1; 4420 allocated_clusters = ar.len; 4421 ar.len = EXT4_C2B(sbi, ar.len) - offset; 4422 if (ar.len > allocated) 4423 ar.len = allocated; 4424 4425 got_allocated_blocks: 4426 /* try to insert new extent into found leaf and return */ 4427 ext4_ext_store_pblock(&newex, newblock + offset); 4428 newex.ee_len = cpu_to_le16(ar.len); 4429 /* Mark unwritten */ 4430 if (flags & EXT4_GET_BLOCKS_UNWRIT_EXT){ 4431 ext4_ext_mark_unwritten(&newex); 4432 map->m_flags |= EXT4_MAP_UNWRITTEN; 4433 } 4434 4435 err = 0; 4436 if ((flags & EXT4_GET_BLOCKS_KEEP_SIZE) == 0) 4437 err = check_eofblocks_fl(handle, inode, map->m_lblk, 4438 path, ar.len); 4439 if (!err) 4440 err = ext4_ext_insert_extent(handle, inode, &path, 4441 &newex, flags); 4442 4443 if (err && free_on_err) { 4444 int fb_flags = flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE ? 4445 EXT4_FREE_BLOCKS_NO_QUOT_UPDATE : 0; 4446 /* free data blocks we just allocated */ 4447 /* not a good idea to call discard here directly, 4448 * but otherwise we'd need to call it every free() */ 4449 ext4_discard_preallocations(inode); 4450 ext4_free_blocks(handle, inode, NULL, newblock, 4451 EXT4_C2B(sbi, allocated_clusters), fb_flags); 4452 goto out2; 4453 } 4454 4455 /* previous routine could use block we allocated */ 4456 newblock = ext4_ext_pblock(&newex); 4457 allocated = ext4_ext_get_actual_len(&newex); 4458 if (allocated > map->m_len) 4459 allocated = map->m_len; 4460 map->m_flags |= EXT4_MAP_NEW; 4461 4462 /* 4463 * Reduce the reserved cluster count to reflect successful deferred 4464 * allocation of delayed allocated clusters or direct allocation of 4465 * clusters discovered to be delayed allocated. 
Once allocated, a 4466 * cluster is not included in the reserved count. 4467 */ 4468 if (test_opt(inode->i_sb, DELALLOC) && !map_from_cluster) { 4469 if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) { 4470 /* 4471 * When allocating delayed allocated clusters, simply 4472 * reduce the reserved cluster count and claim quota 4473 */ 4474 ext4_da_update_reserve_space(inode, allocated_clusters, 4475 1); 4476 } else { 4477 ext4_lblk_t lblk, len; 4478 unsigned int n; 4479 4480 /* 4481 * When allocating non-delayed allocated clusters 4482 * (from fallocate, filemap, DIO, or clusters 4483 * allocated when delalloc has been disabled by 4484 * ext4_nonda_switch), reduce the reserved cluster 4485 * count by the number of allocated clusters that 4486 * have previously been delayed allocated. Quota 4487 * has been claimed by ext4_mb_new_blocks() above, 4488 * so release the quota reservations made for any 4489 * previously delayed allocated clusters. 4490 */ 4491 lblk = EXT4_LBLK_CMASK(sbi, map->m_lblk); 4492 len = allocated_clusters << sbi->s_cluster_bits; 4493 n = ext4_es_delayed_clu(inode, lblk, len); 4494 if (n > 0) 4495 ext4_da_update_reserve_space(inode, (int) n, 0); 4496 } 4497 } 4498 4499 /* 4500 * Cache the extent and update transaction to commit on fdatasync only 4501 * when it is _not_ an unwritten extent. 4502 */ 4503 if ((flags & EXT4_GET_BLOCKS_UNWRIT_EXT) == 0) 4504 ext4_update_inode_fsync_trans(handle, inode, 1); 4505 else 4506 ext4_update_inode_fsync_trans(handle, inode, 0); 4507 out: 4508 if (allocated > map->m_len) 4509 allocated = map->m_len; 4510 ext4_ext_show_leaf(inode, path); 4511 map->m_flags |= EXT4_MAP_MAPPED; 4512 map->m_pblk = newblock; 4513 map->m_len = allocated; 4514 out2: 4515 ext4_ext_drop_refs(path); 4516 kfree(path); 4517 4518 trace_ext4_ext_map_blocks_exit(inode, flags, map, 4519 err ? err : allocated); 4520 return err ? err : allocated; 4521 } 4522 4523 int ext4_ext_truncate(handle_t *handle, struct inode *inode) 4524 { 4525 struct super_block *sb = inode->i_sb; 4526 ext4_lblk_t last_block; 4527 int err = 0; 4528 4529 /* 4530 * TODO: optimization is possible here. 4531 * Probably we need not scan at all, 4532 * because page truncation is enough. 4533 */ 4534 4535 /* we have to know where to truncate from in crash case */ 4536 EXT4_I(inode)->i_disksize = inode->i_size; 4537 err = ext4_mark_inode_dirty(handle, inode); 4538 if (err) 4539 return err; 4540 4541 last_block = (inode->i_size + sb->s_blocksize - 1) 4542 >> EXT4_BLOCK_SIZE_BITS(sb); 4543 retry: 4544 err = ext4_es_remove_extent(inode, last_block, 4545 EXT_MAX_BLOCKS - last_block); 4546 if (err == -ENOMEM) { 4547 cond_resched(); 4548 congestion_wait(BLK_RW_ASYNC, HZ/50); 4549 goto retry; 4550 } 4551 if (err) 4552 return err; 4553 return ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCKS - 1); 4554 } 4555 4556 static int ext4_alloc_file_blocks(struct file *file, ext4_lblk_t offset, 4557 ext4_lblk_t len, loff_t new_size, 4558 int flags) 4559 { 4560 struct inode *inode = file_inode(file); 4561 handle_t *handle; 4562 int ret = 0; 4563 int ret2 = 0; 4564 int retries = 0; 4565 int depth = 0; 4566 struct ext4_map_blocks map; 4567 unsigned int credits; 4568 loff_t epos; 4569 4570 BUG_ON(!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)); 4571 map.m_lblk = offset; 4572 map.m_len = len; 4573 /* 4574 * Don't normalize the request if it can fit in one extent so 4575 * that it doesn't get unnecessarily split into multiple 4576 * extents. 
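 *
 * (For example, a 64 MiB fallocate with 4 KiB blocks is 16384 blocks,
 * which fits in one unwritten extent, so the flag below keeps mballoc
 * from normalizing the request and splitting it needlessly.)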
4577 */ 4578 if (len <= EXT_UNWRITTEN_MAX_LEN) 4579 flags |= EXT4_GET_BLOCKS_NO_NORMALIZE; 4580 4581 /* 4582 * credits to insert 1 extent into the extent tree 4583 */ 4584 credits = ext4_chunk_trans_blocks(inode, len); 4585 depth = ext_depth(inode); 4586 4587 retry: 4588 while (ret >= 0 && len) { 4589 /* 4590 * Recalculate credits when the extent tree depth changes. 4591 */ 4592 if (depth != ext_depth(inode)) { 4593 credits = ext4_chunk_trans_blocks(inode, len); 4594 depth = ext_depth(inode); 4595 } 4596 4597 handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, 4598 credits); 4599 if (IS_ERR(handle)) { 4600 ret = PTR_ERR(handle); 4601 break; 4602 } 4603 ret = ext4_map_blocks(handle, inode, &map, flags); 4604 if (ret <= 0) { 4605 ext4_debug("inode #%lu: block %u: len %u: " 4606 "ext4_ext_map_blocks returned %d", 4607 inode->i_ino, map.m_lblk, 4608 map.m_len, ret); 4609 ext4_mark_inode_dirty(handle, inode); 4610 ret2 = ext4_journal_stop(handle); 4611 break; 4612 } 4613 map.m_lblk += ret; 4614 map.m_len = len = len - ret; 4615 epos = (loff_t)map.m_lblk << inode->i_blkbits; 4616 inode->i_ctime = current_time(inode); 4617 if (new_size) { 4618 if (epos > new_size) 4619 epos = new_size; 4620 if (ext4_update_inode_size(inode, epos) & 0x1) 4621 inode->i_mtime = inode->i_ctime; 4622 } else { 4623 if (epos > inode->i_size) 4624 ext4_set_inode_flag(inode, 4625 EXT4_INODE_EOFBLOCKS); 4626 } 4627 ext4_mark_inode_dirty(handle, inode); 4628 ext4_update_inode_fsync_trans(handle, inode, 1); 4629 ret2 = ext4_journal_stop(handle); 4630 if (ret2) 4631 break; 4632 } 4633 if (ret == -ENOSPC && 4634 ext4_should_retry_alloc(inode->i_sb, &retries)) { 4635 ret = 0; 4636 goto retry; 4637 } 4638 4639 return ret > 0 ? ret2 : ret; 4640 } 4641 4642 static long ext4_zero_range(struct file *file, loff_t offset, 4643 loff_t len, int mode) 4644 { 4645 struct inode *inode = file_inode(file); 4646 handle_t *handle = NULL; 4647 unsigned int max_blocks; 4648 loff_t new_size = 0; 4649 int ret = 0; 4650 int flags; 4651 int credits; 4652 int partial_begin, partial_end; 4653 loff_t start, end; 4654 ext4_lblk_t lblk; 4655 unsigned int blkbits = inode->i_blkbits; 4656 4657 trace_ext4_zero_range(inode, offset, len, mode); 4658 4659 if (!S_ISREG(inode->i_mode)) 4660 return -EINVAL; 4661 4662 /* Call ext4_force_commit to flush all data in case of data=journal. */ 4663 if (ext4_should_journal_data(inode)) { 4664 ret = ext4_force_commit(inode->i_sb); 4665 if (ret) 4666 return ret; 4667 } 4668 4669 /* 4670 * Round up offset. This is not fallocate, we need to zero out 4671 * blocks, so convert the interior block-aligned part of the range to 4672 * unwritten and possibly manually zero out unaligned parts of the 4673 * range.
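 *
 * A hedged worked example with 4 KiB blocks: offset = 1000 and
 * len = 10000 give start = 4096 and end = 8192, so exactly one
 * interior block (lblk 1) is converted to unwritten, while the
 * partial head [1000, 4095] and partial tail [8192, 10999] are
 * zeroed by ext4_zero_partial_blocks() at the end.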
4674 */ 4675 start = round_up(offset, 1 << blkbits); 4676 end = round_down((offset + len), 1 << blkbits); 4677 4678 if (start < offset || end > offset + len) 4679 return -EINVAL; 4680 partial_begin = offset & ((1 << blkbits) - 1); 4681 partial_end = (offset + len) & ((1 << blkbits) - 1); 4682 4683 lblk = start >> blkbits; 4684 max_blocks = (end >> blkbits); 4685 if (max_blocks < lblk) 4686 max_blocks = 0; 4687 else 4688 max_blocks -= lblk; 4689 4690 inode_lock(inode); 4691 4692 /* 4693 * Indirect files do not support unwritten extents 4694 */ 4695 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) { 4696 ret = -EOPNOTSUPP; 4697 goto out_mutex; 4698 } 4699 4700 if (!(mode & FALLOC_FL_KEEP_SIZE) && 4701 (offset + len > i_size_read(inode) || 4702 offset + len > EXT4_I(inode)->i_disksize)) { 4703 new_size = offset + len; 4704 ret = inode_newsize_ok(inode, new_size); 4705 if (ret) 4706 goto out_mutex; 4707 } 4708 4709 flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT; 4710 if (mode & FALLOC_FL_KEEP_SIZE) 4711 flags |= EXT4_GET_BLOCKS_KEEP_SIZE; 4712 4713 /* Wait for all existing DIO workers; newcomers will block on i_mutex */ 4714 inode_dio_wait(inode); 4715 4716 /* Preallocate the range including the unaligned edges */ 4717 if (partial_begin || partial_end) { 4718 ret = ext4_alloc_file_blocks(file, 4719 round_down(offset, 1 << blkbits) >> blkbits, 4720 (round_up((offset + len), 1 << blkbits) - 4721 round_down(offset, 1 << blkbits)) >> blkbits, 4722 new_size, flags); 4723 if (ret) 4724 goto out_mutex; 4725 4726 } 4727 4728 /* Zero range excluding the unaligned edges */ 4729 if (max_blocks > 0) { 4730 flags |= (EXT4_GET_BLOCKS_CONVERT_UNWRITTEN | 4731 EXT4_EX_NOCACHE); 4732 4733 /* 4734 * Prevent page faults from reinstantiating pages we have 4735 * released from page cache. 4736 */ 4737 down_write(&EXT4_I(inode)->i_mmap_sem); 4738 4739 ret = ext4_break_layouts(inode); 4740 if (ret) { 4741 up_write(&EXT4_I(inode)->i_mmap_sem); 4742 goto out_mutex; 4743 } 4744 4745 ret = ext4_update_disksize_before_punch(inode, offset, len); 4746 if (ret) { 4747 up_write(&EXT4_I(inode)->i_mmap_sem); 4748 goto out_mutex; 4749 } 4750 /* Now release the pages and zero the block-aligned part of them */ 4751 truncate_pagecache_range(inode, start, end - 1); 4752 inode->i_mtime = inode->i_ctime = current_time(inode); 4753 4754 ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size, 4755 flags); 4756 up_write(&EXT4_I(inode)->i_mmap_sem); 4757 if (ret) 4758 goto out_mutex; 4759 } 4760 if (!partial_begin && !partial_end) 4761 goto out_mutex; 4762 4763 /* 4764 * In the worst case we have to write out two non-adjacent unwritten 4765 * blocks and update the inode 4766 */ 4767 credits = (2 * ext4_ext_index_trans_blocks(inode, 2)) + 1; 4768 if (ext4_should_journal_data(inode)) 4769 credits += 2; 4770 handle = ext4_journal_start(inode, EXT4_HT_MISC, credits); 4771 if (IS_ERR(handle)) { 4772 ret = PTR_ERR(handle); 4773 ext4_std_error(inode->i_sb, ret); 4774 goto out_mutex; 4775 } 4776 4777 inode->i_mtime = inode->i_ctime = current_time(inode); 4778 if (new_size) { 4779 ext4_update_inode_size(inode, new_size); 4780 } else { 4781 /* 4782 * Mark that we allocate beyond EOF so the subsequent truncate 4783 * can proceed even if the new size is the same as i_size.
4784 */ 4785 if ((offset + len) > i_size_read(inode)) 4786 ext4_set_inode_flag(inode, EXT4_INODE_EOFBLOCKS); 4787 } 4788 ext4_mark_inode_dirty(handle, inode); 4789 4790 /* Zero out partial block at the edges of the range */ 4791 ret = ext4_zero_partial_blocks(handle, inode, offset, len); 4792 if (ret >= 0) 4793 ext4_update_inode_fsync_trans(handle, inode, 1); 4794 4795 if (file->f_flags & O_SYNC) 4796 ext4_handle_sync(handle); 4797 4798 ext4_journal_stop(handle); 4799 out_mutex: 4800 inode_unlock(inode); 4801 return ret; 4802 } 4803 4804 /* 4805 * preallocate space for a file. This implements ext4's fallocate file 4806 * operation, which gets called from sys_fallocate system call. 4807 * For block-mapped files, posix_fallocate should fall back to the method 4808 * of writing zeroes to the required new blocks (the same behavior which is 4809 * expected for file systems which do not support fallocate() system call). 4810 */ 4811 long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len) 4812 { 4813 struct inode *inode = file_inode(file); 4814 loff_t new_size = 0; 4815 unsigned int max_blocks; 4816 int ret = 0; 4817 int flags; 4818 ext4_lblk_t lblk; 4819 unsigned int blkbits = inode->i_blkbits; 4820 4821 /* 4822 * Encrypted inodes can't handle collapse range or insert 4823 * range since we would need to re-encrypt blocks with a 4824 * different IV or XTS tweak (which are based on the logical 4825 * block number). 4826 * 4827 * XXX It's not clear why zero range isn't working, but we'll 4828 * leave it disabled for encrypted inodes for now. This is a 4829 * bug we should fix.... 4830 */ 4831 if (IS_ENCRYPTED(inode) && 4832 (mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE | 4833 FALLOC_FL_ZERO_RANGE))) 4834 return -EOPNOTSUPP; 4835 4836 /* Return error if mode is not supported */ 4837 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE | 4838 FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE | 4839 FALLOC_FL_INSERT_RANGE)) 4840 return -EOPNOTSUPP; 4841 4842 if (mode & FALLOC_FL_PUNCH_HOLE) 4843 return ext4_punch_hole(inode, offset, len); 4844 4845 ret = ext4_convert_inline_data(inode); 4846 if (ret) 4847 return ret; 4848 4849 if (mode & FALLOC_FL_COLLAPSE_RANGE) 4850 return ext4_collapse_range(inode, offset, len); 4851 4852 if (mode & FALLOC_FL_INSERT_RANGE) 4853 return ext4_insert_range(inode, offset, len); 4854 4855 if (mode & FALLOC_FL_ZERO_RANGE) 4856 return ext4_zero_range(file, offset, len, mode); 4857 4858 trace_ext4_fallocate_enter(inode, offset, len, mode); 4859 lblk = offset >> blkbits; 4860 4861 max_blocks = EXT4_MAX_BLOCKS(len, offset, blkbits); 4862 flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT; 4863 if (mode & FALLOC_FL_KEEP_SIZE) 4864 flags |= EXT4_GET_BLOCKS_KEEP_SIZE; 4865 4866 inode_lock(inode); 4867 4868 /* 4869 * We only support preallocation for extent-based files only 4870 */ 4871 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) { 4872 ret = -EOPNOTSUPP; 4873 goto out; 4874 } 4875 4876 if (!(mode & FALLOC_FL_KEEP_SIZE) && 4877 (offset + len > i_size_read(inode) || 4878 offset + len > EXT4_I(inode)->i_disksize)) { 4879 new_size = offset + len; 4880 ret = inode_newsize_ok(inode, new_size); 4881 if (ret) 4882 goto out; 4883 } 4884 4885 /* Wait all existing dio workers, newcomers will block on i_mutex */ 4886 inode_dio_wait(inode); 4887 4888 ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size, flags); 4889 if (ret) 4890 goto out; 4891 4892 if (file->f_flags & O_SYNC && EXT4_SB(inode->i_sb)->s_journal) { 4893 ret = 
jbd2_complete_transaction(EXT4_SB(inode->i_sb)->s_journal, 4894 EXT4_I(inode)->i_sync_tid); 4895 } 4896 out: 4897 inode_unlock(inode); 4898 trace_ext4_fallocate_exit(inode, offset, max_blocks, ret); 4899 return ret; 4900 } 4901 4902 /* 4903 * This function converts a range of blocks to written extents. 4904 * The caller of this function will pass the start offset and the size. 4905 * All unwritten extents within this range will be converted to 4906 * written extents. 4907 * 4908 * This function is called from the direct IO end_io callback 4909 * function, to convert the fallocated extents after IO is completed. 4910 * Returns 0 on success. 4911 */ 4912 int ext4_convert_unwritten_extents(handle_t *handle, struct inode *inode, 4913 loff_t offset, ssize_t len) 4914 { 4915 unsigned int max_blocks; 4916 int ret = 0; 4917 int ret2 = 0; 4918 struct ext4_map_blocks map; 4919 unsigned int credits, blkbits = inode->i_blkbits; 4920 4921 map.m_lblk = offset >> blkbits; 4922 max_blocks = EXT4_MAX_BLOCKS(len, offset, blkbits); 4923 4924 /* 4925 * This is somewhat ugly but the idea is clear: when a transaction is 4926 * reserved, everything goes into it. Otherwise we'd rather start several 4927 * smaller transactions for the conversion of each extent separately. 4928 */ 4929 if (handle) { 4930 handle = ext4_journal_start_reserved(handle, 4931 EXT4_HT_EXT_CONVERT); 4932 if (IS_ERR(handle)) 4933 return PTR_ERR(handle); 4934 credits = 0; 4935 } else { 4936 /* 4937 * credits to insert 1 extent into the extent tree 4938 */ 4939 credits = ext4_chunk_trans_blocks(inode, max_blocks); 4940 } 4941 while (ret >= 0 && ret < max_blocks) { 4942 map.m_lblk += ret; 4943 map.m_len = (max_blocks -= ret); 4944 if (credits) { 4945 handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, 4946 credits); 4947 if (IS_ERR(handle)) { 4948 ret = PTR_ERR(handle); 4949 break; 4950 } 4951 } 4952 ret = ext4_map_blocks(handle, inode, &map, 4953 EXT4_GET_BLOCKS_IO_CONVERT_EXT); 4954 if (ret <= 0) 4955 ext4_warning(inode->i_sb, 4956 "inode #%lu: block %u: len %u: " 4957 "ext4_ext_map_blocks returned %d", 4958 inode->i_ino, map.m_lblk, 4959 map.m_len, ret); 4960 ext4_mark_inode_dirty(handle, inode); 4961 if (credits) 4962 ret2 = ext4_journal_stop(handle); 4963 if (ret <= 0 || ret2) 4964 break; 4965 } 4966 if (!credits) 4967 ret2 = ext4_journal_stop(handle); 4968 return ret > 0 ? ret2 : ret; 4969 } 4970 4971 /* 4972 * If newes is not an existing extent (newes->es_pblk equals zero), find 4973 * the delayed extent at the start of newes, update newes accordingly, and 4974 * return the start of the next delayed extent. 4975 * 4976 * If newes is an existing extent (newes->es_pblk is not zero), 4977 * return the start of the next delayed extent, or EXT_MAX_BLOCKS if no 4978 * delayed extent is found. Leave newes unmodified. 4979 */ 4980 static int ext4_find_delayed_extent(struct inode *inode, 4981 struct extent_status *newes) 4982 { 4983 struct extent_status es; 4984 ext4_lblk_t block, next_del; 4985 4986 if (newes->es_pblk == 0) { 4987 ext4_es_find_extent_range(inode, &ext4_es_is_delayed, 4988 newes->es_lblk, 4989 newes->es_lblk + newes->es_len - 1, 4990 &es); 4991 4992 /* 4993 * No extent in the extent tree contains block @newes->es_lblk, 4994 * so the block may lie in 1) a hole or 2) a delayed extent. 4995 */ 4996 if (es.es_len == 0) 4997 /* A hole found. */ 4998 return 0; 4999 5000 if (es.es_lblk > newes->es_lblk) { 5001 /* A hole found.
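 * The delayed extent starts beyond newes->es_lblk, so the head of
 * the queried range is a hole; trim newes->es_len to cover just the
 * hole before returning.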
*/ 5002 newes->es_len = min(es.es_lblk - newes->es_lblk, 5003 newes->es_len); 5004 return 0; 5005 } 5006 5007 newes->es_len = es.es_lblk + es.es_len - newes->es_lblk; 5008 } 5009 5010 block = newes->es_lblk + newes->es_len; 5011 ext4_es_find_extent_range(inode, &ext4_es_is_delayed, block, 5012 EXT_MAX_BLOCKS, &es); 5013 if (es.es_len == 0) 5014 next_del = EXT_MAX_BLOCKS; 5015 else 5016 next_del = es.es_lblk; 5017 5018 return next_del; 5019 } 5020 /* fiemap flags we can handle specified here */ 5021 #define EXT4_FIEMAP_FLAGS (FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR) 5022 5023 static int ext4_xattr_fiemap(struct inode *inode, 5024 struct fiemap_extent_info *fieinfo) 5025 { 5026 __u64 physical = 0; 5027 __u64 length; 5028 __u32 flags = FIEMAP_EXTENT_LAST; 5029 int blockbits = inode->i_sb->s_blocksize_bits; 5030 int error = 0; 5031 5032 /* in-inode? */ 5033 if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) { 5034 struct ext4_iloc iloc; 5035 int offset; /* offset of xattr in inode */ 5036 5037 error = ext4_get_inode_loc(inode, &iloc); 5038 if (error) 5039 return error; 5040 physical = (__u64)iloc.bh->b_blocknr << blockbits; 5041 offset = EXT4_GOOD_OLD_INODE_SIZE + 5042 EXT4_I(inode)->i_extra_isize; 5043 physical += offset; 5044 length = EXT4_SB(inode->i_sb)->s_inode_size - offset; 5045 flags |= FIEMAP_EXTENT_DATA_INLINE; 5046 brelse(iloc.bh); 5047 } else { /* external block */ 5048 physical = (__u64)EXT4_I(inode)->i_file_acl << blockbits; 5049 length = inode->i_sb->s_blocksize; 5050 } 5051 5052 if (physical) 5053 error = fiemap_fill_next_extent(fieinfo, 0, physical, 5054 length, flags); 5055 return (error < 0 ? error : 0); 5056 } 5057 5058 int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, 5059 __u64 start, __u64 len) 5060 { 5061 ext4_lblk_t start_blk; 5062 int error = 0; 5063 5064 if (ext4_has_inline_data(inode)) { 5065 int has_inline = 1; 5066 5067 error = ext4_inline_data_fiemap(inode, fieinfo, &has_inline, 5068 start, len); 5069 5070 if (has_inline) 5071 return error; 5072 } 5073 5074 if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) { 5075 error = ext4_ext_precache(inode); 5076 if (error) 5077 return error; 5078 } 5079 5080 /* fallback to generic here if not in extents fmt */ 5081 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) 5082 return generic_block_fiemap(inode, fieinfo, start, len, 5083 ext4_get_block); 5084 5085 if (fiemap_check_flags(fieinfo, EXT4_FIEMAP_FLAGS)) 5086 return -EBADR; 5087 5088 if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) { 5089 error = ext4_xattr_fiemap(inode, fieinfo); 5090 } else { 5091 ext4_lblk_t len_blks; 5092 __u64 last_blk; 5093 5094 start_blk = start >> inode->i_sb->s_blocksize_bits; 5095 last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits; 5096 if (last_blk >= EXT_MAX_BLOCKS) 5097 last_blk = EXT_MAX_BLOCKS-1; 5098 len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1; 5099 5100 /* 5101 * Walk the extent tree gathering extent information 5102 * and pushing extents back to the user. 5103 */ 5104 error = ext4_fill_fiemap_extents(inode, start_blk, 5105 len_blks, fieinfo); 5106 } 5107 return error; 5108 } 5109 5110 /* 5111 * ext4_access_path: 5112 * Function to access the path buffer for marking it dirty. 5113 * It also checks if there are sufficient credits left in the journal handle 5114 * to update path. 
5115 */ 5116 static int 5117 ext4_access_path(handle_t *handle, struct inode *inode, 5118 struct ext4_ext_path *path) 5119 { 5120 int credits, err; 5121 5122 if (!ext4_handle_valid(handle)) 5123 return 0; 5124 5125 /* 5126 * Check if we need to extend the journal credits: 5127 * 3 for leaf, sb, and inode plus 2 (bmap and group 5128 * descriptor) for each block group; assume two block 5129 * groups 5130 */ 5131 if (handle->h_buffer_credits < 7) { 5132 credits = ext4_writepage_trans_blocks(inode); 5133 err = ext4_ext_truncate_extend_restart(handle, inode, credits); 5134 /* EAGAIN is success */ 5135 if (err && err != -EAGAIN) 5136 return err; 5137 } 5138 5139 err = ext4_ext_get_access(handle, inode, path); 5140 return err; 5141 } 5142 5143 /* 5144 * ext4_ext_shift_path_extents: 5145 * Shift the extents of a path structure lying between path[depth].p_ext 5146 * and EXT_LAST_EXTENT(path[depth].p_hdr), by @shift blocks. @SHIFT tells 5147 * whether it is a right or a left shift operation. 5148 */ 5149 static int 5150 ext4_ext_shift_path_extents(struct ext4_ext_path *path, ext4_lblk_t shift, 5151 struct inode *inode, handle_t *handle, 5152 enum SHIFT_DIRECTION SHIFT) 5153 { 5154 int depth, err = 0; 5155 struct ext4_extent *ex_start, *ex_last; 5156 bool update = false; 5157 depth = path->p_depth; 5158 5159 while (depth >= 0) { 5160 if (depth == path->p_depth) { 5161 ex_start = path[depth].p_ext; 5162 if (!ex_start) 5163 return -EFSCORRUPTED; 5164 5165 ex_last = EXT_LAST_EXTENT(path[depth].p_hdr); 5166 5167 err = ext4_access_path(handle, inode, path + depth); 5168 if (err) 5169 goto out; 5170 5171 if (ex_start == EXT_FIRST_EXTENT(path[depth].p_hdr)) 5172 update = true; 5173 5174 while (ex_start <= ex_last) { 5175 if (SHIFT == SHIFT_LEFT) { 5176 le32_add_cpu(&ex_start->ee_block, 5177 -shift); 5178 /* Try to merge to the left. */ 5179 if ((ex_start > 5180 EXT_FIRST_EXTENT(path[depth].p_hdr)) 5181 && 5182 ext4_ext_try_to_merge_right(inode, 5183 path, ex_start - 1)) 5184 ex_last--; 5185 else 5186 ex_start++; 5187 } else { 5188 le32_add_cpu(&ex_last->ee_block, shift); 5189 ext4_ext_try_to_merge_right(inode, path, 5190 ex_last); 5191 ex_last--; 5192 } 5193 } 5194 err = ext4_ext_dirty(handle, inode, path + depth); 5195 if (err) 5196 goto out; 5197 5198 if (--depth < 0 || !update) 5199 break; 5200 } 5201 5202 /* Update the index too */ 5203 err = ext4_access_path(handle, inode, path + depth); 5204 if (err) 5205 goto out; 5206 5207 if (SHIFT == SHIFT_LEFT) 5208 le32_add_cpu(&path[depth].p_idx->ei_block, -shift); 5209 else 5210 le32_add_cpu(&path[depth].p_idx->ei_block, shift); 5211 err = ext4_ext_dirty(handle, inode, path + depth); 5212 if (err) 5213 goto out; 5214 5215 /* we are done if the current index is not a starting index */ 5216 if (path[depth].p_idx != EXT_FIRST_INDEX(path[depth].p_hdr)) 5217 break; 5218 5219 depth--; 5220 } 5221 5222 out: 5223 return err; 5224 } 5225 5226 /* 5227 * ext4_ext_shift_extents: 5228 * All the extents which lie in the range from @start to the last allocated 5229 * block for the @inode are shifted either towards the left or the right 5230 * (depending upon @SHIFT) by @shift blocks. 5231 * On success, 0 is returned, an error code otherwise.
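 *
 * A hedged example with hypothetical numbers: collapsing blocks
 * [100, 149] ends up calling this with start = 150, shift = 50 and
 * SHIFT_LEFT, so every extent from block 150 onwards is remapped 50
 * blocks lower.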
5232 */ 5233 static int 5234 ext4_ext_shift_extents(struct inode *inode, handle_t *handle, 5235 ext4_lblk_t start, ext4_lblk_t shift, 5236 enum SHIFT_DIRECTION SHIFT) 5237 { 5238 struct ext4_ext_path *path; 5239 int ret = 0, depth; 5240 struct ext4_extent *extent; 5241 ext4_lblk_t stop, *iterator, ex_start, ex_end; 5242 5243 /* Let path point to the last extent */ 5244 path = ext4_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL, 5245 EXT4_EX_NOCACHE); 5246 if (IS_ERR(path)) 5247 return PTR_ERR(path); 5248 5249 depth = path->p_depth; 5250 extent = path[depth].p_ext; 5251 if (!extent) 5252 goto out; 5253 5254 stop = le32_to_cpu(extent->ee_block); 5255 5256 /* 5257 * For left shifts, make sure the hole on the left is big enough to 5258 * accommodate the shift. For right shifts, make sure the last extent 5259 * won't be shifted beyond EXT_MAX_BLOCKS. 5260 */ 5261 if (SHIFT == SHIFT_LEFT) { 5262 path = ext4_find_extent(inode, start - 1, &path, 5263 EXT4_EX_NOCACHE); 5264 if (IS_ERR(path)) 5265 return PTR_ERR(path); 5266 depth = path->p_depth; 5267 extent = path[depth].p_ext; 5268 if (extent) { 5269 ex_start = le32_to_cpu(extent->ee_block); 5270 ex_end = le32_to_cpu(extent->ee_block) + 5271 ext4_ext_get_actual_len(extent); 5272 } else { 5273 ex_start = 0; 5274 ex_end = 0; 5275 } 5276 5277 if ((start == ex_start && shift > ex_start) || 5278 (shift > start - ex_end)) { 5279 ret = -EINVAL; 5280 goto out; 5281 } 5282 } else { 5283 if (shift > EXT_MAX_BLOCKS - 5284 (stop + ext4_ext_get_actual_len(extent))) { 5285 ret = -EINVAL; 5286 goto out; 5287 } 5288 } 5289 5290 /* 5291 * In case of left shift, iterator points to start and it is increased 5292 * till we reach stop. In case of right shift, iterator points to stop 5293 * and it is decreased till we reach start. 5294 */ 5295 if (SHIFT == SHIFT_LEFT) 5296 iterator = &start; 5297 else 5298 iterator = &stop; 5299 5300 /* 5301 * Its safe to start updating extents. Start and stop are unsigned, so 5302 * in case of right shift if extent with 0 block is reached, iterator 5303 * becomes NULL to indicate the end of the loop. 
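 * (For a right shift this means the rightmost extents move first,
 * into space that is already vacant, so shifted extents never collide
 * with extents that have not moved yet.)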
5304 */ 5305 while (iterator && start <= stop) { 5306 path = ext4_find_extent(inode, *iterator, &path, 5307 EXT4_EX_NOCACHE); 5308 if (IS_ERR(path)) 5309 return PTR_ERR(path); 5310 depth = path->p_depth; 5311 extent = path[depth].p_ext; 5312 if (!extent) { 5313 EXT4_ERROR_INODE(inode, "unexpected hole at %lu", 5314 (unsigned long) *iterator); 5315 return -EFSCORRUPTED; 5316 } 5317 if (SHIFT == SHIFT_LEFT && *iterator > 5318 le32_to_cpu(extent->ee_block)) { 5319 /* Hole, move to the next extent */ 5320 if (extent < EXT_LAST_EXTENT(path[depth].p_hdr)) { 5321 path[depth].p_ext++; 5322 } else { 5323 *iterator = ext4_ext_next_allocated_block(path); 5324 continue; 5325 } 5326 } 5327 5328 if (SHIFT == SHIFT_LEFT) { 5329 extent = EXT_LAST_EXTENT(path[depth].p_hdr); 5330 *iterator = le32_to_cpu(extent->ee_block) + 5331 ext4_ext_get_actual_len(extent); 5332 } else { 5333 extent = EXT_FIRST_EXTENT(path[depth].p_hdr); 5334 if (le32_to_cpu(extent->ee_block) > 0) 5335 *iterator = le32_to_cpu(extent->ee_block) - 1; 5336 else 5337 /* Beginning is reached, end of the loop */ 5338 iterator = NULL; 5339 /* Update the path extent in case we need to stop */ 5340 while (le32_to_cpu(extent->ee_block) < start) 5341 extent++; 5342 path[depth].p_ext = extent; 5343 } 5344 ret = ext4_ext_shift_path_extents(path, shift, inode, 5345 handle, SHIFT); 5346 if (ret) 5347 break; 5348 } 5349 out: 5350 ext4_ext_drop_refs(path); 5351 kfree(path); 5352 return ret; 5353 } 5354 5355 /* 5356 * ext4_collapse_range: 5357 * This implements fallocate's collapse range functionality for ext4. 5358 * Returns 0 on success, a negative error code on error. 5359 */ 5360 int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len) 5361 { 5362 struct super_block *sb = inode->i_sb; 5363 ext4_lblk_t punch_start, punch_stop; 5364 handle_t *handle; 5365 unsigned int credits; 5366 loff_t new_size, ioffset; 5367 int ret; 5368 5369 /* 5370 * We need to test this early because xfstests assumes that a 5371 * collapse range of (0, 1) will return EOPNOTSUPP if the file 5372 * system does not support collapse range. 5373 */ 5374 if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 5375 return -EOPNOTSUPP; 5376 5377 /* Collapse range works only on fs block size aligned offsets. */ 5378 if (offset & (EXT4_CLUSTER_SIZE(sb) - 1) || 5379 len & (EXT4_CLUSTER_SIZE(sb) - 1)) 5380 return -EINVAL; 5381 5382 if (!S_ISREG(inode->i_mode)) 5383 return -EINVAL; 5384 5385 trace_ext4_collapse_range(inode, offset, len); 5386 5387 punch_start = offset >> EXT4_BLOCK_SIZE_BITS(sb); 5388 punch_stop = (offset + len) >> EXT4_BLOCK_SIZE_BITS(sb); 5389 5390 /* Call ext4_force_commit to flush all data in case of data=journal. */ 5391 if (ext4_should_journal_data(inode)) { 5392 ret = ext4_force_commit(inode->i_sb); 5393 if (ret) 5394 return ret; 5395 } 5396 5397 inode_lock(inode); 5398 /* 5399 * The collapse range must not reach or overlap EOF; such a request 5400 * would effectively be a truncate operation 5401 */ 5402 if (offset + len >= i_size_read(inode)) { 5403 ret = -EINVAL; 5404 goto out_mutex; 5405 } 5406 5407 /* Currently just for extent-based files */ 5408 if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) { 5409 ret = -EOPNOTSUPP; 5410 goto out_mutex; 5411 } 5412 5413 /* Wait for existing DIO to complete */ 5414 inode_dio_wait(inode); 5415 5416 /* 5417 * Prevent page faults from reinstantiating pages we have released from 5418 * page cache.
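 * The fault path takes i_mmap_sem for read, so holding it for write
 * here keeps mmap'ed readers from re-faulting pages in the collapsed
 * range while blocks are freed and extents are shifted underneath.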
5419 */ 5420 down_write(&EXT4_I(inode)->i_mmap_sem); 5421 5422 ret = ext4_break_layouts(inode); 5423 if (ret) 5424 goto out_mmap; 5425 5426 /* 5427 * Need to round down offset to be aligned with page size boundary 5428 * for page size > block size. 5429 */ 5430 ioffset = round_down(offset, PAGE_SIZE); 5431 /* 5432 * Write tail of the last page before removed range since it will get 5433 * removed from the page cache below. 5434 */ 5435 ret = filemap_write_and_wait_range(inode->i_mapping, ioffset, offset); 5436 if (ret) 5437 goto out_mmap; 5438 /* 5439 * Write data that will be shifted to preserve them when discarding 5440 * page cache below. We are also protected from pages becoming dirty 5441 * by i_mmap_sem. 5442 */ 5443 ret = filemap_write_and_wait_range(inode->i_mapping, offset + len, 5444 LLONG_MAX); 5445 if (ret) 5446 goto out_mmap; 5447 truncate_pagecache(inode, ioffset); 5448 5449 credits = ext4_writepage_trans_blocks(inode); 5450 handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits); 5451 if (IS_ERR(handle)) { 5452 ret = PTR_ERR(handle); 5453 goto out_mmap; 5454 } 5455 5456 down_write(&EXT4_I(inode)->i_data_sem); 5457 ext4_discard_preallocations(inode); 5458 5459 ret = ext4_es_remove_extent(inode, punch_start, 5460 EXT_MAX_BLOCKS - punch_start); 5461 if (ret) { 5462 up_write(&EXT4_I(inode)->i_data_sem); 5463 goto out_stop; 5464 } 5465 5466 ret = ext4_ext_remove_space(inode, punch_start, punch_stop - 1); 5467 if (ret) { 5468 up_write(&EXT4_I(inode)->i_data_sem); 5469 goto out_stop; 5470 } 5471 ext4_discard_preallocations(inode); 5472 5473 ret = ext4_ext_shift_extents(inode, handle, punch_stop, 5474 punch_stop - punch_start, SHIFT_LEFT); 5475 if (ret) { 5476 up_write(&EXT4_I(inode)->i_data_sem); 5477 goto out_stop; 5478 } 5479 5480 new_size = i_size_read(inode) - len; 5481 i_size_write(inode, new_size); 5482 EXT4_I(inode)->i_disksize = new_size; 5483 5484 up_write(&EXT4_I(inode)->i_data_sem); 5485 if (IS_SYNC(inode)) 5486 ext4_handle_sync(handle); 5487 inode->i_mtime = inode->i_ctime = current_time(inode); 5488 ext4_mark_inode_dirty(handle, inode); 5489 ext4_update_inode_fsync_trans(handle, inode, 1); 5490 5491 out_stop: 5492 ext4_journal_stop(handle); 5493 out_mmap: 5494 up_write(&EXT4_I(inode)->i_mmap_sem); 5495 out_mutex: 5496 inode_unlock(inode); 5497 return ret; 5498 } 5499 5500 /* 5501 * ext4_insert_range: 5502 * This function implements the FALLOC_FL_INSERT_RANGE flag of fallocate. 5503 * The data blocks starting from @offset to the EOF are shifted by @len 5504 * towards right to create a hole in the @inode. Inode size is increased 5505 * by len bytes. 5506 * Returns 0 on success, error otherwise. 5507 */ 5508 int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len) 5509 { 5510 struct super_block *sb = inode->i_sb; 5511 handle_t *handle; 5512 struct ext4_ext_path *path; 5513 struct ext4_extent *extent; 5514 ext4_lblk_t offset_lblk, len_lblk, ee_start_lblk = 0; 5515 unsigned int credits, ee_len; 5516 int ret = 0, depth, split_flag = 0; 5517 loff_t ioffset; 5518 5519 /* 5520 * We need to test this early because xfstests assumes that an 5521 * insert range of (0, 1) will return EOPNOTSUPP if the file 5522 * system does not support insert range. 5523 */ 5524 if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 5525 return -EOPNOTSUPP; 5526 5527 /* Insert range works only on fs block size aligned offsets. 
*/ 5528 if (offset & (EXT4_CLUSTER_SIZE(sb) - 1) || 5529 len & (EXT4_CLUSTER_SIZE(sb) - 1)) 5530 return -EINVAL; 5531 5532 if (!S_ISREG(inode->i_mode)) 5533 return -EOPNOTSUPP; 5534 5535 trace_ext4_insert_range(inode, offset, len); 5536 5537 offset_lblk = offset >> EXT4_BLOCK_SIZE_BITS(sb); 5538 len_lblk = len >> EXT4_BLOCK_SIZE_BITS(sb); 5539 5540 /* Call ext4_force_commit to flush all data in case of data=journal */ 5541 if (ext4_should_journal_data(inode)) { 5542 ret = ext4_force_commit(inode->i_sb); 5543 if (ret) 5544 return ret; 5545 } 5546 5547 inode_lock(inode); 5548 /* Currently just for extent based files */ 5549 if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) { 5550 ret = -EOPNOTSUPP; 5551 goto out_mutex; 5552 } 5553 5554 /* Check for wrap through zero */ 5555 if (inode->i_size + len > inode->i_sb->s_maxbytes) { 5556 ret = -EFBIG; 5557 goto out_mutex; 5558 } 5559 5560 /* Offset should be less than i_size */ 5561 if (offset >= i_size_read(inode)) { 5562 ret = -EINVAL; 5563 goto out_mutex; 5564 } 5565 5566 /* Wait for existing dio to complete */ 5567 inode_dio_wait(inode); 5568 5569 /* 5570 * Prevent page faults from reinstantiating pages we have released from 5571 * page cache. 5572 */ 5573 down_write(&EXT4_I(inode)->i_mmap_sem); 5574 5575 ret = ext4_break_layouts(inode); 5576 if (ret) 5577 goto out_mmap; 5578 5579 /* 5580 * Need to round down to align start offset to page size boundary 5581 * for page size > block size. 5582 */ 5583 ioffset = round_down(offset, PAGE_SIZE); 5584 /* Write out all dirty pages */ 5585 ret = filemap_write_and_wait_range(inode->i_mapping, ioffset, 5586 LLONG_MAX); 5587 if (ret) 5588 goto out_mmap; 5589 truncate_pagecache(inode, ioffset); 5590 5591 credits = ext4_writepage_trans_blocks(inode); 5592 handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits); 5593 if (IS_ERR(handle)) { 5594 ret = PTR_ERR(handle); 5595 goto out_mmap; 5596 } 5597 5598 /* Expand file to avoid data loss if there is error while shifting */ 5599 inode->i_size += len; 5600 EXT4_I(inode)->i_disksize += len; 5601 inode->i_mtime = inode->i_ctime = current_time(inode); 5602 ret = ext4_mark_inode_dirty(handle, inode); 5603 if (ret) 5604 goto out_stop; 5605 5606 down_write(&EXT4_I(inode)->i_data_sem); 5607 ext4_discard_preallocations(inode); 5608 5609 path = ext4_find_extent(inode, offset_lblk, NULL, 0); 5610 if (IS_ERR(path)) { 5611 up_write(&EXT4_I(inode)->i_data_sem); 5612 goto out_stop; 5613 } 5614 5615 depth = ext_depth(inode); 5616 extent = path[depth].p_ext; 5617 if (extent) { 5618 ee_start_lblk = le32_to_cpu(extent->ee_block); 5619 ee_len = ext4_ext_get_actual_len(extent); 5620 5621 /* 5622 * If offset_lblk is not the starting block of extent, split 5623 * the extent @offset_lblk 5624 */ 5625 if ((offset_lblk > ee_start_lblk) && 5626 (offset_lblk < (ee_start_lblk + ee_len))) { 5627 if (ext4_ext_is_unwritten(extent)) 5628 split_flag = EXT4_EXT_MARK_UNWRIT1 | 5629 EXT4_EXT_MARK_UNWRIT2; 5630 ret = ext4_split_extent_at(handle, inode, &path, 5631 offset_lblk, split_flag, 5632 EXT4_EX_NOCACHE | 5633 EXT4_GET_BLOCKS_PRE_IO | 5634 EXT4_GET_BLOCKS_METADATA_NOFAIL); 5635 } 5636 5637 ext4_ext_drop_refs(path); 5638 kfree(path); 5639 if (ret < 0) { 5640 up_write(&EXT4_I(inode)->i_data_sem); 5641 goto out_stop; 5642 } 5643 } else { 5644 ext4_ext_drop_refs(path); 5645 kfree(path); 5646 } 5647 5648 ret = ext4_es_remove_extent(inode, offset_lblk, 5649 EXT_MAX_BLOCKS - offset_lblk); 5650 if (ret) { 5651 up_write(&EXT4_I(inode)->i_data_sem); 5652 goto out_stop; 5653 } 5654 5655 /* 5656 

/**
 * ext4_swap_extents - Swap extents between two inodes
 *
 * @handle:	journal handle
 * @inode1:	First inode
 * @inode2:	Second inode
 * @lblk1:	Start block for first inode
 * @lblk2:	Start block for second inode
 * @count:	Number of blocks to swap
 * @unwritten:	Mark second inode's extents as unwritten after swap
 * @erp:	Pointer to save error value
 *
 * This helper routine does exactly what it promises: "swap extents". All
 * other work, such as page-cache locking consistency, bh mapping
 * consistency, and copying the extents' data, must be performed by the
 * caller.
 * Locking:
 *		i_mutex is held for both inodes
 *		i_data_sem is locked for write for both inodes
 * Assumptions:
 *		All pages from the requested range are locked for both inodes
 */
int
ext4_swap_extents(handle_t *handle, struct inode *inode1,
		  struct inode *inode2, ext4_lblk_t lblk1, ext4_lblk_t lblk2,
		  ext4_lblk_t count, int unwritten, int *erp)
{
	struct ext4_ext_path *path1 = NULL;
	struct ext4_ext_path *path2 = NULL;
	int replaced_count = 0;

	BUG_ON(!rwsem_is_locked(&EXT4_I(inode1)->i_data_sem));
	BUG_ON(!rwsem_is_locked(&EXT4_I(inode2)->i_data_sem));
	BUG_ON(!inode_is_locked(inode1));
	BUG_ON(!inode_is_locked(inode2));

	*erp = ext4_es_remove_extent(inode1, lblk1, count);
	if (unlikely(*erp))
		return 0;
	*erp = ext4_es_remove_extent(inode2, lblk2, count);
	if (unlikely(*erp))
		return 0;
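
	/*
	 * Worked example (illustrative): with lblk1 = lblk2 = 0 and
	 * count = 100, suppose inode1's first extent covers blocks 10..19
	 * and inode2's covers 10..49. The first pass through the loop
	 * below finds both positions in holes and advances them to block
	 * 10, leaving count = 90. The next pass trims the working length
	 * to 10 (the smaller remaining extent), splits inode2's extent at
	 * block 20, and revalidates the paths. The pass after that swaps
	 * the two now equally sized leaves covering blocks 10..19 and
	 * moves on to block 20.
	 */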
	while (count) {
		struct ext4_extent *ex1, *ex2, tmp_ex;
		ext4_lblk_t e1_blk, e2_blk;
		int e1_len, e2_len, len;
		int split = 0;

		path1 = ext4_find_extent(inode1, lblk1, NULL, EXT4_EX_NOCACHE);
		if (IS_ERR(path1)) {
			*erp = PTR_ERR(path1);
			path1 = NULL;
		finish:
			count = 0;
			goto repeat;
		}
		path2 = ext4_find_extent(inode2, lblk2, NULL, EXT4_EX_NOCACHE);
		if (IS_ERR(path2)) {
			*erp = PTR_ERR(path2);
			path2 = NULL;
			goto finish;
		}
		ex1 = path1[path1->p_depth].p_ext;
		ex2 = path2[path2->p_depth].p_ext;
		/* Do we have something to swap? */
		if (unlikely(!ex2 || !ex1))
			goto finish;

		e1_blk = le32_to_cpu(ex1->ee_block);
		e2_blk = le32_to_cpu(ex2->ee_block);
		e1_len = ext4_ext_get_actual_len(ex1);
		e2_len = ext4_ext_get_actual_len(ex2);

		/* Hole handling */
		if (!in_range(lblk1, e1_blk, e1_len) ||
		    !in_range(lblk2, e2_blk, e2_len)) {
			ext4_lblk_t next1, next2;

			/* If a hole follows the extent, go to the next extent */
			next1 = ext4_ext_next_allocated_block(path1);
			next2 = ext4_ext_next_allocated_block(path2);
			/* If a hole precedes the extent, shift to that extent */
			if (e1_blk > lblk1)
				next1 = e1_blk;
			if (e2_blk > lblk2)
				next2 = e2_blk;
			/* Do we have something to swap? */
			if (next1 == EXT_MAX_BLOCKS || next2 == EXT_MAX_BLOCKS)
				goto finish;
			/* Move to the rightmost boundary */
			len = next1 - lblk1;
			if (len < next2 - lblk2)
				len = next2 - lblk2;
			if (len > count)
				len = count;
			lblk1 += len;
			lblk2 += len;
			count -= len;
			goto repeat;
		}

		/* Prepare left boundary */
		if (e1_blk < lblk1) {
			split = 1;
			*erp = ext4_force_split_extent_at(handle, inode1,
						&path1, lblk1, 0);
			if (unlikely(*erp))
				goto finish;
		}
		if (e2_blk < lblk2) {
			split = 1;
			*erp = ext4_force_split_extent_at(handle, inode2,
						&path2, lblk2, 0);
			if (unlikely(*erp))
				goto finish;
		}
		/* ext4_split_extent_at() may result in a leaf extent split,
		 * so the path must be revalidated. */
		if (split)
			goto repeat;

		/* Prepare right boundary */
		len = count;
		if (len > e1_blk + e1_len - lblk1)
			len = e1_blk + e1_len - lblk1;
		if (len > e2_blk + e2_len - lblk2)
			len = e2_blk + e2_len - lblk2;

		if (len != e1_len) {
			split = 1;
			*erp = ext4_force_split_extent_at(handle, inode1,
						&path1, lblk1 + len, 0);
			if (unlikely(*erp))
				goto finish;
		}
		if (len != e2_len) {
			split = 1;
			*erp = ext4_force_split_extent_at(handle, inode2,
						&path2, lblk2 + len, 0);
			if (*erp)
				goto finish;
		}
		/* ext4_split_extent_at() may result in a leaf extent split,
		 * so the path must be revalidated. */
		if (split)
			goto repeat;

		BUG_ON(e2_len != e1_len);
		*erp = ext4_ext_get_access(handle, inode1, path1 + path1->p_depth);
		if (unlikely(*erp))
			goto finish;
		*erp = ext4_ext_get_access(handle, inode2, path2 + path2->p_depth);
		if (unlikely(*erp))
			goto finish;

		/* Both extents are fully inside boundaries. Swap them now */
		tmp_ex = *ex1;
		ext4_ext_store_pblock(ex1, ext4_ext_pblock(ex2));
		ext4_ext_store_pblock(ex2, ext4_ext_pblock(&tmp_ex));
		ex1->ee_len = cpu_to_le16(e2_len);
		ex2->ee_len = cpu_to_le16(e1_len);
		if (unwritten)
			ext4_ext_mark_unwritten(ex2);
		if (ext4_ext_is_unwritten(&tmp_ex))
			ext4_ext_mark_unwritten(ex1);

		ext4_ext_try_to_merge(handle, inode2, path2, ex2);
		ext4_ext_try_to_merge(handle, inode1, path1, ex1);
		*erp = ext4_ext_dirty(handle, inode2, path2 +
				      path2->p_depth);
		if (unlikely(*erp))
			goto finish;
		*erp = ext4_ext_dirty(handle, inode1, path1 +
				      path1->p_depth);
		/*
		 * Looks scary, huh? The second inode already points to the
		 * new blocks, and it was successfully dirtied. But luckily
		 * an error here can only be a journal error, so the full
		 * transaction will be aborted anyway.
		 */
		if (unlikely(*erp))
			goto finish;
		lblk1 += len;
		lblk2 += len;
		replaced_count += len;
		count -= len;

	repeat:
		ext4_ext_drop_refs(path1);
		kfree(path1);
		ext4_ext_drop_refs(path2);
		kfree(path2);
		path1 = path2 = NULL;
	}
	return replaced_count;
}
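
/*
 * Caller sketch (illustrative, not part of this file): the main user of
 * ext4_swap_extents() is the EXT4_IOC_MOVE_EXT online-defrag path in
 * fs/ext4/move_extent.c, which takes both inode locks and both
 * i_data_sem's, locks and copies the affected pages, and only then swaps
 * the extents. From userspace, e4defrag drives it roughly like this (the
 * struct layout mirrors struct move_extent in fs/ext4/ext4.h):
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/types.h>
 *
 *	struct move_extent {
 *		__u32 reserved;     // must be zero
 *		__u32 donor_fd;     // preallocated donor file
 *		__u64 orig_start;   // logical block in the original file
 *		__u64 donor_start;  // logical block in the donor file
 *		__u64 len;          // number of blocks to move
 *		__u64 moved_len;    // filled in by the kernel
 *	};
 *	#define EXT4_IOC_MOVE_EXT _IOWR('f', 15, struct move_extent)
 *
 *	// Move the first 'len' blocks of orig_fd onto donor_fd's blocks.
 *	static int defrag_range(int orig_fd, int donor_fd, __u64 len)
 *	{
 *		struct move_extent me = {
 *			.donor_fd = donor_fd,
 *			.len = len,
 *		};
 *		return ioctl(orig_fd, EXT4_IOC_MOVE_EXT, &me);
 *	}
 */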

/*
 * ext4_clu_mapped - determine whether any block in a logical cluster has
 *                   been mapped to a physical cluster
 *
 * @inode - file containing the logical cluster
 * @lclu - logical cluster of interest
 *
 * Returns 1 if any block in the logical cluster is mapped, signifying
 * that a physical cluster has been allocated for it. Otherwise,
 * returns 0. Can also return negative error codes. Derived from
 * ext4_ext_map_blocks().
 */
int ext4_clu_mapped(struct inode *inode, ext4_lblk_t lclu)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_ext_path *path;
	int depth, mapped = 0, err = 0;
	struct ext4_extent *extent;
	ext4_lblk_t first_lblk, first_lclu, last_lclu;

	/* search for the extent closest to the first block in the cluster */
	path = ext4_find_extent(inode, EXT4_C2B(sbi, lclu), NULL, 0);
	if (IS_ERR(path)) {
		err = PTR_ERR(path);
		path = NULL;
		goto out;
	}

	depth = ext_depth(inode);

	/*
	 * A consistent leaf must not be empty. This situation is possible,
	 * though, _during_ tree modification, and it's why an assert can't
	 * be put in ext4_find_extent().
	 */
	if (unlikely(path[depth].p_ext == NULL && depth != 0)) {
		EXT4_ERROR_INODE(inode,
		    "bad extent address - lblock: %lu, depth: %d, pblock: %lld",
				 (unsigned long) EXT4_C2B(sbi, lclu),
				 depth, path[depth].p_block);
		err = -EFSCORRUPTED;
		goto out;
	}

	extent = path[depth].p_ext;

	/* can't be mapped if the extent tree is empty */
	if (extent == NULL)
		goto out;

	first_lblk = le32_to_cpu(extent->ee_block);
	first_lclu = EXT4_B2C(sbi, first_lblk);

	/*
	 * Three possible outcomes at this point - found extent spanning
	 * the target cluster, to the left of the target cluster, or to the
	 * right of the target cluster. The first two cases are handled here.
	 * The last case indicates the target cluster is not mapped.
	 */
	if (lclu >= first_lclu) {
		last_lclu = EXT4_B2C(sbi, first_lblk +
				     ext4_ext_get_actual_len(extent) - 1);
		if (lclu <= last_lclu) {
			mapped = 1;
		} else {
			first_lblk = ext4_ext_next_allocated_block(path);
			first_lclu = EXT4_B2C(sbi, first_lblk);
			if (lclu == first_lclu)
				mapped = 1;
		}
	}

out:
	ext4_ext_drop_refs(path);
	kfree(path);

	return err ? err : mapped;
}
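
/*
 * Worked example (illustrative): on a bigalloc file system with 16 blocks
 * per cluster (s_cluster_bits == 4), logical cluster 3 covers logical
 * blocks EXT4_C2B(sbi, 3) == 48 through 63. An extent mapping blocks
 * 60..71 intersects clusters EXT4_B2C(sbi, 60) == 3 through
 * EXT4_B2C(sbi, 71) == 4, so ext4_clu_mapped() returns 1 for lclu 3 or 4
 * and 0 for lclu 2.
 */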