// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 *
 * Architecture independence:
 *   Copyright (c) 2005, Bull S.A.
 *   Written by Pierre Peiffer <pierre.peiffer@bull.net>
 */

/*
 * Extents support for EXT4
 *
 * TODO:
 *   - ext4*_error() should be used in some situations
 *   - analyze all BUG()/BUG_ON(), use -EIO where appropriate
 *   - smart tree reduction
 */

#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/fiemap.h>
#include <linux/iomap.h>
#include <linux/sched/mm.h>
#include "ext4_jbd2.h"
#include "ext4_extents.h"
#include "xattr.h"

#include <trace/events/ext4.h>

/*
 * used by extent splitting.
 */
#define EXT4_EXT_MAY_ZEROOUT	0x1  /* safe to zeroout if split fails \
					due to ENOSPC */
#define EXT4_EXT_MARK_UNWRIT1	0x2  /* mark first half unwritten */
#define EXT4_EXT_MARK_UNWRIT2	0x4  /* mark second half unwritten */

#define EXT4_EXT_DATA_VALID1	0x8  /* first half contains valid data */
#define EXT4_EXT_DATA_VALID2	0x10 /* second half contains valid data */
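
/*
 * Extent tree block checksumming (a summary of the helpers below):
 * the checksum is computed via ext4_chksum() over the header and all
 * entries up to the tail, seeded with the per-inode checksum seed.
 * Only on-disk (non-root) blocks carry a tail; the root node lives in
 * the inode body and is covered by the inode checksum instead, which
 * is why __ext4_ext_check() skips verification at the root depth.
 */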
static __le32 ext4_extent_block_csum(struct inode *inode,
				     struct ext4_extent_header *eh)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	__u32 csum;

	csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)eh,
			   EXT4_EXTENT_TAIL_OFFSET(eh));
	return cpu_to_le32(csum);
}

static int ext4_extent_block_csum_verify(struct inode *inode,
					 struct ext4_extent_header *eh)
{
	struct ext4_extent_tail *et;

	if (!ext4_has_metadata_csum(inode->i_sb))
		return 1;

	et = find_ext4_extent_tail(eh);
	if (et->et_checksum != ext4_extent_block_csum(inode, eh))
		return 0;
	return 1;
}

static void ext4_extent_block_csum_set(struct inode *inode,
				       struct ext4_extent_header *eh)
{
	struct ext4_extent_tail *et;

	if (!ext4_has_metadata_csum(inode->i_sb))
		return;

	et = find_ext4_extent_tail(eh);
	et->et_checksum = ext4_extent_block_csum(inode, eh);
}

static int ext4_split_extent_at(handle_t *handle,
				struct inode *inode,
				struct ext4_ext_path **ppath,
				ext4_lblk_t split,
				int split_flag,
				int flags);

static int ext4_ext_trunc_restart_fn(struct inode *inode, int *dropped)
{
	/*
	 * Drop i_data_sem to avoid deadlock with ext4_map_blocks.  At this
	 * moment, get_block can be called only for blocks inside i_size since
	 * page cache has been already dropped and writes are blocked by
	 * i_rwsem. So we can safely drop the i_data_sem here.
	 */
	BUG_ON(EXT4_JOURNAL(inode) == NULL);
	ext4_discard_preallocations(inode);
	up_write(&EXT4_I(inode)->i_data_sem);
	*dropped = 1;
	return 0;
}

static inline void ext4_ext_path_brelse(struct ext4_ext_path *path)
{
	brelse(path->p_bh);
	path->p_bh = NULL;
}

static void ext4_ext_drop_refs(struct ext4_ext_path *path)
{
	int depth, i;

	if (!path)
		return;
	depth = path->p_depth;
	for (i = 0; i <= depth; i++, path++)
		ext4_ext_path_brelse(path);
}

void ext4_free_ext_path(struct ext4_ext_path *path)
{
	ext4_ext_drop_refs(path);
	kfree(path);
}

/*
 * Make sure 'handle' has at least 'check_cred' credits. If not, restart
 * transaction with 'restart_cred' credits. The function drops i_data_sem
 * when restarting transaction and gets it after transaction is restarted.
 *
 * The function returns 0 on success, 1 if transaction had to be restarted,
 * and < 0 in case of fatal error.
 */
int ext4_datasem_ensure_credits(handle_t *handle, struct inode *inode,
				int check_cred, int restart_cred,
				int revoke_cred)
{
	int ret;
	int dropped = 0;

	ret = ext4_journal_ensure_credits_fn(handle, check_cred, restart_cred,
		revoke_cred, ext4_ext_trunc_restart_fn(inode, &dropped));
	if (dropped)
		down_write(&EXT4_I(inode)->i_data_sem);
	return ret;
}

/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 */
static int ext4_ext_get_access(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path)
{
	int err = 0;

	if (path->p_bh) {
		/* path points to block */
		BUFFER_TRACE(path->p_bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, inode->i_sb,
						    path->p_bh, EXT4_JTR_NONE);
		/*
		 * The extent buffer's verified bit will be set again in
		 * __ext4_ext_dirty(). We could leave an inconsistent
		 * buffer if the extent-updating procedure breaks off due
		 * to some error, so force it to be checked again.
		 */
		if (!err)
			clear_buffer_verified(path->p_bh);
	}
	/* path points to leaf/index in inode body */
	/* we use in-core data, no need to protect them */
	return err;
}

/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 *  - EIO
 */
static int __ext4_ext_dirty(const char *where, unsigned int line,
			    handle_t *handle, struct inode *inode,
			    struct ext4_ext_path *path)
{
	int err;

	WARN_ON(!rwsem_is_locked(&EXT4_I(inode)->i_data_sem));
	if (path->p_bh) {
		ext4_extent_block_csum_set(inode, ext_block_hdr(path->p_bh));
		/* path points to block */
		err = __ext4_handle_dirty_metadata(where, line, handle,
						   inode, path->p_bh);
		/* Extents updating done, re-set verified flag */
		if (!err)
			set_buffer_verified(path->p_bh);
	} else {
		/* path points to leaf/index in inode body */
		err = ext4_mark_inode_dirty(handle, inode);
	}
	return err;
}

#define ext4_ext_dirty(handle, inode, path) \
		__ext4_ext_dirty(__func__, __LINE__, (handle), (inode), (path))
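
/*
 * ext4_ext_find_goal:
 * pick a starting physical block for a new allocation near logical
 * block @block, extrapolating from the neighbouring extent in @path
 * when one exists, else falling back to the index block itself or to
 * the inode's block group.
 */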
static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
			      struct ext4_ext_path *path,
			      ext4_lblk_t block)
{
	if (path) {
		int depth = path->p_depth;
		struct ext4_extent *ex;

		/*
		 * Try to predict block placement assuming that we are
		 * filling in a file which will eventually be
		 * non-sparse --- i.e., in the case of libbfd writing
		 * an ELF object sections out-of-order but in a way
		 * that eventually results in a contiguous object or
		 * executable file, or some database extending a table
		 * space file.  However, this is actually somewhat
		 * non-ideal if we are writing a sparse file such as
		 * qemu or KVM writing a raw image file that is going
		 * to stay fairly sparse, since it will end up
		 * fragmenting the file system's free space.  Maybe we
		 * should have some heuristics or some way to allow
		 * userspace to pass a hint to file system,
		 * especially if the latter case turns out to be
		 * common.
		 */
		ex = path[depth].p_ext;
		if (ex) {
			ext4_fsblk_t ext_pblk = ext4_ext_pblock(ex);
			ext4_lblk_t ext_block = le32_to_cpu(ex->ee_block);

			if (block > ext_block)
				return ext_pblk + (block - ext_block);
			else
				return ext_pblk - (ext_block - block);
		}

		/* it looks like index is empty;
		 * try to find starting block from index itself */
		if (path[depth].p_bh)
			return path[depth].p_bh->b_blocknr;
	}

	/* OK. use inode's group */
	return ext4_inode_to_goal_block(inode);
}

/*
 * Allocation for a meta data block
 */
static ext4_fsblk_t
ext4_ext_new_meta_block(handle_t *handle, struct inode *inode,
			struct ext4_ext_path *path,
			struct ext4_extent *ex, int *err, unsigned int flags)
{
	ext4_fsblk_t goal, newblock;

	goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block));
	newblock = ext4_new_meta_blocks(handle, inode, goal, flags,
					NULL, err);
	return newblock;
}
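
/*
 * The ext4_ext_space_*() helpers return how many extent or index
 * entries fit in a tree node: on-disk blocks hold
 * (blocksize - header) / entry_size entries, while the root node is
 * limited by the size of i_data in the inode body.  AGGRESSIVE_TEST
 * caps these limits at tiny values so that tree growth and splitting
 * paths get exercised even for small files.
 */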
static inline int ext4_ext_space_block(struct inode *inode, int check)
{
	int size;

	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 6)
		size = 6;
#endif
	return size;
}

static inline int ext4_ext_space_block_idx(struct inode *inode, int check)
{
	int size;

	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 5)
		size = 5;
#endif
	return size;
}

static inline int ext4_ext_space_root(struct inode *inode, int check)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 3)
		size = 3;
#endif
	return size;
}

static inline int ext4_ext_space_root_idx(struct inode *inode, int check)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 4)
		size = 4;
#endif
	return size;
}
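
/*
 * Split the extent covering @lblk at @lblk, preserving the unwritten
 * state of both halves.  With @nofail set, reserved metadata blocks
 * and __GFP_NOFAIL allocations are used so that the split does not
 * fail with ENOSPC.
 */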
static inline int
ext4_force_split_extent_at(handle_t *handle, struct inode *inode,
			   struct ext4_ext_path **ppath, ext4_lblk_t lblk,
			   int nofail)
{
	struct ext4_ext_path *path = *ppath;
	int unwritten = ext4_ext_is_unwritten(path[path->p_depth].p_ext);
	int flags = EXT4_EX_NOCACHE | EXT4_GET_BLOCKS_PRE_IO;

	if (nofail)
		flags |= EXT4_GET_BLOCKS_METADATA_NOFAIL | EXT4_EX_NOFAIL;

	return ext4_split_extent_at(handle, inode, ppath, lblk, unwritten ?
			EXT4_EXT_MARK_UNWRIT1|EXT4_EXT_MARK_UNWRIT2 : 0,
			flags);
}

static int
ext4_ext_max_entries(struct inode *inode, int depth)
{
	int max;

	if (depth == ext_depth(inode)) {
		if (depth == 0)
			max = ext4_ext_space_root(inode, 1);
		else
			max = ext4_ext_space_root_idx(inode, 1);
	} else {
		if (depth == 0)
			max = ext4_ext_space_block(inode, 1);
		else
			max = ext4_ext_space_block_idx(inode, 1);
	}

	return max;
}

static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
{
	ext4_fsblk_t block = ext4_ext_pblock(ext);
	int len = ext4_ext_get_actual_len(ext);
	ext4_lblk_t lblock = le32_to_cpu(ext->ee_block);

	/*
	 * We allow neither:
	 *  - zero length
	 *  - overflow/wrap-around
	 */
	if (lblock + len <= lblock)
		return 0;
	return ext4_inode_block_valid(inode, block, len);
}

static int ext4_valid_extent_idx(struct inode *inode,
				 struct ext4_extent_idx *ext_idx)
{
	ext4_fsblk_t block = ext4_idx_pblock(ext_idx);

	return ext4_inode_block_valid(inode, block, 1);
}
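
/*
 * Validate all entries of a single tree node: extents must be
 * non-empty, within the filesystem and sorted without overlaps, and
 * index entries must point at valid blocks in strictly increasing
 * order.  When an overlap is found, *pblk is set to the offending
 * physical block for error reporting.
 */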
static int ext4_valid_extent_entries(struct inode *inode,
				     struct ext4_extent_header *eh,
				     ext4_lblk_t lblk, ext4_fsblk_t *pblk,
				     int depth)
{
	unsigned short entries;
	ext4_lblk_t lblock = 0;
	ext4_lblk_t cur = 0;

	if (eh->eh_entries == 0)
		return 1;

	entries = le16_to_cpu(eh->eh_entries);

	if (depth == 0) {
		/* leaf entries */
		struct ext4_extent *ext = EXT_FIRST_EXTENT(eh);

		/*
		 * The logical block in the first entry should be equal to
		 * the number in the index block.
		 */
		if (depth != ext_depth(inode) &&
		    lblk != le32_to_cpu(ext->ee_block))
			return 0;
		while (entries) {
			if (!ext4_valid_extent(inode, ext))
				return 0;

			/* Check for overlapping extents */
			lblock = le32_to_cpu(ext->ee_block);
			if (lblock < cur) {
				*pblk = ext4_ext_pblock(ext);
				return 0;
			}
			cur = lblock + ext4_ext_get_actual_len(ext);
			ext++;
			entries--;
		}
	} else {
		struct ext4_extent_idx *ext_idx = EXT_FIRST_INDEX(eh);

		/*
		 * The logical block in the first entry should be equal to
		 * the number in the parent index block.
		 */
		if (depth != ext_depth(inode) &&
		    lblk != le32_to_cpu(ext_idx->ei_block))
			return 0;
		while (entries) {
			if (!ext4_valid_extent_idx(inode, ext_idx))
				return 0;

			/* Check for overlapping index extents */
			lblock = le32_to_cpu(ext_idx->ei_block);
			if (lblock < cur) {
				*pblk = ext4_idx_pblock(ext_idx);
				return 0;
			}
			ext_idx++;
			entries--;
			cur = lblock + 1;
		}
	}
	return 1;
}

static int __ext4_ext_check(const char *function, unsigned int line,
			    struct inode *inode, struct ext4_extent_header *eh,
			    int depth, ext4_fsblk_t pblk, ext4_lblk_t lblk)
{
	const char *error_msg;
	int max = 0, err = -EFSCORRUPTED;

	if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) {
		error_msg = "invalid magic";
		goto corrupted;
	}
	if (unlikely(le16_to_cpu(eh->eh_depth) != depth)) {
		error_msg = "unexpected eh_depth";
		goto corrupted;
	}
	if (unlikely(eh->eh_max == 0)) {
		error_msg = "invalid eh_max";
		goto corrupted;
	}
	max = ext4_ext_max_entries(inode, depth);
	if (unlikely(le16_to_cpu(eh->eh_max) > max)) {
		error_msg = "too large eh_max";
		goto corrupted;
	}
	if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) {
		error_msg = "invalid eh_entries";
		goto corrupted;
	}
	if (unlikely((eh->eh_entries == 0) && (depth > 0))) {
		error_msg = "eh_entries is 0 but eh_depth is > 0";
		goto corrupted;
	}
	if (!ext4_valid_extent_entries(inode, eh, lblk, &pblk, depth)) {
		error_msg = "invalid extent entries";
		goto corrupted;
	}
	if (unlikely(depth > 32)) {
		error_msg = "too large eh_depth";
		goto corrupted;
	}
	/* Verify checksum on non-root extent tree nodes */
	if (ext_depth(inode) != depth &&
	    !ext4_extent_block_csum_verify(inode, eh)) {
		error_msg = "extent tree corrupted";
		err = -EFSBADCRC;
		goto corrupted;
	}
	return 0;

corrupted:
	ext4_error_inode_err(inode, function, line, 0, -err,
			     "pblk %llu bad header/extent: %s - magic %x, "
			     "entries %u, max %u(%u), depth %u(%u)",
			     (unsigned long long) pblk, error_msg,
			     le16_to_cpu(eh->eh_magic),
			     le16_to_cpu(eh->eh_entries),
			     le16_to_cpu(eh->eh_max),
			     max, le16_to_cpu(eh->eh_depth), depth);
	return err;
}

#define ext4_ext_check(inode, eh, depth, pblk)			\
	__ext4_ext_check(__func__, __LINE__, (inode), (eh), (depth), (pblk), 0)

int ext4_ext_check_inode(struct inode *inode)
{
	return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode), 0);
}
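
/*
 * Pre-populate the extent status tree from a whole leaf: each on-disk
 * extent is cached as written or unwritten, and every gap between
 * consecutive extents is cached as a hole, so later lookups need not
 * re-read the leaf block.
 */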
static void ext4_cache_extents(struct inode *inode,
			       struct ext4_extent_header *eh)
{
	struct ext4_extent *ex = EXT_FIRST_EXTENT(eh);
	ext4_lblk_t prev = 0;
	int i;

	for (i = le16_to_cpu(eh->eh_entries); i > 0; i--, ex++) {
		unsigned int status = EXTENT_STATUS_WRITTEN;
		ext4_lblk_t lblk = le32_to_cpu(ex->ee_block);
		int len = ext4_ext_get_actual_len(ex);

		if (prev && (prev != lblk))
			ext4_es_cache_extent(inode, prev, lblk - prev, ~0,
					     EXTENT_STATUS_HOLE);

		if (ext4_ext_is_unwritten(ex))
			status = EXTENT_STATUS_UNWRITTEN;
		ext4_es_cache_extent(inode, lblk, len,
				     ext4_ext_pblock(ex), status);
		prev = lblk + len;
	}
}

static struct buffer_head *
__read_extent_tree_block(const char *function, unsigned int line,
			 struct inode *inode, struct ext4_extent_idx *idx,
			 int depth, int flags)
{
	struct buffer_head *bh;
	int err;
	gfp_t gfp_flags = __GFP_MOVABLE | GFP_NOFS;
	ext4_fsblk_t pblk;

	if (flags & EXT4_EX_NOFAIL)
		gfp_flags |= __GFP_NOFAIL;

	pblk = ext4_idx_pblock(idx);
	bh = sb_getblk_gfp(inode->i_sb, pblk, gfp_flags);
	if (unlikely(!bh))
		return ERR_PTR(-ENOMEM);

	if (!bh_uptodate_or_lock(bh)) {
		trace_ext4_ext_load_extent(inode, pblk, _RET_IP_);
		err = ext4_read_bh(bh, 0, NULL);
		if (err < 0)
			goto errout;
	}
	if (buffer_verified(bh) && !(flags & EXT4_EX_FORCE_CACHE))
		return bh;
	err = __ext4_ext_check(function, line, inode, ext_block_hdr(bh),
			       depth, pblk, le32_to_cpu(idx->ei_block));
	if (err)
		goto errout;
	set_buffer_verified(bh);
	/*
	 * If this is a leaf block, cache all of its entries
	 */
	if (!(flags & EXT4_EX_NOCACHE) && depth == 0) {
		struct ext4_extent_header *eh = ext_block_hdr(bh);
		ext4_cache_extents(inode, eh);
	}
	return bh;
errout:
	put_bh(bh);
	return ERR_PTR(err);

}

#define read_extent_tree_block(inode, idx, depth, flags)		\
	__read_extent_tree_block(__func__, __LINE__, (inode), (idx),	\
				 (depth), (flags))

/*
 * This function is called to cache a file's extent information in the
 * extent status tree
 */
int ext4_ext_precache(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_ext_path *path = NULL;
	struct buffer_head *bh;
	int i = 0, depth, ret = 0;

	if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		return 0;	/* not an extent-mapped inode */

	down_read(&ei->i_data_sem);
	depth = ext_depth(inode);

	/* Don't cache anything if there are no external extent blocks */
	if (!depth) {
		up_read(&ei->i_data_sem);
		return ret;
	}

	path = kcalloc(depth + 1, sizeof(struct ext4_ext_path),
		       GFP_NOFS);
	if (path == NULL) {
		up_read(&ei->i_data_sem);
		return -ENOMEM;
	}

	path[0].p_hdr = ext_inode_hdr(inode);
	ret = ext4_ext_check(inode, path[0].p_hdr, depth, 0);
	if (ret)
		goto out;
	path[0].p_idx = EXT_FIRST_INDEX(path[0].p_hdr);
	while (i >= 0) {
		/*
		 * If this is a leaf block or we've reached the end of
		 * the index block, go up
		 */
		if ((i == depth) ||
		    path[i].p_idx > EXT_LAST_INDEX(path[i].p_hdr)) {
			ext4_ext_path_brelse(path + i);
			i--;
			continue;
		}
		bh = read_extent_tree_block(inode, path[i].p_idx++,
					    depth - i - 1,
					    EXT4_EX_FORCE_CACHE);
		if (IS_ERR(bh)) {
			ret = PTR_ERR(bh);
			break;
		}
		i++;
		path[i].p_bh = bh;
		path[i].p_hdr = ext_block_hdr(bh);
		path[i].p_idx = EXT_FIRST_INDEX(path[i].p_hdr);
	}
	ext4_set_inode_state(inode, EXT4_STATE_EXT_PRECACHED);
out:
	up_read(&ei->i_data_sem);
	ext4_free_ext_path(path);
	return ret;
}
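
/*
 * Debugging helpers: with EXT_DEBUG defined these dump the current
 * path, the contents of a leaf, and the entries being moved during a
 * split; otherwise they compile to nothing.
 */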
#ifdef EXT_DEBUG
static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
{
	int k, l = path->p_depth;

	ext_debug(inode, "path:");
	for (k = 0; k <= l; k++, path++) {
		if (path->p_idx) {
			ext_debug(inode, "  %d->%llu",
				  le32_to_cpu(path->p_idx->ei_block),
				  ext4_idx_pblock(path->p_idx));
		} else if (path->p_ext) {
			ext_debug(inode, "  %d:[%d]%d:%llu ",
				  le32_to_cpu(path->p_ext->ee_block),
				  ext4_ext_is_unwritten(path->p_ext),
				  ext4_ext_get_actual_len(path->p_ext),
				  ext4_ext_pblock(path->p_ext));
		} else
			ext_debug(inode, "  []");
	}
	ext_debug(inode, "\n");
}

static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
{
	int depth = ext_depth(inode);
	struct ext4_extent_header *eh;
	struct ext4_extent *ex;
	int i;

	if (!path)
		return;

	eh = path[depth].p_hdr;
	ex = EXT_FIRST_EXTENT(eh);

	ext_debug(inode, "Displaying leaf extents\n");

	for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) {
		ext_debug(inode, "%d:[%d]%d:%llu ", le32_to_cpu(ex->ee_block),
			  ext4_ext_is_unwritten(ex),
			  ext4_ext_get_actual_len(ex), ext4_ext_pblock(ex));
	}
	ext_debug(inode, "\n");
}

static void ext4_ext_show_move(struct inode *inode, struct ext4_ext_path *path,
			ext4_fsblk_t newblock, int level)
{
	int depth = ext_depth(inode);
	struct ext4_extent *ex;

	if (depth != level) {
		struct ext4_extent_idx *idx;
		idx = path[level].p_idx;
		while (idx <= EXT_MAX_INDEX(path[level].p_hdr)) {
			ext_debug(inode, "%d: move %d:%llu in new index %llu\n",
				  level, le32_to_cpu(idx->ei_block),
				  ext4_idx_pblock(idx), newblock);
			idx++;
		}

		return;
	}

	ex = path[depth].p_ext;
	while (ex <= EXT_MAX_EXTENT(path[depth].p_hdr)) {
		ext_debug(inode, "move %d:%llu:[%d]%d in new leaf %llu\n",
				le32_to_cpu(ex->ee_block),
				ext4_ext_pblock(ex),
				ext4_ext_is_unwritten(ex),
				ext4_ext_get_actual_len(ex),
				newblock);
		ex++;
	}
}

#else
#define ext4_ext_show_path(inode, path)
#define ext4_ext_show_leaf(inode, path)
#define ext4_ext_show_move(inode, path, newblock, level)
#endif

/*
 * ext4_ext_binsearch_idx:
 * binary search for the closest index of the given block
 * the header must be checked before calling this
 */
static void
ext4_ext_binsearch_idx(struct inode *inode,
			struct ext4_ext_path *path, ext4_lblk_t block)
{
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent_idx *r, *l, *m;


	ext_debug(inode, "binsearch for %u(idx):  ", block);

	l = EXT_FIRST_INDEX(eh) + 1;
	r = EXT_LAST_INDEX(eh);
	while (l <= r) {
		m = l + (r - l) / 2;
		ext_debug(inode, "%p(%u):%p(%u):%p(%u) ", l,
			  le32_to_cpu(l->ei_block), m, le32_to_cpu(m->ei_block),
			  r, le32_to_cpu(r->ei_block));

		if (block < le32_to_cpu(m->ei_block))
			r = m - 1;
		else
			l = m + 1;
	}

	path->p_idx = l - 1;
	ext_debug(inode, "  -> %u->%lld ", le32_to_cpu(path->p_idx->ei_block),
		  ext4_idx_pblock(path->p_idx));

#ifdef CHECK_BINSEARCH
	{
		struct ext4_extent_idx *chix, *ix;
		int k;

		chix = ix = EXT_FIRST_INDEX(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) {
			if (k != 0 && le32_to_cpu(ix->ei_block) <=
			    le32_to_cpu(ix[-1].ei_block)) {
				printk(KERN_DEBUG "k=%d, ix=0x%p, "
				       "first=0x%p\n", k,
				       ix, EXT_FIRST_INDEX(eh));
				printk(KERN_DEBUG "%u <= %u\n",
				       le32_to_cpu(ix->ei_block),
				       le32_to_cpu(ix[-1].ei_block));
			}
			BUG_ON(k && le32_to_cpu(ix->ei_block)
					   <= le32_to_cpu(ix[-1].ei_block));
			if (block < le32_to_cpu(ix->ei_block))
				break;
			chix = ix;
		}
		BUG_ON(chix != path->p_idx);
	}
#endif

}

/*
 * ext4_ext_binsearch:
 * binary search for closest extent of the given block
 * the header must be checked before calling this
 */
static void
ext4_ext_binsearch(struct inode *inode,
		struct ext4_ext_path *path, ext4_lblk_t block)
{
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent *r, *l, *m;

	if (eh->eh_entries == 0) {
		/*
		 * this leaf is empty:
		 * we get such a leaf in split/add case
		 */
		return;
	}

	ext_debug(inode, "binsearch for %u:  ", block);

	l = EXT_FIRST_EXTENT(eh) + 1;
	r = EXT_LAST_EXTENT(eh);

	while (l <= r) {
		m = l + (r - l) / 2;
		ext_debug(inode, "%p(%u):%p(%u):%p(%u) ", l,
			  le32_to_cpu(l->ee_block), m, le32_to_cpu(m->ee_block),
			  r, le32_to_cpu(r->ee_block));

		if (block < le32_to_cpu(m->ee_block))
			r = m - 1;
		else
			l = m + 1;
	}

	path->p_ext = l - 1;
	ext_debug(inode, "  -> %d:%llu:[%d]%d ",
			le32_to_cpu(path->p_ext->ee_block),
			ext4_ext_pblock(path->p_ext),
			ext4_ext_is_unwritten(path->p_ext),
			ext4_ext_get_actual_len(path->p_ext));

#ifdef CHECK_BINSEARCH
	{
		struct ext4_extent *chex, *ex;
		int k;

		chex = ex = EXT_FIRST_EXTENT(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) {
			BUG_ON(k && le32_to_cpu(ex->ee_block)
					  <= le32_to_cpu(ex[-1].ee_block));
			if (block < le32_to_cpu(ex->ee_block))
				break;
			chex = ex;
		}
		BUG_ON(chex != path->p_ext);
	}
#endif

}

void ext4_ext_tree_init(handle_t *handle, struct inode *inode)
{
	struct ext4_extent_header *eh;

	eh = ext_inode_hdr(inode);
	eh->eh_depth = 0;
	eh->eh_entries = 0;
	eh->eh_magic = EXT4_EXT_MAGIC;
	eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode, 0));
	eh->eh_generation = 0;
	ext4_mark_inode_dirty(handle, inode);
}
struct ext4_ext_path *
ext4_find_extent(struct inode *inode, ext4_lblk_t block,
		 struct ext4_ext_path **orig_path, int flags)
{
	struct ext4_extent_header *eh;
	struct buffer_head *bh;
	struct ext4_ext_path *path = orig_path ? *orig_path : NULL;
	short int depth, i, ppos = 0;
	int ret;
	gfp_t gfp_flags = GFP_NOFS;

	if (flags & EXT4_EX_NOFAIL)
		gfp_flags |= __GFP_NOFAIL;

	eh = ext_inode_hdr(inode);
	depth = ext_depth(inode);
	if (depth < 0 || depth > EXT4_MAX_EXTENT_DEPTH) {
		EXT4_ERROR_INODE(inode, "inode has invalid extent depth: %d",
				 depth);
		ret = -EFSCORRUPTED;
		goto err;
	}

	if (path) {
		ext4_ext_drop_refs(path);
		if (depth > path[0].p_maxdepth) {
			kfree(path);
			*orig_path = path = NULL;
		}
	}
	if (!path) {
		/* account possible depth increase */
		path = kcalloc(depth + 2, sizeof(struct ext4_ext_path),
				gfp_flags);
		if (unlikely(!path))
			return ERR_PTR(-ENOMEM);
		path[0].p_maxdepth = depth + 1;
	}
	path[0].p_hdr = eh;
	path[0].p_bh = NULL;

	i = depth;
	if (!(flags & EXT4_EX_NOCACHE) && depth == 0)
		ext4_cache_extents(inode, eh);
	/* walk through the tree */
	while (i) {
		ext_debug(inode, "depth %d: num %d, max %d\n",
			  ppos, le16_to_cpu(eh->eh_entries),
			  le16_to_cpu(eh->eh_max));

		ext4_ext_binsearch_idx(inode, path + ppos, block);
		path[ppos].p_block = ext4_idx_pblock(path[ppos].p_idx);
		path[ppos].p_depth = i;
		path[ppos].p_ext = NULL;

		bh = read_extent_tree_block(inode, path[ppos].p_idx, --i, flags);
		if (IS_ERR(bh)) {
			ret = PTR_ERR(bh);
			goto err;
		}

		eh = ext_block_hdr(bh);
		ppos++;
		path[ppos].p_bh = bh;
		path[ppos].p_hdr = eh;
	}

	path[ppos].p_depth = i;
	path[ppos].p_ext = NULL;
	path[ppos].p_idx = NULL;

	/* find extent */
	ext4_ext_binsearch(inode, path + ppos, block);
	/* if not an empty leaf */
	if (path[ppos].p_ext)
		path[ppos].p_block = ext4_ext_pblock(path[ppos].p_ext);

	ext4_ext_show_path(inode, path);

	if (orig_path)
		*orig_path = path;
	return path;

err:
	ext4_free_ext_path(path);
	if (orig_path)
		*orig_path = NULL;
	return ERR_PTR(ret);
}

/*
 * ext4_ext_insert_index:
 * insert new index [@logical;@ptr] into the block at @curp;
 * check where to insert: before @curp or after @curp
 */
static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
				 struct ext4_ext_path *curp,
				 int logical, ext4_fsblk_t ptr)
{
	struct ext4_extent_idx *ix;
	int len, err;

	err = ext4_ext_get_access(handle, inode, curp);
	if (err)
		return err;

	if (unlikely(logical == le32_to_cpu(curp->p_idx->ei_block))) {
		EXT4_ERROR_INODE(inode,
				 "logical %d == ei_block %d!",
				 logical, le32_to_cpu(curp->p_idx->ei_block));
		return -EFSCORRUPTED;
	}

	if (unlikely(le16_to_cpu(curp->p_hdr->eh_entries)
			     >= le16_to_cpu(curp->p_hdr->eh_max))) {
		EXT4_ERROR_INODE(inode,
				 "eh_entries %d >= eh_max %d!",
				 le16_to_cpu(curp->p_hdr->eh_entries),
				 le16_to_cpu(curp->p_hdr->eh_max));
		return -EFSCORRUPTED;
	}

	if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
		/* insert after */
		ext_debug(inode, "insert new index %d after: %llu\n",
			  logical, ptr);
		ix = curp->p_idx + 1;
	} else {
		/* insert before */
		ext_debug(inode, "insert new index %d before: %llu\n",
			  logical, ptr);
		ix = curp->p_idx;
	}

	if (unlikely(ix > EXT_MAX_INDEX(curp->p_hdr))) {
		EXT4_ERROR_INODE(inode, "ix > EXT_MAX_INDEX!");
		return -EFSCORRUPTED;
	}

	len = EXT_LAST_INDEX(curp->p_hdr) - ix + 1;
	BUG_ON(len < 0);
	if (len > 0) {
		ext_debug(inode, "insert new index %d: "
				"move %d indices from 0x%p to 0x%p\n",
				logical, len, ix, ix + 1);
		memmove(ix + 1, ix, len * sizeof(struct ext4_extent_idx));
	}

	ix->ei_block = cpu_to_le32(logical);
	ext4_idx_store_pblock(ix, ptr);
	le16_add_cpu(&curp->p_hdr->eh_entries, 1);

	if (unlikely(ix > EXT_LAST_INDEX(curp->p_hdr))) {
		EXT4_ERROR_INODE(inode, "ix > EXT_LAST_INDEX!");
		return -EFSCORRUPTED;
	}

	err = ext4_ext_dirty(handle, inode, curp);
	ext4_std_error(inode->i_sb, err);

	return err;
}

/*
 * ext4_ext_split:
 * inserts new subtree into the path, using free index entry
 * at depth @at:
 * - allocates all needed blocks (new leaf and all intermediate index blocks)
 * - makes decision where to split
 * - moves remaining extents and index entries (right to the split point)
 *   into the newly allocated blocks
 * - initializes subtree
 */
static int ext4_ext_split(handle_t *handle, struct inode *inode,
			  unsigned int flags,
			  struct ext4_ext_path *path,
			  struct ext4_extent *newext, int at)
{
	struct buffer_head *bh = NULL;
	int depth = ext_depth(inode);
	struct ext4_extent_header *neh;
	struct ext4_extent_idx *fidx;
	int i = at, k, m, a;
	ext4_fsblk_t newblock, oldblock;
	__le32 border;
	ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */
	gfp_t gfp_flags = GFP_NOFS;
	int err = 0;
	size_t ext_size = 0;

	if (flags & EXT4_EX_NOFAIL)
		gfp_flags |= __GFP_NOFAIL;

	/* make decision: where to split? */
	/* FIXME: now decision is simplest: at current extent */

	/* if current leaf will be split, then we should use
	 * border from split point */
	if (unlikely(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr))) {
		EXT4_ERROR_INODE(inode, "p_ext > EXT_MAX_EXTENT!");
		return -EFSCORRUPTED;
	}
	if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
		border = path[depth].p_ext[1].ee_block;
		ext_debug(inode, "leaf will be split."
				" next leaf starts at %d\n",
				le32_to_cpu(border));
	} else {
		border = newext->ee_block;
		ext_debug(inode, "leaf will be added."
				" next leaf starts at %d\n",
				le32_to_cpu(border));
	}

	/*
	 * If an error occurs, we break off processing and mark the
	 * filesystem read-only.  The index won't be inserted and the
	 * tree will remain in a consistent state.  The next mount will
	 * repair the buffers, too.
	 */

	/*
	 * Get an array to track all allocated blocks.
	 * We need this to handle errors and free the blocks on error.
	 */
	ablocks = kcalloc(depth, sizeof(ext4_fsblk_t), gfp_flags);
	if (!ablocks)
		return -ENOMEM;

	/* allocate all needed blocks */
	ext_debug(inode, "allocate %d blocks for indexes/leaf\n", depth - at);
	for (a = 0; a < depth - at; a++) {
		newblock = ext4_ext_new_meta_block(handle, inode, path,
						   newext, &err, flags);
		if (newblock == 0)
			goto cleanup;
		ablocks[a] = newblock;
	}

	/* initialize new leaf */
	newblock = ablocks[--a];
	if (unlikely(newblock == 0)) {
		EXT4_ERROR_INODE(inode, "newblock == 0!");
		err = -EFSCORRUPTED;
		goto cleanup;
	}
	bh = sb_getblk_gfp(inode->i_sb, newblock, __GFP_MOVABLE | GFP_NOFS);
	if (unlikely(!bh)) {
		err = -ENOMEM;
		goto cleanup;
	}
	lock_buffer(bh);

	err = ext4_journal_get_create_access(handle, inode->i_sb, bh,
					     EXT4_JTR_NONE);
	if (err)
		goto cleanup;

	neh = ext_block_hdr(bh);
	neh->eh_entries = 0;
	neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
	neh->eh_magic = EXT4_EXT_MAGIC;
	neh->eh_depth = 0;
	neh->eh_generation = 0;

	/* move remainder of path[depth] to the new leaf */
	if (unlikely(path[depth].p_hdr->eh_entries !=
		     path[depth].p_hdr->eh_max)) {
		EXT4_ERROR_INODE(inode, "eh_entries %d != eh_max %d!",
				 path[depth].p_hdr->eh_entries,
				 path[depth].p_hdr->eh_max);
		err = -EFSCORRUPTED;
		goto cleanup;
	}
	/* start copy from next extent */
	m = EXT_MAX_EXTENT(path[depth].p_hdr) - path[depth].p_ext++;
	ext4_ext_show_move(inode, path, newblock, depth);
	if (m) {
		struct ext4_extent *ex;
		ex = EXT_FIRST_EXTENT(neh);
		memmove(ex, path[depth].p_ext, sizeof(struct ext4_extent) * m);
		le16_add_cpu(&neh->eh_entries, m);
	}

	/* zero out unused area in the extent block */
	ext_size = sizeof(struct ext4_extent_header) +
		sizeof(struct ext4_extent) * le16_to_cpu(neh->eh_entries);
	memset(bh->b_data + ext_size, 0, inode->i_sb->s_blocksize - ext_size);
	ext4_extent_block_csum_set(inode, neh);
	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	err = ext4_handle_dirty_metadata(handle, inode, bh);
	if (err)
		goto cleanup;
	brelse(bh);
	bh = NULL;

	/* correct old leaf */
	if (m) {
		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			goto cleanup;
		le16_add_cpu(&path[depth].p_hdr->eh_entries, -m);
		err = ext4_ext_dirty(handle, inode, path + depth);
		if (err)
			goto cleanup;

	}

	/* create intermediate indexes */
	k = depth - at - 1;
	if (unlikely(k < 0)) {
		EXT4_ERROR_INODE(inode, "k %d < 0!", k);
		err = -EFSCORRUPTED;
		goto cleanup;
	}
	if (k)
		ext_debug(inode, "create %d intermediate indices\n", k);
	/* insert new index into current index block */
	/* current depth stored in i var */
	i = depth - 1;
	while (k--) {
		oldblock = newblock;
		newblock = ablocks[--a];
		bh = sb_getblk(inode->i_sb, newblock);
		if (unlikely(!bh)) {
			err = -ENOMEM;
			goto cleanup;
		}
		lock_buffer(bh);

		err = ext4_journal_get_create_access(handle, inode->i_sb, bh,
						     EXT4_JTR_NONE);
		if (err)
			goto cleanup;

		neh = ext_block_hdr(bh);
		neh->eh_entries = cpu_to_le16(1);
		neh->eh_magic = EXT4_EXT_MAGIC;
		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
		neh->eh_depth = cpu_to_le16(depth - i);
		neh->eh_generation = 0;
		fidx = EXT_FIRST_INDEX(neh);
		fidx->ei_block = border;
		ext4_idx_store_pblock(fidx, oldblock);

		ext_debug(inode, "int.index at %d (block %llu): %u -> %llu\n",
				i, newblock, le32_to_cpu(border), oldblock);

		/* move remainder of path[i] to the new index block */
		if (unlikely(EXT_MAX_INDEX(path[i].p_hdr) !=
					EXT_LAST_INDEX(path[i].p_hdr))) {
			EXT4_ERROR_INODE(inode,
					 "EXT_MAX_INDEX != EXT_LAST_INDEX ee_block %d!",
					 le32_to_cpu(path[i].p_ext->ee_block));
			err = -EFSCORRUPTED;
			goto cleanup;
		}
		/* start copy indexes */
		m = EXT_MAX_INDEX(path[i].p_hdr) - path[i].p_idx++;
		ext_debug(inode, "cur 0x%p, last 0x%p\n", path[i].p_idx,
				EXT_MAX_INDEX(path[i].p_hdr));
		ext4_ext_show_move(inode, path, newblock, i);
		if (m) {
			memmove(++fidx, path[i].p_idx,
				sizeof(struct ext4_extent_idx) * m);
			le16_add_cpu(&neh->eh_entries, m);
		}
		/* zero out unused area in the extent block */
		ext_size = sizeof(struct ext4_extent_header) +
		   (sizeof(struct ext4_extent) * le16_to_cpu(neh->eh_entries));
		memset(bh->b_data + ext_size, 0,
			inode->i_sb->s_blocksize - ext_size);
		ext4_extent_block_csum_set(inode, neh);
		set_buffer_uptodate(bh);
		unlock_buffer(bh);

		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (err)
			goto cleanup;
		brelse(bh);
		bh = NULL;

		/* correct old index */
		if (m) {
			err = ext4_ext_get_access(handle, inode, path + i);
			if (err)
				goto cleanup;
			le16_add_cpu(&path[i].p_hdr->eh_entries, -m);
			err = ext4_ext_dirty(handle, inode, path + i);
			if (err)
				goto cleanup;
		}

		i--;
	}

	/* insert new index */
	err = ext4_ext_insert_index(handle, inode, path + at,
				    le32_to_cpu(border), newblock);

cleanup:
	if (bh) {
		if (buffer_locked(bh))
			unlock_buffer(bh);
		brelse(bh);
	}

	if (err) {
		/* free all allocated blocks in error case */
		for (i = 0; i < depth; i++) {
			if (!ablocks[i])
				continue;
			ext4_free_blocks(handle, inode, NULL, ablocks[i], 1,
					 EXT4_FREE_BLOCKS_METADATA);
		}
	}
	kfree(ablocks);

	return err;
}

/*
 * ext4_ext_grow_indepth:
 * implements tree growing procedure:
 * - allocates new block
 * - moves top-level data (index block or leaf) into the new block
 * - initializes new top-level, creating index that points to the
 *   just created block
 */
static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
				 unsigned int flags)
{
	struct ext4_extent_header *neh;
	struct buffer_head *bh;
	ext4_fsblk_t newblock, goal = 0;
	struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
	int err = 0;
	size_t ext_size = 0;

	/* Try to prepend new index to old one */
	if (ext_depth(inode))
		goal = ext4_idx_pblock(EXT_FIRST_INDEX(ext_inode_hdr(inode)));
	if (goal > le32_to_cpu(es->s_first_data_block)) {
		flags |= EXT4_MB_HINT_TRY_GOAL;
		goal--;
	} else
		goal = ext4_inode_to_goal_block(inode);
	newblock = ext4_new_meta_blocks(handle, inode, goal, flags,
					NULL, &err);
	if (newblock == 0)
		return err;

	bh = sb_getblk_gfp(inode->i_sb, newblock, __GFP_MOVABLE | GFP_NOFS);
	if (unlikely(!bh))
		return -ENOMEM;
	lock_buffer(bh);

	err = ext4_journal_get_create_access(handle, inode->i_sb, bh,
					     EXT4_JTR_NONE);
	if (err) {
		unlock_buffer(bh);
		goto out;
	}

	ext_size = sizeof(EXT4_I(inode)->i_data);
	/* move top-level index/leaf into new block */
	memmove(bh->b_data, EXT4_I(inode)->i_data, ext_size);
	/* zero out unused area in the extent block */
	memset(bh->b_data + ext_size, 0, inode->i_sb->s_blocksize - ext_size);

	/* set size of new block */
	neh = ext_block_hdr(bh);
	/* old root could have indexes or leaves
	 * so calculate eh_max the right way */
	if (ext_depth(inode))
		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
	else
		neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
	neh->eh_magic = EXT4_EXT_MAGIC;
	ext4_extent_block_csum_set(inode, neh);
	set_buffer_uptodate(bh);
	set_buffer_verified(bh);
	unlock_buffer(bh);

	err = ext4_handle_dirty_metadata(handle, inode, bh);
	if (err)
		goto out;

	/* Update top-level index: num,max,pointer */
	neh = ext_inode_hdr(inode);
	neh->eh_entries = cpu_to_le16(1);
	ext4_idx_store_pblock(EXT_FIRST_INDEX(neh), newblock);
	if (neh->eh_depth == 0) {
		/* Root extent block becomes index block */
		neh->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode, 0));
		EXT_FIRST_INDEX(neh)->ei_block =
			EXT_FIRST_EXTENT(neh)->ee_block;
	}
	ext_debug(inode, "new root: num %d(%d), lblock %d, ptr %llu\n",
		  le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max),
		  le32_to_cpu(EXT_FIRST_INDEX(neh)->ei_block),
		  ext4_idx_pblock(EXT_FIRST_INDEX(neh)));

	le16_add_cpu(&neh->eh_depth, 1);
	err = ext4_mark_inode_dirty(handle, inode);
out:
	brelse(bh);

	return err;
}

/*
 * ext4_ext_create_new_leaf:
 * finds empty index and adds new leaf.
 * if no free index is found, then it requests in-depth growing.
 */
static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
				    unsigned int mb_flags,
				    unsigned int gb_flags,
				    struct ext4_ext_path **ppath,
				    struct ext4_extent *newext)
{
	struct ext4_ext_path *path = *ppath;
	struct ext4_ext_path *curp;
	int depth, i, err = 0;

repeat:
	i = depth = ext_depth(inode);

	/* walk up the tree looking for a free index entry */
	curp = path + depth;
	while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) {
		i--;
		curp--;
	}

	/* we use already allocated block for index block,
	 * so subsequent data blocks should be contiguous */
	if (EXT_HAS_FREE_INDEX(curp)) {
		/* if we found index with free entry, then use that
		 * entry: create all needed subtree and add new leaf */
		err = ext4_ext_split(handle, inode, mb_flags, path, newext, i);
		if (err)
			goto out;

		/* refill path */
		path = ext4_find_extent(inode,
				    (ext4_lblk_t)le32_to_cpu(newext->ee_block),
				    ppath, gb_flags);
		if (IS_ERR(path))
			err = PTR_ERR(path);
	} else {
		/* tree is full, time to grow in depth */
		err = ext4_ext_grow_indepth(handle, inode, mb_flags);
		if (err)
			goto out;

		/* refill path */
		path = ext4_find_extent(inode,
				    (ext4_lblk_t)le32_to_cpu(newext->ee_block),
				    ppath, gb_flags);
		if (IS_ERR(path)) {
			err = PTR_ERR(path);
			goto out;
		}

		/*
		 * only first (depth 0 -> 1) produces free space;
		 * in all other cases we have to split the grown tree
		 */
		depth = ext_depth(inode);
		if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
			/* now we need to split */
			goto repeat;
		}
	}

out:
	return err;
}

/*
 * search the closest allocated block to the left for *logical
 * and returns it at @logical + its physical address at @phys
 * if *logical is the smallest allocated block, the function
 * returns 0 at @phys
 * return value contains 0 (success) or error code
 */
static int ext4_ext_search_left(struct inode *inode,
				struct ext4_ext_path *path,
				ext4_lblk_t *logical, ext4_fsblk_t *phys)
{
	struct ext4_extent_idx *ix;
	struct ext4_extent *ex;
	int depth, ee_len;

	if (unlikely(path == NULL)) {
		EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
		return -EFSCORRUPTED;
	}
	depth = path->p_depth;
	*phys = 0;

	if (depth == 0 && path->p_ext == NULL)
		return 0;

	/* usually extent in the path covers blocks smaller
	 * than *logical, but it can be that extent is the
	 * first one in the file */

	ex = path[depth].p_ext;
	ee_len = ext4_ext_get_actual_len(ex);
	if (*logical < le32_to_cpu(ex->ee_block)) {
		if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
			EXT4_ERROR_INODE(inode,
					 "EXT_FIRST_EXTENT != ex *logical %d ee_block %d!",
					 *logical, le32_to_cpu(ex->ee_block));
			return -EFSCORRUPTED;
		}
		while (--depth >= 0) {
			ix = path[depth].p_idx;
			if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
				EXT4_ERROR_INODE(inode,
				  "ix (%d) != EXT_FIRST_INDEX (%d) (depth %d)!",
				  ix != NULL ? le32_to_cpu(ix->ei_block) : 0,
				  le32_to_cpu(EXT_FIRST_INDEX(path[depth].p_hdr)->ei_block),
				  depth);
				return -EFSCORRUPTED;
			}
		}
		return 0;
	}

	if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
		EXT4_ERROR_INODE(inode,
				 "logical %d < ee_block %d + ee_len %d!",
				 *logical, le32_to_cpu(ex->ee_block), ee_len);
		return -EFSCORRUPTED;
	}

	*logical = le32_to_cpu(ex->ee_block) + ee_len - 1;
	*phys = ext4_ext_pblock(ex) + ee_len - 1;
	return 0;
}

/*
 * Search the closest allocated block to the right for *logical
 * and returns it at @logical + its physical address at @phys.
 * If no such block exists, return 0 and set @phys to 0.  Return 1
 * when an allocated block was found, in which case *ret_ex is valid.
 * Or return a (< 0) error code.
 */
static int ext4_ext_search_right(struct inode *inode,
				 struct ext4_ext_path *path,
				 ext4_lblk_t *logical, ext4_fsblk_t *phys,
				 struct ext4_extent *ret_ex)
{
	struct buffer_head *bh = NULL;
	struct ext4_extent_header *eh;
	struct ext4_extent_idx *ix;
	struct ext4_extent *ex;
	int depth;	/* Note, NOT eh_depth; depth from top of tree */
	int ee_len;

	if (unlikely(path == NULL)) {
		EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
		return -EFSCORRUPTED;
	}
	depth = path->p_depth;
	*phys = 0;

	if (depth == 0 && path->p_ext == NULL)
		return 0;

	/* usually extent in the path covers blocks smaller
	 * than *logical, but it can be that extent is the
	 * first one in the file */

	ex = path[depth].p_ext;
	ee_len = ext4_ext_get_actual_len(ex);
	if (*logical < le32_to_cpu(ex->ee_block)) {
		if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
			EXT4_ERROR_INODE(inode,
					 "first_extent(path[%d].p_hdr) != ex",
					 depth);
			return -EFSCORRUPTED;
		}
		while (--depth >= 0) {
			ix = path[depth].p_idx;
			if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
				EXT4_ERROR_INODE(inode,
						 "ix != EXT_FIRST_INDEX *logical %d!",
						 *logical);
				return -EFSCORRUPTED;
			}
		}
		goto found_extent;
	}

	if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
		EXT4_ERROR_INODE(inode,
				 "logical %d < ee_block %d + ee_len %d!",
				 *logical, le32_to_cpu(ex->ee_block), ee_len);
		return -EFSCORRUPTED;
	}

	if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) {
		/* next allocated block in this leaf */
		ex++;
		goto found_extent;
	}

	/* go up and search for index to the right */
	while (--depth >= 0) {
		ix = path[depth].p_idx;
		if (ix != EXT_LAST_INDEX(path[depth].p_hdr))
			goto got_index;
	}

	/* we've gone up to the root and found no index to the right */
	return 0;

got_index:
	/* we've found index to the right, let's
	 * follow it and find the closest allocated
	 * block to the right */
	ix++;
	while (++depth < path->p_depth) {
		/* subtract from p_depth to get proper eh_depth */
		bh = read_extent_tree_block(inode, ix, path->p_depth - depth, 0);
		if (IS_ERR(bh))
			return PTR_ERR(bh);
		eh = ext_block_hdr(bh);
		ix = EXT_FIRST_INDEX(eh);
		put_bh(bh);
	}

	bh = read_extent_tree_block(inode, ix, path->p_depth - depth, 0);
	if (IS_ERR(bh))
		return PTR_ERR(bh);
	eh = ext_block_hdr(bh);
	ex = EXT_FIRST_EXTENT(eh);
found_extent:
	*logical = le32_to_cpu(ex->ee_block);
	*phys = ext4_ext_pblock(ex);
	if (ret_ex)
		*ret_ex = *ex;
	if (bh)
		put_bh(bh);
	return 1;
}

/*
 * ext4_ext_next_allocated_block:
 * returns allocated block in subsequent extent or EXT_MAX_BLOCKS.
 * NOTE: it considers block number from index entry as
 * allocated block. Thus, index entries have to be consistent
 * with leaves.
 */
ext4_lblk_t
ext4_ext_next_allocated_block(struct ext4_ext_path *path)
{
	int depth;

	BUG_ON(path == NULL);
	depth = path->p_depth;

	if (depth == 0 && path->p_ext == NULL)
		return EXT_MAX_BLOCKS;

	while (depth >= 0) {
		struct ext4_ext_path *p = &path[depth];

		if (depth == path->p_depth) {
			/* leaf */
			if (p->p_ext && p->p_ext != EXT_LAST_EXTENT(p->p_hdr))
				return le32_to_cpu(p->p_ext[1].ee_block);
		} else {
			/* index */
			if (p->p_idx != EXT_LAST_INDEX(p->p_hdr))
				return le32_to_cpu(p->p_idx[1].ei_block);
		}
		depth--;
	}

	return EXT_MAX_BLOCKS;
}

/*
 * ext4_ext_next_leaf_block:
 * returns first allocated block from next leaf or EXT_MAX_BLOCKS
 */
static ext4_lblk_t ext4_ext_next_leaf_block(struct ext4_ext_path *path)
{
	int depth;

	BUG_ON(path == NULL);
	depth = path->p_depth;

	/* zero-tree has no leaf blocks at all */
	if (depth == 0)
		return EXT_MAX_BLOCKS;

	/* go to index block */
	depth--;

	while (depth >= 0) {
		if (path[depth].p_idx !=
				EXT_LAST_INDEX(path[depth].p_hdr))
			return (ext4_lblk_t)
				le32_to_cpu(path[depth].p_idx[1].ei_block);
		depth--;
	}

	return EXT_MAX_BLOCKS;
}

/*
 * ext4_ext_correct_indexes:
 * if leaf gets modified and modified extent is first in the leaf,
 * then we have to correct all indexes above.
 * TODO: do we need to correct tree in all cases?
 */
static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path)
{
	struct ext4_extent_header *eh;
	int depth = ext_depth(inode);
	struct ext4_extent *ex;
	__le32 border;
	int k, err = 0;

	eh = path[depth].p_hdr;
	ex = path[depth].p_ext;

	if (unlikely(ex == NULL || eh == NULL)) {
		EXT4_ERROR_INODE(inode,
				 "ex %p == NULL or eh %p == NULL", ex, eh);
		return -EFSCORRUPTED;
	}

	if (depth == 0) {
		/* there is no tree at all */
		return 0;
	}

	if (ex != EXT_FIRST_EXTENT(eh)) {
		/* we correct tree if first leaf got modified only */
		return 0;
	}

	/*
	 * TODO: we need correction if border is smaller than current one
	 */
	k = depth - 1;
	border = path[depth].p_ext->ee_block;
	err = ext4_ext_get_access(handle, inode, path + k);
	if (err)
		return err;
	path[k].p_idx->ei_block = border;
	err = ext4_ext_dirty(handle, inode, path + k);
	if (err)
		return err;

	while (k--) {
		/* change all left-side indexes */
		if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
			break;
		err = ext4_ext_get_access(handle, inode, path + k);
		if (err)
			goto clean;
		path[k].p_idx->ei_block = border;
		err = ext4_ext_dirty(handle, inode, path + k);
		if (err)
			goto clean;
	}
	return 0;

clean:
	/*
	 * The path[k].p_bh is either unmodified or with no verified bit
	 * set (see ext4_ext_get_access()). So just clear the verified bit
	 * of the successfully modified extents buffers, which will force
	 * these extents to be checked to avoid using inconsistent data.
	 */
	while (++k < depth)
		clear_buffer_verified(path[k].p_bh);

	return err;
}
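
/*
 * Two extents can be merged when they are logically and physically
 * contiguous, share the same written/unwritten state, and the merged
 * length still fits in ee_len, whose top bit doubles as the unwritten
 * flag (hence the slightly lower limit for unwritten extents).
 */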
static int ext4_can_extents_be_merged(struct inode *inode,
				      struct ext4_extent *ex1,
				      struct ext4_extent *ex2)
{
	unsigned short ext1_ee_len, ext2_ee_len;

	if (ext4_ext_is_unwritten(ex1) != ext4_ext_is_unwritten(ex2))
		return 0;

	ext1_ee_len = ext4_ext_get_actual_len(ex1);
	ext2_ee_len = ext4_ext_get_actual_len(ex2);

	if (le32_to_cpu(ex1->ee_block) + ext1_ee_len !=
			le32_to_cpu(ex2->ee_block))
		return 0;

	if (ext1_ee_len + ext2_ee_len > EXT_INIT_MAX_LEN)
		return 0;

	if (ext4_ext_is_unwritten(ex1) &&
	    ext1_ee_len + ext2_ee_len > EXT_UNWRITTEN_MAX_LEN)
		return 0;
#ifdef AGGRESSIVE_TEST
	if (ext1_ee_len >= 4)
		return 0;
#endif

	if (ext4_ext_pblock(ex1) + ext1_ee_len == ext4_ext_pblock(ex2))
		return 1;
	return 0;
}

/*
 * This function tries to merge the "ex" extent to the next extent in the tree.
 * It always tries to merge towards right. If you want to merge towards
 * left, pass "ex - 1" as argument instead of "ex".
 * Returns 0 if the extents (ex and ex+1) were _not_ merged and returns
 * 1 if they got merged.
 */
static int ext4_ext_try_to_merge_right(struct inode *inode,
				 struct ext4_ext_path *path,
				 struct ext4_extent *ex)
{
	struct ext4_extent_header *eh;
	unsigned int depth, len;
	int merge_done = 0, unwritten;

	depth = ext_depth(inode);
	BUG_ON(path[depth].p_hdr == NULL);
	eh = path[depth].p_hdr;

	while (ex < EXT_LAST_EXTENT(eh)) {
		if (!ext4_can_extents_be_merged(inode, ex, ex + 1))
			break;
		/* merge with next extent! */
		unwritten = ext4_ext_is_unwritten(ex);
		ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
				+ ext4_ext_get_actual_len(ex + 1));
		if (unwritten)
			ext4_ext_mark_unwritten(ex);

		if (ex + 1 < EXT_LAST_EXTENT(eh)) {
			len = (EXT_LAST_EXTENT(eh) - ex - 1)
				* sizeof(struct ext4_extent);
			memmove(ex + 1, ex + 2, len);
		}
		le16_add_cpu(&eh->eh_entries, -1);
		merge_done = 1;
		WARN_ON(eh->eh_entries == 0);
		if (!eh->eh_entries)
			EXT4_ERROR_INODE(inode, "eh->eh_entries = 0!");
	}

	return merge_done;
}

/*
 * This function does a very simple check to see if we can collapse
 * an extent tree with a single extent tree leaf block into the inode.
 */
static void ext4_ext_try_to_merge_up(handle_t *handle,
				     struct inode *inode,
				     struct ext4_ext_path *path)
{
	size_t s;
	unsigned max_root = ext4_ext_space_root(inode, 0);
	ext4_fsblk_t blk;

	if ((path[0].p_depth != 1) ||
	    (le16_to_cpu(path[0].p_hdr->eh_entries) != 1) ||
	    (le16_to_cpu(path[1].p_hdr->eh_entries) > max_root))
		return;

	/*
	 * We need to modify the block allocation bitmap and the block
	 * group descriptor to release the extent tree block.  If we
	 * can't get the journal credits, give up.
	 */
	if (ext4_journal_extend(handle, 2,
			ext4_free_metadata_revoke_credits(inode->i_sb, 1)))
		return;

	/*
	 * Copy the extent data up to the inode
	 */
	blk = ext4_idx_pblock(path[0].p_idx);
	s = le16_to_cpu(path[1].p_hdr->eh_entries) *
		sizeof(struct ext4_extent_idx);
	s += sizeof(struct ext4_extent_header);

	path[1].p_maxdepth = path[0].p_maxdepth;
	memcpy(path[0].p_hdr, path[1].p_hdr, s);
	path[0].p_depth = 0;
	path[0].p_ext = EXT_FIRST_EXTENT(path[0].p_hdr) +
		(path[1].p_ext - EXT_FIRST_EXTENT(path[1].p_hdr));
	path[0].p_hdr->eh_max = cpu_to_le16(max_root);

	ext4_ext_path_brelse(path + 1);
	ext4_free_blocks(handle, inode, NULL, blk, 1,
			 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
}

/*
 * This function tries to merge the @ex extent to neighbours in the tree, then
 * tries to collapse the extent tree into the inode.
 */
static void ext4_ext_try_to_merge(handle_t *handle,
				  struct inode *inode,
				  struct ext4_ext_path *path,
				  struct ext4_extent *ex)
{
	struct ext4_extent_header *eh;
	unsigned int depth;
	int merge_done = 0;

	depth = ext_depth(inode);
	BUG_ON(path[depth].p_hdr == NULL);
	eh = path[depth].p_hdr;

	if (ex > EXT_FIRST_EXTENT(eh))
		merge_done = ext4_ext_try_to_merge_right(inode, path, ex - 1);

	if (!merge_done)
		(void) ext4_ext_try_to_merge_right(inode, path, ex);

	ext4_ext_try_to_merge_up(handle, inode, path);
}

/*
 * check if a portion of the "newext" extent overlaps with an
 * existing extent.
 *
 * If there is an overlap discovered, it updates the length of the newext
 * such that there will be no overlap, and then returns 1.
 * If there is no overlap found, it returns 0.
 */
static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi,
					   struct inode *inode,
					   struct ext4_extent *newext,
					   struct ext4_ext_path *path)
{
	ext4_lblk_t b1, b2;
	unsigned int depth, len1;
	unsigned int ret = 0;

	b1 = le32_to_cpu(newext->ee_block);
	len1 = ext4_ext_get_actual_len(newext);
	depth = ext_depth(inode);
	if (!path[depth].p_ext)
		goto out;
	b2 = EXT4_LBLK_CMASK(sbi, le32_to_cpu(path[depth].p_ext->ee_block));

	/*
	 * get the next allocated block if the extent in the path
	 * is before the requested block(s)
	 */
	if (b2 < b1) {
		b2 = ext4_ext_next_allocated_block(path);
		if (b2 == EXT_MAX_BLOCKS)
			goto out;
		b2 = EXT4_LBLK_CMASK(sbi, b2);
	}

	/* check for wrap through zero on extent logical start block*/
	if (b1 + len1 < b1) {
		len1 = EXT_MAX_BLOCKS - b1;
		newext->ee_len = cpu_to_le16(len1);
		ret = 1;
	}

	/* check for overlap */
	if (b1 + len1 > b2) {
		newext->ee_len = cpu_to_le16(b2 - b1);
		ret = 1;
	}
out:
	return ret;
}
2019 */ 2020 if (ex < EXT_LAST_EXTENT(eh) && 2021 (le32_to_cpu(ex->ee_block) + 2022 ext4_ext_get_actual_len(ex) < 2023 le32_to_cpu(newext->ee_block))) { 2024 ex += 1; 2025 goto prepend; 2026 } else if ((ex > EXT_FIRST_EXTENT(eh)) && 2027 (le32_to_cpu(newext->ee_block) + 2028 ext4_ext_get_actual_len(newext) < 2029 le32_to_cpu(ex->ee_block))) 2030 ex -= 1; 2031 2032 /* Try to append newex to the ex */ 2033 if (ext4_can_extents_be_merged(inode, ex, newext)) { 2034 ext_debug(inode, "append [%d]%d block to %u:[%d]%d" 2035 "(from %llu)\n", 2036 ext4_ext_is_unwritten(newext), 2037 ext4_ext_get_actual_len(newext), 2038 le32_to_cpu(ex->ee_block), 2039 ext4_ext_is_unwritten(ex), 2040 ext4_ext_get_actual_len(ex), 2041 ext4_ext_pblock(ex)); 2042 err = ext4_ext_get_access(handle, inode, 2043 path + depth); 2044 if (err) 2045 return err; 2046 unwritten = ext4_ext_is_unwritten(ex); 2047 ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex) 2048 + ext4_ext_get_actual_len(newext)); 2049 if (unwritten) 2050 ext4_ext_mark_unwritten(ex); 2051 nearex = ex; 2052 goto merge; 2053 } 2054 2055 prepend: 2056 /* Try to prepend newex to the ex */ 2057 if (ext4_can_extents_be_merged(inode, newext, ex)) { 2058 ext_debug(inode, "prepend %u[%d]%d block to %u:[%d]%d" 2059 "(from %llu)\n", 2060 le32_to_cpu(newext->ee_block), 2061 ext4_ext_is_unwritten(newext), 2062 ext4_ext_get_actual_len(newext), 2063 le32_to_cpu(ex->ee_block), 2064 ext4_ext_is_unwritten(ex), 2065 ext4_ext_get_actual_len(ex), 2066 ext4_ext_pblock(ex)); 2067 err = ext4_ext_get_access(handle, inode, 2068 path + depth); 2069 if (err) 2070 return err; 2071 2072 unwritten = ext4_ext_is_unwritten(ex); 2073 ex->ee_block = newext->ee_block; 2074 ext4_ext_store_pblock(ex, ext4_ext_pblock(newext)); 2075 ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex) 2076 + ext4_ext_get_actual_len(newext)); 2077 if (unwritten) 2078 ext4_ext_mark_unwritten(ex); 2079 nearex = ex; 2080 goto merge; 2081 } 2082 } 2083 2084 depth = ext_depth(inode); 2085 eh = path[depth].p_hdr; 2086 if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) 2087 goto has_space; 2088 2089 /* probably next leaf has space for us? */ 2090 fex = EXT_LAST_EXTENT(eh); 2091 next = EXT_MAX_BLOCKS; 2092 if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block)) 2093 next = ext4_ext_next_leaf_block(path); 2094 if (next != EXT_MAX_BLOCKS) { 2095 ext_debug(inode, "next leaf block - %u\n", next); 2096 BUG_ON(npath != NULL); 2097 npath = ext4_find_extent(inode, next, NULL, gb_flags); 2098 if (IS_ERR(npath)) 2099 return PTR_ERR(npath); 2100 BUG_ON(npath->p_depth != path->p_depth); 2101 eh = npath[depth].p_hdr; 2102 if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) { 2103 ext_debug(inode, "next leaf isn't full(%d)\n", 2104 le16_to_cpu(eh->eh_entries)); 2105 path = npath; 2106 goto has_space; 2107 } 2108 ext_debug(inode, "next leaf has no free space(%d,%d)\n", 2109 le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max)); 2110 } 2111 2112 /* 2113 * There is no free space in the found leaf. 2114 * We're gonna add a new leaf in the tree. 
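 *
 * (Sketch of the flow: ext4_ext_create_new_leaf() below either splits
 * the tree at an upper index level that still has room, or grows the
 * tree one level deeper and retries; the path is then looked up again,
 * which is why depth and eh are re-read right after the call.)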
2115 */ 2116 if (gb_flags & EXT4_GET_BLOCKS_METADATA_NOFAIL) 2117 mb_flags |= EXT4_MB_USE_RESERVED; 2118 err = ext4_ext_create_new_leaf(handle, inode, mb_flags, gb_flags, 2119 ppath, newext); 2120 if (err) 2121 goto cleanup; 2122 path = *ppath; 2123 depth = ext_depth(inode); 2124 eh = path[depth].p_hdr; 2125 2126 has_space: 2127 nearex = path[depth].p_ext; 2128 2129 err = ext4_ext_get_access(handle, inode, path + depth); 2130 if (err) 2131 goto cleanup; 2132 2133 if (!nearex) { 2134 /* there is no extent in this leaf, create first one */ 2135 ext_debug(inode, "first extent in the leaf: %u:%llu:[%d]%d\n", 2136 le32_to_cpu(newext->ee_block), 2137 ext4_ext_pblock(newext), 2138 ext4_ext_is_unwritten(newext), 2139 ext4_ext_get_actual_len(newext)); 2140 nearex = EXT_FIRST_EXTENT(eh); 2141 } else { 2142 if (le32_to_cpu(newext->ee_block) 2143 > le32_to_cpu(nearex->ee_block)) { 2144 /* Insert after */ 2145 ext_debug(inode, "insert %u:%llu:[%d]%d after: " 2146 "nearest %p\n", 2147 le32_to_cpu(newext->ee_block), 2148 ext4_ext_pblock(newext), 2149 ext4_ext_is_unwritten(newext), 2150 ext4_ext_get_actual_len(newext), 2151 nearex); 2152 nearex++; 2153 } else { 2154 /* Insert before */ 2155 BUG_ON(newext->ee_block == nearex->ee_block); 2156 ext_debug(inode, "insert %u:%llu:[%d]%d before: " 2157 "nearest %p\n", 2158 le32_to_cpu(newext->ee_block), 2159 ext4_ext_pblock(newext), 2160 ext4_ext_is_unwritten(newext), 2161 ext4_ext_get_actual_len(newext), 2162 nearex); 2163 } 2164 len = EXT_LAST_EXTENT(eh) - nearex + 1; 2165 if (len > 0) { 2166 ext_debug(inode, "insert %u:%llu:[%d]%d: " 2167 "move %d extents from 0x%p to 0x%p\n", 2168 le32_to_cpu(newext->ee_block), 2169 ext4_ext_pblock(newext), 2170 ext4_ext_is_unwritten(newext), 2171 ext4_ext_get_actual_len(newext), 2172 len, nearex, nearex + 1); 2173 memmove(nearex + 1, nearex, 2174 len * sizeof(struct ext4_extent)); 2175 } 2176 } 2177 2178 le16_add_cpu(&eh->eh_entries, 1); 2179 path[depth].p_ext = nearex; 2180 nearex->ee_block = newext->ee_block; 2181 ext4_ext_store_pblock(nearex, ext4_ext_pblock(newext)); 2182 nearex->ee_len = newext->ee_len; 2183 2184 merge: 2185 /* try to merge extents */ 2186 if (!(gb_flags & EXT4_GET_BLOCKS_PRE_IO)) 2187 ext4_ext_try_to_merge(handle, inode, path, nearex); 2188 2189 2190 /* time to correct all indexes above */ 2191 err = ext4_ext_correct_indexes(handle, inode, path); 2192 if (err) 2193 goto cleanup; 2194 2195 err = ext4_ext_dirty(handle, inode, path + path->p_depth); 2196 2197 cleanup: 2198 ext4_free_ext_path(npath); 2199 return err; 2200 } 2201 2202 static int ext4_fill_es_cache_info(struct inode *inode, 2203 ext4_lblk_t block, ext4_lblk_t num, 2204 struct fiemap_extent_info *fieinfo) 2205 { 2206 ext4_lblk_t next, end = block + num - 1; 2207 struct extent_status es; 2208 unsigned char blksize_bits = inode->i_sb->s_blocksize_bits; 2209 unsigned int flags; 2210 int err; 2211 2212 while (block <= end) { 2213 next = 0; 2214 flags = 0; 2215 if (!ext4_es_lookup_extent(inode, block, &next, &es)) 2216 break; 2217 if (ext4_es_is_unwritten(&es)) 2218 flags |= FIEMAP_EXTENT_UNWRITTEN; 2219 if (ext4_es_is_delayed(&es)) 2220 flags |= (FIEMAP_EXTENT_DELALLOC | 2221 FIEMAP_EXTENT_UNKNOWN); 2222 if (ext4_es_is_hole(&es)) 2223 flags |= EXT4_FIEMAP_EXTENT_HOLE; 2224 if (next == 0) 2225 flags |= FIEMAP_EXTENT_LAST; 2226 if (flags & (FIEMAP_EXTENT_DELALLOC| 2227 EXT4_FIEMAP_EXTENT_HOLE)) 2228 es.es_pblk = 0; 2229 else 2230 es.es_pblk = ext4_es_pblock(&es); 2231 err = fiemap_fill_next_extent(fieinfo, 2232 (__u64)es.es_lblk << blksize_bits,
2233 (__u64)es.es_pblk << blksize_bits, 2234 (__u64)es.es_len << blksize_bits, 2235 flags); 2236 if (next == 0) 2237 break; 2238 block = next; 2239 if (err < 0) 2240 return err; 2241 if (err == 1) 2242 return 0; 2243 } 2244 return 0; 2245 } 2246 2247 2248 /* 2249 * ext4_ext_find_hole - find hole around given block according to the given path 2250 * @inode: inode we lookup in 2251 * @path: path in extent tree to @lblk 2252 * @lblk: pointer to logical block around which we want to determine hole 2253 * 2254 * Determine hole length (and start if easily possible) around given logical 2255 * block. We don't try too hard to find the beginning of the hole, but if @path 2256 * actually points to the extent before @lblk, we provide it. 2257 * 2258 * The function returns the length of a hole starting at @lblk. We update @lblk 2259 * to the beginning of the hole if we managed to find it. 2260 */ 2261 static ext4_lblk_t ext4_ext_find_hole(struct inode *inode, 2262 struct ext4_ext_path *path, 2263 ext4_lblk_t *lblk) 2264 { 2265 int depth = ext_depth(inode); 2266 struct ext4_extent *ex; 2267 ext4_lblk_t len; 2268 2269 ex = path[depth].p_ext; 2270 if (ex == NULL) { 2271 /* there is no extent yet, so gap is [0;-] */ 2272 *lblk = 0; 2273 len = EXT_MAX_BLOCKS; 2274 } else if (*lblk < le32_to_cpu(ex->ee_block)) { 2275 len = le32_to_cpu(ex->ee_block) - *lblk; 2276 } else if (*lblk >= le32_to_cpu(ex->ee_block) 2277 + ext4_ext_get_actual_len(ex)) { 2278 ext4_lblk_t next; 2279 2280 *lblk = le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex); 2281 next = ext4_ext_next_allocated_block(path); 2282 BUG_ON(next == *lblk); 2283 len = next - *lblk; 2284 } else { 2285 BUG(); 2286 } 2287 return len; 2288 } 2289 2290 /* 2291 * ext4_ext_rm_idx: 2292 * removes index from the index block. 2293 */ 2294 static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode, 2295 struct ext4_ext_path *path, int depth) 2296 { 2297 int err; 2298 ext4_fsblk_t leaf; 2299 int k = depth - 1; 2300 2301 /* free index block */ 2302 leaf = ext4_idx_pblock(path[k].p_idx); 2303 if (unlikely(path[k].p_hdr->eh_entries == 0)) { 2304 EXT4_ERROR_INODE(inode, "path[%d].p_hdr->eh_entries == 0", k); 2305 return -EFSCORRUPTED; 2306 } 2307 err = ext4_ext_get_access(handle, inode, path + k); 2308 if (err) 2309 return err; 2310 2311 if (path[k].p_idx != EXT_LAST_INDEX(path[k].p_hdr)) { 2312 int len = EXT_LAST_INDEX(path[k].p_hdr) - path[k].p_idx; 2313 len *= sizeof(struct ext4_extent_idx); 2314 memmove(path[k].p_idx, path[k].p_idx + 1, len); 2315 } 2316 2317 le16_add_cpu(&path[k].p_hdr->eh_entries, -1); 2318 err = ext4_ext_dirty(handle, inode, path + k); 2319 if (err) 2320 return err; 2321 ext_debug(inode, "index is empty, remove it, free block %llu\n", leaf); 2322 trace_ext4_ext_rm_idx(inode, leaf); 2323 2324 ext4_free_blocks(handle, inode, NULL, leaf, 1, 2325 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET); 2326 2327 while (--k >= 0) { 2328 if (path[k + 1].p_idx != EXT_FIRST_INDEX(path[k + 1].p_hdr)) 2329 break; 2330 err = ext4_ext_get_access(handle, inode, path + k); 2331 if (err) 2332 goto clean; 2333 path[k].p_idx->ei_block = path[k + 1].p_idx->ei_block; 2334 err = ext4_ext_dirty(handle, inode, path + k); 2335 if (err) 2336 goto clean; 2337 } 2338 return 0; 2339 2340 clean: 2341 /* 2342 * The path[k].p_bh is either unmodified or with no verified bit 2343 * set (see ext4_ext_get_access()).
So just clear the verified bit 2344 * of the successfully modified extents buffers, which will force 2345 * these extents to be checked to avoid using inconsistent data. 2346 */ 2347 while (++k < depth) 2348 clear_buffer_verified(path[k].p_bh); 2349 2350 return err; 2351 } 2352 2353 /* 2354 * ext4_ext_calc_credits_for_single_extent: 2355 * This routine returns the maximum number of credits needed to insert 2356 * an extent into the extent tree. 2357 * When the actual path is passed, the caller should calculate credits 2358 * under i_data_sem. 2359 */ 2360 int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks, 2361 struct ext4_ext_path *path) 2362 { 2363 if (path) { 2364 int depth = ext_depth(inode); 2365 int ret = 0; 2366 2367 /* probably there is space in the leaf? */ 2368 if (le16_to_cpu(path[depth].p_hdr->eh_entries) 2369 < le16_to_cpu(path[depth].p_hdr->eh_max)) { 2370 2371 /* 2372 * There is some space in the leaf, so there is no 2373 * need to account for the leaf block credit. 2374 * 2375 * Bitmaps and block group descriptor blocks 2376 * and other metadata blocks still need to be 2377 * accounted for. 2378 */ 2379 /* 1 bitmap, 1 block group descriptor */ 2380 ret = 2 + EXT4_META_TRANS_BLOCKS(inode->i_sb); 2381 return ret; 2382 } 2383 } 2384 2385 return ext4_chunk_trans_blocks(inode, nrblocks); 2386 } 2387 2388 /* 2389 * How many index/leaf blocks need to change/allocate to add @extents extents? 2390 * 2391 * If we add a single extent, then in the worst case, each tree level 2392 * index/leaf needs to be changed in case of a tree split. 2393 * 2394 * If more extents are inserted, they could cause the whole tree to split more 2395 * than once, but this is really rare. 2396 */ 2397 int ext4_ext_index_trans_blocks(struct inode *inode, int extents) 2398 { 2399 int index; 2400 int depth; 2401 2402 /* If we are converting the inline data, only one block is needed here. */ 2403 if (ext4_has_inline_data(inode)) 2404 return 1; 2405 2406 depth = ext_depth(inode); 2407 2408 if (extents <= 1) 2409 index = depth * 2; 2410 else 2411 index = depth * 3; 2412 2413 return index; 2414 } 2415 2416 static inline int get_default_free_blocks_flags(struct inode *inode) 2417 { 2418 if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode) || 2419 ext4_test_inode_flag(inode, EXT4_INODE_EA_INODE)) 2420 return EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET; 2421 else if (ext4_should_journal_data(inode)) 2422 return EXT4_FREE_BLOCKS_FORGET; 2423 return 0; 2424 } 2425 2426 /* 2427 * ext4_rereserve_cluster - increment the reserved cluster count when 2428 * freeing a cluster with a pending reservation 2429 * 2430 * @inode - file containing the cluster 2431 * @lblk - logical block in cluster to be reserved 2432 * 2433 * Increments the reserved cluster count and adjusts quota in a bigalloc 2434 * file system when freeing a partial cluster containing at least one 2435 * delayed and unwritten block. A partial cluster meeting that 2436 * requirement will have a pending reservation. If so, the 2437 * RERESERVE_CLUSTER flag is used when calling ext4_free_blocks() to 2438 * defer reserved and allocated space accounting to a subsequent call 2439 * to this function.
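 *
 * The usual calling pattern in this file looks like the sketch below
 * (it appears verbatim in the partial-cluster freeing paths further
 * down):
 *
 *	flags = get_default_free_blocks_flags(inode);
 *	if (ext4_is_pending(inode, partial->lblk))
 *		flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER;
 *	ext4_free_blocks(handle, inode, NULL,
 *			 EXT4_C2B(sbi, partial->pclu),
 *			 sbi->s_cluster_ratio, flags);
 *	if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)
 *		ext4_rereserve_cluster(inode, partial->lblk);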
2440 */ 2441 static void ext4_rereserve_cluster(struct inode *inode, ext4_lblk_t lblk) 2442 { 2443 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 2444 struct ext4_inode_info *ei = EXT4_I(inode); 2445 2446 dquot_reclaim_block(inode, EXT4_C2B(sbi, 1)); 2447 2448 spin_lock(&ei->i_block_reservation_lock); 2449 ei->i_reserved_data_blocks++; 2450 percpu_counter_add(&sbi->s_dirtyclusters_counter, 1); 2451 spin_unlock(&ei->i_block_reservation_lock); 2452 2453 percpu_counter_add(&sbi->s_freeclusters_counter, 1); 2454 ext4_remove_pending(inode, lblk); 2455 } 2456 2457 static int ext4_remove_blocks(handle_t *handle, struct inode *inode, 2458 struct ext4_extent *ex, 2459 struct partial_cluster *partial, 2460 ext4_lblk_t from, ext4_lblk_t to) 2461 { 2462 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 2463 unsigned short ee_len = ext4_ext_get_actual_len(ex); 2464 ext4_fsblk_t last_pblk, pblk; 2465 ext4_lblk_t num; 2466 int flags; 2467 2468 /* only extent tail removal is allowed */ 2469 if (from < le32_to_cpu(ex->ee_block) || 2470 to != le32_to_cpu(ex->ee_block) + ee_len - 1) { 2471 ext4_error(sbi->s_sb, 2472 "strange request: removal(2) %u-%u from %u:%u", 2473 from, to, le32_to_cpu(ex->ee_block), ee_len); 2474 return 0; 2475 } 2476 2477 #ifdef EXTENTS_STATS 2478 spin_lock(&sbi->s_ext_stats_lock); 2479 sbi->s_ext_blocks += ee_len; 2480 sbi->s_ext_extents++; 2481 if (ee_len < sbi->s_ext_min) 2482 sbi->s_ext_min = ee_len; 2483 if (ee_len > sbi->s_ext_max) 2484 sbi->s_ext_max = ee_len; 2485 if (ext_depth(inode) > sbi->s_depth_max) 2486 sbi->s_depth_max = ext_depth(inode); 2487 spin_unlock(&sbi->s_ext_stats_lock); 2488 #endif 2489 2490 trace_ext4_remove_blocks(inode, ex, from, to, partial); 2491 2492 /* 2493 * if we have a partial cluster, and it's different from the 2494 * cluster of the last block in the extent, we free it 2495 */ 2496 last_pblk = ext4_ext_pblock(ex) + ee_len - 1; 2497 2498 if (partial->state != initial && 2499 partial->pclu != EXT4_B2C(sbi, last_pblk)) { 2500 if (partial->state == tofree) { 2501 flags = get_default_free_blocks_flags(inode); 2502 if (ext4_is_pending(inode, partial->lblk)) 2503 flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER; 2504 ext4_free_blocks(handle, inode, NULL, 2505 EXT4_C2B(sbi, partial->pclu), 2506 sbi->s_cluster_ratio, flags); 2507 if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER) 2508 ext4_rereserve_cluster(inode, partial->lblk); 2509 } 2510 partial->state = initial; 2511 } 2512 2513 num = le32_to_cpu(ex->ee_block) + ee_len - from; 2514 pblk = ext4_ext_pblock(ex) + ee_len - num; 2515 2516 /* 2517 * We free the partial cluster at the end of the extent (if any), 2518 * unless the cluster is used by another extent (partial_cluster 2519 * state is nofree). If a partial cluster exists here, it must be 2520 * shared with the last block in the extent. 
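 *
 * Worked example (assuming a bigalloc file system with a cluster
 * ratio of 4): when removing blocks 8..13 of an extent, 'to' = 13
 * falls in the middle of cluster [12..15] (EXT4_LBLK_COFF() == 1,
 * not 3) and EXT4_LBLK_CMASK(13) == 12 >= 'from' == 8, so the whole
 * right-end cluster is freed below unless its state is nofree.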
2521 */ 2522 flags = get_default_free_blocks_flags(inode); 2523 2524 /* partial, left end cluster aligned, right end unaligned */ 2525 if ((EXT4_LBLK_COFF(sbi, to) != sbi->s_cluster_ratio - 1) && 2526 (EXT4_LBLK_CMASK(sbi, to) >= from) && 2527 (partial->state != nofree)) { 2528 if (ext4_is_pending(inode, to)) 2529 flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER; 2530 ext4_free_blocks(handle, inode, NULL, 2531 EXT4_PBLK_CMASK(sbi, last_pblk), 2532 sbi->s_cluster_ratio, flags); 2533 if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER) 2534 ext4_rereserve_cluster(inode, to); 2535 partial->state = initial; 2536 flags = get_default_free_blocks_flags(inode); 2537 } 2538 2539 flags |= EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER; 2540 2541 /* 2542 * For bigalloc file systems, we never free a partial cluster 2543 * at the beginning of the extent. Instead, we check to see if we 2544 * need to free it on a subsequent call to ext4_remove_blocks, 2545 * or at the end of ext4_ext_rm_leaf or ext4_ext_remove_space. 2546 */ 2547 flags |= EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER; 2548 ext4_free_blocks(handle, inode, NULL, pblk, num, flags); 2549 2550 /* reset the partial cluster if we've freed past it */ 2551 if (partial->state != initial && partial->pclu != EXT4_B2C(sbi, pblk)) 2552 partial->state = initial; 2553 2554 /* 2555 * If we've freed the entire extent but the beginning is not left 2556 * cluster aligned and is not marked as ineligible for freeing, we 2557 * record the partial cluster at the beginning of the extent. It 2558 * wasn't freed by the preceding ext4_free_blocks() call, and we 2559 * need to look farther to the left to determine if it's to be freed 2560 * (not shared with another extent). Else, reset the partial 2561 * cluster - we're either done freeing or the beginning of the 2562 * extent is left cluster aligned. 2563 */ 2564 if (EXT4_LBLK_COFF(sbi, from) && num == ee_len) { 2565 if (partial->state == initial) { 2566 partial->pclu = EXT4_B2C(sbi, pblk); 2567 partial->lblk = from; 2568 partial->state = tofree; 2569 } 2570 } else { 2571 partial->state = initial; 2572 } 2573 2574 return 0; 2575 } 2576 2577 /* 2578 * ext4_ext_rm_leaf() Removes the extents associated with the 2579 * blocks appearing between "start" and "end". Both "start" 2580 * and "end" must appear in the same extent or EIO is returned. 2581 * 2582 * @handle: The journal handle 2583 * @inode: The file's inode 2584 * @path: The path to the leaf 2585 * @partial_cluster: The cluster which we'll have to free if all extents 2586 * have been released from it. However, if this value is 2587 * negative, it's a cluster just to the right of the 2588 * punched region and it must not be freed.
2589 * @start: The first block to remove 2590 * @end: The last block to remove 2591 */ 2592 static int 2593 ext4_ext_rm_leaf(handle_t *handle, struct inode *inode, 2594 struct ext4_ext_path *path, 2595 struct partial_cluster *partial, 2596 ext4_lblk_t start, ext4_lblk_t end) 2597 { 2598 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 2599 int err = 0, correct_index = 0; 2600 int depth = ext_depth(inode), credits, revoke_credits; 2601 struct ext4_extent_header *eh; 2602 ext4_lblk_t a, b; 2603 unsigned num; 2604 ext4_lblk_t ex_ee_block; 2605 unsigned short ex_ee_len; 2606 unsigned unwritten = 0; 2607 struct ext4_extent *ex; 2608 ext4_fsblk_t pblk; 2609 2610 /* the header must be checked already in ext4_ext_remove_space() */ 2611 ext_debug(inode, "truncate since %u in leaf to %u\n", start, end); 2612 if (!path[depth].p_hdr) 2613 path[depth].p_hdr = ext_block_hdr(path[depth].p_bh); 2614 eh = path[depth].p_hdr; 2615 if (unlikely(path[depth].p_hdr == NULL)) { 2616 EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth); 2617 return -EFSCORRUPTED; 2618 } 2619 /* find where to start removing */ 2620 ex = path[depth].p_ext; 2621 if (!ex) 2622 ex = EXT_LAST_EXTENT(eh); 2623 2624 ex_ee_block = le32_to_cpu(ex->ee_block); 2625 ex_ee_len = ext4_ext_get_actual_len(ex); 2626 2627 trace_ext4_ext_rm_leaf(inode, start, ex, partial); 2628 2629 while (ex >= EXT_FIRST_EXTENT(eh) && 2630 ex_ee_block + ex_ee_len > start) { 2631 2632 if (ext4_ext_is_unwritten(ex)) 2633 unwritten = 1; 2634 else 2635 unwritten = 0; 2636 2637 ext_debug(inode, "remove ext %u:[%d]%d\n", ex_ee_block, 2638 unwritten, ex_ee_len); 2639 path[depth].p_ext = ex; 2640 2641 a = max(ex_ee_block, start); 2642 b = min(ex_ee_block + ex_ee_len - 1, end); 2643 2644 ext_debug(inode, " border %u:%u\n", a, b); 2645 2646 /* If this extent is beyond the end of the hole, skip it */ 2647 if (end < ex_ee_block) { 2648 /* 2649 * We're going to skip this extent and move to another, 2650 * so note that its first cluster is in use to avoid 2651 * freeing it when removing blocks. Eventually, the 2652 * right edge of the truncated/punched region will 2653 * be just to the left. 2654 */ 2655 if (sbi->s_cluster_ratio > 1) { 2656 pblk = ext4_ext_pblock(ex); 2657 partial->pclu = EXT4_B2C(sbi, pblk); 2658 partial->state = nofree; 2659 } 2660 ex--; 2661 ex_ee_block = le32_to_cpu(ex->ee_block); 2662 ex_ee_len = ext4_ext_get_actual_len(ex); 2663 continue; 2664 } else if (b != ex_ee_block + ex_ee_len - 1) { 2665 EXT4_ERROR_INODE(inode, 2666 "can not handle truncate %u:%u " 2667 "on extent %u:%u", 2668 start, end, ex_ee_block, 2669 ex_ee_block + ex_ee_len - 1); 2670 err = -EFSCORRUPTED; 2671 goto out; 2672 } else if (a != ex_ee_block) { 2673 /* remove tail of the extent */ 2674 num = a - ex_ee_block; 2675 } else { 2676 /* remove whole extent: excellent! */ 2677 num = 0; 2678 } 2679 /* 2680 * 3 for leaf, sb, and inode plus 2 (bmap and group 2681 * descriptor) for each block group; assume two block 2682 * groups plus ex_ee_len/blocks_per_block_group for 2683 * the worst case 2684 */ 2685 credits = 7 + 2*(ex_ee_len/EXT4_BLOCKS_PER_GROUP(inode->i_sb)); 2686 if (ex == EXT_FIRST_EXTENT(eh)) { 2687 correct_index = 1; 2688 credits += (ext_depth(inode)) + 1; 2689 } 2690 credits += EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb); 2691 /* 2692 * We may end up freeing some index blocks and data from the 2693 * punched range. Note that partial clusters are accounted for 2694 * by ext4_free_data_revoke_credits(). 
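 *
 * As a concrete illustration of the credit estimate computed above
 * (hypothetical geometry): with 4k blocks there are 32768 blocks per
 * group, so an 80000-block extent yields
 * credits = 7 + 2 * (80000 / 32768) = 11 before the quota blocks and
 * the optional index-correction credits are added.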
2695 */ 2696 revoke_credits = 2697 ext4_free_metadata_revoke_credits(inode->i_sb, 2698 ext_depth(inode)) + 2699 ext4_free_data_revoke_credits(inode, b - a + 1); 2700 2701 err = ext4_datasem_ensure_credits(handle, inode, credits, 2702 credits, revoke_credits); 2703 if (err) { 2704 if (err > 0) 2705 err = -EAGAIN; 2706 goto out; 2707 } 2708 2709 err = ext4_ext_get_access(handle, inode, path + depth); 2710 if (err) 2711 goto out; 2712 2713 err = ext4_remove_blocks(handle, inode, ex, partial, a, b); 2714 if (err) 2715 goto out; 2716 2717 if (num == 0) 2718 /* this extent is removed; mark slot entirely unused */ 2719 ext4_ext_store_pblock(ex, 0); 2720 2721 ex->ee_len = cpu_to_le16(num); 2722 /* 2723 * Do not mark unwritten if all the blocks in the 2724 * extent have been removed. 2725 */ 2726 if (unwritten && num) 2727 ext4_ext_mark_unwritten(ex); 2728 /* 2729 * If the extent was completely released, 2730 * we need to remove it from the leaf 2731 */ 2732 if (num == 0) { 2733 if (end != EXT_MAX_BLOCKS - 1) { 2734 /* 2735 * For hole punching, we need to scoot all the 2736 * extents up when an extent is removed so that 2737 * we don't have blank extents in the middle 2738 */ 2739 memmove(ex, ex+1, (EXT_LAST_EXTENT(eh) - ex) * 2740 sizeof(struct ext4_extent)); 2741 2742 /* Now get rid of the one at the end */ 2743 memset(EXT_LAST_EXTENT(eh), 0, 2744 sizeof(struct ext4_extent)); 2745 } 2746 le16_add_cpu(&eh->eh_entries, -1); 2747 } 2748 2749 err = ext4_ext_dirty(handle, inode, path + depth); 2750 if (err) 2751 goto out; 2752 2753 ext_debug(inode, "new extent: %u:%u:%llu\n", ex_ee_block, num, 2754 ext4_ext_pblock(ex)); 2755 ex--; 2756 ex_ee_block = le32_to_cpu(ex->ee_block); 2757 ex_ee_len = ext4_ext_get_actual_len(ex); 2758 } 2759 2760 if (correct_index && eh->eh_entries) 2761 err = ext4_ext_correct_indexes(handle, inode, path); 2762 2763 /* 2764 * If there's a partial cluster and at least one extent remains in 2765 * the leaf, free the partial cluster if it isn't shared with the 2766 * current extent. If it is shared with the current extent, 2767 * we reset the partial cluster because we've reached the start of the 2768 * truncated/punched region and we're done removing blocks.
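 *
 * Example (bigalloc ratio of 4, made-up numbers): if the punch left
 * partial->pclu pointing at cluster 10 (physical blocks 40..43) and
 * the extent we stopped on ends at physical block 41, both live in
 * cluster 10, so the cluster is shared and must not be freed; the
 * partial state is simply reset to initial below.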
2769 */ 2770 if (partial->state == tofree && ex >= EXT_FIRST_EXTENT(eh)) { 2771 pblk = ext4_ext_pblock(ex) + ex_ee_len - 1; 2772 if (partial->pclu != EXT4_B2C(sbi, pblk)) { 2773 int flags = get_default_free_blocks_flags(inode); 2774 2775 if (ext4_is_pending(inode, partial->lblk)) 2776 flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER; 2777 ext4_free_blocks(handle, inode, NULL, 2778 EXT4_C2B(sbi, partial->pclu), 2779 sbi->s_cluster_ratio, flags); 2780 if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER) 2781 ext4_rereserve_cluster(inode, partial->lblk); 2782 } 2783 partial->state = initial; 2784 } 2785 2786 /* if this leaf is free, then we should 2787 * remove it from the index block above */ 2788 if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL) 2789 err = ext4_ext_rm_idx(handle, inode, path, depth); 2790 2791 out: 2792 return err; 2793 } 2794 2795 /* 2796 * ext4_ext_more_to_rm: 2797 * returns 1 if current index has to be freed (even partial) 2798 */ 2799 static int 2800 ext4_ext_more_to_rm(struct ext4_ext_path *path) 2801 { 2802 BUG_ON(path->p_idx == NULL); 2803 2804 if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr)) 2805 return 0; 2806 2807 /* 2808 * if a truncate on a deeper level happened, it wasn't partial, 2809 * so we have to consider the current index for truncation 2810 */ 2811 if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block) 2812 return 0; 2813 return 1; 2814 } 2815 2816 int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start, 2817 ext4_lblk_t end) 2818 { 2819 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 2820 int depth = ext_depth(inode); 2821 struct ext4_ext_path *path = NULL; 2822 struct partial_cluster partial; 2823 handle_t *handle; 2824 int i = 0, err = 0; 2825 2826 partial.pclu = 0; 2827 partial.lblk = 0; 2828 partial.state = initial; 2829 2830 ext_debug(inode, "truncate since %u to %u\n", start, end); 2831 2832 /* probably first extent we're gonna free will be last in block */ 2833 handle = ext4_journal_start_with_revoke(inode, EXT4_HT_TRUNCATE, 2834 depth + 1, 2835 ext4_free_metadata_revoke_credits(inode->i_sb, depth)); 2836 if (IS_ERR(handle)) 2837 return PTR_ERR(handle); 2838 2839 again: 2840 trace_ext4_ext_remove_space(inode, start, end, depth); 2841 2842 /* 2843 * Check if we are removing extents inside the extent tree. If that 2844 * is the case, we are going to punch a hole inside the extent tree 2845 * so we have to check whether we need to split the extent covering 2846 * the last block to remove so we can easily remove the part of it 2847 * in ext4_ext_rm_leaf(). 2848 */ 2849 if (end < EXT_MAX_BLOCKS - 1) { 2850 struct ext4_extent *ex; 2851 ext4_lblk_t ee_block, ex_end, lblk; 2852 ext4_fsblk_t pblk; 2853 2854 /* find extent for or closest extent to this block */ 2855 path = ext4_find_extent(inode, end, NULL, 2856 EXT4_EX_NOCACHE | EXT4_EX_NOFAIL); 2857 if (IS_ERR(path)) { 2858 ext4_journal_stop(handle); 2859 return PTR_ERR(path); 2860 } 2861 depth = ext_depth(inode); 2862 /* The leaf may be missing only if the inode has no blocks at all */ 2863 ex = path[depth].p_ext; 2864 if (!ex) { 2865 if (depth) { 2866 EXT4_ERROR_INODE(inode, 2867 "path[%d].p_hdr == NULL", 2868 depth); 2869 err = -EFSCORRUPTED; 2870 } 2871 goto out; 2872 } 2873 2874 ee_block = le32_to_cpu(ex->ee_block); 2875 ex_end = ee_block + ext4_ext_get_actual_len(ex) - 1; 2876 2877 /* 2878 * See if the last block is inside the extent; if so, split 2879 * the extent at 'end' block so we can easily remove the 2880 * tail of the first part of the split extent in 2881 * ext4_ext_rm_leaf().
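 *
 * For instance, punching logical blocks 10..19 out of a file whose
 * leaf has an extent covering blocks 15..24 splits that extent at
 * block 20, leaving [15..19] and [20..24]; ext4_ext_rm_leaf() can
 * then drop [15..19] whole without touching the surviving tail.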
2882 */ 2883 if (end >= ee_block && end < ex_end) { 2884 2885 /* 2886 * If we're going to split the extent, note that 2887 * the cluster containing the block after 'end' is 2888 * in use to avoid freeing it when removing blocks. 2889 */ 2890 if (sbi->s_cluster_ratio > 1) { 2891 pblk = ext4_ext_pblock(ex) + end - ee_block + 1; 2892 partial.pclu = EXT4_B2C(sbi, pblk); 2893 partial.state = nofree; 2894 } 2895 2896 /* 2897 * Split the extent in two so that 'end' is the last 2898 * block in the first new extent. Also we should not 2899 * fail removing space due to ENOSPC so try to use 2900 * reserved block if that happens. 2901 */ 2902 err = ext4_force_split_extent_at(handle, inode, &path, 2903 end + 1, 1); 2904 if (err < 0) 2905 goto out; 2906 2907 } else if (sbi->s_cluster_ratio > 1 && end >= ex_end && 2908 partial.state == initial) { 2909 /* 2910 * If we're punching, there's an extent to the right. 2911 * If the partial cluster hasn't been set, set it to 2912 * that extent's first cluster and its state to nofree 2913 * so it won't be freed should it contain blocks to be 2914 * removed. If it's already set (tofree/nofree), we're 2915 * retrying and keep the original partial cluster info 2916 * so a cluster marked tofree as a result of earlier 2917 * extent removal is not lost. 2918 */ 2919 lblk = ex_end + 1; 2920 err = ext4_ext_search_right(inode, path, &lblk, &pblk, 2921 NULL); 2922 if (err < 0) 2923 goto out; 2924 if (pblk) { 2925 partial.pclu = EXT4_B2C(sbi, pblk); 2926 partial.state = nofree; 2927 } 2928 } 2929 } 2930 /* 2931 * We start scanning from the right side, freeing all the blocks 2932 * after i_size and walking into the tree depth-wise. 2933 */ 2934 depth = ext_depth(inode); 2935 if (path) { 2936 int k = i = depth; 2937 while (--k > 0) 2938 path[k].p_block = 2939 le16_to_cpu(path[k].p_hdr->eh_entries)+1; 2940 } else { 2941 path = kcalloc(depth + 1, sizeof(struct ext4_ext_path), 2942 GFP_NOFS | __GFP_NOFAIL); 2943 if (path == NULL) { 2944 ext4_journal_stop(handle); 2945 return -ENOMEM; 2946 } 2947 path[0].p_maxdepth = path[0].p_depth = depth; 2948 path[0].p_hdr = ext_inode_hdr(inode); 2949 i = 0; 2950 2951 if (ext4_ext_check(inode, path[0].p_hdr, depth, 0)) { 2952 err = -EFSCORRUPTED; 2953 goto out; 2954 } 2955 } 2956 err = 0; 2957 2958 while (i >= 0 && err == 0) { 2959 if (i == depth) { 2960 /* this is a leaf block */ 2961 err = ext4_ext_rm_leaf(handle, inode, path, 2962 &partial, start, end); 2963 /* root level has p_bh == NULL, brelse() eats this */ 2964 ext4_ext_path_brelse(path + i); 2965 i--; 2966 continue; 2967 } 2968 2969 /* this is an index block */ 2970 if (!path[i].p_hdr) { 2971 ext_debug(inode, "initialize header\n"); 2972 path[i].p_hdr = ext_block_hdr(path[i].p_bh); 2973 } 2974 2975 if (!path[i].p_idx) { 2976 /* this level hasn't been touched yet */ 2977 path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr); 2978 path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1; 2979 ext_debug(inode, "init index ptr: hdr 0x%p, num %d\n", 2980 path[i].p_hdr, 2981 le16_to_cpu(path[i].p_hdr->eh_entries)); 2982 } else { 2983 /* we were already here, so look at the next index */ 2984 path[i].p_idx--; 2985 } 2986 2987 ext_debug(inode, "level %d - index, first 0x%p, cur 0x%p\n", 2988 i, EXT_FIRST_INDEX(path[i].p_hdr), 2989 path[i].p_idx); 2990 if (ext4_ext_more_to_rm(path + i)) { 2991 struct buffer_head *bh; 2992 /* go to the next level */ 2993 ext_debug(inode, "move to level %d (block %llu)\n", 2994 i + 1, ext4_idx_pblock(path[i].p_idx)); 2995 memset(path + i + 1, 0, sizeof(*path)); 2996 bh =
read_extent_tree_block(inode, path[i].p_idx, 2997 depth - i - 1, 2998 EXT4_EX_NOCACHE); 2999 if (IS_ERR(bh)) { 3000 /* should we reset i_size? */ 3001 err = PTR_ERR(bh); 3002 break; 3003 } 3004 /* Yield here to deal with large extent trees. 3005 * Should be a no-op if we did IO above. */ 3006 cond_resched(); 3007 if (WARN_ON(i + 1 > depth)) { 3008 err = -EFSCORRUPTED; 3009 break; 3010 } 3011 path[i + 1].p_bh = bh; 3012 3013 /* save the actual number of indexes since this 3014 * number is changed at the next iteration */ 3015 path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries); 3016 i++; 3017 } else { 3018 /* we finished processing this index, go up */ 3019 if (path[i].p_hdr->eh_entries == 0 && i > 0) { 3020 /* index is empty, remove it; 3021 * the journal handle has already been 3022 * prepared while removing the leaf */ 3023 err = ext4_ext_rm_idx(handle, inode, path, i); 3024 } 3025 /* root level has p_bh == NULL, brelse() eats this */ 3026 ext4_ext_path_brelse(path + i); 3027 i--; 3028 ext_debug(inode, "return to level %d\n", i); 3029 } 3030 } 3031 3032 trace_ext4_ext_remove_space_done(inode, start, end, depth, &partial, 3033 path->p_hdr->eh_entries); 3034 3035 /* 3036 * if there's a partial cluster and we have removed the first extent 3037 * in the file, then we also free the partial cluster, if any 3038 */ 3039 if (partial.state == tofree && err == 0) { 3040 int flags = get_default_free_blocks_flags(inode); 3041 3042 if (ext4_is_pending(inode, partial.lblk)) 3043 flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER; 3044 ext4_free_blocks(handle, inode, NULL, 3045 EXT4_C2B(sbi, partial.pclu), 3046 sbi->s_cluster_ratio, flags); 3047 if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER) 3048 ext4_rereserve_cluster(inode, partial.lblk); 3049 partial.state = initial; 3050 } 3051 3052 /* TODO: flexible tree reduction should be here */ 3053 if (path->p_hdr->eh_entries == 0) { 3054 /* 3055 * truncate to zero freed the whole tree, 3056 * so we need to correct eh_depth 3057 */ 3058 err = ext4_ext_get_access(handle, inode, path); 3059 if (err == 0) { 3060 ext_inode_hdr(inode)->eh_depth = 0; 3061 ext_inode_hdr(inode)->eh_max = 3062 cpu_to_le16(ext4_ext_space_root(inode, 0)); 3063 err = ext4_ext_dirty(handle, inode, path); 3064 } 3065 } 3066 out: 3067 ext4_free_ext_path(path); 3068 path = NULL; 3069 if (err == -EAGAIN) 3070 goto again; 3071 ext4_journal_stop(handle); 3072 3073 return err; 3074 } 3075 3076 /* 3077 * called at mount time 3078 */ 3079 void ext4_ext_init(struct super_block *sb) 3080 { 3081 /* 3082 * possible initialization would be here 3083 */ 3084 3085 if (ext4_has_feature_extents(sb)) { 3086 #if defined(AGGRESSIVE_TEST) || defined(CHECK_BINSEARCH) || defined(EXTENTS_STATS) 3087 printk(KERN_INFO "EXT4-fs: file extents enabled" 3088 #ifdef AGGRESSIVE_TEST 3089 ", aggressive tests" 3090 #endif 3091 #ifdef CHECK_BINSEARCH 3092 ", check binsearch" 3093 #endif 3094 #ifdef EXTENTS_STATS 3095 ", stats" 3096 #endif 3097 "\n"); 3098 #endif 3099 #ifdef EXTENTS_STATS 3100 spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock); 3101 EXT4_SB(sb)->s_ext_min = 1 << 30; 3102 EXT4_SB(sb)->s_ext_max = 0; 3103 #endif 3104 } 3105 } 3106 3107 /* 3108 * called at umount time 3109 */ 3110 void ext4_ext_release(struct super_block *sb) 3111 { 3112 if (!ext4_has_feature_extents(sb)) 3113 return; 3114 3115 #ifdef EXTENTS_STATS 3116 if (EXT4_SB(sb)->s_ext_blocks && EXT4_SB(sb)->s_ext_extents) { 3117 struct ext4_sb_info *sbi = EXT4_SB(sb); 3118 printk(KERN_ERR "EXT4-fs: %lu blocks in %lu extents (%lu ave)\n", 3119 sbi->s_ext_blocks,
sbi->s_ext_extents, 3120 sbi->s_ext_blocks / sbi->s_ext_extents); 3121 printk(KERN_ERR "EXT4-fs: extents: %lu min, %lu max, max depth %lu\n", 3122 sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max); 3123 } 3124 #endif 3125 } 3126 3127 static void ext4_zeroout_es(struct inode *inode, struct ext4_extent *ex) 3128 { 3129 ext4_lblk_t ee_block; 3130 ext4_fsblk_t ee_pblock; 3131 unsigned int ee_len; 3132 3133 ee_block = le32_to_cpu(ex->ee_block); 3134 ee_len = ext4_ext_get_actual_len(ex); 3135 ee_pblock = ext4_ext_pblock(ex); 3136 3137 if (ee_len == 0) 3138 return; 3139 3140 ext4_es_insert_extent(inode, ee_block, ee_len, ee_pblock, 3141 EXTENT_STATUS_WRITTEN, 0); 3142 } 3143 3144 /* FIXME!! we need to try to merge to left or right after zero-out */ 3145 static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex) 3146 { 3147 ext4_fsblk_t ee_pblock; 3148 unsigned int ee_len; 3149 3150 ee_len = ext4_ext_get_actual_len(ex); 3151 ee_pblock = ext4_ext_pblock(ex); 3152 return ext4_issue_zeroout(inode, le32_to_cpu(ex->ee_block), ee_pblock, 3153 ee_len); 3154 } 3155 3156 /* 3157 * ext4_split_extent_at() splits an extent at the given block. 3158 * 3159 * @handle: the journal handle 3160 * @inode: the file inode 3161 * @path: the path to the extent 3162 * @split: the logical block where the extent is split. 3163 * @split_flag: indicates if the extent could be zeroed out if the split fails, 3164 * and the states (initialized or unwritten) of the new extents. 3165 * @flags: flags used to insert the new extent into the extent tree. 3166 * 3167 * 3168 * Splits extent [a, b] into two extents [a, @split) and [@split, b], the states 3169 * of which are determined by @split_flag. 3170 * 3171 * There are two cases: 3172 * a> the extent is split into two extents. 3173 * b> no split is needed, and the extent is just marked. 3174 * 3175 * return 0 on success. 3176 */ 3177 static int ext4_split_extent_at(handle_t *handle, 3178 struct inode *inode, 3179 struct ext4_ext_path **ppath, 3180 ext4_lblk_t split, 3181 int split_flag, 3182 int flags) 3183 { 3184 struct ext4_ext_path *path = *ppath; 3185 ext4_fsblk_t newblock; 3186 ext4_lblk_t ee_block; 3187 struct ext4_extent *ex, newex, orig_ex, zero_ex; 3188 struct ext4_extent *ex2 = NULL; 3189 unsigned int ee_len, depth; 3190 int err = 0; 3191 3192 BUG_ON((split_flag & (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2)) == 3193 (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2)); 3194 3195 ext_debug(inode, "logical block %llu\n", (unsigned long long)split); 3196 3197 ext4_ext_show_leaf(inode, path); 3198 3199 depth = ext_depth(inode); 3200 ex = path[depth].p_ext; 3201 ee_block = le32_to_cpu(ex->ee_block); 3202 ee_len = ext4_ext_get_actual_len(ex); 3203 newblock = split - ee_block + ext4_ext_pblock(ex); 3204 3205 BUG_ON(split < ee_block || split >= (ee_block + ee_len)); 3206 BUG_ON(!ext4_ext_is_unwritten(ex) && 3207 split_flag & (EXT4_EXT_MAY_ZEROOUT | 3208 EXT4_EXT_MARK_UNWRIT1 | 3209 EXT4_EXT_MARK_UNWRIT2)); 3210 3211 err = ext4_ext_get_access(handle, inode, path + depth); 3212 if (err) 3213 goto out; 3214 3215 if (split == ee_block) { 3216 /* 3217 * case b: block @split is the block that the extent begins with, 3218 * so we just change the state of the extent, and splitting 3219 * is not needed.
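 *
 * (e.g. splitting [100..199] at block 100 with EXT4_EXT_MARK_UNWRIT2
 * set just marks the whole extent unwritten in place; no new extent
 * is inserted.)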
3220 */ 3221 if (split_flag & EXT4_EXT_MARK_UNWRIT2) 3222 ext4_ext_mark_unwritten(ex); 3223 else 3224 ext4_ext_mark_initialized(ex); 3225 3226 if (!(flags & EXT4_GET_BLOCKS_PRE_IO)) 3227 ext4_ext_try_to_merge(handle, inode, path, ex); 3228 3229 err = ext4_ext_dirty(handle, inode, path + path->p_depth); 3230 goto out; 3231 } 3232 3233 /* case a */ 3234 memcpy(&orig_ex, ex, sizeof(orig_ex)); 3235 ex->ee_len = cpu_to_le16(split - ee_block); 3236 if (split_flag & EXT4_EXT_MARK_UNWRIT1) 3237 ext4_ext_mark_unwritten(ex); 3238 3239 /* 3240 * the path may lead to a new leaf, not to the original leaf any more, 3241 * after ext4_ext_insert_extent() returns. 3242 */ 3243 err = ext4_ext_dirty(handle, inode, path + depth); 3244 if (err) 3245 goto fix_extent_len; 3246 3247 ex2 = &newex; 3248 ex2->ee_block = cpu_to_le32(split); 3249 ex2->ee_len = cpu_to_le16(ee_len - (split - ee_block)); 3250 ext4_ext_store_pblock(ex2, newblock); 3251 if (split_flag & EXT4_EXT_MARK_UNWRIT2) 3252 ext4_ext_mark_unwritten(ex2); 3253 3254 err = ext4_ext_insert_extent(handle, inode, ppath, &newex, flags); 3255 if (err != -ENOSPC && err != -EDQUOT && err != -ENOMEM) 3256 goto out; 3257 3258 /* 3259 * Updating the path is required because the previous ext4_ext_insert_extent() 3260 * may have freed or reallocated the path. Using EXT4_EX_NOFAIL 3261 * guarantees that ext4_find_extent() will not return -ENOMEM, 3262 * otherwise -ENOMEM would cause a retry in do_writepages(), and a 3263 * WARN_ON may be triggered in ext4_da_update_reserve_space() due to 3264 * an incorrect ee_len causing the i_reserved_data_blocks exception. 3265 */ 3266 path = ext4_find_extent(inode, ee_block, ppath, 3267 flags | EXT4_EX_NOFAIL); 3268 if (IS_ERR(path)) { 3269 EXT4_ERROR_INODE(inode, "Failed split extent on %u, err %ld", 3270 split, PTR_ERR(path)); 3271 return PTR_ERR(path); 3272 } 3273 depth = ext_depth(inode); 3274 ex = path[depth].p_ext; 3275 3276 if (EXT4_EXT_MAY_ZEROOUT & split_flag) { 3277 if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) { 3278 if (split_flag & EXT4_EXT_DATA_VALID1) { 3279 err = ext4_ext_zeroout(inode, ex2); 3280 zero_ex.ee_block = ex2->ee_block; 3281 zero_ex.ee_len = cpu_to_le16( 3282 ext4_ext_get_actual_len(ex2)); 3283 ext4_ext_store_pblock(&zero_ex, 3284 ext4_ext_pblock(ex2)); 3285 } else { 3286 err = ext4_ext_zeroout(inode, ex); 3287 zero_ex.ee_block = ex->ee_block; 3288 zero_ex.ee_len = cpu_to_le16( 3289 ext4_ext_get_actual_len(ex)); 3290 ext4_ext_store_pblock(&zero_ex, 3291 ext4_ext_pblock(ex)); 3292 } 3293 } else { 3294 err = ext4_ext_zeroout(inode, &orig_ex); 3295 zero_ex.ee_block = orig_ex.ee_block; 3296 zero_ex.ee_len = cpu_to_le16( 3297 ext4_ext_get_actual_len(&orig_ex)); 3298 ext4_ext_store_pblock(&zero_ex, 3299 ext4_ext_pblock(&orig_ex)); 3300 } 3301 3302 if (!err) { 3303 /* update the extent length and mark as initialized */ 3304 ex->ee_len = cpu_to_le16(ee_len); 3305 ext4_ext_try_to_merge(handle, inode, path, ex); 3306 err = ext4_ext_dirty(handle, inode, path + path->p_depth); 3307 if (!err) 3308 /* update extent status tree */ 3309 ext4_zeroout_es(inode, &zero_ex); 3310 /* If we failed at this point, we don't know in which 3311 * state the extent tree exactly is, so don't try to fix 3312 * the length of the original extent as it may do even more 3313 * damage. 3314 */ 3315 goto out; 3316 } 3317 } 3318 3319 fix_extent_len: 3320 ex->ee_len = orig_ex.ee_len; 3321 /* 3322 * Ignore the ext4_ext_dirty return value since we are already in the 3323 * error path and err is a non-zero error code.
3324 */ 3325 ext4_ext_dirty(handle, inode, path + path->p_depth); 3326 return err; 3327 out: 3328 ext4_ext_show_leaf(inode, *ppath); 3329 return err; 3330 } 3331 3332 /* 3333 * ext4_split_extent() splits an extent and marks the extent which is covered 3334 * by @map as @split_flag indicates. 3335 * 3336 * It may result in splitting the extent into multiple extents (up to three). 3337 * There are three possibilities: 3338 * a> There is no split required 3339 * b> Splits in two extents: Split is happening at either end of the extent 3340 * c> Splits in three extents: Someone is splitting in the middle of the extent 3341 * 3342 */ 3343 static int ext4_split_extent(handle_t *handle, 3344 struct inode *inode, 3345 struct ext4_ext_path **ppath, 3346 struct ext4_map_blocks *map, 3347 int split_flag, 3348 int flags) 3349 { 3350 struct ext4_ext_path *path = *ppath; 3351 ext4_lblk_t ee_block; 3352 struct ext4_extent *ex; 3353 unsigned int ee_len, depth; 3354 int err = 0; 3355 int unwritten; 3356 int split_flag1, flags1; 3357 int allocated = map->m_len; 3358 3359 depth = ext_depth(inode); 3360 ex = path[depth].p_ext; 3361 ee_block = le32_to_cpu(ex->ee_block); 3362 ee_len = ext4_ext_get_actual_len(ex); 3363 unwritten = ext4_ext_is_unwritten(ex); 3364 3365 if (map->m_lblk + map->m_len < ee_block + ee_len) { 3366 split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT; 3367 flags1 = flags | EXT4_GET_BLOCKS_PRE_IO; 3368 if (unwritten) 3369 split_flag1 |= EXT4_EXT_MARK_UNWRIT1 | 3370 EXT4_EXT_MARK_UNWRIT2; 3371 if (split_flag & EXT4_EXT_DATA_VALID2) 3372 split_flag1 |= EXT4_EXT_DATA_VALID1; 3373 err = ext4_split_extent_at(handle, inode, ppath, 3374 map->m_lblk + map->m_len, split_flag1, flags1); 3375 if (err) 3376 goto out; 3377 } else { 3378 allocated = ee_len - (map->m_lblk - ee_block); 3379 } 3380 /* 3381 * Updating the path is required because the previous ext4_split_extent_at() 3382 * may result in a split of the original leaf or an extent zeroout. 3383 */ 3384 path = ext4_find_extent(inode, map->m_lblk, ppath, flags); 3385 if (IS_ERR(path)) 3386 return PTR_ERR(path); 3387 depth = ext_depth(inode); 3388 ex = path[depth].p_ext; 3389 if (!ex) { 3390 EXT4_ERROR_INODE(inode, "unexpected hole at %lu", 3391 (unsigned long) map->m_lblk); 3392 return -EFSCORRUPTED; 3393 } 3394 unwritten = ext4_ext_is_unwritten(ex); 3395 3396 if (map->m_lblk >= ee_block) { 3397 split_flag1 = split_flag & EXT4_EXT_DATA_VALID2; 3398 if (unwritten) { 3399 split_flag1 |= EXT4_EXT_MARK_UNWRIT1; 3400 split_flag1 |= split_flag & (EXT4_EXT_MAY_ZEROOUT | 3401 EXT4_EXT_MARK_UNWRIT2); 3402 } 3403 err = ext4_split_extent_at(handle, inode, ppath, 3404 map->m_lblk, split_flag1, flags); 3405 if (err) 3406 goto out; 3407 } 3408 3409 ext4_ext_show_leaf(inode, *ppath); 3410 out: 3411 return err ? err : allocated; 3412 } 3413 3414 /* 3415 * This function is called by ext4_ext_map_blocks() if someone tries to write 3416 * to an unwritten extent. It may result in splitting the unwritten 3417 * extent into multiple extents (up to three - one initialized and two 3418 * unwritten). 3419 * There are three possibilities: 3420 * a> There is no split required: Entire extent should be initialized 3421 * b> Splits in two extents: Write is happening at either end of the extent 3422 * c> Splits in three extents: Someone is writing in the middle of the extent 3423 * 3424 * Pre-conditions: 3425 * - The extent pointed to by 'path' is unwritten. 3426 * - The extent pointed to by 'path' contains a superset 3427 * of the logical span [map->m_lblk, map->m_lblk + map->m_len). 3428 * 3429 * Post-conditions on success: 3430 * - the returned value is the number of blocks beyond map->m_lblk 3431 * that are allocated and initialized. 3432 * It is guaranteed to be >= map->m_len. 3433 */ 3434 static int ext4_ext_convert_to_initialized(handle_t *handle, 3435 struct inode *inode, 3436 struct ext4_map_blocks *map, 3437 struct ext4_ext_path **ppath, 3438 int flags) 3439 { 3440 struct ext4_ext_path *path = *ppath; 3441 struct ext4_sb_info *sbi; 3442 struct ext4_extent_header *eh; 3443 struct ext4_map_blocks split_map; 3444 struct ext4_extent zero_ex1, zero_ex2; 3445 struct ext4_extent *ex, *abut_ex; 3446 ext4_lblk_t ee_block, eof_block; 3447 unsigned int ee_len, depth, map_len = map->m_len; 3448 int err = 0; 3449 int split_flag = EXT4_EXT_DATA_VALID2; 3450 int allocated = 0; 3451 unsigned int max_zeroout = 0; 3452 3453 ext_debug(inode, "logical block %llu, max_blocks %u\n", 3454 (unsigned long long)map->m_lblk, map_len); 3455 3456 sbi = EXT4_SB(inode->i_sb); 3457 eof_block = (EXT4_I(inode)->i_disksize + inode->i_sb->s_blocksize - 1) 3458 >> inode->i_sb->s_blocksize_bits; 3459 if (eof_block < map->m_lblk + map_len) 3460 eof_block = map->m_lblk + map_len; 3461 3462 depth = ext_depth(inode); 3463 eh = path[depth].p_hdr; 3464 ex = path[depth].p_ext; 3465 ee_block = le32_to_cpu(ex->ee_block); 3466 ee_len = ext4_ext_get_actual_len(ex); 3467 zero_ex1.ee_len = 0; 3468 zero_ex2.ee_len = 0; 3469 3470 trace_ext4_ext_convert_to_initialized_enter(inode, map, ex); 3471 3472 /* Pre-conditions */ 3473 BUG_ON(!ext4_ext_is_unwritten(ex)); 3474 BUG_ON(!in_range(map->m_lblk, ee_block, ee_len)); 3475 3476 /* 3477 * Attempt to transfer newly initialized blocks from the currently 3478 * unwritten extent to its neighbor. This is much cheaper 3479 * than an insertion followed by a merge as those involve costly 3480 * memmove() calls. Transferring to the left is the common case in 3481 * steady state for workloads doing fallocate(FALLOC_FL_KEEP_SIZE) 3482 * followed by append writes. 3483 * 3484 * Limitations of the current logic: 3485 * - L1: we do not deal with writes covering the whole extent. 3486 * This would require removing the extent if the transfer 3487 * is possible. 3488 * - L2: we only attempt to merge with an extent stored in the 3489 * same extent tree node. 3490 */ 3491 if ((map->m_lblk == ee_block) && 3492 /* See if we can merge left */ 3493 (map_len < ee_len) && /*L1*/ 3494 (ex > EXT_FIRST_EXTENT(eh))) { /*L2*/ 3495 ext4_lblk_t prev_lblk; 3496 ext4_fsblk_t prev_pblk, ee_pblk; 3497 unsigned int prev_len; 3498 3499 abut_ex = ex - 1; 3500 prev_lblk = le32_to_cpu(abut_ex->ee_block); 3501 prev_len = ext4_ext_get_actual_len(abut_ex); 3502 prev_pblk = ext4_ext_pblock(abut_ex); 3503 ee_pblk = ext4_ext_pblock(ex); 3504 3505 /* 3506 * A transfer of blocks from 'ex' to 'abut_ex' is allowed 3507 * under those conditions: 3508 * - C1: abut_ex is initialized, 3509 * - C2: abut_ex is logically abutting ex, 3510 * - C3: abut_ex is physically abutting ex, 3511 * - C4: abut_ex can receive the additional blocks without 3512 * overflowing the (initialized) length limit.
3513 */ 3514 if ((!ext4_ext_is_unwritten(abut_ex)) && /*C1*/ 3515 ((prev_lblk + prev_len) == ee_block) && /*C2*/ 3516 ((prev_pblk + prev_len) == ee_pblk) && /*C3*/ 3517 (prev_len < (EXT_INIT_MAX_LEN - map_len))) { /*C4*/ 3518 err = ext4_ext_get_access(handle, inode, path + depth); 3519 if (err) 3520 goto out; 3521 3522 trace_ext4_ext_convert_to_initialized_fastpath(inode, 3523 map, ex, abut_ex); 3524 3525 /* Shift the start of ex by 'map_len' blocks */ 3526 ex->ee_block = cpu_to_le32(ee_block + map_len); 3527 ext4_ext_store_pblock(ex, ee_pblk + map_len); 3528 ex->ee_len = cpu_to_le16(ee_len - map_len); 3529 ext4_ext_mark_unwritten(ex); /* Restore the flag */ 3530 3531 /* Extend abut_ex by 'map_len' blocks */ 3532 abut_ex->ee_len = cpu_to_le16(prev_len + map_len); 3533 3534 /* Result: number of initialized blocks past m_lblk */ 3535 allocated = map_len; 3536 } 3537 } else if (((map->m_lblk + map_len) == (ee_block + ee_len)) && 3538 (map_len < ee_len) && /*L1*/ 3539 ex < EXT_LAST_EXTENT(eh)) { /*L2*/ 3540 /* See if we can merge right */ 3541 ext4_lblk_t next_lblk; 3542 ext4_fsblk_t next_pblk, ee_pblk; 3543 unsigned int next_len; 3544 3545 abut_ex = ex + 1; 3546 next_lblk = le32_to_cpu(abut_ex->ee_block); 3547 next_len = ext4_ext_get_actual_len(abut_ex); 3548 next_pblk = ext4_ext_pblock(abut_ex); 3549 ee_pblk = ext4_ext_pblock(ex); 3550 3551 /* 3552 * A transfer of blocks from 'ex' to 'abut_ex' is allowed 3553 * upon those conditions: 3554 * - C1: abut_ex is initialized, 3555 * - C2: abut_ex is logically abutting ex, 3556 * - C3: abut_ex is physically abutting ex, 3557 * - C4: abut_ex can receive the additional blocks without 3558 * overflowing the (initialized) length limit. 3559 */ 3560 if ((!ext4_ext_is_unwritten(abut_ex)) && /*C1*/ 3561 ((map->m_lblk + map_len) == next_lblk) && /*C2*/ 3562 ((ee_pblk + ee_len) == next_pblk) && /*C3*/ 3563 (next_len < (EXT_INIT_MAX_LEN - map_len))) { /*C4*/ 3564 err = ext4_ext_get_access(handle, inode, path + depth); 3565 if (err) 3566 goto out; 3567 3568 trace_ext4_ext_convert_to_initialized_fastpath(inode, 3569 map, ex, abut_ex); 3570 3571 /* Shift the start of abut_ex by 'map_len' blocks */ 3572 abut_ex->ee_block = cpu_to_le32(next_lblk - map_len); 3573 ext4_ext_store_pblock(abut_ex, next_pblk - map_len); 3574 ex->ee_len = cpu_to_le16(ee_len - map_len); 3575 ext4_ext_mark_unwritten(ex); /* Restore the flag */ 3576 3577 /* Extend abut_ex by 'map_len' blocks */ 3578 abut_ex->ee_len = cpu_to_le16(next_len + map_len); 3579 3580 /* Result: number of initialized blocks past m_lblk */ 3581 allocated = map_len; 3582 } 3583 } 3584 if (allocated) { 3585 /* Mark the block containing both extents as dirty */ 3586 err = ext4_ext_dirty(handle, inode, path + depth); 3587 3588 /* Update path to point to the right extent */ 3589 path[depth].p_ext = abut_ex; 3590 goto out; 3591 } else 3592 allocated = ee_len - (map->m_lblk - ee_block); 3593 3594 WARN_ON(map->m_lblk < ee_block); 3595 /* 3596 * It is safe to convert extent to initialized via explicit 3597 * zeroout only if extent is fully inside i_size or new_size. 3598 */ 3599 split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0; 3600 3601 if (EXT4_EXT_MAY_ZEROOUT & split_flag) 3602 max_zeroout = sbi->s_extent_max_zeroout_kb >> 3603 (inode->i_sb->s_blocksize_bits - 10); 3604 3605 /* 3606 * five cases: 3607 * 1. split the extent into three extents. 3608 * 2. split the extent into two extents, zeroout the head of the first 3609 * extent. 3610 * 3. 
split the extent into two extents, zeroout the tail of the second 3611 * extent. 3612 * 4. split the extent into two extents without zeroout. 3613 * 5. no splitting needed, just possibly zeroout the head and/or the 3614 * tail of the extent. 3615 */ 3616 split_map.m_lblk = map->m_lblk; 3617 split_map.m_len = map->m_len; 3618 3619 if (max_zeroout && (allocated > split_map.m_len)) { 3620 if (allocated <= max_zeroout) { 3621 /* case 3 or 5 */ 3622 zero_ex1.ee_block = 3623 cpu_to_le32(split_map.m_lblk + 3624 split_map.m_len); 3625 zero_ex1.ee_len = 3626 cpu_to_le16(allocated - split_map.m_len); 3627 ext4_ext_store_pblock(&zero_ex1, 3628 ext4_ext_pblock(ex) + split_map.m_lblk + 3629 split_map.m_len - ee_block); 3630 err = ext4_ext_zeroout(inode, &zero_ex1); 3631 if (err) 3632 goto fallback; 3633 split_map.m_len = allocated; 3634 } 3635 if (split_map.m_lblk - ee_block + split_map.m_len < 3636 max_zeroout) { 3637 /* case 2 or 5 */ 3638 if (split_map.m_lblk != ee_block) { 3639 zero_ex2.ee_block = ex->ee_block; 3640 zero_ex2.ee_len = cpu_to_le16(split_map.m_lblk - 3641 ee_block); 3642 ext4_ext_store_pblock(&zero_ex2, 3643 ext4_ext_pblock(ex)); 3644 err = ext4_ext_zeroout(inode, &zero_ex2); 3645 if (err) 3646 goto fallback; 3647 } 3648 3649 split_map.m_len += split_map.m_lblk - ee_block; 3650 split_map.m_lblk = ee_block; 3651 allocated = map->m_len; 3652 } 3653 } 3654 3655 fallback: 3656 err = ext4_split_extent(handle, inode, ppath, &split_map, split_flag, 3657 flags); 3658 if (err > 0) 3659 err = 0; 3660 out: 3661 /* If we have gotten a failure, don't zero out the status tree */ 3662 if (!err) { 3663 ext4_zeroout_es(inode, &zero_ex1); 3664 ext4_zeroout_es(inode, &zero_ex2); 3665 } 3666 return err ? err : allocated; 3667 } 3668 3669 /* 3670 * This function is called by ext4_ext_map_blocks() from 3671 * ext4_get_blocks_dio_write() when DIO needs to write 3672 * to an unwritten extent. 3673 * 3674 * Writing to an unwritten extent may result in splitting the unwritten 3675 * extent into multiple initialized/unwritten extents (up to three) 3676 * There are three possibilities: 3677 * a> There is no split required: Entire extent should be unwritten 3678 * b> Splits in two extents: Write is happening at either end of the extent 3679 * c> Splits in three extents: Someone is writing in the middle of the extent 3680 * 3681 * This works the same way in the case of initialized -> unwritten conversion. 3682 * 3683 * One or more index blocks may be needed if the extent tree grows after 3684 * the unwritten extent is split. To prevent ENOSPC from occurring at IO 3685 * completion, we need to split the unwritten extent before the IO is 3686 * submitted. The unwritten extent found at this time will be split 3687 * into three unwritten extents (at most). After the IO completes, the part 3688 * being filled will be converted to initialized by the end_io callback function 3689 * via ext4_convert_unwritten_extents(). 3690 * 3691 * Returns the size of the unwritten extent to be written on success.
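 *
 * For example, a DIO write into the middle of an unwritten extent
 * [0..99] is first split here into three unwritten extents, say
 * [0..31], [32..63] and [64..99]; after the IO completes, only the
 * middle piece is flipped to written by the end_io path.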
3692 */ 3693 static int ext4_split_convert_extents(handle_t *handle, 3694 struct inode *inode, 3695 struct ext4_map_blocks *map, 3696 struct ext4_ext_path **ppath, 3697 int flags) 3698 { 3699 struct ext4_ext_path *path = *ppath; 3700 ext4_lblk_t eof_block; 3701 ext4_lblk_t ee_block; 3702 struct ext4_extent *ex; 3703 unsigned int ee_len; 3704 int split_flag = 0, depth; 3705 3706 ext_debug(inode, "logical block %llu, max_blocks %u\n", 3707 (unsigned long long)map->m_lblk, map->m_len); 3708 3709 eof_block = (EXT4_I(inode)->i_disksize + inode->i_sb->s_blocksize - 1) 3710 >> inode->i_sb->s_blocksize_bits; 3711 if (eof_block < map->m_lblk + map->m_len) 3712 eof_block = map->m_lblk + map->m_len; 3713 /* 3714 * It is safe to convert the extent to initialized via explicit 3715 * zeroout only if the extent is fully inside i_size or new_size. 3716 */ 3717 depth = ext_depth(inode); 3718 ex = path[depth].p_ext; 3719 ee_block = le32_to_cpu(ex->ee_block); 3720 ee_len = ext4_ext_get_actual_len(ex); 3721 3722 /* Convert to unwritten */ 3723 if (flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN) { 3724 split_flag |= EXT4_EXT_DATA_VALID1; 3725 /* Convert to initialized */ 3726 } else if (flags & EXT4_GET_BLOCKS_CONVERT) { 3727 split_flag |= ee_block + ee_len <= eof_block ? 3728 EXT4_EXT_MAY_ZEROOUT : 0; 3729 split_flag |= (EXT4_EXT_MARK_UNWRIT2 | EXT4_EXT_DATA_VALID2); 3730 } 3731 flags |= EXT4_GET_BLOCKS_PRE_IO; 3732 return ext4_split_extent(handle, inode, ppath, map, split_flag, flags); 3733 } 3734 3735 static int ext4_convert_unwritten_extents_endio(handle_t *handle, 3736 struct inode *inode, 3737 struct ext4_map_blocks *map, 3738 struct ext4_ext_path **ppath) 3739 { 3740 struct ext4_ext_path *path = *ppath; 3741 struct ext4_extent *ex; 3742 ext4_lblk_t ee_block; 3743 unsigned int ee_len; 3744 int depth; 3745 int err = 0; 3746 3747 depth = ext_depth(inode); 3748 ex = path[depth].p_ext; 3749 ee_block = le32_to_cpu(ex->ee_block); 3750 ee_len = ext4_ext_get_actual_len(ex); 3751 3752 ext_debug(inode, "logical block %llu, max_blocks %u\n", 3753 (unsigned long long)ee_block, ee_len); 3754 3755 /* If the extent is larger than requested, it is a clear sign that we 3756 * still have some extent state machine issues left. So extent_split 3757 * is still required. 3758 * TODO: Once all related issues are fixed, this situation should be 3759 * illegal.
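 *
 * (Sketch: if ex covers [0..9] but the completed IO only covers
 * map [0..3], ext4_split_convert_extents() below carves out [0..3]
 * first, and only that piece is then marked initialized.)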
3760 */ 3761 if (ee_block != map->m_lblk || ee_len > map->m_len) { 3762 #ifdef CONFIG_EXT4_DEBUG 3763 ext4_warning(inode->i_sb, "Inode (%ld) finished: extent logical block %llu," 3764 " len %u; IO logical block %llu, len %u", 3765 inode->i_ino, (unsigned long long)ee_block, ee_len, 3766 (unsigned long long)map->m_lblk, map->m_len); 3767 #endif 3768 err = ext4_split_convert_extents(handle, inode, map, ppath, 3769 EXT4_GET_BLOCKS_CONVERT); 3770 if (err < 0) 3771 return err; 3772 path = ext4_find_extent(inode, map->m_lblk, ppath, 0); 3773 if (IS_ERR(path)) 3774 return PTR_ERR(path); 3775 depth = ext_depth(inode); 3776 ex = path[depth].p_ext; 3777 } 3778 3779 err = ext4_ext_get_access(handle, inode, path + depth); 3780 if (err) 3781 goto out; 3782 /* first mark the extent as initialized */ 3783 ext4_ext_mark_initialized(ex); 3784 3785 /* note: ext4_ext_correct_indexes() isn't needed here because 3786 * borders are not changed 3787 */ 3788 ext4_ext_try_to_merge(handle, inode, path, ex); 3789 3790 /* Mark modified extent as dirty */ 3791 err = ext4_ext_dirty(handle, inode, path + path->p_depth); 3792 out: 3793 ext4_ext_show_leaf(inode, path); 3794 return err; 3795 } 3796 3797 static int 3798 convert_initialized_extent(handle_t *handle, struct inode *inode, 3799 struct ext4_map_blocks *map, 3800 struct ext4_ext_path **ppath, 3801 unsigned int *allocated) 3802 { 3803 struct ext4_ext_path *path = *ppath; 3804 struct ext4_extent *ex; 3805 ext4_lblk_t ee_block; 3806 unsigned int ee_len; 3807 int depth; 3808 int err = 0; 3809 3810 /* 3811 * Make sure that the extent is no bigger than we support with 3812 * unwritten extent 3813 */ 3814 if (map->m_len > EXT_UNWRITTEN_MAX_LEN) 3815 map->m_len = EXT_UNWRITTEN_MAX_LEN / 2; 3816 3817 depth = ext_depth(inode); 3818 ex = path[depth].p_ext; 3819 ee_block = le32_to_cpu(ex->ee_block); 3820 ee_len = ext4_ext_get_actual_len(ex); 3821 3822 ext_debug(inode, "logical block %llu, max_blocks %u\n", 3823 (unsigned long long)ee_block, ee_len); 3824 3825 if (ee_block != map->m_lblk || ee_len > map->m_len) { 3826 err = ext4_split_convert_extents(handle, inode, map, ppath, 3827 EXT4_GET_BLOCKS_CONVERT_UNWRITTEN); 3828 if (err < 0) 3829 return err; 3830 path = ext4_find_extent(inode, map->m_lblk, ppath, 0); 3831 if (IS_ERR(path)) 3832 return PTR_ERR(path); 3833 depth = ext_depth(inode); 3834 ex = path[depth].p_ext; 3835 if (!ex) { 3836 EXT4_ERROR_INODE(inode, "unexpected hole at %lu", 3837 (unsigned long) map->m_lblk); 3838 return -EFSCORRUPTED; 3839 } 3840 } 3841 3842 err = ext4_ext_get_access(handle, inode, path + depth); 3843 if (err) 3844 return err; 3845 /* first mark the extent as unwritten */ 3846 ext4_ext_mark_unwritten(ex); 3847 3848 /* note: ext4_ext_correct_indexes() isn't needed here because 3849 * borders are not changed 3850 */ 3851 ext4_ext_try_to_merge(handle, inode, path, ex); 3852 3853 /* Mark modified extent as dirty */ 3854 err = ext4_ext_dirty(handle, inode, path + path->p_depth); 3855 if (err) 3856 return err; 3857 ext4_ext_show_leaf(inode, path); 3858 3859 ext4_update_inode_fsync_trans(handle, inode, 1); 3860 3861 map->m_flags |= EXT4_MAP_UNWRITTEN; 3862 if (*allocated > map->m_len) 3863 *allocated = map->m_len; 3864 map->m_len = *allocated; 3865 return 0; 3866 } 3867 3868 static int 3869 ext4_ext_handle_unwritten_extents(handle_t *handle, struct inode *inode, 3870 struct ext4_map_blocks *map, 3871 struct ext4_ext_path **ppath, int flags, 3872 unsigned int allocated, ext4_fsblk_t newblock) 3873 { 3874 int ret = 0; 3875 int err = 0; 3876 3877 
ext_debug(inode, "logical block %llu, max_blocks %u, flags 0x%x, allocated %u\n", 3878 (unsigned long long)map->m_lblk, map->m_len, flags, 3879 allocated); 3880 ext4_ext_show_leaf(inode, *ppath); 3881 3882 /* 3883 * When writing into unwritten space, we should not fail to 3884 * allocate metadata blocks for the new extent block if needed. 3885 */ 3886 flags |= EXT4_GET_BLOCKS_METADATA_NOFAIL; 3887 3888 trace_ext4_ext_handle_unwritten_extents(inode, map, flags, 3889 allocated, newblock); 3890 3891 /* get_block() before submitting IO, split the extent */ 3892 if (flags & EXT4_GET_BLOCKS_PRE_IO) { 3893 ret = ext4_split_convert_extents(handle, inode, map, ppath, 3894 flags | EXT4_GET_BLOCKS_CONVERT); 3895 if (ret < 0) { 3896 err = ret; 3897 goto out2; 3898 } 3899 /* 3900 * shouldn't get a 0 return when splitting an extent unless 3901 * m_len is 0 (bug) or extent has been corrupted 3902 */ 3903 if (unlikely(ret == 0)) { 3904 EXT4_ERROR_INODE(inode, 3905 "unexpected ret == 0, m_len = %u", 3906 map->m_len); 3907 err = -EFSCORRUPTED; 3908 goto out2; 3909 } 3910 map->m_flags |= EXT4_MAP_UNWRITTEN; 3911 goto out; 3912 } 3913 /* IO end_io complete, convert the filled extent to written */ 3914 if (flags & EXT4_GET_BLOCKS_CONVERT) { 3915 err = ext4_convert_unwritten_extents_endio(handle, inode, map, 3916 ppath); 3917 if (err < 0) 3918 goto out2; 3919 ext4_update_inode_fsync_trans(handle, inode, 1); 3920 goto map_out; 3921 } 3922 /* buffered IO cases */ 3923 /* 3924 * repeat fallocate creation request 3925 * we already have an unwritten extent 3926 */ 3927 if (flags & EXT4_GET_BLOCKS_UNWRIT_EXT) { 3928 map->m_flags |= EXT4_MAP_UNWRITTEN; 3929 goto map_out; 3930 } 3931 3932 /* buffered READ or buffered write_begin() lookup */ 3933 if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) { 3934 /* 3935 * We have blocks reserved already. We 3936 * return allocated blocks so that delalloc 3937 * won't do block reservation for us. But 3938 * the buffer head will be unmapped so that 3939 * a read from the block returns 0s. 3940 */ 3941 map->m_flags |= EXT4_MAP_UNWRITTEN; 3942 goto out1; 3943 } 3944 3945 /* 3946 * Default case when (flags & EXT4_GET_BLOCKS_CREATE) == 1. 3947 * For buffered writes, at writepage time, etc. Convert a 3948 * discovered unwritten extent to written. 3949 */ 3950 ret = ext4_ext_convert_to_initialized(handle, inode, map, ppath, flags); 3951 if (ret < 0) { 3952 err = ret; 3953 goto out2; 3954 } 3955 ext4_update_inode_fsync_trans(handle, inode, 1); 3956 /* 3957 * shouldn't get a 0 return when converting an unwritten extent 3958 * unless m_len is 0 (bug) or extent has been corrupted 3959 */ 3960 if (unlikely(ret == 0)) { 3961 EXT4_ERROR_INODE(inode, "unexpected ret == 0, m_len = %u", 3962 map->m_len); 3963 err = -EFSCORRUPTED; 3964 goto out2; 3965 } 3966 3967 out: 3968 allocated = ret; 3969 map->m_flags |= EXT4_MAP_NEW; 3970 map_out: 3971 map->m_flags |= EXT4_MAP_MAPPED; 3972 out1: 3973 map->m_pblk = newblock; 3974 if (allocated > map->m_len) 3975 allocated = map->m_len; 3976 map->m_len = allocated; 3977 ext4_ext_show_leaf(inode, *ppath); 3978 out2: 3979 return err ? err : allocated; 3980 } 3981 3982 /* 3983 * get_implied_cluster_alloc - check to see if the requested 3984 * allocation (in the map structure) overlaps with a cluster already 3985 * allocated in an extent. 
 * @sb		The filesystem superblock structure
 * @map		The requested lblk->pblk mapping
 * @ex		The extent structure which might contain an implied
 *		cluster allocation
 *
 * This function is called by ext4_ext_map_blocks() after we failed to
 * find blocks that were already in the inode's extent tree. Hence,
 * we know that the beginning of the requested region cannot overlap
 * the extent from the inode's extent tree. There are three cases we
 * want to catch. The first is this case:
 *
 *		 |--- cluster # N--|
 *    |--- extent ---|	|---- requested region ---|
 *			|==========|
 *
 * The second case that we need to test for is this one:
 *
 *   |--------- cluster # N ----------------|
 *	   |--- requested region --|   |------- extent ----|
 *	   |=======================|
 *
 * The third case is when the requested region lies between two extents
 * within the same cluster:
 *          |------------- cluster # N-------------|
 * |----- ex -----|                  |---- ex_right ----|
 *                  |------ requested region ------|
 *                  |================|
 *
 * In each of the above cases, we need to set map->m_pblk and map->m_len
 * so that they correspond to the extent labelled "|====|" from cluster
 * #N, since it is already in use for data in cluster
 * EXT4_B2C(sbi, map->m_lblk). We will then return 1 to signal to
 * ext4_ext_map_blocks() that map->m_pblk should be treated as a new
 * "allocated" block region. Otherwise, we will return 0 and
 * ext4_ext_map_blocks() will then allocate one or more new clusters
 * by calling ext4_mb_new_blocks().
 */
static int get_implied_cluster_alloc(struct super_block *sb,
				     struct ext4_map_blocks *map,
				     struct ext4_extent *ex,
				     struct ext4_ext_path *path)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_lblk_t c_offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
	ext4_lblk_t ex_cluster_start, ex_cluster_end;
	ext4_lblk_t rr_cluster_start;
	ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
	ext4_fsblk_t ee_start = ext4_ext_pblock(ex);
	unsigned short ee_len = ext4_ext_get_actual_len(ex);

	/* The extent passed in that we are trying to match */
	ex_cluster_start = EXT4_B2C(sbi, ee_block);
	ex_cluster_end = EXT4_B2C(sbi, ee_block + ee_len - 1);

	/* The requested region passed into ext4_map_blocks() */
	rr_cluster_start = EXT4_B2C(sbi, map->m_lblk);

	if ((rr_cluster_start == ex_cluster_end) ||
	    (rr_cluster_start == ex_cluster_start)) {
		if (rr_cluster_start == ex_cluster_end)
			ee_start += ee_len - 1;
		map->m_pblk = EXT4_PBLK_CMASK(sbi, ee_start) + c_offset;
		map->m_len = min(map->m_len,
				 (unsigned) sbi->s_cluster_ratio - c_offset);
		/*
		 * Check for and handle this case:
		 *
		 *   |--------- cluster # N-------------|
		 *		       |------- extent ----|
		 *	   |--- requested region ---|
		 *	   |===========|
		 */

		if (map->m_lblk < ee_block)
			map->m_len = min(map->m_len, ee_block - map->m_lblk);

		/*
		 * Check for the case where there is already another allocated
		 * block to the right of 'ex' but before the end of the cluster.
		 *
		 *          |------------- cluster # N-------------|
		 * |----- ex -----|                  |---- ex_right ----|
		 *                  |------ requested region ------|
		 *                  |================|
		 */
		if (map->m_lblk > ee_block) {
			ext4_lblk_t next = ext4_ext_next_allocated_block(path);
			map->m_len = min(map->m_len, next - map->m_lblk);
		}

		trace_ext4_get_implied_cluster_alloc_exit(sb, map, 1);
		return 1;
	}

	trace_ext4_get_implied_cluster_alloc_exit(sb, map, 0);
	return 0;
}

/*
 * Determine the hole length around the given logical block: first try to
 * locate and expand the hole from the given @path, then adjust it if it
 * is partially or completely covered by delayed extents; insert it into
 * the extent status tree if it is indeed a hole, and finally return the
 * length of the determined hole.
 */
static ext4_lblk_t ext4_ext_determine_insert_hole(struct inode *inode,
						  struct ext4_ext_path *path,
						  ext4_lblk_t lblk)
{
	ext4_lblk_t hole_start, len;
	struct extent_status es;

	hole_start = lblk;
	len = ext4_ext_find_hole(inode, path, &hole_start);
again:
	ext4_es_find_extent_range(inode, &ext4_es_is_delayed, hole_start,
				  hole_start + len - 1, &es);
	if (!es.es_len)
		goto insert_hole;

	/*
	 * There's a delalloc extent in the hole; handle it depending on
	 * whether the delalloc extent is in front of, behind, or straddling
	 * the queried range.
	 */
	if (lblk >= es.es_lblk + es.es_len) {
		/*
		 * The delalloc extent is in front of the queried range,
		 * find again from the queried start block.
		 */
		len -= lblk - hole_start;
		hole_start = lblk;
		goto again;
	} else if (in_range(lblk, es.es_lblk, es.es_len)) {
		/*
		 * The delalloc extent contains lblk; it must have been
		 * added after ext4_map_blocks() checked the extent status
		 * tree, since we are not holding i_rwsem and the delalloc
		 * info is only stabilized by the i_data_sem we are going
		 * to release soon. Don't modify the extent status tree and
		 * report the extent as a hole, just adjust the length to
		 * the part of the delalloc extent after lblk.
		 */
		len = es.es_lblk + es.es_len - lblk;
		return len;
	} else {
		/*
		 * The delalloc extent is partially or completely behind
		 * the queried range, update hole length until the
		 * beginning of the delalloc extent.
		 */
		len = min(es.es_lblk - hole_start, len);
	}

insert_hole:
	/* Put just found gap into cache to speed up subsequent requests */
	ext_debug(inode, " -> %u:%u\n", hole_start, len);
	ext4_es_insert_extent(inode, hole_start, len, ~0,
			      EXTENT_STATUS_HOLE, 0);

	/* Update hole_len to reflect hole size after lblk */
	if (hole_start != lblk)
		len -= lblk - hole_start;

	return len;
}

/*
 * Block allocation/map/preallocation routine for extent-based files
 *
 * Needs to be called with
 * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system blocks
 * (i.e., flags is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem)
 *
 * return > 0, number of blocks already mapped/allocated
 *          if flags doesn't contain EXT4_GET_BLOCKS_CREATE and these are
 *          pre-allocated blocks, the buffer head is unmapped;
 *          otherwise blocks are mapped
 *
 * return = 0, if plain lookup failed (blocks have not been allocated)
 *          buffer head is unmapped
 *
 * return < 0, error case.
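 *
 * A minimal lookup sketch (illustrative only; "lblk" is assumed to be
 * the caller's logical block, and a NULL handle is fine for a pure
 * lookup):
 *
 *	struct ext4_map_blocks map = { .m_lblk = lblk, .m_len = 1 };
 *	int ret;
 *
 *	down_read(&EXT4_I(inode)->i_data_sem);
 *	ret = ext4_ext_map_blocks(NULL, inode, &map, 0);
 *	up_read(&EXT4_I(inode)->i_data_sem);
 *
 * ret > 0 means map.m_pblk/map.m_len now describe an existing mapping,
 * ret == 0 means the queried range is a hole, and ret < 0 is an error.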
4169 */ 4170 int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, 4171 struct ext4_map_blocks *map, int flags) 4172 { 4173 struct ext4_ext_path *path = NULL; 4174 struct ext4_extent newex, *ex, ex2; 4175 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 4176 ext4_fsblk_t newblock = 0, pblk; 4177 int err = 0, depth, ret; 4178 unsigned int allocated = 0, offset = 0; 4179 unsigned int allocated_clusters = 0; 4180 struct ext4_allocation_request ar; 4181 ext4_lblk_t cluster_offset; 4182 4183 ext_debug(inode, "blocks %u/%u requested\n", map->m_lblk, map->m_len); 4184 trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags); 4185 4186 /* find extent for this block */ 4187 path = ext4_find_extent(inode, map->m_lblk, NULL, 0); 4188 if (IS_ERR(path)) { 4189 err = PTR_ERR(path); 4190 path = NULL; 4191 goto out; 4192 } 4193 4194 depth = ext_depth(inode); 4195 4196 /* 4197 * consistent leaf must not be empty; 4198 * this situation is possible, though, _during_ tree modification; 4199 * this is why assert can't be put in ext4_find_extent() 4200 */ 4201 if (unlikely(path[depth].p_ext == NULL && depth != 0)) { 4202 EXT4_ERROR_INODE(inode, "bad extent address " 4203 "lblock: %lu, depth: %d pblock %lld", 4204 (unsigned long) map->m_lblk, depth, 4205 path[depth].p_block); 4206 err = -EFSCORRUPTED; 4207 goto out; 4208 } 4209 4210 ex = path[depth].p_ext; 4211 if (ex) { 4212 ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block); 4213 ext4_fsblk_t ee_start = ext4_ext_pblock(ex); 4214 unsigned short ee_len; 4215 4216 4217 /* 4218 * unwritten extents are treated as holes, except that 4219 * we split out initialized portions during a write. 4220 */ 4221 ee_len = ext4_ext_get_actual_len(ex); 4222 4223 trace_ext4_ext_show_extent(inode, ee_block, ee_start, ee_len); 4224 4225 /* if found extent covers block, simply return it */ 4226 if (in_range(map->m_lblk, ee_block, ee_len)) { 4227 newblock = map->m_lblk - ee_block + ee_start; 4228 /* number of remaining blocks in the extent */ 4229 allocated = ee_len - (map->m_lblk - ee_block); 4230 ext_debug(inode, "%u fit into %u:%d -> %llu\n", 4231 map->m_lblk, ee_block, ee_len, newblock); 4232 4233 /* 4234 * If the extent is initialized check whether the 4235 * caller wants to convert it to unwritten. 4236 */ 4237 if ((!ext4_ext_is_unwritten(ex)) && 4238 (flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN)) { 4239 err = convert_initialized_extent(handle, 4240 inode, map, &path, &allocated); 4241 goto out; 4242 } else if (!ext4_ext_is_unwritten(ex)) { 4243 map->m_flags |= EXT4_MAP_MAPPED; 4244 map->m_pblk = newblock; 4245 if (allocated > map->m_len) 4246 allocated = map->m_len; 4247 map->m_len = allocated; 4248 ext4_ext_show_leaf(inode, path); 4249 goto out; 4250 } 4251 4252 ret = ext4_ext_handle_unwritten_extents( 4253 handle, inode, map, &path, flags, 4254 allocated, newblock); 4255 if (ret < 0) 4256 err = ret; 4257 else 4258 allocated = ret; 4259 goto out; 4260 } 4261 } 4262 4263 /* 4264 * requested block isn't allocated yet; 4265 * we couldn't try to create block if flags doesn't contain EXT4_GET_BLOCKS_CREATE 4266 */ 4267 if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) { 4268 ext4_lblk_t len; 4269 4270 len = ext4_ext_determine_insert_hole(inode, path, map->m_lblk); 4271 4272 map->m_pblk = 0; 4273 map->m_len = min_t(unsigned int, map->m_len, len); 4274 goto out; 4275 } 4276 4277 /* 4278 * Okay, we need to do block allocation. 
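	 * The code below first checks whether an already-allocated cluster
	 * can supply the requested blocks (bigalloc), then looks up the
	 * neighbouring extents to seed the allocation goal, clamps the
	 * request to the per-extent length limits, and finally calls
	 * ext4_mb_new_blocks() to allocate.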
4279 */ 4280 newex.ee_block = cpu_to_le32(map->m_lblk); 4281 cluster_offset = EXT4_LBLK_COFF(sbi, map->m_lblk); 4282 4283 /* 4284 * If we are doing bigalloc, check to see if the extent returned 4285 * by ext4_find_extent() implies a cluster we can use. 4286 */ 4287 if (cluster_offset && ex && 4288 get_implied_cluster_alloc(inode->i_sb, map, ex, path)) { 4289 ar.len = allocated = map->m_len; 4290 newblock = map->m_pblk; 4291 goto got_allocated_blocks; 4292 } 4293 4294 /* find neighbour allocated blocks */ 4295 ar.lleft = map->m_lblk; 4296 err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft); 4297 if (err) 4298 goto out; 4299 ar.lright = map->m_lblk; 4300 err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright, &ex2); 4301 if (err < 0) 4302 goto out; 4303 4304 /* Check if the extent after searching to the right implies a 4305 * cluster we can use. */ 4306 if ((sbi->s_cluster_ratio > 1) && err && 4307 get_implied_cluster_alloc(inode->i_sb, map, &ex2, path)) { 4308 ar.len = allocated = map->m_len; 4309 newblock = map->m_pblk; 4310 goto got_allocated_blocks; 4311 } 4312 4313 /* 4314 * See if request is beyond maximum number of blocks we can have in 4315 * a single extent. For an initialized extent this limit is 4316 * EXT_INIT_MAX_LEN and for an unwritten extent this limit is 4317 * EXT_UNWRITTEN_MAX_LEN. 4318 */ 4319 if (map->m_len > EXT_INIT_MAX_LEN && 4320 !(flags & EXT4_GET_BLOCKS_UNWRIT_EXT)) 4321 map->m_len = EXT_INIT_MAX_LEN; 4322 else if (map->m_len > EXT_UNWRITTEN_MAX_LEN && 4323 (flags & EXT4_GET_BLOCKS_UNWRIT_EXT)) 4324 map->m_len = EXT_UNWRITTEN_MAX_LEN; 4325 4326 /* Check if we can really insert (m_lblk)::(m_lblk + m_len) extent */ 4327 newex.ee_len = cpu_to_le16(map->m_len); 4328 err = ext4_ext_check_overlap(sbi, inode, &newex, path); 4329 if (err) 4330 allocated = ext4_ext_get_actual_len(&newex); 4331 else 4332 allocated = map->m_len; 4333 4334 /* allocate new block */ 4335 ar.inode = inode; 4336 ar.goal = ext4_ext_find_goal(inode, path, map->m_lblk); 4337 ar.logical = map->m_lblk; 4338 /* 4339 * We calculate the offset from the beginning of the cluster 4340 * for the logical block number, since when we allocate a 4341 * physical cluster, the physical block should start at the 4342 * same offset from the beginning of the cluster. This is 4343 * needed so that future calls to get_implied_cluster_alloc() 4344 * work correctly. 
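	 *
	 * Worked example with assumed bigalloc geometry (illustrative
	 * only): with s_cluster_ratio == 16, a request at m_lblk == 36
	 * gives offset == EXT4_LBLK_COFF(sbi, 36) == 4, so ar.goal and
	 * ar.logical are both pulled back by 4 blocks and ar.len is
	 * rounded up to whole clusters with
	 * EXT4_NUM_B2C(sbi, offset + allocated).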
4345 */ 4346 offset = EXT4_LBLK_COFF(sbi, map->m_lblk); 4347 ar.len = EXT4_NUM_B2C(sbi, offset+allocated); 4348 ar.goal -= offset; 4349 ar.logical -= offset; 4350 if (S_ISREG(inode->i_mode)) 4351 ar.flags = EXT4_MB_HINT_DATA; 4352 else 4353 /* disable in-core preallocation for non-regular files */ 4354 ar.flags = 0; 4355 if (flags & EXT4_GET_BLOCKS_NO_NORMALIZE) 4356 ar.flags |= EXT4_MB_HINT_NOPREALLOC; 4357 if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) 4358 ar.flags |= EXT4_MB_DELALLOC_RESERVED; 4359 if (flags & EXT4_GET_BLOCKS_METADATA_NOFAIL) 4360 ar.flags |= EXT4_MB_USE_RESERVED; 4361 newblock = ext4_mb_new_blocks(handle, &ar, &err); 4362 if (!newblock) 4363 goto out; 4364 allocated_clusters = ar.len; 4365 ar.len = EXT4_C2B(sbi, ar.len) - offset; 4366 ext_debug(inode, "allocate new block: goal %llu, found %llu/%u, requested %u\n", 4367 ar.goal, newblock, ar.len, allocated); 4368 if (ar.len > allocated) 4369 ar.len = allocated; 4370 4371 got_allocated_blocks: 4372 /* try to insert new extent into found leaf and return */ 4373 pblk = newblock + offset; 4374 ext4_ext_store_pblock(&newex, pblk); 4375 newex.ee_len = cpu_to_le16(ar.len); 4376 /* Mark unwritten */ 4377 if (flags & EXT4_GET_BLOCKS_UNWRIT_EXT) { 4378 ext4_ext_mark_unwritten(&newex); 4379 map->m_flags |= EXT4_MAP_UNWRITTEN; 4380 } 4381 4382 err = ext4_ext_insert_extent(handle, inode, &path, &newex, flags); 4383 if (err) { 4384 if (allocated_clusters) { 4385 int fb_flags = 0; 4386 4387 /* 4388 * free data blocks we just allocated. 4389 * not a good idea to call discard here directly, 4390 * but otherwise we'd need to call it every free(). 4391 */ 4392 ext4_discard_preallocations(inode); 4393 if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) 4394 fb_flags = EXT4_FREE_BLOCKS_NO_QUOT_UPDATE; 4395 ext4_free_blocks(handle, inode, NULL, newblock, 4396 EXT4_C2B(sbi, allocated_clusters), 4397 fb_flags); 4398 } 4399 goto out; 4400 } 4401 4402 /* 4403 * Cache the extent and update transaction to commit on fdatasync only 4404 * when it is _not_ an unwritten extent. 4405 */ 4406 if ((flags & EXT4_GET_BLOCKS_UNWRIT_EXT) == 0) 4407 ext4_update_inode_fsync_trans(handle, inode, 1); 4408 else 4409 ext4_update_inode_fsync_trans(handle, inode, 0); 4410 4411 map->m_flags |= (EXT4_MAP_NEW | EXT4_MAP_MAPPED); 4412 map->m_pblk = pblk; 4413 map->m_len = ar.len; 4414 allocated = map->m_len; 4415 ext4_ext_show_leaf(inode, path); 4416 out: 4417 ext4_free_ext_path(path); 4418 4419 trace_ext4_ext_map_blocks_exit(inode, flags, map, 4420 err ? err : allocated); 4421 return err ? err : allocated; 4422 } 4423 4424 int ext4_ext_truncate(handle_t *handle, struct inode *inode) 4425 { 4426 struct super_block *sb = inode->i_sb; 4427 ext4_lblk_t last_block; 4428 int err = 0; 4429 4430 /* 4431 * TODO: optimization is possible here. 4432 * Probably we need not scan at all, 4433 * because page truncation is enough. 
4434 */ 4435 4436 /* we have to know where to truncate from in crash case */ 4437 EXT4_I(inode)->i_disksize = inode->i_size; 4438 err = ext4_mark_inode_dirty(handle, inode); 4439 if (err) 4440 return err; 4441 4442 last_block = (inode->i_size + sb->s_blocksize - 1) 4443 >> EXT4_BLOCK_SIZE_BITS(sb); 4444 ext4_es_remove_extent(inode, last_block, EXT_MAX_BLOCKS - last_block); 4445 4446 retry_remove_space: 4447 err = ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCKS - 1); 4448 if (err == -ENOMEM) { 4449 memalloc_retry_wait(GFP_ATOMIC); 4450 goto retry_remove_space; 4451 } 4452 return err; 4453 } 4454 4455 static int ext4_alloc_file_blocks(struct file *file, ext4_lblk_t offset, 4456 ext4_lblk_t len, loff_t new_size, 4457 int flags) 4458 { 4459 struct inode *inode = file_inode(file); 4460 handle_t *handle; 4461 int ret = 0, ret2 = 0, ret3 = 0; 4462 int retries = 0; 4463 int depth = 0; 4464 struct ext4_map_blocks map; 4465 unsigned int credits; 4466 loff_t epos; 4467 4468 BUG_ON(!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)); 4469 map.m_lblk = offset; 4470 map.m_len = len; 4471 /* 4472 * Don't normalize the request if it can fit in one extent so 4473 * that it doesn't get unnecessarily split into multiple 4474 * extents. 4475 */ 4476 if (len <= EXT_UNWRITTEN_MAX_LEN) 4477 flags |= EXT4_GET_BLOCKS_NO_NORMALIZE; 4478 4479 /* 4480 * credits to insert 1 extent into extent tree 4481 */ 4482 credits = ext4_chunk_trans_blocks(inode, len); 4483 depth = ext_depth(inode); 4484 4485 retry: 4486 while (len) { 4487 /* 4488 * Recalculate credits when extent tree depth changes. 4489 */ 4490 if (depth != ext_depth(inode)) { 4491 credits = ext4_chunk_trans_blocks(inode, len); 4492 depth = ext_depth(inode); 4493 } 4494 4495 handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, 4496 credits); 4497 if (IS_ERR(handle)) { 4498 ret = PTR_ERR(handle); 4499 break; 4500 } 4501 ret = ext4_map_blocks(handle, inode, &map, flags); 4502 if (ret <= 0) { 4503 ext4_debug("inode #%lu: block %u: len %u: " 4504 "ext4_ext_map_blocks returned %d", 4505 inode->i_ino, map.m_lblk, 4506 map.m_len, ret); 4507 ext4_mark_inode_dirty(handle, inode); 4508 ext4_journal_stop(handle); 4509 break; 4510 } 4511 /* 4512 * allow a full retry cycle for any remaining allocations 4513 */ 4514 retries = 0; 4515 map.m_lblk += ret; 4516 map.m_len = len = len - ret; 4517 epos = (loff_t)map.m_lblk << inode->i_blkbits; 4518 inode_set_ctime_current(inode); 4519 if (new_size) { 4520 if (epos > new_size) 4521 epos = new_size; 4522 if (ext4_update_inode_size(inode, epos) & 0x1) 4523 inode_set_mtime_to_ts(inode, 4524 inode_get_ctime(inode)); 4525 } 4526 ret2 = ext4_mark_inode_dirty(handle, inode); 4527 ext4_update_inode_fsync_trans(handle, inode, 1); 4528 ret3 = ext4_journal_stop(handle); 4529 ret2 = ret3 ? ret3 : ret2; 4530 if (unlikely(ret2)) 4531 break; 4532 } 4533 if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) 4534 goto retry; 4535 4536 return ret > 0 ? 
ret2 : ret;
}

static int ext4_collapse_range(struct file *file, loff_t offset, loff_t len);

static int ext4_insert_range(struct file *file, loff_t offset, loff_t len);

static long ext4_zero_range(struct file *file, loff_t offset,
			    loff_t len, int mode)
{
	struct inode *inode = file_inode(file);
	struct address_space *mapping = file->f_mapping;
	handle_t *handle = NULL;
	unsigned int max_blocks;
	loff_t new_size = 0;
	int ret = 0;
	int flags;
	int credits;
	int partial_begin, partial_end;
	loff_t start, end;
	ext4_lblk_t lblk;
	unsigned int blkbits = inode->i_blkbits;

	trace_ext4_zero_range(inode, offset, len, mode);

	/*
	 * Round up offset. This is not fallocate, we need to zero out
	 * blocks, so convert the interior block-aligned part of the range
	 * to unwritten and possibly manually zero out the unaligned parts
	 * of the range. Here, start and partial_begin are inclusive, end
	 * and partial_end are exclusive.
	 */
	start = round_up(offset, 1 << blkbits);
	end = round_down((offset + len), 1 << blkbits);

	if (start < offset || end > offset + len)
		return -EINVAL;
	partial_begin = offset & ((1 << blkbits) - 1);
	partial_end = (offset + len) & ((1 << blkbits) - 1);

	lblk = start >> blkbits;
	max_blocks = (end >> blkbits);
	if (max_blocks < lblk)
		max_blocks = 0;
	else
		max_blocks -= lblk;

	inode_lock(inode);

	/*
	 * Indirect files do not support unwritten extents
	 */
	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
		ret = -EOPNOTSUPP;
		goto out_mutex;
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
	    (offset + len > inode->i_size ||
	     offset + len > EXT4_I(inode)->i_disksize)) {
		new_size = offset + len;
		ret = inode_newsize_ok(inode, new_size);
		if (ret)
			goto out_mutex;
	}

	flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT;

	/* Wait for all existing dio workers; newcomers will block on i_rwsem */
	inode_dio_wait(inode);

	ret = file_modified(file);
	if (ret)
		goto out_mutex;

	/* Preallocate the range including the unaligned edges */
	if (partial_begin || partial_end) {
		ret = ext4_alloc_file_blocks(file,
				round_down(offset, 1 << blkbits) >> blkbits,
				(round_up((offset + len), 1 << blkbits) -
				 round_down(offset, 1 << blkbits)) >> blkbits,
				new_size, flags);
		if (ret)
			goto out_mutex;

	}

	/* Zero range excluding the unaligned edges */
	if (max_blocks > 0) {
		flags |= (EXT4_GET_BLOCKS_CONVERT_UNWRITTEN |
			  EXT4_EX_NOCACHE);

		/*
		 * Prevent page faults from reinstantiating pages we have
		 * released from page cache.
		 */
		filemap_invalidate_lock(mapping);

		ret = ext4_break_layouts(inode);
		if (ret) {
			filemap_invalidate_unlock(mapping);
			goto out_mutex;
		}

		ret = ext4_update_disksize_before_punch(inode, offset, len);
		if (ret) {
			filemap_invalidate_unlock(mapping);
			goto out_mutex;
		}

		/*
		 * For journalled data we need to write (and checkpoint) pages
		 * before discarding page cache to avoid inconsistent data on
		 * disk in case of a crash before the zeroing trans is committed.
4650 */ 4651 if (ext4_should_journal_data(inode)) { 4652 ret = filemap_write_and_wait_range(mapping, start, 4653 end - 1); 4654 if (ret) { 4655 filemap_invalidate_unlock(mapping); 4656 goto out_mutex; 4657 } 4658 } 4659 4660 /* Now release the pages and zero block aligned part of pages */ 4661 truncate_pagecache_range(inode, start, end - 1); 4662 inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode)); 4663 4664 ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size, 4665 flags); 4666 filemap_invalidate_unlock(mapping); 4667 if (ret) 4668 goto out_mutex; 4669 } 4670 if (!partial_begin && !partial_end) 4671 goto out_mutex; 4672 4673 /* 4674 * In worst case we have to writeout two nonadjacent unwritten 4675 * blocks and update the inode 4676 */ 4677 credits = (2 * ext4_ext_index_trans_blocks(inode, 2)) + 1; 4678 if (ext4_should_journal_data(inode)) 4679 credits += 2; 4680 handle = ext4_journal_start(inode, EXT4_HT_MISC, credits); 4681 if (IS_ERR(handle)) { 4682 ret = PTR_ERR(handle); 4683 ext4_std_error(inode->i_sb, ret); 4684 goto out_mutex; 4685 } 4686 4687 inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode)); 4688 if (new_size) 4689 ext4_update_inode_size(inode, new_size); 4690 ret = ext4_mark_inode_dirty(handle, inode); 4691 if (unlikely(ret)) 4692 goto out_handle; 4693 /* Zero out partial block at the edges of the range */ 4694 ret = ext4_zero_partial_blocks(handle, inode, offset, len); 4695 if (ret >= 0) 4696 ext4_update_inode_fsync_trans(handle, inode, 1); 4697 4698 if (file->f_flags & O_SYNC) 4699 ext4_handle_sync(handle); 4700 4701 out_handle: 4702 ext4_journal_stop(handle); 4703 out_mutex: 4704 inode_unlock(inode); 4705 return ret; 4706 } 4707 4708 /* 4709 * preallocate space for a file. This implements ext4's fallocate file 4710 * operation, which gets called from sys_fallocate system call. 4711 * For block-mapped files, posix_fallocate should fall back to the method 4712 * of writing zeroes to the required new blocks (the same behavior which is 4713 * expected for file systems which do not support fallocate() system call). 4714 */ 4715 long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len) 4716 { 4717 struct inode *inode = file_inode(file); 4718 loff_t new_size = 0; 4719 unsigned int max_blocks; 4720 int ret = 0; 4721 int flags; 4722 ext4_lblk_t lblk; 4723 unsigned int blkbits = inode->i_blkbits; 4724 4725 /* 4726 * Encrypted inodes can't handle collapse range or insert 4727 * range since we would need to re-encrypt blocks with a 4728 * different IV or XTS tweak (which are based on the logical 4729 * block number). 
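 *
 * For example (purely illustrative): collapsing away the first block
 * of an encrypted regular file would renumber every remaining logical
 * block, so all of the file's data would have to be decrypted and
 * re-encrypted with the new IVs/tweaks.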
 */
	if (IS_ENCRYPTED(inode) &&
	    (mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
		return -EOPNOTSUPP;

	/* Return error if mode is not supported */
	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
		     FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
		     FALLOC_FL_INSERT_RANGE))
		return -EOPNOTSUPP;

	inode_lock(inode);
	ret = ext4_convert_inline_data(inode);
	inode_unlock(inode);
	if (ret)
		goto exit;

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		ret = ext4_punch_hole(file, offset, len);
		goto exit;
	}

	if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		ret = ext4_collapse_range(file, offset, len);
		goto exit;
	}

	if (mode & FALLOC_FL_INSERT_RANGE) {
		ret = ext4_insert_range(file, offset, len);
		goto exit;
	}

	if (mode & FALLOC_FL_ZERO_RANGE) {
		ret = ext4_zero_range(file, offset, len, mode);
		goto exit;
	}
	trace_ext4_fallocate_enter(inode, offset, len, mode);
	lblk = offset >> blkbits;

	max_blocks = EXT4_MAX_BLOCKS(len, offset, blkbits);
	flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT;

	inode_lock(inode);

	/*
	 * We only support preallocation for extent-based files
	 */
	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
	    (offset + len > inode->i_size ||
	     offset + len > EXT4_I(inode)->i_disksize)) {
		new_size = offset + len;
		ret = inode_newsize_ok(inode, new_size);
		if (ret)
			goto out;
	}

	/* Wait for all existing dio workers; newcomers will block on i_rwsem */
	inode_dio_wait(inode);

	ret = file_modified(file);
	if (ret)
		goto out;

	ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size, flags);
	if (ret)
		goto out;

	if (file->f_flags & O_SYNC && EXT4_SB(inode->i_sb)->s_journal) {
		ret = ext4_fc_commit(EXT4_SB(inode->i_sb)->s_journal,
				     EXT4_I(inode)->i_sync_tid);
	}
out:
	inode_unlock(inode);
	trace_ext4_fallocate_exit(inode, offset, max_blocks, ret);
exit:
	return ret;
}

/*
 * This function converts a range of blocks to written extents.
 * The caller of this function will pass the start offset and the size;
 * all unwritten extents within this range will be converted to
 * written extents.
 *
 * This function is called from the direct IO end io callback
 * function, to convert the fallocated extents after the IO is completed.
 * Returns 0 on success.
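 *
 * A hypothetical end-of-IO sketch (offset/length values are
 * illustrative only): after a 1 MiB DIO write at offset 0 completes,
 * the completion path could convert the range with
 *
 *	err = ext4_convert_unwritten_extents(NULL, inode, 0, SZ_1M);
 *
 * where the NULL handle makes this function start its own small
 * transaction for each extent it converts.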
4822 */ 4823 int ext4_convert_unwritten_extents(handle_t *handle, struct inode *inode, 4824 loff_t offset, ssize_t len) 4825 { 4826 unsigned int max_blocks; 4827 int ret = 0, ret2 = 0, ret3 = 0; 4828 struct ext4_map_blocks map; 4829 unsigned int blkbits = inode->i_blkbits; 4830 unsigned int credits = 0; 4831 4832 map.m_lblk = offset >> blkbits; 4833 max_blocks = EXT4_MAX_BLOCKS(len, offset, blkbits); 4834 4835 if (!handle) { 4836 /* 4837 * credits to insert 1 extent into extent tree 4838 */ 4839 credits = ext4_chunk_trans_blocks(inode, max_blocks); 4840 } 4841 while (ret >= 0 && ret < max_blocks) { 4842 map.m_lblk += ret; 4843 map.m_len = (max_blocks -= ret); 4844 if (credits) { 4845 handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, 4846 credits); 4847 if (IS_ERR(handle)) { 4848 ret = PTR_ERR(handle); 4849 break; 4850 } 4851 } 4852 ret = ext4_map_blocks(handle, inode, &map, 4853 EXT4_GET_BLOCKS_IO_CONVERT_EXT); 4854 if (ret <= 0) 4855 ext4_warning(inode->i_sb, 4856 "inode #%lu: block %u: len %u: " 4857 "ext4_ext_map_blocks returned %d", 4858 inode->i_ino, map.m_lblk, 4859 map.m_len, ret); 4860 ret2 = ext4_mark_inode_dirty(handle, inode); 4861 if (credits) { 4862 ret3 = ext4_journal_stop(handle); 4863 if (unlikely(ret3)) 4864 ret2 = ret3; 4865 } 4866 4867 if (ret <= 0 || ret2) 4868 break; 4869 } 4870 return ret > 0 ? ret2 : ret; 4871 } 4872 4873 int ext4_convert_unwritten_io_end_vec(handle_t *handle, ext4_io_end_t *io_end) 4874 { 4875 int ret = 0, err = 0; 4876 struct ext4_io_end_vec *io_end_vec; 4877 4878 /* 4879 * This is somewhat ugly but the idea is clear: When transaction is 4880 * reserved, everything goes into it. Otherwise we rather start several 4881 * smaller transactions for conversion of each extent separately. 4882 */ 4883 if (handle) { 4884 handle = ext4_journal_start_reserved(handle, 4885 EXT4_HT_EXT_CONVERT); 4886 if (IS_ERR(handle)) 4887 return PTR_ERR(handle); 4888 } 4889 4890 list_for_each_entry(io_end_vec, &io_end->list_vec, list) { 4891 ret = ext4_convert_unwritten_extents(handle, io_end->inode, 4892 io_end_vec->offset, 4893 io_end_vec->size); 4894 if (ret) 4895 break; 4896 } 4897 4898 if (handle) 4899 err = ext4_journal_stop(handle); 4900 4901 return ret < 0 ? ret : err; 4902 } 4903 4904 static int ext4_iomap_xattr_fiemap(struct inode *inode, struct iomap *iomap) 4905 { 4906 __u64 physical = 0; 4907 __u64 length = 0; 4908 int blockbits = inode->i_sb->s_blocksize_bits; 4909 int error = 0; 4910 u16 iomap_type; 4911 4912 /* in-inode? 
*/ 4913 if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) { 4914 struct ext4_iloc iloc; 4915 int offset; /* offset of xattr in inode */ 4916 4917 error = ext4_get_inode_loc(inode, &iloc); 4918 if (error) 4919 return error; 4920 physical = (__u64)iloc.bh->b_blocknr << blockbits; 4921 offset = EXT4_GOOD_OLD_INODE_SIZE + 4922 EXT4_I(inode)->i_extra_isize; 4923 physical += offset; 4924 length = EXT4_SB(inode->i_sb)->s_inode_size - offset; 4925 brelse(iloc.bh); 4926 iomap_type = IOMAP_INLINE; 4927 } else if (EXT4_I(inode)->i_file_acl) { /* external block */ 4928 physical = (__u64)EXT4_I(inode)->i_file_acl << blockbits; 4929 length = inode->i_sb->s_blocksize; 4930 iomap_type = IOMAP_MAPPED; 4931 } else { 4932 /* no in-inode or external block for xattr, so return -ENOENT */ 4933 error = -ENOENT; 4934 goto out; 4935 } 4936 4937 iomap->addr = physical; 4938 iomap->offset = 0; 4939 iomap->length = length; 4940 iomap->type = iomap_type; 4941 iomap->flags = 0; 4942 out: 4943 return error; 4944 } 4945 4946 static int ext4_iomap_xattr_begin(struct inode *inode, loff_t offset, 4947 loff_t length, unsigned flags, 4948 struct iomap *iomap, struct iomap *srcmap) 4949 { 4950 int error; 4951 4952 error = ext4_iomap_xattr_fiemap(inode, iomap); 4953 if (error == 0 && (offset >= iomap->length)) 4954 error = -ENOENT; 4955 return error; 4956 } 4957 4958 static const struct iomap_ops ext4_iomap_xattr_ops = { 4959 .iomap_begin = ext4_iomap_xattr_begin, 4960 }; 4961 4962 static int ext4_fiemap_check_ranges(struct inode *inode, u64 start, u64 *len) 4963 { 4964 u64 maxbytes; 4965 4966 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 4967 maxbytes = inode->i_sb->s_maxbytes; 4968 else 4969 maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes; 4970 4971 if (*len == 0) 4972 return -EINVAL; 4973 if (start > maxbytes) 4974 return -EFBIG; 4975 4976 /* 4977 * Shrink request scope to what the fs can actually handle. 4978 */ 4979 if (*len > maxbytes || (maxbytes - *len) < start) 4980 *len = maxbytes - start; 4981 return 0; 4982 } 4983 4984 int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, 4985 u64 start, u64 len) 4986 { 4987 int error = 0; 4988 4989 if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) { 4990 error = ext4_ext_precache(inode); 4991 if (error) 4992 return error; 4993 fieinfo->fi_flags &= ~FIEMAP_FLAG_CACHE; 4994 } 4995 4996 /* 4997 * For bitmap files the maximum size limit could be smaller than 4998 * s_maxbytes, so check len here manually instead of just relying on the 4999 * generic check. 
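	 *
	 * For example (hypothetical numbers): with maxbytes of 2 TiB, a
	 * request of start = 2 TiB - 4 KiB with len = 1 MiB is shrunk by
	 * ext4_fiemap_check_ranges() to len = 4 KiB, so that start + len
	 * never reaches past what the file system can address.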
5000 */ 5001 error = ext4_fiemap_check_ranges(inode, start, &len); 5002 if (error) 5003 return error; 5004 5005 if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) { 5006 fieinfo->fi_flags &= ~FIEMAP_FLAG_XATTR; 5007 return iomap_fiemap(inode, fieinfo, start, len, 5008 &ext4_iomap_xattr_ops); 5009 } 5010 5011 return iomap_fiemap(inode, fieinfo, start, len, &ext4_iomap_report_ops); 5012 } 5013 5014 int ext4_get_es_cache(struct inode *inode, struct fiemap_extent_info *fieinfo, 5015 __u64 start, __u64 len) 5016 { 5017 ext4_lblk_t start_blk, len_blks; 5018 __u64 last_blk; 5019 int error = 0; 5020 5021 if (ext4_has_inline_data(inode)) { 5022 int has_inline; 5023 5024 down_read(&EXT4_I(inode)->xattr_sem); 5025 has_inline = ext4_has_inline_data(inode); 5026 up_read(&EXT4_I(inode)->xattr_sem); 5027 if (has_inline) 5028 return 0; 5029 } 5030 5031 if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) { 5032 error = ext4_ext_precache(inode); 5033 if (error) 5034 return error; 5035 fieinfo->fi_flags &= ~FIEMAP_FLAG_CACHE; 5036 } 5037 5038 error = fiemap_prep(inode, fieinfo, start, &len, 0); 5039 if (error) 5040 return error; 5041 5042 error = ext4_fiemap_check_ranges(inode, start, &len); 5043 if (error) 5044 return error; 5045 5046 start_blk = start >> inode->i_sb->s_blocksize_bits; 5047 last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits; 5048 if (last_blk >= EXT_MAX_BLOCKS) 5049 last_blk = EXT_MAX_BLOCKS-1; 5050 len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1; 5051 5052 /* 5053 * Walk the extent tree gathering extent information 5054 * and pushing extents back to the user. 5055 */ 5056 return ext4_fill_es_cache_info(inode, start_blk, len_blks, fieinfo); 5057 } 5058 5059 /* 5060 * ext4_ext_shift_path_extents: 5061 * Shift the extents of a path structure lying between path[depth].p_ext 5062 * and EXT_LAST_EXTENT(path[depth].p_hdr), by @shift blocks. @SHIFT tells 5063 * if it is right shift or left shift operation. 5064 */ 5065 static int 5066 ext4_ext_shift_path_extents(struct ext4_ext_path *path, ext4_lblk_t shift, 5067 struct inode *inode, handle_t *handle, 5068 enum SHIFT_DIRECTION SHIFT) 5069 { 5070 int depth, err = 0; 5071 struct ext4_extent *ex_start, *ex_last; 5072 bool update = false; 5073 int credits, restart_credits; 5074 depth = path->p_depth; 5075 5076 while (depth >= 0) { 5077 if (depth == path->p_depth) { 5078 ex_start = path[depth].p_ext; 5079 if (!ex_start) 5080 return -EFSCORRUPTED; 5081 5082 ex_last = EXT_LAST_EXTENT(path[depth].p_hdr); 5083 /* leaf + sb + inode */ 5084 credits = 3; 5085 if (ex_start == EXT_FIRST_EXTENT(path[depth].p_hdr)) { 5086 update = true; 5087 /* extent tree + sb + inode */ 5088 credits = depth + 2; 5089 } 5090 5091 restart_credits = ext4_writepage_trans_blocks(inode); 5092 err = ext4_datasem_ensure_credits(handle, inode, credits, 5093 restart_credits, 0); 5094 if (err) { 5095 if (err > 0) 5096 err = -EAGAIN; 5097 goto out; 5098 } 5099 5100 err = ext4_ext_get_access(handle, inode, path + depth); 5101 if (err) 5102 goto out; 5103 5104 while (ex_start <= ex_last) { 5105 if (SHIFT == SHIFT_LEFT) { 5106 le32_add_cpu(&ex_start->ee_block, 5107 -shift); 5108 /* Try to merge to the left. 
 */
				if ((ex_start >
				     EXT_FIRST_EXTENT(path[depth].p_hdr))
				    &&
				    ext4_ext_try_to_merge_right(inode,
						path, ex_start - 1))
					ex_last--;
				else
					ex_start++;
			} else {
				le32_add_cpu(&ex_last->ee_block, shift);
				ext4_ext_try_to_merge_right(inode, path,
							    ex_last);
				ex_last--;
			}
		}
		err = ext4_ext_dirty(handle, inode, path + depth);
		if (err)
			goto out;

		if (--depth < 0 || !update)
			break;
		}

		/* Update index too */
		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			goto out;

		if (SHIFT == SHIFT_LEFT)
			le32_add_cpu(&path[depth].p_idx->ei_block, -shift);
		else
			le32_add_cpu(&path[depth].p_idx->ei_block, shift);
		err = ext4_ext_dirty(handle, inode, path + depth);
		if (err)
			goto out;

		/* we are done if current index is not a starting index */
		if (path[depth].p_idx != EXT_FIRST_INDEX(path[depth].p_hdr))
			break;

		depth--;
	}

out:
	return err;
}

/*
 * ext4_ext_shift_extents:
 * All the extents which lie in the range from @start to the last allocated
 * block for the @inode are shifted either towards the left or the right
 * (depending upon @SHIFT) by @shift blocks.
 * On success, 0 is returned, error otherwise.
 */
static int
ext4_ext_shift_extents(struct inode *inode, handle_t *handle,
		       ext4_lblk_t start, ext4_lblk_t shift,
		       enum SHIFT_DIRECTION SHIFT)
{
	struct ext4_ext_path *path;
	int ret = 0, depth;
	struct ext4_extent *extent;
	ext4_lblk_t stop, *iterator, ex_start, ex_end;
	ext4_lblk_t tmp = EXT_MAX_BLOCKS;

	/* Let path point to the last extent */
	path = ext4_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL,
				EXT4_EX_NOCACHE);
	if (IS_ERR(path))
		return PTR_ERR(path);

	depth = path->p_depth;
	extent = path[depth].p_ext;
	if (!extent)
		goto out;

	stop = le32_to_cpu(extent->ee_block);

	/*
	 * For left shifts, make sure the hole on the left is big enough to
	 * accommodate the shift. For right shifts, make sure the last extent
	 * won't be shifted beyond EXT_MAX_BLOCKS.
	 */
	if (SHIFT == SHIFT_LEFT) {
		path = ext4_find_extent(inode, start - 1, &path,
					EXT4_EX_NOCACHE);
		if (IS_ERR(path))
			return PTR_ERR(path);
		depth = path->p_depth;
		extent = path[depth].p_ext;
		if (extent) {
			ex_start = le32_to_cpu(extent->ee_block);
			ex_end = le32_to_cpu(extent->ee_block) +
				ext4_ext_get_actual_len(extent);
		} else {
			ex_start = 0;
			ex_end = 0;
		}

		if ((start == ex_start && shift > ex_start) ||
		    (shift > start - ex_end)) {
			ret = -EINVAL;
			goto out;
		}
	} else {
		if (shift > EXT_MAX_BLOCKS -
		    (stop + ext4_ext_get_actual_len(extent))) {
			ret = -EINVAL;
			goto out;
		}
	}

	/*
	 * In case of a left shift, iterator points to start and is increased
	 * until we reach stop. In case of a right shift, iterator points to
	 * stop and is decreased until we reach start.
	 */
again:
	ret = 0;
	if (SHIFT == SHIFT_LEFT)
		iterator = &start;
	else
		iterator = &stop;

	if (tmp != EXT_MAX_BLOCKS)
		*iterator = tmp;

	/*
	 * It's safe to start updating extents.
	 * Start and stop are unsigned, so in case of a right shift, when
	 * the extent starting at block 0 is reached, iterator is set to
	 * NULL to indicate the end of the loop.
	 */
	while (iterator && start <= stop) {
		path = ext4_find_extent(inode, *iterator, &path,
					EXT4_EX_NOCACHE);
		if (IS_ERR(path))
			return PTR_ERR(path);
		depth = path->p_depth;
		extent = path[depth].p_ext;
		if (!extent) {
			EXT4_ERROR_INODE(inode, "unexpected hole at %lu",
					 (unsigned long) *iterator);
			return -EFSCORRUPTED;
		}
		if (SHIFT == SHIFT_LEFT && *iterator >
		    le32_to_cpu(extent->ee_block)) {
			/* Hole, move to the next extent */
			if (extent < EXT_LAST_EXTENT(path[depth].p_hdr)) {
				path[depth].p_ext++;
			} else {
				*iterator = ext4_ext_next_allocated_block(path);
				continue;
			}
		}

		tmp = *iterator;
		if (SHIFT == SHIFT_LEFT) {
			extent = EXT_LAST_EXTENT(path[depth].p_hdr);
			*iterator = le32_to_cpu(extent->ee_block) +
					ext4_ext_get_actual_len(extent);
		} else {
			extent = EXT_FIRST_EXTENT(path[depth].p_hdr);
			if (le32_to_cpu(extent->ee_block) > start)
				*iterator = le32_to_cpu(extent->ee_block) - 1;
			else if (le32_to_cpu(extent->ee_block) == start)
				iterator = NULL;
			else {
				extent = EXT_LAST_EXTENT(path[depth].p_hdr);
				while (le32_to_cpu(extent->ee_block) >= start)
					extent--;

				if (extent == EXT_LAST_EXTENT(path[depth].p_hdr))
					break;

				extent++;
				iterator = NULL;
			}
			path[depth].p_ext = extent;
		}
		ret = ext4_ext_shift_path_extents(path, shift, inode,
						  handle, SHIFT);
		/* iterator can be NULL which means we should break */
		if (ret == -EAGAIN)
			goto again;
		if (ret)
			break;
	}
out:
	ext4_free_ext_path(path);
	return ret;
}

/*
 * ext4_collapse_range:
 * This implements the collapse-range functionality of fallocate for ext4.
 * Returns 0 on success, non-zero on error.
 */
static int ext4_collapse_range(struct file *file, loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	struct super_block *sb = inode->i_sb;
	struct address_space *mapping = inode->i_mapping;
	ext4_lblk_t punch_start, punch_stop;
	handle_t *handle;
	unsigned int credits;
	loff_t new_size, ioffset;
	int ret;

	/*
	 * We need to test this early because xfstests assumes that a
	 * collapse range of (0, 1) will return EOPNOTSUPP if the file
	 * system does not support collapse range.
	 */
	if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		return -EOPNOTSUPP;

	/* Collapse range works only on fs cluster size aligned regions.
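	 *
	 * For example (illustrative, non-bigalloc case): on a 4 KiB block
	 * file system, EXT4_CLUSTER_SIZE(sb) is 4096, so offset = 8192
	 * with len = 4096 collapses exactly one block, while an unaligned
	 * offset such as 4100 is rejected with -EINVAL below.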
*/ 5326 if (!IS_ALIGNED(offset | len, EXT4_CLUSTER_SIZE(sb))) 5327 return -EINVAL; 5328 5329 trace_ext4_collapse_range(inode, offset, len); 5330 5331 punch_start = offset >> EXT4_BLOCK_SIZE_BITS(sb); 5332 punch_stop = (offset + len) >> EXT4_BLOCK_SIZE_BITS(sb); 5333 5334 inode_lock(inode); 5335 /* 5336 * There is no need to overlap collapse range with EOF, in which case 5337 * it is effectively a truncate operation 5338 */ 5339 if (offset + len >= inode->i_size) { 5340 ret = -EINVAL; 5341 goto out_mutex; 5342 } 5343 5344 /* Currently just for extent based files */ 5345 if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) { 5346 ret = -EOPNOTSUPP; 5347 goto out_mutex; 5348 } 5349 5350 /* Wait for existing dio to complete */ 5351 inode_dio_wait(inode); 5352 5353 ret = file_modified(file); 5354 if (ret) 5355 goto out_mutex; 5356 5357 /* 5358 * Prevent page faults from reinstantiating pages we have released from 5359 * page cache. 5360 */ 5361 filemap_invalidate_lock(mapping); 5362 5363 ret = ext4_break_layouts(inode); 5364 if (ret) 5365 goto out_mmap; 5366 5367 /* 5368 * Need to round down offset to be aligned with page size boundary 5369 * for page size > block size. 5370 */ 5371 ioffset = round_down(offset, PAGE_SIZE); 5372 /* 5373 * Write tail of the last page before removed range since it will get 5374 * removed from the page cache below. 5375 */ 5376 ret = filemap_write_and_wait_range(mapping, ioffset, offset); 5377 if (ret) 5378 goto out_mmap; 5379 /* 5380 * Write data that will be shifted to preserve them when discarding 5381 * page cache below. We are also protected from pages becoming dirty 5382 * by i_rwsem and invalidate_lock. 5383 */ 5384 ret = filemap_write_and_wait_range(mapping, offset + len, 5385 LLONG_MAX); 5386 if (ret) 5387 goto out_mmap; 5388 truncate_pagecache(inode, ioffset); 5389 5390 credits = ext4_writepage_trans_blocks(inode); 5391 handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits); 5392 if (IS_ERR(handle)) { 5393 ret = PTR_ERR(handle); 5394 goto out_mmap; 5395 } 5396 ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_FALLOC_RANGE, handle); 5397 5398 down_write(&EXT4_I(inode)->i_data_sem); 5399 ext4_discard_preallocations(inode); 5400 ext4_es_remove_extent(inode, punch_start, EXT_MAX_BLOCKS - punch_start); 5401 5402 ret = ext4_ext_remove_space(inode, punch_start, punch_stop - 1); 5403 if (ret) { 5404 up_write(&EXT4_I(inode)->i_data_sem); 5405 goto out_stop; 5406 } 5407 ext4_discard_preallocations(inode); 5408 5409 ret = ext4_ext_shift_extents(inode, handle, punch_stop, 5410 punch_stop - punch_start, SHIFT_LEFT); 5411 if (ret) { 5412 up_write(&EXT4_I(inode)->i_data_sem); 5413 goto out_stop; 5414 } 5415 5416 new_size = inode->i_size - len; 5417 i_size_write(inode, new_size); 5418 EXT4_I(inode)->i_disksize = new_size; 5419 5420 up_write(&EXT4_I(inode)->i_data_sem); 5421 if (IS_SYNC(inode)) 5422 ext4_handle_sync(handle); 5423 inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode)); 5424 ret = ext4_mark_inode_dirty(handle, inode); 5425 ext4_update_inode_fsync_trans(handle, inode, 1); 5426 5427 out_stop: 5428 ext4_journal_stop(handle); 5429 out_mmap: 5430 filemap_invalidate_unlock(mapping); 5431 out_mutex: 5432 inode_unlock(inode); 5433 return ret; 5434 } 5435 5436 /* 5437 * ext4_insert_range: 5438 * This function implements the FALLOC_FL_INSERT_RANGE flag of fallocate. 5439 * The data blocks starting from @offset to the EOF are shifted by @len 5440 * towards right to create a hole in the @inode. Inode size is increased 5441 * by len bytes. 
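 * For example (illustrative values): on a 4 KiB block file system,
 * inserting len = 8192 bytes at offset = 16384 shifts all extents
 * from logical block 4 onwards right by 2 blocks and grows i_size
 * by 8192 bytes.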
5442 * Returns 0 on success, error otherwise. 5443 */ 5444 static int ext4_insert_range(struct file *file, loff_t offset, loff_t len) 5445 { 5446 struct inode *inode = file_inode(file); 5447 struct super_block *sb = inode->i_sb; 5448 struct address_space *mapping = inode->i_mapping; 5449 handle_t *handle; 5450 struct ext4_ext_path *path; 5451 struct ext4_extent *extent; 5452 ext4_lblk_t offset_lblk, len_lblk, ee_start_lblk = 0; 5453 unsigned int credits, ee_len; 5454 int ret = 0, depth, split_flag = 0; 5455 loff_t ioffset; 5456 5457 /* 5458 * We need to test this early because xfstests assumes that an 5459 * insert range of (0, 1) will return EOPNOTSUPP if the file 5460 * system does not support insert range. 5461 */ 5462 if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 5463 return -EOPNOTSUPP; 5464 5465 /* Insert range works only on fs cluster size aligned regions. */ 5466 if (!IS_ALIGNED(offset | len, EXT4_CLUSTER_SIZE(sb))) 5467 return -EINVAL; 5468 5469 trace_ext4_insert_range(inode, offset, len); 5470 5471 offset_lblk = offset >> EXT4_BLOCK_SIZE_BITS(sb); 5472 len_lblk = len >> EXT4_BLOCK_SIZE_BITS(sb); 5473 5474 inode_lock(inode); 5475 /* Currently just for extent based files */ 5476 if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) { 5477 ret = -EOPNOTSUPP; 5478 goto out_mutex; 5479 } 5480 5481 /* Check whether the maximum file size would be exceeded */ 5482 if (len > inode->i_sb->s_maxbytes - inode->i_size) { 5483 ret = -EFBIG; 5484 goto out_mutex; 5485 } 5486 5487 /* Offset must be less than i_size */ 5488 if (offset >= inode->i_size) { 5489 ret = -EINVAL; 5490 goto out_mutex; 5491 } 5492 5493 /* Wait for existing dio to complete */ 5494 inode_dio_wait(inode); 5495 5496 ret = file_modified(file); 5497 if (ret) 5498 goto out_mutex; 5499 5500 /* 5501 * Prevent page faults from reinstantiating pages we have released from 5502 * page cache. 5503 */ 5504 filemap_invalidate_lock(mapping); 5505 5506 ret = ext4_break_layouts(inode); 5507 if (ret) 5508 goto out_mmap; 5509 5510 /* 5511 * Need to round down to align start offset to page size boundary 5512 * for page size > block size. 
 */
	ioffset = round_down(offset, PAGE_SIZE);
	/* Write out all dirty pages */
	ret = filemap_write_and_wait_range(inode->i_mapping, ioffset,
					   LLONG_MAX);
	if (ret)
		goto out_mmap;
	truncate_pagecache(inode, ioffset);

	credits = ext4_writepage_trans_blocks(inode);
	handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out_mmap;
	}
	ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_FALLOC_RANGE, handle);

	/* Expand file to avoid data loss if there is error while shifting */
	inode->i_size += len;
	EXT4_I(inode)->i_disksize += len;
	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
	ret = ext4_mark_inode_dirty(handle, inode);
	if (ret)
		goto out_stop;

	down_write(&EXT4_I(inode)->i_data_sem);
	ext4_discard_preallocations(inode);

	path = ext4_find_extent(inode, offset_lblk, NULL, 0);
	if (IS_ERR(path)) {
		up_write(&EXT4_I(inode)->i_data_sem);
		ret = PTR_ERR(path);
		goto out_stop;
	}

	depth = ext_depth(inode);
	extent = path[depth].p_ext;
	if (extent) {
		ee_start_lblk = le32_to_cpu(extent->ee_block);
		ee_len = ext4_ext_get_actual_len(extent);

		/*
		 * If offset_lblk is not the starting block of the extent,
		 * split the extent at @offset_lblk
		 */
		if ((offset_lblk > ee_start_lblk) &&
		    (offset_lblk < (ee_start_lblk + ee_len))) {
			if (ext4_ext_is_unwritten(extent))
				split_flag = EXT4_EXT_MARK_UNWRIT1 |
					     EXT4_EXT_MARK_UNWRIT2;
			ret = ext4_split_extent_at(handle, inode, &path,
					offset_lblk, split_flag,
					EXT4_EX_NOCACHE |
					EXT4_GET_BLOCKS_PRE_IO |
					EXT4_GET_BLOCKS_METADATA_NOFAIL);
		}

		ext4_free_ext_path(path);
		if (ret < 0) {
			up_write(&EXT4_I(inode)->i_data_sem);
			goto out_stop;
		}
	} else {
		ext4_free_ext_path(path);
	}

	ext4_es_remove_extent(inode, offset_lblk, EXT_MAX_BLOCKS - offset_lblk);

	/*
	 * if offset_lblk lies in a hole which is at the start of the file,
	 * use ee_start_lblk to shift extents
	 */
	ret = ext4_ext_shift_extents(inode, handle,
		max(ee_start_lblk, offset_lblk), len_lblk, SHIFT_RIGHT);

	up_write(&EXT4_I(inode)->i_data_sem);
	if (IS_SYNC(inode))
		ext4_handle_sync(handle);
	if (ret >= 0)
		ext4_update_inode_fsync_trans(handle, inode, 1);

out_stop:
	ext4_journal_stop(handle);
out_mmap:
	filemap_invalidate_unlock(mapping);
out_mutex:
	inode_unlock(inode);
	return ret;
}

/**
 * ext4_swap_extents() - Swap extents between two inodes
 * @handle: handle for this transaction
 * @inode1: First inode
 * @inode2: Second inode
 * @lblk1: Start block for first inode
 * @lblk2: Start block for second inode
 * @count: Number of blocks to swap
 * @unwritten: Mark second inode's extents as unwritten after swap
 * @erp: Pointer to save error value
 *
 * This helper routine does exactly what it promises: "swap extents". All
 * other stuff such as page-cache locking consistency, bh mapping
 * consistency or extent's data copying must be performed by the caller.
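 *
 * A hypothetical invocation (illustrative only), swapping the first 16
 * blocks of two inodes that are already locked as described below:
 *
 *	int err = 0;
 *	int replaced;
 *
 *	replaced = ext4_swap_extents(handle, inode1, inode2,
 *				     0, 0, 16, 0, &err);
 *
 * The return value is the number of blocks actually swapped; it may be
 * less than @count when an error is reported through @erp.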
/**
 * ext4_swap_extents() - Swap extents between two inodes
 * @handle: handle for this transaction
 * @inode1: First inode
 * @inode2: Second inode
 * @lblk1: Start block for first inode
 * @lblk2: Start block for second inode
 * @count: Number of blocks to swap
 * @unwritten: Mark second inode's extents as unwritten after swap
 * @erp: Pointer to save error value
 *
 * This helper routine does exactly what it promises: swap extents. All
 * other work, such as page cache locking consistency, bh mapping
 * consistency and copying of extent data, must be performed by the caller.
 * Locking:
 *	i_rwsem is held for both inodes
 *	i_data_sem is locked for write for both inodes
 * Assumptions:
 *	All pages from requested range are locked for both inodes
 */
int
ext4_swap_extents(handle_t *handle, struct inode *inode1,
		  struct inode *inode2, ext4_lblk_t lblk1, ext4_lblk_t lblk2,
		  ext4_lblk_t count, int unwritten, int *erp)
{
	struct ext4_ext_path *path1 = NULL;
	struct ext4_ext_path *path2 = NULL;
	int replaced_count = 0;

	BUG_ON(!rwsem_is_locked(&EXT4_I(inode1)->i_data_sem));
	BUG_ON(!rwsem_is_locked(&EXT4_I(inode2)->i_data_sem));
	BUG_ON(!inode_is_locked(inode1));
	BUG_ON(!inode_is_locked(inode2));

	ext4_es_remove_extent(inode1, lblk1, count);
	ext4_es_remove_extent(inode2, lblk2, count);

	while (count) {
		struct ext4_extent *ex1, *ex2, tmp_ex;
		ext4_lblk_t e1_blk, e2_blk;
		int e1_len, e2_len, len;
		int split = 0;

		path1 = ext4_find_extent(inode1, lblk1, NULL, EXT4_EX_NOCACHE);
		if (IS_ERR(path1)) {
			*erp = PTR_ERR(path1);
			path1 = NULL;
		finish:
			count = 0;
			goto repeat;
		}
		path2 = ext4_find_extent(inode2, lblk2, NULL, EXT4_EX_NOCACHE);
		if (IS_ERR(path2)) {
			*erp = PTR_ERR(path2);
			path2 = NULL;
			goto finish;
		}
		ex1 = path1[path1->p_depth].p_ext;
		ex2 = path2[path2->p_depth].p_ext;
		/* Do we have something to swap? */
		if (unlikely(!ex2 || !ex1))
			goto finish;

		e1_blk = le32_to_cpu(ex1->ee_block);
		e2_blk = le32_to_cpu(ex2->ee_block);
		e1_len = ext4_ext_get_actual_len(ex1);
		e2_len = ext4_ext_get_actual_len(ex2);

		/* Hole handling */
		if (!in_range(lblk1, e1_blk, e1_len) ||
		    !in_range(lblk2, e2_blk, e2_len)) {
			ext4_lblk_t next1, next2;

			/* if hole after extent, then go to next extent */
			next1 = ext4_ext_next_allocated_block(path1);
			next2 = ext4_ext_next_allocated_block(path2);
			/* If hole before extent, then shift to that extent */
			if (e1_blk > lblk1)
				next1 = e1_blk;
			if (e2_blk > lblk2)
				next2 = e2_blk;
			/* Do we have something to swap? */
			if (next1 == EXT_MAX_BLOCKS || next2 == EXT_MAX_BLOCKS)
				goto finish;
			/* Move to the rightmost boundary */
			len = next1 - lblk1;
			if (len < next2 - lblk2)
				len = next2 - lblk2;
			if (len > count)
				len = count;
			lblk1 += len;
			lblk2 += len;
			count -= len;
			goto repeat;
		}

		/* Prepare left boundary */
		if (e1_blk < lblk1) {
			split = 1;
			*erp = ext4_force_split_extent_at(handle, inode1,
							  &path1, lblk1, 0);
			if (unlikely(*erp))
				goto finish;
		}
		if (e2_blk < lblk2) {
			split = 1;
			*erp = ext4_force_split_extent_at(handle, inode2,
							  &path2, lblk2, 0);
			if (unlikely(*erp))
				goto finish;
		}
		/* ext4_split_extent_at() may result in leaf extent split,
		 * the path must be revalidated. */
		if (split)
			goto repeat;
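		/*
		 * An illustrative picture of the boundary preparation: when
		 * the range to swap begins inside an existing extent, the
		 * extent is split at lblk1 (or lblk2) so that only whole
		 * extents are exchanged:
		 *
		 *	before:  [e1_blk ............... e1_blk + e1_len)
		 *	                    ^ lblk1
		 *	after:   [e1_blk ...)[lblk1 .... e1_blk + e1_len)
		 *
		 * The right boundary below is trimmed the same way, so both
		 * extents end up covering exactly [lblk, lblk + len).
		 */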
		/* Prepare right boundary */
		len = count;
		if (len > e1_blk + e1_len - lblk1)
			len = e1_blk + e1_len - lblk1;
		if (len > e2_blk + e2_len - lblk2)
			len = e2_blk + e2_len - lblk2;

		if (len != e1_len) {
			split = 1;
			*erp = ext4_force_split_extent_at(handle, inode1,
							  &path1, lblk1 + len, 0);
			if (unlikely(*erp))
				goto finish;
		}
		if (len != e2_len) {
			split = 1;
			*erp = ext4_force_split_extent_at(handle, inode2,
							  &path2, lblk2 + len, 0);
			if (*erp)
				goto finish;
		}
		/* ext4_split_extent_at() may result in leaf extent split,
		 * the path must be revalidated. */
		if (split)
			goto repeat;

		BUG_ON(e2_len != e1_len);
		*erp = ext4_ext_get_access(handle, inode1, path1 + path1->p_depth);
		if (unlikely(*erp))
			goto finish;
		*erp = ext4_ext_get_access(handle, inode2, path2 + path2->p_depth);
		if (unlikely(*erp))
			goto finish;

		/* Both extents are fully inside boundaries. Swap them now */
		tmp_ex = *ex1;
		ext4_ext_store_pblock(ex1, ext4_ext_pblock(ex2));
		ext4_ext_store_pblock(ex2, ext4_ext_pblock(&tmp_ex));
		ex1->ee_len = cpu_to_le16(e2_len);
		ex2->ee_len = cpu_to_le16(e1_len);
		if (unwritten)
			ext4_ext_mark_unwritten(ex2);
		if (ext4_ext_is_unwritten(&tmp_ex))
			ext4_ext_mark_unwritten(ex1);

		ext4_ext_try_to_merge(handle, inode2, path2, ex2);
		ext4_ext_try_to_merge(handle, inode1, path1, ex1);
		*erp = ext4_ext_dirty(handle, inode2, path2 +
				      path2->p_depth);
		if (unlikely(*erp))
			goto finish;
		*erp = ext4_ext_dirty(handle, inode1, path1 +
				      path1->p_depth);
		/*
		 * Looks scary, eh? The second inode already points to the new
		 * blocks and was successfully dirtied. But luckily an error
		 * here can only be a journal error, in which case the whole
		 * transaction will be aborted anyway.
		 */
		if (unlikely(*erp))
			goto finish;
		lblk1 += len;
		lblk2 += len;
		replaced_count += len;
		count -= len;

	repeat:
		ext4_free_ext_path(path1);
		ext4_free_ext_path(path2);
		path1 = path2 = NULL;
	}
	return replaced_count;
}
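/*
 * A condensed sketch of how a caller is expected to drive
 * ext4_swap_extents(), modelled loosely on the EXT4_IOC_MOVE_EXT path
 * (illustrative; the real caller carries much more state):
 *
 *	int err = 0;
 *	ext4_lblk_t done;
 *
 *	lock_two_nondirectories(inode1, inode2);	// i_rwsem on both
 *	ext4_double_down_write_data_sem(inode1, inode2);
 *	done = ext4_swap_extents(handle, inode1, inode2,
 *				 lblk1, lblk2, count, 1, &err);
 *	ext4_double_up_write_data_sem(inode1, inode2);
 *	unlock_two_nondirectories(inode1, inode2);
 *
 * The return value is the number of blocks actually swapped; *erp must
 * be checked separately, since a short count with *erp == 0 just means
 * the walk ran into the end of one file's extent tree.
 */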
/*
 * ext4_clu_mapped - determine whether any block in a logical cluster has
 *                   been mapped to a physical cluster
 *
 * @inode - file containing the logical cluster
 * @lclu - logical cluster of interest
 *
 * Returns 1 if any block in the logical cluster is mapped, signifying
 * that a physical cluster has been allocated for it. Otherwise,
 * returns 0. Can also return negative error codes. Derived from
 * ext4_ext_map_blocks().
 */
int ext4_clu_mapped(struct inode *inode, ext4_lblk_t lclu)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_ext_path *path;
	int depth, mapped = 0, err = 0;
	struct ext4_extent *extent;
	ext4_lblk_t first_lblk, first_lclu, last_lclu;

	/*
	 * if data can be stored inline, the logical cluster isn't
	 * mapped - no physical clusters have been allocated, and the
	 * file has no extents
	 */
	if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA) ||
	    ext4_has_inline_data(inode))
		return 0;

	/* search for the extent closest to the first block in the cluster */
	path = ext4_find_extent(inode, EXT4_C2B(sbi, lclu), NULL, 0);
	if (IS_ERR(path)) {
		err = PTR_ERR(path);
		path = NULL;
		goto out;
	}

	depth = ext_depth(inode);

	/*
	 * A consistent leaf must not be empty. This situation is possible,
	 * though, _during_ tree modification, and it's why an assert can't
	 * be put in ext4_find_extent().
	 */
	if (unlikely(path[depth].p_ext == NULL && depth != 0)) {
		EXT4_ERROR_INODE(inode,
			"bad extent address - lblock: %lu, depth: %d, pblock: %lld",
			(unsigned long) EXT4_C2B(sbi, lclu),
			depth, path[depth].p_block);
		err = -EFSCORRUPTED;
		goto out;
	}

	extent = path[depth].p_ext;

	/* can't be mapped if the extent tree is empty */
	if (extent == NULL)
		goto out;

	first_lblk = le32_to_cpu(extent->ee_block);
	first_lclu = EXT4_B2C(sbi, first_lblk);

	/*
	 * Three possible outcomes at this point - found extent spanning
	 * the target cluster, to the left of the target cluster, or to the
	 * right of the target cluster. The first two cases are handled here.
	 * The last case indicates the target cluster is not mapped.
	 */
	if (lclu >= first_lclu) {
		last_lclu = EXT4_B2C(sbi, first_lblk +
				     ext4_ext_get_actual_len(extent) - 1);
		if (lclu <= last_lclu) {
			mapped = 1;
		} else {
			first_lblk = ext4_ext_next_allocated_block(path);
			first_lclu = EXT4_B2C(sbi, first_lblk);
			if (lclu == first_lclu)
				mapped = 1;
		}
	}

out:
	ext4_free_ext_path(path);

	return err ? err : mapped;
}
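/*
 * A worked example of the cluster arithmetic above (illustrative values,
 * assuming a bigalloc file system with a cluster ratio of 16 blocks): an
 * extent with ee_block 20 and length 8 covers blocks 20-27, so
 * EXT4_B2C() yields first_lclu == last_lclu == 1, and
 * ext4_clu_mapped(inode, 1) returns 1. Clusters 0 and 2 report 0 unless
 * some other extent touches blocks 0-15 or 32-47.
 */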
/*
 * Update the physical block address and unwritten status of the extent
 * starting at logical block @start and of length @len. If such an extent
 * doesn't exist, this function splits the extent tree appropriately to
 * create one. This function is called in the fast commit replay path.
 * Returns 0 on success and an error on failure.
 */
int ext4_ext_replay_update_ex(struct inode *inode, ext4_lblk_t start,
			      int len, int unwritten, ext4_fsblk_t pblk)
{
	struct ext4_ext_path *path;
	struct ext4_extent *ex;
	int ret;

	path = ext4_find_extent(inode, start, NULL, 0);
	if (IS_ERR(path))
		return PTR_ERR(path);
	ex = path[path->p_depth].p_ext;
	if (!ex) {
		ret = -EFSCORRUPTED;
		goto out;
	}

	if (le32_to_cpu(ex->ee_block) != start ||
	    ext4_ext_get_actual_len(ex) != len) {
		/* We need to split this extent to match our extent first */
		down_write(&EXT4_I(inode)->i_data_sem);
		ret = ext4_force_split_extent_at(NULL, inode, &path, start, 1);
		up_write(&EXT4_I(inode)->i_data_sem);
		if (ret)
			goto out;

		path = ext4_find_extent(inode, start, &path, 0);
		if (IS_ERR(path))
			return PTR_ERR(path);
		ex = path[path->p_depth].p_ext;
		WARN_ON(le32_to_cpu(ex->ee_block) != start);

		if (ext4_ext_get_actual_len(ex) != len) {
			down_write(&EXT4_I(inode)->i_data_sem);
			ret = ext4_force_split_extent_at(NULL, inode, &path,
							 start + len, 1);
			up_write(&EXT4_I(inode)->i_data_sem);
			if (ret)
				goto out;

			path = ext4_find_extent(inode, start, &path, 0);
			if (IS_ERR(path))
				return PTR_ERR(path);
			ex = path[path->p_depth].p_ext;
		}
	}
	if (unwritten)
		ext4_ext_mark_unwritten(ex);
	else
		ext4_ext_mark_initialized(ex);
	ext4_ext_store_pblock(ex, pblk);
	down_write(&EXT4_I(inode)->i_data_sem);
	ret = ext4_ext_dirty(NULL, inode, &path[path->p_depth]);
	up_write(&EXT4_I(inode)->i_data_sem);
out:
	ext4_free_ext_path(path);
	ext4_mark_inode_dirty(NULL, inode);
	return ret;
}

/* Try to shrink the extent tree */
void ext4_ext_replay_shrink_inode(struct inode *inode, ext4_lblk_t end)
{
	struct ext4_ext_path *path = NULL;
	struct ext4_extent *ex;
	ext4_lblk_t old_cur, cur = 0;

	while (cur < end) {
		path = ext4_find_extent(inode, cur, NULL, 0);
		if (IS_ERR(path))
			return;
		ex = path[path->p_depth].p_ext;
		if (!ex) {
			ext4_free_ext_path(path);
			ext4_mark_inode_dirty(NULL, inode);
			return;
		}
		old_cur = cur;
		cur = le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex);
		if (cur <= old_cur)
			cur = old_cur + 1;
		ext4_ext_try_to_merge(NULL, inode, path, ex);
		down_write(&EXT4_I(inode)->i_data_sem);
		ext4_ext_dirty(NULL, inode, &path[path->p_depth]);
		up_write(&EXT4_I(inode)->i_data_sem);
		ext4_mark_inode_dirty(NULL, inode);
		ext4_free_ext_path(path);
	}
}

/* Check if *cur is a hole and if it is, skip it */
static int skip_hole(struct inode *inode, ext4_lblk_t *cur)
{
	int ret;
	struct ext4_map_blocks map;

	map.m_lblk = *cur;
	map.m_len = ((inode->i_size) >> inode->i_sb->s_blocksize_bits) - *cur;

	ret = ext4_map_blocks(NULL, inode, &map, 0);
	if (ret < 0)
		return ret;
	if (ret != 0)
		return 0;
	*cur = *cur + map.m_len;
	return 0;
}
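/*
 * skip_hole() leans on the lookup semantics of ext4_map_blocks(): called
 * with a NULL handle and no flags it is a pure lookup that returns the
 * number of mapped blocks, or 0 with map.m_len set to the length of the
 * hole at map.m_lblk. For example (illustrative): if the file's only
 * extent covers blocks 10-19, a call with *cur == 0 gets ret == 0 and
 * map.m_len == 10, so *cur advances to the first mapped block, 10.
 */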
/* Count the number of blocks used by this inode and update i_blocks */
int ext4_ext_replay_set_iblocks(struct inode *inode)
{
	struct ext4_ext_path *path = NULL, *path2 = NULL;
	struct ext4_extent *ex;
	ext4_lblk_t cur = 0, end;
	int numblks = 0, i, ret = 0;
	ext4_fsblk_t cmp1, cmp2;
	struct ext4_map_blocks map;

	/* Determine the size of the file first */
	path = ext4_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL,
				EXT4_EX_NOCACHE);
	if (IS_ERR(path))
		return PTR_ERR(path);
	ex = path[path->p_depth].p_ext;
	if (!ex) {
		ext4_free_ext_path(path);
		goto out;
	}
	end = le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex);
	ext4_free_ext_path(path);

	/* Count the number of data blocks */
	cur = 0;
	while (cur < end) {
		map.m_lblk = cur;
		map.m_len = end - cur;
		ret = ext4_map_blocks(NULL, inode, &map, 0);
		if (ret < 0)
			break;
		if (ret > 0)
			numblks += ret;
		cur = cur + map.m_len;
	}

	/*
	 * Count the number of extent tree blocks. We do this by looking up
	 * two successive extents and comparing their paths. When the paths
	 * of two successive extents differ, we compare the blocks in the
	 * path at each level and increment iblocks by the total number of
	 * differences found.
	 */
	cur = 0;
	ret = skip_hole(inode, &cur);
	if (ret < 0)
		goto out;
	path = ext4_find_extent(inode, cur, NULL, 0);
	if (IS_ERR(path))
		goto out;
	numblks += path->p_depth;
	ext4_free_ext_path(path);
	while (cur < end) {
		path = ext4_find_extent(inode, cur, NULL, 0);
		if (IS_ERR(path))
			break;
		ex = path[path->p_depth].p_ext;
		if (!ex) {
			ext4_free_ext_path(path);
			return 0;
		}
		cur = max(cur + 1, le32_to_cpu(ex->ee_block) +
			  ext4_ext_get_actual_len(ex));
		ret = skip_hole(inode, &cur);
		if (ret < 0) {
			ext4_free_ext_path(path);
			break;
		}
		path2 = ext4_find_extent(inode, cur, NULL, 0);
		if (IS_ERR(path2)) {
			ext4_free_ext_path(path);
			break;
		}
		for (i = 0; i <= max(path->p_depth, path2->p_depth); i++) {
			cmp1 = cmp2 = 0;
			if (i <= path->p_depth)
				cmp1 = path[i].p_bh ?
					path[i].p_bh->b_blocknr : 0;
			if (i <= path2->p_depth)
				cmp2 = path2[i].p_bh ?
					path2[i].p_bh->b_blocknr : 0;
			if (cmp1 != cmp2 && cmp2 != 0)
				numblks++;
		}
		ext4_free_ext_path(path);
		ext4_free_ext_path(path2);
	}

out:
	inode->i_blocks = numblks << (inode->i_sb->s_blocksize_bits - 9);
	ext4_mark_inode_dirty(NULL, inode);
	return 0;
}
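/*
 * Note on the i_blocks update above: i_blocks counts 512-byte sectors,
 * hence the shift by (s_blocksize_bits - 9). For example, with a 4k
 * block size s_blocksize_bits is 12, so 100 file system blocks become
 * 100 << 3 == 800 sectors.
 */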
int ext4_ext_clear_bb(struct inode *inode)
{
	struct ext4_ext_path *path = NULL;
	struct ext4_extent *ex;
	ext4_lblk_t cur = 0, end;
	int j, ret = 0;
	struct ext4_map_blocks map;

	if (ext4_test_inode_flag(inode, EXT4_INODE_INLINE_DATA))
		return 0;

	/* Determine the size of the file first */
	path = ext4_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL,
				EXT4_EX_NOCACHE);
	if (IS_ERR(path))
		return PTR_ERR(path);
	ex = path[path->p_depth].p_ext;
	if (!ex) {
		ext4_free_ext_path(path);
		return 0;
	}
	end = le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex);
	ext4_free_ext_path(path);

	cur = 0;
	while (cur < end) {
		map.m_lblk = cur;
		map.m_len = end - cur;
		ret = ext4_map_blocks(NULL, inode, &map, 0);
		if (ret < 0)
			break;
		if (ret > 0) {
			path = ext4_find_extent(inode, map.m_lblk, NULL, 0);
			if (!IS_ERR_OR_NULL(path)) {
				for (j = 0; j < path->p_depth; j++) {
					ext4_mb_mark_bb(inode->i_sb,
							path[j].p_block, 1, false);
					ext4_fc_record_regions(inode->i_sb, inode->i_ino,
							0, path[j].p_block, 1, 1);
				}
				ext4_free_ext_path(path);
			}
			ext4_mb_mark_bb(inode->i_sb, map.m_pblk, map.m_len, false);
			ext4_fc_record_regions(inode->i_sb, inode->i_ino,
					map.m_lblk, map.m_pblk, map.m_len, 1);
		}
		cur = cur + map.m_len;
	}

	return 0;
}