/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2010 Zheng Liu <lz@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/vnode.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/stat.h>

#include <fs/ext2fs/ext2_mount.h>
#include <fs/ext2fs/fs.h>
#include <fs/ext2fs/inode.h>
#include <fs/ext2fs/ext2fs.h>
#include <fs/ext2fs/ext2_extents.h>
#include <fs/ext2fs/ext2_extern.h>

static MALLOC_DEFINE(M_EXT2EXTENTS, "ext2_extents", "EXT2 extents");

#ifdef EXT2FS_DEBUG
static void
ext4_ext_print_extent(struct ext4_extent *ep)
{

	printf("    ext %p => (blk %u len %u start %ju)\n",
	    ep, ep->e_blk, ep->e_len,
	    (uint64_t)ep->e_start_hi << 32 | ep->e_start_lo);
}

static void ext4_ext_print_header(struct inode *ip,
    struct ext4_extent_header *ehp);

static void
ext4_ext_print_index(struct inode *ip, struct ext4_extent_index *ex,
    int do_walk)
{
	struct m_ext2fs *fs;
	struct buf *bp;
	int error;

	fs = ip->i_e2fs;

	printf("    index %p => (blk %u pblk %ju)\n",
	    ex, ex->ei_blk, (uint64_t)ex->ei_leaf_hi << 32 | ex->ei_leaf_lo);

	if (!do_walk)
		return;

	if ((error = bread(ip->i_devvp,
	    fsbtodb(fs, ((uint64_t)ex->ei_leaf_hi << 32 | ex->ei_leaf_lo)),
	    (int)fs->e2fs_bsize, NOCRED, &bp)) != 0) {
		brelse(bp);
		return;
	}

	ext4_ext_print_header(ip, (struct ext4_extent_header *)bp->b_data);

	brelse(bp);
}

static void
ext4_ext_print_header(struct inode *ip, struct ext4_extent_header *ehp)
{
	int i;

	printf("header %p => (magic 0x%x entries %d max %d depth %d gen %d)\n",
	    ehp, ehp->eh_magic, ehp->eh_ecount, ehp->eh_max, ehp->eh_depth,
	    ehp->eh_gen);

	for (i = 0; i < ehp->eh_ecount; i++)
		if (ehp->eh_depth != 0)
			ext4_ext_print_index(ip,
			    (struct ext4_extent_index *)(ehp + 1 + i), 1);
		else
			ext4_ext_print_extent((struct ext4_extent *)(ehp + 1 + i));
}
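
/*
 * Dump every level of an extent lookup path for debugging.
 */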
static void
ext4_ext_print_path(struct inode *ip, struct ext4_extent_path *path)
{
	int k, l;

	l = path->ep_depth;

	printf("ip=%d, Path:\n", ip->i_number);
	for (k = 0; k <= l; k++, path++) {
		if (path->ep_index) {
			ext4_ext_print_index(ip, path->ep_index, 0);
		} else if (path->ep_ext) {
			ext4_ext_print_extent(path->ep_ext);
		}
	}
}

void
ext4_ext_print_extent_tree_status(struct inode *ip)
{
	struct m_ext2fs *fs;
	struct ext4_extent_header *ehp;

	fs = ip->i_e2fs;
	ehp = (struct ext4_extent_header *)(char *)ip->i_db;

	printf("Extent status:ip=%d\n", ip->i_number);
	if (!(ip->i_flag & IN_E4EXTENTS))
		return;

	ext4_ext_print_header(ip, ehp);

	return;
}
#endif

static inline struct ext4_extent_header *
ext4_ext_inode_header(struct inode *ip)
{

	return ((struct ext4_extent_header *)ip->i_db);
}

static inline struct ext4_extent_header *
ext4_ext_block_header(char *bdata)
{

	return ((struct ext4_extent_header *)bdata);
}

static inline unsigned short
ext4_ext_inode_depth(struct inode *ip)
{
	struct ext4_extent_header *ehp;

	ehp = (struct ext4_extent_header *)ip->i_data;
	return (ehp->eh_depth);
}

static inline e4fs_daddr_t
ext4_ext_index_pblock(struct ext4_extent_index *index)
{
	e4fs_daddr_t blk;

	blk = index->ei_leaf_lo;
	blk |= (e4fs_daddr_t)index->ei_leaf_hi << 32;

	return (blk);
}

static inline void
ext4_index_store_pblock(struct ext4_extent_index *index, e4fs_daddr_t pb)
{

	index->ei_leaf_lo = pb & 0xffffffff;
	index->ei_leaf_hi = (pb >> 32) & 0xffff;
}

static inline e4fs_daddr_t
ext4_ext_extent_pblock(struct ext4_extent *extent)
{
	e4fs_daddr_t blk;

	blk = extent->e_start_lo;
	blk |= (e4fs_daddr_t)extent->e_start_hi << 32;

	return (blk);
}

static inline void
ext4_ext_store_pblock(struct ext4_extent *ex, e4fs_daddr_t pb)
{

	ex->e_start_lo = pb & 0xffffffff;
	ex->e_start_hi = (pb >> 32) & 0xffff;
}

int
ext4_ext_in_cache(struct inode *ip, daddr_t lbn, struct ext4_extent *ep)
{
	struct ext4_extent_cache *ecp;
	int ret = EXT4_EXT_CACHE_NO;

	ecp = &ip->i_ext_cache;
	if (ecp->ec_type == EXT4_EXT_CACHE_NO)
		return (ret);

	if (lbn >= ecp->ec_blk && lbn < ecp->ec_blk + ecp->ec_len) {
		ep->e_blk = ecp->ec_blk;
		ep->e_start_lo = ecp->ec_start & 0xffffffff;
		ep->e_start_hi = ecp->ec_start >> 32 & 0xffff;
		ep->e_len = ecp->ec_len;
		ret = ecp->ec_type;
	}
	return (ret);
}

static int
ext4_ext_check_header(struct inode *ip, struct ext4_extent_header *eh)
{
	struct m_ext2fs *fs;
	char *error_msg;

	fs = ip->i_e2fs;

	if (eh->eh_magic != EXT4_EXT_MAGIC) {
		error_msg = "invalid magic";
		goto corrupted;
	}
	if (eh->eh_max == 0) {
		error_msg = "invalid eh_max";
		goto corrupted;
	}
	if (eh->eh_ecount > eh->eh_max) {
		error_msg = "invalid eh_entries";
		goto corrupted;
	}

	return (0);

corrupted:
	ext2_fserr(fs, ip->i_uid, error_msg);
	return (EIO);
}
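
/*
 * Binary search an index node for the last entry whose logical block does not
 * exceed blk (the first entry if none qualifies); the result is stored in
 * path->ep_index.
 */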
static void
ext4_ext_binsearch_index(struct ext4_extent_path *path, int blk)
{
	struct ext4_extent_header *eh;
	struct ext4_extent_index *r, *l, *m;

	eh = path->ep_header;

	KASSERT(eh->eh_ecount <= eh->eh_max && eh->eh_ecount > 0,
	    ("ext4_ext_binsearch_index: bad args"));

	l = EXT_FIRST_INDEX(eh) + 1;
	r = EXT_FIRST_INDEX(eh) + eh->eh_ecount - 1;
	while (l <= r) {
		m = l + (r - l) / 2;
		if (blk < m->ei_blk)
			r = m - 1;
		else
			l = m + 1;
	}

	path->ep_index = l - 1;
}

static void
ext4_ext_binsearch_ext(struct ext4_extent_path *path, int blk)
{
	struct ext4_extent_header *eh;
	struct ext4_extent *r, *l, *m;

	eh = path->ep_header;

	KASSERT(eh->eh_ecount <= eh->eh_max,
	    ("ext4_ext_binsearch_ext: bad args"));

	if (eh->eh_ecount == 0)
		return;

	l = EXT_FIRST_EXTENT(eh) + 1;
	r = EXT_FIRST_EXTENT(eh) + eh->eh_ecount - 1;

	while (l <= r) {
		m = l + (r - l) / 2;
		if (blk < m->e_blk)
			r = m - 1;
		else
			l = m + 1;
	}

	path->ep_ext = l - 1;
}

static int
ext4_ext_fill_path_bdata(struct ext4_extent_path *path,
    struct buf *bp, uint64_t blk)
{

	KASSERT(path->ep_data == NULL,
	    ("ext4_ext_fill_path_bdata: bad ep_data"));

	path->ep_data = malloc(bp->b_bufsize, M_EXT2EXTENTS, M_WAITOK);
	if (!path->ep_data)
		return (ENOMEM);

	memcpy(path->ep_data, bp->b_data, bp->b_bufsize);
	path->ep_blk = blk;

	return (0);
}

static void
ext4_ext_fill_path_buf(struct ext4_extent_path *path, struct buf *bp)
{

	KASSERT(path->ep_data != NULL,
	    ("ext4_ext_fill_path_buf: bad ep_data"));

	memcpy(bp->b_data, path->ep_data, bp->b_bufsize);
}

static void
ext4_ext_drop_refs(struct ext4_extent_path *path)
{
	int depth, i;

	if (!path)
		return;

	depth = path->ep_depth;
	for (i = 0; i <= depth; i++, path++)
		if (path->ep_data) {
			free(path->ep_data, M_EXT2EXTENTS);
			path->ep_data = NULL;
		}
}

void
ext4_ext_path_free(struct ext4_extent_path *path)
{

	if (!path)
		return;

	ext4_ext_drop_refs(path);
	free(path, M_EXT2EXTENTS);
}
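
/*
 * Walk the extent tree from the root stored in the inode down to the leaf
 * that should contain the given logical block, recording every visited node
 * in the path array returned through ppath.
 */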
int
ext4_ext_find_extent(struct inode *ip, daddr_t block,
    struct ext4_extent_path **ppath)
{
	struct m_ext2fs *fs;
	struct ext4_extent_header *eh;
	struct ext4_extent_path *path;
	struct buf *bp;
	uint64_t blk;
	int error, depth, i, ppos, alloc;

	fs = ip->i_e2fs;
	eh = ext4_ext_inode_header(ip);
	depth = ext4_ext_inode_depth(ip);
	ppos = 0;
	alloc = 0;

	error = ext4_ext_check_header(ip, eh);
	if (error)
		return (error);

	if (ppath == NULL)
		return (EINVAL);

	path = *ppath;
	if (path == NULL) {
		path = malloc(EXT4_EXT_DEPTH_MAX *
		    sizeof(struct ext4_extent_path),
		    M_EXT2EXTENTS, M_WAITOK | M_ZERO);
		if (!path)
			return (ENOMEM);

		*ppath = path;
		alloc = 1;
	}

	path[0].ep_header = eh;
	path[0].ep_data = NULL;

	/* Walk through the tree. */
	i = depth;
	while (i) {
		ext4_ext_binsearch_index(&path[ppos], block);
		blk = ext4_ext_index_pblock(path[ppos].ep_index);
		path[ppos].ep_depth = i;
		path[ppos].ep_ext = NULL;

		error = bread(ip->i_devvp, fsbtodb(ip->i_e2fs, blk),
		    ip->i_e2fs->e2fs_bsize, NOCRED, &bp);
		if (error) {
			brelse(bp);
			goto error;
		}

		ppos++;
		if (ppos > depth) {
			ext2_fserr(fs, ip->i_uid,
			    "ppos > depth => extent corrupted");
			error = EIO;
			brelse(bp);
			goto error;
		}

		ext4_ext_fill_path_bdata(&path[ppos], bp, blk);
		bqrelse(bp);

		eh = ext4_ext_block_header(path[ppos].ep_data);
		error = ext4_ext_check_header(ip, eh);
		if (error)
			goto error;

		path[ppos].ep_header = eh;

		i--;
	}

	error = ext4_ext_check_header(ip, eh);
	if (error)
		goto error;

	/* Find extent. */
	path[ppos].ep_depth = i;
	path[ppos].ep_header = eh;
	path[ppos].ep_ext = NULL;
	path[ppos].ep_index = NULL;
	ext4_ext_binsearch_ext(&path[ppos], block);
	return (0);

error:
	ext4_ext_drop_refs(path);
	if (alloc)
		free(path, M_EXT2EXTENTS);

	*ppath = NULL;

	return (error);
}

static inline int
ext4_ext_space_root(struct inode *ip)
{
	int size;

	size = sizeof(ip->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent);

	return (size);
}

static inline int
ext4_ext_space_block(struct inode *ip)
{
	struct m_ext2fs *fs;
	int size;

	fs = ip->i_e2fs;

	size = (fs->e2fs_bsize - sizeof(struct ext4_extent_header)) /
	    sizeof(struct ext4_extent);

	return (size);
}

static inline int
ext4_ext_space_block_index(struct inode *ip)
{
	struct m_ext2fs *fs;
	int size;

	fs = ip->i_e2fs;

	size = (fs->e2fs_bsize - sizeof(struct ext4_extent_header)) /
	    sizeof(struct ext4_extent_index);

	return (size);
}

void
ext4_ext_tree_init(struct inode *ip)
{
	struct ext4_extent_header *ehp;

	ip->i_flag |= IN_E4EXTENTS;

	memset(ip->i_data, 0, sizeof(ip->i_data));
	ehp = (struct ext4_extent_header *)ip->i_data;
	ehp->eh_magic = EXT4_EXT_MAGIC;
	ehp->eh_max = ext4_ext_space_root(ip);
	ip->i_ext_cache.ec_type = EXT4_EXT_CACHE_NO;
	ip->i_flag |= IN_CHANGE | IN_UPDATE;
	ext2_update(ip->i_vnode, 1);
}

static inline void
ext4_ext_put_in_cache(struct inode *ip, uint32_t blk,
    uint32_t len, uint32_t start, int type)
{

	KASSERT(len != 0, ("ext4_ext_put_in_cache: bad input"));

	ip->i_ext_cache.ec_type = type;
	ip->i_ext_cache.ec_blk = blk;
	ip->i_ext_cache.ec_len = len;
	ip->i_ext_cache.ec_start = start;
}
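
/*
 * Suggest a preferred physical block (allocation goal) for a logical block:
 * next to the closest extent in the leaf if there is one, otherwise near the
 * leaf block itself, and as a last resort in the inode's block group.
 */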
static e4fs_daddr_t
ext4_ext_blkpref(struct inode *ip, struct ext4_extent_path *path,
    e4fs_daddr_t block)
{
	struct m_ext2fs *fs;
	struct ext4_extent *ex;
	e4fs_daddr_t bg_start;
	int depth;

	fs = ip->i_e2fs;

	if (path) {
		depth = path->ep_depth;
		ex = path[depth].ep_ext;
		if (ex) {
			e4fs_daddr_t pblk = ext4_ext_extent_pblock(ex);
			e2fs_daddr_t blk = ex->e_blk;

			if (block > blk)
				return (pblk + (block - blk));
			else
				return (pblk - (blk - block));
		}

		/* Try to get block from index itself. */
		if (path[depth].ep_data)
			return (path[depth].ep_blk);
	}

	/* Use inode's group. */
	bg_start = (ip->i_block_group * EXT2_BLOCKS_PER_GROUP(ip->i_e2fs)) +
	    fs->e2fs->e2fs_first_dblock;

	return (bg_start + block);
}

static inline int
ext4_can_extents_be_merged(struct ext4_extent *ex1,
    struct ext4_extent *ex2)
{

	if (ex1->e_blk + ex1->e_len != ex2->e_blk)
		return (0);

	if (ex1->e_len + ex2->e_len > EXT4_MAX_LEN)
		return (0);

	if (ext4_ext_extent_pblock(ex1) + ex1->e_len ==
	    ext4_ext_extent_pblock(ex2))
		return (1);

	return (0);
}

static unsigned
ext4_ext_next_leaf_block(struct inode *ip, struct ext4_extent_path *path)
{
	int depth = path->ep_depth;

	/* Empty tree */
	if (depth == 0)
		return (EXT4_MAX_BLOCKS);

	/* Go to indexes. */
	depth--;

	while (depth >= 0) {
		if (path[depth].ep_index !=
		    EXT_LAST_INDEX(path[depth].ep_header))
			return (path[depth].ep_index[1].ei_blk);

		depth--;
	}

	return (EXT4_MAX_BLOCKS);
}

static int
ext4_ext_dirty(struct inode *ip, struct ext4_extent_path *path)
{
	struct m_ext2fs *fs;
	struct buf *bp;
	uint64_t blk;
	int error;

	fs = ip->i_e2fs;

	if (!path)
		return (EINVAL);

	if (path->ep_data) {
		blk = path->ep_blk;
		bp = getblk(ip->i_devvp, fsbtodb(fs, blk),
		    fs->e2fs_bsize, 0, 0, 0);
		if (!bp)
			return (EIO);
		ext4_ext_fill_path_buf(path, bp);
		error = bwrite(bp);
	} else {
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		error = ext2_update(ip->i_vnode, 1);
	}

	return (error);
}
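
/*
 * Insert a new index entry (logical block lblk, physical block blk) into the
 * index node referenced by path, keeping the entries sorted by logical block.
 */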
static int
ext4_ext_insert_index(struct inode *ip, struct ext4_extent_path *path,
    uint32_t lblk, e4fs_daddr_t blk)
{
	struct m_ext2fs *fs;
	struct ext4_extent_index *idx;
	int len;

	fs = ip->i_e2fs;

	if (lblk == path->ep_index->ei_blk) {
		ext2_fserr(fs, ip->i_uid,
		    "lblk == index blk => extent corrupted");
		return (EIO);
	}

	if (path->ep_header->eh_ecount >= path->ep_header->eh_max) {
		ext2_fserr(fs, ip->i_uid,
		    "ecount > maxcount => extent corrupted");
		return (EIO);
	}

	if (lblk > path->ep_index->ei_blk) {
		/* Insert after. */
		idx = path->ep_index + 1;
	} else {
		/* Insert before. */
		idx = path->ep_index;
	}

	len = EXT_LAST_INDEX(path->ep_header) - idx + 1;
	if (len > 0)
		memmove(idx + 1, idx, len * sizeof(struct ext4_extent_index));

	if (idx > EXT_MAX_INDEX(path->ep_header)) {
		ext2_fserr(fs, ip->i_uid,
		    "index is out of range => extent corrupted");
		return (EIO);
	}

	idx->ei_blk = lblk;
	ext4_index_store_pblock(idx, blk);
	path->ep_header->eh_ecount++;

	return (ext4_ext_dirty(ip, path));
}

static e4fs_daddr_t
ext4_ext_alloc_meta(struct inode *ip)
{
	e4fs_daddr_t blk = ext2_alloc_meta(ip);

	if (blk) {
		ip->i_blocks += btodb(ip->i_e2fs->e2fs_bsize);
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		ext2_update(ip->i_vnode, 1);
	}

	return (blk);
}

static void
ext4_ext_blkfree(struct inode *ip, uint64_t blk, int count, int flags)
{
	struct m_ext2fs *fs;
	int i, blocksreleased;

	fs = ip->i_e2fs;
	blocksreleased = count;

	for (i = 0; i < count; i++)
		ext2_blkfree(ip, blk + i, fs->e2fs_bsize);

	if (ip->i_blocks >= blocksreleased)
		ip->i_blocks -= (btodb(fs->e2fs_bsize) * blocksreleased);
	else
		ip->i_blocks = 0;

	ip->i_flag |= IN_CHANGE | IN_UPDATE;
	ext2_update(ip->i_vnode, 1);
}

static int
ext4_ext_split(struct inode *ip, struct ext4_extent_path *path,
    struct ext4_extent *newext, int at)
{
	struct m_ext2fs *fs;
	struct buf *bp;
	int depth = ext4_ext_inode_depth(ip);
	struct ext4_extent_header *neh;
	struct ext4_extent_index *fidx;
	struct ext4_extent *ex;
	int i = at, k, m, a;
	e4fs_daddr_t newblk, oldblk;
	uint32_t border;
	e4fs_daddr_t *ablks = NULL;
	int error = 0;

	fs = ip->i_e2fs;
	bp = NULL;

	/*
	 * We will split at current extent for now.
	 */
	if (path[depth].ep_ext > EXT_MAX_EXTENT(path[depth].ep_header)) {
		ext2_fserr(fs, ip->i_uid,
		    "extent is out of range => extent corrupted");
		return (EIO);
	}

	if (path[depth].ep_ext != EXT_MAX_EXTENT(path[depth].ep_header))
		border = path[depth].ep_ext[1].e_blk;
	else
		border = newext->e_blk;

	/* Allocate new blocks. */
	ablks = malloc(sizeof(e4fs_daddr_t) * depth,
	    M_EXT2EXTENTS, M_WAITOK | M_ZERO);
	if (!ablks)
		return (ENOMEM);
	for (a = 0; a < depth - at; a++) {
		newblk = ext4_ext_alloc_meta(ip);
		if (newblk == 0) {
			error = ENOSPC;
			goto cleanup;
		}
		ablks[a] = newblk;
	}

	newblk = ablks[--a];
	bp = getblk(ip->i_devvp, fsbtodb(fs, newblk), fs->e2fs_bsize, 0, 0, 0);
	if (!bp) {
		error = EIO;
		goto cleanup;
	}

	neh = ext4_ext_block_header(bp->b_data);
	neh->eh_ecount = 0;
	neh->eh_max = ext4_ext_space_block(ip);
	neh->eh_magic = EXT4_EXT_MAGIC;
	neh->eh_depth = 0;
	ex = EXT_FIRST_EXTENT(neh);
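
	/*
	 * The leaf is split only when it is completely full, so anything
	 * other than a full leaf here means the on-disk tree is corrupted.
	 */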
	if (path[depth].ep_header->eh_ecount != path[depth].ep_header->eh_max) {
		ext2_fserr(fs, ip->i_uid,
		    "extents count out of range => extent corrupted");
		error = EIO;
		goto cleanup;
	}

	/* Start copy from next extent. */
	m = 0;
	path[depth].ep_ext++;
	while (path[depth].ep_ext <= EXT_MAX_EXTENT(path[depth].ep_header)) {
		path[depth].ep_ext++;
		m++;
	}
	if (m) {
		memmove(ex, path[depth].ep_ext - m,
		    sizeof(struct ext4_extent) * m);
		neh->eh_ecount = neh->eh_ecount + m;
	}

	bwrite(bp);
	bp = NULL;

	/* Fix old leaf. */
	if (m) {
		path[depth].ep_header->eh_ecount =
		    path[depth].ep_header->eh_ecount - m;
		ext4_ext_dirty(ip, path + depth);
	}

	/* Create intermediate indexes. */
	k = depth - at - 1;
	KASSERT(k >= 0, ("ext4_ext_split: negative k"));

	/* Insert new index into current index block. */
	i = depth - 1;
	while (k--) {
		oldblk = newblk;
		newblk = ablks[--a];
		error = bread(ip->i_devvp, fsbtodb(fs, newblk),
		    (int)fs->e2fs_bsize, NOCRED, &bp);
		if (error) {
			brelse(bp);
			goto cleanup;
		}

		neh = (struct ext4_extent_header *)bp->b_data;
		neh->eh_ecount = 1;
		neh->eh_magic = EXT4_EXT_MAGIC;
		neh->eh_max = ext4_ext_space_block_index(ip);
		neh->eh_depth = depth - i;
		fidx = EXT_FIRST_INDEX(neh);
		fidx->ei_blk = border;
		ext4_index_store_pblock(fidx, oldblk);

		m = 0;
		path[i].ep_index++;
		while (path[i].ep_index <= EXT_MAX_INDEX(path[i].ep_header)) {
			path[i].ep_index++;
			m++;
		}
		if (m) {
			memmove(++fidx, path[i].ep_index - m,
			    sizeof(struct ext4_extent_index) * m);
			neh->eh_ecount = neh->eh_ecount + m;
		}

		bwrite(bp);
		bp = NULL;

		/* Fix old index. */
		if (m) {
			path[i].ep_header->eh_ecount =
			    path[i].ep_header->eh_ecount - m;
			ext4_ext_dirty(ip, path + i);
		}

		i--;
	}

	error = ext4_ext_insert_index(ip, path + at, border, newblk);

cleanup:
	if (bp)
		brelse(bp);

	if (error) {
		for (i = 0; i < depth; i++) {
			if (!ablks[i])
				continue;
			ext4_ext_blkfree(ip, ablks[i], 1, 0);
		}
	}

	free(ablks, M_EXT2EXTENTS);

	return (error);
}
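
/*
 * Grow the extent tree one level: copy the root node kept in the inode into a
 * newly allocated block and replace the root with a single index entry that
 * points at that block.
 */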
static int
ext4_ext_grow_indepth(struct inode *ip, struct ext4_extent_path *path,
    struct ext4_extent *newext)
{
	struct m_ext2fs *fs;
	struct ext4_extent_path *curpath;
	struct ext4_extent_header *neh;
	struct ext4_extent_index *fidx;
	struct buf *bp;
	e4fs_daddr_t newblk;
	int error = 0;

	fs = ip->i_e2fs;
	curpath = path;

	newblk = ext4_ext_alloc_meta(ip);
	if (newblk == 0)
		return (ENOSPC);

	bp = getblk(ip->i_devvp, fsbtodb(fs, newblk), fs->e2fs_bsize, 0, 0, 0);
	if (!bp)
		return (EIO);

	/* Move top-level index/leaf into new block. */
	memmove(bp->b_data, curpath->ep_header, sizeof(ip->i_data));

	/* Set size of new block. */
	neh = ext4_ext_block_header(bp->b_data);
	neh->eh_magic = EXT4_EXT_MAGIC;

	if (ext4_ext_inode_depth(ip))
		neh->eh_max = ext4_ext_space_block_index(ip);
	else
		neh->eh_max = ext4_ext_space_block(ip);

	error = bwrite(bp);
	if (error)
		goto out;

	bp = NULL;

	curpath->ep_header->eh_magic = EXT4_EXT_MAGIC;
	curpath->ep_header->eh_max = ext4_ext_space_root(ip);
	curpath->ep_header->eh_ecount = 1;
	curpath->ep_index = EXT_FIRST_INDEX(curpath->ep_header);
	curpath->ep_index->ei_blk = EXT_FIRST_EXTENT(path[0].ep_header)->e_blk;
	ext4_index_store_pblock(curpath->ep_index, newblk);

	neh = ext4_ext_inode_header(ip);
	fidx = EXT_FIRST_INDEX(neh);
	neh->eh_depth = path->ep_depth + 1;
	ext4_ext_dirty(ip, curpath);
out:
	brelse(bp);

	return (error);
}

static int
ext4_ext_create_new_leaf(struct inode *ip, struct ext4_extent_path *path,
    struct ext4_extent *newext)
{
	struct m_ext2fs *fs;
	struct ext4_extent_path *curpath;
	int depth, i, error;

	fs = ip->i_e2fs;

repeat:
	i = depth = ext4_ext_inode_depth(ip);

	/* Look for a free index entry in the tree. */
	curpath = path + depth;
	while (i > 0 && !EXT_HAS_FREE_INDEX(curpath)) {
		i--;
		curpath--;
	}

	/*
	 * We use already allocated block for index block,
	 * so subsequent data blocks should be contiguous.
	 */
	if (EXT_HAS_FREE_INDEX(curpath)) {
		error = ext4_ext_split(ip, path, newext, i);
		if (error)
			goto out;

		/* Refill path. */
		ext4_ext_drop_refs(path);
		error = ext4_ext_find_extent(ip, newext->e_blk, &path);
		if (error)
			goto out;
	} else {
		/* Tree is full, do grow in depth. */
		error = ext4_ext_grow_indepth(ip, path, newext);
		if (error)
			goto out;

		/* Refill path. */
		ext4_ext_drop_refs(path);
		error = ext4_ext_find_extent(ip, newext->e_blk, &path);
		if (error)
			goto out;

		/* Check and split tree if required. */
		depth = ext4_ext_inode_depth(ip);
		if (path[depth].ep_header->eh_ecount ==
		    path[depth].ep_header->eh_max)
			goto repeat;
	}

out:
	return (error);
}
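
/*
 * If the first extent of a leaf was changed, propagate its logical block
 * number into the index entries that lead to that leaf.
 */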
static int
ext4_ext_correct_indexes(struct inode *ip, struct ext4_extent_path *path)
{
	struct ext4_extent_header *eh;
	struct ext4_extent *ex;
	int32_t border;
	int depth, k;

	depth = ext4_ext_inode_depth(ip);
	eh = path[depth].ep_header;
	ex = path[depth].ep_ext;

	if (ex == NULL || eh == NULL)
		return (EIO);

	if (!depth)
		return (0);

	/* We correct the tree only if the first leaf extent was modified. */
	if (ex != EXT_FIRST_EXTENT(eh))
		return (0);

	k = depth - 1;
	border = path[depth].ep_ext->e_blk;
	path[k].ep_index->ei_blk = border;
	ext4_ext_dirty(ip, path + k);
	while (k--) {
		/* Change all left-side indexes. */
		if (path[k + 1].ep_index != EXT_FIRST_INDEX(path[k + 1].ep_header))
			break;

		path[k].ep_index->ei_blk = border;
		ext4_ext_dirty(ip, path + k);
	}

	return (0);
}

static int
ext4_ext_insert_extent(struct inode *ip, struct ext4_extent_path *path,
    struct ext4_extent *newext)
{
	struct m_ext2fs *fs;
	struct ext4_extent_header *eh;
	struct ext4_extent *ex, *nex, *nearex;
	struct ext4_extent_path *npath;
	int depth, len, error, next;

	fs = ip->i_e2fs;
	depth = ext4_ext_inode_depth(ip);
	ex = path[depth].ep_ext;
	npath = NULL;

	if (newext->e_len == 0 || path[depth].ep_header == NULL)
		return (EINVAL);

	/* Insert block into found extent. */
	if (ex && ext4_can_extents_be_merged(ex, newext)) {
		ex->e_len = ex->e_len + newext->e_len;
		eh = path[depth].ep_header;
		nearex = ex;
		goto merge;
	}

repeat:
	depth = ext4_ext_inode_depth(ip);
	eh = path[depth].ep_header;
	if (eh->eh_ecount < eh->eh_max)
		goto has_space;

	/* Try next leaf */
	nex = EXT_LAST_EXTENT(eh);
	next = ext4_ext_next_leaf_block(ip, path);
	if (newext->e_blk > nex->e_blk && next != EXT4_MAX_BLOCKS) {
		KASSERT(npath == NULL,
		    ("ext4_ext_insert_extent: bad path"));

		error = ext4_ext_find_extent(ip, next, &npath);
		if (error)
			goto cleanup;

		if (npath->ep_depth != path->ep_depth) {
			error = EIO;
			goto cleanup;
		}

		eh = npath[depth].ep_header;
		if (eh->eh_ecount < eh->eh_max) {
			path = npath;
			goto repeat;
		}
	}

	/*
	 * There is no free space in the found leaf,
	 * try to add a new leaf to the tree.
	 */
	error = ext4_ext_create_new_leaf(ip, path, newext);
	if (error)
		goto cleanup;

	depth = ext4_ext_inode_depth(ip);
	eh = path[depth].ep_header;

has_space:
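	/*
	 * The target leaf has room: shift the extents that follow the
	 * insertion point and write the new extent into the gap so the leaf
	 * stays sorted by logical block.
	 */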
	nearex = path[depth].ep_ext;
	if (!nearex) {
		/* Create new extent in the leaf. */
		path[depth].ep_ext = EXT_FIRST_EXTENT(eh);
	} else if (newext->e_blk > nearex->e_blk) {
		if (nearex != EXT_LAST_EXTENT(eh)) {
			len = EXT_MAX_EXTENT(eh) - nearex;
			len = (len - 1) * sizeof(struct ext4_extent);
			len = len < 0 ? 0 : len;
			memmove(nearex + 2, nearex + 1, len);
		}
		path[depth].ep_ext = nearex + 1;
	} else {
		len = (EXT_MAX_EXTENT(eh) - nearex) * sizeof(struct ext4_extent);
		len = len < 0 ? 0 : len;
		memmove(nearex + 1, nearex, len);
		path[depth].ep_ext = nearex;
	}

	eh->eh_ecount = eh->eh_ecount + 1;
	nearex = path[depth].ep_ext;
	nearex->e_blk = newext->e_blk;
	nearex->e_start_lo = newext->e_start_lo;
	nearex->e_start_hi = newext->e_start_hi;
	nearex->e_len = newext->e_len;

merge:
	/* Try to merge extents to the right. */
	while (nearex < EXT_LAST_EXTENT(eh)) {
		if (!ext4_can_extents_be_merged(nearex, nearex + 1))
			break;

		/* Merge with next extent. */
		nearex->e_len = nearex->e_len + nearex[1].e_len;
		if (nearex + 1 < EXT_LAST_EXTENT(eh)) {
			len = (EXT_LAST_EXTENT(eh) - nearex - 1) *
			    sizeof(struct ext4_extent);
			memmove(nearex + 1, nearex + 2, len);
		}

		eh->eh_ecount = eh->eh_ecount - 1;
		KASSERT(eh->eh_ecount != 0,
		    ("ext4_ext_insert_extent: bad ecount"));
	}

	/*
	 * Try to merge extents to the left,
	 * starting from index correction.
	 */
	error = ext4_ext_correct_indexes(ip, path);
	if (error)
		goto cleanup;

	ext4_ext_dirty(ip, path + depth);

cleanup:
	if (npath) {
		ext4_ext_drop_refs(npath);
		free(npath, M_EXT2EXTENTS);
	}

	ip->i_ext_cache.ec_type = EXT4_EXT_CACHE_NO;
	return (error);
}

static e4fs_daddr_t
ext4_new_blocks(struct inode *ip, daddr_t lbn, e4fs_daddr_t pref,
    struct ucred *cred, unsigned long *count, int *perror)
{
	struct m_ext2fs *fs;
	struct ext2mount *ump;
	e4fs_daddr_t newblk;

	fs = ip->i_e2fs;
	ump = ip->i_ump;

	/*
	 * We will allocate only single block for now.
	 */
	if (*count > 1)
		return (0);

	EXT2_LOCK(ip->i_ump);
	*perror = ext2_alloc(ip, lbn, pref, (int)fs->e2fs_bsize, cred, &newblk);
	if (*perror)
		return (0);

	if (newblk) {
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		ext2_update(ip->i_vnode, 1);
	}

	return (newblk);
}
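
/*
 * Map the logical block iblk of the inode to a physical block, allocating a
 * new block and inserting the corresponding extent when none covers it yet.
 * The physical block is returned through *nb, a buffer for it through *bpp
 * when bpp is not NULL, and *pallocated is set when a new block was
 * allocated.
 */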
int
ext4_ext_get_blocks(struct inode *ip, e4fs_daddr_t iblk,
    unsigned long max_blocks, struct ucred *cred, struct buf **bpp,
    int *pallocated, uint32_t *nb)
{
	struct m_ext2fs *fs;
	struct buf *bp = NULL;
	struct ext4_extent_path *path;
	struct ext4_extent newex, *ex;
	e4fs_daddr_t bpref, newblk = 0;
	unsigned long allocated = 0;
	int error = 0, depth;

	fs = ip->i_e2fs;
	*pallocated = 0;
	path = NULL;
	if (bpp)
		*bpp = NULL;

	/* Check cache. */
	if ((bpref = ext4_ext_in_cache(ip, iblk, &newex))) {
		if (bpref == EXT4_EXT_CACHE_IN) {
			/* Block is already allocated. */
			newblk = iblk - newex.e_blk +
			    ext4_ext_extent_pblock(&newex);
			allocated = newex.e_len - (iblk - newex.e_blk);
			goto out;
		} else {
			error = EIO;
			goto out2;
		}
	}

	error = ext4_ext_find_extent(ip, iblk, &path);
	if (error) {
		goto out2;
	}

	depth = ext4_ext_inode_depth(ip);
	if (path[depth].ep_ext == NULL && depth != 0) {
		error = EIO;
		goto out2;
	}

	if ((ex = path[depth].ep_ext)) {
		uint64_t lblk = ex->e_blk;
		uint16_t e_len = ex->e_len;
		e4fs_daddr_t e_start = ext4_ext_extent_pblock(ex);

		if (e_len > EXT4_MAX_LEN)
			goto out2;

		/* If we found extent covers block, simply return it. */
		if (iblk >= lblk && iblk < lblk + e_len) {
			newblk = iblk - lblk + e_start;
			allocated = e_len - (iblk - lblk);
			ext4_ext_put_in_cache(ip, lblk, e_len,
			    e_start, EXT4_EXT_CACHE_IN);
			goto out;
		}
	}

	/* Allocate the new block. */
	if (S_ISREG(ip->i_mode) && (!ip->i_next_alloc_block)) {
		ip->i_next_alloc_goal = 0;
	}

	bpref = ext4_ext_blkpref(ip, path, iblk);
	allocated = max_blocks;
	newblk = ext4_new_blocks(ip, iblk, bpref, cred, &allocated, &error);
	if (!newblk)
		goto out2;

	/* Try to insert new extent into found leaf and return. */
	newex.e_blk = iblk;
	ext4_ext_store_pblock(&newex, newblk);
	newex.e_len = allocated;
	error = ext4_ext_insert_extent(ip, path, &newex);
	if (error)
		goto out2;

	newblk = ext4_ext_extent_pblock(&newex);
	ext4_ext_put_in_cache(ip, iblk, allocated, newblk, EXT4_EXT_CACHE_IN);
	*pallocated = 1;

out:
	if (allocated > max_blocks)
		allocated = max_blocks;

	if (bpp) {
		error = bread(ip->i_devvp, fsbtodb(fs, newblk),
		    fs->e2fs_bsize, cred, &bp);
		if (error) {
			brelse(bp);
		} else {
			*bpp = bp;
		}
	}

out2:
	if (path) {
		ext4_ext_drop_refs(path);
		free(path, M_EXT2EXTENTS);
	}

	if (nb)
		*nb = newblk;

	return (error);
}

static inline uint16_t
ext4_ext_get_actual_len(struct ext4_extent *ext)
{

	return (ext->e_len <= EXT_INIT_MAX_LEN ?
	    ext->e_len : (ext->e_len - EXT_INIT_MAX_LEN));
}

static inline struct ext4_extent_header *
ext4_ext_header(struct inode *ip)
{

	return (struct ext4_extent_header *)ip->i_db;
}

static int
ext4_remove_blocks(struct inode *ip, struct ext4_extent *ex,
    unsigned long from, unsigned long to)
{
	unsigned long num, start;

	if (from >= ex->e_blk &&
	    to == ex->e_blk + ext4_ext_get_actual_len(ex) - 1) {
		/* Tail cleanup. */
		num = ex->e_blk + ext4_ext_get_actual_len(ex) - from;
		start = ext4_ext_extent_pblock(ex) +
		    ext4_ext_get_actual_len(ex) - num;
		ext4_ext_blkfree(ip, start, num, 0);
	}

	return (0);
}

static int
ext4_ext_rm_index(struct inode *ip, struct ext4_extent_path *path)
{
	e4fs_daddr_t leaf;

	/* Free index block. */
	path--;
	leaf = ext4_ext_index_pblock(path->ep_index);
	KASSERT(path->ep_header->eh_ecount != 0,
	    ("ext4_ext_rm_index: bad ecount"));
	path->ep_header->eh_ecount--;
	ext4_ext_dirty(ip, path);
	ext4_ext_blkfree(ip, leaf, 1, 0);
	return (0);
}
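
/*
 * Remove the extents of a leaf that map logical blocks at or beyond start and
 * free the physical blocks that back them.
 */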
static int
ext4_ext_rm_leaf(struct inode *ip, struct ext4_extent_path *path,
    uint64_t start)
{
	struct m_ext2fs *fs;
	int depth;
	struct ext4_extent_header *eh;
	unsigned int a, b, block, num;
	unsigned long ex_blk;
	unsigned short ex_len;
	struct ext4_extent *ex;
	int error, correct_index;

	fs = ip->i_e2fs;
	depth = ext4_ext_inode_depth(ip);
	error = 0;
	correct_index = 0;

	if (!path[depth].ep_header) {
		if (path[depth].ep_data == NULL)
			return (EINVAL);
		path[depth].ep_header =
		    (struct ext4_extent_header *)path[depth].ep_data;
	}

	eh = path[depth].ep_header;
	if (!eh) {
		ext2_fserr(fs, ip->i_uid, "bad header => extent corrupted");
		return (EIO);
	}

	ex = EXT_LAST_EXTENT(eh);
	ex_blk = ex->e_blk;
	ex_len = ext4_ext_get_actual_len(ex);

	while (ex >= EXT_FIRST_EXTENT(eh) && ex_blk + ex_len > start) {
		path[depth].ep_ext = ex;
		a = ex_blk > start ? ex_blk : start;
		b = (uint64_t)ex_blk + ex_len - 1 <
		    EXT4_MAX_BLOCKS ? ex_blk + ex_len - 1 : EXT4_MAX_BLOCKS;

		if (a != ex_blk && b != ex_blk + ex_len - 1)
			return (EINVAL);
		else if (a != ex_blk) {
			/* Remove tail of the extent. */
			block = ex_blk;
			num = a - block;
		} else if (b != ex_blk + ex_len - 1) {
			/* Remove head of the extent, not implemented. */
			return (EINVAL);
		} else {
			/* Remove whole extent. */
			block = ex_blk;
			num = 0;
		}

		if (ex == EXT_FIRST_EXTENT(eh))
			correct_index = 1;

		error = ext4_remove_blocks(ip, ex, a, b);
		if (error)
			goto out;

		if (num == 0) {
			ext4_ext_store_pblock(ex, 0);
			eh->eh_ecount--;
		}

		ex->e_blk = block;
		ex->e_len = num;

		ext4_ext_dirty(ip, path + depth);

		ex--;
		ex_blk = ex->e_blk;
		ex_len = ext4_ext_get_actual_len(ex);
	}

	if (correct_index && eh->eh_ecount)
		error = ext4_ext_correct_indexes(ip, path);

	/*
	 * If this leaf is free, we should
	 * remove it from index block above.
	 */
	if (error == 0 && eh->eh_ecount == 0 && path[depth].ep_data != NULL)
		error = ext4_ext_rm_index(ip, path + depth);

out:
	return (error);
}

static struct buf *
ext4_read_extent_tree_block(struct inode *ip, e4fs_daddr_t pblk,
    int depth, int flags)
{
	struct m_ext2fs *fs;
	struct ext4_extent_header *eh;
	struct buf *bp;
	int error;

	fs = ip->i_e2fs;

	error = bread(ip->i_devvp, fsbtodb(fs, pblk),
	    fs->e2fs_bsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (NULL);
	}

	eh = ext4_ext_block_header(bp->b_data);
	if (eh->eh_depth != depth) {
		ext2_fserr(fs, ip->i_uid, "unexpected eh_depth");
		goto err;
	}

	error = ext4_ext_check_header(ip, eh);
	if (error)
		goto err;

	return (bp);

err:
	brelse(bp);
	return (NULL);
}

static inline int
ext4_ext_more_to_rm(struct ext4_extent_path *path)
{

	KASSERT(path->ep_index != NULL,
	    ("ext4_ext_more_to_rm: bad index from path"));

	if (path->ep_index < EXT_FIRST_INDEX(path->ep_header))
		return (0);

	if (path->ep_header->eh_ecount == path->index_count)
		return (0);

	return (1);
}
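
/*
 * Release the part of the extent tree that maps logical blocks at or beyond
 * length: walk the tree depth first, trim the leaves and free index blocks
 * that become empty.
 */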
int
ext4_ext_remove_space(struct inode *ip, off_t length, int flags,
    struct ucred *cred, struct thread *td)
{
	struct buf *bp;
	struct ext4_extent_header *ehp;
	struct ext4_extent_path *path;
	int depth;
	int i, error;

	ehp = (struct ext4_extent_header *)ip->i_db;
	depth = ext4_ext_inode_depth(ip);

	error = ext4_ext_check_header(ip, ehp);
	if (error)
		return (error);

	path = malloc(sizeof(struct ext4_extent_path) * (depth + 1),
	    M_EXT2EXTENTS, M_WAITOK | M_ZERO);
	if (!path)
		return (ENOMEM);

	i = 0;
	path[0].ep_header = ehp;
	path[0].ep_depth = depth;
	while (i >= 0 && error == 0) {
		if (i == depth) {
			/* This is leaf. */
			error = ext4_ext_rm_leaf(ip, path, length);
			if (error)
				break;
			free(path[i].ep_data, M_EXT2EXTENTS);
			path[i].ep_data = NULL;
			i--;
			continue;
		}

		/* This is index. */
		if (!path[i].ep_header)
			path[i].ep_header =
			    (struct ext4_extent_header *)path[i].ep_data;

		if (!path[i].ep_index) {
			/* This level has not been touched yet. */
			path[i].ep_index = EXT_LAST_INDEX(path[i].ep_header);
			path[i].index_count = path[i].ep_header->eh_ecount + 1;
		} else {
			/* We have already been here, go to the next index. */
			path[i].ep_index--;
		}

		if (ext4_ext_more_to_rm(path + i)) {
			memset(path + i + 1, 0, sizeof(*path));
			bp = ext4_read_extent_tree_block(ip,
			    ext4_ext_index_pblock(path[i].ep_index),
			    path[0].ep_depth - (i + 1), 0);
			if (!bp) {
				error = EIO;
				break;
			}

			ext4_ext_fill_path_bdata(&path[i + 1], bp,
			    ext4_ext_index_pblock(path[i].ep_index));
			brelse(bp);
			path[i].index_count = path[i].ep_header->eh_ecount;
			i++;
		} else {
			if (path[i].ep_header->eh_ecount == 0 && i > 0) {
				/* Index is empty, remove it. */
				error = ext4_ext_rm_index(ip, path + i);
			}
			free(path[i].ep_data, M_EXT2EXTENTS);
			path[i].ep_data = NULL;
			i--;
		}
	}

	if (path->ep_header->eh_ecount == 0) {
		/*
		 * Truncate the tree to zero.
		 */
		ext4_ext_header(ip)->eh_depth = 0;
		ext4_ext_header(ip)->eh_max = ext4_ext_space_root(ip);
		ext4_ext_dirty(ip, path);
	}

	ext4_ext_drop_refs(path);
	free(path, M_EXT2EXTENTS);

	return (error);
}