Lines Matching +full:ip +full:- +full:block

1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
70 ext4_ext_blk_check(struct inode *ip, e4fs_daddr_t blk)
74 fs = ip->i_e2fs;
76 if (blk < fs->e2fs->e2fs_first_dblock || blk >= fs->e2fs_bcount)
83 ext4_ext_walk_index(struct inode *ip, struct ext4_extent_index *ex, int depth,
91 fs = ip->i_e2fs;
95 le32toh(ex->ei_blk),
96 (uint64_t)le16toh(ex->ei_leaf_hi) << 32 |
97 le32toh(ex->ei_leaf_lo));
103 error = ext4_ext_blk_check(ip, blk);
107 if ((error = bread(ip->i_devvp,
108 fsbtodb(fs, blk), (int)fs->e2fs_bsize, NOCRED, &bp)) != 0) {
113 error = ext4_ext_walk_header(ip,
114 (struct ext4_extent_header *)bp->b_data, depth);
122 ext4_ext_walk_extent(struct inode *ip, struct ext4_extent *ep)
128 error = ext4_ext_blk_check(ip, blk);
134 ep, le32toh(ep->e_blk), le16toh(ep->e_len),
141 ext4_ext_walk_header(struct inode *ip, struct ext4_extent_header *eh, int depth)
145 error = ext4_ext_check_header(ip, eh, depth);
151 eh, le16toh(eh->eh_ecount),
152 le16toh(eh->eh_max), le16toh(eh->eh_depth),
153 le32toh(eh->eh_gen));
155 for (i = 0; i < le16toh(eh->eh_ecount) && error == 0; i++)
156 if (eh->eh_depth != 0)
157 error = ext4_ext_walk_index(ip,
158 (struct ext4_extent_index *)(eh + 1 + i), depth - 1,
161 error = ext4_ext_walk_extent(ip,
168 ext4_ext_walk(struct inode *ip)
172 ehp = (struct ext4_extent_header *)ip->i_db;
175 printf("Extent status:ip=%ju\n", ip->i_number);
177 if (!(ip->i_flag & IN_E4EXTENTS))
180 return (ext4_ext_walk_header(ip, ehp, 0));
184 ext4_ext_print_path(struct inode *ip, struct ext4_extent_path *path)
188 depth = path->ep_depth;
191 printf("ip=%ju, Path:\n", (uintmax_t)ip->i_number);
194 if (path->ep_index) {
195 error = ext4_ext_walk_index(ip, path->ep_index,
196 depth - 1, false);
197 } else if (path->ep_ext) {
198 error = ext4_ext_walk_extent(ip, path->ep_ext);
207 ext4_ext_inode_header(struct inode *ip)
210 return ((struct ext4_extent_header *)ip->i_db);
221 ext4_ext_inode_depth(struct inode *ip)
225 ehp = (struct ext4_extent_header *)ip->i_data;
226 return (le16toh(ehp->eh_depth));
234 blk = le32toh(index->ei_leaf_lo);
235 blk |= (e4fs_daddr_t)le16toh(index->ei_leaf_hi) << 32;
244 index->ei_leaf_lo = htole32(pb & 0xffffffff);
245 index->ei_leaf_hi = htole16((pb >> 32) & 0xffff);
253 blk = le32toh(extent->e_start_lo);
254 blk |= (e4fs_daddr_t)le16toh(extent->e_start_hi) << 32;
263 ex->e_start_lo = htole32(pb & 0xffffffff);
264 ex->e_start_hi = htole16((pb >> 32) & 0xffff);
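The lo/hi field pairs at 234-264 pack a 48-bit physical block number into a 32-bit low half and a 16-bit high half. A minimal userland sketch of the same packing, with the byte-swapping (le32toh/htole32, le16toh/htole16) left out and plain stdint types standing in for the on-disk structs:

    #include <stdint.h>
    #include <stdio.h>

    /* Reassemble a 48-bit block number from its two on-disk halves. */
    uint64_t
    pblock_from_halves(uint32_t lo, uint16_t hi)
    {
            return ((uint64_t)hi << 32 | lo);
    }

    /* Split a 48-bit block number back into the two halves. */
    void
    pblock_to_halves(uint64_t pb, uint32_t *lo, uint16_t *hi)
    {
            *lo = pb & 0xffffffff;
            *hi = (pb >> 32) & 0xffff;
    }

    int
    main(void)
    {
            uint32_t lo;
            uint16_t hi;

            pblock_to_halves(0x123456789aULL, &lo, &hi);
            printf("%#jx\n", (uintmax_t)pblock_from_halves(lo, hi)); /* 0x123456789a */
            return (0);
    }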
268 ext4_ext_in_cache(struct inode *ip, daddr_t lbn, struct ext4_extent *ep)
273 ecp = &ip->i_ext_cache;
274 if (ecp->ec_type == EXT4_EXT_CACHE_NO)
277 if (lbn >= ecp->ec_blk && lbn < ecp->ec_blk + ecp->ec_len) {
278 ep->e_blk = htole32(ecp->ec_blk);
279 ep->e_start_lo = htole32(ecp->ec_start & 0xffffffff);
280 ep->e_start_hi = htole16(ecp->ec_start >> 32 & 0xffff);
281 ep->e_len = htole16(ecp->ec_len);
282 ret = ecp->ec_type;
288 ext4_ext_space_root(struct inode *ip)
292 size = sizeof(ip->i_data);
293 size -= sizeof(struct ext4_extent_header);
300 ext4_ext_space_block(struct inode *ip)
305 fs = ip->i_e2fs;
307 size = (fs->e2fs_bsize - sizeof(struct ext4_extent_header)) /
314 ext4_ext_space_root_idx(struct inode *ip)
318 size = sizeof(ip->i_data);
319 size -= sizeof(struct ext4_extent_header);
326 ext4_ext_space_block_idx(struct inode *ip)
331 fs = ip->i_e2fs;
333 size = (fs->e2fs_bsize - sizeof(struct ext4_extent_header)) /
340 ext4_ext_max_entries(struct inode *ip, int depth)
343 if (depth == ext4_ext_inode_depth(ip)) {
345 return (ext4_ext_space_root(ip));
347 return (ext4_ext_space_root_idx(ip));
350 return (ext4_ext_space_block(ip));
352 return (ext4_ext_space_block_idx(ip));
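The ext4_ext_space_root*() and ext4_ext_space_block*() helpers above compute how many entries a tree node can hold, which is what eh_max is later checked against in ext4_ext_check_header(). A back-of-the-envelope sketch, assuming the standard 12-byte on-disk header and entry sizes and a 60-byte in-inode i_data area (these sizes come from the ext4 on-disk format, not from this listing):

    #include <stdio.h>

    int
    main(void)
    {
            const unsigned hdr = 12;        /* sizeof(struct ext4_extent_header) */
            const unsigned entry = 12;      /* extent and index entries are both 12 bytes */
            const unsigned inode_area = 60; /* sizeof(ip->i_data) */
            const unsigned bsize = 4096;    /* example fs->e2fs_bsize */

            /* Root node lives inside the inode: (60 - 12) / 12 = 4 entries. */
            printf("root: %u entries\n", (inode_area - hdr) / entry);
            /* Lower-level nodes fill a filesystem block: (4096 - 12) / 12 = 340 entries. */
            printf("block: %u entries\n", (bsize - hdr) / entry);
            return (0);
    }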
360 return (le16toh(ext->e_len) <= EXT_INIT_MAX_LEN ?
361 le16toh(ext->e_len) : (le16toh(ext->e_len) - EXT_INIT_MAX_LEN));
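ext4_ext_get_actual_len() at 360-361 undoes the unwritten-extent encoding of e_len: values above EXT_INIT_MAX_LEN mark an unwritten extent whose real length is e_len - EXT_INIT_MAX_LEN. A standalone sketch, assuming the usual EXT_INIT_MAX_LEN of 1 << 15:

    #include <stdint.h>

    #define INIT_MAX_LEN (1U << 15)   /* assumed value of EXT_INIT_MAX_LEN */

    /* Decode the stored length; large values flag unwritten extents. */
    uint16_t
    actual_len(uint16_t stored)
    {
            return (stored <= INIT_MAX_LEN ? stored : stored - INIT_MAX_LEN);
    }

    int
    is_unwritten(uint16_t stored)
    {
            return (stored > INIT_MAX_LEN);
    }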
366 ext4_inode_block_validate(struct inode *ip, e4fs_daddr_t start_blk,
371 fs = ip->i_e2fs;
373 if ((start_blk <= le32toh(fs->e2fs->e2fs_first_dblock)) ||
375 (start_blk + count > fs->e2fs_bcount))
382 ext4_validate_extent(struct inode *ip, struct ext4_extent *ext)
385 uint32_t lblk = le32toh(ext->e_blk);
391 return (ext4_inode_block_validate(ip, blk, len));
395 ext4_validate_extent_idx(struct inode *ip, struct ext4_extent_index *ext_idx)
399 return (ext4_inode_block_validate(ip, blk, 1));
403 ext4_validate_extent_entries(struct inode *ip, struct ext4_extent_header *eh,
408 count = le16toh(eh->eh_ecount);
419 if (ext4_validate_extent(ip, ext))
423 lblk = le32toh(ext->e_blk);
429 count--;
430 prev = lblk + len - 1;
435 if (ext4_validate_extent_idx(ip, ext_idx))
439 count--;
447 ext4_ext_check_header(struct inode *ip, struct ext4_extent_header *eh,
456 if (le16toh(eh->eh_magic) != EXT4_EXT_MAGIC) {
460 if (le16toh(eh->eh_depth) != depth ||
461 le16toh(eh->eh_depth) > EXT4_EXT_DEPTH_MAX)
466 if (eh->eh_max == 0) {
470 if (le16toh(eh->eh_max) > ext4_ext_max_entries(ip, depth)) {
474 if (le16toh(eh->eh_ecount) > le16toh(eh->eh_max)) {
478 if (le16toh(eh->eh_depth) > EXT4_EXT_DEPTH_MAX) {
482 if (ext4_validate_extent_entries(ip, eh, depth)) {
500 eh = path->ep_header;
502 KASSERT(le16toh(eh->eh_ecount) <= le16toh(eh->eh_max) &&
503 le16toh(eh->eh_ecount) > 0,
507 r = EXT_FIRST_INDEX(eh) + le16toh(eh->eh_ecount) - 1;
509 m = l + (r - l) / 2;
510 if (blk < le32toh(m->ei_blk))
511 r = m - 1;
516 path->ep_index = l - 1;
525 eh = path->ep_header;
527 KASSERT(le16toh(eh->eh_ecount) <= le16toh(eh->eh_max),
530 if (eh->eh_ecount == 0)
534 r = EXT_FIRST_EXTENT(eh) + le16toh(eh->eh_ecount) - 1;
537 m = l + (r - l) / 2;
538 if (blk < le32toh(m->e_blk))
539 r = m - 1;
544 path->ep_ext = l - 1;
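Both binary searches above (507-516 over indexes, 534-544 over extents) find the last entry whose starting logical block is <= the lookup block, which is why the answer is l - 1 once the loop terminates. A detached sketch of the same loop shape over a sorted array of host-order keys (hypothetical helper, not part of the driver):

    #include <stddef.h>
    #include <stdint.h>

    /*
     * Return the index of the last key <= blk, assuming keys[] is sorted
     * ascending, nkeys > 0, and keys[0] <= blk (the first entry always
     * covers the lookup block, as the on-disk header guarantees).
     */
    size_t
    find_last_le(const uint32_t *keys, size_t nkeys, uint32_t blk)
    {
            size_t l, r, m;

            l = 1;
            r = nkeys - 1;
            while (l <= r) {
                    m = l + (r - l) / 2;
                    if (blk < keys[m])
                            r = m - 1;
                    else
                            l = m + 1;
            }
            return (l - 1);
    }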
552 KASSERT(path->ep_data == NULL,
555 path->ep_data = malloc(bp->b_bufsize, M_EXT2EXTENTS, M_WAITOK);
556 memcpy(path->ep_data, bp->b_data, bp->b_bufsize);
557 path->ep_blk = blk;
566 KASSERT(path->ep_data != NULL,
569 memcpy(bp->b_data, path->ep_data, bp->b_bufsize);
580 depth = path->ep_depth;
582 if (path->ep_data) {
583 free(path->ep_data, M_EXT2EXTENTS);
584 path->ep_data = NULL;
600 ext4_ext_find_extent(struct inode *ip, daddr_t block,
609 eh = ext4_ext_inode_header(ip);
610 depth = ext4_ext_inode_depth(ip);
614 error = ext4_ext_check_header(ip, eh, depth);
636 ext4_ext_binsearch_index(&path[ppos], block);
641 error = bread(ip->i_devvp, fsbtodb(ip->i_e2fs, blk),
642 ip->i_e2fs->e2fs_bsize, NOCRED, &bp);
660 if (ext4_ext_check_header(ip, eh, i - 1) ||
661 ext2_extent_blk_csum_verify(ip, path[ppos].ep_data)) {
668 i--;
671 error = ext4_ext_check_header(ip, eh, 0);
680 ext4_ext_binsearch_ext(&path[ppos], block);
694 ext4_ext_space_block_index(struct inode *ip)
699 fs = ip->i_e2fs;
701 size = (fs->e2fs_bsize - sizeof(struct ext4_extent_header)) /
708 ext4_ext_tree_init(struct inode *ip)
712 ip->i_flag |= IN_E4EXTENTS;
714 memset(ip->i_data, 0, sizeof(ip->i_data));
715 ehp = (struct ext4_extent_header *)ip->i_data;
716 ehp->eh_magic = htole16(EXT4_EXT_MAGIC);
717 ehp->eh_max = htole16(ext4_ext_space_root(ip));
718 ip->i_ext_cache.ec_type = EXT4_EXT_CACHE_NO;
719 ip->i_flag |= IN_CHANGE | IN_UPDATE;
720 ext2_update(ip->i_vnode, 1);
724 ext4_ext_put_in_cache(struct inode *ip, uint32_t blk,
730 ip->i_ext_cache.ec_type = type;
731 ip->i_ext_cache.ec_blk = blk;
732 ip->i_ext_cache.ec_len = len;
733 ip->i_ext_cache.ec_start = start;
737 ext4_ext_blkpref(struct inode *ip, struct ext4_extent_path *path,
738 e4fs_daddr_t block)
745 fs = ip->i_e2fs;
748 depth = path->ep_depth;
752 e2fs_daddr_t blk = le32toh(ex->e_blk);
754 if (block > blk)
755 return (pblk + (block - blk));
757 return (pblk - (blk - block));
760 /* Try to get block from index itself. */
766 bg_start = (ip->i_block_group * EXT2_BLOCKS_PER_GROUP(ip->i_e2fs)) +
767 le32toh(fs->e2fs->e2fs_first_dblock);
769 return (bg_start + block);
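ext4_ext_blkpref() picks the allocation hint used later by ext4_new_blocks(): stay physically adjacent to the nearest extent in the leaf when there is one, otherwise fall back to the start of the inode's block group. A simplified sketch of that decision, with made-up parameters standing in for the path and extent lookups:

    #include <stdint.h>

    /*
     * have_ex  - nonzero if the leaf already holds an extent near 'block'
     * ex_lblk  - that extent's first logical block
     * ex_pblk  - that extent's first physical block
     * bg_start - first data block of the inode's block group
     */
    uint64_t
    blkpref(int have_ex, uint32_t ex_lblk, uint64_t ex_pblk,
        uint64_t bg_start, uint32_t block)
    {
            if (have_ex) {
                    /* Keep the same logical-to-physical distance as the neighbour. */
                    if (block > ex_lblk)
                            return (ex_pblk + (block - ex_lblk));
                    return (ex_pblk - (ex_lblk - block));
            }
            /* No neighbouring extent: start from the inode's block group. */
            return (bg_start + block);
    }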
777 if (le32toh(ex1->e_blk) + le16toh(ex1->e_len) != le32toh(ex2->e_blk))
780 if (le16toh(ex1->e_len) + le16toh(ex2->e_len) > EXT4_MAX_LEN)
783 if (ext4_ext_extent_pblock(ex1) + le16toh(ex1->e_len) ==
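The checks at 777-783 decide whether two extents can be coalesced: logically back-to-back, combined length within the e_len cap, and physically contiguous. The same predicate over already byte-swapped values, as a sketch (max_len stands in for EXT4_MAX_LEN, whose value is not shown in this listing):

    #include <stdint.h>

    int
    can_merge(uint32_t lblk1, uint16_t len1, uint64_t pblk1,
        uint32_t lblk2, uint16_t len2, uint64_t pblk2, uint32_t max_len)
    {
            if (lblk1 + len1 != lblk2)              /* logically adjacent? */
                    return (0);
            if ((uint32_t)len1 + len2 > max_len)    /* merged length still fits? */
                    return (0);
            return (pblk1 + len1 == pblk2);         /* physically adjacent? */
    }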
791 ext4_ext_next_leaf_block(struct inode *ip, struct ext4_extent_path *path)
793 int depth = path->ep_depth;
800 depth--;
807 depth--;
814 ext4_ext_dirty(struct inode *ip, struct ext4_extent_path *path)
821 fs = ip->i_e2fs;
826 if (path->ep_data) {
827 blk = path->ep_blk;
828 bp = getblk(ip->i_devvp, fsbtodb(fs, blk),
829 fs->e2fs_bsize, 0, 0, 0);
833 ext2_extent_blk_csum_set(ip, bp->b_data);
836 ip->i_flag |= IN_CHANGE | IN_UPDATE;
837 error = ext2_update(ip->i_vnode, 1);
844 ext4_ext_insert_index(struct inode *ip, struct ext4_extent_path *path,
850 if (lblk == le32toh(path->ep_index->ei_blk)) {
856 if (le16toh(path->ep_header->eh_ecount) >=
857 le16toh(path->ep_header->eh_max)) {
863 if (lblk > le32toh(path->ep_index->ei_blk)) {
865 idx = path->ep_index + 1;
868 idx = path->ep_index;
871 len = EXT_LAST_INDEX(path->ep_header) - idx + 1;
875 if (idx > EXT_MAX_INDEX(path->ep_header)) {
881 idx->ei_blk = htole32(lblk);
883 path->ep_header->eh_ecount =
884 htole16(le16toh(path->ep_header->eh_ecount) + 1);
886 return (ext4_ext_dirty(ip, path));
890 ext4_ext_alloc_meta(struct inode *ip)
892 e4fs_daddr_t blk = ext2_alloc_meta(ip);
894 ip->i_blocks += btodb(ip->i_e2fs->e2fs_bsize);
895 ip->i_flag |= IN_CHANGE | IN_UPDATE;
896 ext2_update(ip->i_vnode, 1);
903 ext4_ext_blkfree(struct inode *ip, uint64_t blk, int count, int flags)
908 fs = ip->i_e2fs;
912 ext2_blkfree(ip, blk + i, fs->e2fs_bsize);
914 if (ip->i_blocks >= blocksreleased)
915 ip->i_blocks -= (btodb(fs->e2fs_bsize) * blocksreleased);
917 ip->i_blocks = 0;
919 ip->i_flag |= IN_CHANGE | IN_UPDATE;
920 ext2_update(ip->i_vnode, 1);
924 ext4_ext_split(struct inode *ip, struct ext4_extent_path *path,
929 int depth = ext4_ext_inode_depth(ip);
939 fs = ip->i_e2fs;
954 border = le32toh(newext->e_blk);
959 for (a = 0; a < depth - at; a++) {
960 newblk = ext4_ext_alloc_meta(ip);
966 newblk = ablks[--a];
967 bp = getblk(ip->i_devvp, fsbtodb(fs, newblk), fs->e2fs_bsize, 0, 0, 0);
973 neh = ext4_ext_block_header(bp->b_data);
974 neh->eh_ecount = 0;
975 neh->eh_max = htole16(ext4_ext_space_block(ip));
976 neh->eh_magic = htole16(EXT4_EXT_MAGIC);
977 neh->eh_depth = 0;
980 if (le16toh(path[depth].ep_header->eh_ecount) !=
981 le16toh(path[depth].ep_header->eh_max)) {
996 memmove(ex, path[depth].ep_ext - m,
998 neh->eh_ecount = htole16(le16toh(neh->eh_ecount) + m);
1001 ext2_extent_blk_csum_set(ip, bp->b_data);
1007 path[depth].ep_header->eh_ecount =
1008 htole16(le16toh(path[depth].ep_header->eh_ecount) - m);
1009 ext4_ext_dirty(ip, path + depth);
1013 k = depth - at - 1;
1016 /* Insert new index into current index block. */
1017 i = depth - 1;
1018 while (k--) {
1020 newblk = ablks[--a];
1021 error = bread(ip->i_devvp, fsbtodb(fs, newblk),
1022 (int)fs->e2fs_bsize, NOCRED, &bp);
1027 neh = (struct ext4_extent_header *)bp->b_data;
1028 neh->eh_ecount = htole16(1);
1029 neh->eh_magic = htole16(EXT4_EXT_MAGIC);
1030 neh->eh_max = htole16(ext4_ext_space_block_index(ip));
1031 neh->eh_depth = htole16(depth - i);
1033 fidx->ei_blk = htole32(border);
1043 memmove(++fidx, path[i].ep_index - m,
1045 neh->eh_ecount = htole16(le16toh(neh->eh_ecount) + m);
1048 ext2_extent_blk_csum_set(ip, bp->b_data);
1054 path[i].ep_header->eh_ecount =
1055 htole16(le16toh(path[i].ep_header->eh_ecount) - m);
1056 ext4_ext_dirty(ip, path + i);
1059 i--;
1062 error = ext4_ext_insert_index(ip, path + at, border, newblk);
1072 ext4_ext_blkfree(ip, ablks[i], 1, 0);
1082 ext4_ext_grow_indepth(struct inode *ip, struct ext4_extent_path *path,
1092 fs = ip->i_e2fs;
1095 newblk = ext4_ext_alloc_meta(ip);
1099 bp = getblk(ip->i_devvp, fsbtodb(fs, newblk), fs->e2fs_bsize, 0, 0, 0);
1101 ext4_ext_blkfree(ip, newblk, 1, 0);
1105 /* Move top-level index/leaf into new block. */
1106 memmove(bp->b_data, curpath->ep_header, sizeof(ip->i_data));
1108 /* Set size of new block */
1109 neh = ext4_ext_block_header(bp->b_data);
1110 neh->eh_magic = htole16(EXT4_EXT_MAGIC);
1112 if (ext4_ext_inode_depth(ip))
1113 neh->eh_max = htole16(ext4_ext_space_block_index(ip));
1115 neh->eh_max = htole16(ext4_ext_space_block(ip));
1117 ext2_extent_blk_csum_set(ip, bp->b_data);
1120 ext4_ext_blkfree(ip, newblk, 1, 0);
1126 curpath->ep_header->eh_magic = htole16(EXT4_EXT_MAGIC);
1127 curpath->ep_header->eh_max = htole16(ext4_ext_space_root(ip));
1128 curpath->ep_header->eh_ecount = htole16(1);
1129 curpath->ep_index = EXT_FIRST_INDEX(curpath->ep_header);
1130 curpath->ep_index->ei_blk = EXT_FIRST_EXTENT(path[0].ep_header)->e_blk;
1131 ext4_index_store_pblock(curpath->ep_index, newblk);
1133 neh = ext4_ext_inode_header(ip);
1134 neh->eh_depth = htole16(path->ep_depth + 1);
1135 ext4_ext_dirty(ip, curpath);
1143 ext4_ext_create_new_leaf(struct inode *ip, struct ext4_extent_path *path,
1150 i = depth = ext4_ext_inode_depth(ip);
1155 i--;
1156 curpath--;
1160 * We use already allocated block for index block,
1164 error = ext4_ext_split(ip, path, newext, i);
1170 error = ext4_ext_find_extent(ip, le32toh(newext->e_blk), &path);
1175 error = ext4_ext_grow_indepth(ip, path, newext);
1181 error = ext4_ext_find_extent(ip, le32toh(newext->e_blk), &path);
1186 depth = ext4_ext_inode_depth(ip);
1187 if (le16toh(path[depth].ep_header->eh_ecount) ==
1188 le16toh(path[depth].ep_header->eh_max))
1197 ext4_ext_correct_indexes(struct inode *ip, struct ext4_extent_path *path)
1204 depth = ext4_ext_inode_depth(ip);
1218 k = depth - 1;
1219 border = le32toh(path[depth].ep_ext->e_blk);
1220 path[k].ep_index->ei_blk = htole32(border);
1221 ext4_ext_dirty(ip, path + k);
1222 while (k--) {
1223 /* Change all left-side indexes. */
1227 path[k].ep_index->ei_blk = htole32(border);
1228 ext4_ext_dirty(ip, path + k);
1235 ext4_ext_insert_extent(struct inode *ip, struct ext4_extent_path *path,
1243 depth = ext4_ext_inode_depth(ip);
1247 if (le16toh(newext->e_len) == 0 || path[depth].ep_header == NULL)
1250 /* Insert block into found extent. */
1252 ex->e_len = htole16(le16toh(ex->e_len) + le16toh(newext->e_len));
1259 depth = ext4_ext_inode_depth(ip);
1261 if (le16toh(eh->eh_ecount) < le16toh(eh->eh_max))
1266 next = ext4_ext_next_leaf_block(ip, path);
1267 if (le32toh(newext->e_blk) > le32toh(nex->e_blk) && next !=
1272 error = ext4_ext_find_extent(ip, next, &npath);
1276 if (npath->ep_depth != path->ep_depth) {
1282 if (le16toh(eh->eh_ecount) < le16toh(eh->eh_max)) {
1292 error = ext4_ext_create_new_leaf(ip, path, newext);
1296 depth = ext4_ext_inode_depth(ip);
1304 } else if (le32toh(newext->e_blk) > le32toh(nearex->e_blk)) {
1306 len = EXT_MAX_EXTENT(eh) - nearex;
1307 len = (len - 1) * sizeof(struct ext4_extent);
1313 len = (EXT_MAX_EXTENT(eh) - nearex) * sizeof(struct ext4_extent);
1319 eh->eh_ecount = htole16(le16toh(eh->eh_ecount) + 1);
1321 nearex->e_blk = newext->e_blk;
1322 nearex->e_start_lo = newext->e_start_lo;
1323 nearex->e_start_hi = newext->e_start_hi;
1324 nearex->e_len = newext->e_len;
1333 nearex->e_len = htole16(le16toh(nearex->e_len) +
1336 len = (EXT_LAST_EXTENT(eh) - nearex - 1) *
1341 eh->eh_ecount = htole16(le16toh(eh->eh_ecount) - 1);
1342 KASSERT(le16toh(eh->eh_ecount) != 0,
1350 error = ext4_ext_correct_indexes(ip, path);
1354 ext4_ext_dirty(ip, path + depth);
1362 ip->i_ext_cache.ec_type = EXT4_EXT_CACHE_NO;
1367 ext4_new_blocks(struct inode *ip, daddr_t lbn, e4fs_daddr_t pref,
1374 * We will allocate only single block for now.
1379 fs = ip->i_e2fs;
1380 EXT2_LOCK(ip->i_ump);
1381 *perror = ext2_alloc(ip, lbn, pref, (int)fs->e2fs_bsize, cred, &newblk);
1386 ip->i_flag |= IN_CHANGE | IN_UPDATE;
1387 ext2_update(ip->i_vnode, 1);
1394 ext4_ext_get_blocks(struct inode *ip, e4fs_daddr_t iblk,
1412 if ((bpref = ext4_ext_in_cache(ip, iblk, &newex))) {
1414 /* Block is already allocated. */
1415 newblk = iblk - le32toh(newex.e_blk) +
1417 allocated = le16toh(newex.e_len) - (iblk - le32toh(newex.e_blk));
1425 error = ext4_ext_find_extent(ip, iblk, &path);
1430 depth = ext4_ext_inode_depth(ip);
1437 uint64_t lblk = le32toh(ex->e_blk);
1438 uint16_t e_len = le16toh(ex->e_len);
1444 /* If we found extent covers block, simply return it. */
1446 newblk = iblk - lblk + e_start;
1447 allocated = e_len - (iblk - lblk);
1448 ext4_ext_put_in_cache(ip, lblk, e_len,
1454 /* Allocate the new block. */
1455 if (S_ISREG(ip->i_mode) && (!ip->i_next_alloc_block)) {
1456 ip->i_next_alloc_goal = 0;
1459 bpref = ext4_ext_blkpref(ip, path, iblk);
1461 newblk = ext4_new_blocks(ip, iblk, bpref, cred, &allocated, &error);
1469 error = ext4_ext_insert_extent(ip, path, &newex);
1474 ext4_ext_put_in_cache(ip, iblk, allocated, newblk, EXT4_EXT_CACHE_IN);
1483 fs = ip->i_e2fs;
1484 error = bread(ip->i_devvp, fsbtodb(fs, newblk),
1485 fs->e2fs_bsize, cred, &bp);
1506 ext4_ext_header(struct inode *ip)
1509 return ((struct ext4_extent_header *)ip->i_db);
1513 ext4_remove_blocks(struct inode *ip, struct ext4_extent *ex,
1518 if (from >= le32toh(ex->e_blk) &&
1519 to == le32toh(ex->e_blk) + ext4_ext_get_actual_len(ex) - 1) {
1521 num = le32toh(ex->e_blk) + ext4_ext_get_actual_len(ex) - from;
1523 ext4_ext_get_actual_len(ex) - num;
1524 ext4_ext_blkfree(ip, start, num, 0);
1531 ext4_ext_rm_index(struct inode *ip, struct ext4_extent_path *path)
1535 /* Free index block. */
1536 path--;
1537 leaf = ext4_ext_index_pblock(path->ep_index);
1538 KASSERT(path->ep_header->eh_ecount != 0,
1540 path->ep_header->eh_ecount =
1541 htole16(le16toh(path->ep_header->eh_ecount) - 1);
1542 ext4_ext_dirty(ip, path);
1543 ext4_ext_blkfree(ip, leaf, 1, 0);
1548 ext4_ext_rm_leaf(struct inode *ip, struct ext4_extent_path *path,
1553 unsigned int a, b, block, num;
1559 depth = ext4_ext_inode_depth(ip);
1575 ex_blk = le32toh(ex->e_blk);
1583 b = (uint64_t)ex_blk + ex_len - 1 <
1584 EXT4_MAX_BLOCKS ? ex_blk + ex_len - 1 : EXT4_MAX_BLOCKS;
1586 if (a != ex_blk && b != ex_blk + ex_len - 1)
1590 block = ex_blk;
1591 num = a - block;
1592 } else if (b != ex_blk + ex_len - 1) {
1597 block = ex_blk;
1604 error = ext4_remove_blocks(ip, ex, a, b);
1610 eh->eh_ecount = htole16(le16toh(eh->eh_ecount) - 1);
1613 ex->e_blk = htole32(block);
1614 ex->e_len = htole16(num);
1616 ext4_ext_dirty(ip, path + depth);
1618 ex--;
1619 ex_blk = le32toh(ex->e_blk);
1623 if (correct_index && le16toh(eh->eh_ecount))
1624 error = ext4_ext_correct_indexes(ip, path);
1628 * remove it from index block above.
1630 if (error == 0 && eh->eh_ecount == 0 &&
1632 error = ext4_ext_rm_index(ip, path + depth);
1639 ext4_read_extent_tree_block(struct inode *ip, e4fs_daddr_t pblk,
1647 fs = ip->i_e2fs;
1648 error = bread(ip->i_devvp, fsbtodb(fs, pblk),
1649 fs->e2fs_bsize, NOCRED, &bp);
1654 eh = ext4_ext_block_header(bp->b_data);
1655 if (le16toh(eh->eh_depth) != depth) {
1661 error = ext4_ext_check_header(ip, eh, depth);
1677 KASSERT(path->ep_index != NULL,
1680 if (path->ep_index < EXT_FIRST_INDEX(path->ep_header))
1683 if (le16toh(path->ep_header->eh_ecount) == path->index_count)
1690 ext4_ext_remove_space(struct inode *ip, off_t length, int flags,
1699 ehp = (struct ext4_extent_header *)ip->i_db;
1700 depth = ext4_ext_inode_depth(ip);
1702 error = ext4_ext_check_header(ip, ehp, depth);
1714 error = ext4_ext_rm_leaf(ip, path, length);
1719 i--;
1732 le16toh(path[i].ep_header->eh_ecount) + 1;
1735 path[i].ep_index--;
1740 bp = ext4_read_extent_tree_block(ip,
1742 path[0].ep_depth - (i + 1), 0);
1752 le16toh(path[i].ep_header->eh_ecount);
1755 if (path[i].ep_header->eh_ecount == 0 && i > 0) {
1757 error = ext4_ext_rm_index(ip, path + i);
1761 i--;
1765 if (path->ep_header->eh_ecount == 0) {
1769 ext4_ext_header(ip)->eh_depth = 0;
1770 ext4_ext_header(ip)->eh_max = htole16(ext4_ext_space_root(ip));
1771 ext4_ext_dirty(ip, path);
1777 ip->i_ext_cache.ec_type = EXT4_EXT_CACHE_NO;