Lines Matching +full:level +full:- +full:high
1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
49 __be32 magic = ops->buf_ops->magic[idx]; in xfs_btree_magic()
51 /* Ensure we asked for crc for crc-only magics. */ in xfs_btree_magic()
63 * on x86-64. Yes, gcc-11 fails to inline them, and explicit inlining of these
128 int level, in __xfs_btree_check_lblock_hdr() argument
131 struct xfs_mount *mp = cur->bc_mp; in __xfs_btree_check_lblock_hdr()
134 if (!uuid_equal(&block->bb_u.l.bb_uuid, &mp->m_sb.sb_meta_uuid)) in __xfs_btree_check_lblock_hdr()
136 if (block->bb_u.l.bb_blkno != in __xfs_btree_check_lblock_hdr()
139 if (block->bb_u.l.bb_pad != cpu_to_be32(0)) in __xfs_btree_check_lblock_hdr()
143 if (be32_to_cpu(block->bb_magic) != xfs_btree_magic(mp, cur->bc_ops)) in __xfs_btree_check_lblock_hdr()
145 if (be16_to_cpu(block->bb_level) != level) in __xfs_btree_check_lblock_hdr()
147 if (be16_to_cpu(block->bb_numrecs) > in __xfs_btree_check_lblock_hdr()
148 cur->bc_ops->get_maxrecs(cur, level)) in __xfs_btree_check_lblock_hdr()
162 int level, in __xfs_btree_check_fsblock() argument
165 struct xfs_mount *mp = cur->bc_mp; in __xfs_btree_check_fsblock()
169 fa = __xfs_btree_check_lblock_hdr(cur, block, level, bp); in __xfs_btree_check_fsblock()
174 * For inode-rooted btrees, the root block sits in the inode fork. In in __xfs_btree_check_fsblock()
178 if (block->bb_u.l.bb_leftsib != cpu_to_be64(NULLFSBLOCK)) in __xfs_btree_check_fsblock()
180 if (block->bb_u.l.bb_rightsib != cpu_to_be64(NULLFSBLOCK)) in __xfs_btree_check_fsblock()
187 block->bb_u.l.bb_leftsib); in __xfs_btree_check_fsblock()
190 block->bb_u.l.bb_rightsib); in __xfs_btree_check_fsblock()
195 * Check an in-memory btree block header. Return the address of the failing
202 int level, in __xfs_btree_check_memblock() argument
205 struct xfs_buftarg *btp = cur->bc_mem.xfbtree->target; in __xfs_btree_check_memblock()
209 fa = __xfs_btree_check_lblock_hdr(cur, block, level, bp); in __xfs_btree_check_memblock()
215 block->bb_u.l.bb_leftsib); in __xfs_btree_check_memblock()
218 block->bb_u.l.bb_rightsib); in __xfs_btree_check_memblock()
230 int level, in __xfs_btree_check_agblock() argument
233 struct xfs_mount *mp = cur->bc_mp; in __xfs_btree_check_agblock()
234 struct xfs_perag *pag = to_perag(cur->bc_group); in __xfs_btree_check_agblock()
239 if (!uuid_equal(&block->bb_u.s.bb_uuid, &mp->m_sb.sb_meta_uuid)) in __xfs_btree_check_agblock()
241 if (block->bb_u.s.bb_blkno != cpu_to_be64(xfs_buf_daddr(bp))) in __xfs_btree_check_agblock()
245 if (be32_to_cpu(block->bb_magic) != xfs_btree_magic(mp, cur->bc_ops)) in __xfs_btree_check_agblock()
247 if (be16_to_cpu(block->bb_level) != level) in __xfs_btree_check_agblock()
249 if (be16_to_cpu(block->bb_numrecs) > in __xfs_btree_check_agblock()
250 cur->bc_ops->get_maxrecs(cur, level)) in __xfs_btree_check_agblock()
255 block->bb_u.s.bb_leftsib); in __xfs_btree_check_agblock()
258 block->bb_u.s.bb_rightsib); in __xfs_btree_check_agblock()
271 int level, in __xfs_btree_check_block() argument
274 switch (cur->bc_ops->type) { in __xfs_btree_check_block()
276 return __xfs_btree_check_memblock(cur, block, level, bp); in __xfs_btree_check_block()
278 return __xfs_btree_check_agblock(cur, block, level, bp); in __xfs_btree_check_block()
280 return __xfs_btree_check_fsblock(cur, block, level, bp); in __xfs_btree_check_block()
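These checks report the first failing test by returning its location (the fa value) rather than a plain pass or fail, so a caller such as xfs_btree_check_block() can tell exactly which check tripped. A standalone sketch of that pattern, with invented toy_* names and a string label standing in for xfs_failaddr_t and __this_address:

#include <stdio.h>

/*
 * Standalone sketch of the "return the failing check's location" pattern:
 * each failed check returns a label identifying it (the real code returns
 * the code address via __this_address); NULL means the header passed.
 * The toy_* names and field sizes are made up for illustration.
 */
struct toy_hdr {
	unsigned int	magic;
	unsigned short	level;
	unsigned short	numrecs;
};

static const char *
toy_check_hdr(
	const struct toy_hdr	*hdr,
	unsigned int		want_magic,
	int			level,
	int			maxrecs)
{
	if (hdr->magic != want_magic)
		return "bad magic";
	if (hdr->level != level)
		return "level mismatch";
	if (hdr->numrecs > maxrecs)
		return "too many records";
	return NULL;			/* all checks passed */
}

int main(void)
{
	struct toy_hdr	hdr = { 0x12345678, 1, 500 };
	const char	*fa = toy_check_hdr(&hdr, 0x12345678, 1, 100);

	printf("verifier says: %s\n", fa ? fa : "ok");
	return 0;
}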
289 if (cur->bc_ops->ptr_len == XFS_BTREE_SHORT_PTR_LEN) in xfs_btree_block_errtag()
301 int level, /* level of the btree block */ in xfs_btree_check_block() argument
304 struct xfs_mount *mp = cur->bc_mp; in xfs_btree_check_block()
307 fa = __xfs_btree_check_block(cur, block, level, bp); in xfs_btree_check_block()
313 return -EFSCORRUPTED; in xfs_btree_check_block()
323 int level) in __xfs_btree_check_ptr() argument
325 if (level <= 0) in __xfs_btree_check_ptr()
326 return -EFSCORRUPTED; in __xfs_btree_check_ptr()
328 switch (cur->bc_ops->type) { in __xfs_btree_check_ptr()
330 if (!xfbtree_verify_bno(cur->bc_mem.xfbtree, in __xfs_btree_check_ptr()
331 be64_to_cpu((&ptr->l)[index]))) in __xfs_btree_check_ptr()
332 return -EFSCORRUPTED; in __xfs_btree_check_ptr()
335 if (!xfs_verify_fsbno(cur->bc_mp, in __xfs_btree_check_ptr()
336 be64_to_cpu((&ptr->l)[index]))) in __xfs_btree_check_ptr()
337 return -EFSCORRUPTED; in __xfs_btree_check_ptr()
340 if (!xfs_verify_agbno(to_perag(cur->bc_group), in __xfs_btree_check_ptr()
341 be32_to_cpu((&ptr->s)[index]))) in __xfs_btree_check_ptr()
342 return -EFSCORRUPTED; in __xfs_btree_check_ptr()
350 * Check that a given (indexed) btree pointer at a certain level of a
358 int level) in xfs_btree_check_ptr() argument
362 error = __xfs_btree_check_ptr(cur, ptr, index, level); in xfs_btree_check_ptr()
364 switch (cur->bc_ops->type) { in xfs_btree_check_ptr()
366 xfs_err(cur->bc_mp, in xfs_btree_check_ptr()
367 "In-memory: Corrupt %sbt flags 0x%x pointer at level %d index %d fa %pS.", in xfs_btree_check_ptr()
368 cur->bc_ops->name, cur->bc_flags, level, index, in xfs_btree_check_ptr()
372 xfs_err(cur->bc_mp, in xfs_btree_check_ptr()
373 "Inode %llu fork %d: Corrupt %sbt pointer at level %d index %d.", in xfs_btree_check_ptr()
374 cur->bc_ino.ip->i_ino, in xfs_btree_check_ptr()
375 cur->bc_ino.whichfork, cur->bc_ops->name, in xfs_btree_check_ptr()
376 level, index); in xfs_btree_check_ptr()
379 xfs_err(cur->bc_mp, in xfs_btree_check_ptr()
380 "AG %u: Corrupt %sbt pointer at level %d index %d.", in xfs_btree_check_ptr()
381 cur->bc_group->xg_gno, cur->bc_ops->name, in xfs_btree_check_ptr()
382 level, index); in xfs_btree_check_ptr()
399 * long-form btree header.
410 struct xfs_buf_log_item *bip = bp->b_log_item; in xfs_btree_fsblock_calc_crc()
412 if (!xfs_has_crc(bp->b_mount)) in xfs_btree_fsblock_calc_crc()
415 block->bb_u.l.bb_lsn = cpu_to_be64(bip->bli_item.li_lsn); in xfs_btree_fsblock_calc_crc()
424 struct xfs_mount *mp = bp->b_mount; in xfs_btree_fsblock_verify_crc()
427 if (!xfs_log_check_lsn(mp, be64_to_cpu(block->bb_u.l.bb_lsn))) in xfs_btree_fsblock_verify_crc()
437 * short-form btree header.
448 struct xfs_buf_log_item *bip = bp->b_log_item; in xfs_btree_agblock_calc_crc()
450 if (!xfs_has_crc(bp->b_mount)) in xfs_btree_agblock_calc_crc()
453 block->bb_u.s.bb_lsn = cpu_to_be64(bip->bli_item.li_lsn); in xfs_btree_agblock_calc_crc()
462 struct xfs_mount *mp = bp->b_mount; in xfs_btree_agblock_verify_crc()
465 if (!xfs_log_check_lsn(mp, be64_to_cpu(block->bb_u.s.bb_lsn))) in xfs_btree_agblock_verify_crc()
486 if (unlikely(cur->bc_flags & XFS_BTREE_STAGING)) { in xfs_btree_free_block()
488 return -EFSCORRUPTED; in xfs_btree_free_block()
491 error = cur->bc_ops->free_block(cur, bp); in xfs_btree_free_block()
493 xfs_trans_binval(cur->bc_tp, bp); in xfs_btree_free_block()
507 int i; /* btree level */ in xfs_btree_del_cursor()
513 * code works from level n down to 0, and if we get an error along the in xfs_btree_del_cursor()
516 for (i = 0; i < cur->bc_nlevels; i++) { in xfs_btree_del_cursor()
517 if (cur->bc_levels[i].bp) in xfs_btree_del_cursor()
518 xfs_trans_brelse(cur->bc_tp, cur->bc_levels[i].bp); in xfs_btree_del_cursor()
529 ASSERT(!xfs_btree_is_bmap(cur->bc_ops) || cur->bc_bmap.allocated == 0 || in xfs_btree_del_cursor()
530 xfs_is_shutdown(cur->bc_mp) || error != 0); in xfs_btree_del_cursor()
532 if (cur->bc_group) in xfs_btree_del_cursor()
533 xfs_group_put(cur->bc_group); in xfs_btree_del_cursor()
534 kmem_cache_free(cur->bc_cache, cur); in xfs_btree_del_cursor()
542 if (cur->bc_ops->type == XFS_BTREE_TYPE_MEM) in xfs_btree_buftarg()
543 return cur->bc_mem.xfbtree->target; in xfs_btree_buftarg()
544 return cur->bc_mp->m_ddev_targp; in xfs_btree_buftarg()
552 if (cur->bc_ops->type == XFS_BTREE_TYPE_MEM) in xfs_btree_bbsize()
554 return cur->bc_mp->m_bsize; in xfs_btree_bbsize()
559 * Allocate a new one, copy the record, re-get the buffers.
566 struct xfs_mount *mp = cur->bc_mp; in xfs_btree_dup_cursor()
567 struct xfs_trans *tp = cur->bc_tp; in xfs_btree_dup_cursor()
577 if (unlikely(cur->bc_flags & XFS_BTREE_STAGING)) { in xfs_btree_dup_cursor()
579 return -EFSCORRUPTED; in xfs_btree_dup_cursor()
585 new = cur->bc_ops->dup_cursor(cur); in xfs_btree_dup_cursor()
590 new->bc_rec = cur->bc_rec; in xfs_btree_dup_cursor()
593  * For each level of the current cursor, re-get the buffer and copy the ptr value. in xfs_btree_dup_cursor()
595 for (i = 0; i < new->bc_nlevels; i++) { in xfs_btree_dup_cursor()
596 new->bc_levels[i].ptr = cur->bc_levels[i].ptr; in xfs_btree_dup_cursor()
597 new->bc_levels[i].ra = cur->bc_levels[i].ra; in xfs_btree_dup_cursor()
598 bp = cur->bc_levels[i].bp; in xfs_btree_dup_cursor()
604 cur->bc_ops->buf_ops); in xfs_btree_dup_cursor()
613 new->bc_levels[i].bp = bp; in xfs_btree_dup_cursor()
622 * There are two types of blocks in the btree: leaf and non-leaf blocks.
625 * the values. A non-leaf block also starts with the same header, and
627 * to the btree blocks at the previous level.
629 * +--------+-------+-------+-------+-------+-------+-------+
631 * +--------+-------+-------+-------+-------+-------+-------+
633 * +--------+-------+-------+-------+-------+-------+-------+
634 * Non-Leaf: | header | key 1 | key 2 | key N | ptr 1 | ptr 2 | ptr N |
635 * +--------+-------+-------+-------+-------+-------+-------+
651 * However, nodes are different: each pointer has two associated keys -- one
656 * that matches any record in the leaf and to recursively update the high keys
660 * +--------+-----+-----+-----+-----+-----+-------+-------+-----+
661 * Non-Leaf: | header | lo1 | hi1 | lo2 | hi2 | ... | ptr 1 | ptr 2 | ... |
662 * +--------+-----+-----+-----+-----+-----+-------+-------+-----+
665 * depth-first search and use the low and high keys to decide if we can skip
668 * entries. For a non-overlapped tree, simply search for the record associated
669 * with the lowest key and iterate forward until a non-matching record is
677 * 1: +- file A startblock B offset C length D -----------+
678 * 2: +- file E startblock F offset G length H --------------+
679 * 3: +- file I startblock F offset J length K --+
680 * 4: +- file L... --+
684 * that ends at (B+D-1) (i.e. record 1)? A LE lookup of (B+D-1) would return
686 * query would return records 1 and 2 because they both overlap (B+D-1), and
689 * In the non-overlapped case you can do a LE lookup and decrement the cursor
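A range query over such a tree prunes with those stored key pairs: a child whose (low, high) interval does not overlap the query interval cannot hold a matching record, so the depth-first walk skips it. A minimal standalone sketch of that overlap test, with invented toy_* names and numbers rather than the real xfs_btree query machinery:

#include <stdbool.h>
#include <stdio.h>

/* Key pair carried by one entry of an overlapping (interval) btree node. */
struct toy_keys {
	unsigned long	lo;	/* lowest key reachable via this pointer */
	unsigned long	hi;	/* highest key reachable via this pointer */
};

/*
 * Can the subtree behind this entry contain anything in [qlo, qhi]?
 * If not, a depth-first range query skips the whole subtree.
 */
static bool
toy_subtree_may_overlap(
	const struct toy_keys	*k,
	unsigned long		qlo,
	unsigned long		qhi)
{
	return k->lo <= qhi && k->hi >= qlo;
}

int main(void)
{
	/* Three made-up node entries and a single-block query. */
	struct toy_keys	node[] = { { 10, 19 }, { 15, 24 }, { 15, 18 } };
	unsigned long	qlo = 19, qhi = 19;
	int		i;

	for (i = 0; i < 3; i++)
		printf("entry %d %s overlap the query\n", i + 1,
		       toy_subtree_may_overlap(&node[i], qlo, qhi) ?
		       "may" : "cannot");
	return 0;
}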
698 if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) { in xfs_btree_block_len()
699 if (xfs_has_crc(cur->bc_mp)) in xfs_btree_block_len()
703 if (xfs_has_crc(cur->bc_mp)) in xfs_btree_block_len()
709 * Calculate offset of the n-th record in a btree block.
717 (n - 1) * cur->bc_ops->rec_len; in xfs_btree_rec_offset()
721 * Calculate offset of the n-th key in a btree block.
729 (n - 1) * cur->bc_ops->key_len; in xfs_btree_key_offset()
733 * Calculate offset of the n-th high key in a btree block.
741 (n - 1) * cur->bc_ops->key_len + (cur->bc_ops->key_len / 2); in xfs_btree_high_key_offset()
745 * Calculate offset of the n-th block pointer in a btree block.
751 int level) in xfs_btree_ptr_offset() argument
754 cur->bc_ops->get_maxrecs(cur, level) * cur->bc_ops->key_len + in xfs_btree_ptr_offset()
755 (n - 1) * cur->bc_ops->ptr_len; in xfs_btree_ptr_offset()
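These helpers encode the block layout sketched earlier: header first, then the (n - 1) scaled key or record slots, with a node block's pointers starting only after all get_maxrecs() key slots. A worked arithmetic sketch using made-up sizes in place of xfs_btree_block_len() and the bc_ops lengths:

#include <stdio.h>

/* Made-up geometry standing in for cur->bc_ops and the header length. */
#define HDR_LEN		56	/* hypothetical xfs_btree_block_len() result */
#define KEY_LEN		16	/* bc_ops->key_len */
#define REC_LEN		24	/* bc_ops->rec_len */
#define PTR_LEN		8	/* bc_ops->ptr_len */
#define MAXRECS		100	/* bc_ops->get_maxrecs() for this level */

/* Offset of the n-th record in a leaf block (1-based n). */
static int rec_offset(int n)  { return HDR_LEN + (n - 1) * REC_LEN; }

/* Offset of the n-th key in a node block. */
static int key_offset(int n)  { return HDR_LEN + (n - 1) * KEY_LEN; }

/* Offset of the n-th high key: second half of the n-th key slot. */
static int high_key_offset(int n)
{
	return HDR_LEN + (n - 1) * KEY_LEN + KEY_LEN / 2;
}

/* Pointers live after all MAXRECS key slots. */
static int ptr_offset(int n)
{
	return HDR_LEN + MAXRECS * KEY_LEN + (n - 1) * PTR_LEN;
}

int main(void)
{
	printf("rec 3 at %d, key 3 at %d, high key 3 at %d, ptr 3 at %d\n",
	       rec_offset(3), key_offset(3), high_key_offset(3),
	       ptr_offset(3));
	return 0;
}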
759 * Return a pointer to the n-th record in the btree block.
772 * Return a pointer to the n-th key in the btree block.
785 * Return a pointer to the n-th high key in the btree block.
798 * Return a pointer to the n-th block pointer in the btree block.
806 int level = xfs_btree_get_level(block); in xfs_btree_ptr_addr() local
808 ASSERT(block->bb_level != 0); in xfs_btree_ptr_addr()
811 ((char *)block + xfs_btree_ptr_offset(cur, n, level)); in xfs_btree_ptr_addr()
818 ASSERT(cur->bc_ops->type == XFS_BTREE_TYPE_INODE); in xfs_btree_ifork_ptr()
820 if (cur->bc_flags & XFS_BTREE_STAGING) in xfs_btree_ifork_ptr()
821 return cur->bc_ino.ifake->if_fork; in xfs_btree_ifork_ptr()
822 return xfs_ifork_ptr(cur->bc_ino.ip, cur->bc_ino.whichfork); in xfs_btree_ifork_ptr()
837 return (struct xfs_btree_block *)ifp->if_broot; in xfs_btree_get_iroot()
841 * Retrieve the block pointer from the cursor at the given level.
847 int level, /* level in btree */ in xfs_btree_get_block() argument
850 if (xfs_btree_at_iroot(cur, level)) { in xfs_btree_get_block()
855 *bpp = cur->bc_levels[level].bp; in xfs_btree_get_block()
860 * Change the cursor to point to the first record at the given level.
866 int level) /* level to change */ in xfs_btree_firstrec() argument
872 * Get the block pointer for this level. in xfs_btree_firstrec()
874 block = xfs_btree_get_block(cur, level, &bp); in xfs_btree_firstrec()
875 if (xfs_btree_check_block(cur, block, level, bp)) in xfs_btree_firstrec()
880 if (!block->bb_numrecs) in xfs_btree_firstrec()
885 cur->bc_levels[level].ptr = 1; in xfs_btree_firstrec()
891 * at the given level. Other levels are unaffected.
896 int level) /* level to change */ in xfs_btree_lastrec() argument
902 * Get the block pointer for this level. in xfs_btree_lastrec()
904 block = xfs_btree_get_block(cur, level, &bp); in xfs_btree_lastrec()
905 if (xfs_btree_check_block(cur, block, level, bp)) in xfs_btree_lastrec()
910 if (!block->bb_numrecs) in xfs_btree_lastrec()
915 cur->bc_levels[level].ptr = be16_to_cpu(block->bb_numrecs); in xfs_btree_lastrec()
947 for (i = nbits - 1, imask = 1u << i; ; i--, imask >>= 1) { in xfs_btree_offsets()
949 *last = offsets[i + 1] - 1; in xfs_btree_offsets()
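xfs_btree_offsets() maps a bitmask of dirty header fields to one inclusive byte range for logging, using a table where entry i is field i's offset and entry i + 1 marks where that field ends. The matched lines show only the high-bit half; the sketch below fills in the symmetric low-bit half as an assumption and uses a made-up field table:

#include <stdio.h>

/*
 * Sketch: offsets[i] is the byte offset of field i inside a header and
 * offsets[nbits] is the total header size, so field i occupies
 * [offsets[i], offsets[i + 1] - 1].  Given a bitmask of dirty fields
 * (caller guarantees it is nonzero), compute the smallest byte range
 * covering all of them.
 */
static void
toy_offsets(
	unsigned int	fields,
	const int	*offsets,
	int		nbits,
	int		*first,
	int		*last)
{
	int		i;
	unsigned int	imask;

	/* Lowest set bit gives the first dirty byte (assumed half). */
	for (i = 0, imask = 1u; ; i++, imask <<= 1) {
		if (fields & imask) {
			*first = offsets[i];
			break;
		}
	}
	/* Highest set bit gives the last dirty byte, as shown above. */
	for (i = nbits - 1, imask = 1u << i; ; i--, imask >>= 1) {
		if (fields & imask) {
			*last = offsets[i + 1] - 1;
			break;
		}
	}
}

int main(void)
{
	/* Four made-up fields at offsets 0, 4, 6, 8; header is 16 bytes. */
	int	offsets[] = { 0, 4, 6, 8, 16 };
	int	first, last;

	toy_offsets(0x2 | 0x4, offsets, 4, &first, &last);
	printf("log bytes %d through %d\n", first, last);
	return 0;
}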
961 struct xfs_mount *mp = cur->bc_mp; in xfs_btree_readahead_fsblock()
962 xfs_fsblock_t left = be64_to_cpu(block->bb_u.l.bb_leftsib); in xfs_btree_readahead_fsblock()
963 xfs_fsblock_t right = be64_to_cpu(block->bb_u.l.bb_rightsib); in xfs_btree_readahead_fsblock()
967 xfs_buf_readahead(mp->m_ddev_targp, XFS_FSB_TO_DADDR(mp, left), in xfs_btree_readahead_fsblock()
968 mp->m_bsize, cur->bc_ops->buf_ops); in xfs_btree_readahead_fsblock()
973 xfs_buf_readahead(mp->m_ddev_targp, XFS_FSB_TO_DADDR(mp, right), in xfs_btree_readahead_fsblock()
974 mp->m_bsize, cur->bc_ops->buf_ops); in xfs_btree_readahead_fsblock()
987 struct xfs_buftarg *btp = cur->bc_mem.xfbtree->target; in xfs_btree_readahead_memblock()
988 xfbno_t left = be64_to_cpu(block->bb_u.l.bb_leftsib); in xfs_btree_readahead_memblock()
989 xfbno_t right = be64_to_cpu(block->bb_u.l.bb_rightsib); in xfs_btree_readahead_memblock()
994 cur->bc_ops->buf_ops); in xfs_btree_readahead_memblock()
1000 cur->bc_ops->buf_ops); in xfs_btree_readahead_memblock()
1013 struct xfs_mount *mp = cur->bc_mp; in xfs_btree_readahead_agblock()
1014 struct xfs_perag *pag = to_perag(cur->bc_group); in xfs_btree_readahead_agblock()
1015 xfs_agblock_t left = be32_to_cpu(block->bb_u.s.bb_leftsib); in xfs_btree_readahead_agblock()
1016 xfs_agblock_t right = be32_to_cpu(block->bb_u.s.bb_rightsib); in xfs_btree_readahead_agblock()
1020 xfs_buf_readahead(mp->m_ddev_targp, in xfs_btree_readahead_agblock()
1021 xfs_agbno_to_daddr(pag, left), mp->m_bsize, in xfs_btree_readahead_agblock()
1022 cur->bc_ops->buf_ops); in xfs_btree_readahead_agblock()
1027 xfs_buf_readahead(mp->m_ddev_targp, in xfs_btree_readahead_agblock()
1028 xfs_agbno_to_daddr(pag, right), mp->m_bsize, in xfs_btree_readahead_agblock()
1029 cur->bc_ops->buf_ops); in xfs_btree_readahead_agblock()
1037 * Read-ahead btree blocks, at the given level.
1043 int lev, /* level in btree */ in xfs_btree_readahead()
1049 * No readahead needed if we are at the root level and the in xfs_btree_readahead()
1055 if ((cur->bc_levels[lev].ra | lr) == cur->bc_levels[lev].ra) in xfs_btree_readahead()
1058 cur->bc_levels[lev].ra |= lr; in xfs_btree_readahead()
1059 block = XFS_BUF_TO_BLOCK(cur->bc_levels[lev].bp); in xfs_btree_readahead()
1061 switch (cur->bc_ops->type) { in xfs_btree_readahead()
1086 switch (cur->bc_ops->type) { in xfs_btree_ptr_to_daddr()
1088 *daddr = xfs_agbno_to_daddr(to_perag(cur->bc_group), in xfs_btree_ptr_to_daddr()
1089 be32_to_cpu(ptr->s)); in xfs_btree_ptr_to_daddr()
1092 *daddr = XFS_FSB_TO_DADDR(cur->bc_mp, be64_to_cpu(ptr->l)); in xfs_btree_ptr_to_daddr()
1095 *daddr = xfbno_to_daddr(be64_to_cpu(ptr->l)); in xfs_btree_ptr_to_daddr()
1119 cur->bc_ops->buf_ops); in xfs_btree_readahead_ptr()
1123 * Set the buffer for level "lev" in the cursor to bp, releasing
1129 int lev, /* level in btree */ in xfs_btree_setbuf()
1134 if (cur->bc_levels[lev].bp) in xfs_btree_setbuf()
1135 xfs_trans_brelse(cur->bc_tp, cur->bc_levels[lev].bp); in xfs_btree_setbuf()
1136 cur->bc_levels[lev].bp = bp; in xfs_btree_setbuf()
1137 cur->bc_levels[lev].ra = 0; in xfs_btree_setbuf()
1140 if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) { in xfs_btree_setbuf()
1141 if (b->bb_u.l.bb_leftsib == cpu_to_be64(NULLFSBLOCK)) in xfs_btree_setbuf()
1142 cur->bc_levels[lev].ra |= XFS_BTCUR_LEFTRA; in xfs_btree_setbuf()
1143 if (b->bb_u.l.bb_rightsib == cpu_to_be64(NULLFSBLOCK)) in xfs_btree_setbuf()
1144 cur->bc_levels[lev].ra |= XFS_BTCUR_RIGHTRA; in xfs_btree_setbuf()
1146 if (b->bb_u.s.bb_leftsib == cpu_to_be32(NULLAGBLOCK)) in xfs_btree_setbuf()
1147 cur->bc_levels[lev].ra |= XFS_BTCUR_LEFTRA; in xfs_btree_setbuf()
1148 if (b->bb_u.s.bb_rightsib == cpu_to_be32(NULLAGBLOCK)) in xfs_btree_setbuf()
1149 cur->bc_levels[lev].ra |= XFS_BTCUR_RIGHTRA; in xfs_btree_setbuf()
1158 if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) in xfs_btree_ptr_is_null()
1159 return ptr->l == cpu_to_be64(NULLFSBLOCK); in xfs_btree_ptr_is_null()
1161 return ptr->s == cpu_to_be32(NULLAGBLOCK); in xfs_btree_ptr_is_null()
1169 if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) in xfs_btree_set_ptr_null()
1170 ptr->l = cpu_to_be64(NULLFSBLOCK); in xfs_btree_set_ptr_null()
1172 ptr->s = cpu_to_be32(NULLAGBLOCK); in xfs_btree_set_ptr_null()
1181 if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) in xfs_btree_ptrs_equal()
1182 return ptr1->l == ptr2->l; in xfs_btree_ptrs_equal()
1183 return ptr1->s == ptr2->s; in xfs_btree_ptrs_equal()
1198 if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) { in xfs_btree_get_sibling()
1200 ptr->l = block->bb_u.l.bb_rightsib; in xfs_btree_get_sibling()
1202 ptr->l = block->bb_u.l.bb_leftsib; in xfs_btree_get_sibling()
1205 ptr->s = block->bb_u.s.bb_rightsib; in xfs_btree_get_sibling()
1207 ptr->s = block->bb_u.s.bb_leftsib; in xfs_btree_get_sibling()
1220 if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) { in xfs_btree_set_sibling()
1222 block->bb_u.l.bb_rightsib = ptr->l; in xfs_btree_set_sibling()
1224 block->bb_u.l.bb_leftsib = ptr->l; in xfs_btree_set_sibling()
1227 block->bb_u.s.bb_rightsib = ptr->s; in xfs_btree_set_sibling()
1229 block->bb_u.s.bb_leftsib = ptr->s; in xfs_btree_set_sibling()
1239 __u16 level, in __xfs_btree_init_block() argument
1246 buf->bb_magic = cpu_to_be32(magic); in __xfs_btree_init_block()
1247 buf->bb_level = cpu_to_be16(level); in __xfs_btree_init_block()
1248 buf->bb_numrecs = cpu_to_be16(numrecs); in __xfs_btree_init_block()
1250 if (ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) { in __xfs_btree_init_block()
1251 buf->bb_u.l.bb_leftsib = cpu_to_be64(NULLFSBLOCK); in __xfs_btree_init_block()
1252 buf->bb_u.l.bb_rightsib = cpu_to_be64(NULLFSBLOCK); in __xfs_btree_init_block()
1254 buf->bb_u.l.bb_blkno = cpu_to_be64(blkno); in __xfs_btree_init_block()
1255 buf->bb_u.l.bb_owner = cpu_to_be64(owner); in __xfs_btree_init_block()
1256 uuid_copy(&buf->bb_u.l.bb_uuid, &mp->m_sb.sb_meta_uuid); in __xfs_btree_init_block()
1257 buf->bb_u.l.bb_pad = 0; in __xfs_btree_init_block()
1258 buf->bb_u.l.bb_lsn = 0; in __xfs_btree_init_block()
1261 buf->bb_u.s.bb_leftsib = cpu_to_be32(NULLAGBLOCK); in __xfs_btree_init_block()
1262 buf->bb_u.s.bb_rightsib = cpu_to_be32(NULLAGBLOCK); in __xfs_btree_init_block()
1264 buf->bb_u.s.bb_blkno = cpu_to_be64(blkno); in __xfs_btree_init_block()
1266 buf->bb_u.s.bb_owner = cpu_to_be32((__u32)owner); in __xfs_btree_init_block()
1267 uuid_copy(&buf->bb_u.s.bb_uuid, &mp->m_sb.sb_meta_uuid); in __xfs_btree_init_block()
1268 buf->bb_u.s.bb_lsn = 0; in __xfs_btree_init_block()
1278 __u16 level, in xfs_btree_init_block() argument
1282 __xfs_btree_init_block(mp, block, ops, XFS_BUF_DADDR_NULL, level, in xfs_btree_init_block()
1291 __u16 level, in xfs_btree_init_buf() argument
1296 xfs_buf_daddr(bp), level, numrecs, owner); in xfs_btree_init_buf()
1297 bp->b_ops = ops->buf_ops; in xfs_btree_init_buf()
1304 switch (cur->bc_ops->type) { in xfs_btree_owner()
1306 return cur->bc_mem.xfbtree->owner; in xfs_btree_owner()
1308 return cur->bc_ino.ip->i_ino; in xfs_btree_owner()
1310 return cur->bc_group->xg_gno; in xfs_btree_owner()
1321 int level, in xfs_btree_init_block_cur() argument
1324 xfs_btree_init_buf(cur->bc_mp, bp, cur->bc_ops, level, numrecs, in xfs_btree_init_block_cur()
1334 switch (cur->bc_ops->type) { in xfs_btree_buf_to_ptr()
1336 ptr->s = cpu_to_be32(xfs_daddr_to_agbno(cur->bc_mp, in xfs_btree_buf_to_ptr()
1340 ptr->l = cpu_to_be64(XFS_DADDR_TO_FSB(cur->bc_mp, in xfs_btree_buf_to_ptr()
1344 ptr->l = cpu_to_be64(xfs_daddr_to_xfbno(xfs_buf_daddr(bp))); in xfs_btree_buf_to_ptr()
1354 xfs_buf_set_ref(bp, cur->bc_ops->lru_refs); in xfs_btree_set_refs()
1370 error = xfs_trans_get_buf(cur->bc_tp, xfs_btree_buftarg(cur), d, in xfs_btree_get_buf_block()
1375 (*bpp)->b_ops = cur->bc_ops->buf_ops; in xfs_btree_get_buf_block()
1392 struct xfs_mount *mp = cur->bc_mp; in xfs_btree_read_buf_block()
1402 error = xfs_trans_read_buf(mp, cur->bc_tp, xfs_btree_buftarg(cur), d, in xfs_btree_read_buf_block()
1404 cur->bc_ops->buf_ops); in xfs_btree_read_buf_block()
1426 memcpy(dst_key, src_key, numkeys * cur->bc_ops->key_len); in xfs_btree_copy_keys()
1440 memcpy(dst_rec, src_rec, numrecs * cur->bc_ops->rec_len); in xfs_btree_copy_recs()
1454 memcpy(dst_ptr, src_ptr, numptrs * cur->bc_ops->ptr_len); in xfs_btree_copy_ptrs()
1470 ASSERT(dir == 1 || dir == -1); in xfs_btree_shift_keys()
1472 dst_key = (char *)key + (dir * cur->bc_ops->key_len); in xfs_btree_shift_keys()
1473 memmove(dst_key, key, numkeys * cur->bc_ops->key_len); in xfs_btree_shift_keys()
1489 ASSERT(dir == 1 || dir == -1); in xfs_btree_shift_recs()
1491 dst_rec = (char *)rec + (dir * cur->bc_ops->rec_len); in xfs_btree_shift_recs()
1492 memmove(dst_rec, rec, numrecs * cur->bc_ops->rec_len); in xfs_btree_shift_recs()
1508 ASSERT(dir == 1 || dir == -1); in xfs_btree_shift_ptrs()
1510 dst_ptr = (char *)ptr + (dir * cur->bc_ops->ptr_len); in xfs_btree_shift_ptrs()
1511 memmove(dst_ptr, ptr, numptrs * cur->bc_ops->ptr_len); in xfs_btree_shift_ptrs()
1526 xfs_trans_buf_set_type(cur->bc_tp, bp, XFS_BLFT_BTREE_BUF); in xfs_btree_log_keys()
1527 xfs_trans_log_buf(cur->bc_tp, bp, in xfs_btree_log_keys()
1529 xfs_btree_key_offset(cur, last + 1) - 1); in xfs_btree_log_keys()
1531 xfs_trans_log_inode(cur->bc_tp, cur->bc_ino.ip, in xfs_btree_log_keys()
1532 xfs_ilog_fbroot(cur->bc_ino.whichfork)); in xfs_btree_log_keys()
1547 xfs_trans_log_inode(cur->bc_tp, cur->bc_ino.ip, in xfs_btree_log_recs()
1548 xfs_ilog_fbroot(cur->bc_ino.whichfork)); in xfs_btree_log_recs()
1552 xfs_trans_buf_set_type(cur->bc_tp, bp, XFS_BLFT_BTREE_BUF); in xfs_btree_log_recs()
1553 xfs_trans_log_buf(cur->bc_tp, bp, in xfs_btree_log_recs()
1555 xfs_btree_rec_offset(cur, last + 1) - 1); in xfs_btree_log_recs()
1571 int level = xfs_btree_get_level(block); in xfs_btree_log_ptrs() local
1573 xfs_trans_buf_set_type(cur->bc_tp, bp, XFS_BLFT_BTREE_BUF); in xfs_btree_log_ptrs()
1574 xfs_trans_log_buf(cur->bc_tp, bp, in xfs_btree_log_ptrs()
1575 xfs_btree_ptr_offset(cur, first, level), in xfs_btree_log_ptrs()
1576 xfs_btree_ptr_offset(cur, last + 1, level) - 1); in xfs_btree_log_ptrs()
1578 xfs_trans_log_inode(cur->bc_tp, cur->bc_ino.ip, in xfs_btree_log_ptrs()
1579 xfs_ilog_fbroot(cur->bc_ino.whichfork)); in xfs_btree_log_ptrs()
1626 if (xfs_has_crc(cur->bc_mp)) { in xfs_btree_log_block()
1641 (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) ? in xfs_btree_log_block()
1644 xfs_trans_buf_set_type(cur->bc_tp, bp, XFS_BLFT_BTREE_BUF); in xfs_btree_log_block()
1645 xfs_trans_log_buf(cur->bc_tp, bp, first, last); in xfs_btree_log_block()
1647 xfs_trans_log_inode(cur->bc_tp, cur->bc_ino.ip, in xfs_btree_log_block()
1648 xfs_ilog_fbroot(cur->bc_ino.whichfork)); in xfs_btree_log_block()
1653 * Increment cursor by one record at the level.
1654 * For nonzero levels the leaf-ward information is untouched.
1659 int level, in xfs_btree_increment() argument
1668 ASSERT(level < cur->bc_nlevels); in xfs_btree_increment()
1670 /* Read-ahead to the right at this level. */ in xfs_btree_increment()
1671 xfs_btree_readahead(cur, level, XFS_BTCUR_RIGHTRA); in xfs_btree_increment()
1674 block = xfs_btree_get_block(cur, level, &bp); in xfs_btree_increment()
1677 error = xfs_btree_check_block(cur, block, level, bp); in xfs_btree_increment()
1683 if (++cur->bc_levels[level].ptr <= xfs_btree_get_numrecs(block)) in xfs_btree_increment()
1697 for (lev = level + 1; lev < cur->bc_nlevels; lev++) { in xfs_btree_increment()
1706 if (++cur->bc_levels[lev].ptr <= xfs_btree_get_numrecs(block)) in xfs_btree_increment()
1709 /* Read-ahead the right block for the next loop. */ in xfs_btree_increment()
1717 if (lev == cur->bc_nlevels) { in xfs_btree_increment()
1718 if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE) in xfs_btree_increment()
1722 error = -EFSCORRUPTED; in xfs_btree_increment()
1725 ASSERT(lev < cur->bc_nlevels); in xfs_btree_increment()
1731 for (block = xfs_btree_get_block(cur, lev, &bp); lev > level; ) { in xfs_btree_increment()
1734 ptrp = xfs_btree_ptr_addr(cur, cur->bc_levels[lev].ptr, block); in xfs_btree_increment()
1735 --lev; in xfs_btree_increment()
1741 cur->bc_levels[lev].ptr = 1; in xfs_btree_increment()
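The shape of this walk (bump the leaf index, climb until some level still has an entry to its right, then descend resetting each lower level to its first entry) can be shown on a toy cursor. This is not XFS code; the toy_* names are invented and the fixed numrecs array merely stands in for reading each child block on the way back down.

#include <stdio.h>

#define NLEVELS 3

struct toy_cursor {
	int	ptr[NLEVELS];		/* 1-based entry index per level */
	int	numrecs[NLEVELS];	/* entries in the current block per level */
};

/* Returns 1 if the cursor now points at the next record, 0 at end of tree. */
static int
toy_increment(
	struct toy_cursor	*cur)
{
	int	lev;

	/* Easy case: another record in the current leaf block. */
	if (++cur->ptr[0] <= cur->numrecs[0])
		return 1;

	/* Walk up until some level still has an entry to its right. */
	for (lev = 1; lev < NLEVELS; lev++)
		if (++cur->ptr[lev] <= cur->numrecs[lev])
			break;

	/* Ran off the top: no more records in the tree. */
	if (lev == NLEVELS)
		return 0;

	/*
	 * Walk back down; the real code reads the child block named by each
	 * pointer, here we just reset the index to the first entry.
	 */
	for (lev--; lev >= 0; lev--)
		cur->ptr[lev] = 1;
	return 1;
}

int main(void)
{
	struct toy_cursor	cur = {
		.ptr = { 4, 1, 1 },
		.numrecs = { 4, 3, 2 },
	};

	printf("more records: %d, leaf ptr now %d, level-1 ptr now %d\n",
	       toy_increment(&cur), cur.ptr[0], cur.ptr[1]);
	return 0;
}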
1756 * Decrement cursor by one record at the level.
1757 * For nonzero levels the leaf-ward information is untouched.
1762 int level, in xfs_btree_decrement() argument
1771 ASSERT(level < cur->bc_nlevels); in xfs_btree_decrement()
1773 /* Read-ahead to the left at this level. */ in xfs_btree_decrement()
1774 xfs_btree_readahead(cur, level, XFS_BTCUR_LEFTRA); in xfs_btree_decrement()
1777 if (--cur->bc_levels[level].ptr > 0) in xfs_btree_decrement()
1781 block = xfs_btree_get_block(cur, level, &bp); in xfs_btree_decrement()
1784 error = xfs_btree_check_block(cur, block, level, bp); in xfs_btree_decrement()
1800 for (lev = level + 1; lev < cur->bc_nlevels; lev++) { in xfs_btree_decrement()
1801 if (--cur->bc_levels[lev].ptr > 0) in xfs_btree_decrement()
1803 /* Read-ahead the left block for the next loop. */ in xfs_btree_decrement()
1811 if (lev == cur->bc_nlevels) { in xfs_btree_decrement()
1812 if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE) in xfs_btree_decrement()
1816 error = -EFSCORRUPTED; in xfs_btree_decrement()
1819 ASSERT(lev < cur->bc_nlevels); in xfs_btree_decrement()
1825 for (block = xfs_btree_get_block(cur, lev, &bp); lev > level; ) { in xfs_btree_decrement()
1828 ptrp = xfs_btree_ptr_addr(cur, cur->bc_levels[lev].ptr, block); in xfs_btree_decrement()
1829 --lev; in xfs_btree_decrement()
1834 cur->bc_levels[lev].ptr = xfs_btree_get_numrecs(block); in xfs_btree_decrement()
1859 if (!xfs_has_crc(cur->bc_mp) || in xfs_btree_check_block_owner()
1860 (cur->bc_flags & XFS_BTREE_BMBT_INVALID_OWNER)) in xfs_btree_check_block_owner()
1864 if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) { in xfs_btree_check_block_owner()
1865 if (be64_to_cpu(block->bb_u.l.bb_owner) != owner) in xfs_btree_check_block_owner()
1868 if (be32_to_cpu(block->bb_u.s.bb_owner) != owner) in xfs_btree_check_block_owner()
1878 int level, /* level in the btree */ in xfs_btree_lookup_get_block() argument
1887 if (xfs_btree_at_iroot(cur, level)) { in xfs_btree_lookup_get_block()
1893  * If the old buffer at this level matches the disk address we are in xfs_btree_lookup_get_block()
1894  * looking for, re-use it. in xfs_btree_lookup_get_block()
1898 bp = cur->bc_levels[level].bp; in xfs_btree_lookup_get_block()
1915 /* Did we get the level we were looking for? */ in xfs_btree_lookup_get_block()
1916 if (be16_to_cpu((*blkp)->bb_level) != level) in xfs_btree_lookup_get_block()
1920 if (level != 0 && be16_to_cpu((*blkp)->bb_numrecs) == 0) in xfs_btree_lookup_get_block()
1923 xfs_btree_setbuf(cur, level, bp); in xfs_btree_lookup_get_block()
1929 xfs_trans_brelse(cur->bc_tp, bp); in xfs_btree_lookup_get_block()
1931 return -EFSCORRUPTED; in xfs_btree_lookup_get_block()
1935 * Get current search key. For level 0 we don't actually have a key
1942 int level, in xfs_lookup_get_search_key() argument
1947 if (level == 0) { in xfs_lookup_get_search_key()
1948 cur->bc_ops->init_key_from_rec(kp, in xfs_lookup_get_search_key()
1964 if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE) { in xfs_btree_init_ptr_from_cur()
1966 * Inode-rooted btrees call xfs_btree_get_iroot to find the root in xfs_btree_init_ptr_from_cur()
1969 ptr->l = 0; in xfs_btree_init_ptr_from_cur()
1970 } else if (cur->bc_flags & XFS_BTREE_STAGING) { in xfs_btree_init_ptr_from_cur()
1971 ptr->s = cpu_to_be32(cur->bc_ag.afake->af_root); in xfs_btree_init_ptr_from_cur()
1973 cur->bc_ops->init_ptr_from_cur(cur, ptr); in xfs_btree_init_ptr_from_cur()
1991 int level; /* level in the btree */ in xfs_btree_lookup() local
1997 /* No such thing as a zero-level tree. */ in xfs_btree_lookup()
1998 if (XFS_IS_CORRUPT(cur->bc_mp, cur->bc_nlevels == 0)) { in xfs_btree_lookup()
2000 return -EFSCORRUPTED; in xfs_btree_lookup()
2011 * Iterate over each level in the btree, starting at the root. in xfs_btree_lookup()
2012 * For each level above the leaves, find the key we need, based in xfs_btree_lookup()
2014 * pointer down to the next level. in xfs_btree_lookup()
2016 for (level = cur->bc_nlevels - 1, diff = 1; level >= 0; level--) { in xfs_btree_lookup()
2018 error = xfs_btree_lookup_get_block(cur, level, pp, &block); in xfs_btree_lookup()
2024 * If we already had a key match at a higher level, we in xfs_btree_lookup()
2031 int high; /* high entry number */ in xfs_btree_lookup() local
2034 /* Set low and high entry numbers, 1-based. */ in xfs_btree_lookup()
2036 high = xfs_btree_get_numrecs(block); in xfs_btree_lookup()
2037 if (!high) { in xfs_btree_lookup()
2039 if (level != 0 || cur->bc_nlevels != 1) { in xfs_btree_lookup()
2042 cur->bc_mp, block, in xfs_btree_lookup()
2045 return -EFSCORRUPTED; in xfs_btree_lookup()
2048 cur->bc_levels[0].ptr = dir != XFS_LOOKUP_LE; in xfs_btree_lookup()
2054 while (low <= high) { in xfs_btree_lookup()
2060 /* keyno is average of low and high. */ in xfs_btree_lookup()
2061 keyno = (low + high) >> 1; in xfs_btree_lookup()
2064 kp = xfs_lookup_get_search_key(cur, level, in xfs_btree_lookup()
2069 * - less than, move right in xfs_btree_lookup()
2070 * - greater than, move left in xfs_btree_lookup()
2071 * - equal, we're done in xfs_btree_lookup()
2073 diff = cur->bc_ops->key_diff(cur, kp); in xfs_btree_lookup()
2077 high = keyno - 1; in xfs_btree_lookup()
2084 * If there are more levels, set up for the next level in xfs_btree_lookup()
2087 if (level > 0) { in xfs_btree_lookup()
2092 if (diff > 0 && --keyno < 1) in xfs_btree_lookup()
2096 error = xfs_btree_debug_check_ptr(cur, pp, 0, level); in xfs_btree_lookup()
2100 cur->bc_levels[level].ptr = keyno; in xfs_btree_lookup()
2117 cur->bc_levels[0].ptr = keyno; in xfs_btree_lookup()
2121 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) { in xfs_btree_lookup()
2123 return -EFSCORRUPTED; in xfs_btree_lookup()
2129 keyno--; in xfs_btree_lookup()
2130 cur->bc_levels[0].ptr = keyno; in xfs_btree_lookup()
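Within a block, the lookup above is a 1-based binary search: key_diff() compares the probed entry against the search key, a negative result moves the search right, a positive one moves it left, and zero is an exact match. A standalone sketch of the same loop over a sorted integer array (toy_* names are invented; plain ints stand in for btree keys):

#include <stdio.h>

/* Plays the role of key_diff(): negative means the probed entry sorts
 * before the search key. */
static long
toy_key_diff(
	int	probed,
	int	search)
{
	return (long)probed - search;
}

/* Returns the 1-based index of an exact match, or 0 if not found. */
static int
toy_lookup_eq(
	const int	*keys,
	int		nkeys,
	int		search)
{
	int	low = 1, high = nkeys;

	while (low <= high) {
		int	keyno = (low + high) >> 1;	/* midpoint probe */
		long	diff = toy_key_diff(keys[keyno - 1], search);

		if (diff < 0)
			low = keyno + 1;	/* less than: move right */
		else if (diff > 0)
			high = keyno - 1;	/* greater than: move left */
		else
			return keyno;		/* equal: we're done */
	}
	return 0;
}

int main(void)
{
	int	keys[] = { 3, 7, 9, 14, 21 };

	printf("14 found at entry %d, 10 found at entry %d\n",
	       toy_lookup_eq(keys, 5, 14), toy_lookup_eq(keys, 5, 10));
	return 0;
}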
2145 /* Find the high key storage area from a regular key. */
2151 ASSERT(cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING); in xfs_btree_high_key_from_key()
2153 (cur->bc_ops->key_len / 2)); in xfs_btree_high_key_from_key()
2156 /* Determine the low (and high if overlapped) keys of a leaf block */
2166 union xfs_btree_key *high; in xfs_btree_get_leaf_keys() local
2170 cur->bc_ops->init_key_from_rec(key, rec); in xfs_btree_get_leaf_keys()
2172 if (cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING) { in xfs_btree_get_leaf_keys()
2174 cur->bc_ops->init_high_key_from_rec(&max_hkey, rec); in xfs_btree_get_leaf_keys()
2177 cur->bc_ops->init_high_key_from_rec(&hkey, rec); in xfs_btree_get_leaf_keys()
2182 high = xfs_btree_high_key_from_key(cur, key); in xfs_btree_get_leaf_keys()
2183 memcpy(high, &max_hkey, cur->bc_ops->key_len / 2); in xfs_btree_get_leaf_keys()
2187 /* Determine the low (and high if overlapped) keys of a node block */
2196 union xfs_btree_key *high; in xfs_btree_get_node_keys() local
2199 if (cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING) { in xfs_btree_get_node_keys()
2201 cur->bc_ops->key_len / 2); in xfs_btree_get_node_keys()
2210 high = xfs_btree_high_key_from_key(cur, key); in xfs_btree_get_node_keys()
2211 memcpy(high, max_hkey, cur->bc_ops->key_len / 2); in xfs_btree_get_node_keys()
2214 cur->bc_ops->key_len); in xfs_btree_get_node_keys()
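For the overlapping case these routines derive the key pair a parent will store for a block: the low key is taken from the block's first entry and the high key is the maximum high key over all of its entries. A tiny standalone sketch (invented toy_* names, not the xfs_btree key structures):

#include <stdio.h>

struct toy_keys {
	unsigned long	lo;
	unsigned long	hi;
};

/*
 * Compute the (low, high) pair a parent would store for a block: low from
 * entry 1 (entries are sorted by low key), high as the maximum of all
 * entries' high keys, mirroring the shape of xfs_btree_get_leaf_keys()
 * and xfs_btree_get_node_keys() above.
 */
static struct toy_keys
toy_block_keys(
	const struct toy_keys	*entries,
	int			n)
{
	struct toy_keys	out = entries[0];
	int		i;

	for (i = 1; i < n; i++)
		if (entries[i].hi > out.hi)
			out.hi = entries[i].hi;
	return out;
}

int main(void)
{
	struct toy_keys	entries[] = { { 10, 19 }, { 15, 24 }, { 15, 18 } };
	struct toy_keys	k = toy_block_keys(entries, 3);

	printf("parent stores lo=%lu hi=%lu for this block\n", k.lo, k.hi);
	return 0;
}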
2225 if (be16_to_cpu(block->bb_level) == 0) in xfs_btree_get_keys()
2243 return (cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING) || ptr == 1; in xfs_btree_needs_key_update()
2247 * Update the low and high parent keys of the given level, progressing
2249 * level do not need updating.
2254 int level, in __xfs_btree_updkeys() argument
2259 union xfs_btree_key key; /* keys from current level */ in __xfs_btree_updkeys()
2260 union xfs_btree_key *lkey; /* keys from the next level up */ in __xfs_btree_updkeys()
2262 union xfs_btree_key *nlkey; /* keys from the next level up */ in __xfs_btree_updkeys()
2267 ASSERT(cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING); in __xfs_btree_updkeys()
2270 if (level + 1 >= cur->bc_nlevels) in __xfs_btree_updkeys()
2273 trace_xfs_btree_updkeys(cur, level, bp0); in __xfs_btree_updkeys()
2278 for (level++; level < cur->bc_nlevels; level++) { in __xfs_btree_updkeys()
2282 block = xfs_btree_get_block(cur, level, &bp); in __xfs_btree_updkeys()
2283 trace_xfs_btree_updkeys(cur, level, bp); in __xfs_btree_updkeys()
2285 error = xfs_btree_check_block(cur, block, level, bp); in __xfs_btree_updkeys()
2289 ptr = cur->bc_levels[level].ptr; in __xfs_btree_updkeys()
2298 if (level + 1 >= cur->bc_nlevels) in __xfs_btree_updkeys()
2306 /* Update all the keys from some level in cursor back to the root. */
2310 int level) in xfs_btree_updkeys_force() argument
2315 block = xfs_btree_get_block(cur, level, &bp); in xfs_btree_updkeys_force()
2316 return __xfs_btree_updkeys(cur, level, block, bp, true); in xfs_btree_updkeys_force()
2320 * Update the parent keys of the given level, progressing towards the root.
2325 int level) in xfs_btree_update_keys() argument
2333 ASSERT(level >= 0); in xfs_btree_update_keys()
2335 block = xfs_btree_get_block(cur, level, &bp); in xfs_btree_update_keys()
2336 if (cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING) in xfs_btree_update_keys()
2337 return __xfs_btree_updkeys(cur, level, block, bp, false); in xfs_btree_update_keys()
2340 * Go up the tree from this level toward the root. in xfs_btree_update_keys()
2341 * At each level, update the key value to the value input. in xfs_btree_update_keys()
2342 * Stop when we reach a level where the cursor isn't pointing in xfs_btree_update_keys()
2346 for (level++, ptr = 1; ptr == 1 && level < cur->bc_nlevels; level++) { in xfs_btree_update_keys()
2350 block = xfs_btree_get_block(cur, level, &bp); in xfs_btree_update_keys()
2352 error = xfs_btree_check_block(cur, block, level, bp); in xfs_btree_update_keys()
2356 ptr = cur->bc_levels[level].ptr; in xfs_btree_update_keys()
2390 ptr = cur->bc_levels[0].ptr; in xfs_btree_update()
2411 * Move 1 record left from cur/level if possible.
2417 int level, in xfs_btree_lshift() argument
2434 if (xfs_btree_at_iroot(cur, level)) in xfs_btree_lshift()
2438 right = xfs_btree_get_block(cur, level, &rbp); in xfs_btree_lshift()
2441 error = xfs_btree_check_block(cur, right, level, rbp); in xfs_btree_lshift()
2455 if (cur->bc_levels[level].ptr <= 1) in xfs_btree_lshift()
2465 if (lrecs == cur->bc_ops->get_maxrecs(cur, level)) in xfs_btree_lshift()
2476 rrecs--; in xfs_btree_lshift()
2482 * If non-leaf, copy a key and a ptr to the left block. in xfs_btree_lshift()
2485 if (level > 0) { in xfs_btree_lshift()
2486 /* It's a non-leaf. Move keys and pointers. */ in xfs_btree_lshift()
2496 error = xfs_btree_debug_check_ptr(cur, rpp, 0, level); in xfs_btree_lshift()
2506 ASSERT(cur->bc_ops->keys_inorder(cur, in xfs_btree_lshift()
2507 xfs_btree_key_addr(cur, lrecs - 1, left), lkp)); in xfs_btree_lshift()
2518 ASSERT(cur->bc_ops->recs_inorder(cur, in xfs_btree_lshift()
2519 xfs_btree_rec_addr(cur, lrecs - 1, left), lrp)); in xfs_btree_lshift()
2531 XFS_BTREE_STATS_ADD(cur, moves, rrecs - 1); in xfs_btree_lshift()
2532 if (level > 0) { in xfs_btree_lshift()
2535 error = xfs_btree_debug_check_ptr(cur, rpp, i + 1, level); in xfs_btree_lshift()
2542 -1, rrecs); in xfs_btree_lshift()
2545 -1, rrecs); in xfs_btree_lshift()
2553 -1, rrecs); in xfs_btree_lshift()
2561 if (cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING) { in xfs_btree_lshift()
2565 i = xfs_btree_firstrec(tcur, level); in xfs_btree_lshift()
2566 if (XFS_IS_CORRUPT(tcur->bc_mp, i != 1)) { in xfs_btree_lshift()
2568 error = -EFSCORRUPTED; in xfs_btree_lshift()
2572 error = xfs_btree_decrement(tcur, level, &i); in xfs_btree_lshift()
2576 /* Update the parent high keys of the left block, if needed. */ in xfs_btree_lshift()
2577 error = xfs_btree_update_keys(tcur, level); in xfs_btree_lshift()
2585 error = xfs_btree_update_keys(cur, level); in xfs_btree_lshift()
2590 cur->bc_levels[level].ptr--; in xfs_btree_lshift()
2608 * Move 1 record right from cur/level if possible.
2614 int level, in xfs_btree_rshift() argument
2629 if (xfs_btree_at_iroot(cur, level)) in xfs_btree_rshift()
2633 left = xfs_btree_get_block(cur, level, &lbp); in xfs_btree_rshift()
2636 error = xfs_btree_check_block(cur, left, level, lbp); in xfs_btree_rshift()
2651 if (cur->bc_levels[level].ptr >= lrecs) in xfs_btree_rshift()
2661 if (rrecs == cur->bc_ops->get_maxrecs(cur, level)) in xfs_btree_rshift()
2671 if (level > 0) { in xfs_btree_rshift()
2682 for (i = rrecs - 1; i >= 0; i--) { in xfs_btree_rshift()
2683 error = xfs_btree_debug_check_ptr(cur, rpp, i, level); in xfs_btree_rshift()
2691 error = xfs_btree_debug_check_ptr(cur, lpp, 0, level); in xfs_btree_rshift()
2702 ASSERT(cur->bc_ops->keys_inorder(cur, rkp, in xfs_btree_rshift()
2722 xfs_btree_set_numrecs(left, --lrecs); in xfs_btree_rshift()
2735 i = xfs_btree_lastrec(tcur, level); in xfs_btree_rshift()
2736 if (XFS_IS_CORRUPT(tcur->bc_mp, i != 1)) { in xfs_btree_rshift()
2738 error = -EFSCORRUPTED; in xfs_btree_rshift()
2742 error = xfs_btree_increment(tcur, level, &i); in xfs_btree_rshift()
2746 /* Update the parent high keys of the left block, if needed. */ in xfs_btree_rshift()
2747 if (cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING) { in xfs_btree_rshift()
2748 error = xfs_btree_update_keys(cur, level); in xfs_btree_rshift()
2754 error = xfs_btree_update_keys(tcur, level); in xfs_btree_rshift()
2791 if (unlikely(cur->bc_flags & XFS_BTREE_STAGING)) { in xfs_btree_alloc_block()
2793 return -EFSCORRUPTED; in xfs_btree_alloc_block()
2796 error = cur->bc_ops->alloc_block(cur, hint_block, new_block, stat); in xfs_btree_alloc_block()
2802 * Split cur/level block in half.
2809 int level, in __xfs_btree_split() argument
2821 union xfs_btree_ptr rrptr; /* right-right sibling ptr */ in __xfs_btree_split()
2822 struct xfs_buf *rrbp; /* right-right buffer pointer */ in __xfs_btree_split()
2823 struct xfs_btree_block *rrblock; /* right-right btree block */ in __xfs_btree_split()
2833 left = xfs_btree_get_block(cur, level, &lbp); in __xfs_btree_split()
2836 error = xfs_btree_check_block(cur, left, level, lbp); in __xfs_btree_split()
2866 if ((lrecs & 1) && cur->bc_levels[level].ptr <= rrecs + 1) in __xfs_btree_split()
2868 src_index = (lrecs - rrecs + 1); in __xfs_btree_split()
2873 lrecs -= rrecs; in __xfs_btree_split()
2882 if (level > 0) { in __xfs_btree_split()
2883 /* It's a non-leaf. Move keys and pointers. */ in __xfs_btree_split()
2895 error = xfs_btree_debug_check_ptr(cur, lpp, i, level); in __xfs_btree_split()
2950 /* Update the parent high keys of the left block, if needed. */ in __xfs_btree_split()
2951 if (cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING) { in __xfs_btree_split()
2952 error = xfs_btree_update_keys(cur, level); in __xfs_btree_split()
2962 if (cur->bc_levels[level].ptr > lrecs + 1) { in __xfs_btree_split()
2963 xfs_btree_setbuf(cur, level, rbp); in __xfs_btree_split()
2964 cur->bc_levels[level].ptr -= lrecs; in __xfs_btree_split()
2970 if (level + 1 < cur->bc_nlevels) { in __xfs_btree_split()
2974 (*curp)->bc_levels[level + 1].ptr++; in __xfs_btree_split()
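The split above moves rrecs records into the new right block; with an odd record count, the extra existing record is placed away from the cursor so that, once the pending insert lands on the cursor's side, both blocks hold the same number of records. A small arithmetic sketch of that choice, assuming the usual halving of the count, which the matched lines above do not show:

#include <stdio.h>

/*
 * How many records move to the new right block when a full block of
 * lrecs records splits and the cursor sits at entry cursor_ptr (1-based),
 * following the odd-count test shown in __xfs_btree_split().  The
 * initial halving is an assumption taken from "split ... in half".
 */
static int
toy_split_rrecs(
	int	lrecs,
	int	cursor_ptr)
{
	int	rrecs = lrecs / 2;

	/* Odd count: keep the cursor's side one short, it gains the insert. */
	if ((lrecs & 1) && cursor_ptr <= rrecs + 1)
		rrecs++;
	return rrecs;
}

int main(void)
{
	printf("7 records, cursor at 2: %d move right\n", toy_split_rrecs(7, 2));
	printf("7 records, cursor at 6: %d move right\n", toy_split_rrecs(7, 6));
	return 0;
}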
2990 int level; member
3019 if (args->kswapd) in xfs_btree_split_worker()
3023 xfs_trans_set_context(args->cur->bc_tp); in xfs_btree_split_worker()
3025 args->result = __xfs_btree_split(args->cur, args->level, args->ptrp, in xfs_btree_split_worker()
3026 args->key, args->curp, args->stat); in xfs_btree_split_worker()
3028 xfs_trans_clear_context(args->cur->bc_tp); in xfs_btree_split_worker()
3035 complete(args->done); in xfs_btree_split_worker()
3044 * Care must be taken here - the work queue rescuer thread introduces potential
3060 int level, in xfs_btree_split() argument
3069 if (!xfs_btree_is_bmap(cur->bc_ops) || in xfs_btree_split()
3070 cur->bc_tp->t_highest_agno == NULLAGNUMBER) in xfs_btree_split()
3071 return __xfs_btree_split(cur, level, ptrp, key, curp, stat); in xfs_btree_split()
3074 args.level = level; in xfs_btree_split()
3121 cur->bc_ops->broot_realloc(cur, 0); in xfs_btree_promote_leaf_iroot()
3122 cur->bc_nlevels++; in xfs_btree_promote_leaf_iroot()
3123 cur->bc_levels[1].ptr = 1; in xfs_btree_promote_leaf_iroot()
3129 broot = cur->bc_ops->broot_realloc(cur, 1); in xfs_btree_promote_leaf_iroot()
3130 xfs_btree_init_block(cur->bc_mp, broot, cur->bc_ops, in xfs_btree_promote_leaf_iroot()
3131 cur->bc_nlevels - 1, 1, cur->bc_ino.ip->i_ino); in xfs_btree_promote_leaf_iroot()
3155 int level, in xfs_btree_promote_node_iroot() argument
3169 * Increase tree height, adjusting the root block level to match. in xfs_btree_promote_node_iroot()
3173 be16_add_cpu(&block->bb_level, 1); in xfs_btree_promote_node_iroot()
3174 cur->bc_nlevels++; in xfs_btree_promote_node_iroot()
3175 cur->bc_levels[level + 1].ptr = 1; in xfs_btree_promote_node_iroot()
3190 error = xfs_btree_debug_check_ptr(cur, pp, i, level); in xfs_btree_promote_node_iroot()
3200 error = xfs_btree_debug_check_ptr(cur, cptr, 0, level); in xfs_btree_promote_node_iroot()
3206 cur->bc_ops->broot_realloc(cur, 1); in xfs_btree_promote_node_iroot()
3209 xfs_btree_setbuf(cur, level, cbp); in xfs_btree_promote_node_iroot()
3224 int *stat) /* return status - 0 fail */ in xfs_btree_new_iroot()
3231 int level; /* btree level */ in xfs_btree_new_iroot() local
3236 ASSERT(cur->bc_ops->type == XFS_BTREE_TYPE_INODE); in xfs_btree_new_iroot()
3238 level = cur->bc_nlevels - 1; in xfs_btree_new_iroot()
3241 ASSERT(level > 0 || (cur->bc_ops->geom_flags & XFS_BTGEO_IROOT_RECORDS)); in xfs_btree_new_iroot()
3242 if (level > 0) in xfs_btree_new_iroot()
3245 aptr.l = cpu_to_be64(XFS_INO_TO_FSB(cur->bc_mp, in xfs_btree_new_iroot()
3246 cur->bc_ino.ip->i_ino)); in xfs_btree_new_iroot()
3267 if (xfs_has_crc(cur->bc_mp)) { in xfs_btree_new_iroot()
3269 if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) in xfs_btree_new_iroot()
3270 cblock->bb_u.l.bb_blkno = bno; in xfs_btree_new_iroot()
3272 cblock->bb_u.s.bb_blkno = bno; in xfs_btree_new_iroot()
3275 if (level > 0) { in xfs_btree_new_iroot()
3276 error = xfs_btree_promote_node_iroot(cur, block, level, cbp, in xfs_btree_new_iroot()
3284 *logflags |= XFS_ILOG_CORE | xfs_ilog_fbroot(cur->bc_ino.whichfork); in xfs_btree_new_iroot()
3297 if (cur->bc_flags & XFS_BTREE_STAGING) { in xfs_btree_set_root()
3298 /* Update the btree root information for a per-AG fake root. */ in xfs_btree_set_root()
3299 cur->bc_ag.afake->af_root = be32_to_cpu(ptr->s); in xfs_btree_set_root()
3300 cur->bc_ag.afake->af_levels += inc; in xfs_btree_set_root()
3302 cur->bc_ops->set_root(cur, ptr, inc); in xfs_btree_set_root()
3345 /* Set the root in the holding structure increasing the level by 1. */ in xfs_btree_new_root()
3349 * At the previous root level there are now two blocks: the old root, in xfs_btree_new_root()
3354 block = xfs_btree_get_block(cur, cur->bc_nlevels - 1, &bp); in xfs_btree_new_root()
3357 error = xfs_btree_check_block(cur, block, cur->bc_nlevels - 1, bp); in xfs_btree_new_root()
3387 xfs_btree_init_block_cur(cur, nbp, cur->bc_nlevels, 2); in xfs_btree_new_root()
3423 xfs_btree_setbuf(cur, cur->bc_nlevels, nbp); in xfs_btree_new_root()
3424 cur->bc_levels[cur->bc_nlevels].ptr = nptr; in xfs_btree_new_root()
3425 cur->bc_nlevels++; in xfs_btree_new_root()
3426 ASSERT(cur->bc_nlevels <= cur->bc_maxlevels); in xfs_btree_new_root()
3439 int level, /* btree level */ in xfs_btree_make_block_unfull() argument
3450 if (xfs_btree_at_iroot(cur, level)) { in xfs_btree_make_block_unfull()
3451 struct xfs_inode *ip = cur->bc_ino.ip; in xfs_btree_make_block_unfull()
3453 if (numrecs < cur->bc_ops->get_dmaxrecs(cur, level)) { in xfs_btree_make_block_unfull()
3455 cur->bc_ops->broot_realloc(cur, numrecs + 1); in xfs_btree_make_block_unfull()
3465 xfs_trans_log_inode(cur->bc_tp, ip, logflags); in xfs_btree_make_block_unfull()
3472 error = xfs_btree_rshift(cur, level, stat); in xfs_btree_make_block_unfull()
3477 error = xfs_btree_lshift(cur, level, stat); in xfs_btree_make_block_unfull()
3482 *oindex = *index = cur->bc_levels[level].ptr; in xfs_btree_make_block_unfull()
3489 * If this works we have to re-set our variables because we in xfs_btree_make_block_unfull()
3492 error = xfs_btree_split(cur, level, nptr, key, ncur, stat); in xfs_btree_make_block_unfull()
3497 *index = cur->bc_levels[level].ptr; in xfs_btree_make_block_unfull()
3502 * Insert one record/level. Return information to the caller
3503 * allowing the next level up to proceed if necessary.
3508 int level, /* level to insert record at */ in xfs_btree_insrec() argument
3533 * root level, allocate a new root block and we're done. in xfs_btree_insrec()
3535 if (cur->bc_ops->type != XFS_BTREE_TYPE_INODE && in xfs_btree_insrec()
3536 level >= cur->bc_nlevels) { in xfs_btree_insrec()
3544 ptr = cur->bc_levels[level].ptr; in xfs_btree_insrec()
3555 block = xfs_btree_get_block(cur, level, &bp); in xfs_btree_insrec()
3560 error = xfs_btree_check_block(cur, block, level, bp); in xfs_btree_insrec()
3566 if (level == 0) { in xfs_btree_insrec()
3567 ASSERT(cur->bc_ops->recs_inorder(cur, rec, in xfs_btree_insrec()
3570 ASSERT(cur->bc_ops->keys_inorder(cur, key, in xfs_btree_insrec()
3578 * make the block un-full. in xfs_btree_insrec()
3581 if (numrecs == cur->bc_ops->get_maxrecs(cur, level)) { in xfs_btree_insrec()
3582 error = xfs_btree_make_block_unfull(cur, level, numrecs, in xfs_btree_insrec()
3592 block = xfs_btree_get_block(cur, level, &bp); in xfs_btree_insrec()
3596 error = xfs_btree_check_block(cur, block, level, bp); in xfs_btree_insrec()
3605 XFS_BTREE_STATS_ADD(cur, moves, numrecs - ptr + 1); in xfs_btree_insrec()
3607 if (level > 0) { in xfs_btree_insrec()
3615 for (i = numrecs - ptr; i >= 0; i--) { in xfs_btree_insrec()
3616 error = xfs_btree_debug_check_ptr(cur, pp, i, level); in xfs_btree_insrec()
3621 xfs_btree_shift_keys(cur, kp, 1, numrecs - ptr + 1); in xfs_btree_insrec()
3622 xfs_btree_shift_ptrs(cur, pp, 1, numrecs - ptr + 1); in xfs_btree_insrec()
3624 error = xfs_btree_debug_check_ptr(cur, ptrp, 0, level); in xfs_btree_insrec()
3637 ASSERT(cur->bc_ops->keys_inorder(cur, kp, in xfs_btree_insrec()
3647 xfs_btree_shift_recs(cur, rp, 1, numrecs - ptr + 1); in xfs_btree_insrec()
3655 ASSERT(cur->bc_ops->recs_inorder(cur, rp, in xfs_btree_insrec()
3673 * the next level up, so if we decide to add the new record to the new in xfs_btree_insrec()
3674 * block (bp->b_bn != old_bn), we have to update the caller's pointer in xfs_btree_insrec()
3677 * However, there is a third possibility-- if the selected block is the in xfs_btree_insrec()
3678 * root block of an inode-rooted btree and cannot be expanded further, in xfs_btree_insrec()
3684 * overlapping btrees, because the high key must be updated to reflect in xfs_btree_insrec()
3692 error = xfs_btree_update_keys(cur, level); in xfs_btree_insrec()
3719 * A multi-level split of the tree on insert will invalidate the original
3730 int level; /* current level number in btree */ in xfs_btree_insert() local
3733 struct xfs_btree_cur *pcur; /* previous level's cursor */ in xfs_btree_insert()
3738 level = 0; in xfs_btree_insert()
3746 cur->bc_ops->init_rec_from_cur(cur, &rec); in xfs_btree_insert()
3747 cur->bc_ops->init_key_from_rec(key, &rec); in xfs_btree_insert()
3750 * Loop going up the tree, starting at the leaf level. in xfs_btree_insert()
3752 * the insert is finished with this level. in xfs_btree_insert()
3756 * Insert nrec/nptr into this level of the tree. in xfs_btree_insert()
3759 error = xfs_btree_insrec(pcur, level, &nptr, &rec, key, in xfs_btree_insert()
3767 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) { in xfs_btree_insert()
3769 error = -EFSCORRUPTED; in xfs_btree_insert()
3772 level++; in xfs_btree_insert()
3782 if (cur->bc_ops->update_cursor && in xfs_btree_insert()
3783 !(cur->bc_flags & XFS_BTREE_STAGING)) in xfs_btree_insert()
3784 cur->bc_ops->update_cursor(pcur, cur); in xfs_btree_insert()
3785 cur->bc_nlevels = pcur->bc_nlevels; in xfs_btree_insert()
3820 cur->bc_ops->broot_realloc(cur, 0); in xfs_btree_demote_leaf_child()
3821 cur->bc_nlevels--; in xfs_btree_demote_leaf_child()
3827 broot = cur->bc_ops->broot_realloc(cur, numrecs); in xfs_btree_demote_leaf_child()
3828 xfs_btree_init_block(cur->bc_mp, broot, cur->bc_ops, 0, numrecs, in xfs_btree_demote_leaf_child()
3829 cur->bc_ino.ip->i_ino); in xfs_btree_demote_leaf_child()
3835 cur->bc_levels[0].bp = NULL; in xfs_btree_demote_leaf_child()
3849 int level, in xfs_btree_demote_node_child() argument
3865 block = cur->bc_ops->broot_realloc(cur, numrecs); in xfs_btree_demote_node_child()
3868 ASSERT(block->bb_numrecs == cblock->bb_numrecs); in xfs_btree_demote_node_child()
3879 error = xfs_btree_debug_check_ptr(cur, cpp, i, level - 1); in xfs_btree_demote_node_child()
3885 /* Decrease tree height, adjusting the root block level to match. */ in xfs_btree_demote_node_child()
3886 cur->bc_levels[level - 1].bp = NULL; in xfs_btree_demote_node_child()
3887 be16_add_cpu(&block->bb_level, -1); in xfs_btree_demote_node_child()
3888 cur->bc_nlevels--; in xfs_btree_demote_node_child()
3893 * Try to merge a non-leaf block back into the inode root.
3904 struct xfs_inode *ip = cur->bc_ino.ip; in xfs_btree_kill_iroot()
3908 int level; in xfs_btree_kill_iroot() local
3915 ASSERT(cur->bc_ops->type == XFS_BTREE_TYPE_INODE); in xfs_btree_kill_iroot()
3916 ASSERT((cur->bc_ops->geom_flags & XFS_BTGEO_IROOT_RECORDS) || in xfs_btree_kill_iroot()
3917 cur->bc_nlevels > 1); in xfs_btree_kill_iroot()
3923 level = cur->bc_nlevels - 1; in xfs_btree_kill_iroot()
3924 if (level == 1 && !(cur->bc_ops->geom_flags & XFS_BTGEO_IROOT_RECORDS)) in xfs_btree_kill_iroot()
3928 if (level == 0) in xfs_btree_kill_iroot()
3938 cblock = xfs_btree_get_block(cur, level - 1, &cbp); in xfs_btree_kill_iroot()
3942 * Only do this if the next level will fit. in xfs_btree_kill_iroot()
3944 * instead of freeing the root you free the next level. in xfs_btree_kill_iroot()
3946 if (numrecs > cur->bc_ops->get_dmaxrecs(cur, level)) in xfs_btree_kill_iroot()
3958 if (level > 1) { in xfs_btree_kill_iroot()
3959 error = xfs_btree_demote_node_child(cur, cblock, level, in xfs_btree_kill_iroot()
3970 xfs_trans_log_inode(cur->bc_tp, ip, in xfs_btree_kill_iroot()
3971 XFS_ILOG_CORE | xfs_ilog_fbroot(cur->bc_ino.whichfork)); in xfs_btree_kill_iroot()
3983 int level, in xfs_btree_kill_root() argument
3991 * Update the root pointer, decreasing the level by 1 and then in xfs_btree_kill_root()
3994 xfs_btree_set_root(cur, newroot, -1); in xfs_btree_kill_root()
4000 cur->bc_levels[level].bp = NULL; in xfs_btree_kill_root()
4001 cur->bc_levels[level].ra = 0; in xfs_btree_kill_root()
4002 cur->bc_nlevels--; in xfs_btree_kill_root()
4010 int level, in xfs_btree_dec_cursor() argument
4016 if (level > 0) { in xfs_btree_dec_cursor()
4017 error = xfs_btree_decrement(cur, level, &i); in xfs_btree_dec_cursor()
4027 * Single level of the btree record deletion routine.
4028 * Delete record pointed to by cur/level.
4030 * Return 0 for error, 1 for done, 2 to go on to the next level.
4035 int level, /* level removing record from */ in xfs_btree_delrec() argument
4036 int *stat) /* fail/done/go-on */ in xfs_btree_delrec()
4051 struct xfs_btree_block *rrblock; /* right-right btree block */ in xfs_btree_delrec()
4052 struct xfs_buf *rrbp; /* right-right buffer pointer */ in xfs_btree_delrec()
4060 ptr = cur->bc_levels[level].ptr; in xfs_btree_delrec()
4067 block = xfs_btree_get_block(cur, level, &bp); in xfs_btree_delrec()
4071 error = xfs_btree_check_block(cur, block, level, bp); in xfs_btree_delrec()
4083 XFS_BTREE_STATS_ADD(cur, moves, numrecs - ptr); in xfs_btree_delrec()
4086 if (level > 0) { in xfs_btree_delrec()
4094 for (i = 0; i < numrecs - ptr; i++) { in xfs_btree_delrec()
4095 error = xfs_btree_debug_check_ptr(cur, lpp, i, level); in xfs_btree_delrec()
4101 xfs_btree_shift_keys(cur, lkp, -1, numrecs - ptr); in xfs_btree_delrec()
4102 xfs_btree_shift_ptrs(cur, lpp, -1, numrecs - ptr); in xfs_btree_delrec()
4103 xfs_btree_log_keys(cur, bp, ptr, numrecs - 1); in xfs_btree_delrec()
4104 xfs_btree_log_ptrs(cur, bp, ptr, numrecs - 1); in xfs_btree_delrec()
4111 -1, numrecs - ptr); in xfs_btree_delrec()
4112 xfs_btree_log_recs(cur, bp, ptr, numrecs - 1); in xfs_btree_delrec()
4119 xfs_btree_set_numrecs(block, --numrecs); in xfs_btree_delrec()
4123 * We're at the root level. First, shrink the root block in-memory. in xfs_btree_delrec()
4124 * Try to get rid of the next level down. If we can't then there's in xfs_btree_delrec()
4127 if (xfs_btree_at_iroot(cur, level)) { in xfs_btree_delrec()
4128 cur->bc_ops->broot_realloc(cur, numrecs); in xfs_btree_delrec()
4134 error = xfs_btree_dec_cursor(cur, level, stat); in xfs_btree_delrec()
4142 * If this is the root level, and there's only one entry left, and it's in xfs_btree_delrec()
4143 * NOT the leaf level, then we can get rid of this level. in xfs_btree_delrec()
4145 if (level == cur->bc_nlevels - 1) { in xfs_btree_delrec()
4146 if (numrecs == 1 && level > 0) { in xfs_btree_delrec()
4153 error = xfs_btree_kill_root(cur, bp, level, pp); in xfs_btree_delrec()
4156 } else if (level > 0) { in xfs_btree_delrec()
4157 error = xfs_btree_dec_cursor(cur, level, stat); in xfs_btree_delrec()
4170 error = xfs_btree_update_keys(cur, level); in xfs_btree_delrec()
4179 if (numrecs >= cur->bc_ops->get_minrecs(cur, level)) { in xfs_btree_delrec()
4180 error = xfs_btree_dec_cursor(cur, level, stat); in xfs_btree_delrec()
4189 * see if we can re-balance by moving only one record. in xfs_btree_delrec()
4194 if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE) { in xfs_btree_delrec()
4197 * into the root and delete it. Can't go up to next level, in xfs_btree_delrec()
4202 level == cur->bc_nlevels - 2) { in xfs_btree_delrec()
4205 error = xfs_btree_dec_cursor(cur, level, stat); in xfs_btree_delrec()
4217 * disrupt the next level up. in xfs_btree_delrec()
4232 i = xfs_btree_lastrec(tcur, level); in xfs_btree_delrec()
4233 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) { in xfs_btree_delrec()
4235 error = -EFSCORRUPTED; in xfs_btree_delrec()
4239 error = xfs_btree_increment(tcur, level, &i); in xfs_btree_delrec()
4242 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) { in xfs_btree_delrec()
4244 error = -EFSCORRUPTED; in xfs_btree_delrec()
4248 i = xfs_btree_lastrec(tcur, level); in xfs_btree_delrec()
4249 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) { in xfs_btree_delrec()
4251 error = -EFSCORRUPTED; in xfs_btree_delrec()
4256 right = xfs_btree_get_block(tcur, level, &rbp); in xfs_btree_delrec()
4258 error = xfs_btree_check_block(tcur, right, level, rbp); in xfs_btree_delrec()
4267 * won't make it too empty, and left-shifting an entry out in xfs_btree_delrec()
4270 if (xfs_btree_get_numrecs(right) - 1 >= in xfs_btree_delrec()
4271 cur->bc_ops->get_minrecs(tcur, level)) { in xfs_btree_delrec()
4272 error = xfs_btree_lshift(tcur, level, &i); in xfs_btree_delrec()
4277 cur->bc_ops->get_minrecs(tcur, level)); in xfs_btree_delrec()
4282 error = xfs_btree_dec_cursor(cur, level, stat); in xfs_btree_delrec()
4296 i = xfs_btree_firstrec(tcur, level); in xfs_btree_delrec()
4297 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) { in xfs_btree_delrec()
4299 error = -EFSCORRUPTED; in xfs_btree_delrec()
4303 error = xfs_btree_decrement(tcur, level, &i); in xfs_btree_delrec()
4306 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) { in xfs_btree_delrec()
4308 error = -EFSCORRUPTED; in xfs_btree_delrec()
4323 i = xfs_btree_firstrec(tcur, level); in xfs_btree_delrec()
4324 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) { in xfs_btree_delrec()
4326 error = -EFSCORRUPTED; in xfs_btree_delrec()
4330 error = xfs_btree_decrement(tcur, level, &i); in xfs_btree_delrec()
4333 i = xfs_btree_firstrec(tcur, level); in xfs_btree_delrec()
4334 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) { in xfs_btree_delrec()
4336 error = -EFSCORRUPTED; in xfs_btree_delrec()
4341 left = xfs_btree_get_block(tcur, level, &lbp); in xfs_btree_delrec()
4343 error = xfs_btree_check_block(cur, left, level, lbp); in xfs_btree_delrec()
4352  * won't make it too empty, and right-shifting an entry out of left to us works, we're done. in xfs_btree_delrec()
4355 if (xfs_btree_get_numrecs(left) - 1 >= in xfs_btree_delrec()
4356 cur->bc_ops->get_minrecs(tcur, level)) { in xfs_btree_delrec()
4357 error = xfs_btree_rshift(tcur, level, &i); in xfs_btree_delrec()
4362 cur->bc_ops->get_minrecs(tcur, level)); in xfs_btree_delrec()
4365 if (level == 0) in xfs_btree_delrec()
4366 cur->bc_levels[0].ptr++; in xfs_btree_delrec()
4389 cur->bc_ops->get_maxrecs(cur, level)) { in xfs_btree_delrec()
4406 cur->bc_ops->get_maxrecs(cur, level)) { in xfs_btree_delrec()
4423 error = xfs_btree_dec_cursor(cur, level, stat); in xfs_btree_delrec()
4437 if (level > 0) { in xfs_btree_delrec()
4438 /* It's a non-leaf. Move keys and pointers. */ in xfs_btree_delrec()
4450 error = xfs_btree_debug_check_ptr(cur, rpp, i, level); in xfs_btree_delrec()
4503 cur->bc_levels[level].bp = lbp; in xfs_btree_delrec()
4504 cur->bc_levels[level].ptr += lrecs; in xfs_btree_delrec()
4505 cur->bc_levels[level].ra = 0; in xfs_btree_delrec()
4508 * If we joined with the right neighbor and there's a level above in xfs_btree_delrec()
4509 * us, increment the cursor at that level. in xfs_btree_delrec()
4511 else if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE || in xfs_btree_delrec()
4512 level + 1 < cur->bc_nlevels) { in xfs_btree_delrec()
4513 error = xfs_btree_increment(cur, level + 1, &i); in xfs_btree_delrec()
4519  * Readjust the ptr at this level if it's not a leaf, since it's still pointing at the deletion point, which makes the cursor inconsistent. in xfs_btree_delrec()
4522 * We can't use decrement because it would change the next level up. in xfs_btree_delrec()
4524 if (level > 0) in xfs_btree_delrec()
4525 cur->bc_levels[level].ptr--; in xfs_btree_delrec()
4530  * bc_levels[level + 1].ptr points to the old block so that the caller knows which buffers to free. in xfs_btree_delrec()
4537 /* Return value means the next level up has something to do. */ in xfs_btree_delrec()
4558 int level; in xfs_btree_delete() local
4563 * Go up the tree, starting at leaf level. in xfs_btree_delete()
4565 * If 2 is returned then a join was done; go to the next level. in xfs_btree_delete()
4568 for (level = 0, i = 2; i == 2; level++) { in xfs_btree_delete()
4569 error = xfs_btree_delrec(cur, level, &i); in xfs_btree_delete()
4578 * have updated the parent high keys so we have to do that here. in xfs_btree_delete()
4580 if (joined && (cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING)) { in xfs_btree_delete()
4587 for (level = 1; level < cur->bc_nlevels; level++) { in xfs_btree_delete()
4588 if (cur->bc_levels[level].ptr == 0) { in xfs_btree_delete()
4589 error = xfs_btree_decrement(cur, level, &i); in xfs_btree_delete()
4604 * Get the data from the pointed-to record.
4619 ptr = cur->bc_levels[0].ptr; in xfs_btree_get_rec()
4648 int level, in xfs_btree_visit_block() argument
4658 xfs_btree_readahead(cur, level, XFS_BTCUR_RIGHTRA); in xfs_btree_visit_block()
4659 block = xfs_btree_get_block(cur, level, &bp); in xfs_btree_visit_block()
4662 error = fn(cur, level, data); in xfs_btree_visit_block()
4669 return -ENOENT; in xfs_btree_visit_block()
4680 return -EFSCORRUPTED; in xfs_btree_visit_block()
4683 return xfs_btree_lookup_get_block(cur, level, &rptr, &block); in xfs_btree_visit_block()
4696 int level; in xfs_btree_visit_blocks() local
4702 /* for each level */ in xfs_btree_visit_blocks()
4703 for (level = cur->bc_nlevels - 1; level >= 0; level--) { in xfs_btree_visit_blocks()
4705 error = xfs_btree_lookup_get_block(cur, level, &lptr, &block); in xfs_btree_visit_blocks()
4709 /* readahead the left most block for the next level down */ in xfs_btree_visit_blocks()
4710 if (level > 0) { in xfs_btree_visit_blocks()
4725 /* for each buffer in the level */ in xfs_btree_visit_blocks()
4727 error = xfs_btree_visit_block(cur, level, fn, data); in xfs_btree_visit_blocks()
4730 if (error != -ENOENT) in xfs_btree_visit_blocks()
4745 * We do the btree walk in the most optimal manner possible - we have sibling
4746 * pointers so we can just walk all the blocks on each level from left to right
4747  * in a single pass, and then move to the next level and do the same. We can also use readahead on the sibling pointers to keep IO moving as we walk each level.
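A walk of that shape, descend to the leftmost block of a level and then follow right-sibling pointers across it, is what the fragments below implement. A minimal sketch using only helpers visible in this listing (error handling trimmed; -ENOENT is assumed to mean "no right sibling left", as the visit function below suggests):

	/* Sketch only; simplified from the loop structure shown below. */
	for (level = cur->bc_nlevels - 1; level >= 0; level--) {
		/* Start at the leftmost block of this level. */
		error = xfs_btree_lookup_get_block(cur, level, &lptr, &block);
		if (error)
			return error;

		/*
		 * The real code also records the first child pointer here so
		 * that the next level down starts at its leftmost block.
		 */

		/* Walk right across the level via the sibling pointers. */
		while ((error = xfs_btree_visit_block(cur, level, fn, data)) == 0)
			;
		if (error != -ENOENT)
			return error;
	}
	return 0;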
4769 int level, in xfs_btree_block_change_owner() argument
4777 block = xfs_btree_get_block(cur, level, &bp); in xfs_btree_block_change_owner()
4778 if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) { in xfs_btree_block_change_owner()
4779 if (block->bb_u.l.bb_owner == cpu_to_be64(bbcoi->new_owner)) in xfs_btree_block_change_owner()
4781 block->bb_u.l.bb_owner = cpu_to_be64(bbcoi->new_owner); in xfs_btree_block_change_owner()
4783 if (block->bb_u.s.bb_owner == cpu_to_be32(bbcoi->new_owner)) in xfs_btree_block_change_owner()
4785 block->bb_u.s.bb_owner = cpu_to_be32(bbcoi->new_owner); in xfs_btree_block_change_owner()
4792  * block is formatted into the on-disk inode fork. We still change it, though, so everything is consistent in memory. in xfs_btree_block_change_owner()
4796 ASSERT(cur->bc_ops->type == XFS_BTREE_TYPE_INODE); in xfs_btree_block_change_owner()
4797 ASSERT(level == cur->bc_nlevels - 1); in xfs_btree_block_change_owner()
4801 if (cur->bc_tp) { in xfs_btree_block_change_owner()
4802 if (!xfs_trans_ordered_buf(cur->bc_tp, bp)) { in xfs_btree_block_change_owner()
4804 return -EAGAIN; in xfs_btree_block_change_owner()
4807 xfs_buf_delwri_queue(bp, bbcoi->buffer_list); in xfs_btree_block_change_owner()
4828 /* Verify the v5 fields of a long-format btree block. */
4834 struct xfs_mount *mp = bp->b_mount; in xfs_btree_fsblock_v5hdr_verify()
4839 if (!uuid_equal(&block->bb_u.l.bb_uuid, &mp->m_sb.sb_meta_uuid)) in xfs_btree_fsblock_v5hdr_verify()
4841 if (block->bb_u.l.bb_blkno != cpu_to_be64(xfs_buf_daddr(bp))) in xfs_btree_fsblock_v5hdr_verify()
4844 be64_to_cpu(block->bb_u.l.bb_owner) != owner) in xfs_btree_fsblock_v5hdr_verify()
4849 /* Verify a long-format btree block. */
4855 struct xfs_mount *mp = bp->b_mount; in xfs_btree_fsblock_verify()
4860 ASSERT(!xfs_buftarg_is_mem(bp->b_target)); in xfs_btree_fsblock_verify()
4863 if (be16_to_cpu(block->bb_numrecs) > max_recs) in xfs_btree_fsblock_verify()
4869 block->bb_u.l.bb_leftsib); in xfs_btree_fsblock_verify()
4872 block->bb_u.l.bb_rightsib); in xfs_btree_fsblock_verify()
4876 /* Verify an in-memory btree block. */
4883 struct xfs_buftarg *btp = bp->b_target; in xfs_btree_memblock_verify()
4887 ASSERT(xfs_buftarg_is_mem(bp->b_target)); in xfs_btree_memblock_verify()
4890 if (be16_to_cpu(block->bb_numrecs) > max_recs) in xfs_btree_memblock_verify()
4896 block->bb_u.l.bb_leftsib); in xfs_btree_memblock_verify()
4900 block->bb_u.l.bb_rightsib); in xfs_btree_memblock_verify()
4907  * xfs_btree_agblock_v5hdr_verify() -- verify the v5 fields of a short-format btree block
4916 struct xfs_mount *mp = bp->b_mount; in xfs_btree_agblock_v5hdr_verify()
4918 struct xfs_perag *pag = bp->b_pag; in xfs_btree_agblock_v5hdr_verify()
4922 if (!uuid_equal(&block->bb_u.s.bb_uuid, &mp->m_sb.sb_meta_uuid)) in xfs_btree_agblock_v5hdr_verify()
4924 if (block->bb_u.s.bb_blkno != cpu_to_be64(xfs_buf_daddr(bp))) in xfs_btree_agblock_v5hdr_verify()
4926 if (pag && be32_to_cpu(block->bb_u.s.bb_owner) != pag_agno(pag)) in xfs_btree_agblock_v5hdr_verify()
4932 * xfs_btree_agblock_verify() -- verify a short-format btree block
4942 struct xfs_mount *mp = bp->b_mount; in xfs_btree_agblock_verify()
4947 ASSERT(!xfs_buftarg_is_mem(bp->b_target)); in xfs_btree_agblock_verify()
4950 if (be16_to_cpu(block->bb_numrecs) > max_recs) in xfs_btree_agblock_verify()
4955 fa = xfs_btree_check_agblock_siblings(bp->b_pag, agbno, in xfs_btree_agblock_verify()
4956 block->bb_u.s.bb_leftsib); in xfs_btree_agblock_verify()
4958 fa = xfs_btree_check_agblock_siblings(bp->b_pag, agbno, in xfs_btree_agblock_verify()
4959 block->bb_u.s.bb_rightsib); in xfs_btree_agblock_verify()
5009 * We start by assuming a single level tree consumes a single block, then track
5010 * the number of blocks each node level consumes until we no longer have space
5011 * to store the next node level. At this point, we are indexing all the leaf
5026 unsigned long long blocks_left = leaf_blocks - 1; in xfs_btree_space_to_height()
5033 blocks_left -= node_blocks; in xfs_btree_space_to_height()
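As a rough, self-contained illustration of that accounting (an approximation of the idea, not the kernel helper itself), assume the root consumes one block and every interior node holds at most node_maxrecs pointers; keep adding node levels until the remaining blocks cannot hold another one:

unsigned int
btree_space_to_height(
	unsigned int		node_maxrecs,	/* fanout of an interior node */
	unsigned long long	blocks)		/* blocks available to the tree */
{
	unsigned long long	node_blocks = 2;	/* minimal root fanout */
	unsigned long long	blocks_left;
	unsigned int		height = 1;

	if (blocks < 1)
		return 0;

	blocks_left = blocks - 1;	/* the root consumes one block */

	/* Add node levels until there is no space left for the next one. */
	while (node_blocks < blocks_left) {
		blocks_left -= node_blocks;
		node_blocks *= node_maxrecs;
		height++;
	}

	return height;
}

Under this sketch, node_maxrecs = 4 and 20 available blocks gives a height of 3.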
5060 ASSERT(cur->bc_ops->init_high_key_from_rec); in xfs_btree_simple_query_range()
5061 ASSERT(cur->bc_ops->diff_two_keys); in xfs_btree_simple_query_range()
5087 cur->bc_ops->init_high_key_from_rec(&rec_key, recp); in xfs_btree_simple_query_range()
5094 cur->bc_ops->init_key_from_rec(&rec_key, recp); in xfs_btree_simple_query_range()
5120 * First, generate keys for the low and high records passed in.
5122 * For any leaf node, generate the high and low keys for the record.
5123  * If the record keys overlap with the query low/high keys, pass the record to the function iterator.
5126 * For any internal node, compare the low and high keys of each
5127  * pointer against the query low/high keys. If there's an overlap, follow the pointer.
5131  * As an optimization, we stop scanning a block when we find a low key that is greater than the query's high key.
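Both overlap tests above are instances of the standard closed-interval check: two key ranges overlap exactly when each range's high key is greater than or equal to the other's low key. A minimal standalone sketch with a hypothetical helper and plain integers standing in for union xfs_btree_key:

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical helper illustrating the interval-overlap test above. */
static bool
keys_overlap(
	uint64_t	query_lo,
	uint64_t	query_hi,
	uint64_t	rec_lo,
	uint64_t	rec_hi)
{
	/* "query's high key < record's low key" means no overlap... */
	if (query_hi < rec_lo)
		return false;
	/* ...and so does "record's high key < query's low key". */
	if (rec_hi < query_lo)
		return false;
	/* Otherwise the two ranges intersect. */
	return true;
}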
5149 int level; in xfs_btree_overlapped_query_range() local
5155 level = cur->bc_nlevels - 1; in xfs_btree_overlapped_query_range()
5157 error = xfs_btree_lookup_get_block(cur, level, &ptr, &block); in xfs_btree_overlapped_query_range()
5160 xfs_btree_get_block(cur, level, &bp); in xfs_btree_overlapped_query_range()
5161 trace_xfs_btree_overlapped_query_range(cur, level, bp); in xfs_btree_overlapped_query_range()
5163 error = xfs_btree_check_block(cur, block, level, bp); in xfs_btree_overlapped_query_range()
5167 cur->bc_levels[level].ptr = 1; in xfs_btree_overlapped_query_range()
5169 while (level < cur->bc_nlevels) { in xfs_btree_overlapped_query_range()
5170 block = xfs_btree_get_block(cur, level, &bp); in xfs_btree_overlapped_query_range()
5173 if (cur->bc_levels[level].ptr > in xfs_btree_overlapped_query_range()
5174 be16_to_cpu(block->bb_numrecs)) { in xfs_btree_overlapped_query_range()
5176 if (level < cur->bc_nlevels - 1) in xfs_btree_overlapped_query_range()
5177 cur->bc_levels[level + 1].ptr++; in xfs_btree_overlapped_query_range()
5178 level++; in xfs_btree_overlapped_query_range()
5182 if (level == 0) { in xfs_btree_overlapped_query_range()
5184 recp = xfs_btree_rec_addr(cur, cur->bc_levels[0].ptr, in xfs_btree_overlapped_query_range()
5187 cur->bc_ops->init_high_key_from_rec(&rec_hkey, recp); in xfs_btree_overlapped_query_range()
5188 cur->bc_ops->init_key_from_rec(&rec_key, recp); in xfs_btree_overlapped_query_range()
5191  * If (query's high key < record's low key), then there are no more interesting records in this block. Pop in xfs_btree_overlapped_query_range()
5193 * up to the leaf level to find more record blocks. in xfs_btree_overlapped_query_range()
5195 * If (record's high key >= query's low key) and in xfs_btree_overlapped_query_range()
5196  * (query's high key >= record's low key), then this record overlaps the query range; callback. in xfs_btree_overlapped_query_range()
5206 cur->bc_levels[level].ptr++; in xfs_btree_overlapped_query_range()
5211 lkp = xfs_btree_key_addr(cur, cur->bc_levels[level].ptr, block); in xfs_btree_overlapped_query_range()
5212 hkp = xfs_btree_high_key_addr(cur, cur->bc_levels[level].ptr, in xfs_btree_overlapped_query_range()
5214 pp = xfs_btree_ptr_addr(cur, cur->bc_levels[level].ptr, block); in xfs_btree_overlapped_query_range()
5217 * If (query's high key < pointer's low key), then there are no in xfs_btree_overlapped_query_range()
5218  * more interesting keys in this block. Pop up one leaf level to continue looking for records. in xfs_btree_overlapped_query_range()
5221 * If (pointer's high key >= query's low key) and in xfs_btree_overlapped_query_range()
5222 * (query's high key >= pointer's low key), then in xfs_btree_overlapped_query_range()
5228 level--; in xfs_btree_overlapped_query_range()
5229 error = xfs_btree_lookup_get_block(cur, level, pp, in xfs_btree_overlapped_query_range()
5233 xfs_btree_get_block(cur, level, &bp); in xfs_btree_overlapped_query_range()
5234 trace_xfs_btree_overlapped_query_range(cur, level, bp); in xfs_btree_overlapped_query_range()
5236 error = xfs_btree_check_block(cur, block, level, bp); in xfs_btree_overlapped_query_range()
5240 cur->bc_levels[level].ptr = 1; in xfs_btree_overlapped_query_range()
5243 cur->bc_levels[level].ptr++; in xfs_btree_overlapped_query_range()
5249 * block, a subsequent non-error cursor deletion will not release in xfs_btree_overlapped_query_range()
5250 * node-level buffers, causing a buffer leak. This is quite possible in xfs_btree_overlapped_query_range()
5251  * with a zero-results range query, so release the buffers if we failed to return any results. in xfs_btree_overlapped_query_range()
5254 if (cur->bc_levels[0].bp == NULL) { in xfs_btree_overlapped_query_range()
5255 for (i = 0; i < cur->bc_nlevels; i++) { in xfs_btree_overlapped_query_range()
5256 if (cur->bc_levels[i].bp) { in xfs_btree_overlapped_query_range()
5257 xfs_trans_brelse(cur->bc_tp, in xfs_btree_overlapped_query_range()
5258 cur->bc_levels[i].bp); in xfs_btree_overlapped_query_range()
5259 cur->bc_levels[i].bp = NULL; in xfs_btree_overlapped_query_range()
5260 cur->bc_levels[i].ptr = 0; in xfs_btree_overlapped_query_range()
5261 cur->bc_levels[i].ra = 0; in xfs_btree_overlapped_query_range()
5277 cur->bc_rec = *irec; in xfs_btree_key_from_irec()
5278 cur->bc_ops->init_rec_from_cur(cur, &rec); in xfs_btree_key_from_irec()
5279 cur->bc_ops->init_key_from_rec(key, &rec); in xfs_btree_key_from_irec()
5286 * code. This function returns -ECANCELED, zero, or a negative error code.
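A hedged usage sketch of that contract follows; the callback type and argument order are written from memory and simplified, so treat the names and the caller-side fragment as illustrative assumptions rather than the kernel API verbatim:

/* Assumed callback shape: count records, stop early after the first 1000. */
static int
count_rec_fn(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_rec	*rec,
	void				*priv)
{
	unsigned long long	*nr = priv;

	(*nr)++;
	if (*nr >= 1000)
		return -ECANCELED;	/* stop the walk without flagging an error */
	return 0;
}

	/* Caller-side fragment; low/high are union xfs_btree_irec endpoints. */
	error = xfs_btree_query_range(cur, &low, &high, count_rec_fn, &nr);
	if (error == -ECANCELED)
		error = 0;		/* the callback asked to stop early, not a failure */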
5303 /* Enforce low key <= high key. */ in xfs_btree_query_range()
5305 return -EINVAL; in xfs_btree_query_range()
5307 if (!(cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING)) in xfs_btree_query_range()
5324 memset(&cur->bc_rec, 0, sizeof(cur->bc_rec)); in xfs_btree_query_all()
5334 int level, in xfs_btree_count_blocks_helper() argument
5361 if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) in xfs_btree_diff_two_ptrs()
5362 return (int64_t)be64_to_cpu(a->l) - be64_to_cpu(b->l); in xfs_btree_diff_two_ptrs()
5363 return (int64_t)be32_to_cpu(a->s) - be32_to_cpu(b->s); in xfs_btree_diff_two_ptrs()
5391 cur->bc_ops->init_key_from_rec(&rec_key, rec); in xfs_btree_has_records_helper()
5393 if (info->outcome == XBTREE_RECPACKING_EMPTY) { in xfs_btree_has_records_helper()
5394 info->outcome = XBTREE_RECPACKING_SPARSE; in xfs_btree_has_records_helper()
5401 if (xfs_btree_masked_keycmp_lt(cur, &info->start_key, &rec_key, in xfs_btree_has_records_helper()
5402 info->key_mask)) in xfs_btree_has_records_helper()
5403 return -ECANCELED; in xfs_btree_has_records_helper()
5412 key_contig = cur->bc_ops->keys_contiguous(cur, &info->high_key, in xfs_btree_has_records_helper()
5413 &rec_key, info->key_mask); in xfs_btree_has_records_helper()
5415 !(cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING)) in xfs_btree_has_records_helper()
5416 return -EFSCORRUPTED; in xfs_btree_has_records_helper()
5418 return -ECANCELED; in xfs_btree_has_records_helper()
5422  * If high_key(rec) is larger than any other high key we've seen, remember it. in xfs_btree_has_records_helper()
5425 cur->bc_ops->init_high_key_from_rec(&rec_high_key, rec); in xfs_btree_has_records_helper()
5426 if (xfs_btree_masked_keycmp_gt(cur, &rec_high_key, &info->high_key, in xfs_btree_has_records_helper()
5427 info->key_mask)) in xfs_btree_has_records_helper()
5428 info->high_key = rec_high_key; /* struct copy */ in xfs_btree_has_records_helper()
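Putting the helper's bookkeeping together, the caller ends up with a three-way classification of the queried keyspace. A simplified sketch of that final step, using assumed local names and not the kernel function verbatim:

	/* Sketch: classify how the records found cover the queried range. */
	if (info.outcome == XBTREE_RECPACKING_EMPTY) {
		/* No records overlapped the range at all. */
		*outcome = XBTREE_RECPACKING_EMPTY;
	} else if (xfs_btree_masked_keycmp_lt(cur, &info.high_key,
					      &info.end_key, info.key_mask)) {
		/* The records stop short of the end of the range: a gap remains. */
		*outcome = XBTREE_RECPACKING_SPARSE;
	} else {
		/* Records cover the whole range with no holes detected. */
		*outcome = XBTREE_RECPACKING_FULL;
	}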
5451 const union xfs_btree_irec *high, in xfs_btree_has_records() argument
5462 if (!cur->bc_ops->keys_contiguous) { in xfs_btree_has_records()
5464 return -EOPNOTSUPP; in xfs_btree_has_records()
5468 xfs_btree_key_from_irec(cur, &info.end_key, high); in xfs_btree_has_records()
5470 error = xfs_btree_query_range(cur, low, high, in xfs_btree_has_records()
5472 if (error == -ECANCELED) in xfs_btree_has_records()
5505 if (cur->bc_levels[0].ptr < xfs_btree_get_numrecs(block)) in xfs_btree_has_more_records()
5509 if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) in xfs_btree_has_more_records()
5510 return block->bb_u.l.bb_rightsib != cpu_to_be64(NULLFSBLOCK); in xfs_btree_has_more_records()
5512 return block->bb_u.s.bb_rightsib != cpu_to_be32(NULLAGBLOCK); in xfs_btree_has_more_records()
5570 memset(&cur->bc_rec, 0, sizeof(cur->bc_rec)); in xfs_btree_goto_left_edge()
5583 return -EFSCORRUPTED; in xfs_btree_goto_left_edge()
5589 /* Allocate a block for an inode-rooted metadata btree. */
5598 .mp = cur->bc_mp, in xfs_btree_alloc_metafile_block()
5599 .tp = cur->bc_tp, in xfs_btree_alloc_metafile_block()
5605 struct xfs_inode *ip = cur->bc_ino.ip; in xfs_btree_alloc_metafile_block()
5610 xfs_rmap_ino_bmbt_owner(&args.oinfo, ip->i_ino, cur->bc_ino.whichfork); in xfs_btree_alloc_metafile_block()
5612 XFS_INO_TO_FSB(cur->bc_mp, ip->i_ino)); in xfs_btree_alloc_metafile_block()
5623 new->l = cpu_to_be64(args.fsbno); in xfs_btree_alloc_metafile_block()
5628 /* Free a block from an inode-rooted metadata btree. */
5635 struct xfs_mount *mp = cur->bc_mp; in xfs_btree_free_metafile_block()
5636 struct xfs_inode *ip = cur->bc_ino.ip; in xfs_btree_free_metafile_block()
5637 struct xfs_trans *tp = cur->bc_tp; in xfs_btree_free_metafile_block()
5643 xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, cur->bc_ino.whichfork); in xfs_btree_free_metafile_block()