Lines Matching refs:cur
126 struct xfs_btree_cur *cur, in __xfs_btree_check_lblock_hdr() argument
131 struct xfs_mount *mp = cur->bc_mp; in __xfs_btree_check_lblock_hdr()
143 if (be32_to_cpu(block->bb_magic) != xfs_btree_magic(mp, cur->bc_ops)) in __xfs_btree_check_lblock_hdr()
148 cur->bc_ops->get_maxrecs(cur, level)) in __xfs_btree_check_lblock_hdr()
160 struct xfs_btree_cur *cur, in __xfs_btree_check_fsblock() argument
165 struct xfs_mount *mp = cur->bc_mp; in __xfs_btree_check_fsblock()
169 fa = __xfs_btree_check_lblock_hdr(cur, block, level, bp); in __xfs_btree_check_fsblock()
200 struct xfs_btree_cur *cur, in __xfs_btree_check_memblock() argument
205 struct xfs_buftarg *btp = cur->bc_mem.xfbtree->target; in __xfs_btree_check_memblock()
209 fa = __xfs_btree_check_lblock_hdr(cur, block, level, bp); in __xfs_btree_check_memblock()
228 struct xfs_btree_cur *cur, in __xfs_btree_check_agblock() argument
233 struct xfs_mount *mp = cur->bc_mp; in __xfs_btree_check_agblock()
234 struct xfs_perag *pag = to_perag(cur->bc_group); in __xfs_btree_check_agblock()
245 if (be32_to_cpu(block->bb_magic) != xfs_btree_magic(mp, cur->bc_ops)) in __xfs_btree_check_agblock()
250 cur->bc_ops->get_maxrecs(cur, level)) in __xfs_btree_check_agblock()
269 struct xfs_btree_cur *cur, in __xfs_btree_check_block() argument
274 switch (cur->bc_ops->type) { in __xfs_btree_check_block()
276 return __xfs_btree_check_memblock(cur, block, level, bp); in __xfs_btree_check_block()
278 return __xfs_btree_check_agblock(cur, block, level, bp); in __xfs_btree_check_block()
280 return __xfs_btree_check_fsblock(cur, block, level, bp); in __xfs_btree_check_block()
287 static inline unsigned int xfs_btree_block_errtag(struct xfs_btree_cur *cur) in xfs_btree_block_errtag() argument
289 if (cur->bc_ops->ptr_len == XFS_BTREE_SHORT_PTR_LEN) in xfs_btree_block_errtag()
299 struct xfs_btree_cur *cur, /* btree cursor */ in xfs_btree_check_block() argument
304 struct xfs_mount *mp = cur->bc_mp; in xfs_btree_check_block()
307 fa = __xfs_btree_check_block(cur, block, level, bp); in xfs_btree_check_block()
309 XFS_TEST_ERROR(false, mp, xfs_btree_block_errtag(cur))) { in xfs_btree_check_block()
312 xfs_btree_mark_sick(cur); in xfs_btree_check_block()
320 struct xfs_btree_cur *cur, in __xfs_btree_check_ptr() argument
328 switch (cur->bc_ops->type) { in __xfs_btree_check_ptr()
330 if (!xfbtree_verify_bno(cur->bc_mem.xfbtree, in __xfs_btree_check_ptr()
335 if (!xfs_verify_fsbno(cur->bc_mp, in __xfs_btree_check_ptr()
340 if (!xfs_verify_agbno(to_perag(cur->bc_group), in __xfs_btree_check_ptr()
355 struct xfs_btree_cur *cur, in xfs_btree_check_ptr() argument
362 error = __xfs_btree_check_ptr(cur, ptr, index, level); in xfs_btree_check_ptr()
364 switch (cur->bc_ops->type) { in xfs_btree_check_ptr()
366 xfs_err(cur->bc_mp, in xfs_btree_check_ptr()
368 cur->bc_ops->name, cur->bc_flags, level, index, in xfs_btree_check_ptr()
372 xfs_err(cur->bc_mp, in xfs_btree_check_ptr()
374 cur->bc_ino.ip->i_ino, in xfs_btree_check_ptr()
375 cur->bc_ino.whichfork, cur->bc_ops->name, in xfs_btree_check_ptr()
379 xfs_err(cur->bc_mp, in xfs_btree_check_ptr()
381 cur->bc_group->xg_gno, cur->bc_ops->name, in xfs_btree_check_ptr()
385 xfs_btree_mark_sick(cur); in xfs_btree_check_ptr()
475 struct xfs_btree_cur *cur, in xfs_btree_free_block() argument
480 trace_xfs_btree_free_block(cur, bp); in xfs_btree_free_block()
486 if (unlikely(cur->bc_flags & XFS_BTREE_STAGING)) { in xfs_btree_free_block()
491 error = cur->bc_ops->free_block(cur, bp); in xfs_btree_free_block()
493 xfs_trans_binval(cur->bc_tp, bp); in xfs_btree_free_block()
494 XFS_BTREE_STATS_INC(cur, free); in xfs_btree_free_block()
504 struct xfs_btree_cur *cur, /* btree cursor */ in xfs_btree_del_cursor() argument
516 for (i = 0; i < cur->bc_nlevels; i++) { in xfs_btree_del_cursor()
517 if (cur->bc_levels[i].bp) in xfs_btree_del_cursor()
518 xfs_trans_brelse(cur->bc_tp, cur->bc_levels[i].bp); in xfs_btree_del_cursor()
529 ASSERT(!xfs_btree_is_bmap(cur->bc_ops) || cur->bc_bmap.allocated == 0 || in xfs_btree_del_cursor()
530 xfs_is_shutdown(cur->bc_mp) || error != 0); in xfs_btree_del_cursor()
532 if (cur->bc_group) in xfs_btree_del_cursor()
533 xfs_group_put(cur->bc_group); in xfs_btree_del_cursor()
534 kmem_cache_free(cur->bc_cache, cur); in xfs_btree_del_cursor()
540 struct xfs_btree_cur *cur) in xfs_btree_buftarg() argument
542 if (cur->bc_ops->type == XFS_BTREE_TYPE_MEM) in xfs_btree_buftarg()
543 return cur->bc_mem.xfbtree->target; in xfs_btree_buftarg()
544 return cur->bc_mp->m_ddev_targp; in xfs_btree_buftarg()
550 struct xfs_btree_cur *cur) in xfs_btree_bbsize() argument
552 if (cur->bc_ops->type == XFS_BTREE_TYPE_MEM) in xfs_btree_bbsize()
554 return cur->bc_mp->m_bsize; in xfs_btree_bbsize()
563 struct xfs_btree_cur *cur, /* input cursor */ in xfs_btree_dup_cursor() argument
566 struct xfs_mount *mp = cur->bc_mp; in xfs_btree_dup_cursor()
567 struct xfs_trans *tp = cur->bc_tp; in xfs_btree_dup_cursor()
577 if (unlikely(cur->bc_flags & XFS_BTREE_STAGING)) { in xfs_btree_dup_cursor()
585 new = cur->bc_ops->dup_cursor(cur); in xfs_btree_dup_cursor()
590 new->bc_rec = cur->bc_rec; in xfs_btree_dup_cursor()
596 new->bc_levels[i].ptr = cur->bc_levels[i].ptr; in xfs_btree_dup_cursor()
597 new->bc_levels[i].ra = cur->bc_levels[i].ra; in xfs_btree_dup_cursor()
598 bp = cur->bc_levels[i].bp; in xfs_btree_dup_cursor()
601 xfs_btree_buftarg(cur), in xfs_btree_dup_cursor()
603 xfs_btree_bbsize(cur), 0, &bp, in xfs_btree_dup_cursor()
604 cur->bc_ops->buf_ops); in xfs_btree_dup_cursor()
696 static inline size_t xfs_btree_block_len(struct xfs_btree_cur *cur) in xfs_btree_block_len() argument
698 if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) { in xfs_btree_block_len()
699 if (xfs_has_crc(cur->bc_mp)) in xfs_btree_block_len()
703 if (xfs_has_crc(cur->bc_mp)) in xfs_btree_block_len()
713 struct xfs_btree_cur *cur, in xfs_btree_rec_offset() argument
716 return xfs_btree_block_len(cur) + in xfs_btree_rec_offset()
717 (n - 1) * cur->bc_ops->rec_len; in xfs_btree_rec_offset()
725 struct xfs_btree_cur *cur, in xfs_btree_key_offset() argument
728 return xfs_btree_block_len(cur) + in xfs_btree_key_offset()
729 (n - 1) * cur->bc_ops->key_len; in xfs_btree_key_offset()
737 struct xfs_btree_cur *cur, in xfs_btree_high_key_offset() argument
740 return xfs_btree_block_len(cur) + in xfs_btree_high_key_offset()
741 (n - 1) * cur->bc_ops->key_len + (cur->bc_ops->key_len / 2); in xfs_btree_high_key_offset()
749 struct xfs_btree_cur *cur, in xfs_btree_ptr_offset() argument
753 return xfs_btree_block_len(cur) + in xfs_btree_ptr_offset()
754 cur->bc_ops->get_maxrecs(cur, level) * cur->bc_ops->key_len + in xfs_btree_ptr_offset()
755 (n - 1) * cur->bc_ops->ptr_len; in xfs_btree_ptr_offset()
763 struct xfs_btree_cur *cur, in xfs_btree_rec_addr() argument
768 ((char *)block + xfs_btree_rec_offset(cur, n)); in xfs_btree_rec_addr()
776 struct xfs_btree_cur *cur, in xfs_btree_key_addr() argument
781 ((char *)block + xfs_btree_key_offset(cur, n)); in xfs_btree_key_addr()
789 struct xfs_btree_cur *cur, in xfs_btree_high_key_addr() argument
794 ((char *)block + xfs_btree_high_key_offset(cur, n)); in xfs_btree_high_key_addr()
802 struct xfs_btree_cur *cur, in xfs_btree_ptr_addr() argument
811 ((char *)block + xfs_btree_ptr_offset(cur, n, level)); in xfs_btree_ptr_addr()
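The address helpers above (xfs_btree_rec_addr, xfs_btree_key_addr, xfs_btree_high_key_addr, xfs_btree_ptr_addr) are thin wrappers around one piece of arithmetic: a block-header length plus a 1-based slot index times a per-entry size, with node pointers starting only after the full key array. A minimal standalone sketch of that arithmetic follows; the toy_* names and the numeric sizes are purely illustrative, since the real values come from the mount's CRC feature bit and cur->bc_ops, which are not reproduced here.

#include <stdio.h>
#include <stddef.h>

/* Hypothetical stand-ins for cur->bc_ops sizes and the block header length. */
struct toy_btree_geom {
	size_t block_len;	/* bytes taken by the block header */
	size_t rec_len;		/* size of one leaf record */
	size_t key_len;		/* size of one (low, high) key slot */
	size_t ptr_len;		/* size of one child pointer */
	int    maxrecs;		/* maximum entries in a node block */
};

/* Offset of record n (1-based), mirroring xfs_btree_rec_offset(). */
static size_t toy_rec_offset(const struct toy_btree_geom *g, int n)
{
	return g->block_len + (size_t)(n - 1) * g->rec_len;
}

/* Offset of key n; the high key lives in the second half of the slot. */
static size_t toy_key_offset(const struct toy_btree_geom *g, int n)
{
	return g->block_len + (size_t)(n - 1) * g->key_len;
}

static size_t toy_high_key_offset(const struct toy_btree_geom *g, int n)
{
	return toy_key_offset(g, n) + g->key_len / 2;
}

/* Pointers start after maxrecs keys, as in xfs_btree_ptr_offset(). */
static size_t toy_ptr_offset(const struct toy_btree_geom *g, int n)
{
	return g->block_len + (size_t)g->maxrecs * g->key_len +
	       (size_t)(n - 1) * g->ptr_len;
}

int main(void)
{
	/* Illustrative numbers only, not the on-disk XFS sizes. */
	struct toy_btree_geom g = {
		.block_len = 56, .rec_len = 16, .key_len = 16,
		.ptr_len = 8, .maxrecs = 20,
	};

	printf("rec 3 at offset %zu\n", toy_rec_offset(&g, 3));
	printf("key 3 at %zu, high key at %zu\n",
	       toy_key_offset(&g, 3), toy_high_key_offset(&g, 3));
	printf("ptr 3 at %zu\n", toy_ptr_offset(&g, 3));
	return 0;
}

In the listed kernel code, xfs_btree_block_len() additionally selects the larger CRC-enabled header when xfs_has_crc() is true, which is why the header length is a function of the cursor rather than a constant.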
816 struct xfs_btree_cur *cur) in xfs_btree_ifork_ptr() argument
818 ASSERT(cur->bc_ops->type == XFS_BTREE_TYPE_INODE); in xfs_btree_ifork_ptr()
820 if (cur->bc_flags & XFS_BTREE_STAGING) in xfs_btree_ifork_ptr()
821 return cur->bc_ino.ifake->if_fork; in xfs_btree_ifork_ptr()
822 return xfs_ifork_ptr(cur->bc_ino.ip, cur->bc_ino.whichfork); in xfs_btree_ifork_ptr()
833 struct xfs_btree_cur *cur) in xfs_btree_get_iroot() argument
835 struct xfs_ifork *ifp = xfs_btree_ifork_ptr(cur); in xfs_btree_get_iroot()
846 struct xfs_btree_cur *cur, /* btree cursor */ in xfs_btree_get_block() argument
850 if (xfs_btree_at_iroot(cur, level)) { in xfs_btree_get_block()
852 return xfs_btree_get_iroot(cur); in xfs_btree_get_block()
855 *bpp = cur->bc_levels[level].bp; in xfs_btree_get_block()
865 struct xfs_btree_cur *cur, /* btree cursor */ in xfs_btree_firstrec() argument
874 block = xfs_btree_get_block(cur, level, &bp); in xfs_btree_firstrec()
875 if (xfs_btree_check_block(cur, block, level, bp)) in xfs_btree_firstrec()
885 cur->bc_levels[level].ptr = 1; in xfs_btree_firstrec()
895 struct xfs_btree_cur *cur, /* btree cursor */ in xfs_btree_lastrec() argument
904 block = xfs_btree_get_block(cur, level, &bp); in xfs_btree_lastrec()
905 if (xfs_btree_check_block(cur, block, level, bp)) in xfs_btree_lastrec()
915 cur->bc_levels[level].ptr = be16_to_cpu(block->bb_numrecs); in xfs_btree_lastrec()
957 struct xfs_btree_cur *cur, in xfs_btree_readahead_fsblock() argument
961 struct xfs_mount *mp = cur->bc_mp; in xfs_btree_readahead_fsblock()
968 mp->m_bsize, cur->bc_ops->buf_ops); in xfs_btree_readahead_fsblock()
974 mp->m_bsize, cur->bc_ops->buf_ops); in xfs_btree_readahead_fsblock()
983 struct xfs_btree_cur *cur, in xfs_btree_readahead_memblock() argument
987 struct xfs_buftarg *btp = cur->bc_mem.xfbtree->target; in xfs_btree_readahead_memblock()
994 cur->bc_ops->buf_ops); in xfs_btree_readahead_memblock()
1000 cur->bc_ops->buf_ops); in xfs_btree_readahead_memblock()
1009 struct xfs_btree_cur *cur, in xfs_btree_readahead_agblock() argument
1013 struct xfs_mount *mp = cur->bc_mp; in xfs_btree_readahead_agblock()
1014 struct xfs_perag *pag = to_perag(cur->bc_group); in xfs_btree_readahead_agblock()
1022 cur->bc_ops->buf_ops); in xfs_btree_readahead_agblock()
1029 cur->bc_ops->buf_ops); in xfs_btree_readahead_agblock()
1042 struct xfs_btree_cur *cur, /* btree cursor */ in xfs_btree_readahead() argument
1052 if (xfs_btree_at_iroot(cur, lev)) in xfs_btree_readahead()
1055 if ((cur->bc_levels[lev].ra | lr) == cur->bc_levels[lev].ra) in xfs_btree_readahead()
1058 cur->bc_levels[lev].ra |= lr; in xfs_btree_readahead()
1059 block = XFS_BUF_TO_BLOCK(cur->bc_levels[lev].bp); in xfs_btree_readahead()
1061 switch (cur->bc_ops->type) { in xfs_btree_readahead()
1063 return xfs_btree_readahead_agblock(cur, lr, block); in xfs_btree_readahead()
1065 return xfs_btree_readahead_fsblock(cur, lr, block); in xfs_btree_readahead()
1067 return xfs_btree_readahead_memblock(cur, lr, block); in xfs_btree_readahead()
1076 struct xfs_btree_cur *cur, in xfs_btree_ptr_to_daddr() argument
1082 error = xfs_btree_check_ptr(cur, ptr, 0, 1); in xfs_btree_ptr_to_daddr()
1086 switch (cur->bc_ops->type) { in xfs_btree_ptr_to_daddr()
1088 *daddr = xfs_agbno_to_daddr(to_perag(cur->bc_group), in xfs_btree_ptr_to_daddr()
1092 *daddr = XFS_FSB_TO_DADDR(cur->bc_mp, be64_to_cpu(ptr->l)); in xfs_btree_ptr_to_daddr()
1109 struct xfs_btree_cur *cur, in xfs_btree_readahead_ptr() argument
1115 if (xfs_btree_ptr_to_daddr(cur, ptr, &daddr)) in xfs_btree_readahead_ptr()
1117 xfs_buf_readahead(xfs_btree_buftarg(cur), daddr, in xfs_btree_readahead_ptr()
1118 xfs_btree_bbsize(cur) * count, in xfs_btree_readahead_ptr()
1119 cur->bc_ops->buf_ops); in xfs_btree_readahead_ptr()
1128 struct xfs_btree_cur *cur, /* btree cursor */ in xfs_btree_setbuf() argument
1134 if (cur->bc_levels[lev].bp) in xfs_btree_setbuf()
1135 xfs_trans_brelse(cur->bc_tp, cur->bc_levels[lev].bp); in xfs_btree_setbuf()
1136 cur->bc_levels[lev].bp = bp; in xfs_btree_setbuf()
1137 cur->bc_levels[lev].ra = 0; in xfs_btree_setbuf()
1140 if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) { in xfs_btree_setbuf()
1142 cur->bc_levels[lev].ra |= XFS_BTCUR_LEFTRA; in xfs_btree_setbuf()
1144 cur->bc_levels[lev].ra |= XFS_BTCUR_RIGHTRA; in xfs_btree_setbuf()
1147 cur->bc_levels[lev].ra |= XFS_BTCUR_LEFTRA; in xfs_btree_setbuf()
1149 cur->bc_levels[lev].ra |= XFS_BTCUR_RIGHTRA; in xfs_btree_setbuf()
1155 struct xfs_btree_cur *cur, in xfs_btree_ptr_is_null() argument
1158 if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) in xfs_btree_ptr_is_null()
1166 struct xfs_btree_cur *cur, in xfs_btree_set_ptr_null() argument
1169 if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) in xfs_btree_set_ptr_null()
1177 struct xfs_btree_cur *cur, in xfs_btree_ptrs_equal() argument
1181 if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) in xfs_btree_ptrs_equal()
1191 struct xfs_btree_cur *cur, in xfs_btree_get_sibling() argument
1198 if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) { in xfs_btree_get_sibling()
1213 struct xfs_btree_cur *cur, in xfs_btree_set_sibling() argument
1220 if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) { in xfs_btree_set_sibling()
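xfs_btree_ptr_is_null(), xfs_btree_set_ptr_null(), xfs_btree_ptrs_equal() and the sibling helpers all branch on cur->bc_ops->ptr_len, because long-format trees carry 64-bit block addresses while short-format per-AG trees carry 32-bit ones. A standalone sketch of that union-plus-width pattern, assuming an all-ones sentinel for "no block"; the toy_* names are hypothetical and not kernel APIs.

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical mirror of a two-width block pointer. */
union toy_btree_ptr {
	uint64_t l;	/* long pointer for by-fsblock trees */
	uint32_t s;	/* short pointer for per-AG trees */
};

enum toy_ptr_len { TOY_SHORT_PTR, TOY_LONG_PTR };

static bool toy_ptr_is_null(enum toy_ptr_len len, const union toy_btree_ptr *p)
{
	if (len == TOY_LONG_PTR)
		return p->l == UINT64_MAX;	/* all-ones sentinel */
	return p->s == UINT32_MAX;
}

static void toy_set_ptr_null(enum toy_ptr_len len, union toy_btree_ptr *p)
{
	if (len == TOY_LONG_PTR)
		p->l = UINT64_MAX;
	else
		p->s = UINT32_MAX;
}

int main(void)
{
	union toy_btree_ptr p;

	toy_set_ptr_null(TOY_SHORT_PTR, &p);
	printf("short ptr null? %d\n", toy_ptr_is_null(TOY_SHORT_PTR, &p));

	p.l = 12345;
	printf("long ptr null? %d\n", toy_ptr_is_null(TOY_LONG_PTR, &p));
	return 0;
}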
1302 struct xfs_btree_cur *cur) in xfs_btree_owner() argument
1304 switch (cur->bc_ops->type) { in xfs_btree_owner()
1306 return cur->bc_mem.xfbtree->owner; in xfs_btree_owner()
1308 return cur->bc_ino.ip->i_ino; in xfs_btree_owner()
1310 return cur->bc_group->xg_gno; in xfs_btree_owner()
1319 struct xfs_btree_cur *cur, in xfs_btree_init_block_cur() argument
1324 xfs_btree_init_buf(cur->bc_mp, bp, cur->bc_ops, level, numrecs, in xfs_btree_init_block_cur()
1325 xfs_btree_owner(cur)); in xfs_btree_init_block_cur()
1330 struct xfs_btree_cur *cur, in xfs_btree_buf_to_ptr() argument
1334 switch (cur->bc_ops->type) { in xfs_btree_buf_to_ptr()
1336 ptr->s = cpu_to_be32(xfs_daddr_to_agbno(cur->bc_mp, in xfs_btree_buf_to_ptr()
1340 ptr->l = cpu_to_be64(XFS_DADDR_TO_FSB(cur->bc_mp, in xfs_btree_buf_to_ptr()
1351 struct xfs_btree_cur *cur, in xfs_btree_set_refs() argument
1354 xfs_buf_set_ref(bp, cur->bc_ops->lru_refs); in xfs_btree_set_refs()
1359 struct xfs_btree_cur *cur, in xfs_btree_get_buf_block() argument
1367 error = xfs_btree_ptr_to_daddr(cur, ptr, &d); in xfs_btree_get_buf_block()
1370 error = xfs_trans_get_buf(cur->bc_tp, xfs_btree_buftarg(cur), d, in xfs_btree_get_buf_block()
1371 xfs_btree_bbsize(cur), 0, bpp); in xfs_btree_get_buf_block()
1375 (*bpp)->b_ops = cur->bc_ops->buf_ops; in xfs_btree_get_buf_block()
1386 struct xfs_btree_cur *cur, in xfs_btree_read_buf_block() argument
1392 struct xfs_mount *mp = cur->bc_mp; in xfs_btree_read_buf_block()
1399 error = xfs_btree_ptr_to_daddr(cur, ptr, &d); in xfs_btree_read_buf_block()
1402 error = xfs_trans_read_buf(mp, cur->bc_tp, xfs_btree_buftarg(cur), d, in xfs_btree_read_buf_block()
1403 xfs_btree_bbsize(cur), flags, bpp, in xfs_btree_read_buf_block()
1404 cur->bc_ops->buf_ops); in xfs_btree_read_buf_block()
1406 xfs_btree_mark_sick(cur); in xfs_btree_read_buf_block()
1410 xfs_btree_set_refs(cur, *bpp); in xfs_btree_read_buf_block()
1420 struct xfs_btree_cur *cur, in xfs_btree_copy_keys() argument
1426 memcpy(dst_key, src_key, numkeys * cur->bc_ops->key_len); in xfs_btree_copy_keys()
1434 struct xfs_btree_cur *cur, in xfs_btree_copy_recs() argument
1440 memcpy(dst_rec, src_rec, numrecs * cur->bc_ops->rec_len); in xfs_btree_copy_recs()
1448 struct xfs_btree_cur *cur, in xfs_btree_copy_ptrs() argument
1454 memcpy(dst_ptr, src_ptr, numptrs * cur->bc_ops->ptr_len); in xfs_btree_copy_ptrs()
1462 struct xfs_btree_cur *cur, in xfs_btree_shift_keys() argument
1472 dst_key = (char *)key + (dir * cur->bc_ops->key_len); in xfs_btree_shift_keys()
1473 memmove(dst_key, key, numkeys * cur->bc_ops->key_len); in xfs_btree_shift_keys()
1481 struct xfs_btree_cur *cur, in xfs_btree_shift_recs() argument
1491 dst_rec = (char *)rec + (dir * cur->bc_ops->rec_len); in xfs_btree_shift_recs()
1492 memmove(dst_rec, rec, numrecs * cur->bc_ops->rec_len); in xfs_btree_shift_recs()
1500 struct xfs_btree_cur *cur, in xfs_btree_shift_ptrs() argument
1510 dst_ptr = (char *)ptr + (dir * cur->bc_ops->ptr_len); in xfs_btree_shift_ptrs()
1511 memmove(dst_ptr, ptr, numptrs * cur->bc_ops->ptr_len); in xfs_btree_shift_ptrs()
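The copy and shift helpers above reduce to memcpy()/memmove() over arrays of fixed-size entries, with the shift direction expressed as dir * entry_len. A small sketch of the same gap-opening move used when an entry is inserted mid-block; the toy_* names and the record layout are illustrative only, since the kernel code moves opaque byte ranges sized by cur->bc_ops.

#include <string.h>
#include <stdio.h>

/* Hypothetical fixed-size record. */
struct toy_rec {
	unsigned long startblock;
	unsigned long blockcount;
};

/* Open a one-slot gap at index 'at' (0-based) by shifting records right,
 * the same memmove-with-direction pattern as xfs_btree_shift_recs(). */
static void toy_shift_recs_right(struct toy_rec *recs, int nrecs, int at)
{
	memmove(&recs[at + 1], &recs[at],
		(size_t)(nrecs - at) * sizeof(struct toy_rec));
}

int main(void)
{
	struct toy_rec recs[8] = {
		{ 10, 2 }, { 20, 4 }, { 30, 1 },
	};
	int nrecs = 3;

	/* Insert { 15, 3 } at index 1, keeping the array sorted. */
	toy_shift_recs_right(recs, nrecs, 1);
	recs[1] = (struct toy_rec){ 15, 3 };
	nrecs++;

	for (int i = 0; i < nrecs; i++)
		printf("rec %d: [%lu, +%lu)\n", i,
		       recs[i].startblock, recs[i].blockcount);
	return 0;
}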
1519 struct xfs_btree_cur *cur, in xfs_btree_log_keys() argument
1526 xfs_trans_buf_set_type(cur->bc_tp, bp, XFS_BLFT_BTREE_BUF); in xfs_btree_log_keys()
1527 xfs_trans_log_buf(cur->bc_tp, bp, in xfs_btree_log_keys()
1528 xfs_btree_key_offset(cur, first), in xfs_btree_log_keys()
1529 xfs_btree_key_offset(cur, last + 1) - 1); in xfs_btree_log_keys()
1531 xfs_trans_log_inode(cur->bc_tp, cur->bc_ino.ip, in xfs_btree_log_keys()
1532 xfs_ilog_fbroot(cur->bc_ino.whichfork)); in xfs_btree_log_keys()
1541 struct xfs_btree_cur *cur, in xfs_btree_log_recs() argument
1547 xfs_trans_log_inode(cur->bc_tp, cur->bc_ino.ip, in xfs_btree_log_recs()
1548 xfs_ilog_fbroot(cur->bc_ino.whichfork)); in xfs_btree_log_recs()
1552 xfs_trans_buf_set_type(cur->bc_tp, bp, XFS_BLFT_BTREE_BUF); in xfs_btree_log_recs()
1553 xfs_trans_log_buf(cur->bc_tp, bp, in xfs_btree_log_recs()
1554 xfs_btree_rec_offset(cur, first), in xfs_btree_log_recs()
1555 xfs_btree_rec_offset(cur, last + 1) - 1); in xfs_btree_log_recs()
1563 struct xfs_btree_cur *cur, /* btree cursor */ in xfs_btree_log_ptrs() argument
1573 xfs_trans_buf_set_type(cur->bc_tp, bp, XFS_BLFT_BTREE_BUF); in xfs_btree_log_ptrs()
1574 xfs_trans_log_buf(cur->bc_tp, bp, in xfs_btree_log_ptrs()
1575 xfs_btree_ptr_offset(cur, first, level), in xfs_btree_log_ptrs()
1576 xfs_btree_ptr_offset(cur, last + 1, level) - 1); in xfs_btree_log_ptrs()
1578 xfs_trans_log_inode(cur->bc_tp, cur->bc_ino.ip, in xfs_btree_log_ptrs()
1579 xfs_ilog_fbroot(cur->bc_ino.whichfork)); in xfs_btree_log_ptrs()
1589 struct xfs_btree_cur *cur, /* btree cursor */ in xfs_btree_log_block() argument
1626 if (xfs_has_crc(cur->bc_mp)) { in xfs_btree_log_block()
1641 (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) ? in xfs_btree_log_block()
1644 xfs_trans_buf_set_type(cur->bc_tp, bp, XFS_BLFT_BTREE_BUF); in xfs_btree_log_block()
1645 xfs_trans_log_buf(cur->bc_tp, bp, first, last); in xfs_btree_log_block()
1647 xfs_trans_log_inode(cur->bc_tp, cur->bc_ino.ip, in xfs_btree_log_block()
1648 xfs_ilog_fbroot(cur->bc_ino.whichfork)); in xfs_btree_log_block()
1658 struct xfs_btree_cur *cur, in xfs_btree_increment() argument
1668 ASSERT(level < cur->bc_nlevels); in xfs_btree_increment()
1671 xfs_btree_readahead(cur, level, XFS_BTCUR_RIGHTRA); in xfs_btree_increment()
1674 block = xfs_btree_get_block(cur, level, &bp); in xfs_btree_increment()
1677 error = xfs_btree_check_block(cur, block, level, bp); in xfs_btree_increment()
1683 if (++cur->bc_levels[level].ptr <= xfs_btree_get_numrecs(block)) in xfs_btree_increment()
1687 xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_RIGHTSIB); in xfs_btree_increment()
1688 if (xfs_btree_ptr_is_null(cur, &ptr)) in xfs_btree_increment()
1691 XFS_BTREE_STATS_INC(cur, increment); in xfs_btree_increment()
1697 for (lev = level + 1; lev < cur->bc_nlevels; lev++) { in xfs_btree_increment()
1698 block = xfs_btree_get_block(cur, lev, &bp); in xfs_btree_increment()
1701 error = xfs_btree_check_block(cur, block, lev, bp); in xfs_btree_increment()
1706 if (++cur->bc_levels[lev].ptr <= xfs_btree_get_numrecs(block)) in xfs_btree_increment()
1710 xfs_btree_readahead(cur, lev, XFS_BTCUR_RIGHTRA); in xfs_btree_increment()
1717 if (lev == cur->bc_nlevels) { in xfs_btree_increment()
1718 if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE) in xfs_btree_increment()
1721 xfs_btree_mark_sick(cur); in xfs_btree_increment()
1725 ASSERT(lev < cur->bc_nlevels); in xfs_btree_increment()
1731 for (block = xfs_btree_get_block(cur, lev, &bp); lev > level; ) { in xfs_btree_increment()
1734 ptrp = xfs_btree_ptr_addr(cur, cur->bc_levels[lev].ptr, block); in xfs_btree_increment()
1736 error = xfs_btree_read_buf_block(cur, ptrp, 0, &block, &bp); in xfs_btree_increment()
1740 xfs_btree_setbuf(cur, lev, bp); in xfs_btree_increment()
1741 cur->bc_levels[lev].ptr = 1; in xfs_btree_increment()
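xfs_btree_increment() bumps the slot pointer at the requested level and, when it runs off the end of the block, walks up until a level still has room (or a right sibling exists) and then back down, resetting each descended level to slot 1; xfs_btree_decrement() below is the mirror image. A deliberately tiny sketch of that walk over a hypothetical fixed two-level tree; the real code also issues readahead, re-reads sibling buffers, and handles arbitrary depth, all omitted here.

#include <stdio.h>

/* One root whose children are NLEAVES leaves of LEAFRECS records each. */
#define NLEAVES 3
#define LEAFRECS 4

struct toy_cursor {
	int ptr[2];	/* 1-based slot per level: [0] = leaf, [1] = root */
};

static int toy_increment(struct toy_cursor *cur)
{
	/* Fast path: more records in the current leaf. */
	if (++cur->ptr[0] <= LEAFRECS)
		return 1;

	/* Walk up: advance to the next child of the root, if any. */
	if (++cur->ptr[1] > NLEAVES)
		return 0;	/* off the right edge of the tree */

	/* Walk back down: first record of the newly entered leaf. */
	cur->ptr[0] = 1;
	return 1;
}

int main(void)
{
	struct toy_cursor cur = { .ptr = { 0, 1 } };
	int n = 0;

	while (toy_increment(&cur))
		n++;
	printf("visited %d records (expected %d)\n", n, NLEAVES * LEAFRECS);
	return 0;
}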
1761 struct xfs_btree_cur *cur, in xfs_btree_decrement() argument
1771 ASSERT(level < cur->bc_nlevels); in xfs_btree_decrement()
1774 xfs_btree_readahead(cur, level, XFS_BTCUR_LEFTRA); in xfs_btree_decrement()
1777 if (--cur->bc_levels[level].ptr > 0) in xfs_btree_decrement()
1781 block = xfs_btree_get_block(cur, level, &bp); in xfs_btree_decrement()
1784 error = xfs_btree_check_block(cur, block, level, bp); in xfs_btree_decrement()
1790 xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_LEFTSIB); in xfs_btree_decrement()
1791 if (xfs_btree_ptr_is_null(cur, &ptr)) in xfs_btree_decrement()
1794 XFS_BTREE_STATS_INC(cur, decrement); in xfs_btree_decrement()
1800 for (lev = level + 1; lev < cur->bc_nlevels; lev++) { in xfs_btree_decrement()
1801 if (--cur->bc_levels[lev].ptr > 0) in xfs_btree_decrement()
1804 xfs_btree_readahead(cur, lev, XFS_BTCUR_LEFTRA); in xfs_btree_decrement()
1811 if (lev == cur->bc_nlevels) { in xfs_btree_decrement()
1812 if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE) in xfs_btree_decrement()
1815 xfs_btree_mark_sick(cur); in xfs_btree_decrement()
1819 ASSERT(lev < cur->bc_nlevels); in xfs_btree_decrement()
1825 for (block = xfs_btree_get_block(cur, lev, &bp); lev > level; ) { in xfs_btree_decrement()
1828 ptrp = xfs_btree_ptr_addr(cur, cur->bc_levels[lev].ptr, block); in xfs_btree_decrement()
1830 error = xfs_btree_read_buf_block(cur, ptrp, 0, &block, &bp); in xfs_btree_decrement()
1833 xfs_btree_setbuf(cur, lev, bp); in xfs_btree_decrement()
1834 cur->bc_levels[lev].ptr = xfs_btree_get_numrecs(block); in xfs_btree_decrement()
1854 struct xfs_btree_cur *cur, in xfs_btree_check_block_owner() argument
1859 if (!xfs_has_crc(cur->bc_mp) || in xfs_btree_check_block_owner()
1860 (cur->bc_flags & XFS_BTREE_BMBT_INVALID_OWNER)) in xfs_btree_check_block_owner()
1863 owner = xfs_btree_owner(cur); in xfs_btree_check_block_owner()
1864 if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) { in xfs_btree_check_block_owner()
1877 struct xfs_btree_cur *cur, /* btree cursor */ in xfs_btree_lookup_get_block() argument
1887 if (xfs_btree_at_iroot(cur, level)) { in xfs_btree_lookup_get_block()
1888 *blkp = xfs_btree_get_iroot(cur); in xfs_btree_lookup_get_block()
1898 bp = cur->bc_levels[level].bp; in xfs_btree_lookup_get_block()
1899 error = xfs_btree_ptr_to_daddr(cur, pp, &daddr); in xfs_btree_lookup_get_block()
1907 error = xfs_btree_read_buf_block(cur, pp, 0, blkp, &bp); in xfs_btree_lookup_get_block()
1912 if (xfs_btree_check_block_owner(cur, *blkp) != NULL) in xfs_btree_lookup_get_block()
1923 xfs_btree_setbuf(cur, level, bp); in xfs_btree_lookup_get_block()
1929 xfs_trans_brelse(cur->bc_tp, bp); in xfs_btree_lookup_get_block()
1930 xfs_btree_mark_sick(cur); in xfs_btree_lookup_get_block()
1941 struct xfs_btree_cur *cur, in xfs_lookup_get_search_key() argument
1948 cur->bc_ops->init_key_from_rec(kp, in xfs_lookup_get_search_key()
1949 xfs_btree_rec_addr(cur, keyno, block)); in xfs_lookup_get_search_key()
1953 return xfs_btree_key_addr(cur, keyno, block); in xfs_lookup_get_search_key()
1961 struct xfs_btree_cur *cur, in xfs_btree_init_ptr_from_cur() argument
1964 if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE) { in xfs_btree_init_ptr_from_cur()
1970 } else if (cur->bc_flags & XFS_BTREE_STAGING) { in xfs_btree_init_ptr_from_cur()
1971 ptr->s = cpu_to_be32(cur->bc_ag.afake->af_root); in xfs_btree_init_ptr_from_cur()
1973 cur->bc_ops->init_ptr_from_cur(cur, ptr); in xfs_btree_init_ptr_from_cur()
1983 struct xfs_btree_cur *cur, /* btree cursor */ in xfs_btree_lookup() argument
1995 XFS_BTREE_STATS_INC(cur, lookup); in xfs_btree_lookup()
1998 if (XFS_IS_CORRUPT(cur->bc_mp, cur->bc_nlevels == 0)) { in xfs_btree_lookup()
1999 xfs_btree_mark_sick(cur); in xfs_btree_lookup()
2007 xfs_btree_init_ptr_from_cur(cur, &ptr); in xfs_btree_lookup()
2016 for (level = cur->bc_nlevels - 1, diff = 1; level >= 0; level--) { in xfs_btree_lookup()
2018 error = xfs_btree_lookup_get_block(cur, level, pp, &block); in xfs_btree_lookup()
2039 if (level != 0 || cur->bc_nlevels != 1) { in xfs_btree_lookup()
2042 cur->bc_mp, block, in xfs_btree_lookup()
2044 xfs_btree_mark_sick(cur); in xfs_btree_lookup()
2048 cur->bc_levels[0].ptr = dir != XFS_LOOKUP_LE; in xfs_btree_lookup()
2058 XFS_BTREE_STATS_INC(cur, compare); in xfs_btree_lookup()
2064 kp = xfs_lookup_get_search_key(cur, level, in xfs_btree_lookup()
2073 diff = cur->bc_ops->key_diff(cur, kp); in xfs_btree_lookup()
2094 pp = xfs_btree_ptr_addr(cur, keyno, block); in xfs_btree_lookup()
2096 error = xfs_btree_debug_check_ptr(cur, pp, 0, level); in xfs_btree_lookup()
2100 cur->bc_levels[level].ptr = keyno; in xfs_btree_lookup()
2111 xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_RIGHTSIB); in xfs_btree_lookup()
2114 !xfs_btree_ptr_is_null(cur, &ptr)) { in xfs_btree_lookup()
2117 cur->bc_levels[0].ptr = keyno; in xfs_btree_lookup()
2118 error = xfs_btree_increment(cur, 0, &i); in xfs_btree_lookup()
2121 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) { in xfs_btree_lookup()
2122 xfs_btree_mark_sick(cur); in xfs_btree_lookup()
2130 cur->bc_levels[0].ptr = keyno; in xfs_btree_lookup()
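At each level, xfs_btree_lookup() binary-searches the block using cur->bc_ops->key_diff() and then follows the chosen pointer down to the next level. A simplified standalone sketch of the XFS_LOOKUP_LE-style search over a single block; this is not the exact kernel loop (which also handles the EQ/GE directions, level descent, and corruption checks), and the toy_* names are hypothetical.

#include <stdio.h>

/* Comparator in the style of key_diff(): sign says whether the search key
 * sorts before (<0), at (0), or after (>0) the key in slot keyno. */
static long toy_key_diff(const long *keys, int keyno, long search)
{
	return search - keys[keyno - 1];	/* keyno is 1-based */
}

/* Find the last slot whose key is <= search (an LE lookup).
 * Returns the 1-based slot, or 0 if every key is greater. */
static int toy_lookup_le(const long *keys, int nrecs, long search)
{
	int low = 1, high = nrecs, keyno = 0;

	while (low <= high) {
		int mid = (low + high) / 2;
		long diff = toy_key_diff(keys, mid, search);

		if (diff < 0)
			high = mid - 1;
		else if (diff > 0) {
			keyno = mid;	/* best "less than" candidate so far */
			low = mid + 1;
		} else
			return mid;	/* exact match */
	}
	return keyno;
}

int main(void)
{
	long keys[] = { 10, 20, 30, 40 };

	printf("LE lookup of 25 -> slot %d\n", toy_lookup_le(keys, 4, 25));
	printf("LE lookup of 5  -> slot %d\n", toy_lookup_le(keys, 4, 5));
	return 0;
}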
2148 struct xfs_btree_cur *cur, in xfs_btree_high_key_from_key() argument
2151 ASSERT(cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING); in xfs_btree_high_key_from_key()
2153 (cur->bc_ops->key_len / 2)); in xfs_btree_high_key_from_key()
2159 struct xfs_btree_cur *cur, in xfs_btree_get_leaf_keys() argument
2169 rec = xfs_btree_rec_addr(cur, 1, block); in xfs_btree_get_leaf_keys()
2170 cur->bc_ops->init_key_from_rec(key, rec); in xfs_btree_get_leaf_keys()
2172 if (cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING) { in xfs_btree_get_leaf_keys()
2174 cur->bc_ops->init_high_key_from_rec(&max_hkey, rec); in xfs_btree_get_leaf_keys()
2176 rec = xfs_btree_rec_addr(cur, n, block); in xfs_btree_get_leaf_keys()
2177 cur->bc_ops->init_high_key_from_rec(&hkey, rec); in xfs_btree_get_leaf_keys()
2178 if (xfs_btree_keycmp_gt(cur, &hkey, &max_hkey)) in xfs_btree_get_leaf_keys()
2182 high = xfs_btree_high_key_from_key(cur, key); in xfs_btree_get_leaf_keys()
2183 memcpy(high, &max_hkey, cur->bc_ops->key_len / 2); in xfs_btree_get_leaf_keys()
2190 struct xfs_btree_cur *cur, in xfs_btree_get_node_keys() argument
2199 if (cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING) { in xfs_btree_get_node_keys()
2200 memcpy(key, xfs_btree_key_addr(cur, 1, block), in xfs_btree_get_node_keys()
2201 cur->bc_ops->key_len / 2); in xfs_btree_get_node_keys()
2203 max_hkey = xfs_btree_high_key_addr(cur, 1, block); in xfs_btree_get_node_keys()
2205 hkey = xfs_btree_high_key_addr(cur, n, block); in xfs_btree_get_node_keys()
2206 if (xfs_btree_keycmp_gt(cur, hkey, max_hkey)) in xfs_btree_get_node_keys()
2210 high = xfs_btree_high_key_from_key(cur, key); in xfs_btree_get_node_keys()
2211 memcpy(high, max_hkey, cur->bc_ops->key_len / 2); in xfs_btree_get_node_keys()
2213 memcpy(key, xfs_btree_key_addr(cur, 1, block), in xfs_btree_get_node_keys()
2214 cur->bc_ops->key_len); in xfs_btree_get_node_keys()
2221 struct xfs_btree_cur *cur, in xfs_btree_get_keys() argument
2226 xfs_btree_get_leaf_keys(cur, block, key); in xfs_btree_get_keys()
2228 xfs_btree_get_node_keys(cur, block, key); in xfs_btree_get_keys()
2240 struct xfs_btree_cur *cur, in xfs_btree_needs_key_update() argument
2243 return (cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING) || ptr == 1; in xfs_btree_needs_key_update()
2253 struct xfs_btree_cur *cur, in __xfs_btree_updkeys() argument
2267 ASSERT(cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING); in __xfs_btree_updkeys()
2270 if (level + 1 >= cur->bc_nlevels) in __xfs_btree_updkeys()
2273 trace_xfs_btree_updkeys(cur, level, bp0); in __xfs_btree_updkeys()
2276 hkey = xfs_btree_high_key_from_key(cur, lkey); in __xfs_btree_updkeys()
2277 xfs_btree_get_keys(cur, block, lkey); in __xfs_btree_updkeys()
2278 for (level++; level < cur->bc_nlevels; level++) { in __xfs_btree_updkeys()
2282 block = xfs_btree_get_block(cur, level, &bp); in __xfs_btree_updkeys()
2283 trace_xfs_btree_updkeys(cur, level, bp); in __xfs_btree_updkeys()
2285 error = xfs_btree_check_block(cur, block, level, bp); in __xfs_btree_updkeys()
2289 ptr = cur->bc_levels[level].ptr; in __xfs_btree_updkeys()
2290 nlkey = xfs_btree_key_addr(cur, ptr, block); in __xfs_btree_updkeys()
2291 nhkey = xfs_btree_high_key_addr(cur, ptr, block); in __xfs_btree_updkeys()
2293 xfs_btree_keycmp_eq(cur, nlkey, lkey) && in __xfs_btree_updkeys()
2294 xfs_btree_keycmp_eq(cur, nhkey, hkey)) in __xfs_btree_updkeys()
2296 xfs_btree_copy_keys(cur, nlkey, lkey, 1); in __xfs_btree_updkeys()
2297 xfs_btree_log_keys(cur, bp, ptr, ptr); in __xfs_btree_updkeys()
2298 if (level + 1 >= cur->bc_nlevels) in __xfs_btree_updkeys()
2300 xfs_btree_get_node_keys(cur, block, lkey); in __xfs_btree_updkeys()
2309 struct xfs_btree_cur *cur, in xfs_btree_updkeys_force() argument
2315 block = xfs_btree_get_block(cur, level, &bp); in xfs_btree_updkeys_force()
2316 return __xfs_btree_updkeys(cur, level, block, bp, true); in xfs_btree_updkeys_force()
2324 struct xfs_btree_cur *cur, in xfs_btree_update_keys() argument
2335 block = xfs_btree_get_block(cur, level, &bp); in xfs_btree_update_keys()
2336 if (cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING) in xfs_btree_update_keys()
2337 return __xfs_btree_updkeys(cur, level, block, bp, false); in xfs_btree_update_keys()
2345 xfs_btree_get_keys(cur, block, &key); in xfs_btree_update_keys()
2346 for (level++, ptr = 1; ptr == 1 && level < cur->bc_nlevels; level++) { in xfs_btree_update_keys()
2350 block = xfs_btree_get_block(cur, level, &bp); in xfs_btree_update_keys()
2352 error = xfs_btree_check_block(cur, block, level, bp); in xfs_btree_update_keys()
2356 ptr = cur->bc_levels[level].ptr; in xfs_btree_update_keys()
2357 kp = xfs_btree_key_addr(cur, ptr, block); in xfs_btree_update_keys()
2358 xfs_btree_copy_keys(cur, kp, &key, 1); in xfs_btree_update_keys()
2359 xfs_btree_log_keys(cur, bp, ptr, ptr); in xfs_btree_update_keys()
2372 struct xfs_btree_cur *cur, in xfs_btree_update() argument
2382 block = xfs_btree_get_block(cur, 0, &bp); in xfs_btree_update()
2385 error = xfs_btree_check_block(cur, block, 0, bp); in xfs_btree_update()
2390 ptr = cur->bc_levels[0].ptr; in xfs_btree_update()
2391 rp = xfs_btree_rec_addr(cur, ptr, block); in xfs_btree_update()
2394 xfs_btree_copy_recs(cur, rp, rec, 1); in xfs_btree_update()
2395 xfs_btree_log_recs(cur, bp, ptr, ptr); in xfs_btree_update()
2398 if (xfs_btree_needs_key_update(cur, ptr)) { in xfs_btree_update()
2399 error = xfs_btree_update_keys(cur, 0); in xfs_btree_update()
2416 struct xfs_btree_cur *cur, in xfs_btree_lshift() argument
2434 if (xfs_btree_at_iroot(cur, level)) in xfs_btree_lshift()
2438 right = xfs_btree_get_block(cur, level, &rbp); in xfs_btree_lshift()
2441 error = xfs_btree_check_block(cur, right, level, rbp); in xfs_btree_lshift()
2447 xfs_btree_get_sibling(cur, right, &lptr, XFS_BB_LEFTSIB); in xfs_btree_lshift()
2448 if (xfs_btree_ptr_is_null(cur, &lptr)) in xfs_btree_lshift()
2455 if (cur->bc_levels[level].ptr <= 1) in xfs_btree_lshift()
2459 error = xfs_btree_read_buf_block(cur, &lptr, 0, &left, &lbp); in xfs_btree_lshift()
2465 if (lrecs == cur->bc_ops->get_maxrecs(cur, level)) in xfs_btree_lshift()
2478 XFS_BTREE_STATS_INC(cur, lshift); in xfs_btree_lshift()
2479 XFS_BTREE_STATS_ADD(cur, moves, 1); in xfs_btree_lshift()
2490 lkp = xfs_btree_key_addr(cur, lrecs, left); in xfs_btree_lshift()
2491 rkp = xfs_btree_key_addr(cur, 1, right); in xfs_btree_lshift()
2493 lpp = xfs_btree_ptr_addr(cur, lrecs, left); in xfs_btree_lshift()
2494 rpp = xfs_btree_ptr_addr(cur, 1, right); in xfs_btree_lshift()
2496 error = xfs_btree_debug_check_ptr(cur, rpp, 0, level); in xfs_btree_lshift()
2500 xfs_btree_copy_keys(cur, lkp, rkp, 1); in xfs_btree_lshift()
2501 xfs_btree_copy_ptrs(cur, lpp, rpp, 1); in xfs_btree_lshift()
2503 xfs_btree_log_keys(cur, lbp, lrecs, lrecs); in xfs_btree_lshift()
2504 xfs_btree_log_ptrs(cur, lbp, lrecs, lrecs); in xfs_btree_lshift()
2506 ASSERT(cur->bc_ops->keys_inorder(cur, in xfs_btree_lshift()
2507 xfs_btree_key_addr(cur, lrecs - 1, left), lkp)); in xfs_btree_lshift()
2512 lrp = xfs_btree_rec_addr(cur, lrecs, left); in xfs_btree_lshift()
2513 rrp = xfs_btree_rec_addr(cur, 1, right); in xfs_btree_lshift()
2515 xfs_btree_copy_recs(cur, lrp, rrp, 1); in xfs_btree_lshift()
2516 xfs_btree_log_recs(cur, lbp, lrecs, lrecs); in xfs_btree_lshift()
2518 ASSERT(cur->bc_ops->recs_inorder(cur, in xfs_btree_lshift()
2519 xfs_btree_rec_addr(cur, lrecs - 1, left), lrp)); in xfs_btree_lshift()
2523 xfs_btree_log_block(cur, lbp, XFS_BB_NUMRECS); in xfs_btree_lshift()
2526 xfs_btree_log_block(cur, rbp, XFS_BB_NUMRECS); in xfs_btree_lshift()
2531 XFS_BTREE_STATS_ADD(cur, moves, rrecs - 1); in xfs_btree_lshift()
2535 error = xfs_btree_debug_check_ptr(cur, rpp, i + 1, level); in xfs_btree_lshift()
2540 xfs_btree_shift_keys(cur, in xfs_btree_lshift()
2541 xfs_btree_key_addr(cur, 2, right), in xfs_btree_lshift()
2543 xfs_btree_shift_ptrs(cur, in xfs_btree_lshift()
2544 xfs_btree_ptr_addr(cur, 2, right), in xfs_btree_lshift()
2547 xfs_btree_log_keys(cur, rbp, 1, rrecs); in xfs_btree_lshift()
2548 xfs_btree_log_ptrs(cur, rbp, 1, rrecs); in xfs_btree_lshift()
2551 xfs_btree_shift_recs(cur, in xfs_btree_lshift()
2552 xfs_btree_rec_addr(cur, 2, right), in xfs_btree_lshift()
2554 xfs_btree_log_recs(cur, rbp, 1, rrecs); in xfs_btree_lshift()
2561 if (cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING) { in xfs_btree_lshift()
2562 error = xfs_btree_dup_cursor(cur, &tcur); in xfs_btree_lshift()
2567 xfs_btree_mark_sick(cur); in xfs_btree_lshift()
2585 error = xfs_btree_update_keys(cur, level); in xfs_btree_lshift()
2590 cur->bc_levels[level].ptr--; in xfs_btree_lshift()
2613 struct xfs_btree_cur *cur, in xfs_btree_rshift() argument
2629 if (xfs_btree_at_iroot(cur, level)) in xfs_btree_rshift()
2633 left = xfs_btree_get_block(cur, level, &lbp); in xfs_btree_rshift()
2636 error = xfs_btree_check_block(cur, left, level, lbp); in xfs_btree_rshift()
2642 xfs_btree_get_sibling(cur, left, &rptr, XFS_BB_RIGHTSIB); in xfs_btree_rshift()
2643 if (xfs_btree_ptr_is_null(cur, &rptr)) in xfs_btree_rshift()
2651 if (cur->bc_levels[level].ptr >= lrecs) in xfs_btree_rshift()
2655 error = xfs_btree_read_buf_block(cur, &rptr, 0, &right, &rbp); in xfs_btree_rshift()
2661 if (rrecs == cur->bc_ops->get_maxrecs(cur, level)) in xfs_btree_rshift()
2664 XFS_BTREE_STATS_INC(cur, rshift); in xfs_btree_rshift()
2665 XFS_BTREE_STATS_ADD(cur, moves, rrecs); in xfs_btree_rshift()
2677 lkp = xfs_btree_key_addr(cur, lrecs, left); in xfs_btree_rshift()
2678 lpp = xfs_btree_ptr_addr(cur, lrecs, left); in xfs_btree_rshift()
2679 rkp = xfs_btree_key_addr(cur, 1, right); in xfs_btree_rshift()
2680 rpp = xfs_btree_ptr_addr(cur, 1, right); in xfs_btree_rshift()
2683 error = xfs_btree_debug_check_ptr(cur, rpp, i, level); in xfs_btree_rshift()
2688 xfs_btree_shift_keys(cur, rkp, 1, rrecs); in xfs_btree_rshift()
2689 xfs_btree_shift_ptrs(cur, rpp, 1, rrecs); in xfs_btree_rshift()
2691 error = xfs_btree_debug_check_ptr(cur, lpp, 0, level); in xfs_btree_rshift()
2696 xfs_btree_copy_keys(cur, rkp, lkp, 1); in xfs_btree_rshift()
2697 xfs_btree_copy_ptrs(cur, rpp, lpp, 1); in xfs_btree_rshift()
2699 xfs_btree_log_keys(cur, rbp, 1, rrecs + 1); in xfs_btree_rshift()
2700 xfs_btree_log_ptrs(cur, rbp, 1, rrecs + 1); in xfs_btree_rshift()
2702 ASSERT(cur->bc_ops->keys_inorder(cur, rkp, in xfs_btree_rshift()
2703 xfs_btree_key_addr(cur, 2, right))); in xfs_btree_rshift()
2709 lrp = xfs_btree_rec_addr(cur, lrecs, left); in xfs_btree_rshift()
2710 rrp = xfs_btree_rec_addr(cur, 1, right); in xfs_btree_rshift()
2712 xfs_btree_shift_recs(cur, rrp, 1, rrecs); in xfs_btree_rshift()
2715 xfs_btree_copy_recs(cur, rrp, lrp, 1); in xfs_btree_rshift()
2716 xfs_btree_log_recs(cur, rbp, 1, rrecs + 1); in xfs_btree_rshift()
2723 xfs_btree_log_block(cur, lbp, XFS_BB_NUMRECS); in xfs_btree_rshift()
2726 xfs_btree_log_block(cur, rbp, XFS_BB_NUMRECS); in xfs_btree_rshift()
2732 error = xfs_btree_dup_cursor(cur, &tcur); in xfs_btree_rshift()
2737 xfs_btree_mark_sick(cur); in xfs_btree_rshift()
2747 if (cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING) { in xfs_btree_rshift()
2748 error = xfs_btree_update_keys(cur, level); in xfs_btree_rshift()
2777 struct xfs_btree_cur *cur, in xfs_btree_alloc_block() argument
2791 if (unlikely(cur->bc_flags & XFS_BTREE_STAGING)) { in xfs_btree_alloc_block()
2796 error = cur->bc_ops->alloc_block(cur, hint_block, new_block, stat); in xfs_btree_alloc_block()
2797 trace_xfs_btree_alloc_block(cur, new_block, *stat, error); in xfs_btree_alloc_block()
2808 struct xfs_btree_cur *cur, in __xfs_btree_split() argument
2830 XFS_BTREE_STATS_INC(cur, split); in __xfs_btree_split()
2833 left = xfs_btree_get_block(cur, level, &lbp); in __xfs_btree_split()
2836 error = xfs_btree_check_block(cur, left, level, lbp); in __xfs_btree_split()
2841 xfs_btree_buf_to_ptr(cur, lbp, &lptr); in __xfs_btree_split()
2844 error = xfs_btree_alloc_block(cur, &lptr, &rptr, stat); in __xfs_btree_split()
2849 XFS_BTREE_STATS_INC(cur, alloc); in __xfs_btree_split()
2852 error = xfs_btree_get_buf_block(cur, &rptr, &right, &rbp); in __xfs_btree_split()
2857 xfs_btree_init_block_cur(cur, rbp, xfs_btree_get_level(left), 0); in __xfs_btree_split()
2866 if ((lrecs & 1) && cur->bc_levels[level].ptr <= rrecs + 1) in __xfs_btree_split()
2870 XFS_BTREE_STATS_ADD(cur, moves, rrecs); in __xfs_btree_split()
2889 lkp = xfs_btree_key_addr(cur, src_index, left); in __xfs_btree_split()
2890 lpp = xfs_btree_ptr_addr(cur, src_index, left); in __xfs_btree_split()
2891 rkp = xfs_btree_key_addr(cur, 1, right); in __xfs_btree_split()
2892 rpp = xfs_btree_ptr_addr(cur, 1, right); in __xfs_btree_split()
2895 error = xfs_btree_debug_check_ptr(cur, lpp, i, level); in __xfs_btree_split()
2901 xfs_btree_copy_keys(cur, rkp, lkp, rrecs); in __xfs_btree_split()
2902 xfs_btree_copy_ptrs(cur, rpp, lpp, rrecs); in __xfs_btree_split()
2904 xfs_btree_log_keys(cur, rbp, 1, rrecs); in __xfs_btree_split()
2905 xfs_btree_log_ptrs(cur, rbp, 1, rrecs); in __xfs_btree_split()
2908 xfs_btree_get_node_keys(cur, right, key); in __xfs_btree_split()
2914 lrp = xfs_btree_rec_addr(cur, src_index, left); in __xfs_btree_split()
2915 rrp = xfs_btree_rec_addr(cur, 1, right); in __xfs_btree_split()
2918 xfs_btree_copy_recs(cur, rrp, lrp, rrecs); in __xfs_btree_split()
2919 xfs_btree_log_recs(cur, rbp, 1, rrecs); in __xfs_btree_split()
2922 xfs_btree_get_leaf_keys(cur, right, key); in __xfs_btree_split()
2929 xfs_btree_get_sibling(cur, left, &rrptr, XFS_BB_RIGHTSIB); in __xfs_btree_split()
2930 xfs_btree_set_sibling(cur, right, &rrptr, XFS_BB_RIGHTSIB); in __xfs_btree_split()
2931 xfs_btree_set_sibling(cur, right, &lptr, XFS_BB_LEFTSIB); in __xfs_btree_split()
2932 xfs_btree_set_sibling(cur, left, &rptr, XFS_BB_RIGHTSIB); in __xfs_btree_split()
2934 xfs_btree_log_block(cur, rbp, XFS_BB_ALL_BITS); in __xfs_btree_split()
2935 xfs_btree_log_block(cur, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB); in __xfs_btree_split()
2941 if (!xfs_btree_ptr_is_null(cur, &rrptr)) { in __xfs_btree_split()
2942 error = xfs_btree_read_buf_block(cur, &rrptr, in __xfs_btree_split()
2946 xfs_btree_set_sibling(cur, rrblock, &rptr, XFS_BB_LEFTSIB); in __xfs_btree_split()
2947 xfs_btree_log_block(cur, rrbp, XFS_BB_LEFTSIB); in __xfs_btree_split()
2951 if (cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING) { in __xfs_btree_split()
2952 error = xfs_btree_update_keys(cur, level); in __xfs_btree_split()
2962 if (cur->bc_levels[level].ptr > lrecs + 1) { in __xfs_btree_split()
2963 xfs_btree_setbuf(cur, level, rbp); in __xfs_btree_split()
2964 cur->bc_levels[level].ptr -= lrecs; in __xfs_btree_split()
2970 if (level + 1 < cur->bc_nlevels) { in __xfs_btree_split()
2971 error = xfs_btree_dup_cursor(cur, curp); in __xfs_btree_split()
2989 struct xfs_btree_cur *cur; member
3023 xfs_trans_set_context(args->cur->bc_tp); in xfs_btree_split_worker()
3025 args->result = __xfs_btree_split(args->cur, args->level, args->ptrp, in xfs_btree_split_worker()
3028 xfs_trans_clear_context(args->cur->bc_tp); in xfs_btree_split_worker()
3059 struct xfs_btree_cur *cur, in xfs_btree_split() argument
3069 if (!xfs_btree_is_bmap(cur->bc_ops) || in xfs_btree_split()
3070 cur->bc_tp->t_highest_agno == NULLAGNUMBER) in xfs_btree_split()
3071 return __xfs_btree_split(cur, level, ptrp, key, curp, stat); in xfs_btree_split()
3073 args.cur = cur; in xfs_btree_split()
3094 struct xfs_btree_cur *cur, in xfs_btree_promote_leaf_iroot() argument
3108 rp = xfs_btree_rec_addr(cur, 1, block); in xfs_btree_promote_leaf_iroot()
3109 crp = xfs_btree_rec_addr(cur, 1, cblock); in xfs_btree_promote_leaf_iroot()
3110 xfs_btree_copy_recs(cur, crp, rp, numrecs); in xfs_btree_promote_leaf_iroot()
3121 cur->bc_ops->broot_realloc(cur, 0); in xfs_btree_promote_leaf_iroot()
3122 cur->bc_nlevels++; in xfs_btree_promote_leaf_iroot()
3123 cur->bc_levels[1].ptr = 1; in xfs_btree_promote_leaf_iroot()
3129 broot = cur->bc_ops->broot_realloc(cur, 1); in xfs_btree_promote_leaf_iroot()
3130 xfs_btree_init_block(cur->bc_mp, broot, cur->bc_ops, in xfs_btree_promote_leaf_iroot()
3131 cur->bc_nlevels - 1, 1, cur->bc_ino.ip->i_ino); in xfs_btree_promote_leaf_iroot()
3133 pp = xfs_btree_ptr_addr(cur, 1, broot); in xfs_btree_promote_leaf_iroot()
3134 kp = xfs_btree_key_addr(cur, 1, broot); in xfs_btree_promote_leaf_iroot()
3135 xfs_btree_copy_ptrs(cur, pp, cptr, 1); in xfs_btree_promote_leaf_iroot()
3136 xfs_btree_get_keys(cur, cblock, kp); in xfs_btree_promote_leaf_iroot()
3139 xfs_btree_setbuf(cur, 0, cbp); in xfs_btree_promote_leaf_iroot()
3140 xfs_btree_log_block(cur, cbp, XFS_BB_ALL_BITS); in xfs_btree_promote_leaf_iroot()
3141 xfs_btree_log_recs(cur, cbp, 1, numrecs); in xfs_btree_promote_leaf_iroot()
3153 struct xfs_btree_cur *cur, in xfs_btree_promote_node_iroot() argument
3174 cur->bc_nlevels++; in xfs_btree_promote_node_iroot()
3175 cur->bc_levels[level + 1].ptr = 1; in xfs_btree_promote_node_iroot()
3182 kp = xfs_btree_key_addr(cur, 1, block); in xfs_btree_promote_node_iroot()
3183 ckp = xfs_btree_key_addr(cur, 1, cblock); in xfs_btree_promote_node_iroot()
3184 xfs_btree_copy_keys(cur, ckp, kp, numrecs); in xfs_btree_promote_node_iroot()
3187 pp = xfs_btree_ptr_addr(cur, 1, block); in xfs_btree_promote_node_iroot()
3188 cpp = xfs_btree_ptr_addr(cur, 1, cblock); in xfs_btree_promote_node_iroot()
3190 error = xfs_btree_debug_check_ptr(cur, pp, i, level); in xfs_btree_promote_node_iroot()
3194 xfs_btree_copy_ptrs(cur, cpp, pp, numrecs); in xfs_btree_promote_node_iroot()
3200 error = xfs_btree_debug_check_ptr(cur, cptr, 0, level); in xfs_btree_promote_node_iroot()
3203 xfs_btree_copy_ptrs(cur, pp, cptr, 1); in xfs_btree_promote_node_iroot()
3204 xfs_btree_get_keys(cur, cblock, kp); in xfs_btree_promote_node_iroot()
3206 cur->bc_ops->broot_realloc(cur, 1); in xfs_btree_promote_node_iroot()
3209 xfs_btree_setbuf(cur, level, cbp); in xfs_btree_promote_node_iroot()
3210 xfs_btree_log_block(cur, cbp, XFS_BB_ALL_BITS); in xfs_btree_promote_node_iroot()
3211 xfs_btree_log_keys(cur, cbp, 1, numrecs); in xfs_btree_promote_node_iroot()
3212 xfs_btree_log_ptrs(cur, cbp, 1, numrecs); in xfs_btree_promote_node_iroot()
3222 struct xfs_btree_cur *cur, /* btree cursor */ in xfs_btree_new_iroot() argument
3234 XFS_BTREE_STATS_INC(cur, newroot); in xfs_btree_new_iroot()
3236 ASSERT(cur->bc_ops->type == XFS_BTREE_TYPE_INODE); in xfs_btree_new_iroot()
3238 level = cur->bc_nlevels - 1; in xfs_btree_new_iroot()
3240 block = xfs_btree_get_iroot(cur); in xfs_btree_new_iroot()
3241 ASSERT(level > 0 || (cur->bc_ops->geom_flags & XFS_BTGEO_IROOT_RECORDS)); in xfs_btree_new_iroot()
3243 aptr = *xfs_btree_ptr_addr(cur, 1, block); in xfs_btree_new_iroot()
3245 aptr.l = cpu_to_be64(XFS_INO_TO_FSB(cur->bc_mp, in xfs_btree_new_iroot()
3246 cur->bc_ino.ip->i_ino)); in xfs_btree_new_iroot()
3249 error = xfs_btree_alloc_block(cur, &aptr, &nptr, stat); in xfs_btree_new_iroot()
3255 XFS_BTREE_STATS_INC(cur, alloc); in xfs_btree_new_iroot()
3258 error = xfs_btree_get_buf_block(cur, &nptr, &cblock, &cbp); in xfs_btree_new_iroot()
3266 memcpy(cblock, block, xfs_btree_block_len(cur)); in xfs_btree_new_iroot()
3267 if (xfs_has_crc(cur->bc_mp)) { in xfs_btree_new_iroot()
3269 if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) in xfs_btree_new_iroot()
3276 error = xfs_btree_promote_node_iroot(cur, block, level, cbp, in xfs_btree_new_iroot()
3281 xfs_btree_promote_leaf_iroot(cur, block, cbp, &nptr, cblock); in xfs_btree_new_iroot()
3284 *logflags |= XFS_ILOG_CORE | xfs_ilog_fbroot(cur->bc_ino.whichfork); in xfs_btree_new_iroot()
3293 struct xfs_btree_cur *cur, in xfs_btree_set_root() argument
3297 if (cur->bc_flags & XFS_BTREE_STAGING) { in xfs_btree_set_root()
3299 cur->bc_ag.afake->af_root = be32_to_cpu(ptr->s); in xfs_btree_set_root()
3300 cur->bc_ag.afake->af_levels += inc; in xfs_btree_set_root()
3302 cur->bc_ops->set_root(cur, ptr, inc); in xfs_btree_set_root()
3311 struct xfs_btree_cur *cur, /* btree cursor */ in xfs_btree_new_root() argument
3327 XFS_BTREE_STATS_INC(cur, newroot); in xfs_btree_new_root()
3330 xfs_btree_init_ptr_from_cur(cur, &rptr); in xfs_btree_new_root()
3333 error = xfs_btree_alloc_block(cur, &rptr, &lptr, stat); in xfs_btree_new_root()
3338 XFS_BTREE_STATS_INC(cur, alloc); in xfs_btree_new_root()
3341 error = xfs_btree_get_buf_block(cur, &lptr, &new, &nbp); in xfs_btree_new_root()
3346 xfs_btree_set_root(cur, &lptr, 1); in xfs_btree_new_root()
3354 block = xfs_btree_get_block(cur, cur->bc_nlevels - 1, &bp); in xfs_btree_new_root()
3357 error = xfs_btree_check_block(cur, block, cur->bc_nlevels - 1, bp); in xfs_btree_new_root()
3362 xfs_btree_get_sibling(cur, block, &rptr, XFS_BB_RIGHTSIB); in xfs_btree_new_root()
3363 if (!xfs_btree_ptr_is_null(cur, &rptr)) { in xfs_btree_new_root()
3366 xfs_btree_buf_to_ptr(cur, lbp, &lptr); in xfs_btree_new_root()
3368 error = xfs_btree_read_buf_block(cur, &rptr, 0, &right, &rbp); in xfs_btree_new_root()
3376 xfs_btree_buf_to_ptr(cur, rbp, &rptr); in xfs_btree_new_root()
3378 xfs_btree_get_sibling(cur, right, &lptr, XFS_BB_LEFTSIB); in xfs_btree_new_root()
3379 error = xfs_btree_read_buf_block(cur, &lptr, 0, &left, &lbp); in xfs_btree_new_root()
3387 xfs_btree_init_block_cur(cur, nbp, cur->bc_nlevels, 2); in xfs_btree_new_root()
3388 xfs_btree_log_block(cur, nbp, XFS_BB_ALL_BITS); in xfs_btree_new_root()
3389 ASSERT(!xfs_btree_ptr_is_null(cur, &lptr) && in xfs_btree_new_root()
3390 !xfs_btree_ptr_is_null(cur, &rptr)); in xfs_btree_new_root()
3398 xfs_btree_get_node_keys(cur, left, in xfs_btree_new_root()
3399 xfs_btree_key_addr(cur, 1, new)); in xfs_btree_new_root()
3400 xfs_btree_get_node_keys(cur, right, in xfs_btree_new_root()
3401 xfs_btree_key_addr(cur, 2, new)); in xfs_btree_new_root()
3408 xfs_btree_get_leaf_keys(cur, left, in xfs_btree_new_root()
3409 xfs_btree_key_addr(cur, 1, new)); in xfs_btree_new_root()
3410 xfs_btree_get_leaf_keys(cur, right, in xfs_btree_new_root()
3411 xfs_btree_key_addr(cur, 2, new)); in xfs_btree_new_root()
3413 xfs_btree_log_keys(cur, nbp, 1, 2); in xfs_btree_new_root()
3416 xfs_btree_copy_ptrs(cur, in xfs_btree_new_root()
3417 xfs_btree_ptr_addr(cur, 1, new), &lptr, 1); in xfs_btree_new_root()
3418 xfs_btree_copy_ptrs(cur, in xfs_btree_new_root()
3419 xfs_btree_ptr_addr(cur, 2, new), &rptr, 1); in xfs_btree_new_root()
3420 xfs_btree_log_ptrs(cur, nbp, 1, 2); in xfs_btree_new_root()
3423 xfs_btree_setbuf(cur, cur->bc_nlevels, nbp); in xfs_btree_new_root()
3424 cur->bc_levels[cur->bc_nlevels].ptr = nptr; in xfs_btree_new_root()
3425 cur->bc_nlevels++; in xfs_btree_new_root()
3426 ASSERT(cur->bc_nlevels <= cur->bc_maxlevels); in xfs_btree_new_root()
3438 struct xfs_btree_cur *cur, /* btree cursor */ in xfs_btree_make_block_unfull() argument
3450 if (xfs_btree_at_iroot(cur, level)) { in xfs_btree_make_block_unfull()
3451 struct xfs_inode *ip = cur->bc_ino.ip; in xfs_btree_make_block_unfull()
3453 if (numrecs < cur->bc_ops->get_dmaxrecs(cur, level)) { in xfs_btree_make_block_unfull()
3455 cur->bc_ops->broot_realloc(cur, numrecs + 1); in xfs_btree_make_block_unfull()
3461 error = xfs_btree_new_iroot(cur, &logflags, stat); in xfs_btree_make_block_unfull()
3465 xfs_trans_log_inode(cur->bc_tp, ip, logflags); in xfs_btree_make_block_unfull()
3472 error = xfs_btree_rshift(cur, level, stat); in xfs_btree_make_block_unfull()
3477 error = xfs_btree_lshift(cur, level, stat); in xfs_btree_make_block_unfull()
3482 *oindex = *index = cur->bc_levels[level].ptr; in xfs_btree_make_block_unfull()
3492 error = xfs_btree_split(cur, level, nptr, key, ncur, stat); in xfs_btree_make_block_unfull()
3497 *index = cur->bc_levels[level].ptr; in xfs_btree_make_block_unfull()
3507 struct xfs_btree_cur *cur, /* btree cursor */ in xfs_btree_insrec() argument
3535 if (cur->bc_ops->type != XFS_BTREE_TYPE_INODE && in xfs_btree_insrec()
3536 level >= cur->bc_nlevels) { in xfs_btree_insrec()
3537 error = xfs_btree_new_root(cur, stat); in xfs_btree_insrec()
3538 xfs_btree_set_ptr_null(cur, ptrp); in xfs_btree_insrec()
3544 ptr = cur->bc_levels[level].ptr; in xfs_btree_insrec()
3552 XFS_BTREE_STATS_INC(cur, insrec); in xfs_btree_insrec()
3555 block = xfs_btree_get_block(cur, level, &bp); in xfs_btree_insrec()
3560 error = xfs_btree_check_block(cur, block, level, bp); in xfs_btree_insrec()
3567 ASSERT(cur->bc_ops->recs_inorder(cur, rec, in xfs_btree_insrec()
3568 xfs_btree_rec_addr(cur, ptr, block))); in xfs_btree_insrec()
3570 ASSERT(cur->bc_ops->keys_inorder(cur, key, in xfs_btree_insrec()
3571 xfs_btree_key_addr(cur, ptr, block))); in xfs_btree_insrec()
3580 xfs_btree_set_ptr_null(cur, &nptr); in xfs_btree_insrec()
3581 if (numrecs == cur->bc_ops->get_maxrecs(cur, level)) { in xfs_btree_insrec()
3582 error = xfs_btree_make_block_unfull(cur, level, numrecs, in xfs_btree_insrec()
3592 block = xfs_btree_get_block(cur, level, &bp); in xfs_btree_insrec()
3596 error = xfs_btree_check_block(cur, block, level, bp); in xfs_btree_insrec()
3605 XFS_BTREE_STATS_ADD(cur, moves, numrecs - ptr + 1); in xfs_btree_insrec()
3612 kp = xfs_btree_key_addr(cur, ptr, block); in xfs_btree_insrec()
3613 pp = xfs_btree_ptr_addr(cur, ptr, block); in xfs_btree_insrec()
3616 error = xfs_btree_debug_check_ptr(cur, pp, i, level); in xfs_btree_insrec()
3621 xfs_btree_shift_keys(cur, kp, 1, numrecs - ptr + 1); in xfs_btree_insrec()
3622 xfs_btree_shift_ptrs(cur, pp, 1, numrecs - ptr + 1); in xfs_btree_insrec()
3624 error = xfs_btree_debug_check_ptr(cur, ptrp, 0, level); in xfs_btree_insrec()
3629 xfs_btree_copy_keys(cur, kp, key, 1); in xfs_btree_insrec()
3630 xfs_btree_copy_ptrs(cur, pp, ptrp, 1); in xfs_btree_insrec()
3633 xfs_btree_log_ptrs(cur, bp, ptr, numrecs); in xfs_btree_insrec()
3634 xfs_btree_log_keys(cur, bp, ptr, numrecs); in xfs_btree_insrec()
3637 ASSERT(cur->bc_ops->keys_inorder(cur, kp, in xfs_btree_insrec()
3638 xfs_btree_key_addr(cur, ptr + 1, block))); in xfs_btree_insrec()
3645 rp = xfs_btree_rec_addr(cur, ptr, block); in xfs_btree_insrec()
3647 xfs_btree_shift_recs(cur, rp, 1, numrecs - ptr + 1); in xfs_btree_insrec()
3650 xfs_btree_copy_recs(cur, rp, rec, 1); in xfs_btree_insrec()
3652 xfs_btree_log_recs(cur, bp, ptr, numrecs); in xfs_btree_insrec()
3655 ASSERT(cur->bc_ops->recs_inorder(cur, rp, in xfs_btree_insrec()
3656 xfs_btree_rec_addr(cur, ptr + 1, block))); in xfs_btree_insrec()
3662 xfs_btree_log_block(cur, bp, XFS_BB_NUMRECS); in xfs_btree_insrec()
3688 if (!xfs_btree_ptr_is_null(cur, &nptr) && in xfs_btree_insrec()
3690 xfs_btree_get_keys(cur, block, lkey); in xfs_btree_insrec()
3691 } else if (xfs_btree_needs_key_update(cur, optr)) { in xfs_btree_insrec()
3692 error = xfs_btree_update_keys(cur, level); in xfs_btree_insrec()
3702 if (!xfs_btree_ptr_is_null(cur, &nptr)) { in xfs_btree_insrec()
3703 xfs_btree_copy_keys(cur, key, lkey, 1); in xfs_btree_insrec()
3725 struct xfs_btree_cur *cur, in xfs_btree_insert() argument
3740 pcur = cur; in xfs_btree_insert()
3743 xfs_btree_set_ptr_null(cur, &nptr); in xfs_btree_insert()
3746 cur->bc_ops->init_rec_from_cur(cur, &rec); in xfs_btree_insert()
3747 cur->bc_ops->init_key_from_rec(key, &rec); in xfs_btree_insert()
3762 if (pcur != cur) in xfs_btree_insert()
3767 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) { in xfs_btree_insert()
3768 xfs_btree_mark_sick(cur); in xfs_btree_insert()
3779 if (pcur != cur && in xfs_btree_insert()
3780 (ncur || xfs_btree_ptr_is_null(cur, &nptr))) { in xfs_btree_insert()
3782 if (cur->bc_ops->update_cursor && in xfs_btree_insert()
3783 !(cur->bc_flags & XFS_BTREE_STAGING)) in xfs_btree_insert()
3784 cur->bc_ops->update_cursor(pcur, cur); in xfs_btree_insert()
3785 cur->bc_nlevels = pcur->bc_nlevels; in xfs_btree_insert()
3793 } while (!xfs_btree_ptr_is_null(cur, &nptr)); in xfs_btree_insert()
3804 struct xfs_btree_cur *cur, in xfs_btree_demote_leaf_child() argument
3820 cur->bc_ops->broot_realloc(cur, 0); in xfs_btree_demote_leaf_child()
3821 cur->bc_nlevels--; in xfs_btree_demote_leaf_child()
3827 broot = cur->bc_ops->broot_realloc(cur, numrecs); in xfs_btree_demote_leaf_child()
3828 xfs_btree_init_block(cur->bc_mp, broot, cur->bc_ops, 0, numrecs, in xfs_btree_demote_leaf_child()
3829 cur->bc_ino.ip->i_ino); in xfs_btree_demote_leaf_child()
3831 rp = xfs_btree_rec_addr(cur, 1, broot); in xfs_btree_demote_leaf_child()
3832 crp = xfs_btree_rec_addr(cur, 1, cblock); in xfs_btree_demote_leaf_child()
3833 xfs_btree_copy_recs(cur, rp, crp, numrecs); in xfs_btree_demote_leaf_child()
3835 cur->bc_levels[0].bp = NULL; in xfs_btree_demote_leaf_child()
3847 struct xfs_btree_cur *cur, in xfs_btree_demote_node_child() argument
3865 block = cur->bc_ops->broot_realloc(cur, numrecs); in xfs_btree_demote_node_child()
3871 kp = xfs_btree_key_addr(cur, 1, block); in xfs_btree_demote_node_child()
3872 ckp = xfs_btree_key_addr(cur, 1, cblock); in xfs_btree_demote_node_child()
3873 xfs_btree_copy_keys(cur, kp, ckp, numrecs); in xfs_btree_demote_node_child()
3876 pp = xfs_btree_ptr_addr(cur, 1, block); in xfs_btree_demote_node_child()
3877 cpp = xfs_btree_ptr_addr(cur, 1, cblock); in xfs_btree_demote_node_child()
3879 error = xfs_btree_debug_check_ptr(cur, cpp, i, level - 1); in xfs_btree_demote_node_child()
3883 xfs_btree_copy_ptrs(cur, pp, cpp, numrecs); in xfs_btree_demote_node_child()
3886 cur->bc_levels[level - 1].bp = NULL; in xfs_btree_demote_node_child()
3888 cur->bc_nlevels--; in xfs_btree_demote_node_child()
3902 struct xfs_btree_cur *cur) in xfs_btree_kill_iroot() argument
3904 struct xfs_inode *ip = cur->bc_ino.ip; in xfs_btree_kill_iroot()
3915 ASSERT(cur->bc_ops->type == XFS_BTREE_TYPE_INODE); in xfs_btree_kill_iroot()
3916 ASSERT((cur->bc_ops->geom_flags & XFS_BTGEO_IROOT_RECORDS) || in xfs_btree_kill_iroot()
3917 cur->bc_nlevels > 1); in xfs_btree_kill_iroot()
3923 level = cur->bc_nlevels - 1; in xfs_btree_kill_iroot()
3924 if (level == 1 && !(cur->bc_ops->geom_flags & XFS_BTGEO_IROOT_RECORDS)) in xfs_btree_kill_iroot()
3934 block = xfs_btree_get_iroot(cur); in xfs_btree_kill_iroot()
3938 cblock = xfs_btree_get_block(cur, level - 1, &cbp); in xfs_btree_kill_iroot()
3946 if (numrecs > cur->bc_ops->get_dmaxrecs(cur, level)) in xfs_btree_kill_iroot()
3949 XFS_BTREE_STATS_INC(cur, killroot); in xfs_btree_kill_iroot()
3952 xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_LEFTSIB); in xfs_btree_kill_iroot()
3953 ASSERT(xfs_btree_ptr_is_null(cur, &ptr)); in xfs_btree_kill_iroot()
3954 xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_RIGHTSIB); in xfs_btree_kill_iroot()
3955 ASSERT(xfs_btree_ptr_is_null(cur, &ptr)); in xfs_btree_kill_iroot()
3959 error = xfs_btree_demote_node_child(cur, cblock, level, in xfs_btree_kill_iroot()
3964 xfs_btree_demote_leaf_child(cur, cblock, numrecs); in xfs_btree_kill_iroot()
3966 error = xfs_btree_free_block(cur, cbp); in xfs_btree_kill_iroot()
3970 xfs_trans_log_inode(cur->bc_tp, ip, in xfs_btree_kill_iroot()
3971 XFS_ILOG_CORE | xfs_ilog_fbroot(cur->bc_ino.whichfork)); in xfs_btree_kill_iroot()
3981 struct xfs_btree_cur *cur, in xfs_btree_kill_root() argument
3988 XFS_BTREE_STATS_INC(cur, killroot); in xfs_btree_kill_root()
3994 xfs_btree_set_root(cur, newroot, -1); in xfs_btree_kill_root()
3996 error = xfs_btree_free_block(cur, bp); in xfs_btree_kill_root()
4000 cur->bc_levels[level].bp = NULL; in xfs_btree_kill_root()
4001 cur->bc_levels[level].ra = 0; in xfs_btree_kill_root()
4002 cur->bc_nlevels--; in xfs_btree_kill_root()
4009 struct xfs_btree_cur *cur, in xfs_btree_dec_cursor() argument
4017 error = xfs_btree_decrement(cur, level, &i); in xfs_btree_dec_cursor()
4034 struct xfs_btree_cur *cur, /* btree cursor */ in xfs_btree_delrec() argument
4060 ptr = cur->bc_levels[level].ptr; in xfs_btree_delrec()
4067 block = xfs_btree_get_block(cur, level, &bp); in xfs_btree_delrec()
4071 error = xfs_btree_check_block(cur, block, level, bp); in xfs_btree_delrec()
4082 XFS_BTREE_STATS_INC(cur, delrec); in xfs_btree_delrec()
4083 XFS_BTREE_STATS_ADD(cur, moves, numrecs - ptr); in xfs_btree_delrec()
4091 lkp = xfs_btree_key_addr(cur, ptr + 1, block); in xfs_btree_delrec()
4092 lpp = xfs_btree_ptr_addr(cur, ptr + 1, block); in xfs_btree_delrec()
4095 error = xfs_btree_debug_check_ptr(cur, lpp, i, level); in xfs_btree_delrec()
4101 xfs_btree_shift_keys(cur, lkp, -1, numrecs - ptr); in xfs_btree_delrec()
4102 xfs_btree_shift_ptrs(cur, lpp, -1, numrecs - ptr); in xfs_btree_delrec()
4103 xfs_btree_log_keys(cur, bp, ptr, numrecs - 1); in xfs_btree_delrec()
4104 xfs_btree_log_ptrs(cur, bp, ptr, numrecs - 1); in xfs_btree_delrec()
4109 xfs_btree_shift_recs(cur, in xfs_btree_delrec()
4110 xfs_btree_rec_addr(cur, ptr + 1, block), in xfs_btree_delrec()
4112 xfs_btree_log_recs(cur, bp, ptr, numrecs - 1); in xfs_btree_delrec()
4120 xfs_btree_log_block(cur, bp, XFS_BB_NUMRECS); in xfs_btree_delrec()
4127 if (xfs_btree_at_iroot(cur, level)) { in xfs_btree_delrec()
4128 cur->bc_ops->broot_realloc(cur, numrecs); in xfs_btree_delrec()
4130 error = xfs_btree_kill_iroot(cur); in xfs_btree_delrec()
4134 error = xfs_btree_dec_cursor(cur, level, stat); in xfs_btree_delrec()
4145 if (level == cur->bc_nlevels - 1) { in xfs_btree_delrec()
4152 pp = xfs_btree_ptr_addr(cur, 1, block); in xfs_btree_delrec()
4153 error = xfs_btree_kill_root(cur, bp, level, pp); in xfs_btree_delrec()
4157 error = xfs_btree_dec_cursor(cur, level, stat); in xfs_btree_delrec()
4169 if (xfs_btree_needs_key_update(cur, ptr)) { in xfs_btree_delrec()
4170 error = xfs_btree_update_keys(cur, level); in xfs_btree_delrec()
4179 if (numrecs >= cur->bc_ops->get_minrecs(cur, level)) { in xfs_btree_delrec()
4180 error = xfs_btree_dec_cursor(cur, level, stat); in xfs_btree_delrec()
4191 xfs_btree_get_sibling(cur, block, &rptr, XFS_BB_RIGHTSIB); in xfs_btree_delrec()
4192 xfs_btree_get_sibling(cur, block, &lptr, XFS_BB_LEFTSIB); in xfs_btree_delrec()
4194 if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE) { in xfs_btree_delrec()
4200 if (xfs_btree_ptr_is_null(cur, &rptr) && in xfs_btree_delrec()
4201 xfs_btree_ptr_is_null(cur, &lptr) && in xfs_btree_delrec()
4202 level == cur->bc_nlevels - 2) { in xfs_btree_delrec()
4203 error = xfs_btree_kill_iroot(cur); in xfs_btree_delrec()
4205 error = xfs_btree_dec_cursor(cur, level, stat); in xfs_btree_delrec()
4212 ASSERT(!xfs_btree_ptr_is_null(cur, &rptr) || in xfs_btree_delrec()
4213 !xfs_btree_ptr_is_null(cur, &lptr)); in xfs_btree_delrec()
4219 error = xfs_btree_dup_cursor(cur, &tcur); in xfs_btree_delrec()
4227 if (!xfs_btree_ptr_is_null(cur, &rptr)) { in xfs_btree_delrec()
4233 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) { in xfs_btree_delrec()
4234 xfs_btree_mark_sick(cur); in xfs_btree_delrec()
4242 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) { in xfs_btree_delrec()
4243 xfs_btree_mark_sick(cur); in xfs_btree_delrec()
4249 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) { in xfs_btree_delrec()
4250 xfs_btree_mark_sick(cur); in xfs_btree_delrec()
4271 cur->bc_ops->get_minrecs(tcur, level)) { in xfs_btree_delrec()
4277 cur->bc_ops->get_minrecs(tcur, level)); in xfs_btree_delrec()
4282 error = xfs_btree_dec_cursor(cur, level, stat); in xfs_btree_delrec()
4295 if (!xfs_btree_ptr_is_null(cur, &lptr)) { in xfs_btree_delrec()
4297 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) { in xfs_btree_delrec()
4298 xfs_btree_mark_sick(cur); in xfs_btree_delrec()
4306 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) { in xfs_btree_delrec()
4307 xfs_btree_mark_sick(cur); in xfs_btree_delrec()
4318 if (!xfs_btree_ptr_is_null(cur, &lptr)) { in xfs_btree_delrec()
4324 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) { in xfs_btree_delrec()
4325 xfs_btree_mark_sick(cur); in xfs_btree_delrec()
4334 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) { in xfs_btree_delrec()
4335 xfs_btree_mark_sick(cur); in xfs_btree_delrec()
4343 error = xfs_btree_check_block(cur, left, level, lbp); in xfs_btree_delrec()
4356 cur->bc_ops->get_minrecs(tcur, level)) { in xfs_btree_delrec()
4362 cur->bc_ops->get_minrecs(tcur, level)); in xfs_btree_delrec()
4366 cur->bc_levels[0].ptr++; in xfs_btree_delrec()
4385 ASSERT(!xfs_btree_ptr_is_null(cur, &cptr)); in xfs_btree_delrec()
4387 if (!xfs_btree_ptr_is_null(cur, &lptr) && in xfs_btree_delrec()
4389 cur->bc_ops->get_maxrecs(cur, level)) { in xfs_btree_delrec()
4397 error = xfs_btree_read_buf_block(cur, &lptr, 0, &left, &lbp); in xfs_btree_delrec()
4404 } else if (!xfs_btree_ptr_is_null(cur, &rptr) && in xfs_btree_delrec()
4406 cur->bc_ops->get_maxrecs(cur, level)) { in xfs_btree_delrec()
4414 error = xfs_btree_read_buf_block(cur, &rptr, 0, &right, &rbp); in xfs_btree_delrec()
4423 error = xfs_btree_dec_cursor(cur, level, stat); in xfs_btree_delrec()
4436 XFS_BTREE_STATS_ADD(cur, moves, rrecs); in xfs_btree_delrec()
4444 lkp = xfs_btree_key_addr(cur, lrecs + 1, left); in xfs_btree_delrec()
4445 lpp = xfs_btree_ptr_addr(cur, lrecs + 1, left); in xfs_btree_delrec()
4446 rkp = xfs_btree_key_addr(cur, 1, right); in xfs_btree_delrec()
4447 rpp = xfs_btree_ptr_addr(cur, 1, right); in xfs_btree_delrec()
4450 error = xfs_btree_debug_check_ptr(cur, rpp, i, level); in xfs_btree_delrec()
4455 xfs_btree_copy_keys(cur, lkp, rkp, rrecs); in xfs_btree_delrec()
4456 xfs_btree_copy_ptrs(cur, lpp, rpp, rrecs); in xfs_btree_delrec()
4458 xfs_btree_log_keys(cur, lbp, lrecs + 1, lrecs + rrecs); in xfs_btree_delrec()
4459 xfs_btree_log_ptrs(cur, lbp, lrecs + 1, lrecs + rrecs); in xfs_btree_delrec()
4465 lrp = xfs_btree_rec_addr(cur, lrecs + 1, left); in xfs_btree_delrec()
4466 rrp = xfs_btree_rec_addr(cur, 1, right); in xfs_btree_delrec()
4468 xfs_btree_copy_recs(cur, lrp, rrp, rrecs); in xfs_btree_delrec()
4469 xfs_btree_log_recs(cur, lbp, lrecs + 1, lrecs + rrecs); in xfs_btree_delrec()
4472 XFS_BTREE_STATS_INC(cur, join); in xfs_btree_delrec()
4479 xfs_btree_get_sibling(cur, right, &cptr, XFS_BB_RIGHTSIB); in xfs_btree_delrec()
4480 xfs_btree_set_sibling(cur, left, &cptr, XFS_BB_RIGHTSIB); in xfs_btree_delrec()
4481 xfs_btree_log_block(cur, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB); in xfs_btree_delrec()
4484 xfs_btree_get_sibling(cur, left, &cptr, XFS_BB_RIGHTSIB); in xfs_btree_delrec()
4485 if (!xfs_btree_ptr_is_null(cur, &cptr)) { in xfs_btree_delrec()
4486 error = xfs_btree_read_buf_block(cur, &cptr, 0, &rrblock, &rrbp); in xfs_btree_delrec()
4489 xfs_btree_set_sibling(cur, rrblock, &lptr, XFS_BB_LEFTSIB); in xfs_btree_delrec()
4490 xfs_btree_log_block(cur, rrbp, XFS_BB_LEFTSIB); in xfs_btree_delrec()
4494 error = xfs_btree_free_block(cur, rbp); in xfs_btree_delrec()
4503 cur->bc_levels[level].bp = lbp; in xfs_btree_delrec()
4504 cur->bc_levels[level].ptr += lrecs; in xfs_btree_delrec()
4505 cur->bc_levels[level].ra = 0; in xfs_btree_delrec()
4511 else if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE || in xfs_btree_delrec()
4512 level + 1 < cur->bc_nlevels) { in xfs_btree_delrec()
4513 error = xfs_btree_increment(cur, level + 1, &i); in xfs_btree_delrec()
4525 cur->bc_levels[level].ptr--; in xfs_btree_delrec()
4554 struct xfs_btree_cur *cur, in xfs_btree_delete() argument
4569 error = xfs_btree_delrec(cur, level, &i); in xfs_btree_delete()
4580 if (joined && (cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING)) { in xfs_btree_delete()
4581 error = xfs_btree_updkeys_force(cur, 0); in xfs_btree_delete()
4587 for (level = 1; level < cur->bc_nlevels; level++) { in xfs_btree_delete()
4588 if (cur->bc_levels[level].ptr == 0) { in xfs_btree_delete()
4589 error = xfs_btree_decrement(cur, level, &i); in xfs_btree_delete()
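
The references above close out the xfs_btree_delete() path: xfs_btree_delrec() removes the record under the cursor, and the loop at the end steps any level whose ptr went to zero back by one. As a usage illustration only, here is a minimal caller sketch in the style of the surrounding code; xfs_btree_delete_under_cursor() is a made-up name, and it assumes the caller has already filled cur->bc_rec for the lookup, as in-tree callers typically do through their per-btree lookup wrappers.

/*
 * Illustrative sketch only (not in-tree): position the cursor with an
 * exact-match lookup and delete the record it lands on.  Assumes
 * cur->bc_rec already describes the record to find.
 */
STATIC int
xfs_btree_delete_under_cursor(
	struct xfs_btree_cur	*cur)
{
	int			stat;
	int			error;

	error = xfs_btree_lookup(cur, XFS_LOOKUP_EQ, &stat);
	if (error)
		return error;
	if (XFS_IS_CORRUPT(cur->bc_mp, stat != 1)) {
		xfs_btree_mark_sick(cur);
		return -EFSCORRUPTED;
	}

	error = xfs_btree_delete(cur, &stat);
	if (error)
		return error;
	if (XFS_IS_CORRUPT(cur->bc_mp, stat != 1)) {
		xfs_btree_mark_sick(cur);
		return -EFSCORRUPTED;
	}
	return 0;
}
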
4608 struct xfs_btree_cur *cur, /* btree cursor */ in xfs_btree_get_rec() argument
4619 ptr = cur->bc_levels[0].ptr; in xfs_btree_get_rec()
4620 block = xfs_btree_get_block(cur, 0, &bp); in xfs_btree_get_rec()
4623 error = xfs_btree_check_block(cur, block, 0, bp); in xfs_btree_get_rec()
4639 *recp = xfs_btree_rec_addr(cur, ptr, block); in xfs_btree_get_rec()
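
xfs_btree_get_rec() above only hands back a pointer to the raw on-disk record at cur->bc_levels[0].ptr; iteration is left to the caller. A hedged sketch of the usual get-record/increment loop follows, essentially the skeleton that xfs_btree_simple_query_range() further down fleshes out; the walker name and the process_rec callback are placeholders, not in-tree interfaces.

/*
 * Illustrative sketch only: walk leaf records left to right from the
 * cursor's current position, handing each one to a caller-supplied
 * callback.
 */
STATIC int
xfs_btree_walk_recs(
	struct xfs_btree_cur	*cur,
	int			(*process_rec)(struct xfs_btree_cur *cur,
					       const union xfs_btree_rec *rec))
{
	union xfs_btree_rec	*recp;
	int			stat;
	int			error;

	for (;;) {
		/* Fetch the record under the cursor, if any. */
		error = xfs_btree_get_rec(cur, &recp, &stat);
		if (error || stat == 0)
			return error;

		error = process_rec(cur, recp);
		if (error)
			return error;

		/* Step to the next leaf record, crossing blocks as needed. */
		error = xfs_btree_increment(cur, 0, &stat);
		if (error || stat == 0)
			return error;
	}
}
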
4647 struct xfs_btree_cur *cur, in xfs_btree_visit_block() argument
4658 xfs_btree_readahead(cur, level, XFS_BTCUR_RIGHTRA); in xfs_btree_visit_block()
4659 block = xfs_btree_get_block(cur, level, &bp); in xfs_btree_visit_block()
4662 error = fn(cur, level, data); in xfs_btree_visit_block()
4667 xfs_btree_get_sibling(cur, block, &rptr, XFS_BB_RIGHTSIB); in xfs_btree_visit_block()
4668 if (xfs_btree_ptr_is_null(cur, &rptr)) in xfs_btree_visit_block()
4677 xfs_btree_buf_to_ptr(cur, bp, &bufptr); in xfs_btree_visit_block()
4678 if (xfs_btree_ptrs_equal(cur, &rptr, &bufptr)) { in xfs_btree_visit_block()
4679 xfs_btree_mark_sick(cur); in xfs_btree_visit_block()
4683 return xfs_btree_lookup_get_block(cur, level, &rptr, &block); in xfs_btree_visit_block()
4690 struct xfs_btree_cur *cur, in xfs_btree_visit_blocks() argument
4700 xfs_btree_init_ptr_from_cur(cur, &lptr); in xfs_btree_visit_blocks()
4703 for (level = cur->bc_nlevels - 1; level >= 0; level--) { in xfs_btree_visit_blocks()
4705 error = xfs_btree_lookup_get_block(cur, level, &lptr, &block); in xfs_btree_visit_blocks()
4713 ptr = xfs_btree_ptr_addr(cur, 1, block); in xfs_btree_visit_blocks()
4714 xfs_btree_readahead_ptr(cur, ptr, 1); in xfs_btree_visit_blocks()
4717 xfs_btree_copy_ptrs(cur, &lptr, ptr, 1); in xfs_btree_visit_blocks()
4727 error = xfs_btree_visit_block(cur, level, fn, data); in xfs_btree_visit_blocks()
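
xfs_btree_visit_block()/xfs_btree_visit_blocks() above implement the generic walk: for each level from the root down, start at the leftmost block of that level and follow right-sibling pointers, calling fn(cur, level, data) on every block. A hedged callback sketch; the struct, the callback name, and the exact xfs_btree_visit_blocks() prototype (four arguments including an XFS_BTREE_VISIT_ALL flag in recent trees) are assumptions to verify against the kernel being built.

/* Per-level tally; sized to cur->bc_nlevels by the caller. */
struct xfs_btree_level_tally {
	unsigned long long	*nblocks;
};

/*
 * Illustrative xfs_btree_visit_blocks_fn callback (name made up): count
 * how many blocks exist at each level of the tree.
 */
STATIC int
xfs_btree_tally_level(
	struct xfs_btree_cur	*cur,
	int			level,
	void			*data)
{
	struct xfs_btree_level_tally	*tally = data;

	tally->nblocks[level]++;
	return 0;
}

/*
 * Usage, assuming the current prototype in xfs_btree.h:
 *
 *	error = xfs_btree_visit_blocks(cur, xfs_btree_tally_level,
 *			XFS_BTREE_VISIT_ALL, &tally);
 */
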
4768 struct xfs_btree_cur *cur, in xfs_btree_block_change_owner() argument
4777 block = xfs_btree_get_block(cur, level, &bp); in xfs_btree_block_change_owner()
4778 if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) { in xfs_btree_block_change_owner()
4796 ASSERT(cur->bc_ops->type == XFS_BTREE_TYPE_INODE); in xfs_btree_block_change_owner()
4797 ASSERT(level == cur->bc_nlevels - 1); in xfs_btree_block_change_owner()
4801 if (cur->bc_tp) { in xfs_btree_block_change_owner()
4802 if (!xfs_trans_ordered_buf(cur->bc_tp, bp)) { in xfs_btree_block_change_owner()
4803 xfs_btree_log_block(cur, bp, XFS_BB_OWNER); in xfs_btree_block_change_owner()
4815 struct xfs_btree_cur *cur, in xfs_btree_change_owner() argument
4824 return xfs_btree_visit_blocks(cur, xfs_btree_block_change_owner, in xfs_btree_change_owner()
5048 struct xfs_btree_cur *cur, in xfs_btree_simple_query_range() argument
5060 ASSERT(cur->bc_ops->init_high_key_from_rec); in xfs_btree_simple_query_range()
5061 ASSERT(cur->bc_ops->diff_two_keys); in xfs_btree_simple_query_range()
5068 error = xfs_btree_lookup(cur, XFS_LOOKUP_LE, &stat); in xfs_btree_simple_query_range()
5074 error = xfs_btree_increment(cur, 0, &stat); in xfs_btree_simple_query_range()
5081 error = xfs_btree_get_rec(cur, &recp, &stat); in xfs_btree_simple_query_range()
5087 cur->bc_ops->init_high_key_from_rec(&rec_key, recp); in xfs_btree_simple_query_range()
5089 if (xfs_btree_keycmp_gt(cur, low_key, &rec_key)) in xfs_btree_simple_query_range()
5094 cur->bc_ops->init_key_from_rec(&rec_key, recp); in xfs_btree_simple_query_range()
5095 if (xfs_btree_keycmp_gt(cur, &rec_key, high_key)) in xfs_btree_simple_query_range()
5099 error = fn(cur, recp, priv); in xfs_btree_simple_query_range()
5105 error = xfs_btree_increment(cur, 0, &stat); in xfs_btree_simple_query_range()
5135 struct xfs_btree_cur *cur, in xfs_btree_overlapped_query_range() argument
5155 level = cur->bc_nlevels - 1; in xfs_btree_overlapped_query_range()
5156 xfs_btree_init_ptr_from_cur(cur, &ptr); in xfs_btree_overlapped_query_range()
5157 error = xfs_btree_lookup_get_block(cur, level, &ptr, &block); in xfs_btree_overlapped_query_range()
5160 xfs_btree_get_block(cur, level, &bp); in xfs_btree_overlapped_query_range()
5161 trace_xfs_btree_overlapped_query_range(cur, level, bp); in xfs_btree_overlapped_query_range()
5163 error = xfs_btree_check_block(cur, block, level, bp); in xfs_btree_overlapped_query_range()
5167 cur->bc_levels[level].ptr = 1; in xfs_btree_overlapped_query_range()
5169 while (level < cur->bc_nlevels) { in xfs_btree_overlapped_query_range()
5170 block = xfs_btree_get_block(cur, level, &bp); in xfs_btree_overlapped_query_range()
5173 if (cur->bc_levels[level].ptr > in xfs_btree_overlapped_query_range()
5176 if (level < cur->bc_nlevels - 1) in xfs_btree_overlapped_query_range()
5177 cur->bc_levels[level + 1].ptr++; in xfs_btree_overlapped_query_range()
5184 recp = xfs_btree_rec_addr(cur, cur->bc_levels[0].ptr, in xfs_btree_overlapped_query_range()
5187 cur->bc_ops->init_high_key_from_rec(&rec_hkey, recp); in xfs_btree_overlapped_query_range()
5188 cur->bc_ops->init_key_from_rec(&rec_key, recp); in xfs_btree_overlapped_query_range()
5199 if (xfs_btree_keycmp_lt(cur, high_key, &rec_key)) in xfs_btree_overlapped_query_range()
5201 if (xfs_btree_keycmp_ge(cur, &rec_hkey, low_key)) { in xfs_btree_overlapped_query_range()
5202 error = fn(cur, recp, priv); in xfs_btree_overlapped_query_range()
5206 cur->bc_levels[level].ptr++; in xfs_btree_overlapped_query_range()
5211 lkp = xfs_btree_key_addr(cur, cur->bc_levels[level].ptr, block); in xfs_btree_overlapped_query_range()
5212 hkp = xfs_btree_high_key_addr(cur, cur->bc_levels[level].ptr, in xfs_btree_overlapped_query_range()
5214 pp = xfs_btree_ptr_addr(cur, cur->bc_levels[level].ptr, block); in xfs_btree_overlapped_query_range()
5225 if (xfs_btree_keycmp_lt(cur, high_key, lkp)) in xfs_btree_overlapped_query_range()
5227 if (xfs_btree_keycmp_ge(cur, hkp, low_key)) { in xfs_btree_overlapped_query_range()
5229 error = xfs_btree_lookup_get_block(cur, level, pp, in xfs_btree_overlapped_query_range()
5233 xfs_btree_get_block(cur, level, &bp); in xfs_btree_overlapped_query_range()
5234 trace_xfs_btree_overlapped_query_range(cur, level, bp); in xfs_btree_overlapped_query_range()
5236 error = xfs_btree_check_block(cur, block, level, bp); in xfs_btree_overlapped_query_range()
5240 cur->bc_levels[level].ptr = 1; in xfs_btree_overlapped_query_range()
5243 cur->bc_levels[level].ptr++; in xfs_btree_overlapped_query_range()
5254 if (cur->bc_levels[0].bp == NULL) { in xfs_btree_overlapped_query_range()
5255 for (i = 0; i < cur->bc_nlevels; i++) { in xfs_btree_overlapped_query_range()
5256 if (cur->bc_levels[i].bp) { in xfs_btree_overlapped_query_range()
5257 xfs_trans_brelse(cur->bc_tp, in xfs_btree_overlapped_query_range()
5258 cur->bc_levels[i].bp); in xfs_btree_overlapped_query_range()
5259 cur->bc_levels[i].bp = NULL; in xfs_btree_overlapped_query_range()
5260 cur->bc_levels[i].ptr = 0; in xfs_btree_overlapped_query_range()
5261 cur->bc_levels[i].ra = 0; in xfs_btree_overlapped_query_range()
5271 struct xfs_btree_cur *cur, in xfs_btree_key_from_irec() argument
5277 cur->bc_rec = *irec; in xfs_btree_key_from_irec()
5278 cur->bc_ops->init_rec_from_cur(cur, &rec); in xfs_btree_key_from_irec()
5279 cur->bc_ops->init_key_from_rec(key, &rec); in xfs_btree_key_from_irec()
5290 struct xfs_btree_cur *cur, in xfs_btree_query_range() argument
5300 xfs_btree_key_from_irec(cur, &high_key, high_rec); in xfs_btree_query_range()
5301 xfs_btree_key_from_irec(cur, &low_key, low_rec); in xfs_btree_query_range()
5304 if (!xfs_btree_keycmp_le(cur, &low_key, &high_key)) in xfs_btree_query_range()
5307 if (!(cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING)) in xfs_btree_query_range()
5308 return xfs_btree_simple_query_range(cur, &low_key, in xfs_btree_query_range()
5310 return xfs_btree_overlapped_query_range(cur, &low_key, &high_key, in xfs_btree_query_range()
5317 struct xfs_btree_cur *cur, in xfs_btree_query_all() argument
5324 memset(&cur->bc_rec, 0, sizeof(cur->bc_rec)); in xfs_btree_query_all()
5328 return xfs_btree_simple_query_range(cur, &low_key, &high_key, fn, priv); in xfs_btree_query_all()
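
xfs_btree_query_range() above validates key ordering, then runs either the simple leaf walk or the overlapped-interval walk, invoking fn(cur, recp, priv) for every record whose keys intersect the query; xfs_btree_query_all() is the same walk over the whole keyspace. A hedged sketch of a counting callback wired through xfs_btree_query_all(); both helper names are invented, and the prototypes follow the xfs_btree_query_range_fn callback shape visible in the listing (cur, record pointer, priv).

/* Illustrative xfs_btree_query_range_fn callback (name made up). */
STATIC int
xfs_btree_count_recs_fn(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_rec	*rec,
	void				*priv)
{
	unsigned long long		*nr = priv;

	(*nr)++;
	return 0;
}

/* Count every record in the btree; assumes the prototype above. */
STATIC int
xfs_btree_count_recs(
	struct xfs_btree_cur	*cur,
	unsigned long long	*nr)
{
	*nr = 0;
	return xfs_btree_query_all(cur, xfs_btree_count_recs_fn, nr);
}
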
5333 struct xfs_btree_cur *cur, in xfs_btree_count_blocks_helper() argument
5346 struct xfs_btree_cur *cur, in xfs_btree_count_blocks() argument
5350 return xfs_btree_visit_blocks(cur, xfs_btree_count_blocks_helper, in xfs_btree_count_blocks()
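
xfs_btree_count_blocks() above is a thin wrapper: xfs_btree_visit_blocks() with a helper that bumps a counter once per block visited. A hedged usage sketch; xfs_btree_report_size() is a made-up name, and the counter type is an assumption (older kernels used xfs_extlen_t, current trees xfs_filblks_t).

/* Illustrative sketch: report how many blocks back an in-use btree. */
STATIC int
xfs_btree_report_size(
	struct xfs_btree_cur	*cur)
{
	xfs_filblks_t		blocks = 0;	/* assumed counter type */
	int			error;

	error = xfs_btree_count_blocks(cur, &blocks);
	if (error)
		return error;

	xfs_info(cur->bc_mp, "%s btree uses %llu blocks",
			cur->bc_ops->name, (unsigned long long)blocks);
	return 0;
}
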
5357 struct xfs_btree_cur *cur, in xfs_btree_diff_two_ptrs() argument
5361 if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) in xfs_btree_diff_two_ptrs()
5382 struct xfs_btree_cur *cur, in xfs_btree_has_records_helper() argument
5391 cur->bc_ops->init_key_from_rec(&rec_key, rec); in xfs_btree_has_records_helper()
5401 if (xfs_btree_masked_keycmp_lt(cur, &info->start_key, &rec_key, in xfs_btree_has_records_helper()
5412 key_contig = cur->bc_ops->keys_contiguous(cur, &info->high_key, in xfs_btree_has_records_helper()
5415 !(cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING)) in xfs_btree_has_records_helper()
5425 cur->bc_ops->init_high_key_from_rec(&rec_high_key, rec); in xfs_btree_has_records_helper()
5426 if (xfs_btree_masked_keycmp_gt(cur, &rec_high_key, &info->high_key, in xfs_btree_has_records_helper()
5449 struct xfs_btree_cur *cur, in xfs_btree_has_records() argument
5462 if (!cur->bc_ops->keys_contiguous) { in xfs_btree_has_records()
5467 xfs_btree_key_from_irec(cur, &info.start_key, low); in xfs_btree_has_records()
5468 xfs_btree_key_from_irec(cur, &info.end_key, high); in xfs_btree_has_records()
5470 error = xfs_btree_query_range(cur, low, high, in xfs_btree_has_records()
5485 if (xfs_btree_masked_keycmp_ge(cur, &info.high_key, &info.end_key, in xfs_btree_has_records()
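
xfs_btree_has_records() above layers a coverage check on top of xfs_btree_query_range(): it reports whether the keyspace between two incore records is empty, sparsely populated, or fully mapped. A hedged caller sketch, assuming the current five-argument form with an enum xbtree_recpacking out-parameter and a NULL key mask meaning "compare all key fields"; the wrapper name is invented and the irec setup is btree-specific, so it is only hinted at here.

/*
 * Illustrative sketch: decide whether [low, high] is completely covered
 * by records in this btree.
 */
STATIC int
xfs_btree_range_is_full(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_irec	*low,
	const union xfs_btree_irec	*high,
	bool				*full)
{
	enum xbtree_recpacking		outcome;
	int				error;

	error = xfs_btree_has_records(cur, low, high, NULL, &outcome);
	if (error)
		return error;

	*full = (outcome == XBTREE_RECPACKING_FULL);
	return 0;
}
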
5497 struct xfs_btree_cur *cur) in xfs_btree_has_more_records() argument
5502 block = xfs_btree_get_block(cur, 0, &bp); in xfs_btree_has_more_records()
5505 if (cur->bc_levels[0].ptr < xfs_btree_get_numrecs(block)) in xfs_btree_has_more_records()
5509 if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) in xfs_btree_has_more_records()
5565 struct xfs_btree_cur *cur) in xfs_btree_goto_left_edge() argument
5570 memset(&cur->bc_rec, 0, sizeof(cur->bc_rec)); in xfs_btree_goto_left_edge()
5571 error = xfs_btree_lookup(cur, XFS_LOOKUP_LE, &stat); in xfs_btree_goto_left_edge()
5577 error = xfs_btree_decrement(cur, 0, &stat); in xfs_btree_goto_left_edge()
5582 xfs_btree_mark_sick(cur); in xfs_btree_goto_left_edge()
5592 struct xfs_btree_cur *cur, in xfs_btree_alloc_metafile_block() argument
5598 .mp = cur->bc_mp, in xfs_btree_alloc_metafile_block()
5599 .tp = cur->bc_tp, in xfs_btree_alloc_metafile_block()
5605 struct xfs_inode *ip = cur->bc_ino.ip; in xfs_btree_alloc_metafile_block()
5610 xfs_rmap_ino_bmbt_owner(&args.oinfo, ip->i_ino, cur->bc_ino.whichfork); in xfs_btree_alloc_metafile_block()
5612 XFS_INO_TO_FSB(cur->bc_mp, ip->i_ino)); in xfs_btree_alloc_metafile_block()
5631 struct xfs_btree_cur *cur, in xfs_btree_free_metafile_block() argument
5635 struct xfs_mount *mp = cur->bc_mp; in xfs_btree_free_metafile_block()
5636 struct xfs_inode *ip = cur->bc_ino.ip; in xfs_btree_free_metafile_block()
5637 struct xfs_trans *tp = cur->bc_tp; in xfs_btree_free_metafile_block()
5643 xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, cur->bc_ino.whichfork); in xfs_btree_free_metafile_block()