// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2021-2024 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <djwong@kernel.org>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_alloc.h"
#include "xfs_btree.h"
#include "xfs_btree_staging.h"
#include "xfs_rtrefcount_btree.h"
#include "xfs_refcount.h"
#include "xfs_trace.h"
#include "xfs_cksum.h"
#include "xfs_error.h"
#include "xfs_extent_busy.h"
#include "xfs_rtgroup.h"
#include "xfs_rtbitmap.h"
#include "xfs_metafile.h"
#include "xfs_health.h"

static struct kmem_cache	*xfs_rtrefcountbt_cur_cache;

/*
 * Realtime Reference Count btree.
 *
 * This is a btree used to track the reference counts of extents in the
 * realtime device.  See the comments in xfs_refcount_btree.c for more
 * information.
 *
 * This tree is basically the same as the regular refcount btree except that
 * it's rooted in an inode.
 */

static struct xfs_btree_cur *
xfs_rtrefcountbt_dup_cursor(
	struct xfs_btree_cur	*cur)
{
	return xfs_rtrefcountbt_init_cursor(cur->bc_tp, to_rtg(cur->bc_group));
}

STATIC int
xfs_rtrefcountbt_get_minrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	if (level == cur->bc_nlevels - 1) {
		struct xfs_ifork	*ifp = xfs_btree_ifork_ptr(cur);

		return xfs_rtrefcountbt_maxrecs(cur->bc_mp, ifp->if_broot_bytes,
				level == 0) / 2;
	}

	return cur->bc_mp->m_rtrefc_mnr[level != 0];
}

STATIC int
xfs_rtrefcountbt_get_maxrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	if (level == cur->bc_nlevels - 1) {
		struct xfs_ifork	*ifp = xfs_btree_ifork_ptr(cur);

		return xfs_rtrefcountbt_maxrecs(cur->bc_mp, ifp->if_broot_bytes,
				level == 0);
	}

	return cur->bc_mp->m_rtrefc_mxr[level != 0];
}

/*
 * Calculate number of records in a realtime refcount btree inode root.
 */
unsigned int
xfs_rtrefcountbt_droot_maxrecs(
	unsigned int		blocklen,
	bool			leaf)
{
	blocklen -= sizeof(struct xfs_rtrefcount_root);

	if (leaf)
		return blocklen / sizeof(struct xfs_refcount_rec);
	return blocklen / (2 * sizeof(struct xfs_refcount_key) +
			sizeof(xfs_rtrefcount_ptr_t));
}
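/*
 * Illustrative sizing sketch (not used by the code): assuming the usual
 * on-disk sizes of a 12-byte xfs_refcount_rec, a 4-byte xfs_refcount_key,
 * an 8-byte xfs_rtrefcount_ptr_t, and a 4-byte xfs_rtrefcount_root header,
 * a hypothetical 512-byte data fork area would hold roughly:
 *
 *	leaf:	(512 - 4) / 12		= 42 records
 *	node:	(512 - 4) / (2 * 4 + 8)	= 31 key/ptr slots
 *
 * The exact numbers depend on the structure definitions in
 * xfs_rtrefcount_btree.h; this is only meant to show how the division in
 * xfs_rtrefcountbt_droot_maxrecs() plays out.
 */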
/*
 * Get the maximum records we could store in the on-disk format.
 *
 * For non-root nodes this is equivalent to xfs_rtrefcountbt_get_maxrecs, but
 * for the root node this checks the available space in the dinode fork so
 * that we can resize the in-memory buffer to match it.  After a resize to
 * the maximum size this function returns the same value as
 * xfs_rtrefcountbt_get_maxrecs for the root node, too.
 */
STATIC int
xfs_rtrefcountbt_get_dmaxrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	if (level != cur->bc_nlevels - 1)
		return cur->bc_mp->m_rtrefc_mxr[level != 0];
	return xfs_rtrefcountbt_droot_maxrecs(cur->bc_ino.forksize, level == 0);
}

STATIC void
xfs_rtrefcountbt_init_key_from_rec(
	union xfs_btree_key		*key,
	const union xfs_btree_rec	*rec)
{
	key->refc.rc_startblock = rec->refc.rc_startblock;
}

STATIC void
xfs_rtrefcountbt_init_high_key_from_rec(
	union xfs_btree_key		*key,
	const union xfs_btree_rec	*rec)
{
	__u32				x;

	x = be32_to_cpu(rec->refc.rc_startblock);
	x += be32_to_cpu(rec->refc.rc_blockcount) - 1;
	key->refc.rc_startblock = cpu_to_be32(x);
}

STATIC void
xfs_rtrefcountbt_init_rec_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*rec)
{
	const struct xfs_refcount_irec *irec = &cur->bc_rec.rc;
	uint32_t		start;

	start = xfs_refcount_encode_startblock(irec->rc_startblock,
			irec->rc_domain);
	rec->refc.rc_startblock = cpu_to_be32(start);
	rec->refc.rc_blockcount = cpu_to_be32(cur->bc_rec.rc.rc_blockcount);
	rec->refc.rc_refcount = cpu_to_be32(cur->bc_rec.rc.rc_refcount);
}

STATIC void
xfs_rtrefcountbt_init_ptr_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	ptr->l = 0;
}

STATIC int64_t
xfs_rtrefcountbt_key_diff(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*key)
{
	const struct xfs_refcount_key	*kp = &key->refc;
	const struct xfs_refcount_irec	*irec = &cur->bc_rec.rc;
	uint32_t			start;

	start = xfs_refcount_encode_startblock(irec->rc_startblock,
			irec->rc_domain);
	return (int64_t)be32_to_cpu(kp->rc_startblock) - start;
}

STATIC int64_t
xfs_rtrefcountbt_diff_two_keys(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*k1,
	const union xfs_btree_key	*k2,
	const union xfs_btree_key	*mask)
{
	ASSERT(!mask || mask->refc.rc_startblock);

	return (int64_t)be32_to_cpu(k1->refc.rc_startblock) -
		be32_to_cpu(k2->refc.rc_startblock);
}
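/*
 * Worked example of the key scheme above (illustrative only): the btree is
 * keyed on rc_startblock, and xfs_refcount_encode_startblock() folds the
 * record's domain into that value by setting the high bit (XFS_REFC_COWFLAG)
 * for COW staging extents, so shared and COW records sort into disjoint
 * halves of the keyspace.  For a shared-domain record covering rtgroup
 * blocks [100, 108) the low key is 100 and the high key computed by
 * xfs_rtrefcountbt_init_high_key_from_rec() is 100 + 8 - 1 = 107.
 */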
static xfs_failaddr_t
xfs_rtrefcountbt_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	xfs_failaddr_t		fa;
	int			level;

	if (!xfs_verify_magic(bp, block->bb_magic))
		return __this_address;

	if (!xfs_has_reflink(mp))
		return __this_address;
	fa = xfs_btree_fsblock_v5hdr_verify(bp, XFS_RMAP_OWN_UNKNOWN);
	if (fa)
		return fa;
	level = be16_to_cpu(block->bb_level);
	if (level > mp->m_rtrefc_maxlevels)
		return __this_address;

	return xfs_btree_fsblock_verify(bp, mp->m_rtrefc_mxr[level != 0]);
}

static void
xfs_rtrefcountbt_read_verify(
	struct xfs_buf	*bp)
{
	xfs_failaddr_t	fa;

	if (!xfs_btree_fsblock_verify_crc(bp))
		xfs_verifier_error(bp, -EFSBADCRC, __this_address);
	else {
		fa = xfs_rtrefcountbt_verify(bp);
		if (fa)
			xfs_verifier_error(bp, -EFSCORRUPTED, fa);
	}

	if (bp->b_error)
		trace_xfs_btree_corrupt(bp, _RET_IP_);
}

static void
xfs_rtrefcountbt_write_verify(
	struct xfs_buf	*bp)
{
	xfs_failaddr_t	fa;

	fa = xfs_rtrefcountbt_verify(bp);
	if (fa) {
		trace_xfs_btree_corrupt(bp, _RET_IP_);
		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
		return;
	}
	xfs_btree_fsblock_calc_crc(bp);
}

const struct xfs_buf_ops xfs_rtrefcountbt_buf_ops = {
	.name			= "xfs_rtrefcountbt",
	.magic			= { 0, cpu_to_be32(XFS_RTREFC_CRC_MAGIC) },
	.verify_read		= xfs_rtrefcountbt_read_verify,
	.verify_write		= xfs_rtrefcountbt_write_verify,
	.verify_struct		= xfs_rtrefcountbt_verify,
};

STATIC int
xfs_rtrefcountbt_keys_inorder(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*k1,
	const union xfs_btree_key	*k2)
{
	return be32_to_cpu(k1->refc.rc_startblock) <
	       be32_to_cpu(k2->refc.rc_startblock);
}

STATIC int
xfs_rtrefcountbt_recs_inorder(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_rec	*r1,
	const union xfs_btree_rec	*r2)
{
	return be32_to_cpu(r1->refc.rc_startblock) +
	       be32_to_cpu(r1->refc.rc_blockcount) <=
	       be32_to_cpu(r2->refc.rc_startblock);
}

STATIC enum xbtree_key_contig
xfs_rtrefcountbt_keys_contiguous(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*key1,
	const union xfs_btree_key	*key2,
	const union xfs_btree_key	*mask)
{
	ASSERT(!mask || mask->refc.rc_startblock);

	return xbtree_key_contig(be32_to_cpu(key1->refc.rc_startblock),
				 be32_to_cpu(key2->refc.rc_startblock));
}

static inline void
xfs_rtrefcountbt_move_ptrs(
	struct xfs_mount	*mp,
	struct xfs_btree_block	*broot,
	short			old_size,
	size_t			new_size,
	unsigned int		numrecs)
{
	void			*dptr;
	void			*sptr;

	sptr = xfs_rtrefcount_broot_ptr_addr(mp, broot, 1, old_size);
	dptr = xfs_rtrefcount_broot_ptr_addr(mp, broot, 1, new_size);
	memmove(dptr, sptr, numrecs * sizeof(xfs_rtrefcount_ptr_t));
}
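/*
 * Illustrative sketch of the incore root layout that the helper above
 * shuffles; the authoritative offsets come from
 * xfs_rtrefcount_broot_ptr_addr().  In an inode-rooted btree the pointer
 * array does not sit immediately after the in-use keys: it starts at an
 * offset computed from the size of the root buffer, so the buffer looks
 * roughly like this:
 *
 *	+--------------+------------+--- ... ---+------------+
 *	| block header | keys[1..n] |  (slack)  | ptrs[1..n] |
 *	+--------------+------------+--- ... ---+------------+
 *	|<---------------- if_broot_bytes ------------------>|
 *
 * Whenever if_broot_bytes changes, the pointers therefore have to be
 * memmoved to their new offset, which is what
 * xfs_rtrefcountbt_broot_realloc() below arranges around each
 * xfs_broot_realloc() call.
 */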
static struct xfs_btree_block *
xfs_rtrefcountbt_broot_realloc(
	struct xfs_btree_cur	*cur,
	unsigned int		new_numrecs)
{
	struct xfs_mount	*mp = cur->bc_mp;
	struct xfs_ifork	*ifp = xfs_btree_ifork_ptr(cur);
	struct xfs_btree_block	*broot;
	unsigned int		new_size;
	unsigned int		old_size = ifp->if_broot_bytes;
	const unsigned int	level = cur->bc_nlevels - 1;

	new_size = xfs_rtrefcount_broot_space_calc(mp, level, new_numrecs);

	/* Handle the nop case quietly. */
	if (new_size == old_size)
		return ifp->if_broot;

	if (new_size > old_size) {
		unsigned int	old_numrecs;

		/*
		 * If there wasn't any memory allocated before, just allocate
		 * it now and get out.
		 */
		if (old_size == 0)
			return xfs_broot_realloc(ifp, new_size);

		/*
		 * If there is already an existing if_broot, then we need to
		 * realloc it and possibly move the node block pointers because
		 * those are not butted up against the btree block header.
		 */
		old_numrecs = xfs_rtrefcountbt_maxrecs(mp, old_size, level);
		broot = xfs_broot_realloc(ifp, new_size);
		if (level > 0)
			xfs_rtrefcountbt_move_ptrs(mp, broot, old_size,
					new_size, old_numrecs);
		goto out_broot;
	}

	/*
	 * We're reducing numrecs.  If we're going all the way to zero, just
	 * free the block.
	 */
	ASSERT(ifp->if_broot != NULL && old_size > 0);
	if (new_size == 0)
		return xfs_broot_realloc(ifp, 0);

	/*
	 * Shrink the btree root by possibly moving the rtrefcountbt pointers,
	 * since they are not butted up against the btree block header.  Then
	 * reallocate broot.
	 */
	if (level > 0)
		xfs_rtrefcountbt_move_ptrs(mp, ifp->if_broot, old_size,
				new_size, new_numrecs);
	broot = xfs_broot_realloc(ifp, new_size);

out_broot:
	ASSERT(xfs_rtrefcount_droot_space(broot) <=
	       xfs_inode_fork_size(cur->bc_ino.ip, cur->bc_ino.whichfork));
	return broot;
}

const struct xfs_btree_ops xfs_rtrefcountbt_ops = {
	.name			= "rtrefcount",
	.type			= XFS_BTREE_TYPE_INODE,
	.geom_flags		= XFS_BTGEO_IROOT_RECORDS,

	.rec_len		= sizeof(struct xfs_refcount_rec),
	.key_len		= sizeof(struct xfs_refcount_key),
	.ptr_len		= XFS_BTREE_LONG_PTR_LEN,

	.lru_refs		= XFS_REFC_BTREE_REF,
	.statoff		= XFS_STATS_CALC_INDEX(xs_rtrefcbt_2),
	.sick_mask		= XFS_SICK_RG_REFCNTBT,

	.dup_cursor		= xfs_rtrefcountbt_dup_cursor,
	.alloc_block		= xfs_btree_alloc_metafile_block,
	.free_block		= xfs_btree_free_metafile_block,
	.get_minrecs		= xfs_rtrefcountbt_get_minrecs,
	.get_maxrecs		= xfs_rtrefcountbt_get_maxrecs,
	.get_dmaxrecs		= xfs_rtrefcountbt_get_dmaxrecs,
	.init_key_from_rec	= xfs_rtrefcountbt_init_key_from_rec,
	.init_high_key_from_rec	= xfs_rtrefcountbt_init_high_key_from_rec,
	.init_rec_from_cur	= xfs_rtrefcountbt_init_rec_from_cur,
	.init_ptr_from_cur	= xfs_rtrefcountbt_init_ptr_from_cur,
	.key_diff		= xfs_rtrefcountbt_key_diff,
	.buf_ops		= &xfs_rtrefcountbt_buf_ops,
	.diff_two_keys		= xfs_rtrefcountbt_diff_two_keys,
	.keys_inorder		= xfs_rtrefcountbt_keys_inorder,
	.recs_inorder		= xfs_rtrefcountbt_recs_inorder,
	.keys_contiguous	= xfs_rtrefcountbt_keys_contiguous,
	.broot_realloc		= xfs_rtrefcountbt_broot_realloc,
};

/* Allocate a new rt refcount btree cursor. */
struct xfs_btree_cur *
xfs_rtrefcountbt_init_cursor(
	struct xfs_trans	*tp,
	struct xfs_rtgroup	*rtg)
{
	struct xfs_inode	*ip = rtg_refcount(rtg);
	struct xfs_mount	*mp = rtg_mount(rtg);
	struct xfs_btree_cur	*cur;

	xfs_assert_ilocked(ip, XFS_ILOCK_SHARED | XFS_ILOCK_EXCL);

	cur = xfs_btree_alloc_cursor(mp, tp, &xfs_rtrefcountbt_ops,
			mp->m_rtrefc_maxlevels, xfs_rtrefcountbt_cur_cache);

	cur->bc_ino.ip = ip;
	cur->bc_refc.nr_ops = 0;
	cur->bc_refc.shape_changes = 0;
	cur->bc_group = xfs_group_hold(rtg_group(rtg));
	cur->bc_nlevels = be16_to_cpu(ip->i_df.if_broot->bb_level) + 1;
	cur->bc_ino.forksize = xfs_inode_fork_size(ip, XFS_DATA_FORK);
	cur->bc_ino.whichfork = XFS_DATA_FORK;
	return cur;
}
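/*
 * Illustrative cursor usage, sketched for orientation only; real callers
 * live in the refcount update and online scrub code, and the names in this
 * sketch are just an example of the expected pattern:
 *
 *	(caller holds the ILOCK of rtg_refcount(rtg))
 *	cur = xfs_rtrefcountbt_init_cursor(tp, rtg);
 *	error = xfs_refcount_lookup_le(cur, XFS_REFC_DOMAIN_SHARED, bno,
 *			&found);
 *	... query or update the tree through the generic btree code ...
 *	xfs_btree_del_cursor(cur, error);
 */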
/*
 * Install a new rt refcount btree root.  Caller is responsible for
 * invalidating and freeing the old btree blocks.
 */
void
xfs_rtrefcountbt_commit_staged_btree(
	struct xfs_btree_cur	*cur,
	struct xfs_trans	*tp)
{
	struct xbtree_ifakeroot	*ifake = cur->bc_ino.ifake;
	struct xfs_ifork	*ifp;
	int			flags = XFS_ILOG_CORE | XFS_ILOG_DBROOT;

	ASSERT(cur->bc_flags & XFS_BTREE_STAGING);
	ASSERT(ifake->if_fork->if_format == XFS_DINODE_FMT_META_BTREE);

	/*
	 * Free any resources hanging off the real fork, then shallow-copy the
	 * staging fork's contents into the real fork to transfer everything
	 * we just built.
	 */
	ifp = xfs_ifork_ptr(cur->bc_ino.ip, XFS_DATA_FORK);
	xfs_idestroy_fork(ifp);
	memcpy(ifp, ifake->if_fork, sizeof(struct xfs_ifork));

	cur->bc_ino.ip->i_projid = cur->bc_group->xg_gno;
	xfs_trans_log_inode(tp, cur->bc_ino.ip, flags);
	xfs_btree_commit_ifakeroot(cur, tp, XFS_DATA_FORK);
}

/* Calculate number of records in a realtime refcount btree block. */
static inline unsigned int
xfs_rtrefcountbt_block_maxrecs(
	unsigned int		blocklen,
	bool			leaf)
{

	if (leaf)
		return blocklen / sizeof(struct xfs_refcount_rec);
	return blocklen / (sizeof(struct xfs_refcount_key) +
			sizeof(xfs_rtrefcount_ptr_t));
}

/*
 * Calculate number of records in a realtime refcount btree block.
 */
unsigned int
xfs_rtrefcountbt_maxrecs(
	struct xfs_mount	*mp,
	unsigned int		blocklen,
	bool			leaf)
{
	blocklen -= XFS_RTREFCOUNT_BLOCK_LEN;
	return xfs_rtrefcountbt_block_maxrecs(blocklen, leaf);
}

/* Compute the max possible height for realtime refcount btrees. */
unsigned int
xfs_rtrefcountbt_maxlevels_ondisk(void)
{
	unsigned int		minrecs[2];
	unsigned int		blocklen;

	blocklen = XFS_MIN_CRC_BLOCKSIZE - XFS_BTREE_LBLOCK_CRC_LEN;

	minrecs[0] = xfs_rtrefcountbt_block_maxrecs(blocklen, true) / 2;
	minrecs[1] = xfs_rtrefcountbt_block_maxrecs(blocklen, false) / 2;

	/* We need at most one record for every block in an rt group. */
	return xfs_btree_compute_maxlevels(minrecs, XFS_MAX_RGBLOCKS);
}

int __init
xfs_rtrefcountbt_init_cur_cache(void)
{
	xfs_rtrefcountbt_cur_cache = kmem_cache_create("xfs_rtrefcountbt_cur",
			xfs_btree_cur_sizeof(
				xfs_rtrefcountbt_maxlevels_ondisk()),
			0, 0, NULL);

	if (!xfs_rtrefcountbt_cur_cache)
		return -ENOMEM;
	return 0;
}

void
xfs_rtrefcountbt_destroy_cur_cache(void)
{
	kmem_cache_destroy(xfs_rtrefcountbt_cur_cache);
	xfs_rtrefcountbt_cur_cache = NULL;
}

/* Compute the maximum height of a realtime refcount btree. */
void
xfs_rtrefcountbt_compute_maxlevels(
	struct xfs_mount	*mp)
{
	unsigned int		d_maxlevels, r_maxlevels;

	if (!xfs_has_rtreflink(mp)) {
		mp->m_rtrefc_maxlevels = 0;
		return;
	}

	/*
	 * The realtime refcountbt lives on the data device, which means that
	 * its maximum height is constrained by the size of the data device
	 * and the height required to store one refcount record for each
	 * rtextent in an rt group.
	 */
	d_maxlevels = xfs_btree_space_to_height(mp->m_rtrefc_mnr,
				mp->m_sb.sb_dblocks);
	r_maxlevels = xfs_btree_compute_maxlevels(mp->m_rtrefc_mnr,
				mp->m_sb.sb_rgextents);

	/* Add one level to handle the inode root level. */
	mp->m_rtrefc_maxlevels = min(d_maxlevels, r_maxlevels) + 1;
}

/* Calculate the rtrefcount btree size for some records. */
unsigned long long
xfs_rtrefcountbt_calc_size(
	struct xfs_mount	*mp,
	unsigned long long	len)
{
	return xfs_btree_calc_size(mp->m_rtrefc_mnr, len);
}
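/*
 * Back-of-the-envelope view of the sizing math above (illustrative; the
 * real work is done by the xfs_btree_compute_maxlevels and
 * xfs_btree_calc_size helpers).  Worst-case geometry assumes every block is
 * only half full, i.e. holds minrecs = maxrecs / 2 entries, so a tree over
 * N records needs roughly
 *
 *	nlevels ~= 1 + ceil(log(N / minrecs[0]) / log(minrecs[1]))
 *
 * levels, and the worst-case block count is the geometric series
 *
 *	nblocks ~= N / minrecs[0] + N / (minrecs[0] * minrecs[1]) + ... + 1
 *
 * For example, with hypothetical minrecs of { 100, 100 } and one record per
 * rt extent in a group of 1,000,000 extents, that is 10,000 + 100 + 1
 * blocks and a worst-case height of three btree levels, plus one more level
 * for the inode root.
 */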
/*
 * Calculate the maximum refcount btree size.
 */
static unsigned long long
xfs_rtrefcountbt_max_size(
	struct xfs_mount	*mp,
	xfs_rtblock_t		rtblocks)
{
	/* Bail out if we're uninitialized, which can happen in mkfs. */
	if (mp->m_rtrefc_mxr[0] == 0)
		return 0;

	return xfs_rtrefcountbt_calc_size(mp, rtblocks);
}

/*
 * Figure out how many blocks to reserve and how many are used by this btree.
 * We need enough space to hold one record for every rt extent in the rtgroup.
 */
xfs_filblks_t
xfs_rtrefcountbt_calc_reserves(
	struct xfs_mount	*mp)
{
	if (!xfs_has_rtreflink(mp))
		return 0;

	return xfs_rtrefcountbt_max_size(mp, mp->m_sb.sb_rgextents);
}

/*
 * Convert on-disk form of btree root to in-memory form.
 */
STATIC void
xfs_rtrefcountbt_from_disk(
	struct xfs_inode	*ip,
	struct xfs_rtrefcount_root *dblock,
	int			dblocklen,
	struct xfs_btree_block	*rblock)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_refcount_key	*fkp;
	__be64			*fpp;
	struct xfs_refcount_key	*tkp;
	__be64			*tpp;
	struct xfs_refcount_rec	*frp;
	struct xfs_refcount_rec	*trp;
	unsigned int		numrecs;
	unsigned int		maxrecs;
	unsigned int		rblocklen;

	rblocklen = xfs_rtrefcount_broot_space(mp, dblock);

	xfs_btree_init_block(mp, rblock, &xfs_rtrefcountbt_ops, 0, 0,
			ip->i_ino);

	rblock->bb_level = dblock->bb_level;
	rblock->bb_numrecs = dblock->bb_numrecs;

	if (be16_to_cpu(rblock->bb_level) > 0) {
		maxrecs = xfs_rtrefcountbt_droot_maxrecs(dblocklen, false);
		fkp = xfs_rtrefcount_droot_key_addr(dblock, 1);
		tkp = xfs_rtrefcount_key_addr(rblock, 1);
		fpp = xfs_rtrefcount_droot_ptr_addr(dblock, 1, maxrecs);
		tpp = xfs_rtrefcount_broot_ptr_addr(mp, rblock, 1, rblocklen);
		numrecs = be16_to_cpu(dblock->bb_numrecs);
		memcpy(tkp, fkp, 2 * sizeof(*fkp) * numrecs);
		memcpy(tpp, fpp, sizeof(*fpp) * numrecs);
	} else {
		frp = xfs_rtrefcount_droot_rec_addr(dblock, 1);
		trp = xfs_rtrefcount_rec_addr(rblock, 1);
		numrecs = be16_to_cpu(dblock->bb_numrecs);
		memcpy(trp, frp, sizeof(*frp) * numrecs);
	}
}

/* Load a realtime reference count btree root in from disk. */
int
xfs_iformat_rtrefcount(
	struct xfs_inode	*ip,
	struct xfs_dinode	*dip)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_rtrefcount_root *dfp = XFS_DFORK_PTR(dip, XFS_DATA_FORK);
	struct xfs_btree_block	*broot;
	unsigned int		numrecs;
	unsigned int		level;
	int			dsize;

	/*
	 * growfs must create the rtrefcount inodes before adding a realtime
	 * volume to the filesystem, so we cannot use the rtrefcount predicate
	 * here.
	 */
	if (!xfs_has_reflink(ip->i_mount)) {
		xfs_inode_mark_sick(ip, XFS_SICK_INO_CORE);
		return -EFSCORRUPTED;
	}

	dsize = XFS_DFORK_SIZE(dip, mp, XFS_DATA_FORK);
	numrecs = be16_to_cpu(dfp->bb_numrecs);
	level = be16_to_cpu(dfp->bb_level);

	if (level > mp->m_rtrefc_maxlevels ||
	    xfs_rtrefcount_droot_space_calc(level, numrecs) > dsize) {
		xfs_inode_mark_sick(ip, XFS_SICK_INO_CORE);
		return -EFSCORRUPTED;
	}

	broot = xfs_broot_alloc(xfs_ifork_ptr(ip, XFS_DATA_FORK),
			xfs_rtrefcount_broot_space_calc(mp, level, numrecs));
	if (broot)
		xfs_rtrefcountbt_from_disk(ip, dfp, dsize, broot);
	return 0;
}
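/*
 * Note on the root conversions above and below (descriptive only): the
 * on-disk root (struct xfs_rtrefcount_root) and the incore root
 * (struct xfs_btree_block) use different headers, and in both layouts the
 * pointer array starts after space for the maximum number of keys the
 * respective buffer could hold rather than right after the keys actually in
 * use.  That is why the keys and pointers are copied with two separate
 * memcpy calls instead of one flat copy of the whole root.
 */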
/*
 * Convert in-memory form of btree root to on-disk form.
 */
void
xfs_rtrefcountbt_to_disk(
	struct xfs_mount	*mp,
	struct xfs_btree_block	*rblock,
	int			rblocklen,
	struct xfs_rtrefcount_root *dblock,
	int			dblocklen)
{
	struct xfs_refcount_key	*fkp;
	__be64			*fpp;
	struct xfs_refcount_key	*tkp;
	__be64			*tpp;
	struct xfs_refcount_rec	*frp;
	struct xfs_refcount_rec	*trp;
	unsigned int		maxrecs;
	unsigned int		numrecs;

	ASSERT(rblock->bb_magic == cpu_to_be32(XFS_RTREFC_CRC_MAGIC));
	ASSERT(uuid_equal(&rblock->bb_u.l.bb_uuid, &mp->m_sb.sb_meta_uuid));
	ASSERT(rblock->bb_u.l.bb_blkno == cpu_to_be64(XFS_BUF_DADDR_NULL));
	ASSERT(rblock->bb_u.l.bb_leftsib == cpu_to_be64(NULLFSBLOCK));
	ASSERT(rblock->bb_u.l.bb_rightsib == cpu_to_be64(NULLFSBLOCK));

	dblock->bb_level = rblock->bb_level;
	dblock->bb_numrecs = rblock->bb_numrecs;

	if (be16_to_cpu(rblock->bb_level) > 0) {
		maxrecs = xfs_rtrefcountbt_droot_maxrecs(dblocklen, false);
		fkp = xfs_rtrefcount_key_addr(rblock, 1);
		tkp = xfs_rtrefcount_droot_key_addr(dblock, 1);
		fpp = xfs_rtrefcount_broot_ptr_addr(mp, rblock, 1, rblocklen);
		tpp = xfs_rtrefcount_droot_ptr_addr(dblock, 1, maxrecs);
		numrecs = be16_to_cpu(rblock->bb_numrecs);
		memcpy(tkp, fkp, 2 * sizeof(*fkp) * numrecs);
		memcpy(tpp, fpp, sizeof(*fpp) * numrecs);
	} else {
		frp = xfs_rtrefcount_rec_addr(rblock, 1);
		trp = xfs_rtrefcount_droot_rec_addr(dblock, 1);
		numrecs = be16_to_cpu(rblock->bb_numrecs);
		memcpy(trp, frp, sizeof(*frp) * numrecs);
	}
}

/* Flush a realtime reference count btree root out to disk. */
void
xfs_iflush_rtrefcount(
	struct xfs_inode	*ip,
	struct xfs_dinode	*dip)
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, XFS_DATA_FORK);
	struct xfs_rtrefcount_root *dfp = XFS_DFORK_PTR(dip, XFS_DATA_FORK);

	ASSERT(ifp->if_broot != NULL);
	ASSERT(ifp->if_broot_bytes > 0);
	ASSERT(xfs_rtrefcount_droot_space(ifp->if_broot) <=
			xfs_inode_fork_size(ip, XFS_DATA_FORK));
	xfs_rtrefcountbt_to_disk(ip->i_mount, ifp->if_broot,
			ifp->if_broot_bytes, dfp,
			XFS_DFORK_SIZE(dip, ip->i_mount, XFS_DATA_FORK));
}

/*
 * Create a realtime refcount btree inode.
 */
int
xfs_rtrefcountbt_create(
	struct xfs_rtgroup	*rtg,
	struct xfs_inode	*ip,
	struct xfs_trans	*tp,
	bool			init)
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, XFS_DATA_FORK);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_btree_block	*broot;

	ifp->if_format = XFS_DINODE_FMT_META_BTREE;
	ASSERT(ifp->if_broot_bytes == 0);
	ASSERT(ifp->if_bytes == 0);

	/* Initialize the empty incore btree root. */
	broot = xfs_broot_realloc(ifp,
			xfs_rtrefcount_broot_space_calc(mp, 0, 0));
	if (broot)
		xfs_btree_init_block(mp, broot, &xfs_rtrefcountbt_ops, 0, 0,
				ip->i_ino);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE | XFS_ILOG_DBROOT);
	return 0;
}