// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_btree_staging.h"
#include "xfs_alloc_btree.h"
#include "xfs_alloc.h"
#include "xfs_extent_busy.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_trans.h"
#include "xfs_ag.h"

static struct kmem_cache	*xfs_allocbt_cur_cache;

STATIC struct xfs_btree_cur *
xfs_allocbt_dup_cursor(
	struct xfs_btree_cur	*cur)
{
	return xfs_allocbt_init_cursor(cur->bc_mp, cur->bc_tp,
			cur->bc_ag.agbp, cur->bc_ag.pag, cur->bc_btnum);
}

STATIC void
xfs_allocbt_set_root(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_ptr	*ptr,
	int				inc)
{
	struct xfs_buf		*agbp = cur->bc_ag.agbp;
	struct xfs_agf		*agf = agbp->b_addr;
	int			btnum = cur->bc_btnum;

	ASSERT(ptr->s != 0);

	agf->agf_roots[btnum] = ptr->s;
	be32_add_cpu(&agf->agf_levels[btnum], inc);
	cur->bc_ag.pag->pagf_levels[btnum] += inc;

	xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS);
}

STATIC int
xfs_allocbt_alloc_block(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_ptr	*start,
	union xfs_btree_ptr		*new,
	int				*stat)
{
	int			error;
	xfs_agblock_t		bno;

	/* Allocate the new block from the freelist. If we can't, give up. */
	error = xfs_alloc_get_freelist(cur->bc_ag.pag, cur->bc_tp,
			cur->bc_ag.agbp, &bno, 1);
	if (error)
		return error;

	if (bno == NULLAGBLOCK) {
		*stat = 0;
		return 0;
	}

	atomic64_inc(&cur->bc_mp->m_allocbt_blks);
	xfs_extent_busy_reuse(cur->bc_mp, cur->bc_ag.pag, bno, 1, false);

	new->s = cpu_to_be32(bno);

	*stat = 1;
	return 0;
}

STATIC int
xfs_allocbt_free_block(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp)
{
	struct xfs_buf		*agbp = cur->bc_ag.agbp;
	xfs_agblock_t		bno;
	int			error;

	bno = xfs_daddr_to_agbno(cur->bc_mp, xfs_buf_daddr(bp));
	error = xfs_alloc_put_freelist(cur->bc_ag.pag, cur->bc_tp, agbp, NULL,
			bno, 1);
	if (error)
		return error;

	atomic64_dec(&cur->bc_mp->m_allocbt_blks);
	xfs_extent_busy_insert(cur->bc_tp, agbp->b_pag, bno, 1,
			      XFS_EXTENT_BUSY_SKIP_DISCARD);
	return 0;
}

/*
 * Update the longest extent in the AGF
 */
STATIC void
xfs_allocbt_update_lastrec(
	struct xfs_btree_cur		*cur,
	const struct xfs_btree_block	*block,
	const union xfs_btree_rec	*rec,
	int				ptr,
	int				reason)
{
	struct xfs_agf		*agf = cur->bc_ag.agbp->b_addr;
	struct xfs_perag	*pag;
	__be32			len;
	int			numrecs;

	ASSERT(cur->bc_btnum == XFS_BTNUM_CNT);

	switch (reason) {
	case LASTREC_UPDATE:
		/*
		 * If this is the last leaf block and it's the last record,
		 * then update the size of the longest extent in the AG.
		 */
		if (ptr != xfs_btree_get_numrecs(block))
			return;
		len = rec->alloc.ar_blockcount;
		break;
	case LASTREC_INSREC:
		if (be32_to_cpu(rec->alloc.ar_blockcount) <=
		    be32_to_cpu(agf->agf_longest))
			return;
		len = rec->alloc.ar_blockcount;
		break;
	case LASTREC_DELREC:
		numrecs = xfs_btree_get_numrecs(block);
		if (ptr <= numrecs)
			return;
		ASSERT(ptr == numrecs + 1);

		if (numrecs) {
			xfs_alloc_rec_t *rrp;

			rrp = XFS_ALLOC_REC_ADDR(cur->bc_mp, block, numrecs);
			len = rrp->ar_blockcount;
		} else {
			len = 0;
		}

		break;
	default:
		ASSERT(0);
		return;
	}

	agf->agf_longest = len;
	pag = cur->bc_ag.agbp->b_pag;
	pag->pagf_longest = be32_to_cpu(len);
	xfs_alloc_log_agf(cur->bc_tp, cur->bc_ag.agbp, XFS_AGF_LONGEST);
}

STATIC int
xfs_allocbt_get_minrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return cur->bc_mp->m_alloc_mnr[level != 0];
}

STATIC int
xfs_allocbt_get_maxrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return cur->bc_mp->m_alloc_mxr[level != 0];
}

STATIC void
xfs_allocbt_init_key_from_rec(
	union xfs_btree_key		*key,
	const union xfs_btree_rec	*rec)
{
	key->alloc.ar_startblock = rec->alloc.ar_startblock;
	key->alloc.ar_blockcount = rec->alloc.ar_blockcount;
}

STATIC void
xfs_bnobt_init_high_key_from_rec(
	union xfs_btree_key		*key,
	const union xfs_btree_rec	*rec)
{
	__u32				x;

	x = be32_to_cpu(rec->alloc.ar_startblock);
	x += be32_to_cpu(rec->alloc.ar_blockcount) - 1;
	key->alloc.ar_startblock = cpu_to_be32(x);
	key->alloc.ar_blockcount = 0;
}

STATIC void
xfs_cntbt_init_high_key_from_rec(
	union xfs_btree_key		*key,
	const union xfs_btree_rec	*rec)
{
	key->alloc.ar_blockcount = rec->alloc.ar_blockcount;
	key->alloc.ar_startblock = 0;
}

STATIC void
xfs_allocbt_init_rec_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*rec)
{
	rec->alloc.ar_startblock = cpu_to_be32(cur->bc_rec.a.ar_startblock);
	rec->alloc.ar_blockcount = cpu_to_be32(cur->bc_rec.a.ar_blockcount);
}

STATIC void
xfs_allocbt_init_ptr_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	struct xfs_agf		*agf = cur->bc_ag.agbp->b_addr;

	ASSERT(cur->bc_ag.pag->pag_agno == be32_to_cpu(agf->agf_seqno));

	ptr->s = agf->agf_roots[cur->bc_btnum];
}

STATIC int64_t
xfs_bnobt_key_diff(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*key)
{
	struct xfs_alloc_rec_incore	*rec = &cur->bc_rec.a;
	const struct xfs_alloc_rec	*kp = &key->alloc;

	return (int64_t)be32_to_cpu(kp->ar_startblock) - rec->ar_startblock;
}

STATIC int64_t
xfs_cntbt_key_diff(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*key)
{
	struct xfs_alloc_rec_incore	*rec = &cur->bc_rec.a;
	const struct xfs_alloc_rec	*kp = &key->alloc;
	int64_t				diff;

	diff = (int64_t)be32_to_cpu(kp->ar_blockcount) - rec->ar_blockcount;
	if (diff)
		return diff;

	return (int64_t)be32_to_cpu(kp->ar_startblock) - rec->ar_startblock;
}
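
/*
 * Worked example of the two orderings compared above (illustrative values,
 * not taken from any particular filesystem): the bnobt sorts records purely
 * by ar_startblock, while the cntbt sorts by ar_blockcount first and breaks
 * ties with ar_startblock.  Given free extents A = (startblock 200,
 * blockcount 1) and B = (startblock 10, blockcount 3), A sorts before B in
 * the cntbt (1 < 3) but after B in the bnobt (200 > 10).
 */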

STATIC int64_t
xfs_bnobt_diff_two_keys(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*k1,
	const union xfs_btree_key	*k2,
	const union xfs_btree_key	*mask)
{
	ASSERT(!mask || mask->alloc.ar_startblock);

	return (int64_t)be32_to_cpu(k1->alloc.ar_startblock) -
			be32_to_cpu(k2->alloc.ar_startblock);
}

STATIC int64_t
xfs_cntbt_diff_two_keys(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*k1,
	const union xfs_btree_key	*k2,
	const union xfs_btree_key	*mask)
{
	int64_t				diff;

	ASSERT(!mask || (mask->alloc.ar_blockcount &&
			 mask->alloc.ar_startblock));

	diff = be32_to_cpu(k1->alloc.ar_blockcount) -
		be32_to_cpu(k2->alloc.ar_blockcount);
	if (diff)
		return diff;

	return be32_to_cpu(k1->alloc.ar_startblock) -
		be32_to_cpu(k2->alloc.ar_startblock);
}

static xfs_failaddr_t
xfs_allocbt_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_mount;
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	struct xfs_perag	*pag = bp->b_pag;
	xfs_failaddr_t		fa;
	unsigned int		level;
	xfs_btnum_t		btnum = XFS_BTNUM_BNOi;

	if (!xfs_verify_magic(bp, block->bb_magic))
		return __this_address;

	if (xfs_has_crc(mp)) {
		fa = xfs_btree_sblock_v5hdr_verify(bp);
		if (fa)
			return fa;
	}

	/*
	 * The perag may not be attached during grow operations or fully
	 * initialized from the AGF during log recovery. Therefore we can only
	 * check against maximum tree depth from those contexts.
	 *
	 * Otherwise check against the per-tree limit. Peek at one of the
	 * verifier magic values to determine the type of tree we're verifying
	 * against.
	 */
	level = be16_to_cpu(block->bb_level);
	if (bp->b_ops->magic[0] == cpu_to_be32(XFS_ABTC_MAGIC))
		btnum = XFS_BTNUM_CNTi;
	if (pag && xfs_perag_initialised_agf(pag)) {
		unsigned int	maxlevel = pag->pagf_levels[btnum];

#ifdef CONFIG_XFS_ONLINE_REPAIR
		/*
		 * Online repair could be rewriting the free space btrees, so
		 * we'll validate against the larger of either tree while this
		 * is going on.
		 */
		maxlevel = max_t(unsigned int, maxlevel,
				pag->pagf_repair_levels[btnum]);
#endif
		if (level >= maxlevel)
			return __this_address;
	} else if (level >= mp->m_alloc_maxlevels)
		return __this_address;

	return xfs_btree_sblock_verify(bp, mp->m_alloc_mxr[level != 0]);
}

static void
xfs_allocbt_read_verify(
	struct xfs_buf	*bp)
{
	xfs_failaddr_t	fa;

	if (!xfs_btree_sblock_verify_crc(bp))
		xfs_verifier_error(bp, -EFSBADCRC, __this_address);
	else {
		fa = xfs_allocbt_verify(bp);
		if (fa)
			xfs_verifier_error(bp, -EFSCORRUPTED, fa);
	}

	if (bp->b_error)
		trace_xfs_btree_corrupt(bp, _RET_IP_);
}

static void
xfs_allocbt_write_verify(
	struct xfs_buf	*bp)
{
	xfs_failaddr_t	fa;

	fa = xfs_allocbt_verify(bp);
	if (fa) {
		trace_xfs_btree_corrupt(bp, _RET_IP_);
		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
		return;
	}
	xfs_btree_sblock_calc_crc(bp);

}

const struct xfs_buf_ops xfs_bnobt_buf_ops = {
	.name = "xfs_bnobt",
	.magic = { cpu_to_be32(XFS_ABTB_MAGIC),
		   cpu_to_be32(XFS_ABTB_CRC_MAGIC) },
	.verify_read = xfs_allocbt_read_verify,
	.verify_write = xfs_allocbt_write_verify,
	.verify_struct = xfs_allocbt_verify,
};

const struct xfs_buf_ops xfs_cntbt_buf_ops = {
	.name = "xfs_cntbt",
	.magic = { cpu_to_be32(XFS_ABTC_MAGIC),
		   cpu_to_be32(XFS_ABTC_CRC_MAGIC) },
	.verify_read = xfs_allocbt_read_verify,
	.verify_write = xfs_allocbt_write_verify,
	.verify_struct = xfs_allocbt_verify,
};

STATIC int
xfs_bnobt_keys_inorder(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*k1,
	const union xfs_btree_key	*k2)
{
	return be32_to_cpu(k1->alloc.ar_startblock) <
	       be32_to_cpu(k2->alloc.ar_startblock);
}

STATIC int
xfs_bnobt_recs_inorder(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_rec	*r1,
	const union xfs_btree_rec	*r2)
{
	return be32_to_cpu(r1->alloc.ar_startblock) +
		be32_to_cpu(r1->alloc.ar_blockcount) <=
		be32_to_cpu(r2->alloc.ar_startblock);
}

STATIC int
xfs_cntbt_keys_inorder(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*k1,
	const union xfs_btree_key	*k2)
{
	return be32_to_cpu(k1->alloc.ar_blockcount) <
		be32_to_cpu(k2->alloc.ar_blockcount) ||
		(k1->alloc.ar_blockcount == k2->alloc.ar_blockcount &&
		 be32_to_cpu(k1->alloc.ar_startblock) <
		 be32_to_cpu(k2->alloc.ar_startblock));
}

STATIC int
xfs_cntbt_recs_inorder(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_rec	*r1,
	const union xfs_btree_rec	*r2)
{
	return be32_to_cpu(r1->alloc.ar_blockcount) <
		be32_to_cpu(r2->alloc.ar_blockcount) ||
		(r1->alloc.ar_blockcount == r2->alloc.ar_blockcount &&
		 be32_to_cpu(r1->alloc.ar_startblock) <
		 be32_to_cpu(r2->alloc.ar_startblock));
}

STATIC enum xbtree_key_contig
xfs_allocbt_keys_contiguous(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*key1,
	const union xfs_btree_key	*key2,
	const union xfs_btree_key	*mask)
{
	ASSERT(!mask || mask->alloc.ar_startblock);

	return xbtree_key_contig(be32_to_cpu(key1->alloc.ar_startblock),
				 be32_to_cpu(key2->alloc.ar_startblock));
}
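
/*
 * Rough example of the contiguity classification above (illustrative values
 * only, and assuming xbtree_key_contig() treats "x + 1 == y" as contiguous):
 * key1 = 107 and key2 = 108 classify as contiguous, 107 and 110 leave a gap,
 * and 107 and 105 overlap.  key1 is normally a high key, i.e. the last block
 * of an extent, so "contiguous" means the next extent starts immediately
 * after the previous one ends.
 */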

static const struct xfs_btree_ops xfs_bnobt_ops = {
	.rec_len		= sizeof(xfs_alloc_rec_t),
	.key_len		= sizeof(xfs_alloc_key_t),

	.dup_cursor		= xfs_allocbt_dup_cursor,
	.set_root		= xfs_allocbt_set_root,
	.alloc_block		= xfs_allocbt_alloc_block,
	.free_block		= xfs_allocbt_free_block,
	.update_lastrec		= xfs_allocbt_update_lastrec,
	.get_minrecs		= xfs_allocbt_get_minrecs,
	.get_maxrecs		= xfs_allocbt_get_maxrecs,
	.init_key_from_rec	= xfs_allocbt_init_key_from_rec,
	.init_high_key_from_rec	= xfs_bnobt_init_high_key_from_rec,
	.init_rec_from_cur	= xfs_allocbt_init_rec_from_cur,
	.init_ptr_from_cur	= xfs_allocbt_init_ptr_from_cur,
	.key_diff		= xfs_bnobt_key_diff,
	.buf_ops		= &xfs_bnobt_buf_ops,
	.diff_two_keys		= xfs_bnobt_diff_two_keys,
	.keys_inorder		= xfs_bnobt_keys_inorder,
	.recs_inorder		= xfs_bnobt_recs_inorder,
	.keys_contiguous	= xfs_allocbt_keys_contiguous,
};

static const struct xfs_btree_ops xfs_cntbt_ops = {
	.rec_len		= sizeof(xfs_alloc_rec_t),
	.key_len		= sizeof(xfs_alloc_key_t),

	.dup_cursor		= xfs_allocbt_dup_cursor,
	.set_root		= xfs_allocbt_set_root,
	.alloc_block		= xfs_allocbt_alloc_block,
	.free_block		= xfs_allocbt_free_block,
	.update_lastrec		= xfs_allocbt_update_lastrec,
	.get_minrecs		= xfs_allocbt_get_minrecs,
	.get_maxrecs		= xfs_allocbt_get_maxrecs,
	.init_key_from_rec	= xfs_allocbt_init_key_from_rec,
	.init_high_key_from_rec	= xfs_cntbt_init_high_key_from_rec,
	.init_rec_from_cur	= xfs_allocbt_init_rec_from_cur,
	.init_ptr_from_cur	= xfs_allocbt_init_ptr_from_cur,
	.key_diff		= xfs_cntbt_key_diff,
	.buf_ops		= &xfs_cntbt_buf_ops,
	.diff_two_keys		= xfs_cntbt_diff_two_keys,
	.keys_inorder		= xfs_cntbt_keys_inorder,
	.recs_inorder		= xfs_cntbt_recs_inorder,
	.keys_contiguous	= NULL, /* not needed right now */
};

/* Allocate most of a new allocation btree cursor. */
STATIC struct xfs_btree_cur *
xfs_allocbt_init_common(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_perag	*pag,
	xfs_btnum_t		btnum)
{
	struct xfs_btree_cur	*cur;

	ASSERT(btnum == XFS_BTNUM_BNO || btnum == XFS_BTNUM_CNT);

	cur = xfs_btree_alloc_cursor(mp, tp, btnum, mp->m_alloc_maxlevels,
			xfs_allocbt_cur_cache);
	cur->bc_ag.abt.active = false;

	if (btnum == XFS_BTNUM_CNT) {
		cur->bc_ops = &xfs_cntbt_ops;
		cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_abtc_2);
		cur->bc_flags = XFS_BTREE_LASTREC_UPDATE;
	} else {
		cur->bc_ops = &xfs_bnobt_ops;
		cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_abtb_2);
	}

	cur->bc_ag.pag = xfs_perag_hold(pag);

	if (xfs_has_crc(mp))
		cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;

	return cur;
}

/*
 * Allocate a new allocation btree cursor.
 */
struct xfs_btree_cur *			/* new alloc btree cursor */
xfs_allocbt_init_cursor(
	struct xfs_mount	*mp,	/* file system mount point */
	struct xfs_trans	*tp,	/* transaction pointer */
	struct xfs_buf		*agbp,	/* buffer for agf structure */
	struct xfs_perag	*pag,
	xfs_btnum_t		btnum)	/* btree identifier */
{
	struct xfs_agf		*agf = agbp->b_addr;
	struct xfs_btree_cur	*cur;

	cur = xfs_allocbt_init_common(mp, tp, pag, btnum);
	if (btnum == XFS_BTNUM_CNT)
		cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]);
	else
		cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]);

	cur->bc_ag.agbp = agbp;

	return cur;
}
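
/*
 * Sketch of a typical caller, for illustration only (error handling elided
 * and helper names assumed from xfs_alloc.c; real call sites differ):
 *
 *	error = xfs_alloc_read_agf(pag, tp, 0, &agbp);
 *	cur = xfs_allocbt_init_cursor(mp, tp, agbp, pag, XFS_BTNUM_BNO);
 *	error = xfs_alloc_lookup_eq(cur, bno, len, &stat);
 *	...
 *	xfs_btree_del_cursor(cur, error);
 *
 * The point is the lifecycle: read the AGF, build the cursor against it, run
 * lookups or updates, then tear the cursor down.
 */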

/* Create a free space btree cursor with a fake root for staging. */
struct xfs_btree_cur *
xfs_allocbt_stage_cursor(
	struct xfs_mount	*mp,
	struct xbtree_afakeroot	*afake,
	struct xfs_perag	*pag,
	xfs_btnum_t		btnum)
{
	struct xfs_btree_cur	*cur;

	cur = xfs_allocbt_init_common(mp, NULL, pag, btnum);
	xfs_btree_stage_afakeroot(cur, afake);
	return cur;
}

/*
 * Install a new free space btree root.  Caller is responsible for invalidating
 * and freeing the old btree blocks.
 */
void
xfs_allocbt_commit_staged_btree(
	struct xfs_btree_cur	*cur,
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp)
{
	struct xfs_agf		*agf = agbp->b_addr;
	struct xbtree_afakeroot	*afake = cur->bc_ag.afake;

	ASSERT(cur->bc_flags & XFS_BTREE_STAGING);

	agf->agf_roots[cur->bc_btnum] = cpu_to_be32(afake->af_root);
	agf->agf_levels[cur->bc_btnum] = cpu_to_be32(afake->af_levels);
	xfs_alloc_log_agf(tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS);

	if (cur->bc_btnum == XFS_BTNUM_BNO) {
		xfs_btree_commit_afakeroot(cur, tp, agbp, &xfs_bnobt_ops);
	} else {
		cur->bc_flags |= XFS_BTREE_LASTREC_UPDATE;
		xfs_btree_commit_afakeroot(cur, tp, agbp, &xfs_cntbt_ops);
	}
}

/* Calculate number of records in an alloc btree block. */
static inline unsigned int
xfs_allocbt_block_maxrecs(
	unsigned int		blocklen,
	bool			leaf)
{
	if (leaf)
		return blocklen / sizeof(xfs_alloc_rec_t);
	return blocklen / (sizeof(xfs_alloc_key_t) + sizeof(xfs_alloc_ptr_t));
}

/*
 * Calculate number of records in an alloc btree block.
 */
int
xfs_allocbt_maxrecs(
	struct xfs_mount	*mp,
	int			blocklen,
	int			leaf)
{
	blocklen -= XFS_ALLOC_BLOCK_LEN(mp);
	return xfs_allocbt_block_maxrecs(blocklen, leaf);
}

/* Free space btrees are at their largest when every other block is free. */
#define XFS_MAX_FREESP_RECORDS	((XFS_MAX_AG_BLOCKS + 1) / 2)

/* Compute the max possible height for free space btrees. */
unsigned int
xfs_allocbt_maxlevels_ondisk(void)
{
	unsigned int		minrecs[2];
	unsigned int		blocklen;

	blocklen = min(XFS_MIN_BLOCKSIZE - XFS_BTREE_SBLOCK_LEN,
		       XFS_MIN_CRC_BLOCKSIZE - XFS_BTREE_SBLOCK_CRC_LEN);

	minrecs[0] = xfs_allocbt_block_maxrecs(blocklen, true) / 2;
	minrecs[1] = xfs_allocbt_block_maxrecs(blocklen, false) / 2;

	return xfs_btree_compute_maxlevels(minrecs, XFS_MAX_FREESP_RECORDS);
}

/* Calculate the freespace btree size for some records. */
xfs_extlen_t
xfs_allocbt_calc_size(
	struct xfs_mount	*mp,
	unsigned long long	len)
{
	return xfs_btree_calc_size(mp->m_alloc_mnr, len);
}

int __init
xfs_allocbt_init_cur_cache(void)
{
	xfs_allocbt_cur_cache = kmem_cache_create("xfs_bnobt_cur",
			xfs_btree_cur_sizeof(xfs_allocbt_maxlevels_ondisk()),
			0, 0, NULL);

	if (!xfs_allocbt_cur_cache)
		return -ENOMEM;
	return 0;
}

void
xfs_allocbt_destroy_cur_cache(void)
{
	kmem_cache_destroy(xfs_allocbt_cur_cache);
	xfs_allocbt_cur_cache = NULL;
}
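
/*
 * Worked sizing example for the helpers above (illustrative numbers,
 * assuming a 4096-byte block on a V5 (CRC) filesystem where
 * XFS_ALLOC_BLOCK_LEN() is the 56-byte short-form btree block header,
 * records and keys are 8 bytes, and pointers are 4 bytes): a leaf then
 * holds (4096 - 56) / 8 = 505 records and a node holds
 * (4096 - 56) / 12 = 336 key/pointer pairs.
 */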