// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_btree_staging.h"
#include "xfs_alloc_btree.h"
#include "xfs_alloc.h"
#include "xfs_extent_busy.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_trans.h"
#include "xfs_ag.h"

static struct kmem_cache	*xfs_allocbt_cur_cache;

STATIC struct xfs_btree_cur *
xfs_allocbt_dup_cursor(
	struct xfs_btree_cur	*cur)
{
	return xfs_allocbt_init_cursor(cur->bc_mp, cur->bc_tp,
			cur->bc_ag.agbp, cur->bc_ag.pag, cur->bc_btnum);
}

STATIC void
xfs_allocbt_set_root(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_ptr	*ptr,
	int				inc)
{
	struct xfs_buf		*agbp = cur->bc_ag.agbp;
	struct xfs_agf		*agf = agbp->b_addr;
	int			btnum = cur->bc_btnum;

	ASSERT(ptr->s != 0);

	agf->agf_roots[btnum] = ptr->s;
	be32_add_cpu(&agf->agf_levels[btnum], inc);
	cur->bc_ag.pag->pagf_levels[btnum] += inc;

	xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS);
}

STATIC int
xfs_allocbt_alloc_block(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_ptr	*start,
	union xfs_btree_ptr		*new,
	int				*stat)
{
	int			error;
	xfs_agblock_t		bno;

	/* Allocate the new block from the freelist. If we can't, give up. */
	error = xfs_alloc_get_freelist(cur->bc_ag.pag, cur->bc_tp,
			cur->bc_ag.agbp, &bno, 1);
	if (error)
		return error;

	if (bno == NULLAGBLOCK) {
		*stat = 0;
		return 0;
	}

	atomic64_inc(&cur->bc_mp->m_allocbt_blks);
	xfs_extent_busy_reuse(cur->bc_mp, cur->bc_ag.pag, bno, 1, false);

	new->s = cpu_to_be32(bno);

	*stat = 1;
	return 0;
}

STATIC int
xfs_allocbt_free_block(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp)
{
	struct xfs_buf		*agbp = cur->bc_ag.agbp;
	xfs_agblock_t		bno;
	int			error;

	bno = xfs_daddr_to_agbno(cur->bc_mp, xfs_buf_daddr(bp));
	error = xfs_alloc_put_freelist(cur->bc_ag.pag, cur->bc_tp, agbp, NULL,
			bno, 1);
	if (error)
		return error;

	atomic64_dec(&cur->bc_mp->m_allocbt_blks);
	xfs_extent_busy_insert(cur->bc_tp, agbp->b_pag, bno, 1,
			      XFS_EXTENT_BUSY_SKIP_DISCARD);
	return 0;
}

/*
 * Update the longest extent in the AGF
 */
STATIC void
xfs_allocbt_update_lastrec(
	struct xfs_btree_cur		*cur,
	const struct xfs_btree_block	*block,
	const union xfs_btree_rec	*rec,
	int				ptr,
	int				reason)
{
	struct xfs_agf		*agf = cur->bc_ag.agbp->b_addr;
	struct xfs_perag	*pag;
	__be32			len;
	int			numrecs;

	ASSERT(cur->bc_btnum == XFS_BTNUM_CNT);

	switch (reason) {
	case LASTREC_UPDATE:
		/*
		 * If this is the last leaf block and it's the last record,
		 * then update the size of the longest extent in the AG.
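		 * The by-count btree keeps records sorted by extent length,
		 * so the last record of the last leaf block is always the
		 * longest free extent in the AG.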
		 */
		if (ptr != xfs_btree_get_numrecs(block))
			return;
		len = rec->alloc.ar_blockcount;
		break;
	case LASTREC_INSREC:
		if (be32_to_cpu(rec->alloc.ar_blockcount) <=
		    be32_to_cpu(agf->agf_longest))
			return;
		len = rec->alloc.ar_blockcount;
		break;
	case LASTREC_DELREC:
		numrecs = xfs_btree_get_numrecs(block);
		if (ptr <= numrecs)
			return;
		ASSERT(ptr == numrecs + 1);

		if (numrecs) {
			xfs_alloc_rec_t *rrp;

			rrp = XFS_ALLOC_REC_ADDR(cur->bc_mp, block, numrecs);
			len = rrp->ar_blockcount;
		} else {
			len = 0;
		}

		break;
	default:
		ASSERT(0);
		return;
	}

	agf->agf_longest = len;
	pag = cur->bc_ag.agbp->b_pag;
	pag->pagf_longest = be32_to_cpu(len);
	xfs_alloc_log_agf(cur->bc_tp, cur->bc_ag.agbp, XFS_AGF_LONGEST);
}

STATIC int
xfs_allocbt_get_minrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return cur->bc_mp->m_alloc_mnr[level != 0];
}

STATIC int
xfs_allocbt_get_maxrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return cur->bc_mp->m_alloc_mxr[level != 0];
}

STATIC void
xfs_allocbt_init_key_from_rec(
	union xfs_btree_key		*key,
	const union xfs_btree_rec	*rec)
{
	key->alloc.ar_startblock = rec->alloc.ar_startblock;
	key->alloc.ar_blockcount = rec->alloc.ar_blockcount;
}

STATIC void
xfs_bnobt_init_high_key_from_rec(
	union xfs_btree_key		*key,
	const union xfs_btree_rec	*rec)
{
	__u32				x;

	x = be32_to_cpu(rec->alloc.ar_startblock);
	x += be32_to_cpu(rec->alloc.ar_blockcount) - 1;
	key->alloc.ar_startblock = cpu_to_be32(x);
	key->alloc.ar_blockcount = 0;
}

STATIC void
xfs_cntbt_init_high_key_from_rec(
	union xfs_btree_key		*key,
	const union xfs_btree_rec	*rec)
{
	key->alloc.ar_blockcount = rec->alloc.ar_blockcount;
	key->alloc.ar_startblock = 0;
}

STATIC void
xfs_allocbt_init_rec_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*rec)
{
	rec->alloc.ar_startblock = cpu_to_be32(cur->bc_rec.a.ar_startblock);
	rec->alloc.ar_blockcount = cpu_to_be32(cur->bc_rec.a.ar_blockcount);
}

STATIC void
xfs_allocbt_init_ptr_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	struct xfs_agf		*agf = cur->bc_ag.agbp->b_addr;

	ASSERT(cur->bc_ag.pag->pag_agno == be32_to_cpu(agf->agf_seqno));

	ptr->s = agf->agf_roots[cur->bc_btnum];
}

STATIC int64_t
xfs_bnobt_key_diff(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*key)
{
	struct xfs_alloc_rec_incore	*rec = &cur->bc_rec.a;
	const struct xfs_alloc_rec	*kp = &key->alloc;

	return (int64_t)be32_to_cpu(kp->ar_startblock) - rec->ar_startblock;
}

STATIC int64_t
xfs_cntbt_key_diff(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*key)
{
	struct xfs_alloc_rec_incore	*rec = &cur->bc_rec.a;
	const struct xfs_alloc_rec	*kp = &key->alloc;
	int64_t				diff;

	diff = (int64_t)be32_to_cpu(kp->ar_blockcount) - rec->ar_blockcount;
	if (diff)
		return diff;

	return (int64_t)be32_to_cpu(kp->ar_startblock) - rec->ar_startblock;
}

STATIC int64_t
xfs_bnobt_diff_two_keys(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*k1,
	const union xfs_btree_key	*k2,
	const union xfs_btree_key	*mask)
{
	ASSERT(!mask || mask->alloc.ar_startblock);

	return (int64_t)be32_to_cpu(k1->alloc.ar_startblock) -
			be32_to_cpu(k2->alloc.ar_startblock);
}

STATIC int64_t
xfs_cntbt_diff_two_keys(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*k1,
	const union xfs_btree_key	*k2,
	const union xfs_btree_key	*mask)
{
	int64_t				diff;

	ASSERT(!mask || (mask->alloc.ar_blockcount &&
			 mask->alloc.ar_startblock));

	diff = be32_to_cpu(k1->alloc.ar_blockcount) -
		be32_to_cpu(k2->alloc.ar_blockcount);
	if (diff)
		return diff;

	return be32_to_cpu(k1->alloc.ar_startblock) -
		be32_to_cpu(k2->alloc.ar_startblock);
}

static xfs_failaddr_t
xfs_allocbt_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_mount;
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	struct xfs_perag	*pag = bp->b_pag;
	xfs_failaddr_t		fa;
	unsigned int		level;
	xfs_btnum_t		btnum = XFS_BTNUM_BNOi;

	if (!xfs_verify_magic(bp, block->bb_magic))
		return __this_address;

	if (xfs_has_crc(mp)) {
		fa = xfs_btree_sblock_v5hdr_verify(bp);
		if (fa)
			return fa;
	}

	/*
	 * The perag may not be attached during grow operations or fully
	 * initialized from the AGF during log recovery. Therefore we can only
	 * check against maximum tree depth from those contexts.
	 *
	 * Otherwise check against the per-tree limit. Peek at one of the
	 * verifier magic values to determine the type of tree we're verifying
	 * against.
	 */
	level = be16_to_cpu(block->bb_level);
	if (bp->b_ops->magic[0] == cpu_to_be32(XFS_ABTC_MAGIC))
		btnum = XFS_BTNUM_CNTi;
	if (pag && xfs_perag_initialised_agf(pag)) {
		if (level >= pag->pagf_levels[btnum])
			return __this_address;
	} else if (level >= mp->m_alloc_maxlevels)
		return __this_address;

	return xfs_btree_sblock_verify(bp, mp->m_alloc_mxr[level != 0]);
}

static void
xfs_allocbt_read_verify(
	struct xfs_buf	*bp)
{
	xfs_failaddr_t	fa;

	if (!xfs_btree_sblock_verify_crc(bp))
		xfs_verifier_error(bp, -EFSBADCRC, __this_address);
	else {
		fa = xfs_allocbt_verify(bp);
		if (fa)
			xfs_verifier_error(bp, -EFSCORRUPTED, fa);
	}

	if (bp->b_error)
		trace_xfs_btree_corrupt(bp, _RET_IP_);
}

static void
xfs_allocbt_write_verify(
	struct xfs_buf	*bp)
{
	xfs_failaddr_t	fa;

	fa = xfs_allocbt_verify(bp);
	if (fa) {
		trace_xfs_btree_corrupt(bp, _RET_IP_);
		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
		return;
	}
	xfs_btree_sblock_calc_crc(bp);
}

const struct xfs_buf_ops xfs_bnobt_buf_ops = {
	.name = "xfs_bnobt",
	.magic = { cpu_to_be32(XFS_ABTB_MAGIC),
		   cpu_to_be32(XFS_ABTB_CRC_MAGIC) },
	.verify_read = xfs_allocbt_read_verify,
	.verify_write = xfs_allocbt_write_verify,
	.verify_struct = xfs_allocbt_verify,
};

const struct xfs_buf_ops xfs_cntbt_buf_ops = {
	.name = "xfs_cntbt",
	.magic = { cpu_to_be32(XFS_ABTC_MAGIC),
		   cpu_to_be32(XFS_ABTC_CRC_MAGIC) },
	.verify_read = xfs_allocbt_read_verify,
	.verify_write = xfs_allocbt_write_verify,
	.verify_struct = xfs_allocbt_verify,
};

STATIC int
xfs_bnobt_keys_inorder(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*k1,
	const union xfs_btree_key	*k2)
{
	return be32_to_cpu(k1->alloc.ar_startblock) <
	       be32_to_cpu(k2->alloc.ar_startblock);
}

STATIC int
xfs_bnobt_recs_inorder(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_rec	*r1,
	const union xfs_btree_rec	*r2)
{
	return be32_to_cpu(r1->alloc.ar_startblock) +
		be32_to_cpu(r1->alloc.ar_blockcount) <=
		be32_to_cpu(r2->alloc.ar_startblock);
}

STATIC int
xfs_cntbt_keys_inorder(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*k1,
	const union xfs_btree_key	*k2)
{
	return be32_to_cpu(k1->alloc.ar_blockcount) <
		be32_to_cpu(k2->alloc.ar_blockcount) ||
		(k1->alloc.ar_blockcount == k2->alloc.ar_blockcount &&
		 be32_to_cpu(k1->alloc.ar_startblock) <
		 be32_to_cpu(k2->alloc.ar_startblock));
}

STATIC int
xfs_cntbt_recs_inorder(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_rec	*r1,
	const union xfs_btree_rec	*r2)
{
	return be32_to_cpu(r1->alloc.ar_blockcount) <
		be32_to_cpu(r2->alloc.ar_blockcount) ||
		(r1->alloc.ar_blockcount == r2->alloc.ar_blockcount &&
		 be32_to_cpu(r1->alloc.ar_startblock) <
		 be32_to_cpu(r2->alloc.ar_startblock));
}

STATIC enum xbtree_key_contig
xfs_allocbt_keys_contiguous(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*key1,
	const union xfs_btree_key	*key2,
	const union xfs_btree_key	*mask)
{
	ASSERT(!mask || mask->alloc.ar_startblock);

	return xbtree_key_contig(be32_to_cpu(key1->alloc.ar_startblock),
				 be32_to_cpu(key2->alloc.ar_startblock));
}

static const struct xfs_btree_ops xfs_bnobt_ops = {
	.rec_len		= sizeof(xfs_alloc_rec_t),
	.key_len		= sizeof(xfs_alloc_key_t),

	.dup_cursor		= xfs_allocbt_dup_cursor,
	.set_root		= xfs_allocbt_set_root,
	.alloc_block		= xfs_allocbt_alloc_block,
	.free_block		= xfs_allocbt_free_block,
	.update_lastrec		= xfs_allocbt_update_lastrec,
	.get_minrecs		= xfs_allocbt_get_minrecs,
	.get_maxrecs		= xfs_allocbt_get_maxrecs,
	.init_key_from_rec	= xfs_allocbt_init_key_from_rec,
	.init_high_key_from_rec	= xfs_bnobt_init_high_key_from_rec,
	.init_rec_from_cur	= xfs_allocbt_init_rec_from_cur,
	.init_ptr_from_cur	= xfs_allocbt_init_ptr_from_cur,
	.key_diff		= xfs_bnobt_key_diff,
	.buf_ops		= &xfs_bnobt_buf_ops,
	.diff_two_keys		= xfs_bnobt_diff_two_keys,
	.keys_inorder		= xfs_bnobt_keys_inorder,
	.recs_inorder		= xfs_bnobt_recs_inorder,
	.keys_contiguous	= xfs_allocbt_keys_contiguous,
};

static const struct xfs_btree_ops xfs_cntbt_ops = {
	.rec_len		= sizeof(xfs_alloc_rec_t),
	.key_len		= sizeof(xfs_alloc_key_t),

	.dup_cursor		= xfs_allocbt_dup_cursor,
	.set_root		= xfs_allocbt_set_root,
	.alloc_block		= xfs_allocbt_alloc_block,
	.free_block		= xfs_allocbt_free_block,
	.update_lastrec		= xfs_allocbt_update_lastrec,
	.get_minrecs		= xfs_allocbt_get_minrecs,
	.get_maxrecs		= xfs_allocbt_get_maxrecs,
	.init_key_from_rec	= xfs_allocbt_init_key_from_rec,
	.init_high_key_from_rec	= xfs_cntbt_init_high_key_from_rec,
	.init_rec_from_cur	= xfs_allocbt_init_rec_from_cur,
	.init_ptr_from_cur	= xfs_allocbt_init_ptr_from_cur,
	.key_diff		= xfs_cntbt_key_diff,
	.buf_ops		= &xfs_cntbt_buf_ops,
	.diff_two_keys		= xfs_cntbt_diff_two_keys,
	.keys_inorder		= xfs_cntbt_keys_inorder,
	.recs_inorder		= xfs_cntbt_recs_inorder,
	.keys_contiguous	= NULL, /* not needed right now */
};

/*
 * Allocate most of a new allocation btree cursor.
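 * The returned cursor is not fully usable yet: xfs_allocbt_init_cursor()
 * still has to attach the AGF buffer and set bc_nlevels, while
 * xfs_allocbt_stage_cursor() attaches a fake root for staging instead.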
 */
STATIC struct xfs_btree_cur *
xfs_allocbt_init_common(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_perag	*pag,
	xfs_btnum_t		btnum)
{
	struct xfs_btree_cur	*cur;

	ASSERT(btnum == XFS_BTNUM_BNO || btnum == XFS_BTNUM_CNT);

	cur = xfs_btree_alloc_cursor(mp, tp, btnum, mp->m_alloc_maxlevels,
			xfs_allocbt_cur_cache);
	cur->bc_ag.abt.active = false;

	if (btnum == XFS_BTNUM_CNT) {
		cur->bc_ops = &xfs_cntbt_ops;
		cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_abtc_2);
		cur->bc_flags = XFS_BTREE_LASTREC_UPDATE;
	} else {
		cur->bc_ops = &xfs_bnobt_ops;
		cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_abtb_2);
	}

	cur->bc_ag.pag = xfs_perag_hold(pag);

	if (xfs_has_crc(mp))
		cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;

	return cur;
}

/*
 * Allocate a new allocation btree cursor.
 */
struct xfs_btree_cur *			/* new alloc btree cursor */
xfs_allocbt_init_cursor(
	struct xfs_mount	*mp,		/* file system mount point */
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_buf		*agbp,		/* buffer for agf structure */
	struct xfs_perag	*pag,
	xfs_btnum_t		btnum)		/* btree identifier */
{
	struct xfs_agf		*agf = agbp->b_addr;
	struct xfs_btree_cur	*cur;

	cur = xfs_allocbt_init_common(mp, tp, pag, btnum);
	if (btnum == XFS_BTNUM_CNT)
		cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]);
	else
		cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]);

	cur->bc_ag.agbp = agbp;

	return cur;
}

/* Create a free space btree cursor with a fake root for staging. */
struct xfs_btree_cur *
xfs_allocbt_stage_cursor(
	struct xfs_mount	*mp,
	struct xbtree_afakeroot	*afake,
	struct xfs_perag	*pag,
	xfs_btnum_t		btnum)
{
	struct xfs_btree_cur	*cur;

	cur = xfs_allocbt_init_common(mp, NULL, pag, btnum);
	xfs_btree_stage_afakeroot(cur, afake);
	return cur;
}

/*
 * Install a new free space btree root.  Caller is responsible for invalidating
 * and freeing the old btree blocks.
 */
void
xfs_allocbt_commit_staged_btree(
	struct xfs_btree_cur	*cur,
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp)
{
	struct xfs_agf		*agf = agbp->b_addr;
	struct xbtree_afakeroot	*afake = cur->bc_ag.afake;

	ASSERT(cur->bc_flags & XFS_BTREE_STAGING);

	agf->agf_roots[cur->bc_btnum] = cpu_to_be32(afake->af_root);
	agf->agf_levels[cur->bc_btnum] = cpu_to_be32(afake->af_levels);
	xfs_alloc_log_agf(tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS);

	if (cur->bc_btnum == XFS_BTNUM_BNO) {
		xfs_btree_commit_afakeroot(cur, tp, agbp, &xfs_bnobt_ops);
	} else {
		cur->bc_flags |= XFS_BTREE_LASTREC_UPDATE;
		xfs_btree_commit_afakeroot(cur, tp, agbp, &xfs_cntbt_ops);
	}
}

/* Calculate number of records in an alloc btree block. */
static inline unsigned int
xfs_allocbt_block_maxrecs(
	unsigned int		blocklen,
	bool			leaf)
{
	if (leaf)
		return blocklen / sizeof(xfs_alloc_rec_t);
	return blocklen / (sizeof(xfs_alloc_key_t) + sizeof(xfs_alloc_ptr_t));
}

/*
 * Calculate number of records in an alloc btree block.
 */
int
xfs_allocbt_maxrecs(
	struct xfs_mount	*mp,
	int			blocklen,
	int			leaf)
{
	blocklen -= XFS_ALLOC_BLOCK_LEN(mp);
	return xfs_allocbt_block_maxrecs(blocklen, leaf);
}

/*
 * Free space btrees are at their largest when every other block is free.
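 * In that configuration every free extent is exactly one block long, so each
 * free space btree needs one record for every other block in the AG, which
 * is the (agblocks + 1) / 2 bound encoded below.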
 */
#define XFS_MAX_FREESP_RECORDS	((XFS_MAX_AG_BLOCKS + 1) / 2)

/* Compute the max possible height for free space btrees. */
unsigned int
xfs_allocbt_maxlevels_ondisk(void)
{
	unsigned int		minrecs[2];
	unsigned int		blocklen;

	blocklen = min(XFS_MIN_BLOCKSIZE - XFS_BTREE_SBLOCK_LEN,
		       XFS_MIN_CRC_BLOCKSIZE - XFS_BTREE_SBLOCK_CRC_LEN);

	minrecs[0] = xfs_allocbt_block_maxrecs(blocklen, true) / 2;
	minrecs[1] = xfs_allocbt_block_maxrecs(blocklen, false) / 2;

	return xfs_btree_compute_maxlevels(minrecs, XFS_MAX_FREESP_RECORDS);
}

/* Calculate the freespace btree size for some records. */
xfs_extlen_t
xfs_allocbt_calc_size(
	struct xfs_mount	*mp,
	unsigned long long	len)
{
	return xfs_btree_calc_size(mp->m_alloc_mnr, len);
}

int __init
xfs_allocbt_init_cur_cache(void)
{
	xfs_allocbt_cur_cache = kmem_cache_create("xfs_bnobt_cur",
			xfs_btree_cur_sizeof(xfs_allocbt_maxlevels_ondisk()),
			0, 0, NULL);

	if (!xfs_allocbt_cur_cache)
		return -ENOMEM;
	return 0;
}

void
xfs_allocbt_destroy_cur_cache(void)
{
	kmem_cache_destroy(xfs_allocbt_cur_cache);
	xfs_allocbt_cur_cache = NULL;
}
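
/*
 * Typical cursor lifecycle, shown only as a sketch (the perag-based
 * xfs_alloc_read_agf() calling convention is assumed from its callers and is
 * not defined in this file):
 *
 *	error = xfs_alloc_read_agf(pag, tp, 0, &agbp);
 *	if (error)
 *		return error;
 *	cur = xfs_allocbt_init_cursor(mp, tp, agbp, pag, XFS_BTNUM_BNO);
 *	... perform btree lookups and updates ...
 *	xfs_btree_del_cursor(cur, error);
 */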