1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc. 4 * All Rights Reserved. 5 */ 6 #include "xfs.h" 7 #include "xfs_fs.h" 8 #include "xfs_shared.h" 9 #include "xfs_format.h" 10 #include "xfs_log_format.h" 11 #include "xfs_trans_resv.h" 12 #include "xfs_bit.h" 13 #include "xfs_mount.h" 14 #include "xfs_btree.h" 15 #include "xfs_btree_staging.h" 16 #include "xfs_ialloc.h" 17 #include "xfs_ialloc_btree.h" 18 #include "xfs_alloc.h" 19 #include "xfs_error.h" 20 #include "xfs_trace.h" 21 #include "xfs_trans.h" 22 #include "xfs_rmap.h" 23 #include "xfs_ag.h" 24 25 static struct kmem_cache *xfs_inobt_cur_cache; 26 27 STATIC int 28 xfs_inobt_get_minrecs( 29 struct xfs_btree_cur *cur, 30 int level) 31 { 32 return M_IGEO(cur->bc_mp)->inobt_mnr[level != 0]; 33 } 34 35 STATIC struct xfs_btree_cur * 36 xfs_inobt_dup_cursor( 37 struct xfs_btree_cur *cur) 38 { 39 return xfs_inobt_init_cursor(cur->bc_ag.pag, cur->bc_tp, 40 cur->bc_ag.agbp, cur->bc_btnum); 41 } 42 43 STATIC void 44 xfs_inobt_set_root( 45 struct xfs_btree_cur *cur, 46 const union xfs_btree_ptr *nptr, 47 int inc) /* level change */ 48 { 49 struct xfs_buf *agbp = cur->bc_ag.agbp; 50 struct xfs_agi *agi = agbp->b_addr; 51 52 agi->agi_root = nptr->s; 53 be32_add_cpu(&agi->agi_level, inc); 54 xfs_ialloc_log_agi(cur->bc_tp, agbp, XFS_AGI_ROOT | XFS_AGI_LEVEL); 55 } 56 57 STATIC void 58 xfs_finobt_set_root( 59 struct xfs_btree_cur *cur, 60 const union xfs_btree_ptr *nptr, 61 int inc) /* level change */ 62 { 63 struct xfs_buf *agbp = cur->bc_ag.agbp; 64 struct xfs_agi *agi = agbp->b_addr; 65 66 agi->agi_free_root = nptr->s; 67 be32_add_cpu(&agi->agi_free_level, inc); 68 xfs_ialloc_log_agi(cur->bc_tp, agbp, 69 XFS_AGI_FREE_ROOT | XFS_AGI_FREE_LEVEL); 70 } 71 72 /* Update the inode btree block counter for this btree. */ 73 static inline void 74 xfs_inobt_mod_blockcount( 75 struct xfs_btree_cur *cur, 76 int howmuch) 77 { 78 struct xfs_buf *agbp = cur->bc_ag.agbp; 79 struct xfs_agi *agi = agbp->b_addr; 80 81 if (!xfs_has_inobtcounts(cur->bc_mp)) 82 return; 83 84 if (cur->bc_btnum == XFS_BTNUM_FINO) 85 be32_add_cpu(&agi->agi_fblocks, howmuch); 86 else if (cur->bc_btnum == XFS_BTNUM_INO) 87 be32_add_cpu(&agi->agi_iblocks, howmuch); 88 xfs_ialloc_log_agi(cur->bc_tp, agbp, XFS_AGI_IBLOCKS); 89 } 90 91 STATIC int 92 __xfs_inobt_alloc_block( 93 struct xfs_btree_cur *cur, 94 const union xfs_btree_ptr *start, 95 union xfs_btree_ptr *new, 96 int *stat, 97 enum xfs_ag_resv_type resv) 98 { 99 xfs_alloc_arg_t args; /* block allocation args */ 100 int error; /* error return value */ 101 xfs_agblock_t sbno = be32_to_cpu(start->s); 102 103 memset(&args, 0, sizeof(args)); 104 args.tp = cur->bc_tp; 105 args.mp = cur->bc_mp; 106 args.pag = cur->bc_ag.pag; 107 args.oinfo = XFS_RMAP_OINFO_INOBT; 108 args.minlen = 1; 109 args.maxlen = 1; 110 args.prod = 1; 111 args.resv = resv; 112 113 error = xfs_alloc_vextent_near_bno(&args, 114 XFS_AGB_TO_FSB(args.mp, args.pag->pag_agno, sbno)); 115 if (error) 116 return error; 117 118 if (args.fsbno == NULLFSBLOCK) { 119 *stat = 0; 120 return 0; 121 } 122 ASSERT(args.len == 1); 123 124 new->s = cpu_to_be32(XFS_FSB_TO_AGBNO(args.mp, args.fsbno)); 125 *stat = 1; 126 xfs_inobt_mod_blockcount(cur, 1); 127 return 0; 128 } 129 130 STATIC int 131 xfs_inobt_alloc_block( 132 struct xfs_btree_cur *cur, 133 const union xfs_btree_ptr *start, 134 union xfs_btree_ptr *new, 135 int *stat) 136 { 137 return __xfs_inobt_alloc_block(cur, start, new, stat, XFS_AG_RESV_NONE); 138 } 139 140 STATIC int 141 xfs_finobt_alloc_block( 142 struct xfs_btree_cur *cur, 143 const union xfs_btree_ptr *start, 144 union xfs_btree_ptr *new, 145 int *stat) 146 { 147 if (cur->bc_mp->m_finobt_nores) 148 return xfs_inobt_alloc_block(cur, start, new, stat); 149 return __xfs_inobt_alloc_block(cur, start, new, stat, 150 XFS_AG_RESV_METADATA); 151 } 152 153 STATIC int 154 __xfs_inobt_free_block( 155 struct xfs_btree_cur *cur, 156 struct xfs_buf *bp, 157 enum xfs_ag_resv_type resv) 158 { 159 xfs_fsblock_t fsbno; 160 161 xfs_inobt_mod_blockcount(cur, -1); 162 fsbno = XFS_DADDR_TO_FSB(cur->bc_mp, xfs_buf_daddr(bp)); 163 return xfs_free_extent_later(cur->bc_tp, fsbno, 1, 164 &XFS_RMAP_OINFO_INOBT, resv); 165 } 166 167 STATIC int 168 xfs_inobt_free_block( 169 struct xfs_btree_cur *cur, 170 struct xfs_buf *bp) 171 { 172 return __xfs_inobt_free_block(cur, bp, XFS_AG_RESV_NONE); 173 } 174 175 STATIC int 176 xfs_finobt_free_block( 177 struct xfs_btree_cur *cur, 178 struct xfs_buf *bp) 179 { 180 if (cur->bc_mp->m_finobt_nores) 181 return xfs_inobt_free_block(cur, bp); 182 return __xfs_inobt_free_block(cur, bp, XFS_AG_RESV_METADATA); 183 } 184 185 STATIC int 186 xfs_inobt_get_maxrecs( 187 struct xfs_btree_cur *cur, 188 int level) 189 { 190 return M_IGEO(cur->bc_mp)->inobt_mxr[level != 0]; 191 } 192 193 STATIC void 194 xfs_inobt_init_key_from_rec( 195 union xfs_btree_key *key, 196 const union xfs_btree_rec *rec) 197 { 198 key->inobt.ir_startino = rec->inobt.ir_startino; 199 } 200 201 STATIC void 202 xfs_inobt_init_high_key_from_rec( 203 union xfs_btree_key *key, 204 const union xfs_btree_rec *rec) 205 { 206 __u32 x; 207 208 x = be32_to_cpu(rec->inobt.ir_startino); 209 x += XFS_INODES_PER_CHUNK - 1; 210 key->inobt.ir_startino = cpu_to_be32(x); 211 } 212 213 STATIC void 214 xfs_inobt_init_rec_from_cur( 215 struct xfs_btree_cur *cur, 216 union xfs_btree_rec *rec) 217 { 218 rec->inobt.ir_startino = cpu_to_be32(cur->bc_rec.i.ir_startino); 219 if (xfs_has_sparseinodes(cur->bc_mp)) { 220 rec->inobt.ir_u.sp.ir_holemask = 221 cpu_to_be16(cur->bc_rec.i.ir_holemask); 222 rec->inobt.ir_u.sp.ir_count = cur->bc_rec.i.ir_count; 223 rec->inobt.ir_u.sp.ir_freecount = cur->bc_rec.i.ir_freecount; 224 } else { 225 /* ir_holemask/ir_count not supported on-disk */ 226 rec->inobt.ir_u.f.ir_freecount = 227 cpu_to_be32(cur->bc_rec.i.ir_freecount); 228 } 229 rec->inobt.ir_free = cpu_to_be64(cur->bc_rec.i.ir_free); 230 } 231 232 /* 233 * initial value of ptr for lookup 234 */ 235 STATIC void 236 xfs_inobt_init_ptr_from_cur( 237 struct xfs_btree_cur *cur, 238 union xfs_btree_ptr *ptr) 239 { 240 struct xfs_agi *agi = cur->bc_ag.agbp->b_addr; 241 242 ASSERT(cur->bc_ag.pag->pag_agno == be32_to_cpu(agi->agi_seqno)); 243 244 ptr->s = agi->agi_root; 245 } 246 247 STATIC void 248 xfs_finobt_init_ptr_from_cur( 249 struct xfs_btree_cur *cur, 250 union xfs_btree_ptr *ptr) 251 { 252 struct xfs_agi *agi = cur->bc_ag.agbp->b_addr; 253 254 ASSERT(cur->bc_ag.pag->pag_agno == be32_to_cpu(agi->agi_seqno)); 255 ptr->s = agi->agi_free_root; 256 } 257 258 STATIC int64_t 259 xfs_inobt_key_diff( 260 struct xfs_btree_cur *cur, 261 const union xfs_btree_key *key) 262 { 263 return (int64_t)be32_to_cpu(key->inobt.ir_startino) - 264 cur->bc_rec.i.ir_startino; 265 } 266 267 STATIC int64_t 268 xfs_inobt_diff_two_keys( 269 struct xfs_btree_cur *cur, 270 const union xfs_btree_key *k1, 271 const union xfs_btree_key *k2, 272 const union xfs_btree_key *mask) 273 { 274 ASSERT(!mask || mask->inobt.ir_startino); 275 276 return (int64_t)be32_to_cpu(k1->inobt.ir_startino) - 277 be32_to_cpu(k2->inobt.ir_startino); 278 } 279 280 static xfs_failaddr_t 281 xfs_inobt_verify( 282 struct xfs_buf *bp) 283 { 284 struct xfs_mount *mp = bp->b_mount; 285 struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp); 286 xfs_failaddr_t fa; 287 unsigned int level; 288 289 if (!xfs_verify_magic(bp, block->bb_magic)) 290 return __this_address; 291 292 /* 293 * During growfs operations, we can't verify the exact owner as the 294 * perag is not fully initialised and hence not attached to the buffer. 295 * 296 * Similarly, during log recovery we will have a perag structure 297 * attached, but the agi information will not yet have been initialised 298 * from the on disk AGI. We don't currently use any of this information, 299 * but beware of the landmine (i.e. need to check 300 * xfs_perag_initialised_agi(pag)) if we ever do. 301 */ 302 if (xfs_has_crc(mp)) { 303 fa = xfs_btree_sblock_v5hdr_verify(bp); 304 if (fa) 305 return fa; 306 } 307 308 /* level verification */ 309 level = be16_to_cpu(block->bb_level); 310 if (level >= M_IGEO(mp)->inobt_maxlevels) 311 return __this_address; 312 313 return xfs_btree_sblock_verify(bp, 314 M_IGEO(mp)->inobt_mxr[level != 0]); 315 } 316 317 static void 318 xfs_inobt_read_verify( 319 struct xfs_buf *bp) 320 { 321 xfs_failaddr_t fa; 322 323 if (!xfs_btree_sblock_verify_crc(bp)) 324 xfs_verifier_error(bp, -EFSBADCRC, __this_address); 325 else { 326 fa = xfs_inobt_verify(bp); 327 if (fa) 328 xfs_verifier_error(bp, -EFSCORRUPTED, fa); 329 } 330 331 if (bp->b_error) 332 trace_xfs_btree_corrupt(bp, _RET_IP_); 333 } 334 335 static void 336 xfs_inobt_write_verify( 337 struct xfs_buf *bp) 338 { 339 xfs_failaddr_t fa; 340 341 fa = xfs_inobt_verify(bp); 342 if (fa) { 343 trace_xfs_btree_corrupt(bp, _RET_IP_); 344 xfs_verifier_error(bp, -EFSCORRUPTED, fa); 345 return; 346 } 347 xfs_btree_sblock_calc_crc(bp); 348 349 } 350 351 const struct xfs_buf_ops xfs_inobt_buf_ops = { 352 .name = "xfs_inobt", 353 .magic = { cpu_to_be32(XFS_IBT_MAGIC), cpu_to_be32(XFS_IBT_CRC_MAGIC) }, 354 .verify_read = xfs_inobt_read_verify, 355 .verify_write = xfs_inobt_write_verify, 356 .verify_struct = xfs_inobt_verify, 357 }; 358 359 const struct xfs_buf_ops xfs_finobt_buf_ops = { 360 .name = "xfs_finobt", 361 .magic = { cpu_to_be32(XFS_FIBT_MAGIC), 362 cpu_to_be32(XFS_FIBT_CRC_MAGIC) }, 363 .verify_read = xfs_inobt_read_verify, 364 .verify_write = xfs_inobt_write_verify, 365 .verify_struct = xfs_inobt_verify, 366 }; 367 368 STATIC int 369 xfs_inobt_keys_inorder( 370 struct xfs_btree_cur *cur, 371 const union xfs_btree_key *k1, 372 const union xfs_btree_key *k2) 373 { 374 return be32_to_cpu(k1->inobt.ir_startino) < 375 be32_to_cpu(k2->inobt.ir_startino); 376 } 377 378 STATIC int 379 xfs_inobt_recs_inorder( 380 struct xfs_btree_cur *cur, 381 const union xfs_btree_rec *r1, 382 const union xfs_btree_rec *r2) 383 { 384 return be32_to_cpu(r1->inobt.ir_startino) + XFS_INODES_PER_CHUNK <= 385 be32_to_cpu(r2->inobt.ir_startino); 386 } 387 388 STATIC enum xbtree_key_contig 389 xfs_inobt_keys_contiguous( 390 struct xfs_btree_cur *cur, 391 const union xfs_btree_key *key1, 392 const union xfs_btree_key *key2, 393 const union xfs_btree_key *mask) 394 { 395 ASSERT(!mask || mask->inobt.ir_startino); 396 397 return xbtree_key_contig(be32_to_cpu(key1->inobt.ir_startino), 398 be32_to_cpu(key2->inobt.ir_startino)); 399 } 400 401 static const struct xfs_btree_ops xfs_inobt_ops = { 402 .rec_len = sizeof(xfs_inobt_rec_t), 403 .key_len = sizeof(xfs_inobt_key_t), 404 405 .dup_cursor = xfs_inobt_dup_cursor, 406 .set_root = xfs_inobt_set_root, 407 .alloc_block = xfs_inobt_alloc_block, 408 .free_block = xfs_inobt_free_block, 409 .get_minrecs = xfs_inobt_get_minrecs, 410 .get_maxrecs = xfs_inobt_get_maxrecs, 411 .init_key_from_rec = xfs_inobt_init_key_from_rec, 412 .init_high_key_from_rec = xfs_inobt_init_high_key_from_rec, 413 .init_rec_from_cur = xfs_inobt_init_rec_from_cur, 414 .init_ptr_from_cur = xfs_inobt_init_ptr_from_cur, 415 .key_diff = xfs_inobt_key_diff, 416 .buf_ops = &xfs_inobt_buf_ops, 417 .diff_two_keys = xfs_inobt_diff_two_keys, 418 .keys_inorder = xfs_inobt_keys_inorder, 419 .recs_inorder = xfs_inobt_recs_inorder, 420 .keys_contiguous = xfs_inobt_keys_contiguous, 421 }; 422 423 static const struct xfs_btree_ops xfs_finobt_ops = { 424 .rec_len = sizeof(xfs_inobt_rec_t), 425 .key_len = sizeof(xfs_inobt_key_t), 426 427 .dup_cursor = xfs_inobt_dup_cursor, 428 .set_root = xfs_finobt_set_root, 429 .alloc_block = xfs_finobt_alloc_block, 430 .free_block = xfs_finobt_free_block, 431 .get_minrecs = xfs_inobt_get_minrecs, 432 .get_maxrecs = xfs_inobt_get_maxrecs, 433 .init_key_from_rec = xfs_inobt_init_key_from_rec, 434 .init_high_key_from_rec = xfs_inobt_init_high_key_from_rec, 435 .init_rec_from_cur = xfs_inobt_init_rec_from_cur, 436 .init_ptr_from_cur = xfs_finobt_init_ptr_from_cur, 437 .key_diff = xfs_inobt_key_diff, 438 .buf_ops = &xfs_finobt_buf_ops, 439 .diff_two_keys = xfs_inobt_diff_two_keys, 440 .keys_inorder = xfs_inobt_keys_inorder, 441 .recs_inorder = xfs_inobt_recs_inorder, 442 .keys_contiguous = xfs_inobt_keys_contiguous, 443 }; 444 445 /* 446 * Initialize a new inode btree cursor. 447 */ 448 static struct xfs_btree_cur * 449 xfs_inobt_init_common( 450 struct xfs_perag *pag, 451 struct xfs_trans *tp, /* transaction pointer */ 452 xfs_btnum_t btnum) /* ialloc or free ino btree */ 453 { 454 struct xfs_mount *mp = pag->pag_mount; 455 struct xfs_btree_cur *cur; 456 457 cur = xfs_btree_alloc_cursor(mp, tp, btnum, 458 M_IGEO(mp)->inobt_maxlevels, xfs_inobt_cur_cache); 459 if (btnum == XFS_BTNUM_INO) { 460 cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_ibt_2); 461 cur->bc_ops = &xfs_inobt_ops; 462 } else { 463 cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_fibt_2); 464 cur->bc_ops = &xfs_finobt_ops; 465 } 466 467 if (xfs_has_crc(mp)) 468 cur->bc_flags |= XFS_BTREE_CRC_BLOCKS; 469 470 cur->bc_ag.pag = xfs_perag_hold(pag); 471 return cur; 472 } 473 474 /* Create an inode btree cursor. */ 475 struct xfs_btree_cur * 476 xfs_inobt_init_cursor( 477 struct xfs_perag *pag, 478 struct xfs_trans *tp, 479 struct xfs_buf *agbp, 480 xfs_btnum_t btnum) 481 { 482 struct xfs_btree_cur *cur; 483 struct xfs_agi *agi = agbp->b_addr; 484 485 cur = xfs_inobt_init_common(pag, tp, btnum); 486 if (btnum == XFS_BTNUM_INO) 487 cur->bc_nlevels = be32_to_cpu(agi->agi_level); 488 else 489 cur->bc_nlevels = be32_to_cpu(agi->agi_free_level); 490 cur->bc_ag.agbp = agbp; 491 return cur; 492 } 493 494 /* Create an inode btree cursor with a fake root for staging. */ 495 struct xfs_btree_cur * 496 xfs_inobt_stage_cursor( 497 struct xfs_perag *pag, 498 struct xbtree_afakeroot *afake, 499 xfs_btnum_t btnum) 500 { 501 struct xfs_btree_cur *cur; 502 503 cur = xfs_inobt_init_common(pag, NULL, btnum); 504 xfs_btree_stage_afakeroot(cur, afake); 505 return cur; 506 } 507 508 /* 509 * Install a new inobt btree root. Caller is responsible for invalidating 510 * and freeing the old btree blocks. 511 */ 512 void 513 xfs_inobt_commit_staged_btree( 514 struct xfs_btree_cur *cur, 515 struct xfs_trans *tp, 516 struct xfs_buf *agbp) 517 { 518 struct xfs_agi *agi = agbp->b_addr; 519 struct xbtree_afakeroot *afake = cur->bc_ag.afake; 520 int fields; 521 522 ASSERT(cur->bc_flags & XFS_BTREE_STAGING); 523 524 if (cur->bc_btnum == XFS_BTNUM_INO) { 525 fields = XFS_AGI_ROOT | XFS_AGI_LEVEL; 526 agi->agi_root = cpu_to_be32(afake->af_root); 527 agi->agi_level = cpu_to_be32(afake->af_levels); 528 if (xfs_has_inobtcounts(cur->bc_mp)) { 529 agi->agi_iblocks = cpu_to_be32(afake->af_blocks); 530 fields |= XFS_AGI_IBLOCKS; 531 } 532 xfs_ialloc_log_agi(tp, agbp, fields); 533 xfs_btree_commit_afakeroot(cur, tp, agbp, &xfs_inobt_ops); 534 } else { 535 fields = XFS_AGI_FREE_ROOT | XFS_AGI_FREE_LEVEL; 536 agi->agi_free_root = cpu_to_be32(afake->af_root); 537 agi->agi_free_level = cpu_to_be32(afake->af_levels); 538 if (xfs_has_inobtcounts(cur->bc_mp)) { 539 agi->agi_fblocks = cpu_to_be32(afake->af_blocks); 540 fields |= XFS_AGI_IBLOCKS; 541 } 542 xfs_ialloc_log_agi(tp, agbp, fields); 543 xfs_btree_commit_afakeroot(cur, tp, agbp, &xfs_finobt_ops); 544 } 545 } 546 547 /* Calculate number of records in an inode btree block. */ 548 static inline unsigned int 549 xfs_inobt_block_maxrecs( 550 unsigned int blocklen, 551 bool leaf) 552 { 553 if (leaf) 554 return blocklen / sizeof(xfs_inobt_rec_t); 555 return blocklen / (sizeof(xfs_inobt_key_t) + sizeof(xfs_inobt_ptr_t)); 556 } 557 558 /* 559 * Calculate number of records in an inobt btree block. 560 */ 561 int 562 xfs_inobt_maxrecs( 563 struct xfs_mount *mp, 564 int blocklen, 565 int leaf) 566 { 567 blocklen -= XFS_INOBT_BLOCK_LEN(mp); 568 return xfs_inobt_block_maxrecs(blocklen, leaf); 569 } 570 571 /* 572 * Maximum number of inode btree records per AG. Pretend that we can fill an 573 * entire AG completely full of inodes except for the AG headers. 574 */ 575 #define XFS_MAX_INODE_RECORDS \ 576 ((XFS_MAX_AG_BYTES - (4 * BBSIZE)) / XFS_DINODE_MIN_SIZE) / \ 577 XFS_INODES_PER_CHUNK 578 579 /* Compute the max possible height for the inode btree. */ 580 static inline unsigned int 581 xfs_inobt_maxlevels_ondisk(void) 582 { 583 unsigned int minrecs[2]; 584 unsigned int blocklen; 585 586 blocklen = min(XFS_MIN_BLOCKSIZE - XFS_BTREE_SBLOCK_LEN, 587 XFS_MIN_CRC_BLOCKSIZE - XFS_BTREE_SBLOCK_CRC_LEN); 588 589 minrecs[0] = xfs_inobt_block_maxrecs(blocklen, true) / 2; 590 minrecs[1] = xfs_inobt_block_maxrecs(blocklen, false) / 2; 591 592 return xfs_btree_compute_maxlevels(minrecs, XFS_MAX_INODE_RECORDS); 593 } 594 595 /* Compute the max possible height for the free inode btree. */ 596 static inline unsigned int 597 xfs_finobt_maxlevels_ondisk(void) 598 { 599 unsigned int minrecs[2]; 600 unsigned int blocklen; 601 602 blocklen = XFS_MIN_CRC_BLOCKSIZE - XFS_BTREE_SBLOCK_CRC_LEN; 603 604 minrecs[0] = xfs_inobt_block_maxrecs(blocklen, true) / 2; 605 minrecs[1] = xfs_inobt_block_maxrecs(blocklen, false) / 2; 606 607 return xfs_btree_compute_maxlevels(minrecs, XFS_MAX_INODE_RECORDS); 608 } 609 610 /* Compute the max possible height for either inode btree. */ 611 unsigned int 612 xfs_iallocbt_maxlevels_ondisk(void) 613 { 614 return max(xfs_inobt_maxlevels_ondisk(), 615 xfs_finobt_maxlevels_ondisk()); 616 } 617 618 /* 619 * Convert the inode record holemask to an inode allocation bitmap. The inode 620 * allocation bitmap is inode granularity and specifies whether an inode is 621 * physically allocated on disk (not whether the inode is considered allocated 622 * or free by the fs). 623 * 624 * A bit value of 1 means the inode is allocated, a value of 0 means it is free. 625 */ 626 uint64_t 627 xfs_inobt_irec_to_allocmask( 628 const struct xfs_inobt_rec_incore *rec) 629 { 630 uint64_t bitmap = 0; 631 uint64_t inodespbit; 632 int nextbit; 633 uint allocbitmap; 634 635 /* 636 * The holemask has 16-bits for a 64 inode record. Therefore each 637 * holemask bit represents multiple inodes. Create a mask of bits to set 638 * in the allocmask for each holemask bit. 639 */ 640 inodespbit = (1 << XFS_INODES_PER_HOLEMASK_BIT) - 1; 641 642 /* 643 * Allocated inodes are represented by 0 bits in holemask. Invert the 0 644 * bits to 1 and convert to a uint so we can use xfs_next_bit(). Mask 645 * anything beyond the 16 holemask bits since this casts to a larger 646 * type. 647 */ 648 allocbitmap = ~rec->ir_holemask & ((1 << XFS_INOBT_HOLEMASK_BITS) - 1); 649 650 /* 651 * allocbitmap is the inverted holemask so every set bit represents 652 * allocated inodes. To expand from 16-bit holemask granularity to 653 * 64-bit (e.g., bit-per-inode), set inodespbit bits in the target 654 * bitmap for every holemask bit. 655 */ 656 nextbit = xfs_next_bit(&allocbitmap, 1, 0); 657 while (nextbit != -1) { 658 ASSERT(nextbit < (sizeof(rec->ir_holemask) * NBBY)); 659 660 bitmap |= (inodespbit << 661 (nextbit * XFS_INODES_PER_HOLEMASK_BIT)); 662 663 nextbit = xfs_next_bit(&allocbitmap, 1, nextbit + 1); 664 } 665 666 return bitmap; 667 } 668 669 #if defined(DEBUG) || defined(XFS_WARN) 670 /* 671 * Verify that an in-core inode record has a valid inode count. 672 */ 673 int 674 xfs_inobt_rec_check_count( 675 struct xfs_mount *mp, 676 struct xfs_inobt_rec_incore *rec) 677 { 678 int inocount = 0; 679 int nextbit = 0; 680 uint64_t allocbmap; 681 int wordsz; 682 683 wordsz = sizeof(allocbmap) / sizeof(unsigned int); 684 allocbmap = xfs_inobt_irec_to_allocmask(rec); 685 686 nextbit = xfs_next_bit((uint *) &allocbmap, wordsz, nextbit); 687 while (nextbit != -1) { 688 inocount++; 689 nextbit = xfs_next_bit((uint *) &allocbmap, wordsz, 690 nextbit + 1); 691 } 692 693 if (inocount != rec->ir_count) 694 return -EFSCORRUPTED; 695 696 return 0; 697 } 698 #endif /* DEBUG */ 699 700 static xfs_extlen_t 701 xfs_inobt_max_size( 702 struct xfs_perag *pag) 703 { 704 struct xfs_mount *mp = pag->pag_mount; 705 xfs_agblock_t agblocks = pag->block_count; 706 707 /* Bail out if we're uninitialized, which can happen in mkfs. */ 708 if (M_IGEO(mp)->inobt_mxr[0] == 0) 709 return 0; 710 711 /* 712 * The log is permanently allocated, so the space it occupies will 713 * never be available for the kinds of things that would require btree 714 * expansion. We therefore can pretend the space isn't there. 715 */ 716 if (xfs_ag_contains_log(mp, pag->pag_agno)) 717 agblocks -= mp->m_sb.sb_logblocks; 718 719 return xfs_btree_calc_size(M_IGEO(mp)->inobt_mnr, 720 (uint64_t)agblocks * mp->m_sb.sb_inopblock / 721 XFS_INODES_PER_CHUNK); 722 } 723 724 /* Read AGI and create inobt cursor. */ 725 int 726 xfs_inobt_cur( 727 struct xfs_perag *pag, 728 struct xfs_trans *tp, 729 xfs_btnum_t which, 730 struct xfs_btree_cur **curpp, 731 struct xfs_buf **agi_bpp) 732 { 733 struct xfs_btree_cur *cur; 734 int error; 735 736 ASSERT(*agi_bpp == NULL); 737 ASSERT(*curpp == NULL); 738 739 error = xfs_ialloc_read_agi(pag, tp, agi_bpp); 740 if (error) 741 return error; 742 743 cur = xfs_inobt_init_cursor(pag, tp, *agi_bpp, which); 744 *curpp = cur; 745 return 0; 746 } 747 748 static int 749 xfs_inobt_count_blocks( 750 struct xfs_perag *pag, 751 struct xfs_trans *tp, 752 xfs_btnum_t btnum, 753 xfs_extlen_t *tree_blocks) 754 { 755 struct xfs_buf *agbp = NULL; 756 struct xfs_btree_cur *cur = NULL; 757 int error; 758 759 error = xfs_inobt_cur(pag, tp, btnum, &cur, &agbp); 760 if (error) 761 return error; 762 763 error = xfs_btree_count_blocks(cur, tree_blocks); 764 xfs_btree_del_cursor(cur, error); 765 xfs_trans_brelse(tp, agbp); 766 767 return error; 768 } 769 770 /* Read finobt block count from AGI header. */ 771 static int 772 xfs_finobt_read_blocks( 773 struct xfs_perag *pag, 774 struct xfs_trans *tp, 775 xfs_extlen_t *tree_blocks) 776 { 777 struct xfs_buf *agbp; 778 struct xfs_agi *agi; 779 int error; 780 781 error = xfs_ialloc_read_agi(pag, tp, &agbp); 782 if (error) 783 return error; 784 785 agi = agbp->b_addr; 786 *tree_blocks = be32_to_cpu(agi->agi_fblocks); 787 xfs_trans_brelse(tp, agbp); 788 return 0; 789 } 790 791 /* 792 * Figure out how many blocks to reserve and how many are used by this btree. 793 */ 794 int 795 xfs_finobt_calc_reserves( 796 struct xfs_perag *pag, 797 struct xfs_trans *tp, 798 xfs_extlen_t *ask, 799 xfs_extlen_t *used) 800 { 801 xfs_extlen_t tree_len = 0; 802 int error; 803 804 if (!xfs_has_finobt(pag->pag_mount)) 805 return 0; 806 807 if (xfs_has_inobtcounts(pag->pag_mount)) 808 error = xfs_finobt_read_blocks(pag, tp, &tree_len); 809 else 810 error = xfs_inobt_count_blocks(pag, tp, XFS_BTNUM_FINO, 811 &tree_len); 812 if (error) 813 return error; 814 815 *ask += xfs_inobt_max_size(pag); 816 *used += tree_len; 817 return 0; 818 } 819 820 /* Calculate the inobt btree size for some records. */ 821 xfs_extlen_t 822 xfs_iallocbt_calc_size( 823 struct xfs_mount *mp, 824 unsigned long long len) 825 { 826 return xfs_btree_calc_size(M_IGEO(mp)->inobt_mnr, len); 827 } 828 829 int __init 830 xfs_inobt_init_cur_cache(void) 831 { 832 xfs_inobt_cur_cache = kmem_cache_create("xfs_inobt_cur", 833 xfs_btree_cur_sizeof(xfs_inobt_maxlevels_ondisk()), 834 0, 0, NULL); 835 836 if (!xfs_inobt_cur_cache) 837 return -ENOMEM; 838 return 0; 839 } 840 841 void 842 xfs_inobt_destroy_cur_cache(void) 843 { 844 kmem_cache_destroy(xfs_inobt_cur_cache); 845 xfs_inobt_cur_cache = NULL; 846 } 847