// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_alloc.h"
#include "xfs_btree.h"
#include "xfs_btree_staging.h"
#include "xfs_bmap_btree.h"
#include "xfs_bmap.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_trace.h"
#include "xfs_rmap.h"
#include "xfs_ag.h"

static struct kmem_cache	*xfs_bmbt_cur_cache;

void
xfs_bmbt_init_block(
	struct xfs_inode	*ip,
	struct xfs_btree_block	*buf,
	struct xfs_buf		*bp,
	__u16			level,
	__u16			numrecs)
{
	if (bp)
		xfs_btree_init_buf(ip->i_mount, bp, &xfs_bmbt_ops, level,
				numrecs, ip->i_ino);
	else
		xfs_btree_init_block(ip->i_mount, buf, &xfs_bmbt_ops, level,
				numrecs, ip->i_ino);
}

/*
 * Convert on-disk form of btree root to in-memory form.
 */
void
xfs_bmdr_to_bmbt(
	struct xfs_inode	*ip,
	xfs_bmdr_block_t	*dblock,
	int			dblocklen,
	struct xfs_btree_block	*rblock,
	int			rblocklen)
{
	struct xfs_mount	*mp = ip->i_mount;
	int			dmxr;
	xfs_bmbt_key_t		*fkp;
	__be64			*fpp;
	xfs_bmbt_key_t		*tkp;
	__be64			*tpp;

	xfs_bmbt_init_block(ip, rblock, NULL, 0, 0);
	rblock->bb_level = dblock->bb_level;
	ASSERT(be16_to_cpu(rblock->bb_level) > 0);
	rblock->bb_numrecs = dblock->bb_numrecs;
	dmxr = xfs_bmdr_maxrecs(dblocklen, 0);
	fkp = XFS_BMDR_KEY_ADDR(dblock, 1);
	tkp = XFS_BMBT_KEY_ADDR(mp, rblock, 1);
	fpp = XFS_BMDR_PTR_ADDR(dblock, 1, dmxr);
	tpp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, rblocklen);
	dmxr = be16_to_cpu(dblock->bb_numrecs);
	memcpy(tkp, fkp, sizeof(*fkp) * dmxr);
	memcpy(tpp, fpp, sizeof(*fpp) * dmxr);
}

void
xfs_bmbt_disk_get_all(
	const struct xfs_bmbt_rec *rec,
	struct xfs_bmbt_irec	*irec)
{
	uint64_t		l0 = get_unaligned_be64(&rec->l0);
	uint64_t		l1 = get_unaligned_be64(&rec->l1);

	irec->br_startoff = (l0 & xfs_mask64lo(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
	irec->br_startblock = ((l0 & xfs_mask64lo(9)) << 43) | (l1 >> 21);
	irec->br_blockcount = l1 & xfs_mask64lo(21);
	if (l0 >> (64 - BMBT_EXNTFLAG_BITLEN))
		irec->br_state = XFS_EXT_UNWRITTEN;
	else
		irec->br_state = XFS_EXT_NORM;
}

/*
 * Extract the blockcount field from an on-disk bmap extent record.
 */
xfs_filblks_t
xfs_bmbt_disk_get_blockcount(
	const struct xfs_bmbt_rec	*r)
{
	return (xfs_filblks_t)(be64_to_cpu(r->l1) & xfs_mask64lo(21));
}

/*
 * Extract the startoff field from a disk format bmap extent record.
 */
xfs_fileoff_t
xfs_bmbt_disk_get_startoff(
	const struct xfs_bmbt_rec	*r)
{
	return ((xfs_fileoff_t)be64_to_cpu(r->l0) &
		 xfs_mask64lo(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
}
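/*
 * On disk, an extent record packs four fields into two big-endian 64-bit
 * words, as decoded by xfs_bmbt_disk_get_all() above:
 *
 *	l0: bit  63	extent state flag (1 = unwritten)
 *	    bits 62-9	logical file offset (54 bits)
 *	    bits 8-0	top 9 bits of the startblock
 *	l1: bits 63-21	low 43 bits of the startblock (52 bits in total)
 *	    bits 20-0	block count (21 bits)
 */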
/*
 * Set all the fields in a bmap extent record from the uncompressed form.
 */
void
xfs_bmbt_disk_set_all(
	struct xfs_bmbt_rec	*r,
	struct xfs_bmbt_irec	*s)
{
	int			extent_flag = (s->br_state != XFS_EXT_NORM);

	ASSERT(s->br_state == XFS_EXT_NORM || s->br_state == XFS_EXT_UNWRITTEN);
	ASSERT(!(s->br_startoff & xfs_mask64hi(64-BMBT_STARTOFF_BITLEN)));
	ASSERT(!(s->br_blockcount & xfs_mask64hi(64-BMBT_BLOCKCOUNT_BITLEN)));
	ASSERT(!(s->br_startblock & xfs_mask64hi(64-BMBT_STARTBLOCK_BITLEN)));

	put_unaligned_be64(
		((xfs_bmbt_rec_base_t)extent_flag << 63) |
		 ((xfs_bmbt_rec_base_t)s->br_startoff << 9) |
		 ((xfs_bmbt_rec_base_t)s->br_startblock >> 43), &r->l0);
	put_unaligned_be64(
		((xfs_bmbt_rec_base_t)s->br_startblock << 21) |
		 ((xfs_bmbt_rec_base_t)s->br_blockcount &
		  (xfs_bmbt_rec_base_t)xfs_mask64lo(21)), &r->l1);
}

/*
 * Convert in-memory form of btree root to on-disk form.
 */
void
xfs_bmbt_to_bmdr(
	struct xfs_mount	*mp,
	struct xfs_btree_block	*rblock,
	int			rblocklen,
	xfs_bmdr_block_t	*dblock,
	int			dblocklen)
{
	int			dmxr;
	xfs_bmbt_key_t		*fkp;
	__be64			*fpp;
	xfs_bmbt_key_t		*tkp;
	__be64			*tpp;

	if (xfs_has_crc(mp)) {
		ASSERT(rblock->bb_magic == cpu_to_be32(XFS_BMAP_CRC_MAGIC));
		ASSERT(uuid_equal(&rblock->bb_u.l.bb_uuid,
		       &mp->m_sb.sb_meta_uuid));
		ASSERT(rblock->bb_u.l.bb_blkno ==
		       cpu_to_be64(XFS_BUF_DADDR_NULL));
	} else
		ASSERT(rblock->bb_magic == cpu_to_be32(XFS_BMAP_MAGIC));
	ASSERT(rblock->bb_u.l.bb_leftsib == cpu_to_be64(NULLFSBLOCK));
	ASSERT(rblock->bb_u.l.bb_rightsib == cpu_to_be64(NULLFSBLOCK));
	ASSERT(rblock->bb_level != 0);
	dblock->bb_level = rblock->bb_level;
	dblock->bb_numrecs = rblock->bb_numrecs;
	dmxr = xfs_bmdr_maxrecs(dblocklen, 0);
	fkp = XFS_BMBT_KEY_ADDR(mp, rblock, 1);
	tkp = XFS_BMDR_KEY_ADDR(dblock, 1);
	fpp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, rblocklen);
	tpp = XFS_BMDR_PTR_ADDR(dblock, 1, dmxr);
	dmxr = be16_to_cpu(dblock->bb_numrecs);
	memcpy(tkp, fkp, sizeof(*fkp) * dmxr);
	memcpy(tpp, fpp, sizeof(*fpp) * dmxr);
}

STATIC struct xfs_btree_cur *
xfs_bmbt_dup_cursor(
	struct xfs_btree_cur	*cur)
{
	struct xfs_btree_cur	*new;

	new = xfs_bmbt_init_cursor(cur->bc_mp, cur->bc_tp,
			cur->bc_ino.ip, cur->bc_ino.whichfork);
	new->bc_flags |= (cur->bc_flags &
			(XFS_BTREE_BMBT_INVALID_OWNER | XFS_BTREE_BMBT_WASDEL));
	return new;
}

STATIC void
xfs_bmbt_update_cursor(
	struct xfs_btree_cur	*src,
	struct xfs_btree_cur	*dst)
{
	ASSERT((dst->bc_tp->t_highest_agno != NULLAGNUMBER) ||
	       (dst->bc_ino.ip->i_diflags & XFS_DIFLAG_REALTIME));

	dst->bc_bmap.allocated += src->bc_bmap.allocated;
	dst->bc_tp->t_highest_agno = src->bc_tp->t_highest_agno;

	src->bc_bmap.allocated = 0;
}
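/*
 * Allocate a single filesystem block for a new btree block.  If the block
 * comes out of a delayed allocation reservation (WASDEL), no transaction
 * block reservation is needed; otherwise the transaction must still hold
 * reserved blocks or the allocation is refused with -ENOSPC.
 */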
STATIC int
xfs_bmbt_alloc_block(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_ptr	*start,
	union xfs_btree_ptr		*new,
	int				*stat)
{
	struct xfs_alloc_arg	args;
	int			error;

	memset(&args, 0, sizeof(args));
	args.tp = cur->bc_tp;
	args.mp = cur->bc_mp;
	xfs_rmap_ino_bmbt_owner(&args.oinfo, cur->bc_ino.ip->i_ino,
			cur->bc_ino.whichfork);
	args.minlen = args.maxlen = args.prod = 1;
	args.wasdel = cur->bc_flags & XFS_BTREE_BMBT_WASDEL;
	if (!args.wasdel && args.tp->t_blk_res == 0)
		return -ENOSPC;

	/*
	 * If we are coming here from something like unwritten extent
	 * conversion, no data extent allocation has been done yet, so we
	 * have to ensure that we attempt to locate the entire set of bmbt
	 * allocations in the same AG, as xfs_bmapi_write() would have
	 * reserved.
	 */
	if (cur->bc_tp->t_highest_agno == NULLAGNUMBER)
		args.minleft = xfs_bmapi_minleft(cur->bc_tp, cur->bc_ino.ip,
					cur->bc_ino.whichfork);

	error = xfs_alloc_vextent_start_ag(&args, be64_to_cpu(start->l));
	if (error)
		return error;

	if (args.fsbno == NULLFSBLOCK && args.minleft) {
		/*
		 * Could not find an AG with enough free space to satisfy
		 * a full btree split.  Try again and, if successful,
		 * activate the lowspace algorithm.
		 */
		args.minleft = 0;
		error = xfs_alloc_vextent_start_ag(&args, 0);
		if (error)
			return error;
		cur->bc_tp->t_flags |= XFS_TRANS_LOWMODE;
	}
	if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) {
		*stat = 0;
		return 0;
	}

	ASSERT(args.len == 1);
	cur->bc_bmap.allocated++;
	cur->bc_ino.ip->i_nblocks++;
	xfs_trans_log_inode(args.tp, cur->bc_ino.ip, XFS_ILOG_CORE);
	xfs_trans_mod_dquot_byino(args.tp, cur->bc_ino.ip,
			XFS_TRANS_DQ_BCOUNT, 1L);

	new->l = cpu_to_be64(args.fsbno);

	*stat = 1;
	return 0;
}

STATIC int
xfs_bmbt_free_block(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = cur->bc_mp;
	struct xfs_inode	*ip = cur->bc_ino.ip;
	struct xfs_trans	*tp = cur->bc_tp;
	xfs_fsblock_t		fsbno = XFS_DADDR_TO_FSB(mp, xfs_buf_daddr(bp));
	struct xfs_owner_info	oinfo;
	int			error;

	xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, cur->bc_ino.whichfork);
	error = xfs_free_extent_later(cur->bc_tp, fsbno, 1, &oinfo,
			XFS_AG_RESV_NONE, 0);
	if (error)
		return error;

	ip->i_nblocks--;
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
	return 0;
}
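/*
 * The root of a bmap btree lives in the inode fork rather than in a full
 * filesystem block, so the record limits for the root level are derived
 * from the current broot buffer (or fork) size instead of the precomputed
 * per-level tables in the mount structure.
 */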
STATIC int
xfs_bmbt_get_minrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	if (level == cur->bc_nlevels - 1) {
		struct xfs_ifork	*ifp = xfs_btree_ifork_ptr(cur);

		return xfs_bmbt_maxrecs(cur->bc_mp,
				ifp->if_broot_bytes, level == 0) / 2;
	}

	return cur->bc_mp->m_bmap_dmnr[level != 0];
}

int
xfs_bmbt_get_maxrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	if (level == cur->bc_nlevels - 1) {
		struct xfs_ifork	*ifp = xfs_btree_ifork_ptr(cur);

		return xfs_bmbt_maxrecs(cur->bc_mp,
				ifp->if_broot_bytes, level == 0);
	}

	return cur->bc_mp->m_bmap_dmxr[level != 0];
}

/*
 * Get the maximum records we could store in the on-disk format.
 *
 * For non-root nodes this is equivalent to xfs_bmbt_get_maxrecs, but
 * for the root node this checks the available space in the dinode fork
 * so that we can resize the in-memory buffer to match it.  After a
 * resize to the maximum size this function returns the same value
 * as xfs_bmbt_get_maxrecs for the root node, too.
 */
STATIC int
xfs_bmbt_get_dmaxrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	if (level != cur->bc_nlevels - 1)
		return cur->bc_mp->m_bmap_dmxr[level != 0];
	return xfs_bmdr_maxrecs(cur->bc_ino.forksize, level == 0);
}
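/*
 * The low key of a record is the first logical file block it maps; the
 * high key is the last (startoff + blockcount - 1).  Ranged queries use
 * both to decide whether a record overlaps the query interval.
 */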
STATIC void
xfs_bmbt_init_key_from_rec(
	union xfs_btree_key		*key,
	const union xfs_btree_rec	*rec)
{
	key->bmbt.br_startoff =
		cpu_to_be64(xfs_bmbt_disk_get_startoff(&rec->bmbt));
}

STATIC void
xfs_bmbt_init_high_key_from_rec(
	union xfs_btree_key		*key,
	const union xfs_btree_rec	*rec)
{
	key->bmbt.br_startoff = cpu_to_be64(
			xfs_bmbt_disk_get_startoff(&rec->bmbt) +
			xfs_bmbt_disk_get_blockcount(&rec->bmbt) - 1);
}

STATIC void
xfs_bmbt_init_rec_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*rec)
{
	xfs_bmbt_disk_set_all(&rec->bmbt, &cur->bc_rec.b);
}

STATIC int64_t
xfs_bmbt_key_diff(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*key)
{
	return (int64_t)be64_to_cpu(key->bmbt.br_startoff) -
			cur->bc_rec.b.br_startoff;
}

STATIC int64_t
xfs_bmbt_diff_two_keys(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*k1,
	const union xfs_btree_key	*k2,
	const union xfs_btree_key	*mask)
{
	uint64_t			a = be64_to_cpu(k1->bmbt.br_startoff);
	uint64_t			b = be64_to_cpu(k2->bmbt.br_startoff);

	ASSERT(!mask || mask->bmbt.br_startoff);

	/*
	 * Note: This routine previously cast a and b to int64 and subtracted
	 * them to generate a result.  This led to problems if b was the
	 * "maximum" key value (all ones) being signed incorrectly, hence this
	 * somewhat less efficient version.
	 */
	if (a > b)
		return 1;
	if (b > a)
		return -1;
	return 0;
}

static xfs_failaddr_t
xfs_bmbt_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_mount;
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	xfs_failaddr_t		fa;
	unsigned int		level;

	if (!xfs_verify_magic(bp, block->bb_magic))
		return __this_address;

	if (xfs_has_crc(mp)) {
		/*
		 * XXX: need a better way of verifying the owner here. Right
		 * now just make sure there has been one set.
		 */
		fa = xfs_btree_fsblock_v5hdr_verify(bp, XFS_RMAP_OWN_UNKNOWN);
		if (fa)
			return fa;
	}

	/*
	 * numrecs and level verification.
	 *
	 * We don't know what fork we belong to, so just verify that the level
	 * does not exceed the maximum of the two. Later checks will be more
	 * precise.
	 */
	level = be16_to_cpu(block->bb_level);
	if (level > max(mp->m_bm_maxlevels[0], mp->m_bm_maxlevels[1]))
		return __this_address;

	return xfs_btree_fsblock_verify(bp, mp->m_bmap_dmxr[level != 0]);
}

static void
xfs_bmbt_read_verify(
	struct xfs_buf	*bp)
{
	xfs_failaddr_t	fa;

	if (!xfs_btree_fsblock_verify_crc(bp))
		xfs_verifier_error(bp, -EFSBADCRC, __this_address);
	else {
		fa = xfs_bmbt_verify(bp);
		if (fa)
			xfs_verifier_error(bp, -EFSCORRUPTED, fa);
	}

	if (bp->b_error)
		trace_xfs_btree_corrupt(bp, _RET_IP_);
}

static void
xfs_bmbt_write_verify(
	struct xfs_buf	*bp)
{
	xfs_failaddr_t	fa;

	fa = xfs_bmbt_verify(bp);
	if (fa) {
		trace_xfs_btree_corrupt(bp, _RET_IP_);
		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
		return;
	}
	xfs_btree_fsblock_calc_crc(bp);
}

const struct xfs_buf_ops xfs_bmbt_buf_ops = {
	.name = "xfs_bmbt",
	.magic = { cpu_to_be32(XFS_BMAP_MAGIC),
		   cpu_to_be32(XFS_BMAP_CRC_MAGIC) },
	.verify_read = xfs_bmbt_read_verify,
	.verify_write = xfs_bmbt_write_verify,
	.verify_struct = xfs_bmbt_verify,
};
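/*
 * Ordering sanity checks (used by debug code): keys must strictly increase,
 * and each record must end at or before the start of the one after it,
 * since a file's mappings may not overlap.
 */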
STATIC int
xfs_bmbt_keys_inorder(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*k1,
	const union xfs_btree_key	*k2)
{
	return be64_to_cpu(k1->bmbt.br_startoff) <
		be64_to_cpu(k2->bmbt.br_startoff);
}

STATIC int
xfs_bmbt_recs_inorder(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_rec	*r1,
	const union xfs_btree_rec	*r2)
{
	return xfs_bmbt_disk_get_startoff(&r1->bmbt) +
		xfs_bmbt_disk_get_blockcount(&r1->bmbt) <=
		xfs_bmbt_disk_get_startoff(&r2->bmbt);
}

STATIC enum xbtree_key_contig
xfs_bmbt_keys_contiguous(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*key1,
	const union xfs_btree_key	*key2,
	const union xfs_btree_key	*mask)
{
	ASSERT(!mask || mask->bmbt.br_startoff);

	return xbtree_key_contig(be64_to_cpu(key1->bmbt.br_startoff),
				 be64_to_cpu(key2->bmbt.br_startoff));
}

const struct xfs_btree_ops xfs_bmbt_ops = {
	.name			= "bmap",
	.type			= XFS_BTREE_TYPE_INODE,

	.rec_len		= sizeof(xfs_bmbt_rec_t),
	.key_len		= sizeof(xfs_bmbt_key_t),
	.ptr_len		= XFS_BTREE_LONG_PTR_LEN,

	.lru_refs		= XFS_BMAP_BTREE_REF,
	.statoff		= XFS_STATS_CALC_INDEX(xs_bmbt_2),

	.dup_cursor		= xfs_bmbt_dup_cursor,
	.update_cursor		= xfs_bmbt_update_cursor,
	.alloc_block		= xfs_bmbt_alloc_block,
	.free_block		= xfs_bmbt_free_block,
	.get_maxrecs		= xfs_bmbt_get_maxrecs,
	.get_minrecs		= xfs_bmbt_get_minrecs,
	.get_dmaxrecs		= xfs_bmbt_get_dmaxrecs,
	.init_key_from_rec	= xfs_bmbt_init_key_from_rec,
	.init_high_key_from_rec	= xfs_bmbt_init_high_key_from_rec,
	.init_rec_from_cur	= xfs_bmbt_init_rec_from_cur,
	.key_diff		= xfs_bmbt_key_diff,
	.diff_two_keys		= xfs_bmbt_diff_two_keys,
	.buf_ops		= &xfs_bmbt_buf_ops,
	.keys_inorder		= xfs_bmbt_keys_inorder,
	.recs_inorder		= xfs_bmbt_recs_inorder,
	.keys_contiguous	= xfs_bmbt_keys_contiguous,
};

/*
 * Create a new bmap btree cursor.
 *
 * For staging cursors, -1 is passed in as whichfork.
 */
struct xfs_btree_cur *
xfs_bmbt_init_cursor(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork)
{
	struct xfs_btree_cur	*cur;
	unsigned int		maxlevels;

	ASSERT(whichfork != XFS_COW_FORK);

	/*
	 * The data fork always has the larger maxlevels, so use that for
	 * staging cursors.
	 */
	switch (whichfork) {
	case XFS_STAGING_FORK:
		maxlevels = mp->m_bm_maxlevels[XFS_DATA_FORK];
		break;
	default:
		maxlevels = mp->m_bm_maxlevels[whichfork];
		break;
	}
	cur = xfs_btree_alloc_cursor(mp, tp, &xfs_bmbt_ops, maxlevels,
			xfs_bmbt_cur_cache);
	cur->bc_ino.ip = ip;
	cur->bc_ino.whichfork = whichfork;
	cur->bc_bmap.allocated = 0;
	if (whichfork != XFS_STAGING_FORK) {
		struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);

		cur->bc_nlevels = be16_to_cpu(ifp->if_broot->bb_level) + 1;
		cur->bc_ino.forksize = xfs_inode_fork_size(ip, whichfork);
	}
	return cur;
}
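/*
 * Illustrative cursor lifecycle, as callers elsewhere in xfs typically use
 * it (a sketch, not code from this file; error handling elided):
 *
 *	cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK);
 *	cur->bc_rec.b = irec;			// record to look up
 *	error = xfs_btree_lookup(cur, XFS_LOOKUP_EQ, &stat);
 *	...
 *	xfs_btree_del_cursor(cur, error);
 */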
/* Calculate number of records in a block mapping btree block. */
static inline unsigned int
xfs_bmbt_block_maxrecs(
	unsigned int		blocklen,
	bool			leaf)
{
	if (leaf)
		return blocklen / sizeof(xfs_bmbt_rec_t);
	return blocklen / (sizeof(xfs_bmbt_key_t) + sizeof(xfs_bmbt_ptr_t));
}

/*
 * Swap in the new inode fork root.  Once we pass this point the newly rebuilt
 * mappings are in place and we have to kill off any old btree blocks.
 */
void
xfs_bmbt_commit_staged_btree(
	struct xfs_btree_cur	*cur,
	struct xfs_trans	*tp,
	int			whichfork)
{
	struct xbtree_ifakeroot	*ifake = cur->bc_ino.ifake;
	struct xfs_ifork	*ifp;
	static const short	brootflag[2] = {XFS_ILOG_DBROOT, XFS_ILOG_ABROOT};
	static const short	extflag[2] = {XFS_ILOG_DEXT, XFS_ILOG_AEXT};
	int			flags = XFS_ILOG_CORE;

	ASSERT(cur->bc_flags & XFS_BTREE_STAGING);
	ASSERT(whichfork != XFS_COW_FORK);

	/*
	 * Free any resources hanging off the real fork, then shallow-copy the
	 * staging fork's contents into the real fork to transfer everything
	 * we just built.
	 */
	ifp = xfs_ifork_ptr(cur->bc_ino.ip, whichfork);
	xfs_idestroy_fork(ifp);
	memcpy(ifp, ifake->if_fork, sizeof(struct xfs_ifork));

	switch (ifp->if_format) {
	case XFS_DINODE_FMT_EXTENTS:
		flags |= extflag[whichfork];
		break;
	case XFS_DINODE_FMT_BTREE:
		flags |= brootflag[whichfork];
		break;
	default:
		ASSERT(0);
		break;
	}
	xfs_trans_log_inode(tp, cur->bc_ino.ip, flags);
	xfs_btree_commit_ifakeroot(cur, tp, whichfork);
}

/*
 * Calculate number of records in a bmap btree block.
 */
int
xfs_bmbt_maxrecs(
	struct xfs_mount	*mp,
	int			blocklen,
	int			leaf)
{
	blocklen -= XFS_BMBT_BLOCK_LEN(mp);
	return xfs_bmbt_block_maxrecs(blocklen, leaf);
}

/*
 * Calculate the maximum possible height of the btree that the on-disk format
 * supports.  This is used for sizing structures large enough to support every
 * possible configuration of a filesystem that might get mounted.
 */
unsigned int
xfs_bmbt_maxlevels_ondisk(void)
{
	unsigned int		minrecs[2];
	unsigned int		blocklen;

	blocklen = min(XFS_MIN_BLOCKSIZE - XFS_BTREE_SBLOCK_LEN,
		       XFS_MIN_CRC_BLOCKSIZE - XFS_BTREE_SBLOCK_CRC_LEN);

	minrecs[0] = xfs_bmbt_block_maxrecs(blocklen, true) / 2;
	minrecs[1] = xfs_bmbt_block_maxrecs(blocklen, false) / 2;

	/* One extra level for the inode root. */
	return xfs_btree_compute_maxlevels(minrecs,
			XFS_MAX_EXTCNT_DATA_FORK_LARGE) + 1;
}

/*
 * Calculate number of records in a bmap btree inode root.
 */
int
xfs_bmdr_maxrecs(
	int			blocklen,
	int			leaf)
{
	blocklen -= sizeof(xfs_bmdr_block_t);

	if (leaf)
		return blocklen / sizeof(xfs_bmdr_rec_t);
	return blocklen / (sizeof(xfs_bmdr_key_t) + sizeof(xfs_bmdr_ptr_t));
}

/*
 * Change the owner of a btree format fork of the inode passed in.  Change it
 * to the owner that is passed in so that we can change owners before or after
 * we switch forks between inodes.  The operation that the caller is doing will
 * determine whether it needs to change owner before or after the switch.
 *
 * For demand paged transactional modification, the fork switch should be done
 * after reading in all the blocks, modifying them and pinning them in the
 * transaction.  For modification when the buffers are already pinned in
 * memory, the fork switch can be done before changing the owner as we won't
 * need to validate the owner until the btree buffers are unpinned and writes
 * can occur again.
 *
 * For recovery based ownership change, there is no transactional context and
 * so a buffer list must be supplied so that we can record the buffers that we
 * modified for the caller to issue IO on.
 */
int
xfs_bmbt_change_owner(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork,
	xfs_ino_t		new_owner,
	struct list_head	*buffer_list)
{
	struct xfs_btree_cur	*cur;
	int			error;

	ASSERT(tp || buffer_list);
	ASSERT(!(tp && buffer_list));
	ASSERT(xfs_ifork_ptr(ip, whichfork)->if_format == XFS_DINODE_FMT_BTREE);

	cur = xfs_bmbt_init_cursor(ip->i_mount, tp, ip, whichfork);
	cur->bc_flags |= XFS_BTREE_BMBT_INVALID_OWNER;

	error = xfs_btree_change_owner(cur, new_owner, buffer_list);
	xfs_btree_del_cursor(cur, error);
	return error;
}

/* Calculate the bmap btree size for some records. */
unsigned long long
xfs_bmbt_calc_size(
	struct xfs_mount	*mp,
	unsigned long long	len)
{
	return xfs_btree_calc_size(mp->m_bmap_dmnr, len);
}

int __init
xfs_bmbt_init_cur_cache(void)
{
	xfs_bmbt_cur_cache = kmem_cache_create("xfs_bmbt_cur",
			xfs_btree_cur_sizeof(xfs_bmbt_maxlevels_ondisk()),
			0, 0, NULL);

	if (!xfs_bmbt_cur_cache)
		return -ENOMEM;
	return 0;
}

void
xfs_bmbt_destroy_cur_cache(void)
{
	kmem_cache_destroy(xfs_bmbt_cur_cache);
	xfs_bmbt_cur_cache = NULL;
}