/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_buf_item.h"
#include "xfs_btree.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_cksum.h"
#include "xfs_alloc.h"

/*
 * Cursor allocation zone.
 */
kmem_zone_t	*xfs_btree_cur_zone;

/*
 * Btree magic numbers.
 */
static const __uint32_t xfs_magics[2][XFS_BTNUM_MAX] = {
	{ XFS_ABTB_MAGIC, XFS_ABTC_MAGIC, XFS_BMAP_MAGIC, XFS_IBT_MAGIC,
	  XFS_FIBT_MAGIC },
	{ XFS_ABTB_CRC_MAGIC, XFS_ABTC_CRC_MAGIC,
	  XFS_BMAP_CRC_MAGIC, XFS_IBT_CRC_MAGIC, XFS_FIBT_CRC_MAGIC }
};
#define xfs_btree_magic(cur) \
	xfs_magics[!!((cur)->bc_flags & XFS_BTREE_CRC_BLOCKS)][cur->bc_btnum]
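/*
 * xfs_btree_magic() picks row 0 (non-CRC) or row 1 (CRC) of the table above
 * based on the cursor's XFS_BTREE_CRC_BLOCKS flag, then indexes by btree
 * type; a CRC-enabled inode btree cursor, for instance, resolves to
 * XFS_IBT_CRC_MAGIC.
 */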
STATIC int				/* error (0 or EFSCORRUPTED) */
xfs_btree_check_lblock(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	struct xfs_btree_block	*block,	/* btree long form block pointer */
	int			level,	/* level of the btree block */
	struct xfs_buf		*bp)	/* buffer for block, if any */
{
	int			lblock_ok = 1; /* block passes checks */
	struct xfs_mount	*mp;	/* file system mount point */

	mp = cur->bc_mp;

	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		lblock_ok = lblock_ok &&
			uuid_equal(&block->bb_u.l.bb_uuid,
				   &mp->m_sb.sb_meta_uuid) &&
			block->bb_u.l.bb_blkno == cpu_to_be64(
				bp ? bp->b_bn : XFS_BUF_DADDR_NULL);
	}

	lblock_ok = lblock_ok &&
		be32_to_cpu(block->bb_magic) == xfs_btree_magic(cur) &&
		be16_to_cpu(block->bb_level) == level &&
		be16_to_cpu(block->bb_numrecs) <=
			cur->bc_ops->get_maxrecs(cur, level) &&
		block->bb_u.l.bb_leftsib &&
		(block->bb_u.l.bb_leftsib == cpu_to_be64(NULLFSBLOCK) ||
		 XFS_FSB_SANITY_CHECK(mp,
			be64_to_cpu(block->bb_u.l.bb_leftsib))) &&
		block->bb_u.l.bb_rightsib &&
		(block->bb_u.l.bb_rightsib == cpu_to_be64(NULLFSBLOCK) ||
		 XFS_FSB_SANITY_CHECK(mp,
			be64_to_cpu(block->bb_u.l.bb_rightsib)));

	if (unlikely(XFS_TEST_ERROR(!lblock_ok, mp,
			XFS_ERRTAG_BTREE_CHECK_LBLOCK,
			XFS_RANDOM_BTREE_CHECK_LBLOCK))) {
		if (bp)
			trace_xfs_btree_corrupt(bp, _RET_IP_);
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
		return -EFSCORRUPTED;
	}
	return 0;
}

STATIC int				/* error (0 or EFSCORRUPTED) */
xfs_btree_check_sblock(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	struct xfs_btree_block	*block,	/* btree short form block pointer */
	int			level,	/* level of the btree block */
	struct xfs_buf		*bp)	/* buffer containing block */
{
	struct xfs_mount	*mp;	/* file system mount point */
	struct xfs_buf		*agbp;	/* buffer for ag. freespace struct */
	struct xfs_agf		*agf;	/* ag. freespace structure */
	xfs_agblock_t		agflen;	/* native ag. freespace length */
	int			sblock_ok = 1; /* block passes checks */

	mp = cur->bc_mp;
	agbp = cur->bc_private.a.agbp;
	agf = XFS_BUF_TO_AGF(agbp);
	agflen = be32_to_cpu(agf->agf_length);

	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		sblock_ok = sblock_ok &&
			uuid_equal(&block->bb_u.s.bb_uuid,
				   &mp->m_sb.sb_meta_uuid) &&
			block->bb_u.s.bb_blkno == cpu_to_be64(
				bp ? bp->b_bn : XFS_BUF_DADDR_NULL);
	}

	sblock_ok = sblock_ok &&
		be32_to_cpu(block->bb_magic) == xfs_btree_magic(cur) &&
		be16_to_cpu(block->bb_level) == level &&
		be16_to_cpu(block->bb_numrecs) <=
			cur->bc_ops->get_maxrecs(cur, level) &&
		(block->bb_u.s.bb_leftsib == cpu_to_be32(NULLAGBLOCK) ||
		 be32_to_cpu(block->bb_u.s.bb_leftsib) < agflen) &&
		block->bb_u.s.bb_leftsib &&
		(block->bb_u.s.bb_rightsib == cpu_to_be32(NULLAGBLOCK) ||
		 be32_to_cpu(block->bb_u.s.bb_rightsib) < agflen) &&
		block->bb_u.s.bb_rightsib;

	if (unlikely(XFS_TEST_ERROR(!sblock_ok, mp,
			XFS_ERRTAG_BTREE_CHECK_SBLOCK,
			XFS_RANDOM_BTREE_CHECK_SBLOCK))) {
		if (bp)
			trace_xfs_btree_corrupt(bp, _RET_IP_);
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
		return -EFSCORRUPTED;
	}
	return 0;
}

/*
 * Debug routine: check that block header is ok.
 */
int
xfs_btree_check_block(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	struct xfs_btree_block	*block,	/* generic btree block pointer */
	int			level,	/* level of the btree block */
	struct xfs_buf		*bp)	/* buffer containing block, if any */
{
	if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
		return xfs_btree_check_lblock(cur, block, level, bp);
	else
		return xfs_btree_check_sblock(cur, block, level, bp);
}
/*
 * Check that (long) pointer is ok.
 */
int					/* error (0 or EFSCORRUPTED) */
xfs_btree_check_lptr(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_fsblock_t		bno,	/* btree block disk address */
	int			level)	/* btree block level */
{
	XFS_WANT_CORRUPTED_RETURN(cur->bc_mp,
		level > 0 &&
		bno != NULLFSBLOCK &&
		XFS_FSB_SANITY_CHECK(cur->bc_mp, bno));
	return 0;
}

#ifdef DEBUG
/*
 * Check that (short) pointer is ok.
 */
STATIC int				/* error (0 or EFSCORRUPTED) */
xfs_btree_check_sptr(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agblock_t		bno,	/* btree block disk address */
	int			level)	/* btree block level */
{
	xfs_agblock_t		agblocks = cur->bc_mp->m_sb.sb_agblocks;

	XFS_WANT_CORRUPTED_RETURN(cur->bc_mp,
		level > 0 &&
		bno != NULLAGBLOCK &&
		bno != 0 &&
		bno < agblocks);
	return 0;
}

/*
 * Check that block ptr is ok.
 */
STATIC int				/* error (0 or EFSCORRUPTED) */
xfs_btree_check_ptr(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	union xfs_btree_ptr	*ptr,	/* btree block disk address */
	int			index,	/* offset from ptr to check */
	int			level)	/* btree block level */
{
	if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
		return xfs_btree_check_lptr(cur,
				be64_to_cpu((&ptr->l)[index]), level);
	} else {
		return xfs_btree_check_sptr(cur,
				be32_to_cpu((&ptr->s)[index]), level);
	}
}
#endif

/*
 * Calculate CRC on the whole btree block and stuff it into the
 * long-form btree header.
 *
 * Prior to calculating the CRC, pull the LSN out of the buffer log item and
 * put it into the buffer so recovery knows what the last modification was
 * that made it to disk.
 */
void
xfs_btree_lblock_calc_crc(
	struct xfs_buf		*bp)
{
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	struct xfs_buf_log_item	*bip = bp->b_fspriv;

	if (!xfs_sb_version_hascrc(&bp->b_target->bt_mount->m_sb))
		return;
	if (bip)
		block->bb_u.l.bb_lsn = cpu_to_be64(bip->bli_item.li_lsn);
	xfs_buf_update_cksum(bp, XFS_BTREE_LBLOCK_CRC_OFF);
}

bool
xfs_btree_lblock_verify_crc(
	struct xfs_buf		*bp)
{
	if (xfs_sb_version_hascrc(&bp->b_target->bt_mount->m_sb))
		return xfs_buf_verify_cksum(bp, XFS_BTREE_LBLOCK_CRC_OFF);

	return true;
}

/*
 * Calculate CRC on the whole btree block and stuff it into the
 * short-form btree header.
 *
 * Prior to calculating the CRC, pull the LSN out of the buffer log item and
 * put it into the buffer so recovery knows what the last modification was
 * that made it to disk.
 */
void
xfs_btree_sblock_calc_crc(
	struct xfs_buf		*bp)
{
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	struct xfs_buf_log_item	*bip = bp->b_fspriv;

	if (!xfs_sb_version_hascrc(&bp->b_target->bt_mount->m_sb))
		return;
	if (bip)
		block->bb_u.s.bb_lsn = cpu_to_be64(bip->bli_item.li_lsn);
	xfs_buf_update_cksum(bp, XFS_BTREE_SBLOCK_CRC_OFF);
}

bool
xfs_btree_sblock_verify_crc(
	struct xfs_buf		*bp)
{
	if (xfs_sb_version_hascrc(&bp->b_target->bt_mount->m_sb))
		return xfs_buf_verify_cksum(bp, XFS_BTREE_SBLOCK_CRC_OFF);

	return true;
}
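/*
 * The *_calc_crc() and *_verify_crc() helpers above come in pairs: the
 * per-btree buffer ops typically call the calc variant from their write
 * verifier, once the LSN has been stamped, and the verify variant from
 * their read verifier.  On filesystems without CRCs both are no-ops.
 */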
/*
 * Delete the btree cursor.
 */
void
xfs_btree_del_cursor(
	xfs_btree_cur_t	*cur,		/* btree cursor */
	int		error)		/* del because of error */
{
	int		i;		/* btree level */

	/*
	 * Clear the buffer pointers, and release the buffers.
	 * If we're doing this in the face of an error, we
	 * need to make sure to inspect all of the entries
	 * in the bc_bufs array for buffers to be unlocked.
	 * This is because some of the btree code works from
	 * level n down to 0, and if we get an error along
	 * the way we won't have initialized all the entries
	 * down to 0.
	 */
	for (i = 0; i < cur->bc_nlevels; i++) {
		if (cur->bc_bufs[i])
			xfs_trans_brelse(cur->bc_tp, cur->bc_bufs[i]);
		else if (!error)
			break;
	}
	/*
	 * Can't free a bmap cursor without having dealt with the
	 * allocated indirect blocks' accounting.
	 */
	ASSERT(cur->bc_btnum != XFS_BTNUM_BMAP ||
	       cur->bc_private.b.allocated == 0);
	/*
	 * Free the cursor.
	 */
	kmem_zone_free(xfs_btree_cur_zone, cur);
}

/*
 * Duplicate the btree cursor.
 * Allocate a new one, copy the record, re-get the buffers.
 */
int					/* error */
xfs_btree_dup_cursor(
	xfs_btree_cur_t	*cur,		/* input cursor */
	xfs_btree_cur_t	**ncur)		/* output cursor */
{
	xfs_buf_t	*bp;		/* btree block's buffer pointer */
	int		error;		/* error return value */
	int		i;		/* level number of btree block */
	xfs_mount_t	*mp;		/* mount structure for filesystem */
	xfs_btree_cur_t	*new;		/* new cursor value */
	xfs_trans_t	*tp;		/* transaction pointer, can be NULL */

	tp = cur->bc_tp;
	mp = cur->bc_mp;

	/*
	 * Allocate a new cursor like the old one.
	 */
	new = cur->bc_ops->dup_cursor(cur);

	/*
	 * Copy the record currently in the cursor.
	 */
	new->bc_rec = cur->bc_rec;

	/*
	 * For each level of the current cursor, re-get the buffer and copy
	 * the ptr value.
	 */
	for (i = 0; i < new->bc_nlevels; i++) {
		new->bc_ptrs[i] = cur->bc_ptrs[i];
		new->bc_ra[i] = cur->bc_ra[i];
		bp = cur->bc_bufs[i];
		if (bp) {
			error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
						   XFS_BUF_ADDR(bp), mp->m_bsize,
						   0, &bp,
						   cur->bc_ops->buf_ops);
			if (error) {
				xfs_btree_del_cursor(new, error);
				*ncur = NULL;
				return error;
			}
		}
		new->bc_bufs[i] = bp;
	}
	*ncur = new;
	return 0;
}

/*
 * XFS btree block layout and addressing:
 *
 * There are two types of blocks in the btree: leaf and non-leaf blocks.
 *
 * A leaf block starts with a header and is followed by records containing
 * the values.  A non-leaf block also starts with the same header, and
 * then first contains lookup keys followed by an equal number of pointers
 * to the btree blocks at the previous level.
 *
 *		+--------+-------+-------+-------+-------+-------+-------+
 * Leaf:	| header | rec 1 | rec 2 | rec 3 | rec 4 | rec 5 | rec N |
 *		+--------+-------+-------+-------+-------+-------+-------+
 *
 *		+--------+-------+-------+-------+-------+-------+-------+
 * Non-Leaf:	| header | key 1 | key 2 | key N | ptr 1 | ptr 2 | ptr N |
 *		+--------+-------+-------+-------+-------+-------+-------+
 *
 * The header is called struct xfs_btree_block for reasons better left unknown
 * and comes in different versions for short (32bit) and long (64bit) block
 * pointers.  The record and key structures are defined by the btree instances
 * and opaque to the btree core.  The block pointers are simple disk endian
 * integers, available in a short (32bit) and long (64bit) variant.
 *
 * The helpers below calculate the offset of a given record, key or pointer
 * into a btree block (xfs_btree_*_offset) or return a pointer to the given
 * record, key or pointer (xfs_btree_*_addr).  Note that all addressing
 * inside the btree block is done using indices starting at one, not zero!
 */
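/*
 * Worked example of the addressing scheme: for a long-form CRC block,
 * xfs_btree_rec_offset() below yields XFS_BTREE_LBLOCK_CRC_LEN for record 1,
 * i.e. the first record starts right after the header, and record n starts
 * at header length + (n - 1) * rec_len.  Block pointers in a non-leaf block
 * start only after room for get_maxrecs() keys, which is why
 * xfs_btree_ptr_offset() also takes the block's level.
 */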
/*
 * Return size of the btree block header for this btree instance.
 */
static inline size_t xfs_btree_block_len(struct xfs_btree_cur *cur)
{
	if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
		if (cur->bc_flags & XFS_BTREE_CRC_BLOCKS)
			return XFS_BTREE_LBLOCK_CRC_LEN;
		return XFS_BTREE_LBLOCK_LEN;
	}
	if (cur->bc_flags & XFS_BTREE_CRC_BLOCKS)
		return XFS_BTREE_SBLOCK_CRC_LEN;
	return XFS_BTREE_SBLOCK_LEN;
}

/*
 * Return size of btree block pointers for this btree instance.
 */
static inline size_t xfs_btree_ptr_len(struct xfs_btree_cur *cur)
{
	return (cur->bc_flags & XFS_BTREE_LONG_PTRS) ?
		sizeof(__be64) : sizeof(__be32);
}

/*
 * Calculate offset of the n-th record in a btree block.
 */
STATIC size_t
xfs_btree_rec_offset(
	struct xfs_btree_cur	*cur,
	int			n)
{
	return xfs_btree_block_len(cur) +
		(n - 1) * cur->bc_ops->rec_len;
}

/*
 * Calculate offset of the n-th key in a btree block.
 */
STATIC size_t
xfs_btree_key_offset(
	struct xfs_btree_cur	*cur,
	int			n)
{
	return xfs_btree_block_len(cur) +
		(n - 1) * cur->bc_ops->key_len;
}

/*
 * Calculate offset of the n-th block pointer in a btree block.
 */
STATIC size_t
xfs_btree_ptr_offset(
	struct xfs_btree_cur	*cur,
	int			n,
	int			level)
{
	return xfs_btree_block_len(cur) +
		cur->bc_ops->get_maxrecs(cur, level) * cur->bc_ops->key_len +
		(n - 1) * xfs_btree_ptr_len(cur);
}

/*
 * Return a pointer to the n-th record in the btree block.
 */
STATIC union xfs_btree_rec *
xfs_btree_rec_addr(
	struct xfs_btree_cur	*cur,
	int			n,
	struct xfs_btree_block	*block)
{
	return (union xfs_btree_rec *)
		((char *)block + xfs_btree_rec_offset(cur, n));
}

/*
 * Return a pointer to the n-th key in the btree block.
 */
STATIC union xfs_btree_key *
xfs_btree_key_addr(
	struct xfs_btree_cur	*cur,
	int			n,
	struct xfs_btree_block	*block)
{
	return (union xfs_btree_key *)
		((char *)block + xfs_btree_key_offset(cur, n));
}

/*
 * Return a pointer to the n-th block pointer in the btree block.
 */
STATIC union xfs_btree_ptr *
xfs_btree_ptr_addr(
	struct xfs_btree_cur	*cur,
	int			n,
	struct xfs_btree_block	*block)
{
	int			level = xfs_btree_get_level(block);

	ASSERT(block->bb_level != 0);

	return (union xfs_btree_ptr *)
		((char *)block + xfs_btree_ptr_offset(cur, n, level));
}
/*
 * Get the root block which is stored in the inode.
 *
 * For now this btree implementation assumes the btree root is always
 * stored in the if_broot field of an inode fork.
 */
STATIC struct xfs_btree_block *
xfs_btree_get_iroot(
	struct xfs_btree_cur	*cur)
{
	struct xfs_ifork	*ifp;

	ifp = XFS_IFORK_PTR(cur->bc_private.b.ip, cur->bc_private.b.whichfork);
	return (struct xfs_btree_block *)ifp->if_broot;
}

/*
 * Retrieve the block pointer from the cursor at the given level.
 * This may be an inode btree root or from a buffer.
 */
STATIC struct xfs_btree_block *		/* generic btree block pointer */
xfs_btree_get_block(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	int			level,	/* level in btree */
	struct xfs_buf		**bpp)	/* buffer containing the block */
{
	if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
	    (level == cur->bc_nlevels - 1)) {
		*bpp = NULL;
		return xfs_btree_get_iroot(cur);
	}

	*bpp = cur->bc_bufs[level];
	return XFS_BUF_TO_BLOCK(*bpp);
}

/*
 * Get a buffer for the block, return it with no data read.
 * Long-form addressing.
 */
xfs_buf_t *				/* buffer for fsbno */
xfs_btree_get_bufl(
	xfs_mount_t	*mp,		/* file system mount point */
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_fsblock_t	fsbno,		/* file system block number */
	uint		lock)		/* lock flags for get_buf */
{
	xfs_daddr_t	d;		/* real disk block address */

	ASSERT(fsbno != NULLFSBLOCK);
	d = XFS_FSB_TO_DADDR(mp, fsbno);
	return xfs_trans_get_buf(tp, mp->m_ddev_targp, d, mp->m_bsize, lock);
}

/*
 * Get a buffer for the block, return it with no data read.
 * Short-form addressing.
 */
xfs_buf_t *				/* buffer for agno/agbno */
xfs_btree_get_bufs(
	xfs_mount_t	*mp,		/* file system mount point */
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_agnumber_t	agno,		/* allocation group number */
	xfs_agblock_t	agbno,		/* allocation group block number */
	uint		lock)		/* lock flags for get_buf */
{
	xfs_daddr_t	d;		/* real disk block address */

	ASSERT(agno != NULLAGNUMBER);
	ASSERT(agbno != NULLAGBLOCK);
	d = XFS_AGB_TO_DADDR(mp, agno, agbno);
	return xfs_trans_get_buf(tp, mp->m_ddev_targp, d, mp->m_bsize, lock);
}

/*
 * Check for the cursor referring to the last block at the given level.
 */
int					/* 1=is last block, 0=not last block */
xfs_btree_islastblock(
	xfs_btree_cur_t		*cur,	/* btree cursor */
	int			level)	/* level to check */
{
	struct xfs_btree_block	*block;	/* generic btree block pointer */
	xfs_buf_t		*bp;	/* buffer containing block */

	block = xfs_btree_get_block(cur, level, &bp);
	xfs_btree_check_block(cur, block, level, bp);
	if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
		return block->bb_u.l.bb_rightsib == cpu_to_be64(NULLFSBLOCK);
	else
		return block->bb_u.s.bb_rightsib == cpu_to_be32(NULLAGBLOCK);
}

/*
 * Change the cursor to point to the first record at the given level.
 * Other levels are unaffected.
 */
STATIC int				/* success=1, failure=0 */
xfs_btree_firstrec(
	xfs_btree_cur_t		*cur,	/* btree cursor */
	int			level)	/* level to change */
{
	struct xfs_btree_block	*block;	/* generic btree block pointer */
	xfs_buf_t		*bp;	/* buffer containing block */

	/*
	 * Get the block pointer for this level.
	 */
	block = xfs_btree_get_block(cur, level, &bp);
	xfs_btree_check_block(cur, block, level, bp);
	/*
	 * It's empty, there is no such record.
	 */
	if (!block->bb_numrecs)
		return 0;
	/*
	 * Set the ptr value to 1, that's the first record/key.
	 */
	cur->bc_ptrs[level] = 1;
	return 1;
}

/*
 * Change the cursor to point to the last record in the current block
 * at the given level.  Other levels are unaffected.
 */
STATIC int				/* success=1, failure=0 */
xfs_btree_lastrec(
	xfs_btree_cur_t		*cur,	/* btree cursor */
	int			level)	/* level to change */
{
	struct xfs_btree_block	*block;	/* generic btree block pointer */
	xfs_buf_t		*bp;	/* buffer containing block */

	/*
	 * Get the block pointer for this level.
	 */
	block = xfs_btree_get_block(cur, level, &bp);
	xfs_btree_check_block(cur, block, level, bp);
	/*
	 * It's empty, there is no such record.
	 */
	if (!block->bb_numrecs)
		return 0;
	/*
	 * Set the ptr value to numrecs, that's the last record/key.
	 */
	cur->bc_ptrs[level] = be16_to_cpu(block->bb_numrecs);
	return 1;
}

/*
 * Compute first and last byte offsets for the fields given.
 * Interprets the offsets table, which contains struct field offsets.
 */
void
xfs_btree_offsets(
	__int64_t	fields,		/* bitmask of fields */
	const short	*offsets,	/* table of field offsets */
	int		nbits,		/* number of bits to inspect */
	int		*first,		/* output: first byte offset */
	int		*last)		/* output: last byte offset */
{
	int		i;		/* current bit number */
	__int64_t	imask;		/* mask for current bit number */

	ASSERT(fields != 0);
	/*
	 * Find the lowest bit, so the first byte offset.
	 */
	for (i = 0, imask = 1LL; ; i++, imask <<= 1) {
		if (imask & fields) {
			*first = offsets[i];
			break;
		}
	}
	/*
	 * Find the highest bit, so the last byte offset.
	 */
	for (i = nbits - 1, imask = 1LL << i; ; i--, imask >>= 1) {
		if (imask & fields) {
			*last = offsets[i + 1] - 1;
			break;
		}
	}
}
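/*
 * Worked example: if the lowest set bit in @fields is bit i and the highest
 * is bit j, the result is *first = offsets[i] and *last = offsets[j + 1] - 1.
 * This is why the offset tables passed in from xfs_btree_log_block() carry
 * one extra entry (the total header length) as a sentinel past the last
 * field.
 */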
/*
 * Get a buffer for the block, return it read in.
 * Long-form addressing.
 */
int
xfs_btree_read_bufl(
	struct xfs_mount	*mp,		/* file system mount point */
	struct xfs_trans	*tp,		/* transaction pointer */
	xfs_fsblock_t		fsbno,		/* file system block number */
	uint			lock,		/* lock flags for read_buf */
	struct xfs_buf		**bpp,		/* buffer for fsbno */
	int			refval,		/* ref count value for buffer */
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf		*bp;		/* return value */
	xfs_daddr_t		d;		/* real disk block address */
	int			error;

	ASSERT(fsbno != NULLFSBLOCK);
	d = XFS_FSB_TO_DADDR(mp, fsbno);
	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, d,
				   mp->m_bsize, lock, &bp, ops);
	if (error)
		return error;
	if (bp)
		xfs_buf_set_ref(bp, refval);
	*bpp = bp;
	return 0;
}

/*
 * Read-ahead the block, don't wait for it, don't return a buffer.
 * Long-form addressing.
 */
/* ARGSUSED */
void
xfs_btree_reada_bufl(
	struct xfs_mount	*mp,		/* file system mount point */
	xfs_fsblock_t		fsbno,		/* file system block number */
	xfs_extlen_t		count,		/* count of filesystem blocks */
	const struct xfs_buf_ops *ops)
{
	xfs_daddr_t		d;

	ASSERT(fsbno != NULLFSBLOCK);
	d = XFS_FSB_TO_DADDR(mp, fsbno);
	xfs_buf_readahead(mp->m_ddev_targp, d, mp->m_bsize * count, ops);
}
/*
 * Read-ahead the block, don't wait for it, don't return a buffer.
 * Short-form addressing.
 */
/* ARGSUSED */
void
xfs_btree_reada_bufs(
	struct xfs_mount	*mp,		/* file system mount point */
	xfs_agnumber_t		agno,		/* allocation group number */
	xfs_agblock_t		agbno,		/* allocation group block number */
	xfs_extlen_t		count,		/* count of filesystem blocks */
	const struct xfs_buf_ops *ops)
{
	xfs_daddr_t		d;

	ASSERT(agno != NULLAGNUMBER);
	ASSERT(agbno != NULLAGBLOCK);
	d = XFS_AGB_TO_DADDR(mp, agno, agbno);
	xfs_buf_readahead(mp->m_ddev_targp, d, mp->m_bsize * count, ops);
}

STATIC int
xfs_btree_readahead_lblock(
	struct xfs_btree_cur	*cur,
	int			lr,
	struct xfs_btree_block	*block)
{
	int			rval = 0;
	xfs_fsblock_t		left = be64_to_cpu(block->bb_u.l.bb_leftsib);
	xfs_fsblock_t		right = be64_to_cpu(block->bb_u.l.bb_rightsib);

	if ((lr & XFS_BTCUR_LEFTRA) && left != NULLFSBLOCK) {
		xfs_btree_reada_bufl(cur->bc_mp, left, 1,
				     cur->bc_ops->buf_ops);
		rval++;
	}

	if ((lr & XFS_BTCUR_RIGHTRA) && right != NULLFSBLOCK) {
		xfs_btree_reada_bufl(cur->bc_mp, right, 1,
				     cur->bc_ops->buf_ops);
		rval++;
	}

	return rval;
}

STATIC int
xfs_btree_readahead_sblock(
	struct xfs_btree_cur	*cur,
	int			lr,
	struct xfs_btree_block	*block)
{
	int			rval = 0;
	xfs_agblock_t		left = be32_to_cpu(block->bb_u.s.bb_leftsib);
	xfs_agblock_t		right = be32_to_cpu(block->bb_u.s.bb_rightsib);


	if ((lr & XFS_BTCUR_LEFTRA) && left != NULLAGBLOCK) {
		xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.a.agno,
				     left, 1, cur->bc_ops->buf_ops);
		rval++;
	}

	if ((lr & XFS_BTCUR_RIGHTRA) && right != NULLAGBLOCK) {
		xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.a.agno,
				     right, 1, cur->bc_ops->buf_ops);
		rval++;
	}

	return rval;
}

/*
 * Read-ahead btree blocks, at the given level.
 * Bits in lr are set from XFS_BTCUR_{LEFT,RIGHT}RA.
 */
STATIC int
xfs_btree_readahead(
	struct xfs_btree_cur	*cur,		/* btree cursor */
	int			lev,		/* level in btree */
	int			lr)		/* left/right bits */
{
	struct xfs_btree_block	*block;

	/*
	 * No readahead needed if we are at the root level and the
	 * btree root is stored in the inode.
	 */
	if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
	    (lev == cur->bc_nlevels - 1))
		return 0;

	if ((cur->bc_ra[lev] | lr) == cur->bc_ra[lev])
		return 0;

	cur->bc_ra[lev] |= lr;
	block = XFS_BUF_TO_BLOCK(cur->bc_bufs[lev]);

	if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
		return xfs_btree_readahead_lblock(cur, lr, block);
	return xfs_btree_readahead_sblock(cur, lr, block);
}
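/*
 * Note that cur->bc_ra[lev] acts as a per-buffer memo: once a sibling has
 * been read ahead for the block currently held at this level, the
 * corresponding XFS_BTCUR_{LEFT,RIGHT}RA bit stays set and further calls
 * are no-ops until xfs_btree_setbuf() installs a new buffer and resets the
 * bits.
 */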
STATIC xfs_daddr_t
xfs_btree_ptr_to_daddr(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
		ASSERT(ptr->l != cpu_to_be64(NULLFSBLOCK));

		return XFS_FSB_TO_DADDR(cur->bc_mp, be64_to_cpu(ptr->l));
	} else {
		ASSERT(cur->bc_private.a.agno != NULLAGNUMBER);
		ASSERT(ptr->s != cpu_to_be32(NULLAGBLOCK));

		return XFS_AGB_TO_DADDR(cur->bc_mp, cur->bc_private.a.agno,
					be32_to_cpu(ptr->s));
	}
}

/*
 * Readahead @count btree blocks at the given @ptr location.
 *
 * We don't need to care about long or short form btrees here as we have a
 * method of converting the ptr directly to a daddr available to us.
 */
STATIC void
xfs_btree_readahead_ptr(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr,
	xfs_extlen_t		count)
{
	xfs_buf_readahead(cur->bc_mp->m_ddev_targp,
			  xfs_btree_ptr_to_daddr(cur, ptr),
			  cur->bc_mp->m_bsize * count, cur->bc_ops->buf_ops);
}

/*
 * Set the buffer for level "lev" in the cursor to bp, releasing
 * any previous buffer.
 */
STATIC void
xfs_btree_setbuf(
	xfs_btree_cur_t		*cur,	/* btree cursor */
	int			lev,	/* level in btree */
	xfs_buf_t		*bp)	/* new buffer to set */
{
	struct xfs_btree_block	*b;	/* btree block */

	if (cur->bc_bufs[lev])
		xfs_trans_brelse(cur->bc_tp, cur->bc_bufs[lev]);
	cur->bc_bufs[lev] = bp;
	cur->bc_ra[lev] = 0;

	b = XFS_BUF_TO_BLOCK(bp);
	if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
		if (b->bb_u.l.bb_leftsib == cpu_to_be64(NULLFSBLOCK))
			cur->bc_ra[lev] |= XFS_BTCUR_LEFTRA;
		if (b->bb_u.l.bb_rightsib == cpu_to_be64(NULLFSBLOCK))
			cur->bc_ra[lev] |= XFS_BTCUR_RIGHTRA;
	} else {
		if (b->bb_u.s.bb_leftsib == cpu_to_be32(NULLAGBLOCK))
			cur->bc_ra[lev] |= XFS_BTCUR_LEFTRA;
		if (b->bb_u.s.bb_rightsib == cpu_to_be32(NULLAGBLOCK))
			cur->bc_ra[lev] |= XFS_BTCUR_RIGHTRA;
	}
}

STATIC int
xfs_btree_ptr_is_null(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
		return ptr->l == cpu_to_be64(NULLFSBLOCK);
	else
		return ptr->s == cpu_to_be32(NULLAGBLOCK);
}

STATIC void
xfs_btree_set_ptr_null(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
		ptr->l = cpu_to_be64(NULLFSBLOCK);
	else
		ptr->s = cpu_to_be32(NULLAGBLOCK);
}

/*
 * Get/set/init sibling pointers
 */
STATIC void
xfs_btree_get_sibling(
	struct xfs_btree_cur	*cur,
	struct xfs_btree_block	*block,
	union xfs_btree_ptr	*ptr,
	int			lr)
{
	ASSERT(lr == XFS_BB_LEFTSIB || lr == XFS_BB_RIGHTSIB);

	if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
		if (lr == XFS_BB_RIGHTSIB)
			ptr->l = block->bb_u.l.bb_rightsib;
		else
			ptr->l = block->bb_u.l.bb_leftsib;
	} else {
		if (lr == XFS_BB_RIGHTSIB)
			ptr->s = block->bb_u.s.bb_rightsib;
		else
			ptr->s = block->bb_u.s.bb_leftsib;
	}
}

STATIC void
xfs_btree_set_sibling(
	struct xfs_btree_cur	*cur,
	struct xfs_btree_block	*block,
	union xfs_btree_ptr	*ptr,
	int			lr)
{
	ASSERT(lr == XFS_BB_LEFTSIB || lr == XFS_BB_RIGHTSIB);

	if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
		if (lr == XFS_BB_RIGHTSIB)
			block->bb_u.l.bb_rightsib = ptr->l;
		else
			block->bb_u.l.bb_leftsib = ptr->l;
	} else {
		if (lr == XFS_BB_RIGHTSIB)
			block->bb_u.s.bb_rightsib = ptr->s;
		else
			block->bb_u.s.bb_leftsib = ptr->s;
	}
}

void
xfs_btree_init_block_int(
	struct xfs_mount	*mp,
	struct xfs_btree_block	*buf,
	xfs_daddr_t		blkno,
	__u32			magic,
	__u16			level,
	__u16			numrecs,
	__u64			owner,
	unsigned int		flags)
{
	buf->bb_magic = cpu_to_be32(magic);
	buf->bb_level = cpu_to_be16(level);
	buf->bb_numrecs = cpu_to_be16(numrecs);

	if (flags & XFS_BTREE_LONG_PTRS) {
		buf->bb_u.l.bb_leftsib = cpu_to_be64(NULLFSBLOCK);
		buf->bb_u.l.bb_rightsib = cpu_to_be64(NULLFSBLOCK);
		if (flags & XFS_BTREE_CRC_BLOCKS) {
			buf->bb_u.l.bb_blkno = cpu_to_be64(blkno);
			buf->bb_u.l.bb_owner = cpu_to_be64(owner);
			uuid_copy(&buf->bb_u.l.bb_uuid, &mp->m_sb.sb_meta_uuid);
			buf->bb_u.l.bb_pad = 0;
			buf->bb_u.l.bb_lsn = 0;
		}
	} else {
		/* owner is a 32 bit value on short blocks */
		__u32 __owner = (__u32)owner;

		buf->bb_u.s.bb_leftsib = cpu_to_be32(NULLAGBLOCK);
		buf->bb_u.s.bb_rightsib = cpu_to_be32(NULLAGBLOCK);
		if (flags & XFS_BTREE_CRC_BLOCKS) {
			buf->bb_u.s.bb_blkno = cpu_to_be64(blkno);
			buf->bb_u.s.bb_owner = cpu_to_be32(__owner);
			uuid_copy(&buf->bb_u.s.bb_uuid, &mp->m_sb.sb_meta_uuid);
			buf->bb_u.s.bb_lsn = 0;
		}
	}
}

void
xfs_btree_init_block(
	struct xfs_mount *mp,
	struct xfs_buf	*bp,
	__u32		magic,
	__u16		level,
	__u16		numrecs,
	__u64		owner,
	unsigned int	flags)
{
	xfs_btree_init_block_int(mp, XFS_BUF_TO_BLOCK(bp), bp->b_bn,
				 magic, level, numrecs, owner, flags);
}
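/*
 * Illustrative (not taken from a real caller) usage sketch for a freshly
 * allocated, empty by-bno freespace btree leaf on a non-CRC filesystem:
 *
 *	xfs_btree_init_block(mp, bp, XFS_ABTB_MAGIC, 0, 0, agno, 0);
 *
 * i.e. short-form magic, level 0, no records yet, the AG number as owner
 * and no XFS_BTREE_LONG_PTRS/XFS_BTREE_CRC_BLOCKS flags; CRC filesystems
 * would pass XFS_ABTB_CRC_MAGIC and XFS_BTREE_CRC_BLOCKS instead.
 */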
STATIC void
xfs_btree_init_block_cur(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp,
	int			level,
	int			numrecs)
{
	__u64 owner;

	/*
	 * we can pull the owner from the cursor right now as the different
	 * owners align directly with the pointer size of the btree. This may
	 * change in future, but is safe for current users of the generic btree
	 * code.
	 */
	if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
		owner = cur->bc_private.b.ip->i_ino;
	else
		owner = cur->bc_private.a.agno;

	xfs_btree_init_block_int(cur->bc_mp, XFS_BUF_TO_BLOCK(bp), bp->b_bn,
				 xfs_btree_magic(cur), level, numrecs,
				 owner, cur->bc_flags);
}
/*
 * Return true if ptr is the last record in the btree and
 * we need to track updates to this record.  The decision
 * will be further refined in the update_lastrec method.
 */
STATIC int
xfs_btree_is_lastrec(
	struct xfs_btree_cur	*cur,
	struct xfs_btree_block	*block,
	int			level)
{
	union xfs_btree_ptr	ptr;

	if (level > 0)
		return 0;
	if (!(cur->bc_flags & XFS_BTREE_LASTREC_UPDATE))
		return 0;

	xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_RIGHTSIB);
	if (!xfs_btree_ptr_is_null(cur, &ptr))
		return 0;
	return 1;
}

STATIC void
xfs_btree_buf_to_ptr(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp,
	union xfs_btree_ptr	*ptr)
{
	if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
		ptr->l = cpu_to_be64(XFS_DADDR_TO_FSB(cur->bc_mp,
					XFS_BUF_ADDR(bp)));
	else {
		ptr->s = cpu_to_be32(xfs_daddr_to_agbno(cur->bc_mp,
					XFS_BUF_ADDR(bp)));
	}
}

STATIC void
xfs_btree_set_refs(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp)
{
	switch (cur->bc_btnum) {
	case XFS_BTNUM_BNO:
	case XFS_BTNUM_CNT:
		xfs_buf_set_ref(bp, XFS_ALLOC_BTREE_REF);
		break;
	case XFS_BTNUM_INO:
	case XFS_BTNUM_FINO:
		xfs_buf_set_ref(bp, XFS_INO_BTREE_REF);
		break;
	case XFS_BTNUM_BMAP:
		xfs_buf_set_ref(bp, XFS_BMAP_BTREE_REF);
		break;
	default:
		ASSERT(0);
	}
}

STATIC int
xfs_btree_get_buf_block(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr,
	int			flags,
	struct xfs_btree_block	**block,
	struct xfs_buf		**bpp)
{
	struct xfs_mount	*mp = cur->bc_mp;
	xfs_daddr_t		d;

	/* need to sort out how callers deal with failures first */
	ASSERT(!(flags & XBF_TRYLOCK));

	d = xfs_btree_ptr_to_daddr(cur, ptr);
	*bpp = xfs_trans_get_buf(cur->bc_tp, mp->m_ddev_targp, d,
				 mp->m_bsize, flags);

	if (!*bpp)
		return -ENOMEM;

	(*bpp)->b_ops = cur->bc_ops->buf_ops;
	*block = XFS_BUF_TO_BLOCK(*bpp);
	return 0;
}

/*
 * Read in the buffer at the given ptr and return the buffer and
 * the block pointer within the buffer.
 */
STATIC int
xfs_btree_read_buf_block(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr,
	int			flags,
	struct xfs_btree_block	**block,
	struct xfs_buf		**bpp)
{
	struct xfs_mount	*mp = cur->bc_mp;
	xfs_daddr_t		d;
	int			error;

	/* need to sort out how callers deal with failures first */
	ASSERT(!(flags & XBF_TRYLOCK));

	d = xfs_btree_ptr_to_daddr(cur, ptr);
	error = xfs_trans_read_buf(mp, cur->bc_tp, mp->m_ddev_targp, d,
				   mp->m_bsize, flags, bpp,
				   cur->bc_ops->buf_ops);
	if (error)
		return error;

	xfs_btree_set_refs(cur, *bpp);
	*block = XFS_BUF_TO_BLOCK(*bpp);
	return 0;
}

/*
 * Copy keys from one btree block to another.
 */
STATIC void
xfs_btree_copy_keys(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*dst_key,
	union xfs_btree_key	*src_key,
	int			numkeys)
{
	ASSERT(numkeys >= 0);
	memcpy(dst_key, src_key, numkeys * cur->bc_ops->key_len);
}

/*
 * Copy records from one btree block to another.
 */
STATIC void
xfs_btree_copy_recs(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*dst_rec,
	union xfs_btree_rec	*src_rec,
	int			numrecs)
{
	ASSERT(numrecs >= 0);
	memcpy(dst_rec, src_rec, numrecs * cur->bc_ops->rec_len);
}

/*
 * Copy block pointers from one btree block to another.
 */
STATIC void
xfs_btree_copy_ptrs(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*dst_ptr,
	union xfs_btree_ptr	*src_ptr,
	int			numptrs)
{
	ASSERT(numptrs >= 0);
	memcpy(dst_ptr, src_ptr, numptrs * xfs_btree_ptr_len(cur));
}

/*
 * Shift keys one index left/right inside a single btree block.
 */
STATIC void
xfs_btree_shift_keys(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*key,
	int			dir,
	int			numkeys)
{
	char			*dst_key;

	ASSERT(numkeys >= 0);
	ASSERT(dir == 1 || dir == -1);

	dst_key = (char *)key + (dir * cur->bc_ops->key_len);
	memmove(dst_key, key, numkeys * cur->bc_ops->key_len);
}

/*
 * Shift records one index left/right inside a single btree block.
 */
STATIC void
xfs_btree_shift_recs(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*rec,
	int			dir,
	int			numrecs)
{
	char			*dst_rec;

	ASSERT(numrecs >= 0);
	ASSERT(dir == 1 || dir == -1);

	dst_rec = (char *)rec + (dir * cur->bc_ops->rec_len);
	memmove(dst_rec, rec, numrecs * cur->bc_ops->rec_len);
}

/*
 * Shift block pointers one index left/right inside a single btree block.
 */
STATIC void
xfs_btree_shift_ptrs(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr,
	int			dir,
	int			numptrs)
{
	char			*dst_ptr;

	ASSERT(numptrs >= 0);
	ASSERT(dir == 1 || dir == -1);

	dst_ptr = (char *)ptr + (dir * xfs_btree_ptr_len(cur));
	memmove(dst_ptr, ptr, numptrs * xfs_btree_ptr_len(cur));
}
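/*
 * The shift helpers above use memmove() because source and destination
 * ranges overlap: a shift with dir == 1 opens a one-entry hole for a new
 * entry, while dir == -1 closes a hole, as in the lshift/rshift
 * rebalancing code further down.
 */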
/*
 * Log key values from the btree block.
 */
STATIC void
xfs_btree_log_keys(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp,
	int			first,
	int			last)
{
	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
	XFS_BTREE_TRACE_ARGBII(cur, bp, first, last);

	if (bp) {
		xfs_trans_buf_set_type(cur->bc_tp, bp, XFS_BLFT_BTREE_BUF);
		xfs_trans_log_buf(cur->bc_tp, bp,
				  xfs_btree_key_offset(cur, first),
				  xfs_btree_key_offset(cur, last + 1) - 1);
	} else {
		xfs_trans_log_inode(cur->bc_tp, cur->bc_private.b.ip,
				xfs_ilog_fbroot(cur->bc_private.b.whichfork));
	}

	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
}

/*
 * Log record values from the btree block.
 */
void
xfs_btree_log_recs(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp,
	int			first,
	int			last)
{
	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
	XFS_BTREE_TRACE_ARGBII(cur, bp, first, last);

	xfs_trans_buf_set_type(cur->bc_tp, bp, XFS_BLFT_BTREE_BUF);
	xfs_trans_log_buf(cur->bc_tp, bp,
			  xfs_btree_rec_offset(cur, first),
			  xfs_btree_rec_offset(cur, last + 1) - 1);

	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
}
/*
 * Log block pointer fields from a btree block (nonleaf).
 */
STATIC void
xfs_btree_log_ptrs(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	struct xfs_buf		*bp,	/* buffer containing btree block */
	int			first,	/* index of first pointer to log */
	int			last)	/* index of last pointer to log */
{
	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
	XFS_BTREE_TRACE_ARGBII(cur, bp, first, last);

	if (bp) {
		struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
		int			level = xfs_btree_get_level(block);

		xfs_trans_buf_set_type(cur->bc_tp, bp, XFS_BLFT_BTREE_BUF);
		xfs_trans_log_buf(cur->bc_tp, bp,
				xfs_btree_ptr_offset(cur, first, level),
				xfs_btree_ptr_offset(cur, last + 1, level) - 1);
	} else {
		xfs_trans_log_inode(cur->bc_tp, cur->bc_private.b.ip,
			xfs_ilog_fbroot(cur->bc_private.b.whichfork));
	}

	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
}

/*
 * Log fields from a btree block header.
 */
void
xfs_btree_log_block(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	struct xfs_buf		*bp,	/* buffer containing btree block */
	int			fields)	/* mask of fields: XFS_BB_... */
{
	int			first;	/* first byte offset logged */
	int			last;	/* last byte offset logged */
	static const short	soffsets[] = {	/* table of offsets (short) */
		offsetof(struct xfs_btree_block, bb_magic),
		offsetof(struct xfs_btree_block, bb_level),
		offsetof(struct xfs_btree_block, bb_numrecs),
		offsetof(struct xfs_btree_block, bb_u.s.bb_leftsib),
		offsetof(struct xfs_btree_block, bb_u.s.bb_rightsib),
		offsetof(struct xfs_btree_block, bb_u.s.bb_blkno),
		offsetof(struct xfs_btree_block, bb_u.s.bb_lsn),
		offsetof(struct xfs_btree_block, bb_u.s.bb_uuid),
		offsetof(struct xfs_btree_block, bb_u.s.bb_owner),
		offsetof(struct xfs_btree_block, bb_u.s.bb_crc),
		XFS_BTREE_SBLOCK_CRC_LEN
	};
	static const short	loffsets[] = {	/* table of offsets (long) */
		offsetof(struct xfs_btree_block, bb_magic),
		offsetof(struct xfs_btree_block, bb_level),
		offsetof(struct xfs_btree_block, bb_numrecs),
		offsetof(struct xfs_btree_block, bb_u.l.bb_leftsib),
		offsetof(struct xfs_btree_block, bb_u.l.bb_rightsib),
		offsetof(struct xfs_btree_block, bb_u.l.bb_blkno),
		offsetof(struct xfs_btree_block, bb_u.l.bb_lsn),
		offsetof(struct xfs_btree_block, bb_u.l.bb_uuid),
		offsetof(struct xfs_btree_block, bb_u.l.bb_owner),
		offsetof(struct xfs_btree_block, bb_u.l.bb_crc),
		offsetof(struct xfs_btree_block, bb_u.l.bb_pad),
		XFS_BTREE_LBLOCK_CRC_LEN
	};

	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
	XFS_BTREE_TRACE_ARGBI(cur, bp, fields);

	if (bp) {
		int nbits;

		if (cur->bc_flags & XFS_BTREE_CRC_BLOCKS) {
			/*
			 * We don't log the CRC when updating a btree
			 * block but instead recreate it during log
			 * recovery.  As the log buffers have checksums
			 * of their own this is safe and avoids logging a crc
			 * update in a lot of places.
			 */
			if (fields == XFS_BB_ALL_BITS)
				fields = XFS_BB_ALL_BITS_CRC;
			nbits = XFS_BB_NUM_BITS_CRC;
		} else {
			nbits = XFS_BB_NUM_BITS;
		}
		xfs_btree_offsets(fields,
				  (cur->bc_flags & XFS_BTREE_LONG_PTRS) ?
					loffsets : soffsets,
				  nbits, &first, &last);
		xfs_trans_buf_set_type(cur->bc_tp, bp, XFS_BLFT_BTREE_BUF);
		xfs_trans_log_buf(cur->bc_tp, bp, first, last);
	} else {
		xfs_trans_log_inode(cur->bc_tp, cur->bc_private.b.ip,
			xfs_ilog_fbroot(cur->bc_private.b.whichfork));
	}

	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
}
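/*
 * Example: xfs_btree_log_block(cur, bp, XFS_BB_NUMRECS), as used by the
 * shift code below, resolves via xfs_btree_offsets() to the byte range of
 * bb_numrecs alone, so only that header field is dirtied in the log.
 */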
/*
 * Increment cursor by one record at the level.
 * For nonzero levels the leaf-ward information is untouched.
 */
int						/* error */
xfs_btree_increment(
	struct xfs_btree_cur	*cur,
	int			level,
	int			*stat)		/* success/failure */
{
	struct xfs_btree_block	*block;
	union xfs_btree_ptr	ptr;
	struct xfs_buf		*bp;
	int			error;		/* error return value */
	int			lev;

	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
	XFS_BTREE_TRACE_ARGI(cur, level);

	ASSERT(level < cur->bc_nlevels);

	/* Read-ahead to the right at this level. */
	xfs_btree_readahead(cur, level, XFS_BTCUR_RIGHTRA);

	/* Get a pointer to the btree block. */
	block = xfs_btree_get_block(cur, level, &bp);

#ifdef DEBUG
	error = xfs_btree_check_block(cur, block, level, bp);
	if (error)
		goto error0;
#endif

	/* We're done if we remain in the block after the increment. */
	if (++cur->bc_ptrs[level] <= xfs_btree_get_numrecs(block))
		goto out1;

	/* Fail if we just went off the right edge of the tree. */
	xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_RIGHTSIB);
	if (xfs_btree_ptr_is_null(cur, &ptr))
		goto out0;

	XFS_BTREE_STATS_INC(cur, increment);

	/*
	 * March up the tree incrementing pointers.
	 * Stop when we don't go off the right edge of a block.
	 */
	for (lev = level + 1; lev < cur->bc_nlevels; lev++) {
		block = xfs_btree_get_block(cur, lev, &bp);

#ifdef DEBUG
		error = xfs_btree_check_block(cur, block, lev, bp);
		if (error)
			goto error0;
#endif

		if (++cur->bc_ptrs[lev] <= xfs_btree_get_numrecs(block))
			break;

		/* Read-ahead the right block for the next loop. */
		xfs_btree_readahead(cur, lev, XFS_BTCUR_RIGHTRA);
	}

	/*
	 * If we went off the root then we are either seriously
	 * confused or have the tree root in an inode.
	 */
	if (lev == cur->bc_nlevels) {
		if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE)
			goto out0;
		ASSERT(0);
		error = -EFSCORRUPTED;
		goto error0;
	}
	ASSERT(lev < cur->bc_nlevels);

	/*
	 * Now walk back down the tree, fixing up the cursor's buffer
	 * pointers and key numbers.
	 */
	for (block = xfs_btree_get_block(cur, lev, &bp); lev > level; ) {
		union xfs_btree_ptr	*ptrp;

		ptrp = xfs_btree_ptr_addr(cur, cur->bc_ptrs[lev], block);
		--lev;
		error = xfs_btree_read_buf_block(cur, ptrp, 0, &block, &bp);
		if (error)
			goto error0;

		xfs_btree_setbuf(cur, lev, bp);
		cur->bc_ptrs[lev] = 1;
	}
out1:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	*stat = 1;
	return 0;

out0:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	*stat = 0;
	return 0;

error0:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
	return error;
}
/*
 * Decrement cursor by one record at the level.
 * For nonzero levels the leaf-ward information is untouched.
 */
int						/* error */
xfs_btree_decrement(
	struct xfs_btree_cur	*cur,
	int			level,
	int			*stat)		/* success/failure */
{
	struct xfs_btree_block	*block;
	xfs_buf_t		*bp;
	int			error;		/* error return value */
	int			lev;
	union xfs_btree_ptr	ptr;

	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
	XFS_BTREE_TRACE_ARGI(cur, level);

	ASSERT(level < cur->bc_nlevels);

	/* Read-ahead to the left at this level. */
	xfs_btree_readahead(cur, level, XFS_BTCUR_LEFTRA);

	/* We're done if we remain in the block after the decrement. */
	if (--cur->bc_ptrs[level] > 0)
		goto out1;

	/* Get a pointer to the btree block. */
	block = xfs_btree_get_block(cur, level, &bp);

#ifdef DEBUG
	error = xfs_btree_check_block(cur, block, level, bp);
	if (error)
		goto error0;
#endif

	/* Fail if we just went off the left edge of the tree. */
	xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_LEFTSIB);
	if (xfs_btree_ptr_is_null(cur, &ptr))
		goto out0;

	XFS_BTREE_STATS_INC(cur, decrement);

	/*
	 * March up the tree decrementing pointers.
	 * Stop when we don't go off the left edge of a block.
	 */
	for (lev = level + 1; lev < cur->bc_nlevels; lev++) {
		if (--cur->bc_ptrs[lev] > 0)
			break;
		/* Read-ahead the left block for the next loop. */
		xfs_btree_readahead(cur, lev, XFS_BTCUR_LEFTRA);
	}

	/*
	 * If we went off the root then we are seriously confused,
	 * or the root of the tree is in an inode.
	 */
	if (lev == cur->bc_nlevels) {
		if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE)
			goto out0;
		ASSERT(0);
		error = -EFSCORRUPTED;
		goto error0;
	}
	ASSERT(lev < cur->bc_nlevels);

	/*
	 * Now walk back down the tree, fixing up the cursor's buffer
	 * pointers and key numbers.
	 */
	for (block = xfs_btree_get_block(cur, lev, &bp); lev > level; ) {
		union xfs_btree_ptr	*ptrp;

		ptrp = xfs_btree_ptr_addr(cur, cur->bc_ptrs[lev], block);
		--lev;
		error = xfs_btree_read_buf_block(cur, ptrp, 0, &block, &bp);
		if (error)
			goto error0;
		xfs_btree_setbuf(cur, lev, bp);
		cur->bc_ptrs[lev] = xfs_btree_get_numrecs(block);
	}
out1:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	*stat = 1;
	return 0;

out0:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	*stat = 0;
	return 0;

error0:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
	return error;
}

STATIC int
xfs_btree_lookup_get_block(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	int			level,	/* level in the btree */
	union xfs_btree_ptr	*pp,	/* ptr to btree block */
	struct xfs_btree_block	**blkp) /* return btree block */
{
	struct xfs_buf		*bp;	/* buffer pointer for btree block */
	int			error = 0;

	/* special case the root block if in an inode */
	if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
	    (level == cur->bc_nlevels - 1)) {
		*blkp = xfs_btree_get_iroot(cur);
		return 0;
	}
	/*
	 * If the old buffer at this level is for the disk address we are
	 * looking for, re-use it.
	 *
	 * Otherwise throw it away and get a new one.
	 */
	bp = cur->bc_bufs[level];
	if (bp && XFS_BUF_ADDR(bp) == xfs_btree_ptr_to_daddr(cur, pp)) {
		*blkp = XFS_BUF_TO_BLOCK(bp);
		return 0;
	}

	error = xfs_btree_read_buf_block(cur, pp, 0, blkp, &bp);
	if (error)
		return error;

	xfs_btree_setbuf(cur, level, bp);
	return 0;
}

/*
 * Get current search key.  For level 0 we don't actually have a key
 * structure so we make one up from the record.  For all other levels
 * we just return the right key.
 */
STATIC union xfs_btree_key *
xfs_lookup_get_search_key(
	struct xfs_btree_cur	*cur,
	int			level,
	int			keyno,
	struct xfs_btree_block	*block,
	union xfs_btree_key	*kp)
{
	if (level == 0) {
		cur->bc_ops->init_key_from_rec(kp,
				xfs_btree_rec_addr(cur, keyno, block));
		return kp;
	}

	return xfs_btree_key_addr(cur, keyno, block);
}

/*
 * Lookup the record.  The cursor is made to point to it, based on dir.
 * stat is set to 0 if can't find any such record, 1 for success.
 */
int					/* error */
xfs_btree_lookup(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_lookup_t		dir,	/* <=, ==, or >= */
	int			*stat)	/* success/failure */
{
	struct xfs_btree_block	*block;	/* current btree block */
	__int64_t		diff;	/* difference for the current key */
	int			error;	/* error return value */
	int			keyno;	/* current key number */
	int			level;	/* level in the btree */
	union xfs_btree_ptr	*pp;	/* ptr to btree block */
	union xfs_btree_ptr	ptr;	/* ptr to btree block */

	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
	XFS_BTREE_TRACE_ARGI(cur, dir);

	XFS_BTREE_STATS_INC(cur, lookup);

	block = NULL;
	keyno = 0;

	/* initialise start pointer from cursor */
	cur->bc_ops->init_ptr_from_cur(cur, &ptr);
	pp = &ptr;

	/*
	 * Iterate over each level in the btree, starting at the root.
	 * For each level above the leaves, find the key we need, based
	 * on the lookup record, then follow the corresponding block
	 * pointer down to the next level.
	 */
	for (level = cur->bc_nlevels - 1, diff = 1; level >= 0; level--) {
		/* Get the block we need to do the lookup on. */
		error = xfs_btree_lookup_get_block(cur, level, pp, &block);
		if (error)
			goto error0;

		if (diff == 0) {
			/*
			 * If we already had a key match at a higher level, we
			 * know we need to use the first entry in this block.
			 */
			keyno = 1;
		} else {
			/* Otherwise search this block. Do a binary search. */

			int	high;	/* high entry number */
			int	low;	/* low entry number */

			/* Set low and high entry numbers, 1-based. */
			low = 1;
			high = xfs_btree_get_numrecs(block);
			if (!high) {
				/* Block is empty, must be an empty leaf. */
				ASSERT(level == 0 && cur->bc_nlevels == 1);

				cur->bc_ptrs[0] = dir != XFS_LOOKUP_LE;
				XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
				*stat = 0;
				return 0;
			}

			/* Binary search the block. */
			while (low <= high) {
				union xfs_btree_key	key;
				union xfs_btree_key	*kp;

				XFS_BTREE_STATS_INC(cur, compare);

				/* keyno is average of low and high. */
				keyno = (low + high) >> 1;

				/* Get current search key */
				kp = xfs_lookup_get_search_key(cur, level,
						keyno, block, &key);

				/*
				 * Compute difference to get next direction:
				 *  - less than, move right
				 *  - greater than, move left
				 *  - equal, we're done
				 */
				diff = cur->bc_ops->key_diff(cur, kp);
				if (diff < 0)
					low = keyno + 1;
				else if (diff > 0)
					high = keyno - 1;
				else
					break;
			}
		}

		/*
		 * If there are more levels, set up for the next level
		 * by getting the block number and filling in the cursor.
		 */
		if (level > 0) {
			/*
			 * If we moved left, need the previous key number,
			 * unless there isn't one.
			 */
			if (diff > 0 && --keyno < 1)
				keyno = 1;
			pp = xfs_btree_ptr_addr(cur, keyno, block);

#ifdef DEBUG
			error = xfs_btree_check_ptr(cur, pp, 0, level);
			if (error)
				goto error0;
#endif
			cur->bc_ptrs[level] = keyno;
		}
	}

	/* Done with the search. See if we need to adjust the results. */
	if (dir != XFS_LOOKUP_LE && diff < 0) {
		keyno++;
		/*
		 * If ge search and we went off the end of the block, but it's
		 * not the last block, we're in the wrong block.
		 */
		xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_RIGHTSIB);
		if (dir == XFS_LOOKUP_GE &&
		    keyno > xfs_btree_get_numrecs(block) &&
		    !xfs_btree_ptr_is_null(cur, &ptr)) {
			int	i;

			cur->bc_ptrs[0] = keyno;
			error = xfs_btree_increment(cur, 0, &i);
			if (error)
				goto error0;
			XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, i == 1);
			XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
			*stat = 1;
			return 0;
		}
	} else if (dir == XFS_LOOKUP_LE && diff > 0)
		keyno--;
	cur->bc_ptrs[0] = keyno;

	/* Return if we succeeded or not. */
	if (keyno == 0 || keyno > xfs_btree_get_numrecs(block))
		*stat = 0;
	else if (dir != XFS_LOOKUP_EQ || diff == 0)
		*stat = 1;
	else
		*stat = 0;
	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	return 0;

error0:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
	return error;
}
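/*
 * Typical caller pattern (illustration; see e.g. xfs_alloc_lookup_eq()):
 * the lookup record is staged in cur->bc_rec before the call, key_diff()
 * compares each key against it, and *stat reports whether the cursor ends
 * up pointing at a matching record:
 *
 *	cur->bc_rec.a.ar_startblock = bno;
 *	cur->bc_rec.a.ar_blockcount = len;
 *	error = xfs_btree_lookup(cur, XFS_LOOKUP_EQ, &stat);
 */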
/*
 * Update keys at all levels from here to the root along the cursor's path.
 */
STATIC int
xfs_btree_updkey(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*keyp,
	int			level)
{
	struct xfs_btree_block	*block;
	struct xfs_buf		*bp;
	union xfs_btree_key	*kp;
	int			ptr;

	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
	XFS_BTREE_TRACE_ARGIK(cur, level, keyp);

	ASSERT(!(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) || level >= 1);

	/*
	 * Go up the tree from this level toward the root.
	 * At each level, update the key value to the value input.
	 * Stop when we reach a level where the cursor isn't pointing
	 * at the first entry in the block.
	 */
	for (ptr = 1; ptr == 1 && level < cur->bc_nlevels; level++) {
#ifdef DEBUG
		int		error;
#endif
		block = xfs_btree_get_block(cur, level, &bp);
#ifdef DEBUG
		error = xfs_btree_check_block(cur, block, level, bp);
		if (error) {
			XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
			return error;
		}
#endif
		ptr = cur->bc_ptrs[level];
		kp = xfs_btree_key_addr(cur, ptr, block);
		xfs_btree_copy_keys(cur, kp, keyp, 1);
		xfs_btree_log_keys(cur, bp, ptr, ptr);
	}

	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	return 0;
}

/*
 * Update the record referred to by cur to the value in the
 * given record. This either works (return 0) or gets an
 * EFSCORRUPTED error.
 */
int
xfs_btree_update(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*rec)
{
	struct xfs_btree_block	*block;
	struct xfs_buf		*bp;
	int			error;
	int			ptr;
	union xfs_btree_rec	*rp;

	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
	XFS_BTREE_TRACE_ARGR(cur, rec);

	/* Pick up the current block. */
	block = xfs_btree_get_block(cur, 0, &bp);

#ifdef DEBUG
	error = xfs_btree_check_block(cur, block, 0, bp);
	if (error)
		goto error0;
#endif
	/* Get the address of the rec to be updated. */
	ptr = cur->bc_ptrs[0];
	rp = xfs_btree_rec_addr(cur, ptr, block);

	/* Fill in the new contents and log them. */
	xfs_btree_copy_recs(cur, rp, rec, 1);
	xfs_btree_log_recs(cur, bp, ptr, ptr);

	/*
	 * If we are tracking the last record in the tree and
	 * we are at the far right edge of the tree, update it.
	 */
	if (xfs_btree_is_lastrec(cur, block, 0)) {
		cur->bc_ops->update_lastrec(cur, block, rec,
					    ptr, LASTREC_UPDATE);
	}

	/* Updating first rec in leaf. Pass new key value up to our parent. */
	if (ptr == 1) {
		union xfs_btree_key	key;

		cur->bc_ops->init_key_from_rec(&key, rec);
		error = xfs_btree_updkey(cur, &key, 1);
		if (error)
			goto error0;
	}

	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	return 0;

error0:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
	return error;
}

/*
 * Move 1 record left from cur/level if possible.
 * Update cur to reflect the new path.
 */
STATIC int					/* error */
xfs_btree_lshift(
	struct xfs_btree_cur	*cur,
	int			level,
	int			*stat)		/* success/failure */
{
	union xfs_btree_key	key;		/* btree key */
	struct xfs_buf		*lbp;		/* left buffer pointer */
	struct xfs_btree_block	*left;		/* left btree block */
	int			lrecs;		/* left record count */
	struct xfs_buf		*rbp;		/* right buffer pointer */
	struct xfs_btree_block	*right;		/* right btree block */
	int			rrecs;		/* right record count */
	union xfs_btree_ptr	lptr;		/* left btree pointer */
	union xfs_btree_key	*rkp = NULL;	/* right btree key */
	union xfs_btree_ptr	*rpp = NULL;	/* right address pointer */
	union xfs_btree_rec	*rrp = NULL;	/* right record pointer */
	int			error;		/* error return value */

	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
	XFS_BTREE_TRACE_ARGI(cur, level);

	if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
	    level == cur->bc_nlevels - 1)
		goto out0;

	/* Set up variables for this block as "right". */
*/ 1994 right = xfs_btree_get_block(cur, level, &rbp); 1995 1996 #ifdef DEBUG 1997 error = xfs_btree_check_block(cur, right, level, rbp); 1998 if (error) 1999 goto error0; 2000 #endif 2001 2002 /* If we've got no left sibling then we can't shift an entry left. */ 2003 xfs_btree_get_sibling(cur, right, &lptr, XFS_BB_LEFTSIB); 2004 if (xfs_btree_ptr_is_null(cur, &lptr)) 2005 goto out0; 2006 2007 /* 2008 * If the cursor entry is the one that would be moved, don't 2009 * do it... it's too complicated. 2010 */ 2011 if (cur->bc_ptrs[level] <= 1) 2012 goto out0; 2013 2014 /* Set up the left neighbor as "left". */ 2015 error = xfs_btree_read_buf_block(cur, &lptr, 0, &left, &lbp); 2016 if (error) 2017 goto error0; 2018 2019 /* If it's full, it can't take another entry. */ 2020 lrecs = xfs_btree_get_numrecs(left); 2021 if (lrecs == cur->bc_ops->get_maxrecs(cur, level)) 2022 goto out0; 2023 2024 rrecs = xfs_btree_get_numrecs(right); 2025 2026 /* 2027 * We add one entry to the left side and remove one for the right side. 2028 * Account for it here, the changes will be updated on disk and logged 2029 * later. 2030 */ 2031 lrecs++; 2032 rrecs--; 2033 2034 XFS_BTREE_STATS_INC(cur, lshift); 2035 XFS_BTREE_STATS_ADD(cur, moves, 1); 2036 2037 /* 2038 * If non-leaf, copy a key and a ptr to the left block. 2039 * Log the changes to the left block. 2040 */ 2041 if (level > 0) { 2042 /* It's a non-leaf. Move keys and pointers. */ 2043 union xfs_btree_key *lkp; /* left btree key */ 2044 union xfs_btree_ptr *lpp; /* left address pointer */ 2045 2046 lkp = xfs_btree_key_addr(cur, lrecs, left); 2047 rkp = xfs_btree_key_addr(cur, 1, right); 2048 2049 lpp = xfs_btree_ptr_addr(cur, lrecs, left); 2050 rpp = xfs_btree_ptr_addr(cur, 1, right); 2051 #ifdef DEBUG 2052 error = xfs_btree_check_ptr(cur, rpp, 0, level); 2053 if (error) 2054 goto error0; 2055 #endif 2056 xfs_btree_copy_keys(cur, lkp, rkp, 1); 2057 xfs_btree_copy_ptrs(cur, lpp, rpp, 1); 2058 2059 xfs_btree_log_keys(cur, lbp, lrecs, lrecs); 2060 xfs_btree_log_ptrs(cur, lbp, lrecs, lrecs); 2061 2062 ASSERT(cur->bc_ops->keys_inorder(cur, 2063 xfs_btree_key_addr(cur, lrecs - 1, left), lkp)); 2064 } else { 2065 /* It's a leaf. Move records. */ 2066 union xfs_btree_rec *lrp; /* left record pointer */ 2067 2068 lrp = xfs_btree_rec_addr(cur, lrecs, left); 2069 rrp = xfs_btree_rec_addr(cur, 1, right); 2070 2071 xfs_btree_copy_recs(cur, lrp, rrp, 1); 2072 xfs_btree_log_recs(cur, lbp, lrecs, lrecs); 2073 2074 ASSERT(cur->bc_ops->recs_inorder(cur, 2075 xfs_btree_rec_addr(cur, lrecs - 1, left), lrp)); 2076 } 2077 2078 xfs_btree_set_numrecs(left, lrecs); 2079 xfs_btree_log_block(cur, lbp, XFS_BB_NUMRECS); 2080 2081 xfs_btree_set_numrecs(right, rrecs); 2082 xfs_btree_log_block(cur, rbp, XFS_BB_NUMRECS); 2083 2084 /* 2085 * Slide the contents of right down one entry. 2086 */ 2087 XFS_BTREE_STATS_ADD(cur, moves, rrecs - 1); 2088 if (level > 0) { 2089 /* It's a nonleaf. operate on keys and ptrs */ 2090 #ifdef DEBUG 2091 int i; /* loop index */ 2092 2093 for (i = 0; i < rrecs; i++) { 2094 error = xfs_btree_check_ptr(cur, rpp, i + 1, level); 2095 if (error) 2096 goto error0; 2097 } 2098 #endif 2099 xfs_btree_shift_keys(cur, 2100 xfs_btree_key_addr(cur, 2, right), 2101 -1, rrecs); 2102 xfs_btree_shift_ptrs(cur, 2103 xfs_btree_ptr_addr(cur, 2, right), 2104 -1, rrecs); 2105 2106 xfs_btree_log_keys(cur, rbp, 1, rrecs); 2107 xfs_btree_log_ptrs(cur, rbp, 1, rrecs); 2108 } else { 2109 /* It's a leaf. 
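 * (Conceptually, and assuming fixed-size contiguous records, the shift
 * below behaves like
 *	memmove(rec_addr(1), rec_addr(2), rrecs * rec_size)
 * within the right block, i.e. the remaining rrecs records each move
 * one slot toward the start; rec_addr() and rec_size are shorthand for
 * this sketch, not helpers defined in this file.)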
operate on records */ 2110 xfs_btree_shift_recs(cur, 2111 xfs_btree_rec_addr(cur, 2, right), 2112 -1, rrecs); 2113 xfs_btree_log_recs(cur, rbp, 1, rrecs); 2114 2115 /* 2116 * If it's the first record in the block, we'll need a key 2117 * structure to pass up to the next level (updkey). 2118 */ 2119 cur->bc_ops->init_key_from_rec(&key, 2120 xfs_btree_rec_addr(cur, 1, right)); 2121 rkp = &key; 2122 } 2123 2124 /* Update the parent key values of right. */ 2125 error = xfs_btree_updkey(cur, rkp, level + 1); 2126 if (error) 2127 goto error0; 2128 2129 /* Slide the cursor value left one. */ 2130 cur->bc_ptrs[level]--; 2131 2132 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); 2133 *stat = 1; 2134 return 0; 2135 2136 out0: 2137 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); 2138 *stat = 0; 2139 return 0; 2140 2141 error0: 2142 XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR); 2143 return error; 2144 } 2145 2146 /* 2147 * Move 1 record right from cur/level if possible. 2148 * Update cur to reflect the new path. 2149 */ 2150 STATIC int /* error */ 2151 xfs_btree_rshift( 2152 struct xfs_btree_cur *cur, 2153 int level, 2154 int *stat) /* success/failure */ 2155 { 2156 union xfs_btree_key key; /* btree key */ 2157 struct xfs_buf *lbp; /* left buffer pointer */ 2158 struct xfs_btree_block *left; /* left btree block */ 2159 struct xfs_buf *rbp; /* right buffer pointer */ 2160 struct xfs_btree_block *right; /* right btree block */ 2161 struct xfs_btree_cur *tcur; /* temporary btree cursor */ 2162 union xfs_btree_ptr rptr; /* right block pointer */ 2163 union xfs_btree_key *rkp; /* right btree key */ 2164 int rrecs; /* right record count */ 2165 int lrecs; /* left record count */ 2166 int error; /* error return value */ 2167 int i; /* loop counter */ 2168 2169 XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY); 2170 XFS_BTREE_TRACE_ARGI(cur, level); 2171 2172 if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) && 2173 (level == cur->bc_nlevels - 1)) 2174 goto out0; 2175 2176 /* Set up variables for this block as "left". */ 2177 left = xfs_btree_get_block(cur, level, &lbp); 2178 2179 #ifdef DEBUG 2180 error = xfs_btree_check_block(cur, left, level, lbp); 2181 if (error) 2182 goto error0; 2183 #endif 2184 2185 /* If we've got no right sibling then we can't shift an entry right. */ 2186 xfs_btree_get_sibling(cur, left, &rptr, XFS_BB_RIGHTSIB); 2187 if (xfs_btree_ptr_is_null(cur, &rptr)) 2188 goto out0; 2189 2190 /* 2191 * If the cursor entry is the one that would be moved, don't 2192 * do it... it's too complicated. 2193 */ 2194 lrecs = xfs_btree_get_numrecs(left); 2195 if (cur->bc_ptrs[level] >= lrecs) 2196 goto out0; 2197 2198 /* Set up the right neighbor as "right". */ 2199 error = xfs_btree_read_buf_block(cur, &rptr, 0, &right, &rbp); 2200 if (error) 2201 goto error0; 2202 2203 /* If it's full, it can't take another entry. */ 2204 rrecs = xfs_btree_get_numrecs(right); 2205 if (rrecs == cur->bc_ops->get_maxrecs(cur, level)) 2206 goto out0; 2207 2208 XFS_BTREE_STATS_INC(cur, rshift); 2209 XFS_BTREE_STATS_ADD(cur, moves, rrecs); 2210 2211 /* 2212 * Make a hole at the start of the right neighbor block, then 2213 * copy the last left block entry to the hole. 2214 */ 2215 if (level > 0) { 2216 /* It's a nonleaf. 
make a hole in the keys and ptrs */ 2217 union xfs_btree_key *lkp; 2218 union xfs_btree_ptr *lpp; 2219 union xfs_btree_ptr *rpp; 2220 2221 lkp = xfs_btree_key_addr(cur, lrecs, left); 2222 lpp = xfs_btree_ptr_addr(cur, lrecs, left); 2223 rkp = xfs_btree_key_addr(cur, 1, right); 2224 rpp = xfs_btree_ptr_addr(cur, 1, right); 2225 2226 #ifdef DEBUG 2227 for (i = rrecs - 1; i >= 0; i--) { 2228 error = xfs_btree_check_ptr(cur, rpp, i, level); 2229 if (error) 2230 goto error0; 2231 } 2232 #endif 2233 2234 xfs_btree_shift_keys(cur, rkp, 1, rrecs); 2235 xfs_btree_shift_ptrs(cur, rpp, 1, rrecs); 2236 2237 #ifdef DEBUG 2238 error = xfs_btree_check_ptr(cur, lpp, 0, level); 2239 if (error) 2240 goto error0; 2241 #endif 2242 2243 /* Now put the new data in, and log it. */ 2244 xfs_btree_copy_keys(cur, rkp, lkp, 1); 2245 xfs_btree_copy_ptrs(cur, rpp, lpp, 1); 2246 2247 xfs_btree_log_keys(cur, rbp, 1, rrecs + 1); 2248 xfs_btree_log_ptrs(cur, rbp, 1, rrecs + 1); 2249 2250 ASSERT(cur->bc_ops->keys_inorder(cur, rkp, 2251 xfs_btree_key_addr(cur, 2, right))); 2252 } else { 2253 /* It's a leaf. make a hole in the records */ 2254 union xfs_btree_rec *lrp; 2255 union xfs_btree_rec *rrp; 2256 2257 lrp = xfs_btree_rec_addr(cur, lrecs, left); 2258 rrp = xfs_btree_rec_addr(cur, 1, right); 2259 2260 xfs_btree_shift_recs(cur, rrp, 1, rrecs); 2261 2262 /* Now put the new data in, and log it. */ 2263 xfs_btree_copy_recs(cur, rrp, lrp, 1); 2264 xfs_btree_log_recs(cur, rbp, 1, rrecs + 1); 2265 2266 cur->bc_ops->init_key_from_rec(&key, rrp); 2267 rkp = &key; 2268 2269 ASSERT(cur->bc_ops->recs_inorder(cur, rrp, 2270 xfs_btree_rec_addr(cur, 2, right))); 2271 } 2272 2273 /* 2274 * Decrement and log left's numrecs, bump and log right's numrecs. 2275 */ 2276 xfs_btree_set_numrecs(left, --lrecs); 2277 xfs_btree_log_block(cur, lbp, XFS_BB_NUMRECS); 2278 2279 xfs_btree_set_numrecs(right, ++rrecs); 2280 xfs_btree_log_block(cur, rbp, XFS_BB_NUMRECS); 2281 2282 /* 2283 * Using a temporary cursor, update the parent key values of the 2284 * block on the right. 2285 */ 2286 error = xfs_btree_dup_cursor(cur, &tcur); 2287 if (error) 2288 goto error0; 2289 i = xfs_btree_lastrec(tcur, level); 2290 XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0); 2291 2292 error = xfs_btree_increment(tcur, level, &i); 2293 if (error) 2294 goto error1; 2295 2296 error = xfs_btree_updkey(tcur, rkp, level + 1); 2297 if (error) 2298 goto error1; 2299 2300 xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR); 2301 2302 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); 2303 *stat = 1; 2304 return 0; 2305 2306 out0: 2307 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); 2308 *stat = 0; 2309 return 0; 2310 2311 error0: 2312 XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR); 2313 return error; 2314 2315 error1: 2316 XFS_BTREE_TRACE_CURSOR(tcur, XBT_ERROR); 2317 xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR); 2318 return error; 2319 } 2320 2321 /* 2322 * Split cur/level block in half. 2323 * Return new block number and the key to its first 2324 * record (to be inserted into parent). 
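 *
 * Worked example of the split arithmetic below (values illustrative
 * only): with lrecs = 7 records in the old block and the cursor at
 * entry 3, rrecs starts as 7 / 2 = 3 and is bumped to 4 because lrecs
 * is odd and the cursor falls in the first half, so records 4-7 move to
 * the new right block and 1-3 stay put; with the cursor at entry 6,
 * rrecs stays 3 and only records 5-7 move.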
2325 */ 2326 STATIC int /* error */ 2327 __xfs_btree_split( 2328 struct xfs_btree_cur *cur, 2329 int level, 2330 union xfs_btree_ptr *ptrp, 2331 union xfs_btree_key *key, 2332 struct xfs_btree_cur **curp, 2333 int *stat) /* success/failure */ 2334 { 2335 union xfs_btree_ptr lptr; /* left sibling block ptr */ 2336 struct xfs_buf *lbp; /* left buffer pointer */ 2337 struct xfs_btree_block *left; /* left btree block */ 2338 union xfs_btree_ptr rptr; /* right sibling block ptr */ 2339 struct xfs_buf *rbp; /* right buffer pointer */ 2340 struct xfs_btree_block *right; /* right btree block */ 2341 union xfs_btree_ptr rrptr; /* right-right sibling ptr */ 2342 struct xfs_buf *rrbp; /* right-right buffer pointer */ 2343 struct xfs_btree_block *rrblock; /* right-right btree block */ 2344 int lrecs; 2345 int rrecs; 2346 int src_index; 2347 int error; /* error return value */ 2348 #ifdef DEBUG 2349 int i; 2350 #endif 2351 2352 XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY); 2353 XFS_BTREE_TRACE_ARGIPK(cur, level, *ptrp, key); 2354 2355 XFS_BTREE_STATS_INC(cur, split); 2356 2357 /* Set up left block (current one). */ 2358 left = xfs_btree_get_block(cur, level, &lbp); 2359 2360 #ifdef DEBUG 2361 error = xfs_btree_check_block(cur, left, level, lbp); 2362 if (error) 2363 goto error0; 2364 #endif 2365 2366 xfs_btree_buf_to_ptr(cur, lbp, &lptr); 2367 2368 /* Allocate the new block. If we can't do it, we're toast. Give up. */ 2369 error = cur->bc_ops->alloc_block(cur, &lptr, &rptr, stat); 2370 if (error) 2371 goto error0; 2372 if (*stat == 0) 2373 goto out0; 2374 XFS_BTREE_STATS_INC(cur, alloc); 2375 2376 /* Set up the new block as "right". */ 2377 error = xfs_btree_get_buf_block(cur, &rptr, 0, &right, &rbp); 2378 if (error) 2379 goto error0; 2380 2381 /* Fill in the btree header for the new right block. */ 2382 xfs_btree_init_block_cur(cur, rbp, xfs_btree_get_level(left), 0); 2383 2384 /* 2385 * Split the entries between the old and the new block evenly. 2386 * Make sure that if there's an odd number of entries now, that 2387 * each new block will have the same number of entries. 2388 */ 2389 lrecs = xfs_btree_get_numrecs(left); 2390 rrecs = lrecs / 2; 2391 if ((lrecs & 1) && cur->bc_ptrs[level] <= rrecs + 1) 2392 rrecs++; 2393 src_index = (lrecs - rrecs + 1); 2394 2395 XFS_BTREE_STATS_ADD(cur, moves, rrecs); 2396 2397 /* 2398 * Copy btree block entries from the left block over to the 2399 * new block, the right. Update the right block and log the 2400 * changes. 2401 */ 2402 if (level > 0) { 2403 /* It's a non-leaf. Move keys and pointers. */ 2404 union xfs_btree_key *lkp; /* left btree key */ 2405 union xfs_btree_ptr *lpp; /* left address pointer */ 2406 union xfs_btree_key *rkp; /* right btree key */ 2407 union xfs_btree_ptr *rpp; /* right address pointer */ 2408 2409 lkp = xfs_btree_key_addr(cur, src_index, left); 2410 lpp = xfs_btree_ptr_addr(cur, src_index, left); 2411 rkp = xfs_btree_key_addr(cur, 1, right); 2412 rpp = xfs_btree_ptr_addr(cur, 1, right); 2413 2414 #ifdef DEBUG 2415 for (i = src_index; i < rrecs; i++) { 2416 error = xfs_btree_check_ptr(cur, lpp, i, level); 2417 if (error) 2418 goto error0; 2419 } 2420 #endif 2421 2422 xfs_btree_copy_keys(cur, rkp, lkp, rrecs); 2423 xfs_btree_copy_ptrs(cur, rpp, lpp, rrecs); 2424 2425 xfs_btree_log_keys(cur, rbp, 1, rrecs); 2426 xfs_btree_log_ptrs(cur, rbp, 1, rrecs); 2427 2428 /* Grab the keys to the entries moved to the right block */ 2429 xfs_btree_copy_keys(cur, key, rkp, 1); 2430 } else { 2431 /* It's a leaf. Move records. 
*/ 2432 union xfs_btree_rec *lrp; /* left record pointer */ 2433 union xfs_btree_rec *rrp; /* right record pointer */ 2434 2435 lrp = xfs_btree_rec_addr(cur, src_index, left); 2436 rrp = xfs_btree_rec_addr(cur, 1, right); 2437 2438 xfs_btree_copy_recs(cur, rrp, lrp, rrecs); 2439 xfs_btree_log_recs(cur, rbp, 1, rrecs); 2440 2441 cur->bc_ops->init_key_from_rec(key, 2442 xfs_btree_rec_addr(cur, 1, right)); 2443 } 2444 2445 2446 /* 2447 * Find the left block number by looking in the buffer. 2448 * Adjust numrecs, sibling pointers. 2449 */ 2450 xfs_btree_get_sibling(cur, left, &rrptr, XFS_BB_RIGHTSIB); 2451 xfs_btree_set_sibling(cur, right, &rrptr, XFS_BB_RIGHTSIB); 2452 xfs_btree_set_sibling(cur, right, &lptr, XFS_BB_LEFTSIB); 2453 xfs_btree_set_sibling(cur, left, &rptr, XFS_BB_RIGHTSIB); 2454 2455 lrecs -= rrecs; 2456 xfs_btree_set_numrecs(left, lrecs); 2457 xfs_btree_set_numrecs(right, xfs_btree_get_numrecs(right) + rrecs); 2458 2459 xfs_btree_log_block(cur, rbp, XFS_BB_ALL_BITS); 2460 xfs_btree_log_block(cur, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB); 2461 2462 /* 2463 * If there's a block to the new block's right, make that block 2464 * point back to right instead of to left. 2465 */ 2466 if (!xfs_btree_ptr_is_null(cur, &rrptr)) { 2467 error = xfs_btree_read_buf_block(cur, &rrptr, 2468 0, &rrblock, &rrbp); 2469 if (error) 2470 goto error0; 2471 xfs_btree_set_sibling(cur, rrblock, &rptr, XFS_BB_LEFTSIB); 2472 xfs_btree_log_block(cur, rrbp, XFS_BB_LEFTSIB); 2473 } 2474 /* 2475 * If the cursor is really in the right block, move it there. 2476 * If it's just pointing past the last entry in left, then we'll 2477 * insert there, so don't change anything in that case. 2478 */ 2479 if (cur->bc_ptrs[level] > lrecs + 1) { 2480 xfs_btree_setbuf(cur, level, rbp); 2481 cur->bc_ptrs[level] -= lrecs; 2482 } 2483 /* 2484 * If there are more levels, we'll need another cursor which refers 2485 * the right block, no matter where this cursor was. 2486 */ 2487 if (level + 1 < cur->bc_nlevels) { 2488 error = xfs_btree_dup_cursor(cur, curp); 2489 if (error) 2490 goto error0; 2491 (*curp)->bc_ptrs[level + 1]++; 2492 } 2493 *ptrp = rptr; 2494 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); 2495 *stat = 1; 2496 return 0; 2497 out0: 2498 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); 2499 *stat = 0; 2500 return 0; 2501 2502 error0: 2503 XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR); 2504 return error; 2505 } 2506 2507 struct xfs_btree_split_args { 2508 struct xfs_btree_cur *cur; 2509 int level; 2510 union xfs_btree_ptr *ptrp; 2511 union xfs_btree_key *key; 2512 struct xfs_btree_cur **curp; 2513 int *stat; /* success/failure */ 2514 int result; 2515 bool kswapd; /* allocation in kswapd context */ 2516 struct completion *done; 2517 struct work_struct work; 2518 }; 2519 2520 /* 2521 * Stack switching interfaces for allocation 2522 */ 2523 static void 2524 xfs_btree_split_worker( 2525 struct work_struct *work) 2526 { 2527 struct xfs_btree_split_args *args = container_of(work, 2528 struct xfs_btree_split_args, work); 2529 unsigned long pflags; 2530 unsigned long new_pflags = PF_FSTRANS; 2531 2532 /* 2533 * we are in a transaction context here, but may also be doing work 2534 * in kswapd context, and hence we may need to inherit that state 2535 * temporarily to ensure that we don't block waiting for memory reclaim 2536 * in any way. 
2537 */ 2538 if (args->kswapd) 2539 new_pflags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD; 2540 2541 current_set_flags_nested(&pflags, new_pflags); 2542 2543 args->result = __xfs_btree_split(args->cur, args->level, args->ptrp, 2544 args->key, args->curp, args->stat); 2545 complete(args->done); 2546 2547 current_restore_flags_nested(&pflags, new_pflags); 2548 } 2549 2550 /* 2551 * BMBT split requests often come in with little stack to work on. Push 2552 * them off to a worker thread so there is lots of stack to use. For the other 2553 * btree types, just call directly to avoid the context switch overhead here. 2554 */ 2555 STATIC int /* error */ 2556 xfs_btree_split( 2557 struct xfs_btree_cur *cur, 2558 int level, 2559 union xfs_btree_ptr *ptrp, 2560 union xfs_btree_key *key, 2561 struct xfs_btree_cur **curp, 2562 int *stat) /* success/failure */ 2563 { 2564 struct xfs_btree_split_args args; 2565 DECLARE_COMPLETION_ONSTACK(done); 2566 2567 if (cur->bc_btnum != XFS_BTNUM_BMAP) 2568 return __xfs_btree_split(cur, level, ptrp, key, curp, stat); 2569 2570 args.cur = cur; 2571 args.level = level; 2572 args.ptrp = ptrp; 2573 args.key = key; 2574 args.curp = curp; 2575 args.stat = stat; 2576 args.done = &done; 2577 args.kswapd = current_is_kswapd(); 2578 INIT_WORK_ONSTACK(&args.work, xfs_btree_split_worker); 2579 queue_work(xfs_alloc_wq, &args.work); 2580 wait_for_completion(&done); 2581 destroy_work_on_stack(&args.work); 2582 return args.result; 2583 } 2584 2585 2586 /* 2587 * Copy the old inode root contents into a real block and make the 2588 * broot point to it. 2589 */ 2590 int /* error */ 2591 xfs_btree_new_iroot( 2592 struct xfs_btree_cur *cur, /* btree cursor */ 2593 int *logflags, /* logging flags for inode */ 2594 int *stat) /* return status - 0 fail */ 2595 { 2596 struct xfs_buf *cbp; /* buffer for cblock */ 2597 struct xfs_btree_block *block; /* btree block */ 2598 struct xfs_btree_block *cblock; /* child btree block */ 2599 union xfs_btree_key *ckp; /* child key pointer */ 2600 union xfs_btree_ptr *cpp; /* child ptr pointer */ 2601 union xfs_btree_key *kp; /* pointer to btree key */ 2602 union xfs_btree_ptr *pp; /* pointer to block addr */ 2603 union xfs_btree_ptr nptr; /* new block addr */ 2604 int level; /* btree level */ 2605 int error; /* error return code */ 2606 #ifdef DEBUG 2607 int i; /* loop counter */ 2608 #endif 2609 2610 XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY); 2611 XFS_BTREE_STATS_INC(cur, newroot); 2612 2613 ASSERT(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE); 2614 2615 level = cur->bc_nlevels - 1; 2616 2617 block = xfs_btree_get_iroot(cur); 2618 pp = xfs_btree_ptr_addr(cur, 1, block); 2619 2620 /* Allocate the new block. If we can't do it, we're toast. Give up. */ 2621 error = cur->bc_ops->alloc_block(cur, pp, &nptr, stat); 2622 if (error) 2623 goto error0; 2624 if (*stat == 0) { 2625 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); 2626 return 0; 2627 } 2628 XFS_BTREE_STATS_INC(cur, alloc); 2629 2630 /* Copy the root into a real block. */ 2631 error = xfs_btree_get_buf_block(cur, &nptr, 0, &cblock, &cbp); 2632 if (error) 2633 goto error0; 2634 2635 /* 2636 * we can't just memcpy() the root in for CRC enabled btree blocks. 
2637 * In that case have to also ensure the blkno remains correct 2638 */ 2639 memcpy(cblock, block, xfs_btree_block_len(cur)); 2640 if (cur->bc_flags & XFS_BTREE_CRC_BLOCKS) { 2641 if (cur->bc_flags & XFS_BTREE_LONG_PTRS) 2642 cblock->bb_u.l.bb_blkno = cpu_to_be64(cbp->b_bn); 2643 else 2644 cblock->bb_u.s.bb_blkno = cpu_to_be64(cbp->b_bn); 2645 } 2646 2647 be16_add_cpu(&block->bb_level, 1); 2648 xfs_btree_set_numrecs(block, 1); 2649 cur->bc_nlevels++; 2650 cur->bc_ptrs[level + 1] = 1; 2651 2652 kp = xfs_btree_key_addr(cur, 1, block); 2653 ckp = xfs_btree_key_addr(cur, 1, cblock); 2654 xfs_btree_copy_keys(cur, ckp, kp, xfs_btree_get_numrecs(cblock)); 2655 2656 cpp = xfs_btree_ptr_addr(cur, 1, cblock); 2657 #ifdef DEBUG 2658 for (i = 0; i < be16_to_cpu(cblock->bb_numrecs); i++) { 2659 error = xfs_btree_check_ptr(cur, pp, i, level); 2660 if (error) 2661 goto error0; 2662 } 2663 #endif 2664 xfs_btree_copy_ptrs(cur, cpp, pp, xfs_btree_get_numrecs(cblock)); 2665 2666 #ifdef DEBUG 2667 error = xfs_btree_check_ptr(cur, &nptr, 0, level); 2668 if (error) 2669 goto error0; 2670 #endif 2671 xfs_btree_copy_ptrs(cur, pp, &nptr, 1); 2672 2673 xfs_iroot_realloc(cur->bc_private.b.ip, 2674 1 - xfs_btree_get_numrecs(cblock), 2675 cur->bc_private.b.whichfork); 2676 2677 xfs_btree_setbuf(cur, level, cbp); 2678 2679 /* 2680 * Do all this logging at the end so that 2681 * the root is at the right level. 2682 */ 2683 xfs_btree_log_block(cur, cbp, XFS_BB_ALL_BITS); 2684 xfs_btree_log_keys(cur, cbp, 1, be16_to_cpu(cblock->bb_numrecs)); 2685 xfs_btree_log_ptrs(cur, cbp, 1, be16_to_cpu(cblock->bb_numrecs)); 2686 2687 *logflags |= 2688 XFS_ILOG_CORE | xfs_ilog_fbroot(cur->bc_private.b.whichfork); 2689 *stat = 1; 2690 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); 2691 return 0; 2692 error0: 2693 XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR); 2694 return error; 2695 } 2696 2697 /* 2698 * Allocate a new root block, fill it in. 2699 */ 2700 STATIC int /* error */ 2701 xfs_btree_new_root( 2702 struct xfs_btree_cur *cur, /* btree cursor */ 2703 int *stat) /* success/failure */ 2704 { 2705 struct xfs_btree_block *block; /* one half of the old root block */ 2706 struct xfs_buf *bp; /* buffer containing block */ 2707 int error; /* error return value */ 2708 struct xfs_buf *lbp; /* left buffer pointer */ 2709 struct xfs_btree_block *left; /* left btree block */ 2710 struct xfs_buf *nbp; /* new (root) buffer */ 2711 struct xfs_btree_block *new; /* new (root) btree block */ 2712 int nptr; /* new value for key index, 1 or 2 */ 2713 struct xfs_buf *rbp; /* right buffer pointer */ 2714 struct xfs_btree_block *right; /* right btree block */ 2715 union xfs_btree_ptr rptr; 2716 union xfs_btree_ptr lptr; 2717 2718 XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY); 2719 XFS_BTREE_STATS_INC(cur, newroot); 2720 2721 /* initialise our start point from the cursor */ 2722 cur->bc_ops->init_ptr_from_cur(cur, &rptr); 2723 2724 /* Allocate the new block. If we can't do it, we're toast. Give up. */ 2725 error = cur->bc_ops->alloc_block(cur, &rptr, &lptr, stat); 2726 if (error) 2727 goto error0; 2728 if (*stat == 0) 2729 goto out0; 2730 XFS_BTREE_STATS_INC(cur, alloc); 2731 2732 /* Set up the new block. */ 2733 error = xfs_btree_get_buf_block(cur, &lptr, 0, &new, &nbp); 2734 if (error) 2735 goto error0; 2736 2737 /* Set the root in the holding structure increasing the level by 1. */ 2738 cur->bc_ops->set_root(cur, &lptr, 1); 2739 2740 /* 2741 * At the previous root level there are now two blocks: the old root, 2742 * and the new block generated when it was split. 
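 * (Schematically, if the old one-block root A has just split into A and
 * a new block B, the root allocated here ends up with exactly two
 * entries, key(A) and key(B), pointing down at A and B respectively,
 * and bc_nlevels grows by one.)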
We don't know which 2743 * one the cursor is pointing at, so we set up variables "left" and 2744 * "right" for each case. 2745 */ 2746 block = xfs_btree_get_block(cur, cur->bc_nlevels - 1, &bp); 2747 2748 #ifdef DEBUG 2749 error = xfs_btree_check_block(cur, block, cur->bc_nlevels - 1, bp); 2750 if (error) 2751 goto error0; 2752 #endif 2753 2754 xfs_btree_get_sibling(cur, block, &rptr, XFS_BB_RIGHTSIB); 2755 if (!xfs_btree_ptr_is_null(cur, &rptr)) { 2756 /* Our block is left, pick up the right block. */ 2757 lbp = bp; 2758 xfs_btree_buf_to_ptr(cur, lbp, &lptr); 2759 left = block; 2760 error = xfs_btree_read_buf_block(cur, &rptr, 0, &right, &rbp); 2761 if (error) 2762 goto error0; 2763 bp = rbp; 2764 nptr = 1; 2765 } else { 2766 /* Our block is right, pick up the left block. */ 2767 rbp = bp; 2768 xfs_btree_buf_to_ptr(cur, rbp, &rptr); 2769 right = block; 2770 xfs_btree_get_sibling(cur, right, &lptr, XFS_BB_LEFTSIB); 2771 error = xfs_btree_read_buf_block(cur, &lptr, 0, &left, &lbp); 2772 if (error) 2773 goto error0; 2774 bp = lbp; 2775 nptr = 2; 2776 } 2777 /* Fill in the new block's btree header and log it. */ 2778 xfs_btree_init_block_cur(cur, nbp, cur->bc_nlevels, 2); 2779 xfs_btree_log_block(cur, nbp, XFS_BB_ALL_BITS); 2780 ASSERT(!xfs_btree_ptr_is_null(cur, &lptr) && 2781 !xfs_btree_ptr_is_null(cur, &rptr)); 2782 2783 /* Fill in the key data in the new root. */ 2784 if (xfs_btree_get_level(left) > 0) { 2785 xfs_btree_copy_keys(cur, 2786 xfs_btree_key_addr(cur, 1, new), 2787 xfs_btree_key_addr(cur, 1, left), 1); 2788 xfs_btree_copy_keys(cur, 2789 xfs_btree_key_addr(cur, 2, new), 2790 xfs_btree_key_addr(cur, 1, right), 1); 2791 } else { 2792 cur->bc_ops->init_key_from_rec( 2793 xfs_btree_key_addr(cur, 1, new), 2794 xfs_btree_rec_addr(cur, 1, left)); 2795 cur->bc_ops->init_key_from_rec( 2796 xfs_btree_key_addr(cur, 2, new), 2797 xfs_btree_rec_addr(cur, 1, right)); 2798 } 2799 xfs_btree_log_keys(cur, nbp, 1, 2); 2800 2801 /* Fill in the pointer data in the new root. */ 2802 xfs_btree_copy_ptrs(cur, 2803 xfs_btree_ptr_addr(cur, 1, new), &lptr, 1); 2804 xfs_btree_copy_ptrs(cur, 2805 xfs_btree_ptr_addr(cur, 2, new), &rptr, 1); 2806 xfs_btree_log_ptrs(cur, nbp, 1, 2); 2807 2808 /* Fix up the cursor. */ 2809 xfs_btree_setbuf(cur, cur->bc_nlevels, nbp); 2810 cur->bc_ptrs[cur->bc_nlevels] = nptr; 2811 cur->bc_nlevels++; 2812 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); 2813 *stat = 1; 2814 return 0; 2815 error0: 2816 XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR); 2817 return error; 2818 out0: 2819 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); 2820 *stat = 0; 2821 return 0; 2822 } 2823 2824 STATIC int 2825 xfs_btree_make_block_unfull( 2826 struct xfs_btree_cur *cur, /* btree cursor */ 2827 int level, /* btree level */ 2828 int numrecs,/* # of recs in block */ 2829 int *oindex,/* old tree index */ 2830 int *index, /* new tree index */ 2831 union xfs_btree_ptr *nptr, /* new btree ptr */ 2832 struct xfs_btree_cur **ncur, /* new btree cursor */ 2833 union xfs_btree_rec *nrec, /* new record */ 2834 int *stat) 2835 { 2836 union xfs_btree_key key; /* new btree key value */ 2837 int error = 0; 2838 2839 if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) && 2840 level == cur->bc_nlevels - 1) { 2841 struct xfs_inode *ip = cur->bc_private.b.ip; 2842 2843 if (numrecs < cur->bc_ops->get_dmaxrecs(cur, level)) { 2844 /* A root block that can be made bigger. 
*/ 2845 xfs_iroot_realloc(ip, 1, cur->bc_private.b.whichfork); 2846 } else { 2847 /* A root block that needs replacing */ 2848 int logflags = 0; 2849 2850 error = xfs_btree_new_iroot(cur, &logflags, stat); 2851 if (error || *stat == 0) 2852 return error; 2853 2854 xfs_trans_log_inode(cur->bc_tp, ip, logflags); 2855 } 2856 2857 return 0; 2858 } 2859 2860 /* First, try shifting an entry to the right neighbor. */ 2861 error = xfs_btree_rshift(cur, level, stat); 2862 if (error || *stat) 2863 return error; 2864 2865 /* Next, try shifting an entry to the left neighbor. */ 2866 error = xfs_btree_lshift(cur, level, stat); 2867 if (error) 2868 return error; 2869 2870 if (*stat) { 2871 *oindex = *index = cur->bc_ptrs[level]; 2872 return 0; 2873 } 2874 2875 /* 2876 * Next, try splitting the current block in half. 2877 * 2878 * If this works we have to re-set our variables because we 2879 * could be in a different block now. 2880 */ 2881 error = xfs_btree_split(cur, level, nptr, &key, ncur, stat); 2882 if (error || *stat == 0) 2883 return error; 2884 2885 2886 *index = cur->bc_ptrs[level]; 2887 cur->bc_ops->init_rec_from_key(&key, nrec); 2888 return 0; 2889 } 2890 2891 /* 2892 * Insert one record/level. Return information to the caller 2893 * allowing the next level up to proceed if necessary. 2894 */ 2895 STATIC int 2896 xfs_btree_insrec( 2897 struct xfs_btree_cur *cur, /* btree cursor */ 2898 int level, /* level to insert record at */ 2899 union xfs_btree_ptr *ptrp, /* i/o: block number inserted */ 2900 union xfs_btree_rec *recp, /* i/o: record data inserted */ 2901 struct xfs_btree_cur **curp, /* output: new cursor replacing cur */ 2902 int *stat) /* success/failure */ 2903 { 2904 struct xfs_btree_block *block; /* btree block */ 2905 struct xfs_buf *bp; /* buffer for block */ 2906 union xfs_btree_key key; /* btree key */ 2907 union xfs_btree_ptr nptr; /* new block ptr */ 2908 struct xfs_btree_cur *ncur; /* new btree cursor */ 2909 union xfs_btree_rec nrec; /* new record count */ 2910 int optr; /* old key/record index */ 2911 int ptr; /* key/record index */ 2912 int numrecs;/* number of records */ 2913 int error; /* error return value */ 2914 #ifdef DEBUG 2915 int i; 2916 #endif 2917 2918 XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY); 2919 XFS_BTREE_TRACE_ARGIPR(cur, level, *ptrp, recp); 2920 2921 ncur = NULL; 2922 2923 /* 2924 * If we have an external root pointer, and we've made it to the 2925 * root level, allocate a new root block and we're done. 2926 */ 2927 if (!(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) && 2928 (level >= cur->bc_nlevels)) { 2929 error = xfs_btree_new_root(cur, stat); 2930 xfs_btree_set_ptr_null(cur, ptrp); 2931 2932 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); 2933 return error; 2934 } 2935 2936 /* If we're off the left edge, return failure. */ 2937 ptr = cur->bc_ptrs[level]; 2938 if (ptr == 0) { 2939 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); 2940 *stat = 0; 2941 return 0; 2942 } 2943 2944 /* Make a key out of the record data to be inserted, and save it. */ 2945 cur->bc_ops->init_key_from_rec(&key, recp); 2946 2947 optr = ptr; 2948 2949 XFS_BTREE_STATS_INC(cur, insrec); 2950 2951 /* Get pointers to the btree buffer and block. */ 2952 block = xfs_btree_get_block(cur, level, &bp); 2953 numrecs = xfs_btree_get_numrecs(block); 2954 2955 #ifdef DEBUG 2956 error = xfs_btree_check_block(cur, block, level, bp); 2957 if (error) 2958 goto error0; 2959 2960 /* Check that the new entry is being inserted in the right place. 
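 * (That is, when an entry already exists at the insertion index, placing
 * the new record or key just before it must keep the block sorted;
 * recs_inorder()/keys_inorder() encode each btree type's ordering, so
 * this assert only fires on a miscomputed insertion point.)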
*/ 2961 if (ptr <= numrecs) { 2962 if (level == 0) { 2963 ASSERT(cur->bc_ops->recs_inorder(cur, recp, 2964 xfs_btree_rec_addr(cur, ptr, block))); 2965 } else { 2966 ASSERT(cur->bc_ops->keys_inorder(cur, &key, 2967 xfs_btree_key_addr(cur, ptr, block))); 2968 } 2969 } 2970 #endif 2971 2972 /* 2973 * If the block is full, we can't insert the new entry until we 2974 * make the block un-full. 2975 */ 2976 xfs_btree_set_ptr_null(cur, &nptr); 2977 if (numrecs == cur->bc_ops->get_maxrecs(cur, level)) { 2978 error = xfs_btree_make_block_unfull(cur, level, numrecs, 2979 &optr, &ptr, &nptr, &ncur, &nrec, stat); 2980 if (error || *stat == 0) 2981 goto error0; 2982 } 2983 2984 /* 2985 * The current block may have changed if the block was 2986 * previously full and we have just made space in it. 2987 */ 2988 block = xfs_btree_get_block(cur, level, &bp); 2989 numrecs = xfs_btree_get_numrecs(block); 2990 2991 #ifdef DEBUG 2992 error = xfs_btree_check_block(cur, block, level, bp); 2993 if (error) 2994 return error; 2995 #endif 2996 2997 /* 2998 * At this point we know there's room for our new entry in the block 2999 * we're pointing at. 3000 */ 3001 XFS_BTREE_STATS_ADD(cur, moves, numrecs - ptr + 1); 3002 3003 if (level > 0) { 3004 /* It's a nonleaf. make a hole in the keys and ptrs */ 3005 union xfs_btree_key *kp; 3006 union xfs_btree_ptr *pp; 3007 3008 kp = xfs_btree_key_addr(cur, ptr, block); 3009 pp = xfs_btree_ptr_addr(cur, ptr, block); 3010 3011 #ifdef DEBUG 3012 for (i = numrecs - ptr; i >= 0; i--) { 3013 error = xfs_btree_check_ptr(cur, pp, i, level); 3014 if (error) 3015 return error; 3016 } 3017 #endif 3018 3019 xfs_btree_shift_keys(cur, kp, 1, numrecs - ptr + 1); 3020 xfs_btree_shift_ptrs(cur, pp, 1, numrecs - ptr + 1); 3021 3022 #ifdef DEBUG 3023 error = xfs_btree_check_ptr(cur, ptrp, 0, level); 3024 if (error) 3025 goto error0; 3026 #endif 3027 3028 /* Now put the new data in, bump numrecs and log it. */ 3029 xfs_btree_copy_keys(cur, kp, &key, 1); 3030 xfs_btree_copy_ptrs(cur, pp, ptrp, 1); 3031 numrecs++; 3032 xfs_btree_set_numrecs(block, numrecs); 3033 xfs_btree_log_ptrs(cur, bp, ptr, numrecs); 3034 xfs_btree_log_keys(cur, bp, ptr, numrecs); 3035 #ifdef DEBUG 3036 if (ptr < numrecs) { 3037 ASSERT(cur->bc_ops->keys_inorder(cur, kp, 3038 xfs_btree_key_addr(cur, ptr + 1, block))); 3039 } 3040 #endif 3041 } else { 3042 /* It's a leaf. make a hole in the records */ 3043 union xfs_btree_rec *rp; 3044 3045 rp = xfs_btree_rec_addr(cur, ptr, block); 3046 3047 xfs_btree_shift_recs(cur, rp, 1, numrecs - ptr + 1); 3048 3049 /* Now put the new data in, bump numrecs and log it. */ 3050 xfs_btree_copy_recs(cur, rp, recp, 1); 3051 xfs_btree_set_numrecs(block, ++numrecs); 3052 xfs_btree_log_recs(cur, bp, ptr, numrecs); 3053 #ifdef DEBUG 3054 if (ptr < numrecs) { 3055 ASSERT(cur->bc_ops->recs_inorder(cur, rp, 3056 xfs_btree_rec_addr(cur, ptr + 1, block))); 3057 } 3058 #endif 3059 } 3060 3061 /* Log the new number of records in the btree header. */ 3062 xfs_btree_log_block(cur, bp, XFS_BB_NUMRECS); 3063 3064 /* If we inserted at the start of a block, update the parents' keys. */ 3065 if (optr == 1) { 3066 error = xfs_btree_updkey(cur, &key, level + 1); 3067 if (error) 3068 goto error0; 3069 } 3070 3071 /* 3072 * If we are tracking the last record in the tree and 3073 * we are at the far right edge of the tree, update it. 
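 * (Only btrees created with XFS_BTREE_LASTREC_UPDATE set take this path;
 * the by-count free space btree, for example, uses it to keep the AGF's
 * longest-free-extent field current. Other btree types have no
 * update_lastrec operation.)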
3074 */ 3075 if (xfs_btree_is_lastrec(cur, block, level)) { 3076 cur->bc_ops->update_lastrec(cur, block, recp, 3077 ptr, LASTREC_INSREC); 3078 } 3079 3080 /* 3081 * Return the new block number, if any. 3082 * If there is one, give back a record value and a cursor too. 3083 */ 3084 *ptrp = nptr; 3085 if (!xfs_btree_ptr_is_null(cur, &nptr)) { 3086 *recp = nrec; 3087 *curp = ncur; 3088 } 3089 3090 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); 3091 *stat = 1; 3092 return 0; 3093 3094 error0: 3095 XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR); 3096 return error; 3097 } 3098 3099 /* 3100 * Insert the record at the point referenced by cur. 3101 * 3102 * A multi-level split of the tree on insert will invalidate the original 3103 * cursor. All callers of this function should assume that the cursor is 3104 * no longer valid and revalidate it. 3105 */ 3106 int 3107 xfs_btree_insert( 3108 struct xfs_btree_cur *cur, 3109 int *stat) 3110 { 3111 int error; /* error return value */ 3112 int i; /* result value, 0 for failure */ 3113 int level; /* current level number in btree */ 3114 union xfs_btree_ptr nptr; /* new block number (split result) */ 3115 struct xfs_btree_cur *ncur; /* new cursor (split result) */ 3116 struct xfs_btree_cur *pcur; /* previous level's cursor */ 3117 union xfs_btree_rec rec; /* record to insert */ 3118 3119 level = 0; 3120 ncur = NULL; 3121 pcur = cur; 3122 3123 xfs_btree_set_ptr_null(cur, &nptr); 3124 cur->bc_ops->init_rec_from_cur(cur, &rec); 3125 3126 /* 3127 * Loop going up the tree, starting at the leaf level. 3128 * Stop when we don't get a split block, that must mean that 3129 * the insert is finished with this level. 3130 */ 3131 do { 3132 /* 3133 * Insert nrec/nptr into this level of the tree. 3134 * Note if we fail, nptr will be null. 3135 */ 3136 error = xfs_btree_insrec(pcur, level, &nptr, &rec, &ncur, &i); 3137 if (error) { 3138 if (pcur != cur) 3139 xfs_btree_del_cursor(pcur, XFS_BTREE_ERROR); 3140 goto error0; 3141 } 3142 3143 XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0); 3144 level++; 3145 3146 /* 3147 * See if the cursor we just used is trash. 3148 * Can't trash the caller's cursor, but otherwise we should 3149 * if ncur is a new cursor or we're about to be done. 3150 */ 3151 if (pcur != cur && 3152 (ncur || xfs_btree_ptr_is_null(cur, &nptr))) { 3153 /* Save the state from the cursor before we trash it */ 3154 if (cur->bc_ops->update_cursor) 3155 cur->bc_ops->update_cursor(pcur, cur); 3156 cur->bc_nlevels = pcur->bc_nlevels; 3157 xfs_btree_del_cursor(pcur, XFS_BTREE_NOERROR); 3158 } 3159 /* If we got a new cursor, switch to it. */ 3160 if (ncur) { 3161 pcur = ncur; 3162 ncur = NULL; 3163 } 3164 } while (!xfs_btree_ptr_is_null(cur, &nptr)); 3165 3166 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); 3167 *stat = i; 3168 return 0; 3169 error0: 3170 XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR); 3171 return error; 3172 } 3173 3174 /* 3175 * Try to merge a non-leaf block back into the inode root. 3176 * 3177 * Note: the killroot names comes from the fact that we're effectively 3178 * killing the old root block. But because we can't just delete the 3179 * inode we have to copy the single block it was pointing to into the 3180 * inode. 
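 *
 * (Schematically: the inode-fork root holds a single key/ptr pair
 * pointing at one child block; we copy that child's keys and pointers
 * up into the fork root, free the child block, and drop bc_nlevels by
 * one.)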
3181 */ 3182 STATIC int 3183 xfs_btree_kill_iroot( 3184 struct xfs_btree_cur *cur) 3185 { 3186 int whichfork = cur->bc_private.b.whichfork; 3187 struct xfs_inode *ip = cur->bc_private.b.ip; 3188 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork); 3189 struct xfs_btree_block *block; 3190 struct xfs_btree_block *cblock; 3191 union xfs_btree_key *kp; 3192 union xfs_btree_key *ckp; 3193 union xfs_btree_ptr *pp; 3194 union xfs_btree_ptr *cpp; 3195 struct xfs_buf *cbp; 3196 int level; 3197 int index; 3198 int numrecs; 3199 #ifdef DEBUG 3200 union xfs_btree_ptr ptr; 3201 int i; 3202 #endif 3203 3204 XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY); 3205 3206 ASSERT(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE); 3207 ASSERT(cur->bc_nlevels > 1); 3208 3209 /* 3210 * Don't deal with the case where the root block needs to be a leaf. 3211 * We're just going to turn the thing back into extents anyway. 3212 */ 3213 level = cur->bc_nlevels - 1; 3214 if (level == 1) 3215 goto out0; 3216 3217 /* 3218 * Give up if the root has multiple children. 3219 */ 3220 block = xfs_btree_get_iroot(cur); 3221 if (xfs_btree_get_numrecs(block) != 1) 3222 goto out0; 3223 3224 cblock = xfs_btree_get_block(cur, level - 1, &cbp); 3225 numrecs = xfs_btree_get_numrecs(cblock); 3226 3227 /* 3228 * Only do this if the next level will fit. 3229 * Then the data must be copied up to the inode; 3230 * instead of freeing the root, you free the next level. 3231 */ 3232 if (numrecs > cur->bc_ops->get_dmaxrecs(cur, level)) 3233 goto out0; 3234 3235 XFS_BTREE_STATS_INC(cur, killroot); 3236 3237 #ifdef DEBUG 3238 xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_LEFTSIB); 3239 ASSERT(xfs_btree_ptr_is_null(cur, &ptr)); 3240 xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_RIGHTSIB); 3241 ASSERT(xfs_btree_ptr_is_null(cur, &ptr)); 3242 #endif 3243 3244 index = numrecs - cur->bc_ops->get_maxrecs(cur, level); 3245 if (index) { 3246 xfs_iroot_realloc(cur->bc_private.b.ip, index, 3247 cur->bc_private.b.whichfork); 3248 block = ifp->if_broot; 3249 } 3250 3251 be16_add_cpu(&block->bb_numrecs, index); 3252 ASSERT(block->bb_numrecs == cblock->bb_numrecs); 3253 3254 kp = xfs_btree_key_addr(cur, 1, block); 3255 ckp = xfs_btree_key_addr(cur, 1, cblock); 3256 xfs_btree_copy_keys(cur, kp, ckp, numrecs); 3257 3258 pp = xfs_btree_ptr_addr(cur, 1, block); 3259 cpp = xfs_btree_ptr_addr(cur, 1, cblock); 3260 #ifdef DEBUG 3261 for (i = 0; i < numrecs; i++) { 3262 int error; 3263 3264 error = xfs_btree_check_ptr(cur, cpp, i, level - 1); 3265 if (error) { 3266 XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR); 3267 return error; 3268 } 3269 } 3270 #endif 3271 xfs_btree_copy_ptrs(cur, pp, cpp, numrecs); 3272 3273 cur->bc_ops->free_block(cur, cbp); 3274 XFS_BTREE_STATS_INC(cur, free); 3275 3276 cur->bc_bufs[level - 1] = NULL; 3277 be16_add_cpu(&block->bb_level, -1); 3278 xfs_trans_log_inode(cur->bc_tp, ip, 3279 XFS_ILOG_CORE | xfs_ilog_fbroot(cur->bc_private.b.whichfork)); 3280 cur->bc_nlevels--; 3281 out0: 3282 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); 3283 return 0; 3284 } 3285 3286 /* 3287 * Kill the current root node, and replace it with its only child node. 3288 */ 3289 STATIC int 3290 xfs_btree_kill_root( 3291 struct xfs_btree_cur *cur, 3292 struct xfs_buf *bp, 3293 int level, 3294 union xfs_btree_ptr *newroot) 3295 { 3296 int error; 3297 3298 XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY); 3299 XFS_BTREE_STATS_INC(cur, killroot); 3300 3301 /* 3302 * Update the root pointer, decreasing the level by 1 and then 3303 * free the old root.
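 * (set_root() takes a level delta rather than an absolute level, so the
 * -1 below both installs *newroot as the root and records that the tree
 * is one level shorter; bc_nlevels itself is decremented further down.)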
3304 */ 3305 cur->bc_ops->set_root(cur, newroot, -1); 3306 3307 error = cur->bc_ops->free_block(cur, bp); 3308 if (error) { 3309 XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR); 3310 return error; 3311 } 3312 3313 XFS_BTREE_STATS_INC(cur, free); 3314 3315 cur->bc_bufs[level] = NULL; 3316 cur->bc_ra[level] = 0; 3317 cur->bc_nlevels--; 3318 3319 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); 3320 return 0; 3321 } 3322 3323 STATIC int 3324 xfs_btree_dec_cursor( 3325 struct xfs_btree_cur *cur, 3326 int level, 3327 int *stat) 3328 { 3329 int error; 3330 int i; 3331 3332 if (level > 0) { 3333 error = xfs_btree_decrement(cur, level, &i); 3334 if (error) 3335 return error; 3336 } 3337 3338 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); 3339 *stat = 1; 3340 return 0; 3341 } 3342 3343 /* 3344 * Single level of the btree record deletion routine. 3345 * Delete record pointed to by cur/level. 3346 * Remove the record from its block then rebalance the tree. 3347 * Return 0 for error, 1 for done, 2 to go on to the next level. 3348 */ 3349 STATIC int /* error */ 3350 xfs_btree_delrec( 3351 struct xfs_btree_cur *cur, /* btree cursor */ 3352 int level, /* level removing record from */ 3353 int *stat) /* fail/done/go-on */ 3354 { 3355 struct xfs_btree_block *block; /* btree block */ 3356 union xfs_btree_ptr cptr; /* current block ptr */ 3357 struct xfs_buf *bp; /* buffer for block */ 3358 int error; /* error return value */ 3359 int i; /* loop counter */ 3360 union xfs_btree_key key; /* storage for keyp */ 3361 union xfs_btree_key *keyp = &key; /* passed to the next level */ 3362 union xfs_btree_ptr lptr; /* left sibling block ptr */ 3363 struct xfs_buf *lbp; /* left buffer pointer */ 3364 struct xfs_btree_block *left; /* left btree block */ 3365 int lrecs = 0; /* left record count */ 3366 int ptr; /* key/record index */ 3367 union xfs_btree_ptr rptr; /* right sibling block ptr */ 3368 struct xfs_buf *rbp; /* right buffer pointer */ 3369 struct xfs_btree_block *right; /* right btree block */ 3370 struct xfs_btree_block *rrblock; /* right-right btree block */ 3371 struct xfs_buf *rrbp; /* right-right buffer pointer */ 3372 int rrecs = 0; /* right record count */ 3373 struct xfs_btree_cur *tcur; /* temporary btree cursor */ 3374 int numrecs; /* temporary numrec count */ 3375 3376 XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY); 3377 XFS_BTREE_TRACE_ARGI(cur, level); 3378 3379 tcur = NULL; 3380 3381 /* Get the index of the entry being deleted, check for nothing there. */ 3382 ptr = cur->bc_ptrs[level]; 3383 if (ptr == 0) { 3384 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); 3385 *stat = 0; 3386 return 0; 3387 } 3388 3389 /* Get the buffer & block containing the record or key/ptr. */ 3390 block = xfs_btree_get_block(cur, level, &bp); 3391 numrecs = xfs_btree_get_numrecs(block); 3392 3393 #ifdef DEBUG 3394 error = xfs_btree_check_block(cur, block, level, bp); 3395 if (error) 3396 goto error0; 3397 #endif 3398 3399 /* Fail if we're off the end of the block. */ 3400 if (ptr > numrecs) { 3401 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); 3402 *stat = 0; 3403 return 0; 3404 } 3405 3406 XFS_BTREE_STATS_INC(cur, delrec); 3407 XFS_BTREE_STATS_ADD(cur, moves, numrecs - ptr); 3408 3409 /* Excise the entries being deleted. */ 3410 if (level > 0) { 3411 /* It's a nonleaf. 
operate on keys and ptrs */ 3412 union xfs_btree_key *lkp; 3413 union xfs_btree_ptr *lpp; 3414 3415 lkp = xfs_btree_key_addr(cur, ptr + 1, block); 3416 lpp = xfs_btree_ptr_addr(cur, ptr + 1, block); 3417 3418 #ifdef DEBUG 3419 for (i = 0; i < numrecs - ptr; i++) { 3420 error = xfs_btree_check_ptr(cur, lpp, i, level); 3421 if (error) 3422 goto error0; 3423 } 3424 #endif 3425 3426 if (ptr < numrecs) { 3427 xfs_btree_shift_keys(cur, lkp, -1, numrecs - ptr); 3428 xfs_btree_shift_ptrs(cur, lpp, -1, numrecs - ptr); 3429 xfs_btree_log_keys(cur, bp, ptr, numrecs - 1); 3430 xfs_btree_log_ptrs(cur, bp, ptr, numrecs - 1); 3431 } 3432 3433 /* 3434 * If it's the first record in the block, we'll need to pass a 3435 * key up to the next level (updkey). 3436 */ 3437 if (ptr == 1) 3438 keyp = xfs_btree_key_addr(cur, 1, block); 3439 } else { 3440 /* It's a leaf. operate on records */ 3441 if (ptr < numrecs) { 3442 xfs_btree_shift_recs(cur, 3443 xfs_btree_rec_addr(cur, ptr + 1, block), 3444 -1, numrecs - ptr); 3445 xfs_btree_log_recs(cur, bp, ptr, numrecs - 1); 3446 } 3447 3448 /* 3449 * If it's the first record in the block, we'll need a key 3450 * structure to pass up to the next level (updkey). 3451 */ 3452 if (ptr == 1) { 3453 cur->bc_ops->init_key_from_rec(&key, 3454 xfs_btree_rec_addr(cur, 1, block)); 3455 keyp = &key; 3456 } 3457 } 3458 3459 /* 3460 * Decrement and log the number of entries in the block. 3461 */ 3462 xfs_btree_set_numrecs(block, --numrecs); 3463 xfs_btree_log_block(cur, bp, XFS_BB_NUMRECS); 3464 3465 /* 3466 * If we are tracking the last record in the tree and 3467 * we are at the far right edge of the tree, update it. 3468 */ 3469 if (xfs_btree_is_lastrec(cur, block, level)) { 3470 cur->bc_ops->update_lastrec(cur, block, NULL, 3471 ptr, LASTREC_DELREC); 3472 } 3473 3474 /* 3475 * We're at the root level. First, shrink the root block in-memory. 3476 * Try to get rid of the next level down. If we can't then there's 3477 * nothing left to do. 3478 */ 3479 if (level == cur->bc_nlevels - 1) { 3480 if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) { 3481 xfs_iroot_realloc(cur->bc_private.b.ip, -1, 3482 cur->bc_private.b.whichfork); 3483 3484 error = xfs_btree_kill_iroot(cur); 3485 if (error) 3486 goto error0; 3487 3488 error = xfs_btree_dec_cursor(cur, level, stat); 3489 if (error) 3490 goto error0; 3491 *stat = 1; 3492 return 0; 3493 } 3494 3495 /* 3496 * If this is the root level, and there's only one entry left, 3497 * and it's NOT the leaf level, then we can get rid of this 3498 * level. 3499 */ 3500 if (numrecs == 1 && level > 0) { 3501 union xfs_btree_ptr *pp; 3502 /* 3503 * pp is still set to the first pointer in the block. 3504 * Make it the new root of the btree. 3505 */ 3506 pp = xfs_btree_ptr_addr(cur, 1, block); 3507 error = xfs_btree_kill_root(cur, bp, level, pp); 3508 if (error) 3509 goto error0; 3510 } else if (level > 0) { 3511 error = xfs_btree_dec_cursor(cur, level, stat); 3512 if (error) 3513 goto error0; 3514 } 3515 *stat = 1; 3516 return 0; 3517 } 3518 3519 /* 3520 * If we deleted the leftmost entry in the block, update the 3521 * key values above us in the tree. 3522 */ 3523 if (ptr == 1) { 3524 error = xfs_btree_updkey(cur, keyp, level + 1); 3525 if (error) 3526 goto error0; 3527 } 3528 3529 /* 3530 * If the number of records remaining in the block is at least 3531 * the minimum, we're done. 
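 * (get_minrecs() is typically about half of get_maxrecs() for the level,
 * so a leaf that can hold, say, 16 records is left alone while at least
 * 8 remain; the exact threshold is per btree type and per level.)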
3532 */ 3533 if (numrecs >= cur->bc_ops->get_minrecs(cur, level)) { 3534 error = xfs_btree_dec_cursor(cur, level, stat); 3535 if (error) 3536 goto error0; 3537 return 0; 3538 } 3539 3540 /* 3541 * Otherwise, we have to move some records around to keep the 3542 * tree balanced. Look at the left and right sibling blocks to 3543 * see if we can re-balance by moving only one record. 3544 */ 3545 xfs_btree_get_sibling(cur, block, &rptr, XFS_BB_RIGHTSIB); 3546 xfs_btree_get_sibling(cur, block, &lptr, XFS_BB_LEFTSIB); 3547 3548 if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) { 3549 /* 3550 * One child of root, need to get a chance to copy its contents 3551 * into the root and delete it. Can't go up to next level, 3552 * there's nothing to delete there. 3553 */ 3554 if (xfs_btree_ptr_is_null(cur, &rptr) && 3555 xfs_btree_ptr_is_null(cur, &lptr) && 3556 level == cur->bc_nlevels - 2) { 3557 error = xfs_btree_kill_iroot(cur); 3558 if (!error) 3559 error = xfs_btree_dec_cursor(cur, level, stat); 3560 if (error) 3561 goto error0; 3562 return 0; 3563 } 3564 } 3565 3566 ASSERT(!xfs_btree_ptr_is_null(cur, &rptr) || 3567 !xfs_btree_ptr_is_null(cur, &lptr)); 3568 3569 /* 3570 * Duplicate the cursor so our btree manipulations here won't 3571 * disrupt the next level up. 3572 */ 3573 error = xfs_btree_dup_cursor(cur, &tcur); 3574 if (error) 3575 goto error0; 3576 3577 /* 3578 * If there's a right sibling, see if it's ok to shift an entry 3579 * out of it. 3580 */ 3581 if (!xfs_btree_ptr_is_null(cur, &rptr)) { 3582 /* 3583 * Move the temp cursor to the last entry in the next block. 3584 * Actually any entry but the first would suffice. 3585 */ 3586 i = xfs_btree_lastrec(tcur, level); 3587 XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0); 3588 3589 error = xfs_btree_increment(tcur, level, &i); 3590 if (error) 3591 goto error0; 3592 XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0); 3593 3594 i = xfs_btree_lastrec(tcur, level); 3595 XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0); 3596 3597 /* Grab a pointer to the block. */ 3598 right = xfs_btree_get_block(tcur, level, &rbp); 3599 #ifdef DEBUG 3600 error = xfs_btree_check_block(tcur, right, level, rbp); 3601 if (error) 3602 goto error0; 3603 #endif 3604 /* Grab the current block number, for future use. */ 3605 xfs_btree_get_sibling(tcur, right, &cptr, XFS_BB_LEFTSIB); 3606 3607 /* 3608 * If right block is full enough so that removing one entry 3609 * won't make it too empty, and left-shifting an entry out 3610 * of right to us works, we're done. 3611 */ 3612 if (xfs_btree_get_numrecs(right) - 1 >= 3613 cur->bc_ops->get_minrecs(tcur, level)) { 3614 error = xfs_btree_lshift(tcur, level, &i); 3615 if (error) 3616 goto error0; 3617 if (i) { 3618 ASSERT(xfs_btree_get_numrecs(block) >= 3619 cur->bc_ops->get_minrecs(tcur, level)); 3620 3621 xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR); 3622 tcur = NULL; 3623 3624 error = xfs_btree_dec_cursor(cur, level, stat); 3625 if (error) 3626 goto error0; 3627 return 0; 3628 } 3629 } 3630 3631 /* 3632 * Otherwise, grab the number of records in right for 3633 * future reference, and fix up the temp cursor to point 3634 * to our block again (last record). 
3635 */ 3636 rrecs = xfs_btree_get_numrecs(right); 3637 if (!xfs_btree_ptr_is_null(cur, &lptr)) { 3638 i = xfs_btree_firstrec(tcur, level); 3639 XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0); 3640 3641 error = xfs_btree_decrement(tcur, level, &i); 3642 if (error) 3643 goto error0; 3644 XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0); 3645 } 3646 } 3647 3648 /* 3649 * If there's a left sibling, see if it's ok to shift an entry 3650 * out of it. 3651 */ 3652 if (!xfs_btree_ptr_is_null(cur, &lptr)) { 3653 /* 3654 * Move the temp cursor to the first entry in the 3655 * previous block. 3656 */ 3657 i = xfs_btree_firstrec(tcur, level); 3658 XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0); 3659 3660 error = xfs_btree_decrement(tcur, level, &i); 3661 if (error) 3662 goto error0; 3663 i = xfs_btree_firstrec(tcur, level); 3664 XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0); 3665 3666 /* Grab a pointer to the block. */ 3667 left = xfs_btree_get_block(tcur, level, &lbp); 3668 #ifdef DEBUG 3669 error = xfs_btree_check_block(cur, left, level, lbp); 3670 if (error) 3671 goto error0; 3672 #endif 3673 /* Grab the current block number, for future use. */ 3674 xfs_btree_get_sibling(tcur, left, &cptr, XFS_BB_RIGHTSIB); 3675 3676 /* 3677 * If left block is full enough so that removing one entry 3678 * won't make it too empty, and right-shifting an entry out 3679 * of left to us works, we're done. 3680 */ 3681 if (xfs_btree_get_numrecs(left) - 1 >= 3682 cur->bc_ops->get_minrecs(tcur, level)) { 3683 error = xfs_btree_rshift(tcur, level, &i); 3684 if (error) 3685 goto error0; 3686 if (i) { 3687 ASSERT(xfs_btree_get_numrecs(block) >= 3688 cur->bc_ops->get_minrecs(tcur, level)); 3689 xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR); 3690 tcur = NULL; 3691 if (level == 0) 3692 cur->bc_ptrs[0]++; 3693 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); 3694 *stat = 1; 3695 return 0; 3696 } 3697 } 3698 3699 /* 3700 * Otherwise, grab the number of records in right for 3701 * future reference. 3702 */ 3703 lrecs = xfs_btree_get_numrecs(left); 3704 } 3705 3706 /* Delete the temp cursor, we're done with it. */ 3707 xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR); 3708 tcur = NULL; 3709 3710 /* If here, we need to do a join to keep the tree balanced. */ 3711 ASSERT(!xfs_btree_ptr_is_null(cur, &cptr)); 3712 3713 if (!xfs_btree_ptr_is_null(cur, &lptr) && 3714 lrecs + xfs_btree_get_numrecs(block) <= 3715 cur->bc_ops->get_maxrecs(cur, level)) { 3716 /* 3717 * Set "right" to be the starting block, 3718 * "left" to be the left neighbor. 3719 */ 3720 rptr = cptr; 3721 right = block; 3722 rbp = bp; 3723 error = xfs_btree_read_buf_block(cur, &lptr, 0, &left, &lbp); 3724 if (error) 3725 goto error0; 3726 3727 /* 3728 * If that won't work, see if we can join with the right neighbor block. 3729 */ 3730 } else if (!xfs_btree_ptr_is_null(cur, &rptr) && 3731 rrecs + xfs_btree_get_numrecs(block) <= 3732 cur->bc_ops->get_maxrecs(cur, level)) { 3733 /* 3734 * Set "left" to be the starting block, 3735 * "right" to be the right neighbor. 3736 */ 3737 lptr = cptr; 3738 left = block; 3739 lbp = bp; 3740 error = xfs_btree_read_buf_block(cur, &rptr, 0, &right, &rbp); 3741 if (error) 3742 goto error0; 3743 3744 /* 3745 * Otherwise, we can't fix the imbalance. 3746 * Just return. This is probably a logic error, but it's not fatal. 
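 * (For example, with get_maxrecs() = 8 at this level, a block that has
 * shrunk to 3 records can join a sibling holding up to 5, since
 * 3 + 5 <= 8; if both siblings hold 6 or more, neither join fits and we
 * fall through to here.)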
3747 */ 3748 } else { 3749 error = xfs_btree_dec_cursor(cur, level, stat); 3750 if (error) 3751 goto error0; 3752 return 0; 3753 } 3754 3755 rrecs = xfs_btree_get_numrecs(right); 3756 lrecs = xfs_btree_get_numrecs(left); 3757 3758 /* 3759 * We're now going to join "left" and "right" by moving all the stuff 3760 * in "right" to "left" and deleting "right". 3761 */ 3762 XFS_BTREE_STATS_ADD(cur, moves, rrecs); 3763 if (level > 0) { 3764 /* It's a non-leaf. Move keys and pointers. */ 3765 union xfs_btree_key *lkp; /* left btree key */ 3766 union xfs_btree_ptr *lpp; /* left address pointer */ 3767 union xfs_btree_key *rkp; /* right btree key */ 3768 union xfs_btree_ptr *rpp; /* right address pointer */ 3769 3770 lkp = xfs_btree_key_addr(cur, lrecs + 1, left); 3771 lpp = xfs_btree_ptr_addr(cur, lrecs + 1, left); 3772 rkp = xfs_btree_key_addr(cur, 1, right); 3773 rpp = xfs_btree_ptr_addr(cur, 1, right); 3774 #ifdef DEBUG 3775 for (i = 1; i < rrecs; i++) { 3776 error = xfs_btree_check_ptr(cur, rpp, i, level); 3777 if (error) 3778 goto error0; 3779 } 3780 #endif 3781 xfs_btree_copy_keys(cur, lkp, rkp, rrecs); 3782 xfs_btree_copy_ptrs(cur, lpp, rpp, rrecs); 3783 3784 xfs_btree_log_keys(cur, lbp, lrecs + 1, lrecs + rrecs); 3785 xfs_btree_log_ptrs(cur, lbp, lrecs + 1, lrecs + rrecs); 3786 } else { 3787 /* It's a leaf. Move records. */ 3788 union xfs_btree_rec *lrp; /* left record pointer */ 3789 union xfs_btree_rec *rrp; /* right record pointer */ 3790 3791 lrp = xfs_btree_rec_addr(cur, lrecs + 1, left); 3792 rrp = xfs_btree_rec_addr(cur, 1, right); 3793 3794 xfs_btree_copy_recs(cur, lrp, rrp, rrecs); 3795 xfs_btree_log_recs(cur, lbp, lrecs + 1, lrecs + rrecs); 3796 } 3797 3798 XFS_BTREE_STATS_INC(cur, join); 3799 3800 /* 3801 * Fix up the number of records and right block pointer in the 3802 * surviving block, and log it. 3803 */ 3804 xfs_btree_set_numrecs(left, lrecs + rrecs); 3805 xfs_btree_get_sibling(cur, right, &cptr, XFS_BB_RIGHTSIB), 3806 xfs_btree_set_sibling(cur, left, &cptr, XFS_BB_RIGHTSIB); 3807 xfs_btree_log_block(cur, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB); 3808 3809 /* If there is a right sibling, point it to the remaining block. */ 3810 xfs_btree_get_sibling(cur, left, &cptr, XFS_BB_RIGHTSIB); 3811 if (!xfs_btree_ptr_is_null(cur, &cptr)) { 3812 error = xfs_btree_read_buf_block(cur, &cptr, 0, &rrblock, &rrbp); 3813 if (error) 3814 goto error0; 3815 xfs_btree_set_sibling(cur, rrblock, &lptr, XFS_BB_LEFTSIB); 3816 xfs_btree_log_block(cur, rrbp, XFS_BB_LEFTSIB); 3817 } 3818 3819 /* Free the deleted block. */ 3820 error = cur->bc_ops->free_block(cur, rbp); 3821 if (error) 3822 goto error0; 3823 XFS_BTREE_STATS_INC(cur, free); 3824 3825 /* 3826 * If we joined with the left neighbor, set the buffer in the 3827 * cursor to the left block, and fix up the index. 3828 */ 3829 if (bp != lbp) { 3830 cur->bc_bufs[level] = lbp; 3831 cur->bc_ptrs[level] += lrecs; 3832 cur->bc_ra[level] = 0; 3833 } 3834 /* 3835 * If we joined with the right neighbor and there's a level above 3836 * us, increment the cursor at that level. 3837 */ 3838 else if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) || 3839 (level + 1 < cur->bc_nlevels)) { 3840 error = xfs_btree_increment(cur, level + 1, &i); 3841 if (error) 3842 goto error0; 3843 } 3844 3845 /* 3846 * Readjust the ptr at this level if it's not a leaf, since it's 3847 * still pointing at the deletion point, which makes the cursor 3848 * inconsistent. If this makes the ptr 0, the caller fixes it up. 
3849 * We can't use decrement because it would change the next level up. 3850 */ 3851 if (level > 0) 3852 cur->bc_ptrs[level]--; 3853 3854 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); 3855 /* Return value means the next level up has something to do. */ 3856 *stat = 2; 3857 return 0; 3858 3859 error0: 3860 XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR); 3861 if (tcur) 3862 xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR); 3863 return error; 3864 } 3865 3866 /* 3867 * Delete the record pointed to by cur. 3868 * The cursor refers to the place where the record was (could be inserted) 3869 * when the operation returns. 3870 */ 3871 int /* error */ 3872 xfs_btree_delete( 3873 struct xfs_btree_cur *cur, 3874 int *stat) /* success/failure */ 3875 { 3876 int error; /* error return value */ 3877 int level; 3878 int i; 3879 3880 XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY); 3881 3882 /* 3883 * Go up the tree, starting at leaf level. 3884 * 3885 * If 2 is returned then a join was done; go to the next level. 3886 * Otherwise we are done. 3887 */ 3888 for (level = 0, i = 2; i == 2; level++) { 3889 error = xfs_btree_delrec(cur, level, &i); 3890 if (error) 3891 goto error0; 3892 } 3893 3894 if (i == 0) { 3895 for (level = 1; level < cur->bc_nlevels; level++) { 3896 if (cur->bc_ptrs[level] == 0) { 3897 error = xfs_btree_decrement(cur, level, &i); 3898 if (error) 3899 goto error0; 3900 break; 3901 } 3902 } 3903 } 3904 3905 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); 3906 *stat = i; 3907 return 0; 3908 error0: 3909 XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR); 3910 return error; 3911 } 3912 3913 /* 3914 * Get the data from the pointed-to record. 3915 */ 3916 int /* error */ 3917 xfs_btree_get_rec( 3918 struct xfs_btree_cur *cur, /* btree cursor */ 3919 union xfs_btree_rec **recp, /* output: btree record */ 3920 int *stat) /* output: success/failure */ 3921 { 3922 struct xfs_btree_block *block; /* btree block */ 3923 struct xfs_buf *bp; /* buffer pointer */ 3924 int ptr; /* record number */ 3925 #ifdef DEBUG 3926 int error; /* error return value */ 3927 #endif 3928 3929 ptr = cur->bc_ptrs[0]; 3930 block = xfs_btree_get_block(cur, 0, &bp); 3931 3932 #ifdef DEBUG 3933 error = xfs_btree_check_block(cur, block, 0, bp); 3934 if (error) 3935 return error; 3936 #endif 3937 3938 /* 3939 * Off the right end or left end, return failure. 3940 */ 3941 if (ptr > xfs_btree_get_numrecs(block) || ptr <= 0) { 3942 *stat = 0; 3943 return 0; 3944 } 3945 3946 /* 3947 * Point to the record and extract its data. 3948 */ 3949 *recp = xfs_btree_rec_addr(cur, ptr, block); 3950 *stat = 1; 3951 return 0; 3952 } 3953 3954 /* 3955 * Change the owner of a btree. 3956 * 3957 * The mechanism we use here is ordered buffer logging. Because we don't know 3958 * how many buffers we are going to need to modify, we don't really want to 3959 * have to make transaction reservations for the worst case of every buffer in a 3960 * full-size btree as that may be more space than we can fit in the log. 3961 * 3962 * We do the btree walk in the most efficient manner possible - we have sibling 3963 * pointers so we can just walk all the blocks on each level from left to right 3964 * in a single pass, and then move to the next level and do the same. We can 3965 * also do readahead on the sibling pointers to get IO moving more quickly, 3966 * though for slow disks this is unlikely to make much difference to performance 3967 * as the amount of CPU work we have to do before moving to the next block is 3968 * relatively small.
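 *
 * (The walk below therefore looks roughly like:
 *
 *	for (level = bc_nlevels - 1; level >= 0; level--)
 *		for each block on the level, left to right via bb_rightsib:
 *			update bb_owner, then ordered-log or delwri-queue it
 *
 * with readahead issued for the right sibling at every step.)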
3969 * 3970 * For each btree block that we load, modify the owner appropriately, set the 3971 * buffer as an ordered buffer and log it appropriately. We need to ensure that 3972 * we mark the region we change dirty so that if the buffer is relogged in 3973 * a subsequent transaction the changes we make here as an ordered buffer are 3974 * correctly relogged in that transaction. If we are in recovery context, then 3975 * just queue the modified buffer as delayed write buffer so the transaction 3976 * recovery completion writes the changes to disk. 3977 */ 3978 static int 3979 xfs_btree_block_change_owner( 3980 struct xfs_btree_cur *cur, 3981 int level, 3982 __uint64_t new_owner, 3983 struct list_head *buffer_list) 3984 { 3985 struct xfs_btree_block *block; 3986 struct xfs_buf *bp; 3987 union xfs_btree_ptr rptr; 3988 3989 /* do right sibling readahead */ 3990 xfs_btree_readahead(cur, level, XFS_BTCUR_RIGHTRA); 3991 3992 /* modify the owner */ 3993 block = xfs_btree_get_block(cur, level, &bp); 3994 if (cur->bc_flags & XFS_BTREE_LONG_PTRS) 3995 block->bb_u.l.bb_owner = cpu_to_be64(new_owner); 3996 else 3997 block->bb_u.s.bb_owner = cpu_to_be32(new_owner); 3998 3999 /* 4000 * If the block is a root block hosted in an inode, we might not have a 4001 * buffer pointer here and we shouldn't attempt to log the change as the 4002 * information is already held in the inode and discarded when the root 4003 * block is formatted into the on-disk inode fork. We still change it, 4004 * though, so everything is consistent in memory. 4005 */ 4006 if (bp) { 4007 if (cur->bc_tp) { 4008 xfs_trans_ordered_buf(cur->bc_tp, bp); 4009 xfs_btree_log_block(cur, bp, XFS_BB_OWNER); 4010 } else { 4011 xfs_buf_delwri_queue(bp, buffer_list); 4012 } 4013 } else { 4014 ASSERT(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE); 4015 ASSERT(level == cur->bc_nlevels - 1); 4016 } 4017 4018 /* now read rh sibling block for next iteration */ 4019 xfs_btree_get_sibling(cur, block, &rptr, XFS_BB_RIGHTSIB); 4020 if (xfs_btree_ptr_is_null(cur, &rptr)) 4021 return -ENOENT; 4022 4023 return xfs_btree_lookup_get_block(cur, level, &rptr, &block); 4024 } 4025 4026 int 4027 xfs_btree_change_owner( 4028 struct xfs_btree_cur *cur, 4029 __uint64_t new_owner, 4030 struct list_head *buffer_list) 4031 { 4032 union xfs_btree_ptr lptr; 4033 int level; 4034 struct xfs_btree_block *block = NULL; 4035 int error = 0; 4036 4037 cur->bc_ops->init_ptr_from_cur(cur, &lptr); 4038 4039 /* for each level */ 4040 for (level = cur->bc_nlevels - 1; level >= 0; level--) { 4041 /* grab the left hand block */ 4042 error = xfs_btree_lookup_get_block(cur, level, &lptr, &block); 4043 if (error) 4044 return error; 4045 4046 /* readahead the left most block for the next level down */ 4047 if (level > 0) { 4048 union xfs_btree_ptr *ptr; 4049 4050 ptr = xfs_btree_ptr_addr(cur, 1, block); 4051 xfs_btree_readahead_ptr(cur, ptr, 1); 4052 4053 /* save for the next iteration of the loop */ 4054 lptr = *ptr; 4055 } 4056 4057 /* for each buffer in the level */ 4058 do { 4059 error = xfs_btree_block_change_owner(cur, level, 4060 new_owner, 4061 buffer_list); 4062 } while (!error); 4063 4064 if (error != -ENOENT) 4065 return error; 4066 } 4067 4068 return 0; 4069 } 4070