/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * Copyright (c) 2013 Red Hat, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_dir2.h"
#include "xfs_dir2_priv.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_attr.h"
#include "xfs_attr_leaf.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_cksum.h"
#include "xfs_buf_item.h"

/*
 * xfs_da_btree.c
 *
 * Routines to implement directories as Btrees of hashed names.
 */

/*========================================================================
 * Function prototypes for the kernel.
 *========================================================================*/

/*
 * Routines used for growing the Btree.
 */
STATIC int xfs_da3_root_split(xfs_da_state_t *state,
				xfs_da_state_blk_t *existing_root,
				xfs_da_state_blk_t *new_child);
STATIC int xfs_da3_node_split(xfs_da_state_t *state,
				xfs_da_state_blk_t *existing_blk,
				xfs_da_state_blk_t *split_blk,
				xfs_da_state_blk_t *blk_to_add,
				int treelevel,
				int *result);
STATIC void xfs_da3_node_rebalance(xfs_da_state_t *state,
				xfs_da_state_blk_t *node_blk_1,
				xfs_da_state_blk_t *node_blk_2);
STATIC void xfs_da3_node_add(xfs_da_state_t *state,
				xfs_da_state_blk_t *old_node_blk,
				xfs_da_state_blk_t *new_node_blk);

/*
 * Routines used for shrinking the Btree.
 */
STATIC int xfs_da3_root_join(xfs_da_state_t *state,
				xfs_da_state_blk_t *root_blk);
STATIC int xfs_da3_node_toosmall(xfs_da_state_t *state, int *retval);
STATIC void xfs_da3_node_remove(xfs_da_state_t *state,
				xfs_da_state_blk_t *drop_blk);
STATIC void xfs_da3_node_unbalance(xfs_da_state_t *state,
				xfs_da_state_blk_t *src_node_blk,
				xfs_da_state_blk_t *dst_node_blk);

/*
 * Utility routines.
 */
STATIC int xfs_da3_blk_unlink(xfs_da_state_t *state,
				xfs_da_state_blk_t *drop_blk,
				xfs_da_state_blk_t *save_blk);


kmem_zone_t *xfs_da_state_zone;	/* anchor for state struct zone */

/*
 * Allocate a dir-state structure.
 * We don't put them on the stack since they're large.
 */
xfs_da_state_t *
xfs_da_state_alloc(void)
{
	return kmem_zone_zalloc(xfs_da_state_zone, KM_NOFS);
}

/*
 * Kill the altpath contents of a da-state structure.
 */
STATIC void
xfs_da_state_kill_altpath(xfs_da_state_t *state)
{
	int	i;

	for (i = 0; i < state->altpath.active; i++)
		state->altpath.blk[i].bp = NULL;
	state->altpath.active = 0;
}

/*
 * Free a da-state structure.
 */
void
xfs_da_state_free(xfs_da_state_t *state)
{
	xfs_da_state_kill_altpath(state);
#ifdef DEBUG
	memset((char *)state, 0, sizeof(*state));
#endif /* DEBUG */
	kmem_zone_free(xfs_da_state_zone, state);
}

static bool
xfs_da3_node_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;
	struct xfs_da_intnode	*hdr = bp->b_addr;
	struct xfs_da3_icnode_hdr ichdr;
	const struct xfs_dir_ops *ops;

	ops = xfs_dir_get_ops(mp, NULL);

	ops->node_hdr_from_disk(&ichdr, hdr);

	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		struct xfs_da3_node_hdr *hdr3 = bp->b_addr;

		if (ichdr.magic != XFS_DA3_NODE_MAGIC)
			return false;

		if (!uuid_equal(&hdr3->info.uuid, &mp->m_sb.sb_uuid))
			return false;
		if (be64_to_cpu(hdr3->info.blkno) != bp->b_bn)
			return false;
	} else {
		if (ichdr.magic != XFS_DA_NODE_MAGIC)
			return false;
	}
	if (ichdr.level == 0)
		return false;
	if (ichdr.level > XFS_DA_NODE_MAXDEPTH)
		return false;
	if (ichdr.count == 0)
		return false;

	/*
	 * we don't know if the node is for an attribute or directory tree,
	 * so only fail if the count is outside both bounds
	 */
	if (ichdr.count > mp->m_dir_geo->node_ents &&
	    ichdr.count > mp->m_attr_geo->node_ents)
		return false;

	/* XXX: hash order check? */

	return true;
}

static void
xfs_da3_node_write_verify(
	struct xfs_buf	*bp)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;
	struct xfs_buf_log_item	*bip = bp->b_fspriv;
	struct xfs_da3_node_hdr *hdr3 = bp->b_addr;

	if (!xfs_da3_node_verify(bp)) {
		xfs_buf_ioerror(bp, -EFSCORRUPTED);
		xfs_verifier_error(bp);
		return;
	}

	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return;

	if (bip)
		hdr3->info.lsn = cpu_to_be64(bip->bli_item.li_lsn);

	xfs_buf_update_cksum(bp, XFS_DA3_NODE_CRC_OFF);
}

/*
 * leaf/node format detection on trees is sketchy, so a node read can be done
 * on leaf level blocks when detection identifies the tree as a node format
 * tree incorrectly. In this case, we need to swap the verifier to match the
 * correct format of the block being read.
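 *
 * Rough summary of the dispatch below (an illustration of the existing
 * switch, not new behaviour):
 *
 *	XFS_DA_NODE_MAGIC / XFS_DA3_NODE_MAGIC      stay with xfs_da3_node_buf_ops
 *	XFS_ATTR_LEAF_MAGIC / XFS_ATTR3_LEAF_MAGIC  hand off to xfs_attr3_leaf_buf_ops
 *	XFS_DIR2_LEAFN_MAGIC / XFS_DIR3_LEAFN_MAGIC hand off to xfs_dir3_leafn_buf_ops
 *
 * and any other magic value is reported as a corrupt block.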
207 */ 208 static void 209 xfs_da3_node_read_verify( 210 struct xfs_buf *bp) 211 { 212 struct xfs_da_blkinfo *info = bp->b_addr; 213 214 switch (be16_to_cpu(info->magic)) { 215 case XFS_DA3_NODE_MAGIC: 216 if (!xfs_buf_verify_cksum(bp, XFS_DA3_NODE_CRC_OFF)) { 217 xfs_buf_ioerror(bp, -EFSBADCRC); 218 break; 219 } 220 /* fall through */ 221 case XFS_DA_NODE_MAGIC: 222 if (!xfs_da3_node_verify(bp)) { 223 xfs_buf_ioerror(bp, -EFSCORRUPTED); 224 break; 225 } 226 return; 227 case XFS_ATTR_LEAF_MAGIC: 228 case XFS_ATTR3_LEAF_MAGIC: 229 bp->b_ops = &xfs_attr3_leaf_buf_ops; 230 bp->b_ops->verify_read(bp); 231 return; 232 case XFS_DIR2_LEAFN_MAGIC: 233 case XFS_DIR3_LEAFN_MAGIC: 234 bp->b_ops = &xfs_dir3_leafn_buf_ops; 235 bp->b_ops->verify_read(bp); 236 return; 237 default: 238 break; 239 } 240 241 /* corrupt block */ 242 xfs_verifier_error(bp); 243 } 244 245 const struct xfs_buf_ops xfs_da3_node_buf_ops = { 246 .verify_read = xfs_da3_node_read_verify, 247 .verify_write = xfs_da3_node_write_verify, 248 }; 249 250 int 251 xfs_da3_node_read( 252 struct xfs_trans *tp, 253 struct xfs_inode *dp, 254 xfs_dablk_t bno, 255 xfs_daddr_t mappedbno, 256 struct xfs_buf **bpp, 257 int which_fork) 258 { 259 int err; 260 261 err = xfs_da_read_buf(tp, dp, bno, mappedbno, bpp, 262 which_fork, &xfs_da3_node_buf_ops); 263 if (!err && tp) { 264 struct xfs_da_blkinfo *info = (*bpp)->b_addr; 265 int type; 266 267 switch (be16_to_cpu(info->magic)) { 268 case XFS_DA_NODE_MAGIC: 269 case XFS_DA3_NODE_MAGIC: 270 type = XFS_BLFT_DA_NODE_BUF; 271 break; 272 case XFS_ATTR_LEAF_MAGIC: 273 case XFS_ATTR3_LEAF_MAGIC: 274 type = XFS_BLFT_ATTR_LEAF_BUF; 275 break; 276 case XFS_DIR2_LEAFN_MAGIC: 277 case XFS_DIR3_LEAFN_MAGIC: 278 type = XFS_BLFT_DIR_LEAFN_BUF; 279 break; 280 default: 281 type = 0; 282 ASSERT(0); 283 break; 284 } 285 xfs_trans_buf_set_type(tp, *bpp, type); 286 } 287 return err; 288 } 289 290 /*======================================================================== 291 * Routines used for growing the Btree. 292 *========================================================================*/ 293 294 /* 295 * Create the initial contents of an intermediate node. 296 */ 297 int 298 xfs_da3_node_create( 299 struct xfs_da_args *args, 300 xfs_dablk_t blkno, 301 int level, 302 struct xfs_buf **bpp, 303 int whichfork) 304 { 305 struct xfs_da_intnode *node; 306 struct xfs_trans *tp = args->trans; 307 struct xfs_mount *mp = tp->t_mountp; 308 struct xfs_da3_icnode_hdr ichdr = {0}; 309 struct xfs_buf *bp; 310 int error; 311 struct xfs_inode *dp = args->dp; 312 313 trace_xfs_da_node_create(args); 314 ASSERT(level <= XFS_DA_NODE_MAXDEPTH); 315 316 error = xfs_da_get_buf(tp, dp, blkno, -1, &bp, whichfork); 317 if (error) 318 return error; 319 bp->b_ops = &xfs_da3_node_buf_ops; 320 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DA_NODE_BUF); 321 node = bp->b_addr; 322 323 if (xfs_sb_version_hascrc(&mp->m_sb)) { 324 struct xfs_da3_node_hdr *hdr3 = bp->b_addr; 325 326 ichdr.magic = XFS_DA3_NODE_MAGIC; 327 hdr3->info.blkno = cpu_to_be64(bp->b_bn); 328 hdr3->info.owner = cpu_to_be64(args->dp->i_ino); 329 uuid_copy(&hdr3->info.uuid, &mp->m_sb.sb_uuid); 330 } else { 331 ichdr.magic = XFS_DA_NODE_MAGIC; 332 } 333 ichdr.level = level; 334 335 dp->d_ops->node_hdr_to_disk(node, &ichdr); 336 xfs_trans_log_buf(tp, bp, 337 XFS_DA_LOGRANGE(node, &node->hdr, dp->d_ops->node_hdr_size)); 338 339 *bpp = bp; 340 return 0; 341 } 342 343 /* 344 * Split a leaf node, rebalance, then possibly split 345 * intermediate nodes, rebalance, etc. 
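 *
 * A sketch of the walk performed below (illustration only):
 *
 *	for each block on the path, leaf first, moving towards the root:
 *		split the block if the pending entry will not fit;
 *		insert the entry handed up by the level below;
 *		let xfs_da3_fixhashpath() correct hashvals on the way up;
 *	if the topmost block also had to be split, xfs_da3_root_split()
 *	builds a new root pointing at the two halves.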
346 */ 347 int /* error */ 348 xfs_da3_split( 349 struct xfs_da_state *state) 350 { 351 struct xfs_da_state_blk *oldblk; 352 struct xfs_da_state_blk *newblk; 353 struct xfs_da_state_blk *addblk; 354 struct xfs_da_intnode *node; 355 struct xfs_buf *bp; 356 int max; 357 int action = 0; 358 int error; 359 int i; 360 361 trace_xfs_da_split(state->args); 362 363 /* 364 * Walk back up the tree splitting/inserting/adjusting as necessary. 365 * If we need to insert and there isn't room, split the node, then 366 * decide which fragment to insert the new block from below into. 367 * Note that we may split the root this way, but we need more fixup. 368 */ 369 max = state->path.active - 1; 370 ASSERT((max >= 0) && (max < XFS_DA_NODE_MAXDEPTH)); 371 ASSERT(state->path.blk[max].magic == XFS_ATTR_LEAF_MAGIC || 372 state->path.blk[max].magic == XFS_DIR2_LEAFN_MAGIC); 373 374 addblk = &state->path.blk[max]; /* initial dummy value */ 375 for (i = max; (i >= 0) && addblk; state->path.active--, i--) { 376 oldblk = &state->path.blk[i]; 377 newblk = &state->altpath.blk[i]; 378 379 /* 380 * If a leaf node then 381 * Allocate a new leaf node, then rebalance across them. 382 * else if an intermediate node then 383 * We split on the last layer, must we split the node? 384 */ 385 switch (oldblk->magic) { 386 case XFS_ATTR_LEAF_MAGIC: 387 error = xfs_attr3_leaf_split(state, oldblk, newblk); 388 if ((error != 0) && (error != -ENOSPC)) { 389 return error; /* GROT: attr is inconsistent */ 390 } 391 if (!error) { 392 addblk = newblk; 393 break; 394 } 395 /* 396 * Entry wouldn't fit, split the leaf again. 397 */ 398 state->extravalid = 1; 399 if (state->inleaf) { 400 state->extraafter = 0; /* before newblk */ 401 trace_xfs_attr_leaf_split_before(state->args); 402 error = xfs_attr3_leaf_split(state, oldblk, 403 &state->extrablk); 404 } else { 405 state->extraafter = 1; /* after newblk */ 406 trace_xfs_attr_leaf_split_after(state->args); 407 error = xfs_attr3_leaf_split(state, newblk, 408 &state->extrablk); 409 } 410 if (error) 411 return error; /* GROT: attr inconsistent */ 412 addblk = newblk; 413 break; 414 case XFS_DIR2_LEAFN_MAGIC: 415 error = xfs_dir2_leafn_split(state, oldblk, newblk); 416 if (error) 417 return error; 418 addblk = newblk; 419 break; 420 case XFS_DA_NODE_MAGIC: 421 error = xfs_da3_node_split(state, oldblk, newblk, addblk, 422 max - i, &action); 423 addblk->bp = NULL; 424 if (error) 425 return error; /* GROT: dir is inconsistent */ 426 /* 427 * Record the newly split block for the next time thru? 428 */ 429 if (action) 430 addblk = newblk; 431 else 432 addblk = NULL; 433 break; 434 } 435 436 /* 437 * Update the btree to show the new hashval for this child. 438 */ 439 xfs_da3_fixhashpath(state, &state->path); 440 } 441 if (!addblk) 442 return 0; 443 444 /* 445 * Split the root node. 446 */ 447 ASSERT(state->path.active == 0); 448 oldblk = &state->path.blk[0]; 449 error = xfs_da3_root_split(state, oldblk, addblk); 450 if (error) { 451 addblk->bp = NULL; 452 return error; /* GROT: dir is inconsistent */ 453 } 454 455 /* 456 * Update pointers to the node which used to be block 0 and 457 * just got bumped because of the addition of a new root node. 458 * There might be three blocks involved if a double split occurred, 459 * and the original block 0 could be at any position in the list. 460 * 461 * Note: the magic numbers and sibling pointers are in the same 462 * physical place for both v2 and v3 headers (by design). 
Hence it 463 * doesn't matter which version of the xfs_da_intnode structure we use 464 * here as the result will be the same using either structure. 465 */ 466 node = oldblk->bp->b_addr; 467 if (node->hdr.info.forw) { 468 if (be32_to_cpu(node->hdr.info.forw) == addblk->blkno) { 469 bp = addblk->bp; 470 } else { 471 ASSERT(state->extravalid); 472 bp = state->extrablk.bp; 473 } 474 node = bp->b_addr; 475 node->hdr.info.back = cpu_to_be32(oldblk->blkno); 476 xfs_trans_log_buf(state->args->trans, bp, 477 XFS_DA_LOGRANGE(node, &node->hdr.info, 478 sizeof(node->hdr.info))); 479 } 480 node = oldblk->bp->b_addr; 481 if (node->hdr.info.back) { 482 if (be32_to_cpu(node->hdr.info.back) == addblk->blkno) { 483 bp = addblk->bp; 484 } else { 485 ASSERT(state->extravalid); 486 bp = state->extrablk.bp; 487 } 488 node = bp->b_addr; 489 node->hdr.info.forw = cpu_to_be32(oldblk->blkno); 490 xfs_trans_log_buf(state->args->trans, bp, 491 XFS_DA_LOGRANGE(node, &node->hdr.info, 492 sizeof(node->hdr.info))); 493 } 494 addblk->bp = NULL; 495 return 0; 496 } 497 498 /* 499 * Split the root. We have to create a new root and point to the two 500 * parts (the split old root) that we just created. Copy block zero to 501 * the EOF, extending the inode in process. 502 */ 503 STATIC int /* error */ 504 xfs_da3_root_split( 505 struct xfs_da_state *state, 506 struct xfs_da_state_blk *blk1, 507 struct xfs_da_state_blk *blk2) 508 { 509 struct xfs_da_intnode *node; 510 struct xfs_da_intnode *oldroot; 511 struct xfs_da_node_entry *btree; 512 struct xfs_da3_icnode_hdr nodehdr; 513 struct xfs_da_args *args; 514 struct xfs_buf *bp; 515 struct xfs_inode *dp; 516 struct xfs_trans *tp; 517 struct xfs_mount *mp; 518 struct xfs_dir2_leaf *leaf; 519 xfs_dablk_t blkno; 520 int level; 521 int error; 522 int size; 523 524 trace_xfs_da_root_split(state->args); 525 526 /* 527 * Copy the existing (incorrect) block from the root node position 528 * to a free space somewhere. 529 */ 530 args = state->args; 531 error = xfs_da_grow_inode(args, &blkno); 532 if (error) 533 return error; 534 535 dp = args->dp; 536 tp = args->trans; 537 mp = state->mp; 538 error = xfs_da_get_buf(tp, dp, blkno, -1, &bp, args->whichfork); 539 if (error) 540 return error; 541 node = bp->b_addr; 542 oldroot = blk1->bp->b_addr; 543 if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC) || 544 oldroot->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC)) { 545 struct xfs_da3_icnode_hdr nodehdr; 546 547 dp->d_ops->node_hdr_from_disk(&nodehdr, oldroot); 548 btree = dp->d_ops->node_tree_p(oldroot); 549 size = (int)((char *)&btree[nodehdr.count] - (char *)oldroot); 550 level = nodehdr.level; 551 552 /* 553 * we are about to copy oldroot to bp, so set up the type 554 * of bp while we know exactly what it will be. 555 */ 556 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DA_NODE_BUF); 557 } else { 558 struct xfs_dir3_icleaf_hdr leafhdr; 559 struct xfs_dir2_leaf_entry *ents; 560 561 leaf = (xfs_dir2_leaf_t *)oldroot; 562 dp->d_ops->leaf_hdr_from_disk(&leafhdr, leaf); 563 ents = dp->d_ops->leaf_ents_p(leaf); 564 565 ASSERT(leafhdr.magic == XFS_DIR2_LEAFN_MAGIC || 566 leafhdr.magic == XFS_DIR3_LEAFN_MAGIC); 567 size = (int)((char *)&ents[leafhdr.count] - (char *)leaf); 568 level = 0; 569 570 /* 571 * we are about to copy oldroot to bp, so set up the type 572 * of bp while we know exactly what it will be. 
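 * (The buffer type stamped here travels with the buffer log item and, as
 *  far as I can tell, is what log recovery later uses to pick the right
 *  verifier for the block; doing it inside this branch means it is set
 *  while the source format is still certain.)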
573 */ 574 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DIR_LEAFN_BUF); 575 } 576 577 /* 578 * we can copy most of the information in the node from one block to 579 * another, but for CRC enabled headers we have to make sure that the 580 * block specific identifiers are kept intact. We update the buffer 581 * directly for this. 582 */ 583 memcpy(node, oldroot, size); 584 if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC) || 585 oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) { 586 struct xfs_da3_intnode *node3 = (struct xfs_da3_intnode *)node; 587 588 node3->hdr.info.blkno = cpu_to_be64(bp->b_bn); 589 } 590 xfs_trans_log_buf(tp, bp, 0, size - 1); 591 592 bp->b_ops = blk1->bp->b_ops; 593 xfs_trans_buf_copy_type(bp, blk1->bp); 594 blk1->bp = bp; 595 blk1->blkno = blkno; 596 597 /* 598 * Set up the new root node. 599 */ 600 error = xfs_da3_node_create(args, 601 (args->whichfork == XFS_DATA_FORK) ? args->geo->leafblk : 0, 602 level + 1, &bp, args->whichfork); 603 if (error) 604 return error; 605 606 node = bp->b_addr; 607 dp->d_ops->node_hdr_from_disk(&nodehdr, node); 608 btree = dp->d_ops->node_tree_p(node); 609 btree[0].hashval = cpu_to_be32(blk1->hashval); 610 btree[0].before = cpu_to_be32(blk1->blkno); 611 btree[1].hashval = cpu_to_be32(blk2->hashval); 612 btree[1].before = cpu_to_be32(blk2->blkno); 613 nodehdr.count = 2; 614 dp->d_ops->node_hdr_to_disk(node, &nodehdr); 615 616 #ifdef DEBUG 617 if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) || 618 oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) { 619 ASSERT(blk1->blkno >= args->geo->leafblk && 620 blk1->blkno < args->geo->freeblk); 621 ASSERT(blk2->blkno >= args->geo->leafblk && 622 blk2->blkno < args->geo->freeblk); 623 } 624 #endif 625 626 /* Header is already logged by xfs_da_node_create */ 627 xfs_trans_log_buf(tp, bp, 628 XFS_DA_LOGRANGE(node, btree, sizeof(xfs_da_node_entry_t) * 2)); 629 630 return 0; 631 } 632 633 /* 634 * Split the node, rebalance, then add the new entry. 635 */ 636 STATIC int /* error */ 637 xfs_da3_node_split( 638 struct xfs_da_state *state, 639 struct xfs_da_state_blk *oldblk, 640 struct xfs_da_state_blk *newblk, 641 struct xfs_da_state_blk *addblk, 642 int treelevel, 643 int *result) 644 { 645 struct xfs_da_intnode *node; 646 struct xfs_da3_icnode_hdr nodehdr; 647 xfs_dablk_t blkno; 648 int newcount; 649 int error; 650 int useextra; 651 struct xfs_inode *dp = state->args->dp; 652 653 trace_xfs_da_node_split(state->args); 654 655 node = oldblk->bp->b_addr; 656 dp->d_ops->node_hdr_from_disk(&nodehdr, node); 657 658 /* 659 * With V2 dirs the extra block is data or freespace. 660 */ 661 useextra = state->extravalid && state->args->whichfork == XFS_ATTR_FORK; 662 newcount = 1 + useextra; 663 /* 664 * Do we have to split the node? 665 */ 666 if (nodehdr.count + newcount > state->args->geo->node_ents) { 667 /* 668 * Allocate a new node, add to the doubly linked chain of 669 * nodes, then move some of our excess entries into it. 
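 *
 * In other words, a sketch of the calls that follow:
 *	xfs_da_grow_inode()      allocates a new da block in this fork,
 *	xfs_da3_node_create()    formats it as an empty node at this level,
 *	xfs_da3_node_rebalance() moves roughly half of our entries across,
 *	xfs_da3_blk_link()       splices it into the sibling chain.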
670 */ 671 error = xfs_da_grow_inode(state->args, &blkno); 672 if (error) 673 return error; /* GROT: dir is inconsistent */ 674 675 error = xfs_da3_node_create(state->args, blkno, treelevel, 676 &newblk->bp, state->args->whichfork); 677 if (error) 678 return error; /* GROT: dir is inconsistent */ 679 newblk->blkno = blkno; 680 newblk->magic = XFS_DA_NODE_MAGIC; 681 xfs_da3_node_rebalance(state, oldblk, newblk); 682 error = xfs_da3_blk_link(state, oldblk, newblk); 683 if (error) 684 return error; 685 *result = 1; 686 } else { 687 *result = 0; 688 } 689 690 /* 691 * Insert the new entry(s) into the correct block 692 * (updating last hashval in the process). 693 * 694 * xfs_da3_node_add() inserts BEFORE the given index, 695 * and as a result of using node_lookup_int() we always 696 * point to a valid entry (not after one), but a split 697 * operation always results in a new block whose hashvals 698 * FOLLOW the current block. 699 * 700 * If we had double-split op below us, then add the extra block too. 701 */ 702 node = oldblk->bp->b_addr; 703 dp->d_ops->node_hdr_from_disk(&nodehdr, node); 704 if (oldblk->index <= nodehdr.count) { 705 oldblk->index++; 706 xfs_da3_node_add(state, oldblk, addblk); 707 if (useextra) { 708 if (state->extraafter) 709 oldblk->index++; 710 xfs_da3_node_add(state, oldblk, &state->extrablk); 711 state->extravalid = 0; 712 } 713 } else { 714 newblk->index++; 715 xfs_da3_node_add(state, newblk, addblk); 716 if (useextra) { 717 if (state->extraafter) 718 newblk->index++; 719 xfs_da3_node_add(state, newblk, &state->extrablk); 720 state->extravalid = 0; 721 } 722 } 723 724 return 0; 725 } 726 727 /* 728 * Balance the btree elements between two intermediate nodes, 729 * usually one full and one empty. 730 * 731 * NOTE: if blk2 is empty, then it will get the upper half of blk1. 732 */ 733 STATIC void 734 xfs_da3_node_rebalance( 735 struct xfs_da_state *state, 736 struct xfs_da_state_blk *blk1, 737 struct xfs_da_state_blk *blk2) 738 { 739 struct xfs_da_intnode *node1; 740 struct xfs_da_intnode *node2; 741 struct xfs_da_intnode *tmpnode; 742 struct xfs_da_node_entry *btree1; 743 struct xfs_da_node_entry *btree2; 744 struct xfs_da_node_entry *btree_s; 745 struct xfs_da_node_entry *btree_d; 746 struct xfs_da3_icnode_hdr nodehdr1; 747 struct xfs_da3_icnode_hdr nodehdr2; 748 struct xfs_trans *tp; 749 int count; 750 int tmp; 751 int swap = 0; 752 struct xfs_inode *dp = state->args->dp; 753 754 trace_xfs_da_node_rebalance(state->args); 755 756 node1 = blk1->bp->b_addr; 757 node2 = blk2->bp->b_addr; 758 dp->d_ops->node_hdr_from_disk(&nodehdr1, node1); 759 dp->d_ops->node_hdr_from_disk(&nodehdr2, node2); 760 btree1 = dp->d_ops->node_tree_p(node1); 761 btree2 = dp->d_ops->node_tree_p(node2); 762 763 /* 764 * Figure out how many entries need to move, and in which direction. 765 * Swap the nodes around if that makes it simpler. 
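 *
 * The amount moved is count = (count1 - count2) / 2, so a positive value
 * shifts entries from the high end of node1 into the front of node2 and a
 * negative value goes the other way.  For example (numbers purely for
 * illustration): count1 = 96 and count2 = 0 gives count = 48, so the top
 * 48 entries of node1 become the first 48 entries of node2.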
766 */ 767 if (nodehdr1.count > 0 && nodehdr2.count > 0 && 768 ((be32_to_cpu(btree2[0].hashval) < be32_to_cpu(btree1[0].hashval)) || 769 (be32_to_cpu(btree2[nodehdr2.count - 1].hashval) < 770 be32_to_cpu(btree1[nodehdr1.count - 1].hashval)))) { 771 tmpnode = node1; 772 node1 = node2; 773 node2 = tmpnode; 774 dp->d_ops->node_hdr_from_disk(&nodehdr1, node1); 775 dp->d_ops->node_hdr_from_disk(&nodehdr2, node2); 776 btree1 = dp->d_ops->node_tree_p(node1); 777 btree2 = dp->d_ops->node_tree_p(node2); 778 swap = 1; 779 } 780 781 count = (nodehdr1.count - nodehdr2.count) / 2; 782 if (count == 0) 783 return; 784 tp = state->args->trans; 785 /* 786 * Two cases: high-to-low and low-to-high. 787 */ 788 if (count > 0) { 789 /* 790 * Move elements in node2 up to make a hole. 791 */ 792 tmp = nodehdr2.count; 793 if (tmp > 0) { 794 tmp *= (uint)sizeof(xfs_da_node_entry_t); 795 btree_s = &btree2[0]; 796 btree_d = &btree2[count]; 797 memmove(btree_d, btree_s, tmp); 798 } 799 800 /* 801 * Move the req'd B-tree elements from high in node1 to 802 * low in node2. 803 */ 804 nodehdr2.count += count; 805 tmp = count * (uint)sizeof(xfs_da_node_entry_t); 806 btree_s = &btree1[nodehdr1.count - count]; 807 btree_d = &btree2[0]; 808 memcpy(btree_d, btree_s, tmp); 809 nodehdr1.count -= count; 810 } else { 811 /* 812 * Move the req'd B-tree elements from low in node2 to 813 * high in node1. 814 */ 815 count = -count; 816 tmp = count * (uint)sizeof(xfs_da_node_entry_t); 817 btree_s = &btree2[0]; 818 btree_d = &btree1[nodehdr1.count]; 819 memcpy(btree_d, btree_s, tmp); 820 nodehdr1.count += count; 821 822 xfs_trans_log_buf(tp, blk1->bp, 823 XFS_DA_LOGRANGE(node1, btree_d, tmp)); 824 825 /* 826 * Move elements in node2 down to fill the hole. 827 */ 828 tmp = nodehdr2.count - count; 829 tmp *= (uint)sizeof(xfs_da_node_entry_t); 830 btree_s = &btree2[count]; 831 btree_d = &btree2[0]; 832 memmove(btree_d, btree_s, tmp); 833 nodehdr2.count -= count; 834 } 835 836 /* 837 * Log header of node 1 and all current bits of node 2. 838 */ 839 dp->d_ops->node_hdr_to_disk(node1, &nodehdr1); 840 xfs_trans_log_buf(tp, blk1->bp, 841 XFS_DA_LOGRANGE(node1, &node1->hdr, dp->d_ops->node_hdr_size)); 842 843 dp->d_ops->node_hdr_to_disk(node2, &nodehdr2); 844 xfs_trans_log_buf(tp, blk2->bp, 845 XFS_DA_LOGRANGE(node2, &node2->hdr, 846 dp->d_ops->node_hdr_size + 847 (sizeof(btree2[0]) * nodehdr2.count))); 848 849 /* 850 * Record the last hashval from each block for upward propagation. 851 * (note: don't use the swapped node pointers) 852 */ 853 if (swap) { 854 node1 = blk1->bp->b_addr; 855 node2 = blk2->bp->b_addr; 856 dp->d_ops->node_hdr_from_disk(&nodehdr1, node1); 857 dp->d_ops->node_hdr_from_disk(&nodehdr2, node2); 858 btree1 = dp->d_ops->node_tree_p(node1); 859 btree2 = dp->d_ops->node_tree_p(node2); 860 } 861 blk1->hashval = be32_to_cpu(btree1[nodehdr1.count - 1].hashval); 862 blk2->hashval = be32_to_cpu(btree2[nodehdr2.count - 1].hashval); 863 864 /* 865 * Adjust the expected index for insertion. 866 */ 867 if (blk1->index >= nodehdr1.count) { 868 blk2->index = blk1->index - nodehdr1.count; 869 blk1->index = nodehdr1.count + 1; /* make it invalid */ 870 } 871 } 872 873 /* 874 * Add a new entry to an intermediate node. 
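 *
 * (Sketch of the steps below: entries at and above oldblk->index are
 *  shifted up one slot with memmove(), the new hashval/before pair is
 *  written at oldblk->index, the header count is bumped, and both the
 *  moved range and the header are logged.)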
875 */ 876 STATIC void 877 xfs_da3_node_add( 878 struct xfs_da_state *state, 879 struct xfs_da_state_blk *oldblk, 880 struct xfs_da_state_blk *newblk) 881 { 882 struct xfs_da_intnode *node; 883 struct xfs_da3_icnode_hdr nodehdr; 884 struct xfs_da_node_entry *btree; 885 int tmp; 886 struct xfs_inode *dp = state->args->dp; 887 888 trace_xfs_da_node_add(state->args); 889 890 node = oldblk->bp->b_addr; 891 dp->d_ops->node_hdr_from_disk(&nodehdr, node); 892 btree = dp->d_ops->node_tree_p(node); 893 894 ASSERT(oldblk->index >= 0 && oldblk->index <= nodehdr.count); 895 ASSERT(newblk->blkno != 0); 896 if (state->args->whichfork == XFS_DATA_FORK) 897 ASSERT(newblk->blkno >= state->args->geo->leafblk && 898 newblk->blkno < state->args->geo->freeblk); 899 900 /* 901 * We may need to make some room before we insert the new node. 902 */ 903 tmp = 0; 904 if (oldblk->index < nodehdr.count) { 905 tmp = (nodehdr.count - oldblk->index) * (uint)sizeof(*btree); 906 memmove(&btree[oldblk->index + 1], &btree[oldblk->index], tmp); 907 } 908 btree[oldblk->index].hashval = cpu_to_be32(newblk->hashval); 909 btree[oldblk->index].before = cpu_to_be32(newblk->blkno); 910 xfs_trans_log_buf(state->args->trans, oldblk->bp, 911 XFS_DA_LOGRANGE(node, &btree[oldblk->index], 912 tmp + sizeof(*btree))); 913 914 nodehdr.count += 1; 915 dp->d_ops->node_hdr_to_disk(node, &nodehdr); 916 xfs_trans_log_buf(state->args->trans, oldblk->bp, 917 XFS_DA_LOGRANGE(node, &node->hdr, dp->d_ops->node_hdr_size)); 918 919 /* 920 * Copy the last hash value from the oldblk to propagate upwards. 921 */ 922 oldblk->hashval = be32_to_cpu(btree[nodehdr.count - 1].hashval); 923 } 924 925 /*======================================================================== 926 * Routines used for shrinking the Btree. 927 *========================================================================*/ 928 929 /* 930 * Deallocate an empty leaf node, remove it from its parent, 931 * possibly deallocating that block, etc... 932 */ 933 int 934 xfs_da3_join( 935 struct xfs_da_state *state) 936 { 937 struct xfs_da_state_blk *drop_blk; 938 struct xfs_da_state_blk *save_blk; 939 int action = 0; 940 int error; 941 942 trace_xfs_da_join(state->args); 943 944 drop_blk = &state->path.blk[ state->path.active-1 ]; 945 save_blk = &state->altpath.blk[ state->path.active-1 ]; 946 ASSERT(state->path.blk[0].magic == XFS_DA_NODE_MAGIC); 947 ASSERT(drop_blk->magic == XFS_ATTR_LEAF_MAGIC || 948 drop_blk->magic == XFS_DIR2_LEAFN_MAGIC); 949 950 /* 951 * Walk back up the tree joining/deallocating as necessary. 952 * When we stop dropping blocks, break out. 953 */ 954 for ( ; state->path.active >= 2; drop_blk--, save_blk--, 955 state->path.active--) { 956 /* 957 * See if we can combine the block with a neighbor. 958 * (action == 0) => no options, just leave 959 * (action == 1) => coalesce, then unlink 960 * (action == 2) => block empty, unlink it 961 */ 962 switch (drop_blk->magic) { 963 case XFS_ATTR_LEAF_MAGIC: 964 error = xfs_attr3_leaf_toosmall(state, &action); 965 if (error) 966 return error; 967 if (action == 0) 968 return 0; 969 xfs_attr3_leaf_unbalance(state, drop_blk, save_blk); 970 break; 971 case XFS_DIR2_LEAFN_MAGIC: 972 error = xfs_dir2_leafn_toosmall(state, &action); 973 if (error) 974 return error; 975 if (action == 0) 976 return 0; 977 xfs_dir2_leafn_unbalance(state, drop_blk, save_blk); 978 break; 979 case XFS_DA_NODE_MAGIC: 980 /* 981 * Remove the offending node, fixup hashvals, 982 * check for a toosmall neighbor. 
983 */ 984 xfs_da3_node_remove(state, drop_blk); 985 xfs_da3_fixhashpath(state, &state->path); 986 error = xfs_da3_node_toosmall(state, &action); 987 if (error) 988 return error; 989 if (action == 0) 990 return 0; 991 xfs_da3_node_unbalance(state, drop_blk, save_blk); 992 break; 993 } 994 xfs_da3_fixhashpath(state, &state->altpath); 995 error = xfs_da3_blk_unlink(state, drop_blk, save_blk); 996 xfs_da_state_kill_altpath(state); 997 if (error) 998 return error; 999 error = xfs_da_shrink_inode(state->args, drop_blk->blkno, 1000 drop_blk->bp); 1001 drop_blk->bp = NULL; 1002 if (error) 1003 return error; 1004 } 1005 /* 1006 * We joined all the way to the top. If it turns out that 1007 * we only have one entry in the root, make the child block 1008 * the new root. 1009 */ 1010 xfs_da3_node_remove(state, drop_blk); 1011 xfs_da3_fixhashpath(state, &state->path); 1012 error = xfs_da3_root_join(state, &state->path.blk[0]); 1013 return error; 1014 } 1015 1016 #ifdef DEBUG 1017 static void 1018 xfs_da_blkinfo_onlychild_validate(struct xfs_da_blkinfo *blkinfo, __u16 level) 1019 { 1020 __be16 magic = blkinfo->magic; 1021 1022 if (level == 1) { 1023 ASSERT(magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) || 1024 magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC) || 1025 magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC) || 1026 magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC)); 1027 } else { 1028 ASSERT(magic == cpu_to_be16(XFS_DA_NODE_MAGIC) || 1029 magic == cpu_to_be16(XFS_DA3_NODE_MAGIC)); 1030 } 1031 ASSERT(!blkinfo->forw); 1032 ASSERT(!blkinfo->back); 1033 } 1034 #else /* !DEBUG */ 1035 #define xfs_da_blkinfo_onlychild_validate(blkinfo, level) 1036 #endif /* !DEBUG */ 1037 1038 /* 1039 * We have only one entry in the root. Copy the only remaining child of 1040 * the old root to block 0 as the new root node. 1041 */ 1042 STATIC int 1043 xfs_da3_root_join( 1044 struct xfs_da_state *state, 1045 struct xfs_da_state_blk *root_blk) 1046 { 1047 struct xfs_da_intnode *oldroot; 1048 struct xfs_da_args *args; 1049 xfs_dablk_t child; 1050 struct xfs_buf *bp; 1051 struct xfs_da3_icnode_hdr oldroothdr; 1052 struct xfs_da_node_entry *btree; 1053 int error; 1054 struct xfs_inode *dp = state->args->dp; 1055 1056 trace_xfs_da_root_join(state->args); 1057 1058 ASSERT(root_blk->magic == XFS_DA_NODE_MAGIC); 1059 1060 args = state->args; 1061 oldroot = root_blk->bp->b_addr; 1062 dp->d_ops->node_hdr_from_disk(&oldroothdr, oldroot); 1063 ASSERT(oldroothdr.forw == 0); 1064 ASSERT(oldroothdr.back == 0); 1065 1066 /* 1067 * If the root has more than one child, then don't do anything. 1068 */ 1069 if (oldroothdr.count > 1) 1070 return 0; 1071 1072 /* 1073 * Read in the (only) child block, then copy those bytes into 1074 * the root block's buffer and free the original child block. 1075 */ 1076 btree = dp->d_ops->node_tree_p(oldroot); 1077 child = be32_to_cpu(btree[0].before); 1078 ASSERT(child != 0); 1079 error = xfs_da3_node_read(args->trans, dp, child, -1, &bp, 1080 args->whichfork); 1081 if (error) 1082 return error; 1083 xfs_da_blkinfo_onlychild_validate(bp->b_addr, oldroothdr.level); 1084 1085 /* 1086 * This could be copying a leaf back into the root block in the case of 1087 * there only being a single leaf block left in the tree. Hence we have 1088 * to update the b_ops pointer as well to match the buffer type change 1089 * that could occur. For dir3 blocks we also need to update the block 1090 * number in the buffer header. 
1091 */ 1092 memcpy(root_blk->bp->b_addr, bp->b_addr, args->geo->blksize); 1093 root_blk->bp->b_ops = bp->b_ops; 1094 xfs_trans_buf_copy_type(root_blk->bp, bp); 1095 if (oldroothdr.magic == XFS_DA3_NODE_MAGIC) { 1096 struct xfs_da3_blkinfo *da3 = root_blk->bp->b_addr; 1097 da3->blkno = cpu_to_be64(root_blk->bp->b_bn); 1098 } 1099 xfs_trans_log_buf(args->trans, root_blk->bp, 0, 1100 args->geo->blksize - 1); 1101 error = xfs_da_shrink_inode(args, child, bp); 1102 return error; 1103 } 1104 1105 /* 1106 * Check a node block and its neighbors to see if the block should be 1107 * collapsed into one or the other neighbor. Always keep the block 1108 * with the smaller block number. 1109 * If the current block is over 50% full, don't try to join it, return 0. 1110 * If the block is empty, fill in the state structure and return 2. 1111 * If it can be collapsed, fill in the state structure and return 1. 1112 * If nothing can be done, return 0. 1113 */ 1114 STATIC int 1115 xfs_da3_node_toosmall( 1116 struct xfs_da_state *state, 1117 int *action) 1118 { 1119 struct xfs_da_intnode *node; 1120 struct xfs_da_state_blk *blk; 1121 struct xfs_da_blkinfo *info; 1122 xfs_dablk_t blkno; 1123 struct xfs_buf *bp; 1124 struct xfs_da3_icnode_hdr nodehdr; 1125 int count; 1126 int forward; 1127 int error; 1128 int retval; 1129 int i; 1130 struct xfs_inode *dp = state->args->dp; 1131 1132 trace_xfs_da_node_toosmall(state->args); 1133 1134 /* 1135 * Check for the degenerate case of the block being over 50% full. 1136 * If so, it's not worth even looking to see if we might be able 1137 * to coalesce with a sibling. 1138 */ 1139 blk = &state->path.blk[ state->path.active-1 ]; 1140 info = blk->bp->b_addr; 1141 node = (xfs_da_intnode_t *)info; 1142 dp->d_ops->node_hdr_from_disk(&nodehdr, node); 1143 if (nodehdr.count > (state->args->geo->node_ents >> 1)) { 1144 *action = 0; /* blk over 50%, don't try to join */ 1145 return 0; /* blk over 50%, don't try to join */ 1146 } 1147 1148 /* 1149 * Check for the degenerate case of the block being empty. 1150 * If the block is empty, we'll simply delete it, no need to 1151 * coalesce it with a sibling block. We choose (arbitrarily) 1152 * to merge with the forward block unless it is NULL. 1153 */ 1154 if (nodehdr.count == 0) { 1155 /* 1156 * Make altpath point to the block we want to keep and 1157 * path point to the block we want to drop (this one). 1158 */ 1159 forward = (info->forw != 0); 1160 memcpy(&state->altpath, &state->path, sizeof(state->path)); 1161 error = xfs_da3_path_shift(state, &state->altpath, forward, 1162 0, &retval); 1163 if (error) 1164 return error; 1165 if (retval) { 1166 *action = 0; 1167 } else { 1168 *action = 2; 1169 } 1170 return 0; 1171 } 1172 1173 /* 1174 * Examine each sibling block to see if we can coalesce with 1175 * at least 25% free space to spare. We need to figure out 1176 * whether to merge with the forward or the backward block. 1177 * We prefer coalescing with the lower numbered sibling so as 1178 * to shrink a directory over time. 
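 *
 * The check below boils down to: a sibling can be absorbed when
 *	our count + sibling count <= node_ents - node_ents / 4
 * For example (numbers purely for illustration), with node_ents = 64 and
 * 20 entries in this block, a sibling holding up to 28 entries still
 * leaves the required 25% slack (64 - 16 - 20 = 28).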
1179 */ 1180 count = state->args->geo->node_ents; 1181 count -= state->args->geo->node_ents >> 2; 1182 count -= nodehdr.count; 1183 1184 /* start with smaller blk num */ 1185 forward = nodehdr.forw < nodehdr.back; 1186 for (i = 0; i < 2; forward = !forward, i++) { 1187 struct xfs_da3_icnode_hdr thdr; 1188 if (forward) 1189 blkno = nodehdr.forw; 1190 else 1191 blkno = nodehdr.back; 1192 if (blkno == 0) 1193 continue; 1194 error = xfs_da3_node_read(state->args->trans, dp, 1195 blkno, -1, &bp, state->args->whichfork); 1196 if (error) 1197 return error; 1198 1199 node = bp->b_addr; 1200 dp->d_ops->node_hdr_from_disk(&thdr, node); 1201 xfs_trans_brelse(state->args->trans, bp); 1202 1203 if (count - thdr.count >= 0) 1204 break; /* fits with at least 25% to spare */ 1205 } 1206 if (i >= 2) { 1207 *action = 0; 1208 return 0; 1209 } 1210 1211 /* 1212 * Make altpath point to the block we want to keep (the lower 1213 * numbered block) and path point to the block we want to drop. 1214 */ 1215 memcpy(&state->altpath, &state->path, sizeof(state->path)); 1216 if (blkno < blk->blkno) { 1217 error = xfs_da3_path_shift(state, &state->altpath, forward, 1218 0, &retval); 1219 } else { 1220 error = xfs_da3_path_shift(state, &state->path, forward, 1221 0, &retval); 1222 } 1223 if (error) 1224 return error; 1225 if (retval) { 1226 *action = 0; 1227 return 0; 1228 } 1229 *action = 1; 1230 return 0; 1231 } 1232 1233 /* 1234 * Pick up the last hashvalue from an intermediate node. 1235 */ 1236 STATIC uint 1237 xfs_da3_node_lasthash( 1238 struct xfs_inode *dp, 1239 struct xfs_buf *bp, 1240 int *count) 1241 { 1242 struct xfs_da_intnode *node; 1243 struct xfs_da_node_entry *btree; 1244 struct xfs_da3_icnode_hdr nodehdr; 1245 1246 node = bp->b_addr; 1247 dp->d_ops->node_hdr_from_disk(&nodehdr, node); 1248 if (count) 1249 *count = nodehdr.count; 1250 if (!nodehdr.count) 1251 return 0; 1252 btree = dp->d_ops->node_tree_p(node); 1253 return be32_to_cpu(btree[nodehdr.count - 1].hashval); 1254 } 1255 1256 /* 1257 * Walk back up the tree adjusting hash values as necessary, 1258 * when we stop making changes, return. 
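 *
 * (At each level the parent entry pointing at the child is rewritten with
 *  the child's new last hashval; once a parent already holds the right
 *  value nothing above it can change either, so the loop below breaks out
 *  early.)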
1259 */ 1260 void 1261 xfs_da3_fixhashpath( 1262 struct xfs_da_state *state, 1263 struct xfs_da_state_path *path) 1264 { 1265 struct xfs_da_state_blk *blk; 1266 struct xfs_da_intnode *node; 1267 struct xfs_da_node_entry *btree; 1268 xfs_dahash_t lasthash=0; 1269 int level; 1270 int count; 1271 struct xfs_inode *dp = state->args->dp; 1272 1273 trace_xfs_da_fixhashpath(state->args); 1274 1275 level = path->active-1; 1276 blk = &path->blk[ level ]; 1277 switch (blk->magic) { 1278 case XFS_ATTR_LEAF_MAGIC: 1279 lasthash = xfs_attr_leaf_lasthash(blk->bp, &count); 1280 if (count == 0) 1281 return; 1282 break; 1283 case XFS_DIR2_LEAFN_MAGIC: 1284 lasthash = xfs_dir2_leafn_lasthash(dp, blk->bp, &count); 1285 if (count == 0) 1286 return; 1287 break; 1288 case XFS_DA_NODE_MAGIC: 1289 lasthash = xfs_da3_node_lasthash(dp, blk->bp, &count); 1290 if (count == 0) 1291 return; 1292 break; 1293 } 1294 for (blk--, level--; level >= 0; blk--, level--) { 1295 struct xfs_da3_icnode_hdr nodehdr; 1296 1297 node = blk->bp->b_addr; 1298 dp->d_ops->node_hdr_from_disk(&nodehdr, node); 1299 btree = dp->d_ops->node_tree_p(node); 1300 if (be32_to_cpu(btree[blk->index].hashval) == lasthash) 1301 break; 1302 blk->hashval = lasthash; 1303 btree[blk->index].hashval = cpu_to_be32(lasthash); 1304 xfs_trans_log_buf(state->args->trans, blk->bp, 1305 XFS_DA_LOGRANGE(node, &btree[blk->index], 1306 sizeof(*btree))); 1307 1308 lasthash = be32_to_cpu(btree[nodehdr.count - 1].hashval); 1309 } 1310 } 1311 1312 /* 1313 * Remove an entry from an intermediate node. 1314 */ 1315 STATIC void 1316 xfs_da3_node_remove( 1317 struct xfs_da_state *state, 1318 struct xfs_da_state_blk *drop_blk) 1319 { 1320 struct xfs_da_intnode *node; 1321 struct xfs_da3_icnode_hdr nodehdr; 1322 struct xfs_da_node_entry *btree; 1323 int index; 1324 int tmp; 1325 struct xfs_inode *dp = state->args->dp; 1326 1327 trace_xfs_da_node_remove(state->args); 1328 1329 node = drop_blk->bp->b_addr; 1330 dp->d_ops->node_hdr_from_disk(&nodehdr, node); 1331 ASSERT(drop_blk->index < nodehdr.count); 1332 ASSERT(drop_blk->index >= 0); 1333 1334 /* 1335 * Copy over the offending entry, or just zero it out. 1336 */ 1337 index = drop_blk->index; 1338 btree = dp->d_ops->node_tree_p(node); 1339 if (index < nodehdr.count - 1) { 1340 tmp = nodehdr.count - index - 1; 1341 tmp *= (uint)sizeof(xfs_da_node_entry_t); 1342 memmove(&btree[index], &btree[index + 1], tmp); 1343 xfs_trans_log_buf(state->args->trans, drop_blk->bp, 1344 XFS_DA_LOGRANGE(node, &btree[index], tmp)); 1345 index = nodehdr.count - 1; 1346 } 1347 memset(&btree[index], 0, sizeof(xfs_da_node_entry_t)); 1348 xfs_trans_log_buf(state->args->trans, drop_blk->bp, 1349 XFS_DA_LOGRANGE(node, &btree[index], sizeof(btree[index]))); 1350 nodehdr.count -= 1; 1351 dp->d_ops->node_hdr_to_disk(node, &nodehdr); 1352 xfs_trans_log_buf(state->args->trans, drop_blk->bp, 1353 XFS_DA_LOGRANGE(node, &node->hdr, dp->d_ops->node_hdr_size)); 1354 1355 /* 1356 * Copy the last hash value from the block to propagate upwards. 1357 */ 1358 drop_blk->hashval = be32_to_cpu(btree[index - 1].hashval); 1359 } 1360 1361 /* 1362 * Unbalance the elements between two intermediate nodes, 1363 * move all Btree elements from one node into another. 
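 *
 * (If the dying block sorts below the survivor, the survivor's entries
 *  are first shifted up to open a hole at the front; otherwise the dying
 *  block's entries are simply appended.  Either way every live entry ends
 *  up in save_blk before the caller frees drop_blk.)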
1364 */ 1365 STATIC void 1366 xfs_da3_node_unbalance( 1367 struct xfs_da_state *state, 1368 struct xfs_da_state_blk *drop_blk, 1369 struct xfs_da_state_blk *save_blk) 1370 { 1371 struct xfs_da_intnode *drop_node; 1372 struct xfs_da_intnode *save_node; 1373 struct xfs_da_node_entry *drop_btree; 1374 struct xfs_da_node_entry *save_btree; 1375 struct xfs_da3_icnode_hdr drop_hdr; 1376 struct xfs_da3_icnode_hdr save_hdr; 1377 struct xfs_trans *tp; 1378 int sindex; 1379 int tmp; 1380 struct xfs_inode *dp = state->args->dp; 1381 1382 trace_xfs_da_node_unbalance(state->args); 1383 1384 drop_node = drop_blk->bp->b_addr; 1385 save_node = save_blk->bp->b_addr; 1386 dp->d_ops->node_hdr_from_disk(&drop_hdr, drop_node); 1387 dp->d_ops->node_hdr_from_disk(&save_hdr, save_node); 1388 drop_btree = dp->d_ops->node_tree_p(drop_node); 1389 save_btree = dp->d_ops->node_tree_p(save_node); 1390 tp = state->args->trans; 1391 1392 /* 1393 * If the dying block has lower hashvals, then move all the 1394 * elements in the remaining block up to make a hole. 1395 */ 1396 if ((be32_to_cpu(drop_btree[0].hashval) < 1397 be32_to_cpu(save_btree[0].hashval)) || 1398 (be32_to_cpu(drop_btree[drop_hdr.count - 1].hashval) < 1399 be32_to_cpu(save_btree[save_hdr.count - 1].hashval))) { 1400 /* XXX: check this - is memmove dst correct? */ 1401 tmp = save_hdr.count * sizeof(xfs_da_node_entry_t); 1402 memmove(&save_btree[drop_hdr.count], &save_btree[0], tmp); 1403 1404 sindex = 0; 1405 xfs_trans_log_buf(tp, save_blk->bp, 1406 XFS_DA_LOGRANGE(save_node, &save_btree[0], 1407 (save_hdr.count + drop_hdr.count) * 1408 sizeof(xfs_da_node_entry_t))); 1409 } else { 1410 sindex = save_hdr.count; 1411 xfs_trans_log_buf(tp, save_blk->bp, 1412 XFS_DA_LOGRANGE(save_node, &save_btree[sindex], 1413 drop_hdr.count * sizeof(xfs_da_node_entry_t))); 1414 } 1415 1416 /* 1417 * Move all the B-tree elements from drop_blk to save_blk. 1418 */ 1419 tmp = drop_hdr.count * (uint)sizeof(xfs_da_node_entry_t); 1420 memcpy(&save_btree[sindex], &drop_btree[0], tmp); 1421 save_hdr.count += drop_hdr.count; 1422 1423 dp->d_ops->node_hdr_to_disk(save_node, &save_hdr); 1424 xfs_trans_log_buf(tp, save_blk->bp, 1425 XFS_DA_LOGRANGE(save_node, &save_node->hdr, 1426 dp->d_ops->node_hdr_size)); 1427 1428 /* 1429 * Save the last hashval in the remaining block for upward propagation. 1430 */ 1431 save_blk->hashval = be32_to_cpu(save_btree[save_hdr.count - 1].hashval); 1432 } 1433 1434 /*======================================================================== 1435 * Routines used for finding things in the Btree. 1436 *========================================================================*/ 1437 1438 /* 1439 * Walk down the Btree looking for a particular filename, filling 1440 * in the state structure as we go. 1441 * 1442 * We will set the state structure to point to each of the elements 1443 * in each of the nodes where either the hashval is or should be. 1444 * 1445 * We support duplicate hashval's so for each entry in the current 1446 * node that could contain the desired hashval, descend. This is a 1447 * pruned depth-first tree search. 
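 *
 * Within each node the search below is a binary search that stops once
 * the window is four entries or fewer, then a short linear scan settles
 * on the first entry whose hashval is >= the search value, so a run of
 * duplicate hashvals is always entered at its first occurrence.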
1448 */ 1449 int /* error */ 1450 xfs_da3_node_lookup_int( 1451 struct xfs_da_state *state, 1452 int *result) 1453 { 1454 struct xfs_da_state_blk *blk; 1455 struct xfs_da_blkinfo *curr; 1456 struct xfs_da_intnode *node; 1457 struct xfs_da_node_entry *btree; 1458 struct xfs_da3_icnode_hdr nodehdr; 1459 struct xfs_da_args *args; 1460 xfs_dablk_t blkno; 1461 xfs_dahash_t hashval; 1462 xfs_dahash_t btreehashval; 1463 int probe; 1464 int span; 1465 int max; 1466 int error; 1467 int retval; 1468 struct xfs_inode *dp = state->args->dp; 1469 1470 args = state->args; 1471 1472 /* 1473 * Descend thru the B-tree searching each level for the right 1474 * node to use, until the right hashval is found. 1475 */ 1476 blkno = (args->whichfork == XFS_DATA_FORK)? args->geo->leafblk : 0; 1477 for (blk = &state->path.blk[0], state->path.active = 1; 1478 state->path.active <= XFS_DA_NODE_MAXDEPTH; 1479 blk++, state->path.active++) { 1480 /* 1481 * Read the next node down in the tree. 1482 */ 1483 blk->blkno = blkno; 1484 error = xfs_da3_node_read(args->trans, args->dp, blkno, 1485 -1, &blk->bp, args->whichfork); 1486 if (error) { 1487 blk->blkno = 0; 1488 state->path.active--; 1489 return error; 1490 } 1491 curr = blk->bp->b_addr; 1492 blk->magic = be16_to_cpu(curr->magic); 1493 1494 if (blk->magic == XFS_ATTR_LEAF_MAGIC || 1495 blk->magic == XFS_ATTR3_LEAF_MAGIC) { 1496 blk->magic = XFS_ATTR_LEAF_MAGIC; 1497 blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL); 1498 break; 1499 } 1500 1501 if (blk->magic == XFS_DIR2_LEAFN_MAGIC || 1502 blk->magic == XFS_DIR3_LEAFN_MAGIC) { 1503 blk->magic = XFS_DIR2_LEAFN_MAGIC; 1504 blk->hashval = xfs_dir2_leafn_lasthash(args->dp, 1505 blk->bp, NULL); 1506 break; 1507 } 1508 1509 blk->magic = XFS_DA_NODE_MAGIC; 1510 1511 1512 /* 1513 * Search an intermediate node for a match. 1514 */ 1515 node = blk->bp->b_addr; 1516 dp->d_ops->node_hdr_from_disk(&nodehdr, node); 1517 btree = dp->d_ops->node_tree_p(node); 1518 1519 max = nodehdr.count; 1520 blk->hashval = be32_to_cpu(btree[max - 1].hashval); 1521 1522 /* 1523 * Binary search. (note: small blocks will skip loop) 1524 */ 1525 probe = span = max / 2; 1526 hashval = args->hashval; 1527 while (span > 4) { 1528 span /= 2; 1529 btreehashval = be32_to_cpu(btree[probe].hashval); 1530 if (btreehashval < hashval) 1531 probe += span; 1532 else if (btreehashval > hashval) 1533 probe -= span; 1534 else 1535 break; 1536 } 1537 ASSERT((probe >= 0) && (probe < max)); 1538 ASSERT((span <= 4) || 1539 (be32_to_cpu(btree[probe].hashval) == hashval)); 1540 1541 /* 1542 * Since we may have duplicate hashval's, find the first 1543 * matching hashval in the node. 1544 */ 1545 while (probe > 0 && 1546 be32_to_cpu(btree[probe].hashval) >= hashval) { 1547 probe--; 1548 } 1549 while (probe < max && 1550 be32_to_cpu(btree[probe].hashval) < hashval) { 1551 probe++; 1552 } 1553 1554 /* 1555 * Pick the right block to descend on. 1556 */ 1557 if (probe == max) { 1558 blk->index = max - 1; 1559 blkno = be32_to_cpu(btree[max - 1].before); 1560 } else { 1561 blk->index = probe; 1562 blkno = be32_to_cpu(btree[probe].before); 1563 } 1564 } 1565 1566 /* 1567 * A leaf block that ends in the hashval that we are interested in 1568 * (final hashval == search hashval) means that the next block may 1569 * contain more entries with the same hashval, shift upward to the 1570 * next leaf and keep searching. 
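 *
 * For example, if the lookup hash is H, the current leaf ends with H and
 * the name was not found here, further entries hashing to H may still
 * live in the next leaf; the loop below shifts the path forward one leaf
 * and retries until the run of equal hashvals is exhausted.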
1571 */ 1572 for (;;) { 1573 if (blk->magic == XFS_DIR2_LEAFN_MAGIC) { 1574 retval = xfs_dir2_leafn_lookup_int(blk->bp, args, 1575 &blk->index, state); 1576 } else if (blk->magic == XFS_ATTR_LEAF_MAGIC) { 1577 retval = xfs_attr3_leaf_lookup_int(blk->bp, args); 1578 blk->index = args->index; 1579 args->blkno = blk->blkno; 1580 } else { 1581 ASSERT(0); 1582 return -EFSCORRUPTED; 1583 } 1584 if (((retval == -ENOENT) || (retval == -ENOATTR)) && 1585 (blk->hashval == args->hashval)) { 1586 error = xfs_da3_path_shift(state, &state->path, 1, 1, 1587 &retval); 1588 if (error) 1589 return error; 1590 if (retval == 0) { 1591 continue; 1592 } else if (blk->magic == XFS_ATTR_LEAF_MAGIC) { 1593 /* path_shift() gives ENOENT */ 1594 retval = -ENOATTR; 1595 } 1596 } 1597 break; 1598 } 1599 *result = retval; 1600 return 0; 1601 } 1602 1603 /*======================================================================== 1604 * Utility routines. 1605 *========================================================================*/ 1606 1607 /* 1608 * Compare two intermediate nodes for "order". 1609 */ 1610 STATIC int 1611 xfs_da3_node_order( 1612 struct xfs_inode *dp, 1613 struct xfs_buf *node1_bp, 1614 struct xfs_buf *node2_bp) 1615 { 1616 struct xfs_da_intnode *node1; 1617 struct xfs_da_intnode *node2; 1618 struct xfs_da_node_entry *btree1; 1619 struct xfs_da_node_entry *btree2; 1620 struct xfs_da3_icnode_hdr node1hdr; 1621 struct xfs_da3_icnode_hdr node2hdr; 1622 1623 node1 = node1_bp->b_addr; 1624 node2 = node2_bp->b_addr; 1625 dp->d_ops->node_hdr_from_disk(&node1hdr, node1); 1626 dp->d_ops->node_hdr_from_disk(&node2hdr, node2); 1627 btree1 = dp->d_ops->node_tree_p(node1); 1628 btree2 = dp->d_ops->node_tree_p(node2); 1629 1630 if (node1hdr.count > 0 && node2hdr.count > 0 && 1631 ((be32_to_cpu(btree2[0].hashval) < be32_to_cpu(btree1[0].hashval)) || 1632 (be32_to_cpu(btree2[node2hdr.count - 1].hashval) < 1633 be32_to_cpu(btree1[node1hdr.count - 1].hashval)))) { 1634 return 1; 1635 } 1636 return 0; 1637 } 1638 1639 /* 1640 * Link a new block into a doubly linked list of blocks (of whatever type). 1641 */ 1642 int /* error */ 1643 xfs_da3_blk_link( 1644 struct xfs_da_state *state, 1645 struct xfs_da_state_blk *old_blk, 1646 struct xfs_da_state_blk *new_blk) 1647 { 1648 struct xfs_da_blkinfo *old_info; 1649 struct xfs_da_blkinfo *new_info; 1650 struct xfs_da_blkinfo *tmp_info; 1651 struct xfs_da_args *args; 1652 struct xfs_buf *bp; 1653 int before = 0; 1654 int error; 1655 struct xfs_inode *dp = state->args->dp; 1656 1657 /* 1658 * Set up environment. 1659 */ 1660 args = state->args; 1661 ASSERT(args != NULL); 1662 old_info = old_blk->bp->b_addr; 1663 new_info = new_blk->bp->b_addr; 1664 ASSERT(old_blk->magic == XFS_DA_NODE_MAGIC || 1665 old_blk->magic == XFS_DIR2_LEAFN_MAGIC || 1666 old_blk->magic == XFS_ATTR_LEAF_MAGIC); 1667 1668 switch (old_blk->magic) { 1669 case XFS_ATTR_LEAF_MAGIC: 1670 before = xfs_attr_leaf_order(old_blk->bp, new_blk->bp); 1671 break; 1672 case XFS_DIR2_LEAFN_MAGIC: 1673 before = xfs_dir2_leafn_order(dp, old_blk->bp, new_blk->bp); 1674 break; 1675 case XFS_DA_NODE_MAGIC: 1676 before = xfs_da3_node_order(dp, old_blk->bp, new_blk->bp); 1677 break; 1678 } 1679 1680 /* 1681 * Link blocks in appropriate order. 1682 */ 1683 if (before) { 1684 /* 1685 * Link new block in before existing block. 
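 *
 * (Standard doubly linked list insertion: new->forw points at the old
 *  block, new->back takes the old block's back pointer, the old back
 *  neighbour, if there is one, is read in so its forw pointer can be
 *  repointed at the new block, and each modified header is logged.)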
1686 */ 1687 trace_xfs_da_link_before(args); 1688 new_info->forw = cpu_to_be32(old_blk->blkno); 1689 new_info->back = old_info->back; 1690 if (old_info->back) { 1691 error = xfs_da3_node_read(args->trans, dp, 1692 be32_to_cpu(old_info->back), 1693 -1, &bp, args->whichfork); 1694 if (error) 1695 return error; 1696 ASSERT(bp != NULL); 1697 tmp_info = bp->b_addr; 1698 ASSERT(tmp_info->magic == old_info->magic); 1699 ASSERT(be32_to_cpu(tmp_info->forw) == old_blk->blkno); 1700 tmp_info->forw = cpu_to_be32(new_blk->blkno); 1701 xfs_trans_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1); 1702 } 1703 old_info->back = cpu_to_be32(new_blk->blkno); 1704 } else { 1705 /* 1706 * Link new block in after existing block. 1707 */ 1708 trace_xfs_da_link_after(args); 1709 new_info->forw = old_info->forw; 1710 new_info->back = cpu_to_be32(old_blk->blkno); 1711 if (old_info->forw) { 1712 error = xfs_da3_node_read(args->trans, dp, 1713 be32_to_cpu(old_info->forw), 1714 -1, &bp, args->whichfork); 1715 if (error) 1716 return error; 1717 ASSERT(bp != NULL); 1718 tmp_info = bp->b_addr; 1719 ASSERT(tmp_info->magic == old_info->magic); 1720 ASSERT(be32_to_cpu(tmp_info->back) == old_blk->blkno); 1721 tmp_info->back = cpu_to_be32(new_blk->blkno); 1722 xfs_trans_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1); 1723 } 1724 old_info->forw = cpu_to_be32(new_blk->blkno); 1725 } 1726 1727 xfs_trans_log_buf(args->trans, old_blk->bp, 0, sizeof(*tmp_info) - 1); 1728 xfs_trans_log_buf(args->trans, new_blk->bp, 0, sizeof(*tmp_info) - 1); 1729 return 0; 1730 } 1731 1732 /* 1733 * Unlink a block from a doubly linked list of blocks. 1734 */ 1735 STATIC int /* error */ 1736 xfs_da3_blk_unlink( 1737 struct xfs_da_state *state, 1738 struct xfs_da_state_blk *drop_blk, 1739 struct xfs_da_state_blk *save_blk) 1740 { 1741 struct xfs_da_blkinfo *drop_info; 1742 struct xfs_da_blkinfo *save_info; 1743 struct xfs_da_blkinfo *tmp_info; 1744 struct xfs_da_args *args; 1745 struct xfs_buf *bp; 1746 int error; 1747 1748 /* 1749 * Set up environment. 1750 */ 1751 args = state->args; 1752 ASSERT(args != NULL); 1753 save_info = save_blk->bp->b_addr; 1754 drop_info = drop_blk->bp->b_addr; 1755 ASSERT(save_blk->magic == XFS_DA_NODE_MAGIC || 1756 save_blk->magic == XFS_DIR2_LEAFN_MAGIC || 1757 save_blk->magic == XFS_ATTR_LEAF_MAGIC); 1758 ASSERT(save_blk->magic == drop_blk->magic); 1759 ASSERT((be32_to_cpu(save_info->forw) == drop_blk->blkno) || 1760 (be32_to_cpu(save_info->back) == drop_blk->blkno)); 1761 ASSERT((be32_to_cpu(drop_info->forw) == save_blk->blkno) || 1762 (be32_to_cpu(drop_info->back) == save_blk->blkno)); 1763 1764 /* 1765 * Unlink the leaf block from the doubly linked chain of leaves. 
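 *
 * (The surviving block inherits the dropped block's outward pointer on
 *  the relevant side, and the neighbour beyond the dropped block, if any,
 *  is read in so its pointer can be redirected back at the survivor.)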
1766 */ 1767 if (be32_to_cpu(save_info->back) == drop_blk->blkno) { 1768 trace_xfs_da_unlink_back(args); 1769 save_info->back = drop_info->back; 1770 if (drop_info->back) { 1771 error = xfs_da3_node_read(args->trans, args->dp, 1772 be32_to_cpu(drop_info->back), 1773 -1, &bp, args->whichfork); 1774 if (error) 1775 return error; 1776 ASSERT(bp != NULL); 1777 tmp_info = bp->b_addr; 1778 ASSERT(tmp_info->magic == save_info->magic); 1779 ASSERT(be32_to_cpu(tmp_info->forw) == drop_blk->blkno); 1780 tmp_info->forw = cpu_to_be32(save_blk->blkno); 1781 xfs_trans_log_buf(args->trans, bp, 0, 1782 sizeof(*tmp_info) - 1); 1783 } 1784 } else { 1785 trace_xfs_da_unlink_forward(args); 1786 save_info->forw = drop_info->forw; 1787 if (drop_info->forw) { 1788 error = xfs_da3_node_read(args->trans, args->dp, 1789 be32_to_cpu(drop_info->forw), 1790 -1, &bp, args->whichfork); 1791 if (error) 1792 return error; 1793 ASSERT(bp != NULL); 1794 tmp_info = bp->b_addr; 1795 ASSERT(tmp_info->magic == save_info->magic); 1796 ASSERT(be32_to_cpu(tmp_info->back) == drop_blk->blkno); 1797 tmp_info->back = cpu_to_be32(save_blk->blkno); 1798 xfs_trans_log_buf(args->trans, bp, 0, 1799 sizeof(*tmp_info) - 1); 1800 } 1801 } 1802 1803 xfs_trans_log_buf(args->trans, save_blk->bp, 0, sizeof(*save_info) - 1); 1804 return 0; 1805 } 1806 1807 /* 1808 * Move a path "forward" or "!forward" one block at the current level. 1809 * 1810 * This routine will adjust a "path" to point to the next block 1811 * "forward" (higher hashvalues) or "!forward" (lower hashvals) in the 1812 * Btree, including updating pointers to the intermediate nodes between 1813 * the new bottom and the root. 1814 */ 1815 int /* error */ 1816 xfs_da3_path_shift( 1817 struct xfs_da_state *state, 1818 struct xfs_da_state_path *path, 1819 int forward, 1820 int release, 1821 int *result) 1822 { 1823 struct xfs_da_state_blk *blk; 1824 struct xfs_da_blkinfo *info; 1825 struct xfs_da_intnode *node; 1826 struct xfs_da_args *args; 1827 struct xfs_da_node_entry *btree; 1828 struct xfs_da3_icnode_hdr nodehdr; 1829 xfs_dablk_t blkno = 0; 1830 int level; 1831 int error; 1832 struct xfs_inode *dp = state->args->dp; 1833 1834 trace_xfs_da_path_shift(state->args); 1835 1836 /* 1837 * Roll up the Btree looking for the first block where our 1838 * current index is not at the edge of the block. Note that 1839 * we skip the bottom layer because we want the sibling block. 1840 */ 1841 args = state->args; 1842 ASSERT(args != NULL); 1843 ASSERT(path != NULL); 1844 ASSERT((path->active > 0) && (path->active < XFS_DA_NODE_MAXDEPTH)); 1845 level = (path->active-1) - 1; /* skip bottom layer in path */ 1846 for (blk = &path->blk[level]; level >= 0; blk--, level--) { 1847 node = blk->bp->b_addr; 1848 dp->d_ops->node_hdr_from_disk(&nodehdr, node); 1849 btree = dp->d_ops->node_tree_p(node); 1850 1851 if (forward && (blk->index < nodehdr.count - 1)) { 1852 blk->index++; 1853 blkno = be32_to_cpu(btree[blk->index].before); 1854 break; 1855 } else if (!forward && (blk->index > 0)) { 1856 blk->index--; 1857 blkno = be32_to_cpu(btree[blk->index].before); 1858 break; 1859 } 1860 } 1861 if (level < 0) { 1862 *result = -ENOENT; /* we're out of our tree */ 1863 ASSERT(args->op_flags & XFS_DA_OP_OKNOENT); 1864 return 0; 1865 } 1866 1867 /* 1868 * Roll down the edge of the subtree until we reach the 1869 * same depth we were at originally. 1870 */ 1871 for (blk++, level++; level < path->active; blk++, level++) { 1872 /* 1873 * Release the old block. 
1874 * (if it's dirty, trans won't actually let go) 1875 */ 1876 if (release) 1877 xfs_trans_brelse(args->trans, blk->bp); 1878 1879 /* 1880 * Read the next child block. 1881 */ 1882 blk->blkno = blkno; 1883 error = xfs_da3_node_read(args->trans, dp, blkno, -1, 1884 &blk->bp, args->whichfork); 1885 if (error) 1886 return error; 1887 info = blk->bp->b_addr; 1888 ASSERT(info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC) || 1889 info->magic == cpu_to_be16(XFS_DA3_NODE_MAGIC) || 1890 info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) || 1891 info->magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC) || 1892 info->magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC) || 1893 info->magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC)); 1894 1895 1896 /* 1897 * Note: we flatten the magic number to a single type so we 1898 * don't have to compare against crc/non-crc types elsewhere. 1899 */ 1900 switch (be16_to_cpu(info->magic)) { 1901 case XFS_DA_NODE_MAGIC: 1902 case XFS_DA3_NODE_MAGIC: 1903 blk->magic = XFS_DA_NODE_MAGIC; 1904 node = (xfs_da_intnode_t *)info; 1905 dp->d_ops->node_hdr_from_disk(&nodehdr, node); 1906 btree = dp->d_ops->node_tree_p(node); 1907 blk->hashval = be32_to_cpu(btree[nodehdr.count - 1].hashval); 1908 if (forward) 1909 blk->index = 0; 1910 else 1911 blk->index = nodehdr.count - 1; 1912 blkno = be32_to_cpu(btree[blk->index].before); 1913 break; 1914 case XFS_ATTR_LEAF_MAGIC: 1915 case XFS_ATTR3_LEAF_MAGIC: 1916 blk->magic = XFS_ATTR_LEAF_MAGIC; 1917 ASSERT(level == path->active-1); 1918 blk->index = 0; 1919 blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL); 1920 break; 1921 case XFS_DIR2_LEAFN_MAGIC: 1922 case XFS_DIR3_LEAFN_MAGIC: 1923 blk->magic = XFS_DIR2_LEAFN_MAGIC; 1924 ASSERT(level == path->active-1); 1925 blk->index = 0; 1926 blk->hashval = xfs_dir2_leafn_lasthash(args->dp, 1927 blk->bp, NULL); 1928 break; 1929 default: 1930 ASSERT(0); 1931 break; 1932 } 1933 } 1934 *result = 0; 1935 return 0; 1936 } 1937 1938 1939 /*======================================================================== 1940 * Utility routines. 1941 *========================================================================*/ 1942 1943 /* 1944 * Implement a simple hash on a character string. 1945 * Rotate the hash value by 7 bits, then XOR each character in. 1946 * This is implemented with some source-level loop unrolling. 1947 */ 1948 xfs_dahash_t 1949 xfs_da_hashname(const __uint8_t *name, int namelen) 1950 { 1951 xfs_dahash_t hash; 1952 1953 /* 1954 * Do four characters at a time as long as we can. 1955 */ 1956 for (hash = 0; namelen >= 4; namelen -= 4, name += 4) 1957 hash = (name[0] << 21) ^ (name[1] << 14) ^ (name[2] << 7) ^ 1958 (name[3] << 0) ^ rol32(hash, 7 * 4); 1959 1960 /* 1961 * Now do the rest of the characters. 1962 */ 1963 switch (namelen) { 1964 case 3: 1965 return (name[0] << 14) ^ (name[1] << 7) ^ (name[2] << 0) ^ 1966 rol32(hash, 7 * 3); 1967 case 2: 1968 return (name[0] << 7) ^ (name[1] << 0) ^ rol32(hash, 7 * 2); 1969 case 1: 1970 return (name[0] << 0) ^ rol32(hash, 7 * 1); 1971 default: /* case 0: */ 1972 return hash; 1973 } 1974 } 1975 1976 enum xfs_dacmp 1977 xfs_da_compname( 1978 struct xfs_da_args *args, 1979 const unsigned char *name, 1980 int len) 1981 { 1982 return (args->namelen == len && memcmp(args->name, name, len) == 0) ? 
1983 XFS_CMP_EXACT : XFS_CMP_DIFFERENT; 1984 } 1985 1986 static xfs_dahash_t 1987 xfs_default_hashname( 1988 struct xfs_name *name) 1989 { 1990 return xfs_da_hashname(name->name, name->len); 1991 } 1992 1993 const struct xfs_nameops xfs_default_nameops = { 1994 .hashname = xfs_default_hashname, 1995 .compname = xfs_da_compname 1996 }; 1997 1998 int 1999 xfs_da_grow_inode_int( 2000 struct xfs_da_args *args, 2001 xfs_fileoff_t *bno, 2002 int count) 2003 { 2004 struct xfs_trans *tp = args->trans; 2005 struct xfs_inode *dp = args->dp; 2006 int w = args->whichfork; 2007 xfs_rfsblock_t nblks = dp->i_d.di_nblocks; 2008 struct xfs_bmbt_irec map, *mapp; 2009 int nmap, error, got, i, mapi; 2010 2011 /* 2012 * Find a spot in the file space to put the new block. 2013 */ 2014 error = xfs_bmap_first_unused(tp, dp, count, bno, w); 2015 if (error) 2016 return error; 2017 2018 /* 2019 * Try mapping it in one filesystem block. 2020 */ 2021 nmap = 1; 2022 ASSERT(args->firstblock != NULL); 2023 error = xfs_bmapi_write(tp, dp, *bno, count, 2024 xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA|XFS_BMAPI_CONTIG, 2025 args->firstblock, args->total, &map, &nmap, 2026 args->flist); 2027 if (error) 2028 return error; 2029 2030 ASSERT(nmap <= 1); 2031 if (nmap == 1) { 2032 mapp = ↦ 2033 mapi = 1; 2034 } else if (nmap == 0 && count > 1) { 2035 xfs_fileoff_t b; 2036 int c; 2037 2038 /* 2039 * If we didn't get it and the block might work if fragmented, 2040 * try without the CONTIG flag. Loop until we get it all. 2041 */ 2042 mapp = kmem_alloc(sizeof(*mapp) * count, KM_SLEEP); 2043 for (b = *bno, mapi = 0; b < *bno + count; ) { 2044 nmap = MIN(XFS_BMAP_MAX_NMAP, count); 2045 c = (int)(*bno + count - b); 2046 error = xfs_bmapi_write(tp, dp, b, c, 2047 xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA, 2048 args->firstblock, args->total, 2049 &mapp[mapi], &nmap, args->flist); 2050 if (error) 2051 goto out_free_map; 2052 if (nmap < 1) 2053 break; 2054 mapi += nmap; 2055 b = mapp[mapi - 1].br_startoff + 2056 mapp[mapi - 1].br_blockcount; 2057 } 2058 } else { 2059 mapi = 0; 2060 mapp = NULL; 2061 } 2062 2063 /* 2064 * Count the blocks we got, make sure it matches the total. 2065 */ 2066 for (i = 0, got = 0; i < mapi; i++) 2067 got += mapp[i].br_blockcount; 2068 if (got != count || mapp[0].br_startoff != *bno || 2069 mapp[mapi - 1].br_startoff + mapp[mapi - 1].br_blockcount != 2070 *bno + count) { 2071 error = -ENOSPC; 2072 goto out_free_map; 2073 } 2074 2075 /* account for newly allocated blocks in reserved blocks total */ 2076 args->total -= dp->i_d.di_nblocks - nblks; 2077 2078 out_free_map: 2079 if (mapp != &map) 2080 kmem_free(mapp); 2081 return error; 2082 } 2083 2084 /* 2085 * Add a block to the btree ahead of the file. 2086 * Return the new block number to the caller. 2087 */ 2088 int 2089 xfs_da_grow_inode( 2090 struct xfs_da_args *args, 2091 xfs_dablk_t *new_blkno) 2092 { 2093 xfs_fileoff_t bno; 2094 int error; 2095 2096 trace_xfs_da_grow_inode(args); 2097 2098 bno = args->geo->leafblk; 2099 error = xfs_da_grow_inode_int(args, &bno, args->geo->fsbcount); 2100 if (!error) 2101 *new_blkno = (xfs_dablk_t)bno; 2102 return error; 2103 } 2104 2105 /* 2106 * Ick. We need to always be able to remove a btree block, even 2107 * if there's no space reservation because the filesystem is full. 2108 * This is called if xfs_bunmapi on a btree block fails due to ENOSPC. 2109 * It swaps the target block with the last block in the file. The 2110 * last block in the file can always be removed since it can't cause 2111 * a bmap btree split to do that. 
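 *
 * In outline (an illustrative summary of the function below, not a
 * normative description):
 *
 *	copy the last block's contents over the dead block and log it;
 *	repoint the old last block's left/right siblings so they link to
 *	dead_blkno instead of last_blkno;
 *	walk down from the root to the last block's parent and make the
 *	matching btree[entno].before entry refer to dead_blkno;
 *	hand last_blkno/last_buf back through the out parameters so the
 *	caller (xfs_da_shrink_inode() below) can retry xfs_bunmapi() on
 *	what is now the unused last block of the file, roughly:
 *
 *		error = xfs_da3_swap_lastblock(args, &dead_blkno, &dead_buf);
 *		if (error)
 *			break;
 *		(loop around and retry the xfs_bunmapi() call)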
2112 */ 2113 STATIC int 2114 xfs_da3_swap_lastblock( 2115 struct xfs_da_args *args, 2116 xfs_dablk_t *dead_blknop, 2117 struct xfs_buf **dead_bufp) 2118 { 2119 struct xfs_da_blkinfo *dead_info; 2120 struct xfs_da_blkinfo *sib_info; 2121 struct xfs_da_intnode *par_node; 2122 struct xfs_da_intnode *dead_node; 2123 struct xfs_dir2_leaf *dead_leaf2; 2124 struct xfs_da_node_entry *btree; 2125 struct xfs_da3_icnode_hdr par_hdr; 2126 struct xfs_inode *dp; 2127 struct xfs_trans *tp; 2128 struct xfs_mount *mp; 2129 struct xfs_buf *dead_buf; 2130 struct xfs_buf *last_buf; 2131 struct xfs_buf *sib_buf; 2132 struct xfs_buf *par_buf; 2133 xfs_dahash_t dead_hash; 2134 xfs_fileoff_t lastoff; 2135 xfs_dablk_t dead_blkno; 2136 xfs_dablk_t last_blkno; 2137 xfs_dablk_t sib_blkno; 2138 xfs_dablk_t par_blkno; 2139 int error; 2140 int w; 2141 int entno; 2142 int level; 2143 int dead_level; 2144 2145 trace_xfs_da_swap_lastblock(args); 2146 2147 dead_buf = *dead_bufp; 2148 dead_blkno = *dead_blknop; 2149 tp = args->trans; 2150 dp = args->dp; 2151 w = args->whichfork; 2152 ASSERT(w == XFS_DATA_FORK); 2153 mp = dp->i_mount; 2154 lastoff = args->geo->freeblk; 2155 error = xfs_bmap_last_before(tp, dp, &lastoff, w); 2156 if (error) 2157 return error; 2158 if (unlikely(lastoff == 0)) { 2159 XFS_ERROR_REPORT("xfs_da_swap_lastblock(1)", XFS_ERRLEVEL_LOW, 2160 mp); 2161 return -EFSCORRUPTED; 2162 } 2163 /* 2164 * Read the last block in the btree space. 2165 */ 2166 last_blkno = (xfs_dablk_t)lastoff - args->geo->fsbcount; 2167 error = xfs_da3_node_read(tp, dp, last_blkno, -1, &last_buf, w); 2168 if (error) 2169 return error; 2170 /* 2171 * Copy the last block into the dead buffer and log it. 2172 */ 2173 memcpy(dead_buf->b_addr, last_buf->b_addr, args->geo->blksize); 2174 xfs_trans_log_buf(tp, dead_buf, 0, args->geo->blksize - 1); 2175 dead_info = dead_buf->b_addr; 2176 /* 2177 * Get values from the moved block. 2178 */ 2179 if (dead_info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) || 2180 dead_info->magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) { 2181 struct xfs_dir3_icleaf_hdr leafhdr; 2182 struct xfs_dir2_leaf_entry *ents; 2183 2184 dead_leaf2 = (xfs_dir2_leaf_t *)dead_info; 2185 dp->d_ops->leaf_hdr_from_disk(&leafhdr, dead_leaf2); 2186 ents = dp->d_ops->leaf_ents_p(dead_leaf2); 2187 dead_level = 0; 2188 dead_hash = be32_to_cpu(ents[leafhdr.count - 1].hashval); 2189 } else { 2190 struct xfs_da3_icnode_hdr deadhdr; 2191 2192 dead_node = (xfs_da_intnode_t *)dead_info; 2193 dp->d_ops->node_hdr_from_disk(&deadhdr, dead_node); 2194 btree = dp->d_ops->node_tree_p(dead_node); 2195 dead_level = deadhdr.level; 2196 dead_hash = be32_to_cpu(btree[deadhdr.count - 1].hashval); 2197 } 2198 sib_buf = par_buf = NULL; 2199 /* 2200 * If the moved block has a left sibling, fix up the pointers. 2201 */ 2202 if ((sib_blkno = be32_to_cpu(dead_info->back))) { 2203 error = xfs_da3_node_read(tp, dp, sib_blkno, -1, &sib_buf, w); 2204 if (error) 2205 goto done; 2206 sib_info = sib_buf->b_addr; 2207 if (unlikely( 2208 be32_to_cpu(sib_info->forw) != last_blkno || 2209 sib_info->magic != dead_info->magic)) { 2210 XFS_ERROR_REPORT("xfs_da_swap_lastblock(2)", 2211 XFS_ERRLEVEL_LOW, mp); 2212 error = -EFSCORRUPTED; 2213 goto done; 2214 } 2215 sib_info->forw = cpu_to_be32(dead_blkno); 2216 xfs_trans_log_buf(tp, sib_buf, 2217 XFS_DA_LOGRANGE(sib_info, &sib_info->forw, 2218 sizeof(sib_info->forw))); 2219 sib_buf = NULL; 2220 } 2221 /* 2222 * If the moved block has a right sibling, fix up the pointers. 
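	 * This is the mirror image of the left-sibling case above: read
	 * the forw sibling, check that its back pointer still refers to
	 * last_blkno, repoint it at dead_blkno, and log just that field.
	 * XFS_DA_LOGRANGE() evaluates to the first and last byte offsets
	 * of the field within the block, so only the modified pointer is
	 * logged rather than the whole buffer.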
2223 */ 2224 if ((sib_blkno = be32_to_cpu(dead_info->forw))) { 2225 error = xfs_da3_node_read(tp, dp, sib_blkno, -1, &sib_buf, w); 2226 if (error) 2227 goto done; 2228 sib_info = sib_buf->b_addr; 2229 if (unlikely( 2230 be32_to_cpu(sib_info->back) != last_blkno || 2231 sib_info->magic != dead_info->magic)) { 2232 XFS_ERROR_REPORT("xfs_da_swap_lastblock(3)", 2233 XFS_ERRLEVEL_LOW, mp); 2234 error = -EFSCORRUPTED; 2235 goto done; 2236 } 2237 sib_info->back = cpu_to_be32(dead_blkno); 2238 xfs_trans_log_buf(tp, sib_buf, 2239 XFS_DA_LOGRANGE(sib_info, &sib_info->back, 2240 sizeof(sib_info->back))); 2241 sib_buf = NULL; 2242 } 2243 par_blkno = args->geo->leafblk; 2244 level = -1; 2245 /* 2246 * Walk down the tree looking for the parent of the moved block. 2247 */ 2248 for (;;) { 2249 error = xfs_da3_node_read(tp, dp, par_blkno, -1, &par_buf, w); 2250 if (error) 2251 goto done; 2252 par_node = par_buf->b_addr; 2253 dp->d_ops->node_hdr_from_disk(&par_hdr, par_node); 2254 if (level >= 0 && level != par_hdr.level + 1) { 2255 XFS_ERROR_REPORT("xfs_da_swap_lastblock(4)", 2256 XFS_ERRLEVEL_LOW, mp); 2257 error = -EFSCORRUPTED; 2258 goto done; 2259 } 2260 level = par_hdr.level; 2261 btree = dp->d_ops->node_tree_p(par_node); 2262 for (entno = 0; 2263 entno < par_hdr.count && 2264 be32_to_cpu(btree[entno].hashval) < dead_hash; 2265 entno++) 2266 continue; 2267 if (entno == par_hdr.count) { 2268 XFS_ERROR_REPORT("xfs_da_swap_lastblock(5)", 2269 XFS_ERRLEVEL_LOW, mp); 2270 error = -EFSCORRUPTED; 2271 goto done; 2272 } 2273 par_blkno = be32_to_cpu(btree[entno].before); 2274 if (level == dead_level + 1) 2275 break; 2276 xfs_trans_brelse(tp, par_buf); 2277 par_buf = NULL; 2278 } 2279 /* 2280 * We're in the right parent block. 2281 * Look for the right entry. 2282 */ 2283 for (;;) { 2284 for (; 2285 entno < par_hdr.count && 2286 be32_to_cpu(btree[entno].before) != last_blkno; 2287 entno++) 2288 continue; 2289 if (entno < par_hdr.count) 2290 break; 2291 par_blkno = par_hdr.forw; 2292 xfs_trans_brelse(tp, par_buf); 2293 par_buf = NULL; 2294 if (unlikely(par_blkno == 0)) { 2295 XFS_ERROR_REPORT("xfs_da_swap_lastblock(6)", 2296 XFS_ERRLEVEL_LOW, mp); 2297 error = -EFSCORRUPTED; 2298 goto done; 2299 } 2300 error = xfs_da3_node_read(tp, dp, par_blkno, -1, &par_buf, w); 2301 if (error) 2302 goto done; 2303 par_node = par_buf->b_addr; 2304 dp->d_ops->node_hdr_from_disk(&par_hdr, par_node); 2305 if (par_hdr.level != level) { 2306 XFS_ERROR_REPORT("xfs_da_swap_lastblock(7)", 2307 XFS_ERRLEVEL_LOW, mp); 2308 error = -EFSCORRUPTED; 2309 goto done; 2310 } 2311 btree = dp->d_ops->node_tree_p(par_node); 2312 entno = 0; 2313 } 2314 /* 2315 * Update the parent entry pointing to the moved block. 2316 */ 2317 btree[entno].before = cpu_to_be32(dead_blkno); 2318 xfs_trans_log_buf(tp, par_buf, 2319 XFS_DA_LOGRANGE(par_node, &btree[entno].before, 2320 sizeof(btree[entno].before))); 2321 *dead_blknop = last_blkno; 2322 *dead_bufp = last_buf; 2323 return 0; 2324 done: 2325 if (par_buf) 2326 xfs_trans_brelse(tp, par_buf); 2327 if (sib_buf) 2328 xfs_trans_brelse(tp, sib_buf); 2329 xfs_trans_brelse(tp, last_buf); 2330 return error; 2331 } 2332 2333 /* 2334 * Remove a btree block from a directory or attribute. 
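 *
 * Typical use (an illustrative sketch only, not lifted from a specific
 * caller): once a dir/attr block has been emptied, pass its da block
 * number and its locked buffer in and let this routine free the space:
 *
 *	error = xfs_da_shrink_inode(args, blkno, bp);
 *	if (error)
 *		return error;
 *
 * The buffer is always invalidated in the transaction via
 * xfs_trans_binval(), so the caller must not reference it afterwards.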
2335 */ 2336 int 2337 xfs_da_shrink_inode( 2338 xfs_da_args_t *args, 2339 xfs_dablk_t dead_blkno, 2340 struct xfs_buf *dead_buf) 2341 { 2342 xfs_inode_t *dp; 2343 int done, error, w, count; 2344 xfs_trans_t *tp; 2345 xfs_mount_t *mp; 2346 2347 trace_xfs_da_shrink_inode(args); 2348 2349 dp = args->dp; 2350 w = args->whichfork; 2351 tp = args->trans; 2352 mp = dp->i_mount; 2353 count = args->geo->fsbcount; 2354 for (;;) { 2355 /* 2356 * Remove extents. If we get ENOSPC for a dir we have to move 2357 * the last block to the place we want to kill. 2358 */ 2359 error = xfs_bunmapi(tp, dp, dead_blkno, count, 2360 xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA, 2361 0, args->firstblock, args->flist, &done); 2362 if (error == -ENOSPC) { 2363 if (w != XFS_DATA_FORK) 2364 break; 2365 error = xfs_da3_swap_lastblock(args, &dead_blkno, 2366 &dead_buf); 2367 if (error) 2368 break; 2369 } else { 2370 break; 2371 } 2372 } 2373 xfs_trans_binval(tp, dead_buf); 2374 return error; 2375 } 2376 2377 /* 2378 * See if the mapping(s) for this btree block are valid, i.e. 2379 * don't contain holes, are logically contiguous, and cover the whole range. 2380 */ 2381 STATIC int 2382 xfs_da_map_covers_blocks( 2383 int nmap, 2384 xfs_bmbt_irec_t *mapp, 2385 xfs_dablk_t bno, 2386 int count) 2387 { 2388 int i; 2389 xfs_fileoff_t off; 2390 2391 for (i = 0, off = bno; i < nmap; i++) { 2392 if (mapp[i].br_startblock == HOLESTARTBLOCK || 2393 mapp[i].br_startblock == DELAYSTARTBLOCK) { 2394 return 0; 2395 } 2396 if (off != mapp[i].br_startoff) { 2397 return 0; 2398 } 2399 off += mapp[i].br_blockcount; 2400 } 2401 return off == bno + count; 2402 } 2403 2404 /* 2405 * Convert a struct xfs_bmbt_irec to a struct xfs_buf_map. 2406 * 2407 * For the single map case, it is assumed that the caller has provided a pointer 2408 * to a valid xfs_buf_map. For the multiple map case, this function will 2409 * allocate the xfs_buf_map to hold all the maps and replace the caller's single 2410 * map pointer with the allocated map. 2411 */ 2412 static int 2413 xfs_buf_map_from_irec( 2414 struct xfs_mount *mp, 2415 struct xfs_buf_map **mapp, 2416 int *nmaps, 2417 struct xfs_bmbt_irec *irecs, 2418 int nirecs) 2419 { 2420 struct xfs_buf_map *map; 2421 int i; 2422 2423 ASSERT(*nmaps == 1); 2424 ASSERT(nirecs >= 1); 2425 2426 if (nirecs > 1) { 2427 map = kmem_zalloc(nirecs * sizeof(struct xfs_buf_map), 2428 KM_SLEEP | KM_NOFS); 2429 if (!map) 2430 return -ENOMEM; 2431 *mapp = map; 2432 } 2433 2434 *nmaps = nirecs; 2435 map = *mapp; 2436 for (i = 0; i < *nmaps; i++) { 2437 ASSERT(irecs[i].br_startblock != DELAYSTARTBLOCK && 2438 irecs[i].br_startblock != HOLESTARTBLOCK); 2439 map[i].bm_bn = XFS_FSB_TO_DADDR(mp, irecs[i].br_startblock); 2440 map[i].bm_len = XFS_FSB_TO_BB(mp, irecs[i].br_blockcount); 2441 } 2442 return 0; 2443 } 2444 2445 /* 2446 * Map the block we are given ready for reading. There are three possible return 2447 * values: 2448 * -1 - will be returned if we land in a hole and mappedbno == -2 so the 2449 * caller knows not to execute a subsequent read. 2450 * 0 - if we mapped the block successfully 2451 * >0 - positive error number if there was an error. 
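 *
 * Note that in this file an error is returned with the usual negative
 * sign convention (e.g. -EFSCORRUPTED or -ENOMEM); the -1 hole case is
 * a sentinel which the callers below convert back to "no error", as in
 * this restatement of the pattern used by xfs_da_get_buf():
 *
 *	error = xfs_dabuf_map(dp, bno, mappedbno, whichfork, &mapp, &nmap);
 *	if (error) {
 *		if (error == -1)
 *			error = 0;	(mapping a hole is not an error)
 *		goto out_free;
 *	}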
2452 */ 2453 static int 2454 xfs_dabuf_map( 2455 struct xfs_inode *dp, 2456 xfs_dablk_t bno, 2457 xfs_daddr_t mappedbno, 2458 int whichfork, 2459 struct xfs_buf_map **map, 2460 int *nmaps) 2461 { 2462 struct xfs_mount *mp = dp->i_mount; 2463 int nfsb; 2464 int error = 0; 2465 struct xfs_bmbt_irec irec; 2466 struct xfs_bmbt_irec *irecs = &irec; 2467 int nirecs; 2468 2469 ASSERT(map && *map); 2470 ASSERT(*nmaps == 1); 2471 2472 if (whichfork == XFS_DATA_FORK) 2473 nfsb = mp->m_dir_geo->fsbcount; 2474 else 2475 nfsb = mp->m_attr_geo->fsbcount; 2476 2477 /* 2478 * Caller doesn't have a mapping. -2 means don't complain 2479 * if we land in a hole. 2480 */ 2481 if (mappedbno == -1 || mappedbno == -2) { 2482 /* 2483 * Optimize the one-block case. 2484 */ 2485 if (nfsb != 1) 2486 irecs = kmem_zalloc(sizeof(irec) * nfsb, 2487 KM_SLEEP | KM_NOFS); 2488 2489 nirecs = nfsb; 2490 error = xfs_bmapi_read(dp, (xfs_fileoff_t)bno, nfsb, irecs, 2491 &nirecs, xfs_bmapi_aflag(whichfork)); 2492 if (error) 2493 goto out; 2494 } else { 2495 irecs->br_startblock = XFS_DADDR_TO_FSB(mp, mappedbno); 2496 irecs->br_startoff = (xfs_fileoff_t)bno; 2497 irecs->br_blockcount = nfsb; 2498 irecs->br_state = 0; 2499 nirecs = 1; 2500 } 2501 2502 if (!xfs_da_map_covers_blocks(nirecs, irecs, bno, nfsb)) { 2503 error = mappedbno == -2 ? -1 : -EFSCORRUPTED; 2504 if (unlikely(error == -EFSCORRUPTED)) { 2505 if (xfs_error_level >= XFS_ERRLEVEL_LOW) { 2506 int i; 2507 xfs_alert(mp, "%s: bno %lld dir: inode %lld", 2508 __func__, (long long)bno, 2509 (long long)dp->i_ino); 2510 for (i = 0; i < *nmaps; i++) { 2511 xfs_alert(mp, 2512 "[%02d] br_startoff %lld br_startblock %lld br_blockcount %lld br_state %d", 2513 i, 2514 (long long)irecs[i].br_startoff, 2515 (long long)irecs[i].br_startblock, 2516 (long long)irecs[i].br_blockcount, 2517 irecs[i].br_state); 2518 } 2519 } 2520 XFS_ERROR_REPORT("xfs_da_do_buf(1)", 2521 XFS_ERRLEVEL_LOW, mp); 2522 } 2523 goto out; 2524 } 2525 error = xfs_buf_map_from_irec(mp, map, nmaps, irecs, nirecs); 2526 out: 2527 if (irecs != &irec) 2528 kmem_free(irecs); 2529 return error; 2530 } 2531 2532 /* 2533 * Get a buffer for the dir/attr block. 2534 */ 2535 int 2536 xfs_da_get_buf( 2537 struct xfs_trans *trans, 2538 struct xfs_inode *dp, 2539 xfs_dablk_t bno, 2540 xfs_daddr_t mappedbno, 2541 struct xfs_buf **bpp, 2542 int whichfork) 2543 { 2544 struct xfs_buf *bp; 2545 struct xfs_buf_map map; 2546 struct xfs_buf_map *mapp; 2547 int nmap; 2548 int error; 2549 2550 *bpp = NULL; 2551 mapp = ↦ 2552 nmap = 1; 2553 error = xfs_dabuf_map(dp, bno, mappedbno, whichfork, 2554 &mapp, &nmap); 2555 if (error) { 2556 /* mapping a hole is not an error, but we don't continue */ 2557 if (error == -1) 2558 error = 0; 2559 goto out_free; 2560 } 2561 2562 bp = xfs_trans_get_buf_map(trans, dp->i_mount->m_ddev_targp, 2563 mapp, nmap, 0); 2564 error = bp ? bp->b_error : -EIO; 2565 if (error) { 2566 xfs_trans_brelse(trans, bp); 2567 goto out_free; 2568 } 2569 2570 *bpp = bp; 2571 2572 out_free: 2573 if (mapp != &map) 2574 kmem_free(mapp); 2575 2576 return error; 2577 } 2578 2579 /* 2580 * Get a buffer for the dir/attr block, fill in the contents. 
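 *
 * Illustrative call (a sketch, not taken from a particular caller):
 * read one da block through the transaction with a verifier attached,
 * when no disk mapping is known up front (mappedbno == -1):
 *
 *	struct xfs_buf	*bp;
 *
 *	error = xfs_da_read_buf(tp, dp, blkno, -1, &bp, XFS_DATA_FORK,
 *				&xfs_da3_node_buf_ops);
 *	if (error)
 *		return error;
 *	use bp->b_addr, then xfs_trans_brelse(tp, bp) when finished
 *
 * The verifier argument is whatever matches the expected block type;
 * &xfs_da3_node_buf_ops above is just an example for an intermediate
 * node block.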
2581 */ 2582 int 2583 xfs_da_read_buf( 2584 struct xfs_trans *trans, 2585 struct xfs_inode *dp, 2586 xfs_dablk_t bno, 2587 xfs_daddr_t mappedbno, 2588 struct xfs_buf **bpp, 2589 int whichfork, 2590 const struct xfs_buf_ops *ops) 2591 { 2592 struct xfs_buf *bp; 2593 struct xfs_buf_map map; 2594 struct xfs_buf_map *mapp; 2595 int nmap; 2596 int error; 2597 2598 *bpp = NULL; 2599 mapp = ↦ 2600 nmap = 1; 2601 error = xfs_dabuf_map(dp, bno, mappedbno, whichfork, 2602 &mapp, &nmap); 2603 if (error) { 2604 /* mapping a hole is not an error, but we don't continue */ 2605 if (error == -1) 2606 error = 0; 2607 goto out_free; 2608 } 2609 2610 error = xfs_trans_read_buf_map(dp->i_mount, trans, 2611 dp->i_mount->m_ddev_targp, 2612 mapp, nmap, 0, &bp, ops); 2613 if (error) 2614 goto out_free; 2615 2616 if (whichfork == XFS_ATTR_FORK) 2617 xfs_buf_set_ref(bp, XFS_ATTR_BTREE_REF); 2618 else 2619 xfs_buf_set_ref(bp, XFS_DIR_BTREE_REF); 2620 *bpp = bp; 2621 out_free: 2622 if (mapp != &map) 2623 kmem_free(mapp); 2624 2625 return error; 2626 } 2627 2628 /* 2629 * Readahead the dir/attr block. 2630 */ 2631 xfs_daddr_t 2632 xfs_da_reada_buf( 2633 struct xfs_inode *dp, 2634 xfs_dablk_t bno, 2635 xfs_daddr_t mappedbno, 2636 int whichfork, 2637 const struct xfs_buf_ops *ops) 2638 { 2639 struct xfs_buf_map map; 2640 struct xfs_buf_map *mapp; 2641 int nmap; 2642 int error; 2643 2644 mapp = ↦ 2645 nmap = 1; 2646 error = xfs_dabuf_map(dp, bno, mappedbno, whichfork, 2647 &mapp, &nmap); 2648 if (error) { 2649 /* mapping a hole is not an error, but we don't continue */ 2650 if (error == -1) 2651 error = 0; 2652 goto out_free; 2653 } 2654 2655 mappedbno = mapp[0].bm_bn; 2656 xfs_buf_readahead_map(dp->i_mount->m_ddev_targp, mapp, nmap, ops); 2657 2658 out_free: 2659 if (mapp != &map) 2660 kmem_free(mapp); 2661 2662 if (error) 2663 return -1; 2664 return mappedbno; 2665 } 2666
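/*
 * Editor's illustrative example, not part of the upstream file: combine
 * xfs_da_reada_buf() and xfs_da_read_buf() the way a hypothetical caller
 * might.  The function name is invented for illustration and nothing in
 * this file calls it; it only shows the argument conventions (a da block
 * number, mappedbno == -1 for "no known mapping", and a verifier).
 */
static inline int
xfs_da_example_read_with_reada(
	struct xfs_trans	*tp,
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	const struct xfs_buf_ops *ops,
	struct xfs_buf		**bpp)
{
	/*
	 * Fire-and-forget readahead; a return of -1 from this call just
	 * means the block could not be mapped (e.g. we hit a hole).
	 */
	xfs_da_reada_buf(dp, bno, -1, XFS_DATA_FORK, ops);

	/* Read the block into the transaction, running the verifier. */
	return xfs_da_read_buf(tp, dp, bno, -1, bpp, XFS_DATA_FORK, ops);
}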