/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * Copyright (c) 2013 Red Hat, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_dir2.h"
#include "xfs_dir2_priv.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_attr.h"
#include "xfs_attr_leaf.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_cksum.h"
#include "xfs_buf_item.h"

/*
 * xfs_da_btree.c
 *
 * Routines to implement directories as Btrees of hashed names.
 */

/*========================================================================
 * Function prototypes for the kernel.
 *========================================================================*/

/*
 * Routines used for growing the Btree.
 */
STATIC int xfs_da3_root_split(xfs_da_state_t *state,
			      xfs_da_state_blk_t *existing_root,
			      xfs_da_state_blk_t *new_child);
STATIC int xfs_da3_node_split(xfs_da_state_t *state,
			      xfs_da_state_blk_t *existing_blk,
			      xfs_da_state_blk_t *split_blk,
			      xfs_da_state_blk_t *blk_to_add,
			      int treelevel,
			      int *result);
STATIC void xfs_da3_node_rebalance(xfs_da_state_t *state,
				   xfs_da_state_blk_t *node_blk_1,
				   xfs_da_state_blk_t *node_blk_2);
STATIC void xfs_da3_node_add(xfs_da_state_t *state,
			     xfs_da_state_blk_t *old_node_blk,
			     xfs_da_state_blk_t *new_node_blk);

/*
 * Routines used for shrinking the Btree.
 */
STATIC int xfs_da3_root_join(xfs_da_state_t *state,
			     xfs_da_state_blk_t *root_blk);
STATIC int xfs_da3_node_toosmall(xfs_da_state_t *state, int *retval);
STATIC void xfs_da3_node_remove(xfs_da_state_t *state,
				xfs_da_state_blk_t *drop_blk);
STATIC void xfs_da3_node_unbalance(xfs_da_state_t *state,
				   xfs_da_state_blk_t *src_node_blk,
				   xfs_da_state_blk_t *dst_node_blk);

/*
 * Utility routines.
 */
STATIC int xfs_da3_blk_unlink(xfs_da_state_t *state,
			      xfs_da_state_blk_t *drop_blk,
			      xfs_da_state_blk_t *save_blk);


kmem_zone_t *xfs_da_state_zone;	/* anchor for state struct zone */

/*
 * Allocate a dir-state structure.
 * We don't put them on the stack since they're large.
 */
xfs_da_state_t *
xfs_da_state_alloc(void)
{
	return kmem_zone_zalloc(xfs_da_state_zone, KM_NOFS);
}

/*
 * Kill the altpath contents of a da-state structure.
 */
STATIC void
xfs_da_state_kill_altpath(xfs_da_state_t *state)
{
	int	i;

	for (i = 0; i < state->altpath.active; i++)
		state->altpath.blk[i].bp = NULL;
	state->altpath.active = 0;
}

/*
 * Free a da-state structure.
 */
void
xfs_da_state_free(xfs_da_state_t *state)
{
	xfs_da_state_kill_altpath(state);
#ifdef DEBUG
	memset((char *)state, 0, sizeof(*state));
#endif /* DEBUG */
	kmem_zone_free(xfs_da_state_zone, state);
}

static bool
xfs_da3_node_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;
	struct xfs_da_intnode	*hdr = bp->b_addr;
	struct xfs_da3_icnode_hdr ichdr;
	const struct xfs_dir_ops *ops;

	ops = xfs_dir_get_ops(mp, NULL);

	ops->node_hdr_from_disk(&ichdr, hdr);

	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		struct xfs_da3_node_hdr *hdr3 = bp->b_addr;

		if (ichdr.magic != XFS_DA3_NODE_MAGIC)
			return false;

		if (!uuid_equal(&hdr3->info.uuid, &mp->m_sb.sb_meta_uuid))
			return false;
		if (be64_to_cpu(hdr3->info.blkno) != bp->b_bn)
			return false;
	} else {
		if (ichdr.magic != XFS_DA_NODE_MAGIC)
			return false;
	}
	if (ichdr.level == 0)
		return false;
	if (ichdr.level > XFS_DA_NODE_MAXDEPTH)
		return false;
	if (ichdr.count == 0)
		return false;

	/*
	 * we don't know if the node is for an attribute or directory tree,
	 * so only fail if the count is outside both bounds
	 */
	if (ichdr.count > mp->m_dir_geo->node_ents &&
	    ichdr.count > mp->m_attr_geo->node_ents)
		return false;

	/* XXX: hash order check? */

	return true;
}

static void
xfs_da3_node_write_verify(
	struct xfs_buf	*bp)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;
	struct xfs_buf_log_item	*bip = bp->b_fspriv;
	struct xfs_da3_node_hdr *hdr3 = bp->b_addr;

	if (!xfs_da3_node_verify(bp)) {
		xfs_buf_ioerror(bp, -EFSCORRUPTED);
		xfs_verifier_error(bp);
		return;
	}

	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return;

	if (bip)
		hdr3->info.lsn = cpu_to_be64(bip->bli_item.li_lsn);

	xfs_buf_update_cksum(bp, XFS_DA3_NODE_CRC_OFF);
}

/*
 * leaf/node format detection on trees is sketchy, so a node read can be done on
 * leaf level blocks when detection identifies the tree as a node format tree
 * incorrectly. In this case, we need to swap the verifier to match the correct
 * format of the block being read.
 */
static void
xfs_da3_node_read_verify(
	struct xfs_buf		*bp)
{
	struct xfs_da_blkinfo	*info = bp->b_addr;

	switch (be16_to_cpu(info->magic)) {
	case XFS_DA3_NODE_MAGIC:
		if (!xfs_buf_verify_cksum(bp, XFS_DA3_NODE_CRC_OFF)) {
			xfs_buf_ioerror(bp, -EFSBADCRC);
			break;
		}
		/* fall through */
	case XFS_DA_NODE_MAGIC:
		if (!xfs_da3_node_verify(bp)) {
			xfs_buf_ioerror(bp, -EFSCORRUPTED);
			break;
		}
		return;
	case XFS_ATTR_LEAF_MAGIC:
	case XFS_ATTR3_LEAF_MAGIC:
		bp->b_ops = &xfs_attr3_leaf_buf_ops;
		bp->b_ops->verify_read(bp);
		return;
	case XFS_DIR2_LEAFN_MAGIC:
	case XFS_DIR3_LEAFN_MAGIC:
		bp->b_ops = &xfs_dir3_leafn_buf_ops;
		bp->b_ops->verify_read(bp);
		return;
	default:
		xfs_buf_ioerror(bp, -EFSCORRUPTED);
		break;
	}

	/* corrupt block */
	xfs_verifier_error(bp);
}

const struct xfs_buf_ops xfs_da3_node_buf_ops = {
	.verify_read = xfs_da3_node_read_verify,
	.verify_write = xfs_da3_node_write_verify,
};

int
xfs_da3_node_read(
	struct xfs_trans	*tp,
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	xfs_daddr_t		mappedbno,
	struct xfs_buf		**bpp,
	int			which_fork)
{
	int			err;

	err = xfs_da_read_buf(tp, dp, bno, mappedbno, bpp,
			      which_fork, &xfs_da3_node_buf_ops);
	if (!err && tp) {
		struct xfs_da_blkinfo	*info = (*bpp)->b_addr;
		int			type;

		switch (be16_to_cpu(info->magic)) {
		case XFS_DA_NODE_MAGIC:
		case XFS_DA3_NODE_MAGIC:
			type = XFS_BLFT_DA_NODE_BUF;
			break;
		case XFS_ATTR_LEAF_MAGIC:
		case XFS_ATTR3_LEAF_MAGIC:
			type = XFS_BLFT_ATTR_LEAF_BUF;
			break;
		case XFS_DIR2_LEAFN_MAGIC:
		case XFS_DIR3_LEAFN_MAGIC:
			type = XFS_BLFT_DIR_LEAFN_BUF;
			break;
		default:
			type = 0;
			ASSERT(0);
			break;
		}
		xfs_trans_buf_set_type(tp, *bpp, type);
	}
	return err;
}
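
/*
 * Typical use of xfs_da3_node_read(), shown as an illustrative sketch
 * rather than a call site from this file (error handling and transaction
 * setup are assumed): passing mappedbno == -1 asks the read path to do
 * the dablk-to-daddr mapping itself, and the read verifier above fixes
 * up b_ops if the block turns out to be a leaf rather than a node:
 *
 *	struct xfs_buf	*bp;
 *	int		error;
 *
 *	error = xfs_da3_node_read(args->trans, args->dp, blkno, -1,
 *				  &bp, args->whichfork);
 *	if (error)
 *		return error;
 *	(now bp->b_addr points at a verified node or leaf block)
 */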
295 */ 296 int 297 xfs_da3_node_create( 298 struct xfs_da_args *args, 299 xfs_dablk_t blkno, 300 int level, 301 struct xfs_buf **bpp, 302 int whichfork) 303 { 304 struct xfs_da_intnode *node; 305 struct xfs_trans *tp = args->trans; 306 struct xfs_mount *mp = tp->t_mountp; 307 struct xfs_da3_icnode_hdr ichdr = {0}; 308 struct xfs_buf *bp; 309 int error; 310 struct xfs_inode *dp = args->dp; 311 312 trace_xfs_da_node_create(args); 313 ASSERT(level <= XFS_DA_NODE_MAXDEPTH); 314 315 error = xfs_da_get_buf(tp, dp, blkno, -1, &bp, whichfork); 316 if (error) 317 return error; 318 bp->b_ops = &xfs_da3_node_buf_ops; 319 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DA_NODE_BUF); 320 node = bp->b_addr; 321 322 if (xfs_sb_version_hascrc(&mp->m_sb)) { 323 struct xfs_da3_node_hdr *hdr3 = bp->b_addr; 324 325 ichdr.magic = XFS_DA3_NODE_MAGIC; 326 hdr3->info.blkno = cpu_to_be64(bp->b_bn); 327 hdr3->info.owner = cpu_to_be64(args->dp->i_ino); 328 uuid_copy(&hdr3->info.uuid, &mp->m_sb.sb_meta_uuid); 329 } else { 330 ichdr.magic = XFS_DA_NODE_MAGIC; 331 } 332 ichdr.level = level; 333 334 dp->d_ops->node_hdr_to_disk(node, &ichdr); 335 xfs_trans_log_buf(tp, bp, 336 XFS_DA_LOGRANGE(node, &node->hdr, dp->d_ops->node_hdr_size)); 337 338 *bpp = bp; 339 return 0; 340 } 341 342 /* 343 * Split a leaf node, rebalance, then possibly split 344 * intermediate nodes, rebalance, etc. 345 */ 346 int /* error */ 347 xfs_da3_split( 348 struct xfs_da_state *state) 349 { 350 struct xfs_da_state_blk *oldblk; 351 struct xfs_da_state_blk *newblk; 352 struct xfs_da_state_blk *addblk; 353 struct xfs_da_intnode *node; 354 struct xfs_buf *bp; 355 int max; 356 int action = 0; 357 int error; 358 int i; 359 360 trace_xfs_da_split(state->args); 361 362 /* 363 * Walk back up the tree splitting/inserting/adjusting as necessary. 364 * If we need to insert and there isn't room, split the node, then 365 * decide which fragment to insert the new block from below into. 366 * Note that we may split the root this way, but we need more fixup. 367 */ 368 max = state->path.active - 1; 369 ASSERT((max >= 0) && (max < XFS_DA_NODE_MAXDEPTH)); 370 ASSERT(state->path.blk[max].magic == XFS_ATTR_LEAF_MAGIC || 371 state->path.blk[max].magic == XFS_DIR2_LEAFN_MAGIC); 372 373 addblk = &state->path.blk[max]; /* initial dummy value */ 374 for (i = max; (i >= 0) && addblk; state->path.active--, i--) { 375 oldblk = &state->path.blk[i]; 376 newblk = &state->altpath.blk[i]; 377 378 /* 379 * If a leaf node then 380 * Allocate a new leaf node, then rebalance across them. 381 * else if an intermediate node then 382 * We split on the last layer, must we split the node? 383 */ 384 switch (oldblk->magic) { 385 case XFS_ATTR_LEAF_MAGIC: 386 error = xfs_attr3_leaf_split(state, oldblk, newblk); 387 if ((error != 0) && (error != -ENOSPC)) { 388 return error; /* GROT: attr is inconsistent */ 389 } 390 if (!error) { 391 addblk = newblk; 392 break; 393 } 394 /* 395 * Entry wouldn't fit, split the leaf again. 
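
/*
 * A note on the logging calls here and below: XFS_DA_LOGRANGE(base, ptr,
 * size) expands to a (first byte, last byte) offset pair within the
 * buffer, so the call above dirties only the node header region in the
 * log item rather than the whole block.  For example (sizes illustrative;
 * the real value comes from d_ops->node_hdr_size), a 64 byte v3 node
 * header logs bytes 0..63 of the buffer.
 */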
/*
 * Split a leaf node, rebalance, then possibly split
 * intermediate nodes, rebalance, etc.
 */
int							/* error */
xfs_da3_split(
	struct xfs_da_state	*state)
{
	struct xfs_da_state_blk	*oldblk;
	struct xfs_da_state_blk	*newblk;
	struct xfs_da_state_blk	*addblk;
	struct xfs_da_intnode	*node;
	struct xfs_buf		*bp;
	int			max;
	int			action = 0;
	int			error;
	int			i;

	trace_xfs_da_split(state->args);

	/*
	 * Walk back up the tree splitting/inserting/adjusting as necessary.
	 * If we need to insert and there isn't room, split the node, then
	 * decide which fragment to insert the new block from below into.
	 * Note that we may split the root this way, but we need more fixup.
	 */
	max = state->path.active - 1;
	ASSERT((max >= 0) && (max < XFS_DA_NODE_MAXDEPTH));
	ASSERT(state->path.blk[max].magic == XFS_ATTR_LEAF_MAGIC ||
	       state->path.blk[max].magic == XFS_DIR2_LEAFN_MAGIC);

	addblk = &state->path.blk[max];		/* initial dummy value */
	for (i = max; (i >= 0) && addblk; state->path.active--, i--) {
		oldblk = &state->path.blk[i];
		newblk = &state->altpath.blk[i];

		/*
		 * If a leaf node then
		 *     Allocate a new leaf node, then rebalance across them.
		 * else if an intermediate node then
		 *     We split on the last layer, must we split the node?
		 */
		switch (oldblk->magic) {
		case XFS_ATTR_LEAF_MAGIC:
			error = xfs_attr3_leaf_split(state, oldblk, newblk);
			if ((error != 0) && (error != -ENOSPC)) {
				return error;	/* GROT: attr is inconsistent */
			}
			if (!error) {
				addblk = newblk;
				break;
			}
			/*
			 * Entry wouldn't fit, split the leaf again.
			 */
			state->extravalid = 1;
			if (state->inleaf) {
				state->extraafter = 0;	/* before newblk */
				trace_xfs_attr_leaf_split_before(state->args);
				error = xfs_attr3_leaf_split(state, oldblk,
							     &state->extrablk);
			} else {
				state->extraafter = 1;	/* after newblk */
				trace_xfs_attr_leaf_split_after(state->args);
				error = xfs_attr3_leaf_split(state, newblk,
							     &state->extrablk);
			}
			if (error)
				return error;	/* GROT: attr inconsistent */
			addblk = newblk;
			break;
		case XFS_DIR2_LEAFN_MAGIC:
			error = xfs_dir2_leafn_split(state, oldblk, newblk);
			if (error)
				return error;
			addblk = newblk;
			break;
		case XFS_DA_NODE_MAGIC:
			error = xfs_da3_node_split(state, oldblk, newblk,
						   addblk, max - i, &action);
			addblk->bp = NULL;
			if (error)
				return error;	/* GROT: dir is inconsistent */
			/*
			 * Record the newly split block for the next time thru?
			 */
			if (action)
				addblk = newblk;
			else
				addblk = NULL;
			break;
		}

		/*
		 * Update the btree to show the new hashval for this child.
		 */
		xfs_da3_fixhashpath(state, &state->path);
	}
	if (!addblk)
		return 0;

	/*
	 * Split the root node.
	 */
	ASSERT(state->path.active == 0);
	oldblk = &state->path.blk[0];
	error = xfs_da3_root_split(state, oldblk, addblk);
	if (error) {
		addblk->bp = NULL;
		return error;	/* GROT: dir is inconsistent */
	}

	/*
	 * Update pointers to the node which used to be block 0 and
	 * just got bumped because of the addition of a new root node.
	 * There might be three blocks involved if a double split occurred,
	 * and the original block 0 could be at any position in the list.
	 *
	 * Note: the magic numbers and sibling pointers are in the same
	 * physical place for both v2 and v3 headers (by design). Hence it
	 * doesn't matter which version of the xfs_da_intnode structure we use
	 * here as the result will be the same using either structure.
	 */
	node = oldblk->bp->b_addr;
	if (node->hdr.info.forw) {
		if (be32_to_cpu(node->hdr.info.forw) == addblk->blkno) {
			bp = addblk->bp;
		} else {
			ASSERT(state->extravalid);
			bp = state->extrablk.bp;
		}
		node = bp->b_addr;
		node->hdr.info.back = cpu_to_be32(oldblk->blkno);
		xfs_trans_log_buf(state->args->trans, bp,
			XFS_DA_LOGRANGE(node, &node->hdr.info,
					sizeof(node->hdr.info)));
	}
	node = oldblk->bp->b_addr;
	if (node->hdr.info.back) {
		if (be32_to_cpu(node->hdr.info.back) == addblk->blkno) {
			bp = addblk->bp;
		} else {
			ASSERT(state->extravalid);
			bp = state->extrablk.bp;
		}
		node = bp->b_addr;
		node->hdr.info.forw = cpu_to_be32(oldblk->blkno);
		xfs_trans_log_buf(state->args->trans, bp,
			XFS_DA_LOGRANGE(node, &node->hdr.info,
					sizeof(node->hdr.info)));
	}
	addblk->bp = NULL;
	return 0;
}
/*
 * Split the root.  We have to create a new root and point to the two
 * parts (the split old root) that we just created.  Copy block zero to
 * the EOF, extending the inode in process.
 */
STATIC int				/* error */
xfs_da3_root_split(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*blk1,
	struct xfs_da_state_blk	*blk2)
{
	struct xfs_da_intnode	*node;
	struct xfs_da_intnode	*oldroot;
	struct xfs_da_node_entry *btree;
	struct xfs_da3_icnode_hdr nodehdr;
	struct xfs_da_args	*args;
	struct xfs_buf		*bp;
	struct xfs_inode	*dp;
	struct xfs_trans	*tp;
	struct xfs_dir2_leaf	*leaf;
	xfs_dablk_t		blkno;
	int			level;
	int			error;
	int			size;

	trace_xfs_da_root_split(state->args);

	/*
	 * Copy the existing (incorrect) block from the root node position
	 * to a free space somewhere.
	 */
	args = state->args;
	error = xfs_da_grow_inode(args, &blkno);
	if (error)
		return error;

	dp = args->dp;
	tp = args->trans;
	error = xfs_da_get_buf(tp, dp, blkno, -1, &bp, args->whichfork);
	if (error)
		return error;
	node = bp->b_addr;
	oldroot = blk1->bp->b_addr;
	if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
	    oldroot->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC)) {
		struct xfs_da3_icnode_hdr icnodehdr;

		dp->d_ops->node_hdr_from_disk(&icnodehdr, oldroot);
		btree = dp->d_ops->node_tree_p(oldroot);
		size = (int)((char *)&btree[icnodehdr.count] - (char *)oldroot);
		level = icnodehdr.level;

		/*
		 * we are about to copy oldroot to bp, so set up the type
		 * of bp while we know exactly what it will be.
		 */
		xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DA_NODE_BUF);
	} else {
		struct xfs_dir3_icleaf_hdr leafhdr;
		struct xfs_dir2_leaf_entry *ents;

		leaf = (xfs_dir2_leaf_t *)oldroot;
		dp->d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
		ents = dp->d_ops->leaf_ents_p(leaf);

		ASSERT(leafhdr.magic == XFS_DIR2_LEAFN_MAGIC ||
		       leafhdr.magic == XFS_DIR3_LEAFN_MAGIC);
		size = (int)((char *)&ents[leafhdr.count] - (char *)leaf);
		level = 0;

		/*
		 * we are about to copy oldroot to bp, so set up the type
		 * of bp while we know exactly what it will be.
		 */
		xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DIR_LEAFN_BUF);
	}

	/*
	 * we can copy most of the information in the node from one block to
	 * another, but for CRC enabled headers we have to make sure that the
	 * block specific identifiers are kept intact. We update the buffer
	 * directly for this.
	 */
	memcpy(node, oldroot, size);
	if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC) ||
	    oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) {
		struct xfs_da3_intnode *node3 = (struct xfs_da3_intnode *)node;

		node3->hdr.info.blkno = cpu_to_be64(bp->b_bn);
	}
	xfs_trans_log_buf(tp, bp, 0, size - 1);

	bp->b_ops = blk1->bp->b_ops;
	xfs_trans_buf_copy_type(bp, blk1->bp);
	blk1->bp = bp;
	blk1->blkno = blkno;

	/*
	 * Set up the new root node.
	 */
	error = xfs_da3_node_create(args,
		(args->whichfork == XFS_DATA_FORK) ? args->geo->leafblk : 0,
		level + 1, &bp, args->whichfork);
	if (error)
		return error;

	node = bp->b_addr;
	dp->d_ops->node_hdr_from_disk(&nodehdr, node);
	btree = dp->d_ops->node_tree_p(node);
	btree[0].hashval = cpu_to_be32(blk1->hashval);
	btree[0].before = cpu_to_be32(blk1->blkno);
	btree[1].hashval = cpu_to_be32(blk2->hashval);
	btree[1].before = cpu_to_be32(blk2->blkno);
	nodehdr.count = 2;
	dp->d_ops->node_hdr_to_disk(node, &nodehdr);

#ifdef DEBUG
	if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
	    oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) {
		ASSERT(blk1->blkno >= args->geo->leafblk &&
		       blk1->blkno < args->geo->freeblk);
		ASSERT(blk2->blkno >= args->geo->leafblk &&
		       blk2->blkno < args->geo->freeblk);
	}
#endif

	/* Header is already logged by xfs_da_node_create */
	xfs_trans_log_buf(tp, bp,
		XFS_DA_LOGRANGE(node, btree, sizeof(xfs_da_node_entry_t) * 2));

	return 0;
}
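
/*
 * The net effect of a root split, sketched with made-up block numbers.
 * The root stays at a fixed offset (geo->leafblk for the data fork,
 * block 0 for the attr fork) so lookups always know where to start:
 *
 *	before:			after:
 *	[root @ leafblk]	[new root @ leafblk]
 *				     /		\
 *			  [old root copy]	[blk2]
 *			     @ new blkno
 *
 * The old root's contents move to a freshly grown block, and the new
 * root holds exactly two entries pointing at the two halves.
 */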
/*
 * Split the node, rebalance, then add the new entry.
 */
STATIC int						/* error */
xfs_da3_node_split(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*oldblk,
	struct xfs_da_state_blk	*newblk,
	struct xfs_da_state_blk	*addblk,
	int			treelevel,
	int			*result)
{
	struct xfs_da_intnode	*node;
	struct xfs_da3_icnode_hdr nodehdr;
	xfs_dablk_t		blkno;
	int			newcount;
	int			error;
	int			useextra;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_node_split(state->args);

	node = oldblk->bp->b_addr;
	dp->d_ops->node_hdr_from_disk(&nodehdr, node);

	/*
	 * With V2 dirs the extra block is data or freespace.
	 */
	useextra = state->extravalid && state->args->whichfork == XFS_ATTR_FORK;
	newcount = 1 + useextra;
	/*
	 * Do we have to split the node?
	 */
	if (nodehdr.count + newcount > state->args->geo->node_ents) {
		/*
		 * Allocate a new node, add to the doubly linked chain of
		 * nodes, then move some of our excess entries into it.
		 */
		error = xfs_da_grow_inode(state->args, &blkno);
		if (error)
			return error;	/* GROT: dir is inconsistent */

		error = xfs_da3_node_create(state->args, blkno, treelevel,
					    &newblk->bp,
					    state->args->whichfork);
		if (error)
			return error;	/* GROT: dir is inconsistent */
		newblk->blkno = blkno;
		newblk->magic = XFS_DA_NODE_MAGIC;
		xfs_da3_node_rebalance(state, oldblk, newblk);
		error = xfs_da3_blk_link(state, oldblk, newblk);
		if (error)
			return error;
		*result = 1;
	} else {
		*result = 0;
	}

	/*
	 * Insert the new entry(s) into the correct block
	 * (updating last hashval in the process).
	 *
	 * xfs_da3_node_add() inserts BEFORE the given index,
	 * and as a result of using node_lookup_int() we always
	 * point to a valid entry (not after one), but a split
	 * operation always results in a new block whose hashvals
	 * FOLLOW the current block.
	 *
	 * If we had double-split op below us, then add the extra block too.
	 */
	node = oldblk->bp->b_addr;
	dp->d_ops->node_hdr_from_disk(&nodehdr, node);
	if (oldblk->index <= nodehdr.count) {
		oldblk->index++;
		xfs_da3_node_add(state, oldblk, addblk);
		if (useextra) {
			if (state->extraafter)
				oldblk->index++;
			xfs_da3_node_add(state, oldblk, &state->extrablk);
			state->extravalid = 0;
		}
	} else {
		newblk->index++;
		xfs_da3_node_add(state, newblk, addblk);
		if (useextra) {
			if (state->extraafter)
				newblk->index++;
			xfs_da3_node_add(state, newblk, &state->extrablk);
			state->extravalid = 0;
		}
	}

	return 0;
}
/*
 * Balance the btree elements between two intermediate nodes,
 * usually one full and one empty.
 *
 * NOTE: if blk2 is empty, then it will get the upper half of blk1.
 */
STATIC void
xfs_da3_node_rebalance(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*blk1,
	struct xfs_da_state_blk	*blk2)
{
	struct xfs_da_intnode	*node1;
	struct xfs_da_intnode	*node2;
	struct xfs_da_intnode	*tmpnode;
	struct xfs_da_node_entry *btree1;
	struct xfs_da_node_entry *btree2;
	struct xfs_da_node_entry *btree_s;
	struct xfs_da_node_entry *btree_d;
	struct xfs_da3_icnode_hdr nodehdr1;
	struct xfs_da3_icnode_hdr nodehdr2;
	struct xfs_trans	*tp;
	int			count;
	int			tmp;
	int			swap = 0;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_node_rebalance(state->args);

	node1 = blk1->bp->b_addr;
	node2 = blk2->bp->b_addr;
	dp->d_ops->node_hdr_from_disk(&nodehdr1, node1);
	dp->d_ops->node_hdr_from_disk(&nodehdr2, node2);
	btree1 = dp->d_ops->node_tree_p(node1);
	btree2 = dp->d_ops->node_tree_p(node2);

	/*
	 * Figure out how many entries need to move, and in which direction.
	 * Swap the nodes around if that makes it simpler.
	 */
	if (nodehdr1.count > 0 && nodehdr2.count > 0 &&
	    ((be32_to_cpu(btree2[0].hashval) <
			be32_to_cpu(btree1[0].hashval)) ||
	     (be32_to_cpu(btree2[nodehdr2.count - 1].hashval) <
			be32_to_cpu(btree1[nodehdr1.count - 1].hashval)))) {
		tmpnode = node1;
		node1 = node2;
		node2 = tmpnode;
		dp->d_ops->node_hdr_from_disk(&nodehdr1, node1);
		dp->d_ops->node_hdr_from_disk(&nodehdr2, node2);
		btree1 = dp->d_ops->node_tree_p(node1);
		btree2 = dp->d_ops->node_tree_p(node2);
		swap = 1;
	}

	count = (nodehdr1.count - nodehdr2.count) / 2;
	if (count == 0)
		return;
	tp = state->args->trans;
	/*
	 * Two cases: high-to-low and low-to-high.
	 */
	if (count > 0) {
		/*
		 * Move elements in node2 up to make a hole.
		 */
		tmp = nodehdr2.count;
		if (tmp > 0) {
			tmp *= (uint)sizeof(xfs_da_node_entry_t);
			btree_s = &btree2[0];
			btree_d = &btree2[count];
			memmove(btree_d, btree_s, tmp);
		}

		/*
		 * Move the req'd B-tree elements from high in node1 to
		 * low in node2.
		 */
		nodehdr2.count += count;
		tmp = count * (uint)sizeof(xfs_da_node_entry_t);
		btree_s = &btree1[nodehdr1.count - count];
		btree_d = &btree2[0];
		memcpy(btree_d, btree_s, tmp);
		nodehdr1.count -= count;
	} else {
		/*
		 * Move the req'd B-tree elements from low in node2 to
		 * high in node1.
		 */
		count = -count;
		tmp = count * (uint)sizeof(xfs_da_node_entry_t);
		btree_s = &btree2[0];
		btree_d = &btree1[nodehdr1.count];
		memcpy(btree_d, btree_s, tmp);
		nodehdr1.count += count;

		xfs_trans_log_buf(tp, blk1->bp,
			XFS_DA_LOGRANGE(node1, btree_d, tmp));

		/*
		 * Move elements in node2 down to fill the hole.
		 */
		tmp = nodehdr2.count - count;
		tmp *= (uint)sizeof(xfs_da_node_entry_t);
		btree_s = &btree2[count];
		btree_d = &btree2[0];
		memmove(btree_d, btree_s, tmp);
		nodehdr2.count -= count;
	}

	/*
	 * Log header of node 1 and all current bits of node 2.
	 */
	dp->d_ops->node_hdr_to_disk(node1, &nodehdr1);
	xfs_trans_log_buf(tp, blk1->bp,
		XFS_DA_LOGRANGE(node1, &node1->hdr, dp->d_ops->node_hdr_size));

	dp->d_ops->node_hdr_to_disk(node2, &nodehdr2);
	xfs_trans_log_buf(tp, blk2->bp,
		XFS_DA_LOGRANGE(node2, &node2->hdr,
				dp->d_ops->node_hdr_size +
				(sizeof(btree2[0]) * nodehdr2.count)));

	/*
	 * Record the last hashval from each block for upward propagation.
	 * (note: don't use the swapped node pointers)
	 */
	if (swap) {
		node1 = blk1->bp->b_addr;
		node2 = blk2->bp->b_addr;
		dp->d_ops->node_hdr_from_disk(&nodehdr1, node1);
		dp->d_ops->node_hdr_from_disk(&nodehdr2, node2);
		btree1 = dp->d_ops->node_tree_p(node1);
		btree2 = dp->d_ops->node_tree_p(node2);
	}
	blk1->hashval = be32_to_cpu(btree1[nodehdr1.count - 1].hashval);
	blk2->hashval = be32_to_cpu(btree2[nodehdr2.count - 1].hashval);

	/*
	 * Adjust the expected index for insertion.
	 */
	if (blk1->index >= nodehdr1.count) {
		blk2->index = blk1->index - nodehdr1.count;
		blk1->index = nodehdr1.count + 1;	/* make it invalid */
	}
}
/*
 * Add a new entry to an intermediate node.
 */
STATIC void
xfs_da3_node_add(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*oldblk,
	struct xfs_da_state_blk	*newblk)
{
	struct xfs_da_intnode	*node;
	struct xfs_da3_icnode_hdr nodehdr;
	struct xfs_da_node_entry *btree;
	int			tmp;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_node_add(state->args);

	node = oldblk->bp->b_addr;
	dp->d_ops->node_hdr_from_disk(&nodehdr, node);
	btree = dp->d_ops->node_tree_p(node);

	ASSERT(oldblk->index >= 0 && oldblk->index <= nodehdr.count);
	ASSERT(newblk->blkno != 0);
	if (state->args->whichfork == XFS_DATA_FORK)
		ASSERT(newblk->blkno >= state->args->geo->leafblk &&
		       newblk->blkno < state->args->geo->freeblk);

	/*
	 * We may need to make some room before we insert the new node.
	 */
	tmp = 0;
	if (oldblk->index < nodehdr.count) {
		tmp = (nodehdr.count - oldblk->index) * (uint)sizeof(*btree);
		memmove(&btree[oldblk->index + 1], &btree[oldblk->index], tmp);
	}
	btree[oldblk->index].hashval = cpu_to_be32(newblk->hashval);
	btree[oldblk->index].before = cpu_to_be32(newblk->blkno);
	xfs_trans_log_buf(state->args->trans, oldblk->bp,
		XFS_DA_LOGRANGE(node, &btree[oldblk->index],
				tmp + sizeof(*btree)));

	nodehdr.count += 1;
	dp->d_ops->node_hdr_to_disk(node, &nodehdr);
	xfs_trans_log_buf(state->args->trans, oldblk->bp,
		XFS_DA_LOGRANGE(node, &node->hdr, dp->d_ops->node_hdr_size));

	/*
	 * Copy the last hash value from the oldblk to propagate upwards.
	 */
	oldblk->hashval = be32_to_cpu(btree[nodehdr.count - 1].hashval);
}
/*========================================================================
 * Routines used for shrinking the Btree.
 *========================================================================*/

/*
 * Deallocate an empty leaf node, remove it from its parent,
 * possibly deallocating that block, etc...
 */
int
xfs_da3_join(
	struct xfs_da_state	*state)
{
	struct xfs_da_state_blk	*drop_blk;
	struct xfs_da_state_blk	*save_blk;
	int			action = 0;
	int			error;

	trace_xfs_da_join(state->args);

	drop_blk = &state->path.blk[ state->path.active-1 ];
	save_blk = &state->altpath.blk[ state->path.active-1 ];
	ASSERT(state->path.blk[0].magic == XFS_DA_NODE_MAGIC);
	ASSERT(drop_blk->magic == XFS_ATTR_LEAF_MAGIC ||
	       drop_blk->magic == XFS_DIR2_LEAFN_MAGIC);

	/*
	 * Walk back up the tree joining/deallocating as necessary.
	 * When we stop dropping blocks, break out.
	 */
	for (  ; state->path.active >= 2; drop_blk--, save_blk--,
		 state->path.active--) {
		/*
		 * See if we can combine the block with a neighbor.
		 *   (action == 0) => no options, just leave
		 *   (action == 1) => coalesce, then unlink
		 *   (action == 2) => block empty, unlink it
		 */
		switch (drop_blk->magic) {
		case XFS_ATTR_LEAF_MAGIC:
			error = xfs_attr3_leaf_toosmall(state, &action);
			if (error)
				return error;
			if (action == 0)
				return 0;
			xfs_attr3_leaf_unbalance(state, drop_blk, save_blk);
			break;
		case XFS_DIR2_LEAFN_MAGIC:
			error = xfs_dir2_leafn_toosmall(state, &action);
			if (error)
				return error;
			if (action == 0)
				return 0;
			xfs_dir2_leafn_unbalance(state, drop_blk, save_blk);
			break;
		case XFS_DA_NODE_MAGIC:
			/*
			 * Remove the offending node, fixup hashvals,
			 * check for a toosmall neighbor.
			 */
			xfs_da3_node_remove(state, drop_blk);
			xfs_da3_fixhashpath(state, &state->path);
			error = xfs_da3_node_toosmall(state, &action);
			if (error)
				return error;
			if (action == 0)
				return 0;
			xfs_da3_node_unbalance(state, drop_blk, save_blk);
			break;
		}
		xfs_da3_fixhashpath(state, &state->altpath);
		error = xfs_da3_blk_unlink(state, drop_blk, save_blk);
		xfs_da_state_kill_altpath(state);
		if (error)
			return error;
		error = xfs_da_shrink_inode(state->args, drop_blk->blkno,
					    drop_blk->bp);
		drop_blk->bp = NULL;
		if (error)
			return error;
	}
	/*
	 * We joined all the way to the top.  If it turns out that
	 * we only have one entry in the root, make the child block
	 * the new root.
	 */
	xfs_da3_node_remove(state, drop_blk);
	xfs_da3_fixhashpath(state, &state->path);
	error = xfs_da3_root_join(state, &state->path.blk[0]);
	return error;
}

#ifdef	DEBUG
static void
xfs_da_blkinfo_onlychild_validate(struct xfs_da_blkinfo *blkinfo, __u16 level)
{
	__be16	magic = blkinfo->magic;

	if (level == 1) {
		ASSERT(magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
		       magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC) ||
		       magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC) ||
		       magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC));
	} else {
		ASSERT(magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
		       magic == cpu_to_be16(XFS_DA3_NODE_MAGIC));
	}
	ASSERT(!blkinfo->forw);
	ASSERT(!blkinfo->back);
}
#else	/* !DEBUG */
#define	xfs_da_blkinfo_onlychild_validate(blkinfo, level)
#endif	/* !DEBUG */
/*
 * We have only one entry in the root.  Copy the only remaining child of
 * the old root to block 0 as the new root node.
 */
STATIC int
xfs_da3_root_join(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*root_blk)
{
	struct xfs_da_intnode	*oldroot;
	struct xfs_da_args	*args;
	xfs_dablk_t		child;
	struct xfs_buf		*bp;
	struct xfs_da3_icnode_hdr oldroothdr;
	struct xfs_da_node_entry *btree;
	int			error;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_root_join(state->args);

	ASSERT(root_blk->magic == XFS_DA_NODE_MAGIC);

	args = state->args;
	oldroot = root_blk->bp->b_addr;
	dp->d_ops->node_hdr_from_disk(&oldroothdr, oldroot);
	ASSERT(oldroothdr.forw == 0);
	ASSERT(oldroothdr.back == 0);

	/*
	 * If the root has more than one child, then don't do anything.
	 */
	if (oldroothdr.count > 1)
		return 0;

	/*
	 * Read in the (only) child block, then copy those bytes into
	 * the root block's buffer and free the original child block.
	 */
	btree = dp->d_ops->node_tree_p(oldroot);
	child = be32_to_cpu(btree[0].before);
	ASSERT(child != 0);
	error = xfs_da3_node_read(args->trans, dp, child, -1, &bp,
				  args->whichfork);
	if (error)
		return error;
	xfs_da_blkinfo_onlychild_validate(bp->b_addr, oldroothdr.level);

	/*
	 * This could be copying a leaf back into the root block in the case of
	 * there only being a single leaf block left in the tree. Hence we have
	 * to update the b_ops pointer as well to match the buffer type change
	 * that could occur. For dir3 blocks we also need to update the block
	 * number in the buffer header.
	 */
	memcpy(root_blk->bp->b_addr, bp->b_addr, args->geo->blksize);
	root_blk->bp->b_ops = bp->b_ops;
	xfs_trans_buf_copy_type(root_blk->bp, bp);
	if (oldroothdr.magic == XFS_DA3_NODE_MAGIC) {
		struct xfs_da3_blkinfo *da3 = root_blk->bp->b_addr;
		da3->blkno = cpu_to_be64(root_blk->bp->b_bn);
	}
	xfs_trans_log_buf(args->trans, root_blk->bp, 0,
			  args->geo->blksize - 1);
	error = xfs_da_shrink_inode(args, child, bp);
	return error;
}
/*
 * Check a node block and its neighbors to see if the block should be
 * collapsed into one or the other neighbor.  Always keep the block
 * with the smaller block number.
 * If the current block is over 50% full, don't try to join it, return 0.
 * If the block is empty, fill in the state structure and return 2.
 * If it can be collapsed, fill in the state structure and return 1.
 * If nothing can be done, return 0.
 */
STATIC int
xfs_da3_node_toosmall(
	struct xfs_da_state	*state,
	int			*action)
{
	struct xfs_da_intnode	*node;
	struct xfs_da_state_blk	*blk;
	struct xfs_da_blkinfo	*info;
	xfs_dablk_t		blkno;
	struct xfs_buf		*bp;
	struct xfs_da3_icnode_hdr nodehdr;
	int			count;
	int			forward;
	int			error;
	int			retval;
	int			i;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_node_toosmall(state->args);

	/*
	 * Check for the degenerate case of the block being over 50% full.
	 * If so, it's not worth even looking to see if we might be able
	 * to coalesce with a sibling.
	 */
	blk = &state->path.blk[ state->path.active-1 ];
	info = blk->bp->b_addr;
	node = (xfs_da_intnode_t *)info;
	dp->d_ops->node_hdr_from_disk(&nodehdr, node);
	if (nodehdr.count > (state->args->geo->node_ents >> 1)) {
		*action = 0;	/* blk over 50%, don't try to join */
		return 0;	/* blk over 50%, don't try to join */
	}

	/*
	 * Check for the degenerate case of the block being empty.
	 * If the block is empty, we'll simply delete it, no need to
	 * coalesce it with a sibling block.  We choose (arbitrarily)
	 * to merge with the forward block unless it is NULL.
	 */
	if (nodehdr.count == 0) {
		/*
		 * Make altpath point to the block we want to keep and
		 * path point to the block we want to drop (this one).
		 */
		forward = (info->forw != 0);
		memcpy(&state->altpath, &state->path, sizeof(state->path));
		error = xfs_da3_path_shift(state, &state->altpath, forward,
					   0, &retval);
		if (error)
			return error;
		if (retval) {
			*action = 0;
		} else {
			*action = 2;
		}
		return 0;
	}

	/*
	 * Examine each sibling block to see if we can coalesce with
	 * at least 25% free space to spare.  We need to figure out
	 * whether to merge with the forward or the backward block.
	 * We prefer coalescing with the lower numbered sibling so as
	 * to shrink a directory over time.
	 */
	count  = state->args->geo->node_ents;
	count -= state->args->geo->node_ents >> 2;
	count -= nodehdr.count;

	/* start with smaller blk num */
	forward = nodehdr.forw < nodehdr.back;
	for (i = 0; i < 2; forward = !forward, i++) {
		struct xfs_da3_icnode_hdr thdr;
		if (forward)
			blkno = nodehdr.forw;
		else
			blkno = nodehdr.back;
		if (blkno == 0)
			continue;
		error = xfs_da3_node_read(state->args->trans, dp,
					  blkno, -1, &bp,
					  state->args->whichfork);
		if (error)
			return error;

		node = bp->b_addr;
		dp->d_ops->node_hdr_from_disk(&thdr, node);
		xfs_trans_brelse(state->args->trans, bp);

		if (count - thdr.count >= 0)
			break;	/* fits with at least 25% to spare */
	}
	if (i >= 2) {
		*action = 0;
		return 0;
	}

	/*
	 * Make altpath point to the block we want to keep (the lower
	 * numbered block) and path point to the block we want to drop.
	 */
	memcpy(&state->altpath, &state->path, sizeof(state->path));
	if (blkno < blk->blkno) {
		error = xfs_da3_path_shift(state, &state->altpath, forward,
					   0, &retval);
	} else {
		error = xfs_da3_path_shift(state, &state->path, forward,
					   0, &retval);
	}
	if (error)
		return error;
	if (retval) {
		*action = 0;
		return 0;
	}
	*action = 1;
	return 0;
}
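
/*
 * The 25% slack test above, with illustrative numbers: for a geometry
 * where node_ents = 64, count = 64 - 16 - nodehdr.count.  A block holding
 * 20 entries gives count = 28, so a sibling with up to 28 entries can be
 * merged in while still leaving 16 slots (25%) of the merged block free.
 */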
/*
 * Pick up the last hashvalue from an intermediate node.
 */
STATIC uint
xfs_da3_node_lasthash(
	struct xfs_inode	*dp,
	struct xfs_buf		*bp,
	int			*count)
{
	struct xfs_da_intnode	*node;
	struct xfs_da_node_entry *btree;
	struct xfs_da3_icnode_hdr nodehdr;

	node = bp->b_addr;
	dp->d_ops->node_hdr_from_disk(&nodehdr, node);
	if (count)
		*count = nodehdr.count;
	if (!nodehdr.count)
		return 0;
	btree = dp->d_ops->node_tree_p(node);
	return be32_to_cpu(btree[nodehdr.count - 1].hashval);
}

/*
 * Walk back up the tree adjusting hash values as necessary,
 * when we stop making changes, return.
 */
void
xfs_da3_fixhashpath(
	struct xfs_da_state	*state,
	struct xfs_da_state_path *path)
{
	struct xfs_da_state_blk	*blk;
	struct xfs_da_intnode	*node;
	struct xfs_da_node_entry *btree;
	xfs_dahash_t		lasthash = 0;
	int			level;
	int			count;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_fixhashpath(state->args);

	level = path->active-1;
	blk = &path->blk[ level ];
	switch (blk->magic) {
	case XFS_ATTR_LEAF_MAGIC:
		lasthash = xfs_attr_leaf_lasthash(blk->bp, &count);
		if (count == 0)
			return;
		break;
	case XFS_DIR2_LEAFN_MAGIC:
		lasthash = xfs_dir2_leafn_lasthash(dp, blk->bp, &count);
		if (count == 0)
			return;
		break;
	case XFS_DA_NODE_MAGIC:
		lasthash = xfs_da3_node_lasthash(dp, blk->bp, &count);
		if (count == 0)
			return;
		break;
	}
	for (blk--, level--; level >= 0; blk--, level--) {
		struct xfs_da3_icnode_hdr nodehdr;

		node = blk->bp->b_addr;
		dp->d_ops->node_hdr_from_disk(&nodehdr, node);
		btree = dp->d_ops->node_tree_p(node);
		if (be32_to_cpu(btree[blk->index].hashval) == lasthash)
			break;
		blk->hashval = lasthash;
		btree[blk->index].hashval = cpu_to_be32(lasthash);
		xfs_trans_log_buf(state->args->trans, blk->bp,
				  XFS_DA_LOGRANGE(node, &btree[blk->index],
						  sizeof(*btree)));

		lasthash = be32_to_cpu(btree[nodehdr.count - 1].hashval);
	}
}
/*
 * Remove an entry from an intermediate node.
 */
STATIC void
xfs_da3_node_remove(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*drop_blk)
{
	struct xfs_da_intnode	*node;
	struct xfs_da3_icnode_hdr nodehdr;
	struct xfs_da_node_entry *btree;
	int			index;
	int			tmp;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_node_remove(state->args);

	node = drop_blk->bp->b_addr;
	dp->d_ops->node_hdr_from_disk(&nodehdr, node);
	ASSERT(drop_blk->index < nodehdr.count);
	ASSERT(drop_blk->index >= 0);

	/*
	 * Copy over the offending entry, or just zero it out.
	 */
	index = drop_blk->index;
	btree = dp->d_ops->node_tree_p(node);
	if (index < nodehdr.count - 1) {
		tmp  = nodehdr.count - index - 1;
		tmp *= (uint)sizeof(xfs_da_node_entry_t);
		memmove(&btree[index], &btree[index + 1], tmp);
		xfs_trans_log_buf(state->args->trans, drop_blk->bp,
			XFS_DA_LOGRANGE(node, &btree[index], tmp));
		index = nodehdr.count - 1;
	}
	memset(&btree[index], 0, sizeof(xfs_da_node_entry_t));
	xfs_trans_log_buf(state->args->trans, drop_blk->bp,
		XFS_DA_LOGRANGE(node, &btree[index], sizeof(btree[index])));
	nodehdr.count -= 1;
	dp->d_ops->node_hdr_to_disk(node, &nodehdr);
	xfs_trans_log_buf(state->args->trans, drop_blk->bp,
		XFS_DA_LOGRANGE(node, &node->hdr, dp->d_ops->node_hdr_size));

	/*
	 * Copy the last hash value from the block to propagate upwards.
	 */
	drop_blk->hashval = be32_to_cpu(btree[index - 1].hashval);
}

/*
 * Unbalance the elements between two intermediate nodes,
 * move all Btree elements from one node into another.
 */
STATIC void
xfs_da3_node_unbalance(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*drop_blk,
	struct xfs_da_state_blk	*save_blk)
{
	struct xfs_da_intnode	*drop_node;
	struct xfs_da_intnode	*save_node;
	struct xfs_da_node_entry *drop_btree;
	struct xfs_da_node_entry *save_btree;
	struct xfs_da3_icnode_hdr drop_hdr;
	struct xfs_da3_icnode_hdr save_hdr;
	struct xfs_trans	*tp;
	int			sindex;
	int			tmp;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_node_unbalance(state->args);

	drop_node = drop_blk->bp->b_addr;
	save_node = save_blk->bp->b_addr;
	dp->d_ops->node_hdr_from_disk(&drop_hdr, drop_node);
	dp->d_ops->node_hdr_from_disk(&save_hdr, save_node);
	drop_btree = dp->d_ops->node_tree_p(drop_node);
	save_btree = dp->d_ops->node_tree_p(save_node);
	tp = state->args->trans;

	/*
	 * If the dying block has lower hashvals, then move all the
	 * elements in the remaining block up to make a hole.
	 */
	if ((be32_to_cpu(drop_btree[0].hashval) <
			be32_to_cpu(save_btree[0].hashval)) ||
	    (be32_to_cpu(drop_btree[drop_hdr.count - 1].hashval) <
			be32_to_cpu(save_btree[save_hdr.count - 1].hashval))) {
		/* XXX: check this - is memmove dst correct? */
		tmp = save_hdr.count * sizeof(xfs_da_node_entry_t);
		memmove(&save_btree[drop_hdr.count], &save_btree[0], tmp);

		sindex = 0;
		xfs_trans_log_buf(tp, save_blk->bp,
			XFS_DA_LOGRANGE(save_node, &save_btree[0],
				(save_hdr.count + drop_hdr.count) *
						sizeof(xfs_da_node_entry_t)));
	} else {
		sindex = save_hdr.count;
		xfs_trans_log_buf(tp, save_blk->bp,
			XFS_DA_LOGRANGE(save_node, &save_btree[sindex],
				drop_hdr.count * sizeof(xfs_da_node_entry_t)));
	}

	/*
	 * Move all the B-tree elements from drop_blk to save_blk.
	 */
	tmp = drop_hdr.count * (uint)sizeof(xfs_da_node_entry_t);
	memcpy(&save_btree[sindex], &drop_btree[0], tmp);
	save_hdr.count += drop_hdr.count;

	dp->d_ops->node_hdr_to_disk(save_node, &save_hdr);
	xfs_trans_log_buf(tp, save_blk->bp,
		XFS_DA_LOGRANGE(save_node, &save_node->hdr,
				dp->d_ops->node_hdr_size));

	/*
	 * Save the last hashval in the remaining block for upward propagation.
	 */
	save_blk->hashval = be32_to_cpu(save_btree[save_hdr.count - 1].hashval);
}
/*========================================================================
 * Routines used for finding things in the Btree.
 *========================================================================*/

/*
 * Walk down the Btree looking for a particular filename, filling
 * in the state structure as we go.
 *
 * We will set the state structure to point to each of the elements
 * in each of the nodes where either the hashval is or should be.
 *
 * We support duplicate hashvals so for each entry in the current
 * node that could contain the desired hashval, descend.  This is a
 * pruned depth-first tree search.
 */
int							/* error */
xfs_da3_node_lookup_int(
	struct xfs_da_state	*state,
	int			*result)
{
	struct xfs_da_state_blk	*blk;
	struct xfs_da_blkinfo	*curr;
	struct xfs_da_intnode	*node;
	struct xfs_da_node_entry *btree;
	struct xfs_da3_icnode_hdr nodehdr;
	struct xfs_da_args	*args;
	xfs_dablk_t		blkno;
	xfs_dahash_t		hashval;
	xfs_dahash_t		btreehashval;
	int			probe;
	int			span;
	int			max;
	int			error;
	int			retval;
	struct xfs_inode	*dp = state->args->dp;

	args = state->args;

	/*
	 * Descend thru the B-tree searching each level for the right
	 * node to use, until the right hashval is found.
	 */
	blkno = (args->whichfork == XFS_DATA_FORK)? args->geo->leafblk : 0;
	for (blk = &state->path.blk[0], state->path.active = 1;
			 state->path.active <= XFS_DA_NODE_MAXDEPTH;
			 blk++, state->path.active++) {
		/*
		 * Read the next node down in the tree.
		 */
		blk->blkno = blkno;
		error = xfs_da3_node_read(args->trans, args->dp, blkno,
					  -1, &blk->bp, args->whichfork);
		if (error) {
			blk->blkno = 0;
			state->path.active--;
			return error;
		}
		curr = blk->bp->b_addr;
		blk->magic = be16_to_cpu(curr->magic);

		if (blk->magic == XFS_ATTR_LEAF_MAGIC ||
		    blk->magic == XFS_ATTR3_LEAF_MAGIC) {
			blk->magic = XFS_ATTR_LEAF_MAGIC;
			blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL);
			break;
		}

		if (blk->magic == XFS_DIR2_LEAFN_MAGIC ||
		    blk->magic == XFS_DIR3_LEAFN_MAGIC) {
			blk->magic = XFS_DIR2_LEAFN_MAGIC;
			blk->hashval = xfs_dir2_leafn_lasthash(args->dp,
							       blk->bp, NULL);
			break;
		}

		blk->magic = XFS_DA_NODE_MAGIC;


		/*
		 * Search an intermediate node for a match.
		 */
		node = blk->bp->b_addr;
		dp->d_ops->node_hdr_from_disk(&nodehdr, node);
		btree = dp->d_ops->node_tree_p(node);

		max = nodehdr.count;
		blk->hashval = be32_to_cpu(btree[max - 1].hashval);

		/*
		 * Binary search.  (note: small blocks will skip loop)
		 */
		probe = span = max / 2;
		hashval = args->hashval;
		while (span > 4) {
			span /= 2;
			btreehashval = be32_to_cpu(btree[probe].hashval);
			if (btreehashval < hashval)
				probe += span;
			else if (btreehashval > hashval)
				probe -= span;
			else
				break;
		}
		ASSERT((probe >= 0) && (probe < max));
		ASSERT((span <= 4) ||
			(be32_to_cpu(btree[probe].hashval) == hashval));

		/*
		 * Since we may have duplicate hashvals, find the first
		 * matching hashval in the node.
		 */
		while (probe > 0 &&
		       be32_to_cpu(btree[probe].hashval) >= hashval) {
			probe--;
		}
		while (probe < max &&
		       be32_to_cpu(btree[probe].hashval) < hashval) {
			probe++;
		}

		/*
		 * Pick the right block to descend on.
		 */
		if (probe == max) {
			blk->index = max - 1;
			blkno = be32_to_cpu(btree[max - 1].before);
		} else {
			blk->index = probe;
			blkno = be32_to_cpu(btree[probe].before);
		}
	}

	/*
	 * A leaf block that ends in the hashval that we are interested in
	 * (final hashval == search hashval) means that the next block may
	 * contain more entries with the same hashval, shift upward to the
	 * next leaf and keep searching.
	 */
	for (;;) {
		if (blk->magic == XFS_DIR2_LEAFN_MAGIC) {
			retval = xfs_dir2_leafn_lookup_int(blk->bp, args,
							   &blk->index, state);
		} else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
			retval = xfs_attr3_leaf_lookup_int(blk->bp, args);
			blk->index = args->index;
			args->blkno = blk->blkno;
		} else {
			ASSERT(0);
			return -EFSCORRUPTED;
		}
		if (((retval == -ENOENT) || (retval == -ENOATTR)) &&
		    (blk->hashval == args->hashval)) {
			error = xfs_da3_path_shift(state, &state->path, 1, 1,
						   &retval);
			if (error)
				return error;
			if (retval == 0) {
				continue;
			} else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
				/* path_shift() gives ENOENT */
				retval = -ENOATTR;
			}
		}
		break;
	}
	*result = retval;
	return 0;
}
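
/*
 * Illustration of the probe adjustment above, with invented hashvals:
 * searching for hash 0x30e2 in a node whose entries carry hashes
 * { 0x1000, 0x30e2, 0x30e2, 0x9000 }, the binary search may land on
 * either 0x30e2 entry; the first linear loop then backs up past all
 * matches (to index 0, hash 0x1000) and the second walks forward to the
 * first entry whose hashval is not below the target (index 1).  Duplicate
 * hash chains are therefore always entered from their lowest block.
 */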
/*========================================================================
 * Utility routines.
 *========================================================================*/

/*
 * Compare two intermediate nodes for "order".
 */
STATIC int
xfs_da3_node_order(
	struct xfs_inode *dp,
	struct xfs_buf	*node1_bp,
	struct xfs_buf	*node2_bp)
{
	struct xfs_da_intnode	*node1;
	struct xfs_da_intnode	*node2;
	struct xfs_da_node_entry *btree1;
	struct xfs_da_node_entry *btree2;
	struct xfs_da3_icnode_hdr node1hdr;
	struct xfs_da3_icnode_hdr node2hdr;

	node1 = node1_bp->b_addr;
	node2 = node2_bp->b_addr;
	dp->d_ops->node_hdr_from_disk(&node1hdr, node1);
	dp->d_ops->node_hdr_from_disk(&node2hdr, node2);
	btree1 = dp->d_ops->node_tree_p(node1);
	btree2 = dp->d_ops->node_tree_p(node2);

	if (node1hdr.count > 0 && node2hdr.count > 0 &&
	    ((be32_to_cpu(btree2[0].hashval) <
			be32_to_cpu(btree1[0].hashval)) ||
	     (be32_to_cpu(btree2[node2hdr.count - 1].hashval) <
			be32_to_cpu(btree1[node1hdr.count - 1].hashval)))) {
		return 1;
	}
	return 0;
}

/*
 * Link a new block into a doubly linked list of blocks (of whatever type).
 */
int							/* error */
xfs_da3_blk_link(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*old_blk,
	struct xfs_da_state_blk	*new_blk)
{
	struct xfs_da_blkinfo	*old_info;
	struct xfs_da_blkinfo	*new_info;
	struct xfs_da_blkinfo	*tmp_info;
	struct xfs_da_args	*args;
	struct xfs_buf		*bp;
	int			before = 0;
	int			error;
	struct xfs_inode	*dp = state->args->dp;

	/*
	 * Set up environment.
	 */
	args = state->args;
	ASSERT(args != NULL);
	old_info = old_blk->bp->b_addr;
	new_info = new_blk->bp->b_addr;
	ASSERT(old_blk->magic == XFS_DA_NODE_MAGIC ||
	       old_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
	       old_blk->magic == XFS_ATTR_LEAF_MAGIC);

	switch (old_blk->magic) {
	case XFS_ATTR_LEAF_MAGIC:
		before = xfs_attr_leaf_order(old_blk->bp, new_blk->bp);
		break;
	case XFS_DIR2_LEAFN_MAGIC:
		before = xfs_dir2_leafn_order(dp, old_blk->bp, new_blk->bp);
		break;
	case XFS_DA_NODE_MAGIC:
		before = xfs_da3_node_order(dp, old_blk->bp, new_blk->bp);
		break;
	}

	/*
	 * Link blocks in appropriate order.
	 */
	if (before) {
		/*
		 * Link new block in before existing block.
		 */
		trace_xfs_da_link_before(args);
		new_info->forw = cpu_to_be32(old_blk->blkno);
		new_info->back = old_info->back;
		if (old_info->back) {
			error = xfs_da3_node_read(args->trans, dp,
						  be32_to_cpu(old_info->back),
						  -1, &bp, args->whichfork);
			if (error)
				return error;
			ASSERT(bp != NULL);
			tmp_info = bp->b_addr;
			ASSERT(tmp_info->magic == old_info->magic);
			ASSERT(be32_to_cpu(tmp_info->forw) == old_blk->blkno);
			tmp_info->forw = cpu_to_be32(new_blk->blkno);
			xfs_trans_log_buf(args->trans, bp, 0,
					  sizeof(*tmp_info)-1);
		}
		old_info->back = cpu_to_be32(new_blk->blkno);
	} else {
		/*
		 * Link new block in after existing block.
		 */
		trace_xfs_da_link_after(args);
		new_info->forw = old_info->forw;
		new_info->back = cpu_to_be32(old_blk->blkno);
		if (old_info->forw) {
			error = xfs_da3_node_read(args->trans, dp,
						  be32_to_cpu(old_info->forw),
						  -1, &bp, args->whichfork);
			if (error)
				return error;
			ASSERT(bp != NULL);
			tmp_info = bp->b_addr;
			ASSERT(tmp_info->magic == old_info->magic);
			ASSERT(be32_to_cpu(tmp_info->back) == old_blk->blkno);
			tmp_info->back = cpu_to_be32(new_blk->blkno);
			xfs_trans_log_buf(args->trans, bp, 0,
					  sizeof(*tmp_info)-1);
		}
		old_info->forw = cpu_to_be32(new_blk->blkno);
	}

	xfs_trans_log_buf(args->trans, old_blk->bp, 0, sizeof(*tmp_info) - 1);
	xfs_trans_log_buf(args->trans, new_blk->bp, 0, sizeof(*tmp_info) - 1);
	return 0;
}
/*
 * Unlink a block from a doubly linked list of blocks.
 */
STATIC int						/* error */
xfs_da3_blk_unlink(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*drop_blk,
	struct xfs_da_state_blk	*save_blk)
{
	struct xfs_da_blkinfo	*drop_info;
	struct xfs_da_blkinfo	*save_info;
	struct xfs_da_blkinfo	*tmp_info;
	struct xfs_da_args	*args;
	struct xfs_buf		*bp;
	int			error;

	/*
	 * Set up environment.
	 */
	args = state->args;
	ASSERT(args != NULL);
	save_info = save_blk->bp->b_addr;
	drop_info = drop_blk->bp->b_addr;
	ASSERT(save_blk->magic == XFS_DA_NODE_MAGIC ||
	       save_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
	       save_blk->magic == XFS_ATTR_LEAF_MAGIC);
	ASSERT(save_blk->magic == drop_blk->magic);
	ASSERT((be32_to_cpu(save_info->forw) == drop_blk->blkno) ||
	       (be32_to_cpu(save_info->back) == drop_blk->blkno));
	ASSERT((be32_to_cpu(drop_info->forw) == save_blk->blkno) ||
	       (be32_to_cpu(drop_info->back) == save_blk->blkno));

	/*
	 * Unlink the leaf block from the doubly linked chain of leaves.
	 */
	if (be32_to_cpu(save_info->back) == drop_blk->blkno) {
		trace_xfs_da_unlink_back(args);
		save_info->back = drop_info->back;
		if (drop_info->back) {
			error = xfs_da3_node_read(args->trans, args->dp,
						  be32_to_cpu(drop_info->back),
						  -1, &bp, args->whichfork);
			if (error)
				return error;
			ASSERT(bp != NULL);
			tmp_info = bp->b_addr;
			ASSERT(tmp_info->magic == save_info->magic);
			ASSERT(be32_to_cpu(tmp_info->forw) == drop_blk->blkno);
			tmp_info->forw = cpu_to_be32(save_blk->blkno);
			xfs_trans_log_buf(args->trans, bp, 0,
					  sizeof(*tmp_info) - 1);
		}
	} else {
		trace_xfs_da_unlink_forward(args);
		save_info->forw = drop_info->forw;
		if (drop_info->forw) {
			error = xfs_da3_node_read(args->trans, args->dp,
						  be32_to_cpu(drop_info->forw),
						  -1, &bp, args->whichfork);
			if (error)
				return error;
			ASSERT(bp != NULL);
			tmp_info = bp->b_addr;
			ASSERT(tmp_info->magic == save_info->magic);
			ASSERT(be32_to_cpu(tmp_info->back) == drop_blk->blkno);
			tmp_info->back = cpu_to_be32(save_blk->blkno);
			xfs_trans_log_buf(args->trans, bp, 0,
					  sizeof(*tmp_info) - 1);
		}
	}

	xfs_trans_log_buf(args->trans, save_blk->bp, 0, sizeof(*save_info) - 1);
	return 0;
}
/*
 * Move a path "forward" or "!forward" one block at the current level.
 *
 * This routine will adjust a "path" to point to the next block
 * "forward" (higher hashvalues) or "!forward" (lower hashvals) in the
 * Btree, including updating pointers to the intermediate nodes between
 * the new bottom and the root.
 */
int							/* error */
xfs_da3_path_shift(
	struct xfs_da_state	*state,
	struct xfs_da_state_path *path,
	int			forward,
	int			release,
	int			*result)
{
	struct xfs_da_state_blk	*blk;
	struct xfs_da_blkinfo	*info;
	struct xfs_da_intnode	*node;
	struct xfs_da_args	*args;
	struct xfs_da_node_entry *btree;
	struct xfs_da3_icnode_hdr nodehdr;
	struct xfs_buf		*bp;
	xfs_dablk_t		blkno = 0;
	int			level;
	int			error;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_path_shift(state->args);

	/*
	 * Roll up the Btree looking for the first block where our
	 * current index is not at the edge of the block.  Note that
	 * we skip the bottom layer because we want the sibling block.
	 */
	args = state->args;
	ASSERT(args != NULL);
	ASSERT(path != NULL);
	ASSERT((path->active > 0) && (path->active < XFS_DA_NODE_MAXDEPTH));
	level = (path->active-1) - 1;	/* skip bottom layer in path */
	for (blk = &path->blk[level]; level >= 0; blk--, level--) {
		node = blk->bp->b_addr;
		dp->d_ops->node_hdr_from_disk(&nodehdr, node);
		btree = dp->d_ops->node_tree_p(node);

		if (forward && (blk->index < nodehdr.count - 1)) {
			blk->index++;
			blkno = be32_to_cpu(btree[blk->index].before);
			break;
		} else if (!forward && (blk->index > 0)) {
			blk->index--;
			blkno = be32_to_cpu(btree[blk->index].before);
			break;
		}
	}
	if (level < 0) {
		*result = -ENOENT;	/* we're out of our tree */
		ASSERT(args->op_flags & XFS_DA_OP_OKNOENT);
		return 0;
	}

	/*
	 * Roll down the edge of the subtree until we reach the
	 * same depth we were at originally.
	 */
	for (blk++, level++; level < path->active; blk++, level++) {
		/*
		 * Read the next child block into a local buffer.
		 */
		error = xfs_da3_node_read(args->trans, dp, blkno, -1, &bp,
					  args->whichfork);
		if (error)
			return error;

		/*
		 * Release the old block (if it's dirty, the trans doesn't
		 * actually let go) and swap the local buffer into the path
		 * structure. This ensures failure of the above read doesn't
		 * set a NULL buffer in an active slot in the path.
		 */
		if (release)
			xfs_trans_brelse(args->trans, blk->bp);
		blk->blkno = blkno;
		blk->bp = bp;

		info = blk->bp->b_addr;
		ASSERT(info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
		       info->magic == cpu_to_be16(XFS_DA3_NODE_MAGIC) ||
		       info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
		       info->magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC) ||
		       info->magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC) ||
		       info->magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC));


		/*
		 * Note: we flatten the magic number to a single type so we
		 * don't have to compare against crc/non-crc types elsewhere.
		 */
		switch (be16_to_cpu(info->magic)) {
		case XFS_DA_NODE_MAGIC:
		case XFS_DA3_NODE_MAGIC:
			blk->magic = XFS_DA_NODE_MAGIC;
			node = (xfs_da_intnode_t *)info;
			dp->d_ops->node_hdr_from_disk(&nodehdr, node);
			btree = dp->d_ops->node_tree_p(node);
			blk->hashval = be32_to_cpu(
					btree[nodehdr.count - 1].hashval);
			if (forward)
				blk->index = 0;
			else
				blk->index = nodehdr.count - 1;
			blkno = be32_to_cpu(btree[blk->index].before);
			break;
		case XFS_ATTR_LEAF_MAGIC:
		case XFS_ATTR3_LEAF_MAGIC:
			blk->magic = XFS_ATTR_LEAF_MAGIC;
			ASSERT(level == path->active-1);
			blk->index = 0;
			blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL);
			break;
		case XFS_DIR2_LEAFN_MAGIC:
		case XFS_DIR3_LEAFN_MAGIC:
			blk->magic = XFS_DIR2_LEAFN_MAGIC;
			ASSERT(level == path->active-1);
			blk->index = 0;
			blk->hashval = xfs_dir2_leafn_lasthash(args->dp,
							       blk->bp, NULL);
			break;
		default:
			ASSERT(0);
			break;
		}
	}
	*result = 0;
	return 0;
}
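
/*
 * Example of a forward shift on a two level path, with invented indices:
 * the path is [root, leaf] and the leaf is child 1 of a root holding 4
 * entries.  The roll-up loop starts at the root (the bottom layer is
 * skipped), bumps the root's index from 1 to 2, and the roll-down loop
 * reads child 2 as the new leaf with its index reset to 0.  Had the root
 * index already been 3 (the last entry), level would fall below zero and
 * *result would be set to -ENOENT instead.
 */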

/*========================================================================
 * Utility routines.
 *========================================================================*/

/*
 * Implement a simple hash on a character string.
 * Rotate the hash value by 7 bits, then XOR each character in.
 * This is implemented with some source-level loop unrolling.
 */
xfs_dahash_t
xfs_da_hashname(const __uint8_t *name, int namelen)
{
	xfs_dahash_t hash;

	/*
	 * Do four characters at a time as long as we can.
	 */
	for (hash = 0; namelen >= 4; namelen -= 4, name += 4)
		hash = (name[0] << 21) ^ (name[1] << 14) ^ (name[2] << 7) ^
		       (name[3] << 0) ^ rol32(hash, 7 * 4);

	/*
	 * Now do the rest of the characters.
	 */
	switch (namelen) {
	case 3:
		return (name[0] << 14) ^ (name[1] << 7) ^ (name[2] << 0) ^
		       rol32(hash, 7 * 3);
	case 2:
		return (name[0] << 7) ^ (name[1] << 0) ^ rol32(hash, 7 * 2);
	case 1:
		return (name[0] << 0) ^ rol32(hash, 7 * 1);
	default: /* case 0: */
		return hash;
	}
}

enum xfs_dacmp
xfs_da_compname(
	struct xfs_da_args *args,
	const unsigned char *name,
	int		len)
{
	return (args->namelen == len && memcmp(args->name, name, len) == 0) ?
					XFS_CMP_EXACT : XFS_CMP_DIFFERENT;
}
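
/*
 * Worked example of the hash above for the two byte name "ab"
 * (0x61, 0x62): the four-at-a-time loop is skipped, so the "case 2"
 * branch computes
 *
 *	('a' << 7) ^ ('b' << 0) ^ rol32(0, 14)
 *	= 0x3080 ^ 0x0062 ^ 0
 *	= 0x30e2
 *
 * i.e. each character lands 7 bits above the previous one, and longer
 * names fold earlier characters back in via the rotate.
 */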

enum xfs_dacmp
xfs_da_compname(
	struct xfs_da_args *args,
	const unsigned char *name,
	int		len)
{
	return (args->namelen == len && memcmp(args->name, name, len) == 0) ?
					XFS_CMP_EXACT : XFS_CMP_DIFFERENT;
}

static xfs_dahash_t
xfs_default_hashname(
	struct xfs_name	*name)
{
	return xfs_da_hashname(name->name, name->len);
}

const struct xfs_nameops xfs_default_nameops = {
	.hashname	= xfs_default_hashname,
	.compname	= xfs_da_compname
};

int
xfs_da_grow_inode_int(
	struct xfs_da_args	*args,
	xfs_fileoff_t		*bno,
	int			count)
{
	struct xfs_trans	*tp = args->trans;
	struct xfs_inode	*dp = args->dp;
	int			w = args->whichfork;
	xfs_rfsblock_t		nblks = dp->i_d.di_nblocks;
	struct xfs_bmbt_irec	map, *mapp;
	int			nmap, error, got, i, mapi;

	/*
	 * Find a spot in the file space to put the new block.
	 */
	error = xfs_bmap_first_unused(tp, dp, count, bno, w);
	if (error)
		return error;

	/*
	 * Try mapping it in one filesystem block.
	 */
	nmap = 1;
	ASSERT(args->firstblock != NULL);
	error = xfs_bmapi_write(tp, dp, *bno, count,
			xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA|XFS_BMAPI_CONTIG,
			args->firstblock, args->total, &map, &nmap,
			args->flist);
	if (error)
		return error;

	ASSERT(nmap <= 1);
	if (nmap == 1) {
		mapp = &map;
		mapi = 1;
	} else if (nmap == 0 && count > 1) {
		xfs_fileoff_t		b;
		int			c;

		/*
		 * If we didn't get it and the block might work if fragmented,
		 * try without the CONTIG flag.  Loop until we get it all.
		 */
		mapp = kmem_alloc(sizeof(*mapp) * count, KM_SLEEP);
		for (b = *bno, mapi = 0; b < *bno + count; ) {
			nmap = MIN(XFS_BMAP_MAX_NMAP, count);
			c = (int)(*bno + count - b);
			error = xfs_bmapi_write(tp, dp, b, c,
					xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA,
					args->firstblock, args->total,
					&mapp[mapi], &nmap, args->flist);
			if (error)
				goto out_free_map;
			if (nmap < 1)
				break;
			mapi += nmap;
			b = mapp[mapi - 1].br_startoff +
			    mapp[mapi - 1].br_blockcount;
		}
	} else {
		mapi = 0;
		mapp = NULL;
	}

	/*
	 * Count the blocks we got, make sure it matches the total.
	 */
	for (i = 0, got = 0; i < mapi; i++)
		got += mapp[i].br_blockcount;
	if (got != count || mapp[0].br_startoff != *bno ||
	    mapp[mapi - 1].br_startoff + mapp[mapi - 1].br_blockcount !=
	    *bno + count) {
		error = -ENOSPC;
		goto out_free_map;
	}

	/* account for newly allocated blocks in reserved blocks total */
	args->total -= dp->i_d.di_nblocks - nblks;

out_free_map:
	if (mapp != &map)
		kmem_free(mapp);
	return error;
}

/*
 * Add a block to the btree ahead of the file.
 * Return the new block number to the caller.
 */
int
xfs_da_grow_inode(
	struct xfs_da_args	*args,
	xfs_dablk_t		*new_blkno)
{
	xfs_fileoff_t		bno;
	int			error;

	trace_xfs_da_grow_inode(args);

	bno = args->geo->leafblk;
	error = xfs_da_grow_inode_int(args, &bno, args->geo->fsbcount);
	if (!error)
		*new_blkno = (xfs_dablk_t)bno;
	return error;
}
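
/*
 * The usual pattern for callers splitting the tree is to pair this with
 * xfs_da_get_buf(); a sketch of the sequence a root split follows:
 *
 *	error = xfs_da_grow_inode(args, &blkno);
 *	if (error)
 *		return error;
 *	error = xfs_da_get_buf(args->trans, args->dp, blkno, -1, &bp,
 *			       args->whichfork);
 *	if (error)
 *		return error;
 *	... initialize and log the new node/leaf block in bp ...
 */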

/*
 * Ick.  We need to always be able to remove a btree block, even
 * if there's no space reservation because the filesystem is full.
 * This is called if xfs_bunmapi on a btree block fails due to ENOSPC.
 * It swaps the target block with the last block in the file.  The
 * last block in the file can always be removed, since removing it
 * cannot itself cause a bmap btree split.
 */
STATIC int
xfs_da3_swap_lastblock(
	struct xfs_da_args	*args,
	xfs_dablk_t		*dead_blknop,
	struct xfs_buf		**dead_bufp)
{
	struct xfs_da_blkinfo	*dead_info;
	struct xfs_da_blkinfo	*sib_info;
	struct xfs_da_intnode	*par_node;
	struct xfs_da_intnode	*dead_node;
	struct xfs_dir2_leaf	*dead_leaf2;
	struct xfs_da_node_entry *btree;
	struct xfs_da3_icnode_hdr par_hdr;
	struct xfs_inode	*dp;
	struct xfs_trans	*tp;
	struct xfs_mount	*mp;
	struct xfs_buf		*dead_buf;
	struct xfs_buf		*last_buf;
	struct xfs_buf		*sib_buf;
	struct xfs_buf		*par_buf;
	xfs_dahash_t		dead_hash;
	xfs_fileoff_t		lastoff;
	xfs_dablk_t		dead_blkno;
	xfs_dablk_t		last_blkno;
	xfs_dablk_t		sib_blkno;
	xfs_dablk_t		par_blkno;
	int			error;
	int			w;
	int			entno;
	int			level;
	int			dead_level;

	trace_xfs_da_swap_lastblock(args);

	dead_buf = *dead_bufp;
	dead_blkno = *dead_blknop;
	tp = args->trans;
	dp = args->dp;
	w = args->whichfork;
	ASSERT(w == XFS_DATA_FORK);
	mp = dp->i_mount;
	lastoff = args->geo->freeblk;
	error = xfs_bmap_last_before(tp, dp, &lastoff, w);
	if (error)
		return error;
	if (unlikely(lastoff == 0)) {
		XFS_ERROR_REPORT("xfs_da_swap_lastblock(1)", XFS_ERRLEVEL_LOW,
				 mp);
		return -EFSCORRUPTED;
	}
	/*
	 * Read the last block in the btree space.
	 */
	last_blkno = (xfs_dablk_t)lastoff - args->geo->fsbcount;
	error = xfs_da3_node_read(tp, dp, last_blkno, -1, &last_buf, w);
	if (error)
		return error;
	/*
	 * Copy the last block into the dead buffer and log it.
	 */
	memcpy(dead_buf->b_addr, last_buf->b_addr, args->geo->blksize);
	xfs_trans_log_buf(tp, dead_buf, 0, args->geo->blksize - 1);
	dead_info = dead_buf->b_addr;
	/*
	 * Get values from the moved block.
	 */
	if (dead_info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
	    dead_info->magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) {
		struct xfs_dir3_icleaf_hdr leafhdr;
		struct xfs_dir2_leaf_entry *ents;

		dead_leaf2 = (xfs_dir2_leaf_t *)dead_info;
		dp->d_ops->leaf_hdr_from_disk(&leafhdr, dead_leaf2);
		ents = dp->d_ops->leaf_ents_p(dead_leaf2);
		dead_level = 0;
		dead_hash = be32_to_cpu(ents[leafhdr.count - 1].hashval);
	} else {
		struct xfs_da3_icnode_hdr deadhdr;

		dead_node = (xfs_da_intnode_t *)dead_info;
		dp->d_ops->node_hdr_from_disk(&deadhdr, dead_node);
		btree = dp->d_ops->node_tree_p(dead_node);
		dead_level = deadhdr.level;
		dead_hash = be32_to_cpu(btree[deadhdr.count - 1].hashval);
	}
	sib_buf = par_buf = NULL;
	/*
	 * If the moved block has a left sibling, fix up the pointers.
	 */
	if ((sib_blkno = be32_to_cpu(dead_info->back))) {
		error = xfs_da3_node_read(tp, dp, sib_blkno, -1, &sib_buf, w);
		if (error)
			goto done;
		sib_info = sib_buf->b_addr;
		if (unlikely(
		    be32_to_cpu(sib_info->forw) != last_blkno ||
		    sib_info->magic != dead_info->magic)) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(2)",
					 XFS_ERRLEVEL_LOW, mp);
			error = -EFSCORRUPTED;
			goto done;
		}
		sib_info->forw = cpu_to_be32(dead_blkno);
		xfs_trans_log_buf(tp, sib_buf,
			XFS_DA_LOGRANGE(sib_info, &sib_info->forw,
					sizeof(sib_info->forw)));
		sib_buf = NULL;
	}
	/*
	 * If the moved block has a right sibling, fix up the pointers.
	 */
	if ((sib_blkno = be32_to_cpu(dead_info->forw))) {
		error = xfs_da3_node_read(tp, dp, sib_blkno, -1, &sib_buf, w);
		if (error)
			goto done;
		sib_info = sib_buf->b_addr;
		if (unlikely(
		    be32_to_cpu(sib_info->back) != last_blkno ||
		    sib_info->magic != dead_info->magic)) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(3)",
					 XFS_ERRLEVEL_LOW, mp);
			error = -EFSCORRUPTED;
			goto done;
		}
		sib_info->back = cpu_to_be32(dead_blkno);
		xfs_trans_log_buf(tp, sib_buf,
			XFS_DA_LOGRANGE(sib_info, &sib_info->back,
					sizeof(sib_info->back)));
		sib_buf = NULL;
	}
	par_blkno = args->geo->leafblk;
	level = -1;
	/*
	 * Walk down the tree looking for the parent of the moved block.
	 */
	for (;;) {
		error = xfs_da3_node_read(tp, dp, par_blkno, -1, &par_buf, w);
		if (error)
			goto done;
		par_node = par_buf->b_addr;
		dp->d_ops->node_hdr_from_disk(&par_hdr, par_node);
		if (level >= 0 && level != par_hdr.level + 1) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(4)",
					 XFS_ERRLEVEL_LOW, mp);
			error = -EFSCORRUPTED;
			goto done;
		}
		level = par_hdr.level;
		btree = dp->d_ops->node_tree_p(par_node);
		for (entno = 0;
		     entno < par_hdr.count &&
		     be32_to_cpu(btree[entno].hashval) < dead_hash;
		     entno++)
			continue;
		if (entno == par_hdr.count) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(5)",
					 XFS_ERRLEVEL_LOW, mp);
			error = -EFSCORRUPTED;
			goto done;
		}
		par_blkno = be32_to_cpu(btree[entno].before);
		if (level == dead_level + 1)
			break;
		xfs_trans_brelse(tp, par_buf);
		par_buf = NULL;
	}
	/*
	 * We're in the right parent block.
	 * Look for the right entry.
	 */
	for (;;) {
		for (;
		     entno < par_hdr.count &&
		     be32_to_cpu(btree[entno].before) != last_blkno;
		     entno++)
			continue;
		if (entno < par_hdr.count)
			break;
		par_blkno = par_hdr.forw;
		xfs_trans_brelse(tp, par_buf);
		par_buf = NULL;
		if (unlikely(par_blkno == 0)) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(6)",
					 XFS_ERRLEVEL_LOW, mp);
			error = -EFSCORRUPTED;
			goto done;
		}
		error = xfs_da3_node_read(tp, dp, par_blkno, -1, &par_buf, w);
		if (error)
			goto done;
		par_node = par_buf->b_addr;
		dp->d_ops->node_hdr_from_disk(&par_hdr, par_node);
		if (par_hdr.level != level) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(7)",
					 XFS_ERRLEVEL_LOW, mp);
			error = -EFSCORRUPTED;
			goto done;
		}
		btree = dp->d_ops->node_tree_p(par_node);
		entno = 0;
	}
	/*
	 * Update the parent entry pointing to the moved block.
	 */
	btree[entno].before = cpu_to_be32(dead_blkno);
	xfs_trans_log_buf(tp, par_buf,
		XFS_DA_LOGRANGE(par_node, &btree[entno].before,
				sizeof(btree[entno].before)));
	*dead_blknop = last_blkno;
	*dead_bufp = last_buf;
	return 0;
done:
	if (par_buf)
		xfs_trans_brelse(tp, par_buf);
	if (sib_buf)
		xfs_trans_brelse(tp, sib_buf);
	xfs_trans_brelse(tp, last_buf);
	return error;
}
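
/*
 * To recap the swap: L (the last da block) is copied down into D (the
 * block the caller wanted to free), L's siblings and its parent entry are
 * repointed at D's blkno, and L is handed back as the new dead block.
 * The caller then punches out L, which is the last block of the fork and
 * so can be removed without a bmap btree split.
 */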

/*
 * Remove a btree block from a directory or attribute.
 */
int
xfs_da_shrink_inode(
	xfs_da_args_t	*args,
	xfs_dablk_t	dead_blkno,
	struct xfs_buf	*dead_buf)
{
	xfs_inode_t *dp;
	int done, error, w, count;
	xfs_trans_t *tp;

	trace_xfs_da_shrink_inode(args);

	dp = args->dp;
	w = args->whichfork;
	tp = args->trans;
	count = args->geo->fsbcount;
	for (;;) {
		/*
		 * Remove extents.  If we get ENOSPC for a dir we have to move
		 * the last block to the place we want to kill.
		 */
		error = xfs_bunmapi(tp, dp, dead_blkno, count,
				    xfs_bmapi_aflag(w), 0, args->firstblock,
				    args->flist, &done);
		if (error == -ENOSPC) {
			if (w != XFS_DATA_FORK)
				break;
			error = xfs_da3_swap_lastblock(args, &dead_blkno,
						       &dead_buf);
			if (error)
				break;
		} else {
			break;
		}
	}
	xfs_trans_binval(tp, dead_buf);
	return error;
}

/*
 * See if the mapping(s) for this btree block are valid, i.e.
 * don't contain holes, are logically contiguous, and cover the whole range.
 */
STATIC int
xfs_da_map_covers_blocks(
	int		nmap,
	xfs_bmbt_irec_t	*mapp,
	xfs_dablk_t	bno,
	int		count)
{
	int		i;
	xfs_fileoff_t	off;

	for (i = 0, off = bno; i < nmap; i++) {
		if (mapp[i].br_startblock == HOLESTARTBLOCK ||
		    mapp[i].br_startblock == DELAYSTARTBLOCK) {
			return 0;
		}
		if (off != mapp[i].br_startoff) {
			return 0;
		}
		off += mapp[i].br_blockcount;
	}
	return off == bno + count;
}
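
/*
 * For example, asking for bno 0, count 8: the mapping
 *	{ startoff 0, blockcount 3 }, { startoff 3, blockcount 5 }
 * is logically contiguous and covers the range, while
 *	{ startoff 0, blockcount 3 }, { startoff 4, blockcount 4 }
 * leaves a one-block gap at offset 3 and fails the check above.
 */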

/*
 * Convert a struct xfs_bmbt_irec to a struct xfs_buf_map.
 *
 * For the single map case, it is assumed that the caller has provided a pointer
 * to a valid xfs_buf_map.  For the multiple map case, this function will
 * allocate the xfs_buf_map to hold all the maps and replace the caller's single
 * map pointer with the allocated map.
 */
static int
xfs_buf_map_from_irec(
	struct xfs_mount	*mp,
	struct xfs_buf_map	**mapp,
	int			*nmaps,
	struct xfs_bmbt_irec	*irecs,
	int			nirecs)
{
	struct xfs_buf_map	*map;
	int			i;

	ASSERT(*nmaps == 1);
	ASSERT(nirecs >= 1);

	if (nirecs > 1) {
		map = kmem_zalloc(nirecs * sizeof(struct xfs_buf_map),
				  KM_SLEEP | KM_NOFS);
		if (!map)
			return -ENOMEM;
		*mapp = map;
	}

	*nmaps = nirecs;
	map = *mapp;
	for (i = 0; i < *nmaps; i++) {
		ASSERT(irecs[i].br_startblock != DELAYSTARTBLOCK &&
		       irecs[i].br_startblock != HOLESTARTBLOCK);
		map[i].bm_bn = XFS_FSB_TO_DADDR(mp, irecs[i].br_startblock);
		map[i].bm_len = XFS_FSB_TO_BB(mp, irecs[i].br_blockcount);
	}
	return 0;
}

/*
 * Map the block we are given ready for reading. There are three possible return
 * values:
 *	-1 - will be returned if we land in a hole and mappedbno == -2 so the
 *	     caller knows not to execute a subsequent read.
 *	 0 - if we mapped the block successfully
 *	<0 - a negative errno (other than -1) if there was an error.
 */
static int
xfs_dabuf_map(
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	xfs_daddr_t		mappedbno,
	int			whichfork,
	struct xfs_buf_map	**map,
	int			*nmaps)
{
	struct xfs_mount	*mp = dp->i_mount;
	int			nfsb;
	int			error = 0;
	struct xfs_bmbt_irec	irec;
	struct xfs_bmbt_irec	*irecs = &irec;
	int			nirecs;

	ASSERT(map && *map);
	ASSERT(*nmaps == 1);

	if (whichfork == XFS_DATA_FORK)
		nfsb = mp->m_dir_geo->fsbcount;
	else
		nfsb = mp->m_attr_geo->fsbcount;

	/*
	 * Caller doesn't have a mapping.  -2 means don't complain
	 * if we land in a hole.
	 */
	if (mappedbno == -1 || mappedbno == -2) {
		/*
		 * Optimize the one-block case.
		 */
		if (nfsb != 1)
			irecs = kmem_zalloc(sizeof(irec) * nfsb,
					    KM_SLEEP | KM_NOFS);

		nirecs = nfsb;
		error = xfs_bmapi_read(dp, (xfs_fileoff_t)bno, nfsb, irecs,
				       &nirecs, xfs_bmapi_aflag(whichfork));
		if (error)
			goto out;
	} else {
		irecs->br_startblock = XFS_DADDR_TO_FSB(mp, mappedbno);
		irecs->br_startoff = (xfs_fileoff_t)bno;
		irecs->br_blockcount = nfsb;
		irecs->br_state = 0;
		nirecs = 1;
	}

	if (!xfs_da_map_covers_blocks(nirecs, irecs, bno, nfsb)) {
		error = mappedbno == -2 ? -1 : -EFSCORRUPTED;
		if (unlikely(error == -EFSCORRUPTED)) {
			if (xfs_error_level >= XFS_ERRLEVEL_LOW) {
				int i;
				xfs_alert(mp, "%s: bno %lld dir: inode %lld",
					__func__, (long long)bno,
					(long long)dp->i_ino);
				/* dump all nirecs mappings, not just irecs[0] */
				for (i = 0; i < nirecs; i++) {
					xfs_alert(mp,
"[%02d] br_startoff %lld br_startblock %lld br_blockcount %lld br_state %d",
						i,
						(long long)irecs[i].br_startoff,
						(long long)irecs[i].br_startblock,
						(long long)irecs[i].br_blockcount,
						irecs[i].br_state);
				}
			}
			XFS_ERROR_REPORT("xfs_da_do_buf(1)",
					 XFS_ERRLEVEL_LOW, mp);
		}
		goto out;
	}
	error = xfs_buf_map_from_irec(mp, map, nmaps, irecs, nirecs);
out:
	if (irecs != &irec)
		kmem_free(irecs);
	return error;
}
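
/*
 * The mappedbno argument to the buffer routines below follows one
 * convention:
 *	-1	caller has no disk address; map bno and treat a hole as
 *		corruption
 *	-2	caller has no disk address; map bno, but a hole is allowed
 *		(the get/read routines then hand back a NULL buffer with
 *		no error)
 *	else	mappedbno is the known disk address for bno; use it directly
 */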

/*
 * Get a buffer for the dir/attr block.
 */
int
xfs_da_get_buf(
	struct xfs_trans	*trans,
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	xfs_daddr_t		mappedbno,
	struct xfs_buf		**bpp,
	int			whichfork)
{
	struct xfs_buf		*bp;
	struct xfs_buf_map	map;
	struct xfs_buf_map	*mapp;
	int			nmap;
	int			error;

	*bpp = NULL;
	mapp = &map;
	nmap = 1;
	error = xfs_dabuf_map(dp, bno, mappedbno, whichfork,
			      &mapp, &nmap);
	if (error) {
		/* mapping a hole is not an error, but we don't continue */
		if (error == -1)
			error = 0;
		goto out_free;
	}

	bp = xfs_trans_get_buf_map(trans, dp->i_mount->m_ddev_targp,
				   mapp, nmap, 0);
	error = bp ? bp->b_error : -EIO;
	if (error) {
		if (bp)
			xfs_trans_brelse(trans, bp);
		goto out_free;
	}

	*bpp = bp;

out_free:
	if (mapp != &map)
		kmem_free(mapp);

	return error;
}

/*
 * Get a buffer for the dir/attr block, fill in the contents.
 */
int
xfs_da_read_buf(
	struct xfs_trans	*trans,
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	xfs_daddr_t		mappedbno,
	struct xfs_buf		**bpp,
	int			whichfork,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf		*bp;
	struct xfs_buf_map	map;
	struct xfs_buf_map	*mapp;
	int			nmap;
	int			error;

	*bpp = NULL;
	mapp = &map;
	nmap = 1;
	error = xfs_dabuf_map(dp, bno, mappedbno, whichfork,
			      &mapp, &nmap);
	if (error) {
		/* mapping a hole is not an error, but we don't continue */
		if (error == -1)
			error = 0;
		goto out_free;
	}

	error = xfs_trans_read_buf_map(dp->i_mount, trans,
					dp->i_mount->m_ddev_targp,
					mapp, nmap, 0, &bp, ops);
	if (error)
		goto out_free;

	if (whichfork == XFS_ATTR_FORK)
		xfs_buf_set_ref(bp, XFS_ATTR_BTREE_REF);
	else
		xfs_buf_set_ref(bp, XFS_DIR_BTREE_REF);
	*bpp = bp;
out_free:
	if (mapp != &map)
		kmem_free(mapp);

	return error;
}

/*
 * Readahead the dir/attr block.
 */
xfs_daddr_t
xfs_da_reada_buf(
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	xfs_daddr_t		mappedbno,
	int			whichfork,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf_map	map;
	struct xfs_buf_map	*mapp;
	int			nmap;
	int			error;

	mapp = &map;
	nmap = 1;
	error = xfs_dabuf_map(dp, bno, mappedbno, whichfork,
			      &mapp, &nmap);
	if (error) {
		/* mapping a hole is not an error, but we don't continue */
		if (error == -1)
			error = 0;
		goto out_free;
	}

	mappedbno = mapp[0].bm_bn;
	xfs_buf_readahead_map(dp->i_mount->m_ddev_targp, mapp, nmap, ops);

out_free:
	if (mapp != &map)
		kmem_free(mapp);

	if (error)
		return -1;
	return mappedbno;
}
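
/*
 * A typical read pairs xfs_da_read_buf() with the verifier for the block
 * type the caller expects; a sketch of reading an interior node (this is
 * essentially what xfs_da3_node_read() does before checking the magic
 * number):
 *
 *	error = xfs_da_read_buf(tp, dp, blkno, -1, &bp, XFS_DATA_FORK,
 *				&xfs_da3_node_buf_ops);
 *	if (error || !bp)
 *		return error;
 *
 * With mappedbno == -2, landing in a hole yields error == 0 and a NULL
 * *bpp, which is why callers must check both.
 */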