// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_dir2.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_trans.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_rtalloc.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_trans_space.h"
#include "xfs_buf_item.h"
#include "xfs_trace.h"
#include "xfs_attr_leaf.h"
#include "xfs_filestream.h"
#include "xfs_rmap.h"
#include "xfs_ag.h"
#include "xfs_ag_resv.h"
#include "xfs_refcount.h"
#include "xfs_icache.h"
#include "xfs_iomap.h"

struct kmem_cache		*xfs_bmap_intent_cache;

/*
 * Miscellaneous helper functions
 */

/*
 * Compute and fill in the value of the maximum depth of a bmap btree
 * in this filesystem.  Done once, during mount.
 */
void
xfs_bmap_compute_maxlevels(
	xfs_mount_t	*mp,		/* file system mount structure */
	int		whichfork)	/* data or attr fork */
{
	uint64_t	maxblocks;	/* max blocks at this level */
	xfs_extnum_t	maxleafents;	/* max leaf entries possible */
	int		level;		/* btree level */
	int		maxrootrecs;	/* max records in root block */
	int		minleafrecs;	/* min records in leaf block */
	int		minnoderecs;	/* min records in node block */
	int		sz;		/* root block size */

	/*
	 * The maximum number of extents in a fork, hence the maximum number of
	 * leaf entries, is controlled by the size of the on-disk extent count.
	 *
	 * Note that we can no longer assume that if we are in ATTR1 that the
	 * fork offset of all the inodes will be
	 * (xfs_default_attroffset(ip) >> 3) because we could have mounted with
	 * ATTR2 and then mounted back with ATTR1, keeping the i_forkoff's
	 * fixed but probably at various positions. Therefore, for both ATTR1
	 * and ATTR2 we have to assume the worst case scenario of a minimum
	 * size available.
	 */
	maxleafents = xfs_iext_max_nextents(xfs_has_large_extent_counts(mp),
				whichfork);
	if (whichfork == XFS_DATA_FORK)
		sz = XFS_BMDR_SPACE_CALC(MINDBTPTRS);
	else
		sz = XFS_BMDR_SPACE_CALC(MINABTPTRS);

	maxrootrecs = xfs_bmdr_maxrecs(sz, 0);
	minleafrecs = mp->m_bmap_dmnr[0];
	minnoderecs = mp->m_bmap_dmnr[1];
	maxblocks = howmany_64(maxleafents, minleafrecs);
	for (level = 1; maxblocks > 1; level++) {
		if (maxblocks <= maxrootrecs)
			maxblocks = 1;
		else
			maxblocks = howmany_64(maxblocks, minnoderecs);
	}
	mp->m_bm_maxlevels[whichfork] = level;
	ASSERT(mp->m_bm_maxlevels[whichfork] <= xfs_bmbt_maxlevels_ondisk());
}
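/*
 * Worked example (illustrative numbers only, not taken from any real
 * filesystem geometry): with maxleafents = 2^31, minleafrecs = 62 and
 * minnoderecs = 125, the loop above would compute
 *
 *	howmany_64(2^31, 62)	-> 34636834 leaf blocks
 *	howmany_64(..., 125)	->   277095 level-1 node blocks
 *	howmany_64(..., 125)	->     2217 level-2 node blocks
 *	howmany_64(..., 125)	->       18 level-3 node blocks
 *
 * and, assuming the root block can hold at least 18 records, those 18
 * blocks collapse into the root on the next pass, giving
 * m_bm_maxlevels = 5.
 */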
unsigned int
xfs_bmap_compute_attr_offset(
	struct xfs_mount	*mp)
{
	if (mp->m_sb.sb_inodesize == 256)
		return XFS_LITINO(mp) - XFS_BMDR_SPACE_CALC(MINABTPTRS);
	return XFS_BMDR_SPACE_CALC(6 * MINABTPTRS);
}

STATIC int				/* error */
xfs_bmbt_lookup_eq(
	struct xfs_btree_cur	*cur,
	struct xfs_bmbt_irec	*irec,
	int			*stat)	/* success/failure */
{
	cur->bc_rec.b = *irec;
	return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
}

STATIC int				/* error */
xfs_bmbt_lookup_first(
	struct xfs_btree_cur	*cur,
	int			*stat)	/* success/failure */
{
	cur->bc_rec.b.br_startoff = 0;
	cur->bc_rec.b.br_startblock = 0;
	cur->bc_rec.b.br_blockcount = 0;
	return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
}

/*
 * Check if the inode needs to be converted to btree format.
 */
static inline bool xfs_bmap_needs_btree(struct xfs_inode *ip, int whichfork)
{
	struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);

	return whichfork != XFS_COW_FORK &&
		ifp->if_format == XFS_DINODE_FMT_EXTENTS &&
		ifp->if_nextents > XFS_IFORK_MAXEXT(ip, whichfork);
}

/*
 * Check if the inode should be converted to extent format.
 */
static inline bool xfs_bmap_wants_extents(struct xfs_inode *ip, int whichfork)
{
	struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);

	return whichfork != XFS_COW_FORK &&
		ifp->if_format == XFS_DINODE_FMT_BTREE &&
		ifp->if_nextents <= XFS_IFORK_MAXEXT(ip, whichfork);
}
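/*
 * Note the hysteresis between the two helpers above: an extent-format
 * fork converts to btree format only when it grows past
 * XFS_IFORK_MAXEXT records, and a btree-format fork converts back only
 * when it shrinks to XFS_IFORK_MAXEXT or fewer. As an illustration
 * (made-up capacity), a fork that can hold 8 inline records flips to
 * btree format on the 9th extent and back to extent format once a
 * delete or merge brings it down to 8 again.
 */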
/*
 * Update the record referred to by cur to the value given by irec
 * This either works (return 0) or gets an EFSCORRUPTED error.
 */
STATIC int
xfs_bmbt_update(
	struct xfs_btree_cur	*cur,
	struct xfs_bmbt_irec	*irec)
{
	union xfs_btree_rec	rec;

	xfs_bmbt_disk_set_all(&rec.bmbt, irec);
	return xfs_btree_update(cur, &rec);
}

/*
 * Compute the worst-case number of indirect blocks that will be used
 * for ip's delayed extent of length "len".
 */
STATIC xfs_filblks_t
xfs_bmap_worst_indlen(
	xfs_inode_t	*ip,		/* incore inode pointer */
	xfs_filblks_t	len)		/* delayed extent length */
{
	int		level;		/* btree level number */
	int		maxrecs;	/* maximum record count at this level */
	xfs_mount_t	*mp;		/* mount structure */
	xfs_filblks_t	rval;		/* return value */

	mp = ip->i_mount;
	maxrecs = mp->m_bmap_dmxr[0];
	for (level = 0, rval = 0;
	     level < XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK);
	     level++) {
		len += maxrecs - 1;
		do_div(len, maxrecs);
		rval += len;
		if (len == 1)
			return rval + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) -
				level - 1;
		if (level == 0)
			maxrecs = mp->m_bmap_dmxr[1];
	}
	return rval;
}
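/*
 * Worked example (illustrative geometry, not from a real superblock):
 * with m_bmap_dmxr[0] = 124, m_bmap_dmxr[1] = 248 and 5 max levels, a
 * 1000-block delalloc extent costs at worst
 *
 *	level 0: (1000 + 123) / 124	-> 9 leaf blocks
 *	level 1: (9 + 247) / 248	-> 1 node block
 *
 * at which point len == 1, so we return 9 + 1 plus one block for each
 * of the (5 - 1 - 1) = 3 remaining levels up to the root: 13 blocks.
 */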
/*
 * Calculate the default attribute fork offset for newly created inodes.
 */
uint
xfs_default_attroffset(
	struct xfs_inode	*ip)
{
	if (ip->i_df.if_format == XFS_DINODE_FMT_DEV)
		return roundup(sizeof(xfs_dev_t), 8);
	return M_IGEO(ip->i_mount)->attr_fork_offset;
}

/*
 * Helper routine to reset inode i_forkoff field when switching attribute fork
 * from local to extent format - we reset it where possible to make space
 * available for inline data fork extents.
 */
STATIC void
xfs_bmap_forkoff_reset(
	xfs_inode_t	*ip,
	int		whichfork)
{
	if (whichfork == XFS_ATTR_FORK &&
	    ip->i_df.if_format != XFS_DINODE_FMT_DEV &&
	    ip->i_df.if_format != XFS_DINODE_FMT_BTREE) {
		uint	dfl_forkoff = xfs_default_attroffset(ip) >> 3;

		if (dfl_forkoff > ip->i_forkoff)
			ip->i_forkoff = dfl_forkoff;
	}
}

#ifdef DEBUG
STATIC struct xfs_buf *
xfs_bmap_get_bp(
	struct xfs_btree_cur	*cur,
	xfs_fsblock_t		bno)
{
	struct xfs_log_item	*lip;
	int			i;

	if (!cur)
		return NULL;

	for (i = 0; i < cur->bc_maxlevels; i++) {
		if (!cur->bc_levels[i].bp)
			break;
		if (xfs_buf_daddr(cur->bc_levels[i].bp) == bno)
			return cur->bc_levels[i].bp;
	}

	/* Chase down all the log items to see if the bp is there */
	list_for_each_entry(lip, &cur->bc_tp->t_items, li_trans) {
		struct xfs_buf_log_item	*bip = (struct xfs_buf_log_item *)lip;

		if (bip->bli_item.li_type == XFS_LI_BUF &&
		    xfs_buf_daddr(bip->bli_buf) == bno)
			return bip->bli_buf;
	}

	return NULL;
}

STATIC void
xfs_check_block(
	struct xfs_btree_block	*block,
	xfs_mount_t		*mp,
	int			root,
	short			sz)
{
	int			i, j, dmxr;
	__be64			*pp, *thispa;	/* pointer to block address */
	xfs_bmbt_key_t		*prevp, *keyp;

	ASSERT(be16_to_cpu(block->bb_level) > 0);

	prevp = NULL;
	for (i = 1; i <= xfs_btree_get_numrecs(block); i++) {
		dmxr = mp->m_bmap_dmxr[0];
		keyp = XFS_BMBT_KEY_ADDR(mp, block, i);

		if (prevp) {
			ASSERT(be64_to_cpu(prevp->br_startoff) <
			       be64_to_cpu(keyp->br_startoff));
		}
		prevp = keyp;

		/*
		 * Compare the block numbers to see if there are dups.
		 */
		if (root)
			pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, i, sz);
		else
			pp = XFS_BMBT_PTR_ADDR(mp, block, i, dmxr);

		for (j = i + 1; j <= be16_to_cpu(block->bb_numrecs); j++) {
			if (root)
				thispa = XFS_BMAP_BROOT_PTR_ADDR(mp, block, j, sz);
			else
				thispa = XFS_BMBT_PTR_ADDR(mp, block, j, dmxr);
			if (*thispa == *pp) {
				xfs_warn(mp, "%s: thispa(%d) == pp(%d) %lld",
					__func__, j, i,
					(unsigned long long)be64_to_cpu(*thispa));
				xfs_err(mp, "%s: ptrs are equal in node\n",
					__func__);
				xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
			}
		}
	}
}
/*
 * Check that the extents for the inode ip are in the right order in all
 * btree leaves. This becomes prohibitively expensive for large extent count
 * files, so don't bother with inodes that have more than 10,000 extents in
 * them. The btree record ordering checks will still be done, so for such large
 * bmapbt constructs that is going to catch most corruptions.
 */
STATIC void
xfs_bmap_check_leaf_extents(
	struct xfs_btree_cur	*cur,	/* btree cursor or null */
	xfs_inode_t		*ip,	/* incore inode pointer */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_btree_block	*block;	/* current btree block */
	xfs_fsblock_t		bno;	/* block # of "block" */
	struct xfs_buf		*bp;	/* buffer for "block" */
	int			error;	/* error return value */
	xfs_extnum_t		i = 0, j;	/* index into the extents list */
	int			level;	/* btree level, for checking */
	__be64			*pp;	/* pointer to block address */
	xfs_bmbt_rec_t		*ep;	/* pointer to current extent */
	xfs_bmbt_rec_t		last = {0, 0};	/* last extent in prev block */
	xfs_bmbt_rec_t		*nextp;	/* pointer to next extent */
	int			bp_release = 0;

	if (ifp->if_format != XFS_DINODE_FMT_BTREE)
		return;

	/* skip large extent count inodes */
	if (ip->i_df.if_nextents > 10000)
		return;

	bno = NULLFSBLOCK;
	block = ifp->if_broot;
	/*
	 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
	 */
	level = be16_to_cpu(block->bb_level);
	ASSERT(level > 0);
	xfs_check_block(block, mp, 1, ifp->if_broot_bytes);
	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
	bno = be64_to_cpu(*pp);

	ASSERT(bno != NULLFSBLOCK);
	ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
	ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);

	/*
	 * Go down the tree until leaf level is reached, following the first
	 * pointer (leftmost) at each level.
	 */
	while (level-- > 0) {
		/* See if buf is in cur first */
		bp_release = 0;
		bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
		if (!bp) {
			bp_release = 1;
			error = xfs_btree_read_bufl(mp, NULL, bno, &bp,
						XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
			if (error)
				goto error_norelse;
		}
		block = XFS_BUF_TO_BLOCK(bp);
		if (level == 0)
			break;

		/*
		 * Check this block for basic sanity (increasing keys and
		 * no duplicate blocks).
		 */
		xfs_check_block(block, mp, 0, 0);
		pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
		bno = be64_to_cpu(*pp);
		if (XFS_IS_CORRUPT(mp, !xfs_verify_fsbno(mp, bno))) {
			error = -EFSCORRUPTED;
			goto error0;
		}
		if (bp_release) {
			bp_release = 0;
			xfs_trans_brelse(NULL, bp);
		}
	}

	/*
	 * Here with bp and block set to the leftmost leaf node in the tree.
	 */
	i = 0;

	/*
	 * Loop over all leaf nodes checking that all extents are in the right
	 * order.
	 */
	for (;;) {
		xfs_fsblock_t	nextbno;
		xfs_extnum_t	num_recs;

		num_recs = xfs_btree_get_numrecs(block);

		/*
		 * Read-ahead the next leaf block, if any.
		 */
		nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);

		/*
		 * Check all the extents to make sure they are OK.
		 * If we had a previous block, the last entry should
		 * conform with the first entry in this one.
		 */
		ep = XFS_BMBT_REC_ADDR(mp, block, 1);
		if (i) {
			ASSERT(xfs_bmbt_disk_get_startoff(&last) +
			       xfs_bmbt_disk_get_blockcount(&last) <=
			       xfs_bmbt_disk_get_startoff(ep));
		}
		for (j = 1; j < num_recs; j++) {
			nextp = XFS_BMBT_REC_ADDR(mp, block, j + 1);
			ASSERT(xfs_bmbt_disk_get_startoff(ep) +
			       xfs_bmbt_disk_get_blockcount(ep) <=
			       xfs_bmbt_disk_get_startoff(nextp));
			ep = nextp;
		}

		last = *ep;
		i += num_recs;
		if (bp_release) {
			bp_release = 0;
			xfs_trans_brelse(NULL, bp);
		}
		bno = nextbno;
		/*
		 * If we've reached the end, stop.
		 */
		if (bno == NULLFSBLOCK)
			break;

		bp_release = 0;
		bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
		if (!bp) {
			bp_release = 1;
			error = xfs_btree_read_bufl(mp, NULL, bno, &bp,
						XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
			if (error)
				goto error_norelse;
		}
		block = XFS_BUF_TO_BLOCK(bp);
	}

	return;

error0:
	xfs_warn(mp, "%s: at error0", __func__);
	if (bp_release)
		xfs_trans_brelse(NULL, bp);
error_norelse:
	xfs_warn(mp, "%s: BAD after btree leaves for %llu extents",
		__func__, i);
	xfs_err(mp, "%s: CORRUPTED BTREE OR SOMETHING", __func__);
	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
	return;
}
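/*
 * The ordering invariant asserted above, illustrated with made-up
 * offsets: a record covering [startoff 0, blockcount 16] may be
 * followed by one starting at offset 16 or later; a successor starting
 * at offset 12 would overlap its predecessor and trip the ASSERT.
 */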
/*
 * Validate that the bmbt_irecs being returned from bmapi are valid
 * given the caller's original parameters.  Specifically check the
 * ranges of the returned irecs to ensure that they only extend beyond
 * the given parameters if the XFS_BMAPI_ENTIRE flag was set.
 */
STATIC void
xfs_bmap_validate_ret(
	xfs_fileoff_t		bno,
	xfs_filblks_t		len,
	uint32_t		flags,
	xfs_bmbt_irec_t		*mval,
	int			nmap,
	int			ret_nmap)
{
	int			i;	/* index to map values */

	ASSERT(ret_nmap <= nmap);

	for (i = 0; i < ret_nmap; i++) {
		ASSERT(mval[i].br_blockcount > 0);
		if (!(flags & XFS_BMAPI_ENTIRE)) {
			ASSERT(mval[i].br_startoff >= bno);
			ASSERT(mval[i].br_blockcount <= len);
			ASSERT(mval[i].br_startoff + mval[i].br_blockcount <=
			       bno + len);
		} else {
			ASSERT(mval[i].br_startoff < bno + len);
			ASSERT(mval[i].br_startoff + mval[i].br_blockcount >
			       bno);
		}
		ASSERT(i == 0 ||
		       mval[i - 1].br_startoff + mval[i - 1].br_blockcount ==
		       mval[i].br_startoff);
		ASSERT(mval[i].br_startblock != DELAYSTARTBLOCK &&
		       mval[i].br_startblock != HOLESTARTBLOCK);
		ASSERT(mval[i].br_state == XFS_EXT_NORM ||
		       mval[i].br_state == XFS_EXT_UNWRITTEN);
	}
}

#else
#define xfs_bmap_check_leaf_extents(cur, ip, whichfork)		do { } while (0)
#define	xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)	do { } while (0)
#endif /* DEBUG */

/*
 * Inode fork format manipulation functions
 */

/*
 * Convert the inode format to extent format if it currently is in btree format,
 * but the extent list is small enough that it fits into the extent format.
 *
 * Since the extents are already in-core, all we have to do is give up the space
 * for the btree root and pitch the leaf block.
 */
STATIC int				/* error */
xfs_bmap_btree_to_extents(
	struct xfs_trans	*tp,	/* transaction pointer */
	struct xfs_inode	*ip,	/* incore inode pointer */
	struct xfs_btree_cur	*cur,	/* btree cursor */
	int			*logflagsp, /* inode logging flags */
	int			whichfork)  /* data or attr fork */
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_btree_block	*rblock = ifp->if_broot;
	struct xfs_btree_block	*cblock;/* child btree block */
	xfs_fsblock_t		cbno;	/* child block number */
	struct xfs_buf		*cbp;	/* child block's buffer */
	int			error;	/* error return value */
	__be64			*pp;	/* ptr to block address */
	struct xfs_owner_info	oinfo;

	/* check if we actually need the extent format first: */
	if (!xfs_bmap_wants_extents(ip, whichfork))
		return 0;

	ASSERT(cur);
	ASSERT(whichfork != XFS_COW_FORK);
	ASSERT(ifp->if_format == XFS_DINODE_FMT_BTREE);
	ASSERT(be16_to_cpu(rblock->bb_level) == 1);
	ASSERT(be16_to_cpu(rblock->bb_numrecs) == 1);
	ASSERT(xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0) == 1);

	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, ifp->if_broot_bytes);
	cbno = be64_to_cpu(*pp);
#ifdef DEBUG
	if (XFS_IS_CORRUPT(cur->bc_mp, !xfs_btree_check_lptr(cur, cbno, 1)))
		return -EFSCORRUPTED;
#endif
	error = xfs_btree_read_bufl(mp, tp, cbno, &cbp, XFS_BMAP_BTREE_REF,
				&xfs_bmbt_buf_ops);
	if (error)
		return error;
	cblock = XFS_BUF_TO_BLOCK(cbp);
	if ((error = xfs_btree_check_block(cur, cblock, 0, cbp)))
		return error;
	xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork);
	xfs_free_extent_later(cur->bc_tp, cbno, 1, &oinfo);
	ip->i_nblocks--;
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
	xfs_trans_binval(tp, cbp);
	if (cur->bc_levels[0].bp == cbp)
		cur->bc_levels[0].bp = NULL;
	xfs_iroot_realloc(ip, -1, whichfork);
	ASSERT(ifp->if_broot == NULL);
	ifp->if_format = XFS_DINODE_FMT_EXTENTS;
	*logflagsp |= XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
	return 0;
}
/*
 * Convert an extents-format file into a btree-format file.
 * The new file will have a root block (in the inode) and a single child block.
 */
STATIC int					/* error */
xfs_bmap_extents_to_btree(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode pointer */
	struct xfs_btree_cur	**curp,		/* cursor returned to caller */
	int			wasdel,		/* converting a delayed alloc */
	int			*logflagsp,	/* inode logging flags */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_btree_block	*ablock;	/* allocated (child) bt block */
	struct xfs_buf		*abp;		/* buffer for ablock */
	struct xfs_alloc_arg	args;		/* allocation arguments */
	struct xfs_bmbt_rec	*arp;		/* child record pointer */
	struct xfs_btree_block	*block;		/* btree root block */
	struct xfs_btree_cur	*cur;		/* bmap btree cursor */
	int			error;		/* error return value */
	struct xfs_ifork	*ifp;		/* inode fork pointer */
	struct xfs_bmbt_key	*kp;		/* root block key pointer */
	struct xfs_mount	*mp;		/* mount structure */
	xfs_bmbt_ptr_t		*pp;		/* root block address pointer */
	struct xfs_iext_cursor	icur;
	struct xfs_bmbt_irec	rec;
	xfs_extnum_t		cnt = 0;

	mp = ip->i_mount;
	ASSERT(whichfork != XFS_COW_FORK);
	ifp = xfs_ifork_ptr(ip, whichfork);
	ASSERT(ifp->if_format == XFS_DINODE_FMT_EXTENTS);

	/*
	 * Make space in the inode incore. This needs to be undone if we fail
	 * to expand the root.
	 */
	xfs_iroot_realloc(ip, 1, whichfork);

	/*
	 * Fill in the root.
	 */
	block = ifp->if_broot;
	xfs_btree_init_block_int(mp, block, XFS_BUF_DADDR_NULL,
				 XFS_BTNUM_BMAP, 1, 1, ip->i_ino,
				 XFS_BTREE_LONG_PTRS);
	/*
	 * Need a cursor.  Can't allocate until bb_level is filled in.
	 */
	cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
	cur->bc_ino.flags = wasdel ? XFS_BTCUR_BMBT_WASDEL : 0;
	/*
	 * Convert to a btree with two levels, one record in root.
	 */
	ifp->if_format = XFS_DINODE_FMT_BTREE;
	memset(&args, 0, sizeof(args));
	args.tp = tp;
	args.mp = mp;
	xfs_rmap_ino_bmbt_owner(&args.oinfo, ip->i_ino, whichfork);

	args.minlen = args.maxlen = args.prod = 1;
	args.wasdel = wasdel;
	*logflagsp = 0;
	error = xfs_alloc_vextent_start_ag(&args,
				XFS_INO_TO_FSB(mp, ip->i_ino));
	if (error)
		goto out_root_realloc;

	/*
	 * Allocation can't fail, the space was reserved.
	 */
	if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) {
		error = -ENOSPC;
		goto out_root_realloc;
	}

	cur->bc_ino.allocated++;
	ip->i_nblocks++;
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
	error = xfs_trans_get_buf(tp, mp->m_ddev_targp,
			XFS_FSB_TO_DADDR(mp, args.fsbno),
			mp->m_bsize, 0, &abp);
	if (error)
		goto out_unreserve_dquot;

	/*
	 * Fill in the child block.
	 */
	abp->b_ops = &xfs_bmbt_buf_ops;
	ablock = XFS_BUF_TO_BLOCK(abp);
	xfs_btree_init_block_int(mp, ablock, xfs_buf_daddr(abp),
				XFS_BTNUM_BMAP, 0, 0, ip->i_ino,
				XFS_BTREE_LONG_PTRS);

	for_each_xfs_iext(ifp, &icur, &rec) {
		if (isnullstartblock(rec.br_startblock))
			continue;
		arp = XFS_BMBT_REC_ADDR(mp, ablock, 1 + cnt);
		xfs_bmbt_disk_set_all(arp, &rec);
		cnt++;
	}
	ASSERT(cnt == ifp->if_nextents);
	xfs_btree_set_numrecs(ablock, cnt);

	/*
	 * Fill in the root key and pointer.
	 */
	kp = XFS_BMBT_KEY_ADDR(mp, block, 1);
	arp = XFS_BMBT_REC_ADDR(mp, ablock, 1);
	kp->br_startoff = cpu_to_be64(xfs_bmbt_disk_get_startoff(arp));
	pp = XFS_BMBT_PTR_ADDR(mp, block, 1, xfs_bmbt_get_maxrecs(cur,
						be16_to_cpu(block->bb_level)));
	*pp = cpu_to_be64(args.fsbno);

	/*
	 * Do all this logging at the end so that
	 * the root is at the right level.
	 */
	xfs_btree_log_block(cur, abp, XFS_BB_ALL_BITS);
	xfs_btree_log_recs(cur, abp, 1, be16_to_cpu(ablock->bb_numrecs));
	ASSERT(*curp == NULL);
	*curp = cur;
	*logflagsp = XFS_ILOG_CORE | xfs_ilog_fbroot(whichfork);
	return 0;

out_unreserve_dquot:
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
out_root_realloc:
	xfs_iroot_realloc(ip, -1, whichfork);
	ifp->if_format = XFS_DINODE_FMT_EXTENTS;
	ASSERT(ifp->if_broot == NULL);
	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);

	return error;
}
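/*
 * Resulting shape, sketched for clarity (this diagram is not part of
 * the original source): after the conversion the fork root lives in
 * the inode with a single key/pointer pair addressing the one
 * allocated leaf block that holds every incore extent record:
 *
 *	if_broot (level 1, numrecs 1) ---> ablock (level 0, cnt records)
 *
 * xfs_bmap_btree_to_extents() above performs the inverse collapse once
 * the record count shrinks enough.
 */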
/*
 * Convert a local file to an extents file.
 * This code is out of bounds for data forks of regular files,
 * since the file data needs to get logged so things will stay consistent.
 * (The bmap-level manipulations are ok, though).
 */
void
xfs_bmap_local_to_extents_empty(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork)
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);

	ASSERT(whichfork != XFS_COW_FORK);
	ASSERT(ifp->if_format == XFS_DINODE_FMT_LOCAL);
	ASSERT(ifp->if_bytes == 0);
	ASSERT(ifp->if_nextents == 0);

	xfs_bmap_forkoff_reset(ip, whichfork);
	ifp->if_u1.if_root = NULL;
	ifp->if_height = 0;
	ifp->if_format = XFS_DINODE_FMT_EXTENTS;
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
}

STATIC int				/* error */
xfs_bmap_local_to_extents(
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_inode_t	*ip,		/* incore inode pointer */
	xfs_extlen_t	total,		/* total blocks needed by transaction */
	int		*logflagsp,	/* inode logging flags */
	int		whichfork,
	void		(*init_fn)(struct xfs_trans *tp,
				   struct xfs_buf *bp,
				   struct xfs_inode *ip,
				   struct xfs_ifork *ifp))
{
	int		error = 0;
	int		flags;		/* logging flags returned */
	struct xfs_ifork *ifp;		/* inode fork pointer */
	xfs_alloc_arg_t	args;		/* allocation arguments */
	struct xfs_buf	*bp;		/* buffer for extent block */
	struct xfs_bmbt_irec rec;
	struct xfs_iext_cursor icur;

	/*
	 * We don't want to deal with the case of keeping inode data inline yet.
	 * So sending the data fork of a regular inode is invalid.
	 */
	ASSERT(!(S_ISREG(VFS_I(ip)->i_mode) && whichfork == XFS_DATA_FORK));
	ifp = xfs_ifork_ptr(ip, whichfork);
	ASSERT(ifp->if_format == XFS_DINODE_FMT_LOCAL);

	if (!ifp->if_bytes) {
		xfs_bmap_local_to_extents_empty(tp, ip, whichfork);
		flags = XFS_ILOG_CORE;
		goto done;
	}

	flags = 0;
	error = 0;
	memset(&args, 0, sizeof(args));
	args.tp = tp;
	args.mp = ip->i_mount;
	xfs_rmap_ino_owner(&args.oinfo, ip->i_ino, whichfork, 0);

	/*
	 * Allocate a block.  We know we need only one, since the
	 * file currently fits in an inode.
	 */
	args.total = total;
	args.minlen = args.maxlen = args.prod = 1;
	error = xfs_alloc_vextent_start_ag(&args,
			XFS_INO_TO_FSB(args.mp, ip->i_ino));
	if (error)
		goto done;

	/* Can't fail, the space was reserved. */
	ASSERT(args.fsbno != NULLFSBLOCK);
	ASSERT(args.len == 1);
	error = xfs_trans_get_buf(tp, args.mp->m_ddev_targp,
			XFS_FSB_TO_DADDR(args.mp, args.fsbno),
			args.mp->m_bsize, 0, &bp);
	if (error)
		goto done;

	/*
	 * Initialize the block, copy the data and log the remote buffer.
	 *
	 * The callout is responsible for logging because the remote format
	 * might differ from the local format and thus we don't know how much
	 * to log here. Note that init_fn must also set the buffer log item
	 * type correctly.
	 */
	init_fn(tp, bp, ip, ifp);

	/* account for the change in fork size */
	xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
	xfs_bmap_local_to_extents_empty(tp, ip, whichfork);
	flags |= XFS_ILOG_CORE;

	ifp->if_u1.if_root = NULL;
	ifp->if_height = 0;

	rec.br_startoff = 0;
	rec.br_startblock = args.fsbno;
	rec.br_blockcount = 1;
	rec.br_state = XFS_EXT_NORM;
	xfs_iext_first(ifp, &icur);
	xfs_iext_insert(ip, &icur, &rec, 0);

	ifp->if_nextents = 1;
	ip->i_nblocks = 1;
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
	flags |= xfs_ilog_fext(whichfork);

done:
	*logflagsp = flags;
	return error;
}
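/*
 * Example callout, as used by the symlink conversion in
 * xfs_bmap_add_attrfork_local() below:
 *
 *	error = xfs_bmap_local_to_extents(tp, ip, 1, &logflags,
 *			XFS_DATA_FORK, xfs_symlink_local_to_remote);
 *
 * where xfs_symlink_local_to_remote() copies the inline symlink target
 * into the new remote block, sets the buffer log item type and logs
 * the range it initialised.
 */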
/*
 * Called from xfs_bmap_add_attrfork to handle btree format files.
 */
STATIC int					/* error */
xfs_bmap_add_attrfork_btree(
	xfs_trans_t		*tp,		/* transaction pointer */
	xfs_inode_t		*ip,		/* incore inode pointer */
	int			*flags)		/* inode logging flags */
{
	struct xfs_btree_block	*block = ip->i_df.if_broot;
	struct xfs_btree_cur	*cur;		/* btree cursor */
	int			error;		/* error return value */
	xfs_mount_t		*mp;		/* file system mount struct */
	int			stat;		/* newroot status */

	mp = ip->i_mount;

	if (XFS_BMAP_BMDR_SPACE(block) <= xfs_inode_data_fork_size(ip))
		*flags |= XFS_ILOG_DBROOT;
	else {
		cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK);
		error = xfs_bmbt_lookup_first(cur, &stat);
		if (error)
			goto error0;
		/* must be at least one entry */
		if (XFS_IS_CORRUPT(mp, stat != 1)) {
			error = -EFSCORRUPTED;
			goto error0;
		}
		if ((error = xfs_btree_new_iroot(cur, flags, &stat)))
			goto error0;
		if (stat == 0) {
			xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
			return -ENOSPC;
		}
		cur->bc_ino.allocated = 0;
		xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
	}
	return 0;
error0:
	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
	return error;
}

/*
 * Called from xfs_bmap_add_attrfork to handle extents format files.
 */
STATIC int					/* error */
xfs_bmap_add_attrfork_extents(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode pointer */
	int			*flags)		/* inode logging flags */
{
	struct xfs_btree_cur	*cur;		/* bmap btree cursor */
	int			error;		/* error return value */

	if (ip->i_df.if_nextents * sizeof(struct xfs_bmbt_rec) <=
	    xfs_inode_data_fork_size(ip))
		return 0;
	cur = NULL;
	error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0, flags,
					  XFS_DATA_FORK);
	if (cur) {
		cur->bc_ino.allocated = 0;
		xfs_btree_del_cursor(cur, error);
	}
	return error;
}
/*
 * Called from xfs_bmap_add_attrfork to handle local format files. Each
 * different data fork content type needs a different callout to do the
 * conversion. Some are basic and only require special block initialisation
 * callouts for the data formatting, others (directories) are so specialised
 * they handle everything themselves.
 *
 * XXX (dgc): investigate whether directory conversion can use the generic
 * formatting callout. It should be possible - it's just a very complex
 * formatter.
 */
STATIC int					/* error */
xfs_bmap_add_attrfork_local(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode pointer */
	int			*flags)		/* inode logging flags */
{
	struct xfs_da_args	dargs;		/* args for dir/attr code */

	if (ip->i_df.if_bytes <= xfs_inode_data_fork_size(ip))
		return 0;

	if (S_ISDIR(VFS_I(ip)->i_mode)) {
		memset(&dargs, 0, sizeof(dargs));
		dargs.geo = ip->i_mount->m_dir_geo;
		dargs.dp = ip;
		dargs.total = dargs.geo->fsbcount;
		dargs.whichfork = XFS_DATA_FORK;
		dargs.trans = tp;
		return xfs_dir2_sf_to_block(&dargs);
	}

	if (S_ISLNK(VFS_I(ip)->i_mode))
		return xfs_bmap_local_to_extents(tp, ip, 1, flags,
						 XFS_DATA_FORK,
						 xfs_symlink_local_to_remote);

	/* should only be called for types that support local format data */
	ASSERT(0);
	return -EFSCORRUPTED;
}

/*
 * Set an inode attr fork offset based on the format of the data fork.
 */
static int
xfs_bmap_set_attrforkoff(
	struct xfs_inode	*ip,
	int			size,
	int			*version)
{
	int			default_size = xfs_default_attroffset(ip) >> 3;

	switch (ip->i_df.if_format) {
	case XFS_DINODE_FMT_DEV:
		ip->i_forkoff = default_size;
		break;
	case XFS_DINODE_FMT_LOCAL:
	case XFS_DINODE_FMT_EXTENTS:
	case XFS_DINODE_FMT_BTREE:
		ip->i_forkoff = xfs_attr_shortform_bytesfit(ip, size);
		if (!ip->i_forkoff)
			ip->i_forkoff = default_size;
		else if (xfs_has_attr2(ip->i_mount) && version)
			*version = 2;
		break;
	default:
		ASSERT(0);
		return -EINVAL;
	}

	return 0;
}
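/*
 * Note for the helper above: i_forkoff is expressed in 8-byte units,
 * which is why the xfs_default_attroffset() byte value is shifted
 * right by 3 before being stored. As an illustration (hypothetical
 * sizes), a default attr offset of 96 bytes is recorded as
 * i_forkoff = 12.
 */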
/*
 * Convert inode from non-attributed to attributed.
 * Must not be in a transaction, ip must not be locked.
 */
int						/* error code */
xfs_bmap_add_attrfork(
	xfs_inode_t		*ip,		/* incore inode pointer */
	int			size,		/* space new attribute needs */
	int			rsvd)		/* xact may use reserved blks */
{
	xfs_mount_t		*mp;		/* mount structure */
	xfs_trans_t		*tp;		/* transaction pointer */
	int			blks;		/* space reservation */
	int			version = 1;	/* superblock attr version */
	int			logflags;	/* logging flags */
	int			error;		/* error return value */

	ASSERT(xfs_inode_has_attr_fork(ip) == 0);

	mp = ip->i_mount;
	ASSERT(!XFS_NOT_DQATTACHED(mp, ip));

	blks = XFS_ADDAFORK_SPACE_RES(mp);

	error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_addafork, blks, 0,
			rsvd, &tp);
	if (error)
		return error;
	if (xfs_inode_has_attr_fork(ip))
		goto trans_cancel;

	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	error = xfs_bmap_set_attrforkoff(ip, size, &version);
	if (error)
		goto trans_cancel;

	xfs_ifork_init_attr(ip, XFS_DINODE_FMT_EXTENTS, 0);
	logflags = 0;
	switch (ip->i_df.if_format) {
	case XFS_DINODE_FMT_LOCAL:
		error = xfs_bmap_add_attrfork_local(tp, ip, &logflags);
		break;
	case XFS_DINODE_FMT_EXTENTS:
		error = xfs_bmap_add_attrfork_extents(tp, ip, &logflags);
		break;
	case XFS_DINODE_FMT_BTREE:
		error = xfs_bmap_add_attrfork_btree(tp, ip, &logflags);
		break;
	default:
		error = 0;
		break;
	}
	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	if (error)
		goto trans_cancel;
	if (!xfs_has_attr(mp) ||
	    (!xfs_has_attr2(mp) && version == 2)) {
		bool log_sb = false;

		spin_lock(&mp->m_sb_lock);
		if (!xfs_has_attr(mp)) {
			xfs_add_attr(mp);
			log_sb = true;
		}
		if (!xfs_has_attr2(mp) && version == 2) {
			xfs_add_attr2(mp);
			log_sb = true;
		}
		spin_unlock(&mp->m_sb_lock);
		if (log_sb)
			xfs_log_sb(tp);
	}

	error = xfs_trans_commit(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;

trans_cancel:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

/*
 * Internal and external extent tree search functions.
 */

struct xfs_iread_state {
	struct xfs_iext_cursor	icur;
	xfs_extnum_t		loaded;
};

int
xfs_bmap_complain_bad_rec(
	struct xfs_inode	*ip,
	int			whichfork,
	xfs_failaddr_t		fa,
	const struct xfs_bmbt_irec *irec)
{
	struct xfs_mount	*mp = ip->i_mount;
	const char		*forkname;

	switch (whichfork) {
	case XFS_DATA_FORK:	forkname = "data"; break;
	case XFS_ATTR_FORK:	forkname = "attr"; break;
	case XFS_COW_FORK:	forkname = "CoW"; break;
	default:		forkname = "???"; break;
	}

	xfs_warn(mp,
 "Bmap BTree record corruption in inode 0x%llx %s fork detected at %pS!",
				ip->i_ino, forkname, fa);
	xfs_warn(mp,
		"Offset 0x%llx, start block 0x%llx, block count 0x%llx state 0x%x",
		irec->br_startoff, irec->br_startblock, irec->br_blockcount,
		irec->br_state);

	return -EFSCORRUPTED;
}
/* Stuff every bmbt record from this block into the incore extent map. */
static int
xfs_iread_bmbt_block(
	struct xfs_btree_cur	*cur,
	int			level,
	void			*priv)
{
	struct xfs_iread_state	*ir = priv;
	struct xfs_mount	*mp = cur->bc_mp;
	struct xfs_inode	*ip = cur->bc_ino.ip;
	struct xfs_btree_block	*block;
	struct xfs_buf		*bp;
	struct xfs_bmbt_rec	*frp;
	xfs_extnum_t		num_recs;
	xfs_extnum_t		j;
	int			whichfork = cur->bc_ino.whichfork;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);

	block = xfs_btree_get_block(cur, level, &bp);

	/* Abort if we find more records than nextents. */
	num_recs = xfs_btree_get_numrecs(block);
	if (unlikely(ir->loaded + num_recs > ifp->if_nextents)) {
		xfs_warn(ip->i_mount, "corrupt dinode %llu, (btree extents).",
				(unsigned long long)ip->i_ino);
		xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__, block,
				sizeof(*block), __this_address);
		return -EFSCORRUPTED;
	}

	/* Copy records into the incore cache. */
	frp = XFS_BMBT_REC_ADDR(mp, block, 1);
	for (j = 0; j < num_recs; j++, frp++, ir->loaded++) {
		struct xfs_bmbt_irec	new;
		xfs_failaddr_t		fa;

		xfs_bmbt_disk_get_all(frp, &new);
		fa = xfs_bmap_validate_extent(ip, whichfork, &new);
		if (fa) {
			xfs_inode_verifier_error(ip, -EFSCORRUPTED,
					"xfs_iread_extents(2)", frp,
					sizeof(*frp), fa);
			return xfs_bmap_complain_bad_rec(ip, whichfork, fa,
					&new);
		}
		xfs_iext_insert(ip, &ir->icur, &new,
				xfs_bmap_fork_to_state(whichfork));
		trace_xfs_read_extent(ip, &ir->icur,
				xfs_bmap_fork_to_state(whichfork), _THIS_IP_);
		xfs_iext_next(ifp, &ir->icur);
	}

	return 0;
}

/*
 * Read in extents from a btree-format inode.
 */
int
xfs_iread_extents(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork)
{
	struct xfs_iread_state	ir;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_btree_cur	*cur;
	int			error;

	if (!xfs_need_iread_extents(ifp))
		return 0;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	ir.loaded = 0;
	xfs_iext_first(ifp, &ir.icur);
	cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
	error = xfs_btree_visit_blocks(cur, xfs_iread_bmbt_block,
			XFS_BTREE_VISIT_RECORDS, &ir);
	xfs_btree_del_cursor(cur, error);
	if (error)
		goto out;

	if (XFS_IS_CORRUPT(mp, ir.loaded != ifp->if_nextents)) {
		error = -EFSCORRUPTED;
		goto out;
	}
	ASSERT(ir.loaded == xfs_iext_count(ifp));
	/*
	 * Use release semantics so that we can use acquire semantics in
	 * xfs_need_iread_extents and be guaranteed to see a valid mapping tree
	 * after that load.
	 */
	smp_store_release(&ifp->if_needextents, 0);
	return 0;
out:
	xfs_iext_destroy(ifp);
	return error;
}
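/*
 * Reader-side sketch of the release/acquire pairing above (for
 * illustration only; the real check lives in xfs_need_iread_extents()):
 *
 *	if (!smp_load_acquire(&ifp->if_needextents))
 *		;	// extent tree writes above are visible here
 *
 * so any caller that observes if_needextents == 0 may walk the incore
 * extent tree without further ordering beyond the usual ILOCK rules.
 */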
/*
 * Returns the relative block number of the first unused block(s) in the given
 * fork with at least "len" logically contiguous blocks free.  This is the
 * lowest-address hole if the fork has holes, else the first block past the end
 * of fork.  Return 0 if the fork is currently local (in-inode).
 */
int						/* error */
xfs_bmap_first_unused(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode */
	xfs_extlen_t		len,		/* size of hole to find */
	xfs_fileoff_t		*first_unused,	/* unused block */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_bmbt_irec	got;
	struct xfs_iext_cursor	icur;
	xfs_fileoff_t		lastaddr = 0;
	xfs_fileoff_t		lowest, max;
	int			error;

	if (ifp->if_format == XFS_DINODE_FMT_LOCAL) {
		*first_unused = 0;
		return 0;
	}

	ASSERT(xfs_ifork_has_extents(ifp));

	error = xfs_iread_extents(tp, ip, whichfork);
	if (error)
		return error;

	lowest = max = *first_unused;
	for_each_xfs_iext(ifp, &icur, &got) {
		/*
		 * See if the hole before this extent will work.
		 */
		if (got.br_startoff >= lowest + len &&
		    got.br_startoff - max >= len)
			break;
		lastaddr = got.br_startoff + got.br_blockcount;
		max = XFS_FILEOFF_MAX(lastaddr, lowest);
	}

	*first_unused = max;
	return 0;
}
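/*
 * Worked example for the hole search above (made-up extents): with
 * mappings at [startoff 0, count 10] and [startoff 20, count 10], a
 * caller passing *first_unused = 0 and len = 5 walks past the first
 * extent (max becomes 10), then sees that the second extent leaves a
 * 10-block hole at offset 10, breaks out, and returns 10. If no
 * suitable hole exists, the loop falls off the end and returns the
 * offset just past the last extent.
 */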
/*
 * Returns the file-relative block number of the last block - 1 before
 * last_block (input value) in the file.
 * This is not based on i_size, it is based on the extent records.
 * Returns 0 for local files, as they do not have extent records.
 */
int						/* error */
xfs_bmap_last_before(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode */
	xfs_fileoff_t		*last_block,	/* last block */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_bmbt_irec	got;
	struct xfs_iext_cursor	icur;
	int			error;

	switch (ifp->if_format) {
	case XFS_DINODE_FMT_LOCAL:
		*last_block = 0;
		return 0;
	case XFS_DINODE_FMT_BTREE:
	case XFS_DINODE_FMT_EXTENTS:
		break;
	default:
		ASSERT(0);
		return -EFSCORRUPTED;
	}

	error = xfs_iread_extents(tp, ip, whichfork);
	if (error)
		return error;

	if (!xfs_iext_lookup_extent_before(ip, ifp, last_block, &icur, &got))
		*last_block = 0;
	return 0;
}

int
xfs_bmap_last_extent(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_bmbt_irec	*rec,
	int			*is_empty)
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_iext_cursor	icur;
	int			error;

	error = xfs_iread_extents(tp, ip, whichfork);
	if (error)
		return error;

	xfs_iext_last(ifp, &icur);
	if (!xfs_iext_get_extent(ifp, &icur, rec))
		*is_empty = 1;
	else
		*is_empty = 0;
	return 0;
}

/*
 * Check the last inode extent to determine whether this allocation will result
 * in blocks being allocated at the end of the file. When we allocate new data
 * blocks at the end of the file which do not start at the previous data block,
 * we will try to align the new blocks at stripe unit boundaries.
 *
 * Returns 1 in bma->aeof if the file (fork) is empty as any new write will be
 * at, or past the EOF.
 */
STATIC int
xfs_bmap_isaeof(
	struct xfs_bmalloca	*bma,
	int			whichfork)
{
	struct xfs_bmbt_irec	rec;
	int			is_empty;
	int			error;

	bma->aeof = false;
	error = xfs_bmap_last_extent(NULL, bma->ip, whichfork, &rec,
				     &is_empty);
	if (error)
		return error;

	if (is_empty) {
		bma->aeof = true;
		return 0;
	}

	/*
	 * Check if we are allocating at or past the last extent, or at least
	 * into the last delayed allocated extent.
	 */
	bma->aeof = bma->offset >= rec.br_startoff + rec.br_blockcount ||
		(bma->offset >= rec.br_startoff &&
		 isnullstartblock(rec.br_startblock));
	return 0;
}

/*
 * Returns the file-relative block number of the first block past eof in
 * the file.  This is not based on i_size, it is based on the extent records.
 * Returns 0 for local files, as they do not have extent records.
 */
int
xfs_bmap_last_offset(
	struct xfs_inode	*ip,
	xfs_fileoff_t		*last_block,
	int			whichfork)
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_bmbt_irec	rec;
	int			is_empty;
	int			error;

	*last_block = 0;

	if (ifp->if_format == XFS_DINODE_FMT_LOCAL)
		return 0;

	if (XFS_IS_CORRUPT(ip->i_mount, !xfs_ifork_has_extents(ifp)))
		return -EFSCORRUPTED;

	error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, &is_empty);
	if (error || is_empty)
		return error;

	*last_block = rec.br_startoff + rec.br_blockcount;
	return 0;
}

/*
 * Extent tree manipulation functions used during allocation.
 */

/*
 * Convert a delayed allocation to a real allocation.
 */
STATIC int				/* error */
xfs_bmap_add_extent_delay_real(
	struct xfs_bmalloca	*bma,
	int			whichfork)
{
	struct xfs_mount	*mp = bma->ip->i_mount;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(bma->ip, whichfork);
	struct xfs_bmbt_irec	*new = &bma->got;
	int			error;	/* error return value */
	int			i;	/* temp state */
	xfs_fileoff_t		new_endoff;	/* end offset of new entry */
	xfs_bmbt_irec_t		r[3];	/* neighbor extent entries */
					/* left is 0, right is 1, prev is 2 */
	int			rval = 0;	/* return value (logging flags) */
	uint32_t		state = xfs_bmap_fork_to_state(whichfork);
	xfs_filblks_t		da_new;	/* new count del alloc blocks used */
	xfs_filblks_t		da_old;	/* old count del alloc blocks used */
	xfs_filblks_t		temp = 0;	/* value for da_new calculations */
	int			tmp_rval;	/* partial logging flags */
	struct xfs_bmbt_irec	old;

	ASSERT(whichfork != XFS_ATTR_FORK);
	ASSERT(!isnullstartblock(new->br_startblock));
	ASSERT(!bma->cur ||
	       (bma->cur->bc_ino.flags & XFS_BTCUR_BMBT_WASDEL));

	XFS_STATS_INC(mp, xs_add_exlist);

#define	LEFT		r[0]
#define	RIGHT		r[1]
#define	PREV		r[2]

	/*
	 * Set up a bunch of variables to make the tests simpler.
	 */
	xfs_iext_get_extent(ifp, &bma->icur, &PREV);
	new_endoff = new->br_startoff + new->br_blockcount;
	ASSERT(isnullstartblock(PREV.br_startblock));
	ASSERT(PREV.br_startoff <= new->br_startoff);
	ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);

	da_old = startblockval(PREV.br_startblock);
	da_new = 0;
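	/*
	 * Illustration of the neighbour layout examined below (diagram
	 * added for clarity; offsets grow to the right):
	 *
	 *	LEFT		PREV (delalloc)		RIGHT
	 *	+---------+---------------------------+---------+
	 *	          |<------ new lands here ---->|
	 *
	 * "new" may cover all of PREV, butt up against LEFT and/or RIGHT
	 * (the FILLING/CONTIG bits), or land in the middle, splitting
	 * PREV in two (case 0 below).
	 */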
	/*
	 * Set flags determining what part of the previous delayed allocation
	 * extent is being replaced by a real allocation.
	 */
	if (PREV.br_startoff == new->br_startoff)
		state |= BMAP_LEFT_FILLING;
	if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
		state |= BMAP_RIGHT_FILLING;

	/*
	 * Check and set flags if this segment has a left neighbor.
	 * Don't set contiguous if the combined extent would be too large.
	 */
	if (xfs_iext_peek_prev_extent(ifp, &bma->icur, &LEFT)) {
		state |= BMAP_LEFT_VALID;
		if (isnullstartblock(LEFT.br_startblock))
			state |= BMAP_LEFT_DELAY;
	}

	if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
	    LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
	    LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
	    LEFT.br_state == new->br_state &&
	    LEFT.br_blockcount + new->br_blockcount <= XFS_MAX_BMBT_EXTLEN)
		state |= BMAP_LEFT_CONTIG;

	/*
	 * Check and set flags if this segment has a right neighbor.
	 * Don't set contiguous if the combined extent would be too large.
	 * Also check for all-three-contiguous being too large.
	 */
	if (xfs_iext_peek_next_extent(ifp, &bma->icur, &RIGHT)) {
		state |= BMAP_RIGHT_VALID;
		if (isnullstartblock(RIGHT.br_startblock))
			state |= BMAP_RIGHT_DELAY;
	}

	if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
	    new_endoff == RIGHT.br_startoff &&
	    new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
	    new->br_state == RIGHT.br_state &&
	    new->br_blockcount + RIGHT.br_blockcount <= XFS_MAX_BMBT_EXTLEN &&
	    ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
		       BMAP_RIGHT_FILLING)) !=
		      (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
		       BMAP_RIGHT_FILLING) ||
	     LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
			<= XFS_MAX_BMBT_EXTLEN))
		state |= BMAP_RIGHT_CONTIG;

	error = 0;
	/*
	 * Switch out based on the FILLING and CONTIG state bits.
	 */
	switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
			 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
	     BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * The left and right neighbors are both contiguous with new.
		 */
		LEFT.br_blockcount += PREV.br_blockcount + RIGHT.br_blockcount;

		xfs_iext_remove(bma->ip, &bma->icur, state);
		xfs_iext_remove(bma->ip, &bma->icur, state);
		xfs_iext_prev(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);
		ifp->if_nextents--;

		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, &RIGHT, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_btree_delete(bma->cur, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_btree_decrement(bma->cur, 0, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(bma->cur, &LEFT);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * The left neighbor is contiguous, the right is not.
		 */
		old = LEFT;
		LEFT.br_blockcount += PREV.br_blockcount;

		xfs_iext_remove(bma->ip, &bma->icur, state);
		xfs_iext_prev(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);

		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(bma->cur, &LEFT);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * The right neighbor is contiguous, the left is not. Take care
		 * with delay -> unwritten extent allocation here because the
		 * delalloc record we are overwriting is always written.
		 */
		PREV.br_startblock = new->br_startblock;
		PREV.br_blockcount += RIGHT.br_blockcount;
		PREV.br_state = new->br_state;

		xfs_iext_next(ifp, &bma->icur);
		xfs_iext_remove(bma->ip, &bma->icur, state);
		xfs_iext_prev(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);

		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, &RIGHT, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(bma->cur, &PREV);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * Neither the left nor right neighbors are contiguous with
		 * the new one.
		 */
		PREV.br_startblock = new->br_startblock;
		PREV.br_state = new->br_state;
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
		ifp->if_nextents++;

		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 0)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
		/*
		 * Filling in the first part of a previous delayed allocation.
		 * The left neighbor is contiguous.
		 */
		old = LEFT;
		temp = PREV.br_blockcount - new->br_blockcount;
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
				startblockval(PREV.br_startblock));

		LEFT.br_blockcount += new->br_blockcount;

		PREV.br_blockcount = temp;
		PREV.br_startoff += new->br_blockcount;
		PREV.br_startblock = nullstartblock(da_new);

		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
		xfs_iext_prev(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);

		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(bma->cur, &LEFT);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING:
		/*
		 * Filling in the first part of a previous delayed allocation.
		 * The left neighbor is not contiguous.
		 */
		xfs_iext_update_extent(bma->ip, state, &bma->icur, new);
		ifp->if_nextents++;

		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 0)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
		}

		if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
					&bma->cur, 1, &tmp_rval, whichfork);
			rval |= tmp_rval;
			if (error)
				goto done;
		}

		temp = PREV.br_blockcount - new->br_blockcount;
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
			startblockval(PREV.br_startblock) -
			(bma->cur ? bma->cur->bc_ino.allocated : 0));

		PREV.br_startoff = new_endoff;
		PREV.br_blockcount = temp;
		PREV.br_startblock = nullstartblock(da_new);
		xfs_iext_next(ifp, &bma->icur);
		xfs_iext_insert(bma->ip, &bma->icur, &PREV, state);
		xfs_iext_prev(ifp, &bma->icur);
		break;

	case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Filling in the last part of a previous delayed allocation.
		 * The right neighbor is contiguous with the new allocation.
		 */
		old = RIGHT;
		RIGHT.br_startoff = new->br_startoff;
		RIGHT.br_startblock = new->br_startblock;
		RIGHT.br_blockcount += new->br_blockcount;

		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(bma->cur, &RIGHT);
			if (error)
				goto done;
		}

		temp = PREV.br_blockcount - new->br_blockcount;
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
			startblockval(PREV.br_startblock));

		PREV.br_blockcount = temp;
		PREV.br_startblock = nullstartblock(da_new);

		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
		xfs_iext_next(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &RIGHT);
		break;

	case BMAP_RIGHT_FILLING:
		/*
		 * Filling in the last part of a previous delayed allocation.
		 * The right neighbor is not contiguous.
		 */
		xfs_iext_update_extent(bma->ip, state, &bma->icur, new);
		ifp->if_nextents++;

		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 0)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
		}

		if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
				&bma->cur, 1, &tmp_rval, whichfork);
			rval |= tmp_rval;
			if (error)
				goto done;
		}

		temp = PREV.br_blockcount - new->br_blockcount;
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
			startblockval(PREV.br_startblock) -
			(bma->cur ? bma->cur->bc_ino.allocated : 0));

		PREV.br_startblock = nullstartblock(da_new);
		PREV.br_blockcount = temp;
		xfs_iext_insert(bma->ip, &bma->icur, &PREV, state);
		xfs_iext_next(ifp, &bma->icur);
		break;

	case 0:
		/*
		 * Filling in the middle part of a previous delayed allocation.
		 * Contiguity is impossible here.
		 * This case is avoided almost all the time.
		 *
		 * We start with a delayed allocation:
		 *
		 * +ddddddddddddddddddddddddddddddddddddddddddddddddddddddd+
		 *  PREV @ idx
		 *
		 * and we are allocating:
		 *                     +rrrrrrrrrrrrrrrrr+
		 *                            new
		 *
		 * and we set it up for insertion as:
		 * +ddddddddddddddddddd+rrrrrrrrrrrrrrrrr+ddddddddddddddddd+
		 *                            new
		 *  PREV @ idx          LEFT              RIGHT
		 *                      inserted at idx + 1
		 */
		old = PREV;

		/* LEFT is the new middle */
		LEFT = *new;

		/* RIGHT is the new right */
		RIGHT.br_state = PREV.br_state;
		RIGHT.br_startoff = new_endoff;
		RIGHT.br_blockcount =
			PREV.br_startoff + PREV.br_blockcount - new_endoff;
		RIGHT.br_startblock =
			nullstartblock(xfs_bmap_worst_indlen(bma->ip,
					RIGHT.br_blockcount));

		/* truncate PREV */
		PREV.br_blockcount = new->br_startoff - PREV.br_startoff;
		PREV.br_startblock =
			nullstartblock(xfs_bmap_worst_indlen(bma->ip,
					PREV.br_blockcount));
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);

		xfs_iext_next(ifp, &bma->icur);
		xfs_iext_insert(bma->ip, &bma->icur, &RIGHT, state);
		xfs_iext_insert(bma->ip, &bma->icur, &LEFT, state);
		ifp->if_nextents++;

		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 0)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
		}

		if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
					&bma->cur, 1, &tmp_rval, whichfork);
			rval |= tmp_rval;
			if (error)
				goto done;
		}

		da_new = startblockval(PREV.br_startblock) +
			 startblockval(RIGHT.br_startblock);
		break;

	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
	case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
	case BMAP_LEFT_CONTIG:
	case BMAP_RIGHT_CONTIG:
		/*
		 * These cases are all impossible.
1902 */ 1903 ASSERT(0); 1904 } 1905 1906 /* add reverse mapping unless caller opted out */ 1907 if (!(bma->flags & XFS_BMAPI_NORMAP)) 1908 xfs_rmap_map_extent(bma->tp, bma->ip, whichfork, new); 1909 1910 /* convert to a btree if necessary */ 1911 if (xfs_bmap_needs_btree(bma->ip, whichfork)) { 1912 int tmp_logflags; /* partial log flag return val */ 1913 1914 ASSERT(bma->cur == NULL); 1915 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip, 1916 &bma->cur, da_old > 0, &tmp_logflags, 1917 whichfork); 1918 bma->logflags |= tmp_logflags; 1919 if (error) 1920 goto done; 1921 } 1922 1923 if (da_new != da_old) 1924 xfs_mod_delalloc(mp, (int64_t)da_new - da_old); 1925 1926 if (bma->cur) { 1927 da_new += bma->cur->bc_ino.allocated; 1928 bma->cur->bc_ino.allocated = 0; 1929 } 1930 1931 /* adjust for changes in reserved delayed indirect blocks */ 1932 if (da_new != da_old) { 1933 ASSERT(state == 0 || da_new < da_old); 1934 error = xfs_mod_fdblocks(mp, (int64_t)(da_old - da_new), 1935 false); 1936 } 1937 1938 xfs_bmap_check_leaf_extents(bma->cur, bma->ip, whichfork); 1939 done: 1940 if (whichfork != XFS_COW_FORK) 1941 bma->logflags |= rval; 1942 return error; 1943 #undef LEFT 1944 #undef RIGHT 1945 #undef PREV 1946 } 1947 1948 /* 1949 * Convert an unwritten allocation to a real allocation or vice versa. 1950 */ 1951 int /* error */ 1952 xfs_bmap_add_extent_unwritten_real( 1953 struct xfs_trans *tp, 1954 xfs_inode_t *ip, /* incore inode pointer */ 1955 int whichfork, 1956 struct xfs_iext_cursor *icur, 1957 struct xfs_btree_cur **curp, /* if *curp is null, not a btree */ 1958 xfs_bmbt_irec_t *new, /* new data to add to file extents */ 1959 int *logflagsp) /* inode logging flags */ 1960 { 1961 struct xfs_btree_cur *cur; /* btree cursor */ 1962 int error; /* error return value */ 1963 int i; /* temp state */ 1964 struct xfs_ifork *ifp; /* inode fork pointer */ 1965 xfs_fileoff_t new_endoff; /* end offset of new entry */ 1966 xfs_bmbt_irec_t r[3]; /* neighbor extent entries */ 1967 /* left is 0, right is 1, prev is 2 */ 1968 int rval=0; /* return value (logging flags) */ 1969 uint32_t state = xfs_bmap_fork_to_state(whichfork); 1970 struct xfs_mount *mp = ip->i_mount; 1971 struct xfs_bmbt_irec old; 1972 1973 *logflagsp = 0; 1974 1975 cur = *curp; 1976 ifp = xfs_ifork_ptr(ip, whichfork); 1977 1978 ASSERT(!isnullstartblock(new->br_startblock)); 1979 1980 XFS_STATS_INC(mp, xs_add_exlist); 1981 1982 #define LEFT r[0] 1983 #define RIGHT r[1] 1984 #define PREV r[2] 1985 1986 /* 1987 * Set up a bunch of variables to make the tests simpler. 1988 */ 1989 error = 0; 1990 xfs_iext_get_extent(ifp, icur, &PREV); 1991 ASSERT(new->br_state != PREV.br_state); 1992 new_endoff = new->br_startoff + new->br_blockcount; 1993 ASSERT(PREV.br_startoff <= new->br_startoff); 1994 ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff); 1995 1996 /* 1997 * Set flags determining what part of the previous oldext allocation 1998 * extent is being replaced by a newext allocation. 1999 */ 2000 if (PREV.br_startoff == new->br_startoff) 2001 state |= BMAP_LEFT_FILLING; 2002 if (PREV.br_startoff + PREV.br_blockcount == new_endoff) 2003 state |= BMAP_RIGHT_FILLING; 2004 2005 /* 2006 * Check and set flags if this segment has a left neighbor. 2007 * Don't set contiguous if the combined extent would be too large. 
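* ("Too large" means the merged length would not fit in the 21-bit blockcount field of an on-disk bmbt record; XFS_MAX_BMBT_EXTLEN is the largest length a single extent record can describe.)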
2008 */ 2009 if (xfs_iext_peek_prev_extent(ifp, icur, &LEFT)) { 2010 state |= BMAP_LEFT_VALID; 2011 if (isnullstartblock(LEFT.br_startblock)) 2012 state |= BMAP_LEFT_DELAY; 2013 } 2014 2015 if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) && 2016 LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff && 2017 LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock && 2018 LEFT.br_state == new->br_state && 2019 LEFT.br_blockcount + new->br_blockcount <= XFS_MAX_BMBT_EXTLEN) 2020 state |= BMAP_LEFT_CONTIG; 2021 2022 /* 2023 * Check and set flags if this segment has a right neighbor. 2024 * Don't set contiguous if the combined extent would be too large. 2025 * Also check for all-three-contiguous being too large. 2026 */ 2027 if (xfs_iext_peek_next_extent(ifp, icur, &RIGHT)) { 2028 state |= BMAP_RIGHT_VALID; 2029 if (isnullstartblock(RIGHT.br_startblock)) 2030 state |= BMAP_RIGHT_DELAY; 2031 } 2032 2033 if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) && 2034 new_endoff == RIGHT.br_startoff && 2035 new->br_startblock + new->br_blockcount == RIGHT.br_startblock && 2036 new->br_state == RIGHT.br_state && 2037 new->br_blockcount + RIGHT.br_blockcount <= XFS_MAX_BMBT_EXTLEN && 2038 ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING | 2039 BMAP_RIGHT_FILLING)) != 2040 (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING | 2041 BMAP_RIGHT_FILLING) || 2042 LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount 2043 <= XFS_MAX_BMBT_EXTLEN)) 2044 state |= BMAP_RIGHT_CONTIG; 2045 2046 /* 2047 * Switch out based on the FILLING and CONTIG state bits. 2048 */ 2049 switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | 2050 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) { 2051 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | 2052 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG: 2053 /* 2054 * Setting all of a previous oldext extent to newext. 2055 * The left and right neighbors are both contiguous with new. 2056 */ 2057 LEFT.br_blockcount += PREV.br_blockcount + RIGHT.br_blockcount; 2058 2059 xfs_iext_remove(ip, icur, state); 2060 xfs_iext_remove(ip, icur, state); 2061 xfs_iext_prev(ifp, icur); 2062 xfs_iext_update_extent(ip, state, icur, &LEFT); 2063 ifp->if_nextents -= 2; 2064 if (cur == NULL) 2065 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 2066 else { 2067 rval = XFS_ILOG_CORE; 2068 error = xfs_bmbt_lookup_eq(cur, &RIGHT, &i); 2069 if (error) 2070 goto done; 2071 if (XFS_IS_CORRUPT(mp, i != 1)) { 2072 error = -EFSCORRUPTED; 2073 goto done; 2074 } 2075 if ((error = xfs_btree_delete(cur, &i))) 2076 goto done; 2077 if (XFS_IS_CORRUPT(mp, i != 1)) { 2078 error = -EFSCORRUPTED; 2079 goto done; 2080 } 2081 if ((error = xfs_btree_decrement(cur, 0, &i))) 2082 goto done; 2083 if (XFS_IS_CORRUPT(mp, i != 1)) { 2084 error = -EFSCORRUPTED; 2085 goto done; 2086 } 2087 if ((error = xfs_btree_delete(cur, &i))) 2088 goto done; 2089 if (XFS_IS_CORRUPT(mp, i != 1)) { 2090 error = -EFSCORRUPTED; 2091 goto done; 2092 } 2093 if ((error = xfs_btree_decrement(cur, 0, &i))) 2094 goto done; 2095 if (XFS_IS_CORRUPT(mp, i != 1)) { 2096 error = -EFSCORRUPTED; 2097 goto done; 2098 } 2099 error = xfs_bmbt_update(cur, &LEFT); 2100 if (error) 2101 goto done; 2102 } 2103 break; 2104 2105 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG: 2106 /* 2107 * Setting all of a previous oldext extent to newext. 2108 * The left neighbor is contiguous, the right is not. 
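* PREV is absorbed into LEFT: the incore record for PREV is removed, and in the btree case PREV's record is deleted and LEFT's record is rewritten with the combined length.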
2109 */ 2110 LEFT.br_blockcount += PREV.br_blockcount; 2111 2112 xfs_iext_remove(ip, icur, state); 2113 xfs_iext_prev(ifp, icur); 2114 xfs_iext_update_extent(ip, state, icur, &LEFT); 2115 ifp->if_nextents--; 2116 if (cur == NULL) 2117 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 2118 else { 2119 rval = XFS_ILOG_CORE; 2120 error = xfs_bmbt_lookup_eq(cur, &PREV, &i); 2121 if (error) 2122 goto done; 2123 if (XFS_IS_CORRUPT(mp, i != 1)) { 2124 error = -EFSCORRUPTED; 2125 goto done; 2126 } 2127 if ((error = xfs_btree_delete(cur, &i))) 2128 goto done; 2129 if (XFS_IS_CORRUPT(mp, i != 1)) { 2130 error = -EFSCORRUPTED; 2131 goto done; 2132 } 2133 if ((error = xfs_btree_decrement(cur, 0, &i))) 2134 goto done; 2135 if (XFS_IS_CORRUPT(mp, i != 1)) { 2136 error = -EFSCORRUPTED; 2137 goto done; 2138 } 2139 error = xfs_bmbt_update(cur, &LEFT); 2140 if (error) 2141 goto done; 2142 } 2143 break; 2144 2145 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG: 2146 /* 2147 * Setting all of a previous oldext extent to newext. 2148 * The right neighbor is contiguous, the left is not. 2149 */ 2150 PREV.br_blockcount += RIGHT.br_blockcount; 2151 PREV.br_state = new->br_state; 2152 2153 xfs_iext_next(ifp, icur); 2154 xfs_iext_remove(ip, icur, state); 2155 xfs_iext_prev(ifp, icur); 2156 xfs_iext_update_extent(ip, state, icur, &PREV); 2157 ifp->if_nextents--; 2158 2159 if (cur == NULL) 2160 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 2161 else { 2162 rval = XFS_ILOG_CORE; 2163 error = xfs_bmbt_lookup_eq(cur, &RIGHT, &i); 2164 if (error) 2165 goto done; 2166 if (XFS_IS_CORRUPT(mp, i != 1)) { 2167 error = -EFSCORRUPTED; 2168 goto done; 2169 } 2170 if ((error = xfs_btree_delete(cur, &i))) 2171 goto done; 2172 if (XFS_IS_CORRUPT(mp, i != 1)) { 2173 error = -EFSCORRUPTED; 2174 goto done; 2175 } 2176 if ((error = xfs_btree_decrement(cur, 0, &i))) 2177 goto done; 2178 if (XFS_IS_CORRUPT(mp, i != 1)) { 2179 error = -EFSCORRUPTED; 2180 goto done; 2181 } 2182 error = xfs_bmbt_update(cur, &PREV); 2183 if (error) 2184 goto done; 2185 } 2186 break; 2187 2188 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING: 2189 /* 2190 * Setting all of a previous oldext extent to newext. 2191 * Neither the left nor right neighbors are contiguous with 2192 * the new one. 2193 */ 2194 PREV.br_state = new->br_state; 2195 xfs_iext_update_extent(ip, state, icur, &PREV); 2196 2197 if (cur == NULL) 2198 rval = XFS_ILOG_DEXT; 2199 else { 2200 rval = 0; 2201 error = xfs_bmbt_lookup_eq(cur, new, &i); 2202 if (error) 2203 goto done; 2204 if (XFS_IS_CORRUPT(mp, i != 1)) { 2205 error = -EFSCORRUPTED; 2206 goto done; 2207 } 2208 error = xfs_bmbt_update(cur, &PREV); 2209 if (error) 2210 goto done; 2211 } 2212 break; 2213 2214 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG: 2215 /* 2216 * Setting the first part of a previous oldext extent to newext. 2217 * The left neighbor is contiguous. 
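* The converted blocks move from the front of PREV into LEFT; both records are rewritten in place, so nothing is inserted or deleted and the extent count is unchanged.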
2218 */ 2219 LEFT.br_blockcount += new->br_blockcount; 2220 2221 old = PREV; 2222 PREV.br_startoff += new->br_blockcount; 2223 PREV.br_startblock += new->br_blockcount; 2224 PREV.br_blockcount -= new->br_blockcount; 2225 2226 xfs_iext_update_extent(ip, state, icur, &PREV); 2227 xfs_iext_prev(ifp, icur); 2228 xfs_iext_update_extent(ip, state, icur, &LEFT); 2229 2230 if (cur == NULL) 2231 rval = XFS_ILOG_DEXT; 2232 else { 2233 rval = 0; 2234 error = xfs_bmbt_lookup_eq(cur, &old, &i); 2235 if (error) 2236 goto done; 2237 if (XFS_IS_CORRUPT(mp, i != 1)) { 2238 error = -EFSCORRUPTED; 2239 goto done; 2240 } 2241 error = xfs_bmbt_update(cur, &PREV); 2242 if (error) 2243 goto done; 2244 error = xfs_btree_decrement(cur, 0, &i); 2245 if (error) 2246 goto done; 2247 error = xfs_bmbt_update(cur, &LEFT); 2248 if (error) 2249 goto done; 2250 } 2251 break; 2252 2253 case BMAP_LEFT_FILLING: 2254 /* 2255 * Setting the first part of a previous oldext extent to newext. 2256 * The left neighbor is not contiguous. 2257 */ 2258 old = PREV; 2259 PREV.br_startoff += new->br_blockcount; 2260 PREV.br_startblock += new->br_blockcount; 2261 PREV.br_blockcount -= new->br_blockcount; 2262 2263 xfs_iext_update_extent(ip, state, icur, &PREV); 2264 xfs_iext_insert(ip, icur, new, state); 2265 ifp->if_nextents++; 2266 2267 if (cur == NULL) 2268 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 2269 else { 2270 rval = XFS_ILOG_CORE; 2271 error = xfs_bmbt_lookup_eq(cur, &old, &i); 2272 if (error) 2273 goto done; 2274 if (XFS_IS_CORRUPT(mp, i != 1)) { 2275 error = -EFSCORRUPTED; 2276 goto done; 2277 } 2278 error = xfs_bmbt_update(cur, &PREV); 2279 if (error) 2280 goto done; 2281 cur->bc_rec.b = *new; 2282 if ((error = xfs_btree_insert(cur, &i))) 2283 goto done; 2284 if (XFS_IS_CORRUPT(mp, i != 1)) { 2285 error = -EFSCORRUPTED; 2286 goto done; 2287 } 2288 } 2289 break; 2290 2291 case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG: 2292 /* 2293 * Setting the last part of a previous oldext extent to newext. 2294 * The right neighbor is contiguous with the new allocation. 2295 */ 2296 old = PREV; 2297 PREV.br_blockcount -= new->br_blockcount; 2298 2299 RIGHT.br_startoff = new->br_startoff; 2300 RIGHT.br_startblock = new->br_startblock; 2301 RIGHT.br_blockcount += new->br_blockcount; 2302 2303 xfs_iext_update_extent(ip, state, icur, &PREV); 2304 xfs_iext_next(ifp, icur); 2305 xfs_iext_update_extent(ip, state, icur, &RIGHT); 2306 2307 if (cur == NULL) 2308 rval = XFS_ILOG_DEXT; 2309 else { 2310 rval = 0; 2311 error = xfs_bmbt_lookup_eq(cur, &old, &i); 2312 if (error) 2313 goto done; 2314 if (XFS_IS_CORRUPT(mp, i != 1)) { 2315 error = -EFSCORRUPTED; 2316 goto done; 2317 } 2318 error = xfs_bmbt_update(cur, &PREV); 2319 if (error) 2320 goto done; 2321 error = xfs_btree_increment(cur, 0, &i); 2322 if (error) 2323 goto done; 2324 error = xfs_bmbt_update(cur, &RIGHT); 2325 if (error) 2326 goto done; 2327 } 2328 break; 2329 2330 case BMAP_RIGHT_FILLING: 2331 /* 2332 * Setting the last part of a previous oldext extent to newext. 2333 * The right neighbor is not contiguous. 
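* PREV is shortened and a separate record is inserted for the converted range, growing the extent count by one; the btree path updates PREV first and then inserts the new record.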
2334 */ 2335 old = PREV; 2336 PREV.br_blockcount -= new->br_blockcount; 2337 2338 xfs_iext_update_extent(ip, state, icur, &PREV); 2339 xfs_iext_next(ifp, icur); 2340 xfs_iext_insert(ip, icur, new, state); 2341 ifp->if_nextents++; 2342 2343 if (cur == NULL) 2344 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 2345 else { 2346 rval = XFS_ILOG_CORE; 2347 error = xfs_bmbt_lookup_eq(cur, &old, &i); 2348 if (error) 2349 goto done; 2350 if (XFS_IS_CORRUPT(mp, i != 1)) { 2351 error = -EFSCORRUPTED; 2352 goto done; 2353 } 2354 error = xfs_bmbt_update(cur, &PREV); 2355 if (error) 2356 goto done; 2357 error = xfs_bmbt_lookup_eq(cur, new, &i); 2358 if (error) 2359 goto done; 2360 if (XFS_IS_CORRUPT(mp, i != 0)) { 2361 error = -EFSCORRUPTED; 2362 goto done; 2363 } 2364 if ((error = xfs_btree_insert(cur, &i))) 2365 goto done; 2366 if (XFS_IS_CORRUPT(mp, i != 1)) { 2367 error = -EFSCORRUPTED; 2368 goto done; 2369 } 2370 } 2371 break; 2372 2373 case 0: 2374 /* 2375 * Setting the middle part of a previous oldext extent to 2376 * newext. Contiguity is impossible here. 2377 * One extent becomes three extents. 2378 */ 2379 old = PREV; 2380 PREV.br_blockcount = new->br_startoff - PREV.br_startoff; 2381 2382 r[0] = *new; 2383 r[1].br_startoff = new_endoff; 2384 r[1].br_blockcount = 2385 old.br_startoff + old.br_blockcount - new_endoff; 2386 r[1].br_startblock = new->br_startblock + new->br_blockcount; 2387 r[1].br_state = PREV.br_state; 2388 2389 xfs_iext_update_extent(ip, state, icur, &PREV); 2390 xfs_iext_next(ifp, icur); 2391 xfs_iext_insert(ip, icur, &r[1], state); 2392 xfs_iext_insert(ip, icur, &r[0], state); 2393 ifp->if_nextents += 2; 2394 2395 if (cur == NULL) 2396 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 2397 else { 2398 rval = XFS_ILOG_CORE; 2399 error = xfs_bmbt_lookup_eq(cur, &old, &i); 2400 if (error) 2401 goto done; 2402 if (XFS_IS_CORRUPT(mp, i != 1)) { 2403 error = -EFSCORRUPTED; 2404 goto done; 2405 } 2406 /* new right extent - oldext */ 2407 error = xfs_bmbt_update(cur, &r[1]); 2408 if (error) 2409 goto done; 2410 /* new left extent - oldext */ 2411 cur->bc_rec.b = PREV; 2412 if ((error = xfs_btree_insert(cur, &i))) 2413 goto done; 2414 if (XFS_IS_CORRUPT(mp, i != 1)) { 2415 error = -EFSCORRUPTED; 2416 goto done; 2417 } 2418 /* 2419 * Reset the cursor to the position of the new extent 2420 * we are about to insert as we can't trust it after 2421 * the previous insert. 2422 */ 2423 error = xfs_bmbt_lookup_eq(cur, new, &i); 2424 if (error) 2425 goto done; 2426 if (XFS_IS_CORRUPT(mp, i != 0)) { 2427 error = -EFSCORRUPTED; 2428 goto done; 2429 } 2430 /* new middle extent - newext */ 2431 if ((error = xfs_btree_insert(cur, &i))) 2432 goto done; 2433 if (XFS_IS_CORRUPT(mp, i != 1)) { 2434 error = -EFSCORRUPTED; 2435 goto done; 2436 } 2437 } 2438 break; 2439 2440 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 2441 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 2442 case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG: 2443 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG: 2444 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 2445 case BMAP_LEFT_CONTIG: 2446 case BMAP_RIGHT_CONTIG: 2447 /* 2448 * These cases are all impossible. 
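* (As in the delalloc conversion above, CONTIG can only ever be set together with the matching FILLING flag, so these combinations cannot occur.)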
2449 */ 2450 ASSERT(0); 2451 } 2452 2453 /* update reverse mappings */ 2454 xfs_rmap_convert_extent(mp, tp, ip, whichfork, new); 2455 2456 /* convert to a btree if necessary */ 2457 if (xfs_bmap_needs_btree(ip, whichfork)) { 2458 int tmp_logflags; /* partial log flag return val */ 2459 2460 ASSERT(cur == NULL); 2461 error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0, 2462 &tmp_logflags, whichfork); 2463 *logflagsp |= tmp_logflags; 2464 if (error) 2465 goto done; 2466 } 2467 2468 /* clear out the allocated field, done with it now in any case. */ 2469 if (cur) { 2470 cur->bc_ino.allocated = 0; 2471 *curp = cur; 2472 } 2473 2474 xfs_bmap_check_leaf_extents(*curp, ip, whichfork); 2475 done: 2476 *logflagsp |= rval; 2477 return error; 2478 #undef LEFT 2479 #undef RIGHT 2480 #undef PREV 2481 } 2482 2483 /* 2484 * Convert a hole to a delayed allocation. 2485 */ 2486 STATIC void 2487 xfs_bmap_add_extent_hole_delay( 2488 xfs_inode_t *ip, /* incore inode pointer */ 2489 int whichfork, 2490 struct xfs_iext_cursor *icur, 2491 xfs_bmbt_irec_t *new) /* new data to add to file extents */ 2492 { 2493 struct xfs_ifork *ifp; /* inode fork pointer */ 2494 xfs_bmbt_irec_t left; /* left neighbor extent entry */ 2495 xfs_filblks_t newlen=0; /* new indirect size */ 2496 xfs_filblks_t oldlen=0; /* old indirect size */ 2497 xfs_bmbt_irec_t right; /* right neighbor extent entry */ 2498 uint32_t state = xfs_bmap_fork_to_state(whichfork); 2499 xfs_filblks_t temp; /* temp for indirect calculations */ 2500 2501 ifp = xfs_ifork_ptr(ip, whichfork); 2502 ASSERT(isnullstartblock(new->br_startblock)); 2503 2504 /* 2505 * Check and set flags if this segment has a left neighbor 2506 */ 2507 if (xfs_iext_peek_prev_extent(ifp, icur, &left)) { 2508 state |= BMAP_LEFT_VALID; 2509 if (isnullstartblock(left.br_startblock)) 2510 state |= BMAP_LEFT_DELAY; 2511 } 2512 2513 /* 2514 * Check and set flags if the current (right) segment exists. 2515 * If it doesn't exist, we're converting the hole at end-of-file. 2516 */ 2517 if (xfs_iext_get_extent(ifp, icur, &right)) { 2518 state |= BMAP_RIGHT_VALID; 2519 if (isnullstartblock(right.br_startblock)) 2520 state |= BMAP_RIGHT_DELAY; 2521 } 2522 2523 /* 2524 * Set contiguity flags on the left and right neighbors. 2525 * Don't let extents get too large, even if the pieces are contiguous. 2526 */ 2527 if ((state & BMAP_LEFT_VALID) && (state & BMAP_LEFT_DELAY) && 2528 left.br_startoff + left.br_blockcount == new->br_startoff && 2529 left.br_blockcount + new->br_blockcount <= XFS_MAX_BMBT_EXTLEN) 2530 state |= BMAP_LEFT_CONTIG; 2531 2532 if ((state & BMAP_RIGHT_VALID) && (state & BMAP_RIGHT_DELAY) && 2533 new->br_startoff + new->br_blockcount == right.br_startoff && 2534 new->br_blockcount + right.br_blockcount <= XFS_MAX_BMBT_EXTLEN && 2535 (!(state & BMAP_LEFT_CONTIG) || 2536 (left.br_blockcount + new->br_blockcount + 2537 right.br_blockcount <= XFS_MAX_BMBT_EXTLEN))) 2538 state |= BMAP_RIGHT_CONTIG; 2539 2540 /* 2541 * Switch out based on the contiguity flags. 2542 */ 2543 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) { 2544 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 2545 /* 2546 * New allocation is contiguous with delayed allocations 2547 * on the left and on the right. 2548 * Merge all three into a single extent record. 
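* The merged extent keeps a worst-case indirect-block reservation, capped at the sum of the reservations it replaces (oldlen), so merging never grows the reservation; any excess is returned to the free block count below.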
2549 */ 2550 temp = left.br_blockcount + new->br_blockcount + 2551 right.br_blockcount; 2552 2553 oldlen = startblockval(left.br_startblock) + 2554 startblockval(new->br_startblock) + 2555 startblockval(right.br_startblock); 2556 newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), 2557 oldlen); 2558 left.br_startblock = nullstartblock(newlen); 2559 left.br_blockcount = temp; 2560 2561 xfs_iext_remove(ip, icur, state); 2562 xfs_iext_prev(ifp, icur); 2563 xfs_iext_update_extent(ip, state, icur, &left); 2564 break; 2565 2566 case BMAP_LEFT_CONTIG: 2567 /* 2568 * New allocation is contiguous with a delayed allocation 2569 * on the left. 2570 * Merge the new allocation with the left neighbor. 2571 */ 2572 temp = left.br_blockcount + new->br_blockcount; 2573 2574 oldlen = startblockval(left.br_startblock) + 2575 startblockval(new->br_startblock); 2576 newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), 2577 oldlen); 2578 left.br_blockcount = temp; 2579 left.br_startblock = nullstartblock(newlen); 2580 2581 xfs_iext_prev(ifp, icur); 2582 xfs_iext_update_extent(ip, state, icur, &left); 2583 break; 2584 2585 case BMAP_RIGHT_CONTIG: 2586 /* 2587 * New allocation is contiguous with a delayed allocation 2588 * on the right. 2589 * Merge the new allocation with the right neighbor. 2590 */ 2591 temp = new->br_blockcount + right.br_blockcount; 2592 oldlen = startblockval(new->br_startblock) + 2593 startblockval(right.br_startblock); 2594 newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), 2595 oldlen); 2596 right.br_startoff = new->br_startoff; 2597 right.br_startblock = nullstartblock(newlen); 2598 right.br_blockcount = temp; 2599 xfs_iext_update_extent(ip, state, icur, &right); 2600 break; 2601 2602 case 0: 2603 /* 2604 * New allocation is not contiguous with another 2605 * delayed allocation. 2606 * Insert a new entry. 2607 */ 2608 oldlen = newlen = 0; 2609 xfs_iext_insert(ip, icur, new, state); 2610 break; 2611 } 2612 if (oldlen != newlen) { 2613 ASSERT(oldlen > newlen); 2614 xfs_mod_fdblocks(ip->i_mount, (int64_t)(oldlen - newlen), 2615 false); 2616 /* 2617 * Nothing to do for disk quota accounting here. 2618 */ 2619 xfs_mod_delalloc(ip->i_mount, (int64_t)newlen - oldlen); 2620 } 2621 } 2622 2623 /* 2624 * Convert a hole to a real allocation. 2625 */ 2626 STATIC int /* error */ 2627 xfs_bmap_add_extent_hole_real( 2628 struct xfs_trans *tp, 2629 struct xfs_inode *ip, 2630 int whichfork, 2631 struct xfs_iext_cursor *icur, 2632 struct xfs_btree_cur **curp, 2633 struct xfs_bmbt_irec *new, 2634 int *logflagsp, 2635 uint32_t flags) 2636 { 2637 struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork); 2638 struct xfs_mount *mp = ip->i_mount; 2639 struct xfs_btree_cur *cur = *curp; 2640 int error; /* error return value */ 2641 int i; /* temp state */ 2642 xfs_bmbt_irec_t left; /* left neighbor extent entry */ 2643 xfs_bmbt_irec_t right; /* right neighbor extent entry */ 2644 int rval=0; /* return value (logging flags) */ 2645 uint32_t state = xfs_bmap_fork_to_state(whichfork); 2646 struct xfs_bmbt_irec old; 2647 2648 ASSERT(!isnullstartblock(new->br_startblock)); 2649 ASSERT(!cur || !(cur->bc_ino.flags & XFS_BTCUR_BMBT_WASDEL)); 2650 2651 XFS_STATS_INC(mp, xs_add_exlist); 2652 2653 /* 2654 * Check and set flags if this segment has a left neighbor. 
2655 */ 2656 if (xfs_iext_peek_prev_extent(ifp, icur, &left)) { 2657 state |= BMAP_LEFT_VALID; 2658 if (isnullstartblock(left.br_startblock)) 2659 state |= BMAP_LEFT_DELAY; 2660 } 2661 2662 /* 2663 * Check and set flags if this segment has a current value. 2664 * Not true if we're inserting into the "hole" at eof. 2665 */ 2666 if (xfs_iext_get_extent(ifp, icur, &right)) { 2667 state |= BMAP_RIGHT_VALID; 2668 if (isnullstartblock(right.br_startblock)) 2669 state |= BMAP_RIGHT_DELAY; 2670 } 2671 2672 /* 2673 * We're inserting a real allocation between "left" and "right". 2674 * Set the contiguity flags. Don't let extents get too large. 2675 */ 2676 if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) && 2677 left.br_startoff + left.br_blockcount == new->br_startoff && 2678 left.br_startblock + left.br_blockcount == new->br_startblock && 2679 left.br_state == new->br_state && 2680 left.br_blockcount + new->br_blockcount <= XFS_MAX_BMBT_EXTLEN) 2681 state |= BMAP_LEFT_CONTIG; 2682 2683 if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) && 2684 new->br_startoff + new->br_blockcount == right.br_startoff && 2685 new->br_startblock + new->br_blockcount == right.br_startblock && 2686 new->br_state == right.br_state && 2687 new->br_blockcount + right.br_blockcount <= XFS_MAX_BMBT_EXTLEN && 2688 (!(state & BMAP_LEFT_CONTIG) || 2689 left.br_blockcount + new->br_blockcount + 2690 right.br_blockcount <= XFS_MAX_BMBT_EXTLEN)) 2691 state |= BMAP_RIGHT_CONTIG; 2692 2693 error = 0; 2694 /* 2695 * Select which case we're in here, and implement it. 2696 */ 2697 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) { 2698 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 2699 /* 2700 * New allocation is contiguous with real allocations on the 2701 * left and on the right. 2702 * Merge all three into a single extent record. 2703 */ 2704 left.br_blockcount += new->br_blockcount + right.br_blockcount; 2705 2706 xfs_iext_remove(ip, icur, state); 2707 xfs_iext_prev(ifp, icur); 2708 xfs_iext_update_extent(ip, state, icur, &left); 2709 ifp->if_nextents--; 2710 2711 if (cur == NULL) { 2712 rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork); 2713 } else { 2714 rval = XFS_ILOG_CORE; 2715 error = xfs_bmbt_lookup_eq(cur, &right, &i); 2716 if (error) 2717 goto done; 2718 if (XFS_IS_CORRUPT(mp, i != 1)) { 2719 error = -EFSCORRUPTED; 2720 goto done; 2721 } 2722 error = xfs_btree_delete(cur, &i); 2723 if (error) 2724 goto done; 2725 if (XFS_IS_CORRUPT(mp, i != 1)) { 2726 error = -EFSCORRUPTED; 2727 goto done; 2728 } 2729 error = xfs_btree_decrement(cur, 0, &i); 2730 if (error) 2731 goto done; 2732 if (XFS_IS_CORRUPT(mp, i != 1)) { 2733 error = -EFSCORRUPTED; 2734 goto done; 2735 } 2736 error = xfs_bmbt_update(cur, &left); 2737 if (error) 2738 goto done; 2739 } 2740 break; 2741 2742 case BMAP_LEFT_CONTIG: 2743 /* 2744 * New allocation is contiguous with a real allocation 2745 * on the left. 2746 * Merge the new allocation with the left neighbor. 
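* Only the left record changes: remember its old value for the btree lookup, then extend it in place to cover the new blocks.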
2747 */ 2748 old = left; 2749 left.br_blockcount += new->br_blockcount; 2750 2751 xfs_iext_prev(ifp, icur); 2752 xfs_iext_update_extent(ip, state, icur, &left); 2753 2754 if (cur == NULL) { 2755 rval = xfs_ilog_fext(whichfork); 2756 } else { 2757 rval = 0; 2758 error = xfs_bmbt_lookup_eq(cur, &old, &i); 2759 if (error) 2760 goto done; 2761 if (XFS_IS_CORRUPT(mp, i != 1)) { 2762 error = -EFSCORRUPTED; 2763 goto done; 2764 } 2765 error = xfs_bmbt_update(cur, &left); 2766 if (error) 2767 goto done; 2768 } 2769 break; 2770 2771 case BMAP_RIGHT_CONTIG: 2772 /* 2773 * New allocation is contiguous with a real allocation 2774 * on the right. 2775 * Merge the new allocation with the right neighbor. 2776 */ 2777 old = right; 2778 2779 right.br_startoff = new->br_startoff; 2780 right.br_startblock = new->br_startblock; 2781 right.br_blockcount += new->br_blockcount; 2782 xfs_iext_update_extent(ip, state, icur, &right); 2783 2784 if (cur == NULL) { 2785 rval = xfs_ilog_fext(whichfork); 2786 } else { 2787 rval = 0; 2788 error = xfs_bmbt_lookup_eq(cur, &old, &i); 2789 if (error) 2790 goto done; 2791 if (XFS_IS_CORRUPT(mp, i != 1)) { 2792 error = -EFSCORRUPTED; 2793 goto done; 2794 } 2795 error = xfs_bmbt_update(cur, &right); 2796 if (error) 2797 goto done; 2798 } 2799 break; 2800 2801 case 0: 2802 /* 2803 * New allocation is not contiguous with another 2804 * real allocation. 2805 * Insert a new entry. 2806 */ 2807 xfs_iext_insert(ip, icur, new, state); 2808 ifp->if_nextents++; 2809 2810 if (cur == NULL) { 2811 rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork); 2812 } else { 2813 rval = XFS_ILOG_CORE; 2814 error = xfs_bmbt_lookup_eq(cur, new, &i); 2815 if (error) 2816 goto done; 2817 if (XFS_IS_CORRUPT(mp, i != 0)) { 2818 error = -EFSCORRUPTED; 2819 goto done; 2820 } 2821 error = xfs_btree_insert(cur, &i); 2822 if (error) 2823 goto done; 2824 if (XFS_IS_CORRUPT(mp, i != 1)) { 2825 error = -EFSCORRUPTED; 2826 goto done; 2827 } 2828 } 2829 break; 2830 } 2831 2832 /* add reverse mapping unless caller opted out */ 2833 if (!(flags & XFS_BMAPI_NORMAP)) 2834 xfs_rmap_map_extent(tp, ip, whichfork, new); 2835 2836 /* convert to a btree if necessary */ 2837 if (xfs_bmap_needs_btree(ip, whichfork)) { 2838 int tmp_logflags; /* partial log flag return val */ 2839 2840 ASSERT(cur == NULL); 2841 error = xfs_bmap_extents_to_btree(tp, ip, curp, 0, 2842 &tmp_logflags, whichfork); 2843 *logflagsp |= tmp_logflags; 2844 cur = *curp; 2845 if (error) 2846 goto done; 2847 } 2848 2849 /* clear out the allocated field, done with it now in any case. */ 2850 if (cur) 2851 cur->bc_ino.allocated = 0; 2852 2853 xfs_bmap_check_leaf_extents(cur, ip, whichfork); 2854 done: 2855 *logflagsp |= rval; 2856 return error; 2857 } 2858 2859 /* 2860 * Functions used in the extent read, allocate and remove paths 2861 */ 2862 2863 /* 2864 * Adjust the size of the new extent based on i_extsize and rt extsize. 2865 */ 2866 int 2867 xfs_bmap_extsize_align( 2868 xfs_mount_t *mp, 2869 xfs_bmbt_irec_t *gotp, /* next extent pointer */ 2870 xfs_bmbt_irec_t *prevp, /* previous extent pointer */ 2871 xfs_extlen_t extsz, /* align to this extent size */ 2872 int rt, /* is this a realtime inode? */ 2873 int eof, /* is extent at end-of-file? */ 2874 int delay, /* creating delalloc extent? */ 2875 int convert, /* overwriting unwritten extent? 
*/ 2876 xfs_fileoff_t *offp, /* in/out: aligned offset */ 2877 xfs_extlen_t *lenp) /* in/out: aligned length */ 2878 { 2879 xfs_fileoff_t orig_off; /* original offset */ 2880 xfs_extlen_t orig_alen; /* original length */ 2881 xfs_fileoff_t orig_end; /* original off+len */ 2882 xfs_fileoff_t nexto; /* next file offset */ 2883 xfs_fileoff_t prevo; /* previous file offset */ 2884 xfs_fileoff_t align_off; /* temp for offset */ 2885 xfs_extlen_t align_alen; /* temp for length */ 2886 xfs_extlen_t temp; /* temp for calculations */ 2887 2888 if (convert) 2889 return 0; 2890 2891 orig_off = align_off = *offp; 2892 orig_alen = align_alen = *lenp; 2893 orig_end = orig_off + orig_alen; 2894 2895 /* 2896 * If this request overlaps an existing extent, then don't 2897 * attempt to perform any additional alignment. 2898 */ 2899 if (!delay && !eof && 2900 (orig_off >= gotp->br_startoff) && 2901 (orig_end <= gotp->br_startoff + gotp->br_blockcount)) { 2902 return 0; 2903 } 2904 2905 /* 2906 * If the file offset is unaligned vs. the extent size, 2907 * we need to align it. This will be possible unless 2908 * the file was previously written with a kernel that didn't 2909 * perform this alignment, or if a truncate shot us in the 2910 * foot. 2911 */ 2912 div_u64_rem(orig_off, extsz, &temp); 2913 if (temp) { 2914 align_alen += temp; 2915 align_off -= temp; 2916 } 2917 2918 /* Same adjustment for the end of the requested area. */ 2919 temp = (align_alen % extsz); 2920 if (temp) 2921 align_alen += extsz - temp; 2922 2923 /* 2924 * For large extent hint sizes, the aligned extent might be larger than 2925 * XFS_MAX_BMBT_EXTLEN. In that case, reduce the size by an extsz so 2926 * that it pulls the length back under XFS_MAX_BMBT_EXTLEN. The outer 2927 * allocation loops handle short allocation just fine, so it is safe to 2928 * do this. We only want to do it when we are forced to, though, because 2929 * it means more allocation operations are required. 2930 */ 2931 while (align_alen > XFS_MAX_BMBT_EXTLEN) 2932 align_alen -= extsz; 2933 ASSERT(align_alen <= XFS_MAX_BMBT_EXTLEN); 2934 2935 /* 2936 * If the previous block overlaps with this proposed allocation 2937 * then move the start forward without adjusting the length. 2938 */ 2939 if (prevp->br_startoff != NULLFILEOFF) { 2940 if (prevp->br_startblock == HOLESTARTBLOCK) 2941 prevo = prevp->br_startoff; 2942 else 2943 prevo = prevp->br_startoff + prevp->br_blockcount; 2944 } else 2945 prevo = 0; 2946 if (align_off != orig_off && align_off < prevo) 2947 align_off = prevo; 2948 /* 2949 * If the next block overlaps with this proposed allocation 2950 * then move the start back without adjusting the length, 2951 * but not before offset 0. 2952 * This may of course make the start overlap the previous block, 2953 * and if we hit the offset 0 limit then the next block 2954 * can still overlap too. 2955 */ 2956 if (!eof && gotp->br_startoff != NULLFILEOFF) { 2957 if ((delay && gotp->br_startblock == HOLESTARTBLOCK) || 2958 (!delay && gotp->br_startblock == DELAYSTARTBLOCK)) 2959 nexto = gotp->br_startoff + gotp->br_blockcount; 2960 else 2961 nexto = gotp->br_startoff; 2962 } else 2963 nexto = NULLFILEOFF; 2964 if (!eof && 2965 align_off + align_alen != orig_end && 2966 align_off + align_alen > nexto) 2967 align_off = nexto > align_alen ? nexto - align_alen : 0; 2968 /* 2969 * If we're now overlapping the next or previous extent, that 2970 * means we can't fit an extsz piece in this hole.
Just move 2971 * the start forward to the first valid spot and set 2972 * the length so we hit the end. 2973 */ 2974 if (align_off != orig_off && align_off < prevo) 2975 align_off = prevo; 2976 if (align_off + align_alen != orig_end && 2977 align_off + align_alen > nexto && 2978 nexto != NULLFILEOFF) { 2979 ASSERT(nexto > prevo); 2980 align_alen = nexto - align_off; 2981 } 2982 2983 /* 2984 * If realtime, and the result isn't a multiple of the realtime 2985 * extent size, we need to remove blocks until it is. 2986 */ 2987 if (rt && (temp = (align_alen % mp->m_sb.sb_rextsize))) { 2988 /* 2989 * We're not covering the original request, or 2990 * we won't be able to once we fix the length. 2991 */ 2992 if (orig_off < align_off || 2993 orig_end > align_off + align_alen || 2994 align_alen - temp < orig_alen) 2995 return -EINVAL; 2996 /* 2997 * Try to fix it by moving the start up. 2998 */ 2999 if (align_off + temp <= orig_off) { 3000 align_alen -= temp; 3001 align_off += temp; 3002 } 3003 /* 3004 * Try to fix it by moving the end in. 3005 */ 3006 else if (align_off + align_alen - temp >= orig_end) 3007 align_alen -= temp; 3008 /* 3009 * Set the start to the minimum then trim the length. 3010 */ 3011 else { 3012 align_alen -= orig_off - align_off; 3013 align_off = orig_off; 3014 align_alen -= align_alen % mp->m_sb.sb_rextsize; 3015 } 3016 /* 3017 * Result doesn't cover the request, fail it. 3018 */ 3019 if (orig_off < align_off || orig_end > align_off + align_alen) 3020 return -EINVAL; 3021 } else { 3022 ASSERT(orig_off >= align_off); 3023 /* see XFS_MAX_BMBT_EXTLEN handling above */ 3024 ASSERT(orig_end <= align_off + align_alen || 3025 align_alen + extsz > XFS_MAX_BMBT_EXTLEN); 3026 } 3027 3028 #ifdef DEBUG 3029 if (!eof && gotp->br_startoff != NULLFILEOFF) 3030 ASSERT(align_off + align_alen <= gotp->br_startoff); 3031 if (prevp->br_startoff != NULLFILEOFF) 3032 ASSERT(align_off >= prevp->br_startoff + prevp->br_blockcount); 3033 #endif 3034 3035 *lenp = align_alen; 3036 *offp = align_off; 3037 return 0; 3038 } 3039 3040 #define XFS_ALLOC_GAP_UNITS 4 3041 3042 void 3043 xfs_bmap_adjacent( 3044 struct xfs_bmalloca *ap) /* bmap alloc argument struct */ 3045 { 3046 xfs_fsblock_t adjust; /* adjustment to block numbers */ 3047 xfs_mount_t *mp; /* mount point structure */ 3048 int rt; /* true if inode is realtime */ 3049 3050 #define ISVALID(x,y) \ 3051 (rt ? \ 3052 (x) < mp->m_sb.sb_rblocks : \ 3053 XFS_FSB_TO_AGNO(mp, x) == XFS_FSB_TO_AGNO(mp, y) && \ 3054 XFS_FSB_TO_AGNO(mp, x) < mp->m_sb.sb_agcount && \ 3055 XFS_FSB_TO_AGBNO(mp, x) < mp->m_sb.sb_agblocks) 3056 3057 mp = ap->ip->i_mount; 3058 rt = XFS_IS_REALTIME_INODE(ap->ip) && 3059 (ap->datatype & XFS_ALLOC_USERDATA); 3060 /* 3061 * If allocating at eof, and there's a previous real block, 3062 * try to use its last block as our starting point. 3063 */ 3064 if (ap->eof && ap->prev.br_startoff != NULLFILEOFF && 3065 !isnullstartblock(ap->prev.br_startblock) && 3066 ISVALID(ap->prev.br_startblock + ap->prev.br_blockcount, 3067 ap->prev.br_startblock)) { 3068 ap->blkno = ap->prev.br_startblock + ap->prev.br_blockcount; 3069 /* 3070 * Adjust for the gap between prevp and us. 3071 */ 3072 adjust = ap->offset - 3073 (ap->prev.br_startoff + ap->prev.br_blockcount); 3074 if (adjust && 3075 ISVALID(ap->blkno + adjust, ap->prev.br_startblock)) 3076 ap->blkno += adjust; 3077 } 3078 /* 3079 * If not at eof, then compare the two neighbor blocks. 3080 * Figure out whether either one gives us a good starting point, 3081 * and pick the better one.
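* "Better" below means the smaller gap, in file blocks, between the requested offset and the neighbouring extent (prevdiff vs. gotdiff).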
3082 */ 3083 else if (!ap->eof) { 3084 xfs_fsblock_t gotbno; /* right side block number */ 3085 xfs_fsblock_t gotdiff=0; /* right side difference */ 3086 xfs_fsblock_t prevbno; /* left side block number */ 3087 xfs_fsblock_t prevdiff=0; /* left side difference */ 3088 3089 /* 3090 * If there's a previous (left) block, select a requested 3091 * start block based on it. 3092 */ 3093 if (ap->prev.br_startoff != NULLFILEOFF && 3094 !isnullstartblock(ap->prev.br_startblock) && 3095 (prevbno = ap->prev.br_startblock + 3096 ap->prev.br_blockcount) && 3097 ISVALID(prevbno, ap->prev.br_startblock)) { 3098 /* 3099 * Calculate gap to end of previous block. 3100 */ 3101 adjust = prevdiff = ap->offset - 3102 (ap->prev.br_startoff + 3103 ap->prev.br_blockcount); 3104 /* 3105 * Figure the startblock based on the previous block's 3106 * end and the gap size. 3107 * Heuristic! 3108 * If the gap is large relative to the piece we're 3109 * allocating, or using it gives us an invalid block 3110 * number, then just use the end of the previous block. 3111 */ 3112 if (prevdiff <= XFS_ALLOC_GAP_UNITS * ap->length && 3113 ISVALID(prevbno + prevdiff, 3114 ap->prev.br_startblock)) 3115 prevbno += adjust; 3116 else 3117 prevdiff += adjust; 3118 } 3119 /* 3120 * No previous block or can't follow it, just default. 3121 */ 3122 else 3123 prevbno = NULLFSBLOCK; 3124 /* 3125 * If there's a following (right) block, select a requested 3126 * start block based on it. 3127 */ 3128 if (!isnullstartblock(ap->got.br_startblock)) { 3129 /* 3130 * Calculate gap to start of next block. 3131 */ 3132 adjust = gotdiff = ap->got.br_startoff - ap->offset; 3133 /* 3134 * Figure the startblock based on the next block's 3135 * start and the gap size. 3136 */ 3137 gotbno = ap->got.br_startblock; 3138 /* 3139 * Heuristic! 3140 * If the gap is large relative to the piece we're 3141 * allocating, or using it gives us an invalid block 3142 * number, then just use the start of the next block 3143 * offset by our length. 3144 */ 3145 if (gotdiff <= XFS_ALLOC_GAP_UNITS * ap->length && 3146 ISVALID(gotbno - gotdiff, gotbno)) 3147 gotbno -= adjust; 3148 else if (ISVALID(gotbno - ap->length, gotbno)) { 3149 gotbno -= ap->length; 3150 gotdiff += adjust - ap->length; 3151 } else 3152 gotdiff += adjust; 3153 } 3154 /* 3155 * No next block, just default. 3156 */ 3157 else 3158 gotbno = NULLFSBLOCK; 3159 /* 3160 * If both valid, pick the better one, else the only good 3161 * one, else ap->blkno is already set (to 0 or the inode block). 3162 */ 3163 if (prevbno != NULLFSBLOCK && gotbno != NULLFSBLOCK) 3164 ap->blkno = prevdiff <= gotdiff ? 
prevbno : gotbno; 3165 else if (prevbno != NULLFSBLOCK) 3166 ap->blkno = prevbno; 3167 else if (gotbno != NULLFSBLOCK) 3168 ap->blkno = gotbno; 3169 } 3170 #undef ISVALID 3171 } 3172 3173 int 3174 xfs_bmap_longest_free_extent( 3175 struct xfs_perag *pag, 3176 struct xfs_trans *tp, 3177 xfs_extlen_t *blen) 3178 { 3179 xfs_extlen_t longest; 3180 int error = 0; 3181 3182 if (!xfs_perag_initialised_agf(pag)) { 3183 error = xfs_alloc_read_agf(pag, tp, XFS_ALLOC_FLAG_TRYLOCK, 3184 NULL); 3185 if (error) 3186 return error; 3187 } 3188 3189 longest = xfs_alloc_longest_free_extent(pag, 3190 xfs_alloc_min_freelist(pag->pag_mount, pag), 3191 xfs_ag_resv_needed(pag, XFS_AG_RESV_NONE)); 3192 if (*blen < longest) 3193 *blen = longest; 3194 3195 return 0; 3196 } 3197 3198 static xfs_extlen_t 3199 xfs_bmap_select_minlen( 3200 struct xfs_bmalloca *ap, 3201 struct xfs_alloc_arg *args, 3202 xfs_extlen_t blen) 3203 { 3204 3205 /* 3206 * Since we used XFS_ALLOC_FLAG_TRYLOCK in _longest_free_extent(), blen may 3207 * underestimate what is really free, so enough contiguous free space may still exist for this request. 3208 */ 3209 if (blen < ap->minlen) 3210 return ap->minlen; 3211 3212 /* 3213 * If the best seen length is less than the request length, 3214 * use the best as the minimum, otherwise we've got the maxlen we 3215 * were asked for. 3216 */ 3217 if (blen < args->maxlen) 3218 return blen; 3219 return args->maxlen; 3220 } 3221 3222 static int 3223 xfs_bmap_btalloc_select_lengths( 3224 struct xfs_bmalloca *ap, 3225 struct xfs_alloc_arg *args, 3226 xfs_extlen_t *blen) 3227 { 3228 struct xfs_mount *mp = args->mp; 3229 struct xfs_perag *pag; 3230 xfs_agnumber_t agno, startag; 3231 int error = 0; 3232 3233 if (ap->tp->t_flags & XFS_TRANS_LOWMODE) { 3234 args->total = ap->minlen; 3235 args->minlen = ap->minlen; 3236 return 0; 3237 } 3238 3239 args->total = ap->total; 3240 startag = XFS_FSB_TO_AGNO(mp, ap->blkno); 3241 if (startag == NULLAGNUMBER) 3242 startag = 0; 3243 3244 *blen = 0; 3245 for_each_perag_wrap(mp, startag, agno, pag) { 3246 error = xfs_bmap_longest_free_extent(pag, args->tp, blen); 3247 if (error && error != -EAGAIN) 3248 break; 3249 error = 0; 3250 if (*blen >= args->maxlen) 3251 break; 3252 } 3253 if (pag) 3254 xfs_perag_rele(pag); 3255 3256 args->minlen = xfs_bmap_select_minlen(ap, args, *blen); 3257 return error; 3258 } 3259 3260 /* Update all inode and quota accounting for the allocation we just did. */ 3261 static void 3262 xfs_bmap_btalloc_accounting( 3263 struct xfs_bmalloca *ap, 3264 struct xfs_alloc_arg *args) 3265 { 3266 if (ap->flags & XFS_BMAPI_COWFORK) { 3267 /* 3268 * COW fork blocks are in-core only and thus are treated as 3269 * in-core quota reservation (like delalloc blocks) even when 3270 * converted to real blocks. The quota reservation is not 3271 * accounted to disk until blocks are remapped to the data 3272 * fork. So if these blocks were previously delalloc, we 3273 * already have quota reservation and there's nothing to do 3274 * yet. 3275 */ 3276 if (ap->wasdel) { 3277 xfs_mod_delalloc(ap->ip->i_mount, -(int64_t)args->len); 3278 return; 3279 } 3280 3281 /* 3282 * Otherwise, we've allocated blocks in a hole. The transaction 3283 * has acquired in-core quota reservation for this extent. 3284 * Rather than account these as real blocks, however, we reduce 3285 * the transaction quota reservation based on the allocation. 3286 * This essentially transfers the transaction quota reservation 3287 * to that of a delalloc extent.
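* (i_delayed_blks also counts COW fork blocks, so the extent is tracked there until it is remapped into the data fork.)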
3288 */ 3289 ap->ip->i_delayed_blks += args->len; 3290 xfs_trans_mod_dquot_byino(ap->tp, ap->ip, XFS_TRANS_DQ_RES_BLKS, 3291 -(long)args->len); 3292 return; 3293 } 3294 3295 /* data/attr fork only */ 3296 ap->ip->i_nblocks += args->len; 3297 xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE); 3298 if (ap->wasdel) { 3299 ap->ip->i_delayed_blks -= args->len; 3300 xfs_mod_delalloc(ap->ip->i_mount, -(int64_t)args->len); 3301 } 3302 xfs_trans_mod_dquot_byino(ap->tp, ap->ip, 3303 ap->wasdel ? XFS_TRANS_DQ_DELBCOUNT : XFS_TRANS_DQ_BCOUNT, 3304 args->len); 3305 } 3306 3307 static int 3308 xfs_bmap_compute_alignments( 3309 struct xfs_bmalloca *ap, 3310 struct xfs_alloc_arg *args) 3311 { 3312 struct xfs_mount *mp = args->mp; 3313 xfs_extlen_t align = 0; /* minimum allocation alignment */ 3314 int stripe_align = 0; 3315 3316 /* stripe alignment for allocation is determined by mount parameters */ 3317 if (mp->m_swidth && xfs_has_swalloc(mp)) 3318 stripe_align = mp->m_swidth; 3319 else if (mp->m_dalign) 3320 stripe_align = mp->m_dalign; 3321 3322 if (ap->flags & XFS_BMAPI_COWFORK) 3323 align = xfs_get_cowextsz_hint(ap->ip); 3324 else if (ap->datatype & XFS_ALLOC_USERDATA) 3325 align = xfs_get_extsz_hint(ap->ip); 3326 if (align) { 3327 if (xfs_bmap_extsize_align(mp, &ap->got, &ap->prev, align, 0, 3328 ap->eof, 0, ap->conv, &ap->offset, 3329 &ap->length)) 3330 ASSERT(0); 3331 ASSERT(ap->length); 3332 } 3333 3334 /* apply extent size hints if obtained earlier */ 3335 if (align) { 3336 args->prod = align; 3337 div_u64_rem(ap->offset, args->prod, &args->mod); 3338 if (args->mod) 3339 args->mod = args->prod - args->mod; 3340 } else if (mp->m_sb.sb_blocksize >= PAGE_SIZE) { 3341 args->prod = 1; 3342 args->mod = 0; 3343 } else { 3344 args->prod = PAGE_SIZE >> mp->m_sb.sb_blocklog; 3345 div_u64_rem(ap->offset, args->prod, &args->mod); 3346 if (args->mod) 3347 args->mod = args->prod - args->mod; 3348 } 3349 3350 return stripe_align; 3351 } 3352 3353 static void 3354 xfs_bmap_process_allocated_extent( 3355 struct xfs_bmalloca *ap, 3356 struct xfs_alloc_arg *args, 3357 xfs_fileoff_t orig_offset, 3358 xfs_extlen_t orig_length) 3359 { 3360 ap->blkno = args->fsbno; 3361 ap->length = args->len; 3362 /* 3363 * If the extent size hint is active, we tried to round the 3364 * caller's allocation request offset down to extsz and the 3365 * length up to another extsz boundary. If we found a free 3366 * extent we mapped it in starting at this new offset. If the 3367 * newly mapped space isn't long enough to cover any of the 3368 * range of offsets that was originally requested, move the 3369 * mapping up so that we can fill as much of the caller's 3370 * original request as possible. Free space is apparently 3371 * very fragmented so we're unlikely to be able to satisfy the 3372 * hints anyway. 
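* For example (illustrative numbers only): with an extsz hint of 8, a request for file blocks 10-13 may have been widened to 8-15. If the allocator then honours only 5 blocks, mapping them at offset 8 would cover blocks 8-12 and miss block 13, so the start is moved up to offset 9 and the mapping covers blocks 9-13, the tail of the original request.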
3373 */ 3374 if (ap->length <= orig_length) 3375 ap->offset = orig_offset; 3376 else if (ap->offset + ap->length < orig_offset + orig_length) 3377 ap->offset = orig_offset + orig_length - ap->length; 3378 xfs_bmap_btalloc_accounting(ap, args); 3379 } 3380 3381 #ifdef DEBUG 3382 static int 3383 xfs_bmap_exact_minlen_extent_alloc( 3384 struct xfs_bmalloca *ap) 3385 { 3386 struct xfs_mount *mp = ap->ip->i_mount; 3387 struct xfs_alloc_arg args = { .tp = ap->tp, .mp = mp }; 3388 xfs_fileoff_t orig_offset; 3389 xfs_extlen_t orig_length; 3390 int error; 3391 3392 ASSERT(ap->length); 3393 3394 if (ap->minlen != 1) { 3395 ap->blkno = NULLFSBLOCK; 3396 ap->length = 0; 3397 return 0; 3398 } 3399 3400 orig_offset = ap->offset; 3401 orig_length = ap->length; 3402 3403 args.alloc_minlen_only = 1; 3404 3405 xfs_bmap_compute_alignments(ap, &args); 3406 3407 /* 3408 * Unlike the longest extent available in an AG, we don't track 3409 * the length of an AG's shortest extent. 3410 * XFS_ERRTAG_BMAP_ALLOC_MINLEN_EXTENT is a debug only knob and 3411 * hence we can afford to start traversing from the 0th AG since 3412 * we need not be concerned about a drop in performance in 3413 * "debug only" code paths. 3414 */ 3415 ap->blkno = XFS_AGB_TO_FSB(mp, 0, 0); 3416 3417 args.oinfo = XFS_RMAP_OINFO_SKIP_UPDATE; 3418 args.minlen = args.maxlen = ap->minlen; 3419 args.total = ap->total; 3420 3421 args.alignment = 1; 3422 args.minalignslop = 0; 3423 3424 args.minleft = ap->minleft; 3425 args.wasdel = ap->wasdel; 3426 args.resv = XFS_AG_RESV_NONE; 3427 args.datatype = ap->datatype; 3428 3429 error = xfs_alloc_vextent_first_ag(&args, ap->blkno); 3430 if (error) 3431 return error; 3432 3433 if (args.fsbno != NULLFSBLOCK) { 3434 xfs_bmap_process_allocated_extent(ap, &args, orig_offset, 3435 orig_length); 3436 } else { 3437 ap->blkno = NULLFSBLOCK; 3438 ap->length = 0; 3439 } 3440 3441 return 0; 3442 } 3443 #else 3444 3445 #define xfs_bmap_exact_minlen_extent_alloc(bma) (-EFSCORRUPTED) 3446 3447 #endif 3448 3449 /* 3450 * If we are not low on available data blocks and we are allocating at 3451 * EOF, optimise allocation for contiguous file extension and/or stripe 3452 * alignment of the new extent. 3453 * 3454 * NOTE: ap->aeof is only set if the allocation length is >= the 3455 * stripe unit and the allocation offset is at the end of file. 3456 */ 3457 static int 3458 xfs_bmap_btalloc_at_eof( 3459 struct xfs_bmalloca *ap, 3460 struct xfs_alloc_arg *args, 3461 xfs_extlen_t blen, 3462 int stripe_align, 3463 bool ag_only) 3464 { 3465 struct xfs_mount *mp = args->mp; 3466 struct xfs_perag *caller_pag = args->pag; 3467 int error; 3468 3469 /* 3470 * If there are already extents in the file, try an exact EOF block 3471 * allocation to extend the file as a contiguous extent. If that fails, 3472 * or it's the first allocation in a file, just try for a stripe aligned 3473 * allocation. 3474 */ 3475 if (ap->offset) { 3476 xfs_extlen_t nextminlen = 0; 3477 3478 /* 3479 * Compute the minlen+alignment for the next case. Set slop so 3480 * that the value of minlen+alignment+slop doesn't go up between 3481 * the calls. 
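* An aligned allocation of nextminlen blocks can consume up to nextminlen + stripe_align - 1 blocks of free space, while this exact attempt only needs args->minlen; minalignslop pads the first attempt's space reservation so a later aligned retry can never need more space than was already reserved.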
*/ 3483 args->alignment = 1; 3484 if (blen > stripe_align && blen <= args->maxlen) 3485 nextminlen = blen - stripe_align; 3486 else 3487 nextminlen = args->minlen; 3488 if (nextminlen + stripe_align > args->minlen + 1) 3489 args->minalignslop = nextminlen + stripe_align - 3490 args->minlen - 1; 3491 else 3492 args->minalignslop = 0; 3493 3494 if (!caller_pag) 3495 args->pag = xfs_perag_get(mp, XFS_FSB_TO_AGNO(mp, ap->blkno)); 3496 error = xfs_alloc_vextent_exact_bno(args, ap->blkno); 3497 if (!caller_pag) { 3498 xfs_perag_put(args->pag); 3499 args->pag = NULL; 3500 } 3501 if (error) 3502 return error; 3503 3504 if (args->fsbno != NULLFSBLOCK) 3505 return 0; 3506 /* 3507 * Exact allocation failed. Reset to try an aligned allocation 3508 * according to the original allocation specification. 3509 */ 3510 args->alignment = stripe_align; 3511 args->minlen = nextminlen; 3512 args->minalignslop = 0; 3513 } else { 3514 /* 3515 * Adjust minlen to try to preserve alignment if we 3516 * can't guarantee an aligned maxlen extent. 3517 */ 3518 args->alignment = stripe_align; 3519 if (blen > args->alignment && 3520 blen <= args->maxlen + args->alignment) 3521 args->minlen = blen - args->alignment; 3522 args->minalignslop = 0; 3523 } 3524 3525 if (ag_only) { 3526 error = xfs_alloc_vextent_near_bno(args, ap->blkno); 3527 } else { 3528 args->pag = NULL; 3529 error = xfs_alloc_vextent_start_ag(args, ap->blkno); 3530 ASSERT(args->pag == NULL); 3531 args->pag = caller_pag; 3532 } 3533 if (error) 3534 return error; 3535 3536 if (args->fsbno != NULLFSBLOCK) 3537 return 0; 3538 3539 /* 3540 * Allocation failed, so return the allocation args to their 3541 * original non-aligned state so the caller can proceed on allocation 3542 * failure as if this function was never called. 3543 */ 3544 args->alignment = 1; 3545 return 0; 3546 } 3547 3548 /* 3549 * We have failed multiple allocation attempts, so we are now in a low space 3550 * allocation situation. Try a locality-first full filesystem minimum length 3551 * allocation whilst still maintaining necessary total block reservation 3552 * requirements. 3553 * 3554 * If that fails, we are now critically low on space, so perform a last resort 3555 * allocation attempt: no reserve, no locality, blocking, minimum length, full 3556 * filesystem free space scan. We also indicate to future allocations in this 3557 * transaction that we are critically low on space so they don't waste time on 3558 * allocation modes that are unlikely to succeed. 3559 */ 3560 int 3561 xfs_bmap_btalloc_low_space( 3562 struct xfs_bmalloca *ap, 3563 struct xfs_alloc_arg *args) 3564 { 3565 int error; 3566 3567 if (args->minlen > ap->minlen) { 3568 args->minlen = ap->minlen; 3569 error = xfs_alloc_vextent_start_ag(args, ap->blkno); 3570 if (error || args->fsbno != NULLFSBLOCK) 3571 return error; 3572 } 3573 3574 /* Last ditch attempt before failure is declared.
*/ 3575 args->total = ap->minlen; 3576 error = xfs_alloc_vextent_first_ag(args, 0); 3577 if (error) 3578 return error; 3579 ap->tp->t_flags |= XFS_TRANS_LOWMODE; 3580 return 0; 3581 } 3582 3583 static int 3584 xfs_bmap_btalloc_filestreams( 3585 struct xfs_bmalloca *ap, 3586 struct xfs_alloc_arg *args, 3587 int stripe_align) 3588 { 3589 xfs_extlen_t blen = 0; 3590 int error = 0; 3591 3592 3593 error = xfs_filestream_select_ag(ap, args, &blen); 3594 if (error) 3595 return error; 3596 ASSERT(args->pag); 3597 3598 /* 3599 * If we are in low space mode, then optimal allocation will fail, so 3600 * prepare for minimal allocation and jump to the low space algorithm 3601 * immediately. 3602 */ 3603 if (ap->tp->t_flags & XFS_TRANS_LOWMODE) { 3604 args->minlen = ap->minlen; 3605 ASSERT(args->fsbno == NULLFSBLOCK); 3606 goto out_low_space; 3607 } 3608 3609 args->minlen = xfs_bmap_select_minlen(ap, args, blen); 3610 if (ap->aeof) 3611 error = xfs_bmap_btalloc_at_eof(ap, args, blen, stripe_align, 3612 true); 3613 3614 if (!error && args->fsbno == NULLFSBLOCK) 3615 error = xfs_alloc_vextent_near_bno(args, ap->blkno); 3616 3617 out_low_space: 3618 /* 3619 * We are now done with the perag reference for the filestreams 3620 * association provided by xfs_filestream_select_ag(). Release it now as 3621 * we've either succeeded, had a fatal error, or we are out of space and 3622 * need to do a full filesystem scan for free space, which will take its 3623 * own references. 3624 */ 3625 xfs_perag_rele(args->pag); 3626 args->pag = NULL; 3627 if (error || args->fsbno != NULLFSBLOCK) 3628 return error; 3629 3630 return xfs_bmap_btalloc_low_space(ap, args); 3631 } 3632 3633 static int 3634 xfs_bmap_btalloc_best_length( 3635 struct xfs_bmalloca *ap, 3636 struct xfs_alloc_arg *args, 3637 int stripe_align) 3638 { 3639 xfs_extlen_t blen = 0; 3640 int error; 3641 3642 ap->blkno = XFS_INO_TO_FSB(args->mp, ap->ip->i_ino); 3643 xfs_bmap_adjacent(ap); 3644 3645 /* 3646 * Search for an allocation group with a single extent large enough for 3647 * the request. If one isn't found, then adjust the minimum allocation 3648 * size to the largest space found. 3649 */ 3650 error = xfs_bmap_btalloc_select_lengths(ap, args, &blen); 3651 if (error) 3652 return error; 3653 3654 /* 3655 * Don't attempt optimal EOF allocation if previous allocations barely 3656 * succeeded due to being near ENOSPC. It is highly unlikely we'll get 3657 * optimal or even aligned allocations in this case, so don't waste time 3658 * trying.
3659 */ 3660 if (ap->aeof && !(ap->tp->t_flags & XFS_TRANS_LOWMODE)) { 3661 error = xfs_bmap_btalloc_at_eof(ap, args, blen, stripe_align, 3662 false); 3663 if (error || args->fsbno != NULLFSBLOCK) 3664 return error; 3665 } 3666 3667 error = xfs_alloc_vextent_start_ag(args, ap->blkno); 3668 if (error || args->fsbno != NULLFSBLOCK) 3669 return error; 3670 3671 return xfs_bmap_btalloc_low_space(ap, args); 3672 } 3673 3674 static int 3675 xfs_bmap_btalloc( 3676 struct xfs_bmalloca *ap) 3677 { 3678 struct xfs_mount *mp = ap->ip->i_mount; 3679 struct xfs_alloc_arg args = { 3680 .tp = ap->tp, 3681 .mp = mp, 3682 .fsbno = NULLFSBLOCK, 3683 .oinfo = XFS_RMAP_OINFO_SKIP_UPDATE, 3684 .minleft = ap->minleft, 3685 .wasdel = ap->wasdel, 3686 .resv = XFS_AG_RESV_NONE, 3687 .datatype = ap->datatype, 3688 .alignment = 1, 3689 .minalignslop = 0, 3690 }; 3691 xfs_fileoff_t orig_offset; 3692 xfs_extlen_t orig_length; 3693 int error; 3694 int stripe_align; 3695 3696 ASSERT(ap->length); 3697 orig_offset = ap->offset; 3698 orig_length = ap->length; 3699 3700 stripe_align = xfs_bmap_compute_alignments(ap, &args); 3701 3702 /* Trim the allocation back to the maximum an AG can fit. */ 3703 args.maxlen = min(ap->length, mp->m_ag_max_usable); 3704 3705 if ((ap->datatype & XFS_ALLOC_USERDATA) && 3706 xfs_inode_is_filestream(ap->ip)) 3707 error = xfs_bmap_btalloc_filestreams(ap, &args, stripe_align); 3708 else 3709 error = xfs_bmap_btalloc_best_length(ap, &args, stripe_align); 3710 if (error) 3711 return error; 3712 3713 if (args.fsbno != NULLFSBLOCK) { 3714 xfs_bmap_process_allocated_extent(ap, &args, orig_offset, 3715 orig_length); 3716 } else { 3717 ap->blkno = NULLFSBLOCK; 3718 ap->length = 0; 3719 } 3720 return 0; 3721 } 3722 3723 /* Trim extent to fit a logical block range. 
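* An extent wholly outside [bno, bno + len) comes back with br_blockcount set to zero. When the front of the extent is trimmed, a real startblock is advanced by the trimmed distance, while a delalloc startblock is normalised to DELAYSTARTBLOCK.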
*/ 3724 void 3725 xfs_trim_extent( 3726 struct xfs_bmbt_irec *irec, 3727 xfs_fileoff_t bno, 3728 xfs_filblks_t len) 3729 { 3730 xfs_fileoff_t distance; 3731 xfs_fileoff_t end = bno + len; 3732 3733 if (irec->br_startoff + irec->br_blockcount <= bno || 3734 irec->br_startoff >= end) { 3735 irec->br_blockcount = 0; 3736 return; 3737 } 3738 3739 if (irec->br_startoff < bno) { 3740 distance = bno - irec->br_startoff; 3741 if (isnullstartblock(irec->br_startblock)) 3742 irec->br_startblock = DELAYSTARTBLOCK; 3743 if (irec->br_startblock != DELAYSTARTBLOCK && 3744 irec->br_startblock != HOLESTARTBLOCK) 3745 irec->br_startblock += distance; 3746 irec->br_startoff += distance; 3747 irec->br_blockcount -= distance; 3748 } 3749 3750 if (end < irec->br_startoff + irec->br_blockcount) { 3751 distance = irec->br_startoff + irec->br_blockcount - end; 3752 irec->br_blockcount -= distance; 3753 } 3754 } 3755 3756 /* 3757 * Trim the returned map to the required bounds. 3758 */ 3759 STATIC void 3760 xfs_bmapi_trim_map( 3761 struct xfs_bmbt_irec *mval, 3762 struct xfs_bmbt_irec *got, 3763 xfs_fileoff_t *bno, 3764 xfs_filblks_t len, 3765 xfs_fileoff_t obno, 3766 xfs_fileoff_t end, 3767 int n, 3768 uint32_t flags) 3769 { 3770 if ((flags & XFS_BMAPI_ENTIRE) || 3771 got->br_startoff + got->br_blockcount <= obno) { 3772 *mval = *got; 3773 if (isnullstartblock(got->br_startblock)) 3774 mval->br_startblock = DELAYSTARTBLOCK; 3775 return; 3776 } 3777 3778 if (obno > *bno) 3779 *bno = obno; 3780 ASSERT((*bno >= obno) || (n == 0)); 3781 ASSERT(*bno < end); 3782 mval->br_startoff = *bno; 3783 if (isnullstartblock(got->br_startblock)) 3784 mval->br_startblock = DELAYSTARTBLOCK; 3785 else 3786 mval->br_startblock = got->br_startblock + 3787 (*bno - got->br_startoff); 3788 /* 3789 * Return the minimum of what we got and what we asked for as the 3790 * length. We can use the len variable here because it is 3791 * modified below, and we could have been through here already if the 3792 * first part of the allocation didn't overlap what 3793 * was asked for.
3794 */ 3795 mval->br_blockcount = XFS_FILBLKS_MIN(end - *bno, 3796 got->br_blockcount - (*bno - got->br_startoff)); 3797 mval->br_state = got->br_state; 3798 ASSERT(mval->br_blockcount <= len); 3799 return; 3800 } 3801 3802 /* 3803 * Update and validate the extent map to return 3804 */ 3805 STATIC void 3806 xfs_bmapi_update_map( 3807 struct xfs_bmbt_irec **map, 3808 xfs_fileoff_t *bno, 3809 xfs_filblks_t *len, 3810 xfs_fileoff_t obno, 3811 xfs_fileoff_t end, 3812 int *n, 3813 uint32_t flags) 3814 { 3815 xfs_bmbt_irec_t *mval = *map; 3816 3817 ASSERT((flags & XFS_BMAPI_ENTIRE) || 3818 ((mval->br_startoff + mval->br_blockcount) <= end)); 3819 ASSERT((flags & XFS_BMAPI_ENTIRE) || (mval->br_blockcount <= *len) || 3820 (mval->br_startoff < obno)); 3821 3822 *bno = mval->br_startoff + mval->br_blockcount; 3823 *len = end - *bno; 3824 if (*n > 0 && mval->br_startoff == mval[-1].br_startoff) { 3825 /* update previous map with new information */ 3826 ASSERT(mval->br_startblock == mval[-1].br_startblock); 3827 ASSERT(mval->br_blockcount > mval[-1].br_blockcount); 3828 ASSERT(mval->br_state == mval[-1].br_state); 3829 mval[-1].br_blockcount = mval->br_blockcount; 3830 mval[-1].br_state = mval->br_state; 3831 } else if (*n > 0 && mval->br_startblock != DELAYSTARTBLOCK && 3832 mval[-1].br_startblock != DELAYSTARTBLOCK && 3833 mval[-1].br_startblock != HOLESTARTBLOCK && 3834 mval->br_startblock == mval[-1].br_startblock + 3835 mval[-1].br_blockcount && 3836 mval[-1].br_state == mval->br_state) { 3837 ASSERT(mval->br_startoff == 3838 mval[-1].br_startoff + mval[-1].br_blockcount); 3839 mval[-1].br_blockcount += mval->br_blockcount; 3840 } else if (*n > 0 && 3841 mval->br_startblock == DELAYSTARTBLOCK && 3842 mval[-1].br_startblock == DELAYSTARTBLOCK && 3843 mval->br_startoff == 3844 mval[-1].br_startoff + mval[-1].br_blockcount) { 3845 mval[-1].br_blockcount += mval->br_blockcount; 3846 mval[-1].br_state = mval->br_state; 3847 } else if (!((*n == 0) && 3848 ((mval->br_startoff + mval->br_blockcount) <= 3849 obno))) { 3850 mval++; 3851 (*n)++; 3852 } 3853 *map = mval; 3854 } 3855 3856 /* 3857 * Map file blocks to filesystem blocks without allocation. 3858 */ 3859 int 3860 xfs_bmapi_read( 3861 struct xfs_inode *ip, 3862 xfs_fileoff_t bno, 3863 xfs_filblks_t len, 3864 struct xfs_bmbt_irec *mval, 3865 int *nmap, 3866 uint32_t flags) 3867 { 3868 struct xfs_mount *mp = ip->i_mount; 3869 int whichfork = xfs_bmapi_whichfork(flags); 3870 struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork); 3871 struct xfs_bmbt_irec got; 3872 xfs_fileoff_t obno; 3873 xfs_fileoff_t end; 3874 struct xfs_iext_cursor icur; 3875 int error; 3876 bool eof = false; 3877 int n = 0; 3878 3879 ASSERT(*nmap >= 1); 3880 ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK | XFS_BMAPI_ENTIRE))); 3881 ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)); 3882 3883 if (WARN_ON_ONCE(!ifp)) 3884 return -EFSCORRUPTED; 3885 3886 if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) || 3887 XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) 3888 return -EFSCORRUPTED; 3889 3890 if (xfs_is_shutdown(mp)) 3891 return -EIO; 3892 3893 XFS_STATS_INC(mp, xs_blk_mapr); 3894 3895 error = xfs_iread_extents(NULL, ip, whichfork); 3896 if (error) 3897 return error; 3898 3899 if (!xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got)) 3900 eof = true; 3901 end = bno + len; 3902 obno = bno; 3903 3904 while (bno < end && n < *nmap) { 3905 /* Reading past eof, act as though there's a hole up to end. 
*/ 3906 if (eof) 3907 got.br_startoff = end; 3908 if (got.br_startoff > bno) { 3909 /* Reading in a hole. */ 3910 mval->br_startoff = bno; 3911 mval->br_startblock = HOLESTARTBLOCK; 3912 mval->br_blockcount = 3913 XFS_FILBLKS_MIN(len, got.br_startoff - bno); 3914 mval->br_state = XFS_EXT_NORM; 3915 bno += mval->br_blockcount; 3916 len -= mval->br_blockcount; 3917 mval++; 3918 n++; 3919 continue; 3920 } 3921 3922 /* set up the extent map to return. */ 3923 xfs_bmapi_trim_map(mval, &got, &bno, len, obno, end, n, flags); 3924 xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags); 3925 3926 /* If we're done, stop now. */ 3927 if (bno >= end || n >= *nmap) 3928 break; 3929 3930 /* Else go on to the next record. */ 3931 if (!xfs_iext_next_extent(ifp, &icur, &got)) 3932 eof = true; 3933 } 3934 *nmap = n; 3935 return 0; 3936 } 3937 3938 /* 3939 * Add a delayed allocation extent to an inode. Blocks are reserved from the 3940 * global pool and the extent inserted into the inode in-core extent tree. 3941 * 3942 * On entry, got refers to the first extent beyond the offset of the extent to 3943 * allocate or eof is specified if no such extent exists. On return, got refers 3944 * to the extent record that was inserted to the inode fork. 3945 * 3946 * Note that the allocated extent may have been merged with contiguous extents 3947 * during insertion into the inode fork. Thus, got does not reflect the current 3948 * state of the inode fork on return. If necessary, the caller can use icur to 3949 * look up the updated record in the inode fork. 3950 */ 3951 int 3952 xfs_bmapi_reserve_delalloc( 3953 struct xfs_inode *ip, 3954 int whichfork, 3955 xfs_fileoff_t off, 3956 xfs_filblks_t len, 3957 xfs_filblks_t prealloc, 3958 struct xfs_bmbt_irec *got, 3959 struct xfs_iext_cursor *icur, 3960 int eof) 3961 { 3962 struct xfs_mount *mp = ip->i_mount; 3963 struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork); 3964 xfs_extlen_t alen; 3965 xfs_extlen_t indlen; 3966 int error; 3967 xfs_fileoff_t aoff = off; 3968 3969 /* 3970 * Cap the alloc length. Keep track of prealloc so we know whether to 3971 * tag the inode before we return. 3972 */ 3973 alen = XFS_FILBLKS_MIN(len + prealloc, XFS_MAX_BMBT_EXTLEN); 3974 if (!eof) 3975 alen = XFS_FILBLKS_MIN(alen, got->br_startoff - aoff); 3976 if (prealloc && alen >= len) 3977 prealloc = alen - len; 3978 3979 /* Figure out the extent size, adjust alen */ 3980 if (whichfork == XFS_COW_FORK) { 3981 struct xfs_bmbt_irec prev; 3982 xfs_extlen_t extsz = xfs_get_cowextsz_hint(ip); 3983 3984 if (!xfs_iext_peek_prev_extent(ifp, icur, &prev)) 3985 prev.br_startoff = NULLFILEOFF; 3986 3987 error = xfs_bmap_extsize_align(mp, got, &prev, extsz, 0, eof, 3988 1, 0, &aoff, &alen); 3989 ASSERT(!error); 3990 } 3991 3992 /* 3993 * Make a transaction-less quota reservation for delayed allocation 3994 * blocks. This number gets adjusted later. It is safe to return on 3995 * failure here because no blocks have been allocated yet. 3996 */ 3997 error = xfs_quota_reserve_blkres(ip, alen); 3998 if (error) 3999 return error; 4000 4001 /* 4002 * Update the superblock counters for alen and indlen separately, 4003 * since the two reservations could be drawn from different pools.
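 *
 * Editorial note (not part of the original source): indlen is the worst
 * case number of bmap btree blocks that converting this delalloc extent
 * to a real allocation could consume, so both the data blocks (alen) and
 * the potential metadata blocks (indlen) are taken out of fdblocks up
 * front; whatever part of indlen ends up unused is given back when the
 * extent is converted or freed.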
4004 */ 4005 indlen = (xfs_extlen_t)xfs_bmap_worst_indlen(ip, alen); 4006 ASSERT(indlen > 0); 4007 4008 error = xfs_mod_fdblocks(mp, -((int64_t)alen), false); 4009 if (error) 4010 goto out_unreserve_quota; 4011 4012 error = xfs_mod_fdblocks(mp, -((int64_t)indlen), false); 4013 if (error) 4014 goto out_unreserve_blocks; 4015 4016 4017 ip->i_delayed_blks += alen; 4018 xfs_mod_delalloc(ip->i_mount, alen + indlen); 4019 4020 got->br_startoff = aoff; 4021 got->br_startblock = nullstartblock(indlen); 4022 got->br_blockcount = alen; 4023 got->br_state = XFS_EXT_NORM; 4024 4025 xfs_bmap_add_extent_hole_delay(ip, whichfork, icur, got); 4026 4027 /* 4028 * Tag the inode if blocks were preallocated. Note that COW fork 4029 * preallocation can occur at the start or end of the extent, even when 4030 * prealloc == 0, so we must also check the aligned offset and length. 4031 */ 4032 if (whichfork == XFS_DATA_FORK && prealloc) 4033 xfs_inode_set_eofblocks_tag(ip); 4034 if (whichfork == XFS_COW_FORK && (prealloc || aoff < off || alen > len)) 4035 xfs_inode_set_cowblocks_tag(ip); 4036 4037 return 0; 4038 4039 out_unreserve_blocks: 4040 xfs_mod_fdblocks(mp, alen, false); 4041 out_unreserve_quota: 4042 if (XFS_IS_QUOTA_ON(mp)) 4043 xfs_quota_unreserve_blkres(ip, alen); 4044 return error; 4045 } 4046 4047 static int 4048 xfs_bmap_alloc_userdata( 4049 struct xfs_bmalloca *bma) 4050 { 4051 struct xfs_mount *mp = bma->ip->i_mount; 4052 int whichfork = xfs_bmapi_whichfork(bma->flags); 4053 int error; 4054 4055 /* 4056 * Set the data type being allocated. For the data fork, the first data 4057 * in the file is treated differently to all other allocations. For the 4058 * attribute fork, we only need to ensure the allocated range is not on 4059 * the busy list. 4060 */ 4061 bma->datatype = XFS_ALLOC_NOBUSY; 4062 if (whichfork == XFS_DATA_FORK || whichfork == XFS_COW_FORK) { 4063 bma->datatype |= XFS_ALLOC_USERDATA; 4064 if (bma->offset == 0) 4065 bma->datatype |= XFS_ALLOC_INITIAL_USER_DATA; 4066 4067 if (mp->m_dalign && bma->length >= mp->m_dalign) { 4068 error = xfs_bmap_isaeof(bma, whichfork); 4069 if (error) 4070 return error; 4071 } 4072 4073 if (XFS_IS_REALTIME_INODE(bma->ip)) 4074 return xfs_bmap_rtalloc(bma); 4075 } 4076 4077 if (unlikely(XFS_TEST_ERROR(false, mp, 4078 XFS_ERRTAG_BMAP_ALLOC_MINLEN_EXTENT))) 4079 return xfs_bmap_exact_minlen_extent_alloc(bma); 4080 4081 return xfs_bmap_btalloc(bma); 4082 } 4083 4084 static int 4085 xfs_bmapi_allocate( 4086 struct xfs_bmalloca *bma) 4087 { 4088 struct xfs_mount *mp = bma->ip->i_mount; 4089 int whichfork = xfs_bmapi_whichfork(bma->flags); 4090 struct xfs_ifork *ifp = xfs_ifork_ptr(bma->ip, whichfork); 4091 int tmp_logflags = 0; 4092 int error; 4093 4094 ASSERT(bma->length > 0); 4095 4096 /* 4097 * For the wasdelay case, we could also just allocate the stuff asked 4098 * for in this bmap call but that wouldn't be as good. 
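 *
 * Editorial note (not part of the original source): widening the request
 * to the whole delayed extent in bma->got means the reservation is
 * converted in fewer, larger real extents instead of leaving small
 * delalloc fragments behind for later calls to deal with.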
4099 */ 4100 if (bma->wasdel) { 4101 bma->length = (xfs_extlen_t)bma->got.br_blockcount; 4102 bma->offset = bma->got.br_startoff; 4103 if (!xfs_iext_peek_prev_extent(ifp, &bma->icur, &bma->prev)) 4104 bma->prev.br_startoff = NULLFILEOFF; 4105 } else { 4106 bma->length = XFS_FILBLKS_MIN(bma->length, XFS_MAX_BMBT_EXTLEN); 4107 if (!bma->eof) 4108 bma->length = XFS_FILBLKS_MIN(bma->length, 4109 bma->got.br_startoff - bma->offset); 4110 } 4111 4112 if (bma->flags & XFS_BMAPI_CONTIG) 4113 bma->minlen = bma->length; 4114 else 4115 bma->minlen = 1; 4116 4117 if (bma->flags & XFS_BMAPI_METADATA) { 4118 if (unlikely(XFS_TEST_ERROR(false, mp, 4119 XFS_ERRTAG_BMAP_ALLOC_MINLEN_EXTENT))) 4120 error = xfs_bmap_exact_minlen_extent_alloc(bma); 4121 else 4122 error = xfs_bmap_btalloc(bma); 4123 } else { 4124 error = xfs_bmap_alloc_userdata(bma); 4125 } 4126 if (error || bma->blkno == NULLFSBLOCK) 4127 return error; 4128 4129 if (bma->flags & XFS_BMAPI_ZERO) { 4130 error = xfs_zero_extent(bma->ip, bma->blkno, bma->length); 4131 if (error) 4132 return error; 4133 } 4134 4135 if (ifp->if_format == XFS_DINODE_FMT_BTREE && !bma->cur) 4136 bma->cur = xfs_bmbt_init_cursor(mp, bma->tp, bma->ip, whichfork); 4137 /* 4138 * Bump the number of extents we've allocated 4139 * in this call. 4140 */ 4141 bma->nallocs++; 4142 4143 if (bma->cur) 4144 bma->cur->bc_ino.flags = 4145 bma->wasdel ? XFS_BTCUR_BMBT_WASDEL : 0; 4146 4147 bma->got.br_startoff = bma->offset; 4148 bma->got.br_startblock = bma->blkno; 4149 bma->got.br_blockcount = bma->length; 4150 bma->got.br_state = XFS_EXT_NORM; 4151 4152 if (bma->flags & XFS_BMAPI_PREALLOC) 4153 bma->got.br_state = XFS_EXT_UNWRITTEN; 4154 4155 if (bma->wasdel) 4156 error = xfs_bmap_add_extent_delay_real(bma, whichfork); 4157 else 4158 error = xfs_bmap_add_extent_hole_real(bma->tp, bma->ip, 4159 whichfork, &bma->icur, &bma->cur, &bma->got, 4160 &bma->logflags, bma->flags); 4161 4162 bma->logflags |= tmp_logflags; 4163 if (error) 4164 return error; 4165 4166 /* 4167 * Update our extent pointer, given that xfs_bmap_add_extent_delay_real 4168 * or xfs_bmap_add_extent_hole_real might have merged it into one of 4169 * the neighbouring ones. 4170 */ 4171 xfs_iext_get_extent(ifp, &bma->icur, &bma->got); 4172 4173 ASSERT(bma->got.br_startoff <= bma->offset); 4174 ASSERT(bma->got.br_startoff + bma->got.br_blockcount >= 4175 bma->offset + bma->length); 4176 ASSERT(bma->got.br_state == XFS_EXT_NORM || 4177 bma->got.br_state == XFS_EXT_UNWRITTEN); 4178 return 0; 4179 } 4180 4181 STATIC int 4182 xfs_bmapi_convert_unwritten( 4183 struct xfs_bmalloca *bma, 4184 struct xfs_bmbt_irec *mval, 4185 xfs_filblks_t len, 4186 uint32_t flags) 4187 { 4188 int whichfork = xfs_bmapi_whichfork(flags); 4189 struct xfs_ifork *ifp = xfs_ifork_ptr(bma->ip, whichfork); 4190 int tmp_logflags = 0; 4191 int error; 4192 4193 /* check if we need to do unwritten->real conversion */ 4194 if (mval->br_state == XFS_EXT_UNWRITTEN && 4195 (flags & XFS_BMAPI_PREALLOC)) 4196 return 0; 4197 4198 /* check if we need to do real->unwritten conversion */ 4199 if (mval->br_state == XFS_EXT_NORM && 4200 (flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT)) != 4201 (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT)) 4202 return 0; 4203 4204 /* 4205 * Modify (by adding) the state flag, if writing. 
4206 */ 4207 ASSERT(mval->br_blockcount <= len); 4208 if (ifp->if_format == XFS_DINODE_FMT_BTREE && !bma->cur) { 4209 bma->cur = xfs_bmbt_init_cursor(bma->ip->i_mount, bma->tp, 4210 bma->ip, whichfork); 4211 } 4212 mval->br_state = (mval->br_state == XFS_EXT_UNWRITTEN) 4213 ? XFS_EXT_NORM : XFS_EXT_UNWRITTEN; 4214 4215 /* 4216 * Before insertion into the bmbt, zero the range being converted 4217 * if required. 4218 */ 4219 if (flags & XFS_BMAPI_ZERO) { 4220 error = xfs_zero_extent(bma->ip, mval->br_startblock, 4221 mval->br_blockcount); 4222 if (error) 4223 return error; 4224 } 4225 4226 error = xfs_bmap_add_extent_unwritten_real(bma->tp, bma->ip, whichfork, 4227 &bma->icur, &bma->cur, mval, &tmp_logflags); 4228 /* 4229 * Log the inode core unconditionally in the unwritten extent conversion 4230 * path because the conversion might not have done so (e.g., if the 4231 * extent count hasn't changed). We need to make sure the inode is dirty 4232 * in the transaction for the sake of fsync(), even if nothing has 4233 * changed, because fsync() will not force the log for this transaction 4234 * unless it sees the inode pinned. 4235 * 4236 * Note: If we're only converting cow fork extents, there aren't 4237 * any on-disk updates to make, so we don't need to log anything. 4238 */ 4239 if (whichfork != XFS_COW_FORK) 4240 bma->logflags |= tmp_logflags | XFS_ILOG_CORE; 4241 if (error) 4242 return error; 4243 4244 /* 4245 * Update our extent pointer, given that 4246 * xfs_bmap_add_extent_unwritten_real might have merged it into one 4247 * of the neighbouring ones. 4248 */ 4249 xfs_iext_get_extent(ifp, &bma->icur, &bma->got); 4250 4251 /* 4252 * We may have combined previously unwritten space with written space, 4253 * so generate another request. 4254 */ 4255 if (mval->br_blockcount < len) 4256 return -EAGAIN; 4257 return 0; 4258 } 4259 4260 xfs_extlen_t 4261 xfs_bmapi_minleft( 4262 struct xfs_trans *tp, 4263 struct xfs_inode *ip, 4264 int fork) 4265 { 4266 struct xfs_ifork *ifp = xfs_ifork_ptr(ip, fork); 4267 4268 if (tp && tp->t_highest_agno != NULLAGNUMBER) 4269 return 0; 4270 if (ifp->if_format != XFS_DINODE_FMT_BTREE) 4271 return 1; 4272 return be16_to_cpu(ifp->if_broot->bb_level) + 1; 4273 } 4274 4275 /* 4276 * Log whatever the flags say, even on error. Otherwise we might miss detecting 4277 * a case where the data is changed, there's an error, and it's not logged, so 4278 * we don't shut down when we should. Don't bother logging extents/btree changes 4279 * if we converted to the other format. 4280 */ 4281 static void 4282 xfs_bmapi_finish( 4283 struct xfs_bmalloca *bma, 4284 int whichfork, 4285 int error) 4286 { 4287 struct xfs_ifork *ifp = xfs_ifork_ptr(bma->ip, whichfork); 4288 4289 if ((bma->logflags & xfs_ilog_fext(whichfork)) && 4290 ifp->if_format != XFS_DINODE_FMT_EXTENTS) 4291 bma->logflags &= ~xfs_ilog_fext(whichfork); 4292 else if ((bma->logflags & xfs_ilog_fbroot(whichfork)) && 4293 ifp->if_format != XFS_DINODE_FMT_BTREE) 4294 bma->logflags &= ~xfs_ilog_fbroot(whichfork); 4295 4296 if (bma->logflags) 4297 xfs_trans_log_inode(bma->tp, bma->ip, bma->logflags); 4298 if (bma->cur) 4299 xfs_btree_del_cursor(bma->cur, error); 4300 } 4301 4302 /* 4303 * Map file blocks to filesystem blocks, and allocate blocks or convert the 4304 * extent state if necessary. Detailed behaviour is controlled by the flags 4305 * parameter. Only allocates blocks from a single allocation group, to avoid 4306 * locking problems.
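 *
 * Editorial sketch of a caller (not part of the original source; the
 * reservation value is illustrative): a typical user builds a transaction,
 * locks and joins the inode, then asks for count_fsb blocks at offset_fsb:
 *
 *	struct xfs_bmbt_irec	imap;
 *	int			nimaps = 1;
 *
 *	error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb, 0,
 *			XFS_DIOSTRAT_SPACE_RES(mp, 0), &imap, &nimaps);
 *
 * On return nimaps holds the number of valid mappings in imap; it can be
 * zero if no space was available.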
*/ 4308 int 4309 xfs_bmapi_write( 4310 struct xfs_trans *tp, /* transaction pointer */ 4311 struct xfs_inode *ip, /* incore inode */ 4312 xfs_fileoff_t bno, /* starting file offs. mapped */ 4313 xfs_filblks_t len, /* length to map in file */ 4314 uint32_t flags, /* XFS_BMAPI_... */ 4315 xfs_extlen_t total, /* total blocks needed */ 4316 struct xfs_bmbt_irec *mval, /* output: map values */ 4317 int *nmap) /* i/o: mval size/count */ 4318 { 4319 struct xfs_bmalloca bma = { 4320 .tp = tp, 4321 .ip = ip, 4322 .total = total, 4323 }; 4324 struct xfs_mount *mp = ip->i_mount; 4325 int whichfork = xfs_bmapi_whichfork(flags); 4326 struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork); 4327 xfs_fileoff_t end; /* end of mapped file region */ 4328 bool eof = false; /* after the end of extents */ 4329 int error; /* error return */ 4330 int n; /* current extent index */ 4331 xfs_fileoff_t obno; /* old block number (offset) */ 4332 4333 #ifdef DEBUG 4334 xfs_fileoff_t orig_bno; /* original block number value */ 4335 int orig_flags; /* original flags arg value */ 4336 xfs_filblks_t orig_len; /* original value of len arg */ 4337 struct xfs_bmbt_irec *orig_mval; /* original value of mval */ 4338 int orig_nmap; /* original value of *nmap */ 4339 4340 orig_bno = bno; 4341 orig_len = len; 4342 orig_flags = flags; 4343 orig_mval = mval; 4344 orig_nmap = *nmap; 4345 #endif 4346 4347 ASSERT(*nmap >= 1); 4348 ASSERT(*nmap <= XFS_BMAP_MAX_NMAP); 4349 ASSERT(tp != NULL); 4350 ASSERT(len > 0); 4351 ASSERT(ifp->if_format != XFS_DINODE_FMT_LOCAL); 4352 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); 4353 ASSERT(!(flags & XFS_BMAPI_REMAP)); 4354 4355 /* zeroing is currently only for data extents, not metadata */ 4356 ASSERT((flags & (XFS_BMAPI_METADATA | XFS_BMAPI_ZERO)) != 4357 (XFS_BMAPI_METADATA | XFS_BMAPI_ZERO)); 4358 /* 4359 * we can allocate unwritten extents or pre-zero allocated blocks, 4360 * but it makes no sense to do both at once. This would result in 4361 * zeroing the unwritten extent twice while leaving it an unwritten 4362 * extent. 4363 */ 4364 ASSERT((flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO)) != 4365 (XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO)); 4366 4367 if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) || 4368 XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) { 4369 return -EFSCORRUPTED; 4370 } 4371 4372 if (xfs_is_shutdown(mp)) 4373 return -EIO; 4374 4375 XFS_STATS_INC(mp, xs_blk_mapw); 4376 4377 error = xfs_iread_extents(tp, ip, whichfork); 4378 if (error) 4379 goto error0; 4380 4381 if (!xfs_iext_lookup_extent(ip, ifp, bno, &bma.icur, &bma.got)) 4382 eof = true; 4383 if (!xfs_iext_peek_prev_extent(ifp, &bma.icur, &bma.prev)) 4384 bma.prev.br_startoff = NULLFILEOFF; 4385 bma.minleft = xfs_bmapi_minleft(tp, ip, whichfork); 4386 4387 n = 0; 4388 end = bno + len; 4389 obno = bno; 4390 while (bno < end && n < *nmap) { 4391 bool need_alloc = false, wasdelay = false; 4392 4393 /* in hole or beyond EOF? */ 4394 if (eof || bma.got.br_startoff > bno) { 4395 /* 4396 * CoW fork conversions should /never/ hit EOF or 4397 * holes. There should always be something for us 4398 * to work on. 4399 */ 4400 ASSERT(!((flags & XFS_BMAPI_CONVERT) && 4401 (flags & XFS_BMAPI_COWFORK))); 4402 4403 need_alloc = true; 4404 } else if (isnullstartblock(bma.got.br_startblock)) { 4405 wasdelay = true; 4406 } 4407 4408 /* 4409 * First, deal with the hole before the allocated space 4410 * that we found, if any.
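 *
 * Editorial note (not part of the original source): unlike
 * xfs_bmapi_read(), which reports a hole as a HOLESTARTBLOCK mapping, the
 * write path fills the hole (or converts the delalloc reservation) by
 * allocating real blocks before the mapping is handed back.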
4411 */ 4412 if (need_alloc || wasdelay) { 4413 bma.eof = eof; 4414 bma.conv = !!(flags & XFS_BMAPI_CONVERT); 4415 bma.wasdel = wasdelay; 4416 bma.offset = bno; 4417 bma.flags = flags; 4418 4419 /* 4420 * There's a 32/64 bit type mismatch between the 4421 * allocation length request (which can be 64 bits in 4422 * length) and the bma length request, which is 4423 * xfs_extlen_t and therefore 32 bits. Hence we have to 4424 * check for 32-bit overflows and handle them here. 4425 */ 4426 if (len > (xfs_filblks_t)XFS_MAX_BMBT_EXTLEN) 4427 bma.length = XFS_MAX_BMBT_EXTLEN; 4428 else 4429 bma.length = len; 4430 4431 ASSERT(len > 0); 4432 ASSERT(bma.length > 0); 4433 error = xfs_bmapi_allocate(&bma); 4434 if (error) 4435 goto error0; 4436 if (bma.blkno == NULLFSBLOCK) 4437 break; 4438 4439 /* 4440 * If this is a CoW allocation, record the data in 4441 * the refcount btree for orphan recovery. 4442 */ 4443 if (whichfork == XFS_COW_FORK) 4444 xfs_refcount_alloc_cow_extent(tp, bma.blkno, 4445 bma.length); 4446 } 4447 4448 /* Deal with the allocated space we found. */ 4449 xfs_bmapi_trim_map(mval, &bma.got, &bno, len, obno, 4450 end, n, flags); 4451 4452 /* Execute unwritten extent conversion if necessary */ 4453 error = xfs_bmapi_convert_unwritten(&bma, mval, len, flags); 4454 if (error == -EAGAIN) 4455 continue; 4456 if (error) 4457 goto error0; 4458 4459 /* update the extent map to return */ 4460 xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags); 4461 4462 /* 4463 * If we're done, stop now. Stop when we've allocated 4464 * XFS_BMAP_MAX_NMAP extents no matter what. Otherwise 4465 * the transaction may get too big. 4466 */ 4467 if (bno >= end || n >= *nmap || bma.nallocs >= *nmap) 4468 break; 4469 4470 /* Else go on to the next record. */ 4471 bma.prev = bma.got; 4472 if (!xfs_iext_next_extent(ifp, &bma.icur, &bma.got)) 4473 eof = true; 4474 } 4475 *nmap = n; 4476 4477 error = xfs_bmap_btree_to_extents(tp, ip, bma.cur, &bma.logflags, 4478 whichfork); 4479 if (error) 4480 goto error0; 4481 4482 ASSERT(ifp->if_format != XFS_DINODE_FMT_BTREE || 4483 ifp->if_nextents > XFS_IFORK_MAXEXT(ip, whichfork)); 4484 xfs_bmapi_finish(&bma, whichfork, 0); 4485 xfs_bmap_validate_ret(orig_bno, orig_len, orig_flags, orig_mval, 4486 orig_nmap, *nmap); 4487 return 0; 4488 error0: 4489 xfs_bmapi_finish(&bma, whichfork, error); 4490 return error; 4491 } 4492 4493 /* 4494 * Convert an existing delalloc extent to real blocks based on file offset. This 4495 * attempts to allocate the entire delalloc extent and may require multiple 4496 * invocations to allocate the target offset if a large enough physical extent 4497 * is not available. 4498 */ 4499 int 4500 xfs_bmapi_convert_delalloc( 4501 struct xfs_inode *ip, 4502 int whichfork, 4503 xfs_off_t offset, 4504 struct iomap *iomap, 4505 unsigned int *seq) 4506 { 4507 struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork); 4508 struct xfs_mount *mp = ip->i_mount; 4509 xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset); 4510 struct xfs_bmalloca bma = { NULL }; 4511 uint16_t flags = 0; 4512 struct xfs_trans *tp; 4513 int error; 4514 4515 if (whichfork == XFS_COW_FORK) 4516 flags |= IOMAP_F_SHARED; 4517 4518 /* 4519 * Space for the extent and indirect blocks was reserved when the 4520 * delalloc extent was created so there's no need to do so here. 
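 *
 * Editorial note (not part of the original source): this is why the
 * transaction below is allocated with a zero block reservation and
 * XFS_TRANS_RESERVE; the blocks being consumed were already accounted
 * out of fdblocks when the delalloc reservation was created.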
4521 */ 4522 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0, 4523 XFS_TRANS_RESERVE, &tp); 4524 if (error) 4525 return error; 4526 4527 xfs_ilock(ip, XFS_ILOCK_EXCL); 4528 xfs_trans_ijoin(tp, ip, 0); 4529 4530 error = xfs_iext_count_may_overflow(ip, whichfork, 4531 XFS_IEXT_ADD_NOSPLIT_CNT); 4532 if (error == -EFBIG) 4533 error = xfs_iext_count_upgrade(tp, ip, 4534 XFS_IEXT_ADD_NOSPLIT_CNT); 4535 if (error) 4536 goto out_trans_cancel; 4537 4538 if (!xfs_iext_lookup_extent(ip, ifp, offset_fsb, &bma.icur, &bma.got) || 4539 bma.got.br_startoff > offset_fsb) { 4540 /* 4541 * No extent found in the range we are trying to convert. This 4542 * should only happen for the COW fork, where another thread 4543 * might have moved the extent to the data fork in the meantime. 4544 */ 4545 WARN_ON_ONCE(whichfork != XFS_COW_FORK); 4546 error = -EAGAIN; 4547 goto out_trans_cancel; 4548 } 4549 4550 /* 4551 * If we find a real extent here we raced with another thread converting 4552 * the extent. Just return the real extent at this offset. 4553 */ 4554 if (!isnullstartblock(bma.got.br_startblock)) { 4555 xfs_bmbt_to_iomap(ip, iomap, &bma.got, 0, flags, 4556 xfs_iomap_inode_sequence(ip, flags)); 4557 *seq = READ_ONCE(ifp->if_seq); 4558 goto out_trans_cancel; 4559 } 4560 4561 bma.tp = tp; 4562 bma.ip = ip; 4563 bma.wasdel = true; 4564 bma.offset = bma.got.br_startoff; 4565 bma.length = max_t(xfs_filblks_t, bma.got.br_blockcount, 4566 XFS_MAX_BMBT_EXTLEN); 4567 bma.minleft = xfs_bmapi_minleft(tp, ip, whichfork); 4568 4569 /* 4570 * When we're converting the delalloc reservations backing dirty pages 4571 * in the page cache, we must be careful about how we create the new 4572 * extents: 4573 * 4574 * New CoW fork extents are created unwritten, turned into real extents 4575 * when we're about to write the data to disk, and mapped into the data 4576 * fork after the write finishes. End of story. 4577 * 4578 * New data fork extents must be mapped in as unwritten and converted 4579 * to real extents after the write succeeds to avoid exposing stale 4580 * disk contents if we crash. 
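 *
 * Editorial note (not part of the original source): XFS_BMAPI_PREALLOC
 * below is what makes the new allocation come back in XFS_EXT_UNWRITTEN
 * state; writeback converts it to XFS_EXT_NORM only after the data has
 * safely reached the disk.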
4581 */ 4582 bma.flags = XFS_BMAPI_PREALLOC; 4583 if (whichfork == XFS_COW_FORK) 4584 bma.flags |= XFS_BMAPI_COWFORK; 4585 4586 if (!xfs_iext_peek_prev_extent(ifp, &bma.icur, &bma.prev)) 4587 bma.prev.br_startoff = NULLFILEOFF; 4588 4589 error = xfs_bmapi_allocate(&bma); 4590 if (error) 4591 goto out_finish; 4592 4593 error = -ENOSPC; 4594 if (WARN_ON_ONCE(bma.blkno == NULLFSBLOCK)) 4595 goto out_finish; 4596 error = -EFSCORRUPTED; 4597 if (WARN_ON_ONCE(!xfs_valid_startblock(ip, bma.got.br_startblock))) 4598 goto out_finish; 4599 4600 XFS_STATS_ADD(mp, xs_xstrat_bytes, XFS_FSB_TO_B(mp, bma.length)); 4601 XFS_STATS_INC(mp, xs_xstrat_quick); 4602 4603 ASSERT(!isnullstartblock(bma.got.br_startblock)); 4604 xfs_bmbt_to_iomap(ip, iomap, &bma.got, 0, flags, 4605 xfs_iomap_inode_sequence(ip, flags)); 4606 *seq = READ_ONCE(ifp->if_seq); 4607 4608 if (whichfork == XFS_COW_FORK) 4609 xfs_refcount_alloc_cow_extent(tp, bma.blkno, bma.length); 4610 4611 error = xfs_bmap_btree_to_extents(tp, ip, bma.cur, &bma.logflags, 4612 whichfork); 4613 if (error) 4614 goto out_finish; 4615 4616 xfs_bmapi_finish(&bma, whichfork, 0); 4617 error = xfs_trans_commit(tp); 4618 xfs_iunlock(ip, XFS_ILOCK_EXCL); 4619 return error; 4620 4621 out_finish: 4622 xfs_bmapi_finish(&bma, whichfork, error); 4623 out_trans_cancel: 4624 xfs_trans_cancel(tp); 4625 xfs_iunlock(ip, XFS_ILOCK_EXCL); 4626 return error; 4627 } 4628 4629 int 4630 xfs_bmapi_remap( 4631 struct xfs_trans *tp, 4632 struct xfs_inode *ip, 4633 xfs_fileoff_t bno, 4634 xfs_filblks_t len, 4635 xfs_fsblock_t startblock, 4636 uint32_t flags) 4637 { 4638 struct xfs_mount *mp = ip->i_mount; 4639 struct xfs_ifork *ifp; 4640 struct xfs_btree_cur *cur = NULL; 4641 struct xfs_bmbt_irec got; 4642 struct xfs_iext_cursor icur; 4643 int whichfork = xfs_bmapi_whichfork(flags); 4644 int logflags = 0, error; 4645 4646 ifp = xfs_ifork_ptr(ip, whichfork); 4647 ASSERT(len > 0); 4648 ASSERT(len <= (xfs_filblks_t)XFS_MAX_BMBT_EXTLEN); 4649 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); 4650 ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC | 4651 XFS_BMAPI_NORMAP))); 4652 ASSERT((flags & (XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC)) != 4653 (XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC)); 4654 4655 if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) || 4656 XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) { 4657 return -EFSCORRUPTED; 4658 } 4659 4660 if (xfs_is_shutdown(mp)) 4661 return -EIO; 4662 4663 error = xfs_iread_extents(tp, ip, whichfork); 4664 if (error) 4665 return error; 4666 4667 if (xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got)) { 4668 /* make sure we only reflink into a hole. 
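 *
 * Editorial note (not part of the original source): xfs_bmapi_remap()
 * installs an already-allocated physical range at this file offset rather
 * than allocating new blocks, so any extent found at or beyond bno must
 * start past the whole range being mapped, as the asserts verify.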
*/ 4669 ASSERT(got.br_startoff > bno); 4670 ASSERT(got.br_startoff - bno >= len); 4671 } 4672 4673 ip->i_nblocks += len; 4674 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); 4675 4676 if (ifp->if_format == XFS_DINODE_FMT_BTREE) { 4677 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork); 4678 cur->bc_ino.flags = 0; 4679 } 4680 4681 got.br_startoff = bno; 4682 got.br_startblock = startblock; 4683 got.br_blockcount = len; 4684 if (flags & XFS_BMAPI_PREALLOC) 4685 got.br_state = XFS_EXT_UNWRITTEN; 4686 else 4687 got.br_state = XFS_EXT_NORM; 4688 4689 error = xfs_bmap_add_extent_hole_real(tp, ip, whichfork, &icur, 4690 &cur, &got, &logflags, flags); 4691 if (error) 4692 goto error0; 4693 4694 error = xfs_bmap_btree_to_extents(tp, ip, cur, &logflags, whichfork); 4695 4696 error0: 4697 if (ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS) 4698 logflags &= ~XFS_ILOG_DEXT; 4699 else if (ip->i_df.if_format != XFS_DINODE_FMT_BTREE) 4700 logflags &= ~XFS_ILOG_DBROOT; 4701 4702 if (logflags) 4703 xfs_trans_log_inode(tp, ip, logflags); 4704 if (cur) 4705 xfs_btree_del_cursor(cur, error); 4706 return error; 4707 } 4708 4709 /* 4710 * When a delalloc extent is split (e.g., due to a hole punch), the original 4711 * indlen reservation must be shared across the two new extents that are left 4712 * behind. 4713 * 4714 * Given the original reservation and the worst case indlen for the two new 4715 * extents (as calculated by xfs_bmap_worst_indlen()), split the original 4716 * reservation fairly across the two new extents. If necessary, steal available 4717 * blocks from a deleted extent to make up a reservation deficiency (e.g., if 4718 * ores == 1). The number of stolen blocks is returned. The availability and 4719 * subsequent accounting of stolen blocks is the responsibility of the caller. 4720 */ 4721 static xfs_filblks_t 4722 xfs_bmap_split_indlen( 4723 xfs_filblks_t ores, /* original res. */ 4724 xfs_filblks_t *indlen1, /* ext1 worst indlen */ 4725 xfs_filblks_t *indlen2, /* ext2 worst indlen */ 4726 xfs_filblks_t avail) /* stealable blocks */ 4727 { 4728 xfs_filblks_t len1 = *indlen1; 4729 xfs_filblks_t len2 = *indlen2; 4730 xfs_filblks_t nres = len1 + len2; /* new total res. */ 4731 xfs_filblks_t stolen = 0; 4732 xfs_filblks_t resfactor; 4733 4734 /* 4735 * Steal as many blocks as we can to try and satisfy the worst case 4736 * indlen for both new extents. 4737 */ 4738 if (ores < nres && avail) 4739 stolen = XFS_FILBLKS_MIN(nres - ores, avail); 4740 ores += stolen; 4741 4742 /* nothing else to do if we've satisfied the new reservation */ 4743 if (ores >= nres) 4744 return stolen; 4745 4746 /* 4747 * We can't meet the total required reservation for the two extents. 4748 * Calculate the percent of the overall shortage between both extents 4749 * and apply this percentage to each of the requested indlen values. 4750 * This distributes the shortage fairly and reduces the chances that one 4751 * of the two extents is left with nothing when extents are repeatedly 4752 * split. 4753 */ 4754 resfactor = (ores * 100); 4755 do_div(resfactor, nres); 4756 len1 *= resfactor; 4757 do_div(len1, 100); 4758 len2 *= resfactor; 4759 do_div(len2, 100); 4760 ASSERT(len1 + len2 <= ores); 4761 ASSERT(len1 < *indlen1 && len2 < *indlen2); 4762 4763 /* 4764 * Hand out the remainder to each extent. If one of the two reservations 4765 * is zero, we want to make sure that one gets a block first. The loop 4766 * below starts with len1, so hand len2 a block right off the bat if it 4767 * is zero. 
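 *
 * Editorial worked example (not part of the original source): with
 * ores = 5, *indlen1 = 4 and *indlen2 = 3 (so nres = 7) and nothing to
 * steal, the scaling pass computes resfactor = 500 / 7 = 71, giving
 * len1 = 4 * 71 / 100 = 2 and len2 = 3 * 71 / 100 = 2. The remaining
 * 5 - (2 + 2) = 1 block is handed out by the loop below, leaving
 * len1 = 3 and len2 = 2.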
4768 */ 4769 ores -= (len1 + len2); 4770 ASSERT((*indlen1 - len1) + (*indlen2 - len2) >= ores); 4771 if (ores && !len2 && *indlen2) { 4772 len2++; 4773 ores--; 4774 } 4775 while (ores) { 4776 if (len1 < *indlen1) { 4777 len1++; 4778 ores--; 4779 } 4780 if (!ores) 4781 break; 4782 if (len2 < *indlen2) { 4783 len2++; 4784 ores--; 4785 } 4786 } 4787 4788 *indlen1 = len1; 4789 *indlen2 = len2; 4790 4791 return stolen; 4792 } 4793 4794 int 4795 xfs_bmap_del_extent_delay( 4796 struct xfs_inode *ip, 4797 int whichfork, 4798 struct xfs_iext_cursor *icur, 4799 struct xfs_bmbt_irec *got, 4800 struct xfs_bmbt_irec *del) 4801 { 4802 struct xfs_mount *mp = ip->i_mount; 4803 struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork); 4804 struct xfs_bmbt_irec new; 4805 int64_t da_old, da_new, da_diff = 0; 4806 xfs_fileoff_t del_endoff, got_endoff; 4807 xfs_filblks_t got_indlen, new_indlen, stolen; 4808 uint32_t state = xfs_bmap_fork_to_state(whichfork); 4809 int error = 0; 4810 bool isrt; 4811 4812 XFS_STATS_INC(mp, xs_del_exlist); 4813 4814 isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip); 4815 del_endoff = del->br_startoff + del->br_blockcount; 4816 got_endoff = got->br_startoff + got->br_blockcount; 4817 da_old = startblockval(got->br_startblock); 4818 da_new = 0; 4819 4820 ASSERT(del->br_blockcount > 0); 4821 ASSERT(got->br_startoff <= del->br_startoff); 4822 ASSERT(got_endoff >= del_endoff); 4823 4824 if (isrt) { 4825 uint64_t rtexts = XFS_FSB_TO_B(mp, del->br_blockcount); 4826 4827 do_div(rtexts, mp->m_sb.sb_rextsize); 4828 xfs_mod_frextents(mp, rtexts); 4829 } 4830 4831 /* 4832 * Update the inode delalloc counter now and wait to update the 4833 * sb counters as we might have to borrow some blocks for the 4834 * indirect block accounting. 4835 */ 4836 ASSERT(!isrt); 4837 error = xfs_quota_unreserve_blkres(ip, del->br_blockcount); 4838 if (error) 4839 return error; 4840 ip->i_delayed_blks -= del->br_blockcount; 4841 4842 if (got->br_startoff == del->br_startoff) 4843 state |= BMAP_LEFT_FILLING; 4844 if (got_endoff == del_endoff) 4845 state |= BMAP_RIGHT_FILLING; 4846 4847 switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) { 4848 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING: 4849 /* 4850 * Matches the whole extent. Delete the entry. 4851 */ 4852 xfs_iext_remove(ip, icur, state); 4853 xfs_iext_prev(ifp, icur); 4854 break; 4855 case BMAP_LEFT_FILLING: 4856 /* 4857 * Deleting the first part of the extent. 4858 */ 4859 got->br_startoff = del_endoff; 4860 got->br_blockcount -= del->br_blockcount; 4861 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, 4862 got->br_blockcount), da_old); 4863 got->br_startblock = nullstartblock((int)da_new); 4864 xfs_iext_update_extent(ip, state, icur, got); 4865 break; 4866 case BMAP_RIGHT_FILLING: 4867 /* 4868 * Deleting the last part of the extent. 4869 */ 4870 got->br_blockcount = got->br_blockcount - del->br_blockcount; 4871 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, 4872 got->br_blockcount), da_old); 4873 got->br_startblock = nullstartblock((int)da_new); 4874 xfs_iext_update_extent(ip, state, icur, got); 4875 break; 4876 case 0: 4877 /* 4878 * Deleting the middle of the extent. 4879 * 4880 * Distribute the original indlen reservation across the two new 4881 * extents. Steal blocks from the deleted extent if necessary. 4882 * Stealing blocks simply fudges the fdblocks accounting below. 4883 * Warn if either of the new indlen reservations is zero as this 4884 * can lead to delalloc problems. 
4885 */ 4886 got->br_blockcount = del->br_startoff - got->br_startoff; 4887 got_indlen = xfs_bmap_worst_indlen(ip, got->br_blockcount); 4888 4889 new.br_blockcount = got_endoff - del_endoff; 4890 new_indlen = xfs_bmap_worst_indlen(ip, new.br_blockcount); 4891 4892 WARN_ON_ONCE(!got_indlen || !new_indlen); 4893 stolen = xfs_bmap_split_indlen(da_old, &got_indlen, &new_indlen, 4894 del->br_blockcount); 4895 4896 got->br_startblock = nullstartblock((int)got_indlen); 4897 4898 new.br_startoff = del_endoff; 4899 new.br_state = got->br_state; 4900 new.br_startblock = nullstartblock((int)new_indlen); 4901 4902 xfs_iext_update_extent(ip, state, icur, got); 4903 xfs_iext_next(ifp, icur); 4904 xfs_iext_insert(ip, icur, &new, state); 4905 4906 da_new = got_indlen + new_indlen - stolen; 4907 del->br_blockcount -= stolen; 4908 break; 4909 } 4910 4911 ASSERT(da_old >= da_new); 4912 da_diff = da_old - da_new; 4913 if (!isrt) 4914 da_diff += del->br_blockcount; 4915 if (da_diff) { 4916 xfs_mod_fdblocks(mp, da_diff, false); 4917 xfs_mod_delalloc(mp, -da_diff); 4918 } 4919 return error; 4920 } 4921 4922 void 4923 xfs_bmap_del_extent_cow( 4924 struct xfs_inode *ip, 4925 struct xfs_iext_cursor *icur, 4926 struct xfs_bmbt_irec *got, 4927 struct xfs_bmbt_irec *del) 4928 { 4929 struct xfs_mount *mp = ip->i_mount; 4930 struct xfs_ifork *ifp = xfs_ifork_ptr(ip, XFS_COW_FORK); 4931 struct xfs_bmbt_irec new; 4932 xfs_fileoff_t del_endoff, got_endoff; 4933 uint32_t state = BMAP_COWFORK; 4934 4935 XFS_STATS_INC(mp, xs_del_exlist); 4936 4937 del_endoff = del->br_startoff + del->br_blockcount; 4938 got_endoff = got->br_startoff + got->br_blockcount; 4939 4940 ASSERT(del->br_blockcount > 0); 4941 ASSERT(got->br_startoff <= del->br_startoff); 4942 ASSERT(got_endoff >= del_endoff); 4943 ASSERT(!isnullstartblock(got->br_startblock)); 4944 4945 if (got->br_startoff == del->br_startoff) 4946 state |= BMAP_LEFT_FILLING; 4947 if (got_endoff == del_endoff) 4948 state |= BMAP_RIGHT_FILLING; 4949 4950 switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) { 4951 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING: 4952 /* 4953 * Matches the whole extent. Delete the entry. 4954 */ 4955 xfs_iext_remove(ip, icur, state); 4956 xfs_iext_prev(ifp, icur); 4957 break; 4958 case BMAP_LEFT_FILLING: 4959 /* 4960 * Deleting the first part of the extent. 4961 */ 4962 got->br_startoff = del_endoff; 4963 got->br_blockcount -= del->br_blockcount; 4964 got->br_startblock = del->br_startblock + del->br_blockcount; 4965 xfs_iext_update_extent(ip, state, icur, got); 4966 break; 4967 case BMAP_RIGHT_FILLING: 4968 /* 4969 * Deleting the last part of the extent. 4970 */ 4971 got->br_blockcount -= del->br_blockcount; 4972 xfs_iext_update_extent(ip, state, icur, got); 4973 break; 4974 case 0: 4975 /* 4976 * Deleting the middle of the extent. 4977 */ 4978 got->br_blockcount = del->br_startoff - got->br_startoff; 4979 4980 new.br_startoff = del_endoff; 4981 new.br_blockcount = got_endoff - del_endoff; 4982 new.br_state = got->br_state; 4983 new.br_startblock = del->br_startblock + del->br_blockcount; 4984 4985 xfs_iext_update_extent(ip, state, icur, got); 4986 xfs_iext_next(ifp, icur); 4987 xfs_iext_insert(ip, icur, &new, state); 4988 break; 4989 } 4990 ip->i_delayed_blks -= del->br_blockcount; 4991 } 4992 4993 /* 4994 * Called by xfs_bmapi to update file extent records and the btree 4995 * after removing space. 
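 *
 * Editorial note (not part of the original source): as with the delalloc
 * and CoW variants above, the update breaks down into four cases based on
 * whether the deleted range covers the start and/or end of the existing
 * record: remove the whole record, trim its head, trim its tail, or split
 * it in two. Only the split case grows the extent count, which is why it
 * is the one that can fail with -ENOSPC.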
4996 */ 4997 STATIC int /* error */ 4998 xfs_bmap_del_extent_real( 4999 xfs_inode_t *ip, /* incore inode pointer */ 5000 xfs_trans_t *tp, /* current transaction pointer */ 5001 struct xfs_iext_cursor *icur, 5002 struct xfs_btree_cur *cur, /* if null, not a btree */ 5003 xfs_bmbt_irec_t *del, /* data to remove from extents */ 5004 int *logflagsp, /* inode logging flags */ 5005 int whichfork, /* data or attr fork */ 5006 uint32_t bflags) /* bmapi flags */ 5007 { 5008 xfs_fsblock_t del_endblock=0; /* first block past del */ 5009 xfs_fileoff_t del_endoff; /* first offset past del */ 5010 int do_fx; /* free extent at end of routine */ 5011 int error; /* error return value */ 5012 int flags = 0;/* inode logging flags */ 5013 struct xfs_bmbt_irec got; /* current extent entry */ 5014 xfs_fileoff_t got_endoff; /* first offset past got */ 5015 int i; /* temp state */ 5016 struct xfs_ifork *ifp; /* inode fork pointer */ 5017 xfs_mount_t *mp; /* mount structure */ 5018 xfs_filblks_t nblks; /* quota/sb block count */ 5019 xfs_bmbt_irec_t new; /* new record to be inserted */ 5020 /* REFERENCED */ 5021 uint qfield; /* quota field to update */ 5022 uint32_t state = xfs_bmap_fork_to_state(whichfork); 5023 struct xfs_bmbt_irec old; 5024 5025 mp = ip->i_mount; 5026 XFS_STATS_INC(mp, xs_del_exlist); 5027 5028 ifp = xfs_ifork_ptr(ip, whichfork); 5029 ASSERT(del->br_blockcount > 0); 5030 xfs_iext_get_extent(ifp, icur, &got); 5031 ASSERT(got.br_startoff <= del->br_startoff); 5032 del_endoff = del->br_startoff + del->br_blockcount; 5033 got_endoff = got.br_startoff + got.br_blockcount; 5034 ASSERT(got_endoff >= del_endoff); 5035 ASSERT(!isnullstartblock(got.br_startblock)); 5036 qfield = 0; 5037 error = 0; 5038 5039 /* 5040 * If it's the case where the directory code is running with no block 5041 * reservation, and the deleted block is in the middle of its extent, 5042 * and the resulting insert of an extent would cause transformation to 5043 * btree format, then reject it. The calling code will then swap blocks 5044 * around instead. We have to do this now, rather than waiting for the 5045 * conversion to btree format, since the transaction will be dirty then. 
5046 */ 5047 if (tp->t_blk_res == 0 && 5048 ifp->if_format == XFS_DINODE_FMT_EXTENTS && 5049 ifp->if_nextents >= XFS_IFORK_MAXEXT(ip, whichfork) && 5050 del->br_startoff > got.br_startoff && del_endoff < got_endoff) 5051 return -ENOSPC; 5052 5053 flags = XFS_ILOG_CORE; 5054 if (whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip)) { 5055 xfs_filblks_t len; 5056 xfs_extlen_t mod; 5057 5058 len = div_u64_rem(del->br_blockcount, mp->m_sb.sb_rextsize, 5059 &mod); 5060 ASSERT(mod == 0); 5061 5062 if (!(bflags & XFS_BMAPI_REMAP)) { 5063 xfs_fsblock_t bno; 5064 5065 bno = div_u64_rem(del->br_startblock, 5066 mp->m_sb.sb_rextsize, &mod); 5067 ASSERT(mod == 0); 5068 5069 error = xfs_rtfree_extent(tp, bno, (xfs_extlen_t)len); 5070 if (error) 5071 goto done; 5072 } 5073 5074 do_fx = 0; 5075 nblks = len * mp->m_sb.sb_rextsize; 5076 qfield = XFS_TRANS_DQ_RTBCOUNT; 5077 } else { 5078 do_fx = 1; 5079 nblks = del->br_blockcount; 5080 qfield = XFS_TRANS_DQ_BCOUNT; 5081 } 5082 5083 del_endblock = del->br_startblock + del->br_blockcount; 5084 if (cur) { 5085 error = xfs_bmbt_lookup_eq(cur, &got, &i); 5086 if (error) 5087 goto done; 5088 if (XFS_IS_CORRUPT(mp, i != 1)) { 5089 error = -EFSCORRUPTED; 5090 goto done; 5091 } 5092 } 5093 5094 if (got.br_startoff == del->br_startoff) 5095 state |= BMAP_LEFT_FILLING; 5096 if (got_endoff == del_endoff) 5097 state |= BMAP_RIGHT_FILLING; 5098 5099 switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) { 5100 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING: 5101 /* 5102 * Matches the whole extent. Delete the entry. 5103 */ 5104 xfs_iext_remove(ip, icur, state); 5105 xfs_iext_prev(ifp, icur); 5106 ifp->if_nextents--; 5107 5108 flags |= XFS_ILOG_CORE; 5109 if (!cur) { 5110 flags |= xfs_ilog_fext(whichfork); 5111 break; 5112 } 5113 if ((error = xfs_btree_delete(cur, &i))) 5114 goto done; 5115 if (XFS_IS_CORRUPT(mp, i != 1)) { 5116 error = -EFSCORRUPTED; 5117 goto done; 5118 } 5119 break; 5120 case BMAP_LEFT_FILLING: 5121 /* 5122 * Deleting the first part of the extent. 5123 */ 5124 got.br_startoff = del_endoff; 5125 got.br_startblock = del_endblock; 5126 got.br_blockcount -= del->br_blockcount; 5127 xfs_iext_update_extent(ip, state, icur, &got); 5128 if (!cur) { 5129 flags |= xfs_ilog_fext(whichfork); 5130 break; 5131 } 5132 error = xfs_bmbt_update(cur, &got); 5133 if (error) 5134 goto done; 5135 break; 5136 case BMAP_RIGHT_FILLING: 5137 /* 5138 * Deleting the last part of the extent. 5139 */ 5140 got.br_blockcount -= del->br_blockcount; 5141 xfs_iext_update_extent(ip, state, icur, &got); 5142 if (!cur) { 5143 flags |= xfs_ilog_fext(whichfork); 5144 break; 5145 } 5146 error = xfs_bmbt_update(cur, &got); 5147 if (error) 5148 goto done; 5149 break; 5150 case 0: 5151 /* 5152 * Deleting the middle of the extent. 5153 */ 5154 5155 old = got; 5156 5157 got.br_blockcount = del->br_startoff - got.br_startoff; 5158 xfs_iext_update_extent(ip, state, icur, &got); 5159 5160 new.br_startoff = del_endoff; 5161 new.br_blockcount = got_endoff - del_endoff; 5162 new.br_state = got.br_state; 5163 new.br_startblock = del_endblock; 5164 5165 flags |= XFS_ILOG_CORE; 5166 if (cur) { 5167 error = xfs_bmbt_update(cur, &got); 5168 if (error) 5169 goto done; 5170 error = xfs_btree_increment(cur, 0, &i); 5171 if (error) 5172 goto done; 5173 cur->bc_rec.b = new; 5174 error = xfs_btree_insert(cur, &i); 5175 if (error && error != -ENOSPC) 5176 goto done; 5177 /* 5178 * If get no-space back from btree insert, it tried a 5179 * split, and we have a zero block reservation. 
Fix up 5180 * our state and return the error. 5181 */ 5182 if (error == -ENOSPC) { 5183 /* 5184 * Reset the cursor, don't trust it after any 5185 * insert operation. 5186 */ 5187 error = xfs_bmbt_lookup_eq(cur, &got, &i); 5188 if (error) 5189 goto done; 5190 if (XFS_IS_CORRUPT(mp, i != 1)) { 5191 error = -EFSCORRUPTED; 5192 goto done; 5193 } 5194 /* 5195 * Update the btree record back 5196 * to the original value. 5197 */ 5198 error = xfs_bmbt_update(cur, &old); 5199 if (error) 5200 goto done; 5201 /* 5202 * Reset the extent record back 5203 * to the original value. 5204 */ 5205 xfs_iext_update_extent(ip, state, icur, &old); 5206 flags = 0; 5207 error = -ENOSPC; 5208 goto done; 5209 } 5210 if (XFS_IS_CORRUPT(mp, i != 1)) { 5211 error = -EFSCORRUPTED; 5212 goto done; 5213 } 5214 } else 5215 flags |= xfs_ilog_fext(whichfork); 5216 5217 ifp->if_nextents++; 5218 xfs_iext_next(ifp, icur); 5219 xfs_iext_insert(ip, icur, &new, state); 5220 break; 5221 } 5222 5223 /* remove reverse mapping */ 5224 xfs_rmap_unmap_extent(tp, ip, whichfork, del); 5225 5226 /* 5227 * If we need to, add to list of extents to delete. 5228 */ 5229 if (do_fx && !(bflags & XFS_BMAPI_REMAP)) { 5230 if (xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK) { 5231 xfs_refcount_decrease_extent(tp, del); 5232 } else { 5233 __xfs_free_extent_later(tp, del->br_startblock, 5234 del->br_blockcount, NULL, 5235 (bflags & XFS_BMAPI_NODISCARD) || 5236 del->br_state == XFS_EXT_UNWRITTEN); 5237 } 5238 } 5239 5240 /* 5241 * Adjust inode # blocks in the file. 5242 */ 5243 if (nblks) 5244 ip->i_nblocks -= nblks; 5245 /* 5246 * Adjust quota data. 5247 */ 5248 if (qfield && !(bflags & XFS_BMAPI_REMAP)) 5249 xfs_trans_mod_dquot_byino(tp, ip, qfield, (long)-nblks); 5250 5251 done: 5252 *logflagsp = flags; 5253 return error; 5254 } 5255 5256 /* 5257 * Unmap (remove) blocks from a file. 5258 * If nexts is nonzero then the number of extents to remove is limited to 5259 * that value. If not all extents in the block range can be removed then 5260 * *rlen reports the length that remains; the xfs_bunmapi() wrapper turns this into its *done flag.
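 *
 * Editorial sketch of a caller (not part of the original source; the
 * variable names are illustrative): truncate-style users invoke this in a
 * loop, finishing deferred work between passes so each iteration gets a
 * fresh block reservation:
 *
 *	while (unmap_len > 0) {
 *		error = __xfs_bunmapi(tp, ip, first_fsb, &unmap_len,
 *				flags, 1);
 *		if (error)
 *			break;
 *		error = xfs_defer_finish(&tp);
 *		if (error)
 *			break;
 *	}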
5261 */ 5262 int /* error */ 5263 __xfs_bunmapi( 5264 struct xfs_trans *tp, /* transaction pointer */ 5265 struct xfs_inode *ip, /* incore inode */ 5266 xfs_fileoff_t start, /* first file offset deleted */ 5267 xfs_filblks_t *rlen, /* i/o: amount remaining */ 5268 uint32_t flags, /* misc flags */ 5269 xfs_extnum_t nexts) /* number of extents max */ 5270 { 5271 struct xfs_btree_cur *cur; /* bmap btree cursor */ 5272 struct xfs_bmbt_irec del; /* extent being deleted */ 5273 int error; /* error return value */ 5274 xfs_extnum_t extno; /* extent number in list */ 5275 struct xfs_bmbt_irec got; /* current extent record */ 5276 struct xfs_ifork *ifp; /* inode fork pointer */ 5277 int isrt; /* freeing in rt area */ 5278 int logflags; /* transaction logging flags */ 5279 xfs_extlen_t mod; /* rt extent offset */ 5280 struct xfs_mount *mp = ip->i_mount; 5281 int tmp_logflags; /* partial logging flags */ 5282 int wasdel; /* was a delayed alloc extent */ 5283 int whichfork; /* data or attribute fork */ 5284 xfs_fsblock_t sum; 5285 xfs_filblks_t len = *rlen; /* length to unmap in file */ 5286 xfs_fileoff_t end; 5287 struct xfs_iext_cursor icur; 5288 bool done = false; 5289 5290 trace_xfs_bunmap(ip, start, len, flags, _RET_IP_); 5291 5292 whichfork = xfs_bmapi_whichfork(flags); 5293 ASSERT(whichfork != XFS_COW_FORK); 5294 ifp = xfs_ifork_ptr(ip, whichfork); 5295 if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp))) 5296 return -EFSCORRUPTED; 5297 if (xfs_is_shutdown(mp)) 5298 return -EIO; 5299 5300 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); 5301 ASSERT(len > 0); 5302 ASSERT(nexts >= 0); 5303 5304 error = xfs_iread_extents(tp, ip, whichfork); 5305 if (error) 5306 return error; 5307 5308 if (xfs_iext_count(ifp) == 0) { 5309 *rlen = 0; 5310 return 0; 5311 } 5312 XFS_STATS_INC(mp, xs_blk_unmap); 5313 isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip); 5314 end = start + len; 5315 5316 if (!xfs_iext_lookup_extent_before(ip, ifp, &end, &icur, &got)) { 5317 *rlen = 0; 5318 return 0; 5319 } 5320 end--; 5321 5322 logflags = 0; 5323 if (ifp->if_format == XFS_DINODE_FMT_BTREE) { 5324 ASSERT(ifp->if_format == XFS_DINODE_FMT_BTREE); 5325 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork); 5326 cur->bc_ino.flags = 0; 5327 } else 5328 cur = NULL; 5329 5330 if (isrt) { 5331 /* 5332 * Synchronize by locking the bitmap inode. 5333 */ 5334 xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL|XFS_ILOCK_RTBITMAP); 5335 xfs_trans_ijoin(tp, mp->m_rbmip, XFS_ILOCK_EXCL); 5336 xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL|XFS_ILOCK_RTSUM); 5337 xfs_trans_ijoin(tp, mp->m_rsumip, XFS_ILOCK_EXCL); 5338 } 5339 5340 extno = 0; 5341 while (end != (xfs_fileoff_t)-1 && end >= start && 5342 (nexts == 0 || extno < nexts)) { 5343 /* 5344 * Is the found extent after a hole in which end lives? 5345 * Just back up to the previous extent, if so. 5346 */ 5347 if (got.br_startoff > end && 5348 !xfs_iext_prev_extent(ifp, &icur, &got)) { 5349 done = true; 5350 break; 5351 } 5352 /* 5353 * Is the last block of this extent before the range 5354 * we're supposed to delete? If so, we're done. 5355 */ 5356 end = XFS_FILEOFF_MIN(end, 5357 got.br_startoff + got.br_blockcount - 1); 5358 if (end < start) 5359 break; 5360 /* 5361 * Then deal with the (possibly delayed) allocated space 5362 * we found. 
*/ 5364 del = got; 5365 wasdel = isnullstartblock(del.br_startblock); 5366 5367 if (got.br_startoff < start) { 5368 del.br_startoff = start; 5369 del.br_blockcount -= start - got.br_startoff; 5370 if (!wasdel) 5371 del.br_startblock += start - got.br_startoff; 5372 } 5373 if (del.br_startoff + del.br_blockcount > end + 1) 5374 del.br_blockcount = end + 1 - del.br_startoff; 5375 5376 if (!isrt) 5377 goto delete; 5378 5379 sum = del.br_startblock + del.br_blockcount; 5380 div_u64_rem(sum, mp->m_sb.sb_rextsize, &mod); 5381 if (mod) { 5382 /* 5383 * Realtime extent not lined up at the end. 5384 * The extent could have been split into written 5385 * and unwritten pieces, or we could just be 5386 * unmapping part of it. But we can't really 5387 * get rid of part of a realtime extent. 5388 */ 5389 if (del.br_state == XFS_EXT_UNWRITTEN) { 5390 /* 5391 * This piece is already unwritten, so there is 5392 * nothing left to convert. Skip over it. 5393 */ 5394 ASSERT(end >= mod); 5395 end -= mod > del.br_blockcount ? 5396 del.br_blockcount : mod; 5397 if (end < got.br_startoff && 5398 !xfs_iext_prev_extent(ifp, &icur, &got)) { 5399 done = true; 5400 break; 5401 } 5402 continue; 5403 } 5404 /* 5405 * It's written, turn it unwritten. 5406 * This is better than zeroing it. 5407 */ 5408 ASSERT(del.br_state == XFS_EXT_NORM); 5409 ASSERT(tp->t_blk_res > 0); 5410 /* 5411 * If this spans a realtime extent boundary, 5412 * chop it back to the start of the one we end at. 5413 */ 5414 if (del.br_blockcount > mod) { 5415 del.br_startoff += del.br_blockcount - mod; 5416 del.br_startblock += del.br_blockcount - mod; 5417 del.br_blockcount = mod; 5418 } 5419 del.br_state = XFS_EXT_UNWRITTEN; 5420 error = xfs_bmap_add_extent_unwritten_real(tp, ip, 5421 whichfork, &icur, &cur, &del, 5422 &logflags); 5423 if (error) 5424 goto error0; 5425 goto nodelete; 5426 } 5427 div_u64_rem(del.br_startblock, mp->m_sb.sb_rextsize, &mod); 5428 if (mod) { 5429 xfs_extlen_t off = mp->m_sb.sb_rextsize - mod; 5430 5431 /* 5432 * Realtime extent is lined up at the end but not 5433 * at the front. We'll get rid of full extents if 5434 * we can. 5435 */ 5436 if (del.br_blockcount > off) { 5437 del.br_blockcount -= off; 5438 del.br_startoff += off; 5439 del.br_startblock += off; 5440 } else if (del.br_startoff == start && 5441 (del.br_state == XFS_EXT_UNWRITTEN || 5442 tp->t_blk_res == 0)) { 5443 /* 5444 * Can't make it unwritten. There isn't 5445 * a full extent here so just skip it. 5446 */ 5447 ASSERT(end >= del.br_blockcount); 5448 end -= del.br_blockcount; 5449 if (got.br_startoff > end && 5450 !xfs_iext_prev_extent(ifp, &icur, &got)) { 5451 done = true; 5452 break; 5453 } 5454 continue; 5455 } else if (del.br_state == XFS_EXT_UNWRITTEN) { 5456 struct xfs_bmbt_irec prev; 5457 xfs_fileoff_t unwrite_start; 5458 5459 /* 5460 * This one is already unwritten. 5461 * It must have a written left neighbor. 5462 * Unwrite the killed part of that one and 5463 * try again.
5464 */ 5465 if (!xfs_iext_prev_extent(ifp, &icur, &prev)) 5466 ASSERT(0); 5467 ASSERT(prev.br_state == XFS_EXT_NORM); 5468 ASSERT(!isnullstartblock(prev.br_startblock)); 5469 ASSERT(del.br_startblock == 5470 prev.br_startblock + prev.br_blockcount); 5471 unwrite_start = max3(start, 5472 del.br_startoff - mod, 5473 prev.br_startoff); 5474 mod = unwrite_start - prev.br_startoff; 5475 prev.br_startoff = unwrite_start; 5476 prev.br_startblock += mod; 5477 prev.br_blockcount -= mod; 5478 prev.br_state = XFS_EXT_UNWRITTEN; 5479 error = xfs_bmap_add_extent_unwritten_real(tp, 5480 ip, whichfork, &icur, &cur, 5481 &prev, &logflags); 5482 if (error) 5483 goto error0; 5484 goto nodelete; 5485 } else { 5486 ASSERT(del.br_state == XFS_EXT_NORM); 5487 del.br_state = XFS_EXT_UNWRITTEN; 5488 error = xfs_bmap_add_extent_unwritten_real(tp, 5489 ip, whichfork, &icur, &cur, 5490 &del, &logflags); 5491 if (error) 5492 goto error0; 5493 goto nodelete; 5494 } 5495 } 5496 5497 delete: 5498 if (wasdel) { 5499 error = xfs_bmap_del_extent_delay(ip, whichfork, &icur, 5500 &got, &del); 5501 } else { 5502 error = xfs_bmap_del_extent_real(ip, tp, &icur, cur, 5503 &del, &tmp_logflags, whichfork, 5504 flags); 5505 logflags |= tmp_logflags; 5506 } 5507 5508 if (error) 5509 goto error0; 5510 5511 end = del.br_startoff - 1; 5512 nodelete: 5513 /* 5514 * If not done go on to the next (previous) record. 5515 */ 5516 if (end != (xfs_fileoff_t)-1 && end >= start) { 5517 if (!xfs_iext_get_extent(ifp, &icur, &got) || 5518 (got.br_startoff > end && 5519 !xfs_iext_prev_extent(ifp, &icur, &got))) { 5520 done = true; 5521 break; 5522 } 5523 extno++; 5524 } 5525 } 5526 if (done || end == (xfs_fileoff_t)-1 || end < start) 5527 *rlen = 0; 5528 else 5529 *rlen = end - start + 1; 5530 5531 /* 5532 * Convert to a btree if necessary. 5533 */ 5534 if (xfs_bmap_needs_btree(ip, whichfork)) { 5535 ASSERT(cur == NULL); 5536 error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0, 5537 &tmp_logflags, whichfork); 5538 logflags |= tmp_logflags; 5539 } else { 5540 error = xfs_bmap_btree_to_extents(tp, ip, cur, &logflags, 5541 whichfork); 5542 } 5543 5544 error0: 5545 /* 5546 * Log everything. Do this after conversion, there's no point in 5547 * logging the extent records if we've converted to btree format. 5548 */ 5549 if ((logflags & xfs_ilog_fext(whichfork)) && 5550 ifp->if_format != XFS_DINODE_FMT_EXTENTS) 5551 logflags &= ~xfs_ilog_fext(whichfork); 5552 else if ((logflags & xfs_ilog_fbroot(whichfork)) && 5553 ifp->if_format != XFS_DINODE_FMT_BTREE) 5554 logflags &= ~xfs_ilog_fbroot(whichfork); 5555 /* 5556 * Log inode even in the error case, if the transaction 5557 * is dirty we'll need to shut down the filesystem. 5558 */ 5559 if (logflags) 5560 xfs_trans_log_inode(tp, ip, logflags); 5561 if (cur) { 5562 if (!error) 5563 cur->bc_ino.allocated = 0; 5564 xfs_btree_del_cursor(cur, error); 5565 } 5566 return error; 5567 } 5568 5569 /* Unmap a range of a file. */ 5570 int 5571 xfs_bunmapi( 5572 xfs_trans_t *tp, 5573 struct xfs_inode *ip, 5574 xfs_fileoff_t bno, 5575 xfs_filblks_t len, 5576 uint32_t flags, 5577 xfs_extnum_t nexts, 5578 int *done) 5579 { 5580 int error; 5581 5582 error = __xfs_bunmapi(tp, ip, bno, &len, flags, nexts); 5583 *done = (len == 0); 5584 return error; 5585 } 5586 5587 /* 5588 * Determine whether an extent shift can be accomplished by a merge with the 5589 * extent that precedes the target hole of the shift. 
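 *
 * Editorial illustration (not part of the original source): with
 * left = { br_startoff = 0, br_blockcount = 10, br_startblock = 100 } and
 * got = { br_startoff = 15, br_blockcount = 5, br_startblock = 110 }, a
 * shift of 5 moves got's start to offset 10, which is contiguous with
 * left both in the file (0 + 10 == 10) and on disk (100 + 10 == 110), so
 * the two records can simply be merged.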
5590 */ 5591 STATIC bool 5592 xfs_bmse_can_merge( 5593 struct xfs_bmbt_irec *left, /* preceding extent */ 5594 struct xfs_bmbt_irec *got, /* current extent to shift */ 5595 xfs_fileoff_t shift) /* shift fsb */ 5596 { 5597 xfs_fileoff_t startoff; 5598 5599 startoff = got->br_startoff - shift; 5600 5601 /* 5602 * The extent, once shifted, must be adjacent in-file and on-disk with 5603 * the preceding extent. 5604 */ 5605 if ((left->br_startoff + left->br_blockcount != startoff) || 5606 (left->br_startblock + left->br_blockcount != got->br_startblock) || 5607 (left->br_state != got->br_state) || 5608 (left->br_blockcount + got->br_blockcount > XFS_MAX_BMBT_EXTLEN)) 5609 return false; 5610 5611 return true; 5612 } 5613 5614 /* 5615 * A bmap extent shift adjusts the file offset of an extent to fill a preceding 5616 * hole in the file. If an extent shift would result in the extent being fully 5617 * adjacent to the extent that currently precedes the hole, we can merge with 5618 * the preceding extent rather than do the shift. 5619 * 5620 * This function assumes the caller has verified a shift-by-merge is possible 5621 * with the provided extents via xfs_bmse_can_merge(). 5622 */ 5623 STATIC int 5624 xfs_bmse_merge( 5625 struct xfs_trans *tp, 5626 struct xfs_inode *ip, 5627 int whichfork, 5628 xfs_fileoff_t shift, /* shift fsb */ 5629 struct xfs_iext_cursor *icur, 5630 struct xfs_bmbt_irec *got, /* extent to shift */ 5631 struct xfs_bmbt_irec *left, /* preceding extent */ 5632 struct xfs_btree_cur *cur, 5633 int *logflags) /* output */ 5634 { 5635 struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork); 5636 struct xfs_bmbt_irec new; 5637 xfs_filblks_t blockcount; 5638 int error, i; 5639 struct xfs_mount *mp = ip->i_mount; 5640 5641 blockcount = left->br_blockcount + got->br_blockcount; 5642 5643 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL)); 5644 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); 5645 ASSERT(xfs_bmse_can_merge(left, got, shift)); 5646 5647 new = *left; 5648 new.br_blockcount = blockcount; 5649 5650 /* 5651 * Update the on-disk extent count, the btree if necessary and log the 5652 * inode. 5653 */ 5654 ifp->if_nextents--; 5655 *logflags |= XFS_ILOG_CORE; 5656 if (!cur) { 5657 *logflags |= XFS_ILOG_DEXT; 5658 goto done; 5659 } 5660 5661 /* lookup and remove the extent to merge */ 5662 error = xfs_bmbt_lookup_eq(cur, got, &i); 5663 if (error) 5664 return error; 5665 if (XFS_IS_CORRUPT(mp, i != 1)) 5666 return -EFSCORRUPTED; 5667 5668 error = xfs_btree_delete(cur, &i); 5669 if (error) 5670 return error; 5671 if (XFS_IS_CORRUPT(mp, i != 1)) 5672 return -EFSCORRUPTED; 5673 5674 /* lookup and update size of the previous extent */ 5675 error = xfs_bmbt_lookup_eq(cur, left, &i); 5676 if (error) 5677 return error; 5678 if (XFS_IS_CORRUPT(mp, i != 1)) 5679 return -EFSCORRUPTED; 5680 5681 error = xfs_bmbt_update(cur, &new); 5682 if (error) 5683 return error; 5684 5685 /* change to extent format if required after extent removal */ 5686 error = xfs_bmap_btree_to_extents(tp, ip, cur, logflags, whichfork); 5687 if (error) 5688 return error; 5689 5690 done: 5691 xfs_iext_remove(ip, icur, 0); 5692 xfs_iext_prev(ifp, icur); 5693 xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), icur, 5694 &new); 5695 5696 /* update reverse mapping. 
/*
 * A bmap extent shift adjusts the file offset of an extent to fill a preceding
 * hole in the file.  If an extent shift would result in the extent being fully
 * adjacent to the extent that currently precedes the hole, we can merge with
 * the preceding extent rather than do the shift.
 *
 * This function assumes the caller has verified a shift-by-merge is possible
 * with the provided extents via xfs_bmse_can_merge().
 */
STATIC int
xfs_bmse_merge(
	struct xfs_trans		*tp,
	struct xfs_inode		*ip,
	int				whichfork,
	xfs_fileoff_t			shift,		/* shift fsb */
	struct xfs_iext_cursor		*icur,
	struct xfs_bmbt_irec		*got,		/* extent to shift */
	struct xfs_bmbt_irec		*left,		/* preceding extent */
	struct xfs_btree_cur		*cur,
	int				*logflags)	/* output */
{
	struct xfs_ifork		*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_bmbt_irec		new;
	xfs_filblks_t			blockcount;
	int				error, i;
	struct xfs_mount		*mp = ip->i_mount;

	blockcount = left->br_blockcount + got->br_blockcount;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(xfs_bmse_can_merge(left, got, shift));

	new = *left;
	new.br_blockcount = blockcount;

	/*
	 * Update the on-disk extent count, update the btree if necessary, and
	 * log the inode.
	 */
	ifp->if_nextents--;
	*logflags |= XFS_ILOG_CORE;
	if (!cur) {
		*logflags |= XFS_ILOG_DEXT;
		goto done;
	}

	/* lookup and remove the extent to merge */
	error = xfs_bmbt_lookup_eq(cur, got, &i);
	if (error)
		return error;
	if (XFS_IS_CORRUPT(mp, i != 1))
		return -EFSCORRUPTED;

	error = xfs_btree_delete(cur, &i);
	if (error)
		return error;
	if (XFS_IS_CORRUPT(mp, i != 1))
		return -EFSCORRUPTED;

	/* lookup and update size of the previous extent */
	error = xfs_bmbt_lookup_eq(cur, left, &i);
	if (error)
		return error;
	if (XFS_IS_CORRUPT(mp, i != 1))
		return -EFSCORRUPTED;

	error = xfs_bmbt_update(cur, &new);
	if (error)
		return error;

	/* change to extent format if required after extent removal */
	error = xfs_bmap_btree_to_extents(tp, ip, cur, logflags, whichfork);
	if (error)
		return error;

done:
	xfs_iext_remove(ip, icur, 0);
	xfs_iext_prev(ifp, icur);
	xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), icur,
			&new);

	/* update reverse mapping. rmap functions merge the rmaps for us */
	xfs_rmap_unmap_extent(tp, ip, whichfork, got);
	memcpy(&new, got, sizeof(new));
	new.br_startoff = left->br_startoff + left->br_blockcount;
	xfs_rmap_map_extent(tp, ip, whichfork, &new);
	return 0;
}

/*
 * Shift a single extent to the new file offset startoff, updating the incore
 * extent list, the bmap btree record (if there is one) and the reverse
 * mapping.
 */
static int
xfs_bmap_shift_update_extent(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_iext_cursor	*icur,
	struct xfs_bmbt_irec	*got,
	struct xfs_btree_cur	*cur,
	int			*logflags,
	xfs_fileoff_t		startoff)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_bmbt_irec	prev = *got;
	int			error, i;

	*logflags |= XFS_ILOG_CORE;

	got->br_startoff = startoff;

	if (cur) {
		error = xfs_bmbt_lookup_eq(cur, &prev, &i);
		if (error)
			return error;
		if (XFS_IS_CORRUPT(mp, i != 1))
			return -EFSCORRUPTED;

		error = xfs_bmbt_update(cur, got);
		if (error)
			return error;
	} else {
		*logflags |= XFS_ILOG_DEXT;
	}

	xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), icur,
			got);

	/* update reverse mapping */
	xfs_rmap_unmap_extent(tp, ip, whichfork, &prev);
	xfs_rmap_map_extent(tp, ip, whichfork, got);
	return 0;
}

int
xfs_bmap_collapse_extents(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		*next_fsb,
	xfs_fileoff_t		offset_shift_fsb,
	bool			*done)
{
	int			whichfork = XFS_DATA_FORK;
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_btree_cur	*cur = NULL;
	struct xfs_bmbt_irec	got, prev;
	struct xfs_iext_cursor	icur;
	xfs_fileoff_t		new_startoff;
	int			error = 0;
	int			logflags = 0;

	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
		return -EFSCORRUPTED;
	}

	if (xfs_is_shutdown(mp))
		return -EIO;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL));

	error = xfs_iread_extents(tp, ip, whichfork);
	if (error)
		return error;

	if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
		cur->bc_ino.flags = 0;
	}

	if (!xfs_iext_lookup_extent(ip, ifp, *next_fsb, &icur, &got)) {
		*done = true;
		goto del_cursor;
	}
	if (XFS_IS_CORRUPT(mp, isnullstartblock(got.br_startblock))) {
		error = -EFSCORRUPTED;
		goto del_cursor;
	}

	new_startoff = got.br_startoff - offset_shift_fsb;
	if (xfs_iext_peek_prev_extent(ifp, &icur, &prev)) {
		if (new_startoff < prev.br_startoff + prev.br_blockcount) {
			error = -EINVAL;
			goto del_cursor;
		}

		if (xfs_bmse_can_merge(&prev, &got, offset_shift_fsb)) {
			error = xfs_bmse_merge(tp, ip, whichfork,
					offset_shift_fsb, &icur, &got, &prev,
					cur, &logflags);
			if (error)
				goto del_cursor;
			goto done;
		}
	} else {
		if (got.br_startoff < offset_shift_fsb) {
			error = -EINVAL;
			goto del_cursor;
		}
	}

	error = xfs_bmap_shift_update_extent(tp, ip, whichfork, &icur, &got,
			cur, &logflags, new_startoff);
	if (error)
		goto del_cursor;

done:
	if (!xfs_iext_next_extent(ifp, &icur, &got)) {
		*done = true;
		goto del_cursor;
	}

	*next_fsb = got.br_startoff;
del_cursor:
	if (cur)
		xfs_btree_del_cursor(cur, error);
	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	return error;
}
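/*
 * Editor's illustrative sketch, not upstream code: the driver loop a caller
 * such as the fallocate collapse-range path uses.  Each call shifts (or
 * merges) at most one extent and hands back the next offset to process in
 * *next_fsb; transaction rolling between iterations is omitted for brevity.
 */
static inline int
xfs_collapse_loop_sketch(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		next_fsb,	/* first offset past the hole */
	xfs_fileoff_t		shift_fsb)	/* distance to shift left */
{
	bool			done = false;
	int			error = 0;

	while (!done && !error)
		error = xfs_bmap_collapse_extents(tp, ip, &next_fsb,
				shift_fsb, &done);
	return error;
}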
/* Make sure we won't be right-shifting an extent past the maximum bound. */
int
xfs_bmap_can_insert_extents(
	struct xfs_inode	*ip,
	xfs_fileoff_t		off,
	xfs_fileoff_t		shift)
{
	struct xfs_bmbt_irec	got;
	int			is_empty;
	int			error = 0;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));

	if (xfs_is_shutdown(ip->i_mount))
		return -EIO;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_bmap_last_extent(NULL, ip, XFS_DATA_FORK, &got, &is_empty);
	if (!error && !is_empty && got.br_startoff >= off &&
	    ((got.br_startoff + shift) & BMBT_STARTOFF_MASK) < got.br_startoff)
		error = -EINVAL;
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	return error;
}
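/*
 * Worked example (editor's addition): BMBT_STARTOFF_MASK bounds the on-disk
 * br_startoff field.  If the last extent in range starts at
 * BMBT_STARTOFF_MASK - 10 and the caller asks for a right shift of 20, the
 * masked sum wraps around to 9, which is less than the original startoff, so
 * the check above rejects the shift with -EINVAL rather than letting the
 * file offset overflow on disk.
 */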
int
xfs_bmap_insert_extents(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		*next_fsb,
	xfs_fileoff_t		offset_shift_fsb,
	bool			*done,
	xfs_fileoff_t		stop_fsb)
{
	int			whichfork = XFS_DATA_FORK;
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_btree_cur	*cur = NULL;
	struct xfs_bmbt_irec	got, next;
	struct xfs_iext_cursor	icur;
	xfs_fileoff_t		new_startoff;
	int			error = 0;
	int			logflags = 0;

	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
		return -EFSCORRUPTED;
	}

	if (xfs_is_shutdown(mp))
		return -EIO;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL));

	error = xfs_iread_extents(tp, ip, whichfork);
	if (error)
		return error;

	if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
		cur->bc_ino.flags = 0;
	}

	if (*next_fsb == NULLFSBLOCK) {
		xfs_iext_last(ifp, &icur);
		if (!xfs_iext_get_extent(ifp, &icur, &got) ||
		    stop_fsb > got.br_startoff) {
			*done = true;
			goto del_cursor;
		}
	} else {
		if (!xfs_iext_lookup_extent(ip, ifp, *next_fsb, &icur, &got)) {
			*done = true;
			goto del_cursor;
		}
	}
	if (XFS_IS_CORRUPT(mp, isnullstartblock(got.br_startblock))) {
		error = -EFSCORRUPTED;
		goto del_cursor;
	}

	if (XFS_IS_CORRUPT(mp, stop_fsb > got.br_startoff)) {
		error = -EFSCORRUPTED;
		goto del_cursor;
	}

	new_startoff = got.br_startoff + offset_shift_fsb;
	if (xfs_iext_peek_next_extent(ifp, &icur, &next)) {
		if (new_startoff + got.br_blockcount > next.br_startoff) {
			error = -EINVAL;
			goto del_cursor;
		}

		/*
		 * Unlike a left shift (which involves a hole punch), a right
		 * shift does not modify extent neighbors in any way.  We
		 * should never find mergeable extents in this scenario.
		 * Check anyway and warn if we encounter two extents that
		 * could be one.
		 */
		if (xfs_bmse_can_merge(&got, &next, offset_shift_fsb))
			WARN_ON_ONCE(1);
	}

	error = xfs_bmap_shift_update_extent(tp, ip, whichfork, &icur, &got,
			cur, &logflags, new_startoff);
	if (error)
		goto del_cursor;

	if (!xfs_iext_prev_extent(ifp, &icur, &got) ||
	    stop_fsb >= got.br_startoff + got.br_blockcount) {
		*done = true;
		goto del_cursor;
	}

	*next_fsb = got.br_startoff;
del_cursor:
	if (cur)
		xfs_btree_del_cursor(cur, error);
	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	return error;
}
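/*
 * Editor's illustrative sketch, not upstream code: the driver loop a caller
 * such as the fallocate insert-range path uses.  Passing NULLFSBLOCK starts
 * the walk at the last extent, and the shift proceeds right-to-left until
 * stop_fsb is reached; again, transaction rolling is omitted for brevity.
 */
static inline int
xfs_insert_loop_sketch(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		stop_fsb,	/* where the new hole begins */
	xfs_fileoff_t		shift_fsb)	/* distance to shift right */
{
	xfs_fileoff_t		next_fsb = NULLFSBLOCK;
	bool			done = false;
	int			error = 0;

	while (!done && !error)
		error = xfs_bmap_insert_extents(tp, ip, &next_fsb, shift_fsb,
				&done, stop_fsb);
	return error;
}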
/*
 * Split an extent into two extents at the file offset split_fsb, so that
 * split_fsb becomes the first block of the new (second) extent.  If
 * split_fsb lies in a hole or at the first block of an extent, there is
 * nothing to split and we return 0.
 */
int
xfs_bmap_split_extent(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		split_fsb)
{
	int			whichfork = XFS_DATA_FORK;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_btree_cur	*cur = NULL;
	struct xfs_bmbt_irec	got;
	struct xfs_bmbt_irec	new;		/* split extent */
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fsblock_t		gotblkcnt;	/* new block count for got */
	struct xfs_iext_cursor	icur;
	int			error = 0;
	int			logflags = 0;
	int			i = 0;

	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
		return -EFSCORRUPTED;
	}

	if (xfs_is_shutdown(mp))
		return -EIO;

	/* Read in all the extents */
	error = xfs_iread_extents(tp, ip, whichfork);
	if (error)
		return error;

	/*
	 * If there are no extents, or split_fsb lies in a hole, we are done.
	 */
	if (!xfs_iext_lookup_extent(ip, ifp, split_fsb, &icur, &got) ||
	    got.br_startoff >= split_fsb)
		return 0;

	gotblkcnt = split_fsb - got.br_startoff;
	new.br_startoff = split_fsb;
	new.br_startblock = got.br_startblock + gotblkcnt;
	new.br_blockcount = got.br_blockcount - gotblkcnt;
	new.br_state = got.br_state;

	if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
		cur->bc_ino.flags = 0;
		error = xfs_bmbt_lookup_eq(cur, &got, &i);
		if (error)
			goto del_cursor;
		if (XFS_IS_CORRUPT(mp, i != 1)) {
			error = -EFSCORRUPTED;
			goto del_cursor;
		}
	}

	got.br_blockcount = gotblkcnt;
	xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), &icur,
			&got);

	logflags = XFS_ILOG_CORE;
	if (cur) {
		error = xfs_bmbt_update(cur, &got);
		if (error)
			goto del_cursor;
	} else
		logflags |= XFS_ILOG_DEXT;

	/* Add new extent */
	xfs_iext_next(ifp, &icur);
	xfs_iext_insert(ip, &icur, &new, 0);
	ifp->if_nextents++;

	if (cur) {
		error = xfs_bmbt_lookup_eq(cur, &new, &i);
		if (error)
			goto del_cursor;
		if (XFS_IS_CORRUPT(mp, i != 0)) {
			error = -EFSCORRUPTED;
			goto del_cursor;
		}
		error = xfs_btree_insert(cur, &i);
		if (error)
			goto del_cursor;
		if (XFS_IS_CORRUPT(mp, i != 1)) {
			error = -EFSCORRUPTED;
			goto del_cursor;
		}
	}

	/*
	 * Convert to a btree if necessary.
	 */
	if (xfs_bmap_needs_btree(ip, whichfork)) {
		int tmp_logflags;	/* partial log flag return val */

		ASSERT(cur == NULL);
		error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0,
				&tmp_logflags, whichfork);
		logflags |= tmp_logflags;
	}

del_cursor:
	if (cur) {
		cur->bc_ino.allocated = 0;
		xfs_btree_del_cursor(cur, error);
	}

	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	return error;
}

/* Deferred mapping is only for real extents in the data fork. */
static bool
xfs_bmap_is_update_needed(
	struct xfs_bmbt_irec	*bmap)
{
	return	bmap->br_startblock != HOLESTARTBLOCK &&
		bmap->br_startblock != DELAYSTARTBLOCK;
}

/* Record a bmap intent. */
static int
__xfs_bmap_add(
	struct xfs_trans		*tp,
	enum xfs_bmap_intent_type	type,
	struct xfs_inode		*ip,
	int				whichfork,
	struct xfs_bmbt_irec		*bmap)
{
	struct xfs_bmap_intent		*bi;

	trace_xfs_bmap_defer(tp->t_mountp,
			XFS_FSB_TO_AGNO(tp->t_mountp, bmap->br_startblock),
			type,
			XFS_FSB_TO_AGBNO(tp->t_mountp, bmap->br_startblock),
			ip->i_ino, whichfork,
			bmap->br_startoff,
			bmap->br_blockcount,
			bmap->br_state);

	bi = kmem_cache_alloc(xfs_bmap_intent_cache, GFP_NOFS | __GFP_NOFAIL);
	INIT_LIST_HEAD(&bi->bi_list);
	bi->bi_type = type;
	bi->bi_owner = ip;
	bi->bi_whichfork = whichfork;
	bi->bi_bmap = *bmap;

	xfs_bmap_update_get_group(tp->t_mountp, bi);
	xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_BMAP, &bi->bi_list);
	return 0;
}

/* Map an extent into a file. */
void
xfs_bmap_map_extent(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_bmbt_irec	*PREV)
{
	if (!xfs_bmap_is_update_needed(PREV))
		return;

	__xfs_bmap_add(tp, XFS_BMAP_MAP, ip, XFS_DATA_FORK, PREV);
}

/* Unmap an extent out of a file. */
void
xfs_bmap_unmap_extent(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_bmbt_irec	*PREV)
{
	if (!xfs_bmap_is_update_needed(PREV))
		return;

	__xfs_bmap_add(tp, XFS_BMAP_UNMAP, ip, XFS_DATA_FORK, PREV);
}
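/*
 * Editor's illustrative sketch, not upstream code: queueing an unmap/map
 * pair of bmap intents in one transaction, roughly as the reflink remap
 * path does.  The deferred-ops machinery later replays each intent through
 * xfs_bmap_finish_one() below.
 */
static inline void
xfs_bmap_defer_remap_sketch(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_bmbt_irec	*old_map,	/* mapping to remove */
	struct xfs_bmbt_irec	*new_map)	/* mapping to add */
{
	xfs_bmap_unmap_extent(tp, ip, old_map);
	xfs_bmap_map_extent(tp, ip, new_map);
}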
 */
int
xfs_bmap_finish_one(
	struct xfs_trans		*tp,
	struct xfs_bmap_intent		*bi)
{
	struct xfs_bmbt_irec		*bmap = &bi->bi_bmap;
	int				error = 0;

	ASSERT(tp->t_highest_agno == NULLAGNUMBER);

	trace_xfs_bmap_deferred(tp->t_mountp,
			XFS_FSB_TO_AGNO(tp->t_mountp, bmap->br_startblock),
			bi->bi_type,
			XFS_FSB_TO_AGBNO(tp->t_mountp, bmap->br_startblock),
			bi->bi_owner->i_ino, bi->bi_whichfork,
			bmap->br_startoff, bmap->br_blockcount,
			bmap->br_state);

	if (WARN_ON_ONCE(bi->bi_whichfork != XFS_DATA_FORK))
		return -EFSCORRUPTED;

	if (XFS_TEST_ERROR(false, tp->t_mountp,
			XFS_ERRTAG_BMAP_FINISH_ONE))
		return -EIO;

	switch (bi->bi_type) {
	case XFS_BMAP_MAP:
		error = xfs_bmapi_remap(tp, bi->bi_owner, bmap->br_startoff,
				bmap->br_blockcount, bmap->br_startblock, 0);
		bmap->br_blockcount = 0;
		break;
	case XFS_BMAP_UNMAP:
		error = __xfs_bunmapi(tp, bi->bi_owner, bmap->br_startoff,
				&bmap->br_blockcount, XFS_BMAPI_REMAP, 1);
		break;
	default:
		ASSERT(0);
		error = -EFSCORRUPTED;
	}

	return error;
}

/* Check that an inode's extent does not have invalid flags or bad ranges. */
xfs_failaddr_t
xfs_bmap_validate_extent(
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_bmbt_irec	*irec)
{
	struct xfs_mount	*mp = ip->i_mount;

	if (!xfs_verify_fileext(mp, irec->br_startoff, irec->br_blockcount))
		return __this_address;

	if (XFS_IS_REALTIME_INODE(ip) && whichfork == XFS_DATA_FORK) {
		if (!xfs_verify_rtext(mp, irec->br_startblock,
				irec->br_blockcount))
			return __this_address;
	} else {
		if (!xfs_verify_fsbext(mp, irec->br_startblock,
				irec->br_blockcount))
			return __this_address;
	}
	if (irec->br_state != XFS_EXT_NORM && whichfork != XFS_DATA_FORK)
		return __this_address;
	return NULL;
}

int __init
xfs_bmap_intent_init_cache(void)
{
	xfs_bmap_intent_cache = kmem_cache_create("xfs_bmap_intent",
			sizeof(struct xfs_bmap_intent),
			0, 0, NULL);

	return xfs_bmap_intent_cache != NULL ? 0 : -ENOMEM;
}

void
xfs_bmap_intent_destroy_cache(void)
{
	kmem_cache_destroy(xfs_bmap_intent_cache);
	xfs_bmap_intent_cache = NULL;
}
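/*
 * Editor's illustrative sketch, not upstream code: how a verifier-style
 * caller consumes xfs_bmap_validate_extent() above.  A NULL return means the
 * record passed all checks; a non-NULL xfs_failaddr_t pinpoints the failing
 * check for corruption reporting.
 */
static inline bool
xfs_bmap_extent_ok_sketch(
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_bmbt_irec	*irec)
{
	return xfs_bmap_validate_extent(ip, whichfork, irec) == NULL;
}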