// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_dir2.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_extfree_item.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_rtalloc.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_trans_space.h"
#include "xfs_buf_item.h"
#include "xfs_trace.h"
#include "xfs_symlink.h"
#include "xfs_attr_leaf.h"
#include "xfs_filestream.h"
#include "xfs_rmap.h"
#include "xfs_ag_resv.h"
#include "xfs_refcount.h"
#include "xfs_icache.h"


kmem_zone_t		*xfs_bmap_free_item_zone;

/*
 * Miscellaneous helper functions
 */

/*
 * Compute and fill in the value of the maximum depth of a bmap btree
 * in this filesystem.  Done once, during mount.
 */
void
xfs_bmap_compute_maxlevels(
	xfs_mount_t	*mp,		/* file system mount structure */
	int		whichfork)	/* data or attr fork */
{
	int		level;		/* btree level */
	uint		maxblocks;	/* max blocks at this level */
	uint		maxleafents;	/* max leaf entries possible */
	int		maxrootrecs;	/* max records in root block */
	int		minleafrecs;	/* min records in leaf block */
	int		minnoderecs;	/* min records in node block */
	int		sz;		/* root block size */

	/*
	 * The maximum number of extents in a file, hence the maximum
	 * number of leaf entries, is controlled by the type of di_nextents
	 * (a signed 32-bit number, xfs_extnum_t), or by di_anextents
	 * (a signed 16-bit number, xfs_aextnum_t).
	 *
	 * Note that we can no longer assume that if we are in ATTR1 that
	 * the fork offset of all the inodes will be
	 * (xfs_default_attroffset(ip) >> 3) because we could have mounted
	 * with ATTR2 and then mounted back with ATTR1, keeping the
	 * di_forkoff's fixed but probably at various positions. Therefore,
	 * for both ATTR1 and ATTR2 we have to assume the worst case scenario
	 * of a minimum size available.
	 */
	if (whichfork == XFS_DATA_FORK) {
		maxleafents = MAXEXTNUM;
		sz = XFS_BMDR_SPACE_CALC(MINDBTPTRS);
	} else {
		maxleafents = MAXAEXTNUM;
		sz = XFS_BMDR_SPACE_CALC(MINABTPTRS);
	}
	maxrootrecs = xfs_bmdr_maxrecs(sz, 0);
	minleafrecs = mp->m_bmap_dmnr[0];
	minnoderecs = mp->m_bmap_dmnr[1];
	maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs;
	for (level = 1; maxblocks > 1; level++) {
		if (maxblocks <= maxrootrecs)
			maxblocks = 1;
		else
			maxblocks = (maxblocks + minnoderecs - 1) / minnoderecs;
	}
	mp->m_bm_maxlevels[whichfork] = level;
}

STATIC int				/* error */
xfs_bmbt_lookup_eq(
	struct xfs_btree_cur	*cur,
	struct xfs_bmbt_irec	*irec,
	int			*stat)	/* success/failure */
{
	cur->bc_rec.b = *irec;
	return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
}

STATIC int				/* error */
xfs_bmbt_lookup_first(
	struct xfs_btree_cur	*cur,
	int			*stat)	/* success/failure */
{
	cur->bc_rec.b.br_startoff = 0;
	cur->bc_rec.b.br_startblock = 0;
	cur->bc_rec.b.br_blockcount = 0;
	return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
}

/*
 * Check if the inode needs to be converted to btree format.
 */
static inline bool xfs_bmap_needs_btree(struct xfs_inode *ip, int whichfork)
{
	return whichfork != XFS_COW_FORK &&
		XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
		XFS_IFORK_NEXTENTS(ip, whichfork) >
			XFS_IFORK_MAXEXT(ip, whichfork);
}

/*
 * Check if the inode should be converted to extent format.
 */
static inline bool xfs_bmap_wants_extents(struct xfs_inode *ip, int whichfork)
{
	return whichfork != XFS_COW_FORK &&
		XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE &&
		XFS_IFORK_NEXTENTS(ip, whichfork) <=
			XFS_IFORK_MAXEXT(ip, whichfork);
}

/*
 * Update the record referred to by cur to the value given by irec.
 * This either works (return 0) or gets an EFSCORRUPTED error.
 */
STATIC int
xfs_bmbt_update(
	struct xfs_btree_cur	*cur,
	struct xfs_bmbt_irec	*irec)
{
	union xfs_btree_rec	rec;

	xfs_bmbt_disk_set_all(&rec.bmbt, irec);
	return xfs_btree_update(cur, &rec);
}

/*
 * Compute the worst-case number of indirect blocks that will be used
 * for ip's delayed extent of length "len".
 */
STATIC xfs_filblks_t
xfs_bmap_worst_indlen(
	xfs_inode_t	*ip,		/* incore inode pointer */
	xfs_filblks_t	len)		/* delayed extent length */
{
	int		level;		/* btree level number */
	int		maxrecs;	/* maximum record count at this level */
	xfs_mount_t	*mp;		/* mount structure */
	xfs_filblks_t	rval;		/* return value */

	mp = ip->i_mount;
	maxrecs = mp->m_bmap_dmxr[0];
	for (level = 0, rval = 0;
	     level < XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK);
	     level++) {
		len += maxrecs - 1;
		do_div(len, maxrecs);
		rval += len;
		if (len == 1)
			return rval + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) -
				level - 1;
		if (level == 0)
			maxrecs = mp->m_bmap_dmxr[1];
	}
	return rval;
}
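/*
 * Illustrative worked example for the two helpers above (the record
 * counts are made-up round numbers, not real geometry):
 * xfs_bmap_worst_indlen() just sums one ceiling division per btree
 * level:
 *
 *	rval = ceil(len/leafrecs) + ceil(ceil(len/leafrecs)/noderecs) + ...
 *
 * With len = 1000000, leafrecs = 250 and noderecs = 330 that would be
 * 4000 leaf blocks, then ceil(4000/330) = 13 node blocks, then one
 * block for each remaining level, i.e. on the order of 4014 indirect
 * blocks in the worst case.  xfs_bmap_compute_maxlevels() runs the same
 * recurrence the other way around to bound the tree height at mount
 * time.
 */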
/*
 * Calculate the default attribute fork offset for newly created inodes.
 */
uint
xfs_default_attroffset(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	uint			offset;

	if (mp->m_sb.sb_inodesize == 256) {
		offset = XFS_LITINO(mp, ip->i_d.di_version) -
				XFS_BMDR_SPACE_CALC(MINABTPTRS);
	} else {
		offset = XFS_BMDR_SPACE_CALC(6 * MINABTPTRS);
	}

	ASSERT(offset < XFS_LITINO(mp, ip->i_d.di_version));
	return offset;
}

/*
 * Helper routine to reset inode di_forkoff field when switching
 * attribute fork from local to extent format - we reset it where
 * possible to make space available for inline data fork extents.
 */
STATIC void
xfs_bmap_forkoff_reset(
	xfs_inode_t	*ip,
	int		whichfork)
{
	if (whichfork == XFS_ATTR_FORK &&
	    ip->i_d.di_format != XFS_DINODE_FMT_DEV &&
	    ip->i_d.di_format != XFS_DINODE_FMT_BTREE) {
		uint	dfl_forkoff = xfs_default_attroffset(ip) >> 3;

		if (dfl_forkoff > ip->i_d.di_forkoff)
			ip->i_d.di_forkoff = dfl_forkoff;
	}
}
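/*
 * Worked example for the two helpers above (illustrative numbers only):
 * on a 256-byte-inode filesystem the default attr fork offset is placed
 * so that one minimal bmap btree root still fits behind it:
 *
 *	offset = XFS_LITINO(mp, version) - XFS_BMDR_SPACE_CALC(MINABTPTRS)
 *
 * If XFS_LITINO() were, say, 156 bytes and the minimal root 36 bytes,
 * the attr fork would start 120 bytes into the literal area, and
 * xfs_bmap_forkoff_reset() would raise a smaller di_forkoff back up to
 * 120 >> 3 = 15 (di_forkoff is in units of 8 bytes).
 */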
#ifdef DEBUG
STATIC struct xfs_buf *
xfs_bmap_get_bp(
	struct xfs_btree_cur	*cur,
	xfs_fsblock_t		bno)
{
	struct xfs_log_item	*lip;
	int			i;

	if (!cur)
		return NULL;

	for (i = 0; i < XFS_BTREE_MAXLEVELS; i++) {
		if (!cur->bc_bufs[i])
			break;
		if (XFS_BUF_ADDR(cur->bc_bufs[i]) == bno)
			return cur->bc_bufs[i];
	}

	/* Chase down all the log items to see if the bp is there */
	list_for_each_entry(lip, &cur->bc_tp->t_items, li_trans) {
		struct xfs_buf_log_item	*bip = (struct xfs_buf_log_item *)lip;

		if (bip->bli_item.li_type == XFS_LI_BUF &&
		    XFS_BUF_ADDR(bip->bli_buf) == bno)
			return bip->bli_buf;
	}

	return NULL;
}

STATIC void
xfs_check_block(
	struct xfs_btree_block	*block,
	xfs_mount_t		*mp,
	int			root,
	short			sz)
{
	int			i, j, dmxr;
	__be64			*pp, *thispa;	/* pointer to block address */
	xfs_bmbt_key_t		*prevp, *keyp;

	ASSERT(be16_to_cpu(block->bb_level) > 0);

	prevp = NULL;
	for (i = 1; i <= xfs_btree_get_numrecs(block); i++) {
		dmxr = mp->m_bmap_dmxr[0];
		keyp = XFS_BMBT_KEY_ADDR(mp, block, i);

		if (prevp) {
			ASSERT(be64_to_cpu(prevp->br_startoff) <
			       be64_to_cpu(keyp->br_startoff));
		}
		prevp = keyp;

		/*
		 * Compare the block numbers to see if there are dups.
		 */
		if (root)
			pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, i, sz);
		else
			pp = XFS_BMBT_PTR_ADDR(mp, block, i, dmxr);

		for (j = i + 1; j <= be16_to_cpu(block->bb_numrecs); j++) {
			if (root)
				thispa = XFS_BMAP_BROOT_PTR_ADDR(mp, block, j, sz);
			else
				thispa = XFS_BMBT_PTR_ADDR(mp, block, j, dmxr);
			if (*thispa == *pp) {
				xfs_warn(mp, "%s: thispa(%d) == pp(%d) %Ld",
					__func__, j, i,
					(unsigned long long)be64_to_cpu(*thispa));
				xfs_err(mp, "%s: ptrs are equal in node\n",
					__func__);
				xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
			}
		}
	}
}

/*
 * Check that the extents for the inode ip are in the right order in all
 * btree leaves. This becomes prohibitively expensive for large extent count
 * files, so don't bother with inodes that have more than 10,000 extents in
 * them. The btree record ordering checks will still be done, so for such large
 * bmapbt constructs that is going to catch most corruptions.
 */
STATIC void
xfs_bmap_check_leaf_extents(
	xfs_btree_cur_t		*cur,	/* btree cursor or null */
	xfs_inode_t		*ip,	/* incore inode pointer */
	int			whichfork) /* data or attr fork */
{
	struct xfs_btree_block	*block;	/* current btree block */
	xfs_fsblock_t		bno;	/* block # of "block" */
	xfs_buf_t		*bp;	/* buffer for "block" */
	int			error;	/* error return value */
	xfs_extnum_t		i = 0, j; /* index into the extents list */
	struct xfs_ifork	*ifp;	/* fork structure */
	int			level;	/* btree level, for checking */
	xfs_mount_t		*mp;	/* file system mount structure */
	__be64			*pp;	/* pointer to block address */
	xfs_bmbt_rec_t		*ep;	/* pointer to current extent */
	xfs_bmbt_rec_t		last = {0, 0}; /* last extent in prev block */
	xfs_bmbt_rec_t		*nextp;	/* pointer to next extent */
	int			bp_release = 0;

	if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE) {
		return;
	}

	/* skip large extent count inodes */
	if (ip->i_d.di_nextents > 10000)
		return;

	bno = NULLFSBLOCK;
	mp = ip->i_mount;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	block = ifp->if_broot;
	/*
	 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
	 */
	level = be16_to_cpu(block->bb_level);
	ASSERT(level > 0);
	xfs_check_block(block, mp, 1, ifp->if_broot_bytes);
	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
	bno = be64_to_cpu(*pp);

	ASSERT(bno != NULLFSBLOCK);
	ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
	ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);

	/*
	 * Go down the tree until leaf level is reached, following the first
	 * pointer (leftmost) at each level.
	 */
	while (level-- > 0) {
		/* See if buf is in cur first */
		bp_release = 0;
		bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
		if (!bp) {
			bp_release = 1;
			error = xfs_btree_read_bufl(mp, NULL, bno, 0, &bp,
						XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
			if (error)
				goto error_norelse;
		}
		block = XFS_BUF_TO_BLOCK(bp);
		if (level == 0)
			break;

		/*
		 * Check this block for basic sanity (increasing keys and
		 * no duplicate blocks).
		 */

		xfs_check_block(block, mp, 0, 0);
		pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
		bno = be64_to_cpu(*pp);
		XFS_WANT_CORRUPTED_GOTO(mp,
					xfs_verify_fsbno(mp, bno), error0);
		if (bp_release) {
			bp_release = 0;
			xfs_trans_brelse(NULL, bp);
		}
	}

	/*
	 * Here with bp and block set to the leftmost leaf node in the tree.
	 */
	i = 0;

	/*
	 * Loop over all leaf nodes checking that all extents are in the right order.
	 */
	for (;;) {
		xfs_fsblock_t	nextbno;
		xfs_extnum_t	num_recs;


		num_recs = xfs_btree_get_numrecs(block);

		/*
		 * Read-ahead the next leaf block, if any.
		 */

		nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);

		/*
		 * Check all the extents to make sure they are OK.
		 * If we had a previous block, the last entry should
		 * conform with the first entry in this one.
		 */

		ep = XFS_BMBT_REC_ADDR(mp, block, 1);
		if (i) {
			ASSERT(xfs_bmbt_disk_get_startoff(&last) +
			       xfs_bmbt_disk_get_blockcount(&last) <=
			       xfs_bmbt_disk_get_startoff(ep));
		}
		for (j = 1; j < num_recs; j++) {
			nextp = XFS_BMBT_REC_ADDR(mp, block, j + 1);
			ASSERT(xfs_bmbt_disk_get_startoff(ep) +
			       xfs_bmbt_disk_get_blockcount(ep) <=
			       xfs_bmbt_disk_get_startoff(nextp));
			ep = nextp;
		}

		last = *ep;
		i += num_recs;
		if (bp_release) {
			bp_release = 0;
			xfs_trans_brelse(NULL, bp);
		}
		bno = nextbno;
		/*
		 * If we've reached the end, stop.
		 */
		if (bno == NULLFSBLOCK)
			break;

		bp_release = 0;
		bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
		if (!bp) {
			bp_release = 1;
			error = xfs_btree_read_bufl(mp, NULL, bno, 0, &bp,
						XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
			if (error)
				goto error_norelse;
		}
		block = XFS_BUF_TO_BLOCK(bp);
	}

	return;

error0:
	xfs_warn(mp, "%s: at error0", __func__);
	if (bp_release)
		xfs_trans_brelse(NULL, bp);
error_norelse:
	xfs_warn(mp, "%s: BAD after btree leaves for %d extents",
		__func__, i);
	xfs_err(mp, "%s: CORRUPTED BTREE OR SOMETHING", __func__);
	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
	return;
}

/*
 * Validate that the bmbt_irecs being returned from bmapi are valid
 * given the caller's original parameters.  Specifically check the
 * ranges of the returned irecs to ensure that they only extend beyond
 * the given parameters if the XFS_BMAPI_ENTIRE flag was set.
 */
STATIC void
xfs_bmap_validate_ret(
	xfs_fileoff_t		bno,
	xfs_filblks_t		len,
	int			flags,
	xfs_bmbt_irec_t		*mval,
	int			nmap,
	int			ret_nmap)
{
	int			i;	/* index to map values */

	ASSERT(ret_nmap <= nmap);

	for (i = 0; i < ret_nmap; i++) {
		ASSERT(mval[i].br_blockcount > 0);
		if (!(flags & XFS_BMAPI_ENTIRE)) {
			ASSERT(mval[i].br_startoff >= bno);
			ASSERT(mval[i].br_blockcount <= len);
			ASSERT(mval[i].br_startoff + mval[i].br_blockcount <=
			       bno + len);
		} else {
			ASSERT(mval[i].br_startoff < bno + len);
			ASSERT(mval[i].br_startoff + mval[i].br_blockcount >
			       bno);
		}
		ASSERT(i == 0 ||
		       mval[i - 1].br_startoff + mval[i - 1].br_blockcount ==
		       mval[i].br_startoff);
		ASSERT(mval[i].br_startblock != DELAYSTARTBLOCK &&
		       mval[i].br_startblock != HOLESTARTBLOCK);
		ASSERT(mval[i].br_state == XFS_EXT_NORM ||
		       mval[i].br_state == XFS_EXT_UNWRITTEN);
	}
}

#else
#define xfs_bmap_check_leaf_extents(cur, ip, whichfork)		do { } while (0)
#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)	do { } while (0)
#endif /* DEBUG */

/*
 * bmap free list manipulation functions
 */
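/*
 * Usage sketch (illustrative, not a verbatim call site): a caller that
 * has just unmapped an extent defers the actual freeing to transaction
 * commit time, e.g.
 *
 *	xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork);
 *	xfs_bmap_add_free(tp, bno, len, &oinfo);
 *
 * The deferred-ops machinery then frees the blocks from the committed
 * transaction context, keeping the free-space btrees consistent with
 * the bmap change.  The same pattern appears in
 * xfs_bmap_btree_to_extents() below.
 */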
/*
 * Add the extent to the list of extents to be free at transaction end.
 * The list is maintained sorted (by block number).
 */
void
__xfs_bmap_add_free(
	struct xfs_trans		*tp,
	xfs_fsblock_t			bno,
	xfs_filblks_t			len,
	struct xfs_owner_info		*oinfo,
	bool				skip_discard)
{
	struct xfs_extent_free_item	*new;		/* new element */
#ifdef DEBUG
	struct xfs_mount		*mp = tp->t_mountp;
	xfs_agnumber_t			agno;
	xfs_agblock_t			agbno;

	ASSERT(bno != NULLFSBLOCK);
	ASSERT(len > 0);
	ASSERT(len <= MAXEXTLEN);
	ASSERT(!isnullstartblock(bno));
	agno = XFS_FSB_TO_AGNO(mp, bno);
	agbno = XFS_FSB_TO_AGBNO(mp, bno);
	ASSERT(agno < mp->m_sb.sb_agcount);
	ASSERT(agbno < mp->m_sb.sb_agblocks);
	ASSERT(len < mp->m_sb.sb_agblocks);
	ASSERT(agbno + len <= mp->m_sb.sb_agblocks);
#endif
	ASSERT(xfs_bmap_free_item_zone != NULL);

	new = kmem_zone_alloc(xfs_bmap_free_item_zone, KM_SLEEP);
	new->xefi_startblock = bno;
	new->xefi_blockcount = (xfs_extlen_t)len;
	if (oinfo)
		new->xefi_oinfo = *oinfo;
	else
		xfs_rmap_skip_owner_update(&new->xefi_oinfo);
	new->xefi_skip_discard = skip_discard;
	trace_xfs_bmap_free_defer(tp->t_mountp,
			XFS_FSB_TO_AGNO(tp->t_mountp, bno), 0,
			XFS_FSB_TO_AGBNO(tp->t_mountp, bno), len);
	xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_FREE, &new->xefi_list);
}

/*
 * Inode fork format manipulation functions
 */

/*
 * Transform a btree format file with only one leaf node, where the
 * extents list will fit in the inode, into an extents format file.
 * Since the file extents are already in-core, all we have to do is
 * give up the space for the btree root and pitch the leaf block.
 */
STATIC int				/* error */
xfs_bmap_btree_to_extents(
	xfs_trans_t		*tp,	/* transaction pointer */
	xfs_inode_t		*ip,	/* incore inode pointer */
	xfs_btree_cur_t		*cur,	/* btree cursor */
	int			*logflagsp, /* inode logging flags */
	int			whichfork)  /* data or attr fork */
{
	/* REFERENCED */
	struct xfs_btree_block	*cblock;/* child btree block */
	xfs_fsblock_t		cbno;	/* child block number */
	xfs_buf_t		*cbp;	/* child block's buffer */
	int			error;	/* error return value */
	struct xfs_ifork	*ifp;	/* inode fork data */
	xfs_mount_t		*mp;	/* mount point structure */
	__be64			*pp;	/* ptr to block address */
	struct xfs_btree_block	*rblock;/* root btree block */
	struct xfs_owner_info	oinfo;

	mp = ip->i_mount;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT(whichfork != XFS_COW_FORK);
	ASSERT(ifp->if_flags & XFS_IFEXTENTS);
	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE);
	rblock = ifp->if_broot;
	ASSERT(be16_to_cpu(rblock->bb_level) == 1);
	ASSERT(be16_to_cpu(rblock->bb_numrecs) == 1);
	ASSERT(xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0) == 1);
	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, ifp->if_broot_bytes);
	cbno = be64_to_cpu(*pp);
	*logflagsp = 0;
#ifdef DEBUG
	XFS_WANT_CORRUPTED_RETURN(cur->bc_mp,
			xfs_btree_check_lptr(cur, cbno, 1));
#endif
	error = xfs_btree_read_bufl(mp, tp, cbno, 0, &cbp, XFS_BMAP_BTREE_REF,
				&xfs_bmbt_buf_ops);
	if (error)
		return error;
	cblock = XFS_BUF_TO_BLOCK(cbp);
	if ((error = xfs_btree_check_block(cur, cblock, 0, cbp)))
		return error;
	xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork);
	xfs_bmap_add_free(cur->bc_tp, cbno, 1, &oinfo);
	ip->i_d.di_nblocks--;
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
	xfs_trans_binval(tp, cbp);
	if (cur->bc_bufs[0] == cbp)
		cur->bc_bufs[0] = NULL;
	xfs_iroot_realloc(ip, -1, whichfork);
	ASSERT(ifp->if_broot == NULL);
	ASSERT((ifp->if_flags & XFS_IFBROOT) == 0);
	XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
	*logflagsp = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
	return 0;
}
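/*
 * Illustrative before/after of the conversion above (not to scale):
 *
 *	before:  inode root (level 1, 1 record) ---> single leaf block
 *	after:   inode holds the extent list directly; the leaf block is
 *		 invalidated in this transaction and queued for freeing.
 *
 * The incore extent list is untouched; only the on-disk representation
 * changes, which is why the function merely logs the inode core and the
 * extent list (XFS_ILOG_CORE | xfs_ilog_fext()).
 */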
/*
 * Convert an extents-format file into a btree-format file.
 * The new file will have a root block (in the inode) and a single child block.
 */
STATIC int					/* error */
xfs_bmap_extents_to_btree(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode pointer */
	struct xfs_btree_cur	**curp,		/* cursor returned to caller */
	int			wasdel,		/* converting a delayed alloc */
	int			*logflagsp,	/* inode logging flags */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_btree_block	*ablock;	/* allocated (child) bt block */
	struct xfs_buf		*abp;		/* buffer for ablock */
	struct xfs_alloc_arg	args;		/* allocation arguments */
	struct xfs_bmbt_rec	*arp;		/* child record pointer */
	struct xfs_btree_block	*block;		/* btree root block */
	struct xfs_btree_cur	*cur;		/* bmap btree cursor */
	int			error;		/* error return value */
	struct xfs_ifork	*ifp;		/* inode fork pointer */
	struct xfs_bmbt_key	*kp;		/* root block key pointer */
	struct xfs_mount	*mp;		/* mount structure */
	xfs_bmbt_ptr_t		*pp;		/* root block address pointer */
	struct xfs_iext_cursor	icur;
	struct xfs_bmbt_irec	rec;
	xfs_extnum_t		cnt = 0;

	mp = ip->i_mount;
	ASSERT(whichfork != XFS_COW_FORK);
	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS);

	/*
	 * Make space in the inode incore. This needs to be undone if we fail
	 * to expand the root.
	 */
	xfs_iroot_realloc(ip, 1, whichfork);
	ifp->if_flags |= XFS_IFBROOT;

	/*
	 * Fill in the root.
	 */
	block = ifp->if_broot;
	xfs_btree_init_block_int(mp, block, XFS_BUF_DADDR_NULL,
				 XFS_BTNUM_BMAP, 1, 1, ip->i_ino,
				 XFS_BTREE_LONG_PTRS);
	/*
	 * Need a cursor.  Can't allocate until bb_level is filled in.
	 */
	cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
	cur->bc_private.b.flags = wasdel ? XFS_BTCUR_BPRV_WASDEL : 0;
	/*
	 * Convert to a btree with two levels, one record in root.
	 */
	XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_BTREE);
	memset(&args, 0, sizeof(args));
	args.tp = tp;
	args.mp = mp;
	xfs_rmap_ino_bmbt_owner(&args.oinfo, ip->i_ino, whichfork);
	if (tp->t_firstblock == NULLFSBLOCK) {
		args.type = XFS_ALLOCTYPE_START_BNO;
		args.fsbno = XFS_INO_TO_FSB(mp, ip->i_ino);
	} else if (tp->t_flags & XFS_TRANS_LOWMODE) {
		args.type = XFS_ALLOCTYPE_START_BNO;
		args.fsbno = tp->t_firstblock;
	} else {
		args.type = XFS_ALLOCTYPE_NEAR_BNO;
		args.fsbno = tp->t_firstblock;
	}
	args.minlen = args.maxlen = args.prod = 1;
	args.wasdel = wasdel;
	*logflagsp = 0;
	error = xfs_alloc_vextent(&args);
	if (error)
		goto out_root_realloc;

	if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) {
		error = -ENOSPC;
		goto out_root_realloc;
	}

	/*
	 * Allocation can't fail, the space was reserved.
	 */
	ASSERT(tp->t_firstblock == NULLFSBLOCK ||
	       args.agno >= XFS_FSB_TO_AGNO(mp, tp->t_firstblock));
	tp->t_firstblock = args.fsbno;
	cur->bc_private.b.allocated++;
	ip->i_d.di_nblocks++;
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
	abp = xfs_btree_get_bufl(mp, tp, args.fsbno, 0);
	if (!abp) {
		error = -EFSCORRUPTED;
		goto out_unreserve_dquot;
	}

	/*
	 * Fill in the child block.
	 */
	abp->b_ops = &xfs_bmbt_buf_ops;
	ablock = XFS_BUF_TO_BLOCK(abp);
	xfs_btree_init_block_int(mp, ablock, abp->b_bn,
				XFS_BTNUM_BMAP, 0, 0, ip->i_ino,
				XFS_BTREE_LONG_PTRS);

	for_each_xfs_iext(ifp, &icur, &rec) {
		if (isnullstartblock(rec.br_startblock))
			continue;
		arp = XFS_BMBT_REC_ADDR(mp, ablock, 1 + cnt);
		xfs_bmbt_disk_set_all(arp, &rec);
		cnt++;
	}
	ASSERT(cnt == XFS_IFORK_NEXTENTS(ip, whichfork));
	xfs_btree_set_numrecs(ablock, cnt);

	/*
	 * Fill in the root key and pointer.
	 */
	kp = XFS_BMBT_KEY_ADDR(mp, block, 1);
	arp = XFS_BMBT_REC_ADDR(mp, ablock, 1);
	kp->br_startoff = cpu_to_be64(xfs_bmbt_disk_get_startoff(arp));
	pp = XFS_BMBT_PTR_ADDR(mp, block, 1, xfs_bmbt_get_maxrecs(cur,
						be16_to_cpu(block->bb_level)));
	*pp = cpu_to_be64(args.fsbno);

	/*
	 * Do all this logging at the end so that
	 * the root is at the right level.
	 */
	xfs_btree_log_block(cur, abp, XFS_BB_ALL_BITS);
	xfs_btree_log_recs(cur, abp, 1, be16_to_cpu(ablock->bb_numrecs));
	ASSERT(*curp == NULL);
	*curp = cur;
	*logflagsp = XFS_ILOG_CORE | xfs_ilog_fbroot(whichfork);
	return 0;

out_unreserve_dquot:
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
out_root_realloc:
	xfs_iroot_realloc(ip, -1, whichfork);
	XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
	ASSERT(ifp->if_broot == NULL);
	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);

	return error;
}
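/*
 * Illustrative result of the conversion above (the inverse of
 * xfs_bmap_btree_to_extents()):
 *
 *	inode root (level 1):  [key: startoff of rec 1] -> child fsbno
 *	child leaf (level 0):  [rec 1][rec 2] ... [rec cnt]
 *
 * Every incore extent except delalloc ones (isnullstartblock()) is
 * copied into the single leaf, so cnt must match the on-disk extent
 * count asserted above.
 */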
/*
 * Convert a local file to an extents file.
 * This code is out of bounds for data forks of regular files,
 * since the file data needs to get logged so things will stay consistent.
 * (The bmap-level manipulations are ok, though).
 */
void
xfs_bmap_local_to_extents_empty(
	struct xfs_inode	*ip,
	int			whichfork)
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);

	ASSERT(whichfork != XFS_COW_FORK);
	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
	ASSERT(ifp->if_bytes == 0);
	ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) == 0);

	xfs_bmap_forkoff_reset(ip, whichfork);
	ifp->if_flags &= ~XFS_IFINLINE;
	ifp->if_flags |= XFS_IFEXTENTS;
	ifp->if_u1.if_root = NULL;
	ifp->if_height = 0;
	XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
}


STATIC int				/* error */
xfs_bmap_local_to_extents(
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_inode_t	*ip,		/* incore inode pointer */
	xfs_extlen_t	total,		/* total blocks needed by transaction */
	int		*logflagsp,	/* inode logging flags */
	int		whichfork,
	void		(*init_fn)(struct xfs_trans *tp,
				   struct xfs_buf *bp,
				   struct xfs_inode *ip,
				   struct xfs_ifork *ifp))
{
	int		error = 0;
	int		flags;		/* logging flags returned */
	struct xfs_ifork *ifp;		/* inode fork pointer */
	xfs_alloc_arg_t	args;		/* allocation arguments */
	xfs_buf_t	*bp;		/* buffer for extent block */
	struct xfs_bmbt_irec rec;
	struct xfs_iext_cursor icur;

	/*
	 * We don't want to deal with the case of keeping inode data inline yet.
	 * So sending the data fork of a regular inode is invalid.
	 */
	ASSERT(!(S_ISREG(VFS_I(ip)->i_mode) && whichfork == XFS_DATA_FORK));
	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);

	if (!ifp->if_bytes) {
		xfs_bmap_local_to_extents_empty(ip, whichfork);
		flags = XFS_ILOG_CORE;
		goto done;
	}

	flags = 0;
	error = 0;
	ASSERT((ifp->if_flags & (XFS_IFINLINE|XFS_IFEXTENTS)) == XFS_IFINLINE);
	memset(&args, 0, sizeof(args));
	args.tp = tp;
	args.mp = ip->i_mount;
	xfs_rmap_ino_owner(&args.oinfo, ip->i_ino, whichfork, 0);
	/*
	 * Allocate a block.  We know we need only one, since the
	 * file currently fits in an inode.
	 */
	if (tp->t_firstblock == NULLFSBLOCK) {
		args.fsbno = XFS_INO_TO_FSB(args.mp, ip->i_ino);
		args.type = XFS_ALLOCTYPE_START_BNO;
	} else {
		args.fsbno = tp->t_firstblock;
		args.type = XFS_ALLOCTYPE_NEAR_BNO;
	}
	args.total = total;
	args.minlen = args.maxlen = args.prod = 1;
	error = xfs_alloc_vextent(&args);
	if (error)
		goto done;

	/* Can't fail, the space was reserved. */
	ASSERT(args.fsbno != NULLFSBLOCK);
	ASSERT(args.len == 1);
	tp->t_firstblock = args.fsbno;
	bp = xfs_btree_get_bufl(args.mp, tp, args.fsbno, 0);

	/*
	 * Initialize the block, copy the data and log the remote buffer.
	 *
	 * The callout is responsible for logging because the remote format
	 * might differ from the local format and thus we don't know how much to
	 * log here. Note that init_fn must also set the buffer log item type
	 * correctly.
	 */
	init_fn(tp, bp, ip, ifp);

	/* account for the change in fork size */
	xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
	xfs_bmap_local_to_extents_empty(ip, whichfork);
	flags |= XFS_ILOG_CORE;

	ifp->if_u1.if_root = NULL;
	ifp->if_height = 0;

	rec.br_startoff = 0;
	rec.br_startblock = args.fsbno;
	rec.br_blockcount = 1;
	rec.br_state = XFS_EXT_NORM;
	xfs_iext_first(ifp, &icur);
	xfs_iext_insert(ip, &icur, &rec, 0);

	XFS_IFORK_NEXT_SET(ip, whichfork, 1);
	ip->i_d.di_nblocks = 1;
	xfs_trans_mod_dquot_byino(tp, ip,
		XFS_TRANS_DQ_BCOUNT, 1L);
	flags |= xfs_ilog_fext(whichfork);

done:
	*logflagsp = flags;
	return error;
}
/*
 * Called from xfs_bmap_add_attrfork to handle btree format files.
 */
STATIC int					/* error */
xfs_bmap_add_attrfork_btree(
	xfs_trans_t		*tp,		/* transaction pointer */
	xfs_inode_t		*ip,		/* incore inode pointer */
	int			*flags)		/* inode logging flags */
{
	xfs_btree_cur_t		*cur;		/* btree cursor */
	int			error;		/* error return value */
	xfs_mount_t		*mp;		/* file system mount struct */
	int			stat;		/* newroot status */

	mp = ip->i_mount;
	if (ip->i_df.if_broot_bytes <= XFS_IFORK_DSIZE(ip))
		*flags |= XFS_ILOG_DBROOT;
	else {
		cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK);
		error = xfs_bmbt_lookup_first(cur, &stat);
		if (error)
			goto error0;
		/* must be at least one entry */
		XFS_WANT_CORRUPTED_GOTO(mp, stat == 1, error0);
		if ((error = xfs_btree_new_iroot(cur, flags, &stat)))
			goto error0;
		if (stat == 0) {
			xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
			return -ENOSPC;
		}
		cur->bc_private.b.allocated = 0;
		xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
	}
	return 0;
error0:
	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
	return error;
}

/*
 * Called from xfs_bmap_add_attrfork to handle extents format files.
 */
STATIC int					/* error */
xfs_bmap_add_attrfork_extents(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode pointer */
	int			*flags)		/* inode logging flags */
{
	xfs_btree_cur_t		*cur;		/* bmap btree cursor */
	int			error;		/* error return value */

	if (ip->i_d.di_nextents * sizeof(xfs_bmbt_rec_t) <= XFS_IFORK_DSIZE(ip))
		return 0;
	cur = NULL;
	error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0, flags,
					  XFS_DATA_FORK);
	if (cur) {
		cur->bc_private.b.allocated = 0;
		xfs_btree_del_cursor(cur, error);
	}
	return error;
}
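/*
 * Worked example for the di_nextents check above (illustrative sizes):
 * each on-disk extent record (xfs_bmbt_rec_t) is 16 bytes, so with a
 * data fork area of, say, XFS_IFORK_DSIZE(ip) == 112 bytes, up to
 * 112 / 16 = 7 extent records still fit inline and no conversion to
 * btree format is needed when the attr fork shrinks the data fork area.
 */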
/*
 * Called from xfs_bmap_add_attrfork to handle local format files. Each
 * different data fork content type needs a different callout to do the
 * conversion. Some are basic and only require special block initialisation
 * callouts for the data formatting, others (directories) are so specialised
 * they handle everything themselves.
 *
 * XXX (dgc): investigate whether directory conversion can use the generic
 * formatting callout. It should be possible - it's just a very complex
 * formatter.
 */
STATIC int					/* error */
xfs_bmap_add_attrfork_local(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode pointer */
	int			*flags)		/* inode logging flags */
{
	struct xfs_da_args	dargs;		/* args for dir/attr code */

	if (ip->i_df.if_bytes <= XFS_IFORK_DSIZE(ip))
		return 0;

	if (S_ISDIR(VFS_I(ip)->i_mode)) {
		memset(&dargs, 0, sizeof(dargs));
		dargs.geo = ip->i_mount->m_dir_geo;
		dargs.dp = ip;
		dargs.total = dargs.geo->fsbcount;
		dargs.whichfork = XFS_DATA_FORK;
		dargs.trans = tp;
		return xfs_dir2_sf_to_block(&dargs);
	}

	if (S_ISLNK(VFS_I(ip)->i_mode))
		return xfs_bmap_local_to_extents(tp, ip, 1, flags,
						 XFS_DATA_FORK,
						 xfs_symlink_local_to_remote);

	/* should only be called for types that support local format data */
	ASSERT(0);
	return -EFSCORRUPTED;
}

/*
 * Convert inode from non-attributed to attributed.
 * Must not be in a transaction, ip must not be locked.
 */
int						/* error code */
xfs_bmap_add_attrfork(
	xfs_inode_t		*ip,		/* incore inode pointer */
	int			size,		/* space new attribute needs */
	int			rsvd)		/* xact may use reserved blks */
{
	xfs_mount_t		*mp;		/* mount structure */
	xfs_trans_t		*tp;		/* transaction pointer */
	int			blks;		/* space reservation */
	int			version = 1;	/* superblock attr version */
	int			logflags;	/* logging flags */
	int			error;		/* error return value */

	ASSERT(XFS_IFORK_Q(ip) == 0);

	mp = ip->i_mount;
	ASSERT(!XFS_NOT_DQATTACHED(mp, ip));

	blks = XFS_ADDAFORK_SPACE_RES(mp);

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_addafork, blks, 0,
			rsvd ? XFS_TRANS_RESERVE : 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_trans_reserve_quota_nblks(tp, ip, blks, 0, rsvd ?
			XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES :
			XFS_QMOPT_RES_REGBLKS);
	if (error)
		goto trans_cancel;
	if (XFS_IFORK_Q(ip))
		goto trans_cancel;
	if (ip->i_d.di_anextents != 0) {
		error = -EFSCORRUPTED;
		goto trans_cancel;
	}
	if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS) {
		/*
		 * For inodes coming from pre-6.2 filesystems.
		 */
		ASSERT(ip->i_d.di_aformat == 0);
		ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
	}

	xfs_trans_ijoin(tp, ip, 0);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	switch (ip->i_d.di_format) {
	case XFS_DINODE_FMT_DEV:
		ip->i_d.di_forkoff = roundup(sizeof(xfs_dev_t), 8) >> 3;
		break;
	case XFS_DINODE_FMT_LOCAL:
	case XFS_DINODE_FMT_EXTENTS:
	case XFS_DINODE_FMT_BTREE:
		ip->i_d.di_forkoff = xfs_attr_shortform_bytesfit(ip, size);
		if (!ip->i_d.di_forkoff)
			ip->i_d.di_forkoff = xfs_default_attroffset(ip) >> 3;
		else if (mp->m_flags & XFS_MOUNT_ATTR2)
			version = 2;
		break;
	default:
		ASSERT(0);
		error = -EINVAL;
		goto trans_cancel;
	}

	ASSERT(ip->i_afp == NULL);
	ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP);
	ip->i_afp->if_flags = XFS_IFEXTENTS;
	logflags = 0;
	switch (ip->i_d.di_format) {
	case XFS_DINODE_FMT_LOCAL:
		error = xfs_bmap_add_attrfork_local(tp, ip, &logflags);
		break;
	case XFS_DINODE_FMT_EXTENTS:
		error = xfs_bmap_add_attrfork_extents(tp, ip, &logflags);
		break;
	case XFS_DINODE_FMT_BTREE:
		error = xfs_bmap_add_attrfork_btree(tp, ip, &logflags);
		break;
	default:
		error = 0;
		break;
	}
	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	if (error)
		goto trans_cancel;
	if (!xfs_sb_version_hasattr(&mp->m_sb) ||
	   (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2)) {
		bool log_sb = false;

		spin_lock(&mp->m_sb_lock);
		if (!xfs_sb_version_hasattr(&mp->m_sb)) {
			xfs_sb_version_addattr(&mp->m_sb);
			log_sb = true;
		}
		if (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2) {
			xfs_sb_version_addattr2(&mp->m_sb);
			log_sb = true;
		}
		spin_unlock(&mp->m_sb_lock);
		if (log_sb)
			xfs_log_sb(tp);
	}

	error = xfs_trans_commit(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;

trans_cancel:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

/*
 * Internal and external extent tree search functions.
 */
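/*
 * Typical use of xfs_bmap_add_attrfork() above (sketch of the pattern
 * used by the attr code, not a verbatim call site): before setting the
 * first extended attribute on an inode, the caller checks for an attr
 * fork and creates one if needed, e.g.
 *
 *	if (XFS_IFORK_Q(ip) == 0) {
 *		error = xfs_bmap_add_attrfork(ip, attr_space, rsvd);
 *		if (error)
 *			return error;
 *	}
 */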
/*
 * Read in extents from a btree-format inode.
 */
int
xfs_iread_extents(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork)
{
	struct xfs_mount	*mp = ip->i_mount;
	int			state = xfs_bmap_fork_to_state(whichfork);
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	xfs_extnum_t		nextents = XFS_IFORK_NEXTENTS(ip, whichfork);
	struct xfs_btree_block	*block = ifp->if_broot;
	struct xfs_iext_cursor	icur;
	struct xfs_bmbt_irec	new;
	xfs_fsblock_t		bno;
	struct xfs_buf		*bp;
	xfs_extnum_t		i, j;
	int			level;
	__be64			*pp;
	int			error;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	if (unlikely(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) {
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
		return -EFSCORRUPTED;
	}

	/*
	 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
	 */
	level = be16_to_cpu(block->bb_level);
	ASSERT(level > 0);
	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
	bno = be64_to_cpu(*pp);

	/*
	 * Go down the tree until leaf level is reached, following the first
	 * pointer (leftmost) at each level.
	 */
	while (level-- > 0) {
		error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
				XFS_BMAP_BTREE_REF, &xfs_bmbt_buf_ops);
		if (error)
			goto out;
		block = XFS_BUF_TO_BLOCK(bp);
		if (level == 0)
			break;
		pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
		bno = be64_to_cpu(*pp);
		XFS_WANT_CORRUPTED_GOTO(mp,
			xfs_verify_fsbno(mp, bno), out_brelse);
		xfs_trans_brelse(tp, bp);
	}

	/*
	 * Here with bp and block set to the leftmost leaf node in the tree.
	 */
	i = 0;
	xfs_iext_first(ifp, &icur);

	/*
	 * Loop over all leaf nodes.  Copy information to the extent records.
	 */
	for (;;) {
		xfs_bmbt_rec_t	*frp;
		xfs_fsblock_t	nextbno;
		xfs_extnum_t	num_recs;

		num_recs = xfs_btree_get_numrecs(block);
		if (unlikely(i + num_recs > nextents)) {
			xfs_warn(ip->i_mount,
				"corrupt dinode %Lu, (btree extents).",
				(unsigned long long) ip->i_ino);
			xfs_inode_verifier_error(ip, -EFSCORRUPTED,
					__func__, block, sizeof(*block),
					__this_address);
			error = -EFSCORRUPTED;
			goto out_brelse;
		}
		/*
		 * Read-ahead the next leaf block, if any.
		 */
		nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
		if (nextbno != NULLFSBLOCK)
			xfs_btree_reada_bufl(mp, nextbno, 1,
					     &xfs_bmbt_buf_ops);
		/*
		 * Copy records into the extent records.
		 */
		frp = XFS_BMBT_REC_ADDR(mp, block, 1);
		for (j = 0; j < num_recs; j++, frp++, i++) {
			xfs_failaddr_t	fa;

			xfs_bmbt_disk_get_all(frp, &new);
			fa = xfs_bmap_validate_extent(ip, whichfork, &new);
			if (fa) {
				error = -EFSCORRUPTED;
				xfs_inode_verifier_error(ip, error,
						"xfs_iread_extents(2)",
						frp, sizeof(*frp), fa);
				goto out_brelse;
			}
			xfs_iext_insert(ip, &icur, &new, state);
			trace_xfs_read_extent(ip, &icur, state, _THIS_IP_);
			xfs_iext_next(ifp, &icur);
		}
		xfs_trans_brelse(tp, bp);
		bno = nextbno;
		/*
		 * If we've reached the end, stop.
		 */
		if (bno == NULLFSBLOCK)
			break;
		error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
				XFS_BMAP_BTREE_REF, &xfs_bmbt_buf_ops);
		if (error)
			goto out;
		block = XFS_BUF_TO_BLOCK(bp);
	}

	if (i != XFS_IFORK_NEXTENTS(ip, whichfork)) {
		error = -EFSCORRUPTED;
		goto out;
	}
	ASSERT(i == xfs_iext_count(ifp));

	ifp->if_flags |= XFS_IFEXTENTS;
	return 0;

out_brelse:
	xfs_trans_brelse(tp, bp);
out:
	xfs_iext_destroy(ifp);
	return error;
}
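/*
 * Caller pattern for xfs_iread_extents() above (sketch; the same idiom
 * appears in several functions later in this file): make sure the
 * incore extent tree is populated before walking it.
 *
 *	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
 *		error = xfs_iread_extents(tp, ip, whichfork);
 *		if (error)
 *			return error;
 *	}
 *	for_each_xfs_iext(ifp, &icur, &got)
 *		... examine the mapping in "got" ...
 */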
/*
 * Returns the file-relative block number of the first unused block(s) in the
 * given fork with at least "len" logically contiguous blocks free. This is
 * the lowest-address hole if the fork has holes, else the first block past
 * the end of fork. Return 0 if the fork is currently local (in-inode).
 */
int						/* error */
xfs_bmap_first_unused(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode */
	xfs_extlen_t		len,		/* size of hole to find */
	xfs_fileoff_t		*first_unused,	/* unused block */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_bmbt_irec	got;
	struct xfs_iext_cursor	icur;
	xfs_fileoff_t		lastaddr = 0;
	xfs_fileoff_t		lowest, max;
	int			error;

	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE ||
	       XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS ||
	       XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);

	if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
		*first_unused = 0;
		return 0;
	}

	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(tp, ip, whichfork);
		if (error)
			return error;
	}

	lowest = max = *first_unused;
	for_each_xfs_iext(ifp, &icur, &got) {
		/*
		 * See if the hole before this extent will work.
		 */
		if (got.br_startoff >= lowest + len &&
		    got.br_startoff - max >= len)
			break;
		lastaddr = got.br_startoff + got.br_blockcount;
		max = XFS_FILEOFF_MAX(lastaddr, lowest);
	}

	*first_unused = max;
	return 0;
}

/*
 * Returns the file-relative block number of the last block - 1 before
 * last_block (input value) in the file.
 * This is not based on i_size, it is based on the extent records.
 * Returns 0 for local files, as they do not have extent records.
 */
int						/* error */
xfs_bmap_last_before(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode */
	xfs_fileoff_t		*last_block,	/* last block */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_bmbt_irec	got;
	struct xfs_iext_cursor	icur;
	int			error;

	switch (XFS_IFORK_FORMAT(ip, whichfork)) {
	case XFS_DINODE_FMT_LOCAL:
		*last_block = 0;
		return 0;
	case XFS_DINODE_FMT_BTREE:
	case XFS_DINODE_FMT_EXTENTS:
		break;
	default:
		return -EIO;
	}

	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(tp, ip, whichfork);
		if (error)
			return error;
	}

	if (!xfs_iext_lookup_extent_before(ip, ifp, last_block, &icur, &got))
		*last_block = 0;
	return 0;
}

int
xfs_bmap_last_extent(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_bmbt_irec	*rec,
	int			*is_empty)
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_iext_cursor	icur;
	int			error;

	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(tp, ip, whichfork);
		if (error)
			return error;
	}

	xfs_iext_last(ifp, &icur);
	if (!xfs_iext_get_extent(ifp, &icur, rec))
		*is_empty = 1;
	else
		*is_empty = 0;
	return 0;
}
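/*
 * Worked example for xfs_bmap_first_unused() above (illustrative
 * mappings): suppose the fork maps [startoff 0, 5 blocks] and
 * [startoff 10, 3 blocks], and the caller wants len = 4 starting from
 * offset 0.  The first extent ends at offset 5, so max becomes 5; the
 * hole before the second extent is 10 - 5 = 5 >= 4 blocks, so the loop
 * breaks and *first_unused is 5.  If no hole were big enough, the scan
 * would fall off the last extent and return the offset just past EOF.
 */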
/*
 * Check the last inode extent to determine whether this allocation will result
 * in blocks being allocated at the end of the file. When we allocate new data
 * blocks at the end of the file which do not start at the previous data block,
 * we will try to align the new blocks at stripe unit boundaries.
 *
 * Returns 1 in bma->aeof if the file (fork) is empty as any new write will be
 * at, or past the EOF.
 */
STATIC int
xfs_bmap_isaeof(
	struct xfs_bmalloca	*bma,
	int			whichfork)
{
	struct xfs_bmbt_irec	rec;
	int			is_empty;
	int			error;

	bma->aeof = false;
	error = xfs_bmap_last_extent(NULL, bma->ip, whichfork, &rec,
				     &is_empty);
	if (error)
		return error;

	if (is_empty) {
		bma->aeof = true;
		return 0;
	}

	/*
	 * Check if we are allocating at or past the last extent, or at least
	 * into the last delayed allocated extent.
	 */
	bma->aeof = bma->offset >= rec.br_startoff + rec.br_blockcount ||
		(bma->offset >= rec.br_startoff &&
		 isnullstartblock(rec.br_startblock));
	return 0;
}

/*
 * Returns the file-relative block number of the first block past eof in
 * the file.  This is not based on i_size, it is based on the extent records.
 * Returns 0 for local files, as they do not have extent records.
 */
int
xfs_bmap_last_offset(
	struct xfs_inode	*ip,
	xfs_fileoff_t		*last_block,
	int			whichfork)
{
	struct xfs_bmbt_irec	rec;
	int			is_empty;
	int			error;

	*last_block = 0;

	if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL)
		return 0;

	if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE &&
	    XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
		return -EIO;

	error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, &is_empty);
	if (error || is_empty)
		return error;

	*last_block = rec.br_startoff + rec.br_blockcount;
	return 0;
}

/*
 * Returns whether the selected fork of the inode has exactly one
 * block or not.  For the data fork we check this matches di_size,
 * implying the file's range is 0..bsize-1.
 */
int					/* 1=>1 block, 0=>otherwise */
xfs_bmap_one_block(
	xfs_inode_t	*ip,		/* incore inode */
	int		whichfork)	/* data or attr fork */
{
	struct xfs_ifork *ifp;		/* inode fork pointer */
	int		rval;		/* return value */
	xfs_bmbt_irec_t	s;		/* internal version of extent */
	struct xfs_iext_cursor icur;

#ifndef DEBUG
	if (whichfork == XFS_DATA_FORK)
		return XFS_ISIZE(ip) == ip->i_mount->m_sb.sb_blocksize;
#endif	/* !DEBUG */
	if (XFS_IFORK_NEXTENTS(ip, whichfork) != 1)
		return 0;
	if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
		return 0;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT(ifp->if_flags & XFS_IFEXTENTS);
	xfs_iext_first(ifp, &icur);
	xfs_iext_get_extent(ifp, &icur, &s);
	rval = s.br_startoff == 0 && s.br_blockcount == 1;
	if (rval && whichfork == XFS_DATA_FORK)
		ASSERT(XFS_ISIZE(ip) == ip->i_mount->m_sb.sb_blocksize);
	return rval;
}

/*
 * Extent tree manipulation functions used during allocation.
 */
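/*
 * Overview of the case analysis used by the extent update helpers below
 * (a diagram-form summary of the code, nothing more): a new real
 * allocation "new" replaces some part of a delayed-allocation extent
 * PREV, with possible real neighbors LEFT and RIGHT:
 *
 *	LEFT        PREV (delalloc)        RIGHT
 *	+------+----------------------+---------+
 *	           +--- new ---+
 *
 * BMAP_LEFT_FILLING / BMAP_RIGHT_FILLING say whether new starts/ends
 * exactly at PREV's boundaries, and BMAP_LEFT_CONTIG / BMAP_RIGHT_CONTIG
 * say whether new can be merged with the corresponding neighbor.  The
 * switch statements below enumerate the feasible combinations of these
 * four bits.
 */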
/*
 * Convert a delayed allocation to a real allocation.
 */
STATIC int				/* error */
xfs_bmap_add_extent_delay_real(
	struct xfs_bmalloca	*bma,
	int			whichfork)
{
	struct xfs_bmbt_irec	*new = &bma->got;
	int			error;	/* error return value */
	int			i;	/* temp state */
	struct xfs_ifork	*ifp;	/* inode fork pointer */
	xfs_fileoff_t		new_endoff;	/* end offset of new entry */
	xfs_bmbt_irec_t		r[3];	/* neighbor extent entries */
					/* left is 0, right is 1, prev is 2 */
	int			rval = 0; /* return value (logging flags) */
	int			state = xfs_bmap_fork_to_state(whichfork);
	xfs_filblks_t		da_new;	/* new count del alloc blocks used */
	xfs_filblks_t		da_old;	/* old count del alloc blocks used */
	xfs_filblks_t		temp = 0; /* value for da_new calculations */
	int			tmp_rval; /* partial logging flags */
	struct xfs_mount	*mp;
	xfs_extnum_t		*nextents;
	struct xfs_bmbt_irec	old;

	mp = bma->ip->i_mount;
	ifp = XFS_IFORK_PTR(bma->ip, whichfork);
	ASSERT(whichfork != XFS_ATTR_FORK);
	nextents = (whichfork == XFS_COW_FORK ? &bma->ip->i_cnextents :
						&bma->ip->i_d.di_nextents);

	ASSERT(!isnullstartblock(new->br_startblock));
	ASSERT(!bma->cur ||
	       (bma->cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL));

	XFS_STATS_INC(mp, xs_add_exlist);

#define	LEFT		r[0]
#define	RIGHT		r[1]
#define	PREV		r[2]

	/*
	 * Set up a bunch of variables to make the tests simpler.
	 */
	xfs_iext_get_extent(ifp, &bma->icur, &PREV);
	new_endoff = new->br_startoff + new->br_blockcount;
	ASSERT(isnullstartblock(PREV.br_startblock));
	ASSERT(PREV.br_startoff <= new->br_startoff);
	ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);

	da_old = startblockval(PREV.br_startblock);
	da_new = 0;

	/*
	 * Set flags determining what part of the previous delayed allocation
	 * extent is being replaced by a real allocation.
	 */
	if (PREV.br_startoff == new->br_startoff)
		state |= BMAP_LEFT_FILLING;
	if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
		state |= BMAP_RIGHT_FILLING;

	/*
	 * Check and set flags if this segment has a left neighbor.
	 * Don't set contiguous if the combined extent would be too large.
	 */
	if (xfs_iext_peek_prev_extent(ifp, &bma->icur, &LEFT)) {
		state |= BMAP_LEFT_VALID;
		if (isnullstartblock(LEFT.br_startblock))
			state |= BMAP_LEFT_DELAY;
	}

	if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
	    LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
	    LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
	    LEFT.br_state == new->br_state &&
	    LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN)
		state |= BMAP_LEFT_CONTIG;

	/*
	 * Check and set flags if this segment has a right neighbor.
	 * Don't set contiguous if the combined extent would be too large.
	 * Also check for all-three-contiguous being too large.
	 */
	if (xfs_iext_peek_next_extent(ifp, &bma->icur, &RIGHT)) {
		state |= BMAP_RIGHT_VALID;
		if (isnullstartblock(RIGHT.br_startblock))
			state |= BMAP_RIGHT_DELAY;
	}

	if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
	    new_endoff == RIGHT.br_startoff &&
	    new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
	    new->br_state == RIGHT.br_state &&
	    new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
	    ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
		       BMAP_RIGHT_FILLING)) !=
		      (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
		       BMAP_RIGHT_FILLING) ||
	     LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
			<= MAXEXTLEN))
		state |= BMAP_RIGHT_CONTIG;

	error = 0;
	/*
	 * Switch out based on the FILLING and CONTIG state bits.
	 */
	switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
			 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
	     BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * The left and right neighbors are both contiguous with new.
		 */
		LEFT.br_blockcount += PREV.br_blockcount + RIGHT.br_blockcount;

		xfs_iext_remove(bma->ip, &bma->icur, state);
		xfs_iext_remove(bma->ip, &bma->icur, state);
		xfs_iext_prev(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);
		(*nextents)--;

		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, &RIGHT, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_btree_delete(bma->cur, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_btree_decrement(bma->cur, 0, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_bmbt_update(bma->cur, &LEFT);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * The left neighbor is contiguous, the right is not.
		 */
		old = LEFT;
		LEFT.br_blockcount += PREV.br_blockcount;

		xfs_iext_remove(bma->ip, &bma->icur, state);
		xfs_iext_prev(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);

		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_bmbt_update(bma->cur, &LEFT);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * The right neighbor is contiguous, the left is not.
		 */
		PREV.br_startblock = new->br_startblock;
		PREV.br_blockcount += RIGHT.br_blockcount;

		xfs_iext_next(ifp, &bma->icur);
		xfs_iext_remove(bma->ip, &bma->icur, state);
		xfs_iext_prev(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);

		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, &RIGHT, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_bmbt_update(bma->cur, &PREV);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * Neither the left nor right neighbors are contiguous with
		 * the new one.
		 */
		PREV.br_startblock = new->br_startblock;
		PREV.br_state = new->br_state;
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);

		(*nextents)++;
		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
		/*
		 * Filling in the first part of a previous delayed allocation.
		 * The left neighbor is contiguous.
		 */
		old = LEFT;
		temp = PREV.br_blockcount - new->br_blockcount;
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
				startblockval(PREV.br_startblock));

		LEFT.br_blockcount += new->br_blockcount;

		PREV.br_blockcount = temp;
		PREV.br_startoff += new->br_blockcount;
		PREV.br_startblock = nullstartblock(da_new);

		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
		xfs_iext_prev(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);

		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_bmbt_update(bma->cur, &LEFT);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING:
		/*
		 * Filling in the first part of a previous delayed allocation.
		 * The left neighbor is not contiguous.
		 */
		xfs_iext_update_extent(bma->ip, state, &bma->icur, new);
		(*nextents)++;
		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		}

		if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
					&bma->cur, 1, &tmp_rval, whichfork);
			rval |= tmp_rval;
			if (error)
				goto done;
		}

		temp = PREV.br_blockcount - new->br_blockcount;
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
			startblockval(PREV.br_startblock) -
			(bma->cur ? bma->cur->bc_private.b.allocated : 0));

		PREV.br_startoff = new_endoff;
		PREV.br_blockcount = temp;
		PREV.br_startblock = nullstartblock(da_new);
		xfs_iext_next(ifp, &bma->icur);
		xfs_iext_insert(bma->ip, &bma->icur, &PREV, state);
		xfs_iext_prev(ifp, &bma->icur);
		break;

	case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Filling in the last part of a previous delayed allocation.
		 * The right neighbor is contiguous with the new allocation.
		 */
		old = RIGHT;
		RIGHT.br_startoff = new->br_startoff;
		RIGHT.br_startblock = new->br_startblock;
		RIGHT.br_blockcount += new->br_blockcount;

		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_bmbt_update(bma->cur, &RIGHT);
			if (error)
				goto done;
		}

		temp = PREV.br_blockcount - new->br_blockcount;
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
			startblockval(PREV.br_startblock));

		PREV.br_blockcount = temp;
		PREV.br_startblock = nullstartblock(da_new);

		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
		xfs_iext_next(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &RIGHT);
		break;

	case BMAP_RIGHT_FILLING:
		/*
		 * Filling in the last part of a previous delayed allocation.
		 * The right neighbor is not contiguous.
		 */
		xfs_iext_update_extent(bma->ip, state, &bma->icur, new);
		(*nextents)++;
		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		}

		if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
				&bma->cur, 1, &tmp_rval, whichfork);
			rval |= tmp_rval;
			if (error)
				goto done;
		}

		temp = PREV.br_blockcount - new->br_blockcount;
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
			startblockval(PREV.br_startblock) -
			(bma->cur ? bma->cur->bc_private.b.allocated : 0));

		PREV.br_startblock = nullstartblock(da_new);
		PREV.br_blockcount = temp;
		xfs_iext_insert(bma->ip, &bma->icur, &PREV, state);
		xfs_iext_next(ifp, &bma->icur);
		break;

	case 0:
		/*
		 * Filling in the middle part of a previous delayed allocation.
		 * Contiguity is impossible here.
		 * This case is avoided almost all the time.
1892 * 1893 * We start with a delayed allocation: 1894 * 1895 * +ddddddddddddddddddddddddddddddddddddddddddddddddddddddd+ 1896 * PREV @ idx 1897 * 1898 * and we are allocating: 1899 * +rrrrrrrrrrrrrrrrr+ 1900 * new 1901 * 1902 * and we set it up for insertion as: 1903 * +ddddddddddddddddddd+rrrrrrrrrrrrrrrrr+ddddddddddddddddd+ 1904 * new 1905 * PREV @ idx LEFT RIGHT 1906 * inserted at idx + 1 1907 */ 1908 old = PREV; 1909 1910 /* LEFT is the new middle */ 1911 LEFT = *new; 1912 1913 /* RIGHT is the new right */ 1914 RIGHT.br_state = PREV.br_state; 1915 RIGHT.br_startoff = new_endoff; 1916 RIGHT.br_blockcount = 1917 PREV.br_startoff + PREV.br_blockcount - new_endoff; 1918 RIGHT.br_startblock = 1919 nullstartblock(xfs_bmap_worst_indlen(bma->ip, 1920 RIGHT.br_blockcount)); 1921 1922 /* truncate PREV */ 1923 PREV.br_blockcount = new->br_startoff - PREV.br_startoff; 1924 PREV.br_startblock = 1925 nullstartblock(xfs_bmap_worst_indlen(bma->ip, 1926 PREV.br_blockcount)); 1927 xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV); 1928 1929 xfs_iext_next(ifp, &bma->icur); 1930 xfs_iext_insert(bma->ip, &bma->icur, &RIGHT, state); 1931 xfs_iext_insert(bma->ip, &bma->icur, &LEFT, state); 1932 (*nextents)++; 1933 1934 if (bma->cur == NULL) 1935 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 1936 else { 1937 rval = XFS_ILOG_CORE; 1938 error = xfs_bmbt_lookup_eq(bma->cur, new, &i); 1939 if (error) 1940 goto done; 1941 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done); 1942 error = xfs_btree_insert(bma->cur, &i); 1943 if (error) 1944 goto done; 1945 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 1946 } 1947 1948 if (xfs_bmap_needs_btree(bma->ip, whichfork)) { 1949 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip, 1950 &bma->cur, 1, &tmp_rval, whichfork); 1951 rval |= tmp_rval; 1952 if (error) 1953 goto done; 1954 } 1955 1956 da_new = startblockval(PREV.br_startblock) + 1957 startblockval(RIGHT.br_startblock); 1958 break; 1959 1960 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 1961 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 1962 case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG: 1963 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG: 1964 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 1965 case BMAP_LEFT_CONTIG: 1966 case BMAP_RIGHT_CONTIG: 1967 /* 1968 * These cases are all impossible. 
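 *
 * They cannot happen because the new extent lies within PREV, so a
 * neighbor can only be contiguous with it when the matching edge of
 * PREV is being filled: touching LEFT requires starting exactly
 * where PREV starts (BMAP_LEFT_FILLING), and touching RIGHT
 * requires ending exactly where PREV ends (BMAP_RIGHT_FILLING).
 * A CONTIG bit without its FILLING bit is therefore impossible.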
1969 */ 1970 ASSERT(0); 1971 } 1972 1973 /* add reverse mapping unless caller opted out */ 1974 if (!(bma->flags & XFS_BMAPI_NORMAP)) { 1975 error = xfs_rmap_map_extent(bma->tp, bma->ip, whichfork, new); 1976 if (error) 1977 goto done; 1978 } 1979 1980 /* convert to a btree if necessary */ 1981 if (xfs_bmap_needs_btree(bma->ip, whichfork)) { 1982 int tmp_logflags; /* partial log flag return val */ 1983 1984 ASSERT(bma->cur == NULL); 1985 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip, 1986 &bma->cur, da_old > 0, &tmp_logflags, 1987 whichfork); 1988 bma->logflags |= tmp_logflags; 1989 if (error) 1990 goto done; 1991 } 1992 1993 if (bma->cur) { 1994 da_new += bma->cur->bc_private.b.allocated; 1995 bma->cur->bc_private.b.allocated = 0; 1996 } 1997 1998 /* adjust for changes in reserved delayed indirect blocks */ 1999 if (da_new != da_old) { 2000 ASSERT(state == 0 || da_new < da_old); 2001 error = xfs_mod_fdblocks(mp, (int64_t)(da_old - da_new), 2002 false); 2003 } 2004 2005 xfs_bmap_check_leaf_extents(bma->cur, bma->ip, whichfork); 2006 done: 2007 if (whichfork != XFS_COW_FORK) 2008 bma->logflags |= rval; 2009 return error; 2010 #undef LEFT 2011 #undef RIGHT 2012 #undef PREV 2013 } 2014 2015 /* 2016 * Convert an unwritten allocation to a real allocation or vice versa. 2017 */ 2018 STATIC int /* error */ 2019 xfs_bmap_add_extent_unwritten_real( 2020 struct xfs_trans *tp, 2021 xfs_inode_t *ip, /* incore inode pointer */ 2022 int whichfork, 2023 struct xfs_iext_cursor *icur, 2024 xfs_btree_cur_t **curp, /* if *curp is null, not a btree */ 2025 xfs_bmbt_irec_t *new, /* new data to add to file extents */ 2026 int *logflagsp) /* inode logging flags */ 2027 { 2028 xfs_btree_cur_t *cur; /* btree cursor */ 2029 int error; /* error return value */ 2030 int i; /* temp state */ 2031 struct xfs_ifork *ifp; /* inode fork pointer */ 2032 xfs_fileoff_t new_endoff; /* end offset of new entry */ 2033 xfs_bmbt_irec_t r[3]; /* neighbor extent entries */ 2034 /* left is 0, right is 1, prev is 2 */ 2035 int rval=0; /* return value (logging flags) */ 2036 int state = xfs_bmap_fork_to_state(whichfork); 2037 struct xfs_mount *mp = ip->i_mount; 2038 struct xfs_bmbt_irec old; 2039 2040 *logflagsp = 0; 2041 2042 cur = *curp; 2043 ifp = XFS_IFORK_PTR(ip, whichfork); 2044 2045 ASSERT(!isnullstartblock(new->br_startblock)); 2046 2047 XFS_STATS_INC(mp, xs_add_exlist); 2048 2049 #define LEFT r[0] 2050 #define RIGHT r[1] 2051 #define PREV r[2] 2052 2053 /* 2054 * Set up a bunch of variables to make the tests simpler. 2055 */ 2056 error = 0; 2057 xfs_iext_get_extent(ifp, icur, &PREV); 2058 ASSERT(new->br_state != PREV.br_state); 2059 new_endoff = new->br_startoff + new->br_blockcount; 2060 ASSERT(PREV.br_startoff <= new->br_startoff); 2061 ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff); 2062 2063 /* 2064 * Set flags determining what part of the previous oldext allocation 2065 * extent is being replaced by a newext allocation. 2066 */ 2067 if (PREV.br_startoff == new->br_startoff) 2068 state |= BMAP_LEFT_FILLING; 2069 if (PREV.br_startoff + PREV.br_blockcount == new_endoff) 2070 state |= BMAP_RIGHT_FILLING; 2071 2072 /* 2073 * Check and set flags if this segment has a left neighbor. 2074 * Don't set contiguous if the combined extent would be too large. 
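 *
 * "Too large" means longer than MAXEXTLEN: the on-disk extent
 * record stores the length in a 21-bit field, so no merge may
 * produce an extent of more than MAXEXTLEN blocks.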
2075 */ 2076 if (xfs_iext_peek_prev_extent(ifp, icur, &LEFT)) { 2077 state |= BMAP_LEFT_VALID; 2078 if (isnullstartblock(LEFT.br_startblock)) 2079 state |= BMAP_LEFT_DELAY; 2080 } 2081 2082 if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) && 2083 LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff && 2084 LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock && 2085 LEFT.br_state == new->br_state && 2086 LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN) 2087 state |= BMAP_LEFT_CONTIG; 2088 2089 /* 2090 * Check and set flags if this segment has a right neighbor. 2091 * Don't set contiguous if the combined extent would be too large. 2092 * Also check for all-three-contiguous being too large. 2093 */ 2094 if (xfs_iext_peek_next_extent(ifp, icur, &RIGHT)) { 2095 state |= BMAP_RIGHT_VALID; 2096 if (isnullstartblock(RIGHT.br_startblock)) 2097 state |= BMAP_RIGHT_DELAY; 2098 } 2099 2100 if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) && 2101 new_endoff == RIGHT.br_startoff && 2102 new->br_startblock + new->br_blockcount == RIGHT.br_startblock && 2103 new->br_state == RIGHT.br_state && 2104 new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN && 2105 ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING | 2106 BMAP_RIGHT_FILLING)) != 2107 (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING | 2108 BMAP_RIGHT_FILLING) || 2109 LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount 2110 <= MAXEXTLEN)) 2111 state |= BMAP_RIGHT_CONTIG; 2112 2113 /* 2114 * Switch out based on the FILLING and CONTIG state bits. 2115 */ 2116 switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | 2117 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) { 2118 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | 2119 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG: 2120 /* 2121 * Setting all of a previous oldext extent to newext. 2122 * The left and right neighbors are both contiguous with new. 2123 */ 2124 LEFT.br_blockcount += PREV.br_blockcount + RIGHT.br_blockcount; 2125 2126 xfs_iext_remove(ip, icur, state); 2127 xfs_iext_remove(ip, icur, state); 2128 xfs_iext_prev(ifp, icur); 2129 xfs_iext_update_extent(ip, state, icur, &LEFT); 2130 XFS_IFORK_NEXT_SET(ip, whichfork, 2131 XFS_IFORK_NEXTENTS(ip, whichfork) - 2); 2132 if (cur == NULL) 2133 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 2134 else { 2135 rval = XFS_ILOG_CORE; 2136 error = xfs_bmbt_lookup_eq(cur, &RIGHT, &i); 2137 if (error) 2138 goto done; 2139 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2140 if ((error = xfs_btree_delete(cur, &i))) 2141 goto done; 2142 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2143 if ((error = xfs_btree_decrement(cur, 0, &i))) 2144 goto done; 2145 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2146 if ((error = xfs_btree_delete(cur, &i))) 2147 goto done; 2148 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2149 if ((error = xfs_btree_decrement(cur, 0, &i))) 2150 goto done; 2151 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2152 error = xfs_bmbt_update(cur, &LEFT); 2153 if (error) 2154 goto done; 2155 } 2156 break; 2157 2158 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG: 2159 /* 2160 * Setting all of a previous oldext extent to newext. 2161 * The left neighbor is contiguous, the right is not. 
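 *
 *	Before:				After:
 *	+llllllll+oooooooo+		+llllllllllllllll+
 *	   LEFT	    PREV		       LEFT
 *
 * LEFT absorbs all of PREV's blocks, PREV's record is removed and
 * the fork's extent count drops by one.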
2162 */ 2163 LEFT.br_blockcount += PREV.br_blockcount; 2164 2165 xfs_iext_remove(ip, icur, state); 2166 xfs_iext_prev(ifp, icur); 2167 xfs_iext_update_extent(ip, state, icur, &LEFT); 2168 XFS_IFORK_NEXT_SET(ip, whichfork, 2169 XFS_IFORK_NEXTENTS(ip, whichfork) - 1); 2170 if (cur == NULL) 2171 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 2172 else { 2173 rval = XFS_ILOG_CORE; 2174 error = xfs_bmbt_lookup_eq(cur, &PREV, &i); 2175 if (error) 2176 goto done; 2177 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2178 if ((error = xfs_btree_delete(cur, &i))) 2179 goto done; 2180 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2181 if ((error = xfs_btree_decrement(cur, 0, &i))) 2182 goto done; 2183 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2184 error = xfs_bmbt_update(cur, &LEFT); 2185 if (error) 2186 goto done; 2187 } 2188 break; 2189 2190 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG: 2191 /* 2192 * Setting all of a previous oldext extent to newext. 2193 * The right neighbor is contiguous, the left is not. 2194 */ 2195 PREV.br_blockcount += RIGHT.br_blockcount; 2196 PREV.br_state = new->br_state; 2197 2198 xfs_iext_next(ifp, icur); 2199 xfs_iext_remove(ip, icur, state); 2200 xfs_iext_prev(ifp, icur); 2201 xfs_iext_update_extent(ip, state, icur, &PREV); 2202 2203 XFS_IFORK_NEXT_SET(ip, whichfork, 2204 XFS_IFORK_NEXTENTS(ip, whichfork) - 1); 2205 if (cur == NULL) 2206 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 2207 else { 2208 rval = XFS_ILOG_CORE; 2209 error = xfs_bmbt_lookup_eq(cur, &RIGHT, &i); 2210 if (error) 2211 goto done; 2212 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2213 if ((error = xfs_btree_delete(cur, &i))) 2214 goto done; 2215 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2216 if ((error = xfs_btree_decrement(cur, 0, &i))) 2217 goto done; 2218 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2219 error = xfs_bmbt_update(cur, &PREV); 2220 if (error) 2221 goto done; 2222 } 2223 break; 2224 2225 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING: 2226 /* 2227 * Setting all of a previous oldext extent to newext. 2228 * Neither the left nor right neighbors are contiguous with 2229 * the new one. 2230 */ 2231 PREV.br_state = new->br_state; 2232 xfs_iext_update_extent(ip, state, icur, &PREV); 2233 2234 if (cur == NULL) 2235 rval = XFS_ILOG_DEXT; 2236 else { 2237 rval = 0; 2238 error = xfs_bmbt_lookup_eq(cur, new, &i); 2239 if (error) 2240 goto done; 2241 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2242 error = xfs_bmbt_update(cur, &PREV); 2243 if (error) 2244 goto done; 2245 } 2246 break; 2247 2248 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG: 2249 /* 2250 * Setting the first part of a previous oldext extent to newext. 2251 * The left neighbor is contiguous. 
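 *
 *	Before:				After:
 *	+llllll+nnnooooo+		+lllllllll+ooooo+
 *	  LEFT	   PREV			    LEFT    PREV
 *
 * The converted range (nnn) moves into LEFT; PREV keeps its old
 * state and is trimmed from the front.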
2252 */ 2253 LEFT.br_blockcount += new->br_blockcount; 2254 2255 old = PREV; 2256 PREV.br_startoff += new->br_blockcount; 2257 PREV.br_startblock += new->br_blockcount; 2258 PREV.br_blockcount -= new->br_blockcount; 2259 2260 xfs_iext_update_extent(ip, state, icur, &PREV); 2261 xfs_iext_prev(ifp, icur); 2262 xfs_iext_update_extent(ip, state, icur, &LEFT); 2263 2264 if (cur == NULL) 2265 rval = XFS_ILOG_DEXT; 2266 else { 2267 rval = 0; 2268 error = xfs_bmbt_lookup_eq(cur, &old, &i); 2269 if (error) 2270 goto done; 2271 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2272 error = xfs_bmbt_update(cur, &PREV); 2273 if (error) 2274 goto done; 2275 error = xfs_btree_decrement(cur, 0, &i); 2276 if (error) 2277 goto done; 2278 error = xfs_bmbt_update(cur, &LEFT); 2279 if (error) 2280 goto done; 2281 } 2282 break; 2283 2284 case BMAP_LEFT_FILLING: 2285 /* 2286 * Setting the first part of a previous oldext extent to newext. 2287 * The left neighbor is not contiguous. 2288 */ 2289 old = PREV; 2290 PREV.br_startoff += new->br_blockcount; 2291 PREV.br_startblock += new->br_blockcount; 2292 PREV.br_blockcount -= new->br_blockcount; 2293 2294 xfs_iext_update_extent(ip, state, icur, &PREV); 2295 xfs_iext_insert(ip, icur, new, state); 2296 XFS_IFORK_NEXT_SET(ip, whichfork, 2297 XFS_IFORK_NEXTENTS(ip, whichfork) + 1); 2298 if (cur == NULL) 2299 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 2300 else { 2301 rval = XFS_ILOG_CORE; 2302 error = xfs_bmbt_lookup_eq(cur, &old, &i); 2303 if (error) 2304 goto done; 2305 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2306 error = xfs_bmbt_update(cur, &PREV); 2307 if (error) 2308 goto done; 2309 cur->bc_rec.b = *new; 2310 if ((error = xfs_btree_insert(cur, &i))) 2311 goto done; 2312 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2313 } 2314 break; 2315 2316 case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG: 2317 /* 2318 * Setting the last part of a previous oldext extent to newext. 2319 * The right neighbor is contiguous with the new allocation. 2320 */ 2321 old = PREV; 2322 PREV.br_blockcount -= new->br_blockcount; 2323 2324 RIGHT.br_startoff = new->br_startoff; 2325 RIGHT.br_startblock = new->br_startblock; 2326 RIGHT.br_blockcount += new->br_blockcount; 2327 2328 xfs_iext_update_extent(ip, state, icur, &PREV); 2329 xfs_iext_next(ifp, icur); 2330 xfs_iext_update_extent(ip, state, icur, &RIGHT); 2331 2332 if (cur == NULL) 2333 rval = XFS_ILOG_DEXT; 2334 else { 2335 rval = 0; 2336 error = xfs_bmbt_lookup_eq(cur, &old, &i); 2337 if (error) 2338 goto done; 2339 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2340 error = xfs_bmbt_update(cur, &PREV); 2341 if (error) 2342 goto done; 2343 error = xfs_btree_increment(cur, 0, &i); 2344 if (error) 2345 goto done; 2346 error = xfs_bmbt_update(cur, &RIGHT); 2347 if (error) 2348 goto done; 2349 } 2350 break; 2351 2352 case BMAP_RIGHT_FILLING: 2353 /* 2354 * Setting the last part of a previous oldext extent to newext. 2355 * The right neighbor is not contiguous. 
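 *
 * PREV is trimmed in place and a new record for the converted
 * range is inserted after it, growing the extent count by one. In
 * the btree case this is an update of the shortened PREV followed
 * by a fresh lookup and insert of the new record.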
2356 */ 2357 old = PREV; 2358 PREV.br_blockcount -= new->br_blockcount; 2359 2360 xfs_iext_update_extent(ip, state, icur, &PREV); 2361 xfs_iext_next(ifp, icur); 2362 xfs_iext_insert(ip, icur, new, state); 2363 2364 XFS_IFORK_NEXT_SET(ip, whichfork, 2365 XFS_IFORK_NEXTENTS(ip, whichfork) + 1); 2366 if (cur == NULL) 2367 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 2368 else { 2369 rval = XFS_ILOG_CORE; 2370 error = xfs_bmbt_lookup_eq(cur, &old, &i); 2371 if (error) 2372 goto done; 2373 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2374 error = xfs_bmbt_update(cur, &PREV); 2375 if (error) 2376 goto done; 2377 error = xfs_bmbt_lookup_eq(cur, new, &i); 2378 if (error) 2379 goto done; 2380 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done); 2381 if ((error = xfs_btree_insert(cur, &i))) 2382 goto done; 2383 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2384 } 2385 break; 2386 2387 case 0: 2388 /* 2389 * Setting the middle part of a previous oldext extent to 2390 * newext. Contiguity is impossible here. 2391 * One extent becomes three extents. 2392 */ 2393 old = PREV; 2394 PREV.br_blockcount = new->br_startoff - PREV.br_startoff; 2395 2396 r[0] = *new; 2397 r[1].br_startoff = new_endoff; 2398 r[1].br_blockcount = 2399 old.br_startoff + old.br_blockcount - new_endoff; 2400 r[1].br_startblock = new->br_startblock + new->br_blockcount; 2401 r[1].br_state = PREV.br_state; 2402 2403 xfs_iext_update_extent(ip, state, icur, &PREV); 2404 xfs_iext_next(ifp, icur); 2405 xfs_iext_insert(ip, icur, &r[1], state); 2406 xfs_iext_insert(ip, icur, &r[0], state); 2407 2408 XFS_IFORK_NEXT_SET(ip, whichfork, 2409 XFS_IFORK_NEXTENTS(ip, whichfork) + 2); 2410 if (cur == NULL) 2411 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 2412 else { 2413 rval = XFS_ILOG_CORE; 2414 error = xfs_bmbt_lookup_eq(cur, &old, &i); 2415 if (error) 2416 goto done; 2417 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2418 /* new right extent - oldext */ 2419 error = xfs_bmbt_update(cur, &r[1]); 2420 if (error) 2421 goto done; 2422 /* new left extent - oldext */ 2423 cur->bc_rec.b = PREV; 2424 if ((error = xfs_btree_insert(cur, &i))) 2425 goto done; 2426 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2427 /* 2428 * Reset the cursor to the position of the new extent 2429 * we are about to insert as we can't trust it after 2430 * the previous insert. 2431 */ 2432 error = xfs_bmbt_lookup_eq(cur, new, &i); 2433 if (error) 2434 goto done; 2435 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done); 2436 /* new middle extent - newext */ 2437 if ((error = xfs_btree_insert(cur, &i))) 2438 goto done; 2439 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2440 } 2441 break; 2442 2443 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 2444 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 2445 case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG: 2446 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG: 2447 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 2448 case BMAP_LEFT_CONTIG: 2449 case BMAP_RIGHT_CONTIG: 2450 /* 2451 * These cases are all impossible. 
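 *
 * As with the delayed allocation conversion above, new lies within
 * PREV, so a neighbor can only be contiguous when the matching
 * edge of PREV is being filled; a CONTIG bit can never be set
 * without its FILLING bit.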
2452 */ 2453 ASSERT(0); 2454 } 2455 2456 /* update reverse mappings */ 2457 error = xfs_rmap_convert_extent(mp, tp, ip, whichfork, new); 2458 if (error) 2459 goto done; 2460 2461 /* convert to a btree if necessary */ 2462 if (xfs_bmap_needs_btree(ip, whichfork)) { 2463 int tmp_logflags; /* partial log flag return val */ 2464 2465 ASSERT(cur == NULL); 2466 error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0, 2467 &tmp_logflags, whichfork); 2468 *logflagsp |= tmp_logflags; 2469 if (error) 2470 goto done; 2471 } 2472 2473 /* clear out the allocated field, done with it now in any case. */ 2474 if (cur) { 2475 cur->bc_private.b.allocated = 0; 2476 *curp = cur; 2477 } 2478 2479 xfs_bmap_check_leaf_extents(*curp, ip, whichfork); 2480 done: 2481 *logflagsp |= rval; 2482 return error; 2483 #undef LEFT 2484 #undef RIGHT 2485 #undef PREV 2486 } 2487 2488 /* 2489 * Convert a hole to a delayed allocation. 2490 */ 2491 STATIC void 2492 xfs_bmap_add_extent_hole_delay( 2493 xfs_inode_t *ip, /* incore inode pointer */ 2494 int whichfork, 2495 struct xfs_iext_cursor *icur, 2496 xfs_bmbt_irec_t *new) /* new data to add to file extents */ 2497 { 2498 struct xfs_ifork *ifp; /* inode fork pointer */ 2499 xfs_bmbt_irec_t left; /* left neighbor extent entry */ 2500 xfs_filblks_t newlen=0; /* new indirect size */ 2501 xfs_filblks_t oldlen=0; /* old indirect size */ 2502 xfs_bmbt_irec_t right; /* right neighbor extent entry */ 2503 int state = xfs_bmap_fork_to_state(whichfork); 2504 xfs_filblks_t temp; /* temp for indirect calculations */ 2505 2506 ifp = XFS_IFORK_PTR(ip, whichfork); 2507 ASSERT(isnullstartblock(new->br_startblock)); 2508 2509 /* 2510 * Check and set flags if this segment has a left neighbor 2511 */ 2512 if (xfs_iext_peek_prev_extent(ifp, icur, &left)) { 2513 state |= BMAP_LEFT_VALID; 2514 if (isnullstartblock(left.br_startblock)) 2515 state |= BMAP_LEFT_DELAY; 2516 } 2517 2518 /* 2519 * Check and set flags if the current (right) segment exists. 2520 * If it doesn't exist, we're converting the hole at end-of-file. 2521 */ 2522 if (xfs_iext_get_extent(ifp, icur, &right)) { 2523 state |= BMAP_RIGHT_VALID; 2524 if (isnullstartblock(right.br_startblock)) 2525 state |= BMAP_RIGHT_DELAY; 2526 } 2527 2528 /* 2529 * Set contiguity flags on the left and right neighbors. 2530 * Don't let extents get too large, even if the pieces are contiguous. 2531 */ 2532 if ((state & BMAP_LEFT_VALID) && (state & BMAP_LEFT_DELAY) && 2533 left.br_startoff + left.br_blockcount == new->br_startoff && 2534 left.br_blockcount + new->br_blockcount <= MAXEXTLEN) 2535 state |= BMAP_LEFT_CONTIG; 2536 2537 if ((state & BMAP_RIGHT_VALID) && (state & BMAP_RIGHT_DELAY) && 2538 new->br_startoff + new->br_blockcount == right.br_startoff && 2539 new->br_blockcount + right.br_blockcount <= MAXEXTLEN && 2540 (!(state & BMAP_LEFT_CONTIG) || 2541 (left.br_blockcount + new->br_blockcount + 2542 right.br_blockcount <= MAXEXTLEN))) 2543 state |= BMAP_RIGHT_CONTIG; 2544 2545 /* 2546 * Switch out based on the contiguity flags. 2547 */ 2548 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) { 2549 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 2550 /* 2551 * New allocation is contiguous with delayed allocations 2552 * on the left and on the right. 2553 * Merge all three into a single extent record. 
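 *
 * The worst-case indirect block reservation for the merged extent
 * is recomputed as
 *
 *	newlen = min(xfs_bmap_worst_indlen(ip, temp), oldlen)
 *
 * where oldlen is the sum of the three existing reservations, so
 * merging never grows the reservation and any surplus is returned
 * to the free block count after the switch below.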
2554 */ 2555 temp = left.br_blockcount + new->br_blockcount + 2556 right.br_blockcount; 2557 2558 oldlen = startblockval(left.br_startblock) + 2559 startblockval(new->br_startblock) + 2560 startblockval(right.br_startblock); 2561 newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), 2562 oldlen); 2563 left.br_startblock = nullstartblock(newlen); 2564 left.br_blockcount = temp; 2565 2566 xfs_iext_remove(ip, icur, state); 2567 xfs_iext_prev(ifp, icur); 2568 xfs_iext_update_extent(ip, state, icur, &left); 2569 break; 2570 2571 case BMAP_LEFT_CONTIG: 2572 /* 2573 * New allocation is contiguous with a delayed allocation 2574 * on the left. 2575 * Merge the new allocation with the left neighbor. 2576 */ 2577 temp = left.br_blockcount + new->br_blockcount; 2578 2579 oldlen = startblockval(left.br_startblock) + 2580 startblockval(new->br_startblock); 2581 newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), 2582 oldlen); 2583 left.br_blockcount = temp; 2584 left.br_startblock = nullstartblock(newlen); 2585 2586 xfs_iext_prev(ifp, icur); 2587 xfs_iext_update_extent(ip, state, icur, &left); 2588 break; 2589 2590 case BMAP_RIGHT_CONTIG: 2591 /* 2592 * New allocation is contiguous with a delayed allocation 2593 * on the right. 2594 * Merge the new allocation with the right neighbor. 2595 */ 2596 temp = new->br_blockcount + right.br_blockcount; 2597 oldlen = startblockval(new->br_startblock) + 2598 startblockval(right.br_startblock); 2599 newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), 2600 oldlen); 2601 right.br_startoff = new->br_startoff; 2602 right.br_startblock = nullstartblock(newlen); 2603 right.br_blockcount = temp; 2604 xfs_iext_update_extent(ip, state, icur, &right); 2605 break; 2606 2607 case 0: 2608 /* 2609 * New allocation is not contiguous with another 2610 * delayed allocation. 2611 * Insert a new entry. 2612 */ 2613 oldlen = newlen = 0; 2614 xfs_iext_insert(ip, icur, new, state); 2615 break; 2616 } 2617 if (oldlen != newlen) { 2618 ASSERT(oldlen > newlen); 2619 xfs_mod_fdblocks(ip->i_mount, (int64_t)(oldlen - newlen), 2620 false); 2621 /* 2622 * Nothing to do for disk quota accounting here. 2623 */ 2624 } 2625 } 2626 2627 /* 2628 * Convert a hole to a real allocation. 2629 */ 2630 STATIC int /* error */ 2631 xfs_bmap_add_extent_hole_real( 2632 struct xfs_trans *tp, 2633 struct xfs_inode *ip, 2634 int whichfork, 2635 struct xfs_iext_cursor *icur, 2636 struct xfs_btree_cur **curp, 2637 struct xfs_bmbt_irec *new, 2638 int *logflagsp, 2639 int flags) 2640 { 2641 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork); 2642 struct xfs_mount *mp = ip->i_mount; 2643 struct xfs_btree_cur *cur = *curp; 2644 int error; /* error return value */ 2645 int i; /* temp state */ 2646 xfs_bmbt_irec_t left; /* left neighbor extent entry */ 2647 xfs_bmbt_irec_t right; /* right neighbor extent entry */ 2648 int rval=0; /* return value (logging flags) */ 2649 int state = xfs_bmap_fork_to_state(whichfork); 2650 struct xfs_bmbt_irec old; 2651 2652 ASSERT(!isnullstartblock(new->br_startblock)); 2653 ASSERT(!cur || !(cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL)); 2654 2655 XFS_STATS_INC(mp, xs_add_exlist); 2656 2657 /* 2658 * Check and set flags if this segment has a left neighbor. 2659 */ 2660 if (xfs_iext_peek_prev_extent(ifp, icur, &left)) { 2661 state |= BMAP_LEFT_VALID; 2662 if (isnullstartblock(left.br_startblock)) 2663 state |= BMAP_LEFT_DELAY; 2664 } 2665 2666 /* 2667 * Check and set flags if this segment has a current value. 2668 * Not true if we're inserting into the "hole" at eof. 
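 *
 * At eof the cursor points past the last record, so the
 * xfs_iext_get_extent() lookup below fails and BMAP_RIGHT_VALID
 * stays clear.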
2669 */ 2670 if (xfs_iext_get_extent(ifp, icur, &right)) { 2671 state |= BMAP_RIGHT_VALID; 2672 if (isnullstartblock(right.br_startblock)) 2673 state |= BMAP_RIGHT_DELAY; 2674 } 2675 2676 /* 2677 * We're inserting a real allocation between "left" and "right". 2678 * Set the contiguity flags. Don't let extents get too large. 2679 */ 2680 if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) && 2681 left.br_startoff + left.br_blockcount == new->br_startoff && 2682 left.br_startblock + left.br_blockcount == new->br_startblock && 2683 left.br_state == new->br_state && 2684 left.br_blockcount + new->br_blockcount <= MAXEXTLEN) 2685 state |= BMAP_LEFT_CONTIG; 2686 2687 if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) && 2688 new->br_startoff + new->br_blockcount == right.br_startoff && 2689 new->br_startblock + new->br_blockcount == right.br_startblock && 2690 new->br_state == right.br_state && 2691 new->br_blockcount + right.br_blockcount <= MAXEXTLEN && 2692 (!(state & BMAP_LEFT_CONTIG) || 2693 left.br_blockcount + new->br_blockcount + 2694 right.br_blockcount <= MAXEXTLEN)) 2695 state |= BMAP_RIGHT_CONTIG; 2696 2697 error = 0; 2698 /* 2699 * Select which case we're in here, and implement it. 2700 */ 2701 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) { 2702 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 2703 /* 2704 * New allocation is contiguous with real allocations on the 2705 * left and on the right. 2706 * Merge all three into a single extent record. 2707 */ 2708 left.br_blockcount += new->br_blockcount + right.br_blockcount; 2709 2710 xfs_iext_remove(ip, icur, state); 2711 xfs_iext_prev(ifp, icur); 2712 xfs_iext_update_extent(ip, state, icur, &left); 2713 2714 XFS_IFORK_NEXT_SET(ip, whichfork, 2715 XFS_IFORK_NEXTENTS(ip, whichfork) - 1); 2716 if (cur == NULL) { 2717 rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork); 2718 } else { 2719 rval = XFS_ILOG_CORE; 2720 error = xfs_bmbt_lookup_eq(cur, &right, &i); 2721 if (error) 2722 goto done; 2723 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2724 error = xfs_btree_delete(cur, &i); 2725 if (error) 2726 goto done; 2727 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2728 error = xfs_btree_decrement(cur, 0, &i); 2729 if (error) 2730 goto done; 2731 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2732 error = xfs_bmbt_update(cur, &left); 2733 if (error) 2734 goto done; 2735 } 2736 break; 2737 2738 case BMAP_LEFT_CONTIG: 2739 /* 2740 * New allocation is contiguous with a real allocation 2741 * on the left. 2742 * Merge the new allocation with the left neighbor. 2743 */ 2744 old = left; 2745 left.br_blockcount += new->br_blockcount; 2746 2747 xfs_iext_prev(ifp, icur); 2748 xfs_iext_update_extent(ip, state, icur, &left); 2749 2750 if (cur == NULL) { 2751 rval = xfs_ilog_fext(whichfork); 2752 } else { 2753 rval = 0; 2754 error = xfs_bmbt_lookup_eq(cur, &old, &i); 2755 if (error) 2756 goto done; 2757 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2758 error = xfs_bmbt_update(cur, &left); 2759 if (error) 2760 goto done; 2761 } 2762 break; 2763 2764 case BMAP_RIGHT_CONTIG: 2765 /* 2766 * New allocation is contiguous with a real allocation 2767 * on the right. 2768 * Merge the new allocation with the right neighbor. 
2769 */ 2770 old = right; 2771 2772 right.br_startoff = new->br_startoff; 2773 right.br_startblock = new->br_startblock; 2774 right.br_blockcount += new->br_blockcount; 2775 xfs_iext_update_extent(ip, state, icur, &right); 2776 2777 if (cur == NULL) { 2778 rval = xfs_ilog_fext(whichfork); 2779 } else { 2780 rval = 0; 2781 error = xfs_bmbt_lookup_eq(cur, &old, &i); 2782 if (error) 2783 goto done; 2784 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2785 error = xfs_bmbt_update(cur, &right); 2786 if (error) 2787 goto done; 2788 } 2789 break; 2790 2791 case 0: 2792 /* 2793 * New allocation is not contiguous with another 2794 * real allocation. 2795 * Insert a new entry. 2796 */ 2797 xfs_iext_insert(ip, icur, new, state); 2798 XFS_IFORK_NEXT_SET(ip, whichfork, 2799 XFS_IFORK_NEXTENTS(ip, whichfork) + 1); 2800 if (cur == NULL) { 2801 rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork); 2802 } else { 2803 rval = XFS_ILOG_CORE; 2804 error = xfs_bmbt_lookup_eq(cur, new, &i); 2805 if (error) 2806 goto done; 2807 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done); 2808 error = xfs_btree_insert(cur, &i); 2809 if (error) 2810 goto done; 2811 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2812 } 2813 break; 2814 } 2815 2816 /* add reverse mapping unless caller opted out */ 2817 if (!(flags & XFS_BMAPI_NORMAP)) { 2818 error = xfs_rmap_map_extent(tp, ip, whichfork, new); 2819 if (error) 2820 goto done; 2821 } 2822 2823 /* convert to a btree if necessary */ 2824 if (xfs_bmap_needs_btree(ip, whichfork)) { 2825 int tmp_logflags; /* partial log flag return val */ 2826 2827 ASSERT(cur == NULL); 2828 error = xfs_bmap_extents_to_btree(tp, ip, curp, 0, 2829 &tmp_logflags, whichfork); 2830 *logflagsp |= tmp_logflags; 2831 cur = *curp; 2832 if (error) 2833 goto done; 2834 } 2835 2836 /* clear out the allocated field, done with it now in any case. */ 2837 if (cur) 2838 cur->bc_private.b.allocated = 0; 2839 2840 xfs_bmap_check_leaf_extents(cur, ip, whichfork); 2841 done: 2842 *logflagsp |= rval; 2843 return error; 2844 } 2845 2846 /* 2847 * Functions used in the extent read, allocate and remove paths 2848 */ 2849 2850 /* 2851 * Adjust the size of the new extent based on di_extsize and rt extsize. 2852 */ 2853 int 2854 xfs_bmap_extsize_align( 2855 xfs_mount_t *mp, 2856 xfs_bmbt_irec_t *gotp, /* next extent pointer */ 2857 xfs_bmbt_irec_t *prevp, /* previous extent pointer */ 2858 xfs_extlen_t extsz, /* align to this extent size */ 2859 int rt, /* is this a realtime inode? */ 2860 int eof, /* is extent at end-of-file? */ 2861 int delay, /* creating delalloc extent? */ 2862 int convert, /* overwriting unwritten extent? */ 2863 xfs_fileoff_t *offp, /* in/out: aligned offset */ 2864 xfs_extlen_t *lenp) /* in/out: aligned length */ 2865 { 2866 xfs_fileoff_t orig_off; /* original offset */ 2867 xfs_extlen_t orig_alen; /* original length */ 2868 xfs_fileoff_t orig_end; /* original off+len */ 2869 xfs_fileoff_t nexto; /* next file offset */ 2870 xfs_fileoff_t prevo; /* previous file offset */ 2871 xfs_fileoff_t align_off; /* temp for offset */ 2872 xfs_extlen_t align_alen; /* temp for length */ 2873 xfs_extlen_t temp; /* temp for calculations */ 2874 2875 if (convert) 2876 return 0; 2877 2878 orig_off = align_off = *offp; 2879 orig_alen = align_alen = *lenp; 2880 orig_end = orig_off + orig_alen; 2881 2882 /* 2883 * If this request overlaps an existing extent, then don't 2884 * attempt to perform any additional alignment. 
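 *
 * Otherwise both ends of the request are rounded below: the start
 * down to an extsz boundary and the end up to one. For example,
 * with extsz = 16, *offp = 5 and *lenp = 3, the start rounds down
 * to 0 and the length rounds up to 16, so the aligned request
 * [0, 16) covers the original [5, 8).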
2885 */ 2886 if (!delay && !eof && 2887 (orig_off >= gotp->br_startoff) && 2888 (orig_end <= gotp->br_startoff + gotp->br_blockcount)) { 2889 return 0; 2890 } 2891 2892 /* 2893 * If the file offset is unaligned vs. the extent size 2894 * we need to align it. This will be possible unless 2895 * the file was previously written with a kernel that didn't 2896 * perform this alignment, or if a truncate shot us in the 2897 * foot. 2898 */ 2899 div_u64_rem(orig_off, extsz, &temp); 2900 if (temp) { 2901 align_alen += temp; 2902 align_off -= temp; 2903 } 2904 2905 /* Same adjustment for the end of the requested area. */ 2906 temp = (align_alen % extsz); 2907 if (temp) 2908 align_alen += extsz - temp; 2909 2910 /* 2911 * For large extent hint sizes, the aligned extent might be larger than 2912 * MAXEXTLEN. In that case, reduce the size by an extsz so that it pulls 2913 * the length back under MAXEXTLEN. The outer allocation loops handle 2914 * short allocation just fine, so it is safe to do this. We only want to 2915 * do it when we are forced to, though, because it means more allocation 2916 * operations are required. 2917 */ 2918 while (align_alen > MAXEXTLEN) 2919 align_alen -= extsz; 2920 ASSERT(align_alen <= MAXEXTLEN); 2921 2922 /* 2923 * If the previous block overlaps with this proposed allocation 2924 * then move the start forward without adjusting the length. 2925 */ 2926 if (prevp->br_startoff != NULLFILEOFF) { 2927 if (prevp->br_startblock == HOLESTARTBLOCK) 2928 prevo = prevp->br_startoff; 2929 else 2930 prevo = prevp->br_startoff + prevp->br_blockcount; 2931 } else 2932 prevo = 0; 2933 if (align_off != orig_off && align_off < prevo) 2934 align_off = prevo; 2935 /* 2936 * If the next block overlaps with this proposed allocation 2937 * then move the start back without adjusting the length, 2938 * but not before offset 0. 2939 * This may of course make the start overlap previous block, 2940 * and if we hit the offset 0 limit then the next block 2941 * can still overlap too. 2942 */ 2943 if (!eof && gotp->br_startoff != NULLFILEOFF) { 2944 if ((delay && gotp->br_startblock == HOLESTARTBLOCK) || 2945 (!delay && gotp->br_startblock == DELAYSTARTBLOCK)) 2946 nexto = gotp->br_startoff + gotp->br_blockcount; 2947 else 2948 nexto = gotp->br_startoff; 2949 } else 2950 nexto = NULLFILEOFF; 2951 if (!eof && 2952 align_off + align_alen != orig_end && 2953 align_off + align_alen > nexto) 2954 align_off = nexto > align_alen ? nexto - align_alen : 0; 2955 /* 2956 * If we're now overlapping the next or previous extent that 2957 * means we can't fit an extsz piece in this hole. Just move 2958 * the start forward to the first valid spot and set 2959 * the length so we hit the end. 2960 */ 2961 if (align_off != orig_off && align_off < prevo) 2962 align_off = prevo; 2963 if (align_off + align_alen != orig_end && 2964 align_off + align_alen > nexto && 2965 nexto != NULLFILEOFF) { 2966 ASSERT(nexto > prevo); 2967 align_alen = nexto - align_off; 2968 } 2969 2970 /* 2971 * If realtime, and the result isn't a multiple of the realtime 2972 * extent size we need to remove blocks until it is. 2973 */ 2974 if (rt && (temp = (align_alen % mp->m_sb.sb_rextsize))) { 2975 /* 2976 * We're not covering the original request, or 2977 * we won't be able to once we fix the length. 2978 */ 2979 if (orig_off < align_off || 2980 orig_end > align_off + align_alen || 2981 align_alen - temp < orig_alen) 2982 return -EINVAL; 2983 /* 2984 * Try to fix it by moving the start up. 
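 * Advancing the start by the excess (temp) keeps the end in place
 * and makes the length an exact multiple of the rt extent size,
 * but is only allowed while the original start remains covered.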
2985 */ 2986 if (align_off + temp <= orig_off) { 2987 align_alen -= temp; 2988 align_off += temp; 2989 } 2990 /* 2991 * Try to fix it by moving the end in. 2992 */ 2993 else if (align_off + align_alen - temp >= orig_end) 2994 align_alen -= temp; 2995 /* 2996 * Set the start to the minimum then trim the length. 2997 */ 2998 else { 2999 align_alen -= orig_off - align_off; 3000 align_off = orig_off; 3001 align_alen -= align_alen % mp->m_sb.sb_rextsize; 3002 } 3003 /* 3004 * Result doesn't cover the request, fail it. 3005 */ 3006 if (orig_off < align_off || orig_end > align_off + align_alen) 3007 return -EINVAL; 3008 } else { 3009 ASSERT(orig_off >= align_off); 3010 /* see MAXEXTLEN handling above */ 3011 ASSERT(orig_end <= align_off + align_alen || 3012 align_alen + extsz > MAXEXTLEN); 3013 } 3014 3015 #ifdef DEBUG 3016 if (!eof && gotp->br_startoff != NULLFILEOFF) 3017 ASSERT(align_off + align_alen <= gotp->br_startoff); 3018 if (prevp->br_startoff != NULLFILEOFF) 3019 ASSERT(align_off >= prevp->br_startoff + prevp->br_blockcount); 3020 #endif 3021 3022 *lenp = align_alen; 3023 *offp = align_off; 3024 return 0; 3025 } 3026 3027 #define XFS_ALLOC_GAP_UNITS 4 3028 3029 void 3030 xfs_bmap_adjacent( 3031 struct xfs_bmalloca *ap) /* bmap alloc argument struct */ 3032 { 3033 xfs_fsblock_t adjust; /* adjustment to block numbers */ 3034 xfs_agnumber_t fb_agno; /* ag number of ap->firstblock */ 3035 xfs_mount_t *mp; /* mount point structure */ 3036 int nullfb; /* true if ap->firstblock isn't set */ 3037 int rt; /* true if inode is realtime */ 3038 3039 #define ISVALID(x,y) \ 3040 (rt ? \ 3041 (x) < mp->m_sb.sb_rblocks : \ 3042 XFS_FSB_TO_AGNO(mp, x) == XFS_FSB_TO_AGNO(mp, y) && \ 3043 XFS_FSB_TO_AGNO(mp, x) < mp->m_sb.sb_agcount && \ 3044 XFS_FSB_TO_AGBNO(mp, x) < mp->m_sb.sb_agblocks) 3045 3046 mp = ap->ip->i_mount; 3047 nullfb = ap->tp->t_firstblock == NULLFSBLOCK; 3048 rt = XFS_IS_REALTIME_INODE(ap->ip) && 3049 xfs_alloc_is_userdata(ap->datatype); 3050 fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, 3051 ap->tp->t_firstblock); 3052 /* 3053 * If allocating at eof, and there's a previous real block, 3054 * try to use its last block as our starting point. 3055 */ 3056 if (ap->eof && ap->prev.br_startoff != NULLFILEOFF && 3057 !isnullstartblock(ap->prev.br_startblock) && 3058 ISVALID(ap->prev.br_startblock + ap->prev.br_blockcount, 3059 ap->prev.br_startblock)) { 3060 ap->blkno = ap->prev.br_startblock + ap->prev.br_blockcount; 3061 /* 3062 * Adjust for the gap between prevp and us. 3063 */ 3064 adjust = ap->offset - 3065 (ap->prev.br_startoff + ap->prev.br_blockcount); 3066 if (adjust && 3067 ISVALID(ap->blkno + adjust, ap->prev.br_startblock)) 3068 ap->blkno += adjust; 3069 } 3070 /* 3071 * If not at eof, then compare the two neighbor blocks. 3072 * Figure out whether either one gives us a good starting point, 3073 * and pick the better one. 3074 */ 3075 else if (!ap->eof) { 3076 xfs_fsblock_t gotbno; /* right side block number */ 3077 xfs_fsblock_t gotdiff=0; /* right side difference */ 3078 xfs_fsblock_t prevbno; /* left side block number */ 3079 xfs_fsblock_t prevdiff=0; /* left side difference */ 3080 3081 /* 3082 * If there's a previous (left) block, select a requested 3083 * start block based on it. 3084 */ 3085 if (ap->prev.br_startoff != NULLFILEOFF && 3086 !isnullstartblock(ap->prev.br_startblock) && 3087 (prevbno = ap->prev.br_startblock + 3088 ap->prev.br_blockcount) && 3089 ISVALID(prevbno, ap->prev.br_startblock)) { 3090 /* 3091 * Calculate gap to end of previous block. 
3092 */ 3093 adjust = prevdiff = ap->offset - 3094 (ap->prev.br_startoff + 3095 ap->prev.br_blockcount); 3096 /* 3097 * Figure the startblock based on the previous block's 3098 * end and the gap size. 3099 * Heuristic! 3100 * If the gap is large relative to the piece we're 3101 * allocating, or using it gives us an invalid block 3102 * number, then just use the end of the previous block. 3103 */ 3104 if (prevdiff <= XFS_ALLOC_GAP_UNITS * ap->length && 3105 ISVALID(prevbno + prevdiff, 3106 ap->prev.br_startblock)) 3107 prevbno += adjust; 3108 else 3109 prevdiff += adjust; 3110 /* 3111 * If the firstblock forbids it, can't use it, 3112 * must use default. 3113 */ 3114 if (!rt && !nullfb && 3115 XFS_FSB_TO_AGNO(mp, prevbno) != fb_agno) 3116 prevbno = NULLFSBLOCK; 3117 } 3118 /* 3119 * No previous block or can't follow it, just default. 3120 */ 3121 else 3122 prevbno = NULLFSBLOCK; 3123 /* 3124 * If there's a following (right) block, select a requested 3125 * start block based on it. 3126 */ 3127 if (!isnullstartblock(ap->got.br_startblock)) { 3128 /* 3129 * Calculate gap to start of next block. 3130 */ 3131 adjust = gotdiff = ap->got.br_startoff - ap->offset; 3132 /* 3133 * Figure the startblock based on the next block's 3134 * start and the gap size. 3135 */ 3136 gotbno = ap->got.br_startblock; 3137 /* 3138 * Heuristic! 3139 * If the gap is large relative to the piece we're 3140 * allocating, or using it gives us an invalid block 3141 * number, then just use the start of the next block 3142 * offset by our length. 3143 */ 3144 if (gotdiff <= XFS_ALLOC_GAP_UNITS * ap->length && 3145 ISVALID(gotbno - gotdiff, gotbno)) 3146 gotbno -= adjust; 3147 else if (ISVALID(gotbno - ap->length, gotbno)) { 3148 gotbno -= ap->length; 3149 gotdiff += adjust - ap->length; 3150 } else 3151 gotdiff += adjust; 3152 /* 3153 * If the firstblock forbids it, can't use it, 3154 * must use default. 3155 */ 3156 if (!rt && !nullfb && 3157 XFS_FSB_TO_AGNO(mp, gotbno) != fb_agno) 3158 gotbno = NULLFSBLOCK; 3159 } 3160 /* 3161 * No next block, just default. 3162 */ 3163 else 3164 gotbno = NULLFSBLOCK; 3165 /* 3166 * If both valid, pick the better one, else the only good 3167 * one, else ap->blkno is already set (to 0 or the inode block). 3168 */ 3169 if (prevbno != NULLFSBLOCK && gotbno != NULLFSBLOCK) 3170 ap->blkno = prevdiff <= gotdiff ? 
prevbno : gotbno; 3171 else if (prevbno != NULLFSBLOCK) 3172 ap->blkno = prevbno; 3173 else if (gotbno != NULLFSBLOCK) 3174 ap->blkno = gotbno; 3175 } 3176 #undef ISVALID 3177 } 3178 3179 static int 3180 xfs_bmap_longest_free_extent( 3181 struct xfs_trans *tp, 3182 xfs_agnumber_t ag, 3183 xfs_extlen_t *blen, 3184 int *notinit) 3185 { 3186 struct xfs_mount *mp = tp->t_mountp; 3187 struct xfs_perag *pag; 3188 xfs_extlen_t longest; 3189 int error = 0; 3190 3191 pag = xfs_perag_get(mp, ag); 3192 if (!pag->pagf_init) { 3193 error = xfs_alloc_pagf_init(mp, tp, ag, XFS_ALLOC_FLAG_TRYLOCK); 3194 if (error) 3195 goto out; 3196 3197 if (!pag->pagf_init) { 3198 *notinit = 1; 3199 goto out; 3200 } 3201 } 3202 3203 longest = xfs_alloc_longest_free_extent(pag, 3204 xfs_alloc_min_freelist(mp, pag), 3205 xfs_ag_resv_needed(pag, XFS_AG_RESV_NONE)); 3206 if (*blen < longest) 3207 *blen = longest; 3208 3209 out: 3210 xfs_perag_put(pag); 3211 return error; 3212 } 3213 3214 static void 3215 xfs_bmap_select_minlen( 3216 struct xfs_bmalloca *ap, 3217 struct xfs_alloc_arg *args, 3218 xfs_extlen_t *blen, 3219 int notinit) 3220 { 3221 if (notinit || *blen < ap->minlen) { 3222 /* 3223 * Since we only tried to lock the AGF (XFS_ALLOC_FLAG_TRYLOCK) above, 3224 * it is possible that there is space for this request. 3225 */ 3226 args->minlen = ap->minlen; 3227 } else if (*blen < args->maxlen) { 3228 /* 3229 * If the best seen length is less than the request length, 3230 * use the best as the minimum. 3231 */ 3232 args->minlen = *blen; 3233 } else { 3234 /* 3235 * Otherwise we've seen an extent as big as maxlen, use that 3236 * as the minimum. 3237 */ 3238 args->minlen = args->maxlen; 3239 } 3240 } 3241 3242 STATIC int 3243 xfs_bmap_btalloc_nullfb( 3244 struct xfs_bmalloca *ap, 3245 struct xfs_alloc_arg *args, 3246 xfs_extlen_t *blen) 3247 { 3248 struct xfs_mount *mp = ap->ip->i_mount; 3249 xfs_agnumber_t ag, startag; 3250 int notinit = 0; 3251 int error; 3252 3253 args->type = XFS_ALLOCTYPE_START_BNO; 3254 args->total = ap->total; 3255 3256 startag = ag = XFS_FSB_TO_AGNO(mp, args->fsbno); 3257 if (startag == NULLAGNUMBER) 3258 startag = ag = 0; 3259 3260 while (*blen < args->maxlen) { 3261 error = xfs_bmap_longest_free_extent(args->tp, ag, blen, 3262 &notinit); 3263 if (error) 3264 return error; 3265 3266 if (++ag == mp->m_sb.sb_agcount) 3267 ag = 0; 3268 if (ag == startag) 3269 break; 3270 } 3271 3272 xfs_bmap_select_minlen(ap, args, blen, notinit); 3273 return 0; 3274 } 3275 3276 STATIC int 3277 xfs_bmap_btalloc_filestreams( 3278 struct xfs_bmalloca *ap, 3279 struct xfs_alloc_arg *args, 3280 xfs_extlen_t *blen) 3281 { 3282 struct xfs_mount *mp = ap->ip->i_mount; 3283 xfs_agnumber_t ag; 3284 int notinit = 0; 3285 int error; 3286 3287 args->type = XFS_ALLOCTYPE_NEAR_BNO; 3288 args->total = ap->total; 3289 3290 ag = XFS_FSB_TO_AGNO(mp, args->fsbno); 3291 if (ag == NULLAGNUMBER) 3292 ag = 0; 3293 3294 error = xfs_bmap_longest_free_extent(args->tp, ag, blen, &notinit); 3295 if (error) 3296 return error; 3297 3298 if (*blen < args->maxlen) { 3299 error = xfs_filestream_new_ag(ap, &ag); 3300 if (error) 3301 return error; 3302 3303 error = xfs_bmap_longest_free_extent(args->tp, ag, blen, 3304 &notinit); 3305 if (error) 3306 return error; 3307 3308 } 3309 3310 xfs_bmap_select_minlen(ap, args, blen, notinit); 3311 3312 /* 3313 * Set the failure fallback case to look in the selected AG as stream 3314 * may have moved.
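 * XFS_AGB_TO_FSB(mp, ag, 0) is the first block of the chosen AG,
 * so any fallback attempt is made in the AG the filestream now
 * lives in.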
3315 */ 3316 ap->blkno = args->fsbno = XFS_AGB_TO_FSB(mp, ag, 0); 3317 return 0; 3318 } 3319 3320 /* Update all inode and quota accounting for the allocation we just did. */ 3321 static void 3322 xfs_bmap_btalloc_accounting( 3323 struct xfs_bmalloca *ap, 3324 struct xfs_alloc_arg *args) 3325 { 3326 if (ap->flags & XFS_BMAPI_COWFORK) { 3327 /* 3328 * COW fork blocks are in-core only and thus are treated as 3329 * in-core quota reservation (like delalloc blocks) even when 3330 * converted to real blocks. The quota reservation is not 3331 * accounted to disk until blocks are remapped to the data 3332 * fork. So if these blocks were previously delalloc, we 3333 * already have quota reservation and there's nothing to do 3334 * yet. 3335 */ 3336 if (ap->wasdel) 3337 return; 3338 3339 /* 3340 * Otherwise, we've allocated blocks in a hole. The transaction 3341 * has acquired in-core quota reservation for this extent. 3342 * Rather than account these as real blocks, however, we reduce 3343 * the transaction quota reservation based on the allocation. 3344 * This essentially transfers the transaction quota reservation 3345 * to that of a delalloc extent. 3346 */ 3347 ap->ip->i_delayed_blks += args->len; 3348 xfs_trans_mod_dquot_byino(ap->tp, ap->ip, XFS_TRANS_DQ_RES_BLKS, 3349 -(long)args->len); 3350 return; 3351 } 3352 3353 /* data/attr fork only */ 3354 ap->ip->i_d.di_nblocks += args->len; 3355 xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE); 3356 if (ap->wasdel) 3357 ap->ip->i_delayed_blks -= args->len; 3358 xfs_trans_mod_dquot_byino(ap->tp, ap->ip, 3359 ap->wasdel ? XFS_TRANS_DQ_DELBCOUNT : XFS_TRANS_DQ_BCOUNT, 3360 args->len); 3361 } 3362 3363 STATIC int 3364 xfs_bmap_btalloc( 3365 struct xfs_bmalloca *ap) /* bmap alloc argument struct */ 3366 { 3367 xfs_mount_t *mp; /* mount point structure */ 3368 xfs_alloctype_t atype = 0; /* type for allocation routines */ 3369 xfs_extlen_t align = 0; /* minimum allocation alignment */ 3370 xfs_agnumber_t fb_agno; /* ag number of ap->firstblock */ 3371 xfs_agnumber_t ag; 3372 xfs_alloc_arg_t args; 3373 xfs_fileoff_t orig_offset; 3374 xfs_extlen_t orig_length; 3375 xfs_extlen_t blen; 3376 xfs_extlen_t nextminlen = 0; 3377 int nullfb; /* true if ap->firstblock isn't set */ 3378 int isaligned; 3379 int tryagain; 3380 int error; 3381 int stripe_align; 3382 3383 ASSERT(ap->length); 3384 orig_offset = ap->offset; 3385 orig_length = ap->length; 3386 3387 mp = ap->ip->i_mount; 3388 3389 /* stripe alignment for allocation is determined by mount parameters */ 3390 stripe_align = 0; 3391 if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC)) 3392 stripe_align = mp->m_swidth; 3393 else if (mp->m_dalign) 3394 stripe_align = mp->m_dalign; 3395 3396 if (ap->flags & XFS_BMAPI_COWFORK) 3397 align = xfs_get_cowextsz_hint(ap->ip); 3398 else if (xfs_alloc_is_userdata(ap->datatype)) 3399 align = xfs_get_extsz_hint(ap->ip); 3400 if (align) { 3401 error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev, 3402 align, 0, ap->eof, 0, ap->conv, 3403 &ap->offset, &ap->length); 3404 ASSERT(!error); 3405 ASSERT(ap->length); 3406 } 3407 3408 3409 nullfb = ap->tp->t_firstblock == NULLFSBLOCK; 3410 fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, 3411 ap->tp->t_firstblock); 3412 if (nullfb) { 3413 if (xfs_alloc_is_userdata(ap->datatype) && 3414 xfs_inode_is_filestream(ap->ip)) { 3415 ag = xfs_filestream_lookup_ag(ap->ip); 3416 ag = (ag != NULLAGNUMBER) ? 
ag : 0; 3417 ap->blkno = XFS_AGB_TO_FSB(mp, ag, 0); 3418 } else { 3419 ap->blkno = XFS_INO_TO_FSB(mp, ap->ip->i_ino); 3420 } 3421 } else 3422 ap->blkno = ap->tp->t_firstblock; 3423 3424 xfs_bmap_adjacent(ap); 3425 3426 /* 3427 * If allowed, use ap->blkno; otherwise must use firstblock since 3428 * it's in the right allocation group. 3429 */ 3430 if (nullfb || XFS_FSB_TO_AGNO(mp, ap->blkno) == fb_agno) 3431 ; 3432 else 3433 ap->blkno = ap->tp->t_firstblock; 3434 /* 3435 * Normal allocation, done through xfs_alloc_vextent. 3436 */ 3437 tryagain = isaligned = 0; 3438 memset(&args, 0, sizeof(args)); 3439 args.tp = ap->tp; 3440 args.mp = mp; 3441 args.fsbno = ap->blkno; 3442 xfs_rmap_skip_owner_update(&args.oinfo); 3443 3444 /* Trim the allocation back to the maximum an AG can fit. */ 3445 args.maxlen = min(ap->length, mp->m_ag_max_usable); 3446 blen = 0; 3447 if (nullfb) { 3448 /* 3449 * Search for an allocation group with a single extent large 3450 * enough for the request. If one isn't found, then adjust 3451 * the minimum allocation size to the largest space found. 3452 */ 3453 if (xfs_alloc_is_userdata(ap->datatype) && 3454 xfs_inode_is_filestream(ap->ip)) 3455 error = xfs_bmap_btalloc_filestreams(ap, &args, &blen); 3456 else 3457 error = xfs_bmap_btalloc_nullfb(ap, &args, &blen); 3458 if (error) 3459 return error; 3460 } else if (ap->tp->t_flags & XFS_TRANS_LOWMODE) { 3461 if (xfs_inode_is_filestream(ap->ip)) 3462 args.type = XFS_ALLOCTYPE_FIRST_AG; 3463 else 3464 args.type = XFS_ALLOCTYPE_START_BNO; 3465 args.total = args.minlen = ap->minlen; 3466 } else { 3467 args.type = XFS_ALLOCTYPE_NEAR_BNO; 3468 args.total = ap->total; 3469 args.minlen = ap->minlen; 3470 } 3471 /* apply extent size hints if obtained earlier */ 3472 if (align) { 3473 args.prod = align; 3474 div_u64_rem(ap->offset, args.prod, &args.mod); 3475 if (args.mod) 3476 args.mod = args.prod - args.mod; 3477 } else if (mp->m_sb.sb_blocksize >= PAGE_SIZE) { 3478 args.prod = 1; 3479 args.mod = 0; 3480 } else { 3481 args.prod = PAGE_SIZE >> mp->m_sb.sb_blocklog; 3482 div_u64_rem(ap->offset, args.prod, &args.mod); 3483 if (args.mod) 3484 args.mod = args.prod - args.mod; 3485 } 3486 /* 3487 * If we are not low on available data blocks, and the 3488 * underlying logical volume manager is a stripe, and 3489 * the file offset is zero then try to allocate data 3490 * blocks on stripe unit boundary. 3491 * NOTE: ap->aeof is only set if the allocation length 3492 * is >= the stripe unit and the allocation offset is 3493 * at the end of file. 3494 */ 3495 if (!(ap->tp->t_flags & XFS_TRANS_LOWMODE) && ap->aeof) { 3496 if (!ap->offset) { 3497 args.alignment = stripe_align; 3498 atype = args.type; 3499 isaligned = 1; 3500 /* 3501 * Adjust for alignment 3502 */ 3503 if (blen > args.alignment && blen <= args.maxlen) 3504 args.minlen = blen - args.alignment; 3505 args.minalignslop = 0; 3506 } else { 3507 /* 3508 * First try an exact bno allocation. 3509 * If it fails then do a near or start bno 3510 * allocation with alignment turned on. 3511 */ 3512 atype = args.type; 3513 tryagain = 1; 3514 args.type = XFS_ALLOCTYPE_THIS_BNO; 3515 args.alignment = 1; 3516 /* 3517 * Compute the minlen+alignment for the 3518 * next case. Set slop so that the value 3519 * of minlen+alignment+slop doesn't go up 3520 * between the calls. 
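 *
 * For example, with stripe_align = 8 and nextminlen equal to
 * args.minlen = 4, minalignslop becomes 4 + 8 - 4 - 1 = 7, so the
 * exact-bno attempt reserves enough space (4 + 7 = 11 blocks) for
 * an aligned retry that may need up to nextminlen + stripe_align
 * - 1 = 11 blocks in the worst case.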
3521 */ 3522 if (blen > stripe_align && blen <= args.maxlen) 3523 nextminlen = blen - stripe_align; 3524 else 3525 nextminlen = args.minlen; 3526 if (nextminlen + stripe_align > args.minlen + 1) 3527 args.minalignslop = 3528 nextminlen + stripe_align - 3529 args.minlen - 1; 3530 else 3531 args.minalignslop = 0; 3532 } 3533 } else { 3534 args.alignment = 1; 3535 args.minalignslop = 0; 3536 } 3537 args.minleft = ap->minleft; 3538 args.wasdel = ap->wasdel; 3539 args.resv = XFS_AG_RESV_NONE; 3540 args.datatype = ap->datatype; 3541 if (ap->datatype & XFS_ALLOC_USERDATA_ZERO) 3542 args.ip = ap->ip; 3543 3544 error = xfs_alloc_vextent(&args); 3545 if (error) 3546 return error; 3547 3548 if (tryagain && args.fsbno == NULLFSBLOCK) { 3549 /* 3550 * Exact allocation failed. Now try with alignment 3551 * turned on. 3552 */ 3553 args.type = atype; 3554 args.fsbno = ap->blkno; 3555 args.alignment = stripe_align; 3556 args.minlen = nextminlen; 3557 args.minalignslop = 0; 3558 isaligned = 1; 3559 if ((error = xfs_alloc_vextent(&args))) 3560 return error; 3561 } 3562 if (isaligned && args.fsbno == NULLFSBLOCK) { 3563 /* 3564 * allocation failed, so turn off alignment and 3565 * try again. 3566 */ 3567 args.type = atype; 3568 args.fsbno = ap->blkno; 3569 args.alignment = 0; 3570 if ((error = xfs_alloc_vextent(&args))) 3571 return error; 3572 } 3573 if (args.fsbno == NULLFSBLOCK && nullfb && 3574 args.minlen > ap->minlen) { 3575 args.minlen = ap->minlen; 3576 args.type = XFS_ALLOCTYPE_START_BNO; 3577 args.fsbno = ap->blkno; 3578 if ((error = xfs_alloc_vextent(&args))) 3579 return error; 3580 } 3581 if (args.fsbno == NULLFSBLOCK && nullfb) { 3582 args.fsbno = 0; 3583 args.type = XFS_ALLOCTYPE_FIRST_AG; 3584 args.total = ap->minlen; 3585 if ((error = xfs_alloc_vextent(&args))) 3586 return error; 3587 ap->tp->t_flags |= XFS_TRANS_LOWMODE; 3588 } 3589 if (args.fsbno != NULLFSBLOCK) { 3590 /* 3591 * check the allocation happened at the same or higher AG than 3592 * the first block that was allocated. 3593 */ 3594 ASSERT(ap->tp->t_firstblock == NULLFSBLOCK || 3595 XFS_FSB_TO_AGNO(mp, ap->tp->t_firstblock) <= 3596 XFS_FSB_TO_AGNO(mp, args.fsbno)); 3597 3598 ap->blkno = args.fsbno; 3599 if (ap->tp->t_firstblock == NULLFSBLOCK) 3600 ap->tp->t_firstblock = args.fsbno; 3601 ASSERT(nullfb || fb_agno <= args.agno); 3602 ap->length = args.len; 3603 /* 3604 * If the extent size hint is active, we tried to round the 3605 * caller's allocation request offset down to extsz and the 3606 * length up to another extsz boundary. If we found a free 3607 * extent we mapped it in starting at this new offset. If the 3608 * newly mapped space isn't long enough to cover any of the 3609 * range of offsets that was originally requested, move the 3610 * mapping up so that we can fill as much of the caller's 3611 * original request as possible. Free space is apparently 3612 * very fragmented so we're unlikely to be able to satisfy the 3613 * hints anyway. 3614 */ 3615 if (ap->length <= orig_length) 3616 ap->offset = orig_offset; 3617 else if (ap->offset + ap->length < orig_offset + orig_length) 3618 ap->offset = orig_offset + orig_length - ap->length; 3619 xfs_bmap_btalloc_accounting(ap, &args); 3620 } else { 3621 ap->blkno = NULLFSBLOCK; 3622 ap->length = 0; 3623 } 3624 return 0; 3625 } 3626 3627 /* 3628 * xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file. 3629 * It figures out where to ask the underlying allocator to put the new extent. 
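 * User data on a realtime inode is allocated from the realtime
 * device via xfs_bmap_rtalloc(); everything else goes through the
 * AG-based allocator, xfs_bmap_btalloc().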
*/ 3631 STATIC int 3632 xfs_bmap_alloc( 3633 struct xfs_bmalloca *ap) /* bmap alloc argument struct */ 3634 { 3635 if (XFS_IS_REALTIME_INODE(ap->ip) && 3636 xfs_alloc_is_userdata(ap->datatype)) 3637 return xfs_bmap_rtalloc(ap); 3638 return xfs_bmap_btalloc(ap); 3639 } 3640 3641 /* Trim extent to fit a logical block range. */ 3642 void 3643 xfs_trim_extent( 3644 struct xfs_bmbt_irec *irec, 3645 xfs_fileoff_t bno, 3646 xfs_filblks_t len) 3647 { 3648 xfs_fileoff_t distance; 3649 xfs_fileoff_t end = bno + len; 3650 3651 if (irec->br_startoff + irec->br_blockcount <= bno || 3652 irec->br_startoff >= end) { 3653 irec->br_blockcount = 0; 3654 return; 3655 } 3656 3657 if (irec->br_startoff < bno) { 3658 distance = bno - irec->br_startoff; 3659 if (isnullstartblock(irec->br_startblock)) 3660 irec->br_startblock = DELAYSTARTBLOCK; 3661 if (irec->br_startblock != DELAYSTARTBLOCK && 3662 irec->br_startblock != HOLESTARTBLOCK) 3663 irec->br_startblock += distance; 3664 irec->br_startoff += distance; 3665 irec->br_blockcount -= distance; 3666 } 3667 3668 if (end < irec->br_startoff + irec->br_blockcount) { 3669 distance = irec->br_startoff + irec->br_blockcount - end; 3670 irec->br_blockcount -= distance; 3671 } 3672 } 3673 3674 /* trim extent to within eof */ 3675 void 3676 xfs_trim_extent_eof( 3677 struct xfs_bmbt_irec *irec, 3678 struct xfs_inode *ip) 3679 3680 { 3681 xfs_trim_extent(irec, 0, XFS_B_TO_FSB(ip->i_mount, 3682 i_size_read(VFS_I(ip)))); 3683 } 3684 3685 /* 3686 * Trim the returned map to the required bounds 3687 */ 3688 STATIC void 3689 xfs_bmapi_trim_map( 3690 struct xfs_bmbt_irec *mval, 3691 struct xfs_bmbt_irec *got, 3692 xfs_fileoff_t *bno, 3693 xfs_filblks_t len, 3694 xfs_fileoff_t obno, 3695 xfs_fileoff_t end, 3696 int n, 3697 int flags) 3698 { 3699 if ((flags & XFS_BMAPI_ENTIRE) || 3700 got->br_startoff + got->br_blockcount <= obno) { 3701 *mval = *got; 3702 if (isnullstartblock(got->br_startblock)) 3703 mval->br_startblock = DELAYSTARTBLOCK; 3704 return; 3705 } 3706 3707 if (obno > *bno) 3708 *bno = obno; 3709 ASSERT((*bno >= obno) || (n == 0)); 3710 ASSERT(*bno < end); 3711 mval->br_startoff = *bno; 3712 if (isnullstartblock(got->br_startblock)) 3713 mval->br_startblock = DELAYSTARTBLOCK; 3714 else 3715 mval->br_startblock = got->br_startblock + 3716 (*bno - got->br_startoff); 3717 /* 3718 * Return the minimum of what we got and what we asked for, for 3719 * the length. We can use the len variable here because it is 3720 * modified below and we could have been there before coming 3721 * here if the first part of the allocation didn't overlap what 3722 * was asked for.
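 *
 * For instance, if the caller asked for [bno, end) = [100, 108)
 * and got covers [96, 112), the mapping starts at offset 100 and
 * runs for min(108 - 100, 112 - 100) = 8 blocks.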
3723 */ 3724 mval->br_blockcount = XFS_FILBLKS_MIN(end - *bno, 3725 got->br_blockcount - (*bno - got->br_startoff)); 3726 mval->br_state = got->br_state; 3727 ASSERT(mval->br_blockcount <= len); 3728 return; 3729 } 3730 3731 /* 3732 * Update and validate the extent map to return 3733 */ 3734 STATIC void 3735 xfs_bmapi_update_map( 3736 struct xfs_bmbt_irec **map, 3737 xfs_fileoff_t *bno, 3738 xfs_filblks_t *len, 3739 xfs_fileoff_t obno, 3740 xfs_fileoff_t end, 3741 int *n, 3742 int flags) 3743 { 3744 xfs_bmbt_irec_t *mval = *map; 3745 3746 ASSERT((flags & XFS_BMAPI_ENTIRE) || 3747 ((mval->br_startoff + mval->br_blockcount) <= end)); 3748 ASSERT((flags & XFS_BMAPI_ENTIRE) || (mval->br_blockcount <= *len) || 3749 (mval->br_startoff < obno)); 3750 3751 *bno = mval->br_startoff + mval->br_blockcount; 3752 *len = end - *bno; 3753 if (*n > 0 && mval->br_startoff == mval[-1].br_startoff) { 3754 /* update previous map with new information */ 3755 ASSERT(mval->br_startblock == mval[-1].br_startblock); 3756 ASSERT(mval->br_blockcount > mval[-1].br_blockcount); 3757 ASSERT(mval->br_state == mval[-1].br_state); 3758 mval[-1].br_blockcount = mval->br_blockcount; 3759 mval[-1].br_state = mval->br_state; 3760 } else if (*n > 0 && mval->br_startblock != DELAYSTARTBLOCK && 3761 mval[-1].br_startblock != DELAYSTARTBLOCK && 3762 mval[-1].br_startblock != HOLESTARTBLOCK && 3763 mval->br_startblock == mval[-1].br_startblock + 3764 mval[-1].br_blockcount && 3765 mval[-1].br_state == mval->br_state) { 3766 ASSERT(mval->br_startoff == 3767 mval[-1].br_startoff + mval[-1].br_blockcount); 3768 mval[-1].br_blockcount += mval->br_blockcount; 3769 } else if (*n > 0 && 3770 mval->br_startblock == DELAYSTARTBLOCK && 3771 mval[-1].br_startblock == DELAYSTARTBLOCK && 3772 mval->br_startoff == 3773 mval[-1].br_startoff + mval[-1].br_blockcount) { 3774 mval[-1].br_blockcount += mval->br_blockcount; 3775 mval[-1].br_state = mval->br_state; 3776 } else if (!((*n == 0) && 3777 ((mval->br_startoff + mval->br_blockcount) <= 3778 obno))) { 3779 mval++; 3780 (*n)++; 3781 } 3782 *map = mval; 3783 } 3784 3785 /* 3786 * Map file blocks to filesystem blocks without allocation. 3787 */ 3788 int 3789 xfs_bmapi_read( 3790 struct xfs_inode *ip, 3791 xfs_fileoff_t bno, 3792 xfs_filblks_t len, 3793 struct xfs_bmbt_irec *mval, 3794 int *nmap, 3795 int flags) 3796 { 3797 struct xfs_mount *mp = ip->i_mount; 3798 struct xfs_ifork *ifp; 3799 struct xfs_bmbt_irec got; 3800 xfs_fileoff_t obno; 3801 xfs_fileoff_t end; 3802 struct xfs_iext_cursor icur; 3803 int error; 3804 bool eof = false; 3805 int n = 0; 3806 int whichfork = xfs_bmapi_whichfork(flags); 3807 3808 ASSERT(*nmap >= 1); 3809 ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK|XFS_BMAPI_ENTIRE| 3810 XFS_BMAPI_COWFORK))); 3811 ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)); 3812 3813 if (unlikely(XFS_TEST_ERROR( 3814 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS && 3815 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE), 3816 mp, XFS_ERRTAG_BMAPIFORMAT))) { 3817 XFS_ERROR_REPORT("xfs_bmapi_read", XFS_ERRLEVEL_LOW, mp); 3818 return -EFSCORRUPTED; 3819 } 3820 3821 if (XFS_FORCED_SHUTDOWN(mp)) 3822 return -EIO; 3823 3824 XFS_STATS_INC(mp, xs_blk_mapr); 3825 3826 ifp = XFS_IFORK_PTR(ip, whichfork); 3827 3828 /* No CoW fork? Return a hole. 
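 * The synthesized hole spans the entire request, [bno, bno + len).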
*/ 3829 if (whichfork == XFS_COW_FORK && !ifp) { 3830 mval->br_startoff = bno; 3831 mval->br_startblock = HOLESTARTBLOCK; 3832 mval->br_blockcount = len; 3833 mval->br_state = XFS_EXT_NORM; 3834 *nmap = 1; 3835 return 0; 3836 } 3837 3838 if (!(ifp->if_flags & XFS_IFEXTENTS)) { 3839 error = xfs_iread_extents(NULL, ip, whichfork); 3840 if (error) 3841 return error; 3842 } 3843 3844 if (!xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got)) 3845 eof = true; 3846 end = bno + len; 3847 obno = bno; 3848 3849 while (bno < end && n < *nmap) { 3850 /* Reading past eof, act as though there's a hole up to end. */ 3851 if (eof) 3852 got.br_startoff = end; 3853 if (got.br_startoff > bno) { 3854 /* Reading in a hole. */ 3855 mval->br_startoff = bno; 3856 mval->br_startblock = HOLESTARTBLOCK; 3857 mval->br_blockcount = 3858 XFS_FILBLKS_MIN(len, got.br_startoff - bno); 3859 mval->br_state = XFS_EXT_NORM; 3860 bno += mval->br_blockcount; 3861 len -= mval->br_blockcount; 3862 mval++; 3863 n++; 3864 continue; 3865 } 3866 3867 /* set up the extent map to return. */ 3868 xfs_bmapi_trim_map(mval, &got, &bno, len, obno, end, n, flags); 3869 xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags); 3870 3871 /* If we're done, stop now. */ 3872 if (bno >= end || n >= *nmap) 3873 break; 3874 3875 /* Else go on to the next record. */ 3876 if (!xfs_iext_next_extent(ifp, &icur, &got)) 3877 eof = true; 3878 } 3879 *nmap = n; 3880 return 0; 3881 } 3882 3883 /* 3884 * Add a delayed allocation extent to an inode. Blocks are reserved from the 3885 * global pool and the extent inserted into the inode in-core extent tree. 3886 * 3887 * On entry, got refers to the first extent beyond the offset of the extent to 3888 * allocate or eof is specified if no such extent exists. On return, got refers 3889 * to the extent record that was inserted to the inode fork. 3890 * 3891 * Note that the allocated extent may have been merged with contiguous extents 3892 * during insertion into the inode fork. Thus, got does not reflect the current 3893 * state of the inode fork on return. If necessary, the caller can use lastx to 3894 * look up the updated record in the inode fork. 3895 */ 3896 int 3897 xfs_bmapi_reserve_delalloc( 3898 struct xfs_inode *ip, 3899 int whichfork, 3900 xfs_fileoff_t off, 3901 xfs_filblks_t len, 3902 xfs_filblks_t prealloc, 3903 struct xfs_bmbt_irec *got, 3904 struct xfs_iext_cursor *icur, 3905 int eof) 3906 { 3907 struct xfs_mount *mp = ip->i_mount; 3908 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork); 3909 xfs_extlen_t alen; 3910 xfs_extlen_t indlen; 3911 int error; 3912 xfs_fileoff_t aoff = off; 3913 3914 /* 3915 * Cap the alloc length. Keep track of prealloc so we know whether to 3916 * tag the inode before we return. 3917 */ 3918 alen = XFS_FILBLKS_MIN(len + prealloc, MAXEXTLEN); 3919 if (!eof) 3920 alen = XFS_FILBLKS_MIN(alen, got->br_startoff - aoff); 3921 if (prealloc && alen >= len) 3922 prealloc = alen - len; 3923 3924 /* Figure out the extent size, adjust alen */ 3925 if (whichfork == XFS_COW_FORK) { 3926 struct xfs_bmbt_irec prev; 3927 xfs_extlen_t extsz = xfs_get_cowextsz_hint(ip); 3928 3929 if (!xfs_iext_peek_prev_extent(ifp, icur, &prev)) 3930 prev.br_startoff = NULLFILEOFF; 3931 3932 error = xfs_bmap_extsize_align(mp, got, &prev, extsz, 0, eof, 3933 1, 0, &aoff, &alen); 3934 ASSERT(!error); 3935 } 3936 3937 /* 3938 * Make a transaction-less quota reservation for delayed allocation 3939 * blocks. This number gets adjusted later. 
We return if we haven't 3940 * allocated blocks already inside this loop. 3941 */ 3942 error = xfs_trans_reserve_quota_nblks(NULL, ip, (long)alen, 0, 3943 XFS_QMOPT_RES_REGBLKS); 3944 if (error) 3945 return error; 3946 3947 /* 3948 * Split changing sb for alen and indlen since they could be coming 3949 * from different places. 3950 */ 3951 indlen = (xfs_extlen_t)xfs_bmap_worst_indlen(ip, alen); 3952 ASSERT(indlen > 0); 3953 3954 error = xfs_mod_fdblocks(mp, -((int64_t)alen), false); 3955 if (error) 3956 goto out_unreserve_quota; 3957 3958 error = xfs_mod_fdblocks(mp, -((int64_t)indlen), false); 3959 if (error) 3960 goto out_unreserve_blocks; 3961 3962 3963 ip->i_delayed_blks += alen; 3964 3965 got->br_startoff = aoff; 3966 got->br_startblock = nullstartblock(indlen); 3967 got->br_blockcount = alen; 3968 got->br_state = XFS_EXT_NORM; 3969 3970 xfs_bmap_add_extent_hole_delay(ip, whichfork, icur, got); 3971 3972 /* 3973 * Tag the inode if blocks were preallocated. Note that COW fork 3974 * preallocation can occur at the start or end of the extent, even when 3975 * prealloc == 0, so we must also check the aligned offset and length. 3976 */ 3977 if (whichfork == XFS_DATA_FORK && prealloc) 3978 xfs_inode_set_eofblocks_tag(ip); 3979 if (whichfork == XFS_COW_FORK && (prealloc || aoff < off || alen > len)) 3980 xfs_inode_set_cowblocks_tag(ip); 3981 3982 return 0; 3983 3984 out_unreserve_blocks: 3985 xfs_mod_fdblocks(mp, alen, false); 3986 out_unreserve_quota: 3987 if (XFS_IS_QUOTA_ON(mp)) 3988 xfs_trans_unreserve_quota_nblks(NULL, ip, (long)alen, 0, 3989 XFS_QMOPT_RES_REGBLKS); 3990 return error; 3991 } 3992 3993 static int 3994 xfs_bmapi_allocate( 3995 struct xfs_bmalloca *bma) 3996 { 3997 struct xfs_mount *mp = bma->ip->i_mount; 3998 int whichfork = xfs_bmapi_whichfork(bma->flags); 3999 struct xfs_ifork *ifp = XFS_IFORK_PTR(bma->ip, whichfork); 4000 int tmp_logflags = 0; 4001 int error; 4002 4003 ASSERT(bma->length > 0); 4004 4005 /* 4006 * For the wasdelay case, we could also just allocate the stuff asked 4007 * for in this bmap call but that wouldn't be as good. 4008 */ 4009 if (bma->wasdel) { 4010 bma->length = (xfs_extlen_t)bma->got.br_blockcount; 4011 bma->offset = bma->got.br_startoff; 4012 xfs_iext_peek_prev_extent(ifp, &bma->icur, &bma->prev); 4013 } else { 4014 bma->length = XFS_FILBLKS_MIN(bma->length, MAXEXTLEN); 4015 if (!bma->eof) 4016 bma->length = XFS_FILBLKS_MIN(bma->length, 4017 bma->got.br_startoff - bma->offset); 4018 } 4019 4020 /* 4021 * Set the data type being allocated. For the data fork, the first data 4022 * in the file is treated differently to all other allocations. For the 4023 * attribute fork, we only need to ensure the allocated range is not on 4024 * the busy list. 4025 */ 4026 if (!(bma->flags & XFS_BMAPI_METADATA)) { 4027 bma->datatype = XFS_ALLOC_NOBUSY; 4028 if (whichfork == XFS_DATA_FORK) { 4029 if (bma->offset == 0) 4030 bma->datatype |= XFS_ALLOC_INITIAL_USER_DATA; 4031 else 4032 bma->datatype |= XFS_ALLOC_USERDATA; 4033 } 4034 if (bma->flags & XFS_BMAPI_ZERO) 4035 bma->datatype |= XFS_ALLOC_USERDATA_ZERO; 4036 } 4037 4038 bma->minlen = (bma->flags & XFS_BMAPI_CONTIG) ? bma->length : 1; 4039 4040 /* 4041 * Only want to do the alignment at the eof if it is userdata and 4042 * allocation length is larger than a stripe unit. 
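	 *
	 * For example (hypothetical geometry): with a stripe unit of 16
	 * blocks, a 64 block userdata allocation is considered for EOF
	 * alignment here, while a 4 block allocation, or any metadata
	 * allocation, goes straight to xfs_bmap_alloc().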
4043 */ 4044 if (mp->m_dalign && bma->length >= mp->m_dalign && 4045 !(bma->flags & XFS_BMAPI_METADATA) && whichfork == XFS_DATA_FORK) { 4046 error = xfs_bmap_isaeof(bma, whichfork); 4047 if (error) 4048 return error; 4049 } 4050 4051 error = xfs_bmap_alloc(bma); 4052 if (error) 4053 return error; 4054 4055 if (bma->blkno == NULLFSBLOCK) 4056 return 0; 4057 if ((ifp->if_flags & XFS_IFBROOT) && !bma->cur) 4058 bma->cur = xfs_bmbt_init_cursor(mp, bma->tp, bma->ip, whichfork); 4059 /* 4060 * Bump the number of extents we've allocated 4061 * in this call. 4062 */ 4063 bma->nallocs++; 4064 4065 if (bma->cur) 4066 bma->cur->bc_private.b.flags = 4067 bma->wasdel ? XFS_BTCUR_BPRV_WASDEL : 0; 4068 4069 bma->got.br_startoff = bma->offset; 4070 bma->got.br_startblock = bma->blkno; 4071 bma->got.br_blockcount = bma->length; 4072 bma->got.br_state = XFS_EXT_NORM; 4073 4074 /* 4075 * In the data fork, a wasdelay extent has been initialized, so 4076 * shouldn't be flagged as unwritten. 4077 * 4078 * For the cow fork, however, we convert delalloc reservations 4079 * (extents allocated for speculative preallocation) to 4080 * allocated unwritten extents, and only convert the unwritten 4081 * extents to real extents when we're about to write the data. 4082 */ 4083 if ((!bma->wasdel || (bma->flags & XFS_BMAPI_COWFORK)) && 4084 (bma->flags & XFS_BMAPI_PREALLOC) && 4085 xfs_sb_version_hasextflgbit(&mp->m_sb)) 4086 bma->got.br_state = XFS_EXT_UNWRITTEN; 4087 4088 if (bma->wasdel) 4089 error = xfs_bmap_add_extent_delay_real(bma, whichfork); 4090 else 4091 error = xfs_bmap_add_extent_hole_real(bma->tp, bma->ip, 4092 whichfork, &bma->icur, &bma->cur, &bma->got, 4093 &bma->logflags, bma->flags); 4094 4095 bma->logflags |= tmp_logflags; 4096 if (error) 4097 return error; 4098 4099 /* 4100 * Update our extent pointer, given that xfs_bmap_add_extent_delay_real 4101 * or xfs_bmap_add_extent_hole_real might have merged it into one of 4102 * the neighbouring ones. 4103 */ 4104 xfs_iext_get_extent(ifp, &bma->icur, &bma->got); 4105 4106 ASSERT(bma->got.br_startoff <= bma->offset); 4107 ASSERT(bma->got.br_startoff + bma->got.br_blockcount >= 4108 bma->offset + bma->length); 4109 ASSERT(bma->got.br_state == XFS_EXT_NORM || 4110 bma->got.br_state == XFS_EXT_UNWRITTEN); 4111 return 0; 4112 } 4113 4114 STATIC int 4115 xfs_bmapi_convert_unwritten( 4116 struct xfs_bmalloca *bma, 4117 struct xfs_bmbt_irec *mval, 4118 xfs_filblks_t len, 4119 int flags) 4120 { 4121 int whichfork = xfs_bmapi_whichfork(flags); 4122 struct xfs_ifork *ifp = XFS_IFORK_PTR(bma->ip, whichfork); 4123 int tmp_logflags = 0; 4124 int error; 4125 4126 /* check if we need to do unwritten->real conversion */ 4127 if (mval->br_state == XFS_EXT_UNWRITTEN && 4128 (flags & XFS_BMAPI_PREALLOC)) 4129 return 0; 4130 4131 /* check if we need to do real->unwritten conversion */ 4132 if (mval->br_state == XFS_EXT_NORM && 4133 (flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT)) != 4134 (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT)) 4135 return 0; 4136 4137 /* 4138 * Modify (by adding) the state flag, if writing. 4139 */ 4140 ASSERT(mval->br_blockcount <= len); 4141 if ((ifp->if_flags & XFS_IFBROOT) && !bma->cur) { 4142 bma->cur = xfs_bmbt_init_cursor(bma->ip->i_mount, bma->tp, 4143 bma->ip, whichfork); 4144 } 4145 mval->br_state = (mval->br_state == XFS_EXT_UNWRITTEN) 4146 ? XFS_EXT_NORM : XFS_EXT_UNWRITTEN; 4147 4148 /* 4149 * Before insertion into the bmbt, zero the range being converted 4150 * if required. 
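	 * Callers set XFS_BMAPI_ZERO when they need the blocks to contain
	 * stable zeroes once the conversion is visible, rather than relying
	 * on unwritten extent semantics to hide stale contents.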
4151	 */
4152	if (flags & XFS_BMAPI_ZERO) {
4153		error = xfs_zero_extent(bma->ip, mval->br_startblock,
4154					mval->br_blockcount);
4155		if (error)
4156			return error;
4157	}
4158
4159	error = xfs_bmap_add_extent_unwritten_real(bma->tp, bma->ip, whichfork,
4160			&bma->icur, &bma->cur, mval, &tmp_logflags);
4161	/*
4162	 * Log the inode core unconditionally in the unwritten extent conversion
4163	 * path because the conversion might not have done so (e.g., if the
4164	 * extent count hasn't changed). We need to make sure the inode is dirty
4165	 * in the transaction for the sake of fsync(), even if nothing has
4166	 * changed, because fsync() will not force the log for this transaction
4167	 * unless it sees the inode pinned.
4168	 *
4169	 * Note: If we're only converting cow fork extents, there aren't
4170	 * any on-disk updates to make, so we don't need to log anything.
4171	 */
4172	if (whichfork != XFS_COW_FORK)
4173		bma->logflags |= tmp_logflags | XFS_ILOG_CORE;
4174	if (error)
4175		return error;
4176
4177	/*
4178	 * Update our extent pointer, given that
4179	 * xfs_bmap_add_extent_unwritten_real might have merged it into one
4180	 * of the neighbouring ones.
4181	 */
4182	xfs_iext_get_extent(ifp, &bma->icur, &bma->got);
4183
4184	/*
4185	 * We may have combined previously unwritten space with written space,
4186	 * so generate another request.
4187	 */
4188	if (mval->br_blockcount < len)
4189		return -EAGAIN;
4190	return 0;
4191 }
4192
4193 /*
4194  * Map file blocks to filesystem blocks, and allocate blocks or convert the
4195  * extent state if necessary.  Detailed behaviour is controlled by the flags
4196  * parameter.  Only allocates blocks from a single allocation group, to avoid
4197  * locking problems.
4198  */
4199 int
4200 xfs_bmapi_write(
4201	struct xfs_trans	*tp,		/* transaction pointer */
4202	struct xfs_inode	*ip,		/* incore inode */
4203	xfs_fileoff_t		bno,		/* starting file offs. mapped */
4204	xfs_filblks_t		len,		/* length to map in file */
4205	int			flags,		/* XFS_BMAPI_... */
4206	xfs_extlen_t		total,		/* total blocks needed */
4207	struct xfs_bmbt_irec	*mval,		/* output: map values */
4208	int			*nmap)		/* i/o: mval size/count */
4209 {
4210	struct xfs_mount	*mp = ip->i_mount;
4211	struct xfs_ifork	*ifp;
4212	struct xfs_bmalloca	bma = { NULL };	/* args for xfs_bmap_alloc */
4213	xfs_fileoff_t		end;		/* end of mapped file region */
4214	bool			eof = false;	/* after the end of extents */
4215	int			error;		/* error return */
4216	int			n;		/* current extent index */
4217	xfs_fileoff_t		obno;		/* old block number (offset) */
4218	int			whichfork;	/* data or attr fork */
4219
4220 #ifdef DEBUG
4221	xfs_fileoff_t		orig_bno;	/* original block number value */
4222	int			orig_flags;	/* original flags arg value */
4223	xfs_filblks_t		orig_len;	/* original value of len arg */
4224	struct xfs_bmbt_irec	*orig_mval;	/* original value of mval */
4225	int			orig_nmap;	/* original value of *nmap */
4226
4227	orig_bno = bno;
4228	orig_len = len;
4229	orig_flags = flags;
4230	orig_mval = mval;
4231	orig_nmap = *nmap;
4232 #endif
4233	whichfork = xfs_bmapi_whichfork(flags);
4234
4235	ASSERT(*nmap >= 1);
4236	ASSERT(*nmap <= XFS_BMAP_MAX_NMAP);
4237	ASSERT(tp != NULL ||
4238	       (flags & (XFS_BMAPI_CONVERT | XFS_BMAPI_COWFORK)) ==
4239			(XFS_BMAPI_CONVERT | XFS_BMAPI_COWFORK));
4240	ASSERT(len > 0);
4241	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL);
4242	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
4243	ASSERT(!(flags & XFS_BMAPI_REMAP));
4244
4245	/* zeroing is currently only supported for data extents, not metadata */
4246	ASSERT((flags & (XFS_BMAPI_METADATA | XFS_BMAPI_ZERO)) !=
4247			(XFS_BMAPI_METADATA | XFS_BMAPI_ZERO));
4248	/*
4249	 * We can allocate unwritten extents or pre-zero allocated blocks,
4250	 * but it makes no sense to do both at once: that would just zero
4251	 * the unwritten extent twice while leaving it an unwritten
4252	 * extent.
4253	 */
4254	ASSERT((flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO)) !=
4255			(XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO));
4256
4257	if (unlikely(XFS_TEST_ERROR(
4258	    (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
4259	     XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
4260	     mp, XFS_ERRTAG_BMAPIFORMAT))) {
4261		XFS_ERROR_REPORT("xfs_bmapi_write", XFS_ERRLEVEL_LOW, mp);
4262		return -EFSCORRUPTED;
4263	}
4264
4265	if (XFS_FORCED_SHUTDOWN(mp))
4266		return -EIO;
4267
4268	ifp = XFS_IFORK_PTR(ip, whichfork);
4269
4270	XFS_STATS_INC(mp, xs_blk_mapw);
4271
4272	if (!tp || tp->t_firstblock == NULLFSBLOCK) {
4273		if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE)
4274			bma.minleft = be16_to_cpu(ifp->if_broot->bb_level) + 1;
4275		else
4276			bma.minleft = 1;
4277	} else {
4278		bma.minleft = 0;
4279	}
4280
4281	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
4282		error = xfs_iread_extents(tp, ip, whichfork);
4283		if (error)
4284			goto error0;
4285	}
4286
4287	n = 0;
4288	end = bno + len;
4289	obno = bno;
4290
4291	if (!xfs_iext_lookup_extent(ip, ifp, bno, &bma.icur, &bma.got))
4292		eof = true;
4293	if (!xfs_iext_peek_prev_extent(ifp, &bma.icur, &bma.prev))
4294		bma.prev.br_startoff = NULLFILEOFF;
4295	bma.tp = tp;
4296	bma.ip = ip;
4297	bma.total = total;
4298	bma.datatype = 0;
4299
4300	while (bno < end && n < *nmap) {
4301		bool			need_alloc = false, wasdelay = false;
4302
4303		/* in hole or beyond EOF? */
4304		if (eof || bma.got.br_startoff > bno) {
4305			/*
4306			 * CoW fork conversions should /never/ hit EOF or
4307			 * holes.  There should always be something for us
4308			 * to work on.
4309			 */
4310			ASSERT(!((flags & XFS_BMAPI_CONVERT) &&
4311				 (flags & XFS_BMAPI_COWFORK)));
4312
4313			if (flags & XFS_BMAPI_DELALLOC) {
4314				/*
4315				 * For the COW fork we can reasonably get a
4316				 * request for converting an extent that races
4317				 * with other threads already having converted
4318				 * part of it, because converting COW extents
4319				 * to regular blocks is not protected by the
4320				 * IOLOCK.
4321				 */
4322				ASSERT(flags & XFS_BMAPI_COWFORK);
4323				if (!(flags & XFS_BMAPI_COWFORK)) {
4324					error = -EIO;
4325					goto error0;
4326				}
4327
4328				if (eof || bno >= end)
4329					break;
4330			} else {
4331				need_alloc = true;
4332			}
4333		} else if (isnullstartblock(bma.got.br_startblock)) {
4334			wasdelay = true;
4335		}
4336
4337		/*
4338		 * First, deal with the hole before the allocated space
4339		 * that we found, if any.
4340		 */
4341		if ((need_alloc || wasdelay) &&
4342		    !(flags & XFS_BMAPI_CONVERT_ONLY)) {
4343			bma.eof = eof;
4344			bma.conv = !!(flags & XFS_BMAPI_CONVERT);
4345			bma.wasdel = wasdelay;
4346			bma.offset = bno;
4347			bma.flags = flags;
4348
4349			/*
4350			 * There's a 32/64 bit type mismatch between the
4351			 * allocation length request (which can be 64 bits in
4352			 * length) and the bma length request, which is
4353			 * xfs_extlen_t and therefore 32 bits. Hence we have to
4354			 * check for 32-bit overflows and handle them here.
4355			 */
4356			if (len > (xfs_filblks_t)MAXEXTLEN)
4357				bma.length = MAXEXTLEN;
4358			else
4359				bma.length = len;
4360
4361			ASSERT(len > 0);
4362			ASSERT(bma.length > 0);
4363			error = xfs_bmapi_allocate(&bma);
4364			if (error)
4365				goto error0;
4366			if (bma.blkno == NULLFSBLOCK)
4367				break;
4368
4369			/*
4370			 * If this is a CoW allocation, record the data in
4371			 * the refcount btree for orphan recovery.
4372			 */
4373			if (whichfork == XFS_COW_FORK) {
4374				error = xfs_refcount_alloc_cow_extent(tp,
4375						bma.blkno, bma.length);
4376				if (error)
4377					goto error0;
4378			}
4379		}
4380
4381		/* Deal with the allocated space we found. */
4382		xfs_bmapi_trim_map(mval, &bma.got, &bno, len, obno,
4383					end, n, flags);
4384
4385		/* Execute unwritten extent conversion if necessary */
4386		error = xfs_bmapi_convert_unwritten(&bma, mval, len, flags);
4387		if (error == -EAGAIN)
4388			continue;
4389		if (error)
4390			goto error0;
4391
4392		/* update the extent map to return */
4393		xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);
4394
4395		/*
4396		 * If we're done, stop now. Stop when we've allocated
4397		 * XFS_BMAP_MAX_NMAP extents no matter what. Otherwise
4398		 * the transaction may get too big.
4399		 */
4400		if (bno >= end || n >= *nmap || bma.nallocs >= *nmap)
4401			break;
4402
4403		/* Else go on to the next record. */
4404		bma.prev = bma.got;
4405		if (!xfs_iext_next_extent(ifp, &bma.icur, &bma.got))
4406			eof = true;
4407	}
4408	*nmap = n;
4409
4410	/*
4411	 * Transform from btree to extents, give it cur.
4412	 */
4413	if (xfs_bmap_wants_extents(ip, whichfork)) {
4414		int		tmp_logflags = 0;
4415
4416		ASSERT(bma.cur);
4417		error = xfs_bmap_btree_to_extents(tp, ip, bma.cur,
4418			&tmp_logflags, whichfork);
4419		bma.logflags |= tmp_logflags;
4420		if (error)
4421			goto error0;
4422	}
4423
4424	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE ||
4425	       XFS_IFORK_NEXTENTS(ip, whichfork) >
4426		XFS_IFORK_MAXEXT(ip, whichfork));
4427	error = 0;
4428 error0:
4429	/*
4430	 * Log everything. Do this after conversion, there's no point in
4431	 * logging the extent records if we've converted to btree format.
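	 * For example, if the fork was converted from extents to btree
	 * format above, a pending xfs_ilog_fext() flag would refer to an
	 * extent list that no longer exists in that form, so it is cleared
	 * below and only the broot and core flags are logged.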
4432 */ 4433 if ((bma.logflags & xfs_ilog_fext(whichfork)) && 4434 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS) 4435 bma.logflags &= ~xfs_ilog_fext(whichfork); 4436 else if ((bma.logflags & xfs_ilog_fbroot(whichfork)) && 4437 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE) 4438 bma.logflags &= ~xfs_ilog_fbroot(whichfork); 4439 /* 4440 * Log whatever the flags say, even if error. Otherwise we might miss 4441 * detecting a case where the data is changed, there's an error, 4442 * and it's not logged so we don't shutdown when we should. 4443 */ 4444 if (bma.logflags) 4445 xfs_trans_log_inode(tp, ip, bma.logflags); 4446 4447 if (bma.cur) { 4448 xfs_btree_del_cursor(bma.cur, error); 4449 } 4450 if (!error) 4451 xfs_bmap_validate_ret(orig_bno, orig_len, orig_flags, orig_mval, 4452 orig_nmap, *nmap); 4453 return error; 4454 } 4455 4456 int 4457 xfs_bmapi_remap( 4458 struct xfs_trans *tp, 4459 struct xfs_inode *ip, 4460 xfs_fileoff_t bno, 4461 xfs_filblks_t len, 4462 xfs_fsblock_t startblock, 4463 int flags) 4464 { 4465 struct xfs_mount *mp = ip->i_mount; 4466 struct xfs_ifork *ifp; 4467 struct xfs_btree_cur *cur = NULL; 4468 struct xfs_bmbt_irec got; 4469 struct xfs_iext_cursor icur; 4470 int whichfork = xfs_bmapi_whichfork(flags); 4471 int logflags = 0, error; 4472 4473 ifp = XFS_IFORK_PTR(ip, whichfork); 4474 ASSERT(len > 0); 4475 ASSERT(len <= (xfs_filblks_t)MAXEXTLEN); 4476 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); 4477 ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC | 4478 XFS_BMAPI_NORMAP))); 4479 ASSERT((flags & (XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC)) != 4480 (XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC)); 4481 4482 if (unlikely(XFS_TEST_ERROR( 4483 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS && 4484 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE), 4485 mp, XFS_ERRTAG_BMAPIFORMAT))) { 4486 XFS_ERROR_REPORT("xfs_bmapi_remap", XFS_ERRLEVEL_LOW, mp); 4487 return -EFSCORRUPTED; 4488 } 4489 4490 if (XFS_FORCED_SHUTDOWN(mp)) 4491 return -EIO; 4492 4493 if (!(ifp->if_flags & XFS_IFEXTENTS)) { 4494 error = xfs_iread_extents(tp, ip, whichfork); 4495 if (error) 4496 return error; 4497 } 4498 4499 if (xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got)) { 4500 /* make sure we only reflink into a hole. 
*/ 4501 ASSERT(got.br_startoff > bno); 4502 ASSERT(got.br_startoff - bno >= len); 4503 } 4504 4505 ip->i_d.di_nblocks += len; 4506 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); 4507 4508 if (ifp->if_flags & XFS_IFBROOT) { 4509 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork); 4510 cur->bc_private.b.flags = 0; 4511 } 4512 4513 got.br_startoff = bno; 4514 got.br_startblock = startblock; 4515 got.br_blockcount = len; 4516 if (flags & XFS_BMAPI_PREALLOC) 4517 got.br_state = XFS_EXT_UNWRITTEN; 4518 else 4519 got.br_state = XFS_EXT_NORM; 4520 4521 error = xfs_bmap_add_extent_hole_real(tp, ip, whichfork, &icur, 4522 &cur, &got, &logflags, flags); 4523 if (error) 4524 goto error0; 4525 4526 if (xfs_bmap_wants_extents(ip, whichfork)) { 4527 int tmp_logflags = 0; 4528 4529 error = xfs_bmap_btree_to_extents(tp, ip, cur, 4530 &tmp_logflags, whichfork); 4531 logflags |= tmp_logflags; 4532 } 4533 4534 error0: 4535 if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) 4536 logflags &= ~XFS_ILOG_DEXT; 4537 else if (ip->i_d.di_format != XFS_DINODE_FMT_BTREE) 4538 logflags &= ~XFS_ILOG_DBROOT; 4539 4540 if (logflags) 4541 xfs_trans_log_inode(tp, ip, logflags); 4542 if (cur) 4543 xfs_btree_del_cursor(cur, error); 4544 return error; 4545 } 4546 4547 /* 4548 * When a delalloc extent is split (e.g., due to a hole punch), the original 4549 * indlen reservation must be shared across the two new extents that are left 4550 * behind. 4551 * 4552 * Given the original reservation and the worst case indlen for the two new 4553 * extents (as calculated by xfs_bmap_worst_indlen()), split the original 4554 * reservation fairly across the two new extents. If necessary, steal available 4555 * blocks from a deleted extent to make up a reservation deficiency (e.g., if 4556 * ores == 1). The number of stolen blocks is returned. The availability and 4557 * subsequent accounting of stolen blocks is the responsibility of the caller. 4558 */ 4559 static xfs_filblks_t 4560 xfs_bmap_split_indlen( 4561 xfs_filblks_t ores, /* original res. */ 4562 xfs_filblks_t *indlen1, /* ext1 worst indlen */ 4563 xfs_filblks_t *indlen2, /* ext2 worst indlen */ 4564 xfs_filblks_t avail) /* stealable blocks */ 4565 { 4566 xfs_filblks_t len1 = *indlen1; 4567 xfs_filblks_t len2 = *indlen2; 4568 xfs_filblks_t nres = len1 + len2; /* new total res. */ 4569 xfs_filblks_t stolen = 0; 4570 xfs_filblks_t resfactor; 4571 4572 /* 4573 * Steal as many blocks as we can to try and satisfy the worst case 4574 * indlen for both new extents. 4575 */ 4576 if (ores < nres && avail) 4577 stolen = XFS_FILBLKS_MIN(nres - ores, avail); 4578 ores += stolen; 4579 4580 /* nothing else to do if we've satisfied the new reservation */ 4581 if (ores >= nres) 4582 return stolen; 4583 4584 /* 4585 * We can't meet the total required reservation for the two extents. 4586 * Calculate the percent of the overall shortage between both extents 4587 * and apply this percentage to each of the requested indlen values. 4588 * This distributes the shortage fairly and reduces the chances that one 4589 * of the two extents is left with nothing when extents are repeatedly 4590 * split. 4591 */ 4592 resfactor = (ores * 100); 4593 do_div(resfactor, nres); 4594 len1 *= resfactor; 4595 do_div(len1, 100); 4596 len2 *= resfactor; 4597 do_div(len2, 100); 4598 ASSERT(len1 + len2 <= ores); 4599 ASSERT(len1 < *indlen1 && len2 < *indlen2); 4600 4601 /* 4602 * Hand out the remainder to each extent. If one of the two reservations 4603 * is zero, we want to make sure that one gets a block first. 
The loop 4604 * below starts with len1, so hand len2 a block right off the bat if it 4605 * is zero. 4606 */ 4607 ores -= (len1 + len2); 4608 ASSERT((*indlen1 - len1) + (*indlen2 - len2) >= ores); 4609 if (ores && !len2 && *indlen2) { 4610 len2++; 4611 ores--; 4612 } 4613 while (ores) { 4614 if (len1 < *indlen1) { 4615 len1++; 4616 ores--; 4617 } 4618 if (!ores) 4619 break; 4620 if (len2 < *indlen2) { 4621 len2++; 4622 ores--; 4623 } 4624 } 4625 4626 *indlen1 = len1; 4627 *indlen2 = len2; 4628 4629 return stolen; 4630 } 4631 4632 int 4633 xfs_bmap_del_extent_delay( 4634 struct xfs_inode *ip, 4635 int whichfork, 4636 struct xfs_iext_cursor *icur, 4637 struct xfs_bmbt_irec *got, 4638 struct xfs_bmbt_irec *del) 4639 { 4640 struct xfs_mount *mp = ip->i_mount; 4641 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork); 4642 struct xfs_bmbt_irec new; 4643 int64_t da_old, da_new, da_diff = 0; 4644 xfs_fileoff_t del_endoff, got_endoff; 4645 xfs_filblks_t got_indlen, new_indlen, stolen; 4646 int state = xfs_bmap_fork_to_state(whichfork); 4647 int error = 0; 4648 bool isrt; 4649 4650 XFS_STATS_INC(mp, xs_del_exlist); 4651 4652 isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip); 4653 del_endoff = del->br_startoff + del->br_blockcount; 4654 got_endoff = got->br_startoff + got->br_blockcount; 4655 da_old = startblockval(got->br_startblock); 4656 da_new = 0; 4657 4658 ASSERT(del->br_blockcount > 0); 4659 ASSERT(got->br_startoff <= del->br_startoff); 4660 ASSERT(got_endoff >= del_endoff); 4661 4662 if (isrt) { 4663 uint64_t rtexts = XFS_FSB_TO_B(mp, del->br_blockcount); 4664 4665 do_div(rtexts, mp->m_sb.sb_rextsize); 4666 xfs_mod_frextents(mp, rtexts); 4667 } 4668 4669 /* 4670 * Update the inode delalloc counter now and wait to update the 4671 * sb counters as we might have to borrow some blocks for the 4672 * indirect block accounting. 4673 */ 4674 error = xfs_trans_reserve_quota_nblks(NULL, ip, 4675 -((long)del->br_blockcount), 0, 4676 isrt ? XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS); 4677 if (error) 4678 return error; 4679 ip->i_delayed_blks -= del->br_blockcount; 4680 4681 if (got->br_startoff == del->br_startoff) 4682 state |= BMAP_LEFT_FILLING; 4683 if (got_endoff == del_endoff) 4684 state |= BMAP_RIGHT_FILLING; 4685 4686 switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) { 4687 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING: 4688 /* 4689 * Matches the whole extent. Delete the entry. 4690 */ 4691 xfs_iext_remove(ip, icur, state); 4692 xfs_iext_prev(ifp, icur); 4693 break; 4694 case BMAP_LEFT_FILLING: 4695 /* 4696 * Deleting the first part of the extent. 4697 */ 4698 got->br_startoff = del_endoff; 4699 got->br_blockcount -= del->br_blockcount; 4700 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, 4701 got->br_blockcount), da_old); 4702 got->br_startblock = nullstartblock((int)da_new); 4703 xfs_iext_update_extent(ip, state, icur, got); 4704 break; 4705 case BMAP_RIGHT_FILLING: 4706 /* 4707 * Deleting the last part of the extent. 4708 */ 4709 got->br_blockcount = got->br_blockcount - del->br_blockcount; 4710 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, 4711 got->br_blockcount), da_old); 4712 got->br_startblock = nullstartblock((int)da_new); 4713 xfs_iext_update_extent(ip, state, icur, got); 4714 break; 4715 case 0: 4716 /* 4717 * Deleting the middle of the extent. 4718 * 4719 * Distribute the original indlen reservation across the two new 4720 * extents. Steal blocks from the deleted extent if necessary. 4721 * Stealing blocks simply fudges the fdblocks accounting below. 
4722 * Warn if either of the new indlen reservations is zero as this 4723 * can lead to delalloc problems. 4724 */ 4725 got->br_blockcount = del->br_startoff - got->br_startoff; 4726 got_indlen = xfs_bmap_worst_indlen(ip, got->br_blockcount); 4727 4728 new.br_blockcount = got_endoff - del_endoff; 4729 new_indlen = xfs_bmap_worst_indlen(ip, new.br_blockcount); 4730 4731 WARN_ON_ONCE(!got_indlen || !new_indlen); 4732 stolen = xfs_bmap_split_indlen(da_old, &got_indlen, &new_indlen, 4733 del->br_blockcount); 4734 4735 got->br_startblock = nullstartblock((int)got_indlen); 4736 4737 new.br_startoff = del_endoff; 4738 new.br_state = got->br_state; 4739 new.br_startblock = nullstartblock((int)new_indlen); 4740 4741 xfs_iext_update_extent(ip, state, icur, got); 4742 xfs_iext_next(ifp, icur); 4743 xfs_iext_insert(ip, icur, &new, state); 4744 4745 da_new = got_indlen + new_indlen - stolen; 4746 del->br_blockcount -= stolen; 4747 break; 4748 } 4749 4750 ASSERT(da_old >= da_new); 4751 da_diff = da_old - da_new; 4752 if (!isrt) 4753 da_diff += del->br_blockcount; 4754 if (da_diff) 4755 xfs_mod_fdblocks(mp, da_diff, false); 4756 return error; 4757 } 4758 4759 void 4760 xfs_bmap_del_extent_cow( 4761 struct xfs_inode *ip, 4762 struct xfs_iext_cursor *icur, 4763 struct xfs_bmbt_irec *got, 4764 struct xfs_bmbt_irec *del) 4765 { 4766 struct xfs_mount *mp = ip->i_mount; 4767 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK); 4768 struct xfs_bmbt_irec new; 4769 xfs_fileoff_t del_endoff, got_endoff; 4770 int state = BMAP_COWFORK; 4771 4772 XFS_STATS_INC(mp, xs_del_exlist); 4773 4774 del_endoff = del->br_startoff + del->br_blockcount; 4775 got_endoff = got->br_startoff + got->br_blockcount; 4776 4777 ASSERT(del->br_blockcount > 0); 4778 ASSERT(got->br_startoff <= del->br_startoff); 4779 ASSERT(got_endoff >= del_endoff); 4780 ASSERT(!isnullstartblock(got->br_startblock)); 4781 4782 if (got->br_startoff == del->br_startoff) 4783 state |= BMAP_LEFT_FILLING; 4784 if (got_endoff == del_endoff) 4785 state |= BMAP_RIGHT_FILLING; 4786 4787 switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) { 4788 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING: 4789 /* 4790 * Matches the whole extent. Delete the entry. 4791 */ 4792 xfs_iext_remove(ip, icur, state); 4793 xfs_iext_prev(ifp, icur); 4794 break; 4795 case BMAP_LEFT_FILLING: 4796 /* 4797 * Deleting the first part of the extent. 4798 */ 4799 got->br_startoff = del_endoff; 4800 got->br_blockcount -= del->br_blockcount; 4801 got->br_startblock = del->br_startblock + del->br_blockcount; 4802 xfs_iext_update_extent(ip, state, icur, got); 4803 break; 4804 case BMAP_RIGHT_FILLING: 4805 /* 4806 * Deleting the last part of the extent. 4807 */ 4808 got->br_blockcount -= del->br_blockcount; 4809 xfs_iext_update_extent(ip, state, icur, got); 4810 break; 4811 case 0: 4812 /* 4813 * Deleting the middle of the extent. 4814 */ 4815 got->br_blockcount = del->br_startoff - got->br_startoff; 4816 4817 new.br_startoff = del_endoff; 4818 new.br_blockcount = got_endoff - del_endoff; 4819 new.br_state = got->br_state; 4820 new.br_startblock = del->br_startblock + del->br_blockcount; 4821 4822 xfs_iext_update_extent(ip, state, icur, got); 4823 xfs_iext_next(ifp, icur); 4824 xfs_iext_insert(ip, icur, &new, state); 4825 break; 4826 } 4827 ip->i_delayed_blks -= del->br_blockcount; 4828 } 4829 4830 /* 4831 * Called by xfs_bmapi to update file extent records and the btree 4832 * after removing space. 
4833 */
4834 STATIC int				/* error */
4835 xfs_bmap_del_extent_real(
4836	xfs_inode_t		*ip,	/* incore inode pointer */
4837	xfs_trans_t		*tp,	/* current transaction pointer */
4838	struct xfs_iext_cursor	*icur,
4839	xfs_btree_cur_t		*cur,	/* if null, not a btree */
4840	xfs_bmbt_irec_t		*del,	/* data to remove from extents */
4841	int			*logflagsp, /* inode logging flags */
4842	int			whichfork, /* data or attr fork */
4843	int			bflags)	/* bmapi flags */
4844 {
4845	xfs_fsblock_t		del_endblock = 0; /* first block past del */
4846	xfs_fileoff_t		del_endoff;	/* first offset past del */
4847	int			do_fx;	/* free extent at end of routine */
4848	int			error;	/* error return value */
4849	int			flags = 0; /* inode logging flags */
4850	struct xfs_bmbt_irec	got;	/* current extent entry */
4851	xfs_fileoff_t		got_endoff;	/* first offset past got */
4852	int			i;	/* temp state */
4853	struct xfs_ifork	*ifp;	/* inode fork pointer */
4854	xfs_mount_t		*mp;	/* mount structure */
4855	xfs_filblks_t		nblks;	/* quota/sb block count */
4856	xfs_bmbt_irec_t		new;	/* new record to be inserted */
4857	/* REFERENCED */
4858	uint			qfield;	/* quota field to update */
4859	int			state = xfs_bmap_fork_to_state(whichfork);
4860	struct xfs_bmbt_irec	old;
4861
4862	mp = ip->i_mount;
4863	XFS_STATS_INC(mp, xs_del_exlist);
4864
4865	ifp = XFS_IFORK_PTR(ip, whichfork);
4866	ASSERT(del->br_blockcount > 0);
4867	xfs_iext_get_extent(ifp, icur, &got);
4868	ASSERT(got.br_startoff <= del->br_startoff);
4869	del_endoff = del->br_startoff + del->br_blockcount;
4870	got_endoff = got.br_startoff + got.br_blockcount;
4871	ASSERT(got_endoff >= del_endoff);
4872	ASSERT(!isnullstartblock(got.br_startblock));
4873	qfield = 0;
4874	error = 0;
4875
4876	/*
4877	 * If the directory code is running with no block reservation, the
4878	 * deleted block lies in the middle of its extent, and inserting the
4879	 * resulting new extent would force a conversion to btree format,
4880	 * then reject the operation. The calling code will then swap blocks
4881	 * around instead. We have to do this now, rather than waiting for
4882	 * the conversion to btree format, since the transaction will be dirty by then.
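	 *
	 * Punching out the middle of an extent turns one record into two,
	 * so a fork already holding XFS_IFORK_MAXEXT() records would have
	 * to be converted to btree format, and with t_blk_res == 0 there
	 * are no reserved blocks to fund that conversion.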
4883 */ 4884 if (tp->t_blk_res == 0 && 4885 XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS && 4886 XFS_IFORK_NEXTENTS(ip, whichfork) >= 4887 XFS_IFORK_MAXEXT(ip, whichfork) && 4888 del->br_startoff > got.br_startoff && del_endoff < got_endoff) 4889 return -ENOSPC; 4890 4891 flags = XFS_ILOG_CORE; 4892 if (whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip)) { 4893 xfs_fsblock_t bno; 4894 xfs_filblks_t len; 4895 xfs_extlen_t mod; 4896 4897 bno = div_u64_rem(del->br_startblock, mp->m_sb.sb_rextsize, 4898 &mod); 4899 ASSERT(mod == 0); 4900 len = div_u64_rem(del->br_blockcount, mp->m_sb.sb_rextsize, 4901 &mod); 4902 ASSERT(mod == 0); 4903 4904 error = xfs_rtfree_extent(tp, bno, (xfs_extlen_t)len); 4905 if (error) 4906 goto done; 4907 do_fx = 0; 4908 nblks = len * mp->m_sb.sb_rextsize; 4909 qfield = XFS_TRANS_DQ_RTBCOUNT; 4910 } else { 4911 do_fx = 1; 4912 nblks = del->br_blockcount; 4913 qfield = XFS_TRANS_DQ_BCOUNT; 4914 } 4915 4916 del_endblock = del->br_startblock + del->br_blockcount; 4917 if (cur) { 4918 error = xfs_bmbt_lookup_eq(cur, &got, &i); 4919 if (error) 4920 goto done; 4921 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 4922 } 4923 4924 if (got.br_startoff == del->br_startoff) 4925 state |= BMAP_LEFT_FILLING; 4926 if (got_endoff == del_endoff) 4927 state |= BMAP_RIGHT_FILLING; 4928 4929 switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) { 4930 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING: 4931 /* 4932 * Matches the whole extent. Delete the entry. 4933 */ 4934 xfs_iext_remove(ip, icur, state); 4935 xfs_iext_prev(ifp, icur); 4936 XFS_IFORK_NEXT_SET(ip, whichfork, 4937 XFS_IFORK_NEXTENTS(ip, whichfork) - 1); 4938 flags |= XFS_ILOG_CORE; 4939 if (!cur) { 4940 flags |= xfs_ilog_fext(whichfork); 4941 break; 4942 } 4943 if ((error = xfs_btree_delete(cur, &i))) 4944 goto done; 4945 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 4946 break; 4947 case BMAP_LEFT_FILLING: 4948 /* 4949 * Deleting the first part of the extent. 4950 */ 4951 got.br_startoff = del_endoff; 4952 got.br_startblock = del_endblock; 4953 got.br_blockcount -= del->br_blockcount; 4954 xfs_iext_update_extent(ip, state, icur, &got); 4955 if (!cur) { 4956 flags |= xfs_ilog_fext(whichfork); 4957 break; 4958 } 4959 error = xfs_bmbt_update(cur, &got); 4960 if (error) 4961 goto done; 4962 break; 4963 case BMAP_RIGHT_FILLING: 4964 /* 4965 * Deleting the last part of the extent. 4966 */ 4967 got.br_blockcount -= del->br_blockcount; 4968 xfs_iext_update_extent(ip, state, icur, &got); 4969 if (!cur) { 4970 flags |= xfs_ilog_fext(whichfork); 4971 break; 4972 } 4973 error = xfs_bmbt_update(cur, &got); 4974 if (error) 4975 goto done; 4976 break; 4977 case 0: 4978 /* 4979 * Deleting the middle of the extent. 4980 */ 4981 old = got; 4982 4983 got.br_blockcount = del->br_startoff - got.br_startoff; 4984 xfs_iext_update_extent(ip, state, icur, &got); 4985 4986 new.br_startoff = del_endoff; 4987 new.br_blockcount = got_endoff - del_endoff; 4988 new.br_state = got.br_state; 4989 new.br_startblock = del_endblock; 4990 4991 flags |= XFS_ILOG_CORE; 4992 if (cur) { 4993 error = xfs_bmbt_update(cur, &got); 4994 if (error) 4995 goto done; 4996 error = xfs_btree_increment(cur, 0, &i); 4997 if (error) 4998 goto done; 4999 cur->bc_rec.b = new; 5000 error = xfs_btree_insert(cur, &i); 5001 if (error && error != -ENOSPC) 5002 goto done; 5003 /* 5004 * If get no-space back from btree insert, it tried a 5005 * split, and we have a zero block reservation. Fix up 5006 * our state and return the error. 
5007			 */
5008			if (error == -ENOSPC) {
5009				/*
5010				 * Reset the cursor, don't trust it after any
5011				 * insert operation.
5012				 */
5013				error = xfs_bmbt_lookup_eq(cur, &got, &i);
5014				if (error)
5015					goto done;
5016				XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
5017				/*
5018				 * Update the btree record back
5019				 * to the original value.
5020				 */
5021				error = xfs_bmbt_update(cur, &old);
5022				if (error)
5023					goto done;
5024				/*
5025				 * Reset the extent record back
5026				 * to the original value.
5027				 */
5028				xfs_iext_update_extent(ip, state, icur, &old);
5029				flags = 0;
5030				error = -ENOSPC;
5031				goto done;
5032			}
5033			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
5034		} else
5035			flags |= xfs_ilog_fext(whichfork);
5036		XFS_IFORK_NEXT_SET(ip, whichfork,
5037			XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
5038		xfs_iext_next(ifp, icur);
5039		xfs_iext_insert(ip, icur, &new, state);
5040		break;
5041	}
5042
5043	/* remove reverse mapping */
5044	error = xfs_rmap_unmap_extent(tp, ip, whichfork, del);
5045	if (error)
5046		goto done;
5047
5048	/*
5049	 * If we need to, add to list of extents to delete.
5050	 */
5051	if (do_fx && !(bflags & XFS_BMAPI_REMAP)) {
5052		if (xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK) {
5053			error = xfs_refcount_decrease_extent(tp, del);
5054			if (error)
5055				goto done;
5056		} else {
5057			__xfs_bmap_add_free(tp, del->br_startblock,
5058					del->br_blockcount, NULL,
5059					(bflags & XFS_BMAPI_NODISCARD) ||
5060					del->br_state == XFS_EXT_UNWRITTEN);
5061		}
5062	}
5063
5064	/*
5065	 * Adjust inode # blocks in the file.
5066	 */
5067	if (nblks)
5068		ip->i_d.di_nblocks -= nblks;
5069	/*
5070	 * Adjust quota data.
5071	 */
5072	if (qfield && !(bflags & XFS_BMAPI_REMAP))
5073		xfs_trans_mod_dquot_byino(tp, ip, qfield, (long)-nblks);
5074
5075 done:
5076	*logflagsp = flags;
5077	return error;
5078 }
5079
5080 /*
5081  * Unmap (remove) blocks from a file.
5082  * If nexts is nonzero then the number of extents to remove is limited to
5083  * that value.  If not all extents in the block range can be removed then
5084  * *rlen is updated to the length that remains to be unmapped.
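 *
 * A typical caller loop looks like this (sketch only, with illustrative
 * variable names; transaction setup and any per-iteration rolling are
 * the caller's responsibility):
 *
 *	xfs_filblks_t	unmap_len = len;
 *
 *	while (unmap_len > 0 && !error)
 *		error = __xfs_bunmapi(tp, ip, start_fsb, &unmap_len,
 *				      0, 1);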
5085 */ 5086 int /* error */ 5087 __xfs_bunmapi( 5088 struct xfs_trans *tp, /* transaction pointer */ 5089 struct xfs_inode *ip, /* incore inode */ 5090 xfs_fileoff_t start, /* first file offset deleted */ 5091 xfs_filblks_t *rlen, /* i/o: amount remaining */ 5092 int flags, /* misc flags */ 5093 xfs_extnum_t nexts) /* number of extents max */ 5094 { 5095 struct xfs_btree_cur *cur; /* bmap btree cursor */ 5096 struct xfs_bmbt_irec del; /* extent being deleted */ 5097 int error; /* error return value */ 5098 xfs_extnum_t extno; /* extent number in list */ 5099 struct xfs_bmbt_irec got; /* current extent record */ 5100 struct xfs_ifork *ifp; /* inode fork pointer */ 5101 int isrt; /* freeing in rt area */ 5102 int logflags; /* transaction logging flags */ 5103 xfs_extlen_t mod; /* rt extent offset */ 5104 struct xfs_mount *mp; /* mount structure */ 5105 int tmp_logflags; /* partial logging flags */ 5106 int wasdel; /* was a delayed alloc extent */ 5107 int whichfork; /* data or attribute fork */ 5108 xfs_fsblock_t sum; 5109 xfs_filblks_t len = *rlen; /* length to unmap in file */ 5110 xfs_fileoff_t max_len; 5111 xfs_agnumber_t prev_agno = NULLAGNUMBER, agno; 5112 xfs_fileoff_t end; 5113 struct xfs_iext_cursor icur; 5114 bool done = false; 5115 5116 trace_xfs_bunmap(ip, start, len, flags, _RET_IP_); 5117 5118 whichfork = xfs_bmapi_whichfork(flags); 5119 ASSERT(whichfork != XFS_COW_FORK); 5120 ifp = XFS_IFORK_PTR(ip, whichfork); 5121 if (unlikely( 5122 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS && 5123 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) { 5124 XFS_ERROR_REPORT("xfs_bunmapi", XFS_ERRLEVEL_LOW, 5125 ip->i_mount); 5126 return -EFSCORRUPTED; 5127 } 5128 mp = ip->i_mount; 5129 if (XFS_FORCED_SHUTDOWN(mp)) 5130 return -EIO; 5131 5132 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); 5133 ASSERT(len > 0); 5134 ASSERT(nexts >= 0); 5135 5136 /* 5137 * Guesstimate how many blocks we can unmap without running the risk of 5138 * blowing out the transaction with a mix of EFIs and reflink 5139 * adjustments. 5140 */ 5141 if (tp && xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK) 5142 max_len = min(len, xfs_refcount_max_unmap(tp->t_log_res)); 5143 else 5144 max_len = len; 5145 5146 if (!(ifp->if_flags & XFS_IFEXTENTS) && 5147 (error = xfs_iread_extents(tp, ip, whichfork))) 5148 return error; 5149 if (xfs_iext_count(ifp) == 0) { 5150 *rlen = 0; 5151 return 0; 5152 } 5153 XFS_STATS_INC(mp, xs_blk_unmap); 5154 isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip); 5155 end = start + len; 5156 5157 if (!xfs_iext_lookup_extent_before(ip, ifp, &end, &icur, &got)) { 5158 *rlen = 0; 5159 return 0; 5160 } 5161 end--; 5162 5163 logflags = 0; 5164 if (ifp->if_flags & XFS_IFBROOT) { 5165 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE); 5166 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork); 5167 cur->bc_private.b.flags = 0; 5168 } else 5169 cur = NULL; 5170 5171 if (isrt) { 5172 /* 5173 * Synchronize by locking the bitmap inode. 5174 */ 5175 xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL|XFS_ILOCK_RTBITMAP); 5176 xfs_trans_ijoin(tp, mp->m_rbmip, XFS_ILOCK_EXCL); 5177 xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL|XFS_ILOCK_RTSUM); 5178 xfs_trans_ijoin(tp, mp->m_rsumip, XFS_ILOCK_EXCL); 5179 } 5180 5181 extno = 0; 5182 while (end != (xfs_fileoff_t)-1 && end >= start && 5183 (nexts == 0 || extno < nexts) && max_len > 0) { 5184 /* 5185 * Is the found extent after a hole in which end lives? 5186 * Just back up to the previous extent, if so. 
5187 */ 5188 if (got.br_startoff > end && 5189 !xfs_iext_prev_extent(ifp, &icur, &got)) { 5190 done = true; 5191 break; 5192 } 5193 /* 5194 * Is the last block of this extent before the range 5195 * we're supposed to delete? If so, we're done. 5196 */ 5197 end = XFS_FILEOFF_MIN(end, 5198 got.br_startoff + got.br_blockcount - 1); 5199 if (end < start) 5200 break; 5201 /* 5202 * Then deal with the (possibly delayed) allocated space 5203 * we found. 5204 */ 5205 del = got; 5206 wasdel = isnullstartblock(del.br_startblock); 5207 5208 /* 5209 * Make sure we don't touch multiple AGF headers out of order 5210 * in a single transaction, as that could cause AB-BA deadlocks. 5211 */ 5212 if (!wasdel) { 5213 agno = XFS_FSB_TO_AGNO(mp, del.br_startblock); 5214 if (prev_agno != NULLAGNUMBER && prev_agno > agno) 5215 break; 5216 prev_agno = agno; 5217 } 5218 if (got.br_startoff < start) { 5219 del.br_startoff = start; 5220 del.br_blockcount -= start - got.br_startoff; 5221 if (!wasdel) 5222 del.br_startblock += start - got.br_startoff; 5223 } 5224 if (del.br_startoff + del.br_blockcount > end + 1) 5225 del.br_blockcount = end + 1 - del.br_startoff; 5226 5227 /* How much can we safely unmap? */ 5228 if (max_len < del.br_blockcount) { 5229 del.br_startoff += del.br_blockcount - max_len; 5230 if (!wasdel) 5231 del.br_startblock += del.br_blockcount - max_len; 5232 del.br_blockcount = max_len; 5233 } 5234 5235 if (!isrt) 5236 goto delete; 5237 5238 sum = del.br_startblock + del.br_blockcount; 5239 div_u64_rem(sum, mp->m_sb.sb_rextsize, &mod); 5240 if (mod) { 5241 /* 5242 * Realtime extent not lined up at the end. 5243 * The extent could have been split into written 5244 * and unwritten pieces, or we could just be 5245 * unmapping part of it. But we can't really 5246 * get rid of part of a realtime extent. 5247 */ 5248 if (del.br_state == XFS_EXT_UNWRITTEN || 5249 !xfs_sb_version_hasextflgbit(&mp->m_sb)) { 5250 /* 5251 * This piece is unwritten, or we're not 5252 * using unwritten extents. Skip over it. 5253 */ 5254 ASSERT(end >= mod); 5255 end -= mod > del.br_blockcount ? 5256 del.br_blockcount : mod; 5257 if (end < got.br_startoff && 5258 !xfs_iext_prev_extent(ifp, &icur, &got)) { 5259 done = true; 5260 break; 5261 } 5262 continue; 5263 } 5264 /* 5265 * It's written, turn it unwritten. 5266 * This is better than zeroing it. 5267 */ 5268 ASSERT(del.br_state == XFS_EXT_NORM); 5269 ASSERT(tp->t_blk_res > 0); 5270 /* 5271 * If this spans a realtime extent boundary, 5272 * chop it back to the start of the one we end at. 5273 */ 5274 if (del.br_blockcount > mod) { 5275 del.br_startoff += del.br_blockcount - mod; 5276 del.br_startblock += del.br_blockcount - mod; 5277 del.br_blockcount = mod; 5278 } 5279 del.br_state = XFS_EXT_UNWRITTEN; 5280 error = xfs_bmap_add_extent_unwritten_real(tp, ip, 5281 whichfork, &icur, &cur, &del, 5282 &logflags); 5283 if (error) 5284 goto error0; 5285 goto nodelete; 5286 } 5287 div_u64_rem(del.br_startblock, mp->m_sb.sb_rextsize, &mod); 5288 if (mod) { 5289 /* 5290 * Realtime extent is lined up at the end but not 5291 * at the front. We'll get rid of full extents if 5292 * we can. 5293 */ 5294 mod = mp->m_sb.sb_rextsize - mod; 5295 if (del.br_blockcount > mod) { 5296 del.br_blockcount -= mod; 5297 del.br_startoff += mod; 5298 del.br_startblock += mod; 5299 } else if ((del.br_startoff == start && 5300 (del.br_state == XFS_EXT_UNWRITTEN || 5301 tp->t_blk_res == 0)) || 5302 !xfs_sb_version_hasextflgbit(&mp->m_sb)) { 5303 /* 5304 * Can't make it unwritten. 
There isn't 5305 * a full extent here so just skip it. 5306 */ 5307 ASSERT(end >= del.br_blockcount); 5308 end -= del.br_blockcount; 5309 if (got.br_startoff > end && 5310 !xfs_iext_prev_extent(ifp, &icur, &got)) { 5311 done = true; 5312 break; 5313 } 5314 continue; 5315 } else if (del.br_state == XFS_EXT_UNWRITTEN) { 5316 struct xfs_bmbt_irec prev; 5317 5318 /* 5319 * This one is already unwritten. 5320 * It must have a written left neighbor. 5321 * Unwrite the killed part of that one and 5322 * try again. 5323 */ 5324 if (!xfs_iext_prev_extent(ifp, &icur, &prev)) 5325 ASSERT(0); 5326 ASSERT(prev.br_state == XFS_EXT_NORM); 5327 ASSERT(!isnullstartblock(prev.br_startblock)); 5328 ASSERT(del.br_startblock == 5329 prev.br_startblock + prev.br_blockcount); 5330 if (prev.br_startoff < start) { 5331 mod = start - prev.br_startoff; 5332 prev.br_blockcount -= mod; 5333 prev.br_startblock += mod; 5334 prev.br_startoff = start; 5335 } 5336 prev.br_state = XFS_EXT_UNWRITTEN; 5337 error = xfs_bmap_add_extent_unwritten_real(tp, 5338 ip, whichfork, &icur, &cur, 5339 &prev, &logflags); 5340 if (error) 5341 goto error0; 5342 goto nodelete; 5343 } else { 5344 ASSERT(del.br_state == XFS_EXT_NORM); 5345 del.br_state = XFS_EXT_UNWRITTEN; 5346 error = xfs_bmap_add_extent_unwritten_real(tp, 5347 ip, whichfork, &icur, &cur, 5348 &del, &logflags); 5349 if (error) 5350 goto error0; 5351 goto nodelete; 5352 } 5353 } 5354 5355 delete: 5356 if (wasdel) { 5357 error = xfs_bmap_del_extent_delay(ip, whichfork, &icur, 5358 &got, &del); 5359 } else { 5360 error = xfs_bmap_del_extent_real(ip, tp, &icur, cur, 5361 &del, &tmp_logflags, whichfork, 5362 flags); 5363 logflags |= tmp_logflags; 5364 } 5365 5366 if (error) 5367 goto error0; 5368 5369 max_len -= del.br_blockcount; 5370 end = del.br_startoff - 1; 5371 nodelete: 5372 /* 5373 * If not done go on to the next (previous) record. 5374 */ 5375 if (end != (xfs_fileoff_t)-1 && end >= start) { 5376 if (!xfs_iext_get_extent(ifp, &icur, &got) || 5377 (got.br_startoff > end && 5378 !xfs_iext_prev_extent(ifp, &icur, &got))) { 5379 done = true; 5380 break; 5381 } 5382 extno++; 5383 } 5384 } 5385 if (done || end == (xfs_fileoff_t)-1 || end < start) 5386 *rlen = 0; 5387 else 5388 *rlen = end - start + 1; 5389 5390 /* 5391 * Convert to a btree if necessary. 5392 */ 5393 if (xfs_bmap_needs_btree(ip, whichfork)) { 5394 ASSERT(cur == NULL); 5395 error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0, 5396 &tmp_logflags, whichfork); 5397 logflags |= tmp_logflags; 5398 if (error) 5399 goto error0; 5400 } 5401 /* 5402 * transform from btree to extents, give it cur 5403 */ 5404 else if (xfs_bmap_wants_extents(ip, whichfork)) { 5405 ASSERT(cur != NULL); 5406 error = xfs_bmap_btree_to_extents(tp, ip, cur, &tmp_logflags, 5407 whichfork); 5408 logflags |= tmp_logflags; 5409 if (error) 5410 goto error0; 5411 } 5412 /* 5413 * transform from extents to local? 5414 */ 5415 error = 0; 5416 error0: 5417 /* 5418 * Log everything. Do this after conversion, there's no point in 5419 * logging the extent records if we've converted to btree format. 5420 */ 5421 if ((logflags & xfs_ilog_fext(whichfork)) && 5422 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS) 5423 logflags &= ~xfs_ilog_fext(whichfork); 5424 else if ((logflags & xfs_ilog_fbroot(whichfork)) && 5425 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE) 5426 logflags &= ~xfs_ilog_fbroot(whichfork); 5427 /* 5428 * Log inode even in the error case, if the transaction 5429 * is dirty we'll need to shut down the filesystem. 
5430 */ 5431 if (logflags) 5432 xfs_trans_log_inode(tp, ip, logflags); 5433 if (cur) { 5434 if (!error) 5435 cur->bc_private.b.allocated = 0; 5436 xfs_btree_del_cursor(cur, error); 5437 } 5438 return error; 5439 } 5440 5441 /* Unmap a range of a file. */ 5442 int 5443 xfs_bunmapi( 5444 xfs_trans_t *tp, 5445 struct xfs_inode *ip, 5446 xfs_fileoff_t bno, 5447 xfs_filblks_t len, 5448 int flags, 5449 xfs_extnum_t nexts, 5450 int *done) 5451 { 5452 int error; 5453 5454 error = __xfs_bunmapi(tp, ip, bno, &len, flags, nexts); 5455 *done = (len == 0); 5456 return error; 5457 } 5458 5459 /* 5460 * Determine whether an extent shift can be accomplished by a merge with the 5461 * extent that precedes the target hole of the shift. 5462 */ 5463 STATIC bool 5464 xfs_bmse_can_merge( 5465 struct xfs_bmbt_irec *left, /* preceding extent */ 5466 struct xfs_bmbt_irec *got, /* current extent to shift */ 5467 xfs_fileoff_t shift) /* shift fsb */ 5468 { 5469 xfs_fileoff_t startoff; 5470 5471 startoff = got->br_startoff - shift; 5472 5473 /* 5474 * The extent, once shifted, must be adjacent in-file and on-disk with 5475 * the preceding extent. 5476 */ 5477 if ((left->br_startoff + left->br_blockcount != startoff) || 5478 (left->br_startblock + left->br_blockcount != got->br_startblock) || 5479 (left->br_state != got->br_state) || 5480 (left->br_blockcount + got->br_blockcount > MAXEXTLEN)) 5481 return false; 5482 5483 return true; 5484 } 5485 5486 /* 5487 * A bmap extent shift adjusts the file offset of an extent to fill a preceding 5488 * hole in the file. If an extent shift would result in the extent being fully 5489 * adjacent to the extent that currently precedes the hole, we can merge with 5490 * the preceding extent rather than do the shift. 5491 * 5492 * This function assumes the caller has verified a shift-by-merge is possible 5493 * with the provided extents via xfs_bmse_can_merge(). 5494 */ 5495 STATIC int 5496 xfs_bmse_merge( 5497 struct xfs_trans *tp, 5498 struct xfs_inode *ip, 5499 int whichfork, 5500 xfs_fileoff_t shift, /* shift fsb */ 5501 struct xfs_iext_cursor *icur, 5502 struct xfs_bmbt_irec *got, /* extent to shift */ 5503 struct xfs_bmbt_irec *left, /* preceding extent */ 5504 struct xfs_btree_cur *cur, 5505 int *logflags) /* output */ 5506 { 5507 struct xfs_bmbt_irec new; 5508 xfs_filblks_t blockcount; 5509 int error, i; 5510 struct xfs_mount *mp = ip->i_mount; 5511 5512 blockcount = left->br_blockcount + got->br_blockcount; 5513 5514 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL)); 5515 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); 5516 ASSERT(xfs_bmse_can_merge(left, got, shift)); 5517 5518 new = *left; 5519 new.br_blockcount = blockcount; 5520 5521 /* 5522 * Update the on-disk extent count, the btree if necessary and log the 5523 * inode. 
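	 *
	 * For example (illustrative numbers): if left maps file blocks
	 * 0-7 to disk blocks 100-107 and got maps file blocks 12-15 to
	 * disk blocks 108-111, then a shift of 4 makes got start at file
	 * block 8, exactly adjacent both in file and on disk, and the
	 * merged record maps file blocks 0-11 to disk blocks 100-111.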
5524 */ 5525 XFS_IFORK_NEXT_SET(ip, whichfork, 5526 XFS_IFORK_NEXTENTS(ip, whichfork) - 1); 5527 *logflags |= XFS_ILOG_CORE; 5528 if (!cur) { 5529 *logflags |= XFS_ILOG_DEXT; 5530 goto done; 5531 } 5532 5533 /* lookup and remove the extent to merge */ 5534 error = xfs_bmbt_lookup_eq(cur, got, &i); 5535 if (error) 5536 return error; 5537 XFS_WANT_CORRUPTED_RETURN(mp, i == 1); 5538 5539 error = xfs_btree_delete(cur, &i); 5540 if (error) 5541 return error; 5542 XFS_WANT_CORRUPTED_RETURN(mp, i == 1); 5543 5544 /* lookup and update size of the previous extent */ 5545 error = xfs_bmbt_lookup_eq(cur, left, &i); 5546 if (error) 5547 return error; 5548 XFS_WANT_CORRUPTED_RETURN(mp, i == 1); 5549 5550 error = xfs_bmbt_update(cur, &new); 5551 if (error) 5552 return error; 5553 5554 done: 5555 xfs_iext_remove(ip, icur, 0); 5556 xfs_iext_prev(XFS_IFORK_PTR(ip, whichfork), icur); 5557 xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), icur, 5558 &new); 5559 5560 /* update reverse mapping. rmap functions merge the rmaps for us */ 5561 error = xfs_rmap_unmap_extent(tp, ip, whichfork, got); 5562 if (error) 5563 return error; 5564 memcpy(&new, got, sizeof(new)); 5565 new.br_startoff = left->br_startoff + left->br_blockcount; 5566 return xfs_rmap_map_extent(tp, ip, whichfork, &new); 5567 } 5568 5569 static int 5570 xfs_bmap_shift_update_extent( 5571 struct xfs_trans *tp, 5572 struct xfs_inode *ip, 5573 int whichfork, 5574 struct xfs_iext_cursor *icur, 5575 struct xfs_bmbt_irec *got, 5576 struct xfs_btree_cur *cur, 5577 int *logflags, 5578 xfs_fileoff_t startoff) 5579 { 5580 struct xfs_mount *mp = ip->i_mount; 5581 struct xfs_bmbt_irec prev = *got; 5582 int error, i; 5583 5584 *logflags |= XFS_ILOG_CORE; 5585 5586 got->br_startoff = startoff; 5587 5588 if (cur) { 5589 error = xfs_bmbt_lookup_eq(cur, &prev, &i); 5590 if (error) 5591 return error; 5592 XFS_WANT_CORRUPTED_RETURN(mp, i == 1); 5593 5594 error = xfs_bmbt_update(cur, got); 5595 if (error) 5596 return error; 5597 } else { 5598 *logflags |= XFS_ILOG_DEXT; 5599 } 5600 5601 xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), icur, 5602 got); 5603 5604 /* update reverse mapping */ 5605 error = xfs_rmap_unmap_extent(tp, ip, whichfork, &prev); 5606 if (error) 5607 return error; 5608 return xfs_rmap_map_extent(tp, ip, whichfork, got); 5609 } 5610 5611 int 5612 xfs_bmap_collapse_extents( 5613 struct xfs_trans *tp, 5614 struct xfs_inode *ip, 5615 xfs_fileoff_t *next_fsb, 5616 xfs_fileoff_t offset_shift_fsb, 5617 bool *done) 5618 { 5619 int whichfork = XFS_DATA_FORK; 5620 struct xfs_mount *mp = ip->i_mount; 5621 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork); 5622 struct xfs_btree_cur *cur = NULL; 5623 struct xfs_bmbt_irec got, prev; 5624 struct xfs_iext_cursor icur; 5625 xfs_fileoff_t new_startoff; 5626 int error = 0; 5627 int logflags = 0; 5628 5629 if (unlikely(XFS_TEST_ERROR( 5630 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS && 5631 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE), 5632 mp, XFS_ERRTAG_BMAPIFORMAT))) { 5633 XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp); 5634 return -EFSCORRUPTED; 5635 } 5636 5637 if (XFS_FORCED_SHUTDOWN(mp)) 5638 return -EIO; 5639 5640 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL)); 5641 5642 if (!(ifp->if_flags & XFS_IFEXTENTS)) { 5643 error = xfs_iread_extents(tp, ip, whichfork); 5644 if (error) 5645 return error; 5646 } 5647 5648 if (ifp->if_flags & XFS_IFBROOT) { 5649 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork); 5650 cur->bc_private.b.flags = 0; 5651 
} 5652 5653 if (!xfs_iext_lookup_extent(ip, ifp, *next_fsb, &icur, &got)) { 5654 *done = true; 5655 goto del_cursor; 5656 } 5657 XFS_WANT_CORRUPTED_GOTO(mp, !isnullstartblock(got.br_startblock), 5658 del_cursor); 5659 5660 new_startoff = got.br_startoff - offset_shift_fsb; 5661 if (xfs_iext_peek_prev_extent(ifp, &icur, &prev)) { 5662 if (new_startoff < prev.br_startoff + prev.br_blockcount) { 5663 error = -EINVAL; 5664 goto del_cursor; 5665 } 5666 5667 if (xfs_bmse_can_merge(&prev, &got, offset_shift_fsb)) { 5668 error = xfs_bmse_merge(tp, ip, whichfork, 5669 offset_shift_fsb, &icur, &got, &prev, 5670 cur, &logflags); 5671 if (error) 5672 goto del_cursor; 5673 goto done; 5674 } 5675 } else { 5676 if (got.br_startoff < offset_shift_fsb) { 5677 error = -EINVAL; 5678 goto del_cursor; 5679 } 5680 } 5681 5682 error = xfs_bmap_shift_update_extent(tp, ip, whichfork, &icur, &got, 5683 cur, &logflags, new_startoff); 5684 if (error) 5685 goto del_cursor; 5686 5687 done: 5688 if (!xfs_iext_next_extent(ifp, &icur, &got)) { 5689 *done = true; 5690 goto del_cursor; 5691 } 5692 5693 *next_fsb = got.br_startoff; 5694 del_cursor: 5695 if (cur) 5696 xfs_btree_del_cursor(cur, error); 5697 if (logflags) 5698 xfs_trans_log_inode(tp, ip, logflags); 5699 return error; 5700 } 5701 5702 /* Make sure we won't be right-shifting an extent past the maximum bound. */ 5703 int 5704 xfs_bmap_can_insert_extents( 5705 struct xfs_inode *ip, 5706 xfs_fileoff_t off, 5707 xfs_fileoff_t shift) 5708 { 5709 struct xfs_bmbt_irec got; 5710 int is_empty; 5711 int error = 0; 5712 5713 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL)); 5714 5715 if (XFS_FORCED_SHUTDOWN(ip->i_mount)) 5716 return -EIO; 5717 5718 xfs_ilock(ip, XFS_ILOCK_EXCL); 5719 error = xfs_bmap_last_extent(NULL, ip, XFS_DATA_FORK, &got, &is_empty); 5720 if (!error && !is_empty && got.br_startoff >= off && 5721 ((got.br_startoff + shift) & BMBT_STARTOFF_MASK) < got.br_startoff) 5722 error = -EINVAL; 5723 xfs_iunlock(ip, XFS_ILOCK_EXCL); 5724 5725 return error; 5726 } 5727 5728 int 5729 xfs_bmap_insert_extents( 5730 struct xfs_trans *tp, 5731 struct xfs_inode *ip, 5732 xfs_fileoff_t *next_fsb, 5733 xfs_fileoff_t offset_shift_fsb, 5734 bool *done, 5735 xfs_fileoff_t stop_fsb) 5736 { 5737 int whichfork = XFS_DATA_FORK; 5738 struct xfs_mount *mp = ip->i_mount; 5739 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork); 5740 struct xfs_btree_cur *cur = NULL; 5741 struct xfs_bmbt_irec got, next; 5742 struct xfs_iext_cursor icur; 5743 xfs_fileoff_t new_startoff; 5744 int error = 0; 5745 int logflags = 0; 5746 5747 if (unlikely(XFS_TEST_ERROR( 5748 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS && 5749 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE), 5750 mp, XFS_ERRTAG_BMAPIFORMAT))) { 5751 XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp); 5752 return -EFSCORRUPTED; 5753 } 5754 5755 if (XFS_FORCED_SHUTDOWN(mp)) 5756 return -EIO; 5757 5758 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL)); 5759 5760 if (!(ifp->if_flags & XFS_IFEXTENTS)) { 5761 error = xfs_iread_extents(tp, ip, whichfork); 5762 if (error) 5763 return error; 5764 } 5765 5766 if (ifp->if_flags & XFS_IFBROOT) { 5767 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork); 5768 cur->bc_private.b.flags = 0; 5769 } 5770 5771 if (*next_fsb == NULLFSBLOCK) { 5772 xfs_iext_last(ifp, &icur); 5773 if (!xfs_iext_get_extent(ifp, &icur, &got) || 5774 stop_fsb > got.br_startoff) { 5775 *done = true; 5776 goto del_cursor; 5777 } 5778 } else { 5779 if (!xfs_iext_lookup_extent(ip, ifp, *next_fsb, &icur, &got)) 
int
xfs_bmap_insert_extents(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		*next_fsb,
	xfs_fileoff_t		offset_shift_fsb,
	bool			*done,
	xfs_fileoff_t		stop_fsb)
{
	int			whichfork = XFS_DATA_FORK;
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_btree_cur	*cur = NULL;
	struct xfs_bmbt_irec	got, next;
	struct xfs_iext_cursor	icur;
	xfs_fileoff_t		new_startoff;
	int			error = 0;
	int			logflags = 0;

	if (unlikely(XFS_TEST_ERROR(
	    (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
	     XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
	     mp, XFS_ERRTAG_BMAPIFORMAT))) {
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
		return -EFSCORRUPTED;
	}

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL));

	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(tp, ip, whichfork);
		if (error)
			return error;
	}

	if (ifp->if_flags & XFS_IFBROOT) {
		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
		cur->bc_private.b.flags = 0;
	}

	if (*next_fsb == NULLFSBLOCK) {
		xfs_iext_last(ifp, &icur);
		if (!xfs_iext_get_extent(ifp, &icur, &got) ||
		    stop_fsb > got.br_startoff) {
			*done = true;
			goto del_cursor;
		}
	} else {
		if (!xfs_iext_lookup_extent(ip, ifp, *next_fsb, &icur, &got)) {
			*done = true;
			goto del_cursor;
		}
	}
	XFS_WANT_CORRUPTED_GOTO(mp, !isnullstartblock(got.br_startblock),
				del_cursor);

	if (stop_fsb >= got.br_startoff + got.br_blockcount) {
		error = -EIO;
		goto del_cursor;
	}

	new_startoff = got.br_startoff + offset_shift_fsb;
	if (xfs_iext_peek_next_extent(ifp, &icur, &next)) {
		if (new_startoff + got.br_blockcount > next.br_startoff) {
			error = -EINVAL;
			goto del_cursor;
		}

		/*
		 * Unlike a left shift (which involves a hole punch), a right
		 * shift does not modify extent neighbors in any way. We should
		 * never find mergeable extents in this scenario. Check anyway
		 * and warn if we encounter two extents that could be one.
		 */
		if (xfs_bmse_can_merge(&got, &next, offset_shift_fsb))
			WARN_ON_ONCE(1);
	}

	error = xfs_bmap_shift_update_extent(tp, ip, whichfork, &icur, &got,
			cur, &logflags, new_startoff);
	if (error)
		goto del_cursor;

	if (!xfs_iext_prev_extent(ifp, &icur, &got) ||
	    stop_fsb >= got.br_startoff + got.br_blockcount) {
		*done = true;
		goto del_cursor;
	}

	*next_fsb = got.br_startoff;
del_cursor:
	if (cur)
		xfs_btree_del_cursor(cur, error);
	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	return error;
}
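/*
 * For reference, a sketch of the mergeability test used above (see
 * xfs_bmse_can_merge() earlier in this file; this is a paraphrase, not the
 * authoritative definition): a preceding extent @left and a following
 * extent @got are considered mergeable under a shift of @shift blocks
 * roughly when all of the following hold:
 *
 *	left->br_startoff + left->br_blockcount == got->br_startoff - shift
 *	left->br_startblock + left->br_blockcount == got->br_startblock
 *	left->br_state == got->br_state
 *	left->br_blockcount + got->br_blockcount <= MAXEXTLEN
 *
 * i.e. after the shift the two extents would be contiguous in both file
 * offset space and on-disk block space, with matching unwritten state and
 * a combined length that still fits in a single bmbt record.
 */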
5875 */ 5876 if (!xfs_iext_lookup_extent(ip, ifp, split_fsb, &icur, &got) || 5877 got.br_startoff >= split_fsb) 5878 return 0; 5879 5880 gotblkcnt = split_fsb - got.br_startoff; 5881 new.br_startoff = split_fsb; 5882 new.br_startblock = got.br_startblock + gotblkcnt; 5883 new.br_blockcount = got.br_blockcount - gotblkcnt; 5884 new.br_state = got.br_state; 5885 5886 if (ifp->if_flags & XFS_IFBROOT) { 5887 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork); 5888 cur->bc_private.b.flags = 0; 5889 error = xfs_bmbt_lookup_eq(cur, &got, &i); 5890 if (error) 5891 goto del_cursor; 5892 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, del_cursor); 5893 } 5894 5895 got.br_blockcount = gotblkcnt; 5896 xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), &icur, 5897 &got); 5898 5899 logflags = XFS_ILOG_CORE; 5900 if (cur) { 5901 error = xfs_bmbt_update(cur, &got); 5902 if (error) 5903 goto del_cursor; 5904 } else 5905 logflags |= XFS_ILOG_DEXT; 5906 5907 /* Add new extent */ 5908 xfs_iext_next(ifp, &icur); 5909 xfs_iext_insert(ip, &icur, &new, 0); 5910 XFS_IFORK_NEXT_SET(ip, whichfork, 5911 XFS_IFORK_NEXTENTS(ip, whichfork) + 1); 5912 5913 if (cur) { 5914 error = xfs_bmbt_lookup_eq(cur, &new, &i); 5915 if (error) 5916 goto del_cursor; 5917 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, del_cursor); 5918 error = xfs_btree_insert(cur, &i); 5919 if (error) 5920 goto del_cursor; 5921 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, del_cursor); 5922 } 5923 5924 /* 5925 * Convert to a btree if necessary. 5926 */ 5927 if (xfs_bmap_needs_btree(ip, whichfork)) { 5928 int tmp_logflags; /* partial log flag return val */ 5929 5930 ASSERT(cur == NULL); 5931 error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0, 5932 &tmp_logflags, whichfork); 5933 logflags |= tmp_logflags; 5934 } 5935 5936 del_cursor: 5937 if (cur) { 5938 cur->bc_private.b.allocated = 0; 5939 xfs_btree_del_cursor(cur, error); 5940 } 5941 5942 if (logflags) 5943 xfs_trans_log_inode(tp, ip, logflags); 5944 return error; 5945 } 5946 5947 int 5948 xfs_bmap_split_extent( 5949 struct xfs_inode *ip, 5950 xfs_fileoff_t split_fsb) 5951 { 5952 struct xfs_mount *mp = ip->i_mount; 5953 struct xfs_trans *tp; 5954 int error; 5955 5956 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 5957 XFS_DIOSTRAT_SPACE_RES(mp, 0), 0, 0, &tp); 5958 if (error) 5959 return error; 5960 5961 xfs_ilock(ip, XFS_ILOCK_EXCL); 5962 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); 5963 5964 error = xfs_bmap_split_extent_at(tp, ip, split_fsb); 5965 if (error) 5966 goto out; 5967 5968 return xfs_trans_commit(tp); 5969 5970 out: 5971 xfs_trans_cancel(tp); 5972 return error; 5973 } 5974 5975 /* Deferred mapping is only for real extents in the data fork. */ 5976 static bool 5977 xfs_bmap_is_update_needed( 5978 struct xfs_bmbt_irec *bmap) 5979 { 5980 return bmap->br_startblock != HOLESTARTBLOCK && 5981 bmap->br_startblock != DELAYSTARTBLOCK; 5982 } 5983 5984 /* Record a bmap intent. 
/* Deferred mapping is only for real extents in the data fork. */
static bool
xfs_bmap_is_update_needed(
	struct xfs_bmbt_irec	*bmap)
{
	return bmap->br_startblock != HOLESTARTBLOCK &&
		bmap->br_startblock != DELAYSTARTBLOCK;
}

/* Record a bmap intent. */
static int
__xfs_bmap_add(
	struct xfs_trans		*tp,
	enum xfs_bmap_intent_type	type,
	struct xfs_inode		*ip,
	int				whichfork,
	struct xfs_bmbt_irec		*bmap)
{
	struct xfs_bmap_intent		*bi;

	trace_xfs_bmap_defer(tp->t_mountp,
			XFS_FSB_TO_AGNO(tp->t_mountp, bmap->br_startblock),
			type,
			XFS_FSB_TO_AGBNO(tp->t_mountp, bmap->br_startblock),
			ip->i_ino, whichfork,
			bmap->br_startoff,
			bmap->br_blockcount,
			bmap->br_state);

	bi = kmem_alloc(sizeof(struct xfs_bmap_intent), KM_SLEEP | KM_NOFS);
	INIT_LIST_HEAD(&bi->bi_list);
	bi->bi_type = type;
	bi->bi_owner = ip;
	bi->bi_whichfork = whichfork;
	bi->bi_bmap = *bmap;

	xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_BMAP, &bi->bi_list);
	return 0;
}

/* Map an extent into a file. */
int
xfs_bmap_map_extent(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_bmbt_irec	*PREV)
{
	if (!xfs_bmap_is_update_needed(PREV))
		return 0;

	return __xfs_bmap_add(tp, XFS_BMAP_MAP, ip, XFS_DATA_FORK, PREV);
}

/* Unmap an extent out of a file. */
int
xfs_bmap_unmap_extent(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_bmbt_irec	*PREV)
{
	if (!xfs_bmap_is_update_needed(PREV))
		return 0;

	return __xfs_bmap_add(tp, XFS_BMAP_UNMAP, ip, XFS_DATA_FORK, PREV);
}

/*
 * Process one of the deferred bmap operations.  We pass back the
 * btree cursor to maintain our lock on the bmapbt between calls.
 */
int
xfs_bmap_finish_one(
	struct xfs_trans		*tp,
	struct xfs_inode		*ip,
	enum xfs_bmap_intent_type	type,
	int				whichfork,
	xfs_fileoff_t			startoff,
	xfs_fsblock_t			startblock,
	xfs_filblks_t			*blockcount,
	xfs_exntst_t			state)
{
	int				error = 0;

	ASSERT(tp->t_firstblock == NULLFSBLOCK);

	trace_xfs_bmap_deferred(tp->t_mountp,
			XFS_FSB_TO_AGNO(tp->t_mountp, startblock), type,
			XFS_FSB_TO_AGBNO(tp->t_mountp, startblock),
			ip->i_ino, whichfork, startoff, *blockcount, state);

	if (WARN_ON_ONCE(whichfork != XFS_DATA_FORK))
		return -EFSCORRUPTED;

	if (XFS_TEST_ERROR(false, tp->t_mountp,
			XFS_ERRTAG_BMAP_FINISH_ONE))
		return -EIO;

	switch (type) {
	case XFS_BMAP_MAP:
		error = xfs_bmapi_remap(tp, ip, startoff, *blockcount,
				startblock, 0);
		*blockcount = 0;
		break;
	case XFS_BMAP_UNMAP:
		error = __xfs_bunmapi(tp, ip, startoff, blockcount,
				XFS_BMAPI_REMAP, 1);
		break;
	default:
		ASSERT(0);
		error = -EFSCORRUPTED;
	}

	return error;
}
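/*
 * Sketch of the deferred-op lifecycle (illustrative and simplified): a
 * higher-level operation such as a reflink remap queues intents with
 * xfs_bmap_unmap_extent()/xfs_bmap_map_extent() above, commits the
 * transaction, and the deferred-ops machinery later calls back into
 * xfs_bmap_finish_one() for each logged intent:
 *
 *	error = xfs_bmap_unmap_extent(tp, ip, &old);	// queue unmap
 *	if (error)
 *		goto out;
 *	error = xfs_bmap_map_extent(tp, ip, &new);	// queue map
 *	if (error)
 *		goto out;
 *	error = xfs_trans_commit(tp);	// intents finished via finish_one
 */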
/*
 * Check that an inode's extent does not have invalid flags or bad ranges.
 */
xfs_failaddr_t
xfs_bmap_validate_extent(
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_bmbt_irec	*irec)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fsblock_t		endfsb;
	bool			isrt;

	isrt = XFS_IS_REALTIME_INODE(ip);
	endfsb = irec->br_startblock + irec->br_blockcount - 1;
	if (isrt) {
		if (!xfs_verify_rtbno(mp, irec->br_startblock))
			return __this_address;
		if (!xfs_verify_rtbno(mp, endfsb))
			return __this_address;
	} else {
		if (!xfs_verify_fsbno(mp, irec->br_startblock))
			return __this_address;
		if (!xfs_verify_fsbno(mp, endfsb))
			return __this_address;
		if (XFS_FSB_TO_AGNO(mp, irec->br_startblock) !=
		    XFS_FSB_TO_AGNO(mp, endfsb))
			return __this_address;
	}
	if (irec->br_state != XFS_EXT_NORM) {
		if (whichfork != XFS_DATA_FORK)
			return __this_address;
		if (!xfs_sb_version_hasextflgbit(&mp->m_sb))
			return __this_address;
	}
	return NULL;
}
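/*
 * Typical caller pattern for the validator above (a sketch; exact call
 * sites vary): code reading on-disk bmbt records converts each record to
 * an incore irec and rejects the inode when a failure address comes back:
 *
 *	struct xfs_bmbt_irec	new;
 *	xfs_failaddr_t		fa;
 *
 *	xfs_bmbt_disk_get_all(frp, &new);
 *	fa = xfs_bmap_validate_extent(ip, whichfork, &new);
 *	if (fa) {
 *		// report through the inode verifier machinery and
 *		// fail the read with -EFSCORRUPTED
 *	}
 */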