// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * Copyright (c) 2012 Red Hat, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_trans.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_trans_space.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_iomap.h"
#include "xfs_reflink.h"
#include "xfs_rtbitmap.h"

/* Kernel only BMAP related definitions and functions */

/*
 * Convert the given file system block to a disk block. We have to treat it
 * differently based on whether the file is a real time file or not, because the
 * bmap code does.
 */
xfs_daddr_t
xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb)
{
	if (XFS_IS_REALTIME_INODE(ip))
		return XFS_FSB_TO_BB(ip->i_mount, fsb);
	return XFS_FSB_TO_DADDR(ip->i_mount, fsb);
}

/*
 * Routine to zero an extent on disk allocated to the specific inode.
 *
 * The VFS functions take a linearised filesystem block offset, so we have to
 * convert the sparse xfs fsb to the right format first.
 * VFS types are real funky, too.
 */
int
xfs_zero_extent(
	struct xfs_inode	*ip,
	xfs_fsblock_t		start_fsb,
	xfs_off_t		count_fsb)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_buftarg	*target = xfs_inode_buftarg(ip);
	xfs_daddr_t		sector = xfs_fsb_to_db(ip, start_fsb);
	sector_t		block = XFS_BB_TO_FSBT(mp, sector);

	return blkdev_issue_zeroout(target->bt_bdev,
		block << (mp->m_super->s_blocksize_bits - 9),
		count_fsb << (mp->m_super->s_blocksize_bits - 9),
		GFP_KERNEL, 0);
}

/*
 * Extent tree block counting routines.
 */

/*
 * Count leaf blocks given a range of extent records. Delayed allocation
 * extents are not counted towards the totals.
 */
xfs_extnum_t
xfs_bmap_count_leaves(
	struct xfs_ifork	*ifp,
	xfs_filblks_t		*count)
{
	struct xfs_iext_cursor	icur;
	struct xfs_bmbt_irec	got;
	xfs_extnum_t		numrecs = 0;

	for_each_xfs_iext(ifp, &icur, &got) {
		if (!isnullstartblock(got.br_startblock)) {
			*count += got.br_blockcount;
			numrecs++;
		}
	}

	return numrecs;
}

/*
 * Count fsblocks of the given fork. Delayed allocation extents are
 * not counted towards the totals.
 */
int
xfs_bmap_count_blocks(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork,
	xfs_extnum_t		*nextents,
	xfs_filblks_t		*count)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_btree_cur	*cur;
	xfs_extlen_t		btblocks = 0;
	int			error;

	*nextents = 0;
	*count = 0;

	if (!ifp)
		return 0;

	switch (ifp->if_format) {
	case XFS_DINODE_FMT_BTREE:
		error = xfs_iread_extents(tp, ip, whichfork);
		if (error)
			return error;

		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
		error = xfs_btree_count_blocks(cur, &btblocks);
		xfs_btree_del_cursor(cur, error);
		if (error)
			return error;

		/*
		 * xfs_btree_count_blocks includes the root block contained in
		 * the inode fork in @btblocks, so subtract one because we're
		 * only interested in allocated disk blocks.
		 */
		*count += btblocks - 1;

		fallthrough;
	case XFS_DINODE_FMT_EXTENTS:
		*nextents = xfs_bmap_count_leaves(ifp, count);
		break;
	}

	return 0;
}

static int
xfs_getbmap_report_one(
	struct xfs_inode	*ip,
	struct getbmapx		*bmv,
	struct kgetbmap		*out,
	int64_t			bmv_end,
	struct xfs_bmbt_irec	*got)
{
	struct kgetbmap		*p = out + bmv->bmv_entries;
	bool			shared = false;
	int			error;

	error = xfs_reflink_trim_around_shared(ip, got, &shared);
	if (error)
		return error;

	if (isnullstartblock(got->br_startblock) ||
	    got->br_startblock == DELAYSTARTBLOCK) {
		/*
		 * Take the flush completion as being a point-in-time snapshot
		 * where there are no delalloc extents, and if any new ones
		 * have been created racily, just skip them as being 'after'
		 * the flush and so don't get reported.
		 */
		if (!(bmv->bmv_iflags & BMV_IF_DELALLOC))
			return 0;

		p->bmv_oflags |= BMV_OF_DELALLOC;
		p->bmv_block = -2;
	} else {
		p->bmv_block = xfs_fsb_to_db(ip, got->br_startblock);
	}

	if (got->br_state == XFS_EXT_UNWRITTEN &&
	    (bmv->bmv_iflags & BMV_IF_PREALLOC))
		p->bmv_oflags |= BMV_OF_PREALLOC;

	if (shared)
		p->bmv_oflags |= BMV_OF_SHARED;

	p->bmv_offset = XFS_FSB_TO_BB(ip->i_mount, got->br_startoff);
	p->bmv_length = XFS_FSB_TO_BB(ip->i_mount, got->br_blockcount);

	bmv->bmv_offset = p->bmv_offset + p->bmv_length;
	bmv->bmv_length = max(0LL, bmv_end - bmv->bmv_offset);
	bmv->bmv_entries++;
	return 0;
}

static void
xfs_getbmap_report_hole(
	struct xfs_inode	*ip,
	struct getbmapx		*bmv,
	struct kgetbmap		*out,
	int64_t			bmv_end,
	xfs_fileoff_t		bno,
	xfs_fileoff_t		end)
{
	struct kgetbmap		*p = out + bmv->bmv_entries;

	if (bmv->bmv_iflags & BMV_IF_NO_HOLES)
		return;

	p->bmv_block = -1;
	p->bmv_offset = XFS_FSB_TO_BB(ip->i_mount, bno);
	p->bmv_length = XFS_FSB_TO_BB(ip->i_mount, end - bno);

	bmv->bmv_offset = p->bmv_offset + p->bmv_length;
	bmv->bmv_length = max(0LL, bmv_end - bmv->bmv_offset);
	bmv->bmv_entries++;
}

static inline bool
xfs_getbmap_full(
	struct getbmapx		*bmv)
{
	return bmv->bmv_length == 0 || bmv->bmv_entries >= bmv->bmv_count - 1;
}

/*
 * Advance the record past the portion that was just reported so that the
 * remainder of a partially shared bmbt record can be reported separately.
 */
static bool
xfs_getbmap_next_rec(
	struct xfs_bmbt_irec	*rec,
	xfs_fileoff_t		total_end)
{
	xfs_fileoff_t		end = rec->br_startoff + rec->br_blockcount;

	if (end == total_end)
		return false;

	rec->br_startoff += rec->br_blockcount;
	if (!isnullstartblock(rec->br_startblock) &&
	    rec->br_startblock != DELAYSTARTBLOCK)
		rec->br_startblock += rec->br_blockcount;
	rec->br_blockcount = total_end - end;
	return true;
}

/*
 * Get inode's extents as described in bmv, and format for output.
 * Calls formatter to fill the user's buffer until all extents
 * are mapped, until the passed-in bmv->bmv_count slots have
 * been filled, or until the formatter short-circuits the loop,
 * if it is tracking filled-in extents on its own.
 */
int						/* error code */
xfs_getbmap(
	struct xfs_inode	*ip,
	struct getbmapx		*bmv,		/* user bmap structure */
	struct kgetbmap		*out)
{
	struct xfs_mount	*mp = ip->i_mount;
	int			iflags = bmv->bmv_iflags;
	int			whichfork, lock, error = 0;
	int64_t			bmv_end, max_len;
	xfs_fileoff_t		bno, first_bno;
	struct xfs_ifork	*ifp;
	struct xfs_bmbt_irec	got, rec;
	xfs_filblks_t		len;
	struct xfs_iext_cursor	icur;

	if (bmv->bmv_iflags & ~BMV_IF_VALID)
		return -EINVAL;
#ifndef DEBUG
	/* Only allow CoW fork queries if we're debugging. */
	if (iflags & BMV_IF_COWFORK)
		return -EINVAL;
#endif
	if ((iflags & BMV_IF_ATTRFORK) && (iflags & BMV_IF_COWFORK))
		return -EINVAL;

	if (bmv->bmv_length < -1)
		return -EINVAL;
	bmv->bmv_entries = 0;
	if (bmv->bmv_length == 0)
		return 0;

	if (iflags & BMV_IF_ATTRFORK)
		whichfork = XFS_ATTR_FORK;
	else if (iflags & BMV_IF_COWFORK)
		whichfork = XFS_COW_FORK;
	else
		whichfork = XFS_DATA_FORK;

	xfs_ilock(ip, XFS_IOLOCK_SHARED);
	switch (whichfork) {
	case XFS_ATTR_FORK:
		lock = xfs_ilock_attr_map_shared(ip);
		if (!xfs_inode_has_attr_fork(ip))
			goto out_unlock_ilock;

		max_len = 1LL << 32;
		break;
	case XFS_COW_FORK:
		lock = XFS_ILOCK_SHARED;
		xfs_ilock(ip, lock);

		/* No CoW fork? Just return */
		if (!xfs_ifork_ptr(ip, whichfork))
			goto out_unlock_ilock;

		if (xfs_get_cowextsz_hint(ip))
			max_len = mp->m_super->s_maxbytes;
		else
			max_len = XFS_ISIZE(ip);
		break;
	case XFS_DATA_FORK:
		if (!(iflags & BMV_IF_DELALLOC) &&
		    (ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_disk_size)) {
			error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
			if (error)
				goto out_unlock_iolock;

			/*
			 * Even after flushing the inode, there can still be
			 * delalloc blocks on the inode beyond EOF due to
			 * speculative preallocation. These are not removed
			 * until the release function is called or the inode
			 * is inactivated. Hence we cannot assert here that
			 * ip->i_delayed_blks == 0.
			 */
		}

		if (xfs_get_extsz_hint(ip) ||
		    (ip->i_diflags &
		     (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)))
			max_len = mp->m_super->s_maxbytes;
		else
			max_len = XFS_ISIZE(ip);

		lock = xfs_ilock_data_map_shared(ip);
		break;
	}

	ifp = xfs_ifork_ptr(ip, whichfork);

	switch (ifp->if_format) {
	case XFS_DINODE_FMT_EXTENTS:
	case XFS_DINODE_FMT_BTREE:
		break;
	case XFS_DINODE_FMT_LOCAL:
		/* Local format inode forks report no extents. */
		goto out_unlock_ilock;
	default:
		error = -EINVAL;
		goto out_unlock_ilock;
	}

	if (bmv->bmv_length == -1) {
		max_len = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, max_len));
		bmv->bmv_length = max(0LL, max_len - bmv->bmv_offset);
	}

	bmv_end = bmv->bmv_offset + bmv->bmv_length;

	first_bno = bno = XFS_BB_TO_FSBT(mp, bmv->bmv_offset);
	len = XFS_BB_TO_FSB(mp, bmv->bmv_length);

	error = xfs_iread_extents(NULL, ip, whichfork);
	if (error)
		goto out_unlock_ilock;

	if (!xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got)) {
		/*
		 * Report a whole-file hole if the delalloc flag is set to
		 * stay compatible with the old implementation.
		 */
		if (iflags & BMV_IF_DELALLOC)
			xfs_getbmap_report_hole(ip, bmv, out, bmv_end, bno,
					XFS_B_TO_FSB(mp, XFS_ISIZE(ip)));
		goto out_unlock_ilock;
	}

	while (!xfs_getbmap_full(bmv)) {
		xfs_trim_extent(&got, first_bno, len);

		/*
		 * Report an entry for a hole if this extent doesn't directly
		 * follow the previous one.
		 */
		if (got.br_startoff > bno) {
			xfs_getbmap_report_hole(ip, bmv, out, bmv_end, bno,
					got.br_startoff);
			if (xfs_getbmap_full(bmv))
				break;
		}

		/*
		 * In order to report shared extents accurately, we report each
		 * distinct shared / unshared part of a single bmbt record with
		 * an individual getbmapx record.
		 */
		bno = got.br_startoff + got.br_blockcount;
		rec = got;
		do {
			error = xfs_getbmap_report_one(ip, bmv, out, bmv_end,
					&rec);
			if (error || xfs_getbmap_full(bmv))
				goto out_unlock_ilock;
		} while (xfs_getbmap_next_rec(&rec, bno));

		if (!xfs_iext_next_extent(ifp, &icur, &got)) {
			xfs_fileoff_t	end = XFS_B_TO_FSB(mp, XFS_ISIZE(ip));

			if (bmv->bmv_entries > 0)
				out[bmv->bmv_entries - 1].bmv_oflags |=
								BMV_OF_LAST;

			if (whichfork != XFS_ATTR_FORK && bno < end &&
			    !xfs_getbmap_full(bmv)) {
				xfs_getbmap_report_hole(ip, bmv, out, bmv_end,
						bno, end);
			}
			break;
		}

		if (bno >= first_bno + len)
			break;
	}

out_unlock_ilock:
	xfs_iunlock(ip, lock);
out_unlock_iolock:
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
	return error;
}

/*
 * Dead simple method of punching delayed allocation blocks from a range in
 * the inode. This will always punch out both the start and end blocks, even
 * if the ranges only partially overlap them, so it is up to the caller to
 * ensure that partial blocks are not passed in.
 */
void
xfs_bmap_punch_delalloc_range(
	struct xfs_inode	*ip,
	xfs_off_t		start_byte,
	xfs_off_t		end_byte)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = &ip->i_df;
	xfs_fileoff_t		start_fsb = XFS_B_TO_FSBT(mp, start_byte);
	xfs_fileoff_t		end_fsb = XFS_B_TO_FSB(mp, end_byte);
	struct xfs_bmbt_irec	got, del;
	struct xfs_iext_cursor	icur;

	ASSERT(!xfs_need_iread_extents(ifp));

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	if (!xfs_iext_lookup_extent_before(ip, ifp, &end_fsb, &icur, &got))
		goto out_unlock;

	while (got.br_startoff + got.br_blockcount > start_fsb) {
		del = got;
		xfs_trim_extent(&del, start_fsb, end_fsb - start_fsb);

		/*
		 * A delete can push the cursor forward. Step back to the
		 * previous extent on non-delalloc or extents outside the
		 * target range.
		 */
		if (!del.br_blockcount ||
		    !isnullstartblock(del.br_startblock)) {
			if (!xfs_iext_prev_extent(ifp, &icur, &got))
				break;
			continue;
		}

		xfs_bmap_del_extent_delay(ip, XFS_DATA_FORK, &icur, &got, &del);
		if (!xfs_iext_get_extent(ifp, &icur, &got))
			break;
	}

out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
}

/*
 * Test whether it is appropriate to check an inode for and free post EOF
 * blocks. The 'force' parameter determines whether we should also consider
 * regular files that are marked preallocated or append-only.
 */
bool
xfs_can_free_eofblocks(
	struct xfs_inode	*ip,
	bool			force)
{
	struct xfs_bmbt_irec	imap;
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		end_fsb;
	xfs_fileoff_t		last_fsb;
	int			nimaps = 1;
	int			error;

	/*
	 * Caller must either hold the exclusive io lock; or be inactivating
	 * the inode, which guarantees there are no other users of the inode.
	 */
	if (!(VFS_I(ip)->i_state & I_FREEING))
		xfs_assert_ilocked(ip, XFS_IOLOCK_EXCL);

	/* prealloc/delalloc exists only on regular files */
	if (!S_ISREG(VFS_I(ip)->i_mode))
		return false;

	/*
	 * Zero sized files with no cached pages and delalloc blocks will not
	 * have speculative prealloc/delalloc blocks to remove.
	 */
	if (VFS_I(ip)->i_size == 0 &&
	    VFS_I(ip)->i_mapping->nrpages == 0 &&
	    ip->i_delayed_blks == 0)
		return false;

	/* If we haven't read in the extent list, then don't do it now. */
	if (xfs_need_iread_extents(&ip->i_df))
		return false;

	/*
	 * Do not free real preallocated or append-only files unless the file
	 * has delalloc blocks and we are forced to remove them.
	 */
	if (ip->i_diflags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND))
		if (!force || ip->i_delayed_blks == 0)
			return false;

	/*
	 * Do not try to free post-EOF blocks if EOF is beyond the end of the
	 * range supported by the page cache, because the truncation will loop
	 * forever.
	 */
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_ISIZE(ip));
	if (xfs_inode_has_bigrtalloc(ip))
		end_fsb = xfs_rtb_roundup_rtx(mp, end_fsb);
	last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
	if (last_fsb <= end_fsb)
		return false;

	/*
	 * Look up the mapping for the first block past EOF. If we can't find
	 * it, there's nothing to free.
	 */
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	error = xfs_bmapi_read(ip, end_fsb, last_fsb - end_fsb, &imap, &nimaps,
			0);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	if (error || nimaps == 0)
		return false;

	/*
	 * If there's a real mapping there or there are delayed allocation
	 * reservations, then we have post-EOF blocks to try to free.
	 */
	return imap.br_startblock != HOLESTARTBLOCK || ip->i_delayed_blks;
}

/*
 * This is called to free any blocks beyond eof. The caller must hold
 * IOLOCK_EXCL unless we are in the inode reclaim path and have the only
 * reference to the inode.
 */
int
xfs_free_eofblocks(
	struct xfs_inode	*ip)
{
	struct xfs_trans	*tp;
	struct xfs_mount	*mp = ip->i_mount;
	int			error;

	/* Attach the dquots to the inode up front. */
	error = xfs_qm_dqattach(ip);
	if (error)
		return error;

	/* Wait on dio to ensure i_size has settled. */
	inode_dio_wait(VFS_I(ip));

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
	if (error) {
		ASSERT(xfs_is_shutdown(mp));
		return error;
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	/*
	 * Do not update the on-disk file size. If we update the on-disk file
	 * size and then the system crashes before the contents of the file are
	 * flushed to disk then the files may be full of holes (ie NULL files
	 * bug).
	 */
	error = xfs_itruncate_extents_flags(&tp, ip, XFS_DATA_FORK,
				XFS_ISIZE(ip), XFS_BMAPI_NODISCARD);
	if (error)
		goto err_cancel;

	error = xfs_trans_commit(tp);
	if (error)
		goto out_unlock;

	xfs_inode_clear_eofblocks_tag(ip);
	goto out_unlock;

err_cancel:
	/*
	 * If we get an error at this point we simply don't
	 * bother truncating the file.
	 */
	xfs_trans_cancel(tp);
out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

int
xfs_alloc_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	xfs_mount_t		*mp = ip->i_mount;
	xfs_off_t		count;
	xfs_filblks_t		allocatesize_fsb;
	xfs_extlen_t		extsz, temp;
	xfs_fileoff_t		startoffset_fsb;
	xfs_fileoff_t		endoffset_fsb;
	int			rt;
	xfs_trans_t		*tp;
	xfs_bmbt_irec_t		imaps[1], *imapp;
	int			error;

	trace_xfs_alloc_file_space(ip);

	if (xfs_is_shutdown(mp))
		return -EIO;

	error = xfs_qm_dqattach(ip);
	if (error)
		return error;

	if (len <= 0)
		return -EINVAL;

	rt = XFS_IS_REALTIME_INODE(ip);
	extsz = xfs_get_extsz_hint(ip);

	count = len;
	imapp = &imaps[0];
	startoffset_fsb = XFS_B_TO_FSBT(mp, offset);
	endoffset_fsb = XFS_B_TO_FSB(mp, offset + count);
	allocatesize_fsb = endoffset_fsb - startoffset_fsb;

	/*
	 * Allocate file space until done or until there is an error
	 */
	while (allocatesize_fsb && !error) {
		xfs_fileoff_t	s, e;
		unsigned int	dblocks, rblocks, resblks;
		int		nimaps = 1;

		/*
		 * Determine space reservations for data/realtime.
		 */
		if (unlikely(extsz)) {
			s = startoffset_fsb;
			do_div(s, extsz);
			s *= extsz;
			e = startoffset_fsb + allocatesize_fsb;
			div_u64_rem(startoffset_fsb, extsz, &temp);
			if (temp)
				e += temp;
			div_u64_rem(e, extsz, &temp);
			if (temp)
				e += extsz - temp;
		} else {
			s = 0;
			e = allocatesize_fsb;
		}

		/*
		 * The transaction reservation is limited to a 32-bit block
		 * count, hence we need to limit the number of blocks we are
		 * trying to reserve to avoid an overflow. We can't allocate
		 * more than @nimaps extents, and an extent is limited on disk
		 * to XFS_BMBT_MAX_EXTLEN (21 bits), so use that to enforce the
		 * limit.
		 */
		resblks = min_t(xfs_fileoff_t, (e - s),
				(XFS_MAX_BMBT_EXTLEN * nimaps));
		if (unlikely(rt)) {
			dblocks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
			rblocks = resblks;
		} else {
			dblocks = XFS_DIOSTRAT_SPACE_RES(mp, resblks);
			rblocks = 0;
		}

		error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write,
				dblocks, rblocks, false, &tp);
		if (error)
			break;

		error = xfs_iext_count_extend(tp, ip, XFS_DATA_FORK,
				XFS_IEXT_ADD_NOSPLIT_CNT);
		if (error)
			goto error;

		/*
		 * If the allocator cannot find a single free extent large
		 * enough to cover the start block of the requested range,
		 * xfs_bmapi_write will return -ENOSR.
		 *
		 * In that case we simply need to keep looping with the same
		 * startoffset_fsb so that one of the following allocations
		 * will eventually reach the requested range.
		 */
		error = xfs_bmapi_write(tp, ip, startoffset_fsb,
				allocatesize_fsb, XFS_BMAPI_PREALLOC, 0, imapp,
				&nimaps);
		if (error) {
			if (error != -ENOSR)
				goto error;
			error = 0;
		} else {
			startoffset_fsb += imapp->br_blockcount;
			allocatesize_fsb -= imapp->br_blockcount;
		}

		ip->i_diflags |= XFS_DIFLAG_PREALLOC;
		xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

		error = xfs_trans_commit(tp);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
	}

	return error;

error:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

static int
xfs_unmap_extent(
	struct xfs_inode	*ip,
	xfs_fileoff_t		startoffset_fsb,
	xfs_filblks_t		len_fsb,
	int			*done)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	uint			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
	int			error;

	error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write, resblks, 0,
			false, &tp);
	if (error)
		return error;

	error = xfs_iext_count_extend(tp, ip, XFS_DATA_FORK,
			XFS_IEXT_PUNCH_HOLE_CNT);
	if (error)
		goto out_trans_cancel;

	error = xfs_bunmapi(tp, ip, startoffset_fsb, len_fsb, 0, 2, done);
	if (error)
		goto out_trans_cancel;

	error = xfs_trans_commit(tp);
out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;

out_trans_cancel:
	xfs_trans_cancel(tp);
	goto out_unlock;
}

/* Caller must first wait for the completion of any pending DIOs if required. */
int
xfs_flush_unmap_range(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct inode		*inode = VFS_I(ip);
	xfs_off_t		rounding, start, end;
	int			error;

	rounding = max_t(xfs_off_t, mp->m_sb.sb_blocksize, PAGE_SIZE);
	start = round_down(offset, rounding);
	end = round_up(offset + len, rounding) - 1;

	error = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (error)
		return error;
	truncate_pagecache_range(inode, start, end);
	return 0;
}

int
xfs_free_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		startoffset_fsb;
	xfs_fileoff_t		endoffset_fsb;
	int			done = 0, error;

	trace_xfs_free_file_space(ip);

	error = xfs_qm_dqattach(ip);
	if (error)
		return error;

	if (len <= 0)	/* if nothing being freed */
		return 0;

	startoffset_fsb = XFS_B_TO_FSB(mp, offset);
	endoffset_fsb = XFS_B_TO_FSBT(mp, offset + len);

	/* We can only free complete realtime extents. */
	if (xfs_inode_has_bigrtalloc(ip)) {
		startoffset_fsb = xfs_rtb_roundup_rtx(mp, startoffset_fsb);
		endoffset_fsb = xfs_rtb_rounddown_rtx(mp, endoffset_fsb);
	}

	/*
	 * Need to zero the stuff we're not freeing, on disk.
	 */
	if (endoffset_fsb > startoffset_fsb) {
		while (!done) {
			error = xfs_unmap_extent(ip, startoffset_fsb,
					endoffset_fsb - startoffset_fsb, &done);
			if (error)
				return error;
		}
	}

	/*
	 * Now that we've unmapped all full blocks, we'll have to zero out any
	 * partial block at the beginning and/or end. xfs_zero_range is smart
	 * enough to skip any holes, including those we just created, but we
	 * must take care not to zero beyond EOF and enlarge i_size.
	 */
	if (offset >= XFS_ISIZE(ip))
		return 0;
	if (offset + len > XFS_ISIZE(ip))
		len = XFS_ISIZE(ip) - offset;
	error = xfs_zero_range(ip, offset, len, NULL);
	if (error)
		return error;

	/*
	 * If we zeroed right up to EOF and EOF straddles a page boundary we
	 * must make sure that the post-EOF area is also zeroed because the
	 * page could be mmap'd and xfs_zero_range doesn't do that for us.
	 * Writeback of the eof page will do this, albeit clumsily.
	 */
	if (offset + len >= XFS_ISIZE(ip) && offset_in_page(offset + len) > 0) {
		error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
				round_down(offset + len, PAGE_SIZE), LLONG_MAX);
	}

	return error;
}

static int
xfs_prepare_shift(
	struct xfs_inode	*ip,
	loff_t			offset)
{
	struct xfs_mount	*mp = ip->i_mount;
	int			error;

	/*
	 * Trim eofblocks to avoid shifting uninitialized post-eof preallocation
	 * into the accessible region of the file.
	 */
	if (xfs_can_free_eofblocks(ip, true)) {
		error = xfs_free_eofblocks(ip);
		if (error)
			return error;
	}

	/*
	 * Shift operations must stabilize the start block offset boundary along
	 * with the full range of the operation. If we don't, a COW writeback
	 * completion could race with an insert, front merge with the start
	 * extent (after split) during the shift and corrupt the file. Start
	 * with the block just prior to the start to stabilize the boundary.
	 */
	offset = round_down(offset, mp->m_sb.sb_blocksize);
	if (offset)
		offset -= mp->m_sb.sb_blocksize;

	/*
	 * Writeback and invalidate cache for the remainder of the file as we're
	 * about to shift down every extent from offset to EOF.
	 */
	error = xfs_flush_unmap_range(ip, offset, XFS_ISIZE(ip));
	if (error)
		return error;

	/*
	 * Clean out anything hanging around in the cow fork now that
	 * we've flushed all the dirty data out to disk to avoid having
	 * CoW extents at the wrong offsets.
	 */
	if (xfs_inode_has_cow_data(ip)) {
		error = xfs_reflink_cancel_cow_range(ip, offset, NULLFILEOFF,
				true);
		if (error)
			return error;
	}

	return 0;
}

/*
 * xfs_collapse_file_space()
 *	This routine frees disk space and shifts extents for the given file.
 *	The first thing we do is free the data blocks in the specified range
 *	by calling xfs_free_file_space(). That also syncs dirty data and
 *	invalidates the page cache over the region on which the collapse
 *	range is working. Extent records are then shifted to the left to
 *	cover the hole.
 * RETURNS:
 *	0 on success
 *	errno on error
 */
int
xfs_collapse_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;
	xfs_fileoff_t		next_fsb = XFS_B_TO_FSB(mp, offset + len);
	xfs_fileoff_t		shift_fsb = XFS_B_TO_FSB(mp, len);
	bool			done = false;

	xfs_assert_ilocked(ip, XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL);

	trace_xfs_collapse_file_space(ip);

	error = xfs_free_file_space(ip, offset, len);
	if (error)
		return error;

	error = xfs_prepare_shift(ip, offset);
	if (error)
		return error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	while (!done) {
		error = xfs_bmap_collapse_extents(tp, ip, &next_fsb, shift_fsb,
				&done);
		if (error)
			goto out_trans_cancel;
		if (done)
			break;

		/* finish any deferred frees and roll the transaction */
		error = xfs_defer_finish(&tp);
		if (error)
			goto out_trans_cancel;
	}

	error = xfs_trans_commit(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;

out_trans_cancel:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

/*
 * xfs_insert_file_space()
 *	This routine creates hole space by shifting extents for the given file.
 *	The first thing we do is sync dirty data and invalidate the page cache
 *	over the region on which the insert range is working. We then split
 *	the extent at the given offset into two extents by calling
 *	xfs_bmap_split_extent, and shift all extent records lying between
 *	[offset, last allocated extent] to the right to make room for the hole.
 * RETURNS:
 *	0 on success
 *	errno on error
 */
int
xfs_insert_file_space(
	struct xfs_inode	*ip,
	loff_t			offset,
	loff_t			len)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;
	xfs_fileoff_t		stop_fsb = XFS_B_TO_FSB(mp, offset);
	xfs_fileoff_t		next_fsb = NULLFSBLOCK;
	xfs_fileoff_t		shift_fsb = XFS_B_TO_FSB(mp, len);
	bool			done = false;

	xfs_assert_ilocked(ip, XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL);

	trace_xfs_insert_file_space(ip);

	error = xfs_bmap_can_insert_extents(ip, stop_fsb, shift_fsb);
	if (error)
		return error;

	error = xfs_prepare_shift(ip, offset);
	if (error)
		return error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write,
			XFS_DIOSTRAT_SPACE_RES(mp, 0), 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	error = xfs_iext_count_extend(tp, ip, XFS_DATA_FORK,
			XFS_IEXT_PUNCH_HOLE_CNT);
	if (error)
		goto out_trans_cancel;

	/*
	 * The extent shifting code works on extent granularity. So, if stop_fsb
	 * is not the starting block of an extent, we need to split the extent
	 * at stop_fsb.
	 */
	error = xfs_bmap_split_extent(tp, ip, stop_fsb);
	if (error)
		goto out_trans_cancel;

	do {
		error = xfs_defer_finish(&tp);
		if (error)
			goto out_trans_cancel;

		error = xfs_bmap_insert_extents(tp, ip, &next_fsb, shift_fsb,
				&done, stop_fsb);
		if (error)
			goto out_trans_cancel;
	} while (!done);

	error = xfs_trans_commit(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;

out_trans_cancel:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

/*
 * We need to check that the format of the data fork in the temporary inode is
 * valid for the target inode before doing the swap. This is not a problem with
 * attr1 because of the fixed fork offset, but attr2 has a dynamically sized
 * data fork depending on the space the attribute fork is taking so we can get
 * invalid formats on the target inode.
 *
 * E.g. target has space for 7 extents in extent format, temp inode only has
 * space for 6. If we defragment down to 7 extents, then the tmp format is a
 * btree, but when swapped it needs to be in extent format. Hence we can't just
 * blindly swap data forks on attr2 filesystems.
 *
 * Note that we check the swap in both directions so that we don't end up with
 * a corrupt temporary inode, either.
 *
 * Note that fixing the way xfs_fsr sets up the attribute fork in the source
 * inode will prevent this situation from occurring, so all we do here is
 * reject and log the attempt. Basically we are putting the responsibility on
 * userspace to get this right.
 */
static int
xfs_swap_extents_check_format(
	struct xfs_inode	*ip,	/* target inode */
	struct xfs_inode	*tip)	/* tmp inode */
{
	struct xfs_ifork	*ifp = &ip->i_df;
	struct xfs_ifork	*tifp = &tip->i_df;

	/* User/group/project quota ids must match if quotas are enforced. */
	if (XFS_IS_QUOTA_ON(ip->i_mount) &&
	    (!uid_eq(VFS_I(ip)->i_uid, VFS_I(tip)->i_uid) ||
	     !gid_eq(VFS_I(ip)->i_gid, VFS_I(tip)->i_gid) ||
	     ip->i_projid != tip->i_projid))
		return -EINVAL;

	/* Should never get a local format */
	if (ifp->if_format == XFS_DINODE_FMT_LOCAL ||
	    tifp->if_format == XFS_DINODE_FMT_LOCAL)
		return -EINVAL;

	/*
	 * If the target inode has fewer extents than the temporary inode,
	 * why did userspace call us?
	 */
	if (ifp->if_nextents < tifp->if_nextents)
		return -EINVAL;

	/*
	 * If we have to use the (expensive) rmap swap method, we can
	 * handle any number of extents and any format.
	 */
	if (xfs_has_rmapbt(ip->i_mount))
		return 0;

	/*
	 * If the target inode is in extent form and the temp inode is in btree
	 * form then we will end up with the target inode in the wrong format
	 * as we already know there are fewer extents in the temp inode.
	 */
	if (ifp->if_format == XFS_DINODE_FMT_EXTENTS &&
	    tifp->if_format == XFS_DINODE_FMT_BTREE)
		return -EINVAL;

	/* Check temp in extent form to max in target */
	if (tifp->if_format == XFS_DINODE_FMT_EXTENTS &&
	    tifp->if_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
		return -EINVAL;

	/* Check target in extent form to max in temp */
	if (ifp->if_format == XFS_DINODE_FMT_EXTENTS &&
	    ifp->if_nextents > XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
		return -EINVAL;

	/*
	 * If we are in a btree format, check that the temp root block will fit
	 * in the target and that it has enough extents to be in btree format
	 * in the target.
	 *
	 * Note that we have to be careful to allow btree->extent conversions
	 * (a common defrag case) which will occur when the temp inode is in
	 * extent format...
	 */
	if (tifp->if_format == XFS_DINODE_FMT_BTREE) {
		if (xfs_inode_has_attr_fork(ip) &&
		    XFS_BMAP_BMDR_SPACE(tifp->if_broot) > xfs_inode_fork_boff(ip))
			return -EINVAL;
		if (tifp->if_nextents <= XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
			return -EINVAL;
	}

	/* Reciprocal target->temp btree format checks */
	if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
		if (xfs_inode_has_attr_fork(tip) &&
		    XFS_BMAP_BMDR_SPACE(ip->i_df.if_broot) > xfs_inode_fork_boff(tip))
			return -EINVAL;
		if (ifp->if_nextents <= XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
			return -EINVAL;
	}

	return 0;
}

static int
xfs_swap_extent_flush(
	struct xfs_inode	*ip)
{
	int			error;

	error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
	if (error)
		return error;
	truncate_pagecache_range(VFS_I(ip), 0, -1);

	/* Verify O_DIRECT for ftmp */
	if (VFS_I(ip)->i_mapping->nrpages)
		return -EINVAL;
	return 0;
}

/*
 * Move extents from one file to another, when rmap is enabled.
 */
STATIC int
xfs_swap_extent_rmap(
	struct xfs_trans	**tpp,
	struct xfs_inode	*ip,
	struct xfs_inode	*tip)
{
	struct xfs_trans	*tp = *tpp;
	struct xfs_bmbt_irec	irec;
	struct xfs_bmbt_irec	uirec;
	struct xfs_bmbt_irec	tirec;
	xfs_fileoff_t		offset_fsb;
	xfs_fileoff_t		end_fsb;
	xfs_filblks_t		count_fsb;
	int			error;
	xfs_filblks_t		ilen;
	xfs_filblks_t		rlen;
	int			nimaps;
	uint64_t		tip_flags2;

	/*
	 * If the source file has shared blocks, we must flag the donor
	 * file as having shared blocks so that we get the shared-block
	 * rmap functions when we go to fix up the rmaps. The flags
	 * will be switched for real later.
	 */
	tip_flags2 = tip->i_diflags2;
	if (ip->i_diflags2 & XFS_DIFLAG2_REFLINK)
		tip->i_diflags2 |= XFS_DIFLAG2_REFLINK;

	offset_fsb = 0;
	end_fsb = XFS_B_TO_FSB(ip->i_mount, i_size_read(VFS_I(ip)));
	count_fsb = (xfs_filblks_t)(end_fsb - offset_fsb);

	while (count_fsb) {
		/* Read extent from the donor file */
		nimaps = 1;
		error = xfs_bmapi_read(tip, offset_fsb, count_fsb, &tirec,
				&nimaps, 0);
		if (error)
			goto out;
		ASSERT(nimaps == 1);
		ASSERT(tirec.br_startblock != DELAYSTARTBLOCK);

		trace_xfs_swap_extent_rmap_remap(tip, &tirec);
		ilen = tirec.br_blockcount;

		/* Unmap the old blocks in the source file. */
		while (tirec.br_blockcount) {
			ASSERT(tp->t_highest_agno == NULLAGNUMBER);
			trace_xfs_swap_extent_rmap_remap_piece(tip, &tirec);

			/* Read extent from the source file */
			nimaps = 1;
			error = xfs_bmapi_read(ip, tirec.br_startoff,
					tirec.br_blockcount, &irec,
					&nimaps, 0);
			if (error)
				goto out;
			ASSERT(nimaps == 1);
			ASSERT(tirec.br_startoff == irec.br_startoff);
			trace_xfs_swap_extent_rmap_remap_piece(ip, &irec);

			/* Trim the extent. */
			uirec = tirec;
			uirec.br_blockcount = rlen = min_t(xfs_filblks_t,
					tirec.br_blockcount,
					irec.br_blockcount);
			trace_xfs_swap_extent_rmap_remap_piece(tip, &uirec);

			if (xfs_bmap_is_real_extent(&uirec)) {
				error = xfs_iext_count_extend(tp, ip,
						XFS_DATA_FORK,
						XFS_IEXT_SWAP_RMAP_CNT);
				if (error)
					goto out;
			}

			if (xfs_bmap_is_real_extent(&irec)) {
				error = xfs_iext_count_extend(tp, tip,
						XFS_DATA_FORK,
						XFS_IEXT_SWAP_RMAP_CNT);
				if (error)
					goto out;
			}

			/* Remove the mapping from the donor file. */
			xfs_bmap_unmap_extent(tp, tip, XFS_DATA_FORK, &uirec);

			/* Remove the mapping from the source file. */
			xfs_bmap_unmap_extent(tp, ip, XFS_DATA_FORK, &irec);

			/* Map the donor file's blocks into the source file. */
			xfs_bmap_map_extent(tp, ip, XFS_DATA_FORK, &uirec);

			/* Map the source file's blocks into the donor file. */
			xfs_bmap_map_extent(tp, tip, XFS_DATA_FORK, &irec);

			error = xfs_defer_finish(tpp);
			tp = *tpp;
			if (error)
				goto out;

			tirec.br_startoff += rlen;
			if (tirec.br_startblock != HOLESTARTBLOCK &&
			    tirec.br_startblock != DELAYSTARTBLOCK)
				tirec.br_startblock += rlen;
			tirec.br_blockcount -= rlen;
		}

		/* Roll on... */
		count_fsb -= ilen;
		offset_fsb += ilen;
	}

	tip->i_diflags2 = tip_flags2;
	return 0;

out:
	trace_xfs_swap_extent_rmap_error(ip, error, _RET_IP_);
	tip->i_diflags2 = tip_flags2;
	return error;
}

/* Swap the extents of two files by swapping data forks. */
STATIC int
xfs_swap_extent_forks(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_inode	*tip,
	int			*src_log_flags,
	int			*target_log_flags)
{
	xfs_filblks_t		aforkblks = 0;
	xfs_filblks_t		taforkblks = 0;
	xfs_extnum_t		junk;
	uint64_t		tmp;
	int			error;

	/*
	 * Count the number of extended attribute blocks
	 */
	if (xfs_inode_has_attr_fork(ip) && ip->i_af.if_nextents > 0 &&
	    ip->i_af.if_format != XFS_DINODE_FMT_LOCAL) {
		error = xfs_bmap_count_blocks(tp, ip, XFS_ATTR_FORK, &junk,
				&aforkblks);
		if (error)
			return error;
	}
	if (xfs_inode_has_attr_fork(tip) && tip->i_af.if_nextents > 0 &&
	    tip->i_af.if_format != XFS_DINODE_FMT_LOCAL) {
		error = xfs_bmap_count_blocks(tp, tip, XFS_ATTR_FORK, &junk,
				&taforkblks);
		if (error)
			return error;
	}

	/*
	 * Btree format (v3) inodes have the inode number stamped in the bmbt
	 * block headers. We can't start changing the bmbt blocks until the
	 * inode owner change is logged so recovery does the right thing in the
	 * event of a crash. Set the owner change log flags now and leave the
	 * bmbt scan as the last step.
	 */
	if (xfs_has_v3inodes(ip->i_mount)) {
		if (ip->i_df.if_format == XFS_DINODE_FMT_BTREE)
			(*target_log_flags) |= XFS_ILOG_DOWNER;
		if (tip->i_df.if_format == XFS_DINODE_FMT_BTREE)
			(*src_log_flags) |= XFS_ILOG_DOWNER;
	}

	/*
	 * Swap the data forks of the inodes
	 */
	swap(ip->i_df, tip->i_df);

	/*
	 * Fix the on-disk inode values
	 */
	tmp = (uint64_t)ip->i_nblocks;
	ip->i_nblocks = tip->i_nblocks - taforkblks + aforkblks;
	tip->i_nblocks = tmp + taforkblks - aforkblks;

	/*
	 * The extents in the source inode could still contain speculative
	 * preallocation beyond EOF (e.g. the file is open but not modified
	 * while defrag is in progress). In that case, we need to copy over the
	 * number of delalloc blocks the data fork in the source inode is
	 * tracking beyond EOF so that when the fork is truncated away when the
	 * temporary inode is unlinked we don't underrun the i_delayed_blks
	 * counter on that inode.
	 */
	ASSERT(tip->i_delayed_blks == 0);
	tip->i_delayed_blks = ip->i_delayed_blks;
	ip->i_delayed_blks = 0;

	switch (ip->i_df.if_format) {
	case XFS_DINODE_FMT_EXTENTS:
		(*src_log_flags) |= XFS_ILOG_DEXT;
		break;
	case XFS_DINODE_FMT_BTREE:
		ASSERT(!xfs_has_v3inodes(ip->i_mount) ||
		       (*src_log_flags & XFS_ILOG_DOWNER));
		(*src_log_flags) |= XFS_ILOG_DBROOT;
		break;
	}

	switch (tip->i_df.if_format) {
	case XFS_DINODE_FMT_EXTENTS:
		(*target_log_flags) |= XFS_ILOG_DEXT;
		break;
	case XFS_DINODE_FMT_BTREE:
		(*target_log_flags) |= XFS_ILOG_DBROOT;
		ASSERT(!xfs_has_v3inodes(ip->i_mount) ||
		       (*target_log_flags & XFS_ILOG_DOWNER));
		break;
	}

	return 0;
}

/*
 * Fix up the owners of the bmbt blocks to refer to the current inode. The
 * change owner scan attempts to order all modified buffers in the current
 * transaction. In the event of ordered buffer failure, the offending buffer is
 * physically logged as a fallback and the scan returns -EAGAIN. We must roll
 * the transaction in this case to replenish the fallback log reservation and
 * restart the scan. This process repeats until the scan completes.
 */
static int
xfs_swap_change_owner(
	struct xfs_trans	**tpp,
	struct xfs_inode	*ip,
	struct xfs_inode	*tmpip)
{
	int			error;
	struct xfs_trans	*tp = *tpp;

	do {
		error = xfs_bmbt_change_owner(tp, ip, XFS_DATA_FORK, ip->i_ino,
					      NULL);
		/* success or fatal error */
		if (error != -EAGAIN)
			break;

		error = xfs_trans_roll(tpp);
		if (error)
			break;
		tp = *tpp;

		/*
		 * Redirty both inodes so they can relog and keep the log tail
		 * moving forward.
		 */
		xfs_trans_ijoin(tp, ip, 0);
		xfs_trans_ijoin(tp, tmpip, 0);
		xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
		xfs_trans_log_inode(tp, tmpip, XFS_ILOG_CORE);
	} while (true);

	return error;
}

int
xfs_swap_extents(
	struct xfs_inode	*ip,	/* target inode */
	struct xfs_inode	*tip,	/* tmp inode */
	struct xfs_swapext	*sxp)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	struct xfs_bstat	*sbp = &sxp->sx_stat;
	int			src_log_flags, target_log_flags;
	int			error = 0;
	uint64_t		f;
	int			resblks = 0;
	unsigned int		flags = 0;
	struct timespec64	ctime, mtime;

	/*
	 * Lock the inodes against other IO, page faults and truncate to
	 * begin with. Then we can ensure the inodes are flushed and have no
	 * page cache safely. Once we have done this we can take the ilocks and
	 * do the rest of the checks.
	 */
	lock_two_nondirectories(VFS_I(ip), VFS_I(tip));
	filemap_invalidate_lock_two(VFS_I(ip)->i_mapping,
				    VFS_I(tip)->i_mapping);

	/* Verify that both files have the same format */
	if ((VFS_I(ip)->i_mode & S_IFMT) != (VFS_I(tip)->i_mode & S_IFMT)) {
		error = -EINVAL;
		goto out_unlock;
	}

	/* Verify both files are either real-time or non-realtime */
	if (XFS_IS_REALTIME_INODE(ip) != XFS_IS_REALTIME_INODE(tip)) {
		error = -EINVAL;
		goto out_unlock;
	}

	error = xfs_qm_dqattach(ip);
	if (error)
		goto out_unlock;

	error = xfs_qm_dqattach(tip);
	if (error)
		goto out_unlock;

	error = xfs_swap_extent_flush(ip);
	if (error)
		goto out_unlock;
	error = xfs_swap_extent_flush(tip);
	if (error)
		goto out_unlock;

	if (xfs_inode_has_cow_data(tip)) {
		error = xfs_reflink_cancel_cow_range(tip, 0, NULLFILEOFF, true);
		if (error)
			goto out_unlock;
	}

	/*
	 * Extent "swapping" with rmap requires a permanent reservation and
	 * a block reservation because it's really just a remap operation
	 * performed with log redo items!
	 */
	if (xfs_has_rmapbt(mp)) {
		int		w = XFS_DATA_FORK;
		uint32_t	ipnext = ip->i_df.if_nextents;
		uint32_t	tipnext = tip->i_df.if_nextents;

		/*
		 * Conceptually this shouldn't affect the shape of either bmbt,
		 * but since we atomically move extents one by one, we reserve
		 * enough space to rebuild both trees.
		 */
		resblks = XFS_SWAP_RMAP_SPACE_RES(mp, ipnext, w);
		resblks += XFS_SWAP_RMAP_SPACE_RES(mp, tipnext, w);

		/*
		 * If either inode straddles a bmapbt block allocation boundary,
		 * the rmapbt algorithm triggers repeated allocs and frees as
		 * extents are remapped. This can exhaust the block reservation
		 * prematurely and cause shutdown. Return freed blocks to the
		 * transaction reservation to counter this behavior.
		 */
		flags |= XFS_TRANS_RES_FDBLKS;
	}
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, flags,
				&tp);
	if (error)
		goto out_unlock;

	/*
	 * Lock and join the inodes to the transaction so that transaction
	 * commit or cancel will unlock the inodes from this point onwards.
	 */
	xfs_lock_two_inodes(ip, XFS_ILOCK_EXCL, tip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);
	xfs_trans_ijoin(tp, tip, 0);

	/* Verify all data are being swapped */
	if (sxp->sx_offset != 0 ||
	    sxp->sx_length != ip->i_disk_size ||
	    sxp->sx_length != tip->i_disk_size) {
		error = -EFAULT;
		goto out_trans_cancel;
	}

	trace_xfs_swap_extent_before(ip, 0);
	trace_xfs_swap_extent_before(tip, 1);

	/* check inode formats now that data is flushed */
	error = xfs_swap_extents_check_format(ip, tip);
	if (error) {
		xfs_notice(mp,
		    "%s: inode 0x%llx format is incompatible for exchanging.",
				__func__, ip->i_ino);
		goto out_trans_cancel;
	}

	/*
	 * Compare the current change & modify times with those passed in.
	 * If they differ, we abort this swap. This is the mechanism used to
	 * assure the calling process that the file was not changed out from
	 * under it.
	 */
	ctime = inode_get_ctime(VFS_I(ip));
	mtime = inode_get_mtime(VFS_I(ip));
	if ((sbp->bs_ctime.tv_sec != ctime.tv_sec) ||
	    (sbp->bs_ctime.tv_nsec != ctime.tv_nsec) ||
	    (sbp->bs_mtime.tv_sec != mtime.tv_sec) ||
	    (sbp->bs_mtime.tv_nsec != mtime.tv_nsec)) {
		error = -EBUSY;
		goto out_trans_cancel;
	}

	/*
	 * Note the trickiness in setting the log flags - we set the owner log
	 * flag on the opposite inode (i.e. the inode we are setting the new
	 * owner to be) because once we swap the forks and log that, log
	 * recovery is going to see the fork as owned by the swapped inode,
	 * not the pre-swapped inodes.
	 */
	src_log_flags = XFS_ILOG_CORE;
	target_log_flags = XFS_ILOG_CORE;

	if (xfs_has_rmapbt(mp))
		error = xfs_swap_extent_rmap(&tp, ip, tip);
	else
		error = xfs_swap_extent_forks(tp, ip, tip, &src_log_flags,
				&target_log_flags);
	if (error)
		goto out_trans_cancel;

	/* Do we have to swap reflink flags? */
	if ((ip->i_diflags2 & XFS_DIFLAG2_REFLINK) ^
	    (tip->i_diflags2 & XFS_DIFLAG2_REFLINK)) {
		f = ip->i_diflags2 & XFS_DIFLAG2_REFLINK;
		ip->i_diflags2 &= ~XFS_DIFLAG2_REFLINK;
		ip->i_diflags2 |= tip->i_diflags2 & XFS_DIFLAG2_REFLINK;
		tip->i_diflags2 &= ~XFS_DIFLAG2_REFLINK;
		tip->i_diflags2 |= f & XFS_DIFLAG2_REFLINK;
	}

	/* Swap the cow forks. */
	if (xfs_has_reflink(mp)) {
		ASSERT(!ip->i_cowfp ||
		       ip->i_cowfp->if_format == XFS_DINODE_FMT_EXTENTS);
		ASSERT(!tip->i_cowfp ||
		       tip->i_cowfp->if_format == XFS_DINODE_FMT_EXTENTS);

		swap(ip->i_cowfp, tip->i_cowfp);

		if (ip->i_cowfp && ip->i_cowfp->if_bytes)
			xfs_inode_set_cowblocks_tag(ip);
		else
			xfs_inode_clear_cowblocks_tag(ip);
		if (tip->i_cowfp && tip->i_cowfp->if_bytes)
			xfs_inode_set_cowblocks_tag(tip);
		else
			xfs_inode_clear_cowblocks_tag(tip);
	}

	xfs_trans_log_inode(tp, ip, src_log_flags);
	xfs_trans_log_inode(tp, tip, target_log_flags);

	/*
	 * The extent forks have been swapped, but crc=1,rmapbt=0 filesystems
	 * have inode number owner values in the bmbt blocks that still refer to
	 * the old inode. Scan each bmbt to fix up the owner values with the
	 * inode number of the current inode.
	 */
	if (src_log_flags & XFS_ILOG_DOWNER) {
		error = xfs_swap_change_owner(&tp, ip, tip);
		if (error)
			goto out_trans_cancel;
	}
	if (target_log_flags & XFS_ILOG_DOWNER) {
		error = xfs_swap_change_owner(&tp, tip, ip);
		if (error)
			goto out_trans_cancel;
	}

	/*
	 * If this is a synchronous mount, make sure that the
	 * transaction goes to disk before returning to the user.
	 */
	if (xfs_has_wsync(mp))
		xfs_trans_set_sync(tp);

	error = xfs_trans_commit(tp);

	trace_xfs_swap_extent_after(ip, 0);
	trace_xfs_swap_extent_after(tip, 1);

out_unlock_ilock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	xfs_iunlock(tip, XFS_ILOCK_EXCL);
out_unlock:
	filemap_invalidate_unlock_two(VFS_I(ip)->i_mapping,
				      VFS_I(tip)->i_mapping);
	unlock_two_nondirectories(VFS_I(ip), VFS_I(tip));
	return error;

out_trans_cancel:
	xfs_trans_cancel(tp);
	goto out_unlock_ilock;
}