1 // SPDX-License-Identifier: GPL-2.0+ 2 /* 3 * Copyright (C) 2016 Oracle. All Rights Reserved. 4 * Author: Darrick J. Wong <darrick.wong@oracle.com> 5 */ 6 #include "xfs.h" 7 #include "xfs_fs.h" 8 #include "xfs_shared.h" 9 #include "xfs_format.h" 10 #include "xfs_log_format.h" 11 #include "xfs_trans_resv.h" 12 #include "xfs_mount.h" 13 #include "xfs_defer.h" 14 #include "xfs_inode.h" 15 #include "xfs_trans.h" 16 #include "xfs_bmap.h" 17 #include "xfs_bmap_util.h" 18 #include "xfs_trace.h" 19 #include "xfs_icache.h" 20 #include "xfs_btree.h" 21 #include "xfs_refcount_btree.h" 22 #include "xfs_refcount.h" 23 #include "xfs_bmap_btree.h" 24 #include "xfs_trans_space.h" 25 #include "xfs_bit.h" 26 #include "xfs_alloc.h" 27 #include "xfs_quota.h" 28 #include "xfs_reflink.h" 29 #include "xfs_iomap.h" 30 #include "xfs_ag.h" 31 #include "xfs_ag_resv.h" 32 #include "xfs_health.h" 33 #include "xfs_rtrefcount_btree.h" 34 #include "xfs_rtalloc.h" 35 #include "xfs_rtgroup.h" 36 #include "xfs_metafile.h" 37 38 /* 39 * Copy on Write of Shared Blocks 40 * 41 * XFS must preserve "the usual" file semantics even when two files share 42 * the same physical blocks. This means that a write to one file must not 43 * alter the blocks in a different file; the way that we'll do that is 44 * through the use of a copy-on-write mechanism. At a high level, that 45 * means that when we want to write to a shared block, we allocate a new 46 * block, write the data to the new block, and if that succeeds we map the 47 * new block into the file. 48 * 49 * XFS provides a "delayed allocation" mechanism that defers the allocation 50 * of disk blocks to dirty-but-not-yet-mapped file blocks as long as 51 * possible. This reduces fragmentation by enabling the filesystem to ask 52 * for bigger chunks less often, which is exactly what we want for CoW. 53 * 54 * The delalloc mechanism begins when the kernel wants to make a block 55 * writable (write_begin or page_mkwrite). If the offset is not mapped, we 56 * create a delalloc mapping, which is a regular in-core extent, but without 57 * a real startblock. (For delalloc mappings, the startblock encodes both 58 * a flag that this is a delalloc mapping, and a worst-case estimate of how 59 * many blocks might be required to put the mapping into the BMBT.) delalloc 60 * mappings are a reservation against the free space in the filesystem; 61 * adjacent mappings can also be combined into fewer larger mappings. 62 * 63 * As an optimization, the CoW extent size hint (cowextsz) creates 64 * outsized aligned delalloc reservations in the hope of landing out of 65 * order nearby CoW writes in a single extent on disk, thereby reducing 66 * fragmentation and improving future performance. 67 * 68 * D: --RRRRRRSSSRRRRRRRR--- (data fork) 69 * C: ------DDDDDDD--------- (CoW fork) 70 * 71 * When dirty pages are being written out (typically in writepage), the 72 * delalloc reservations are converted into unwritten mappings by 73 * allocating blocks and replacing the delalloc mapping with real ones. 74 * A delalloc mapping can be replaced by several unwritten ones if the 75 * free space is fragmented. 76 * 77 * D: --RRRRRRSSSRRRRRRRR--- 78 * C: ------UUUUUUU--------- 79 * 80 * We want to adapt the delalloc mechanism for copy-on-write, since the 81 * write paths are similar. The first two steps (creating the reservation 82 * and allocating the blocks) are exactly the same as delalloc except that 83 * the mappings must be stored in a separate CoW fork because we do not want 84 * to disturb the mapping in the data fork until we're sure that the write 85 * succeeded. IO completion in this case is the process of removing the old 86 * mapping from the data fork and moving the new mapping from the CoW fork to 87 * the data fork. This will be discussed shortly. 88 * 89 * For now, unaligned directio writes will be bounced back to the page cache. 90 * Block-aligned directio writes will use the same mechanism as buffered 91 * writes. 92 * 93 * Just prior to submitting the actual disk write requests, we convert 94 * the extents representing the range of the file actually being written 95 * (as opposed to extra pieces created for the cowextsize hint) to real 96 * extents. This will become important in the next step: 97 * 98 * D: --RRRRRRSSSRRRRRRRR--- 99 * C: ------UUrrUUU--------- 100 * 101 * CoW remapping must be done after the data block write completes, 102 * because we don't want to destroy the old data fork map until we're sure 103 * the new block has been written. Since the new mappings are kept in a 104 * separate fork, we can simply iterate these mappings to find the ones 105 * that cover the file blocks that we just CoW'd. For each extent, simply 106 * unmap the corresponding range in the data fork, map the new range into 107 * the data fork, and remove the extent from the CoW fork. Because of 108 * the presence of the cowextsize hint, however, we must be careful 109 * only to remap the blocks that we've actually written out -- we must 110 * never remap delalloc reservations nor CoW staging blocks that have 111 * yet to be written. This corresponds exactly to the real extents in 112 * the CoW fork: 113 * 114 * D: --RRRRRRrrSRRRRRRRR--- 115 * C: ------UU--UUU--------- 116 * 117 * Since the remapping operation can be applied to an arbitrary file 118 * range, we record the need for the remap step as a flag in the ioend 119 * instead of declaring a new IO type. This is required for direct io 120 * because we only have ioend for the whole dio, and we have to be able to 121 * remember the presence of unwritten blocks and CoW blocks with a single 122 * ioend structure. Better yet, the more ground we can cover with one 123 * ioend, the better. 124 */ 125 126 /* 127 * Given a file mapping for the data device, find the lowest-numbered run of 128 * shared blocks within that mapping and return it in shared_offset/shared_len. 129 * The offset is relative to the start of irec. 130 * 131 * If find_end_of_shared is true, return the longest contiguous extent of shared 132 * blocks. If there are no shared extents, shared_offset and shared_len will be 133 * set to 0; 134 */ 135 static int 136 xfs_reflink_find_shared( 137 struct xfs_mount *mp, 138 struct xfs_trans *tp, 139 const struct xfs_bmbt_irec *irec, 140 xfs_extlen_t *shared_offset, 141 xfs_extlen_t *shared_len, 142 bool find_end_of_shared) 143 { 144 struct xfs_buf *agbp; 145 struct xfs_perag *pag; 146 struct xfs_btree_cur *cur; 147 int error; 148 xfs_agblock_t orig_bno, found_bno; 149 150 pag = xfs_perag_get(mp, XFS_FSB_TO_AGNO(mp, irec->br_startblock)); 151 orig_bno = XFS_FSB_TO_AGBNO(mp, irec->br_startblock); 152 153 error = xfs_alloc_read_agf(pag, tp, 0, &agbp); 154 if (error) 155 goto out; 156 157 cur = xfs_refcountbt_init_cursor(mp, tp, agbp, pag); 158 error = xfs_refcount_find_shared(cur, orig_bno, irec->br_blockcount, 159 &found_bno, shared_len, find_end_of_shared); 160 xfs_btree_del_cursor(cur, error); 161 xfs_trans_brelse(tp, agbp); 162 163 if (!error && *shared_len) 164 *shared_offset = found_bno - orig_bno; 165 out: 166 xfs_perag_put(pag); 167 return error; 168 } 169 170 /* 171 * Given a file mapping for the rt device, find the lowest-numbered run of 172 * shared blocks within that mapping and return it in shared_offset/shared_len. 173 * The offset is relative to the start of irec. 174 * 175 * If find_end_of_shared is true, return the longest contiguous extent of shared 176 * blocks. If there are no shared extents, shared_offset and shared_len will be 177 * set to 0; 178 */ 179 static int 180 xfs_reflink_find_rtshared( 181 struct xfs_mount *mp, 182 struct xfs_trans *tp, 183 const struct xfs_bmbt_irec *irec, 184 xfs_extlen_t *shared_offset, 185 xfs_extlen_t *shared_len, 186 bool find_end_of_shared) 187 { 188 struct xfs_rtgroup *rtg; 189 struct xfs_btree_cur *cur; 190 xfs_rgblock_t orig_bno; 191 xfs_agblock_t found_bno; 192 int error; 193 194 BUILD_BUG_ON(NULLRGBLOCK != NULLAGBLOCK); 195 196 /* 197 * Note: this uses the not quite correct xfs_agblock_t type because 198 * xfs_refcount_find_shared is shared between the RT and data device 199 * refcount code. 200 */ 201 orig_bno = xfs_rtb_to_rgbno(mp, irec->br_startblock); 202 rtg = xfs_rtgroup_get(mp, xfs_rtb_to_rgno(mp, irec->br_startblock)); 203 204 xfs_rtgroup_lock(rtg, XFS_RTGLOCK_REFCOUNT); 205 cur = xfs_rtrefcountbt_init_cursor(tp, rtg); 206 error = xfs_refcount_find_shared(cur, orig_bno, irec->br_blockcount, 207 &found_bno, shared_len, find_end_of_shared); 208 xfs_btree_del_cursor(cur, error); 209 xfs_rtgroup_unlock(rtg, XFS_RTGLOCK_REFCOUNT); 210 xfs_rtgroup_put(rtg); 211 212 if (!error && *shared_len) 213 *shared_offset = found_bno - orig_bno; 214 return error; 215 } 216 217 /* 218 * Trim the mapping to the next block where there's a change in the 219 * shared/unshared status. More specifically, this means that we 220 * find the lowest-numbered extent of shared blocks that coincides with 221 * the given block mapping. If the shared extent overlaps the start of 222 * the mapping, trim the mapping to the end of the shared extent. If 223 * the shared region intersects the mapping, trim the mapping to the 224 * start of the shared extent. If there are no shared regions that 225 * overlap, just return the original extent. 226 */ 227 int 228 xfs_reflink_trim_around_shared( 229 struct xfs_inode *ip, 230 struct xfs_bmbt_irec *irec, 231 bool *shared) 232 { 233 struct xfs_mount *mp = ip->i_mount; 234 xfs_extlen_t shared_offset, shared_len; 235 int error = 0; 236 237 /* Holes, unwritten, and delalloc extents cannot be shared */ 238 if (!xfs_is_cow_inode(ip) || !xfs_bmap_is_written_extent(irec)) { 239 *shared = false; 240 return 0; 241 } 242 243 trace_xfs_reflink_trim_around_shared(ip, irec); 244 245 if (XFS_IS_REALTIME_INODE(ip)) 246 error = xfs_reflink_find_rtshared(mp, NULL, irec, 247 &shared_offset, &shared_len, true); 248 else 249 error = xfs_reflink_find_shared(mp, NULL, irec, 250 &shared_offset, &shared_len, true); 251 if (error) 252 return error; 253 254 if (!shared_len) { 255 /* No shared blocks at all. */ 256 *shared = false; 257 } else if (!shared_offset) { 258 /* 259 * The start of this mapping points to shared space. Truncate 260 * the mapping at the end of the shared region so that a 261 * subsequent iteration starts at the start of the unshared 262 * region. 263 */ 264 irec->br_blockcount = shared_len; 265 *shared = true; 266 } else { 267 /* 268 * There's a shared region that doesn't start at the beginning 269 * of the mapping. Truncate the mapping at the start of the 270 * shared extent so that a subsequent iteration starts at the 271 * start of the shared region. 272 */ 273 irec->br_blockcount = shared_offset; 274 *shared = false; 275 } 276 return 0; 277 } 278 279 int 280 xfs_bmap_trim_cow( 281 struct xfs_inode *ip, 282 struct xfs_bmbt_irec *imap, 283 bool *shared) 284 { 285 /* We can't update any real extents in always COW mode. */ 286 if (xfs_is_always_cow_inode(ip) && 287 !isnullstartblock(imap->br_startblock)) { 288 *shared = true; 289 return 0; 290 } 291 292 /* Trim the mapping to the nearest shared extent boundary. */ 293 return xfs_reflink_trim_around_shared(ip, imap, shared); 294 } 295 296 static int 297 xfs_reflink_convert_cow_locked( 298 struct xfs_inode *ip, 299 xfs_fileoff_t offset_fsb, 300 xfs_filblks_t count_fsb) 301 { 302 struct xfs_iext_cursor icur; 303 struct xfs_bmbt_irec got; 304 struct xfs_btree_cur *dummy_cur = NULL; 305 int dummy_logflags; 306 int error = 0; 307 308 if (!xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &got)) 309 return 0; 310 311 do { 312 if (got.br_startoff >= offset_fsb + count_fsb) 313 break; 314 if (got.br_state == XFS_EXT_NORM) 315 continue; 316 if (WARN_ON_ONCE(isnullstartblock(got.br_startblock))) 317 return -EIO; 318 319 xfs_trim_extent(&got, offset_fsb, count_fsb); 320 if (!got.br_blockcount) 321 continue; 322 323 got.br_state = XFS_EXT_NORM; 324 error = xfs_bmap_add_extent_unwritten_real(NULL, ip, 325 XFS_COW_FORK, &icur, &dummy_cur, &got, 326 &dummy_logflags); 327 if (error) 328 return error; 329 } while (xfs_iext_next_extent(ip->i_cowfp, &icur, &got)); 330 331 return error; 332 } 333 334 /* Convert all of the unwritten CoW extents in a file's range to real ones. */ 335 int 336 xfs_reflink_convert_cow( 337 struct xfs_inode *ip, 338 xfs_off_t offset, 339 xfs_off_t count) 340 { 341 struct xfs_mount *mp = ip->i_mount; 342 xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset); 343 xfs_fileoff_t end_fsb = XFS_B_TO_FSB(mp, offset + count); 344 xfs_filblks_t count_fsb = end_fsb - offset_fsb; 345 int error; 346 347 ASSERT(count != 0); 348 349 xfs_ilock(ip, XFS_ILOCK_EXCL); 350 error = xfs_reflink_convert_cow_locked(ip, offset_fsb, count_fsb); 351 xfs_iunlock(ip, XFS_ILOCK_EXCL); 352 return error; 353 } 354 355 /* 356 * Find the extent that maps the given range in the COW fork. Even if the extent 357 * is not shared we might have a preallocation for it in the COW fork. If so we 358 * use it that rather than trigger a new allocation. 359 */ 360 static int 361 xfs_find_trim_cow_extent( 362 struct xfs_inode *ip, 363 struct xfs_bmbt_irec *imap, 364 struct xfs_bmbt_irec *cmap, 365 bool *shared, 366 bool *found) 367 { 368 xfs_fileoff_t offset_fsb = imap->br_startoff; 369 xfs_filblks_t count_fsb = imap->br_blockcount; 370 struct xfs_iext_cursor icur; 371 372 *found = false; 373 374 /* 375 * If we don't find an overlapping extent, trim the range we need to 376 * allocate to fit the hole we found. 377 */ 378 if (!xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, cmap)) 379 cmap->br_startoff = offset_fsb + count_fsb; 380 if (cmap->br_startoff > offset_fsb) { 381 xfs_trim_extent(imap, imap->br_startoff, 382 cmap->br_startoff - imap->br_startoff); 383 return xfs_bmap_trim_cow(ip, imap, shared); 384 } 385 386 *shared = true; 387 if (isnullstartblock(cmap->br_startblock)) { 388 xfs_trim_extent(imap, cmap->br_startoff, cmap->br_blockcount); 389 return 0; 390 } 391 392 /* real extent found - no need to allocate */ 393 xfs_trim_extent(cmap, offset_fsb, count_fsb); 394 *found = true; 395 return 0; 396 } 397 398 static int 399 xfs_reflink_convert_unwritten( 400 struct xfs_inode *ip, 401 struct xfs_bmbt_irec *imap, 402 struct xfs_bmbt_irec *cmap, 403 bool convert_now) 404 { 405 xfs_fileoff_t offset_fsb = imap->br_startoff; 406 xfs_filblks_t count_fsb = imap->br_blockcount; 407 int error; 408 409 /* 410 * cmap might larger than imap due to cowextsize hint. 411 */ 412 xfs_trim_extent(cmap, offset_fsb, count_fsb); 413 414 /* 415 * COW fork extents are supposed to remain unwritten until we're ready 416 * to initiate a disk write. For direct I/O we are going to write the 417 * data and need the conversion, but for buffered writes we're done. 418 */ 419 if (!convert_now || cmap->br_state == XFS_EXT_NORM) 420 return 0; 421 422 trace_xfs_reflink_convert_cow(ip, cmap); 423 424 error = xfs_reflink_convert_cow_locked(ip, offset_fsb, count_fsb); 425 if (!error) 426 cmap->br_state = XFS_EXT_NORM; 427 428 return error; 429 } 430 431 static int 432 xfs_reflink_fill_cow_hole( 433 struct xfs_inode *ip, 434 struct xfs_bmbt_irec *imap, 435 struct xfs_bmbt_irec *cmap, 436 bool *shared, 437 uint *lockmode, 438 bool convert_now) 439 { 440 struct xfs_mount *mp = ip->i_mount; 441 struct xfs_trans *tp; 442 xfs_filblks_t resaligned; 443 unsigned int dblocks = 0, rblocks = 0; 444 int nimaps; 445 int error; 446 bool found; 447 448 resaligned = xfs_aligned_fsb_count(imap->br_startoff, 449 imap->br_blockcount, xfs_get_cowextsz_hint(ip)); 450 if (XFS_IS_REALTIME_INODE(ip)) { 451 dblocks = XFS_DIOSTRAT_SPACE_RES(mp, 0); 452 rblocks = resaligned; 453 } else { 454 dblocks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned); 455 rblocks = 0; 456 } 457 458 xfs_iunlock(ip, *lockmode); 459 *lockmode = 0; 460 461 error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write, dblocks, 462 rblocks, false, &tp); 463 if (error) 464 return error; 465 466 *lockmode = XFS_ILOCK_EXCL; 467 468 error = xfs_find_trim_cow_extent(ip, imap, cmap, shared, &found); 469 if (error || !*shared) 470 goto out_trans_cancel; 471 472 if (found) { 473 xfs_trans_cancel(tp); 474 goto convert; 475 } 476 477 /* Allocate the entire reservation as unwritten blocks. */ 478 nimaps = 1; 479 error = xfs_bmapi_write(tp, ip, imap->br_startoff, imap->br_blockcount, 480 XFS_BMAPI_COWFORK | XFS_BMAPI_PREALLOC, 0, cmap, 481 &nimaps); 482 if (error) 483 goto out_trans_cancel; 484 485 xfs_inode_set_cowblocks_tag(ip); 486 error = xfs_trans_commit(tp); 487 if (error) 488 return error; 489 490 convert: 491 return xfs_reflink_convert_unwritten(ip, imap, cmap, convert_now); 492 493 out_trans_cancel: 494 xfs_trans_cancel(tp); 495 return error; 496 } 497 498 static int 499 xfs_reflink_fill_delalloc( 500 struct xfs_inode *ip, 501 struct xfs_bmbt_irec *imap, 502 struct xfs_bmbt_irec *cmap, 503 bool *shared, 504 uint *lockmode, 505 bool convert_now) 506 { 507 struct xfs_mount *mp = ip->i_mount; 508 struct xfs_trans *tp; 509 int nimaps; 510 int error; 511 bool found; 512 513 do { 514 xfs_iunlock(ip, *lockmode); 515 *lockmode = 0; 516 517 error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write, 0, 0, 518 false, &tp); 519 if (error) 520 return error; 521 522 *lockmode = XFS_ILOCK_EXCL; 523 524 error = xfs_find_trim_cow_extent(ip, imap, cmap, shared, 525 &found); 526 if (error || !*shared) 527 goto out_trans_cancel; 528 529 if (found) { 530 xfs_trans_cancel(tp); 531 break; 532 } 533 534 ASSERT(isnullstartblock(cmap->br_startblock) || 535 cmap->br_startblock == DELAYSTARTBLOCK); 536 537 /* 538 * Replace delalloc reservation with an unwritten extent. 539 */ 540 nimaps = 1; 541 error = xfs_bmapi_write(tp, ip, cmap->br_startoff, 542 cmap->br_blockcount, 543 XFS_BMAPI_COWFORK | XFS_BMAPI_PREALLOC, 0, 544 cmap, &nimaps); 545 if (error) 546 goto out_trans_cancel; 547 548 xfs_inode_set_cowblocks_tag(ip); 549 error = xfs_trans_commit(tp); 550 if (error) 551 return error; 552 } while (cmap->br_startoff + cmap->br_blockcount <= imap->br_startoff); 553 554 return xfs_reflink_convert_unwritten(ip, imap, cmap, convert_now); 555 556 out_trans_cancel: 557 xfs_trans_cancel(tp); 558 return error; 559 } 560 561 /* Allocate all CoW reservations covering a range of blocks in a file. */ 562 int 563 xfs_reflink_allocate_cow( 564 struct xfs_inode *ip, 565 struct xfs_bmbt_irec *imap, 566 struct xfs_bmbt_irec *cmap, 567 bool *shared, 568 uint *lockmode, 569 bool convert_now) 570 { 571 int error; 572 bool found; 573 574 xfs_assert_ilocked(ip, XFS_ILOCK_EXCL); 575 if (!ip->i_cowfp) { 576 ASSERT(!xfs_is_reflink_inode(ip)); 577 xfs_ifork_init_cow(ip); 578 } 579 580 error = xfs_find_trim_cow_extent(ip, imap, cmap, shared, &found); 581 if (error || !*shared) 582 return error; 583 584 /* CoW fork has a real extent */ 585 if (found) 586 return xfs_reflink_convert_unwritten(ip, imap, cmap, 587 convert_now); 588 589 /* 590 * CoW fork does not have an extent and data extent is shared. 591 * Allocate a real extent in the CoW fork. 592 */ 593 if (cmap->br_startoff > imap->br_startoff) 594 return xfs_reflink_fill_cow_hole(ip, imap, cmap, shared, 595 lockmode, convert_now); 596 597 /* 598 * CoW fork has a delalloc reservation. Replace it with a real extent. 599 * There may or may not be a data fork mapping. 600 */ 601 if (isnullstartblock(cmap->br_startblock) || 602 cmap->br_startblock == DELAYSTARTBLOCK) 603 return xfs_reflink_fill_delalloc(ip, imap, cmap, shared, 604 lockmode, convert_now); 605 606 /* Shouldn't get here. */ 607 ASSERT(0); 608 return -EFSCORRUPTED; 609 } 610 611 /* 612 * Cancel CoW reservations for some block range of an inode. 613 * 614 * If cancel_real is true this function cancels all COW fork extents for the 615 * inode; if cancel_real is false, real extents are not cleared. 616 * 617 * Caller must have already joined the inode to the current transaction. The 618 * inode will be joined to the transaction returned to the caller. 619 */ 620 int 621 xfs_reflink_cancel_cow_blocks( 622 struct xfs_inode *ip, 623 struct xfs_trans **tpp, 624 xfs_fileoff_t offset_fsb, 625 xfs_fileoff_t end_fsb, 626 bool cancel_real) 627 { 628 struct xfs_ifork *ifp = xfs_ifork_ptr(ip, XFS_COW_FORK); 629 struct xfs_bmbt_irec got, del; 630 struct xfs_iext_cursor icur; 631 bool isrt = XFS_IS_REALTIME_INODE(ip); 632 int error = 0; 633 634 if (!xfs_inode_has_cow_data(ip)) 635 return 0; 636 if (!xfs_iext_lookup_extent_before(ip, ifp, &end_fsb, &icur, &got)) 637 return 0; 638 639 /* Walk backwards until we're out of the I/O range... */ 640 while (got.br_startoff + got.br_blockcount > offset_fsb) { 641 del = got; 642 xfs_trim_extent(&del, offset_fsb, end_fsb - offset_fsb); 643 644 /* Extent delete may have bumped ext forward */ 645 if (!del.br_blockcount) { 646 xfs_iext_prev(ifp, &icur); 647 goto next_extent; 648 } 649 650 trace_xfs_reflink_cancel_cow(ip, &del); 651 652 if (isnullstartblock(del.br_startblock)) { 653 xfs_bmap_del_extent_delay(ip, XFS_COW_FORK, &icur, &got, 654 &del); 655 } else if (del.br_state == XFS_EXT_UNWRITTEN || cancel_real) { 656 ASSERT((*tpp)->t_highest_agno == NULLAGNUMBER); 657 658 /* Free the CoW orphan record. */ 659 xfs_refcount_free_cow_extent(*tpp, isrt, 660 del.br_startblock, del.br_blockcount); 661 662 error = xfs_free_extent_later(*tpp, del.br_startblock, 663 del.br_blockcount, NULL, 664 XFS_AG_RESV_NONE, 665 isrt ? XFS_FREE_EXTENT_REALTIME : 0); 666 if (error) 667 break; 668 669 /* Roll the transaction */ 670 error = xfs_defer_finish(tpp); 671 if (error) 672 break; 673 674 /* Remove the mapping from the CoW fork. */ 675 xfs_bmap_del_extent_cow(ip, &icur, &got, &del); 676 677 /* Remove the quota reservation */ 678 xfs_quota_unreserve_blkres(ip, del.br_blockcount); 679 } else { 680 /* Didn't do anything, push cursor back. */ 681 xfs_iext_prev(ifp, &icur); 682 } 683 next_extent: 684 if (!xfs_iext_get_extent(ifp, &icur, &got)) 685 break; 686 } 687 688 /* clear tag if cow fork is emptied */ 689 if (!ifp->if_bytes) 690 xfs_inode_clear_cowblocks_tag(ip); 691 return error; 692 } 693 694 /* 695 * Cancel CoW reservations for some byte range of an inode. 696 * 697 * If cancel_real is true this function cancels all COW fork extents for the 698 * inode; if cancel_real is false, real extents are not cleared. 699 */ 700 int 701 xfs_reflink_cancel_cow_range( 702 struct xfs_inode *ip, 703 xfs_off_t offset, 704 xfs_off_t count, 705 bool cancel_real) 706 { 707 struct xfs_trans *tp; 708 xfs_fileoff_t offset_fsb; 709 xfs_fileoff_t end_fsb; 710 int error; 711 712 trace_xfs_reflink_cancel_cow_range(ip, offset, count); 713 ASSERT(ip->i_cowfp); 714 715 offset_fsb = XFS_B_TO_FSBT(ip->i_mount, offset); 716 if (count == NULLFILEOFF) 717 end_fsb = NULLFILEOFF; 718 else 719 end_fsb = XFS_B_TO_FSB(ip->i_mount, offset + count); 720 721 /* Start a rolling transaction to remove the mappings */ 722 error = xfs_trans_alloc(ip->i_mount, &M_RES(ip->i_mount)->tr_write, 723 0, 0, 0, &tp); 724 if (error) 725 goto out; 726 727 xfs_ilock(ip, XFS_ILOCK_EXCL); 728 xfs_trans_ijoin(tp, ip, 0); 729 730 /* Scrape out the old CoW reservations */ 731 error = xfs_reflink_cancel_cow_blocks(ip, &tp, offset_fsb, end_fsb, 732 cancel_real); 733 if (error) 734 goto out_cancel; 735 736 error = xfs_trans_commit(tp); 737 738 xfs_iunlock(ip, XFS_ILOCK_EXCL); 739 return error; 740 741 out_cancel: 742 xfs_trans_cancel(tp); 743 xfs_iunlock(ip, XFS_ILOCK_EXCL); 744 out: 745 trace_xfs_reflink_cancel_cow_range_error(ip, error, _RET_IP_); 746 return error; 747 } 748 749 #ifdef CONFIG_XFS_QUOTA 750 /* 751 * Update quota accounting for a remapping operation. When we're remapping 752 * something from the CoW fork to the data fork, we must update the quota 753 * accounting for delayed allocations. For remapping from the data fork to the 754 * data fork, use regular block accounting. 755 */ 756 static inline void 757 xfs_reflink_update_quota( 758 struct xfs_trans *tp, 759 struct xfs_inode *ip, 760 bool is_cow, 761 int64_t blocks) 762 { 763 unsigned int qflag; 764 765 if (XFS_IS_REALTIME_INODE(ip)) { 766 qflag = is_cow ? XFS_TRANS_DQ_DELRTBCOUNT : 767 XFS_TRANS_DQ_RTBCOUNT; 768 } else { 769 qflag = is_cow ? XFS_TRANS_DQ_DELBCOUNT : 770 XFS_TRANS_DQ_BCOUNT; 771 } 772 xfs_trans_mod_dquot_byino(tp, ip, qflag, blocks); 773 } 774 #else 775 # define xfs_reflink_update_quota(tp, ip, is_cow, blocks) ((void)0) 776 #endif 777 778 /* 779 * Remap part of the CoW fork into the data fork. 780 * 781 * We aim to remap the range starting at @offset_fsb and ending at @end_fsb 782 * into the data fork; this function will remap what it can (at the end of the 783 * range) and update @end_fsb appropriately. Each remap gets its own 784 * transaction because we can end up merging and splitting bmbt blocks for 785 * every remap operation and we'd like to keep the block reservation 786 * requirements as low as possible. 787 */ 788 STATIC int 789 xfs_reflink_end_cow_extent( 790 struct xfs_inode *ip, 791 xfs_fileoff_t *offset_fsb, 792 xfs_fileoff_t end_fsb) 793 { 794 struct xfs_iext_cursor icur; 795 struct xfs_bmbt_irec got, del, data; 796 struct xfs_mount *mp = ip->i_mount; 797 struct xfs_trans *tp; 798 struct xfs_ifork *ifp = xfs_ifork_ptr(ip, XFS_COW_FORK); 799 unsigned int resblks; 800 int nmaps; 801 bool isrt = XFS_IS_REALTIME_INODE(ip); 802 int error; 803 804 resblks = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK); 805 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 806 XFS_TRANS_RESERVE, &tp); 807 if (error) 808 return error; 809 810 /* 811 * Lock the inode. We have to ijoin without automatic unlock because 812 * the lead transaction is the refcountbt record deletion; the data 813 * fork update follows as a deferred log item. 814 */ 815 xfs_ilock(ip, XFS_ILOCK_EXCL); 816 xfs_trans_ijoin(tp, ip, 0); 817 818 /* 819 * In case of racing, overlapping AIO writes no COW extents might be 820 * left by the time I/O completes for the loser of the race. In that 821 * case we are done. 822 */ 823 if (!xfs_iext_lookup_extent(ip, ifp, *offset_fsb, &icur, &got) || 824 got.br_startoff >= end_fsb) { 825 *offset_fsb = end_fsb; 826 goto out_cancel; 827 } 828 829 /* 830 * Only remap real extents that contain data. With AIO, speculative 831 * preallocations can leak into the range we are called upon, and we 832 * need to skip them. Preserve @got for the eventual CoW fork 833 * deletion; from now on @del represents the mapping that we're 834 * actually remapping. 835 */ 836 while (!xfs_bmap_is_written_extent(&got)) { 837 if (!xfs_iext_next_extent(ifp, &icur, &got) || 838 got.br_startoff >= end_fsb) { 839 *offset_fsb = end_fsb; 840 goto out_cancel; 841 } 842 } 843 del = got; 844 xfs_trim_extent(&del, *offset_fsb, end_fsb - *offset_fsb); 845 846 error = xfs_iext_count_extend(tp, ip, XFS_DATA_FORK, 847 XFS_IEXT_REFLINK_END_COW_CNT); 848 if (error) 849 goto out_cancel; 850 851 /* Grab the corresponding mapping in the data fork. */ 852 nmaps = 1; 853 error = xfs_bmapi_read(ip, del.br_startoff, del.br_blockcount, &data, 854 &nmaps, 0); 855 if (error) 856 goto out_cancel; 857 858 /* We can only remap the smaller of the two extent sizes. */ 859 data.br_blockcount = min(data.br_blockcount, del.br_blockcount); 860 del.br_blockcount = data.br_blockcount; 861 862 trace_xfs_reflink_cow_remap_from(ip, &del); 863 trace_xfs_reflink_cow_remap_to(ip, &data); 864 865 if (xfs_bmap_is_real_extent(&data)) { 866 /* 867 * If the extent we're remapping is backed by storage (written 868 * or not), unmap the extent and drop its refcount. 869 */ 870 xfs_bmap_unmap_extent(tp, ip, XFS_DATA_FORK, &data); 871 xfs_refcount_decrease_extent(tp, isrt, &data); 872 xfs_reflink_update_quota(tp, ip, false, -data.br_blockcount); 873 } else if (data.br_startblock == DELAYSTARTBLOCK) { 874 int done; 875 876 /* 877 * If the extent we're remapping is a delalloc reservation, 878 * we can use the regular bunmapi function to release the 879 * incore state. Dropping the delalloc reservation takes care 880 * of the quota reservation for us. 881 */ 882 error = xfs_bunmapi(NULL, ip, data.br_startoff, 883 data.br_blockcount, 0, 1, &done); 884 if (error) 885 goto out_cancel; 886 ASSERT(done); 887 } 888 889 /* Free the CoW orphan record. */ 890 xfs_refcount_free_cow_extent(tp, isrt, del.br_startblock, 891 del.br_blockcount); 892 893 /* Map the new blocks into the data fork. */ 894 xfs_bmap_map_extent(tp, ip, XFS_DATA_FORK, &del); 895 896 /* Charge this new data fork mapping to the on-disk quota. */ 897 xfs_reflink_update_quota(tp, ip, true, del.br_blockcount); 898 899 /* Remove the mapping from the CoW fork. */ 900 xfs_bmap_del_extent_cow(ip, &icur, &got, &del); 901 902 error = xfs_trans_commit(tp); 903 xfs_iunlock(ip, XFS_ILOCK_EXCL); 904 if (error) 905 return error; 906 907 /* Update the caller about how much progress we made. */ 908 *offset_fsb = del.br_startoff + del.br_blockcount; 909 return 0; 910 911 out_cancel: 912 xfs_trans_cancel(tp); 913 xfs_iunlock(ip, XFS_ILOCK_EXCL); 914 return error; 915 } 916 917 /* 918 * Remap parts of a file's data fork after a successful CoW. 919 */ 920 int 921 xfs_reflink_end_cow( 922 struct xfs_inode *ip, 923 xfs_off_t offset, 924 xfs_off_t count) 925 { 926 xfs_fileoff_t offset_fsb; 927 xfs_fileoff_t end_fsb; 928 int error = 0; 929 930 trace_xfs_reflink_end_cow(ip, offset, count); 931 932 offset_fsb = XFS_B_TO_FSBT(ip->i_mount, offset); 933 end_fsb = XFS_B_TO_FSB(ip->i_mount, offset + count); 934 935 /* 936 * Walk forwards until we've remapped the I/O range. The loop function 937 * repeatedly cycles the ILOCK to allocate one transaction per remapped 938 * extent. 939 * 940 * If we're being called by writeback then the pages will still 941 * have PageWriteback set, which prevents races with reflink remapping 942 * and truncate. Reflink remapping prevents races with writeback by 943 * taking the iolock and mmaplock before flushing the pages and 944 * remapping, which means there won't be any further writeback or page 945 * cache dirtying until the reflink completes. 946 * 947 * We should never have two threads issuing writeback for the same file 948 * region. There are also have post-eof checks in the writeback 949 * preparation code so that we don't bother writing out pages that are 950 * about to be truncated. 951 * 952 * If we're being called as part of directio write completion, the dio 953 * count is still elevated, which reflink and truncate will wait for. 954 * Reflink remapping takes the iolock and mmaplock and waits for 955 * pending dio to finish, which should prevent any directio until the 956 * remap completes. Multiple concurrent directio writes to the same 957 * region are handled by end_cow processing only occurring for the 958 * threads which succeed; the outcome of multiple overlapping direct 959 * writes is not well defined anyway. 960 * 961 * It's possible that a buffered write and a direct write could collide 962 * here (the buffered write stumbles in after the dio flushes and 963 * invalidates the page cache and immediately queues writeback), but we 964 * have never supported this 100%. If either disk write succeeds the 965 * blocks will be remapped. 966 */ 967 while (end_fsb > offset_fsb && !error) 968 error = xfs_reflink_end_cow_extent(ip, &offset_fsb, end_fsb); 969 970 if (error) 971 trace_xfs_reflink_end_cow_error(ip, error, _RET_IP_); 972 return error; 973 } 974 975 /* 976 * Free all CoW staging blocks that are still referenced by the ondisk refcount 977 * metadata. The ondisk metadata does not track which inode created the 978 * staging extent, so callers must ensure that there are no cached inodes with 979 * live CoW staging extents. 980 */ 981 int 982 xfs_reflink_recover_cow( 983 struct xfs_mount *mp) 984 { 985 struct xfs_perag *pag = NULL; 986 struct xfs_rtgroup *rtg = NULL; 987 int error = 0; 988 989 if (!xfs_has_reflink(mp)) 990 return 0; 991 992 while ((pag = xfs_perag_next(mp, pag))) { 993 error = xfs_refcount_recover_cow_leftovers(pag_group(pag)); 994 if (error) { 995 xfs_perag_rele(pag); 996 return error; 997 } 998 } 999 1000 while ((rtg = xfs_rtgroup_next(mp, rtg))) { 1001 error = xfs_refcount_recover_cow_leftovers(rtg_group(rtg)); 1002 if (error) { 1003 xfs_rtgroup_rele(rtg); 1004 return error; 1005 } 1006 } 1007 1008 return 0; 1009 } 1010 1011 /* 1012 * Reflinking (Block) Ranges of Two Files Together 1013 * 1014 * First, ensure that the reflink flag is set on both inodes. The flag is an 1015 * optimization to avoid unnecessary refcount btree lookups in the write path. 1016 * 1017 * Now we can iteratively remap the range of extents (and holes) in src to the 1018 * corresponding ranges in dest. Let drange and srange denote the ranges of 1019 * logical blocks in dest and src touched by the reflink operation. 1020 * 1021 * While the length of drange is greater than zero, 1022 * - Read src's bmbt at the start of srange ("imap") 1023 * - If imap doesn't exist, make imap appear to start at the end of srange 1024 * with zero length. 1025 * - If imap starts before srange, advance imap to start at srange. 1026 * - If imap goes beyond srange, truncate imap to end at the end of srange. 1027 * - Punch (imap start - srange start + imap len) blocks from dest at 1028 * offset (drange start). 1029 * - If imap points to a real range of pblks, 1030 * > Increase the refcount of the imap's pblks 1031 * > Map imap's pblks into dest at the offset 1032 * (drange start + imap start - srange start) 1033 * - Advance drange and srange by (imap start - srange start + imap len) 1034 * 1035 * Finally, if the reflink made dest longer, update both the in-core and 1036 * on-disk file sizes. 1037 * 1038 * ASCII Art Demonstration: 1039 * 1040 * Let's say we want to reflink this source file: 1041 * 1042 * ----SSSSSSS-SSSSS----SSSSSS (src file) 1043 * <--------------------> 1044 * 1045 * into this destination file: 1046 * 1047 * --DDDDDDDDDDDDDDDDDDD--DDD (dest file) 1048 * <--------------------> 1049 * '-' means a hole, and 'S' and 'D' are written blocks in the src and dest. 1050 * Observe that the range has different logical offsets in either file. 1051 * 1052 * Consider that the first extent in the source file doesn't line up with our 1053 * reflink range. Unmapping and remapping are separate operations, so we can 1054 * unmap more blocks from the destination file than we remap. 1055 * 1056 * ----SSSSSSS-SSSSS----SSSSSS 1057 * <-------> 1058 * --DDDDD---------DDDDD--DDD 1059 * <-------> 1060 * 1061 * Now remap the source extent into the destination file: 1062 * 1063 * ----SSSSSSS-SSSSS----SSSSSS 1064 * <-------> 1065 * --DDDDD--SSSSSSSDDDDD--DDD 1066 * <-------> 1067 * 1068 * Do likewise with the second hole and extent in our range. Holes in the 1069 * unmap range don't affect our operation. 1070 * 1071 * ----SSSSSSS-SSSSS----SSSSSS 1072 * <----> 1073 * --DDDDD--SSSSSSS-SSSSS-DDD 1074 * <----> 1075 * 1076 * Finally, unmap and remap part of the third extent. This will increase the 1077 * size of the destination file. 1078 * 1079 * ----SSSSSSS-SSSSS----SSSSSS 1080 * <-----> 1081 * --DDDDD--SSSSSSS-SSSSS----SSS 1082 * <-----> 1083 * 1084 * Once we update the destination file's i_size, we're done. 1085 */ 1086 1087 /* 1088 * Ensure the reflink bit is set in both inodes. 1089 */ 1090 STATIC int 1091 xfs_reflink_set_inode_flag( 1092 struct xfs_inode *src, 1093 struct xfs_inode *dest) 1094 { 1095 struct xfs_mount *mp = src->i_mount; 1096 int error; 1097 struct xfs_trans *tp; 1098 1099 if (xfs_is_reflink_inode(src) && xfs_is_reflink_inode(dest)) 1100 return 0; 1101 1102 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp); 1103 if (error) 1104 goto out_error; 1105 1106 /* Lock both files against IO */ 1107 if (src->i_ino == dest->i_ino) 1108 xfs_ilock(src, XFS_ILOCK_EXCL); 1109 else 1110 xfs_lock_two_inodes(src, XFS_ILOCK_EXCL, dest, XFS_ILOCK_EXCL); 1111 1112 if (!xfs_is_reflink_inode(src)) { 1113 trace_xfs_reflink_set_inode_flag(src); 1114 xfs_trans_ijoin(tp, src, XFS_ILOCK_EXCL); 1115 src->i_diflags2 |= XFS_DIFLAG2_REFLINK; 1116 xfs_trans_log_inode(tp, src, XFS_ILOG_CORE); 1117 xfs_ifork_init_cow(src); 1118 } else 1119 xfs_iunlock(src, XFS_ILOCK_EXCL); 1120 1121 if (src->i_ino == dest->i_ino) 1122 goto commit_flags; 1123 1124 if (!xfs_is_reflink_inode(dest)) { 1125 trace_xfs_reflink_set_inode_flag(dest); 1126 xfs_trans_ijoin(tp, dest, XFS_ILOCK_EXCL); 1127 dest->i_diflags2 |= XFS_DIFLAG2_REFLINK; 1128 xfs_trans_log_inode(tp, dest, XFS_ILOG_CORE); 1129 xfs_ifork_init_cow(dest); 1130 } else 1131 xfs_iunlock(dest, XFS_ILOCK_EXCL); 1132 1133 commit_flags: 1134 error = xfs_trans_commit(tp); 1135 if (error) 1136 goto out_error; 1137 return error; 1138 1139 out_error: 1140 trace_xfs_reflink_set_inode_flag_error(dest, error, _RET_IP_); 1141 return error; 1142 } 1143 1144 /* 1145 * Update destination inode size & cowextsize hint, if necessary. 1146 */ 1147 int 1148 xfs_reflink_update_dest( 1149 struct xfs_inode *dest, 1150 xfs_off_t newlen, 1151 xfs_extlen_t cowextsize, 1152 unsigned int remap_flags) 1153 { 1154 struct xfs_mount *mp = dest->i_mount; 1155 struct xfs_trans *tp; 1156 int error; 1157 1158 if (newlen <= i_size_read(VFS_I(dest)) && cowextsize == 0) 1159 return 0; 1160 1161 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp); 1162 if (error) 1163 goto out_error; 1164 1165 xfs_ilock(dest, XFS_ILOCK_EXCL); 1166 xfs_trans_ijoin(tp, dest, XFS_ILOCK_EXCL); 1167 1168 if (newlen > i_size_read(VFS_I(dest))) { 1169 trace_xfs_reflink_update_inode_size(dest, newlen); 1170 i_size_write(VFS_I(dest), newlen); 1171 dest->i_disk_size = newlen; 1172 } 1173 1174 if (cowextsize) { 1175 dest->i_cowextsize = cowextsize; 1176 dest->i_diflags2 |= XFS_DIFLAG2_COWEXTSIZE; 1177 } 1178 1179 xfs_trans_log_inode(tp, dest, XFS_ILOG_CORE); 1180 1181 error = xfs_trans_commit(tp); 1182 if (error) 1183 goto out_error; 1184 return error; 1185 1186 out_error: 1187 trace_xfs_reflink_update_inode_size_error(dest, error, _RET_IP_); 1188 return error; 1189 } 1190 1191 /* 1192 * Do we have enough reserve in this AG to handle a reflink? The refcount 1193 * btree already reserved all the space it needs, but the rmap btree can grow 1194 * infinitely, so we won't allow more reflinks when the AG is down to the 1195 * btree reserves. 1196 */ 1197 static int 1198 xfs_reflink_ag_has_free_space( 1199 struct xfs_mount *mp, 1200 struct xfs_inode *ip, 1201 xfs_fsblock_t fsb) 1202 { 1203 struct xfs_perag *pag; 1204 xfs_agnumber_t agno; 1205 int error = 0; 1206 1207 if (!xfs_has_rmapbt(mp)) 1208 return 0; 1209 if (XFS_IS_REALTIME_INODE(ip)) { 1210 struct xfs_rtgroup *rtg; 1211 xfs_rgnumber_t rgno; 1212 1213 rgno = xfs_rtb_to_rgno(mp, fsb); 1214 rtg = xfs_rtgroup_get(mp, rgno); 1215 if (xfs_metafile_resv_critical(rtg_rmap(rtg))) 1216 error = -ENOSPC; 1217 xfs_rtgroup_put(rtg); 1218 return error; 1219 } 1220 1221 agno = XFS_FSB_TO_AGNO(mp, fsb); 1222 pag = xfs_perag_get(mp, agno); 1223 if (xfs_ag_resv_critical(pag, XFS_AG_RESV_RMAPBT) || 1224 xfs_ag_resv_critical(pag, XFS_AG_RESV_METADATA)) 1225 error = -ENOSPC; 1226 xfs_perag_put(pag); 1227 return error; 1228 } 1229 1230 /* 1231 * Remap the given extent into the file. The dmap blockcount will be set to 1232 * the number of blocks that were actually remapped. 1233 */ 1234 STATIC int 1235 xfs_reflink_remap_extent( 1236 struct xfs_inode *ip, 1237 struct xfs_bmbt_irec *dmap, 1238 xfs_off_t new_isize) 1239 { 1240 struct xfs_bmbt_irec smap; 1241 struct xfs_mount *mp = ip->i_mount; 1242 struct xfs_trans *tp; 1243 xfs_off_t newlen; 1244 int64_t qdelta = 0; 1245 unsigned int dblocks, rblocks, resblks; 1246 bool quota_reserved = true; 1247 bool smap_real; 1248 bool dmap_written = xfs_bmap_is_written_extent(dmap); 1249 bool isrt = XFS_IS_REALTIME_INODE(ip); 1250 int iext_delta = 0; 1251 int nimaps; 1252 int error; 1253 1254 /* 1255 * Start a rolling transaction to switch the mappings. 1256 * 1257 * Adding a written extent to the extent map can cause a bmbt split, 1258 * and removing a mapped extent from the extent can cause a bmbt split. 1259 * The two operations cannot both cause a split since they operate on 1260 * the same index in the bmap btree, so we only need a reservation for 1261 * one bmbt split if either thing is happening. However, we haven't 1262 * locked the inode yet, so we reserve assuming this is the case. 1263 * 1264 * The first allocation call tries to reserve enough space to handle 1265 * mapping dmap into a sparse part of the file plus the bmbt split. We 1266 * haven't locked the inode or read the existing mapping yet, so we do 1267 * not know for sure that we need the space. This should succeed most 1268 * of the time. 1269 * 1270 * If the first attempt fails, try again but reserving only enough 1271 * space to handle a bmbt split. This is the hard minimum requirement, 1272 * and we revisit quota reservations later when we know more about what 1273 * we're remapping. 1274 */ 1275 resblks = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK); 1276 if (XFS_IS_REALTIME_INODE(ip)) { 1277 dblocks = resblks; 1278 rblocks = dmap->br_blockcount; 1279 } else { 1280 dblocks = resblks + dmap->br_blockcount; 1281 rblocks = 0; 1282 } 1283 error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write, 1284 dblocks, rblocks, false, &tp); 1285 if (error == -EDQUOT || error == -ENOSPC) { 1286 quota_reserved = false; 1287 error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write, 1288 resblks, 0, false, &tp); 1289 } 1290 if (error) 1291 goto out; 1292 1293 /* 1294 * Read what's currently mapped in the destination file into smap. 1295 * If smap isn't a hole, we will have to remove it before we can add 1296 * dmap to the destination file. 1297 */ 1298 nimaps = 1; 1299 error = xfs_bmapi_read(ip, dmap->br_startoff, dmap->br_blockcount, 1300 &smap, &nimaps, 0); 1301 if (error) 1302 goto out_cancel; 1303 ASSERT(nimaps == 1 && smap.br_startoff == dmap->br_startoff); 1304 smap_real = xfs_bmap_is_real_extent(&smap); 1305 1306 /* 1307 * We can only remap as many blocks as the smaller of the two extent 1308 * maps, because we can only remap one extent at a time. 1309 */ 1310 dmap->br_blockcount = min(dmap->br_blockcount, smap.br_blockcount); 1311 ASSERT(dmap->br_blockcount == smap.br_blockcount); 1312 1313 trace_xfs_reflink_remap_extent_dest(ip, &smap); 1314 1315 /* 1316 * Two extents mapped to the same physical block must not have 1317 * different states; that's filesystem corruption. Move on to the next 1318 * extent if they're both holes or both the same physical extent. 1319 */ 1320 if (dmap->br_startblock == smap.br_startblock) { 1321 if (dmap->br_state != smap.br_state) { 1322 xfs_bmap_mark_sick(ip, XFS_DATA_FORK); 1323 error = -EFSCORRUPTED; 1324 } 1325 goto out_cancel; 1326 } 1327 1328 /* If both extents are unwritten, leave them alone. */ 1329 if (dmap->br_state == XFS_EXT_UNWRITTEN && 1330 smap.br_state == XFS_EXT_UNWRITTEN) 1331 goto out_cancel; 1332 1333 /* No reflinking if the AG of the dest mapping is low on space. */ 1334 if (dmap_written) { 1335 error = xfs_reflink_ag_has_free_space(mp, ip, 1336 dmap->br_startblock); 1337 if (error) 1338 goto out_cancel; 1339 } 1340 1341 /* 1342 * Increase quota reservation if we think the quota block counter for 1343 * this file could increase. 1344 * 1345 * If we are mapping a written extent into the file, we need to have 1346 * enough quota block count reservation to handle the blocks in that 1347 * extent. We log only the delta to the quota block counts, so if the 1348 * extent we're unmapping also has blocks allocated to it, we don't 1349 * need a quota reservation for the extent itself. 1350 * 1351 * Note that if we're replacing a delalloc reservation with a written 1352 * extent, we have to take the full quota reservation because removing 1353 * the delalloc reservation gives the block count back to the quota 1354 * count. This is suboptimal, but the VFS flushed the dest range 1355 * before we started. That should have removed all the delalloc 1356 * reservations, but we code defensively. 1357 * 1358 * xfs_trans_alloc_inode above already tried to grab an even larger 1359 * quota reservation, and kicked off a blockgc scan if it couldn't. 1360 * If we can't get a potentially smaller quota reservation now, we're 1361 * done. 1362 */ 1363 if (!quota_reserved && !smap_real && dmap_written) { 1364 if (XFS_IS_REALTIME_INODE(ip)) { 1365 dblocks = 0; 1366 rblocks = dmap->br_blockcount; 1367 } else { 1368 dblocks = dmap->br_blockcount; 1369 rblocks = 0; 1370 } 1371 error = xfs_trans_reserve_quota_nblks(tp, ip, dblocks, rblocks, 1372 false); 1373 if (error) 1374 goto out_cancel; 1375 } 1376 1377 if (smap_real) 1378 ++iext_delta; 1379 1380 if (dmap_written) 1381 ++iext_delta; 1382 1383 error = xfs_iext_count_extend(tp, ip, XFS_DATA_FORK, iext_delta); 1384 if (error) 1385 goto out_cancel; 1386 1387 if (smap_real) { 1388 /* 1389 * If the extent we're unmapping is backed by storage (written 1390 * or not), unmap the extent and drop its refcount. 1391 */ 1392 xfs_bmap_unmap_extent(tp, ip, XFS_DATA_FORK, &smap); 1393 xfs_refcount_decrease_extent(tp, isrt, &smap); 1394 qdelta -= smap.br_blockcount; 1395 } else if (smap.br_startblock == DELAYSTARTBLOCK) { 1396 int done; 1397 1398 /* 1399 * If the extent we're unmapping is a delalloc reservation, 1400 * we can use the regular bunmapi function to release the 1401 * incore state. Dropping the delalloc reservation takes care 1402 * of the quota reservation for us. 1403 */ 1404 error = xfs_bunmapi(NULL, ip, smap.br_startoff, 1405 smap.br_blockcount, 0, 1, &done); 1406 if (error) 1407 goto out_cancel; 1408 ASSERT(done); 1409 } 1410 1411 /* 1412 * If the extent we're sharing is backed by written storage, increase 1413 * its refcount and map it into the file. 1414 */ 1415 if (dmap_written) { 1416 xfs_refcount_increase_extent(tp, isrt, dmap); 1417 xfs_bmap_map_extent(tp, ip, XFS_DATA_FORK, dmap); 1418 qdelta += dmap->br_blockcount; 1419 } 1420 1421 xfs_reflink_update_quota(tp, ip, false, qdelta); 1422 1423 /* Update dest isize if needed. */ 1424 newlen = XFS_FSB_TO_B(mp, dmap->br_startoff + dmap->br_blockcount); 1425 newlen = min_t(xfs_off_t, newlen, new_isize); 1426 if (newlen > i_size_read(VFS_I(ip))) { 1427 trace_xfs_reflink_update_inode_size(ip, newlen); 1428 i_size_write(VFS_I(ip), newlen); 1429 ip->i_disk_size = newlen; 1430 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); 1431 } 1432 1433 /* Commit everything and unlock. */ 1434 error = xfs_trans_commit(tp); 1435 goto out_unlock; 1436 1437 out_cancel: 1438 xfs_trans_cancel(tp); 1439 out_unlock: 1440 xfs_iunlock(ip, XFS_ILOCK_EXCL); 1441 out: 1442 if (error) 1443 trace_xfs_reflink_remap_extent_error(ip, error, _RET_IP_); 1444 return error; 1445 } 1446 1447 /* Remap a range of one file to the other. */ 1448 int 1449 xfs_reflink_remap_blocks( 1450 struct xfs_inode *src, 1451 loff_t pos_in, 1452 struct xfs_inode *dest, 1453 loff_t pos_out, 1454 loff_t remap_len, 1455 loff_t *remapped) 1456 { 1457 struct xfs_bmbt_irec imap; 1458 struct xfs_mount *mp = src->i_mount; 1459 xfs_fileoff_t srcoff = XFS_B_TO_FSBT(mp, pos_in); 1460 xfs_fileoff_t destoff = XFS_B_TO_FSBT(mp, pos_out); 1461 xfs_filblks_t len; 1462 xfs_filblks_t remapped_len = 0; 1463 xfs_off_t new_isize = pos_out + remap_len; 1464 int nimaps; 1465 int error = 0; 1466 1467 len = min_t(xfs_filblks_t, XFS_B_TO_FSB(mp, remap_len), 1468 XFS_MAX_FILEOFF); 1469 1470 trace_xfs_reflink_remap_blocks(src, srcoff, len, dest, destoff); 1471 1472 while (len > 0) { 1473 unsigned int lock_mode; 1474 1475 /* Read extent from the source file */ 1476 nimaps = 1; 1477 lock_mode = xfs_ilock_data_map_shared(src); 1478 error = xfs_bmapi_read(src, srcoff, len, &imap, &nimaps, 0); 1479 xfs_iunlock(src, lock_mode); 1480 if (error) 1481 break; 1482 /* 1483 * The caller supposedly flushed all dirty pages in the source 1484 * file range, which means that writeback should have allocated 1485 * or deleted all delalloc reservations in that range. If we 1486 * find one, that's a good sign that something is seriously 1487 * wrong here. 1488 */ 1489 ASSERT(nimaps == 1 && imap.br_startoff == srcoff); 1490 if (imap.br_startblock == DELAYSTARTBLOCK) { 1491 ASSERT(imap.br_startblock != DELAYSTARTBLOCK); 1492 xfs_bmap_mark_sick(src, XFS_DATA_FORK); 1493 error = -EFSCORRUPTED; 1494 break; 1495 } 1496 1497 trace_xfs_reflink_remap_extent_src(src, &imap); 1498 1499 /* Remap into the destination file at the given offset. */ 1500 imap.br_startoff = destoff; 1501 error = xfs_reflink_remap_extent(dest, &imap, new_isize); 1502 if (error) 1503 break; 1504 1505 if (fatal_signal_pending(current)) { 1506 error = -EINTR; 1507 break; 1508 } 1509 1510 /* Advance drange/srange */ 1511 srcoff += imap.br_blockcount; 1512 destoff += imap.br_blockcount; 1513 len -= imap.br_blockcount; 1514 remapped_len += imap.br_blockcount; 1515 cond_resched(); 1516 } 1517 1518 if (error) 1519 trace_xfs_reflink_remap_blocks_error(dest, error, _RET_IP_); 1520 *remapped = min_t(loff_t, remap_len, 1521 XFS_FSB_TO_B(src->i_mount, remapped_len)); 1522 return error; 1523 } 1524 1525 /* 1526 * If we're reflinking to a point past the destination file's EOF, we must 1527 * zero any speculative post-EOF preallocations that sit between the old EOF 1528 * and the destination file offset. 1529 */ 1530 static int 1531 xfs_reflink_zero_posteof( 1532 struct xfs_inode *ip, 1533 loff_t pos) 1534 { 1535 loff_t isize = i_size_read(VFS_I(ip)); 1536 1537 if (pos <= isize) 1538 return 0; 1539 1540 trace_xfs_zero_eof(ip, isize, pos - isize); 1541 return xfs_zero_range(ip, isize, pos - isize, NULL); 1542 } 1543 1544 /* 1545 * Prepare two files for range cloning. Upon a successful return both inodes 1546 * will have the iolock and mmaplock held, the page cache of the out file will 1547 * be truncated, and any leases on the out file will have been broken. This 1548 * function borrows heavily from xfs_file_aio_write_checks. 1549 * 1550 * The VFS allows partial EOF blocks to "match" for dedupe even though it hasn't 1551 * checked that the bytes beyond EOF physically match. Hence we cannot use the 1552 * EOF block in the source dedupe range because it's not a complete block match, 1553 * hence can introduce a corruption into the file that has it's block replaced. 1554 * 1555 * In similar fashion, the VFS file cloning also allows partial EOF blocks to be 1556 * "block aligned" for the purposes of cloning entire files. However, if the 1557 * source file range includes the EOF block and it lands within the existing EOF 1558 * of the destination file, then we can expose stale data from beyond the source 1559 * file EOF in the destination file. 1560 * 1561 * XFS doesn't support partial block sharing, so in both cases we have check 1562 * these cases ourselves. For dedupe, we can simply round the length to dedupe 1563 * down to the previous whole block and ignore the partial EOF block. While this 1564 * means we can't dedupe the last block of a file, this is an acceptible 1565 * tradeoff for simplicity on implementation. 1566 * 1567 * For cloning, we want to share the partial EOF block if it is also the new EOF 1568 * block of the destination file. If the partial EOF block lies inside the 1569 * existing destination EOF, then we have to abort the clone to avoid exposing 1570 * stale data in the destination file. Hence we reject these clone attempts with 1571 * -EINVAL in this case. 1572 */ 1573 int 1574 xfs_reflink_remap_prep( 1575 struct file *file_in, 1576 loff_t pos_in, 1577 struct file *file_out, 1578 loff_t pos_out, 1579 loff_t *len, 1580 unsigned int remap_flags) 1581 { 1582 struct inode *inode_in = file_inode(file_in); 1583 struct xfs_inode *src = XFS_I(inode_in); 1584 struct inode *inode_out = file_inode(file_out); 1585 struct xfs_inode *dest = XFS_I(inode_out); 1586 int ret; 1587 1588 /* Lock both files against IO */ 1589 ret = xfs_ilock2_io_mmap(src, dest); 1590 if (ret) 1591 return ret; 1592 1593 /* Check file eligibility and prepare for block sharing. */ 1594 ret = -EINVAL; 1595 /* Can't reflink between data and rt volumes */ 1596 if (XFS_IS_REALTIME_INODE(src) != XFS_IS_REALTIME_INODE(dest)) 1597 goto out_unlock; 1598 1599 /* Don't share DAX file data with non-DAX file. */ 1600 if (IS_DAX(inode_in) != IS_DAX(inode_out)) 1601 goto out_unlock; 1602 1603 if (!IS_DAX(inode_in)) 1604 ret = generic_remap_file_range_prep(file_in, pos_in, file_out, 1605 pos_out, len, remap_flags); 1606 else 1607 ret = dax_remap_file_range_prep(file_in, pos_in, file_out, 1608 pos_out, len, remap_flags, &xfs_read_iomap_ops); 1609 if (ret || *len == 0) 1610 goto out_unlock; 1611 1612 /* Attach dquots to dest inode before changing block map */ 1613 ret = xfs_qm_dqattach(dest); 1614 if (ret) 1615 goto out_unlock; 1616 1617 /* 1618 * Zero existing post-eof speculative preallocations in the destination 1619 * file. 1620 */ 1621 ret = xfs_reflink_zero_posteof(dest, pos_out); 1622 if (ret) 1623 goto out_unlock; 1624 1625 /* Set flags and remap blocks. */ 1626 ret = xfs_reflink_set_inode_flag(src, dest); 1627 if (ret) 1628 goto out_unlock; 1629 1630 /* 1631 * If pos_out > EOF, we may have dirtied blocks between EOF and 1632 * pos_out. In that case, we need to extend the flush and unmap to cover 1633 * from EOF to the end of the copy length. 1634 */ 1635 if (pos_out > XFS_ISIZE(dest)) { 1636 loff_t flen = *len + (pos_out - XFS_ISIZE(dest)); 1637 ret = xfs_flush_unmap_range(dest, XFS_ISIZE(dest), flen); 1638 } else { 1639 ret = xfs_flush_unmap_range(dest, pos_out, *len); 1640 } 1641 if (ret) 1642 goto out_unlock; 1643 1644 xfs_iflags_set(src, XFS_IREMAPPING); 1645 if (inode_in != inode_out) 1646 xfs_ilock_demote(src, XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL); 1647 1648 return 0; 1649 out_unlock: 1650 xfs_iunlock2_io_mmap(src, dest); 1651 return ret; 1652 } 1653 1654 /* Does this inode need the reflink flag? */ 1655 int 1656 xfs_reflink_inode_has_shared_extents( 1657 struct xfs_trans *tp, 1658 struct xfs_inode *ip, 1659 bool *has_shared) 1660 { 1661 struct xfs_bmbt_irec got; 1662 struct xfs_mount *mp = ip->i_mount; 1663 struct xfs_ifork *ifp; 1664 struct xfs_iext_cursor icur; 1665 bool found; 1666 int error; 1667 1668 ifp = xfs_ifork_ptr(ip, XFS_DATA_FORK); 1669 error = xfs_iread_extents(tp, ip, XFS_DATA_FORK); 1670 if (error) 1671 return error; 1672 1673 *has_shared = false; 1674 found = xfs_iext_lookup_extent(ip, ifp, 0, &icur, &got); 1675 while (found) { 1676 xfs_extlen_t shared_offset, shared_len; 1677 1678 if (isnullstartblock(got.br_startblock) || 1679 got.br_state != XFS_EXT_NORM) 1680 goto next; 1681 1682 if (XFS_IS_REALTIME_INODE(ip)) 1683 error = xfs_reflink_find_rtshared(mp, tp, &got, 1684 &shared_offset, &shared_len, false); 1685 else 1686 error = xfs_reflink_find_shared(mp, tp, &got, 1687 &shared_offset, &shared_len, false); 1688 if (error) 1689 return error; 1690 1691 /* Is there still a shared block here? */ 1692 if (shared_len) { 1693 *has_shared = true; 1694 return 0; 1695 } 1696 next: 1697 found = xfs_iext_next_extent(ifp, &icur, &got); 1698 } 1699 1700 return 0; 1701 } 1702 1703 /* 1704 * Clear the inode reflink flag if there are no shared extents. 1705 * 1706 * The caller is responsible for joining the inode to the transaction passed in. 1707 * The inode will be joined to the transaction that is returned to the caller. 1708 */ 1709 int 1710 xfs_reflink_clear_inode_flag( 1711 struct xfs_inode *ip, 1712 struct xfs_trans **tpp) 1713 { 1714 bool needs_flag; 1715 int error = 0; 1716 1717 ASSERT(xfs_is_reflink_inode(ip)); 1718 1719 if (!xfs_can_free_cowblocks(ip)) 1720 return 0; 1721 1722 error = xfs_reflink_inode_has_shared_extents(*tpp, ip, &needs_flag); 1723 if (error || needs_flag) 1724 return error; 1725 1726 /* 1727 * We didn't find any shared blocks so turn off the reflink flag. 1728 * First, get rid of any leftover CoW mappings. 1729 */ 1730 error = xfs_reflink_cancel_cow_blocks(ip, tpp, 0, XFS_MAX_FILEOFF, 1731 true); 1732 if (error) 1733 return error; 1734 1735 /* Clear the inode flag. */ 1736 trace_xfs_reflink_unset_inode_flag(ip); 1737 ip->i_diflags2 &= ~XFS_DIFLAG2_REFLINK; 1738 xfs_inode_clear_cowblocks_tag(ip); 1739 xfs_trans_log_inode(*tpp, ip, XFS_ILOG_CORE); 1740 1741 return error; 1742 } 1743 1744 /* 1745 * Clear the inode reflink flag if there are no shared extents and the size 1746 * hasn't changed. 1747 */ 1748 STATIC int 1749 xfs_reflink_try_clear_inode_flag( 1750 struct xfs_inode *ip) 1751 { 1752 struct xfs_mount *mp = ip->i_mount; 1753 struct xfs_trans *tp; 1754 int error = 0; 1755 1756 /* Start a rolling transaction to remove the mappings */ 1757 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0, 0, &tp); 1758 if (error) 1759 return error; 1760 1761 xfs_ilock(ip, XFS_ILOCK_EXCL); 1762 xfs_trans_ijoin(tp, ip, 0); 1763 1764 error = xfs_reflink_clear_inode_flag(ip, &tp); 1765 if (error) 1766 goto cancel; 1767 1768 error = xfs_trans_commit(tp); 1769 if (error) 1770 goto out; 1771 1772 xfs_iunlock(ip, XFS_ILOCK_EXCL); 1773 return 0; 1774 cancel: 1775 xfs_trans_cancel(tp); 1776 out: 1777 xfs_iunlock(ip, XFS_ILOCK_EXCL); 1778 return error; 1779 } 1780 1781 /* 1782 * Pre-COW all shared blocks within a given byte range of a file and turn off 1783 * the reflink flag if we unshare all of the file's blocks. 1784 */ 1785 int 1786 xfs_reflink_unshare( 1787 struct xfs_inode *ip, 1788 xfs_off_t offset, 1789 xfs_off_t len) 1790 { 1791 struct inode *inode = VFS_I(ip); 1792 int error; 1793 1794 if (!xfs_is_reflink_inode(ip)) 1795 return 0; 1796 1797 trace_xfs_reflink_unshare(ip, offset, len); 1798 1799 inode_dio_wait(inode); 1800 1801 if (IS_DAX(inode)) 1802 error = dax_file_unshare(inode, offset, len, 1803 &xfs_dax_write_iomap_ops); 1804 else 1805 error = iomap_file_unshare(inode, offset, len, 1806 &xfs_buffered_write_iomap_ops); 1807 if (error) 1808 goto out; 1809 1810 error = filemap_write_and_wait_range(inode->i_mapping, offset, 1811 offset + len - 1); 1812 if (error) 1813 goto out; 1814 1815 /* Turn off the reflink flag if possible. */ 1816 error = xfs_reflink_try_clear_inode_flag(ip); 1817 if (error) 1818 goto out; 1819 return 0; 1820 1821 out: 1822 trace_xfs_reflink_unshare_error(ip, error, _RET_IP_); 1823 return error; 1824 } 1825 1826 /* 1827 * Can we use reflink with this realtime extent size? Note that we don't check 1828 * for rblocks > 0 here because this can be called as part of attaching a new 1829 * rt section. 1830 */ 1831 bool 1832 xfs_reflink_supports_rextsize( 1833 struct xfs_mount *mp, 1834 unsigned int rextsize) 1835 { 1836 /* reflink on the realtime device requires rtgroups */ 1837 if (!xfs_has_rtgroups(mp)) 1838 return false; 1839 1840 /* 1841 * Reflink doesn't support rt extent size larger than a single fsblock 1842 * because we would have to perform CoW-around for unaligned write 1843 * requests to guarantee that we always remap entire rt extents. 1844 */ 1845 if (rextsize != 1) 1846 return false; 1847 1848 return true; 1849 } 1850