// SPDX-License-Identifier: GPL-2.0

#include <linux/blkdev.h>
#include <linux/iversion.h>
#include "compression.h"
#include "ctree.h"
#include "delalloc-space.h"
#include "disk-io.h"
#include "reflink.h"
#include "transaction.h"
#include "subpage.h"

#define BTRFS_MAX_DEDUPE_LEN	SZ_16M

static int clone_finish_inode_update(struct btrfs_trans_handle *trans,
				     struct inode *inode,
				     u64 endoff,
				     const u64 destoff,
				     const u64 olen,
				     int no_time_update)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret;

	inode_inc_iversion(inode);
	if (!no_time_update) {
		inode->i_mtime = current_time(inode);
		inode->i_ctime = inode->i_mtime;
	}
	/*
	 * We round up to the block size at eof when determining which
	 * extents to clone above, but shouldn't round up the file size.
	 */
	if (endoff > destoff + olen)
		endoff = destoff + olen;
	if (endoff > inode->i_size) {
		i_size_write(inode, endoff);
		btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
	}

	ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		btrfs_end_transaction(trans);
		goto out;
	}
	ret = btrfs_end_transaction(trans);
out:
	return ret;
}

static int copy_inline_to_page(struct btrfs_inode *inode,
			       const u64 file_offset,
			       char *inline_data,
			       const u64 size,
			       const u64 datal,
			       const u8 comp_type)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	const u32 block_size = fs_info->sectorsize;
	const u64 range_end = file_offset + block_size - 1;
	const size_t inline_size = size - btrfs_file_extent_calc_inline_size(0);
	char *data_start = inline_data + btrfs_file_extent_calc_inline_size(0);
	struct extent_changeset *data_reserved = NULL;
	struct page *page = NULL;
	struct address_space *mapping = inode->vfs_inode.i_mapping;
	int ret;

	ASSERT(IS_ALIGNED(file_offset, block_size));

	/*
	 * We have flushed and locked the ranges of the source and destination
	 * inodes, we also have locked the inodes, so we are safe to do a
	 * reservation here. Also we must not do the reservation while holding
	 * a transaction open, otherwise we would deadlock.
	 */
	ret = btrfs_delalloc_reserve_space(inode, &data_reserved, file_offset,
					   block_size);
	if (ret)
		goto out;

	page = find_or_create_page(mapping, file_offset >> PAGE_SHIFT,
				   btrfs_alloc_write_mask(mapping));
	if (!page) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	ret = set_page_extent_mapped(page);
	if (ret < 0)
		goto out_unlock;

	clear_extent_bit(&inode->io_tree, file_offset, range_end,
			 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
			 NULL);
	ret = btrfs_set_extent_delalloc(inode, file_offset, range_end, 0, NULL);
	if (ret)
		goto out_unlock;

	/*
	 * After dirtying the page our caller will need to start a transaction,
	 * and if we are low on metadata free space, that can cause flushing of
	 * delalloc for all inodes in order to get metadata space released.
	 * However we are holding the range locked for the whole duration of
	 * the clone/dedupe operation, so we may deadlock if that happens and no
	 * other task releases enough space. So mark this inode as not being
	 * possible to flush to avoid such deadlock. We will clear that flag
	 * when we finish cloning all extents, since a transaction is started
	 * after finding each extent to clone.
	 */
	set_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &inode->runtime_flags);

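	/*
	 * For uncompressed inline extents we can copy the data straight into
	 * the page. For compressed ones, btrfs_decompress() expands the data
	 * into the page: inline_size is the on-disk (compressed) size and
	 * datal is the uncompressed size.
	 */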
	if (comp_type == BTRFS_COMPRESS_NONE) {
		memcpy_to_page(page, offset_in_page(file_offset), data_start,
			       datal);
	} else {
		ret = btrfs_decompress(comp_type, data_start, page,
				       offset_in_page(file_offset),
				       inline_size, datal);
		if (ret)
			goto out_unlock;
		flush_dcache_page(page);
	}

	/*
	 * If our inline data is smaller than the block/page size, then the
	 * remainder of the block/page is equivalent to zeroes. We had something
	 * like the following done:
	 *
	 * $ xfs_io -f -c "pwrite -S 0xab 0 500" file
	 * $ sync  # (or fsync)
	 * $ xfs_io -c "falloc 0 4K" file
	 * $ xfs_io -c "pwrite -S 0xcd 4K 4K" file
	 *
	 * So what's in the range [500, 4095] corresponds to zeroes.
	 */
	if (datal < block_size)
		memzero_page(page, datal, block_size - datal);

	btrfs_page_set_uptodate(fs_info, page, file_offset, block_size);
	btrfs_page_clear_checked(fs_info, page, file_offset, block_size);
	btrfs_page_set_dirty(fs_info, page, file_offset, block_size);
out_unlock:
	if (page) {
		unlock_page(page);
		put_page(page);
	}
	if (ret)
		btrfs_delalloc_release_space(inode, data_reserved, file_offset,
					     block_size, true);
	btrfs_delalloc_release_extents(inode, block_size);
out:
	extent_changeset_free(data_reserved);

	return ret;
}

/*
 * Deal with cloning of inline extents. We try to copy the inline extent from
 * the source inode to the destination inode when possible. When not possible
 * we copy the inline extent's data into the respective page of the destination
 * inode.
 */
static int clone_copy_inline_extent(struct inode *dst,
				    struct btrfs_path *path,
				    struct btrfs_key *new_key,
				    const u64 drop_start,
				    const u64 datal,
				    const u64 size,
				    const u8 comp_type,
				    char *inline_data,
				    struct btrfs_trans_handle **trans_out)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(dst->i_sb);
	struct btrfs_root *root = BTRFS_I(dst)->root;
	const u64 aligned_end = ALIGN(new_key->offset + datal,
				      fs_info->sectorsize);
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_drop_extents_args drop_args = { 0 };
	int ret;
	struct btrfs_key key;

	if (new_key->offset > 0) {
		ret = copy_inline_to_page(BTRFS_I(dst), new_key->offset,
					  inline_data, size, datal, comp_type);
		goto out;
	}

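	/*
	 * Look up the first file extent item of the destination inode to find
	 * out what, if anything, exists at file offset 0.
	 */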
	key.objectid = btrfs_ino(BTRFS_I(dst));
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0) {
		return ret;
	} else if (ret > 0) {
		if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				return ret;
			else if (ret > 0)
				goto copy_inline_extent;
		}
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		if (key.objectid == btrfs_ino(BTRFS_I(dst)) &&
		    key.type == BTRFS_EXTENT_DATA_KEY) {
			/*
			 * There's an implicit hole at file offset 0, copy the
			 * inline extent's data to the page.
			 */
			ASSERT(key.offset > 0);
			goto copy_to_page;
		}
	} else if (i_size_read(dst) <= datal) {
		struct btrfs_file_extent_item *ei;

		ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
				    struct btrfs_file_extent_item);
		/*
		 * If it's an inline extent replace it with the source inline
		 * extent, otherwise copy the source inline extent data into
		 * the respective page at the destination inode.
		 */
		if (btrfs_file_extent_type(path->nodes[0], ei) ==
		    BTRFS_FILE_EXTENT_INLINE)
			goto copy_inline_extent;

		goto copy_to_page;
	}

copy_inline_extent:
	/*
	 * We have no extent items, or we have an extent at offset 0 which may
	 * or may not be inlined. All these cases are dealt with the same way.
	 */
	if (i_size_read(dst) > datal) {
		/*
		 * At the destination offset 0 we have either a hole, a regular
		 * extent or an inline extent larger than the one we want to
		 * clone. Deal with all these cases by copying the inline extent
		 * data into the respective page at the destination inode.
		 */
		goto copy_to_page;
	}

	/*
	 * Release path before starting a new transaction so we don't hold locks
	 * that would confuse lockdep.
	 */
	btrfs_release_path(path);
	/*
	 * If we end up here it means we are copying the inline extent into a
	 * leaf of the destination inode. We know we will drop or adjust at most
	 * one extent item in the destination root.
	 *
	 * 1 unit - adjusting old extent (we may have to split it)
	 * 1 unit - add new extent
	 * 1 unit - inode update
	 */
	trans = btrfs_start_transaction(root, 3);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		trans = NULL;
		goto out;
	}
	drop_args.path = path;
	drop_args.start = drop_start;
	drop_args.end = aligned_end;
	drop_args.drop_cache = true;
	ret = btrfs_drop_extents(trans, root, BTRFS_I(dst), &drop_args);
	if (ret)
		goto out;
	ret = btrfs_insert_empty_item(trans, root, path, new_key, size);
	if (ret)
		goto out;

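	/*
	 * Copy the source inline extent item's body (the file extent header
	 * plus its inline data) verbatim into the slot we just created.
	 */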
	write_extent_buffer(path->nodes[0], inline_data,
			    btrfs_item_ptr_offset(path->nodes[0],
						  path->slots[0]),
			    size);
	btrfs_update_inode_bytes(BTRFS_I(dst), datal, drop_args.bytes_found);
	btrfs_set_inode_full_sync(BTRFS_I(dst));
	ret = btrfs_inode_set_file_extent_range(BTRFS_I(dst), 0, aligned_end);
out:
	if (!ret && !trans) {
		/*
		 * No transaction here means we copied the inline extent into a
		 * page of the destination inode.
		 *
		 * 1 unit to update inode item
		 */
		trans = btrfs_start_transaction(root, 1);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			trans = NULL;
		}
	}
	if (ret && trans) {
		btrfs_abort_transaction(trans, ret);
		btrfs_end_transaction(trans);
	}
	if (!ret)
		*trans_out = trans;

	return ret;

copy_to_page:
	/*
	 * Release our path because we don't need it anymore and also because
	 * copy_inline_to_page() needs to reserve data and metadata, which may
	 * need to flush delalloc when we are low on available space and
	 * therefore cause a deadlock if writeback of an inline extent needs to
	 * write to the same leaf or an ordered extent completion needs to write
	 * to the same leaf.
	 */
	btrfs_release_path(path);

	ret = copy_inline_to_page(BTRFS_I(dst), new_key->offset,
				  inline_data, size, datal, comp_type);
	goto out;
}

/**
 * btrfs_clone() - clone a range from one file to another
 *
 * @src: Inode to clone from
 * @inode: Inode to clone to
 * @off: Offset within source to start clone from
 * @olen: Original length, passed by user, of range to clone
 * @olen_aligned: Block-aligned value of olen
 * @destoff: Offset within @inode to start clone
 * @no_time_update: Whether to update mtime/ctime on the target inode
 */
static int btrfs_clone(struct inode *src, struct inode *inode,
		       const u64 off, const u64 olen, const u64 olen_aligned,
		       const u64 destoff, int no_time_update)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_path *path = NULL;
	struct extent_buffer *leaf;
	struct btrfs_trans_handle *trans;
	char *buf = NULL;
	struct btrfs_key key;
	u32 nritems;
	int slot;
	int ret;
	const u64 len = olen_aligned;
	u64 last_dest_end = destoff;
	u64 prev_extent_end = off;

	ret = -ENOMEM;
	buf = kvmalloc(fs_info->nodesize, GFP_KERNEL);
	if (!buf)
		return ret;

	path = btrfs_alloc_path();
	if (!path) {
		kvfree(buf);
		return ret;
	}

	path->reada = READA_FORWARD;
	/* Clone data */
	key.objectid = btrfs_ino(BTRFS_I(src));
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = off;

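	/*
	 * Walk the source file extent items that intersect [off, off + len).
	 * Each iteration searches the tree again from the last processed
	 * offset, because the path (and its locks) is dropped before we
	 * modify the destination inode.
	 */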
	while (1) {
		struct btrfs_file_extent_item *extent;
		u64 extent_gen;
		int type;
		u32 size;
		struct btrfs_key new_key;
		u64 disko = 0, diskl = 0;
		u64 datao = 0, datal = 0;
		u8 comp;
		u64 drop_start;

		/* Note the key will change type as we walk through the tree */
		ret = btrfs_search_slot(NULL, BTRFS_I(src)->root, &key, path,
					0, 0);
		if (ret < 0)
			goto out;
		/*
		 * First search, if no extent item that starts at offset off was
		 * found but the previous item is an extent item, it's possible
		 * it might overlap our target range, therefore process it.
		 */
		if (key.offset == off && ret > 0 && path->slots[0] > 0) {
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      path->slots[0] - 1);
			if (key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}

		nritems = btrfs_header_nritems(path->nodes[0]);
process_slot:
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(BTRFS_I(src)->root, path);
			if (ret < 0)
				goto out;
			if (ret > 0)
				break;
			nritems = btrfs_header_nritems(path->nodes[0]);
		}
		leaf = path->nodes[0];
		slot = path->slots[0];

		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.type > BTRFS_EXTENT_DATA_KEY ||
		    key.objectid != btrfs_ino(BTRFS_I(src)))
			break;

		ASSERT(key.type == BTRFS_EXTENT_DATA_KEY);

		extent = btrfs_item_ptr(leaf, slot,
					struct btrfs_file_extent_item);
		extent_gen = btrfs_file_extent_generation(leaf, extent);
		comp = btrfs_file_extent_compression(leaf, extent);
		type = btrfs_file_extent_type(leaf, extent);
		if (type == BTRFS_FILE_EXTENT_REG ||
		    type == BTRFS_FILE_EXTENT_PREALLOC) {
			disko = btrfs_file_extent_disk_bytenr(leaf, extent);
			diskl = btrfs_file_extent_disk_num_bytes(leaf, extent);
			datao = btrfs_file_extent_offset(leaf, extent);
			datal = btrfs_file_extent_num_bytes(leaf, extent);
		} else if (type == BTRFS_FILE_EXTENT_INLINE) {
			/* Take upper bound, may be compressed */
			datal = btrfs_file_extent_ram_bytes(leaf, extent);
		}

		/*
		 * The first search might have left us at an extent item that
		 * ends before our target range's start, which can happen if we
		 * have holes and the NO_HOLES feature enabled.
		 *
		 * Subsequent searches may leave us on a file range we have
		 * processed before - this happens due to a race with ordered
		 * extent completion for a file range that is outside our source
		 * range, but that range was part of a file extent item that
		 * also covered a leading part of our source range.
		 */
		if (key.offset + datal <= prev_extent_end) {
			path->slots[0]++;
			goto process_slot;
		} else if (key.offset >= off + len) {
			break;
		}

		prev_extent_end = key.offset + datal;
		size = btrfs_item_size(leaf, slot);
		read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf, slot),
				   size);

		btrfs_release_path(path);

		memcpy(&new_key, &key, sizeof(new_key));
		new_key.objectid = btrfs_ino(BTRFS_I(inode));
		if (off <= key.offset)
			new_key.offset = key.offset + destoff - off;
		else
			new_key.offset = destoff;

		/*
		 * Deal with a hole that doesn't have an extent item that
		 * represents it (NO_HOLES feature enabled).
		 * This hole is either in the middle of the cloning range or at
		 * the beginning (fully overlaps it or partially overlaps it).
		 */
		if (new_key.offset != last_dest_end)
			drop_start = last_dest_end;
		else
			drop_start = new_key.offset;

		if (type == BTRFS_FILE_EXTENT_REG ||
		    type == BTRFS_FILE_EXTENT_PREALLOC) {
			struct btrfs_replace_extent_info clone_info;

			/*
			 *    a  | --- range to clone ---|  b
			 * | ------------- extent ------------- |
			 */

			/* Subtract range b */
			if (key.offset + datal > off + len)
				datal = off + len - key.offset;

			/* Subtract range a */
			if (off > key.offset) {
				datao += off - key.offset;
				datal -= off - key.offset;
			}

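			/*
			 * Describe the shared extent: the destination will get
			 * a file extent item pointing at the same disk bytenr,
			 * with the data offset and length adjusted above.
			 */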
			clone_info.disk_offset = disko;
			clone_info.disk_len = diskl;
			clone_info.data_offset = datao;
			clone_info.data_len = datal;
			clone_info.file_offset = new_key.offset;
			clone_info.extent_buf = buf;
			clone_info.is_new_extent = false;
			clone_info.update_times = !no_time_update;
			ret = btrfs_replace_file_extents(BTRFS_I(inode), path,
					drop_start, new_key.offset + datal - 1,
					&clone_info, &trans);
			if (ret)
				goto out;
		} else {
			ASSERT(type == BTRFS_FILE_EXTENT_INLINE);
			/*
			 * Inline extents always have to start at file offset 0
			 * and can never be bigger than the sector size. We can
			 * never clone only parts of an inline extent, since all
			 * reflink operations must start at a sector size aligned
			 * offset, and the length must be aligned too or end at
			 * the i_size (which implies the whole inlined data).
			 */
			ASSERT(key.offset == 0);
			ASSERT(datal <= fs_info->sectorsize);
			if (WARN_ON(type != BTRFS_FILE_EXTENT_INLINE) ||
			    WARN_ON(key.offset != 0) ||
			    WARN_ON(datal > fs_info->sectorsize)) {
				ret = -EUCLEAN;
				goto out;
			}

			ret = clone_copy_inline_extent(inode, path, &new_key,
						       drop_start, datal, size,
						       comp, buf, &trans);
			if (ret)
				goto out;
		}

		btrfs_release_path(path);

		/*
		 * Whenever we share an extent we update the last_reflink_trans
		 * of each inode to the current transaction. This is needed to
		 * make sure fsync does not log multiple checksum items with
		 * overlapping ranges (because some extent items might refer
		 * only to sections of the original extent). For the destination
		 * inode we do this regardless of the generation of the extents
		 * or even if they are inline extents or explicit holes, to make
		 * sure a full fsync does not skip them. For the source inode,
		 * we only need to update last_reflink_trans in case it's a new
		 * extent that is not a hole or an inline extent, to deal with
		 * the checksums problem on fsync.
		 */
		if (extent_gen == trans->transid && disko > 0)
			BTRFS_I(src)->last_reflink_trans = trans->transid;

		BTRFS_I(inode)->last_reflink_trans = trans->transid;

		last_dest_end = ALIGN(new_key.offset + datal,
				      fs_info->sectorsize);
		ret = clone_finish_inode_update(trans, inode, last_dest_end,
						destoff, olen, no_time_update);
		if (ret)
			goto out;
		if (new_key.offset + datal >= destoff + len)
			break;

		btrfs_release_path(path);
		key.offset = prev_extent_end;

		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			goto out;
		}

		cond_resched();
	}
	ret = 0;

	if (last_dest_end < destoff + len) {
		/*
		 * We have an implicit hole that fully or partially overlaps our
		 * cloning range at its end. This means that we either have the
		 * NO_HOLES feature enabled or the implicit hole happened due to
		 * mixing buffered and direct IO writes against this file.
		 */
		btrfs_release_path(path);

		/*
		 * When using NO_HOLES and we are cloning a range that covers
		 * only a hole (no extents) into a range beyond the current
		 * i_size, punching a hole in the target range will not create
		 * an extent map defining a hole, because the range starts at or
		 * beyond current i_size. If the file previously had an i_size
		 * greater than the new i_size set by this clone operation, we
		 * need to make sure the next fsync is a full fsync, so that it
		 * detects and logs a hole covering a range from the current
		 * i_size to the new i_size. If the clone range covers extents,
		 * besides a hole, then we know the full sync flag was already
		 * set by previous calls to btrfs_replace_file_extents() that
		 * replaced file extent items.
		 */
		if (last_dest_end >= i_size_read(inode))
			btrfs_set_inode_full_sync(BTRFS_I(inode));

		ret = btrfs_replace_file_extents(BTRFS_I(inode), path,
				last_dest_end, destoff + len - 1, NULL, &trans);
		if (ret)
			goto out;

		ret = clone_finish_inode_update(trans, inode, destoff + len,
						destoff, olen, no_time_update);
	}

out:
	btrfs_free_path(path);
	kvfree(buf);
	clear_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &BTRFS_I(inode)->runtime_flags);

	return ret;
}

static void btrfs_double_extent_unlock(struct inode *inode1, u64 loff1,
				       struct inode *inode2, u64 loff2, u64 len)
{
	unlock_extent(&BTRFS_I(inode1)->io_tree, loff1, loff1 + len - 1, NULL);
	unlock_extent(&BTRFS_I(inode2)->io_tree, loff2, loff2 + len - 1, NULL);
}

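/*
 * Lock the two ranges in a consistent order (by inode address, and by offset
 * for ranges within the same inode) so that concurrent remap operations
 * cannot deadlock on each other's extent locks.
 */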
static void btrfs_double_extent_lock(struct inode *inode1, u64 loff1,
				     struct inode *inode2, u64 loff2, u64 len)
{
	u64 range1_end = loff1 + len - 1;
	u64 range2_end = loff2 + len - 1;

	if (inode1 < inode2) {
		swap(inode1, inode2);
		swap(loff1, loff2);
		swap(range1_end, range2_end);
	} else if (inode1 == inode2 && loff2 < loff1) {
		swap(loff1, loff2);
		swap(range1_end, range2_end);
	}

	lock_extent(&BTRFS_I(inode1)->io_tree, loff1, range1_end, NULL);
	lock_extent(&BTRFS_I(inode2)->io_tree, loff2, range2_end, NULL);

	btrfs_assert_inode_range_clean(BTRFS_I(inode1), loff1, range1_end);
	btrfs_assert_inode_range_clean(BTRFS_I(inode2), loff2, range2_end);
}

static void btrfs_double_mmap_lock(struct inode *inode1, struct inode *inode2)
{
	if (inode1 < inode2)
		swap(inode1, inode2);
	down_write(&BTRFS_I(inode1)->i_mmap_lock);
	down_write_nested(&BTRFS_I(inode2)->i_mmap_lock, SINGLE_DEPTH_NESTING);
}

static void btrfs_double_mmap_unlock(struct inode *inode1, struct inode *inode2)
{
	up_write(&BTRFS_I(inode1)->i_mmap_lock);
	up_write(&BTRFS_I(inode2)->i_mmap_lock);
}

static int btrfs_extent_same_range(struct inode *src, u64 loff, u64 len,
				   struct inode *dst, u64 dst_loff)
{
	struct btrfs_fs_info *fs_info = BTRFS_I(src)->root->fs_info;
	const u64 bs = fs_info->sb->s_blocksize;
	int ret;

	/*
	 * Lock destination range to serialize with concurrent readahead() and
	 * source range to serialize with relocation.
	 */
	btrfs_double_extent_lock(src, loff, dst, dst_loff, len);
	ret = btrfs_clone(src, dst, loff, len, ALIGN(len, bs), dst_loff, 1);
	btrfs_double_extent_unlock(src, loff, dst, dst_loff, len);

	btrfs_btree_balance_dirty(fs_info);

	return ret;
}

static int btrfs_extent_same(struct inode *src, u64 loff, u64 olen,
			     struct inode *dst, u64 dst_loff)
{
	int ret = 0;
	u64 i, tail_len, chunk_count;
	struct btrfs_root *root_dst = BTRFS_I(dst)->root;

	spin_lock(&root_dst->root_item_lock);
	if (root_dst->send_in_progress) {
		btrfs_warn_rl(root_dst->fs_info,
"cannot deduplicate to root %llu while send operations are using it (%d in progress)",
			      root_dst->root_key.objectid,
			      root_dst->send_in_progress);
		spin_unlock(&root_dst->root_item_lock);
		return -EAGAIN;
	}
	root_dst->dedupe_in_progress++;
	spin_unlock(&root_dst->root_item_lock);

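	/*
	 * Process the range in chunks of at most BTRFS_MAX_DEDUPE_LEN (16M)
	 * each, with any remaining tail handled as a final, shorter chunk.
	 */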
	tail_len = olen % BTRFS_MAX_DEDUPE_LEN;
	chunk_count = div_u64(olen, BTRFS_MAX_DEDUPE_LEN);

	for (i = 0; i < chunk_count; i++) {
		ret = btrfs_extent_same_range(src, loff, BTRFS_MAX_DEDUPE_LEN,
					      dst, dst_loff);
		if (ret)
			goto out;

		loff += BTRFS_MAX_DEDUPE_LEN;
		dst_loff += BTRFS_MAX_DEDUPE_LEN;
	}

	if (tail_len > 0)
		ret = btrfs_extent_same_range(src, loff, tail_len, dst, dst_loff);
out:
	spin_lock(&root_dst->root_item_lock);
	root_dst->dedupe_in_progress--;
	spin_unlock(&root_dst->root_item_lock);

	return ret;
}

static noinline int btrfs_clone_files(struct file *file, struct file *file_src,
				      u64 off, u64 olen, u64 destoff)
{
	struct inode *inode = file_inode(file);
	struct inode *src = file_inode(file_src);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	int ret;
	int wb_ret;
	u64 len = olen;
	u64 bs = fs_info->sb->s_blocksize;

	/*
	 * VFS's generic_remap_file_range_prep() protects us from cloning the
	 * eof block into the middle of a file, which would result in corruption
	 * if the file size is not blocksize aligned. So we don't need to check
	 * for that case here.
	 */
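	/*
	 * When the source range ends at i_size, round the length up to the
	 * block size so the final, possibly partial, block is cloned as well.
	 */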
	if (off + len == src->i_size)
		len = ALIGN(src->i_size, bs) - off;

	if (destoff > inode->i_size) {
		const u64 wb_start = ALIGN_DOWN(inode->i_size, bs);

		ret = btrfs_cont_expand(BTRFS_I(inode), inode->i_size, destoff);
		if (ret)
			return ret;
		/*
		 * We may have truncated the last block if the inode's size is
		 * not sector size aligned, so we need to wait for writeback to
		 * complete before proceeding further, otherwise we can race
		 * with cloning and attempt to increment a reference to an
		 * extent that no longer exists (writeback completed right after
		 * we found the previous extent covering eof and before we
		 * attempted to increment its reference count).
		 */
		ret = btrfs_wait_ordered_range(inode, wb_start,
					       destoff - wb_start);
		if (ret)
			return ret;
	}

	/*
	 * Lock destination range to serialize with concurrent readahead() and
	 * source range to serialize with relocation.
	 */
	btrfs_double_extent_lock(src, off, inode, destoff, len);
	ret = btrfs_clone(src, inode, off, olen, len, destoff, 0);
	btrfs_double_extent_unlock(src, off, inode, destoff, len);

	/*
	 * We may have copied an inline extent into a page of the destination
	 * range, so wait for writeback to complete before truncating pages
	 * from the page cache. This is a rare case.
	 */
	wb_ret = btrfs_wait_ordered_range(inode, destoff, len);
	ret = ret ? ret : wb_ret;
	/*
	 * Truncate page cache pages so that future reads will see the cloned
	 * data immediately and not the previous data.
	 */
	truncate_inode_pages_range(&inode->i_data,
				   round_down(destoff, PAGE_SIZE),
				   round_up(destoff + len, PAGE_SIZE) - 1);

	btrfs_btree_balance_dirty(fs_info);

	return ret;
}

static int btrfs_remap_file_range_prep(struct file *file_in, loff_t pos_in,
				       struct file *file_out, loff_t pos_out,
				       loff_t *len, unsigned int remap_flags)
{
	struct inode *inode_in = file_inode(file_in);
	struct inode *inode_out = file_inode(file_out);
	u64 bs = BTRFS_I(inode_out)->root->fs_info->sb->s_blocksize;
	u64 wb_len;
	int ret;

	if (!(remap_flags & REMAP_FILE_DEDUP)) {
		struct btrfs_root *root_out = BTRFS_I(inode_out)->root;

		if (btrfs_root_readonly(root_out))
			return -EROFS;

		ASSERT(inode_in->i_sb == inode_out->i_sb);
	}

	/* Don't make the dst file partly checksummed */
	if ((BTRFS_I(inode_in)->flags & BTRFS_INODE_NODATASUM) !=
	    (BTRFS_I(inode_out)->flags & BTRFS_INODE_NODATASUM)) {
		return -EINVAL;
	}

	/*
	 * Now that the inodes are locked, we need to start writeback ourselves
	 * and can not rely on the writeback from the VFS's generic helper
	 * generic_remap_file_range_prep() because:
	 *
	 * 1) For compression we must call filemap_fdatawrite_range() twice
	 *    (btrfs_fdatawrite_range() does it for us), and the generic helper
	 *    only calls it once;
	 *
	 * 2) filemap_fdatawrite_range(), called by the generic helper, only
	 *    waits for the writeback to complete, i.e. for IO to be done, and
	 *    not for the ordered extents to complete. We need to wait for them
	 *    to complete so that new file extent items are in the fs tree.
	 */
	if (*len == 0 && !(remap_flags & REMAP_FILE_DEDUP))
		wb_len = ALIGN(inode_in->i_size, bs) - ALIGN_DOWN(pos_in, bs);
	else
		wb_len = ALIGN(*len, bs);

	/*
	 * Workaround to make sure NOCOW buffered writes reach disk as NOCOW.
	 *
	 * Btrfs' back references do not have a block level granularity, they
	 * work at the whole extent level.
	 * NOCOW buffered writes without data space reserved may not be able
	 * to fall back to CoW due to lack of data space, thus could cause
	 * data loss.
	 *
	 * Here we take a shortcut by flushing the whole inode, so that all
	 * NOCOW writes reach disk as NOCOW before we increase the reference
	 * of the extent. We could do better by only flushing NOCOW data, but
	 * that needs extra accounting.
	 *
	 * Also we don't need to check ASYNC_EXTENT, as async extents will be
	 * CoWed anyway, not affecting the NOCOW part.
	 */
	ret = filemap_flush(inode_in->i_mapping);
	if (ret < 0)
		return ret;

	ret = btrfs_wait_ordered_range(inode_in, ALIGN_DOWN(pos_in, bs),
				       wb_len);
	if (ret < 0)
		return ret;
	ret = btrfs_wait_ordered_range(inode_out, ALIGN_DOWN(pos_out, bs),
				       wb_len);
	if (ret < 0)
		return ret;

	return generic_remap_file_range_prep(file_in, pos_in, file_out, pos_out,
					     len, remap_flags);
}

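/*
 * Return true if writes to this file must be synchronous, either because it
 * was opened with O_SYNC/O_DSYNC or because the inode has the S_SYNC
 * attribute set.
 */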
static bool file_sync_write(const struct file *file)
{
	if (file->f_flags & (__O_SYNC | O_DSYNC))
		return true;
	if (IS_SYNC(file_inode(file)))
		return true;

	return false;
}

loff_t btrfs_remap_file_range(struct file *src_file, loff_t off,
			      struct file *dst_file, loff_t destoff,
			      loff_t len, unsigned int remap_flags)
{
	struct inode *src_inode = file_inode(src_file);
	struct inode *dst_inode = file_inode(dst_file);
	bool same_inode = dst_inode == src_inode;
	int ret;

	if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
		return -EINVAL;

	if (same_inode) {
		btrfs_inode_lock(src_inode, BTRFS_ILOCK_MMAP);
	} else {
		lock_two_nondirectories(src_inode, dst_inode);
		btrfs_double_mmap_lock(src_inode, dst_inode);
	}

	ret = btrfs_remap_file_range_prep(src_file, off, dst_file, destoff,
					  &len, remap_flags);
	if (ret < 0 || len == 0)
		goto out_unlock;

	if (remap_flags & REMAP_FILE_DEDUP)
		ret = btrfs_extent_same(src_inode, off, len, dst_inode, destoff);
	else
		ret = btrfs_clone_files(dst_file, src_file, off, len, destoff);

out_unlock:
	if (same_inode) {
		btrfs_inode_unlock(src_inode, BTRFS_ILOCK_MMAP);
	} else {
		btrfs_double_mmap_unlock(src_inode, dst_inode);
		unlock_two_nondirectories(src_inode, dst_inode);
	}

	/*
	 * If either the source or the destination file was opened with O_SYNC,
	 * O_DSYNC or has the S_SYNC attribute, fsync both the destination and
	 * source files/ranges, so that a successful return (0) followed by a
	 * power failure still results in the reflinked data being readable
	 * from both files/ranges.
	 */
	if (ret == 0 && len > 0 &&
	    (file_sync_write(src_file) || file_sync_write(dst_file))) {
		ret = btrfs_sync_file(src_file, off, off + len - 1, 0);
		if (ret == 0)
			ret = btrfs_sync_file(dst_file, destoff,
					      destoff + len - 1, 0);
	}

	return ret < 0 ? ret : len;
}