// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2012 Alexander Block.  All rights reserved.
 */

#include <linux/bsearch.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/sort.h>
#include <linux/mount.h>
#include <linux/xattr.h>
#include <linux/posix_acl_xattr.h>
#include <linux/radix-tree.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/compat.h>
#include <linux/crc32c.h>
#include <linux/fsverity.h>

#include "send.h"
#include "ctree.h"
#include "backref.h"
#include "locking.h"
#include "disk-io.h"
#include "btrfs_inode.h"
#include "transaction.h"
#include "compression.h"
#include "xattr.h"
#include "print-tree.h"
#include "accessors.h"
#include "dir-item.h"
#include "file-item.h"
#include "ioctl.h"
#include "verity.h"
#include "lru_cache.h"

/*
 * Maximum number of references an extent can have in order for us to attempt to
 * issue clone operations instead of write operations. This currently exists to
 * avoid hitting limitations of the backreference walking code (taking a lot of
 * time and using too much memory for extents with large number of references).
 */
#define SEND_MAX_EXTENT_REFS	1024

/*
 * A fs_path is a helper to dynamically build path names with unknown size.
 * It reallocates the internal buffer on demand.
 * It allows fast adding of path elements on the right side (normal path) and
 * fast adding to the left side (reversed path). A reversed path can also be
 * unreversed if needed.
 */
struct fs_path {
	union {
		struct {
			char *start;
			char *end;

			char *buf;
			unsigned short buf_len:15;
			unsigned short reversed:1;
			char inline_buf[];
		};
		/*
		 * Average path length does not exceed 200 bytes, we'll have
		 * better packing in the slab and higher chance to satisfy
		 * an allocation later during send.
		 */
		char pad[256];
	};
};
#define FS_PATH_INLINE_SIZE \
	(sizeof(struct fs_path) - offsetof(struct fs_path, inline_buf))


/* reused for each extent */
struct clone_root {
	struct btrfs_root *root;
	u64 ino;
	u64 offset;
	u64 num_bytes;
	bool found_ref;
};

#define SEND_MAX_NAME_CACHE_SIZE	256

/*
 * Limit the root_ids array of struct backref_cache_entry to 17 elements.
 * This makes the size of a cache entry to be exactly 192 bytes on x86_64, which
 * can be satisfied from the kmalloc-192 slab, without wasting any space.
 * The most common case is to have a single root for cloning, which corresponds
 * to the send root. Having the user specify more than 16 clone roots is not
 * common, and in such rare cases we simply don't use caching if the number of
 * cloning roots that lead down to a leaf is more than 17.
 */
#define SEND_MAX_BACKREF_CACHE_ROOTS	17

/*
 * Max number of entries in the cache.
 * With SEND_MAX_BACKREF_CACHE_ROOTS as 17, the size in bytes, excluding
 * maple tree's internal nodes, is 24K.
 */
#define SEND_MAX_BACKREF_CACHE_SIZE	128

/*
 * A backref cache entry maps a leaf to a list of IDs of roots from which the
 * leaf is accessible and we can use for clone operations.
 * With SEND_MAX_BACKREF_CACHE_ROOTS as 17, each cache entry is 192 bytes (on
 * x86_64).
 */
struct backref_cache_entry {
	struct btrfs_lru_cache_entry entry;
	u64 root_ids[SEND_MAX_BACKREF_CACHE_ROOTS];
	/* Number of valid elements in the root_ids array. */
	int num_roots;
};
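
/*
 * Example (illustrative, with made-up numbers): a leaf at disk bytenr B that
 * is reachable from the send root (id 257) and from one clone root (id 258)
 * is cached as a single entry with entry.key == B >> sectorsize_bits and
 * root_ids == { 257, 258 }, num_roots == 2.
 */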

/* See the comment at lru_cache.h about struct btrfs_lru_cache_entry. */
static_assert(offsetof(struct backref_cache_entry, entry) == 0);

/*
 * Max number of entries in the cache that stores directories that were already
 * created. The cache uses raw struct btrfs_lru_cache_entry entries, so it uses
 * at most 4096 bytes - sizeof(struct btrfs_lru_cache_entry) is 48 bytes, but
 * the kmalloc-64 slab is used, so we get 4096 bytes (64 bytes * 64).
 */
#define SEND_MAX_DIR_CREATED_CACHE_SIZE	64

/*
 * Max number of entries in the cache that stores directories whose utimes
 * (timestamps) update still needs to be sent. The cache uses raw struct
 * btrfs_lru_cache_entry entries, so it uses at most 4096 bytes -
 * sizeof(struct btrfs_lru_cache_entry) is 48 bytes, but the kmalloc-64 slab
 * is used, so we get 4096 bytes (64 bytes * 64).
 */
#define SEND_MAX_DIR_UTIMES_CACHE_SIZE	64

struct send_ctx {
	struct file *send_filp;
	loff_t send_off;
	char *send_buf;
	u32 send_size;
	u32 send_max_size;
	/*
	 * Whether BTRFS_SEND_A_DATA attribute was already added to current
	 * command (since protocol v2, data must be the last attribute).
	 */
	bool put_data;
	struct page **send_buf_pages;
	u64 flags;	/* 'flags' member of btrfs_ioctl_send_args is u64 */
	/* Protocol version compatibility requested */
	u32 proto;

	struct btrfs_root *send_root;
	struct btrfs_root *parent_root;
	struct clone_root *clone_roots;
	int clone_roots_cnt;

	/* current state of the compare_tree call */
	struct btrfs_path *left_path;
	struct btrfs_path *right_path;
	struct btrfs_key *cmp_key;

	/*
	 * Keep track of the generation of the last transaction that was used
	 * for relocating a block group. This is periodically checked in order
	 * to detect if a relocation happened since the last check, so that we
	 * don't operate on stale extent buffers for nodes (level >= 1) or on
	 * stale disk_bytenr values of file extent items.
	 */
	u64 last_reloc_trans;

	/*
	 * Info about the currently processed inode. In case of deleted inodes,
	 * these are the values from the deleted inode.
	 */
	u64 cur_ino;
	u64 cur_inode_gen;
	u64 cur_inode_size;
	u64 cur_inode_mode;
	u64 cur_inode_rdev;
	u64 cur_inode_last_extent;
	u64 cur_inode_next_write_offset;
	bool cur_inode_new;
	bool cur_inode_new_gen;
	bool cur_inode_deleted;
	bool ignore_cur_inode;
	bool cur_inode_needs_verity;
	void *verity_descriptor;

	u64 send_progress;

	struct list_head new_refs;
	struct list_head deleted_refs;

	struct btrfs_lru_cache name_cache;
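
	/*
	 * (Illustrative note) Entries in the name cache are keyed by
	 * (inode number, generation) - see name_cache_search() and struct
	 * name_cache_entry below - and the cache is bounded, presumably by
	 * SEND_MAX_NAME_CACHE_SIZE defined above.
	 */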

	/*
	 * The inode we are currently processing. It's not NULL only when we
	 * need to issue write commands for data extents from this inode.
	 */
	struct inode *cur_inode;
	struct file_ra_state ra;
	u64 page_cache_clear_start;
	bool clean_page_cache;

	/*
	 * We process inodes in increasing order, so if before an
	 * incremental send we reverse the parent/child relationship of
	 * directories such that a directory with a lower inode number was
	 * the parent of a directory with a higher inode number, and the one
	 * becoming the new parent got renamed too, we can't rename/move the
	 * directory with lower inode number when we finish processing it - we
	 * must process the directory with higher inode number first, then
	 * rename/move it and then rename/move the directory with lower inode
	 * number. Example follows.
	 *
	 * Tree state when the first send was performed:
	 *
	 * .
	 * |-- a                   (ino 257)
	 *     |-- b               (ino 258)
	 *         |
	 *         |
	 *         |-- c           (ino 259)
	 *         |   |-- d       (ino 260)
	 *         |
	 *         |-- c2          (ino 261)
	 *
	 * Tree state when the second (incremental) send is performed:
	 *
	 * .
	 * |-- a                   (ino 257)
	 *     |-- b               (ino 258)
	 *         |-- c2          (ino 261)
	 *             |-- d2      (ino 260)
	 *                 |-- cc  (ino 259)
	 *
	 * The sequence of steps that led to the second state was:
	 *
	 * mv /a/b/c/d /a/b/c2/d2
	 * mv /a/b/c /a/b/c2/d2/cc
	 *
	 * "c" has lower inode number, but we can't move it (2nd mv operation)
	 * before we move "d", which has higher inode number.
	 *
	 * So we just memorize which move/rename operations must be performed
	 * later when their respective parent is processed and moved/renamed.
	 */

	/* Indexed by parent directory inode number. */
	struct rb_root pending_dir_moves;

	/*
	 * Reverse index, indexed by the inode number of a directory that
	 * is waiting for the move/rename of its immediate parent before its
	 * own move/rename can be performed.
	 */
	struct rb_root waiting_dir_moves;

	/*
	 * A directory that is going to be rm'ed might have a child directory
	 * which is in the pending directory moves index above. In this case,
	 * the directory can only be removed after the move/rename of its child
	 * is performed. Example:
	 *
	 * Parent snapshot:
	 *
	 * .                        (ino 256)
	 * |-- a/                   (ino 257)
	 *     |-- b/               (ino 258)
	 *         |-- c/           (ino 259)
	 *         |   |-- x/       (ino 260)
	 *         |
	 *         |-- y/           (ino 261)
	 *
	 * Send snapshot:
	 *
	 * .                        (ino 256)
	 * |-- a/                   (ino 257)
	 *     |-- b/               (ino 258)
	 *         |-- YY/          (ino 261)
	 *              |-- x/      (ino 260)
	 *
	 * Sequence of steps that led to the send snapshot:
	 * rm -f /a/b/c/foo.txt
	 * mv /a/b/y /a/b/YY
	 * mv /a/b/c/x /a/b/YY
	 * rmdir /a/b/c
	 *
	 * When the child is processed, its move/rename is delayed until its
	 * parent is processed (as explained above), but all other operations
	 * like update utimes, chown, chgrp, etc, are performed and the paths
	 * that it uses for those operations must use the orphanized name of
	 * its parent (the directory we're going to rm later), so we need to
	 * memorize that name.
	 *
	 * Indexed by the inode number of the directory to be deleted.
	 */
	struct rb_root orphan_dirs;

	struct rb_root rbtree_new_refs;
	struct rb_root rbtree_deleted_refs;

	struct btrfs_lru_cache backref_cache;
	u64 backref_cache_last_reloc_trans;

	struct btrfs_lru_cache dir_created_cache;
	struct btrfs_lru_cache dir_utimes_cache;
};

struct pending_dir_move {
	struct rb_node node;
	struct list_head list;
	u64 parent_ino;
	u64 ino;
	u64 gen;
	struct list_head update_refs;
};

struct waiting_dir_move {
	struct rb_node node;
	u64 ino;
	/*
	 * There might be some directory that could not be removed because it
	 * was waiting for this directory inode to be moved first. Therefore
	 * after this directory is moved, we can try to rmdir the ino rmdir_ino.
	 */
	u64 rmdir_ino;
	u64 rmdir_gen;
	bool orphanized;
};

struct orphan_dir_info {
	struct rb_node node;
	u64 ino;
	u64 gen;
	u64 last_dir_index_offset;
	u64 dir_high_seq_ino;
};

struct name_cache_entry {
	/*
	 * The key in the entry is an inode number, and the generation matches
	 * the inode's generation.
	 */
	struct btrfs_lru_cache_entry entry;
	u64 parent_ino;
	u64 parent_gen;
	int ret;
	int need_later_update;
	int name_len;
	char name[];
};

/* See the comment at lru_cache.h about struct btrfs_lru_cache_entry. */
static_assert(offsetof(struct name_cache_entry, entry) == 0);

#define ADVANCE			1
#define ADVANCE_ONLY_NEXT	-1

enum btrfs_compare_tree_result {
	BTRFS_COMPARE_TREE_NEW,
	BTRFS_COMPARE_TREE_DELETED,
	BTRFS_COMPARE_TREE_CHANGED,
	BTRFS_COMPARE_TREE_SAME,
};

__cold
static void inconsistent_snapshot_error(struct send_ctx *sctx,
					enum btrfs_compare_tree_result result,
					const char *what)
{
	const char *result_string;

	switch (result) {
	case BTRFS_COMPARE_TREE_NEW:
		result_string = "new";
		break;
	case BTRFS_COMPARE_TREE_DELETED:
		result_string = "deleted";
		break;
	case BTRFS_COMPARE_TREE_CHANGED:
		result_string = "updated";
		break;
	case BTRFS_COMPARE_TREE_SAME:
		ASSERT(0);
		result_string = "unchanged";
		break;
	default:
		ASSERT(0);
		result_string = "unexpected";
	}

	btrfs_err(sctx->send_root->fs_info,
		  "Send: inconsistent snapshot, found %s %s for inode %llu without updated inode item, send root is %llu, parent root is %llu",
		  result_string, what, sctx->cmp_key->objectid,
		  sctx->send_root->root_key.objectid,
		  (sctx->parent_root ?
		   sctx->parent_root->root_key.objectid : 0));
}
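
/*
 * (Illustrative) Protocol gating: with sctx->proto == 2, commands up to
 * BTRFS_SEND_C_MAX_V2 (e.g. the v2 encoded-write command) are accepted and
 * anything newer is rejected, as checked by proto_cmd_ok() below.
 */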

__maybe_unused
static bool proto_cmd_ok(const struct send_ctx *sctx, int cmd)
{
	switch (sctx->proto) {
	case 1:	 return cmd <= BTRFS_SEND_C_MAX_V1;
	case 2:	 return cmd <= BTRFS_SEND_C_MAX_V2;
	case 3:	 return cmd <= BTRFS_SEND_C_MAX_V3;
	default: return false;
	}
}

static int is_waiting_for_move(struct send_ctx *sctx, u64 ino);

static struct waiting_dir_move *
get_waiting_dir_move(struct send_ctx *sctx, u64 ino);

static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino, u64 gen);

static int need_send_hole(struct send_ctx *sctx)
{
	return (sctx->parent_root && !sctx->cur_inode_new &&
		!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted &&
		S_ISREG(sctx->cur_inode_mode));
}

static void fs_path_reset(struct fs_path *p)
{
	if (p->reversed) {
		p->start = p->buf + p->buf_len - 1;
		p->end = p->start;
		*p->start = 0;
	} else {
		p->start = p->buf;
		p->end = p->start;
		*p->start = 0;
	}
}

static struct fs_path *fs_path_alloc(void)
{
	struct fs_path *p;

	p = kmalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return NULL;
	p->reversed = 0;
	p->buf = p->inline_buf;
	p->buf_len = FS_PATH_INLINE_SIZE;
	fs_path_reset(p);
	return p;
}

static struct fs_path *fs_path_alloc_reversed(void)
{
	struct fs_path *p;

	p = fs_path_alloc();
	if (!p)
		return NULL;
	p->reversed = 1;
	fs_path_reset(p);
	return p;
}

static void fs_path_free(struct fs_path *p)
{
	if (!p)
		return;
	if (p->buf != p->inline_buf)
		kfree(p->buf);
	kfree(p);
}

static int fs_path_len(struct fs_path *p)
{
	return p->end - p->start;
}
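
/*
 * (Illustrative) With a reversed fs_path, components are prepended: adding
 * "c", then "b", then "a" builds "a/b/c". fs_path_prepare_for_add() below
 * inserts the '/' separators, and fs_path_unreverse() can later normalize
 * the buffer layout if a left-aligned string is needed.
 */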

static int fs_path_ensure_buf(struct fs_path *p, int len)
{
	char *tmp_buf;
	int path_len;
	int old_buf_len;

	len++;

	if (p->buf_len >= len)
		return 0;

	if (len > PATH_MAX) {
		WARN_ON(1);
		return -ENOMEM;
	}

	path_len = p->end - p->start;
	old_buf_len = p->buf_len;

	/*
	 * Allocate to the next largest kmalloc bucket size, to let
	 * the fast path happen most of the time.
	 */
	len = kmalloc_size_roundup(len);
	/*
	 * First time the inline_buf does not suffice
	 */
	if (p->buf == p->inline_buf) {
		tmp_buf = kmalloc(len, GFP_KERNEL);
		if (tmp_buf)
			memcpy(tmp_buf, p->buf, old_buf_len);
	} else {
		tmp_buf = krealloc(p->buf, len, GFP_KERNEL);
	}
	if (!tmp_buf)
		return -ENOMEM;
	p->buf = tmp_buf;
	p->buf_len = len;

	if (p->reversed) {
		tmp_buf = p->buf + old_buf_len - path_len - 1;
		p->end = p->buf + p->buf_len - 1;
		p->start = p->end - path_len;
		memmove(p->start, tmp_buf, path_len + 1);
	} else {
		p->start = p->buf;
		p->end = p->start + path_len;
	}
	return 0;
}

static int fs_path_prepare_for_add(struct fs_path *p, int name_len,
				   char **prepared)
{
	int ret;
	int new_len;

	new_len = p->end - p->start + name_len;
	if (p->start != p->end)
		new_len++;
	ret = fs_path_ensure_buf(p, new_len);
	if (ret < 0)
		goto out;

	if (p->reversed) {
		if (p->start != p->end)
			*--p->start = '/';
		p->start -= name_len;
		*prepared = p->start;
	} else {
		if (p->start != p->end)
			*p->end++ = '/';
		*prepared = p->end;
		p->end += name_len;
		*p->end = 0;
	}

out:
	return ret;
}

static int fs_path_add(struct fs_path *p, const char *name, int name_len)
{
	int ret;
	char *prepared;

	ret = fs_path_prepare_for_add(p, name_len, &prepared);
	if (ret < 0)
		goto out;
	memcpy(prepared, name, name_len);

out:
	return ret;
}

static int fs_path_add_path(struct fs_path *p, struct fs_path *p2)
{
	int ret;
	char *prepared;

	ret = fs_path_prepare_for_add(p, p2->end - p2->start, &prepared);
	if (ret < 0)
		goto out;
	memcpy(prepared, p2->start, p2->end - p2->start);

out:
	return ret;
}

static int fs_path_add_from_extent_buffer(struct fs_path *p,
					  struct extent_buffer *eb,
					  unsigned long off, int len)
{
	int ret;
	char *prepared;

	ret = fs_path_prepare_for_add(p, len, &prepared);
	if (ret < 0)
		goto out;

	read_extent_buffer(eb, prepared, off, len);

out:
	return ret;
}

static int fs_path_copy(struct fs_path *p, struct fs_path *from)
{
	p->reversed = from->reversed;
	fs_path_reset(p);

	return fs_path_add_path(p, from);
}

static void fs_path_unreverse(struct fs_path *p)
{
	char *tmp;
	int len;

	if (!p->reversed)
		return;

	tmp = p->start;
	len = p->end - p->start;
	p->start = p->buf;
	p->end = p->start + len;
	memmove(p->start, tmp, len + 1);
	p->reversed = 0;
}

static struct btrfs_path *alloc_path_for_send(void)
{
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return NULL;
	path->search_commit_root = 1;
	path->skip_locking = 1;
	path->need_commit_sem = 1;
	return path;
}

static int write_buf(struct file *filp, const void *buf, u32 len, loff_t *off)
{
	int ret;
	u32 pos = 0;

	while (pos < len) {
		ret = kernel_write(filp, buf + pos, len - pos, off);
		if (ret < 0)
			return ret;
		if (ret == 0)
			return -EIO;
		pos += ret;
	}

	return 0;
}
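
/*
 * (Illustrative) Every attribute appended below is a TLV: a struct
 * btrfs_tlv_header holding a little-endian 16-bit type and 16-bit length,
 * immediately followed by the payload bytes, written back to back into
 * sctx->send_buf.
 */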

static int tlv_put(struct send_ctx *sctx, u16 attr, const void *data, int len)
{
	struct btrfs_tlv_header *hdr;
	int total_len = sizeof(*hdr) + len;
	int left = sctx->send_max_size - sctx->send_size;

	if (WARN_ON_ONCE(sctx->put_data))
		return -EINVAL;

	if (unlikely(left < total_len))
		return -EOVERFLOW;

	hdr = (struct btrfs_tlv_header *) (sctx->send_buf + sctx->send_size);
	put_unaligned_le16(attr, &hdr->tlv_type);
	put_unaligned_le16(len, &hdr->tlv_len);
	memcpy(hdr + 1, data, len);
	sctx->send_size += total_len;

	return 0;
}

#define TLV_PUT_DEFINE_INT(bits) \
	static int tlv_put_u##bits(struct send_ctx *sctx,		\
				   u##bits attr, u##bits value)		\
	{								\
		__le##bits __tmp = cpu_to_le##bits(value);		\
		return tlv_put(sctx, attr, &__tmp, sizeof(__tmp));	\
	}

TLV_PUT_DEFINE_INT(8)
TLV_PUT_DEFINE_INT(32)
TLV_PUT_DEFINE_INT(64)

static int tlv_put_string(struct send_ctx *sctx, u16 attr,
			  const char *str, int len)
{
	if (len == -1)
		len = strlen(str);
	return tlv_put(sctx, attr, str, len);
}

static int tlv_put_uuid(struct send_ctx *sctx, u16 attr,
			const u8 *uuid)
{
	return tlv_put(sctx, attr, uuid, BTRFS_UUID_SIZE);
}

static int tlv_put_btrfs_timespec(struct send_ctx *sctx, u16 attr,
				  struct extent_buffer *eb,
				  struct btrfs_timespec *ts)
{
	struct btrfs_timespec bts;

	read_extent_buffer(eb, &bts, (unsigned long)ts, sizeof(bts));
	return tlv_put(sctx, attr, &bts, sizeof(bts));
}


#define TLV_PUT(sctx, attrtype, data, attrlen) \
	do { \
		ret = tlv_put(sctx, attrtype, data, attrlen); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)

#define TLV_PUT_INT(sctx, attrtype, bits, value) \
	do { \
		ret = tlv_put_u##bits(sctx, attrtype, value); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)

#define TLV_PUT_U8(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 8, data)
#define TLV_PUT_U16(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 16, data)
#define TLV_PUT_U32(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 32, data)
#define TLV_PUT_U64(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 64, data)
#define TLV_PUT_STRING(sctx, attrtype, str, len) \
	do { \
		ret = tlv_put_string(sctx, attrtype, str, len); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)
#define TLV_PUT_PATH(sctx, attrtype, p) \
	do { \
		ret = tlv_put_string(sctx, attrtype, p->start, \
				     p->end - p->start); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)
#define TLV_PUT_UUID(sctx, attrtype, uuid) \
	do { \
		ret = tlv_put_uuid(sctx, attrtype, uuid); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)
#define TLV_PUT_BTRFS_TIMESPEC(sctx, attrtype, eb, ts) \
	do { \
		ret = tlv_put_btrfs_timespec(sctx, attrtype, eb, ts); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)

static int send_header(struct send_ctx *sctx)
{
	struct btrfs_stream_header hdr;

	strcpy(hdr.magic, BTRFS_SEND_STREAM_MAGIC);
	hdr.version = cpu_to_le32(sctx->proto);
	return write_buf(sctx->send_filp, &hdr, sizeof(hdr),
			 &sctx->send_off);
}
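
/*
 * (Illustrative) A complete command is framed as: begin_cmd() reserves a
 * struct btrfs_cmd_header at the start of the send buffer, the TLV_PUT*()
 * helpers append attributes after it, and send_cmd() then fills in the
 * payload length and a crc32c computed over the whole buffer with the crc
 * field zeroed, before writing everything out with write_buf().
 */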

/*
 * For each command/item we want to send to userspace, we call this function.
 */
static int begin_cmd(struct send_ctx *sctx, int cmd)
{
	struct btrfs_cmd_header *hdr;

	if (WARN_ON(!sctx->send_buf))
		return -EINVAL;

	BUG_ON(sctx->send_size);

	sctx->send_size += sizeof(*hdr);
	hdr = (struct btrfs_cmd_header *)sctx->send_buf;
	put_unaligned_le16(cmd, &hdr->cmd);

	return 0;
}

static int send_cmd(struct send_ctx *sctx)
{
	int ret;
	struct btrfs_cmd_header *hdr;
	u32 crc;

	hdr = (struct btrfs_cmd_header *)sctx->send_buf;
	put_unaligned_le32(sctx->send_size - sizeof(*hdr), &hdr->len);
	put_unaligned_le32(0, &hdr->crc);

	crc = btrfs_crc32c(0, (unsigned char *)sctx->send_buf, sctx->send_size);
	put_unaligned_le32(crc, &hdr->crc);

	ret = write_buf(sctx->send_filp, sctx->send_buf, sctx->send_size,
			&sctx->send_off);

	sctx->send_size = 0;
	sctx->put_data = false;

	return ret;
}

/*
 * Sends a move instruction to user space
 */
static int send_rename(struct send_ctx *sctx,
		       struct fs_path *from, struct fs_path *to)
{
	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
	int ret;

	btrfs_debug(fs_info, "send_rename %s -> %s", from->start, to->start);

	ret = begin_cmd(sctx, BTRFS_SEND_C_RENAME);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, from);
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_TO, to);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	return ret;
}

/*
 * Sends a link instruction to user space
 */
static int send_link(struct send_ctx *sctx,
		     struct fs_path *path, struct fs_path *lnk)
{
	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
	int ret;

	btrfs_debug(fs_info, "send_link %s -> %s", path->start, lnk->start);

	ret = begin_cmd(sctx, BTRFS_SEND_C_LINK);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, lnk);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	return ret;
}

/*
 * Sends an unlink instruction to user space
 */
static int send_unlink(struct send_ctx *sctx, struct fs_path *path)
{
	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
	int ret;

	btrfs_debug(fs_info, "send_unlink %s", path->start);

	ret = begin_cmd(sctx, BTRFS_SEND_C_UNLINK);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	return ret;
}

/*
 * Sends an rmdir instruction to user space
 */
static int send_rmdir(struct send_ctx *sctx, struct fs_path *path)
{
	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
	int ret;

	btrfs_debug(fs_info, "send_rmdir %s", path->start);

	ret = begin_cmd(sctx, BTRFS_SEND_C_RMDIR);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	return ret;
}

struct btrfs_inode_info {
	u64 size;
	u64 gen;
	u64 mode;
	u64 uid;
	u64 gid;
	u64 rdev;
	u64 fileattr;
	u64 nlink;
};

/*
 * Helper function to retrieve some fields from an inode item.
 */
static int get_inode_info(struct btrfs_root *root, u64 ino,
			  struct btrfs_inode_info *info)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_inode_item *ii;
	struct btrfs_key key;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	key.objectid = ino;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		goto out;
	}

	if (!info)
		goto out;

	ii = btrfs_item_ptr(path->nodes[0], path->slots[0],
			    struct btrfs_inode_item);
	info->size = btrfs_inode_size(path->nodes[0], ii);
	info->gen = btrfs_inode_generation(path->nodes[0], ii);
	info->mode = btrfs_inode_mode(path->nodes[0], ii);
	info->uid = btrfs_inode_uid(path->nodes[0], ii);
	info->gid = btrfs_inode_gid(path->nodes[0], ii);
	info->rdev = btrfs_inode_rdev(path->nodes[0], ii);
	info->nlink = btrfs_inode_nlink(path->nodes[0], ii);
	/*
	 * Transfer the unchanged u64 value of btrfs_inode_item::flags, that's
	 * otherwise logically split to 32/32 parts.
	 */
	info->fileattr = btrfs_inode_flags(path->nodes[0], ii);

out:
	btrfs_free_path(path);
	return ret;
}

static int get_inode_gen(struct btrfs_root *root, u64 ino, u64 *gen)
{
	int ret;
	struct btrfs_inode_info info = { 0 };

	ASSERT(gen);

	ret = get_inode_info(root, ino, &info);
	*gen = info.gen;
	return ret;
}

typedef int (*iterate_inode_ref_t)(int num, u64 dir, int index,
				   struct fs_path *p,
				   void *ctx);
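
/*
 * (Illustrative) Item layout handled by iterate_inode_ref() below: an
 * INODE_REF item is a sequence of struct btrfs_inode_ref headers, each
 * immediately followed by its name bytes, with the parent directory taken
 * from the key offset; an INODE_EXTREF entry additionally carries the parent
 * directory objectid inside the item (btrfs_inode_extref_parent()).
 */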

/*
 * Helper function to iterate the entries in ONE btrfs_inode_ref or
 * btrfs_inode_extref.
 * The iterate callback may return a non-zero value to stop iteration. This can
 * be a negative value for error codes or 1 to simply stop it.
 *
 * path must point to the INODE_REF or INODE_EXTREF when called.
 */
static int iterate_inode_ref(struct btrfs_root *root, struct btrfs_path *path,
			     struct btrfs_key *found_key, int resolve,
			     iterate_inode_ref_t iterate, void *ctx)
{
	struct extent_buffer *eb = path->nodes[0];
	struct btrfs_inode_ref *iref;
	struct btrfs_inode_extref *extref;
	struct btrfs_path *tmp_path;
	struct fs_path *p;
	u32 cur = 0;
	u32 total;
	int slot = path->slots[0];
	u32 name_len;
	char *start;
	int ret = 0;
	int num = 0;
	int index;
	u64 dir;
	unsigned long name_off;
	unsigned long elem_size;
	unsigned long ptr;

	p = fs_path_alloc_reversed();
	if (!p)
		return -ENOMEM;

	tmp_path = alloc_path_for_send();
	if (!tmp_path) {
		fs_path_free(p);
		return -ENOMEM;
	}


	if (found_key->type == BTRFS_INODE_REF_KEY) {
		ptr = (unsigned long)btrfs_item_ptr(eb, slot,
						    struct btrfs_inode_ref);
		total = btrfs_item_size(eb, slot);
		elem_size = sizeof(*iref);
	} else {
		ptr = btrfs_item_ptr_offset(eb, slot);
		total = btrfs_item_size(eb, slot);
		elem_size = sizeof(*extref);
	}

	while (cur < total) {
		fs_path_reset(p);

		if (found_key->type == BTRFS_INODE_REF_KEY) {
			iref = (struct btrfs_inode_ref *)(ptr + cur);
			name_len = btrfs_inode_ref_name_len(eb, iref);
			name_off = (unsigned long)(iref + 1);
			index = btrfs_inode_ref_index(eb, iref);
			dir = found_key->offset;
		} else {
			extref = (struct btrfs_inode_extref *)(ptr + cur);
			name_len = btrfs_inode_extref_name_len(eb, extref);
			name_off = (unsigned long)&extref->name;
			index = btrfs_inode_extref_index(eb, extref);
			dir = btrfs_inode_extref_parent(eb, extref);
		}

		if (resolve) {
			start = btrfs_ref_to_path(root, tmp_path, name_len,
						  name_off, eb, dir,
						  p->buf, p->buf_len);
			if (IS_ERR(start)) {
				ret = PTR_ERR(start);
				goto out;
			}
			if (start < p->buf) {
				/* overflow, try again with a larger buffer */
				ret = fs_path_ensure_buf(p,
						p->buf_len + p->buf - start);
				if (ret < 0)
					goto out;
				start = btrfs_ref_to_path(root, tmp_path,
							  name_len, name_off,
							  eb, dir,
							  p->buf, p->buf_len);
				if (IS_ERR(start)) {
					ret = PTR_ERR(start);
					goto out;
				}
				BUG_ON(start < p->buf);
			}
			p->start = start;
		} else {
			ret = fs_path_add_from_extent_buffer(p, eb, name_off,
							     name_len);
			if (ret < 0)
				goto out;
		}

		cur += elem_size + name_len;
		ret = iterate(num, dir, index, p, ctx);
		if (ret)
			goto out;
		num++;
	}

out:
	btrfs_free_path(tmp_path);
	fs_path_free(p);
	return ret;
}

typedef int (*iterate_dir_item_t)(int num, struct btrfs_key *di_key,
				  const char *name, int name_len,
				  const char *data, int data_len,
				  void *ctx);
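
/*
 * (Illustrative) Each entry walked by iterate_dir_item() below is a struct
 * btrfs_dir_item header immediately followed by name_len name bytes and
 * data_len data bytes (data_len is only non-zero for xattr items), so the
 * cursor advances by sizeof(*di) + name_len + data_len per entry.
 */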

/*
 * Helper function to iterate the entries in ONE btrfs_dir_item.
 * The iterate callback may return a non-zero value to stop iteration. This can
 * be a negative value for error codes or 1 to simply stop it.
 *
 * path must point to the dir item when called.
 */
static int iterate_dir_item(struct btrfs_root *root, struct btrfs_path *path,
			    iterate_dir_item_t iterate, void *ctx)
{
	int ret = 0;
	struct extent_buffer *eb;
	struct btrfs_dir_item *di;
	struct btrfs_key di_key;
	char *buf = NULL;
	int buf_len;
	u32 name_len;
	u32 data_len;
	u32 cur;
	u32 len;
	u32 total;
	int slot;
	int num;

	/*
	 * Start with a small buffer (1 page). If later we end up needing more
	 * space, which can happen for xattrs on a fs with a leaf size greater
	 * than the page size, attempt to increase the buffer. Typically xattr
	 * values are small.
	 */
	buf_len = PATH_MAX;
	buf = kmalloc(buf_len, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out;
	}

	eb = path->nodes[0];
	slot = path->slots[0];
	di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
	cur = 0;
	len = 0;
	total = btrfs_item_size(eb, slot);

	num = 0;
	while (cur < total) {
		name_len = btrfs_dir_name_len(eb, di);
		data_len = btrfs_dir_data_len(eb, di);
		btrfs_dir_item_key_to_cpu(eb, di, &di_key);

		if (btrfs_dir_ftype(eb, di) == BTRFS_FT_XATTR) {
			if (name_len > XATTR_NAME_MAX) {
				ret = -ENAMETOOLONG;
				goto out;
			}
			if (name_len + data_len >
					BTRFS_MAX_XATTR_SIZE(root->fs_info)) {
				ret = -E2BIG;
				goto out;
			}
		} else {
			/*
			 * Path too long
			 */
			if (name_len + data_len > PATH_MAX) {
				ret = -ENAMETOOLONG;
				goto out;
			}
		}

		if (name_len + data_len > buf_len) {
			buf_len = name_len + data_len;
			if (is_vmalloc_addr(buf)) {
				vfree(buf);
				buf = NULL;
			} else {
				char *tmp = krealloc(buf, buf_len,
						GFP_KERNEL | __GFP_NOWARN);

				if (!tmp)
					kfree(buf);
				buf = tmp;
			}
			if (!buf) {
				buf = kvmalloc(buf_len, GFP_KERNEL);
				if (!buf) {
					ret = -ENOMEM;
					goto out;
				}
			}
		}

		read_extent_buffer(eb, buf, (unsigned long)(di + 1),
				   name_len + data_len);

		len = sizeof(*di) + name_len + data_len;
		di = (struct btrfs_dir_item *)((char *)di + len);
		cur += len;

		ret = iterate(num, &di_key, buf, name_len, buf + name_len,
			      data_len, ctx);
		if (ret < 0)
			goto out;
		if (ret) {
			ret = 0;
			goto out;
		}

		num++;
	}

out:
	kvfree(buf);
	return ret;
}

static int __copy_first_ref(int num, u64 dir, int index,
			    struct fs_path *p, void *ctx)
{
	int ret;
	struct fs_path *pt = ctx;

	ret = fs_path_copy(pt, p);
	if (ret < 0)
		return ret;

	/* we want the first only */
	return 1;
}

/*
 * Retrieve the first path of an inode. If an inode has more than one
 * ref/hardlink, this is ignored.
 */
static int get_inode_path(struct btrfs_root *root,
			  u64 ino, struct fs_path *path)
{
	int ret;
	struct btrfs_key key, found_key;
	struct btrfs_path *p;

	p = alloc_path_for_send();
	if (!p)
		return -ENOMEM;

	fs_path_reset(path);

	key.objectid = ino;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = 0;

	ret = btrfs_search_slot_for_read(root, &key, p, 1, 0);
	if (ret < 0)
		goto out;
	if (ret) {
		ret = 1;
		goto out;
	}
	btrfs_item_key_to_cpu(p->nodes[0], &found_key, p->slots[0]);
	if (found_key.objectid != ino ||
	    (found_key.type != BTRFS_INODE_REF_KEY &&
	     found_key.type != BTRFS_INODE_EXTREF_KEY)) {
		ret = -ENOENT;
		goto out;
	}

	ret = iterate_inode_ref(root, p, &found_key, 1,
				__copy_first_ref, path);
	if (ret < 0)
		goto out;
	ret = 0;

out:
	btrfs_free_path(p);
	return ret;
}

struct backref_ctx {
	struct send_ctx *sctx;

	/* number of total found references */
	u64 found;

	/*
	 * used for clones found in send_root. clones found behind cur_objectid
	 * and cur_offset are not considered as allowed clones.
	 */
	u64 cur_objectid;
	u64 cur_offset;

	/* may be truncated in case it's the last extent in a file */
	u64 extent_len;

	/* The bytenr the file extent item we are processing refers to. */
	u64 bytenr;
	/* The owner (root id) of the data backref for the current extent. */
	u64 backref_owner;
	/* The offset of the data backref for the current extent. */
	u64 backref_offset;
};

static int __clone_root_cmp_bsearch(const void *key, const void *elt)
{
	u64 root = (u64)(uintptr_t)key;
	const struct clone_root *cr = elt;

	if (root < cr->root->root_key.objectid)
		return -1;
	if (root > cr->root->root_key.objectid)
		return 1;
	return 0;
}

static int __clone_root_cmp_sort(const void *e1, const void *e2)
{
	const struct clone_root *cr1 = e1;
	const struct clone_root *cr2 = e2;

	if (cr1->root->root_key.objectid < cr2->root->root_key.objectid)
		return -1;
	if (cr1->root->root_key.objectid > cr2->root->root_key.objectid)
		return 1;
	return 0;
}
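
/*
 * (Illustrative) sctx->clone_roots is kept sorted by root objectid using
 * __clone_root_cmp_sort(), so callers locate a candidate root with bsearch(),
 * e.g.:
 *
 *	struct clone_root *cr = bsearch((void *)(uintptr_t)root_id,
 *					sctx->clone_roots,
 *					sctx->clone_roots_cnt,
 *					sizeof(struct clone_root),
 *					__clone_root_cmp_bsearch);
 */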

/*
 * Called for every backref that is found for the current extent.
 * Results are collected in sctx->clone_roots->ino/offset.
 */
static int iterate_backrefs(u64 ino, u64 offset, u64 num_bytes, u64 root_id,
			    void *ctx_)
{
	struct backref_ctx *bctx = ctx_;
	struct clone_root *clone_root;

	/* First check if the root is in the list of accepted clone sources */
	clone_root = bsearch((void *)(uintptr_t)root_id, bctx->sctx->clone_roots,
			     bctx->sctx->clone_roots_cnt,
			     sizeof(struct clone_root),
			     __clone_root_cmp_bsearch);
	if (!clone_root)
		return 0;

	/* This is our own reference, bail out as we can't clone from it. */
	if (clone_root->root == bctx->sctx->send_root &&
	    ino == bctx->cur_objectid &&
	    offset == bctx->cur_offset)
		return 0;

	/*
	 * Make sure we don't consider clones from send_root that are
	 * behind the current inode/offset.
	 */
	if (clone_root->root == bctx->sctx->send_root) {
		/*
		 * If the source inode was not yet processed we can't issue a
		 * clone operation, as the source extent does not exist yet at
		 * the destination of the stream.
		 */
		if (ino > bctx->cur_objectid)
			return 0;
		/*
		 * We clone from the inode currently being sent as long as the
		 * source extent is already processed, otherwise we could try
		 * to clone from an extent that does not exist yet at the
		 * destination of the stream.
		 */
		if (ino == bctx->cur_objectid &&
		    offset + bctx->extent_len >
		    bctx->sctx->cur_inode_next_write_offset)
			return 0;
	}

	bctx->found++;
	clone_root->found_ref = true;

	/*
	 * If the given backref refers to a file extent item with a larger
	 * number of bytes than what we found before, use the new one so that
	 * we clone more optimally and end up doing fewer writes and getting
	 * fewer exclusive, non-shared extents at the destination.
	 */
	if (num_bytes > clone_root->num_bytes) {
		clone_root->ino = ino;
		clone_root->offset = offset;
		clone_root->num_bytes = num_bytes;

		/*
		 * Found a perfect candidate, so there's no need to continue
		 * backref walking.
		 */
		if (num_bytes >= bctx->extent_len)
			return BTRFS_ITERATE_EXTENT_INODES_STOP;
	}

	return 0;
}

static bool lookup_backref_cache(u64 leaf_bytenr, void *ctx,
				 const u64 **root_ids_ret, int *root_count_ret)
{
	struct backref_ctx *bctx = ctx;
	struct send_ctx *sctx = bctx->sctx;
	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
	const u64 key = leaf_bytenr >> fs_info->sectorsize_bits;
	struct btrfs_lru_cache_entry *raw_entry;
	struct backref_cache_entry *entry;

	if (btrfs_lru_cache_size(&sctx->backref_cache) == 0)
		return false;

	/*
	 * If relocation happened since we first filled the cache, then we must
	 * empty the cache and cannot use it, because even though we operate on
	 * read-only roots, their leaves and nodes may have been reallocated and
	 * now be used for different nodes/leaves of the same tree or some other
	 * tree.
	 *
	 * We are called from iterate_extent_inodes() while either holding a
	 * transaction handle or holding fs_info->commit_root_sem, so no need
	 * to take any lock here.
	 */
	if (fs_info->last_reloc_trans > sctx->backref_cache_last_reloc_trans) {
		btrfs_lru_cache_clear(&sctx->backref_cache);
		return false;
	}

	raw_entry = btrfs_lru_cache_lookup(&sctx->backref_cache, key, 0);
	if (!raw_entry)
		return false;

	entry = container_of(raw_entry, struct backref_cache_entry, entry);
	*root_ids_ret = entry->root_ids;
	*root_count_ret = entry->num_roots;

	return true;
}

static void store_backref_cache(u64 leaf_bytenr, const struct ulist *root_ids,
				void *ctx)
{
	struct backref_ctx *bctx = ctx;
	struct send_ctx *sctx = bctx->sctx;
	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
	struct backref_cache_entry *new_entry;
	struct ulist_iterator uiter;
	struct ulist_node *node;
	int ret;

	/*
	 * We're called while holding a transaction handle or while holding
	 * fs_info->commit_root_sem (at iterate_extent_inodes()), so must do a
	 * NOFS allocation.
	 */
	new_entry = kmalloc(sizeof(struct backref_cache_entry), GFP_NOFS);
	/* No worries, cache is optional. */
	if (!new_entry)
		return;

	new_entry->entry.key = leaf_bytenr >> fs_info->sectorsize_bits;
	new_entry->entry.gen = 0;
	new_entry->num_roots = 0;
	ULIST_ITER_INIT(&uiter);
	while ((node = ulist_next(root_ids, &uiter)) != NULL) {
		const u64 root_id = node->val;
		struct clone_root *root;

		root = bsearch((void *)(uintptr_t)root_id, sctx->clone_roots,
			       sctx->clone_roots_cnt, sizeof(struct clone_root),
			       __clone_root_cmp_bsearch);
		if (!root)
			continue;

		/* Too many roots, just exit, no worries as caching is optional. */
		if (new_entry->num_roots >= SEND_MAX_BACKREF_CACHE_ROOTS) {
			kfree(new_entry);
			return;
		}

		new_entry->root_ids[new_entry->num_roots] = root_id;
		new_entry->num_roots++;
	}

	/*
	 * We may not have added any roots to the new cache entry, which means
	 * none of the roots is part of the list of roots from which we are
	 * allowed to clone. Cache the new entry as it's still useful to avoid
	 * backref walking to determine which roots have a path to the leaf.
	 *
	 * Also use GFP_NOFS because we're called while holding a transaction
	 * handle or while holding fs_info->commit_root_sem.
	 */
	ret = btrfs_lru_cache_store(&sctx->backref_cache, &new_entry->entry,
				    GFP_NOFS);
	ASSERT(ret == 0 || ret == -ENOMEM);
	if (ret) {
		/* Caching is optional, no worries. */
		kfree(new_entry);
		return;
	}

	/*
	 * We are called from iterate_extent_inodes() while either holding a
	 * transaction handle or holding fs_info->commit_root_sem, so no need
	 * to take any lock here.
	 */
	if (btrfs_lru_cache_size(&sctx->backref_cache) == 1)
		sctx->backref_cache_last_reloc_trans = fs_info->last_reloc_trans;
}

static int check_extent_item(u64 bytenr, const struct btrfs_extent_item *ei,
			     const struct extent_buffer *leaf, void *ctx)
{
	const u64 refs = btrfs_extent_refs(leaf, ei);
	const struct backref_ctx *bctx = ctx;
	const struct send_ctx *sctx = bctx->sctx;

	if (bytenr == bctx->bytenr) {
		const u64 flags = btrfs_extent_flags(leaf, ei);

		if (WARN_ON(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK))
			return -EUCLEAN;

		/*
		 * If we have only one reference and only the send root as a
		 * clone source - meaning no clone roots were given in the
		 * struct btrfs_ioctl_send_args passed to the send ioctl - then
		 * it's our reference and there's no point in doing backref
		 * walking which is expensive, so exit early.
		 */
		if (refs == 1 && sctx->clone_roots_cnt == 1)
			return -ENOENT;
	}

	/*
	 * Backreference walking (iterate_extent_inodes() below) is currently
	 * too expensive when an extent has a large number of references, both
	 * in time spent and used memory. So for now just fall back to write
	 * operations instead of clone operations when an extent has more than
	 * a certain amount of references.
	 */
	if (refs > SEND_MAX_EXTENT_REFS)
		return -ENOENT;

	return 0;
}

static bool skip_self_data_ref(u64 root, u64 ino, u64 offset, void *ctx)
{
	const struct backref_ctx *bctx = ctx;

	if (ino == bctx->cur_objectid &&
	    root == bctx->backref_owner &&
	    offset == bctx->backref_offset)
		return true;

	return false;
}

/*
 * Given an inode, offset and extent item, it finds a good clone for a clone
 * instruction. Returns -ENOENT when none could be found. The function makes
 * sure that the returned clone is usable at the point where sending is at the
 * moment. This means that no clones are accepted which lie behind the current
 * inode+offset.
 *
 * path must point to the extent item when called.
 */
static int find_extent_clone(struct send_ctx *sctx,
			     struct btrfs_path *path,
			     u64 ino, u64 data_offset,
			     u64 ino_size,
			     struct clone_root **found)
{
	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
	int ret;
	int extent_type;
	u64 logical;
	u64 disk_byte;
	u64 num_bytes;
	struct btrfs_file_extent_item *fi;
	struct extent_buffer *eb = path->nodes[0];
	struct backref_ctx backref_ctx = { 0 };
	struct btrfs_backref_walk_ctx backref_walk_ctx = { 0 };
	struct clone_root *cur_clone_root;
	int compressed;
	u32 i;

	/*
	 * With fallocate we can get prealloc extents beyond the inode's i_size,
	 * so we don't do anything here because clone operations cannot clone
	 * to a range beyond i_size without increasing the i_size of the
	 * destination inode.
	 */
	if (data_offset >= ino_size)
		return 0;

	fi = btrfs_item_ptr(eb, path->slots[0], struct btrfs_file_extent_item);
	extent_type = btrfs_file_extent_type(eb, fi);
	if (extent_type == BTRFS_FILE_EXTENT_INLINE)
		return -ENOENT;

	disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
	if (disk_byte == 0)
		return -ENOENT;

	compressed = btrfs_file_extent_compression(eb, fi);
	num_bytes = btrfs_file_extent_num_bytes(eb, fi);
	logical = disk_byte + btrfs_file_extent_offset(eb, fi);

	/*
	 * Setup the clone roots.
	 */
	for (i = 0; i < sctx->clone_roots_cnt; i++) {
		cur_clone_root = sctx->clone_roots + i;
		cur_clone_root->ino = (u64)-1;
		cur_clone_root->offset = 0;
		cur_clone_root->num_bytes = 0;
		cur_clone_root->found_ref = false;
	}

	backref_ctx.sctx = sctx;
	backref_ctx.cur_objectid = ino;
	backref_ctx.cur_offset = data_offset;
	backref_ctx.bytenr = disk_byte;
	/*
	 * Use the header owner and not the send root's id, because in case of a
	 * snapshot we can have shared subtrees.
	 */
	backref_ctx.backref_owner = btrfs_header_owner(eb);
	backref_ctx.backref_offset = data_offset - btrfs_file_extent_offset(eb, fi);

	/*
	 * The last extent of a file may be too large due to page alignment.
	 * We need to adjust extent_len in this case so that the checks in
	 * iterate_backrefs() work.
	 */
	if (data_offset + num_bytes >= ino_size)
		backref_ctx.extent_len = ino_size - data_offset;
	else
		backref_ctx.extent_len = num_bytes;

	/*
	 * Now collect all backrefs.
	 */
	backref_walk_ctx.bytenr = disk_byte;
	if (compressed == BTRFS_COMPRESS_NONE)
		backref_walk_ctx.extent_item_pos = btrfs_file_extent_offset(eb, fi);
	backref_walk_ctx.fs_info = fs_info;
	backref_walk_ctx.cache_lookup = lookup_backref_cache;
	backref_walk_ctx.cache_store = store_backref_cache;
	backref_walk_ctx.indirect_ref_iterator = iterate_backrefs;
	backref_walk_ctx.check_extent_item = check_extent_item;
	backref_walk_ctx.user_ctx = &backref_ctx;

	/*
	 * If we have a single clone root, then it's the send root and we can
	 * tell the backref walking code to skip our own backref and not
	 * resolve it, since we cannot use it for cloning - the source and
	 * destination ranges can't overlap and in case the leaf is shared
	 * through a subtree due to snapshots, we can't use those other roots
	 * since they are not in the list of clone roots.
	 */
	if (sctx->clone_roots_cnt == 1)
		backref_walk_ctx.skip_data_ref = skip_self_data_ref;

	ret = iterate_extent_inodes(&backref_walk_ctx, true, iterate_backrefs,
				    &backref_ctx);
	if (ret < 0)
		return ret;

	down_read(&fs_info->commit_root_sem);
	if (fs_info->last_reloc_trans > sctx->last_reloc_trans) {
		/*
		 * A transaction commit for a transaction in which block group
		 * relocation was done just happened.
		 * The disk_bytenr of the file extent item we processed is
		 * possibly stale, referring to the extent's location before
		 * relocation. So act as if we haven't found any clone sources
		 * and fall back to write commands, which will read the correct
		 * data from the new extent location. Otherwise we will fail
		 * below because we haven't found our own back reference or we
		 * could be getting incorrect sources in case the old extent
		 * was already reallocated after the relocation.
		 */
		up_read(&fs_info->commit_root_sem);
		return -ENOENT;
	}
	up_read(&fs_info->commit_root_sem);

	btrfs_debug(fs_info,
		    "find_extent_clone: data_offset=%llu, ino=%llu, num_bytes=%llu, logical=%llu",
		    data_offset, ino, num_bytes, logical);

	if (!backref_ctx.found) {
		btrfs_debug(fs_info, "no clones found");
		return -ENOENT;
	}

	cur_clone_root = NULL;
	for (i = 0; i < sctx->clone_roots_cnt; i++) {
		struct clone_root *clone_root = &sctx->clone_roots[i];

		if (!clone_root->found_ref)
			continue;

		/*
		 * Choose the root from which we can clone more bytes, to
		 * minimize write operations and therefore have more extent
		 * sharing at the destination (the same as in the source).
		 */
		if (!cur_clone_root ||
		    clone_root->num_bytes > cur_clone_root->num_bytes) {
			cur_clone_root = clone_root;

			/*
			 * We found an optimal clone candidate (any inode from
			 * any root is fine), so we're done.
			 */
			if (clone_root->num_bytes >= backref_ctx.extent_len)
				break;
		}
	}

	if (cur_clone_root) {
		*found = cur_clone_root;
		ret = 0;
	} else {
		ret = -ENOENT;
	}

	return ret;
}

static int read_symlink(struct btrfs_root *root,
			u64 ino,
			struct fs_path *dest)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_file_extent_item *ei;
	u8 type;
	u8 compression;
	unsigned long off;
	int len;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	key.objectid = ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret) {
		/*
		 * An empty symlink inode. Can happen in rare error paths when
		 * creating a symlink (transaction committed before the inode
		 * eviction handler removed the symlink inode items and a crash
		 * happened in between or the subvol was snapshotted in between).
		 * Print an informative message to dmesg/syslog so that the user
		 * can delete the symlink.
		 */
		btrfs_err(root->fs_info,
			  "Found empty symlink inode %llu at root %llu",
			  ino, root->root_key.objectid);
		ret = -EIO;
		goto out;
	}

	ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
			    struct btrfs_file_extent_item);
	type = btrfs_file_extent_type(path->nodes[0], ei);
	compression = btrfs_file_extent_compression(path->nodes[0], ei);
	BUG_ON(type != BTRFS_FILE_EXTENT_INLINE);
	BUG_ON(compression);

	off = btrfs_file_extent_inline_start(ei);
	len = btrfs_file_extent_ram_bytes(path->nodes[0], ei);

	ret = fs_path_add_from_extent_buffer(dest, path->nodes[0], off, len);

out:
	btrfs_free_path(path);
	return ret;
}
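
/*
 * (Illustrative, with made-up numbers) Orphan names generated by
 * gen_unique_name() below have the form "o<ino>-<gen>-<idx>", e.g.
 * "o261-7-0" for inode 261 with generation 7, where idx is incremented
 * until the name collides in neither the send root nor the parent root.
 */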

/*
 * Helper function to generate a file name that is unique in the root of
 * send_root and parent_root. This is used to generate names for orphan inodes.
 */
static int gen_unique_name(struct send_ctx *sctx,
			   u64 ino, u64 gen,
			   struct fs_path *dest)
{
	int ret = 0;
	struct btrfs_path *path;
	struct btrfs_dir_item *di;
	char tmp[64];
	int len;
	u64 idx = 0;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	while (1) {
		struct fscrypt_str tmp_name;

		len = snprintf(tmp, sizeof(tmp), "o%llu-%llu-%llu",
			       ino, gen, idx);
		ASSERT(len < sizeof(tmp));
		tmp_name.name = tmp;
		tmp_name.len = strlen(tmp);

		di = btrfs_lookup_dir_item(NULL, sctx->send_root,
					   path, BTRFS_FIRST_FREE_OBJECTID,
					   &tmp_name, 0);
		btrfs_release_path(path);
		if (IS_ERR(di)) {
			ret = PTR_ERR(di);
			goto out;
		}
		if (di) {
			/* not unique, try again */
			idx++;
			continue;
		}

		if (!sctx->parent_root) {
			/* unique */
			ret = 0;
			break;
		}

		di = btrfs_lookup_dir_item(NULL, sctx->parent_root,
					   path, BTRFS_FIRST_FREE_OBJECTID,
					   &tmp_name, 0);
		btrfs_release_path(path);
		if (IS_ERR(di)) {
			ret = PTR_ERR(di);
			goto out;
		}
		if (di) {
			/* not unique, try again */
			idx++;
			continue;
		}
		/* unique */
		break;
	}

	ret = fs_path_add(dest, tmp, strlen(tmp));

out:
	btrfs_free_path(path);
	return ret;
}

enum inode_state {
	inode_state_no_change,
	inode_state_will_create,
	inode_state_did_create,
	inode_state_will_delete,
	inode_state_did_delete,
};

static int get_cur_inode_state(struct send_ctx *sctx, u64 ino, u64 gen,
			       u64 *send_gen, u64 *parent_gen)
{
	int ret;
	int left_ret;
	int right_ret;
	u64 left_gen;
	u64 right_gen;
	struct btrfs_inode_info info;

	ret = get_inode_info(sctx->send_root, ino, &info);
	if (ret < 0 && ret != -ENOENT)
		goto out;
	left_ret = (info.nlink == 0) ? -ENOENT : ret;
	left_gen = info.gen;
	if (send_gen)
		*send_gen = ((left_ret == -ENOENT) ? 0 : info.gen);

	if (!sctx->parent_root) {
		right_ret = -ENOENT;
	} else {
		ret = get_inode_info(sctx->parent_root, ino, &info);
		if (ret < 0 && ret != -ENOENT)
			goto out;
		right_ret = (info.nlink == 0) ? -ENOENT : ret;
		right_gen = info.gen;
		if (parent_gen)
			*parent_gen = ((right_ret == -ENOENT) ? 0 : info.gen);
	}

	if (!left_ret && !right_ret) {
		if (left_gen == gen && right_gen == gen) {
			ret = inode_state_no_change;
		} else if (left_gen == gen) {
			if (ino < sctx->send_progress)
				ret = inode_state_did_create;
			else
				ret = inode_state_will_create;
		} else if (right_gen == gen) {
			if (ino < sctx->send_progress)
				ret = inode_state_did_delete;
			else
				ret = inode_state_will_delete;
		} else {
			ret = -ENOENT;
		}
	} else if (!left_ret) {
		if (left_gen == gen) {
			if (ino < sctx->send_progress)
				ret = inode_state_did_create;
			else
				ret = inode_state_will_create;
		} else {
			ret = -ENOENT;
		}
	} else if (!right_ret) {
		if (right_gen == gen) {
			if (ino < sctx->send_progress)
				ret = inode_state_did_delete;
			else
				ret = inode_state_will_delete;
		} else {
			ret = -ENOENT;
		}
	} else {
		ret = -ENOENT;
	}

out:
	return ret;
}

static int is_inode_existent(struct send_ctx *sctx, u64 ino, u64 gen,
			     u64 *send_gen, u64 *parent_gen)
{
	int ret;

	if (ino == BTRFS_FIRST_FREE_OBJECTID)
		return 1;

	ret = get_cur_inode_state(sctx, ino, gen, send_gen, parent_gen);
	if (ret < 0)
		goto out;

	if (ret == inode_state_no_change ||
	    ret == inode_state_did_create ||
	    ret == inode_state_will_delete)
		ret = 1;
	else
		ret = 0;

out:
	return ret;
}

/*
 * Helper function to lookup a dir item in a dir.
 */
static int lookup_dir_item_inode(struct btrfs_root *root,
				 u64 dir, const char *name, int name_len,
				 u64 *found_inode)
{
	int ret = 0;
	struct btrfs_dir_item *di;
	struct btrfs_key key;
	struct btrfs_path *path;
	struct fscrypt_str name_str = FSTR_INIT((char *)name, name_len);

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	di = btrfs_lookup_dir_item(NULL, root, path, dir, &name_str, 0);
	if (IS_ERR_OR_NULL(di)) {
		ret = di ? PTR_ERR(di) : -ENOENT;
		goto out;
	}
	btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
	if (key.type == BTRFS_ROOT_ITEM_KEY) {
		ret = -ENOENT;
		goto out;
	}
	*found_inode = key.objectid;

out:
	btrfs_free_path(path);
	return ret;
}
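
/*
 * (Illustrative) For an inode present only in the send root with a matching
 * generation, get_cur_inode_state() above returns inode_state_will_create
 * while send_progress has not yet reached the inode, and
 * inode_state_did_create once it has been processed; is_inode_existent()
 * then reports 1 only for the states where the inode currently exists at
 * this point of the stream.
 */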

/*
 * Looks up the first btrfs_inode_ref of a given ino. It returns the parent dir,
 * generation of the parent dir and the name of the dir entry.
 */
static int get_first_ref(struct btrfs_root *root, u64 ino,
			 u64 *dir, u64 *dir_gen, struct fs_path *name)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;
	int len;
	u64 parent_dir;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	key.objectid = ino;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = 0;

	ret = btrfs_search_slot_for_read(root, &key, path, 1, 0);
	if (ret < 0)
		goto out;
	if (!ret)
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
	if (ret || found_key.objectid != ino ||
	    (found_key.type != BTRFS_INODE_REF_KEY &&
	     found_key.type != BTRFS_INODE_EXTREF_KEY)) {
		ret = -ENOENT;
		goto out;
	}

	if (found_key.type == BTRFS_INODE_REF_KEY) {
		struct btrfs_inode_ref *iref;

		iref = btrfs_item_ptr(path->nodes[0], path->slots[0],
				      struct btrfs_inode_ref);
		len = btrfs_inode_ref_name_len(path->nodes[0], iref);
		ret = fs_path_add_from_extent_buffer(name, path->nodes[0],
						     (unsigned long)(iref + 1),
						     len);
		parent_dir = found_key.offset;
	} else {
		struct btrfs_inode_extref *extref;

		extref = btrfs_item_ptr(path->nodes[0], path->slots[0],
					struct btrfs_inode_extref);
		len = btrfs_inode_extref_name_len(path->nodes[0], extref);
		ret = fs_path_add_from_extent_buffer(name, path->nodes[0],
					(unsigned long)&extref->name, len);
		parent_dir = btrfs_inode_extref_parent(path->nodes[0], extref);
	}
	if (ret < 0)
		goto out;
	btrfs_release_path(path);

	if (dir_gen) {
		ret = get_inode_gen(root, parent_dir, dir_gen);
		if (ret < 0)
			goto out;
	}

	*dir = parent_dir;

out:
	btrfs_free_path(path);
	return ret;
}

static int is_first_ref(struct btrfs_root *root,
			u64 ino, u64 dir,
			const char *name, int name_len)
{
	int ret;
	struct fs_path *tmp_name;
	u64 tmp_dir;

	tmp_name = fs_path_alloc();
	if (!tmp_name)
		return -ENOMEM;

	ret = get_first_ref(root, ino, &tmp_dir, NULL, tmp_name);
	if (ret < 0)
		goto out;

	if (dir != tmp_dir || name_len != fs_path_len(tmp_name)) {
		ret = 0;
		goto out;
	}

	ret = !memcmp(tmp_name->start, name, name_len);

out:
	fs_path_free(tmp_name);
	return ret;
}
2107 */
2108 static int will_overwrite_ref(struct send_ctx *sctx, u64 dir, u64 dir_gen,
2109 const char *name, int name_len,
2110 u64 *who_ino, u64 *who_gen, u64 *who_mode)
2111 {
2112 int ret;
2113 u64 parent_root_dir_gen;
2114 u64 other_inode = 0;
2115 struct btrfs_inode_info info;
2116
2117 if (!sctx->parent_root)
2118 return 0;
2119
2120 ret = is_inode_existent(sctx, dir, dir_gen, NULL, &parent_root_dir_gen);
2121 if (ret <= 0)
2122 return 0;
2123
2124 /*
2125 * If we have a parent root we need to verify that the parent dir was
2126 * not deleted and then re-created, if it was then we have no overwrite
2127 * and we can just unlink this entry.
2128 *
2129 * @parent_root_dir_gen was set to 0 if the inode does not exist in the
2130 * parent root.
2131 */
2132 if (sctx->parent_root && dir != BTRFS_FIRST_FREE_OBJECTID &&
2133 parent_root_dir_gen != dir_gen)
2134 return 0;
2135
2136 ret = lookup_dir_item_inode(sctx->parent_root, dir, name, name_len,
2137 &other_inode);
2138 if (ret == -ENOENT)
2139 return 0;
2140 else if (ret < 0)
2141 return ret;
2142
2143 /*
2144 * Check if the overwritten ref was already processed. If yes, the ref
2145 * was already unlinked/moved, so we can safely assume that we will not
2146 * overwrite anything at this point in time.
2147 */
2148 if (other_inode > sctx->send_progress ||
2149 is_waiting_for_move(sctx, other_inode)) {
2150 ret = get_inode_info(sctx->parent_root, other_inode, &info);
2151 if (ret < 0)
2152 return ret;
2153
2154 *who_ino = other_inode;
2155 *who_gen = info.gen;
2156 *who_mode = info.mode;
2157 return 1;
2158 }
2159
2160 return 0;
2161 }
2162
2163 /*
2164 * Checks if the ref was overwritten by an already processed inode. This is
2165 * used by __get_cur_name_and_parent to find out if the ref was orphanized and
2166 * thus the orphan name needs to be used.
2167 * process_recorded_refs also uses it to avoid unlinking of refs that were
2168 * overwritten.
2169 */
2170 static int did_overwrite_ref(struct send_ctx *sctx,
2171 u64 dir, u64 dir_gen,
2172 u64 ino, u64 ino_gen,
2173 const char *name, int name_len)
2174 {
2175 int ret;
2176 u64 ow_inode;
2177 u64 ow_gen = 0;
2178 u64 send_root_dir_gen;
2179
2180 if (!sctx->parent_root)
2181 return 0;
2182
2183 ret = is_inode_existent(sctx, dir, dir_gen, &send_root_dir_gen, NULL);
2184 if (ret <= 0)
2185 return ret;
2186
2187 /*
2188 * @send_root_dir_gen was set to 0 if the inode does not exist in the
2189 * send root.
2190 */
2191 if (dir != BTRFS_FIRST_FREE_OBJECTID && send_root_dir_gen != dir_gen)
2192 return 0;
2193
2194 /* check if the ref was overwritten by another ref */
2195 ret = lookup_dir_item_inode(sctx->send_root, dir, name, name_len,
2196 &ow_inode);
2197 if (ret == -ENOENT) {
2198 /* was never and will never be overwritten */
2199 return 0;
2200 } else if (ret < 0) {
2201 return ret;
2202 }
2203
2204 if (ow_inode == ino) {
2205 ret = get_inode_gen(sctx->send_root, ow_inode, &ow_gen);
2206 if (ret < 0)
2207 return ret;
2208
2209 /* It's the same inode, so no overwrite happened. */
2210 if (ow_gen == ino_gen)
2211 return 0;
2212 }
2213
2214 /*
2215 * We know that it is or will be overwritten. Check this now.
2216 * The current inode being processed might have been the one that caused
2217 * inode 'ino' to be orphanized, therefore check if ow_inode matches
2218 * the current inode being processed.
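 * (An ow_inode below sctx->send_progress was already processed, meaning its
 * new ref was already created at the receiver and therefore replaced ours.)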
2219 */ 2220 if (ow_inode < sctx->send_progress) 2221 return 1; 2222 2223 if (ino != sctx->cur_ino && ow_inode == sctx->cur_ino) { 2224 if (ow_gen == 0) { 2225 ret = get_inode_gen(sctx->send_root, ow_inode, &ow_gen); 2226 if (ret < 0) 2227 return ret; 2228 } 2229 if (ow_gen == sctx->cur_inode_gen) 2230 return 1; 2231 } 2232 2233 return 0; 2234 } 2235 2236 /* 2237 * Same as did_overwrite_ref, but also checks if it is the first ref of an inode 2238 * that got overwritten. This is used by process_recorded_refs to determine 2239 * if it has to use the path as returned by get_cur_path or the orphan name. 2240 */ 2241 static int did_overwrite_first_ref(struct send_ctx *sctx, u64 ino, u64 gen) 2242 { 2243 int ret = 0; 2244 struct fs_path *name = NULL; 2245 u64 dir; 2246 u64 dir_gen; 2247 2248 if (!sctx->parent_root) 2249 goto out; 2250 2251 name = fs_path_alloc(); 2252 if (!name) 2253 return -ENOMEM; 2254 2255 ret = get_first_ref(sctx->parent_root, ino, &dir, &dir_gen, name); 2256 if (ret < 0) 2257 goto out; 2258 2259 ret = did_overwrite_ref(sctx, dir, dir_gen, ino, gen, 2260 name->start, fs_path_len(name)); 2261 2262 out: 2263 fs_path_free(name); 2264 return ret; 2265 } 2266 2267 static inline struct name_cache_entry *name_cache_search(struct send_ctx *sctx, 2268 u64 ino, u64 gen) 2269 { 2270 struct btrfs_lru_cache_entry *entry; 2271 2272 entry = btrfs_lru_cache_lookup(&sctx->name_cache, ino, gen); 2273 if (!entry) 2274 return NULL; 2275 2276 return container_of(entry, struct name_cache_entry, entry); 2277 } 2278 2279 /* 2280 * Used by get_cur_path for each ref up to the root. 2281 * Returns 0 if it succeeded. 2282 * Returns 1 if the inode is not existent or got overwritten. In that case, the 2283 * name is an orphan name. This instructs get_cur_path to stop iterating. If 1 2284 * is returned, parent_ino/parent_gen are not guaranteed to be valid. 2285 * Returns <0 in case of error. 2286 */ 2287 static int __get_cur_name_and_parent(struct send_ctx *sctx, 2288 u64 ino, u64 gen, 2289 u64 *parent_ino, 2290 u64 *parent_gen, 2291 struct fs_path *dest) 2292 { 2293 int ret; 2294 int nce_ret; 2295 struct name_cache_entry *nce; 2296 2297 /* 2298 * First check if we already did a call to this function with the same 2299 * ino/gen. If yes, check if the cache entry is still up-to-date. If yes 2300 * return the cached result. 2301 */ 2302 nce = name_cache_search(sctx, ino, gen); 2303 if (nce) { 2304 if (ino < sctx->send_progress && nce->need_later_update) { 2305 btrfs_lru_cache_remove(&sctx->name_cache, &nce->entry); 2306 nce = NULL; 2307 } else { 2308 *parent_ino = nce->parent_ino; 2309 *parent_gen = nce->parent_gen; 2310 ret = fs_path_add(dest, nce->name, nce->name_len); 2311 if (ret < 0) 2312 goto out; 2313 ret = nce->ret; 2314 goto out; 2315 } 2316 } 2317 2318 /* 2319 * If the inode is not existent yet, add the orphan name and return 1. 2320 * This should only happen for the parent dir that we determine in 2321 * record_new_ref_if_needed(). 2322 */ 2323 ret = is_inode_existent(sctx, ino, gen, NULL, NULL); 2324 if (ret < 0) 2325 goto out; 2326 2327 if (!ret) { 2328 ret = gen_unique_name(sctx, ino, gen, dest); 2329 if (ret < 0) 2330 goto out; 2331 ret = 1; 2332 goto out_cache; 2333 } 2334 2335 /* 2336 * Depending on whether the inode was already processed or not, use 2337 * send_root or parent_root for ref lookup. 
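 * An already processed inode was sent to the receiver with its new name,
 * which the send root has; an unprocessed inode still exists under its old
 * name, which only the parent root knows.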
2338 */
2339 if (ino < sctx->send_progress)
2340 ret = get_first_ref(sctx->send_root, ino,
2341 parent_ino, parent_gen, dest);
2342 else
2343 ret = get_first_ref(sctx->parent_root, ino,
2344 parent_ino, parent_gen, dest);
2345 if (ret < 0)
2346 goto out;
2347
2348 /*
2349 * Check if the ref was overwritten by an inode's ref that was processed
2350 * earlier. If yes, treat as orphan and return 1.
2351 */
2352 ret = did_overwrite_ref(sctx, *parent_ino, *parent_gen, ino, gen,
2353 dest->start, dest->end - dest->start);
2354 if (ret < 0)
2355 goto out;
2356 if (ret) {
2357 fs_path_reset(dest);
2358 ret = gen_unique_name(sctx, ino, gen, dest);
2359 if (ret < 0)
2360 goto out;
2361 ret = 1;
2362 }
2363
2364 out_cache:
2365 /*
2366 * Store the result of the lookup in the name cache.
2367 */
2368 nce = kmalloc(sizeof(*nce) + fs_path_len(dest) + 1, GFP_KERNEL);
2369 if (!nce) {
2370 ret = -ENOMEM;
2371 goto out;
2372 }
2373
2374 nce->entry.key = ino;
2375 nce->entry.gen = gen;
2376 nce->parent_ino = *parent_ino;
2377 nce->parent_gen = *parent_gen;
2378 nce->name_len = fs_path_len(dest);
2379 nce->ret = ret;
2380 strcpy(nce->name, dest->start);
2381
2382 if (ino < sctx->send_progress)
2383 nce->need_later_update = 0;
2384 else
2385 nce->need_later_update = 1;
2386
2387 nce_ret = btrfs_lru_cache_store(&sctx->name_cache, &nce->entry, GFP_KERNEL);
2388 if (nce_ret < 0) {
2389 kfree(nce);
2390 ret = nce_ret;
2391 }
2392
2393 out:
2394 return ret;
2395 }
2396
2397 /*
2398 * Magic happens here. This function returns the first ref to an inode as it
2399 * would look like while receiving the stream at this point in time.
2400 * We walk the path up to the root. For every inode in between, we check if it
2401 * was already processed/sent. If yes, we continue with the parent as found
2402 * in send_root. If not, we continue with the parent as found in parent_root.
2403 * If we encounter an inode that was deleted at this point in time, we use the
2404 * inode's "orphan" name instead of the real name and stop. Same with new inodes
2405 * that were not created yet and overwritten inodes/refs.
2406 *
2407 * When do we have orphan inodes:
2408 * 1. When an inode is freshly created and thus no valid refs are available yet
2409 * 2. When a directory lost all its refs (deleted) but still has dir items
2410 * inside which were not processed yet (pending for move/delete). If anyone
2411 * tried to get the path to the dir items, it would get a path inside that
2412 * orphan directory.
2413 * 3. When an inode is moved around or gets new links, it may overwrite the ref
2414 * of an unprocessed inode. If in that case the first ref would be
2415 * overwritten, the overwritten inode gets "orphanized". Later when we
2416 * process this overwritten inode, it is restored at a new place by moving
2417 * the orphan inode.
2418 *
2419 * sctx->send_progress tells this function at which point in time receiving
2420 * would be.
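 *
 * Illustrative walk (made-up inode numbers): with send_progress == 259,
 * resolving inode 260 living in dir 257 takes the name of 260 from the
 * parent root (260 >= send_progress, not processed yet), then the name of
 * 257 from the send root (257 < send_progress, already processed), and
 * stops when reaching the subvolume root (ino 256).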
2421 */ 2422 static int get_cur_path(struct send_ctx *sctx, u64 ino, u64 gen, 2423 struct fs_path *dest) 2424 { 2425 int ret = 0; 2426 struct fs_path *name = NULL; 2427 u64 parent_inode = 0; 2428 u64 parent_gen = 0; 2429 int stop = 0; 2430 2431 name = fs_path_alloc(); 2432 if (!name) { 2433 ret = -ENOMEM; 2434 goto out; 2435 } 2436 2437 dest->reversed = 1; 2438 fs_path_reset(dest); 2439 2440 while (!stop && ino != BTRFS_FIRST_FREE_OBJECTID) { 2441 struct waiting_dir_move *wdm; 2442 2443 fs_path_reset(name); 2444 2445 if (is_waiting_for_rm(sctx, ino, gen)) { 2446 ret = gen_unique_name(sctx, ino, gen, name); 2447 if (ret < 0) 2448 goto out; 2449 ret = fs_path_add_path(dest, name); 2450 break; 2451 } 2452 2453 wdm = get_waiting_dir_move(sctx, ino); 2454 if (wdm && wdm->orphanized) { 2455 ret = gen_unique_name(sctx, ino, gen, name); 2456 stop = 1; 2457 } else if (wdm) { 2458 ret = get_first_ref(sctx->parent_root, ino, 2459 &parent_inode, &parent_gen, name); 2460 } else { 2461 ret = __get_cur_name_and_parent(sctx, ino, gen, 2462 &parent_inode, 2463 &parent_gen, name); 2464 if (ret) 2465 stop = 1; 2466 } 2467 2468 if (ret < 0) 2469 goto out; 2470 2471 ret = fs_path_add_path(dest, name); 2472 if (ret < 0) 2473 goto out; 2474 2475 ino = parent_inode; 2476 gen = parent_gen; 2477 } 2478 2479 out: 2480 fs_path_free(name); 2481 if (!ret) 2482 fs_path_unreverse(dest); 2483 return ret; 2484 } 2485 2486 /* 2487 * Sends a BTRFS_SEND_C_SUBVOL command/item to userspace 2488 */ 2489 static int send_subvol_begin(struct send_ctx *sctx) 2490 { 2491 int ret; 2492 struct btrfs_root *send_root = sctx->send_root; 2493 struct btrfs_root *parent_root = sctx->parent_root; 2494 struct btrfs_path *path; 2495 struct btrfs_key key; 2496 struct btrfs_root_ref *ref; 2497 struct extent_buffer *leaf; 2498 char *name = NULL; 2499 int namelen; 2500 2501 path = btrfs_alloc_path(); 2502 if (!path) 2503 return -ENOMEM; 2504 2505 name = kmalloc(BTRFS_PATH_NAME_MAX, GFP_KERNEL); 2506 if (!name) { 2507 btrfs_free_path(path); 2508 return -ENOMEM; 2509 } 2510 2511 key.objectid = send_root->root_key.objectid; 2512 key.type = BTRFS_ROOT_BACKREF_KEY; 2513 key.offset = 0; 2514 2515 ret = btrfs_search_slot_for_read(send_root->fs_info->tree_root, 2516 &key, path, 1, 0); 2517 if (ret < 0) 2518 goto out; 2519 if (ret) { 2520 ret = -ENOENT; 2521 goto out; 2522 } 2523 2524 leaf = path->nodes[0]; 2525 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 2526 if (key.type != BTRFS_ROOT_BACKREF_KEY || 2527 key.objectid != send_root->root_key.objectid) { 2528 ret = -ENOENT; 2529 goto out; 2530 } 2531 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref); 2532 namelen = btrfs_root_ref_name_len(leaf, ref); 2533 read_extent_buffer(leaf, name, (unsigned long)(ref + 1), namelen); 2534 btrfs_release_path(path); 2535 2536 if (parent_root) { 2537 ret = begin_cmd(sctx, BTRFS_SEND_C_SNAPSHOT); 2538 if (ret < 0) 2539 goto out; 2540 } else { 2541 ret = begin_cmd(sctx, BTRFS_SEND_C_SUBVOL); 2542 if (ret < 0) 2543 goto out; 2544 } 2545 2546 TLV_PUT_STRING(sctx, BTRFS_SEND_A_PATH, name, namelen); 2547 2548 if (!btrfs_is_empty_uuid(sctx->send_root->root_item.received_uuid)) 2549 TLV_PUT_UUID(sctx, BTRFS_SEND_A_UUID, 2550 sctx->send_root->root_item.received_uuid); 2551 else 2552 TLV_PUT_UUID(sctx, BTRFS_SEND_A_UUID, 2553 sctx->send_root->root_item.uuid); 2554 2555 TLV_PUT_U64(sctx, BTRFS_SEND_A_CTRANSID, 2556 btrfs_root_ctransid(&sctx->send_root->root_item)); 2557 if (parent_root) { 2558 if (!btrfs_is_empty_uuid(parent_root->root_item.received_uuid)) 2559 
TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID, 2560 parent_root->root_item.received_uuid); 2561 else 2562 TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID, 2563 parent_root->root_item.uuid); 2564 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID, 2565 btrfs_root_ctransid(&sctx->parent_root->root_item)); 2566 } 2567 2568 ret = send_cmd(sctx); 2569 2570 tlv_put_failure: 2571 out: 2572 btrfs_free_path(path); 2573 kfree(name); 2574 return ret; 2575 } 2576 2577 static int send_truncate(struct send_ctx *sctx, u64 ino, u64 gen, u64 size) 2578 { 2579 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info; 2580 int ret = 0; 2581 struct fs_path *p; 2582 2583 btrfs_debug(fs_info, "send_truncate %llu size=%llu", ino, size); 2584 2585 p = fs_path_alloc(); 2586 if (!p) 2587 return -ENOMEM; 2588 2589 ret = begin_cmd(sctx, BTRFS_SEND_C_TRUNCATE); 2590 if (ret < 0) 2591 goto out; 2592 2593 ret = get_cur_path(sctx, ino, gen, p); 2594 if (ret < 0) 2595 goto out; 2596 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); 2597 TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, size); 2598 2599 ret = send_cmd(sctx); 2600 2601 tlv_put_failure: 2602 out: 2603 fs_path_free(p); 2604 return ret; 2605 } 2606 2607 static int send_chmod(struct send_ctx *sctx, u64 ino, u64 gen, u64 mode) 2608 { 2609 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info; 2610 int ret = 0; 2611 struct fs_path *p; 2612 2613 btrfs_debug(fs_info, "send_chmod %llu mode=%llu", ino, mode); 2614 2615 p = fs_path_alloc(); 2616 if (!p) 2617 return -ENOMEM; 2618 2619 ret = begin_cmd(sctx, BTRFS_SEND_C_CHMOD); 2620 if (ret < 0) 2621 goto out; 2622 2623 ret = get_cur_path(sctx, ino, gen, p); 2624 if (ret < 0) 2625 goto out; 2626 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); 2627 TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, mode & 07777); 2628 2629 ret = send_cmd(sctx); 2630 2631 tlv_put_failure: 2632 out: 2633 fs_path_free(p); 2634 return ret; 2635 } 2636 2637 static int send_fileattr(struct send_ctx *sctx, u64 ino, u64 gen, u64 fileattr) 2638 { 2639 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info; 2640 int ret = 0; 2641 struct fs_path *p; 2642 2643 if (sctx->proto < 2) 2644 return 0; 2645 2646 btrfs_debug(fs_info, "send_fileattr %llu fileattr=%llu", ino, fileattr); 2647 2648 p = fs_path_alloc(); 2649 if (!p) 2650 return -ENOMEM; 2651 2652 ret = begin_cmd(sctx, BTRFS_SEND_C_FILEATTR); 2653 if (ret < 0) 2654 goto out; 2655 2656 ret = get_cur_path(sctx, ino, gen, p); 2657 if (ret < 0) 2658 goto out; 2659 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); 2660 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILEATTR, fileattr); 2661 2662 ret = send_cmd(sctx); 2663 2664 tlv_put_failure: 2665 out: 2666 fs_path_free(p); 2667 return ret; 2668 } 2669 2670 static int send_chown(struct send_ctx *sctx, u64 ino, u64 gen, u64 uid, u64 gid) 2671 { 2672 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info; 2673 int ret = 0; 2674 struct fs_path *p; 2675 2676 btrfs_debug(fs_info, "send_chown %llu uid=%llu, gid=%llu", 2677 ino, uid, gid); 2678 2679 p = fs_path_alloc(); 2680 if (!p) 2681 return -ENOMEM; 2682 2683 ret = begin_cmd(sctx, BTRFS_SEND_C_CHOWN); 2684 if (ret < 0) 2685 goto out; 2686 2687 ret = get_cur_path(sctx, ino, gen, p); 2688 if (ret < 0) 2689 goto out; 2690 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); 2691 TLV_PUT_U64(sctx, BTRFS_SEND_A_UID, uid); 2692 TLV_PUT_U64(sctx, BTRFS_SEND_A_GID, gid); 2693 2694 ret = send_cmd(sctx); 2695 2696 tlv_put_failure: 2697 out: 2698 fs_path_free(p); 2699 return ret; 2700 } 2701 2702 static int send_utimes(struct send_ctx *sctx, u64 ino, u64 gen) 2703 { 2704 struct btrfs_fs_info 
*fs_info = sctx->send_root->fs_info;
2705 int ret = 0;
2706 struct fs_path *p = NULL;
2707 struct btrfs_inode_item *ii;
2708 struct btrfs_path *path = NULL;
2709 struct extent_buffer *eb;
2710 struct btrfs_key key;
2711 int slot;
2712
2713 btrfs_debug(fs_info, "send_utimes %llu", ino);
2714
2715 p = fs_path_alloc();
2716 if (!p)
2717 return -ENOMEM;
2718
2719 path = alloc_path_for_send();
2720 if (!path) {
2721 ret = -ENOMEM;
2722 goto out;
2723 }
2724
2725 key.objectid = ino;
2726 key.type = BTRFS_INODE_ITEM_KEY;
2727 key.offset = 0;
2728 ret = btrfs_search_slot(NULL, sctx->send_root, &key, path, 0, 0);
2729 if (ret > 0)
2730 ret = -ENOENT;
2731 if (ret < 0)
2732 goto out;
2733
2734 eb = path->nodes[0];
2735 slot = path->slots[0];
2736 ii = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
2737
2738 ret = begin_cmd(sctx, BTRFS_SEND_C_UTIMES);
2739 if (ret < 0)
2740 goto out;
2741
2742 ret = get_cur_path(sctx, ino, gen, p);
2743 if (ret < 0)
2744 goto out;
2745 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2746 TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_ATIME, eb, &ii->atime);
2747 TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_MTIME, eb, &ii->mtime);
2748 TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_CTIME, eb, &ii->ctime);
2749 if (sctx->proto >= 2)
2750 TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_OTIME, eb, &ii->otime);
2751
2752 ret = send_cmd(sctx);
2753
2754 tlv_put_failure:
2755 out:
2756 fs_path_free(p);
2757 btrfs_free_path(path);
2758 return ret;
2759 }
2760
2761 /*
2762 * If the cache is full, we can't remove entries from it and do a call to
2763 * send_utimes() for each respective inode, because we might be finishing
2764 * processing an inode that is a directory and it just got renamed, and existing
2765 * entries in the cache may refer to inodes that have the directory in their
2766 * full path - in which case we would generate outdated paths (pre-rename)
2767 * for the inodes that the cache entries point to. Instead of pruning the
2768 * cache when inserting, do it after we finish processing each inode at
2769 * finish_inode_if_needed().
2770 */
2771 static int cache_dir_utimes(struct send_ctx *sctx, u64 dir, u64 gen)
2772 {
2773 struct btrfs_lru_cache_entry *entry;
2774 int ret;
2775
2776 entry = btrfs_lru_cache_lookup(&sctx->dir_utimes_cache, dir, gen);
2777 if (entry != NULL)
2778 return 0;
2779
2780 /* Caching is optional, don't fail if we can't allocate memory. */
2781 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
2782 if (!entry)
2783 return send_utimes(sctx, dir, gen);
2784
2785 entry->key = dir;
2786 entry->gen = gen;
2787
2788 ret = btrfs_lru_cache_store(&sctx->dir_utimes_cache, entry, GFP_KERNEL);
2789 ASSERT(ret != -EEXIST);
2790 if (ret) {
2791 kfree(entry);
2792 return send_utimes(sctx, dir, gen);
2793 }
2794
2795 return 0;
2796 }
2797
2798 static int trim_dir_utimes_cache(struct send_ctx *sctx)
2799 {
2800 while (btrfs_lru_cache_size(&sctx->dir_utimes_cache) >
2801 SEND_MAX_DIR_UTIMES_CACHE_SIZE) {
2802 struct btrfs_lru_cache_entry *lru;
2803 int ret;
2804
2805 lru = btrfs_lru_cache_lru_entry(&sctx->dir_utimes_cache);
2806 ASSERT(lru != NULL);
2807
2808 ret = send_utimes(sctx, lru->key, lru->gen);
2809 if (ret)
2810 return ret;
2811
2812 btrfs_lru_cache_remove(&sctx->dir_utimes_cache, lru);
2813 }
2814
2815 return 0;
2816 }
2817
2818 /*
2819 * Sends a BTRFS_SEND_C_MKXXX or SYMLINK command to user space. We don't have
2820 * a valid path yet because we did not process the refs yet. So, the inode
2821 * is created as orphan.
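 * The orphan name is generated by gen_unique_name() (e.g. "o259-6-0" in the
 * examples further below); a later rename gives the inode its real name.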
2822 */ 2823 static int send_create_inode(struct send_ctx *sctx, u64 ino) 2824 { 2825 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info; 2826 int ret = 0; 2827 struct fs_path *p; 2828 int cmd; 2829 struct btrfs_inode_info info; 2830 u64 gen; 2831 u64 mode; 2832 u64 rdev; 2833 2834 btrfs_debug(fs_info, "send_create_inode %llu", ino); 2835 2836 p = fs_path_alloc(); 2837 if (!p) 2838 return -ENOMEM; 2839 2840 if (ino != sctx->cur_ino) { 2841 ret = get_inode_info(sctx->send_root, ino, &info); 2842 if (ret < 0) 2843 goto out; 2844 gen = info.gen; 2845 mode = info.mode; 2846 rdev = info.rdev; 2847 } else { 2848 gen = sctx->cur_inode_gen; 2849 mode = sctx->cur_inode_mode; 2850 rdev = sctx->cur_inode_rdev; 2851 } 2852 2853 if (S_ISREG(mode)) { 2854 cmd = BTRFS_SEND_C_MKFILE; 2855 } else if (S_ISDIR(mode)) { 2856 cmd = BTRFS_SEND_C_MKDIR; 2857 } else if (S_ISLNK(mode)) { 2858 cmd = BTRFS_SEND_C_SYMLINK; 2859 } else if (S_ISCHR(mode) || S_ISBLK(mode)) { 2860 cmd = BTRFS_SEND_C_MKNOD; 2861 } else if (S_ISFIFO(mode)) { 2862 cmd = BTRFS_SEND_C_MKFIFO; 2863 } else if (S_ISSOCK(mode)) { 2864 cmd = BTRFS_SEND_C_MKSOCK; 2865 } else { 2866 btrfs_warn(sctx->send_root->fs_info, "unexpected inode type %o", 2867 (int)(mode & S_IFMT)); 2868 ret = -EOPNOTSUPP; 2869 goto out; 2870 } 2871 2872 ret = begin_cmd(sctx, cmd); 2873 if (ret < 0) 2874 goto out; 2875 2876 ret = gen_unique_name(sctx, ino, gen, p); 2877 if (ret < 0) 2878 goto out; 2879 2880 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); 2881 TLV_PUT_U64(sctx, BTRFS_SEND_A_INO, ino); 2882 2883 if (S_ISLNK(mode)) { 2884 fs_path_reset(p); 2885 ret = read_symlink(sctx->send_root, ino, p); 2886 if (ret < 0) 2887 goto out; 2888 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, p); 2889 } else if (S_ISCHR(mode) || S_ISBLK(mode) || 2890 S_ISFIFO(mode) || S_ISSOCK(mode)) { 2891 TLV_PUT_U64(sctx, BTRFS_SEND_A_RDEV, new_encode_dev(rdev)); 2892 TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, mode); 2893 } 2894 2895 ret = send_cmd(sctx); 2896 if (ret < 0) 2897 goto out; 2898 2899 2900 tlv_put_failure: 2901 out: 2902 fs_path_free(p); 2903 return ret; 2904 } 2905 2906 static void cache_dir_created(struct send_ctx *sctx, u64 dir) 2907 { 2908 struct btrfs_lru_cache_entry *entry; 2909 int ret; 2910 2911 /* Caching is optional, ignore any failures. */ 2912 entry = kmalloc(sizeof(*entry), GFP_KERNEL); 2913 if (!entry) 2914 return; 2915 2916 entry->key = dir; 2917 entry->gen = 0; 2918 ret = btrfs_lru_cache_store(&sctx->dir_created_cache, entry, GFP_KERNEL); 2919 if (ret < 0) 2920 kfree(entry); 2921 } 2922 2923 /* 2924 * We need some special handling for inodes that get processed before the parent 2925 * directory got created. See process_recorded_refs for details. 2926 * This function does the check if we already created the dir out of order. 
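 * It first consults dir_created_cache and, on a miss, scans the directory's
 * DIR_INDEX items looking for an entry that points to an already processed
 * inode, which implies the directory had to be created ahead of time.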
2927 */ 2928 static int did_create_dir(struct send_ctx *sctx, u64 dir) 2929 { 2930 int ret = 0; 2931 int iter_ret = 0; 2932 struct btrfs_path *path = NULL; 2933 struct btrfs_key key; 2934 struct btrfs_key found_key; 2935 struct btrfs_key di_key; 2936 struct btrfs_dir_item *di; 2937 2938 if (btrfs_lru_cache_lookup(&sctx->dir_created_cache, dir, 0)) 2939 return 1; 2940 2941 path = alloc_path_for_send(); 2942 if (!path) 2943 return -ENOMEM; 2944 2945 key.objectid = dir; 2946 key.type = BTRFS_DIR_INDEX_KEY; 2947 key.offset = 0; 2948 2949 btrfs_for_each_slot(sctx->send_root, &key, &found_key, path, iter_ret) { 2950 struct extent_buffer *eb = path->nodes[0]; 2951 2952 if (found_key.objectid != key.objectid || 2953 found_key.type != key.type) { 2954 ret = 0; 2955 break; 2956 } 2957 2958 di = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dir_item); 2959 btrfs_dir_item_key_to_cpu(eb, di, &di_key); 2960 2961 if (di_key.type != BTRFS_ROOT_ITEM_KEY && 2962 di_key.objectid < sctx->send_progress) { 2963 ret = 1; 2964 cache_dir_created(sctx, dir); 2965 break; 2966 } 2967 } 2968 /* Catch error found during iteration */ 2969 if (iter_ret < 0) 2970 ret = iter_ret; 2971 2972 btrfs_free_path(path); 2973 return ret; 2974 } 2975 2976 /* 2977 * Only creates the inode if it is: 2978 * 1. Not a directory 2979 * 2. Or a directory which was not created already due to out of order 2980 * directories. See did_create_dir and process_recorded_refs for details. 2981 */ 2982 static int send_create_inode_if_needed(struct send_ctx *sctx) 2983 { 2984 int ret; 2985 2986 if (S_ISDIR(sctx->cur_inode_mode)) { 2987 ret = did_create_dir(sctx, sctx->cur_ino); 2988 if (ret < 0) 2989 return ret; 2990 else if (ret > 0) 2991 return 0; 2992 } 2993 2994 ret = send_create_inode(sctx, sctx->cur_ino); 2995 2996 if (ret == 0 && S_ISDIR(sctx->cur_inode_mode)) 2997 cache_dir_created(sctx, sctx->cur_ino); 2998 2999 return ret; 3000 } 3001 3002 struct recorded_ref { 3003 struct list_head list; 3004 char *name; 3005 struct fs_path *full_path; 3006 u64 dir; 3007 u64 dir_gen; 3008 int name_len; 3009 struct rb_node node; 3010 struct rb_root *root; 3011 }; 3012 3013 static struct recorded_ref *recorded_ref_alloc(void) 3014 { 3015 struct recorded_ref *ref; 3016 3017 ref = kzalloc(sizeof(*ref), GFP_KERNEL); 3018 if (!ref) 3019 return NULL; 3020 RB_CLEAR_NODE(&ref->node); 3021 INIT_LIST_HEAD(&ref->list); 3022 return ref; 3023 } 3024 3025 static void recorded_ref_free(struct recorded_ref *ref) 3026 { 3027 if (!ref) 3028 return; 3029 if (!RB_EMPTY_NODE(&ref->node)) 3030 rb_erase(&ref->node, ref->root); 3031 list_del(&ref->list); 3032 fs_path_free(ref->full_path); 3033 kfree(ref); 3034 } 3035 3036 static void set_ref_path(struct recorded_ref *ref, struct fs_path *path) 3037 { 3038 ref->full_path = path; 3039 ref->name = (char *)kbasename(ref->full_path->start); 3040 ref->name_len = ref->full_path->end - ref->name; 3041 } 3042 3043 static int dup_ref(struct recorded_ref *ref, struct list_head *list) 3044 { 3045 struct recorded_ref *new; 3046 3047 new = recorded_ref_alloc(); 3048 if (!new) 3049 return -ENOMEM; 3050 3051 new->dir = ref->dir; 3052 new->dir_gen = ref->dir_gen; 3053 list_add_tail(&new->list, list); 3054 return 0; 3055 } 3056 3057 static void __free_recorded_refs(struct list_head *head) 3058 { 3059 struct recorded_ref *cur; 3060 3061 while (!list_empty(head)) { 3062 cur = list_entry(head->next, struct recorded_ref, list); 3063 recorded_ref_free(cur); 3064 } 3065 } 3066 3067 static void free_recorded_refs(struct send_ctx *sctx) 3068 { 3069 
__free_recorded_refs(&sctx->new_refs); 3070 __free_recorded_refs(&sctx->deleted_refs); 3071 } 3072 3073 /* 3074 * Renames/moves a file/dir to its orphan name. Used when the first 3075 * ref of an unprocessed inode gets overwritten and for all non empty 3076 * directories. 3077 */ 3078 static int orphanize_inode(struct send_ctx *sctx, u64 ino, u64 gen, 3079 struct fs_path *path) 3080 { 3081 int ret; 3082 struct fs_path *orphan; 3083 3084 orphan = fs_path_alloc(); 3085 if (!orphan) 3086 return -ENOMEM; 3087 3088 ret = gen_unique_name(sctx, ino, gen, orphan); 3089 if (ret < 0) 3090 goto out; 3091 3092 ret = send_rename(sctx, path, orphan); 3093 3094 out: 3095 fs_path_free(orphan); 3096 return ret; 3097 } 3098 3099 static struct orphan_dir_info *add_orphan_dir_info(struct send_ctx *sctx, 3100 u64 dir_ino, u64 dir_gen) 3101 { 3102 struct rb_node **p = &sctx->orphan_dirs.rb_node; 3103 struct rb_node *parent = NULL; 3104 struct orphan_dir_info *entry, *odi; 3105 3106 while (*p) { 3107 parent = *p; 3108 entry = rb_entry(parent, struct orphan_dir_info, node); 3109 if (dir_ino < entry->ino) 3110 p = &(*p)->rb_left; 3111 else if (dir_ino > entry->ino) 3112 p = &(*p)->rb_right; 3113 else if (dir_gen < entry->gen) 3114 p = &(*p)->rb_left; 3115 else if (dir_gen > entry->gen) 3116 p = &(*p)->rb_right; 3117 else 3118 return entry; 3119 } 3120 3121 odi = kmalloc(sizeof(*odi), GFP_KERNEL); 3122 if (!odi) 3123 return ERR_PTR(-ENOMEM); 3124 odi->ino = dir_ino; 3125 odi->gen = dir_gen; 3126 odi->last_dir_index_offset = 0; 3127 odi->dir_high_seq_ino = 0; 3128 3129 rb_link_node(&odi->node, parent, p); 3130 rb_insert_color(&odi->node, &sctx->orphan_dirs); 3131 return odi; 3132 } 3133 3134 static struct orphan_dir_info *get_orphan_dir_info(struct send_ctx *sctx, 3135 u64 dir_ino, u64 gen) 3136 { 3137 struct rb_node *n = sctx->orphan_dirs.rb_node; 3138 struct orphan_dir_info *entry; 3139 3140 while (n) { 3141 entry = rb_entry(n, struct orphan_dir_info, node); 3142 if (dir_ino < entry->ino) 3143 n = n->rb_left; 3144 else if (dir_ino > entry->ino) 3145 n = n->rb_right; 3146 else if (gen < entry->gen) 3147 n = n->rb_left; 3148 else if (gen > entry->gen) 3149 n = n->rb_right; 3150 else 3151 return entry; 3152 } 3153 return NULL; 3154 } 3155 3156 static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino, u64 gen) 3157 { 3158 struct orphan_dir_info *odi = get_orphan_dir_info(sctx, dir_ino, gen); 3159 3160 return odi != NULL; 3161 } 3162 3163 static void free_orphan_dir_info(struct send_ctx *sctx, 3164 struct orphan_dir_info *odi) 3165 { 3166 if (!odi) 3167 return; 3168 rb_erase(&odi->node, &sctx->orphan_dirs); 3169 kfree(odi); 3170 } 3171 3172 /* 3173 * Returns 1 if a directory can be removed at this point in time. 3174 * We check this by iterating all dir items and checking if the inode behind 3175 * the dir item was already processed. 3176 */ 3177 static int can_rmdir(struct send_ctx *sctx, u64 dir, u64 dir_gen) 3178 { 3179 int ret = 0; 3180 int iter_ret = 0; 3181 struct btrfs_root *root = sctx->parent_root; 3182 struct btrfs_path *path; 3183 struct btrfs_key key; 3184 struct btrfs_key found_key; 3185 struct btrfs_key loc; 3186 struct btrfs_dir_item *di; 3187 struct orphan_dir_info *odi = NULL; 3188 u64 dir_high_seq_ino = 0; 3189 u64 last_dir_index_offset = 0; 3190 3191 /* 3192 * Don't try to rmdir the top/root subvolume dir. 
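 * (That is the inode with number BTRFS_FIRST_FREE_OBJECTID; it always
 * exists at the receiver, so there is never anything to remove.)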
3193 */ 3194 if (dir == BTRFS_FIRST_FREE_OBJECTID) 3195 return 0; 3196 3197 odi = get_orphan_dir_info(sctx, dir, dir_gen); 3198 if (odi && sctx->cur_ino < odi->dir_high_seq_ino) 3199 return 0; 3200 3201 path = alloc_path_for_send(); 3202 if (!path) 3203 return -ENOMEM; 3204 3205 if (!odi) { 3206 /* 3207 * Find the inode number associated with the last dir index 3208 * entry. This is very likely the inode with the highest number 3209 * of all inodes that have an entry in the directory. We can 3210 * then use it to avoid future calls to can_rmdir(), when 3211 * processing inodes with a lower number, from having to search 3212 * the parent root b+tree for dir index keys. 3213 */ 3214 key.objectid = dir; 3215 key.type = BTRFS_DIR_INDEX_KEY; 3216 key.offset = (u64)-1; 3217 3218 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 3219 if (ret < 0) { 3220 goto out; 3221 } else if (ret > 0) { 3222 /* Can't happen, the root is never empty. */ 3223 ASSERT(path->slots[0] > 0); 3224 if (WARN_ON(path->slots[0] == 0)) { 3225 ret = -EUCLEAN; 3226 goto out; 3227 } 3228 path->slots[0]--; 3229 } 3230 3231 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); 3232 if (key.objectid != dir || key.type != BTRFS_DIR_INDEX_KEY) { 3233 /* No index keys, dir can be removed. */ 3234 ret = 1; 3235 goto out; 3236 } 3237 3238 di = btrfs_item_ptr(path->nodes[0], path->slots[0], 3239 struct btrfs_dir_item); 3240 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &loc); 3241 dir_high_seq_ino = loc.objectid; 3242 if (sctx->cur_ino < dir_high_seq_ino) { 3243 ret = 0; 3244 goto out; 3245 } 3246 3247 btrfs_release_path(path); 3248 } 3249 3250 key.objectid = dir; 3251 key.type = BTRFS_DIR_INDEX_KEY; 3252 key.offset = (odi ? odi->last_dir_index_offset : 0); 3253 3254 btrfs_for_each_slot(root, &key, &found_key, path, iter_ret) { 3255 struct waiting_dir_move *dm; 3256 3257 if (found_key.objectid != key.objectid || 3258 found_key.type != key.type) 3259 break; 3260 3261 di = btrfs_item_ptr(path->nodes[0], path->slots[0], 3262 struct btrfs_dir_item); 3263 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &loc); 3264 3265 dir_high_seq_ino = max(dir_high_seq_ino, loc.objectid); 3266 last_dir_index_offset = found_key.offset; 3267 3268 dm = get_waiting_dir_move(sctx, loc.objectid); 3269 if (dm) { 3270 dm->rmdir_ino = dir; 3271 dm->rmdir_gen = dir_gen; 3272 ret = 0; 3273 goto out; 3274 } 3275 3276 if (loc.objectid > sctx->cur_ino) { 3277 ret = 0; 3278 goto out; 3279 } 3280 } 3281 if (iter_ret < 0) { 3282 ret = iter_ret; 3283 goto out; 3284 } 3285 free_orphan_dir_info(sctx, odi); 3286 3287 ret = 1; 3288 3289 out: 3290 btrfs_free_path(path); 3291 3292 if (ret) 3293 return ret; 3294 3295 if (!odi) { 3296 odi = add_orphan_dir_info(sctx, dir, dir_gen); 3297 if (IS_ERR(odi)) 3298 return PTR_ERR(odi); 3299 3300 odi->gen = dir_gen; 3301 } 3302 3303 odi->last_dir_index_offset = last_dir_index_offset; 3304 odi->dir_high_seq_ino = max(odi->dir_high_seq_ino, dir_high_seq_ino); 3305 3306 return 0; 3307 } 3308 3309 static int is_waiting_for_move(struct send_ctx *sctx, u64 ino) 3310 { 3311 struct waiting_dir_move *entry = get_waiting_dir_move(sctx, ino); 3312 3313 return entry != NULL; 3314 } 3315 3316 static int add_waiting_dir_move(struct send_ctx *sctx, u64 ino, bool orphanized) 3317 { 3318 struct rb_node **p = &sctx->waiting_dir_moves.rb_node; 3319 struct rb_node *parent = NULL; 3320 struct waiting_dir_move *entry, *dm; 3321 3322 dm = kmalloc(sizeof(*dm), GFP_KERNEL); 3323 if (!dm) 3324 return -ENOMEM; 3325 dm->ino = ino; 3326 dm->rmdir_ino = 0; 
3327 dm->rmdir_gen = 0; 3328 dm->orphanized = orphanized; 3329 3330 while (*p) { 3331 parent = *p; 3332 entry = rb_entry(parent, struct waiting_dir_move, node); 3333 if (ino < entry->ino) { 3334 p = &(*p)->rb_left; 3335 } else if (ino > entry->ino) { 3336 p = &(*p)->rb_right; 3337 } else { 3338 kfree(dm); 3339 return -EEXIST; 3340 } 3341 } 3342 3343 rb_link_node(&dm->node, parent, p); 3344 rb_insert_color(&dm->node, &sctx->waiting_dir_moves); 3345 return 0; 3346 } 3347 3348 static struct waiting_dir_move * 3349 get_waiting_dir_move(struct send_ctx *sctx, u64 ino) 3350 { 3351 struct rb_node *n = sctx->waiting_dir_moves.rb_node; 3352 struct waiting_dir_move *entry; 3353 3354 while (n) { 3355 entry = rb_entry(n, struct waiting_dir_move, node); 3356 if (ino < entry->ino) 3357 n = n->rb_left; 3358 else if (ino > entry->ino) 3359 n = n->rb_right; 3360 else 3361 return entry; 3362 } 3363 return NULL; 3364 } 3365 3366 static void free_waiting_dir_move(struct send_ctx *sctx, 3367 struct waiting_dir_move *dm) 3368 { 3369 if (!dm) 3370 return; 3371 rb_erase(&dm->node, &sctx->waiting_dir_moves); 3372 kfree(dm); 3373 } 3374 3375 static int add_pending_dir_move(struct send_ctx *sctx, 3376 u64 ino, 3377 u64 ino_gen, 3378 u64 parent_ino, 3379 struct list_head *new_refs, 3380 struct list_head *deleted_refs, 3381 const bool is_orphan) 3382 { 3383 struct rb_node **p = &sctx->pending_dir_moves.rb_node; 3384 struct rb_node *parent = NULL; 3385 struct pending_dir_move *entry = NULL, *pm; 3386 struct recorded_ref *cur; 3387 int exists = 0; 3388 int ret; 3389 3390 pm = kmalloc(sizeof(*pm), GFP_KERNEL); 3391 if (!pm) 3392 return -ENOMEM; 3393 pm->parent_ino = parent_ino; 3394 pm->ino = ino; 3395 pm->gen = ino_gen; 3396 INIT_LIST_HEAD(&pm->list); 3397 INIT_LIST_HEAD(&pm->update_refs); 3398 RB_CLEAR_NODE(&pm->node); 3399 3400 while (*p) { 3401 parent = *p; 3402 entry = rb_entry(parent, struct pending_dir_move, node); 3403 if (parent_ino < entry->parent_ino) { 3404 p = &(*p)->rb_left; 3405 } else if (parent_ino > entry->parent_ino) { 3406 p = &(*p)->rb_right; 3407 } else { 3408 exists = 1; 3409 break; 3410 } 3411 } 3412 3413 list_for_each_entry(cur, deleted_refs, list) { 3414 ret = dup_ref(cur, &pm->update_refs); 3415 if (ret < 0) 3416 goto out; 3417 } 3418 list_for_each_entry(cur, new_refs, list) { 3419 ret = dup_ref(cur, &pm->update_refs); 3420 if (ret < 0) 3421 goto out; 3422 } 3423 3424 ret = add_waiting_dir_move(sctx, pm->ino, is_orphan); 3425 if (ret) 3426 goto out; 3427 3428 if (exists) { 3429 list_add_tail(&pm->list, &entry->list); 3430 } else { 3431 rb_link_node(&pm->node, parent, p); 3432 rb_insert_color(&pm->node, &sctx->pending_dir_moves); 3433 } 3434 ret = 0; 3435 out: 3436 if (ret) { 3437 __free_recorded_refs(&pm->update_refs); 3438 kfree(pm); 3439 } 3440 return ret; 3441 } 3442 3443 static struct pending_dir_move *get_pending_dir_moves(struct send_ctx *sctx, 3444 u64 parent_ino) 3445 { 3446 struct rb_node *n = sctx->pending_dir_moves.rb_node; 3447 struct pending_dir_move *entry; 3448 3449 while (n) { 3450 entry = rb_entry(n, struct pending_dir_move, node); 3451 if (parent_ino < entry->parent_ino) 3452 n = n->rb_left; 3453 else if (parent_ino > entry->parent_ino) 3454 n = n->rb_right; 3455 else 3456 return entry; 3457 } 3458 return NULL; 3459 } 3460 3461 static int path_loop(struct send_ctx *sctx, struct fs_path *name, 3462 u64 ino, u64 gen, u64 *ancestor_ino) 3463 { 3464 int ret = 0; 3465 u64 parent_inode = 0; 3466 u64 parent_gen = 0; 3467 u64 start_ino = ino; 3468 3469 *ancestor_ino = 0; 3470 while 
(ino != BTRFS_FIRST_FREE_OBJECTID) { 3471 fs_path_reset(name); 3472 3473 if (is_waiting_for_rm(sctx, ino, gen)) 3474 break; 3475 if (is_waiting_for_move(sctx, ino)) { 3476 if (*ancestor_ino == 0) 3477 *ancestor_ino = ino; 3478 ret = get_first_ref(sctx->parent_root, ino, 3479 &parent_inode, &parent_gen, name); 3480 } else { 3481 ret = __get_cur_name_and_parent(sctx, ino, gen, 3482 &parent_inode, 3483 &parent_gen, name); 3484 if (ret > 0) { 3485 ret = 0; 3486 break; 3487 } 3488 } 3489 if (ret < 0) 3490 break; 3491 if (parent_inode == start_ino) { 3492 ret = 1; 3493 if (*ancestor_ino == 0) 3494 *ancestor_ino = ino; 3495 break; 3496 } 3497 ino = parent_inode; 3498 gen = parent_gen; 3499 } 3500 return ret; 3501 } 3502 3503 static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm) 3504 { 3505 struct fs_path *from_path = NULL; 3506 struct fs_path *to_path = NULL; 3507 struct fs_path *name = NULL; 3508 u64 orig_progress = sctx->send_progress; 3509 struct recorded_ref *cur; 3510 u64 parent_ino, parent_gen; 3511 struct waiting_dir_move *dm = NULL; 3512 u64 rmdir_ino = 0; 3513 u64 rmdir_gen; 3514 u64 ancestor; 3515 bool is_orphan; 3516 int ret; 3517 3518 name = fs_path_alloc(); 3519 from_path = fs_path_alloc(); 3520 if (!name || !from_path) { 3521 ret = -ENOMEM; 3522 goto out; 3523 } 3524 3525 dm = get_waiting_dir_move(sctx, pm->ino); 3526 ASSERT(dm); 3527 rmdir_ino = dm->rmdir_ino; 3528 rmdir_gen = dm->rmdir_gen; 3529 is_orphan = dm->orphanized; 3530 free_waiting_dir_move(sctx, dm); 3531 3532 if (is_orphan) { 3533 ret = gen_unique_name(sctx, pm->ino, 3534 pm->gen, from_path); 3535 } else { 3536 ret = get_first_ref(sctx->parent_root, pm->ino, 3537 &parent_ino, &parent_gen, name); 3538 if (ret < 0) 3539 goto out; 3540 ret = get_cur_path(sctx, parent_ino, parent_gen, 3541 from_path); 3542 if (ret < 0) 3543 goto out; 3544 ret = fs_path_add_path(from_path, name); 3545 } 3546 if (ret < 0) 3547 goto out; 3548 3549 sctx->send_progress = sctx->cur_ino + 1; 3550 ret = path_loop(sctx, name, pm->ino, pm->gen, &ancestor); 3551 if (ret < 0) 3552 goto out; 3553 if (ret) { 3554 LIST_HEAD(deleted_refs); 3555 ASSERT(ancestor > BTRFS_FIRST_FREE_OBJECTID); 3556 ret = add_pending_dir_move(sctx, pm->ino, pm->gen, ancestor, 3557 &pm->update_refs, &deleted_refs, 3558 is_orphan); 3559 if (ret < 0) 3560 goto out; 3561 if (rmdir_ino) { 3562 dm = get_waiting_dir_move(sctx, pm->ino); 3563 ASSERT(dm); 3564 dm->rmdir_ino = rmdir_ino; 3565 dm->rmdir_gen = rmdir_gen; 3566 } 3567 goto out; 3568 } 3569 fs_path_reset(name); 3570 to_path = name; 3571 name = NULL; 3572 ret = get_cur_path(sctx, pm->ino, pm->gen, to_path); 3573 if (ret < 0) 3574 goto out; 3575 3576 ret = send_rename(sctx, from_path, to_path); 3577 if (ret < 0) 3578 goto out; 3579 3580 if (rmdir_ino) { 3581 struct orphan_dir_info *odi; 3582 u64 gen; 3583 3584 odi = get_orphan_dir_info(sctx, rmdir_ino, rmdir_gen); 3585 if (!odi) { 3586 /* already deleted */ 3587 goto finish; 3588 } 3589 gen = odi->gen; 3590 3591 ret = can_rmdir(sctx, rmdir_ino, gen); 3592 if (ret < 0) 3593 goto out; 3594 if (!ret) 3595 goto finish; 3596 3597 name = fs_path_alloc(); 3598 if (!name) { 3599 ret = -ENOMEM; 3600 goto out; 3601 } 3602 ret = get_cur_path(sctx, rmdir_ino, gen, name); 3603 if (ret < 0) 3604 goto out; 3605 ret = send_rmdir(sctx, name); 3606 if (ret < 0) 3607 goto out; 3608 } 3609 3610 finish: 3611 ret = cache_dir_utimes(sctx, pm->ino, pm->gen); 3612 if (ret < 0) 3613 goto out; 3614 3615 /* 3616 * After rename/move, need to update the utimes of both new parent(s) 
3617 * and old parent(s). 3618 */ 3619 list_for_each_entry(cur, &pm->update_refs, list) { 3620 /* 3621 * The parent inode might have been deleted in the send snapshot 3622 */ 3623 ret = get_inode_info(sctx->send_root, cur->dir, NULL); 3624 if (ret == -ENOENT) { 3625 ret = 0; 3626 continue; 3627 } 3628 if (ret < 0) 3629 goto out; 3630 3631 ret = cache_dir_utimes(sctx, cur->dir, cur->dir_gen); 3632 if (ret < 0) 3633 goto out; 3634 } 3635 3636 out: 3637 fs_path_free(name); 3638 fs_path_free(from_path); 3639 fs_path_free(to_path); 3640 sctx->send_progress = orig_progress; 3641 3642 return ret; 3643 } 3644 3645 static void free_pending_move(struct send_ctx *sctx, struct pending_dir_move *m) 3646 { 3647 if (!list_empty(&m->list)) 3648 list_del(&m->list); 3649 if (!RB_EMPTY_NODE(&m->node)) 3650 rb_erase(&m->node, &sctx->pending_dir_moves); 3651 __free_recorded_refs(&m->update_refs); 3652 kfree(m); 3653 } 3654 3655 static void tail_append_pending_moves(struct send_ctx *sctx, 3656 struct pending_dir_move *moves, 3657 struct list_head *stack) 3658 { 3659 if (list_empty(&moves->list)) { 3660 list_add_tail(&moves->list, stack); 3661 } else { 3662 LIST_HEAD(list); 3663 list_splice_init(&moves->list, &list); 3664 list_add_tail(&moves->list, stack); 3665 list_splice_tail(&list, stack); 3666 } 3667 if (!RB_EMPTY_NODE(&moves->node)) { 3668 rb_erase(&moves->node, &sctx->pending_dir_moves); 3669 RB_CLEAR_NODE(&moves->node); 3670 } 3671 } 3672 3673 static int apply_children_dir_moves(struct send_ctx *sctx) 3674 { 3675 struct pending_dir_move *pm; 3676 struct list_head stack; 3677 u64 parent_ino = sctx->cur_ino; 3678 int ret = 0; 3679 3680 pm = get_pending_dir_moves(sctx, parent_ino); 3681 if (!pm) 3682 return 0; 3683 3684 INIT_LIST_HEAD(&stack); 3685 tail_append_pending_moves(sctx, pm, &stack); 3686 3687 while (!list_empty(&stack)) { 3688 pm = list_first_entry(&stack, struct pending_dir_move, list); 3689 parent_ino = pm->ino; 3690 ret = apply_dir_move(sctx, pm); 3691 free_pending_move(sctx, pm); 3692 if (ret) 3693 goto out; 3694 pm = get_pending_dir_moves(sctx, parent_ino); 3695 if (pm) 3696 tail_append_pending_moves(sctx, pm, &stack); 3697 } 3698 return 0; 3699 3700 out: 3701 while (!list_empty(&stack)) { 3702 pm = list_first_entry(&stack, struct pending_dir_move, list); 3703 free_pending_move(sctx, pm); 3704 } 3705 return ret; 3706 } 3707 3708 /* 3709 * We might need to delay a directory rename even when no ancestor directory 3710 * (in the send root) with a higher inode number than ours (sctx->cur_ino) was 3711 * renamed. This happens when we rename a directory to the old name (the name 3712 * in the parent root) of some other unrelated directory that got its rename 3713 * delayed due to some ancestor with higher number that got renamed. 3714 * 3715 * Example: 3716 * 3717 * Parent snapshot: 3718 * . (ino 256) 3719 * |---- a/ (ino 257) 3720 * | |---- file (ino 260) 3721 * | 3722 * |---- b/ (ino 258) 3723 * |---- c/ (ino 259) 3724 * 3725 * Send snapshot: 3726 * . (ino 256) 3727 * |---- a/ (ino 258) 3728 * |---- x/ (ino 259) 3729 * |---- y/ (ino 257) 3730 * |----- file (ino 260) 3731 * 3732 * Here we can not rename 258 from 'b' to 'a' without the rename of inode 257 3733 * from 'a' to 'x/y' happening first, which in turn depends on the rename of 3734 * inode 259 from 'c' to 'x'. 
So the order of rename commands the send stream 3735 * must issue is: 3736 * 3737 * 1 - rename 259 from 'c' to 'x' 3738 * 2 - rename 257 from 'a' to 'x/y' 3739 * 3 - rename 258 from 'b' to 'a' 3740 * 3741 * Returns 1 if the rename of sctx->cur_ino needs to be delayed, 0 if it can 3742 * be done right away and < 0 on error. 3743 */ 3744 static int wait_for_dest_dir_move(struct send_ctx *sctx, 3745 struct recorded_ref *parent_ref, 3746 const bool is_orphan) 3747 { 3748 struct btrfs_fs_info *fs_info = sctx->parent_root->fs_info; 3749 struct btrfs_path *path; 3750 struct btrfs_key key; 3751 struct btrfs_key di_key; 3752 struct btrfs_dir_item *di; 3753 u64 left_gen; 3754 u64 right_gen; 3755 int ret = 0; 3756 struct waiting_dir_move *wdm; 3757 3758 if (RB_EMPTY_ROOT(&sctx->waiting_dir_moves)) 3759 return 0; 3760 3761 path = alloc_path_for_send(); 3762 if (!path) 3763 return -ENOMEM; 3764 3765 key.objectid = parent_ref->dir; 3766 key.type = BTRFS_DIR_ITEM_KEY; 3767 key.offset = btrfs_name_hash(parent_ref->name, parent_ref->name_len); 3768 3769 ret = btrfs_search_slot(NULL, sctx->parent_root, &key, path, 0, 0); 3770 if (ret < 0) { 3771 goto out; 3772 } else if (ret > 0) { 3773 ret = 0; 3774 goto out; 3775 } 3776 3777 di = btrfs_match_dir_item_name(fs_info, path, parent_ref->name, 3778 parent_ref->name_len); 3779 if (!di) { 3780 ret = 0; 3781 goto out; 3782 } 3783 /* 3784 * di_key.objectid has the number of the inode that has a dentry in the 3785 * parent directory with the same name that sctx->cur_ino is being 3786 * renamed to. We need to check if that inode is in the send root as 3787 * well and if it is currently marked as an inode with a pending rename, 3788 * if it is, we need to delay the rename of sctx->cur_ino as well, so 3789 * that it happens after that other inode is renamed. 3790 */ 3791 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &di_key); 3792 if (di_key.type != BTRFS_INODE_ITEM_KEY) { 3793 ret = 0; 3794 goto out; 3795 } 3796 3797 ret = get_inode_gen(sctx->parent_root, di_key.objectid, &left_gen); 3798 if (ret < 0) 3799 goto out; 3800 ret = get_inode_gen(sctx->send_root, di_key.objectid, &right_gen); 3801 if (ret < 0) { 3802 if (ret == -ENOENT) 3803 ret = 0; 3804 goto out; 3805 } 3806 3807 /* Different inode, no need to delay the rename of sctx->cur_ino */ 3808 if (right_gen != left_gen) { 3809 ret = 0; 3810 goto out; 3811 } 3812 3813 wdm = get_waiting_dir_move(sctx, di_key.objectid); 3814 if (wdm && !wdm->orphanized) { 3815 ret = add_pending_dir_move(sctx, 3816 sctx->cur_ino, 3817 sctx->cur_inode_gen, 3818 di_key.objectid, 3819 &sctx->new_refs, 3820 &sctx->deleted_refs, 3821 is_orphan); 3822 if (!ret) 3823 ret = 1; 3824 } 3825 out: 3826 btrfs_free_path(path); 3827 return ret; 3828 } 3829 3830 /* 3831 * Check if inode ino2, or any of its ancestors, is inode ino1. 3832 * Return 1 if true, 0 if false and < 0 on error. 
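 * Only the first ref of each ancestor is followed here; the caller,
 * is_ancestor(), iterates over all refs of ino2 to also cover files with
 * multiple hard links.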
3833 */ 3834 static int check_ino_in_path(struct btrfs_root *root, 3835 const u64 ino1, 3836 const u64 ino1_gen, 3837 const u64 ino2, 3838 const u64 ino2_gen, 3839 struct fs_path *fs_path) 3840 { 3841 u64 ino = ino2; 3842 3843 if (ino1 == ino2) 3844 return ino1_gen == ino2_gen; 3845 3846 while (ino > BTRFS_FIRST_FREE_OBJECTID) { 3847 u64 parent; 3848 u64 parent_gen; 3849 int ret; 3850 3851 fs_path_reset(fs_path); 3852 ret = get_first_ref(root, ino, &parent, &parent_gen, fs_path); 3853 if (ret < 0) 3854 return ret; 3855 if (parent == ino1) 3856 return parent_gen == ino1_gen; 3857 ino = parent; 3858 } 3859 return 0; 3860 } 3861 3862 /* 3863 * Check if inode ino1 is an ancestor of inode ino2 in the given root for any 3864 * possible path (in case ino2 is not a directory and has multiple hard links). 3865 * Return 1 if true, 0 if false and < 0 on error. 3866 */ 3867 static int is_ancestor(struct btrfs_root *root, 3868 const u64 ino1, 3869 const u64 ino1_gen, 3870 const u64 ino2, 3871 struct fs_path *fs_path) 3872 { 3873 bool free_fs_path = false; 3874 int ret = 0; 3875 int iter_ret = 0; 3876 struct btrfs_path *path = NULL; 3877 struct btrfs_key key; 3878 3879 if (!fs_path) { 3880 fs_path = fs_path_alloc(); 3881 if (!fs_path) 3882 return -ENOMEM; 3883 free_fs_path = true; 3884 } 3885 3886 path = alloc_path_for_send(); 3887 if (!path) { 3888 ret = -ENOMEM; 3889 goto out; 3890 } 3891 3892 key.objectid = ino2; 3893 key.type = BTRFS_INODE_REF_KEY; 3894 key.offset = 0; 3895 3896 btrfs_for_each_slot(root, &key, &key, path, iter_ret) { 3897 struct extent_buffer *leaf = path->nodes[0]; 3898 int slot = path->slots[0]; 3899 u32 cur_offset = 0; 3900 u32 item_size; 3901 3902 if (key.objectid != ino2) 3903 break; 3904 if (key.type != BTRFS_INODE_REF_KEY && 3905 key.type != BTRFS_INODE_EXTREF_KEY) 3906 break; 3907 3908 item_size = btrfs_item_size(leaf, slot); 3909 while (cur_offset < item_size) { 3910 u64 parent; 3911 u64 parent_gen; 3912 3913 if (key.type == BTRFS_INODE_EXTREF_KEY) { 3914 unsigned long ptr; 3915 struct btrfs_inode_extref *extref; 3916 3917 ptr = btrfs_item_ptr_offset(leaf, slot); 3918 extref = (struct btrfs_inode_extref *) 3919 (ptr + cur_offset); 3920 parent = btrfs_inode_extref_parent(leaf, 3921 extref); 3922 cur_offset += sizeof(*extref); 3923 cur_offset += btrfs_inode_extref_name_len(leaf, 3924 extref); 3925 } else { 3926 parent = key.offset; 3927 cur_offset = item_size; 3928 } 3929 3930 ret = get_inode_gen(root, parent, &parent_gen); 3931 if (ret < 0) 3932 goto out; 3933 ret = check_ino_in_path(root, ino1, ino1_gen, 3934 parent, parent_gen, fs_path); 3935 if (ret) 3936 goto out; 3937 } 3938 } 3939 ret = 0; 3940 if (iter_ret < 0) 3941 ret = iter_ret; 3942 3943 out: 3944 btrfs_free_path(path); 3945 if (free_fs_path) 3946 fs_path_free(fs_path); 3947 return ret; 3948 } 3949 3950 static int wait_for_parent_move(struct send_ctx *sctx, 3951 struct recorded_ref *parent_ref, 3952 const bool is_orphan) 3953 { 3954 int ret = 0; 3955 u64 ino = parent_ref->dir; 3956 u64 ino_gen = parent_ref->dir_gen; 3957 u64 parent_ino_before, parent_ino_after; 3958 struct fs_path *path_before = NULL; 3959 struct fs_path *path_after = NULL; 3960 int len1, len2; 3961 3962 path_after = fs_path_alloc(); 3963 path_before = fs_path_alloc(); 3964 if (!path_after || !path_before) { 3965 ret = -ENOMEM; 3966 goto out; 3967 } 3968 3969 /* 3970 * Our current directory inode may not yet be renamed/moved because some 3971 * ancestor (immediate or not) has to be renamed/moved first. 
So find if 3972 * such ancestor exists and make sure our own rename/move happens after 3973 * that ancestor is processed to avoid path build infinite loops (done 3974 * at get_cur_path()). 3975 */ 3976 while (ino > BTRFS_FIRST_FREE_OBJECTID) { 3977 u64 parent_ino_after_gen; 3978 3979 if (is_waiting_for_move(sctx, ino)) { 3980 /* 3981 * If the current inode is an ancestor of ino in the 3982 * parent root, we need to delay the rename of the 3983 * current inode, otherwise don't delayed the rename 3984 * because we can end up with a circular dependency 3985 * of renames, resulting in some directories never 3986 * getting the respective rename operations issued in 3987 * the send stream or getting into infinite path build 3988 * loops. 3989 */ 3990 ret = is_ancestor(sctx->parent_root, 3991 sctx->cur_ino, sctx->cur_inode_gen, 3992 ino, path_before); 3993 if (ret) 3994 break; 3995 } 3996 3997 fs_path_reset(path_before); 3998 fs_path_reset(path_after); 3999 4000 ret = get_first_ref(sctx->send_root, ino, &parent_ino_after, 4001 &parent_ino_after_gen, path_after); 4002 if (ret < 0) 4003 goto out; 4004 ret = get_first_ref(sctx->parent_root, ino, &parent_ino_before, 4005 NULL, path_before); 4006 if (ret < 0 && ret != -ENOENT) { 4007 goto out; 4008 } else if (ret == -ENOENT) { 4009 ret = 0; 4010 break; 4011 } 4012 4013 len1 = fs_path_len(path_before); 4014 len2 = fs_path_len(path_after); 4015 if (ino > sctx->cur_ino && 4016 (parent_ino_before != parent_ino_after || len1 != len2 || 4017 memcmp(path_before->start, path_after->start, len1))) { 4018 u64 parent_ino_gen; 4019 4020 ret = get_inode_gen(sctx->parent_root, ino, &parent_ino_gen); 4021 if (ret < 0) 4022 goto out; 4023 if (ino_gen == parent_ino_gen) { 4024 ret = 1; 4025 break; 4026 } 4027 } 4028 ino = parent_ino_after; 4029 ino_gen = parent_ino_after_gen; 4030 } 4031 4032 out: 4033 fs_path_free(path_before); 4034 fs_path_free(path_after); 4035 4036 if (ret == 1) { 4037 ret = add_pending_dir_move(sctx, 4038 sctx->cur_ino, 4039 sctx->cur_inode_gen, 4040 ino, 4041 &sctx->new_refs, 4042 &sctx->deleted_refs, 4043 is_orphan); 4044 if (!ret) 4045 ret = 1; 4046 } 4047 4048 return ret; 4049 } 4050 4051 static int update_ref_path(struct send_ctx *sctx, struct recorded_ref *ref) 4052 { 4053 int ret; 4054 struct fs_path *new_path; 4055 4056 /* 4057 * Our reference's name member points to its full_path member string, so 4058 * we use here a new path. 4059 */ 4060 new_path = fs_path_alloc(); 4061 if (!new_path) 4062 return -ENOMEM; 4063 4064 ret = get_cur_path(sctx, ref->dir, ref->dir_gen, new_path); 4065 if (ret < 0) { 4066 fs_path_free(new_path); 4067 return ret; 4068 } 4069 ret = fs_path_add(new_path, ref->name, ref->name_len); 4070 if (ret < 0) { 4071 fs_path_free(new_path); 4072 return ret; 4073 } 4074 4075 fs_path_free(ref->full_path); 4076 set_ref_path(ref, new_path); 4077 4078 return 0; 4079 } 4080 4081 /* 4082 * When processing the new references for an inode we may orphanize an existing 4083 * directory inode because its old name conflicts with one of the new references 4084 * of the current inode. Later, when processing another new reference of our 4085 * inode, we might need to orphanize another inode, but the path we have in the 4086 * reference reflects the pre-orphanization name of the directory we previously 4087 * orphanized. For example: 4088 * 4089 * parent snapshot looks like: 4090 * 4091 * . 
(ino 256) 4092 * |----- f1 (ino 257) 4093 * |----- f2 (ino 258) 4094 * |----- d1/ (ino 259) 4095 * |----- d2/ (ino 260) 4096 * 4097 * send snapshot looks like: 4098 * 4099 * . (ino 256) 4100 * |----- d1 (ino 258) 4101 * |----- f2/ (ino 259) 4102 * |----- f2_link/ (ino 260) 4103 * | |----- f1 (ino 257) 4104 * | 4105 * |----- d2 (ino 258) 4106 * 4107 * When processing inode 257 we compute the name for inode 259 as "d1", and we 4108 * cache it in the name cache. Later when we start processing inode 258, when 4109 * collecting all its new references we set a full path of "d1/d2" for its new 4110 * reference with name "d2". When we start processing the new references we 4111 * start by processing the new reference with name "d1", and this results in 4112 * orphanizing inode 259, since its old reference causes a conflict. Then we 4113 * move on the next new reference, with name "d2", and we find out we must 4114 * orphanize inode 260, as its old reference conflicts with ours - but for the 4115 * orphanization we use a source path corresponding to the path we stored in the 4116 * new reference, which is "d1/d2" and not "o259-6-0/d2" - this makes the 4117 * receiver fail since the path component "d1/" no longer exists, it was renamed 4118 * to "o259-6-0/" when processing the previous new reference. So in this case we 4119 * must recompute the path in the new reference and use it for the new 4120 * orphanization operation. 4121 */ 4122 static int refresh_ref_path(struct send_ctx *sctx, struct recorded_ref *ref) 4123 { 4124 char *name; 4125 int ret; 4126 4127 name = kmemdup(ref->name, ref->name_len, GFP_KERNEL); 4128 if (!name) 4129 return -ENOMEM; 4130 4131 fs_path_reset(ref->full_path); 4132 ret = get_cur_path(sctx, ref->dir, ref->dir_gen, ref->full_path); 4133 if (ret < 0) 4134 goto out; 4135 4136 ret = fs_path_add(ref->full_path, name, ref->name_len); 4137 if (ret < 0) 4138 goto out; 4139 4140 /* Update the reference's base name pointer. */ 4141 set_ref_path(ref, ref->full_path); 4142 out: 4143 kfree(name); 4144 return ret; 4145 } 4146 4147 /* 4148 * This does all the move/link/unlink/rmdir magic. 4149 */ 4150 static int process_recorded_refs(struct send_ctx *sctx, int *pending_move) 4151 { 4152 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info; 4153 int ret = 0; 4154 struct recorded_ref *cur; 4155 struct recorded_ref *cur2; 4156 struct list_head check_dirs; 4157 struct fs_path *valid_path = NULL; 4158 u64 ow_inode = 0; 4159 u64 ow_gen; 4160 u64 ow_mode; 4161 int did_overwrite = 0; 4162 int is_orphan = 0; 4163 u64 last_dir_ino_rm = 0; 4164 bool can_rename = true; 4165 bool orphanized_dir = false; 4166 bool orphanized_ancestor = false; 4167 4168 btrfs_debug(fs_info, "process_recorded_refs %llu", sctx->cur_ino); 4169 4170 /* 4171 * This should never happen as the root dir always has the same ref 4172 * which is always '..' 4173 */ 4174 BUG_ON(sctx->cur_ino <= BTRFS_FIRST_FREE_OBJECTID); 4175 INIT_LIST_HEAD(&check_dirs); 4176 4177 valid_path = fs_path_alloc(); 4178 if (!valid_path) { 4179 ret = -ENOMEM; 4180 goto out; 4181 } 4182 4183 /* 4184 * First, check if the first ref of the current inode was overwritten 4185 * before. If yes, we know that the current inode was already orphanized 4186 * and thus use the orphan name. If not, we can use get_cur_path to 4187 * get the path of the first ref as it would like while receiving at 4188 * this point in time. 4189 * New inodes are always orphan at the beginning, so force to use the 4190 * orphan name in this case. 
4191 * The first ref is stored in valid_path and will be updated if it 4192 * gets moved around. 4193 */ 4194 if (!sctx->cur_inode_new) { 4195 ret = did_overwrite_first_ref(sctx, sctx->cur_ino, 4196 sctx->cur_inode_gen); 4197 if (ret < 0) 4198 goto out; 4199 if (ret) 4200 did_overwrite = 1; 4201 } 4202 if (sctx->cur_inode_new || did_overwrite) { 4203 ret = gen_unique_name(sctx, sctx->cur_ino, 4204 sctx->cur_inode_gen, valid_path); 4205 if (ret < 0) 4206 goto out; 4207 is_orphan = 1; 4208 } else { 4209 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, 4210 valid_path); 4211 if (ret < 0) 4212 goto out; 4213 } 4214 4215 /* 4216 * Before doing any rename and link operations, do a first pass on the 4217 * new references to orphanize any unprocessed inodes that may have a 4218 * reference that conflicts with one of the new references of the current 4219 * inode. This needs to happen first because a new reference may conflict 4220 * with the old reference of a parent directory, so we must make sure 4221 * that the paths used for link and rename commands don't use an 4222 * orphanized name when an ancestor was not yet orphanized. 4223 * 4224 * Example: 4225 * 4226 * Parent snapshot: 4227 * 4228 * . (ino 256) 4229 * |----- testdir/ (ino 259) 4230 * | |----- a (ino 257) 4231 * | 4232 * |----- b (ino 258) 4233 * 4234 * Send snapshot: 4235 * 4236 * . (ino 256) 4237 * |----- testdir_2/ (ino 259) 4238 * | |----- a (ino 260) 4239 * | 4240 * |----- testdir (ino 257) 4241 * |----- b (ino 257) 4242 * |----- b2 (ino 258) 4243 * 4244 * Processing the new reference for inode 257 with name "b" may happen 4245 * before processing the new reference with name "testdir". If so, we 4246 * must make sure that by the time we send a link command to create the 4247 * hard link "b", inode 259 was already orphanized, since the generated 4248 * path in "valid_path" already contains the orphanized name for 259. 4249 * We are processing inode 257, so only later, when processing 259, do we do 4250 * the rename operation to change its temporary (orphanized) name to 4251 * "testdir_2". 4252 */ 4253 list_for_each_entry(cur, &sctx->new_refs, list) { 4254 ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen, NULL, NULL); 4255 if (ret < 0) 4256 goto out; 4257 if (ret == inode_state_will_create) 4258 continue; 4259 4260 /* 4261 * Check if this new ref would overwrite the first ref of another 4262 * unprocessed inode. If yes, orphanize the overwritten inode. 4263 * If we find an overwritten ref that is not the first ref, 4264 * simply unlink it. 4265 */ 4266 ret = will_overwrite_ref(sctx, cur->dir, cur->dir_gen, 4267 cur->name, cur->name_len, 4268 &ow_inode, &ow_gen, &ow_mode); 4269 if (ret < 0) 4270 goto out; 4271 if (ret) { 4272 ret = is_first_ref(sctx->parent_root, 4273 ow_inode, cur->dir, cur->name, 4274 cur->name_len); 4275 if (ret < 0) 4276 goto out; 4277 if (ret) { 4278 struct name_cache_entry *nce; 4279 struct waiting_dir_move *wdm; 4280 4281 if (orphanized_dir) { 4282 ret = refresh_ref_path(sctx, cur); 4283 if (ret < 0) 4284 goto out; 4285 } 4286 4287 ret = orphanize_inode(sctx, ow_inode, ow_gen, 4288 cur->full_path); 4289 if (ret < 0) 4290 goto out; 4291 if (S_ISDIR(ow_mode)) 4292 orphanized_dir = true; 4293 4294 /* 4295 * If ow_inode has its rename operation delayed 4296 * make sure that its orphanized name is used in 4297 * the source path when performing its rename 4298 * operation.
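 * (Orphanized names are generated by gen_unique_name(); as the example in the
 * comment for refresh_ref_path() shows with "o259-6-0", they have the form
 * o<ino>-<gen>-<idx>.)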
4299 */ 4300 wdm = get_waiting_dir_move(sctx, ow_inode); 4301 if (wdm) 4302 wdm->orphanized = true; 4303 4304 /* 4305 * Make sure we clear our orphanized inode's 4306 * name from the name cache. This is because the 4307 * inode ow_inode might be an ancestor of some 4308 * other inode that will be orphanized as well 4309 * later and has an inode number greater than 4310 * sctx->send_progress. We need to prevent 4311 * future name lookups from using the old name 4312 * and have them get the orphan name instead. 4313 */ 4314 nce = name_cache_search(sctx, ow_inode, ow_gen); 4315 if (nce) 4316 btrfs_lru_cache_remove(&sctx->name_cache, 4317 &nce->entry); 4318 4319 /* 4320 * ow_inode might currently be an ancestor of 4321 * cur_ino, therefore compute valid_path (the 4322 * current path of cur_ino) again because it 4323 * might contain the pre-orphanization name of 4324 * ow_inode, which is no longer valid. 4325 */ 4326 ret = is_ancestor(sctx->parent_root, 4327 ow_inode, ow_gen, 4328 sctx->cur_ino, NULL); 4329 if (ret > 0) { 4330 orphanized_ancestor = true; 4331 fs_path_reset(valid_path); 4332 ret = get_cur_path(sctx, sctx->cur_ino, 4333 sctx->cur_inode_gen, 4334 valid_path); 4335 } 4336 if (ret < 0) 4337 goto out; 4338 } else { 4339 /* 4340 * If we previously orphanized a directory that 4341 * collided with a new reference that we already 4342 * processed, recompute the current path because 4343 * that directory may be part of the path. 4344 */ 4345 if (orphanized_dir) { 4346 ret = refresh_ref_path(sctx, cur); 4347 if (ret < 0) 4348 goto out; 4349 } 4350 ret = send_unlink(sctx, cur->full_path); 4351 if (ret < 0) 4352 goto out; 4353 } 4354 } 4355 4356 } 4357 4358 list_for_each_entry(cur, &sctx->new_refs, list) { 4359 /* 4360 * We may have refs where the parent directory does not exist 4361 * yet. This happens if the parent directory's inum is higher 4362 * than the current inum. To handle this case, we create the 4363 * parent directory out of order. But we need to check if this 4364 * already happened before due to other refs in the same dir. 4365 */ 4366 ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen, NULL, NULL); 4367 if (ret < 0) 4368 goto out; 4369 if (ret == inode_state_will_create) { 4370 ret = 0; 4371 /* 4372 * First check if any of the current inode's refs 4373 * already created the dir. 4374 */ 4375 list_for_each_entry(cur2, &sctx->new_refs, list) { 4376 if (cur == cur2) 4377 break; 4378 if (cur2->dir == cur->dir) { 4379 ret = 1; 4380 break; 4381 } 4382 } 4383 4384 /* 4385 * If that did not happen, check if a previous inode 4386 * already created the dir. 4387 */ 4388 if (!ret) 4389 ret = did_create_dir(sctx, cur->dir); 4390 if (ret < 0) 4391 goto out; 4392 if (!ret) { 4393 ret = send_create_inode(sctx, cur->dir); 4394 if (ret < 0) 4395 goto out; 4396 cache_dir_created(sctx, cur->dir); 4397 } 4398 } 4399 4400 if (S_ISDIR(sctx->cur_inode_mode) && sctx->parent_root) { 4401 ret = wait_for_dest_dir_move(sctx, cur, is_orphan); 4402 if (ret < 0) 4403 goto out; 4404 if (ret == 1) { 4405 can_rename = false; 4406 *pending_move = 1; 4407 } 4408 } 4409 4410 if (S_ISDIR(sctx->cur_inode_mode) && sctx->parent_root && 4411 can_rename) { 4412 ret = wait_for_parent_move(sctx, cur, is_orphan); 4413 if (ret < 0) 4414 goto out; 4415 if (ret == 1) { 4416 can_rename = false; 4417 *pending_move = 1; 4418 } 4419 } 4420 4421 /* 4422 * link/move the ref to the new place. If we have an orphan 4423 * inode, move it and update valid_path. If not, link or move 4424 * it depending on the inode mode.
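 * (Directories can never be hard linked, so for a directory this is always
 * a rename; for other inodes a link command may be sent instead, as done
 * below.)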
4425 */ 4426 if (is_orphan && can_rename) { 4427 ret = send_rename(sctx, valid_path, cur->full_path); 4428 if (ret < 0) 4429 goto out; 4430 is_orphan = 0; 4431 ret = fs_path_copy(valid_path, cur->full_path); 4432 if (ret < 0) 4433 goto out; 4434 } else if (can_rename) { 4435 if (S_ISDIR(sctx->cur_inode_mode)) { 4436 /* 4437 * Dirs can't be linked, so move it. For moved 4438 * dirs, we always have one new and one deleted 4439 * ref. The deleted ref is ignored later. 4440 */ 4441 ret = send_rename(sctx, valid_path, 4442 cur->full_path); 4443 if (!ret) 4444 ret = fs_path_copy(valid_path, 4445 cur->full_path); 4446 if (ret < 0) 4447 goto out; 4448 } else { 4449 /* 4450 * We might have previously orphanized an inode 4451 * which is an ancestor of our current inode, 4452 * so our reference's full path, which was 4453 * computed before any such orphanizations, must 4454 * be updated. 4455 */ 4456 if (orphanized_dir) { 4457 ret = update_ref_path(sctx, cur); 4458 if (ret < 0) 4459 goto out; 4460 } 4461 ret = send_link(sctx, cur->full_path, 4462 valid_path); 4463 if (ret < 0) 4464 goto out; 4465 } 4466 } 4467 ret = dup_ref(cur, &check_dirs); 4468 if (ret < 0) 4469 goto out; 4470 } 4471 4472 if (S_ISDIR(sctx->cur_inode_mode) && sctx->cur_inode_deleted) { 4473 /* 4474 * Check if we can already rmdir the directory. If not, 4475 * orphanize it. For every dir item inside that gets deleted 4476 * later, we do this check again and rmdir it then if possible. 4477 * See the use of check_dirs for more details. 4478 */ 4479 ret = can_rmdir(sctx, sctx->cur_ino, sctx->cur_inode_gen); 4480 if (ret < 0) 4481 goto out; 4482 if (ret) { 4483 ret = send_rmdir(sctx, valid_path); 4484 if (ret < 0) 4485 goto out; 4486 } else if (!is_orphan) { 4487 ret = orphanize_inode(sctx, sctx->cur_ino, 4488 sctx->cur_inode_gen, valid_path); 4489 if (ret < 0) 4490 goto out; 4491 is_orphan = 1; 4492 } 4493 4494 list_for_each_entry(cur, &sctx->deleted_refs, list) { 4495 ret = dup_ref(cur, &check_dirs); 4496 if (ret < 0) 4497 goto out; 4498 } 4499 } else if (S_ISDIR(sctx->cur_inode_mode) && 4500 !list_empty(&sctx->deleted_refs)) { 4501 /* 4502 * We have a moved dir. Add the old parent to check_dirs 4503 */ 4504 cur = list_entry(sctx->deleted_refs.next, struct recorded_ref, 4505 list); 4506 ret = dup_ref(cur, &check_dirs); 4507 if (ret < 0) 4508 goto out; 4509 } else if (!S_ISDIR(sctx->cur_inode_mode)) { 4510 /* 4511 * We have a non dir inode. Go through all deleted refs and 4512 * unlink them if they were not already overwritten by other 4513 * inodes. 4514 */ 4515 list_for_each_entry(cur, &sctx->deleted_refs, list) { 4516 ret = did_overwrite_ref(sctx, cur->dir, cur->dir_gen, 4517 sctx->cur_ino, sctx->cur_inode_gen, 4518 cur->name, cur->name_len); 4519 if (ret < 0) 4520 goto out; 4521 if (!ret) { 4522 /* 4523 * If we orphanized any ancestor before, we need 4524 * to recompute the full path for deleted names, 4525 * since any such path was computed before we 4526 * processed any references and orphanized any 4527 * ancestor inode. 4528 */ 4529 if (orphanized_ancestor) { 4530 ret = update_ref_path(sctx, cur); 4531 if (ret < 0) 4532 goto out; 4533 } 4534 ret = send_unlink(sctx, cur->full_path); 4535 if (ret < 0) 4536 goto out; 4537 } 4538 ret = dup_ref(cur, &check_dirs); 4539 if (ret < 0) 4540 goto out; 4541 } 4542 /* 4543 * If the inode is still orphan, unlink the orphan. This may 4544 * happen when a previous inode did overwrite the first ref 4545 * of this inode and no new refs were added for the current 4546 * inode. 
Unlinking does not mean that the inode is deleted in 4547 * all cases. There may still be links to this inode in other 4548 * places. 4549 */ 4550 if (is_orphan) { 4551 ret = send_unlink(sctx, valid_path); 4552 if (ret < 0) 4553 goto out; 4554 } 4555 } 4556 4557 /* 4558 * We did collect all parent dirs where cur_inode was once located. We 4559 * now go through all these dirs and check if they are pending for 4560 * deletion and if it's finally possible to perform the rmdir now. 4561 * We also update the inode stats of the parent dirs here. 4562 */ 4563 list_for_each_entry(cur, &check_dirs, list) { 4564 /* 4565 * In case we had refs into dirs that were not processed yet, 4566 * we don't need to do the utime and rmdir logic for these dirs. 4567 * The dir will be processed later. 4568 */ 4569 if (cur->dir > sctx->cur_ino) 4570 continue; 4571 4572 ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen, NULL, NULL); 4573 if (ret < 0) 4574 goto out; 4575 4576 if (ret == inode_state_did_create || 4577 ret == inode_state_no_change) { 4578 ret = cache_dir_utimes(sctx, cur->dir, cur->dir_gen); 4579 if (ret < 0) 4580 goto out; 4581 } else if (ret == inode_state_did_delete && 4582 cur->dir != last_dir_ino_rm) { 4583 ret = can_rmdir(sctx, cur->dir, cur->dir_gen); 4584 if (ret < 0) 4585 goto out; 4586 if (ret) { 4587 ret = get_cur_path(sctx, cur->dir, 4588 cur->dir_gen, valid_path); 4589 if (ret < 0) 4590 goto out; 4591 ret = send_rmdir(sctx, valid_path); 4592 if (ret < 0) 4593 goto out; 4594 last_dir_ino_rm = cur->dir; 4595 } 4596 } 4597 } 4598 4599 ret = 0; 4600 4601 out: 4602 __free_recorded_refs(&check_dirs); 4603 free_recorded_refs(sctx); 4604 fs_path_free(valid_path); 4605 return ret; 4606 } 4607 4608 static int rbtree_ref_comp(const void *k, const struct rb_node *node) 4609 { 4610 const struct recorded_ref *data = k; 4611 const struct recorded_ref *ref = rb_entry(node, struct recorded_ref, node); 4612 int result; 4613 4614 if (data->dir > ref->dir) 4615 return 1; 4616 if (data->dir < ref->dir) 4617 return -1; 4618 if (data->dir_gen > ref->dir_gen) 4619 return 1; 4620 if (data->dir_gen < ref->dir_gen) 4621 return -1; 4622 if (data->name_len > ref->name_len) 4623 return 1; 4624 if (data->name_len < ref->name_len) 4625 return -1; 4626 result = strcmp(data->name, ref->name); 4627 if (result > 0) 4628 return 1; 4629 if (result < 0) 4630 return -1; 4631 return 0; 4632 } 4633 4634 static bool rbtree_ref_less(struct rb_node *node, const struct rb_node *parent) 4635 { 4636 const struct recorded_ref *entry = rb_entry(node, struct recorded_ref, node); 4637 4638 return rbtree_ref_comp(entry, parent) < 0; 4639 } 4640 4641 static int record_ref_in_tree(struct rb_root *root, struct list_head *refs, 4642 struct fs_path *name, u64 dir, u64 dir_gen, 4643 struct send_ctx *sctx) 4644 { 4645 int ret = 0; 4646 struct fs_path *path = NULL; 4647 struct recorded_ref *ref = NULL; 4648 4649 path = fs_path_alloc(); 4650 if (!path) { 4651 ret = -ENOMEM; 4652 goto out; 4653 } 4654 4655 ref = recorded_ref_alloc(); 4656 if (!ref) { 4657 ret = -ENOMEM; 4658 goto out; 4659 } 4660 4661 ret = get_cur_path(sctx, dir, dir_gen, path); 4662 if (ret < 0) 4663 goto out; 4664 ret = fs_path_add_path(path, name); 4665 if (ret < 0) 4666 goto out; 4667 4668 ref->dir = dir; 4669 ref->dir_gen = dir_gen; 4670 set_ref_path(ref, path); 4671 list_add_tail(&ref->list, refs); 4672 rb_add(&ref->node, root, rbtree_ref_less); 4673 ref->root = root; 4674 out: 4675 if (ret) { 4676 if (path && (!ref || !ref->full_path)) 4677 fs_path_free(path); 4678 
recorded_ref_free(ref); 4679 } 4680 return ret; 4681 } 4682 4683 static int record_new_ref_if_needed(int num, u64 dir, int index, 4684 struct fs_path *name, void *ctx) 4685 { 4686 int ret = 0; 4687 struct send_ctx *sctx = ctx; 4688 struct rb_node *node = NULL; 4689 struct recorded_ref data; 4690 struct recorded_ref *ref; 4691 u64 dir_gen; 4692 4693 ret = get_inode_gen(sctx->send_root, dir, &dir_gen); 4694 if (ret < 0) 4695 goto out; 4696 4697 data.dir = dir; 4698 data.dir_gen = dir_gen; 4699 set_ref_path(&data, name); 4700 node = rb_find(&data, &sctx->rbtree_deleted_refs, rbtree_ref_comp); 4701 if (node) { 4702 ref = rb_entry(node, struct recorded_ref, node); 4703 recorded_ref_free(ref); 4704 } else { 4705 ret = record_ref_in_tree(&sctx->rbtree_new_refs, 4706 &sctx->new_refs, name, dir, dir_gen, 4707 sctx); 4708 } 4709 out: 4710 return ret; 4711 } 4712 4713 static int record_deleted_ref_if_needed(int num, u64 dir, int index, 4714 struct fs_path *name, void *ctx) 4715 { 4716 int ret = 0; 4717 struct send_ctx *sctx = ctx; 4718 struct rb_node *node = NULL; 4719 struct recorded_ref data; 4720 struct recorded_ref *ref; 4721 u64 dir_gen; 4722 4723 ret = get_inode_gen(sctx->parent_root, dir, &dir_gen); 4724 if (ret < 0) 4725 goto out; 4726 4727 data.dir = dir; 4728 data.dir_gen = dir_gen; 4729 set_ref_path(&data, name); 4730 node = rb_find(&data, &sctx->rbtree_new_refs, rbtree_ref_comp); 4731 if (node) { 4732 ref = rb_entry(node, struct recorded_ref, node); 4733 recorded_ref_free(ref); 4734 } else { 4735 ret = record_ref_in_tree(&sctx->rbtree_deleted_refs, 4736 &sctx->deleted_refs, name, dir, 4737 dir_gen, sctx); 4738 } 4739 out: 4740 return ret; 4741 } 4742 4743 static int record_new_ref(struct send_ctx *sctx) 4744 { 4745 int ret; 4746 4747 ret = iterate_inode_ref(sctx->send_root, sctx->left_path, 4748 sctx->cmp_key, 0, record_new_ref_if_needed, sctx); 4749 if (ret < 0) 4750 goto out; 4751 ret = 0; 4752 4753 out: 4754 return ret; 4755 } 4756 4757 static int record_deleted_ref(struct send_ctx *sctx) 4758 { 4759 int ret; 4760 4761 ret = iterate_inode_ref(sctx->parent_root, sctx->right_path, 4762 sctx->cmp_key, 0, record_deleted_ref_if_needed, 4763 sctx); 4764 if (ret < 0) 4765 goto out; 4766 ret = 0; 4767 4768 out: 4769 return ret; 4770 } 4771 4772 static int record_changed_ref(struct send_ctx *sctx) 4773 { 4774 int ret = 0; 4775 4776 ret = iterate_inode_ref(sctx->send_root, sctx->left_path, 4777 sctx->cmp_key, 0, record_new_ref_if_needed, sctx); 4778 if (ret < 0) 4779 goto out; 4780 ret = iterate_inode_ref(sctx->parent_root, sctx->right_path, 4781 sctx->cmp_key, 0, record_deleted_ref_if_needed, sctx); 4782 if (ret < 0) 4783 goto out; 4784 ret = 0; 4785 4786 out: 4787 return ret; 4788 } 4789 4790 /* 4791 * Record and process all refs at once. Needed when an inode changes the 4792 * generation number, which means that it was deleted and recreated. 
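 * Unlike the incremental changed-ref path, this walks all INODE_REF and
 * INODE_EXTREF items of the inode in the relevant root (the send root for new
 * inodes, the parent root for deleted ones) and records them all before
 * calling process_recorded_refs().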
4793 */ 4794 static int process_all_refs(struct send_ctx *sctx, 4795 enum btrfs_compare_tree_result cmd) 4796 { 4797 int ret = 0; 4798 int iter_ret = 0; 4799 struct btrfs_root *root; 4800 struct btrfs_path *path; 4801 struct btrfs_key key; 4802 struct btrfs_key found_key; 4803 iterate_inode_ref_t cb; 4804 int pending_move = 0; 4805 4806 path = alloc_path_for_send(); 4807 if (!path) 4808 return -ENOMEM; 4809 4810 if (cmd == BTRFS_COMPARE_TREE_NEW) { 4811 root = sctx->send_root; 4812 cb = record_new_ref_if_needed; 4813 } else if (cmd == BTRFS_COMPARE_TREE_DELETED) { 4814 root = sctx->parent_root; 4815 cb = record_deleted_ref_if_needed; 4816 } else { 4817 btrfs_err(sctx->send_root->fs_info, 4818 "Wrong command %d in process_all_refs", cmd); 4819 ret = -EINVAL; 4820 goto out; 4821 } 4822 4823 key.objectid = sctx->cmp_key->objectid; 4824 key.type = BTRFS_INODE_REF_KEY; 4825 key.offset = 0; 4826 btrfs_for_each_slot(root, &key, &found_key, path, iter_ret) { 4827 if (found_key.objectid != key.objectid || 4828 (found_key.type != BTRFS_INODE_REF_KEY && 4829 found_key.type != BTRFS_INODE_EXTREF_KEY)) 4830 break; 4831 4832 ret = iterate_inode_ref(root, path, &found_key, 0, cb, sctx); 4833 if (ret < 0) 4834 goto out; 4835 } 4836 /* Catch error found during iteration */ 4837 if (iter_ret < 0) { 4838 ret = iter_ret; 4839 goto out; 4840 } 4841 btrfs_release_path(path); 4842 4843 /* 4844 * We don't actually care about pending_move as we are simply 4845 * re-creating this inode and will be rename'ing it into place once we 4846 * rename the parent directory. 4847 */ 4848 ret = process_recorded_refs(sctx, &pending_move); 4849 out: 4850 btrfs_free_path(path); 4851 return ret; 4852 } 4853 4854 static int send_set_xattr(struct send_ctx *sctx, 4855 struct fs_path *path, 4856 const char *name, int name_len, 4857 const char *data, int data_len) 4858 { 4859 int ret = 0; 4860 4861 ret = begin_cmd(sctx, BTRFS_SEND_C_SET_XATTR); 4862 if (ret < 0) 4863 goto out; 4864 4865 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path); 4866 TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len); 4867 TLV_PUT(sctx, BTRFS_SEND_A_XATTR_DATA, data, data_len); 4868 4869 ret = send_cmd(sctx); 4870 4871 tlv_put_failure: 4872 out: 4873 return ret; 4874 } 4875 4876 static int send_remove_xattr(struct send_ctx *sctx, 4877 struct fs_path *path, 4878 const char *name, int name_len) 4879 { 4880 int ret = 0; 4881 4882 ret = begin_cmd(sctx, BTRFS_SEND_C_REMOVE_XATTR); 4883 if (ret < 0) 4884 goto out; 4885 4886 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path); 4887 TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len); 4888 4889 ret = send_cmd(sctx); 4890 4891 tlv_put_failure: 4892 out: 4893 return ret; 4894 } 4895 4896 static int __process_new_xattr(int num, struct btrfs_key *di_key, 4897 const char *name, int name_len, const char *data, 4898 int data_len, void *ctx) 4899 { 4900 int ret; 4901 struct send_ctx *sctx = ctx; 4902 struct fs_path *p; 4903 struct posix_acl_xattr_header dummy_acl; 4904 4905 /* Capabilities are emitted by finish_inode_if_needed */ 4906 if (!strncmp(name, XATTR_NAME_CAPS, name_len)) 4907 return 0; 4908 4909 p = fs_path_alloc(); 4910 if (!p) 4911 return -ENOMEM; 4912 4913 /* 4914 * This hack is needed because empty acls are stored as zero byte 4915 * data in xattrs. The problem with that is that receiving these zero byte 4916 * acls will fail later. To fix this, we send a dummy acl list that 4917 * only contains the version number and no entries.
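 * For illustration, assuming POSIX_ACL_XATTR_VERSION is 2, the dummy payload
 * is just the 4-byte header { .a_version = cpu_to_le32(2) } with no
 * posix_acl_xattr_entry records following it.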
4918 */ 4919 if (!strncmp(name, XATTR_NAME_POSIX_ACL_ACCESS, name_len) || 4920 !strncmp(name, XATTR_NAME_POSIX_ACL_DEFAULT, name_len)) { 4921 if (data_len == 0) { 4922 dummy_acl.a_version = 4923 cpu_to_le32(POSIX_ACL_XATTR_VERSION); 4924 data = (char *)&dummy_acl; 4925 data_len = sizeof(dummy_acl); 4926 } 4927 } 4928 4929 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p); 4930 if (ret < 0) 4931 goto out; 4932 4933 ret = send_set_xattr(sctx, p, name, name_len, data, data_len); 4934 4935 out: 4936 fs_path_free(p); 4937 return ret; 4938 } 4939 4940 static int __process_deleted_xattr(int num, struct btrfs_key *di_key, 4941 const char *name, int name_len, 4942 const char *data, int data_len, void *ctx) 4943 { 4944 int ret; 4945 struct send_ctx *sctx = ctx; 4946 struct fs_path *p; 4947 4948 p = fs_path_alloc(); 4949 if (!p) 4950 return -ENOMEM; 4951 4952 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p); 4953 if (ret < 0) 4954 goto out; 4955 4956 ret = send_remove_xattr(sctx, p, name, name_len); 4957 4958 out: 4959 fs_path_free(p); 4960 return ret; 4961 } 4962 4963 static int process_new_xattr(struct send_ctx *sctx) 4964 { 4965 int ret = 0; 4966 4967 ret = iterate_dir_item(sctx->send_root, sctx->left_path, 4968 __process_new_xattr, sctx); 4969 4970 return ret; 4971 } 4972 4973 static int process_deleted_xattr(struct send_ctx *sctx) 4974 { 4975 return iterate_dir_item(sctx->parent_root, sctx->right_path, 4976 __process_deleted_xattr, sctx); 4977 } 4978 4979 struct find_xattr_ctx { 4980 const char *name; 4981 int name_len; 4982 int found_idx; 4983 char *found_data; 4984 int found_data_len; 4985 }; 4986 4987 static int __find_xattr(int num, struct btrfs_key *di_key, const char *name, 4988 int name_len, const char *data, int data_len, void *vctx) 4989 { 4990 struct find_xattr_ctx *ctx = vctx; 4991 4992 if (name_len == ctx->name_len && 4993 strncmp(name, ctx->name, name_len) == 0) { 4994 ctx->found_idx = num; 4995 ctx->found_data_len = data_len; 4996 ctx->found_data = kmemdup(data, data_len, GFP_KERNEL); 4997 if (!ctx->found_data) 4998 return -ENOMEM; 4999 return 1; 5000 } 5001 return 0; 5002 } 5003 5004 static int find_xattr(struct btrfs_root *root, 5005 struct btrfs_path *path, 5006 struct btrfs_key *key, 5007 const char *name, int name_len, 5008 char **data, int *data_len) 5009 { 5010 int ret; 5011 struct find_xattr_ctx ctx; 5012 5013 ctx.name = name; 5014 ctx.name_len = name_len; 5015 ctx.found_idx = -1; 5016 ctx.found_data = NULL; 5017 ctx.found_data_len = 0; 5018 5019 ret = iterate_dir_item(root, path, __find_xattr, &ctx); 5020 if (ret < 0) 5021 return ret; 5022 5023 if (ctx.found_idx == -1) 5024 return -ENOENT; 5025 if (data) { 5026 *data = ctx.found_data; 5027 *data_len = ctx.found_data_len; 5028 } else { 5029 kfree(ctx.found_data); 5030 } 5031 return ctx.found_idx; 5032 } 5033 5034 5035 static int __process_changed_new_xattr(int num, struct btrfs_key *di_key, 5036 const char *name, int name_len, 5037 const char *data, int data_len, 5038 void *ctx) 5039 { 5040 int ret; 5041 struct send_ctx *sctx = ctx; 5042 char *found_data = NULL; 5043 int found_data_len = 0; 5044 5045 ret = find_xattr(sctx->parent_root, sctx->right_path, 5046 sctx->cmp_key, name, name_len, &found_data, 5047 &found_data_len); 5048 if (ret == -ENOENT) { 5049 ret = __process_new_xattr(num, di_key, name, name_len, data, 5050 data_len, ctx); 5051 } else if (ret >= 0) { 5052 if (data_len != found_data_len || 5053 memcmp(data, found_data, data_len)) { 5054 ret = __process_new_xattr(num, di_key, name, 
name_len, 5055 data, data_len, ctx); 5056 } else { 5057 ret = 0; 5058 } 5059 } 5060 5061 kfree(found_data); 5062 return ret; 5063 } 5064 5065 static int __process_changed_deleted_xattr(int num, struct btrfs_key *di_key, 5066 const char *name, int name_len, 5067 const char *data, int data_len, 5068 void *ctx) 5069 { 5070 int ret; 5071 struct send_ctx *sctx = ctx; 5072 5073 ret = find_xattr(sctx->send_root, sctx->left_path, sctx->cmp_key, 5074 name, name_len, NULL, NULL); 5075 if (ret == -ENOENT) 5076 ret = __process_deleted_xattr(num, di_key, name, name_len, data, 5077 data_len, ctx); 5078 else if (ret >= 0) 5079 ret = 0; 5080 5081 return ret; 5082 } 5083 5084 static int process_changed_xattr(struct send_ctx *sctx) 5085 { 5086 int ret = 0; 5087 5088 ret = iterate_dir_item(sctx->send_root, sctx->left_path, 5089 __process_changed_new_xattr, sctx); 5090 if (ret < 0) 5091 goto out; 5092 ret = iterate_dir_item(sctx->parent_root, sctx->right_path, 5093 __process_changed_deleted_xattr, sctx); 5094 5095 out: 5096 return ret; 5097 } 5098 5099 static int process_all_new_xattrs(struct send_ctx *sctx) 5100 { 5101 int ret = 0; 5102 int iter_ret = 0; 5103 struct btrfs_root *root; 5104 struct btrfs_path *path; 5105 struct btrfs_key key; 5106 struct btrfs_key found_key; 5107 5108 path = alloc_path_for_send(); 5109 if (!path) 5110 return -ENOMEM; 5111 5112 root = sctx->send_root; 5113 5114 key.objectid = sctx->cmp_key->objectid; 5115 key.type = BTRFS_XATTR_ITEM_KEY; 5116 key.offset = 0; 5117 btrfs_for_each_slot(root, &key, &found_key, path, iter_ret) { 5118 if (found_key.objectid != key.objectid || 5119 found_key.type != key.type) { 5120 ret = 0; 5121 break; 5122 } 5123 5124 ret = iterate_dir_item(root, path, __process_new_xattr, sctx); 5125 if (ret < 0) 5126 break; 5127 } 5128 /* Catch error found during iteration */ 5129 if (iter_ret < 0) 5130 ret = iter_ret; 5131 5132 btrfs_free_path(path); 5133 return ret; 5134 } 5135 5136 static int send_verity(struct send_ctx *sctx, struct fs_path *path, 5137 struct fsverity_descriptor *desc) 5138 { 5139 int ret; 5140 5141 ret = begin_cmd(sctx, BTRFS_SEND_C_ENABLE_VERITY); 5142 if (ret < 0) 5143 goto out; 5144 5145 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path); 5146 TLV_PUT_U8(sctx, BTRFS_SEND_A_VERITY_ALGORITHM, 5147 le8_to_cpu(desc->hash_algorithm)); 5148 TLV_PUT_U32(sctx, BTRFS_SEND_A_VERITY_BLOCK_SIZE, 5149 1U << le8_to_cpu(desc->log_blocksize)); 5150 TLV_PUT(sctx, BTRFS_SEND_A_VERITY_SALT_DATA, desc->salt, 5151 le8_to_cpu(desc->salt_size)); 5152 TLV_PUT(sctx, BTRFS_SEND_A_VERITY_SIG_DATA, desc->signature, 5153 le32_to_cpu(desc->sig_size)); 5154 5155 ret = send_cmd(sctx); 5156 5157 tlv_put_failure: 5158 out: 5159 return ret; 5160 } 5161 5162 static int process_verity(struct send_ctx *sctx) 5163 { 5164 int ret = 0; 5165 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info; 5166 struct inode *inode; 5167 struct fs_path *p; 5168 5169 inode = btrfs_iget(fs_info->sb, sctx->cur_ino, sctx->send_root); 5170 if (IS_ERR(inode)) 5171 return PTR_ERR(inode); 5172 5173 ret = btrfs_get_verity_descriptor(inode, NULL, 0); 5174 if (ret < 0) 5175 goto iput; 5176 5177 if (ret > FS_VERITY_MAX_DESCRIPTOR_SIZE) { 5178 ret = -EMSGSIZE; 5179 goto iput; 5180 } 5181 if (!sctx->verity_descriptor) { 5182 sctx->verity_descriptor = kvmalloc(FS_VERITY_MAX_DESCRIPTOR_SIZE, 5183 GFP_KERNEL); 5184 if (!sctx->verity_descriptor) { 5185 ret = -ENOMEM; 5186 goto iput; 5187 } 5188 } 5189 5190 ret = btrfs_get_verity_descriptor(inode, sctx->verity_descriptor, ret); 5191 if (ret < 0) 5192 goto iput; 5193 
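/* Build the inode's current path and emit the enable-verity command below. */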
5194 p = fs_path_alloc(); 5195 if (!p) { 5196 ret = -ENOMEM; 5197 goto iput; 5198 } 5199 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p); 5200 if (ret < 0) 5201 goto free_path; 5202 5203 ret = send_verity(sctx, p, sctx->verity_descriptor); 5204 if (ret < 0) 5205 goto free_path; 5206 5207 free_path: 5208 fs_path_free(p); 5209 iput: 5210 iput(inode); 5211 return ret; 5212 } 5213 5214 static inline u64 max_send_read_size(const struct send_ctx *sctx) 5215 { 5216 return sctx->send_max_size - SZ_16K; 5217 } 5218 5219 static int put_data_header(struct send_ctx *sctx, u32 len) 5220 { 5221 if (WARN_ON_ONCE(sctx->put_data)) 5222 return -EINVAL; 5223 sctx->put_data = true; 5224 if (sctx->proto >= 2) { 5225 /* 5226 * Since v2, the data attribute header doesn't include a length; 5227 * the data implicitly extends to the end of the command. 5228 */ 5229 if (sctx->send_max_size - sctx->send_size < sizeof(__le16) + len) 5230 return -EOVERFLOW; 5231 put_unaligned_le16(BTRFS_SEND_A_DATA, sctx->send_buf + sctx->send_size); 5232 sctx->send_size += sizeof(__le16); 5233 } else { 5234 struct btrfs_tlv_header *hdr; 5235 5236 if (sctx->send_max_size - sctx->send_size < sizeof(*hdr) + len) 5237 return -EOVERFLOW; 5238 hdr = (struct btrfs_tlv_header *)(sctx->send_buf + sctx->send_size); 5239 put_unaligned_le16(BTRFS_SEND_A_DATA, &hdr->tlv_type); 5240 put_unaligned_le16(len, &hdr->tlv_len); 5241 sctx->send_size += sizeof(*hdr); 5242 } 5243 return 0; 5244 } 5245 5246 static int put_file_data(struct send_ctx *sctx, u64 offset, u32 len) 5247 { 5248 struct btrfs_root *root = sctx->send_root; 5249 struct btrfs_fs_info *fs_info = root->fs_info; 5250 struct page *page; 5251 pgoff_t index = offset >> PAGE_SHIFT; 5252 pgoff_t last_index; 5253 unsigned pg_offset = offset_in_page(offset); 5254 int ret; 5255 5256 ret = put_data_header(sctx, len); 5257 if (ret) 5258 return ret; 5259 5260 last_index = (offset + len - 1) >> PAGE_SHIFT; 5261 5262 while (index <= last_index) { 5263 unsigned cur_len = min_t(unsigned, len, 5264 PAGE_SIZE - pg_offset); 5265 5266 page = find_lock_page(sctx->cur_inode->i_mapping, index); 5267 if (!page) { 5268 page_cache_sync_readahead(sctx->cur_inode->i_mapping, 5269 &sctx->ra, NULL, index, 5270 last_index + 1 - index); 5271 5272 page = find_or_create_page(sctx->cur_inode->i_mapping, 5273 index, GFP_KERNEL); 5274 if (!page) { 5275 ret = -ENOMEM; 5276 break; 5277 } 5278 } 5279 5280 if (PageReadahead(page)) 5281 page_cache_async_readahead(sctx->cur_inode->i_mapping, 5282 &sctx->ra, NULL, page_folio(page), 5283 index, last_index + 1 - index); 5284 5285 if (!PageUptodate(page)) { 5286 btrfs_read_folio(NULL, page_folio(page)); 5287 lock_page(page); 5288 if (!PageUptodate(page)) { 5289 unlock_page(page); 5290 btrfs_err(fs_info, 5291 "send: IO error at offset %llu for inode %llu root %llu", 5292 page_offset(page), sctx->cur_ino, 5293 sctx->send_root->root_key.objectid); 5294 put_page(page); 5295 ret = -EIO; 5296 break; 5297 } 5298 } 5299 5300 memcpy_from_page(sctx->send_buf + sctx->send_size, page, 5301 pg_offset, cur_len); 5302 unlock_page(page); 5303 put_page(page); 5304 index++; 5305 pg_offset = 0; 5306 len -= cur_len; 5307 sctx->send_size += cur_len; 5308 } 5309 5310 return ret; 5311 } 5312 5313 /* 5314 * Read some bytes from the current inode/file and send a write command to 5315 * user space.
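 * The emitted command is a BTRFS_SEND_C_WRITE carrying the inode's path, the
 * file offset and the data as its last attribute.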
5316 */ 5317 static int send_write(struct send_ctx *sctx, u64 offset, u32 len) 5318 { 5319 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info; 5320 int ret = 0; 5321 struct fs_path *p; 5322 5323 p = fs_path_alloc(); 5324 if (!p) 5325 return -ENOMEM; 5326 5327 btrfs_debug(fs_info, "send_write offset=%llu, len=%d", offset, len); 5328 5329 ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE); 5330 if (ret < 0) 5331 goto out; 5332 5333 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p); 5334 if (ret < 0) 5335 goto out; 5336 5337 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); 5338 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset); 5339 ret = put_file_data(sctx, offset, len); 5340 if (ret < 0) 5341 goto out; 5342 5343 ret = send_cmd(sctx); 5344 5345 tlv_put_failure: 5346 out: 5347 fs_path_free(p); 5348 return ret; 5349 } 5350 5351 /* 5352 * Send a clone command to user space. 5353 */ 5354 static int send_clone(struct send_ctx *sctx, 5355 u64 offset, u32 len, 5356 struct clone_root *clone_root) 5357 { 5358 int ret = 0; 5359 struct fs_path *p; 5360 u64 gen; 5361 5362 btrfs_debug(sctx->send_root->fs_info, 5363 "send_clone offset=%llu, len=%d, clone_root=%llu, clone_inode=%llu, clone_offset=%llu", 5364 offset, len, clone_root->root->root_key.objectid, 5365 clone_root->ino, clone_root->offset); 5366 5367 p = fs_path_alloc(); 5368 if (!p) 5369 return -ENOMEM; 5370 5371 ret = begin_cmd(sctx, BTRFS_SEND_C_CLONE); 5372 if (ret < 0) 5373 goto out; 5374 5375 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p); 5376 if (ret < 0) 5377 goto out; 5378 5379 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset); 5380 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_LEN, len); 5381 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); 5382 5383 if (clone_root->root == sctx->send_root) { 5384 ret = get_inode_gen(sctx->send_root, clone_root->ino, &gen); 5385 if (ret < 0) 5386 goto out; 5387 ret = get_cur_path(sctx, clone_root->ino, gen, p); 5388 } else { 5389 ret = get_inode_path(clone_root->root, clone_root->ino, p); 5390 } 5391 if (ret < 0) 5392 goto out; 5393 5394 /* 5395 * If the parent we're using has a received_uuid set then use that as 5396 * our clone source as that is what we will look for when doing a 5397 * receive. 5398 * 5399 * This covers the case that we create a snapshot off of a received 5400 * subvolume and then use that as the parent and try to receive on a 5401 * different host. 5402 */ 5403 if (!btrfs_is_empty_uuid(clone_root->root->root_item.received_uuid)) 5404 TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID, 5405 clone_root->root->root_item.received_uuid); 5406 else 5407 TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID, 5408 clone_root->root->root_item.uuid); 5409 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID, 5410 btrfs_root_ctransid(&clone_root->root->root_item)); 5411 TLV_PUT_PATH(sctx, BTRFS_SEND_A_CLONE_PATH, p); 5412 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_OFFSET, 5413 clone_root->offset); 5414 5415 ret = send_cmd(sctx); 5416 5417 tlv_put_failure: 5418 out: 5419 fs_path_free(p); 5420 return ret; 5421 } 5422 5423 /* 5424 * Send an update extent command to user space. 
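 * This is emitted instead of the actual file data when the stream is created
 * with BTRFS_SEND_FLAG_NO_FILE_DATA (see send_extent_data() and send_hole()).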
5425 */ 5426 static int send_update_extent(struct send_ctx *sctx, 5427 u64 offset, u32 len) 5428 { 5429 int ret = 0; 5430 struct fs_path *p; 5431 5432 p = fs_path_alloc(); 5433 if (!p) 5434 return -ENOMEM; 5435 5436 ret = begin_cmd(sctx, BTRFS_SEND_C_UPDATE_EXTENT); 5437 if (ret < 0) 5438 goto out; 5439 5440 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p); 5441 if (ret < 0) 5442 goto out; 5443 5444 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); 5445 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset); 5446 TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, len); 5447 5448 ret = send_cmd(sctx); 5449 5450 tlv_put_failure: 5451 out: 5452 fs_path_free(p); 5453 return ret; 5454 } 5455 5456 static int send_hole(struct send_ctx *sctx, u64 end) 5457 { 5458 struct fs_path *p = NULL; 5459 u64 read_size = max_send_read_size(sctx); 5460 u64 offset = sctx->cur_inode_last_extent; 5461 int ret = 0; 5462 5463 /* 5464 * A hole that starts at EOF or beyond it. Since we do not yet support 5465 * fallocate (for extent preallocation and hole punching), sending a 5466 * write of zeroes starting at EOF or beyond would later require issuing 5467 * a truncate operation which would undo the write and achieve nothing. 5468 */ 5469 if (offset >= sctx->cur_inode_size) 5470 return 0; 5471 5472 /* 5473 * Don't go beyond the inode's i_size due to prealloc extents that start 5474 * after the i_size. 5475 */ 5476 end = min_t(u64, end, sctx->cur_inode_size); 5477 5478 if (sctx->flags & BTRFS_SEND_FLAG_NO_FILE_DATA) 5479 return send_update_extent(sctx, offset, end - offset); 5480 5481 p = fs_path_alloc(); 5482 if (!p) 5483 return -ENOMEM; 5484 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p); 5485 if (ret < 0) 5486 goto tlv_put_failure; 5487 while (offset < end) { 5488 u64 len = min(end - offset, read_size); 5489 5490 ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE); 5491 if (ret < 0) 5492 break; 5493 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); 5494 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset); 5495 ret = put_data_header(sctx, len); 5496 if (ret < 0) 5497 break; 5498 memset(sctx->send_buf + sctx->send_size, 0, len); 5499 sctx->send_size += len; 5500 ret = send_cmd(sctx); 5501 if (ret < 0) 5502 break; 5503 offset += len; 5504 } 5505 sctx->cur_inode_next_write_offset = offset; 5506 tlv_put_failure: 5507 fs_path_free(p); 5508 return ret; 5509 } 5510 5511 static int send_encoded_inline_extent(struct send_ctx *sctx, 5512 struct btrfs_path *path, u64 offset, 5513 u64 len) 5514 { 5515 struct btrfs_root *root = sctx->send_root; 5516 struct btrfs_fs_info *fs_info = root->fs_info; 5517 struct inode *inode; 5518 struct fs_path *fspath; 5519 struct extent_buffer *leaf = path->nodes[0]; 5520 struct btrfs_key key; 5521 struct btrfs_file_extent_item *ei; 5522 u64 ram_bytes; 5523 size_t inline_size; 5524 int ret; 5525 5526 inode = btrfs_iget(fs_info->sb, sctx->cur_ino, root); 5527 if (IS_ERR(inode)) 5528 return PTR_ERR(inode); 5529 5530 fspath = fs_path_alloc(); 5531 if (!fspath) { 5532 ret = -ENOMEM; 5533 goto out; 5534 } 5535 5536 ret = begin_cmd(sctx, BTRFS_SEND_C_ENCODED_WRITE); 5537 if (ret < 0) 5538 goto out; 5539 5540 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, fspath); 5541 if (ret < 0) 5542 goto out; 5543 5544 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 5545 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item); 5546 ram_bytes = btrfs_file_extent_ram_bytes(leaf, ei); 5547 inline_size = btrfs_file_extent_inline_item_len(leaf, path->slots[0]); 5548 5549 TLV_PUT_PATH(sctx, 
BTRFS_SEND_A_PATH, fspath); 5550 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset); 5551 TLV_PUT_U64(sctx, BTRFS_SEND_A_UNENCODED_FILE_LEN, 5552 min(key.offset + ram_bytes - offset, len)); 5553 TLV_PUT_U64(sctx, BTRFS_SEND_A_UNENCODED_LEN, ram_bytes); 5554 TLV_PUT_U64(sctx, BTRFS_SEND_A_UNENCODED_OFFSET, offset - key.offset); 5555 ret = btrfs_encoded_io_compression_from_extent(fs_info, 5556 btrfs_file_extent_compression(leaf, ei)); 5557 if (ret < 0) 5558 goto out; 5559 TLV_PUT_U32(sctx, BTRFS_SEND_A_COMPRESSION, ret); 5560 5561 ret = put_data_header(sctx, inline_size); 5562 if (ret < 0) 5563 goto out; 5564 read_extent_buffer(leaf, sctx->send_buf + sctx->send_size, 5565 btrfs_file_extent_inline_start(ei), inline_size); 5566 sctx->send_size += inline_size; 5567 5568 ret = send_cmd(sctx); 5569 5570 tlv_put_failure: 5571 out: 5572 fs_path_free(fspath); 5573 iput(inode); 5574 return ret; 5575 } 5576 5577 static int send_encoded_extent(struct send_ctx *sctx, struct btrfs_path *path, 5578 u64 offset, u64 len) 5579 { 5580 struct btrfs_root *root = sctx->send_root; 5581 struct btrfs_fs_info *fs_info = root->fs_info; 5582 struct inode *inode; 5583 struct fs_path *fspath; 5584 struct extent_buffer *leaf = path->nodes[0]; 5585 struct btrfs_key key; 5586 struct btrfs_file_extent_item *ei; 5587 u64 disk_bytenr, disk_num_bytes; 5588 u32 data_offset; 5589 struct btrfs_cmd_header *hdr; 5590 u32 crc; 5591 int ret; 5592 5593 inode = btrfs_iget(fs_info->sb, sctx->cur_ino, root); 5594 if (IS_ERR(inode)) 5595 return PTR_ERR(inode); 5596 5597 fspath = fs_path_alloc(); 5598 if (!fspath) { 5599 ret = -ENOMEM; 5600 goto out; 5601 } 5602 5603 ret = begin_cmd(sctx, BTRFS_SEND_C_ENCODED_WRITE); 5604 if (ret < 0) 5605 goto out; 5606 5607 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, fspath); 5608 if (ret < 0) 5609 goto out; 5610 5611 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 5612 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item); 5613 disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, ei); 5614 disk_num_bytes = btrfs_file_extent_disk_num_bytes(leaf, ei); 5615 5616 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, fspath); 5617 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset); 5618 TLV_PUT_U64(sctx, BTRFS_SEND_A_UNENCODED_FILE_LEN, 5619 min(key.offset + btrfs_file_extent_num_bytes(leaf, ei) - offset, 5620 len)); 5621 TLV_PUT_U64(sctx, BTRFS_SEND_A_UNENCODED_LEN, 5622 btrfs_file_extent_ram_bytes(leaf, ei)); 5623 TLV_PUT_U64(sctx, BTRFS_SEND_A_UNENCODED_OFFSET, 5624 offset - key.offset + btrfs_file_extent_offset(leaf, ei)); 5625 ret = btrfs_encoded_io_compression_from_extent(fs_info, 5626 btrfs_file_extent_compression(leaf, ei)); 5627 if (ret < 0) 5628 goto out; 5629 TLV_PUT_U32(sctx, BTRFS_SEND_A_COMPRESSION, ret); 5630 TLV_PUT_U32(sctx, BTRFS_SEND_A_ENCRYPTION, 0); 5631 5632 ret = put_data_header(sctx, disk_num_bytes); 5633 if (ret < 0) 5634 goto out; 5635 5636 /* 5637 * We want to do I/O directly into the send buffer, so get the next page 5638 * boundary in the send buffer. This means that there may be a gap 5639 * between the beginning of the command and the file data. 5640 */ 5641 data_offset = PAGE_ALIGN(sctx->send_size); 5642 if (data_offset > sctx->send_max_size || 5643 sctx->send_max_size - data_offset < disk_num_bytes) { 5644 ret = -EOVERFLOW; 5645 goto out; 5646 } 5647 5648 /* 5649 * Note that send_buf is a mapping of send_buf_pages, so this is really 5650 * reading into send_buf. 
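 * The buffer then holds the command header and TLVs, a gap up to the next
 * page boundary, and disk_num_bytes of raw extent data; the CRC in the
 * command header is computed over both parts before they are written out
 * back to back.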
5651 */ 5652 ret = btrfs_encoded_read_regular_fill_pages(BTRFS_I(inode), offset, 5653 disk_bytenr, disk_num_bytes, 5654 sctx->send_buf_pages + 5655 (data_offset >> PAGE_SHIFT)); 5656 if (ret) 5657 goto out; 5658 5659 hdr = (struct btrfs_cmd_header *)sctx->send_buf; 5660 hdr->len = cpu_to_le32(sctx->send_size + disk_num_bytes - sizeof(*hdr)); 5661 hdr->crc = 0; 5662 crc = btrfs_crc32c(0, sctx->send_buf, sctx->send_size); 5663 crc = btrfs_crc32c(crc, sctx->send_buf + data_offset, disk_num_bytes); 5664 hdr->crc = cpu_to_le32(crc); 5665 5666 ret = write_buf(sctx->send_filp, sctx->send_buf, sctx->send_size, 5667 &sctx->send_off); 5668 if (!ret) { 5669 ret = write_buf(sctx->send_filp, sctx->send_buf + data_offset, 5670 disk_num_bytes, &sctx->send_off); 5671 } 5672 sctx->send_size = 0; 5673 sctx->put_data = false; 5674 5675 tlv_put_failure: 5676 out: 5677 fs_path_free(fspath); 5678 iput(inode); 5679 return ret; 5680 } 5681 5682 static int send_extent_data(struct send_ctx *sctx, struct btrfs_path *path, 5683 const u64 offset, const u64 len) 5684 { 5685 const u64 end = offset + len; 5686 struct extent_buffer *leaf = path->nodes[0]; 5687 struct btrfs_file_extent_item *ei; 5688 u64 read_size = max_send_read_size(sctx); 5689 u64 sent = 0; 5690 5691 if (sctx->flags & BTRFS_SEND_FLAG_NO_FILE_DATA) 5692 return send_update_extent(sctx, offset, len); 5693 5694 ei = btrfs_item_ptr(leaf, path->slots[0], 5695 struct btrfs_file_extent_item); 5696 if ((sctx->flags & BTRFS_SEND_FLAG_COMPRESSED) && 5697 btrfs_file_extent_compression(leaf, ei) != BTRFS_COMPRESS_NONE) { 5698 bool is_inline = (btrfs_file_extent_type(leaf, ei) == 5699 BTRFS_FILE_EXTENT_INLINE); 5700 5701 /* 5702 * Send the compressed extent unless the compressed data is 5703 * larger than the decompressed data. This can happen if we're 5704 * not sending the entire extent, either because it has been 5705 * partially overwritten/truncated or because this is a part of 5706 * the extent that we couldn't clone in clone_range(). 5707 */ 5708 if (is_inline && 5709 btrfs_file_extent_inline_item_len(leaf, 5710 path->slots[0]) <= len) { 5711 return send_encoded_inline_extent(sctx, path, offset, 5712 len); 5713 } else if (!is_inline && 5714 btrfs_file_extent_disk_num_bytes(leaf, ei) <= len) { 5715 return send_encoded_extent(sctx, path, offset, len); 5716 } 5717 } 5718 5719 if (sctx->cur_inode == NULL) { 5720 struct btrfs_root *root = sctx->send_root; 5721 5722 sctx->cur_inode = btrfs_iget(root->fs_info->sb, sctx->cur_ino, root); 5723 if (IS_ERR(sctx->cur_inode)) { 5724 int err = PTR_ERR(sctx->cur_inode); 5725 5726 sctx->cur_inode = NULL; 5727 return err; 5728 } 5729 memset(&sctx->ra, 0, sizeof(struct file_ra_state)); 5730 file_ra_state_init(&sctx->ra, sctx->cur_inode->i_mapping); 5731 5732 /* 5733 * It's very likely there are no pages from this inode in the page 5734 * cache, so after reading extents and sending their data, we clean 5735 * the page cache to avoid trashing the page cache (adding pressure 5736 * to the page cache and forcing eviction of other data more useful 5737 * for applications). 
5738 * 5739 * We decide if we should clean the page cache simply by checking 5740 * if the inode's mapping nrpages is 0 when we first open it, and 5741 * not by using something like filemap_range_has_page() before 5742 * reading an extent because when we ask the readahead code to 5743 * read a given file range, it may (and almost always does) read 5744 * pages from beyond that range (see the documentation for 5745 * page_cache_sync_readahead()), so it would not be reliable, 5746 * because after reading the first extent future calls to 5747 * filemap_range_has_page() would return true because the readahead 5748 * on the previous extent resulted in reading pages of the current 5749 * extent as well. 5750 */ 5751 sctx->clean_page_cache = (sctx->cur_inode->i_mapping->nrpages == 0); 5752 sctx->page_cache_clear_start = round_down(offset, PAGE_SIZE); 5753 } 5754 5755 while (sent < len) { 5756 u64 size = min(len - sent, read_size); 5757 int ret; 5758 5759 ret = send_write(sctx, offset + sent, size); 5760 if (ret < 0) 5761 return ret; 5762 sent += size; 5763 } 5764 5765 if (sctx->clean_page_cache && PAGE_ALIGNED(end)) { 5766 /* 5767 * Always operate only on ranges that are a multiple of the page 5768 * size. This is not only to prevent zeroing parts of a page in 5769 * the case of subpage sector size, but also to guarantee we evict 5770 * pages, as passing a range that is smaller than page size does 5771 * not evict the respective page (only zeroes part of its content). 5772 * 5773 * Always start from the end offset of the last range cleared. 5774 * This is because the readahead code may (and very often does) 5775 * read pages beyond the range we request for readahead. So if 5776 * we have an extent layout like this: 5777 * 5778 * [ extent A ] [ extent B ] [ extent C ] 5779 * 5780 * When we ask page_cache_sync_readahead() to read extent A, it 5781 * may also trigger reads for pages of extent B. If we are doing 5782 * an incremental send and extent B has not changed between the 5783 * parent and send snapshots, some or all of its pages may end 5784 * up being read and placed in the page cache. So when truncating 5785 * the page cache we always start from the end offset of the 5786 * previously processed extent up to the end of the current 5787 * extent. 5788 */ 5789 truncate_inode_pages_range(&sctx->cur_inode->i_data, 5790 sctx->page_cache_clear_start, 5791 end - 1); 5792 sctx->page_cache_clear_start = end; 5793 } 5794 5795 return 0; 5796 } 5797 5798 /* 5799 * Search for a capability xattr related to sctx->cur_ino. If the capability is 5800 * found, call the send_set_xattr function to emit it. 5801 * 5802 * Return 0 if there isn't a capability, or when the capability was emitted 5803 * successfully, or < 0 if an error occurred.
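 * (XATTR_NAME_CAPS is the "security.capability" xattr.)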
5804 */ 5805 static int send_capabilities(struct send_ctx *sctx) 5806 { 5807 struct fs_path *fspath = NULL; 5808 struct btrfs_path *path; 5809 struct btrfs_dir_item *di; 5810 struct extent_buffer *leaf; 5811 unsigned long data_ptr; 5812 char *buf = NULL; 5813 int buf_len; 5814 int ret = 0; 5815 5816 path = alloc_path_for_send(); 5817 if (!path) 5818 return -ENOMEM; 5819 5820 di = btrfs_lookup_xattr(NULL, sctx->send_root, path, sctx->cur_ino, 5821 XATTR_NAME_CAPS, strlen(XATTR_NAME_CAPS), 0); 5822 if (!di) { 5823 /* There is no xattr for this inode */ 5824 goto out; 5825 } else if (IS_ERR(di)) { 5826 ret = PTR_ERR(di); 5827 goto out; 5828 } 5829 5830 leaf = path->nodes[0]; 5831 buf_len = btrfs_dir_data_len(leaf, di); 5832 5833 fspath = fs_path_alloc(); 5834 buf = kmalloc(buf_len, GFP_KERNEL); 5835 if (!fspath || !buf) { 5836 ret = -ENOMEM; 5837 goto out; 5838 } 5839 5840 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, fspath); 5841 if (ret < 0) 5842 goto out; 5843 5844 data_ptr = (unsigned long)(di + 1) + btrfs_dir_name_len(leaf, di); 5845 read_extent_buffer(leaf, buf, data_ptr, buf_len); 5846 5847 ret = send_set_xattr(sctx, fspath, XATTR_NAME_CAPS, 5848 strlen(XATTR_NAME_CAPS), buf, buf_len); 5849 out: 5850 kfree(buf); 5851 fs_path_free(fspath); 5852 btrfs_free_path(path); 5853 return ret; 5854 } 5855 5856 static int clone_range(struct send_ctx *sctx, struct btrfs_path *dst_path, 5857 struct clone_root *clone_root, const u64 disk_byte, 5858 u64 data_offset, u64 offset, u64 len) 5859 { 5860 struct btrfs_path *path; 5861 struct btrfs_key key; 5862 int ret; 5863 struct btrfs_inode_info info; 5864 u64 clone_src_i_size = 0; 5865 5866 /* 5867 * Prevent cloning from a zero offset with a length matching the sector 5868 * size because in some scenarios this will make the receiver fail. 5869 * 5870 * For example, if in the source filesystem the extent at offset 0 5871 * has a length of sectorsize and it was written using direct IO, then 5872 * it can never be an inline extent (even if compression is enabled). 5873 * Then this extent can be cloned in the original filesystem to a non 5874 * zero file offset, but it may not be possible to clone in the 5875 * destination filesystem because it can be inlined due to compression 5876 * on the destination filesystem (as the receiver's write operations are 5877 * always done using buffered IO). The same happens when the original 5878 * filesystem does not have compression enabled but the destination 5879 * filesystem does. 5880 */ 5881 if (clone_root->offset == 0 && 5882 len == sctx->send_root->fs_info->sectorsize) 5883 return send_extent_data(sctx, dst_path, offset, len); 5884 5885 path = alloc_path_for_send(); 5886 if (!path) 5887 return -ENOMEM; 5888 5889 /* 5890 * There are inodes that have extents that lie behind their i_size. Don't 5891 * accept clones from these extents. 5892 */ 5893 ret = get_inode_info(clone_root->root, clone_root->ino, &info); 5894 btrfs_release_path(path); 5895 if (ret < 0) 5896 goto out; 5897 clone_src_i_size = info.size; 5898 5899 /* 5900 * We can't send a clone operation for the entire range if we find 5901 * extent items in the respective range in the source file that 5902 * refer to different extents or if we find holes. 5903 * So check for that and do a mix of clone and regular write/copy 5904 * operations if needed.
5905 * 5906 * Example: 5907 * 5908 * mkfs.btrfs -f /dev/sda 5909 * mount /dev/sda /mnt 5910 * xfs_io -f -c "pwrite -S 0xaa 0K 100K" /mnt/foo 5911 * cp --reflink=always /mnt/foo /mnt/bar 5912 * xfs_io -c "pwrite -S 0xbb 50K 50K" /mnt/foo 5913 * btrfs subvolume snapshot -r /mnt /mnt/snap 5914 * 5915 * If, when we send the snapshot and we are processing file bar (which 5916 * has a higher inode number than foo), we blindly send a clone operation 5917 * for the [0, 100K[ range from foo to bar, the receiver ends up getting 5918 * a file bar that matches the content of file foo - iow, it doesn't match 5919 * the content from bar in the original filesystem. 5920 */ 5921 key.objectid = clone_root->ino; 5922 key.type = BTRFS_EXTENT_DATA_KEY; 5923 key.offset = clone_root->offset; 5924 ret = btrfs_search_slot(NULL, clone_root->root, &key, path, 0, 0); 5925 if (ret < 0) 5926 goto out; 5927 if (ret > 0 && path->slots[0] > 0) { 5928 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0] - 1); 5929 if (key.objectid == clone_root->ino && 5930 key.type == BTRFS_EXTENT_DATA_KEY) 5931 path->slots[0]--; 5932 } 5933 5934 while (true) { 5935 struct extent_buffer *leaf = path->nodes[0]; 5936 int slot = path->slots[0]; 5937 struct btrfs_file_extent_item *ei; 5938 u8 type; 5939 u64 ext_len; 5940 u64 clone_len; 5941 u64 clone_data_offset; 5942 bool crossed_src_i_size = false; 5943 5944 if (slot >= btrfs_header_nritems(leaf)) { 5945 ret = btrfs_next_leaf(clone_root->root, path); 5946 if (ret < 0) 5947 goto out; 5948 else if (ret > 0) 5949 break; 5950 continue; 5951 } 5952 5953 btrfs_item_key_to_cpu(leaf, &key, slot); 5954 5955 /* 5956 * We might have an implicit trailing hole (NO_HOLES feature 5957 * enabled). We deal with it after leaving this loop. 5958 */ 5959 if (key.objectid != clone_root->ino || 5960 key.type != BTRFS_EXTENT_DATA_KEY) 5961 break; 5962 5963 ei = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item); 5964 type = btrfs_file_extent_type(leaf, ei); 5965 if (type == BTRFS_FILE_EXTENT_INLINE) { 5966 ext_len = btrfs_file_extent_ram_bytes(leaf, ei); 5967 ext_len = PAGE_ALIGN(ext_len); 5968 } else { 5969 ext_len = btrfs_file_extent_num_bytes(leaf, ei); 5970 } 5971 5972 if (key.offset + ext_len <= clone_root->offset) 5973 goto next; 5974 5975 if (key.offset > clone_root->offset) { 5976 /* Implicit hole, NO_HOLES feature enabled.
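 * The range from clone_root->offset up to key.offset has no extent
 * item in the clone source, so that part is sent as regular data
 * before resuming with this extent.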
*/ 5977 u64 hole_len = key.offset - clone_root->offset; 5978 5979 if (hole_len > len) 5980 hole_len = len; 5981 ret = send_extent_data(sctx, dst_path, offset, 5982 hole_len); 5983 if (ret < 0) 5984 goto out; 5985 5986 len -= hole_len; 5987 if (len == 0) 5988 break; 5989 offset += hole_len; 5990 clone_root->offset += hole_len; 5991 data_offset += hole_len; 5992 } 5993 5994 if (key.offset >= clone_root->offset + len) 5995 break; 5996 5997 if (key.offset >= clone_src_i_size) 5998 break; 5999 6000 if (key.offset + ext_len > clone_src_i_size) { 6001 ext_len = clone_src_i_size - key.offset; 6002 crossed_src_i_size = true; 6003 } 6004 6005 clone_data_offset = btrfs_file_extent_offset(leaf, ei); 6006 if (btrfs_file_extent_disk_bytenr(leaf, ei) == disk_byte) { 6007 clone_root->offset = key.offset; 6008 if (clone_data_offset < data_offset && 6009 clone_data_offset + ext_len > data_offset) { 6010 u64 extent_offset; 6011 6012 extent_offset = data_offset - clone_data_offset; 6013 ext_len -= extent_offset; 6014 clone_data_offset += extent_offset; 6015 clone_root->offset += extent_offset; 6016 } 6017 } 6018 6019 clone_len = min_t(u64, ext_len, len); 6020 6021 if (btrfs_file_extent_disk_bytenr(leaf, ei) == disk_byte && 6022 clone_data_offset == data_offset) { 6023 const u64 src_end = clone_root->offset + clone_len; 6024 const u64 sectorsize = SZ_64K; 6025 6026 /* 6027 * We can't clone the last block, when its size is not 6028 * sector size aligned, into the middle of a file. If we 6029 * do so, the receiver will get a failure (-EINVAL) when 6030 * trying to clone or will silently corrupt the data in 6031 * the destination file if it's on a kernel without the 6032 * fix introduced by commit ac765f83f1397646 6033 * ("Btrfs: fix data corruption due to cloning of eof 6034 * block"). 6035 * 6036 * So issue a clone of the aligned down range plus a 6037 * regular write for the eof block, if we hit that case. 6038 * 6039 * Also, we use the maximum possible sector size, 64K, 6040 * because we don't know what's the sector size of the 6041 * filesystem that receives the stream, so we have to 6042 * assume the largest possible sector size. 6043 */ 6044 if (src_end == clone_src_i_size && 6045 !IS_ALIGNED(src_end, sectorsize) && 6046 offset + clone_len < sctx->cur_inode_size) { 6047 u64 slen; 6048 6049 slen = ALIGN_DOWN(src_end - clone_root->offset, 6050 sectorsize); 6051 if (slen > 0) { 6052 ret = send_clone(sctx, offset, slen, 6053 clone_root); 6054 if (ret < 0) 6055 goto out; 6056 } 6057 ret = send_extent_data(sctx, dst_path, 6058 offset + slen, 6059 clone_len - slen); 6060 } else { 6061 ret = send_clone(sctx, offset, clone_len, 6062 clone_root); 6063 } 6064 } else if (crossed_src_i_size && clone_len < len) { 6065 /* 6066 * If we are at i_size of the clone source inode and we 6067 * cannot clone from it, terminate the loop. This is 6068 * to avoid sending two write operations, one with a 6069 * length matching clone_len and the final one after 6070 * this loop with a length of len - clone_len. 6071 * 6072 * When using encoded writes (BTRFS_SEND_FLAG_COMPRESSED 6073 * was passed to the send ioctl), this helps avoid 6074 * sending an encoded write for an offset that is not 6075 * sector size aligned, in case the i_size of the source 6076 * inode is not sector size aligned. That will make the 6077 * receiver fall back to decompression of the data and 6078 * writing it using regular buffered IO, therefore while 6079 * not incorrect, it's not optimal due to decompression and 6080 * possible re-compression at the receiver.
6081 */ 6082 break; 6083 } else { 6084 ret = send_extent_data(sctx, dst_path, offset, 6085 clone_len); 6086 } 6087 6088 if (ret < 0) 6089 goto out; 6090 6091 len -= clone_len; 6092 if (len == 0) 6093 break; 6094 offset += clone_len; 6095 clone_root->offset += clone_len; 6096 6097 /* 6098 * If we are cloning from the file we are currently processing, 6099 * and using the send root as the clone root, we must stop once 6100 * the current clone offset reaches the current eof of the file 6101 * at the receiver, otherwise we would issue an invalid clone 6102 * operation (source range going beyond eof) and cause the 6103 * receiver to fail. So if we reach the current eof, bail out 6104 * and fall back to a regular write. 6105 */ 6106 if (clone_root->root == sctx->send_root && 6107 clone_root->ino == sctx->cur_ino && 6108 clone_root->offset >= sctx->cur_inode_next_write_offset) 6109 break; 6110 6111 data_offset += clone_len; 6112 next: 6113 path->slots[0]++; 6114 } 6115 6116 if (len > 0) 6117 ret = send_extent_data(sctx, dst_path, offset, len); 6118 else 6119 ret = 0; 6120 out: 6121 btrfs_free_path(path); 6122 return ret; 6123 } 6124 6125 static int send_write_or_clone(struct send_ctx *sctx, 6126 struct btrfs_path *path, 6127 struct btrfs_key *key, 6128 struct clone_root *clone_root) 6129 { 6130 int ret = 0; 6131 u64 offset = key->offset; 6132 u64 end; 6133 u64 bs = sctx->send_root->fs_info->sb->s_blocksize; 6134 6135 end = min_t(u64, btrfs_file_extent_end(path), sctx->cur_inode_size); 6136 if (offset >= end) 6137 return 0; 6138 6139 if (clone_root && IS_ALIGNED(end, bs)) { 6140 struct btrfs_file_extent_item *ei; 6141 u64 disk_byte; 6142 u64 data_offset; 6143 6144 ei = btrfs_item_ptr(path->nodes[0], path->slots[0], 6145 struct btrfs_file_extent_item); 6146 disk_byte = btrfs_file_extent_disk_bytenr(path->nodes[0], ei); 6147 data_offset = btrfs_file_extent_offset(path->nodes[0], ei); 6148 ret = clone_range(sctx, path, clone_root, disk_byte, 6149 data_offset, offset, end - offset); 6150 } else { 6151 ret = send_extent_data(sctx, path, offset, end - offset); 6152 } 6153 sctx->cur_inode_next_write_offset = end; 6154 return ret; 6155 } 6156 6157 static int is_extent_unchanged(struct send_ctx *sctx, 6158 struct btrfs_path *left_path, 6159 struct btrfs_key *ekey) 6160 { 6161 int ret = 0; 6162 struct btrfs_key key; 6163 struct btrfs_path *path = NULL; 6164 struct extent_buffer *eb; 6165 int slot; 6166 struct btrfs_key found_key; 6167 struct btrfs_file_extent_item *ei; 6168 u64 left_disknr; 6169 u64 right_disknr; 6170 u64 left_offset; 6171 u64 right_offset; 6172 u64 left_offset_fixed; 6173 u64 left_len; 6174 u64 right_len; 6175 u64 left_gen; 6176 u64 right_gen; 6177 u8 left_type; 6178 u8 right_type; 6179 6180 path = alloc_path_for_send(); 6181 if (!path) 6182 return -ENOMEM; 6183 6184 eb = left_path->nodes[0]; 6185 slot = left_path->slots[0]; 6186 ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item); 6187 left_type = btrfs_file_extent_type(eb, ei); 6188 6189 if (left_type != BTRFS_FILE_EXTENT_REG) { 6190 ret = 0; 6191 goto out; 6192 } 6193 left_disknr = btrfs_file_extent_disk_bytenr(eb, ei); 6194 left_len = btrfs_file_extent_num_bytes(eb, ei); 6195 left_offset = btrfs_file_extent_offset(eb, ei); 6196 left_gen = btrfs_file_extent_generation(eb, ei); 6197 6198 /* 6199 * Following comments will refer to these graphics. L is the left 6200 * extent which we are checking at the moment. 1-8 are the right 6201 * extents that we iterate.
6202 * 6203 * |-----L-----| 6204 * |-1-|-2a-|-3-|-4-|-5-|-6-| 6205 * 6206 * |-----L-----| 6207 * |--1--|-2b-|...(same as above) 6208 * 6209 * Alternative situation. Happens on files where extents got split. 6210 * |-----L-----| 6211 * |-----------7-----------|-6-| 6212 * 6213 * Alternative situation. Happens on files which got larger. 6214 * |-----L-----| 6215 * |-8-| 6216 * Nothing follows after 8. 6217 */ 6218 6219 key.objectid = ekey->objectid; 6220 key.type = BTRFS_EXTENT_DATA_KEY; 6221 key.offset = ekey->offset; 6222 ret = btrfs_search_slot_for_read(sctx->parent_root, &key, path, 0, 0); 6223 if (ret < 0) 6224 goto out; 6225 if (ret) { 6226 ret = 0; 6227 goto out; 6228 } 6229 6230 /* 6231 * Handle special case where the right side has no extents at all. 6232 */ 6233 eb = path->nodes[0]; 6234 slot = path->slots[0]; 6235 btrfs_item_key_to_cpu(eb, &found_key, slot); 6236 if (found_key.objectid != key.objectid || 6237 found_key.type != key.type) { 6238 /* If we're a hole then just pretend nothing changed */ 6239 ret = (left_disknr) ? 0 : 1; 6240 goto out; 6241 } 6242 6243 /* 6244 * We're now on 2a, 2b or 7. 6245 */ 6246 key = found_key; 6247 while (key.offset < ekey->offset + left_len) { 6248 ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item); 6249 right_type = btrfs_file_extent_type(eb, ei); 6250 if (right_type != BTRFS_FILE_EXTENT_REG && 6251 right_type != BTRFS_FILE_EXTENT_INLINE) { 6252 ret = 0; 6253 goto out; 6254 } 6255 6256 if (right_type == BTRFS_FILE_EXTENT_INLINE) { 6257 right_len = btrfs_file_extent_ram_bytes(eb, ei); 6258 right_len = PAGE_ALIGN(right_len); 6259 } else { 6260 right_len = btrfs_file_extent_num_bytes(eb, ei); 6261 } 6262 6263 /* 6264 * Are we at extent 8? If yes, we know the extent is changed. 6265 * This may only happen on the first iteration. 6266 */ 6267 if (found_key.offset + right_len <= ekey->offset) { 6268 /* If we're a hole just pretend nothing changed */ 6269 ret = (left_disknr) ? 0 : 1; 6270 goto out; 6271 } 6272 6273 /* 6274 * We just wanted to see if when we have an inline extent, what 6275 * follows it is a regular extent (wanted to check the above 6276 * condition for inline extents too). This should normally not 6277 * happen but it's possible for example when we have an inline 6278 * compressed extent representing data with a size matching 6279 * the page size (currently the same as sector size). 6280 */ 6281 if (right_type == BTRFS_FILE_EXTENT_INLINE) { 6282 ret = 0; 6283 goto out; 6284 } 6285 6286 right_disknr = btrfs_file_extent_disk_bytenr(eb, ei); 6287 right_offset = btrfs_file_extent_offset(eb, ei); 6288 right_gen = btrfs_file_extent_generation(eb, ei); 6289 6290 left_offset_fixed = left_offset; 6291 if (key.offset < ekey->offset) { 6292 /* Fix the right offset for 2a and 7. */ 6293 right_offset += ekey->offset - key.offset; 6294 } else { 6295 /* Fix the left offset for all behind 2a and 2b */ 6296 left_offset_fixed += key.offset - ekey->offset; 6297 } 6298 6299 /* 6300 * Check if we have the same extent. 6301 */ 6302 if (left_disknr != right_disknr || 6303 left_offset_fixed != right_offset || 6304 left_gen != right_gen) { 6305 ret = 0; 6306 goto out; 6307 } 6308 6309 /* 6310 * Go to the next extent. 
6311 */ 6312 ret = btrfs_next_item(sctx->parent_root, path); 6313 if (ret < 0) 6314 goto out; 6315 if (!ret) { 6316 eb = path->nodes[0]; 6317 slot = path->slots[0]; 6318 btrfs_item_key_to_cpu(eb, &found_key, slot); 6319 } 6320 if (ret || found_key.objectid != key.objectid || 6321 found_key.type != key.type) { 6322 key.offset += right_len; 6323 break; 6324 } 6325 if (found_key.offset != key.offset + right_len) { 6326 ret = 0; 6327 goto out; 6328 } 6329 key = found_key; 6330 } 6331 6332 /* 6333 * We're now behind the left extent (treat as unchanged) or at the end 6334 * of the right side (treat as changed). 6335 */ 6336 if (key.offset >= ekey->offset + left_len) 6337 ret = 1; 6338 else 6339 ret = 0; 6340 6341 6342 out: 6343 btrfs_free_path(path); 6344 return ret; 6345 } 6346 6347 static int get_last_extent(struct send_ctx *sctx, u64 offset) 6348 { 6349 struct btrfs_path *path; 6350 struct btrfs_root *root = sctx->send_root; 6351 struct btrfs_key key; 6352 int ret; 6353 6354 path = alloc_path_for_send(); 6355 if (!path) 6356 return -ENOMEM; 6357 6358 sctx->cur_inode_last_extent = 0; 6359 6360 key.objectid = sctx->cur_ino; 6361 key.type = BTRFS_EXTENT_DATA_KEY; 6362 key.offset = offset; 6363 ret = btrfs_search_slot_for_read(root, &key, path, 0, 1); 6364 if (ret < 0) 6365 goto out; 6366 ret = 0; 6367 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); 6368 if (key.objectid != sctx->cur_ino || key.type != BTRFS_EXTENT_DATA_KEY) 6369 goto out; 6370 6371 sctx->cur_inode_last_extent = btrfs_file_extent_end(path); 6372 out: 6373 btrfs_free_path(path); 6374 return ret; 6375 } 6376 6377 static int range_is_hole_in_parent(struct send_ctx *sctx, 6378 const u64 start, 6379 const u64 end) 6380 { 6381 struct btrfs_path *path; 6382 struct btrfs_key key; 6383 struct btrfs_root *root = sctx->parent_root; 6384 u64 search_start = start; 6385 int ret; 6386 6387 path = alloc_path_for_send(); 6388 if (!path) 6389 return -ENOMEM; 6390 6391 key.objectid = sctx->cur_ino; 6392 key.type = BTRFS_EXTENT_DATA_KEY; 6393 key.offset = search_start; 6394 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 6395 if (ret < 0) 6396 goto out; 6397 if (ret > 0 && path->slots[0] > 0) 6398 path->slots[0]--; 6399 6400 while (search_start < end) { 6401 struct extent_buffer *leaf = path->nodes[0]; 6402 int slot = path->slots[0]; 6403 struct btrfs_file_extent_item *fi; 6404 u64 extent_end; 6405 6406 if (slot >= btrfs_header_nritems(leaf)) { 6407 ret = btrfs_next_leaf(root, path); 6408 if (ret < 0) 6409 goto out; 6410 else if (ret > 0) 6411 break; 6412 continue; 6413 } 6414 6415 btrfs_item_key_to_cpu(leaf, &key, slot); 6416 if (key.objectid < sctx->cur_ino || 6417 key.type < BTRFS_EXTENT_DATA_KEY) 6418 goto next; 6419 if (key.objectid > sctx->cur_ino || 6420 key.type > BTRFS_EXTENT_DATA_KEY || 6421 key.offset >= end) 6422 break; 6423 6424 fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item); 6425 extent_end = btrfs_file_extent_end(path); 6426 if (extent_end <= start) 6427 goto next; 6428 if (btrfs_file_extent_disk_bytenr(leaf, fi) == 0) { 6429 search_start = extent_end; 6430 goto next; 6431 } 6432 ret = 0; 6433 goto out; 6434 next: 6435 path->slots[0]++; 6436 } 6437 ret = 1; 6438 out: 6439 btrfs_free_path(path); 6440 return ret; 6441 } 6442 6443 static int maybe_send_hole(struct send_ctx *sctx, struct btrfs_path *path, 6444 struct btrfs_key *key) 6445 { 6446 int ret = 0; 6447 6448 if (sctx->cur_ino != key->objectid || !need_send_hole(sctx)) 6449 return 0; 6450 6451 if (sctx->cur_inode_last_extent == (u64)-1) { 6452 
ret = get_last_extent(sctx, key->offset - 1); 6453 if (ret) 6454 return ret; 6455 } 6456 6457 if (path->slots[0] == 0 && 6458 sctx->cur_inode_last_extent < key->offset) { 6459 /* 6460 * We might have skipped entire leaves that contained only 6461 * file extent items for our current inode. These leaves have 6462 * a generation number smaller (older) than the one in the 6463 * current leaf and the leaf our last extent came from, and 6464 * are located between these two leaves. 6465 */ 6466 ret = get_last_extent(sctx, key->offset - 1); 6467 if (ret) 6468 return ret; 6469 } 6470 6471 if (sctx->cur_inode_last_extent < key->offset) { 6472 ret = range_is_hole_in_parent(sctx, 6473 sctx->cur_inode_last_extent, 6474 key->offset); 6475 if (ret < 0) 6476 return ret; 6477 else if (ret == 0) 6478 ret = send_hole(sctx, key->offset); 6479 else 6480 ret = 0; 6481 } 6482 sctx->cur_inode_last_extent = btrfs_file_extent_end(path); 6483 return ret; 6484 } 6485 6486 static int process_extent(struct send_ctx *sctx, 6487 struct btrfs_path *path, 6488 struct btrfs_key *key) 6489 { 6490 struct clone_root *found_clone = NULL; 6491 int ret = 0; 6492 6493 if (S_ISLNK(sctx->cur_inode_mode)) 6494 return 0; 6495 6496 if (sctx->parent_root && !sctx->cur_inode_new) { 6497 ret = is_extent_unchanged(sctx, path, key); 6498 if (ret < 0) 6499 goto out; 6500 if (ret) { 6501 ret = 0; 6502 goto out_hole; 6503 } 6504 } else { 6505 struct btrfs_file_extent_item *ei; 6506 u8 type; 6507 6508 ei = btrfs_item_ptr(path->nodes[0], path->slots[0], 6509 struct btrfs_file_extent_item); 6510 type = btrfs_file_extent_type(path->nodes[0], ei); 6511 if (type == BTRFS_FILE_EXTENT_PREALLOC || 6512 type == BTRFS_FILE_EXTENT_REG) { 6513 /* 6514 * The send spec does not have a prealloc command yet, 6515 * so just leave a hole for prealloc'ed extents until 6516 * we have enough commands queued up to justify rev'ing 6517 * the send spec. 6518 */ 6519 if (type == BTRFS_FILE_EXTENT_PREALLOC) { 6520 ret = 0; 6521 goto out; 6522 } 6523 6524 /* Have a hole, just skip it.
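 * (A regular file extent item with a disk_bytenr of 0 represents
 * a hole.)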
*/ 6525 if (btrfs_file_extent_disk_bytenr(path->nodes[0], ei) == 0) { 6526 ret = 0; 6527 goto out; 6528 } 6529 } 6530 } 6531 6532 ret = find_extent_clone(sctx, path, key->objectid, key->offset, 6533 sctx->cur_inode_size, &found_clone); 6534 if (ret != -ENOENT && ret < 0) 6535 goto out; 6536 6537 ret = send_write_or_clone(sctx, path, key, found_clone); 6538 if (ret) 6539 goto out; 6540 out_hole: 6541 ret = maybe_send_hole(sctx, path, key); 6542 out: 6543 return ret; 6544 } 6545 6546 static int process_all_extents(struct send_ctx *sctx) 6547 { 6548 int ret = 0; 6549 int iter_ret = 0; 6550 struct btrfs_root *root; 6551 struct btrfs_path *path; 6552 struct btrfs_key key; 6553 struct btrfs_key found_key; 6554 6555 root = sctx->send_root; 6556 path = alloc_path_for_send(); 6557 if (!path) 6558 return -ENOMEM; 6559 6560 key.objectid = sctx->cmp_key->objectid; 6561 key.type = BTRFS_EXTENT_DATA_KEY; 6562 key.offset = 0; 6563 btrfs_for_each_slot(root, &key, &found_key, path, iter_ret) { 6564 if (found_key.objectid != key.objectid || 6565 found_key.type != key.type) { 6566 ret = 0; 6567 break; 6568 } 6569 6570 ret = process_extent(sctx, path, &found_key); 6571 if (ret < 0) 6572 break; 6573 } 6574 /* Catch error found during iteration */ 6575 if (iter_ret < 0) 6576 ret = iter_ret; 6577 6578 btrfs_free_path(path); 6579 return ret; 6580 } 6581 6582 static int process_recorded_refs_if_needed(struct send_ctx *sctx, int at_end, 6583 int *pending_move, 6584 int *refs_processed) 6585 { 6586 int ret = 0; 6587 6588 if (sctx->cur_ino == 0) 6589 goto out; 6590 if (!at_end && sctx->cur_ino == sctx->cmp_key->objectid && 6591 sctx->cmp_key->type <= BTRFS_INODE_EXTREF_KEY) 6592 goto out; 6593 if (list_empty(&sctx->new_refs) && list_empty(&sctx->deleted_refs)) 6594 goto out; 6595 6596 ret = process_recorded_refs(sctx, pending_move); 6597 if (ret < 0) 6598 goto out; 6599 6600 *refs_processed = 1; 6601 out: 6602 return ret; 6603 } 6604 6605 static int finish_inode_if_needed(struct send_ctx *sctx, int at_end) 6606 { 6607 int ret = 0; 6608 struct btrfs_inode_info info; 6609 u64 left_mode; 6610 u64 left_uid; 6611 u64 left_gid; 6612 u64 left_fileattr; 6613 u64 right_mode; 6614 u64 right_uid; 6615 u64 right_gid; 6616 u64 right_fileattr; 6617 int need_chmod = 0; 6618 int need_chown = 0; 6619 bool need_fileattr = false; 6620 int need_truncate = 1; 6621 int pending_move = 0; 6622 int refs_processed = 0; 6623 6624 if (sctx->ignore_cur_inode) 6625 return 0; 6626 6627 ret = process_recorded_refs_if_needed(sctx, at_end, &pending_move, 6628 &refs_processed); 6629 if (ret < 0) 6630 goto out; 6631 6632 /* 6633 * We have processed the refs and thus need to advance send_progress. 6634 * Now, calls to get_cur_xxx will take the updated refs of the current 6635 * inode into account. 6636 * 6637 * On the other hand, if our current inode is a directory and couldn't 6638 * be moved/renamed because its parent was renamed/moved too and it has 6639 * a higher inode number, we can only move/rename our current inode 6640 * after we moved/renamed its parent. Therefore in this case operate on 6641 * the old path (pre move/rename) of our current inode, and the 6642 * move/rename will be performed later. 
6643 */ 6644 if (refs_processed && !pending_move) 6645 sctx->send_progress = sctx->cur_ino + 1; 6646 6647 if (sctx->cur_ino == 0 || sctx->cur_inode_deleted) 6648 goto out; 6649 if (!at_end && sctx->cmp_key->objectid == sctx->cur_ino) 6650 goto out; 6651 ret = get_inode_info(sctx->send_root, sctx->cur_ino, &info); 6652 if (ret < 0) 6653 goto out; 6654 left_mode = info.mode; 6655 left_uid = info.uid; 6656 left_gid = info.gid; 6657 left_fileattr = info.fileattr; 6658 6659 if (!sctx->parent_root || sctx->cur_inode_new) { 6660 need_chown = 1; 6661 if (!S_ISLNK(sctx->cur_inode_mode)) 6662 need_chmod = 1; 6663 if (sctx->cur_inode_next_write_offset == sctx->cur_inode_size) 6664 need_truncate = 0; 6665 } else { 6666 u64 old_size; 6667 6668 ret = get_inode_info(sctx->parent_root, sctx->cur_ino, &info); 6669 if (ret < 0) 6670 goto out; 6671 old_size = info.size; 6672 right_mode = info.mode; 6673 right_uid = info.uid; 6674 right_gid = info.gid; 6675 right_fileattr = info.fileattr; 6676 6677 if (left_uid != right_uid || left_gid != right_gid) 6678 need_chown = 1; 6679 if (!S_ISLNK(sctx->cur_inode_mode) && left_mode != right_mode) 6680 need_chmod = 1; 6681 if (!S_ISLNK(sctx->cur_inode_mode) && left_fileattr != right_fileattr) 6682 need_fileattr = true; 6683 if ((old_size == sctx->cur_inode_size) || 6684 (sctx->cur_inode_size > old_size && 6685 sctx->cur_inode_next_write_offset == sctx->cur_inode_size)) 6686 need_truncate = 0; 6687 } 6688 6689 if (S_ISREG(sctx->cur_inode_mode)) { 6690 if (need_send_hole(sctx)) { 6691 if (sctx->cur_inode_last_extent == (u64)-1 || 6692 sctx->cur_inode_last_extent < 6693 sctx->cur_inode_size) { 6694 ret = get_last_extent(sctx, (u64)-1); 6695 if (ret) 6696 goto out; 6697 } 6698 if (sctx->cur_inode_last_extent < 6699 sctx->cur_inode_size) { 6700 ret = send_hole(sctx, sctx->cur_inode_size); 6701 if (ret) 6702 goto out; 6703 } 6704 } 6705 if (need_truncate) { 6706 ret = send_truncate(sctx, sctx->cur_ino, 6707 sctx->cur_inode_gen, 6708 sctx->cur_inode_size); 6709 if (ret < 0) 6710 goto out; 6711 } 6712 } 6713 6714 if (need_chown) { 6715 ret = send_chown(sctx, sctx->cur_ino, sctx->cur_inode_gen, 6716 left_uid, left_gid); 6717 if (ret < 0) 6718 goto out; 6719 } 6720 if (need_chmod) { 6721 ret = send_chmod(sctx, sctx->cur_ino, sctx->cur_inode_gen, 6722 left_mode); 6723 if (ret < 0) 6724 goto out; 6725 } 6726 if (need_fileattr) { 6727 ret = send_fileattr(sctx, sctx->cur_ino, sctx->cur_inode_gen, 6728 left_fileattr); 6729 if (ret < 0) 6730 goto out; 6731 } 6732 6733 if (proto_cmd_ok(sctx, BTRFS_SEND_C_ENABLE_VERITY) 6734 && sctx->cur_inode_needs_verity) { 6735 ret = process_verity(sctx); 6736 if (ret < 0) 6737 goto out; 6738 } 6739 6740 ret = send_capabilities(sctx); 6741 if (ret < 0) 6742 goto out; 6743 6744 /* 6745 * If other directory inodes depended on our current directory 6746 * inode's move/rename, now do their move/rename operations. 6747 */ 6748 if (!is_waiting_for_move(sctx, sctx->cur_ino)) { 6749 ret = apply_children_dir_moves(sctx); 6750 if (ret) 6751 goto out; 6752 /* 6753 * Need to send that every time, no matter if it actually 6754 * changed between the two trees as we have done changes to 6755 * the inode before. If our inode is a directory and it's 6756 * waiting to be moved/renamed, we will send its utimes when 6757 * it's moved/renamed, therefore we don't need to do it here. 
6758 */ 6759 sctx->send_progress = sctx->cur_ino + 1; 6760 6761 /* 6762 * If the current inode is a non-empty directory, delay issuing 6763 * the utimes command for it, as it's very likely we have inodes 6764 * with an higher number inside it. We want to issue the utimes 6765 * command only after adding all dentries to it. 6766 */ 6767 if (S_ISDIR(sctx->cur_inode_mode) && sctx->cur_inode_size > 0) 6768 ret = cache_dir_utimes(sctx, sctx->cur_ino, sctx->cur_inode_gen); 6769 else 6770 ret = send_utimes(sctx, sctx->cur_ino, sctx->cur_inode_gen); 6771 6772 if (ret < 0) 6773 goto out; 6774 } 6775 6776 out: 6777 if (!ret) 6778 ret = trim_dir_utimes_cache(sctx); 6779 6780 return ret; 6781 } 6782 6783 static void close_current_inode(struct send_ctx *sctx) 6784 { 6785 u64 i_size; 6786 6787 if (sctx->cur_inode == NULL) 6788 return; 6789 6790 i_size = i_size_read(sctx->cur_inode); 6791 6792 /* 6793 * If we are doing an incremental send, we may have extents between the 6794 * last processed extent and the i_size that have not been processed 6795 * because they haven't changed but we may have read some of their pages 6796 * through readahead, see the comments at send_extent_data(). 6797 */ 6798 if (sctx->clean_page_cache && sctx->page_cache_clear_start < i_size) 6799 truncate_inode_pages_range(&sctx->cur_inode->i_data, 6800 sctx->page_cache_clear_start, 6801 round_up(i_size, PAGE_SIZE) - 1); 6802 6803 iput(sctx->cur_inode); 6804 sctx->cur_inode = NULL; 6805 } 6806 6807 static int changed_inode(struct send_ctx *sctx, 6808 enum btrfs_compare_tree_result result) 6809 { 6810 int ret = 0; 6811 struct btrfs_key *key = sctx->cmp_key; 6812 struct btrfs_inode_item *left_ii = NULL; 6813 struct btrfs_inode_item *right_ii = NULL; 6814 u64 left_gen = 0; 6815 u64 right_gen = 0; 6816 6817 close_current_inode(sctx); 6818 6819 sctx->cur_ino = key->objectid; 6820 sctx->cur_inode_new_gen = false; 6821 sctx->cur_inode_last_extent = (u64)-1; 6822 sctx->cur_inode_next_write_offset = 0; 6823 sctx->ignore_cur_inode = false; 6824 6825 /* 6826 * Set send_progress to current inode. This will tell all get_cur_xxx 6827 * functions that the current inode's refs are not updated yet. Later, 6828 * when process_recorded_refs is finished, it is set to cur_ino + 1. 6829 */ 6830 sctx->send_progress = sctx->cur_ino; 6831 6832 if (result == BTRFS_COMPARE_TREE_NEW || 6833 result == BTRFS_COMPARE_TREE_CHANGED) { 6834 left_ii = btrfs_item_ptr(sctx->left_path->nodes[0], 6835 sctx->left_path->slots[0], 6836 struct btrfs_inode_item); 6837 left_gen = btrfs_inode_generation(sctx->left_path->nodes[0], 6838 left_ii); 6839 } else { 6840 right_ii = btrfs_item_ptr(sctx->right_path->nodes[0], 6841 sctx->right_path->slots[0], 6842 struct btrfs_inode_item); 6843 right_gen = btrfs_inode_generation(sctx->right_path->nodes[0], 6844 right_ii); 6845 } 6846 if (result == BTRFS_COMPARE_TREE_CHANGED) { 6847 right_ii = btrfs_item_ptr(sctx->right_path->nodes[0], 6848 sctx->right_path->slots[0], 6849 struct btrfs_inode_item); 6850 6851 right_gen = btrfs_inode_generation(sctx->right_path->nodes[0], 6852 right_ii); 6853 6854 /* 6855 * The cur_ino = root dir case is special here. We can't treat 6856 * the inode as deleted+reused because it would generate a 6857 * stream that tries to delete/mkdir the root dir. 
6858 */ 6859 if (left_gen != right_gen && 6860 sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID) 6861 sctx->cur_inode_new_gen = true; 6862 } 6863 6864 /* 6865 * Normally we do not find inodes with a link count of zero (orphans) 6866 * because the most common case is to create a snapshot and use it 6867 * for a send operation. However other less common use cases involve 6868 * using a subvolume and send it after turning it to RO mode just 6869 * after deleting all hard links of a file while holding an open 6870 * file descriptor against it or turning a RO snapshot into RW mode, 6871 * keep an open file descriptor against a file, delete it and then 6872 * turn the snapshot back to RO mode before using it for a send 6873 * operation. The former is what the receiver operation does. 6874 * Therefore, if we want to send these snapshots soon after they're 6875 * received, we need to handle orphan inodes as well. Moreover, orphans 6876 * can appear not only in the send snapshot but also in the parent 6877 * snapshot. Here are several cases: 6878 * 6879 * Case 1: BTRFS_COMPARE_TREE_NEW 6880 * | send snapshot | action 6881 * -------------------------------- 6882 * nlink | 0 | ignore 6883 * 6884 * Case 2: BTRFS_COMPARE_TREE_DELETED 6885 * | parent snapshot | action 6886 * ---------------------------------- 6887 * nlink | 0 | as usual 6888 * Note: No unlinks will be sent because there're no paths for it. 6889 * 6890 * Case 3: BTRFS_COMPARE_TREE_CHANGED 6891 * | | parent snapshot | send snapshot | action 6892 * ----------------------------------------------------------------------- 6893 * subcase 1 | nlink | 0 | 0 | ignore 6894 * subcase 2 | nlink | >0 | 0 | new_gen(deletion) 6895 * subcase 3 | nlink | 0 | >0 | new_gen(creation) 6896 * 6897 */ 6898 if (result == BTRFS_COMPARE_TREE_NEW) { 6899 if (btrfs_inode_nlink(sctx->left_path->nodes[0], left_ii) == 0) { 6900 sctx->ignore_cur_inode = true; 6901 goto out; 6902 } 6903 sctx->cur_inode_gen = left_gen; 6904 sctx->cur_inode_new = true; 6905 sctx->cur_inode_deleted = false; 6906 sctx->cur_inode_size = btrfs_inode_size( 6907 sctx->left_path->nodes[0], left_ii); 6908 sctx->cur_inode_mode = btrfs_inode_mode( 6909 sctx->left_path->nodes[0], left_ii); 6910 sctx->cur_inode_rdev = btrfs_inode_rdev( 6911 sctx->left_path->nodes[0], left_ii); 6912 if (sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID) 6913 ret = send_create_inode_if_needed(sctx); 6914 } else if (result == BTRFS_COMPARE_TREE_DELETED) { 6915 sctx->cur_inode_gen = right_gen; 6916 sctx->cur_inode_new = false; 6917 sctx->cur_inode_deleted = true; 6918 sctx->cur_inode_size = btrfs_inode_size( 6919 sctx->right_path->nodes[0], right_ii); 6920 sctx->cur_inode_mode = btrfs_inode_mode( 6921 sctx->right_path->nodes[0], right_ii); 6922 } else if (result == BTRFS_COMPARE_TREE_CHANGED) { 6923 u32 new_nlinks, old_nlinks; 6924 6925 new_nlinks = btrfs_inode_nlink(sctx->left_path->nodes[0], left_ii); 6926 old_nlinks = btrfs_inode_nlink(sctx->right_path->nodes[0], right_ii); 6927 if (new_nlinks == 0 && old_nlinks == 0) { 6928 sctx->ignore_cur_inode = true; 6929 goto out; 6930 } else if (new_nlinks == 0 || old_nlinks == 0) { 6931 sctx->cur_inode_new_gen = 1; 6932 } 6933 /* 6934 * We need to do some special handling in case the inode was 6935 * reported as changed with a changed generation number. This 6936 * means that the original inode was deleted and new inode 6937 * reused the same inum. So we have to treat the old inode as 6938 * deleted and the new one as new. 
6939 */ 6940 if (sctx->cur_inode_new_gen) { 6941 /* 6942 * First, process the inode as if it was deleted. 6943 */ 6944 if (old_nlinks > 0) { 6945 sctx->cur_inode_gen = right_gen; 6946 sctx->cur_inode_new = false; 6947 sctx->cur_inode_deleted = true; 6948 sctx->cur_inode_size = btrfs_inode_size( 6949 sctx->right_path->nodes[0], right_ii); 6950 sctx->cur_inode_mode = btrfs_inode_mode( 6951 sctx->right_path->nodes[0], right_ii); 6952 ret = process_all_refs(sctx, 6953 BTRFS_COMPARE_TREE_DELETED); 6954 if (ret < 0) 6955 goto out; 6956 } 6957 6958 /* 6959 * Now process the inode as if it was new. 6960 */ 6961 if (new_nlinks > 0) { 6962 sctx->cur_inode_gen = left_gen; 6963 sctx->cur_inode_new = true; 6964 sctx->cur_inode_deleted = false; 6965 sctx->cur_inode_size = btrfs_inode_size( 6966 sctx->left_path->nodes[0], 6967 left_ii); 6968 sctx->cur_inode_mode = btrfs_inode_mode( 6969 sctx->left_path->nodes[0], 6970 left_ii); 6971 sctx->cur_inode_rdev = btrfs_inode_rdev( 6972 sctx->left_path->nodes[0], 6973 left_ii); 6974 ret = send_create_inode_if_needed(sctx); 6975 if (ret < 0) 6976 goto out; 6977 6978 ret = process_all_refs(sctx, BTRFS_COMPARE_TREE_NEW); 6979 if (ret < 0) 6980 goto out; 6981 /* 6982 * Advance send_progress now as we did not get 6983 * into process_recorded_refs_if_needed in the 6984 * new_gen case. 6985 */ 6986 sctx->send_progress = sctx->cur_ino + 1; 6987 6988 /* 6989 * Now process all extents and xattrs of the 6990 * inode as if they were all new. 6991 */ 6992 ret = process_all_extents(sctx); 6993 if (ret < 0) 6994 goto out; 6995 ret = process_all_new_xattrs(sctx); 6996 if (ret < 0) 6997 goto out; 6998 } 6999 } else { 7000 sctx->cur_inode_gen = left_gen; 7001 sctx->cur_inode_new = false; 7002 sctx->cur_inode_new_gen = false; 7003 sctx->cur_inode_deleted = false; 7004 sctx->cur_inode_size = btrfs_inode_size( 7005 sctx->left_path->nodes[0], left_ii); 7006 sctx->cur_inode_mode = btrfs_inode_mode( 7007 sctx->left_path->nodes[0], left_ii); 7008 } 7009 } 7010 7011 out: 7012 return ret; 7013 } 7014 7015 /* 7016 * We have to process new refs before deleted refs, but compare_trees gives us 7017 * the new and deleted refs mixed. To fix this, we record the new/deleted refs 7018 * first and later process them in process_recorded_refs. 7019 * For the cur_inode_new_gen case, we skip recording completely because 7020 * changed_inode did already initiate processing of refs. The reason for this is 7021 * that in this case, compare_tree actually compares the refs of 2 different 7022 * inodes. To fix this, process_all_refs is used in changed_inode to handle all 7023 * refs of the right tree as deleted and all refs of the left tree as new. 7024 */ 7025 static int changed_ref(struct send_ctx *sctx, 7026 enum btrfs_compare_tree_result result) 7027 { 7028 int ret = 0; 7029 7030 if (sctx->cur_ino != sctx->cmp_key->objectid) { 7031 inconsistent_snapshot_error(sctx, result, "reference"); 7032 return -EIO; 7033 } 7034 7035 if (!sctx->cur_inode_new_gen && 7036 sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID) { 7037 if (result == BTRFS_COMPARE_TREE_NEW) 7038 ret = record_new_ref(sctx); 7039 else if (result == BTRFS_COMPARE_TREE_DELETED) 7040 ret = record_deleted_ref(sctx); 7041 else if (result == BTRFS_COMPARE_TREE_CHANGED) 7042 ret = record_changed_ref(sctx); 7043 } 7044 7045 return ret; 7046 } 7047 7048 /* 7049 * Process new/deleted/changed xattrs. We skip processing in the 7050 * cur_inode_new_gen case because changed_inode did already initiate processing 7051 * of xattrs. 
The reason is the same as in changed_ref(). 7052 */ 7053 static int changed_xattr(struct send_ctx *sctx, 7054 enum btrfs_compare_tree_result result) 7055 { 7056 int ret = 0; 7057 7058 if (sctx->cur_ino != sctx->cmp_key->objectid) { 7059 inconsistent_snapshot_error(sctx, result, "xattr"); 7060 return -EIO; 7061 } 7062 7063 if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) { 7064 if (result == BTRFS_COMPARE_TREE_NEW) 7065 ret = process_new_xattr(sctx); 7066 else if (result == BTRFS_COMPARE_TREE_DELETED) 7067 ret = process_deleted_xattr(sctx); 7068 else if (result == BTRFS_COMPARE_TREE_CHANGED) 7069 ret = process_changed_xattr(sctx); 7070 } 7071 7072 return ret; 7073 } 7074 7075 /* 7076 * Process new/deleted/changed extents. We skip processing in the 7077 * cur_inode_new_gen case because changed_inode has already initiated processing 7078 * of extents. The reason is the same as in changed_ref(). 7079 */ 7080 static int changed_extent(struct send_ctx *sctx, 7081 enum btrfs_compare_tree_result result) 7082 { 7083 int ret = 0; 7084 7085 /* 7086 * We have found an extent item that changed without the inode item 7087 * having changed. This can happen either after relocation (where the 7088 * disk_bytenr of an extent item is replaced at 7089 * relocation.c:replace_file_extents()) or after deduplication into a 7090 * file in both the parent and send snapshots (where an extent item can 7091 * get modified or replaced with a new one). Note that deduplication 7092 * updates the inode item, but it only changes the iversion (sequence 7093 * field in the inode item) of the inode, so if a file is deduplicated 7094 * the same number of times in both the parent and send snapshots, its 7095 * iversion becomes the same in both snapshots, and hence the inode item 7096 * is the same in both snapshots. 7097 */ 7098 if (sctx->cur_ino != sctx->cmp_key->objectid) 7099 return 0; 7100 7101 if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) { 7102 if (result != BTRFS_COMPARE_TREE_DELETED) 7103 ret = process_extent(sctx, sctx->left_path, 7104 sctx->cmp_key); 7105 } 7106 7107 return ret; 7108 } 7109 7110 static int changed_verity(struct send_ctx *sctx, enum btrfs_compare_tree_result result) 7111 { 7112 int ret = 0; 7113 7114 if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) { 7115 if (result == BTRFS_COMPARE_TREE_NEW) 7116 sctx->cur_inode_needs_verity = true; 7117 } 7118 return ret; 7119 } 7120 7121 static int dir_changed(struct send_ctx *sctx, u64 dir) 7122 { 7123 u64 orig_gen, new_gen; 7124 int ret; 7125 7126 ret = get_inode_gen(sctx->send_root, dir, &new_gen); 7127 if (ret) 7128 return ret; 7129 7130 ret = get_inode_gen(sctx->parent_root, dir, &orig_gen); 7131 if (ret) 7132 return ret; 7133 7134 return (orig_gen != new_gen) ?
1 : 0; 7135 } 7136 7137 static int compare_refs(struct send_ctx *sctx, struct btrfs_path *path, 7138 struct btrfs_key *key) 7139 { 7140 struct btrfs_inode_extref *extref; 7141 struct extent_buffer *leaf; 7142 u64 dirid = 0, last_dirid = 0; 7143 unsigned long ptr; 7144 u32 item_size; 7145 u32 cur_offset = 0; 7146 int ref_name_len; 7147 int ret = 0; 7148 7149 /* Easy case, just check this one dirid */ 7150 if (key->type == BTRFS_INODE_REF_KEY) { 7151 dirid = key->offset; 7152 7153 ret = dir_changed(sctx, dirid); 7154 goto out; 7155 } 7156 7157 leaf = path->nodes[0]; 7158 item_size = btrfs_item_size(leaf, path->slots[0]); 7159 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); 7160 while (cur_offset < item_size) { 7161 extref = (struct btrfs_inode_extref *)(ptr + 7162 cur_offset); 7163 dirid = btrfs_inode_extref_parent(leaf, extref); 7164 ref_name_len = btrfs_inode_extref_name_len(leaf, extref); 7165 cur_offset += ref_name_len + sizeof(*extref); 7166 if (dirid == last_dirid) 7167 continue; 7168 ret = dir_changed(sctx, dirid); 7169 if (ret) 7170 break; 7171 last_dirid = dirid; 7172 } 7173 out: 7174 return ret; 7175 } 7176 7177 /* 7178 * Updates the compare-related fields in sctx and simply forwards to the actual 7179 * changed_xxx functions. 7180 */ 7181 static int changed_cb(struct btrfs_path *left_path, 7182 struct btrfs_path *right_path, 7183 struct btrfs_key *key, 7184 enum btrfs_compare_tree_result result, 7185 struct send_ctx *sctx) 7186 { 7187 int ret = 0; 7188 7189 /* 7190 * We cannot hold the commit root semaphore here. This is because, in 7191 * the case of sending and receiving to the same filesystem using a 7192 * pipe, holding it could result in a deadlock: 7193 * 7194 * 1) The task running send blocks on the pipe because it's full; 7195 * 7196 * 2) The task running receive, which is the only consumer of the pipe, 7197 * is waiting for a transaction commit (for example due to a space 7198 * reservation when doing a write or triggering a transaction commit 7199 * when creating a subvolume); 7200 * 7201 * 3) The transaction is waiting to write lock the commit root semaphore, 7202 * but cannot acquire it since it's being held by the task at 1). 7203 * 7204 * Down this call chain we write to the pipe through kernel_write(). 7205 * The same type of problem can also happen when sending to a file that 7206 * is stored in the same filesystem - when reserving space for a write 7207 * into the file, we can trigger a transaction commit. 7208 * 7209 * Our caller has supplied us with clones of leaves from the send and 7210 * parent roots, so while we are here we're safe from a concurrent 7211 * relocation and further reallocation of metadata extents. Below we 7212 * also assert that the leaves are clones. 7213 */ 7214 lockdep_assert_not_held(&sctx->send_root->fs_info->commit_root_sem); 7215 7216 /* 7217 * We always have a send root, so left_path is never NULL. We will not 7218 * have a leaf when we have reached the end of the send root but have 7219 * not yet reached the end of the parent root. 7220 */ 7221 if (left_path->nodes[0]) 7222 ASSERT(test_bit(EXTENT_BUFFER_UNMAPPED, 7223 &left_path->nodes[0]->bflags)); 7224 /* 7225 * When doing a full send we don't have a parent root, so right_path is 7226 * NULL. When doing an incremental send, we may have reached the end of 7227 * the parent root already, so we don't have a leaf at right_path.
7228 */ 7229 if (right_path && right_path->nodes[0]) 7230 ASSERT(test_bit(EXTENT_BUFFER_UNMAPPED, 7231 &right_path->nodes[0]->bflags)); 7232 7233 if (result == BTRFS_COMPARE_TREE_SAME) { 7234 if (key->type == BTRFS_INODE_REF_KEY || 7235 key->type == BTRFS_INODE_EXTREF_KEY) { 7236 ret = compare_refs(sctx, left_path, key); 7237 if (!ret) 7238 return 0; 7239 if (ret < 0) 7240 return ret; 7241 } else if (key->type == BTRFS_EXTENT_DATA_KEY) { 7242 return maybe_send_hole(sctx, left_path, key); 7243 } else { 7244 return 0; 7245 } 7246 result = BTRFS_COMPARE_TREE_CHANGED; 7247 ret = 0; 7248 } 7249 7250 sctx->left_path = left_path; 7251 sctx->right_path = right_path; 7252 sctx->cmp_key = key; 7253 7254 ret = finish_inode_if_needed(sctx, 0); 7255 if (ret < 0) 7256 goto out; 7257 7258 /* Ignore non-FS objects */ 7259 if (key->objectid == BTRFS_FREE_INO_OBJECTID || 7260 key->objectid == BTRFS_FREE_SPACE_OBJECTID) 7261 goto out; 7262 7263 if (key->type == BTRFS_INODE_ITEM_KEY) { 7264 ret = changed_inode(sctx, result); 7265 } else if (!sctx->ignore_cur_inode) { 7266 if (key->type == BTRFS_INODE_REF_KEY || 7267 key->type == BTRFS_INODE_EXTREF_KEY) 7268 ret = changed_ref(sctx, result); 7269 else if (key->type == BTRFS_XATTR_ITEM_KEY) 7270 ret = changed_xattr(sctx, result); 7271 else if (key->type == BTRFS_EXTENT_DATA_KEY) 7272 ret = changed_extent(sctx, result); 7273 else if (key->type == BTRFS_VERITY_DESC_ITEM_KEY && 7274 key->offset == 0) 7275 ret = changed_verity(sctx, result); 7276 } 7277 7278 out: 7279 return ret; 7280 } 7281 7282 static int search_key_again(const struct send_ctx *sctx, 7283 struct btrfs_root *root, 7284 struct btrfs_path *path, 7285 const struct btrfs_key *key) 7286 { 7287 int ret; 7288 7289 if (!path->need_commit_sem) 7290 lockdep_assert_held_read(&root->fs_info->commit_root_sem); 7291 7292 /* 7293 * Roots used for send operations are readonly and no one can add, 7294 * update or remove keys from them, so we should be able to find our 7295 * key again. The only exception is deduplication, which can operate on 7296 * readonly roots and add, update or remove keys to/from them - but at 7297 * the moment we don't allow it to run in parallel with send. 7298 */ 7299 ret = btrfs_search_slot(NULL, root, key, path, 0, 0); 7300 ASSERT(ret <= 0); 7301 if (ret > 0) { 7302 btrfs_print_tree(path->nodes[path->lowest_level], false); 7303 btrfs_err(root->fs_info, 7304 "send: key (%llu %u %llu) not found in %s root %llu, lowest_level %d, slot %d", 7305 key->objectid, key->type, key->offset, 7306 (root == sctx->parent_root ? 
"parent" : "send"), 7307 root->root_key.objectid, path->lowest_level, 7308 path->slots[path->lowest_level]); 7309 return -EUCLEAN; 7310 } 7311 7312 return ret; 7313 } 7314 7315 static int full_send_tree(struct send_ctx *sctx) 7316 { 7317 int ret; 7318 struct btrfs_root *send_root = sctx->send_root; 7319 struct btrfs_key key; 7320 struct btrfs_fs_info *fs_info = send_root->fs_info; 7321 struct btrfs_path *path; 7322 7323 path = alloc_path_for_send(); 7324 if (!path) 7325 return -ENOMEM; 7326 path->reada = READA_FORWARD_ALWAYS; 7327 7328 key.objectid = BTRFS_FIRST_FREE_OBJECTID; 7329 key.type = BTRFS_INODE_ITEM_KEY; 7330 key.offset = 0; 7331 7332 down_read(&fs_info->commit_root_sem); 7333 sctx->last_reloc_trans = fs_info->last_reloc_trans; 7334 up_read(&fs_info->commit_root_sem); 7335 7336 ret = btrfs_search_slot_for_read(send_root, &key, path, 1, 0); 7337 if (ret < 0) 7338 goto out; 7339 if (ret) 7340 goto out_finish; 7341 7342 while (1) { 7343 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); 7344 7345 ret = changed_cb(path, NULL, &key, 7346 BTRFS_COMPARE_TREE_NEW, sctx); 7347 if (ret < 0) 7348 goto out; 7349 7350 down_read(&fs_info->commit_root_sem); 7351 if (fs_info->last_reloc_trans > sctx->last_reloc_trans) { 7352 sctx->last_reloc_trans = fs_info->last_reloc_trans; 7353 up_read(&fs_info->commit_root_sem); 7354 /* 7355 * A transaction used for relocating a block group was 7356 * committed or is about to finish its commit. Release 7357 * our path (leaf) and restart the search, so that we 7358 * avoid operating on any file extent items that are 7359 * stale, with a disk_bytenr that reflects a pre 7360 * relocation value. This way we avoid as much as 7361 * possible to fallback to regular writes when checking 7362 * if we can clone file ranges. 7363 */ 7364 btrfs_release_path(path); 7365 ret = search_key_again(sctx, send_root, path, &key); 7366 if (ret < 0) 7367 goto out; 7368 } else { 7369 up_read(&fs_info->commit_root_sem); 7370 } 7371 7372 ret = btrfs_next_item(send_root, path); 7373 if (ret < 0) 7374 goto out; 7375 if (ret) { 7376 ret = 0; 7377 break; 7378 } 7379 } 7380 7381 out_finish: 7382 ret = finish_inode_if_needed(sctx, 1); 7383 7384 out: 7385 btrfs_free_path(path); 7386 return ret; 7387 } 7388 7389 static int replace_node_with_clone(struct btrfs_path *path, int level) 7390 { 7391 struct extent_buffer *clone; 7392 7393 clone = btrfs_clone_extent_buffer(path->nodes[level]); 7394 if (!clone) 7395 return -ENOMEM; 7396 7397 free_extent_buffer(path->nodes[level]); 7398 path->nodes[level] = clone; 7399 7400 return 0; 7401 } 7402 7403 static int tree_move_down(struct btrfs_path *path, int *level, u64 reada_min_gen) 7404 { 7405 struct extent_buffer *eb; 7406 struct extent_buffer *parent = path->nodes[*level]; 7407 int slot = path->slots[*level]; 7408 const int nritems = btrfs_header_nritems(parent); 7409 u64 reada_max; 7410 u64 reada_done = 0; 7411 7412 lockdep_assert_held_read(&parent->fs_info->commit_root_sem); 7413 7414 BUG_ON(*level == 0); 7415 eb = btrfs_read_node_slot(parent, slot); 7416 if (IS_ERR(eb)) 7417 return PTR_ERR(eb); 7418 7419 /* 7420 * Trigger readahead for the next leaves we will process, so that it is 7421 * very likely that when we need them they are already in memory and we 7422 * will not block on disk IO. For nodes we only do readahead for one, 7423 * since the time window between processing nodes is typically larger. 7424 */ 7425 reada_max = (*level == 1 ? 
SZ_128K : eb->fs_info->nodesize); 7426 7427 for (slot++; slot < nritems && reada_done < reada_max; slot++) { 7428 if (btrfs_node_ptr_generation(parent, slot) > reada_min_gen) { 7429 btrfs_readahead_node_child(parent, slot); 7430 reada_done += eb->fs_info->nodesize; 7431 } 7432 } 7433 7434 path->nodes[*level - 1] = eb; 7435 path->slots[*level - 1] = 0; 7436 (*level)--; 7437 7438 if (*level == 0) 7439 return replace_node_with_clone(path, 0); 7440 7441 return 0; 7442 } 7443 7444 static int tree_move_next_or_upnext(struct btrfs_path *path, 7445 int *level, int root_level) 7446 { 7447 int ret = 0; 7448 int nritems; 7449 nritems = btrfs_header_nritems(path->nodes[*level]); 7450 7451 path->slots[*level]++; 7452 7453 while (path->slots[*level] >= nritems) { 7454 if (*level == root_level) { 7455 path->slots[*level] = nritems - 1; 7456 return -1; 7457 } 7458 7459 /* move upnext */ 7460 path->slots[*level] = 0; 7461 free_extent_buffer(path->nodes[*level]); 7462 path->nodes[*level] = NULL; 7463 (*level)++; 7464 path->slots[*level]++; 7465 7466 nritems = btrfs_header_nritems(path->nodes[*level]); 7467 ret = 1; 7468 } 7469 return ret; 7470 } 7471 7472 /* 7473 * Returns 1 if it had to move up and next. 0 is returned if it moved only next 7474 * or down. 7475 */ 7476 static int tree_advance(struct btrfs_path *path, 7477 int *level, int root_level, 7478 int allow_down, 7479 struct btrfs_key *key, 7480 u64 reada_min_gen) 7481 { 7482 int ret; 7483 7484 if (*level == 0 || !allow_down) { 7485 ret = tree_move_next_or_upnext(path, level, root_level); 7486 } else { 7487 ret = tree_move_down(path, level, reada_min_gen); 7488 } 7489 7490 /* 7491 * Even if we have reached the end of a tree (ret is -1), update the key 7492 * anyway, so that in case we need to restart due to a block group 7493 * relocation, we can assert that the last key of the root node still 7494 * exists in the tree. 7495 */ 7496 if (*level == 0) 7497 btrfs_item_key_to_cpu(path->nodes[*level], key, 7498 path->slots[*level]); 7499 else 7500 btrfs_node_key_to_cpu(path->nodes[*level], key, 7501 path->slots[*level]); 7502 7503 return ret; 7504 } 7505 7506 static int tree_compare_item(struct btrfs_path *left_path, 7507 struct btrfs_path *right_path, 7508 char *tmp_buf) 7509 { 7510 int cmp; 7511 int len1, len2; 7512 unsigned long off1, off2; 7513 7514 len1 = btrfs_item_size(left_path->nodes[0], left_path->slots[0]); 7515 len2 = btrfs_item_size(right_path->nodes[0], right_path->slots[0]); 7516 if (len1 != len2) 7517 return 1; 7518 7519 off1 = btrfs_item_ptr_offset(left_path->nodes[0], left_path->slots[0]); 7520 off2 = btrfs_item_ptr_offset(right_path->nodes[0], 7521 right_path->slots[0]); 7522 7523 read_extent_buffer(left_path->nodes[0], tmp_buf, off1, len1); 7524 7525 cmp = memcmp_extent_buffer(right_path->nodes[0], tmp_buf, off2, len1); 7526 if (cmp) 7527 return 1; 7528 return 0; 7529 } 7530 7531 /* 7532 * A transaction used for relocating a block group was committed or is about to 7533 * finish its commit. Release our paths and restart the search, so that we are 7534 * not using stale extent buffers: 7535 * 7536 * 1) For levels > 0, we are only holding references to extent buffers, without 7537 * any locks on them, which does not prevent them from having been relocated 7538 * and reallocated after the last time we released the commit root semaphore. 7539 * The exception are the root nodes, for which we always have a clone, see 7540 * the comment at btrfs_compare_trees(); 7541 * 7542 * 2) For leaves, level 0, we are holding copies (clones) of extent buffers, so 7543 * we are safe from the concurrent relocation and reallocation. However they 7544 * can have file extent items with a pre-relocation disk_bytenr value, so we 7545 * restart the search from the current commit roots and clone the new leaves so 7546 * that we get the post-relocation disk_bytenr values. Not doing so could 7547 * make us clone the wrong data in case there are new extents using the old 7548 * disk_bytenr that happen to be shared. 7549 */ 7550 static int restart_after_relocation(struct btrfs_path *left_path, 7551 struct btrfs_path *right_path, 7552 const struct btrfs_key *left_key, 7553 const struct btrfs_key *right_key, 7554 int left_level, 7555 int right_level, 7556 const struct send_ctx *sctx) 7557 { 7558 int root_level; 7559 int ret; 7560 7561 lockdep_assert_held_read(&sctx->send_root->fs_info->commit_root_sem); 7562 7563 btrfs_release_path(left_path); 7564 btrfs_release_path(right_path); 7565 7566 /* 7567 * Since keys cannot be added to or removed from our roots because they 7568 * are readonly and we do not allow deduplication to run in parallel 7569 * (which can add, remove or change keys), the layout of the trees should 7570 * not change. 7571 */ 7572 left_path->lowest_level = left_level; 7573 ret = search_key_again(sctx, sctx->send_root, left_path, left_key); 7574 if (ret < 0) 7575 return ret; 7576 7577 right_path->lowest_level = right_level; 7578 ret = search_key_again(sctx, sctx->parent_root, right_path, right_key); 7579 if (ret < 0) 7580 return ret; 7581 7582 /* 7583 * If the lowest level nodes are leaves, clone them so that they can be 7584 * safely used by changed_cb() while not under the protection of the 7585 * commit root semaphore, even if relocation and reallocation happen in 7586 * parallel. 7587 */ 7588 if (left_level == 0) { 7589 ret = replace_node_with_clone(left_path, 0); 7590 if (ret < 0) 7591 return ret; 7592 } 7593 7594 if (right_level == 0) { 7595 ret = replace_node_with_clone(right_path, 0); 7596 if (ret < 0) 7597 return ret; 7598 } 7599 7600 /* 7601 * Now clone the root nodes (unless they happen to be the leaves we have 7602 * already cloned). This is to protect against concurrent snapshotting of 7603 * the send and parent roots (see the comment at btrfs_compare_trees()). 7604 */ 7605 root_level = btrfs_header_level(sctx->send_root->commit_root); 7606 if (root_level > 0) { 7607 ret = replace_node_with_clone(left_path, root_level); 7608 if (ret < 0) 7609 return ret; 7610 } 7611 7612 root_level = btrfs_header_level(sctx->parent_root->commit_root); 7613 if (root_level > 0) { 7614 ret = replace_node_with_clone(right_path, root_level); 7615 if (ret < 0) 7616 return ret; 7617 } 7618 7619 return 0; 7620 } 7621 7622 /* 7623 * This function compares two trees and calls the provided callback for 7624 * every changed/new/deleted item it finds. 7625 * If shared tree blocks are encountered, whole subtrees are skipped, making 7626 * the compare pretty fast on snapshotted subvolumes. 7627 * 7628 * This currently works on commit roots only. As commit roots are read only, 7629 * we don't do any locking. The commit roots are protected with transactions. 7630 * Transactions are ended and rejoined when a commit is tried in between. 7631 * 7632 * This function checks for modifications done to the trees while comparing. 7633 * If it detects a change, it aborts immediately. 7634 */ 7635 static int btrfs_compare_trees(struct btrfs_root *left_root, 7636 struct btrfs_root *right_root, struct send_ctx *sctx) 7637 { 7638 struct btrfs_fs_info *fs_info = left_root->fs_info; 7639 int ret; 7640 int cmp; 7641 struct btrfs_path *left_path = NULL; 7642 struct btrfs_path *right_path = NULL; 7643 struct btrfs_key left_key; 7644 struct btrfs_key right_key; 7645 char *tmp_buf = NULL; 7646 int left_root_level; 7647 int right_root_level; 7648 int left_level; 7649 int right_level; 7650 int left_end_reached = 0; 7651 int right_end_reached = 0; 7652 int advance_left = 0; 7653 int advance_right = 0; 7654 u64 left_blockptr; 7655 u64 right_blockptr; 7656 u64 left_gen; 7657 u64 right_gen; 7658 u64 reada_min_gen; 7659 7660 left_path = btrfs_alloc_path(); 7661 if (!left_path) { 7662 ret = -ENOMEM; 7663 goto out; 7664 } 7665 right_path = btrfs_alloc_path(); 7666 if (!right_path) { 7667 ret = -ENOMEM; 7668 goto out; 7669 } 7670 7671 tmp_buf = kvmalloc(fs_info->nodesize, GFP_KERNEL); 7672 if (!tmp_buf) { 7673 ret = -ENOMEM; 7674 goto out; 7675 } 7676 7677 left_path->search_commit_root = 1; 7678 left_path->skip_locking = 1; 7679 right_path->search_commit_root = 1; 7680 right_path->skip_locking = 1; 7681 7682 /* 7683 * Strategy: Go to the first items of both trees. Then do 7684 * 7685 * If both trees are at level 0 7686 * Compare keys of current items 7687 * If left < right treat left item as new, advance left tree 7688 * and repeat 7689 * If left > right treat right item as deleted, advance right tree 7690 * and repeat 7691 * If left == right do deep compare of items, treat as changed if 7692 * needed, advance both trees and repeat 7693 * If both trees are at the same level but not at level 0 7694 * Compare keys of current nodes/leaves 7695 * If left < right advance left tree and repeat 7696 * If left > right advance right tree and repeat 7697 * If left == right compare blockptrs of the next nodes/leaves 7698 * If they match advance both trees but stay at the same level 7699 * and repeat 7700 * If they don't match advance both trees while allowing them 7701 * to go deeper, and repeat 7702 * If tree levels are different 7703 * Advance the tree that needs it and repeat 7704 * 7705 * Advancing a tree means: 7706 * If we are at level 0, try to go to the next slot. If that's not 7707 * possible, go one level up and repeat. Stop when we find a level 7708 * where we can go to the next slot. We may at this point be on a 7709 * node or a leaf. 7710 * 7711 * If we are not at level 0 and not on shared tree blocks, go one 7712 * level deeper. 7713 * 7714 * If we are not at level 0 and on shared tree blocks, go one slot to 7715 * the right if possible or go up and right. 7716 */ 7717 7718 down_read(&fs_info->commit_root_sem); 7719 left_level = btrfs_header_level(left_root->commit_root); 7720 left_root_level = left_level; 7721 /* 7722 * We clone the root node of the send and parent roots to prevent races 7723 * with snapshot creation of these roots. Snapshot creation COWs the 7724 * root node of a tree, so after the transaction is committed the old 7725 * extent can be reallocated while this send operation is still ongoing. 7726 * So we clone them, under the commit root semaphore, to be race free.
7727 */ 7728 left_path->nodes[left_level] = 7729 btrfs_clone_extent_buffer(left_root->commit_root); 7730 if (!left_path->nodes[left_level]) { 7731 ret = -ENOMEM; 7732 goto out_unlock; 7733 } 7734 7735 right_level = btrfs_header_level(right_root->commit_root); 7736 right_root_level = right_level; 7737 right_path->nodes[right_level] = 7738 btrfs_clone_extent_buffer(right_root->commit_root); 7739 if (!right_path->nodes[right_level]) { 7740 ret = -ENOMEM; 7741 goto out_unlock; 7742 } 7743 /* 7744 * Our right root is the parent root, while the left root is the "send" 7745 * root. We know that all new nodes/leaves in the left root must have 7746 * a generation greater than the right root's generation, so we trigger 7747 * readahead for those nodes and leaves of the left root, as we know we 7748 * will need to read them at some point. 7749 */ 7750 reada_min_gen = btrfs_header_generation(right_root->commit_root); 7751 7752 if (left_level == 0) 7753 btrfs_item_key_to_cpu(left_path->nodes[left_level], 7754 &left_key, left_path->slots[left_level]); 7755 else 7756 btrfs_node_key_to_cpu(left_path->nodes[left_level], 7757 &left_key, left_path->slots[left_level]); 7758 if (right_level == 0) 7759 btrfs_item_key_to_cpu(right_path->nodes[right_level], 7760 &right_key, right_path->slots[right_level]); 7761 else 7762 btrfs_node_key_to_cpu(right_path->nodes[right_level], 7763 &right_key, right_path->slots[right_level]); 7764 7765 sctx->last_reloc_trans = fs_info->last_reloc_trans; 7766 7767 while (1) { 7768 if (need_resched() || 7769 rwsem_is_contended(&fs_info->commit_root_sem)) { 7770 up_read(&fs_info->commit_root_sem); 7771 cond_resched(); 7772 down_read(&fs_info->commit_root_sem); 7773 } 7774 7775 if (fs_info->last_reloc_trans > sctx->last_reloc_trans) { 7776 ret = restart_after_relocation(left_path, right_path, 7777 &left_key, &right_key, 7778 left_level, right_level, 7779 sctx); 7780 if (ret < 0) 7781 goto out_unlock; 7782 sctx->last_reloc_trans = fs_info->last_reloc_trans; 7783 } 7784 7785 if (advance_left && !left_end_reached) { 7786 ret = tree_advance(left_path, &left_level, 7787 left_root_level, 7788 advance_left != ADVANCE_ONLY_NEXT, 7789 &left_key, reada_min_gen); 7790 if (ret == -1) 7791 left_end_reached = ADVANCE; 7792 else if (ret < 0) 7793 goto out_unlock; 7794 advance_left = 0; 7795 } 7796 if (advance_right && !right_end_reached) { 7797 ret = tree_advance(right_path, &right_level, 7798 right_root_level, 7799 advance_right != ADVANCE_ONLY_NEXT, 7800 &right_key, reada_min_gen); 7801 if (ret == -1) 7802 right_end_reached = ADVANCE; 7803 else if (ret < 0) 7804 goto out_unlock; 7805 advance_right = 0; 7806 } 7807 7808 if (left_end_reached && right_end_reached) { 7809 ret = 0; 7810 goto out_unlock; 7811 } else if (left_end_reached) { 7812 if (right_level == 0) { 7813 up_read(&fs_info->commit_root_sem); 7814 ret = changed_cb(left_path, right_path, 7815 &right_key, 7816 BTRFS_COMPARE_TREE_DELETED, 7817 sctx); 7818 if (ret < 0) 7819 goto out; 7820 down_read(&fs_info->commit_root_sem); 7821 } 7822 advance_right = ADVANCE; 7823 continue; 7824 } else if (right_end_reached) { 7825 if (left_level == 0) { 7826 up_read(&fs_info->commit_root_sem); 7827 ret = changed_cb(left_path, right_path, 7828 &left_key, 7829 BTRFS_COMPARE_TREE_NEW, 7830 sctx); 7831 if (ret < 0) 7832 goto out; 7833 down_read(&fs_info->commit_root_sem); 7834 } 7835 advance_left = ADVANCE; 7836 continue; 7837 } 7838 7839 if (left_level == 0 && right_level == 0) { 7840 up_read(&fs_info->commit_root_sem); 7841 cmp = 
btrfs_comp_cpu_keys(&left_key, &right_key); 7842 if (cmp < 0) { 7843 ret = changed_cb(left_path, right_path, 7844 &left_key, 7845 BTRFS_COMPARE_TREE_NEW, 7846 sctx); 7847 advance_left = ADVANCE; 7848 } else if (cmp > 0) { 7849 ret = changed_cb(left_path, right_path, 7850 &right_key, 7851 BTRFS_COMPARE_TREE_DELETED, 7852 sctx); 7853 advance_right = ADVANCE; 7854 } else { 7855 enum btrfs_compare_tree_result result; 7856 7857 WARN_ON(!extent_buffer_uptodate(left_path->nodes[0])); 7858 ret = tree_compare_item(left_path, right_path, 7859 tmp_buf); 7860 if (ret) 7861 result = BTRFS_COMPARE_TREE_CHANGED; 7862 else 7863 result = BTRFS_COMPARE_TREE_SAME; 7864 ret = changed_cb(left_path, right_path, 7865 &left_key, result, sctx); 7866 advance_left = ADVANCE; 7867 advance_right = ADVANCE; 7868 } 7869 7870 if (ret < 0) 7871 goto out; 7872 down_read(&fs_info->commit_root_sem); 7873 } else if (left_level == right_level) { 7874 cmp = btrfs_comp_cpu_keys(&left_key, &right_key); 7875 if (cmp < 0) { 7876 advance_left = ADVANCE; 7877 } else if (cmp > 0) { 7878 advance_right = ADVANCE; 7879 } else { 7880 left_blockptr = btrfs_node_blockptr( 7881 left_path->nodes[left_level], 7882 left_path->slots[left_level]); 7883 right_blockptr = btrfs_node_blockptr( 7884 right_path->nodes[right_level], 7885 right_path->slots[right_level]); 7886 left_gen = btrfs_node_ptr_generation( 7887 left_path->nodes[left_level], 7888 left_path->slots[left_level]); 7889 right_gen = btrfs_node_ptr_generation( 7890 right_path->nodes[right_level], 7891 right_path->slots[right_level]); 7892 if (left_blockptr == right_blockptr && 7893 left_gen == right_gen) { 7894 /* 7895 * As we're on a shared block, don't 7896 * allow going deeper. 7897 */ 7898 advance_left = ADVANCE_ONLY_NEXT; 7899 advance_right = ADVANCE_ONLY_NEXT; 7900 } else { 7901 advance_left = ADVANCE; 7902 advance_right = ADVANCE; 7903 } 7904 } 7905 } else if (left_level < right_level) { 7906 advance_right = ADVANCE; 7907 } else { 7908 advance_left = ADVANCE; 7909 } 7910 } 7911 7912 out_unlock: 7913 up_read(&fs_info->commit_root_sem); 7914 out: 7915 btrfs_free_path(left_path); 7916 btrfs_free_path(right_path); 7917 kvfree(tmp_buf); 7918 return ret; 7919 } 7920 7921 static int send_subvol(struct send_ctx *sctx) 7922 { 7923 int ret; 7924 7925 if (!(sctx->flags & BTRFS_SEND_FLAG_OMIT_STREAM_HEADER)) { 7926 ret = send_header(sctx); 7927 if (ret < 0) 7928 goto out; 7929 } 7930 7931 ret = send_subvol_begin(sctx); 7932 if (ret < 0) 7933 goto out; 7934 7935 if (sctx->parent_root) { 7936 ret = btrfs_compare_trees(sctx->send_root, sctx->parent_root, sctx); 7937 if (ret < 0) 7938 goto out; 7939 ret = finish_inode_if_needed(sctx, 1); 7940 if (ret < 0) 7941 goto out; 7942 } else { 7943 ret = full_send_tree(sctx); 7944 if (ret < 0) 7945 goto out; 7946 } 7947 7948 out: 7949 free_recorded_refs(sctx); 7950 return ret; 7951 } 7952 7953 /* 7954 * If orphan cleanup did remove any orphans from a root, it means the tree 7955 * was modified and therefore the commit root is not the same as the current 7956 * root anymore. This is a problem, because send uses the commit root and 7957 * therefore can see inode items that don't exist in the current root anymore, 7958 * and for example make calls to btrfs_iget, which will do tree lookups based 7959 * on the current root and not on the commit root. Those lookups will fail, 7960 * returning a -ESTALE error, and making send fail with that error.
So make 7961 * sure a send does not see any orphans we have just removed, and that it will 7962 * see the same inodes regardless of whether a transaction commit happened 7963 * before it started (meaning that the commit root will be the same as the 7964 * current root) or not. 7965 */ 7966 static int ensure_commit_roots_uptodate(struct send_ctx *sctx) 7967 { 7968 int i; 7969 struct btrfs_trans_handle *trans = NULL; 7970 7971 again: 7972 if (sctx->parent_root && 7973 sctx->parent_root->node != sctx->parent_root->commit_root) 7974 goto commit_trans; 7975 7976 for (i = 0; i < sctx->clone_roots_cnt; i++) 7977 if (sctx->clone_roots[i].root->node != 7978 sctx->clone_roots[i].root->commit_root) 7979 goto commit_trans; 7980 7981 if (trans) 7982 return btrfs_end_transaction(trans); 7983 7984 return 0; 7985 7986 commit_trans: 7987 /* Use any root, all fs roots will get their commit roots updated. */ 7988 if (!trans) { 7989 trans = btrfs_join_transaction(sctx->send_root); 7990 if (IS_ERR(trans)) 7991 return PTR_ERR(trans); 7992 goto again; 7993 } 7994 7995 return btrfs_commit_transaction(trans); 7996 } 7997 7998 /* 7999 * Make sure any existing dellaloc is flushed for any root used by a send 8000 * operation so that we do not miss any data and we do not race with writeback 8001 * finishing and changing a tree while send is using the tree. This could 8002 * happen if a subvolume is in RW mode, has delalloc, is turned to RO mode and 8003 * a send operation then uses the subvolume. 8004 * After flushing delalloc ensure_commit_roots_uptodate() must be called. 8005 */ 8006 static int flush_delalloc_roots(struct send_ctx *sctx) 8007 { 8008 struct btrfs_root *root = sctx->parent_root; 8009 int ret; 8010 int i; 8011 8012 if (root) { 8013 ret = btrfs_start_delalloc_snapshot(root, false); 8014 if (ret) 8015 return ret; 8016 btrfs_wait_ordered_extents(root, U64_MAX, 0, U64_MAX); 8017 } 8018 8019 for (i = 0; i < sctx->clone_roots_cnt; i++) { 8020 root = sctx->clone_roots[i].root; 8021 ret = btrfs_start_delalloc_snapshot(root, false); 8022 if (ret) 8023 return ret; 8024 btrfs_wait_ordered_extents(root, U64_MAX, 0, U64_MAX); 8025 } 8026 8027 return 0; 8028 } 8029 8030 static void btrfs_root_dec_send_in_progress(struct btrfs_root* root) 8031 { 8032 spin_lock(&root->root_item_lock); 8033 root->send_in_progress--; 8034 /* 8035 * Not much left to do, we don't know why it's unbalanced and 8036 * can't blindly reset it to 0. 

static void btrfs_root_dec_send_in_progress(struct btrfs_root *root)
{
	spin_lock(&root->root_item_lock);
	root->send_in_progress--;
	/*
	 * Not much left to do, we don't know why it's unbalanced and
	 * can't blindly reset it to 0.
	 */
	if (root->send_in_progress < 0)
		btrfs_err(root->fs_info,
			  "send_in_progress unbalanced %d root %llu",
			  root->send_in_progress, root->root_key.objectid);
	spin_unlock(&root->root_item_lock);
}

static void dedupe_in_progress_warn(const struct btrfs_root *root)
{
	btrfs_warn_rl(root->fs_info,
"cannot use root %llu for send while deduplications on it are in progress (%d in progress)",
		      root->root_key.objectid, root->dedupe_in_progress);
}

long btrfs_ioctl_send(struct inode *inode, struct btrfs_ioctl_send_args *arg)
{
	int ret = 0;
	struct btrfs_root *send_root = BTRFS_I(inode)->root;
	struct btrfs_fs_info *fs_info = send_root->fs_info;
	struct btrfs_root *clone_root;
	struct send_ctx *sctx = NULL;
	u32 i;
	u64 *clone_sources_tmp = NULL;
	int clone_sources_to_rollback = 0;
	size_t alloc_size;
	int sort_clone_roots = 0;
	struct btrfs_lru_cache_entry *entry;
	struct btrfs_lru_cache_entry *tmp;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/*
	 * The subvolume must remain read-only during send, protect against
	 * making it RW. This also protects against deletion.
	 */
	spin_lock(&send_root->root_item_lock);
	if (btrfs_root_readonly(send_root) && send_root->dedupe_in_progress) {
		dedupe_in_progress_warn(send_root);
		spin_unlock(&send_root->root_item_lock);
		return -EAGAIN;
	}
	send_root->send_in_progress++;
	spin_unlock(&send_root->root_item_lock);

	/*
	 * Userspace tools do the checks and warn the user if it's
	 * not RO.
	 */
	if (!btrfs_root_readonly(send_root)) {
		ret = -EPERM;
		goto out;
	}

	/*
	 * Check that we don't overflow at later allocations, we request
	 * clone_sources_count + 1 items, and compare to unsigned long inside
	 * access_ok. Also set an upper limit for allocation size so this can't
	 * easily exhaust memory. Max number of clone sources is about 200K.
	 */
	if (arg->clone_sources_count > SZ_8M / sizeof(struct clone_root)) {
		ret = -EINVAL;
		goto out;
	}

	if (arg->flags & ~BTRFS_SEND_FLAG_MASK) {
		ret = -EINVAL;
		goto out;
	}

	sctx = kzalloc(sizeof(struct send_ctx), GFP_KERNEL);
	if (!sctx) {
		ret = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&sctx->new_refs);
	INIT_LIST_HEAD(&sctx->deleted_refs);

	btrfs_lru_cache_init(&sctx->name_cache, SEND_MAX_NAME_CACHE_SIZE);
	btrfs_lru_cache_init(&sctx->backref_cache, SEND_MAX_BACKREF_CACHE_SIZE);
	btrfs_lru_cache_init(&sctx->dir_created_cache,
			     SEND_MAX_DIR_CREATED_CACHE_SIZE);
	/*
	 * This cache is periodically trimmed to a fixed size elsewhere, see
	 * cache_dir_utimes() and trim_dir_utimes_cache().
	 */
	btrfs_lru_cache_init(&sctx->dir_utimes_cache, 0);

	sctx->pending_dir_moves = RB_ROOT;
	sctx->waiting_dir_moves = RB_ROOT;
	sctx->orphan_dirs = RB_ROOT;
	sctx->rbtree_new_refs = RB_ROOT;
	sctx->rbtree_deleted_refs = RB_ROOT;

	sctx->flags = arg->flags;

	if (arg->flags & BTRFS_SEND_FLAG_VERSION) {
		if (arg->version > BTRFS_SEND_STREAM_VERSION) {
			ret = -EPROTO;
			goto out;
		}
		/* Zero means "use the highest version" */
		sctx->proto = arg->version ?: BTRFS_SEND_STREAM_VERSION;
	} else {
		sctx->proto = 1;
	}
	if ((arg->flags & BTRFS_SEND_FLAG_COMPRESSED) && sctx->proto < 2) {
		ret = -EINVAL;
		goto out;
	}
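
	/*
	 * Summary of the negotiation above (illustrative; assumes
	 * BTRFS_SEND_STREAM_VERSION == 2, which may differ on newer kernels):
	 *
	 *	no BTRFS_SEND_FLAG_VERSION		-> proto = 1
	 *	BTRFS_SEND_FLAG_VERSION, version == 0	-> proto = 2 (highest)
	 *	BTRFS_SEND_FLAG_VERSION, version == 1	-> proto = 1
	 *	BTRFS_SEND_FLAG_VERSION, version == 3	-> -EPROTO (too new)
	 *
	 * BTRFS_SEND_FLAG_COMPRESSED additionally requires proto >= 2.
	 */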

	sctx->send_filp = fget(arg->send_fd);
	if (!sctx->send_filp) {
		ret = -EBADF;
		goto out;
	}

	sctx->send_root = send_root;
	/*
	 * Unlikely but possible, if the subvolume is marked for deletion but
	 * is slow to remove the directory entry, send can still be started.
	 */
	if (btrfs_root_dead(sctx->send_root)) {
		ret = -EPERM;
		goto out;
	}

	sctx->clone_roots_cnt = arg->clone_sources_count;

	if (sctx->proto >= 2) {
		u32 send_buf_num_pages;

		sctx->send_max_size = BTRFS_SEND_BUF_SIZE_V2;
		sctx->send_buf = vmalloc(sctx->send_max_size);
		if (!sctx->send_buf) {
			ret = -ENOMEM;
			goto out;
		}
		send_buf_num_pages = sctx->send_max_size >> PAGE_SHIFT;
		sctx->send_buf_pages = kcalloc(send_buf_num_pages,
					       sizeof(*sctx->send_buf_pages),
					       GFP_KERNEL);
		if (!sctx->send_buf_pages) {
			ret = -ENOMEM;
			goto out;
		}
		for (i = 0; i < send_buf_num_pages; i++) {
			sctx->send_buf_pages[i] =
				vmalloc_to_page(sctx->send_buf + (i << PAGE_SHIFT));
		}
	} else {
		sctx->send_max_size = BTRFS_SEND_BUF_SIZE_V1;
		sctx->send_buf = kvmalloc(sctx->send_max_size, GFP_KERNEL);
	}
	if (!sctx->send_buf) {
		ret = -ENOMEM;
		goto out;
	}

	sctx->clone_roots = kvcalloc(arg->clone_sources_count + 1,
				     sizeof(*sctx->clone_roots),
				     GFP_KERNEL);
	if (!sctx->clone_roots) {
		ret = -ENOMEM;
		goto out;
	}

	alloc_size = array_size(sizeof(*arg->clone_sources),
				arg->clone_sources_count);

	if (arg->clone_sources_count) {
		clone_sources_tmp = kvmalloc(alloc_size, GFP_KERNEL);
		if (!clone_sources_tmp) {
			ret = -ENOMEM;
			goto out;
		}

		ret = copy_from_user(clone_sources_tmp, arg->clone_sources,
				     alloc_size);
		if (ret) {
			ret = -EFAULT;
			goto out;
		}

		for (i = 0; i < arg->clone_sources_count; i++) {
			clone_root = btrfs_get_fs_root(fs_info,
						       clone_sources_tmp[i], true);
			if (IS_ERR(clone_root)) {
				ret = PTR_ERR(clone_root);
				goto out;
			}
			spin_lock(&clone_root->root_item_lock);
			if (!btrfs_root_readonly(clone_root) ||
			    btrfs_root_dead(clone_root)) {
				spin_unlock(&clone_root->root_item_lock);
				btrfs_put_root(clone_root);
				ret = -EPERM;
				goto out;
			}
			if (clone_root->dedupe_in_progress) {
				dedupe_in_progress_warn(clone_root);
				spin_unlock(&clone_root->root_item_lock);
				btrfs_put_root(clone_root);
				ret = -EAGAIN;
				goto out;
			}
			clone_root->send_in_progress++;
			spin_unlock(&clone_root->root_item_lock);

			sctx->clone_roots[i].root = clone_root;
			clone_sources_to_rollback = i + 1;
		}
		kvfree(clone_sources_tmp);
		clone_sources_tmp = NULL;
	}
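
	/*
	 * At this point sctx->clone_roots[] holds one entry per user supplied
	 * clone source; the extra slot accounted for by the "+ 1" in the
	 * kvcalloc() above is filled with the send root itself just below,
	 * before the array is sorted for bsearch. Illustrative layout for
	 * clone_sources_count == 2:
	 *
	 *	clone_roots[0]	first clone source root
	 *	clone_roots[1]	second clone source root
	 *	clone_roots[2]	send_root (appended below)
	 */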

	if (arg->parent_root) {
		sctx->parent_root = btrfs_get_fs_root(fs_info, arg->parent_root,
						      true);
		if (IS_ERR(sctx->parent_root)) {
			ret = PTR_ERR(sctx->parent_root);
			goto out;
		}

		spin_lock(&sctx->parent_root->root_item_lock);
		sctx->parent_root->send_in_progress++;
		if (!btrfs_root_readonly(sctx->parent_root) ||
		    btrfs_root_dead(sctx->parent_root)) {
			spin_unlock(&sctx->parent_root->root_item_lock);
			ret = -EPERM;
			goto out;
		}
		if (sctx->parent_root->dedupe_in_progress) {
			dedupe_in_progress_warn(sctx->parent_root);
			spin_unlock(&sctx->parent_root->root_item_lock);
			ret = -EAGAIN;
			goto out;
		}
		spin_unlock(&sctx->parent_root->root_item_lock);
	}

	/*
	 * Clones from send_root are allowed, but only if the clone source
	 * is behind the current send position. This is checked while searching
	 * for possible clone sources.
	 */
	sctx->clone_roots[sctx->clone_roots_cnt++].root =
		btrfs_grab_root(sctx->send_root);

	/* We do a bsearch later */
	sort(sctx->clone_roots, sctx->clone_roots_cnt,
	     sizeof(*sctx->clone_roots), __clone_root_cmp_sort,
	     NULL);
	sort_clone_roots = 1;

	ret = flush_delalloc_roots(sctx);
	if (ret)
		goto out;

	ret = ensure_commit_roots_uptodate(sctx);
	if (ret)
		goto out;

	ret = send_subvol(sctx);
	if (ret < 0)
		goto out;

	btrfs_lru_cache_for_each_entry_safe(&sctx->dir_utimes_cache, entry, tmp) {
		ret = send_utimes(sctx, entry->key, entry->gen);
		if (ret < 0)
			goto out;
		btrfs_lru_cache_remove(&sctx->dir_utimes_cache, entry);
	}

	if (!(sctx->flags & BTRFS_SEND_FLAG_OMIT_END_CMD)) {
		ret = begin_cmd(sctx, BTRFS_SEND_C_END);
		if (ret < 0)
			goto out;
		ret = send_cmd(sctx);
		if (ret < 0)
			goto out;
	}

out:
	WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->pending_dir_moves));
	while (sctx && !RB_EMPTY_ROOT(&sctx->pending_dir_moves)) {
		struct rb_node *n;
		struct pending_dir_move *pm;

		n = rb_first(&sctx->pending_dir_moves);
		pm = rb_entry(n, struct pending_dir_move, node);
		while (!list_empty(&pm->list)) {
			struct pending_dir_move *pm2;

			pm2 = list_first_entry(&pm->list,
					       struct pending_dir_move, list);
			free_pending_move(sctx, pm2);
		}
		free_pending_move(sctx, pm);
	}

	WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->waiting_dir_moves));
	while (sctx && !RB_EMPTY_ROOT(&sctx->waiting_dir_moves)) {
		struct rb_node *n;
		struct waiting_dir_move *dm;

		n = rb_first(&sctx->waiting_dir_moves);
		dm = rb_entry(n, struct waiting_dir_move, node);
		rb_erase(&dm->node, &sctx->waiting_dir_moves);
		kfree(dm);
	}

	WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->orphan_dirs));
	while (sctx && !RB_EMPTY_ROOT(&sctx->orphan_dirs)) {
		struct rb_node *n;
		struct orphan_dir_info *odi;

		n = rb_first(&sctx->orphan_dirs);
		odi = rb_entry(n, struct orphan_dir_info, node);
		free_orphan_dir_info(sctx, odi);
	}
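
	/*
	 * Drop the send_in_progress counters and root references taken above.
	 * If the clone_roots array was already sorted, the original order of
	 * the user supplied sources is lost, so every populated slot
	 * (including the send root appended at the end) must be unwound.
	 * Otherwise only the first clone_sources_to_rollback entries were set
	 * up, and send_root still needs its own counter dropped separately.
	 */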
	if (sort_clone_roots) {
		for (i = 0; i < sctx->clone_roots_cnt; i++) {
			btrfs_root_dec_send_in_progress(
					sctx->clone_roots[i].root);
			btrfs_put_root(sctx->clone_roots[i].root);
		}
	} else {
		for (i = 0; sctx && i < clone_sources_to_rollback; i++) {
			btrfs_root_dec_send_in_progress(
					sctx->clone_roots[i].root);
			btrfs_put_root(sctx->clone_roots[i].root);
		}

		btrfs_root_dec_send_in_progress(send_root);
	}
	if (sctx && !IS_ERR_OR_NULL(sctx->parent_root)) {
		btrfs_root_dec_send_in_progress(sctx->parent_root);
		btrfs_put_root(sctx->parent_root);
	}

	kvfree(clone_sources_tmp);

	if (sctx) {
		if (sctx->send_filp)
			fput(sctx->send_filp);

		kvfree(sctx->clone_roots);
		kfree(sctx->send_buf_pages);
		kvfree(sctx->send_buf);
		kvfree(sctx->verity_descriptor);

		close_current_inode(sctx);

		btrfs_lru_cache_clear(&sctx->name_cache);
		btrfs_lru_cache_clear(&sctx->backref_cache);
		btrfs_lru_cache_clear(&sctx->dir_created_cache);
		btrfs_lru_cache_clear(&sctx->dir_utimes_cache);

		kfree(sctx);
	}

	return ret;
}
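
/*
 * Example (userspace side, for illustration only; this file implements the
 * kernel half): a minimal caller of the send ioctl, writing the stream to a
 * pipe. Struct fields and flag names are from uapi/linux/btrfs.h; the
 * variables pipe_write_fd, parent_subvol_id and subvol_fd are hypothetical,
 * and error handling is omitted for brevity.
 *
 *	struct btrfs_ioctl_send_args args = { 0 };
 *
 *	args.send_fd = pipe_write_fd;
 *	args.parent_root = parent_subvol_id;	// 0 for a full send
 *	args.flags = BTRFS_SEND_FLAG_VERSION;
 *	args.version = 0;			// 0 = highest supported
 *	ret = ioctl(subvol_fd, BTRFS_IOC_SEND, &args);
 */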