// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2012 Alexander Block. All rights reserved.
 */

#include <linux/bsearch.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/sort.h>
#include <linux/mount.h>
#include <linux/xattr.h>
#include <linux/posix_acl_xattr.h>
#include <linux/radix-tree.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/compat.h>
#include <linux/crc32c.h>

#include "send.h"
#include "backref.h"
#include "locking.h"
#include "disk-io.h"
#include "btrfs_inode.h"
#include "transaction.h"
#include "compression.h"

/*
 * A fs_path is a helper to dynamically build path names with unknown size.
 * It reallocates the internal buffer on demand.
 * It allows fast adding of path elements on the right side (normal path) and
 * fast adding to the left side (reversed path). A reversed path can also be
 * unreversed if needed.
 */
struct fs_path {
	union {
		struct {
			char *start;
			char *end;

			char *buf;
			unsigned short buf_len:15;
			unsigned short reversed:1;
			char inline_buf[];
		};
		/*
		 * Average path length does not exceed 200 bytes, we'll have
		 * better packing in the slab and higher chance to satisfy
		 * an allocation later during send.
		 */
		char pad[256];
	};
};
#define FS_PATH_INLINE_SIZE \
	(sizeof(struct fs_path) - offsetof(struct fs_path, inline_buf))

/* reused for each extent */
struct clone_root {
	struct btrfs_root *root;
	u64 ino;
	u64 offset;

	u64 found_refs;
};

#define SEND_CTX_MAX_NAME_CACHE_SIZE 128
#define SEND_CTX_NAME_CACHE_CLEAN_SIZE (SEND_CTX_MAX_NAME_CACHE_SIZE * 2)

struct send_ctx {
	struct file *send_filp;
	loff_t send_off;
	char *send_buf;
	u32 send_size;
	u32 send_max_size;
	u64 total_send_size;
	u64 cmd_send_size[BTRFS_SEND_C_MAX + 1];
	u64 flags;	/* 'flags' member of btrfs_ioctl_send_args is u64 */

	struct btrfs_root *send_root;
	struct btrfs_root *parent_root;
	struct clone_root *clone_roots;
	int clone_roots_cnt;

	/* current state of the compare_tree call */
	struct btrfs_path *left_path;
	struct btrfs_path *right_path;
	struct btrfs_key *cmp_key;

	/*
	 * Info about the currently processed inode. In case of deleted inodes,
	 * these are the values from the deleted inode.
	 */
	u64 cur_ino;
	u64 cur_inode_gen;
	int cur_inode_new;
	int cur_inode_new_gen;
	int cur_inode_deleted;
	u64 cur_inode_size;
	u64 cur_inode_mode;
	u64 cur_inode_rdev;
	u64 cur_inode_last_extent;
	u64 cur_inode_next_write_offset;

	u64 send_progress;

	struct list_head new_refs;
	struct list_head deleted_refs;

	struct radix_tree_root name_cache;
	struct list_head name_cache_list;
	int name_cache_size;

	struct file_ra_state ra;

	char *read_buf;

	/*
	 * We process inodes in increasing order, so if before an
	 * incremental send we reverse the parent/child relationship of
	 * directories such that a directory with a lower inode number was
	 * the parent of a directory with a higher inode number, and the one
	 * becoming the new parent got renamed too, we can't rename/move the
	 * directory with lower inode number when we finish processing it - we
	 * must process the directory with higher inode number first, then
	 * rename/move it and then rename/move the directory with lower inode
	 * number. Example follows.
	 *
	 * Tree state when the first send was performed:
	 *
	 * .
	 * |-- a                        (ino 257)
	 *     |-- b                    (ino 258)
	 *         |
	 *         |
	 *         |-- c                (ino 259)
	 *         |   |-- d            (ino 260)
	 *         |
	 *         |-- c2               (ino 261)
	 *
	 * Tree state when the second (incremental) send is performed:
	 *
	 * .
	 * |-- a                        (ino 257)
	 *     |-- b                    (ino 258)
	 *         |-- c2               (ino 261)
	 *             |-- d2           (ino 260)
	 *                 |-- cc       (ino 259)
	 *
	 * The sequence of steps that led to the second state was:
	 *
	 * mv /a/b/c/d /a/b/c2/d2
	 * mv /a/b/c /a/b/c2/d2/cc
	 *
	 * "c" has lower inode number, but we can't move it (2nd mv operation)
	 * before we move "d", which has higher inode number.
	 *
	 * So we just memorize which move/rename operations must be performed
	 * later when their respective parent is processed and moved/renamed.
	 */

	/* Indexed by parent directory inode number. */
	struct rb_root pending_dir_moves;

	/*
	 * Reverse index, indexed by the inode number of a directory that
	 * is waiting for the move/rename of its immediate parent before its
	 * own move/rename can be performed.
	 */
	struct rb_root waiting_dir_moves;

	/*
	 * A directory that is going to be rm'ed might have a child directory
	 * which is in the pending directory moves index above. In this case,
	 * the directory can only be removed after the move/rename of its child
	 * is performed. Example:
	 *
	 * Parent snapshot:
	 *
	 * .                                                     (ino 256)
	 * |-- a/                                                (ino 257)
	 *     |-- b/                                            (ino 258)
	 *         |-- c/                                        (ino 259)
	 *         |   |-- x/                                    (ino 260)
	 *         |
	 *         |-- y/                                        (ino 261)
	 *
	 * Send snapshot:
	 *
	 * .                                                     (ino 256)
	 * |-- a/                                                (ino 257)
	 *     |-- b/                                            (ino 258)
	 *         |-- YY/                                       (ino 261)
	 *              |-- x/                                   (ino 260)
	 *
	 * Sequence of steps that led to the send snapshot:
	 * rm -f /a/b/c/foo.txt
	 * mv /a/b/y /a/b/YY
	 * mv /a/b/c/x /a/b/YY
	 * rmdir /a/b/c
	 *
	 * When the child is processed, its move/rename is delayed until its
	 * parent is processed (as explained above), but all other operations
	 * like update utimes, chown, chgrp, etc, are performed and the paths
	 * that it uses for those operations must use the orphanized name of
	 * its parent (the directory we're going to rm later), so we need to
	 * memorize that name.
	 *
	 * Indexed by the inode number of the directory to be deleted.
	 */
	struct rb_root orphan_dirs;
};

struct pending_dir_move {
	struct rb_node node;
	struct list_head list;
	u64 parent_ino;
	u64 ino;
	u64 gen;
	struct list_head update_refs;
};

struct waiting_dir_move {
	struct rb_node node;
	u64 ino;
	/*
	 * There might be some directory that could not be removed because it
	 * was waiting for this directory inode to be moved first. Therefore
	 * after this directory is moved, we can try to rmdir the ino rmdir_ino.
	 */
	u64 rmdir_ino;
	bool orphanized;
};

struct orphan_dir_info {
	struct rb_node node;
	u64 ino;
	u64 gen;
	u64 last_dir_index_offset;
};

struct name_cache_entry {
	struct list_head list;
	/*
	 * radix_tree has only 32bit entries but we need to handle 64bit inums.
	 * We use the lower 32bit of the 64bit inum to store it in the tree. If
	 * more than one inum would fall into the same entry, we use radix_list
	 * to store the additional entries. radix_list is also used to store
	 * entries where two entries have the same inum but different
	 * generations.
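	 * Entries are additionally linked into sctx->name_cache_list, which
	 * name_cache_clean_unused() uses to expire the least recently used
	 * entries once the cache grows too big.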
	 */
	struct list_head radix_list;
	u64 ino;
	u64 gen;
	u64 parent_ino;
	u64 parent_gen;
	int ret;
	int need_later_update;
	int name_len;
	char name[];
};

__cold
static void inconsistent_snapshot_error(struct send_ctx *sctx,
					enum btrfs_compare_tree_result result,
					const char *what)
{
	const char *result_string;

	switch (result) {
	case BTRFS_COMPARE_TREE_NEW:
		result_string = "new";
		break;
	case BTRFS_COMPARE_TREE_DELETED:
		result_string = "deleted";
		break;
	case BTRFS_COMPARE_TREE_CHANGED:
		result_string = "updated";
		break;
	case BTRFS_COMPARE_TREE_SAME:
		ASSERT(0);
		result_string = "unchanged";
		break;
	default:
		ASSERT(0);
		result_string = "unexpected";
	}

	btrfs_err(sctx->send_root->fs_info,
		  "Send: inconsistent snapshot, found %s %s for inode %llu without updated inode item, send root is %llu, parent root is %llu",
		  result_string, what, sctx->cmp_key->objectid,
		  sctx->send_root->root_key.objectid,
		  (sctx->parent_root ?
		   sctx->parent_root->root_key.objectid : 0));
}

static int is_waiting_for_move(struct send_ctx *sctx, u64 ino);

static struct waiting_dir_move *
get_waiting_dir_move(struct send_ctx *sctx, u64 ino);

static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino);

static int need_send_hole(struct send_ctx *sctx)
{
	return (sctx->parent_root && !sctx->cur_inode_new &&
		!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted &&
		S_ISREG(sctx->cur_inode_mode));
}

static void fs_path_reset(struct fs_path *p)
{
	if (p->reversed) {
		p->start = p->buf + p->buf_len - 1;
		p->end = p->start;
		*p->start = 0;
	} else {
		p->start = p->buf;
		p->end = p->start;
		*p->start = 0;
	}
}

static struct fs_path *fs_path_alloc(void)
{
	struct fs_path *p;

	p = kmalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return NULL;
	p->reversed = 0;
	p->buf = p->inline_buf;
	p->buf_len = FS_PATH_INLINE_SIZE;
	fs_path_reset(p);
	return p;
}

static struct fs_path *fs_path_alloc_reversed(void)
{
	struct fs_path *p;

	p = fs_path_alloc();
	if (!p)
		return NULL;
	p->reversed = 1;
	fs_path_reset(p);
	return p;
}

static void fs_path_free(struct fs_path *p)
{
	if (!p)
		return;
	if (p->buf != p->inline_buf)
		kfree(p->buf);
	kfree(p);
}

static int fs_path_len(struct fs_path *p)
{
	return p->end - p->start;
}
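/*
 * Make sure the buffer can hold a path of @len bytes plus the terminating
 * NUL. Anything that would exceed PATH_MAX is rejected. For reversed paths
 * the existing content is moved to the end of the newly grown buffer.
 */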
static int fs_path_ensure_buf(struct fs_path *p, int len)
{
	char *tmp_buf;
	int path_len;
	int old_buf_len;

	len++;

	if (p->buf_len >= len)
		return 0;

	if (len > PATH_MAX) {
		WARN_ON(1);
		return -ENOMEM;
	}

	path_len = p->end - p->start;
	old_buf_len = p->buf_len;

	/*
	 * First time the inline_buf does not suffice
	 */
	if (p->buf == p->inline_buf) {
		tmp_buf = kmalloc(len, GFP_KERNEL);
		if (tmp_buf)
			memcpy(tmp_buf, p->buf, old_buf_len);
	} else {
		tmp_buf = krealloc(p->buf, len, GFP_KERNEL);
	}
	if (!tmp_buf)
		return -ENOMEM;
	p->buf = tmp_buf;
	/*
	 * The real size of the buffer is bigger, this will let the fast path
	 * happen most of the time
	 */
	p->buf_len = ksize(p->buf);

	if (p->reversed) {
		tmp_buf = p->buf + old_buf_len - path_len - 1;
		p->end = p->buf + p->buf_len - 1;
		p->start = p->end - path_len;
		memmove(p->start, tmp_buf, path_len + 1);
	} else {
		p->start = p->buf;
		p->end = p->start + path_len;
	}
	return 0;
}

static int fs_path_prepare_for_add(struct fs_path *p, int name_len,
				   char **prepared)
{
	int ret;
	int new_len;

	new_len = p->end - p->start + name_len;
	if (p->start != p->end)
		new_len++;
	ret = fs_path_ensure_buf(p, new_len);
	if (ret < 0)
		goto out;

	if (p->reversed) {
		if (p->start != p->end)
			*--p->start = '/';
		p->start -= name_len;
		*prepared = p->start;
	} else {
		if (p->start != p->end)
			*p->end++ = '/';
		*prepared = p->end;
		p->end += name_len;
		*p->end = 0;
	}

out:
	return ret;
}

static int fs_path_add(struct fs_path *p, const char *name, int name_len)
{
	int ret;
	char *prepared;

	ret = fs_path_prepare_for_add(p, name_len, &prepared);
	if (ret < 0)
		goto out;
	memcpy(prepared, name, name_len);

out:
	return ret;
}

static int fs_path_add_path(struct fs_path *p, struct fs_path *p2)
{
	int ret;
	char *prepared;

	ret = fs_path_prepare_for_add(p, p2->end - p2->start, &prepared);
	if (ret < 0)
		goto out;
	memcpy(prepared, p2->start, p2->end - p2->start);

out:
	return ret;
}

static int fs_path_add_from_extent_buffer(struct fs_path *p,
					  struct extent_buffer *eb,
					  unsigned long off, int len)
{
	int ret;
	char *prepared;

	ret = fs_path_prepare_for_add(p, len, &prepared);
	if (ret < 0)
		goto out;

	read_extent_buffer(eb, prepared, off, len);

out:
	return ret;
}

static int fs_path_copy(struct fs_path *p, struct fs_path *from)
{
	int ret;

	p->reversed = from->reversed;
	fs_path_reset(p);

	ret = fs_path_add_path(p, from);

	return ret;
}

static void fs_path_unreverse(struct fs_path *p)
{
	char *tmp;
	int len;

	if (!p->reversed)
		return;

	tmp = p->start;
	len = p->end - p->start;
	p->start = p->buf;
	p->end = p->start + len;
	memmove(p->start, tmp, len + 1);
	p->reversed = 0;
}

static struct btrfs_path *alloc_path_for_send(void)
{
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return NULL;
	path->search_commit_root = 1;
	path->skip_locking = 1;
	path->need_commit_sem = 1;
	return path;
}

static int write_buf(struct file *filp, const void *buf, u32 len, loff_t *off)
{
	int ret;
	u32 pos = 0;

	while (pos < len) {
		ret = kernel_write(filp, buf + pos, len - pos, off);
		/* TODO handle that correctly */
		/*if (ret == -ERESTARTSYS) {
			continue;
		}*/
		if (ret < 0)
			return ret;
		if (ret == 0)
			return -EIO;
		pos += ret;
	}

	return 0;
}

static int tlv_put(struct send_ctx *sctx, u16 attr, const void *data, int len)
{
	struct btrfs_tlv_header *hdr;
	int total_len = sizeof(*hdr) + len;
	int left = sctx->send_max_size - sctx->send_size;

	if (unlikely(left < total_len))
		return -EOVERFLOW;

	hdr = (struct btrfs_tlv_header *)(sctx->send_buf + sctx->send_size);
	hdr->tlv_type = cpu_to_le16(attr);
	hdr->tlv_len = cpu_to_le16(len);
	memcpy(hdr + 1, data, len);
	sctx->send_size += total_len;

	return 0;
}
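/*
 * Attributes are appended to the current command in send_buf as TLVs: a
 * struct btrfs_tlv_header holding the little endian attribute type and
 * payload length, immediately followed by the payload bytes.
 */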
#define TLV_PUT_DEFINE_INT(bits) \
	static int tlv_put_u##bits(struct send_ctx *sctx,		\
				   u##bits attr, u##bits value)		\
	{								\
		__le##bits __tmp = cpu_to_le##bits(value);		\
		return tlv_put(sctx, attr, &__tmp, sizeof(__tmp));	\
	}

TLV_PUT_DEFINE_INT(64)

static int tlv_put_string(struct send_ctx *sctx, u16 attr,
			  const char *str, int len)
{
	if (len == -1)
		len = strlen(str);
	return tlv_put(sctx, attr, str, len);
}

static int tlv_put_uuid(struct send_ctx *sctx, u16 attr,
			const u8 *uuid)
{
	return tlv_put(sctx, attr, uuid, BTRFS_UUID_SIZE);
}

static int tlv_put_btrfs_timespec(struct send_ctx *sctx, u16 attr,
				  struct extent_buffer *eb,
				  struct btrfs_timespec *ts)
{
	struct btrfs_timespec bts;

	read_extent_buffer(eb, &bts, (unsigned long)ts, sizeof(bts));
	return tlv_put(sctx, attr, &bts, sizeof(bts));
}

#define TLV_PUT(sctx, attrtype, data, attrlen) \
	do { \
		ret = tlv_put(sctx, attrtype, data, attrlen); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)

#define TLV_PUT_INT(sctx, attrtype, bits, value) \
	do { \
		ret = tlv_put_u##bits(sctx, attrtype, value); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)

#define TLV_PUT_U8(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 8, data)
#define TLV_PUT_U16(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 16, data)
#define TLV_PUT_U32(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 32, data)
#define TLV_PUT_U64(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 64, data)
#define TLV_PUT_STRING(sctx, attrtype, str, len) \
	do { \
		ret = tlv_put_string(sctx, attrtype, str, len); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)
#define TLV_PUT_PATH(sctx, attrtype, p) \
	do { \
		ret = tlv_put_string(sctx, attrtype, p->start, \
				     p->end - p->start); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)
#define TLV_PUT_UUID(sctx, attrtype, uuid) \
	do { \
		ret = tlv_put_uuid(sctx, attrtype, uuid); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)
#define TLV_PUT_BTRFS_TIMESPEC(sctx, attrtype, eb, ts) \
	do { \
		ret = tlv_put_btrfs_timespec(sctx, attrtype, eb, ts); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)

static int send_header(struct send_ctx *sctx)
{
	struct btrfs_stream_header hdr;

	strcpy(hdr.magic, BTRFS_SEND_STREAM_MAGIC);
	hdr.version = cpu_to_le32(BTRFS_SEND_STREAM_VERSION);

	return write_buf(sctx->send_filp, &hdr, sizeof(hdr),
			 &sctx->send_off);
}

/*
 * For each command/item we want to send to userspace, we call this function.
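 * A command is staged in sctx->send_buf: begin_cmd() reserves room for the
 * command header, the TLV_PUT_* macros append the attributes and send_cmd()
 * fills in the final length and crc32c checksum before writing the buffer
 * out to the send file.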
661 */ 662 static int begin_cmd(struct send_ctx *sctx, int cmd) 663 { 664 struct btrfs_cmd_header *hdr; 665 666 if (WARN_ON(!sctx->send_buf)) 667 return -EINVAL; 668 669 BUG_ON(sctx->send_size); 670 671 sctx->send_size += sizeof(*hdr); 672 hdr = (struct btrfs_cmd_header *)sctx->send_buf; 673 hdr->cmd = cpu_to_le16(cmd); 674 675 return 0; 676 } 677 678 static int send_cmd(struct send_ctx *sctx) 679 { 680 int ret; 681 struct btrfs_cmd_header *hdr; 682 u32 crc; 683 684 hdr = (struct btrfs_cmd_header *)sctx->send_buf; 685 hdr->len = cpu_to_le32(sctx->send_size - sizeof(*hdr)); 686 hdr->crc = 0; 687 688 crc = crc32c(0, (unsigned char *)sctx->send_buf, sctx->send_size); 689 hdr->crc = cpu_to_le32(crc); 690 691 ret = write_buf(sctx->send_filp, sctx->send_buf, sctx->send_size, 692 &sctx->send_off); 693 694 sctx->total_send_size += sctx->send_size; 695 sctx->cmd_send_size[le16_to_cpu(hdr->cmd)] += sctx->send_size; 696 sctx->send_size = 0; 697 698 return ret; 699 } 700 701 /* 702 * Sends a move instruction to user space 703 */ 704 static int send_rename(struct send_ctx *sctx, 705 struct fs_path *from, struct fs_path *to) 706 { 707 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info; 708 int ret; 709 710 btrfs_debug(fs_info, "send_rename %s -> %s", from->start, to->start); 711 712 ret = begin_cmd(sctx, BTRFS_SEND_C_RENAME); 713 if (ret < 0) 714 goto out; 715 716 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, from); 717 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_TO, to); 718 719 ret = send_cmd(sctx); 720 721 tlv_put_failure: 722 out: 723 return ret; 724 } 725 726 /* 727 * Sends a link instruction to user space 728 */ 729 static int send_link(struct send_ctx *sctx, 730 struct fs_path *path, struct fs_path *lnk) 731 { 732 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info; 733 int ret; 734 735 btrfs_debug(fs_info, "send_link %s -> %s", path->start, lnk->start); 736 737 ret = begin_cmd(sctx, BTRFS_SEND_C_LINK); 738 if (ret < 0) 739 goto out; 740 741 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path); 742 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, lnk); 743 744 ret = send_cmd(sctx); 745 746 tlv_put_failure: 747 out: 748 return ret; 749 } 750 751 /* 752 * Sends an unlink instruction to user space 753 */ 754 static int send_unlink(struct send_ctx *sctx, struct fs_path *path) 755 { 756 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info; 757 int ret; 758 759 btrfs_debug(fs_info, "send_unlink %s", path->start); 760 761 ret = begin_cmd(sctx, BTRFS_SEND_C_UNLINK); 762 if (ret < 0) 763 goto out; 764 765 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path); 766 767 ret = send_cmd(sctx); 768 769 tlv_put_failure: 770 out: 771 return ret; 772 } 773 774 /* 775 * Sends a rmdir instruction to user space 776 */ 777 static int send_rmdir(struct send_ctx *sctx, struct fs_path *path) 778 { 779 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info; 780 int ret; 781 782 btrfs_debug(fs_info, "send_rmdir %s", path->start); 783 784 ret = begin_cmd(sctx, BTRFS_SEND_C_RMDIR); 785 if (ret < 0) 786 goto out; 787 788 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path); 789 790 ret = send_cmd(sctx); 791 792 tlv_put_failure: 793 out: 794 return ret; 795 } 796 797 /* 798 * Helper function to retrieve some fields from an inode item. 
799 */ 800 static int __get_inode_info(struct btrfs_root *root, struct btrfs_path *path, 801 u64 ino, u64 *size, u64 *gen, u64 *mode, u64 *uid, 802 u64 *gid, u64 *rdev) 803 { 804 int ret; 805 struct btrfs_inode_item *ii; 806 struct btrfs_key key; 807 808 key.objectid = ino; 809 key.type = BTRFS_INODE_ITEM_KEY; 810 key.offset = 0; 811 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 812 if (ret) { 813 if (ret > 0) 814 ret = -ENOENT; 815 return ret; 816 } 817 818 ii = btrfs_item_ptr(path->nodes[0], path->slots[0], 819 struct btrfs_inode_item); 820 if (size) 821 *size = btrfs_inode_size(path->nodes[0], ii); 822 if (gen) 823 *gen = btrfs_inode_generation(path->nodes[0], ii); 824 if (mode) 825 *mode = btrfs_inode_mode(path->nodes[0], ii); 826 if (uid) 827 *uid = btrfs_inode_uid(path->nodes[0], ii); 828 if (gid) 829 *gid = btrfs_inode_gid(path->nodes[0], ii); 830 if (rdev) 831 *rdev = btrfs_inode_rdev(path->nodes[0], ii); 832 833 return ret; 834 } 835 836 static int get_inode_info(struct btrfs_root *root, 837 u64 ino, u64 *size, u64 *gen, 838 u64 *mode, u64 *uid, u64 *gid, 839 u64 *rdev) 840 { 841 struct btrfs_path *path; 842 int ret; 843 844 path = alloc_path_for_send(); 845 if (!path) 846 return -ENOMEM; 847 ret = __get_inode_info(root, path, ino, size, gen, mode, uid, gid, 848 rdev); 849 btrfs_free_path(path); 850 return ret; 851 } 852 853 typedef int (*iterate_inode_ref_t)(int num, u64 dir, int index, 854 struct fs_path *p, 855 void *ctx); 856 857 /* 858 * Helper function to iterate the entries in ONE btrfs_inode_ref or 859 * btrfs_inode_extref. 860 * The iterate callback may return a non zero value to stop iteration. This can 861 * be a negative value for error codes or 1 to simply stop it. 862 * 863 * path must point to the INODE_REF or INODE_EXTREF when called. 
864 */ 865 static int iterate_inode_ref(struct btrfs_root *root, struct btrfs_path *path, 866 struct btrfs_key *found_key, int resolve, 867 iterate_inode_ref_t iterate, void *ctx) 868 { 869 struct extent_buffer *eb = path->nodes[0]; 870 struct btrfs_item *item; 871 struct btrfs_inode_ref *iref; 872 struct btrfs_inode_extref *extref; 873 struct btrfs_path *tmp_path; 874 struct fs_path *p; 875 u32 cur = 0; 876 u32 total; 877 int slot = path->slots[0]; 878 u32 name_len; 879 char *start; 880 int ret = 0; 881 int num = 0; 882 int index; 883 u64 dir; 884 unsigned long name_off; 885 unsigned long elem_size; 886 unsigned long ptr; 887 888 p = fs_path_alloc_reversed(); 889 if (!p) 890 return -ENOMEM; 891 892 tmp_path = alloc_path_for_send(); 893 if (!tmp_path) { 894 fs_path_free(p); 895 return -ENOMEM; 896 } 897 898 899 if (found_key->type == BTRFS_INODE_REF_KEY) { 900 ptr = (unsigned long)btrfs_item_ptr(eb, slot, 901 struct btrfs_inode_ref); 902 item = btrfs_item_nr(slot); 903 total = btrfs_item_size(eb, item); 904 elem_size = sizeof(*iref); 905 } else { 906 ptr = btrfs_item_ptr_offset(eb, slot); 907 total = btrfs_item_size_nr(eb, slot); 908 elem_size = sizeof(*extref); 909 } 910 911 while (cur < total) { 912 fs_path_reset(p); 913 914 if (found_key->type == BTRFS_INODE_REF_KEY) { 915 iref = (struct btrfs_inode_ref *)(ptr + cur); 916 name_len = btrfs_inode_ref_name_len(eb, iref); 917 name_off = (unsigned long)(iref + 1); 918 index = btrfs_inode_ref_index(eb, iref); 919 dir = found_key->offset; 920 } else { 921 extref = (struct btrfs_inode_extref *)(ptr + cur); 922 name_len = btrfs_inode_extref_name_len(eb, extref); 923 name_off = (unsigned long)&extref->name; 924 index = btrfs_inode_extref_index(eb, extref); 925 dir = btrfs_inode_extref_parent(eb, extref); 926 } 927 928 if (resolve) { 929 start = btrfs_ref_to_path(root, tmp_path, name_len, 930 name_off, eb, dir, 931 p->buf, p->buf_len); 932 if (IS_ERR(start)) { 933 ret = PTR_ERR(start); 934 goto out; 935 } 936 if (start < p->buf) { 937 /* overflow , try again with larger buffer */ 938 ret = fs_path_ensure_buf(p, 939 p->buf_len + p->buf - start); 940 if (ret < 0) 941 goto out; 942 start = btrfs_ref_to_path(root, tmp_path, 943 name_len, name_off, 944 eb, dir, 945 p->buf, p->buf_len); 946 if (IS_ERR(start)) { 947 ret = PTR_ERR(start); 948 goto out; 949 } 950 BUG_ON(start < p->buf); 951 } 952 p->start = start; 953 } else { 954 ret = fs_path_add_from_extent_buffer(p, eb, name_off, 955 name_len); 956 if (ret < 0) 957 goto out; 958 } 959 960 cur += elem_size + name_len; 961 ret = iterate(num, dir, index, p, ctx); 962 if (ret) 963 goto out; 964 num++; 965 } 966 967 out: 968 btrfs_free_path(tmp_path); 969 fs_path_free(p); 970 return ret; 971 } 972 973 typedef int (*iterate_dir_item_t)(int num, struct btrfs_key *di_key, 974 const char *name, int name_len, 975 const char *data, int data_len, 976 u8 type, void *ctx); 977 978 /* 979 * Helper function to iterate the entries in ONE btrfs_dir_item. 980 * The iterate callback may return a non zero value to stop iteration. This can 981 * be a negative value for error codes or 1 to simply stop it. 982 * 983 * path must point to the dir item when called. 
984 */ 985 static int iterate_dir_item(struct btrfs_root *root, struct btrfs_path *path, 986 iterate_dir_item_t iterate, void *ctx) 987 { 988 int ret = 0; 989 struct extent_buffer *eb; 990 struct btrfs_item *item; 991 struct btrfs_dir_item *di; 992 struct btrfs_key di_key; 993 char *buf = NULL; 994 int buf_len; 995 u32 name_len; 996 u32 data_len; 997 u32 cur; 998 u32 len; 999 u32 total; 1000 int slot; 1001 int num; 1002 u8 type; 1003 1004 /* 1005 * Start with a small buffer (1 page). If later we end up needing more 1006 * space, which can happen for xattrs on a fs with a leaf size greater 1007 * then the page size, attempt to increase the buffer. Typically xattr 1008 * values are small. 1009 */ 1010 buf_len = PATH_MAX; 1011 buf = kmalloc(buf_len, GFP_KERNEL); 1012 if (!buf) { 1013 ret = -ENOMEM; 1014 goto out; 1015 } 1016 1017 eb = path->nodes[0]; 1018 slot = path->slots[0]; 1019 item = btrfs_item_nr(slot); 1020 di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item); 1021 cur = 0; 1022 len = 0; 1023 total = btrfs_item_size(eb, item); 1024 1025 num = 0; 1026 while (cur < total) { 1027 name_len = btrfs_dir_name_len(eb, di); 1028 data_len = btrfs_dir_data_len(eb, di); 1029 type = btrfs_dir_type(eb, di); 1030 btrfs_dir_item_key_to_cpu(eb, di, &di_key); 1031 1032 if (type == BTRFS_FT_XATTR) { 1033 if (name_len > XATTR_NAME_MAX) { 1034 ret = -ENAMETOOLONG; 1035 goto out; 1036 } 1037 if (name_len + data_len > 1038 BTRFS_MAX_XATTR_SIZE(root->fs_info)) { 1039 ret = -E2BIG; 1040 goto out; 1041 } 1042 } else { 1043 /* 1044 * Path too long 1045 */ 1046 if (name_len + data_len > PATH_MAX) { 1047 ret = -ENAMETOOLONG; 1048 goto out; 1049 } 1050 } 1051 1052 if (name_len + data_len > buf_len) { 1053 buf_len = name_len + data_len; 1054 if (is_vmalloc_addr(buf)) { 1055 vfree(buf); 1056 buf = NULL; 1057 } else { 1058 char *tmp = krealloc(buf, buf_len, 1059 GFP_KERNEL | __GFP_NOWARN); 1060 1061 if (!tmp) 1062 kfree(buf); 1063 buf = tmp; 1064 } 1065 if (!buf) { 1066 buf = kvmalloc(buf_len, GFP_KERNEL); 1067 if (!buf) { 1068 ret = -ENOMEM; 1069 goto out; 1070 } 1071 } 1072 } 1073 1074 read_extent_buffer(eb, buf, (unsigned long)(di + 1), 1075 name_len + data_len); 1076 1077 len = sizeof(*di) + name_len + data_len; 1078 di = (struct btrfs_dir_item *)((char *)di + len); 1079 cur += len; 1080 1081 ret = iterate(num, &di_key, buf, name_len, buf + name_len, 1082 data_len, type, ctx); 1083 if (ret < 0) 1084 goto out; 1085 if (ret) { 1086 ret = 0; 1087 goto out; 1088 } 1089 1090 num++; 1091 } 1092 1093 out: 1094 kvfree(buf); 1095 return ret; 1096 } 1097 1098 static int __copy_first_ref(int num, u64 dir, int index, 1099 struct fs_path *p, void *ctx) 1100 { 1101 int ret; 1102 struct fs_path *pt = ctx; 1103 1104 ret = fs_path_copy(pt, p); 1105 if (ret < 0) 1106 return ret; 1107 1108 /* we want the first only */ 1109 return 1; 1110 } 1111 1112 /* 1113 * Retrieve the first path of an inode. If an inode has more then one 1114 * ref/hardlink, this is ignored. 
1115 */ 1116 static int get_inode_path(struct btrfs_root *root, 1117 u64 ino, struct fs_path *path) 1118 { 1119 int ret; 1120 struct btrfs_key key, found_key; 1121 struct btrfs_path *p; 1122 1123 p = alloc_path_for_send(); 1124 if (!p) 1125 return -ENOMEM; 1126 1127 fs_path_reset(path); 1128 1129 key.objectid = ino; 1130 key.type = BTRFS_INODE_REF_KEY; 1131 key.offset = 0; 1132 1133 ret = btrfs_search_slot_for_read(root, &key, p, 1, 0); 1134 if (ret < 0) 1135 goto out; 1136 if (ret) { 1137 ret = 1; 1138 goto out; 1139 } 1140 btrfs_item_key_to_cpu(p->nodes[0], &found_key, p->slots[0]); 1141 if (found_key.objectid != ino || 1142 (found_key.type != BTRFS_INODE_REF_KEY && 1143 found_key.type != BTRFS_INODE_EXTREF_KEY)) { 1144 ret = -ENOENT; 1145 goto out; 1146 } 1147 1148 ret = iterate_inode_ref(root, p, &found_key, 1, 1149 __copy_first_ref, path); 1150 if (ret < 0) 1151 goto out; 1152 ret = 0; 1153 1154 out: 1155 btrfs_free_path(p); 1156 return ret; 1157 } 1158 1159 struct backref_ctx { 1160 struct send_ctx *sctx; 1161 1162 struct btrfs_path *path; 1163 /* number of total found references */ 1164 u64 found; 1165 1166 /* 1167 * used for clones found in send_root. clones found behind cur_objectid 1168 * and cur_offset are not considered as allowed clones. 1169 */ 1170 u64 cur_objectid; 1171 u64 cur_offset; 1172 1173 /* may be truncated in case it's the last extent in a file */ 1174 u64 extent_len; 1175 1176 /* data offset in the file extent item */ 1177 u64 data_offset; 1178 1179 /* Just to check for bugs in backref resolving */ 1180 int found_itself; 1181 }; 1182 1183 static int __clone_root_cmp_bsearch(const void *key, const void *elt) 1184 { 1185 u64 root = (u64)(uintptr_t)key; 1186 struct clone_root *cr = (struct clone_root *)elt; 1187 1188 if (root < cr->root->objectid) 1189 return -1; 1190 if (root > cr->root->objectid) 1191 return 1; 1192 return 0; 1193 } 1194 1195 static int __clone_root_cmp_sort(const void *e1, const void *e2) 1196 { 1197 struct clone_root *cr1 = (struct clone_root *)e1; 1198 struct clone_root *cr2 = (struct clone_root *)e2; 1199 1200 if (cr1->root->objectid < cr2->root->objectid) 1201 return -1; 1202 if (cr1->root->objectid > cr2->root->objectid) 1203 return 1; 1204 return 0; 1205 } 1206 1207 /* 1208 * Called for every backref that is found for the current extent. 1209 * Results are collected in sctx->clone_roots->ino/offset/found_refs 1210 */ 1211 static int __iterate_backrefs(u64 ino, u64 offset, u64 root, void *ctx_) 1212 { 1213 struct backref_ctx *bctx = ctx_; 1214 struct clone_root *found; 1215 int ret; 1216 u64 i_size; 1217 1218 /* First check if the root is in the list of accepted clone sources */ 1219 found = bsearch((void *)(uintptr_t)root, bctx->sctx->clone_roots, 1220 bctx->sctx->clone_roots_cnt, 1221 sizeof(struct clone_root), 1222 __clone_root_cmp_bsearch); 1223 if (!found) 1224 return 0; 1225 1226 if (found->root == bctx->sctx->send_root && 1227 ino == bctx->cur_objectid && 1228 offset == bctx->cur_offset) { 1229 bctx->found_itself = 1; 1230 } 1231 1232 /* 1233 * There are inodes that have extents that lie behind its i_size. Don't 1234 * accept clones from these extents. 1235 */ 1236 ret = __get_inode_info(found->root, bctx->path, ino, &i_size, NULL, NULL, 1237 NULL, NULL, NULL); 1238 btrfs_release_path(bctx->path); 1239 if (ret < 0) 1240 return ret; 1241 1242 if (offset + bctx->data_offset + bctx->extent_len > i_size) 1243 return 0; 1244 1245 /* 1246 * Make sure we don't consider clones from send_root that are 1247 * behind the current inode/offset. 
1248 */ 1249 if (found->root == bctx->sctx->send_root) { 1250 /* 1251 * TODO for the moment we don't accept clones from the inode 1252 * that is currently send. We may change this when 1253 * BTRFS_IOC_CLONE_RANGE supports cloning from and to the same 1254 * file. 1255 */ 1256 if (ino >= bctx->cur_objectid) 1257 return 0; 1258 } 1259 1260 bctx->found++; 1261 found->found_refs++; 1262 if (ino < found->ino) { 1263 found->ino = ino; 1264 found->offset = offset; 1265 } else if (found->ino == ino) { 1266 /* 1267 * same extent found more then once in the same file. 1268 */ 1269 if (found->offset > offset + bctx->extent_len) 1270 found->offset = offset; 1271 } 1272 1273 return 0; 1274 } 1275 1276 /* 1277 * Given an inode, offset and extent item, it finds a good clone for a clone 1278 * instruction. Returns -ENOENT when none could be found. The function makes 1279 * sure that the returned clone is usable at the point where sending is at the 1280 * moment. This means, that no clones are accepted which lie behind the current 1281 * inode+offset. 1282 * 1283 * path must point to the extent item when called. 1284 */ 1285 static int find_extent_clone(struct send_ctx *sctx, 1286 struct btrfs_path *path, 1287 u64 ino, u64 data_offset, 1288 u64 ino_size, 1289 struct clone_root **found) 1290 { 1291 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info; 1292 int ret; 1293 int extent_type; 1294 u64 logical; 1295 u64 disk_byte; 1296 u64 num_bytes; 1297 u64 extent_item_pos; 1298 u64 flags = 0; 1299 struct btrfs_file_extent_item *fi; 1300 struct extent_buffer *eb = path->nodes[0]; 1301 struct backref_ctx *backref_ctx = NULL; 1302 struct clone_root *cur_clone_root; 1303 struct btrfs_key found_key; 1304 struct btrfs_path *tmp_path; 1305 int compressed; 1306 u32 i; 1307 1308 tmp_path = alloc_path_for_send(); 1309 if (!tmp_path) 1310 return -ENOMEM; 1311 1312 /* We only use this path under the commit sem */ 1313 tmp_path->need_commit_sem = 0; 1314 1315 backref_ctx = kmalloc(sizeof(*backref_ctx), GFP_KERNEL); 1316 if (!backref_ctx) { 1317 ret = -ENOMEM; 1318 goto out; 1319 } 1320 1321 backref_ctx->path = tmp_path; 1322 1323 if (data_offset >= ino_size) { 1324 /* 1325 * There may be extents that lie behind the file's size. 1326 * I at least had this in combination with snapshotting while 1327 * writing large files. 1328 */ 1329 ret = 0; 1330 goto out; 1331 } 1332 1333 fi = btrfs_item_ptr(eb, path->slots[0], 1334 struct btrfs_file_extent_item); 1335 extent_type = btrfs_file_extent_type(eb, fi); 1336 if (extent_type == BTRFS_FILE_EXTENT_INLINE) { 1337 ret = -ENOENT; 1338 goto out; 1339 } 1340 compressed = btrfs_file_extent_compression(eb, fi); 1341 1342 num_bytes = btrfs_file_extent_num_bytes(eb, fi); 1343 disk_byte = btrfs_file_extent_disk_bytenr(eb, fi); 1344 if (disk_byte == 0) { 1345 ret = -ENOENT; 1346 goto out; 1347 } 1348 logical = disk_byte + btrfs_file_extent_offset(eb, fi); 1349 1350 down_read(&fs_info->commit_root_sem); 1351 ret = extent_from_logical(fs_info, disk_byte, tmp_path, 1352 &found_key, &flags); 1353 up_read(&fs_info->commit_root_sem); 1354 btrfs_release_path(tmp_path); 1355 1356 if (ret < 0) 1357 goto out; 1358 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) { 1359 ret = -EIO; 1360 goto out; 1361 } 1362 1363 /* 1364 * Setup the clone roots. 
1365 */ 1366 for (i = 0; i < sctx->clone_roots_cnt; i++) { 1367 cur_clone_root = sctx->clone_roots + i; 1368 cur_clone_root->ino = (u64)-1; 1369 cur_clone_root->offset = 0; 1370 cur_clone_root->found_refs = 0; 1371 } 1372 1373 backref_ctx->sctx = sctx; 1374 backref_ctx->found = 0; 1375 backref_ctx->cur_objectid = ino; 1376 backref_ctx->cur_offset = data_offset; 1377 backref_ctx->found_itself = 0; 1378 backref_ctx->extent_len = num_bytes; 1379 /* 1380 * For non-compressed extents iterate_extent_inodes() gives us extent 1381 * offsets that already take into account the data offset, but not for 1382 * compressed extents, since the offset is logical and not relative to 1383 * the physical extent locations. We must take this into account to 1384 * avoid sending clone offsets that go beyond the source file's size, 1385 * which would result in the clone ioctl failing with -EINVAL on the 1386 * receiving end. 1387 */ 1388 if (compressed == BTRFS_COMPRESS_NONE) 1389 backref_ctx->data_offset = 0; 1390 else 1391 backref_ctx->data_offset = btrfs_file_extent_offset(eb, fi); 1392 1393 /* 1394 * The last extent of a file may be too large due to page alignment. 1395 * We need to adjust extent_len in this case so that the checks in 1396 * __iterate_backrefs work. 1397 */ 1398 if (data_offset + num_bytes >= ino_size) 1399 backref_ctx->extent_len = ino_size - data_offset; 1400 1401 /* 1402 * Now collect all backrefs. 1403 */ 1404 if (compressed == BTRFS_COMPRESS_NONE) 1405 extent_item_pos = logical - found_key.objectid; 1406 else 1407 extent_item_pos = 0; 1408 ret = iterate_extent_inodes(fs_info, found_key.objectid, 1409 extent_item_pos, 1, __iterate_backrefs, 1410 backref_ctx, false); 1411 1412 if (ret < 0) 1413 goto out; 1414 1415 if (!backref_ctx->found_itself) { 1416 /* found a bug in backref code? */ 1417 ret = -EIO; 1418 btrfs_err(fs_info, 1419 "did not find backref in send_root. inode=%llu, offset=%llu, disk_byte=%llu found extent=%llu", 1420 ino, data_offset, disk_byte, found_key.objectid); 1421 goto out; 1422 } 1423 1424 btrfs_debug(fs_info, 1425 "find_extent_clone: data_offset=%llu, ino=%llu, num_bytes=%llu, logical=%llu", 1426 data_offset, ino, num_bytes, logical); 1427 1428 if (!backref_ctx->found) 1429 btrfs_debug(fs_info, "no clones found"); 1430 1431 cur_clone_root = NULL; 1432 for (i = 0; i < sctx->clone_roots_cnt; i++) { 1433 if (sctx->clone_roots[i].found_refs) { 1434 if (!cur_clone_root) 1435 cur_clone_root = sctx->clone_roots + i; 1436 else if (sctx->clone_roots[i].root == sctx->send_root) 1437 /* prefer clones from send_root over others */ 1438 cur_clone_root = sctx->clone_roots + i; 1439 } 1440 1441 } 1442 1443 if (cur_clone_root) { 1444 *found = cur_clone_root; 1445 ret = 0; 1446 } else { 1447 ret = -ENOENT; 1448 } 1449 1450 out: 1451 btrfs_free_path(tmp_path); 1452 kfree(backref_ctx); 1453 return ret; 1454 } 1455 1456 static int read_symlink(struct btrfs_root *root, 1457 u64 ino, 1458 struct fs_path *dest) 1459 { 1460 int ret; 1461 struct btrfs_path *path; 1462 struct btrfs_key key; 1463 struct btrfs_file_extent_item *ei; 1464 u8 type; 1465 u8 compression; 1466 unsigned long off; 1467 int len; 1468 1469 path = alloc_path_for_send(); 1470 if (!path) 1471 return -ENOMEM; 1472 1473 key.objectid = ino; 1474 key.type = BTRFS_EXTENT_DATA_KEY; 1475 key.offset = 0; 1476 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 1477 if (ret < 0) 1478 goto out; 1479 if (ret) { 1480 /* 1481 * An empty symlink inode. 
		 * creating a symlink (transaction committed before the inode
		 * eviction handler removed the symlink inode items and a crash
		 * happened in between or the subvol was snapshotted in
		 * between).
		 * Print an informative message to dmesg/syslog so that the
		 * user can delete the symlink.
		 */
		btrfs_err(root->fs_info,
			  "Found empty symlink inode %llu at root %llu",
			  ino, root->root_key.objectid);
		ret = -EIO;
		goto out;
	}

	ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
			    struct btrfs_file_extent_item);
	type = btrfs_file_extent_type(path->nodes[0], ei);
	compression = btrfs_file_extent_compression(path->nodes[0], ei);
	BUG_ON(type != BTRFS_FILE_EXTENT_INLINE);
	BUG_ON(compression);

	off = btrfs_file_extent_inline_start(ei);
	len = btrfs_file_extent_inline_len(path->nodes[0], path->slots[0], ei);

	ret = fs_path_add_from_extent_buffer(dest, path->nodes[0], off, len);

out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Helper function to generate a file name that is unique in the root of
 * send_root and parent_root. This is used to generate names for orphan inodes.
 */
static int gen_unique_name(struct send_ctx *sctx,
			   u64 ino, u64 gen,
			   struct fs_path *dest)
{
	int ret = 0;
	struct btrfs_path *path;
	struct btrfs_dir_item *di;
	char tmp[64];
	int len;
	u64 idx = 0;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	while (1) {
		len = snprintf(tmp, sizeof(tmp), "o%llu-%llu-%llu",
			       ino, gen, idx);
		ASSERT(len < sizeof(tmp));

		di = btrfs_lookup_dir_item(NULL, sctx->send_root,
					   path, BTRFS_FIRST_FREE_OBJECTID,
					   tmp, strlen(tmp), 0);
		btrfs_release_path(path);
		if (IS_ERR(di)) {
			ret = PTR_ERR(di);
			goto out;
		}
		if (di) {
			/* not unique, try again */
			idx++;
			continue;
		}

		if (!sctx->parent_root) {
			/* unique */
			ret = 0;
			break;
		}

		di = btrfs_lookup_dir_item(NULL, sctx->parent_root,
					   path, BTRFS_FIRST_FREE_OBJECTID,
					   tmp, strlen(tmp), 0);
		btrfs_release_path(path);
		if (IS_ERR(di)) {
			ret = PTR_ERR(di);
			goto out;
		}
		if (di) {
			/* not unique, try again */
			idx++;
			continue;
		}
		/* unique */
		break;
	}

	ret = fs_path_add(dest, tmp, strlen(tmp));

out:
	btrfs_free_path(path);
	return ret;
}

enum inode_state {
	inode_state_no_change,
	inode_state_will_create,
	inode_state_did_create,
	inode_state_will_delete,
	inode_state_did_delete,
};
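/*
 * Determine the state of an inode by checking whether it exists, with the
 * expected generation, in the send root and/or the parent root, and by
 * comparing its number against sctx->send_progress to tell whether the
 * corresponding create/delete was already emitted or is still pending.
 */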
static int get_cur_inode_state(struct send_ctx *sctx, u64 ino, u64 gen)
{
	int ret;
	int left_ret;
	int right_ret;
	u64 left_gen;
	u64 right_gen;

	ret = get_inode_info(sctx->send_root, ino, NULL, &left_gen, NULL, NULL,
			     NULL, NULL);
	if (ret < 0 && ret != -ENOENT)
		goto out;
	left_ret = ret;

	if (!sctx->parent_root) {
		right_ret = -ENOENT;
	} else {
		ret = get_inode_info(sctx->parent_root, ino, NULL, &right_gen,
				     NULL, NULL, NULL, NULL);
		if (ret < 0 && ret != -ENOENT)
			goto out;
		right_ret = ret;
	}

	if (!left_ret && !right_ret) {
		if (left_gen == gen && right_gen == gen) {
			ret = inode_state_no_change;
		} else if (left_gen == gen) {
			if (ino < sctx->send_progress)
				ret = inode_state_did_create;
			else
				ret = inode_state_will_create;
		} else if (right_gen == gen) {
			if (ino < sctx->send_progress)
				ret = inode_state_did_delete;
			else
				ret = inode_state_will_delete;
		} else {
			ret = -ENOENT;
		}
	} else if (!left_ret) {
		if (left_gen == gen) {
			if (ino < sctx->send_progress)
				ret = inode_state_did_create;
			else
				ret = inode_state_will_create;
		} else {
			ret = -ENOENT;
		}
	} else if (!right_ret) {
		if (right_gen == gen) {
			if (ino < sctx->send_progress)
				ret = inode_state_did_delete;
			else
				ret = inode_state_will_delete;
		} else {
			ret = -ENOENT;
		}
	} else {
		ret = -ENOENT;
	}

out:
	return ret;
}

static int is_inode_existent(struct send_ctx *sctx, u64 ino, u64 gen)
{
	int ret;

	if (ino == BTRFS_FIRST_FREE_OBJECTID)
		return 1;

	ret = get_cur_inode_state(sctx, ino, gen);
	if (ret < 0)
		goto out;

	if (ret == inode_state_no_change ||
	    ret == inode_state_did_create ||
	    ret == inode_state_will_delete)
		ret = 1;
	else
		ret = 0;

out:
	return ret;
}

/*
 * Helper function to lookup a dir item in a dir.
 */
static int lookup_dir_item_inode(struct btrfs_root *root,
				 u64 dir, const char *name, int name_len,
				 u64 *found_inode,
				 u8 *found_type)
{
	int ret = 0;
	struct btrfs_dir_item *di;
	struct btrfs_key key;
	struct btrfs_path *path;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	di = btrfs_lookup_dir_item(NULL, root, path,
				   dir, name, name_len, 0);
	if (!di) {
		ret = -ENOENT;
		goto out;
	}
	if (IS_ERR(di)) {
		ret = PTR_ERR(di);
		goto out;
	}
	btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
	if (key.type == BTRFS_ROOT_ITEM_KEY) {
		ret = -ENOENT;
		goto out;
	}
	*found_inode = key.objectid;
	*found_type = btrfs_dir_type(path->nodes[0], di);

out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Looks up the first btrfs_inode_ref of a given ino. It returns the parent dir,
 * generation of the parent dir and the name of the dir entry.
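 * dir_gen is optional and may be NULL if the caller does not need the
 * generation of the parent directory.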
1719 */ 1720 static int get_first_ref(struct btrfs_root *root, u64 ino, 1721 u64 *dir, u64 *dir_gen, struct fs_path *name) 1722 { 1723 int ret; 1724 struct btrfs_key key; 1725 struct btrfs_key found_key; 1726 struct btrfs_path *path; 1727 int len; 1728 u64 parent_dir; 1729 1730 path = alloc_path_for_send(); 1731 if (!path) 1732 return -ENOMEM; 1733 1734 key.objectid = ino; 1735 key.type = BTRFS_INODE_REF_KEY; 1736 key.offset = 0; 1737 1738 ret = btrfs_search_slot_for_read(root, &key, path, 1, 0); 1739 if (ret < 0) 1740 goto out; 1741 if (!ret) 1742 btrfs_item_key_to_cpu(path->nodes[0], &found_key, 1743 path->slots[0]); 1744 if (ret || found_key.objectid != ino || 1745 (found_key.type != BTRFS_INODE_REF_KEY && 1746 found_key.type != BTRFS_INODE_EXTREF_KEY)) { 1747 ret = -ENOENT; 1748 goto out; 1749 } 1750 1751 if (found_key.type == BTRFS_INODE_REF_KEY) { 1752 struct btrfs_inode_ref *iref; 1753 iref = btrfs_item_ptr(path->nodes[0], path->slots[0], 1754 struct btrfs_inode_ref); 1755 len = btrfs_inode_ref_name_len(path->nodes[0], iref); 1756 ret = fs_path_add_from_extent_buffer(name, path->nodes[0], 1757 (unsigned long)(iref + 1), 1758 len); 1759 parent_dir = found_key.offset; 1760 } else { 1761 struct btrfs_inode_extref *extref; 1762 extref = btrfs_item_ptr(path->nodes[0], path->slots[0], 1763 struct btrfs_inode_extref); 1764 len = btrfs_inode_extref_name_len(path->nodes[0], extref); 1765 ret = fs_path_add_from_extent_buffer(name, path->nodes[0], 1766 (unsigned long)&extref->name, len); 1767 parent_dir = btrfs_inode_extref_parent(path->nodes[0], extref); 1768 } 1769 if (ret < 0) 1770 goto out; 1771 btrfs_release_path(path); 1772 1773 if (dir_gen) { 1774 ret = get_inode_info(root, parent_dir, NULL, dir_gen, NULL, 1775 NULL, NULL, NULL); 1776 if (ret < 0) 1777 goto out; 1778 } 1779 1780 *dir = parent_dir; 1781 1782 out: 1783 btrfs_free_path(path); 1784 return ret; 1785 } 1786 1787 static int is_first_ref(struct btrfs_root *root, 1788 u64 ino, u64 dir, 1789 const char *name, int name_len) 1790 { 1791 int ret; 1792 struct fs_path *tmp_name; 1793 u64 tmp_dir; 1794 1795 tmp_name = fs_path_alloc(); 1796 if (!tmp_name) 1797 return -ENOMEM; 1798 1799 ret = get_first_ref(root, ino, &tmp_dir, NULL, tmp_name); 1800 if (ret < 0) 1801 goto out; 1802 1803 if (dir != tmp_dir || name_len != fs_path_len(tmp_name)) { 1804 ret = 0; 1805 goto out; 1806 } 1807 1808 ret = !memcmp(tmp_name->start, name, name_len); 1809 1810 out: 1811 fs_path_free(tmp_name); 1812 return ret; 1813 } 1814 1815 /* 1816 * Used by process_recorded_refs to determine if a new ref would overwrite an 1817 * already existing ref. In case it detects an overwrite, it returns the 1818 * inode/gen in who_ino/who_gen. 1819 * When an overwrite is detected, process_recorded_refs does proper orphanizing 1820 * to make sure later references to the overwritten inode are possible. 1821 * Orphanizing is however only required for the first ref of an inode. 1822 * process_recorded_refs does an additional is_first_ref check to see if 1823 * orphanizing is really required. 
1824 */ 1825 static int will_overwrite_ref(struct send_ctx *sctx, u64 dir, u64 dir_gen, 1826 const char *name, int name_len, 1827 u64 *who_ino, u64 *who_gen, u64 *who_mode) 1828 { 1829 int ret = 0; 1830 u64 gen; 1831 u64 other_inode = 0; 1832 u8 other_type = 0; 1833 1834 if (!sctx->parent_root) 1835 goto out; 1836 1837 ret = is_inode_existent(sctx, dir, dir_gen); 1838 if (ret <= 0) 1839 goto out; 1840 1841 /* 1842 * If we have a parent root we need to verify that the parent dir was 1843 * not deleted and then re-created, if it was then we have no overwrite 1844 * and we can just unlink this entry. 1845 */ 1846 if (sctx->parent_root && dir != BTRFS_FIRST_FREE_OBJECTID) { 1847 ret = get_inode_info(sctx->parent_root, dir, NULL, &gen, NULL, 1848 NULL, NULL, NULL); 1849 if (ret < 0 && ret != -ENOENT) 1850 goto out; 1851 if (ret) { 1852 ret = 0; 1853 goto out; 1854 } 1855 if (gen != dir_gen) 1856 goto out; 1857 } 1858 1859 ret = lookup_dir_item_inode(sctx->parent_root, dir, name, name_len, 1860 &other_inode, &other_type); 1861 if (ret < 0 && ret != -ENOENT) 1862 goto out; 1863 if (ret) { 1864 ret = 0; 1865 goto out; 1866 } 1867 1868 /* 1869 * Check if the overwritten ref was already processed. If yes, the ref 1870 * was already unlinked/moved, so we can safely assume that we will not 1871 * overwrite anything at this point in time. 1872 */ 1873 if (other_inode > sctx->send_progress || 1874 is_waiting_for_move(sctx, other_inode)) { 1875 ret = get_inode_info(sctx->parent_root, other_inode, NULL, 1876 who_gen, who_mode, NULL, NULL, NULL); 1877 if (ret < 0) 1878 goto out; 1879 1880 ret = 1; 1881 *who_ino = other_inode; 1882 } else { 1883 ret = 0; 1884 } 1885 1886 out: 1887 return ret; 1888 } 1889 1890 /* 1891 * Checks if the ref was overwritten by an already processed inode. This is 1892 * used by __get_cur_name_and_parent to find out if the ref was orphanized and 1893 * thus the orphan name needs be used. 1894 * process_recorded_refs also uses it to avoid unlinking of refs that were 1895 * overwritten. 1896 */ 1897 static int did_overwrite_ref(struct send_ctx *sctx, 1898 u64 dir, u64 dir_gen, 1899 u64 ino, u64 ino_gen, 1900 const char *name, int name_len) 1901 { 1902 int ret = 0; 1903 u64 gen; 1904 u64 ow_inode; 1905 u8 other_type; 1906 1907 if (!sctx->parent_root) 1908 goto out; 1909 1910 ret = is_inode_existent(sctx, dir, dir_gen); 1911 if (ret <= 0) 1912 goto out; 1913 1914 if (dir != BTRFS_FIRST_FREE_OBJECTID) { 1915 ret = get_inode_info(sctx->send_root, dir, NULL, &gen, NULL, 1916 NULL, NULL, NULL); 1917 if (ret < 0 && ret != -ENOENT) 1918 goto out; 1919 if (ret) { 1920 ret = 0; 1921 goto out; 1922 } 1923 if (gen != dir_gen) 1924 goto out; 1925 } 1926 1927 /* check if the ref was overwritten by another ref */ 1928 ret = lookup_dir_item_inode(sctx->send_root, dir, name, name_len, 1929 &ow_inode, &other_type); 1930 if (ret < 0 && ret != -ENOENT) 1931 goto out; 1932 if (ret) { 1933 /* was never and will never be overwritten */ 1934 ret = 0; 1935 goto out; 1936 } 1937 1938 ret = get_inode_info(sctx->send_root, ow_inode, NULL, &gen, NULL, NULL, 1939 NULL, NULL); 1940 if (ret < 0) 1941 goto out; 1942 1943 if (ow_inode == ino && gen == ino_gen) { 1944 ret = 0; 1945 goto out; 1946 } 1947 1948 /* 1949 * We know that it is or will be overwritten. Check this now. 1950 * The current inode being processed might have been the one that caused 1951 * inode 'ino' to be orphanized, therefore check if ow_inode matches 1952 * the current inode being processed. 
1953 */ 1954 if ((ow_inode < sctx->send_progress) || 1955 (ino != sctx->cur_ino && ow_inode == sctx->cur_ino && 1956 gen == sctx->cur_inode_gen)) 1957 ret = 1; 1958 else 1959 ret = 0; 1960 1961 out: 1962 return ret; 1963 } 1964 1965 /* 1966 * Same as did_overwrite_ref, but also checks if it is the first ref of an inode 1967 * that got overwritten. This is used by process_recorded_refs to determine 1968 * if it has to use the path as returned by get_cur_path or the orphan name. 1969 */ 1970 static int did_overwrite_first_ref(struct send_ctx *sctx, u64 ino, u64 gen) 1971 { 1972 int ret = 0; 1973 struct fs_path *name = NULL; 1974 u64 dir; 1975 u64 dir_gen; 1976 1977 if (!sctx->parent_root) 1978 goto out; 1979 1980 name = fs_path_alloc(); 1981 if (!name) 1982 return -ENOMEM; 1983 1984 ret = get_first_ref(sctx->parent_root, ino, &dir, &dir_gen, name); 1985 if (ret < 0) 1986 goto out; 1987 1988 ret = did_overwrite_ref(sctx, dir, dir_gen, ino, gen, 1989 name->start, fs_path_len(name)); 1990 1991 out: 1992 fs_path_free(name); 1993 return ret; 1994 } 1995 1996 /* 1997 * Insert a name cache entry. On 32bit kernels the radix tree index is 32bit, 1998 * so we need to do some special handling in case we have clashes. This function 1999 * takes care of this with the help of name_cache_entry::radix_list. 2000 * In case of error, nce is kfreed. 2001 */ 2002 static int name_cache_insert(struct send_ctx *sctx, 2003 struct name_cache_entry *nce) 2004 { 2005 int ret = 0; 2006 struct list_head *nce_head; 2007 2008 nce_head = radix_tree_lookup(&sctx->name_cache, 2009 (unsigned long)nce->ino); 2010 if (!nce_head) { 2011 nce_head = kmalloc(sizeof(*nce_head), GFP_KERNEL); 2012 if (!nce_head) { 2013 kfree(nce); 2014 return -ENOMEM; 2015 } 2016 INIT_LIST_HEAD(nce_head); 2017 2018 ret = radix_tree_insert(&sctx->name_cache, nce->ino, nce_head); 2019 if (ret < 0) { 2020 kfree(nce_head); 2021 kfree(nce); 2022 return ret; 2023 } 2024 } 2025 list_add_tail(&nce->radix_list, nce_head); 2026 list_add_tail(&nce->list, &sctx->name_cache_list); 2027 sctx->name_cache_size++; 2028 2029 return ret; 2030 } 2031 2032 static void name_cache_delete(struct send_ctx *sctx, 2033 struct name_cache_entry *nce) 2034 { 2035 struct list_head *nce_head; 2036 2037 nce_head = radix_tree_lookup(&sctx->name_cache, 2038 (unsigned long)nce->ino); 2039 if (!nce_head) { 2040 btrfs_err(sctx->send_root->fs_info, 2041 "name_cache_delete lookup failed ino %llu cache size %d, leaking memory", 2042 nce->ino, sctx->name_cache_size); 2043 } 2044 2045 list_del(&nce->radix_list); 2046 list_del(&nce->list); 2047 sctx->name_cache_size--; 2048 2049 /* 2050 * We may not get to the final release of nce_head if the lookup fails 2051 */ 2052 if (nce_head && list_empty(nce_head)) { 2053 radix_tree_delete(&sctx->name_cache, (unsigned long)nce->ino); 2054 kfree(nce_head); 2055 } 2056 } 2057 2058 static struct name_cache_entry *name_cache_search(struct send_ctx *sctx, 2059 u64 ino, u64 gen) 2060 { 2061 struct list_head *nce_head; 2062 struct name_cache_entry *cur; 2063 2064 nce_head = radix_tree_lookup(&sctx->name_cache, (unsigned long)ino); 2065 if (!nce_head) 2066 return NULL; 2067 2068 list_for_each_entry(cur, nce_head, radix_list) { 2069 if (cur->ino == ino && cur->gen == gen) 2070 return cur; 2071 } 2072 return NULL; 2073 } 2074 2075 /* 2076 * Removes the entry from the list and adds it back to the end. This marks the 2077 * entry as recently used so that name_cache_clean_unused does not remove it. 
2078 */ 2079 static void name_cache_used(struct send_ctx *sctx, struct name_cache_entry *nce) 2080 { 2081 list_del(&nce->list); 2082 list_add_tail(&nce->list, &sctx->name_cache_list); 2083 } 2084 2085 /* 2086 * Remove some entries from the beginning of name_cache_list. 2087 */ 2088 static void name_cache_clean_unused(struct send_ctx *sctx) 2089 { 2090 struct name_cache_entry *nce; 2091 2092 if (sctx->name_cache_size < SEND_CTX_NAME_CACHE_CLEAN_SIZE) 2093 return; 2094 2095 while (sctx->name_cache_size > SEND_CTX_MAX_NAME_CACHE_SIZE) { 2096 nce = list_entry(sctx->name_cache_list.next, 2097 struct name_cache_entry, list); 2098 name_cache_delete(sctx, nce); 2099 kfree(nce); 2100 } 2101 } 2102 2103 static void name_cache_free(struct send_ctx *sctx) 2104 { 2105 struct name_cache_entry *nce; 2106 2107 while (!list_empty(&sctx->name_cache_list)) { 2108 nce = list_entry(sctx->name_cache_list.next, 2109 struct name_cache_entry, list); 2110 name_cache_delete(sctx, nce); 2111 kfree(nce); 2112 } 2113 } 2114 2115 /* 2116 * Used by get_cur_path for each ref up to the root. 2117 * Returns 0 if it succeeded. 2118 * Returns 1 if the inode is not existent or got overwritten. In that case, the 2119 * name is an orphan name. This instructs get_cur_path to stop iterating. If 1 2120 * is returned, parent_ino/parent_gen are not guaranteed to be valid. 2121 * Returns <0 in case of error. 2122 */ 2123 static int __get_cur_name_and_parent(struct send_ctx *sctx, 2124 u64 ino, u64 gen, 2125 u64 *parent_ino, 2126 u64 *parent_gen, 2127 struct fs_path *dest) 2128 { 2129 int ret; 2130 int nce_ret; 2131 struct name_cache_entry *nce = NULL; 2132 2133 /* 2134 * First check if we already did a call to this function with the same 2135 * ino/gen. If yes, check if the cache entry is still up-to-date. If yes 2136 * return the cached result. 2137 */ 2138 nce = name_cache_search(sctx, ino, gen); 2139 if (nce) { 2140 if (ino < sctx->send_progress && nce->need_later_update) { 2141 name_cache_delete(sctx, nce); 2142 kfree(nce); 2143 nce = NULL; 2144 } else { 2145 name_cache_used(sctx, nce); 2146 *parent_ino = nce->parent_ino; 2147 *parent_gen = nce->parent_gen; 2148 ret = fs_path_add(dest, nce->name, nce->name_len); 2149 if (ret < 0) 2150 goto out; 2151 ret = nce->ret; 2152 goto out; 2153 } 2154 } 2155 2156 /* 2157 * If the inode is not existent yet, add the orphan name and return 1. 2158 * This should only happen for the parent dir that we determine in 2159 * __record_new_ref 2160 */ 2161 ret = is_inode_existent(sctx, ino, gen); 2162 if (ret < 0) 2163 goto out; 2164 2165 if (!ret) { 2166 ret = gen_unique_name(sctx, ino, gen, dest); 2167 if (ret < 0) 2168 goto out; 2169 ret = 1; 2170 goto out_cache; 2171 } 2172 2173 /* 2174 * Depending on whether the inode was already processed or not, use 2175 * send_root or parent_root for ref lookup. 2176 */ 2177 if (ino < sctx->send_progress) 2178 ret = get_first_ref(sctx->send_root, ino, 2179 parent_ino, parent_gen, dest); 2180 else 2181 ret = get_first_ref(sctx->parent_root, ino, 2182 parent_ino, parent_gen, dest); 2183 if (ret < 0) 2184 goto out; 2185 2186 /* 2187 * Check if the ref was overwritten by an inode's ref that was processed 2188 * earlier. If yes, treat as orphan and return 1. 
2189 */ 2190 ret = did_overwrite_ref(sctx, *parent_ino, *parent_gen, ino, gen, 2191 dest->start, dest->end - dest->start); 2192 if (ret < 0) 2193 goto out; 2194 if (ret) { 2195 fs_path_reset(dest); 2196 ret = gen_unique_name(sctx, ino, gen, dest); 2197 if (ret < 0) 2198 goto out; 2199 ret = 1; 2200 } 2201 2202 out_cache: 2203 /* 2204 * Store the result of the lookup in the name cache. 2205 */ 2206 nce = kmalloc(sizeof(*nce) + fs_path_len(dest) + 1, GFP_KERNEL); 2207 if (!nce) { 2208 ret = -ENOMEM; 2209 goto out; 2210 } 2211 2212 nce->ino = ino; 2213 nce->gen = gen; 2214 nce->parent_ino = *parent_ino; 2215 nce->parent_gen = *parent_gen; 2216 nce->name_len = fs_path_len(dest); 2217 nce->ret = ret; 2218 strcpy(nce->name, dest->start); 2219 2220 if (ino < sctx->send_progress) 2221 nce->need_later_update = 0; 2222 else 2223 nce->need_later_update = 1; 2224 2225 nce_ret = name_cache_insert(sctx, nce); 2226 if (nce_ret < 0) 2227 ret = nce_ret; 2228 name_cache_clean_unused(sctx); 2229 2230 out: 2231 return ret; 2232 } 2233 2234 /* 2235 * Magic happens here. This function returns the first ref to an inode as it 2236 * would look like while receiving the stream at this point in time. 2237 * We walk the path up to the root. For every inode in between, we check if it 2238 * was already processed/sent. If yes, we continue with the parent as found 2239 * in send_root. If not, we continue with the parent as found in parent_root. 2240 * If we encounter an inode that was deleted at this point in time, we use the 2241 * inode's "orphan" name instead of the real name and stop. Same with new inodes 2242 * that were not created yet and overwritten inodes/refs. 2243 * 2244 * When do we have orphan inodes: 2245 * 1. When an inode is freshly created and thus no valid refs are available yet 2246 * 2. When a directory lost all its refs (deleted) but still has dir items 2247 * inside which were not processed yet (pending for move/delete). If anyone 2248 * tried to get the path to the dir items, it would get a path inside that 2249 * orphan directory. 2250 * 3. When an inode is moved around or gets new links, it may overwrite the ref 2251 * of an unprocessed inode. If in that case the first ref would be 2252 * overwritten, the overwritten inode gets "orphanized". Later when we 2253 * process this overwritten inode, it is restored at a new place by moving 2254 * the orphan inode. 2255 * 2256 * sctx->send_progress tells this function at which point in time receiving 2257 * would be.
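 *
 * Illustrative walk: for an inode reached via /a/b/file, where "a" was
 * already processed but "b" and "file" were not, the loop below resolves
 * "file" and "b" through parent_root and "a" through send_root, builds the
 * path reversed and finally unreverses it into a/b/file.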
2258 */ 2259 static int get_cur_path(struct send_ctx *sctx, u64 ino, u64 gen, 2260 struct fs_path *dest) 2261 { 2262 int ret = 0; 2263 struct fs_path *name = NULL; 2264 u64 parent_inode = 0; 2265 u64 parent_gen = 0; 2266 int stop = 0; 2267 2268 name = fs_path_alloc(); 2269 if (!name) { 2270 ret = -ENOMEM; 2271 goto out; 2272 } 2273 2274 dest->reversed = 1; 2275 fs_path_reset(dest); 2276 2277 while (!stop && ino != BTRFS_FIRST_FREE_OBJECTID) { 2278 struct waiting_dir_move *wdm; 2279 2280 fs_path_reset(name); 2281 2282 if (is_waiting_for_rm(sctx, ino)) { 2283 ret = gen_unique_name(sctx, ino, gen, name); 2284 if (ret < 0) 2285 goto out; 2286 ret = fs_path_add_path(dest, name); 2287 break; 2288 } 2289 2290 wdm = get_waiting_dir_move(sctx, ino); 2291 if (wdm && wdm->orphanized) { 2292 ret = gen_unique_name(sctx, ino, gen, name); 2293 stop = 1; 2294 } else if (wdm) { 2295 ret = get_first_ref(sctx->parent_root, ino, 2296 &parent_inode, &parent_gen, name); 2297 } else { 2298 ret = __get_cur_name_and_parent(sctx, ino, gen, 2299 &parent_inode, 2300 &parent_gen, name); 2301 if (ret) 2302 stop = 1; 2303 } 2304 2305 if (ret < 0) 2306 goto out; 2307 2308 ret = fs_path_add_path(dest, name); 2309 if (ret < 0) 2310 goto out; 2311 2312 ino = parent_inode; 2313 gen = parent_gen; 2314 } 2315 2316 out: 2317 fs_path_free(name); 2318 if (!ret) 2319 fs_path_unreverse(dest); 2320 return ret; 2321 } 2322 2323 /* 2324 * Sends a BTRFS_SEND_C_SUBVOL command/item to userspace 2325 */ 2326 static int send_subvol_begin(struct send_ctx *sctx) 2327 { 2328 int ret; 2329 struct btrfs_root *send_root = sctx->send_root; 2330 struct btrfs_root *parent_root = sctx->parent_root; 2331 struct btrfs_path *path; 2332 struct btrfs_key key; 2333 struct btrfs_root_ref *ref; 2334 struct extent_buffer *leaf; 2335 char *name = NULL; 2336 int namelen; 2337 2338 path = btrfs_alloc_path(); 2339 if (!path) 2340 return -ENOMEM; 2341 2342 name = kmalloc(BTRFS_PATH_NAME_MAX, GFP_KERNEL); 2343 if (!name) { 2344 btrfs_free_path(path); 2345 return -ENOMEM; 2346 } 2347 2348 key.objectid = send_root->objectid; 2349 key.type = BTRFS_ROOT_BACKREF_KEY; 2350 key.offset = 0; 2351 2352 ret = btrfs_search_slot_for_read(send_root->fs_info->tree_root, 2353 &key, path, 1, 0); 2354 if (ret < 0) 2355 goto out; 2356 if (ret) { 2357 ret = -ENOENT; 2358 goto out; 2359 } 2360 2361 leaf = path->nodes[0]; 2362 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 2363 if (key.type != BTRFS_ROOT_BACKREF_KEY || 2364 key.objectid != send_root->objectid) { 2365 ret = -ENOENT; 2366 goto out; 2367 } 2368 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref); 2369 namelen = btrfs_root_ref_name_len(leaf, ref); 2370 read_extent_buffer(leaf, name, (unsigned long)(ref + 1), namelen); 2371 btrfs_release_path(path); 2372 2373 if (parent_root) { 2374 ret = begin_cmd(sctx, BTRFS_SEND_C_SNAPSHOT); 2375 if (ret < 0) 2376 goto out; 2377 } else { 2378 ret = begin_cmd(sctx, BTRFS_SEND_C_SUBVOL); 2379 if (ret < 0) 2380 goto out; 2381 } 2382 2383 TLV_PUT_STRING(sctx, BTRFS_SEND_A_PATH, name, namelen); 2384 2385 if (!btrfs_is_empty_uuid(sctx->send_root->root_item.received_uuid)) 2386 TLV_PUT_UUID(sctx, BTRFS_SEND_A_UUID, 2387 sctx->send_root->root_item.received_uuid); 2388 else 2389 TLV_PUT_UUID(sctx, BTRFS_SEND_A_UUID, 2390 sctx->send_root->root_item.uuid); 2391 2392 TLV_PUT_U64(sctx, BTRFS_SEND_A_CTRANSID, 2393 le64_to_cpu(sctx->send_root->root_item.ctransid)); 2394 if (parent_root) { 2395 if (!btrfs_is_empty_uuid(parent_root->root_item.received_uuid)) 2396 TLV_PUT_UUID(sctx, 
BTRFS_SEND_A_CLONE_UUID, 2397 parent_root->root_item.received_uuid); 2398 else 2399 TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID, 2400 parent_root->root_item.uuid); 2401 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID, 2402 le64_to_cpu(sctx->parent_root->root_item.ctransid)); 2403 } 2404 2405 ret = send_cmd(sctx); 2406 2407 tlv_put_failure: 2408 out: 2409 btrfs_free_path(path); 2410 kfree(name); 2411 return ret; 2412 } 2413 2414 static int send_truncate(struct send_ctx *sctx, u64 ino, u64 gen, u64 size) 2415 { 2416 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info; 2417 int ret = 0; 2418 struct fs_path *p; 2419 2420 btrfs_debug(fs_info, "send_truncate %llu size=%llu", ino, size); 2421 2422 p = fs_path_alloc(); 2423 if (!p) 2424 return -ENOMEM; 2425 2426 ret = begin_cmd(sctx, BTRFS_SEND_C_TRUNCATE); 2427 if (ret < 0) 2428 goto out; 2429 2430 ret = get_cur_path(sctx, ino, gen, p); 2431 if (ret < 0) 2432 goto out; 2433 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); 2434 TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, size); 2435 2436 ret = send_cmd(sctx); 2437 2438 tlv_put_failure: 2439 out: 2440 fs_path_free(p); 2441 return ret; 2442 } 2443 2444 static int send_chmod(struct send_ctx *sctx, u64 ino, u64 gen, u64 mode) 2445 { 2446 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info; 2447 int ret = 0; 2448 struct fs_path *p; 2449 2450 btrfs_debug(fs_info, "send_chmod %llu mode=%llu", ino, mode); 2451 2452 p = fs_path_alloc(); 2453 if (!p) 2454 return -ENOMEM; 2455 2456 ret = begin_cmd(sctx, BTRFS_SEND_C_CHMOD); 2457 if (ret < 0) 2458 goto out; 2459 2460 ret = get_cur_path(sctx, ino, gen, p); 2461 if (ret < 0) 2462 goto out; 2463 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); 2464 TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, mode & 07777); 2465 2466 ret = send_cmd(sctx); 2467 2468 tlv_put_failure: 2469 out: 2470 fs_path_free(p); 2471 return ret; 2472 } 2473 2474 static int send_chown(struct send_ctx *sctx, u64 ino, u64 gen, u64 uid, u64 gid) 2475 { 2476 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info; 2477 int ret = 0; 2478 struct fs_path *p; 2479 2480 btrfs_debug(fs_info, "send_chown %llu uid=%llu, gid=%llu", 2481 ino, uid, gid); 2482 2483 p = fs_path_alloc(); 2484 if (!p) 2485 return -ENOMEM; 2486 2487 ret = begin_cmd(sctx, BTRFS_SEND_C_CHOWN); 2488 if (ret < 0) 2489 goto out; 2490 2491 ret = get_cur_path(sctx, ino, gen, p); 2492 if (ret < 0) 2493 goto out; 2494 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); 2495 TLV_PUT_U64(sctx, BTRFS_SEND_A_UID, uid); 2496 TLV_PUT_U64(sctx, BTRFS_SEND_A_GID, gid); 2497 2498 ret = send_cmd(sctx); 2499 2500 tlv_put_failure: 2501 out: 2502 fs_path_free(p); 2503 return ret; 2504 } 2505 2506 static int send_utimes(struct send_ctx *sctx, u64 ino, u64 gen) 2507 { 2508 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info; 2509 int ret = 0; 2510 struct fs_path *p = NULL; 2511 struct btrfs_inode_item *ii; 2512 struct btrfs_path *path = NULL; 2513 struct extent_buffer *eb; 2514 struct btrfs_key key; 2515 int slot; 2516 2517 btrfs_debug(fs_info, "send_utimes %llu", ino); 2518 2519 p = fs_path_alloc(); 2520 if (!p) 2521 return -ENOMEM; 2522 2523 path = alloc_path_for_send(); 2524 if (!path) { 2525 ret = -ENOMEM; 2526 goto out; 2527 } 2528 2529 key.objectid = ino; 2530 key.type = BTRFS_INODE_ITEM_KEY; 2531 key.offset = 0; 2532 ret = btrfs_search_slot(NULL, sctx->send_root, &key, path, 0, 0); 2533 if (ret > 0) 2534 ret = -ENOENT; 2535 if (ret < 0) 2536 goto out; 2537 2538 eb = path->nodes[0]; 2539 slot = path->slots[0]; 2540 ii = btrfs_item_ptr(eb, slot, struct btrfs_inode_item); 2541 
2542 ret = begin_cmd(sctx, BTRFS_SEND_C_UTIMES); 2543 if (ret < 0) 2544 goto out; 2545 2546 ret = get_cur_path(sctx, ino, gen, p); 2547 if (ret < 0) 2548 goto out; 2549 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); 2550 TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_ATIME, eb, &ii->atime); 2551 TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_MTIME, eb, &ii->mtime); 2552 TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_CTIME, eb, &ii->ctime); 2553 /* TODO Add otime support when the otime patches get into upstream */ 2554 2555 ret = send_cmd(sctx); 2556 2557 tlv_put_failure: 2558 out: 2559 fs_path_free(p); 2560 btrfs_free_path(path); 2561 return ret; 2562 } 2563 2564 /* 2565 * Sends a BTRFS_SEND_C_MKXXX or SYMLINK command to user space. We don't have 2566 * a valid path yet because we did not process the refs yet. So, the inode 2567 * is created as orphan. 2568 */ 2569 static int send_create_inode(struct send_ctx *sctx, u64 ino) 2570 { 2571 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info; 2572 int ret = 0; 2573 struct fs_path *p; 2574 int cmd; 2575 u64 gen; 2576 u64 mode; 2577 u64 rdev; 2578 2579 btrfs_debug(fs_info, "send_create_inode %llu", ino); 2580 2581 p = fs_path_alloc(); 2582 if (!p) 2583 return -ENOMEM; 2584 2585 if (ino != sctx->cur_ino) { 2586 ret = get_inode_info(sctx->send_root, ino, NULL, &gen, &mode, 2587 NULL, NULL, &rdev); 2588 if (ret < 0) 2589 goto out; 2590 } else { 2591 gen = sctx->cur_inode_gen; 2592 mode = sctx->cur_inode_mode; 2593 rdev = sctx->cur_inode_rdev; 2594 } 2595 2596 if (S_ISREG(mode)) { 2597 cmd = BTRFS_SEND_C_MKFILE; 2598 } else if (S_ISDIR(mode)) { 2599 cmd = BTRFS_SEND_C_MKDIR; 2600 } else if (S_ISLNK(mode)) { 2601 cmd = BTRFS_SEND_C_SYMLINK; 2602 } else if (S_ISCHR(mode) || S_ISBLK(mode)) { 2603 cmd = BTRFS_SEND_C_MKNOD; 2604 } else if (S_ISFIFO(mode)) { 2605 cmd = BTRFS_SEND_C_MKFIFO; 2606 } else if (S_ISSOCK(mode)) { 2607 cmd = BTRFS_SEND_C_MKSOCK; 2608 } else { 2609 btrfs_warn(sctx->send_root->fs_info, "unexpected inode type %o", 2610 (int)(mode & S_IFMT)); 2611 ret = -EOPNOTSUPP; 2612 goto out; 2613 } 2614 2615 ret = begin_cmd(sctx, cmd); 2616 if (ret < 0) 2617 goto out; 2618 2619 ret = gen_unique_name(sctx, ino, gen, p); 2620 if (ret < 0) 2621 goto out; 2622 2623 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); 2624 TLV_PUT_U64(sctx, BTRFS_SEND_A_INO, ino); 2625 2626 if (S_ISLNK(mode)) { 2627 fs_path_reset(p); 2628 ret = read_symlink(sctx->send_root, ino, p); 2629 if (ret < 0) 2630 goto out; 2631 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, p); 2632 } else if (S_ISCHR(mode) || S_ISBLK(mode) || 2633 S_ISFIFO(mode) || S_ISSOCK(mode)) { 2634 TLV_PUT_U64(sctx, BTRFS_SEND_A_RDEV, new_encode_dev(rdev)); 2635 TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, mode); 2636 } 2637 2638 ret = send_cmd(sctx); 2639 if (ret < 0) 2640 goto out; 2641 2642 2643 tlv_put_failure: 2644 out: 2645 fs_path_free(p); 2646 return ret; 2647 } 2648 2649 /* 2650 * We need some special handling for inodes that get processed before the parent 2651 * directory got created. See process_recorded_refs for details. 2652 * This function does the check if we already created the dir out of order. 
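 *
 * Example: a file with ino 260 lives in a directory with ino 261. While
 * processing 260 we had to create 261 ahead of time to get a valid parent
 * path. When 261 itself is processed later, this check reports that the
 * directory already exists, so send_create_inode_if_needed() skips the
 * MKDIR.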
2653 */ 2654 static int did_create_dir(struct send_ctx *sctx, u64 dir) 2655 { 2656 int ret = 0; 2657 struct btrfs_path *path = NULL; 2658 struct btrfs_key key; 2659 struct btrfs_key found_key; 2660 struct btrfs_key di_key; 2661 struct extent_buffer *eb; 2662 struct btrfs_dir_item *di; 2663 int slot; 2664 2665 path = alloc_path_for_send(); 2666 if (!path) { 2667 ret = -ENOMEM; 2668 goto out; 2669 } 2670 2671 key.objectid = dir; 2672 key.type = BTRFS_DIR_INDEX_KEY; 2673 key.offset = 0; 2674 ret = btrfs_search_slot(NULL, sctx->send_root, &key, path, 0, 0); 2675 if (ret < 0) 2676 goto out; 2677 2678 while (1) { 2679 eb = path->nodes[0]; 2680 slot = path->slots[0]; 2681 if (slot >= btrfs_header_nritems(eb)) { 2682 ret = btrfs_next_leaf(sctx->send_root, path); 2683 if (ret < 0) { 2684 goto out; 2685 } else if (ret > 0) { 2686 ret = 0; 2687 break; 2688 } 2689 continue; 2690 } 2691 2692 btrfs_item_key_to_cpu(eb, &found_key, slot); 2693 if (found_key.objectid != key.objectid || 2694 found_key.type != key.type) { 2695 ret = 0; 2696 goto out; 2697 } 2698 2699 di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item); 2700 btrfs_dir_item_key_to_cpu(eb, di, &di_key); 2701 2702 if (di_key.type != BTRFS_ROOT_ITEM_KEY && 2703 di_key.objectid < sctx->send_progress) { 2704 ret = 1; 2705 goto out; 2706 } 2707 2708 path->slots[0]++; 2709 } 2710 2711 out: 2712 btrfs_free_path(path); 2713 return ret; 2714 } 2715 2716 /* 2717 * Only creates the inode if it is: 2718 * 1. Not a directory 2719 * 2. Or a directory which was not created already due to out of order 2720 * directories. See did_create_dir and process_recorded_refs for details. 2721 */ 2722 static int send_create_inode_if_needed(struct send_ctx *sctx) 2723 { 2724 int ret; 2725 2726 if (S_ISDIR(sctx->cur_inode_mode)) { 2727 ret = did_create_dir(sctx, sctx->cur_ino); 2728 if (ret < 0) 2729 goto out; 2730 if (ret) { 2731 ret = 0; 2732 goto out; 2733 } 2734 } 2735 2736 ret = send_create_inode(sctx, sctx->cur_ino); 2737 if (ret < 0) 2738 goto out; 2739 2740 out: 2741 return ret; 2742 } 2743 2744 struct recorded_ref { 2745 struct list_head list; 2746 char *name; 2747 struct fs_path *full_path; 2748 u64 dir; 2749 u64 dir_gen; 2750 int name_len; 2751 }; 2752 2753 static void set_ref_path(struct recorded_ref *ref, struct fs_path *path) 2754 { 2755 ref->full_path = path; 2756 ref->name = (char *)kbasename(ref->full_path->start); 2757 ref->name_len = ref->full_path->end - ref->name; 2758 } 2759 2760 /* 2761 * We need to process new refs before deleted refs, but compare_tree gives us 2762 * everything mixed. So we first record all refs and later process them. 2763 * This function is a helper to record one ref. 
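 *
 * Example: a moved directory shows up as one new ref plus one deleted ref
 * for the same inode. Recording both before acting lets
 * process_recorded_refs() turn the pair into a single rename and ignore
 * the deleted ref, instead of deleting and re-creating the directory.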
2764 */ 2765 static int __record_ref(struct list_head *head, u64 dir, 2766 u64 dir_gen, struct fs_path *path) 2767 { 2768 struct recorded_ref *ref; 2769 2770 ref = kmalloc(sizeof(*ref), GFP_KERNEL); 2771 if (!ref) 2772 return -ENOMEM; 2773 2774 ref->dir = dir; 2775 ref->dir_gen = dir_gen; 2776 set_ref_path(ref, path); 2777 list_add_tail(&ref->list, head); 2778 return 0; 2779 } 2780 2781 static int dup_ref(struct recorded_ref *ref, struct list_head *list) 2782 { 2783 struct recorded_ref *new; 2784 2785 new = kmalloc(sizeof(*ref), GFP_KERNEL); 2786 if (!new) 2787 return -ENOMEM; 2788 2789 new->dir = ref->dir; 2790 new->dir_gen = ref->dir_gen; 2791 new->full_path = NULL; 2792 INIT_LIST_HEAD(&new->list); 2793 list_add_tail(&new->list, list); 2794 return 0; 2795 } 2796 2797 static void __free_recorded_refs(struct list_head *head) 2798 { 2799 struct recorded_ref *cur; 2800 2801 while (!list_empty(head)) { 2802 cur = list_entry(head->next, struct recorded_ref, list); 2803 fs_path_free(cur->full_path); 2804 list_del(&cur->list); 2805 kfree(cur); 2806 } 2807 } 2808 2809 static void free_recorded_refs(struct send_ctx *sctx) 2810 { 2811 __free_recorded_refs(&sctx->new_refs); 2812 __free_recorded_refs(&sctx->deleted_refs); 2813 } 2814 2815 /* 2816 * Renames/moves a file/dir to its orphan name. Used when the first 2817 * ref of an unprocessed inode gets overwritten and for all non empty 2818 * directories. 2819 */ 2820 static int orphanize_inode(struct send_ctx *sctx, u64 ino, u64 gen, 2821 struct fs_path *path) 2822 { 2823 int ret; 2824 struct fs_path *orphan; 2825 2826 orphan = fs_path_alloc(); 2827 if (!orphan) 2828 return -ENOMEM; 2829 2830 ret = gen_unique_name(sctx, ino, gen, orphan); 2831 if (ret < 0) 2832 goto out; 2833 2834 ret = send_rename(sctx, path, orphan); 2835 2836 out: 2837 fs_path_free(orphan); 2838 return ret; 2839 } 2840 2841 static struct orphan_dir_info * 2842 add_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino) 2843 { 2844 struct rb_node **p = &sctx->orphan_dirs.rb_node; 2845 struct rb_node *parent = NULL; 2846 struct orphan_dir_info *entry, *odi; 2847 2848 while (*p) { 2849 parent = *p; 2850 entry = rb_entry(parent, struct orphan_dir_info, node); 2851 if (dir_ino < entry->ino) { 2852 p = &(*p)->rb_left; 2853 } else if (dir_ino > entry->ino) { 2854 p = &(*p)->rb_right; 2855 } else { 2856 return entry; 2857 } 2858 } 2859 2860 odi = kmalloc(sizeof(*odi), GFP_KERNEL); 2861 if (!odi) 2862 return ERR_PTR(-ENOMEM); 2863 odi->ino = dir_ino; 2864 odi->gen = 0; 2865 odi->last_dir_index_offset = 0; 2866 2867 rb_link_node(&odi->node, parent, p); 2868 rb_insert_color(&odi->node, &sctx->orphan_dirs); 2869 return odi; 2870 } 2871 2872 static struct orphan_dir_info * 2873 get_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino) 2874 { 2875 struct rb_node *n = sctx->orphan_dirs.rb_node; 2876 struct orphan_dir_info *entry; 2877 2878 while (n) { 2879 entry = rb_entry(n, struct orphan_dir_info, node); 2880 if (dir_ino < entry->ino) 2881 n = n->rb_left; 2882 else if (dir_ino > entry->ino) 2883 n = n->rb_right; 2884 else 2885 return entry; 2886 } 2887 return NULL; 2888 } 2889 2890 static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino) 2891 { 2892 struct orphan_dir_info *odi = get_orphan_dir_info(sctx, dir_ino); 2893 2894 return odi != NULL; 2895 } 2896 2897 static void free_orphan_dir_info(struct send_ctx *sctx, 2898 struct orphan_dir_info *odi) 2899 { 2900 if (!odi) 2901 return; 2902 rb_erase(&odi->node, &sctx->orphan_dirs); 2903 kfree(odi); 2904 } 2905 2906 /* 2907 * Returns 1 if a 
directory can be removed at this point in time. 2908 * We check this by iterating all dir items and checking if the inode behind 2909 * the dir item was already processed. 2910 */ 2911 static int can_rmdir(struct send_ctx *sctx, u64 dir, u64 dir_gen, 2912 u64 send_progress) 2913 { 2914 int ret = 0; 2915 struct btrfs_root *root = sctx->parent_root; 2916 struct btrfs_path *path; 2917 struct btrfs_key key; 2918 struct btrfs_key found_key; 2919 struct btrfs_key loc; 2920 struct btrfs_dir_item *di; 2921 struct orphan_dir_info *odi = NULL; 2922 2923 /* 2924 * Don't try to rmdir the top/root subvolume dir. 2925 */ 2926 if (dir == BTRFS_FIRST_FREE_OBJECTID) 2927 return 0; 2928 2929 path = alloc_path_for_send(); 2930 if (!path) 2931 return -ENOMEM; 2932 2933 key.objectid = dir; 2934 key.type = BTRFS_DIR_INDEX_KEY; 2935 key.offset = 0; 2936 2937 odi = get_orphan_dir_info(sctx, dir); 2938 if (odi) 2939 key.offset = odi->last_dir_index_offset; 2940 2941 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 2942 if (ret < 0) 2943 goto out; 2944 2945 while (1) { 2946 struct waiting_dir_move *dm; 2947 2948 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) { 2949 ret = btrfs_next_leaf(root, path); 2950 if (ret < 0) 2951 goto out; 2952 else if (ret > 0) 2953 break; 2954 continue; 2955 } 2956 btrfs_item_key_to_cpu(path->nodes[0], &found_key, 2957 path->slots[0]); 2958 if (found_key.objectid != key.objectid || 2959 found_key.type != key.type) 2960 break; 2961 2962 di = btrfs_item_ptr(path->nodes[0], path->slots[0], 2963 struct btrfs_dir_item); 2964 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &loc); 2965 2966 dm = get_waiting_dir_move(sctx, loc.objectid); 2967 if (dm) { 2968 odi = add_orphan_dir_info(sctx, dir); 2969 if (IS_ERR(odi)) { 2970 ret = PTR_ERR(odi); 2971 goto out; 2972 } 2973 odi->gen = dir_gen; 2974 odi->last_dir_index_offset = found_key.offset; 2975 dm->rmdir_ino = dir; 2976 ret = 0; 2977 goto out; 2978 } 2979 2980 if (loc.objectid > send_progress) { 2981 odi = add_orphan_dir_info(sctx, dir); 2982 if (IS_ERR(odi)) { 2983 ret = PTR_ERR(odi); 2984 goto out; 2985 } 2986 odi->gen = dir_gen; 2987 odi->last_dir_index_offset = found_key.offset; 2988 ret = 0; 2989 goto out; 2990 } 2991 2992 path->slots[0]++; 2993 } 2994 free_orphan_dir_info(sctx, odi); 2995 2996 ret = 1; 2997 2998 out: 2999 btrfs_free_path(path); 3000 return ret; 3001 } 3002 3003 static int is_waiting_for_move(struct send_ctx *sctx, u64 ino) 3004 { 3005 struct waiting_dir_move *entry = get_waiting_dir_move(sctx, ino); 3006 3007 return entry != NULL; 3008 } 3009 3010 static int add_waiting_dir_move(struct send_ctx *sctx, u64 ino, bool orphanized) 3011 { 3012 struct rb_node **p = &sctx->waiting_dir_moves.rb_node; 3013 struct rb_node *parent = NULL; 3014 struct waiting_dir_move *entry, *dm; 3015 3016 dm = kmalloc(sizeof(*dm), GFP_KERNEL); 3017 if (!dm) 3018 return -ENOMEM; 3019 dm->ino = ino; 3020 dm->rmdir_ino = 0; 3021 dm->orphanized = orphanized; 3022 3023 while (*p) { 3024 parent = *p; 3025 entry = rb_entry(parent, struct waiting_dir_move, node); 3026 if (ino < entry->ino) { 3027 p = &(*p)->rb_left; 3028 } else if (ino > entry->ino) { 3029 p = &(*p)->rb_right; 3030 } else { 3031 kfree(dm); 3032 return -EEXIST; 3033 } 3034 } 3035 3036 rb_link_node(&dm->node, parent, p); 3037 rb_insert_color(&dm->node, &sctx->waiting_dir_moves); 3038 return 0; 3039 } 3040 3041 static struct waiting_dir_move * 3042 get_waiting_dir_move(struct send_ctx *sctx, u64 ino) 3043 { 3044 struct rb_node *n = sctx->waiting_dir_moves.rb_node; 3045 struct 
waiting_dir_move *entry; 3046 3047 while (n) { 3048 entry = rb_entry(n, struct waiting_dir_move, node); 3049 if (ino < entry->ino) 3050 n = n->rb_left; 3051 else if (ino > entry->ino) 3052 n = n->rb_right; 3053 else 3054 return entry; 3055 } 3056 return NULL; 3057 } 3058 3059 static void free_waiting_dir_move(struct send_ctx *sctx, 3060 struct waiting_dir_move *dm) 3061 { 3062 if (!dm) 3063 return; 3064 rb_erase(&dm->node, &sctx->waiting_dir_moves); 3065 kfree(dm); 3066 } 3067 3068 static int add_pending_dir_move(struct send_ctx *sctx, 3069 u64 ino, 3070 u64 ino_gen, 3071 u64 parent_ino, 3072 struct list_head *new_refs, 3073 struct list_head *deleted_refs, 3074 const bool is_orphan) 3075 { 3076 struct rb_node **p = &sctx->pending_dir_moves.rb_node; 3077 struct rb_node *parent = NULL; 3078 struct pending_dir_move *entry = NULL, *pm; 3079 struct recorded_ref *cur; 3080 int exists = 0; 3081 int ret; 3082 3083 pm = kmalloc(sizeof(*pm), GFP_KERNEL); 3084 if (!pm) 3085 return -ENOMEM; 3086 pm->parent_ino = parent_ino; 3087 pm->ino = ino; 3088 pm->gen = ino_gen; 3089 INIT_LIST_HEAD(&pm->list); 3090 INIT_LIST_HEAD(&pm->update_refs); 3091 RB_CLEAR_NODE(&pm->node); 3092 3093 while (*p) { 3094 parent = *p; 3095 entry = rb_entry(parent, struct pending_dir_move, node); 3096 if (parent_ino < entry->parent_ino) { 3097 p = &(*p)->rb_left; 3098 } else if (parent_ino > entry->parent_ino) { 3099 p = &(*p)->rb_right; 3100 } else { 3101 exists = 1; 3102 break; 3103 } 3104 } 3105 3106 list_for_each_entry(cur, deleted_refs, list) { 3107 ret = dup_ref(cur, &pm->update_refs); 3108 if (ret < 0) 3109 goto out; 3110 } 3111 list_for_each_entry(cur, new_refs, list) { 3112 ret = dup_ref(cur, &pm->update_refs); 3113 if (ret < 0) 3114 goto out; 3115 } 3116 3117 ret = add_waiting_dir_move(sctx, pm->ino, is_orphan); 3118 if (ret) 3119 goto out; 3120 3121 if (exists) { 3122 list_add_tail(&pm->list, &entry->list); 3123 } else { 3124 rb_link_node(&pm->node, parent, p); 3125 rb_insert_color(&pm->node, &sctx->pending_dir_moves); 3126 } 3127 ret = 0; 3128 out: 3129 if (ret) { 3130 __free_recorded_refs(&pm->update_refs); 3131 kfree(pm); 3132 } 3133 return ret; 3134 } 3135 3136 static struct pending_dir_move *get_pending_dir_moves(struct send_ctx *sctx, 3137 u64 parent_ino) 3138 { 3139 struct rb_node *n = sctx->pending_dir_moves.rb_node; 3140 struct pending_dir_move *entry; 3141 3142 while (n) { 3143 entry = rb_entry(n, struct pending_dir_move, node); 3144 if (parent_ino < entry->parent_ino) 3145 n = n->rb_left; 3146 else if (parent_ino > entry->parent_ino) 3147 n = n->rb_right; 3148 else 3149 return entry; 3150 } 3151 return NULL; 3152 } 3153 3154 static int path_loop(struct send_ctx *sctx, struct fs_path *name, 3155 u64 ino, u64 gen, u64 *ancestor_ino) 3156 { 3157 int ret = 0; 3158 u64 parent_inode = 0; 3159 u64 parent_gen = 0; 3160 u64 start_ino = ino; 3161 3162 *ancestor_ino = 0; 3163 while (ino != BTRFS_FIRST_FREE_OBJECTID) { 3164 fs_path_reset(name); 3165 3166 if (is_waiting_for_rm(sctx, ino)) 3167 break; 3168 if (is_waiting_for_move(sctx, ino)) { 3169 if (*ancestor_ino == 0) 3170 *ancestor_ino = ino; 3171 ret = get_first_ref(sctx->parent_root, ino, 3172 &parent_inode, &parent_gen, name); 3173 } else { 3174 ret = __get_cur_name_and_parent(sctx, ino, gen, 3175 &parent_inode, 3176 &parent_gen, name); 3177 if (ret > 0) { 3178 ret = 0; 3179 break; 3180 } 3181 } 3182 if (ret < 0) 3183 break; 3184 if (parent_inode == start_ino) { 3185 ret = 1; 3186 if (*ancestor_ino == 0) 3187 *ancestor_ino = ino; 3188 break; 3189 } 3190 ino = 
parent_inode; 3191 gen = parent_gen; 3192 } 3193 return ret; 3194 } 3195 3196 static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm) 3197 { 3198 struct fs_path *from_path = NULL; 3199 struct fs_path *to_path = NULL; 3200 struct fs_path *name = NULL; 3201 u64 orig_progress = sctx->send_progress; 3202 struct recorded_ref *cur; 3203 u64 parent_ino, parent_gen; 3204 struct waiting_dir_move *dm = NULL; 3205 u64 rmdir_ino = 0; 3206 u64 ancestor; 3207 bool is_orphan; 3208 int ret; 3209 3210 name = fs_path_alloc(); 3211 from_path = fs_path_alloc(); 3212 if (!name || !from_path) { 3213 ret = -ENOMEM; 3214 goto out; 3215 } 3216 3217 dm = get_waiting_dir_move(sctx, pm->ino); 3218 ASSERT(dm); 3219 rmdir_ino = dm->rmdir_ino; 3220 is_orphan = dm->orphanized; 3221 free_waiting_dir_move(sctx, dm); 3222 3223 if (is_orphan) { 3224 ret = gen_unique_name(sctx, pm->ino, 3225 pm->gen, from_path); 3226 } else { 3227 ret = get_first_ref(sctx->parent_root, pm->ino, 3228 &parent_ino, &parent_gen, name); 3229 if (ret < 0) 3230 goto out; 3231 ret = get_cur_path(sctx, parent_ino, parent_gen, 3232 from_path); 3233 if (ret < 0) 3234 goto out; 3235 ret = fs_path_add_path(from_path, name); 3236 } 3237 if (ret < 0) 3238 goto out; 3239 3240 sctx->send_progress = sctx->cur_ino + 1; 3241 ret = path_loop(sctx, name, pm->ino, pm->gen, &ancestor); 3242 if (ret < 0) 3243 goto out; 3244 if (ret) { 3245 LIST_HEAD(deleted_refs); 3246 ASSERT(ancestor > BTRFS_FIRST_FREE_OBJECTID); 3247 ret = add_pending_dir_move(sctx, pm->ino, pm->gen, ancestor, 3248 &pm->update_refs, &deleted_refs, 3249 is_orphan); 3250 if (ret < 0) 3251 goto out; 3252 if (rmdir_ino) { 3253 dm = get_waiting_dir_move(sctx, pm->ino); 3254 ASSERT(dm); 3255 dm->rmdir_ino = rmdir_ino; 3256 } 3257 goto out; 3258 } 3259 fs_path_reset(name); 3260 to_path = name; 3261 name = NULL; 3262 ret = get_cur_path(sctx, pm->ino, pm->gen, to_path); 3263 if (ret < 0) 3264 goto out; 3265 3266 ret = send_rename(sctx, from_path, to_path); 3267 if (ret < 0) 3268 goto out; 3269 3270 if (rmdir_ino) { 3271 struct orphan_dir_info *odi; 3272 u64 gen; 3273 3274 odi = get_orphan_dir_info(sctx, rmdir_ino); 3275 if (!odi) { 3276 /* already deleted */ 3277 goto finish; 3278 } 3279 gen = odi->gen; 3280 3281 ret = can_rmdir(sctx, rmdir_ino, gen, sctx->cur_ino); 3282 if (ret < 0) 3283 goto out; 3284 if (!ret) 3285 goto finish; 3286 3287 name = fs_path_alloc(); 3288 if (!name) { 3289 ret = -ENOMEM; 3290 goto out; 3291 } 3292 ret = get_cur_path(sctx, rmdir_ino, gen, name); 3293 if (ret < 0) 3294 goto out; 3295 ret = send_rmdir(sctx, name); 3296 if (ret < 0) 3297 goto out; 3298 } 3299 3300 finish: 3301 ret = send_utimes(sctx, pm->ino, pm->gen); 3302 if (ret < 0) 3303 goto out; 3304 3305 /* 3306 * After rename/move, need to update the utimes of both new parent(s) 3307 * and old parent(s). 
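 * update_refs was filled in add_pending_dir_move() with copies of both the
 * deleted refs (the old parents) and the new refs (the new parents), so a
 * single walk over the list covers both sides.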
3308 */ 3309 list_for_each_entry(cur, &pm->update_refs, list) { 3310 /* 3311 * The parent inode might have been deleted in the send snapshot 3312 */ 3313 ret = get_inode_info(sctx->send_root, cur->dir, NULL, 3314 NULL, NULL, NULL, NULL, NULL); 3315 if (ret == -ENOENT) { 3316 ret = 0; 3317 continue; 3318 } 3319 if (ret < 0) 3320 goto out; 3321 3322 ret = send_utimes(sctx, cur->dir, cur->dir_gen); 3323 if (ret < 0) 3324 goto out; 3325 } 3326 3327 out: 3328 fs_path_free(name); 3329 fs_path_free(from_path); 3330 fs_path_free(to_path); 3331 sctx->send_progress = orig_progress; 3332 3333 return ret; 3334 } 3335 3336 static void free_pending_move(struct send_ctx *sctx, struct pending_dir_move *m) 3337 { 3338 if (!list_empty(&m->list)) 3339 list_del(&m->list); 3340 if (!RB_EMPTY_NODE(&m->node)) 3341 rb_erase(&m->node, &sctx->pending_dir_moves); 3342 __free_recorded_refs(&m->update_refs); 3343 kfree(m); 3344 } 3345 3346 static void tail_append_pending_moves(struct pending_dir_move *moves, 3347 struct list_head *stack) 3348 { 3349 if (list_empty(&moves->list)) { 3350 list_add_tail(&moves->list, stack); 3351 } else { 3352 LIST_HEAD(list); 3353 list_splice_init(&moves->list, &list); 3354 list_add_tail(&moves->list, stack); 3355 list_splice_tail(&list, stack); 3356 } 3357 } 3358 3359 static int apply_children_dir_moves(struct send_ctx *sctx) 3360 { 3361 struct pending_dir_move *pm; 3362 struct list_head stack; 3363 u64 parent_ino = sctx->cur_ino; 3364 int ret = 0; 3365 3366 pm = get_pending_dir_moves(sctx, parent_ino); 3367 if (!pm) 3368 return 0; 3369 3370 INIT_LIST_HEAD(&stack); 3371 tail_append_pending_moves(pm, &stack); 3372 3373 while (!list_empty(&stack)) { 3374 pm = list_first_entry(&stack, struct pending_dir_move, list); 3375 parent_ino = pm->ino; 3376 ret = apply_dir_move(sctx, pm); 3377 free_pending_move(sctx, pm); 3378 if (ret) 3379 goto out; 3380 pm = get_pending_dir_moves(sctx, parent_ino); 3381 if (pm) 3382 tail_append_pending_moves(pm, &stack); 3383 } 3384 return 0; 3385 3386 out: 3387 while (!list_empty(&stack)) { 3388 pm = list_first_entry(&stack, struct pending_dir_move, list); 3389 free_pending_move(sctx, pm); 3390 } 3391 return ret; 3392 } 3393 3394 /* 3395 * We might need to delay a directory rename even when no ancestor directory 3396 * (in the send root) with a higher inode number than ours (sctx->cur_ino) was 3397 * renamed. This happens when we rename a directory to the old name (the name 3398 * in the parent root) of some other unrelated directory that got its rename 3399 * delayed due to some ancestor with higher number that got renamed. 3400 * 3401 * Example: 3402 * 3403 * Parent snapshot: 3404 * . (ino 256) 3405 * |---- a/ (ino 257) 3406 * | |---- file (ino 260) 3407 * | 3408 * |---- b/ (ino 258) 3409 * |---- c/ (ino 259) 3410 * 3411 * Send snapshot: 3412 * . (ino 256) 3413 * |---- a/ (ino 258) 3414 * |---- x/ (ino 259) 3415 * |---- y/ (ino 257) 3416 * |----- file (ino 260) 3417 * 3418 * Here we can not rename 258 from 'b' to 'a' without the rename of inode 257 3419 * from 'a' to 'x/y' happening first, which in turn depends on the rename of 3420 * inode 259 from 'c' to 'x'. So the order of rename commands the send stream 3421 * must issue is: 3422 * 3423 * 1 - rename 259 from 'c' to 'x' 3424 * 2 - rename 257 from 'a' to 'x/y' 3425 * 3 - rename 258 from 'b' to 'a' 3426 * 3427 * Returns 1 if the rename of sctx->cur_ino needs to be delayed, 0 if it can 3428 * be done right away and < 0 on error. 
3429 */ 3430 static int wait_for_dest_dir_move(struct send_ctx *sctx, 3431 struct recorded_ref *parent_ref, 3432 const bool is_orphan) 3433 { 3434 struct btrfs_fs_info *fs_info = sctx->parent_root->fs_info; 3435 struct btrfs_path *path; 3436 struct btrfs_key key; 3437 struct btrfs_key di_key; 3438 struct btrfs_dir_item *di; 3439 u64 left_gen; 3440 u64 right_gen; 3441 int ret = 0; 3442 struct waiting_dir_move *wdm; 3443 3444 if (RB_EMPTY_ROOT(&sctx->waiting_dir_moves)) 3445 return 0; 3446 3447 path = alloc_path_for_send(); 3448 if (!path) 3449 return -ENOMEM; 3450 3451 key.objectid = parent_ref->dir; 3452 key.type = BTRFS_DIR_ITEM_KEY; 3453 key.offset = btrfs_name_hash(parent_ref->name, parent_ref->name_len); 3454 3455 ret = btrfs_search_slot(NULL, sctx->parent_root, &key, path, 0, 0); 3456 if (ret < 0) { 3457 goto out; 3458 } else if (ret > 0) { 3459 ret = 0; 3460 goto out; 3461 } 3462 3463 di = btrfs_match_dir_item_name(fs_info, path, parent_ref->name, 3464 parent_ref->name_len); 3465 if (!di) { 3466 ret = 0; 3467 goto out; 3468 } 3469 /* 3470 * di_key.objectid has the number of the inode that has a dentry in the 3471 * parent directory with the same name that sctx->cur_ino is being 3472 * renamed to. We need to check if that inode is in the send root as 3473 * well and if it is currently marked as an inode with a pending rename, 3474 * if it is, we need to delay the rename of sctx->cur_ino as well, so 3475 * that it happens after that other inode is renamed. 3476 */ 3477 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &di_key); 3478 if (di_key.type != BTRFS_INODE_ITEM_KEY) { 3479 ret = 0; 3480 goto out; 3481 } 3482 3483 ret = get_inode_info(sctx->parent_root, di_key.objectid, NULL, 3484 &left_gen, NULL, NULL, NULL, NULL); 3485 if (ret < 0) 3486 goto out; 3487 ret = get_inode_info(sctx->send_root, di_key.objectid, NULL, 3488 &right_gen, NULL, NULL, NULL, NULL); 3489 if (ret < 0) { 3490 if (ret == -ENOENT) 3491 ret = 0; 3492 goto out; 3493 } 3494 3495 /* Different inode, no need to delay the rename of sctx->cur_ino */ 3496 if (right_gen != left_gen) { 3497 ret = 0; 3498 goto out; 3499 } 3500 3501 wdm = get_waiting_dir_move(sctx, di_key.objectid); 3502 if (wdm && !wdm->orphanized) { 3503 ret = add_pending_dir_move(sctx, 3504 sctx->cur_ino, 3505 sctx->cur_inode_gen, 3506 di_key.objectid, 3507 &sctx->new_refs, 3508 &sctx->deleted_refs, 3509 is_orphan); 3510 if (!ret) 3511 ret = 1; 3512 } 3513 out: 3514 btrfs_free_path(path); 3515 return ret; 3516 } 3517 3518 /* 3519 * Check if inode ino2, or any of its ancestors, is inode ino1. 3520 * Return 1 if true, 0 if false and < 0 on error. 3521 */ 3522 static int check_ino_in_path(struct btrfs_root *root, 3523 const u64 ino1, 3524 const u64 ino1_gen, 3525 const u64 ino2, 3526 const u64 ino2_gen, 3527 struct fs_path *fs_path) 3528 { 3529 u64 ino = ino2; 3530 3531 if (ino1 == ino2) 3532 return ino1_gen == ino2_gen; 3533 3534 while (ino > BTRFS_FIRST_FREE_OBJECTID) { 3535 u64 parent; 3536 u64 parent_gen; 3537 int ret; 3538 3539 fs_path_reset(fs_path); 3540 ret = get_first_ref(root, ino, &parent, &parent_gen, fs_path); 3541 if (ret < 0) 3542 return ret; 3543 if (parent == ino1) 3544 return parent_gen == ino1_gen; 3545 ino = parent; 3546 } 3547 return 0; 3548 } 3549 3550 /* 3551 * Check if ino ino1 is an ancestor of inode ino2 in the given root for any 3552 * possible path (in case ino2 is not a directory and has multiple hard links). 3553 * Return 1 if true, 0 if false and < 0 on error. 
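 *
 * Example: if ino2 is a regular file with hard links /a/x and /b/x, and
 * ino1 is the directory /b, then ino1 is an ancestor of ino2 through the
 * second link even though the first link never passes through it. This is
 * why all INODE_REF/INODE_EXTREF items of ino2 are walked below.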
3554 */ 3555 static int is_ancestor(struct btrfs_root *root, 3556 const u64 ino1, 3557 const u64 ino1_gen, 3558 const u64 ino2, 3559 struct fs_path *fs_path) 3560 { 3561 bool free_fs_path = false; 3562 int ret = 0; 3563 struct btrfs_path *path = NULL; 3564 struct btrfs_key key; 3565 3566 if (!fs_path) { 3567 fs_path = fs_path_alloc(); 3568 if (!fs_path) 3569 return -ENOMEM; 3570 free_fs_path = true; 3571 } 3572 3573 path = alloc_path_for_send(); 3574 if (!path) { 3575 ret = -ENOMEM; 3576 goto out; 3577 } 3578 3579 key.objectid = ino2; 3580 key.type = BTRFS_INODE_REF_KEY; 3581 key.offset = 0; 3582 3583 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 3584 if (ret < 0) 3585 goto out; 3586 3587 while (true) { 3588 struct extent_buffer *leaf = path->nodes[0]; 3589 int slot = path->slots[0]; 3590 u32 cur_offset = 0; 3591 u32 item_size; 3592 3593 if (slot >= btrfs_header_nritems(leaf)) { 3594 ret = btrfs_next_leaf(root, path); 3595 if (ret < 0) 3596 goto out; 3597 if (ret > 0) 3598 break; 3599 continue; 3600 } 3601 3602 btrfs_item_key_to_cpu(leaf, &key, slot); 3603 if (key.objectid != ino2) 3604 break; 3605 if (key.type != BTRFS_INODE_REF_KEY && 3606 key.type != BTRFS_INODE_EXTREF_KEY) 3607 break; 3608 3609 item_size = btrfs_item_size_nr(leaf, slot); 3610 while (cur_offset < item_size) { 3611 u64 parent; 3612 u64 parent_gen; 3613 3614 if (key.type == BTRFS_INODE_EXTREF_KEY) { 3615 unsigned long ptr; 3616 struct btrfs_inode_extref *extref; 3617 3618 ptr = btrfs_item_ptr_offset(leaf, slot); 3619 extref = (struct btrfs_inode_extref *) 3620 (ptr + cur_offset); 3621 parent = btrfs_inode_extref_parent(leaf, 3622 extref); 3623 cur_offset += sizeof(*extref); 3624 cur_offset += btrfs_inode_extref_name_len(leaf, 3625 extref); 3626 } else { 3627 parent = key.offset; 3628 cur_offset = item_size; 3629 } 3630 3631 ret = get_inode_info(root, parent, NULL, &parent_gen, 3632 NULL, NULL, NULL, NULL); 3633 if (ret < 0) 3634 goto out; 3635 ret = check_ino_in_path(root, ino1, ino1_gen, 3636 parent, parent_gen, fs_path); 3637 if (ret) 3638 goto out; 3639 } 3640 path->slots[0]++; 3641 } 3642 ret = 0; 3643 out: 3644 btrfs_free_path(path); 3645 if (free_fs_path) 3646 fs_path_free(fs_path); 3647 return ret; 3648 } 3649 3650 static int wait_for_parent_move(struct send_ctx *sctx, 3651 struct recorded_ref *parent_ref, 3652 const bool is_orphan) 3653 { 3654 int ret = 0; 3655 u64 ino = parent_ref->dir; 3656 u64 ino_gen = parent_ref->dir_gen; 3657 u64 parent_ino_before, parent_ino_after; 3658 struct fs_path *path_before = NULL; 3659 struct fs_path *path_after = NULL; 3660 int len1, len2; 3661 3662 path_after = fs_path_alloc(); 3663 path_before = fs_path_alloc(); 3664 if (!path_after || !path_before) { 3665 ret = -ENOMEM; 3666 goto out; 3667 } 3668 3669 /* 3670 * Our current directory inode may not yet be renamed/moved because some 3671 * ancestor (immediate or not) has to be renamed/moved first. So find if 3672 * such ancestor exists and make sure our own rename/move happens after 3673 * that ancestor is processed to avoid path build infinite loops (done 3674 * at get_cur_path()). 
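 *
 * Example: cur_ino is to be moved into a directory with a higher inode
 * number whose own path differs between the parent and send snapshots.
 * That ancestor is not processed yet, so the rename of cur_ino is parked
 * in pending_dir_moves, keyed by the ancestor, and replayed by
 * apply_children_dir_moves() once the ancestor has been renamed.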
3675 */ 3676 while (ino > BTRFS_FIRST_FREE_OBJECTID) { 3677 u64 parent_ino_after_gen; 3678 3679 if (is_waiting_for_move(sctx, ino)) { 3680 /* 3681 * If the current inode is an ancestor of ino in the 3682 * parent root, we need to delay the rename of the 3683 * current inode, otherwise don't delay the rename 3684 * because we can end up with a circular dependency 3685 * of renames, resulting in some directories never 3686 * getting the respective rename operations issued in 3687 * the send stream or getting into infinite path build 3688 * loops. 3689 */ 3690 ret = is_ancestor(sctx->parent_root, 3691 sctx->cur_ino, sctx->cur_inode_gen, 3692 ino, path_before); 3693 if (ret) 3694 break; 3695 } 3696 3697 fs_path_reset(path_before); 3698 fs_path_reset(path_after); 3699 3700 ret = get_first_ref(sctx->send_root, ino, &parent_ino_after, 3701 &parent_ino_after_gen, path_after); 3702 if (ret < 0) 3703 goto out; 3704 ret = get_first_ref(sctx->parent_root, ino, &parent_ino_before, 3705 NULL, path_before); 3706 if (ret < 0 && ret != -ENOENT) { 3707 goto out; 3708 } else if (ret == -ENOENT) { 3709 ret = 0; 3710 break; 3711 } 3712 3713 len1 = fs_path_len(path_before); 3714 len2 = fs_path_len(path_after); 3715 if (ino > sctx->cur_ino && 3716 (parent_ino_before != parent_ino_after || len1 != len2 || 3717 memcmp(path_before->start, path_after->start, len1))) { 3718 u64 parent_ino_gen; 3719 3720 ret = get_inode_info(sctx->parent_root, ino, NULL, 3721 &parent_ino_gen, NULL, NULL, NULL, 3722 NULL); 3723 if (ret < 0) 3724 goto out; 3725 if (ino_gen == parent_ino_gen) { 3726 ret = 1; 3727 break; 3728 } 3729 } 3730 ino = parent_ino_after; 3731 ino_gen = parent_ino_after_gen; 3732 } 3733 3734 out: 3735 fs_path_free(path_before); 3736 fs_path_free(path_after); 3737 3738 if (ret == 1) { 3739 ret = add_pending_dir_move(sctx, 3740 sctx->cur_ino, 3741 sctx->cur_inode_gen, 3742 ino, 3743 &sctx->new_refs, 3744 &sctx->deleted_refs, 3745 is_orphan); 3746 if (!ret) 3747 ret = 1; 3748 } 3749 3750 return ret; 3751 } 3752 3753 static int update_ref_path(struct send_ctx *sctx, struct recorded_ref *ref) 3754 { 3755 int ret; 3756 struct fs_path *new_path; 3757 3758 /* 3759 * Our reference's name member points to its full_path member string, so 3760 * we use a new path here. 3761 */ 3762 new_path = fs_path_alloc(); 3763 if (!new_path) 3764 return -ENOMEM; 3765 3766 ret = get_cur_path(sctx, ref->dir, ref->dir_gen, new_path); 3767 if (ret < 0) { 3768 fs_path_free(new_path); 3769 return ret; 3770 } 3771 ret = fs_path_add(new_path, ref->name, ref->name_len); 3772 if (ret < 0) { 3773 fs_path_free(new_path); 3774 return ret; 3775 } 3776 3777 fs_path_free(ref->full_path); 3778 set_ref_path(ref, new_path); 3779 3780 return 0; 3781 } 3782 3783 /* 3784 * This does all the move/link/unlink/rmdir magic. 3785 */ 3786 static int process_recorded_refs(struct send_ctx *sctx, int *pending_move) 3787 { 3788 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info; 3789 int ret = 0; 3790 struct recorded_ref *cur; 3791 struct recorded_ref *cur2; 3792 struct list_head check_dirs; 3793 struct fs_path *valid_path = NULL; 3794 u64 ow_inode = 0; 3795 u64 ow_gen; 3796 u64 ow_mode; 3797 int did_overwrite = 0; 3798 int is_orphan = 0; 3799 u64 last_dir_ino_rm = 0; 3800 bool can_rename = true; 3801 bool orphanized_dir = false; 3802 bool orphanized_ancestor = false; 3803 3804 btrfs_debug(fs_info, "process_recorded_refs %llu", sctx->cur_ino); 3805 3806 /* 3807 * This should never happen as the root dir always has the same ref 3808 * which is always '..'
3809 */ 3810 BUG_ON(sctx->cur_ino <= BTRFS_FIRST_FREE_OBJECTID); 3811 INIT_LIST_HEAD(&check_dirs); 3812 3813 valid_path = fs_path_alloc(); 3814 if (!valid_path) { 3815 ret = -ENOMEM; 3816 goto out; 3817 } 3818 3819 /* 3820 * First, check if the first ref of the current inode was overwritten 3821 * before. If yes, we know that the current inode was already orphanized 3822 * and thus use the orphan name. If not, we can use get_cur_path to 3823 * get the path of the first ref as it would look like while receiving at 3824 * this point in time. 3825 * New inodes are always orphan at the beginning, so force to use the 3826 * orphan name in this case. 3827 * The first ref is stored in valid_path and will be updated if it 3828 * gets moved around. 3829 */ 3830 if (!sctx->cur_inode_new) { 3831 ret = did_overwrite_first_ref(sctx, sctx->cur_ino, 3832 sctx->cur_inode_gen); 3833 if (ret < 0) 3834 goto out; 3835 if (ret) 3836 did_overwrite = 1; 3837 } 3838 if (sctx->cur_inode_new || did_overwrite) { 3839 ret = gen_unique_name(sctx, sctx->cur_ino, 3840 sctx->cur_inode_gen, valid_path); 3841 if (ret < 0) 3842 goto out; 3843 is_orphan = 1; 3844 } else { 3845 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, 3846 valid_path); 3847 if (ret < 0) 3848 goto out; 3849 } 3850 3851 list_for_each_entry(cur, &sctx->new_refs, list) { 3852 /* 3853 * We may have refs where the parent directory does not exist 3854 * yet. This happens if the parent directory's inum is higher 3855 * than the current inum. To handle this case, we create the 3856 * parent directory out of order. But we need to check if this 3857 * did already happen before due to other refs in the same dir. 3858 */ 3859 ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen); 3860 if (ret < 0) 3861 goto out; 3862 if (ret == inode_state_will_create) { 3863 ret = 0; 3864 /* 3865 * First check if any of the current inode's refs did 3866 * already create the dir. 3867 */ 3868 list_for_each_entry(cur2, &sctx->new_refs, list) { 3869 if (cur == cur2) 3870 break; 3871 if (cur2->dir == cur->dir) { 3872 ret = 1; 3873 break; 3874 } 3875 } 3876 3877 /* 3878 * If that did not happen, check if a previous inode 3879 * did already create the dir. 3880 */ 3881 if (!ret) 3882 ret = did_create_dir(sctx, cur->dir); 3883 if (ret < 0) 3884 goto out; 3885 if (!ret) { 3886 ret = send_create_inode(sctx, cur->dir); 3887 if (ret < 0) 3888 goto out; 3889 } 3890 } 3891 3892 /* 3893 * Check if this new ref would overwrite the first ref of 3894 * another unprocessed inode. If yes, orphanize the 3895 * overwritten inode. If we find an overwritten ref that is 3896 * not the first ref, simply unlink it. 3897 */ 3898 ret = will_overwrite_ref(sctx, cur->dir, cur->dir_gen, 3899 cur->name, cur->name_len, 3900 &ow_inode, &ow_gen, &ow_mode); 3901 if (ret < 0) 3902 goto out; 3903 if (ret) { 3904 ret = is_first_ref(sctx->parent_root, 3905 ow_inode, cur->dir, cur->name, 3906 cur->name_len); 3907 if (ret < 0) 3908 goto out; 3909 if (ret) { 3910 struct name_cache_entry *nce; 3911 struct waiting_dir_move *wdm; 3912 3913 ret = orphanize_inode(sctx, ow_inode, ow_gen, 3914 cur->full_path); 3915 if (ret < 0) 3916 goto out; 3917 if (S_ISDIR(ow_mode)) 3918 orphanized_dir = true; 3919 3920 /* 3921 * If ow_inode has its rename operation delayed, 3922 * make sure that its orphanized name is used in 3923 * the source path when performing its rename 3924 * operation.
3925 */ 3926 if (is_waiting_for_move(sctx, ow_inode)) { 3927 wdm = get_waiting_dir_move(sctx, 3928 ow_inode); 3929 ASSERT(wdm); 3930 wdm->orphanized = true; 3931 } 3932 3933 /* 3934 * Make sure we clear our orphanized inode's 3935 * name from the name cache. This is because the 3936 * inode ow_inode might be an ancestor of some 3937 * other inode that will be orphanized as well 3938 * later and has an inode number greater than 3939 * sctx->send_progress. We need to prevent 3940 * future name lookups from using the old name 3941 * and get instead the orphan name. 3942 */ 3943 nce = name_cache_search(sctx, ow_inode, ow_gen); 3944 if (nce) { 3945 name_cache_delete(sctx, nce); 3946 kfree(nce); 3947 } 3948 3949 /* 3950 * ow_inode might currently be an ancestor of 3951 * cur_ino, therefore compute valid_path (the 3952 * current path of cur_ino) again because it 3953 * might contain the pre-orphanization name of 3954 * ow_inode, which is no longer valid. 3955 */ 3956 ret = is_ancestor(sctx->parent_root, 3957 ow_inode, ow_gen, 3958 sctx->cur_ino, NULL); 3959 if (ret > 0) { 3960 orphanized_ancestor = true; 3961 fs_path_reset(valid_path); 3962 ret = get_cur_path(sctx, sctx->cur_ino, 3963 sctx->cur_inode_gen, 3964 valid_path); 3965 } 3966 if (ret < 0) 3967 goto out; 3968 } else { 3969 ret = send_unlink(sctx, cur->full_path); 3970 if (ret < 0) 3971 goto out; 3972 } 3973 } 3974 3975 if (S_ISDIR(sctx->cur_inode_mode) && sctx->parent_root) { 3976 ret = wait_for_dest_dir_move(sctx, cur, is_orphan); 3977 if (ret < 0) 3978 goto out; 3979 if (ret == 1) { 3980 can_rename = false; 3981 *pending_move = 1; 3982 } 3983 } 3984 3985 if (S_ISDIR(sctx->cur_inode_mode) && sctx->parent_root && 3986 can_rename) { 3987 ret = wait_for_parent_move(sctx, cur, is_orphan); 3988 if (ret < 0) 3989 goto out; 3990 if (ret == 1) { 3991 can_rename = false; 3992 *pending_move = 1; 3993 } 3994 } 3995 3996 /* 3997 * link/move the ref to the new place. If we have an orphan 3998 * inode, move it and update valid_path. If not, link or move 3999 * it depending on the inode mode. 4000 */ 4001 if (is_orphan && can_rename) { 4002 ret = send_rename(sctx, valid_path, cur->full_path); 4003 if (ret < 0) 4004 goto out; 4005 is_orphan = 0; 4006 ret = fs_path_copy(valid_path, cur->full_path); 4007 if (ret < 0) 4008 goto out; 4009 } else if (can_rename) { 4010 if (S_ISDIR(sctx->cur_inode_mode)) { 4011 /* 4012 * Dirs can't be linked, so move it. For moved 4013 * dirs, we always have one new and one deleted 4014 * ref. The deleted ref is ignored later. 4015 */ 4016 ret = send_rename(sctx, valid_path, 4017 cur->full_path); 4018 if (!ret) 4019 ret = fs_path_copy(valid_path, 4020 cur->full_path); 4021 if (ret < 0) 4022 goto out; 4023 } else { 4024 /* 4025 * We might have previously orphanized an inode 4026 * which is an ancestor of our current inode, 4027 * so our reference's full path, which was 4028 * computed before any such orphanizations, must 4029 * be updated. 4030 */ 4031 if (orphanized_dir) { 4032 ret = update_ref_path(sctx, cur); 4033 if (ret < 0) 4034 goto out; 4035 } 4036 ret = send_link(sctx, cur->full_path, 4037 valid_path); 4038 if (ret < 0) 4039 goto out; 4040 } 4041 } 4042 ret = dup_ref(cur, &check_dirs); 4043 if (ret < 0) 4044 goto out; 4045 } 4046 4047 if (S_ISDIR(sctx->cur_inode_mode) && sctx->cur_inode_deleted) { 4048 /* 4049 * Check if we can already rmdir the directory. If not, 4050 * orphanize it. For every dir item inside that gets deleted 4051 * later, we do this check again and rmdir it then if possible. 
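 * Example: the deleted dir still contains an entry for an inode that was
 * not processed yet, so can_rmdir() returns 0; the dir is moved to its
 * orphan name now and the actual RMDIR is only issued once that child has
 * been dealt with.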
4052 * See the use of check_dirs for more details. 4053 */ 4054 ret = can_rmdir(sctx, sctx->cur_ino, sctx->cur_inode_gen, 4055 sctx->cur_ino); 4056 if (ret < 0) 4057 goto out; 4058 if (ret) { 4059 ret = send_rmdir(sctx, valid_path); 4060 if (ret < 0) 4061 goto out; 4062 } else if (!is_orphan) { 4063 ret = orphanize_inode(sctx, sctx->cur_ino, 4064 sctx->cur_inode_gen, valid_path); 4065 if (ret < 0) 4066 goto out; 4067 is_orphan = 1; 4068 } 4069 4070 list_for_each_entry(cur, &sctx->deleted_refs, list) { 4071 ret = dup_ref(cur, &check_dirs); 4072 if (ret < 0) 4073 goto out; 4074 } 4075 } else if (S_ISDIR(sctx->cur_inode_mode) && 4076 !list_empty(&sctx->deleted_refs)) { 4077 /* 4078 * We have a moved dir. Add the old parent to check_dirs 4079 */ 4080 cur = list_entry(sctx->deleted_refs.next, struct recorded_ref, 4081 list); 4082 ret = dup_ref(cur, &check_dirs); 4083 if (ret < 0) 4084 goto out; 4085 } else if (!S_ISDIR(sctx->cur_inode_mode)) { 4086 /* 4087 * We have a non dir inode. Go through all deleted refs and 4088 * unlink them if they were not already overwritten by other 4089 * inodes. 4090 */ 4091 list_for_each_entry(cur, &sctx->deleted_refs, list) { 4092 ret = did_overwrite_ref(sctx, cur->dir, cur->dir_gen, 4093 sctx->cur_ino, sctx->cur_inode_gen, 4094 cur->name, cur->name_len); 4095 if (ret < 0) 4096 goto out; 4097 if (!ret) { 4098 /* 4099 * If we orphanized any ancestor before, we need 4100 * to recompute the full path for deleted names, 4101 * since any such path was computed before we 4102 * processed any references and orphanized any 4103 * ancestor inode. 4104 */ 4105 if (orphanized_ancestor) { 4106 ret = update_ref_path(sctx, cur); 4107 if (ret < 0) 4108 goto out; 4109 } 4110 ret = send_unlink(sctx, cur->full_path); 4111 if (ret < 0) 4112 goto out; 4113 } 4114 ret = dup_ref(cur, &check_dirs); 4115 if (ret < 0) 4116 goto out; 4117 } 4118 /* 4119 * If the inode is still orphan, unlink the orphan. This may 4120 * happen when a previous inode did overwrite the first ref 4121 * of this inode and no new refs were added for the current 4122 * inode. Unlinking does not mean that the inode is deleted in 4123 * all cases. There may still be links to this inode in other 4124 * places. 4125 */ 4126 if (is_orphan) { 4127 ret = send_unlink(sctx, valid_path); 4128 if (ret < 0) 4129 goto out; 4130 } 4131 } 4132 4133 /* 4134 * We did collect all parent dirs where cur_inode was once located. We 4135 * now go through all these dirs and check if they are pending for 4136 * deletion and if it's finally possible to perform the rmdir now. 4137 * We also update the inode stats of the parent dirs here. 4138 */ 4139 list_for_each_entry(cur, &check_dirs, list) { 4140 /* 4141 * In case we had refs into dirs that were not processed yet, 4142 * we don't need to do the utime and rmdir logic for these dirs. 4143 * The dir will be processed later. 
4144 */ 4145 if (cur->dir > sctx->cur_ino) 4146 continue; 4147 4148 ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen); 4149 if (ret < 0) 4150 goto out; 4151 4152 if (ret == inode_state_did_create || 4153 ret == inode_state_no_change) { 4154 /* TODO delayed utimes */ 4155 ret = send_utimes(sctx, cur->dir, cur->dir_gen); 4156 if (ret < 0) 4157 goto out; 4158 } else if (ret == inode_state_did_delete && 4159 cur->dir != last_dir_ino_rm) { 4160 ret = can_rmdir(sctx, cur->dir, cur->dir_gen, 4161 sctx->cur_ino); 4162 if (ret < 0) 4163 goto out; 4164 if (ret) { 4165 ret = get_cur_path(sctx, cur->dir, 4166 cur->dir_gen, valid_path); 4167 if (ret < 0) 4168 goto out; 4169 ret = send_rmdir(sctx, valid_path); 4170 if (ret < 0) 4171 goto out; 4172 last_dir_ino_rm = cur->dir; 4173 } 4174 } 4175 } 4176 4177 ret = 0; 4178 4179 out: 4180 __free_recorded_refs(&check_dirs); 4181 free_recorded_refs(sctx); 4182 fs_path_free(valid_path); 4183 return ret; 4184 } 4185 4186 static int record_ref(struct btrfs_root *root, u64 dir, struct fs_path *name, 4187 void *ctx, struct list_head *refs) 4188 { 4189 int ret = 0; 4190 struct send_ctx *sctx = ctx; 4191 struct fs_path *p; 4192 u64 gen; 4193 4194 p = fs_path_alloc(); 4195 if (!p) 4196 return -ENOMEM; 4197 4198 ret = get_inode_info(root, dir, NULL, &gen, NULL, NULL, 4199 NULL, NULL); 4200 if (ret < 0) 4201 goto out; 4202 4203 ret = get_cur_path(sctx, dir, gen, p); 4204 if (ret < 0) 4205 goto out; 4206 ret = fs_path_add_path(p, name); 4207 if (ret < 0) 4208 goto out; 4209 4210 ret = __record_ref(refs, dir, gen, p); 4211 4212 out: 4213 if (ret) 4214 fs_path_free(p); 4215 return ret; 4216 } 4217 4218 static int __record_new_ref(int num, u64 dir, int index, 4219 struct fs_path *name, 4220 void *ctx) 4221 { 4222 struct send_ctx *sctx = ctx; 4223 return record_ref(sctx->send_root, dir, name, ctx, &sctx->new_refs); 4224 } 4225 4226 4227 static int __record_deleted_ref(int num, u64 dir, int index, 4228 struct fs_path *name, 4229 void *ctx) 4230 { 4231 struct send_ctx *sctx = ctx; 4232 return record_ref(sctx->parent_root, dir, name, ctx, 4233 &sctx->deleted_refs); 4234 } 4235 4236 static int record_new_ref(struct send_ctx *sctx) 4237 { 4238 int ret; 4239 4240 ret = iterate_inode_ref(sctx->send_root, sctx->left_path, 4241 sctx->cmp_key, 0, __record_new_ref, sctx); 4242 if (ret < 0) 4243 goto out; 4244 ret = 0; 4245 4246 out: 4247 return ret; 4248 } 4249 4250 static int record_deleted_ref(struct send_ctx *sctx) 4251 { 4252 int ret; 4253 4254 ret = iterate_inode_ref(sctx->parent_root, sctx->right_path, 4255 sctx->cmp_key, 0, __record_deleted_ref, sctx); 4256 if (ret < 0) 4257 goto out; 4258 ret = 0; 4259 4260 out: 4261 return ret; 4262 } 4263 4264 struct find_ref_ctx { 4265 u64 dir; 4266 u64 dir_gen; 4267 struct btrfs_root *root; 4268 struct fs_path *name; 4269 int found_idx; 4270 }; 4271 4272 static int __find_iref(int num, u64 dir, int index, 4273 struct fs_path *name, 4274 void *ctx_) 4275 { 4276 struct find_ref_ctx *ctx = ctx_; 4277 u64 dir_gen; 4278 int ret; 4279 4280 if (dir == ctx->dir && fs_path_len(name) == fs_path_len(ctx->name) && 4281 strncmp(name->start, ctx->name->start, fs_path_len(name)) == 0) { 4282 /* 4283 * To avoid doing extra lookups we'll only do this if everything 4284 * else matches. 
4285 */ 4286 ret = get_inode_info(ctx->root, dir, NULL, &dir_gen, NULL, 4287 NULL, NULL, NULL); 4288 if (ret) 4289 return ret; 4290 if (dir_gen != ctx->dir_gen) 4291 return 0; 4292 ctx->found_idx = num; 4293 return 1; 4294 } 4295 return 0; 4296 } 4297 4298 static int find_iref(struct btrfs_root *root, 4299 struct btrfs_path *path, 4300 struct btrfs_key *key, 4301 u64 dir, u64 dir_gen, struct fs_path *name) 4302 { 4303 int ret; 4304 struct find_ref_ctx ctx; 4305 4306 ctx.dir = dir; 4307 ctx.name = name; 4308 ctx.dir_gen = dir_gen; 4309 ctx.found_idx = -1; 4310 ctx.root = root; 4311 4312 ret = iterate_inode_ref(root, path, key, 0, __find_iref, &ctx); 4313 if (ret < 0) 4314 return ret; 4315 4316 if (ctx.found_idx == -1) 4317 return -ENOENT; 4318 4319 return ctx.found_idx; 4320 } 4321 4322 static int __record_changed_new_ref(int num, u64 dir, int index, 4323 struct fs_path *name, 4324 void *ctx) 4325 { 4326 u64 dir_gen; 4327 int ret; 4328 struct send_ctx *sctx = ctx; 4329 4330 ret = get_inode_info(sctx->send_root, dir, NULL, &dir_gen, NULL, 4331 NULL, NULL, NULL); 4332 if (ret) 4333 return ret; 4334 4335 ret = find_iref(sctx->parent_root, sctx->right_path, 4336 sctx->cmp_key, dir, dir_gen, name); 4337 if (ret == -ENOENT) 4338 ret = __record_new_ref(num, dir, index, name, sctx); 4339 else if (ret > 0) 4340 ret = 0; 4341 4342 return ret; 4343 } 4344 4345 static int __record_changed_deleted_ref(int num, u64 dir, int index, 4346 struct fs_path *name, 4347 void *ctx) 4348 { 4349 u64 dir_gen; 4350 int ret; 4351 struct send_ctx *sctx = ctx; 4352 4353 ret = get_inode_info(sctx->parent_root, dir, NULL, &dir_gen, NULL, 4354 NULL, NULL, NULL); 4355 if (ret) 4356 return ret; 4357 4358 ret = find_iref(sctx->send_root, sctx->left_path, sctx->cmp_key, 4359 dir, dir_gen, name); 4360 if (ret == -ENOENT) 4361 ret = __record_deleted_ref(num, dir, index, name, sctx); 4362 else if (ret > 0) 4363 ret = 0; 4364 4365 return ret; 4366 } 4367 4368 static int record_changed_ref(struct send_ctx *sctx) 4369 { 4370 int ret = 0; 4371 4372 ret = iterate_inode_ref(sctx->send_root, sctx->left_path, 4373 sctx->cmp_key, 0, __record_changed_new_ref, sctx); 4374 if (ret < 0) 4375 goto out; 4376 ret = iterate_inode_ref(sctx->parent_root, sctx->right_path, 4377 sctx->cmp_key, 0, __record_changed_deleted_ref, sctx); 4378 if (ret < 0) 4379 goto out; 4380 ret = 0; 4381 4382 out: 4383 return ret; 4384 } 4385 4386 /* 4387 * Record and process all refs at once. Needed when an inode changes the 4388 * generation number, which means that it was deleted and recreated. 
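 *
 * Example: between the two snapshots a file was deleted and a new one
 * created that reuses the same inode number with a new generation. The
 * inode is treated as deleted and recreated: all refs in the parent root
 * are recorded as deleted and all refs in the send root as new, each in a
 * single pass over the ref items.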
static int find_iref(struct btrfs_root *root,
		     struct btrfs_path *path,
		     struct btrfs_key *key,
		     u64 dir, u64 dir_gen, struct fs_path *name)
{
	int ret;
	struct find_ref_ctx ctx;

	ctx.dir = dir;
	ctx.name = name;
	ctx.dir_gen = dir_gen;
	ctx.found_idx = -1;
	ctx.root = root;

	ret = iterate_inode_ref(root, path, key, 0, __find_iref, &ctx);
	if (ret < 0)
		return ret;

	if (ctx.found_idx == -1)
		return -ENOENT;

	return ctx.found_idx;
}

static int __record_changed_new_ref(int num, u64 dir, int index,
				    struct fs_path *name,
				    void *ctx)
{
	u64 dir_gen;
	int ret;
	struct send_ctx *sctx = ctx;

	ret = get_inode_info(sctx->send_root, dir, NULL, &dir_gen, NULL,
			     NULL, NULL, NULL);
	if (ret)
		return ret;

	ret = find_iref(sctx->parent_root, sctx->right_path,
			sctx->cmp_key, dir, dir_gen, name);
	if (ret == -ENOENT)
		ret = __record_new_ref(num, dir, index, name, sctx);
	else if (ret > 0)
		ret = 0;

	return ret;
}

static int __record_changed_deleted_ref(int num, u64 dir, int index,
					struct fs_path *name,
					void *ctx)
{
	u64 dir_gen;
	int ret;
	struct send_ctx *sctx = ctx;

	ret = get_inode_info(sctx->parent_root, dir, NULL, &dir_gen, NULL,
			     NULL, NULL, NULL);
	if (ret)
		return ret;

	ret = find_iref(sctx->send_root, sctx->left_path, sctx->cmp_key,
			dir, dir_gen, name);
	if (ret == -ENOENT)
		ret = __record_deleted_ref(num, dir, index, name, sctx);
	else if (ret > 0)
		ret = 0;

	return ret;
}

static int record_changed_ref(struct send_ctx *sctx)
{
	int ret = 0;

	ret = iterate_inode_ref(sctx->send_root, sctx->left_path,
			sctx->cmp_key, 0, __record_changed_new_ref, sctx);
	if (ret < 0)
		goto out;
	ret = iterate_inode_ref(sctx->parent_root, sctx->right_path,
			sctx->cmp_key, 0, __record_changed_deleted_ref, sctx);
	if (ret < 0)
		goto out;
	ret = 0;

out:
	return ret;
}

/*
 * Record and process all refs at once. Needed when an inode changes the
 * generation number, which means that it was deleted and recreated.
 */
static int process_all_refs(struct send_ctx *sctx,
			    enum btrfs_compare_tree_result cmd)
{
	int ret;
	struct btrfs_root *root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *eb;
	int slot;
	iterate_inode_ref_t cb;
	int pending_move = 0;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	if (cmd == BTRFS_COMPARE_TREE_NEW) {
		root = sctx->send_root;
		cb = __record_new_ref;
	} else if (cmd == BTRFS_COMPARE_TREE_DELETED) {
		root = sctx->parent_root;
		cb = __record_deleted_ref;
	} else {
		btrfs_err(sctx->send_root->fs_info,
				"Wrong command %d in process_all_refs", cmd);
		ret = -EINVAL;
		goto out;
	}

	key.objectid = sctx->cmp_key->objectid;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		eb = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(eb)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;
			else if (ret > 0)
				break;
			continue;
		}

		btrfs_item_key_to_cpu(eb, &found_key, slot);

		if (found_key.objectid != key.objectid ||
		    (found_key.type != BTRFS_INODE_REF_KEY &&
		     found_key.type != BTRFS_INODE_EXTREF_KEY))
			break;

		ret = iterate_inode_ref(root, path, &found_key, 0, cb, sctx);
		if (ret < 0)
			goto out;

		path->slots[0]++;
	}
	btrfs_release_path(path);

	/*
	 * We don't actually care about pending_move as we are simply
	 * re-creating this inode and will be rename'ing it into place once we
	 * rename the parent directory.
	 */
	ret = process_recorded_refs(sctx, &pending_move);
out:
	btrfs_free_path(path);
	return ret;
}
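
/*
 * Emit a SET_XATTR command for the given path, attribute name and data.
 */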
static int send_set_xattr(struct send_ctx *sctx,
			  struct fs_path *path,
			  const char *name, int name_len,
			  const char *data, int data_len)
{
	int ret = 0;

	ret = begin_cmd(sctx, BTRFS_SEND_C_SET_XATTR);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
	TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len);
	TLV_PUT(sctx, BTRFS_SEND_A_XATTR_DATA, data, data_len);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	return ret;
}

static int send_remove_xattr(struct send_ctx *sctx,
			     struct fs_path *path,
			     const char *name, int name_len)
{
	int ret = 0;

	ret = begin_cmd(sctx, BTRFS_SEND_C_REMOVE_XATTR);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
	TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	return ret;
}

static int __process_new_xattr(int num, struct btrfs_key *di_key,
			       const char *name, int name_len,
			       const char *data, int data_len,
			       u8 type, void *ctx)
{
	int ret;
	struct send_ctx *sctx = ctx;
	struct fs_path *p;
	struct posix_acl_xattr_header dummy_acl;

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;

	/*
	 * This hack is needed because empty acls are stored as zero byte
	 * data in xattrs. The problem with that is that receiving these
	 * zero byte acls will fail later. To fix this, we send a dummy
	 * acl list that only contains the version number and no entries.
	 */
	if (!strncmp(name, XATTR_NAME_POSIX_ACL_ACCESS, name_len) ||
	    !strncmp(name, XATTR_NAME_POSIX_ACL_DEFAULT, name_len)) {
		if (data_len == 0) {
			dummy_acl.a_version =
					cpu_to_le32(POSIX_ACL_XATTR_VERSION);
			data = (char *)&dummy_acl;
			data_len = sizeof(dummy_acl);
		}
	}

	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
	if (ret < 0)
		goto out;

	ret = send_set_xattr(sctx, p, name, name_len, data, data_len);

out:
	fs_path_free(p);
	return ret;
}

static int __process_deleted_xattr(int num, struct btrfs_key *di_key,
				   const char *name, int name_len,
				   const char *data, int data_len,
				   u8 type, void *ctx)
{
	int ret;
	struct send_ctx *sctx = ctx;
	struct fs_path *p;

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;

	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
	if (ret < 0)
		goto out;

	ret = send_remove_xattr(sctx, p, name, name_len);

out:
	fs_path_free(p);
	return ret;
}

static int process_new_xattr(struct send_ctx *sctx)
{
	int ret = 0;

	ret = iterate_dir_item(sctx->send_root, sctx->left_path,
			       __process_new_xattr, sctx);

	return ret;
}

static int process_deleted_xattr(struct send_ctx *sctx)
{
	return iterate_dir_item(sctx->parent_root, sctx->right_path,
				__process_deleted_xattr, sctx);
}

struct find_xattr_ctx {
	const char *name;
	int name_len;
	int found_idx;
	char *found_data;
	int found_data_len;
};

static int __find_xattr(int num, struct btrfs_key *di_key,
			const char *name, int name_len,
			const char *data, int data_len,
			u8 type, void *vctx)
{
	struct find_xattr_ctx *ctx = vctx;

	if (name_len == ctx->name_len &&
	    strncmp(name, ctx->name, name_len) == 0) {
		ctx->found_idx = num;
		ctx->found_data_len = data_len;
		ctx->found_data = kmemdup(data, data_len, GFP_KERNEL);
		if (!ctx->found_data)
			return -ENOMEM;
		return 1;
	}
	return 0;
}
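
/*
 * Look up the xattr @name in the dir item pointed to by @key. On success the
 * index of the xattr is returned and, if @data is not NULL, a copy of its
 * value is handed to the caller (who must kfree() it).
 */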
static int find_xattr(struct btrfs_root *root,
		      struct btrfs_path *path,
		      struct btrfs_key *key,
		      const char *name, int name_len,
		      char **data, int *data_len)
{
	int ret;
	struct find_xattr_ctx ctx;

	ctx.name = name;
	ctx.name_len = name_len;
	ctx.found_idx = -1;
	ctx.found_data = NULL;
	ctx.found_data_len = 0;

	ret = iterate_dir_item(root, path, __find_xattr, &ctx);
	if (ret < 0)
		return ret;

	if (ctx.found_idx == -1)
		return -ENOENT;
	if (data) {
		*data = ctx.found_data;
		*data_len = ctx.found_data_len;
	} else {
		kfree(ctx.found_data);
	}
	return ctx.found_idx;
}

static int __process_changed_new_xattr(int num, struct btrfs_key *di_key,
				       const char *name, int name_len,
				       const char *data, int data_len,
				       u8 type, void *ctx)
{
	int ret;
	struct send_ctx *sctx = ctx;
	char *found_data = NULL;
	int found_data_len = 0;

	ret = find_xattr(sctx->parent_root, sctx->right_path,
			 sctx->cmp_key, name, name_len, &found_data,
			 &found_data_len);
	if (ret == -ENOENT) {
		ret = __process_new_xattr(num, di_key, name, name_len, data,
					  data_len, type, ctx);
	} else if (ret >= 0) {
		if (data_len != found_data_len ||
		    memcmp(data, found_data, data_len)) {
			ret = __process_new_xattr(num, di_key, name, name_len,
						  data, data_len, type, ctx);
		} else {
			ret = 0;
		}
	}

	kfree(found_data);
	return ret;
}

static int __process_changed_deleted_xattr(int num, struct btrfs_key *di_key,
					   const char *name, int name_len,
					   const char *data, int data_len,
					   u8 type, void *ctx)
{
	int ret;
	struct send_ctx *sctx = ctx;

	ret = find_xattr(sctx->send_root, sctx->left_path, sctx->cmp_key,
			 name, name_len, NULL, NULL);
	if (ret == -ENOENT)
		ret = __process_deleted_xattr(num, di_key, name, name_len, data,
					      data_len, type, ctx);
	else if (ret >= 0)
		ret = 0;

	return ret;
}

static int process_changed_xattr(struct send_ctx *sctx)
{
	int ret = 0;

	ret = iterate_dir_item(sctx->send_root, sctx->left_path,
			       __process_changed_new_xattr, sctx);
	if (ret < 0)
		goto out;
	ret = iterate_dir_item(sctx->parent_root, sctx->right_path,
			       __process_changed_deleted_xattr, sctx);

out:
	return ret;
}

static int process_all_new_xattrs(struct send_ctx *sctx)
{
	int ret;
	struct btrfs_root *root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *eb;
	int slot;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	root = sctx->send_root;

	key.objectid = sctx->cmp_key->objectid;
	key.type = BTRFS_XATTR_ITEM_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		eb = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(eb)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0) {
				goto out;
			} else if (ret > 0) {
				ret = 0;
				break;
			}
			continue;
		}

		btrfs_item_key_to_cpu(eb, &found_key, slot);
		if (found_key.objectid != key.objectid ||
		    found_key.type != key.type) {
			ret = 0;
			goto out;
		}

		ret = iterate_dir_item(root, path, __process_new_xattr, sctx);
		if (ret < 0)
			goto out;

		path->slots[0]++;
	}

out:
	btrfs_free_path(path);
	return ret;
}
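
/*
 * Read up to @len bytes of the current inode's data at @offset into
 * sctx->read_buf, going through the page cache with readahead. Returns the
 * number of bytes read, which may be less than @len at the end of the file.
 */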
static ssize_t fill_read_buf(struct send_ctx *sctx, u64 offset, u32 len)
{
	struct btrfs_root *root = sctx->send_root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct inode *inode;
	struct page *page;
	char *addr;
	struct btrfs_key key;
	pgoff_t index = offset >> PAGE_SHIFT;
	pgoff_t last_index;
	unsigned pg_offset = offset & ~PAGE_MASK;
	ssize_t ret = 0;

	key.objectid = sctx->cur_ino;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	inode = btrfs_iget(fs_info->sb, &key, root, NULL);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	if (offset + len > i_size_read(inode)) {
		if (offset > i_size_read(inode))
			len = 0;
		else
			len = i_size_read(inode) - offset;
	}
	if (len == 0)
		goto out;

	last_index = (offset + len - 1) >> PAGE_SHIFT;

	/* initial readahead */
	memset(&sctx->ra, 0, sizeof(struct file_ra_state));
	file_ra_state_init(&sctx->ra, inode->i_mapping);

	while (index <= last_index) {
		unsigned cur_len = min_t(unsigned, len,
					 PAGE_SIZE - pg_offset);

		page = find_lock_page(inode->i_mapping, index);
		if (!page) {
			page_cache_sync_readahead(inode->i_mapping, &sctx->ra,
				NULL, index, last_index + 1 - index);

			page = find_or_create_page(inode->i_mapping, index,
					GFP_KERNEL);
			if (!page) {
				ret = -ENOMEM;
				break;
			}
		}

		if (PageReadahead(page)) {
			page_cache_async_readahead(inode->i_mapping, &sctx->ra,
				NULL, page, index, last_index + 1 - index);
		}

		if (!PageUptodate(page)) {
			btrfs_readpage(NULL, page);
			lock_page(page);
			if (!PageUptodate(page)) {
				unlock_page(page);
				put_page(page);
				ret = -EIO;
				break;
			}
		}

		addr = kmap(page);
		memcpy(sctx->read_buf + ret, addr + pg_offset, cur_len);
		kunmap(page);
		unlock_page(page);
		put_page(page);
		index++;
		pg_offset = 0;
		len -= cur_len;
		ret += cur_len;
	}
out:
	iput(inode);
	return ret;
}

/*
 * Read some bytes from the current inode/file and send a write command to
 * user space.
 */
static int send_write(struct send_ctx *sctx, u64 offset, u32 len)
{
	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
	int ret = 0;
	struct fs_path *p;
	ssize_t num_read = 0;

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;

	btrfs_debug(fs_info, "send_write offset=%llu, len=%d", offset, len);

	num_read = fill_read_buf(sctx, offset, len);
	if (num_read <= 0) {
		if (num_read < 0)
			ret = num_read;
		goto out;
	}

	ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE);
	if (ret < 0)
		goto out;

	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
	TLV_PUT(sctx, BTRFS_SEND_A_DATA, sctx->read_buf, num_read);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	fs_path_free(p);
	if (ret < 0)
		return ret;
	return num_read;
}

/*
 * Send a clone command to user space.
 */
static int send_clone(struct send_ctx *sctx,
		      u64 offset, u32 len,
		      struct clone_root *clone_root)
{
	int ret = 0;
	struct fs_path *p;
	u64 gen;

	btrfs_debug(sctx->send_root->fs_info,
		    "send_clone offset=%llu, len=%d, clone_root=%llu, clone_inode=%llu, clone_offset=%llu",
		    offset, len, clone_root->root->objectid, clone_root->ino,
		    clone_root->offset);

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;

	ret = begin_cmd(sctx, BTRFS_SEND_C_CLONE);
	if (ret < 0)
		goto out;

	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
	if (ret < 0)
		goto out;

	TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_LEN, len);
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);

	if (clone_root->root == sctx->send_root) {
		ret = get_inode_info(sctx->send_root, clone_root->ino, NULL,
				&gen, NULL, NULL, NULL, NULL);
		if (ret < 0)
			goto out;
		ret = get_cur_path(sctx, clone_root->ino, gen, p);
	} else {
		ret = get_inode_path(clone_root->root, clone_root->ino, p);
	}
	if (ret < 0)
		goto out;

	/*
	 * If the parent we're using has a received_uuid set then use that as
	 * our clone source as that is what we will look for when doing a
	 * receive.
	 *
	 * This covers the case that we create a snapshot off of a received
	 * subvolume and then use that as the parent and try to receive on a
	 * different host.
	 */
	if (!btrfs_is_empty_uuid(clone_root->root->root_item.received_uuid))
		TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
			     clone_root->root->root_item.received_uuid);
	else
		TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
			     clone_root->root->root_item.uuid);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID,
		    le64_to_cpu(clone_root->root->root_item.ctransid));
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_CLONE_PATH, p);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_OFFSET,
		    clone_root->offset);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	fs_path_free(p);
	return ret;
}

/*
 * Send an update extent command to user space.
 */
static int send_update_extent(struct send_ctx *sctx,
			      u64 offset, u32 len)
{
	int ret = 0;
	struct fs_path *p;

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;

	ret = begin_cmd(sctx, BTRFS_SEND_C_UPDATE_EXTENT);
	if (ret < 0)
		goto out;

	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, len);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	fs_path_free(p);
	return ret;
}
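
/*
 * Send one or more WRITE commands full of zeroes to punch a hole from the
 * end of the last processed extent up to @end (or a single UPDATE_EXTENT
 * command if file data is omitted from the stream).
 */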
static int send_hole(struct send_ctx *sctx, u64 end)
{
	struct fs_path *p = NULL;
	u64 offset = sctx->cur_inode_last_extent;
	u64 len;
	int ret = 0;

	if (sctx->flags & BTRFS_SEND_FLAG_NO_FILE_DATA)
		return send_update_extent(sctx, offset, end - offset);

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;
	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
	if (ret < 0)
		goto tlv_put_failure;
	memset(sctx->read_buf, 0, BTRFS_SEND_READ_SIZE);
	while (offset < end) {
		len = min_t(u64, end - offset, BTRFS_SEND_READ_SIZE);

		ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE);
		if (ret < 0)
			break;
		TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
		TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
		TLV_PUT(sctx, BTRFS_SEND_A_DATA, sctx->read_buf, len);
		ret = send_cmd(sctx);
		if (ret < 0)
			break;
		offset += len;
	}
	sctx->cur_inode_next_write_offset = offset;
tlv_put_failure:
	fs_path_free(p);
	return ret;
}
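
/*
 * Send the data of the range [offset, offset + len[ as a series of WRITE
 * commands of at most BTRFS_SEND_READ_SIZE bytes each.
 */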
static int send_extent_data(struct send_ctx *sctx,
			    const u64 offset,
			    const u64 len)
{
	u64 sent = 0;

	if (sctx->flags & BTRFS_SEND_FLAG_NO_FILE_DATA)
		return send_update_extent(sctx, offset, len);

	while (sent < len) {
		u64 size = len - sent;
		int ret;

		if (size > BTRFS_SEND_READ_SIZE)
			size = BTRFS_SEND_READ_SIZE;
		ret = send_write(sctx, offset + sent, size);
		if (ret < 0)
			return ret;
		if (!ret)
			break;
		sent += ret;
	}
	return 0;
}

static int clone_range(struct send_ctx *sctx,
		       struct clone_root *clone_root,
		       const u64 disk_byte,
		       u64 data_offset,
		       u64 offset,
		       u64 len)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret;

	/*
	 * Prevent cloning from a zero offset with a length matching the sector
	 * size because in some scenarios this will make the receiver fail.
	 *
	 * For example, if in the source filesystem the extent at offset 0
	 * has a length of sectorsize and it was written using direct IO, then
	 * it can never be an inline extent (even if compression is enabled).
	 * Then this extent can be cloned in the original filesystem to a non
	 * zero file offset, but it may not be possible to clone in the
	 * destination filesystem because it can be inlined due to compression
	 * on the destination filesystem (as the receiver's write operations are
	 * always done using buffered IO). The same happens when the original
	 * filesystem does not have compression enabled but the destination
	 * filesystem does.
	 */
	if (clone_root->offset == 0 &&
	    len == sctx->send_root->fs_info->sectorsize)
		return send_extent_data(sctx, offset, len);

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	/*
	 * We can't send a clone operation for the entire range if we find
	 * extent items in the respective range in the source file that
	 * refer to different extents or if we find holes.
	 * So check for that and do a mix of clone and regular write/copy
	 * operations if needed.
	 *
	 * Example:
	 *
	 * mkfs.btrfs -f /dev/sda
	 * mount /dev/sda /mnt
	 * xfs_io -f -c "pwrite -S 0xaa 0K 100K" /mnt/foo
	 * cp --reflink=always /mnt/foo /mnt/bar
	 * xfs_io -c "pwrite -S 0xbb 50K 50K" /mnt/foo
	 * btrfs subvolume snapshot -r /mnt /mnt/snap
	 *
	 * If, when sending the snapshot, we are processing file bar (which
	 * has a higher inode number than foo) and we blindly send a clone
	 * operation for the [0, 100K[ range from foo to bar, the receiver
	 * ends up with a file bar that matches the content of file foo -
	 * iow, it doesn't match the content of bar in the original
	 * filesystem.
	 */
	key.objectid = clone_root->ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = clone_root->offset;
	ret = btrfs_search_slot(NULL, clone_root->root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0 && path->slots[0] > 0) {
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0] - 1);
		if (key.objectid == clone_root->ino &&
		    key.type == BTRFS_EXTENT_DATA_KEY)
			path->slots[0]--;
	}

	while (true) {
		struct extent_buffer *leaf = path->nodes[0];
		int slot = path->slots[0];
		struct btrfs_file_extent_item *ei;
		u8 type;
		u64 ext_len;
		u64 clone_len;

		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(clone_root->root, path);
			if (ret < 0)
				goto out;
			else if (ret > 0)
				break;
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &key, slot);

		/*
		 * We might have an implicit trailing hole (NO_HOLES feature
		 * enabled). We deal with it after leaving this loop.
		 */
		if (key.objectid != clone_root->ino ||
		    key.type != BTRFS_EXTENT_DATA_KEY)
			break;

		ei = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
		type = btrfs_file_extent_type(leaf, ei);
		if (type == BTRFS_FILE_EXTENT_INLINE) {
			ext_len = btrfs_file_extent_inline_len(leaf, slot, ei);
			ext_len = PAGE_ALIGN(ext_len);
		} else {
			ext_len = btrfs_file_extent_num_bytes(leaf, ei);
		}

		if (key.offset + ext_len <= clone_root->offset)
			goto next;

		if (key.offset > clone_root->offset) {
			/* Implicit hole, NO_HOLES feature enabled. */
			u64 hole_len = key.offset - clone_root->offset;

			if (hole_len > len)
				hole_len = len;
			ret = send_extent_data(sctx, offset, hole_len);
			if (ret < 0)
				goto out;

			len -= hole_len;
			if (len == 0)
				break;
			offset += hole_len;
			clone_root->offset += hole_len;
			data_offset += hole_len;
		}

		if (key.offset >= clone_root->offset + len)
			break;

		clone_len = min_t(u64, ext_len, len);

		if (btrfs_file_extent_disk_bytenr(leaf, ei) == disk_byte &&
		    btrfs_file_extent_offset(leaf, ei) == data_offset)
			ret = send_clone(sctx, offset, clone_len, clone_root);
		else
			ret = send_extent_data(sctx, offset, clone_len);

		if (ret < 0)
			goto out;

		len -= clone_len;
		if (len == 0)
			break;
		offset += clone_len;
		clone_root->offset += clone_len;
		data_offset += clone_len;
next:
		path->slots[0]++;
	}

	if (len > 0)
		ret = send_extent_data(sctx, offset, len);
	else
		ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
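
/*
 * Send the extent at @key either as a clone command (when a usable clone
 * source was found and the range is block aligned) or as plain write
 * commands, clamping the length to the current inode size.
 */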
static int send_write_or_clone(struct send_ctx *sctx,
			       struct btrfs_path *path,
			       struct btrfs_key *key,
			       struct clone_root *clone_root)
{
	int ret = 0;
	struct btrfs_file_extent_item *ei;
	u64 offset = key->offset;
	u64 len;
	u8 type;
	u64 bs = sctx->send_root->fs_info->sb->s_blocksize;

	ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
			struct btrfs_file_extent_item);
	type = btrfs_file_extent_type(path->nodes[0], ei);
	if (type == BTRFS_FILE_EXTENT_INLINE) {
		len = btrfs_file_extent_inline_len(path->nodes[0],
						   path->slots[0], ei);
		/*
		 * It is possible the inline item won't cover the whole page,
		 * but there may be items after this page. Make sure to send
		 * the whole thing.
		 */
		len = PAGE_ALIGN(len);
	} else {
		len = btrfs_file_extent_num_bytes(path->nodes[0], ei);
	}

	if (offset >= sctx->cur_inode_size) {
		ret = 0;
		goto out;
	}
	if (offset + len > sctx->cur_inode_size)
		len = sctx->cur_inode_size - offset;
	if (len == 0) {
		ret = 0;
		goto out;
	}

	if (clone_root && IS_ALIGNED(offset + len, bs)) {
		u64 disk_byte;
		u64 data_offset;

		disk_byte = btrfs_file_extent_disk_bytenr(path->nodes[0], ei);
		data_offset = btrfs_file_extent_offset(path->nodes[0], ei);
		ret = clone_range(sctx, clone_root, disk_byte, data_offset,
				  offset, len);
	} else {
		ret = send_extent_data(sctx, offset, len);
	}
	sctx->cur_inode_next_write_offset = offset + len;
out:
	return ret;
}
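
/*
 * Check if the extent item at @ekey in the send snapshot covers the same
 * data as the corresponding file range in the parent snapshot. Returns 1 if
 * the extent is unchanged (nothing has to be sent), 0 if it changed and a
 * negative error otherwise.
 */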
static int is_extent_unchanged(struct send_ctx *sctx,
			       struct btrfs_path *left_path,
			       struct btrfs_key *ekey)
{
	int ret = 0;
	struct btrfs_key key;
	struct btrfs_path *path = NULL;
	struct extent_buffer *eb;
	int slot;
	struct btrfs_key found_key;
	struct btrfs_file_extent_item *ei;
	u64 left_disknr;
	u64 right_disknr;
	u64 left_offset;
	u64 right_offset;
	u64 left_offset_fixed;
	u64 left_len;
	u64 right_len;
	u64 left_gen;
	u64 right_gen;
	u8 left_type;
	u8 right_type;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	eb = left_path->nodes[0];
	slot = left_path->slots[0];
	ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
	left_type = btrfs_file_extent_type(eb, ei);

	if (left_type != BTRFS_FILE_EXTENT_REG) {
		ret = 0;
		goto out;
	}
	left_disknr = btrfs_file_extent_disk_bytenr(eb, ei);
	left_len = btrfs_file_extent_num_bytes(eb, ei);
	left_offset = btrfs_file_extent_offset(eb, ei);
	left_gen = btrfs_file_extent_generation(eb, ei);

	/*
	 * Following comments will refer to these graphics. L is the left
	 * extent which we are checking at the moment. 1-8 are the right
	 * extents that we iterate.
	 *
	 *       |-----L-----|
	 * |-1-|-2a-|-3-|-4-|-5-|-6-|
	 *
	 *       |-----L-----|
	 * |--1--|-2b-|...(same as above)
	 *
	 * Alternative situation. Happens on files where extents got split.
	 *       |-----L-----|
	 * |-----------7-----------|-6-|
	 *
	 * Alternative situation. Happens on files which got larger.
	 *       |-----L-----|
	 * |-8-|
	 * Nothing follows after 8.
	 */

	key.objectid = ekey->objectid;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = ekey->offset;
	ret = btrfs_search_slot_for_read(sctx->parent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret) {
		ret = 0;
		goto out;
	}

	/*
	 * Handle special case where the right side has no extents at all.
	 */
	eb = path->nodes[0];
	slot = path->slots[0];
	btrfs_item_key_to_cpu(eb, &found_key, slot);
	if (found_key.objectid != key.objectid ||
	    found_key.type != key.type) {
		/* If we're a hole then just pretend nothing changed */
		ret = (left_disknr) ? 0 : 1;
		goto out;
	}

	/*
	 * We're now on 2a, 2b or 7.
	 */
	key = found_key;
	while (key.offset < ekey->offset + left_len) {
		ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		right_type = btrfs_file_extent_type(eb, ei);
		if (right_type != BTRFS_FILE_EXTENT_REG &&
		    right_type != BTRFS_FILE_EXTENT_INLINE) {
			ret = 0;
			goto out;
		}

		if (right_type == BTRFS_FILE_EXTENT_INLINE) {
			right_len = btrfs_file_extent_inline_len(eb, slot, ei);
			right_len = PAGE_ALIGN(right_len);
		} else {
			right_len = btrfs_file_extent_num_bytes(eb, ei);
		}

		/*
		 * Are we at extent 8? If yes, we know the extent is changed.
		 * This may only happen on the first iteration.
		 */
		if (found_key.offset + right_len <= ekey->offset) {
			/* If we're a hole just pretend nothing changed */
			ret = (left_disknr) ? 0 : 1;
			goto out;
		}

		/*
		 * We just wanted to see if, when we have an inline extent,
		 * what follows it is a regular extent (we wanted to check the
		 * above condition for inline extents too). This should
		 * normally not happen but it's possible for example when we
		 * have an inline compressed extent representing data with a
		 * size matching the page size (currently the same as sector
		 * size).
		 */
		if (right_type == BTRFS_FILE_EXTENT_INLINE) {
			ret = 0;
			goto out;
		}

		right_disknr = btrfs_file_extent_disk_bytenr(eb, ei);
		right_offset = btrfs_file_extent_offset(eb, ei);
		right_gen = btrfs_file_extent_generation(eb, ei);

		left_offset_fixed = left_offset;
		if (key.offset < ekey->offset) {
			/* Fix the right offset for 2a and 7. */
			right_offset += ekey->offset - key.offset;
		} else {
			/* Fix the left offset for everything behind 2a and 2b */
			left_offset_fixed += key.offset - ekey->offset;
		}

		/*
		 * Check if we have the same extent.
		 */
		if (left_disknr != right_disknr ||
		    left_offset_fixed != right_offset ||
		    left_gen != right_gen) {
			ret = 0;
			goto out;
		}

		/*
		 * Go to the next extent.
		 */
		ret = btrfs_next_item(sctx->parent_root, path);
		if (ret < 0)
			goto out;
		if (!ret) {
			eb = path->nodes[0];
			slot = path->slots[0];
			btrfs_item_key_to_cpu(eb, &found_key, slot);
		}
		if (ret || found_key.objectid != key.objectid ||
		    found_key.type != key.type) {
			key.offset += right_len;
			break;
		}
		if (found_key.offset != key.offset + right_len) {
			ret = 0;
			goto out;
		}
		key = found_key;
	}

	/*
	 * We're now behind the left extent (treat as unchanged) or at the end
	 * of the right side (treat as changed).
	 */
	if (key.offset >= ekey->offset + left_len)
		ret = 1;
	else
		ret = 0;

out:
	btrfs_free_path(path);
	return ret;
}
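
/*
 * Find the extent item of the current inode that covers @offset and store
 * its aligned end position in sctx->cur_inode_last_extent.
 */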
static int get_last_extent(struct send_ctx *sctx, u64 offset)
{
	struct btrfs_path *path;
	struct btrfs_root *root = sctx->send_root;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 extent_end;
	u8 type;
	int ret;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	sctx->cur_inode_last_extent = 0;

	key.objectid = sctx->cur_ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = offset;
	ret = btrfs_search_slot_for_read(root, &key, path, 0, 1);
	if (ret < 0)
		goto out;
	ret = 0;
	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
	if (key.objectid != sctx->cur_ino || key.type != BTRFS_EXTENT_DATA_KEY)
		goto out;

	fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
			    struct btrfs_file_extent_item);
	type = btrfs_file_extent_type(path->nodes[0], fi);
	if (type == BTRFS_FILE_EXTENT_INLINE) {
		u64 size = btrfs_file_extent_inline_len(path->nodes[0],
							path->slots[0], fi);
		extent_end = ALIGN(key.offset + size,
				   sctx->send_root->fs_info->sectorsize);
	} else {
		extent_end = key.offset +
			btrfs_file_extent_num_bytes(path->nodes[0], fi);
	}
	sctx->cur_inode_last_extent = extent_end;
out:
	btrfs_free_path(path);
	return ret;
}
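
/*
 * Check if the file range [start, end[ of the current inode is a hole in
 * the parent snapshot. Returns 1 if it is (only holes or no extents at all),
 * 0 if some extent intersects the range and a negative error otherwise.
 */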
static int range_is_hole_in_parent(struct send_ctx *sctx,
				   const u64 start,
				   const u64 end)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_root *root = sctx->parent_root;
	u64 search_start = start;
	int ret;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	key.objectid = sctx->cur_ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = search_start;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0 && path->slots[0] > 0)
		path->slots[0]--;

	while (search_start < end) {
		struct extent_buffer *leaf = path->nodes[0];
		int slot = path->slots[0];
		struct btrfs_file_extent_item *fi;
		u64 extent_end;

		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;
			else if (ret > 0)
				break;
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.objectid < sctx->cur_ino ||
		    key.type < BTRFS_EXTENT_DATA_KEY)
			goto next;
		if (key.objectid > sctx->cur_ino ||
		    key.type > BTRFS_EXTENT_DATA_KEY ||
		    key.offset >= end)
			break;

		fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
		if (btrfs_file_extent_type(leaf, fi) ==
		    BTRFS_FILE_EXTENT_INLINE) {
			u64 size = btrfs_file_extent_inline_len(leaf, slot, fi);

			extent_end = ALIGN(key.offset + size,
					   root->fs_info->sectorsize);
		} else {
			extent_end = key.offset +
				btrfs_file_extent_num_bytes(leaf, fi);
		}
		if (extent_end <= start)
			goto next;
		if (btrfs_file_extent_disk_bytenr(leaf, fi) == 0) {
			search_start = extent_end;
			goto next;
		}
		ret = 0;
		goto out;
next:
		path->slots[0]++;
	}
	ret = 1;
out:
	btrfs_free_path(path);
	return ret;
}
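
/*
 * Called for every file extent item of the current inode: if there is a gap
 * between the end of the last processed extent and @key's offset that is not
 * already a hole in the parent snapshot, send it as a hole now.
 */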
static int maybe_send_hole(struct send_ctx *sctx, struct btrfs_path *path,
			   struct btrfs_key *key)
{
	struct btrfs_file_extent_item *fi;
	u64 extent_end;
	u8 type;
	int ret = 0;

	if (sctx->cur_ino != key->objectid || !need_send_hole(sctx))
		return 0;

	if (sctx->cur_inode_last_extent == (u64)-1) {
		ret = get_last_extent(sctx, key->offset - 1);
		if (ret)
			return ret;
	}

	fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
			    struct btrfs_file_extent_item);
	type = btrfs_file_extent_type(path->nodes[0], fi);
	if (type == BTRFS_FILE_EXTENT_INLINE) {
		u64 size = btrfs_file_extent_inline_len(path->nodes[0],
							path->slots[0], fi);
		extent_end = ALIGN(key->offset + size,
				   sctx->send_root->fs_info->sectorsize);
	} else {
		extent_end = key->offset +
			btrfs_file_extent_num_bytes(path->nodes[0], fi);
	}

	if (path->slots[0] == 0 &&
	    sctx->cur_inode_last_extent < key->offset) {
		/*
		 * We might have skipped entire leaves that contained only
		 * file extent items for our current inode. These leaves have
		 * a generation number smaller (older) than the one in the
		 * current leaf and the leaf our last extent came from, and
		 * are located between these two leaves.
		 */
		ret = get_last_extent(sctx, key->offset - 1);
		if (ret)
			return ret;
	}

	if (sctx->cur_inode_last_extent < key->offset) {
		ret = range_is_hole_in_parent(sctx,
					      sctx->cur_inode_last_extent,
					      key->offset);
		if (ret < 0)
			return ret;
		else if (ret == 0)
			ret = send_hole(sctx, key->offset);
		else
			ret = 0;
	}
	sctx->cur_inode_last_extent = extent_end;
	return ret;
}
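
/*
 * Process a single file extent item: skip it if it is unchanged relative to
 * the parent snapshot, otherwise send its data as clone or write commands,
 * and emit any hole that precedes it.
 */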
static int process_extent(struct send_ctx *sctx,
			  struct btrfs_path *path,
			  struct btrfs_key *key)
{
	struct clone_root *found_clone = NULL;
	int ret = 0;

	if (S_ISLNK(sctx->cur_inode_mode))
		return 0;

	if (sctx->parent_root && !sctx->cur_inode_new) {
		ret = is_extent_unchanged(sctx, path, key);
		if (ret < 0)
			goto out;
		if (ret) {
			ret = 0;
			goto out_hole;
		}
	} else {
		struct btrfs_file_extent_item *ei;
		u8 type;

		ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
				    struct btrfs_file_extent_item);
		type = btrfs_file_extent_type(path->nodes[0], ei);
		if (type == BTRFS_FILE_EXTENT_PREALLOC ||
		    type == BTRFS_FILE_EXTENT_REG) {
			/*
			 * The send spec does not have a prealloc command yet,
			 * so just leave a hole for prealloc'ed extents until
			 * we have enough commands queued up to justify rev'ing
			 * the send spec.
			 */
			if (type == BTRFS_FILE_EXTENT_PREALLOC) {
				ret = 0;
				goto out;
			}

			/* Have a hole, just skip it. */
			if (btrfs_file_extent_disk_bytenr(path->nodes[0], ei) == 0) {
				ret = 0;
				goto out;
			}
		}
	}

	ret = find_extent_clone(sctx, path, key->objectid, key->offset,
				sctx->cur_inode_size, &found_clone);
	if (ret != -ENOENT && ret < 0)
		goto out;

	ret = send_write_or_clone(sctx, path, key, found_clone);
	if (ret)
		goto out;
out_hole:
	ret = maybe_send_hole(sctx, path, key);
out:
	return ret;
}

static int process_all_extents(struct send_ctx *sctx)
{
	int ret;
	struct btrfs_root *root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *eb;
	int slot;

	root = sctx->send_root;
	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	key.objectid = sctx->cmp_key->objectid;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		eb = path->nodes[0];
		slot = path->slots[0];

		if (slot >= btrfs_header_nritems(eb)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0) {
				goto out;
			} else if (ret > 0) {
				ret = 0;
				break;
			}
			continue;
		}

		btrfs_item_key_to_cpu(eb, &found_key, slot);

		if (found_key.objectid != key.objectid ||
		    found_key.type != key.type) {
			ret = 0;
			goto out;
		}

		ret = process_extent(sctx, path, &found_key);
		if (ret < 0)
			goto out;

		path->slots[0]++;
	}

out:
	btrfs_free_path(path);
	return ret;
}

static int process_recorded_refs_if_needed(struct send_ctx *sctx, int at_end,
					   int *pending_move,
					   int *refs_processed)
{
	int ret = 0;

	if (sctx->cur_ino == 0)
		goto out;
	if (!at_end && sctx->cur_ino == sctx->cmp_key->objectid &&
	    sctx->cmp_key->type <= BTRFS_INODE_EXTREF_KEY)
		goto out;
	if (list_empty(&sctx->new_refs) && list_empty(&sctx->deleted_refs))
		goto out;

	ret = process_recorded_refs(sctx, pending_move);
	if (ret < 0)
		goto out;

	*refs_processed = 1;
out:
	return ret;
}
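
/*
 * Finish off the currently processed inode: send any trailing hole, then
 * truncate, chown and chmod as needed, apply delayed directory moves that
 * depended on this inode and finally send the utimes.
 */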
static int finish_inode_if_needed(struct send_ctx *sctx, int at_end)
{
	int ret = 0;
	u64 left_mode;
	u64 left_uid;
	u64 left_gid;
	u64 right_mode;
	u64 right_uid;
	u64 right_gid;
	int need_chmod = 0;
	int need_chown = 0;
	int need_truncate = 1;
	int pending_move = 0;
	int refs_processed = 0;

	ret = process_recorded_refs_if_needed(sctx, at_end, &pending_move,
					      &refs_processed);
	if (ret < 0)
		goto out;

	/*
	 * We have processed the refs and thus need to advance send_progress.
	 * Now, calls to get_cur_xxx will take the updated refs of the current
	 * inode into account.
	 *
	 * On the other hand, if our current inode is a directory and couldn't
	 * be moved/renamed because its parent was renamed/moved too and it has
	 * a higher inode number, we can only move/rename our current inode
	 * after we moved/renamed its parent. Therefore in this case operate on
	 * the old path (pre move/rename) of our current inode, and the
	 * move/rename will be performed later.
	 */
	if (refs_processed && !pending_move)
		sctx->send_progress = sctx->cur_ino + 1;

	if (sctx->cur_ino == 0 || sctx->cur_inode_deleted)
		goto out;
	if (!at_end && sctx->cmp_key->objectid == sctx->cur_ino)
		goto out;

	ret = get_inode_info(sctx->send_root, sctx->cur_ino, NULL, NULL,
			&left_mode, &left_uid, &left_gid, NULL);
	if (ret < 0)
		goto out;

	if (!sctx->parent_root || sctx->cur_inode_new) {
		need_chown = 1;
		if (!S_ISLNK(sctx->cur_inode_mode))
			need_chmod = 1;
		if (sctx->cur_inode_next_write_offset == sctx->cur_inode_size)
			need_truncate = 0;
	} else {
		u64 old_size;

		ret = get_inode_info(sctx->parent_root, sctx->cur_ino,
				&old_size, NULL, &right_mode, &right_uid,
				&right_gid, NULL);
		if (ret < 0)
			goto out;

		if (left_uid != right_uid || left_gid != right_gid)
			need_chown = 1;
		if (!S_ISLNK(sctx->cur_inode_mode) && left_mode != right_mode)
			need_chmod = 1;
		if ((old_size == sctx->cur_inode_size) ||
		    (sctx->cur_inode_size > old_size &&
		     sctx->cur_inode_next_write_offset == sctx->cur_inode_size))
			need_truncate = 0;
	}

	if (S_ISREG(sctx->cur_inode_mode)) {
		if (need_send_hole(sctx)) {
			if (sctx->cur_inode_last_extent == (u64)-1 ||
			    sctx->cur_inode_last_extent <
			    sctx->cur_inode_size) {
				ret = get_last_extent(sctx, (u64)-1);
				if (ret)
					goto out;
			}
			if (sctx->cur_inode_last_extent <
			    sctx->cur_inode_size) {
				ret = send_hole(sctx, sctx->cur_inode_size);
				if (ret)
					goto out;
			}
		}
		if (need_truncate) {
			ret = send_truncate(sctx, sctx->cur_ino,
					    sctx->cur_inode_gen,
					    sctx->cur_inode_size);
			if (ret < 0)
				goto out;
		}
	}

	if (need_chown) {
		ret = send_chown(sctx, sctx->cur_ino, sctx->cur_inode_gen,
				 left_uid, left_gid);
		if (ret < 0)
			goto out;
	}
	if (need_chmod) {
		ret = send_chmod(sctx, sctx->cur_ino, sctx->cur_inode_gen,
				 left_mode);
		if (ret < 0)
			goto out;
	}

	/*
	 * If other directory inodes depended on our current directory
	 * inode's move/rename, now do their move/rename operations.
	 */
	if (!is_waiting_for_move(sctx, sctx->cur_ino)) {
		ret = apply_children_dir_moves(sctx);
		if (ret)
			goto out;
		/*
		 * We need to send the utimes every time, no matter whether
		 * anything actually changed between the two trees, as we may
		 * have changed the inode before. If our inode is a directory
		 * and it's waiting to be moved/renamed, we will send its
		 * utimes when it's moved/renamed, therefore we don't need to
		 * do it here.
		 */
		sctx->send_progress = sctx->cur_ino + 1;
		ret = send_utimes(sctx, sctx->cur_ino, sctx->cur_inode_gen);
		if (ret < 0)
			goto out;
	}

out:
	return ret;
}
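
/*
 * Handle a new, deleted or changed inode item reported by the tree
 * comparison. Sets up the cur_inode_* state and, for the deleted+recreated
 * case (changed generation), immediately processes all refs, extents and
 * xattrs of both the old and the new inode.
 */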
static int changed_inode(struct send_ctx *sctx,
			 enum btrfs_compare_tree_result result)
{
	int ret = 0;
	struct btrfs_key *key = sctx->cmp_key;
	struct btrfs_inode_item *left_ii = NULL;
	struct btrfs_inode_item *right_ii = NULL;
	u64 left_gen = 0;
	u64 right_gen = 0;

	sctx->cur_ino = key->objectid;
	sctx->cur_inode_new_gen = 0;
	sctx->cur_inode_last_extent = (u64)-1;
	sctx->cur_inode_next_write_offset = 0;

	/*
	 * Set send_progress to current inode. This will tell all get_cur_xxx
	 * functions that the current inode's refs are not updated yet. Later,
	 * when process_recorded_refs is finished, it is set to cur_ino + 1.
	 */
	sctx->send_progress = sctx->cur_ino;

	if (result == BTRFS_COMPARE_TREE_NEW ||
	    result == BTRFS_COMPARE_TREE_CHANGED) {
		left_ii = btrfs_item_ptr(sctx->left_path->nodes[0],
				sctx->left_path->slots[0],
				struct btrfs_inode_item);
		left_gen = btrfs_inode_generation(sctx->left_path->nodes[0],
				left_ii);
	} else {
		right_ii = btrfs_item_ptr(sctx->right_path->nodes[0],
				sctx->right_path->slots[0],
				struct btrfs_inode_item);
		right_gen = btrfs_inode_generation(sctx->right_path->nodes[0],
				right_ii);
	}
	if (result == BTRFS_COMPARE_TREE_CHANGED) {
		right_ii = btrfs_item_ptr(sctx->right_path->nodes[0],
				sctx->right_path->slots[0],
				struct btrfs_inode_item);

		right_gen = btrfs_inode_generation(sctx->right_path->nodes[0],
				right_ii);

		/*
		 * The cur_ino = root dir case is special here. We can't treat
		 * the inode as deleted+reused because it would generate a
		 * stream that tries to delete/mkdir the root dir.
		 */
		if (left_gen != right_gen &&
		    sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID)
			sctx->cur_inode_new_gen = 1;
	}

	if (result == BTRFS_COMPARE_TREE_NEW) {
		sctx->cur_inode_gen = left_gen;
		sctx->cur_inode_new = 1;
		sctx->cur_inode_deleted = 0;
		sctx->cur_inode_size = btrfs_inode_size(
				sctx->left_path->nodes[0], left_ii);
		sctx->cur_inode_mode = btrfs_inode_mode(
				sctx->left_path->nodes[0], left_ii);
		sctx->cur_inode_rdev = btrfs_inode_rdev(
				sctx->left_path->nodes[0], left_ii);
		if (sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID)
			ret = send_create_inode_if_needed(sctx);
	} else if (result == BTRFS_COMPARE_TREE_DELETED) {
		sctx->cur_inode_gen = right_gen;
		sctx->cur_inode_new = 0;
		sctx->cur_inode_deleted = 1;
		sctx->cur_inode_size = btrfs_inode_size(
				sctx->right_path->nodes[0], right_ii);
		sctx->cur_inode_mode = btrfs_inode_mode(
				sctx->right_path->nodes[0], right_ii);
	} else if (result == BTRFS_COMPARE_TREE_CHANGED) {
		/*
		 * We need to do some special handling in case the inode was
		 * reported as changed with a changed generation number. This
		 * means that the original inode was deleted and a new inode
		 * reused the same inum. So we have to treat the old inode as
		 * deleted and the new one as new.
		 */
		if (sctx->cur_inode_new_gen) {
			/*
			 * First, process the inode as if it was deleted.
			 */
			sctx->cur_inode_gen = right_gen;
			sctx->cur_inode_new = 0;
			sctx->cur_inode_deleted = 1;
			sctx->cur_inode_size = btrfs_inode_size(
					sctx->right_path->nodes[0], right_ii);
			sctx->cur_inode_mode = btrfs_inode_mode(
					sctx->right_path->nodes[0], right_ii);
			ret = process_all_refs(sctx,
					BTRFS_COMPARE_TREE_DELETED);
			if (ret < 0)
				goto out;

			/*
			 * Now process the inode as if it was new.
			 */
			sctx->cur_inode_gen = left_gen;
			sctx->cur_inode_new = 1;
			sctx->cur_inode_deleted = 0;
			sctx->cur_inode_size = btrfs_inode_size(
					sctx->left_path->nodes[0], left_ii);
			sctx->cur_inode_mode = btrfs_inode_mode(
					sctx->left_path->nodes[0], left_ii);
			sctx->cur_inode_rdev = btrfs_inode_rdev(
					sctx->left_path->nodes[0], left_ii);
			ret = send_create_inode_if_needed(sctx);
			if (ret < 0)
				goto out;

			ret = process_all_refs(sctx, BTRFS_COMPARE_TREE_NEW);
			if (ret < 0)
				goto out;
			/*
			 * Advance send_progress now as we did not get into
			 * process_recorded_refs_if_needed in the new_gen case.
			 */
			sctx->send_progress = sctx->cur_ino + 1;

			/*
			 * Now process all extents and xattrs of the inode as if
			 * they were all new.
			 */
			ret = process_all_extents(sctx);
			if (ret < 0)
				goto out;
			ret = process_all_new_xattrs(sctx);
			if (ret < 0)
				goto out;
		} else {
			sctx->cur_inode_gen = left_gen;
			sctx->cur_inode_new = 0;
			sctx->cur_inode_new_gen = 0;
			sctx->cur_inode_deleted = 0;
			sctx->cur_inode_size = btrfs_inode_size(
					sctx->left_path->nodes[0], left_ii);
			sctx->cur_inode_mode = btrfs_inode_mode(
					sctx->left_path->nodes[0], left_ii);
		}
	}

out:
	return ret;
}

/*
 * We have to process new refs before deleted refs, but compare_trees gives us
 * the new and deleted refs mixed. To fix this, we record the new/deleted refs
 * first and later process them in process_recorded_refs.
 * For the cur_inode_new_gen case, we skip recording completely because
 * changed_inode already initiated processing of refs. The reason for this is
 * that in this case, compare_tree actually compares the refs of two different
 * inodes. To fix this, process_all_refs is used in changed_inode to handle all
 * refs of the right tree as deleted and all refs of the left tree as new.
 */
static int changed_ref(struct send_ctx *sctx,
		       enum btrfs_compare_tree_result result)
{
	int ret = 0;

	if (sctx->cur_ino != sctx->cmp_key->objectid) {
		inconsistent_snapshot_error(sctx, result, "reference");
		return -EIO;
	}

	if (!sctx->cur_inode_new_gen &&
	    sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID) {
		if (result == BTRFS_COMPARE_TREE_NEW)
			ret = record_new_ref(sctx);
		else if (result == BTRFS_COMPARE_TREE_DELETED)
			ret = record_deleted_ref(sctx);
		else if (result == BTRFS_COMPARE_TREE_CHANGED)
			ret = record_changed_ref(sctx);
	}

	return ret;
}

/*
 * Process new/deleted/changed xattrs. We skip processing in the
 * cur_inode_new_gen case because changed_inode already initiated processing
 * of xattrs. The reason is the same as in changed_ref.
 */
static int changed_xattr(struct send_ctx *sctx,
			 enum btrfs_compare_tree_result result)
{
	int ret = 0;

	if (sctx->cur_ino != sctx->cmp_key->objectid) {
		inconsistent_snapshot_error(sctx, result, "xattr");
		return -EIO;
	}

	if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
		if (result == BTRFS_COMPARE_TREE_NEW)
			ret = process_new_xattr(sctx);
		else if (result == BTRFS_COMPARE_TREE_DELETED)
			ret = process_deleted_xattr(sctx);
		else if (result == BTRFS_COMPARE_TREE_CHANGED)
			ret = process_changed_xattr(sctx);
	}

	return ret;
}

/*
 * Process new/deleted/changed extents. We skip processing in the
 * cur_inode_new_gen case because changed_inode already initiated processing
 * of extents. The reason is the same as in changed_ref.
 */
static int changed_extent(struct send_ctx *sctx,
			  enum btrfs_compare_tree_result result)
{
	int ret = 0;

	if (sctx->cur_ino != sctx->cmp_key->objectid) {

		if (result == BTRFS_COMPARE_TREE_CHANGED) {
			struct extent_buffer *leaf_l;
			struct extent_buffer *leaf_r;
			struct btrfs_file_extent_item *ei_l;
			struct btrfs_file_extent_item *ei_r;

			leaf_l = sctx->left_path->nodes[0];
			leaf_r = sctx->right_path->nodes[0];
			ei_l = btrfs_item_ptr(leaf_l,
					      sctx->left_path->slots[0],
					      struct btrfs_file_extent_item);
			ei_r = btrfs_item_ptr(leaf_r,
					      sctx->right_path->slots[0],
					      struct btrfs_file_extent_item);

			/*
			 * We may have found an extent item that has changed
			 * only its disk_bytenr field and the corresponding
			 * inode item was not updated. This case happens due to
			 * very specific timings during relocation when a leaf
			 * that contains file extent items is COWed while
			 * relocation is ongoing and it's in the stage where it
			 * updates data pointers. So when this happens we can
			 * safely ignore it since we know it's the same extent,
			 * but just at different logical and physical locations
			 * (when an extent is fully replaced with a new one, we
			 * know the generation number must have changed too,
			 * since snapshot creation implies committing the current
			 * transaction, and the inode item must have been updated
			 * as well).
			 * This replacement of the disk_bytenr happens at
			 * relocation.c:replace_file_extents() through
			 * relocation.c:btrfs_reloc_cow_block().
			 */
			if (btrfs_file_extent_generation(leaf_l, ei_l) ==
			    btrfs_file_extent_generation(leaf_r, ei_r) &&
			    btrfs_file_extent_ram_bytes(leaf_l, ei_l) ==
			    btrfs_file_extent_ram_bytes(leaf_r, ei_r) &&
			    btrfs_file_extent_compression(leaf_l, ei_l) ==
			    btrfs_file_extent_compression(leaf_r, ei_r) &&
			    btrfs_file_extent_encryption(leaf_l, ei_l) ==
			    btrfs_file_extent_encryption(leaf_r, ei_r) &&
			    btrfs_file_extent_other_encoding(leaf_l, ei_l) ==
			    btrfs_file_extent_other_encoding(leaf_r, ei_r) &&
			    btrfs_file_extent_type(leaf_l, ei_l) ==
			    btrfs_file_extent_type(leaf_r, ei_r) &&
			    btrfs_file_extent_disk_bytenr(leaf_l, ei_l) !=
			    btrfs_file_extent_disk_bytenr(leaf_r, ei_r) &&
			    btrfs_file_extent_disk_num_bytes(leaf_l, ei_l) ==
			    btrfs_file_extent_disk_num_bytes(leaf_r, ei_r) &&
			    btrfs_file_extent_offset(leaf_l, ei_l) ==
			    btrfs_file_extent_offset(leaf_r, ei_r) &&
			    btrfs_file_extent_num_bytes(leaf_l, ei_l) ==
			    btrfs_file_extent_num_bytes(leaf_r, ei_r))
				return 0;
		}

		inconsistent_snapshot_error(sctx, result, "extent");
		return -EIO;
	}

	if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
		if (result != BTRFS_COMPARE_TREE_DELETED)
			ret = process_extent(sctx, sctx->left_path,
					sctx->cmp_key);
	}

	return ret;
}
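
/*
 * Return 1 if the generation of the directory inode @dir differs between the
 * send and the parent root, 0 if it is the same and a negative error
 * otherwise.
 */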
static int dir_changed(struct send_ctx *sctx, u64 dir)
{
	u64 orig_gen, new_gen;
	int ret;

	ret = get_inode_info(sctx->send_root, dir, NULL, &new_gen, NULL, NULL,
			     NULL, NULL);
	if (ret)
		return ret;

	ret = get_inode_info(sctx->parent_root, dir, NULL, &orig_gen, NULL,
			     NULL, NULL, NULL);
	if (ret)
		return ret;

	return (orig_gen != new_gen) ? 1 : 0;
}

static int compare_refs(struct send_ctx *sctx, struct btrfs_path *path,
			struct btrfs_key *key)
{
	struct btrfs_inode_extref *extref;
	struct extent_buffer *leaf;
	u64 dirid = 0, last_dirid = 0;
	unsigned long ptr;
	u32 item_size;
	u32 cur_offset = 0;
	int ref_name_len;
	int ret = 0;

	/* Easy case, just check this one dirid */
	if (key->type == BTRFS_INODE_REF_KEY) {
		dirid = key->offset;

		ret = dir_changed(sctx, dirid);
		goto out;
	}

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
	while (cur_offset < item_size) {
		extref = (struct btrfs_inode_extref *)(ptr +
						       cur_offset);
		dirid = btrfs_inode_extref_parent(leaf, extref);
		ref_name_len = btrfs_inode_extref_name_len(leaf, extref);
		cur_offset += ref_name_len + sizeof(*extref);
		if (dirid == last_dirid)
			continue;
		ret = dir_changed(sctx, dirid);
		if (ret)
			break;
		last_dirid = dirid;
	}
out:
	return ret;
}
/*
 * If orphan cleanup did remove any orphans from a root, it means the tree
 * was modified and therefore the commit root is not the same as the current
 * root anymore. This is a problem, because send uses the commit root and
 * therefore can see inode items that don't exist in the current root anymore,
 * and for example make calls to btrfs_iget, which will do tree lookups based
 * on the current root and not on the commit root. Those lookups will fail,
 * returning a -ESTALE error, and making send fail with that error. So make
 * sure a send does not see any orphans we have just removed, and that it will
 * see the same inodes regardless of whether a transaction commit happened
 * before it started (meaning that the commit root will be the same as the
 * current root) or not.
 */
static int ensure_commit_roots_uptodate(struct send_ctx *sctx)
{
	int i;
	struct btrfs_trans_handle *trans = NULL;

again:
	if (sctx->parent_root &&
	    sctx->parent_root->node != sctx->parent_root->commit_root)
		goto commit_trans;

	for (i = 0; i < sctx->clone_roots_cnt; i++)
		if (sctx->clone_roots[i].root->node !=
		    sctx->clone_roots[i].root->commit_root)
			goto commit_trans;

	if (trans)
		return btrfs_end_transaction(trans);

	return 0;

commit_trans:
	/* Use any root, all fs roots will get their commit roots updated. */
	if (!trans) {
		trans = btrfs_join_transaction(sctx->send_root);
		if (IS_ERR(trans))
			return PTR_ERR(trans);
		goto again;
	}

	return btrfs_commit_transaction(trans);
}

static void btrfs_root_dec_send_in_progress(struct btrfs_root *root)
{
	spin_lock(&root->root_item_lock);
	root->send_in_progress--;
	/*
	 * Not much left to do, we don't know why it's unbalanced and
	 * can't blindly reset it to 0.
	 */
	if (root->send_in_progress < 0)
		btrfs_err(root->fs_info,
			  "send_in_progress unbalanced %d root %llu",
			  root->send_in_progress, root->root_key.objectid);
	spin_unlock(&root->root_item_lock);
}
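/*
 * Note on the send_in_progress accounting: the decrement above pairs with
 * the increments done in btrfs_ioctl_send() below, all taken under
 * root_item_lock:
 *
 *	spin_lock(&root->root_item_lock);
 *	root->send_in_progress++;
 *	spin_unlock(&root->root_item_lock);
 *
 * The send root, the parent root and every clone source take the increment,
 * so each of them must see exactly one matching decrement on every exit
 * path. This is why the cleanup code at the end of btrfs_ioctl_send()
 * distinguishes the sorted case (where the send root is part of the
 * clone_roots array) from the unsorted/rollback case.
 */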
long btrfs_ioctl_send(struct file *mnt_file, struct btrfs_ioctl_send_args *arg)
{
	int ret = 0;
	struct btrfs_root *send_root = BTRFS_I(file_inode(mnt_file))->root;
	struct btrfs_fs_info *fs_info = send_root->fs_info;
	struct btrfs_root *clone_root;
	struct btrfs_key key;
	struct send_ctx *sctx = NULL;
	u32 i;
	u64 *clone_sources_tmp = NULL;
	int clone_sources_to_rollback = 0;
	unsigned alloc_size;
	int sort_clone_roots = 0;
	int index;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/*
	 * The subvolume must remain read-only during send, protect against
	 * making it RW. This also protects against deletion.
	 */
	spin_lock(&send_root->root_item_lock);
	send_root->send_in_progress++;
	spin_unlock(&send_root->root_item_lock);

	/*
	 * This is done when we lookup the root, it should already be complete
	 * by the time we get here.
	 */
	WARN_ON(send_root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE);

	/*
	 * Userspace tools do the checks and warn the user if it's
	 * not RO.
	 */
	if (!btrfs_root_readonly(send_root)) {
		ret = -EPERM;
		goto out;
	}

	/*
	 * Check that we don't overflow at later allocations, we request
	 * clone_sources_count + 1 items, and compare to unsigned long inside
	 * access_ok.
	 */
	if (arg->clone_sources_count >
	    ULONG_MAX / sizeof(struct clone_root) - 1) {
		ret = -EINVAL;
		goto out;
	}

	if (!access_ok(VERIFY_READ, arg->clone_sources,
			sizeof(*arg->clone_sources) *
			arg->clone_sources_count)) {
		ret = -EFAULT;
		goto out;
	}

	if (arg->flags & ~BTRFS_SEND_FLAG_MASK) {
		ret = -EINVAL;
		goto out;
	}

	sctx = kzalloc(sizeof(struct send_ctx), GFP_KERNEL);
	if (!sctx) {
		ret = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&sctx->new_refs);
	INIT_LIST_HEAD(&sctx->deleted_refs);
	INIT_RADIX_TREE(&sctx->name_cache, GFP_KERNEL);
	INIT_LIST_HEAD(&sctx->name_cache_list);

	sctx->flags = arg->flags;

	sctx->send_filp = fget(arg->send_fd);
	if (!sctx->send_filp) {
		ret = -EBADF;
		goto out;
	}

	sctx->send_root = send_root;
	/*
	 * Unlikely but possible, if the subvolume is marked for deletion but
	 * is slow to remove the directory entry, send can still be started.
	 */
	if (btrfs_root_dead(sctx->send_root)) {
		ret = -EPERM;
		goto out;
	}

	sctx->clone_roots_cnt = arg->clone_sources_count;

	sctx->send_max_size = BTRFS_SEND_BUF_SIZE;
	sctx->send_buf = kvmalloc(sctx->send_max_size, GFP_KERNEL);
	if (!sctx->send_buf) {
		ret = -ENOMEM;
		goto out;
	}

	sctx->read_buf = kvmalloc(BTRFS_SEND_READ_SIZE, GFP_KERNEL);
	if (!sctx->read_buf) {
		ret = -ENOMEM;
		goto out;
	}

	sctx->pending_dir_moves = RB_ROOT;
	sctx->waiting_dir_moves = RB_ROOT;
	sctx->orphan_dirs = RB_ROOT;

	alloc_size = sizeof(struct clone_root) * (arg->clone_sources_count + 1);

	sctx->clone_roots = kzalloc(alloc_size, GFP_KERNEL);
	if (!sctx->clone_roots) {
		ret = -ENOMEM;
		goto out;
	}

	alloc_size = arg->clone_sources_count * sizeof(*arg->clone_sources);

	if (arg->clone_sources_count) {
		clone_sources_tmp = kvmalloc(alloc_size, GFP_KERNEL);
		if (!clone_sources_tmp) {
			ret = -ENOMEM;
			goto out;
		}

		ret = copy_from_user(clone_sources_tmp, arg->clone_sources,
				     alloc_size);
		if (ret) {
			ret = -EFAULT;
			goto out;
		}

		for (i = 0; i < arg->clone_sources_count; i++) {
			key.objectid = clone_sources_tmp[i];
			key.type = BTRFS_ROOT_ITEM_KEY;
			key.offset = (u64)-1;

			index = srcu_read_lock(&fs_info->subvol_srcu);

			clone_root = btrfs_read_fs_root_no_name(fs_info, &key);
			if (IS_ERR(clone_root)) {
				srcu_read_unlock(&fs_info->subvol_srcu, index);
				ret = PTR_ERR(clone_root);
				goto out;
			}
			spin_lock(&clone_root->root_item_lock);
			if (!btrfs_root_readonly(clone_root) ||
			    btrfs_root_dead(clone_root)) {
				spin_unlock(&clone_root->root_item_lock);
				srcu_read_unlock(&fs_info->subvol_srcu, index);
				ret = -EPERM;
				goto out;
			}
			clone_root->send_in_progress++;
			spin_unlock(&clone_root->root_item_lock);
			srcu_read_unlock(&fs_info->subvol_srcu, index);

			sctx->clone_roots[i].root = clone_root;
			clone_sources_to_rollback = i + 1;
		}
		kvfree(clone_sources_tmp);
		clone_sources_tmp = NULL;
	}

	if (arg->parent_root) {
		key.objectid = arg->parent_root;
		key.type = BTRFS_ROOT_ITEM_KEY;
		key.offset = (u64)-1;

		index = srcu_read_lock(&fs_info->subvol_srcu);

		sctx->parent_root = btrfs_read_fs_root_no_name(fs_info, &key);
		if (IS_ERR(sctx->parent_root)) {
			srcu_read_unlock(&fs_info->subvol_srcu, index);
			ret = PTR_ERR(sctx->parent_root);
			goto out;
		}

		spin_lock(&sctx->parent_root->root_item_lock);
		sctx->parent_root->send_in_progress++;
		if (!btrfs_root_readonly(sctx->parent_root) ||
		    btrfs_root_dead(sctx->parent_root)) {
			spin_unlock(&sctx->parent_root->root_item_lock);
			srcu_read_unlock(&fs_info->subvol_srcu, index);
			ret = -EPERM;
			goto out;
		}
		spin_unlock(&sctx->parent_root->root_item_lock);

		srcu_read_unlock(&fs_info->subvol_srcu, index);
	}

	/*
	 * Clones from send_root are allowed, but only if the clone source
	 * is behind the current send position. This is checked while searching
	 * for possible clone sources.
	 */
	sctx->clone_roots[sctx->clone_roots_cnt++].root = sctx->send_root;

	/* We do a bsearch later */
	sort(sctx->clone_roots, sctx->clone_roots_cnt,
	     sizeof(*sctx->clone_roots), __clone_root_cmp_sort,
	     NULL);
	sort_clone_roots = 1;

	ret = ensure_commit_roots_uptodate(sctx);
	if (ret)
		goto out;
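	/*
	 * Send works entirely on commit roots and is not supposed to start
	 * or join a transaction while the stream is being generated. The
	 * BTRFS_SEND_TRANS_STUB marker placed in current->journal_info below
	 * lets the transaction code detect and flag any such attempt made
	 * from within the send path.
	 */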
	current->journal_info = BTRFS_SEND_TRANS_STUB;
	ret = send_subvol(sctx);
	current->journal_info = NULL;
	if (ret < 0)
		goto out;

	if (!(sctx->flags & BTRFS_SEND_FLAG_OMIT_END_CMD)) {
		ret = begin_cmd(sctx, BTRFS_SEND_C_END);
		if (ret < 0)
			goto out;
		ret = send_cmd(sctx);
		if (ret < 0)
			goto out;
	}

out:
	WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->pending_dir_moves));
	while (sctx && !RB_EMPTY_ROOT(&sctx->pending_dir_moves)) {
		struct rb_node *n;
		struct pending_dir_move *pm;

		n = rb_first(&sctx->pending_dir_moves);
		pm = rb_entry(n, struct pending_dir_move, node);
		while (!list_empty(&pm->list)) {
			struct pending_dir_move *pm2;

			pm2 = list_first_entry(&pm->list,
					       struct pending_dir_move, list);
			free_pending_move(sctx, pm2);
		}
		free_pending_move(sctx, pm);
	}

	WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->waiting_dir_moves));
	while (sctx && !RB_EMPTY_ROOT(&sctx->waiting_dir_moves)) {
		struct rb_node *n;
		struct waiting_dir_move *dm;

		n = rb_first(&sctx->waiting_dir_moves);
		dm = rb_entry(n, struct waiting_dir_move, node);
		rb_erase(&dm->node, &sctx->waiting_dir_moves);
		kfree(dm);
	}

	WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->orphan_dirs));
	while (sctx && !RB_EMPTY_ROOT(&sctx->orphan_dirs)) {
		struct rb_node *n;
		struct orphan_dir_info *odi;

		n = rb_first(&sctx->orphan_dirs);
		odi = rb_entry(n, struct orphan_dir_info, node);
		free_orphan_dir_info(sctx, odi);
	}

	if (sort_clone_roots) {
		for (i = 0; i < sctx->clone_roots_cnt; i++)
			btrfs_root_dec_send_in_progress(
					sctx->clone_roots[i].root);
	} else {
		for (i = 0; sctx && i < clone_sources_to_rollback; i++)
			btrfs_root_dec_send_in_progress(
					sctx->clone_roots[i].root);

		btrfs_root_dec_send_in_progress(send_root);
	}
	if (sctx && !IS_ERR_OR_NULL(sctx->parent_root))
		btrfs_root_dec_send_in_progress(sctx->parent_root);

	kvfree(clone_sources_tmp);

	if (sctx) {
		if (sctx->send_filp)
			fput(sctx->send_filp);

		kvfree(sctx->clone_roots);
		kvfree(sctx->send_buf);
		kvfree(sctx->read_buf);

		name_cache_free(sctx);

		kfree(sctx);
	}

	return ret;
}
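/*
 * Illustrative sketch (not kernel code): what a consumer of the stream
 * written through sctx->send_filp might do to validate the header emitted
 * by send_header(). The layout follows struct btrfs_stream_header from
 * send.h; this reader is a simplification and skips short-read handling.
 *
 *	struct btrfs_stream_header hdr;
 *
 *	if (read(stream_fd, &hdr, sizeof(hdr)) != sizeof(hdr))
 *		return -EIO;
 *	if (strcmp(hdr.magic, BTRFS_SEND_STREAM_MAGIC) != 0)
 *		return -EINVAL;
 *	if (le32_to_cpu(hdr.version) > BTRFS_SEND_STREAM_VERSION)
 *		return -EOPNOTSUPP;
 *
 *	After the header, the stream is a sequence of struct btrfs_cmd_header
 *	records, each followed by TLV-encoded attributes, terminated by a
 *	BTRFS_SEND_C_END command (unless BTRFS_SEND_FLAG_OMIT_END_CMD was
 *	requested, as handled above).
 */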