// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/semaphore.h>
#include <linux/uuid.h>
#include <linux/list_sort.h>
#include <linux/namei.h>
#include "misc.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "volumes.h"
#include "raid56.h"
#include "rcu-string.h"
#include "dev-replace.h"
#include "sysfs.h"
#include "tree-checker.h"
#include "space-info.h"
#include "block-group.h"
#include "discard.h"
#include "zoned.h"
#include "fs.h"
#include "accessors.h"
#include "uuid-tree.h"
#include "ioctl.h"
#include "relocation.h"
#include "scrub.h"
#include "super.h"
#include "raid-stripe-tree.h"

#define BTRFS_BLOCK_GROUP_STRIPE_MASK	(BTRFS_BLOCK_GROUP_RAID0 | \
					 BTRFS_BLOCK_GROUP_RAID10 | \
					 BTRFS_BLOCK_GROUP_RAID56_MASK)

struct btrfs_io_geometry {
	u32 stripe_index;
	u32 stripe_nr;
	int mirror_num;
	int num_stripes;
	u64 stripe_offset;
	u64 raid56_full_stripe_start;
	int max_errors;
	enum btrfs_map_op op;
};

const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
	[BTRFS_RAID_RAID10] = {
		.sub_stripes	= 2,
		.dev_stripes	= 1,
		.devs_max	= 0,	/* 0 == as many as possible */
		.devs_min	= 2,
		.tolerated_failures = 1,
		.devs_increment	= 2,
		.ncopies	= 2,
		.nparity	= 0,
		.raid_name	= "raid10",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID10,
		.mindev_error	= BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 2,
		.devs_min	= 2,
		.tolerated_failures = 1,
		.devs_increment	= 2,
		.ncopies	= 2,
		.nparity	= 0,
		.raid_name	= "raid1",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID1,
		.mindev_error	= BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1C3] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 3,
		.devs_min	= 3,
		.tolerated_failures = 2,
		.devs_increment	= 3,
		.ncopies	= 3,
		.nparity	= 0,
		.raid_name	= "raid1c3",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID1C3,
		.mindev_error	= BTRFS_ERROR_DEV_RAID1C3_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1C4] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 4,
		.devs_min	= 4,
		.tolerated_failures = 3,
		.devs_increment	= 4,
		.ncopies	= 4,
		.nparity	= 0,
		.raid_name	= "raid1c4",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID1C4,
		.mindev_error	= BTRFS_ERROR_DEV_RAID1C4_MIN_NOT_MET,
	},
	[BTRFS_RAID_DUP] = {
		.sub_stripes	= 1,
		.dev_stripes	= 2,
		.devs_max	= 1,
		.devs_min	= 1,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 2,
		.nparity	= 0,
		.raid_name	= "dup",
		.bg_flag	= BTRFS_BLOCK_GROUP_DUP,
		.mindev_error	= 0,
	},
	[BTRFS_RAID_RAID0] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 1,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity	= 0,
		.raid_name	= "raid0",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID0,
		.mindev_error	= 0,
	},
	[BTRFS_RAID_SINGLE] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 1,
		.devs_min	= 1,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity	= 0,
		.raid_name	= "single",
		.bg_flag	= 0,
		.mindev_error	= 0,
	},
	[BTRFS_RAID_RAID5] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 2,
		.tolerated_failures = 1,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity	= 1,
		.raid_name	= "raid5",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID5,
		.mindev_error	= BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID6] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 3,
		.tolerated_failures = 2,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity	= 2,
		.raid_name	= "raid6",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID6,
		.mindev_error	= BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET,
	},
};
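
/*
 * Worked reading of the table above (illustrative only): a RAID10 chunk has
 * ncopies = 2 and sub_stripes = 2, so a 1 GiB logical chunk consumes 2 GiB
 * of raw device space and tolerates one device failure
 * (tolerated_failures = 1). A RAID6 chunk has nparity = 2, so each full
 * stripe across N devices yields N - 2 devices worth of usable data.
 */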

/*
 * Convert block group flags (BTRFS_BLOCK_GROUP_*) to btrfs_raid_types, which
 * can be used as an index to access btrfs_raid_array[].
 */
enum btrfs_raid_types __attribute_const__ btrfs_bg_flags_to_raid_index(u64 flags)
{
	const u64 profile = (flags & BTRFS_BLOCK_GROUP_PROFILE_MASK);

	if (!profile)
		return BTRFS_RAID_SINGLE;

	return BTRFS_BG_FLAG_TO_INDEX(profile);
}

const char *btrfs_bg_type_to_raid_name(u64 flags)
{
	const int index = btrfs_bg_flags_to_raid_index(flags);

	if (index >= BTRFS_NR_RAID_TYPES)
		return NULL;

	return btrfs_raid_array[index].raid_name;
}

int btrfs_nr_parity_stripes(u64 type)
{
	enum btrfs_raid_types index = btrfs_bg_flags_to_raid_index(type);

	return btrfs_raid_array[index].nparity;
}

/*
 * Fill @buf with textual description of @bg_flags, no more than @size_buf
 * bytes including the terminating null byte.
 */
void btrfs_describe_block_groups(u64 bg_flags, char *buf, u32 size_buf)
{
	int i;
	int ret;
	char *bp = buf;
	u64 flags = bg_flags;
	u32 size_bp = size_buf;

	if (!flags) {
		strcpy(bp, "NONE");
		return;
	}

#define DESCRIBE_FLAG(flag, desc)					\
	do {								\
		if (flags & (flag)) {					\
			ret = snprintf(bp, size_bp, "%s|", (desc));	\
			if (ret < 0 || ret >= size_bp)			\
				goto out_overflow;			\
			size_bp -= ret;					\
			bp += ret;					\
			flags &= ~(flag);				\
		}							\
	} while (0)

	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_DATA, "data");
	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_SYSTEM, "system");
	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_METADATA, "metadata");

	DESCRIBE_FLAG(BTRFS_AVAIL_ALLOC_BIT_SINGLE, "single");
	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
		DESCRIBE_FLAG(btrfs_raid_array[i].bg_flag,
			      btrfs_raid_array[i].raid_name);
#undef DESCRIBE_FLAG

	if (flags) {
		ret = snprintf(bp, size_bp, "0x%llx|", flags);
		size_bp -= ret;
	}

	if (size_bp < size_buf)
		buf[size_buf - size_bp - 1] = '\0'; /* remove last | */

	/*
	 * The text is trimmed, it's up to the caller to provide a
	 * sufficiently large buffer.
	 */
out_overflow:;
}
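
/*
 * Usage sketch (illustrative): for bg_flags == (BTRFS_BLOCK_GROUP_DATA |
 * BTRFS_BLOCK_GROUP_RAID1) and a large enough buffer, the result is the
 * string "data|raid1"; the trailing '|' of the last flag is trimmed above.
 */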

static int init_first_rw_device(struct btrfs_trans_handle *trans);
static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info);
static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);

/*
 * Device locking
 * ==============
 *
 * There are several mutexes that protect manipulation of devices and low-level
 * structures like chunks but not block groups, extents or files
 *
 * uuid_mutex (global lock)
 * ------------------------
 * protects the fs_uuids list that tracks all per-fs fs_devices, resulting from
 * the SCAN_DEV ioctl registration or from mount either implicitly (the first
 * device) or requested by the device= mount option
 *
 * the mutex can be very coarse and can cover long-running operations
 *
 * protects: updates to fs_devices counters like missing devices, rw devices,
 * seeding, structure cloning, opening/closing devices at mount/umount time
 *
 * global::fs_devs - add, remove, updates to the global list
 *
 * does not protect: manipulation of the fs_devices::devices list in general
 * but in mount context it could be used to exclude list modifications by eg.
 * scan ioctl
 *
 * btrfs_device::name - renames (write side), read is RCU
 *
 * fs_devices::device_list_mutex (per-fs, with RCU)
 * ------------------------------------------------
 * protects updates to fs_devices::devices, ie. adding and deleting
 *
 * simple list traversal with read-only actions can be done with RCU protection
 *
 * may be used to exclude some operations from running concurrently without any
 * modifications to the list (see write_all_supers)
 *
 * Is not required at mount and close times, because our device list is
 * protected by the uuid_mutex at that point.
 *
 * balance_mutex
 * -------------
 * protects balance structures (status, state) and context accessed from
 * several places (internally, ioctl)
 *
 * chunk_mutex
 * -----------
 * protects chunks, adding or removing during allocation, trim or when a new
 * device is added/removed. Additionally it also protects post_commit_list of
 * individual devices, since they can be added to the transaction's
 * post_commit_list only with chunk_mutex held.
 *
 * cleaner_mutex
 * -------------
 * a big lock that is held by the cleaner thread and prevents running subvolume
 * cleaning together with relocation or delayed iputs
 *
 *
 * Lock nesting
 * ============
 *
 * uuid_mutex
 *   device_list_mutex
 *     chunk_mutex
 *   balance_mutex
 *
 *
 * Exclusive operations
 * ====================
 *
 * Maintains the exclusivity of the following operations that apply to the
 * whole filesystem and cannot run in parallel.
 *
 * - Balance (*)
 * - Device add
 * - Device remove
 * - Device replace (*)
 * - Resize
 *
 * The device operations (as above) can be in one of the following states:
 *
 * - Running state
 * - Paused state
 * - Completed state
 *
 * Only device operations marked with (*) can go into the Paused state for the
 * following reasons:
 *
 * - ioctl (only Balance can be Paused through ioctl)
 * - filesystem remounted as read-only
 * - filesystem unmounted and mounted as read-only
 * - system power-cycle and filesystem mounted as read-only
 * - filesystem or device errors leading to forced read-only
 *
 * The status of exclusive operation is set and cleared atomically.
 * During the course of Paused state, fs_info::exclusive_operation remains set.
 * A device operation in Paused or Running state can be canceled or resumed
 * either by ioctl (Balance only) or when remounted as read-write.
 * The exclusive status is cleared when the device operation is canceled or
 * completed.
 */
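
/*
 * Nesting sketch (illustrative only): a path that needs both the global
 * registry lock and a per-fs device list lock must follow the order above:
 *
 *	mutex_lock(&uuid_mutex);
 *	mutex_lock(&fs_devices->device_list_mutex);
 *	...
 *	mutex_unlock(&fs_devices->device_list_mutex);
 *	mutex_unlock(&uuid_mutex);
 */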

DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);

struct list_head * __attribute_const__ btrfs_get_fs_uuids(void)
{
	return &fs_uuids;
}

/*
 * Allocate new btrfs_fs_devices structure identified by a fsid.
 *
 * @fsid:	if not NULL, copy the UUID to fs_devices::fsid and to
 *		fs_devices::metadata_fsid
 *
 * Return a pointer to a new struct btrfs_fs_devices on success, or ERR_PTR().
 * The returned struct is not linked onto any lists and can be destroyed with
 * kfree() right away.
 */
static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid)
{
	struct btrfs_fs_devices *fs_devs;

	fs_devs = kzalloc(sizeof(*fs_devs), GFP_KERNEL);
	if (!fs_devs)
		return ERR_PTR(-ENOMEM);

	mutex_init(&fs_devs->device_list_mutex);

	INIT_LIST_HEAD(&fs_devs->devices);
	INIT_LIST_HEAD(&fs_devs->alloc_list);
	INIT_LIST_HEAD(&fs_devs->fs_list);
	INIT_LIST_HEAD(&fs_devs->seed_list);

	if (fsid) {
		memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE);
		memcpy(fs_devs->metadata_uuid, fsid, BTRFS_FSID_SIZE);
	}

	return fs_devs;
}

static void btrfs_free_device(struct btrfs_device *device)
{
	WARN_ON(!list_empty(&device->post_commit_list));
	rcu_string_free(device->name);
	extent_io_tree_release(&device->alloc_state);
	btrfs_destroy_dev_zone_info(device);
	kfree(device);
}

static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;

	WARN_ON(fs_devices->opened);
	while (!list_empty(&fs_devices->devices)) {
		device = list_entry(fs_devices->devices.next,
				    struct btrfs_device, dev_list);
		list_del(&device->dev_list);
		btrfs_free_device(device);
	}
	kfree(fs_devices);
}

void __exit btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;

	while (!list_empty(&fs_uuids)) {
		fs_devices = list_entry(fs_uuids.next,
					struct btrfs_fs_devices, fs_list);
		list_del(&fs_devices->fs_list);
		free_fs_devices(fs_devices);
	}
}

static bool match_fsid_fs_devices(const struct btrfs_fs_devices *fs_devices,
				  const u8 *fsid, const u8 *metadata_fsid)
{
	if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) != 0)
		return false;

	if (!metadata_fsid)
		return true;

	if (memcmp(metadata_fsid, fs_devices->metadata_uuid, BTRFS_FSID_SIZE) != 0)
		return false;

	return true;
}

static noinline struct btrfs_fs_devices *find_fsid(
		const u8 *fsid, const u8 *metadata_fsid)
{
	struct btrfs_fs_devices *fs_devices;

	ASSERT(fsid);

	/* Handle non-split brain cases */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (match_fsid_fs_devices(fs_devices, fsid, metadata_fsid))
			return fs_devices;
	}
	return NULL;
}

static int
btrfs_get_bdev_and_sb(const char *device_path, blk_mode_t flags, void *holder,
		      int flush, struct file **bdev_file,
		      struct btrfs_super_block **disk_super)
{
	struct block_device *bdev;
	int ret;

	*bdev_file = bdev_file_open_by_path(device_path, flags, holder, NULL);

	if (IS_ERR(*bdev_file)) {
		ret = PTR_ERR(*bdev_file);
		goto error;
	}
	bdev = file_bdev(*bdev_file);

	if (flush)
		sync_blockdev(bdev);
	ret = set_blocksize(bdev, BTRFS_BDEV_BLOCKSIZE);
	if (ret) {
		fput(*bdev_file);
		goto error;
	}
	invalidate_bdev(bdev);
	*disk_super = btrfs_read_dev_super(bdev);
	if (IS_ERR(*disk_super)) {
		ret = PTR_ERR(*disk_super);
		fput(*bdev_file);
		goto error;
	}

	return 0;

error:
	*bdev_file = NULL;
	return ret;
}
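
/*
 * Usage sketch (illustrative, error handling elided): on success the caller
 * owns both references and releases them separately, as
 * btrfs_open_one_device() below does:
 *
 *	ret = btrfs_get_bdev_and_sb(path, flags, holder, 1, &bdev_file, &sb);
 *	if (!ret) {
 *		...
 *		btrfs_release_disk_super(sb);
 *		fput(bdev_file);
 *	}
 *
 * On failure *bdev_file is reset to NULL, so nothing needs to be released.
 */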

/*
 * Search and remove all stale devices (which are not mounted). When both
 * arguments are unset (@devt == 0 and @skip_device == NULL), it will search
 * and release all stale devices.
 *
 * @devt:	 Optional. When provided, it will release all unmounted devices
 *		 matching this devt only.
 * @skip_device: Optional. Will skip this device when searching for the stale
 *		 devices.
 *
 * Return:	0 for success or if @devt is 0.
 *		-EBUSY if @devt is a mounted device.
 *		-ENOENT if @devt does not match any device in the list.
 */
static int btrfs_free_stale_devices(dev_t devt, struct btrfs_device *skip_device)
{
	struct btrfs_fs_devices *fs_devices, *tmp_fs_devices;
	struct btrfs_device *device, *tmp_device;
	int ret;
	bool freed = false;

	lockdep_assert_held(&uuid_mutex);

	/* Return good status if there is no instance of devt. */
	ret = 0;
	list_for_each_entry_safe(fs_devices, tmp_fs_devices, &fs_uuids, fs_list) {

		mutex_lock(&fs_devices->device_list_mutex);
		list_for_each_entry_safe(device, tmp_device,
					 &fs_devices->devices, dev_list) {
			if (skip_device && skip_device == device)
				continue;
			if (devt && devt != device->devt)
				continue;
			if (fs_devices->opened) {
				if (devt)
					ret = -EBUSY;
				break;
			}

			/* delete the stale device */
			fs_devices->num_devices--;
			list_del(&device->dev_list);
			btrfs_free_device(device);

			freed = true;
		}
		mutex_unlock(&fs_devices->device_list_mutex);

		if (fs_devices->num_devices == 0) {
			btrfs_sysfs_remove_fsid(fs_devices);
			list_del(&fs_devices->fs_list);
			free_fs_devices(fs_devices);
		}
	}

	/* If there is at least one freed device return 0. */
	if (freed)
		return 0;

	return ret;
}

static struct btrfs_fs_devices *find_fsid_by_device(
					struct btrfs_super_block *disk_super,
					dev_t devt, bool *same_fsid_diff_dev)
{
	struct btrfs_fs_devices *fsid_fs_devices;
	struct btrfs_fs_devices *devt_fs_devices;
	const bool has_metadata_uuid = (btrfs_super_incompat_flags(disk_super) &
					BTRFS_FEATURE_INCOMPAT_METADATA_UUID);
	bool found_by_devt = false;

	/* Find the fs_device by the usual method, if found use it. */
	fsid_fs_devices = find_fsid(disk_super->fsid,
		    has_metadata_uuid ? disk_super->metadata_uuid : NULL);

	/* The temp_fsid feature is supported only with single device filesystem. */
	if (btrfs_super_num_devices(disk_super) != 1)
		return fsid_fs_devices;

	/*
	 * A seed device is an integral component of the sprout device, which
	 * functions as a multi-device filesystem. So, the temp-fsid feature
	 * is not supported.
	 */
	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING)
		return fsid_fs_devices;

	/* Try to find a fs_devices by matching devt. */
	list_for_each_entry(devt_fs_devices, &fs_uuids, fs_list) {
		struct btrfs_device *device;

		list_for_each_entry(device, &devt_fs_devices->devices, dev_list) {
			if (device->devt == devt) {
				found_by_devt = true;
				break;
			}
		}
		if (found_by_devt)
			break;
	}

	if (found_by_devt) {
		/* Existing device. */
		if (fsid_fs_devices == NULL) {
			if (devt_fs_devices->opened == 0) {
				/* Stale device. */
				return NULL;
			} else {
				/* temp_fsid is mounting a subvol. */
				return devt_fs_devices;
			}
		} else {
			/* Regular or temp_fsid device mounting a subvol. */
			return devt_fs_devices;
		}
	} else {
		/* New device. */
		if (fsid_fs_devices == NULL) {
			return NULL;
		} else {
			/* sb::fsid is already used, create a new temp_fsid. */
			*same_fsid_diff_dev = true;
			return NULL;
		}
	}

	/* Not reached. */
}
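
/*
 * Summary of the decision above (restating the branches, not new policy):
 *
 *	devt known	fsid match	opened	->  result
 *	yes		no		no	    NULL (stale device)
 *	yes		no		yes	    devt_fs_devices (temp_fsid subvol mount)
 *	yes		yes		-	    devt_fs_devices
 *	no		yes		-	    NULL, *same_fsid_diff_dev = true
 *	no		no		-	    NULL (brand new device)
 */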

/*
 * This is only used on mount, and we are protected from competing things
 * messing with our fs_devices by the uuid_mutex, thus we do not need the
 * fs_devices->device_list_mutex here.
 */
static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
			struct btrfs_device *device, blk_mode_t flags,
			void *holder)
{
	struct file *bdev_file;
	struct btrfs_super_block *disk_super;
	u64 devid;
	int ret;

	if (device->bdev)
		return -EINVAL;
	if (!device->name)
		return -EINVAL;

	ret = btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
				    &bdev_file, &disk_super);
	if (ret)
		return ret;

	devid = btrfs_stack_device_id(&disk_super->dev_item);
	if (devid != device->devid)
		goto error_free_page;

	if (memcmp(device->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE))
		goto error_free_page;

	device->generation = btrfs_super_generation(disk_super);

	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
		if (btrfs_super_incompat_flags(disk_super) &
		    BTRFS_FEATURE_INCOMPAT_METADATA_UUID) {
			pr_err(
		"BTRFS: Invalid seeding and uuid-changed device detected\n");
			goto error_free_page;
		}

		clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
		fs_devices->seeding = true;
	} else {
		if (bdev_read_only(file_bdev(bdev_file)))
			clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
		else
			set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
	}

	if (!bdev_nonrot(file_bdev(bdev_file)))
		fs_devices->rotating = true;

	if (bdev_max_discard_sectors(file_bdev(bdev_file)))
		fs_devices->discardable = true;

	device->bdev_file = bdev_file;
	device->bdev = file_bdev(bdev_file);
	clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);

	fs_devices->open_devices++;
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
		fs_devices->rw_devices++;
		list_add_tail(&device->dev_alloc_list, &fs_devices->alloc_list);
	}
	btrfs_release_disk_super(disk_super);

	return 0;

error_free_page:
	btrfs_release_disk_super(disk_super);
	fput(bdev_file);

	return -EINVAL;
}

u8 *btrfs_sb_fsid_ptr(struct btrfs_super_block *sb)
{
	bool has_metadata_uuid = (btrfs_super_incompat_flags(sb) &
				  BTRFS_FEATURE_INCOMPAT_METADATA_UUID);

	return has_metadata_uuid ? sb->metadata_uuid : sb->fsid;
}

/*
 * Add new device to list of registered devices
 *
 * Returns:
 * device pointer which was just added or updated when successful
 * error pointer when failed
 */
static noinline struct btrfs_device *device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   bool *new_device_added)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices = NULL;
	struct rcu_string *name;
	u64 found_transid = btrfs_super_generation(disk_super);
	u64 devid = btrfs_stack_device_id(&disk_super->dev_item);
	dev_t path_devt;
	int error;
	bool same_fsid_diff_dev = false;
	bool has_metadata_uuid = (btrfs_super_incompat_flags(disk_super) &
		BTRFS_FEATURE_INCOMPAT_METADATA_UUID);

	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_CHANGING_FSID_V2) {
		btrfs_err(NULL,
"device %s has incomplete metadata_uuid change, please use btrfstune to complete",
			  path);
		return ERR_PTR(-EAGAIN);
	}

	error = lookup_bdev(path, &path_devt);
	if (error) {
		btrfs_err(NULL, "failed to lookup block device for path %s: %d",
			  path, error);
		return ERR_PTR(error);
	}

	fs_devices = find_fsid_by_device(disk_super, path_devt, &same_fsid_diff_dev);

	if (!fs_devices) {
		fs_devices = alloc_fs_devices(disk_super->fsid);
		if (IS_ERR(fs_devices))
			return ERR_CAST(fs_devices);

		if (has_metadata_uuid)
			memcpy(fs_devices->metadata_uuid,
			       disk_super->metadata_uuid, BTRFS_FSID_SIZE);

		if (same_fsid_diff_dev) {
			generate_random_uuid(fs_devices->fsid);
			fs_devices->temp_fsid = true;
			pr_info("BTRFS: device %s (%d:%d) using temp-fsid %pU\n",
				path, MAJOR(path_devt), MINOR(path_devt),
				fs_devices->fsid);
		}

		mutex_lock(&fs_devices->device_list_mutex);
		list_add(&fs_devices->fs_list, &fs_uuids);

		device = NULL;
	} else {
		struct btrfs_dev_lookup_args args = {
			.devid = devid,
			.uuid = disk_super->dev_item.uuid,
		};

		mutex_lock(&fs_devices->device_list_mutex);
		device = btrfs_find_device(fs_devices, &args);

		if (found_transid > fs_devices->latest_generation) {
			memcpy(fs_devices->fsid, disk_super->fsid,
			       BTRFS_FSID_SIZE);
			memcpy(fs_devices->metadata_uuid,
			       btrfs_sb_fsid_ptr(disk_super), BTRFS_FSID_SIZE);
		}
	}

	if (!device) {
		unsigned int nofs_flag;

		if (fs_devices->opened) {
			btrfs_err(NULL,
"device %s (%d:%d) belongs to fsid %pU, and the fs is already mounted, scanned by %s (%d)",
				  path, MAJOR(path_devt), MINOR(path_devt),
				  fs_devices->fsid, current->comm,
				  task_pid_nr(current));
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-EBUSY);
		}

		nofs_flag = memalloc_nofs_save();
		device = btrfs_alloc_device(NULL, &devid,
					    disk_super->dev_item.uuid, path);
		memalloc_nofs_restore(nofs_flag);
		if (IS_ERR(device)) {
			mutex_unlock(&fs_devices->device_list_mutex);
			/* we can safely leave the fs_devices entry around */
			return device;
		}

		device->devt = path_devt;

		list_add_rcu(&device->dev_list, &fs_devices->devices);
		fs_devices->num_devices++;

		device->fs_devices = fs_devices;
		*new_device_added = true;

		if (disk_super->label[0])
			pr_info(
"BTRFS: device label %s devid %llu transid %llu %s (%d:%d) scanned by %s (%d)\n",
				disk_super->label, devid, found_transid, path,
				MAJOR(path_devt), MINOR(path_devt),
				current->comm, task_pid_nr(current));
		else
			pr_info(
"BTRFS: device fsid %pU devid %llu transid %llu %s (%d:%d) scanned by %s (%d)\n",
				disk_super->fsid, devid, found_transid, path,
				MAJOR(path_devt), MINOR(path_devt),
				current->comm, task_pid_nr(current));

	} else if (!device->name || strcmp(device->name->str, path)) {
		/*
		 * When FS is already mounted.
		 * 1. If you are here and if the device->name is NULL that
		 *    means this device was missing at time of FS mount.
		 * 2. If you are here and if the device->name is different
		 *    from 'path' that means either
		 *	a. The same device disappeared and reappeared with
		 *	   different name. or
		 *	b. The missing-disk-which-was-replaced, has
		 *	   reappeared now.
		 *
		 * We must allow 1 and 2a above. But 2b is spurious and
		 * unintentional.
		 *
		 * Further in case of 1 and 2a above, the disk at 'path'
		 * would have missed some transaction when it was away and
		 * in case of 2a the stale bdev has to be updated as well.
		 * 2b must not be allowed at any time.
		 */

		/*
		 * For now, we do allow update to btrfs_fs_device through the
		 * btrfs dev scan cli after FS has been mounted. We're still
		 * tracking a problem where systems fail mount by subvolume id
		 * when we reject replacement on a mounted FS.
		 */
		if (!fs_devices->opened && found_transid < device->generation) {
			/*
			 * That is if the FS is _not_ mounted and if you
			 * are here, that means there is more than one
			 * disk with same uuid and devid. We keep the one
			 * with larger generation number or the last-in if
			 * generations are equal.
			 */
			mutex_unlock(&fs_devices->device_list_mutex);
			btrfs_err(NULL,
"device %s already registered with a higher generation, found %llu expect %llu",
				  path, found_transid, device->generation);
			return ERR_PTR(-EEXIST);
		}

		/*
		 * We are going to replace the device path for a given devid,
		 * make sure it's the same device if the device is mounted.
		 *
		 * NOTE: the device->fs_info may not be reliable here so pass
		 * in a NULL to message helpers instead. This avoids a possible
		 * use-after-free when the fs_info and fs_info->sb are already
		 * torn down.
		 */
		if (device->bdev) {
			if (device->devt != path_devt) {
				mutex_unlock(&fs_devices->device_list_mutex);
				btrfs_warn_in_rcu(NULL,
	"duplicate device %s devid %llu generation %llu scanned by %s (%d)",
						  path, devid, found_transid,
						  current->comm,
						  task_pid_nr(current));
				return ERR_PTR(-EEXIST);
			}
			btrfs_info_in_rcu(NULL,
	"devid %llu device path %s changed to %s scanned by %s (%d)",
					  devid, btrfs_dev_name(device),
					  path, current->comm,
					  task_pid_nr(current));
		}

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name) {
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-ENOMEM);
		}
		rcu_string_free(device->name);
		rcu_assign_pointer(device->name, name);
		if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
			fs_devices->missing_devices--;
			clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
		}
		device->devt = path_devt;
	}

	/*
	 * Unmount does not free the btrfs_device struct but would zero
	 * generation along with most of the other members. So just update
	 * it back. We need it to pick the disk with largest generation
	 * (as above).
	 */
	if (!fs_devices->opened) {
		device->generation = found_transid;
		fs_devices->latest_generation = max_t(u64, found_transid,
						fs_devices->latest_generation);
	}

	fs_devices->total_devices = btrfs_super_num_devices(disk_super);

	mutex_unlock(&fs_devices->device_list_mutex);
	return device;
}
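
/*
 * To summarize the paths above (descriptive only): a scanned device either
 * creates a brand new fs_devices (unknown fsid), is added as a new device to
 * an existing fs_devices (new devid/uuid), or updates the path of an already
 * known device. Duplicate devids with an older generation, and path changes
 * of a device that is mounted, are rejected with -EEXIST.
 */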

static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
{
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_device *device;
	struct btrfs_device *orig_dev;
	int ret = 0;

	lockdep_assert_held(&uuid_mutex);

	fs_devices = alloc_fs_devices(orig->fsid);
	if (IS_ERR(fs_devices))
		return fs_devices;

	fs_devices->total_devices = orig->total_devices;

	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
		const char *dev_path = NULL;

		/*
		 * This is ok to do without RCU read locked because we hold the
		 * uuid mutex so nothing we touch in here is going to disappear.
		 */
		if (orig_dev->name)
			dev_path = orig_dev->name->str;

		device = btrfs_alloc_device(NULL, &orig_dev->devid,
					    orig_dev->uuid, dev_path);
		if (IS_ERR(device)) {
			ret = PTR_ERR(device);
			goto error;
		}

		if (orig_dev->zone_info) {
			struct btrfs_zoned_device_info *zone_info;

			zone_info = btrfs_clone_dev_zone_info(orig_dev);
			if (!zone_info) {
				btrfs_free_device(device);
				ret = -ENOMEM;
				goto error;
			}
			device->zone_info = zone_info;
		}

		list_add(&device->dev_list, &fs_devices->devices);
		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	}
	return fs_devices;
error:
	free_fs_devices(fs_devices);
	return ERR_PTR(ret);
}

static void __btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices,
				      struct btrfs_device **latest_dev)
{
	struct btrfs_device *device, *next;

	/* This is the initialized path, it is safe to release the devices. */
	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
		if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state)) {
			if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
				      &device->dev_state) &&
			    !test_bit(BTRFS_DEV_STATE_MISSING,
				      &device->dev_state) &&
			    (!*latest_dev ||
			     device->generation > (*latest_dev)->generation)) {
				*latest_dev = device;
			}
			continue;
		}

		/*
		 * We have already validated the presence of
		 * BTRFS_DEV_REPLACE_DEVID in btrfs_init_dev_replace() so just
		 * continue.
		 */
		if (device->devid == BTRFS_DEV_REPLACE_DEVID)
			continue;

		if (device->bdev_file) {
			fput(device->bdev_file);
			device->bdev = NULL;
			device->bdev_file = NULL;
			fs_devices->open_devices--;
		}
		if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
			list_del_init(&device->dev_alloc_list);
			clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
			fs_devices->rw_devices--;
		}
		list_del_init(&device->dev_list);
		fs_devices->num_devices--;
		btrfs_free_device(device);
	}
}

/*
 * After we have read the system tree and know devids belonging to this
 * filesystem, remove the device which does not belong there.
 */
void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *latest_dev = NULL;
	struct btrfs_fs_devices *seed_dev;

	mutex_lock(&uuid_mutex);
	__btrfs_free_extra_devids(fs_devices, &latest_dev);

	list_for_each_entry(seed_dev, &fs_devices->seed_list, seed_list)
		__btrfs_free_extra_devids(seed_dev, &latest_dev);

	fs_devices->latest_dev = latest_dev;

	mutex_unlock(&uuid_mutex);
}

static void btrfs_close_bdev(struct btrfs_device *device)
{
	if (!device->bdev)
		return;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
		sync_blockdev(device->bdev);
		invalidate_bdev(device->bdev);
	}

	fput(device->bdev_file);
}

static void btrfs_close_one_device(struct btrfs_device *device)
{
	struct btrfs_fs_devices *fs_devices = device->fs_devices;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
		list_del_init(&device->dev_alloc_list);
		fs_devices->rw_devices--;
	}

	if (device->devid == BTRFS_DEV_REPLACE_DEVID)
		clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);

	if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
		clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
		fs_devices->missing_devices--;
	}

	btrfs_close_bdev(device);
	if (device->bdev) {
		fs_devices->open_devices--;
		device->bdev = NULL;
	}
	clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
	btrfs_destroy_dev_zone_info(device);

	device->fs_info = NULL;
	atomic_set(&device->dev_stats_ccnt, 0);
	extent_io_tree_release(&device->alloc_state);

	/*
	 * Reset the flush error record. We might have a transient flush error
	 * in this mount, and if so we aborted the current transaction and set
	 * the fs to an error state, guaranteeing no super blocks can be
	 * further committed. However that error might be transient and if we
	 * unmount the filesystem and mount it again, we should allow the
	 * mount to succeed (btrfs_check_rw_degradable() should not fail) - if
	 * after mounting the filesystem again we still get flush errors, then
	 * we will again abort any transaction and set the error state,
	 * guaranteeing no commits of unsafe super blocks.
	 */
	device->last_flush_error = 0;

	/* Verify the device is back in a pristine state */
	WARN_ON(test_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state));
	WARN_ON(test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state));
	WARN_ON(!list_empty(&device->dev_alloc_list));
	WARN_ON(!list_empty(&device->post_commit_list));
}

static void close_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device, *tmp;

	lockdep_assert_held(&uuid_mutex);

	if (--fs_devices->opened > 0)
		return;

	list_for_each_entry_safe(device, tmp, &fs_devices->devices, dev_list)
		btrfs_close_one_device(device);

	WARN_ON(fs_devices->open_devices);
	WARN_ON(fs_devices->rw_devices);
	fs_devices->opened = 0;
	fs_devices->seeding = false;
	fs_devices->fs_info = NULL;
}

void btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	LIST_HEAD(list);
	struct btrfs_fs_devices *tmp;

	mutex_lock(&uuid_mutex);
	close_fs_devices(fs_devices);
	if (!fs_devices->opened) {
		list_splice_init(&fs_devices->seed_list, &list);

		/*
		 * If the struct btrfs_fs_devices is not assembled with any
		 * other device, it can be re-initialized during the next mount
		 * without needing the device-scan step. Therefore, it can be
		 * fully freed.
		 */
		if (fs_devices->num_devices == 1) {
			list_del(&fs_devices->fs_list);
			free_fs_devices(fs_devices);
		}
	}

	list_for_each_entry_safe(fs_devices, tmp, &list, seed_list) {
		close_fs_devices(fs_devices);
		list_del(&fs_devices->seed_list);
		free_fs_devices(fs_devices);
	}
	mutex_unlock(&uuid_mutex);
}

static int open_fs_devices(struct btrfs_fs_devices *fs_devices,
			   blk_mode_t flags, void *holder)
{
	struct btrfs_device *device;
	struct btrfs_device *latest_dev = NULL;
	struct btrfs_device *tmp_device;

	list_for_each_entry_safe(device, tmp_device, &fs_devices->devices,
				 dev_list) {
		int ret;

		ret = btrfs_open_one_device(fs_devices, device, flags, holder);
		if (ret == 0 &&
		    (!latest_dev || device->generation > latest_dev->generation)) {
			latest_dev = device;
		} else if (ret == -ENODATA) {
			fs_devices->num_devices--;
			list_del(&device->dev_list);
			btrfs_free_device(device);
		}
	}
	if (fs_devices->open_devices == 0)
		return -EINVAL;

	fs_devices->opened = 1;
	fs_devices->latest_dev = latest_dev;
	fs_devices->total_rw_bytes = 0;
	fs_devices->chunk_alloc_policy = BTRFS_CHUNK_ALLOC_REGULAR;
	fs_devices->read_policy = BTRFS_READ_POLICY_PID;

	return 0;
}

static int devid_cmp(void *priv, const struct list_head *a,
		     const struct list_head *b)
{
	const struct btrfs_device *dev1, *dev2;

	dev1 = list_entry(a, struct btrfs_device, dev_list);
	dev2 = list_entry(b, struct btrfs_device, dev_list);

	if (dev1->devid < dev2->devid)
		return -1;
	else if (dev1->devid > dev2->devid)
		return 1;
	return 0;
}

int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       blk_mode_t flags, void *holder)
{
	int ret;

	lockdep_assert_held(&uuid_mutex);
	/*
	 * The device_list_mutex cannot be taken here in case opening the
	 * underlying device takes further locks like open_mutex.
1228 * 1229 * We also don't need the lock here as this is called during mount and 1230 * exclusion is provided by uuid_mutex 1231 */ 1232 1233 if (fs_devices->opened) { 1234 fs_devices->opened++; 1235 ret = 0; 1236 } else { 1237 list_sort(NULL, &fs_devices->devices, devid_cmp); 1238 ret = open_fs_devices(fs_devices, flags, holder); 1239 } 1240 1241 return ret; 1242 } 1243 1244 void btrfs_release_disk_super(struct btrfs_super_block *super) 1245 { 1246 struct page *page = virt_to_page(super); 1247 1248 put_page(page); 1249 } 1250 1251 static struct btrfs_super_block *btrfs_read_disk_super(struct block_device *bdev, 1252 u64 bytenr, u64 bytenr_orig) 1253 { 1254 struct btrfs_super_block *disk_super; 1255 struct page *page; 1256 void *p; 1257 pgoff_t index; 1258 1259 /* make sure our super fits in the device */ 1260 if (bytenr + PAGE_SIZE >= bdev_nr_bytes(bdev)) 1261 return ERR_PTR(-EINVAL); 1262 1263 /* make sure our super fits in the page */ 1264 if (sizeof(*disk_super) > PAGE_SIZE) 1265 return ERR_PTR(-EINVAL); 1266 1267 /* make sure our super doesn't straddle pages on disk */ 1268 index = bytenr >> PAGE_SHIFT; 1269 if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_SHIFT != index) 1270 return ERR_PTR(-EINVAL); 1271 1272 /* pull in the page with our super */ 1273 page = read_cache_page_gfp(bdev->bd_inode->i_mapping, index, GFP_KERNEL); 1274 1275 if (IS_ERR(page)) 1276 return ERR_CAST(page); 1277 1278 p = page_address(page); 1279 1280 /* align our pointer to the offset of the super block */ 1281 disk_super = p + offset_in_page(bytenr); 1282 1283 if (btrfs_super_bytenr(disk_super) != bytenr_orig || 1284 btrfs_super_magic(disk_super) != BTRFS_MAGIC) { 1285 btrfs_release_disk_super(p); 1286 return ERR_PTR(-EINVAL); 1287 } 1288 1289 if (disk_super->label[0] && disk_super->label[BTRFS_LABEL_SIZE - 1]) 1290 disk_super->label[BTRFS_LABEL_SIZE - 1] = 0; 1291 1292 return disk_super; 1293 } 1294 1295 int btrfs_forget_devices(dev_t devt) 1296 { 1297 int ret; 1298 1299 mutex_lock(&uuid_mutex); 1300 ret = btrfs_free_stale_devices(devt, NULL); 1301 mutex_unlock(&uuid_mutex); 1302 1303 return ret; 1304 } 1305 1306 /* 1307 * Look for a btrfs signature on a device. This may be called out of the mount path 1308 * and we are not allowed to call set_blocksize during the scan. The superblock 1309 * is read via pagecache. 1310 * 1311 * With @mount_arg_dev it's a scan during mount time that will always register 1312 * the device or return an error. Multi-device and seeding devices are registered 1313 * in both cases. 1314 */ 1315 struct btrfs_device *btrfs_scan_one_device(const char *path, blk_mode_t flags, 1316 bool mount_arg_dev) 1317 { 1318 struct btrfs_super_block *disk_super; 1319 bool new_device_added = false; 1320 struct btrfs_device *device = NULL; 1321 struct file *bdev_file; 1322 u64 bytenr, bytenr_orig; 1323 int ret; 1324 1325 lockdep_assert_held(&uuid_mutex); 1326 1327 /* 1328 * we would like to check all the supers, but that would make 1329 * a btrfs mount succeed after a mkfs from a different FS. 1330 * So, we need to add a special mount option to scan for 1331 * later supers, using BTRFS_SUPER_MIRROR_MAX instead 1332 */ 1333 1334 /* 1335 * Avoid an exclusive open here, as the systemd-udev may initiate the 1336 * device scan which may race with the user's mount or mkfs command, 1337 * resulting in failure. 1338 * Since the device scan is solely for reading purposes, there is no 1339 * need for an exclusive open. Additionally, the devices are read again 1340 * during the mount process. 

int btrfs_forget_devices(dev_t devt)
{
	int ret;

	mutex_lock(&uuid_mutex);
	ret = btrfs_free_stale_devices(devt, NULL);
	mutex_unlock(&uuid_mutex);

	return ret;
}

/*
 * Look for a btrfs signature on a device. This may be called outside of the
 * mount path and we are not allowed to call set_blocksize during the scan.
 * The superblock is read via pagecache.
 *
 * With @mount_arg_dev it's a scan during mount time that will always register
 * the device or return an error. Multi-device and seeding devices are
 * registered in both cases.
 */
struct btrfs_device *btrfs_scan_one_device(const char *path, blk_mode_t flags,
					   bool mount_arg_dev)
{
	struct btrfs_super_block *disk_super;
	bool new_device_added = false;
	struct btrfs_device *device = NULL;
	struct file *bdev_file;
	u64 bytenr, bytenr_orig;
	int ret;

	lockdep_assert_held(&uuid_mutex);

	/*
	 * We would like to check all the supers, but that would make
	 * a btrfs mount succeed after a mkfs from a different FS.
	 * So, we need to add a special mount option to scan for
	 * later supers, using BTRFS_SUPER_MIRROR_MAX instead
	 */

	/*
	 * Avoid an exclusive open here, as the systemd-udev may initiate the
	 * device scan which may race with the user's mount or mkfs command,
	 * resulting in failure.
	 * Since the device scan is solely for reading purposes, there is no
	 * need for an exclusive open. Additionally, the devices are read again
	 * during the mount process. It is ok to get some inconsistent
	 * values temporarily, as the device paths of the fsid are the only
	 * required information for assembling the volume.
	 */
	bdev_file = bdev_file_open_by_path(path, flags, NULL, NULL);
	if (IS_ERR(bdev_file))
		return ERR_CAST(bdev_file);

	bytenr_orig = btrfs_sb_offset(0);
	ret = btrfs_sb_log_location_bdev(file_bdev(bdev_file), 0, READ, &bytenr);
	if (ret) {
		device = ERR_PTR(ret);
		goto error_bdev_put;
	}

	disk_super = btrfs_read_disk_super(file_bdev(bdev_file), bytenr,
					   bytenr_orig);
	if (IS_ERR(disk_super)) {
		device = ERR_CAST(disk_super);
		goto error_bdev_put;
	}

	if (!mount_arg_dev && btrfs_super_num_devices(disk_super) == 1 &&
	    !(btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING)) {
		dev_t devt;

		ret = lookup_bdev(path, &devt);
		if (ret)
			btrfs_warn(NULL, "lookup bdev failed for path %s: %d",
				   path, ret);
		else
			btrfs_free_stale_devices(devt, NULL);

		pr_debug("BTRFS: skip registering single non-seed device %s (%d:%d)\n",
			 path, MAJOR(devt), MINOR(devt));
		device = NULL;
		goto free_disk_super;
	}

	device = device_list_add(path, disk_super, &new_device_added);
	if (!IS_ERR(device) && new_device_added)
		btrfs_free_stale_devices(device->devt, device);

free_disk_super:
	btrfs_release_disk_super(disk_super);

error_bdev_put:
	fput(bdev_file);

	return device;
}

/*
 * Try to find a chunk that intersects [start, start + len] range and when one
 * such is found, record the end of it in *start
 */
static bool contains_pending_extent(struct btrfs_device *device, u64 *start,
				    u64 len)
{
	u64 physical_start, physical_end;

	lockdep_assert_held(&device->fs_info->chunk_mutex);

	if (find_first_extent_bit(&device->alloc_state, *start,
				  &physical_start, &physical_end,
				  CHUNK_ALLOCATED, NULL)) {

		if (in_range(physical_start, *start, len) ||
		    in_range(*start, physical_start,
			     physical_end + 1 - physical_start)) {
			*start = physical_end + 1;
			return true;
		}
	}
	return false;
}

static u64 dev_extent_search_start(struct btrfs_device *device)
{
	switch (device->fs_devices->chunk_alloc_policy) {
	case BTRFS_CHUNK_ALLOC_REGULAR:
		return BTRFS_DEVICE_RANGE_RESERVED;
	case BTRFS_CHUNK_ALLOC_ZONED:
		/*
		 * We don't care about the starting region like the regular
		 * allocator does, because we anyway use/reserve the first two
		 * zones for superblock logging.
		 */
		return 0;
	default:
		BUG();
	}
}

static bool dev_extent_hole_check_zoned(struct btrfs_device *device,
					u64 *hole_start, u64 *hole_size,
					u64 num_bytes)
{
	u64 zone_size = device->zone_info->zone_size;
	u64 pos;
	int ret;
	bool changed = false;

	ASSERT(IS_ALIGNED(*hole_start, zone_size));

	while (*hole_size > 0) {
		pos = btrfs_find_allocatable_zones(device, *hole_start,
						   *hole_start + *hole_size,
						   num_bytes);
		if (pos != *hole_start) {
			*hole_size = *hole_start + *hole_size - pos;
			*hole_start = pos;
			changed = true;
			if (*hole_size < num_bytes)
				break;
		}

		ret = btrfs_ensure_empty_zones(device, pos, num_bytes);

		/* Range is ensured to be empty */
		if (!ret)
			return changed;

		/* Given hole range was invalid (outside of device) */
		if (ret == -ERANGE) {
			*hole_start += *hole_size;
			*hole_size = 0;
			return true;
		}

		*hole_start += zone_size;
		*hole_size -= zone_size;
		changed = true;
	}

	return changed;
}

/*
 * Check if specified hole is suitable for allocation.
 *
 * @device:	the device which we have the hole on
 * @hole_start: starting position of the hole
 * @hole_size:	the size of the hole
 * @num_bytes:	the size of the free space that we need
 *
 * This function may modify @hole_start and @hole_size to reflect the suitable
 * position for allocation. Returns true if the hole position was updated,
 * false otherwise.
 */
static bool dev_extent_hole_check(struct btrfs_device *device, u64 *hole_start,
				  u64 *hole_size, u64 num_bytes)
{
	bool changed = false;
	u64 hole_end = *hole_start + *hole_size;

	for (;;) {
		/*
		 * Check before we set max_hole_start, otherwise we could end up
		 * sending back this offset anyway.
		 */
		if (contains_pending_extent(device, hole_start, *hole_size)) {
			if (hole_end >= *hole_start)
				*hole_size = hole_end - *hole_start;
			else
				*hole_size = 0;
			changed = true;
		}

		switch (device->fs_devices->chunk_alloc_policy) {
		case BTRFS_CHUNK_ALLOC_REGULAR:
			/* No extra check */
			break;
		case BTRFS_CHUNK_ALLOC_ZONED:
			if (dev_extent_hole_check_zoned(device, hole_start,
							hole_size, num_bytes)) {
				changed = true;
				/*
				 * The changed hole can contain pending extent.
				 * Loop again to check that.
				 */
				continue;
			}
			break;
		default:
			BUG();
		}

		break;
	}

	return changed;
}

/*
 * Find free space in the specified device.
 *
 * @device:	  the device which we search the free space in
 * @num_bytes:	  the size of the free space that we need
 * @search_start: the position from which to begin the search
 * @start:	  store the start of the free space.
 * @len:	  the size of the free space that we find, or the size
 *		  of the max free space if we don't find suitable free space
 *
 * This does a pretty simple search, the expectation is that it is called very
 * infrequently and that a given device has a small number of extents.
 *
 * @start is used to store the start of the free space if we find. But if we
 * don't find suitable free space, it will be used to store the start position
 * of the max free space.
 *
 * @len is used to store the size of the free space that we find.
 * But if we don't find suitable free space, it is used to store the size of
 * the max free space.
 *
 * NOTE: This function will search the *commit* root of the device tree, and
 * does an extra check to ensure dev extents are not double allocated.
 * This makes the function safe to allocate dev extents but may not report
 * correct usable device space, as a device extent freed in the current
 * transaction is not reported as available.
 */
static int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
				u64 *start, u64 *len)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_key key;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 search_start;
	u64 hole_size;
	u64 max_hole_start;
	u64 max_hole_size = 0;
	u64 extent_end;
	u64 search_end = device->total_bytes;
	int ret;
	int slot;
	struct extent_buffer *l;

	search_start = dev_extent_search_start(device);
	max_hole_start = search_start;

	WARN_ON(device->zone_info &&
		!IS_ALIGNED(num_bytes, device->zone_info->zone_size));

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}
again:
	if (search_start >= search_end ||
	    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
		ret = -ENOSPC;
		goto out;
	}

	path->reada = READA_FORWARD;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_backwards(root, &key, path);
	if (ret < 0)
		goto out;

	while (search_start < search_end) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (key.type != BTRFS_DEV_EXTENT_KEY)
			goto next;

		if (key.offset > search_end)
			break;

		if (key.offset > search_start) {
			hole_size = key.offset - search_start;
			dev_extent_hole_check(device, &search_start, &hole_size,
					      num_bytes);

			if (hole_size > max_hole_size) {
				max_hole_start = search_start;
				max_hole_size = hole_size;
			}

			/*
			 * If this free space is greater than what we need,
			 * it must be the max free space that we have found
			 * until now, so max_hole_start must point to the start
			 * of this free space and the length of this free space
			 * is stored in max_hole_size. Thus, we return
			 * max_hole_start and max_hole_size and go back to the
			 * caller.
			 */
			if (hole_size >= num_bytes) {
				ret = 0;
				goto out;
			}
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (extent_end > search_start)
			search_start = extent_end;
next:
		path->slots[0]++;
		cond_resched();
	}

	/*
	 * At this point, search_start should be the end of
	 * allocated dev extents, and when shrinking the device,
	 * search_end may be smaller than search_start.
	 */
	if (search_end > search_start) {
		hole_size = search_end - search_start;
		if (dev_extent_hole_check(device, &search_start, &hole_size,
					  num_bytes)) {
			btrfs_release_path(path);
			goto again;
		}

		if (hole_size > max_hole_size) {
			max_hole_start = search_start;
			max_hole_size = hole_size;
		}
	}

	/* See above. */
	if (max_hole_size < num_bytes)
		ret = -ENOSPC;
	else
		ret = 0;

	ASSERT(max_hole_start + max_hole_size <= search_end);
out:
	btrfs_free_path(path);
	*start = max_hole_start;
	if (len)
		*len = max_hole_size;
	return ret;
}
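
/*
 * Worked example (illustrative numbers): on a 16 MiB device with dev extents
 * covering [1 MiB, 5 MiB) and [9 MiB, 13 MiB), a search for num_bytes = 2 MiB
 * returns 0 with *start = 5 MiB (the [5 MiB, 9 MiB) hole), while a search for
 * num_bytes = 8 MiB returns -ENOSPC with *start and *len describing the
 * largest hole found, [5 MiB, 9 MiB).
 */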

static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
				 struct btrfs_device *device,
				 u64 start, u64 *dev_extent_len)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf = NULL;
	struct btrfs_dev_extent *extent = NULL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
again:
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid,
					  BTRFS_DEV_EXTENT_KEY);
		if (ret)
			goto out;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
		BUG_ON(found_key.offset > start || found_key.offset +
		       btrfs_dev_extent_length(leaf, extent) < start);
		key = found_key;
		btrfs_release_path(path);
		goto again;
	} else if (ret == 0) {
		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
	} else {
		goto out;
	}

	*dev_extent_len = btrfs_dev_extent_length(leaf, extent);

	ret = btrfs_del_item(trans, root, path);
	if (ret == 0)
		set_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags);
out:
	btrfs_free_path(path);
	return ret;
}

static u64 find_next_chunk(struct btrfs_fs_info *fs_info)
{
	struct rb_node *n;
	u64 ret = 0;

	read_lock(&fs_info->mapping_tree_lock);
	n = rb_last(&fs_info->mapping_tree.rb_root);
	if (n) {
		struct btrfs_chunk_map *map;

		map = rb_entry(n, struct btrfs_chunk_map, rb_node);
		ret = map->start + map->chunk_len;
	}
	read_unlock(&fs_info->mapping_tree_lock);

	return ret;
}

static noinline int find_next_devid(struct btrfs_fs_info *fs_info,
				    u64 *devid_ret)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	if (ret == 0) {
		/* Corruption */
		btrfs_err(fs_info, "corrupted chunk tree devid -1 matched");
		ret = -EUCLEAN;
		goto error;
	}

	ret = btrfs_previous_item(fs_info->chunk_root, path,
				  BTRFS_DEV_ITEMS_OBJECTID,
				  BTRFS_DEV_ITEM_KEY);
	if (ret) {
		*devid_ret = 1;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		*devid_ret = found_key.offset + 1;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}
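
/*
 * Illustrative example: with dev items for devids 1 and 2 in the chunk tree,
 * btrfs_previous_item() above lands on the item with offset 2 and *devid_ret
 * becomes 3; if no dev item exists at all, *devid_ret falls back to 1.
 */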

/*
 * The device information is stored in the chunk root.
 * The btrfs_device struct should be fully filled in.
 */
static int btrfs_add_dev_item(struct btrfs_trans_handle *trans,
			      struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long ptr;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	btrfs_reserve_chunk_metadata(trans, true);
	ret = btrfs_insert_empty_item(trans, trans->fs_info->chunk_root, path,
				      &key, sizeof(*dev_item));
	btrfs_trans_release_chunk_metadata(trans);
	if (ret)
		goto out;

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_generation(leaf, dev_item, 0);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item,
				     btrfs_device_get_disk_total_bytes(device));
	btrfs_set_device_bytes_used(leaf, dev_item,
				    btrfs_device_get_bytes_used(device));
	btrfs_set_device_group(leaf, dev_item, 0);
	btrfs_set_device_seek_speed(leaf, dev_item, 0);
	btrfs_set_device_bandwidth(leaf, dev_item, 0);
	btrfs_set_device_start_offset(leaf, dev_item, 0);

	ptr = btrfs_device_uuid(dev_item);
	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
	ptr = btrfs_device_fsid(dev_item);
	write_extent_buffer(leaf, trans->fs_info->fs_devices->metadata_uuid,
			    ptr, BTRFS_FSID_SIZE);
	btrfs_mark_buffer_dirty(trans, leaf);

	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Function to update ctime/mtime for a given device path.
 * Mainly used for ctime/mtime based probe like libblkid.
 *
 * We don't care about errors here, this is just to be kind to userspace.
 */
static void update_dev_time(const char *device_path)
{
	struct path path;
	int ret;

	ret = kern_path(device_path, LOOKUP_FOLLOW, &path);
	if (ret)
		return;

	inode_update_time(d_inode(path.dentry), S_MTIME | S_CTIME | S_VERSION);
	path_put(&path);
}

static int btrfs_rm_dev_item(struct btrfs_trans_handle *trans,
			     struct btrfs_device *device)
{
	struct btrfs_root *root = device->fs_info->chunk_root;
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	btrfs_reserve_chunk_metadata(trans, false);
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	btrfs_trans_release_chunk_metadata(trans);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Verify that @num_devices satisfies the RAID profile constraints in the whole
 * filesystem. It's up to the caller to adjust that number regarding eg. device
 * replace.
 */
static int btrfs_check_raid_min_devices(struct btrfs_fs_info *fs_info,
					u64 num_devices)
{
	u64 all_avail;
	unsigned seq;
	int i;

	do {
		seq = read_seqbegin(&fs_info->profiles_lock);

		all_avail = fs_info->avail_data_alloc_bits |
			    fs_info->avail_system_alloc_bits |
			    fs_info->avail_metadata_alloc_bits;
	} while (read_seqretry(&fs_info->profiles_lock, seq));

	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
		if (!(all_avail & btrfs_raid_array[i].bg_flag))
			continue;

		if (num_devices < btrfs_raid_array[i].devs_min)
			return btrfs_raid_array[i].mindev_error;
	}

	return 0;
}
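
/*
 * Example reading of the check above: with RAID1 block groups in use
 * (devs_min = 2 in btrfs_raid_array), removing a device from a two-device
 * filesystem passes num_devices = 1 here and fails with
 * BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET.
 */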
static void btrfs_scratch_superblock(struct btrfs_fs_info *fs_info,
				     struct block_device *bdev, int copy_num)
{
	struct btrfs_super_block *disk_super;
	const size_t len = sizeof(disk_super->magic);
	const u64 bytenr = btrfs_sb_offset(copy_num);
	int ret;

	disk_super = btrfs_read_disk_super(bdev, bytenr, bytenr);
	if (IS_ERR(disk_super))
		return;

	memset(&disk_super->magic, 0, len);
	folio_mark_dirty(virt_to_folio(disk_super));
	btrfs_release_disk_super(disk_super);

	ret = sync_blockdev_range(bdev, bytenr, bytenr + len - 1);
	if (ret)
		btrfs_warn(fs_info, "error clearing superblock number %d (%d)",
			   copy_num, ret);
}

void btrfs_scratch_superblocks(struct btrfs_fs_info *fs_info, struct btrfs_device *device)
{
	int copy_num;
	struct block_device *bdev = device->bdev;

	if (!bdev)
		return;

	for (copy_num = 0; copy_num < BTRFS_SUPER_MIRROR_MAX; copy_num++) {
		if (bdev_is_zoned(bdev))
			btrfs_reset_sb_log_zones(bdev, copy_num);
		else
			btrfs_scratch_superblock(fs_info, bdev, copy_num);
	}

	/* Notify udev that device has changed */
	btrfs_kobject_uevent(bdev, KOBJ_CHANGE);

	/* Update ctime/mtime for device path for libblkid */
	update_dev_time(device->name->str);
}
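/*
 * Remove a device from the filesystem: shrink it to zero, delete its dev
 * item, drop it from the device lists and scratch its superblocks. The
 * caller does the final blockdev release via the returned @bdev_file.
 */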
int btrfs_rm_device(struct btrfs_fs_info *fs_info,
		    struct btrfs_dev_lookup_args *args,
		    struct file **bdev_file)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_device *device;
	struct btrfs_fs_devices *cur_devices;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	u64 num_devices;
	int ret = 0;

	if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) {
		btrfs_err(fs_info, "device remove not supported on extent tree v2 yet");
		return -EINVAL;
	}

	/*
	 * The device list in fs_devices is accessed without locks (neither
	 * uuid_mutex nor device_list_mutex) as it won't change on a mounted
	 * filesystem and another device rm cannot run.
	 */
	num_devices = btrfs_num_devices(fs_info);

	ret = btrfs_check_raid_min_devices(fs_info, num_devices - 1);
	if (ret)
		return ret;

	device = btrfs_find_device(fs_info->fs_devices, args);
	if (!device) {
		if (args->missing)
			ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND;
		else
			ret = -ENOENT;
		return ret;
	}

	if (btrfs_pinned_by_swapfile(fs_info, device)) {
		btrfs_warn_in_rcu(fs_info,
		  "cannot remove device %s (devid %llu) due to active swapfile",
				  btrfs_dev_name(device), device->devid);
		return -ETXTBSY;
	}

	if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
		return BTRFS_ERROR_DEV_TGT_REPLACE;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    fs_info->fs_devices->rw_devices == 1)
		return BTRFS_ERROR_DEV_ONLY_WRITABLE;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
		mutex_lock(&fs_info->chunk_mutex);
		list_del_init(&device->dev_alloc_list);
		device->fs_devices->rw_devices--;
		mutex_unlock(&fs_info->chunk_mutex);
	}

	ret = btrfs_shrink_device(device, 0);
	if (ret)
		goto error_undo;

	trans = btrfs_start_transaction(fs_info->chunk_root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto error_undo;
	}

	ret = btrfs_rm_dev_item(trans, device);
	if (ret) {
		/* Any error in dev item removal is critical */
		btrfs_crit(fs_info,
			   "failed to remove device item for devid %llu: %d",
			   device->devid, ret);
		btrfs_abort_transaction(trans, ret);
		btrfs_end_transaction(trans);
		return ret;
	}

	clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
	btrfs_scrub_cancel_dev(device);

	/*
	 * The device list mutex makes sure that we don't change the device
	 * list while someone else is writing out all the device supers.
	 * Whoever is writing all supers should lock the device list mutex
	 * before getting the number of devices in the super block
	 * (super_copy). Conversely, whoever updates the number of devices in
	 * the super block (super_copy) should hold the device list mutex.
	 */

	/*
	 * In normal cases the cur_devices == fs_devices. But in case
	 * of deleting a seed device, the cur_devices should point to
	 * its own fs_devices listed under the fs_devices->seed_list.
	 */
	cur_devices = device->fs_devices;
	mutex_lock(&fs_devices->device_list_mutex);
	list_del_rcu(&device->dev_list);

	cur_devices->num_devices--;
	cur_devices->total_devices--;
	/* Update total_devices of the parent fs_devices if it's seed */
	if (cur_devices != fs_devices)
		fs_devices->total_devices--;

	if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
		cur_devices->missing_devices--;

	btrfs_assign_next_active_device(device, NULL);

	if (device->bdev_file) {
		cur_devices->open_devices--;
		/* remove sysfs entry */
		btrfs_sysfs_remove_device(device);
	}

	num_devices = btrfs_super_num_devices(fs_info->super_copy) - 1;
	btrfs_set_super_num_devices(fs_info->super_copy, num_devices);
	mutex_unlock(&fs_devices->device_list_mutex);

	/*
	 * At this point, the device is zero sized and detached from the
	 * devices list. All that's left is to zero out the old supers and
	 * free the device.
	 *
	 * We cannot call btrfs_close_bdev() here because we're holding the sb
	 * write lock, and fput() on the block device will pull in the
	 * ->open_mutex on the block device and its dependencies. Instead
	 * just flush the device and let the caller do the final bdev_release.
	 */
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
		btrfs_scratch_superblocks(fs_info, device);
		if (device->bdev) {
			sync_blockdev(device->bdev);
			invalidate_bdev(device->bdev);
		}
	}

	*bdev_file = device->bdev_file;
	synchronize_rcu();
	btrfs_free_device(device);

	/*
	 * This can happen if cur_devices is the private seed devices list. We
	 * cannot call close_fs_devices() here because it expects the uuid_mutex
	 * to be held, but in fact we don't need that for the private
	 * seed_devices, we can simply decrement cur_devices->opened and then
	 * remove it from our list and free the fs_devices.
	 */
	if (cur_devices->num_devices == 0) {
		list_del_init(&cur_devices->seed_list);
		ASSERT(cur_devices->opened == 1);
		cur_devices->opened--;
		free_fs_devices(cur_devices);
	}

	ret = btrfs_commit_transaction(trans);

	return ret;

error_undo:
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
		mutex_lock(&fs_info->chunk_mutex);
		list_add(&device->dev_alloc_list,
			 &fs_devices->alloc_list);
		device->fs_devices->rw_devices++;
		mutex_unlock(&fs_info->chunk_mutex);
	}
	return ret;
}

void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device *srcdev)
{
	struct btrfs_fs_devices *fs_devices;

	lockdep_assert_held(&srcdev->fs_info->fs_devices->device_list_mutex);

	/*
	 * In case of an fs with no seed, srcdev->fs_devices will point to the
	 * fs_devices of fs_info. However, when the dev being replaced is a
	 * seed dev it will point to the seed's local fs_devices. In short,
	 * srcdev will have its correct fs_devices in both cases.
	 */
	fs_devices = srcdev->fs_devices;

	list_del_rcu(&srcdev->dev_list);
	list_del(&srcdev->dev_alloc_list);
	fs_devices->num_devices--;
	if (test_bit(BTRFS_DEV_STATE_MISSING, &srcdev->dev_state))
		fs_devices->missing_devices--;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &srcdev->dev_state))
		fs_devices->rw_devices--;

	if (srcdev->bdev)
		fs_devices->open_devices--;
}

void btrfs_rm_dev_replace_free_srcdev(struct btrfs_device *srcdev)
{
	struct btrfs_fs_devices *fs_devices = srcdev->fs_devices;

	mutex_lock(&uuid_mutex);

	btrfs_close_bdev(srcdev);
	synchronize_rcu();
	btrfs_free_device(srcdev);

	/* If there are no devices left we'd rather delete the fs_devices. */
	if (!fs_devices->num_devices) {
		/*
		 * On a mounted FS, num_devices can't be zero unless it's a
		 * seed. In case of a seed device being replaced, the replace
		 * target is added to the sprout FS, so there will be no more
		 * device left under the seed FS.
		 */
		ASSERT(fs_devices->seeding);

		list_del_init(&fs_devices->seed_list);
		close_fs_devices(fs_devices);
		free_fs_devices(fs_devices);
	}
	mutex_unlock(&uuid_mutex);
}
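/*
 * Tear down a device replace target: remove it from the device lists,
 * scratch its superblocks, then close and free it.
 */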
void btrfs_destroy_dev_replace_tgtdev(struct btrfs_device *tgtdev)
{
	struct btrfs_fs_devices *fs_devices = tgtdev->fs_info->fs_devices;

	mutex_lock(&fs_devices->device_list_mutex);

	btrfs_sysfs_remove_device(tgtdev);

	if (tgtdev->bdev)
		fs_devices->open_devices--;

	fs_devices->num_devices--;

	btrfs_assign_next_active_device(tgtdev, NULL);

	list_del_rcu(&tgtdev->dev_list);

	mutex_unlock(&fs_devices->device_list_mutex);

	btrfs_scratch_superblocks(tgtdev->fs_info, tgtdev);

	btrfs_close_bdev(tgtdev);
	synchronize_rcu();
	btrfs_free_device(tgtdev);
}

/*
 * Populate args from device at path.
 *
 * @fs_info:	the filesystem
 * @args:	the args to populate
 * @path:	the path to the device
 *
 * This will read the super block of the device at @path and populate @args with
 * the devid, fsid, and uuid. This is meant to be used for ioctls that need to
 * lookup a device to operate on, but need to do it before we take any locks.
 * This properly handles the special case of "missing" that a user may pass in,
 * and does some basic sanity checks. The caller must make sure that @path is
 * properly NUL terminated before calling in, and must call
 * btrfs_put_dev_args_from_path() in order to free up the temporary fsid and
 * uuid buffers.
 *
 * Return: 0 for success, -errno for failure
 */
int btrfs_get_dev_args_from_path(struct btrfs_fs_info *fs_info,
				 struct btrfs_dev_lookup_args *args,
				 const char *path)
{
	struct btrfs_super_block *disk_super;
	struct file *bdev_file;
	int ret;

	if (!path || !path[0])
		return -EINVAL;
	if (!strcmp(path, "missing")) {
		args->missing = true;
		return 0;
	}

	args->uuid = kzalloc(BTRFS_UUID_SIZE, GFP_KERNEL);
	args->fsid = kzalloc(BTRFS_FSID_SIZE, GFP_KERNEL);
	if (!args->uuid || !args->fsid) {
		btrfs_put_dev_args_from_path(args);
		return -ENOMEM;
	}

	ret = btrfs_get_bdev_and_sb(path, BLK_OPEN_READ, NULL, 0,
				    &bdev_file, &disk_super);
	if (ret) {
		btrfs_put_dev_args_from_path(args);
		return ret;
	}

	args->devid = btrfs_stack_device_id(&disk_super->dev_item);
	memcpy(args->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE);
	if (btrfs_fs_incompat(fs_info, METADATA_UUID))
		memcpy(args->fsid, disk_super->metadata_uuid, BTRFS_FSID_SIZE);
	else
		memcpy(args->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
	btrfs_release_disk_super(disk_super);
	fput(bdev_file);
	return 0;
}

/*
 * Only use this jointly with btrfs_get_dev_args_from_path() because we will
 * allocate our ->uuid and ->fsid pointers, everybody else uses local variables
 * that don't need to be freed.
 */
void btrfs_put_dev_args_from_path(struct btrfs_dev_lookup_args *args)
{
	kfree(args->uuid);
	kfree(args->fsid);
	args->uuid = NULL;
	args->fsid = NULL;
}
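/*
 * Look up a device either by @devid (when non-zero) or by reading the super
 * block of the device at @device_path ("missing" is handled there too).
 */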
struct btrfs_device *btrfs_find_device_by_devspec(
		struct btrfs_fs_info *fs_info, u64 devid,
		const char *device_path)
{
	BTRFS_DEV_LOOKUP_ARGS(args);
	struct btrfs_device *device;
	int ret;

	if (devid) {
		args.devid = devid;
		device = btrfs_find_device(fs_info->fs_devices, &args);
		if (!device)
			return ERR_PTR(-ENOENT);
		return device;
	}

	ret = btrfs_get_dev_args_from_path(fs_info, &args, device_path);
	if (ret)
		return ERR_PTR(ret);
	device = btrfs_find_device(fs_info->fs_devices, &args);
	btrfs_put_dev_args_from_path(&args);
	if (!device)
		return ERR_PTR(-ENOENT);
	return device;
}

static struct btrfs_fs_devices *btrfs_init_sprout(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_fs_devices *old_devices;
	struct btrfs_fs_devices *seed_devices;

	lockdep_assert_held(&uuid_mutex);
	if (!fs_devices->seeding)
		return ERR_PTR(-EINVAL);

	/*
	 * Private copy of the seed devices, anchored at
	 * fs_info->fs_devices->seed_list
	 */
	seed_devices = alloc_fs_devices(NULL);
	if (IS_ERR(seed_devices))
		return seed_devices;

	/*
	 * It's necessary to retain a copy of the original seed fs_devices in
	 * fs_uuids so that filesystems which have been seeded can successfully
	 * reference the seed device from open_seed_devices. This also supports
	 * multiple seed filesystems.
	 */
	old_devices = clone_fs_devices(fs_devices);
	if (IS_ERR(old_devices)) {
		kfree(seed_devices);
		return old_devices;
	}

	list_add(&old_devices->fs_list, &fs_uuids);

	memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
	seed_devices->opened = 1;
	INIT_LIST_HEAD(&seed_devices->devices);
	INIT_LIST_HEAD(&seed_devices->alloc_list);
	mutex_init(&seed_devices->device_list_mutex);

	return seed_devices;
}

/*
 * Splice seed devices into the sprout fs_devices.
 * Generate a new fsid for the sprouted read-write filesystem.
 */
static void btrfs_setup_sprout(struct btrfs_fs_info *fs_info,
			       struct btrfs_fs_devices *seed_devices)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_super_block *disk_super = fs_info->super_copy;
	struct btrfs_device *device;
	u64 super_flags;

	/*
	 * We are updating the fsid, the thread leading to device_list_add()
	 * could race, so uuid_mutex is needed.
	 */
	lockdep_assert_held(&uuid_mutex);

	/*
	 * The threads listed below may traverse dev_list but can do that without
	 * device_list_mutex:
	 * - All device ops and balance - as we are in btrfs_exclop_start.
	 * - Various dev_list readers - are using RCU.
	 * - btrfs_ioctl_fitrim() - is using RCU.
	 *
	 * The following read-only threads use device_list_mutex:
	 * - Readonly scrub btrfs_scrub_dev()
	 * - Readonly scrub btrfs_scrub_progress()
	 * - btrfs_get_dev_stats()
	 */
	lockdep_assert_held(&fs_devices->device_list_mutex);

	list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
			     synchronize_rcu);
	list_for_each_entry(device, &seed_devices->devices, dev_list)
		device->fs_devices = seed_devices;

	fs_devices->seeding = false;
	fs_devices->num_devices = 0;
	fs_devices->open_devices = 0;
	fs_devices->missing_devices = 0;
	fs_devices->rotating = false;
	list_add(&seed_devices->seed_list, &fs_devices->seed_list);

	generate_random_uuid(fs_devices->fsid);
	memcpy(fs_devices->metadata_uuid, fs_devices->fsid, BTRFS_FSID_SIZE);
	memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);

	super_flags = btrfs_super_flags(disk_super) &
		      ~BTRFS_SUPER_FLAG_SEEDING;
	btrfs_set_super_flags(disk_super, super_flags);
}

/*
 * Store the expected generation for seed devices in device items.
 */
static int btrfs_finish_sprout(struct btrfs_trans_handle *trans)
{
	BTRFS_DEV_LOOKUP_ARGS(args);
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *root = fs_info->chunk_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_dev_item *dev_item;
	struct btrfs_device *device;
	struct btrfs_key key;
	u8 fs_uuid[BTRFS_FSID_SIZE];
	u8 dev_uuid[BTRFS_UUID_SIZE];
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = BTRFS_DEV_ITEM_KEY;

	while (1) {
		btrfs_reserve_chunk_metadata(trans, false);
		ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
		btrfs_trans_release_chunk_metadata(trans);
		if (ret < 0)
			goto error;

		leaf = path->nodes[0];
next_slot:
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret > 0)
				break;
			if (ret < 0)
				goto error;
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
			btrfs_release_path(path);
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
		    key.type != BTRFS_DEV_ITEM_KEY)
			break;

		dev_item = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_dev_item);
		args.devid = btrfs_device_id(leaf, dev_item);
		read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
				   BTRFS_UUID_SIZE);
		read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
				   BTRFS_FSID_SIZE);
		args.uuid = dev_uuid;
		args.fsid = fs_uuid;
		device = btrfs_find_device(fs_info->fs_devices, &args);
		BUG_ON(!device); /* Logic error */

		if (device->fs_devices->seeding) {
			btrfs_set_device_generation(leaf, dev_item,
						    device->generation);
			btrfs_mark_buffer_dirty(trans, leaf);
		}

		path->slots[0]++;
		goto next_slot;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}
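/*
 * Add a new device to the filesystem at @device_path ("btrfs device add").
 * If the filesystem is a seed filesystem, this also sprouts a new read-write
 * filesystem on top of the seed devices.
 */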
int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path)
{
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_device *device;
	struct file *bdev_file;
	struct super_block *sb = fs_info->sb;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_fs_devices *seed_devices = NULL;
	u64 orig_super_total_bytes;
	u64 orig_super_num_devices;
	int ret = 0;
	bool seeding_dev = false;
	bool locked = false;

	if (sb_rdonly(sb) && !fs_devices->seeding)
		return -EROFS;

	bdev_file = bdev_file_open_by_path(device_path, BLK_OPEN_WRITE,
					   fs_info->bdev_holder, NULL);
	if (IS_ERR(bdev_file))
		return PTR_ERR(bdev_file);

	if (!btrfs_check_device_zone_type(fs_info, file_bdev(bdev_file))) {
		ret = -EINVAL;
		goto error;
	}

	if (fs_devices->seeding) {
		seeding_dev = true;
		down_write(&sb->s_umount);
		mutex_lock(&uuid_mutex);
		locked = true;
	}

	sync_blockdev(file_bdev(bdev_file));

	rcu_read_lock();
	list_for_each_entry_rcu(device, &fs_devices->devices, dev_list) {
		if (device->bdev == file_bdev(bdev_file)) {
			ret = -EEXIST;
			rcu_read_unlock();
			goto error;
		}
	}
	rcu_read_unlock();

	device = btrfs_alloc_device(fs_info, NULL, NULL, device_path);
	if (IS_ERR(device)) {
		/* we can safely leave the fs_devices entry around */
		ret = PTR_ERR(device);
		goto error;
	}

	device->fs_info = fs_info;
	device->bdev_file = bdev_file;
	device->bdev = file_bdev(bdev_file);
	ret = lookup_bdev(device_path, &device->devt);
	if (ret)
		goto error_free_device;

	ret = btrfs_get_dev_zone_info(device, false);
	if (ret)
		goto error_free_device;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto error_free_zone;
	}

	set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
	device->generation = trans->transid;
	device->io_width = fs_info->sectorsize;
	device->io_align = fs_info->sectorsize;
	device->sector_size = fs_info->sectorsize;
	device->total_bytes =
		round_down(bdev_nr_bytes(device->bdev), fs_info->sectorsize);
	device->disk_total_bytes = device->total_bytes;
	device->commit_total_bytes = device->total_bytes;
	set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
	clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
	device->dev_stats_valid = 1;
	set_blocksize(device->bdev, BTRFS_BDEV_BLOCKSIZE);

	if (seeding_dev) {
		btrfs_clear_sb_rdonly(sb);

		/* GFP_KERNEL allocation must not be under device_list_mutex */
		seed_devices = btrfs_init_sprout(fs_info);
		if (IS_ERR(seed_devices)) {
			ret = PTR_ERR(seed_devices);
			btrfs_abort_transaction(trans, ret);
			goto error_trans;
		}
	}

	mutex_lock(&fs_devices->device_list_mutex);
	if (seeding_dev) {
		btrfs_setup_sprout(fs_info, seed_devices);
		btrfs_assign_next_active_device(fs_info->fs_devices->latest_dev,
						device);
	}

	device->fs_devices = fs_devices;

	mutex_lock(&fs_info->chunk_mutex);
	list_add_rcu(&device->dev_list, &fs_devices->devices);
	list_add(&device->dev_alloc_list, &fs_devices->alloc_list);
	fs_devices->num_devices++;
	fs_devices->open_devices++;
	fs_devices->rw_devices++;
	fs_devices->total_devices++;
	fs_devices->total_rw_bytes += device->total_bytes;

	atomic64_add(device->total_bytes, &fs_info->free_chunk_space);

	if (!bdev_nonrot(device->bdev))
		fs_devices->rotating = true;
	orig_super_total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
	btrfs_set_super_total_bytes(fs_info->super_copy,
		round_down(orig_super_total_bytes + device->total_bytes,
			   fs_info->sectorsize));

	orig_super_num_devices = btrfs_super_num_devices(fs_info->super_copy);
	btrfs_set_super_num_devices(fs_info->super_copy,
				    orig_super_num_devices + 1);

	/*
	 * we've got more storage, clear any full flags on the space
	 * infos
	 */
	btrfs_clear_space_info_full(fs_info);

	mutex_unlock(&fs_info->chunk_mutex);

	/* Add sysfs device entry */
	btrfs_sysfs_add_device(device);

	mutex_unlock(&fs_devices->device_list_mutex);

	if (seeding_dev) {
		mutex_lock(&fs_info->chunk_mutex);
		ret = init_first_rw_device(trans);
		mutex_unlock(&fs_info->chunk_mutex);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto error_sysfs;
		}
	}

	ret = btrfs_add_dev_item(trans, device);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto error_sysfs;
	}

	if (seeding_dev) {
		ret = btrfs_finish_sprout(trans);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto error_sysfs;
		}

		/*
		 * fs_devices now represents the newly sprouted filesystem and
		 * its fsid has been changed by btrfs_setup_sprout().
		 */
		btrfs_sysfs_update_sprout_fsid(fs_devices);
	}

	ret = btrfs_commit_transaction(trans);

	if (seeding_dev) {
		mutex_unlock(&uuid_mutex);
		up_write(&sb->s_umount);
		locked = false;

		if (ret) /* transaction commit */
			return ret;

		ret = btrfs_relocate_sys_chunks(fs_info);
		if (ret < 0)
			btrfs_handle_fs_error(fs_info, ret,
				    "Failed to relocate sys chunks after device initialization. This can be fixed using the \"btrfs balance\" command.");
		trans = btrfs_attach_transaction(root);
		if (IS_ERR(trans)) {
			if (PTR_ERR(trans) == -ENOENT)
				return 0;
			ret = PTR_ERR(trans);
			trans = NULL;
			goto error_sysfs;
		}
		ret = btrfs_commit_transaction(trans);
	}

	/*
	 * Now that we have written a new super block to this device, check all
	 * other fs_devices list if device_path alienates any other scanned
	 * device.
	 * We can ignore the return value as it typically returns -EINVAL and
	 * only succeeds if the device was an alien.
	 */
	btrfs_forget_devices(device->devt);

	/* Update ctime/mtime for blkid or udev */
	update_dev_time(device_path);

	return ret;

error_sysfs:
	btrfs_sysfs_remove_device(device);
	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	mutex_lock(&fs_info->chunk_mutex);
	list_del_rcu(&device->dev_list);
	list_del(&device->dev_alloc_list);
	fs_info->fs_devices->num_devices--;
	fs_info->fs_devices->open_devices--;
	fs_info->fs_devices->rw_devices--;
	fs_info->fs_devices->total_devices--;
	fs_info->fs_devices->total_rw_bytes -= device->total_bytes;
	atomic64_sub(device->total_bytes, &fs_info->free_chunk_space);
	btrfs_set_super_total_bytes(fs_info->super_copy,
				    orig_super_total_bytes);
	btrfs_set_super_num_devices(fs_info->super_copy,
				    orig_super_num_devices);
	mutex_unlock(&fs_info->chunk_mutex);
	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
error_trans:
	if (seeding_dev)
		btrfs_set_sb_rdonly(sb);
	if (trans)
		btrfs_end_transaction(trans);
error_free_zone:
	btrfs_destroy_dev_zone_info(device);
error_free_device:
	btrfs_free_device(device);
error:
	fput(bdev_file);
	if (locked) {
		mutex_unlock(&uuid_mutex);
		up_write(&sb->s_umount);
	}
	return ret;
}

static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
					struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->fs_info->chunk_root;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item,
				     btrfs_device_get_disk_total_bytes(device));
	btrfs_set_device_bytes_used(leaf, dev_item,
				    btrfs_device_get_bytes_used(device));
	btrfs_mark_buffer_dirty(trans, leaf);

out:
	btrfs_free_path(path);
	return ret;
}

int btrfs_grow_device(struct btrfs_trans_handle *trans,
		      struct btrfs_device *device, u64 new_size)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_super_block *super_copy = fs_info->super_copy;
	u64 old_total;
	u64 diff;
	int ret;

	if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
		return -EACCES;

	new_size = round_down(new_size, fs_info->sectorsize);

	mutex_lock(&fs_info->chunk_mutex);
	old_total = btrfs_super_total_bytes(super_copy);
	diff = round_down(new_size - device->total_bytes, fs_info->sectorsize);

	if (new_size <= device->total_bytes ||
	    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
		mutex_unlock(&fs_info->chunk_mutex);
		return -EINVAL;
	}

	btrfs_set_super_total_bytes(super_copy,
			round_down(old_total + diff, fs_info->sectorsize));
	device->fs_devices->total_rw_bytes += diff;
	atomic64_add(diff, &fs_info->free_chunk_space);

	btrfs_device_set_total_bytes(device, new_size);
	btrfs_device_set_disk_total_bytes(device, new_size);
	btrfs_clear_space_info_full(device->fs_info);
	if (list_empty(&device->post_commit_list))
		list_add_tail(&device->post_commit_list,
			      &trans->transaction->dev_update_list);
	mutex_unlock(&fs_info->chunk_mutex);

	btrfs_reserve_chunk_metadata(trans, false);
	ret = btrfs_update_device(trans, device);
	btrfs_trans_release_chunk_metadata(trans);

	return ret;
}
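/* Delete the chunk item with the given offset from the chunk tree. */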
static int btrfs_free_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *root = fs_info->chunk_root;
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = chunk_offset;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	else if (ret > 0) { /* Logic error or corruption */
		btrfs_handle_fs_error(fs_info, -ENOENT,
				      "Failed lookup while freeing chunk.");
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
	if (ret < 0)
		btrfs_handle_fs_error(fs_info, ret,
				      "Failed to delete chunk item.");
out:
	btrfs_free_path(path);
	return ret;
}

static int btrfs_del_sys_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
{
	struct btrfs_super_block *super_copy = fs_info->super_copy;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *ptr;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur;
	struct btrfs_key key;

	lockdep_assert_held(&fs_info->chunk_mutex);
	array_size = btrfs_super_sys_array_size(super_copy);

	ptr = super_copy->sys_chunk_array;
	cur = 0;

	while (cur < array_size) {
		disk_key = (struct btrfs_disk_key *)ptr;
		btrfs_disk_key_to_cpu(&key, disk_key);

		len = sizeof(*disk_key);

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)(ptr + len);
			num_stripes = btrfs_stack_chunk_num_stripes(chunk);
			len += btrfs_chunk_item_size(num_stripes);
		} else {
			ret = -EIO;
			break;
		}
		if (key.objectid == BTRFS_FIRST_CHUNK_TREE_OBJECTID &&
		    key.offset == chunk_offset) {
			memmove(ptr, ptr + len, array_size - (cur + len));
			array_size -= len;
			btrfs_set_super_sys_array_size(super_copy, array_size);
		} else {
			ptr += len;
			cur += len;
		}
	}
	return ret;
}
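/*
 * Find a chunk map whose range intersects [@logical, @logical + @length) in
 * the mapping tree. The caller must hold fs_info->mapping_tree_lock and is
 * responsible for dropping the reference taken on the returned map.
 */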
struct btrfs_chunk_map *btrfs_find_chunk_map_nolock(struct btrfs_fs_info *fs_info,
						    u64 logical, u64 length)
{
	struct rb_node *node = fs_info->mapping_tree.rb_root.rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *orig_prev;
	struct btrfs_chunk_map *map;
	struct btrfs_chunk_map *prev_map = NULL;

	while (node) {
		map = rb_entry(node, struct btrfs_chunk_map, rb_node);
		prev = node;
		prev_map = map;

		if (logical < map->start) {
			node = node->rb_left;
		} else if (logical >= map->start + map->chunk_len) {
			node = node->rb_right;
		} else {
			refcount_inc(&map->refs);
			return map;
		}
	}

	if (!prev)
		return NULL;

	orig_prev = prev;
	while (prev && logical >= prev_map->start + prev_map->chunk_len) {
		prev = rb_next(prev);
		prev_map = rb_entry(prev, struct btrfs_chunk_map, rb_node);
	}

	if (!prev) {
		prev = orig_prev;
		prev_map = rb_entry(prev, struct btrfs_chunk_map, rb_node);
		while (prev && logical < prev_map->start) {
			prev = rb_prev(prev);
			prev_map = rb_entry(prev, struct btrfs_chunk_map, rb_node);
		}
	}

	if (prev) {
		u64 end = logical + length;

		/*
		 * Caller can pass a U64_MAX length when it wants to get any
		 * chunk starting at an offset of 'logical' or higher, so deal
		 * with underflow by resetting the end offset to U64_MAX.
		 */
		if (end < logical)
			end = U64_MAX;

		if (end > prev_map->start &&
		    logical < prev_map->start + prev_map->chunk_len) {
			refcount_inc(&prev_map->refs);
			return prev_map;
		}
	}

	return NULL;
}

struct btrfs_chunk_map *btrfs_find_chunk_map(struct btrfs_fs_info *fs_info,
					     u64 logical, u64 length)
{
	struct btrfs_chunk_map *map;

	read_lock(&fs_info->mapping_tree_lock);
	map = btrfs_find_chunk_map_nolock(fs_info, logical, length);
	read_unlock(&fs_info->mapping_tree_lock);

	return map;
}

/*
 * Find the mapping containing the given logical extent.
 *
 * @logical: Logical block offset in bytes.
 * @length: Length of extent in bytes.
 *
 * Return: Chunk mapping or ERR_PTR.
 */
struct btrfs_chunk_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info,
					    u64 logical, u64 length)
{
	struct btrfs_chunk_map *map;

	map = btrfs_find_chunk_map(fs_info, logical, length);

	if (unlikely(!map)) {
		btrfs_crit(fs_info,
			   "unable to find chunk map for logical %llu length %llu",
			   logical, length);
		return ERR_PTR(-EINVAL);
	}

	if (unlikely(map->start > logical || map->start + map->chunk_len <= logical)) {
		btrfs_crit(fs_info,
			   "found a bad chunk map, wanted %llu-%llu, found %llu-%llu",
			   logical, logical + length, map->start,
			   map->start + map->chunk_len);
		btrfs_free_chunk_map(map);
		return ERR_PTR(-EINVAL);
	}

	/* Callers are responsible for dropping the reference. */
	return map;
}

static int remove_chunk_item(struct btrfs_trans_handle *trans,
			     struct btrfs_chunk_map *map, u64 chunk_offset)
{
	int i;

	/*
	 * Removing chunk items and updating the device items in the chunks btree
	 * requires holding the chunk_mutex.
	 * See the comment at btrfs_chunk_alloc() for the details.
	 */
	lockdep_assert_held(&trans->fs_info->chunk_mutex);

	for (i = 0; i < map->num_stripes; i++) {
		int ret;

		ret = btrfs_update_device(trans, map->stripes[i].dev);
		if (ret)
			return ret;
	}

	return btrfs_free_chunk(trans, chunk_offset);
}
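/*
 * Remove a chunk: free its device extents, delete the chunk item (and the
 * superblock sys_chunk_array entry for system chunks) and remove the block
 * group.
 */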
int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_chunk_map *map;
	u64 dev_extent_len = 0;
	int i, ret = 0;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;

	map = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
	if (IS_ERR(map)) {
		/*
		 * This is a logic error, but we don't want to just rely on the
		 * user having built with ASSERT enabled, so if ASSERT doesn't
		 * do anything we still error out.
		 */
		ASSERT(0);
		return PTR_ERR(map);
	}

	/*
	 * First delete the device extent items from the devices btree.
	 * We take the device_list_mutex to avoid racing with the finishing phase
	 * of a device replace operation. See the comment below before acquiring
	 * fs_info->chunk_mutex. Note that here we do not acquire the chunk_mutex
	 * because that can result in a deadlock when deleting the device extent
	 * items from the devices btree - COWing an extent buffer from the btree
	 * may result in allocating a new metadata chunk, which would attempt to
	 * lock fs_info->chunk_mutex again.
	 */
	mutex_lock(&fs_devices->device_list_mutex);
	for (i = 0; i < map->num_stripes; i++) {
		struct btrfs_device *device = map->stripes[i].dev;
		ret = btrfs_free_dev_extent(trans, device,
					    map->stripes[i].physical,
					    &dev_extent_len);
		if (ret) {
			mutex_unlock(&fs_devices->device_list_mutex);
			btrfs_abort_transaction(trans, ret);
			goto out;
		}

		if (device->bytes_used > 0) {
			mutex_lock(&fs_info->chunk_mutex);
			btrfs_device_set_bytes_used(device,
					device->bytes_used - dev_extent_len);
			atomic64_add(dev_extent_len, &fs_info->free_chunk_space);
			btrfs_clear_space_info_full(fs_info);
			mutex_unlock(&fs_info->chunk_mutex);
		}
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	/*
	 * We acquire fs_info->chunk_mutex for 2 reasons:
	 *
	 * 1) Just like with the first phase of the chunk allocation, we must
	 *    reserve system space, do all chunk btree updates and deletions, and
	 *    update the system chunk array in the superblock while holding this
	 *    mutex. This is for similar reasons as explained on the comment at
	 *    the top of btrfs_chunk_alloc();
	 *
	 * 2) Prevent races with the final phase of a device replace operation
	 *    that replaces the device object associated with the map's stripes,
	 *    because the device object's id can change at any time during that
	 *    final phase of the device replace operation
	 *    (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the
	 *    replaced device and then see it with an ID of
	 *    BTRFS_DEV_REPLACE_DEVID, which would cause a failure when updating
	 *    the device item, which does not exist on the chunk btree.
	 *    The finishing phase of device replace acquires both the
	 *    device_list_mutex and the chunk_mutex, in that order, so we are
	 *    safe by just acquiring the chunk_mutex.
	 */
	trans->removing_chunk = true;
	mutex_lock(&fs_info->chunk_mutex);

	check_system_chunk(trans, map->type);

	ret = remove_chunk_item(trans, map, chunk_offset);
	/*
	 * Normally we should not get -ENOSPC since we reserved space before
	 * through the call to check_system_chunk().
	 *
	 * Despite our system space_info having enough free space, we may not
	 * be able to allocate extents from its block groups, because all have
	 * an incompatible profile, which will force us to allocate a new system
	 * block group with the right profile, or right after we called
	 * check_system_chunk() above, a scrub turned the only system block group
	 * with enough free space into RO mode.
	 * This is explained with more detail at do_chunk_alloc().
	 *
	 * So if we get -ENOSPC, allocate a new system chunk and retry once.
	 */
	if (ret == -ENOSPC) {
		const u64 sys_flags = btrfs_system_alloc_profile(fs_info);
		struct btrfs_block_group *sys_bg;

		sys_bg = btrfs_create_chunk(trans, sys_flags);
		if (IS_ERR(sys_bg)) {
			ret = PTR_ERR(sys_bg);
			btrfs_abort_transaction(trans, ret);
			goto out;
		}

		ret = btrfs_chunk_alloc_add_chunk_item(trans, sys_bg);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}

		ret = remove_chunk_item(trans, map, chunk_offset);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
	} else if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	trace_btrfs_chunk_free(fs_info, map, chunk_offset, map->chunk_len);

	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
		ret = btrfs_del_sys_chunk(fs_info, chunk_offset);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
	}

	mutex_unlock(&fs_info->chunk_mutex);
	trans->removing_chunk = false;

	/*
	 * We are done with chunk btree updates and deletions, so release the
	 * system space we previously reserved (with check_system_chunk()).
	 */
	btrfs_trans_release_chunk_metadata(trans);

	ret = btrfs_remove_block_group(trans, map);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

out:
	if (trans->removing_chunk) {
		mutex_unlock(&fs_info->chunk_mutex);
		trans->removing_chunk = false;
	}
	/* once for us */
	btrfs_free_chunk_map(map);
	return ret;
}

int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
{
	struct btrfs_root *root = fs_info->chunk_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_block_group *block_group;
	u64 length;
	int ret;

	if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) {
		btrfs_err(fs_info,
			  "relocate: not supported on extent tree v2 yet");
		return -EINVAL;
	}

	/*
	 * Prevent races with automatic removal of unused block groups.
	 * After we relocate and before we remove the chunk with offset
	 * chunk_offset, automatic removal of the block group can kick in,
	 * resulting in a failure when calling btrfs_remove_chunk() below.
	 *
	 * Make sure to acquire this mutex before doing a tree search (dev
	 * or chunk trees) to find chunks.
	 * Otherwise the cleaner kthread might call btrfs_remove_chunk()
	 * (through btrfs_delete_unused_bgs()) after we release the path used
	 * to search the chunk/dev tree and before the current task acquires
	 * this mutex and calls us.
	 */
	lockdep_assert_held(&fs_info->reclaim_bgs_lock);

	/* step one, relocate all the extents inside this chunk */
	btrfs_scrub_pause(fs_info);
	ret = btrfs_relocate_block_group(fs_info, chunk_offset);
	btrfs_scrub_continue(fs_info);
	if (ret) {
		/*
		 * If we had a transaction abort, stop all running scrubs.
		 * See transaction.c:cleanup_transaction() why we do it here.
		 */
		if (BTRFS_FS_ERROR(fs_info))
			btrfs_scrub_cancel(fs_info);
		return ret;
	}

	block_group = btrfs_lookup_block_group(fs_info, chunk_offset);
	if (!block_group)
		return -ENOENT;
	btrfs_discard_cancel_work(&fs_info->discard_ctl, block_group);
	length = block_group->length;
	btrfs_put_block_group(block_group);

	/*
	 * On a zoned file system, discard the whole block group, this will
	 * trigger a REQ_OP_ZONE_RESET operation on the device zone. If
	 * resetting the zone fails, don't treat it as a fatal problem from the
	 * filesystem's point of view.
	 */
	if (btrfs_is_zoned(fs_info)) {
		ret = btrfs_discard_extent(fs_info, chunk_offset, length, NULL);
		if (ret)
			btrfs_info(fs_info,
				   "failed to reset zone %llu after relocation",
				   chunk_offset);
	}

	trans = btrfs_start_trans_remove_block_group(root->fs_info,
						     chunk_offset);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		btrfs_handle_fs_error(root->fs_info, ret, NULL);
		return ret;
	}

	/*
	 * step two, delete the device extents and the
	 * chunk tree entries
	 */
	ret = btrfs_remove_chunk(trans, chunk_offset);
	btrfs_end_transaction(trans);
	return ret;
}

static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *chunk_root = fs_info->chunk_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_chunk *chunk;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u64 chunk_type;
	bool retried = false;
	int failed = 0;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

again:
	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	while (1) {
		mutex_lock(&fs_info->reclaim_bgs_lock);
		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
		if (ret < 0) {
			mutex_unlock(&fs_info->reclaim_bgs_lock);
			goto error;
		}
		if (ret == 0) {
			/*
			 * On the first search we would find chunk tree with
			 * offset -1, which is not possible. On subsequent
			 * loops this would find an existing item on an invalid
			 * offset (one less than the previous one, wrong
			 * alignment and size).
			 */
			ret = -EUCLEAN;
			goto error;
		}

		ret = btrfs_previous_item(chunk_root, path, key.objectid,
					  key.type);
		if (ret)
			mutex_unlock(&fs_info->reclaim_bgs_lock);
		if (ret < 0)
			goto error;
		if (ret > 0)
			break;

		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		chunk = btrfs_item_ptr(leaf, path->slots[0],
				       struct btrfs_chunk);
		chunk_type = btrfs_chunk_type(leaf, chunk);
		btrfs_release_path(path);

		if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
			ret = btrfs_relocate_chunk(fs_info, found_key.offset);
			if (ret == -ENOSPC)
				failed++;
			else
				BUG_ON(ret);
		}
		mutex_unlock(&fs_info->reclaim_bgs_lock);

		if (found_key.offset == 0)
			break;
		key.offset = found_key.offset - 1;
	}
	ret = 0;
	if (failed && !retried) {
		failed = 0;
		retried = true;
		goto again;
	} else if (WARN_ON(failed && retried)) {
		ret = -ENOSPC;
	}
error:
	btrfs_free_path(path);
	return ret;
}

/*
 * return 1 : allocate a data chunk successfully,
 * return <0: errors during allocating a data chunk,
 * return 0 : no need to allocate a data chunk.
 */
static int btrfs_may_alloc_data_chunk(struct btrfs_fs_info *fs_info,
				      u64 chunk_offset)
{
	struct btrfs_block_group *cache;
	u64 bytes_used;
	u64 chunk_type;

	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
	ASSERT(cache);
	chunk_type = cache->flags;
	btrfs_put_block_group(cache);

	if (!(chunk_type & BTRFS_BLOCK_GROUP_DATA))
		return 0;

	spin_lock(&fs_info->data_sinfo->lock);
	bytes_used = fs_info->data_sinfo->bytes_used;
	spin_unlock(&fs_info->data_sinfo->lock);

	if (!bytes_used) {
		struct btrfs_trans_handle *trans;
		int ret;

		trans = btrfs_join_transaction(fs_info->tree_root);
		if (IS_ERR(trans))
			return PTR_ERR(trans);

		ret = btrfs_force_chunk_alloc(trans, BTRFS_BLOCK_GROUP_DATA);
		btrfs_end_transaction(trans);
		if (ret < 0)
			return ret;
		return 1;
	}

	return 0;
}

static void btrfs_disk_balance_args_to_cpu(struct btrfs_balance_args *cpu,
					   const struct btrfs_disk_balance_args *disk)
{
	memset(cpu, 0, sizeof(*cpu));

	cpu->profiles = le64_to_cpu(disk->profiles);
	cpu->usage = le64_to_cpu(disk->usage);
	cpu->devid = le64_to_cpu(disk->devid);
	cpu->pstart = le64_to_cpu(disk->pstart);
	cpu->pend = le64_to_cpu(disk->pend);
	cpu->vstart = le64_to_cpu(disk->vstart);
	cpu->vend = le64_to_cpu(disk->vend);
	cpu->target = le64_to_cpu(disk->target);
	cpu->flags = le64_to_cpu(disk->flags);
	cpu->limit = le64_to_cpu(disk->limit);
	cpu->stripes_min = le32_to_cpu(disk->stripes_min);
	cpu->stripes_max = le32_to_cpu(disk->stripes_max);
}
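/* Convert the in-memory balance args to the on-disk (little-endian) layout. */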
static void btrfs_cpu_balance_args_to_disk(struct btrfs_disk_balance_args *disk,
					   const struct btrfs_balance_args *cpu)
{
	memset(disk, 0, sizeof(*disk));

	disk->profiles = cpu_to_le64(cpu->profiles);
	disk->usage = cpu_to_le64(cpu->usage);
	disk->devid = cpu_to_le64(cpu->devid);
	disk->pstart = cpu_to_le64(cpu->pstart);
	disk->pend = cpu_to_le64(cpu->pend);
	disk->vstart = cpu_to_le64(cpu->vstart);
	disk->vend = cpu_to_le64(cpu->vend);
	disk->target = cpu_to_le64(cpu->target);
	disk->flags = cpu_to_le64(cpu->flags);
	disk->limit = cpu_to_le64(cpu->limit);
	disk->stripes_min = cpu_to_le32(cpu->stripes_min);
	disk->stripes_max = cpu_to_le32(cpu->stripes_max);
}

static int insert_balance_item(struct btrfs_fs_info *fs_info,
			       struct btrfs_balance_control *bctl)
{
	struct btrfs_root *root = fs_info->tree_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_balance_item *item;
	struct btrfs_disk_balance_args disk_bargs;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	int ret, err;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}

	key.objectid = BTRFS_BALANCE_OBJECTID;
	key.type = BTRFS_TEMPORARY_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*item));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);

	memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item));

	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data);
	btrfs_set_balance_data(leaf, item, &disk_bargs);
	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta);
	btrfs_set_balance_meta(leaf, item, &disk_bargs);
	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys);
	btrfs_set_balance_sys(leaf, item, &disk_bargs);

	btrfs_set_balance_flags(leaf, item, bctl->flags);

	btrfs_mark_buffer_dirty(trans, leaf);
out:
	btrfs_free_path(path);
	err = btrfs_commit_transaction(trans);
	if (err && !ret)
		ret = err;
	return ret;
}

static int del_balance_item(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root = fs_info->tree_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret, err;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction_fallback_global_rsv(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}

	key.objectid = BTRFS_BALANCE_OBJECTID;
	key.type = BTRFS_TEMPORARY_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
out:
	btrfs_free_path(path);
	err = btrfs_commit_transaction(trans);
	if (err && !ret)
		ret = err;
	return ret;
}

/*
 * This is a heuristic used to reduce the number of chunks balanced on
 * resume after balance was interrupted.
 */
static void update_balance_args(struct btrfs_balance_control *bctl)
{
	/*
	 * Turn on soft mode for chunk types that were being converted.
	 */
	if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
		bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
	if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
		bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
	if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
		bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;

	/*
	 * Turn on usage filter if it is not already used. The idea is
	 * that chunks that we have already balanced should be
	 * reasonably full.
	 * Don't do it for chunks that are being converted - that will keep
	 * us from relocating unconverted (albeit full) chunks.
	 */
	if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
	    !(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
	    !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
		bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
		bctl->data.usage = 90;
	}
	if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
	    !(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
	    !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
		bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
		bctl->sys.usage = 90;
	}
	if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
	    !(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
	    !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
		bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
		bctl->meta.usage = 90;
	}
}

/*
 * Clear the balance status in fs_info and delete the balance item from disk.
 */
static void reset_balance_state(struct btrfs_fs_info *fs_info)
{
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
	int ret;

	ASSERT(fs_info->balance_ctl);

	spin_lock(&fs_info->balance_lock);
	fs_info->balance_ctl = NULL;
	spin_unlock(&fs_info->balance_lock);

	kfree(bctl);
	ret = del_balance_item(fs_info);
	if (ret)
		btrfs_handle_fs_error(fs_info, ret, NULL);
}

/*
 * Balance filters. Return 1 if chunk should be filtered out
 * (should not be balanced).
 */
static int chunk_profiles_filter(u64 chunk_type,
				 struct btrfs_balance_args *bargs)
{
	chunk_type = chunk_to_extended(chunk_type) &
		     BTRFS_EXTENDED_PROFILE_MASK;

	if (bargs->profiles & chunk_type)
		return 0;

	return 1;
}

static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
				    struct btrfs_balance_args *bargs)
{
	struct btrfs_block_group *cache;
	u64 chunk_used;
	u64 user_thresh_min;
	u64 user_thresh_max;
	int ret = 1;

	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
	chunk_used = cache->used;

	if (bargs->usage_min == 0)
		user_thresh_min = 0;
	else
		user_thresh_min = mult_perc(cache->length, bargs->usage_min);

	if (bargs->usage_max == 0)
		user_thresh_max = 1;
	else if (bargs->usage_max > 100)
		user_thresh_max = cache->length;
	else
		user_thresh_max = mult_perc(cache->length, bargs->usage_max);

	if (user_thresh_min <= chunk_used && chunk_used < user_thresh_max)
		ret = 0;

	btrfs_put_block_group(cache);
	return ret;
}

static int chunk_usage_filter(struct btrfs_fs_info *fs_info,
			      u64 chunk_offset, struct btrfs_balance_args *bargs)
{
	struct btrfs_block_group *cache;
	u64 chunk_used, user_thresh;
	int ret = 1;

	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
	chunk_used = cache->used;

	if (bargs->usage_min == 0)
		user_thresh = 1;
	else if (bargs->usage > 100)
		user_thresh = cache->length;
	else
		user_thresh = mult_perc(cache->length, bargs->usage);

	if (chunk_used < user_thresh)
		ret = 0;

	btrfs_put_block_group(cache);
	return ret;
}
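/* Filter out chunks that do not have a stripe on the device @bargs->devid. */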
static int chunk_devid_filter(struct extent_buffer *leaf,
			      struct btrfs_chunk *chunk,
			      struct btrfs_balance_args *bargs)
{
	struct btrfs_stripe *stripe;
	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	int i;

	for (i = 0; i < num_stripes; i++) {
		stripe = btrfs_stripe_nr(chunk, i);
		if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
			return 0;
	}

	return 1;
}

static u64 calc_data_stripes(u64 type, int num_stripes)
{
	const int index = btrfs_bg_flags_to_raid_index(type);
	const int ncopies = btrfs_raid_array[index].ncopies;
	const int nparity = btrfs_raid_array[index].nparity;

	return (num_stripes - nparity) / ncopies;
}

/* [pstart, pend) */
static int chunk_drange_filter(struct extent_buffer *leaf,
			       struct btrfs_chunk *chunk,
			       struct btrfs_balance_args *bargs)
{
	struct btrfs_stripe *stripe;
	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	u64 stripe_offset;
	u64 stripe_length;
	u64 type;
	int factor;
	int i;

	if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))
		return 0;

	type = btrfs_chunk_type(leaf, chunk);
	factor = calc_data_stripes(type, num_stripes);

	for (i = 0; i < num_stripes; i++) {
		stripe = btrfs_stripe_nr(chunk, i);
		if (btrfs_stripe_devid(leaf, stripe) != bargs->devid)
			continue;

		stripe_offset = btrfs_stripe_offset(leaf, stripe);
		stripe_length = btrfs_chunk_length(leaf, chunk);
		stripe_length = div_u64(stripe_length, factor);

		if (stripe_offset < bargs->pend &&
		    stripe_offset + stripe_length > bargs->pstart)
			return 0;
	}

	return 1;
}

/* [vstart, vend) */
static int chunk_vrange_filter(struct extent_buffer *leaf,
			       struct btrfs_chunk *chunk,
			       u64 chunk_offset,
			       struct btrfs_balance_args *bargs)
{
	if (chunk_offset < bargs->vend &&
	    chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
		/* at least part of the chunk is inside this vrange */
		return 0;

	return 1;
}

static int chunk_stripes_range_filter(struct extent_buffer *leaf,
				      struct btrfs_chunk *chunk,
				      struct btrfs_balance_args *bargs)
{
	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);

	if (bargs->stripes_min <= num_stripes
	    && num_stripes <= bargs->stripes_max)
		return 0;

	return 1;
}

static int chunk_soft_convert_filter(u64 chunk_type,
				     struct btrfs_balance_args *bargs)
{
	if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
		return 0;

	chunk_type = chunk_to_extended(chunk_type) &
		     BTRFS_EXTENDED_PROFILE_MASK;

	if (bargs->target == chunk_type)
		return 1;

	return 0;
}

static int should_balance_chunk(struct extent_buffer *leaf,
				struct btrfs_chunk *chunk, u64 chunk_offset)
{
	struct btrfs_fs_info *fs_info = leaf->fs_info;
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
	struct btrfs_balance_args *bargs = NULL;
	u64 chunk_type = btrfs_chunk_type(leaf, chunk);

	/* type filter */
	if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
	      (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {
		return 0;
	}

	if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
		bargs = &bctl->data;
	else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
		bargs = &bctl->sys;
	else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
		bargs = &bctl->meta;

	/* profiles filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
chunk_profiles_filter(chunk_type, bargs)) { 3886 return 0; 3887 } 3888 3889 /* usage filter */ 3890 if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) && 3891 chunk_usage_filter(fs_info, chunk_offset, bargs)) { 3892 return 0; 3893 } else if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) && 3894 chunk_usage_range_filter(fs_info, chunk_offset, bargs)) { 3895 return 0; 3896 } 3897 3898 /* devid filter */ 3899 if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) && 3900 chunk_devid_filter(leaf, chunk, bargs)) { 3901 return 0; 3902 } 3903 3904 /* drange filter, makes sense only with devid filter */ 3905 if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) && 3906 chunk_drange_filter(leaf, chunk, bargs)) { 3907 return 0; 3908 } 3909 3910 /* vrange filter */ 3911 if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) && 3912 chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) { 3913 return 0; 3914 } 3915 3916 /* stripes filter */ 3917 if ((bargs->flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) && 3918 chunk_stripes_range_filter(leaf, chunk, bargs)) { 3919 return 0; 3920 } 3921 3922 /* soft profile changing mode */ 3923 if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) && 3924 chunk_soft_convert_filter(chunk_type, bargs)) { 3925 return 0; 3926 } 3927 3928 /* 3929 * limited by count, must be the last filter 3930 */ 3931 if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT)) { 3932 if (bargs->limit == 0) 3933 return 0; 3934 else 3935 bargs->limit--; 3936 } else if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)) { 3937 /* 3938 * Same logic as the 'limit' filter; the minimum cannot be 3939 * determined here because we do not have the global information 3940 * about the count of all chunks that satisfy the filters. 3941 */ 3942 if (bargs->limit_max == 0) 3943 return 0; 3944 else 3945 bargs->limit_max--; 3946 } 3947 3948 return 1; 3949 } 3950 3951 static int __btrfs_balance(struct btrfs_fs_info *fs_info) 3952 { 3953 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 3954 struct btrfs_root *chunk_root = fs_info->chunk_root; 3955 u64 chunk_type; 3956 struct btrfs_chunk *chunk; 3957 struct btrfs_path *path = NULL; 3958 struct btrfs_key key; 3959 struct btrfs_key found_key; 3960 struct extent_buffer *leaf; 3961 int slot; 3962 int ret; 3963 int enospc_errors = 0; 3964 bool counting = true; 3965 /* The single value limit and the min/max limits share the same bytes in the balance args (a union), so save them before the counting pass consumes them. */ 3966 u64 limit_data = bctl->data.limit; 3967 u64 limit_meta = bctl->meta.limit; 3968 u64 limit_sys = bctl->sys.limit; 3969 u32 count_data = 0; 3970 u32 count_meta = 0; 3971 u32 count_sys = 0; 3972 int chunk_reserved = 0; 3973 3974 path = btrfs_alloc_path(); 3975 if (!path) { 3976 ret = -ENOMEM; 3977 goto error; 3978 } 3979 3980 /* zero out stat counters */ 3981 spin_lock(&fs_info->balance_lock); 3982 memset(&bctl->stat, 0, sizeof(bctl->stat)); 3983 spin_unlock(&fs_info->balance_lock); 3984 again: 3985 if (!counting) { 3986 /* 3987 * The single value limit and min/max limits use the same bytes in 3988 * the balance args (a union), so restore what the counting pass consumed. 3989 */ 3990 bctl->data.limit = limit_data; 3991 bctl->meta.limit = limit_meta; 3992 bctl->sys.limit = limit_sys; 3993 } 3994 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 3995 key.offset = (u64)-1; 3996 key.type = BTRFS_CHUNK_ITEM_KEY; 3997 3998 while (1) { 3999 if ((!counting && atomic_read(&fs_info->balance_pause_req)) || 4000 atomic_read(&fs_info->balance_cancel_req)) { 4001 ret = -ECANCELED; 4002 goto error; 4003 } 4004 4005 mutex_lock(&fs_info->reclaim_bgs_lock); 4006 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0); 4007 if (ret < 0) { 4008
mutex_unlock(&fs_info->reclaim_bgs_lock); 4009 goto error; 4010 } 4011 4012 /* 4013 * This shouldn't happen, it means the last relocation 4014 * failed. 4015 */ 4016 if (ret == 0) 4017 BUG(); /* FIXME break ? */ 4018 4019 ret = btrfs_previous_item(chunk_root, path, 0, 4020 BTRFS_CHUNK_ITEM_KEY); 4021 if (ret) { 4022 mutex_unlock(&fs_info->reclaim_bgs_lock); 4023 ret = 0; 4024 break; 4025 } 4026 4027 leaf = path->nodes[0]; 4028 slot = path->slots[0]; 4029 btrfs_item_key_to_cpu(leaf, &found_key, slot); 4030 4031 if (found_key.objectid != key.objectid) { 4032 mutex_unlock(&fs_info->reclaim_bgs_lock); 4033 break; 4034 } 4035 4036 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk); 4037 chunk_type = btrfs_chunk_type(leaf, chunk); 4038 4039 if (!counting) { 4040 spin_lock(&fs_info->balance_lock); 4041 bctl->stat.considered++; 4042 spin_unlock(&fs_info->balance_lock); 4043 } 4044 4045 ret = should_balance_chunk(leaf, chunk, found_key.offset); 4046 4047 btrfs_release_path(path); 4048 if (!ret) { 4049 mutex_unlock(&fs_info->reclaim_bgs_lock); 4050 goto loop; 4051 } 4052 4053 if (counting) { 4054 mutex_unlock(&fs_info->reclaim_bgs_lock); 4055 spin_lock(&fs_info->balance_lock); 4056 bctl->stat.expected++; 4057 spin_unlock(&fs_info->balance_lock); 4058 4059 if (chunk_type & BTRFS_BLOCK_GROUP_DATA) 4060 count_data++; 4061 else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) 4062 count_sys++; 4063 else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA) 4064 count_meta++; 4065 4066 goto loop; 4067 } 4068 4069 /* 4070 * Apply limit_min filter, no need to check if the LIMITS 4071 * filter is used, limit_min is 0 by default 4072 */ 4073 if (((chunk_type & BTRFS_BLOCK_GROUP_DATA) && 4074 count_data < bctl->data.limit_min) 4075 || ((chunk_type & BTRFS_BLOCK_GROUP_METADATA) && 4076 count_meta < bctl->meta.limit_min) 4077 || ((chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) && 4078 count_sys < bctl->sys.limit_min)) { 4079 mutex_unlock(&fs_info->reclaim_bgs_lock); 4080 goto loop; 4081 } 4082 4083 if (!chunk_reserved) { 4084 /* 4085 * We may be relocating the only data chunk we have, 4086 * which could end up losing the data raid profile, 4087 * so let's allocate an empty one in 4088 * advance. 4089 */ 4090 ret = btrfs_may_alloc_data_chunk(fs_info, 4091 found_key.offset); 4092 if (ret < 0) { 4093 mutex_unlock(&fs_info->reclaim_bgs_lock); 4094 goto error; 4095 } else if (ret == 1) { 4096 chunk_reserved = 1; 4097 } 4098 } 4099 4100 ret = btrfs_relocate_chunk(fs_info, found_key.offset); 4101 mutex_unlock(&fs_info->reclaim_bgs_lock); 4102 if (ret == -ENOSPC) { 4103 enospc_errors++; 4104 } else if (ret == -ETXTBSY) { 4105 btrfs_info(fs_info, 4106 "skipping relocation of block group %llu due to active swapfile", 4107 found_key.offset); 4108 ret = 0; 4109 } else if (ret) { 4110 goto error; 4111 } else { 4112 spin_lock(&fs_info->balance_lock); 4113 bctl->stat.completed++; 4114 spin_unlock(&fs_info->balance_lock); 4115 } 4116 loop: 4117 if (found_key.offset == 0) 4118 break; 4119 key.offset = found_key.offset - 1; 4120 } 4121 4122 if (counting) { 4123 btrfs_release_path(path); 4124 counting = false; 4125 goto again; 4126 } 4127 error: 4128 btrfs_free_path(path); 4129 if (enospc_errors) { 4130 btrfs_info(fs_info, "%d enospc errors during balance", 4131 enospc_errors); 4132 if (!ret) 4133 ret = -ENOSPC; 4134 } 4135 4136 return ret; 4137 } 4138 4139 /* 4140 * See if a given profile is valid and reduced.
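 * A profile is "reduced" when at most one profile bit is set, e.g. plain
 * RAID1; a combination such as RAID1|RAID5 is not reduced.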
4141 * 4142 * @flags: profile to validate 4143 * @extended: if true @flags is treated as an extended profile 4144 */ 4145 static int alloc_profile_is_valid(u64 flags, int extended) 4146 { 4147 u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK : 4148 BTRFS_BLOCK_GROUP_PROFILE_MASK); 4149 4150 flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK; 4151 4152 /* 1) check that all other bits are zeroed */ 4153 if (flags & ~mask) 4154 return 0; 4155 4156 /* 2) see if profile is reduced */ 4157 if (flags == 0) 4158 return !extended; /* "0" is valid for usual profiles */ 4159 4160 return has_single_bit_set(flags); 4161 } 4162 4163 /* 4164 * Validate target profile against allowed profiles and return true if it's OK. 4165 * Otherwise print the error message and return false. 4166 */ 4167 static inline int validate_convert_profile(struct btrfs_fs_info *fs_info, 4168 const struct btrfs_balance_args *bargs, 4169 u64 allowed, const char *type) 4170 { 4171 if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT)) 4172 return true; 4173 4174 /* Profile is valid and does not have bits outside of the allowed set */ 4175 if (alloc_profile_is_valid(bargs->target, 1) && 4176 (bargs->target & ~allowed) == 0) 4177 return true; 4178 4179 btrfs_err(fs_info, "balance: invalid convert %s profile %s", 4180 type, btrfs_bg_type_to_raid_name(bargs->target)); 4181 return false; 4182 } 4183 4184 /* 4185 * Fill @buf with textual description of balance filter flags @bargs, up to 4186 * @size_buf including the terminating null. The output may be trimmed if it 4187 * does not fit into the provided buffer. 4188 */ 4189 static void describe_balance_args(struct btrfs_balance_args *bargs, char *buf, 4190 u32 size_buf) 4191 { 4192 int ret; 4193 u32 size_bp = size_buf; 4194 char *bp = buf; 4195 u64 flags = bargs->flags; 4196 char tmp_buf[128] = {'\0'}; 4197 4198 if (!flags) 4199 return; 4200 4201 #define CHECK_APPEND_NOARG(a) \ 4202 do { \ 4203 ret = snprintf(bp, size_bp, (a)); \ 4204 if (ret < 0 || ret >= size_bp) \ 4205 goto out_overflow; \ 4206 size_bp -= ret; \ 4207 bp += ret; \ 4208 } while (0) 4209 4210 #define CHECK_APPEND_1ARG(a, v1) \ 4211 do { \ 4212 ret = snprintf(bp, size_bp, (a), (v1)); \ 4213 if (ret < 0 || ret >= size_bp) \ 4214 goto out_overflow; \ 4215 size_bp -= ret; \ 4216 bp += ret; \ 4217 } while (0) 4218 4219 #define CHECK_APPEND_2ARG(a, v1, v2) \ 4220 do { \ 4221 ret = snprintf(bp, size_bp, (a), (v1), (v2)); \ 4222 if (ret < 0 || ret >= size_bp) \ 4223 goto out_overflow; \ 4224 size_bp -= ret; \ 4225 bp += ret; \ 4226 } while (0) 4227 4228 if (flags & BTRFS_BALANCE_ARGS_CONVERT) 4229 CHECK_APPEND_1ARG("convert=%s,", 4230 btrfs_bg_type_to_raid_name(bargs->target)); 4231 4232 if (flags & BTRFS_BALANCE_ARGS_SOFT) 4233 CHECK_APPEND_NOARG("soft,"); 4234 4235 if (flags & BTRFS_BALANCE_ARGS_PROFILES) { 4236 btrfs_describe_block_groups(bargs->profiles, tmp_buf, 4237 sizeof(tmp_buf)); 4238 CHECK_APPEND_1ARG("profiles=%s,", tmp_buf); 4239 } 4240 4241 if (flags & BTRFS_BALANCE_ARGS_USAGE) 4242 CHECK_APPEND_1ARG("usage=%llu,", bargs->usage); 4243 4244 if (flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) 4245 CHECK_APPEND_2ARG("usage=%u..%u,", 4246 bargs->usage_min, bargs->usage_max); 4247 4248 if (flags & BTRFS_BALANCE_ARGS_DEVID) 4249 CHECK_APPEND_1ARG("devid=%llu,", bargs->devid); 4250 4251 if (flags & BTRFS_BALANCE_ARGS_DRANGE) 4252 CHECK_APPEND_2ARG("drange=%llu..%llu,", 4253 bargs->pstart, bargs->pend); 4254 4255 if (flags & BTRFS_BALANCE_ARGS_VRANGE) 4256 CHECK_APPEND_2ARG("vrange=%llu..%llu,", 4257 bargs->vstart, bargs->vend); 4258 4259 if 
(flags & BTRFS_BALANCE_ARGS_LIMIT) 4260 CHECK_APPEND_1ARG("limit=%llu,", bargs->limit); 4261 4262 if (flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE) 4263 CHECK_APPEND_2ARG("limit=%u..%u,", 4264 bargs->limit_min, bargs->limit_max); 4265 4266 if (flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) 4267 CHECK_APPEND_2ARG("stripes=%u..%u,", 4268 bargs->stripes_min, bargs->stripes_max); 4269 4270 #undef CHECK_APPEND_2ARG 4271 #undef CHECK_APPEND_1ARG 4272 #undef CHECK_APPEND_NOARG 4273 4274 out_overflow: 4275 4276 if (size_bp < size_buf) 4277 buf[size_buf - size_bp - 1] = '\0'; /* remove last , */ 4278 else 4279 buf[0] = '\0'; 4280 } 4281 4282 static void describe_balance_start_or_resume(struct btrfs_fs_info *fs_info) 4283 { 4284 u32 size_buf = 1024; 4285 char tmp_buf[192] = {'\0'}; 4286 char *buf; 4287 char *bp; 4288 u32 size_bp = size_buf; 4289 int ret; 4290 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 4291 4292 buf = kzalloc(size_buf, GFP_KERNEL); 4293 if (!buf) 4294 return; 4295 4296 bp = buf; 4297 4298 #define CHECK_APPEND_1ARG(a, v1) \ 4299 do { \ 4300 ret = snprintf(bp, size_bp, (a), (v1)); \ 4301 if (ret < 0 || ret >= size_bp) \ 4302 goto out_overflow; \ 4303 size_bp -= ret; \ 4304 bp += ret; \ 4305 } while (0) 4306 4307 if (bctl->flags & BTRFS_BALANCE_FORCE) 4308 CHECK_APPEND_1ARG("%s", "-f "); 4309 4310 if (bctl->flags & BTRFS_BALANCE_DATA) { 4311 describe_balance_args(&bctl->data, tmp_buf, sizeof(tmp_buf)); 4312 CHECK_APPEND_1ARG("-d%s ", tmp_buf); 4313 } 4314 4315 if (bctl->flags & BTRFS_BALANCE_METADATA) { 4316 describe_balance_args(&bctl->meta, tmp_buf, sizeof(tmp_buf)); 4317 CHECK_APPEND_1ARG("-m%s ", tmp_buf); 4318 } 4319 4320 if (bctl->flags & BTRFS_BALANCE_SYSTEM) { 4321 describe_balance_args(&bctl->sys, tmp_buf, sizeof(tmp_buf)); 4322 CHECK_APPEND_1ARG("-s%s ", tmp_buf); 4323 } 4324 4325 #undef CHECK_APPEND_1ARG 4326 4327 out_overflow: 4328 4329 if (size_bp < size_buf) 4330 buf[size_buf - size_bp - 1] = '\0'; /* remove last " " */ 4331 btrfs_info(fs_info, "balance: %s %s", 4332 (bctl->flags & BTRFS_BALANCE_RESUME) ? 4333 "resume" : "start", buf); 4334 4335 kfree(buf); 4336 } 4337 4338 /* 4339 * Must be called with the balance mutex held. 4340 */ 4341 int btrfs_balance(struct btrfs_fs_info *fs_info, 4342 struct btrfs_balance_control *bctl, 4343 struct btrfs_ioctl_balance_args *bargs) 4344 { 4345 u64 meta_target, data_target; 4346 u64 allowed; 4347 int mixed = 0; 4348 int ret; 4349 u64 num_devices; 4350 unsigned seq; 4351 bool reducing_redundancy; 4352 bool paused = false; 4353 int i; 4354 4355 if (btrfs_fs_closing(fs_info) || 4356 atomic_read(&fs_info->balance_pause_req) || 4357 btrfs_should_cancel_balance(fs_info)) { 4358 ret = -EINVAL; 4359 goto out; 4360 } 4361 4362 allowed = btrfs_super_incompat_flags(fs_info->super_copy); 4363 if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) 4364 mixed = 1; 4365 4366 /* 4367 * In case of mixed groups both data and metadata must be picked, 4368 * and identical options must be given for both of them.
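 * For example (illustration only): "-dusage=50 -musage=50" passes this
 * check on a mixed-bg filesystem, while differing -d and -m filters are
 * rejected with -EINVAL.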
4369 */ 4370 allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA; 4371 if (mixed && (bctl->flags & allowed)) { 4372 if (!(bctl->flags & BTRFS_BALANCE_DATA) || 4373 !(bctl->flags & BTRFS_BALANCE_METADATA) || 4374 memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) { 4375 btrfs_err(fs_info, 4376 "balance: mixed groups data and metadata options must be the same"); 4377 ret = -EINVAL; 4378 goto out; 4379 } 4380 } 4381 4382 /* 4383 * rw_devices will not change at the moment, device add/delete/replace 4384 * are exclusive 4385 */ 4386 num_devices = fs_info->fs_devices->rw_devices; 4387 4388 /* 4389 * SINGLE profile on-disk has no profile bit, but in-memory we have a 4390 * special bit for it, to make it easier to distinguish. Thus we need 4391 * to set it manually, or balance would refuse the profile. 4392 */ 4393 allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE; 4394 for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++) 4395 if (num_devices >= btrfs_raid_array[i].devs_min) 4396 allowed |= btrfs_raid_array[i].bg_flag; 4397 4398 if (!validate_convert_profile(fs_info, &bctl->data, allowed, "data") || 4399 !validate_convert_profile(fs_info, &bctl->meta, allowed, "metadata") || 4400 !validate_convert_profile(fs_info, &bctl->sys, allowed, "system")) { 4401 ret = -EINVAL; 4402 goto out; 4403 } 4404 4405 /* 4406 * Allow to reduce metadata or system integrity only if force set for 4407 * profiles with redundancy (copies, parity) 4408 */ 4409 allowed = 0; 4410 for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++) { 4411 if (btrfs_raid_array[i].ncopies >= 2 || 4412 btrfs_raid_array[i].tolerated_failures >= 1) 4413 allowed |= btrfs_raid_array[i].bg_flag; 4414 } 4415 do { 4416 seq = read_seqbegin(&fs_info->profiles_lock); 4417 4418 if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) && 4419 (fs_info->avail_system_alloc_bits & allowed) && 4420 !(bctl->sys.target & allowed)) || 4421 ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) && 4422 (fs_info->avail_metadata_alloc_bits & allowed) && 4423 !(bctl->meta.target & allowed))) 4424 reducing_redundancy = true; 4425 else 4426 reducing_redundancy = false; 4427 4428 /* if we're not converting, the target field is uninitialized */ 4429 meta_target = (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) ? 4430 bctl->meta.target : fs_info->avail_metadata_alloc_bits; 4431 data_target = (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) ? 
4432 bctl->data.target : fs_info->avail_data_alloc_bits; 4433 } while (read_seqretry(&fs_info->profiles_lock, seq)); 4434 4435 if (reducing_redundancy) { 4436 if (bctl->flags & BTRFS_BALANCE_FORCE) { 4437 btrfs_info(fs_info, 4438 "balance: force reducing metadata redundancy"); 4439 } else { 4440 btrfs_err(fs_info, 4441 "balance: reduces metadata redundancy, use --force if you want this"); 4442 ret = -EINVAL; 4443 goto out; 4444 } 4445 } 4446 4447 if (btrfs_get_num_tolerated_disk_barrier_failures(meta_target) < 4448 btrfs_get_num_tolerated_disk_barrier_failures(data_target)) { 4449 btrfs_warn(fs_info, 4450 "balance: metadata profile %s has lower redundancy than data profile %s", 4451 btrfs_bg_type_to_raid_name(meta_target), 4452 btrfs_bg_type_to_raid_name(data_target)); 4453 } 4454 4455 ret = insert_balance_item(fs_info, bctl); 4456 if (ret && ret != -EEXIST) 4457 goto out; 4458 4459 if (!(bctl->flags & BTRFS_BALANCE_RESUME)) { 4460 BUG_ON(ret == -EEXIST); 4461 BUG_ON(fs_info->balance_ctl); 4462 spin_lock(&fs_info->balance_lock); 4463 fs_info->balance_ctl = bctl; 4464 spin_unlock(&fs_info->balance_lock); 4465 } else { 4466 BUG_ON(ret != -EEXIST); 4467 spin_lock(&fs_info->balance_lock); 4468 update_balance_args(bctl); 4469 spin_unlock(&fs_info->balance_lock); 4470 } 4471 4472 ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4473 set_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags); 4474 describe_balance_start_or_resume(fs_info); 4475 mutex_unlock(&fs_info->balance_mutex); 4476 4477 ret = __btrfs_balance(fs_info); 4478 4479 mutex_lock(&fs_info->balance_mutex); 4480 if (ret == -ECANCELED && atomic_read(&fs_info->balance_pause_req)) { 4481 btrfs_info(fs_info, "balance: paused"); 4482 btrfs_exclop_balance(fs_info, BTRFS_EXCLOP_BALANCE_PAUSED); 4483 paused = true; 4484 } 4485 /* 4486 * Balance can be canceled by: 4487 * 4488 * - Regular cancel request 4489 * Then ret == -ECANCELED and balance_cancel_req > 0 4490 * 4491 * - Fatal signal to "btrfs" process 4492 * Either the signal caught by wait_reserve_ticket() and callers 4493 * got -EINTR, or caught by btrfs_should_cancel_balance() and 4494 * got -ECANCELED. 4495 * Either way, in this case balance_cancel_req = 0, and 4496 * ret == -EINTR or ret == -ECANCELED. 4497 * 4498 * So here we only check the return value to catch canceled balance. 4499 */ 4500 else if (ret == -ECANCELED || ret == -EINTR) 4501 btrfs_info(fs_info, "balance: canceled"); 4502 else 4503 btrfs_info(fs_info, "balance: ended with status: %d", ret); 4504 4505 clear_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags); 4506 4507 if (bargs) { 4508 memset(bargs, 0, sizeof(*bargs)); 4509 btrfs_update_ioctl_balance_args(fs_info, bargs); 4510 } 4511 4512 /* We didn't pause, we can clean everything up. 
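 * (reset_balance_state() below deletes the balance item from disk and
 * btrfs_exclop_finish() releases the exclusive operation.)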
*/ 4513 if (!paused) { 4514 reset_balance_state(fs_info); 4515 btrfs_exclop_finish(fs_info); 4516 } 4517 4518 wake_up(&fs_info->balance_wait_q); 4519 4520 return ret; 4521 out: 4522 if (bctl->flags & BTRFS_BALANCE_RESUME) 4523 reset_balance_state(fs_info); 4524 else 4525 kfree(bctl); 4526 btrfs_exclop_finish(fs_info); 4527 4528 return ret; 4529 } 4530 4531 static int balance_kthread(void *data) 4532 { 4533 struct btrfs_fs_info *fs_info = data; 4534 int ret = 0; 4535 4536 sb_start_write(fs_info->sb); 4537 mutex_lock(&fs_info->balance_mutex); 4538 if (fs_info->balance_ctl) 4539 ret = btrfs_balance(fs_info, fs_info->balance_ctl, NULL); 4540 mutex_unlock(&fs_info->balance_mutex); 4541 sb_end_write(fs_info->sb); 4542 4543 return ret; 4544 } 4545 4546 int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info) 4547 { 4548 struct task_struct *tsk; 4549 4550 mutex_lock(&fs_info->balance_mutex); 4551 if (!fs_info->balance_ctl) { 4552 mutex_unlock(&fs_info->balance_mutex); 4553 return 0; 4554 } 4555 mutex_unlock(&fs_info->balance_mutex); 4556 4557 if (btrfs_test_opt(fs_info, SKIP_BALANCE)) { 4558 btrfs_info(fs_info, "balance: resume skipped"); 4559 return 0; 4560 } 4561 4562 spin_lock(&fs_info->super_lock); 4563 ASSERT(fs_info->exclusive_operation == BTRFS_EXCLOP_BALANCE_PAUSED); 4564 fs_info->exclusive_operation = BTRFS_EXCLOP_BALANCE; 4565 spin_unlock(&fs_info->super_lock); 4566 /* 4567 * A ro->rw remount sequence should continue with the paused balance 4568 * regardless of who pauses it, system or the user as of now, so set 4569 * the resume flag. 4570 */ 4571 spin_lock(&fs_info->balance_lock); 4572 fs_info->balance_ctl->flags |= BTRFS_BALANCE_RESUME; 4573 spin_unlock(&fs_info->balance_lock); 4574 4575 tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance"); 4576 return PTR_ERR_OR_ZERO(tsk); 4577 } 4578 4579 int btrfs_recover_balance(struct btrfs_fs_info *fs_info) 4580 { 4581 struct btrfs_balance_control *bctl; 4582 struct btrfs_balance_item *item; 4583 struct btrfs_disk_balance_args disk_bargs; 4584 struct btrfs_path *path; 4585 struct extent_buffer *leaf; 4586 struct btrfs_key key; 4587 int ret; 4588 4589 path = btrfs_alloc_path(); 4590 if (!path) 4591 return -ENOMEM; 4592 4593 key.objectid = BTRFS_BALANCE_OBJECTID; 4594 key.type = BTRFS_TEMPORARY_ITEM_KEY; 4595 key.offset = 0; 4596 4597 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0); 4598 if (ret < 0) 4599 goto out; 4600 if (ret > 0) { /* ret = -ENOENT; */ 4601 ret = 0; 4602 goto out; 4603 } 4604 4605 bctl = kzalloc(sizeof(*bctl), GFP_NOFS); 4606 if (!bctl) { 4607 ret = -ENOMEM; 4608 goto out; 4609 } 4610 4611 leaf = path->nodes[0]; 4612 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item); 4613 4614 bctl->flags = btrfs_balance_flags(leaf, item); 4615 bctl->flags |= BTRFS_BALANCE_RESUME; 4616 4617 btrfs_balance_data(leaf, item, &disk_bargs); 4618 btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs); 4619 btrfs_balance_meta(leaf, item, &disk_bargs); 4620 btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs); 4621 btrfs_balance_sys(leaf, item, &disk_bargs); 4622 btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs); 4623 4624 /* 4625 * This should never happen, as the paused balance state is recovered 4626 * during mount without any chance of other exclusive ops to collide. 4627 * 4628 * This gives the exclusive op status to balance and keeps in paused 4629 * state until user intervention (cancel or umount). If the ownership 4630 * cannot be assigned, show a message but do not fail. 
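(The user can restart it manually with "btrfs balance resume".)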
The balance 4631 * is in a paused state and must have fs_info::balance_ctl properly 4632 * set up. 4633 */ 4634 if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE_PAUSED)) 4635 btrfs_warn(fs_info, 4636 "balance: cannot set exclusive op status, resume manually"); 4637 4638 btrfs_release_path(path); 4639 4640 mutex_lock(&fs_info->balance_mutex); 4641 BUG_ON(fs_info->balance_ctl); 4642 spin_lock(&fs_info->balance_lock); 4643 fs_info->balance_ctl = bctl; 4644 spin_unlock(&fs_info->balance_lock); 4645 mutex_unlock(&fs_info->balance_mutex); 4646 out: 4647 btrfs_free_path(path); 4648 return ret; 4649 } 4650 4651 int btrfs_pause_balance(struct btrfs_fs_info *fs_info) 4652 { 4653 int ret = 0; 4654 4655 mutex_lock(&fs_info->balance_mutex); 4656 if (!fs_info->balance_ctl) { 4657 mutex_unlock(&fs_info->balance_mutex); 4658 return -ENOTCONN; 4659 } 4660 4661 if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) { 4662 atomic_inc(&fs_info->balance_pause_req); 4663 mutex_unlock(&fs_info->balance_mutex); 4664 4665 wait_event(fs_info->balance_wait_q, 4666 !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4667 4668 mutex_lock(&fs_info->balance_mutex); 4669 /* we are good with balance_ctl ripped off from under us */ 4670 BUG_ON(test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4671 atomic_dec(&fs_info->balance_pause_req); 4672 } else { 4673 ret = -ENOTCONN; 4674 } 4675 4676 mutex_unlock(&fs_info->balance_mutex); 4677 return ret; 4678 } 4679 4680 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info) 4681 { 4682 mutex_lock(&fs_info->balance_mutex); 4683 if (!fs_info->balance_ctl) { 4684 mutex_unlock(&fs_info->balance_mutex); 4685 return -ENOTCONN; 4686 } 4687 4688 /* 4689 * A paused balance with the item stored on disk can be resumed at 4690 * mount time if the mount is read-write. Otherwise it's still paused 4691 * and we must not allow cancelling as it deletes the item. 4692 */ 4693 if (sb_rdonly(fs_info->sb)) { 4694 mutex_unlock(&fs_info->balance_mutex); 4695 return -EROFS; 4696 } 4697 4698 atomic_inc(&fs_info->balance_cancel_req); 4699 /* 4700 * if we are running just wait and return, balance item is 4701 * deleted in btrfs_balance in this case 4702 */ 4703 if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) { 4704 mutex_unlock(&fs_info->balance_mutex); 4705 wait_event(fs_info->balance_wait_q, 4706 !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4707 mutex_lock(&fs_info->balance_mutex); 4708 } else { 4709 mutex_unlock(&fs_info->balance_mutex); 4710 /* 4711 * Lock released to allow other waiters to continue, we'll 4712 * reexamine the status again. 
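 * A concurrently resumed balance may have freed balance_ctl by the time
 * we retake the mutex, hence the recheck below.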
4713 */ 4714 mutex_lock(&fs_info->balance_mutex); 4715 4716 if (fs_info->balance_ctl) { 4717 reset_balance_state(fs_info); 4718 btrfs_exclop_finish(fs_info); 4719 btrfs_info(fs_info, "balance: canceled"); 4720 } 4721 } 4722 4723 ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4724 atomic_dec(&fs_info->balance_cancel_req); 4725 mutex_unlock(&fs_info->balance_mutex); 4726 return 0; 4727 } 4728 4729 int btrfs_uuid_scan_kthread(void *data) 4730 { 4731 struct btrfs_fs_info *fs_info = data; 4732 struct btrfs_root *root = fs_info->tree_root; 4733 struct btrfs_key key; 4734 struct btrfs_path *path = NULL; 4735 int ret = 0; 4736 struct extent_buffer *eb; 4737 int slot; 4738 struct btrfs_root_item root_item; 4739 u32 item_size; 4740 struct btrfs_trans_handle *trans = NULL; 4741 bool closing = false; 4742 4743 path = btrfs_alloc_path(); 4744 if (!path) { 4745 ret = -ENOMEM; 4746 goto out; 4747 } 4748 4749 key.objectid = 0; 4750 key.type = BTRFS_ROOT_ITEM_KEY; 4751 key.offset = 0; 4752 4753 while (1) { 4754 if (btrfs_fs_closing(fs_info)) { 4755 closing = true; 4756 break; 4757 } 4758 ret = btrfs_search_forward(root, &key, path, 4759 BTRFS_OLDEST_GENERATION); 4760 if (ret) { 4761 if (ret > 0) 4762 ret = 0; 4763 break; 4764 } 4765 4766 if (key.type != BTRFS_ROOT_ITEM_KEY || 4767 (key.objectid < BTRFS_FIRST_FREE_OBJECTID && 4768 key.objectid != BTRFS_FS_TREE_OBJECTID) || 4769 key.objectid > BTRFS_LAST_FREE_OBJECTID) 4770 goto skip; 4771 4772 eb = path->nodes[0]; 4773 slot = path->slots[0]; 4774 item_size = btrfs_item_size(eb, slot); 4775 if (item_size < sizeof(root_item)) 4776 goto skip; 4777 4778 read_extent_buffer(eb, &root_item, 4779 btrfs_item_ptr_offset(eb, slot), 4780 (int)sizeof(root_item)); 4781 if (btrfs_root_refs(&root_item) == 0) 4782 goto skip; 4783 4784 if (!btrfs_is_empty_uuid(root_item.uuid) || 4785 !btrfs_is_empty_uuid(root_item.received_uuid)) { 4786 if (trans) 4787 goto update_tree; 4788 4789 btrfs_release_path(path); 4790 /* 4791 * 1 - subvol uuid item 4792 * 1 - received_subvol uuid item 4793 */ 4794 trans = btrfs_start_transaction(fs_info->uuid_root, 2); 4795 if (IS_ERR(trans)) { 4796 ret = PTR_ERR(trans); 4797 break; 4798 } 4799 continue; 4800 } else { 4801 goto skip; 4802 } 4803 update_tree: 4804 btrfs_release_path(path); 4805 if (!btrfs_is_empty_uuid(root_item.uuid)) { 4806 ret = btrfs_uuid_tree_add(trans, root_item.uuid, 4807 BTRFS_UUID_KEY_SUBVOL, 4808 key.objectid); 4809 if (ret < 0) { 4810 btrfs_warn(fs_info, "uuid_tree_add failed %d", 4811 ret); 4812 break; 4813 } 4814 } 4815 4816 if (!btrfs_is_empty_uuid(root_item.received_uuid)) { 4817 ret = btrfs_uuid_tree_add(trans, 4818 root_item.received_uuid, 4819 BTRFS_UUID_KEY_RECEIVED_SUBVOL, 4820 key.objectid); 4821 if (ret < 0) { 4822 btrfs_warn(fs_info, "uuid_tree_add failed %d", 4823 ret); 4824 break; 4825 } 4826 } 4827 4828 skip: 4829 btrfs_release_path(path); 4830 if (trans) { 4831 ret = btrfs_end_transaction(trans); 4832 trans = NULL; 4833 if (ret) 4834 break; 4835 } 4836 4837 if (key.offset < (u64)-1) { 4838 key.offset++; 4839 } else if (key.type < BTRFS_ROOT_ITEM_KEY) { 4840 key.offset = 0; 4841 key.type = BTRFS_ROOT_ITEM_KEY; 4842 } else if (key.objectid < (u64)-1) { 4843 key.offset = 0; 4844 key.type = BTRFS_ROOT_ITEM_KEY; 4845 key.objectid++; 4846 } else { 4847 break; 4848 } 4849 cond_resched(); 4850 } 4851 4852 out: 4853 btrfs_free_path(path); 4854 if (trans && !IS_ERR(trans)) 4855 btrfs_end_transaction(trans); 4856 if (ret) 4857 btrfs_warn(fs_info, "btrfs_uuid_scan_kthread failed %d", ret); 4858 else if 
(!closing) 4859 set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags); 4860 up(&fs_info->uuid_tree_rescan_sem); 4861 return 0; 4862 } 4863 4864 int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info) 4865 { 4866 struct btrfs_trans_handle *trans; 4867 struct btrfs_root *tree_root = fs_info->tree_root; 4868 struct btrfs_root *uuid_root; 4869 struct task_struct *task; 4870 int ret; 4871 4872 /* 4873 * 1 - root node 4874 * 1 - root item 4875 */ 4876 trans = btrfs_start_transaction(tree_root, 2); 4877 if (IS_ERR(trans)) 4878 return PTR_ERR(trans); 4879 4880 uuid_root = btrfs_create_tree(trans, BTRFS_UUID_TREE_OBJECTID); 4881 if (IS_ERR(uuid_root)) { 4882 ret = PTR_ERR(uuid_root); 4883 btrfs_abort_transaction(trans, ret); 4884 btrfs_end_transaction(trans); 4885 return ret; 4886 } 4887 4888 fs_info->uuid_root = uuid_root; 4889 4890 ret = btrfs_commit_transaction(trans); 4891 if (ret) 4892 return ret; 4893 4894 down(&fs_info->uuid_tree_rescan_sem); 4895 task = kthread_run(btrfs_uuid_scan_kthread, fs_info, "btrfs-uuid"); 4896 if (IS_ERR(task)) { 4897 /* fs_info->update_uuid_tree_gen remains 0 in all error cases */ 4898 btrfs_warn(fs_info, "failed to start uuid_scan task"); 4899 up(&fs_info->uuid_tree_rescan_sem); 4900 return PTR_ERR(task); 4901 } 4902 4903 return 0; 4904 } 4905 4906 /* 4907 * Shrinking a device means finding all of the device extents past 4908 * the new size, then following the back references to the chunks. 4909 * The chunk relocation code actually frees the device extents. 4910 */ 4911 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size) 4912 { 4913 struct btrfs_fs_info *fs_info = device->fs_info; 4914 struct btrfs_root *root = fs_info->dev_root; 4915 struct btrfs_trans_handle *trans; 4916 struct btrfs_dev_extent *dev_extent = NULL; 4917 struct btrfs_path *path; 4918 u64 length; 4919 u64 chunk_offset; 4920 int ret; 4921 int slot; 4922 int failed = 0; 4923 bool retried = false; 4924 struct extent_buffer *l; 4925 struct btrfs_key key; 4926 struct btrfs_super_block *super_copy = fs_info->super_copy; 4927 u64 old_total = btrfs_super_total_bytes(super_copy); 4928 u64 old_size = btrfs_device_get_total_bytes(device); 4929 u64 diff; 4930 u64 start; 4931 u64 free_diff = 0; 4932 4933 new_size = round_down(new_size, fs_info->sectorsize); 4934 start = new_size; 4935 diff = round_down(old_size - new_size, fs_info->sectorsize); 4936 4937 if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) 4938 return -EINVAL; 4939 4940 path = btrfs_alloc_path(); 4941 if (!path) 4942 return -ENOMEM; 4943 4944 path->reada = READA_BACK; 4945 4946 trans = btrfs_start_transaction(root, 0); 4947 if (IS_ERR(trans)) { 4948 btrfs_free_path(path); 4949 return PTR_ERR(trans); 4950 } 4951 4952 mutex_lock(&fs_info->chunk_mutex); 4953 4954 btrfs_device_set_total_bytes(device, new_size); 4955 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 4956 device->fs_devices->total_rw_bytes -= diff; 4957 4958 /* 4959 * The new free_chunk_space is new_size - used, so we have to 4960 * subtract the delta of the old free_chunk_space which included 4961 * old_size - used. If used > new_size then just subtract this 4962 * entire device's free space.
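 * Worked example (made-up sizes): old_size=100G, new_size=60G, used=40G:
 * free_diff = (100G - 40G) - (60G - 40G) = 40G, i.e. exactly diff. With
 * used=80G (> new_size): free_diff = 100G - 80G = 20G, the device's whole
 * remaining free space.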
4963 */ 4964 if (device->bytes_used < new_size) 4965 free_diff = (old_size - device->bytes_used) - 4966 (new_size - device->bytes_used); 4967 else 4968 free_diff = old_size - device->bytes_used; 4969 atomic64_sub(free_diff, &fs_info->free_chunk_space); 4970 } 4971 4972 /* 4973 * Once the device's size has been set to the new size, ensure all 4974 * in-memory chunks are synced to disk so that the loop below sees them 4975 * and relocates them accordingly. 4976 */ 4977 if (contains_pending_extent(device, &start, diff)) { 4978 mutex_unlock(&fs_info->chunk_mutex); 4979 ret = btrfs_commit_transaction(trans); 4980 if (ret) 4981 goto done; 4982 } else { 4983 mutex_unlock(&fs_info->chunk_mutex); 4984 btrfs_end_transaction(trans); 4985 } 4986 4987 again: 4988 key.objectid = device->devid; 4989 key.offset = (u64)-1; 4990 key.type = BTRFS_DEV_EXTENT_KEY; 4991 4992 do { 4993 mutex_lock(&fs_info->reclaim_bgs_lock); 4994 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 4995 if (ret < 0) { 4996 mutex_unlock(&fs_info->reclaim_bgs_lock); 4997 goto done; 4998 } 4999 5000 ret = btrfs_previous_item(root, path, 0, key.type); 5001 if (ret) { 5002 mutex_unlock(&fs_info->reclaim_bgs_lock); 5003 if (ret < 0) 5004 goto done; 5005 ret = 0; 5006 btrfs_release_path(path); 5007 break; 5008 } 5009 5010 l = path->nodes[0]; 5011 slot = path->slots[0]; 5012 btrfs_item_key_to_cpu(l, &key, path->slots[0]); 5013 5014 if (key.objectid != device->devid) { 5015 mutex_unlock(&fs_info->reclaim_bgs_lock); 5016 btrfs_release_path(path); 5017 break; 5018 } 5019 5020 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent); 5021 length = btrfs_dev_extent_length(l, dev_extent); 5022 5023 if (key.offset + length <= new_size) { 5024 mutex_unlock(&fs_info->reclaim_bgs_lock); 5025 btrfs_release_path(path); 5026 break; 5027 } 5028 5029 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent); 5030 btrfs_release_path(path); 5031 5032 /* 5033 * We may be relocating the only data chunk we have, 5034 * which could potentially end up with losing data's 5035 * raid profile, so lets allocate an empty one in 5036 * advance. 5037 */ 5038 ret = btrfs_may_alloc_data_chunk(fs_info, chunk_offset); 5039 if (ret < 0) { 5040 mutex_unlock(&fs_info->reclaim_bgs_lock); 5041 goto done; 5042 } 5043 5044 ret = btrfs_relocate_chunk(fs_info, chunk_offset); 5045 mutex_unlock(&fs_info->reclaim_bgs_lock); 5046 if (ret == -ENOSPC) { 5047 failed++; 5048 } else if (ret) { 5049 if (ret == -ETXTBSY) { 5050 btrfs_warn(fs_info, 5051 "could not shrink block group %llu due to active swapfile", 5052 chunk_offset); 5053 } 5054 goto done; 5055 } 5056 } while (key.offset-- > 0); 5057 5058 if (failed && !retried) { 5059 failed = 0; 5060 retried = true; 5061 goto again; 5062 } else if (failed && retried) { 5063 ret = -ENOSPC; 5064 goto done; 5065 } 5066 5067 /* Shrinking succeeded, else we would be at "done". 
*/ 5068 trans = btrfs_start_transaction(root, 0); 5069 if (IS_ERR(trans)) { 5070 ret = PTR_ERR(trans); 5071 goto done; 5072 } 5073 5074 mutex_lock(&fs_info->chunk_mutex); 5075 /* Clear all state bits beyond the shrunk device size */ 5076 clear_extent_bits(&device->alloc_state, new_size, (u64)-1, 5077 CHUNK_STATE_MASK); 5078 5079 btrfs_device_set_disk_total_bytes(device, new_size); 5080 if (list_empty(&device->post_commit_list)) 5081 list_add_tail(&device->post_commit_list, 5082 &trans->transaction->dev_update_list); 5083 5084 WARN_ON(diff > old_total); 5085 btrfs_set_super_total_bytes(super_copy, 5086 round_down(old_total - diff, fs_info->sectorsize)); 5087 mutex_unlock(&fs_info->chunk_mutex); 5088 5089 btrfs_reserve_chunk_metadata(trans, false); 5090 /* Now btrfs_update_device() will change the on-disk size. */ 5091 ret = btrfs_update_device(trans, device); 5092 btrfs_trans_release_chunk_metadata(trans); 5093 if (ret < 0) { 5094 btrfs_abort_transaction(trans, ret); 5095 btrfs_end_transaction(trans); 5096 } else { 5097 ret = btrfs_commit_transaction(trans); 5098 } 5099 done: 5100 btrfs_free_path(path); 5101 if (ret) { 5102 mutex_lock(&fs_info->chunk_mutex); 5103 btrfs_device_set_total_bytes(device, old_size); 5104 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 5105 device->fs_devices->total_rw_bytes += diff; 5106 atomic64_add(free_diff, &fs_info->free_chunk_space); 5107 } 5108 mutex_unlock(&fs_info->chunk_mutex); 5109 } 5110 return ret; 5111 } 5112 5113 static int btrfs_add_system_chunk(struct btrfs_fs_info *fs_info, 5114 struct btrfs_key *key, 5115 struct btrfs_chunk *chunk, int item_size) 5116 { 5117 struct btrfs_super_block *super_copy = fs_info->super_copy; 5118 struct btrfs_disk_key disk_key; 5119 u32 array_size; 5120 u8 *ptr; 5121 5122 lockdep_assert_held(&fs_info->chunk_mutex); 5123 5124 array_size = btrfs_super_sys_array_size(super_copy); 5125 if (array_size + item_size + sizeof(disk_key) 5126 > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) 5127 return -EFBIG; 5128 5129 ptr = super_copy->sys_chunk_array + array_size; 5130 btrfs_cpu_key_to_disk(&disk_key, key); 5131 memcpy(ptr, &disk_key, sizeof(disk_key)); 5132 ptr += sizeof(disk_key); 5133 memcpy(ptr, chunk, item_size); 5134 item_size += sizeof(disk_key); 5135 btrfs_set_super_sys_array_size(super_copy, array_size + item_size); 5136 5137 return 0; 5138 } 5139 5140 /* 5141 * sort the devices in descending order by max_avail, total_avail 5142 */ 5143 static int btrfs_cmp_device_info(const void *a, const void *b) 5144 { 5145 const struct btrfs_device_info *di_a = a; 5146 const struct btrfs_device_info *di_b = b; 5147 5148 if (di_a->max_avail > di_b->max_avail) 5149 return -1; 5150 if (di_a->max_avail < di_b->max_avail) 5151 return 1; 5152 if (di_a->total_avail > di_b->total_avail) 5153 return -1; 5154 if (di_a->total_avail < di_b->total_avail) 5155 return 1; 5156 return 0; 5157 } 5158 5159 static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type) 5160 { 5161 if (!(type & BTRFS_BLOCK_GROUP_RAID56_MASK)) 5162 return; 5163 5164 btrfs_set_fs_incompat(info, RAID56); 5165 } 5166 5167 static void check_raid1c34_incompat_flag(struct btrfs_fs_info *info, u64 type) 5168 { 5169 if (!(type & (BTRFS_BLOCK_GROUP_RAID1C3 | BTRFS_BLOCK_GROUP_RAID1C4))) 5170 return; 5171 5172 btrfs_set_fs_incompat(info, RAID1C34); 5173 } 5174 5175 /* 5176 * Structure used internally for btrfs_create_chunk() function. 5177 * Wraps needed parameters. 
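 * For example, a RAID10 chunk over 4 writable devices ends up with
 * num_stripes = 4, sub_stripes = 2, ncopies = 2 and dev_stripes = 1.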
5178 */ 5179 struct alloc_chunk_ctl { 5180 u64 start; 5181 u64 type; 5182 /* Total number of stripes to allocate */ 5183 int num_stripes; 5184 /* sub_stripes info for map */ 5185 int sub_stripes; 5186 /* Stripes per device */ 5187 int dev_stripes; 5188 /* Maximum number of devices to use */ 5189 int devs_max; 5190 /* Minimum number of devices to use */ 5191 int devs_min; 5192 /* ndevs has to be a multiple of this */ 5193 int devs_increment; 5194 /* Number of copies */ 5195 int ncopies; 5196 /* Number of stripes worth of bytes to store parity information */ 5197 int nparity; 5198 u64 max_stripe_size; 5199 u64 max_chunk_size; 5200 u64 dev_extent_min; 5201 u64 stripe_size; 5202 u64 chunk_size; 5203 int ndevs; 5204 }; 5205 5206 static void init_alloc_chunk_ctl_policy_regular( 5207 struct btrfs_fs_devices *fs_devices, 5208 struct alloc_chunk_ctl *ctl) 5209 { 5210 struct btrfs_space_info *space_info; 5211 5212 space_info = btrfs_find_space_info(fs_devices->fs_info, ctl->type); 5213 ASSERT(space_info); 5214 5215 ctl->max_chunk_size = READ_ONCE(space_info->chunk_size); 5216 ctl->max_stripe_size = min_t(u64, ctl->max_chunk_size, SZ_1G); 5217 5218 if (ctl->type & BTRFS_BLOCK_GROUP_SYSTEM) 5219 ctl->devs_max = min_t(int, ctl->devs_max, BTRFS_MAX_DEVS_SYS_CHUNK); 5220 5221 /* We don't want a chunk larger than 10% of writable space */ 5222 ctl->max_chunk_size = min(mult_perc(fs_devices->total_rw_bytes, 10), 5223 ctl->max_chunk_size); 5224 ctl->dev_extent_min = btrfs_stripe_nr_to_offset(ctl->dev_stripes); 5225 } 5226 5227 static void init_alloc_chunk_ctl_policy_zoned( 5228 struct btrfs_fs_devices *fs_devices, 5229 struct alloc_chunk_ctl *ctl) 5230 { 5231 u64 zone_size = fs_devices->fs_info->zone_size; 5232 u64 limit; 5233 int min_num_stripes = ctl->devs_min * ctl->dev_stripes; 5234 int min_data_stripes = (min_num_stripes - ctl->nparity) / ctl->ncopies; 5235 u64 min_chunk_size = min_data_stripes * zone_size; 5236 u64 type = ctl->type; 5237 5238 ctl->max_stripe_size = zone_size; 5239 if (type & BTRFS_BLOCK_GROUP_DATA) { 5240 ctl->max_chunk_size = round_down(BTRFS_MAX_DATA_CHUNK_SIZE, 5241 zone_size); 5242 } else if (type & BTRFS_BLOCK_GROUP_METADATA) { 5243 ctl->max_chunk_size = ctl->max_stripe_size; 5244 } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) { 5245 ctl->max_chunk_size = 2 * ctl->max_stripe_size; 5246 ctl->devs_max = min_t(int, ctl->devs_max, 5247 BTRFS_MAX_DEVS_SYS_CHUNK); 5248 } else { 5249 BUG(); 5250 } 5251 5252 /* We don't want a chunk larger than 10% of writable space */ 5253 limit = max(round_down(mult_perc(fs_devices->total_rw_bytes, 10), 5254 zone_size), 5255 min_chunk_size); 5256 ctl->max_chunk_size = min(limit, ctl->max_chunk_size); 5257 ctl->dev_extent_min = zone_size * ctl->dev_stripes; 5258 } 5259 5260 static void init_alloc_chunk_ctl(struct btrfs_fs_devices *fs_devices, 5261 struct alloc_chunk_ctl *ctl) 5262 { 5263 int index = btrfs_bg_flags_to_raid_index(ctl->type); 5264 5265 ctl->sub_stripes = btrfs_raid_array[index].sub_stripes; 5266 ctl->dev_stripes = btrfs_raid_array[index].dev_stripes; 5267 ctl->devs_max = btrfs_raid_array[index].devs_max; 5268 if (!ctl->devs_max) 5269 ctl->devs_max = BTRFS_MAX_DEVS(fs_devices->fs_info); 5270 ctl->devs_min = btrfs_raid_array[index].devs_min; 5271 ctl->devs_increment = btrfs_raid_array[index].devs_increment; 5272 ctl->ncopies = btrfs_raid_array[index].ncopies; 5273 ctl->nparity = btrfs_raid_array[index].nparity; 5274 ctl->ndevs = 0; 5275 5276 switch (fs_devices->chunk_alloc_policy) { 5277 case BTRFS_CHUNK_ALLOC_REGULAR: 5278 
init_alloc_chunk_ctl_policy_regular(fs_devices, ctl); 5279 break; 5280 case BTRFS_CHUNK_ALLOC_ZONED: 5281 init_alloc_chunk_ctl_policy_zoned(fs_devices, ctl); 5282 break; 5283 default: 5284 BUG(); 5285 } 5286 } 5287 5288 static int gather_device_info(struct btrfs_fs_devices *fs_devices, 5289 struct alloc_chunk_ctl *ctl, 5290 struct btrfs_device_info *devices_info) 5291 { 5292 struct btrfs_fs_info *info = fs_devices->fs_info; 5293 struct btrfs_device *device; 5294 u64 total_avail; 5295 u64 dev_extent_want = ctl->max_stripe_size * ctl->dev_stripes; 5296 int ret; 5297 int ndevs = 0; 5298 u64 max_avail; 5299 u64 dev_offset; 5300 5301 /* 5302 * in the first pass through the devices list, we gather information 5303 * about the available holes on each device. 5304 */ 5305 list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) { 5306 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 5307 WARN(1, KERN_ERR 5308 "BTRFS: read-only device in alloc_list\n"); 5309 continue; 5310 } 5311 5312 if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, 5313 &device->dev_state) || 5314 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) 5315 continue; 5316 5317 if (device->total_bytes > device->bytes_used) 5318 total_avail = device->total_bytes - device->bytes_used; 5319 else 5320 total_avail = 0; 5321 5322 /* If there is no space on this device, skip it. */ 5323 if (total_avail < ctl->dev_extent_min) 5324 continue; 5325 5326 ret = find_free_dev_extent(device, dev_extent_want, &dev_offset, 5327 &max_avail); 5328 if (ret && ret != -ENOSPC) 5329 return ret; 5330 5331 if (ret == 0) 5332 max_avail = dev_extent_want; 5333 5334 if (max_avail < ctl->dev_extent_min) { 5335 if (btrfs_test_opt(info, ENOSPC_DEBUG)) 5336 btrfs_debug(info, 5337 "%s: devid %llu has no free space, have=%llu want=%llu", 5338 __func__, device->devid, max_avail, 5339 ctl->dev_extent_min); 5340 continue; 5341 } 5342 5343 if (ndevs == fs_devices->rw_devices) { 5344 WARN(1, "%s: found more than %llu devices\n", 5345 __func__, fs_devices->rw_devices); 5346 break; 5347 } 5348 devices_info[ndevs].dev_offset = dev_offset; 5349 devices_info[ndevs].max_avail = max_avail; 5350 devices_info[ndevs].total_avail = total_avail; 5351 devices_info[ndevs].dev = device; 5352 ++ndevs; 5353 } 5354 ctl->ndevs = ndevs; 5355 5356 /* 5357 * now sort the devices by hole size / available space 5358 */ 5359 sort(devices_info, ndevs, sizeof(struct btrfs_device_info), 5360 btrfs_cmp_device_info, NULL); 5361 5362 return 0; 5363 } 5364 5365 static int decide_stripe_size_regular(struct alloc_chunk_ctl *ctl, 5366 struct btrfs_device_info *devices_info) 5367 { 5368 /* Number of stripes that count for block group size */ 5369 int data_stripes; 5370 5371 /* 5372 * The primary goal is to maximize the number of stripes, so use as 5373 * many devices as possible, even if the stripes are not maximum sized. 5374 * 5375 * The DUP profile stores more than one stripe per device, the 5376 * max_avail is the total size so we have to adjust. 5377 */ 5378 ctl->stripe_size = div_u64(devices_info[ctl->ndevs - 1].max_avail, 5379 ctl->dev_stripes); 5380 ctl->num_stripes = ctl->ndevs * ctl->dev_stripes; 5381 5382 /* This will have to be fixed for RAID1 and RAID10 over more drives */ 5383 data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies; 5384 5385 /* 5386 * Use the number of data stripes to figure out how big this chunk is 5387 * really going to be in terms of logical address space, and compare 5388 * that answer with the max chunk size. 
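(E.g. RAID6 across 10 devices gives data_stripes = 10 - 2 = 8, so a 1G
 * stripe_size would describe an 8G chunk.)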
If it's higher, we try to 5389 * reduce stripe_size. 5390 */ 5391 if (ctl->stripe_size * data_stripes > ctl->max_chunk_size) { 5392 /* 5393 * Reduce stripe_size, round it up to a 16MB boundary again and 5394 * then use it, unless it ends up being even bigger than the 5395 * previous value we had already. 5396 */ 5397 ctl->stripe_size = min(round_up(div_u64(ctl->max_chunk_size, 5398 data_stripes), SZ_16M), 5399 ctl->stripe_size); 5400 } 5401 5402 /* Stripe size should not go beyond 1G. */ 5403 ctl->stripe_size = min_t(u64, ctl->stripe_size, SZ_1G); 5404 5405 /* Align to BTRFS_STRIPE_LEN */ 5406 ctl->stripe_size = round_down(ctl->stripe_size, BTRFS_STRIPE_LEN); 5407 ctl->chunk_size = ctl->stripe_size * data_stripes; 5408 5409 return 0; 5410 } 5411 5412 static int decide_stripe_size_zoned(struct alloc_chunk_ctl *ctl, 5413 struct btrfs_device_info *devices_info) 5414 { 5415 u64 zone_size = devices_info[0].dev->zone_info->zone_size; 5416 /* Number of stripes that count for block group size */ 5417 int data_stripes; 5418 5419 /* 5420 * It should hold because: 5421 * dev_extent_min == dev_extent_want == zone_size * dev_stripes 5422 */ 5423 ASSERT(devices_info[ctl->ndevs - 1].max_avail == ctl->dev_extent_min); 5424 5425 ctl->stripe_size = zone_size; 5426 ctl->num_stripes = ctl->ndevs * ctl->dev_stripes; 5427 data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies; 5428 5429 /* stripe_size is fixed in a zoned filesystem. Reduce ndevs instead. */ 5430 if (ctl->stripe_size * data_stripes > ctl->max_chunk_size) { 5431 ctl->ndevs = div_u64(div_u64(ctl->max_chunk_size * ctl->ncopies, 5432 ctl->stripe_size) + ctl->nparity, 5433 ctl->dev_stripes); 5434 ctl->num_stripes = ctl->ndevs * ctl->dev_stripes; 5435 data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies; 5436 ASSERT(ctl->stripe_size * data_stripes <= ctl->max_chunk_size); 5437 } 5438 5439 ctl->chunk_size = ctl->stripe_size * data_stripes; 5440 5441 return 0; 5442 } 5443 5444 static int decide_stripe_size(struct btrfs_fs_devices *fs_devices, 5445 struct alloc_chunk_ctl *ctl, 5446 struct btrfs_device_info *devices_info) 5447 { 5448 struct btrfs_fs_info *info = fs_devices->fs_info; 5449 5450 /* 5451 * Round down to the number of usable stripes; devs_increment can be any 5452 * number so we can't use round_down(), which requires a power of 2, while 5453 * rounddown() is safe.
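 * E.g. 7 usable devices with devs_increment == 2 (RAID1) are rounded
 * down to ndevs == 6.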
5454 */ 5455 ctl->ndevs = rounddown(ctl->ndevs, ctl->devs_increment); 5456 5457 if (ctl->ndevs < ctl->devs_min) { 5458 if (btrfs_test_opt(info, ENOSPC_DEBUG)) { 5459 btrfs_debug(info, 5460 "%s: not enough devices with free space: have=%d minimum required=%d", 5461 __func__, ctl->ndevs, ctl->devs_min); 5462 } 5463 return -ENOSPC; 5464 } 5465 5466 ctl->ndevs = min(ctl->ndevs, ctl->devs_max); 5467 5468 switch (fs_devices->chunk_alloc_policy) { 5469 case BTRFS_CHUNK_ALLOC_REGULAR: 5470 return decide_stripe_size_regular(ctl, devices_info); 5471 case BTRFS_CHUNK_ALLOC_ZONED: 5472 return decide_stripe_size_zoned(ctl, devices_info); 5473 default: 5474 BUG(); 5475 } 5476 } 5477 5478 static void chunk_map_device_set_bits(struct btrfs_chunk_map *map, unsigned int bits) 5479 { 5480 for (int i = 0; i < map->num_stripes; i++) { 5481 struct btrfs_io_stripe *stripe = &map->stripes[i]; 5482 struct btrfs_device *device = stripe->dev; 5483 5484 set_extent_bit(&device->alloc_state, stripe->physical, 5485 stripe->physical + map->stripe_size - 1, 5486 bits | EXTENT_NOWAIT, NULL); 5487 } 5488 } 5489 5490 static void chunk_map_device_clear_bits(struct btrfs_chunk_map *map, unsigned int bits) 5491 { 5492 for (int i = 0; i < map->num_stripes; i++) { 5493 struct btrfs_io_stripe *stripe = &map->stripes[i]; 5494 struct btrfs_device *device = stripe->dev; 5495 5496 __clear_extent_bit(&device->alloc_state, stripe->physical, 5497 stripe->physical + map->stripe_size - 1, 5498 bits | EXTENT_NOWAIT, 5499 NULL, NULL); 5500 } 5501 } 5502 5503 void btrfs_remove_chunk_map(struct btrfs_fs_info *fs_info, struct btrfs_chunk_map *map) 5504 { 5505 write_lock(&fs_info->mapping_tree_lock); 5506 rb_erase_cached(&map->rb_node, &fs_info->mapping_tree); 5507 RB_CLEAR_NODE(&map->rb_node); 5508 chunk_map_device_clear_bits(map, CHUNK_ALLOCATED); 5509 write_unlock(&fs_info->mapping_tree_lock); 5510 5511 /* Once for the tree reference. 
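 * The caller may still hold its own reference on the map.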
*/ 5512 btrfs_free_chunk_map(map); 5513 } 5514 5515 EXPORT_FOR_TESTS 5516 int btrfs_add_chunk_map(struct btrfs_fs_info *fs_info, struct btrfs_chunk_map *map) 5517 { 5518 struct rb_node **p; 5519 struct rb_node *parent = NULL; 5520 bool leftmost = true; 5521 5522 write_lock(&fs_info->mapping_tree_lock); 5523 p = &fs_info->mapping_tree.rb_root.rb_node; 5524 while (*p) { 5525 struct btrfs_chunk_map *entry; 5526 5527 parent = *p; 5528 entry = rb_entry(parent, struct btrfs_chunk_map, rb_node); 5529 5530 if (map->start < entry->start) { 5531 p = &(*p)->rb_left; 5532 } else if (map->start > entry->start) { 5533 p = &(*p)->rb_right; 5534 leftmost = false; 5535 } else { 5536 write_unlock(&fs_info->mapping_tree_lock); 5537 return -EEXIST; 5538 } 5539 } 5540 rb_link_node(&map->rb_node, parent, p); 5541 rb_insert_color_cached(&map->rb_node, &fs_info->mapping_tree, leftmost); 5542 chunk_map_device_set_bits(map, CHUNK_ALLOCATED); 5543 chunk_map_device_clear_bits(map, CHUNK_TRIMMED); 5544 write_unlock(&fs_info->mapping_tree_lock); 5545 5546 return 0; 5547 } 5548 5549 EXPORT_FOR_TESTS 5550 struct btrfs_chunk_map *btrfs_alloc_chunk_map(int num_stripes, gfp_t gfp) 5551 { 5552 struct btrfs_chunk_map *map; 5553 5554 map = kmalloc(btrfs_chunk_map_size(num_stripes), gfp); 5555 if (!map) 5556 return NULL; 5557 5558 refcount_set(&map->refs, 1); 5559 RB_CLEAR_NODE(&map->rb_node); 5560 5561 return map; 5562 } 5563 5564 struct btrfs_chunk_map *btrfs_clone_chunk_map(struct btrfs_chunk_map *map, gfp_t gfp) 5565 { 5566 const int size = btrfs_chunk_map_size(map->num_stripes); 5567 struct btrfs_chunk_map *clone; 5568 5569 clone = kmemdup(map, size, gfp); 5570 if (!clone) 5571 return NULL; 5572 5573 refcount_set(&clone->refs, 1); 5574 RB_CLEAR_NODE(&clone->rb_node); 5575 5576 return clone; 5577 } 5578 5579 static struct btrfs_block_group *create_chunk(struct btrfs_trans_handle *trans, 5580 struct alloc_chunk_ctl *ctl, 5581 struct btrfs_device_info *devices_info) 5582 { 5583 struct btrfs_fs_info *info = trans->fs_info; 5584 struct btrfs_chunk_map *map; 5585 struct btrfs_block_group *block_group; 5586 u64 start = ctl->start; 5587 u64 type = ctl->type; 5588 int ret; 5589 int i; 5590 int j; 5591 5592 map = btrfs_alloc_chunk_map(ctl->num_stripes, GFP_NOFS); 5593 if (!map) 5594 return ERR_PTR(-ENOMEM); 5595 5596 map->start = start; 5597 map->chunk_len = ctl->chunk_size; 5598 map->stripe_size = ctl->stripe_size; 5599 map->type = type; 5600 map->io_align = BTRFS_STRIPE_LEN; 5601 map->io_width = BTRFS_STRIPE_LEN; 5602 map->sub_stripes = ctl->sub_stripes; 5603 map->num_stripes = ctl->num_stripes; 5604 5605 for (i = 0; i < ctl->ndevs; ++i) { 5606 for (j = 0; j < ctl->dev_stripes; ++j) { 5607 int s = i * ctl->dev_stripes + j; 5608 map->stripes[s].dev = devices_info[i].dev; 5609 map->stripes[s].physical = devices_info[i].dev_offset + 5610 j * ctl->stripe_size; 5611 } 5612 } 5613 5614 trace_btrfs_chunk_alloc(info, map, start, ctl->chunk_size); 5615 5616 ret = btrfs_add_chunk_map(info, map); 5617 if (ret) { 5618 btrfs_free_chunk_map(map); 5619 return ERR_PTR(ret); 5620 } 5621 5622 block_group = btrfs_make_block_group(trans, type, start, ctl->chunk_size); 5623 if (IS_ERR(block_group)) { 5624 btrfs_remove_chunk_map(info, map); 5625 return block_group; 5626 } 5627 5628 for (int i = 0; i < map->num_stripes; i++) { 5629 struct btrfs_device *dev = map->stripes[i].dev; 5630 5631 btrfs_device_set_bytes_used(dev, 5632 dev->bytes_used + ctl->stripe_size); 5633 if (list_empty(&dev->post_commit_list)) 5634 list_add_tail(&dev->post_commit_list, 5635 
&trans->transaction->dev_update_list); 5636 } 5637 5638 atomic64_sub(ctl->stripe_size * map->num_stripes, 5639 &info->free_chunk_space); 5640 5641 check_raid56_incompat_flag(info, type); 5642 check_raid1c34_incompat_flag(info, type); 5643 5644 return block_group; 5645 } 5646 5647 struct btrfs_block_group *btrfs_create_chunk(struct btrfs_trans_handle *trans, 5648 u64 type) 5649 { 5650 struct btrfs_fs_info *info = trans->fs_info; 5651 struct btrfs_fs_devices *fs_devices = info->fs_devices; 5652 struct btrfs_device_info *devices_info = NULL; 5653 struct alloc_chunk_ctl ctl; 5654 struct btrfs_block_group *block_group; 5655 int ret; 5656 5657 lockdep_assert_held(&info->chunk_mutex); 5658 5659 if (!alloc_profile_is_valid(type, 0)) { 5660 ASSERT(0); 5661 return ERR_PTR(-EINVAL); 5662 } 5663 5664 if (list_empty(&fs_devices->alloc_list)) { 5665 if (btrfs_test_opt(info, ENOSPC_DEBUG)) 5666 btrfs_debug(info, "%s: no writable device", __func__); 5667 return ERR_PTR(-ENOSPC); 5668 } 5669 5670 if (!(type & BTRFS_BLOCK_GROUP_TYPE_MASK)) { 5671 btrfs_err(info, "invalid chunk type 0x%llx requested", type); 5672 ASSERT(0); 5673 return ERR_PTR(-EINVAL); 5674 } 5675 5676 ctl.start = find_next_chunk(info); 5677 ctl.type = type; 5678 init_alloc_chunk_ctl(fs_devices, &ctl); 5679 5680 devices_info = kcalloc(fs_devices->rw_devices, sizeof(*devices_info), 5681 GFP_NOFS); 5682 if (!devices_info) 5683 return ERR_PTR(-ENOMEM); 5684 5685 ret = gather_device_info(fs_devices, &ctl, devices_info); 5686 if (ret < 0) { 5687 block_group = ERR_PTR(ret); 5688 goto out; 5689 } 5690 5691 ret = decide_stripe_size(fs_devices, &ctl, devices_info); 5692 if (ret < 0) { 5693 block_group = ERR_PTR(ret); 5694 goto out; 5695 } 5696 5697 block_group = create_chunk(trans, &ctl, devices_info); 5698 5699 out: 5700 kfree(devices_info); 5701 return block_group; 5702 } 5703 5704 /* 5705 * This function, btrfs_chunk_alloc_add_chunk_item(), typically belongs to 5706 * phase 1 of chunk allocation. It belongs to phase 2 only when allocating 5707 * system chunks. 5708 * 5709 * See the comment at btrfs_chunk_alloc() for details about the chunk allocation 5710 * phases. 5711 */ 5712 int btrfs_chunk_alloc_add_chunk_item(struct btrfs_trans_handle *trans, 5713 struct btrfs_block_group *bg) 5714 { 5715 struct btrfs_fs_info *fs_info = trans->fs_info; 5716 struct btrfs_root *chunk_root = fs_info->chunk_root; 5717 struct btrfs_key key; 5718 struct btrfs_chunk *chunk; 5719 struct btrfs_stripe *stripe; 5720 struct btrfs_chunk_map *map; 5721 size_t item_size; 5722 int i; 5723 int ret; 5724 5725 /* 5726 * We take the chunk_mutex for 2 reasons: 5727 * 5728 * 1) Updates and insertions in the chunk btree must be done while holding 5729 * the chunk_mutex, as well as updating the system chunk array in the 5730 * superblock. See the comment on top of btrfs_chunk_alloc() for the 5731 * details; 5732 * 5733 * 2) To prevent races with the final phase of a device replace operation 5734 * that replaces the device object associated with the map's stripes, 5735 * because the device object's id can change at any time during that 5736 * final phase of the device replace operation 5737 * (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the 5738 * replaced device and then see it with an ID of BTRFS_DEV_REPLACE_DEVID, 5739 * which would cause a failure when updating the device item, which does 5740 * not exist, or persisting a stripe of the chunk item with such an ID.
5741 * Here we can't use the device_list_mutex because our caller already 5742 * has locked the chunk_mutex, and the final phase of device replace 5743 * acquires both mutexes - first the device_list_mutex and then the 5744 * chunk_mutex. Using any of those two mutexes protects us from a 5745 * concurrent device replace. 5746 */ 5747 lockdep_assert_held(&fs_info->chunk_mutex); 5748 5749 map = btrfs_get_chunk_map(fs_info, bg->start, bg->length); 5750 if (IS_ERR(map)) { 5751 ret = PTR_ERR(map); 5752 btrfs_abort_transaction(trans, ret); 5753 return ret; 5754 } 5755 5756 item_size = btrfs_chunk_item_size(map->num_stripes); 5757 5758 chunk = kzalloc(item_size, GFP_NOFS); 5759 if (!chunk) { 5760 ret = -ENOMEM; 5761 btrfs_abort_transaction(trans, ret); 5762 goto out; 5763 } 5764 5765 for (i = 0; i < map->num_stripes; i++) { 5766 struct btrfs_device *device = map->stripes[i].dev; 5767 5768 ret = btrfs_update_device(trans, device); 5769 if (ret) 5770 goto out; 5771 } 5772 5773 stripe = &chunk->stripe; 5774 for (i = 0; i < map->num_stripes; i++) { 5775 struct btrfs_device *device = map->stripes[i].dev; 5776 const u64 dev_offset = map->stripes[i].physical; 5777 5778 btrfs_set_stack_stripe_devid(stripe, device->devid); 5779 btrfs_set_stack_stripe_offset(stripe, dev_offset); 5780 memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE); 5781 stripe++; 5782 } 5783 5784 btrfs_set_stack_chunk_length(chunk, bg->length); 5785 btrfs_set_stack_chunk_owner(chunk, BTRFS_EXTENT_TREE_OBJECTID); 5786 btrfs_set_stack_chunk_stripe_len(chunk, BTRFS_STRIPE_LEN); 5787 btrfs_set_stack_chunk_type(chunk, map->type); 5788 btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes); 5789 btrfs_set_stack_chunk_io_align(chunk, BTRFS_STRIPE_LEN); 5790 btrfs_set_stack_chunk_io_width(chunk, BTRFS_STRIPE_LEN); 5791 btrfs_set_stack_chunk_sector_size(chunk, fs_info->sectorsize); 5792 btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes); 5793 5794 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 5795 key.type = BTRFS_CHUNK_ITEM_KEY; 5796 key.offset = bg->start; 5797 5798 ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size); 5799 if (ret) 5800 goto out; 5801 5802 set_bit(BLOCK_GROUP_FLAG_CHUNK_ITEM_INSERTED, &bg->runtime_flags); 5803 5804 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) { 5805 ret = btrfs_add_system_chunk(fs_info, &key, chunk, item_size); 5806 if (ret) 5807 goto out; 5808 } 5809 5810 out: 5811 kfree(chunk); 5812 btrfs_free_chunk_map(map); 5813 return ret; 5814 } 5815 5816 static noinline int init_first_rw_device(struct btrfs_trans_handle *trans) 5817 { 5818 struct btrfs_fs_info *fs_info = trans->fs_info; 5819 u64 alloc_profile; 5820 struct btrfs_block_group *meta_bg; 5821 struct btrfs_block_group *sys_bg; 5822 5823 /* 5824 * When adding a new device for sprouting, the seed device is read-only 5825 * so we must first allocate a metadata and a system chunk. But before 5826 * adding the block group items to the extent, device and chunk btrees, 5827 * we must first: 5828 * 5829 * 1) Create both chunks without doing any changes to the btrees, as 5830 * otherwise we would get -ENOSPC since the block groups from the 5831 * seed device are read-only; 5832 * 5833 * 2) Add the device item for the new sprout device - finishing the setup 5834 * of a new block group requires updating the device item in the chunk 5835 * btree, so it must exist when we attempt to do it. The previous step 5836 * ensures this does not fail with -ENOSPC. 
5837 * 5838 * After that we can add the block group items to their btrees: 5839 * update existing device item in the chunk btree, add a new block group 5840 * item to the extent btree, add a new chunk item to the chunk btree and 5841 * finally add the new device extent items to the devices btree. 5842 */ 5843 5844 alloc_profile = btrfs_metadata_alloc_profile(fs_info); 5845 meta_bg = btrfs_create_chunk(trans, alloc_profile); 5846 if (IS_ERR(meta_bg)) 5847 return PTR_ERR(meta_bg); 5848 5849 alloc_profile = btrfs_system_alloc_profile(fs_info); 5850 sys_bg = btrfs_create_chunk(trans, alloc_profile); 5851 if (IS_ERR(sys_bg)) 5852 return PTR_ERR(sys_bg); 5853 5854 return 0; 5855 } 5856 5857 static inline int btrfs_chunk_max_errors(struct btrfs_chunk_map *map) 5858 { 5859 const int index = btrfs_bg_flags_to_raid_index(map->type); 5860 5861 return btrfs_raid_array[index].tolerated_failures; 5862 } 5863 5864 bool btrfs_chunk_writeable(struct btrfs_fs_info *fs_info, u64 chunk_offset) 5865 { 5866 struct btrfs_chunk_map *map; 5867 int miss_ndevs = 0; 5868 int i; 5869 bool ret = true; 5870 5871 map = btrfs_get_chunk_map(fs_info, chunk_offset, 1); 5872 if (IS_ERR(map)) 5873 return false; 5874 5875 for (i = 0; i < map->num_stripes; i++) { 5876 if (test_bit(BTRFS_DEV_STATE_MISSING, 5877 &map->stripes[i].dev->dev_state)) { 5878 miss_ndevs++; 5879 continue; 5880 } 5881 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, 5882 &map->stripes[i].dev->dev_state)) { 5883 ret = false; 5884 goto end; 5885 } 5886 } 5887 5888 /* 5889 * If the number of missing devices is larger than max errors, we can 5890 * not write the data into that chunk successfully. 5891 */ 5892 if (miss_ndevs > btrfs_chunk_max_errors(map)) 5893 ret = false; 5894 end: 5895 btrfs_free_chunk_map(map); 5896 return ret; 5897 } 5898 5899 void btrfs_mapping_tree_free(struct btrfs_fs_info *fs_info) 5900 { 5901 write_lock(&fs_info->mapping_tree_lock); 5902 while (!RB_EMPTY_ROOT(&fs_info->mapping_tree.rb_root)) { 5903 struct btrfs_chunk_map *map; 5904 struct rb_node *node; 5905 5906 node = rb_first_cached(&fs_info->mapping_tree); 5907 map = rb_entry(node, struct btrfs_chunk_map, rb_node); 5908 rb_erase_cached(&map->rb_node, &fs_info->mapping_tree); 5909 RB_CLEAR_NODE(&map->rb_node); 5910 chunk_map_device_clear_bits(map, CHUNK_ALLOCATED); 5911 /* Once for the tree ref. */ 5912 btrfs_free_chunk_map(map); 5913 cond_resched_rwlock_write(&fs_info->mapping_tree_lock); 5914 } 5915 write_unlock(&fs_info->mapping_tree_lock); 5916 } 5917 5918 int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len) 5919 { 5920 struct btrfs_chunk_map *map; 5921 enum btrfs_raid_types index; 5922 int ret = 1; 5923 5924 map = btrfs_get_chunk_map(fs_info, logical, len); 5925 if (IS_ERR(map)) 5926 /* 5927 * We could return errors for these cases, but that could get 5928 * ugly and we'd probably do the same thing which is just not do 5929 * anything else and exit, so return 1 so the callers don't try 5930 * to use other copies. 5931 */ 5932 return 1; 5933 5934 index = btrfs_bg_flags_to_raid_index(map->type); 5935 5936 /* Non-RAID56, use their ncopies from btrfs_raid_array. */ 5937 if (!(map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)) 5938 ret = btrfs_raid_array[index].ncopies; 5939 else if (map->type & BTRFS_BLOCK_GROUP_RAID5) 5940 ret = 2; 5941 else if (map->type & BTRFS_BLOCK_GROUP_RAID6) 5942 /* 5943 * There could be two corrupted data stripes, we need 5944 * to loop retry in order to rebuild the correct data. 
5945 * 5946 * Fail a stripe at a time on every retry except the 5947 * stripe under reconstruction. 5948 */ 5949 ret = map->num_stripes; 5950 btrfs_free_chunk_map(map); 5951 return ret; 5952 } 5953 5954 unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info, 5955 u64 logical) 5956 { 5957 struct btrfs_chunk_map *map; 5958 unsigned long len = fs_info->sectorsize; 5959 5960 if (!btrfs_fs_incompat(fs_info, RAID56)) 5961 return len; 5962 5963 map = btrfs_get_chunk_map(fs_info, logical, len); 5964 5965 if (!WARN_ON(IS_ERR(map))) { 5966 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) 5967 len = btrfs_stripe_nr_to_offset(nr_data_stripes(map)); 5968 btrfs_free_chunk_map(map); 5969 } 5970 return len; 5971 } 5972 5973 int btrfs_is_parity_mirror(struct btrfs_fs_info *fs_info, u64 logical, u64 len) 5974 { 5975 struct btrfs_chunk_map *map; 5976 int ret = 0; 5977 5978 if (!btrfs_fs_incompat(fs_info, RAID56)) 5979 return 0; 5980 5981 map = btrfs_get_chunk_map(fs_info, logical, len); 5982 5983 if (!WARN_ON(IS_ERR(map))) { 5984 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) 5985 ret = 1; 5986 btrfs_free_chunk_map(map); 5987 } 5988 return ret; 5989 } 5990 5991 static int find_live_mirror(struct btrfs_fs_info *fs_info, 5992 struct btrfs_chunk_map *map, int first, 5993 int dev_replace_is_ongoing) 5994 { 5995 const enum btrfs_read_policy policy = READ_ONCE(fs_info->fs_devices->read_policy); 5996 int i; 5997 int num_stripes; 5998 int preferred_mirror; 5999 int tolerance; 6000 struct btrfs_device *srcdev; 6001 6002 ASSERT((map->type & 6003 (BTRFS_BLOCK_GROUP_RAID1_MASK | BTRFS_BLOCK_GROUP_RAID10))); 6004 6005 if (map->type & BTRFS_BLOCK_GROUP_RAID10) 6006 num_stripes = map->sub_stripes; 6007 else 6008 num_stripes = map->num_stripes; 6009 6010 switch (policy) { 6011 default: 6012 /* Shouldn't happen, just warn and use pid instead of failing */ 6013 btrfs_warn_rl(fs_info, "unknown read_policy type %u, reset to pid", 6014 policy); 6015 WRITE_ONCE(fs_info->fs_devices->read_policy, BTRFS_READ_POLICY_PID); 6016 fallthrough; 6017 case BTRFS_READ_POLICY_PID: 6018 preferred_mirror = first + (current->pid % num_stripes); 6019 break; 6020 } 6021 6022 if (dev_replace_is_ongoing && 6023 fs_info->dev_replace.cont_reading_from_srcdev_mode == 6024 BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID) 6025 srcdev = fs_info->dev_replace.srcdev; 6026 else 6027 srcdev = NULL; 6028 6029 /* 6030 * try to avoid the drive that is the source drive for a 6031 * dev-replace procedure, only choose it if no other non-missing 6032 * mirror is available 6033 */ 6034 for (tolerance = 0; tolerance < 2; tolerance++) { 6035 if (map->stripes[preferred_mirror].dev->bdev && 6036 (tolerance || map->stripes[preferred_mirror].dev != srcdev)) 6037 return preferred_mirror; 6038 for (i = first; i < first + num_stripes; i++) { 6039 if (map->stripes[i].dev->bdev && 6040 (tolerance || map->stripes[i].dev != srcdev)) 6041 return i; 6042 } 6043 } 6044 6045 /* we couldn't find one that doesn't fail. 
Just return something 6046 * and the io error handling code will clean up eventually 6047 */ 6048 return preferred_mirror; 6049 } 6050 6051 static struct btrfs_io_context *alloc_btrfs_io_context(struct btrfs_fs_info *fs_info, 6052 u64 logical, 6053 u16 total_stripes) 6054 { 6055 struct btrfs_io_context *bioc; 6056 6057 bioc = kzalloc( 6058 /* The size of btrfs_io_context */ 6059 sizeof(struct btrfs_io_context) + 6060 /* Plus the variable array for the stripes */ 6061 sizeof(struct btrfs_io_stripe) * (total_stripes), 6062 GFP_NOFS); 6063 6064 if (!bioc) 6065 return NULL; 6066 6067 refcount_set(&bioc->refs, 1); 6068 6069 bioc->fs_info = fs_info; 6070 bioc->replace_stripe_src = -1; 6071 bioc->full_stripe_logical = (u64)-1; 6072 bioc->logical = logical; 6073 6074 return bioc; 6075 } 6076 6077 void btrfs_get_bioc(struct btrfs_io_context *bioc) 6078 { 6079 WARN_ON(!refcount_read(&bioc->refs)); 6080 refcount_inc(&bioc->refs); 6081 } 6082 6083 void btrfs_put_bioc(struct btrfs_io_context *bioc) 6084 { 6085 if (!bioc) 6086 return; 6087 if (refcount_dec_and_test(&bioc->refs)) 6088 kfree(bioc); 6089 } 6090 6091 /* 6092 * Please note that, discard won't be sent to target device of device 6093 * replace. 6094 */ 6095 struct btrfs_discard_stripe *btrfs_map_discard(struct btrfs_fs_info *fs_info, 6096 u64 logical, u64 *length_ret, 6097 u32 *num_stripes) 6098 { 6099 struct btrfs_chunk_map *map; 6100 struct btrfs_discard_stripe *stripes; 6101 u64 length = *length_ret; 6102 u64 offset; 6103 u32 stripe_nr; 6104 u32 stripe_nr_end; 6105 u32 stripe_cnt; 6106 u64 stripe_end_offset; 6107 u64 stripe_offset; 6108 u32 stripe_index; 6109 u32 factor = 0; 6110 u32 sub_stripes = 0; 6111 u32 stripes_per_dev = 0; 6112 u32 remaining_stripes = 0; 6113 u32 last_stripe = 0; 6114 int ret; 6115 int i; 6116 6117 map = btrfs_get_chunk_map(fs_info, logical, length); 6118 if (IS_ERR(map)) 6119 return ERR_CAST(map); 6120 6121 /* we don't discard raid56 yet */ 6122 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { 6123 ret = -EOPNOTSUPP; 6124 goto out_free_map; 6125 } 6126 6127 offset = logical - map->start; 6128 length = min_t(u64, map->start + map->chunk_len - logical, length); 6129 *length_ret = length; 6130 6131 /* 6132 * stripe_nr counts the total number of stripes we have to stride 6133 * to get to this block 6134 */ 6135 stripe_nr = offset >> BTRFS_STRIPE_LEN_SHIFT; 6136 6137 /* stripe_offset is the offset of this block in its stripe */ 6138 stripe_offset = offset - btrfs_stripe_nr_to_offset(stripe_nr); 6139 6140 stripe_nr_end = round_up(offset + length, BTRFS_STRIPE_LEN) >> 6141 BTRFS_STRIPE_LEN_SHIFT; 6142 stripe_cnt = stripe_nr_end - stripe_nr; 6143 stripe_end_offset = btrfs_stripe_nr_to_offset(stripe_nr_end) - 6144 (offset + length); 6145 /* 6146 * after this, stripe_nr is the number of stripes on this 6147 * device we have to walk to find the data, and stripe_index is 6148 * the number of our device in the stripe array 6149 */ 6150 *num_stripes = 1; 6151 stripe_index = 0; 6152 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | 6153 BTRFS_BLOCK_GROUP_RAID10)) { 6154 if (map->type & BTRFS_BLOCK_GROUP_RAID0) 6155 sub_stripes = 1; 6156 else 6157 sub_stripes = map->sub_stripes; 6158 6159 factor = map->num_stripes / sub_stripes; 6160 *num_stripes = min_t(u64, map->num_stripes, 6161 sub_stripes * stripe_cnt); 6162 stripe_index = stripe_nr % factor; 6163 stripe_nr /= factor; 6164 stripe_index *= sub_stripes; 6165 6166 remaining_stripes = stripe_cnt % factor; 6167 stripes_per_dev = stripe_cnt / factor; 6168 last_stripe = ((stripe_nr_end - 1) 
% factor) * sub_stripes; 6169 } else if (map->type & (BTRFS_BLOCK_GROUP_RAID1_MASK | 6170 BTRFS_BLOCK_GROUP_DUP)) { 6171 *num_stripes = map->num_stripes; 6172 } else { 6173 stripe_index = stripe_nr % map->num_stripes; 6174 stripe_nr /= map->num_stripes; 6175 } 6176 6177 stripes = kcalloc(*num_stripes, sizeof(*stripes), GFP_NOFS); 6178 if (!stripes) { 6179 ret = -ENOMEM; 6180 goto out_free_map; 6181 } 6182 6183 for (i = 0; i < *num_stripes; i++) { 6184 stripes[i].physical = 6185 map->stripes[stripe_index].physical + 6186 stripe_offset + btrfs_stripe_nr_to_offset(stripe_nr); 6187 stripes[i].dev = map->stripes[stripe_index].dev; 6188 6189 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | 6190 BTRFS_BLOCK_GROUP_RAID10)) { 6191 stripes[i].length = btrfs_stripe_nr_to_offset(stripes_per_dev); 6192 6193 if (i / sub_stripes < remaining_stripes) 6194 stripes[i].length += BTRFS_STRIPE_LEN; 6195 6196 /* 6197 * Special for the first stripe and 6198 * the last stripe: 6199 * 6200 * |-------|...|-------| 6201 * |----------| 6202 * off end_off 6203 */ 6204 if (i < sub_stripes) 6205 stripes[i].length -= stripe_offset; 6206 6207 if (stripe_index >= last_stripe && 6208 stripe_index <= (last_stripe + 6209 sub_stripes - 1)) 6210 stripes[i].length -= stripe_end_offset; 6211 6212 if (i == sub_stripes - 1) 6213 stripe_offset = 0; 6214 } else { 6215 stripes[i].length = length; 6216 } 6217 6218 stripe_index++; 6219 if (stripe_index == map->num_stripes) { 6220 stripe_index = 0; 6221 stripe_nr++; 6222 } 6223 } 6224 6225 btrfs_free_chunk_map(map); 6226 return stripes; 6227 out_free_map: 6228 btrfs_free_chunk_map(map); 6229 return ERR_PTR(ret); 6230 } 6231 6232 static bool is_block_group_to_copy(struct btrfs_fs_info *fs_info, u64 logical) 6233 { 6234 struct btrfs_block_group *cache; 6235 bool ret; 6236 6237 /* Non zoned filesystem does not use "to_copy" flag */ 6238 if (!btrfs_is_zoned(fs_info)) 6239 return false; 6240 6241 cache = btrfs_lookup_block_group(fs_info, logical); 6242 6243 ret = test_bit(BLOCK_GROUP_FLAG_TO_COPY, &cache->runtime_flags); 6244 6245 btrfs_put_block_group(cache); 6246 return ret; 6247 } 6248 6249 static void handle_ops_on_dev_replace(enum btrfs_map_op op, 6250 struct btrfs_io_context *bioc, 6251 struct btrfs_dev_replace *dev_replace, 6252 u64 logical, 6253 int *num_stripes_ret, int *max_errors_ret) 6254 { 6255 u64 srcdev_devid = dev_replace->srcdev->devid; 6256 /* 6257 * At this stage, num_stripes is still the real number of stripes, 6258 * excluding the duplicated stripes. 6259 */ 6260 int num_stripes = *num_stripes_ret; 6261 int nr_extra_stripes = 0; 6262 int max_errors = *max_errors_ret; 6263 int i; 6264 6265 /* 6266 * A block group which has "to_copy" set will eventually be copied by 6267 * the dev-replace process. We can avoid cloning IO here. 6268 */ 6269 if (is_block_group_to_copy(dev_replace->srcdev->fs_info, logical)) 6270 return; 6271 6272 /* 6273 * Duplicate the write operations while the dev-replace procedure is 6274 * running. Since the copying of the old disk to the new disk takes 6275 * place at run time while the filesystem is mounted writable, the 6276 * regular write operations to the old disk have to be duplicated to go 6277 * to the new disk as well. 6278 * 6279 * Note that device->missing is handled by the caller, and that the 6280 * write to the old disk is already set up in the stripes array. 
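 *
 * A hedged example of the bookkeeping below (illustrative, not a new
 * case): for a DUP chunk whose two stripes both live on the source
 * device, the loop appends two extra stripes that point at the target
 * device, so *num_stripes_ret grows from 2 to 4 and *max_errors_ret is
 * bumped by the same 2.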
6281 */ 6282 for (i = 0; i < num_stripes; i++) { 6283 struct btrfs_io_stripe *old = &bioc->stripes[i]; 6284 struct btrfs_io_stripe *new = &bioc->stripes[num_stripes + nr_extra_stripes]; 6285 6286 if (old->dev->devid != srcdev_devid) 6287 continue; 6288 6289 new->physical = old->physical; 6290 new->dev = dev_replace->tgtdev; 6291 if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) 6292 bioc->replace_stripe_src = i; 6293 nr_extra_stripes++; 6294 } 6295 6296 /* We can only have at most 2 extra nr_stripes (for DUP). */ 6297 ASSERT(nr_extra_stripes <= 2); 6298 /* 6299 * For GET_READ_MIRRORS, we can only return at most 1 extra stripe for 6300 * replace. 6301 * If we have 2 extra stripes, only choose the one with smaller physical. 6302 */ 6303 if (op == BTRFS_MAP_GET_READ_MIRRORS && nr_extra_stripes == 2) { 6304 struct btrfs_io_stripe *first = &bioc->stripes[num_stripes]; 6305 struct btrfs_io_stripe *second = &bioc->stripes[num_stripes + 1]; 6306 6307 /* Only DUP can have two extra stripes. */ 6308 ASSERT(bioc->map_type & BTRFS_BLOCK_GROUP_DUP); 6309 6310 /* 6311 * Swap the last two stripes and reduce @nr_extra_stripes. 6312 * The extra stripe would still be there, but won't be accessed. 6313 */ 6314 if (first->physical > second->physical) { 6315 swap(second->physical, first->physical); 6316 swap(second->dev, first->dev); 6317 nr_extra_stripes--; 6318 } 6319 } 6320 6321 *num_stripes_ret = num_stripes + nr_extra_stripes; 6322 *max_errors_ret = max_errors + nr_extra_stripes; 6323 bioc->replace_nr_stripes = nr_extra_stripes; 6324 } 6325 6326 static u64 btrfs_max_io_len(struct btrfs_chunk_map *map, u64 offset, 6327 struct btrfs_io_geometry *io_geom) 6328 { 6329 /* 6330 * Stripe_nr is the stripe where this block falls. stripe_offset is 6331 * the offset of this block in its stripe. 6332 */ 6333 io_geom->stripe_offset = offset & BTRFS_STRIPE_LEN_MASK; 6334 io_geom->stripe_nr = offset >> BTRFS_STRIPE_LEN_SHIFT; 6335 ASSERT(io_geom->stripe_offset < U32_MAX); 6336 6337 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { 6338 unsigned long full_stripe_len = 6339 btrfs_stripe_nr_to_offset(nr_data_stripes(map)); 6340 6341 /* 6342 * For full stripe start, we use previously calculated 6343 * @stripe_nr. Align it to nr_data_stripes, then multiply by 6344 * STRIPE_LEN. 6345 * 6346 * By this we can avoid u64 division completely. And we have 6347 * to go rounddown(), not round_down(), as nr_data_stripes is 6348 * not ensured to be a power of 2. 6349 */ 6350 io_geom->raid56_full_stripe_start = btrfs_stripe_nr_to_offset( 6351 rounddown(io_geom->stripe_nr, nr_data_stripes(map))); 6352 6353 ASSERT(io_geom->raid56_full_stripe_start + full_stripe_len > offset); 6354 ASSERT(io_geom->raid56_full_stripe_start <= offset); 6355 /* 6356 * For writes to RAID56, allow writing a full stripe set, but 6357 * no straddling of stripe sets. 6358 */ 6359 if (io_geom->op == BTRFS_MAP_WRITE) 6360 return full_stripe_len - (offset - io_geom->raid56_full_stripe_start); 6361 } 6362 6363 /* 6364 * For other RAID types and for RAID56 reads, allow a single stripe (on 6365 * a single disk).
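 *
 * Worked example (assuming the usual 64K BTRFS_STRIPE_LEN): an offset
 * of 68K into the chunk yields stripe_nr = 1 and stripe_offset = 4K,
 * so a striped profile returns at most 64K - 4K = 60K here and the
 * I/O never straddles a stripe boundary.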
6366 */ 6367 if (map->type & BTRFS_BLOCK_GROUP_STRIPE_MASK) 6368 return BTRFS_STRIPE_LEN - io_geom->stripe_offset; 6369 return U64_MAX; 6370 } 6371 6372 static int set_io_stripe(struct btrfs_fs_info *fs_info, u64 logical, 6373 u64 *length, struct btrfs_io_stripe *dst, 6374 struct btrfs_chunk_map *map, 6375 struct btrfs_io_geometry *io_geom) 6376 { 6377 dst->dev = map->stripes[io_geom->stripe_index].dev; 6378 6379 if (io_geom->op == BTRFS_MAP_READ && 6380 btrfs_need_stripe_tree_update(fs_info, map->type)) 6381 return btrfs_get_raid_extent_offset(fs_info, logical, length, 6382 map->type, 6383 io_geom->stripe_index, dst); 6384 6385 dst->physical = map->stripes[io_geom->stripe_index].physical + 6386 io_geom->stripe_offset + 6387 btrfs_stripe_nr_to_offset(io_geom->stripe_nr); 6388 return 0; 6389 } 6390 6391 static bool is_single_device_io(struct btrfs_fs_info *fs_info, 6392 const struct btrfs_io_stripe *smap, 6393 const struct btrfs_chunk_map *map, 6394 int num_alloc_stripes, 6395 enum btrfs_map_op op, int mirror_num) 6396 { 6397 if (!smap) 6398 return false; 6399 6400 if (num_alloc_stripes != 1) 6401 return false; 6402 6403 if (btrfs_need_stripe_tree_update(fs_info, map->type) && op != BTRFS_MAP_READ) 6404 return false; 6405 6406 if ((map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) && mirror_num > 1) 6407 return false; 6408 6409 return true; 6410 } 6411 6412 static void map_blocks_raid0(const struct btrfs_chunk_map *map, 6413 struct btrfs_io_geometry *io_geom) 6414 { 6415 io_geom->stripe_index = io_geom->stripe_nr % map->num_stripes; 6416 io_geom->stripe_nr /= map->num_stripes; 6417 if (io_geom->op == BTRFS_MAP_READ) 6418 io_geom->mirror_num = 1; 6419 } 6420 6421 static void map_blocks_raid1(struct btrfs_fs_info *fs_info, 6422 struct btrfs_chunk_map *map, 6423 struct btrfs_io_geometry *io_geom, 6424 bool dev_replace_is_ongoing) 6425 { 6426 if (io_geom->op != BTRFS_MAP_READ) { 6427 io_geom->num_stripes = map->num_stripes; 6428 return; 6429 } 6430 6431 if (io_geom->mirror_num) { 6432 io_geom->stripe_index = io_geom->mirror_num - 1; 6433 return; 6434 } 6435 6436 io_geom->stripe_index = find_live_mirror(fs_info, map, 0, 6437 dev_replace_is_ongoing); 6438 io_geom->mirror_num = io_geom->stripe_index + 1; 6439 } 6440 6441 static void map_blocks_dup(const struct btrfs_chunk_map *map, 6442 struct btrfs_io_geometry *io_geom) 6443 { 6444 if (io_geom->op != BTRFS_MAP_READ) { 6445 io_geom->num_stripes = map->num_stripes; 6446 return; 6447 } 6448 6449 if (io_geom->mirror_num) { 6450 io_geom->stripe_index = io_geom->mirror_num - 1; 6451 return; 6452 } 6453 6454 io_geom->mirror_num = 1; 6455 } 6456 6457 static void map_blocks_raid10(struct btrfs_fs_info *fs_info, 6458 struct btrfs_chunk_map *map, 6459 struct btrfs_io_geometry *io_geom, 6460 bool dev_replace_is_ongoing) 6461 { 6462 u32 factor = map->num_stripes / map->sub_stripes; 6463 int old_stripe_index; 6464 6465 io_geom->stripe_index = (io_geom->stripe_nr % factor) * map->sub_stripes; 6466 io_geom->stripe_nr /= factor; 6467 6468 if (io_geom->op != BTRFS_MAP_READ) { 6469 io_geom->num_stripes = map->sub_stripes; 6470 return; 6471 } 6472 6473 if (io_geom->mirror_num) { 6474 io_geom->stripe_index += io_geom->mirror_num - 1; 6475 return; 6476 } 6477 6478 old_stripe_index = io_geom->stripe_index; 6479 io_geom->stripe_index = find_live_mirror(fs_info, map, 6480 io_geom->stripe_index, 6481 dev_replace_is_ongoing); 6482 io_geom->mirror_num = io_geom->stripe_index - old_stripe_index + 1; 6483 } 6484 6485 static void map_blocks_raid56_write(struct btrfs_chunk_map *map, 
6486 struct btrfs_io_geometry *io_geom, 6487 u64 logical, u64 *length) 6488 { 6489 int data_stripes = nr_data_stripes(map); 6490 6491 /* 6492 * Needs full stripe mapping. 6493 * 6494 * Push stripe_nr back to the start of the full stripe. For those cases 6495 * needing a full stripe, @stripe_nr is the full stripe number. 6496 * 6497 * Originally we go raid56_full_stripe_start / full_stripe_len, but 6498 * that can be expensive. Here we just divide @stripe_nr by 6499 * @data_stripes. 6500 */ 6501 io_geom->stripe_nr /= data_stripes; 6502 6503 /* RAID[56] write or recovery. Return all stripes */ 6504 io_geom->num_stripes = map->num_stripes; 6505 io_geom->max_errors = btrfs_chunk_max_errors(map); 6506 6507 /* Return the length to the full stripe end. */ 6508 *length = min(logical + *length, 6509 io_geom->raid56_full_stripe_start + map->start + 6510 btrfs_stripe_nr_to_offset(data_stripes)) - 6511 logical; 6512 io_geom->stripe_index = 0; 6513 io_geom->stripe_offset = 0; 6514 } 6515 6516 static void map_blocks_raid56_read(struct btrfs_chunk_map *map, 6517 struct btrfs_io_geometry *io_geom) 6518 { 6519 int data_stripes = nr_data_stripes(map); 6520 6521 ASSERT(io_geom->mirror_num <= 1); 6522 /* Just grab the data stripe directly. */ 6523 io_geom->stripe_index = io_geom->stripe_nr % data_stripes; 6524 io_geom->stripe_nr /= data_stripes; 6525 6526 /* We distribute the parity blocks across stripes. */ 6527 io_geom->stripe_index = 6528 (io_geom->stripe_nr + io_geom->stripe_index) % map->num_stripes; 6529 6530 if (io_geom->op == BTRFS_MAP_READ && io_geom->mirror_num < 1) 6531 io_geom->mirror_num = 1; 6532 } 6533 6534 static void map_blocks_single(const struct btrfs_chunk_map *map, 6535 struct btrfs_io_geometry *io_geom) 6536 { 6537 io_geom->stripe_index = io_geom->stripe_nr % map->num_stripes; 6538 io_geom->stripe_nr /= map->num_stripes; 6539 io_geom->mirror_num = io_geom->stripe_index + 1; 6540 } 6541 6542 /* 6543 * Map one logical range to one or more physical ranges. 6544 * 6545 * @length: (Mandatory) mapped length of this run. 6546 * One logical range can be split into different segments 6547 * due to factors like zones and RAID0/5/6/10 stripe 6548 * boundaries. 6549 * 6550 * @bioc_ret: (Mandatory) returned btrfs_io_context structure, 6551 * which has one or more physical ranges (btrfs_io_stripe) 6552 * recorded inside. 6553 * Caller should call btrfs_put_bioc() to free it after use. 6554 * 6555 * @smap: (Optional) single physical range optimization. 6556 * If the map request can be fulfilled by one single 6557 * physical range, and this parameter is not NULL, 6558 * then @bioc_ret would be NULL, and @smap would be 6559 * updated. 6560 * 6561 * @mirror_num_ret: (Mandatory) returned mirror number if the original 6562 * value is 0. 6563 * 6564 * Mirror number 0 means to choose any live mirrors. 6565 * 6566 * For non-RAID56 profiles, non-zero mirror_num means 6567 * the Nth mirror (e.g. mirror_num 1 means the first 6568 * copy). 6569 * 6570 * For RAID56 profile, mirror 1 means rebuild from P and 6571 * the remaining data stripes. 6572 * 6573 * For RAID6 profile, mirror > 2 means mark another 6574 * data/P stripe error and rebuild from the remaining 6575 * stripes.
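 *
 * A minimal read-side usage sketch (illustrative only, error handling
 * and bio submission details trimmed):
 *
 *	struct btrfs_io_context *bioc = NULL;
 *	u64 map_length = fs_info->sectorsize;
 *	int mirror_num = 0;
 *	int ret;
 *
 *	ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, logical,
 *			      &map_length, &bioc, NULL, &mirror_num);
 *
 * On success, bioc->stripes[0 .. bioc->num_stripes - 1] hold the
 * physical ranges and the caller must drop its reference with
 * btrfs_put_bioc(bioc).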
6576 */ 6577 int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op, 6578 u64 logical, u64 *length, 6579 struct btrfs_io_context **bioc_ret, 6580 struct btrfs_io_stripe *smap, int *mirror_num_ret) 6581 { 6582 struct btrfs_chunk_map *map; 6583 struct btrfs_io_geometry io_geom = { 0 }; 6584 u64 map_offset; 6585 int i; 6586 int ret = 0; 6587 int num_copies; 6588 struct btrfs_io_context *bioc = NULL; 6589 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace; 6590 int dev_replace_is_ongoing = 0; 6591 u16 num_alloc_stripes; 6592 u64 max_len; 6593 6594 ASSERT(bioc_ret); 6595 6596 io_geom.mirror_num = (mirror_num_ret ? *mirror_num_ret : 0); 6597 io_geom.num_stripes = 1; 6598 io_geom.stripe_index = 0; 6599 io_geom.op = op; 6600 6601 num_copies = btrfs_num_copies(fs_info, logical, fs_info->sectorsize); 6602 if (io_geom.mirror_num > num_copies) 6603 return -EINVAL; 6604 6605 map = btrfs_get_chunk_map(fs_info, logical, *length); 6606 if (IS_ERR(map)) 6607 return PTR_ERR(map); 6608 6609 map_offset = logical - map->start; 6610 io_geom.raid56_full_stripe_start = (u64)-1; 6611 max_len = btrfs_max_io_len(map, map_offset, &io_geom); 6612 *length = min_t(u64, map->chunk_len - map_offset, max_len); 6613 6614 down_read(&dev_replace->rwsem); 6615 dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace); 6616 /* 6617 * Hold the semaphore for read during the whole operation, write is 6618 * requested at commit time but must wait. 6619 */ 6620 if (!dev_replace_is_ongoing) 6621 up_read(&dev_replace->rwsem); 6622 6623 switch (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) { 6624 case BTRFS_BLOCK_GROUP_RAID0: 6625 map_blocks_raid0(map, &io_geom); 6626 break; 6627 case BTRFS_BLOCK_GROUP_RAID1: 6628 case BTRFS_BLOCK_GROUP_RAID1C3: 6629 case BTRFS_BLOCK_GROUP_RAID1C4: 6630 map_blocks_raid1(fs_info, map, &io_geom, dev_replace_is_ongoing); 6631 break; 6632 case BTRFS_BLOCK_GROUP_DUP: 6633 map_blocks_dup(map, &io_geom); 6634 break; 6635 case BTRFS_BLOCK_GROUP_RAID10: 6636 map_blocks_raid10(fs_info, map, &io_geom, dev_replace_is_ongoing); 6637 break; 6638 case BTRFS_BLOCK_GROUP_RAID5: 6639 case BTRFS_BLOCK_GROUP_RAID6: 6640 if (op != BTRFS_MAP_READ || io_geom.mirror_num > 1) 6641 map_blocks_raid56_write(map, &io_geom, logical, length); 6642 else 6643 map_blocks_raid56_read(map, &io_geom); 6644 break; 6645 default: 6646 /* 6647 * After this, stripe_nr is the number of stripes on this 6648 * device we have to walk to find the data, and stripe_index is 6649 * the number of our device in the stripe array 6650 */ 6651 map_blocks_single(map, &io_geom); 6652 break; 6653 } 6654 if (io_geom.stripe_index >= map->num_stripes) { 6655 btrfs_crit(fs_info, 6656 "stripe index math went horribly wrong, got stripe_index=%u, num_stripes=%u", 6657 io_geom.stripe_index, map->num_stripes); 6658 ret = -EINVAL; 6659 goto out; 6660 } 6661 6662 num_alloc_stripes = io_geom.num_stripes; 6663 if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL && 6664 op != BTRFS_MAP_READ) 6665 /* 6666 * For replace case, we need to add extra stripes for extra 6667 * duplicated stripes. 6668 * 6669 * For both WRITE and GET_READ_MIRRORS, we may have at most 6670 * 2 more stripes (DUP types, otherwise 1). 6671 */ 6672 num_alloc_stripes += 2; 6673 6674 /* 6675 * If this I/O maps to a single device, try to return the device and 6676 * physical block information on the stack instead of allocating an 6677 * I/O context structure. 
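 * (A plain read of a SINGLE or RAID0 chunk with no running device
 * replace is a typical case that takes this path.)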
6678 */ 6679 if (is_single_device_io(fs_info, smap, map, num_alloc_stripes, op, 6680 io_geom.mirror_num)) { 6681 ret = set_io_stripe(fs_info, logical, length, smap, map, &io_geom); 6682 if (mirror_num_ret) 6683 *mirror_num_ret = io_geom.mirror_num; 6684 *bioc_ret = NULL; 6685 goto out; 6686 } 6687 6688 bioc = alloc_btrfs_io_context(fs_info, logical, num_alloc_stripes); 6689 if (!bioc) { 6690 ret = -ENOMEM; 6691 goto out; 6692 } 6693 bioc->map_type = map->type; 6694 6695 /* 6696 * For RAID56 full map, we need to make sure the stripes[] follows the 6697 * rule that data stripes are all ordered, then followed by P and Q 6698 * (if present). 6699 * 6700 * It's still mostly the same as other profiles, just with extra rotation. 6701 */ 6702 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK && 6703 (op != BTRFS_MAP_READ || io_geom.mirror_num > 1)) { 6704 /* 6705 * For RAID56 @stripe_nr is already the number of full stripes 6706 * before us, which is also the rotation value (needs to modulo 6707 * with num_stripes). 6708 * 6709 * In this case, we just add @stripe_nr to @i, then do the 6710 * modulo, to save one modulo call. 6711 */ 6712 bioc->full_stripe_logical = map->start + 6713 btrfs_stripe_nr_to_offset(io_geom.stripe_nr * 6714 nr_data_stripes(map)); 6715 for (int i = 0; i < io_geom.num_stripes; i++) { 6716 struct btrfs_io_stripe *dst = &bioc->stripes[i]; 6717 u32 stripe_index; 6718 6719 stripe_index = (i + io_geom.stripe_nr) % io_geom.num_stripes; 6720 dst->dev = map->stripes[stripe_index].dev; 6721 dst->physical = 6722 map->stripes[stripe_index].physical + 6723 io_geom.stripe_offset + 6724 btrfs_stripe_nr_to_offset(io_geom.stripe_nr); 6725 } 6726 } else { 6727 /* 6728 * For all other non-RAID56 profiles, just copy the target 6729 * stripe into the bioc.
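 * (For instance, a RAID1 write fills one stripe per mirror here, via
 * one set_io_stripe() call per iteration of the loop below.)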
6730 */ 6731 for (i = 0; i < io_geom.num_stripes; i++) { 6732 ret = set_io_stripe(fs_info, logical, length, 6733 &bioc->stripes[i], map, &io_geom); 6734 if (ret < 0) 6735 break; 6736 io_geom.stripe_index++; 6737 } 6738 } 6739 6740 if (ret) { 6741 *bioc_ret = NULL; 6742 btrfs_put_bioc(bioc); 6743 goto out; 6744 } 6745 6746 if (op != BTRFS_MAP_READ) 6747 io_geom.max_errors = btrfs_chunk_max_errors(map); 6748 6749 if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL && 6750 op != BTRFS_MAP_READ) { 6751 handle_ops_on_dev_replace(op, bioc, dev_replace, logical, 6752 &io_geom.num_stripes, &io_geom.max_errors); 6753 } 6754 6755 *bioc_ret = bioc; 6756 bioc->num_stripes = io_geom.num_stripes; 6757 bioc->max_errors = io_geom.max_errors; 6758 bioc->mirror_num = io_geom.mirror_num; 6759 6760 out: 6761 if (dev_replace_is_ongoing) { 6762 lockdep_assert_held(&dev_replace->rwsem); 6763 /* Unlock and let waiting writers proceed */ 6764 up_read(&dev_replace->rwsem); 6765 } 6766 btrfs_free_chunk_map(map); 6767 return ret; 6768 } 6769 6770 static bool dev_args_match_fs_devices(const struct btrfs_dev_lookup_args *args, 6771 const struct btrfs_fs_devices *fs_devices) 6772 { 6773 if (args->fsid == NULL) 6774 return true; 6775 if (memcmp(fs_devices->metadata_uuid, args->fsid, BTRFS_FSID_SIZE) == 0) 6776 return true; 6777 return false; 6778 } 6779 6780 static bool dev_args_match_device(const struct btrfs_dev_lookup_args *args, 6781 const struct btrfs_device *device) 6782 { 6783 if (args->missing) { 6784 if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state) && 6785 !device->bdev) 6786 return true; 6787 return false; 6788 } 6789 6790 if (device->devid != args->devid) 6791 return false; 6792 if (args->uuid && memcmp(device->uuid, args->uuid, BTRFS_UUID_SIZE) != 0) 6793 return false; 6794 return true; 6795 } 6796 6797 /* 6798 * Find a device specified by @devid or @uuid in the list of @fs_devices, or 6799 * return NULL. 6800 * 6801 * If devid and uuid are both specified, the match must be exact, otherwise 6802 * only devid is used. 6803 */ 6804 struct btrfs_device *btrfs_find_device(const struct btrfs_fs_devices *fs_devices, 6805 const struct btrfs_dev_lookup_args *args) 6806 { 6807 struct btrfs_device *device; 6808 struct btrfs_fs_devices *seed_devs; 6809 6810 if (dev_args_match_fs_devices(args, fs_devices)) { 6811 list_for_each_entry(device, &fs_devices->devices, dev_list) { 6812 if (dev_args_match_device(args, device)) 6813 return device; 6814 } 6815 } 6816 6817 list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) { 6818 if (!dev_args_match_fs_devices(args, seed_devs)) 6819 continue; 6820 list_for_each_entry(device, &seed_devs->devices, dev_list) { 6821 if (dev_args_match_device(args, device)) 6822 return device; 6823 } 6824 } 6825 6826 return NULL; 6827 } 6828 6829 static struct btrfs_device *add_missing_dev(struct btrfs_fs_devices *fs_devices, 6830 u64 devid, u8 *dev_uuid) 6831 { 6832 struct btrfs_device *device; 6833 unsigned int nofs_flag; 6834 6835 /* 6836 * We call this under the chunk_mutex, so we want to use NOFS for this 6837 * allocation, however we don't want to change btrfs_alloc_device() to 6838 * always do NOFS because we use it in a lot of other GFP_KERNEL safe 6839 * places. 
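 *
 * In other words, the memalloc_nofs_save()/memalloc_nofs_restore()
 * pair below makes the GFP_KERNEL allocations done inside
 * btrfs_alloc_device() behave as GFP_NOFS for this call site only.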
*/ 6841 6842 nofs_flag = memalloc_nofs_save(); 6843 device = btrfs_alloc_device(NULL, &devid, dev_uuid, NULL); 6844 memalloc_nofs_restore(nofs_flag); 6845 if (IS_ERR(device)) 6846 return device; 6847 6848 list_add(&device->dev_list, &fs_devices->devices); 6849 device->fs_devices = fs_devices; 6850 fs_devices->num_devices++; 6851 6852 set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state); 6853 fs_devices->missing_devices++; 6854 6855 return device; 6856 } 6857 6858 /* 6859 * Allocate new device struct, set up devid and UUID. 6860 * 6861 * @fs_info: used only for generating a new devid, can be NULL if 6862 * devid is provided (i.e. @devid != NULL). 6863 * @devid: a pointer to devid for this device. If NULL a new devid 6864 * is generated. 6865 * @uuid: a pointer to UUID for this device. If NULL a new UUID 6866 * is generated. 6867 * @path: a pointer to device path if available, NULL otherwise. 6868 * 6869 * Return: a pointer to a new &struct btrfs_device on success; ERR_PTR() 6870 * on error. Returned struct is not linked onto any lists and must be 6871 * destroyed with btrfs_free_device. 6872 */ 6873 struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info, 6874 const u64 *devid, const u8 *uuid, 6875 const char *path) 6876 { 6877 struct btrfs_device *dev; 6878 u64 tmp; 6879 6880 if (WARN_ON(!devid && !fs_info)) 6881 return ERR_PTR(-EINVAL); 6882 6883 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 6884 if (!dev) 6885 return ERR_PTR(-ENOMEM); 6886 6887 INIT_LIST_HEAD(&dev->dev_list); 6888 INIT_LIST_HEAD(&dev->dev_alloc_list); 6889 INIT_LIST_HEAD(&dev->post_commit_list); 6890 6891 atomic_set(&dev->dev_stats_ccnt, 0); 6892 btrfs_device_data_ordered_init(dev); 6893 extent_io_tree_init(fs_info, &dev->alloc_state, IO_TREE_DEVICE_ALLOC_STATE); 6894 6895 if (devid) 6896 tmp = *devid; 6897 else { 6898 int ret; 6899 6900 ret = find_next_devid(fs_info, &tmp); 6901 if (ret) { 6902 btrfs_free_device(dev); 6903 return ERR_PTR(ret); 6904 } 6905 } 6906 dev->devid = tmp; 6907 6908 if (uuid) 6909 memcpy(dev->uuid, uuid, BTRFS_UUID_SIZE); 6910 else 6911 generate_random_uuid(dev->uuid); 6912 6913 if (path) { 6914 struct rcu_string *name; 6915 6916 name = rcu_string_strdup(path, GFP_KERNEL); 6917 if (!name) { 6918 btrfs_free_device(dev); 6919 return ERR_PTR(-ENOMEM); 6920 } 6921 rcu_assign_pointer(dev->name, name); 6922 } 6923 6924 return dev; 6925 } 6926 6927 static void btrfs_report_missing_device(struct btrfs_fs_info *fs_info, 6928 u64 devid, u8 *uuid, bool error) 6929 { 6930 if (error) 6931 btrfs_err_rl(fs_info, "devid %llu uuid %pU is missing", 6932 devid, uuid); 6933 else 6934 btrfs_warn_rl(fs_info, "devid %llu uuid %pU is missing", 6935 devid, uuid); 6936 } 6937 6938 u64 btrfs_calc_stripe_length(const struct btrfs_chunk_map *map) 6939 { 6940 const int data_stripes = calc_data_stripes(map->type, map->num_stripes); 6941 6942 return div_u64(map->chunk_len, data_stripes); 6943 } 6944 6945 #if BITS_PER_LONG == 32 6946 /* 6947 * Due to the page cache limit, metadata beyond BTRFS_32BIT_MAX_FILE_SIZE 6948 * can't be accessed on 32bit systems. 6949 * 6950 * This function does a mount time check to reject the fs if it already has a 6951 * metadata chunk beyond that limit.
6952 */ 6953 static int check_32bit_meta_chunk(struct btrfs_fs_info *fs_info, 6954 u64 logical, u64 length, u64 type) 6955 { 6956 if (!(type & BTRFS_BLOCK_GROUP_METADATA)) 6957 return 0; 6958 6959 if (logical + length < MAX_LFS_FILESIZE) 6960 return 0; 6961 6962 btrfs_err_32bit_limit(fs_info); 6963 return -EOVERFLOW; 6964 } 6965 6966 /* 6967 * This is to give early warning for any metadata chunk reaching 6968 * BTRFS_32BIT_EARLY_WARN_THRESHOLD. 6969 * Although we can still access the metadata, it's not going to be possible 6970 * once the limit is reached. 6971 */ 6972 static void warn_32bit_meta_chunk(struct btrfs_fs_info *fs_info, 6973 u64 logical, u64 length, u64 type) 6974 { 6975 if (!(type & BTRFS_BLOCK_GROUP_METADATA)) 6976 return; 6977 6978 if (logical + length < BTRFS_32BIT_EARLY_WARN_THRESHOLD) 6979 return; 6980 6981 btrfs_warn_32bit_limit(fs_info); 6982 } 6983 #endif 6984 6985 static struct btrfs_device *handle_missing_device(struct btrfs_fs_info *fs_info, 6986 u64 devid, u8 *uuid) 6987 { 6988 struct btrfs_device *dev; 6989 6990 if (!btrfs_test_opt(fs_info, DEGRADED)) { 6991 btrfs_report_missing_device(fs_info, devid, uuid, true); 6992 return ERR_PTR(-ENOENT); 6993 } 6994 6995 dev = add_missing_dev(fs_info->fs_devices, devid, uuid); 6996 if (IS_ERR(dev)) { 6997 btrfs_err(fs_info, "failed to init missing device %llu: %ld", 6998 devid, PTR_ERR(dev)); 6999 return dev; 7000 } 7001 btrfs_report_missing_device(fs_info, devid, uuid, false); 7002 7003 return dev; 7004 } 7005 7006 static int read_one_chunk(struct btrfs_key *key, struct extent_buffer *leaf, 7007 struct btrfs_chunk *chunk) 7008 { 7009 BTRFS_DEV_LOOKUP_ARGS(args); 7010 struct btrfs_fs_info *fs_info = leaf->fs_info; 7011 struct btrfs_chunk_map *map; 7012 u64 logical; 7013 u64 length; 7014 u64 devid; 7015 u64 type; 7016 u8 uuid[BTRFS_UUID_SIZE]; 7017 int index; 7018 int num_stripes; 7019 int ret; 7020 int i; 7021 7022 logical = key->offset; 7023 length = btrfs_chunk_length(leaf, chunk); 7024 type = btrfs_chunk_type(leaf, chunk); 7025 index = btrfs_bg_flags_to_raid_index(type); 7026 num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 7027 7028 #if BITS_PER_LONG == 32 7029 ret = check_32bit_meta_chunk(fs_info, logical, length, type); 7030 if (ret < 0) 7031 return ret; 7032 warn_32bit_meta_chunk(fs_info, logical, length, type); 7033 #endif 7034 7035 /* 7036 * Only need to verify chunk item if we're reading from sys chunk array, 7037 * as chunk item in tree block is already verified by tree-checker. 7038 */ 7039 if (leaf->start == BTRFS_SUPER_INFO_OFFSET) { 7040 ret = btrfs_check_chunk_valid(leaf, chunk, logical); 7041 if (ret) 7042 return ret; 7043 } 7044 7045 map = btrfs_find_chunk_map(fs_info, logical, 1); 7046 7047 /* already mapped? */ 7048 if (map && map->start <= logical && map->start + map->chunk_len > logical) { 7049 btrfs_free_chunk_map(map); 7050 return 0; 7051 } else if (map) { 7052 btrfs_free_chunk_map(map); 7053 } 7054 7055 map = btrfs_alloc_chunk_map(num_stripes, GFP_NOFS); 7056 if (!map) 7057 return -ENOMEM; 7058 7059 map->start = logical; 7060 map->chunk_len = length; 7061 map->num_stripes = num_stripes; 7062 map->io_width = btrfs_chunk_io_width(leaf, chunk); 7063 map->io_align = btrfs_chunk_io_align(leaf, chunk); 7064 map->type = type; 7065 /* 7066 * We can't use the sub_stripes value, as for profiles other than 7067 * RAID10, they may have 0 as sub_stripes for filesystems created by 7068 * older mkfs (<v5.4). 7069 * In that case, it can cause divide-by-zero errors later. 
7070 * Since currently sub_stripes is fixed for each profile, let's 7071 * use the trusted value instead. 7072 */ 7073 map->sub_stripes = btrfs_raid_array[index].sub_stripes; 7074 map->verified_stripes = 0; 7075 map->stripe_size = btrfs_calc_stripe_length(map); 7076 for (i = 0; i < num_stripes; i++) { 7077 map->stripes[i].physical = 7078 btrfs_stripe_offset_nr(leaf, chunk, i); 7079 devid = btrfs_stripe_devid_nr(leaf, chunk, i); 7080 args.devid = devid; 7081 read_extent_buffer(leaf, uuid, (unsigned long) 7082 btrfs_stripe_dev_uuid_nr(chunk, i), 7083 BTRFS_UUID_SIZE); 7084 args.uuid = uuid; 7085 map->stripes[i].dev = btrfs_find_device(fs_info->fs_devices, &args); 7086 if (!map->stripes[i].dev) { 7087 map->stripes[i].dev = handle_missing_device(fs_info, 7088 devid, uuid); 7089 if (IS_ERR(map->stripes[i].dev)) { 7090 ret = PTR_ERR(map->stripes[i].dev); 7091 btrfs_free_chunk_map(map); 7092 return ret; 7093 } 7094 } 7095 7096 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, 7097 &(map->stripes[i].dev->dev_state)); 7098 } 7099 7100 ret = btrfs_add_chunk_map(fs_info, map); 7101 if (ret < 0) { 7102 btrfs_err(fs_info, 7103 "failed to add chunk map, start=%llu len=%llu: %d", 7104 map->start, map->chunk_len, ret); 7105 } 7106 7107 return ret; 7108 } 7109 7110 static void fill_device_from_item(struct extent_buffer *leaf, 7111 struct btrfs_dev_item *dev_item, 7112 struct btrfs_device *device) 7113 { 7114 unsigned long ptr; 7115 7116 device->devid = btrfs_device_id(leaf, dev_item); 7117 device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item); 7118 device->total_bytes = device->disk_total_bytes; 7119 device->commit_total_bytes = device->disk_total_bytes; 7120 device->bytes_used = btrfs_device_bytes_used(leaf, dev_item); 7121 device->commit_bytes_used = device->bytes_used; 7122 device->type = btrfs_device_type(leaf, dev_item); 7123 device->io_align = btrfs_device_io_align(leaf, dev_item); 7124 device->io_width = btrfs_device_io_width(leaf, dev_item); 7125 device->sector_size = btrfs_device_sector_size(leaf, dev_item); 7126 WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID); 7127 clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state); 7128 7129 ptr = btrfs_device_uuid(dev_item); 7130 read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE); 7131 } 7132 7133 static struct btrfs_fs_devices *open_seed_devices(struct btrfs_fs_info *fs_info, 7134 u8 *fsid) 7135 { 7136 struct btrfs_fs_devices *fs_devices; 7137 int ret; 7138 7139 lockdep_assert_held(&uuid_mutex); 7140 ASSERT(fsid); 7141 7142 /* This will match only for multi-device seed fs */ 7143 list_for_each_entry(fs_devices, &fs_info->fs_devices->seed_list, seed_list) 7144 if (!memcmp(fs_devices->fsid, fsid, BTRFS_FSID_SIZE)) 7145 return fs_devices; 7146 7147 7148 fs_devices = find_fsid(fsid, NULL); 7149 if (!fs_devices) { 7150 if (!btrfs_test_opt(fs_info, DEGRADED)) 7151 return ERR_PTR(-ENOENT); 7152 7153 fs_devices = alloc_fs_devices(fsid); 7154 if (IS_ERR(fs_devices)) 7155 return fs_devices; 7156 7157 fs_devices->seeding = true; 7158 fs_devices->opened = 1; 7159 return fs_devices; 7160 } 7161 7162 /* 7163 * Upon first call for a seed fs fsid, just create a private copy of the 7164 * respective fs_devices and anchor it at fs_info->fs_devices->seed_list 7165 */ 7166 fs_devices = clone_fs_devices(fs_devices); 7167 if (IS_ERR(fs_devices)) 7168 return fs_devices; 7169 7170 ret = open_fs_devices(fs_devices, BLK_OPEN_READ, fs_info->bdev_holder); 7171 if (ret) { 7172 free_fs_devices(fs_devices); 7173 return ERR_PTR(ret); 7174 } 7175 7176 if 
(!fs_devices->seeding) { 7177 close_fs_devices(fs_devices); 7178 free_fs_devices(fs_devices); 7179 return ERR_PTR(-EINVAL); 7180 } 7181 7182 list_add(&fs_devices->seed_list, &fs_info->fs_devices->seed_list); 7183 7184 return fs_devices; 7185 } 7186 7187 static int read_one_dev(struct extent_buffer *leaf, 7188 struct btrfs_dev_item *dev_item) 7189 { 7190 BTRFS_DEV_LOOKUP_ARGS(args); 7191 struct btrfs_fs_info *fs_info = leaf->fs_info; 7192 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 7193 struct btrfs_device *device; 7194 u64 devid; 7195 int ret; 7196 u8 fs_uuid[BTRFS_FSID_SIZE]; 7197 u8 dev_uuid[BTRFS_UUID_SIZE]; 7198 7199 devid = btrfs_device_id(leaf, dev_item); 7200 args.devid = devid; 7201 read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item), 7202 BTRFS_UUID_SIZE); 7203 read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item), 7204 BTRFS_FSID_SIZE); 7205 args.uuid = dev_uuid; 7206 args.fsid = fs_uuid; 7207 7208 if (memcmp(fs_uuid, fs_devices->metadata_uuid, BTRFS_FSID_SIZE)) { 7209 fs_devices = open_seed_devices(fs_info, fs_uuid); 7210 if (IS_ERR(fs_devices)) 7211 return PTR_ERR(fs_devices); 7212 } 7213 7214 device = btrfs_find_device(fs_info->fs_devices, &args); 7215 if (!device) { 7216 if (!btrfs_test_opt(fs_info, DEGRADED)) { 7217 btrfs_report_missing_device(fs_info, devid, 7218 dev_uuid, true); 7219 return -ENOENT; 7220 } 7221 7222 device = add_missing_dev(fs_devices, devid, dev_uuid); 7223 if (IS_ERR(device)) { 7224 btrfs_err(fs_info, 7225 "failed to add missing dev %llu: %ld", 7226 devid, PTR_ERR(device)); 7227 return PTR_ERR(device); 7228 } 7229 btrfs_report_missing_device(fs_info, devid, dev_uuid, false); 7230 } else { 7231 if (!device->bdev) { 7232 if (!btrfs_test_opt(fs_info, DEGRADED)) { 7233 btrfs_report_missing_device(fs_info, 7234 devid, dev_uuid, true); 7235 return -ENOENT; 7236 } 7237 btrfs_report_missing_device(fs_info, devid, 7238 dev_uuid, false); 7239 } 7240 7241 if (!device->bdev && 7242 !test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) { 7243 /* 7244 * this happens when a device that was properly setup 7245 * in the device info lists suddenly goes bad. 
7246 * device->bdev is NULL, and so we have to set 7247 * the BTRFS_DEV_STATE_MISSING bit here 7248 */ 7249 device->fs_devices->missing_devices++; 7250 set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state); 7251 } 7252 7253 /* Move the device to its own fs_devices */ 7254 if (device->fs_devices != fs_devices) { 7255 ASSERT(test_bit(BTRFS_DEV_STATE_MISSING, 7256 &device->dev_state)); 7257 7258 list_move(&device->dev_list, &fs_devices->devices); 7259 device->fs_devices->num_devices--; 7260 fs_devices->num_devices++; 7261 7262 device->fs_devices->missing_devices--; 7263 fs_devices->missing_devices++; 7264 7265 device->fs_devices = fs_devices; 7266 } 7267 } 7268 7269 if (device->fs_devices != fs_info->fs_devices) { 7270 BUG_ON(test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)); 7271 if (device->generation != 7272 btrfs_device_generation(leaf, dev_item)) 7273 return -EINVAL; 7274 } 7275 7276 fill_device_from_item(leaf, dev_item, device); 7277 if (device->bdev) { 7278 u64 max_total_bytes = bdev_nr_bytes(device->bdev); 7279 7280 if (device->total_bytes > max_total_bytes) { 7281 btrfs_err(fs_info, 7282 "device total_bytes should be at most %llu but found %llu", 7283 max_total_bytes, device->total_bytes); 7284 return -EINVAL; 7285 } 7286 } 7287 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state); 7288 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) && 7289 !test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) { 7290 device->fs_devices->total_rw_bytes += device->total_bytes; 7291 atomic64_add(device->total_bytes - device->bytes_used, 7292 &fs_info->free_chunk_space); 7293 } 7294 ret = 0; 7295 return ret; 7296 } 7297 7298 int btrfs_read_sys_array(struct btrfs_fs_info *fs_info) 7299 { 7300 struct btrfs_super_block *super_copy = fs_info->super_copy; 7301 struct extent_buffer *sb; 7302 struct btrfs_disk_key *disk_key; 7303 struct btrfs_chunk *chunk; 7304 u8 *array_ptr; 7305 unsigned long sb_array_offset; 7306 int ret = 0; 7307 u32 num_stripes; 7308 u32 array_size; 7309 u32 len = 0; 7310 u32 cur_offset; 7311 u64 type; 7312 struct btrfs_key key; 7313 7314 ASSERT(BTRFS_SUPER_INFO_SIZE <= fs_info->nodesize); 7315 7316 /* 7317 * We allocated a dummy extent, just to use extent buffer accessors. 7318 * There will be unused space after BTRFS_SUPER_INFO_SIZE, but 7319 * that's fine, we will not go beyond system chunk array anyway.
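 *
 * The array itself is a packed sequence of (struct btrfs_disk_key,
 * struct btrfs_chunk) pairs, which is exactly what the while loop
 * below walks, one pair per iteration.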
7320 */ 7321 sb = alloc_dummy_extent_buffer(fs_info, BTRFS_SUPER_INFO_OFFSET); 7322 if (!sb) 7323 return -ENOMEM; 7324 set_extent_buffer_uptodate(sb); 7325 7326 write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE); 7327 array_size = btrfs_super_sys_array_size(super_copy); 7328 7329 array_ptr = super_copy->sys_chunk_array; 7330 sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array); 7331 cur_offset = 0; 7332 7333 while (cur_offset < array_size) { 7334 disk_key = (struct btrfs_disk_key *)array_ptr; 7335 len = sizeof(*disk_key); 7336 if (cur_offset + len > array_size) 7337 goto out_short_read; 7338 7339 btrfs_disk_key_to_cpu(&key, disk_key); 7340 7341 array_ptr += len; 7342 sb_array_offset += len; 7343 cur_offset += len; 7344 7345 if (key.type != BTRFS_CHUNK_ITEM_KEY) { 7346 btrfs_err(fs_info, 7347 "unexpected item type %u in sys_array at offset %u", 7348 (u32)key.type, cur_offset); 7349 ret = -EIO; 7350 break; 7351 } 7352 7353 chunk = (struct btrfs_chunk *)sb_array_offset; 7354 /* 7355 * At least one btrfs_chunk with one stripe must be present, 7356 * exact stripe count check comes afterwards 7357 */ 7358 len = btrfs_chunk_item_size(1); 7359 if (cur_offset + len > array_size) 7360 goto out_short_read; 7361 7362 num_stripes = btrfs_chunk_num_stripes(sb, chunk); 7363 if (!num_stripes) { 7364 btrfs_err(fs_info, 7365 "invalid number of stripes %u in sys_array at offset %u", 7366 num_stripes, cur_offset); 7367 ret = -EIO; 7368 break; 7369 } 7370 7371 type = btrfs_chunk_type(sb, chunk); 7372 if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) { 7373 btrfs_err(fs_info, 7374 "invalid chunk type %llu in sys_array at offset %u", 7375 type, cur_offset); 7376 ret = -EIO; 7377 break; 7378 } 7379 7380 len = btrfs_chunk_item_size(num_stripes); 7381 if (cur_offset + len > array_size) 7382 goto out_short_read; 7383 7384 ret = read_one_chunk(&key, sb, chunk); 7385 if (ret) 7386 break; 7387 7388 array_ptr += len; 7389 sb_array_offset += len; 7390 cur_offset += len; 7391 } 7392 clear_extent_buffer_uptodate(sb); 7393 free_extent_buffer_stale(sb); 7394 return ret; 7395 7396 out_short_read: 7397 btrfs_err(fs_info, "sys_array too short to read %u bytes at offset %u", 7398 len, cur_offset); 7399 clear_extent_buffer_uptodate(sb); 7400 free_extent_buffer_stale(sb); 7401 return -EIO; 7402 } 7403 7404 /* 7405 * Check if all chunks in the fs are OK for read-write degraded mount 7406 * 7407 * If the @failing_dev is specified, it's accounted as missing. 7408 * 7409 * Return true if all chunks meet the minimal RW mount requirements. 7410 * Return false if any chunk doesn't meet the minimal RW mount requirements. 7411 */ 7412 bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info, 7413 struct btrfs_device *failing_dev) 7414 { 7415 struct btrfs_chunk_map *map; 7416 u64 next_start; 7417 bool ret = true; 7418 7419 map = btrfs_find_chunk_map(fs_info, 0, U64_MAX); 7420 /* No chunk at all? 
Return false anyway */ 7421 if (!map) { 7422 ret = false; 7423 goto out; 7424 } 7425 while (map) { 7426 int missing = 0; 7427 int max_tolerated; 7428 int i; 7429 7430 max_tolerated = 7431 btrfs_get_num_tolerated_disk_barrier_failures( 7432 map->type); 7433 for (i = 0; i < map->num_stripes; i++) { 7434 struct btrfs_device *dev = map->stripes[i].dev; 7435 7436 if (!dev || !dev->bdev || 7437 test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) || 7438 dev->last_flush_error) 7439 missing++; 7440 else if (failing_dev && failing_dev == dev) 7441 missing++; 7442 } 7443 if (missing > max_tolerated) { 7444 if (!failing_dev) 7445 btrfs_warn(fs_info, 7446 "chunk %llu missing %d devices, max tolerance is %d for writable mount", 7447 map->start, missing, max_tolerated); 7448 btrfs_free_chunk_map(map); 7449 ret = false; 7450 goto out; 7451 } 7452 next_start = map->start + map->chunk_len; 7453 btrfs_free_chunk_map(map); 7454 7455 map = btrfs_find_chunk_map(fs_info, next_start, U64_MAX - next_start); 7456 } 7457 out: 7458 return ret; 7459 } 7460 7461 static void readahead_tree_node_children(struct extent_buffer *node) 7462 { 7463 int i; 7464 const int nr_items = btrfs_header_nritems(node); 7465 7466 for (i = 0; i < nr_items; i++) 7467 btrfs_readahead_node_child(node, i); 7468 } 7469 7470 int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info) 7471 { 7472 struct btrfs_root *root = fs_info->chunk_root; 7473 struct btrfs_path *path; 7474 struct extent_buffer *leaf; 7475 struct btrfs_key key; 7476 struct btrfs_key found_key; 7477 int ret; 7478 int slot; 7479 int iter_ret = 0; 7480 u64 total_dev = 0; 7481 u64 last_ra_node = 0; 7482 7483 path = btrfs_alloc_path(); 7484 if (!path) 7485 return -ENOMEM; 7486 7487 /* 7488 * uuid_mutex is needed only if we are mounting a sprout FS, 7489 * otherwise we don't need it. 7490 */ 7491 mutex_lock(&uuid_mutex); 7492 7493 /* 7494 * It is possible for mount and umount to race in such a way that 7495 * we execute this code path, but open_fs_devices failed to clear 7496 * total_rw_bytes. We certainly want it cleared before reading the 7497 * device items, so clear it here. 7498 */ 7499 fs_info->fs_devices->total_rw_bytes = 0; 7500 7501 /* 7502 * Lockdep complains about possible circular locking dependency between 7503 * a disk's open_mutex (struct gendisk.open_mutex), the rw semaphores 7504 * used for freeze protection of a fs (struct super_block.s_writers), 7505 * which we take when starting a transaction, and extent buffers of the 7506 * chunk tree if we call read_one_dev() while holding a lock on an 7507 * extent buffer of the chunk tree. Since we are mounting the filesystem 7508 * and at this point there can't be any concurrent task modifying the 7509 * chunk tree, to keep it simple, just skip locking on the chunk tree. 7510 */ 7511 ASSERT(!test_bit(BTRFS_FS_OPEN, &fs_info->flags)); 7512 path->skip_locking = 1; 7513 7514 /* 7515 * Read all device items, and then all the chunk items. All 7516 * device items are found before any chunk item (their object id 7517 * is smaller than the lowest possible object id for a chunk 7518 * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID).
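 *
 * This is why the search key below starts at
 * (BTRFS_DEV_ITEMS_OBJECTID, 0, 0): btrfs_for_each_slot() then visits
 * every DEV_ITEM before the first CHUNK_ITEM.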
7519 */ 7520 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 7521 key.offset = 0; 7522 key.type = 0; 7523 btrfs_for_each_slot(root, &key, &found_key, path, iter_ret) { 7524 struct extent_buffer *node = path->nodes[1]; 7525 7526 leaf = path->nodes[0]; 7527 slot = path->slots[0]; 7528 7529 if (node) { 7530 if (last_ra_node != node->start) { 7531 readahead_tree_node_children(node); 7532 last_ra_node = node->start; 7533 } 7534 } 7535 if (found_key.type == BTRFS_DEV_ITEM_KEY) { 7536 struct btrfs_dev_item *dev_item; 7537 dev_item = btrfs_item_ptr(leaf, slot, 7538 struct btrfs_dev_item); 7539 ret = read_one_dev(leaf, dev_item); 7540 if (ret) 7541 goto error; 7542 total_dev++; 7543 } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) { 7544 struct btrfs_chunk *chunk; 7545 7546 /* 7547 * We are only called at mount time, so no need to take 7548 * fs_info->chunk_mutex. Plus, to avoid lockdep warnings, 7549 * we always lock first fs_info->chunk_mutex before 7550 * acquiring any locks on the chunk tree. This is a 7551 * requirement for chunk allocation, see the comment on 7552 * top of btrfs_chunk_alloc() for details. 7553 */ 7554 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk); 7555 ret = read_one_chunk(&found_key, leaf, chunk); 7556 if (ret) 7557 goto error; 7558 } 7559 } 7560 /* Catch error found during iteration */ 7561 if (iter_ret < 0) { 7562 ret = iter_ret; 7563 goto error; 7564 } 7565 7566 /* 7567 * After loading chunk tree, we've got all device information, 7568 * do another round of validation checks. 7569 */ 7570 if (total_dev != fs_info->fs_devices->total_devices) { 7571 btrfs_warn(fs_info, 7572 "super block num_devices %llu mismatch with DEV_ITEM count %llu, will be repaired on next transaction commit", 7573 btrfs_super_num_devices(fs_info->super_copy), 7574 total_dev); 7575 fs_info->fs_devices->total_devices = total_dev; 7576 btrfs_set_super_num_devices(fs_info->super_copy, total_dev); 7577 } 7578 if (btrfs_super_total_bytes(fs_info->super_copy) < 7579 fs_info->fs_devices->total_rw_bytes) { 7580 btrfs_err(fs_info, 7581 "super_total_bytes %llu mismatch with fs_devices total_rw_bytes %llu", 7582 btrfs_super_total_bytes(fs_info->super_copy), 7583 fs_info->fs_devices->total_rw_bytes); 7584 ret = -EINVAL; 7585 goto error; 7586 } 7587 ret = 0; 7588 error: 7589 mutex_unlock(&uuid_mutex); 7590 7591 btrfs_free_path(path); 7592 return ret; 7593 } 7594 7595 int btrfs_init_devices_late(struct btrfs_fs_info *fs_info) 7596 { 7597 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs; 7598 struct btrfs_device *device; 7599 int ret = 0; 7600 7601 fs_devices->fs_info = fs_info; 7602 7603 mutex_lock(&fs_devices->device_list_mutex); 7604 list_for_each_entry(device, &fs_devices->devices, dev_list) 7605 device->fs_info = fs_info; 7606 7607 list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) { 7608 list_for_each_entry(device, &seed_devs->devices, dev_list) { 7609 device->fs_info = fs_info; 7610 ret = btrfs_get_dev_zone_info(device, false); 7611 if (ret) 7612 break; 7613 } 7614 7615 seed_devs->fs_info = fs_info; 7616 } 7617 mutex_unlock(&fs_devices->device_list_mutex); 7618 7619 return ret; 7620 } 7621 7622 static u64 btrfs_dev_stats_value(const struct extent_buffer *eb, 7623 const struct btrfs_dev_stats_item *ptr, 7624 int index) 7625 { 7626 u64 val; 7627 7628 read_extent_buffer(eb, &val, 7629 offsetof(struct btrfs_dev_stats_item, values) + 7630 ((unsigned long)ptr) + (index * sizeof(u64)), 7631 sizeof(val)); 7632 return val; 7633 } 7634 7635 static void 
btrfs_set_dev_stats_value(struct extent_buffer *eb, 7636 struct btrfs_dev_stats_item *ptr, 7637 int index, u64 val) 7638 { 7639 write_extent_buffer(eb, &val, 7640 offsetof(struct btrfs_dev_stats_item, values) + 7641 ((unsigned long)ptr) + (index * sizeof(u64)), 7642 sizeof(val)); 7643 } 7644 7645 static int btrfs_device_init_dev_stats(struct btrfs_device *device, 7646 struct btrfs_path *path) 7647 { 7648 struct btrfs_dev_stats_item *ptr; 7649 struct extent_buffer *eb; 7650 struct btrfs_key key; 7651 int item_size; 7652 int i, ret, slot; 7653 7654 if (!device->fs_info->dev_root) 7655 return 0; 7656 7657 key.objectid = BTRFS_DEV_STATS_OBJECTID; 7658 key.type = BTRFS_PERSISTENT_ITEM_KEY; 7659 key.offset = device->devid; 7660 ret = btrfs_search_slot(NULL, device->fs_info->dev_root, &key, path, 0, 0); 7661 if (ret) { 7662 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) 7663 btrfs_dev_stat_set(device, i, 0); 7664 device->dev_stats_valid = 1; 7665 btrfs_release_path(path); 7666 return ret < 0 ? ret : 0; 7667 } 7668 slot = path->slots[0]; 7669 eb = path->nodes[0]; 7670 item_size = btrfs_item_size(eb, slot); 7671 7672 ptr = btrfs_item_ptr(eb, slot, struct btrfs_dev_stats_item); 7673 7674 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) { 7675 if (item_size >= (1 + i) * sizeof(__le64)) 7676 btrfs_dev_stat_set(device, i, 7677 btrfs_dev_stats_value(eb, ptr, i)); 7678 else 7679 btrfs_dev_stat_set(device, i, 0); 7680 } 7681 7682 device->dev_stats_valid = 1; 7683 btrfs_dev_stat_print_on_load(device); 7684 btrfs_release_path(path); 7685 7686 return 0; 7687 } 7688 7689 int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info) 7690 { 7691 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs; 7692 struct btrfs_device *device; 7693 struct btrfs_path *path = NULL; 7694 int ret = 0; 7695 7696 path = btrfs_alloc_path(); 7697 if (!path) 7698 return -ENOMEM; 7699 7700 mutex_lock(&fs_devices->device_list_mutex); 7701 list_for_each_entry(device, &fs_devices->devices, dev_list) { 7702 ret = btrfs_device_init_dev_stats(device, path); 7703 if (ret) 7704 goto out; 7705 } 7706 list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) { 7707 list_for_each_entry(device, &seed_devs->devices, dev_list) { 7708 ret = btrfs_device_init_dev_stats(device, path); 7709 if (ret) 7710 goto out; 7711 } 7712 } 7713 out: 7714 mutex_unlock(&fs_devices->device_list_mutex); 7715 7716 btrfs_free_path(path); 7717 return ret; 7718 } 7719 7720 static int update_dev_stat_item(struct btrfs_trans_handle *trans, 7721 struct btrfs_device *device) 7722 { 7723 struct btrfs_fs_info *fs_info = trans->fs_info; 7724 struct btrfs_root *dev_root = fs_info->dev_root; 7725 struct btrfs_path *path; 7726 struct btrfs_key key; 7727 struct extent_buffer *eb; 7728 struct btrfs_dev_stats_item *ptr; 7729 int ret; 7730 int i; 7731 7732 key.objectid = BTRFS_DEV_STATS_OBJECTID; 7733 key.type = BTRFS_PERSISTENT_ITEM_KEY; 7734 key.offset = device->devid; 7735 7736 path = btrfs_alloc_path(); 7737 if (!path) 7738 return -ENOMEM; 7739 ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1); 7740 if (ret < 0) { 7741 btrfs_warn_in_rcu(fs_info, 7742 "error %d while searching for dev_stats item for device %s", 7743 ret, btrfs_dev_name(device)); 7744 goto out; 7745 } 7746 7747 if (ret == 0 && 7748 btrfs_item_size(path->nodes[0], path->slots[0]) < sizeof(*ptr)) { 7749 /* need to delete old one and insert a new one */ 7750 ret = btrfs_del_item(trans, dev_root, path); 7751 if (ret != 0) { 7752 btrfs_warn_in_rcu(fs_info, 7753 "delete too small dev_stats 

int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs;
	struct btrfs_device *device;
	struct btrfs_path *path = NULL;
	int ret = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		ret = btrfs_device_init_dev_stats(device, path);
		if (ret)
			goto out;
	}
	list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
		list_for_each_entry(device, &seed_devs->devices, dev_list) {
			ret = btrfs_device_init_dev_stats(device, path);
			if (ret)
				goto out;
		}
	}
out:
	mutex_unlock(&fs_devices->device_list_mutex);

	btrfs_free_path(path);
	return ret;
}

static int update_dev_stat_item(struct btrfs_trans_handle *trans,
				struct btrfs_device *device)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *dev_root = fs_info->dev_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *eb;
	struct btrfs_dev_stats_item *ptr;
	int ret;
	int i;

	key.objectid = BTRFS_DEV_STATS_OBJECTID;
	key.type = BTRFS_PERSISTENT_ITEM_KEY;
	key.offset = device->devid;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
	if (ret < 0) {
		btrfs_warn_in_rcu(fs_info,
			"error %d while searching for dev_stats item for device %s",
			ret, btrfs_dev_name(device));
		goto out;
	}

	if (ret == 0 &&
	    btrfs_item_size(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
		/* Need to delete the old item and insert a new one */
		ret = btrfs_del_item(trans, dev_root, path);
		if (ret != 0) {
			btrfs_warn_in_rcu(fs_info,
				"delete too small dev_stats item for device %s failed %d",
				btrfs_dev_name(device), ret);
			goto out;
		}
		ret = 1;
	}

	if (ret == 1) {
		/* Need to insert a new item */
		btrfs_release_path(path);
		ret = btrfs_insert_empty_item(trans, dev_root, path,
					      &key, sizeof(*ptr));
		if (ret < 0) {
			btrfs_warn_in_rcu(fs_info,
				"insert dev_stats item for device %s failed %d",
				btrfs_dev_name(device), ret);
			goto out;
		}
	}

	eb = path->nodes[0];
	ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		btrfs_set_dev_stats_value(eb, ptr, i,
					  btrfs_dev_stat_read(device, i));
	btrfs_mark_buffer_dirty(trans, eb);

out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Called from commit_transaction. Writes all changed device stats to disk.
 */
int btrfs_run_dev_stats(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;
	int stats_cnt;
	int ret = 0;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		stats_cnt = atomic_read(&device->dev_stats_ccnt);
		if (!device->dev_stats_valid || stats_cnt == 0)
			continue;

		/*
		 * There is a LOAD-LOAD control dependency between the value of
		 * dev_stats_ccnt and updating the on-disk values, which
		 * requires reading the in-memory counters. Such control
		 * dependencies require explicit read memory barriers.
		 *
		 * This memory barrier pairs with smp_mb__before_atomic() in
		 * btrfs_dev_stat_inc()/btrfs_dev_stat_set() and with the full
		 * barrier implied by atomic_xchg() in
		 * btrfs_dev_stats_read_and_reset().
		 */
		smp_rmb();

		ret = update_dev_stat_item(trans, device);
		if (!ret)
			atomic_sub(stats_cnt, &device->dev_stats_ccnt);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	return ret;
}
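
/*
 * Illustrative sketch, not part of the original file: the writer side of the
 * barrier pairing described in btrfs_run_dev_stats() above. The counter value
 * is published before dev_stats_ccnt is bumped, so a reader that observes a
 * non-zero ccnt and then issues smp_rmb() also observes the counter update.
 * Simplified from the btrfs_dev_stat_inc() helper in volumes.h; kept under
 * "#if 0" so it is never compiled.
 */
#if 0
static void dev_stat_inc_sketch(struct btrfs_device *dev, int index)
{
	atomic_inc(dev->dev_stat_values + index);	/* 1: update counter */
	smp_mb__before_atomic();			/* order 1 before 2 */
	atomic_inc(&dev->dev_stats_ccnt);		/* 2: publish change */
}
#endif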

void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
{
	btrfs_dev_stat_inc(dev, index);

	if (!dev->dev_stats_valid)
		return;
	btrfs_err_rl_in_rcu(dev->fs_info,
		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
			   btrfs_dev_name(dev),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
}

static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
{
	int i;

	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		if (btrfs_dev_stat_read(dev, i) != 0)
			break;
	if (i == BTRFS_DEV_STAT_VALUES_MAX)
		return; /* all values == 0, suppress message */

	btrfs_info_in_rcu(dev->fs_info,
		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
		btrfs_dev_name(dev),
		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
}

int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info,
			struct btrfs_ioctl_get_dev_stats *stats)
{
	BTRFS_DEV_LOOKUP_ARGS(args);
	struct btrfs_device *dev;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	int i;

	mutex_lock(&fs_devices->device_list_mutex);
	args.devid = stats->devid;
	dev = btrfs_find_device(fs_info->fs_devices, &args);
	mutex_unlock(&fs_devices->device_list_mutex);

	if (!dev) {
		btrfs_warn(fs_info, "get dev_stats failed, device not found");
		return -ENODEV;
	} else if (!dev->dev_stats_valid) {
		btrfs_warn(fs_info, "get dev_stats failed, not yet valid");
		return -ENODEV;
	} else if (stats->flags & BTRFS_DEV_STATS_RESET) {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
			if (stats->nr_items > i)
				stats->values[i] =
					btrfs_dev_stat_read_and_reset(dev, i);
			else
				btrfs_dev_stat_set(dev, i, 0);
		}
		btrfs_info(fs_info, "device stats zeroed by %s (%d)",
			   current->comm, task_pid_nr(current));
	} else {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
			if (stats->nr_items > i)
				stats->values[i] = btrfs_dev_stat_read(dev, i);
	}
	if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
		stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
	return 0;
}
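
/*
 * Illustrative sketch, not part of the original file: how user space would
 * typically reach btrfs_get_dev_stats() above, via the
 * BTRFS_IOC_GET_DEV_STATS ioctl on any file descriptor inside the filesystem
 * (this function is the kernel side of "btrfs device stats"). User-space
 * code, kept under "#if 0" so it is never compiled.
 */
#if 0
#include <sys/ioctl.h>
#include <linux/btrfs.h>

static int read_dev_stats(int fs_fd, __u64 devid)
{
	struct btrfs_ioctl_get_dev_stats stats = {
		.devid = devid,
		.nr_items = BTRFS_DEV_STAT_VALUES_MAX,
		.flags = 0,	/* BTRFS_DEV_STATS_RESET would zero them */
	};

	if (ioctl(fs_fd, BTRFS_IOC_GET_DEV_STATS, &stats) < 0)
		return -1;
	/* stats.values[BTRFS_DEV_STAT_WRITE_ERRS] etc. are now filled in. */
	return 0;
}
#endif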

/*
 * Update the size and bytes used for each device where it changed. This is
 * delayed since we would otherwise get errors while writing out the
 * superblocks.
 *
 * Must be invoked during transaction commit.
 */
void btrfs_commit_device_sizes(struct btrfs_transaction *trans)
{
	struct btrfs_device *curr, *next;

	ASSERT(trans->state == TRANS_STATE_COMMIT_DOING);

	if (list_empty(&trans->dev_update_list))
		return;

	/*
	 * We don't need the device_list_mutex here. This list is owned by the
	 * transaction and the transaction must complete before the device is
	 * released.
	 */
	mutex_lock(&trans->fs_info->chunk_mutex);
	list_for_each_entry_safe(curr, next, &trans->dev_update_list,
				 post_commit_list) {
		list_del_init(&curr->post_commit_list);
		curr->commit_total_bytes = curr->disk_total_bytes;
		curr->commit_bytes_used = curr->bytes_used;
	}
	mutex_unlock(&trans->fs_info->chunk_mutex);
}

/*
 * Multiplicity factor for simple profiles: DUP, RAID1-like and RAID10.
 */
int btrfs_bg_type_to_factor(u64 flags)
{
	const int index = btrfs_bg_flags_to_raid_index(flags);

	return btrfs_raid_array[index].ncopies;
}
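
/*
 * Illustrative sketch, not part of the original file: what the factor means.
 * For the mirrored profiles the raw disk usage is the logical size multiplied
 * by ncopies, e.g. RAID1 stores every byte twice. Note that the parity
 * profiles report ncopies == 1; their overhead is nparity stripes per full
 * stripe rather than a flat multiplier, which is why the comment above
 * restricts the factor to the simple profiles. Kept under "#if 0" so it is
 * never compiled.
 */
#if 0
static u64 bg_raw_bytes_example(void)
{
	const u64 logical_bytes = SZ_1G;

	/* btrfs_bg_type_to_factor(BTRFS_BLOCK_GROUP_RAID1) == 2 */
	return logical_bytes * btrfs_bg_type_to_factor(BTRFS_BLOCK_GROUP_RAID1);
}
#endif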

static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
				 u64 chunk_offset, u64 devid,
				 u64 physical_offset, u64 physical_len)
{
	struct btrfs_dev_lookup_args args = { .devid = devid };
	struct btrfs_chunk_map *map;
	struct btrfs_device *dev;
	u64 stripe_len;
	bool found = false;
	int ret = 0;
	int i;

	map = btrfs_find_chunk_map(fs_info, chunk_offset, 1);
	if (!map) {
		btrfs_err(fs_info,
"dev extent physical offset %llu on devid %llu doesn't have corresponding chunk",
			  physical_offset, devid);
		ret = -EUCLEAN;
		goto out;
	}

	stripe_len = btrfs_calc_stripe_length(map);
	if (physical_len != stripe_len) {
		btrfs_err(fs_info,
"dev extent physical offset %llu on devid %llu length doesn't match chunk %llu, have %llu expect %llu",
			  physical_offset, devid, map->start, physical_len,
			  stripe_len);
		ret = -EUCLEAN;
		goto out;
	}

	/*
	 * Very old mkfs.btrfs (before v4.1) will not respect the reserved
	 * space. Although the kernel can handle it without problems, it is
	 * better to warn the user.
	 */
	if (physical_offset < BTRFS_DEVICE_RANGE_RESERVED)
		btrfs_warn(fs_info,
		"devid %llu physical %llu len %llu inside the reserved space",
			   devid, physical_offset, physical_len);

	for (i = 0; i < map->num_stripes; i++) {
		if (map->stripes[i].dev->devid == devid &&
		    map->stripes[i].physical == physical_offset) {
			found = true;
			if (map->verified_stripes >= map->num_stripes) {
				btrfs_err(fs_info,
				"too many dev extents for chunk %llu found",
					  map->start);
				ret = -EUCLEAN;
				goto out;
			}
			map->verified_stripes++;
			break;
		}
	}
	if (!found) {
		btrfs_err(fs_info,
	"dev extent physical offset %llu devid %llu has no corresponding chunk",
			  physical_offset, devid);
		ret = -EUCLEAN;
	}

	/* Make sure no dev extent is beyond device boundary */
	dev = btrfs_find_device(fs_info->fs_devices, &args);
	if (!dev) {
		btrfs_err(fs_info, "failed to find devid %llu", devid);
		ret = -EUCLEAN;
		goto out;
	}

	if (physical_offset + physical_len > dev->disk_total_bytes) {
		btrfs_err(fs_info,
"dev extent devid %llu physical offset %llu len %llu is beyond device boundary %llu",
			  devid, physical_offset, physical_len,
			  dev->disk_total_bytes);
		ret = -EUCLEAN;
		goto out;
	}

	if (dev->zone_info) {
		u64 zone_size = dev->zone_info->zone_size;

		if (!IS_ALIGNED(physical_offset, zone_size) ||
		    !IS_ALIGNED(physical_len, zone_size)) {
			btrfs_err(fs_info,
"zoned: dev extent devid %llu physical offset %llu len %llu is not aligned to device zone",
				  devid, physical_offset, physical_len);
			ret = -EUCLEAN;
			goto out;
		}
	}

out:
	btrfs_free_chunk_map(map);
	return ret;
}

static int verify_chunk_dev_extent_mapping(struct btrfs_fs_info *fs_info)
{
	struct rb_node *node;
	int ret = 0;

	read_lock(&fs_info->mapping_tree_lock);
	for (node = rb_first_cached(&fs_info->mapping_tree); node; node = rb_next(node)) {
		struct btrfs_chunk_map *map;

		map = rb_entry(node, struct btrfs_chunk_map, rb_node);
		if (map->num_stripes != map->verified_stripes) {
			btrfs_err(fs_info,
			"chunk %llu has missing dev extent, have %d expect %d",
				  map->start, map->verified_stripes,
				  map->num_stripes);
			ret = -EUCLEAN;
			goto out;
		}
	}
out:
	read_unlock(&fs_info->mapping_tree_lock);
	return ret;
}

/*
 * Ensure that all dev extents are mapped to the correct chunk, otherwise
 * later chunk allocation/free would cause unexpected behavior.
 *
 * NOTE: This will iterate through the whole device tree, which should be
 * about the same size as the chunk tree. This slightly increases mount time.
 */
int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info)
{
	struct btrfs_path *path;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_key key;
	u64 prev_devid = 0;
	u64 prev_dev_ext_end = 0;
	int ret = 0;

	/*
	 * We don't have a dev_root because we mounted with ignorebadroots and
	 * failed to load the root, so we want to skip the verification in this
	 * case for sure.
	 *
	 * However if the dev root is fine, but the tree itself is corrupted
	 * we'd still fail to mount. This verification is only to make sure
	 * writes can happen safely, so instead just bypass this check
	 * completely in the case of IGNOREBADROOTS.
	 */
	if (btrfs_test_opt(fs_info, IGNOREBADROOTS))
		return 0;

	key.objectid = 1;
	key.type = BTRFS_DEV_EXTENT_KEY;
	key.offset = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = READA_FORWARD;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
		ret = btrfs_next_leaf(root, path);
		if (ret < 0)
			goto out;
		/* No dev extents at all? Not good */
		if (ret > 0) {
			ret = -EUCLEAN;
			goto out;
		}
	}
	while (1) {
		struct extent_buffer *leaf = path->nodes[0];
		struct btrfs_dev_extent *dext;
		int slot = path->slots[0];
		u64 chunk_offset;
		u64 physical_offset;
		u64 physical_len;
		u64 devid;

		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.type != BTRFS_DEV_EXTENT_KEY)
			break;
		devid = key.objectid;
		physical_offset = key.offset;

		dext = btrfs_item_ptr(leaf, slot, struct btrfs_dev_extent);
		chunk_offset = btrfs_dev_extent_chunk_offset(leaf, dext);
		physical_len = btrfs_dev_extent_length(leaf, dext);

		/* Check if this dev extent overlaps with the previous one */
		if (devid == prev_devid && physical_offset < prev_dev_ext_end) {
			btrfs_err(fs_info,
"dev extent devid %llu physical offset %llu overlap with previous dev extent end %llu",
				  devid, physical_offset, prev_dev_ext_end);
			ret = -EUCLEAN;
			goto out;
		}

		ret = verify_one_dev_extent(fs_info, chunk_offset, devid,
					    physical_offset, physical_len);
		if (ret < 0)
			goto out;
		prev_devid = devid;
		prev_dev_ext_end = physical_offset + physical_len;

		ret = btrfs_next_item(root, path);
		if (ret < 0)
			goto out;
		if (ret > 0) {
			ret = 0;
			break;
		}
	}

	/* Ensure all chunks have corresponding dev extents */
	ret = verify_chunk_dev_extent_mapping(fs_info);
out:
	btrfs_free_path(path);
	return ret;
}
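
/*
 * Illustrative sketch, not part of the original file: why a single
 * prev_dev_ext_end value is enough for the overlap detection above. Dev
 * extent keys are (devid, BTRFS_DEV_EXTENT_KEY, physical_offset), so the
 * forward scan visits the extents of one device in increasing physical
 * order; each extent only needs to be checked against the end of the one
 * immediately before it. Kept under "#if 0" so it is never compiled.
 */
#if 0
static bool dev_extents_overlap_example(void)
{
	/* Hypothetical sorted extents of one device: [0, 1M) and [512K, ...) */
	const u64 prev_start = 0, prev_len = SZ_1M;
	const u64 next_start = SZ_512K;

	/* Same check as in btrfs_verify_dev_extents(). */
	return next_start < prev_start + prev_len;	/* true: overlap */
}
#endif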

/*
 * Check whether the given block group or device is pinned by any inode being
 * used as a swapfile.
 */
bool btrfs_pinned_by_swapfile(struct btrfs_fs_info *fs_info, void *ptr)
{
	struct btrfs_swapfile_pin *sp;
	struct rb_node *node;

	spin_lock(&fs_info->swapfile_pins_lock);
	node = fs_info->swapfile_pins.rb_node;
	while (node) {
		sp = rb_entry(node, struct btrfs_swapfile_pin, node);
		if (ptr < sp->ptr)
			node = node->rb_left;
		else if (ptr > sp->ptr)
			node = node->rb_right;
		else
			break;
	}
	spin_unlock(&fs_info->swapfile_pins_lock);
	return node != NULL;
}

static int relocating_repair_kthread(void *data)
{
	struct btrfs_block_group *cache = data;
	struct btrfs_fs_info *fs_info = cache->fs_info;
	u64 target;
	int ret = 0;

	target = cache->start;
	btrfs_put_block_group(cache);

	sb_start_write(fs_info->sb);
	if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE)) {
		btrfs_info(fs_info,
			   "zoned: skip relocating block group %llu to repair: EBUSY",
			   target);
		sb_end_write(fs_info->sb);
		return -EBUSY;
	}

	mutex_lock(&fs_info->reclaim_bgs_lock);

	/* Ensure the block group still exists */
	cache = btrfs_lookup_block_group(fs_info, target);
	if (!cache)
		goto out;

	if (!test_bit(BLOCK_GROUP_FLAG_RELOCATING_REPAIR, &cache->runtime_flags))
		goto out;

	ret = btrfs_may_alloc_data_chunk(fs_info, target);
	if (ret < 0)
		goto out;

	btrfs_info(fs_info,
		   "zoned: relocating block group %llu to repair IO failure",
		   target);
	ret = btrfs_relocate_chunk(fs_info, target);

out:
	if (cache)
		btrfs_put_block_group(cache);
	mutex_unlock(&fs_info->reclaim_bgs_lock);
	btrfs_exclop_finish(fs_info);
	sb_end_write(fs_info->sb);

	return ret;
}

bool btrfs_repair_one_zone(struct btrfs_fs_info *fs_info, u64 logical)
{
	struct btrfs_block_group *cache;

	if (!btrfs_is_zoned(fs_info))
		return false;

	/* Do not attempt to repair in degraded state */
	if (btrfs_test_opt(fs_info, DEGRADED))
		return true;

	cache = btrfs_lookup_block_group(fs_info, logical);
	if (!cache)
		return true;

	if (test_and_set_bit(BLOCK_GROUP_FLAG_RELOCATING_REPAIR, &cache->runtime_flags)) {
		btrfs_put_block_group(cache);
		return true;
	}

	kthread_run(relocating_repair_kthread, cache,
		    "btrfs-relocating-repair");

	return true;
}

static void map_raid56_repair_block(struct btrfs_io_context *bioc,
				    struct btrfs_io_stripe *smap,
				    u64 logical)
{
	int data_stripes = nr_bioc_data_stripes(bioc);
	int i;

	for (i = 0; i < data_stripes; i++) {
		u64 stripe_start = bioc->full_stripe_logical +
				   btrfs_stripe_nr_to_offset(i);

		if (logical >= stripe_start &&
		    logical < stripe_start + BTRFS_STRIPE_LEN)
			break;
	}
	ASSERT(i < data_stripes);
	smap->dev = bioc->stripes[i].dev;
	smap->physical = bioc->stripes[i].physical +
			((logical - bioc->full_stripe_logical) &
			 BTRFS_STRIPE_LEN_MASK);
}
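
/*
 * Illustrative sketch, not part of the original file: the geometry used by
 * map_raid56_repair_block() above. With BTRFS_STRIPE_LEN of 64K, data stripe
 * i covers logical [full_stripe_logical + i * 64K, full_stripe_logical +
 * (i + 1) * 64K), and the offset inside that stripe is just the low bits of
 * the distance from the full stripe start. Kept under "#if 0" so it is never
 * compiled.
 */
#if 0
static void raid56_repair_geometry_example(struct btrfs_io_context *bioc,
					   u64 logical)
{
	const u64 delta = logical - bioc->full_stripe_logical;
	/* Which data stripe the logical address falls into ... */
	const u32 stripe_nr = delta >> BTRFS_STRIPE_LEN_SHIFT;
	/* ... and the byte offset inside that stripe. */
	const u64 in_stripe = delta & BTRFS_STRIPE_LEN_MASK;

	ASSERT(stripe_nr < nr_bioc_data_stripes(bioc));
	ASSERT(in_stripe < BTRFS_STRIPE_LEN);
}
#endif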
8293 * 8294 * Callers should ensure: 8295 * 8296 * - Call btrfs_bio_counter_inc_blocked() first 8297 * - The range does not cross stripe boundary 8298 * - Has a valid @mirror_num passed in. 8299 */ 8300 int btrfs_map_repair_block(struct btrfs_fs_info *fs_info, 8301 struct btrfs_io_stripe *smap, u64 logical, 8302 u32 length, int mirror_num) 8303 { 8304 struct btrfs_io_context *bioc = NULL; 8305 u64 map_length = length; 8306 int mirror_ret = mirror_num; 8307 int ret; 8308 8309 ASSERT(mirror_num > 0); 8310 8311 ret = btrfs_map_block(fs_info, BTRFS_MAP_WRITE, logical, &map_length, 8312 &bioc, smap, &mirror_ret); 8313 if (ret < 0) 8314 return ret; 8315 8316 /* The map range should not cross stripe boundary. */ 8317 ASSERT(map_length >= length); 8318 8319 /* Already mapped to single stripe. */ 8320 if (!bioc) 8321 goto out; 8322 8323 /* Map the RAID56 multi-stripe writes to a single one. */ 8324 if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) { 8325 map_raid56_repair_block(bioc, smap, logical); 8326 goto out; 8327 } 8328 8329 ASSERT(mirror_num <= bioc->num_stripes); 8330 smap->dev = bioc->stripes[mirror_num - 1].dev; 8331 smap->physical = bioc->stripes[mirror_num - 1].physical; 8332 out: 8333 btrfs_put_bioc(bioc); 8334 ASSERT(smap->dev); 8335 return 0; 8336 } 8337