// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/semaphore.h>
#include <linux/uuid.h>
#include <linux/list_sort.h>
#include <linux/namei.h>
#include "misc.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "volumes.h"
#include "raid56.h"
#include "rcu-string.h"
#include "dev-replace.h"
#include "sysfs.h"
#include "tree-checker.h"
#include "space-info.h"
#include "block-group.h"
#include "discard.h"
#include "zoned.h"
#include "fs.h"
#include "accessors.h"
#include "uuid-tree.h"
#include "ioctl.h"
#include "relocation.h"
#include "scrub.h"
#include "super.h"
#include "raid-stripe-tree.h"

#define BTRFS_BLOCK_GROUP_STRIPE_MASK	(BTRFS_BLOCK_GROUP_RAID0 | \
					 BTRFS_BLOCK_GROUP_RAID10 | \
					 BTRFS_BLOCK_GROUP_RAID56_MASK)

struct btrfs_io_geometry {
	u32 stripe_index;
	u32 stripe_nr;
	int mirror_num;
	int num_stripes;
	u64 stripe_offset;
	u64 raid56_full_stripe_start;
	int max_errors;
	enum btrfs_map_op op;
};

const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
	[BTRFS_RAID_RAID10] = {
		.sub_stripes	= 2,
		.dev_stripes	= 1,
		.devs_max	= 0,	/* 0 == as many as possible */
		.devs_min	= 2,
		.tolerated_failures = 1,
		.devs_increment	= 2,
		.ncopies	= 2,
		.nparity	= 0,
		.raid_name	= "raid10",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID10,
		.mindev_error	= BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 2,
		.devs_min	= 2,
		.tolerated_failures = 1,
		.devs_increment	= 2,
		.ncopies	= 2,
		.nparity	= 0,
		.raid_name	= "raid1",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID1,
		.mindev_error	= BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1C3] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 3,
		.devs_min	= 3,
		.tolerated_failures = 2,
		.devs_increment	= 3,
		.ncopies	= 3,
		.nparity	= 0,
		.raid_name	= "raid1c3",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID1C3,
		.mindev_error	= BTRFS_ERROR_DEV_RAID1C3_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1C4] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 4,
		.devs_min	= 4,
		.tolerated_failures = 3,
		.devs_increment	= 4,
		.ncopies	= 4,
		.nparity	= 0,
		.raid_name	= "raid1c4",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID1C4,
		.mindev_error	= BTRFS_ERROR_DEV_RAID1C4_MIN_NOT_MET,
	},
	[BTRFS_RAID_DUP] = {
		.sub_stripes	= 1,
		.dev_stripes	= 2,
		.devs_max	= 1,
		.devs_min	= 1,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 2,
		.nparity	= 0,
		.raid_name	= "dup",
		.bg_flag	= BTRFS_BLOCK_GROUP_DUP,
		.mindev_error	= 0,
	},
	[BTRFS_RAID_RAID0] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 1,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity	= 0,
		.raid_name	= "raid0",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID0,
		.mindev_error	= 0,
	},
	[BTRFS_RAID_SINGLE] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 1,
		.devs_min	= 1,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity	= 0,
		.raid_name	= "single",
		.bg_flag	= 0,
		.mindev_error	= 0,
	},
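	/*
	 * Parity profiles follow: of a full stripe's num_stripes, nparity
	 * stripes hold parity blocks, so usable capacity scales with
	 * (num_stripes - nparity).
	 */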
	[BTRFS_RAID_RAID5] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 2,
		.tolerated_failures = 1,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity	= 1,
		.raid_name	= "raid5",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID5,
		.mindev_error	= BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID6] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 3,
		.tolerated_failures = 2,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity	= 2,
		.raid_name	= "raid6",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID6,
		.mindev_error	= BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET,
	},
};

/*
 * Convert block group flags (BTRFS_BLOCK_GROUP_*) to btrfs_raid_types, which
 * can be used as index to access btrfs_raid_array[].
 */
enum btrfs_raid_types __attribute_const__ btrfs_bg_flags_to_raid_index(u64 flags)
{
	const u64 profile = (flags & BTRFS_BLOCK_GROUP_PROFILE_MASK);

	if (!profile)
		return BTRFS_RAID_SINGLE;

	return BTRFS_BG_FLAG_TO_INDEX(profile);
}

const char *btrfs_bg_type_to_raid_name(u64 flags)
{
	const int index = btrfs_bg_flags_to_raid_index(flags);

	if (index >= BTRFS_NR_RAID_TYPES)
		return NULL;

	return btrfs_raid_array[index].raid_name;
}

int btrfs_nr_parity_stripes(u64 type)
{
	enum btrfs_raid_types index = btrfs_bg_flags_to_raid_index(type);

	return btrfs_raid_array[index].nparity;
}

/*
 * Fill @buf with textual description of @bg_flags, no more than @size_buf
 * bytes including terminating null byte.
 */
void btrfs_describe_block_groups(u64 bg_flags, char *buf, u32 size_buf)
{
	int i;
	int ret;
	char *bp = buf;
	u64 flags = bg_flags;
	u32 size_bp = size_buf;

	if (!flags) {
		strcpy(bp, "NONE");
		return;
	}

#define DESCRIBE_FLAG(flag, desc)					\
	do {								\
		if (flags & (flag)) {					\
			ret = snprintf(bp, size_bp, "%s|", (desc));	\
			if (ret < 0 || ret >= size_bp)			\
				goto out_overflow;			\
			size_bp -= ret;					\
			bp += ret;					\
			flags &= ~(flag);				\
		}							\
	} while (0)

	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_DATA, "data");
	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_SYSTEM, "system");
	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_METADATA, "metadata");

	DESCRIBE_FLAG(BTRFS_AVAIL_ALLOC_BIT_SINGLE, "single");
	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
		DESCRIBE_FLAG(btrfs_raid_array[i].bg_flag,
			      btrfs_raid_array[i].raid_name);
#undef DESCRIBE_FLAG

	if (flags) {
		ret = snprintf(bp, size_bp, "0x%llx|", flags);
		size_bp -= ret;
	}

	if (size_bp < size_buf)
		buf[size_buf - size_bp - 1] = '\0'; /* remove last | */

	/*
	 * The text is trimmed, it's up to the caller to provide a
	 * sufficiently large buffer.
	 */
out_overflow:;
}
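
/*
 * Example output of btrfs_describe_block_groups(): for bg_flags of
 * BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_RAID1 the buffer is filled
 * with "data|raid1".
 */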

static int init_first_rw_device(struct btrfs_trans_handle *trans);
static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info);
static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);

/*
 * Device locking
 * ==============
 *
 * There are several mutexes that protect manipulation of devices and low-level
 * structures like chunks but not block groups, extents or files
 *
 * uuid_mutex (global lock)
 * ------------------------
 * protects the fs_uuids list that tracks all per-fs fs_devices, resulting from
 * the SCAN_DEV ioctl registration or from mount either implicitly (the first
 * device) or requested by the device= mount option
 *
 * the mutex can be very coarse and can cover long-running operations
 *
 * protects: updates to fs_devices counters like missing devices, rw devices,
 * seeding, structure cloning, opening/closing devices at mount/umount time
 *
 * global::fs_devs - add, remove, updates to the global list
 *
 * does not protect: manipulation of the fs_devices::devices list in general
 * but in mount context it could be used to exclude list modifications by eg.
 * scan ioctl
 *
 * btrfs_device::name - renames (write side), read is RCU
 *
 * fs_devices::device_list_mutex (per-fs, with RCU)
 * ------------------------------------------------
 * protects updates to fs_devices::devices, ie. adding and deleting
 *
 * simple list traversal with read-only actions can be done with RCU protection
 *
 * may be used to exclude some operations from running concurrently without any
 * modifications to the list (see write_all_supers)
 *
 * Is not required at mount and close times, because our device list is
 * protected by the uuid_mutex at that point.
 *
 * balance_mutex
 * -------------
 * protects balance structures (status, state) and context accessed from
 * several places (internally, ioctl)
 *
 * chunk_mutex
 * -----------
 * protects chunks, adding or removing during allocation, trim or when a new
 * device is added/removed. Additionally it also protects post_commit_list of
 * individual devices, since they can be added to the transaction's
 * post_commit_list only with chunk_mutex held.
 *
 * cleaner_mutex
 * -------------
 * a big lock that is held by the cleaner thread and prevents running subvolume
 * cleaning together with relocation or delayed iputs
 *
 *
 * Lock nesting
 * ============
 *
 * uuid_mutex
 *   device_list_mutex
 *     chunk_mutex
 *   balance_mutex
 *
 *
 * Exclusive operations
 * ====================
 *
 * Maintains the exclusivity of the following operations that apply to the
 * whole filesystem and cannot run in parallel.
 *
 * - Balance (*)
 * - Device add
 * - Device remove
 * - Device replace (*)
 * - Resize
 *
 * The device operations (as above) can be in one of the following states:
 *
 * - Running state
 * - Paused state
 * - Completed state
 *
 * Only device operations marked with (*) can go into the Paused state for the
 * following reasons:
 *
 * - ioctl (only Balance can be Paused through ioctl)
 * - filesystem remounted as read-only
 * - filesystem unmounted and mounted as read-only
 * - system power-cycle and filesystem mounted as read-only
 * - filesystem or device errors leading to forced read-only
 *
 * The status of exclusive operation is set and cleared atomically.
 * During the course of Paused state, fs_info::exclusive_operation remains set.
 * A device operation in Paused or Running state can be canceled or resumed
 * either by ioctl (Balance only) or when remounted as read-write.
 * The exclusive status is cleared when the device operation is canceled or
 * completed.
 */

DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);
struct list_head * __attribute_const__ btrfs_get_fs_uuids(void)
{
	return &fs_uuids;
}
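
/*
 * Illustration of the nesting order documented above when both the global
 * and the per-fs lock are needed (a sketch, not a required pattern
 * everywhere):
 *
 *	mutex_lock(&uuid_mutex);
 *	mutex_lock(&fs_devices->device_list_mutex);
 *	...
 *	mutex_unlock(&fs_devices->device_list_mutex);
 *	mutex_unlock(&uuid_mutex);
 */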

/*
 * Allocate new btrfs_fs_devices structure identified by a fsid.
 *
 * @fsid:	if not NULL, copy the UUID to fs_devices::fsid and to
 *		fs_devices::metadata_fsid
 *
 * Return a pointer to a new struct btrfs_fs_devices on success, or ERR_PTR().
 * The returned struct is not linked onto any lists and can be destroyed with
 * kfree() right away.
 */
static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid)
{
	struct btrfs_fs_devices *fs_devs;

	fs_devs = kzalloc(sizeof(*fs_devs), GFP_KERNEL);
	if (!fs_devs)
		return ERR_PTR(-ENOMEM);

	mutex_init(&fs_devs->device_list_mutex);

	INIT_LIST_HEAD(&fs_devs->devices);
	INIT_LIST_HEAD(&fs_devs->alloc_list);
	INIT_LIST_HEAD(&fs_devs->fs_list);
	INIT_LIST_HEAD(&fs_devs->seed_list);

	if (fsid) {
		memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE);
		memcpy(fs_devs->metadata_uuid, fsid, BTRFS_FSID_SIZE);
	}

	return fs_devs;
}

static void btrfs_free_device(struct btrfs_device *device)
{
	WARN_ON(!list_empty(&device->post_commit_list));
	rcu_string_free(device->name);
	extent_io_tree_release(&device->alloc_state);
	btrfs_destroy_dev_zone_info(device);
	kfree(device);
}

static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;

	WARN_ON(fs_devices->opened);
	while (!list_empty(&fs_devices->devices)) {
		device = list_entry(fs_devices->devices.next,
				    struct btrfs_device, dev_list);
		list_del(&device->dev_list);
		btrfs_free_device(device);
	}
	kfree(fs_devices);
}

void __exit btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;

	while (!list_empty(&fs_uuids)) {
		fs_devices = list_entry(fs_uuids.next,
					struct btrfs_fs_devices, fs_list);
		list_del(&fs_devices->fs_list);
		free_fs_devices(fs_devices);
	}
}

static bool match_fsid_fs_devices(const struct btrfs_fs_devices *fs_devices,
				  const u8 *fsid, const u8 *metadata_fsid)
{
	if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) != 0)
		return false;

	if (!metadata_fsid)
		return true;

	if (memcmp(metadata_fsid, fs_devices->metadata_uuid, BTRFS_FSID_SIZE) != 0)
		return false;

	return true;
}

static noinline struct btrfs_fs_devices *find_fsid(
		const u8 *fsid, const u8 *metadata_fsid)
{
	struct btrfs_fs_devices *fs_devices;

	ASSERT(fsid);

	/* Handle non-split brain cases */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (match_fsid_fs_devices(fs_devices, fsid, metadata_fsid))
			return fs_devices;
	}
	return NULL;
}

static int
btrfs_get_bdev_and_sb(const char *device_path, blk_mode_t flags, void *holder,
		      int flush, struct file **bdev_file,
		      struct btrfs_super_block **disk_super)
{
	struct block_device *bdev;
	int ret;

	*bdev_file = bdev_file_open_by_path(device_path, flags, holder, NULL);

	if (IS_ERR(*bdev_file)) {
		ret = PTR_ERR(*bdev_file);
		goto error;
	}
	bdev = file_bdev(*bdev_file);

	if (flush)
		sync_blockdev(bdev);
	ret = set_blocksize(bdev, BTRFS_BDEV_BLOCKSIZE);
	if (ret) {
		fput(*bdev_file);
		goto error;
	}
	invalidate_bdev(bdev);
	*disk_super = btrfs_read_dev_super(bdev);
	if (IS_ERR(*disk_super)) {
		ret = PTR_ERR(*disk_super);
		fput(*bdev_file);
		goto error;
	}

	return 0;

error:
	*bdev_file = NULL;
	return ret;
}

/*
 * Search and remove all stale devices (which are not mounted). When both
 * inputs are NULL, it will search and release all stale devices.
 *
 * @devt:	 Optional. When provided, it will release only the unmounted
 *		 devices matching this devt.
 * @skip_device: Optional. Will skip this device when searching for the stale
 *		 devices.
 *
 * Return:	0 for success or if @devt is 0.
 *		-EBUSY if @devt is a mounted device.
 *		-ENOENT if @devt does not match any device in the list.
 */
static int btrfs_free_stale_devices(dev_t devt, struct btrfs_device *skip_device)
{
	struct btrfs_fs_devices *fs_devices, *tmp_fs_devices;
	struct btrfs_device *device, *tmp_device;
	int ret;
	bool freed = false;

	lockdep_assert_held(&uuid_mutex);

	/* Return good status if there is no instance of devt. */
	ret = 0;
	list_for_each_entry_safe(fs_devices, tmp_fs_devices, &fs_uuids, fs_list) {

		mutex_lock(&fs_devices->device_list_mutex);
		list_for_each_entry_safe(device, tmp_device,
					 &fs_devices->devices, dev_list) {
			if (skip_device && skip_device == device)
				continue;
			if (devt && devt != device->devt)
				continue;
			if (fs_devices->opened) {
				if (devt)
					ret = -EBUSY;
				break;
			}

			/* delete the stale device */
			fs_devices->num_devices--;
			list_del(&device->dev_list);
			btrfs_free_device(device);

			freed = true;
		}
		mutex_unlock(&fs_devices->device_list_mutex);

		if (fs_devices->num_devices == 0) {
			btrfs_sysfs_remove_fsid(fs_devices);
			list_del(&fs_devices->fs_list);
			free_fs_devices(fs_devices);
		}
	}

	/* If there is at least one freed device return 0. */
	if (freed)
		return 0;

	return ret;
}

static struct btrfs_fs_devices *find_fsid_by_device(
					struct btrfs_super_block *disk_super,
					dev_t devt, bool *same_fsid_diff_dev)
{
	struct btrfs_fs_devices *fsid_fs_devices;
	struct btrfs_fs_devices *devt_fs_devices;
	const bool has_metadata_uuid = (btrfs_super_incompat_flags(disk_super) &
					BTRFS_FEATURE_INCOMPAT_METADATA_UUID);
	bool found_by_devt = false;

	/* Find the fs_device by the usual method, if found use it. */
	fsid_fs_devices = find_fsid(disk_super->fsid,
		    has_metadata_uuid ? disk_super->metadata_uuid : NULL);

	/* The temp_fsid feature is supported only with single device filesystem. */
	if (btrfs_super_num_devices(disk_super) != 1)
		return fsid_fs_devices;

	/*
	 * A seed device is an integral component of the sprout device, which
	 * functions as a multi-device filesystem. So, the temp-fsid feature
	 * is not supported.
	 */
	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING)
		return fsid_fs_devices;

	/* Try to find a fs_devices by matching devt. */
	list_for_each_entry(devt_fs_devices, &fs_uuids, fs_list) {
		struct btrfs_device *device;

		list_for_each_entry(device, &devt_fs_devices->devices, dev_list) {
			if (device->devt == devt) {
				found_by_devt = true;
				break;
			}
		}
		if (found_by_devt)
			break;
	}

	if (found_by_devt) {
		/* Existing device. */
		if (fsid_fs_devices == NULL) {
			if (devt_fs_devices->opened == 0) {
				/* Stale device. */
				return NULL;
			} else {
				/* temp_fsid is mounting a subvol. */
				return devt_fs_devices;
			}
		} else {
			/* Regular or temp_fsid device mounting a subvol. */
			return devt_fs_devices;
		}
	} else {
		/* New device. */
		if (fsid_fs_devices == NULL) {
			return NULL;
		} else {
			/* sb::fsid is already used, create a new temp_fsid. */
			*same_fsid_diff_dev = true;
			return NULL;
		}
	}

	/* Not reached. */
}
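
/*
 * Summary of find_fsid_by_device() outcomes:
 *
 *	devt found	fsid found	result
 *	yes		no		NULL if stale, devt match if mounted
 *	yes		yes		devt match
 *	no		no		NULL (new device)
 *	no		yes		NULL, *same_fsid_diff_dev set
 */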

/*
 * This is only used on mount, and we are protected from competing things
 * messing with our fs_devices by the uuid_mutex, thus we do not need the
 * fs_devices->device_list_mutex here.
 */
static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
			struct btrfs_device *device, blk_mode_t flags,
			void *holder)
{
	struct file *bdev_file;
	struct btrfs_super_block *disk_super;
	u64 devid;
	int ret;

	if (device->bdev)
		return -EINVAL;
	if (!device->name)
		return -EINVAL;

	ret = btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
				    &bdev_file, &disk_super);
	if (ret)
		return ret;

	devid = btrfs_stack_device_id(&disk_super->dev_item);
	if (devid != device->devid)
		goto error_free_page;

	if (memcmp(device->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE))
		goto error_free_page;

	device->generation = btrfs_super_generation(disk_super);

	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
		if (btrfs_super_incompat_flags(disk_super) &
		    BTRFS_FEATURE_INCOMPAT_METADATA_UUID) {
			pr_err(
		"BTRFS: Invalid seeding and uuid-changed device detected\n");
			goto error_free_page;
		}

		clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
		fs_devices->seeding = true;
	} else {
		if (bdev_read_only(file_bdev(bdev_file)))
			clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
		else
			set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
	}

	if (!bdev_nonrot(file_bdev(bdev_file)))
		fs_devices->rotating = true;

	if (bdev_max_discard_sectors(file_bdev(bdev_file)))
		fs_devices->discardable = true;

	device->bdev_file = bdev_file;
	device->bdev = file_bdev(bdev_file);
	clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);

	if (device->devt != device->bdev->bd_dev) {
		btrfs_warn(NULL,
			   "device %s maj:min changed from %d:%d to %d:%d",
			   device->name->str, MAJOR(device->devt),
			   MINOR(device->devt), MAJOR(device->bdev->bd_dev),
			   MINOR(device->bdev->bd_dev));

		device->devt = device->bdev->bd_dev;
	}

	fs_devices->open_devices++;
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
		fs_devices->rw_devices++;
		list_add_tail(&device->dev_alloc_list, &fs_devices->alloc_list);
	}
	btrfs_release_disk_super(disk_super);

	return 0;

error_free_page:
	btrfs_release_disk_super(disk_super);
	fput(bdev_file);

	return -EINVAL;
}

u8 *btrfs_sb_fsid_ptr(struct btrfs_super_block *sb)
{
	bool has_metadata_uuid = (btrfs_super_incompat_flags(sb) &
				  BTRFS_FEATURE_INCOMPAT_METADATA_UUID);

	return has_metadata_uuid ? sb->metadata_uuid : sb->fsid;
}
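
/*
 * Note: with the METADATA_UUID incompat bit set, the fsid visible to user
 * space differs from the fsid stamped into metadata, and the latter is
 * kept in sb->metadata_uuid.
 */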

/*
 * Add new device to list of registered devices
 *
 * Returns:
 * device pointer which was just added or updated when successful
 * error pointer when failed
 */
static noinline struct btrfs_device *device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   bool *new_device_added)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices = NULL;
	struct rcu_string *name;
	u64 found_transid = btrfs_super_generation(disk_super);
	u64 devid = btrfs_stack_device_id(&disk_super->dev_item);
	dev_t path_devt;
	int error;
	bool same_fsid_diff_dev = false;
	bool has_metadata_uuid = (btrfs_super_incompat_flags(disk_super) &
		BTRFS_FEATURE_INCOMPAT_METADATA_UUID);

	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_CHANGING_FSID_V2) {
		btrfs_err(NULL,
"device %s has incomplete metadata_uuid change, please use btrfstune to complete",
			  path);
		return ERR_PTR(-EAGAIN);
	}

	error = lookup_bdev(path, &path_devt);
	if (error) {
		btrfs_err(NULL, "failed to lookup block device for path %s: %d",
			  path, error);
		return ERR_PTR(error);
	}

	fs_devices = find_fsid_by_device(disk_super, path_devt, &same_fsid_diff_dev);

	if (!fs_devices) {
		fs_devices = alloc_fs_devices(disk_super->fsid);
		if (IS_ERR(fs_devices))
			return ERR_CAST(fs_devices);

		if (has_metadata_uuid)
			memcpy(fs_devices->metadata_uuid,
			       disk_super->metadata_uuid, BTRFS_FSID_SIZE);

		if (same_fsid_diff_dev) {
			generate_random_uuid(fs_devices->fsid);
			fs_devices->temp_fsid = true;
			pr_info("BTRFS: device %s (%d:%d) using temp-fsid %pU\n",
				path, MAJOR(path_devt), MINOR(path_devt),
				fs_devices->fsid);
		}

		mutex_lock(&fs_devices->device_list_mutex);
		list_add(&fs_devices->fs_list, &fs_uuids);

		device = NULL;
	} else {
		struct btrfs_dev_lookup_args args = {
			.devid = devid,
			.uuid = disk_super->dev_item.uuid,
		};

		mutex_lock(&fs_devices->device_list_mutex);
		device = btrfs_find_device(fs_devices, &args);

		if (found_transid > fs_devices->latest_generation) {
			memcpy(fs_devices->fsid, disk_super->fsid,
			       BTRFS_FSID_SIZE);
			memcpy(fs_devices->metadata_uuid,
			       btrfs_sb_fsid_ptr(disk_super), BTRFS_FSID_SIZE);
		}
	}

	if (!device) {
		unsigned int nofs_flag;

		if (fs_devices->opened) {
			btrfs_err(NULL,
"device %s (%d:%d) belongs to fsid %pU, and the fs is already mounted, scanned by %s (%d)",
				  path, MAJOR(path_devt), MINOR(path_devt),
				  fs_devices->fsid, current->comm,
				  task_pid_nr(current));
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-EBUSY);
		}

		nofs_flag = memalloc_nofs_save();
		device = btrfs_alloc_device(NULL, &devid,
					    disk_super->dev_item.uuid, path);
		memalloc_nofs_restore(nofs_flag);
		if (IS_ERR(device)) {
			mutex_unlock(&fs_devices->device_list_mutex);
			/* we can safely leave the fs_devices entry around */
			return device;
		}

		device->devt = path_devt;

		list_add_rcu(&device->dev_list, &fs_devices->devices);
		fs_devices->num_devices++;

		device->fs_devices = fs_devices;
		*new_device_added = true;

		if (disk_super->label[0])
			pr_info(
"BTRFS: device label %s devid %llu transid %llu %s (%d:%d) scanned by %s (%d)\n",
				disk_super->label, devid, found_transid, path,
				MAJOR(path_devt), MINOR(path_devt),
				current->comm, task_pid_nr(current));
		else
			pr_info(
"BTRFS: device fsid %pU devid %llu transid %llu %s (%d:%d) scanned by %s (%d)\n",
				disk_super->fsid, devid, found_transid, path,
				MAJOR(path_devt), MINOR(path_devt),
				current->comm, task_pid_nr(current));

	} else if (!device->name || strcmp(device->name->str, path)) {
		/*
		 * When FS is already mounted.
		 * 1. If you are here and if the device->name is NULL that
		 *    means this device was missing at time of FS mount.
		 * 2. If you are here and if the device->name is different
		 *    from 'path' that means either
		 *	a. The same device disappeared and reappeared with
		 *	   different name. or
		 *	b. The missing-disk-which-was-replaced, has
		 *	   reappeared now.
		 *
		 * We must allow 1 and 2a above. But 2b would be spurious
		 * and unintentional.
		 *
		 * Further in case of 1 and 2a above, the disk at 'path'
		 * would have missed some transactions when it was away and
		 * in case of 2a the stale bdev has to be updated as well.
		 * 2b must not be allowed at any time.
		 */

		/*
		 * For now, we do allow update to btrfs_fs_device through the
		 * btrfs dev scan cli after FS has been mounted. We're still
		 * tracking a problem where systems fail mount by subvolume id
		 * when we reject replacement on a mounted FS.
		 */
		if (!fs_devices->opened && found_transid < device->generation) {
			/*
			 * That is if the FS is _not_ mounted and if you
			 * are here, that means there is more than one
			 * disk with same uuid and devid. We keep the one
			 * with larger generation number or the last-in if
			 * generations are equal.
			 */
			mutex_unlock(&fs_devices->device_list_mutex);
			btrfs_err(NULL,
"device %s already registered with a higher generation, found %llu expect %llu",
				  path, found_transid, device->generation);
			return ERR_PTR(-EEXIST);
		}

		/*
		 * We are going to replace the device path for a given devid,
		 * make sure it's the same device if the device is mounted
		 *
		 * NOTE: the device->fs_info may not be reliable here so pass
		 * in a NULL to message helpers instead. This avoids a possible
		 * use-after-free when the fs_info and fs_info->sb are already
		 * torn down.
		 */
		if (device->bdev) {
			if (device->devt != path_devt) {
				mutex_unlock(&fs_devices->device_list_mutex);
				btrfs_warn_in_rcu(NULL,
	"duplicate device %s devid %llu generation %llu scanned by %s (%d)",
						  path, devid, found_transid,
						  current->comm,
						  task_pid_nr(current));
				return ERR_PTR(-EEXIST);
			}
			btrfs_info_in_rcu(NULL,
	"devid %llu device path %s changed to %s scanned by %s (%d)",
					  devid, btrfs_dev_name(device),
					  path, current->comm,
					  task_pid_nr(current));
		}

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name) {
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-ENOMEM);
		}
		rcu_string_free(device->name);
		rcu_assign_pointer(device->name, name);
		if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
			fs_devices->missing_devices--;
			clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
		}
		device->devt = path_devt;
	}

	/*
	 * Unmount does not free the btrfs_device struct but would zero
	 * generation along with most of the other members. So just update
	 * it back. We need it to pick the disk with largest generation
	 * (as above).
	 */
	if (!fs_devices->opened) {
		device->generation = found_transid;
		fs_devices->latest_generation = max_t(u64, found_transid,
						fs_devices->latest_generation);
	}

	fs_devices->total_devices = btrfs_super_num_devices(disk_super);

	mutex_unlock(&fs_devices->device_list_mutex);
	return device;
}
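
/*
 * Duplicate the device list of @orig; used e.g. when sprouting a new
 * filesystem from a seed device, where the seed's fs_devices must be
 * preserved.
 */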
static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
{
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_device *device;
	struct btrfs_device *orig_dev;
	int ret = 0;

	lockdep_assert_held(&uuid_mutex);

	fs_devices = alloc_fs_devices(orig->fsid);
	if (IS_ERR(fs_devices))
		return fs_devices;

	fs_devices->total_devices = orig->total_devices;

	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
		const char *dev_path = NULL;

		/*
		 * This is ok to do without the RCU read lock held because we
		 * hold the uuid_mutex, so nothing we touch in here is going
		 * to disappear.
		 */
		if (orig_dev->name)
			dev_path = orig_dev->name->str;

		device = btrfs_alloc_device(NULL, &orig_dev->devid,
					    orig_dev->uuid, dev_path);
		if (IS_ERR(device)) {
			ret = PTR_ERR(device);
			goto error;
		}

		if (orig_dev->zone_info) {
			struct btrfs_zoned_device_info *zone_info;

			zone_info = btrfs_clone_dev_zone_info(orig_dev);
			if (!zone_info) {
				btrfs_free_device(device);
				ret = -ENOMEM;
				goto error;
			}
			device->zone_info = zone_info;
		}

		list_add(&device->dev_list, &fs_devices->devices);
		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	}
	return fs_devices;
error:
	free_fs_devices(fs_devices);
	return ERR_PTR(ret);
}

static void __btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices,
				      struct btrfs_device **latest_dev)
{
	struct btrfs_device *device, *next;

	/* This is the initialized path, it is safe to release the devices. */
	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
		if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state)) {
			if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
				      &device->dev_state) &&
			    !test_bit(BTRFS_DEV_STATE_MISSING,
				      &device->dev_state) &&
			    (!*latest_dev ||
			     device->generation > (*latest_dev)->generation)) {
				*latest_dev = device;
			}
			continue;
		}

		/*
		 * We have already validated the presence of BTRFS_DEV_REPLACE_DEVID,
		 * in btrfs_init_dev_replace() so just continue.
		 */
		if (device->devid == BTRFS_DEV_REPLACE_DEVID)
			continue;

		if (device->bdev_file) {
			fput(device->bdev_file);
			device->bdev = NULL;
			device->bdev_file = NULL;
			fs_devices->open_devices--;
		}
		if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
			list_del_init(&device->dev_alloc_list);
			clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
			fs_devices->rw_devices--;
		}
		list_del_init(&device->dev_list);
		fs_devices->num_devices--;
		btrfs_free_device(device);
	}
}

/*
 * After we have read the system tree and know devids belonging to this
 * filesystem, remove the device which does not belong there.
 */
void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *latest_dev = NULL;
	struct btrfs_fs_devices *seed_dev;

	mutex_lock(&uuid_mutex);
	__btrfs_free_extra_devids(fs_devices, &latest_dev);

	list_for_each_entry(seed_dev, &fs_devices->seed_list, seed_list)
		__btrfs_free_extra_devids(seed_dev, &latest_dev);

	fs_devices->latest_dev = latest_dev;

	mutex_unlock(&uuid_mutex);
}

static void btrfs_close_bdev(struct btrfs_device *device)
{
	if (!device->bdev)
		return;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
		sync_blockdev(device->bdev);
		invalidate_bdev(device->bdev);
	}

	fput(device->bdev_file);
}

static void btrfs_close_one_device(struct btrfs_device *device)
{
	struct btrfs_fs_devices *fs_devices = device->fs_devices;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
		list_del_init(&device->dev_alloc_list);
		fs_devices->rw_devices--;
	}

	if (device->devid == BTRFS_DEV_REPLACE_DEVID)
		clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);

	if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
		clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
		fs_devices->missing_devices--;
	}

	btrfs_close_bdev(device);
	if (device->bdev) {
		fs_devices->open_devices--;
		device->bdev = NULL;
	}
	clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
	btrfs_destroy_dev_zone_info(device);

	device->fs_info = NULL;
	atomic_set(&device->dev_stats_ccnt, 0);
	extent_io_tree_release(&device->alloc_state);

	/*
	 * Reset the flush error record. We might have a transient flush error
	 * in this mount, and if so we aborted the current transaction and set
	 * the fs to an error state, guaranteeing no super blocks can be further
	 * committed. However that error might be transient and if we unmount the
	 * filesystem and mount it again, we should allow the mount to succeed
	 * (btrfs_check_rw_degradable() should not fail) - if after mounting the
	 * filesystem again we still get flush errors, then we will again abort
	 * any transaction and set the error state, guaranteeing no commits of
	 * unsafe super blocks.
	 */
	device->last_flush_error = 0;

	/* Verify the device is back in a pristine state */
	WARN_ON(test_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state));
	WARN_ON(test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state));
	WARN_ON(!list_empty(&device->dev_alloc_list));
	WARN_ON(!list_empty(&device->post_commit_list));
}

static void close_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device, *tmp;

	lockdep_assert_held(&uuid_mutex);

	if (--fs_devices->opened > 0)
		return;

	list_for_each_entry_safe(device, tmp, &fs_devices->devices, dev_list)
		btrfs_close_one_device(device);

	WARN_ON(fs_devices->open_devices);
	WARN_ON(fs_devices->rw_devices);
	fs_devices->opened = 0;
	fs_devices->seeding = false;
	fs_devices->fs_info = NULL;
}

void btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	LIST_HEAD(list);
	struct btrfs_fs_devices *tmp;

	mutex_lock(&uuid_mutex);
	close_fs_devices(fs_devices);
	if (!fs_devices->opened) {
		list_splice_init(&fs_devices->seed_list, &list);

		/*
		 * If the struct btrfs_fs_devices is not assembled with any
		 * other device, it can be re-initialized during the next mount
		 * without needing the device-scan step. Therefore, it can be
		 * fully freed.
		 */
		if (fs_devices->num_devices == 1) {
			list_del(&fs_devices->fs_list);
			free_fs_devices(fs_devices);
		}
	}

	list_for_each_entry_safe(fs_devices, tmp, &list, seed_list) {
		close_fs_devices(fs_devices);
		list_del(&fs_devices->seed_list);
		free_fs_devices(fs_devices);
	}
	mutex_unlock(&uuid_mutex);
}

static int open_fs_devices(struct btrfs_fs_devices *fs_devices,
			   blk_mode_t flags, void *holder)
{
	struct btrfs_device *device;
	struct btrfs_device *latest_dev = NULL;
	struct btrfs_device *tmp_device;
	int ret = 0;

	list_for_each_entry_safe(device, tmp_device, &fs_devices->devices,
				 dev_list) {
		int ret2;

		ret2 = btrfs_open_one_device(fs_devices, device, flags, holder);
		if (ret2 == 0 &&
		    (!latest_dev || device->generation > latest_dev->generation)) {
			latest_dev = device;
		} else if (ret2 == -ENODATA) {
			fs_devices->num_devices--;
			list_del(&device->dev_list);
			btrfs_free_device(device);
		}
		if (ret == 0 && ret2 != 0)
			ret = ret2;
	}

	if (fs_devices->open_devices == 0) {
		if (ret)
			return ret;
		return -EINVAL;
	}

	fs_devices->opened = 1;
	fs_devices->latest_dev = latest_dev;
	fs_devices->total_rw_bytes = 0;
	fs_devices->chunk_alloc_policy = BTRFS_CHUNK_ALLOC_REGULAR;
	fs_devices->read_policy = BTRFS_READ_POLICY_PID;

	return 0;
}

static int devid_cmp(void *priv, const struct list_head *a,
		     const struct list_head *b)
{
	const struct btrfs_device *dev1, *dev2;

	dev1 = list_entry(a, struct btrfs_device, dev_list);
	dev2 = list_entry(b, struct btrfs_device, dev_list);

	if (dev1->devid < dev2->devid)
		return -1;
	else if (dev1->devid > dev2->devid)
		return 1;
	return 0;
}

int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       blk_mode_t flags, void *holder)
{
	int ret;

	lockdep_assert_held(&uuid_mutex);
	/*
	 * The device_list_mutex cannot be taken here in case opening the
	 * underlying device takes further locks like open_mutex.
	 *
	 * We also don't need the lock here as this is called during mount and
	 * exclusion is provided by uuid_mutex.
	 */

	if (fs_devices->opened) {
		fs_devices->opened++;
		ret = 0;
	} else {
		list_sort(NULL, &fs_devices->devices, devid_cmp);
		ret = open_fs_devices(fs_devices, flags, holder);
	}

	return ret;
}

void btrfs_release_disk_super(struct btrfs_super_block *super)
{
	struct page *page = virt_to_page(super);

	put_page(page);
}

static struct btrfs_super_block *btrfs_read_disk_super(struct block_device *bdev,
						       u64 bytenr, u64 bytenr_orig)
{
	struct btrfs_super_block *disk_super;
	struct page *page;
	void *p;
	pgoff_t index;

	/* make sure our super fits in the device */
	if (bytenr + PAGE_SIZE >= bdev_nr_bytes(bdev))
		return ERR_PTR(-EINVAL);

	/* make sure our super fits in the page */
	if (sizeof(*disk_super) > PAGE_SIZE)
		return ERR_PTR(-EINVAL);

	/* make sure our super doesn't straddle pages on disk */
	index = bytenr >> PAGE_SHIFT;
	if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_SHIFT != index)
		return ERR_PTR(-EINVAL);

	/* pull in the page with our super */
	page = read_cache_page_gfp(bdev->bd_inode->i_mapping, index, GFP_KERNEL);

	if (IS_ERR(page))
		return ERR_CAST(page);

	p = page_address(page);

	/* align our pointer to the offset of the super block */
	disk_super = p + offset_in_page(bytenr);

	if (btrfs_super_bytenr(disk_super) != bytenr_orig ||
	    btrfs_super_magic(disk_super) != BTRFS_MAGIC) {
		btrfs_release_disk_super(p);
		return ERR_PTR(-EINVAL);
	}

	if (disk_super->label[0] && disk_super->label[BTRFS_LABEL_SIZE - 1])
		disk_super->label[BTRFS_LABEL_SIZE - 1] = 0;

	return disk_super;
}

int btrfs_forget_devices(dev_t devt)
{
	int ret;

	mutex_lock(&uuid_mutex);
	ret = btrfs_free_stale_devices(devt, NULL);
	mutex_unlock(&uuid_mutex);

	return ret;
}

static bool btrfs_skip_registration(struct btrfs_super_block *disk_super,
				    const char *path, dev_t devt,
				    bool mount_arg_dev)
{
	struct btrfs_fs_devices *fs_devices;

	/*
	 * Do not skip device registration for mounted devices with matching
	 * maj:min but different paths. Booting without initrd relies on
	 * /dev/root initially, later replaced with the actual root device.
	 * A successful scan ensures grub2-probe selects the correct device.
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		struct btrfs_device *device;

		mutex_lock(&fs_devices->device_list_mutex);

		if (!fs_devices->opened) {
			mutex_unlock(&fs_devices->device_list_mutex);
			continue;
		}

		list_for_each_entry(device, &fs_devices->devices, dev_list) {
			if (device->bdev && (device->bdev->bd_dev == devt) &&
			    strcmp(device->name->str, path) != 0) {
				mutex_unlock(&fs_devices->device_list_mutex);

				/* Do not skip registration. */
				return false;
			}
		}
		mutex_unlock(&fs_devices->device_list_mutex);
	}

	if (!mount_arg_dev && btrfs_super_num_devices(disk_super) == 1 &&
	    !(btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING))
		return true;

	return false;
}

/*
 * Look for a btrfs signature on a device. This may be called out of the mount
 * path and we are not allowed to call set_blocksize during the scan. The
 * superblock is read via pagecache.
 *
 * With @mount_arg_dev it's a scan during mount time that will always register
 * the device or return an error. Multi-device and seeding devices are
 * registered in both cases.
 */
struct btrfs_device *btrfs_scan_one_device(const char *path, blk_mode_t flags,
					   bool mount_arg_dev)
{
	struct btrfs_super_block *disk_super;
	bool new_device_added = false;
	struct btrfs_device *device = NULL;
	struct file *bdev_file;
	u64 bytenr, bytenr_orig;
	dev_t devt;
	int ret;

	lockdep_assert_held(&uuid_mutex);

	/*
	 * we would like to check all the supers, but that would make
	 * a btrfs mount succeed after a mkfs from a different FS.
	 * So, we need to add a special mount option to scan for
	 * later supers, using BTRFS_SUPER_MIRROR_MAX instead
	 */

	/*
	 * Avoid an exclusive open here, as the systemd-udev may initiate the
	 * device scan which may race with the user's mount or mkfs command,
	 * resulting in failure.
	 * Since the device scan is solely for reading purposes, there is no
	 * need for an exclusive open. Additionally, the devices are read again
	 * during the mount process. It is ok to get some inconsistent
	 * values temporarily, as the device paths of the fsid are the only
	 * required information for assembling the volume.
	 */
	bdev_file = bdev_file_open_by_path(path, flags, NULL, NULL);
	if (IS_ERR(bdev_file))
		return ERR_CAST(bdev_file);

	bytenr_orig = btrfs_sb_offset(0);
	ret = btrfs_sb_log_location_bdev(file_bdev(bdev_file), 0, READ, &bytenr);
	if (ret) {
		device = ERR_PTR(ret);
		goto error_bdev_put;
	}

	disk_super = btrfs_read_disk_super(file_bdev(bdev_file), bytenr,
					   bytenr_orig);
	if (IS_ERR(disk_super)) {
		device = ERR_CAST(disk_super);
		goto error_bdev_put;
	}

	devt = file_bdev(bdev_file)->bd_dev;
	if (btrfs_skip_registration(disk_super, path, devt, mount_arg_dev)) {
		pr_debug("BTRFS: skip registering single non-seed device %s (%d:%d)\n",
			 path, MAJOR(devt), MINOR(devt));

		btrfs_free_stale_devices(devt, NULL);

		device = NULL;
		goto free_disk_super;
	}

	device = device_list_add(path, disk_super, &new_device_added);
	if (!IS_ERR(device) && new_device_added)
		btrfs_free_stale_devices(device->devt, device);

free_disk_super:
	btrfs_release_disk_super(disk_super);

error_bdev_put:
	fput(bdev_file);

	return device;
}

/*
 * Try to find a chunk that intersects [start, start + len] range and when one
 * such is found, record the end of it in *start
 */
static bool contains_pending_extent(struct btrfs_device *device, u64 *start,
				    u64 len)
{
	u64 physical_start, physical_end;

	lockdep_assert_held(&device->fs_info->chunk_mutex);

	if (find_first_extent_bit(&device->alloc_state, *start,
				  &physical_start, &physical_end,
				  CHUNK_ALLOCATED, NULL)) {

		if (in_range(physical_start, *start, len) ||
		    in_range(*start, physical_start,
			     physical_end + 1 - physical_start)) {
			*start = physical_end + 1;
			return true;
		}
	}
	return false;
}
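
/*
 * For the regular allocation policy, chunk allocation starts at
 * BTRFS_DEVICE_RANGE_RESERVED (the first 1MiB of the device), which keeps
 * the primary superblock and the bootloader area untouched.
 */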
static u64 dev_extent_search_start(struct btrfs_device *device)
{
	switch (device->fs_devices->chunk_alloc_policy) {
	case BTRFS_CHUNK_ALLOC_REGULAR:
		return BTRFS_DEVICE_RANGE_RESERVED;
	case BTRFS_CHUNK_ALLOC_ZONED:
		/*
		 * We don't care about the starting region like regular
		 * allocator, because we anyway use/reserve the first two zones
		 * for superblock logging.
		 */
		return 0;
	default:
		BUG();
	}
}

static bool dev_extent_hole_check_zoned(struct btrfs_device *device,
					u64 *hole_start, u64 *hole_size,
					u64 num_bytes)
{
	u64 zone_size = device->zone_info->zone_size;
	u64 pos;
	int ret;
	bool changed = false;

	ASSERT(IS_ALIGNED(*hole_start, zone_size));

	while (*hole_size > 0) {
		pos = btrfs_find_allocatable_zones(device, *hole_start,
						   *hole_start + *hole_size,
						   num_bytes);
		if (pos != *hole_start) {
			*hole_size = *hole_start + *hole_size - pos;
			*hole_start = pos;
			changed = true;
			if (*hole_size < num_bytes)
				break;
		}

		ret = btrfs_ensure_empty_zones(device, pos, num_bytes);

		/* Range is ensured to be empty */
		if (!ret)
			return changed;

		/* Given hole range was invalid (outside of device) */
		if (ret == -ERANGE) {
			*hole_start += *hole_size;
			*hole_size = 0;
			return true;
		}

		*hole_start += zone_size;
		*hole_size -= zone_size;
		changed = true;
	}

	return changed;
}

/*
 * Check if specified hole is suitable for allocation.
 *
 * @device:	the device which has the hole
 * @hole_start: starting position of the hole
 * @hole_size:	the size of the hole
 * @num_bytes:	the size of the free space that we need
 *
 * This function may modify @hole_start and @hole_size to reflect the suitable
 * position for allocation. Returns true if the hole position was updated,
 * false otherwise.
 */
static bool dev_extent_hole_check(struct btrfs_device *device, u64 *hole_start,
				  u64 *hole_size, u64 num_bytes)
{
	bool changed = false;
	u64 hole_end = *hole_start + *hole_size;

	for (;;) {
		/*
		 * Check before we set max_hole_start, otherwise we could end up
		 * sending back this offset anyway.
		 */
		if (contains_pending_extent(device, hole_start, *hole_size)) {
			if (hole_end >= *hole_start)
				*hole_size = hole_end - *hole_start;
			else
				*hole_size = 0;
			changed = true;
		}

		switch (device->fs_devices->chunk_alloc_policy) {
		case BTRFS_CHUNK_ALLOC_REGULAR:
			/* No extra check */
			break;
		case BTRFS_CHUNK_ALLOC_ZONED:
			if (dev_extent_hole_check_zoned(device, hole_start,
							hole_size, num_bytes)) {
				changed = true;
				/*
				 * The changed hole can contain pending extent.
				 * Loop again to check that.
				 */
				continue;
			}
			break;
		default:
			BUG();
		}

		break;
	}

	return changed;
}

/*
 * Find free space in the specified device.
 *
 * @device:	  the device which we search the free space in
 * @num_bytes:	  the size of the free space that we need
 * @search_start: the position from which to begin the search
 * @start:	  store the start of the free space.
 * @len:	  the size of the free space that we find, or the size
 *		  of the max free space if we don't find suitable free space
 *
 * This does a pretty simple search, the expectation is that it is called very
 * infrequently and that a given device has a small number of extents.
 *
 * @start is used to store the start of the free space if we find it. But if
 * we don't find suitable free space, it will be used to store the start
 * position of the max free space.
 *
 * @len is used to store the size of the free space that we find.
 * But if we don't find suitable free space, it is used to store the size of
 * the max free space.
 *
 * NOTE: This function will search *commit* root of device tree, and does extra
 * check to ensure dev extents are not double allocated.
 * This makes the function safe to allocate dev extents but may not report
 * correct usable device space, as device extent freed in current transaction
 * is not reported as available.
 */
static int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
				u64 *start, u64 *len)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_key key;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 search_start;
	u64 hole_size;
	u64 max_hole_start;
	u64 max_hole_size = 0;
	u64 extent_end;
	u64 search_end = device->total_bytes;
	int ret;
	int slot;
	struct extent_buffer *l;

	search_start = dev_extent_search_start(device);
	max_hole_start = search_start;

	WARN_ON(device->zone_info &&
		!IS_ALIGNED(num_bytes, device->zone_info->zone_size));

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}
again:
	if (search_start >= search_end ||
	    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
		ret = -ENOSPC;
		goto out;
	}

	path->reada = READA_FORWARD;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_backwards(root, &key, path);
	if (ret < 0)
		goto out;

	while (search_start < search_end) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (key.type != BTRFS_DEV_EXTENT_KEY)
			goto next;

		if (key.offset > search_end)
			break;

		if (key.offset > search_start) {
			hole_size = key.offset - search_start;
			dev_extent_hole_check(device, &search_start, &hole_size,
					      num_bytes);

			if (hole_size > max_hole_size) {
				max_hole_start = search_start;
				max_hole_size = hole_size;
			}

			/*
			 * If this free space is greater than what we need,
			 * it must be the max free space that we have found
			 * until now, so max_hole_start must point to the start
			 * of this free space and the length of this free space
			 * is stored in max_hole_size. Thus, we return
			 * max_hole_start and max_hole_size and go back to the
			 * caller.
			 */
			if (hole_size >= num_bytes) {
				ret = 0;
				goto out;
			}
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (extent_end > search_start)
			search_start = extent_end;
next:
		path->slots[0]++;
		cond_resched();
	}

	/*
	 * At this point, search_start should be the end of
	 * allocated dev extents, and when shrinking the device,
	 * search_end may be smaller than search_start.
	 */
	if (search_end > search_start) {
		hole_size = search_end - search_start;
		if (dev_extent_hole_check(device, &search_start, &hole_size,
					  num_bytes)) {
			btrfs_release_path(path);
			goto again;
		}

		if (hole_size > max_hole_size) {
			max_hole_start = search_start;
			max_hole_size = hole_size;
		}
	}

	/* See above. */
	if (max_hole_size < num_bytes)
		ret = -ENOSPC;
	else
		ret = 0;

	ASSERT(max_hole_start + max_hole_size <= search_end);
out:
	btrfs_free_path(path);
	*start = max_hole_start;
	if (len)
		*len = max_hole_size;
	return ret;
}

static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
				 struct btrfs_device *device,
				 u64 start, u64 *dev_extent_len)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf = NULL;
	struct btrfs_dev_extent *extent = NULL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
again:
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid,
					  BTRFS_DEV_EXTENT_KEY);
		if (ret)
			goto out;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
		BUG_ON(found_key.offset > start || found_key.offset +
		       btrfs_dev_extent_length(leaf, extent) < start);
		key = found_key;
		btrfs_release_path(path);
		goto again;
	} else if (ret == 0) {
		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
	} else {
		goto out;
	}

	*dev_extent_len = btrfs_dev_extent_length(leaf, extent);

	ret = btrfs_del_item(trans, root, path);
	if (ret == 0)
		set_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags);
out:
	btrfs_free_path(path);
	return ret;
}

static u64 find_next_chunk(struct btrfs_fs_info *fs_info)
{
	struct rb_node *n;
	u64 ret = 0;

	read_lock(&fs_info->mapping_tree_lock);
	n = rb_last(&fs_info->mapping_tree.rb_root);
	if (n) {
		struct btrfs_chunk_map *map;

		map = rb_entry(n, struct btrfs_chunk_map, rb_node);
		ret = map->start + map->chunk_len;
	}
	read_unlock(&fs_info->mapping_tree_lock);

	return ret;
}
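
/*
 * Find the next available devid: one past the highest devid recorded in
 * the DEV_ITEMs of the chunk tree, or 1 if there is none.
 */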
static noinline int find_next_devid(struct btrfs_fs_info *fs_info,
				    u64 *devid_ret)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	if (ret == 0) {
		/* Corruption */
		btrfs_err(fs_info, "corrupted chunk tree devid -1 matched");
		ret = -EUCLEAN;
		goto error;
	}

	ret = btrfs_previous_item(fs_info->chunk_root, path,
				  BTRFS_DEV_ITEMS_OBJECTID,
				  BTRFS_DEV_ITEM_KEY);
	if (ret) {
		*devid_ret = 1;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		*devid_ret = found_key.offset + 1;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

/*
 * the device information is stored in the chunk root
 * the btrfs_device struct should be fully filled in
 */
static int btrfs_add_dev_item(struct btrfs_trans_handle *trans,
			      struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long ptr;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	btrfs_reserve_chunk_metadata(trans, true);
	ret = btrfs_insert_empty_item(trans, trans->fs_info->chunk_root, path,
				      &key, sizeof(*dev_item));
	btrfs_trans_release_chunk_metadata(trans);
	if (ret)
		goto out;

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_generation(leaf, dev_item, 0);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item,
				     btrfs_device_get_disk_total_bytes(device));
	btrfs_set_device_bytes_used(leaf, dev_item,
				    btrfs_device_get_bytes_used(device));
	btrfs_set_device_group(leaf, dev_item, 0);
	btrfs_set_device_seek_speed(leaf, dev_item, 0);
	btrfs_set_device_bandwidth(leaf, dev_item, 0);
	btrfs_set_device_start_offset(leaf, dev_item, 0);

	ptr = btrfs_device_uuid(dev_item);
	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
	ptr = btrfs_device_fsid(dev_item);
	write_extent_buffer(leaf, trans->fs_info->fs_devices->metadata_uuid,
			    ptr, BTRFS_FSID_SIZE);
	btrfs_mark_buffer_dirty(trans, leaf);

	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
1936 */ 1937 static void update_dev_time(const char *device_path) 1938 { 1939 struct path path; 1940 int ret; 1941 1942 ret = kern_path(device_path, LOOKUP_FOLLOW, &path); 1943 if (ret) 1944 return; 1945 1946 inode_update_time(d_inode(path.dentry), S_MTIME | S_CTIME | S_VERSION); 1947 path_put(&path); 1948 } 1949 1950 static int btrfs_rm_dev_item(struct btrfs_trans_handle *trans, 1951 struct btrfs_device *device) 1952 { 1953 struct btrfs_root *root = device->fs_info->chunk_root; 1954 int ret; 1955 struct btrfs_path *path; 1956 struct btrfs_key key; 1957 1958 path = btrfs_alloc_path(); 1959 if (!path) 1960 return -ENOMEM; 1961 1962 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 1963 key.type = BTRFS_DEV_ITEM_KEY; 1964 key.offset = device->devid; 1965 1966 btrfs_reserve_chunk_metadata(trans, false); 1967 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 1968 btrfs_trans_release_chunk_metadata(trans); 1969 if (ret) { 1970 if (ret > 0) 1971 ret = -ENOENT; 1972 goto out; 1973 } 1974 1975 ret = btrfs_del_item(trans, root, path); 1976 out: 1977 btrfs_free_path(path); 1978 return ret; 1979 } 1980 1981 /* 1982 * Verify that @num_devices satisfies the RAID profile constraints in the whole 1983 * filesystem. It's up to the caller to adjust that number regarding e.g. device 1984 * replace. 1985 */ 1986 static int btrfs_check_raid_min_devices(struct btrfs_fs_info *fs_info, 1987 u64 num_devices) 1988 { 1989 u64 all_avail; 1990 unsigned seq; 1991 int i; 1992 1993 do { 1994 seq = read_seqbegin(&fs_info->profiles_lock); 1995 1996 all_avail = fs_info->avail_data_alloc_bits | 1997 fs_info->avail_system_alloc_bits | 1998 fs_info->avail_metadata_alloc_bits; 1999 } while (read_seqretry(&fs_info->profiles_lock, seq)); 2000 2001 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) { 2002 if (!(all_avail & btrfs_raid_array[i].bg_flag)) 2003 continue; 2004 2005 if (num_devices < btrfs_raid_array[i].devs_min) 2006 return btrfs_raid_array[i].mindev_error; 2007 } 2008 2009 return 0; 2010 } 2011 2012 static struct btrfs_device * btrfs_find_next_active_device( 2013 struct btrfs_fs_devices *fs_devs, struct btrfs_device *device) 2014 { 2015 struct btrfs_device *next_device; 2016 2017 list_for_each_entry(next_device, &fs_devs->devices, dev_list) { 2018 if (next_device != device && 2019 !test_bit(BTRFS_DEV_STATE_MISSING, &next_device->dev_state) 2020 && next_device->bdev) 2021 return next_device; 2022 } 2023 2024 return NULL; 2025 } 2026 2027 /* 2028 * Helper function to check if the given device is part of s_bdev / latest_dev 2029 * and replace it with the provided or the next active device. In the context 2030 * where this function is called, there should always be another active device 2031 * (or this_dev) available. 2032 */ 2033 void __cold btrfs_assign_next_active_device(struct btrfs_device *device, 2034 struct btrfs_device *next_device) 2035 { 2036 struct btrfs_fs_info *fs_info = device->fs_info; 2037 2038 if (!next_device) 2039 next_device = btrfs_find_next_active_device(fs_info->fs_devices, 2040 device); 2041 ASSERT(next_device); 2042 2043 if (fs_info->sb->s_bdev && 2044 (fs_info->sb->s_bdev == device->bdev)) 2045 fs_info->sb->s_bdev = next_device->bdev; 2046 2047 if (fs_info->fs_devices->latest_dev->bdev == device->bdev) 2048 fs_info->fs_devices->latest_dev = next_device; 2049 } 2050 2051 /* 2052 * Return btrfs_fs_devices::num_devices excluding the device that is currently 2053 * being replaced.
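 *
 * Illustrative example: with num_devices == 3 while a replace is running,
 * the replace target is counted in the list but is not an independent
 * member yet, so this returns 2, which is the value callers like
 * btrfs_rm_device() feed into btrfs_check_raid_min_devices().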
2054 */ 2055 static u64 btrfs_num_devices(struct btrfs_fs_info *fs_info) 2056 { 2057 u64 num_devices = fs_info->fs_devices->num_devices; 2058 2059 down_read(&fs_info->dev_replace.rwsem); 2060 if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) { 2061 ASSERT(num_devices > 1); 2062 num_devices--; 2063 } 2064 up_read(&fs_info->dev_replace.rwsem); 2065 2066 return num_devices; 2067 } 2068 2069 static void btrfs_scratch_superblock(struct btrfs_fs_info *fs_info, 2070 struct block_device *bdev, int copy_num) 2071 { 2072 struct btrfs_super_block *disk_super; 2073 const size_t len = sizeof(disk_super->magic); 2074 const u64 bytenr = btrfs_sb_offset(copy_num); 2075 int ret; 2076 2077 disk_super = btrfs_read_disk_super(bdev, bytenr, bytenr); 2078 if (IS_ERR(disk_super)) 2079 return; 2080 2081 memset(&disk_super->magic, 0, len); 2082 folio_mark_dirty(virt_to_folio(disk_super)); 2083 btrfs_release_disk_super(disk_super); 2084 2085 ret = sync_blockdev_range(bdev, bytenr, bytenr + len - 1); 2086 if (ret) 2087 btrfs_warn(fs_info, "error clearing superblock number %d (%d)", 2088 copy_num, ret); 2089 } 2090 2091 void btrfs_scratch_superblocks(struct btrfs_fs_info *fs_info, struct btrfs_device *device) 2092 { 2093 int copy_num; 2094 struct block_device *bdev = device->bdev; 2095 2096 if (!bdev) 2097 return; 2098 2099 for (copy_num = 0; copy_num < BTRFS_SUPER_MIRROR_MAX; copy_num++) { 2100 if (bdev_is_zoned(bdev)) 2101 btrfs_reset_sb_log_zones(bdev, copy_num); 2102 else 2103 btrfs_scratch_superblock(fs_info, bdev, copy_num); 2104 } 2105 2106 /* Notify udev that device has changed */ 2107 btrfs_kobject_uevent(bdev, KOBJ_CHANGE); 2108 2109 /* Update ctime/mtime for device path for libblkid */ 2110 update_dev_time(device->name->str); 2111 } 2112 2113 int btrfs_rm_device(struct btrfs_fs_info *fs_info, 2114 struct btrfs_dev_lookup_args *args, 2115 struct file **bdev_file) 2116 { 2117 struct btrfs_trans_handle *trans; 2118 struct btrfs_device *device; 2119 struct btrfs_fs_devices *cur_devices; 2120 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 2121 u64 num_devices; 2122 int ret = 0; 2123 2124 if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) { 2125 btrfs_err(fs_info, "device remove not supported on extent tree v2 yet"); 2126 return -EINVAL; 2127 } 2128 2129 /* 2130 * The device list in fs_devices is accessed without locks (neither 2131 * uuid_mutex nor device_list_mutex) as it won't change on a mounted 2132 * filesystem and another device rm cannot run. 
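 * (Device add/remove/replace and balance are mutually exclusive operations,
 * serialized by btrfs_exclop_start(), which is why no other device rm can
 * be running while we are here.)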
2133 */ 2134 num_devices = btrfs_num_devices(fs_info); 2135 2136 ret = btrfs_check_raid_min_devices(fs_info, num_devices - 1); 2137 if (ret) 2138 return ret; 2139 2140 device = btrfs_find_device(fs_info->fs_devices, args); 2141 if (!device) { 2142 if (args->missing) 2143 ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND; 2144 else 2145 ret = -ENOENT; 2146 return ret; 2147 } 2148 2149 if (btrfs_pinned_by_swapfile(fs_info, device)) { 2150 btrfs_warn_in_rcu(fs_info, 2151 "cannot remove device %s (devid %llu) due to active swapfile", 2152 btrfs_dev_name(device), device->devid); 2153 return -ETXTBSY; 2154 } 2155 2156 if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) 2157 return BTRFS_ERROR_DEV_TGT_REPLACE; 2158 2159 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) && 2160 fs_info->fs_devices->rw_devices == 1) 2161 return BTRFS_ERROR_DEV_ONLY_WRITABLE; 2162 2163 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 2164 mutex_lock(&fs_info->chunk_mutex); 2165 list_del_init(&device->dev_alloc_list); 2166 device->fs_devices->rw_devices--; 2167 mutex_unlock(&fs_info->chunk_mutex); 2168 } 2169 2170 ret = btrfs_shrink_device(device, 0); 2171 if (ret) 2172 goto error_undo; 2173 2174 trans = btrfs_start_transaction(fs_info->chunk_root, 0); 2175 if (IS_ERR(trans)) { 2176 ret = PTR_ERR(trans); 2177 goto error_undo; 2178 } 2179 2180 ret = btrfs_rm_dev_item(trans, device); 2181 if (ret) { 2182 /* Any error in dev item removal is critical */ 2183 btrfs_crit(fs_info, 2184 "failed to remove device item for devid %llu: %d", 2185 device->devid, ret); 2186 btrfs_abort_transaction(trans, ret); 2187 btrfs_end_transaction(trans); 2188 return ret; 2189 } 2190 2191 clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state); 2192 btrfs_scrub_cancel_dev(device); 2193 2194 /* 2195 * The device list mutex makes sure that we don't change 2196 * the device list while someone else is writing out all 2197 * the device supers. Whoever is writing all supers should 2198 * lock the device list mutex before getting the number of 2199 * devices in the super block (super_copy). Conversely, 2200 * whoever updates the number of devices in the super block 2201 * (super_copy) should hold the device list mutex. 2202 */ 2203 2204 /* 2205 * In normal cases cur_devices == fs_devices. But when deleting 2206 * a seed device, cur_devices points to the seed's own 2207 * fs_devices, listed under fs_devices->seed_list. 2208 */ 2209 cur_devices = device->fs_devices; 2210 mutex_lock(&fs_devices->device_list_mutex); 2211 list_del_rcu(&device->dev_list); 2212 2213 cur_devices->num_devices--; 2214 cur_devices->total_devices--; 2215 /* Update total_devices of the parent fs_devices if it's seed */ 2216 if (cur_devices != fs_devices) 2217 fs_devices->total_devices--; 2218 2219 if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) 2220 cur_devices->missing_devices--; 2221 2222 btrfs_assign_next_active_device(device, NULL); 2223 2224 if (device->bdev_file) { 2225 cur_devices->open_devices--; 2226 /* remove sysfs entry */ 2227 btrfs_sysfs_remove_device(device); 2228 } 2229 2230 num_devices = btrfs_super_num_devices(fs_info->super_copy) - 1; 2231 btrfs_set_super_num_devices(fs_info->super_copy, num_devices); 2232 mutex_unlock(&fs_devices->device_list_mutex); 2233 2234 /* 2235 * At this point, the device is zero sized and detached from the 2236 * devices list. All that's left is to zero out the old supers and 2237 * free the device.
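 * (btrfs_scratch_superblocks() below clears the magic from every super
 * block copy, so later scans no longer recognize the device as a btrfs
 * member.)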
2238 * 2239 We cannot call btrfs_close_bdev() here because we're holding the sb 2240 * write lock, and fput() on the block device will pull in the 2241 * ->open_mutex on the block device and its dependencies. Instead 2242 * just flush the device and let the caller do the final bdev_release. 2243 */ 2244 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 2245 btrfs_scratch_superblocks(fs_info, device); 2246 if (device->bdev) { 2247 sync_blockdev(device->bdev); 2248 invalidate_bdev(device->bdev); 2249 } 2250 } 2251 2252 *bdev_file = device->bdev_file; 2253 synchronize_rcu(); 2254 btrfs_free_device(device); 2255 2256 /* 2257 * This can happen if cur_devices is the private seed devices list. We 2258 * cannot call close_fs_devices() here because it expects the uuid_mutex 2259 * to be held, but in fact we don't need that for the private 2260 * seed_devices, we can simply decrement cur_devices->opened and then 2261 * remove it from our list and free the fs_devices. 2262 */ 2263 if (cur_devices->num_devices == 0) { 2264 list_del_init(&cur_devices->seed_list); 2265 ASSERT(cur_devices->opened == 1); 2266 cur_devices->opened--; 2267 free_fs_devices(cur_devices); 2268 } 2269 2270 ret = btrfs_commit_transaction(trans); 2271 2272 return ret; 2273 2274 error_undo: 2275 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 2276 mutex_lock(&fs_info->chunk_mutex); 2277 list_add(&device->dev_alloc_list, 2278 &fs_devices->alloc_list); 2279 device->fs_devices->rw_devices++; 2280 mutex_unlock(&fs_info->chunk_mutex); 2281 } 2282 return ret; 2283 } 2284 2285 void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device *srcdev) 2286 { 2287 struct btrfs_fs_devices *fs_devices; 2288 2289 lockdep_assert_held(&srcdev->fs_info->fs_devices->device_list_mutex); 2290 2291 /* 2292 * In case of a filesystem with no seed, srcdev->fs_devices points 2293 * to the fs_devices of fs_info. However, when the device being 2294 * replaced is a seed device, it points to the seed's local fs_devices. 2295 * In short, srcdev has the correct fs_devices in both cases. 2296 */ 2297 fs_devices = srcdev->fs_devices; 2298 2299 list_del_rcu(&srcdev->dev_list); 2300 list_del(&srcdev->dev_alloc_list); 2301 fs_devices->num_devices--; 2302 if (test_bit(BTRFS_DEV_STATE_MISSING, &srcdev->dev_state)) 2303 fs_devices->missing_devices--; 2304 2305 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &srcdev->dev_state)) 2306 fs_devices->rw_devices--; 2307 2308 if (srcdev->bdev) 2309 fs_devices->open_devices--; 2310 } 2311 2312 void btrfs_rm_dev_replace_free_srcdev(struct btrfs_device *srcdev) 2313 { 2314 struct btrfs_fs_devices *fs_devices = srcdev->fs_devices; 2315 2316 mutex_lock(&uuid_mutex); 2317 2318 btrfs_close_bdev(srcdev); 2319 synchronize_rcu(); 2320 btrfs_free_device(srcdev); 2321 2322 /* If there are no devices left we'd rather delete the fs_devices. */ 2323 if (!fs_devices->num_devices) { 2324 /* 2325 * On a mounted FS, num_devices can't be zero unless it's a 2326 * seed. In case of a seed device being replaced, the replace 2327 * target is added to the sprout FS, so there will be no more 2328 * devices left under the seed FS.
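 * (This mirrors the cleanup in btrfs_rm_device(): once num_devices drops
 * to zero, the empty seed fs_devices is unlinked from the seed_list,
 * closed and freed.)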
2329 */ 2330 ASSERT(fs_devices->seeding); 2331 2332 list_del_init(&fs_devices->seed_list); 2333 close_fs_devices(fs_devices); 2334 free_fs_devices(fs_devices); 2335 } 2336 mutex_unlock(&uuid_mutex); 2337 } 2338 2339 void btrfs_destroy_dev_replace_tgtdev(struct btrfs_device *tgtdev) 2340 { 2341 struct btrfs_fs_devices *fs_devices = tgtdev->fs_info->fs_devices; 2342 2343 mutex_lock(&fs_devices->device_list_mutex); 2344 2345 btrfs_sysfs_remove_device(tgtdev); 2346 2347 if (tgtdev->bdev) 2348 fs_devices->open_devices--; 2349 2350 fs_devices->num_devices--; 2351 2352 btrfs_assign_next_active_device(tgtdev, NULL); 2353 2354 list_del_rcu(&tgtdev->dev_list); 2355 2356 mutex_unlock(&fs_devices->device_list_mutex); 2357 2358 btrfs_scratch_superblocks(tgtdev->fs_info, tgtdev); 2359 2360 btrfs_close_bdev(tgtdev); 2361 synchronize_rcu(); 2362 btrfs_free_device(tgtdev); 2363 } 2364 2365 /* 2366 * Populate args from device at path. 2367 * 2368 * @fs_info: the filesystem 2369 * @args: the args to populate 2370 * @path: the path to the device 2371 * 2372 * This will read the super block of the device at @path and populate @args with 2373 * the devid, fsid, and uuid. This is meant to be used for ioctls that need to 2374 * lookup a device to operate on, but need to do it before we take any locks. 2375 * This properly handles the special case of "missing" that a user may pass in, 2376 * and does some basic sanity checks. The caller must make sure that @path is 2377 * properly NUL terminated before calling in, and must call 2378 * btrfs_put_dev_args_from_path() in order to free up the temporary fsid and 2379 * uuid buffers. 2380 * 2381 * Return: 0 for success, -errno for failure 2382 */ 2383 int btrfs_get_dev_args_from_path(struct btrfs_fs_info *fs_info, 2384 struct btrfs_dev_lookup_args *args, 2385 const char *path) 2386 { 2387 struct btrfs_super_block *disk_super; 2388 struct file *bdev_file; 2389 int ret; 2390 2391 if (!path || !path[0]) 2392 return -EINVAL; 2393 if (!strcmp(path, "missing")) { 2394 args->missing = true; 2395 return 0; 2396 } 2397 2398 args->uuid = kzalloc(BTRFS_UUID_SIZE, GFP_KERNEL); 2399 args->fsid = kzalloc(BTRFS_FSID_SIZE, GFP_KERNEL); 2400 if (!args->uuid || !args->fsid) { 2401 btrfs_put_dev_args_from_path(args); 2402 return -ENOMEM; 2403 } 2404 2405 ret = btrfs_get_bdev_and_sb(path, BLK_OPEN_READ, NULL, 0, 2406 &bdev_file, &disk_super); 2407 if (ret) { 2408 btrfs_put_dev_args_from_path(args); 2409 return ret; 2410 } 2411 2412 args->devid = btrfs_stack_device_id(&disk_super->dev_item); 2413 memcpy(args->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE); 2414 if (btrfs_fs_incompat(fs_info, METADATA_UUID)) 2415 memcpy(args->fsid, disk_super->metadata_uuid, BTRFS_FSID_SIZE); 2416 else 2417 memcpy(args->fsid, disk_super->fsid, BTRFS_FSID_SIZE); 2418 btrfs_release_disk_super(disk_super); 2419 fput(bdev_file); 2420 return 0; 2421 } 2422 2423 /* 2424 * Only use this jointly with btrfs_get_dev_args_from_path() because we will 2425 * allocate our ->uuid and ->fsid pointers, everybody else uses local variables 2426 * that don't need to be freed. 
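 *
 * Typical pairing, as in btrfs_find_device_by_devspec() below:
 *
 *	BTRFS_DEV_LOOKUP_ARGS(args);
 *
 *	ret = btrfs_get_dev_args_from_path(fs_info, &args, path);
 *	if (ret)
 *		return ERR_PTR(ret);
 *	device = btrfs_find_device(fs_info->fs_devices, &args);
 *	btrfs_put_dev_args_from_path(&args);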
2427 */ 2428 void btrfs_put_dev_args_from_path(struct btrfs_dev_lookup_args *args) 2429 { 2430 kfree(args->uuid); 2431 kfree(args->fsid); 2432 args->uuid = NULL; 2433 args->fsid = NULL; 2434 } 2435 2436 struct btrfs_device *btrfs_find_device_by_devspec( 2437 struct btrfs_fs_info *fs_info, u64 devid, 2438 const char *device_path) 2439 { 2440 BTRFS_DEV_LOOKUP_ARGS(args); 2441 struct btrfs_device *device; 2442 int ret; 2443 2444 if (devid) { 2445 args.devid = devid; 2446 device = btrfs_find_device(fs_info->fs_devices, &args); 2447 if (!device) 2448 return ERR_PTR(-ENOENT); 2449 return device; 2450 } 2451 2452 ret = btrfs_get_dev_args_from_path(fs_info, &args, device_path); 2453 if (ret) 2454 return ERR_PTR(ret); 2455 device = btrfs_find_device(fs_info->fs_devices, &args); 2456 btrfs_put_dev_args_from_path(&args); 2457 if (!device) 2458 return ERR_PTR(-ENOENT); 2459 return device; 2460 } 2461 2462 static struct btrfs_fs_devices *btrfs_init_sprout(struct btrfs_fs_info *fs_info) 2463 { 2464 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 2465 struct btrfs_fs_devices *old_devices; 2466 struct btrfs_fs_devices *seed_devices; 2467 2468 lockdep_assert_held(&uuid_mutex); 2469 if (!fs_devices->seeding) 2470 return ERR_PTR(-EINVAL); 2471 2472 /* 2473 * Private copy of the seed devices, anchored at 2474 * fs_info->fs_devices->seed_list 2475 */ 2476 seed_devices = alloc_fs_devices(NULL); 2477 if (IS_ERR(seed_devices)) 2478 return seed_devices; 2479 2480 /* 2481 * It's necessary to retain a copy of the original seed fs_devices in 2482 * fs_uuids so that filesystems which have been seeded can successfully 2483 * reference the seed device from open_seed_devices. This also supports 2484 * multiple seed filesystems. 2485 */ 2486 old_devices = clone_fs_devices(fs_devices); 2487 if (IS_ERR(old_devices)) { 2488 kfree(seed_devices); 2489 return old_devices; 2490 } 2491 2492 list_add(&old_devices->fs_list, &fs_uuids); 2493 2494 memcpy(seed_devices, fs_devices, sizeof(*seed_devices)); 2495 seed_devices->opened = 1; 2496 INIT_LIST_HEAD(&seed_devices->devices); 2497 INIT_LIST_HEAD(&seed_devices->alloc_list); 2498 mutex_init(&seed_devices->device_list_mutex); 2499 2500 return seed_devices; 2501 } 2502 2503 /* 2504 * Splice seed devices into the sprout fs_devices. 2505 * Generate a new fsid for the sprouted read-write filesystem. 2506 */ 2507 static void btrfs_setup_sprout(struct btrfs_fs_info *fs_info, 2508 struct btrfs_fs_devices *seed_devices) 2509 { 2510 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 2511 struct btrfs_super_block *disk_super = fs_info->super_copy; 2512 struct btrfs_device *device; 2513 u64 super_flags; 2514 2515 /* 2516 * We are updating the fsid, and a thread racing in device_list_add() 2517 * could see it, so uuid_mutex is needed. 2518 */ 2519 lockdep_assert_held(&uuid_mutex); 2520 2521 /* 2522 * The threads listed below may traverse dev_list but can do that without 2523 * device_list_mutex: 2524 * - All device ops and balance - as we are in btrfs_exclop_start. 2525 * - Various dev_list readers - are using RCU. 2526 * - btrfs_ioctl_fitrim() - is using RCU.
2527 * 2528 The following read-only threads use device_list_mutex: 2529 * - Readonly scrub btrfs_scrub_dev() 2530 * - Readonly scrub btrfs_scrub_progress() 2531 * - btrfs_get_dev_stats() 2532 */ 2533 lockdep_assert_held(&fs_devices->device_list_mutex); 2534 2535 list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices, 2536 synchronize_rcu); 2537 list_for_each_entry(device, &seed_devices->devices, dev_list) 2538 device->fs_devices = seed_devices; 2539 2540 fs_devices->seeding = false; 2541 fs_devices->num_devices = 0; 2542 fs_devices->open_devices = 0; 2543 fs_devices->missing_devices = 0; 2544 fs_devices->rotating = false; 2545 list_add(&seed_devices->seed_list, &fs_devices->seed_list); 2546 2547 generate_random_uuid(fs_devices->fsid); 2548 memcpy(fs_devices->metadata_uuid, fs_devices->fsid, BTRFS_FSID_SIZE); 2549 memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE); 2550 2551 super_flags = btrfs_super_flags(disk_super) & 2552 ~BTRFS_SUPER_FLAG_SEEDING; 2553 btrfs_set_super_flags(disk_super, super_flags); 2554 } 2555 2556 /* 2557 * Store the expected generation for seed devices in device items. 2558 */ 2559 static int btrfs_finish_sprout(struct btrfs_trans_handle *trans) 2560 { 2561 BTRFS_DEV_LOOKUP_ARGS(args); 2562 struct btrfs_fs_info *fs_info = trans->fs_info; 2563 struct btrfs_root *root = fs_info->chunk_root; 2564 struct btrfs_path *path; 2565 struct extent_buffer *leaf; 2566 struct btrfs_dev_item *dev_item; 2567 struct btrfs_device *device; 2568 struct btrfs_key key; 2569 u8 fs_uuid[BTRFS_FSID_SIZE]; 2570 u8 dev_uuid[BTRFS_UUID_SIZE]; 2571 int ret; 2572 2573 path = btrfs_alloc_path(); 2574 if (!path) 2575 return -ENOMEM; 2576 2577 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 2578 key.offset = 0; 2579 key.type = BTRFS_DEV_ITEM_KEY; 2580 2581 while (1) { 2582 btrfs_reserve_chunk_metadata(trans, false); 2583 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 2584 btrfs_trans_release_chunk_metadata(trans); 2585 if (ret < 0) 2586 goto error; 2587 2588 leaf = path->nodes[0]; 2589 next_slot: 2590 if (path->slots[0] >= btrfs_header_nritems(leaf)) { 2591 ret = btrfs_next_leaf(root, path); 2592 if (ret > 0) 2593 break; 2594 if (ret < 0) 2595 goto error; 2596 leaf = path->nodes[0]; 2597 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 2598 btrfs_release_path(path); 2599 continue; 2600 } 2601 2602 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 2603 if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID || 2604 key.type != BTRFS_DEV_ITEM_KEY) 2605 break; 2606 2607 dev_item = btrfs_item_ptr(leaf, path->slots[0], 2608 struct btrfs_dev_item); 2609 args.devid = btrfs_device_id(leaf, dev_item); 2610 read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item), 2611 BTRFS_UUID_SIZE); 2612 read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item), 2613 BTRFS_FSID_SIZE); 2614 args.uuid = dev_uuid; 2615 args.fsid = fs_uuid; 2616 device = btrfs_find_device(fs_info->fs_devices, &args); 2617 BUG_ON(!device); /* Logic error */ 2618 2619 if (device->fs_devices->seeding) { 2620 btrfs_set_device_generation(leaf, dev_item, 2621 device->generation); 2622 btrfs_mark_buffer_dirty(trans, leaf); 2623 } 2624 2625 path->slots[0]++; 2626 goto next_slot; 2627 } 2628 ret = 0; 2629 error: 2630 btrfs_free_path(path); 2631 return ret; 2632 } 2633 2634 int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path) 2635 { 2636 struct btrfs_root *root = fs_info->dev_root; 2637 struct btrfs_trans_handle *trans; 2638 struct btrfs_device *device; 2639 struct file *bdev_file; 2640 struct
super_block *sb = fs_info->sb; 2641 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 2642 struct btrfs_fs_devices *seed_devices = NULL; 2643 u64 orig_super_total_bytes; 2644 u64 orig_super_num_devices; 2645 int ret = 0; 2646 bool seeding_dev = false; 2647 bool locked = false; 2648 2649 if (sb_rdonly(sb) && !fs_devices->seeding) 2650 return -EROFS; 2651 2652 bdev_file = bdev_file_open_by_path(device_path, BLK_OPEN_WRITE, 2653 fs_info->bdev_holder, NULL); 2654 if (IS_ERR(bdev_file)) 2655 return PTR_ERR(bdev_file); 2656 2657 if (!btrfs_check_device_zone_type(fs_info, file_bdev(bdev_file))) { 2658 ret = -EINVAL; 2659 goto error; 2660 } 2661 2662 if (fs_devices->seeding) { 2663 seeding_dev = true; 2664 down_write(&sb->s_umount); 2665 mutex_lock(&uuid_mutex); 2666 locked = true; 2667 } 2668 2669 sync_blockdev(file_bdev(bdev_file)); 2670 2671 rcu_read_lock(); 2672 list_for_each_entry_rcu(device, &fs_devices->devices, dev_list) { 2673 if (device->bdev == file_bdev(bdev_file)) { 2674 ret = -EEXIST; 2675 rcu_read_unlock(); 2676 goto error; 2677 } 2678 } 2679 rcu_read_unlock(); 2680 2681 device = btrfs_alloc_device(fs_info, NULL, NULL, device_path); 2682 if (IS_ERR(device)) { 2683 /* we can safely leave the fs_devices entry around */ 2684 ret = PTR_ERR(device); 2685 goto error; 2686 } 2687 2688 device->fs_info = fs_info; 2689 device->bdev_file = bdev_file; 2690 device->bdev = file_bdev(bdev_file); 2691 ret = lookup_bdev(device_path, &device->devt); 2692 if (ret) 2693 goto error_free_device; 2694 2695 ret = btrfs_get_dev_zone_info(device, false); 2696 if (ret) 2697 goto error_free_device; 2698 2699 trans = btrfs_start_transaction(root, 0); 2700 if (IS_ERR(trans)) { 2701 ret = PTR_ERR(trans); 2702 goto error_free_zone; 2703 } 2704 2705 set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state); 2706 device->generation = trans->transid; 2707 device->io_width = fs_info->sectorsize; 2708 device->io_align = fs_info->sectorsize; 2709 device->sector_size = fs_info->sectorsize; 2710 device->total_bytes = 2711 round_down(bdev_nr_bytes(device->bdev), fs_info->sectorsize); 2712 device->disk_total_bytes = device->total_bytes; 2713 device->commit_total_bytes = device->total_bytes; 2714 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state); 2715 clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state); 2716 device->dev_stats_valid = 1; 2717 set_blocksize(device->bdev, BTRFS_BDEV_BLOCKSIZE); 2718 2719 if (seeding_dev) { 2720 btrfs_clear_sb_rdonly(sb); 2721 2722 /* GFP_KERNEL allocation must not be under device_list_mutex */ 2723 seed_devices = btrfs_init_sprout(fs_info); 2724 if (IS_ERR(seed_devices)) { 2725 ret = PTR_ERR(seed_devices); 2726 btrfs_abort_transaction(trans, ret); 2727 goto error_trans; 2728 } 2729 } 2730 2731 mutex_lock(&fs_devices->device_list_mutex); 2732 if (seeding_dev) { 2733 btrfs_setup_sprout(fs_info, seed_devices); 2734 btrfs_assign_next_active_device(fs_info->fs_devices->latest_dev, 2735 device); 2736 } 2737 2738 device->fs_devices = fs_devices; 2739 2740 mutex_lock(&fs_info->chunk_mutex); 2741 list_add_rcu(&device->dev_list, &fs_devices->devices); 2742 list_add(&device->dev_alloc_list, &fs_devices->alloc_list); 2743 fs_devices->num_devices++; 2744 fs_devices->open_devices++; 2745 fs_devices->rw_devices++; 2746 fs_devices->total_devices++; 2747 fs_devices->total_rw_bytes += device->total_bytes; 2748 2749 atomic64_add(device->total_bytes, &fs_info->free_chunk_space); 2750 2751 if (!bdev_nonrot(device->bdev)) 2752 fs_devices->rotating = true; 2753 2754 orig_super_total_bytes = 
btrfs_super_total_bytes(fs_info->super_copy); 2755 btrfs_set_super_total_bytes(fs_info->super_copy, 2756 round_down(orig_super_total_bytes + device->total_bytes, 2757 fs_info->sectorsize)); 2758 2759 orig_super_num_devices = btrfs_super_num_devices(fs_info->super_copy); 2760 btrfs_set_super_num_devices(fs_info->super_copy, 2761 orig_super_num_devices + 1); 2762 2763 /* 2764 * We've got more storage, clear any full flags on the space 2765 * infos. 2766 */ 2767 btrfs_clear_space_info_full(fs_info); 2768 2769 mutex_unlock(&fs_info->chunk_mutex); 2770 2771 /* Add sysfs device entry */ 2772 btrfs_sysfs_add_device(device); 2773 2774 mutex_unlock(&fs_devices->device_list_mutex); 2775 2776 if (seeding_dev) { 2777 mutex_lock(&fs_info->chunk_mutex); 2778 ret = init_first_rw_device(trans); 2779 mutex_unlock(&fs_info->chunk_mutex); 2780 if (ret) { 2781 btrfs_abort_transaction(trans, ret); 2782 goto error_sysfs; 2783 } 2784 } 2785 2786 ret = btrfs_add_dev_item(trans, device); 2787 if (ret) { 2788 btrfs_abort_transaction(trans, ret); 2789 goto error_sysfs; 2790 } 2791 2792 if (seeding_dev) { 2793 ret = btrfs_finish_sprout(trans); 2794 if (ret) { 2795 btrfs_abort_transaction(trans, ret); 2796 goto error_sysfs; 2797 } 2798 2799 /* 2800 * fs_devices now represents the newly sprouted filesystem and 2801 * its fsid has been changed by btrfs_setup_sprout(). 2802 */ 2803 btrfs_sysfs_update_sprout_fsid(fs_devices); 2804 } 2805 2806 ret = btrfs_commit_transaction(trans); 2807 2808 if (seeding_dev) { 2809 mutex_unlock(&uuid_mutex); 2810 up_write(&sb->s_umount); 2811 locked = false; 2812 2813 if (ret) /* transaction commit */ 2814 return ret; 2815 2816 ret = btrfs_relocate_sys_chunks(fs_info); 2817 if (ret < 0) 2818 btrfs_handle_fs_error(fs_info, ret, 2819 "Failed to relocate sys chunks after device initialization. This can be fixed using the \"btrfs balance\" command."); 2820 trans = btrfs_attach_transaction(root); 2821 if (IS_ERR(trans)) { 2822 if (PTR_ERR(trans) == -ENOENT) 2823 return 0; 2824 ret = PTR_ERR(trans); 2825 trans = NULL; 2826 goto error_sysfs; 2827 } 2828 ret = btrfs_commit_transaction(trans); 2829 } 2830 2831 /* 2832 * Now that we have written a new super block to this device, check all 2833 * other fs_devices lists and forget this device if it was previously 2834 * scanned as a member of another filesystem (an alien device). 2835 * We can ignore the return value as it typically returns -EINVAL and 2836 * only succeeds if the device was an alien.
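 * (An alien is e.g. a stale record in the global fs_uuids list left by a
 * filesystem that previously lived on the same block device and was
 * scanned but never mounted; btrfs_forget_devices() drops such a record.)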
2837 */ 2838 btrfs_forget_devices(device->devt); 2839 2840 /* Update ctime/mtime for blkid or udev */ 2841 update_dev_time(device_path); 2842 2843 return ret; 2844 2845 error_sysfs: 2846 btrfs_sysfs_remove_device(device); 2847 mutex_lock(&fs_info->fs_devices->device_list_mutex); 2848 mutex_lock(&fs_info->chunk_mutex); 2849 list_del_rcu(&device->dev_list); 2850 list_del(&device->dev_alloc_list); 2851 fs_info->fs_devices->num_devices--; 2852 fs_info->fs_devices->open_devices--; 2853 fs_info->fs_devices->rw_devices--; 2854 fs_info->fs_devices->total_devices--; 2855 fs_info->fs_devices->total_rw_bytes -= device->total_bytes; 2856 atomic64_sub(device->total_bytes, &fs_info->free_chunk_space); 2857 btrfs_set_super_total_bytes(fs_info->super_copy, 2858 orig_super_total_bytes); 2859 btrfs_set_super_num_devices(fs_info->super_copy, 2860 orig_super_num_devices); 2861 mutex_unlock(&fs_info->chunk_mutex); 2862 mutex_unlock(&fs_info->fs_devices->device_list_mutex); 2863 error_trans: 2864 if (seeding_dev) 2865 btrfs_set_sb_rdonly(sb); 2866 if (trans) 2867 btrfs_end_transaction(trans); 2868 error_free_zone: 2869 btrfs_destroy_dev_zone_info(device); 2870 error_free_device: 2871 btrfs_free_device(device); 2872 error: 2873 fput(bdev_file); 2874 if (locked) { 2875 mutex_unlock(&uuid_mutex); 2876 up_write(&sb->s_umount); 2877 } 2878 return ret; 2879 } 2880 2881 static noinline int btrfs_update_device(struct btrfs_trans_handle *trans, 2882 struct btrfs_device *device) 2883 { 2884 int ret; 2885 struct btrfs_path *path; 2886 struct btrfs_root *root = device->fs_info->chunk_root; 2887 struct btrfs_dev_item *dev_item; 2888 struct extent_buffer *leaf; 2889 struct btrfs_key key; 2890 2891 path = btrfs_alloc_path(); 2892 if (!path) 2893 return -ENOMEM; 2894 2895 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 2896 key.type = BTRFS_DEV_ITEM_KEY; 2897 key.offset = device->devid; 2898 2899 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 2900 if (ret < 0) 2901 goto out; 2902 2903 if (ret > 0) { 2904 ret = -ENOENT; 2905 goto out; 2906 } 2907 2908 leaf = path->nodes[0]; 2909 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item); 2910 2911 btrfs_set_device_id(leaf, dev_item, device->devid); 2912 btrfs_set_device_type(leaf, dev_item, device->type); 2913 btrfs_set_device_io_align(leaf, dev_item, device->io_align); 2914 btrfs_set_device_io_width(leaf, dev_item, device->io_width); 2915 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size); 2916 btrfs_set_device_total_bytes(leaf, dev_item, 2917 btrfs_device_get_disk_total_bytes(device)); 2918 btrfs_set_device_bytes_used(leaf, dev_item, 2919 btrfs_device_get_bytes_used(device)); 2920 btrfs_mark_buffer_dirty(trans, leaf); 2921 2922 out: 2923 btrfs_free_path(path); 2924 return ret; 2925 } 2926 2927 int btrfs_grow_device(struct btrfs_trans_handle *trans, 2928 struct btrfs_device *device, u64 new_size) 2929 { 2930 struct btrfs_fs_info *fs_info = device->fs_info; 2931 struct btrfs_super_block *super_copy = fs_info->super_copy; 2932 u64 old_total; 2933 u64 diff; 2934 int ret; 2935 2936 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) 2937 return -EACCES; 2938 2939 new_size = round_down(new_size, fs_info->sectorsize); 2940 2941 mutex_lock(&fs_info->chunk_mutex); 2942 old_total = btrfs_super_total_bytes(super_copy); 2943 diff = round_down(new_size - device->total_bytes, fs_info->sectorsize); 2944 2945 if (new_size <= device->total_bytes || 2946 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) { 2947 mutex_unlock(&fs_info->chunk_mutex); 
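/*
 * A grow must strictly increase the device size (shrinking is done via
 * btrfs_shrink_device()), and a running replace target must not be
 * resized here.
 */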
2948 return -EINVAL; 2949 } 2950 2951 btrfs_set_super_total_bytes(super_copy, 2952 round_down(old_total + diff, fs_info->sectorsize)); 2953 device->fs_devices->total_rw_bytes += diff; 2954 atomic64_add(diff, &fs_info->free_chunk_space); 2955 2956 btrfs_device_set_total_bytes(device, new_size); 2957 btrfs_device_set_disk_total_bytes(device, new_size); 2958 btrfs_clear_space_info_full(device->fs_info); 2959 if (list_empty(&device->post_commit_list)) 2960 list_add_tail(&device->post_commit_list, 2961 &trans->transaction->dev_update_list); 2962 mutex_unlock(&fs_info->chunk_mutex); 2963 2964 btrfs_reserve_chunk_metadata(trans, false); 2965 ret = btrfs_update_device(trans, device); 2966 btrfs_trans_release_chunk_metadata(trans); 2967 2968 return ret; 2969 } 2970 2971 static int btrfs_free_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset) 2972 { 2973 struct btrfs_fs_info *fs_info = trans->fs_info; 2974 struct btrfs_root *root = fs_info->chunk_root; 2975 int ret; 2976 struct btrfs_path *path; 2977 struct btrfs_key key; 2978 2979 path = btrfs_alloc_path(); 2980 if (!path) 2981 return -ENOMEM; 2982 2983 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 2984 key.offset = chunk_offset; 2985 key.type = BTRFS_CHUNK_ITEM_KEY; 2986 2987 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 2988 if (ret < 0) 2989 goto out; 2990 else if (ret > 0) { /* Logic error or corruption */ 2991 btrfs_handle_fs_error(fs_info, -ENOENT, 2992 "Failed lookup while freeing chunk."); 2993 ret = -ENOENT; 2994 goto out; 2995 } 2996 2997 ret = btrfs_del_item(trans, root, path); 2998 if (ret < 0) 2999 btrfs_handle_fs_error(fs_info, ret, 3000 "Failed to delete chunk item."); 3001 out: 3002 btrfs_free_path(path); 3003 return ret; 3004 } 3005 3006 static int btrfs_del_sys_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset) 3007 { 3008 struct btrfs_super_block *super_copy = fs_info->super_copy; 3009 struct btrfs_disk_key *disk_key; 3010 struct btrfs_chunk *chunk; 3011 u8 *ptr; 3012 int ret = 0; 3013 u32 num_stripes; 3014 u32 array_size; 3015 u32 len = 0; 3016 u32 cur; 3017 struct btrfs_key key; 3018 3019 lockdep_assert_held(&fs_info->chunk_mutex); 3020 array_size = btrfs_super_sys_array_size(super_copy); 3021 3022 ptr = super_copy->sys_chunk_array; 3023 cur = 0; 3024 3025 while (cur < array_size) { 3026 disk_key = (struct btrfs_disk_key *)ptr; 3027 btrfs_disk_key_to_cpu(&key, disk_key); 3028 3029 len = sizeof(*disk_key); 3030 3031 if (key.type == BTRFS_CHUNK_ITEM_KEY) { 3032 chunk = (struct btrfs_chunk *)(ptr + len); 3033 num_stripes = btrfs_stack_chunk_num_stripes(chunk); 3034 len += btrfs_chunk_item_size(num_stripes); 3035 } else { 3036 ret = -EIO; 3037 break; 3038 } 3039 if (key.objectid == BTRFS_FIRST_CHUNK_TREE_OBJECTID && 3040 key.offset == chunk_offset) { 3041 memmove(ptr, ptr + len, array_size - (cur + len)); 3042 array_size -= len; 3043 btrfs_set_super_sys_array_size(super_copy, array_size); 3044 } else { 3045 ptr += len; 3046 cur += len; 3047 } 3048 } 3049 return ret; 3050 } 3051 3052 struct btrfs_chunk_map *btrfs_find_chunk_map_nolock(struct btrfs_fs_info *fs_info, 3053 u64 logical, u64 length) 3054 { 3055 struct rb_node *node = fs_info->mapping_tree.rb_root.rb_node; 3056 struct rb_node *prev = NULL; 3057 struct rb_node *orig_prev; 3058 struct btrfs_chunk_map *map; 3059 struct btrfs_chunk_map *prev_map = NULL; 3060 3061 while (node) { 3062 map = rb_entry(node, struct btrfs_chunk_map, rb_node); 3063 prev = node; 3064 prev_map = map; 3065 3066 if (logical < map->start) { 3067 node = node->rb_left; 3068 } else 
if (logical >= map->start + map->chunk_len) { 3069 node = node->rb_right; 3070 } else { 3071 refcount_inc(&map->refs); 3072 return map; 3073 } 3074 } 3075 3076 if (!prev) 3077 return NULL; 3078 3079 orig_prev = prev; 3080 while (prev && logical >= prev_map->start + prev_map->chunk_len) { 3081 prev = rb_next(prev); 3082 prev_map = rb_entry(prev, struct btrfs_chunk_map, rb_node); 3083 } 3084 3085 if (!prev) { 3086 prev = orig_prev; 3087 prev_map = rb_entry(prev, struct btrfs_chunk_map, rb_node); 3088 while (prev && logical < prev_map->start) { 3089 prev = rb_prev(prev); 3090 prev_map = rb_entry(prev, struct btrfs_chunk_map, rb_node); 3091 } 3092 } 3093 3094 if (prev) { 3095 u64 end = logical + length; 3096 3097 /* 3098 * Caller can pass a U64_MAX length when it wants to get any 3099 * chunk starting at an offset of 'logical' or higher, so deal 3100 * with the addition overflowing by resetting the end offset to 3101 * U64_MAX. 3102 */ 3102 if (end < logical) 3103 end = U64_MAX; 3104 3105 if (end > prev_map->start && 3106 logical < prev_map->start + prev_map->chunk_len) { 3107 refcount_inc(&prev_map->refs); 3108 return prev_map; 3109 } 3110 } 3111 3112 return NULL; 3113 } 3114 3115 struct btrfs_chunk_map *btrfs_find_chunk_map(struct btrfs_fs_info *fs_info, 3116 u64 logical, u64 length) 3117 { 3118 struct btrfs_chunk_map *map; 3119 3120 read_lock(&fs_info->mapping_tree_lock); 3121 map = btrfs_find_chunk_map_nolock(fs_info, logical, length); 3122 read_unlock(&fs_info->mapping_tree_lock); 3123 3124 return map; 3125 } 3126 3127 /* 3128 * Find the mapping containing the given logical extent. 3129 * 3130 * @logical: Logical block offset in bytes. 3131 * @length: Length of extent in bytes. 3132 * 3133 * Return: Chunk mapping or ERR_PTR. 3134 */ 3135 struct btrfs_chunk_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info, 3136 u64 logical, u64 length) 3137 { 3138 struct btrfs_chunk_map *map; 3139 3140 map = btrfs_find_chunk_map(fs_info, logical, length); 3141 3142 if (unlikely(!map)) { 3143 btrfs_crit(fs_info, 3144 "unable to find chunk map for logical %llu length %llu", 3145 logical, length); 3146 return ERR_PTR(-EINVAL); 3147 } 3148 3149 if (unlikely(map->start > logical || map->start + map->chunk_len <= logical)) { 3150 btrfs_crit(fs_info, 3151 "found a bad chunk map, wanted %llu-%llu, found %llu-%llu", 3152 logical, logical + length, map->start, 3153 map->start + map->chunk_len); 3154 btrfs_free_chunk_map(map); 3155 return ERR_PTR(-EINVAL); 3156 } 3157 3158 /* Callers are responsible for dropping the reference. */ 3159 return map; 3160 } 3161 3162 static int remove_chunk_item(struct btrfs_trans_handle *trans, 3163 struct btrfs_chunk_map *map, u64 chunk_offset) 3164 { 3165 int i; 3166 3167 /* 3168 * Removing chunk items and updating the device items in the chunks btree 3169 * requires holding the chunk_mutex. 3170 * See the comment at btrfs_chunk_alloc() for the details.
3171 */ 3172 lockdep_assert_held(&trans->fs_info->chunk_mutex); 3173 3174 for (i = 0; i < map->num_stripes; i++) { 3175 int ret; 3176 3177 ret = btrfs_update_device(trans, map->stripes[i].dev); 3178 if (ret) 3179 return ret; 3180 } 3181 3182 return btrfs_free_chunk(trans, chunk_offset); 3183 } 3184 3185 int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset) 3186 { 3187 struct btrfs_fs_info *fs_info = trans->fs_info; 3188 struct btrfs_chunk_map *map; 3189 u64 dev_extent_len = 0; 3190 int i, ret = 0; 3191 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 3192 3193 map = btrfs_get_chunk_map(fs_info, chunk_offset, 1); 3194 if (IS_ERR(map)) { 3195 /* 3196 * This is a logic error, but we don't want to just rely on the 3197 * user having built with ASSERT enabled, so if ASSERT doesn't 3198 * do anything we still error out. 3199 */ 3200 ASSERT(0); 3201 return PTR_ERR(map); 3202 } 3203 3204 /* 3205 * First delete the device extent items from the devices btree. 3206 * We take the device_list_mutex to avoid racing with the finishing phase 3207 * of a device replace operation. See the comment below before acquiring 3208 * fs_info->chunk_mutex. Note that here we do not acquire the chunk_mutex 3209 * because that can result in a deadlock when deleting the device extent 3210 * items from the devices btree - COWing an extent buffer from the btree 3211 * may result in allocating a new metadata chunk, which would attempt to 3212 * lock fs_info->chunk_mutex again. 3213 */ 3214 mutex_lock(&fs_devices->device_list_mutex); 3215 for (i = 0; i < map->num_stripes; i++) { 3216 struct btrfs_device *device = map->stripes[i].dev; 3217 ret = btrfs_free_dev_extent(trans, device, 3218 map->stripes[i].physical, 3219 &dev_extent_len); 3220 if (ret) { 3221 mutex_unlock(&fs_devices->device_list_mutex); 3222 btrfs_abort_transaction(trans, ret); 3223 goto out; 3224 } 3225 3226 if (device->bytes_used > 0) { 3227 mutex_lock(&fs_info->chunk_mutex); 3228 btrfs_device_set_bytes_used(device, 3229 device->bytes_used - dev_extent_len); 3230 atomic64_add(dev_extent_len, &fs_info->free_chunk_space); 3231 btrfs_clear_space_info_full(fs_info); 3232 mutex_unlock(&fs_info->chunk_mutex); 3233 } 3234 } 3235 mutex_unlock(&fs_devices->device_list_mutex); 3236 3237 /* 3238 * We acquire fs_info->chunk_mutex for 2 reasons: 3239 * 3240 * 1) Just like with the first phase of the chunk allocation, we must 3241 * reserve system space, do all chunk btree updates and deletions, and 3242 * update the system chunk array in the superblock while holding this 3243 * mutex. This is for similar reasons as explained in the comment at 3244 * the top of btrfs_chunk_alloc(); 3245 * 3246 * 2) Prevent races with the final phase of a device replace operation 3247 * that replaces the device object associated with the map's stripes, 3248 * because the device object's id can change at any time during that 3249 * final phase of the device replace operation 3250 * (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the 3251 * replaced device and then see it with an ID of 3252 * BTRFS_DEV_REPLACE_DEVID, which would cause a failure when updating 3253 * the device item, which does not exist in the chunk btree. 3254 * The finishing phase of device replace acquires both the 3255 * device_list_mutex and the chunk_mutex, in that order, so we are 3256 * safe by just acquiring the chunk_mutex.
3257 */ 3258 trans->removing_chunk = true; 3259 mutex_lock(&fs_info->chunk_mutex); 3260 3261 check_system_chunk(trans, map->type); 3262 3263 ret = remove_chunk_item(trans, map, chunk_offset); 3264 /* 3265 * Normally we should not get -ENOSPC since we reserved space before 3266 * through the call to check_system_chunk(). 3267 * 3268 * Despite our system space_info having enough free space, we may not 3269 * be able to allocate extents from its block groups, because they all 3270 * have an incompatible profile, which will force us to allocate a new 3271 * system block group with the right profile, or right after we called 3272 * check_system_chunk() above, a scrub turned the only system block group 3273 * with enough free space into RO mode. 3274 * This is explained in more detail at do_chunk_alloc(). 3275 * 3276 * So if we get -ENOSPC, allocate a new system chunk and retry once. 3277 */ 3278 if (ret == -ENOSPC) { 3279 const u64 sys_flags = btrfs_system_alloc_profile(fs_info); 3280 struct btrfs_block_group *sys_bg; 3281 3282 sys_bg = btrfs_create_chunk(trans, sys_flags); 3283 if (IS_ERR(sys_bg)) { 3284 ret = PTR_ERR(sys_bg); 3285 btrfs_abort_transaction(trans, ret); 3286 goto out; 3287 } 3288 3289 ret = btrfs_chunk_alloc_add_chunk_item(trans, sys_bg); 3290 if (ret) { 3291 btrfs_abort_transaction(trans, ret); 3292 goto out; 3293 } 3294 3295 ret = remove_chunk_item(trans, map, chunk_offset); 3296 if (ret) { 3297 btrfs_abort_transaction(trans, ret); 3298 goto out; 3299 } 3300 } else if (ret) { 3301 btrfs_abort_transaction(trans, ret); 3302 goto out; 3303 } 3304 3305 trace_btrfs_chunk_free(fs_info, map, chunk_offset, map->chunk_len); 3306 3307 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) { 3308 ret = btrfs_del_sys_chunk(fs_info, chunk_offset); 3309 if (ret) { 3310 btrfs_abort_transaction(trans, ret); 3311 goto out; 3312 } 3313 } 3314 3315 mutex_unlock(&fs_info->chunk_mutex); 3316 trans->removing_chunk = false; 3317 3318 /* 3319 * We are done with chunk btree updates and deletions, so release the 3320 * system space we previously reserved (with check_system_chunk()). 3321 */ 3322 btrfs_trans_release_chunk_metadata(trans); 3323 3324 ret = btrfs_remove_block_group(trans, map); 3325 if (ret) { 3326 btrfs_abort_transaction(trans, ret); 3327 goto out; 3328 } 3329 3330 out: 3331 if (trans->removing_chunk) { 3332 mutex_unlock(&fs_info->chunk_mutex); 3333 trans->removing_chunk = false; 3334 } 3335 /* once for us */ 3336 btrfs_free_chunk_map(map); 3337 return ret; 3338 } 3339 3340 int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset) 3341 { 3342 struct btrfs_root *root = fs_info->chunk_root; 3343 struct btrfs_trans_handle *trans; 3344 struct btrfs_block_group *block_group; 3345 u64 length; 3346 int ret; 3347 3348 if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) { 3349 btrfs_err(fs_info, 3350 "relocate: not supported on extent tree v2 yet"); 3351 return -EINVAL; 3352 } 3353 3354 /* 3355 * Prevent races with automatic removal of unused block groups. 3356 * After we relocate and before we remove the chunk with offset 3357 * chunk_offset, automatic removal of the block group can kick in, 3358 * resulting in a failure when calling btrfs_remove_chunk() below. 3359 * 3360 * Make sure to acquire this mutex before doing a tree search (dev 3361 * or chunk trees) to find chunks.
Otherwise the cleaner kthread might 3362 * call btrfs_remove_chunk() (through btrfs_delete_unused_bgs()) after 3363 * we release the path used to search the chunk/dev tree and before 3364 * the current task acquires this mutex and calls us. 3365 */ 3366 lockdep_assert_held(&fs_info->reclaim_bgs_lock); 3367 3368 /* step one, relocate all the extents inside this chunk */ 3369 btrfs_scrub_pause(fs_info); 3370 ret = btrfs_relocate_block_group(fs_info, chunk_offset); 3371 btrfs_scrub_continue(fs_info); 3372 if (ret) { 3373 /* 3374 * If we had a transaction abort, stop all running scrubs. 3375 * See transaction.c:cleanup_transaction() for why we do it here. 3376 */ 3377 if (BTRFS_FS_ERROR(fs_info)) 3378 btrfs_scrub_cancel(fs_info); 3379 return ret; 3380 } 3381 3382 block_group = btrfs_lookup_block_group(fs_info, chunk_offset); 3383 if (!block_group) 3384 return -ENOENT; 3385 btrfs_discard_cancel_work(&fs_info->discard_ctl, block_group); 3386 length = block_group->length; 3387 btrfs_put_block_group(block_group); 3388 3389 /* 3390 * On a zoned file system, discard the whole block group; this will 3391 * trigger a REQ_OP_ZONE_RESET operation on the device zone. If 3392 * resetting the zone fails, don't treat it as a fatal problem from the 3393 * filesystem's point of view. 3394 */ 3395 if (btrfs_is_zoned(fs_info)) { 3396 ret = btrfs_discard_extent(fs_info, chunk_offset, length, NULL); 3397 if (ret) 3398 btrfs_info(fs_info, 3399 "failed to reset zone %llu after relocation", 3400 chunk_offset); 3401 } 3402 3403 trans = btrfs_start_trans_remove_block_group(root->fs_info, 3404 chunk_offset); 3405 if (IS_ERR(trans)) { 3406 ret = PTR_ERR(trans); 3407 btrfs_handle_fs_error(root->fs_info, ret, NULL); 3408 return ret; 3409 } 3410 3411 /* 3412 * step two, delete the device extents and the 3413 * chunk tree entries 3414 */ 3415 ret = btrfs_remove_chunk(trans, chunk_offset); 3416 btrfs_end_transaction(trans); 3417 return ret; 3418 } 3419 3420 static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info) 3421 { 3422 struct btrfs_root *chunk_root = fs_info->chunk_root; 3423 struct btrfs_path *path; 3424 struct extent_buffer *leaf; 3425 struct btrfs_chunk *chunk; 3426 struct btrfs_key key; 3427 struct btrfs_key found_key; 3428 u64 chunk_type; 3429 bool retried = false; 3430 int failed = 0; 3431 int ret; 3432 3433 path = btrfs_alloc_path(); 3434 if (!path) 3435 return -ENOMEM; 3436 3437 again: 3438 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 3439 key.offset = (u64)-1; 3440 key.type = BTRFS_CHUNK_ITEM_KEY; 3441 3442 while (1) { 3443 mutex_lock(&fs_info->reclaim_bgs_lock); 3444 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0); 3445 if (ret < 0) { 3446 mutex_unlock(&fs_info->reclaim_bgs_lock); 3447 goto error; 3448 } 3449 if (ret == 0) { 3450 /* 3451 * On the first search we would find a chunk with 3452 * offset (u64)-1, which is not possible. On subsequent 3453 * loops this would find an existing item on an invalid 3454 * offset (one less than the previous one, wrong 3455 * alignment and size).
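 *
 * The loop walks the chunk tree backwards: start the search at
 * (BTRFS_FIRST_CHUNK_TREE_OBJECTID, BTRFS_CHUNK_ITEM_KEY, (u64)-1), step
 * to the previous item, process it, then continue from
 * found_key.offset - 1 until the chunk at offset 0 has been processed.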
3456 */ 3457 ret = -EUCLEAN; 3458 mutex_unlock(&fs_info->reclaim_bgs_lock); 3459 goto error; 3460 } 3461 3462 ret = btrfs_previous_item(chunk_root, path, key.objectid, 3463 key.type); 3464 if (ret) 3465 mutex_unlock(&fs_info->reclaim_bgs_lock); 3466 if (ret < 0) 3467 goto error; 3468 if (ret > 0) 3469 break; 3470 3471 leaf = path->nodes[0]; 3472 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 3473 3474 chunk = btrfs_item_ptr(leaf, path->slots[0], 3475 struct btrfs_chunk); 3476 chunk_type = btrfs_chunk_type(leaf, chunk); 3477 btrfs_release_path(path); 3478 3479 if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) { 3480 ret = btrfs_relocate_chunk(fs_info, found_key.offset); 3481 if (ret == -ENOSPC) 3482 failed++; 3483 else 3484 BUG_ON(ret); 3485 } 3486 mutex_unlock(&fs_info->reclaim_bgs_lock); 3487 3488 if (found_key.offset == 0) 3489 break; 3490 key.offset = found_key.offset - 1; 3491 } 3492 ret = 0; 3493 if (failed && !retried) { 3494 failed = 0; 3495 retried = true; 3496 goto again; 3497 } else if (WARN_ON(failed && retried)) { 3498 ret = -ENOSPC; 3499 } 3500 error: 3501 btrfs_free_path(path); 3502 return ret; 3503 } 3504 3505 /* 3506 * Return 1 : allocated a data chunk successfully, 3507 * return <0: error during data chunk allocation, 3508 * return 0 : no need to allocate a data chunk. 3509 */ 3510 static int btrfs_may_alloc_data_chunk(struct btrfs_fs_info *fs_info, 3511 u64 chunk_offset) 3512 { 3513 struct btrfs_block_group *cache; 3514 u64 bytes_used; 3515 u64 chunk_type; 3516 3517 cache = btrfs_lookup_block_group(fs_info, chunk_offset); 3518 ASSERT(cache); 3519 chunk_type = cache->flags; 3520 btrfs_put_block_group(cache); 3521 3522 if (!(chunk_type & BTRFS_BLOCK_GROUP_DATA)) 3523 return 0; 3524 3525 spin_lock(&fs_info->data_sinfo->lock); 3526 bytes_used = fs_info->data_sinfo->bytes_used; 3527 spin_unlock(&fs_info->data_sinfo->lock); 3528 3529 if (!bytes_used) { 3530 struct btrfs_trans_handle *trans; 3531 int ret; 3532 3533 trans = btrfs_join_transaction(fs_info->tree_root); 3534 if (IS_ERR(trans)) 3535 return PTR_ERR(trans); 3536 3537 ret = btrfs_force_chunk_alloc(trans, BTRFS_BLOCK_GROUP_DATA); 3538 btrfs_end_transaction(trans); 3539 if (ret < 0) 3540 return ret; 3541 return 1; 3542 } 3543 3544 return 0; 3545 } 3546 3547 static void btrfs_disk_balance_args_to_cpu(struct btrfs_balance_args *cpu, 3548 const struct btrfs_disk_balance_args *disk) 3549 { 3550 memset(cpu, 0, sizeof(*cpu)); 3551 3552 cpu->profiles = le64_to_cpu(disk->profiles); 3553 cpu->usage = le64_to_cpu(disk->usage); 3554 cpu->devid = le64_to_cpu(disk->devid); 3555 cpu->pstart = le64_to_cpu(disk->pstart); 3556 cpu->pend = le64_to_cpu(disk->pend); 3557 cpu->vstart = le64_to_cpu(disk->vstart); 3558 cpu->vend = le64_to_cpu(disk->vend); 3559 cpu->target = le64_to_cpu(disk->target); 3560 cpu->flags = le64_to_cpu(disk->flags); 3561 cpu->limit = le64_to_cpu(disk->limit); 3562 cpu->stripes_min = le32_to_cpu(disk->stripes_min); 3563 cpu->stripes_max = le32_to_cpu(disk->stripes_max); 3564 } 3565 3566 static void btrfs_cpu_balance_args_to_disk(struct btrfs_disk_balance_args *disk, 3567 const struct btrfs_balance_args *cpu) 3568 { 3569 memset(disk, 0, sizeof(*disk)); 3570 3571 disk->profiles = cpu_to_le64(cpu->profiles); 3572 disk->usage = cpu_to_le64(cpu->usage); 3573 disk->devid = cpu_to_le64(cpu->devid); 3574 disk->pstart = cpu_to_le64(cpu->pstart); 3575 disk->pend = cpu_to_le64(cpu->pend); 3576 disk->vstart = cpu_to_le64(cpu->vstart); 3577 disk->vend = cpu_to_le64(cpu->vend); 3578 disk->target = cpu_to_le64(cpu->target);
3579 disk->flags = cpu_to_le64(cpu->flags); 3580 disk->limit = cpu_to_le64(cpu->limit); 3581 disk->stripes_min = cpu_to_le32(cpu->stripes_min); 3582 disk->stripes_max = cpu_to_le32(cpu->stripes_max); 3583 } 3584 3585 static int insert_balance_item(struct btrfs_fs_info *fs_info, 3586 struct btrfs_balance_control *bctl) 3587 { 3588 struct btrfs_root *root = fs_info->tree_root; 3589 struct btrfs_trans_handle *trans; 3590 struct btrfs_balance_item *item; 3591 struct btrfs_disk_balance_args disk_bargs; 3592 struct btrfs_path *path; 3593 struct extent_buffer *leaf; 3594 struct btrfs_key key; 3595 int ret, err; 3596 3597 path = btrfs_alloc_path(); 3598 if (!path) 3599 return -ENOMEM; 3600 3601 trans = btrfs_start_transaction(root, 0); 3602 if (IS_ERR(trans)) { 3603 btrfs_free_path(path); 3604 return PTR_ERR(trans); 3605 } 3606 3607 key.objectid = BTRFS_BALANCE_OBJECTID; 3608 key.type = BTRFS_TEMPORARY_ITEM_KEY; 3609 key.offset = 0; 3610 3611 ret = btrfs_insert_empty_item(trans, root, path, &key, 3612 sizeof(*item)); 3613 if (ret) 3614 goto out; 3615 3616 leaf = path->nodes[0]; 3617 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item); 3618 3619 memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item)); 3620 3621 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data); 3622 btrfs_set_balance_data(leaf, item, &disk_bargs); 3623 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta); 3624 btrfs_set_balance_meta(leaf, item, &disk_bargs); 3625 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys); 3626 btrfs_set_balance_sys(leaf, item, &disk_bargs); 3627 3628 btrfs_set_balance_flags(leaf, item, bctl->flags); 3629 3630 btrfs_mark_buffer_dirty(trans, leaf); 3631 out: 3632 btrfs_free_path(path); 3633 err = btrfs_commit_transaction(trans); 3634 if (err && !ret) 3635 ret = err; 3636 return ret; 3637 } 3638 3639 static int del_balance_item(struct btrfs_fs_info *fs_info) 3640 { 3641 struct btrfs_root *root = fs_info->tree_root; 3642 struct btrfs_trans_handle *trans; 3643 struct btrfs_path *path; 3644 struct btrfs_key key; 3645 int ret, err; 3646 3647 path = btrfs_alloc_path(); 3648 if (!path) 3649 return -ENOMEM; 3650 3651 trans = btrfs_start_transaction_fallback_global_rsv(root, 0); 3652 if (IS_ERR(trans)) { 3653 btrfs_free_path(path); 3654 return PTR_ERR(trans); 3655 } 3656 3657 key.objectid = BTRFS_BALANCE_OBJECTID; 3658 key.type = BTRFS_TEMPORARY_ITEM_KEY; 3659 key.offset = 0; 3660 3661 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 3662 if (ret < 0) 3663 goto out; 3664 if (ret > 0) { 3665 ret = -ENOENT; 3666 goto out; 3667 } 3668 3669 ret = btrfs_del_item(trans, root, path); 3670 out: 3671 btrfs_free_path(path); 3672 err = btrfs_commit_transaction(trans); 3673 if (err && !ret) 3674 ret = err; 3675 return ret; 3676 } 3677 3678 /* 3679 * This is a heuristic used to reduce the number of chunks balanced on 3680 * resume after balance was interrupted. 3681 */ 3682 static void update_balance_args(struct btrfs_balance_control *bctl) 3683 { 3684 /* 3685 * Turn on soft mode for chunk types that were being converted. 3686 */ 3687 if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) 3688 bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT; 3689 if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) 3690 bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT; 3691 if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) 3692 bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT; 3693 3694 /* 3695 * Turn on the usage filter if it is not already in use.
The idea is 3696 * that chunks that we have already balanced should be 3697 * reasonably full. Don't do it for chunks that are being 3698 * converted - that will keep us from relocating unconverted 3699 * (albeit full) chunks. 3700 */ 3701 if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) && 3702 !(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) && 3703 !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) { 3704 bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE; 3705 bctl->data.usage = 90; 3706 } 3707 if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) && 3708 !(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) && 3709 !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) { 3710 bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE; 3711 bctl->sys.usage = 90; 3712 } 3713 if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) && 3714 !(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) && 3715 !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) { 3716 bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE; 3717 bctl->meta.usage = 90; 3718 } 3719 } 3720 3721 /* 3722 * Clear the balance status in fs_info and delete the balance item from disk. 3723 */ 3724 static void reset_balance_state(struct btrfs_fs_info *fs_info) 3725 { 3726 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 3727 int ret; 3728 3729 ASSERT(fs_info->balance_ctl); 3730 3731 spin_lock(&fs_info->balance_lock); 3732 fs_info->balance_ctl = NULL; 3733 spin_unlock(&fs_info->balance_lock); 3734 3735 kfree(bctl); 3736 ret = del_balance_item(fs_info); 3737 if (ret) 3738 btrfs_handle_fs_error(fs_info, ret, NULL); 3739 } 3740 3741 /* 3742 * Balance filters. Return 1 if chunk should be filtered out 3743 * (should not be balanced). 3744 */ 3745 static int chunk_profiles_filter(u64 chunk_type, 3746 struct btrfs_balance_args *bargs) 3747 { 3748 chunk_type = chunk_to_extended(chunk_type) & 3749 BTRFS_EXTENDED_PROFILE_MASK; 3750 3751 if (bargs->profiles & chunk_type) 3752 return 0; 3753 3754 return 1; 3755 } 3756 3757 static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset, 3758 struct btrfs_balance_args *bargs) 3759 { 3760 struct btrfs_block_group *cache; 3761 u64 chunk_used; 3762 u64 user_thresh_min; 3763 u64 user_thresh_max; 3764 int ret = 1; 3765 3766 cache = btrfs_lookup_block_group(fs_info, chunk_offset); 3767 chunk_used = cache->used; 3768 3769 if (bargs->usage_min == 0) 3770 user_thresh_min = 0; 3771 else 3772 user_thresh_min = mult_perc(cache->length, bargs->usage_min); 3773 3774 if (bargs->usage_max == 0) 3775 user_thresh_max = 1; 3776 else if (bargs->usage_max > 100) 3777 user_thresh_max = cache->length; 3778 else 3779 user_thresh_max = mult_perc(cache->length, bargs->usage_max); 3780 3781 if (user_thresh_min <= chunk_used && chunk_used < user_thresh_max) 3782 ret = 0; 3783 3784 btrfs_put_block_group(cache); 3785 return ret; 3786 } 3787 3788 static int chunk_usage_filter(struct btrfs_fs_info *fs_info, 3789 u64 chunk_offset, struct btrfs_balance_args *bargs) 3790 { 3791 struct btrfs_block_group *cache; 3792 u64 chunk_used, user_thresh; 3793 int ret = 1; 3794 3795 cache = btrfs_lookup_block_group(fs_info, chunk_offset); 3796 chunk_used = cache->used; 3797 3798 if (bargs->usage_min == 0) 3799 user_thresh = 1; 3800 else if (bargs->usage > 100) 3801 user_thresh = cache->length; 3802 else 3803 user_thresh = mult_perc(cache->length, bargs->usage); 3804 3805 if (chunk_used < user_thresh) 3806 ret = 0; 3807 3808 btrfs_put_block_group(cache); 3809 return ret; 3810 } 3811 3812 static int chunk_devid_filter(struct extent_buffer *leaf, 3813 
struct btrfs_chunk *chunk, 3814 struct btrfs_balance_args *bargs) 3815 { 3816 struct btrfs_stripe *stripe; 3817 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 3818 int i; 3819 3820 for (i = 0; i < num_stripes; i++) { 3821 stripe = btrfs_stripe_nr(chunk, i); 3822 if (btrfs_stripe_devid(leaf, stripe) == bargs->devid) 3823 return 0; 3824 } 3825 3826 return 1; 3827 } 3828 3829 static u64 calc_data_stripes(u64 type, int num_stripes) 3830 { 3831 const int index = btrfs_bg_flags_to_raid_index(type); 3832 const int ncopies = btrfs_raid_array[index].ncopies; 3833 const int nparity = btrfs_raid_array[index].nparity; 3834 3835 return (num_stripes - nparity) / ncopies; 3836 } 3837 3838 /* [pstart, pend) */ 3839 static int chunk_drange_filter(struct extent_buffer *leaf, 3840 struct btrfs_chunk *chunk, 3841 struct btrfs_balance_args *bargs) 3842 { 3843 struct btrfs_stripe *stripe; 3844 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 3845 u64 stripe_offset; 3846 u64 stripe_length; 3847 u64 type; 3848 int factor; 3849 int i; 3850 3851 if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID)) 3852 return 0; 3853 3854 type = btrfs_chunk_type(leaf, chunk); 3855 factor = calc_data_stripes(type, num_stripes); 3856 3857 for (i = 0; i < num_stripes; i++) { 3858 stripe = btrfs_stripe_nr(chunk, i); 3859 if (btrfs_stripe_devid(leaf, stripe) != bargs->devid) 3860 continue; 3861 3862 stripe_offset = btrfs_stripe_offset(leaf, stripe); 3863 stripe_length = btrfs_chunk_length(leaf, chunk); 3864 stripe_length = div_u64(stripe_length, factor); 3865 3866 if (stripe_offset < bargs->pend && 3867 stripe_offset + stripe_length > bargs->pstart) 3868 return 0; 3869 } 3870 3871 return 1; 3872 } 3873 3874 /* [vstart, vend) */ 3875 static int chunk_vrange_filter(struct extent_buffer *leaf, 3876 struct btrfs_chunk *chunk, 3877 u64 chunk_offset, 3878 struct btrfs_balance_args *bargs) 3879 { 3880 if (chunk_offset < bargs->vend && 3881 chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart) 3882 /* at least part of the chunk is inside this vrange */ 3883 return 0; 3884 3885 return 1; 3886 } 3887 3888 static int chunk_stripes_range_filter(struct extent_buffer *leaf, 3889 struct btrfs_chunk *chunk, 3890 struct btrfs_balance_args *bargs) 3891 { 3892 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 3893 3894 if (bargs->stripes_min <= num_stripes 3895 && num_stripes <= bargs->stripes_max) 3896 return 0; 3897 3898 return 1; 3899 } 3900 3901 static int chunk_soft_convert_filter(u64 chunk_type, 3902 struct btrfs_balance_args *bargs) 3903 { 3904 if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT)) 3905 return 0; 3906 3907 chunk_type = chunk_to_extended(chunk_type) & 3908 BTRFS_EXTENDED_PROFILE_MASK; 3909 3910 if (bargs->target == chunk_type) 3911 return 1; 3912 3913 return 0; 3914 } 3915 3916 static int should_balance_chunk(struct extent_buffer *leaf, 3917 struct btrfs_chunk *chunk, u64 chunk_offset) 3918 { 3919 struct btrfs_fs_info *fs_info = leaf->fs_info; 3920 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 3921 struct btrfs_balance_args *bargs = NULL; 3922 u64 chunk_type = btrfs_chunk_type(leaf, chunk); 3923 3924 /* type filter */ 3925 if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) & 3926 (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) { 3927 return 0; 3928 } 3929 3930 if (chunk_type & BTRFS_BLOCK_GROUP_DATA) 3931 bargs = &bctl->data; 3932 else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) 3933 bargs = &bctl->sys; 3934 else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA) 3935 bargs = &bctl->meta; 3936 3937 /* profiles 
filter */
3938 if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
3939 chunk_profiles_filter(chunk_type, bargs)) {
3940 return 0;
3941 }
3942
3943 /* usage filter */
3944 if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
3945 chunk_usage_filter(fs_info, chunk_offset, bargs)) {
3946 return 0;
3947 } else if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3948 chunk_usage_range_filter(fs_info, chunk_offset, bargs)) {
3949 return 0;
3950 }
3951
3952 /* devid filter */
3953 if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
3954 chunk_devid_filter(leaf, chunk, bargs)) {
3955 return 0;
3956 }
3957
3958 /* drange filter, makes sense only with devid filter */
3959 if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
3960 chunk_drange_filter(leaf, chunk, bargs)) {
3961 return 0;
3962 }
3963
3964 /* vrange filter */
3965 if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
3966 chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
3967 return 0;
3968 }
3969
3970 /* stripes filter */
3971 if ((bargs->flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) &&
3972 chunk_stripes_range_filter(leaf, chunk, bargs)) {
3973 return 0;
3974 }
3975
3976 /* soft profile changing mode */
3977 if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) &&
3978 chunk_soft_convert_filter(chunk_type, bargs)) {
3979 return 0;
3980 }
3981
3982 /*
3983 * limited by count, must be the last filter
3984 */
3985 if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT)) {
3986 if (bargs->limit == 0)
3987 return 0;
3988 else
3989 bargs->limit--;
3990 } else if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)) {
3991 /*
3992 * Same logic as the 'limit' filter; the minimum cannot be
3993 * determined here because we do not have the global information
3994 * about the count of all chunks that satisfy the filters.
3995 */
3996 if (bargs->limit_max == 0)
3997 return 0;
3998 else
3999 bargs->limit_max--;
4000 }
4001
4002 return 1;
4003 }
4004
4005 static int __btrfs_balance(struct btrfs_fs_info *fs_info)
4006 {
4007 struct btrfs_balance_control *bctl = fs_info->balance_ctl;
4008 struct btrfs_root *chunk_root = fs_info->chunk_root;
4009 u64 chunk_type;
4010 struct btrfs_chunk *chunk;
4011 struct btrfs_path *path = NULL;
4012 struct btrfs_key key;
4013 struct btrfs_key found_key;
4014 struct extent_buffer *leaf;
4015 int slot;
4016 int ret;
4017 int enospc_errors = 0;
4018 bool counting = true;
4019 /* The single value limit and min/max limits use the same bytes in the args union, save them here to restore after the counting pass */
4020 u64 limit_data = bctl->data.limit;
4021 u64 limit_meta = bctl->meta.limit;
4022 u64 limit_sys = bctl->sys.limit;
4023 u32 count_data = 0;
4024 u32 count_meta = 0;
4025 u32 count_sys = 0;
4026 int chunk_reserved = 0;
4027
4028 path = btrfs_alloc_path();
4029 if (!path) {
4030 ret = -ENOMEM;
4031 goto error;
4032 }
4033
4034 /* zero out stat counters */
4035 spin_lock(&fs_info->balance_lock);
4036 memset(&bctl->stat, 0, sizeof(bctl->stat));
4037 spin_unlock(&fs_info->balance_lock);
4038 again:
4039 if (!counting) {
4040 /*
4041 * The single value limit and min/max limits use the same bytes
4042 * in the args union, so restore the values saved above
4043 */
4044 bctl->data.limit = limit_data;
4045 bctl->meta.limit = limit_meta;
4046 bctl->sys.limit = limit_sys;
4047 }
4048 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
4049 key.offset = (u64)-1;
4050 key.type = BTRFS_CHUNK_ITEM_KEY;
4051
4052 while (1) {
4053 if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
4054 atomic_read(&fs_info->balance_cancel_req)) {
4055 ret = -ECANCELED;
4056 goto error;
4057 }
4058
4059 mutex_lock(&fs_info->reclaim_bgs_lock);
4060 ret =
btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
4061 if (ret < 0) {
4062 mutex_unlock(&fs_info->reclaim_bgs_lock);
4063 goto error;
4064 }
4065
4066 /*
4067 * this shouldn't happen, it means the last relocate
4068 * failed
4069 */
4070 if (ret == 0)
4071 BUG(); /* FIXME break ? */
4072
4073 ret = btrfs_previous_item(chunk_root, path, 0,
4074 BTRFS_CHUNK_ITEM_KEY);
4075 if (ret) {
4076 mutex_unlock(&fs_info->reclaim_bgs_lock);
4077 ret = 0;
4078 break;
4079 }
4080
4081 leaf = path->nodes[0];
4082 slot = path->slots[0];
4083 btrfs_item_key_to_cpu(leaf, &found_key, slot);
4084
4085 if (found_key.objectid != key.objectid) {
4086 mutex_unlock(&fs_info->reclaim_bgs_lock);
4087 break;
4088 }
4089
4090 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
4091 chunk_type = btrfs_chunk_type(leaf, chunk);
4092
4093 if (!counting) {
4094 spin_lock(&fs_info->balance_lock);
4095 bctl->stat.considered++;
4096 spin_unlock(&fs_info->balance_lock);
4097 }
4098
4099 ret = should_balance_chunk(leaf, chunk, found_key.offset);
4100
4101 btrfs_release_path(path);
4102 if (!ret) {
4103 mutex_unlock(&fs_info->reclaim_bgs_lock);
4104 goto loop;
4105 }
4106
4107 if (counting) {
4108 mutex_unlock(&fs_info->reclaim_bgs_lock);
4109 spin_lock(&fs_info->balance_lock);
4110 bctl->stat.expected++;
4111 spin_unlock(&fs_info->balance_lock);
4112
4113 if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
4114 count_data++;
4115 else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
4116 count_sys++;
4117 else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
4118 count_meta++;
4119
4120 goto loop;
4121 }
4122
4123 /*
4124 * Apply limit_min filter, no need to check if the LIMITS
4125 * filter is used, limit_min is 0 by default
4126 */
4127 if (((chunk_type & BTRFS_BLOCK_GROUP_DATA) &&
4128 count_data < bctl->data.limit_min)
4129 || ((chunk_type & BTRFS_BLOCK_GROUP_METADATA) &&
4130 count_meta < bctl->meta.limit_min)
4131 || ((chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) &&
4132 count_sys < bctl->sys.limit_min)) {
4133 mutex_unlock(&fs_info->reclaim_bgs_lock);
4134 goto loop;
4135 }
4136
4137 if (!chunk_reserved) {
4138 /*
4139 * We may be relocating the only data chunk we have,
4140 * which could potentially end up with losing data's
4141 * raid profile, so let's allocate an empty one in
4142 * advance.
4143 */
4144 ret = btrfs_may_alloc_data_chunk(fs_info,
4145 found_key.offset);
4146 if (ret < 0) {
4147 mutex_unlock(&fs_info->reclaim_bgs_lock);
4148 goto error;
4149 } else if (ret == 1) {
4150 chunk_reserved = 1;
4151 }
4152 }
4153
4154 ret = btrfs_relocate_chunk(fs_info, found_key.offset);
4155 mutex_unlock(&fs_info->reclaim_bgs_lock);
4156 if (ret == -ENOSPC) {
4157 enospc_errors++;
4158 } else if (ret == -ETXTBSY) {
4159 btrfs_info(fs_info,
4160 "skipping relocation of block group %llu due to active swapfile",
4161 found_key.offset);
4162 ret = 0;
4163 } else if (ret) {
4164 goto error;
4165 } else {
4166 spin_lock(&fs_info->balance_lock);
4167 bctl->stat.completed++;
4168 spin_unlock(&fs_info->balance_lock);
4169 }
4170 loop:
4171 if (found_key.offset == 0)
4172 break;
4173 key.offset = found_key.offset - 1;
4174 }
4175
4176 if (counting) {
4177 btrfs_release_path(path);
4178 counting = false;
4179 goto again;
4180 }
4181 error:
4182 btrfs_free_path(path);
4183 if (enospc_errors) {
4184 btrfs_info(fs_info, "%d enospc errors during balance",
4185 enospc_errors);
4186 if (!ret)
4187 ret = -ENOSPC;
4188 }
4189
4190 return ret;
4191 }
4192
4193 /*
4194 * See if a given profile is valid and reduced.
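 * A "reduced" profile has at most one profile bit set. For example
 * (illustrative values): BTRFS_BLOCK_GROUP_RAID1 on its own is reduced,
 * while (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_DUP) is not and
 * fails the has_single_bit_set() check below.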
4195 * 4196 * @flags: profile to validate 4197 * @extended: if true @flags is treated as an extended profile 4198 */ 4199 static int alloc_profile_is_valid(u64 flags, int extended) 4200 { 4201 u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK : 4202 BTRFS_BLOCK_GROUP_PROFILE_MASK); 4203 4204 flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK; 4205 4206 /* 1) check that all other bits are zeroed */ 4207 if (flags & ~mask) 4208 return 0; 4209 4210 /* 2) see if profile is reduced */ 4211 if (flags == 0) 4212 return !extended; /* "0" is valid for usual profiles */ 4213 4214 return has_single_bit_set(flags); 4215 } 4216 4217 /* 4218 * Validate target profile against allowed profiles and return true if it's OK. 4219 * Otherwise print the error message and return false. 4220 */ 4221 static inline int validate_convert_profile(struct btrfs_fs_info *fs_info, 4222 const struct btrfs_balance_args *bargs, 4223 u64 allowed, const char *type) 4224 { 4225 if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT)) 4226 return true; 4227 4228 /* Profile is valid and does not have bits outside of the allowed set */ 4229 if (alloc_profile_is_valid(bargs->target, 1) && 4230 (bargs->target & ~allowed) == 0) 4231 return true; 4232 4233 btrfs_err(fs_info, "balance: invalid convert %s profile %s", 4234 type, btrfs_bg_type_to_raid_name(bargs->target)); 4235 return false; 4236 } 4237 4238 /* 4239 * Fill @buf with textual description of balance filter flags @bargs, up to 4240 * @size_buf including the terminating null. The output may be trimmed if it 4241 * does not fit into the provided buffer. 4242 */ 4243 static void describe_balance_args(struct btrfs_balance_args *bargs, char *buf, 4244 u32 size_buf) 4245 { 4246 int ret; 4247 u32 size_bp = size_buf; 4248 char *bp = buf; 4249 u64 flags = bargs->flags; 4250 char tmp_buf[128] = {'\0'}; 4251 4252 if (!flags) 4253 return; 4254 4255 #define CHECK_APPEND_NOARG(a) \ 4256 do { \ 4257 ret = snprintf(bp, size_bp, (a)); \ 4258 if (ret < 0 || ret >= size_bp) \ 4259 goto out_overflow; \ 4260 size_bp -= ret; \ 4261 bp += ret; \ 4262 } while (0) 4263 4264 #define CHECK_APPEND_1ARG(a, v1) \ 4265 do { \ 4266 ret = snprintf(bp, size_bp, (a), (v1)); \ 4267 if (ret < 0 || ret >= size_bp) \ 4268 goto out_overflow; \ 4269 size_bp -= ret; \ 4270 bp += ret; \ 4271 } while (0) 4272 4273 #define CHECK_APPEND_2ARG(a, v1, v2) \ 4274 do { \ 4275 ret = snprintf(bp, size_bp, (a), (v1), (v2)); \ 4276 if (ret < 0 || ret >= size_bp) \ 4277 goto out_overflow; \ 4278 size_bp -= ret; \ 4279 bp += ret; \ 4280 } while (0) 4281 4282 if (flags & BTRFS_BALANCE_ARGS_CONVERT) 4283 CHECK_APPEND_1ARG("convert=%s,", 4284 btrfs_bg_type_to_raid_name(bargs->target)); 4285 4286 if (flags & BTRFS_BALANCE_ARGS_SOFT) 4287 CHECK_APPEND_NOARG("soft,"); 4288 4289 if (flags & BTRFS_BALANCE_ARGS_PROFILES) { 4290 btrfs_describe_block_groups(bargs->profiles, tmp_buf, 4291 sizeof(tmp_buf)); 4292 CHECK_APPEND_1ARG("profiles=%s,", tmp_buf); 4293 } 4294 4295 if (flags & BTRFS_BALANCE_ARGS_USAGE) 4296 CHECK_APPEND_1ARG("usage=%llu,", bargs->usage); 4297 4298 if (flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) 4299 CHECK_APPEND_2ARG("usage=%u..%u,", 4300 bargs->usage_min, bargs->usage_max); 4301 4302 if (flags & BTRFS_BALANCE_ARGS_DEVID) 4303 CHECK_APPEND_1ARG("devid=%llu,", bargs->devid); 4304 4305 if (flags & BTRFS_BALANCE_ARGS_DRANGE) 4306 CHECK_APPEND_2ARG("drange=%llu..%llu,", 4307 bargs->pstart, bargs->pend); 4308 4309 if (flags & BTRFS_BALANCE_ARGS_VRANGE) 4310 CHECK_APPEND_2ARG("vrange=%llu..%llu,", 4311 bargs->vstart, bargs->vend); 4312 4313 if 
(flags & BTRFS_BALANCE_ARGS_LIMIT)
4314 CHECK_APPEND_1ARG("limit=%llu,", bargs->limit);
4315
4316 if (flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)
4317 CHECK_APPEND_2ARG("limit=%u..%u,",
4318 bargs->limit_min, bargs->limit_max);
4319
4320 if (flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE)
4321 CHECK_APPEND_2ARG("stripes=%u..%u,",
4322 bargs->stripes_min, bargs->stripes_max);
4323
4324 #undef CHECK_APPEND_2ARG
4325 #undef CHECK_APPEND_1ARG
4326 #undef CHECK_APPEND_NOARG
4327
4328 out_overflow:
4329
4330 if (size_bp < size_buf)
4331 buf[size_buf - size_bp - 1] = '\0'; /* remove last , */
4332 else
4333 buf[0] = '\0';
4334 }
4335
4336 static void describe_balance_start_or_resume(struct btrfs_fs_info *fs_info)
4337 {
4338 u32 size_buf = 1024;
4339 char tmp_buf[192] = {'\0'};
4340 char *buf;
4341 char *bp;
4342 u32 size_bp = size_buf;
4343 int ret;
4344 struct btrfs_balance_control *bctl = fs_info->balance_ctl;
4345
4346 buf = kzalloc(size_buf, GFP_KERNEL);
4347 if (!buf)
4348 return;
4349
4350 bp = buf;
4351
4352 #define CHECK_APPEND_1ARG(a, v1) \
4353 do { \
4354 ret = snprintf(bp, size_bp, (a), (v1)); \
4355 if (ret < 0 || ret >= size_bp) \
4356 goto out_overflow; \
4357 size_bp -= ret; \
4358 bp += ret; \
4359 } while (0)
4360
4361 if (bctl->flags & BTRFS_BALANCE_FORCE)
4362 CHECK_APPEND_1ARG("%s", "-f ");
4363
4364 if (bctl->flags & BTRFS_BALANCE_DATA) {
4365 describe_balance_args(&bctl->data, tmp_buf, sizeof(tmp_buf));
4366 CHECK_APPEND_1ARG("-d%s ", tmp_buf);
4367 }
4368
4369 if (bctl->flags & BTRFS_BALANCE_METADATA) {
4370 describe_balance_args(&bctl->meta, tmp_buf, sizeof(tmp_buf));
4371 CHECK_APPEND_1ARG("-m%s ", tmp_buf);
4372 }
4373
4374 if (bctl->flags & BTRFS_BALANCE_SYSTEM) {
4375 describe_balance_args(&bctl->sys, tmp_buf, sizeof(tmp_buf));
4376 CHECK_APPEND_1ARG("-s%s ", tmp_buf);
4377 }
4378
4379 #undef CHECK_APPEND_1ARG
4380
4381 out_overflow:
4382
4383 if (size_bp < size_buf)
4384 buf[size_buf - size_bp - 1] = '\0'; /* remove last " " */
4385 btrfs_info(fs_info, "balance: %s %s",
4386 (bctl->flags & BTRFS_BALANCE_RESUME) ?
4387 "resume" : "start", buf);
4388
4389 kfree(buf);
4390 }
4391
4392 /*
4393 * Should be called with the balance mutex held
4394 */
4395 int btrfs_balance(struct btrfs_fs_info *fs_info,
4396 struct btrfs_balance_control *bctl,
4397 struct btrfs_ioctl_balance_args *bargs)
4398 {
4399 u64 meta_target, data_target;
4400 u64 allowed;
4401 int mixed = 0;
4402 int ret;
4403 u64 num_devices;
4404 unsigned seq;
4405 bool reducing_redundancy;
4406 bool paused = false;
4407 int i;
4408
4409 if (btrfs_fs_closing(fs_info) ||
4410 atomic_read(&fs_info->balance_pause_req) ||
4411 btrfs_should_cancel_balance(fs_info)) {
4412 ret = -EINVAL;
4413 goto out;
4414 }
4415
4416 allowed = btrfs_super_incompat_flags(fs_info->super_copy);
4417 if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
4418 mixed = 1;
4419
4420 /*
4421 * In case of mixed groups both data and meta should be picked,
4422 * and identical options should be given for both of them.
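 * For example (hypothetical invocation): on a mixed-block-group
 * filesystem "btrfs balance start -dusage=50 -musage=50 /mnt" passes
 * this check, while "-dusage=50 -musage=30" makes the memcmp() below
 * fail and the ioctl returns -EINVAL.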
4423 */ 4424 allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA; 4425 if (mixed && (bctl->flags & allowed)) { 4426 if (!(bctl->flags & BTRFS_BALANCE_DATA) || 4427 !(bctl->flags & BTRFS_BALANCE_METADATA) || 4428 memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) { 4429 btrfs_err(fs_info, 4430 "balance: mixed groups data and metadata options must be the same"); 4431 ret = -EINVAL; 4432 goto out; 4433 } 4434 } 4435 4436 /* 4437 * rw_devices will not change at the moment, device add/delete/replace 4438 * are exclusive 4439 */ 4440 num_devices = fs_info->fs_devices->rw_devices; 4441 4442 /* 4443 * SINGLE profile on-disk has no profile bit, but in-memory we have a 4444 * special bit for it, to make it easier to distinguish. Thus we need 4445 * to set it manually, or balance would refuse the profile. 4446 */ 4447 allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE; 4448 for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++) 4449 if (num_devices >= btrfs_raid_array[i].devs_min) 4450 allowed |= btrfs_raid_array[i].bg_flag; 4451 4452 if (!validate_convert_profile(fs_info, &bctl->data, allowed, "data") || 4453 !validate_convert_profile(fs_info, &bctl->meta, allowed, "metadata") || 4454 !validate_convert_profile(fs_info, &bctl->sys, allowed, "system")) { 4455 ret = -EINVAL; 4456 goto out; 4457 } 4458 4459 /* 4460 * Allow to reduce metadata or system integrity only if force set for 4461 * profiles with redundancy (copies, parity) 4462 */ 4463 allowed = 0; 4464 for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++) { 4465 if (btrfs_raid_array[i].ncopies >= 2 || 4466 btrfs_raid_array[i].tolerated_failures >= 1) 4467 allowed |= btrfs_raid_array[i].bg_flag; 4468 } 4469 do { 4470 seq = read_seqbegin(&fs_info->profiles_lock); 4471 4472 if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) && 4473 (fs_info->avail_system_alloc_bits & allowed) && 4474 !(bctl->sys.target & allowed)) || 4475 ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) && 4476 (fs_info->avail_metadata_alloc_bits & allowed) && 4477 !(bctl->meta.target & allowed))) 4478 reducing_redundancy = true; 4479 else 4480 reducing_redundancy = false; 4481 4482 /* if we're not converting, the target field is uninitialized */ 4483 meta_target = (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) ? 4484 bctl->meta.target : fs_info->avail_metadata_alloc_bits; 4485 data_target = (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) ? 
4486 bctl->data.target : fs_info->avail_data_alloc_bits; 4487 } while (read_seqretry(&fs_info->profiles_lock, seq)); 4488 4489 if (reducing_redundancy) { 4490 if (bctl->flags & BTRFS_BALANCE_FORCE) { 4491 btrfs_info(fs_info, 4492 "balance: force reducing metadata redundancy"); 4493 } else { 4494 btrfs_err(fs_info, 4495 "balance: reduces metadata redundancy, use --force if you want this"); 4496 ret = -EINVAL; 4497 goto out; 4498 } 4499 } 4500 4501 if (btrfs_get_num_tolerated_disk_barrier_failures(meta_target) < 4502 btrfs_get_num_tolerated_disk_barrier_failures(data_target)) { 4503 btrfs_warn(fs_info, 4504 "balance: metadata profile %s has lower redundancy than data profile %s", 4505 btrfs_bg_type_to_raid_name(meta_target), 4506 btrfs_bg_type_to_raid_name(data_target)); 4507 } 4508 4509 ret = insert_balance_item(fs_info, bctl); 4510 if (ret && ret != -EEXIST) 4511 goto out; 4512 4513 if (!(bctl->flags & BTRFS_BALANCE_RESUME)) { 4514 BUG_ON(ret == -EEXIST); 4515 BUG_ON(fs_info->balance_ctl); 4516 spin_lock(&fs_info->balance_lock); 4517 fs_info->balance_ctl = bctl; 4518 spin_unlock(&fs_info->balance_lock); 4519 } else { 4520 BUG_ON(ret != -EEXIST); 4521 spin_lock(&fs_info->balance_lock); 4522 update_balance_args(bctl); 4523 spin_unlock(&fs_info->balance_lock); 4524 } 4525 4526 ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4527 set_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags); 4528 describe_balance_start_or_resume(fs_info); 4529 mutex_unlock(&fs_info->balance_mutex); 4530 4531 ret = __btrfs_balance(fs_info); 4532 4533 mutex_lock(&fs_info->balance_mutex); 4534 if (ret == -ECANCELED && atomic_read(&fs_info->balance_pause_req)) { 4535 btrfs_info(fs_info, "balance: paused"); 4536 btrfs_exclop_balance(fs_info, BTRFS_EXCLOP_BALANCE_PAUSED); 4537 paused = true; 4538 } 4539 /* 4540 * Balance can be canceled by: 4541 * 4542 * - Regular cancel request 4543 * Then ret == -ECANCELED and balance_cancel_req > 0 4544 * 4545 * - Fatal signal to "btrfs" process 4546 * Either the signal caught by wait_reserve_ticket() and callers 4547 * got -EINTR, or caught by btrfs_should_cancel_balance() and 4548 * got -ECANCELED. 4549 * Either way, in this case balance_cancel_req = 0, and 4550 * ret == -EINTR or ret == -ECANCELED. 4551 * 4552 * So here we only check the return value to catch canceled balance. 4553 */ 4554 else if (ret == -ECANCELED || ret == -EINTR) 4555 btrfs_info(fs_info, "balance: canceled"); 4556 else 4557 btrfs_info(fs_info, "balance: ended with status: %d", ret); 4558 4559 clear_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags); 4560 4561 if (bargs) { 4562 memset(bargs, 0, sizeof(*bargs)); 4563 btrfs_update_ioctl_balance_args(fs_info, bargs); 4564 } 4565 4566 /* We didn't pause, we can clean everything up. 
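 * A paused balance, by contrast, keeps fs_info->balance_ctl and the
 * on-disk balance item so that a later resume (see
 * btrfs_resume_balance_async()) can pick up where it stopped.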
*/ 4567 if (!paused) { 4568 reset_balance_state(fs_info); 4569 btrfs_exclop_finish(fs_info); 4570 } 4571 4572 wake_up(&fs_info->balance_wait_q); 4573 4574 return ret; 4575 out: 4576 if (bctl->flags & BTRFS_BALANCE_RESUME) 4577 reset_balance_state(fs_info); 4578 else 4579 kfree(bctl); 4580 btrfs_exclop_finish(fs_info); 4581 4582 return ret; 4583 } 4584 4585 static int balance_kthread(void *data) 4586 { 4587 struct btrfs_fs_info *fs_info = data; 4588 int ret = 0; 4589 4590 sb_start_write(fs_info->sb); 4591 mutex_lock(&fs_info->balance_mutex); 4592 if (fs_info->balance_ctl) 4593 ret = btrfs_balance(fs_info, fs_info->balance_ctl, NULL); 4594 mutex_unlock(&fs_info->balance_mutex); 4595 sb_end_write(fs_info->sb); 4596 4597 return ret; 4598 } 4599 4600 int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info) 4601 { 4602 struct task_struct *tsk; 4603 4604 mutex_lock(&fs_info->balance_mutex); 4605 if (!fs_info->balance_ctl) { 4606 mutex_unlock(&fs_info->balance_mutex); 4607 return 0; 4608 } 4609 mutex_unlock(&fs_info->balance_mutex); 4610 4611 if (btrfs_test_opt(fs_info, SKIP_BALANCE)) { 4612 btrfs_info(fs_info, "balance: resume skipped"); 4613 return 0; 4614 } 4615 4616 spin_lock(&fs_info->super_lock); 4617 ASSERT(fs_info->exclusive_operation == BTRFS_EXCLOP_BALANCE_PAUSED); 4618 fs_info->exclusive_operation = BTRFS_EXCLOP_BALANCE; 4619 spin_unlock(&fs_info->super_lock); 4620 /* 4621 * A ro->rw remount sequence should continue with the paused balance 4622 * regardless of who pauses it, system or the user as of now, so set 4623 * the resume flag. 4624 */ 4625 spin_lock(&fs_info->balance_lock); 4626 fs_info->balance_ctl->flags |= BTRFS_BALANCE_RESUME; 4627 spin_unlock(&fs_info->balance_lock); 4628 4629 tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance"); 4630 return PTR_ERR_OR_ZERO(tsk); 4631 } 4632 4633 int btrfs_recover_balance(struct btrfs_fs_info *fs_info) 4634 { 4635 struct btrfs_balance_control *bctl; 4636 struct btrfs_balance_item *item; 4637 struct btrfs_disk_balance_args disk_bargs; 4638 struct btrfs_path *path; 4639 struct extent_buffer *leaf; 4640 struct btrfs_key key; 4641 int ret; 4642 4643 path = btrfs_alloc_path(); 4644 if (!path) 4645 return -ENOMEM; 4646 4647 key.objectid = BTRFS_BALANCE_OBJECTID; 4648 key.type = BTRFS_TEMPORARY_ITEM_KEY; 4649 key.offset = 0; 4650 4651 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0); 4652 if (ret < 0) 4653 goto out; 4654 if (ret > 0) { /* ret = -ENOENT; */ 4655 ret = 0; 4656 goto out; 4657 } 4658 4659 bctl = kzalloc(sizeof(*bctl), GFP_NOFS); 4660 if (!bctl) { 4661 ret = -ENOMEM; 4662 goto out; 4663 } 4664 4665 leaf = path->nodes[0]; 4666 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item); 4667 4668 bctl->flags = btrfs_balance_flags(leaf, item); 4669 bctl->flags |= BTRFS_BALANCE_RESUME; 4670 4671 btrfs_balance_data(leaf, item, &disk_bargs); 4672 btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs); 4673 btrfs_balance_meta(leaf, item, &disk_bargs); 4674 btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs); 4675 btrfs_balance_sys(leaf, item, &disk_bargs); 4676 btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs); 4677 4678 /* 4679 * This should never happen, as the paused balance state is recovered 4680 * during mount without any chance of other exclusive ops to collide. 4681 * 4682 * This gives the exclusive op status to balance and keeps in paused 4683 * state until user intervention (cancel or umount). If the ownership 4684 * cannot be assigned, show a message but do not fail. 
The balance 4685 * is in a paused state and must have fs_info::balance_ctl properly 4686 * set up. 4687 */ 4688 if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE_PAUSED)) 4689 btrfs_warn(fs_info, 4690 "balance: cannot set exclusive op status, resume manually"); 4691 4692 btrfs_release_path(path); 4693 4694 mutex_lock(&fs_info->balance_mutex); 4695 BUG_ON(fs_info->balance_ctl); 4696 spin_lock(&fs_info->balance_lock); 4697 fs_info->balance_ctl = bctl; 4698 spin_unlock(&fs_info->balance_lock); 4699 mutex_unlock(&fs_info->balance_mutex); 4700 out: 4701 btrfs_free_path(path); 4702 return ret; 4703 } 4704 4705 int btrfs_pause_balance(struct btrfs_fs_info *fs_info) 4706 { 4707 int ret = 0; 4708 4709 mutex_lock(&fs_info->balance_mutex); 4710 if (!fs_info->balance_ctl) { 4711 mutex_unlock(&fs_info->balance_mutex); 4712 return -ENOTCONN; 4713 } 4714 4715 if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) { 4716 atomic_inc(&fs_info->balance_pause_req); 4717 mutex_unlock(&fs_info->balance_mutex); 4718 4719 wait_event(fs_info->balance_wait_q, 4720 !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4721 4722 mutex_lock(&fs_info->balance_mutex); 4723 /* we are good with balance_ctl ripped off from under us */ 4724 BUG_ON(test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4725 atomic_dec(&fs_info->balance_pause_req); 4726 } else { 4727 ret = -ENOTCONN; 4728 } 4729 4730 mutex_unlock(&fs_info->balance_mutex); 4731 return ret; 4732 } 4733 4734 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info) 4735 { 4736 mutex_lock(&fs_info->balance_mutex); 4737 if (!fs_info->balance_ctl) { 4738 mutex_unlock(&fs_info->balance_mutex); 4739 return -ENOTCONN; 4740 } 4741 4742 /* 4743 * A paused balance with the item stored on disk can be resumed at 4744 * mount time if the mount is read-write. Otherwise it's still paused 4745 * and we must not allow cancelling as it deletes the item. 4746 */ 4747 if (sb_rdonly(fs_info->sb)) { 4748 mutex_unlock(&fs_info->balance_mutex); 4749 return -EROFS; 4750 } 4751 4752 atomic_inc(&fs_info->balance_cancel_req); 4753 /* 4754 * if we are running just wait and return, balance item is 4755 * deleted in btrfs_balance in this case 4756 */ 4757 if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) { 4758 mutex_unlock(&fs_info->balance_mutex); 4759 wait_event(fs_info->balance_wait_q, 4760 !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4761 mutex_lock(&fs_info->balance_mutex); 4762 } else { 4763 mutex_unlock(&fs_info->balance_mutex); 4764 /* 4765 * Lock released to allow other waiters to continue, we'll 4766 * reexamine the status again. 
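 * (A concurrent cancel or resume may have won the race while the
 * mutex was dropped, hence the fresh fs_info->balance_ctl check after
 * reacquiring it.)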
4767 */ 4768 mutex_lock(&fs_info->balance_mutex); 4769 4770 if (fs_info->balance_ctl) { 4771 reset_balance_state(fs_info); 4772 btrfs_exclop_finish(fs_info); 4773 btrfs_info(fs_info, "balance: canceled"); 4774 } 4775 } 4776 4777 ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4778 atomic_dec(&fs_info->balance_cancel_req); 4779 mutex_unlock(&fs_info->balance_mutex); 4780 return 0; 4781 } 4782 4783 int btrfs_uuid_scan_kthread(void *data) 4784 { 4785 struct btrfs_fs_info *fs_info = data; 4786 struct btrfs_root *root = fs_info->tree_root; 4787 struct btrfs_key key; 4788 struct btrfs_path *path = NULL; 4789 int ret = 0; 4790 struct extent_buffer *eb; 4791 int slot; 4792 struct btrfs_root_item root_item; 4793 u32 item_size; 4794 struct btrfs_trans_handle *trans = NULL; 4795 bool closing = false; 4796 4797 path = btrfs_alloc_path(); 4798 if (!path) { 4799 ret = -ENOMEM; 4800 goto out; 4801 } 4802 4803 key.objectid = 0; 4804 key.type = BTRFS_ROOT_ITEM_KEY; 4805 key.offset = 0; 4806 4807 while (1) { 4808 if (btrfs_fs_closing(fs_info)) { 4809 closing = true; 4810 break; 4811 } 4812 ret = btrfs_search_forward(root, &key, path, 4813 BTRFS_OLDEST_GENERATION); 4814 if (ret) { 4815 if (ret > 0) 4816 ret = 0; 4817 break; 4818 } 4819 4820 if (key.type != BTRFS_ROOT_ITEM_KEY || 4821 (key.objectid < BTRFS_FIRST_FREE_OBJECTID && 4822 key.objectid != BTRFS_FS_TREE_OBJECTID) || 4823 key.objectid > BTRFS_LAST_FREE_OBJECTID) 4824 goto skip; 4825 4826 eb = path->nodes[0]; 4827 slot = path->slots[0]; 4828 item_size = btrfs_item_size(eb, slot); 4829 if (item_size < sizeof(root_item)) 4830 goto skip; 4831 4832 read_extent_buffer(eb, &root_item, 4833 btrfs_item_ptr_offset(eb, slot), 4834 (int)sizeof(root_item)); 4835 if (btrfs_root_refs(&root_item) == 0) 4836 goto skip; 4837 4838 if (!btrfs_is_empty_uuid(root_item.uuid) || 4839 !btrfs_is_empty_uuid(root_item.received_uuid)) { 4840 if (trans) 4841 goto update_tree; 4842 4843 btrfs_release_path(path); 4844 /* 4845 * 1 - subvol uuid item 4846 * 1 - received_subvol uuid item 4847 */ 4848 trans = btrfs_start_transaction(fs_info->uuid_root, 2); 4849 if (IS_ERR(trans)) { 4850 ret = PTR_ERR(trans); 4851 break; 4852 } 4853 continue; 4854 } else { 4855 goto skip; 4856 } 4857 update_tree: 4858 btrfs_release_path(path); 4859 if (!btrfs_is_empty_uuid(root_item.uuid)) { 4860 ret = btrfs_uuid_tree_add(trans, root_item.uuid, 4861 BTRFS_UUID_KEY_SUBVOL, 4862 key.objectid); 4863 if (ret < 0) { 4864 btrfs_warn(fs_info, "uuid_tree_add failed %d", 4865 ret); 4866 break; 4867 } 4868 } 4869 4870 if (!btrfs_is_empty_uuid(root_item.received_uuid)) { 4871 ret = btrfs_uuid_tree_add(trans, 4872 root_item.received_uuid, 4873 BTRFS_UUID_KEY_RECEIVED_SUBVOL, 4874 key.objectid); 4875 if (ret < 0) { 4876 btrfs_warn(fs_info, "uuid_tree_add failed %d", 4877 ret); 4878 break; 4879 } 4880 } 4881 4882 skip: 4883 btrfs_release_path(path); 4884 if (trans) { 4885 ret = btrfs_end_transaction(trans); 4886 trans = NULL; 4887 if (ret) 4888 break; 4889 } 4890 4891 if (key.offset < (u64)-1) { 4892 key.offset++; 4893 } else if (key.type < BTRFS_ROOT_ITEM_KEY) { 4894 key.offset = 0; 4895 key.type = BTRFS_ROOT_ITEM_KEY; 4896 } else if (key.objectid < (u64)-1) { 4897 key.offset = 0; 4898 key.type = BTRFS_ROOT_ITEM_KEY; 4899 key.objectid++; 4900 } else { 4901 break; 4902 } 4903 cond_resched(); 4904 } 4905 4906 out: 4907 btrfs_free_path(path); 4908 if (trans && !IS_ERR(trans)) 4909 btrfs_end_transaction(trans); 4910 if (ret) 4911 btrfs_warn(fs_info, "btrfs_uuid_scan_kthread failed %d", ret); 4912 else if 
(!closing)
4913 set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags);
4914 up(&fs_info->uuid_tree_rescan_sem);
4915 return 0;
4916 }
4917
4918 int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info)
4919 {
4920 struct btrfs_trans_handle *trans;
4921 struct btrfs_root *tree_root = fs_info->tree_root;
4922 struct btrfs_root *uuid_root;
4923 struct task_struct *task;
4924 int ret;
4925
4926 /*
4927 * 1 - root node
4928 * 1 - root item
4929 */
4930 trans = btrfs_start_transaction(tree_root, 2);
4931 if (IS_ERR(trans))
4932 return PTR_ERR(trans);
4933
4934 uuid_root = btrfs_create_tree(trans, BTRFS_UUID_TREE_OBJECTID);
4935 if (IS_ERR(uuid_root)) {
4936 ret = PTR_ERR(uuid_root);
4937 btrfs_abort_transaction(trans, ret);
4938 btrfs_end_transaction(trans);
4939 return ret;
4940 }
4941
4942 fs_info->uuid_root = uuid_root;
4943
4944 ret = btrfs_commit_transaction(trans);
4945 if (ret)
4946 return ret;
4947
4948 down(&fs_info->uuid_tree_rescan_sem);
4949 task = kthread_run(btrfs_uuid_scan_kthread, fs_info, "btrfs-uuid");
4950 if (IS_ERR(task)) {
4951 /* fs_info->update_uuid_tree_gen remains 0 in all error cases */
4952 btrfs_warn(fs_info, "failed to start uuid_scan task");
4953 up(&fs_info->uuid_tree_rescan_sem);
4954 return PTR_ERR(task);
4955 }
4956
4957 return 0;
4958 }
4959
4960 /*
4961 * shrinking a device means finding all of the device extents past
4962 * the new size, and then following the back refs to the chunks.
4963 * The chunk relocation code actually frees the device extent
4964 */
4965 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
4966 {
4967 struct btrfs_fs_info *fs_info = device->fs_info;
4968 struct btrfs_root *root = fs_info->dev_root;
4969 struct btrfs_trans_handle *trans;
4970 struct btrfs_dev_extent *dev_extent = NULL;
4971 struct btrfs_path *path;
4972 u64 length;
4973 u64 chunk_offset;
4974 int ret;
4975 int slot;
4976 int failed = 0;
4977 bool retried = false;
4978 struct extent_buffer *l;
4979 struct btrfs_key key;
4980 struct btrfs_super_block *super_copy = fs_info->super_copy;
4981 u64 old_total = btrfs_super_total_bytes(super_copy);
4982 u64 old_size = btrfs_device_get_total_bytes(device);
4983 u64 diff;
4984 u64 start;
4985 u64 free_diff = 0;
4986
4987 new_size = round_down(new_size, fs_info->sectorsize);
4988 start = new_size;
4989 diff = round_down(old_size - new_size, fs_info->sectorsize);
4990
4991 if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
4992 return -EINVAL;
4993
4994 path = btrfs_alloc_path();
4995 if (!path)
4996 return -ENOMEM;
4997
4998 path->reada = READA_BACK;
4999
5000 trans = btrfs_start_transaction(root, 0);
5001 if (IS_ERR(trans)) {
5002 btrfs_free_path(path);
5003 return PTR_ERR(trans);
5004 }
5005
5006 mutex_lock(&fs_info->chunk_mutex);
5007
5008 btrfs_device_set_total_bytes(device, new_size);
5009 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
5010 device->fs_devices->total_rw_bytes -= diff;
5011
5012 /*
5013 * The new free_chunk_space is new_size - used, so we have to
5014 * subtract the delta of the old free_chunk_space which included
5015 * old_size - used. If used > new_size then just subtract this
5016 * entire device's free space.
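 * Worked example (hypothetical sizes): old_size=100G, bytes_used=40G,
 * new_size=60G gives free_diff = (100G - 40G) - (60G - 40G) = 40G,
 * exactly the shrunk-away span. With bytes_used=70G (>= new_size) the
 * device has no free space left below new_size, so free_diff =
 * 100G - 70G = 30G, its entire remaining free space.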
5017 */
5018 if (device->bytes_used < new_size)
5019 free_diff = (old_size - device->bytes_used) -
5020 (new_size - device->bytes_used);
5021 else
5022 free_diff = old_size - device->bytes_used;
5023 atomic64_sub(free_diff, &fs_info->free_chunk_space);
5024 }
5025
5026 /*
5027 * Once the device's size has been set to the new size, ensure all
5028 * in-memory chunks are synced to disk so that the loop below sees them
5029 * and relocates them accordingly.
5030 */
5031 if (contains_pending_extent(device, &start, diff)) {
5032 mutex_unlock(&fs_info->chunk_mutex);
5033 ret = btrfs_commit_transaction(trans);
5034 if (ret)
5035 goto done;
5036 } else {
5037 mutex_unlock(&fs_info->chunk_mutex);
5038 btrfs_end_transaction(trans);
5039 }
5040
5041 again:
5042 key.objectid = device->devid;
5043 key.offset = (u64)-1;
5044 key.type = BTRFS_DEV_EXTENT_KEY;
5045
5046 do {
5047 mutex_lock(&fs_info->reclaim_bgs_lock);
5048 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5049 if (ret < 0) {
5050 mutex_unlock(&fs_info->reclaim_bgs_lock);
5051 goto done;
5052 }
5053
5054 ret = btrfs_previous_item(root, path, 0, key.type);
5055 if (ret) {
5056 mutex_unlock(&fs_info->reclaim_bgs_lock);
5057 if (ret < 0)
5058 goto done;
5059 ret = 0;
5060 btrfs_release_path(path);
5061 break;
5062 }
5063
5064 l = path->nodes[0];
5065 slot = path->slots[0];
5066 btrfs_item_key_to_cpu(l, &key, path->slots[0]);
5067
5068 if (key.objectid != device->devid) {
5069 mutex_unlock(&fs_info->reclaim_bgs_lock);
5070 btrfs_release_path(path);
5071 break;
5072 }
5073
5074 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
5075 length = btrfs_dev_extent_length(l, dev_extent);
5076
5077 if (key.offset + length <= new_size) {
5078 mutex_unlock(&fs_info->reclaim_bgs_lock);
5079 btrfs_release_path(path);
5080 break;
5081 }
5082
5083 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
5084 btrfs_release_path(path);
5085
5086 /*
5087 * We may be relocating the only data chunk we have,
5088 * which could potentially end up with losing data's
5089 * raid profile, so let's allocate an empty one in
5090 * advance.
5091 */
5092 ret = btrfs_may_alloc_data_chunk(fs_info, chunk_offset);
5093 if (ret < 0) {
5094 mutex_unlock(&fs_info->reclaim_bgs_lock);
5095 goto done;
5096 }
5097
5098 ret = btrfs_relocate_chunk(fs_info, chunk_offset);
5099 mutex_unlock(&fs_info->reclaim_bgs_lock);
5100 if (ret == -ENOSPC) {
5101 failed++;
5102 } else if (ret) {
5103 if (ret == -ETXTBSY) {
5104 btrfs_warn(fs_info,
5105 "could not shrink block group %llu due to active swapfile",
5106 chunk_offset);
5107 }
5108 goto done;
5109 }
5110 } while (key.offset-- > 0);
5111
5112 if (failed && !retried) {
5113 failed = 0;
5114 retried = true;
5115 goto again;
5116 } else if (failed && retried) {
5117 ret = -ENOSPC;
5118 goto done;
5119 }
5120
5121 /* Shrinking succeeded, else we would be at "done".
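 * Every device extent beyond new_size has been relocated; what is
 * left is persisting the shrunk sizes below.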
*/ 5122 trans = btrfs_start_transaction(root, 0); 5123 if (IS_ERR(trans)) { 5124 ret = PTR_ERR(trans); 5125 goto done; 5126 } 5127 5128 mutex_lock(&fs_info->chunk_mutex); 5129 /* Clear all state bits beyond the shrunk device size */ 5130 clear_extent_bits(&device->alloc_state, new_size, (u64)-1, 5131 CHUNK_STATE_MASK); 5132 5133 btrfs_device_set_disk_total_bytes(device, new_size); 5134 if (list_empty(&device->post_commit_list)) 5135 list_add_tail(&device->post_commit_list, 5136 &trans->transaction->dev_update_list); 5137 5138 WARN_ON(diff > old_total); 5139 btrfs_set_super_total_bytes(super_copy, 5140 round_down(old_total - diff, fs_info->sectorsize)); 5141 mutex_unlock(&fs_info->chunk_mutex); 5142 5143 btrfs_reserve_chunk_metadata(trans, false); 5144 /* Now btrfs_update_device() will change the on-disk size. */ 5145 ret = btrfs_update_device(trans, device); 5146 btrfs_trans_release_chunk_metadata(trans); 5147 if (ret < 0) { 5148 btrfs_abort_transaction(trans, ret); 5149 btrfs_end_transaction(trans); 5150 } else { 5151 ret = btrfs_commit_transaction(trans); 5152 } 5153 done: 5154 btrfs_free_path(path); 5155 if (ret) { 5156 mutex_lock(&fs_info->chunk_mutex); 5157 btrfs_device_set_total_bytes(device, old_size); 5158 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 5159 device->fs_devices->total_rw_bytes += diff; 5160 atomic64_add(free_diff, &fs_info->free_chunk_space); 5161 } 5162 mutex_unlock(&fs_info->chunk_mutex); 5163 } 5164 return ret; 5165 } 5166 5167 static int btrfs_add_system_chunk(struct btrfs_fs_info *fs_info, 5168 struct btrfs_key *key, 5169 struct btrfs_chunk *chunk, int item_size) 5170 { 5171 struct btrfs_super_block *super_copy = fs_info->super_copy; 5172 struct btrfs_disk_key disk_key; 5173 u32 array_size; 5174 u8 *ptr; 5175 5176 lockdep_assert_held(&fs_info->chunk_mutex); 5177 5178 array_size = btrfs_super_sys_array_size(super_copy); 5179 if (array_size + item_size + sizeof(disk_key) 5180 > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) 5181 return -EFBIG; 5182 5183 ptr = super_copy->sys_chunk_array + array_size; 5184 btrfs_cpu_key_to_disk(&disk_key, key); 5185 memcpy(ptr, &disk_key, sizeof(disk_key)); 5186 ptr += sizeof(disk_key); 5187 memcpy(ptr, chunk, item_size); 5188 item_size += sizeof(disk_key); 5189 btrfs_set_super_sys_array_size(super_copy, array_size + item_size); 5190 5191 return 0; 5192 } 5193 5194 /* 5195 * sort the devices in descending order by max_avail, total_avail 5196 */ 5197 static int btrfs_cmp_device_info(const void *a, const void *b) 5198 { 5199 const struct btrfs_device_info *di_a = a; 5200 const struct btrfs_device_info *di_b = b; 5201 5202 if (di_a->max_avail > di_b->max_avail) 5203 return -1; 5204 if (di_a->max_avail < di_b->max_avail) 5205 return 1; 5206 if (di_a->total_avail > di_b->total_avail) 5207 return -1; 5208 if (di_a->total_avail < di_b->total_avail) 5209 return 1; 5210 return 0; 5211 } 5212 5213 static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type) 5214 { 5215 if (!(type & BTRFS_BLOCK_GROUP_RAID56_MASK)) 5216 return; 5217 5218 btrfs_set_fs_incompat(info, RAID56); 5219 } 5220 5221 static void check_raid1c34_incompat_flag(struct btrfs_fs_info *info, u64 type) 5222 { 5223 if (!(type & (BTRFS_BLOCK_GROUP_RAID1C3 | BTRFS_BLOCK_GROUP_RAID1C4))) 5224 return; 5225 5226 btrfs_set_fs_incompat(info, RAID1C34); 5227 } 5228 5229 /* 5230 * Structure used internally for btrfs_create_chunk() function. 5231 * Wraps needed parameters. 
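 * For example (values from btrfs_raid_array): a RAID10 chunk over
 * four devices is described by sub_stripes=2, dev_stripes=1,
 * devs_increment=2, ncopies=2 and nparity=0.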
5232 */ 5233 struct alloc_chunk_ctl { 5234 u64 start; 5235 u64 type; 5236 /* Total number of stripes to allocate */ 5237 int num_stripes; 5238 /* sub_stripes info for map */ 5239 int sub_stripes; 5240 /* Stripes per device */ 5241 int dev_stripes; 5242 /* Maximum number of devices to use */ 5243 int devs_max; 5244 /* Minimum number of devices to use */ 5245 int devs_min; 5246 /* ndevs has to be a multiple of this */ 5247 int devs_increment; 5248 /* Number of copies */ 5249 int ncopies; 5250 /* Number of stripes worth of bytes to store parity information */ 5251 int nparity; 5252 u64 max_stripe_size; 5253 u64 max_chunk_size; 5254 u64 dev_extent_min; 5255 u64 stripe_size; 5256 u64 chunk_size; 5257 int ndevs; 5258 }; 5259 5260 static void init_alloc_chunk_ctl_policy_regular( 5261 struct btrfs_fs_devices *fs_devices, 5262 struct alloc_chunk_ctl *ctl) 5263 { 5264 struct btrfs_space_info *space_info; 5265 5266 space_info = btrfs_find_space_info(fs_devices->fs_info, ctl->type); 5267 ASSERT(space_info); 5268 5269 ctl->max_chunk_size = READ_ONCE(space_info->chunk_size); 5270 ctl->max_stripe_size = min_t(u64, ctl->max_chunk_size, SZ_1G); 5271 5272 if (ctl->type & BTRFS_BLOCK_GROUP_SYSTEM) 5273 ctl->devs_max = min_t(int, ctl->devs_max, BTRFS_MAX_DEVS_SYS_CHUNK); 5274 5275 /* We don't want a chunk larger than 10% of writable space */ 5276 ctl->max_chunk_size = min(mult_perc(fs_devices->total_rw_bytes, 10), 5277 ctl->max_chunk_size); 5278 ctl->dev_extent_min = btrfs_stripe_nr_to_offset(ctl->dev_stripes); 5279 } 5280 5281 static void init_alloc_chunk_ctl_policy_zoned( 5282 struct btrfs_fs_devices *fs_devices, 5283 struct alloc_chunk_ctl *ctl) 5284 { 5285 u64 zone_size = fs_devices->fs_info->zone_size; 5286 u64 limit; 5287 int min_num_stripes = ctl->devs_min * ctl->dev_stripes; 5288 int min_data_stripes = (min_num_stripes - ctl->nparity) / ctl->ncopies; 5289 u64 min_chunk_size = min_data_stripes * zone_size; 5290 u64 type = ctl->type; 5291 5292 ctl->max_stripe_size = zone_size; 5293 if (type & BTRFS_BLOCK_GROUP_DATA) { 5294 ctl->max_chunk_size = round_down(BTRFS_MAX_DATA_CHUNK_SIZE, 5295 zone_size); 5296 } else if (type & BTRFS_BLOCK_GROUP_METADATA) { 5297 ctl->max_chunk_size = ctl->max_stripe_size; 5298 } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) { 5299 ctl->max_chunk_size = 2 * ctl->max_stripe_size; 5300 ctl->devs_max = min_t(int, ctl->devs_max, 5301 BTRFS_MAX_DEVS_SYS_CHUNK); 5302 } else { 5303 BUG(); 5304 } 5305 5306 /* We don't want a chunk larger than 10% of writable space */ 5307 limit = max(round_down(mult_perc(fs_devices->total_rw_bytes, 10), 5308 zone_size), 5309 min_chunk_size); 5310 ctl->max_chunk_size = min(limit, ctl->max_chunk_size); 5311 ctl->dev_extent_min = zone_size * ctl->dev_stripes; 5312 } 5313 5314 static void init_alloc_chunk_ctl(struct btrfs_fs_devices *fs_devices, 5315 struct alloc_chunk_ctl *ctl) 5316 { 5317 int index = btrfs_bg_flags_to_raid_index(ctl->type); 5318 5319 ctl->sub_stripes = btrfs_raid_array[index].sub_stripes; 5320 ctl->dev_stripes = btrfs_raid_array[index].dev_stripes; 5321 ctl->devs_max = btrfs_raid_array[index].devs_max; 5322 if (!ctl->devs_max) 5323 ctl->devs_max = BTRFS_MAX_DEVS(fs_devices->fs_info); 5324 ctl->devs_min = btrfs_raid_array[index].devs_min; 5325 ctl->devs_increment = btrfs_raid_array[index].devs_increment; 5326 ctl->ncopies = btrfs_raid_array[index].ncopies; 5327 ctl->nparity = btrfs_raid_array[index].nparity; 5328 ctl->ndevs = 0; 5329 5330 switch (fs_devices->chunk_alloc_policy) { 5331 case BTRFS_CHUNK_ALLOC_REGULAR: 5332 
init_alloc_chunk_ctl_policy_regular(fs_devices, ctl); 5333 break; 5334 case BTRFS_CHUNK_ALLOC_ZONED: 5335 init_alloc_chunk_ctl_policy_zoned(fs_devices, ctl); 5336 break; 5337 default: 5338 BUG(); 5339 } 5340 } 5341 5342 static int gather_device_info(struct btrfs_fs_devices *fs_devices, 5343 struct alloc_chunk_ctl *ctl, 5344 struct btrfs_device_info *devices_info) 5345 { 5346 struct btrfs_fs_info *info = fs_devices->fs_info; 5347 struct btrfs_device *device; 5348 u64 total_avail; 5349 u64 dev_extent_want = ctl->max_stripe_size * ctl->dev_stripes; 5350 int ret; 5351 int ndevs = 0; 5352 u64 max_avail; 5353 u64 dev_offset; 5354 5355 /* 5356 * in the first pass through the devices list, we gather information 5357 * about the available holes on each device. 5358 */ 5359 list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) { 5360 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 5361 WARN(1, KERN_ERR 5362 "BTRFS: read-only device in alloc_list\n"); 5363 continue; 5364 } 5365 5366 if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, 5367 &device->dev_state) || 5368 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) 5369 continue; 5370 5371 if (device->total_bytes > device->bytes_used) 5372 total_avail = device->total_bytes - device->bytes_used; 5373 else 5374 total_avail = 0; 5375 5376 /* If there is no space on this device, skip it. */ 5377 if (total_avail < ctl->dev_extent_min) 5378 continue; 5379 5380 ret = find_free_dev_extent(device, dev_extent_want, &dev_offset, 5381 &max_avail); 5382 if (ret && ret != -ENOSPC) 5383 return ret; 5384 5385 if (ret == 0) 5386 max_avail = dev_extent_want; 5387 5388 if (max_avail < ctl->dev_extent_min) { 5389 if (btrfs_test_opt(info, ENOSPC_DEBUG)) 5390 btrfs_debug(info, 5391 "%s: devid %llu has no free space, have=%llu want=%llu", 5392 __func__, device->devid, max_avail, 5393 ctl->dev_extent_min); 5394 continue; 5395 } 5396 5397 if (ndevs == fs_devices->rw_devices) { 5398 WARN(1, "%s: found more than %llu devices\n", 5399 __func__, fs_devices->rw_devices); 5400 break; 5401 } 5402 devices_info[ndevs].dev_offset = dev_offset; 5403 devices_info[ndevs].max_avail = max_avail; 5404 devices_info[ndevs].total_avail = total_avail; 5405 devices_info[ndevs].dev = device; 5406 ++ndevs; 5407 } 5408 ctl->ndevs = ndevs; 5409 5410 /* 5411 * now sort the devices by hole size / available space 5412 */ 5413 sort(devices_info, ndevs, sizeof(struct btrfs_device_info), 5414 btrfs_cmp_device_info, NULL); 5415 5416 return 0; 5417 } 5418 5419 static int decide_stripe_size_regular(struct alloc_chunk_ctl *ctl, 5420 struct btrfs_device_info *devices_info) 5421 { 5422 /* Number of stripes that count for block group size */ 5423 int data_stripes; 5424 5425 /* 5426 * The primary goal is to maximize the number of stripes, so use as 5427 * many devices as possible, even if the stripes are not maximum sized. 5428 * 5429 * The DUP profile stores more than one stripe per device, the 5430 * max_avail is the total size so we have to adjust. 5431 */ 5432 ctl->stripe_size = div_u64(devices_info[ctl->ndevs - 1].max_avail, 5433 ctl->dev_stripes); 5434 ctl->num_stripes = ctl->ndevs * ctl->dev_stripes; 5435 5436 /* This will have to be fixed for RAID1 and RAID10 over more drives */ 5437 data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies; 5438 5439 /* 5440 * Use the number of data stripes to figure out how big this chunk is 5441 * really going to be in terms of logical address space, and compare 5442 * that answer with the max chunk size. 
If it's higher, we try to
5443 * reduce stripe_size.
5444 */
5445 if (ctl->stripe_size * data_stripes > ctl->max_chunk_size) {
5446 /*
5447 * Reduce stripe_size, round it up to a 16MB boundary again and
5448 * then use it, unless it ends up being even bigger than the
5449 * previous value we had already.
5450 */
5451 ctl->stripe_size = min(round_up(div_u64(ctl->max_chunk_size,
5452 data_stripes), SZ_16M),
5453 ctl->stripe_size);
5454 }
5455
5456 /* Stripe size should not go beyond 1G. */
5457 ctl->stripe_size = min_t(u64, ctl->stripe_size, SZ_1G);
5458
5459 /* Align to BTRFS_STRIPE_LEN */
5460 ctl->stripe_size = round_down(ctl->stripe_size, BTRFS_STRIPE_LEN);
5461 ctl->chunk_size = ctl->stripe_size * data_stripes;
5462
5463 return 0;
5464 }
5465
5466 static int decide_stripe_size_zoned(struct alloc_chunk_ctl *ctl,
5467 struct btrfs_device_info *devices_info)
5468 {
5469 u64 zone_size = devices_info[0].dev->zone_info->zone_size;
5470 /* Number of stripes that count for block group size */
5471 int data_stripes;
5472
5473 /*
5474 * It should hold because:
5475 * dev_extent_min == dev_extent_want == zone_size * dev_stripes
5476 */
5477 ASSERT(devices_info[ctl->ndevs - 1].max_avail == ctl->dev_extent_min);
5478
5479 ctl->stripe_size = zone_size;
5480 ctl->num_stripes = ctl->ndevs * ctl->dev_stripes;
5481 data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies;
5482
5483 /* stripe_size is fixed in zoned filesystems. Reduce ndevs instead. */
5484 if (ctl->stripe_size * data_stripes > ctl->max_chunk_size) {
5485 ctl->ndevs = div_u64(div_u64(ctl->max_chunk_size * ctl->ncopies,
5486 ctl->stripe_size) + ctl->nparity,
5487 ctl->dev_stripes);
5488 ctl->num_stripes = ctl->ndevs * ctl->dev_stripes;
5489 data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies;
5490 ASSERT(ctl->stripe_size * data_stripes <= ctl->max_chunk_size);
5491 }
5492
5493 ctl->chunk_size = ctl->stripe_size * data_stripes;
5494
5495 return 0;
5496 }
5497
5498 static int decide_stripe_size(struct btrfs_fs_devices *fs_devices,
5499 struct alloc_chunk_ctl *ctl,
5500 struct btrfs_device_info *devices_info)
5501 {
5502 struct btrfs_fs_info *info = fs_devices->fs_info;
5503
5504 /*
5505 * Round down to the number of usable stripes; devs_increment can be
5506 * any number, so we can't use round_down(), which requires a power of
5507 * 2, while rounddown() is safe.
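 * E.g. (illustrative numbers): ndevs=7 with devs_increment=3 (the
 * raid1c3 increment) yields rounddown(7, 3) == 6 usable devices.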
5508 */ 5509 ctl->ndevs = rounddown(ctl->ndevs, ctl->devs_increment); 5510 5511 if (ctl->ndevs < ctl->devs_min) { 5512 if (btrfs_test_opt(info, ENOSPC_DEBUG)) { 5513 btrfs_debug(info, 5514 "%s: not enough devices with free space: have=%d minimum required=%d", 5515 __func__, ctl->ndevs, ctl->devs_min); 5516 } 5517 return -ENOSPC; 5518 } 5519 5520 ctl->ndevs = min(ctl->ndevs, ctl->devs_max); 5521 5522 switch (fs_devices->chunk_alloc_policy) { 5523 case BTRFS_CHUNK_ALLOC_REGULAR: 5524 return decide_stripe_size_regular(ctl, devices_info); 5525 case BTRFS_CHUNK_ALLOC_ZONED: 5526 return decide_stripe_size_zoned(ctl, devices_info); 5527 default: 5528 BUG(); 5529 } 5530 } 5531 5532 static void chunk_map_device_set_bits(struct btrfs_chunk_map *map, unsigned int bits) 5533 { 5534 for (int i = 0; i < map->num_stripes; i++) { 5535 struct btrfs_io_stripe *stripe = &map->stripes[i]; 5536 struct btrfs_device *device = stripe->dev; 5537 5538 set_extent_bit(&device->alloc_state, stripe->physical, 5539 stripe->physical + map->stripe_size - 1, 5540 bits | EXTENT_NOWAIT, NULL); 5541 } 5542 } 5543 5544 static void chunk_map_device_clear_bits(struct btrfs_chunk_map *map, unsigned int bits) 5545 { 5546 for (int i = 0; i < map->num_stripes; i++) { 5547 struct btrfs_io_stripe *stripe = &map->stripes[i]; 5548 struct btrfs_device *device = stripe->dev; 5549 5550 __clear_extent_bit(&device->alloc_state, stripe->physical, 5551 stripe->physical + map->stripe_size - 1, 5552 bits | EXTENT_NOWAIT, 5553 NULL, NULL); 5554 } 5555 } 5556 5557 void btrfs_remove_chunk_map(struct btrfs_fs_info *fs_info, struct btrfs_chunk_map *map) 5558 { 5559 write_lock(&fs_info->mapping_tree_lock); 5560 rb_erase_cached(&map->rb_node, &fs_info->mapping_tree); 5561 RB_CLEAR_NODE(&map->rb_node); 5562 chunk_map_device_clear_bits(map, CHUNK_ALLOCATED); 5563 write_unlock(&fs_info->mapping_tree_lock); 5564 5565 /* Once for the tree reference. 
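 * (Callers that looked the map up via btrfs_get_chunk_map() still
 * hold, and later drop, their own reference.)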
*/ 5566 btrfs_free_chunk_map(map); 5567 } 5568 5569 EXPORT_FOR_TESTS 5570 int btrfs_add_chunk_map(struct btrfs_fs_info *fs_info, struct btrfs_chunk_map *map) 5571 { 5572 struct rb_node **p; 5573 struct rb_node *parent = NULL; 5574 bool leftmost = true; 5575 5576 write_lock(&fs_info->mapping_tree_lock); 5577 p = &fs_info->mapping_tree.rb_root.rb_node; 5578 while (*p) { 5579 struct btrfs_chunk_map *entry; 5580 5581 parent = *p; 5582 entry = rb_entry(parent, struct btrfs_chunk_map, rb_node); 5583 5584 if (map->start < entry->start) { 5585 p = &(*p)->rb_left; 5586 } else if (map->start > entry->start) { 5587 p = &(*p)->rb_right; 5588 leftmost = false; 5589 } else { 5590 write_unlock(&fs_info->mapping_tree_lock); 5591 return -EEXIST; 5592 } 5593 } 5594 rb_link_node(&map->rb_node, parent, p); 5595 rb_insert_color_cached(&map->rb_node, &fs_info->mapping_tree, leftmost); 5596 chunk_map_device_set_bits(map, CHUNK_ALLOCATED); 5597 chunk_map_device_clear_bits(map, CHUNK_TRIMMED); 5598 write_unlock(&fs_info->mapping_tree_lock); 5599 5600 return 0; 5601 } 5602 5603 EXPORT_FOR_TESTS 5604 struct btrfs_chunk_map *btrfs_alloc_chunk_map(int num_stripes, gfp_t gfp) 5605 { 5606 struct btrfs_chunk_map *map; 5607 5608 map = kmalloc(btrfs_chunk_map_size(num_stripes), gfp); 5609 if (!map) 5610 return NULL; 5611 5612 refcount_set(&map->refs, 1); 5613 RB_CLEAR_NODE(&map->rb_node); 5614 5615 return map; 5616 } 5617 5618 struct btrfs_chunk_map *btrfs_clone_chunk_map(struct btrfs_chunk_map *map, gfp_t gfp) 5619 { 5620 const int size = btrfs_chunk_map_size(map->num_stripes); 5621 struct btrfs_chunk_map *clone; 5622 5623 clone = kmemdup(map, size, gfp); 5624 if (!clone) 5625 return NULL; 5626 5627 refcount_set(&clone->refs, 1); 5628 RB_CLEAR_NODE(&clone->rb_node); 5629 5630 return clone; 5631 } 5632 5633 static struct btrfs_block_group *create_chunk(struct btrfs_trans_handle *trans, 5634 struct alloc_chunk_ctl *ctl, 5635 struct btrfs_device_info *devices_info) 5636 { 5637 struct btrfs_fs_info *info = trans->fs_info; 5638 struct btrfs_chunk_map *map; 5639 struct btrfs_block_group *block_group; 5640 u64 start = ctl->start; 5641 u64 type = ctl->type; 5642 int ret; 5643 int i; 5644 int j; 5645 5646 map = btrfs_alloc_chunk_map(ctl->num_stripes, GFP_NOFS); 5647 if (!map) 5648 return ERR_PTR(-ENOMEM); 5649 5650 map->start = start; 5651 map->chunk_len = ctl->chunk_size; 5652 map->stripe_size = ctl->stripe_size; 5653 map->type = type; 5654 map->io_align = BTRFS_STRIPE_LEN; 5655 map->io_width = BTRFS_STRIPE_LEN; 5656 map->sub_stripes = ctl->sub_stripes; 5657 map->num_stripes = ctl->num_stripes; 5658 5659 for (i = 0; i < ctl->ndevs; ++i) { 5660 for (j = 0; j < ctl->dev_stripes; ++j) { 5661 int s = i * ctl->dev_stripes + j; 5662 map->stripes[s].dev = devices_info[i].dev; 5663 map->stripes[s].physical = devices_info[i].dev_offset + 5664 j * ctl->stripe_size; 5665 } 5666 } 5667 5668 trace_btrfs_chunk_alloc(info, map, start, ctl->chunk_size); 5669 5670 ret = btrfs_add_chunk_map(info, map); 5671 if (ret) { 5672 btrfs_free_chunk_map(map); 5673 return ERR_PTR(ret); 5674 } 5675 5676 block_group = btrfs_make_block_group(trans, type, start, ctl->chunk_size); 5677 if (IS_ERR(block_group)) { 5678 btrfs_remove_chunk_map(info, map); 5679 return block_group; 5680 } 5681 5682 for (int i = 0; i < map->num_stripes; i++) { 5683 struct btrfs_device *dev = map->stripes[i].dev; 5684 5685 btrfs_device_set_bytes_used(dev, 5686 dev->bytes_used + ctl->stripe_size); 5687 if (list_empty(&dev->post_commit_list)) 5688 list_add_tail(&dev->post_commit_list, 5689 
&trans->transaction->dev_update_list);
5690 }
5691
5692 atomic64_sub(ctl->stripe_size * map->num_stripes,
5693 &info->free_chunk_space);
5694
5695 check_raid56_incompat_flag(info, type);
5696 check_raid1c34_incompat_flag(info, type);
5697
5698 return block_group;
5699 }
5700
5701 struct btrfs_block_group *btrfs_create_chunk(struct btrfs_trans_handle *trans,
5702 u64 type)
5703 {
5704 struct btrfs_fs_info *info = trans->fs_info;
5705 struct btrfs_fs_devices *fs_devices = info->fs_devices;
5706 struct btrfs_device_info *devices_info = NULL;
5707 struct alloc_chunk_ctl ctl;
5708 struct btrfs_block_group *block_group;
5709 int ret;
5710
5711 lockdep_assert_held(&info->chunk_mutex);
5712
5713 if (!alloc_profile_is_valid(type, 0)) {
5714 ASSERT(0);
5715 return ERR_PTR(-EINVAL);
5716 }
5717
5718 if (list_empty(&fs_devices->alloc_list)) {
5719 if (btrfs_test_opt(info, ENOSPC_DEBUG))
5720 btrfs_debug(info, "%s: no writable device", __func__);
5721 return ERR_PTR(-ENOSPC);
5722 }
5723
5724 if (!(type & BTRFS_BLOCK_GROUP_TYPE_MASK)) {
5725 btrfs_err(info, "invalid chunk type 0x%llx requested", type);
5726 ASSERT(0);
5727 return ERR_PTR(-EINVAL);
5728 }
5729
5730 ctl.start = find_next_chunk(info);
5731 ctl.type = type;
5732 init_alloc_chunk_ctl(fs_devices, &ctl);
5733
5734 devices_info = kcalloc(fs_devices->rw_devices, sizeof(*devices_info),
5735 GFP_NOFS);
5736 if (!devices_info)
5737 return ERR_PTR(-ENOMEM);
5738
5739 ret = gather_device_info(fs_devices, &ctl, devices_info);
5740 if (ret < 0) {
5741 block_group = ERR_PTR(ret);
5742 goto out;
5743 }
5744
5745 ret = decide_stripe_size(fs_devices, &ctl, devices_info);
5746 if (ret < 0) {
5747 block_group = ERR_PTR(ret);
5748 goto out;
5749 }
5750
5751 block_group = create_chunk(trans, &ctl, devices_info);
5752
5753 out:
5754 kfree(devices_info);
5755 return block_group;
5756 }
5757
5758 /*
5759 * This function, btrfs_chunk_alloc_add_chunk_item(), typically belongs to
5760 * phase 1 of chunk allocation. It belongs to phase 2 only when allocating system
5761 * chunks.
5762 *
5763 * See the comment at btrfs_chunk_alloc() for details about the chunk allocation
5764 * phases.
5765 */
5766 int btrfs_chunk_alloc_add_chunk_item(struct btrfs_trans_handle *trans,
5767 struct btrfs_block_group *bg)
5768 {
5769 struct btrfs_fs_info *fs_info = trans->fs_info;
5770 struct btrfs_root *chunk_root = fs_info->chunk_root;
5771 struct btrfs_key key;
5772 struct btrfs_chunk *chunk;
5773 struct btrfs_stripe *stripe;
5774 struct btrfs_chunk_map *map;
5775 size_t item_size;
5776 int i;
5777 int ret;
5778
5779 /*
5780 * We take the chunk_mutex for 2 reasons:
5781 *
5782 * 1) Updates and insertions in the chunk btree must be done while holding
5783 * the chunk_mutex, as well as updating the system chunk array in the
5784 * superblock. See the comment on top of btrfs_chunk_alloc() for the
5785 * details;
5786 *
5787 * 2) To prevent races with the final phase of a device replace operation
5788 * that replaces the device object associated with the map's stripes,
5789 * because the device object's id can change at any time during that
5790 * final phase of the device replace operation
5791 * (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the
5792 * replaced device and then see it with an ID of BTRFS_DEV_REPLACE_DEVID,
5793 * which would cause a failure when updating the device item, which does
5794 * not exist, or persisting a stripe of the chunk item with such ID.
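 * (BTRFS_DEV_REPLACE_DEVID is 0 and never has a device item of its
 * own.)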
/*
 * This function, btrfs_chunk_alloc_add_chunk_item(), typically belongs to
 * phase 1 of chunk allocation. It belongs to phase 2 only when allocating
 * system chunks.
 *
 * See the comment at btrfs_chunk_alloc() for details about the chunk
 * allocation phases.
 */
int btrfs_chunk_alloc_add_chunk_item(struct btrfs_trans_handle *trans,
				     struct btrfs_block_group *bg)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *chunk_root = fs_info->chunk_root;
	struct btrfs_key key;
	struct btrfs_chunk *chunk;
	struct btrfs_stripe *stripe;
	struct btrfs_chunk_map *map;
	size_t item_size;
	int i;
	int ret;

	/*
	 * We take the chunk_mutex for 2 reasons:
	 *
	 * 1) Updates and insertions in the chunk btree must be done while holding
	 *    the chunk_mutex, as well as updating the system chunk array in the
	 *    superblock. See the comment on top of btrfs_chunk_alloc() for the
	 *    details;
	 *
	 * 2) To prevent races with the final phase of a device replace operation
	 *    that replaces the device object associated with the map's stripes,
	 *    because the device object's id can change at any time during that
	 *    final phase of the device replace operation
	 *    (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the
	 *    replaced device and then see it with an ID of
	 *    BTRFS_DEV_REPLACE_DEVID, which would cause a failure when updating
	 *    the device item, which does not exist, or persisting a stripe of
	 *    the chunk item with such ID.
	 *    Here we can't use the device_list_mutex because our caller already
	 *    has locked the chunk_mutex, and the final phase of device replace
	 *    acquires both mutexes - first the device_list_mutex and then the
	 *    chunk_mutex. Using any of those two mutexes protects us from a
	 *    concurrent device replace.
	 */
	lockdep_assert_held(&fs_info->chunk_mutex);

	map = btrfs_get_chunk_map(fs_info, bg->start, bg->length);
	if (IS_ERR(map)) {
		ret = PTR_ERR(map);
		btrfs_abort_transaction(trans, ret);
		return ret;
	}

	item_size = btrfs_chunk_item_size(map->num_stripes);

	chunk = kzalloc(item_size, GFP_NOFS);
	if (!chunk) {
		ret = -ENOMEM;
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	for (i = 0; i < map->num_stripes; i++) {
		struct btrfs_device *device = map->stripes[i].dev;

		ret = btrfs_update_device(trans, device);
		if (ret)
			goto out;
	}

	stripe = &chunk->stripe;
	for (i = 0; i < map->num_stripes; i++) {
		struct btrfs_device *device = map->stripes[i].dev;
		const u64 dev_offset = map->stripes[i].physical;

		btrfs_set_stack_stripe_devid(stripe, device->devid);
		btrfs_set_stack_stripe_offset(stripe, dev_offset);
		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
		stripe++;
	}

	btrfs_set_stack_chunk_length(chunk, bg->length);
	btrfs_set_stack_chunk_owner(chunk, BTRFS_EXTENT_TREE_OBJECTID);
	btrfs_set_stack_chunk_stripe_len(chunk, BTRFS_STRIPE_LEN);
	btrfs_set_stack_chunk_type(chunk, map->type);
	btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
	btrfs_set_stack_chunk_io_align(chunk, BTRFS_STRIPE_LEN);
	btrfs_set_stack_chunk_io_width(chunk, BTRFS_STRIPE_LEN);
	btrfs_set_stack_chunk_sector_size(chunk, fs_info->sectorsize);
	btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);

	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.type = BTRFS_CHUNK_ITEM_KEY;
	key.offset = bg->start;

	ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
	if (ret)
		goto out;

	set_bit(BLOCK_GROUP_FLAG_CHUNK_ITEM_INSERTED, &bg->runtime_flags);

	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
		ret = btrfs_add_system_chunk(fs_info, &key, chunk, item_size);
		if (ret)
			goto out;
	}

out:
	kfree(chunk);
	btrfs_free_chunk_map(map);
	return ret;
}
static noinline int init_first_rw_device(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	u64 alloc_profile;
	struct btrfs_block_group *meta_bg;
	struct btrfs_block_group *sys_bg;

	/*
	 * When adding a new device for sprouting, the seed device is read-only
	 * so we must first allocate a metadata and a system chunk. But before
	 * adding the block group items to the extent, device and chunk btrees,
	 * we must first:
	 *
	 * 1) Create both chunks without doing any changes to the btrees, as
	 *    otherwise we would get -ENOSPC since the block groups from the
	 *    seed device are read-only;
	 *
	 * 2) Add the device item for the new sprout device - finishing the setup
	 *    of a new block group requires updating the device item in the chunk
	 *    btree, so it must exist when we attempt to do it. The previous step
	 *    ensures this does not fail with -ENOSPC.
	 *
	 * After that we can add the block group items to their btrees:
	 * update existing device item in the chunk btree, add a new block group
	 * item to the extent btree, add a new chunk item to the chunk btree and
	 * finally add the new device extent items to the devices btree.
	 */

	alloc_profile = btrfs_metadata_alloc_profile(fs_info);
	meta_bg = btrfs_create_chunk(trans, alloc_profile);
	if (IS_ERR(meta_bg))
		return PTR_ERR(meta_bg);

	alloc_profile = btrfs_system_alloc_profile(fs_info);
	sys_bg = btrfs_create_chunk(trans, alloc_profile);
	if (IS_ERR(sys_bg))
		return PTR_ERR(sys_bg);

	return 0;
}

static inline int btrfs_chunk_max_errors(struct btrfs_chunk_map *map)
{
	const int index = btrfs_bg_flags_to_raid_index(map->type);

	return btrfs_raid_array[index].tolerated_failures;
}

bool btrfs_chunk_writeable(struct btrfs_fs_info *fs_info, u64 chunk_offset)
{
	struct btrfs_chunk_map *map;
	int miss_ndevs = 0;
	int i;
	bool ret = true;

	map = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
	if (IS_ERR(map))
		return false;

	for (i = 0; i < map->num_stripes; i++) {
		if (test_bit(BTRFS_DEV_STATE_MISSING,
			     &map->stripes[i].dev->dev_state)) {
			miss_ndevs++;
			continue;
		}
		if (!test_bit(BTRFS_DEV_STATE_WRITEABLE,
			      &map->stripes[i].dev->dev_state)) {
			ret = false;
			goto end;
		}
	}

	/*
	 * If the number of missing devices is larger than max errors, we
	 * cannot write the data into that chunk successfully.
	 */
	if (miss_ndevs > btrfs_chunk_max_errors(map))
		ret = false;
end:
	btrfs_free_chunk_map(map);
	return ret;
}

void btrfs_mapping_tree_free(struct btrfs_fs_info *fs_info)
{
	write_lock(&fs_info->mapping_tree_lock);
	while (!RB_EMPTY_ROOT(&fs_info->mapping_tree.rb_root)) {
		struct btrfs_chunk_map *map;
		struct rb_node *node;

		node = rb_first_cached(&fs_info->mapping_tree);
		map = rb_entry(node, struct btrfs_chunk_map, rb_node);
		rb_erase_cached(&map->rb_node, &fs_info->mapping_tree);
		RB_CLEAR_NODE(&map->rb_node);
		chunk_map_device_clear_bits(map, CHUNK_ALLOCATED);
		/* Once for the tree ref. */
		btrfs_free_chunk_map(map);
		cond_resched_rwlock_write(&fs_info->mapping_tree_lock);
	}
	write_unlock(&fs_info->mapping_tree_lock);
}
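/*
 * Illustrative sketch (not part of the driver): looking up how many device
 * failures a given block group profile tolerates, e.g. 1 for RAID5 and 2 for
 * RAID6, via the btrfs_raid_array table. The helper name is hypothetical;
 * btrfs_chunk_max_errors() above does the same thing for a chunk map.
 */
static int __maybe_unused example_tolerated_failures(u64 bg_flags)
{
	const int index = btrfs_bg_flags_to_raid_index(bg_flags);

	return btrfs_raid_array[index].tolerated_failures;
}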
int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
{
	struct btrfs_chunk_map *map;
	enum btrfs_raid_types index;
	int ret = 1;

	map = btrfs_get_chunk_map(fs_info, logical, len);
	if (IS_ERR(map))
		/*
		 * We could return errors for these cases, but that could get
		 * ugly and we'd probably do the same thing which is just not do
		 * anything else and exit, so return 1 so the callers don't try
		 * to use other copies.
		 */
		return 1;

	index = btrfs_bg_flags_to_raid_index(map->type);

	/* Non-RAID56, use their ncopies from btrfs_raid_array. */
	if (!(map->type & BTRFS_BLOCK_GROUP_RAID56_MASK))
		ret = btrfs_raid_array[index].ncopies;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
		ret = 2;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
		/*
		 * There could be two corrupted data stripes, we need to loop
		 * the retries in order to rebuild the correct data. Fail one
		 * stripe at a time on every retry, except the stripe under
		 * reconstruction.
		 */
		ret = map->num_stripes;
	btrfs_free_chunk_map(map);
	return ret;
}

unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info,
				    u64 logical)
{
	struct btrfs_chunk_map *map;
	unsigned long len = fs_info->sectorsize;

	if (!btrfs_fs_incompat(fs_info, RAID56))
		return len;

	map = btrfs_get_chunk_map(fs_info, logical, len);

	if (!WARN_ON(IS_ERR(map))) {
		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
			len = btrfs_stripe_nr_to_offset(nr_data_stripes(map));
		btrfs_free_chunk_map(map);
	}
	return len;
}

int btrfs_is_parity_mirror(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
{
	struct btrfs_chunk_map *map;
	int ret = 0;

	if (!btrfs_fs_incompat(fs_info, RAID56))
		return 0;

	map = btrfs_get_chunk_map(fs_info, logical, len);

	if (!WARN_ON(IS_ERR(map))) {
		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
			ret = 1;
		btrfs_free_chunk_map(map);
	}
	return ret;
}
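/*
 * Illustrative sketch (not part of the driver): how a read path can use
 * btrfs_num_copies() to drive mirror retries, as the RAID6 comment above
 * describes. The read_mirror() callback is hypothetical; the real retry logic
 * lives in the bio layer.
 */
static int __maybe_unused example_try_all_mirrors(struct btrfs_fs_info *fs_info,
						  u64 logical,
						  int (*read_mirror)(u64 logical, int mirror))
{
	int num_copies = btrfs_num_copies(fs_info, logical, fs_info->sectorsize);
	int mirror;
	int ret = -EIO;

	/* Mirror numbers are 1-based; 0 means "any live mirror". */
	for (mirror = 1; mirror <= num_copies; mirror++) {
		ret = read_mirror(logical, mirror);
		if (!ret)
			break;
	}
	return ret;
}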
static int find_live_mirror(struct btrfs_fs_info *fs_info,
			    struct btrfs_chunk_map *map, int first,
			    int dev_replace_is_ongoing)
{
	const enum btrfs_read_policy policy = READ_ONCE(fs_info->fs_devices->read_policy);
	int i;
	int num_stripes;
	int preferred_mirror;
	int tolerance;
	struct btrfs_device *srcdev;

	ASSERT((map->type &
		(BTRFS_BLOCK_GROUP_RAID1_MASK | BTRFS_BLOCK_GROUP_RAID10)));

	if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		num_stripes = map->sub_stripes;
	else
		num_stripes = map->num_stripes;

	switch (policy) {
	default:
		/* Shouldn't happen, just warn and use pid instead of failing */
		btrfs_warn_rl(fs_info, "unknown read_policy type %u, reset to pid",
			      policy);
		WRITE_ONCE(fs_info->fs_devices->read_policy, BTRFS_READ_POLICY_PID);
		fallthrough;
	case BTRFS_READ_POLICY_PID:
		preferred_mirror = first + (current->pid % num_stripes);
		break;
	}

	if (dev_replace_is_ongoing &&
	    fs_info->dev_replace.cont_reading_from_srcdev_mode ==
	    BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID)
		srcdev = fs_info->dev_replace.srcdev;
	else
		srcdev = NULL;

	/*
	 * Try to avoid the drive that is the source drive for a dev-replace
	 * procedure, only choose it if no other non-missing mirror is
	 * available.
	 */
	for (tolerance = 0; tolerance < 2; tolerance++) {
		if (map->stripes[preferred_mirror].dev->bdev &&
		    (tolerance || map->stripes[preferred_mirror].dev != srcdev))
			return preferred_mirror;
		for (i = first; i < first + num_stripes; i++) {
			if (map->stripes[i].dev->bdev &&
			    (tolerance || map->stripes[i].dev != srcdev))
				return i;
		}
	}

	/*
	 * We couldn't find one that doesn't fail. Just return something and
	 * the I/O error handling code will clean up eventually.
	 */
	return preferred_mirror;
}

static struct btrfs_io_context *alloc_btrfs_io_context(struct btrfs_fs_info *fs_info,
						       u64 logical,
						       u16 total_stripes)
{
	struct btrfs_io_context *bioc;

	bioc = kzalloc(
		 /* The size of btrfs_io_context */
		sizeof(struct btrfs_io_context) +
		/* Plus the variable array for the stripes */
		sizeof(struct btrfs_io_stripe) * (total_stripes),
		GFP_NOFS);

	if (!bioc)
		return NULL;

	refcount_set(&bioc->refs, 1);

	bioc->fs_info = fs_info;
	bioc->replace_stripe_src = -1;
	bioc->full_stripe_logical = (u64)-1;
	bioc->logical = logical;

	return bioc;
}

void btrfs_get_bioc(struct btrfs_io_context *bioc)
{
	WARN_ON(!refcount_read(&bioc->refs));
	refcount_inc(&bioc->refs);
}

void btrfs_put_bioc(struct btrfs_io_context *bioc)
{
	if (!bioc)
		return;
	if (refcount_dec_and_test(&bioc->refs))
		kfree(bioc);
}
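/*
 * Illustrative sketch (not part of the driver): btrfs_io_context follows the
 * usual refcount pattern. Whoever hands a bioc to another context takes an
 * extra reference; every holder drops its own with btrfs_put_bioc(), and the
 * last put frees the structure. The function name is hypothetical.
 */
static void __maybe_unused example_share_bioc(struct btrfs_io_context *bioc)
{
	btrfs_get_bioc(bioc);	/* For the work we are about to hand off. */
	/* ... submit bioc to another thread/completion here ... */
	btrfs_put_bioc(bioc);	/* Drop our original reference. */
}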
/*
 * Please note that discard won't be sent to the target device of a device
 * replace.
 */
struct btrfs_discard_stripe *btrfs_map_discard(struct btrfs_fs_info *fs_info,
					       u64 logical, u64 *length_ret,
					       u32 *num_stripes)
{
	struct btrfs_chunk_map *map;
	struct btrfs_discard_stripe *stripes;
	u64 length = *length_ret;
	u64 offset;
	u32 stripe_nr;
	u32 stripe_nr_end;
	u32 stripe_cnt;
	u64 stripe_end_offset;
	u64 stripe_offset;
	u32 stripe_index;
	u32 factor = 0;
	u32 sub_stripes = 0;
	u32 stripes_per_dev = 0;
	u32 remaining_stripes = 0;
	u32 last_stripe = 0;
	int ret;
	int i;

	map = btrfs_get_chunk_map(fs_info, logical, length);
	if (IS_ERR(map))
		return ERR_CAST(map);

	/* We don't discard raid56 yet. */
	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		ret = -EOPNOTSUPP;
		goto out_free_map;
	}

	offset = logical - map->start;
	length = min_t(u64, map->start + map->chunk_len - logical, length);
	*length_ret = length;

	/*
	 * stripe_nr counts the total number of stripes we have to stride
	 * to get to this block.
	 */
	stripe_nr = offset >> BTRFS_STRIPE_LEN_SHIFT;

	/* stripe_offset is the offset of this block in its stripe. */
	stripe_offset = offset - btrfs_stripe_nr_to_offset(stripe_nr);

	stripe_nr_end = round_up(offset + length, BTRFS_STRIPE_LEN) >>
			BTRFS_STRIPE_LEN_SHIFT;
	stripe_cnt = stripe_nr_end - stripe_nr;
	stripe_end_offset = btrfs_stripe_nr_to_offset(stripe_nr_end) -
			    (offset + length);
	/*
	 * After this, stripe_nr is the number of stripes on this
	 * device we have to walk to find the data, and stripe_index is
	 * the number of our device in the stripe array.
	 */
	*num_stripes = 1;
	stripe_index = 0;
	if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
			 BTRFS_BLOCK_GROUP_RAID10)) {
		if (map->type & BTRFS_BLOCK_GROUP_RAID0)
			sub_stripes = 1;
		else
			sub_stripes = map->sub_stripes;

		factor = map->num_stripes / sub_stripes;
		*num_stripes = min_t(u64, map->num_stripes,
				     sub_stripes * stripe_cnt);
		stripe_index = stripe_nr % factor;
		stripe_nr /= factor;
		stripe_index *= sub_stripes;

		remaining_stripes = stripe_cnt % factor;
		stripes_per_dev = stripe_cnt / factor;
		last_stripe = ((stripe_nr_end - 1) % factor) * sub_stripes;
	} else if (map->type & (BTRFS_BLOCK_GROUP_RAID1_MASK |
				BTRFS_BLOCK_GROUP_DUP)) {
		*num_stripes = map->num_stripes;
	} else {
		stripe_index = stripe_nr % map->num_stripes;
		stripe_nr /= map->num_stripes;
	}

	stripes = kcalloc(*num_stripes, sizeof(*stripes), GFP_NOFS);
	if (!stripes) {
		ret = -ENOMEM;
		goto out_free_map;
	}

	for (i = 0; i < *num_stripes; i++) {
		stripes[i].physical =
			map->stripes[stripe_index].physical +
			stripe_offset + btrfs_stripe_nr_to_offset(stripe_nr);
		stripes[i].dev = map->stripes[stripe_index].dev;

		if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
				 BTRFS_BLOCK_GROUP_RAID10)) {
			stripes[i].length = btrfs_stripe_nr_to_offset(stripes_per_dev);

			if (i / sub_stripes < remaining_stripes)
				stripes[i].length += BTRFS_STRIPE_LEN;

			/*
			 * Special for the first stripe and
			 * the last stripe:
			 *
			 * |-------|...|-------|
			 *     |----------|
			 *    off     end_off
			 */
			if (i < sub_stripes)
				stripes[i].length -= stripe_offset;

			if (stripe_index >= last_stripe &&
			    stripe_index <= (last_stripe +
					     sub_stripes - 1))
				stripes[i].length -= stripe_end_offset;

			if (i == sub_stripes - 1)
				stripe_offset = 0;
		} else {
			stripes[i].length = length;
		}

		stripe_index++;
		if (stripe_index == map->num_stripes) {
			stripe_index = 0;
			stripe_nr++;
		}
	}

	btrfs_free_chunk_map(map);
	return stripes;
out_free_map:
	btrfs_free_chunk_map(map);
	return ERR_PTR(ret);
}

static bool is_block_group_to_copy(struct btrfs_fs_info *fs_info, u64 logical)
{
	struct btrfs_block_group *cache;
	bool ret;

	/* Non-zoned filesystems do not use the "to_copy" flag. */
	if (!btrfs_is_zoned(fs_info))
		return false;

	cache = btrfs_lookup_block_group(fs_info, logical);

	ret = test_bit(BLOCK_GROUP_FLAG_TO_COPY, &cache->runtime_flags);

	btrfs_put_block_group(cache);
	return ret;
}
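/*
 * Illustrative sketch (not part of the driver): consuming the array returned
 * by btrfs_map_discard(). The issue_discard() callback is hypothetical; the
 * real caller is the extent discard path.
 */
static int __maybe_unused example_discard_range(struct btrfs_fs_info *fs_info,
						u64 logical, u64 len,
						int (*issue_discard)(struct btrfs_device *dev,
								     u64 physical, u64 len))
{
	struct btrfs_discard_stripe *stripes;
	u32 num_stripes;
	u32 i;
	int ret = 0;

	/* @len may shrink to the part of the range inside one chunk. */
	stripes = btrfs_map_discard(fs_info, logical, &len, &num_stripes);
	if (IS_ERR(stripes))
		return PTR_ERR(stripes);

	for (i = 0; i < num_stripes; i++) {
		ret = issue_discard(stripes[i].dev, stripes[i].physical,
				    stripes[i].length);
		if (ret)
			break;
	}
	kfree(stripes);
	return ret;
}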
static void handle_ops_on_dev_replace(enum btrfs_map_op op,
				      struct btrfs_io_context *bioc,
				      struct btrfs_dev_replace *dev_replace,
				      u64 logical,
				      int *num_stripes_ret, int *max_errors_ret)
{
	u64 srcdev_devid = dev_replace->srcdev->devid;
	/*
	 * At this stage, num_stripes is still the real number of stripes,
	 * excluding the duplicated stripes.
	 */
	int num_stripes = *num_stripes_ret;
	int nr_extra_stripes = 0;
	int max_errors = *max_errors_ret;
	int i;

	/*
	 * A block group which has "to_copy" set will eventually be copied by
	 * the dev-replace process. We can avoid cloning IO here.
	 */
	if (is_block_group_to_copy(dev_replace->srcdev->fs_info, logical))
		return;

	/*
	 * Duplicate the write operations while the dev-replace procedure is
	 * running. Since the copying of the old disk to the new disk takes
	 * place at run time while the filesystem is mounted writable, the
	 * regular write operations to the old disk have to be duplicated to go
	 * to the new disk as well.
	 *
	 * Note that device->missing is handled by the caller, and that the
	 * write to the old disk is already set up in the stripes array.
	 */
	for (i = 0; i < num_stripes; i++) {
		struct btrfs_io_stripe *old = &bioc->stripes[i];
		struct btrfs_io_stripe *new = &bioc->stripes[num_stripes + nr_extra_stripes];

		if (old->dev->devid != srcdev_devid)
			continue;

		new->physical = old->physical;
		new->dev = dev_replace->tgtdev;
		if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK)
			bioc->replace_stripe_src = i;
		nr_extra_stripes++;
	}

	/* We can only have at most 2 extra nr_stripes (for DUP). */
	ASSERT(nr_extra_stripes <= 2);
	/*
	 * For GET_READ_MIRRORS, we can only return at most 1 extra stripe for
	 * replace.
	 * If we have 2 extra stripes, only choose the one with smaller physical.
	 */
	if (op == BTRFS_MAP_GET_READ_MIRRORS && nr_extra_stripes == 2) {
		struct btrfs_io_stripe *first = &bioc->stripes[num_stripes];
		struct btrfs_io_stripe *second = &bioc->stripes[num_stripes + 1];

		/* Only DUP can have two extra stripes. */
		ASSERT(bioc->map_type & BTRFS_BLOCK_GROUP_DUP);

		/*
		 * Swap the two extra stripes and reduce @nr_extra_stripes.
		 * The extra stripe would still be there, but won't be accessed.
		 */
		if (first->physical > second->physical) {
			swap(second->physical, first->physical);
			swap(second->dev, first->dev);
			nr_extra_stripes--;
		}
	}

	*num_stripes_ret = num_stripes + nr_extra_stripes;
	*max_errors_ret = max_errors + nr_extra_stripes;
	bioc->replace_nr_stripes = nr_extra_stripes;
}
static u64 btrfs_max_io_len(struct btrfs_chunk_map *map, u64 offset,
			    struct btrfs_io_geometry *io_geom)
{
	/*
	 * stripe_nr is the stripe where this block falls. stripe_offset is
	 * the offset of this block in its stripe.
	 */
	io_geom->stripe_offset = offset & BTRFS_STRIPE_LEN_MASK;
	io_geom->stripe_nr = offset >> BTRFS_STRIPE_LEN_SHIFT;
	ASSERT(io_geom->stripe_offset < U32_MAX);

	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		unsigned long full_stripe_len =
			btrfs_stripe_nr_to_offset(nr_data_stripes(map));

		/*
		 * For full stripe start, we use previously calculated
		 * @stripe_nr. Align it to nr_data_stripes, then multiply with
		 * STRIPE_LEN.
		 *
		 * By this we can avoid u64 division completely. And we have
		 * to go rounddown(), not round_down(), as nr_data_stripes is
		 * not ensured to be power of 2.
		 */
		io_geom->raid56_full_stripe_start = btrfs_stripe_nr_to_offset(
			rounddown(io_geom->stripe_nr, nr_data_stripes(map)));

		ASSERT(io_geom->raid56_full_stripe_start + full_stripe_len > offset);
		ASSERT(io_geom->raid56_full_stripe_start <= offset);
		/*
		 * For writes to RAID56, allow to write a full stripe set, but
		 * no straddling of stripe sets.
		 */
		if (io_geom->op == BTRFS_MAP_WRITE)
			return full_stripe_len - (offset - io_geom->raid56_full_stripe_start);
	}

	/*
	 * For other RAID types and for RAID56 reads, allow a single stripe (on
	 * a single disk).
	 */
	if (map->type & BTRFS_BLOCK_GROUP_STRIPE_MASK)
		return BTRFS_STRIPE_LEN - io_geom->stripe_offset;
	return U64_MAX;
}

static int set_io_stripe(struct btrfs_fs_info *fs_info, u64 logical,
			 u64 *length, struct btrfs_io_stripe *dst,
			 struct btrfs_chunk_map *map,
			 struct btrfs_io_geometry *io_geom)
{
	dst->dev = map->stripes[io_geom->stripe_index].dev;

	if (io_geom->op == BTRFS_MAP_READ &&
	    btrfs_need_stripe_tree_update(fs_info, map->type))
		return btrfs_get_raid_extent_offset(fs_info, logical, length,
						    map->type,
						    io_geom->stripe_index, dst);

	dst->physical = map->stripes[io_geom->stripe_index].physical +
			io_geom->stripe_offset +
			btrfs_stripe_nr_to_offset(io_geom->stripe_nr);
	return 0;
}

static bool is_single_device_io(struct btrfs_fs_info *fs_info,
				const struct btrfs_io_stripe *smap,
				const struct btrfs_chunk_map *map,
				int num_alloc_stripes,
				enum btrfs_map_op op, int mirror_num)
{
	if (!smap)
		return false;

	if (num_alloc_stripes != 1)
		return false;

	if (btrfs_need_stripe_tree_update(fs_info, map->type) && op != BTRFS_MAP_READ)
		return false;

	if ((map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) && mirror_num > 1)
		return false;

	return true;
}

static void map_blocks_raid0(const struct btrfs_chunk_map *map,
			     struct btrfs_io_geometry *io_geom)
{
	io_geom->stripe_index = io_geom->stripe_nr % map->num_stripes;
	io_geom->stripe_nr /= map->num_stripes;
	if (io_geom->op == BTRFS_MAP_READ)
		io_geom->mirror_num = 1;
}

static void map_blocks_raid1(struct btrfs_fs_info *fs_info,
			     struct btrfs_chunk_map *map,
			     struct btrfs_io_geometry *io_geom,
			     bool dev_replace_is_ongoing)
{
	if (io_geom->op != BTRFS_MAP_READ) {
		io_geom->num_stripes = map->num_stripes;
		return;
	}

	if (io_geom->mirror_num) {
		io_geom->stripe_index = io_geom->mirror_num - 1;
		return;
	}

	io_geom->stripe_index = find_live_mirror(fs_info, map, 0,
						 dev_replace_is_ongoing);
	io_geom->mirror_num = io_geom->stripe_index + 1;
}

static void map_blocks_dup(const struct btrfs_chunk_map *map,
			   struct btrfs_io_geometry *io_geom)
{
	if (io_geom->op != BTRFS_MAP_READ) {
		io_geom->num_stripes = map->num_stripes;
		return;
	}

	if (io_geom->mirror_num) {
		io_geom->stripe_index = io_geom->mirror_num - 1;
		return;
	}

	io_geom->mirror_num = 1;
}

static void map_blocks_raid10(struct btrfs_fs_info *fs_info,
			      struct btrfs_chunk_map *map,
			      struct btrfs_io_geometry *io_geom,
			      bool dev_replace_is_ongoing)
{
	u32 factor = map->num_stripes / map->sub_stripes;
	int old_stripe_index;

	io_geom->stripe_index = (io_geom->stripe_nr % factor) * map->sub_stripes;
	io_geom->stripe_nr /= factor;

	if (io_geom->op != BTRFS_MAP_READ) {
		io_geom->num_stripes = map->sub_stripes;
		return;
	}

	if (io_geom->mirror_num) {
		io_geom->stripe_index += io_geom->mirror_num - 1;
		return;
	}

	old_stripe_index = io_geom->stripe_index;
	io_geom->stripe_index = find_live_mirror(fs_info, map,
						 io_geom->stripe_index,
						 dev_replace_is_ongoing);
	io_geom->mirror_num = io_geom->stripe_index - old_stripe_index + 1;
}
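/*
 * Worked example for the stripe math above (illustrative, made-up numbers):
 * with BTRFS_STRIPE_LEN == 64K and a 4-disk RAID0 chunk, a logical offset of
 * 300K into the chunk gives:
 *
 *	stripe_nr     = 300K / 64K = 4		(5th stripe overall)
 *	stripe_offset = 300K - 4 * 64K = 44K
 *	stripe_index  = 4 % 4 = 0		(back on the first device)
 *	stripe_nr    /= 4 -> 1			(2nd stripe on that device)
 *
 * So the I/O lands on device 0 at 1 * 64K + 44K into its device extent, and
 * btrfs_max_io_len() caps the mapped length at 64K - 44K = 20K so a single
 * run never crosses a stripe boundary.
 */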
static void map_blocks_raid56_write(struct btrfs_chunk_map *map,
				    struct btrfs_io_geometry *io_geom,
				    u64 logical, u64 *length)
{
	int data_stripes = nr_data_stripes(map);

	/*
	 * Needs full stripe mapping.
	 *
	 * Push stripe_nr back to the start of the full stripe. For those cases
	 * needing a full stripe, @stripe_nr is the full stripe number.
	 *
	 * Originally we go raid56_full_stripe_start / full_stripe_len, but
	 * that can be expensive. Here we just divide @stripe_nr with
	 * @data_stripes.
	 */
	io_geom->stripe_nr /= data_stripes;

	/* RAID[56] write or recovery. Return all stripes */
	io_geom->num_stripes = map->num_stripes;
	io_geom->max_errors = btrfs_chunk_max_errors(map);

	/* Return the length to the full stripe end. */
	*length = min(logical + *length,
		      io_geom->raid56_full_stripe_start + map->start +
		      btrfs_stripe_nr_to_offset(data_stripes)) -
		logical;
	io_geom->stripe_index = 0;
	io_geom->stripe_offset = 0;
}

static void map_blocks_raid56_read(struct btrfs_chunk_map *map,
				   struct btrfs_io_geometry *io_geom)
{
	int data_stripes = nr_data_stripes(map);

	ASSERT(io_geom->mirror_num <= 1);
	/* Just grab the data stripe directly. */
	io_geom->stripe_index = io_geom->stripe_nr % data_stripes;
	io_geom->stripe_nr /= data_stripes;

	/* We distribute the parity blocks across stripes. */
	io_geom->stripe_index =
		(io_geom->stripe_nr + io_geom->stripe_index) % map->num_stripes;

	if (io_geom->op == BTRFS_MAP_READ && io_geom->mirror_num < 1)
		io_geom->mirror_num = 1;
}

static void map_blocks_single(const struct btrfs_chunk_map *map,
			      struct btrfs_io_geometry *io_geom)
{
	io_geom->stripe_index = io_geom->stripe_nr % map->num_stripes;
	io_geom->stripe_nr /= map->num_stripes;
	io_geom->mirror_num = io_geom->stripe_index + 1;
}
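/*
 * Worked example for the RAID56 mapping above (illustrative, made-up
 * numbers): a 4-disk RAID5 chunk has 3 data stripes per full stripe. For a
 * read with stripe_nr == 7:
 *
 *	stripe_index = 7 % 3 = 1	(2nd data stripe of its full stripe)
 *	stripe_nr    = 7 / 3 = 2	(3rd full stripe of the chunk)
 *	stripe_index = (2 + 1) % 4 = 3	(after parity rotation)
 *
 * For a write, the whole full stripe (all 4 devices) is returned instead,
 * with max_errors = 1 since RAID5 tolerates a single failure.
 */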
/*
 * Map one logical range to one or more physical ranges.
 *
 * @length:		(Mandatory) mapped length of this run.
 *			One logical range can be split into different segments
 *			due to factors like zones and RAID0/5/6/10 stripe
 *			boundaries.
 *
 * @bioc_ret:		(Mandatory) returned btrfs_io_context structure,
 *			which has one or more physical ranges (btrfs_io_stripe)
 *			recorded inside.
 *			Caller should call btrfs_put_bioc() to free it after use.
 *
 * @smap:		(Optional) single physical range optimization.
 *			If the map request can be fulfilled by one single
 *			physical range, and this parameter is not NULL,
 *			then @bioc_ret would be NULL, and @smap would be
 *			updated.
 *
 * @mirror_num_ret:	(Mandatory) returned mirror number if the original
 *			value is 0.
 *
 *			Mirror number 0 means to choose any live mirrors.
 *
 *			For non-RAID56 profiles, non-zero mirror_num means
 *			the Nth mirror. (e.g. mirror_num 1 means the first
 *			copy).
 *
 *			For RAID56 profile, mirror 1 means rebuild from P and
 *			the remaining data stripes.
 *
 *			For RAID6 profile, mirror > 2 means mark another
 *			data/P stripe error and rebuild from the remaining
 *			stripes.
 */
int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
		    u64 logical, u64 *length,
		    struct btrfs_io_context **bioc_ret,
		    struct btrfs_io_stripe *smap, int *mirror_num_ret)
{
	struct btrfs_chunk_map *map;
	struct btrfs_io_geometry io_geom = { 0 };
	u64 map_offset;
	int i;
	int ret = 0;
	int num_copies;
	struct btrfs_io_context *bioc = NULL;
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
	int dev_replace_is_ongoing = 0;
	u16 num_alloc_stripes;
	u64 max_len;

	ASSERT(bioc_ret);

	io_geom.mirror_num = (mirror_num_ret ? *mirror_num_ret : 0);
	io_geom.num_stripes = 1;
	io_geom.stripe_index = 0;
	io_geom.op = op;

	num_copies = btrfs_num_copies(fs_info, logical, fs_info->sectorsize);
	if (io_geom.mirror_num > num_copies)
		return -EINVAL;

	map = btrfs_get_chunk_map(fs_info, logical, *length);
	if (IS_ERR(map))
		return PTR_ERR(map);

	map_offset = logical - map->start;
	io_geom.raid56_full_stripe_start = (u64)-1;
	max_len = btrfs_max_io_len(map, map_offset, &io_geom);
	*length = min_t(u64, map->chunk_len - map_offset, max_len);

	down_read(&dev_replace->rwsem);
	dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
	/*
	 * Hold the semaphore for read during the whole operation, write is
	 * requested at commit time but must wait.
	 */
	if (!dev_replace_is_ongoing)
		up_read(&dev_replace->rwsem);

	switch (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
	case BTRFS_BLOCK_GROUP_RAID0:
		map_blocks_raid0(map, &io_geom);
		break;
	case BTRFS_BLOCK_GROUP_RAID1:
	case BTRFS_BLOCK_GROUP_RAID1C3:
	case BTRFS_BLOCK_GROUP_RAID1C4:
		map_blocks_raid1(fs_info, map, &io_geom, dev_replace_is_ongoing);
		break;
	case BTRFS_BLOCK_GROUP_DUP:
		map_blocks_dup(map, &io_geom);
		break;
	case BTRFS_BLOCK_GROUP_RAID10:
		map_blocks_raid10(fs_info, map, &io_geom, dev_replace_is_ongoing);
		break;
	case BTRFS_BLOCK_GROUP_RAID5:
	case BTRFS_BLOCK_GROUP_RAID6:
		if (op != BTRFS_MAP_READ || io_geom.mirror_num > 1)
			map_blocks_raid56_write(map, &io_geom, logical, length);
		else
			map_blocks_raid56_read(map, &io_geom);
		break;
	default:
		/*
		 * After this, stripe_nr is the number of stripes on this
		 * device we have to walk to find the data, and stripe_index is
		 * the number of our device in the stripe array.
		 */
		map_blocks_single(map, &io_geom);
		break;
	}
	if (io_geom.stripe_index >= map->num_stripes) {
		btrfs_crit(fs_info,
			   "stripe index math went horribly wrong, got stripe_index=%u, num_stripes=%u",
			   io_geom.stripe_index, map->num_stripes);
		ret = -EINVAL;
		goto out;
	}

	num_alloc_stripes = io_geom.num_stripes;
	if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL &&
	    op != BTRFS_MAP_READ)
		/*
		 * For replace case, we need to add extra stripes for extra
		 * duplicated stripes.
		 *
		 * For both WRITE and GET_READ_MIRRORS, we may have at most
		 * 2 more stripes (DUP types, otherwise 1).
		 */
		num_alloc_stripes += 2;
	/*
	 * If this I/O maps to a single device, try to return the device and
	 * physical block information on the stack instead of allocating an
	 * I/O context structure.
	 */
	if (is_single_device_io(fs_info, smap, map, num_alloc_stripes, op,
				io_geom.mirror_num)) {
		ret = set_io_stripe(fs_info, logical, length, smap, map, &io_geom);
		if (mirror_num_ret)
			*mirror_num_ret = io_geom.mirror_num;
		*bioc_ret = NULL;
		goto out;
	}

	bioc = alloc_btrfs_io_context(fs_info, logical, num_alloc_stripes);
	if (!bioc) {
		ret = -ENOMEM;
		goto out;
	}
	bioc->map_type = map->type;

	/*
	 * For RAID56 full map, we need to make sure the stripes[] follows the
	 * rule that data stripes are all ordered, then followed with P and Q
	 * (if we have).
	 *
	 * It's still mostly the same as other profiles, just with extra rotation.
	 */
	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK &&
	    (op != BTRFS_MAP_READ || io_geom.mirror_num > 1)) {
		/*
		 * For RAID56 @stripe_nr is already the number of full stripes
		 * before us, which is also the rotation value (needs to modulo
		 * with num_stripes).
		 *
		 * In this case, we just add @stripe_nr with @i, then do the
		 * modulo, to reduce one modulo call.
		 */
		bioc->full_stripe_logical = map->start +
			btrfs_stripe_nr_to_offset(io_geom.stripe_nr *
						  nr_data_stripes(map));
		for (int i = 0; i < io_geom.num_stripes; i++) {
			struct btrfs_io_stripe *dst = &bioc->stripes[i];
			u32 stripe_index;

			stripe_index = (i + io_geom.stripe_nr) % io_geom.num_stripes;
			dst->dev = map->stripes[stripe_index].dev;
			dst->physical =
				map->stripes[stripe_index].physical +
				io_geom.stripe_offset +
				btrfs_stripe_nr_to_offset(io_geom.stripe_nr);
		}
	} else {
		/*
		 * For all other non-RAID56 profiles, just copy the target
		 * stripe into the bioc.
		 */
		for (i = 0; i < io_geom.num_stripes; i++) {
			ret = set_io_stripe(fs_info, logical, length,
					    &bioc->stripes[i], map, &io_geom);
			if (ret < 0)
				break;
			io_geom.stripe_index++;
		}
	}

	if (ret) {
		*bioc_ret = NULL;
		btrfs_put_bioc(bioc);
		goto out;
	}

	if (op != BTRFS_MAP_READ)
		io_geom.max_errors = btrfs_chunk_max_errors(map);

	if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL &&
	    op != BTRFS_MAP_READ) {
		handle_ops_on_dev_replace(op, bioc, dev_replace, logical,
					  &io_geom.num_stripes, &io_geom.max_errors);
	}

	*bioc_ret = bioc;
	bioc->num_stripes = io_geom.num_stripes;
	bioc->max_errors = io_geom.max_errors;
	bioc->mirror_num = io_geom.mirror_num;

out:
	if (dev_replace_is_ongoing) {
		lockdep_assert_held(&dev_replace->rwsem);
		/* Unlock and let waiting writers proceed */
		up_read(&dev_replace->rwsem);
	}
	btrfs_free_chunk_map(map);
	return ret;
}
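/*
 * Illustrative sketch (not part of the driver): a minimal read-side caller of
 * btrfs_map_block(). Real users live in the bio layer; this only shows the
 * calling convention - *length may shrink to the mapped run, and either @smap
 * or the returned bioc describes where the data lives. The function name is
 * hypothetical.
 */
static int __maybe_unused example_map_for_read(struct btrfs_fs_info *fs_info,
					       u64 logical, u64 *length)
{
	struct btrfs_io_context *bioc = NULL;
	struct btrfs_io_stripe smap = { 0 };
	int mirror_num = 0;	/* 0: let the mapper pick a live mirror. */
	int ret;

	ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, logical, length,
			      &bioc, &smap, &mirror_num);
	if (ret)
		return ret;

	if (!bioc) {
		/* Single stripe: smap.dev and smap.physical are valid. */
		return 0;
	}

	/* Multiple stripes: bioc->stripes[0..num_stripes - 1] are valid. */
	btrfs_put_bioc(bioc);
	return 0;
}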
static bool dev_args_match_fs_devices(const struct btrfs_dev_lookup_args *args,
				      const struct btrfs_fs_devices *fs_devices)
{
	if (args->fsid == NULL)
		return true;
	if (memcmp(fs_devices->metadata_uuid, args->fsid, BTRFS_FSID_SIZE) == 0)
		return true;
	return false;
}

static bool dev_args_match_device(const struct btrfs_dev_lookup_args *args,
				  const struct btrfs_device *device)
{
	if (args->missing) {
		if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state) &&
		    !device->bdev)
			return true;
		return false;
	}

	if (device->devid != args->devid)
		return false;
	if (args->uuid && memcmp(device->uuid, args->uuid, BTRFS_UUID_SIZE) != 0)
		return false;
	return true;
}

/*
 * Find a device specified by @devid or @uuid in the list of @fs_devices, or
 * return NULL.
 *
 * If devid and uuid are both specified, the match must be exact, otherwise
 * only devid is used.
 */
struct btrfs_device *btrfs_find_device(const struct btrfs_fs_devices *fs_devices,
				       const struct btrfs_dev_lookup_args *args)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *seed_devs;

	if (dev_args_match_fs_devices(args, fs_devices)) {
		list_for_each_entry(device, &fs_devices->devices, dev_list) {
			if (dev_args_match_device(args, device))
				return device;
		}
	}

	list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
		if (!dev_args_match_fs_devices(args, seed_devs))
			continue;
		list_for_each_entry(device, &seed_devs->devices, dev_list) {
			if (dev_args_match_device(args, device))
				return device;
		}
	}

	return NULL;
}

static struct btrfs_device *add_missing_dev(struct btrfs_fs_devices *fs_devices,
					    u64 devid, u8 *dev_uuid)
{
	struct btrfs_device *device;
	unsigned int nofs_flag;

	/*
	 * We call this under the chunk_mutex, so we want to use NOFS for this
	 * allocation, however we don't want to change btrfs_alloc_device() to
	 * always do NOFS because we use it in a lot of other GFP_KERNEL safe
	 * places.
	 */

	nofs_flag = memalloc_nofs_save();
	device = btrfs_alloc_device(NULL, &devid, dev_uuid, NULL);
	memalloc_nofs_restore(nofs_flag);
	if (IS_ERR(device))
		return device;

	list_add(&device->dev_list, &fs_devices->devices);
	device->fs_devices = fs_devices;
	fs_devices->num_devices++;

	set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
	fs_devices->missing_devices++;

	return device;
}

/*
 * Allocate new device struct, set up devid and UUID.
 *
 * @fs_info:	used only for generating a new devid, can be NULL if
 *		devid is provided (i.e. @devid != NULL).
 * @devid:	a pointer to devid for this device. If NULL a new devid
 *		is generated.
 * @uuid:	a pointer to UUID for this device. If NULL a new UUID
 *		is generated.
 * @path:	a pointer to device path if available, NULL otherwise.
 *
 * Return: a pointer to a new &struct btrfs_device on success; ERR_PTR()
 * on error. Returned struct is not linked onto any lists and must be
 * destroyed with btrfs_free_device.
 */
struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
					const u64 *devid, const u8 *uuid,
					const char *path)
{
	struct btrfs_device *dev;
	u64 tmp;

	if (WARN_ON(!devid && !fs_info))
		return ERR_PTR(-EINVAL);

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&dev->dev_list);
	INIT_LIST_HEAD(&dev->dev_alloc_list);
	INIT_LIST_HEAD(&dev->post_commit_list);

	atomic_set(&dev->dev_stats_ccnt, 0);
	btrfs_device_data_ordered_init(dev);
	extent_io_tree_init(fs_info, &dev->alloc_state, IO_TREE_DEVICE_ALLOC_STATE);

	if (devid)
		tmp = *devid;
	else {
		int ret;

		ret = find_next_devid(fs_info, &tmp);
		if (ret) {
			btrfs_free_device(dev);
			return ERR_PTR(ret);
		}
	}
	dev->devid = tmp;

	if (uuid)
		memcpy(dev->uuid, uuid, BTRFS_UUID_SIZE);
	else
		generate_random_uuid(dev->uuid);

	if (path) {
		struct rcu_string *name;

		name = rcu_string_strdup(path, GFP_KERNEL);
		if (!name) {
			btrfs_free_device(dev);
			return ERR_PTR(-ENOMEM);
		}
		rcu_assign_pointer(dev->name, name);
	}

	return dev;
}

static void btrfs_report_missing_device(struct btrfs_fs_info *fs_info,
					u64 devid, u8 *uuid, bool error)
{
	if (error)
		btrfs_err_rl(fs_info, "devid %llu uuid %pU is missing",
			     devid, uuid);
	else
		btrfs_warn_rl(fs_info, "devid %llu uuid %pU is missing",
			      devid, uuid);
}

u64 btrfs_calc_stripe_length(const struct btrfs_chunk_map *map)
{
	const int data_stripes = calc_data_stripes(map->type, map->num_stripes);

	return div_u64(map->chunk_len, data_stripes);
}
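/*
 * Illustrative sketch (not part of the driver): looking up a device by devid
 * with the lookup-args helpers above. The function name is hypothetical;
 * leaving args.uuid and args.fsid NULL matches on devid only.
 */
static struct btrfs_device *__maybe_unused example_lookup_devid(struct btrfs_fs_info *fs_info,
								u64 devid)
{
	BTRFS_DEV_LOOKUP_ARGS(args);

	args.devid = devid;
	return btrfs_find_device(fs_info->fs_devices, &args);
}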
#if BITS_PER_LONG == 32
/*
 * Due to page cache limit, metadata beyond BTRFS_32BIT_MAX_FILE_SIZE
 * can't be accessed on 32bit systems.
 *
 * This function does a mount time check to reject the fs if it already has
 * metadata chunks beyond that limit.
 */
static int check_32bit_meta_chunk(struct btrfs_fs_info *fs_info,
				  u64 logical, u64 length, u64 type)
{
	if (!(type & BTRFS_BLOCK_GROUP_METADATA))
		return 0;

	if (logical + length < MAX_LFS_FILESIZE)
		return 0;

	btrfs_err_32bit_limit(fs_info);
	return -EOVERFLOW;
}

/*
 * This is to give early warning for any metadata chunk reaching
 * BTRFS_32BIT_EARLY_WARN_THRESHOLD.
 * Although we can still access the metadata, it's not going to be possible
 * once the limit is reached.
 */
static void warn_32bit_meta_chunk(struct btrfs_fs_info *fs_info,
				  u64 logical, u64 length, u64 type)
{
	if (!(type & BTRFS_BLOCK_GROUP_METADATA))
		return;

	if (logical + length < BTRFS_32BIT_EARLY_WARN_THRESHOLD)
		return;

	btrfs_warn_32bit_limit(fs_info);
}
#endif

static struct btrfs_device *handle_missing_device(struct btrfs_fs_info *fs_info,
						  u64 devid, u8 *uuid)
{
	struct btrfs_device *dev;

	if (!btrfs_test_opt(fs_info, DEGRADED)) {
		btrfs_report_missing_device(fs_info, devid, uuid, true);
		return ERR_PTR(-ENOENT);
	}

	dev = add_missing_dev(fs_info->fs_devices, devid, uuid);
	if (IS_ERR(dev)) {
		btrfs_err(fs_info, "failed to init missing device %llu: %ld",
			  devid, PTR_ERR(dev));
		return dev;
	}
	btrfs_report_missing_device(fs_info, devid, uuid, false);

	return dev;
}

static int read_one_chunk(struct btrfs_key *key, struct extent_buffer *leaf,
			  struct btrfs_chunk *chunk)
{
	BTRFS_DEV_LOOKUP_ARGS(args);
	struct btrfs_fs_info *fs_info = leaf->fs_info;
	struct btrfs_chunk_map *map;
	u64 logical;
	u64 length;
	u64 devid;
	u64 type;
	u8 uuid[BTRFS_UUID_SIZE];
	int index;
	int num_stripes;
	int ret;
	int i;

	logical = key->offset;
	length = btrfs_chunk_length(leaf, chunk);
	type = btrfs_chunk_type(leaf, chunk);
	index = btrfs_bg_flags_to_raid_index(type);
	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);

#if BITS_PER_LONG == 32
	ret = check_32bit_meta_chunk(fs_info, logical, length, type);
	if (ret < 0)
		return ret;
	warn_32bit_meta_chunk(fs_info, logical, length, type);
#endif

	/*
	 * Only need to verify chunk item if we're reading from sys chunk array,
	 * as chunk item in tree block is already verified by tree-checker.
	 */
	if (leaf->start == BTRFS_SUPER_INFO_OFFSET) {
		ret = btrfs_check_chunk_valid(leaf, chunk, logical);
		if (ret)
			return ret;
	}

	map = btrfs_find_chunk_map(fs_info, logical, 1);

	/* Already mapped? */
	if (map && map->start <= logical && map->start + map->chunk_len > logical) {
		btrfs_free_chunk_map(map);
		return 0;
	} else if (map) {
		btrfs_free_chunk_map(map);
	}

	map = btrfs_alloc_chunk_map(num_stripes, GFP_NOFS);
	if (!map)
		return -ENOMEM;

	map->start = logical;
	map->chunk_len = length;
	map->num_stripes = num_stripes;
	map->io_width = btrfs_chunk_io_width(leaf, chunk);
	map->io_align = btrfs_chunk_io_align(leaf, chunk);
	map->type = type;
	/*
	 * We can't use the sub_stripes value, as for profiles other than
	 * RAID10, they may have 0 as sub_stripes for filesystems created by
	 * older mkfs (<v5.4).
	 * In that case, it can cause divide-by-zero errors later.
	 * Since currently sub_stripes is fixed for each profile, let's
	 * use the trusted value instead.
	 */
	map->sub_stripes = btrfs_raid_array[index].sub_stripes;
	map->verified_stripes = 0;
	map->stripe_size = btrfs_calc_stripe_length(map);
	for (i = 0; i < num_stripes; i++) {
		map->stripes[i].physical =
			btrfs_stripe_offset_nr(leaf, chunk, i);
		devid = btrfs_stripe_devid_nr(leaf, chunk, i);
		args.devid = devid;
		read_extent_buffer(leaf, uuid, (unsigned long)
				   btrfs_stripe_dev_uuid_nr(chunk, i),
				   BTRFS_UUID_SIZE);
		args.uuid = uuid;
		map->stripes[i].dev = btrfs_find_device(fs_info->fs_devices, &args);
		if (!map->stripes[i].dev) {
			map->stripes[i].dev = handle_missing_device(fs_info,
								    devid, uuid);
			if (IS_ERR(map->stripes[i].dev)) {
				ret = PTR_ERR(map->stripes[i].dev);
				btrfs_free_chunk_map(map);
				return ret;
			}
		}

		set_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
			&(map->stripes[i].dev->dev_state));
	}

	ret = btrfs_add_chunk_map(fs_info, map);
	if (ret < 0) {
		btrfs_err(fs_info,
			  "failed to add chunk map, start=%llu len=%llu: %d",
			  map->start, map->chunk_len, ret);
	}

	return ret;
}

static void fill_device_from_item(struct extent_buffer *leaf,
				  struct btrfs_dev_item *dev_item,
				  struct btrfs_device *device)
{
	unsigned long ptr;

	device->devid = btrfs_device_id(leaf, dev_item);
	device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
	device->total_bytes = device->disk_total_bytes;
	device->commit_total_bytes = device->disk_total_bytes;
	device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
	device->commit_bytes_used = device->bytes_used;
	device->type = btrfs_device_type(leaf, dev_item);
	device->io_align = btrfs_device_io_align(leaf, dev_item);
	device->io_width = btrfs_device_io_width(leaf, dev_item);
	device->sector_size = btrfs_device_sector_size(leaf, dev_item);
	WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID);
	clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);

	ptr = btrfs_device_uuid(dev_item);
	read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
}
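/*
 * Worked example for btrfs_calc_stripe_length() as used above (illustrative,
 * made-up numbers): a 2GiB RAID10 chunk over 4 devices with sub_stripes == 2
 * has 4 / 2 = 2 data stripes, so each device holds 2GiB / 2 = 1GiB of the
 * chunk. For a 5-device RAID6 chunk, data stripes = 5 - 2 = 3 and the
 * per-device stripe length is chunk_len / 3.
 */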
static struct btrfs_fs_devices *open_seed_devices(struct btrfs_fs_info *fs_info,
						  u8 *fsid)
{
	struct btrfs_fs_devices *fs_devices;
	int ret;

	lockdep_assert_held(&uuid_mutex);
	ASSERT(fsid);

	/* This will match only for multi-device seed fs */
	list_for_each_entry(fs_devices, &fs_info->fs_devices->seed_list, seed_list)
		if (!memcmp(fs_devices->fsid, fsid, BTRFS_FSID_SIZE))
			return fs_devices;

	fs_devices = find_fsid(fsid, NULL);
	if (!fs_devices) {
		if (!btrfs_test_opt(fs_info, DEGRADED))
			return ERR_PTR(-ENOENT);

		fs_devices = alloc_fs_devices(fsid);
		if (IS_ERR(fs_devices))
			return fs_devices;

		fs_devices->seeding = true;
		fs_devices->opened = 1;
		return fs_devices;
	}

	/*
	 * Upon first call for a seed fs fsid, just create a private copy of the
	 * respective fs_devices and anchor it at fs_info->fs_devices->seed_list
	 */
	fs_devices = clone_fs_devices(fs_devices);
	if (IS_ERR(fs_devices))
		return fs_devices;

	ret = open_fs_devices(fs_devices, BLK_OPEN_READ, fs_info->bdev_holder);
	if (ret) {
		free_fs_devices(fs_devices);
		return ERR_PTR(ret);
	}

	if (!fs_devices->seeding) {
		close_fs_devices(fs_devices);
		free_fs_devices(fs_devices);
		return ERR_PTR(-EINVAL);
	}

	list_add(&fs_devices->seed_list, &fs_info->fs_devices->seed_list);

	return fs_devices;
}

static int read_one_dev(struct extent_buffer *leaf,
			struct btrfs_dev_item *dev_item)
{
	BTRFS_DEV_LOOKUP_ARGS(args);
	struct btrfs_fs_info *fs_info = leaf->fs_info;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;
	u64 devid;
	int ret;
	u8 fs_uuid[BTRFS_FSID_SIZE];
	u8 dev_uuid[BTRFS_UUID_SIZE];

	devid = btrfs_device_id(leaf, dev_item);
	args.devid = devid;
	read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
			   BTRFS_UUID_SIZE);
	read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
			   BTRFS_FSID_SIZE);
	args.uuid = dev_uuid;
	args.fsid = fs_uuid;

	if (memcmp(fs_uuid, fs_devices->metadata_uuid, BTRFS_FSID_SIZE)) {
		fs_devices = open_seed_devices(fs_info, fs_uuid);
		if (IS_ERR(fs_devices))
			return PTR_ERR(fs_devices);
	}

	device = btrfs_find_device(fs_info->fs_devices, &args);
	if (!device) {
		if (!btrfs_test_opt(fs_info, DEGRADED)) {
			btrfs_report_missing_device(fs_info, devid,
						    dev_uuid, true);
			return -ENOENT;
		}

		device = add_missing_dev(fs_devices, devid, dev_uuid);
		if (IS_ERR(device)) {
			btrfs_err(fs_info,
				  "failed to add missing dev %llu: %ld",
				  devid, PTR_ERR(device));
			return PTR_ERR(device);
		}
		btrfs_report_missing_device(fs_info, devid, dev_uuid, false);
	} else {
		if (!device->bdev) {
			if (!btrfs_test_opt(fs_info, DEGRADED)) {
				btrfs_report_missing_device(fs_info,
							    devid, dev_uuid, true);
				return -ENOENT;
			}
			btrfs_report_missing_device(fs_info, devid,
						    dev_uuid, false);
		}

		if (!device->bdev &&
		    !test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
			/*
			 * This happens when a device that was properly set up
			 * in the device info lists suddenly goes bad.
			 * device->bdev is NULL, and so we have to set
			 * device->missing to one here.
			 */
			device->fs_devices->missing_devices++;
			set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
		}

		/* Move the device to its own fs_devices */
		if (device->fs_devices != fs_devices) {
			ASSERT(test_bit(BTRFS_DEV_STATE_MISSING,
					&device->dev_state));

			list_move(&device->dev_list, &fs_devices->devices);
			device->fs_devices->num_devices--;
			fs_devices->num_devices++;

			device->fs_devices->missing_devices--;
			fs_devices->missing_devices++;

			device->fs_devices = fs_devices;
		}
	}

	if (device->fs_devices != fs_info->fs_devices) {
		BUG_ON(test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state));
		if (device->generation !=
		    btrfs_device_generation(leaf, dev_item))
			return -EINVAL;
	}

	fill_device_from_item(leaf, dev_item, device);
	if (device->bdev) {
		u64 max_total_bytes = bdev_nr_bytes(device->bdev);

		if (device->total_bytes > max_total_bytes) {
			btrfs_err(fs_info,
			"device total_bytes should be at most %llu but found %llu",
				  max_total_bytes, device->total_bytes);
			return -EINVAL;
		}
	}
	set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    !test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
		device->fs_devices->total_rw_bytes += device->total_bytes;
		atomic64_add(device->total_bytes - device->bytes_used,
			     &fs_info->free_chunk_space);
	}
	ret = 0;
	return ret;
}
int btrfs_read_sys_array(struct btrfs_fs_info *fs_info)
{
	struct btrfs_super_block *super_copy = fs_info->super_copy;
	struct extent_buffer *sb;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *array_ptr;
	unsigned long sb_array_offset;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur_offset;
	u64 type;
	struct btrfs_key key;

	ASSERT(BTRFS_SUPER_INFO_SIZE <= fs_info->nodesize);

	/*
	 * We allocated a dummy extent, just to use extent buffer accessors.
	 * There will be unused space after BTRFS_SUPER_INFO_SIZE, but
	 * that's fine, we will not go beyond system chunk array anyway.
	 */
	sb = alloc_dummy_extent_buffer(fs_info, BTRFS_SUPER_INFO_OFFSET);
	if (!sb)
		return -ENOMEM;
	set_extent_buffer_uptodate(sb);

	write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
	array_size = btrfs_super_sys_array_size(super_copy);

	array_ptr = super_copy->sys_chunk_array;
	sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array);
	cur_offset = 0;

	while (cur_offset < array_size) {
		disk_key = (struct btrfs_disk_key *)array_ptr;
		len = sizeof(*disk_key);
		if (cur_offset + len > array_size)
			goto out_short_read;

		btrfs_disk_key_to_cpu(&key, disk_key);

		array_ptr += len;
		sb_array_offset += len;
		cur_offset += len;

		if (key.type != BTRFS_CHUNK_ITEM_KEY) {
			btrfs_err(fs_info,
			    "unexpected item type %u in sys_array at offset %u",
				  (u32)key.type, cur_offset);
			ret = -EIO;
			break;
		}

		chunk = (struct btrfs_chunk *)sb_array_offset;
		/*
		 * At least one btrfs_chunk with one stripe must be present,
		 * exact stripe count check comes afterwards
		 */
		len = btrfs_chunk_item_size(1);
		if (cur_offset + len > array_size)
			goto out_short_read;

		num_stripes = btrfs_chunk_num_stripes(sb, chunk);
		if (!num_stripes) {
			btrfs_err(fs_info,
			"invalid number of stripes %u in sys_array at offset %u",
				  num_stripes, cur_offset);
			ret = -EIO;
			break;
		}

		type = btrfs_chunk_type(sb, chunk);
		if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) {
			btrfs_err(fs_info,
			"invalid chunk type %llu in sys_array at offset %u",
				  type, cur_offset);
			ret = -EIO;
			break;
		}

		len = btrfs_chunk_item_size(num_stripes);
		if (cur_offset + len > array_size)
			goto out_short_read;

		ret = read_one_chunk(&key, sb, chunk);
		if (ret)
			break;

		array_ptr += len;
		sb_array_offset += len;
		cur_offset += len;
	}
	clear_extent_buffer_uptodate(sb);
	free_extent_buffer_stale(sb);
	return ret;

out_short_read:
	btrfs_err(fs_info, "sys_array too short to read %u bytes at offset %u",
		  len, cur_offset);
	clear_extent_buffer_uptodate(sb);
	free_extent_buffer_stale(sb);
	return -EIO;
}
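/*
 * Illustrative layout of the system chunk array parsed above (not a struct
 * definition; entry sizes depend on the stripe count of each chunk):
 *
 *	sys_chunk_array:
 *	  [btrfs_disk_key][btrfs_chunk + N x btrfs_stripe]
 *	  [btrfs_disk_key][btrfs_chunk + M x btrfs_stripe]
 *	  ...
 *
 * Every key must be BTRFS_CHUNK_ITEM_KEY and every chunk must carry the
 * SYSTEM block group type. btrfs_chunk_item_size(n) covers the chunk header
 * plus its n stripes, which is why the loop re-checks the length once the
 * stripe count of the current entry is known.
 */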
/*
 * Check if all chunks in the fs are OK for read-write degraded mount.
 *
 * If the @failing_dev is specified, it's accounted as missing.
 *
 * Return true if all chunks meet the minimal RW mount requirements.
 * Return false if any chunk doesn't meet the minimal RW mount requirements.
 */
bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info,
			       struct btrfs_device *failing_dev)
{
	struct btrfs_chunk_map *map;
	u64 next_start;
	bool ret = true;

	map = btrfs_find_chunk_map(fs_info, 0, U64_MAX);
	/* No chunk at all? Return false anyway */
	if (!map) {
		ret = false;
		goto out;
	}
	while (map) {
		int missing = 0;
		int max_tolerated;
		int i;

		max_tolerated =
			btrfs_get_num_tolerated_disk_barrier_failures(
					map->type);
		for (i = 0; i < map->num_stripes; i++) {
			struct btrfs_device *dev = map->stripes[i].dev;

			if (!dev || !dev->bdev ||
			    test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) ||
			    dev->last_flush_error)
				missing++;
			else if (failing_dev && failing_dev == dev)
				missing++;
		}
		if (missing > max_tolerated) {
			if (!failing_dev)
				btrfs_warn(fs_info,
	"chunk %llu missing %d devices, max tolerance is %d for writable mount",
					   map->start, missing, max_tolerated);
			btrfs_free_chunk_map(map);
			ret = false;
			goto out;
		}
		next_start = map->start + map->chunk_len;
		btrfs_free_chunk_map(map);

		map = btrfs_find_chunk_map(fs_info, next_start, U64_MAX - next_start);
	}
out:
	return ret;
}

static void readahead_tree_node_children(struct extent_buffer *node)
{
	int i;
	const int nr_items = btrfs_header_nritems(node);

	for (i = 0; i < nr_items; i++)
		btrfs_readahead_node_child(node, i);
}
int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root = fs_info->chunk_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	int ret;
	int slot;
	int iter_ret = 0;
	u64 total_dev = 0;
	u64 last_ra_node = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * uuid_mutex is needed only if we are mounting a sprout FS
	 * otherwise we don't need it.
	 */
	mutex_lock(&uuid_mutex);

	/*
	 * It is possible for mount and umount to race in such a way that
	 * we execute this code path, but open_fs_devices failed to clear
	 * total_rw_bytes. We certainly want it cleared before reading the
	 * device items, so clear it here.
	 */
	fs_info->fs_devices->total_rw_bytes = 0;

	/*
	 * Lockdep complains about possible circular locking dependency between
	 * a disk's open_mutex (struct gendisk.open_mutex), the rw semaphores
	 * used for freeze protection of a fs (struct super_block.s_writers),
	 * which we take when starting a transaction, and extent buffers of the
	 * chunk tree if we call read_one_dev() while holding a lock on an
	 * extent buffer of the chunk tree. Since we are mounting the filesystem
	 * and at this point there can't be any concurrent task modifying the
	 * chunk tree, to keep it simple, just skip locking on the chunk tree.
	 */
	ASSERT(!test_bit(BTRFS_FS_OPEN, &fs_info->flags));
	path->skip_locking = 1;

	/*
	 * Read all device items, and then all the chunk items. All
	 * device items are found before any chunk item (their object id
	 * is smaller than the lowest possible object id for a chunk
	 * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID).
	 */
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = 0;
	btrfs_for_each_slot(root, &key, &found_key, path, iter_ret) {
		struct extent_buffer *node = path->nodes[1];

		leaf = path->nodes[0];
		slot = path->slots[0];

		if (node) {
			if (last_ra_node != node->start) {
				readahead_tree_node_children(node);
				last_ra_node = node->start;
			}
		}
		if (found_key.type == BTRFS_DEV_ITEM_KEY) {
			struct btrfs_dev_item *dev_item;

			dev_item = btrfs_item_ptr(leaf, slot,
						  struct btrfs_dev_item);
			ret = read_one_dev(leaf, dev_item);
			if (ret)
				goto error;
			total_dev++;
		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
			struct btrfs_chunk *chunk;

			/*
			 * We are only called at mount time, so no need to take
			 * fs_info->chunk_mutex. Plus, to avoid lockdep warnings,
			 * we always lock first fs_info->chunk_mutex before
			 * acquiring any locks on the chunk tree. This is a
			 * requirement for chunk allocation, see the comment on
			 * top of btrfs_chunk_alloc() for details.
			 */
			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
			ret = read_one_chunk(&found_key, leaf, chunk);
			if (ret)
				goto error;
		}
	}
	/* Catch error found during iteration */
	if (iter_ret < 0) {
		ret = iter_ret;
		goto error;
	}

	/*
	 * After loading chunk tree, we've got all device information,
	 * do another round of validation checks.
	 */
	if (total_dev != fs_info->fs_devices->total_devices) {
		btrfs_warn(fs_info,
"super block num_devices %llu mismatch with DEV_ITEM count %llu, will be repaired on next transaction commit",
			   btrfs_super_num_devices(fs_info->super_copy),
			   total_dev);
		fs_info->fs_devices->total_devices = total_dev;
		btrfs_set_super_num_devices(fs_info->super_copy, total_dev);
	}
	if (btrfs_super_total_bytes(fs_info->super_copy) <
	    fs_info->fs_devices->total_rw_bytes) {
		btrfs_err(fs_info,
	"super_total_bytes %llu mismatch with fs_devices total_rw_bytes %llu",
			  btrfs_super_total_bytes(fs_info->super_copy),
			  fs_info->fs_devices->total_rw_bytes);
		ret = -EINVAL;
		goto error;
	}
	ret = 0;
error:
	mutex_unlock(&uuid_mutex);

	btrfs_free_path(path);
	return ret;
}

int btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs;
	struct btrfs_device *device;
	int ret = 0;

	fs_devices->fs_info = fs_info;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list)
		device->fs_info = fs_info;

	list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
		list_for_each_entry(device, &seed_devs->devices, dev_list) {
			device->fs_info = fs_info;
			ret = btrfs_get_dev_zone_info(device, false);
			if (ret)
				break;
		}

		seed_devs->fs_info = fs_info;
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	return ret;
}
static u64 btrfs_dev_stats_value(const struct extent_buffer *eb,
				 const struct btrfs_dev_stats_item *ptr,
				 int index)
{
	u64 val;

	read_extent_buffer(eb, &val,
			   offsetof(struct btrfs_dev_stats_item, values) +
			    ((unsigned long)ptr) + (index * sizeof(u64)),
			   sizeof(val));
	return val;
}

static void btrfs_set_dev_stats_value(struct extent_buffer *eb,
				      struct btrfs_dev_stats_item *ptr,
				      int index, u64 val)
{
	write_extent_buffer(eb, &val,
			    offsetof(struct btrfs_dev_stats_item, values) +
			     ((unsigned long)ptr) + (index * sizeof(u64)),
			    sizeof(val));
}

static int btrfs_device_init_dev_stats(struct btrfs_device *device,
				       struct btrfs_path *path)
{
	struct btrfs_dev_stats_item *ptr;
	struct extent_buffer *eb;
	struct btrfs_key key;
	int item_size;
	int i, ret, slot;

	if (!device->fs_info->dev_root)
		return 0;

	key.objectid = BTRFS_DEV_STATS_OBJECTID;
	key.type = BTRFS_PERSISTENT_ITEM_KEY;
	key.offset = device->devid;
	ret = btrfs_search_slot(NULL, device->fs_info->dev_root, &key, path, 0, 0);
	if (ret) {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
			btrfs_dev_stat_set(device, i, 0);
		device->dev_stats_valid = 1;
		btrfs_release_path(path);
		return ret < 0 ? ret : 0;
	}
	slot = path->slots[0];
	eb = path->nodes[0];
	item_size = btrfs_item_size(eb, slot);

	ptr = btrfs_item_ptr(eb, slot, struct btrfs_dev_stats_item);

	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
		if (item_size >= (1 + i) * sizeof(__le64))
			btrfs_dev_stat_set(device, i,
					   btrfs_dev_stats_value(eb, ptr, i));
		else
			btrfs_dev_stat_set(device, i, 0);
	}

	device->dev_stats_valid = 1;
	btrfs_dev_stat_print_on_load(device);
	btrfs_release_path(path);

	return 0;
}

int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs;
	struct btrfs_device *device;
	struct btrfs_path *path = NULL;
	int ret = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		ret = btrfs_device_init_dev_stats(device, path);
		if (ret)
			goto out;
	}
	list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
		list_for_each_entry(device, &seed_devs->devices, dev_list) {
			ret = btrfs_device_init_dev_stats(device, path);
			if (ret)
				goto out;
		}
	}
out:
	mutex_unlock(&fs_devices->device_list_mutex);

	btrfs_free_path(path);
	return ret;
}
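
/*
 * Descriptive note (added, not in the original source): persist the in-memory
 * counters of @device as a dev_stats item. If an existing item is smaller
 * than the current struct btrfs_dev_stats_item (e.g. it was written by an
 * older kernel that knew fewer counters), it is deleted and re-inserted at
 * full size before all values are written back.
 */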
static int update_dev_stat_item(struct btrfs_trans_handle *trans,
				struct btrfs_device *device)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *dev_root = fs_info->dev_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *eb;
	struct btrfs_dev_stats_item *ptr;
	int ret;
	int i;

	key.objectid = BTRFS_DEV_STATS_OBJECTID;
	key.type = BTRFS_PERSISTENT_ITEM_KEY;
	key.offset = device->devid;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
	if (ret < 0) {
		btrfs_warn_in_rcu(fs_info,
			"error %d while searching for dev_stats item for device %s",
			ret, btrfs_dev_name(device));
		goto out;
	}

	if (ret == 0 &&
	    btrfs_item_size(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
		/* Need to delete the old one and insert a new one. */
		ret = btrfs_del_item(trans, dev_root, path);
		if (ret != 0) {
			btrfs_warn_in_rcu(fs_info,
				"delete too small dev_stats item for device %s failed %d",
				btrfs_dev_name(device), ret);
			goto out;
		}
		ret = 1;
	}

	if (ret == 1) {
		/* Need to insert a new item. */
		btrfs_release_path(path);
		ret = btrfs_insert_empty_item(trans, dev_root, path,
					      &key, sizeof(*ptr));
		if (ret < 0) {
			btrfs_warn_in_rcu(fs_info,
				"insert dev_stats item for device %s failed %d",
				btrfs_dev_name(device), ret);
			goto out;
		}
	}

	eb = path->nodes[0];
	ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		btrfs_set_dev_stats_value(eb, ptr, i,
					  btrfs_dev_stat_read(device, i));
	btrfs_mark_buffer_dirty(trans, eb);

out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Called from commit_transaction. Writes all changed device stats to disk.
 */
int btrfs_run_dev_stats(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;
	int stats_cnt;
	int ret = 0;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		stats_cnt = atomic_read(&device->dev_stats_ccnt);
		if (!device->dev_stats_valid || stats_cnt == 0)
			continue;

		/*
		 * There is a LOAD-LOAD control dependency between the value of
		 * dev_stats_ccnt and updating the on-disk values, which
		 * requires reading the in-memory counters. Such control
		 * dependencies require explicit read memory barriers.
		 *
		 * This memory barrier pairs with smp_mb__before_atomic() in
		 * btrfs_dev_stat_inc()/btrfs_dev_stat_set() and with the full
		 * barrier implied by atomic_xchg() in
		 * btrfs_dev_stats_read_and_reset().
		 */
		smp_rmb();
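
		/*
		 * Illustrative note (not in the original source): the pairing
		 * looks like this, with time flowing down:
		 *
		 *	writer (stat update)          reader (this loop)
		 *	--------------------          ------------------
		 *	update in-memory counter
		 *	smp_mb__before_atomic()
		 *	atomic_inc(dev_stats_ccnt)    atomic_read(dev_stats_ccnt)
		 *	                              smp_rmb()
		 *	                              read in-memory counters
		 *
		 * Without the smp_rmb() the counter reads below could be
		 * speculated before the dev_stats_ccnt load above and miss
		 * the update that made the counters dirty.
		 */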
		ret = update_dev_stat_item(trans, device);
		if (!ret)
			atomic_sub(stats_cnt, &device->dev_stats_ccnt);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	return ret;
}

void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
{
	btrfs_dev_stat_inc(dev, index);

	if (!dev->dev_stats_valid)
		return;
	btrfs_err_rl_in_rcu(dev->fs_info,
		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
			   btrfs_dev_name(dev),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
}

static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
{
	int i;

	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		if (btrfs_dev_stat_read(dev, i) != 0)
			break;
	if (i == BTRFS_DEV_STAT_VALUES_MAX)
		return; /* all values == 0, suppress message */

	btrfs_info_in_rcu(dev->fs_info,
		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
	       btrfs_dev_name(dev),
	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
}
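
/*
 * Usage sketch (added for illustration, not in the original source): this is
 * the backend of the BTRFS_IOC_GET_DEV_STATS and
 * BTRFS_IOC_GET_AND_RESET_DEV_STATS ioctls. Userspace fills in the devid and
 * how many counters it can receive, roughly:
 *
 *	struct btrfs_ioctl_get_dev_stats s = { 0 };
 *
 *	s.devid = 1;
 *	s.nr_items = BTRFS_DEV_STAT_VALUES_MAX;
 *	ioctl(fd, BTRFS_IOC_GET_DEV_STATS, &s);
 *
 * On return, nr_items is clamped to what the kernel actually knows and
 * s.values[] holds the counters ("btrfs device stats" works this way).
 */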
int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info,
			struct btrfs_ioctl_get_dev_stats *stats)
{
	BTRFS_DEV_LOOKUP_ARGS(args);
	struct btrfs_device *dev;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	int i;

	mutex_lock(&fs_devices->device_list_mutex);
	args.devid = stats->devid;
	dev = btrfs_find_device(fs_info->fs_devices, &args);
	mutex_unlock(&fs_devices->device_list_mutex);

	if (!dev) {
		btrfs_warn(fs_info, "get dev_stats failed, device not found");
		return -ENODEV;
	} else if (!dev->dev_stats_valid) {
		btrfs_warn(fs_info, "get dev_stats failed, not yet valid");
		return -ENODEV;
	} else if (stats->flags & BTRFS_DEV_STATS_RESET) {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
			if (stats->nr_items > i)
				stats->values[i] =
					btrfs_dev_stat_read_and_reset(dev, i);
			else
				btrfs_dev_stat_set(dev, i, 0);
		}
		btrfs_info(fs_info, "device stats zeroed by %s (%d)",
			   current->comm, task_pid_nr(current));
	} else {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
			if (stats->nr_items > i)
				stats->values[i] = btrfs_dev_stat_read(dev, i);
	}
	if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
		stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
	return 0;
}

/*
 * Update the size and bytes used for each device where it changed. This is
 * delayed since we would otherwise get errors while writing out the
 * superblocks.
 *
 * Must be invoked during transaction commit.
 */
void btrfs_commit_device_sizes(struct btrfs_transaction *trans)
{
	struct btrfs_device *curr, *next;

	ASSERT(trans->state == TRANS_STATE_COMMIT_DOING);

	if (list_empty(&trans->dev_update_list))
		return;

	/*
	 * We don't need the device_list_mutex here. This list is owned by the
	 * transaction and the transaction must complete before the device is
	 * released.
	 */
	mutex_lock(&trans->fs_info->chunk_mutex);
	list_for_each_entry_safe(curr, next, &trans->dev_update_list,
				 post_commit_list) {
		list_del_init(&curr->post_commit_list);
		curr->commit_total_bytes = curr->disk_total_bytes;
		curr->commit_bytes_used = curr->bytes_used;
	}
	mutex_unlock(&trans->fs_info->chunk_mutex);
}

/*
 * Multiplicity factor for simple profiles: DUP, RAID1-like and RAID10.
 */
int btrfs_bg_type_to_factor(u64 flags)
{
	const int index = btrfs_bg_flags_to_raid_index(flags);

	return btrfs_raid_array[index].ncopies;
}
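
/*
 * Illustrative note (not in the original source): the factor returned above
 * is the profile's ncopies value, i.e. how many bytes of raw disk space one
 * byte of data occupies. For example RAID1 and DUP return 2 (1 GiB of data
 * consumes 2 GiB raw), while SINGLE and RAID0 return 1.
 */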

static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
				 u64 chunk_offset, u64 devid,
				 u64 physical_offset, u64 physical_len)
{
	struct btrfs_dev_lookup_args args = { .devid = devid };
	struct btrfs_chunk_map *map;
	struct btrfs_device *dev;
	u64 stripe_len;
	bool found = false;
	int ret = 0;
	int i;

	map = btrfs_find_chunk_map(fs_info, chunk_offset, 1);
	if (!map) {
		btrfs_err(fs_info,
"dev extent physical offset %llu on devid %llu doesn't have corresponding chunk",
			  physical_offset, devid);
		ret = -EUCLEAN;
		goto out;
	}

	stripe_len = btrfs_calc_stripe_length(map);
	if (physical_len != stripe_len) {
		btrfs_err(fs_info,
"dev extent physical offset %llu on devid %llu length doesn't match chunk %llu, have %llu expect %llu",
			  physical_offset, devid, map->start, physical_len,
			  stripe_len);
		ret = -EUCLEAN;
		goto out;
	}

	/*
	 * Very old mkfs.btrfs (before v4.1) will not respect the reserved
	 * space. Although the kernel can handle it without problems, it's
	 * better to warn the users.
	 */
	if (physical_offset < BTRFS_DEVICE_RANGE_RESERVED)
		btrfs_warn(fs_info,
		"devid %llu physical %llu len %llu inside the reserved space",
			   devid, physical_offset, physical_len);

	for (i = 0; i < map->num_stripes; i++) {
		if (map->stripes[i].dev->devid == devid &&
		    map->stripes[i].physical == physical_offset) {
			found = true;
			if (map->verified_stripes >= map->num_stripes) {
				btrfs_err(fs_info,
				"too many dev extents for chunk %llu found",
					  map->start);
				ret = -EUCLEAN;
				goto out;
			}
			map->verified_stripes++;
			break;
		}
	}
	if (!found) {
		btrfs_err(fs_info,
	"dev extent physical offset %llu devid %llu has no corresponding chunk",
			  physical_offset, devid);
		ret = -EUCLEAN;
	}

	/* Make sure no dev extent is beyond the device boundary. */
	dev = btrfs_find_device(fs_info->fs_devices, &args);
	if (!dev) {
		btrfs_err(fs_info, "failed to find devid %llu", devid);
		ret = -EUCLEAN;
		goto out;
	}

	if (physical_offset + physical_len > dev->disk_total_bytes) {
		btrfs_err(fs_info,
"dev extent devid %llu physical offset %llu len %llu is beyond device boundary %llu",
			  devid, physical_offset, physical_len,
			  dev->disk_total_bytes);
		ret = -EUCLEAN;
		goto out;
	}

	if (dev->zone_info) {
		u64 zone_size = dev->zone_info->zone_size;

		if (!IS_ALIGNED(physical_offset, zone_size) ||
		    !IS_ALIGNED(physical_len, zone_size)) {
			btrfs_err(fs_info,
"zoned: dev extent devid %llu physical offset %llu len %llu is not aligned to device zone",
				  devid, physical_offset, physical_len);
			ret = -EUCLEAN;
			goto out;
		}
	}

out:
	btrfs_free_chunk_map(map);
	return ret;
}

static int verify_chunk_dev_extent_mapping(struct btrfs_fs_info *fs_info)
{
	struct rb_node *node;
	int ret = 0;

	read_lock(&fs_info->mapping_tree_lock);
	for (node = rb_first_cached(&fs_info->mapping_tree); node; node = rb_next(node)) {
		struct btrfs_chunk_map *map;

		map = rb_entry(node, struct btrfs_chunk_map, rb_node);
		if (map->num_stripes != map->verified_stripes) {
			btrfs_err(fs_info,
			"chunk %llu has missing dev extent, have %d expect %d",
				  map->start, map->verified_stripes,
				  map->num_stripes);
			ret = -EUCLEAN;
			goto out;
		}
	}
out:
	read_unlock(&fs_info->mapping_tree_lock);
	return ret;
}
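
/*
 * Illustrative note (not in the original source): the two helpers above
 * enforce a 1:1 mapping in both directions. verify_one_dev_extent() checks
 * that a dev extent matches exactly one (devid, physical) stripe of its
 * chunk and bumps map->verified_stripes; verify_chunk_dev_extent_mapping()
 * then checks that every chunk had all of its stripes claimed. E.g. a RAID1
 * chunk with two stripes must be matched by exactly two dev extents, one on
 * each device.
 */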

/*
 * Ensure that all dev extents are mapped to the correct chunk, otherwise
 * later chunk allocation/free would cause unexpected behavior.
 *
 * NOTE: This will iterate through the whole device tree, which should be of
 * the same size level as the chunk tree. This slightly increases mount time.
 */
int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info)
{
	struct btrfs_path *path;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_key key;
	u64 prev_devid = 0;
	u64 prev_dev_ext_end = 0;
	int ret = 0;

	/*
	 * We don't have a dev_root because we mounted with ignorebadroots and
	 * failed to load the root, so we want to skip the verification in this
	 * case for sure.
	 *
	 * However if the dev root is fine, but the tree itself is corrupted
	 * we'd still fail to mount. This verification is only to make sure
	 * writes can happen safely, so instead just bypass this check
	 * completely in the case of IGNOREBADROOTS.
	 */
	if (btrfs_test_opt(fs_info, IGNOREBADROOTS))
		return 0;

	key.objectid = 1;
	key.type = BTRFS_DEV_EXTENT_KEY;
	key.offset = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = READA_FORWARD;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
		ret = btrfs_next_leaf(root, path);
		if (ret < 0)
			goto out;
		/* No dev extents at all? Not good. */
		if (ret > 0) {
			ret = -EUCLEAN;
			goto out;
		}
	}
	while (1) {
		struct extent_buffer *leaf = path->nodes[0];
		struct btrfs_dev_extent *dext;
		int slot = path->slots[0];
		u64 chunk_offset;
		u64 physical_offset;
		u64 physical_len;
		u64 devid;

		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.type != BTRFS_DEV_EXTENT_KEY)
			break;
		devid = key.objectid;
		physical_offset = key.offset;

		dext = btrfs_item_ptr(leaf, slot, struct btrfs_dev_extent);
		chunk_offset = btrfs_dev_extent_chunk_offset(leaf, dext);
		physical_len = btrfs_dev_extent_length(leaf, dext);

		/* Check if this dev extent overlaps with the previous one. */
		if (devid == prev_devid && physical_offset < prev_dev_ext_end) {
			btrfs_err(fs_info,
"dev extent devid %llu physical offset %llu overlap with previous dev extent end %llu",
				  devid, physical_offset, prev_dev_ext_end);
			ret = -EUCLEAN;
			goto out;
		}

		ret = verify_one_dev_extent(fs_info, chunk_offset, devid,
					    physical_offset, physical_len);
		if (ret < 0)
			goto out;
		prev_devid = devid;
		prev_dev_ext_end = physical_offset + physical_len;

		ret = btrfs_next_item(root, path);
		if (ret < 0)
			goto out;
		if (ret > 0) {
			ret = 0;
			break;
		}
	}

	/* Ensure all chunks have corresponding dev extents. */
	ret = verify_chunk_dev_extent_mapping(fs_info);
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Check whether the given block group or device is pinned by any inode being
 * used as a swapfile.
 */
bool btrfs_pinned_by_swapfile(struct btrfs_fs_info *fs_info, void *ptr)
{
	struct btrfs_swapfile_pin *sp;
	struct rb_node *node;

	spin_lock(&fs_info->swapfile_pins_lock);
	node = fs_info->swapfile_pins.rb_node;
	while (node) {
		sp = rb_entry(node, struct btrfs_swapfile_pin, node);
		if (ptr < sp->ptr)
			node = node->rb_left;
		else if (ptr > sp->ptr)
			node = node->rb_right;
		else
			break;
	}
	spin_unlock(&fs_info->swapfile_pins_lock);
	return node != NULL;
}

static int relocating_repair_kthread(void *data)
{
	struct btrfs_block_group *cache = data;
	struct btrfs_fs_info *fs_info = cache->fs_info;
	u64 target;
	int ret = 0;

	target = cache->start;
	btrfs_put_block_group(cache);

	sb_start_write(fs_info->sb);
	if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE)) {
		btrfs_info(fs_info,
			   "zoned: skip relocating block group %llu to repair: EBUSY",
			   target);
		sb_end_write(fs_info->sb);
		return -EBUSY;
	}

	mutex_lock(&fs_info->reclaim_bgs_lock);

	/* Ensure the block group still exists. */
	cache = btrfs_lookup_block_group(fs_info, target);
	if (!cache)
		goto out;

	if (!test_bit(BLOCK_GROUP_FLAG_RELOCATING_REPAIR, &cache->runtime_flags))
		goto out;

	ret = btrfs_may_alloc_data_chunk(fs_info, target);
	if (ret < 0)
		goto out;

	btrfs_info(fs_info,
		   "zoned: relocating block group %llu to repair IO failure",
		   target);
	ret = btrfs_relocate_chunk(fs_info, target);

out:
	if (cache)
		btrfs_put_block_group(cache);
	mutex_unlock(&fs_info->reclaim_bgs_lock);
	btrfs_exclop_finish(fs_info);
	sb_end_write(fs_info->sb);

	return ret;
}

bool btrfs_repair_one_zone(struct btrfs_fs_info *fs_info, u64 logical)
{
	struct btrfs_block_group *cache;

	if (!btrfs_is_zoned(fs_info))
		return false;

	/* Do not attempt to repair in degraded state. */
	if (btrfs_test_opt(fs_info, DEGRADED))
		return true;

	cache = btrfs_lookup_block_group(fs_info, logical);
	if (!cache)
		return true;

	if (test_and_set_bit(BLOCK_GROUP_FLAG_RELOCATING_REPAIR, &cache->runtime_flags)) {
		btrfs_put_block_group(cache);
		return true;
	}

	kthread_run(relocating_repair_kthread, cache,
		    "btrfs-relocating-repair");

	return true;
}

static void map_raid56_repair_block(struct btrfs_io_context *bioc,
				    struct btrfs_io_stripe *smap,
				    u64 logical)
{
	int data_stripes = nr_bioc_data_stripes(bioc);
	int i;

	for (i = 0; i < data_stripes; i++) {
		u64 stripe_start = bioc->full_stripe_logical +
				   btrfs_stripe_nr_to_offset(i);

		if (logical >= stripe_start &&
		    logical < stripe_start + BTRFS_STRIPE_LEN)
			break;
	}
	ASSERT(i < data_stripes);
	smap->dev = bioc->stripes[i].dev;
	smap->physical = bioc->stripes[i].physical +
			((logical - bioc->full_stripe_logical) &
			 BTRFS_STRIPE_LEN_MASK);
}
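
/*
 * Worked example (added for illustration, not in the original source): with
 * the fixed 64K BTRFS_STRIPE_LEN, a repair of logical address
 * full_stripe_logical + 70K on a RAID5 full stripe falls into data stripe 1
 * (the 64K..128K window), so the helper above picks stripes[1] and maps the
 * write to stripes[1].physical + 6K (70K & BTRFS_STRIPE_LEN_MASK).
 */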
8347 * 8348 * Callers should ensure: 8349 * 8350 * - Call btrfs_bio_counter_inc_blocked() first 8351 * - The range does not cross stripe boundary 8352 * - Has a valid @mirror_num passed in. 8353 */ 8354 int btrfs_map_repair_block(struct btrfs_fs_info *fs_info, 8355 struct btrfs_io_stripe *smap, u64 logical, 8356 u32 length, int mirror_num) 8357 { 8358 struct btrfs_io_context *bioc = NULL; 8359 u64 map_length = length; 8360 int mirror_ret = mirror_num; 8361 int ret; 8362 8363 ASSERT(mirror_num > 0); 8364 8365 ret = btrfs_map_block(fs_info, BTRFS_MAP_WRITE, logical, &map_length, 8366 &bioc, smap, &mirror_ret); 8367 if (ret < 0) 8368 return ret; 8369 8370 /* The map range should not cross stripe boundary. */ 8371 ASSERT(map_length >= length); 8372 8373 /* Already mapped to single stripe. */ 8374 if (!bioc) 8375 goto out; 8376 8377 /* Map the RAID56 multi-stripe writes to a single one. */ 8378 if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) { 8379 map_raid56_repair_block(bioc, smap, logical); 8380 goto out; 8381 } 8382 8383 ASSERT(mirror_num <= bioc->num_stripes); 8384 smap->dev = bioc->stripes[mirror_num - 1].dev; 8385 smap->physical = bioc->stripes[mirror_num - 1].physical; 8386 out: 8387 btrfs_put_bioc(bioc); 8388 ASSERT(smap->dev); 8389 return 0; 8390 } 8391