// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/semaphore.h>
#include <linux/uuid.h>
#include <linux/list_sort.h>
#include <linux/namei.h>
#include "misc.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "volumes.h"
#include "raid56.h"
#include "rcu-string.h"
#include "dev-replace.h"
#include "sysfs.h"
#include "tree-checker.h"
#include "space-info.h"
#include "block-group.h"
#include "discard.h"
#include "zoned.h"
#include "fs.h"
#include "accessors.h"
#include "uuid-tree.h"
#include "ioctl.h"
#include "relocation.h"
#include "scrub.h"
#include "super.h"
#include "raid-stripe-tree.h"

#define BTRFS_BLOCK_GROUP_STRIPE_MASK	(BTRFS_BLOCK_GROUP_RAID0 | \
					 BTRFS_BLOCK_GROUP_RAID10 | \
					 BTRFS_BLOCK_GROUP_RAID56_MASK)

struct btrfs_io_geometry {
	u32 stripe_index;
	u32 stripe_nr;
	int mirror_num;
	int num_stripes;
	u64 stripe_offset;
	u64 raid56_full_stripe_start;
	int max_errors;
	enum btrfs_map_op op;
};

const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
	[BTRFS_RAID_RAID10] = {
		.sub_stripes = 2,
		.dev_stripes = 1,
		.devs_max = 0,	/* 0 == as many as possible */
		.devs_min = 2,
		.tolerated_failures = 1,
		.devs_increment = 2,
		.ncopies = 2,
		.nparity = 0,
		.raid_name = "raid10",
		.bg_flag = BTRFS_BLOCK_GROUP_RAID10,
		.mindev_error = BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1] = {
		.sub_stripes = 1,
		.dev_stripes = 1,
		.devs_max = 2,
		.devs_min = 2,
		.tolerated_failures = 1,
		.devs_increment = 2,
		.ncopies = 2,
		.nparity = 0,
		.raid_name = "raid1",
		.bg_flag = BTRFS_BLOCK_GROUP_RAID1,
		.mindev_error = BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1C3] = {
		.sub_stripes = 1,
		.dev_stripes = 1,
		.devs_max = 3,
		.devs_min = 3,
		.tolerated_failures = 2,
		.devs_increment = 3,
		.ncopies = 3,
		.nparity = 0,
		.raid_name = "raid1c3",
		.bg_flag = BTRFS_BLOCK_GROUP_RAID1C3,
		.mindev_error = BTRFS_ERROR_DEV_RAID1C3_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1C4] = {
		.sub_stripes = 1,
		.dev_stripes = 1,
		.devs_max = 4,
		.devs_min = 4,
		.tolerated_failures = 3,
		.devs_increment = 4,
		.ncopies = 4,
		.nparity = 0,
		.raid_name = "raid1c4",
		.bg_flag = BTRFS_BLOCK_GROUP_RAID1C4,
		.mindev_error = BTRFS_ERROR_DEV_RAID1C4_MIN_NOT_MET,
	},
	[BTRFS_RAID_DUP] = {
		.sub_stripes = 1,
		.dev_stripes = 2,
		.devs_max = 1,
		.devs_min = 1,
		.tolerated_failures = 0,
		.devs_increment = 1,
		.ncopies = 2,
		.nparity = 0,
		.raid_name = "dup",
		.bg_flag = BTRFS_BLOCK_GROUP_DUP,
		.mindev_error = 0,
	},
	[BTRFS_RAID_RAID0] = {
		.sub_stripes = 1,
		.dev_stripes = 1,
		.devs_max = 0,
		.devs_min = 1,
		.tolerated_failures = 0,
		.devs_increment = 1,
		.ncopies = 1,
		.nparity = 0,
		.raid_name = "raid0",
		.bg_flag = BTRFS_BLOCK_GROUP_RAID0,
		.mindev_error = 0,
	},
	[BTRFS_RAID_SINGLE] = {
		.sub_stripes = 1,
		.dev_stripes = 1,
		.devs_max = 1,
		.devs_min = 1,
		.tolerated_failures = 0,
		.devs_increment = 1,
		.ncopies = 1,
		.nparity = 0,
		.raid_name = "single",
		.bg_flag = 0,
		.mindev_error = 0,
	},
	[BTRFS_RAID_RAID5] = {
		.sub_stripes = 1,
		.dev_stripes = 1,
		.devs_max = 0,
		.devs_min = 2,
		.tolerated_failures = 1,
		.devs_increment = 1,
		.ncopies = 1,
		.nparity = 1,
		.raid_name = "raid5",
		.bg_flag = BTRFS_BLOCK_GROUP_RAID5,
		.mindev_error = BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID6] = {
		.sub_stripes = 1,
		.dev_stripes = 1,
		.devs_max = 0,
		.devs_min = 3,
		.tolerated_failures = 2,
		.devs_increment = 1,
		.ncopies = 1,
		.nparity = 2,
		.raid_name = "raid6",
		.bg_flag = BTRFS_BLOCK_GROUP_RAID6,
		.mindev_error = BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET,
	},
};

/*
 * Convert block group flags (BTRFS_BLOCK_GROUP_*) to btrfs_raid_types, which
 * can be used as index to access btrfs_raid_array[].
 */
enum btrfs_raid_types __attribute_const__ btrfs_bg_flags_to_raid_index(u64 flags)
{
	const u64 profile = (flags & BTRFS_BLOCK_GROUP_PROFILE_MASK);

	if (!profile)
		return BTRFS_RAID_SINGLE;

	return BTRFS_BG_FLAG_TO_INDEX(profile);
}

const char *btrfs_bg_type_to_raid_name(u64 flags)
{
	const int index = btrfs_bg_flags_to_raid_index(flags);

	if (index >= BTRFS_NR_RAID_TYPES)
		return NULL;

	return btrfs_raid_array[index].raid_name;
}

int btrfs_nr_parity_stripes(u64 type)
{
	enum btrfs_raid_types index = btrfs_bg_flags_to_raid_index(type);

	return btrfs_raid_array[index].nparity;
}

/*
 * Fill @buf with textual description of @bg_flags, no more than @size_buf
 * bytes including the terminating null byte.
 */
void btrfs_describe_block_groups(u64 bg_flags, char *buf, u32 size_buf)
{
	int i;
	int ret;
	char *bp = buf;
	u64 flags = bg_flags;
	u32 size_bp = size_buf;

	if (!flags) {
		strcpy(bp, "NONE");
		return;
	}

#define DESCRIBE_FLAG(flag, desc)					\
	do {								\
		if (flags & (flag)) {					\
			ret = snprintf(bp, size_bp, "%s|", (desc));	\
			if (ret < 0 || ret >= size_bp)			\
				goto out_overflow;			\
			size_bp -= ret;					\
			bp += ret;					\
			flags &= ~(flag);				\
		}							\
	} while (0)

	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_DATA, "data");
	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_SYSTEM, "system");
	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_METADATA, "metadata");

	DESCRIBE_FLAG(BTRFS_AVAIL_ALLOC_BIT_SINGLE, "single");
	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
		DESCRIBE_FLAG(btrfs_raid_array[i].bg_flag,
			      btrfs_raid_array[i].raid_name);
#undef DESCRIBE_FLAG

	if (flags) {
		ret = snprintf(bp, size_bp, "0x%llx|", flags);
		size_bp -= ret;
	}

	if (size_bp < size_buf)
		buf[size_buf - size_bp - 1] = '\0'; /* remove last | */

	/*
	 * The text is trimmed, it's up to the caller to provide a sufficiently
	 * large buffer.
	 */
out_overflow:;
}
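/*
 * Illustrative usage sketch added for clarity (not part of the original
 * file); the buffer size is an arbitrary example value.  For a metadata
 * block group using RAID1:
 *
 *	char desc[64];
 *
 *	btrfs_describe_block_groups(BTRFS_BLOCK_GROUP_METADATA |
 *				    BTRFS_BLOCK_GROUP_RAID1,
 *				    desc, sizeof(desc));
 *
 * fills desc with "metadata|raid1": the type flag is emitted first, then the
 * matching profile name from btrfs_raid_array[], and the trailing '|' is
 * replaced with the terminating null.
 */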
static int init_first_rw_device(struct btrfs_trans_handle *trans);
static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info);
static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);

/*
 * Device locking
 * ==============
 *
 * There are several mutexes that protect manipulation of devices and low-level
 * structures like chunks but not block groups, extents or files
 *
 * uuid_mutex (global lock)
 * ------------------------
 * protects the fs_uuids list that tracks all per-fs fs_devices, resulting from
 * the SCAN_DEV ioctl registration or from mount either implicitly (the first
 * device) or requested by the device= mount option
 *
 * the mutex can be very coarse and can cover long-running operations
 *
 * protects: updates to fs_devices counters like missing devices, rw devices,
 * seeding, structure cloning, opening/closing devices at mount/umount time
 *
 * global::fs_devs - add, remove, updates to the global list
 *
 * does not protect: manipulation of the fs_devices::devices list in general
 * but in mount context it could be used to exclude list modifications by eg.
 * scan ioctl
 *
 * btrfs_device::name - renames (write side), read is RCU
 *
 * fs_devices::device_list_mutex (per-fs, with RCU)
 * ------------------------------------------------
 * protects updates to fs_devices::devices, ie. adding and deleting
 *
 * simple list traversal with read-only actions can be done with RCU protection
 *
 * may be used to exclude some operations from running concurrently without any
 * modifications to the list (see write_all_supers)
 *
 * Is not required at mount and close times, because our device list is
 * protected by the uuid_mutex at that point.
 *
 * balance_mutex
 * -------------
 * protects balance structures (status, state) and context accessed from
 * several places (internally, ioctl)
 *
 * chunk_mutex
 * -----------
 * protects chunks, adding or removing during allocation, trim or when a new
 * device is added/removed. Additionally it also protects post_commit_list of
 * individual devices, since they can be added to the transaction's
 * post_commit_list only with chunk_mutex held.
 *
 * cleaner_mutex
 * -------------
 * a big lock that is held by the cleaner thread and prevents running subvolume
 * cleaning together with relocation or delayed iputs
 *
 *
 * Lock nesting
 * ============
 *
 * uuid_mutex
 *   device_list_mutex
 *     chunk_mutex
 *   balance_mutex
 *
 *
 * Exclusive operations
 * ====================
 *
 * Maintains the exclusivity of the following operations that apply to the
 * whole filesystem and cannot run in parallel.
 *
 * - Balance (*)
 * - Device add
 * - Device remove
 * - Device replace (*)
 * - Resize
 *
 * The device operations (as above) can be in one of the following states:
 *
 * - Running state
 * - Paused state
 * - Completed state
 *
 * Only device operations marked with (*) can go into the Paused state for the
 * following reasons:
 *
 * - ioctl (only Balance can be Paused through ioctl)
 * - filesystem remounted as read-only
 * - filesystem unmounted and mounted as read-only
 * - system power-cycle and filesystem mounted as read-only
 * - filesystem or device errors leading to forced read-only
 *
 * The status of exclusive operation is set and cleared atomically.
 * During the course of Paused state, fs_info::exclusive_operation remains set.
 * A device operation in Paused or Running state can be canceled or resumed
 * either by ioctl (Balance only) or when remounted as read-write.
 * The exclusive status is cleared when the device operation is canceled or
 * completed.
 */
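/*
 * Illustrative sketch added for clarity (not from the original source): the
 * nesting order above means a thread that needs both the global lock and the
 * per-fs list lock must take uuid_mutex first, e.g.:
 *
 *	mutex_lock(&uuid_mutex);
 *	mutex_lock(&fs_devices->device_list_mutex);
 *	... modify fs_devices->devices ...
 *	mutex_unlock(&fs_devices->device_list_mutex);
 *	mutex_unlock(&uuid_mutex);
 *
 * Taking them in the opposite order could deadlock against another thread
 * that follows the documented order.
 */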
DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);
struct list_head * __attribute_const__ btrfs_get_fs_uuids(void)
{
	return &fs_uuids;
}

/*
 * Allocate new btrfs_fs_devices structure identified by a fsid.
 *
 * @fsid: if not NULL, copy the UUID to fs_devices::fsid and to
 *        fs_devices::metadata_uuid
 *
 * Return a pointer to a new struct btrfs_fs_devices on success, or ERR_PTR().
 * The returned struct is not linked onto any lists and can be destroyed with
 * kfree() right away.
 */
static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid)
{
	struct btrfs_fs_devices *fs_devs;

	fs_devs = kzalloc(sizeof(*fs_devs), GFP_KERNEL);
	if (!fs_devs)
		return ERR_PTR(-ENOMEM);

	mutex_init(&fs_devs->device_list_mutex);

	INIT_LIST_HEAD(&fs_devs->devices);
	INIT_LIST_HEAD(&fs_devs->alloc_list);
	INIT_LIST_HEAD(&fs_devs->fs_list);
	INIT_LIST_HEAD(&fs_devs->seed_list);

	if (fsid) {
		memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE);
		memcpy(fs_devs->metadata_uuid, fsid, BTRFS_FSID_SIZE);
	}

	return fs_devs;
}

static void btrfs_free_device(struct btrfs_device *device)
{
	WARN_ON(!list_empty(&device->post_commit_list));
	rcu_string_free(device->name);
	extent_io_tree_release(&device->alloc_state);
	btrfs_destroy_dev_zone_info(device);
	kfree(device);
}

static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;

	WARN_ON(fs_devices->opened);
	while (!list_empty(&fs_devices->devices)) {
		device = list_entry(fs_devices->devices.next,
				    struct btrfs_device, dev_list);
		list_del(&device->dev_list);
		btrfs_free_device(device);
	}
	kfree(fs_devices);
}

void __exit btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;

	while (!list_empty(&fs_uuids)) {
		fs_devices = list_entry(fs_uuids.next,
					struct btrfs_fs_devices, fs_list);
		list_del(&fs_devices->fs_list);
		free_fs_devices(fs_devices);
	}
}

static bool match_fsid_fs_devices(const struct btrfs_fs_devices *fs_devices,
				  const u8 *fsid, const u8 *metadata_fsid)
{
	if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) != 0)
		return false;

	if (!metadata_fsid)
		return true;

	if (memcmp(metadata_fsid, fs_devices->metadata_uuid, BTRFS_FSID_SIZE) != 0)
		return false;

	return true;
}

static noinline struct btrfs_fs_devices *find_fsid(
		const u8 *fsid, const u8 *metadata_fsid)
{
	struct btrfs_fs_devices *fs_devices;

	ASSERT(fsid);

	/* Handle non-split brain cases */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (match_fsid_fs_devices(fs_devices, fsid, metadata_fsid))
			return fs_devices;
	}
	return NULL;
}

static int
btrfs_get_bdev_and_sb(const char *device_path, blk_mode_t flags, void *holder,
		      int flush, struct file **bdev_file,
		      struct btrfs_super_block **disk_super)
{
	struct block_device *bdev;
	int ret;

	*bdev_file = bdev_file_open_by_path(device_path, flags, holder, NULL);

	if (IS_ERR(*bdev_file)) {
		ret = PTR_ERR(*bdev_file);
		btrfs_err(NULL, "failed to open device for path %s with flags 0x%x: %d",
			  device_path, flags, ret);
		goto error;
	}
	bdev = file_bdev(*bdev_file);

	if (flush)
		sync_blockdev(bdev);
	if (holder) {
		ret = set_blocksize(*bdev_file, BTRFS_BDEV_BLOCKSIZE);
		if (ret) {
			fput(*bdev_file);
			goto error;
		}
	}
	invalidate_bdev(bdev);
	*disk_super = btrfs_read_dev_super(bdev);
	if (IS_ERR(*disk_super)) {
		ret = PTR_ERR(*disk_super);
		fput(*bdev_file);
		goto error;
	}

	return 0;

error:
	*disk_super = NULL;
	*bdev_file = NULL;
	return ret;
}

/*
 * Search and remove all stale devices (which are not mounted). When @devt is
 * 0 and @skip_device is NULL, it will search and release all stale devices.
 *
 * @devt:        Optional. When provided, it will release all unmounted
 *               devices matching this devt only.
 * @skip_device: Optional. Will skip this device when searching for the stale
 *               devices.
 *
 * Return:	0 for success or if @devt is 0.
 *		-EBUSY if @devt is a mounted device.
 *		-ENOENT if @devt does not match any device in the list.
 */
static int btrfs_free_stale_devices(dev_t devt, struct btrfs_device *skip_device)
{
	struct btrfs_fs_devices *fs_devices, *tmp_fs_devices;
	struct btrfs_device *device, *tmp_device;
	int ret;
	bool freed = false;

	lockdep_assert_held(&uuid_mutex);

	/* Return good status if there is no instance of devt. */
	ret = 0;
	list_for_each_entry_safe(fs_devices, tmp_fs_devices, &fs_uuids, fs_list) {

		mutex_lock(&fs_devices->device_list_mutex);
		list_for_each_entry_safe(device, tmp_device,
					 &fs_devices->devices, dev_list) {
			if (skip_device && skip_device == device)
				continue;
			if (devt && devt != device->devt)
				continue;
			if (fs_devices->opened) {
				if (devt)
					ret = -EBUSY;
				break;
			}

			/* delete the stale device */
			fs_devices->num_devices--;
			list_del(&device->dev_list);
			btrfs_free_device(device);

			freed = true;
		}
		mutex_unlock(&fs_devices->device_list_mutex);

		if (fs_devices->num_devices == 0) {
			btrfs_sysfs_remove_fsid(fs_devices);
			list_del(&fs_devices->fs_list);
			free_fs_devices(fs_devices);
		}
	}

	/* If there is at least one freed device return 0. */
	if (freed)
		return 0;

	return ret;
}
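/*
 * Summary added for clarity (not from the original source) of the decision
 * matrix implemented by find_fsid_by_device() below, for the single-device,
 * non-seeding case where the temp_fsid feature may apply:
 *
 *   found by devt | found by fsid | result
 *   --------------+---------------+-------------------------------------------
 *        yes      |      no       | NULL if unopened (stale device), otherwise
 *                 |               | the devt match (temp_fsid mounting a subvol)
 *        yes      |      yes      | the devt match
 *        no       |      no       | NULL (new device, new fs_devices)
 *        no       |      yes      | NULL with *same_fsid_diff_dev set, so the
 *                 |               | caller generates a temporary fsid
 */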
static struct btrfs_fs_devices *find_fsid_by_device(
					struct btrfs_super_block *disk_super,
					dev_t devt, bool *same_fsid_diff_dev)
{
	struct btrfs_fs_devices *fsid_fs_devices;
	struct btrfs_fs_devices *devt_fs_devices;
	const bool has_metadata_uuid = (btrfs_super_incompat_flags(disk_super) &
					BTRFS_FEATURE_INCOMPAT_METADATA_UUID);
	bool found_by_devt = false;

	/* Find the fs_device by the usual method, if found use it. */
	fsid_fs_devices = find_fsid(disk_super->fsid,
			has_metadata_uuid ? disk_super->metadata_uuid : NULL);

	/* The temp_fsid feature is supported only with single device filesystem. */
	if (btrfs_super_num_devices(disk_super) != 1)
		return fsid_fs_devices;

	/*
	 * A seed device is an integral component of the sprout device, which
	 * functions as a multi-device filesystem. So, the temp-fsid feature
	 * is not supported.
	 */
	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING)
		return fsid_fs_devices;

	/* Try to find a fs_devices by matching devt. */
	list_for_each_entry(devt_fs_devices, &fs_uuids, fs_list) {
		struct btrfs_device *device;

		list_for_each_entry(device, &devt_fs_devices->devices, dev_list) {
			if (device->devt == devt) {
				found_by_devt = true;
				break;
			}
		}
		if (found_by_devt)
			break;
	}

	if (found_by_devt) {
		/* Existing device. */
		if (fsid_fs_devices == NULL) {
			if (devt_fs_devices->opened == 0) {
				/* Stale device. */
				return NULL;
			} else {
				/* temp_fsid is mounting a subvol. */
				return devt_fs_devices;
			}
		} else {
			/* Regular or temp_fsid device mounting a subvol. */
			return devt_fs_devices;
		}
	} else {
		/* New device. */
		if (fsid_fs_devices == NULL) {
			return NULL;
		} else {
			/* sb::fsid is already used, create a new temp_fsid. */
			*same_fsid_diff_dev = true;
			return NULL;
		}
	}

	/* Not reached. */
}

/*
 * This is only used on mount, and we are protected from competing things
 * messing with our fs_devices by the uuid_mutex, thus we do not need the
 * fs_devices->device_list_mutex here.
 */
static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
			struct btrfs_device *device, blk_mode_t flags,
			void *holder)
{
	struct file *bdev_file;
	struct btrfs_super_block *disk_super;
	u64 devid;
	int ret;

	if (device->bdev)
		return -EINVAL;
	if (!device->name)
		return -EINVAL;

	ret = btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
				    &bdev_file, &disk_super);
	if (ret)
		return ret;

	devid = btrfs_stack_device_id(&disk_super->dev_item);
	if (devid != device->devid)
		goto error_free_page;

	if (memcmp(device->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE))
		goto error_free_page;

	device->generation = btrfs_super_generation(disk_super);

	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
		if (btrfs_super_incompat_flags(disk_super) &
		    BTRFS_FEATURE_INCOMPAT_METADATA_UUID) {
			pr_err(
		"BTRFS: Invalid seeding and uuid-changed device detected\n");
			goto error_free_page;
		}

		clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
		fs_devices->seeding = true;
	} else {
		if (bdev_read_only(file_bdev(bdev_file)))
			clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
		else
			set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
	}

	if (!bdev_nonrot(file_bdev(bdev_file)))
		fs_devices->rotating = true;

	if (bdev_max_discard_sectors(file_bdev(bdev_file)))
		fs_devices->discardable = true;

	device->bdev_file = bdev_file;
	device->bdev = file_bdev(bdev_file);
	clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);

	if (device->devt != device->bdev->bd_dev) {
		btrfs_warn(NULL,
			   "device %s maj:min changed from %d:%d to %d:%d",
			   device->name->str, MAJOR(device->devt),
			   MINOR(device->devt), MAJOR(device->bdev->bd_dev),
			   MINOR(device->bdev->bd_dev));

		device->devt = device->bdev->bd_dev;
	}

	fs_devices->open_devices++;
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
		fs_devices->rw_devices++;
		list_add_tail(&device->dev_alloc_list, &fs_devices->alloc_list);
	}
	btrfs_release_disk_super(disk_super);

	return 0;

error_free_page:
	btrfs_release_disk_super(disk_super);
	fput(bdev_file);

	return -EINVAL;
}

const u8 *btrfs_sb_fsid_ptr(const struct btrfs_super_block *sb)
{
	bool has_metadata_uuid = (btrfs_super_incompat_flags(sb) &
				  BTRFS_FEATURE_INCOMPAT_METADATA_UUID);

	return has_metadata_uuid ? sb->metadata_uuid : sb->fsid;
}

/*
 * Add new device to list of registered devices
 *
 * Returns:
 * device pointer which was just added or updated when successful
 * error pointer when failed
 */
static noinline struct btrfs_device *device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   bool *new_device_added)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices = NULL;
	struct rcu_string *name;
	u64 found_transid = btrfs_super_generation(disk_super);
	u64 devid = btrfs_stack_device_id(&disk_super->dev_item);
	dev_t path_devt;
	int error;
	bool same_fsid_diff_dev = false;
	bool has_metadata_uuid = (btrfs_super_incompat_flags(disk_super) &
		BTRFS_FEATURE_INCOMPAT_METADATA_UUID);

	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_CHANGING_FSID_V2) {
		btrfs_err(NULL,
"device %s has incomplete metadata_uuid change, please use btrfstune to complete",
			  path);
		return ERR_PTR(-EAGAIN);
	}

	error = lookup_bdev(path, &path_devt);
	if (error) {
		btrfs_err(NULL, "failed to lookup block device for path %s: %d",
			  path, error);
		return ERR_PTR(error);
	}

	fs_devices = find_fsid_by_device(disk_super, path_devt, &same_fsid_diff_dev);

	if (!fs_devices) {
		fs_devices = alloc_fs_devices(disk_super->fsid);
		if (IS_ERR(fs_devices))
			return ERR_CAST(fs_devices);

		if (has_metadata_uuid)
			memcpy(fs_devices->metadata_uuid,
			       disk_super->metadata_uuid, BTRFS_FSID_SIZE);

		if (same_fsid_diff_dev) {
			generate_random_uuid(fs_devices->fsid);
			fs_devices->temp_fsid = true;
			pr_info("BTRFS: device %s (%d:%d) using temp-fsid %pU\n",
				path, MAJOR(path_devt), MINOR(path_devt),
				fs_devices->fsid);
		}

		mutex_lock(&fs_devices->device_list_mutex);
		list_add(&fs_devices->fs_list, &fs_uuids);

		device = NULL;
	} else {
		struct btrfs_dev_lookup_args args = {
			.devid = devid,
			.uuid = disk_super->dev_item.uuid,
		};

		mutex_lock(&fs_devices->device_list_mutex);
		device = btrfs_find_device(fs_devices, &args);

		if (found_transid > fs_devices->latest_generation) {
			memcpy(fs_devices->fsid, disk_super->fsid,
			       BTRFS_FSID_SIZE);
			memcpy(fs_devices->metadata_uuid,
			       btrfs_sb_fsid_ptr(disk_super), BTRFS_FSID_SIZE);
		}
	}

	if (!device) {
		unsigned int nofs_flag;

		if (fs_devices->opened) {
			btrfs_err(NULL,
"device %s (%d:%d) belongs to fsid %pU, and the fs is already mounted, scanned by %s (%d)",
				  path, MAJOR(path_devt), MINOR(path_devt),
				  fs_devices->fsid, current->comm,
				  task_pid_nr(current));
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-EBUSY);
		}

		nofs_flag = memalloc_nofs_save();
		device = btrfs_alloc_device(NULL, &devid,
					    disk_super->dev_item.uuid, path);
		memalloc_nofs_restore(nofs_flag);
		if (IS_ERR(device)) {
			mutex_unlock(&fs_devices->device_list_mutex);
			/* we can safely leave the fs_devices entry around */
			return device;
		}

		device->devt = path_devt;

		list_add_rcu(&device->dev_list, &fs_devices->devices);
		fs_devices->num_devices++;

		device->fs_devices = fs_devices;
		*new_device_added = true;

		if (disk_super->label[0])
			pr_info(
"BTRFS: device label %s devid %llu transid %llu %s (%d:%d) scanned by %s (%d)\n",
				disk_super->label, devid, found_transid, path,
				MAJOR(path_devt), MINOR(path_devt),
				current->comm, task_pid_nr(current));
		else
			pr_info(
"BTRFS: device fsid %pU devid %llu transid %llu %s (%d:%d) scanned by %s (%d)\n",
				disk_super->fsid, devid, found_transid, path,
				MAJOR(path_devt), MINOR(path_devt),
				current->comm, task_pid_nr(current));

	} else if (!device->name || strcmp(device->name->str, path)) {
		/*
		 * When FS is already mounted.
		 * 1. If you are here and if the device->name is NULL that
		 *    means this device was missing at time of FS mount.
		 * 2. If you are here and if the device->name is different
		 *    from 'path' that means either
		 *      a. The same device disappeared and reappeared with
		 *         different name. or
		 *      b. The missing-disk-which-was-replaced, has
		 *         reappeared now.
		 *
		 * We must allow 1 and 2a above. But 2b would be spurious
		 * and unintentional.
		 *
		 * Further in case of 1 and 2a above, the disk at 'path'
		 * would have missed some transaction when it was away and
		 * in case of 2a the stale bdev has to be updated as well.
		 * 2b must not be allowed at any time.
		 */

		/*
		 * For now, we do allow update to btrfs_fs_device through the
		 * btrfs dev scan cli after FS has been mounted. We're still
		 * tracking a problem where systems fail mount by subvolume id
		 * when we reject replacement on a mounted FS.
		 */
		if (!fs_devices->opened && found_transid < device->generation) {
			/*
			 * That is if the FS is _not_ mounted and if you
			 * are here, that means there is more than one
			 * disk with the same uuid and devid. We keep the one
			 * with the larger generation number or the last-in if
			 * generations are equal.
			 */
			mutex_unlock(&fs_devices->device_list_mutex);
			btrfs_err(NULL,
"device %s already registered with a higher generation, found %llu expect %llu",
				  path, found_transid, device->generation);
			return ERR_PTR(-EEXIST);
		}

		/*
		 * We are going to replace the device path for a given devid,
		 * make sure it's the same device if the device is mounted
		 *
		 * NOTE: the device->fs_info may not be reliable here so pass
		 * in a NULL to message helpers instead. This avoids a possible
		 * use-after-free when the fs_info and fs_info->sb are already
		 * torn down.
		 */
		if (device->bdev) {
			if (device->devt != path_devt) {
				mutex_unlock(&fs_devices->device_list_mutex);
				btrfs_warn_in_rcu(NULL,
	"duplicate device %s devid %llu generation %llu scanned by %s (%d)",
						  path, devid, found_transid,
						  current->comm,
						  task_pid_nr(current));
				return ERR_PTR(-EEXIST);
			}
			btrfs_info_in_rcu(NULL,
	"devid %llu device path %s changed to %s scanned by %s (%d)",
					  devid, btrfs_dev_name(device),
					  path, current->comm,
					  task_pid_nr(current));
		}

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name) {
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-ENOMEM);
		}
		rcu_string_free(device->name);
		rcu_assign_pointer(device->name, name);
		if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
			fs_devices->missing_devices--;
			clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
		}
		device->devt = path_devt;
	}

	/*
	 * Unmount does not free the btrfs_device struct but would zero
	 * generation along with most of the other members. So just update
	 * it back. We need it to pick the disk with largest generation
	 * (as above).
	 */
	if (!fs_devices->opened) {
		device->generation = found_transid;
		fs_devices->latest_generation = max_t(u64, found_transid,
						fs_devices->latest_generation);
	}

	fs_devices->total_devices = btrfs_super_num_devices(disk_super);

	mutex_unlock(&fs_devices->device_list_mutex);
	return device;
}
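/*
 * Summary added for clarity (not part of the original file) of what a caller
 * of device_list_add() can get back:
 *
 *   - a device pointer: the device was registered or its path was updated
 *   - ERR_PTR(-EBUSY): the device belongs to an already mounted filesystem
 *   - ERR_PTR(-EEXIST): a stale duplicate with a higher generation, or a
 *     second device node with the same devid while mounted
 *   - ERR_PTR(-EAGAIN): an interrupted metadata_uuid change was detected
 *   - ERR_PTR(-ENOMEM) and similar for allocation or lookup failures
 */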
static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
{
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_device *device;
	struct btrfs_device *orig_dev;
	int ret = 0;

	lockdep_assert_held(&uuid_mutex);

	fs_devices = alloc_fs_devices(orig->fsid);
	if (IS_ERR(fs_devices))
		return fs_devices;

	fs_devices->total_devices = orig->total_devices;

	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
		const char *dev_path = NULL;

		/*
		 * This is ok to do without RCU read locked because we hold the
		 * uuid mutex so nothing we touch in here is going to disappear.
		 */
		if (orig_dev->name)
			dev_path = orig_dev->name->str;

		device = btrfs_alloc_device(NULL, &orig_dev->devid,
					    orig_dev->uuid, dev_path);
		if (IS_ERR(device)) {
			ret = PTR_ERR(device);
			goto error;
		}

		if (orig_dev->zone_info) {
			struct btrfs_zoned_device_info *zone_info;

			zone_info = btrfs_clone_dev_zone_info(orig_dev);
			if (!zone_info) {
				btrfs_free_device(device);
				ret = -ENOMEM;
				goto error;
			}
			device->zone_info = zone_info;
		}

		list_add(&device->dev_list, &fs_devices->devices);
		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	}
	return fs_devices;
error:
	free_fs_devices(fs_devices);
	return ERR_PTR(ret);
}

static void __btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices,
				      struct btrfs_device **latest_dev)
{
	struct btrfs_device *device, *next;

	/* This is the initialized path, it is safe to release the devices. */
	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
		if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state)) {
			if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
				      &device->dev_state) &&
			    !test_bit(BTRFS_DEV_STATE_MISSING,
				      &device->dev_state) &&
			    (!*latest_dev ||
			     device->generation > (*latest_dev)->generation)) {
				*latest_dev = device;
			}
			continue;
		}

		/*
		 * We have already validated the presence of BTRFS_DEV_REPLACE_DEVID
		 * in btrfs_init_dev_replace() so just continue.
		 */
		if (device->devid == BTRFS_DEV_REPLACE_DEVID)
			continue;

		if (device->bdev_file) {
			fput(device->bdev_file);
			device->bdev = NULL;
			device->bdev_file = NULL;
			fs_devices->open_devices--;
		}
		if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
			list_del_init(&device->dev_alloc_list);
			clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
			fs_devices->rw_devices--;
		}
		list_del_init(&device->dev_list);
		fs_devices->num_devices--;
		btrfs_free_device(device);
	}
}

/*
 * After we have read the system tree and know devids belonging to this
 * filesystem, remove the device which does not belong there.
 */
void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *latest_dev = NULL;
	struct btrfs_fs_devices *seed_dev;

	mutex_lock(&uuid_mutex);
	__btrfs_free_extra_devids(fs_devices, &latest_dev);

	list_for_each_entry(seed_dev, &fs_devices->seed_list, seed_list)
		__btrfs_free_extra_devids(seed_dev, &latest_dev);

	fs_devices->latest_dev = latest_dev;

	mutex_unlock(&uuid_mutex);
}

static void btrfs_close_bdev(struct btrfs_device *device)
{
	if (!device->bdev)
		return;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
		sync_blockdev(device->bdev);
		invalidate_bdev(device->bdev);
	}

	fput(device->bdev_file);
}

static void btrfs_close_one_device(struct btrfs_device *device)
{
	struct btrfs_fs_devices *fs_devices = device->fs_devices;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
		list_del_init(&device->dev_alloc_list);
		fs_devices->rw_devices--;
	}

	if (device->devid == BTRFS_DEV_REPLACE_DEVID)
		clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);

	if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
		clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
		fs_devices->missing_devices--;
	}

	btrfs_close_bdev(device);
	if (device->bdev) {
		fs_devices->open_devices--;
		device->bdev = NULL;
		device->bdev_file = NULL;
	}
	clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
	btrfs_destroy_dev_zone_info(device);

	device->fs_info = NULL;
	atomic_set(&device->dev_stats_ccnt, 0);
	extent_io_tree_release(&device->alloc_state);

	/*
	 * Reset the flush error record. We might have a transient flush error
	 * in this mount, and if so we aborted the current transaction and set
	 * the fs to an error state, guaranteeing no super blocks can be further
	 * committed. However that error might be transient and if we unmount the
	 * filesystem and mount it again, we should allow the mount to succeed
	 * (btrfs_check_rw_degradable() should not fail) - if after mounting the
	 * filesystem again we still get flush errors, then we will again abort
	 * any transaction and set the error state, guaranteeing no commits of
	 * unsafe super blocks.
	 */
	device->last_flush_error = 0;

	/* Verify the device is back in a pristine state */
	WARN_ON(test_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state));
	WARN_ON(test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state));
	WARN_ON(!list_empty(&device->dev_alloc_list));
	WARN_ON(!list_empty(&device->post_commit_list));
}

static void close_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device, *tmp;

	lockdep_assert_held(&uuid_mutex);

	if (--fs_devices->opened > 0)
		return;

	list_for_each_entry_safe(device, tmp, &fs_devices->devices, dev_list)
		btrfs_close_one_device(device);

	WARN_ON(fs_devices->open_devices);
	WARN_ON(fs_devices->rw_devices);
	fs_devices->opened = 0;
	fs_devices->seeding = false;
	fs_devices->fs_info = NULL;
}

void btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	LIST_HEAD(list);
	struct btrfs_fs_devices *tmp;

	mutex_lock(&uuid_mutex);
	close_fs_devices(fs_devices);
	if (!fs_devices->opened) {
		list_splice_init(&fs_devices->seed_list, &list);

		/*
		 * If the struct btrfs_fs_devices is not assembled with any
		 * other device, it can be re-initialized during the next mount
		 * without needing the device-scan step. Therefore, it can be
		 * fully freed.
		 */
		if (fs_devices->num_devices == 1) {
			list_del(&fs_devices->fs_list);
			free_fs_devices(fs_devices);
		}
	}

	list_for_each_entry_safe(fs_devices, tmp, &list, seed_list) {
		close_fs_devices(fs_devices);
		list_del(&fs_devices->seed_list);
		free_fs_devices(fs_devices);
	}
	mutex_unlock(&uuid_mutex);
}

static int open_fs_devices(struct btrfs_fs_devices *fs_devices,
			   blk_mode_t flags, void *holder)
{
	struct btrfs_device *device;
	struct btrfs_device *latest_dev = NULL;
	struct btrfs_device *tmp_device;
	int ret = 0;

	list_for_each_entry_safe(device, tmp_device, &fs_devices->devices,
				 dev_list) {
		int ret2;

		ret2 = btrfs_open_one_device(fs_devices, device, flags, holder);
		if (ret2 == 0 &&
		    (!latest_dev || device->generation > latest_dev->generation)) {
			latest_dev = device;
		} else if (ret2 == -ENODATA) {
			fs_devices->num_devices--;
			list_del(&device->dev_list);
			btrfs_free_device(device);
		}
		if (ret == 0 && ret2 != 0)
			ret = ret2;
	}

	if (fs_devices->open_devices == 0) {
		if (ret)
			return ret;
		return -EINVAL;
	}

	fs_devices->opened = 1;
	fs_devices->latest_dev = latest_dev;
	fs_devices->total_rw_bytes = 0;
	fs_devices->chunk_alloc_policy = BTRFS_CHUNK_ALLOC_REGULAR;
	fs_devices->read_policy = BTRFS_READ_POLICY_PID;

	return 0;
}

static int devid_cmp(void *priv, const struct list_head *a,
		     const struct list_head *b)
{
	const struct btrfs_device *dev1, *dev2;

	dev1 = list_entry(a, struct btrfs_device, dev_list);
	dev2 = list_entry(b, struct btrfs_device, dev_list);

	if (dev1->devid < dev2->devid)
		return -1;
	else if (dev1->devid > dev2->devid)
		return 1;
	return 0;
}
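/*
 * Note added for clarity (not from the original source): devid_cmp() follows
 * the list_sort() comparator contract - return a negative value, zero or a
 * positive value when @a sorts before, equal to or after @b - so passing it
 * to list_sort() as done in btrfs_open_devices() below yields the device
 * list in ascending devid order.
 */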
int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       blk_mode_t flags, void *holder)
{
	int ret;

	lockdep_assert_held(&uuid_mutex);
	/*
	 * The device_list_mutex cannot be taken here in case opening the
	 * underlying device takes further locks like open_mutex.
	 *
	 * We also don't need the lock here as this is called during mount and
	 * exclusion is provided by uuid_mutex
	 */

	if (fs_devices->opened) {
		fs_devices->opened++;
		ret = 0;
	} else {
		list_sort(NULL, &fs_devices->devices, devid_cmp);
		ret = open_fs_devices(fs_devices, flags, holder);
	}

	return ret;
}

void btrfs_release_disk_super(struct btrfs_super_block *super)
{
	struct page *page = virt_to_page(super);

	put_page(page);
}

static struct btrfs_super_block *btrfs_read_disk_super(struct block_device *bdev,
						       u64 bytenr, u64 bytenr_orig)
{
	struct btrfs_super_block *disk_super;
	struct page *page;
	void *p;
	pgoff_t index;

	/* make sure our super fits in the device */
	if (bytenr + PAGE_SIZE >= bdev_nr_bytes(bdev))
		return ERR_PTR(-EINVAL);

	/* make sure our super fits in the page */
	if (sizeof(*disk_super) > PAGE_SIZE)
		return ERR_PTR(-EINVAL);

	/* make sure our super doesn't straddle pages on disk */
	index = bytenr >> PAGE_SHIFT;
	if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_SHIFT != index)
		return ERR_PTR(-EINVAL);

	/* pull in the page with our super */
	page = read_cache_page_gfp(bdev->bd_mapping, index, GFP_KERNEL);

	if (IS_ERR(page))
		return ERR_CAST(page);

	p = page_address(page);

	/* align our pointer to the offset of the super block */
	disk_super = p + offset_in_page(bytenr);

	if (btrfs_super_bytenr(disk_super) != bytenr_orig ||
	    btrfs_super_magic(disk_super) != BTRFS_MAGIC) {
		btrfs_release_disk_super(p);
		return ERR_PTR(-EINVAL);
	}

	if (disk_super->label[0] && disk_super->label[BTRFS_LABEL_SIZE - 1])
		disk_super->label[BTRFS_LABEL_SIZE - 1] = 0;

	return disk_super;
}
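/*
 * Worked example added for clarity (not from the original source), assuming
 * 4K pages and the 4KiB on-disk super block: for the primary super block at
 * bytenr 65536, index = 65536 >> 12 = 16 and offset_in_page(65536) = 0, so
 * the super block starts at the beginning of page 16 and the straddle check
 * above trivially passes.  A hypothetical bytenr of 65024 would fail that
 * check, because (65024 + 4095) >> 12 = 16 while 65024 >> 12 = 15, i.e. the
 * structure would cross a page boundary.
 */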
int btrfs_forget_devices(dev_t devt)
{
	int ret;

	mutex_lock(&uuid_mutex);
	ret = btrfs_free_stale_devices(devt, NULL);
	mutex_unlock(&uuid_mutex);

	return ret;
}

static bool btrfs_skip_registration(struct btrfs_super_block *disk_super,
				    const char *path, dev_t devt,
				    bool mount_arg_dev)
{
	struct btrfs_fs_devices *fs_devices;

	/*
	 * Do not skip device registration for mounted devices with matching
	 * maj:min but different paths. Booting without initrd relies on
	 * /dev/root initially, later replaced with the actual root device.
	 * A successful scan ensures grub2-probe selects the correct device.
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		struct btrfs_device *device;

		mutex_lock(&fs_devices->device_list_mutex);

		if (!fs_devices->opened) {
			mutex_unlock(&fs_devices->device_list_mutex);
			continue;
		}

		list_for_each_entry(device, &fs_devices->devices, dev_list) {
			if (device->bdev && (device->bdev->bd_dev == devt) &&
			    strcmp(device->name->str, path) != 0) {
				mutex_unlock(&fs_devices->device_list_mutex);

				/* Do not skip registration. */
				return false;
			}
		}
		mutex_unlock(&fs_devices->device_list_mutex);
	}

	if (!mount_arg_dev && btrfs_super_num_devices(disk_super) == 1 &&
	    !(btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING))
		return true;

	return false;
}

/*
 * Look for a btrfs signature on a device. This may be called out of the mount
 * path and we are not allowed to call set_blocksize during the scan. The
 * superblock is read via pagecache.
 *
 * With @mount_arg_dev it's a scan during mount time that will always register
 * the device or return an error. Multi-device and seeding devices are
 * registered in both cases.
 */
struct btrfs_device *btrfs_scan_one_device(const char *path, blk_mode_t flags,
					   bool mount_arg_dev)
{
	struct btrfs_super_block *disk_super;
	bool new_device_added = false;
	struct btrfs_device *device = NULL;
	struct file *bdev_file;
	u64 bytenr;
	dev_t devt;
	int ret;

	lockdep_assert_held(&uuid_mutex);

	/*
	 * Avoid an exclusive open here, as the systemd-udev may initiate the
	 * device scan which may race with the user's mount or mkfs command,
	 * resulting in failure.
	 * Since the device scan is solely for reading purposes, there is no
	 * need for an exclusive open. Additionally, the devices are read again
	 * during the mount process. It is ok to get some inconsistent
	 * values temporarily, as the device paths of the fsid are the only
	 * required information for assembling the volume.
	 */
	bdev_file = bdev_file_open_by_path(path, flags, NULL, NULL);
	if (IS_ERR(bdev_file))
		return ERR_CAST(bdev_file);

	/*
	 * We would like to check all the super blocks, but doing so would
	 * allow a mount to succeed after a mkfs from a different filesystem.
	 * Currently, recovery from a bad primary btrfs superblock is done
	 * using the userspace command 'btrfs check --super'.
	 */
	ret = btrfs_sb_log_location_bdev(file_bdev(bdev_file), 0, READ, &bytenr);
	if (ret) {
		device = ERR_PTR(ret);
		goto error_bdev_put;
	}

	disk_super = btrfs_read_disk_super(file_bdev(bdev_file), bytenr,
					   btrfs_sb_offset(0));
	if (IS_ERR(disk_super)) {
		device = ERR_CAST(disk_super);
		goto error_bdev_put;
	}

	devt = file_bdev(bdev_file)->bd_dev;
	if (btrfs_skip_registration(disk_super, path, devt, mount_arg_dev)) {
		pr_debug("BTRFS: skip registering single non-seed device %s (%d:%d)\n",
			 path, MAJOR(devt), MINOR(devt));

		btrfs_free_stale_devices(devt, NULL);

		device = NULL;
		goto free_disk_super;
	}

	device = device_list_add(path, disk_super, &new_device_added);
	if (!IS_ERR(device) && new_device_added)
		btrfs_free_stale_devices(device->devt, device);

free_disk_super:
	btrfs_release_disk_super(disk_super);

error_bdev_put:
	fput(bdev_file);

	return device;
}

/*
 * Try to find a chunk that intersects [start, start + len] range and when one
 * such is found, record the end of it in *start
 */
static bool contains_pending_extent(struct btrfs_device *device, u64 *start,
				    u64 len)
{
	u64 physical_start, physical_end;

	lockdep_assert_held(&device->fs_info->chunk_mutex);

	if (find_first_extent_bit(&device->alloc_state, *start,
				  &physical_start, &physical_end,
				  CHUNK_ALLOCATED, NULL)) {

		if (in_range(physical_start, *start, len) ||
		    in_range(*start, physical_start,
			     physical_end + 1 - physical_start)) {
			*start = physical_end + 1;
			return true;
		}
	}
	return false;
}
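/*
 * Worked example added for clarity (not from the original source), with
 * hypothetical numbers: for a search range starting at *start = 1MiB with
 * len = 1MiB and an allocated extent covering [1.5MiB, 2.5MiB), the first
 * in_range() check above is true because physical_start = 1.5MiB lies inside
 * [1MiB, 2MiB), so *start is advanced past the extent to physical_end + 1 =
 * 2.5MiB.  The second in_range() covers the mirrored case where *start
 * itself falls inside an already allocated extent.
 */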
static u64 dev_extent_search_start(struct btrfs_device *device)
{
	switch (device->fs_devices->chunk_alloc_policy) {
	case BTRFS_CHUNK_ALLOC_REGULAR:
		return BTRFS_DEVICE_RANGE_RESERVED;
	case BTRFS_CHUNK_ALLOC_ZONED:
		/*
		 * We don't care about the starting region like regular
		 * allocator, because we anyway use/reserve the first two zones
		 * for superblock logging.
		 */
		return 0;
	default:
		BUG();
	}
}

static bool dev_extent_hole_check_zoned(struct btrfs_device *device,
					u64 *hole_start, u64 *hole_size,
					u64 num_bytes)
{
	u64 zone_size = device->zone_info->zone_size;
	u64 pos;
	int ret;
	bool changed = false;

	ASSERT(IS_ALIGNED(*hole_start, zone_size));

	while (*hole_size > 0) {
		pos = btrfs_find_allocatable_zones(device, *hole_start,
						   *hole_start + *hole_size,
						   num_bytes);
		if (pos != *hole_start) {
			*hole_size = *hole_start + *hole_size - pos;
			*hole_start = pos;
			changed = true;
			if (*hole_size < num_bytes)
				break;
		}

		ret = btrfs_ensure_empty_zones(device, pos, num_bytes);

		/* Range is ensured to be empty */
		if (!ret)
			return changed;

		/* Given hole range was invalid (outside of device) */
		if (ret == -ERANGE) {
			*hole_start += *hole_size;
			*hole_size = 0;
			return true;
		}

		*hole_start += zone_size;
		*hole_size -= zone_size;
		changed = true;
	}

	return changed;
}

/*
 * Check if specified hole is suitable for allocation.
 *
 * @device:	the device which we have the hole
 * @hole_start: starting position of the hole
 * @hole_size:	the size of the hole
 * @num_bytes:	the size of the free space that we need
 *
 * This function may modify @hole_start and @hole_size to reflect the suitable
 * position for allocation. Returns true if the hole position is updated,
 * false otherwise.
 */
static bool dev_extent_hole_check(struct btrfs_device *device, u64 *hole_start,
				  u64 *hole_size, u64 num_bytes)
{
	bool changed = false;
	u64 hole_end = *hole_start + *hole_size;

	for (;;) {
		/*
		 * Check before we set max_hole_start, otherwise we could end up
		 * sending back this offset anyway.
		 */
		if (contains_pending_extent(device, hole_start, *hole_size)) {
			if (hole_end >= *hole_start)
				*hole_size = hole_end - *hole_start;
			else
				*hole_size = 0;
			changed = true;
		}

		switch (device->fs_devices->chunk_alloc_policy) {
		case BTRFS_CHUNK_ALLOC_REGULAR:
			/* No extra check */
			break;
		case BTRFS_CHUNK_ALLOC_ZONED:
			if (dev_extent_hole_check_zoned(device, hole_start,
							hole_size, num_bytes)) {
				changed = true;
				/*
				 * The changed hole can contain pending extent.
				 * Loop again to check that.
				 */
				continue;
			}
			break;
		default:
			BUG();
		}

		break;
	}

	return changed;
}

/*
 * Find free space in the specified device.
 *
 * @device:	the device which we search the free space in
 * @num_bytes:	the size of the free space that we need
 * @start:	store the start of the free space.
 * @len:	the size of the free space that we find, or the size of the max
 *		free space if we don't find suitable free space
 *
 * This does a pretty simple search, the expectation is that it is called very
 * infrequently and that a given device has a small number of extents.
 *
 * @start is used to store the start of the free space if we find it. But if
 * we don't find suitable free space, it will be used to store the start
 * position of the max free space.
 *
 * @len is used to store the size of the free space that we find.
 * But if we don't find suitable free space, it is used to store the size of
 * the max free space.
 *
 * NOTE: This function will search *commit* root of device tree, and does extra
 * check to ensure dev extents are not double allocated.
 * This makes the function safe to allocate dev extents but may not report
 * correct usable device space, as device extent freed in current transaction
 * is not reported as available.
 */
static int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
				u64 *start, u64 *len)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_key key;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 search_start;
	u64 hole_size;
	u64 max_hole_start;
	u64 max_hole_size = 0;
	u64 extent_end;
	u64 search_end = device->total_bytes;
	int ret;
	int slot;
	struct extent_buffer *l;

	search_start = dev_extent_search_start(device);
	max_hole_start = search_start;

	WARN_ON(device->zone_info &&
		!IS_ALIGNED(num_bytes, device->zone_info->zone_size));

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}
again:
	if (search_start >= search_end ||
	    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
		ret = -ENOSPC;
		goto out;
	}

	path->reada = READA_FORWARD;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_backwards(root, &key, path);
	if (ret < 0)
		goto out;

	while (search_start < search_end) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (key.type != BTRFS_DEV_EXTENT_KEY)
			goto next;

		if (key.offset > search_end)
			break;

		if (key.offset > search_start) {
			hole_size = key.offset - search_start;
			dev_extent_hole_check(device, &search_start, &hole_size,
					      num_bytes);

			if (hole_size > max_hole_size) {
				max_hole_start = search_start;
				max_hole_size = hole_size;
			}

			/*
			 * If this free space is greater than what we need,
			 * it must be the max free space that we have found
			 * until now, so max_hole_start must point to the start
			 * of this free space and the length of this free space
			 * is stored in max_hole_size. Thus, we return
			 * max_hole_start and max_hole_size and go back to the
			 * caller.
			 */
			if (hole_size >= num_bytes) {
				ret = 0;
				goto out;
			}
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (extent_end > search_start)
			search_start = extent_end;
next:
		path->slots[0]++;
		cond_resched();
	}

	/*
	 * At this point, search_start should be the end of
	 * allocated dev extents, and when shrinking the device,
	 * search_end may be smaller than search_start.
	 */
	if (search_end > search_start) {
		hole_size = search_end - search_start;
		if (dev_extent_hole_check(device, &search_start, &hole_size,
					  num_bytes)) {
			btrfs_release_path(path);
			goto again;
		}

		if (hole_size > max_hole_size) {
			max_hole_start = search_start;
			max_hole_size = hole_size;
		}
	}

	/* See above. */
	if (max_hole_size < num_bytes)
		ret = -ENOSPC;
	else
		ret = 0;

	ASSERT(max_hole_start + max_hole_size <= search_end);
out:
	btrfs_free_path(path);
	*start = max_hole_start;
	if (len)
		*len = max_hole_size;
	return ret;
}
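/*
 * Illustrative usage sketch added for clarity (not part of the original
 * file), with a hypothetical 1GiB request: on success, dev_offset points at
 * a hole of at least SZ_1G; on -ENOSPC, dev_offset and hole_len describe the
 * largest hole found, which a caller can use to size a smaller allocation.
 *
 *	u64 dev_offset, hole_len;
 *	int ret;
 *
 *	ret = find_free_dev_extent(device, SZ_1G, &dev_offset, &hole_len);
 */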
static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
				 struct btrfs_device *device,
				 u64 start, u64 *dev_extent_len)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf = NULL;
	struct btrfs_dev_extent *extent = NULL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
again:
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid,
					  BTRFS_DEV_EXTENT_KEY);
		if (ret)
			goto out;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
		BUG_ON(found_key.offset > start || found_key.offset +
		       btrfs_dev_extent_length(leaf, extent) < start);
		key = found_key;
		btrfs_release_path(path);
		goto again;
	} else if (ret == 0) {
		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
	} else {
		goto out;
	}

	*dev_extent_len = btrfs_dev_extent_length(leaf, extent);

	ret = btrfs_del_item(trans, root, path);
	if (ret == 0)
		set_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags);
out:
	btrfs_free_path(path);
	return ret;
}

static u64 find_next_chunk(struct btrfs_fs_info *fs_info)
{
	struct rb_node *n;
	u64 ret = 0;

	read_lock(&fs_info->mapping_tree_lock);
	n = rb_last(&fs_info->mapping_tree.rb_root);
	if (n) {
		struct btrfs_chunk_map *map;

		map = rb_entry(n, struct btrfs_chunk_map, rb_node);
		ret = map->start + map->chunk_len;
	}
	read_unlock(&fs_info->mapping_tree_lock);

	return ret;
}
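/*
 * Example added for clarity (not from the original source), with
 * hypothetical numbers: if the right-most entry in the chunk mapping tree
 * describes a chunk at logical start 2GiB with a length of 1GiB,
 * find_next_chunk() returns 3GiB, the first logical offset past all existing
 * chunks, which becomes the start of the next chunk to be allocated.
 */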
return -ENOMEM; 1843 1844 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 1845 key.type = BTRFS_DEV_ITEM_KEY; 1846 key.offset = (u64)-1; 1847 1848 ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0); 1849 if (ret < 0) 1850 goto error; 1851 1852 if (ret == 0) { 1853 /* Corruption */ 1854 btrfs_err(fs_info, "corrupted chunk tree devid -1 matched"); 1855 ret = -EUCLEAN; 1856 goto error; 1857 } 1858 1859 ret = btrfs_previous_item(fs_info->chunk_root, path, 1860 BTRFS_DEV_ITEMS_OBJECTID, 1861 BTRFS_DEV_ITEM_KEY); 1862 if (ret) { 1863 *devid_ret = 1; 1864 } else { 1865 btrfs_item_key_to_cpu(path->nodes[0], &found_key, 1866 path->slots[0]); 1867 *devid_ret = found_key.offset + 1; 1868 } 1869 ret = 0; 1870 error: 1871 btrfs_free_path(path); 1872 return ret; 1873 } 1874 1875 /* 1876 * the device information is stored in the chunk root 1877 * the btrfs_device struct should be fully filled in 1878 */ 1879 static int btrfs_add_dev_item(struct btrfs_trans_handle *trans, 1880 struct btrfs_device *device) 1881 { 1882 int ret; 1883 struct btrfs_path *path; 1884 struct btrfs_dev_item *dev_item; 1885 struct extent_buffer *leaf; 1886 struct btrfs_key key; 1887 unsigned long ptr; 1888 1889 path = btrfs_alloc_path(); 1890 if (!path) 1891 return -ENOMEM; 1892 1893 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 1894 key.type = BTRFS_DEV_ITEM_KEY; 1895 key.offset = device->devid; 1896 1897 btrfs_reserve_chunk_metadata(trans, true); 1898 ret = btrfs_insert_empty_item(trans, trans->fs_info->chunk_root, path, 1899 &key, sizeof(*dev_item)); 1900 btrfs_trans_release_chunk_metadata(trans); 1901 if (ret) 1902 goto out; 1903 1904 leaf = path->nodes[0]; 1905 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item); 1906 1907 btrfs_set_device_id(leaf, dev_item, device->devid); 1908 btrfs_set_device_generation(leaf, dev_item, 0); 1909 btrfs_set_device_type(leaf, dev_item, device->type); 1910 btrfs_set_device_io_align(leaf, dev_item, device->io_align); 1911 btrfs_set_device_io_width(leaf, dev_item, device->io_width); 1912 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size); 1913 btrfs_set_device_total_bytes(leaf, dev_item, 1914 btrfs_device_get_disk_total_bytes(device)); 1915 btrfs_set_device_bytes_used(leaf, dev_item, 1916 btrfs_device_get_bytes_used(device)); 1917 btrfs_set_device_group(leaf, dev_item, 0); 1918 btrfs_set_device_seek_speed(leaf, dev_item, 0); 1919 btrfs_set_device_bandwidth(leaf, dev_item, 0); 1920 btrfs_set_device_start_offset(leaf, dev_item, 0); 1921 1922 ptr = btrfs_device_uuid(dev_item); 1923 write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE); 1924 ptr = btrfs_device_fsid(dev_item); 1925 write_extent_buffer(leaf, trans->fs_info->fs_devices->metadata_uuid, 1926 ptr, BTRFS_FSID_SIZE); 1927 btrfs_mark_buffer_dirty(trans, leaf); 1928 1929 ret = 0; 1930 out: 1931 btrfs_free_path(path); 1932 return ret; 1933 } 1934 1935 /* 1936 * Function to update ctime/mtime for a given device path. 1937 * Mainly used for ctime/mtime based probe like libblkid. 1938 * 1939 * We don't care about errors here, this is just to be kind to userspace. 
1940 */ 1941 static void update_dev_time(const char *device_path) 1942 { 1943 struct path path; 1944 int ret; 1945 1946 ret = kern_path(device_path, LOOKUP_FOLLOW, &path); 1947 if (ret) 1948 return; 1949 1950 inode_update_time(d_inode(path.dentry), S_MTIME | S_CTIME | S_VERSION); 1951 path_put(&path); 1952 } 1953 1954 static int btrfs_rm_dev_item(struct btrfs_trans_handle *trans, 1955 struct btrfs_device *device) 1956 { 1957 struct btrfs_root *root = device->fs_info->chunk_root; 1958 int ret; 1959 struct btrfs_path *path; 1960 struct btrfs_key key; 1961 1962 path = btrfs_alloc_path(); 1963 if (!path) 1964 return -ENOMEM; 1965 1966 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 1967 key.type = BTRFS_DEV_ITEM_KEY; 1968 key.offset = device->devid; 1969 1970 btrfs_reserve_chunk_metadata(trans, false); 1971 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 1972 btrfs_trans_release_chunk_metadata(trans); 1973 if (ret) { 1974 if (ret > 0) 1975 ret = -ENOENT; 1976 goto out; 1977 } 1978 1979 ret = btrfs_del_item(trans, root, path); 1980 out: 1981 btrfs_free_path(path); 1982 return ret; 1983 } 1984 1985 /* 1986 * Verify that @num_devices satisfies the RAID profile constraints in the whole 1987 * filesystem. It's up to the caller to adjust that number to account for e.g. 1988 * device replace. 1989 */ 1990 static int btrfs_check_raid_min_devices(struct btrfs_fs_info *fs_info, 1991 u64 num_devices) 1992 { 1993 u64 all_avail; 1994 unsigned seq; 1995 int i; 1996 1997 do { 1998 seq = read_seqbegin(&fs_info->profiles_lock); 1999 2000 all_avail = fs_info->avail_data_alloc_bits | 2001 fs_info->avail_system_alloc_bits | 2002 fs_info->avail_metadata_alloc_bits; 2003 } while (read_seqretry(&fs_info->profiles_lock, seq)); 2004 2005 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) { 2006 if (!(all_avail & btrfs_raid_array[i].bg_flag)) 2007 continue; 2008 2009 if (num_devices < btrfs_raid_array[i].devs_min) 2010 return btrfs_raid_array[i].mindev_error; 2011 } 2012 2013 return 0; 2014 } 2015 2016 static struct btrfs_device * btrfs_find_next_active_device( 2017 struct btrfs_fs_devices *fs_devs, struct btrfs_device *device) 2018 { 2019 struct btrfs_device *next_device; 2020 2021 list_for_each_entry(next_device, &fs_devs->devices, dev_list) { 2022 if (next_device != device && 2023 !test_bit(BTRFS_DEV_STATE_MISSING, &next_device->dev_state) 2024 && next_device->bdev) 2025 return next_device; 2026 } 2027 2028 return NULL; 2029 } 2030 2031 /* 2032 * Helper function to check if the given device is part of s_bdev / latest_dev 2033 * and replace it with the provided or the next active device. In the context 2034 * where this function is called, there should always be another active device 2035 * (or the provided next_device). 2036 */ 2037 void __cold btrfs_assign_next_active_device(struct btrfs_device *device, 2038 struct btrfs_device *next_device) 2039 { 2040 struct btrfs_fs_info *fs_info = device->fs_info; 2041 2042 if (!next_device) 2043 next_device = btrfs_find_next_active_device(fs_info->fs_devices, 2044 device); 2045 ASSERT(next_device); 2046 2047 if (fs_info->sb->s_bdev && 2048 (fs_info->sb->s_bdev == device->bdev)) 2049 fs_info->sb->s_bdev = next_device->bdev; 2050 2051 if (fs_info->fs_devices->latest_dev->bdev == device->bdev) 2052 fs_info->fs_devices->latest_dev = next_device; 2053 } 2054 2055 /* 2056 * Return btrfs_fs_devices::num_devices excluding the device that's being 2057 * currently replaced.
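* (While a replace is running, the replace target is temporarily counted in num_devices, hence the decrement below.)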
2058 */ 2059 static u64 btrfs_num_devices(struct btrfs_fs_info *fs_info) 2060 { 2061 u64 num_devices = fs_info->fs_devices->num_devices; 2062 2063 down_read(&fs_info->dev_replace.rwsem); 2064 if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) { 2065 ASSERT(num_devices > 1); 2066 num_devices--; 2067 } 2068 up_read(&fs_info->dev_replace.rwsem); 2069 2070 return num_devices; 2071 } 2072 2073 static void btrfs_scratch_superblock(struct btrfs_fs_info *fs_info, 2074 struct block_device *bdev, int copy_num) 2075 { 2076 struct btrfs_super_block *disk_super; 2077 const size_t len = sizeof(disk_super->magic); 2078 const u64 bytenr = btrfs_sb_offset(copy_num); 2079 int ret; 2080 2081 disk_super = btrfs_read_disk_super(bdev, bytenr, bytenr); 2082 if (IS_ERR(disk_super)) 2083 return; 2084 2085 memset(&disk_super->magic, 0, len); 2086 folio_mark_dirty(virt_to_folio(disk_super)); 2087 btrfs_release_disk_super(disk_super); 2088 2089 ret = sync_blockdev_range(bdev, bytenr, bytenr + len - 1); 2090 if (ret) 2091 btrfs_warn(fs_info, "error clearing superblock number %d (%d)", 2092 copy_num, ret); 2093 } 2094 2095 void btrfs_scratch_superblocks(struct btrfs_fs_info *fs_info, struct btrfs_device *device) 2096 { 2097 int copy_num; 2098 struct block_device *bdev = device->bdev; 2099 2100 if (!bdev) 2101 return; 2102 2103 for (copy_num = 0; copy_num < BTRFS_SUPER_MIRROR_MAX; copy_num++) { 2104 if (bdev_is_zoned(bdev)) 2105 btrfs_reset_sb_log_zones(bdev, copy_num); 2106 else 2107 btrfs_scratch_superblock(fs_info, bdev, copy_num); 2108 } 2109 2110 /* Notify udev that device has changed */ 2111 btrfs_kobject_uevent(bdev, KOBJ_CHANGE); 2112 2113 /* Update ctime/mtime for device path for libblkid */ 2114 update_dev_time(device->name->str); 2115 } 2116 2117 int btrfs_rm_device(struct btrfs_fs_info *fs_info, 2118 struct btrfs_dev_lookup_args *args, 2119 struct file **bdev_file) 2120 { 2121 struct btrfs_trans_handle *trans; 2122 struct btrfs_device *device; 2123 struct btrfs_fs_devices *cur_devices; 2124 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 2125 u64 num_devices; 2126 int ret = 0; 2127 2128 if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) { 2129 btrfs_err(fs_info, "device remove not supported on extent tree v2 yet"); 2130 return -EINVAL; 2131 } 2132 2133 /* 2134 * The device list in fs_devices is accessed without locks (neither 2135 * uuid_mutex nor device_list_mutex) as it won't change on a mounted 2136 * filesystem and another device rm cannot run. 
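* (Device operations and balance are serialized by btrfs_exclop_start(), see the comment in btrfs_setup_sprout() below, so a second device rm cannot start while we are here.)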
2137 */ 2138 num_devices = btrfs_num_devices(fs_info); 2139 2140 ret = btrfs_check_raid_min_devices(fs_info, num_devices - 1); 2141 if (ret) 2142 return ret; 2143 2144 device = btrfs_find_device(fs_info->fs_devices, args); 2145 if (!device) { 2146 if (args->missing) 2147 ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND; 2148 else 2149 ret = -ENOENT; 2150 return ret; 2151 } 2152 2153 if (btrfs_pinned_by_swapfile(fs_info, device)) { 2154 btrfs_warn_in_rcu(fs_info, 2155 "cannot remove device %s (devid %llu) due to active swapfile", 2156 btrfs_dev_name(device), device->devid); 2157 return -ETXTBSY; 2158 } 2159 2160 if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) 2161 return BTRFS_ERROR_DEV_TGT_REPLACE; 2162 2163 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) && 2164 fs_info->fs_devices->rw_devices == 1) 2165 return BTRFS_ERROR_DEV_ONLY_WRITABLE; 2166 2167 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 2168 mutex_lock(&fs_info->chunk_mutex); 2169 list_del_init(&device->dev_alloc_list); 2170 device->fs_devices->rw_devices--; 2171 mutex_unlock(&fs_info->chunk_mutex); 2172 } 2173 2174 ret = btrfs_shrink_device(device, 0); 2175 if (ret) 2176 goto error_undo; 2177 2178 trans = btrfs_start_transaction(fs_info->chunk_root, 0); 2179 if (IS_ERR(trans)) { 2180 ret = PTR_ERR(trans); 2181 goto error_undo; 2182 } 2183 2184 ret = btrfs_rm_dev_item(trans, device); 2185 if (ret) { 2186 /* Any error in dev item removal is critical */ 2187 btrfs_crit(fs_info, 2188 "failed to remove device item for devid %llu: %d", 2189 device->devid, ret); 2190 btrfs_abort_transaction(trans, ret); 2191 btrfs_end_transaction(trans); 2192 return ret; 2193 } 2194 2195 clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state); 2196 btrfs_scrub_cancel_dev(device); 2197 2198 /* 2199 * The device list mutex makes sure that we don't change 2200 * the device list while someone else is writing out all 2201 * the device supers. Whoever is writing all supers should 2202 * lock the device list mutex before getting the number of 2203 * devices in the super block (super_copy). Conversely, 2204 * whoever updates the number of devices in the super block 2205 * (super_copy) should hold the device list mutex. 2206 */ 2207 2208 /* 2209 * In normal cases cur_devices == fs_devices. But when 2210 * deleting a seed device, cur_devices points to the seed's 2211 * own fs_devices listed under fs_devices->seed_list. 2212 */ 2213 cur_devices = device->fs_devices; 2214 mutex_lock(&fs_devices->device_list_mutex); 2215 list_del_rcu(&device->dev_list); 2216 2217 cur_devices->num_devices--; 2218 cur_devices->total_devices--; 2219 /* Update total_devices of the parent fs_devices if it's a seed */ 2220 if (cur_devices != fs_devices) 2221 fs_devices->total_devices--; 2222 2223 if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) 2224 cur_devices->missing_devices--; 2225 2226 btrfs_assign_next_active_device(device, NULL); 2227 2228 if (device->bdev_file) { 2229 cur_devices->open_devices--; 2230 /* remove sysfs entry */ 2231 btrfs_sysfs_remove_device(device); 2232 } 2233 2234 num_devices = btrfs_super_num_devices(fs_info->super_copy) - 1; 2235 btrfs_set_super_num_devices(fs_info->super_copy, num_devices); 2236 mutex_unlock(&fs_devices->device_list_mutex); 2237 2238 /* 2239 * At this point, the device is zero sized and detached from the 2240 * devices list. All that's left is to zero out the old supers and 2241 * free the device.
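* Scratching the supers wipes the btrfs magic, so later scans will no longer recognize the device as a btrfs member.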
2242 * 2243 * We cannot call btrfs_close_bdev() here because we're holding the sb 2244 * write lock, and fput() on the block device will pull in the 2245 * ->open_mutex on the block device and it's dependencies. Instead 2246 * just flush the device and let the caller do the final bdev_release. 2247 */ 2248 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 2249 btrfs_scratch_superblocks(fs_info, device); 2250 if (device->bdev) { 2251 sync_blockdev(device->bdev); 2252 invalidate_bdev(device->bdev); 2253 } 2254 } 2255 2256 *bdev_file = device->bdev_file; 2257 synchronize_rcu(); 2258 btrfs_free_device(device); 2259 2260 /* 2261 * This can happen if cur_devices is the private seed devices list. We 2262 * cannot call close_fs_devices() here because it expects the uuid_mutex 2263 * to be held, but in fact we don't need that for the private 2264 * seed_devices, we can simply decrement cur_devices->opened and then 2265 * remove it from our list and free the fs_devices. 2266 */ 2267 if (cur_devices->num_devices == 0) { 2268 list_del_init(&cur_devices->seed_list); 2269 ASSERT(cur_devices->opened == 1); 2270 cur_devices->opened--; 2271 free_fs_devices(cur_devices); 2272 } 2273 2274 ret = btrfs_commit_transaction(trans); 2275 2276 return ret; 2277 2278 error_undo: 2279 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 2280 mutex_lock(&fs_info->chunk_mutex); 2281 list_add(&device->dev_alloc_list, 2282 &fs_devices->alloc_list); 2283 device->fs_devices->rw_devices++; 2284 mutex_unlock(&fs_info->chunk_mutex); 2285 } 2286 return ret; 2287 } 2288 2289 void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device *srcdev) 2290 { 2291 struct btrfs_fs_devices *fs_devices; 2292 2293 lockdep_assert_held(&srcdev->fs_info->fs_devices->device_list_mutex); 2294 2295 /* 2296 * in case of fs with no seed, srcdev->fs_devices will point 2297 * to fs_devices of fs_info. However when the dev being replaced is 2298 * a seed dev it will point to the seed's local fs_devices. In short 2299 * srcdev will have its correct fs_devices in both the cases. 2300 */ 2301 fs_devices = srcdev->fs_devices; 2302 2303 list_del_rcu(&srcdev->dev_list); 2304 list_del(&srcdev->dev_alloc_list); 2305 fs_devices->num_devices--; 2306 if (test_bit(BTRFS_DEV_STATE_MISSING, &srcdev->dev_state)) 2307 fs_devices->missing_devices--; 2308 2309 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &srcdev->dev_state)) 2310 fs_devices->rw_devices--; 2311 2312 if (srcdev->bdev) 2313 fs_devices->open_devices--; 2314 } 2315 2316 void btrfs_rm_dev_replace_free_srcdev(struct btrfs_device *srcdev) 2317 { 2318 struct btrfs_fs_devices *fs_devices = srcdev->fs_devices; 2319 2320 mutex_lock(&uuid_mutex); 2321 2322 btrfs_close_bdev(srcdev); 2323 synchronize_rcu(); 2324 btrfs_free_device(srcdev); 2325 2326 /* if this is no devs we rather delete the fs_devices */ 2327 if (!fs_devices->num_devices) { 2328 /* 2329 * On a mounted FS, num_devices can't be zero unless it's a 2330 * seed. In case of a seed device being replaced, the replace 2331 * target added to the sprout FS, so there will be no more 2332 * device left under the seed FS. 
2333 */ 2334 ASSERT(fs_devices->seeding); 2335 2336 list_del_init(&fs_devices->seed_list); 2337 close_fs_devices(fs_devices); 2338 free_fs_devices(fs_devices); 2339 } 2340 mutex_unlock(&uuid_mutex); 2341 } 2342 2343 void btrfs_destroy_dev_replace_tgtdev(struct btrfs_device *tgtdev) 2344 { 2345 struct btrfs_fs_devices *fs_devices = tgtdev->fs_info->fs_devices; 2346 2347 mutex_lock(&fs_devices->device_list_mutex); 2348 2349 btrfs_sysfs_remove_device(tgtdev); 2350 2351 if (tgtdev->bdev) 2352 fs_devices->open_devices--; 2353 2354 fs_devices->num_devices--; 2355 2356 btrfs_assign_next_active_device(tgtdev, NULL); 2357 2358 list_del_rcu(&tgtdev->dev_list); 2359 2360 mutex_unlock(&fs_devices->device_list_mutex); 2361 2362 btrfs_scratch_superblocks(tgtdev->fs_info, tgtdev); 2363 2364 btrfs_close_bdev(tgtdev); 2365 synchronize_rcu(); 2366 btrfs_free_device(tgtdev); 2367 } 2368 2369 /* 2370 * Populate args from device at path. 2371 * 2372 * @fs_info: the filesystem 2373 * @args: the args to populate 2374 * @path: the path to the device 2375 * 2376 * This will read the super block of the device at @path and populate @args with 2377 * the devid, fsid, and uuid. This is meant to be used for ioctls that need to 2378 * lookup a device to operate on, but need to do it before we take any locks. 2379 * This properly handles the special case of "missing" that a user may pass in, 2380 * and does some basic sanity checks. The caller must make sure that @path is 2381 * properly NUL terminated before calling in, and must call 2382 * btrfs_put_dev_args_from_path() in order to free up the temporary fsid and 2383 * uuid buffers. 2384 * 2385 * Return: 0 for success, -errno for failure 2386 */ 2387 int btrfs_get_dev_args_from_path(struct btrfs_fs_info *fs_info, 2388 struct btrfs_dev_lookup_args *args, 2389 const char *path) 2390 { 2391 struct btrfs_super_block *disk_super; 2392 struct file *bdev_file; 2393 int ret; 2394 2395 if (!path || !path[0]) 2396 return -EINVAL; 2397 if (!strcmp(path, "missing")) { 2398 args->missing = true; 2399 return 0; 2400 } 2401 2402 args->uuid = kzalloc(BTRFS_UUID_SIZE, GFP_KERNEL); 2403 args->fsid = kzalloc(BTRFS_FSID_SIZE, GFP_KERNEL); 2404 if (!args->uuid || !args->fsid) { 2405 btrfs_put_dev_args_from_path(args); 2406 return -ENOMEM; 2407 } 2408 2409 ret = btrfs_get_bdev_and_sb(path, BLK_OPEN_READ, NULL, 0, 2410 &bdev_file, &disk_super); 2411 if (ret) { 2412 btrfs_put_dev_args_from_path(args); 2413 return ret; 2414 } 2415 2416 args->devid = btrfs_stack_device_id(&disk_super->dev_item); 2417 memcpy(args->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE); 2418 if (btrfs_fs_incompat(fs_info, METADATA_UUID)) 2419 memcpy(args->fsid, disk_super->metadata_uuid, BTRFS_FSID_SIZE); 2420 else 2421 memcpy(args->fsid, disk_super->fsid, BTRFS_FSID_SIZE); 2422 btrfs_release_disk_super(disk_super); 2423 fput(bdev_file); 2424 return 0; 2425 } 2426 2427 /* 2428 * Only use this jointly with btrfs_get_dev_args_from_path() because we will 2429 * allocate our ->uuid and ->fsid pointers, everybody else uses local variables 2430 * that don't need to be freed. 
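* Calling it more than once is safe, as the pointers are reset to NULL after being freed.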
2431 */ 2432 void btrfs_put_dev_args_from_path(struct btrfs_dev_lookup_args *args) 2433 { 2434 kfree(args->uuid); 2435 kfree(args->fsid); 2436 args->uuid = NULL; 2437 args->fsid = NULL; 2438 } 2439 2440 struct btrfs_device *btrfs_find_device_by_devspec( 2441 struct btrfs_fs_info *fs_info, u64 devid, 2442 const char *device_path) 2443 { 2444 BTRFS_DEV_LOOKUP_ARGS(args); 2445 struct btrfs_device *device; 2446 int ret; 2447 2448 if (devid) { 2449 args.devid = devid; 2450 device = btrfs_find_device(fs_info->fs_devices, &args); 2451 if (!device) 2452 return ERR_PTR(-ENOENT); 2453 return device; 2454 } 2455 2456 ret = btrfs_get_dev_args_from_path(fs_info, &args, device_path); 2457 if (ret) 2458 return ERR_PTR(ret); 2459 device = btrfs_find_device(fs_info->fs_devices, &args); 2460 btrfs_put_dev_args_from_path(&args); 2461 if (!device) 2462 return ERR_PTR(-ENOENT); 2463 return device; 2464 } 2465 2466 static struct btrfs_fs_devices *btrfs_init_sprout(struct btrfs_fs_info *fs_info) 2467 { 2468 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 2469 struct btrfs_fs_devices *old_devices; 2470 struct btrfs_fs_devices *seed_devices; 2471 2472 lockdep_assert_held(&uuid_mutex); 2473 if (!fs_devices->seeding) 2474 return ERR_PTR(-EINVAL); 2475 2476 /* 2477 * Private copy of the seed devices, anchored at 2478 * fs_info->fs_devices->seed_list 2479 */ 2480 seed_devices = alloc_fs_devices(NULL); 2481 if (IS_ERR(seed_devices)) 2482 return seed_devices; 2483 2484 /* 2485 * It's necessary to retain a copy of the original seed fs_devices in 2486 * fs_uuids so that filesystems which have been seeded can successfully 2487 * reference the seed device from open_seed_devices. This also supports 2488 * multiple seed filesystems. 2489 */ 2490 old_devices = clone_fs_devices(fs_devices); 2491 if (IS_ERR(old_devices)) { 2492 kfree(seed_devices); 2493 return old_devices; 2494 } 2495 2496 list_add(&old_devices->fs_list, &fs_uuids); 2497 2498 memcpy(seed_devices, fs_devices, sizeof(*seed_devices)); 2499 seed_devices->opened = 1; 2500 INIT_LIST_HEAD(&seed_devices->devices); 2501 INIT_LIST_HEAD(&seed_devices->alloc_list); 2502 mutex_init(&seed_devices->device_list_mutex); 2503 2504 return seed_devices; 2505 } 2506 2507 /* 2508 * Splice seed devices into the sprout fs_devices. 2509 * Generate a new fsid for the sprouted read-write filesystem. 2510 */ 2511 static void btrfs_setup_sprout(struct btrfs_fs_info *fs_info, 2512 struct btrfs_fs_devices *seed_devices) 2513 { 2514 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 2515 struct btrfs_super_block *disk_super = fs_info->super_copy; 2516 struct btrfs_device *device; 2517 u64 super_flags; 2518 2519 /* 2520 * We are updating the fsid; a thread heading into device_list_add() 2521 * could race, so uuid_mutex is needed. 2522 */ 2523 lockdep_assert_held(&uuid_mutex); 2524 2525 /* 2526 * The threads listed below may traverse dev_list but can do that without 2527 * device_list_mutex: 2528 * - All device ops and balance - as we are in btrfs_exclop_start. 2529 * - Various dev_list readers - are using RCU. 2530 * - btrfs_ioctl_fitrim() - is using RCU.
2531 * 2532 * For-read threads as below are using device_list_mutex: 2533 * - Readonly scrub btrfs_scrub_dev() 2534 * - Readonly scrub btrfs_scrub_progress() 2535 * - btrfs_get_dev_stats() 2536 */ 2537 lockdep_assert_held(&fs_devices->device_list_mutex); 2538 2539 list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices, 2540 synchronize_rcu); 2541 list_for_each_entry(device, &seed_devices->devices, dev_list) 2542 device->fs_devices = seed_devices; 2543 2544 fs_devices->seeding = false; 2545 fs_devices->num_devices = 0; 2546 fs_devices->open_devices = 0; 2547 fs_devices->missing_devices = 0; 2548 fs_devices->rotating = false; 2549 list_add(&seed_devices->seed_list, &fs_devices->seed_list); 2550 2551 generate_random_uuid(fs_devices->fsid); 2552 memcpy(fs_devices->metadata_uuid, fs_devices->fsid, BTRFS_FSID_SIZE); 2553 memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE); 2554 2555 super_flags = btrfs_super_flags(disk_super) & 2556 ~BTRFS_SUPER_FLAG_SEEDING; 2557 btrfs_set_super_flags(disk_super, super_flags); 2558 } 2559 2560 /* 2561 * Store the expected generation for seed devices in device items. 2562 */ 2563 static int btrfs_finish_sprout(struct btrfs_trans_handle *trans) 2564 { 2565 BTRFS_DEV_LOOKUP_ARGS(args); 2566 struct btrfs_fs_info *fs_info = trans->fs_info; 2567 struct btrfs_root *root = fs_info->chunk_root; 2568 struct btrfs_path *path; 2569 struct extent_buffer *leaf; 2570 struct btrfs_dev_item *dev_item; 2571 struct btrfs_device *device; 2572 struct btrfs_key key; 2573 u8 fs_uuid[BTRFS_FSID_SIZE]; 2574 u8 dev_uuid[BTRFS_UUID_SIZE]; 2575 int ret; 2576 2577 path = btrfs_alloc_path(); 2578 if (!path) 2579 return -ENOMEM; 2580 2581 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 2582 key.offset = 0; 2583 key.type = BTRFS_DEV_ITEM_KEY; 2584 2585 while (1) { 2586 btrfs_reserve_chunk_metadata(trans, false); 2587 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 2588 btrfs_trans_release_chunk_metadata(trans); 2589 if (ret < 0) 2590 goto error; 2591 2592 leaf = path->nodes[0]; 2593 next_slot: 2594 if (path->slots[0] >= btrfs_header_nritems(leaf)) { 2595 ret = btrfs_next_leaf(root, path); 2596 if (ret > 0) 2597 break; 2598 if (ret < 0) 2599 goto error; 2600 leaf = path->nodes[0]; 2601 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 2602 btrfs_release_path(path); 2603 continue; 2604 } 2605 2606 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 2607 if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID || 2608 key.type != BTRFS_DEV_ITEM_KEY) 2609 break; 2610 2611 dev_item = btrfs_item_ptr(leaf, path->slots[0], 2612 struct btrfs_dev_item); 2613 args.devid = btrfs_device_id(leaf, dev_item); 2614 read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item), 2615 BTRFS_UUID_SIZE); 2616 read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item), 2617 BTRFS_FSID_SIZE); 2618 args.uuid = dev_uuid; 2619 args.fsid = fs_uuid; 2620 device = btrfs_find_device(fs_info->fs_devices, &args); 2621 BUG_ON(!device); /* Logic error */ 2622 2623 if (device->fs_devices->seeding) { 2624 btrfs_set_device_generation(leaf, dev_item, 2625 device->generation); 2626 btrfs_mark_buffer_dirty(trans, leaf); 2627 } 2628 2629 path->slots[0]++; 2630 goto next_slot; 2631 } 2632 ret = 0; 2633 error: 2634 btrfs_free_path(path); 2635 return ret; 2636 } 2637 2638 int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path) 2639 { 2640 struct btrfs_root *root = fs_info->dev_root; 2641 struct btrfs_trans_handle *trans; 2642 struct btrfs_device *device; 2643 struct file *bdev_file; 2644 struct 
super_block *sb = fs_info->sb; 2645 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 2646 struct btrfs_fs_devices *seed_devices = NULL; 2647 u64 orig_super_total_bytes; 2648 u64 orig_super_num_devices; 2649 int ret = 0; 2650 bool seeding_dev = false; 2651 bool locked = false; 2652 2653 if (sb_rdonly(sb) && !fs_devices->seeding) 2654 return -EROFS; 2655 2656 bdev_file = bdev_file_open_by_path(device_path, BLK_OPEN_WRITE, 2657 fs_info->bdev_holder, NULL); 2658 if (IS_ERR(bdev_file)) 2659 return PTR_ERR(bdev_file); 2660 2661 if (!btrfs_check_device_zone_type(fs_info, file_bdev(bdev_file))) { 2662 ret = -EINVAL; 2663 goto error; 2664 } 2665 2666 if (fs_devices->seeding) { 2667 seeding_dev = true; 2668 down_write(&sb->s_umount); 2669 mutex_lock(&uuid_mutex); 2670 locked = true; 2671 } 2672 2673 sync_blockdev(file_bdev(bdev_file)); 2674 2675 rcu_read_lock(); 2676 list_for_each_entry_rcu(device, &fs_devices->devices, dev_list) { 2677 if (device->bdev == file_bdev(bdev_file)) { 2678 ret = -EEXIST; 2679 rcu_read_unlock(); 2680 goto error; 2681 } 2682 } 2683 rcu_read_unlock(); 2684 2685 device = btrfs_alloc_device(fs_info, NULL, NULL, device_path); 2686 if (IS_ERR(device)) { 2687 /* we can safely leave the fs_devices entry around */ 2688 ret = PTR_ERR(device); 2689 goto error; 2690 } 2691 2692 device->fs_info = fs_info; 2693 device->bdev_file = bdev_file; 2694 device->bdev = file_bdev(bdev_file); 2695 ret = lookup_bdev(device_path, &device->devt); 2696 if (ret) 2697 goto error_free_device; 2698 2699 ret = btrfs_get_dev_zone_info(device, false); 2700 if (ret) 2701 goto error_free_device; 2702 2703 trans = btrfs_start_transaction(root, 0); 2704 if (IS_ERR(trans)) { 2705 ret = PTR_ERR(trans); 2706 goto error_free_zone; 2707 } 2708 2709 set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state); 2710 device->generation = trans->transid; 2711 device->io_width = fs_info->sectorsize; 2712 device->io_align = fs_info->sectorsize; 2713 device->sector_size = fs_info->sectorsize; 2714 device->total_bytes = 2715 round_down(bdev_nr_bytes(device->bdev), fs_info->sectorsize); 2716 device->disk_total_bytes = device->total_bytes; 2717 device->commit_total_bytes = device->total_bytes; 2718 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state); 2719 clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state); 2720 device->dev_stats_valid = 1; 2721 set_blocksize(device->bdev_file, BTRFS_BDEV_BLOCKSIZE); 2722 2723 if (seeding_dev) { 2724 btrfs_clear_sb_rdonly(sb); 2725 2726 /* GFP_KERNEL allocation must not be under device_list_mutex */ 2727 seed_devices = btrfs_init_sprout(fs_info); 2728 if (IS_ERR(seed_devices)) { 2729 ret = PTR_ERR(seed_devices); 2730 btrfs_abort_transaction(trans, ret); 2731 goto error_trans; 2732 } 2733 } 2734 2735 mutex_lock(&fs_devices->device_list_mutex); 2736 if (seeding_dev) { 2737 btrfs_setup_sprout(fs_info, seed_devices); 2738 btrfs_assign_next_active_device(fs_info->fs_devices->latest_dev, 2739 device); 2740 } 2741 2742 device->fs_devices = fs_devices; 2743 2744 mutex_lock(&fs_info->chunk_mutex); 2745 list_add_rcu(&device->dev_list, &fs_devices->devices); 2746 list_add(&device->dev_alloc_list, &fs_devices->alloc_list); 2747 fs_devices->num_devices++; 2748 fs_devices->open_devices++; 2749 fs_devices->rw_devices++; 2750 fs_devices->total_devices++; 2751 fs_devices->total_rw_bytes += device->total_bytes; 2752 2753 atomic64_add(device->total_bytes, &fs_info->free_chunk_space); 2754 2755 if (!bdev_nonrot(device->bdev)) 2756 fs_devices->rotating = true; 2757 2758 orig_super_total_bytes 
= btrfs_super_total_bytes(fs_info->super_copy); 2759 btrfs_set_super_total_bytes(fs_info->super_copy, 2760 round_down(orig_super_total_bytes + device->total_bytes, 2761 fs_info->sectorsize)); 2762 2763 orig_super_num_devices = btrfs_super_num_devices(fs_info->super_copy); 2764 btrfs_set_super_num_devices(fs_info->super_copy, 2765 orig_super_num_devices + 1); 2766 2767 /* 2768 * We've got more storage, clear any full flags on the space 2769 * infos 2770 */ 2771 btrfs_clear_space_info_full(fs_info); 2772 2773 mutex_unlock(&fs_info->chunk_mutex); 2774 2775 /* Add sysfs device entry */ 2776 btrfs_sysfs_add_device(device); 2777 2778 mutex_unlock(&fs_devices->device_list_mutex); 2779 2780 if (seeding_dev) { 2781 mutex_lock(&fs_info->chunk_mutex); 2782 ret = init_first_rw_device(trans); 2783 mutex_unlock(&fs_info->chunk_mutex); 2784 if (ret) { 2785 btrfs_abort_transaction(trans, ret); 2786 goto error_sysfs; 2787 } 2788 } 2789 2790 ret = btrfs_add_dev_item(trans, device); 2791 if (ret) { 2792 btrfs_abort_transaction(trans, ret); 2793 goto error_sysfs; 2794 } 2795 2796 if (seeding_dev) { 2797 ret = btrfs_finish_sprout(trans); 2798 if (ret) { 2799 btrfs_abort_transaction(trans, ret); 2800 goto error_sysfs; 2801 } 2802 2803 /* 2804 * fs_devices now represents the newly sprouted filesystem and 2805 * its fsid has been changed by btrfs_setup_sprout(). 2806 */ 2807 btrfs_sysfs_update_sprout_fsid(fs_devices); 2808 } 2809 2810 ret = btrfs_commit_transaction(trans); 2811 2812 if (seeding_dev) { 2813 mutex_unlock(&uuid_mutex); 2814 up_write(&sb->s_umount); 2815 locked = false; 2816 2817 if (ret) /* transaction commit */ 2818 return ret; 2819 2820 ret = btrfs_relocate_sys_chunks(fs_info); 2821 if (ret < 0) 2822 btrfs_handle_fs_error(fs_info, ret, 2823 "Failed to relocate sys chunks after device initialization. This can be fixed using the \"btrfs balance\" command."); 2824 trans = btrfs_attach_transaction(root); 2825 if (IS_ERR(trans)) { 2826 if (PTR_ERR(trans) == -ENOENT) 2827 return 0; 2828 ret = PTR_ERR(trans); 2829 trans = NULL; 2830 goto error_sysfs; 2831 } 2832 ret = btrfs_commit_transaction(trans); 2833 } 2834 2835 /* 2836 * Now that we have written a new super block to this device, check all 2837 * the other fs_devices lists to see if device_path alienates any other 2838 * scanned device. 2839 * We can ignore the return value as it typically returns -EINVAL and 2840 * only succeeds if the device was an alien.
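* (An 'alien' here is a device whose earlier scan record belongs to another filesystem and is now stale.)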
2841 */ 2842 btrfs_forget_devices(device->devt); 2843 2844 /* Update ctime/mtime for blkid or udev */ 2845 update_dev_time(device_path); 2846 2847 return ret; 2848 2849 error_sysfs: 2850 btrfs_sysfs_remove_device(device); 2851 mutex_lock(&fs_info->fs_devices->device_list_mutex); 2852 mutex_lock(&fs_info->chunk_mutex); 2853 list_del_rcu(&device->dev_list); 2854 list_del(&device->dev_alloc_list); 2855 fs_info->fs_devices->num_devices--; 2856 fs_info->fs_devices->open_devices--; 2857 fs_info->fs_devices->rw_devices--; 2858 fs_info->fs_devices->total_devices--; 2859 fs_info->fs_devices->total_rw_bytes -= device->total_bytes; 2860 atomic64_sub(device->total_bytes, &fs_info->free_chunk_space); 2861 btrfs_set_super_total_bytes(fs_info->super_copy, 2862 orig_super_total_bytes); 2863 btrfs_set_super_num_devices(fs_info->super_copy, 2864 orig_super_num_devices); 2865 mutex_unlock(&fs_info->chunk_mutex); 2866 mutex_unlock(&fs_info->fs_devices->device_list_mutex); 2867 error_trans: 2868 if (seeding_dev) 2869 btrfs_set_sb_rdonly(sb); 2870 if (trans) 2871 btrfs_end_transaction(trans); 2872 error_free_zone: 2873 btrfs_destroy_dev_zone_info(device); 2874 error_free_device: 2875 btrfs_free_device(device); 2876 error: 2877 fput(bdev_file); 2878 if (locked) { 2879 mutex_unlock(&uuid_mutex); 2880 up_write(&sb->s_umount); 2881 } 2882 return ret; 2883 } 2884 2885 static noinline int btrfs_update_device(struct btrfs_trans_handle *trans, 2886 struct btrfs_device *device) 2887 { 2888 int ret; 2889 struct btrfs_path *path; 2890 struct btrfs_root *root = device->fs_info->chunk_root; 2891 struct btrfs_dev_item *dev_item; 2892 struct extent_buffer *leaf; 2893 struct btrfs_key key; 2894 2895 path = btrfs_alloc_path(); 2896 if (!path) 2897 return -ENOMEM; 2898 2899 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 2900 key.type = BTRFS_DEV_ITEM_KEY; 2901 key.offset = device->devid; 2902 2903 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 2904 if (ret < 0) 2905 goto out; 2906 2907 if (ret > 0) { 2908 ret = -ENOENT; 2909 goto out; 2910 } 2911 2912 leaf = path->nodes[0]; 2913 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item); 2914 2915 btrfs_set_device_id(leaf, dev_item, device->devid); 2916 btrfs_set_device_type(leaf, dev_item, device->type); 2917 btrfs_set_device_io_align(leaf, dev_item, device->io_align); 2918 btrfs_set_device_io_width(leaf, dev_item, device->io_width); 2919 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size); 2920 btrfs_set_device_total_bytes(leaf, dev_item, 2921 btrfs_device_get_disk_total_bytes(device)); 2922 btrfs_set_device_bytes_used(leaf, dev_item, 2923 btrfs_device_get_bytes_used(device)); 2924 btrfs_mark_buffer_dirty(trans, leaf); 2925 2926 out: 2927 btrfs_free_path(path); 2928 return ret; 2929 } 2930 2931 int btrfs_grow_device(struct btrfs_trans_handle *trans, 2932 struct btrfs_device *device, u64 new_size) 2933 { 2934 struct btrfs_fs_info *fs_info = device->fs_info; 2935 struct btrfs_super_block *super_copy = fs_info->super_copy; 2936 u64 old_total; 2937 u64 diff; 2938 int ret; 2939 2940 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) 2941 return -EACCES; 2942 2943 new_size = round_down(new_size, fs_info->sectorsize); 2944 2945 mutex_lock(&fs_info->chunk_mutex); 2946 old_total = btrfs_super_total_bytes(super_copy); 2947 diff = round_down(new_size - device->total_bytes, fs_info->sectorsize); 2948 2949 if (new_size <= device->total_bytes || 2950 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) { 2951 mutex_unlock(&fs_info->chunk_mutex); 
2952 return -EINVAL; 2953 } 2954 2955 btrfs_set_super_total_bytes(super_copy, 2956 round_down(old_total + diff, fs_info->sectorsize)); 2957 device->fs_devices->total_rw_bytes += diff; 2958 atomic64_add(diff, &fs_info->free_chunk_space); 2959 2960 btrfs_device_set_total_bytes(device, new_size); 2961 btrfs_device_set_disk_total_bytes(device, new_size); 2962 btrfs_clear_space_info_full(device->fs_info); 2963 if (list_empty(&device->post_commit_list)) 2964 list_add_tail(&device->post_commit_list, 2965 &trans->transaction->dev_update_list); 2966 mutex_unlock(&fs_info->chunk_mutex); 2967 2968 btrfs_reserve_chunk_metadata(trans, false); 2969 ret = btrfs_update_device(trans, device); 2970 btrfs_trans_release_chunk_metadata(trans); 2971 2972 return ret; 2973 } 2974 2975 static int btrfs_free_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset) 2976 { 2977 struct btrfs_fs_info *fs_info = trans->fs_info; 2978 struct btrfs_root *root = fs_info->chunk_root; 2979 int ret; 2980 struct btrfs_path *path; 2981 struct btrfs_key key; 2982 2983 path = btrfs_alloc_path(); 2984 if (!path) 2985 return -ENOMEM; 2986 2987 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 2988 key.offset = chunk_offset; 2989 key.type = BTRFS_CHUNK_ITEM_KEY; 2990 2991 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 2992 if (ret < 0) 2993 goto out; 2994 else if (ret > 0) { /* Logic error or corruption */ 2995 btrfs_err(fs_info, "failed to lookup chunk %llu when freeing", 2996 chunk_offset); 2997 btrfs_abort_transaction(trans, -ENOENT); 2998 ret = -EUCLEAN; 2999 goto out; 3000 } 3001 3002 ret = btrfs_del_item(trans, root, path); 3003 if (ret < 0) { 3004 btrfs_err(fs_info, "failed to delete chunk %llu item", chunk_offset); 3005 btrfs_abort_transaction(trans, ret); 3006 goto out; 3007 } 3008 out: 3009 btrfs_free_path(path); 3010 return ret; 3011 } 3012 3013 static int btrfs_del_sys_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset) 3014 { 3015 struct btrfs_super_block *super_copy = fs_info->super_copy; 3016 struct btrfs_disk_key *disk_key; 3017 struct btrfs_chunk *chunk; 3018 u8 *ptr; 3019 int ret = 0; 3020 u32 num_stripes; 3021 u32 array_size; 3022 u32 len = 0; 3023 u32 cur; 3024 struct btrfs_key key; 3025 3026 lockdep_assert_held(&fs_info->chunk_mutex); 3027 array_size = btrfs_super_sys_array_size(super_copy); 3028 3029 ptr = super_copy->sys_chunk_array; 3030 cur = 0; 3031 3032 while (cur < array_size) { 3033 disk_key = (struct btrfs_disk_key *)ptr; 3034 btrfs_disk_key_to_cpu(&key, disk_key); 3035 3036 len = sizeof(*disk_key); 3037 3038 if (key.type == BTRFS_CHUNK_ITEM_KEY) { 3039 chunk = (struct btrfs_chunk *)(ptr + len); 3040 num_stripes = btrfs_stack_chunk_num_stripes(chunk); 3041 len += btrfs_chunk_item_size(num_stripes); 3042 } else { 3043 ret = -EIO; 3044 break; 3045 } 3046 if (key.objectid == BTRFS_FIRST_CHUNK_TREE_OBJECTID && 3047 key.offset == chunk_offset) { 3048 memmove(ptr, ptr + len, array_size - (cur + len)); 3049 array_size -= len; 3050 btrfs_set_super_sys_array_size(super_copy, array_size); 3051 } else { 3052 ptr += len; 3053 cur += len; 3054 } 3055 } 3056 return ret; 3057 } 3058 3059 struct btrfs_chunk_map *btrfs_find_chunk_map_nolock(struct btrfs_fs_info *fs_info, 3060 u64 logical, u64 length) 3061 { 3062 struct rb_node *node = fs_info->mapping_tree.rb_root.rb_node; 3063 struct rb_node *prev = NULL; 3064 struct rb_node *orig_prev; 3065 struct btrfs_chunk_map *map; 3066 struct btrfs_chunk_map *prev_map = NULL; 3067 3068 while (node) { 3069 map = rb_entry(node, struct btrfs_chunk_map, rb_node); 3070 prev 
= node; 3071 prev_map = map; 3072 3073 if (logical < map->start) { 3074 node = node->rb_left; 3075 } else if (logical >= map->start + map->chunk_len) { 3076 node = node->rb_right; 3077 } else { 3078 refcount_inc(&map->refs); 3079 return map; 3080 } 3081 } 3082 3083 if (!prev) 3084 return NULL; 3085 3086 orig_prev = prev; 3087 while (prev && logical >= prev_map->start + prev_map->chunk_len) { 3088 prev = rb_next(prev); 3089 prev_map = rb_entry(prev, struct btrfs_chunk_map, rb_node); 3090 } 3091 3092 if (!prev) { 3093 prev = orig_prev; 3094 prev_map = rb_entry(prev, struct btrfs_chunk_map, rb_node); 3095 while (prev && logical < prev_map->start) { 3096 prev = rb_prev(prev); 3097 prev_map = rb_entry(prev, struct btrfs_chunk_map, rb_node); 3098 } 3099 } 3100 3101 if (prev) { 3102 u64 end = logical + length; 3103 3104 /* 3105 * Caller can pass a U64_MAX length when it wants to get any 3106 * chunk starting at an offset of 'logical' or higher, so deal 3107 * with the u64 overflow by resetting the end offset to U64_MAX. 3108 */ 3109 if (end < logical) 3110 end = U64_MAX; 3111 3112 if (end > prev_map->start && 3113 logical < prev_map->start + prev_map->chunk_len) { 3114 refcount_inc(&prev_map->refs); 3115 return prev_map; 3116 } 3117 } 3118 3119 return NULL; 3120 } 3121 3122 struct btrfs_chunk_map *btrfs_find_chunk_map(struct btrfs_fs_info *fs_info, 3123 u64 logical, u64 length) 3124 { 3125 struct btrfs_chunk_map *map; 3126 3127 read_lock(&fs_info->mapping_tree_lock); 3128 map = btrfs_find_chunk_map_nolock(fs_info, logical, length); 3129 read_unlock(&fs_info->mapping_tree_lock); 3130 3131 return map; 3132 } 3133 3134 /* 3135 * Find the mapping containing the given logical extent. 3136 * 3137 * @logical: Logical block offset in bytes. 3138 * @length: Length of extent in bytes. 3139 * 3140 * Return: Chunk mapping or ERR_PTR. 3141 */ 3142 struct btrfs_chunk_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info, 3143 u64 logical, u64 length) 3144 { 3145 struct btrfs_chunk_map *map; 3146 3147 map = btrfs_find_chunk_map(fs_info, logical, length); 3148 3149 if (unlikely(!map)) { 3150 btrfs_crit(fs_info, 3151 "unable to find chunk map for logical %llu length %llu", 3152 logical, length); 3153 return ERR_PTR(-EINVAL); 3154 } 3155 3156 if (unlikely(map->start > logical || map->start + map->chunk_len <= logical)) { 3157 btrfs_crit(fs_info, 3158 "found a bad chunk map, wanted %llu-%llu, found %llu-%llu", 3159 logical, logical + length, map->start, 3160 map->start + map->chunk_len); 3161 btrfs_free_chunk_map(map); 3162 return ERR_PTR(-EINVAL); 3163 } 3164 3165 /* Callers are responsible for dropping the reference. */ 3166 return map; 3167 } 3168 3169 static int remove_chunk_item(struct btrfs_trans_handle *trans, 3170 struct btrfs_chunk_map *map, u64 chunk_offset) 3171 { 3172 int i; 3173 3174 /* 3175 * Removing chunk items and updating the device items in the chunks btree 3176 * requires holding the chunk_mutex. 3177 * See the comment at btrfs_chunk_alloc() for the details.
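* The caller btrfs_remove_chunk() acquires the chunk_mutex right before calling in here, see the lockdep assertion below.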
3178 */ 3179 lockdep_assert_held(&trans->fs_info->chunk_mutex); 3180 3181 for (i = 0; i < map->num_stripes; i++) { 3182 int ret; 3183 3184 ret = btrfs_update_device(trans, map->stripes[i].dev); 3185 if (ret) 3186 return ret; 3187 } 3188 3189 return btrfs_free_chunk(trans, chunk_offset); 3190 } 3191 3192 int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset) 3193 { 3194 struct btrfs_fs_info *fs_info = trans->fs_info; 3195 struct btrfs_chunk_map *map; 3196 u64 dev_extent_len = 0; 3197 int i, ret = 0; 3198 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 3199 3200 map = btrfs_get_chunk_map(fs_info, chunk_offset, 1); 3201 if (IS_ERR(map)) { 3202 /* 3203 * This is a logic error, but we don't want to just rely on the 3204 * user having built with ASSERT enabled, so if ASSERT doesn't 3205 * do anything we still error out. 3206 */ 3207 ASSERT(0); 3208 return PTR_ERR(map); 3209 } 3210 3211 /* 3212 * First delete the device extent items from the devices btree. 3213 * We take the device_list_mutex to avoid racing with the finishing phase 3214 * of a device replace operation. See the comment below before acquiring 3215 * fs_info->chunk_mutex. Note that here we do not acquire the chunk_mutex 3216 * because that can result in a deadlock when deleting the device extent 3217 * items from the devices btree - COWing an extent buffer from the btree 3218 * may result in allocating a new metadata chunk, which would attempt to 3219 * lock fs_info->chunk_mutex again. 3220 */ 3221 mutex_lock(&fs_devices->device_list_mutex); 3222 for (i = 0; i < map->num_stripes; i++) { 3223 struct btrfs_device *device = map->stripes[i].dev; 3224 ret = btrfs_free_dev_extent(trans, device, 3225 map->stripes[i].physical, 3226 &dev_extent_len); 3227 if (ret) { 3228 mutex_unlock(&fs_devices->device_list_mutex); 3229 btrfs_abort_transaction(trans, ret); 3230 goto out; 3231 } 3232 3233 if (device->bytes_used > 0) { 3234 mutex_lock(&fs_info->chunk_mutex); 3235 btrfs_device_set_bytes_used(device, 3236 device->bytes_used - dev_extent_len); 3237 atomic64_add(dev_extent_len, &fs_info->free_chunk_space); 3238 btrfs_clear_space_info_full(fs_info); 3239 mutex_unlock(&fs_info->chunk_mutex); 3240 } 3241 } 3242 mutex_unlock(&fs_devices->device_list_mutex); 3243 3244 /* 3245 * We acquire fs_info->chunk_mutex for 2 reasons: 3246 * 3247 * 1) Just like with the first phase of the chunk allocation, we must 3248 * reserve system space, do all chunk btree updates and deletions, and 3249 * update the system chunk array in the superblock while holding this 3250 * mutex. This is for similar reasons as explained on the comment at 3251 * the top of btrfs_chunk_alloc(); 3252 * 3253 * 2) Prevent races with the final phase of a device replace operation 3254 * that replaces the device object associated with the map's stripes, 3255 * because the device object's id can change at any time during that 3256 * final phase of the device replace operation 3257 * (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the 3258 * replaced device and then see it with an ID of 3259 * BTRFS_DEV_REPLACE_DEVID, which would cause a failure when updating 3260 * the device item, which does not exist on the chunk btree. 3261 * The finishing phase of device replace acquires both the 3262 * device_list_mutex and the chunk_mutex, in that order, so we are 3263 * safe by just acquiring the chunk_mutex.
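* While the chunk_mutex is held we also set trans->removing_chunk, so that the error paths at the out label know whether the mutex still needs to be dropped.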
3264 */ 3265 trans->removing_chunk = true; 3266 mutex_lock(&fs_info->chunk_mutex); 3267 3268 check_system_chunk(trans, map->type); 3269 3270 ret = remove_chunk_item(trans, map, chunk_offset); 3271 /* 3272 * Normally we should not get -ENOSPC since we reserved space before 3273 * through the call to check_system_chunk(). 3274 * 3275 * Despite our system space_info having enough free space, we may not 3276 * be able to allocate extents from its block groups, because all have 3277 * an incompatible profile, which will force us to allocate a new system 3278 * block group with the right profile, or right after we called 3279 * check_system_chunk() above, a scrub turned the only system block group 3280 * with enough free space into RO mode. 3281 * This is explained with more detail at do_chunk_alloc(). 3282 * 3283 * So if we get -ENOSPC, allocate a new system chunk and retry once. 3284 */ 3285 if (ret == -ENOSPC) { 3286 const u64 sys_flags = btrfs_system_alloc_profile(fs_info); 3287 struct btrfs_block_group *sys_bg; 3288 3289 sys_bg = btrfs_create_chunk(trans, sys_flags); 3290 if (IS_ERR(sys_bg)) { 3291 ret = PTR_ERR(sys_bg); 3292 btrfs_abort_transaction(trans, ret); 3293 goto out; 3294 } 3295 3296 ret = btrfs_chunk_alloc_add_chunk_item(trans, sys_bg); 3297 if (ret) { 3298 btrfs_abort_transaction(trans, ret); 3299 goto out; 3300 } 3301 3302 ret = remove_chunk_item(trans, map, chunk_offset); 3303 if (ret) { 3304 btrfs_abort_transaction(trans, ret); 3305 goto out; 3306 } 3307 } else if (ret) { 3308 btrfs_abort_transaction(trans, ret); 3309 goto out; 3310 } 3311 3312 trace_btrfs_chunk_free(fs_info, map, chunk_offset, map->chunk_len); 3313 3314 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) { 3315 ret = btrfs_del_sys_chunk(fs_info, chunk_offset); 3316 if (ret) { 3317 btrfs_abort_transaction(trans, ret); 3318 goto out; 3319 } 3320 } 3321 3322 mutex_unlock(&fs_info->chunk_mutex); 3323 trans->removing_chunk = false; 3324 3325 /* 3326 * We are done with chunk btree updates and deletions, so release the 3327 * system space we previously reserved (with check_system_chunk()). 3328 */ 3329 btrfs_trans_release_chunk_metadata(trans); 3330 3331 ret = btrfs_remove_block_group(trans, map); 3332 if (ret) { 3333 btrfs_abort_transaction(trans, ret); 3334 goto out; 3335 } 3336 3337 out: 3338 if (trans->removing_chunk) { 3339 mutex_unlock(&fs_info->chunk_mutex); 3340 trans->removing_chunk = false; 3341 } 3342 /* once for us */ 3343 btrfs_free_chunk_map(map); 3344 return ret; 3345 } 3346 3347 int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset) 3348 { 3349 struct btrfs_root *root = fs_info->chunk_root; 3350 struct btrfs_trans_handle *trans; 3351 struct btrfs_block_group *block_group; 3352 u64 length; 3353 int ret; 3354 3355 if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) { 3356 btrfs_err(fs_info, 3357 "relocate: not supported on extent tree v2 yet"); 3358 return -EINVAL; 3359 } 3360 3361 /* 3362 * Prevent races with automatic removal of unused block groups. 3363 * After we relocate and before we remove the chunk with offset 3364 * chunk_offset, automatic removal of the block group can kick in, 3365 * resulting in a failure when calling btrfs_remove_chunk() below. 3366 * 3367 * Make sure to acquire this mutex before doing a tree search (dev 3368 * or chunk trees) to find chunks.
Otherwise the cleaner kthread might 3369 * call btrfs_remove_chunk() (through btrfs_delete_unused_bgs()) after 3370 * we release the path used to search the chunk/dev tree and before 3371 * the current task acquires this mutex and calls us. 3372 */ 3373 lockdep_assert_held(&fs_info->reclaim_bgs_lock); 3374 3375 /* step one, relocate all the extents inside this chunk */ 3376 btrfs_scrub_pause(fs_info); 3377 ret = btrfs_relocate_block_group(fs_info, chunk_offset); 3378 btrfs_scrub_continue(fs_info); 3379 if (ret) { 3380 /* 3381 * If we had a transaction abort, stop all running scrubs. 3382 * See transaction.c:cleanup_transaction() for why we do it here. 3383 */ 3384 if (BTRFS_FS_ERROR(fs_info)) 3385 btrfs_scrub_cancel(fs_info); 3386 return ret; 3387 } 3388 3389 block_group = btrfs_lookup_block_group(fs_info, chunk_offset); 3390 if (!block_group) 3391 return -ENOENT; 3392 btrfs_discard_cancel_work(&fs_info->discard_ctl, block_group); 3393 length = block_group->length; 3394 btrfs_put_block_group(block_group); 3395 3396 /* 3397 * On a zoned file system, discard the whole block group; this will 3398 * trigger a REQ_OP_ZONE_RESET operation on the device zone. If 3399 * resetting the zone fails, don't treat it as a fatal problem from the 3400 * filesystem's point of view. 3401 */ 3402 if (btrfs_is_zoned(fs_info)) { 3403 ret = btrfs_discard_extent(fs_info, chunk_offset, length, NULL); 3404 if (ret) 3405 btrfs_info(fs_info, 3406 "failed to reset zone %llu after relocation", 3407 chunk_offset); 3408 } 3409 3410 trans = btrfs_start_trans_remove_block_group(root->fs_info, 3411 chunk_offset); 3412 if (IS_ERR(trans)) { 3413 ret = PTR_ERR(trans); 3414 btrfs_handle_fs_error(root->fs_info, ret, NULL); 3415 return ret; 3416 } 3417 3418 /* 3419 * step two, delete the device extents and the 3420 * chunk tree entries 3421 */ 3422 ret = btrfs_remove_chunk(trans, chunk_offset); 3423 btrfs_end_transaction(trans); 3424 return ret; 3425 } 3426 3427 static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info) 3428 { 3429 struct btrfs_root *chunk_root = fs_info->chunk_root; 3430 struct btrfs_path *path; 3431 struct extent_buffer *leaf; 3432 struct btrfs_chunk *chunk; 3433 struct btrfs_key key; 3434 struct btrfs_key found_key; 3435 u64 chunk_type; 3436 bool retried = false; 3437 int failed = 0; 3438 int ret; 3439 3440 path = btrfs_alloc_path(); 3441 if (!path) 3442 return -ENOMEM; 3443 3444 again: 3445 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 3446 key.offset = (u64)-1; 3447 key.type = BTRFS_CHUNK_ITEM_KEY; 3448 3449 while (1) { 3450 mutex_lock(&fs_info->reclaim_bgs_lock); 3451 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0); 3452 if (ret < 0) { 3453 mutex_unlock(&fs_info->reclaim_bgs_lock); 3454 goto error; 3455 } 3456 if (ret == 0) { 3457 /* 3458 * On the first search we would find the chunk tree with 3459 * offset -1, which is not possible. On subsequent 3460 * loops this would find an existing item on an invalid 3461 * offset (one less than the previous one, wrong 3462 * alignment and size).
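* Either way the chunk tree is corrupted, hence the -EUCLEAN below.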
3463 */ 3464 ret = -EUCLEAN; 3465 mutex_unlock(&fs_info->reclaim_bgs_lock); 3466 goto error; 3467 } 3468 3469 ret = btrfs_previous_item(chunk_root, path, key.objectid, 3470 key.type); 3471 if (ret) 3472 mutex_unlock(&fs_info->reclaim_bgs_lock); 3473 if (ret < 0) 3474 goto error; 3475 if (ret > 0) 3476 break; 3477 3478 leaf = path->nodes[0]; 3479 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 3480 3481 chunk = btrfs_item_ptr(leaf, path->slots[0], 3482 struct btrfs_chunk); 3483 chunk_type = btrfs_chunk_type(leaf, chunk); 3484 btrfs_release_path(path); 3485 3486 if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) { 3487 ret = btrfs_relocate_chunk(fs_info, found_key.offset); 3488 if (ret == -ENOSPC) 3489 failed++; 3490 else 3491 BUG_ON(ret); 3492 } 3493 mutex_unlock(&fs_info->reclaim_bgs_lock); 3494 3495 if (found_key.offset == 0) 3496 break; 3497 key.offset = found_key.offset - 1; 3498 } 3499 ret = 0; 3500 if (failed && !retried) { 3501 failed = 0; 3502 retried = true; 3503 goto again; 3504 } else if (WARN_ON(failed && retried)) { 3505 ret = -ENOSPC; 3506 } 3507 error: 3508 btrfs_free_path(path); 3509 return ret; 3510 } 3511 3512 /* 3513 * Return 1 : allocated a data chunk successfully, 3514 * return <0: error during data chunk allocation, 3515 * return 0 : no need to allocate a data chunk. 3516 */ 3517 static int btrfs_may_alloc_data_chunk(struct btrfs_fs_info *fs_info, 3518 u64 chunk_offset) 3519 { 3520 struct btrfs_block_group *cache; 3521 u64 bytes_used; 3522 u64 chunk_type; 3523 3524 cache = btrfs_lookup_block_group(fs_info, chunk_offset); 3525 ASSERT(cache); 3526 chunk_type = cache->flags; 3527 btrfs_put_block_group(cache); 3528 3529 if (!(chunk_type & BTRFS_BLOCK_GROUP_DATA)) 3530 return 0; 3531 3532 spin_lock(&fs_info->data_sinfo->lock); 3533 bytes_used = fs_info->data_sinfo->bytes_used; 3534 spin_unlock(&fs_info->data_sinfo->lock); 3535 3536 if (!bytes_used) { 3537 struct btrfs_trans_handle *trans; 3538 int ret; 3539 3540 trans = btrfs_join_transaction(fs_info->tree_root); 3541 if (IS_ERR(trans)) 3542 return PTR_ERR(trans); 3543 3544 ret = btrfs_force_chunk_alloc(trans, BTRFS_BLOCK_GROUP_DATA); 3545 btrfs_end_transaction(trans); 3546 if (ret < 0) 3547 return ret; 3548 return 1; 3549 } 3550 3551 return 0; 3552 } 3553 3554 static void btrfs_disk_balance_args_to_cpu(struct btrfs_balance_args *cpu, 3555 const struct btrfs_disk_balance_args *disk) 3556 { 3557 memset(cpu, 0, sizeof(*cpu)); 3558 3559 cpu->profiles = le64_to_cpu(disk->profiles); 3560 cpu->usage = le64_to_cpu(disk->usage); 3561 cpu->devid = le64_to_cpu(disk->devid); 3562 cpu->pstart = le64_to_cpu(disk->pstart); 3563 cpu->pend = le64_to_cpu(disk->pend); 3564 cpu->vstart = le64_to_cpu(disk->vstart); 3565 cpu->vend = le64_to_cpu(disk->vend); 3566 cpu->target = le64_to_cpu(disk->target); 3567 cpu->flags = le64_to_cpu(disk->flags); 3568 cpu->limit = le64_to_cpu(disk->limit); 3569 cpu->stripes_min = le32_to_cpu(disk->stripes_min); 3570 cpu->stripes_max = le32_to_cpu(disk->stripes_max); 3571 } 3572 3573 static void btrfs_cpu_balance_args_to_disk(struct btrfs_disk_balance_args *disk, 3574 const struct btrfs_balance_args *cpu) 3575 { 3576 memset(disk, 0, sizeof(*disk)); 3577 3578 disk->profiles = cpu_to_le64(cpu->profiles); 3579 disk->usage = cpu_to_le64(cpu->usage); 3580 disk->devid = cpu_to_le64(cpu->devid); 3581 disk->pstart = cpu_to_le64(cpu->pstart); 3582 disk->pend = cpu_to_le64(cpu->pend); 3583 disk->vstart = cpu_to_le64(cpu->vstart); 3584 disk->vend = cpu_to_le64(cpu->vend); 3585 disk->target = cpu_to_le64(cpu->target);
3586 disk->flags = cpu_to_le64(cpu->flags); 3587 disk->limit = cpu_to_le64(cpu->limit); 3588 disk->stripes_min = cpu_to_le32(cpu->stripes_min); 3589 disk->stripes_max = cpu_to_le32(cpu->stripes_max); 3590 } 3591 3592 static int insert_balance_item(struct btrfs_fs_info *fs_info, 3593 struct btrfs_balance_control *bctl) 3594 { 3595 struct btrfs_root *root = fs_info->tree_root; 3596 struct btrfs_trans_handle *trans; 3597 struct btrfs_balance_item *item; 3598 struct btrfs_disk_balance_args disk_bargs; 3599 struct btrfs_path *path; 3600 struct extent_buffer *leaf; 3601 struct btrfs_key key; 3602 int ret, err; 3603 3604 path = btrfs_alloc_path(); 3605 if (!path) 3606 return -ENOMEM; 3607 3608 trans = btrfs_start_transaction(root, 0); 3609 if (IS_ERR(trans)) { 3610 btrfs_free_path(path); 3611 return PTR_ERR(trans); 3612 } 3613 3614 key.objectid = BTRFS_BALANCE_OBJECTID; 3615 key.type = BTRFS_TEMPORARY_ITEM_KEY; 3616 key.offset = 0; 3617 3618 ret = btrfs_insert_empty_item(trans, root, path, &key, 3619 sizeof(*item)); 3620 if (ret) 3621 goto out; 3622 3623 leaf = path->nodes[0]; 3624 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item); 3625 3626 memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item)); 3627 3628 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data); 3629 btrfs_set_balance_data(leaf, item, &disk_bargs); 3630 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta); 3631 btrfs_set_balance_meta(leaf, item, &disk_bargs); 3632 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys); 3633 btrfs_set_balance_sys(leaf, item, &disk_bargs); 3634 3635 btrfs_set_balance_flags(leaf, item, bctl->flags); 3636 3637 btrfs_mark_buffer_dirty(trans, leaf); 3638 out: 3639 btrfs_free_path(path); 3640 err = btrfs_commit_transaction(trans); 3641 if (err && !ret) 3642 ret = err; 3643 return ret; 3644 } 3645 3646 static int del_balance_item(struct btrfs_fs_info *fs_info) 3647 { 3648 struct btrfs_root *root = fs_info->tree_root; 3649 struct btrfs_trans_handle *trans; 3650 struct btrfs_path *path; 3651 struct btrfs_key key; 3652 int ret, err; 3653 3654 path = btrfs_alloc_path(); 3655 if (!path) 3656 return -ENOMEM; 3657 3658 trans = btrfs_start_transaction_fallback_global_rsv(root, 0); 3659 if (IS_ERR(trans)) { 3660 btrfs_free_path(path); 3661 return PTR_ERR(trans); 3662 } 3663 3664 key.objectid = BTRFS_BALANCE_OBJECTID; 3665 key.type = BTRFS_TEMPORARY_ITEM_KEY; 3666 key.offset = 0; 3667 3668 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 3669 if (ret < 0) 3670 goto out; 3671 if (ret > 0) { 3672 ret = -ENOENT; 3673 goto out; 3674 } 3675 3676 ret = btrfs_del_item(trans, root, path); 3677 out: 3678 btrfs_free_path(path); 3679 err = btrfs_commit_transaction(trans); 3680 if (err && !ret) 3681 ret = err; 3682 return ret; 3683 } 3684 3685 /* 3686 * This is a heuristic used to reduce the number of chunks balanced on 3687 * resume after balance was interrupted. 3688 */ 3689 static void update_balance_args(struct btrfs_balance_control *bctl) 3690 { 3691 /* 3692 * Turn on soft mode for chunk types that were being converted. 3693 */ 3694 if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) 3695 bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT; 3696 if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) 3697 bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT; 3698 if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) 3699 bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT; 3700 3701 /* 3702 * Turn on usage filter if it is not already in use.
The idea is 3703 * that chunks that we have already balanced should be 3704 * reasonably full. Don't do it for chunks that are being 3705 * converted - that will keep us from relocating unconverted 3706 * (albeit full) chunks. 3707 */ 3708 if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) && 3709 !(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) && 3710 !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) { 3711 bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE; 3712 bctl->data.usage = 90; 3713 } 3714 if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) && 3715 !(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) && 3716 !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) { 3717 bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE; 3718 bctl->sys.usage = 90; 3719 } 3720 if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) && 3721 !(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) && 3722 !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) { 3723 bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE; 3724 bctl->meta.usage = 90; 3725 } 3726 } 3727 3728 /* 3729 * Clear the balance status in fs_info and delete the balance item from disk. 3730 */ 3731 static void reset_balance_state(struct btrfs_fs_info *fs_info) 3732 { 3733 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 3734 int ret; 3735 3736 ASSERT(fs_info->balance_ctl); 3737 3738 spin_lock(&fs_info->balance_lock); 3739 fs_info->balance_ctl = NULL; 3740 spin_unlock(&fs_info->balance_lock); 3741 3742 kfree(bctl); 3743 ret = del_balance_item(fs_info); 3744 if (ret) 3745 btrfs_handle_fs_error(fs_info, ret, NULL); 3746 } 3747 3748 /* 3749 * Balance filters. Return 1 if chunk should be filtered out 3750 * (should not be balanced). 3751 */ 3752 static int chunk_profiles_filter(u64 chunk_type, 3753 struct btrfs_balance_args *bargs) 3754 { 3755 chunk_type = chunk_to_extended(chunk_type) & 3756 BTRFS_EXTENDED_PROFILE_MASK; 3757 3758 if (bargs->profiles & chunk_type) 3759 return 0; 3760 3761 return 1; 3762 } 3763 3764 static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset, 3765 struct btrfs_balance_args *bargs) 3766 { 3767 struct btrfs_block_group *cache; 3768 u64 chunk_used; 3769 u64 user_thresh_min; 3770 u64 user_thresh_max; 3771 int ret = 1; 3772 3773 cache = btrfs_lookup_block_group(fs_info, chunk_offset); 3774 chunk_used = cache->used; 3775 3776 if (bargs->usage_min == 0) 3777 user_thresh_min = 0; 3778 else 3779 user_thresh_min = mult_perc(cache->length, bargs->usage_min); 3780 3781 if (bargs->usage_max == 0) 3782 user_thresh_max = 1; 3783 else if (bargs->usage_max > 100) 3784 user_thresh_max = cache->length; 3785 else 3786 user_thresh_max = mult_perc(cache->length, bargs->usage_max); 3787 3788 if (user_thresh_min <= chunk_used && chunk_used < user_thresh_max) 3789 ret = 0; 3790 3791 btrfs_put_block_group(cache); 3792 return ret; 3793 } 3794 3795 static int chunk_usage_filter(struct btrfs_fs_info *fs_info, 3796 u64 chunk_offset, struct btrfs_balance_args *bargs) 3797 { 3798 struct btrfs_block_group *cache; 3799 u64 chunk_used, user_thresh; 3800 int ret = 1; 3801 3802 cache = btrfs_lookup_block_group(fs_info, chunk_offset); 3803 chunk_used = cache->used; 3804 3805 if (bargs->usage_min == 0) 3806 user_thresh = 1; 3807 else if (bargs->usage > 100) 3808 user_thresh = cache->length; 3809 else 3810 user_thresh = mult_perc(cache->length, bargs->usage); 3811 3812 if (chunk_used < user_thresh) 3813 ret = 0; 3814 3815 btrfs_put_block_group(cache); 3816 return ret; 3817 } 3818 3819 static int chunk_devid_filter(struct extent_buffer *leaf, 3820 
struct btrfs_chunk *chunk, 3821 struct btrfs_balance_args *bargs) 3822 { 3823 struct btrfs_stripe *stripe; 3824 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 3825 int i; 3826 3827 for (i = 0; i < num_stripes; i++) { 3828 stripe = btrfs_stripe_nr(chunk, i); 3829 if (btrfs_stripe_devid(leaf, stripe) == bargs->devid) 3830 return 0; 3831 } 3832 3833 return 1; 3834 } 3835 3836 static u64 calc_data_stripes(u64 type, int num_stripes) 3837 { 3838 const int index = btrfs_bg_flags_to_raid_index(type); 3839 const int ncopies = btrfs_raid_array[index].ncopies; 3840 const int nparity = btrfs_raid_array[index].nparity; 3841 3842 return (num_stripes - nparity) / ncopies; 3843 } 3844 3845 /* [pstart, pend) */ 3846 static int chunk_drange_filter(struct extent_buffer *leaf, 3847 struct btrfs_chunk *chunk, 3848 struct btrfs_balance_args *bargs) 3849 { 3850 struct btrfs_stripe *stripe; 3851 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 3852 u64 stripe_offset; 3853 u64 stripe_length; 3854 u64 type; 3855 int factor; 3856 int i; 3857 3858 if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID)) 3859 return 0; 3860 3861 type = btrfs_chunk_type(leaf, chunk); 3862 factor = calc_data_stripes(type, num_stripes); 3863 3864 for (i = 0; i < num_stripes; i++) { 3865 stripe = btrfs_stripe_nr(chunk, i); 3866 if (btrfs_stripe_devid(leaf, stripe) != bargs->devid) 3867 continue; 3868 3869 stripe_offset = btrfs_stripe_offset(leaf, stripe); 3870 stripe_length = btrfs_chunk_length(leaf, chunk); 3871 stripe_length = div_u64(stripe_length, factor); 3872 3873 if (stripe_offset < bargs->pend && 3874 stripe_offset + stripe_length > bargs->pstart) 3875 return 0; 3876 } 3877 3878 return 1; 3879 } 3880 3881 /* [vstart, vend) */ 3882 static int chunk_vrange_filter(struct extent_buffer *leaf, 3883 struct btrfs_chunk *chunk, 3884 u64 chunk_offset, 3885 struct btrfs_balance_args *bargs) 3886 { 3887 if (chunk_offset < bargs->vend && 3888 chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart) 3889 /* at least part of the chunk is inside this vrange */ 3890 return 0; 3891 3892 return 1; 3893 } 3894 3895 static int chunk_stripes_range_filter(struct extent_buffer *leaf, 3896 struct btrfs_chunk *chunk, 3897 struct btrfs_balance_args *bargs) 3898 { 3899 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 3900 3901 if (bargs->stripes_min <= num_stripes 3902 && num_stripes <= bargs->stripes_max) 3903 return 0; 3904 3905 return 1; 3906 } 3907 3908 static int chunk_soft_convert_filter(u64 chunk_type, 3909 struct btrfs_balance_args *bargs) 3910 { 3911 if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT)) 3912 return 0; 3913 3914 chunk_type = chunk_to_extended(chunk_type) & 3915 BTRFS_EXTENDED_PROFILE_MASK; 3916 3917 if (bargs->target == chunk_type) 3918 return 1; 3919 3920 return 0; 3921 } 3922 3923 static int should_balance_chunk(struct extent_buffer *leaf, 3924 struct btrfs_chunk *chunk, u64 chunk_offset) 3925 { 3926 struct btrfs_fs_info *fs_info = leaf->fs_info; 3927 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 3928 struct btrfs_balance_args *bargs = NULL; 3929 u64 chunk_type = btrfs_chunk_type(leaf, chunk); 3930 3931 /* type filter */ 3932 if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) & 3933 (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) { 3934 return 0; 3935 } 3936 3937 if (chunk_type & BTRFS_BLOCK_GROUP_DATA) 3938 bargs = &bctl->data; 3939 else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) 3940 bargs = &bctl->sys; 3941 else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA) 3942 bargs = &bctl->meta; 3943 3944 /* profiles 
filter */ 3945 if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) && 3946 chunk_profiles_filter(chunk_type, bargs)) { 3947 return 0; 3948 } 3949 3950 /* usage filter */ 3951 if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) && 3952 chunk_usage_filter(fs_info, chunk_offset, bargs)) { 3953 return 0; 3954 } else if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) && 3955 chunk_usage_range_filter(fs_info, chunk_offset, bargs)) { 3956 return 0; 3957 } 3958 3959 /* devid filter */ 3960 if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) && 3961 chunk_devid_filter(leaf, chunk, bargs)) { 3962 return 0; 3963 } 3964 3965 /* drange filter, makes sense only with devid filter */ 3966 if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) && 3967 chunk_drange_filter(leaf, chunk, bargs)) { 3968 return 0; 3969 } 3970 3971 /* vrange filter */ 3972 if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) && 3973 chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) { 3974 return 0; 3975 } 3976 3977 /* stripes filter */ 3978 if ((bargs->flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) && 3979 chunk_stripes_range_filter(leaf, chunk, bargs)) { 3980 return 0; 3981 } 3982 3983 /* soft profile changing mode */ 3984 if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) && 3985 chunk_soft_convert_filter(chunk_type, bargs)) { 3986 return 0; 3987 } 3988 3989 /* 3990 * Limited by count, must be the last filter. 3991 */ 3992 if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT)) { 3993 if (bargs->limit == 0) 3994 return 0; 3995 else 3996 bargs->limit--; 3997 } else if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)) { 3998 /* 3999 * Same logic as the 'limit' filter; the minimum cannot be 4000 * determined here because we do not have the global information 4001 * about the count of all chunks that satisfy the filters. 4002 */ 4003 if (bargs->limit_max == 0) 4004 return 0; 4005 else 4006 bargs->limit_max--; 4007 } 4008 4009 return 1; 4010 } 4011 4012 static int __btrfs_balance(struct btrfs_fs_info *fs_info) 4013 { 4014 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 4015 struct btrfs_root *chunk_root = fs_info->chunk_root; 4016 u64 chunk_type; 4017 struct btrfs_chunk *chunk; 4018 struct btrfs_path *path = NULL; 4019 struct btrfs_key key; 4020 struct btrfs_key found_key; 4021 struct extent_buffer *leaf; 4022 int slot; 4023 int ret; 4024 int enospc_errors = 0; 4025 bool counting = true; 4026 /* The single value limit and min/max limits use the same bytes in the union of btrfs_balance_args, so save the originals here before the limit filters decrement them in place. */ 4027 u64 limit_data = bctl->data.limit; 4028 u64 limit_meta = bctl->meta.limit; 4029 u64 limit_sys = bctl->sys.limit; 4030 u32 count_data = 0; 4031 u32 count_meta = 0; 4032 u32 count_sys = 0; 4033 int chunk_reserved = 0; 4034 4035 path = btrfs_alloc_path(); 4036 if (!path) { 4037 ret = -ENOMEM; 4038 goto error; 4039 } 4040 4041 /* zero out stat counters */ 4042 spin_lock(&fs_info->balance_lock); 4043 memset(&bctl->stat, 0, sizeof(bctl->stat)); 4044 spin_unlock(&fs_info->balance_lock); 4045 again: 4046 if (!counting) { 4047 /* 4048 * The single value limit and min/max limits use the same bytes 4049 * in the union of btrfs_balance_args, so restore the values that the counting pass decremented in place. 4050 */ 4051 bctl->data.limit = limit_data; 4052 bctl->meta.limit = limit_meta; 4053 bctl->sys.limit = limit_sys; 4054 } 4055 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 4056 key.offset = (u64)-1; 4057 key.type = BTRFS_CHUNK_ITEM_KEY; 4058 4059 while (1) { 4060 if ((!counting && atomic_read(&fs_info->balance_pause_req)) || 4061 atomic_read(&fs_info->balance_cancel_req)) { 4062 ret = -ECANCELED; 4063 goto error; 4064 } 4065 4066 mutex_lock(&fs_info->reclaim_bgs_lock); 4067 ret =
btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0); 4068 if (ret < 0) { 4069 mutex_unlock(&fs_info->reclaim_bgs_lock); 4070 goto error; 4071 } 4072 4073 /* 4074 * this shouldn't happen, it means the last relocate 4075 * failed 4076 */ 4077 if (ret == 0) 4078 BUG(); /* FIXME break ? */ 4079 4080 ret = btrfs_previous_item(chunk_root, path, 0, 4081 BTRFS_CHUNK_ITEM_KEY); 4082 if (ret) { 4083 mutex_unlock(&fs_info->reclaim_bgs_lock); 4084 ret = 0; 4085 break; 4086 } 4087 4088 leaf = path->nodes[0]; 4089 slot = path->slots[0]; 4090 btrfs_item_key_to_cpu(leaf, &found_key, slot); 4091 4092 if (found_key.objectid != key.objectid) { 4093 mutex_unlock(&fs_info->reclaim_bgs_lock); 4094 break; 4095 } 4096 4097 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk); 4098 chunk_type = btrfs_chunk_type(leaf, chunk); 4099 4100 if (!counting) { 4101 spin_lock(&fs_info->balance_lock); 4102 bctl->stat.considered++; 4103 spin_unlock(&fs_info->balance_lock); 4104 } 4105 4106 ret = should_balance_chunk(leaf, chunk, found_key.offset); 4107 4108 btrfs_release_path(path); 4109 if (!ret) { 4110 mutex_unlock(&fs_info->reclaim_bgs_lock); 4111 goto loop; 4112 } 4113 4114 if (counting) { 4115 mutex_unlock(&fs_info->reclaim_bgs_lock); 4116 spin_lock(&fs_info->balance_lock); 4117 bctl->stat.expected++; 4118 spin_unlock(&fs_info->balance_lock); 4119 4120 if (chunk_type & BTRFS_BLOCK_GROUP_DATA) 4121 count_data++; 4122 else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) 4123 count_sys++; 4124 else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA) 4125 count_meta++; 4126 4127 goto loop; 4128 } 4129 4130 /* 4131 * Apply limit_min filter, no need to check if the LIMITS 4132 * filter is used, limit_min is 0 by default 4133 */ 4134 if (((chunk_type & BTRFS_BLOCK_GROUP_DATA) && 4135 count_data < bctl->data.limit_min) 4136 || ((chunk_type & BTRFS_BLOCK_GROUP_METADATA) && 4137 count_meta < bctl->meta.limit_min) 4138 || ((chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) && 4139 count_sys < bctl->sys.limit_min)) { 4140 mutex_unlock(&fs_info->reclaim_bgs_lock); 4141 goto loop; 4142 } 4143 4144 if (!chunk_reserved) { 4145 /* 4146 * We may be relocating the only data chunk we have, 4147 * which could potentially end up with losing data's 4148 * raid profile, so lets allocate an empty one in 4149 * advance. 4150 */ 4151 ret = btrfs_may_alloc_data_chunk(fs_info, 4152 found_key.offset); 4153 if (ret < 0) { 4154 mutex_unlock(&fs_info->reclaim_bgs_lock); 4155 goto error; 4156 } else if (ret == 1) { 4157 chunk_reserved = 1; 4158 } 4159 } 4160 4161 ret = btrfs_relocate_chunk(fs_info, found_key.offset); 4162 mutex_unlock(&fs_info->reclaim_bgs_lock); 4163 if (ret == -ENOSPC) { 4164 enospc_errors++; 4165 } else if (ret == -ETXTBSY) { 4166 btrfs_info(fs_info, 4167 "skipping relocation of block group %llu due to active swapfile", 4168 found_key.offset); 4169 ret = 0; 4170 } else if (ret) { 4171 goto error; 4172 } else { 4173 spin_lock(&fs_info->balance_lock); 4174 bctl->stat.completed++; 4175 spin_unlock(&fs_info->balance_lock); 4176 } 4177 loop: 4178 if (found_key.offset == 0) 4179 break; 4180 key.offset = found_key.offset - 1; 4181 } 4182 4183 if (counting) { 4184 btrfs_release_path(path); 4185 counting = false; 4186 goto again; 4187 } 4188 error: 4189 btrfs_free_path(path); 4190 if (enospc_errors) { 4191 btrfs_info(fs_info, "%d enospc errors during balance", 4192 enospc_errors); 4193 if (!ret) 4194 ret = -ENOSPC; 4195 } 4196 4197 return ret; 4198 } 4199 4200 /* 4201 * See if a given profile is valid and reduced. 
4202 * 4203 * @flags: profile to validate 4204 * @extended: if true @flags is treated as an extended profile 4205 */ 4206 static int alloc_profile_is_valid(u64 flags, int extended) 4207 { 4208 u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK : 4209 BTRFS_BLOCK_GROUP_PROFILE_MASK); 4210 4211 flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK; 4212 4213 /* 1) check that all other bits are zeroed */ 4214 if (flags & ~mask) 4215 return 0; 4216 4217 /* 2) see if profile is reduced */ 4218 if (flags == 0) 4219 return !extended; /* "0" is valid for usual profiles */ 4220 4221 return has_single_bit_set(flags); 4222 } 4223 4224 /* 4225 * Validate target profile against allowed profiles and return true if it's OK. 4226 * Otherwise print the error message and return false. 4227 */ 4228 static inline int validate_convert_profile(struct btrfs_fs_info *fs_info, 4229 const struct btrfs_balance_args *bargs, 4230 u64 allowed, const char *type) 4231 { 4232 if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT)) 4233 return true; 4234 4235 /* Profile is valid and does not have bits outside of the allowed set */ 4236 if (alloc_profile_is_valid(bargs->target, 1) && 4237 (bargs->target & ~allowed) == 0) 4238 return true; 4239 4240 btrfs_err(fs_info, "balance: invalid convert %s profile %s", 4241 type, btrfs_bg_type_to_raid_name(bargs->target)); 4242 return false; 4243 } 4244 4245 /* 4246 * Fill @buf with textual description of balance filter flags @bargs, up to 4247 * @size_buf including the terminating null. The output may be trimmed if it 4248 * does not fit into the provided buffer. 4249 */ 4250 static void describe_balance_args(struct btrfs_balance_args *bargs, char *buf, 4251 u32 size_buf) 4252 { 4253 int ret; 4254 u32 size_bp = size_buf; 4255 char *bp = buf; 4256 u64 flags = bargs->flags; 4257 char tmp_buf[128] = {'\0'}; 4258 4259 if (!flags) 4260 return; 4261 4262 #define CHECK_APPEND_NOARG(a) \ 4263 do { \ 4264 ret = snprintf(bp, size_bp, (a)); \ 4265 if (ret < 0 || ret >= size_bp) \ 4266 goto out_overflow; \ 4267 size_bp -= ret; \ 4268 bp += ret; \ 4269 } while (0) 4270 4271 #define CHECK_APPEND_1ARG(a, v1) \ 4272 do { \ 4273 ret = snprintf(bp, size_bp, (a), (v1)); \ 4274 if (ret < 0 || ret >= size_bp) \ 4275 goto out_overflow; \ 4276 size_bp -= ret; \ 4277 bp += ret; \ 4278 } while (0) 4279 4280 #define CHECK_APPEND_2ARG(a, v1, v2) \ 4281 do { \ 4282 ret = snprintf(bp, size_bp, (a), (v1), (v2)); \ 4283 if (ret < 0 || ret >= size_bp) \ 4284 goto out_overflow; \ 4285 size_bp -= ret; \ 4286 bp += ret; \ 4287 } while (0) 4288 4289 if (flags & BTRFS_BALANCE_ARGS_CONVERT) 4290 CHECK_APPEND_1ARG("convert=%s,", 4291 btrfs_bg_type_to_raid_name(bargs->target)); 4292 4293 if (flags & BTRFS_BALANCE_ARGS_SOFT) 4294 CHECK_APPEND_NOARG("soft,"); 4295 4296 if (flags & BTRFS_BALANCE_ARGS_PROFILES) { 4297 btrfs_describe_block_groups(bargs->profiles, tmp_buf, 4298 sizeof(tmp_buf)); 4299 CHECK_APPEND_1ARG("profiles=%s,", tmp_buf); 4300 } 4301 4302 if (flags & BTRFS_BALANCE_ARGS_USAGE) 4303 CHECK_APPEND_1ARG("usage=%llu,", bargs->usage); 4304 4305 if (flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) 4306 CHECK_APPEND_2ARG("usage=%u..%u,", 4307 bargs->usage_min, bargs->usage_max); 4308 4309 if (flags & BTRFS_BALANCE_ARGS_DEVID) 4310 CHECK_APPEND_1ARG("devid=%llu,", bargs->devid); 4311 4312 if (flags & BTRFS_BALANCE_ARGS_DRANGE) 4313 CHECK_APPEND_2ARG("drange=%llu..%llu,", 4314 bargs->pstart, bargs->pend); 4315 4316 if (flags & BTRFS_BALANCE_ARGS_VRANGE) 4317 CHECK_APPEND_2ARG("vrange=%llu..%llu,", 4318 bargs->vstart, bargs->vend); 4319 4320 if 
(flags & BTRFS_BALANCE_ARGS_LIMIT) 4321 CHECK_APPEND_1ARG("limit=%llu,", bargs->limit); 4322 4323 if (flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE) 4324 CHECK_APPEND_2ARG("limit=%u..%u,", 4325 bargs->limit_min, bargs->limit_max); 4326 4327 if (flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) 4328 CHECK_APPEND_2ARG("stripes=%u..%u,", 4329 bargs->stripes_min, bargs->stripes_max); 4330 4331 #undef CHECK_APPEND_2ARG 4332 #undef CHECK_APPEND_1ARG 4333 #undef CHECK_APPEND_NOARG 4334 4335 out_overflow: 4336 4337 if (size_bp < size_buf) 4338 buf[size_buf - size_bp - 1] = '\0'; /* remove last , */ 4339 else 4340 buf[0] = '\0'; 4341 } 4342 4343 static void describe_balance_start_or_resume(struct btrfs_fs_info *fs_info) 4344 { 4345 u32 size_buf = 1024; 4346 char tmp_buf[192] = {'\0'}; 4347 char *buf; 4348 char *bp; 4349 u32 size_bp = size_buf; 4350 int ret; 4351 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 4352 4353 buf = kzalloc(size_buf, GFP_KERNEL); 4354 if (!buf) 4355 return; 4356 4357 bp = buf; 4358 4359 #define CHECK_APPEND_1ARG(a, v1) \ 4360 do { \ 4361 ret = snprintf(bp, size_bp, (a), (v1)); \ 4362 if (ret < 0 || ret >= size_bp) \ 4363 goto out_overflow; \ 4364 size_bp -= ret; \ 4365 bp += ret; \ 4366 } while (0) 4367 4368 if (bctl->flags & BTRFS_BALANCE_FORCE) 4369 CHECK_APPEND_1ARG("%s", "-f "); 4370 4371 if (bctl->flags & BTRFS_BALANCE_DATA) { 4372 describe_balance_args(&bctl->data, tmp_buf, sizeof(tmp_buf)); 4373 CHECK_APPEND_1ARG("-d%s ", tmp_buf); 4374 } 4375 4376 if (bctl->flags & BTRFS_BALANCE_METADATA) { 4377 describe_balance_args(&bctl->meta, tmp_buf, sizeof(tmp_buf)); 4378 CHECK_APPEND_1ARG("-m%s ", tmp_buf); 4379 } 4380 4381 if (bctl->flags & BTRFS_BALANCE_SYSTEM) { 4382 describe_balance_args(&bctl->sys, tmp_buf, sizeof(tmp_buf)); 4383 CHECK_APPEND_1ARG("-s%s ", tmp_buf); 4384 } 4385 4386 #undef CHECK_APPEND_1ARG 4387 4388 out_overflow: 4389 4390 if (size_bp < size_buf) 4391 buf[size_buf - size_bp - 1] = '\0'; /* remove last " " */ 4392 btrfs_info(fs_info, "balance: %s %s", 4393 (bctl->flags & BTRFS_BALANCE_RESUME) ? 4394 "resume" : "start", buf); 4395 4396 kfree(buf); 4397 } 4398 4399 /* 4400 * Should be called with the balance mutex held. 4401 */ 4402 int btrfs_balance(struct btrfs_fs_info *fs_info, 4403 struct btrfs_balance_control *bctl, 4404 struct btrfs_ioctl_balance_args *bargs) 4405 { 4406 u64 meta_target, data_target; 4407 u64 allowed; 4408 int mixed = 0; 4409 int ret; 4410 u64 num_devices; 4411 unsigned seq; 4412 bool reducing_redundancy; 4413 bool paused = false; 4414 int i; 4415 4416 if (btrfs_fs_closing(fs_info) || 4417 atomic_read(&fs_info->balance_pause_req) || 4418 btrfs_should_cancel_balance(fs_info)) { 4419 ret = -EINVAL; 4420 goto out; 4421 } 4422 4423 allowed = btrfs_super_incompat_flags(fs_info->super_copy); 4424 if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) 4425 mixed = 1; 4426 4427 /* 4428 * In case of mixed groups both data and meta should be picked, 4429 * and identical options should be given for both of them.
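*
* As an illustration (a sketch assuming the usual btrfs-progs syntax, not
* taken from this file): on a mixed-group filesystem
*
*   btrfs balance start -dconvert=raid1,usage=50 -mconvert=raid1,usage=50 <mnt>
*
* passes this check because the data and metadata args are identical,
* whereas passing different filters to -d and -m fails the memcmp() below
* and returns -EINVAL.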
4430 */ 4431 allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA; 4432 if (mixed && (bctl->flags & allowed)) { 4433 if (!(bctl->flags & BTRFS_BALANCE_DATA) || 4434 !(bctl->flags & BTRFS_BALANCE_METADATA) || 4435 memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) { 4436 btrfs_err(fs_info, 4437 "balance: mixed groups data and metadata options must be the same"); 4438 ret = -EINVAL; 4439 goto out; 4440 } 4441 } 4442 4443 /* 4444 * rw_devices will not change at the moment, device add/delete/replace 4445 * are exclusive 4446 */ 4447 num_devices = fs_info->fs_devices->rw_devices; 4448 4449 /* 4450 * SINGLE profile on-disk has no profile bit, but in-memory we have a 4451 * special bit for it, to make it easier to distinguish. Thus we need 4452 * to set it manually, or balance would refuse the profile. 4453 */ 4454 allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE; 4455 for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++) 4456 if (num_devices >= btrfs_raid_array[i].devs_min) 4457 allowed |= btrfs_raid_array[i].bg_flag; 4458 4459 if (!validate_convert_profile(fs_info, &bctl->data, allowed, "data") || 4460 !validate_convert_profile(fs_info, &bctl->meta, allowed, "metadata") || 4461 !validate_convert_profile(fs_info, &bctl->sys, allowed, "system")) { 4462 ret = -EINVAL; 4463 goto out; 4464 } 4465 4466 /* 4467 * Allow to reduce metadata or system integrity only if force set for 4468 * profiles with redundancy (copies, parity) 4469 */ 4470 allowed = 0; 4471 for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++) { 4472 if (btrfs_raid_array[i].ncopies >= 2 || 4473 btrfs_raid_array[i].tolerated_failures >= 1) 4474 allowed |= btrfs_raid_array[i].bg_flag; 4475 } 4476 do { 4477 seq = read_seqbegin(&fs_info->profiles_lock); 4478 4479 if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) && 4480 (fs_info->avail_system_alloc_bits & allowed) && 4481 !(bctl->sys.target & allowed)) || 4482 ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) && 4483 (fs_info->avail_metadata_alloc_bits & allowed) && 4484 !(bctl->meta.target & allowed))) 4485 reducing_redundancy = true; 4486 else 4487 reducing_redundancy = false; 4488 4489 /* if we're not converting, the target field is uninitialized */ 4490 meta_target = (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) ? 4491 bctl->meta.target : fs_info->avail_metadata_alloc_bits; 4492 data_target = (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) ? 
4493 bctl->data.target : fs_info->avail_data_alloc_bits; 4494 } while (read_seqretry(&fs_info->profiles_lock, seq)); 4495 4496 if (reducing_redundancy) { 4497 if (bctl->flags & BTRFS_BALANCE_FORCE) { 4498 btrfs_info(fs_info, 4499 "balance: force reducing metadata redundancy"); 4500 } else { 4501 btrfs_err(fs_info, 4502 "balance: reduces metadata redundancy, use --force if you want this"); 4503 ret = -EINVAL; 4504 goto out; 4505 } 4506 } 4507 4508 if (btrfs_get_num_tolerated_disk_barrier_failures(meta_target) < 4509 btrfs_get_num_tolerated_disk_barrier_failures(data_target)) { 4510 btrfs_warn(fs_info, 4511 "balance: metadata profile %s has lower redundancy than data profile %s", 4512 btrfs_bg_type_to_raid_name(meta_target), 4513 btrfs_bg_type_to_raid_name(data_target)); 4514 } 4515 4516 ret = insert_balance_item(fs_info, bctl); 4517 if (ret && ret != -EEXIST) 4518 goto out; 4519 4520 if (!(bctl->flags & BTRFS_BALANCE_RESUME)) { 4521 BUG_ON(ret == -EEXIST); 4522 BUG_ON(fs_info->balance_ctl); 4523 spin_lock(&fs_info->balance_lock); 4524 fs_info->balance_ctl = bctl; 4525 spin_unlock(&fs_info->balance_lock); 4526 } else { 4527 BUG_ON(ret != -EEXIST); 4528 spin_lock(&fs_info->balance_lock); 4529 update_balance_args(bctl); 4530 spin_unlock(&fs_info->balance_lock); 4531 } 4532 4533 ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4534 set_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags); 4535 describe_balance_start_or_resume(fs_info); 4536 mutex_unlock(&fs_info->balance_mutex); 4537 4538 ret = __btrfs_balance(fs_info); 4539 4540 mutex_lock(&fs_info->balance_mutex); 4541 if (ret == -ECANCELED && atomic_read(&fs_info->balance_pause_req)) { 4542 btrfs_info(fs_info, "balance: paused"); 4543 btrfs_exclop_balance(fs_info, BTRFS_EXCLOP_BALANCE_PAUSED); 4544 paused = true; 4545 } 4546 /* 4547 * Balance can be canceled by: 4548 * 4549 * - Regular cancel request 4550 * Then ret == -ECANCELED and balance_cancel_req > 0 4551 * 4552 * - Fatal signal to "btrfs" process 4553 * Either the signal caught by wait_reserve_ticket() and callers 4554 * got -EINTR, or caught by btrfs_should_cancel_balance() and 4555 * got -ECANCELED. 4556 * Either way, in this case balance_cancel_req = 0, and 4557 * ret == -EINTR or ret == -ECANCELED. 4558 * 4559 * So here we only check the return value to catch canceled balance. 4560 */ 4561 else if (ret == -ECANCELED || ret == -EINTR) 4562 btrfs_info(fs_info, "balance: canceled"); 4563 else 4564 btrfs_info(fs_info, "balance: ended with status: %d", ret); 4565 4566 clear_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags); 4567 4568 if (bargs) { 4569 memset(bargs, 0, sizeof(*bargs)); 4570 btrfs_update_ioctl_balance_args(fs_info, bargs); 4571 } 4572 4573 /* We didn't pause, we can clean everything up. 
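* A paused balance, by contrast, keeps fs_info->balance_ctl and the
* on-disk balance item around so that a later mount or an explicit user
* request can resume it.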
*/ 4574 if (!paused) { 4575 reset_balance_state(fs_info); 4576 btrfs_exclop_finish(fs_info); 4577 } 4578 4579 wake_up(&fs_info->balance_wait_q); 4580 4581 return ret; 4582 out: 4583 if (bctl->flags & BTRFS_BALANCE_RESUME) 4584 reset_balance_state(fs_info); 4585 else 4586 kfree(bctl); 4587 btrfs_exclop_finish(fs_info); 4588 4589 return ret; 4590 } 4591 4592 static int balance_kthread(void *data) 4593 { 4594 struct btrfs_fs_info *fs_info = data; 4595 int ret = 0; 4596 4597 sb_start_write(fs_info->sb); 4598 mutex_lock(&fs_info->balance_mutex); 4599 if (fs_info->balance_ctl) 4600 ret = btrfs_balance(fs_info, fs_info->balance_ctl, NULL); 4601 mutex_unlock(&fs_info->balance_mutex); 4602 sb_end_write(fs_info->sb); 4603 4604 return ret; 4605 } 4606 4607 int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info) 4608 { 4609 struct task_struct *tsk; 4610 4611 mutex_lock(&fs_info->balance_mutex); 4612 if (!fs_info->balance_ctl) { 4613 mutex_unlock(&fs_info->balance_mutex); 4614 return 0; 4615 } 4616 mutex_unlock(&fs_info->balance_mutex); 4617 4618 if (btrfs_test_opt(fs_info, SKIP_BALANCE)) { 4619 btrfs_info(fs_info, "balance: resume skipped"); 4620 return 0; 4621 } 4622 4623 spin_lock(&fs_info->super_lock); 4624 ASSERT(fs_info->exclusive_operation == BTRFS_EXCLOP_BALANCE_PAUSED); 4625 fs_info->exclusive_operation = BTRFS_EXCLOP_BALANCE; 4626 spin_unlock(&fs_info->super_lock); 4627 /* 4628 * A ro->rw remount sequence should continue with the paused balance 4629 * regardless of who pauses it, system or the user as of now, so set 4630 * the resume flag. 4631 */ 4632 spin_lock(&fs_info->balance_lock); 4633 fs_info->balance_ctl->flags |= BTRFS_BALANCE_RESUME; 4634 spin_unlock(&fs_info->balance_lock); 4635 4636 tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance"); 4637 return PTR_ERR_OR_ZERO(tsk); 4638 } 4639 4640 int btrfs_recover_balance(struct btrfs_fs_info *fs_info) 4641 { 4642 struct btrfs_balance_control *bctl; 4643 struct btrfs_balance_item *item; 4644 struct btrfs_disk_balance_args disk_bargs; 4645 struct btrfs_path *path; 4646 struct extent_buffer *leaf; 4647 struct btrfs_key key; 4648 int ret; 4649 4650 path = btrfs_alloc_path(); 4651 if (!path) 4652 return -ENOMEM; 4653 4654 key.objectid = BTRFS_BALANCE_OBJECTID; 4655 key.type = BTRFS_TEMPORARY_ITEM_KEY; 4656 key.offset = 0; 4657 4658 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0); 4659 if (ret < 0) 4660 goto out; 4661 if (ret > 0) { /* ret = -ENOENT; */ 4662 ret = 0; 4663 goto out; 4664 } 4665 4666 bctl = kzalloc(sizeof(*bctl), GFP_NOFS); 4667 if (!bctl) { 4668 ret = -ENOMEM; 4669 goto out; 4670 } 4671 4672 leaf = path->nodes[0]; 4673 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item); 4674 4675 bctl->flags = btrfs_balance_flags(leaf, item); 4676 bctl->flags |= BTRFS_BALANCE_RESUME; 4677 4678 btrfs_balance_data(leaf, item, &disk_bargs); 4679 btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs); 4680 btrfs_balance_meta(leaf, item, &disk_bargs); 4681 btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs); 4682 btrfs_balance_sys(leaf, item, &disk_bargs); 4683 btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs); 4684 4685 /* 4686 * This should never happen, as the paused balance state is recovered 4687 * during mount without any chance of other exclusive ops to collide. 4688 * 4689 * This gives the exclusive op status to balance and keeps in paused 4690 * state until user intervention (cancel or umount). If the ownership 4691 * cannot be assigned, show a message but do not fail. 
The balance 4692 * is in a paused state and must have fs_info::balance_ctl properly 4693 * set up. 4694 */ 4695 if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE_PAUSED)) 4696 btrfs_warn(fs_info, 4697 "balance: cannot set exclusive op status, resume manually"); 4698 4699 btrfs_release_path(path); 4700 4701 mutex_lock(&fs_info->balance_mutex); 4702 BUG_ON(fs_info->balance_ctl); 4703 spin_lock(&fs_info->balance_lock); 4704 fs_info->balance_ctl = bctl; 4705 spin_unlock(&fs_info->balance_lock); 4706 mutex_unlock(&fs_info->balance_mutex); 4707 out: 4708 btrfs_free_path(path); 4709 return ret; 4710 } 4711 4712 int btrfs_pause_balance(struct btrfs_fs_info *fs_info) 4713 { 4714 int ret = 0; 4715 4716 mutex_lock(&fs_info->balance_mutex); 4717 if (!fs_info->balance_ctl) { 4718 mutex_unlock(&fs_info->balance_mutex); 4719 return -ENOTCONN; 4720 } 4721 4722 if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) { 4723 atomic_inc(&fs_info->balance_pause_req); 4724 mutex_unlock(&fs_info->balance_mutex); 4725 4726 wait_event(fs_info->balance_wait_q, 4727 !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4728 4729 mutex_lock(&fs_info->balance_mutex); 4730 /* we are good with balance_ctl ripped off from under us */ 4731 BUG_ON(test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4732 atomic_dec(&fs_info->balance_pause_req); 4733 } else { 4734 ret = -ENOTCONN; 4735 } 4736 4737 mutex_unlock(&fs_info->balance_mutex); 4738 return ret; 4739 } 4740 4741 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info) 4742 { 4743 mutex_lock(&fs_info->balance_mutex); 4744 if (!fs_info->balance_ctl) { 4745 mutex_unlock(&fs_info->balance_mutex); 4746 return -ENOTCONN; 4747 } 4748 4749 /* 4750 * A paused balance with the item stored on disk can be resumed at 4751 * mount time if the mount is read-write. Otherwise it's still paused 4752 * and we must not allow cancelling as it deletes the item. 4753 */ 4754 if (sb_rdonly(fs_info->sb)) { 4755 mutex_unlock(&fs_info->balance_mutex); 4756 return -EROFS; 4757 } 4758 4759 atomic_inc(&fs_info->balance_cancel_req); 4760 /* 4761 * if we are running just wait and return, balance item is 4762 * deleted in btrfs_balance in this case 4763 */ 4764 if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) { 4765 mutex_unlock(&fs_info->balance_mutex); 4766 wait_event(fs_info->balance_wait_q, 4767 !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4768 mutex_lock(&fs_info->balance_mutex); 4769 } else { 4770 mutex_unlock(&fs_info->balance_mutex); 4771 /* 4772 * Lock released to allow other waiters to continue, we'll 4773 * reexamine the status again. 4774 */ 4775 mutex_lock(&fs_info->balance_mutex); 4776 4777 if (fs_info->balance_ctl) { 4778 reset_balance_state(fs_info); 4779 btrfs_exclop_finish(fs_info); 4780 btrfs_info(fs_info, "balance: canceled"); 4781 } 4782 } 4783 4784 ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4785 atomic_dec(&fs_info->balance_cancel_req); 4786 mutex_unlock(&fs_info->balance_mutex); 4787 return 0; 4788 } 4789 4790 /* 4791 * shrinking a device means finding all of the device extents past 4792 * the new size, and then following the back refs to the chunks. 
4793 * The chunk relocation code actually frees the device extent 4794 */ 4795 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size) 4796 { 4797 struct btrfs_fs_info *fs_info = device->fs_info; 4798 struct btrfs_root *root = fs_info->dev_root; 4799 struct btrfs_trans_handle *trans; 4800 struct btrfs_dev_extent *dev_extent = NULL; 4801 struct btrfs_path *path; 4802 u64 length; 4803 u64 chunk_offset; 4804 int ret; 4805 int slot; 4806 int failed = 0; 4807 bool retried = false; 4808 struct extent_buffer *l; 4809 struct btrfs_key key; 4810 struct btrfs_super_block *super_copy = fs_info->super_copy; 4811 u64 old_total = btrfs_super_total_bytes(super_copy); 4812 u64 old_size = btrfs_device_get_total_bytes(device); 4813 u64 diff; 4814 u64 start; 4815 u64 free_diff = 0; 4816 4817 new_size = round_down(new_size, fs_info->sectorsize); 4818 start = new_size; 4819 diff = round_down(old_size - new_size, fs_info->sectorsize); 4820 4821 if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) 4822 return -EINVAL; 4823 4824 path = btrfs_alloc_path(); 4825 if (!path) 4826 return -ENOMEM; 4827 4828 path->reada = READA_BACK; 4829 4830 trans = btrfs_start_transaction(root, 0); 4831 if (IS_ERR(trans)) { 4832 btrfs_free_path(path); 4833 return PTR_ERR(trans); 4834 } 4835 4836 mutex_lock(&fs_info->chunk_mutex); 4837 4838 btrfs_device_set_total_bytes(device, new_size); 4839 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 4840 device->fs_devices->total_rw_bytes -= diff; 4841 4842 /* 4843 * The new free_chunk_space is new_size - used, so we have to 4844 * subtract the delta of the old free_chunk_space which included 4845 * old_size - used. If used > new_size then just subtract this 4846 * entire device's free space. 4847 */ 4848 if (device->bytes_used < new_size) 4849 free_diff = (old_size - device->bytes_used) - 4850 (new_size - device->bytes_used); 4851 else 4852 free_diff = old_size - device->bytes_used; 4853 atomic64_sub(free_diff, &fs_info->free_chunk_space); 4854 } 4855 4856 /* 4857 * Once the device's size has been set to the new size, ensure all 4858 * in-memory chunks are synced to disk so that the loop below sees them 4859 * and relocates them accordingly. 
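*
* A worked example of the free_chunk_space math above (illustrative
* numbers only): with old_size = 100G, new_size = 60G and
* bytes_used = 20G, free_diff = (100G - 20G) - (60G - 20G) = 40G,
* exactly the shrunk range; if bytes_used were 70G (> new_size), the
* device's whole free space of 100G - 70G = 30G would be subtracted
* instead.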
4860 */ 4861 if (contains_pending_extent(device, &start, diff)) { 4862 mutex_unlock(&fs_info->chunk_mutex); 4863 ret = btrfs_commit_transaction(trans); 4864 if (ret) 4865 goto done; 4866 } else { 4867 mutex_unlock(&fs_info->chunk_mutex); 4868 btrfs_end_transaction(trans); 4869 } 4870 4871 again: 4872 key.objectid = device->devid; 4873 key.offset = (u64)-1; 4874 key.type = BTRFS_DEV_EXTENT_KEY; 4875 4876 do { 4877 mutex_lock(&fs_info->reclaim_bgs_lock); 4878 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 4879 if (ret < 0) { 4880 mutex_unlock(&fs_info->reclaim_bgs_lock); 4881 goto done; 4882 } 4883 4884 ret = btrfs_previous_item(root, path, 0, key.type); 4885 if (ret) { 4886 mutex_unlock(&fs_info->reclaim_bgs_lock); 4887 if (ret < 0) 4888 goto done; 4889 ret = 0; 4890 btrfs_release_path(path); 4891 break; 4892 } 4893 4894 l = path->nodes[0]; 4895 slot = path->slots[0]; 4896 btrfs_item_key_to_cpu(l, &key, path->slots[0]); 4897 4898 if (key.objectid != device->devid) { 4899 mutex_unlock(&fs_info->reclaim_bgs_lock); 4900 btrfs_release_path(path); 4901 break; 4902 } 4903 4904 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent); 4905 length = btrfs_dev_extent_length(l, dev_extent); 4906 4907 if (key.offset + length <= new_size) { 4908 mutex_unlock(&fs_info->reclaim_bgs_lock); 4909 btrfs_release_path(path); 4910 break; 4911 } 4912 4913 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent); 4914 btrfs_release_path(path); 4915 4916 /* 4917 * We may be relocating the only data chunk we have, 4918 * which could potentially end up with losing data's 4919 * raid profile, so lets allocate an empty one in 4920 * advance. 4921 */ 4922 ret = btrfs_may_alloc_data_chunk(fs_info, chunk_offset); 4923 if (ret < 0) { 4924 mutex_unlock(&fs_info->reclaim_bgs_lock); 4925 goto done; 4926 } 4927 4928 ret = btrfs_relocate_chunk(fs_info, chunk_offset); 4929 mutex_unlock(&fs_info->reclaim_bgs_lock); 4930 if (ret == -ENOSPC) { 4931 failed++; 4932 } else if (ret) { 4933 if (ret == -ETXTBSY) { 4934 btrfs_warn(fs_info, 4935 "could not shrink block group %llu due to active swapfile", 4936 chunk_offset); 4937 } 4938 goto done; 4939 } 4940 } while (key.offset-- > 0); 4941 4942 if (failed && !retried) { 4943 failed = 0; 4944 retried = true; 4945 goto again; 4946 } else if (failed && retried) { 4947 ret = -ENOSPC; 4948 goto done; 4949 } 4950 4951 /* Shrinking succeeded, else we would be at "done". */ 4952 trans = btrfs_start_transaction(root, 0); 4953 if (IS_ERR(trans)) { 4954 ret = PTR_ERR(trans); 4955 goto done; 4956 } 4957 4958 mutex_lock(&fs_info->chunk_mutex); 4959 /* Clear all state bits beyond the shrunk device size */ 4960 clear_extent_bits(&device->alloc_state, new_size, (u64)-1, 4961 CHUNK_STATE_MASK); 4962 4963 btrfs_device_set_disk_total_bytes(device, new_size); 4964 if (list_empty(&device->post_commit_list)) 4965 list_add_tail(&device->post_commit_list, 4966 &trans->transaction->dev_update_list); 4967 4968 WARN_ON(diff > old_total); 4969 btrfs_set_super_total_bytes(super_copy, 4970 round_down(old_total - diff, fs_info->sectorsize)); 4971 mutex_unlock(&fs_info->chunk_mutex); 4972 4973 btrfs_reserve_chunk_metadata(trans, false); 4974 /* Now btrfs_update_device() will change the on-disk size. 
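* The in-memory sizes were already lowered under chunk_mutex above; this
* call persists the shrunk size in the device item in the chunk tree.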
*/ 4975 ret = btrfs_update_device(trans, device); 4976 btrfs_trans_release_chunk_metadata(trans); 4977 if (ret < 0) { 4978 btrfs_abort_transaction(trans, ret); 4979 btrfs_end_transaction(trans); 4980 } else { 4981 ret = btrfs_commit_transaction(trans); 4982 } 4983 done: 4984 btrfs_free_path(path); 4985 if (ret) { 4986 mutex_lock(&fs_info->chunk_mutex); 4987 btrfs_device_set_total_bytes(device, old_size); 4988 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 4989 device->fs_devices->total_rw_bytes += diff; 4990 atomic64_add(free_diff, &fs_info->free_chunk_space); 4991 } 4992 mutex_unlock(&fs_info->chunk_mutex); 4993 } 4994 return ret; 4995 } 4996 4997 static int btrfs_add_system_chunk(struct btrfs_fs_info *fs_info, 4998 struct btrfs_key *key, 4999 struct btrfs_chunk *chunk, int item_size) 5000 { 5001 struct btrfs_super_block *super_copy = fs_info->super_copy; 5002 struct btrfs_disk_key disk_key; 5003 u32 array_size; 5004 u8 *ptr; 5005 5006 lockdep_assert_held(&fs_info->chunk_mutex); 5007 5008 array_size = btrfs_super_sys_array_size(super_copy); 5009 if (array_size + item_size + sizeof(disk_key) 5010 > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) 5011 return -EFBIG; 5012 5013 ptr = super_copy->sys_chunk_array + array_size; 5014 btrfs_cpu_key_to_disk(&disk_key, key); 5015 memcpy(ptr, &disk_key, sizeof(disk_key)); 5016 ptr += sizeof(disk_key); 5017 memcpy(ptr, chunk, item_size); 5018 item_size += sizeof(disk_key); 5019 btrfs_set_super_sys_array_size(super_copy, array_size + item_size); 5020 5021 return 0; 5022 } 5023 5024 /* 5025 * sort the devices in descending order by max_avail, total_avail 5026 */ 5027 static int btrfs_cmp_device_info(const void *a, const void *b) 5028 { 5029 const struct btrfs_device_info *di_a = a; 5030 const struct btrfs_device_info *di_b = b; 5031 5032 if (di_a->max_avail > di_b->max_avail) 5033 return -1; 5034 if (di_a->max_avail < di_b->max_avail) 5035 return 1; 5036 if (di_a->total_avail > di_b->total_avail) 5037 return -1; 5038 if (di_a->total_avail < di_b->total_avail) 5039 return 1; 5040 return 0; 5041 } 5042 5043 static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type) 5044 { 5045 if (!(type & BTRFS_BLOCK_GROUP_RAID56_MASK)) 5046 return; 5047 5048 btrfs_set_fs_incompat(info, RAID56); 5049 } 5050 5051 static void check_raid1c34_incompat_flag(struct btrfs_fs_info *info, u64 type) 5052 { 5053 if (!(type & (BTRFS_BLOCK_GROUP_RAID1C3 | BTRFS_BLOCK_GROUP_RAID1C4))) 5054 return; 5055 5056 btrfs_set_fs_incompat(info, RAID1C34); 5057 } 5058 5059 /* 5060 * Structure used internally for btrfs_create_chunk() function. 5061 * Wraps needed parameters. 
5062 */ 5063 struct alloc_chunk_ctl { 5064 u64 start; 5065 u64 type; 5066 /* Total number of stripes to allocate */ 5067 int num_stripes; 5068 /* sub_stripes info for map */ 5069 int sub_stripes; 5070 /* Stripes per device */ 5071 int dev_stripes; 5072 /* Maximum number of devices to use */ 5073 int devs_max; 5074 /* Minimum number of devices to use */ 5075 int devs_min; 5076 /* ndevs has to be a multiple of this */ 5077 int devs_increment; 5078 /* Number of copies */ 5079 int ncopies; 5080 /* Number of stripes worth of bytes to store parity information */ 5081 int nparity; 5082 u64 max_stripe_size; 5083 u64 max_chunk_size; 5084 u64 dev_extent_min; 5085 u64 stripe_size; 5086 u64 chunk_size; 5087 int ndevs; 5088 }; 5089 5090 static void init_alloc_chunk_ctl_policy_regular( 5091 struct btrfs_fs_devices *fs_devices, 5092 struct alloc_chunk_ctl *ctl) 5093 { 5094 struct btrfs_space_info *space_info; 5095 5096 space_info = btrfs_find_space_info(fs_devices->fs_info, ctl->type); 5097 ASSERT(space_info); 5098 5099 ctl->max_chunk_size = READ_ONCE(space_info->chunk_size); 5100 ctl->max_stripe_size = min_t(u64, ctl->max_chunk_size, SZ_1G); 5101 5102 if (ctl->type & BTRFS_BLOCK_GROUP_SYSTEM) 5103 ctl->devs_max = min_t(int, ctl->devs_max, BTRFS_MAX_DEVS_SYS_CHUNK); 5104 5105 /* We don't want a chunk larger than 10% of writable space */ 5106 ctl->max_chunk_size = min(mult_perc(fs_devices->total_rw_bytes, 10), 5107 ctl->max_chunk_size); 5108 ctl->dev_extent_min = btrfs_stripe_nr_to_offset(ctl->dev_stripes); 5109 } 5110 5111 static void init_alloc_chunk_ctl_policy_zoned( 5112 struct btrfs_fs_devices *fs_devices, 5113 struct alloc_chunk_ctl *ctl) 5114 { 5115 u64 zone_size = fs_devices->fs_info->zone_size; 5116 u64 limit; 5117 int min_num_stripes = ctl->devs_min * ctl->dev_stripes; 5118 int min_data_stripes = (min_num_stripes - ctl->nparity) / ctl->ncopies; 5119 u64 min_chunk_size = min_data_stripes * zone_size; 5120 u64 type = ctl->type; 5121 5122 ctl->max_stripe_size = zone_size; 5123 if (type & BTRFS_BLOCK_GROUP_DATA) { 5124 ctl->max_chunk_size = round_down(BTRFS_MAX_DATA_CHUNK_SIZE, 5125 zone_size); 5126 } else if (type & BTRFS_BLOCK_GROUP_METADATA) { 5127 ctl->max_chunk_size = ctl->max_stripe_size; 5128 } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) { 5129 ctl->max_chunk_size = 2 * ctl->max_stripe_size; 5130 ctl->devs_max = min_t(int, ctl->devs_max, 5131 BTRFS_MAX_DEVS_SYS_CHUNK); 5132 } else { 5133 BUG(); 5134 } 5135 5136 /* We don't want a chunk larger than 10% of writable space */ 5137 limit = max(round_down(mult_perc(fs_devices->total_rw_bytes, 10), 5138 zone_size), 5139 min_chunk_size); 5140 ctl->max_chunk_size = min(limit, ctl->max_chunk_size); 5141 ctl->dev_extent_min = zone_size * ctl->dev_stripes; 5142 } 5143 5144 static void init_alloc_chunk_ctl(struct btrfs_fs_devices *fs_devices, 5145 struct alloc_chunk_ctl *ctl) 5146 { 5147 int index = btrfs_bg_flags_to_raid_index(ctl->type); 5148 5149 ctl->sub_stripes = btrfs_raid_array[index].sub_stripes; 5150 ctl->dev_stripes = btrfs_raid_array[index].dev_stripes; 5151 ctl->devs_max = btrfs_raid_array[index].devs_max; 5152 if (!ctl->devs_max) 5153 ctl->devs_max = BTRFS_MAX_DEVS(fs_devices->fs_info); 5154 ctl->devs_min = btrfs_raid_array[index].devs_min; 5155 ctl->devs_increment = btrfs_raid_array[index].devs_increment; 5156 ctl->ncopies = btrfs_raid_array[index].ncopies; 5157 ctl->nparity = btrfs_raid_array[index].nparity; 5158 ctl->ndevs = 0; 5159 5160 switch (fs_devices->chunk_alloc_policy) { 5161 case BTRFS_CHUNK_ALLOC_REGULAR: 5162 
init_alloc_chunk_ctl_policy_regular(fs_devices, ctl); 5163 break; 5164 case BTRFS_CHUNK_ALLOC_ZONED: 5165 init_alloc_chunk_ctl_policy_zoned(fs_devices, ctl); 5166 break; 5167 default: 5168 BUG(); 5169 } 5170 } 5171 5172 static int gather_device_info(struct btrfs_fs_devices *fs_devices, 5173 struct alloc_chunk_ctl *ctl, 5174 struct btrfs_device_info *devices_info) 5175 { 5176 struct btrfs_fs_info *info = fs_devices->fs_info; 5177 struct btrfs_device *device; 5178 u64 total_avail; 5179 u64 dev_extent_want = ctl->max_stripe_size * ctl->dev_stripes; 5180 int ret; 5181 int ndevs = 0; 5182 u64 max_avail; 5183 u64 dev_offset; 5184 5185 /* 5186 * in the first pass through the devices list, we gather information 5187 * about the available holes on each device. 5188 */ 5189 list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) { 5190 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 5191 WARN(1, KERN_ERR 5192 "BTRFS: read-only device in alloc_list\n"); 5193 continue; 5194 } 5195 5196 if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, 5197 &device->dev_state) || 5198 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) 5199 continue; 5200 5201 if (device->total_bytes > device->bytes_used) 5202 total_avail = device->total_bytes - device->bytes_used; 5203 else 5204 total_avail = 0; 5205 5206 /* If there is no space on this device, skip it. */ 5207 if (total_avail < ctl->dev_extent_min) 5208 continue; 5209 5210 ret = find_free_dev_extent(device, dev_extent_want, &dev_offset, 5211 &max_avail); 5212 if (ret && ret != -ENOSPC) 5213 return ret; 5214 5215 if (ret == 0) 5216 max_avail = dev_extent_want; 5217 5218 if (max_avail < ctl->dev_extent_min) { 5219 if (btrfs_test_opt(info, ENOSPC_DEBUG)) 5220 btrfs_debug(info, 5221 "%s: devid %llu has no free space, have=%llu want=%llu", 5222 __func__, device->devid, max_avail, 5223 ctl->dev_extent_min); 5224 continue; 5225 } 5226 5227 if (ndevs == fs_devices->rw_devices) { 5228 WARN(1, "%s: found more than %llu devices\n", 5229 __func__, fs_devices->rw_devices); 5230 break; 5231 } 5232 devices_info[ndevs].dev_offset = dev_offset; 5233 devices_info[ndevs].max_avail = max_avail; 5234 devices_info[ndevs].total_avail = total_avail; 5235 devices_info[ndevs].dev = device; 5236 ++ndevs; 5237 } 5238 ctl->ndevs = ndevs; 5239 5240 /* 5241 * now sort the devices by hole size / available space 5242 */ 5243 sort(devices_info, ndevs, sizeof(struct btrfs_device_info), 5244 btrfs_cmp_device_info, NULL); 5245 5246 return 0; 5247 } 5248 5249 static int decide_stripe_size_regular(struct alloc_chunk_ctl *ctl, 5250 struct btrfs_device_info *devices_info) 5251 { 5252 /* Number of stripes that count for block group size */ 5253 int data_stripes; 5254 5255 /* 5256 * The primary goal is to maximize the number of stripes, so use as 5257 * many devices as possible, even if the stripes are not maximum sized. 5258 * 5259 * The DUP profile stores more than one stripe per device, the 5260 * max_avail is the total size so we have to adjust. 5261 */ 5262 ctl->stripe_size = div_u64(devices_info[ctl->ndevs - 1].max_avail, 5263 ctl->dev_stripes); 5264 ctl->num_stripes = ctl->ndevs * ctl->dev_stripes; 5265 5266 /* This will have to be fixed for RAID1 and RAID10 over more drives */ 5267 data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies; 5268 5269 /* 5270 * Use the number of data stripes to figure out how big this chunk is 5271 * really going to be in terms of logical address space, and compare 5272 * that answer with the max chunk size. 
If it's higher, we try to 5273 * reduce stripe_size. 5274 */ 5275 if (ctl->stripe_size * data_stripes > ctl->max_chunk_size) { 5276 /* 5277 * Reduce stripe_size, round it up to a 16MB boundary again and 5278 * then use it, unless it ends up being even bigger than the 5279 * previous value we had already. 5280 */ 5281 ctl->stripe_size = min(round_up(div_u64(ctl->max_chunk_size, 5282 data_stripes), SZ_16M), 5283 ctl->stripe_size); 5284 } 5285 5286 /* Stripe size should not go beyond 1G. */ 5287 ctl->stripe_size = min_t(u64, ctl->stripe_size, SZ_1G); 5288 5289 /* Align to BTRFS_STRIPE_LEN */ 5290 ctl->stripe_size = round_down(ctl->stripe_size, BTRFS_STRIPE_LEN); 5291 ctl->chunk_size = ctl->stripe_size * data_stripes; 5292 5293 return 0; 5294 } 5295 5296 static int decide_stripe_size_zoned(struct alloc_chunk_ctl *ctl, 5297 struct btrfs_device_info *devices_info) 5298 { 5299 u64 zone_size = devices_info[0].dev->zone_info->zone_size; 5300 /* Number of stripes that count for block group size */ 5301 int data_stripes; 5302 5303 /* 5304 * It should hold because: 5305 * dev_extent_min == dev_extent_want == zone_size * dev_stripes 5306 */ 5307 ASSERT(devices_info[ctl->ndevs - 1].max_avail == ctl->dev_extent_min); 5308 5309 ctl->stripe_size = zone_size; 5310 ctl->num_stripes = ctl->ndevs * ctl->dev_stripes; 5311 data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies; 5312 5313 /* stripe_size is fixed in a zoned filesystem. Reduce ndevs instead. */ 5314 if (ctl->stripe_size * data_stripes > ctl->max_chunk_size) { 5315 ctl->ndevs = div_u64(div_u64(ctl->max_chunk_size * ctl->ncopies, 5316 ctl->stripe_size) + ctl->nparity, 5317 ctl->dev_stripes); 5318 ctl->num_stripes = ctl->ndevs * ctl->dev_stripes; 5319 data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies; 5320 ASSERT(ctl->stripe_size * data_stripes <= ctl->max_chunk_size); 5321 } 5322 5323 ctl->chunk_size = ctl->stripe_size * data_stripes; 5324 5325 return 0; 5326 } 5327 5328 static int decide_stripe_size(struct btrfs_fs_devices *fs_devices, 5329 struct alloc_chunk_ctl *ctl, 5330 struct btrfs_device_info *devices_info) 5331 { 5332 struct btrfs_fs_info *info = fs_devices->fs_info; 5333 5334 /* 5335 * Round down to the number of usable stripes, devs_increment can be any 5336 * number so we can't use round_down(), which requires a power of 2, while 5337 * rounddown() is safe for any value.
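*
* E.g. rounddown(7, 3) yields 6, while round_down(7, 3) would compute
* 7 & ~(3 - 1) == 5, since it is only correct for power-of-two
* alignments.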
5338 */ 5339 ctl->ndevs = rounddown(ctl->ndevs, ctl->devs_increment); 5340 5341 if (ctl->ndevs < ctl->devs_min) { 5342 if (btrfs_test_opt(info, ENOSPC_DEBUG)) { 5343 btrfs_debug(info, 5344 "%s: not enough devices with free space: have=%d minimum required=%d", 5345 __func__, ctl->ndevs, ctl->devs_min); 5346 } 5347 return -ENOSPC; 5348 } 5349 5350 ctl->ndevs = min(ctl->ndevs, ctl->devs_max); 5351 5352 switch (fs_devices->chunk_alloc_policy) { 5353 case BTRFS_CHUNK_ALLOC_REGULAR: 5354 return decide_stripe_size_regular(ctl, devices_info); 5355 case BTRFS_CHUNK_ALLOC_ZONED: 5356 return decide_stripe_size_zoned(ctl, devices_info); 5357 default: 5358 BUG(); 5359 } 5360 } 5361 5362 static void chunk_map_device_set_bits(struct btrfs_chunk_map *map, unsigned int bits) 5363 { 5364 for (int i = 0; i < map->num_stripes; i++) { 5365 struct btrfs_io_stripe *stripe = &map->stripes[i]; 5366 struct btrfs_device *device = stripe->dev; 5367 5368 set_extent_bit(&device->alloc_state, stripe->physical, 5369 stripe->physical + map->stripe_size - 1, 5370 bits | EXTENT_NOWAIT, NULL); 5371 } 5372 } 5373 5374 static void chunk_map_device_clear_bits(struct btrfs_chunk_map *map, unsigned int bits) 5375 { 5376 for (int i = 0; i < map->num_stripes; i++) { 5377 struct btrfs_io_stripe *stripe = &map->stripes[i]; 5378 struct btrfs_device *device = stripe->dev; 5379 5380 __clear_extent_bit(&device->alloc_state, stripe->physical, 5381 stripe->physical + map->stripe_size - 1, 5382 bits | EXTENT_NOWAIT, 5383 NULL, NULL); 5384 } 5385 } 5386 5387 void btrfs_remove_chunk_map(struct btrfs_fs_info *fs_info, struct btrfs_chunk_map *map) 5388 { 5389 write_lock(&fs_info->mapping_tree_lock); 5390 rb_erase_cached(&map->rb_node, &fs_info->mapping_tree); 5391 RB_CLEAR_NODE(&map->rb_node); 5392 chunk_map_device_clear_bits(map, CHUNK_ALLOCATED); 5393 write_unlock(&fs_info->mapping_tree_lock); 5394 5395 /* Once for the tree reference. 
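* Callers that looked the map up still hold their own reference and
* drop it separately.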
*/ 5396 btrfs_free_chunk_map(map); 5397 } 5398 5399 EXPORT_FOR_TESTS 5400 int btrfs_add_chunk_map(struct btrfs_fs_info *fs_info, struct btrfs_chunk_map *map) 5401 { 5402 struct rb_node **p; 5403 struct rb_node *parent = NULL; 5404 bool leftmost = true; 5405 5406 write_lock(&fs_info->mapping_tree_lock); 5407 p = &fs_info->mapping_tree.rb_root.rb_node; 5408 while (*p) { 5409 struct btrfs_chunk_map *entry; 5410 5411 parent = *p; 5412 entry = rb_entry(parent, struct btrfs_chunk_map, rb_node); 5413 5414 if (map->start < entry->start) { 5415 p = &(*p)->rb_left; 5416 } else if (map->start > entry->start) { 5417 p = &(*p)->rb_right; 5418 leftmost = false; 5419 } else { 5420 write_unlock(&fs_info->mapping_tree_lock); 5421 return -EEXIST; 5422 } 5423 } 5424 rb_link_node(&map->rb_node, parent, p); 5425 rb_insert_color_cached(&map->rb_node, &fs_info->mapping_tree, leftmost); 5426 chunk_map_device_set_bits(map, CHUNK_ALLOCATED); 5427 chunk_map_device_clear_bits(map, CHUNK_TRIMMED); 5428 write_unlock(&fs_info->mapping_tree_lock); 5429 5430 return 0; 5431 } 5432 5433 EXPORT_FOR_TESTS 5434 struct btrfs_chunk_map *btrfs_alloc_chunk_map(int num_stripes, gfp_t gfp) 5435 { 5436 struct btrfs_chunk_map *map; 5437 5438 map = kmalloc(btrfs_chunk_map_size(num_stripes), gfp); 5439 if (!map) 5440 return NULL; 5441 5442 refcount_set(&map->refs, 1); 5443 RB_CLEAR_NODE(&map->rb_node); 5444 5445 return map; 5446 } 5447 5448 static struct btrfs_block_group *create_chunk(struct btrfs_trans_handle *trans, 5449 struct alloc_chunk_ctl *ctl, 5450 struct btrfs_device_info *devices_info) 5451 { 5452 struct btrfs_fs_info *info = trans->fs_info; 5453 struct btrfs_chunk_map *map; 5454 struct btrfs_block_group *block_group; 5455 u64 start = ctl->start; 5456 u64 type = ctl->type; 5457 int ret; 5458 5459 map = btrfs_alloc_chunk_map(ctl->num_stripes, GFP_NOFS); 5460 if (!map) 5461 return ERR_PTR(-ENOMEM); 5462 5463 map->start = start; 5464 map->chunk_len = ctl->chunk_size; 5465 map->stripe_size = ctl->stripe_size; 5466 map->type = type; 5467 map->io_align = BTRFS_STRIPE_LEN; 5468 map->io_width = BTRFS_STRIPE_LEN; 5469 map->sub_stripes = ctl->sub_stripes; 5470 map->num_stripes = ctl->num_stripes; 5471 5472 for (int i = 0; i < ctl->ndevs; i++) { 5473 for (int j = 0; j < ctl->dev_stripes; j++) { 5474 int s = i * ctl->dev_stripes + j; 5475 map->stripes[s].dev = devices_info[i].dev; 5476 map->stripes[s].physical = devices_info[i].dev_offset + 5477 j * ctl->stripe_size; 5478 } 5479 } 5480 5481 trace_btrfs_chunk_alloc(info, map, start, ctl->chunk_size); 5482 5483 ret = btrfs_add_chunk_map(info, map); 5484 if (ret) { 5485 btrfs_free_chunk_map(map); 5486 return ERR_PTR(ret); 5487 } 5488 5489 block_group = btrfs_make_block_group(trans, type, start, ctl->chunk_size); 5490 if (IS_ERR(block_group)) { 5491 btrfs_remove_chunk_map(info, map); 5492 return block_group; 5493 } 5494 5495 for (int i = 0; i < map->num_stripes; i++) { 5496 struct btrfs_device *dev = map->stripes[i].dev; 5497 5498 btrfs_device_set_bytes_used(dev, 5499 dev->bytes_used + ctl->stripe_size); 5500 if (list_empty(&dev->post_commit_list)) 5501 list_add_tail(&dev->post_commit_list, 5502 &trans->transaction->dev_update_list); 5503 } 5504 5505 atomic64_sub(ctl->stripe_size * map->num_stripes, 5506 &info->free_chunk_space); 5507 5508 check_raid56_incompat_flag(info, type); 5509 check_raid1c34_incompat_flag(info, type); 5510 5511 return block_group; 5512 } 5513 5514 struct btrfs_block_group *btrfs_create_chunk(struct btrfs_trans_handle *trans, 5515 u64 type) 5516 { 5517 struct 
btrfs_fs_info *info = trans->fs_info; 5518 struct btrfs_fs_devices *fs_devices = info->fs_devices; 5519 struct btrfs_device_info *devices_info = NULL; 5520 struct alloc_chunk_ctl ctl; 5521 struct btrfs_block_group *block_group; 5522 int ret; 5523 5524 lockdep_assert_held(&info->chunk_mutex); 5525 5526 if (!alloc_profile_is_valid(type, 0)) { 5527 ASSERT(0); 5528 return ERR_PTR(-EINVAL); 5529 } 5530 5531 if (list_empty(&fs_devices->alloc_list)) { 5532 if (btrfs_test_opt(info, ENOSPC_DEBUG)) 5533 btrfs_debug(info, "%s: no writable device", __func__); 5534 return ERR_PTR(-ENOSPC); 5535 } 5536 5537 if (!(type & BTRFS_BLOCK_GROUP_TYPE_MASK)) { 5538 btrfs_err(info, "invalid chunk type 0x%llx requested", type); 5539 ASSERT(0); 5540 return ERR_PTR(-EINVAL); 5541 } 5542 5543 ctl.start = find_next_chunk(info); 5544 ctl.type = type; 5545 init_alloc_chunk_ctl(fs_devices, &ctl); 5546 5547 devices_info = kcalloc(fs_devices->rw_devices, sizeof(*devices_info), 5548 GFP_NOFS); 5549 if (!devices_info) 5550 return ERR_PTR(-ENOMEM); 5551 5552 ret = gather_device_info(fs_devices, &ctl, devices_info); 5553 if (ret < 0) { 5554 block_group = ERR_PTR(ret); 5555 goto out; 5556 } 5557 5558 ret = decide_stripe_size(fs_devices, &ctl, devices_info); 5559 if (ret < 0) { 5560 block_group = ERR_PTR(ret); 5561 goto out; 5562 } 5563 5564 block_group = create_chunk(trans, &ctl, devices_info); 5565 5566 out: 5567 kfree(devices_info); 5568 return block_group; 5569 } 5570 5571 /* 5572 * This function, btrfs_chunk_alloc_add_chunk_item(), typically belongs to the 5573 * phase 1 of chunk allocation. It belongs to phase 2 only when allocating system 5574 * chunks. 5575 * 5576 * See the comment at btrfs_chunk_alloc() for details about the chunk allocation 5577 * phases. 5578 */ 5579 int btrfs_chunk_alloc_add_chunk_item(struct btrfs_trans_handle *trans, 5580 struct btrfs_block_group *bg) 5581 { 5582 struct btrfs_fs_info *fs_info = trans->fs_info; 5583 struct btrfs_root *chunk_root = fs_info->chunk_root; 5584 struct btrfs_key key; 5585 struct btrfs_chunk *chunk; 5586 struct btrfs_stripe *stripe; 5587 struct btrfs_chunk_map *map; 5588 size_t item_size; 5589 int i; 5590 int ret; 5591 5592 /* 5593 * We take the chunk_mutex for 2 reasons: 5594 * 5595 * 1) Updates and insertions in the chunk btree must be done while holding 5596 * the chunk_mutex, as well as updating the system chunk array in the 5597 * superblock. See the comment on top of btrfs_chunk_alloc() for the 5598 * details; 5599 * 5600 * 2) To prevent races with the final phase of a device replace operation 5601 * that replaces the device object associated with the map's stripes, 5602 * because the device object's id can change at any time during that 5603 * final phase of the device replace operation 5604 * (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the 5605 * replaced device and then see it with an ID of BTRFS_DEV_REPLACE_DEVID, 5606 * which would cause a failure when updating the device item, which does 5607 * not exist, or when persisting a stripe of the chunk item with such an ID. 5608 * Here we can't use the device_list_mutex because our caller already 5609 * has locked the chunk_mutex, and the final phase of device replace 5610 * acquires both mutexes - first the device_list_mutex and then the 5611 * chunk_mutex. Using any of those two mutexes protects us from a 5612 * concurrent device replace.
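*
* A lock-order sketch of that final phase (illustrative pseudocode, not
* a quote of dev-replace.c):
*
*   mutex_lock(&fs_info->fs_devices->device_list_mutex);
*   mutex_lock(&fs_info->chunk_mutex);
*   ... swap the replaced device object in the chunk map stripes ...
*   mutex_unlock(&fs_info->chunk_mutex);
*   mutex_unlock(&fs_info->fs_devices->device_list_mutex);
*
* so holding chunk_mutex here is sufficient to serialize against it.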
5613 */ 5614 lockdep_assert_held(&fs_info->chunk_mutex); 5615 5616 map = btrfs_get_chunk_map(fs_info, bg->start, bg->length); 5617 if (IS_ERR(map)) { 5618 ret = PTR_ERR(map); 5619 btrfs_abort_transaction(trans, ret); 5620 return ret; 5621 } 5622 5623 item_size = btrfs_chunk_item_size(map->num_stripes); 5624 5625 chunk = kzalloc(item_size, GFP_NOFS); 5626 if (!chunk) { 5627 ret = -ENOMEM; 5628 btrfs_abort_transaction(trans, ret); 5629 goto out; 5630 } 5631 5632 for (i = 0; i < map->num_stripes; i++) { 5633 struct btrfs_device *device = map->stripes[i].dev; 5634 5635 ret = btrfs_update_device(trans, device); 5636 if (ret) 5637 goto out; 5638 } 5639 5640 stripe = &chunk->stripe; 5641 for (i = 0; i < map->num_stripes; i++) { 5642 struct btrfs_device *device = map->stripes[i].dev; 5643 const u64 dev_offset = map->stripes[i].physical; 5644 5645 btrfs_set_stack_stripe_devid(stripe, device->devid); 5646 btrfs_set_stack_stripe_offset(stripe, dev_offset); 5647 memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE); 5648 stripe++; 5649 } 5650 5651 btrfs_set_stack_chunk_length(chunk, bg->length); 5652 btrfs_set_stack_chunk_owner(chunk, BTRFS_EXTENT_TREE_OBJECTID); 5653 btrfs_set_stack_chunk_stripe_len(chunk, BTRFS_STRIPE_LEN); 5654 btrfs_set_stack_chunk_type(chunk, map->type); 5655 btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes); 5656 btrfs_set_stack_chunk_io_align(chunk, BTRFS_STRIPE_LEN); 5657 btrfs_set_stack_chunk_io_width(chunk, BTRFS_STRIPE_LEN); 5658 btrfs_set_stack_chunk_sector_size(chunk, fs_info->sectorsize); 5659 btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes); 5660 5661 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 5662 key.type = BTRFS_CHUNK_ITEM_KEY; 5663 key.offset = bg->start; 5664 5665 ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size); 5666 if (ret) 5667 goto out; 5668 5669 set_bit(BLOCK_GROUP_FLAG_CHUNK_ITEM_INSERTED, &bg->runtime_flags); 5670 5671 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) { 5672 ret = btrfs_add_system_chunk(fs_info, &key, chunk, item_size); 5673 if (ret) 5674 goto out; 5675 } 5676 5677 out: 5678 kfree(chunk); 5679 btrfs_free_chunk_map(map); 5680 return ret; 5681 } 5682 5683 static noinline int init_first_rw_device(struct btrfs_trans_handle *trans) 5684 { 5685 struct btrfs_fs_info *fs_info = trans->fs_info; 5686 u64 alloc_profile; 5687 struct btrfs_block_group *meta_bg; 5688 struct btrfs_block_group *sys_bg; 5689 5690 /* 5691 * When adding a new device for sprouting, the seed device is read-only 5692 * so we must first allocate a metadata and a system chunk. But before 5693 * adding the block group items to the extent, device and chunk btrees, 5694 * we must first: 5695 * 5696 * 1) Create both chunks without doing any changes to the btrees, as 5697 * otherwise we would get -ENOSPC since the block groups from the 5698 * seed device are read-only; 5699 * 5700 * 2) Add the device item for the new sprout device - finishing the setup 5701 * of a new block group requires updating the device item in the chunk 5702 * btree, so it must exist when we attempt to do it. The previous step 5703 * ensures this does not fail with -ENOSPC. 5704 * 5705 * After that we can add the block group items to their btrees: 5706 * update existing device item in the chunk btree, add a new block group 5707 * item to the extent btree, add a new chunk item to the chunk btree and 5708 * finally add the new device extent items to the devices btree. 
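*
* In code this is just the two btrfs_create_chunk() calls below; the
* block group, chunk and device extent items are inserted later, in the
* second phase of chunk allocation (see the comment at
* btrfs_chunk_alloc() for the phase description).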
 */

	alloc_profile = btrfs_metadata_alloc_profile(fs_info);
	meta_bg = btrfs_create_chunk(trans, alloc_profile);
	if (IS_ERR(meta_bg))
		return PTR_ERR(meta_bg);

	alloc_profile = btrfs_system_alloc_profile(fs_info);
	sys_bg = btrfs_create_chunk(trans, alloc_profile);
	if (IS_ERR(sys_bg))
		return PTR_ERR(sys_bg);

	return 0;
}

static inline int btrfs_chunk_max_errors(struct btrfs_chunk_map *map)
{
	const int index = btrfs_bg_flags_to_raid_index(map->type);

	return btrfs_raid_array[index].tolerated_failures;
}

bool btrfs_chunk_writeable(struct btrfs_fs_info *fs_info, u64 chunk_offset)
{
	struct btrfs_chunk_map *map;
	int miss_ndevs = 0;
	int i;
	bool ret = true;

	map = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
	if (IS_ERR(map))
		return false;

	for (i = 0; i < map->num_stripes; i++) {
		if (test_bit(BTRFS_DEV_STATE_MISSING,
			     &map->stripes[i].dev->dev_state)) {
			miss_ndevs++;
			continue;
		}
		if (!test_bit(BTRFS_DEV_STATE_WRITEABLE,
			      &map->stripes[i].dev->dev_state)) {
			ret = false;
			goto end;
		}
	}

	/*
	 * If the number of missing devices is larger than max errors, we
	 * cannot write the data into that chunk successfully.
	 */
	if (miss_ndevs > btrfs_chunk_max_errors(map))
		ret = false;
end:
	btrfs_free_chunk_map(map);
	return ret;
}

void btrfs_mapping_tree_free(struct btrfs_fs_info *fs_info)
{
	write_lock(&fs_info->mapping_tree_lock);
	while (!RB_EMPTY_ROOT(&fs_info->mapping_tree.rb_root)) {
		struct btrfs_chunk_map *map;
		struct rb_node *node;

		node = rb_first_cached(&fs_info->mapping_tree);
		map = rb_entry(node, struct btrfs_chunk_map, rb_node);
		rb_erase_cached(&map->rb_node, &fs_info->mapping_tree);
		RB_CLEAR_NODE(&map->rb_node);
		chunk_map_device_clear_bits(map, CHUNK_ALLOCATED);
		/* Once for the tree ref. */
		btrfs_free_chunk_map(map);
		cond_resched_rwlock_write(&fs_info->mapping_tree_lock);
	}
	write_unlock(&fs_info->mapping_tree_lock);
}

static int btrfs_chunk_map_num_copies(const struct btrfs_chunk_map *map)
{
	enum btrfs_raid_types index = btrfs_bg_flags_to_raid_index(map->type);

	if (map->type & BTRFS_BLOCK_GROUP_RAID5)
		return 2;

	/*
	 * There could be two corrupted data stripes, so we need to retry in a
	 * loop in order to rebuild the correct data.
	 *
	 * Fail a stripe at a time on every retry except the stripe under
	 * reconstruction.
	 */
	if (map->type & BTRFS_BLOCK_GROUP_RAID6)
		return map->num_stripes;

	/* Non-RAID56, use their ncopies from btrfs_raid_array. */
	return btrfs_raid_array[index].ncopies;
}

int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
{
	struct btrfs_chunk_map *map;
	int ret;

	map = btrfs_get_chunk_map(fs_info, logical, len);
	if (IS_ERR(map))
		/*
		 * We could return errors for these cases, but that could get
		 * ugly and we'd probably do the same thing anyway (nothing
		 * else, just exit), so return 1 so the callers don't try
		 * to use other copies.
5818 */ 5819 return 1; 5820 5821 ret = btrfs_chunk_map_num_copies(map); 5822 btrfs_free_chunk_map(map); 5823 return ret; 5824 } 5825 5826 unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info, 5827 u64 logical) 5828 { 5829 struct btrfs_chunk_map *map; 5830 unsigned long len = fs_info->sectorsize; 5831 5832 if (!btrfs_fs_incompat(fs_info, RAID56)) 5833 return len; 5834 5835 map = btrfs_get_chunk_map(fs_info, logical, len); 5836 5837 if (!WARN_ON(IS_ERR(map))) { 5838 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) 5839 len = btrfs_stripe_nr_to_offset(nr_data_stripes(map)); 5840 btrfs_free_chunk_map(map); 5841 } 5842 return len; 5843 } 5844 5845 int btrfs_is_parity_mirror(struct btrfs_fs_info *fs_info, u64 logical, u64 len) 5846 { 5847 struct btrfs_chunk_map *map; 5848 int ret = 0; 5849 5850 if (!btrfs_fs_incompat(fs_info, RAID56)) 5851 return 0; 5852 5853 map = btrfs_get_chunk_map(fs_info, logical, len); 5854 5855 if (!WARN_ON(IS_ERR(map))) { 5856 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) 5857 ret = 1; 5858 btrfs_free_chunk_map(map); 5859 } 5860 return ret; 5861 } 5862 5863 static int find_live_mirror(struct btrfs_fs_info *fs_info, 5864 struct btrfs_chunk_map *map, int first, 5865 int dev_replace_is_ongoing) 5866 { 5867 const enum btrfs_read_policy policy = READ_ONCE(fs_info->fs_devices->read_policy); 5868 int i; 5869 int num_stripes; 5870 int preferred_mirror; 5871 int tolerance; 5872 struct btrfs_device *srcdev; 5873 5874 ASSERT((map->type & 5875 (BTRFS_BLOCK_GROUP_RAID1_MASK | BTRFS_BLOCK_GROUP_RAID10))); 5876 5877 if (map->type & BTRFS_BLOCK_GROUP_RAID10) 5878 num_stripes = map->sub_stripes; 5879 else 5880 num_stripes = map->num_stripes; 5881 5882 switch (policy) { 5883 default: 5884 /* Shouldn't happen, just warn and use pid instead of failing */ 5885 btrfs_warn_rl(fs_info, "unknown read_policy type %u, reset to pid", 5886 policy); 5887 WRITE_ONCE(fs_info->fs_devices->read_policy, BTRFS_READ_POLICY_PID); 5888 fallthrough; 5889 case BTRFS_READ_POLICY_PID: 5890 preferred_mirror = first + (current->pid % num_stripes); 5891 break; 5892 } 5893 5894 if (dev_replace_is_ongoing && 5895 fs_info->dev_replace.cont_reading_from_srcdev_mode == 5896 BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID) 5897 srcdev = fs_info->dev_replace.srcdev; 5898 else 5899 srcdev = NULL; 5900 5901 /* 5902 * try to avoid the drive that is the source drive for a 5903 * dev-replace procedure, only choose it if no other non-missing 5904 * mirror is available 5905 */ 5906 for (tolerance = 0; tolerance < 2; tolerance++) { 5907 if (map->stripes[preferred_mirror].dev->bdev && 5908 (tolerance || map->stripes[preferred_mirror].dev != srcdev)) 5909 return preferred_mirror; 5910 for (i = first; i < first + num_stripes; i++) { 5911 if (map->stripes[i].dev->bdev && 5912 (tolerance || map->stripes[i].dev != srcdev)) 5913 return i; 5914 } 5915 } 5916 5917 /* we couldn't find one that doesn't fail. 
Just return something 5918 * and the io error handling code will clean up eventually 5919 */ 5920 return preferred_mirror; 5921 } 5922 5923 static struct btrfs_io_context *alloc_btrfs_io_context(struct btrfs_fs_info *fs_info, 5924 u64 logical, 5925 u16 total_stripes) 5926 { 5927 struct btrfs_io_context *bioc; 5928 5929 bioc = kzalloc( 5930 /* The size of btrfs_io_context */ 5931 sizeof(struct btrfs_io_context) + 5932 /* Plus the variable array for the stripes */ 5933 sizeof(struct btrfs_io_stripe) * (total_stripes), 5934 GFP_NOFS); 5935 5936 if (!bioc) 5937 return NULL; 5938 5939 refcount_set(&bioc->refs, 1); 5940 5941 bioc->fs_info = fs_info; 5942 bioc->replace_stripe_src = -1; 5943 bioc->full_stripe_logical = (u64)-1; 5944 bioc->logical = logical; 5945 5946 return bioc; 5947 } 5948 5949 void btrfs_get_bioc(struct btrfs_io_context *bioc) 5950 { 5951 WARN_ON(!refcount_read(&bioc->refs)); 5952 refcount_inc(&bioc->refs); 5953 } 5954 5955 void btrfs_put_bioc(struct btrfs_io_context *bioc) 5956 { 5957 if (!bioc) 5958 return; 5959 if (refcount_dec_and_test(&bioc->refs)) 5960 kfree(bioc); 5961 } 5962 5963 /* 5964 * Please note that, discard won't be sent to target device of device 5965 * replace. 5966 */ 5967 struct btrfs_discard_stripe *btrfs_map_discard(struct btrfs_fs_info *fs_info, 5968 u64 logical, u64 *length_ret, 5969 u32 *num_stripes) 5970 { 5971 struct btrfs_chunk_map *map; 5972 struct btrfs_discard_stripe *stripes; 5973 u64 length = *length_ret; 5974 u64 offset; 5975 u32 stripe_nr; 5976 u32 stripe_nr_end; 5977 u32 stripe_cnt; 5978 u64 stripe_end_offset; 5979 u64 stripe_offset; 5980 u32 stripe_index; 5981 u32 factor = 0; 5982 u32 sub_stripes = 0; 5983 u32 stripes_per_dev = 0; 5984 u32 remaining_stripes = 0; 5985 u32 last_stripe = 0; 5986 int ret; 5987 int i; 5988 5989 map = btrfs_get_chunk_map(fs_info, logical, length); 5990 if (IS_ERR(map)) 5991 return ERR_CAST(map); 5992 5993 /* we don't discard raid56 yet */ 5994 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { 5995 ret = -EOPNOTSUPP; 5996 goto out_free_map; 5997 } 5998 5999 offset = logical - map->start; 6000 length = min_t(u64, map->start + map->chunk_len - logical, length); 6001 *length_ret = length; 6002 6003 /* 6004 * stripe_nr counts the total number of stripes we have to stride 6005 * to get to this block 6006 */ 6007 stripe_nr = offset >> BTRFS_STRIPE_LEN_SHIFT; 6008 6009 /* stripe_offset is the offset of this block in its stripe */ 6010 stripe_offset = offset - btrfs_stripe_nr_to_offset(stripe_nr); 6011 6012 stripe_nr_end = round_up(offset + length, BTRFS_STRIPE_LEN) >> 6013 BTRFS_STRIPE_LEN_SHIFT; 6014 stripe_cnt = stripe_nr_end - stripe_nr; 6015 stripe_end_offset = btrfs_stripe_nr_to_offset(stripe_nr_end) - 6016 (offset + length); 6017 /* 6018 * after this, stripe_nr is the number of stripes on this 6019 * device we have to walk to find the data, and stripe_index is 6020 * the number of our device in the stripe array 6021 */ 6022 *num_stripes = 1; 6023 stripe_index = 0; 6024 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | 6025 BTRFS_BLOCK_GROUP_RAID10)) { 6026 if (map->type & BTRFS_BLOCK_GROUP_RAID0) 6027 sub_stripes = 1; 6028 else 6029 sub_stripes = map->sub_stripes; 6030 6031 factor = map->num_stripes / sub_stripes; 6032 *num_stripes = min_t(u64, map->num_stripes, 6033 sub_stripes * stripe_cnt); 6034 stripe_index = stripe_nr % factor; 6035 stripe_nr /= factor; 6036 stripe_index *= sub_stripes; 6037 6038 remaining_stripes = stripe_cnt % factor; 6039 stripes_per_dev = stripe_cnt / factor; 6040 last_stripe = ((stripe_nr_end - 1) 
% factor) * sub_stripes; 6041 } else if (map->type & (BTRFS_BLOCK_GROUP_RAID1_MASK | 6042 BTRFS_BLOCK_GROUP_DUP)) { 6043 *num_stripes = map->num_stripes; 6044 } else { 6045 stripe_index = stripe_nr % map->num_stripes; 6046 stripe_nr /= map->num_stripes; 6047 } 6048 6049 stripes = kcalloc(*num_stripes, sizeof(*stripes), GFP_NOFS); 6050 if (!stripes) { 6051 ret = -ENOMEM; 6052 goto out_free_map; 6053 } 6054 6055 for (i = 0; i < *num_stripes; i++) { 6056 stripes[i].physical = 6057 map->stripes[stripe_index].physical + 6058 stripe_offset + btrfs_stripe_nr_to_offset(stripe_nr); 6059 stripes[i].dev = map->stripes[stripe_index].dev; 6060 6061 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | 6062 BTRFS_BLOCK_GROUP_RAID10)) { 6063 stripes[i].length = btrfs_stripe_nr_to_offset(stripes_per_dev); 6064 6065 if (i / sub_stripes < remaining_stripes) 6066 stripes[i].length += BTRFS_STRIPE_LEN; 6067 6068 /* 6069 * Special for the first stripe and 6070 * the last stripe: 6071 * 6072 * |-------|...|-------| 6073 * |----------| 6074 * off end_off 6075 */ 6076 if (i < sub_stripes) 6077 stripes[i].length -= stripe_offset; 6078 6079 if (stripe_index >= last_stripe && 6080 stripe_index <= (last_stripe + 6081 sub_stripes - 1)) 6082 stripes[i].length -= stripe_end_offset; 6083 6084 if (i == sub_stripes - 1) 6085 stripe_offset = 0; 6086 } else { 6087 stripes[i].length = length; 6088 } 6089 6090 stripe_index++; 6091 if (stripe_index == map->num_stripes) { 6092 stripe_index = 0; 6093 stripe_nr++; 6094 } 6095 } 6096 6097 btrfs_free_chunk_map(map); 6098 return stripes; 6099 out_free_map: 6100 btrfs_free_chunk_map(map); 6101 return ERR_PTR(ret); 6102 } 6103 6104 static bool is_block_group_to_copy(struct btrfs_fs_info *fs_info, u64 logical) 6105 { 6106 struct btrfs_block_group *cache; 6107 bool ret; 6108 6109 /* Non zoned filesystem does not use "to_copy" flag */ 6110 if (!btrfs_is_zoned(fs_info)) 6111 return false; 6112 6113 cache = btrfs_lookup_block_group(fs_info, logical); 6114 6115 ret = test_bit(BLOCK_GROUP_FLAG_TO_COPY, &cache->runtime_flags); 6116 6117 btrfs_put_block_group(cache); 6118 return ret; 6119 } 6120 6121 static void handle_ops_on_dev_replace(struct btrfs_io_context *bioc, 6122 struct btrfs_dev_replace *dev_replace, 6123 u64 logical, 6124 struct btrfs_io_geometry *io_geom) 6125 { 6126 u64 srcdev_devid = dev_replace->srcdev->devid; 6127 /* 6128 * At this stage, num_stripes is still the real number of stripes, 6129 * excluding the duplicated stripes. 6130 */ 6131 int num_stripes = io_geom->num_stripes; 6132 int max_errors = io_geom->max_errors; 6133 int nr_extra_stripes = 0; 6134 int i; 6135 6136 /* 6137 * A block group which has "to_copy" set will eventually be copied by 6138 * the dev-replace process. We can avoid cloning IO here. 6139 */ 6140 if (is_block_group_to_copy(dev_replace->srcdev->fs_info, logical)) 6141 return; 6142 6143 /* 6144 * Duplicate the write operations while the dev-replace procedure is 6145 * running. Since the copying of the old disk to the new disk takes 6146 * place at run time while the filesystem is mounted writable, the 6147 * regular write operations to the old disk have to be duplicated to go 6148 * to the new disk as well. 6149 * 6150 * Note that device->missing is handled by the caller, and that the 6151 * write to the old disk is already set up in the stripes array. 
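	 *
	 * E.g. for a DUP chunk whose single device is the replace source, both
	 * stripes point at that device, so both get a duplicate aimed at the
	 * target device - which is why nr_extra_stripes can reach 2 below.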
 */
	for (i = 0; i < num_stripes; i++) {
		struct btrfs_io_stripe *old = &bioc->stripes[i];
		struct btrfs_io_stripe *new = &bioc->stripes[num_stripes + nr_extra_stripes];

		if (old->dev->devid != srcdev_devid)
			continue;

		new->physical = old->physical;
		new->dev = dev_replace->tgtdev;
		if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK)
			bioc->replace_stripe_src = i;
		nr_extra_stripes++;
	}

	/* We can only have at most 2 extra nr_stripes (for DUP). */
	ASSERT(nr_extra_stripes <= 2);
	/*
	 * For GET_READ_MIRRORS, we can only return at most 1 extra stripe for
	 * replace.
	 * If we have 2 extra stripes, only choose the one with smaller physical.
	 */
	if (io_geom->op == BTRFS_MAP_GET_READ_MIRRORS && nr_extra_stripes == 2) {
		struct btrfs_io_stripe *first = &bioc->stripes[num_stripes];
		struct btrfs_io_stripe *second = &bioc->stripes[num_stripes + 1];

		/* Only DUP can have two extra stripes. */
		ASSERT(bioc->map_type & BTRFS_BLOCK_GROUP_DUP);

		/*
		 * Swap the two extra stripes and reduce @nr_extra_stripes.
		 * The extra stripe would still be there, but won't be accessed.
		 */
		if (first->physical > second->physical) {
			swap(second->physical, first->physical);
			swap(second->dev, first->dev);
			nr_extra_stripes--;
		}
	}

	io_geom->num_stripes = num_stripes + nr_extra_stripes;
	io_geom->max_errors = max_errors + nr_extra_stripes;
	bioc->replace_nr_stripes = nr_extra_stripes;
}

static u64 btrfs_max_io_len(struct btrfs_chunk_map *map, u64 offset,
			    struct btrfs_io_geometry *io_geom)
{
	/*
	 * stripe_nr is the stripe where this block falls. stripe_offset is
	 * the offset of this block in its stripe.
	 */
	io_geom->stripe_offset = offset & BTRFS_STRIPE_LEN_MASK;
	io_geom->stripe_nr = offset >> BTRFS_STRIPE_LEN_SHIFT;
	ASSERT(io_geom->stripe_offset < U32_MAX);

	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		unsigned long full_stripe_len =
			btrfs_stripe_nr_to_offset(nr_data_stripes(map));

		/*
		 * For the full stripe start, we use the previously calculated
		 * @stripe_nr. Align it to nr_data_stripes, then multiply with
		 * STRIPE_LEN.
		 *
		 * By this we can avoid u64 division completely. And we have
		 * to go rounddown(), not round_down(), as nr_data_stripes is
		 * not ensured to be a power of 2.
		 */
		io_geom->raid56_full_stripe_start = btrfs_stripe_nr_to_offset(
			rounddown(io_geom->stripe_nr, nr_data_stripes(map)));

		ASSERT(io_geom->raid56_full_stripe_start + full_stripe_len > offset);
		ASSERT(io_geom->raid56_full_stripe_start <= offset);
		/*
		 * For writes to RAID56, allow writing a full stripe set, but
		 * no straddling of stripe sets.
		 */
		if (io_geom->op == BTRFS_MAP_WRITE)
			return full_stripe_len - (offset - io_geom->raid56_full_stripe_start);
	}

	/*
	 * For other RAID types and for RAID56 reads, allow a single stripe (on
	 * a single disk).
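	 *
	 * E.g. with the 64K BTRFS_STRIPE_LEN, an offset 80K into a RAID0
	 * chunk has stripe_offset = 16K, so at most 64K - 16K = 48K can be
	 * mapped before crossing into the next stripe.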
6237 */ 6238 if (map->type & BTRFS_BLOCK_GROUP_STRIPE_MASK) 6239 return BTRFS_STRIPE_LEN - io_geom->stripe_offset; 6240 return U64_MAX; 6241 } 6242 6243 static int set_io_stripe(struct btrfs_fs_info *fs_info, u64 logical, 6244 u64 *length, struct btrfs_io_stripe *dst, 6245 struct btrfs_chunk_map *map, 6246 struct btrfs_io_geometry *io_geom) 6247 { 6248 dst->dev = map->stripes[io_geom->stripe_index].dev; 6249 6250 if (io_geom->op == BTRFS_MAP_READ && 6251 btrfs_need_stripe_tree_update(fs_info, map->type)) 6252 return btrfs_get_raid_extent_offset(fs_info, logical, length, 6253 map->type, 6254 io_geom->stripe_index, dst); 6255 6256 dst->physical = map->stripes[io_geom->stripe_index].physical + 6257 io_geom->stripe_offset + 6258 btrfs_stripe_nr_to_offset(io_geom->stripe_nr); 6259 return 0; 6260 } 6261 6262 static bool is_single_device_io(struct btrfs_fs_info *fs_info, 6263 const struct btrfs_io_stripe *smap, 6264 const struct btrfs_chunk_map *map, 6265 int num_alloc_stripes, 6266 enum btrfs_map_op op, int mirror_num) 6267 { 6268 if (!smap) 6269 return false; 6270 6271 if (num_alloc_stripes != 1) 6272 return false; 6273 6274 if (btrfs_need_stripe_tree_update(fs_info, map->type) && op != BTRFS_MAP_READ) 6275 return false; 6276 6277 if ((map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) && mirror_num > 1) 6278 return false; 6279 6280 return true; 6281 } 6282 6283 static void map_blocks_raid0(const struct btrfs_chunk_map *map, 6284 struct btrfs_io_geometry *io_geom) 6285 { 6286 io_geom->stripe_index = io_geom->stripe_nr % map->num_stripes; 6287 io_geom->stripe_nr /= map->num_stripes; 6288 if (io_geom->op == BTRFS_MAP_READ) 6289 io_geom->mirror_num = 1; 6290 } 6291 6292 static void map_blocks_raid1(struct btrfs_fs_info *fs_info, 6293 struct btrfs_chunk_map *map, 6294 struct btrfs_io_geometry *io_geom, 6295 bool dev_replace_is_ongoing) 6296 { 6297 if (io_geom->op != BTRFS_MAP_READ) { 6298 io_geom->num_stripes = map->num_stripes; 6299 return; 6300 } 6301 6302 if (io_geom->mirror_num) { 6303 io_geom->stripe_index = io_geom->mirror_num - 1; 6304 return; 6305 } 6306 6307 io_geom->stripe_index = find_live_mirror(fs_info, map, 0, 6308 dev_replace_is_ongoing); 6309 io_geom->mirror_num = io_geom->stripe_index + 1; 6310 } 6311 6312 static void map_blocks_dup(const struct btrfs_chunk_map *map, 6313 struct btrfs_io_geometry *io_geom) 6314 { 6315 if (io_geom->op != BTRFS_MAP_READ) { 6316 io_geom->num_stripes = map->num_stripes; 6317 return; 6318 } 6319 6320 if (io_geom->mirror_num) { 6321 io_geom->stripe_index = io_geom->mirror_num - 1; 6322 return; 6323 } 6324 6325 io_geom->mirror_num = 1; 6326 } 6327 6328 static void map_blocks_raid10(struct btrfs_fs_info *fs_info, 6329 struct btrfs_chunk_map *map, 6330 struct btrfs_io_geometry *io_geom, 6331 bool dev_replace_is_ongoing) 6332 { 6333 u32 factor = map->num_stripes / map->sub_stripes; 6334 int old_stripe_index; 6335 6336 io_geom->stripe_index = (io_geom->stripe_nr % factor) * map->sub_stripes; 6337 io_geom->stripe_nr /= factor; 6338 6339 if (io_geom->op != BTRFS_MAP_READ) { 6340 io_geom->num_stripes = map->sub_stripes; 6341 return; 6342 } 6343 6344 if (io_geom->mirror_num) { 6345 io_geom->stripe_index += io_geom->mirror_num - 1; 6346 return; 6347 } 6348 6349 old_stripe_index = io_geom->stripe_index; 6350 io_geom->stripe_index = find_live_mirror(fs_info, map, 6351 io_geom->stripe_index, 6352 dev_replace_is_ongoing); 6353 io_geom->mirror_num = io_geom->stripe_index - old_stripe_index + 1; 6354 } 6355 6356 static void map_blocks_raid56_write(struct btrfs_chunk_map *map, 
struct btrfs_io_geometry *io_geom,
				    u64 logical, u64 *length)
{
	int data_stripes = nr_data_stripes(map);

	/*
	 * Needs full stripe mapping.
	 *
	 * Push stripe_nr back to the start of the full stripe. For those cases
	 * needing a full stripe, @stripe_nr is the full stripe number.
	 *
	 * Originally we go raid56_full_stripe_start / full_stripe_len, but
	 * that can be expensive. Here we just divide @stripe_nr by
	 * @data_stripes.
	 */
	io_geom->stripe_nr /= data_stripes;

	/* RAID[56] write or recovery. Return all stripes. */
	io_geom->num_stripes = map->num_stripes;
	io_geom->max_errors = btrfs_chunk_max_errors(map);

	/* Return the length to the full stripe end. */
	*length = min(logical + *length,
		      io_geom->raid56_full_stripe_start + map->start +
		      btrfs_stripe_nr_to_offset(data_stripes)) -
		logical;
	io_geom->stripe_index = 0;
	io_geom->stripe_offset = 0;
}

static void map_blocks_raid56_read(struct btrfs_chunk_map *map,
				   struct btrfs_io_geometry *io_geom)
{
	int data_stripes = nr_data_stripes(map);

	ASSERT(io_geom->mirror_num <= 1);
	/* Just grab the data stripe directly. */
	io_geom->stripe_index = io_geom->stripe_nr % data_stripes;
	io_geom->stripe_nr /= data_stripes;

	/* We distribute the parity blocks across stripes. */
	io_geom->stripe_index =
		(io_geom->stripe_nr + io_geom->stripe_index) % map->num_stripes;

	if (io_geom->op == BTRFS_MAP_READ && io_geom->mirror_num < 1)
		io_geom->mirror_num = 1;
}

static void map_blocks_single(const struct btrfs_chunk_map *map,
			      struct btrfs_io_geometry *io_geom)
{
	io_geom->stripe_index = io_geom->stripe_nr % map->num_stripes;
	io_geom->stripe_nr /= map->num_stripes;
	io_geom->mirror_num = io_geom->stripe_index + 1;
}

/*
 * Map one logical range to one or more physical ranges.
 *
 * @length:		(Mandatory) mapped length of this run.
 *			One logical range can be split into different segments
 *			due to factors like zones and RAID0/5/6/10 stripe
 *			boundaries.
 *
 * @bioc_ret:		(Mandatory) returned btrfs_io_context structure,
 *			which has one or more physical ranges (btrfs_io_stripe)
 *			recorded inside.
 *			Caller should call btrfs_put_bioc() to free it after use.
 *
 * @smap:		(Optional) single physical range optimization.
 *			If the map request can be fulfilled by one single
 *			physical range, and this parameter is not NULL,
 *			then @bioc_ret would be NULL, and @smap would be
 *			updated.
 *
 * @mirror_num_ret:	(Mandatory) returned mirror number if the original
 *			value is 0.
 *
 *			Mirror number 0 means to choose any live mirrors.
 *
 *			For non-RAID56 profiles, non-zero mirror_num means
 *			the Nth mirror (e.g. mirror_num 1 means the first
 *			copy).
 *
 *			For RAID56 profiles, mirror 1 means rebuild from P and
 *			the remaining data stripes.
 *
 *			For the RAID6 profile, mirror > 2 means mark another
 *			data/P stripe error and rebuild from the remaining
 *			stripes.
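 *
 * Return 0 on success, or a negative errno on failure.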
6447 */ 6448 int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op, 6449 u64 logical, u64 *length, 6450 struct btrfs_io_context **bioc_ret, 6451 struct btrfs_io_stripe *smap, int *mirror_num_ret) 6452 { 6453 struct btrfs_chunk_map *map; 6454 struct btrfs_io_geometry io_geom = { 0 }; 6455 u64 map_offset; 6456 int ret = 0; 6457 int num_copies; 6458 struct btrfs_io_context *bioc = NULL; 6459 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace; 6460 int dev_replace_is_ongoing = 0; 6461 u16 num_alloc_stripes; 6462 u64 max_len; 6463 6464 ASSERT(bioc_ret); 6465 6466 io_geom.mirror_num = (mirror_num_ret ? *mirror_num_ret : 0); 6467 io_geom.num_stripes = 1; 6468 io_geom.stripe_index = 0; 6469 io_geom.op = op; 6470 6471 map = btrfs_get_chunk_map(fs_info, logical, *length); 6472 if (IS_ERR(map)) 6473 return PTR_ERR(map); 6474 6475 num_copies = btrfs_chunk_map_num_copies(map); 6476 if (io_geom.mirror_num > num_copies) 6477 return -EINVAL; 6478 6479 map_offset = logical - map->start; 6480 io_geom.raid56_full_stripe_start = (u64)-1; 6481 max_len = btrfs_max_io_len(map, map_offset, &io_geom); 6482 *length = min_t(u64, map->chunk_len - map_offset, max_len); 6483 6484 down_read(&dev_replace->rwsem); 6485 dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace); 6486 /* 6487 * Hold the semaphore for read during the whole operation, write is 6488 * requested at commit time but must wait. 6489 */ 6490 if (!dev_replace_is_ongoing) 6491 up_read(&dev_replace->rwsem); 6492 6493 switch (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) { 6494 case BTRFS_BLOCK_GROUP_RAID0: 6495 map_blocks_raid0(map, &io_geom); 6496 break; 6497 case BTRFS_BLOCK_GROUP_RAID1: 6498 case BTRFS_BLOCK_GROUP_RAID1C3: 6499 case BTRFS_BLOCK_GROUP_RAID1C4: 6500 map_blocks_raid1(fs_info, map, &io_geom, dev_replace_is_ongoing); 6501 break; 6502 case BTRFS_BLOCK_GROUP_DUP: 6503 map_blocks_dup(map, &io_geom); 6504 break; 6505 case BTRFS_BLOCK_GROUP_RAID10: 6506 map_blocks_raid10(fs_info, map, &io_geom, dev_replace_is_ongoing); 6507 break; 6508 case BTRFS_BLOCK_GROUP_RAID5: 6509 case BTRFS_BLOCK_GROUP_RAID6: 6510 if (op != BTRFS_MAP_READ || io_geom.mirror_num > 1) 6511 map_blocks_raid56_write(map, &io_geom, logical, length); 6512 else 6513 map_blocks_raid56_read(map, &io_geom); 6514 break; 6515 default: 6516 /* 6517 * After this, stripe_nr is the number of stripes on this 6518 * device we have to walk to find the data, and stripe_index is 6519 * the number of our device in the stripe array 6520 */ 6521 map_blocks_single(map, &io_geom); 6522 break; 6523 } 6524 if (io_geom.stripe_index >= map->num_stripes) { 6525 btrfs_crit(fs_info, 6526 "stripe index math went horribly wrong, got stripe_index=%u, num_stripes=%u", 6527 io_geom.stripe_index, map->num_stripes); 6528 ret = -EINVAL; 6529 goto out; 6530 } 6531 6532 num_alloc_stripes = io_geom.num_stripes; 6533 if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL && 6534 op != BTRFS_MAP_READ) 6535 /* 6536 * For replace case, we need to add extra stripes for extra 6537 * duplicated stripes. 6538 * 6539 * For both WRITE and GET_READ_MIRRORS, we may have at most 6540 * 2 more stripes (DUP types, otherwise 1). 6541 */ 6542 num_alloc_stripes += 2; 6543 6544 /* 6545 * If this I/O maps to a single device, try to return the device and 6546 * physical block information on the stack instead of allocating an 6547 * I/O context structure. 
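	 *
	 * This skips the bioc allocation and its refcounting entirely for
	 * I/O that ends up targeting exactly one device.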
 */
	if (is_single_device_io(fs_info, smap, map, num_alloc_stripes, op,
				io_geom.mirror_num)) {
		ret = set_io_stripe(fs_info, logical, length, smap, map, &io_geom);
		if (mirror_num_ret)
			*mirror_num_ret = io_geom.mirror_num;
		*bioc_ret = NULL;
		goto out;
	}

	bioc = alloc_btrfs_io_context(fs_info, logical, num_alloc_stripes);
	if (!bioc) {
		ret = -ENOMEM;
		goto out;
	}
	bioc->map_type = map->type;

	/*
	 * For a RAID56 full map, we need to make sure the stripes[] follows
	 * the rule that data stripes are all ordered, then followed by P and
	 * Q (if present).
	 *
	 * It's still mostly the same as other profiles, just with extra
	 * rotation.
	 */
	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK &&
	    (op != BTRFS_MAP_READ || io_geom.mirror_num > 1)) {
		/*
		 * For RAID56 @stripe_nr is already the number of full stripes
		 * before us, which is also the rotation value (needs to modulo
		 * with num_stripes).
		 *
		 * In this case, we just add @stripe_nr with @i, then do the
		 * modulo, to reduce one modulo call.
		 */
		bioc->full_stripe_logical = map->start +
			btrfs_stripe_nr_to_offset(io_geom.stripe_nr *
						  nr_data_stripes(map));
		for (int i = 0; i < io_geom.num_stripes; i++) {
			struct btrfs_io_stripe *dst = &bioc->stripes[i];
			u32 stripe_index;

			stripe_index = (i + io_geom.stripe_nr) % io_geom.num_stripes;
			dst->dev = map->stripes[stripe_index].dev;
			dst->physical =
				map->stripes[stripe_index].physical +
				io_geom.stripe_offset +
				btrfs_stripe_nr_to_offset(io_geom.stripe_nr);
		}
	} else {
		/*
		 * For all other non-RAID56 profiles, just copy the target
		 * stripe into the bioc.
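		 *
		 * Note that set_io_stripe() may take the physical location
		 * from the raid stripe tree for reads (when the profile
		 * requires it) instead of computing it from the chunk map.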
6600 */ 6601 for (int i = 0; i < io_geom.num_stripes; i++) { 6602 ret = set_io_stripe(fs_info, logical, length, 6603 &bioc->stripes[i], map, &io_geom); 6604 if (ret < 0) 6605 break; 6606 io_geom.stripe_index++; 6607 } 6608 } 6609 6610 if (ret) { 6611 *bioc_ret = NULL; 6612 btrfs_put_bioc(bioc); 6613 goto out; 6614 } 6615 6616 if (op != BTRFS_MAP_READ) 6617 io_geom.max_errors = btrfs_chunk_max_errors(map); 6618 6619 if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL && 6620 op != BTRFS_MAP_READ) { 6621 handle_ops_on_dev_replace(bioc, dev_replace, logical, &io_geom); 6622 } 6623 6624 *bioc_ret = bioc; 6625 bioc->num_stripes = io_geom.num_stripes; 6626 bioc->max_errors = io_geom.max_errors; 6627 bioc->mirror_num = io_geom.mirror_num; 6628 6629 out: 6630 if (dev_replace_is_ongoing) { 6631 lockdep_assert_held(&dev_replace->rwsem); 6632 /* Unlock and let waiting writers proceed */ 6633 up_read(&dev_replace->rwsem); 6634 } 6635 btrfs_free_chunk_map(map); 6636 return ret; 6637 } 6638 6639 static bool dev_args_match_fs_devices(const struct btrfs_dev_lookup_args *args, 6640 const struct btrfs_fs_devices *fs_devices) 6641 { 6642 if (args->fsid == NULL) 6643 return true; 6644 if (memcmp(fs_devices->metadata_uuid, args->fsid, BTRFS_FSID_SIZE) == 0) 6645 return true; 6646 return false; 6647 } 6648 6649 static bool dev_args_match_device(const struct btrfs_dev_lookup_args *args, 6650 const struct btrfs_device *device) 6651 { 6652 if (args->missing) { 6653 if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state) && 6654 !device->bdev) 6655 return true; 6656 return false; 6657 } 6658 6659 if (device->devid != args->devid) 6660 return false; 6661 if (args->uuid && memcmp(device->uuid, args->uuid, BTRFS_UUID_SIZE) != 0) 6662 return false; 6663 return true; 6664 } 6665 6666 /* 6667 * Find a device specified by @devid or @uuid in the list of @fs_devices, or 6668 * return NULL. 6669 * 6670 * If devid and uuid are both specified, the match must be exact, otherwise 6671 * only devid is used. 6672 */ 6673 struct btrfs_device *btrfs_find_device(const struct btrfs_fs_devices *fs_devices, 6674 const struct btrfs_dev_lookup_args *args) 6675 { 6676 struct btrfs_device *device; 6677 struct btrfs_fs_devices *seed_devs; 6678 6679 if (dev_args_match_fs_devices(args, fs_devices)) { 6680 list_for_each_entry(device, &fs_devices->devices, dev_list) { 6681 if (dev_args_match_device(args, device)) 6682 return device; 6683 } 6684 } 6685 6686 list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) { 6687 if (!dev_args_match_fs_devices(args, seed_devs)) 6688 continue; 6689 list_for_each_entry(device, &seed_devs->devices, dev_list) { 6690 if (dev_args_match_device(args, device)) 6691 return device; 6692 } 6693 } 6694 6695 return NULL; 6696 } 6697 6698 static struct btrfs_device *add_missing_dev(struct btrfs_fs_devices *fs_devices, 6699 u64 devid, u8 *dev_uuid) 6700 { 6701 struct btrfs_device *device; 6702 unsigned int nofs_flag; 6703 6704 /* 6705 * We call this under the chunk_mutex, so we want to use NOFS for this 6706 * allocation, however we don't want to change btrfs_alloc_device() to 6707 * always do NOFS because we use it in a lot of other GFP_KERNEL safe 6708 * places. 
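	 *
	 * memalloc_nofs_save() makes every allocation in this scope behave
	 * as if GFP_NOFS was passed, so the GFP_KERNEL allocations inside
	 * btrfs_alloc_device() are safe here.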
 */

	nofs_flag = memalloc_nofs_save();
	device = btrfs_alloc_device(NULL, &devid, dev_uuid, NULL);
	memalloc_nofs_restore(nofs_flag);
	if (IS_ERR(device))
		return device;

	list_add(&device->dev_list, &fs_devices->devices);
	device->fs_devices = fs_devices;
	fs_devices->num_devices++;

	set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
	fs_devices->missing_devices++;

	return device;
}

/*
 * Allocate new device struct, set up devid and UUID.
 *
 * @fs_info:	used only for generating a new devid, can be NULL if
 *		devid is provided (i.e. @devid != NULL).
 * @devid:	a pointer to devid for this device. If NULL a new devid
 *		is generated.
 * @uuid:	a pointer to UUID for this device. If NULL a new UUID
 *		is generated.
 * @path:	a pointer to device path if available, NULL otherwise.
 *
 * Return: a pointer to a new &struct btrfs_device on success; ERR_PTR()
 * on error. Returned struct is not linked onto any lists and must be
 * destroyed with btrfs_free_device.
 */
struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
					const u64 *devid, const u8 *uuid,
					const char *path)
{
	struct btrfs_device *dev;
	u64 tmp;

	if (WARN_ON(!devid && !fs_info))
		return ERR_PTR(-EINVAL);

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&dev->dev_list);
	INIT_LIST_HEAD(&dev->dev_alloc_list);
	INIT_LIST_HEAD(&dev->post_commit_list);

	atomic_set(&dev->dev_stats_ccnt, 0);
	btrfs_device_data_ordered_init(dev);
	extent_io_tree_init(fs_info, &dev->alloc_state, IO_TREE_DEVICE_ALLOC_STATE);

	if (devid)
		tmp = *devid;
	else {
		int ret;

		ret = find_next_devid(fs_info, &tmp);
		if (ret) {
			btrfs_free_device(dev);
			return ERR_PTR(ret);
		}
	}
	dev->devid = tmp;

	if (uuid)
		memcpy(dev->uuid, uuid, BTRFS_UUID_SIZE);
	else
		generate_random_uuid(dev->uuid);

	if (path) {
		struct rcu_string *name;

		name = rcu_string_strdup(path, GFP_KERNEL);
		if (!name) {
			btrfs_free_device(dev);
			return ERR_PTR(-ENOMEM);
		}
		rcu_assign_pointer(dev->name, name);
	}

	return dev;
}

static void btrfs_report_missing_device(struct btrfs_fs_info *fs_info,
					u64 devid, u8 *uuid, bool error)
{
	if (error)
		btrfs_err_rl(fs_info, "devid %llu uuid %pU is missing",
			     devid, uuid);
	else
		btrfs_warn_rl(fs_info, "devid %llu uuid %pU is missing",
			      devid, uuid);
}

u64 btrfs_calc_stripe_length(const struct btrfs_chunk_map *map)
{
	const int data_stripes = calc_data_stripes(map->type, map->num_stripes);

	return div_u64(map->chunk_len, data_stripes);
}

#if BITS_PER_LONG == 32
/*
 * Due to the page cache limit, metadata beyond BTRFS_32BIT_MAX_FILE_SIZE
 * can't be accessed on 32bit systems.
 *
 * This function does a mount time check to reject the fs if it already has
 * a metadata chunk beyond that limit.
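 *
 * The cutoff used below is MAX_LFS_FILESIZE, the highest offset the page
 * cache can address on 32bit systems.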
6821 */ 6822 static int check_32bit_meta_chunk(struct btrfs_fs_info *fs_info, 6823 u64 logical, u64 length, u64 type) 6824 { 6825 if (!(type & BTRFS_BLOCK_GROUP_METADATA)) 6826 return 0; 6827 6828 if (logical + length < MAX_LFS_FILESIZE) 6829 return 0; 6830 6831 btrfs_err_32bit_limit(fs_info); 6832 return -EOVERFLOW; 6833 } 6834 6835 /* 6836 * This is to give early warning for any metadata chunk reaching 6837 * BTRFS_32BIT_EARLY_WARN_THRESHOLD. 6838 * Although we can still access the metadata, it's not going to be possible 6839 * once the limit is reached. 6840 */ 6841 static void warn_32bit_meta_chunk(struct btrfs_fs_info *fs_info, 6842 u64 logical, u64 length, u64 type) 6843 { 6844 if (!(type & BTRFS_BLOCK_GROUP_METADATA)) 6845 return; 6846 6847 if (logical + length < BTRFS_32BIT_EARLY_WARN_THRESHOLD) 6848 return; 6849 6850 btrfs_warn_32bit_limit(fs_info); 6851 } 6852 #endif 6853 6854 static struct btrfs_device *handle_missing_device(struct btrfs_fs_info *fs_info, 6855 u64 devid, u8 *uuid) 6856 { 6857 struct btrfs_device *dev; 6858 6859 if (!btrfs_test_opt(fs_info, DEGRADED)) { 6860 btrfs_report_missing_device(fs_info, devid, uuid, true); 6861 return ERR_PTR(-ENOENT); 6862 } 6863 6864 dev = add_missing_dev(fs_info->fs_devices, devid, uuid); 6865 if (IS_ERR(dev)) { 6866 btrfs_err(fs_info, "failed to init missing device %llu: %ld", 6867 devid, PTR_ERR(dev)); 6868 return dev; 6869 } 6870 btrfs_report_missing_device(fs_info, devid, uuid, false); 6871 6872 return dev; 6873 } 6874 6875 static int read_one_chunk(struct btrfs_key *key, struct extent_buffer *leaf, 6876 struct btrfs_chunk *chunk) 6877 { 6878 BTRFS_DEV_LOOKUP_ARGS(args); 6879 struct btrfs_fs_info *fs_info = leaf->fs_info; 6880 struct btrfs_chunk_map *map; 6881 u64 logical; 6882 u64 length; 6883 u64 devid; 6884 u64 type; 6885 u8 uuid[BTRFS_UUID_SIZE]; 6886 int index; 6887 int num_stripes; 6888 int ret; 6889 int i; 6890 6891 logical = key->offset; 6892 length = btrfs_chunk_length(leaf, chunk); 6893 type = btrfs_chunk_type(leaf, chunk); 6894 index = btrfs_bg_flags_to_raid_index(type); 6895 num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 6896 6897 #if BITS_PER_LONG == 32 6898 ret = check_32bit_meta_chunk(fs_info, logical, length, type); 6899 if (ret < 0) 6900 return ret; 6901 warn_32bit_meta_chunk(fs_info, logical, length, type); 6902 #endif 6903 6904 /* 6905 * Only need to verify chunk item if we're reading from sys chunk array, 6906 * as chunk item in tree block is already verified by tree-checker. 6907 */ 6908 if (leaf->start == BTRFS_SUPER_INFO_OFFSET) { 6909 ret = btrfs_check_chunk_valid(leaf, chunk, logical); 6910 if (ret) 6911 return ret; 6912 } 6913 6914 map = btrfs_find_chunk_map(fs_info, logical, 1); 6915 6916 /* already mapped? */ 6917 if (map && map->start <= logical && map->start + map->chunk_len > logical) { 6918 btrfs_free_chunk_map(map); 6919 return 0; 6920 } else if (map) { 6921 btrfs_free_chunk_map(map); 6922 } 6923 6924 map = btrfs_alloc_chunk_map(num_stripes, GFP_NOFS); 6925 if (!map) 6926 return -ENOMEM; 6927 6928 map->start = logical; 6929 map->chunk_len = length; 6930 map->num_stripes = num_stripes; 6931 map->io_width = btrfs_chunk_io_width(leaf, chunk); 6932 map->io_align = btrfs_chunk_io_align(leaf, chunk); 6933 map->type = type; 6934 /* 6935 * We can't use the sub_stripes value, as for profiles other than 6936 * RAID10, they may have 0 as sub_stripes for filesystems created by 6937 * older mkfs (<v5.4). 6938 * In that case, it can cause divide-by-zero errors later. 
6939 * Since currently sub_stripes is fixed for each profile, let's 6940 * use the trusted value instead. 6941 */ 6942 map->sub_stripes = btrfs_raid_array[index].sub_stripes; 6943 map->verified_stripes = 0; 6944 map->stripe_size = btrfs_calc_stripe_length(map); 6945 for (i = 0; i < num_stripes; i++) { 6946 map->stripes[i].physical = 6947 btrfs_stripe_offset_nr(leaf, chunk, i); 6948 devid = btrfs_stripe_devid_nr(leaf, chunk, i); 6949 args.devid = devid; 6950 read_extent_buffer(leaf, uuid, (unsigned long) 6951 btrfs_stripe_dev_uuid_nr(chunk, i), 6952 BTRFS_UUID_SIZE); 6953 args.uuid = uuid; 6954 map->stripes[i].dev = btrfs_find_device(fs_info->fs_devices, &args); 6955 if (!map->stripes[i].dev) { 6956 map->stripes[i].dev = handle_missing_device(fs_info, 6957 devid, uuid); 6958 if (IS_ERR(map->stripes[i].dev)) { 6959 ret = PTR_ERR(map->stripes[i].dev); 6960 btrfs_free_chunk_map(map); 6961 return ret; 6962 } 6963 } 6964 6965 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, 6966 &(map->stripes[i].dev->dev_state)); 6967 } 6968 6969 ret = btrfs_add_chunk_map(fs_info, map); 6970 if (ret < 0) { 6971 btrfs_err(fs_info, 6972 "failed to add chunk map, start=%llu len=%llu: %d", 6973 map->start, map->chunk_len, ret); 6974 } 6975 6976 return ret; 6977 } 6978 6979 static void fill_device_from_item(struct extent_buffer *leaf, 6980 struct btrfs_dev_item *dev_item, 6981 struct btrfs_device *device) 6982 { 6983 unsigned long ptr; 6984 6985 device->devid = btrfs_device_id(leaf, dev_item); 6986 device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item); 6987 device->total_bytes = device->disk_total_bytes; 6988 device->commit_total_bytes = device->disk_total_bytes; 6989 device->bytes_used = btrfs_device_bytes_used(leaf, dev_item); 6990 device->commit_bytes_used = device->bytes_used; 6991 device->type = btrfs_device_type(leaf, dev_item); 6992 device->io_align = btrfs_device_io_align(leaf, dev_item); 6993 device->io_width = btrfs_device_io_width(leaf, dev_item); 6994 device->sector_size = btrfs_device_sector_size(leaf, dev_item); 6995 WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID); 6996 clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state); 6997 6998 ptr = btrfs_device_uuid(dev_item); 6999 read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE); 7000 } 7001 7002 static struct btrfs_fs_devices *open_seed_devices(struct btrfs_fs_info *fs_info, 7003 u8 *fsid) 7004 { 7005 struct btrfs_fs_devices *fs_devices; 7006 int ret; 7007 7008 lockdep_assert_held(&uuid_mutex); 7009 ASSERT(fsid); 7010 7011 /* This will match only for multi-device seed fs */ 7012 list_for_each_entry(fs_devices, &fs_info->fs_devices->seed_list, seed_list) 7013 if (!memcmp(fs_devices->fsid, fsid, BTRFS_FSID_SIZE)) 7014 return fs_devices; 7015 7016 7017 fs_devices = find_fsid(fsid, NULL); 7018 if (!fs_devices) { 7019 if (!btrfs_test_opt(fs_info, DEGRADED)) 7020 return ERR_PTR(-ENOENT); 7021 7022 fs_devices = alloc_fs_devices(fsid); 7023 if (IS_ERR(fs_devices)) 7024 return fs_devices; 7025 7026 fs_devices->seeding = true; 7027 fs_devices->opened = 1; 7028 return fs_devices; 7029 } 7030 7031 /* 7032 * Upon first call for a seed fs fsid, just create a private copy of the 7033 * respective fs_devices and anchor it at fs_info->fs_devices->seed_list 7034 */ 7035 fs_devices = clone_fs_devices(fs_devices); 7036 if (IS_ERR(fs_devices)) 7037 return fs_devices; 7038 7039 ret = open_fs_devices(fs_devices, BLK_OPEN_READ, fs_info->bdev_holder); 7040 if (ret) { 7041 free_fs_devices(fs_devices); 7042 return ERR_PTR(ret); 7043 } 7044 7045 if 
(!fs_devices->seeding) { 7046 close_fs_devices(fs_devices); 7047 free_fs_devices(fs_devices); 7048 return ERR_PTR(-EINVAL); 7049 } 7050 7051 list_add(&fs_devices->seed_list, &fs_info->fs_devices->seed_list); 7052 7053 return fs_devices; 7054 } 7055 7056 static int read_one_dev(struct extent_buffer *leaf, 7057 struct btrfs_dev_item *dev_item) 7058 { 7059 BTRFS_DEV_LOOKUP_ARGS(args); 7060 struct btrfs_fs_info *fs_info = leaf->fs_info; 7061 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 7062 struct btrfs_device *device; 7063 u64 devid; 7064 int ret; 7065 u8 fs_uuid[BTRFS_FSID_SIZE]; 7066 u8 dev_uuid[BTRFS_UUID_SIZE]; 7067 7068 devid = btrfs_device_id(leaf, dev_item); 7069 args.devid = devid; 7070 read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item), 7071 BTRFS_UUID_SIZE); 7072 read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item), 7073 BTRFS_FSID_SIZE); 7074 args.uuid = dev_uuid; 7075 args.fsid = fs_uuid; 7076 7077 if (memcmp(fs_uuid, fs_devices->metadata_uuid, BTRFS_FSID_SIZE)) { 7078 fs_devices = open_seed_devices(fs_info, fs_uuid); 7079 if (IS_ERR(fs_devices)) 7080 return PTR_ERR(fs_devices); 7081 } 7082 7083 device = btrfs_find_device(fs_info->fs_devices, &args); 7084 if (!device) { 7085 if (!btrfs_test_opt(fs_info, DEGRADED)) { 7086 btrfs_report_missing_device(fs_info, devid, 7087 dev_uuid, true); 7088 return -ENOENT; 7089 } 7090 7091 device = add_missing_dev(fs_devices, devid, dev_uuid); 7092 if (IS_ERR(device)) { 7093 btrfs_err(fs_info, 7094 "failed to add missing dev %llu: %ld", 7095 devid, PTR_ERR(device)); 7096 return PTR_ERR(device); 7097 } 7098 btrfs_report_missing_device(fs_info, devid, dev_uuid, false); 7099 } else { 7100 if (!device->bdev) { 7101 if (!btrfs_test_opt(fs_info, DEGRADED)) { 7102 btrfs_report_missing_device(fs_info, 7103 devid, dev_uuid, true); 7104 return -ENOENT; 7105 } 7106 btrfs_report_missing_device(fs_info, devid, 7107 dev_uuid, false); 7108 } 7109 7110 if (!device->bdev && 7111 !test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) { 7112 /* 7113 * this happens when a device that was properly setup 7114 * in the device info lists suddenly goes bad. 
device->bdev is NULL, and so we have to
			 * set the BTRFS_DEV_STATE_MISSING bit here.
			 */
			device->fs_devices->missing_devices++;
			set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
		}

		/* Move the device to its own fs_devices */
		if (device->fs_devices != fs_devices) {
			ASSERT(test_bit(BTRFS_DEV_STATE_MISSING,
					&device->dev_state));

			list_move(&device->dev_list, &fs_devices->devices);
			device->fs_devices->num_devices--;
			fs_devices->num_devices++;

			device->fs_devices->missing_devices--;
			fs_devices->missing_devices++;

			device->fs_devices = fs_devices;
		}
	}

	if (device->fs_devices != fs_info->fs_devices) {
		BUG_ON(test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state));
		if (device->generation !=
		    btrfs_device_generation(leaf, dev_item))
			return -EINVAL;
	}

	fill_device_from_item(leaf, dev_item, device);
	if (device->bdev) {
		u64 max_total_bytes = bdev_nr_bytes(device->bdev);

		if (device->total_bytes > max_total_bytes) {
			btrfs_err(fs_info,
			"device total_bytes should be at most %llu but found %llu",
				  max_total_bytes, device->total_bytes);
			return -EINVAL;
		}
	}
	set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    !test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
		device->fs_devices->total_rw_bytes += device->total_bytes;
		atomic64_add(device->total_bytes - device->bytes_used,
			     &fs_info->free_chunk_space);
	}
	ret = 0;
	return ret;
}

int btrfs_read_sys_array(struct btrfs_fs_info *fs_info)
{
	struct btrfs_super_block *super_copy = fs_info->super_copy;
	struct extent_buffer *sb;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *array_ptr;
	unsigned long sb_array_offset;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur_offset;
	u64 type;
	struct btrfs_key key;

	ASSERT(BTRFS_SUPER_INFO_SIZE <= fs_info->nodesize);

	/*
	 * We allocated a dummy extent, just to use extent buffer accessors.
	 * There will be unused space after BTRFS_SUPER_INFO_SIZE, but
	 * that's fine, we will not go beyond the system chunk array anyway.
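	 *
	 * Using a dummy extent buffer means the regular btrfs_chunk_*()
	 * accessors can be reused to parse the array, instead of open coding
	 * offsets into the superblock copy.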
7189 */ 7190 sb = alloc_dummy_extent_buffer(fs_info, BTRFS_SUPER_INFO_OFFSET); 7191 if (!sb) 7192 return -ENOMEM; 7193 set_extent_buffer_uptodate(sb); 7194 7195 write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE); 7196 array_size = btrfs_super_sys_array_size(super_copy); 7197 7198 array_ptr = super_copy->sys_chunk_array; 7199 sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array); 7200 cur_offset = 0; 7201 7202 while (cur_offset < array_size) { 7203 disk_key = (struct btrfs_disk_key *)array_ptr; 7204 len = sizeof(*disk_key); 7205 if (cur_offset + len > array_size) 7206 goto out_short_read; 7207 7208 btrfs_disk_key_to_cpu(&key, disk_key); 7209 7210 array_ptr += len; 7211 sb_array_offset += len; 7212 cur_offset += len; 7213 7214 if (key.type != BTRFS_CHUNK_ITEM_KEY) { 7215 btrfs_err(fs_info, 7216 "unexpected item type %u in sys_array at offset %u", 7217 (u32)key.type, cur_offset); 7218 ret = -EIO; 7219 break; 7220 } 7221 7222 chunk = (struct btrfs_chunk *)sb_array_offset; 7223 /* 7224 * At least one btrfs_chunk with one stripe must be present, 7225 * exact stripe count check comes afterwards 7226 */ 7227 len = btrfs_chunk_item_size(1); 7228 if (cur_offset + len > array_size) 7229 goto out_short_read; 7230 7231 num_stripes = btrfs_chunk_num_stripes(sb, chunk); 7232 if (!num_stripes) { 7233 btrfs_err(fs_info, 7234 "invalid number of stripes %u in sys_array at offset %u", 7235 num_stripes, cur_offset); 7236 ret = -EIO; 7237 break; 7238 } 7239 7240 type = btrfs_chunk_type(sb, chunk); 7241 if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) { 7242 btrfs_err(fs_info, 7243 "invalid chunk type %llu in sys_array at offset %u", 7244 type, cur_offset); 7245 ret = -EIO; 7246 break; 7247 } 7248 7249 len = btrfs_chunk_item_size(num_stripes); 7250 if (cur_offset + len > array_size) 7251 goto out_short_read; 7252 7253 ret = read_one_chunk(&key, sb, chunk); 7254 if (ret) 7255 break; 7256 7257 array_ptr += len; 7258 sb_array_offset += len; 7259 cur_offset += len; 7260 } 7261 clear_extent_buffer_uptodate(sb); 7262 free_extent_buffer_stale(sb); 7263 return ret; 7264 7265 out_short_read: 7266 btrfs_err(fs_info, "sys_array too short to read %u bytes at offset %u", 7267 len, cur_offset); 7268 clear_extent_buffer_uptodate(sb); 7269 free_extent_buffer_stale(sb); 7270 return -EIO; 7271 } 7272 7273 /* 7274 * Check if all chunks in the fs are OK for read-write degraded mount 7275 * 7276 * If the @failing_dev is specified, it's accounted as missing. 7277 * 7278 * Return true if all chunks meet the minimal RW mount requirements. 7279 * Return false if any chunk doesn't meet the minimal RW mount requirements. 7280 */ 7281 bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info, 7282 struct btrfs_device *failing_dev) 7283 { 7284 struct btrfs_chunk_map *map; 7285 u64 next_start; 7286 bool ret = true; 7287 7288 map = btrfs_find_chunk_map(fs_info, 0, U64_MAX); 7289 /* No chunk at all? 
Return false anyway */
	if (!map) {
		ret = false;
		goto out;
	}
	while (map) {
		int missing = 0;
		int max_tolerated;
		int i;

		max_tolerated =
			btrfs_get_num_tolerated_disk_barrier_failures(
					map->type);
		for (i = 0; i < map->num_stripes; i++) {
			struct btrfs_device *dev = map->stripes[i].dev;

			if (!dev || !dev->bdev ||
			    test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) ||
			    dev->last_flush_error)
				missing++;
			else if (failing_dev && failing_dev == dev)
				missing++;
		}
		if (missing > max_tolerated) {
			if (!failing_dev)
				btrfs_warn(fs_info,
	"chunk %llu missing %d devices, max tolerance is %d for writable mount",
					   map->start, missing, max_tolerated);
			btrfs_free_chunk_map(map);
			ret = false;
			goto out;
		}
		next_start = map->start + map->chunk_len;
		btrfs_free_chunk_map(map);

		map = btrfs_find_chunk_map(fs_info, next_start, U64_MAX - next_start);
	}
out:
	return ret;
}

static void readahead_tree_node_children(struct extent_buffer *node)
{
	int i;
	const int nr_items = btrfs_header_nritems(node);

	for (i = 0; i < nr_items; i++)
		btrfs_readahead_node_child(node, i);
}

int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root = fs_info->chunk_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	int ret;
	int slot;
	int iter_ret = 0;
	u64 total_dev = 0;
	u64 last_ra_node = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * uuid_mutex is needed only if we are mounting a sprout FS,
	 * otherwise we don't need it.
	 */
	mutex_lock(&uuid_mutex);

	/*
	 * It is possible for mount and umount to race in such a way that
	 * we execute this code path, but open_fs_devices failed to clear
	 * total_rw_bytes. We certainly want it cleared before reading the
	 * device items, so clear it here.
	 */
	fs_info->fs_devices->total_rw_bytes = 0;

	/*
	 * Lockdep complains about a possible circular locking dependency
	 * between a disk's open_mutex (struct gendisk.open_mutex), the rw
	 * semaphores used for freeze protection of a fs (struct
	 * super_block.s_writers), which we take when starting a transaction,
	 * and extent buffers of the chunk tree if we call read_one_dev()
	 * while holding a lock on an extent buffer of the chunk tree. Since
	 * we are mounting the filesystem and at this point there can't be any
	 * concurrent task modifying the chunk tree, to keep it simple, just
	 * skip locking on the chunk tree.
	 */
	ASSERT(!test_bit(BTRFS_FS_OPEN, &fs_info->flags));
	path->skip_locking = 1;

	/*
	 * Read all device items, and then all the chunk items. All
	 * device items are found before any chunk item (their object id
	 * is smaller than the lowest possible object id for a chunk
	 * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID).
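	 *
	 * Hence the iteration below starts at (BTRFS_DEV_ITEMS_OBJECTID, 0, 0)
	 * and visits every device item before the first chunk item.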
7388 */ 7389 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 7390 key.offset = 0; 7391 key.type = 0; 7392 btrfs_for_each_slot(root, &key, &found_key, path, iter_ret) { 7393 struct extent_buffer *node = path->nodes[1]; 7394 7395 leaf = path->nodes[0]; 7396 slot = path->slots[0]; 7397 7398 if (node) { 7399 if (last_ra_node != node->start) { 7400 readahead_tree_node_children(node); 7401 last_ra_node = node->start; 7402 } 7403 } 7404 if (found_key.type == BTRFS_DEV_ITEM_KEY) { 7405 struct btrfs_dev_item *dev_item; 7406 dev_item = btrfs_item_ptr(leaf, slot, 7407 struct btrfs_dev_item); 7408 ret = read_one_dev(leaf, dev_item); 7409 if (ret) 7410 goto error; 7411 total_dev++; 7412 } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) { 7413 struct btrfs_chunk *chunk; 7414 7415 /* 7416 * We are only called at mount time, so no need to take 7417 * fs_info->chunk_mutex. Plus, to avoid lockdep warnings, 7418 * we always lock first fs_info->chunk_mutex before 7419 * acquiring any locks on the chunk tree. This is a 7420 * requirement for chunk allocation, see the comment on 7421 * top of btrfs_chunk_alloc() for details. 7422 */ 7423 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk); 7424 ret = read_one_chunk(&found_key, leaf, chunk); 7425 if (ret) 7426 goto error; 7427 } 7428 } 7429 /* Catch error found during iteration */ 7430 if (iter_ret < 0) { 7431 ret = iter_ret; 7432 goto error; 7433 } 7434 7435 /* 7436 * After loading chunk tree, we've got all device information, 7437 * do another round of validation checks. 7438 */ 7439 if (total_dev != fs_info->fs_devices->total_devices) { 7440 btrfs_warn(fs_info, 7441 "super block num_devices %llu mismatch with DEV_ITEM count %llu, will be repaired on next transaction commit", 7442 btrfs_super_num_devices(fs_info->super_copy), 7443 total_dev); 7444 fs_info->fs_devices->total_devices = total_dev; 7445 btrfs_set_super_num_devices(fs_info->super_copy, total_dev); 7446 } 7447 if (btrfs_super_total_bytes(fs_info->super_copy) < 7448 fs_info->fs_devices->total_rw_bytes) { 7449 btrfs_err(fs_info, 7450 "super_total_bytes %llu mismatch with fs_devices total_rw_bytes %llu", 7451 btrfs_super_total_bytes(fs_info->super_copy), 7452 fs_info->fs_devices->total_rw_bytes); 7453 ret = -EINVAL; 7454 goto error; 7455 } 7456 ret = 0; 7457 error: 7458 mutex_unlock(&uuid_mutex); 7459 7460 btrfs_free_path(path); 7461 return ret; 7462 } 7463 7464 int btrfs_init_devices_late(struct btrfs_fs_info *fs_info) 7465 { 7466 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs; 7467 struct btrfs_device *device; 7468 int ret = 0; 7469 7470 fs_devices->fs_info = fs_info; 7471 7472 mutex_lock(&fs_devices->device_list_mutex); 7473 list_for_each_entry(device, &fs_devices->devices, dev_list) 7474 device->fs_info = fs_info; 7475 7476 list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) { 7477 list_for_each_entry(device, &seed_devs->devices, dev_list) { 7478 device->fs_info = fs_info; 7479 ret = btrfs_get_dev_zone_info(device, false); 7480 if (ret) 7481 break; 7482 } 7483 7484 seed_devs->fs_info = fs_info; 7485 } 7486 mutex_unlock(&fs_devices->device_list_mutex); 7487 7488 return ret; 7489 } 7490 7491 static u64 btrfs_dev_stats_value(const struct extent_buffer *eb, 7492 const struct btrfs_dev_stats_item *ptr, 7493 int index) 7494 { 7495 u64 val; 7496 7497 read_extent_buffer(eb, &val, 7498 offsetof(struct btrfs_dev_stats_item, values) + 7499 ((unsigned long)ptr) + (index * sizeof(u64)), 7500 sizeof(val)); 7501 return val; 7502 } 7503 7504 static void 
btrfs_set_dev_stats_value(struct extent_buffer *eb, 7505 struct btrfs_dev_stats_item *ptr, 7506 int index, u64 val) 7507 { 7508 write_extent_buffer(eb, &val, 7509 offsetof(struct btrfs_dev_stats_item, values) + 7510 ((unsigned long)ptr) + (index * sizeof(u64)), 7511 sizeof(val)); 7512 } 7513 7514 static int btrfs_device_init_dev_stats(struct btrfs_device *device, 7515 struct btrfs_path *path) 7516 { 7517 struct btrfs_dev_stats_item *ptr; 7518 struct extent_buffer *eb; 7519 struct btrfs_key key; 7520 int item_size; 7521 int i, ret, slot; 7522 7523 if (!device->fs_info->dev_root) 7524 return 0; 7525 7526 key.objectid = BTRFS_DEV_STATS_OBJECTID; 7527 key.type = BTRFS_PERSISTENT_ITEM_KEY; 7528 key.offset = device->devid; 7529 ret = btrfs_search_slot(NULL, device->fs_info->dev_root, &key, path, 0, 0); 7530 if (ret) { 7531 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) 7532 btrfs_dev_stat_set(device, i, 0); 7533 device->dev_stats_valid = 1; 7534 btrfs_release_path(path); 7535 return ret < 0 ? ret : 0; 7536 } 7537 slot = path->slots[0]; 7538 eb = path->nodes[0]; 7539 item_size = btrfs_item_size(eb, slot); 7540 7541 ptr = btrfs_item_ptr(eb, slot, struct btrfs_dev_stats_item); 7542 7543 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) { 7544 if (item_size >= (1 + i) * sizeof(__le64)) 7545 btrfs_dev_stat_set(device, i, 7546 btrfs_dev_stats_value(eb, ptr, i)); 7547 else 7548 btrfs_dev_stat_set(device, i, 0); 7549 } 7550 7551 device->dev_stats_valid = 1; 7552 btrfs_dev_stat_print_on_load(device); 7553 btrfs_release_path(path); 7554 7555 return 0; 7556 } 7557 7558 int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info) 7559 { 7560 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs; 7561 struct btrfs_device *device; 7562 struct btrfs_path *path = NULL; 7563 int ret = 0; 7564 7565 path = btrfs_alloc_path(); 7566 if (!path) 7567 return -ENOMEM; 7568 7569 mutex_lock(&fs_devices->device_list_mutex); 7570 list_for_each_entry(device, &fs_devices->devices, dev_list) { 7571 ret = btrfs_device_init_dev_stats(device, path); 7572 if (ret) 7573 goto out; 7574 } 7575 list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) { 7576 list_for_each_entry(device, &seed_devs->devices, dev_list) { 7577 ret = btrfs_device_init_dev_stats(device, path); 7578 if (ret) 7579 goto out; 7580 } 7581 } 7582 out: 7583 mutex_unlock(&fs_devices->device_list_mutex); 7584 7585 btrfs_free_path(path); 7586 return ret; 7587 } 7588 7589 static int update_dev_stat_item(struct btrfs_trans_handle *trans, 7590 struct btrfs_device *device) 7591 { 7592 struct btrfs_fs_info *fs_info = trans->fs_info; 7593 struct btrfs_root *dev_root = fs_info->dev_root; 7594 struct btrfs_path *path; 7595 struct btrfs_key key; 7596 struct extent_buffer *eb; 7597 struct btrfs_dev_stats_item *ptr; 7598 int ret; 7599 int i; 7600 7601 key.objectid = BTRFS_DEV_STATS_OBJECTID; 7602 key.type = BTRFS_PERSISTENT_ITEM_KEY; 7603 key.offset = device->devid; 7604 7605 path = btrfs_alloc_path(); 7606 if (!path) 7607 return -ENOMEM; 7608 ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1); 7609 if (ret < 0) { 7610 btrfs_warn_in_rcu(fs_info, 7611 "error %d while searching for dev_stats item for device %s", 7612 ret, btrfs_dev_name(device)); 7613 goto out; 7614 } 7615 7616 if (ret == 0 && 7617 btrfs_item_size(path->nodes[0], path->slots[0]) < sizeof(*ptr)) { 7618 /* need to delete old one and insert a new one */ 7619 ret = btrfs_del_item(trans, dev_root, path); 7620 if (ret != 0) { 7621 btrfs_warn_in_rcu(fs_info, 7622 "delete too small dev_stats 
static int update_dev_stat_item(struct btrfs_trans_handle *trans,
				struct btrfs_device *device)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *dev_root = fs_info->dev_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *eb;
	struct btrfs_dev_stats_item *ptr;
	int ret;
	int i;

	key.objectid = BTRFS_DEV_STATS_OBJECTID;
	key.type = BTRFS_PERSISTENT_ITEM_KEY;
	key.offset = device->devid;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
	if (ret < 0) {
		btrfs_warn_in_rcu(fs_info,
			"error %d while searching for dev_stats item for device %s",
			ret, btrfs_dev_name(device));
		goto out;
	}

	if (ret == 0 &&
	    btrfs_item_size(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
		/* Need to delete the old one and insert a new one */
		ret = btrfs_del_item(trans, dev_root, path);
		if (ret != 0) {
			btrfs_warn_in_rcu(fs_info,
				"delete too small dev_stats item for device %s failed %d",
				btrfs_dev_name(device), ret);
			goto out;
		}
		ret = 1;
	}

	if (ret == 1) {
		/* Need to insert a new item */
		btrfs_release_path(path);
		ret = btrfs_insert_empty_item(trans, dev_root, path,
					      &key, sizeof(*ptr));
		if (ret < 0) {
			btrfs_warn_in_rcu(fs_info,
				"insert dev_stats item for device %s failed %d",
				btrfs_dev_name(device), ret);
			goto out;
		}
	}

	eb = path->nodes[0];
	ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		btrfs_set_dev_stats_value(eb, ptr, i,
					  btrfs_dev_stat_read(device, i));
	btrfs_mark_buffer_dirty(trans, eb);

out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Called from commit_transaction.  Writes all changed device stats to disk.
 */
int btrfs_run_dev_stats(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;
	int stats_cnt;
	int ret = 0;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		stats_cnt = atomic_read(&device->dev_stats_ccnt);
		if (!device->dev_stats_valid || stats_cnt == 0)
			continue;

		/*
		 * There is a LOAD-LOAD control dependency between the value of
		 * dev_stats_ccnt and updating the on-disk values which requires
		 * reading the in-memory counters.  Such control dependencies
		 * require explicit read memory barriers.
		 *
		 * This memory barrier pairs with smp_mb__before_atomic in
		 * btrfs_dev_stat_inc/btrfs_dev_stat_set and with the full
		 * barrier implied by atomic_xchg in
		 * btrfs_dev_stats_read_and_reset.
		 */
		smp_rmb();

		ret = update_dev_stat_item(trans, device);
		if (!ret)
			atomic_sub(stats_cnt, &device->dev_stats_ccnt);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	return ret;
}

void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
{
	btrfs_dev_stat_inc(dev, index);

	if (!dev->dev_stats_valid)
		return;
	btrfs_err_rl_in_rcu(dev->fs_info,
		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
			   btrfs_dev_name(dev),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
}

static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
{
	int i;

	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		if (btrfs_dev_stat_read(dev, i) != 0)
			break;
	if (i == BTRFS_DEV_STAT_VALUES_MAX)
		return; /* all values == 0, suppress message */

	btrfs_info_in_rcu(dev->fs_info,
		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
			  btrfs_dev_name(dev),
			  btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
			  btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
			  btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
			  btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
			  btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
}

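/*
 * Handle the ioctl that reads device error statistics (struct
 * btrfs_ioctl_get_dev_stats).  Copies the counters of the device given by
 * @stats->devid into @stats->values; if BTRFS_DEV_STATS_RESET is set in
 * @stats->flags, the counters are also reset, using an atomic exchange so
 * that no concurrent increment is lost.
 */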
int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info,
			struct btrfs_ioctl_get_dev_stats *stats)
{
	BTRFS_DEV_LOOKUP_ARGS(args);
	struct btrfs_device *dev;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	int i;

	mutex_lock(&fs_devices->device_list_mutex);
	args.devid = stats->devid;
	dev = btrfs_find_device(fs_info->fs_devices, &args);
	mutex_unlock(&fs_devices->device_list_mutex);

	if (!dev) {
		btrfs_warn(fs_info, "get dev_stats failed, device not found");
		return -ENODEV;
	} else if (!dev->dev_stats_valid) {
		btrfs_warn(fs_info, "get dev_stats failed, not yet valid");
		return -ENODEV;
	} else if (stats->flags & BTRFS_DEV_STATS_RESET) {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
			if (stats->nr_items > i)
				stats->values[i] =
					btrfs_dev_stat_read_and_reset(dev, i);
			else
				btrfs_dev_stat_set(dev, i, 0);
		}
		btrfs_info(fs_info, "device stats zeroed by %s (%d)",
			   current->comm, task_pid_nr(current));
	} else {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
			if (stats->nr_items > i)
				stats->values[i] = btrfs_dev_stat_read(dev, i);
	}
	if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
		stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
	return 0;
}

/*
 * Update the size and bytes used for each device where it changed.  This is
 * delayed since we would otherwise get errors while writing out the
 * superblocks.
 *
 * Must be invoked during transaction commit.
 */
void btrfs_commit_device_sizes(struct btrfs_transaction *trans)
{
	struct btrfs_device *curr, *next;

	ASSERT(trans->state == TRANS_STATE_COMMIT_DOING);

	if (list_empty(&trans->dev_update_list))
		return;

	/*
	 * We don't need the device_list_mutex here.  This list is owned by the
	 * transaction and the transaction must complete before the device is
	 * released.
	 */
	mutex_lock(&trans->fs_info->chunk_mutex);
	list_for_each_entry_safe(curr, next, &trans->dev_update_list,
				 post_commit_list) {
		list_del_init(&curr->post_commit_list);
		curr->commit_total_bytes = curr->disk_total_bytes;
		curr->commit_bytes_used = curr->bytes_used;
	}
	mutex_unlock(&trans->fs_info->chunk_mutex);
}

/*
 * Multiplicity factor for simple profiles: DUP, RAID1-like and RAID10.
 *
 * For example, RAID1 keeps two copies of each block, so a block group of
 * logical size X consumes 2 * X of raw device space (factor 2), while
 * SINGLE and RAID0 have a factor of 1.
 */
int btrfs_bg_type_to_factor(u64 flags)
{
	const int index = btrfs_bg_flags_to_raid_index(flags);

	return btrfs_raid_array[index].ncopies;
}

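/*
 * Verify a single dev extent against its chunk: the chunk must exist, the
 * extent length must match the chunk's per-device stripe length, exactly
 * one stripe of the chunk must be located at this devid/physical position,
 * and the extent must not reach beyond the device (nor break zone
 * alignment on zoned devices).
 */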
static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
				 u64 chunk_offset, u64 devid,
				 u64 physical_offset, u64 physical_len)
{
	struct btrfs_dev_lookup_args args = { .devid = devid };
	struct btrfs_chunk_map *map;
	struct btrfs_device *dev;
	u64 stripe_len;
	bool found = false;
	int ret = 0;
	int i;

	map = btrfs_find_chunk_map(fs_info, chunk_offset, 1);
	if (!map) {
		btrfs_err(fs_info,
"dev extent physical offset %llu on devid %llu doesn't have corresponding chunk",
			  physical_offset, devid);
		ret = -EUCLEAN;
		goto out;
	}

	stripe_len = btrfs_calc_stripe_length(map);
	if (physical_len != stripe_len) {
		btrfs_err(fs_info,
"dev extent physical offset %llu on devid %llu length doesn't match chunk %llu, have %llu expect %llu",
			  physical_offset, devid, map->start, physical_len,
			  stripe_len);
		ret = -EUCLEAN;
		goto out;
	}

	/*
	 * Very old mkfs.btrfs (before v4.1) will not respect the reserved
	 * space.  Although the kernel can handle it without problems, it is
	 * better to warn the user.
	 */
	if (physical_offset < BTRFS_DEVICE_RANGE_RESERVED)
		btrfs_warn(fs_info,
		"devid %llu physical %llu len %llu inside the reserved space",
			   devid, physical_offset, physical_len);

	for (i = 0; i < map->num_stripes; i++) {
		if (map->stripes[i].dev->devid == devid &&
		    map->stripes[i].physical == physical_offset) {
			found = true;
			if (map->verified_stripes >= map->num_stripes) {
				btrfs_err(fs_info,
				"too many dev extents for chunk %llu found",
					  map->start);
				ret = -EUCLEAN;
				goto out;
			}
			map->verified_stripes++;
			break;
		}
	}
	if (!found) {
		btrfs_err(fs_info,
	"dev extent physical offset %llu devid %llu has no corresponding chunk",
			  physical_offset, devid);
		ret = -EUCLEAN;
	}

	/* Make sure no dev extent is beyond device boundary */
	dev = btrfs_find_device(fs_info->fs_devices, &args);
	if (!dev) {
		btrfs_err(fs_info, "failed to find devid %llu", devid);
		ret = -EUCLEAN;
		goto out;
	}

	if (physical_offset + physical_len > dev->disk_total_bytes) {
		btrfs_err(fs_info,
"dev extent devid %llu physical offset %llu len %llu is beyond device boundary %llu",
			  devid, physical_offset, physical_len,
			  dev->disk_total_bytes);
		ret = -EUCLEAN;
		goto out;
	}

	if (dev->zone_info) {
		u64 zone_size = dev->zone_info->zone_size;

		if (!IS_ALIGNED(physical_offset, zone_size) ||
		    !IS_ALIGNED(physical_len, zone_size)) {
			btrfs_err(fs_info,
"zoned: dev extent devid %llu physical offset %llu len %llu is not aligned to device zone",
				  devid, physical_offset, physical_len);
			ret = -EUCLEAN;
			goto out;
		}
	}

out:
	btrfs_free_chunk_map(map);
	return ret;
}

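/*
 * Second half of the dev extent verification: verify_one_dev_extent()
 * bumped verified_stripes for every matching dev extent, so by now every
 * chunk must have been matched by exactly num_stripes dev extents.
 */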
static int verify_chunk_dev_extent_mapping(struct btrfs_fs_info *fs_info)
{
	struct rb_node *node;
	int ret = 0;

	read_lock(&fs_info->mapping_tree_lock);
	for (node = rb_first_cached(&fs_info->mapping_tree); node; node = rb_next(node)) {
		struct btrfs_chunk_map *map;

		map = rb_entry(node, struct btrfs_chunk_map, rb_node);
		if (map->num_stripes != map->verified_stripes) {
			btrfs_err(fs_info,
			"chunk %llu has missing dev extent, have %d expect %d",
				  map->start, map->verified_stripes,
				  map->num_stripes);
			ret = -EUCLEAN;
			goto out;
		}
	}
out:
	read_unlock(&fs_info->mapping_tree_lock);
	return ret;
}

/*
 * Ensure that all dev extents are mapped to the correct chunk, otherwise
 * later chunk allocation/free would cause unexpected behavior.
 *
 * NOTE: This will iterate through the whole device tree, which should be
 * about the same size as the chunk tree.  This slightly increases mount time.
 */
int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info)
{
	struct btrfs_path *path;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_key key;
	u64 prev_devid = 0;
	u64 prev_dev_ext_end = 0;
	int ret = 0;

	/*
	 * We don't have a dev_root because we mounted with ignorebadroots and
	 * failed to load the root, so we want to skip the verification in this
	 * case for sure.
	 *
	 * However if the dev root is fine, but the tree itself is corrupted
	 * we'd still fail to mount.  This verification is only to make sure
	 * writes can happen safely, so instead just bypass this check
	 * completely in the case of IGNOREBADROOTS.
	 */
	if (btrfs_test_opt(fs_info, IGNOREBADROOTS))
		return 0;

	key.objectid = 1;
	key.type = BTRFS_DEV_EXTENT_KEY;
	key.offset = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = READA_FORWARD;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
		ret = btrfs_next_leaf(root, path);
		if (ret < 0)
			goto out;
		/* No dev extents at all? Not good */
		if (ret > 0) {
			ret = -EUCLEAN;
			goto out;
		}
	}
	while (1) {
		struct extent_buffer *leaf = path->nodes[0];
		struct btrfs_dev_extent *dext;
		int slot = path->slots[0];
		u64 chunk_offset;
		u64 physical_offset;
		u64 physical_len;
		u64 devid;

		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.type != BTRFS_DEV_EXTENT_KEY)
			break;
		devid = key.objectid;
		physical_offset = key.offset;

		dext = btrfs_item_ptr(leaf, slot, struct btrfs_dev_extent);
		chunk_offset = btrfs_dev_extent_chunk_offset(leaf, dext);
		physical_len = btrfs_dev_extent_length(leaf, dext);

		/* Check if this dev extent overlaps with the previous one */
		if (devid == prev_devid && physical_offset < prev_dev_ext_end) {
			btrfs_err(fs_info,
"dev extent devid %llu physical offset %llu overlap with previous dev extent end %llu",
				  devid, physical_offset, prev_dev_ext_end);
			ret = -EUCLEAN;
			goto out;
		}

		ret = verify_one_dev_extent(fs_info, chunk_offset, devid,
					    physical_offset, physical_len);
		if (ret < 0)
			goto out;
		prev_devid = devid;
		prev_dev_ext_end = physical_offset + physical_len;

		ret = btrfs_next_item(root, path);
		if (ret < 0)
			goto out;
		if (ret > 0) {
			ret = 0;
			break;
		}
	}

	/* Ensure all chunks have corresponding dev extents */
	ret = verify_chunk_dev_extent_mapping(fs_info);
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Check whether the given block group or device is pinned by any inode being
 * used as a swapfile.
 */
bool btrfs_pinned_by_swapfile(struct btrfs_fs_info *fs_info, void *ptr)
{
	struct btrfs_swapfile_pin *sp;
	struct rb_node *node;

	spin_lock(&fs_info->swapfile_pins_lock);
	node = fs_info->swapfile_pins.rb_node;
	while (node) {
		sp = rb_entry(node, struct btrfs_swapfile_pin, node);
		if (ptr < sp->ptr)
			node = node->rb_left;
		else if (ptr > sp->ptr)
			node = node->rb_right;
		else
			break;
	}
	spin_unlock(&fs_info->swapfile_pins_lock);
	return node != NULL;
}

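/*
 * Worker for btrfs_repair_one_zone().  Consumes the block group reference
 * passed in via @data, takes the balance exclusive op and relocates the
 * chunk at the block group's start to repair an IO failure.
 */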
static int relocating_repair_kthread(void *data)
{
	struct btrfs_block_group *cache = data;
	struct btrfs_fs_info *fs_info = cache->fs_info;
	u64 target;
	int ret = 0;

	target = cache->start;
	btrfs_put_block_group(cache);

	sb_start_write(fs_info->sb);
	if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE)) {
		btrfs_info(fs_info,
			   "zoned: skip relocating block group %llu to repair: EBUSY",
			   target);
		sb_end_write(fs_info->sb);
		return -EBUSY;
	}

	mutex_lock(&fs_info->reclaim_bgs_lock);

	/* Ensure block group still exists */
	cache = btrfs_lookup_block_group(fs_info, target);
	if (!cache)
		goto out;

	if (!test_bit(BLOCK_GROUP_FLAG_RELOCATING_REPAIR, &cache->runtime_flags))
		goto out;

	ret = btrfs_may_alloc_data_chunk(fs_info, target);
	if (ret < 0)
		goto out;

	btrfs_info(fs_info,
		   "zoned: relocating block group %llu to repair IO failure",
		   target);
	ret = btrfs_relocate_chunk(fs_info, target);

out:
	if (cache)
		btrfs_put_block_group(cache);
	mutex_unlock(&fs_info->reclaim_bgs_lock);
	btrfs_exclop_finish(fs_info);
	sb_end_write(fs_info->sb);

	return ret;
}

bool btrfs_repair_one_zone(struct btrfs_fs_info *fs_info, u64 logical)
{
	struct btrfs_block_group *cache;

	if (!btrfs_is_zoned(fs_info))
		return false;

	/* Do not attempt to repair in degraded state */
	if (btrfs_test_opt(fs_info, DEGRADED))
		return true;

	cache = btrfs_lookup_block_group(fs_info, logical);
	if (!cache)
		return true;

	if (test_and_set_bit(BLOCK_GROUP_FLAG_RELOCATING_REPAIR, &cache->runtime_flags)) {
		btrfs_put_block_group(cache);
		return true;
	}

	kthread_run(relocating_repair_kthread, cache,
		    "btrfs-relocating-repair");

	return true;
}

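/*
 * For a RAID56 full stripe, find the data stripe that covers @logical and
 * fill @smap with its device and the physical offset inside that stripe.
 * A repair write goes to exactly one data stripe, never through the RMW
 * path, so mapping to that single stripe is sufficient.
 */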
8162 * 8163 * Callers should ensure: 8164 * 8165 * - Call btrfs_bio_counter_inc_blocked() first 8166 * - The range does not cross stripe boundary 8167 * - Has a valid @mirror_num passed in. 8168 */ 8169 int btrfs_map_repair_block(struct btrfs_fs_info *fs_info, 8170 struct btrfs_io_stripe *smap, u64 logical, 8171 u32 length, int mirror_num) 8172 { 8173 struct btrfs_io_context *bioc = NULL; 8174 u64 map_length = length; 8175 int mirror_ret = mirror_num; 8176 int ret; 8177 8178 ASSERT(mirror_num > 0); 8179 8180 ret = btrfs_map_block(fs_info, BTRFS_MAP_WRITE, logical, &map_length, 8181 &bioc, smap, &mirror_ret); 8182 if (ret < 0) 8183 return ret; 8184 8185 /* The map range should not cross stripe boundary. */ 8186 ASSERT(map_length >= length); 8187 8188 /* Already mapped to single stripe. */ 8189 if (!bioc) 8190 goto out; 8191 8192 /* Map the RAID56 multi-stripe writes to a single one. */ 8193 if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) { 8194 map_raid56_repair_block(bioc, smap, logical); 8195 goto out; 8196 } 8197 8198 ASSERT(mirror_num <= bioc->num_stripes); 8199 smap->dev = bioc->stripes[mirror_num - 1].dev; 8200 smap->physical = bioc->stripes[mirror_num - 1].physical; 8201 out: 8202 btrfs_put_bioc(bioc); 8203 ASSERT(smap->dev); 8204 return 0; 8205 } 8206