// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/semaphore.h>
#include <linux/uuid.h>
#include <linux/list_sort.h>
#include <linux/namei.h>
#include "misc.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "volumes.h"
#include "raid56.h"
#include "rcu-string.h"
#include "dev-replace.h"
#include "sysfs.h"
#include "tree-checker.h"
#include "space-info.h"
#include "block-group.h"
#include "discard.h"
#include "zoned.h"
#include "fs.h"
#include "accessors.h"
#include "uuid-tree.h"
#include "ioctl.h"
#include "relocation.h"
#include "scrub.h"
#include "super.h"
#include "raid-stripe-tree.h"

#define BTRFS_BLOCK_GROUP_STRIPE_MASK	(BTRFS_BLOCK_GROUP_RAID0 | \
					 BTRFS_BLOCK_GROUP_RAID10 | \
					 BTRFS_BLOCK_GROUP_RAID56_MASK)

struct btrfs_io_geometry {
	u32 stripe_index;
	u32 stripe_nr;
	int mirror_num;
	int num_stripes;
	u64 stripe_offset;
	u64 raid56_full_stripe_start;
	int max_errors;
	enum btrfs_map_op op;
};

const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
	[BTRFS_RAID_RAID10] = {
		.sub_stripes	= 2,
		.dev_stripes	= 1,
		.devs_max	= 0,	/* 0 == as many as possible */
		.devs_min	= 2,
		.tolerated_failures = 1,
		.devs_increment	= 2,
		.ncopies	= 2,
		.nparity	= 0,
		.raid_name	= "raid10",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID10,
		.mindev_error	= BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 2,
		.devs_min	= 2,
		.tolerated_failures = 1,
		.devs_increment	= 2,
		.ncopies	= 2,
		.nparity	= 0,
		.raid_name	= "raid1",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID1,
		.mindev_error	= BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1C3] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 3,
		.devs_min	= 3,
		.tolerated_failures = 2,
		.devs_increment	= 3,
		.ncopies	= 3,
		.nparity	= 0,
		.raid_name	= "raid1c3",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID1C3,
		.mindev_error	= BTRFS_ERROR_DEV_RAID1C3_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1C4] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 4,
		.devs_min	= 4,
		.tolerated_failures = 3,
		.devs_increment	= 4,
		.ncopies	= 4,
		.nparity	= 0,
		.raid_name	= "raid1c4",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID1C4,
		.mindev_error	= BTRFS_ERROR_DEV_RAID1C4_MIN_NOT_MET,
	},
	[BTRFS_RAID_DUP] = {
		.sub_stripes	= 1,
		.dev_stripes	= 2,
		.devs_max	= 1,
		.devs_min	= 1,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 2,
		.nparity	= 0,
		.raid_name	= "dup",
		.bg_flag	= BTRFS_BLOCK_GROUP_DUP,
		.mindev_error	= 0,
	},
	[BTRFS_RAID_RAID0] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 1,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity	= 0,
		.raid_name	= "raid0",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID0,
		.mindev_error	= 0,
	},
	[BTRFS_RAID_SINGLE] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 1,
		.devs_min	= 1,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity	= 0,
		.raid_name	= "single",
		.bg_flag	= 0,
		.mindev_error	= 0,
	},
	[BTRFS_RAID_RAID5] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 2,
		.tolerated_failures = 1,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity	= 1,
		.raid_name	= "raid5",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID5,
		.mindev_error	= BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID6] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 3,
		.tolerated_failures = 2,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity	= 2,
		.raid_name	= "raid6",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID6,
		.mindev_error	= BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET,
	},
};

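/*
 * Example (illustrative only): the table above is the single source of truth
 * for per-profile parameters, e.g.:
 *
 *	btrfs_raid_array[BTRFS_RAID_RAID6].ncopies            == 1
 *	btrfs_raid_array[BTRFS_RAID_RAID6].nparity            == 2
 *	btrfs_raid_array[BTRFS_RAID_RAID6].tolerated_failures == 2
 *
 * i.e. RAID6 stores one copy of the data plus two parity stripes and can
 * tolerate losing any two devices.
 */
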
/*
 * Convert block group flags (BTRFS_BLOCK_GROUP_*) to btrfs_raid_types, which
 * can be used as index to access btrfs_raid_array[].
 */
enum btrfs_raid_types __attribute_const__ btrfs_bg_flags_to_raid_index(u64 flags)
{
	const u64 profile = (flags & BTRFS_BLOCK_GROUP_PROFILE_MASK);

	if (!profile)
		return BTRFS_RAID_SINGLE;

	return BTRFS_BG_FLAG_TO_INDEX(profile);
}

const char *btrfs_bg_type_to_raid_name(u64 flags)
{
	const int index = btrfs_bg_flags_to_raid_index(flags);

	if (index >= BTRFS_NR_RAID_TYPES)
		return NULL;

	return btrfs_raid_array[index].raid_name;
}

int btrfs_nr_parity_stripes(u64 type)
{
	enum btrfs_raid_types index = btrfs_bg_flags_to_raid_index(type);

	return btrfs_raid_array[index].nparity;
}

/*
 * Fill @buf with textual description of @bg_flags, no more than @size_buf
 * bytes including terminating null byte.
 */
void btrfs_describe_block_groups(u64 bg_flags, char *buf, u32 size_buf)
{
	int i;
	int ret;
	char *bp = buf;
	u64 flags = bg_flags;
	u32 size_bp = size_buf;

	if (!flags) {
		strcpy(bp, "NONE");
		return;
	}

#define DESCRIBE_FLAG(flag, desc)					\
	do {								\
		if (flags & (flag)) {					\
			ret = snprintf(bp, size_bp, "%s|", (desc));	\
			if (ret < 0 || ret >= size_bp)			\
				goto out_overflow;			\
			size_bp -= ret;					\
			bp += ret;					\
			flags &= ~(flag);				\
		}							\
	} while (0)

	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_DATA, "data");
	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_SYSTEM, "system");
	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_METADATA, "metadata");

	DESCRIBE_FLAG(BTRFS_AVAIL_ALLOC_BIT_SINGLE, "single");
	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
		DESCRIBE_FLAG(btrfs_raid_array[i].bg_flag,
			      btrfs_raid_array[i].raid_name);
#undef DESCRIBE_FLAG

	if (flags) {
		ret = snprintf(bp, size_bp, "0x%llx|", flags);
		size_bp -= ret;
	}

	if (size_bp < size_buf)
		buf[size_buf - size_bp - 1] = '\0'; /* remove last | */

	/*
	 * The text is trimmed, it's up to the caller to provide a
	 * sufficiently large buffer
	 */
out_overflow:;
}

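/*
 * Illustrative example: for bg_flags == (BTRFS_BLOCK_GROUP_METADATA |
 * BTRFS_BLOCK_GROUP_RAID1) the function above produces "metadata|raid1";
 * each matched flag appends "<name>|" and the trailing '|' is then
 * overwritten with the terminating NUL.
 */
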
static int init_first_rw_device(struct btrfs_trans_handle *trans);
static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info);
static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);

/*
 * Device locking
 * ==============
 *
 * There are several mutexes that protect manipulation of devices and low-level
 * structures like chunks but not block groups, extents or files
 *
 * uuid_mutex (global lock)
 * ------------------------
 * protects the fs_uuids list that tracks all per-fs fs_devices, resulting from
 * the SCAN_DEV ioctl registration or from mount either implicitly (the first
 * device) or requested by the device= mount option
 *
 * the mutex can be very coarse and can cover long-running operations
 *
 * protects: updates to fs_devices counters like missing devices, rw devices,
 * seeding, structure cloning, opening/closing devices at mount/umount time
 *
 * global::fs_devs - add, remove, updates to the global list
 *
 * does not protect: manipulation of the fs_devices::devices list in general
 * but in mount context it could be used to exclude list modifications by eg.
 * scan ioctl
 *
 * btrfs_device::name - renames (write side), read is RCU
 *
 * fs_devices::device_list_mutex (per-fs, with RCU)
 * ------------------------------------------------
 * protects updates to fs_devices::devices, ie. adding and deleting
 *
 * simple list traversal with read-only actions can be done with RCU protection
 *
 * may be used to exclude some operations from running concurrently without any
 * modifications to the list (see write_all_supers)
 *
 * Is not required at mount and close times, because our device list is
 * protected by the uuid_mutex at that point.
 *
 * balance_mutex
 * -------------
 * protects balance structures (status, state) and context accessed from
 * several places (internally, ioctl)
 *
 * chunk_mutex
 * -----------
 * protects chunks, adding or removing during allocation, trim or when a new
 * device is added/removed. Additionally it also protects post_commit_list of
 * individual devices, since they can be added to the transaction's
 * post_commit_list only with chunk_mutex held.
 *
 * cleaner_mutex
 * -------------
 * a big lock that is held by the cleaner thread and prevents running subvolume
 * cleaning together with relocation or delayed iputs
 *
 *
 * Lock nesting
 * ============
 *
 * uuid_mutex
 *   device_list_mutex
 *     chunk_mutex
 *   balance_mutex
 *
 *
 * Exclusive operations
 * ====================
 *
 * Maintains the exclusivity of the following operations that apply to the
 * whole filesystem and cannot run in parallel.
 *
 * - Balance (*)
 * - Device add
 * - Device remove
 * - Device replace (*)
 * - Resize
 *
 * The device operations (as above) can be in one of the following states:
 *
 * - Running state
 * - Paused state
 * - Completed state
 *
 * Only device operations marked with (*) can go into the Paused state for the
 * following reasons:
 *
 * - ioctl (only Balance can be Paused through ioctl)
 * - filesystem remounted as read-only
 * - filesystem unmounted and mounted as read-only
 * - system power-cycle and filesystem mounted as read-only
 * - filesystem or device errors leading to forced read-only
 *
 * The status of exclusive operation is set and cleared atomically.
 * During the course of Paused state, fs_info::exclusive_operation remains set.
 * A device operation in Paused or Running state can be canceled or resumed
 * either by ioctl (Balance only) or when remounted as read-write.
 * The exclusive status is cleared when the device operation is canceled or
 * completed.
 */

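/*
 * A minimal nesting sketch (illustrative only), following the lock order
 * documented above; taking these locks in any other order would be a lock
 * ordering violation:
 *
 *	mutex_lock(&uuid_mutex);
 *	mutex_lock(&fs_devices->device_list_mutex);
 *	... modify fs_devices::devices ...
 *	mutex_unlock(&fs_devices->device_list_mutex);
 *	mutex_unlock(&uuid_mutex);
 */
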
DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);
struct list_head * __attribute_const__ btrfs_get_fs_uuids(void)
{
	return &fs_uuids;
}

/*
 * Allocate new btrfs_fs_devices structure identified by a fsid.
 *
 * @fsid: if not NULL, copy the UUID to fs_devices::fsid and to
 *        fs_devices::metadata_fsid
 *
 * Return a pointer to a new struct btrfs_fs_devices on success, or an
 * ERR_PTR() on error. The returned struct is not linked onto any lists and
 * can be destroyed with kfree() right away.
 */
static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid)
{
	struct btrfs_fs_devices *fs_devs;

	fs_devs = kzalloc(sizeof(*fs_devs), GFP_KERNEL);
	if (!fs_devs)
		return ERR_PTR(-ENOMEM);

	mutex_init(&fs_devs->device_list_mutex);

	INIT_LIST_HEAD(&fs_devs->devices);
	INIT_LIST_HEAD(&fs_devs->alloc_list);
	INIT_LIST_HEAD(&fs_devs->fs_list);
	INIT_LIST_HEAD(&fs_devs->seed_list);

	if (fsid) {
		memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE);
		memcpy(fs_devs->metadata_uuid, fsid, BTRFS_FSID_SIZE);
	}

	return fs_devs;
}

static void btrfs_free_device(struct btrfs_device *device)
{
	WARN_ON(!list_empty(&device->post_commit_list));
	rcu_string_free(device->name);
	extent_io_tree_release(&device->alloc_state);
	btrfs_destroy_dev_zone_info(device);
	kfree(device);
}

static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;

	WARN_ON(fs_devices->opened);
	while (!list_empty(&fs_devices->devices)) {
		device = list_entry(fs_devices->devices.next,
				    struct btrfs_device, dev_list);
		list_del(&device->dev_list);
		btrfs_free_device(device);
	}
	kfree(fs_devices);
}

void __exit btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;

	while (!list_empty(&fs_uuids)) {
		fs_devices = list_entry(fs_uuids.next,
					struct btrfs_fs_devices, fs_list);
		list_del(&fs_devices->fs_list);
		free_fs_devices(fs_devices);
	}
}

static bool match_fsid_fs_devices(const struct btrfs_fs_devices *fs_devices,
				  const u8 *fsid, const u8 *metadata_fsid)
{
	if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) != 0)
		return false;

	if (!metadata_fsid)
		return true;

	if (memcmp(metadata_fsid, fs_devices->metadata_uuid, BTRFS_FSID_SIZE) != 0)
		return false;

	return true;
}

static noinline struct btrfs_fs_devices *find_fsid(
		const u8 *fsid, const u8 *metadata_fsid)
{
	struct btrfs_fs_devices *fs_devices;

	ASSERT(fsid);

	/* Handle non-split brain cases */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (match_fsid_fs_devices(fs_devices, fsid, metadata_fsid))
			return fs_devices;
	}
	return NULL;
}

static int
btrfs_get_bdev_and_sb(const char *device_path, blk_mode_t flags, void *holder,
		      int flush, struct file **bdev_file,
		      struct btrfs_super_block **disk_super)
{
	struct block_device *bdev;
	int ret;

	*bdev_file = bdev_file_open_by_path(device_path, flags, holder, NULL);

	if (IS_ERR(*bdev_file)) {
		ret = PTR_ERR(*bdev_file);
		btrfs_err(NULL, "failed to open device for path %s with flags 0x%x: %d",
			  device_path, flags, ret);
		goto error;
	}
	bdev = file_bdev(*bdev_file);

	if (flush)
		sync_blockdev(bdev);
	if (holder) {
		ret = set_blocksize(*bdev_file, BTRFS_BDEV_BLOCKSIZE);
		if (ret) {
			fput(*bdev_file);
			goto error;
		}
	}
	invalidate_bdev(bdev);
	*disk_super = btrfs_read_dev_super(bdev);
	if (IS_ERR(*disk_super)) {
		ret = PTR_ERR(*disk_super);
		fput(*bdev_file);
		goto error;
	}

	return 0;

error:
	*disk_super = NULL;
	*bdev_file = NULL;
	return ret;
}

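/*
 * Usage note (illustrative): callers that only want to read the super block,
 * e.g. for scanning, pass holder == NULL so the device is opened without an
 * exclusive claim and the block size is left alone; mount-time callers pass a
 * holder, which makes the helper above also pin the block size to
 * BTRFS_BDEV_BLOCKSIZE via set_blocksize().
 */
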
/*
 * Search and remove all stale devices (which are not mounted). When both
 * inputs are NULL, it will search and release all stale devices.
 *
 * @devt:	 Optional. When provided, it will release all unmounted devices
 *		 matching this devt only.
 * @skip_device: Optional. Will skip this device when searching for the stale
 *		 devices.
 *
 * Return:	0 for success or if @devt is 0.
 *		-EBUSY if @devt is a mounted device.
 *		-ENOENT if @devt does not match any device in the list.
 */
static int btrfs_free_stale_devices(dev_t devt, struct btrfs_device *skip_device)
{
	struct btrfs_fs_devices *fs_devices, *tmp_fs_devices;
	struct btrfs_device *device, *tmp_device;
	int ret;
	bool freed = false;

	lockdep_assert_held(&uuid_mutex);

	/* Return good status if there is no instance of devt. */
	ret = 0;
	list_for_each_entry_safe(fs_devices, tmp_fs_devices, &fs_uuids, fs_list) {

		mutex_lock(&fs_devices->device_list_mutex);
		list_for_each_entry_safe(device, tmp_device,
					 &fs_devices->devices, dev_list) {
			if (skip_device && skip_device == device)
				continue;
			if (devt && devt != device->devt)
				continue;
			if (fs_devices->opened) {
				if (devt)
					ret = -EBUSY;
				break;
			}

			/* delete the stale device */
			fs_devices->num_devices--;
			list_del(&device->dev_list);
			btrfs_free_device(device);

			freed = true;
		}
		mutex_unlock(&fs_devices->device_list_mutex);

		if (fs_devices->num_devices == 0) {
			btrfs_sysfs_remove_fsid(fs_devices);
			list_del(&fs_devices->fs_list);
			free_fs_devices(fs_devices);
		}
	}

	/* If there is at least one freed device return 0. */
	if (freed)
		return 0;

	return ret;
}

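/*
 * Illustrative call patterns for the helper above, both under uuid_mutex
 * (see btrfs_forget_devices() later in this file for the ioctl path):
 *
 *	btrfs_free_stale_devices(devt, NULL);	releases one unmounted device
 *	btrfs_free_stale_devices(0, NULL);	releases all unmounted devices
 */
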
static struct btrfs_fs_devices *find_fsid_by_device(
					struct btrfs_super_block *disk_super,
					dev_t devt, bool *same_fsid_diff_dev)
{
	struct btrfs_fs_devices *fsid_fs_devices;
	struct btrfs_fs_devices *devt_fs_devices;
	const bool has_metadata_uuid = (btrfs_super_incompat_flags(disk_super) &
					BTRFS_FEATURE_INCOMPAT_METADATA_UUID);
	bool found_by_devt = false;

	/* Find the fs_device by the usual method, if found use it. */
	fsid_fs_devices = find_fsid(disk_super->fsid,
			has_metadata_uuid ? disk_super->metadata_uuid : NULL);

	/* The temp_fsid feature is supported only with single device filesystem. */
	if (btrfs_super_num_devices(disk_super) != 1)
		return fsid_fs_devices;

	/*
	 * A seed device is an integral component of the sprout device, which
	 * functions as a multi-device filesystem. So, temp-fsid feature is
	 * not supported.
	 */
	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING)
		return fsid_fs_devices;

	/* Try to find a fs_devices by matching devt. */
	list_for_each_entry(devt_fs_devices, &fs_uuids, fs_list) {
		struct btrfs_device *device;

		list_for_each_entry(device, &devt_fs_devices->devices, dev_list) {
			if (device->devt == devt) {
				found_by_devt = true;
				break;
			}
		}
		if (found_by_devt)
			break;
	}

	if (found_by_devt) {
		/* Existing device. */
		if (fsid_fs_devices == NULL) {
			if (devt_fs_devices->opened == 0) {
				/* Stale device. */
				return NULL;
			} else {
				/* temp_fsid is mounting a subvol. */
				return devt_fs_devices;
			}
		} else {
			/* Regular or temp_fsid device mounting a subvol. */
			return devt_fs_devices;
		}
	} else {
		/* New device. */
		if (fsid_fs_devices == NULL) {
			return NULL;
		} else {
			/* sb::fsid is already used, create a new temp_fsid. */
			*same_fsid_diff_dev = true;
			return NULL;
		}
	}

	/* Not reached. */
}

/*
 * This is only used on mount, and we are protected from competing things
 * messing with our fs_devices by the uuid_mutex, thus we do not need the
 * fs_devices->device_list_mutex here.
 */
static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
			struct btrfs_device *device, blk_mode_t flags,
			void *holder)
{
	struct file *bdev_file;
	struct btrfs_super_block *disk_super;
	u64 devid;
	int ret;

	if (device->bdev)
		return -EINVAL;
	if (!device->name)
		return -EINVAL;

	ret = btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
				    &bdev_file, &disk_super);
	if (ret)
		return ret;

	devid = btrfs_stack_device_id(&disk_super->dev_item);
	if (devid != device->devid)
		goto error_free_page;

	if (memcmp(device->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE))
		goto error_free_page;

	device->generation = btrfs_super_generation(disk_super);

	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
		if (btrfs_super_incompat_flags(disk_super) &
		    BTRFS_FEATURE_INCOMPAT_METADATA_UUID) {
			pr_err(
		"BTRFS: Invalid seeding and uuid-changed device detected\n");
			goto error_free_page;
		}

		clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
		fs_devices->seeding = true;
	} else {
		if (bdev_read_only(file_bdev(bdev_file)))
			clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
		else
			set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
	}

	if (!bdev_nonrot(file_bdev(bdev_file)))
		fs_devices->rotating = true;

	if (bdev_max_discard_sectors(file_bdev(bdev_file)))
		fs_devices->discardable = true;

	device->bdev_file = bdev_file;
	device->bdev = file_bdev(bdev_file);
	clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);

	if (device->devt != device->bdev->bd_dev) {
		btrfs_warn(NULL,
			   "device %s maj:min changed from %d:%d to %d:%d",
			   device->name->str, MAJOR(device->devt),
			   MINOR(device->devt), MAJOR(device->bdev->bd_dev),
			   MINOR(device->bdev->bd_dev));

		device->devt = device->bdev->bd_dev;
	}

	fs_devices->open_devices++;
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
		fs_devices->rw_devices++;
		list_add_tail(&device->dev_alloc_list, &fs_devices->alloc_list);
	}
	btrfs_release_disk_super(disk_super);

	return 0;

error_free_page:
	btrfs_release_disk_super(disk_super);
	fput(bdev_file);

	return -EINVAL;
}

const u8 *btrfs_sb_fsid_ptr(const struct btrfs_super_block *sb)
{
	bool has_metadata_uuid = (btrfs_super_incompat_flags(sb) &
				  BTRFS_FEATURE_INCOMPAT_METADATA_UUID);

	return has_metadata_uuid ? sb->metadata_uuid : sb->fsid;
}

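/*
 * Illustrative: on a filesystem whose metadata UUID was changed (e.g. with
 * btrfstune -m, which sets BTRFS_FEATURE_INCOMPAT_METADATA_UUID), the helper
 * above returns sb->metadata_uuid; otherwise both UUIDs hold the same bytes
 * and sb->fsid is returned.
 */
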
/*
 * We can have very weird soft links passed in.
 * One example is "/proc/self/fd/<fd>", which can be a soft link to
 * a block device.
 *
 * But it's never a good idea to use those weird names.
 * Here we check if the path (not following symlinks) is a good one inside
 * "/dev/".
 */
static bool is_good_dev_path(const char *dev_path)
{
	struct path path = { .mnt = NULL, .dentry = NULL };
	char *path_buf = NULL;
	char *resolved_path;
	bool is_good = false;
	int ret;

	if (!dev_path)
		goto out;

	path_buf = kmalloc(PATH_MAX, GFP_KERNEL);
	if (!path_buf)
		goto out;

	/*
	 * Do not follow soft link, just check if the original path is inside
	 * "/dev/".
	 */
	ret = kern_path(dev_path, 0, &path);
	if (ret)
		goto out;
	resolved_path = d_path(&path, path_buf, PATH_MAX);
	if (IS_ERR(resolved_path))
		goto out;
	if (strncmp(resolved_path, "/dev/", strlen("/dev/")))
		goto out;
	is_good = true;
out:
	kfree(path_buf);
	path_put(&path);
	return is_good;
}

static int get_canonical_dev_path(const char *dev_path, char *canonical)
{
	struct path path = { .mnt = NULL, .dentry = NULL };
	char *path_buf = NULL;
	char *resolved_path;
	int ret;

	if (!dev_path) {
		ret = -EINVAL;
		goto out;
	}

	path_buf = kmalloc(PATH_MAX, GFP_KERNEL);
	if (!path_buf) {
		ret = -ENOMEM;
		goto out;
	}

	ret = kern_path(dev_path, LOOKUP_FOLLOW, &path);
	if (ret)
		goto out;
	resolved_path = d_path(&path, path_buf, PATH_MAX);
	ret = strscpy(canonical, resolved_path, PATH_MAX);
out:
	kfree(path_buf);
	path_put(&path);
	return ret;
}

static bool is_same_device(struct btrfs_device *device, const char *new_path)
{
	struct path old = { .mnt = NULL, .dentry = NULL };
	struct path new = { .mnt = NULL, .dentry = NULL };
	char *old_path = NULL;
	bool is_same = false;
	int ret;

	if (!device->name)
		goto out;

	old_path = kzalloc(PATH_MAX, GFP_NOFS);
	if (!old_path)
		goto out;

	rcu_read_lock();
	ret = strscpy(old_path, rcu_str_deref(device->name), PATH_MAX);
	rcu_read_unlock();
	if (ret < 0)
		goto out;

	ret = kern_path(old_path, LOOKUP_FOLLOW, &old);
	if (ret)
		goto out;
	ret = kern_path(new_path, LOOKUP_FOLLOW, &new);
	if (ret)
		goto out;
	if (path_equal(&old, &new))
		is_same = true;
out:
	kfree(old_path);
	path_put(&old);
	path_put(&new);
	return is_same;
}

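/*
 * Illustrative example: when a scan comes in as "/proc/self/fd/3" pointing at
 * /dev/sda, is_good_dev_path() rejects it (the non-followed path is not under
 * "/dev/") and get_canonical_dev_path() resolves the symlink, so "/dev/sda"
 * is what ends up registered; see btrfs_scan_one_device() below.
 */
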
/*
 * Add new device to list of registered devices
 *
 * Returns:
 * device pointer which was just added or updated when successful
 * error pointer when failed
 */
static noinline struct btrfs_device *device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   bool *new_device_added)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices = NULL;
	struct rcu_string *name;
	u64 found_transid = btrfs_super_generation(disk_super);
	u64 devid = btrfs_stack_device_id(&disk_super->dev_item);
	dev_t path_devt;
	int error;
	bool same_fsid_diff_dev = false;
	bool has_metadata_uuid = (btrfs_super_incompat_flags(disk_super) &
		BTRFS_FEATURE_INCOMPAT_METADATA_UUID);

	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_CHANGING_FSID_V2) {
		btrfs_err(NULL,
"device %s has incomplete metadata_uuid change, please use btrfstune to complete",
			  path);
		return ERR_PTR(-EAGAIN);
	}

	error = lookup_bdev(path, &path_devt);
	if (error) {
		btrfs_err(NULL, "failed to lookup block device for path %s: %d",
			  path, error);
		return ERR_PTR(error);
	}

	fs_devices = find_fsid_by_device(disk_super, path_devt, &same_fsid_diff_dev);

	if (!fs_devices) {
		fs_devices = alloc_fs_devices(disk_super->fsid);
		if (IS_ERR(fs_devices))
			return ERR_CAST(fs_devices);

		if (has_metadata_uuid)
			memcpy(fs_devices->metadata_uuid,
			       disk_super->metadata_uuid, BTRFS_FSID_SIZE);

		if (same_fsid_diff_dev) {
			generate_random_uuid(fs_devices->fsid);
			fs_devices->temp_fsid = true;
			pr_info("BTRFS: device %s (%d:%d) using temp-fsid %pU\n",
				path, MAJOR(path_devt), MINOR(path_devt),
				fs_devices->fsid);
		}

		mutex_lock(&fs_devices->device_list_mutex);
		list_add(&fs_devices->fs_list, &fs_uuids);

		device = NULL;
	} else {
		struct btrfs_dev_lookup_args args = {
			.devid = devid,
			.uuid = disk_super->dev_item.uuid,
		};

		mutex_lock(&fs_devices->device_list_mutex);
		device = btrfs_find_device(fs_devices, &args);

		if (found_transid > fs_devices->latest_generation) {
			memcpy(fs_devices->fsid, disk_super->fsid,
			       BTRFS_FSID_SIZE);
			memcpy(fs_devices->metadata_uuid,
			       btrfs_sb_fsid_ptr(disk_super), BTRFS_FSID_SIZE);
		}
	}

	if (!device) {
		unsigned int nofs_flag;

		if (fs_devices->opened) {
			btrfs_err(NULL,
"device %s (%d:%d) belongs to fsid %pU, and the fs is already mounted, scanned by %s (%d)",
				  path, MAJOR(path_devt), MINOR(path_devt),
				  fs_devices->fsid, current->comm,
				  task_pid_nr(current));
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-EBUSY);
		}

		nofs_flag = memalloc_nofs_save();
		device = btrfs_alloc_device(NULL, &devid,
					    disk_super->dev_item.uuid, path);
		memalloc_nofs_restore(nofs_flag);
		if (IS_ERR(device)) {
			mutex_unlock(&fs_devices->device_list_mutex);
			/* we can safely leave the fs_devices entry around */
			return device;
		}

		device->devt = path_devt;

		list_add_rcu(&device->dev_list, &fs_devices->devices);
		fs_devices->num_devices++;

		device->fs_devices = fs_devices;
		*new_device_added = true;

		if (disk_super->label[0])
			pr_info(
"BTRFS: device label %s devid %llu transid %llu %s (%d:%d) scanned by %s (%d)\n",
				disk_super->label, devid, found_transid, path,
				MAJOR(path_devt), MINOR(path_devt),
				current->comm, task_pid_nr(current));
		else
			pr_info(
"BTRFS: device fsid %pU devid %llu transid %llu %s (%d:%d) scanned by %s (%d)\n",
				disk_super->fsid, devid, found_transid, path,
				MAJOR(path_devt), MINOR(path_devt),
				current->comm, task_pid_nr(current));

	} else if (!device->name || !is_same_device(device, path)) {
		/*
		 * When FS is already mounted.
		 * 1. If you are here and if the device->name is NULL that
		 *    means this device was missing at time of FS mount.
		 * 2. If you are here and if the device->name is different
		 *    from 'path' that means either
		 *	a. The same device disappeared and reappeared with
		 *	   different name. or
		 *	b. The missing-disk-which-was-replaced, has
		 *	   reappeared now.
		 *
		 * We must allow 1 and 2a above. But 2b would be spurious
		 * and unintentional.
		 *
		 * Further in case of 1 and 2a above, the disk at 'path'
		 * would have missed some transaction when it was away and
		 * in case of 2a the stale bdev has to be updated as well.
		 * 2b must not be allowed at any time.
		 */
		/*
		 * For now, we do allow update to btrfs_fs_device through the
		 * btrfs dev scan cli after FS has been mounted. We're still
		 * tracking a problem where systems fail mount by subvolume id
		 * when we reject replacement on a mounted FS.
		 */
		if (!fs_devices->opened && found_transid < device->generation) {
			/*
			 * That is if the FS is _not_ mounted and if you
			 * are here, that means there is more than one
			 * disk with same uuid and devid. We keep the one
			 * with larger generation number or the last-in if
			 * generations are equal.
			 */
			mutex_unlock(&fs_devices->device_list_mutex);
			btrfs_err(NULL,
"device %s already registered with a higher generation, found %llu expect %llu",
				  path, found_transid, device->generation);
			return ERR_PTR(-EEXIST);
		}

		/*
		 * We are going to replace the device path for a given devid,
		 * make sure it's the same device if the device is mounted
		 *
		 * NOTE: the device->fs_info may not be reliable here so pass
		 * in a NULL to message helpers instead. This avoids a possible
		 * use-after-free when the fs_info and fs_info->sb are already
		 * torn down.
		 */
		if (device->bdev) {
			if (device->devt != path_devt) {
				mutex_unlock(&fs_devices->device_list_mutex);
				btrfs_warn_in_rcu(NULL,
	"duplicate device %s devid %llu generation %llu scanned by %s (%d)",
						  path, devid, found_transid,
						  current->comm,
						  task_pid_nr(current));
				return ERR_PTR(-EEXIST);
			}
			btrfs_info_in_rcu(NULL,
	"devid %llu device path %s changed to %s scanned by %s (%d)",
					  devid, btrfs_dev_name(device),
					  path, current->comm,
					  task_pid_nr(current));
		}

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name) {
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-ENOMEM);
		}
		rcu_string_free(device->name);
		rcu_assign_pointer(device->name, name);
		if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
			fs_devices->missing_devices--;
			clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
		}
		device->devt = path_devt;
	}

	/*
	 * Unmount does not free the btrfs_device struct but would zero
	 * generation along with most of the other members. So just update
	 * it back. We need it to pick the disk with largest generation
	 * (as above).
	 */
	if (!fs_devices->opened) {
		device->generation = found_transid;
		fs_devices->latest_generation = max_t(u64, found_transid,
						fs_devices->latest_generation);
	}

	fs_devices->total_devices = btrfs_super_num_devices(disk_super);

	mutex_unlock(&fs_devices->device_list_mutex);
	return device;
}

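/*
 * Illustrative re-scan sequence for device_list_add(), all under uuid_mutex:
 * a first scan of /dev/sdb registers devid 1 for a given fsid; if the same
 * disk later reappears as /dev/sdc and is scanned again, the devid/uuid
 * lookup finds the existing btrfs_device and only device->name and
 * device->devt are updated, rather than a second device being added.
 */
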
static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
{
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_device *device;
	struct btrfs_device *orig_dev;
	int ret = 0;

	lockdep_assert_held(&uuid_mutex);

	fs_devices = alloc_fs_devices(orig->fsid);
	if (IS_ERR(fs_devices))
		return fs_devices;

	fs_devices->total_devices = orig->total_devices;

	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
		const char *dev_path = NULL;

		/*
		 * This is ok to do without RCU read locked because we hold the
		 * uuid mutex so nothing we touch in here is going to disappear.
		 */
		if (orig_dev->name)
			dev_path = orig_dev->name->str;

		device = btrfs_alloc_device(NULL, &orig_dev->devid,
					    orig_dev->uuid, dev_path);
		if (IS_ERR(device)) {
			ret = PTR_ERR(device);
			goto error;
		}

		if (orig_dev->zone_info) {
			struct btrfs_zoned_device_info *zone_info;

			zone_info = btrfs_clone_dev_zone_info(orig_dev);
			if (!zone_info) {
				btrfs_free_device(device);
				ret = -ENOMEM;
				goto error;
			}
			device->zone_info = zone_info;
		}

		list_add(&device->dev_list, &fs_devices->devices);
		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	}
	return fs_devices;
error:
	free_fs_devices(fs_devices);
	return ERR_PTR(ret);
}

static void __btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices,
				      struct btrfs_device **latest_dev)
{
	struct btrfs_device *device, *next;

	/* This is the initialized path, it is safe to release the devices. */
	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
		if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state)) {
			if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
				      &device->dev_state) &&
			    !test_bit(BTRFS_DEV_STATE_MISSING,
				      &device->dev_state) &&
			    (!*latest_dev ||
			     device->generation > (*latest_dev)->generation)) {
				*latest_dev = device;
			}
			continue;
		}

		/*
		 * We have already validated the presence of BTRFS_DEV_REPLACE_DEVID,
		 * in btrfs_init_dev_replace() so just continue.
		 */
		if (device->devid == BTRFS_DEV_REPLACE_DEVID)
			continue;

		if (device->bdev_file) {
			fput(device->bdev_file);
			device->bdev = NULL;
			device->bdev_file = NULL;
			fs_devices->open_devices--;
		}
		if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
			list_del_init(&device->dev_alloc_list);
			clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
			fs_devices->rw_devices--;
		}
		list_del_init(&device->dev_list);
		fs_devices->num_devices--;
		btrfs_free_device(device);
	}
}

/*
 * After we have read the system tree and know devids belonging to this
 * filesystem, remove the device which does not belong there.
 */
void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *latest_dev = NULL;
	struct btrfs_fs_devices *seed_dev;

	mutex_lock(&uuid_mutex);
	__btrfs_free_extra_devids(fs_devices, &latest_dev);

	list_for_each_entry(seed_dev, &fs_devices->seed_list, seed_list)
		__btrfs_free_extra_devids(seed_dev, &latest_dev);

	fs_devices->latest_dev = latest_dev;

	mutex_unlock(&uuid_mutex);
}

static void btrfs_close_bdev(struct btrfs_device *device)
{
	if (!device->bdev)
		return;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
		sync_blockdev(device->bdev);
		invalidate_bdev(device->bdev);
	}

	fput(device->bdev_file);
}

static void btrfs_close_one_device(struct btrfs_device *device)
{
	struct btrfs_fs_devices *fs_devices = device->fs_devices;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
		list_del_init(&device->dev_alloc_list);
		fs_devices->rw_devices--;
	}

	if (device->devid == BTRFS_DEV_REPLACE_DEVID)
		clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);

	if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
		clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
		fs_devices->missing_devices--;
	}

	btrfs_close_bdev(device);
	if (device->bdev) {
		fs_devices->open_devices--;
		device->bdev = NULL;
		device->bdev_file = NULL;
	}
	clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
	btrfs_destroy_dev_zone_info(device);

	device->fs_info = NULL;
	atomic_set(&device->dev_stats_ccnt, 0);
	extent_io_tree_release(&device->alloc_state);

	/*
	 * Reset the flush error record. We might have a transient flush error
	 * in this mount, and if so we aborted the current transaction and set
	 * the fs to an error state, guaranteeing no super blocks can be further
	 * committed. However that error might be transient and if we unmount the
	 * filesystem and mount it again, we should allow the mount to succeed
	 * (btrfs_check_rw_degradable() should not fail) - if after mounting the
	 * filesystem again we still get flush errors, then we will again abort
	 * any transaction and set the error state, guaranteeing no commits of
	 * unsafe super blocks.
	 */
	device->last_flush_error = 0;

	/* Verify the device is back in a pristine state */
	WARN_ON(test_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state));
	WARN_ON(test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state));
	WARN_ON(!list_empty(&device->dev_alloc_list));
	WARN_ON(!list_empty(&device->post_commit_list));
}

static void close_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device, *tmp;

	lockdep_assert_held(&uuid_mutex);

	if (--fs_devices->opened > 0)
		return;

	list_for_each_entry_safe(device, tmp, &fs_devices->devices, dev_list)
		btrfs_close_one_device(device);

	WARN_ON(fs_devices->open_devices);
	WARN_ON(fs_devices->rw_devices);
	fs_devices->opened = 0;
	fs_devices->seeding = false;
	fs_devices->fs_info = NULL;
}

void btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	LIST_HEAD(list);
	struct btrfs_fs_devices *tmp;

	mutex_lock(&uuid_mutex);
	close_fs_devices(fs_devices);
	if (!fs_devices->opened) {
		list_splice_init(&fs_devices->seed_list, &list);

		/*
		 * If the struct btrfs_fs_devices is not assembled with any
		 * other device, it can be re-initialized during the next mount
		 * without needing the device-scan step. Therefore, it can be
		 * fully freed.
		 */
		if (fs_devices->num_devices == 1) {
			list_del(&fs_devices->fs_list);
			free_fs_devices(fs_devices);
		}
	}

	list_for_each_entry_safe(fs_devices, tmp, &list, seed_list) {
		close_fs_devices(fs_devices);
		list_del(&fs_devices->seed_list);
		free_fs_devices(fs_devices);
	}
	mutex_unlock(&uuid_mutex);
}

static int open_fs_devices(struct btrfs_fs_devices *fs_devices,
			   blk_mode_t flags, void *holder)
{
	struct btrfs_device *device;
	struct btrfs_device *latest_dev = NULL;
	struct btrfs_device *tmp_device;
	int ret = 0;

	list_for_each_entry_safe(device, tmp_device, &fs_devices->devices,
				 dev_list) {
		int ret2;

		ret2 = btrfs_open_one_device(fs_devices, device, flags, holder);
		if (ret2 == 0 &&
		    (!latest_dev || device->generation > latest_dev->generation)) {
			latest_dev = device;
		} else if (ret2 == -ENODATA) {
			fs_devices->num_devices--;
			list_del(&device->dev_list);
			btrfs_free_device(device);
		}
		if (ret == 0 && ret2 != 0)
			ret = ret2;
	}

	if (fs_devices->open_devices == 0) {
		if (ret)
			return ret;
		return -EINVAL;
	}

	fs_devices->opened = 1;
	fs_devices->latest_dev = latest_dev;
	fs_devices->total_rw_bytes = 0;
	fs_devices->chunk_alloc_policy = BTRFS_CHUNK_ALLOC_REGULAR;
	fs_devices->read_policy = BTRFS_READ_POLICY_PID;

	return 0;
}

static int devid_cmp(void *priv, const struct list_head *a,
		     const struct list_head *b)
{
	const struct btrfs_device *dev1, *dev2;

	dev1 = list_entry(a, struct btrfs_device, dev_list);
	dev2 = list_entry(b, struct btrfs_device, dev_list);

	if (dev1->devid < dev2->devid)
		return -1;
	else if (dev1->devid > dev2->devid)
		return 1;
	return 0;
}

int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       blk_mode_t flags, void *holder)
{
	int ret;

	lockdep_assert_held(&uuid_mutex);
	/*
	 * The device_list_mutex cannot be taken here in case opening the
	 * underlying device takes further locks like open_mutex.
	 *
	 * We also don't need the lock here as this is called during mount and
	 * exclusion is provided by uuid_mutex
	 */

	if (fs_devices->opened) {
		fs_devices->opened++;
		ret = 0;
	} else {
		list_sort(NULL, &fs_devices->devices, devid_cmp);
		ret = open_fs_devices(fs_devices, flags, holder);
	}

	return ret;
}

void btrfs_release_disk_super(struct btrfs_super_block *super)
{
	struct page *page = virt_to_page(super);

	put_page(page);
}

static struct btrfs_super_block *btrfs_read_disk_super(struct block_device *bdev,
						       u64 bytenr, u64 bytenr_orig)
{
	struct btrfs_super_block *disk_super;
	struct page *page;
	void *p;
	pgoff_t index;

	/* make sure our super fits in the device */
	if (bytenr + PAGE_SIZE >= bdev_nr_bytes(bdev))
		return ERR_PTR(-EINVAL);

	/* make sure our super fits in the page */
	if (sizeof(*disk_super) > PAGE_SIZE)
		return ERR_PTR(-EINVAL);

	/* make sure our super doesn't straddle pages on disk */
	index = bytenr >> PAGE_SHIFT;
	if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_SHIFT != index)
		return ERR_PTR(-EINVAL);

	/* pull in the page with our super */
	page = read_cache_page_gfp(bdev->bd_mapping, index, GFP_KERNEL);

	if (IS_ERR(page))
		return ERR_CAST(page);

	p = page_address(page);

	/* align our pointer to the offset of the super block */
	disk_super = p + offset_in_page(bytenr);

	if (btrfs_super_bytenr(disk_super) != bytenr_orig ||
	    btrfs_super_magic(disk_super) != BTRFS_MAGIC) {
		btrfs_release_disk_super(p);
		return ERR_PTR(-EINVAL);
	}

	if (disk_super->label[0] && disk_super->label[BTRFS_LABEL_SIZE - 1])
		disk_super->label[BTRFS_LABEL_SIZE - 1] = 0;

	return disk_super;
}

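/*
 * Worked example for the straddle check above (illustrative, assuming 4 KiB
 * pages): the primary super block lives at bytenr 65536, so index = 65536 >>
 * 12 = 16, and (65536 + sizeof(*disk_super) - 1) >> 12 is also 16, i.e. the
 * 4 KiB super block sits entirely within page 16 of the block device's page
 * cache.
 */
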
int btrfs_forget_devices(dev_t devt)
{
	int ret;

	mutex_lock(&uuid_mutex);
	ret = btrfs_free_stale_devices(devt, NULL);
	mutex_unlock(&uuid_mutex);

	return ret;
}

static bool btrfs_skip_registration(struct btrfs_super_block *disk_super,
				    const char *path, dev_t devt,
				    bool mount_arg_dev)
{
	struct btrfs_fs_devices *fs_devices;

	/*
	 * Do not skip device registration for mounted devices with matching
	 * maj:min but different paths. Booting without initrd relies on
	 * /dev/root initially, later replaced with the actual root device.
	 * A successful scan ensures grub2-probe selects the correct device.
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		struct btrfs_device *device;

		mutex_lock(&fs_devices->device_list_mutex);

		if (!fs_devices->opened) {
			mutex_unlock(&fs_devices->device_list_mutex);
			continue;
		}

		list_for_each_entry(device, &fs_devices->devices, dev_list) {
			if (device->bdev && (device->bdev->bd_dev == devt) &&
			    strcmp(device->name->str, path) != 0) {
				mutex_unlock(&fs_devices->device_list_mutex);

				/* Do not skip registration. */
				return false;
			}
		}
		mutex_unlock(&fs_devices->device_list_mutex);
	}

	if (!mount_arg_dev && btrfs_super_num_devices(disk_super) == 1 &&
	    !(btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING))
		return true;

	return false;
}

/*
 * Look for a btrfs signature on a device. This may be called out of the mount
 * path and we are not allowed to call set_blocksize during the scan. The
 * superblock is read via pagecache.
 *
 * With @mount_arg_dev it's a scan during mount time that will always register
 * the device or return an error. Multi-device and seeding devices are
 * registered in both cases.
 */
struct btrfs_device *btrfs_scan_one_device(const char *path, blk_mode_t flags,
					   bool mount_arg_dev)
{
	struct btrfs_super_block *disk_super;
	bool new_device_added = false;
	struct btrfs_device *device = NULL;
	struct file *bdev_file;
	char *canonical_path = NULL;
	u64 bytenr;
	dev_t devt;
	int ret;

	lockdep_assert_held(&uuid_mutex);

	if (!is_good_dev_path(path)) {
		canonical_path = kmalloc(PATH_MAX, GFP_KERNEL);
		if (canonical_path) {
			ret = get_canonical_dev_path(path, canonical_path);
			if (ret < 0) {
				kfree(canonical_path);
				canonical_path = NULL;
			}
		}
	}
	/*
	 * Avoid an exclusive open here, as the systemd-udev may initiate the
	 * device scan which may race with the user's mount or mkfs command,
	 * resulting in failure.
	 * Since the device scan is solely for reading purposes, there is no
	 * need for an exclusive open. Additionally, the devices are read again
	 * during the mount process. It is ok to get some inconsistent
	 * values temporarily, as the device paths of the fsid are the only
	 * required information for assembling the volume.
	 */
	bdev_file = bdev_file_open_by_path(path, flags, NULL, NULL);
	if (IS_ERR(bdev_file))
		return ERR_CAST(bdev_file);

	/*
	 * We would like to check all the super blocks, but doing so would
	 * allow a mount to succeed after a mkfs from a different filesystem.
	 * Currently, recovery from a bad primary btrfs superblock is done
	 * using the userspace command 'btrfs check --super'.
	 */
	ret = btrfs_sb_log_location_bdev(file_bdev(bdev_file), 0, READ, &bytenr);
	if (ret) {
		device = ERR_PTR(ret);
		goto error_bdev_put;
	}

	disk_super = btrfs_read_disk_super(file_bdev(bdev_file), bytenr,
					   btrfs_sb_offset(0));
	if (IS_ERR(disk_super)) {
		device = ERR_CAST(disk_super);
		goto error_bdev_put;
	}

	devt = file_bdev(bdev_file)->bd_dev;
	if (btrfs_skip_registration(disk_super, path, devt, mount_arg_dev)) {
		pr_debug("BTRFS: skip registering single non-seed device %s (%d:%d)\n",
			 path, MAJOR(devt), MINOR(devt));

		btrfs_free_stale_devices(devt, NULL);

		device = NULL;
		goto free_disk_super;
	}

	device = device_list_add(canonical_path ? : path, disk_super,
				 &new_device_added);
	if (!IS_ERR(device) && new_device_added)
		btrfs_free_stale_devices(device->devt, device);

free_disk_super:
	btrfs_release_disk_super(disk_super);

error_bdev_put:
	fput(bdev_file);
	kfree(canonical_path);

	return device;
}

/*
 * Try to find a chunk that intersects [start, start + len] range and when one
 * such is found, record the end of it in *start
 */
static bool contains_pending_extent(struct btrfs_device *device, u64 *start,
				    u64 len)
{
	u64 physical_start, physical_end;

	lockdep_assert_held(&device->fs_info->chunk_mutex);

	if (find_first_extent_bit(&device->alloc_state, *start,
				  &physical_start, &physical_end,
				  CHUNK_ALLOCATED, NULL)) {

		if (in_range(physical_start, *start, len) ||
		    in_range(*start, physical_start,
			     physical_end + 1 - physical_start)) {
			*start = physical_end + 1;
			return true;
		}
	}
	return false;
}

static u64 dev_extent_search_start(struct btrfs_device *device)
{
	switch (device->fs_devices->chunk_alloc_policy) {
	case BTRFS_CHUNK_ALLOC_REGULAR:
		return BTRFS_DEVICE_RANGE_RESERVED;
	case BTRFS_CHUNK_ALLOC_ZONED:
		/*
		 * We don't care about the starting region like regular
		 * allocator, because we anyway use/reserve the first two zones
		 * for superblock logging.
		 */
		return 0;
	default:
		BUG();
	}
}

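/*
 * Note (illustrative): on regular (non-zoned) devices the search starts at
 * BTRFS_DEVICE_RANGE_RESERVED (1 MiB at the time of writing), keeping the
 * area that typically holds the boot loader and the primary super block at
 * 64 KiB out of the allocatable range.
 */
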
static bool dev_extent_hole_check_zoned(struct btrfs_device *device,
					u64 *hole_start, u64 *hole_size,
					u64 num_bytes)
{
	u64 zone_size = device->zone_info->zone_size;
	u64 pos;
	int ret;
	bool changed = false;

	ASSERT(IS_ALIGNED(*hole_start, zone_size));

	while (*hole_size > 0) {
		pos = btrfs_find_allocatable_zones(device, *hole_start,
						   *hole_start + *hole_size,
						   num_bytes);
		if (pos != *hole_start) {
			*hole_size = *hole_start + *hole_size - pos;
			*hole_start = pos;
			changed = true;
			if (*hole_size < num_bytes)
				break;
		}

		ret = btrfs_ensure_empty_zones(device, pos, num_bytes);

		/* Range is ensured to be empty */
		if (!ret)
			return changed;

		/* Given hole range was invalid (outside of device) */
		if (ret == -ERANGE) {
			*hole_start += *hole_size;
			*hole_size = 0;
			return true;
		}

		*hole_start += zone_size;
		*hole_size -= zone_size;
		changed = true;
	}

	return changed;
}

/*
 * Check if specified hole is suitable for allocation.
 *
 * @device:	the device which we have the hole
 * @hole_start: starting position of the hole
 * @hole_size:	the size of the hole
 * @num_bytes:	the size of the free space that we need
 *
 * This function may modify @hole_start and @hole_size to reflect the suitable
 * position for allocation. Returns true if the hole position is updated,
 * false otherwise.
 */
static bool dev_extent_hole_check(struct btrfs_device *device, u64 *hole_start,
				  u64 *hole_size, u64 num_bytes)
{
	bool changed = false;
	u64 hole_end = *hole_start + *hole_size;

	for (;;) {
		/*
		 * Check before we set max_hole_start, otherwise we could end up
		 * sending back this offset anyway.
		 */
		if (contains_pending_extent(device, hole_start, *hole_size)) {
			if (hole_end >= *hole_start)
				*hole_size = hole_end - *hole_start;
			else
				*hole_size = 0;
			changed = true;
		}

		switch (device->fs_devices->chunk_alloc_policy) {
		case BTRFS_CHUNK_ALLOC_REGULAR:
			/* No extra check */
			break;
		case BTRFS_CHUNK_ALLOC_ZONED:
			if (dev_extent_hole_check_zoned(device, hole_start,
							hole_size, num_bytes)) {
				changed = true;
				/*
				 * The changed hole can contain pending extent.
				 * Loop again to check that.
				 */
				continue;
			}
			break;
		default:
			BUG();
		}

		break;
	}

	return changed;
}

/*
 * Find free space in the specified device.
 *
 * @device:	  the device which we search the free space in
 * @num_bytes:	  the size of the free space that we need
 * @start:	  store the start of the free space.
 * @len:	  the size of the free space that we find, or the size
 *		  of the max free space if we don't find suitable free space
 *
 * This does a pretty simple search, the expectation is that it is called very
 * infrequently and that a given device has a small number of extents.
 *
 * @start is used to store the start of the free space if we find it. But if
 * we don't find suitable free space, it will be used to store the start
 * position of the max free space.
 *
 * @len is used to store the size of the free space that we find.
 * But if we don't find suitable free space, it is used to store the size of
 * the max free space.
 *
 * NOTE: This function will search *commit* root of device tree, and does extra
 * check to ensure dev extents are not double allocated.
 * This makes the function safe to allocate dev extents but may not report
 * correct usable device space, as device extent freed in current transaction
 * is not reported as available.
 */
static int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
				u64 *start, u64 *len)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_key key;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 search_start;
	u64 hole_size;
	u64 max_hole_start;
	u64 max_hole_size = 0;
	u64 extent_end;
	u64 search_end = device->total_bytes;
	int ret;
	int slot;
	struct extent_buffer *l;

	search_start = dev_extent_search_start(device);
	max_hole_start = search_start;

	WARN_ON(device->zone_info &&
		!IS_ALIGNED(num_bytes, device->zone_info->zone_size));

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}
again:
	if (search_start >= search_end ||
	    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
		ret = -ENOSPC;
		goto out;
	}

	path->reada = READA_FORWARD;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_backwards(root, &key, path);
	if (ret < 0)
		goto out;

	while (search_start < search_end) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (key.type != BTRFS_DEV_EXTENT_KEY)
			goto next;

		if (key.offset > search_end)
			break;

		if (key.offset > search_start) {
			hole_size = key.offset - search_start;
			dev_extent_hole_check(device, &search_start, &hole_size,
					      num_bytes);

			if (hole_size > max_hole_size) {
				max_hole_start = search_start;
				max_hole_size = hole_size;
			}

			/*
			 * If this free space is greater than what we need,
			 * it must be the max free space that we have found
			 * until now, so max_hole_start must point to the start
			 * of this free space and the length of this free space
			 * is stored in max_hole_size. Thus, we return
			 * max_hole_start and max_hole_size and go back to the
			 * caller.
			 */
			if (hole_size >= num_bytes) {
				ret = 0;
				goto out;
			}
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (extent_end > search_start)
			search_start = extent_end;
next:
		path->slots[0]++;
		cond_resched();
	}

	/*
	 * At this point, search_start should be the end of
	 * allocated dev extents, and when shrinking the device,
	 * search_end may be smaller than search_start.
	 */
	if (search_end > search_start) {
		hole_size = search_end - search_start;
		if (dev_extent_hole_check(device, &search_start, &hole_size,
					  num_bytes)) {
			btrfs_release_path(path);
			goto again;
		}

		if (hole_size > max_hole_size) {
			max_hole_start = search_start;
			max_hole_size = hole_size;
		}
	}

	/* See above. */
	if (max_hole_size < num_bytes)
		ret = -ENOSPC;
	else
		ret = 0;

	ASSERT(max_hole_start + max_hole_size <= search_end);
out:
	btrfs_free_path(path);
	*start = max_hole_start;
	if (len)
		*len = max_hole_size;
	return ret;
}

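/*
 * Worked example (illustrative numbers): with dev extents at [1 MiB, 5 MiB)
 * and [9 MiB, 13 MiB) on a 16 MiB device, a search for num_bytes = 3 MiB
 * finds the hole [5 MiB, 9 MiB) first; since 4 MiB >= 3 MiB the function
 * returns 0 with *start == 5 MiB. Asking for 8 MiB instead would return
 * -ENOSPC, with *start and *len describing the largest hole found.
 */
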
static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
				 struct btrfs_device *device,
				 u64 start, u64 *dev_extent_len)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf = NULL;
	struct btrfs_dev_extent *extent = NULL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
again:
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid,
					  BTRFS_DEV_EXTENT_KEY);
		if (ret)
			goto out;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
		BUG_ON(found_key.offset > start || found_key.offset +
		       btrfs_dev_extent_length(leaf, extent) < start);
		key = found_key;
		btrfs_release_path(path);
		goto again;
	} else if (ret == 0) {
		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
	} else {
		goto out;
	}

	*dev_extent_len = btrfs_dev_extent_length(leaf, extent);

	ret = btrfs_del_item(trans, root, path);
	if (ret == 0)
		set_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags);
out:
	btrfs_free_path(path);
	return ret;
}

static u64 find_next_chunk(struct btrfs_fs_info *fs_info)
{
	struct rb_node *n;
	u64 ret = 0;

	read_lock(&fs_info->mapping_tree_lock);
	n = rb_last(&fs_info->mapping_tree.rb_root);
	if (n) {
		struct btrfs_chunk_map *map;

		map = rb_entry(n, struct btrfs_chunk_map, rb_node);
		ret = map->start + map->chunk_len;
	}
	read_unlock(&fs_info->mapping_tree_lock);

	return ret;
}

static noinline int find_next_devid(struct btrfs_fs_info *fs_info,
				    u64 *devid_ret)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	if (ret == 0) {
		/* Corruption */
		btrfs_err(fs_info, "corrupted chunk tree devid -1 matched");
		ret = -EUCLEAN;
		goto error;
	}

	ret = btrfs_previous_item(fs_info->chunk_root, path,
				  BTRFS_DEV_ITEMS_OBJECTID,
				  BTRFS_DEV_ITEM_KEY);
	if (ret) {
		*devid_ret = 1;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		*devid_ret = found_key.offset + 1;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

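/*
 * Illustrative: find_next_devid() searches the chunk tree for the key
 * (BTRFS_DEV_ITEMS_OBJECTID, BTRFS_DEV_ITEM_KEY, -1ULL) and steps back to
 * the last existing dev item, so a filesystem with devids 1, 2 and 3 yields
 * *devid_ret == 4; an exact match on offset -1 can only mean a corrupted
 * chunk tree.
 */
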
btrfs_add_dev_item(struct btrfs_trans_handle *trans, 2001 struct btrfs_device *device) 2002 { 2003 int ret; 2004 struct btrfs_path *path; 2005 struct btrfs_dev_item *dev_item; 2006 struct extent_buffer *leaf; 2007 struct btrfs_key key; 2008 unsigned long ptr; 2009 2010 path = btrfs_alloc_path(); 2011 if (!path) 2012 return -ENOMEM; 2013 2014 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 2015 key.type = BTRFS_DEV_ITEM_KEY; 2016 key.offset = device->devid; 2017 2018 btrfs_reserve_chunk_metadata(trans, true); 2019 ret = btrfs_insert_empty_item(trans, trans->fs_info->chunk_root, path, 2020 &key, sizeof(*dev_item)); 2021 btrfs_trans_release_chunk_metadata(trans); 2022 if (ret) 2023 goto out; 2024 2025 leaf = path->nodes[0]; 2026 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item); 2027 2028 btrfs_set_device_id(leaf, dev_item, device->devid); 2029 btrfs_set_device_generation(leaf, dev_item, 0); 2030 btrfs_set_device_type(leaf, dev_item, device->type); 2031 btrfs_set_device_io_align(leaf, dev_item, device->io_align); 2032 btrfs_set_device_io_width(leaf, dev_item, device->io_width); 2033 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size); 2034 btrfs_set_device_total_bytes(leaf, dev_item, 2035 btrfs_device_get_disk_total_bytes(device)); 2036 btrfs_set_device_bytes_used(leaf, dev_item, 2037 btrfs_device_get_bytes_used(device)); 2038 btrfs_set_device_group(leaf, dev_item, 0); 2039 btrfs_set_device_seek_speed(leaf, dev_item, 0); 2040 btrfs_set_device_bandwidth(leaf, dev_item, 0); 2041 btrfs_set_device_start_offset(leaf, dev_item, 0); 2042 2043 ptr = btrfs_device_uuid(dev_item); 2044 write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE); 2045 ptr = btrfs_device_fsid(dev_item); 2046 write_extent_buffer(leaf, trans->fs_info->fs_devices->metadata_uuid, 2047 ptr, BTRFS_FSID_SIZE); 2048 btrfs_mark_buffer_dirty(trans, leaf); 2049 2050 ret = 0; 2051 out: 2052 btrfs_free_path(path); 2053 return ret; 2054 } 2055 2056 /* 2057 * Function to update ctime/mtime for a given device path. 2058 * Mainly used for ctime/mtime based probe like libblkid. 2059 * 2060 * We don't care about errors here, this is just to be kind to userspace. 2061 */ 2062 static void update_dev_time(const char *device_path) 2063 { 2064 struct path path; 2065 int ret; 2066 2067 ret = kern_path(device_path, LOOKUP_FOLLOW, &path); 2068 if (ret) 2069 return; 2070 2071 inode_update_time(d_inode(path.dentry), S_MTIME | S_CTIME | S_VERSION); 2072 path_put(&path); 2073 } 2074 2075 static int btrfs_rm_dev_item(struct btrfs_trans_handle *trans, 2076 struct btrfs_device *device) 2077 { 2078 struct btrfs_root *root = device->fs_info->chunk_root; 2079 int ret; 2080 struct btrfs_path *path; 2081 struct btrfs_key key; 2082 2083 path = btrfs_alloc_path(); 2084 if (!path) 2085 return -ENOMEM; 2086 2087 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 2088 key.type = BTRFS_DEV_ITEM_KEY; 2089 key.offset = device->devid; 2090 2091 btrfs_reserve_chunk_metadata(trans, false); 2092 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 2093 btrfs_trans_release_chunk_metadata(trans); 2094 if (ret) { 2095 if (ret > 0) 2096 ret = -ENOENT; 2097 goto out; 2098 } 2099 2100 ret = btrfs_del_item(trans, root, path); 2101 out: 2102 btrfs_free_path(path); 2103 return ret; 2104 } 2105 2106 /* 2107 * Verify that @num_devices satisfies the RAID profile constraints in the whole 2108 * filesystem. It's up to the caller to adjust that number regarding eg. device 2109 * replace. 
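 *
 * A hypothetical caller sketch (mirrors btrfs_rm_device() below,
 * illustrative only):
 *
 *	num_devices = btrfs_num_devices(fs_info);
 *	ret = btrfs_check_raid_min_devices(fs_info, num_devices - 1);
 *	if (ret)
 *		return ret;
 *
 * where the returned error is e.g. BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET when
 * removing a device would leave a raid1 filesystem with a single device.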
 */
static int btrfs_check_raid_min_devices(struct btrfs_fs_info *fs_info,
					u64 num_devices)
{
	u64 all_avail;
	unsigned seq;
	int i;

	do {
		seq = read_seqbegin(&fs_info->profiles_lock);

		all_avail = fs_info->avail_data_alloc_bits |
			    fs_info->avail_system_alloc_bits |
			    fs_info->avail_metadata_alloc_bits;
	} while (read_seqretry(&fs_info->profiles_lock, seq));

	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
		if (!(all_avail & btrfs_raid_array[i].bg_flag))
			continue;

		if (num_devices < btrfs_raid_array[i].devs_min)
			return btrfs_raid_array[i].mindev_error;
	}

	return 0;
}

static struct btrfs_device *btrfs_find_next_active_device(
		struct btrfs_fs_devices *fs_devs, struct btrfs_device *device)
{
	struct btrfs_device *next_device;

	list_for_each_entry(next_device, &fs_devs->devices, dev_list) {
		if (next_device != device &&
		    !test_bit(BTRFS_DEV_STATE_MISSING, &next_device->dev_state) &&
		    next_device->bdev)
			return next_device;
	}

	return NULL;
}

/*
 * Helper function to check if the given device is part of s_bdev / latest_dev
 * and replace it with the provided or the next active device. In the context
 * where this function is called, there should always be another active device
 * (or this_dev) available.
 */
void __cold btrfs_assign_next_active_device(struct btrfs_device *device,
					    struct btrfs_device *next_device)
{
	struct btrfs_fs_info *fs_info = device->fs_info;

	if (!next_device)
		next_device = btrfs_find_next_active_device(fs_info->fs_devices,
							    device);
	ASSERT(next_device);

	if (fs_info->sb->s_bdev &&
	    (fs_info->sb->s_bdev == device->bdev))
		fs_info->sb->s_bdev = next_device->bdev;

	if (fs_info->fs_devices->latest_dev->bdev == device->bdev)
		fs_info->fs_devices->latest_dev = next_device;
}

/*
 * Return btrfs_fs_devices::num_devices excluding the device that's being
 * currently replaced.
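 *
 * E.g. with three devices and a replace in progress this returns 2, so
 * callers sizing RAID constraints do not count the replace target as an
 * independent device.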
2179 */ 2180 static u64 btrfs_num_devices(struct btrfs_fs_info *fs_info) 2181 { 2182 u64 num_devices = fs_info->fs_devices->num_devices; 2183 2184 down_read(&fs_info->dev_replace.rwsem); 2185 if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) { 2186 ASSERT(num_devices > 1); 2187 num_devices--; 2188 } 2189 up_read(&fs_info->dev_replace.rwsem); 2190 2191 return num_devices; 2192 } 2193 2194 static void btrfs_scratch_superblock(struct btrfs_fs_info *fs_info, 2195 struct block_device *bdev, int copy_num) 2196 { 2197 struct btrfs_super_block *disk_super; 2198 const size_t len = sizeof(disk_super->magic); 2199 const u64 bytenr = btrfs_sb_offset(copy_num); 2200 int ret; 2201 2202 disk_super = btrfs_read_disk_super(bdev, bytenr, bytenr); 2203 if (IS_ERR(disk_super)) 2204 return; 2205 2206 memset(&disk_super->magic, 0, len); 2207 folio_mark_dirty(virt_to_folio(disk_super)); 2208 btrfs_release_disk_super(disk_super); 2209 2210 ret = sync_blockdev_range(bdev, bytenr, bytenr + len - 1); 2211 if (ret) 2212 btrfs_warn(fs_info, "error clearing superblock number %d (%d)", 2213 copy_num, ret); 2214 } 2215 2216 void btrfs_scratch_superblocks(struct btrfs_fs_info *fs_info, struct btrfs_device *device) 2217 { 2218 int copy_num; 2219 struct block_device *bdev = device->bdev; 2220 2221 if (!bdev) 2222 return; 2223 2224 for (copy_num = 0; copy_num < BTRFS_SUPER_MIRROR_MAX; copy_num++) { 2225 if (bdev_is_zoned(bdev)) 2226 btrfs_reset_sb_log_zones(bdev, copy_num); 2227 else 2228 btrfs_scratch_superblock(fs_info, bdev, copy_num); 2229 } 2230 2231 /* Notify udev that device has changed */ 2232 btrfs_kobject_uevent(bdev, KOBJ_CHANGE); 2233 2234 /* Update ctime/mtime for device path for libblkid */ 2235 update_dev_time(device->name->str); 2236 } 2237 2238 int btrfs_rm_device(struct btrfs_fs_info *fs_info, 2239 struct btrfs_dev_lookup_args *args, 2240 struct file **bdev_file) 2241 { 2242 struct btrfs_trans_handle *trans; 2243 struct btrfs_device *device; 2244 struct btrfs_fs_devices *cur_devices; 2245 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 2246 u64 num_devices; 2247 int ret = 0; 2248 2249 if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) { 2250 btrfs_err(fs_info, "device remove not supported on extent tree v2 yet"); 2251 return -EINVAL; 2252 } 2253 2254 /* 2255 * The device list in fs_devices is accessed without locks (neither 2256 * uuid_mutex nor device_list_mutex) as it won't change on a mounted 2257 * filesystem and another device rm cannot run. 
2258 */ 2259 num_devices = btrfs_num_devices(fs_info); 2260 2261 ret = btrfs_check_raid_min_devices(fs_info, num_devices - 1); 2262 if (ret) 2263 return ret; 2264 2265 device = btrfs_find_device(fs_info->fs_devices, args); 2266 if (!device) { 2267 if (args->missing) 2268 ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND; 2269 else 2270 ret = -ENOENT; 2271 return ret; 2272 } 2273 2274 if (btrfs_pinned_by_swapfile(fs_info, device)) { 2275 btrfs_warn_in_rcu(fs_info, 2276 "cannot remove device %s (devid %llu) due to active swapfile", 2277 btrfs_dev_name(device), device->devid); 2278 return -ETXTBSY; 2279 } 2280 2281 if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) 2282 return BTRFS_ERROR_DEV_TGT_REPLACE; 2283 2284 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) && 2285 fs_info->fs_devices->rw_devices == 1) 2286 return BTRFS_ERROR_DEV_ONLY_WRITABLE; 2287 2288 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 2289 mutex_lock(&fs_info->chunk_mutex); 2290 list_del_init(&device->dev_alloc_list); 2291 device->fs_devices->rw_devices--; 2292 mutex_unlock(&fs_info->chunk_mutex); 2293 } 2294 2295 ret = btrfs_shrink_device(device, 0); 2296 if (ret) 2297 goto error_undo; 2298 2299 trans = btrfs_start_transaction(fs_info->chunk_root, 0); 2300 if (IS_ERR(trans)) { 2301 ret = PTR_ERR(trans); 2302 goto error_undo; 2303 } 2304 2305 ret = btrfs_rm_dev_item(trans, device); 2306 if (ret) { 2307 /* Any error in dev item removal is critical */ 2308 btrfs_crit(fs_info, 2309 "failed to remove device item for devid %llu: %d", 2310 device->devid, ret); 2311 btrfs_abort_transaction(trans, ret); 2312 btrfs_end_transaction(trans); 2313 return ret; 2314 } 2315 2316 clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state); 2317 btrfs_scrub_cancel_dev(device); 2318 2319 /* 2320 * the device list mutex makes sure that we don't change 2321 * the device list while someone else is writing out all 2322 * the device supers. Whoever is writing all supers, should 2323 * lock the device list mutex before getting the number of 2324 * devices in the super block (super_copy). Conversely, 2325 * whoever updates the number of devices in the super block 2326 * (super_copy) should hold the device list mutex. 2327 */ 2328 2329 /* 2330 * In normal cases the cur_devices == fs_devices. But in case 2331 * of deleting a seed device, the cur_devices should point to 2332 * its own fs_devices listed under the fs_devices->seed_list. 2333 */ 2334 cur_devices = device->fs_devices; 2335 mutex_lock(&fs_devices->device_list_mutex); 2336 list_del_rcu(&device->dev_list); 2337 2338 cur_devices->num_devices--; 2339 cur_devices->total_devices--; 2340 /* Update total_devices of the parent fs_devices if it's seed */ 2341 if (cur_devices != fs_devices) 2342 fs_devices->total_devices--; 2343 2344 if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) 2345 cur_devices->missing_devices--; 2346 2347 btrfs_assign_next_active_device(device, NULL); 2348 2349 if (device->bdev_file) { 2350 cur_devices->open_devices--; 2351 /* remove sysfs entry */ 2352 btrfs_sysfs_remove_device(device); 2353 } 2354 2355 num_devices = btrfs_super_num_devices(fs_info->super_copy) - 1; 2356 btrfs_set_super_num_devices(fs_info->super_copy, num_devices); 2357 mutex_unlock(&fs_devices->device_list_mutex); 2358 2359 /* 2360 * At this point, the device is zero sized and detached from the 2361 * devices list. All that's left is to zero out the old supers and 2362 * free the device. 
2363 * 2364 * We cannot call btrfs_close_bdev() here because we're holding the sb 2365 * write lock, and fput() on the block device will pull in the 2366 * ->open_mutex on the block device and it's dependencies. Instead 2367 * just flush the device and let the caller do the final bdev_release. 2368 */ 2369 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 2370 btrfs_scratch_superblocks(fs_info, device); 2371 if (device->bdev) { 2372 sync_blockdev(device->bdev); 2373 invalidate_bdev(device->bdev); 2374 } 2375 } 2376 2377 *bdev_file = device->bdev_file; 2378 synchronize_rcu(); 2379 btrfs_free_device(device); 2380 2381 /* 2382 * This can happen if cur_devices is the private seed devices list. We 2383 * cannot call close_fs_devices() here because it expects the uuid_mutex 2384 * to be held, but in fact we don't need that for the private 2385 * seed_devices, we can simply decrement cur_devices->opened and then 2386 * remove it from our list and free the fs_devices. 2387 */ 2388 if (cur_devices->num_devices == 0) { 2389 list_del_init(&cur_devices->seed_list); 2390 ASSERT(cur_devices->opened == 1); 2391 cur_devices->opened--; 2392 free_fs_devices(cur_devices); 2393 } 2394 2395 ret = btrfs_commit_transaction(trans); 2396 2397 return ret; 2398 2399 error_undo: 2400 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 2401 mutex_lock(&fs_info->chunk_mutex); 2402 list_add(&device->dev_alloc_list, 2403 &fs_devices->alloc_list); 2404 device->fs_devices->rw_devices++; 2405 mutex_unlock(&fs_info->chunk_mutex); 2406 } 2407 return ret; 2408 } 2409 2410 void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device *srcdev) 2411 { 2412 struct btrfs_fs_devices *fs_devices; 2413 2414 lockdep_assert_held(&srcdev->fs_info->fs_devices->device_list_mutex); 2415 2416 /* 2417 * in case of fs with no seed, srcdev->fs_devices will point 2418 * to fs_devices of fs_info. However when the dev being replaced is 2419 * a seed dev it will point to the seed's local fs_devices. In short 2420 * srcdev will have its correct fs_devices in both the cases. 2421 */ 2422 fs_devices = srcdev->fs_devices; 2423 2424 list_del_rcu(&srcdev->dev_list); 2425 list_del(&srcdev->dev_alloc_list); 2426 fs_devices->num_devices--; 2427 if (test_bit(BTRFS_DEV_STATE_MISSING, &srcdev->dev_state)) 2428 fs_devices->missing_devices--; 2429 2430 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &srcdev->dev_state)) 2431 fs_devices->rw_devices--; 2432 2433 if (srcdev->bdev) 2434 fs_devices->open_devices--; 2435 } 2436 2437 void btrfs_rm_dev_replace_free_srcdev(struct btrfs_device *srcdev) 2438 { 2439 struct btrfs_fs_devices *fs_devices = srcdev->fs_devices; 2440 2441 mutex_lock(&uuid_mutex); 2442 2443 btrfs_close_bdev(srcdev); 2444 synchronize_rcu(); 2445 btrfs_free_device(srcdev); 2446 2447 /* if this is no devs we rather delete the fs_devices */ 2448 if (!fs_devices->num_devices) { 2449 /* 2450 * On a mounted FS, num_devices can't be zero unless it's a 2451 * seed. In case of a seed device being replaced, the replace 2452 * target added to the sprout FS, so there will be no more 2453 * device left under the seed FS. 
2454 */ 2455 ASSERT(fs_devices->seeding); 2456 2457 list_del_init(&fs_devices->seed_list); 2458 close_fs_devices(fs_devices); 2459 free_fs_devices(fs_devices); 2460 } 2461 mutex_unlock(&uuid_mutex); 2462 } 2463 2464 void btrfs_destroy_dev_replace_tgtdev(struct btrfs_device *tgtdev) 2465 { 2466 struct btrfs_fs_devices *fs_devices = tgtdev->fs_info->fs_devices; 2467 2468 mutex_lock(&fs_devices->device_list_mutex); 2469 2470 btrfs_sysfs_remove_device(tgtdev); 2471 2472 if (tgtdev->bdev) 2473 fs_devices->open_devices--; 2474 2475 fs_devices->num_devices--; 2476 2477 btrfs_assign_next_active_device(tgtdev, NULL); 2478 2479 list_del_rcu(&tgtdev->dev_list); 2480 2481 mutex_unlock(&fs_devices->device_list_mutex); 2482 2483 btrfs_scratch_superblocks(tgtdev->fs_info, tgtdev); 2484 2485 btrfs_close_bdev(tgtdev); 2486 synchronize_rcu(); 2487 btrfs_free_device(tgtdev); 2488 } 2489 2490 /* 2491 * Populate args from device at path. 2492 * 2493 * @fs_info: the filesystem 2494 * @args: the args to populate 2495 * @path: the path to the device 2496 * 2497 * This will read the super block of the device at @path and populate @args with 2498 * the devid, fsid, and uuid. This is meant to be used for ioctls that need to 2499 * lookup a device to operate on, but need to do it before we take any locks. 2500 * This properly handles the special case of "missing" that a user may pass in, 2501 * and does some basic sanity checks. The caller must make sure that @path is 2502 * properly NUL terminated before calling in, and must call 2503 * btrfs_put_dev_args_from_path() in order to free up the temporary fsid and 2504 * uuid buffers. 2505 * 2506 * Return: 0 for success, -errno for failure 2507 */ 2508 int btrfs_get_dev_args_from_path(struct btrfs_fs_info *fs_info, 2509 struct btrfs_dev_lookup_args *args, 2510 const char *path) 2511 { 2512 struct btrfs_super_block *disk_super; 2513 struct file *bdev_file; 2514 int ret; 2515 2516 if (!path || !path[0]) 2517 return -EINVAL; 2518 if (!strcmp(path, "missing")) { 2519 args->missing = true; 2520 return 0; 2521 } 2522 2523 args->uuid = kzalloc(BTRFS_UUID_SIZE, GFP_KERNEL); 2524 args->fsid = kzalloc(BTRFS_FSID_SIZE, GFP_KERNEL); 2525 if (!args->uuid || !args->fsid) { 2526 btrfs_put_dev_args_from_path(args); 2527 return -ENOMEM; 2528 } 2529 2530 ret = btrfs_get_bdev_and_sb(path, BLK_OPEN_READ, NULL, 0, 2531 &bdev_file, &disk_super); 2532 if (ret) { 2533 btrfs_put_dev_args_from_path(args); 2534 return ret; 2535 } 2536 2537 args->devid = btrfs_stack_device_id(&disk_super->dev_item); 2538 memcpy(args->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE); 2539 if (btrfs_fs_incompat(fs_info, METADATA_UUID)) 2540 memcpy(args->fsid, disk_super->metadata_uuid, BTRFS_FSID_SIZE); 2541 else 2542 memcpy(args->fsid, disk_super->fsid, BTRFS_FSID_SIZE); 2543 btrfs_release_disk_super(disk_super); 2544 fput(bdev_file); 2545 return 0; 2546 } 2547 2548 /* 2549 * Only use this jointly with btrfs_get_dev_args_from_path() because we will 2550 * allocate our ->uuid and ->fsid pointers, everybody else uses local variables 2551 * that don't need to be freed. 
2552 */ 2553 void btrfs_put_dev_args_from_path(struct btrfs_dev_lookup_args *args) 2554 { 2555 kfree(args->uuid); 2556 kfree(args->fsid); 2557 args->uuid = NULL; 2558 args->fsid = NULL; 2559 } 2560 2561 struct btrfs_device *btrfs_find_device_by_devspec( 2562 struct btrfs_fs_info *fs_info, u64 devid, 2563 const char *device_path) 2564 { 2565 BTRFS_DEV_LOOKUP_ARGS(args); 2566 struct btrfs_device *device; 2567 int ret; 2568 2569 if (devid) { 2570 args.devid = devid; 2571 device = btrfs_find_device(fs_info->fs_devices, &args); 2572 if (!device) 2573 return ERR_PTR(-ENOENT); 2574 return device; 2575 } 2576 2577 ret = btrfs_get_dev_args_from_path(fs_info, &args, device_path); 2578 if (ret) 2579 return ERR_PTR(ret); 2580 device = btrfs_find_device(fs_info->fs_devices, &args); 2581 btrfs_put_dev_args_from_path(&args); 2582 if (!device) 2583 return ERR_PTR(-ENOENT); 2584 return device; 2585 } 2586 2587 static struct btrfs_fs_devices *btrfs_init_sprout(struct btrfs_fs_info *fs_info) 2588 { 2589 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 2590 struct btrfs_fs_devices *old_devices; 2591 struct btrfs_fs_devices *seed_devices; 2592 2593 lockdep_assert_held(&uuid_mutex); 2594 if (!fs_devices->seeding) 2595 return ERR_PTR(-EINVAL); 2596 2597 /* 2598 * Private copy of the seed devices, anchored at 2599 * fs_info->fs_devices->seed_list 2600 */ 2601 seed_devices = alloc_fs_devices(NULL); 2602 if (IS_ERR(seed_devices)) 2603 return seed_devices; 2604 2605 /* 2606 * It's necessary to retain a copy of the original seed fs_devices in 2607 * fs_uuids so that filesystems which have been seeded can successfully 2608 * reference the seed device from open_seed_devices. This also supports 2609 * multiple fs seed. 2610 */ 2611 old_devices = clone_fs_devices(fs_devices); 2612 if (IS_ERR(old_devices)) { 2613 kfree(seed_devices); 2614 return old_devices; 2615 } 2616 2617 list_add(&old_devices->fs_list, &fs_uuids); 2618 2619 memcpy(seed_devices, fs_devices, sizeof(*seed_devices)); 2620 seed_devices->opened = 1; 2621 INIT_LIST_HEAD(&seed_devices->devices); 2622 INIT_LIST_HEAD(&seed_devices->alloc_list); 2623 mutex_init(&seed_devices->device_list_mutex); 2624 2625 return seed_devices; 2626 } 2627 2628 /* 2629 * Splice seed devices into the sprout fs_devices. 2630 * Generate a new fsid for the sprouted read-write filesystem. 2631 */ 2632 static void btrfs_setup_sprout(struct btrfs_fs_info *fs_info, 2633 struct btrfs_fs_devices *seed_devices) 2634 { 2635 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 2636 struct btrfs_super_block *disk_super = fs_info->super_copy; 2637 struct btrfs_device *device; 2638 u64 super_flags; 2639 2640 /* 2641 * We are updating the fsid, the thread leading to device_list_add() 2642 * could race, so uuid_mutex is needed. 2643 */ 2644 lockdep_assert_held(&uuid_mutex); 2645 2646 /* 2647 * The threads listed below may traverse dev_list but can do that without 2648 * device_list_mutex: 2649 * - All device ops and balance - as we are in btrfs_exclop_start. 2650 * - Various dev_list readers - are using RCU. 2651 * - btrfs_ioctl_fitrim() - is using RCU. 
2652 * 2653 * For-read threads as below are using device_list_mutex: 2654 * - Readonly scrub btrfs_scrub_dev() 2655 * - Readonly scrub btrfs_scrub_progress() 2656 * - btrfs_get_dev_stats() 2657 */ 2658 lockdep_assert_held(&fs_devices->device_list_mutex); 2659 2660 list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices, 2661 synchronize_rcu); 2662 list_for_each_entry(device, &seed_devices->devices, dev_list) 2663 device->fs_devices = seed_devices; 2664 2665 fs_devices->seeding = false; 2666 fs_devices->num_devices = 0; 2667 fs_devices->open_devices = 0; 2668 fs_devices->missing_devices = 0; 2669 fs_devices->rotating = false; 2670 list_add(&seed_devices->seed_list, &fs_devices->seed_list); 2671 2672 generate_random_uuid(fs_devices->fsid); 2673 memcpy(fs_devices->metadata_uuid, fs_devices->fsid, BTRFS_FSID_SIZE); 2674 memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE); 2675 2676 super_flags = btrfs_super_flags(disk_super) & 2677 ~BTRFS_SUPER_FLAG_SEEDING; 2678 btrfs_set_super_flags(disk_super, super_flags); 2679 } 2680 2681 /* 2682 * Store the expected generation for seed devices in device items. 2683 */ 2684 static int btrfs_finish_sprout(struct btrfs_trans_handle *trans) 2685 { 2686 BTRFS_DEV_LOOKUP_ARGS(args); 2687 struct btrfs_fs_info *fs_info = trans->fs_info; 2688 struct btrfs_root *root = fs_info->chunk_root; 2689 struct btrfs_path *path; 2690 struct extent_buffer *leaf; 2691 struct btrfs_dev_item *dev_item; 2692 struct btrfs_device *device; 2693 struct btrfs_key key; 2694 u8 fs_uuid[BTRFS_FSID_SIZE]; 2695 u8 dev_uuid[BTRFS_UUID_SIZE]; 2696 int ret; 2697 2698 path = btrfs_alloc_path(); 2699 if (!path) 2700 return -ENOMEM; 2701 2702 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 2703 key.offset = 0; 2704 key.type = BTRFS_DEV_ITEM_KEY; 2705 2706 while (1) { 2707 btrfs_reserve_chunk_metadata(trans, false); 2708 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 2709 btrfs_trans_release_chunk_metadata(trans); 2710 if (ret < 0) 2711 goto error; 2712 2713 leaf = path->nodes[0]; 2714 next_slot: 2715 if (path->slots[0] >= btrfs_header_nritems(leaf)) { 2716 ret = btrfs_next_leaf(root, path); 2717 if (ret > 0) 2718 break; 2719 if (ret < 0) 2720 goto error; 2721 leaf = path->nodes[0]; 2722 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 2723 btrfs_release_path(path); 2724 continue; 2725 } 2726 2727 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 2728 if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID || 2729 key.type != BTRFS_DEV_ITEM_KEY) 2730 break; 2731 2732 dev_item = btrfs_item_ptr(leaf, path->slots[0], 2733 struct btrfs_dev_item); 2734 args.devid = btrfs_device_id(leaf, dev_item); 2735 read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item), 2736 BTRFS_UUID_SIZE); 2737 read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item), 2738 BTRFS_FSID_SIZE); 2739 args.uuid = dev_uuid; 2740 args.fsid = fs_uuid; 2741 device = btrfs_find_device(fs_info->fs_devices, &args); 2742 BUG_ON(!device); /* Logic error */ 2743 2744 if (device->fs_devices->seeding) { 2745 btrfs_set_device_generation(leaf, dev_item, 2746 device->generation); 2747 btrfs_mark_buffer_dirty(trans, leaf); 2748 } 2749 2750 path->slots[0]++; 2751 goto next_slot; 2752 } 2753 ret = 0; 2754 error: 2755 btrfs_free_path(path); 2756 return ret; 2757 } 2758 2759 int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path) 2760 { 2761 struct btrfs_root *root = fs_info->dev_root; 2762 struct btrfs_trans_handle *trans; 2763 struct btrfs_device *device; 2764 struct file *bdev_file; 2765 struct 
super_block *sb = fs_info->sb; 2766 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 2767 struct btrfs_fs_devices *seed_devices = NULL; 2768 u64 orig_super_total_bytes; 2769 u64 orig_super_num_devices; 2770 int ret = 0; 2771 bool seeding_dev = false; 2772 bool locked = false; 2773 2774 if (sb_rdonly(sb) && !fs_devices->seeding) 2775 return -EROFS; 2776 2777 bdev_file = bdev_file_open_by_path(device_path, BLK_OPEN_WRITE, 2778 fs_info->bdev_holder, NULL); 2779 if (IS_ERR(bdev_file)) 2780 return PTR_ERR(bdev_file); 2781 2782 if (!btrfs_check_device_zone_type(fs_info, file_bdev(bdev_file))) { 2783 ret = -EINVAL; 2784 goto error; 2785 } 2786 2787 if (fs_devices->seeding) { 2788 seeding_dev = true; 2789 down_write(&sb->s_umount); 2790 mutex_lock(&uuid_mutex); 2791 locked = true; 2792 } 2793 2794 sync_blockdev(file_bdev(bdev_file)); 2795 2796 rcu_read_lock(); 2797 list_for_each_entry_rcu(device, &fs_devices->devices, dev_list) { 2798 if (device->bdev == file_bdev(bdev_file)) { 2799 ret = -EEXIST; 2800 rcu_read_unlock(); 2801 goto error; 2802 } 2803 } 2804 rcu_read_unlock(); 2805 2806 device = btrfs_alloc_device(fs_info, NULL, NULL, device_path); 2807 if (IS_ERR(device)) { 2808 /* we can safely leave the fs_devices entry around */ 2809 ret = PTR_ERR(device); 2810 goto error; 2811 } 2812 2813 device->fs_info = fs_info; 2814 device->bdev_file = bdev_file; 2815 device->bdev = file_bdev(bdev_file); 2816 ret = lookup_bdev(device_path, &device->devt); 2817 if (ret) 2818 goto error_free_device; 2819 2820 ret = btrfs_get_dev_zone_info(device, false); 2821 if (ret) 2822 goto error_free_device; 2823 2824 trans = btrfs_start_transaction(root, 0); 2825 if (IS_ERR(trans)) { 2826 ret = PTR_ERR(trans); 2827 goto error_free_zone; 2828 } 2829 2830 set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state); 2831 device->generation = trans->transid; 2832 device->io_width = fs_info->sectorsize; 2833 device->io_align = fs_info->sectorsize; 2834 device->sector_size = fs_info->sectorsize; 2835 device->total_bytes = 2836 round_down(bdev_nr_bytes(device->bdev), fs_info->sectorsize); 2837 device->disk_total_bytes = device->total_bytes; 2838 device->commit_total_bytes = device->total_bytes; 2839 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state); 2840 clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state); 2841 device->dev_stats_valid = 1; 2842 set_blocksize(device->bdev_file, BTRFS_BDEV_BLOCKSIZE); 2843 2844 if (seeding_dev) { 2845 /* GFP_KERNEL allocation must not be under device_list_mutex */ 2846 seed_devices = btrfs_init_sprout(fs_info); 2847 if (IS_ERR(seed_devices)) { 2848 ret = PTR_ERR(seed_devices); 2849 btrfs_abort_transaction(trans, ret); 2850 goto error_trans; 2851 } 2852 } 2853 2854 mutex_lock(&fs_devices->device_list_mutex); 2855 if (seeding_dev) { 2856 btrfs_setup_sprout(fs_info, seed_devices); 2857 btrfs_assign_next_active_device(fs_info->fs_devices->latest_dev, 2858 device); 2859 } 2860 2861 device->fs_devices = fs_devices; 2862 2863 mutex_lock(&fs_info->chunk_mutex); 2864 list_add_rcu(&device->dev_list, &fs_devices->devices); 2865 list_add(&device->dev_alloc_list, &fs_devices->alloc_list); 2866 fs_devices->num_devices++; 2867 fs_devices->open_devices++; 2868 fs_devices->rw_devices++; 2869 fs_devices->total_devices++; 2870 fs_devices->total_rw_bytes += device->total_bytes; 2871 2872 atomic64_add(device->total_bytes, &fs_info->free_chunk_space); 2873 2874 if (!bdev_nonrot(device->bdev)) 2875 fs_devices->rotating = true; 2876 2877 orig_super_total_bytes = 
btrfs_super_total_bytes(fs_info->super_copy);
	btrfs_set_super_total_bytes(fs_info->super_copy,
		round_down(orig_super_total_bytes + device->total_bytes,
			   fs_info->sectorsize));

	orig_super_num_devices = btrfs_super_num_devices(fs_info->super_copy);
	btrfs_set_super_num_devices(fs_info->super_copy,
				    orig_super_num_devices + 1);

	/*
	 * We've got more storage, clear any full flags on the space
	 * infos.
	 */
	btrfs_clear_space_info_full(fs_info);

	mutex_unlock(&fs_info->chunk_mutex);

	/* Add sysfs device entry */
	btrfs_sysfs_add_device(device);

	mutex_unlock(&fs_devices->device_list_mutex);

	if (seeding_dev) {
		mutex_lock(&fs_info->chunk_mutex);
		ret = init_first_rw_device(trans);
		mutex_unlock(&fs_info->chunk_mutex);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto error_sysfs;
		}
	}

	ret = btrfs_add_dev_item(trans, device);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto error_sysfs;
	}

	if (seeding_dev) {
		ret = btrfs_finish_sprout(trans);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto error_sysfs;
		}

		/*
		 * fs_devices now represents the newly sprouted filesystem and
		 * its fsid has been changed by btrfs_setup_sprout().
		 */
		btrfs_sysfs_update_sprout_fsid(fs_devices);
	}

	ret = btrfs_commit_transaction(trans);

	if (seeding_dev) {
		mutex_unlock(&uuid_mutex);
		up_write(&sb->s_umount);
		locked = false;

		if (ret) /* transaction commit */
			return ret;

		ret = btrfs_relocate_sys_chunks(fs_info);
		if (ret < 0)
			btrfs_handle_fs_error(fs_info, ret,
				    "Failed to relocate sys chunks after device initialization. This can be fixed using the \"btrfs balance\" command.");
		trans = btrfs_attach_transaction(root);
		if (IS_ERR(trans)) {
			if (PTR_ERR(trans) == -ENOENT)
				return 0;
			ret = PTR_ERR(trans);
			trans = NULL;
			goto error_sysfs;
		}
		ret = btrfs_commit_transaction(trans);
	}

	/*
	 * Now that we have written a new super block to this device, check
	 * all other fs_devices lists to see if device_path alienates any
	 * other scanned device.
	 * We can ignore the return value as it typically returns -EINVAL and
	 * only succeeds if the device was an alien.
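	 * ("Alienates" means the same block device is still listed in a
	 * stale fs_devices from a previous scan and must be dropped from
	 * there.)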
2960 */ 2961 btrfs_forget_devices(device->devt); 2962 2963 /* Update ctime/mtime for blkid or udev */ 2964 update_dev_time(device_path); 2965 2966 return ret; 2967 2968 error_sysfs: 2969 btrfs_sysfs_remove_device(device); 2970 mutex_lock(&fs_info->fs_devices->device_list_mutex); 2971 mutex_lock(&fs_info->chunk_mutex); 2972 list_del_rcu(&device->dev_list); 2973 list_del(&device->dev_alloc_list); 2974 fs_info->fs_devices->num_devices--; 2975 fs_info->fs_devices->open_devices--; 2976 fs_info->fs_devices->rw_devices--; 2977 fs_info->fs_devices->total_devices--; 2978 fs_info->fs_devices->total_rw_bytes -= device->total_bytes; 2979 atomic64_sub(device->total_bytes, &fs_info->free_chunk_space); 2980 btrfs_set_super_total_bytes(fs_info->super_copy, 2981 orig_super_total_bytes); 2982 btrfs_set_super_num_devices(fs_info->super_copy, 2983 orig_super_num_devices); 2984 mutex_unlock(&fs_info->chunk_mutex); 2985 mutex_unlock(&fs_info->fs_devices->device_list_mutex); 2986 error_trans: 2987 if (trans) 2988 btrfs_end_transaction(trans); 2989 error_free_zone: 2990 btrfs_destroy_dev_zone_info(device); 2991 error_free_device: 2992 btrfs_free_device(device); 2993 error: 2994 fput(bdev_file); 2995 if (locked) { 2996 mutex_unlock(&uuid_mutex); 2997 up_write(&sb->s_umount); 2998 } 2999 return ret; 3000 } 3001 3002 static noinline int btrfs_update_device(struct btrfs_trans_handle *trans, 3003 struct btrfs_device *device) 3004 { 3005 int ret; 3006 struct btrfs_path *path; 3007 struct btrfs_root *root = device->fs_info->chunk_root; 3008 struct btrfs_dev_item *dev_item; 3009 struct extent_buffer *leaf; 3010 struct btrfs_key key; 3011 3012 path = btrfs_alloc_path(); 3013 if (!path) 3014 return -ENOMEM; 3015 3016 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 3017 key.type = BTRFS_DEV_ITEM_KEY; 3018 key.offset = device->devid; 3019 3020 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 3021 if (ret < 0) 3022 goto out; 3023 3024 if (ret > 0) { 3025 ret = -ENOENT; 3026 goto out; 3027 } 3028 3029 leaf = path->nodes[0]; 3030 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item); 3031 3032 btrfs_set_device_id(leaf, dev_item, device->devid); 3033 btrfs_set_device_type(leaf, dev_item, device->type); 3034 btrfs_set_device_io_align(leaf, dev_item, device->io_align); 3035 btrfs_set_device_io_width(leaf, dev_item, device->io_width); 3036 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size); 3037 btrfs_set_device_total_bytes(leaf, dev_item, 3038 btrfs_device_get_disk_total_bytes(device)); 3039 btrfs_set_device_bytes_used(leaf, dev_item, 3040 btrfs_device_get_bytes_used(device)); 3041 btrfs_mark_buffer_dirty(trans, leaf); 3042 3043 out: 3044 btrfs_free_path(path); 3045 return ret; 3046 } 3047 3048 int btrfs_grow_device(struct btrfs_trans_handle *trans, 3049 struct btrfs_device *device, u64 new_size) 3050 { 3051 struct btrfs_fs_info *fs_info = device->fs_info; 3052 struct btrfs_super_block *super_copy = fs_info->super_copy; 3053 u64 old_total; 3054 u64 diff; 3055 int ret; 3056 3057 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) 3058 return -EACCES; 3059 3060 new_size = round_down(new_size, fs_info->sectorsize); 3061 3062 mutex_lock(&fs_info->chunk_mutex); 3063 old_total = btrfs_super_total_bytes(super_copy); 3064 diff = round_down(new_size - device->total_bytes, fs_info->sectorsize); 3065 3066 if (new_size <= device->total_bytes || 3067 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) { 3068 mutex_unlock(&fs_info->chunk_mutex); 3069 return -EINVAL; 3070 } 3071 3072 
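	/*
	 * Worked example (illustrative): growing a device from 100GiB to
	 * 110GiB with a 4KiB sector size gives diff == 10GiB; the device
	 * size and the superblock total below both grow by that aligned
	 * amount.
	 */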
btrfs_set_super_total_bytes(super_copy, 3073 round_down(old_total + diff, fs_info->sectorsize)); 3074 device->fs_devices->total_rw_bytes += diff; 3075 atomic64_add(diff, &fs_info->free_chunk_space); 3076 3077 btrfs_device_set_total_bytes(device, new_size); 3078 btrfs_device_set_disk_total_bytes(device, new_size); 3079 btrfs_clear_space_info_full(device->fs_info); 3080 if (list_empty(&device->post_commit_list)) 3081 list_add_tail(&device->post_commit_list, 3082 &trans->transaction->dev_update_list); 3083 mutex_unlock(&fs_info->chunk_mutex); 3084 3085 btrfs_reserve_chunk_metadata(trans, false); 3086 ret = btrfs_update_device(trans, device); 3087 btrfs_trans_release_chunk_metadata(trans); 3088 3089 return ret; 3090 } 3091 3092 static int btrfs_free_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset) 3093 { 3094 struct btrfs_fs_info *fs_info = trans->fs_info; 3095 struct btrfs_root *root = fs_info->chunk_root; 3096 int ret; 3097 struct btrfs_path *path; 3098 struct btrfs_key key; 3099 3100 path = btrfs_alloc_path(); 3101 if (!path) 3102 return -ENOMEM; 3103 3104 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 3105 key.offset = chunk_offset; 3106 key.type = BTRFS_CHUNK_ITEM_KEY; 3107 3108 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 3109 if (ret < 0) 3110 goto out; 3111 else if (ret > 0) { /* Logic error or corruption */ 3112 btrfs_err(fs_info, "failed to lookup chunk %llu when freeing", 3113 chunk_offset); 3114 btrfs_abort_transaction(trans, -ENOENT); 3115 ret = -EUCLEAN; 3116 goto out; 3117 } 3118 3119 ret = btrfs_del_item(trans, root, path); 3120 if (ret < 0) { 3121 btrfs_err(fs_info, "failed to delete chunk %llu item", chunk_offset); 3122 btrfs_abort_transaction(trans, ret); 3123 goto out; 3124 } 3125 out: 3126 btrfs_free_path(path); 3127 return ret; 3128 } 3129 3130 static int btrfs_del_sys_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset) 3131 { 3132 struct btrfs_super_block *super_copy = fs_info->super_copy; 3133 struct btrfs_disk_key *disk_key; 3134 struct btrfs_chunk *chunk; 3135 u8 *ptr; 3136 int ret = 0; 3137 u32 num_stripes; 3138 u32 array_size; 3139 u32 len = 0; 3140 u32 cur; 3141 struct btrfs_key key; 3142 3143 lockdep_assert_held(&fs_info->chunk_mutex); 3144 array_size = btrfs_super_sys_array_size(super_copy); 3145 3146 ptr = super_copy->sys_chunk_array; 3147 cur = 0; 3148 3149 while (cur < array_size) { 3150 disk_key = (struct btrfs_disk_key *)ptr; 3151 btrfs_disk_key_to_cpu(&key, disk_key); 3152 3153 len = sizeof(*disk_key); 3154 3155 if (key.type == BTRFS_CHUNK_ITEM_KEY) { 3156 chunk = (struct btrfs_chunk *)(ptr + len); 3157 num_stripes = btrfs_stack_chunk_num_stripes(chunk); 3158 len += btrfs_chunk_item_size(num_stripes); 3159 } else { 3160 ret = -EIO; 3161 break; 3162 } 3163 if (key.objectid == BTRFS_FIRST_CHUNK_TREE_OBJECTID && 3164 key.offset == chunk_offset) { 3165 memmove(ptr, ptr + len, array_size - (cur + len)); 3166 array_size -= len; 3167 btrfs_set_super_sys_array_size(super_copy, array_size); 3168 } else { 3169 ptr += len; 3170 cur += len; 3171 } 3172 } 3173 return ret; 3174 } 3175 3176 struct btrfs_chunk_map *btrfs_find_chunk_map_nolock(struct btrfs_fs_info *fs_info, 3177 u64 logical, u64 length) 3178 { 3179 struct rb_node *node = fs_info->mapping_tree.rb_root.rb_node; 3180 struct rb_node *prev = NULL; 3181 struct rb_node *orig_prev; 3182 struct btrfs_chunk_map *map; 3183 struct btrfs_chunk_map *prev_map = NULL; 3184 3185 while (node) { 3186 map = rb_entry(node, struct btrfs_chunk_map, rb_node); 3187 prev = node; 3188 prev_map = map; 3189 
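		/*
		 * Classic interval lookup: go left when logical is below
		 * this chunk, right when it is at or past the chunk's end,
		 * otherwise logical falls inside the chunk and we return it
		 * with an extra reference held.
		 */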
3190 if (logical < map->start) { 3191 node = node->rb_left; 3192 } else if (logical >= map->start + map->chunk_len) { 3193 node = node->rb_right; 3194 } else { 3195 refcount_inc(&map->refs); 3196 return map; 3197 } 3198 } 3199 3200 if (!prev) 3201 return NULL; 3202 3203 orig_prev = prev; 3204 while (prev && logical >= prev_map->start + prev_map->chunk_len) { 3205 prev = rb_next(prev); 3206 prev_map = rb_entry(prev, struct btrfs_chunk_map, rb_node); 3207 } 3208 3209 if (!prev) { 3210 prev = orig_prev; 3211 prev_map = rb_entry(prev, struct btrfs_chunk_map, rb_node); 3212 while (prev && logical < prev_map->start) { 3213 prev = rb_prev(prev); 3214 prev_map = rb_entry(prev, struct btrfs_chunk_map, rb_node); 3215 } 3216 } 3217 3218 if (prev) { 3219 u64 end = logical + length; 3220 3221 /* 3222 * Caller can pass a U64_MAX length when it wants to get any 3223 * chunk starting at an offset of 'logical' or higher, so deal 3224 * with underflow by resetting the end offset to U64_MAX. 3225 */ 3226 if (end < logical) 3227 end = U64_MAX; 3228 3229 if (end > prev_map->start && 3230 logical < prev_map->start + prev_map->chunk_len) { 3231 refcount_inc(&prev_map->refs); 3232 return prev_map; 3233 } 3234 } 3235 3236 return NULL; 3237 } 3238 3239 struct btrfs_chunk_map *btrfs_find_chunk_map(struct btrfs_fs_info *fs_info, 3240 u64 logical, u64 length) 3241 { 3242 struct btrfs_chunk_map *map; 3243 3244 read_lock(&fs_info->mapping_tree_lock); 3245 map = btrfs_find_chunk_map_nolock(fs_info, logical, length); 3246 read_unlock(&fs_info->mapping_tree_lock); 3247 3248 return map; 3249 } 3250 3251 /* 3252 * Find the mapping containing the given logical extent. 3253 * 3254 * @logical: Logical block offset in bytes. 3255 * @length: Length of extent in bytes. 3256 * 3257 * Return: Chunk mapping or ERR_PTR. 3258 */ 3259 struct btrfs_chunk_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info, 3260 u64 logical, u64 length) 3261 { 3262 struct btrfs_chunk_map *map; 3263 3264 map = btrfs_find_chunk_map(fs_info, logical, length); 3265 3266 if (unlikely(!map)) { 3267 btrfs_crit(fs_info, 3268 "unable to find chunk map for logical %llu length %llu", 3269 logical, length); 3270 return ERR_PTR(-EINVAL); 3271 } 3272 3273 if (unlikely(map->start > logical || map->start + map->chunk_len <= logical)) { 3274 btrfs_crit(fs_info, 3275 "found a bad chunk map, wanted %llu-%llu, found %llu-%llu", 3276 logical, logical + length, map->start, 3277 map->start + map->chunk_len); 3278 btrfs_free_chunk_map(map); 3279 return ERR_PTR(-EINVAL); 3280 } 3281 3282 /* Callers are responsible for dropping the reference. */ 3283 return map; 3284 } 3285 3286 static int remove_chunk_item(struct btrfs_trans_handle *trans, 3287 struct btrfs_chunk_map *map, u64 chunk_offset) 3288 { 3289 int i; 3290 3291 /* 3292 * Removing chunk items and updating the device items in the chunks btree 3293 * requires holding the chunk_mutex. 3294 * See the comment at btrfs_chunk_alloc() for the details. 
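	 *
	 * Caller sketch (mirrors btrfs_remove_chunk() below; illustrative
	 * only):
	 *
	 *	mutex_lock(&fs_info->chunk_mutex);
	 *	check_system_chunk(trans, map->type);
	 *	ret = remove_chunk_item(trans, map, chunk_offset);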
3295 */ 3296 lockdep_assert_held(&trans->fs_info->chunk_mutex); 3297 3298 for (i = 0; i < map->num_stripes; i++) { 3299 int ret; 3300 3301 ret = btrfs_update_device(trans, map->stripes[i].dev); 3302 if (ret) 3303 return ret; 3304 } 3305 3306 return btrfs_free_chunk(trans, chunk_offset); 3307 } 3308 3309 int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset) 3310 { 3311 struct btrfs_fs_info *fs_info = trans->fs_info; 3312 struct btrfs_chunk_map *map; 3313 u64 dev_extent_len = 0; 3314 int i, ret = 0; 3315 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 3316 3317 map = btrfs_get_chunk_map(fs_info, chunk_offset, 1); 3318 if (IS_ERR(map)) { 3319 /* 3320 * This is a logic error, but we don't want to just rely on the 3321 * user having built with ASSERT enabled, so if ASSERT doesn't 3322 * do anything we still error out. 3323 */ 3324 ASSERT(0); 3325 return PTR_ERR(map); 3326 } 3327 3328 /* 3329 * First delete the device extent items from the devices btree. 3330 * We take the device_list_mutex to avoid racing with the finishing phase 3331 * of a device replace operation. See the comment below before acquiring 3332 * fs_info->chunk_mutex. Note that here we do not acquire the chunk_mutex 3333 * because that can result in a deadlock when deleting the device extent 3334 * items from the devices btree - COWing an extent buffer from the btree 3335 * may result in allocating a new metadata chunk, which would attempt to 3336 * lock again fs_info->chunk_mutex. 3337 */ 3338 mutex_lock(&fs_devices->device_list_mutex); 3339 for (i = 0; i < map->num_stripes; i++) { 3340 struct btrfs_device *device = map->stripes[i].dev; 3341 ret = btrfs_free_dev_extent(trans, device, 3342 map->stripes[i].physical, 3343 &dev_extent_len); 3344 if (ret) { 3345 mutex_unlock(&fs_devices->device_list_mutex); 3346 btrfs_abort_transaction(trans, ret); 3347 goto out; 3348 } 3349 3350 if (device->bytes_used > 0) { 3351 mutex_lock(&fs_info->chunk_mutex); 3352 btrfs_device_set_bytes_used(device, 3353 device->bytes_used - dev_extent_len); 3354 atomic64_add(dev_extent_len, &fs_info->free_chunk_space); 3355 btrfs_clear_space_info_full(fs_info); 3356 mutex_unlock(&fs_info->chunk_mutex); 3357 } 3358 } 3359 mutex_unlock(&fs_devices->device_list_mutex); 3360 3361 /* 3362 * We acquire fs_info->chunk_mutex for 2 reasons: 3363 * 3364 * 1) Just like with the first phase of the chunk allocation, we must 3365 * reserve system space, do all chunk btree updates and deletions, and 3366 * update the system chunk array in the superblock while holding this 3367 * mutex. This is for similar reasons as explained on the comment at 3368 * the top of btrfs_chunk_alloc(); 3369 * 3370 * 2) Prevent races with the final phase of a device replace operation 3371 * that replaces the device object associated with the map's stripes, 3372 * because the device object's id can change at any time during that 3373 * final phase of the device replace operation 3374 * (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the 3375 * replaced device and then see it with an ID of 3376 * BTRFS_DEV_REPLACE_DEVID, which would cause a failure when updating 3377 * the device item, which does not exists on the chunk btree. 3378 * The finishing phase of device replace acquires both the 3379 * device_list_mutex and the chunk_mutex, in that order, so we are 3380 * safe by just acquiring the chunk_mutex. 
3381 */ 3382 trans->removing_chunk = true; 3383 mutex_lock(&fs_info->chunk_mutex); 3384 3385 check_system_chunk(trans, map->type); 3386 3387 ret = remove_chunk_item(trans, map, chunk_offset); 3388 /* 3389 * Normally we should not get -ENOSPC since we reserved space before 3390 * through the call to check_system_chunk(). 3391 * 3392 * Despite our system space_info having enough free space, we may not 3393 * be able to allocate extents from its block groups, because all have 3394 * an incompatible profile, which will force us to allocate a new system 3395 * block group with the right profile, or right after we called 3396 * check_system_space() above, a scrub turned the only system block group 3397 * with enough free space into RO mode. 3398 * This is explained with more detail at do_chunk_alloc(). 3399 * 3400 * So if we get -ENOSPC, allocate a new system chunk and retry once. 3401 */ 3402 if (ret == -ENOSPC) { 3403 const u64 sys_flags = btrfs_system_alloc_profile(fs_info); 3404 struct btrfs_block_group *sys_bg; 3405 3406 sys_bg = btrfs_create_chunk(trans, sys_flags); 3407 if (IS_ERR(sys_bg)) { 3408 ret = PTR_ERR(sys_bg); 3409 btrfs_abort_transaction(trans, ret); 3410 goto out; 3411 } 3412 3413 ret = btrfs_chunk_alloc_add_chunk_item(trans, sys_bg); 3414 if (ret) { 3415 btrfs_abort_transaction(trans, ret); 3416 goto out; 3417 } 3418 3419 ret = remove_chunk_item(trans, map, chunk_offset); 3420 if (ret) { 3421 btrfs_abort_transaction(trans, ret); 3422 goto out; 3423 } 3424 } else if (ret) { 3425 btrfs_abort_transaction(trans, ret); 3426 goto out; 3427 } 3428 3429 trace_btrfs_chunk_free(fs_info, map, chunk_offset, map->chunk_len); 3430 3431 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) { 3432 ret = btrfs_del_sys_chunk(fs_info, chunk_offset); 3433 if (ret) { 3434 btrfs_abort_transaction(trans, ret); 3435 goto out; 3436 } 3437 } 3438 3439 mutex_unlock(&fs_info->chunk_mutex); 3440 trans->removing_chunk = false; 3441 3442 /* 3443 * We are done with chunk btree updates and deletions, so release the 3444 * system space we previously reserved (with check_system_chunk()). 3445 */ 3446 btrfs_trans_release_chunk_metadata(trans); 3447 3448 ret = btrfs_remove_block_group(trans, map); 3449 if (ret) { 3450 btrfs_abort_transaction(trans, ret); 3451 goto out; 3452 } 3453 3454 out: 3455 if (trans->removing_chunk) { 3456 mutex_unlock(&fs_info->chunk_mutex); 3457 trans->removing_chunk = false; 3458 } 3459 /* once for us */ 3460 btrfs_free_chunk_map(map); 3461 return ret; 3462 } 3463 3464 int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset) 3465 { 3466 struct btrfs_root *root = fs_info->chunk_root; 3467 struct btrfs_trans_handle *trans; 3468 struct btrfs_block_group *block_group; 3469 u64 length; 3470 int ret; 3471 3472 if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) { 3473 btrfs_err(fs_info, 3474 "relocate: not supported on extent tree v2 yet"); 3475 return -EINVAL; 3476 } 3477 3478 /* 3479 * Prevent races with automatic removal of unused block groups. 3480 * After we relocate and before we remove the chunk with offset 3481 * chunk_offset, automatic removal of the block group can kick in, 3482 * resulting in a failure when calling btrfs_remove_chunk() below. 3483 * 3484 * Make sure to acquire this mutex before doing a tree search (dev 3485 * or chunk trees) to find chunks. 
Otherwise the cleaner kthread might 3486 * call btrfs_remove_chunk() (through btrfs_delete_unused_bgs()) after 3487 * we release the path used to search the chunk/dev tree and before 3488 * the current task acquires this mutex and calls us. 3489 */ 3490 lockdep_assert_held(&fs_info->reclaim_bgs_lock); 3491 3492 /* step one, relocate all the extents inside this chunk */ 3493 btrfs_scrub_pause(fs_info); 3494 ret = btrfs_relocate_block_group(fs_info, chunk_offset); 3495 btrfs_scrub_continue(fs_info); 3496 if (ret) { 3497 /* 3498 * If we had a transaction abort, stop all running scrubs. 3499 * See transaction.c:cleanup_transaction() why we do it here. 3500 */ 3501 if (BTRFS_FS_ERROR(fs_info)) 3502 btrfs_scrub_cancel(fs_info); 3503 return ret; 3504 } 3505 3506 block_group = btrfs_lookup_block_group(fs_info, chunk_offset); 3507 if (!block_group) 3508 return -ENOENT; 3509 btrfs_discard_cancel_work(&fs_info->discard_ctl, block_group); 3510 length = block_group->length; 3511 btrfs_put_block_group(block_group); 3512 3513 /* 3514 * On a zoned file system, discard the whole block group, this will 3515 * trigger a REQ_OP_ZONE_RESET operation on the device zone. If 3516 * resetting the zone fails, don't treat it as a fatal problem from the 3517 * filesystem's point of view. 3518 */ 3519 if (btrfs_is_zoned(fs_info)) { 3520 ret = btrfs_discard_extent(fs_info, chunk_offset, length, NULL); 3521 if (ret) 3522 btrfs_info(fs_info, 3523 "failed to reset zone %llu after relocation", 3524 chunk_offset); 3525 } 3526 3527 trans = btrfs_start_trans_remove_block_group(root->fs_info, 3528 chunk_offset); 3529 if (IS_ERR(trans)) { 3530 ret = PTR_ERR(trans); 3531 btrfs_handle_fs_error(root->fs_info, ret, NULL); 3532 return ret; 3533 } 3534 3535 /* 3536 * step two, delete the device extents and the 3537 * chunk tree entries 3538 */ 3539 ret = btrfs_remove_chunk(trans, chunk_offset); 3540 btrfs_end_transaction(trans); 3541 return ret; 3542 } 3543 3544 static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info) 3545 { 3546 struct btrfs_root *chunk_root = fs_info->chunk_root; 3547 struct btrfs_path *path; 3548 struct extent_buffer *leaf; 3549 struct btrfs_chunk *chunk; 3550 struct btrfs_key key; 3551 struct btrfs_key found_key; 3552 u64 chunk_type; 3553 bool retried = false; 3554 int failed = 0; 3555 int ret; 3556 3557 path = btrfs_alloc_path(); 3558 if (!path) 3559 return -ENOMEM; 3560 3561 again: 3562 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 3563 key.offset = (u64)-1; 3564 key.type = BTRFS_CHUNK_ITEM_KEY; 3565 3566 while (1) { 3567 mutex_lock(&fs_info->reclaim_bgs_lock); 3568 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0); 3569 if (ret < 0) { 3570 mutex_unlock(&fs_info->reclaim_bgs_lock); 3571 goto error; 3572 } 3573 if (ret == 0) { 3574 /* 3575 * On the first search we would find chunk tree with 3576 * offset -1, which is not possible. On subsequent 3577 * loops this would find an existing item on an invalid 3578 * offset (one less than the previous one, wrong 3579 * alignment and size). 
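 * Either way, an exact match for key.offset == (u64)-1 can only mean a
 * corrupted chunk tree, hence the -EUCLEAN below.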
3580 */ 3581 ret = -EUCLEAN; 3582 mutex_unlock(&fs_info->reclaim_bgs_lock); 3583 goto error; 3584 } 3585 3586 ret = btrfs_previous_item(chunk_root, path, key.objectid, 3587 key.type); 3588 if (ret) 3589 mutex_unlock(&fs_info->reclaim_bgs_lock); 3590 if (ret < 0) 3591 goto error; 3592 if (ret > 0) 3593 break; 3594 3595 leaf = path->nodes[0]; 3596 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 3597 3598 chunk = btrfs_item_ptr(leaf, path->slots[0], 3599 struct btrfs_chunk); 3600 chunk_type = btrfs_chunk_type(leaf, chunk); 3601 btrfs_release_path(path); 3602 3603 if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) { 3604 ret = btrfs_relocate_chunk(fs_info, found_key.offset); 3605 if (ret == -ENOSPC) 3606 failed++; 3607 else 3608 BUG_ON(ret); 3609 } 3610 mutex_unlock(&fs_info->reclaim_bgs_lock); 3611 3612 if (found_key.offset == 0) 3613 break; 3614 key.offset = found_key.offset - 1; 3615 } 3616 ret = 0; 3617 if (failed && !retried) { 3618 failed = 0; 3619 retried = true; 3620 goto again; 3621 } else if (WARN_ON(failed && retried)) { 3622 ret = -ENOSPC; 3623 } 3624 error: 3625 btrfs_free_path(path); 3626 return ret; 3627 } 3628 3629 /* 3630 * return 1 : allocate a data chunk successfully, 3631 * return <0: errors during allocating a data chunk, 3632 * return 0 : no need to allocate a data chunk. 3633 */ 3634 static int btrfs_may_alloc_data_chunk(struct btrfs_fs_info *fs_info, 3635 u64 chunk_offset) 3636 { 3637 struct btrfs_block_group *cache; 3638 u64 bytes_used; 3639 u64 chunk_type; 3640 3641 cache = btrfs_lookup_block_group(fs_info, chunk_offset); 3642 ASSERT(cache); 3643 chunk_type = cache->flags; 3644 btrfs_put_block_group(cache); 3645 3646 if (!(chunk_type & BTRFS_BLOCK_GROUP_DATA)) 3647 return 0; 3648 3649 spin_lock(&fs_info->data_sinfo->lock); 3650 bytes_used = fs_info->data_sinfo->bytes_used; 3651 spin_unlock(&fs_info->data_sinfo->lock); 3652 3653 if (!bytes_used) { 3654 struct btrfs_trans_handle *trans; 3655 int ret; 3656 3657 trans = btrfs_join_transaction(fs_info->tree_root); 3658 if (IS_ERR(trans)) 3659 return PTR_ERR(trans); 3660 3661 ret = btrfs_force_chunk_alloc(trans, BTRFS_BLOCK_GROUP_DATA); 3662 btrfs_end_transaction(trans); 3663 if (ret < 0) 3664 return ret; 3665 return 1; 3666 } 3667 3668 return 0; 3669 } 3670 3671 static void btrfs_disk_balance_args_to_cpu(struct btrfs_balance_args *cpu, 3672 const struct btrfs_disk_balance_args *disk) 3673 { 3674 memset(cpu, 0, sizeof(*cpu)); 3675 3676 cpu->profiles = le64_to_cpu(disk->profiles); 3677 cpu->usage = le64_to_cpu(disk->usage); 3678 cpu->devid = le64_to_cpu(disk->devid); 3679 cpu->pstart = le64_to_cpu(disk->pstart); 3680 cpu->pend = le64_to_cpu(disk->pend); 3681 cpu->vstart = le64_to_cpu(disk->vstart); 3682 cpu->vend = le64_to_cpu(disk->vend); 3683 cpu->target = le64_to_cpu(disk->target); 3684 cpu->flags = le64_to_cpu(disk->flags); 3685 cpu->limit = le64_to_cpu(disk->limit); 3686 cpu->stripes_min = le32_to_cpu(disk->stripes_min); 3687 cpu->stripes_max = le32_to_cpu(disk->stripes_max); 3688 } 3689 3690 static void btrfs_cpu_balance_args_to_disk(struct btrfs_disk_balance_args *disk, 3691 const struct btrfs_balance_args *cpu) 3692 { 3693 memset(disk, 0, sizeof(*disk)); 3694 3695 disk->profiles = cpu_to_le64(cpu->profiles); 3696 disk->usage = cpu_to_le64(cpu->usage); 3697 disk->devid = cpu_to_le64(cpu->devid); 3698 disk->pstart = cpu_to_le64(cpu->pstart); 3699 disk->pend = cpu_to_le64(cpu->pend); 3700 disk->vstart = cpu_to_le64(cpu->vstart); 3701 disk->vend = cpu_to_le64(cpu->vend); 3702 disk->target = cpu_to_le64(cpu->target); 
3703 disk->flags = cpu_to_le64(cpu->flags); 3704 disk->limit = cpu_to_le64(cpu->limit); 3705 disk->stripes_min = cpu_to_le32(cpu->stripes_min); 3706 disk->stripes_max = cpu_to_le32(cpu->stripes_max); 3707 } 3708 3709 static int insert_balance_item(struct btrfs_fs_info *fs_info, 3710 struct btrfs_balance_control *bctl) 3711 { 3712 struct btrfs_root *root = fs_info->tree_root; 3713 struct btrfs_trans_handle *trans; 3714 struct btrfs_balance_item *item; 3715 struct btrfs_disk_balance_args disk_bargs; 3716 struct btrfs_path *path; 3717 struct extent_buffer *leaf; 3718 struct btrfs_key key; 3719 int ret, err; 3720 3721 path = btrfs_alloc_path(); 3722 if (!path) 3723 return -ENOMEM; 3724 3725 trans = btrfs_start_transaction(root, 0); 3726 if (IS_ERR(trans)) { 3727 btrfs_free_path(path); 3728 return PTR_ERR(trans); 3729 } 3730 3731 key.objectid = BTRFS_BALANCE_OBJECTID; 3732 key.type = BTRFS_TEMPORARY_ITEM_KEY; 3733 key.offset = 0; 3734 3735 ret = btrfs_insert_empty_item(trans, root, path, &key, 3736 sizeof(*item)); 3737 if (ret) 3738 goto out; 3739 3740 leaf = path->nodes[0]; 3741 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item); 3742 3743 memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item)); 3744 3745 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data); 3746 btrfs_set_balance_data(leaf, item, &disk_bargs); 3747 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta); 3748 btrfs_set_balance_meta(leaf, item, &disk_bargs); 3749 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys); 3750 btrfs_set_balance_sys(leaf, item, &disk_bargs); 3751 3752 btrfs_set_balance_flags(leaf, item, bctl->flags); 3753 3754 btrfs_mark_buffer_dirty(trans, leaf); 3755 out: 3756 btrfs_free_path(path); 3757 err = btrfs_commit_transaction(trans); 3758 if (err && !ret) 3759 ret = err; 3760 return ret; 3761 } 3762 3763 static int del_balance_item(struct btrfs_fs_info *fs_info) 3764 { 3765 struct btrfs_root *root = fs_info->tree_root; 3766 struct btrfs_trans_handle *trans; 3767 struct btrfs_path *path; 3768 struct btrfs_key key; 3769 int ret, err; 3770 3771 path = btrfs_alloc_path(); 3772 if (!path) 3773 return -ENOMEM; 3774 3775 trans = btrfs_start_transaction_fallback_global_rsv(root, 0); 3776 if (IS_ERR(trans)) { 3777 btrfs_free_path(path); 3778 return PTR_ERR(trans); 3779 } 3780 3781 key.objectid = BTRFS_BALANCE_OBJECTID; 3782 key.type = BTRFS_TEMPORARY_ITEM_KEY; 3783 key.offset = 0; 3784 3785 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 3786 if (ret < 0) 3787 goto out; 3788 if (ret > 0) { 3789 ret = -ENOENT; 3790 goto out; 3791 } 3792 3793 ret = btrfs_del_item(trans, root, path); 3794 out: 3795 btrfs_free_path(path); 3796 err = btrfs_commit_transaction(trans); 3797 if (err && !ret) 3798 ret = err; 3799 return ret; 3800 } 3801 3802 /* 3803 * This is a heuristic used to reduce the number of chunks balanced on 3804 * resume after balance was interrupted. 3805 */ 3806 static void update_balance_args(struct btrfs_balance_control *bctl) 3807 { 3808 /* 3809 * Turn on soft mode for chunk types that were being converted. 3810 */ 3811 if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) 3812 bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT; 3813 if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) 3814 bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT; 3815 if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) 3816 bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT; 3817 3818 /* 3819 * Turn on usage filter if is not already used. 
The idea is 3820 * that chunks that we have already balanced should be 3821 * reasonably full. Don't do it for chunks that are being 3822 * converted - that will keep us from relocating unconverted 3823 * (albeit full) chunks. 3824 */ 3825 if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) && 3826 !(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) && 3827 !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) { 3828 bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE; 3829 bctl->data.usage = 90; 3830 } 3831 if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) && 3832 !(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) && 3833 !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) { 3834 bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE; 3835 bctl->sys.usage = 90; 3836 } 3837 if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) && 3838 !(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) && 3839 !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) { 3840 bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE; 3841 bctl->meta.usage = 90; 3842 } 3843 } 3844 3845 /* 3846 * Clear the balance status in fs_info and delete the balance item from disk. 3847 */ 3848 static void reset_balance_state(struct btrfs_fs_info *fs_info) 3849 { 3850 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 3851 int ret; 3852 3853 ASSERT(fs_info->balance_ctl); 3854 3855 spin_lock(&fs_info->balance_lock); 3856 fs_info->balance_ctl = NULL; 3857 spin_unlock(&fs_info->balance_lock); 3858 3859 kfree(bctl); 3860 ret = del_balance_item(fs_info); 3861 if (ret) 3862 btrfs_handle_fs_error(fs_info, ret, NULL); 3863 } 3864 3865 /* 3866 * Balance filters. Return 1 if chunk should be filtered out 3867 * (should not be balanced). 3868 */ 3869 static int chunk_profiles_filter(u64 chunk_type, 3870 struct btrfs_balance_args *bargs) 3871 { 3872 chunk_type = chunk_to_extended(chunk_type) & 3873 BTRFS_EXTENDED_PROFILE_MASK; 3874 3875 if (bargs->profiles & chunk_type) 3876 return 0; 3877 3878 return 1; 3879 } 3880 3881 static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset, 3882 struct btrfs_balance_args *bargs) 3883 { 3884 struct btrfs_block_group *cache; 3885 u64 chunk_used; 3886 u64 user_thresh_min; 3887 u64 user_thresh_max; 3888 int ret = 1; 3889 3890 cache = btrfs_lookup_block_group(fs_info, chunk_offset); 3891 chunk_used = cache->used; 3892 3893 if (bargs->usage_min == 0) 3894 user_thresh_min = 0; 3895 else 3896 user_thresh_min = mult_perc(cache->length, bargs->usage_min); 3897 3898 if (bargs->usage_max == 0) 3899 user_thresh_max = 1; 3900 else if (bargs->usage_max > 100) 3901 user_thresh_max = cache->length; 3902 else 3903 user_thresh_max = mult_perc(cache->length, bargs->usage_max); 3904 3905 if (user_thresh_min <= chunk_used && chunk_used < user_thresh_max) 3906 ret = 0; 3907 3908 btrfs_put_block_group(cache); 3909 return ret; 3910 } 3911 3912 static int chunk_usage_filter(struct btrfs_fs_info *fs_info, 3913 u64 chunk_offset, struct btrfs_balance_args *bargs) 3914 { 3915 struct btrfs_block_group *cache; 3916 u64 chunk_used, user_thresh; 3917 int ret = 1; 3918 3919 cache = btrfs_lookup_block_group(fs_info, chunk_offset); 3920 chunk_used = cache->used; 3921 3922 if (bargs->usage_min == 0) 3923 user_thresh = 1; 3924 else if (bargs->usage > 100) 3925 user_thresh = cache->length; 3926 else 3927 user_thresh = mult_perc(cache->length, bargs->usage); 3928 3929 if (chunk_used < user_thresh) 3930 ret = 0; 3931 3932 btrfs_put_block_group(cache); 3933 return ret; 3934 } 3935 3936 static int chunk_devid_filter(struct extent_buffer *leaf, 3937 
struct btrfs_chunk *chunk, 3938 struct btrfs_balance_args *bargs) 3939 { 3940 struct btrfs_stripe *stripe; 3941 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 3942 int i; 3943 3944 for (i = 0; i < num_stripes; i++) { 3945 stripe = btrfs_stripe_nr(chunk, i); 3946 if (btrfs_stripe_devid(leaf, stripe) == bargs->devid) 3947 return 0; 3948 } 3949 3950 return 1; 3951 } 3952 3953 static u64 calc_data_stripes(u64 type, int num_stripes) 3954 { 3955 const int index = btrfs_bg_flags_to_raid_index(type); 3956 const int ncopies = btrfs_raid_array[index].ncopies; 3957 const int nparity = btrfs_raid_array[index].nparity; 3958 3959 return (num_stripes - nparity) / ncopies; 3960 } 3961 3962 /* [pstart, pend) */ 3963 static int chunk_drange_filter(struct extent_buffer *leaf, 3964 struct btrfs_chunk *chunk, 3965 struct btrfs_balance_args *bargs) 3966 { 3967 struct btrfs_stripe *stripe; 3968 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 3969 u64 stripe_offset; 3970 u64 stripe_length; 3971 u64 type; 3972 int factor; 3973 int i; 3974 3975 if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID)) 3976 return 0; 3977 3978 type = btrfs_chunk_type(leaf, chunk); 3979 factor = calc_data_stripes(type, num_stripes); 3980 3981 for (i = 0; i < num_stripes; i++) { 3982 stripe = btrfs_stripe_nr(chunk, i); 3983 if (btrfs_stripe_devid(leaf, stripe) != bargs->devid) 3984 continue; 3985 3986 stripe_offset = btrfs_stripe_offset(leaf, stripe); 3987 stripe_length = btrfs_chunk_length(leaf, chunk); 3988 stripe_length = div_u64(stripe_length, factor); 3989 3990 if (stripe_offset < bargs->pend && 3991 stripe_offset + stripe_length > bargs->pstart) 3992 return 0; 3993 } 3994 3995 return 1; 3996 } 3997 3998 /* [vstart, vend) */ 3999 static int chunk_vrange_filter(struct extent_buffer *leaf, 4000 struct btrfs_chunk *chunk, 4001 u64 chunk_offset, 4002 struct btrfs_balance_args *bargs) 4003 { 4004 if (chunk_offset < bargs->vend && 4005 chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart) 4006 /* at least part of the chunk is inside this vrange */ 4007 return 0; 4008 4009 return 1; 4010 } 4011 4012 static int chunk_stripes_range_filter(struct extent_buffer *leaf, 4013 struct btrfs_chunk *chunk, 4014 struct btrfs_balance_args *bargs) 4015 { 4016 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 4017 4018 if (bargs->stripes_min <= num_stripes 4019 && num_stripes <= bargs->stripes_max) 4020 return 0; 4021 4022 return 1; 4023 } 4024 4025 static int chunk_soft_convert_filter(u64 chunk_type, 4026 struct btrfs_balance_args *bargs) 4027 { 4028 if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT)) 4029 return 0; 4030 4031 chunk_type = chunk_to_extended(chunk_type) & 4032 BTRFS_EXTENDED_PROFILE_MASK; 4033 4034 if (bargs->target == chunk_type) 4035 return 1; 4036 4037 return 0; 4038 } 4039 4040 static int should_balance_chunk(struct extent_buffer *leaf, 4041 struct btrfs_chunk *chunk, u64 chunk_offset) 4042 { 4043 struct btrfs_fs_info *fs_info = leaf->fs_info; 4044 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 4045 struct btrfs_balance_args *bargs = NULL; 4046 u64 chunk_type = btrfs_chunk_type(leaf, chunk); 4047 4048 /* type filter */ 4049 if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) & 4050 (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) { 4051 return 0; 4052 } 4053 4054 if (chunk_type & BTRFS_BLOCK_GROUP_DATA) 4055 bargs = &bctl->data; 4056 else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) 4057 bargs = &bctl->sys; 4058 else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA) 4059 bargs = &bctl->meta; 4060 4061 /* profiles 
filter */ 4062 if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) && 4063 chunk_profiles_filter(chunk_type, bargs)) { 4064 return 0; 4065 } 4066 4067 /* usage filter */ 4068 if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) && 4069 chunk_usage_filter(fs_info, chunk_offset, bargs)) { 4070 return 0; 4071 } else if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) && 4072 chunk_usage_range_filter(fs_info, chunk_offset, bargs)) { 4073 return 0; 4074 } 4075 4076 /* devid filter */ 4077 if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) && 4078 chunk_devid_filter(leaf, chunk, bargs)) { 4079 return 0; 4080 } 4081 4082 /* drange filter, makes sense only with devid filter */ 4083 if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) && 4084 chunk_drange_filter(leaf, chunk, bargs)) { 4085 return 0; 4086 } 4087 4088 /* vrange filter */ 4089 if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) && 4090 chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) { 4091 return 0; 4092 } 4093 4094 /* stripes filter */ 4095 if ((bargs->flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) && 4096 chunk_stripes_range_filter(leaf, chunk, bargs)) { 4097 return 0; 4098 } 4099 4100 /* soft profile changing mode */ 4101 if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) && 4102 chunk_soft_convert_filter(chunk_type, bargs)) { 4103 return 0; 4104 } 4105 4106 /* 4107 * limited by count, must be the last filter 4108 */ 4109 if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT)) { 4110 if (bargs->limit == 0) 4111 return 0; 4112 else 4113 bargs->limit--; 4114 } else if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)) { 4115 /* 4116 * Same logic as the 'limit' filter; the minimum cannot be 4117 * determined here because we do not have the global information 4118 * about the count of all chunks that satisfy the filters. 4119 */ 4120 if (bargs->limit_max == 0) 4121 return 0; 4122 else 4123 bargs->limit_max--; 4124 } 4125 4126 return 1; 4127 } 4128 4129 static int __btrfs_balance(struct btrfs_fs_info *fs_info) 4130 { 4131 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 4132 struct btrfs_root *chunk_root = fs_info->chunk_root; 4133 u64 chunk_type; 4134 struct btrfs_chunk *chunk; 4135 struct btrfs_path *path = NULL; 4136 struct btrfs_key key; 4137 struct btrfs_key found_key; 4138 struct extent_buffer *leaf; 4139 int slot; 4140 int ret; 4141 int enospc_errors = 0; 4142 bool counting = true; 4143 /* The single value limit and the min/max limits use the same bytes in the args union, save the single values before the counting pass decrements them. */ 4144 u64 limit_data = bctl->data.limit; 4145 u64 limit_meta = bctl->meta.limit; 4146 u64 limit_sys = bctl->sys.limit; 4147 u32 count_data = 0; 4148 u32 count_meta = 0; 4149 u32 count_sys = 0; 4150 int chunk_reserved = 0; 4151 4152 path = btrfs_alloc_path(); 4153 if (!path) { 4154 ret = -ENOMEM; 4155 goto error; 4156 } 4157 4158 /* zero out stat counters */ 4159 spin_lock(&fs_info->balance_lock); 4160 memset(&bctl->stat, 0, sizeof(bctl->stat)); 4161 spin_unlock(&fs_info->balance_lock); 4162 again: 4163 if (!counting) { 4164 /* 4165 * The single value limit and min/max limits use the same bytes 4166 * in the args union, restore the values that counting decremented. 4167 */ 4168 bctl->data.limit = limit_data; 4169 bctl->meta.limit = limit_meta; 4170 bctl->sys.limit = limit_sys; 4171 } 4172 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 4173 key.offset = (u64)-1; 4174 key.type = BTRFS_CHUNK_ITEM_KEY; 4175 4176 while (1) { 4177 if ((!counting && atomic_read(&fs_info->balance_pause_req)) || 4178 atomic_read(&fs_info->balance_cancel_req)) { 4179 ret = -ECANCELED; 4180 goto error; 4181 } 4182 4183 mutex_lock(&fs_info->reclaim_bgs_lock); 4184 ret =
btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0); 4185 if (ret < 0) { 4186 mutex_unlock(&fs_info->reclaim_bgs_lock); 4187 goto error; 4188 } 4189 4190 /* 4191 * this shouldn't happen, it means the last relocate 4192 * failed 4193 */ 4194 if (ret == 0) 4195 BUG(); /* FIXME break ? */ 4196 4197 ret = btrfs_previous_item(chunk_root, path, 0, 4198 BTRFS_CHUNK_ITEM_KEY); 4199 if (ret) { 4200 mutex_unlock(&fs_info->reclaim_bgs_lock); 4201 ret = 0; 4202 break; 4203 } 4204 4205 leaf = path->nodes[0]; 4206 slot = path->slots[0]; 4207 btrfs_item_key_to_cpu(leaf, &found_key, slot); 4208 4209 if (found_key.objectid != key.objectid) { 4210 mutex_unlock(&fs_info->reclaim_bgs_lock); 4211 break; 4212 } 4213 4214 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk); 4215 chunk_type = btrfs_chunk_type(leaf, chunk); 4216 4217 if (!counting) { 4218 spin_lock(&fs_info->balance_lock); 4219 bctl->stat.considered++; 4220 spin_unlock(&fs_info->balance_lock); 4221 } 4222 4223 ret = should_balance_chunk(leaf, chunk, found_key.offset); 4224 4225 btrfs_release_path(path); 4226 if (!ret) { 4227 mutex_unlock(&fs_info->reclaim_bgs_lock); 4228 goto loop; 4229 } 4230 4231 if (counting) { 4232 mutex_unlock(&fs_info->reclaim_bgs_lock); 4233 spin_lock(&fs_info->balance_lock); 4234 bctl->stat.expected++; 4235 spin_unlock(&fs_info->balance_lock); 4236 4237 if (chunk_type & BTRFS_BLOCK_GROUP_DATA) 4238 count_data++; 4239 else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) 4240 count_sys++; 4241 else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA) 4242 count_meta++; 4243 4244 goto loop; 4245 } 4246 4247 /* 4248 * Apply limit_min filter, no need to check if the LIMITS 4249 * filter is used, limit_min is 0 by default 4250 */ 4251 if (((chunk_type & BTRFS_BLOCK_GROUP_DATA) && 4252 count_data < bctl->data.limit_min) 4253 || ((chunk_type & BTRFS_BLOCK_GROUP_METADATA) && 4254 count_meta < bctl->meta.limit_min) 4255 || ((chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) && 4256 count_sys < bctl->sys.limit_min)) { 4257 mutex_unlock(&fs_info->reclaim_bgs_lock); 4258 goto loop; 4259 } 4260 4261 if (!chunk_reserved) { 4262 /* 4263 * We may be relocating the only data chunk we have, 4264 * which could potentially end up with losing data's 4265 * raid profile, so lets allocate an empty one in 4266 * advance. 4267 */ 4268 ret = btrfs_may_alloc_data_chunk(fs_info, 4269 found_key.offset); 4270 if (ret < 0) { 4271 mutex_unlock(&fs_info->reclaim_bgs_lock); 4272 goto error; 4273 } else if (ret == 1) { 4274 chunk_reserved = 1; 4275 } 4276 } 4277 4278 ret = btrfs_relocate_chunk(fs_info, found_key.offset); 4279 mutex_unlock(&fs_info->reclaim_bgs_lock); 4280 if (ret == -ENOSPC) { 4281 enospc_errors++; 4282 } else if (ret == -ETXTBSY) { 4283 btrfs_info(fs_info, 4284 "skipping relocation of block group %llu due to active swapfile", 4285 found_key.offset); 4286 ret = 0; 4287 } else if (ret) { 4288 goto error; 4289 } else { 4290 spin_lock(&fs_info->balance_lock); 4291 bctl->stat.completed++; 4292 spin_unlock(&fs_info->balance_lock); 4293 } 4294 loop: 4295 if (found_key.offset == 0) 4296 break; 4297 key.offset = found_key.offset - 1; 4298 } 4299 4300 if (counting) { 4301 btrfs_release_path(path); 4302 counting = false; 4303 goto again; 4304 } 4305 error: 4306 btrfs_free_path(path); 4307 if (enospc_errors) { 4308 btrfs_info(fs_info, "%d enospc errors during balance", 4309 enospc_errors); 4310 if (!ret) 4311 ret = -ENOSPC; 4312 } 4313 4314 return ret; 4315 } 4316 4317 /* 4318 * See if a given profile is valid and reduced. 
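* (a profile is "reduced" when at most a single profile bit is set)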
4319 * 4320 * @flags: profile to validate 4321 * @extended: if true @flags is treated as an extended profile 4322 */ 4323 static int alloc_profile_is_valid(u64 flags, int extended) 4324 { 4325 u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK : 4326 BTRFS_BLOCK_GROUP_PROFILE_MASK); 4327 4328 flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK; 4329 4330 /* 1) check that all other bits are zeroed */ 4331 if (flags & ~mask) 4332 return 0; 4333 4334 /* 2) see if profile is reduced */ 4335 if (flags == 0) 4336 return !extended; /* "0" is valid for usual profiles */ 4337 4338 return has_single_bit_set(flags); 4339 } 4340 4341 /* 4342 * Validate target profile against allowed profiles and return true if it's OK. 4343 * Otherwise print the error message and return false. 4344 */ 4345 static inline int validate_convert_profile(struct btrfs_fs_info *fs_info, 4346 const struct btrfs_balance_args *bargs, 4347 u64 allowed, const char *type) 4348 { 4349 if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT)) 4350 return true; 4351 4352 /* Profile is valid and does not have bits outside of the allowed set */ 4353 if (alloc_profile_is_valid(bargs->target, 1) && 4354 (bargs->target & ~allowed) == 0) 4355 return true; 4356 4357 btrfs_err(fs_info, "balance: invalid convert %s profile %s", 4358 type, btrfs_bg_type_to_raid_name(bargs->target)); 4359 return false; 4360 } 4361 4362 /* 4363 * Fill @buf with textual description of balance filter flags @bargs, up to 4364 * @size_buf including the terminating null. The output may be trimmed if it 4365 * does not fit into the provided buffer. 4366 */ 4367 static void describe_balance_args(struct btrfs_balance_args *bargs, char *buf, 4368 u32 size_buf) 4369 { 4370 int ret; 4371 u32 size_bp = size_buf; 4372 char *bp = buf; 4373 u64 flags = bargs->flags; 4374 char tmp_buf[128] = {'\0'}; 4375 4376 if (!flags) 4377 return; 4378 4379 #define CHECK_APPEND_NOARG(a) \ 4380 do { \ 4381 ret = snprintf(bp, size_bp, (a)); \ 4382 if (ret < 0 || ret >= size_bp) \ 4383 goto out_overflow; \ 4384 size_bp -= ret; \ 4385 bp += ret; \ 4386 } while (0) 4387 4388 #define CHECK_APPEND_1ARG(a, v1) \ 4389 do { \ 4390 ret = snprintf(bp, size_bp, (a), (v1)); \ 4391 if (ret < 0 || ret >= size_bp) \ 4392 goto out_overflow; \ 4393 size_bp -= ret; \ 4394 bp += ret; \ 4395 } while (0) 4396 4397 #define CHECK_APPEND_2ARG(a, v1, v2) \ 4398 do { \ 4399 ret = snprintf(bp, size_bp, (a), (v1), (v2)); \ 4400 if (ret < 0 || ret >= size_bp) \ 4401 goto out_overflow; \ 4402 size_bp -= ret; \ 4403 bp += ret; \ 4404 } while (0) 4405 4406 if (flags & BTRFS_BALANCE_ARGS_CONVERT) 4407 CHECK_APPEND_1ARG("convert=%s,", 4408 btrfs_bg_type_to_raid_name(bargs->target)); 4409 4410 if (flags & BTRFS_BALANCE_ARGS_SOFT) 4411 CHECK_APPEND_NOARG("soft,"); 4412 4413 if (flags & BTRFS_BALANCE_ARGS_PROFILES) { 4414 btrfs_describe_block_groups(bargs->profiles, tmp_buf, 4415 sizeof(tmp_buf)); 4416 CHECK_APPEND_1ARG("profiles=%s,", tmp_buf); 4417 } 4418 4419 if (flags & BTRFS_BALANCE_ARGS_USAGE) 4420 CHECK_APPEND_1ARG("usage=%llu,", bargs->usage); 4421 4422 if (flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) 4423 CHECK_APPEND_2ARG("usage=%u..%u,", 4424 bargs->usage_min, bargs->usage_max); 4425 4426 if (flags & BTRFS_BALANCE_ARGS_DEVID) 4427 CHECK_APPEND_1ARG("devid=%llu,", bargs->devid); 4428 4429 if (flags & BTRFS_BALANCE_ARGS_DRANGE) 4430 CHECK_APPEND_2ARG("drange=%llu..%llu,", 4431 bargs->pstart, bargs->pend); 4432 4433 if (flags & BTRFS_BALANCE_ARGS_VRANGE) 4434 CHECK_APPEND_2ARG("vrange=%llu..%llu,", 4435 bargs->vstart, bargs->vend); 4436 4437 if 
(flags & BTRFS_BALANCE_ARGS_LIMIT) 4438 CHECK_APPEND_1ARG("limit=%llu,", bargs->limit); 4439 4440 if (flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE) 4441 CHECK_APPEND_2ARG("limit=%u..%u,", 4442 bargs->limit_min, bargs->limit_max); 4443 4444 if (flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) 4445 CHECK_APPEND_2ARG("stripes=%u..%u,", 4446 bargs->stripes_min, bargs->stripes_max); 4447 4448 #undef CHECK_APPEND_2ARG 4449 #undef CHECK_APPEND_1ARG 4450 #undef CHECK_APPEND_NOARG 4451 4452 out_overflow: 4453 4454 if (size_bp < size_buf) 4455 buf[size_buf - size_bp - 1] = '\0'; /* remove last , */ 4456 else 4457 buf[0] = '\0'; 4458 } 4459 4460 static void describe_balance_start_or_resume(struct btrfs_fs_info *fs_info) 4461 { 4462 u32 size_buf = 1024; 4463 char tmp_buf[192] = {'\0'}; 4464 char *buf; 4465 char *bp; 4466 u32 size_bp = size_buf; 4467 int ret; 4468 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 4469 4470 buf = kzalloc(size_buf, GFP_KERNEL); 4471 if (!buf) 4472 return; 4473 4474 bp = buf; 4475 4476 #define CHECK_APPEND_1ARG(a, v1) \ 4477 do { \ 4478 ret = snprintf(bp, size_bp, (a), (v1)); \ 4479 if (ret < 0 || ret >= size_bp) \ 4480 goto out_overflow; \ 4481 size_bp -= ret; \ 4482 bp += ret; \ 4483 } while (0) 4484 4485 if (bctl->flags & BTRFS_BALANCE_FORCE) 4486 CHECK_APPEND_1ARG("%s", "-f "); 4487 4488 if (bctl->flags & BTRFS_BALANCE_DATA) { 4489 describe_balance_args(&bctl->data, tmp_buf, sizeof(tmp_buf)); 4490 CHECK_APPEND_1ARG("-d%s ", tmp_buf); 4491 } 4492 4493 if (bctl->flags & BTRFS_BALANCE_METADATA) { 4494 describe_balance_args(&bctl->meta, tmp_buf, sizeof(tmp_buf)); 4495 CHECK_APPEND_1ARG("-m%s ", tmp_buf); 4496 } 4497 4498 if (bctl->flags & BTRFS_BALANCE_SYSTEM) { 4499 describe_balance_args(&bctl->sys, tmp_buf, sizeof(tmp_buf)); 4500 CHECK_APPEND_1ARG("-s%s ", tmp_buf); 4501 } 4502 4503 #undef CHECK_APPEND_1ARG 4504 4505 out_overflow: 4506 4507 if (size_bp < size_buf) 4508 buf[size_buf - size_bp - 1] = '\0'; /* remove last " " */ 4509 btrfs_info(fs_info, "balance: %s %s", 4510 (bctl->flags & BTRFS_BALANCE_RESUME) ? 4511 "resume" : "start", buf); 4512 4513 kfree(buf); 4514 } 4515 4516 /* 4517 * Should be called with the balance mutex held. 4518 */ 4519 int btrfs_balance(struct btrfs_fs_info *fs_info, 4520 struct btrfs_balance_control *bctl, 4521 struct btrfs_ioctl_balance_args *bargs) 4522 { 4523 u64 meta_target, data_target; 4524 u64 allowed; 4525 int mixed = 0; 4526 int ret; 4527 u64 num_devices; 4528 unsigned seq; 4529 bool reducing_redundancy; 4530 bool paused = false; 4531 int i; 4532 4533 if (btrfs_fs_closing(fs_info) || 4534 atomic_read(&fs_info->balance_pause_req) || 4535 btrfs_should_cancel_balance(fs_info)) { 4536 ret = -EINVAL; 4537 goto out; 4538 } 4539 4540 allowed = btrfs_super_incompat_flags(fs_info->super_copy); 4541 if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) 4542 mixed = 1; 4543 4544 /* 4545 * In case of mixed groups both data and meta should be picked, 4546 * and identical options should be given for both of them.
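* E.g. -dconvert=raid1 -mconvert=raid1 is accepted, while differing data
* and metadata args on a mixed fs are rejected below with -EINVAL.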
4547 */ 4548 allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA; 4549 if (mixed && (bctl->flags & allowed)) { 4550 if (!(bctl->flags & BTRFS_BALANCE_DATA) || 4551 !(bctl->flags & BTRFS_BALANCE_METADATA) || 4552 memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) { 4553 btrfs_err(fs_info, 4554 "balance: mixed groups data and metadata options must be the same"); 4555 ret = -EINVAL; 4556 goto out; 4557 } 4558 } 4559 4560 /* 4561 * rw_devices will not change at the moment, device add/delete/replace 4562 * are exclusive 4563 */ 4564 num_devices = fs_info->fs_devices->rw_devices; 4565 4566 /* 4567 * SINGLE profile on-disk has no profile bit, but in-memory we have a 4568 * special bit for it, to make it easier to distinguish. Thus we need 4569 * to set it manually, or balance would refuse the profile. 4570 */ 4571 allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE; 4572 for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++) 4573 if (num_devices >= btrfs_raid_array[i].devs_min) 4574 allowed |= btrfs_raid_array[i].bg_flag; 4575 4576 if (!validate_convert_profile(fs_info, &bctl->data, allowed, "data") || 4577 !validate_convert_profile(fs_info, &bctl->meta, allowed, "metadata") || 4578 !validate_convert_profile(fs_info, &bctl->sys, allowed, "system")) { 4579 ret = -EINVAL; 4580 goto out; 4581 } 4582 4583 /* 4584 * Allow to reduce metadata or system integrity only if force set for 4585 * profiles with redundancy (copies, parity) 4586 */ 4587 allowed = 0; 4588 for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++) { 4589 if (btrfs_raid_array[i].ncopies >= 2 || 4590 btrfs_raid_array[i].tolerated_failures >= 1) 4591 allowed |= btrfs_raid_array[i].bg_flag; 4592 } 4593 do { 4594 seq = read_seqbegin(&fs_info->profiles_lock); 4595 4596 if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) && 4597 (fs_info->avail_system_alloc_bits & allowed) && 4598 !(bctl->sys.target & allowed)) || 4599 ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) && 4600 (fs_info->avail_metadata_alloc_bits & allowed) && 4601 !(bctl->meta.target & allowed))) 4602 reducing_redundancy = true; 4603 else 4604 reducing_redundancy = false; 4605 4606 /* if we're not converting, the target field is uninitialized */ 4607 meta_target = (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) ? 4608 bctl->meta.target : fs_info->avail_metadata_alloc_bits; 4609 data_target = (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) ? 
4610 bctl->data.target : fs_info->avail_data_alloc_bits; 4611 } while (read_seqretry(&fs_info->profiles_lock, seq)); 4612 4613 if (reducing_redundancy) { 4614 if (bctl->flags & BTRFS_BALANCE_FORCE) { 4615 btrfs_info(fs_info, 4616 "balance: force reducing metadata redundancy"); 4617 } else { 4618 btrfs_err(fs_info, 4619 "balance: reduces metadata redundancy, use --force if you want this"); 4620 ret = -EINVAL; 4621 goto out; 4622 } 4623 } 4624 4625 if (btrfs_get_num_tolerated_disk_barrier_failures(meta_target) < 4626 btrfs_get_num_tolerated_disk_barrier_failures(data_target)) { 4627 btrfs_warn(fs_info, 4628 "balance: metadata profile %s has lower redundancy than data profile %s", 4629 btrfs_bg_type_to_raid_name(meta_target), 4630 btrfs_bg_type_to_raid_name(data_target)); 4631 } 4632 4633 ret = insert_balance_item(fs_info, bctl); 4634 if (ret && ret != -EEXIST) 4635 goto out; 4636 4637 if (!(bctl->flags & BTRFS_BALANCE_RESUME)) { 4638 BUG_ON(ret == -EEXIST); 4639 BUG_ON(fs_info->balance_ctl); 4640 spin_lock(&fs_info->balance_lock); 4641 fs_info->balance_ctl = bctl; 4642 spin_unlock(&fs_info->balance_lock); 4643 } else { 4644 BUG_ON(ret != -EEXIST); 4645 spin_lock(&fs_info->balance_lock); 4646 update_balance_args(bctl); 4647 spin_unlock(&fs_info->balance_lock); 4648 } 4649 4650 ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4651 set_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags); 4652 describe_balance_start_or_resume(fs_info); 4653 mutex_unlock(&fs_info->balance_mutex); 4654 4655 ret = __btrfs_balance(fs_info); 4656 4657 mutex_lock(&fs_info->balance_mutex); 4658 if (ret == -ECANCELED && atomic_read(&fs_info->balance_pause_req)) { 4659 btrfs_info(fs_info, "balance: paused"); 4660 btrfs_exclop_balance(fs_info, BTRFS_EXCLOP_BALANCE_PAUSED); 4661 paused = true; 4662 } 4663 /* 4664 * Balance can be canceled by: 4665 * 4666 * - Regular cancel request 4667 * Then ret == -ECANCELED and balance_cancel_req > 0 4668 * 4669 * - Fatal signal to "btrfs" process 4670 * Either the signal caught by wait_reserve_ticket() and callers 4671 * got -EINTR, or caught by btrfs_should_cancel_balance() and 4672 * got -ECANCELED. 4673 * Either way, in this case balance_cancel_req = 0, and 4674 * ret == -EINTR or ret == -ECANCELED. 4675 * 4676 * So here we only check the return value to catch canceled balance. 4677 */ 4678 else if (ret == -ECANCELED || ret == -EINTR) 4679 btrfs_info(fs_info, "balance: canceled"); 4680 else 4681 btrfs_info(fs_info, "balance: ended with status: %d", ret); 4682 4683 clear_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags); 4684 4685 if (bargs) { 4686 memset(bargs, 0, sizeof(*bargs)); 4687 btrfs_update_ioctl_balance_args(fs_info, bargs); 4688 } 4689 4690 /* We didn't pause, we can clean everything up. 
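* That means dropping the in-memory balance_ctl, deleting the on-disk
* balance item and releasing the exclusive operation status.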
*/ 4691 if (!paused) { 4692 reset_balance_state(fs_info); 4693 btrfs_exclop_finish(fs_info); 4694 } 4695 4696 wake_up(&fs_info->balance_wait_q); 4697 4698 return ret; 4699 out: 4700 if (bctl->flags & BTRFS_BALANCE_RESUME) 4701 reset_balance_state(fs_info); 4702 else 4703 kfree(bctl); 4704 btrfs_exclop_finish(fs_info); 4705 4706 return ret; 4707 } 4708 4709 static int balance_kthread(void *data) 4710 { 4711 struct btrfs_fs_info *fs_info = data; 4712 int ret = 0; 4713 4714 sb_start_write(fs_info->sb); 4715 mutex_lock(&fs_info->balance_mutex); 4716 if (fs_info->balance_ctl) 4717 ret = btrfs_balance(fs_info, fs_info->balance_ctl, NULL); 4718 mutex_unlock(&fs_info->balance_mutex); 4719 sb_end_write(fs_info->sb); 4720 4721 return ret; 4722 } 4723 4724 int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info) 4725 { 4726 struct task_struct *tsk; 4727 4728 mutex_lock(&fs_info->balance_mutex); 4729 if (!fs_info->balance_ctl) { 4730 mutex_unlock(&fs_info->balance_mutex); 4731 return 0; 4732 } 4733 mutex_unlock(&fs_info->balance_mutex); 4734 4735 if (btrfs_test_opt(fs_info, SKIP_BALANCE)) { 4736 btrfs_info(fs_info, "balance: resume skipped"); 4737 return 0; 4738 } 4739 4740 spin_lock(&fs_info->super_lock); 4741 ASSERT(fs_info->exclusive_operation == BTRFS_EXCLOP_BALANCE_PAUSED); 4742 fs_info->exclusive_operation = BTRFS_EXCLOP_BALANCE; 4743 spin_unlock(&fs_info->super_lock); 4744 /* 4745 * A ro->rw remount sequence should continue with the paused balance 4746 * regardless of who pauses it, system or the user as of now, so set 4747 * the resume flag. 4748 */ 4749 spin_lock(&fs_info->balance_lock); 4750 fs_info->balance_ctl->flags |= BTRFS_BALANCE_RESUME; 4751 spin_unlock(&fs_info->balance_lock); 4752 4753 tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance"); 4754 return PTR_ERR_OR_ZERO(tsk); 4755 } 4756 4757 int btrfs_recover_balance(struct btrfs_fs_info *fs_info) 4758 { 4759 struct btrfs_balance_control *bctl; 4760 struct btrfs_balance_item *item; 4761 struct btrfs_disk_balance_args disk_bargs; 4762 struct btrfs_path *path; 4763 struct extent_buffer *leaf; 4764 struct btrfs_key key; 4765 int ret; 4766 4767 path = btrfs_alloc_path(); 4768 if (!path) 4769 return -ENOMEM; 4770 4771 key.objectid = BTRFS_BALANCE_OBJECTID; 4772 key.type = BTRFS_TEMPORARY_ITEM_KEY; 4773 key.offset = 0; 4774 4775 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0); 4776 if (ret < 0) 4777 goto out; 4778 if (ret > 0) { /* ret = -ENOENT; */ 4779 ret = 0; 4780 goto out; 4781 } 4782 4783 bctl = kzalloc(sizeof(*bctl), GFP_NOFS); 4784 if (!bctl) { 4785 ret = -ENOMEM; 4786 goto out; 4787 } 4788 4789 leaf = path->nodes[0]; 4790 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item); 4791 4792 bctl->flags = btrfs_balance_flags(leaf, item); 4793 bctl->flags |= BTRFS_BALANCE_RESUME; 4794 4795 btrfs_balance_data(leaf, item, &disk_bargs); 4796 btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs); 4797 btrfs_balance_meta(leaf, item, &disk_bargs); 4798 btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs); 4799 btrfs_balance_sys(leaf, item, &disk_bargs); 4800 btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs); 4801 4802 /* 4803 * This should never happen, as the paused balance state is recovered 4804 * during mount without any chance of other exclusive ops to collide. 4805 * 4806 * This gives the exclusive op status to balance and keeps in paused 4807 * state until user intervention (cancel or umount). If the ownership 4808 * cannot be assigned, show a message but do not fail. 
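* (The user can still resume or cancel the paused balance later.)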
The balance 4809 * is in a paused state and must have fs_info::balance_ctl properly 4810 * set up. 4811 */ 4812 if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE_PAUSED)) 4813 btrfs_warn(fs_info, 4814 "balance: cannot set exclusive op status, resume manually"); 4815 4816 btrfs_release_path(path); 4817 4818 mutex_lock(&fs_info->balance_mutex); 4819 BUG_ON(fs_info->balance_ctl); 4820 spin_lock(&fs_info->balance_lock); 4821 fs_info->balance_ctl = bctl; 4822 spin_unlock(&fs_info->balance_lock); 4823 mutex_unlock(&fs_info->balance_mutex); 4824 out: 4825 btrfs_free_path(path); 4826 return ret; 4827 } 4828 4829 int btrfs_pause_balance(struct btrfs_fs_info *fs_info) 4830 { 4831 int ret = 0; 4832 4833 mutex_lock(&fs_info->balance_mutex); 4834 if (!fs_info->balance_ctl) { 4835 mutex_unlock(&fs_info->balance_mutex); 4836 return -ENOTCONN; 4837 } 4838 4839 if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) { 4840 atomic_inc(&fs_info->balance_pause_req); 4841 mutex_unlock(&fs_info->balance_mutex); 4842 4843 wait_event(fs_info->balance_wait_q, 4844 !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4845 4846 mutex_lock(&fs_info->balance_mutex); 4847 /* we are good with balance_ctl ripped off from under us */ 4848 BUG_ON(test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4849 atomic_dec(&fs_info->balance_pause_req); 4850 } else { 4851 ret = -ENOTCONN; 4852 } 4853 4854 mutex_unlock(&fs_info->balance_mutex); 4855 return ret; 4856 } 4857 4858 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info) 4859 { 4860 mutex_lock(&fs_info->balance_mutex); 4861 if (!fs_info->balance_ctl) { 4862 mutex_unlock(&fs_info->balance_mutex); 4863 return -ENOTCONN; 4864 } 4865 4866 /* 4867 * A paused balance with the item stored on disk can be resumed at 4868 * mount time if the mount is read-write. Otherwise it's still paused 4869 * and we must not allow cancelling as it deletes the item. 4870 */ 4871 if (sb_rdonly(fs_info->sb)) { 4872 mutex_unlock(&fs_info->balance_mutex); 4873 return -EROFS; 4874 } 4875 4876 atomic_inc(&fs_info->balance_cancel_req); 4877 /* 4878 * if we are running just wait and return, balance item is 4879 * deleted in btrfs_balance in this case 4880 */ 4881 if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) { 4882 mutex_unlock(&fs_info->balance_mutex); 4883 wait_event(fs_info->balance_wait_q, 4884 !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4885 mutex_lock(&fs_info->balance_mutex); 4886 } else { 4887 mutex_unlock(&fs_info->balance_mutex); 4888 /* 4889 * Lock released to allow other waiters to continue, we'll 4890 * reexamine the status again. 4891 */ 4892 mutex_lock(&fs_info->balance_mutex); 4893 4894 if (fs_info->balance_ctl) { 4895 reset_balance_state(fs_info); 4896 btrfs_exclop_finish(fs_info); 4897 btrfs_info(fs_info, "balance: canceled"); 4898 } 4899 } 4900 4901 ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4902 atomic_dec(&fs_info->balance_cancel_req); 4903 mutex_unlock(&fs_info->balance_mutex); 4904 return 0; 4905 } 4906 4907 /* 4908 * shrinking a device means finding all of the device extents past 4909 * the new size, and then following the back refs to the chunks. 
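* Any chunk with a device extent beyond new_size still occupies the area
* being removed, so it must be relocated first.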
4910 * The chunk relocation code actually frees the device extent 4911 */ 4912 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size) 4913 { 4914 struct btrfs_fs_info *fs_info = device->fs_info; 4915 struct btrfs_root *root = fs_info->dev_root; 4916 struct btrfs_trans_handle *trans; 4917 struct btrfs_dev_extent *dev_extent = NULL; 4918 struct btrfs_path *path; 4919 u64 length; 4920 u64 chunk_offset; 4921 int ret; 4922 int slot; 4923 int failed = 0; 4924 bool retried = false; 4925 struct extent_buffer *l; 4926 struct btrfs_key key; 4927 struct btrfs_super_block *super_copy = fs_info->super_copy; 4928 u64 old_total = btrfs_super_total_bytes(super_copy); 4929 u64 old_size = btrfs_device_get_total_bytes(device); 4930 u64 diff; 4931 u64 start; 4932 u64 free_diff = 0; 4933 4934 new_size = round_down(new_size, fs_info->sectorsize); 4935 start = new_size; 4936 diff = round_down(old_size - new_size, fs_info->sectorsize); 4937 4938 if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) 4939 return -EINVAL; 4940 4941 path = btrfs_alloc_path(); 4942 if (!path) 4943 return -ENOMEM; 4944 4945 path->reada = READA_BACK; 4946 4947 trans = btrfs_start_transaction(root, 0); 4948 if (IS_ERR(trans)) { 4949 btrfs_free_path(path); 4950 return PTR_ERR(trans); 4951 } 4952 4953 mutex_lock(&fs_info->chunk_mutex); 4954 4955 btrfs_device_set_total_bytes(device, new_size); 4956 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 4957 device->fs_devices->total_rw_bytes -= diff; 4958 4959 /* 4960 * The new free_chunk_space is new_size - used, so we have to 4961 * subtract the delta of the old free_chunk_space which included 4962 * old_size - used. If used > new_size then just subtract this 4963 * entire device's free space. 4964 */ 4965 if (device->bytes_used < new_size) 4966 free_diff = (old_size - device->bytes_used) - 4967 (new_size - device->bytes_used); 4968 else 4969 free_diff = old_size - device->bytes_used; 4970 atomic64_sub(free_diff, &fs_info->free_chunk_space); 4971 } 4972 4973 /* 4974 * Once the device's size has been set to the new size, ensure all 4975 * in-memory chunks are synced to disk so that the loop below sees them 4976 * and relocates them accordingly. 
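* That is why we commit the transaction below when pending extents exist
* in the shrunk range, and merely end it otherwise.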
4977 */ 4978 if (contains_pending_extent(device, &start, diff)) { 4979 mutex_unlock(&fs_info->chunk_mutex); 4980 ret = btrfs_commit_transaction(trans); 4981 if (ret) 4982 goto done; 4983 } else { 4984 mutex_unlock(&fs_info->chunk_mutex); 4985 btrfs_end_transaction(trans); 4986 } 4987 4988 again: 4989 key.objectid = device->devid; 4990 key.offset = (u64)-1; 4991 key.type = BTRFS_DEV_EXTENT_KEY; 4992 4993 do { 4994 mutex_lock(&fs_info->reclaim_bgs_lock); 4995 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 4996 if (ret < 0) { 4997 mutex_unlock(&fs_info->reclaim_bgs_lock); 4998 goto done; 4999 } 5000 5001 ret = btrfs_previous_item(root, path, 0, key.type); 5002 if (ret) { 5003 mutex_unlock(&fs_info->reclaim_bgs_lock); 5004 if (ret < 0) 5005 goto done; 5006 ret = 0; 5007 btrfs_release_path(path); 5008 break; 5009 } 5010 5011 l = path->nodes[0]; 5012 slot = path->slots[0]; 5013 btrfs_item_key_to_cpu(l, &key, path->slots[0]); 5014 5015 if (key.objectid != device->devid) { 5016 mutex_unlock(&fs_info->reclaim_bgs_lock); 5017 btrfs_release_path(path); 5018 break; 5019 } 5020 5021 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent); 5022 length = btrfs_dev_extent_length(l, dev_extent); 5023 5024 if (key.offset + length <= new_size) { 5025 mutex_unlock(&fs_info->reclaim_bgs_lock); 5026 btrfs_release_path(path); 5027 break; 5028 } 5029 5030 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent); 5031 btrfs_release_path(path); 5032 5033 /* 5034 * We may be relocating the only data chunk we have, 5035 * which could potentially end up with losing data's 5036 * raid profile, so lets allocate an empty one in 5037 * advance. 5038 */ 5039 ret = btrfs_may_alloc_data_chunk(fs_info, chunk_offset); 5040 if (ret < 0) { 5041 mutex_unlock(&fs_info->reclaim_bgs_lock); 5042 goto done; 5043 } 5044 5045 ret = btrfs_relocate_chunk(fs_info, chunk_offset); 5046 mutex_unlock(&fs_info->reclaim_bgs_lock); 5047 if (ret == -ENOSPC) { 5048 failed++; 5049 } else if (ret) { 5050 if (ret == -ETXTBSY) { 5051 btrfs_warn(fs_info, 5052 "could not shrink block group %llu due to active swapfile", 5053 chunk_offset); 5054 } 5055 goto done; 5056 } 5057 } while (key.offset-- > 0); 5058 5059 if (failed && !retried) { 5060 failed = 0; 5061 retried = true; 5062 goto again; 5063 } else if (failed && retried) { 5064 ret = -ENOSPC; 5065 goto done; 5066 } 5067 5068 /* Shrinking succeeded, else we would be at "done". */ 5069 trans = btrfs_start_transaction(root, 0); 5070 if (IS_ERR(trans)) { 5071 ret = PTR_ERR(trans); 5072 goto done; 5073 } 5074 5075 mutex_lock(&fs_info->chunk_mutex); 5076 /* Clear all state bits beyond the shrunk device size */ 5077 clear_extent_bits(&device->alloc_state, new_size, (u64)-1, 5078 CHUNK_STATE_MASK); 5079 5080 btrfs_device_set_disk_total_bytes(device, new_size); 5081 if (list_empty(&device->post_commit_list)) 5082 list_add_tail(&device->post_commit_list, 5083 &trans->transaction->dev_update_list); 5084 5085 WARN_ON(diff > old_total); 5086 btrfs_set_super_total_bytes(super_copy, 5087 round_down(old_total - diff, fs_info->sectorsize)); 5088 mutex_unlock(&fs_info->chunk_mutex); 5089 5090 btrfs_reserve_chunk_metadata(trans, false); 5091 /* Now btrfs_update_device() will change the on-disk size. 
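* The chunk metadata reserved just above is meant to cover this device
* item update in the chunk tree.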
*/ 5092 ret = btrfs_update_device(trans, device); 5093 btrfs_trans_release_chunk_metadata(trans); 5094 if (ret < 0) { 5095 btrfs_abort_transaction(trans, ret); 5096 btrfs_end_transaction(trans); 5097 } else { 5098 ret = btrfs_commit_transaction(trans); 5099 } 5100 done: 5101 btrfs_free_path(path); 5102 if (ret) { 5103 mutex_lock(&fs_info->chunk_mutex); 5104 btrfs_device_set_total_bytes(device, old_size); 5105 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 5106 device->fs_devices->total_rw_bytes += diff; 5107 atomic64_add(free_diff, &fs_info->free_chunk_space); 5108 } 5109 mutex_unlock(&fs_info->chunk_mutex); 5110 } 5111 return ret; 5112 } 5113 5114 static int btrfs_add_system_chunk(struct btrfs_fs_info *fs_info, 5115 struct btrfs_key *key, 5116 struct btrfs_chunk *chunk, int item_size) 5117 { 5118 struct btrfs_super_block *super_copy = fs_info->super_copy; 5119 struct btrfs_disk_key disk_key; 5120 u32 array_size; 5121 u8 *ptr; 5122 5123 lockdep_assert_held(&fs_info->chunk_mutex); 5124 5125 array_size = btrfs_super_sys_array_size(super_copy); 5126 if (array_size + item_size + sizeof(disk_key) 5127 > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) 5128 return -EFBIG; 5129 5130 ptr = super_copy->sys_chunk_array + array_size; 5131 btrfs_cpu_key_to_disk(&disk_key, key); 5132 memcpy(ptr, &disk_key, sizeof(disk_key)); 5133 ptr += sizeof(disk_key); 5134 memcpy(ptr, chunk, item_size); 5135 item_size += sizeof(disk_key); 5136 btrfs_set_super_sys_array_size(super_copy, array_size + item_size); 5137 5138 return 0; 5139 } 5140 5141 /* 5142 * sort the devices in descending order by max_avail, total_avail 5143 */ 5144 static int btrfs_cmp_device_info(const void *a, const void *b) 5145 { 5146 const struct btrfs_device_info *di_a = a; 5147 const struct btrfs_device_info *di_b = b; 5148 5149 if (di_a->max_avail > di_b->max_avail) 5150 return -1; 5151 if (di_a->max_avail < di_b->max_avail) 5152 return 1; 5153 if (di_a->total_avail > di_b->total_avail) 5154 return -1; 5155 if (di_a->total_avail < di_b->total_avail) 5156 return 1; 5157 return 0; 5158 } 5159 5160 static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type) 5161 { 5162 if (!(type & BTRFS_BLOCK_GROUP_RAID56_MASK)) 5163 return; 5164 5165 btrfs_set_fs_incompat(info, RAID56); 5166 } 5167 5168 static void check_raid1c34_incompat_flag(struct btrfs_fs_info *info, u64 type) 5169 { 5170 if (!(type & (BTRFS_BLOCK_GROUP_RAID1C3 | BTRFS_BLOCK_GROUP_RAID1C4))) 5171 return; 5172 5173 btrfs_set_fs_incompat(info, RAID1C34); 5174 } 5175 5176 /* 5177 * Structure used internally for btrfs_create_chunk() function. 5178 * Wraps needed parameters. 
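* Typical flow: init_alloc_chunk_ctl(), gather_device_info(),
* decide_stripe_size() and finally create_chunk().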
5179 */ 5180 struct alloc_chunk_ctl { 5181 u64 start; 5182 u64 type; 5183 /* Total number of stripes to allocate */ 5184 int num_stripes; 5185 /* sub_stripes info for map */ 5186 int sub_stripes; 5187 /* Stripes per device */ 5188 int dev_stripes; 5189 /* Maximum number of devices to use */ 5190 int devs_max; 5191 /* Minimum number of devices to use */ 5192 int devs_min; 5193 /* ndevs has to be a multiple of this */ 5194 int devs_increment; 5195 /* Number of copies */ 5196 int ncopies; 5197 /* Number of stripes worth of bytes to store parity information */ 5198 int nparity; 5199 u64 max_stripe_size; 5200 u64 max_chunk_size; 5201 u64 dev_extent_min; 5202 u64 stripe_size; 5203 u64 chunk_size; 5204 int ndevs; 5205 }; 5206 5207 static void init_alloc_chunk_ctl_policy_regular( 5208 struct btrfs_fs_devices *fs_devices, 5209 struct alloc_chunk_ctl *ctl) 5210 { 5211 struct btrfs_space_info *space_info; 5212 5213 space_info = btrfs_find_space_info(fs_devices->fs_info, ctl->type); 5214 ASSERT(space_info); 5215 5216 ctl->max_chunk_size = READ_ONCE(space_info->chunk_size); 5217 ctl->max_stripe_size = min_t(u64, ctl->max_chunk_size, SZ_1G); 5218 5219 if (ctl->type & BTRFS_BLOCK_GROUP_SYSTEM) 5220 ctl->devs_max = min_t(int, ctl->devs_max, BTRFS_MAX_DEVS_SYS_CHUNK); 5221 5222 /* We don't want a chunk larger than 10% of writable space */ 5223 ctl->max_chunk_size = min(mult_perc(fs_devices->total_rw_bytes, 10), 5224 ctl->max_chunk_size); 5225 ctl->dev_extent_min = btrfs_stripe_nr_to_offset(ctl->dev_stripes); 5226 } 5227 5228 static void init_alloc_chunk_ctl_policy_zoned( 5229 struct btrfs_fs_devices *fs_devices, 5230 struct alloc_chunk_ctl *ctl) 5231 { 5232 u64 zone_size = fs_devices->fs_info->zone_size; 5233 u64 limit; 5234 int min_num_stripes = ctl->devs_min * ctl->dev_stripes; 5235 int min_data_stripes = (min_num_stripes - ctl->nparity) / ctl->ncopies; 5236 u64 min_chunk_size = min_data_stripes * zone_size; 5237 u64 type = ctl->type; 5238 5239 ctl->max_stripe_size = zone_size; 5240 if (type & BTRFS_BLOCK_GROUP_DATA) { 5241 ctl->max_chunk_size = round_down(BTRFS_MAX_DATA_CHUNK_SIZE, 5242 zone_size); 5243 } else if (type & BTRFS_BLOCK_GROUP_METADATA) { 5244 ctl->max_chunk_size = ctl->max_stripe_size; 5245 } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) { 5246 ctl->max_chunk_size = 2 * ctl->max_stripe_size; 5247 ctl->devs_max = min_t(int, ctl->devs_max, 5248 BTRFS_MAX_DEVS_SYS_CHUNK); 5249 } else { 5250 BUG(); 5251 } 5252 5253 /* We don't want a chunk larger than 10% of writable space */ 5254 limit = max(round_down(mult_perc(fs_devices->total_rw_bytes, 10), 5255 zone_size), 5256 min_chunk_size); 5257 ctl->max_chunk_size = min(limit, ctl->max_chunk_size); 5258 ctl->dev_extent_min = zone_size * ctl->dev_stripes; 5259 } 5260 5261 static void init_alloc_chunk_ctl(struct btrfs_fs_devices *fs_devices, 5262 struct alloc_chunk_ctl *ctl) 5263 { 5264 int index = btrfs_bg_flags_to_raid_index(ctl->type); 5265 5266 ctl->sub_stripes = btrfs_raid_array[index].sub_stripes; 5267 ctl->dev_stripes = btrfs_raid_array[index].dev_stripes; 5268 ctl->devs_max = btrfs_raid_array[index].devs_max; 5269 if (!ctl->devs_max) 5270 ctl->devs_max = BTRFS_MAX_DEVS(fs_devices->fs_info); 5271 ctl->devs_min = btrfs_raid_array[index].devs_min; 5272 ctl->devs_increment = btrfs_raid_array[index].devs_increment; 5273 ctl->ncopies = btrfs_raid_array[index].ncopies; 5274 ctl->nparity = btrfs_raid_array[index].nparity; 5275 ctl->ndevs = 0; 5276 5277 switch (fs_devices->chunk_alloc_policy) { 5278 case BTRFS_CHUNK_ALLOC_REGULAR: 5279 
init_alloc_chunk_ctl_policy_regular(fs_devices, ctl); 5280 break; 5281 case BTRFS_CHUNK_ALLOC_ZONED: 5282 init_alloc_chunk_ctl_policy_zoned(fs_devices, ctl); 5283 break; 5284 default: 5285 BUG(); 5286 } 5287 } 5288 5289 static int gather_device_info(struct btrfs_fs_devices *fs_devices, 5290 struct alloc_chunk_ctl *ctl, 5291 struct btrfs_device_info *devices_info) 5292 { 5293 struct btrfs_fs_info *info = fs_devices->fs_info; 5294 struct btrfs_device *device; 5295 u64 total_avail; 5296 u64 dev_extent_want = ctl->max_stripe_size * ctl->dev_stripes; 5297 int ret; 5298 int ndevs = 0; 5299 u64 max_avail; 5300 u64 dev_offset; 5301 5302 /* 5303 * in the first pass through the devices list, we gather information 5304 * about the available holes on each device. 5305 */ 5306 list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) { 5307 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 5308 WARN(1, KERN_ERR 5309 "BTRFS: read-only device in alloc_list\n"); 5310 continue; 5311 } 5312 5313 if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, 5314 &device->dev_state) || 5315 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) 5316 continue; 5317 5318 if (device->total_bytes > device->bytes_used) 5319 total_avail = device->total_bytes - device->bytes_used; 5320 else 5321 total_avail = 0; 5322 5323 /* If there is no space on this device, skip it. */ 5324 if (total_avail < ctl->dev_extent_min) 5325 continue; 5326 5327 ret = find_free_dev_extent(device, dev_extent_want, &dev_offset, 5328 &max_avail); 5329 if (ret && ret != -ENOSPC) 5330 return ret; 5331 5332 if (ret == 0) 5333 max_avail = dev_extent_want; 5334 5335 if (max_avail < ctl->dev_extent_min) { 5336 if (btrfs_test_opt(info, ENOSPC_DEBUG)) 5337 btrfs_debug(info, 5338 "%s: devid %llu has no free space, have=%llu want=%llu", 5339 __func__, device->devid, max_avail, 5340 ctl->dev_extent_min); 5341 continue; 5342 } 5343 5344 if (ndevs == fs_devices->rw_devices) { 5345 WARN(1, "%s: found more than %llu devices\n", 5346 __func__, fs_devices->rw_devices); 5347 break; 5348 } 5349 devices_info[ndevs].dev_offset = dev_offset; 5350 devices_info[ndevs].max_avail = max_avail; 5351 devices_info[ndevs].total_avail = total_avail; 5352 devices_info[ndevs].dev = device; 5353 ++ndevs; 5354 } 5355 ctl->ndevs = ndevs; 5356 5357 /* 5358 * now sort the devices by hole size / available space 5359 */ 5360 sort(devices_info, ndevs, sizeof(struct btrfs_device_info), 5361 btrfs_cmp_device_info, NULL); 5362 5363 return 0; 5364 } 5365 5366 static int decide_stripe_size_regular(struct alloc_chunk_ctl *ctl, 5367 struct btrfs_device_info *devices_info) 5368 { 5369 /* Number of stripes that count for block group size */ 5370 int data_stripes; 5371 5372 /* 5373 * The primary goal is to maximize the number of stripes, so use as 5374 * many devices as possible, even if the stripes are not maximum sized. 5375 * 5376 * The DUP profile stores more than one stripe per device, the 5377 * max_avail is the total size so we have to adjust. 5378 */ 5379 ctl->stripe_size = div_u64(devices_info[ctl->ndevs - 1].max_avail, 5380 ctl->dev_stripes); 5381 ctl->num_stripes = ctl->ndevs * ctl->dev_stripes; 5382 5383 /* This will have to be fixed for RAID1 and RAID10 over more drives */ 5384 data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies; 5385 5386 /* 5387 * Use the number of data stripes to figure out how big this chunk is 5388 * really going to be in terms of logical address space, and compare 5389 * that answer with the max chunk size. 
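* (E.g. RAID6 over 6 devices: num_stripes = 6 and nparity = 2, so 4 data
* stripes back stripe_size * 4 bytes of logical address space.)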
If it's higher, we try to 5390 * reduce stripe_size. 5391 */ 5392 if (ctl->stripe_size * data_stripes > ctl->max_chunk_size) { 5393 /* 5394 * Reduce stripe_size, round it up to a 16MB boundary again and 5395 * then use it, unless it ends up being even bigger than the 5396 * previous value we had already. 5397 */ 5398 ctl->stripe_size = min(round_up(div_u64(ctl->max_chunk_size, 5399 data_stripes), SZ_16M), 5400 ctl->stripe_size); 5401 } 5402 5403 /* Stripe size should not go beyond 1G. */ 5404 ctl->stripe_size = min_t(u64, ctl->stripe_size, SZ_1G); 5405 5406 /* Align to BTRFS_STRIPE_LEN */ 5407 ctl->stripe_size = round_down(ctl->stripe_size, BTRFS_STRIPE_LEN); 5408 ctl->chunk_size = ctl->stripe_size * data_stripes; 5409 5410 return 0; 5411 } 5412 5413 static int decide_stripe_size_zoned(struct alloc_chunk_ctl *ctl, 5414 struct btrfs_device_info *devices_info) 5415 { 5416 u64 zone_size = devices_info[0].dev->zone_info->zone_size; 5417 /* Number of stripes that count for block group size */ 5418 int data_stripes; 5419 5420 /* 5421 * It should hold because: 5422 * dev_extent_min == dev_extent_want == zone_size * dev_stripes 5423 */ 5424 ASSERT(devices_info[ctl->ndevs - 1].max_avail == ctl->dev_extent_min); 5425 5426 ctl->stripe_size = zone_size; 5427 ctl->num_stripes = ctl->ndevs * ctl->dev_stripes; 5428 data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies; 5429 5430 /* stripe_size is fixed in zoned filesystem. Reduce ndevs instead. */ 5431 if (ctl->stripe_size * data_stripes > ctl->max_chunk_size) { 5432 ctl->ndevs = div_u64(div_u64(ctl->max_chunk_size * ctl->ncopies, 5433 ctl->stripe_size) + ctl->nparity, 5434 ctl->dev_stripes); 5435 ctl->num_stripes = ctl->ndevs * ctl->dev_stripes; 5436 data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies; 5437 ASSERT(ctl->stripe_size * data_stripes <= ctl->max_chunk_size); 5438 } 5439 5440 ctl->chunk_size = ctl->stripe_size * data_stripes; 5441 5442 return 0; 5443 } 5444 5445 static int decide_stripe_size(struct btrfs_fs_devices *fs_devices, 5446 struct alloc_chunk_ctl *ctl, 5447 struct btrfs_device_info *devices_info) 5448 { 5449 struct btrfs_fs_info *info = fs_devices->fs_info; 5450 5451 /* 5452 * Round down to number of usable stripes, devs_increment can be any 5453 * number so we can't use round_down() that requires power of 2, while 5454 * rounddown is safe. 
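* E.g. RAID10 has devs_increment == 2, so with 5 usable devices we round
* down to 4.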
5455 */ 5456 ctl->ndevs = rounddown(ctl->ndevs, ctl->devs_increment); 5457 5458 if (ctl->ndevs < ctl->devs_min) { 5459 if (btrfs_test_opt(info, ENOSPC_DEBUG)) { 5460 btrfs_debug(info, 5461 "%s: not enough devices with free space: have=%d minimum required=%d", 5462 __func__, ctl->ndevs, ctl->devs_min); 5463 } 5464 return -ENOSPC; 5465 } 5466 5467 ctl->ndevs = min(ctl->ndevs, ctl->devs_max); 5468 5469 switch (fs_devices->chunk_alloc_policy) { 5470 case BTRFS_CHUNK_ALLOC_REGULAR: 5471 return decide_stripe_size_regular(ctl, devices_info); 5472 case BTRFS_CHUNK_ALLOC_ZONED: 5473 return decide_stripe_size_zoned(ctl, devices_info); 5474 default: 5475 BUG(); 5476 } 5477 } 5478 5479 static void chunk_map_device_set_bits(struct btrfs_chunk_map *map, unsigned int bits) 5480 { 5481 for (int i = 0; i < map->num_stripes; i++) { 5482 struct btrfs_io_stripe *stripe = &map->stripes[i]; 5483 struct btrfs_device *device = stripe->dev; 5484 5485 set_extent_bit(&device->alloc_state, stripe->physical, 5486 stripe->physical + map->stripe_size - 1, 5487 bits | EXTENT_NOWAIT, NULL); 5488 } 5489 } 5490 5491 static void chunk_map_device_clear_bits(struct btrfs_chunk_map *map, unsigned int bits) 5492 { 5493 for (int i = 0; i < map->num_stripes; i++) { 5494 struct btrfs_io_stripe *stripe = &map->stripes[i]; 5495 struct btrfs_device *device = stripe->dev; 5496 5497 __clear_extent_bit(&device->alloc_state, stripe->physical, 5498 stripe->physical + map->stripe_size - 1, 5499 bits | EXTENT_NOWAIT, 5500 NULL, NULL); 5501 } 5502 } 5503 5504 void btrfs_remove_chunk_map(struct btrfs_fs_info *fs_info, struct btrfs_chunk_map *map) 5505 { 5506 write_lock(&fs_info->mapping_tree_lock); 5507 rb_erase_cached(&map->rb_node, &fs_info->mapping_tree); 5508 RB_CLEAR_NODE(&map->rb_node); 5509 chunk_map_device_clear_bits(map, CHUNK_ALLOCATED); 5510 write_unlock(&fs_info->mapping_tree_lock); 5511 5512 /* Once for the tree reference. 
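* Other holders may still have their own references, so this does not
* necessarily free the map.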
*/ 5513 btrfs_free_chunk_map(map); 5514 } 5515 5516 EXPORT_FOR_TESTS 5517 int btrfs_add_chunk_map(struct btrfs_fs_info *fs_info, struct btrfs_chunk_map *map) 5518 { 5519 struct rb_node **p; 5520 struct rb_node *parent = NULL; 5521 bool leftmost = true; 5522 5523 write_lock(&fs_info->mapping_tree_lock); 5524 p = &fs_info->mapping_tree.rb_root.rb_node; 5525 while (*p) { 5526 struct btrfs_chunk_map *entry; 5527 5528 parent = *p; 5529 entry = rb_entry(parent, struct btrfs_chunk_map, rb_node); 5530 5531 if (map->start < entry->start) { 5532 p = &(*p)->rb_left; 5533 } else if (map->start > entry->start) { 5534 p = &(*p)->rb_right; 5535 leftmost = false; 5536 } else { 5537 write_unlock(&fs_info->mapping_tree_lock); 5538 return -EEXIST; 5539 } 5540 } 5541 rb_link_node(&map->rb_node, parent, p); 5542 rb_insert_color_cached(&map->rb_node, &fs_info->mapping_tree, leftmost); 5543 chunk_map_device_set_bits(map, CHUNK_ALLOCATED); 5544 chunk_map_device_clear_bits(map, CHUNK_TRIMMED); 5545 write_unlock(&fs_info->mapping_tree_lock); 5546 5547 return 0; 5548 } 5549 5550 EXPORT_FOR_TESTS 5551 struct btrfs_chunk_map *btrfs_alloc_chunk_map(int num_stripes, gfp_t gfp) 5552 { 5553 struct btrfs_chunk_map *map; 5554 5555 map = kmalloc(btrfs_chunk_map_size(num_stripes), gfp); 5556 if (!map) 5557 return NULL; 5558 5559 refcount_set(&map->refs, 1); 5560 RB_CLEAR_NODE(&map->rb_node); 5561 5562 return map; 5563 } 5564 5565 static struct btrfs_block_group *create_chunk(struct btrfs_trans_handle *trans, 5566 struct alloc_chunk_ctl *ctl, 5567 struct btrfs_device_info *devices_info) 5568 { 5569 struct btrfs_fs_info *info = trans->fs_info; 5570 struct btrfs_chunk_map *map; 5571 struct btrfs_block_group *block_group; 5572 u64 start = ctl->start; 5573 u64 type = ctl->type; 5574 int ret; 5575 5576 map = btrfs_alloc_chunk_map(ctl->num_stripes, GFP_NOFS); 5577 if (!map) 5578 return ERR_PTR(-ENOMEM); 5579 5580 map->start = start; 5581 map->chunk_len = ctl->chunk_size; 5582 map->stripe_size = ctl->stripe_size; 5583 map->type = type; 5584 map->io_align = BTRFS_STRIPE_LEN; 5585 map->io_width = BTRFS_STRIPE_LEN; 5586 map->sub_stripes = ctl->sub_stripes; 5587 map->num_stripes = ctl->num_stripes; 5588 5589 for (int i = 0; i < ctl->ndevs; i++) { 5590 for (int j = 0; j < ctl->dev_stripes; j++) { 5591 int s = i * ctl->dev_stripes + j; 5592 map->stripes[s].dev = devices_info[i].dev; 5593 map->stripes[s].physical = devices_info[i].dev_offset + 5594 j * ctl->stripe_size; 5595 } 5596 } 5597 5598 trace_btrfs_chunk_alloc(info, map, start, ctl->chunk_size); 5599 5600 ret = btrfs_add_chunk_map(info, map); 5601 if (ret) { 5602 btrfs_free_chunk_map(map); 5603 return ERR_PTR(ret); 5604 } 5605 5606 block_group = btrfs_make_block_group(trans, type, start, ctl->chunk_size); 5607 if (IS_ERR(block_group)) { 5608 btrfs_remove_chunk_map(info, map); 5609 return block_group; 5610 } 5611 5612 for (int i = 0; i < map->num_stripes; i++) { 5613 struct btrfs_device *dev = map->stripes[i].dev; 5614 5615 btrfs_device_set_bytes_used(dev, 5616 dev->bytes_used + ctl->stripe_size); 5617 if (list_empty(&dev->post_commit_list)) 5618 list_add_tail(&dev->post_commit_list, 5619 &trans->transaction->dev_update_list); 5620 } 5621 5622 atomic64_sub(ctl->stripe_size * map->num_stripes, 5623 &info->free_chunk_space); 5624 5625 check_raid56_incompat_flag(info, type); 5626 check_raid1c34_incompat_flag(info, type); 5627 5628 return block_group; 5629 } 5630 5631 struct btrfs_block_group *btrfs_create_chunk(struct btrfs_trans_handle *trans, 5632 u64 type) 5633 { 5634 struct 
btrfs_fs_info *info = trans->fs_info; 5635 struct btrfs_fs_devices *fs_devices = info->fs_devices; 5636 struct btrfs_device_info *devices_info = NULL; 5637 struct alloc_chunk_ctl ctl; 5638 struct btrfs_block_group *block_group; 5639 int ret; 5640 5641 lockdep_assert_held(&info->chunk_mutex); 5642 5643 if (!alloc_profile_is_valid(type, 0)) { 5644 ASSERT(0); 5645 return ERR_PTR(-EINVAL); 5646 } 5647 5648 if (list_empty(&fs_devices->alloc_list)) { 5649 if (btrfs_test_opt(info, ENOSPC_DEBUG)) 5650 btrfs_debug(info, "%s: no writable device", __func__); 5651 return ERR_PTR(-ENOSPC); 5652 } 5653 5654 if (!(type & BTRFS_BLOCK_GROUP_TYPE_MASK)) { 5655 btrfs_err(info, "invalid chunk type 0x%llx requested", type); 5656 ASSERT(0); 5657 return ERR_PTR(-EINVAL); 5658 } 5659 5660 ctl.start = find_next_chunk(info); 5661 ctl.type = type; 5662 init_alloc_chunk_ctl(fs_devices, &ctl); 5663 5664 devices_info = kcalloc(fs_devices->rw_devices, sizeof(*devices_info), 5665 GFP_NOFS); 5666 if (!devices_info) 5667 return ERR_PTR(-ENOMEM); 5668 5669 ret = gather_device_info(fs_devices, &ctl, devices_info); 5670 if (ret < 0) { 5671 block_group = ERR_PTR(ret); 5672 goto out; 5673 } 5674 5675 ret = decide_stripe_size(fs_devices, &ctl, devices_info); 5676 if (ret < 0) { 5677 block_group = ERR_PTR(ret); 5678 goto out; 5679 } 5680 5681 block_group = create_chunk(trans, &ctl, devices_info); 5682 5683 out: 5684 kfree(devices_info); 5685 return block_group; 5686 } 5687 5688 /* 5689 * This function, btrfs_chunk_alloc_add_chunk_item(), typically belongs to 5690 * phase 1 of chunk allocation. It belongs to phase 2 only when allocating system 5691 * chunks. 5692 * 5693 * See the comment at btrfs_chunk_alloc() for details about the chunk allocation 5694 * phases. 5695 */ 5696 int btrfs_chunk_alloc_add_chunk_item(struct btrfs_trans_handle *trans, 5697 struct btrfs_block_group *bg) 5698 { 5699 struct btrfs_fs_info *fs_info = trans->fs_info; 5700 struct btrfs_root *chunk_root = fs_info->chunk_root; 5701 struct btrfs_key key; 5702 struct btrfs_chunk *chunk; 5703 struct btrfs_stripe *stripe; 5704 struct btrfs_chunk_map *map; 5705 size_t item_size; 5706 int i; 5707 int ret; 5708 5709 /* 5710 * We take the chunk_mutex for 2 reasons: 5711 * 5712 * 1) Updates and insertions in the chunk btree must be done while holding 5713 * the chunk_mutex, as well as updating the system chunk array in the 5714 * superblock. See the comment on top of btrfs_chunk_alloc() for the 5715 * details; 5716 * 5717 * 2) To prevent races with the final phase of a device replace operation 5718 * that replaces the device object associated with the map's stripes, 5719 * because the device object's id can change at any time during that 5720 * final phase of the device replace operation 5721 * (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the 5722 * replaced device and then see it with an ID of BTRFS_DEV_REPLACE_DEVID, 5723 * which would cause a failure when updating the device item, which does 5724 * not exist, or persisting a stripe of the chunk item with such ID. 5725 * Here we can't use the device_list_mutex because our caller already 5726 * has locked the chunk_mutex, and the final phase of device replace 5727 * acquires both mutexes - first the device_list_mutex and then the 5728 * chunk_mutex. Using either of those two mutexes protects us from a 5729 * concurrent device replace.
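* (Taking the device_list_mutex here, with the chunk_mutex already held,
* would also invert that lock order and risk a deadlock.)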
5730 */ 5731 lockdep_assert_held(&fs_info->chunk_mutex); 5732 5733 map = btrfs_get_chunk_map(fs_info, bg->start, bg->length); 5734 if (IS_ERR(map)) { 5735 ret = PTR_ERR(map); 5736 btrfs_abort_transaction(trans, ret); 5737 return ret; 5738 } 5739 5740 item_size = btrfs_chunk_item_size(map->num_stripes); 5741 5742 chunk = kzalloc(item_size, GFP_NOFS); 5743 if (!chunk) { 5744 ret = -ENOMEM; 5745 btrfs_abort_transaction(trans, ret); 5746 goto out; 5747 } 5748 5749 for (i = 0; i < map->num_stripes; i++) { 5750 struct btrfs_device *device = map->stripes[i].dev; 5751 5752 ret = btrfs_update_device(trans, device); 5753 if (ret) 5754 goto out; 5755 } 5756 5757 stripe = &chunk->stripe; 5758 for (i = 0; i < map->num_stripes; i++) { 5759 struct btrfs_device *device = map->stripes[i].dev; 5760 const u64 dev_offset = map->stripes[i].physical; 5761 5762 btrfs_set_stack_stripe_devid(stripe, device->devid); 5763 btrfs_set_stack_stripe_offset(stripe, dev_offset); 5764 memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE); 5765 stripe++; 5766 } 5767 5768 btrfs_set_stack_chunk_length(chunk, bg->length); 5769 btrfs_set_stack_chunk_owner(chunk, BTRFS_EXTENT_TREE_OBJECTID); 5770 btrfs_set_stack_chunk_stripe_len(chunk, BTRFS_STRIPE_LEN); 5771 btrfs_set_stack_chunk_type(chunk, map->type); 5772 btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes); 5773 btrfs_set_stack_chunk_io_align(chunk, BTRFS_STRIPE_LEN); 5774 btrfs_set_stack_chunk_io_width(chunk, BTRFS_STRIPE_LEN); 5775 btrfs_set_stack_chunk_sector_size(chunk, fs_info->sectorsize); 5776 btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes); 5777 5778 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 5779 key.type = BTRFS_CHUNK_ITEM_KEY; 5780 key.offset = bg->start; 5781 5782 ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size); 5783 if (ret) 5784 goto out; 5785 5786 set_bit(BLOCK_GROUP_FLAG_CHUNK_ITEM_INSERTED, &bg->runtime_flags); 5787 5788 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) { 5789 ret = btrfs_add_system_chunk(fs_info, &key, chunk, item_size); 5790 if (ret) 5791 goto out; 5792 } 5793 5794 out: 5795 kfree(chunk); 5796 btrfs_free_chunk_map(map); 5797 return ret; 5798 } 5799 5800 static noinline int init_first_rw_device(struct btrfs_trans_handle *trans) 5801 { 5802 struct btrfs_fs_info *fs_info = trans->fs_info; 5803 u64 alloc_profile; 5804 struct btrfs_block_group *meta_bg; 5805 struct btrfs_block_group *sys_bg; 5806 5807 /* 5808 * When adding a new device for sprouting, the seed device is read-only 5809 * so we must first allocate a metadata and a system chunk. But before 5810 * adding the block group items to the extent, device and chunk btrees, 5811 * we must first: 5812 * 5813 * 1) Create both chunks without doing any changes to the btrees, as 5814 * otherwise we would get -ENOSPC since the block groups from the 5815 * seed device are read-only; 5816 * 5817 * 2) Add the device item for the new sprout device - finishing the setup 5818 * of a new block group requires updating the device item in the chunk 5819 * btree, so it must exist when we attempt to do it. The previous step 5820 * ensures this does not fail with -ENOSPC. 5821 * 5822 * After that we can add the block group items to their btrees: 5823 * update existing device item in the chunk btree, add a new block group 5824 * item to the extent btree, add a new chunk item to the chunk btree and 5825 * finally add the new device extent items to the devices btree. 
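 *
 * (In code terms, step 1 is the two btrfs_create_chunk() calls below;
 * the block group items themselves are inserted later, during
 * transaction commit, via btrfs_create_pending_block_groups() - phase
 * 2 of chunk allocation.)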
5826 */ 5827 5828 alloc_profile = btrfs_metadata_alloc_profile(fs_info); 5829 meta_bg = btrfs_create_chunk(trans, alloc_profile); 5830 if (IS_ERR(meta_bg)) 5831 return PTR_ERR(meta_bg); 5832 5833 alloc_profile = btrfs_system_alloc_profile(fs_info); 5834 sys_bg = btrfs_create_chunk(trans, alloc_profile); 5835 if (IS_ERR(sys_bg)) 5836 return PTR_ERR(sys_bg); 5837 5838 return 0; 5839 } 5840 5841 static inline int btrfs_chunk_max_errors(struct btrfs_chunk_map *map) 5842 { 5843 const int index = btrfs_bg_flags_to_raid_index(map->type); 5844 5845 return btrfs_raid_array[index].tolerated_failures; 5846 } 5847 5848 bool btrfs_chunk_writeable(struct btrfs_fs_info *fs_info, u64 chunk_offset) 5849 { 5850 struct btrfs_chunk_map *map; 5851 int miss_ndevs = 0; 5852 int i; 5853 bool ret = true; 5854 5855 map = btrfs_get_chunk_map(fs_info, chunk_offset, 1); 5856 if (IS_ERR(map)) 5857 return false; 5858 5859 for (i = 0; i < map->num_stripes; i++) { 5860 if (test_bit(BTRFS_DEV_STATE_MISSING, 5861 &map->stripes[i].dev->dev_state)) { 5862 miss_ndevs++; 5863 continue; 5864 } 5865 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, 5866 &map->stripes[i].dev->dev_state)) { 5867 ret = false; 5868 goto end; 5869 } 5870 } 5871 5872 /* 5873 * If the number of missing devices is larger than max errors, we 5874 * cannot write the data into that chunk successfully. 5875 */ 5876 if (miss_ndevs > btrfs_chunk_max_errors(map)) 5877 ret = false; 5878 end: 5879 btrfs_free_chunk_map(map); 5880 return ret; 5881 } 5882 5883 void btrfs_mapping_tree_free(struct btrfs_fs_info *fs_info) 5884 { 5885 write_lock(&fs_info->mapping_tree_lock); 5886 while (!RB_EMPTY_ROOT(&fs_info->mapping_tree.rb_root)) { 5887 struct btrfs_chunk_map *map; 5888 struct rb_node *node; 5889 5890 node = rb_first_cached(&fs_info->mapping_tree); 5891 map = rb_entry(node, struct btrfs_chunk_map, rb_node); 5892 rb_erase_cached(&map->rb_node, &fs_info->mapping_tree); 5893 RB_CLEAR_NODE(&map->rb_node); 5894 chunk_map_device_clear_bits(map, CHUNK_ALLOCATED); 5895 /* Once for the tree ref. */ 5896 btrfs_free_chunk_map(map); 5897 cond_resched_rwlock_write(&fs_info->mapping_tree_lock); 5898 } 5899 write_unlock(&fs_info->mapping_tree_lock); 5900 } 5901 5902 static int btrfs_chunk_map_num_copies(const struct btrfs_chunk_map *map) 5903 { 5904 enum btrfs_raid_types index = btrfs_bg_flags_to_raid_index(map->type); 5905 5906 if (map->type & BTRFS_BLOCK_GROUP_RAID5) 5907 return 2; 5908 5909 /* 5910 * There could be two corrupted data stripes, so we need to retry in a 5911 * loop in order to rebuild the correct data. 5912 * 5913 * Fail a stripe at a time on every retry except the stripe under 5914 * reconstruction. 5915 */ 5916 if (map->type & BTRFS_BLOCK_GROUP_RAID6) 5917 return map->num_stripes; 5918 5919 /* Non-RAID56, use their ncopies from btrfs_raid_array. */ 5920 return btrfs_raid_array[index].ncopies; 5921 } 5922 5923 int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len) 5924 { 5925 struct btrfs_chunk_map *map; 5926 int ret; 5927 5928 map = btrfs_get_chunk_map(fs_info, logical, len); 5929 if (IS_ERR(map)) 5930 /* 5931 * We could return errors for these cases, but that could get 5932 * ugly and we'd probably end up doing the same thing, which is 5933 * nothing but exit. So return 1 so the callers don't try 5934 * to use other copies.
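 *
 * (A failed chunk map lookup here means the logical range is not
 * mapped at all, so there is no alternative copy to try anyway.)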
5935 */ 5936 return 1; 5937 5938 ret = btrfs_chunk_map_num_copies(map); 5939 btrfs_free_chunk_map(map); 5940 return ret; 5941 } 5942 5943 unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info, 5944 u64 logical) 5945 { 5946 struct btrfs_chunk_map *map; 5947 unsigned long len = fs_info->sectorsize; 5948 5949 if (!btrfs_fs_incompat(fs_info, RAID56)) 5950 return len; 5951 5952 map = btrfs_get_chunk_map(fs_info, logical, len); 5953 5954 if (!WARN_ON(IS_ERR(map))) { 5955 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) 5956 len = btrfs_stripe_nr_to_offset(nr_data_stripes(map)); 5957 btrfs_free_chunk_map(map); 5958 } 5959 return len; 5960 } 5961 5962 static int find_live_mirror(struct btrfs_fs_info *fs_info, 5963 struct btrfs_chunk_map *map, int first, 5964 int dev_replace_is_ongoing) 5965 { 5966 const enum btrfs_read_policy policy = READ_ONCE(fs_info->fs_devices->read_policy); 5967 int i; 5968 int num_stripes; 5969 int preferred_mirror; 5970 int tolerance; 5971 struct btrfs_device *srcdev; 5972 5973 ASSERT((map->type & 5974 (BTRFS_BLOCK_GROUP_RAID1_MASK | BTRFS_BLOCK_GROUP_RAID10))); 5975 5976 if (map->type & BTRFS_BLOCK_GROUP_RAID10) 5977 num_stripes = map->sub_stripes; 5978 else 5979 num_stripes = map->num_stripes; 5980 5981 switch (policy) { 5982 default: 5983 /* Shouldn't happen, just warn and use pid instead of failing */ 5984 btrfs_warn_rl(fs_info, "unknown read_policy type %u, reset to pid", 5985 policy); 5986 WRITE_ONCE(fs_info->fs_devices->read_policy, BTRFS_READ_POLICY_PID); 5987 fallthrough; 5988 case BTRFS_READ_POLICY_PID: 5989 preferred_mirror = first + (current->pid % num_stripes); 5990 break; 5991 } 5992 5993 if (dev_replace_is_ongoing && 5994 fs_info->dev_replace.cont_reading_from_srcdev_mode == 5995 BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID) 5996 srcdev = fs_info->dev_replace.srcdev; 5997 else 5998 srcdev = NULL; 5999 6000 /* 6001 * try to avoid the drive that is the source drive for a 6002 * dev-replace procedure, only choose it if no other non-missing 6003 * mirror is available 6004 */ 6005 for (tolerance = 0; tolerance < 2; tolerance++) { 6006 if (map->stripes[preferred_mirror].dev->bdev && 6007 (tolerance || map->stripes[preferred_mirror].dev != srcdev)) 6008 return preferred_mirror; 6009 for (i = first; i < first + num_stripes; i++) { 6010 if (map->stripes[i].dev->bdev && 6011 (tolerance || map->stripes[i].dev != srcdev)) 6012 return i; 6013 } 6014 } 6015 6016 /* we couldn't find one that doesn't fail. 
Just return something 6017 * and the io error handling code will clean up eventually 6018 */ 6019 return preferred_mirror; 6020 } 6021 6022 EXPORT_FOR_TESTS 6023 struct btrfs_io_context *alloc_btrfs_io_context(struct btrfs_fs_info *fs_info, 6024 u64 logical, u16 total_stripes) 6025 { 6026 struct btrfs_io_context *bioc; 6027 6028 bioc = kzalloc( 6029 /* The size of btrfs_io_context */ 6030 sizeof(struct btrfs_io_context) + 6031 /* Plus the variable array for the stripes */ 6032 sizeof(struct btrfs_io_stripe) * (total_stripes), 6033 GFP_NOFS); 6034 6035 if (!bioc) 6036 return NULL; 6037 6038 refcount_set(&bioc->refs, 1); 6039 6040 bioc->fs_info = fs_info; 6041 bioc->replace_stripe_src = -1; 6042 bioc->full_stripe_logical = (u64)-1; 6043 bioc->logical = logical; 6044 6045 return bioc; 6046 } 6047 6048 void btrfs_get_bioc(struct btrfs_io_context *bioc) 6049 { 6050 WARN_ON(!refcount_read(&bioc->refs)); 6051 refcount_inc(&bioc->refs); 6052 } 6053 6054 void btrfs_put_bioc(struct btrfs_io_context *bioc) 6055 { 6056 if (!bioc) 6057 return; 6058 if (refcount_dec_and_test(&bioc->refs)) 6059 kfree(bioc); 6060 } 6061 6062 /* 6063 * Note that discard won't be sent to the target device of a device 6064 * replace. 6065 */ 6066 struct btrfs_discard_stripe *btrfs_map_discard(struct btrfs_fs_info *fs_info, 6067 u64 logical, u64 *length_ret, 6068 u32 *num_stripes) 6069 { 6070 struct btrfs_chunk_map *map; 6071 struct btrfs_discard_stripe *stripes; 6072 u64 length = *length_ret; 6073 u64 offset; 6074 u32 stripe_nr; 6075 u32 stripe_nr_end; 6076 u32 stripe_cnt; 6077 u64 stripe_end_offset; 6078 u64 stripe_offset; 6079 u32 stripe_index; 6080 u32 factor = 0; 6081 u32 sub_stripes = 0; 6082 u32 stripes_per_dev = 0; 6083 u32 remaining_stripes = 0; 6084 u32 last_stripe = 0; 6085 int ret; 6086 int i; 6087 6088 map = btrfs_get_chunk_map(fs_info, logical, length); 6089 if (IS_ERR(map)) 6090 return ERR_CAST(map); 6091 6092 /* we don't discard raid56 yet */ 6093 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { 6094 ret = -EOPNOTSUPP; 6095 goto out_free_map; 6096 } 6097 6098 offset = logical - map->start; 6099 length = min_t(u64, map->start + map->chunk_len - logical, length); 6100 *length_ret = length; 6101 6102 /* 6103 * stripe_nr counts the total number of stripes we have to stride 6104 * to get to this block 6105 */ 6106 stripe_nr = offset >> BTRFS_STRIPE_LEN_SHIFT; 6107 6108 /* stripe_offset is the offset of this block in its stripe */ 6109 stripe_offset = offset - btrfs_stripe_nr_to_offset(stripe_nr); 6110 6111 stripe_nr_end = round_up(offset + length, BTRFS_STRIPE_LEN) >> 6112 BTRFS_STRIPE_LEN_SHIFT; 6113 stripe_cnt = stripe_nr_end - stripe_nr; 6114 stripe_end_offset = btrfs_stripe_nr_to_offset(stripe_nr_end) - 6115 (offset + length); 6116 /* 6117 * after this, stripe_nr is the number of stripes on this 6118 * device we have to walk to find the data, and stripe_index is 6119 * the number of our device in the stripe array 6120 */ 6121 *num_stripes = 1; 6122 stripe_index = 0; 6123 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | 6124 BTRFS_BLOCK_GROUP_RAID10)) { 6125 if (map->type & BTRFS_BLOCK_GROUP_RAID0) 6126 sub_stripes = 1; 6127 else 6128 sub_stripes = map->sub_stripes; 6129 6130 factor = map->num_stripes / sub_stripes; 6131 *num_stripes = min_t(u64, map->num_stripes, 6132 sub_stripes * stripe_cnt); 6133 stripe_index = stripe_nr % factor; 6134 stripe_nr /= factor; 6135 stripe_index *= sub_stripes; 6136 6137 remaining_stripes = stripe_cnt % factor; 6138 stripes_per_dev = stripe_cnt / factor; 6139 last_stripe =
((stripe_nr_end - 1) % factor) * sub_stripes; 6140 } else if (map->type & (BTRFS_BLOCK_GROUP_RAID1_MASK | 6141 BTRFS_BLOCK_GROUP_DUP)) { 6142 *num_stripes = map->num_stripes; 6143 } else { 6144 stripe_index = stripe_nr % map->num_stripes; 6145 stripe_nr /= map->num_stripes; 6146 } 6147 6148 stripes = kcalloc(*num_stripes, sizeof(*stripes), GFP_NOFS); 6149 if (!stripes) { 6150 ret = -ENOMEM; 6151 goto out_free_map; 6152 } 6153 6154 for (i = 0; i < *num_stripes; i++) { 6155 stripes[i].physical = 6156 map->stripes[stripe_index].physical + 6157 stripe_offset + btrfs_stripe_nr_to_offset(stripe_nr); 6158 stripes[i].dev = map->stripes[stripe_index].dev; 6159 6160 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | 6161 BTRFS_BLOCK_GROUP_RAID10)) { 6162 stripes[i].length = btrfs_stripe_nr_to_offset(stripes_per_dev); 6163 6164 if (i / sub_stripes < remaining_stripes) 6165 stripes[i].length += BTRFS_STRIPE_LEN; 6166 6167 /* 6168 * Special for the first stripe and 6169 * the last stripe: 6170 * 6171 * |-------|...|-------| 6172 * |----------| 6173 * off end_off 6174 */ 6175 if (i < sub_stripes) 6176 stripes[i].length -= stripe_offset; 6177 6178 if (stripe_index >= last_stripe && 6179 stripe_index <= (last_stripe + 6180 sub_stripes - 1)) 6181 stripes[i].length -= stripe_end_offset; 6182 6183 if (i == sub_stripes - 1) 6184 stripe_offset = 0; 6185 } else { 6186 stripes[i].length = length; 6187 } 6188 6189 stripe_index++; 6190 if (stripe_index == map->num_stripes) { 6191 stripe_index = 0; 6192 stripe_nr++; 6193 } 6194 } 6195 6196 btrfs_free_chunk_map(map); 6197 return stripes; 6198 out_free_map: 6199 btrfs_free_chunk_map(map); 6200 return ERR_PTR(ret); 6201 } 6202 6203 static bool is_block_group_to_copy(struct btrfs_fs_info *fs_info, u64 logical) 6204 { 6205 struct btrfs_block_group *cache; 6206 bool ret; 6207 6208 /* Non zoned filesystem does not use "to_copy" flag */ 6209 if (!btrfs_is_zoned(fs_info)) 6210 return false; 6211 6212 cache = btrfs_lookup_block_group(fs_info, logical); 6213 6214 ret = test_bit(BLOCK_GROUP_FLAG_TO_COPY, &cache->runtime_flags); 6215 6216 btrfs_put_block_group(cache); 6217 return ret; 6218 } 6219 6220 static void handle_ops_on_dev_replace(struct btrfs_io_context *bioc, 6221 struct btrfs_dev_replace *dev_replace, 6222 u64 logical, 6223 struct btrfs_io_geometry *io_geom) 6224 { 6225 u64 srcdev_devid = dev_replace->srcdev->devid; 6226 /* 6227 * At this stage, num_stripes is still the real number of stripes, 6228 * excluding the duplicated stripes. 6229 */ 6230 int num_stripes = io_geom->num_stripes; 6231 int max_errors = io_geom->max_errors; 6232 int nr_extra_stripes = 0; 6233 int i; 6234 6235 /* 6236 * A block group which has "to_copy" set will eventually be copied by 6237 * the dev-replace process. We can avoid cloning IO here. 6238 */ 6239 if (is_block_group_to_copy(dev_replace->srcdev->fs_info, logical)) 6240 return; 6241 6242 /* 6243 * Duplicate the write operations while the dev-replace procedure is 6244 * running. Since the copying of the old disk to the new disk takes 6245 * place at run time while the filesystem is mounted writable, the 6246 * regular write operations to the old disk have to be duplicated to go 6247 * to the new disk as well. 6248 * 6249 * Note that device->missing is handled by the caller, and that the 6250 * write to the old disk is already set up in the stripes array. 
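 *
 * Illustrative example: for a DUP chunk both stripes can live on the
 * source device, so up to two extra target stripes may be appended
 * here - which is why the ASSERT below allows nr_extra_stripes <= 2.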
6251 */ 6252 for (i = 0; i < num_stripes; i++) { 6253 struct btrfs_io_stripe *old = &bioc->stripes[i]; 6254 struct btrfs_io_stripe *new = &bioc->stripes[num_stripes + nr_extra_stripes]; 6255 6256 if (old->dev->devid != srcdev_devid) 6257 continue; 6258 6259 new->physical = old->physical; 6260 new->dev = dev_replace->tgtdev; 6261 if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) 6262 bioc->replace_stripe_src = i; 6263 nr_extra_stripes++; 6264 } 6265 6266 /* We can have at most 2 extra stripes (for DUP). */ 6267 ASSERT(nr_extra_stripes <= 2); 6268 /* 6269 * For GET_READ_MIRRORS, we can only return at most 1 extra stripe for 6270 * replace. 6271 * If we have 2 extra stripes, only choose the one with smaller physical. 6272 */ 6273 if (io_geom->op == BTRFS_MAP_GET_READ_MIRRORS && nr_extra_stripes == 2) { 6274 struct btrfs_io_stripe *first = &bioc->stripes[num_stripes]; 6275 struct btrfs_io_stripe *second = &bioc->stripes[num_stripes + 1]; 6276 6277 /* Only DUP can have two extra stripes. */ 6278 ASSERT(bioc->map_type & BTRFS_BLOCK_GROUP_DUP); 6279 6280 /* 6281 * Swap the two extra stripes and reduce @nr_extra_stripes. 6282 * The extra stripe would still be there, but won't be accessed. 6283 */ 6284 if (first->physical > second->physical) { 6285 swap(second->physical, first->physical); 6286 swap(second->dev, first->dev); 6287 nr_extra_stripes--; 6288 } 6289 } 6290 6291 io_geom->num_stripes = num_stripes + nr_extra_stripes; 6292 io_geom->max_errors = max_errors + nr_extra_stripes; 6293 bioc->replace_nr_stripes = nr_extra_stripes; 6294 } 6295 6296 static u64 btrfs_max_io_len(struct btrfs_chunk_map *map, u64 offset, 6297 struct btrfs_io_geometry *io_geom) 6298 { 6299 /* 6300 * Stripe_nr is the stripe where this block falls. stripe_offset is 6301 * the offset of this block in its stripe. 6302 */ 6303 io_geom->stripe_offset = offset & BTRFS_STRIPE_LEN_MASK; 6304 io_geom->stripe_nr = offset >> BTRFS_STRIPE_LEN_SHIFT; 6305 ASSERT(io_geom->stripe_offset < U32_MAX); 6306 6307 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { 6308 unsigned long full_stripe_len = 6309 btrfs_stripe_nr_to_offset(nr_data_stripes(map)); 6310 6311 /* 6312 * For full stripe start, we use previously calculated 6313 * @stripe_nr. Align it to nr_data_stripes, then multiply by 6314 * STRIPE_LEN. 6315 * 6316 * By this we can avoid u64 division completely. And we have 6317 * to go rounddown(), not round_down(), as nr_data_stripes is 6318 * not ensured to be a power of 2. 6319 */ 6320 io_geom->raid56_full_stripe_start = btrfs_stripe_nr_to_offset( 6321 rounddown(io_geom->stripe_nr, nr_data_stripes(map))); 6322 6323 ASSERT(io_geom->raid56_full_stripe_start + full_stripe_len > offset); 6324 ASSERT(io_geom->raid56_full_stripe_start <= offset); 6325 /* 6326 * For writes to RAID56, allow writing a full stripe set, but 6327 * no straddling of stripe sets. 6328 */ 6329 if (io_geom->op == BTRFS_MAP_WRITE) 6330 return full_stripe_len - (offset - io_geom->raid56_full_stripe_start); 6331 } 6332 6333 /* 6334 * For other RAID types and for RAID56 reads, allow a single stripe (on 6335 * a single disk.
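 *
 * Worked example (assuming the 64K BTRFS_STRIPE_LEN): for offset =
 * 200K we get stripe_nr = 3 and stripe_offset = 8K, so a striped
 * profile may map at most 64K - 8K = 56K in one go here.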
6336 */ 6337 if (map->type & BTRFS_BLOCK_GROUP_STRIPE_MASK) 6338 return BTRFS_STRIPE_LEN - io_geom->stripe_offset; 6339 return U64_MAX; 6340 } 6341 6342 static int set_io_stripe(struct btrfs_fs_info *fs_info, u64 logical, 6343 u64 *length, struct btrfs_io_stripe *dst, 6344 struct btrfs_chunk_map *map, 6345 struct btrfs_io_geometry *io_geom) 6346 { 6347 dst->dev = map->stripes[io_geom->stripe_index].dev; 6348 6349 if (io_geom->op == BTRFS_MAP_READ && 6350 btrfs_need_stripe_tree_update(fs_info, map->type)) 6351 return btrfs_get_raid_extent_offset(fs_info, logical, length, 6352 map->type, 6353 io_geom->stripe_index, dst); 6354 6355 dst->physical = map->stripes[io_geom->stripe_index].physical + 6356 io_geom->stripe_offset + 6357 btrfs_stripe_nr_to_offset(io_geom->stripe_nr); 6358 return 0; 6359 } 6360 6361 static bool is_single_device_io(struct btrfs_fs_info *fs_info, 6362 const struct btrfs_io_stripe *smap, 6363 const struct btrfs_chunk_map *map, 6364 int num_alloc_stripes, 6365 enum btrfs_map_op op, int mirror_num) 6366 { 6367 if (!smap) 6368 return false; 6369 6370 if (num_alloc_stripes != 1) 6371 return false; 6372 6373 if (btrfs_need_stripe_tree_update(fs_info, map->type) && op != BTRFS_MAP_READ) 6374 return false; 6375 6376 if ((map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) && mirror_num > 1) 6377 return false; 6378 6379 return true; 6380 } 6381 6382 static void map_blocks_raid0(const struct btrfs_chunk_map *map, 6383 struct btrfs_io_geometry *io_geom) 6384 { 6385 io_geom->stripe_index = io_geom->stripe_nr % map->num_stripes; 6386 io_geom->stripe_nr /= map->num_stripes; 6387 if (io_geom->op == BTRFS_MAP_READ) 6388 io_geom->mirror_num = 1; 6389 } 6390 6391 static void map_blocks_raid1(struct btrfs_fs_info *fs_info, 6392 struct btrfs_chunk_map *map, 6393 struct btrfs_io_geometry *io_geom, 6394 bool dev_replace_is_ongoing) 6395 { 6396 if (io_geom->op != BTRFS_MAP_READ) { 6397 io_geom->num_stripes = map->num_stripes; 6398 return; 6399 } 6400 6401 if (io_geom->mirror_num) { 6402 io_geom->stripe_index = io_geom->mirror_num - 1; 6403 return; 6404 } 6405 6406 io_geom->stripe_index = find_live_mirror(fs_info, map, 0, 6407 dev_replace_is_ongoing); 6408 io_geom->mirror_num = io_geom->stripe_index + 1; 6409 } 6410 6411 static void map_blocks_dup(const struct btrfs_chunk_map *map, 6412 struct btrfs_io_geometry *io_geom) 6413 { 6414 if (io_geom->op != BTRFS_MAP_READ) { 6415 io_geom->num_stripes = map->num_stripes; 6416 return; 6417 } 6418 6419 if (io_geom->mirror_num) { 6420 io_geom->stripe_index = io_geom->mirror_num - 1; 6421 return; 6422 } 6423 6424 io_geom->mirror_num = 1; 6425 } 6426 6427 static void map_blocks_raid10(struct btrfs_fs_info *fs_info, 6428 struct btrfs_chunk_map *map, 6429 struct btrfs_io_geometry *io_geom, 6430 bool dev_replace_is_ongoing) 6431 { 6432 u32 factor = map->num_stripes / map->sub_stripes; 6433 int old_stripe_index; 6434 6435 io_geom->stripe_index = (io_geom->stripe_nr % factor) * map->sub_stripes; 6436 io_geom->stripe_nr /= factor; 6437 6438 if (io_geom->op != BTRFS_MAP_READ) { 6439 io_geom->num_stripes = map->sub_stripes; 6440 return; 6441 } 6442 6443 if (io_geom->mirror_num) { 6444 io_geom->stripe_index += io_geom->mirror_num - 1; 6445 return; 6446 } 6447 6448 old_stripe_index = io_geom->stripe_index; 6449 io_geom->stripe_index = find_live_mirror(fs_info, map, 6450 io_geom->stripe_index, 6451 dev_replace_is_ongoing); 6452 io_geom->mirror_num = io_geom->stripe_index - old_stripe_index + 1; 6453 } 6454 6455 static void map_blocks_raid56_write(struct btrfs_chunk_map *map, 
6456 struct btrfs_io_geometry *io_geom, 6457 u64 logical, u64 *length) 6458 { 6459 int data_stripes = nr_data_stripes(map); 6460 6461 /* 6462 * Needs full stripe mapping. 6463 * 6464 * Push stripe_nr back to the start of the full stripe. For those cases 6465 * needing a full stripe, @stripe_nr is the full stripe number. 6466 * 6467 * Originally we go raid56_full_stripe_start / full_stripe_len, but 6468 * that can be expensive. Here we just divide @stripe_nr by 6469 * @data_stripes. 6470 */ 6471 io_geom->stripe_nr /= data_stripes; 6472 6473 /* RAID[56] write or recovery. Return all stripes */ 6474 io_geom->num_stripes = map->num_stripes; 6475 io_geom->max_errors = btrfs_chunk_max_errors(map); 6476 6477 /* Return the length to the full stripe end. */ 6478 *length = min(logical + *length, 6479 io_geom->raid56_full_stripe_start + map->start + 6480 btrfs_stripe_nr_to_offset(data_stripes)) - 6481 logical; 6482 io_geom->stripe_index = 0; 6483 io_geom->stripe_offset = 0; 6484 } 6485 6486 static void map_blocks_raid56_read(struct btrfs_chunk_map *map, 6487 struct btrfs_io_geometry *io_geom) 6488 { 6489 int data_stripes = nr_data_stripes(map); 6490 6491 ASSERT(io_geom->mirror_num <= 1); 6492 /* Just grab the data stripe directly. */ 6493 io_geom->stripe_index = io_geom->stripe_nr % data_stripes; 6494 io_geom->stripe_nr /= data_stripes; 6495 6496 /* We distribute the parity blocks across stripes. */ 6497 io_geom->stripe_index = 6498 (io_geom->stripe_nr + io_geom->stripe_index) % map->num_stripes; 6499 6500 if (io_geom->op == BTRFS_MAP_READ && io_geom->mirror_num < 1) 6501 io_geom->mirror_num = 1; 6502 } 6503 6504 static void map_blocks_single(const struct btrfs_chunk_map *map, 6505 struct btrfs_io_geometry *io_geom) 6506 { 6507 io_geom->stripe_index = io_geom->stripe_nr % map->num_stripes; 6508 io_geom->stripe_nr /= map->num_stripes; 6509 io_geom->mirror_num = io_geom->stripe_index + 1; 6510 } 6511 6512 /* 6513 * Map one logical range to one or more physical ranges. 6514 * 6515 * @length: (Mandatory) mapped length of this run. 6516 * One logical range can be split into different segments 6517 * due to factors like zones and RAID0/5/6/10 stripe 6518 * boundaries. 6519 * 6520 * @bioc_ret: (Mandatory) returned btrfs_io_context structure, 6521 * which has one or more physical ranges (btrfs_io_stripe) 6522 * recorded inside. 6523 * Caller should call btrfs_put_bioc() to free it after use. 6524 * 6525 * @smap: (Optional) single physical range optimization. 6526 * If the map request can be fulfilled by one single 6527 * physical range, and this parameter is not NULL, 6528 * then @bioc_ret would be NULL, and @smap would be 6529 * updated. 6530 * 6531 * @mirror_num_ret: (Mandatory) returned mirror number if the original 6532 * value is 0. 6533 * 6534 * Mirror number 0 means to choose any live mirrors. 6535 * 6536 * For non-RAID56 profiles, non-zero mirror_num means 6537 * the Nth mirror (e.g. mirror_num 1 means the first 6538 * copy). 6539 * 6540 * For RAID56 profile, mirror 1 means rebuild from P and 6541 * the remaining data stripes. 6542 * 6543 * For RAID6 profile, mirror > 2 means mark another 6544 * data/P stripe error and rebuild from the remaining 6545 * stripes.
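 *
 * Typical read-side call (an illustrative sketch only; @bioc, @smap and
 * @mirror below are local names, not part of the API):
 *
 *	struct btrfs_io_context *bioc = NULL;
 *	struct btrfs_io_stripe smap = { 0 };
 *	int mirror = 0;
 *
 *	ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, logical, &length,
 *			      &bioc, &smap, &mirror);
 *
 * On success either @bioc is set, or @bioc is NULL and @smap holds the
 * single device/physical range; release @bioc with btrfs_put_bioc().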
6546 */ 6547 int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op, 6548 u64 logical, u64 *length, 6549 struct btrfs_io_context **bioc_ret, 6550 struct btrfs_io_stripe *smap, int *mirror_num_ret) 6551 { 6552 struct btrfs_chunk_map *map; 6553 struct btrfs_io_geometry io_geom = { 0 }; 6554 u64 map_offset; 6555 int ret = 0; 6556 int num_copies; 6557 struct btrfs_io_context *bioc = NULL; 6558 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace; 6559 int dev_replace_is_ongoing = 0; 6560 u16 num_alloc_stripes; 6561 u64 max_len; 6562 6563 ASSERT(bioc_ret); 6564 6565 io_geom.mirror_num = (mirror_num_ret ? *mirror_num_ret : 0); 6566 io_geom.num_stripes = 1; 6567 io_geom.stripe_index = 0; 6568 io_geom.op = op; 6569 6570 map = btrfs_get_chunk_map(fs_info, logical, *length); 6571 if (IS_ERR(map)) 6572 return PTR_ERR(map); 6573 6574 num_copies = btrfs_chunk_map_num_copies(map); 6575 if (io_geom.mirror_num > num_copies) 6576 return -EINVAL; 6577 6578 map_offset = logical - map->start; 6579 io_geom.raid56_full_stripe_start = (u64)-1; 6580 max_len = btrfs_max_io_len(map, map_offset, &io_geom); 6581 *length = min_t(u64, map->chunk_len - map_offset, max_len); 6582 6583 if (dev_replace->replace_task != current) 6584 down_read(&dev_replace->rwsem); 6585 6586 dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace); 6587 /* 6588 * Hold the semaphore for read during the whole operation, write is 6589 * requested at commit time but must wait. 6590 */ 6591 if (!dev_replace_is_ongoing && dev_replace->replace_task != current) 6592 up_read(&dev_replace->rwsem); 6593 6594 switch (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) { 6595 case BTRFS_BLOCK_GROUP_RAID0: 6596 map_blocks_raid0(map, &io_geom); 6597 break; 6598 case BTRFS_BLOCK_GROUP_RAID1: 6599 case BTRFS_BLOCK_GROUP_RAID1C3: 6600 case BTRFS_BLOCK_GROUP_RAID1C4: 6601 map_blocks_raid1(fs_info, map, &io_geom, dev_replace_is_ongoing); 6602 break; 6603 case BTRFS_BLOCK_GROUP_DUP: 6604 map_blocks_dup(map, &io_geom); 6605 break; 6606 case BTRFS_BLOCK_GROUP_RAID10: 6607 map_blocks_raid10(fs_info, map, &io_geom, dev_replace_is_ongoing); 6608 break; 6609 case BTRFS_BLOCK_GROUP_RAID5: 6610 case BTRFS_BLOCK_GROUP_RAID6: 6611 if (op != BTRFS_MAP_READ || io_geom.mirror_num > 1) 6612 map_blocks_raid56_write(map, &io_geom, logical, length); 6613 else 6614 map_blocks_raid56_read(map, &io_geom); 6615 break; 6616 default: 6617 /* 6618 * After this, stripe_nr is the number of stripes on this 6619 * device we have to walk to find the data, and stripe_index is 6620 * the number of our device in the stripe array 6621 */ 6622 map_blocks_single(map, &io_geom); 6623 break; 6624 } 6625 if (io_geom.stripe_index >= map->num_stripes) { 6626 btrfs_crit(fs_info, 6627 "stripe index math went horribly wrong, got stripe_index=%u, num_stripes=%u", 6628 io_geom.stripe_index, map->num_stripes); 6629 ret = -EINVAL; 6630 goto out; 6631 } 6632 6633 num_alloc_stripes = io_geom.num_stripes; 6634 if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL && 6635 op != BTRFS_MAP_READ) 6636 /* 6637 * For replace case, we need to add extra stripes for extra 6638 * duplicated stripes. 6639 * 6640 * For both WRITE and GET_READ_MIRRORS, we may have at most 6641 * 2 more stripes (DUP types, otherwise 1). 6642 */ 6643 num_alloc_stripes += 2; 6644 6645 /* 6646 * If this I/O maps to a single device, try to return the device and 6647 * physical block information on the stack instead of allocating an 6648 * I/O context structure. 
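 *
 * (The exact conditions are centralized in is_single_device_io()
 * above; note that RAID56 with mirror_num > 1 always needs a full
 * btrfs_io_context, since a rebuild involves multiple stripes.)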
6649 */ 6650 if (is_single_device_io(fs_info, smap, map, num_alloc_stripes, op, 6651 io_geom.mirror_num)) { 6652 ret = set_io_stripe(fs_info, logical, length, smap, map, &io_geom); 6653 if (mirror_num_ret) 6654 *mirror_num_ret = io_geom.mirror_num; 6655 *bioc_ret = NULL; 6656 goto out; 6657 } 6658 6659 bioc = alloc_btrfs_io_context(fs_info, logical, num_alloc_stripes); 6660 if (!bioc) { 6661 ret = -ENOMEM; 6662 goto out; 6663 } 6664 bioc->map_type = map->type; 6665 6666 /* 6667 * For RAID56 full map, we need to make sure the stripes[] follows the 6668 * rule that data stripes are all ordered, then followed with P and Q 6669 * (if we have). 6670 * 6671 * It's still mostly the same as other profiles, just with extra rotation. 6672 */ 6673 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK && 6674 (op != BTRFS_MAP_READ || io_geom.mirror_num > 1)) { 6675 /* 6676 * For RAID56 @stripe_nr is already the number of full stripes 6677 * before us, which is also the rotation value (needs to modulo 6678 * with num_stripes). 6679 * 6680 * In this case, we just add @stripe_nr with @i, then do the 6681 * modulo, to reduce one modulo call. 6682 */ 6683 bioc->full_stripe_logical = map->start + 6684 btrfs_stripe_nr_to_offset(io_geom.stripe_nr * 6685 nr_data_stripes(map)); 6686 for (int i = 0; i < io_geom.num_stripes; i++) { 6687 struct btrfs_io_stripe *dst = &bioc->stripes[i]; 6688 u32 stripe_index; 6689 6690 stripe_index = (i + io_geom.stripe_nr) % io_geom.num_stripes; 6691 dst->dev = map->stripes[stripe_index].dev; 6692 dst->physical = 6693 map->stripes[stripe_index].physical + 6694 io_geom.stripe_offset + 6695 btrfs_stripe_nr_to_offset(io_geom.stripe_nr); 6696 } 6697 } else { 6698 /* 6699 * For all other non-RAID56 profiles, just copy the target 6700 * stripe into the bioc. 
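 *
 * (For the RAID56 branch above, an illustrative rotation: with
 * num_stripes = 3 and stripe_nr = 4, the (i + stripe_nr) % num_stripes
 * indexing picks map stripes 1, 2, 0 for bioc stripes 0, 1, 2.)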
6701 */ 6702 for (int i = 0; i < io_geom.num_stripes; i++) { 6703 ret = set_io_stripe(fs_info, logical, length, 6704 &bioc->stripes[i], map, &io_geom); 6705 if (ret < 0) 6706 break; 6707 io_geom.stripe_index++; 6708 } 6709 } 6710 6711 if (ret) { 6712 *bioc_ret = NULL; 6713 btrfs_put_bioc(bioc); 6714 goto out; 6715 } 6716 6717 if (op != BTRFS_MAP_READ) 6718 io_geom.max_errors = btrfs_chunk_max_errors(map); 6719 6720 if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL && 6721 op != BTRFS_MAP_READ) { 6722 handle_ops_on_dev_replace(bioc, dev_replace, logical, &io_geom); 6723 } 6724 6725 *bioc_ret = bioc; 6726 bioc->num_stripes = io_geom.num_stripes; 6727 bioc->max_errors = io_geom.max_errors; 6728 bioc->mirror_num = io_geom.mirror_num; 6729 6730 out: 6731 if (dev_replace_is_ongoing && dev_replace->replace_task != current) { 6732 lockdep_assert_held(&dev_replace->rwsem); 6733 /* Unlock and let waiting writers proceed */ 6734 up_read(&dev_replace->rwsem); 6735 } 6736 btrfs_free_chunk_map(map); 6737 return ret; 6738 } 6739 6740 static bool dev_args_match_fs_devices(const struct btrfs_dev_lookup_args *args, 6741 const struct btrfs_fs_devices *fs_devices) 6742 { 6743 if (args->fsid == NULL) 6744 return true; 6745 if (memcmp(fs_devices->metadata_uuid, args->fsid, BTRFS_FSID_SIZE) == 0) 6746 return true; 6747 return false; 6748 } 6749 6750 static bool dev_args_match_device(const struct btrfs_dev_lookup_args *args, 6751 const struct btrfs_device *device) 6752 { 6753 if (args->missing) { 6754 if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state) && 6755 !device->bdev) 6756 return true; 6757 return false; 6758 } 6759 6760 if (device->devid != args->devid) 6761 return false; 6762 if (args->uuid && memcmp(device->uuid, args->uuid, BTRFS_UUID_SIZE) != 0) 6763 return false; 6764 return true; 6765 } 6766 6767 /* 6768 * Find a device specified by @devid or @uuid in the list of @fs_devices, or 6769 * return NULL. 6770 * 6771 * If devid and uuid are both specified, the match must be exact, otherwise 6772 * only devid is used. 6773 */ 6774 struct btrfs_device *btrfs_find_device(const struct btrfs_fs_devices *fs_devices, 6775 const struct btrfs_dev_lookup_args *args) 6776 { 6777 struct btrfs_device *device; 6778 struct btrfs_fs_devices *seed_devs; 6779 6780 if (dev_args_match_fs_devices(args, fs_devices)) { 6781 list_for_each_entry(device, &fs_devices->devices, dev_list) { 6782 if (dev_args_match_device(args, device)) 6783 return device; 6784 } 6785 } 6786 6787 list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) { 6788 if (!dev_args_match_fs_devices(args, seed_devs)) 6789 continue; 6790 list_for_each_entry(device, &seed_devs->devices, dev_list) { 6791 if (dev_args_match_device(args, device)) 6792 return device; 6793 } 6794 } 6795 6796 return NULL; 6797 } 6798 6799 static struct btrfs_device *add_missing_dev(struct btrfs_fs_devices *fs_devices, 6800 u64 devid, u8 *dev_uuid) 6801 { 6802 struct btrfs_device *device; 6803 unsigned int nofs_flag; 6804 6805 /* 6806 * We call this under the chunk_mutex, so we want to use NOFS for this 6807 * allocation, however we don't want to change btrfs_alloc_device() to 6808 * always do NOFS because we use it in a lot of other GFP_KERNEL safe 6809 * places. 
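 *
 * The memalloc_nofs_save()/memalloc_nofs_restore() pair below makes
 * any GFP_KERNEL allocation done inside btrfs_alloc_device() behave
 * as GFP_NOFS for the duration of that scope.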
6810 */ 6811 6812 nofs_flag = memalloc_nofs_save(); 6813 device = btrfs_alloc_device(NULL, &devid, dev_uuid, NULL); 6814 memalloc_nofs_restore(nofs_flag); 6815 if (IS_ERR(device)) 6816 return device; 6817 6818 list_add(&device->dev_list, &fs_devices->devices); 6819 device->fs_devices = fs_devices; 6820 fs_devices->num_devices++; 6821 6822 set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state); 6823 fs_devices->missing_devices++; 6824 6825 return device; 6826 } 6827 6828 /* 6829 * Allocate new device struct, set up devid and UUID. 6830 * 6831 * @fs_info: used only for generating a new devid, can be NULL if 6832 * devid is provided (i.e. @devid != NULL). 6833 * @devid: a pointer to devid for this device. If NULL a new devid 6834 * is generated. 6835 * @uuid: a pointer to UUID for this device. If NULL a new UUID 6836 * is generated. 6837 * @path: a pointer to device path if available, NULL otherwise. 6838 * 6839 * Return: a pointer to a new &struct btrfs_device on success; ERR_PTR() 6840 * on error. Returned struct is not linked onto any lists and must be 6841 * destroyed with btrfs_free_device. 6842 */ 6843 struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info, 6844 const u64 *devid, const u8 *uuid, 6845 const char *path) 6846 { 6847 struct btrfs_device *dev; 6848 u64 tmp; 6849 6850 if (WARN_ON(!devid && !fs_info)) 6851 return ERR_PTR(-EINVAL); 6852 6853 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 6854 if (!dev) 6855 return ERR_PTR(-ENOMEM); 6856 6857 INIT_LIST_HEAD(&dev->dev_list); 6858 INIT_LIST_HEAD(&dev->dev_alloc_list); 6859 INIT_LIST_HEAD(&dev->post_commit_list); 6860 6861 atomic_set(&dev->dev_stats_ccnt, 0); 6862 btrfs_device_data_ordered_init(dev); 6863 extent_io_tree_init(fs_info, &dev->alloc_state, IO_TREE_DEVICE_ALLOC_STATE); 6864 6865 if (devid) 6866 tmp = *devid; 6867 else { 6868 int ret; 6869 6870 ret = find_next_devid(fs_info, &tmp); 6871 if (ret) { 6872 btrfs_free_device(dev); 6873 return ERR_PTR(ret); 6874 } 6875 } 6876 dev->devid = tmp; 6877 6878 if (uuid) 6879 memcpy(dev->uuid, uuid, BTRFS_UUID_SIZE); 6880 else 6881 generate_random_uuid(dev->uuid); 6882 6883 if (path) { 6884 struct rcu_string *name; 6885 6886 name = rcu_string_strdup(path, GFP_KERNEL); 6887 if (!name) { 6888 btrfs_free_device(dev); 6889 return ERR_PTR(-ENOMEM); 6890 } 6891 rcu_assign_pointer(dev->name, name); 6892 } 6893 6894 return dev; 6895 } 6896 6897 static void btrfs_report_missing_device(struct btrfs_fs_info *fs_info, 6898 u64 devid, u8 *uuid, bool error) 6899 { 6900 if (error) 6901 btrfs_err_rl(fs_info, "devid %llu uuid %pU is missing", 6902 devid, uuid); 6903 else 6904 btrfs_warn_rl(fs_info, "devid %llu uuid %pU is missing", 6905 devid, uuid); 6906 } 6907 6908 u64 btrfs_calc_stripe_length(const struct btrfs_chunk_map *map) 6909 { 6910 const int data_stripes = calc_data_stripes(map->type, map->num_stripes); 6911 6912 return div_u64(map->chunk_len, data_stripes); 6913 } 6914 6915 #if BITS_PER_LONG == 32 6916 /* 6917 * Due to the page cache limit, metadata beyond BTRFS_32BIT_MAX_FILE_SIZE 6918 * can't be accessed on 32bit systems. 6919 * 6920 * This function does a mount time check to reject the fs if it already has 6921 * a metadata chunk beyond that limit.
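 *
 * (Illustrative math, assuming 4K pages: a 32-bit page cache index can
 * address at most about 2^32 * 4K = 16T of byte offsets, which is where
 * this limit and the early warning threshold below come from.)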
6922 */ 6923 static int check_32bit_meta_chunk(struct btrfs_fs_info *fs_info, 6924 u64 logical, u64 length, u64 type) 6925 { 6926 if (!(type & BTRFS_BLOCK_GROUP_METADATA)) 6927 return 0; 6928 6929 if (logical + length < MAX_LFS_FILESIZE) 6930 return 0; 6931 6932 btrfs_err_32bit_limit(fs_info); 6933 return -EOVERFLOW; 6934 } 6935 6936 /* 6937 * This is to give early warning for any metadata chunk reaching 6938 * BTRFS_32BIT_EARLY_WARN_THRESHOLD. 6939 * Although we can still access the metadata, it's not going to be possible 6940 * once the limit is reached. 6941 */ 6942 static void warn_32bit_meta_chunk(struct btrfs_fs_info *fs_info, 6943 u64 logical, u64 length, u64 type) 6944 { 6945 if (!(type & BTRFS_BLOCK_GROUP_METADATA)) 6946 return; 6947 6948 if (logical + length < BTRFS_32BIT_EARLY_WARN_THRESHOLD) 6949 return; 6950 6951 btrfs_warn_32bit_limit(fs_info); 6952 } 6953 #endif 6954 6955 static struct btrfs_device *handle_missing_device(struct btrfs_fs_info *fs_info, 6956 u64 devid, u8 *uuid) 6957 { 6958 struct btrfs_device *dev; 6959 6960 if (!btrfs_test_opt(fs_info, DEGRADED)) { 6961 btrfs_report_missing_device(fs_info, devid, uuid, true); 6962 return ERR_PTR(-ENOENT); 6963 } 6964 6965 dev = add_missing_dev(fs_info->fs_devices, devid, uuid); 6966 if (IS_ERR(dev)) { 6967 btrfs_err(fs_info, "failed to init missing device %llu: %ld", 6968 devid, PTR_ERR(dev)); 6969 return dev; 6970 } 6971 btrfs_report_missing_device(fs_info, devid, uuid, false); 6972 6973 return dev; 6974 } 6975 6976 static int read_one_chunk(struct btrfs_key *key, struct extent_buffer *leaf, 6977 struct btrfs_chunk *chunk) 6978 { 6979 BTRFS_DEV_LOOKUP_ARGS(args); 6980 struct btrfs_fs_info *fs_info = leaf->fs_info; 6981 struct btrfs_chunk_map *map; 6982 u64 logical; 6983 u64 length; 6984 u64 devid; 6985 u64 type; 6986 u8 uuid[BTRFS_UUID_SIZE]; 6987 int index; 6988 int num_stripes; 6989 int ret; 6990 int i; 6991 6992 logical = key->offset; 6993 length = btrfs_chunk_length(leaf, chunk); 6994 type = btrfs_chunk_type(leaf, chunk); 6995 index = btrfs_bg_flags_to_raid_index(type); 6996 num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 6997 6998 #if BITS_PER_LONG == 32 6999 ret = check_32bit_meta_chunk(fs_info, logical, length, type); 7000 if (ret < 0) 7001 return ret; 7002 warn_32bit_meta_chunk(fs_info, logical, length, type); 7003 #endif 7004 7005 /* 7006 * Only need to verify chunk item if we're reading from sys chunk array, 7007 * as chunk item in tree block is already verified by tree-checker. 7008 */ 7009 if (leaf->start == BTRFS_SUPER_INFO_OFFSET) { 7010 ret = btrfs_check_chunk_valid(leaf, chunk, logical); 7011 if (ret) 7012 return ret; 7013 } 7014 7015 map = btrfs_find_chunk_map(fs_info, logical, 1); 7016 7017 /* already mapped? */ 7018 if (map && map->start <= logical && map->start + map->chunk_len > logical) { 7019 btrfs_free_chunk_map(map); 7020 return 0; 7021 } else if (map) { 7022 btrfs_free_chunk_map(map); 7023 } 7024 7025 map = btrfs_alloc_chunk_map(num_stripes, GFP_NOFS); 7026 if (!map) 7027 return -ENOMEM; 7028 7029 map->start = logical; 7030 map->chunk_len = length; 7031 map->num_stripes = num_stripes; 7032 map->io_width = btrfs_chunk_io_width(leaf, chunk); 7033 map->io_align = btrfs_chunk_io_align(leaf, chunk); 7034 map->type = type; 7035 /* 7036 * We can't use the sub_stripes value, as for profiles other than 7037 * RAID10, they may have 0 as sub_stripes for filesystems created by 7038 * older mkfs (<v5.4). 7039 * In that case, it can cause divide-by-zero errors later. 
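 * (For example, the factor = map->num_stripes / map->sub_stripes
 * computations in btrfs_map_discard() and map_blocks_raid10() divide
 * by sub_stripes.)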
7040 * Since currently sub_stripes is fixed for each profile, let's 7041 * use the trusted value instead. 7042 */ 7043 map->sub_stripes = btrfs_raid_array[index].sub_stripes; 7044 map->verified_stripes = 0; 7045 map->stripe_size = btrfs_calc_stripe_length(map); 7046 for (i = 0; i < num_stripes; i++) { 7047 map->stripes[i].physical = 7048 btrfs_stripe_offset_nr(leaf, chunk, i); 7049 devid = btrfs_stripe_devid_nr(leaf, chunk, i); 7050 args.devid = devid; 7051 read_extent_buffer(leaf, uuid, (unsigned long) 7052 btrfs_stripe_dev_uuid_nr(chunk, i), 7053 BTRFS_UUID_SIZE); 7054 args.uuid = uuid; 7055 map->stripes[i].dev = btrfs_find_device(fs_info->fs_devices, &args); 7056 if (!map->stripes[i].dev) { 7057 map->stripes[i].dev = handle_missing_device(fs_info, 7058 devid, uuid); 7059 if (IS_ERR(map->stripes[i].dev)) { 7060 ret = PTR_ERR(map->stripes[i].dev); 7061 btrfs_free_chunk_map(map); 7062 return ret; 7063 } 7064 } 7065 7066 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, 7067 &(map->stripes[i].dev->dev_state)); 7068 } 7069 7070 ret = btrfs_add_chunk_map(fs_info, map); 7071 if (ret < 0) { 7072 btrfs_err(fs_info, 7073 "failed to add chunk map, start=%llu len=%llu: %d", 7074 map->start, map->chunk_len, ret); 7075 } 7076 7077 return ret; 7078 } 7079 7080 static void fill_device_from_item(struct extent_buffer *leaf, 7081 struct btrfs_dev_item *dev_item, 7082 struct btrfs_device *device) 7083 { 7084 unsigned long ptr; 7085 7086 device->devid = btrfs_device_id(leaf, dev_item); 7087 device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item); 7088 device->total_bytes = device->disk_total_bytes; 7089 device->commit_total_bytes = device->disk_total_bytes; 7090 device->bytes_used = btrfs_device_bytes_used(leaf, dev_item); 7091 device->commit_bytes_used = device->bytes_used; 7092 device->type = btrfs_device_type(leaf, dev_item); 7093 device->io_align = btrfs_device_io_align(leaf, dev_item); 7094 device->io_width = btrfs_device_io_width(leaf, dev_item); 7095 device->sector_size = btrfs_device_sector_size(leaf, dev_item); 7096 WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID); 7097 clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state); 7098 7099 ptr = btrfs_device_uuid(dev_item); 7100 read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE); 7101 } 7102 7103 static struct btrfs_fs_devices *open_seed_devices(struct btrfs_fs_info *fs_info, 7104 u8 *fsid) 7105 { 7106 struct btrfs_fs_devices *fs_devices; 7107 int ret; 7108 7109 lockdep_assert_held(&uuid_mutex); 7110 ASSERT(fsid); 7111 7112 /* This will match only for multi-device seed fs */ 7113 list_for_each_entry(fs_devices, &fs_info->fs_devices->seed_list, seed_list) 7114 if (!memcmp(fs_devices->fsid, fsid, BTRFS_FSID_SIZE)) 7115 return fs_devices; 7116 7117 7118 fs_devices = find_fsid(fsid, NULL); 7119 if (!fs_devices) { 7120 if (!btrfs_test_opt(fs_info, DEGRADED)) 7121 return ERR_PTR(-ENOENT); 7122 7123 fs_devices = alloc_fs_devices(fsid); 7124 if (IS_ERR(fs_devices)) 7125 return fs_devices; 7126 7127 fs_devices->seeding = true; 7128 fs_devices->opened = 1; 7129 return fs_devices; 7130 } 7131 7132 /* 7133 * Upon first call for a seed fs fsid, just create a private copy of the 7134 * respective fs_devices and anchor it at fs_info->fs_devices->seed_list 7135 */ 7136 fs_devices = clone_fs_devices(fs_devices); 7137 if (IS_ERR(fs_devices)) 7138 return fs_devices; 7139 7140 ret = open_fs_devices(fs_devices, BLK_OPEN_READ, fs_info->bdev_holder); 7141 if (ret) { 7142 free_fs_devices(fs_devices); 7143 return ERR_PTR(ret); 7144 } 7145 7146 if 
(!fs_devices->seeding) { 7147 close_fs_devices(fs_devices); 7148 free_fs_devices(fs_devices); 7149 return ERR_PTR(-EINVAL); 7150 } 7151 7152 list_add(&fs_devices->seed_list, &fs_info->fs_devices->seed_list); 7153 7154 return fs_devices; 7155 } 7156 7157 static int read_one_dev(struct extent_buffer *leaf, 7158 struct btrfs_dev_item *dev_item) 7159 { 7160 BTRFS_DEV_LOOKUP_ARGS(args); 7161 struct btrfs_fs_info *fs_info = leaf->fs_info; 7162 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 7163 struct btrfs_device *device; 7164 u64 devid; 7165 int ret; 7166 u8 fs_uuid[BTRFS_FSID_SIZE]; 7167 u8 dev_uuid[BTRFS_UUID_SIZE]; 7168 7169 devid = btrfs_device_id(leaf, dev_item); 7170 args.devid = devid; 7171 read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item), 7172 BTRFS_UUID_SIZE); 7173 read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item), 7174 BTRFS_FSID_SIZE); 7175 args.uuid = dev_uuid; 7176 args.fsid = fs_uuid; 7177 7178 if (memcmp(fs_uuid, fs_devices->metadata_uuid, BTRFS_FSID_SIZE)) { 7179 fs_devices = open_seed_devices(fs_info, fs_uuid); 7180 if (IS_ERR(fs_devices)) 7181 return PTR_ERR(fs_devices); 7182 } 7183 7184 device = btrfs_find_device(fs_info->fs_devices, &args); 7185 if (!device) { 7186 if (!btrfs_test_opt(fs_info, DEGRADED)) { 7187 btrfs_report_missing_device(fs_info, devid, 7188 dev_uuid, true); 7189 return -ENOENT; 7190 } 7191 7192 device = add_missing_dev(fs_devices, devid, dev_uuid); 7193 if (IS_ERR(device)) { 7194 btrfs_err(fs_info, 7195 "failed to add missing dev %llu: %ld", 7196 devid, PTR_ERR(device)); 7197 return PTR_ERR(device); 7198 } 7199 btrfs_report_missing_device(fs_info, devid, dev_uuid, false); 7200 } else { 7201 if (!device->bdev) { 7202 if (!btrfs_test_opt(fs_info, DEGRADED)) { 7203 btrfs_report_missing_device(fs_info, 7204 devid, dev_uuid, true); 7205 return -ENOENT; 7206 } 7207 btrfs_report_missing_device(fs_info, devid, 7208 dev_uuid, false); 7209 } 7210 7211 if (!device->bdev && 7212 !test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) { 7213 /* 7214 * this happens when a device that was properly setup 7215 * in the device info lists suddenly goes bad. 
7216 * device->bdev is NULL, and so we have to set 7217 * device->missing to one here 7218 */ 7219 device->fs_devices->missing_devices++; 7220 set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state); 7221 } 7222 7223 /* Move the device to its own fs_devices */ 7224 if (device->fs_devices != fs_devices) { 7225 ASSERT(test_bit(BTRFS_DEV_STATE_MISSING, 7226 &device->dev_state)); 7227 7228 list_move(&device->dev_list, &fs_devices->devices); 7229 device->fs_devices->num_devices--; 7230 fs_devices->num_devices++; 7231 7232 device->fs_devices->missing_devices--; 7233 fs_devices->missing_devices++; 7234 7235 device->fs_devices = fs_devices; 7236 } 7237 } 7238 7239 if (device->fs_devices != fs_info->fs_devices) { 7240 BUG_ON(test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)); 7241 if (device->generation != 7242 btrfs_device_generation(leaf, dev_item)) 7243 return -EINVAL; 7244 } 7245 7246 fill_device_from_item(leaf, dev_item, device); 7247 if (device->bdev) { 7248 u64 max_total_bytes = bdev_nr_bytes(device->bdev); 7249 7250 if (device->total_bytes > max_total_bytes) { 7251 btrfs_err(fs_info, 7252 "device total_bytes should be at most %llu but found %llu", 7253 max_total_bytes, device->total_bytes); 7254 return -EINVAL; 7255 } 7256 } 7257 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state); 7258 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) && 7259 !test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) { 7260 device->fs_devices->total_rw_bytes += device->total_bytes; 7261 atomic64_add(device->total_bytes - device->bytes_used, 7262 &fs_info->free_chunk_space); 7263 } 7264 ret = 0; 7265 return ret; 7266 } 7267 7268 int btrfs_read_sys_array(struct btrfs_fs_info *fs_info) 7269 { 7270 struct btrfs_super_block *super_copy = fs_info->super_copy; 7271 struct extent_buffer *sb; 7272 struct btrfs_disk_key *disk_key; 7273 struct btrfs_chunk *chunk; 7274 u8 *array_ptr; 7275 unsigned long sb_array_offset; 7276 int ret = 0; 7277 u32 num_stripes; 7278 u32 array_size; 7279 u32 len = 0; 7280 u32 cur_offset; 7281 u64 type; 7282 struct btrfs_key key; 7283 7284 ASSERT(BTRFS_SUPER_INFO_SIZE <= fs_info->nodesize); 7285 7286 /* 7287 * We allocated a dummy extent, just to use extent buffer accessors. 7288 * There will be unused space after BTRFS_SUPER_INFO_SIZE, but 7289 * that's fine, we will not go beyond system chunk array anyway. 
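 *
 * (The ASSERT above guarantees that the 4K BTRFS_SUPER_INFO_SIZE fits
 * inside a single dummy extent buffer of nodesize bytes.)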
7290 */ 7291 sb = alloc_dummy_extent_buffer(fs_info, BTRFS_SUPER_INFO_OFFSET); 7292 if (!sb) 7293 return -ENOMEM; 7294 set_extent_buffer_uptodate(sb); 7295 7296 write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE); 7297 array_size = btrfs_super_sys_array_size(super_copy); 7298 7299 array_ptr = super_copy->sys_chunk_array; 7300 sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array); 7301 cur_offset = 0; 7302 7303 while (cur_offset < array_size) { 7304 disk_key = (struct btrfs_disk_key *)array_ptr; 7305 len = sizeof(*disk_key); 7306 if (cur_offset + len > array_size) 7307 goto out_short_read; 7308 7309 btrfs_disk_key_to_cpu(&key, disk_key); 7310 7311 array_ptr += len; 7312 sb_array_offset += len; 7313 cur_offset += len; 7314 7315 if (key.type != BTRFS_CHUNK_ITEM_KEY) { 7316 btrfs_err(fs_info, 7317 "unexpected item type %u in sys_array at offset %u", 7318 (u32)key.type, cur_offset); 7319 ret = -EIO; 7320 break; 7321 } 7322 7323 chunk = (struct btrfs_chunk *)sb_array_offset; 7324 /* 7325 * At least one btrfs_chunk with one stripe must be present, 7326 * exact stripe count check comes afterwards 7327 */ 7328 len = btrfs_chunk_item_size(1); 7329 if (cur_offset + len > array_size) 7330 goto out_short_read; 7331 7332 num_stripes = btrfs_chunk_num_stripes(sb, chunk); 7333 if (!num_stripes) { 7334 btrfs_err(fs_info, 7335 "invalid number of stripes %u in sys_array at offset %u", 7336 num_stripes, cur_offset); 7337 ret = -EIO; 7338 break; 7339 } 7340 7341 type = btrfs_chunk_type(sb, chunk); 7342 if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) { 7343 btrfs_err(fs_info, 7344 "invalid chunk type %llu in sys_array at offset %u", 7345 type, cur_offset); 7346 ret = -EIO; 7347 break; 7348 } 7349 7350 len = btrfs_chunk_item_size(num_stripes); 7351 if (cur_offset + len > array_size) 7352 goto out_short_read; 7353 7354 ret = read_one_chunk(&key, sb, chunk); 7355 if (ret) 7356 break; 7357 7358 array_ptr += len; 7359 sb_array_offset += len; 7360 cur_offset += len; 7361 } 7362 clear_extent_buffer_uptodate(sb); 7363 free_extent_buffer_stale(sb); 7364 return ret; 7365 7366 out_short_read: 7367 btrfs_err(fs_info, "sys_array too short to read %u bytes at offset %u", 7368 len, cur_offset); 7369 clear_extent_buffer_uptodate(sb); 7370 free_extent_buffer_stale(sb); 7371 return -EIO; 7372 } 7373 7374 /* 7375 * Check if all chunks in the fs are OK for read-write degraded mount 7376 * 7377 * If the @failing_dev is specified, it's accounted as missing. 7378 * 7379 * Return true if all chunks meet the minimal RW mount requirements. 7380 * Return false if any chunk doesn't meet the minimal RW mount requirements. 7381 */ 7382 bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info, 7383 struct btrfs_device *failing_dev) 7384 { 7385 struct btrfs_chunk_map *map; 7386 u64 next_start; 7387 bool ret = true; 7388 7389 map = btrfs_find_chunk_map(fs_info, 0, U64_MAX); 7390 /* No chunk at all? 
Return false anyway */ 7391 if (!map) { 7392 ret = false; 7393 goto out; 7394 } 7395 while (map) { 7396 int missing = 0; 7397 int max_tolerated; 7398 int i; 7399 7400 max_tolerated = 7401 btrfs_get_num_tolerated_disk_barrier_failures( 7402 map->type); 7403 for (i = 0; i < map->num_stripes; i++) { 7404 struct btrfs_device *dev = map->stripes[i].dev; 7405 7406 if (!dev || !dev->bdev || 7407 test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) || 7408 dev->last_flush_error) 7409 missing++; 7410 else if (failing_dev && failing_dev == dev) 7411 missing++; 7412 } 7413 if (missing > max_tolerated) { 7414 if (!failing_dev) 7415 btrfs_warn(fs_info, 7416 "chunk %llu missing %d devices, max tolerance is %d for writable mount", 7417 map->start, missing, max_tolerated); 7418 btrfs_free_chunk_map(map); 7419 ret = false; 7420 goto out; 7421 } 7422 next_start = map->start + map->chunk_len; 7423 btrfs_free_chunk_map(map); 7424 7425 map = btrfs_find_chunk_map(fs_info, next_start, U64_MAX - next_start); 7426 } 7427 out: 7428 return ret; 7429 } 7430 7431 static void readahead_tree_node_children(struct extent_buffer *node) 7432 { 7433 int i; 7434 const int nr_items = btrfs_header_nritems(node); 7435 7436 for (i = 0; i < nr_items; i++) 7437 btrfs_readahead_node_child(node, i); 7438 } 7439 7440 int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info) 7441 { 7442 struct btrfs_root *root = fs_info->chunk_root; 7443 struct btrfs_path *path; 7444 struct extent_buffer *leaf; 7445 struct btrfs_key key; 7446 struct btrfs_key found_key; 7447 int ret; 7448 int slot; 7449 int iter_ret = 0; 7450 u64 total_dev = 0; 7451 u64 last_ra_node = 0; 7452 7453 path = btrfs_alloc_path(); 7454 if (!path) 7455 return -ENOMEM; 7456 7457 /* 7458 * uuid_mutex is needed only if we are mounting a sprout FS, 7459 * otherwise we don't need it. 7460 */ 7461 mutex_lock(&uuid_mutex); 7462 7463 /* 7464 * It is possible for mount and umount to race in such a way that 7465 * we execute this code path, but open_fs_devices failed to clear 7466 * total_rw_bytes. We certainly want it cleared before reading the 7467 * device items, so clear it here. 7468 */ 7469 fs_info->fs_devices->total_rw_bytes = 0; 7470 7471 /* 7472 * Lockdep complains about possible circular locking dependency between 7473 * a disk's open_mutex (struct gendisk.open_mutex), the rw semaphores 7474 * used for freeze protection of a fs (struct super_block.s_writers), 7475 * which we take when starting a transaction, and extent buffers of the 7476 * chunk tree if we call read_one_dev() while holding a lock on an 7477 * extent buffer of the chunk tree. Since we are mounting the filesystem 7478 * and at this point there can't be any concurrent task modifying the 7479 * chunk tree, to keep it simple, just skip locking on the chunk tree. 7480 */ 7481 ASSERT(!test_bit(BTRFS_FS_OPEN, &fs_info->flags)); 7482 path->skip_locking = 1; 7483 7484 /* 7485 * Read all device items, and then all the chunk items. All 7486 * device items are found before any chunk item (their object id 7487 * is smaller than the lowest possible object id for a chunk 7488 * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID).
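 *
 * Key layout sketch: device items use key (BTRFS_DEV_ITEMS_OBJECTID = 1,
 * BTRFS_DEV_ITEM_KEY, devid), while chunk items use key
 * (BTRFS_FIRST_CHUNK_TREE_OBJECTID = 256, BTRFS_CHUNK_ITEM_KEY,
 * chunk logical start), hence the iteration order.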
7489 */ 7490 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 7491 key.offset = 0; 7492 key.type = 0; 7493 btrfs_for_each_slot(root, &key, &found_key, path, iter_ret) { 7494 struct extent_buffer *node = path->nodes[1]; 7495 7496 leaf = path->nodes[0]; 7497 slot = path->slots[0]; 7498 7499 if (node) { 7500 if (last_ra_node != node->start) { 7501 readahead_tree_node_children(node); 7502 last_ra_node = node->start; 7503 } 7504 } 7505 if (found_key.type == BTRFS_DEV_ITEM_KEY) { 7506 struct btrfs_dev_item *dev_item; 7507 dev_item = btrfs_item_ptr(leaf, slot, 7508 struct btrfs_dev_item); 7509 ret = read_one_dev(leaf, dev_item); 7510 if (ret) 7511 goto error; 7512 total_dev++; 7513 } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) { 7514 struct btrfs_chunk *chunk; 7515 7516 /* 7517 * We are only called at mount time, so no need to take 7518 * fs_info->chunk_mutex. Plus, to avoid lockdep warnings, 7519 * we always lock first fs_info->chunk_mutex before 7520 * acquiring any locks on the chunk tree. This is a 7521 * requirement for chunk allocation, see the comment on 7522 * top of btrfs_chunk_alloc() for details. 7523 */ 7524 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk); 7525 ret = read_one_chunk(&found_key, leaf, chunk); 7526 if (ret) 7527 goto error; 7528 } 7529 } 7530 /* Catch error found during iteration */ 7531 if (iter_ret < 0) { 7532 ret = iter_ret; 7533 goto error; 7534 } 7535 7536 /* 7537 * After loading chunk tree, we've got all device information, 7538 * do another round of validation checks. 7539 */ 7540 if (total_dev != fs_info->fs_devices->total_devices) { 7541 btrfs_warn(fs_info, 7542 "super block num_devices %llu mismatch with DEV_ITEM count %llu, will be repaired on next transaction commit", 7543 btrfs_super_num_devices(fs_info->super_copy), 7544 total_dev); 7545 fs_info->fs_devices->total_devices = total_dev; 7546 btrfs_set_super_num_devices(fs_info->super_copy, total_dev); 7547 } 7548 if (btrfs_super_total_bytes(fs_info->super_copy) < 7549 fs_info->fs_devices->total_rw_bytes) { 7550 btrfs_err(fs_info, 7551 "super_total_bytes %llu mismatch with fs_devices total_rw_bytes %llu", 7552 btrfs_super_total_bytes(fs_info->super_copy), 7553 fs_info->fs_devices->total_rw_bytes); 7554 ret = -EINVAL; 7555 goto error; 7556 } 7557 ret = 0; 7558 error: 7559 mutex_unlock(&uuid_mutex); 7560 7561 btrfs_free_path(path); 7562 return ret; 7563 } 7564 7565 int btrfs_init_devices_late(struct btrfs_fs_info *fs_info) 7566 { 7567 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs; 7568 struct btrfs_device *device; 7569 int ret = 0; 7570 7571 fs_devices->fs_info = fs_info; 7572 7573 mutex_lock(&fs_devices->device_list_mutex); 7574 list_for_each_entry(device, &fs_devices->devices, dev_list) 7575 device->fs_info = fs_info; 7576 7577 list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) { 7578 list_for_each_entry(device, &seed_devs->devices, dev_list) { 7579 device->fs_info = fs_info; 7580 ret = btrfs_get_dev_zone_info(device, false); 7581 if (ret) 7582 break; 7583 } 7584 7585 seed_devs->fs_info = fs_info; 7586 } 7587 mutex_unlock(&fs_devices->device_list_mutex); 7588 7589 return ret; 7590 } 7591 7592 static u64 btrfs_dev_stats_value(const struct extent_buffer *eb, 7593 const struct btrfs_dev_stats_item *ptr, 7594 int index) 7595 { 7596 u64 val; 7597 7598 read_extent_buffer(eb, &val, 7599 offsetof(struct btrfs_dev_stats_item, values) + 7600 ((unsigned long)ptr) + (index * sizeof(u64)), 7601 sizeof(val)); 7602 return val; 7603 } 7604 7605 static void 
static u64 btrfs_dev_stats_value(const struct extent_buffer *eb,
				 const struct btrfs_dev_stats_item *ptr,
				 int index)
{
	u64 val;

	read_extent_buffer(eb, &val,
			   offsetof(struct btrfs_dev_stats_item, values) +
			    ((unsigned long)ptr) + (index * sizeof(u64)),
			   sizeof(val));
	return val;
}

static void btrfs_set_dev_stats_value(struct extent_buffer *eb,
				      struct btrfs_dev_stats_item *ptr,
				      int index, u64 val)
{
	write_extent_buffer(eb, &val,
			    offsetof(struct btrfs_dev_stats_item, values) +
			     ((unsigned long)ptr) + (index * sizeof(u64)),
			    sizeof(val));
}

static int btrfs_device_init_dev_stats(struct btrfs_device *device,
				       struct btrfs_path *path)
{
	struct btrfs_dev_stats_item *ptr;
	struct extent_buffer *eb;
	struct btrfs_key key;
	int item_size;
	int i, ret, slot;

	if (!device->fs_info->dev_root)
		return 0;

	key.objectid = BTRFS_DEV_STATS_OBJECTID;
	key.type = BTRFS_PERSISTENT_ITEM_KEY;
	key.offset = device->devid;
	ret = btrfs_search_slot(NULL, device->fs_info->dev_root, &key, path, 0, 0);
	if (ret) {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
			btrfs_dev_stat_set(device, i, 0);
		device->dev_stats_valid = 1;
		btrfs_release_path(path);
		return ret < 0 ? ret : 0;
	}
	slot = path->slots[0];
	eb = path->nodes[0];
	item_size = btrfs_item_size(eb, slot);

	ptr = btrfs_item_ptr(eb, slot, struct btrfs_dev_stats_item);

	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
		if (item_size >= (1 + i) * sizeof(__le64))
			btrfs_dev_stat_set(device, i,
					   btrfs_dev_stats_value(eb, ptr, i));
		else
			btrfs_dev_stat_set(device, i, 0);
	}

	device->dev_stats_valid = 1;
	btrfs_dev_stat_print_on_load(device);
	btrfs_release_path(path);

	return 0;
}

int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs;
	struct btrfs_device *device;
	struct btrfs_path *path = NULL;
	int ret = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		ret = btrfs_device_init_dev_stats(device, path);
		if (ret)
			goto out;
	}
	list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
		list_for_each_entry(device, &seed_devs->devices, dev_list) {
			ret = btrfs_device_init_dev_stats(device, path);
			if (ret)
				goto out;
		}
	}
out:
	mutex_unlock(&fs_devices->device_list_mutex);

	btrfs_free_path(path);
	return ret;
}
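
/*
 * Write the in-memory counters of one device back to its dev_stats item.
 * btrfs_search_slot() returns 0 if the item exists, 1 if it does not, or a
 * negative errno. An existing item that is too small (from an older format
 * with fewer counters) is deleted and re-inserted at the current size.
 */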
static int update_dev_stat_item(struct btrfs_trans_handle *trans,
				struct btrfs_device *device)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *dev_root = fs_info->dev_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *eb;
	struct btrfs_dev_stats_item *ptr;
	int ret;
	int i;

	key.objectid = BTRFS_DEV_STATS_OBJECTID;
	key.type = BTRFS_PERSISTENT_ITEM_KEY;
	key.offset = device->devid;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
	if (ret < 0) {
		btrfs_warn_in_rcu(fs_info,
			"error %d while searching for dev_stats item for device %s",
			ret, btrfs_dev_name(device));
		goto out;
	}

	if (ret == 0 &&
	    btrfs_item_size(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
		/* Need to delete the old one and insert a new one. */
		ret = btrfs_del_item(trans, dev_root, path);
		if (ret != 0) {
			btrfs_warn_in_rcu(fs_info,
				"delete too small dev_stats item for device %s failed %d",
				btrfs_dev_name(device), ret);
			goto out;
		}
		ret = 1;
	}

	if (ret == 1) {
		/* Need to insert a new item. */
		btrfs_release_path(path);
		ret = btrfs_insert_empty_item(trans, dev_root, path,
					      &key, sizeof(*ptr));
		if (ret < 0) {
			btrfs_warn_in_rcu(fs_info,
				"insert dev_stats item for device %s failed %d",
				btrfs_dev_name(device), ret);
			goto out;
		}
	}

	eb = path->nodes[0];
	ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		btrfs_set_dev_stats_value(eb, ptr, i,
					  btrfs_dev_stat_read(device, i));
	btrfs_mark_buffer_dirty(trans, eb);

out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Called from commit_transaction. Writes all changed device stats to disk.
 */
int btrfs_run_dev_stats(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;
	int stats_cnt;
	int ret = 0;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		stats_cnt = atomic_read(&device->dev_stats_ccnt);
		if (!device->dev_stats_valid || stats_cnt == 0)
			continue;

		/*
		 * There is a LOAD-LOAD control dependency between the value of
		 * dev_stats_ccnt and updating the on-disk values, which
		 * requires reading the in-memory counters. Such control
		 * dependencies require explicit read memory barriers.
		 *
		 * This memory barrier pairs with smp_mb__before_atomic() in
		 * btrfs_dev_stat_inc()/btrfs_dev_stat_set() and with the full
		 * barrier implied by atomic_xchg() in
		 * btrfs_dev_stats_read_and_reset().
		 */
		smp_rmb();

		ret = update_dev_stat_item(trans, device);
		if (!ret)
			atomic_sub(stats_cnt, &device->dev_stats_ccnt);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	return ret;
}

void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
{
	btrfs_dev_stat_inc(dev, index);

	if (!dev->dev_stats_valid)
		return;
	btrfs_err_rl_in_rcu(dev->fs_info,
		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
			    btrfs_dev_name(dev),
			    btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
			    btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
			    btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
			    btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
			    btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
}

static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
{
	int i;

	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		if (btrfs_dev_stat_read(dev, i) != 0)
			break;
	if (i == BTRFS_DEV_STAT_VALUES_MAX)
		return; /* All values == 0, suppress message. */

	btrfs_info_in_rcu(dev->fs_info,
		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
			  btrfs_dev_name(dev),
			  btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
			  btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
			  btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
			  btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
			  btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
}
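
/*
 * Handler for the device stats ioctls: copy up to BTRFS_DEV_STAT_VALUES_MAX
 * counters into @stats, and if BTRFS_DEV_STATS_RESET is set in @stats->flags,
 * also reset each counter as it is read. @stats->nr_items is clamped to the
 * number of counters actually supported.
 */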
int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info,
			struct btrfs_ioctl_get_dev_stats *stats)
{
	BTRFS_DEV_LOOKUP_ARGS(args);
	struct btrfs_device *dev;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	int i;

	mutex_lock(&fs_devices->device_list_mutex);
	args.devid = stats->devid;
	dev = btrfs_find_device(fs_info->fs_devices, &args);
	mutex_unlock(&fs_devices->device_list_mutex);

	if (!dev) {
		btrfs_warn(fs_info, "get dev_stats failed, device not found");
		return -ENODEV;
	} else if (!dev->dev_stats_valid) {
		btrfs_warn(fs_info, "get dev_stats failed, not yet valid");
		return -ENODEV;
	} else if (stats->flags & BTRFS_DEV_STATS_RESET) {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
			if (stats->nr_items > i)
				stats->values[i] =
					btrfs_dev_stat_read_and_reset(dev, i);
			else
				btrfs_dev_stat_set(dev, i, 0);
		}
		btrfs_info(fs_info, "device stats zeroed by %s (%d)",
			   current->comm, task_pid_nr(current));
	} else {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
			if (stats->nr_items > i)
				stats->values[i] = btrfs_dev_stat_read(dev, i);
	}
	if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
		stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
	return 0;
}

/*
 * Update the size and bytes used for each device where it changed. This is
 * delayed since we would otherwise get errors while writing out the
 * superblocks.
 *
 * Must be invoked during transaction commit.
 */
void btrfs_commit_device_sizes(struct btrfs_transaction *trans)
{
	struct btrfs_device *curr, *next;

	ASSERT(trans->state == TRANS_STATE_COMMIT_DOING);

	if (list_empty(&trans->dev_update_list))
		return;

	/*
	 * We don't need the device_list_mutex here. This list is owned by the
	 * transaction and the transaction must complete before the device is
	 * released.
	 */
	mutex_lock(&trans->fs_info->chunk_mutex);
	list_for_each_entry_safe(curr, next, &trans->dev_update_list,
				 post_commit_list) {
		list_del_init(&curr->post_commit_list);
		curr->commit_total_bytes = curr->disk_total_bytes;
		curr->commit_bytes_used = curr->bytes_used;
	}
	mutex_unlock(&trans->fs_info->chunk_mutex);
}

/*
 * Multiplicity factor for simple profiles: DUP, RAID1-like and RAID10.
 */
int btrfs_bg_type_to_factor(u64 flags)
{
	const int index = btrfs_bg_flags_to_raid_index(flags);

	return btrfs_raid_array[index].ncopies;
}
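
/*
 * Cross-check a single dev extent against the chunk map: the chunk it points
 * to must exist, the extent length must equal the chunk's per-device stripe
 * length, exactly one of the chunk's stripes must sit at this devid and
 * physical offset, and the extent must fit within the device (and be zone
 * aligned on zoned devices).
 */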
static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
				 u64 chunk_offset, u64 devid,
				 u64 physical_offset, u64 physical_len)
{
	struct btrfs_dev_lookup_args args = { .devid = devid };
	struct btrfs_chunk_map *map;
	struct btrfs_device *dev;
	u64 stripe_len;
	bool found = false;
	int ret = 0;
	int i;

	map = btrfs_find_chunk_map(fs_info, chunk_offset, 1);
	if (!map) {
		btrfs_err(fs_info,
"dev extent physical offset %llu on devid %llu doesn't have corresponding chunk",
			  physical_offset, devid);
		ret = -EUCLEAN;
		goto out;
	}

	stripe_len = btrfs_calc_stripe_length(map);
	if (physical_len != stripe_len) {
		btrfs_err(fs_info,
"dev extent physical offset %llu on devid %llu length doesn't match chunk %llu, have %llu expect %llu",
			  physical_offset, devid, map->start, physical_len,
			  stripe_len);
		ret = -EUCLEAN;
		goto out;
	}

	/*
	 * Very old mkfs.btrfs (before v4.1) will not respect the reserved
	 * space. Although the kernel can handle it without problems, it is
	 * better to warn the user.
	 */
	if (physical_offset < BTRFS_DEVICE_RANGE_RESERVED)
		btrfs_warn(fs_info,
		"devid %llu physical %llu len %llu inside the reserved space",
			   devid, physical_offset, physical_len);

	for (i = 0; i < map->num_stripes; i++) {
		if (map->stripes[i].dev->devid == devid &&
		    map->stripes[i].physical == physical_offset) {
			found = true;
			if (map->verified_stripes >= map->num_stripes) {
				btrfs_err(fs_info,
				"too many dev extents for chunk %llu found",
					  map->start);
				ret = -EUCLEAN;
				goto out;
			}
			map->verified_stripes++;
			break;
		}
	}
	if (!found) {
		btrfs_err(fs_info,
	"dev extent physical offset %llu devid %llu has no corresponding chunk",
			  physical_offset, devid);
		ret = -EUCLEAN;
	}

	/* Make sure no dev extent is beyond the device boundary. */
	dev = btrfs_find_device(fs_info->fs_devices, &args);
	if (!dev) {
		btrfs_err(fs_info, "failed to find devid %llu", devid);
		ret = -EUCLEAN;
		goto out;
	}

	if (physical_offset + physical_len > dev->disk_total_bytes) {
		btrfs_err(fs_info,
"dev extent devid %llu physical offset %llu len %llu is beyond device boundary %llu",
			  devid, physical_offset, physical_len,
			  dev->disk_total_bytes);
		ret = -EUCLEAN;
		goto out;
	}

	if (dev->zone_info) {
		u64 zone_size = dev->zone_info->zone_size;

		if (!IS_ALIGNED(physical_offset, zone_size) ||
		    !IS_ALIGNED(physical_len, zone_size)) {
			btrfs_err(fs_info,
"zoned: dev extent devid %llu physical offset %llu len %llu is not aligned to device zone",
				  devid, physical_offset, physical_len);
			ret = -EUCLEAN;
			goto out;
		}
	}

out:
	btrfs_free_chunk_map(map);
	return ret;
}
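
/*
 * The counterpart check to verify_one_dev_extent(): after all dev extents
 * have been visited, every chunk must have had each of its stripes matched
 * by exactly one dev extent (map->verified_stripes == map->num_stripes),
 * otherwise some chunk is missing a dev extent.
 */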
static int verify_chunk_dev_extent_mapping(struct btrfs_fs_info *fs_info)
{
	struct rb_node *node;
	int ret = 0;

	read_lock(&fs_info->mapping_tree_lock);
	for (node = rb_first_cached(&fs_info->mapping_tree); node; node = rb_next(node)) {
		struct btrfs_chunk_map *map;

		map = rb_entry(node, struct btrfs_chunk_map, rb_node);
		if (map->num_stripes != map->verified_stripes) {
			btrfs_err(fs_info,
			"chunk %llu has missing dev extent, have %d expect %d",
				  map->start, map->verified_stripes,
				  map->num_stripes);
			ret = -EUCLEAN;
			goto out;
		}
	}
out:
	read_unlock(&fs_info->mapping_tree_lock);
	return ret;
}

/*
 * Ensure that all dev extents are mapped to the correct chunk, otherwise a
 * later chunk allocation or free would cause unexpected behavior.
 *
 * NOTE: This will iterate through the whole device tree, which should be
 * about the same size as the chunk tree. This slightly increases mount time.
 */
int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info)
{
	struct btrfs_path *path;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_key key;
	u64 prev_devid = 0;
	u64 prev_dev_ext_end = 0;
	int ret = 0;

	/*
	 * We don't have a dev_root because we mounted with ignorebadroots and
	 * failed to load the root, so we want to skip the verification in this
	 * case for sure.
	 *
	 * However if the dev root is fine, but the tree itself is corrupted
	 * we'd still fail to mount. This verification is only to make sure
	 * writes can happen safely, so instead just bypass this check
	 * completely in the case of IGNOREBADROOTS.
	 */
	if (btrfs_test_opt(fs_info, IGNOREBADROOTS))
		return 0;

	key.objectid = 1;
	key.type = BTRFS_DEV_EXTENT_KEY;
	key.offset = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = READA_FORWARD;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
		ret = btrfs_next_leaf(root, path);
		if (ret < 0)
			goto out;
		/* No dev extents at all? Not good. */
		if (ret > 0) {
			ret = -EUCLEAN;
			goto out;
		}
	}
	while (1) {
		struct extent_buffer *leaf = path->nodes[0];
		struct btrfs_dev_extent *dext;
		int slot = path->slots[0];
		u64 chunk_offset;
		u64 physical_offset;
		u64 physical_len;
		u64 devid;

		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.type != BTRFS_DEV_EXTENT_KEY)
			break;
		devid = key.objectid;
		physical_offset = key.offset;

		dext = btrfs_item_ptr(leaf, slot, struct btrfs_dev_extent);
		chunk_offset = btrfs_dev_extent_chunk_offset(leaf, dext);
		physical_len = btrfs_dev_extent_length(leaf, dext);

		/* Check if this dev extent overlaps with the previous one. */
		if (devid == prev_devid && physical_offset < prev_dev_ext_end) {
			btrfs_err(fs_info,
"dev extent devid %llu physical offset %llu overlap with previous dev extent end %llu",
				  devid, physical_offset, prev_dev_ext_end);
			ret = -EUCLEAN;
			goto out;
		}

		ret = verify_one_dev_extent(fs_info, chunk_offset, devid,
					    physical_offset, physical_len);
		if (ret < 0)
			goto out;
		prev_devid = devid;
		prev_dev_ext_end = physical_offset + physical_len;

		ret = btrfs_next_item(root, path);
		if (ret < 0)
			goto out;
		if (ret > 0) {
			ret = 0;
			break;
		}
	}

	/* Ensure all chunks have corresponding dev extents. */
	ret = verify_chunk_dev_extent_mapping(fs_info);
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Check whether the given block group or device is pinned by any inode being
 * used as a swapfile.
 */
bool btrfs_pinned_by_swapfile(struct btrfs_fs_info *fs_info, void *ptr)
{
	struct btrfs_swapfile_pin *sp;
	struct rb_node *node;

	spin_lock(&fs_info->swapfile_pins_lock);
	node = fs_info->swapfile_pins.rb_node;
	while (node) {
		sp = rb_entry(node, struct btrfs_swapfile_pin, node);
		if (ptr < sp->ptr)
			node = node->rb_left;
		else if (ptr > sp->ptr)
			node = node->rb_right;
		else
			break;
	}
	spin_unlock(&fs_info->swapfile_pins_lock);
	return node != NULL;
}
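
/*
 * Background worker started by btrfs_repair_one_zone(): relocate the block
 * group passed in as @data so a failed zoned write can be repaired by
 * rewriting the data elsewhere, without blocking the IO error path itself.
 * It grabs the BTRFS_EXCLOP_BALANCE exclusive op, so it cannot run
 * concurrently with balance or device operations.
 */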
static int relocating_repair_kthread(void *data)
{
	struct btrfs_block_group *cache = data;
	struct btrfs_fs_info *fs_info = cache->fs_info;
	u64 target;
	int ret = 0;

	target = cache->start;
	btrfs_put_block_group(cache);

	sb_start_write(fs_info->sb);
	if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE)) {
		btrfs_info(fs_info,
			   "zoned: skip relocating block group %llu to repair: EBUSY",
			   target);
		sb_end_write(fs_info->sb);
		return -EBUSY;
	}

	mutex_lock(&fs_info->reclaim_bgs_lock);

	/* Ensure block group still exists */
	cache = btrfs_lookup_block_group(fs_info, target);
	if (!cache)
		goto out;

	if (!test_bit(BLOCK_GROUP_FLAG_RELOCATING_REPAIR, &cache->runtime_flags))
		goto out;

	ret = btrfs_may_alloc_data_chunk(fs_info, target);
	if (ret < 0)
		goto out;

	btrfs_info(fs_info,
		   "zoned: relocating block group %llu to repair IO failure",
		   target);
	ret = btrfs_relocate_chunk(fs_info, target);

out:
	if (cache)
		btrfs_put_block_group(cache);
	mutex_unlock(&fs_info->reclaim_bgs_lock);
	btrfs_exclop_finish(fs_info);
	sb_end_write(fs_info->sb);

	return ret;
}

bool btrfs_repair_one_zone(struct btrfs_fs_info *fs_info, u64 logical)
{
	struct btrfs_block_group *cache;

	if (!btrfs_is_zoned(fs_info))
		return false;

	/* Do not attempt to repair in degraded state */
	if (btrfs_test_opt(fs_info, DEGRADED))
		return true;

	cache = btrfs_lookup_block_group(fs_info, logical);
	if (!cache)
		return true;

	if (test_and_set_bit(BLOCK_GROUP_FLAG_RELOCATING_REPAIR, &cache->runtime_flags)) {
		btrfs_put_block_group(cache);
		return true;
	}

	kthread_run(relocating_repair_kthread, cache,
		    "btrfs-relocating-repair");

	return true;
}
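
/*
 * For a RAID56 full stripe, find the data stripe covering @logical and point
 * @smap at the corresponding device and physical offset.
 *
 * Illustrative example (assuming the 64K BTRFS_STRIPE_LEN): with
 * full_stripe_logical = 1M, a @logical of 1M + 80K lands in data stripe 1
 * (80K / 64K), at offset 16K (80K & BTRFS_STRIPE_LEN_MASK) from that
 * stripe's physical start.
 */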
static void map_raid56_repair_block(struct btrfs_io_context *bioc,
				    struct btrfs_io_stripe *smap,
				    u64 logical)
{
	int data_stripes = nr_bioc_data_stripes(bioc);
	int i;

	for (i = 0; i < data_stripes; i++) {
		u64 stripe_start = bioc->full_stripe_logical +
				   btrfs_stripe_nr_to_offset(i);

		if (logical >= stripe_start &&
		    logical < stripe_start + BTRFS_STRIPE_LEN)
			break;
	}
	ASSERT(i < data_stripes);
	smap->dev = bioc->stripes[i].dev;
	smap->physical = bioc->stripes[i].physical +
			((logical - bioc->full_stripe_logical) &
			 BTRFS_STRIPE_LEN_MASK);
}

/*
 * Map a repair write into a single device.
 *
 * A repair write is triggered by read time repair or scrub, which updates
 * the contents of a single device only. It does not update any other
 * mirrors nor go through the RMW path.
 *
 * Callers should ensure:
 *
 * - btrfs_bio_counter_inc_blocked() has been called first
 * - The range does not cross a stripe boundary
 * - A valid @mirror_num is passed in
 */
int btrfs_map_repair_block(struct btrfs_fs_info *fs_info,
			   struct btrfs_io_stripe *smap, u64 logical,
			   u32 length, int mirror_num)
{
	struct btrfs_io_context *bioc = NULL;
	u64 map_length = length;
	int mirror_ret = mirror_num;
	int ret;

	ASSERT(mirror_num > 0);

	ret = btrfs_map_block(fs_info, BTRFS_MAP_WRITE, logical, &map_length,
			      &bioc, smap, &mirror_ret);
	if (ret < 0)
		return ret;

	/* The mapped range should not cross a stripe boundary. */
	ASSERT(map_length >= length);

	/* Already mapped to a single stripe. */
	if (!bioc)
		goto out;

	/* Map the RAID56 multi-stripe writes to a single one. */
	if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		map_raid56_repair_block(bioc, smap, logical);
		goto out;
	}

	ASSERT(mirror_num <= bioc->num_stripes);
	smap->dev = bioc->stripes[mirror_num - 1].dev;
	smap->physical = bioc->stripes[mirror_num - 1].physical;
out:
	btrfs_put_bioc(bioc);
	ASSERT(smap->dev);
	return 0;
}