// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/semaphore.h>
#include <linux/uuid.h>
#include <linux/list_sort.h>
#include <linux/namei.h>
#include "misc.h"
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "rcu-string.h"
#include "dev-replace.h"
#include "sysfs.h"
#include "tree-checker.h"
#include "space-info.h"
#include "block-group.h"
#include "discard.h"
#include "zoned.h"
#include "fs.h"
#include "accessors.h"
#include "uuid-tree.h"
#include "ioctl.h"
#include "relocation.h"
#include "scrub.h"
#include "super.h"

#define BTRFS_BLOCK_GROUP_STRIPE_MASK	(BTRFS_BLOCK_GROUP_RAID0 | \
					 BTRFS_BLOCK_GROUP_RAID10 | \
					 BTRFS_BLOCK_GROUP_RAID56_MASK)

const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
	[BTRFS_RAID_RAID10] = {
		.sub_stripes	= 2,
		.dev_stripes	= 1,
		.devs_max	= 0,	/* 0 == as many as possible */
		.devs_min	= 2,
		.tolerated_failures = 1,
		.devs_increment	= 2,
		.ncopies	= 2,
		.nparity	= 0,
		.raid_name	= "raid10",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID10,
		.mindev_error	= BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 2,
		.devs_min	= 2,
		.tolerated_failures = 1,
		.devs_increment	= 2,
		.ncopies	= 2,
		.nparity	= 0,
		.raid_name	= "raid1",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID1,
		.mindev_error	= BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1C3] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 3,
		.devs_min	= 3,
		.tolerated_failures = 2,
		.devs_increment	= 3,
		.ncopies	= 3,
		.nparity	= 0,
		.raid_name	= "raid1c3",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID1C3,
		.mindev_error	= BTRFS_ERROR_DEV_RAID1C3_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1C4] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 4,
		.devs_min	= 4,
		.tolerated_failures = 3,
		.devs_increment	= 4,
		.ncopies	= 4,
		.nparity	= 0,
		.raid_name	= "raid1c4",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID1C4,
		.mindev_error	= BTRFS_ERROR_DEV_RAID1C4_MIN_NOT_MET,
	},
	[BTRFS_RAID_DUP] = {
		.sub_stripes	= 1,
		.dev_stripes	= 2,
		.devs_max	= 1,
		.devs_min	= 1,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 2,
		.nparity	= 0,
		.raid_name	= "dup",
		.bg_flag	= BTRFS_BLOCK_GROUP_DUP,
		.mindev_error	= 0,
	},
	[BTRFS_RAID_RAID0] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 1,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity	= 0,
		.raid_name	= "raid0",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID0,
		.mindev_error	= 0,
	},
	[BTRFS_RAID_SINGLE] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 1,
		.devs_min	= 1,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity	= 0,
		.raid_name	= "single",
		.bg_flag	= 0,
		.mindev_error	= 0,
	},
	[BTRFS_RAID_RAID5] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 2,
		.tolerated_failures = 1,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity	= 1,
		.raid_name	= "raid5",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID5,
		.mindev_error	= BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID6] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 3,
		.tolerated_failures = 2,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity	= 2,
		.raid_name	= "raid6",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID6,
		.mindev_error	= BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET,
	},
};

/*
 * Convert block group flags (BTRFS_BLOCK_GROUP_*) to btrfs_raid_types, which
 * can be used as index to access btrfs_raid_array[].
 */
enum btrfs_raid_types __attribute_const__ btrfs_bg_flags_to_raid_index(u64 flags)
{
	const u64 profile = (flags & BTRFS_BLOCK_GROUP_PROFILE_MASK);

	if (!profile)
		return BTRFS_RAID_SINGLE;

	return BTRFS_BG_FLAG_TO_INDEX(profile);
}

const char *btrfs_bg_type_to_raid_name(u64 flags)
{
	const int index = btrfs_bg_flags_to_raid_index(flags);

	if (index >= BTRFS_NR_RAID_TYPES)
		return NULL;

	return btrfs_raid_array[index].raid_name;
}

int btrfs_nr_parity_stripes(u64 type)
{
	enum btrfs_raid_types index = btrfs_bg_flags_to_raid_index(type);

	return btrfs_raid_array[index].nparity;
}
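
/*
 * Illustrative sketch, not part of the original file: the usual way the RAID
 * table above is consumed is to first convert a profile bit to an index with
 * btrfs_bg_flags_to_raid_index().  The helper name below is hypothetical.
 */
static inline int example_tolerated_failures(u64 profile_flag)
{
	const enum btrfs_raid_types index =
		btrfs_bg_flags_to_raid_index(profile_flag);

	/* e.g. BTRFS_BLOCK_GROUP_RAID1 yields 1, BTRFS_BLOCK_GROUP_RAID6 yields 2 */
	return btrfs_raid_array[index].tolerated_failures;
}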

/*
 * Fill @buf with a textual description of @bg_flags, no more than @size_buf
 * bytes including the terminating null byte.
 */
void btrfs_describe_block_groups(u64 bg_flags, char *buf, u32 size_buf)
{
	int i;
	int ret;
	char *bp = buf;
	u64 flags = bg_flags;
	u32 size_bp = size_buf;

	if (!flags) {
		strcpy(bp, "NONE");
		return;
	}

#define DESCRIBE_FLAG(flag, desc)					\
	do {								\
		if (flags & (flag)) {					\
			ret = snprintf(bp, size_bp, "%s|", (desc));	\
			if (ret < 0 || ret >= size_bp)			\
				goto out_overflow;			\
			size_bp -= ret;					\
			bp += ret;					\
			flags &= ~(flag);				\
		}							\
	} while (0)

	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_DATA, "data");
	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_SYSTEM, "system");
	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_METADATA, "metadata");

	DESCRIBE_FLAG(BTRFS_AVAIL_ALLOC_BIT_SINGLE, "single");
	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
		DESCRIBE_FLAG(btrfs_raid_array[i].bg_flag,
			      btrfs_raid_array[i].raid_name);
#undef DESCRIBE_FLAG

	if (flags) {
		ret = snprintf(bp, size_bp, "0x%llx|", flags);
		size_bp -= ret;
	}

	if (size_bp < size_buf)
		buf[size_buf - size_bp - 1] = '\0'; /* remove last | */

	/*
	 * The text is trimmed; it's up to the caller to provide a
	 * sufficiently large buffer.
	 */
out_overflow:;
}
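
/*
 * Example, illustrative and not part of the original file: for bg_flags of
 * BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_RAID1 the buffer receives
 * "data|raid1"; any leftover unknown bits are appended as a hex value.
 */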

static int init_first_rw_device(struct btrfs_trans_handle *trans);
static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info);
static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);

/*
 * Device locking
 * ==============
 *
 * There are several mutexes that protect manipulation of devices and low-level
 * structures like chunks but not block groups, extents or files.
 *
 * uuid_mutex (global lock)
 * ------------------------
 * protects the fs_uuids list that tracks all per-fs fs_devices, resulting from
 * the SCAN_DEV ioctl registration or from mount either implicitly (the first
 * device) or requested by the device= mount option
 *
 * the mutex can be very coarse and can cover long-running operations
 *
 * protects: updates to fs_devices counters like missing devices, rw devices,
 * seeding, structure cloning, opening/closing devices at mount/umount time
 *
 * global::fs_devs - add, remove, updates to the global list
 *
 * does not protect: manipulation of the fs_devices::devices list in general
 * but in mount context it could be used to exclude list modifications by e.g.
 * scan ioctl
 *
 * btrfs_device::name - renames (write side), read is RCU
 *
 * fs_devices::device_list_mutex (per-fs, with RCU)
 * ------------------------------------------------
 * protects updates to fs_devices::devices, i.e. adding and deleting
 *
 * simple list traversal with read-only actions can be done with RCU protection
 *
 * may be used to exclude some operations from running concurrently without any
 * modifications to the list (see write_all_supers)
 *
 * Is not required at mount and close times, because our device list is
 * protected by the uuid_mutex at that point.
 *
 * balance_mutex
 * -------------
 * protects balance structures (status, state) and context accessed from
 * several places (internally, ioctl)
 *
 * chunk_mutex
 * -----------
 * protects chunks, adding or removing during allocation, trim or when a new
 * device is added/removed. Additionally it also protects post_commit_list of
 * individual devices, since they can be added to the transaction's
 * post_commit_list only with chunk_mutex held.
 *
 * cleaner_mutex
 * -------------
 * a big lock that is held by the cleaner thread and prevents running subvolume
 * cleaning together with relocation or delayed iputs
 *
 *
 * Lock nesting
 * ============
 *
 * uuid_mutex
 *   device_list_mutex
 *     chunk_mutex
 *   balance_mutex
 *
 *
 * Exclusive operations
 * ====================
 *
 * Maintains the exclusivity of the following operations that apply to the
 * whole filesystem and cannot run in parallel.
 *
 * - Balance (*)
 * - Device add
 * - Device remove
 * - Device replace (*)
 * - Resize
 *
 * The device operations (as above) can be in one of the following states:
 *
 * - Running state
 * - Paused state
 * - Completed state
 *
 * Only device operations marked with (*) can go into the Paused state for the
 * following reasons:
 *
 * - ioctl (only Balance can be Paused through ioctl)
 * - filesystem remounted as read-only
 * - filesystem unmounted and mounted as read-only
 * - system power-cycle and filesystem mounted as read-only
 * - filesystem or device errors leading to forced read-only
 *
 * The status of exclusive operation is set and cleared atomically.
 * During the course of Paused state, fs_info::exclusive_operation remains set.
 * A device operation in Paused or Running state can be canceled or resumed
 * either by ioctl (Balance only) or when remounted as read-write.
 * The exclusive status is cleared when the device operation is canceled or
 * completed.
 */
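
/*
 * Illustrative nesting sketch, not part of the original file, following the
 * lock order documented above:
 *
 *	mutex_lock(&uuid_mutex);
 *	mutex_lock(&fs_devices->device_list_mutex);
 *	mutex_lock(&fs_info->chunk_mutex);
 *	...
 *	mutex_unlock(&fs_info->chunk_mutex);
 *	mutex_unlock(&fs_devices->device_list_mutex);
 *	mutex_unlock(&uuid_mutex);
 */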

DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);
struct list_head * __attribute_const__ btrfs_get_fs_uuids(void)
{
	return &fs_uuids;
}

/*
 * alloc_fs_devices - allocate struct btrfs_fs_devices
 * @fsid:		if not NULL, copy the UUID to fs_devices::fsid
 * @metadata_fsid:	if not NULL, copy the UUID to fs_devices::metadata_fsid
 *
 * Return a pointer to a new struct btrfs_fs_devices on success, or ERR_PTR().
 * The returned struct is not linked onto any lists and can be destroyed with
 * kfree() right away.
 */
static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid,
						 const u8 *metadata_fsid)
{
	struct btrfs_fs_devices *fs_devs;

	fs_devs = kzalloc(sizeof(*fs_devs), GFP_KERNEL);
	if (!fs_devs)
		return ERR_PTR(-ENOMEM);

	mutex_init(&fs_devs->device_list_mutex);

	INIT_LIST_HEAD(&fs_devs->devices);
	INIT_LIST_HEAD(&fs_devs->alloc_list);
	INIT_LIST_HEAD(&fs_devs->fs_list);
	INIT_LIST_HEAD(&fs_devs->seed_list);
	if (fsid)
		memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE);

	if (metadata_fsid)
		memcpy(fs_devs->metadata_uuid, metadata_fsid, BTRFS_FSID_SIZE);
	else if (fsid)
		memcpy(fs_devs->metadata_uuid, fsid, BTRFS_FSID_SIZE);

	return fs_devs;
}

void btrfs_free_device(struct btrfs_device *device)
{
	WARN_ON(!list_empty(&device->post_commit_list));
	rcu_string_free(device->name);
	btrfs_destroy_dev_zone_info(device);
	kfree(device);
}

static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;

	WARN_ON(fs_devices->opened);
	while (!list_empty(&fs_devices->devices)) {
		device = list_entry(fs_devices->devices.next,
				    struct btrfs_device, dev_list);
		list_del(&device->dev_list);
		btrfs_free_device(device);
	}
	kfree(fs_devices);
}

void __exit btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;

	while (!list_empty(&fs_uuids)) {
		fs_devices = list_entry(fs_uuids.next,
					struct btrfs_fs_devices, fs_list);
		list_del(&fs_devices->fs_list);
		free_fs_devices(fs_devices);
	}
}

static noinline struct btrfs_fs_devices *find_fsid(
		const u8 *fsid, const u8 *metadata_fsid)
{
	struct btrfs_fs_devices *fs_devices;

	ASSERT(fsid);

	/* Handle non-split brain cases */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (metadata_fsid) {
			if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0
			    && memcmp(metadata_fsid, fs_devices->metadata_uuid,
				      BTRFS_FSID_SIZE) == 0)
				return fs_devices;
		} else {
			if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
				return fs_devices;
		}
	}
	return NULL;
}

static struct btrfs_fs_devices *find_fsid_with_metadata_uuid(
				struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	/*
	 * Handle scanned device having completed its fsid change but
	 * belonging to a fs_devices that was created by first scanning
	 * a device which didn't have its fsid/metadata_uuid changed
	 * at all and the CHANGING_FSID_V2 flag set.
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (fs_devices->fsid_change &&
		    memcmp(disk_super->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) == 0 &&
		    memcmp(fs_devices->fsid, fs_devices->metadata_uuid,
			   BTRFS_FSID_SIZE) == 0) {
			return fs_devices;
		}
	}
	/*
	 * Handle scanned device having completed its fsid change but
	 * belonging to a fs_devices that was created by a device that
	 * has an outdated pair of fsid/metadata_uuid and
	 * CHANGING_FSID_V2 flag set.
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (fs_devices->fsid_change &&
		    memcmp(fs_devices->metadata_uuid,
			   fs_devices->fsid, BTRFS_FSID_SIZE) != 0 &&
		    memcmp(disk_super->metadata_uuid, fs_devices->metadata_uuid,
			   BTRFS_FSID_SIZE) == 0) {
			return fs_devices;
		}
	}

	return find_fsid(disk_super->fsid, disk_super->metadata_uuid);
}

static int
btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder,
		      int flush, struct block_device **bdev,
		      struct btrfs_super_block **disk_super)
{
	int ret;

	*bdev = blkdev_get_by_path(device_path, flags, holder);

	if (IS_ERR(*bdev)) {
		ret = PTR_ERR(*bdev);
		goto error;
	}

	if (flush)
		sync_blockdev(*bdev);
	ret = set_blocksize(*bdev, BTRFS_BDEV_BLOCKSIZE);
	if (ret) {
		blkdev_put(*bdev, flags);
		goto error;
	}
	invalidate_bdev(*bdev);
	*disk_super = btrfs_read_dev_super(*bdev);
	if (IS_ERR(*disk_super)) {
		ret = PTR_ERR(*disk_super);
		blkdev_put(*bdev, flags);
		goto error;
	}

	return 0;

error:
	*bdev = NULL;
	return ret;
}

/*
 * Search and remove all stale devices (which are not mounted). When both
 * inputs are NULL, it will search and release all stale devices.
 *
 * @devt:	 Optional. When provided, it will release all unmounted
 *		 devices matching this devt only.
 * @skip_device: Optional. Will skip this device when searching for the stale
 *		 devices.
 *
 * Return:	0 for success or if @devt is 0.
 *		-EBUSY if @devt is a mounted device.
 *		-ENOENT if @devt does not match any device in the list.
 */
static int btrfs_free_stale_devices(dev_t devt, struct btrfs_device *skip_device)
{
	struct btrfs_fs_devices *fs_devices, *tmp_fs_devices;
	struct btrfs_device *device, *tmp_device;
	int ret = 0;

	lockdep_assert_held(&uuid_mutex);

	if (devt)
		ret = -ENOENT;

	list_for_each_entry_safe(fs_devices, tmp_fs_devices, &fs_uuids, fs_list) {
		mutex_lock(&fs_devices->device_list_mutex);
		list_for_each_entry_safe(device, tmp_device,
					 &fs_devices->devices, dev_list) {
			if (skip_device && skip_device == device)
				continue;
			if (devt && devt != device->devt)
				continue;
			if (fs_devices->opened) {
				/* for an already deleted device return 0 */
				if (devt && ret != 0)
					ret = -EBUSY;
				break;
			}

			/* delete the stale device */
			fs_devices->num_devices--;
			list_del(&device->dev_list);
			btrfs_free_device(device);

			ret = 0;
		}
		mutex_unlock(&fs_devices->device_list_mutex);

		if (fs_devices->num_devices == 0) {
			btrfs_sysfs_remove_fsid(fs_devices);
			list_del(&fs_devices->fs_list);
			free_fs_devices(fs_devices);
		}
	}

	return ret;
}
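
/*
 * Usage note, illustrative and not part of the original file:
 * btrfs_forget_devices() below wraps this helper under uuid_mutex; passing
 * devt == 0 and skip_device == NULL releases every unmounted device.
 */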

/*
 * This is only used on mount, and we are protected from competing things
 * messing with our fs_devices by the uuid_mutex, thus we do not need the
 * fs_devices->device_list_mutex here.
 */
static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
				 struct btrfs_device *device, fmode_t flags,
				 void *holder)
{
	struct block_device *bdev;
	struct btrfs_super_block *disk_super;
	u64 devid;
	int ret;

	if (device->bdev)
		return -EINVAL;
	if (!device->name)
		return -EINVAL;

	ret = btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
				    &bdev, &disk_super);
	if (ret)
		return ret;

	devid = btrfs_stack_device_id(&disk_super->dev_item);
	if (devid != device->devid)
		goto error_free_page;

	if (memcmp(device->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE))
		goto error_free_page;

	device->generation = btrfs_super_generation(disk_super);

	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
		if (btrfs_super_incompat_flags(disk_super) &
		    BTRFS_FEATURE_INCOMPAT_METADATA_UUID) {
			pr_err(
		"BTRFS: Invalid seeding and uuid-changed device detected\n");
			goto error_free_page;
		}

		clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
		fs_devices->seeding = true;
	} else {
		if (bdev_read_only(bdev))
			clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
		else
			set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
	}

	if (!bdev_nonrot(bdev))
		fs_devices->rotating = true;

	if (bdev_max_discard_sectors(bdev))
		fs_devices->discardable = true;

	device->bdev = bdev;
	clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
	device->mode = flags;

	fs_devices->open_devices++;
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
		fs_devices->rw_devices++;
		list_add_tail(&device->dev_alloc_list, &fs_devices->alloc_list);
	}
	btrfs_release_disk_super(disk_super);

	return 0;

error_free_page:
	btrfs_release_disk_super(disk_super);
	blkdev_put(bdev, flags);

	return -EINVAL;
}

/*
 * Handle scanned device having its CHANGING_FSID_V2 flag set and the fs_devices
 * being created with a disk that has already completed its fsid change. Such a
 * disk can belong to an fs which has its FSID changed or to one which doesn't.
 * Handle both cases here.
 */
static struct btrfs_fs_devices *find_fsid_inprogress(
					struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) != 0 &&
		    memcmp(fs_devices->metadata_uuid, disk_super->fsid,
			   BTRFS_FSID_SIZE) == 0 && !fs_devices->fsid_change) {
			return fs_devices;
		}
	}

	return find_fsid(disk_super->fsid, NULL);
}

static struct btrfs_fs_devices *find_fsid_changed(
					struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	/*
	 * Handles the case where the scanned device is part of an fs that had
	 * multiple successful changes of FSID but currently the device didn't
	 * observe it. Meaning our fsid will be different than theirs. We need
	 * to handle two subcases:
	 * 1 - The fs still continues to have different METADATA/FSID uuids.
	 * 2 - The fs is switched back to its original FSID (METADATA/FSID
	 *     are equal).
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		/* Changed UUIDs */
		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) != 0 &&
		    memcmp(fs_devices->metadata_uuid, disk_super->metadata_uuid,
			   BTRFS_FSID_SIZE) == 0 &&
		    memcmp(fs_devices->fsid, disk_super->fsid,
			   BTRFS_FSID_SIZE) != 0)
			return fs_devices;

		/* Unchanged UUIDs */
		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) == 0 &&
		    memcmp(fs_devices->fsid, disk_super->metadata_uuid,
			   BTRFS_FSID_SIZE) == 0)
			return fs_devices;
	}

	return NULL;
}

static struct btrfs_fs_devices *find_fsid_reverted_metadata(
					struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	/*
	 * Handle the case where the scanned device is part of an fs whose last
	 * metadata UUID change reverted it to the original FSID. At the same
	 * time fs_devices was first created by another constituent device
	 * which didn't fully observe the operation. This results in a
	 * btrfs_fs_devices created with metadata/fsid different AND
	 * btrfs_fs_devices::fsid_change set AND the metadata_uuid of the
	 * fs_devices equal to the FSID of the disk.
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (memcmp(fs_devices->fsid, fs_devices->metadata_uuid,
			   BTRFS_FSID_SIZE) != 0 &&
		    memcmp(fs_devices->metadata_uuid, disk_super->fsid,
			   BTRFS_FSID_SIZE) == 0 &&
		    fs_devices->fsid_change)
			return fs_devices;
	}

	return NULL;
}

/*
 * Add new device to list of registered devices
 *
 * Returns:
 * device pointer which was just added or updated when successful
 * error pointer when failed
 */
static noinline struct btrfs_device *device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   bool *new_device_added)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices = NULL;
	struct rcu_string *name;
	u64 found_transid = btrfs_super_generation(disk_super);
	u64 devid = btrfs_stack_device_id(&disk_super->dev_item);
	dev_t path_devt;
	int error;
	bool has_metadata_uuid = (btrfs_super_incompat_flags(disk_super) &
		BTRFS_FEATURE_INCOMPAT_METADATA_UUID);
	bool fsid_change_in_progress = (btrfs_super_flags(disk_super) &
					BTRFS_SUPER_FLAG_CHANGING_FSID_V2);

	error = lookup_bdev(path, &path_devt);
	if (error) {
		btrfs_err(NULL, "failed to lookup block device for path %s: %d",
			  path, error);
		return ERR_PTR(error);
	}

	if (fsid_change_in_progress) {
		if (!has_metadata_uuid)
			fs_devices = find_fsid_inprogress(disk_super);
		else
			fs_devices = find_fsid_changed(disk_super);
	} else if (has_metadata_uuid) {
		fs_devices = find_fsid_with_metadata_uuid(disk_super);
	} else {
		fs_devices = find_fsid_reverted_metadata(disk_super);
		if (!fs_devices)
			fs_devices = find_fsid(disk_super->fsid, NULL);
	}

	if (!fs_devices) {
		if (has_metadata_uuid)
			fs_devices = alloc_fs_devices(disk_super->fsid,
						      disk_super->metadata_uuid);
		else
			fs_devices = alloc_fs_devices(disk_super->fsid, NULL);

		if (IS_ERR(fs_devices))
			return ERR_CAST(fs_devices);

		fs_devices->fsid_change = fsid_change_in_progress;

		mutex_lock(&fs_devices->device_list_mutex);
		list_add(&fs_devices->fs_list, &fs_uuids);

		device = NULL;
	} else {
		struct btrfs_dev_lookup_args args = {
			.devid = devid,
			.uuid = disk_super->dev_item.uuid,
		};

		mutex_lock(&fs_devices->device_list_mutex);
		device = btrfs_find_device(fs_devices, &args);

		/*
		 * If this disk has been pulled into an fs devices created by
		 * a device which had the CHANGING_FSID_V2 flag then replace the
		 * metadata_uuid/fsid values of the fs_devices.
		 */
		if (fs_devices->fsid_change &&
		    found_transid > fs_devices->latest_generation) {
			memcpy(fs_devices->fsid, disk_super->fsid,
			       BTRFS_FSID_SIZE);

			if (has_metadata_uuid)
				memcpy(fs_devices->metadata_uuid,
				       disk_super->metadata_uuid,
				       BTRFS_FSID_SIZE);
			else
				memcpy(fs_devices->metadata_uuid,
				       disk_super->fsid, BTRFS_FSID_SIZE);

			fs_devices->fsid_change = false;
		}
	}

	if (!device) {
		unsigned int nofs_flag;

		if (fs_devices->opened) {
			btrfs_err(NULL,
		"device %s belongs to fsid %pU, and the fs is already mounted",
				  path, fs_devices->fsid);
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-EBUSY);
		}

		nofs_flag = memalloc_nofs_save();
		device = btrfs_alloc_device(NULL, &devid,
					    disk_super->dev_item.uuid, path);
		memalloc_nofs_restore(nofs_flag);
		if (IS_ERR(device)) {
			mutex_unlock(&fs_devices->device_list_mutex);
			/* we can safely leave the fs_devices entry around */
			return device;
		}

		device->devt = path_devt;

		list_add_rcu(&device->dev_list, &fs_devices->devices);
		fs_devices->num_devices++;

		device->fs_devices = fs_devices;
		*new_device_added = true;

		if (disk_super->label[0])
			pr_info(
	"BTRFS: device label %s devid %llu transid %llu %s scanned by %s (%d)\n",
				disk_super->label, devid, found_transid, path,
				current->comm, task_pid_nr(current));
		else
			pr_info(
	"BTRFS: device fsid %pU devid %llu transid %llu %s scanned by %s (%d)\n",
				disk_super->fsid, devid, found_transid, path,
				current->comm, task_pid_nr(current));

	} else if (!device->name || strcmp(device->name->str, path)) {
		/*
		 * When FS is already mounted.
		 * 1. If you are here and if the device->name is NULL that
		 *    means this device was missing at time of FS mount.
		 * 2. If you are here and if the device->name is different
		 *    from 'path' that means either
		 *	a. The same device disappeared and reappeared with
		 *	   different name. or
		 *	b. The missing-disk-which-was-replaced, has
		 *	   reappeared now.
		 *
		 * We must allow 1 and 2a above. But 2b would be spurious
		 * and unintentional.
		 *
		 * Further in case of 1 and 2a above, the disk at 'path'
		 * would have missed some transaction when it was away and
		 * in case of 2a the stale bdev has to be updated as well.
		 * 2b must not be allowed at any time.
		 */

		/*
		 * For now, we do allow update to btrfs_fs_device through the
		 * btrfs dev scan cli after FS has been mounted. We're still
		 * tracking a problem where systems fail mount by subvolume id
		 * when we reject replacement on a mounted FS.
		 */
		if (!fs_devices->opened && found_transid < device->generation) {
			/*
			 * That is if the FS is _not_ mounted and if you
			 * are here, that means there is more than one
			 * disk with same uuid and devid. We keep the one
			 * with larger generation number or the last-in if
			 * generations are equal.
			 */
			mutex_unlock(&fs_devices->device_list_mutex);
			btrfs_err(NULL,
	"device %s already registered with a higher generation, found %llu expect %llu",
				  path, found_transid, device->generation);
			return ERR_PTR(-EEXIST);
		}

		/*
		 * We are going to replace the device path for a given devid,
		 * make sure it's the same device if the device is mounted.
		 *
		 * NOTE: the device->fs_info may not be reliable here, so pass
		 * in a NULL to message helpers instead. This avoids a possible
		 * use-after-free when the fs_info and fs_info->sb are already
		 * torn down.
		 */
		if (device->bdev) {
			if (device->devt != path_devt) {
				mutex_unlock(&fs_devices->device_list_mutex);
				btrfs_warn_in_rcu(NULL,
	"duplicate device %s devid %llu generation %llu scanned by %s (%d)",
						  path, devid, found_transid,
						  current->comm,
						  task_pid_nr(current));
				return ERR_PTR(-EEXIST);
			}
			btrfs_info_in_rcu(NULL,
	"devid %llu device path %s changed to %s scanned by %s (%d)",
					  devid, btrfs_dev_name(device),
					  path, current->comm,
					  task_pid_nr(current));
		}

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name) {
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-ENOMEM);
		}
		rcu_string_free(device->name);
		rcu_assign_pointer(device->name, name);
		if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
			fs_devices->missing_devices--;
			clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
		}
		device->devt = path_devt;
	}

	/*
	 * Unmount does not free the btrfs_device struct but would zero
	 * generation along with most of the other members. So just update
	 * it back. We need it to pick the disk with largest generation
	 * (as above).
	 */
	if (!fs_devices->opened) {
		device->generation = found_transid;
		fs_devices->latest_generation = max_t(u64, found_transid,
						fs_devices->latest_generation);
	}

	fs_devices->total_devices = btrfs_super_num_devices(disk_super);

	mutex_unlock(&fs_devices->device_list_mutex);
	return device;
}

static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
{
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_device *device;
	struct btrfs_device *orig_dev;
	int ret = 0;

	lockdep_assert_held(&uuid_mutex);

	fs_devices = alloc_fs_devices(orig->fsid, NULL);
	if (IS_ERR(fs_devices))
		return fs_devices;

	fs_devices->total_devices = orig->total_devices;

	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
		const char *dev_path = NULL;

		/*
		 * This is ok to do without RCU read locked because we hold the
		 * uuid mutex so nothing we touch in here is going to disappear.
		 */
		if (orig_dev->name)
			dev_path = orig_dev->name->str;

		device = btrfs_alloc_device(NULL, &orig_dev->devid,
					    orig_dev->uuid, dev_path);
		if (IS_ERR(device)) {
			ret = PTR_ERR(device);
			goto error;
		}

		if (orig_dev->zone_info) {
			struct btrfs_zoned_device_info *zone_info;

			zone_info = btrfs_clone_dev_zone_info(orig_dev);
			if (!zone_info) {
				btrfs_free_device(device);
				ret = -ENOMEM;
				goto error;
			}
			device->zone_info = zone_info;
		}

		list_add(&device->dev_list, &fs_devices->devices);
		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	}
	return fs_devices;
error:
	free_fs_devices(fs_devices);
	return ERR_PTR(ret);
}

static void __btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices,
				      struct btrfs_device **latest_dev)
{
	struct btrfs_device *device, *next;

	/* This is the initialized path, it is safe to release the devices. */
	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
		if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state)) {
			if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
				      &device->dev_state) &&
			    !test_bit(BTRFS_DEV_STATE_MISSING,
				      &device->dev_state) &&
			    (!*latest_dev ||
			     device->generation > (*latest_dev)->generation)) {
				*latest_dev = device;
			}
			continue;
		}

		/*
		 * We have already validated the presence of
		 * BTRFS_DEV_REPLACE_DEVID in btrfs_init_dev_replace(), so just
		 * continue.
		 */
		if (device->devid == BTRFS_DEV_REPLACE_DEVID)
			continue;

		if (device->bdev) {
			blkdev_put(device->bdev, device->mode);
			device->bdev = NULL;
			fs_devices->open_devices--;
		}
		if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
			list_del_init(&device->dev_alloc_list);
			clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
			fs_devices->rw_devices--;
		}
		list_del_init(&device->dev_list);
		fs_devices->num_devices--;
		btrfs_free_device(device);
	}
}

/*
 * After we have read the system tree and know devids belonging to this
 * filesystem, remove the device which does not belong there.
 */
void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *latest_dev = NULL;
	struct btrfs_fs_devices *seed_dev;

	mutex_lock(&uuid_mutex);
	__btrfs_free_extra_devids(fs_devices, &latest_dev);

	list_for_each_entry(seed_dev, &fs_devices->seed_list, seed_list)
		__btrfs_free_extra_devids(seed_dev, &latest_dev);

	fs_devices->latest_dev = latest_dev;

	mutex_unlock(&uuid_mutex);
}

static void btrfs_close_bdev(struct btrfs_device *device)
{
	if (!device->bdev)
		return;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
		sync_blockdev(device->bdev);
		invalidate_bdev(device->bdev);
	}

	blkdev_put(device->bdev, device->mode);
}

static void btrfs_close_one_device(struct btrfs_device *device)
{
	struct btrfs_fs_devices *fs_devices = device->fs_devices;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
		list_del_init(&device->dev_alloc_list);
		fs_devices->rw_devices--;
	}

	if (device->devid == BTRFS_DEV_REPLACE_DEVID)
		clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);

	if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
		clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
		fs_devices->missing_devices--;
	}

	btrfs_close_bdev(device);
	if (device->bdev) {
		fs_devices->open_devices--;
		device->bdev = NULL;
	}
	clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
	btrfs_destroy_dev_zone_info(device);

	device->fs_info = NULL;
	atomic_set(&device->dev_stats_ccnt, 0);
	extent_io_tree_release(&device->alloc_state);

	/*
	 * Reset the flush error record. We might have a transient flush error
	 * in this mount, and if so we aborted the current transaction and set
	 * the fs to an error state, guaranteeing no super blocks can be further
	 * committed. However that error might be transient and if we unmount the
	 * filesystem and mount it again, we should allow the mount to succeed
	 * (btrfs_check_rw_degradable() should not fail) - if after mounting the
	 * filesystem again we still get flush errors, then we will again abort
	 * any transaction and set the error state, guaranteeing no commits of
	 * unsafe super blocks.
	 */
	device->last_flush_error = 0;

	/* Verify the device is back in a pristine state */
	WARN_ON(test_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state));
	WARN_ON(test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state));
	WARN_ON(!list_empty(&device->dev_alloc_list));
	WARN_ON(!list_empty(&device->post_commit_list));
}

static void close_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device, *tmp;

	lockdep_assert_held(&uuid_mutex);

	if (--fs_devices->opened > 0)
		return;

	list_for_each_entry_safe(device, tmp, &fs_devices->devices, dev_list)
		btrfs_close_one_device(device);

	WARN_ON(fs_devices->open_devices);
	WARN_ON(fs_devices->rw_devices);
	fs_devices->opened = 0;
	fs_devices->seeding = false;
	fs_devices->fs_info = NULL;
}

void btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	LIST_HEAD(list);
	struct btrfs_fs_devices *tmp;

	mutex_lock(&uuid_mutex);
	close_fs_devices(fs_devices);
	if (!fs_devices->opened) {
		list_splice_init(&fs_devices->seed_list, &list);

		/*
		 * If the struct btrfs_fs_devices is not assembled with any
		 * other device, it can be re-initialized during the next mount
		 * without needing the device-scan step. Therefore, it can be
		 * fully freed.
		 */
		if (fs_devices->num_devices == 1) {
			list_del(&fs_devices->fs_list);
			free_fs_devices(fs_devices);
		}
	}

	list_for_each_entry_safe(fs_devices, tmp, &list, seed_list) {
		close_fs_devices(fs_devices);
		list_del(&fs_devices->seed_list);
		free_fs_devices(fs_devices);
	}
	mutex_unlock(&uuid_mutex);
}

static int open_fs_devices(struct btrfs_fs_devices *fs_devices,
			   fmode_t flags, void *holder)
{
	struct btrfs_device *device;
	struct btrfs_device *latest_dev = NULL;
	struct btrfs_device *tmp_device;

	flags |= FMODE_EXCL;

	list_for_each_entry_safe(device, tmp_device, &fs_devices->devices,
				 dev_list) {
		int ret;

		ret = btrfs_open_one_device(fs_devices, device, flags, holder);
		if (ret == 0 &&
		    (!latest_dev || device->generation > latest_dev->generation)) {
			latest_dev = device;
		} else if (ret == -ENODATA) {
			fs_devices->num_devices--;
			list_del(&device->dev_list);
			btrfs_free_device(device);
		}
	}
	if (fs_devices->open_devices == 0)
		return -EINVAL;

	fs_devices->opened = 1;
	fs_devices->latest_dev = latest_dev;
	fs_devices->total_rw_bytes = 0;
	fs_devices->chunk_alloc_policy = BTRFS_CHUNK_ALLOC_REGULAR;
	fs_devices->read_policy = BTRFS_READ_POLICY_PID;

	return 0;
}

static int devid_cmp(void *priv, const struct list_head *a,
		     const struct list_head *b)
{
	const struct btrfs_device *dev1, *dev2;

	dev1 = list_entry(a, struct btrfs_device, dev_list);
	dev2 = list_entry(b, struct btrfs_device, dev_list);

	if (dev1->devid < dev2->devid)
		return -1;
	else if (dev1->devid > dev2->devid)
		return 1;
	return 0;
}

int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       fmode_t flags, void *holder)
{
	int ret;

	lockdep_assert_held(&uuid_mutex);
	/*
	 * The device_list_mutex cannot be taken here in case opening the
	 * underlying device takes further locks like open_mutex.
	 *
	 * We also don't need the lock here as this is called during mount and
	 * exclusion is provided by uuid_mutex.
	 */

	if (fs_devices->opened) {
		fs_devices->opened++;
		ret = 0;
	} else {
		list_sort(NULL, &fs_devices->devices, devid_cmp);
		ret = open_fs_devices(fs_devices, flags, holder);
	}

	return ret;
}

void btrfs_release_disk_super(struct btrfs_super_block *super)
{
	struct page *page = virt_to_page(super);

	put_page(page);
}

static struct btrfs_super_block *btrfs_read_disk_super(struct block_device *bdev,
						       u64 bytenr, u64 bytenr_orig)
{
	struct btrfs_super_block *disk_super;
	struct page *page;
	void *p;
	pgoff_t index;

	/* make sure our super fits in the device */
	if (bytenr + PAGE_SIZE >= bdev_nr_bytes(bdev))
		return ERR_PTR(-EINVAL);

	/* make sure our super fits in the page */
	if (sizeof(*disk_super) > PAGE_SIZE)
		return ERR_PTR(-EINVAL);

	/* make sure our super doesn't straddle pages on disk */
	index = bytenr >> PAGE_SHIFT;
	if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_SHIFT != index)
		return ERR_PTR(-EINVAL);

	/* pull in the page with our super */
	page = read_cache_page_gfp(bdev->bd_inode->i_mapping, index, GFP_KERNEL);
	if (IS_ERR(page))
		return ERR_CAST(page);

	p = page_address(page);

	/* align our pointer to the offset of the super block */
	disk_super = p + offset_in_page(bytenr);

	if (btrfs_super_bytenr(disk_super) != bytenr_orig ||
	    btrfs_super_magic(disk_super) != BTRFS_MAGIC) {
		btrfs_release_disk_super(p);
		return ERR_PTR(-EINVAL);
	}

	if (disk_super->label[0] && disk_super->label[BTRFS_LABEL_SIZE - 1])
		disk_super->label[BTRFS_LABEL_SIZE - 1] = 0;

	return disk_super;
}
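
/*
 * Worked example, illustrative and not part of the original file: with 4 KiB
 * pages the primary superblock at bytenr 65536 maps to page index 16
 * (65536 >> 12), starts at offset 0 within that page, and so passes the
 * straddle check above.
 */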

int btrfs_forget_devices(dev_t devt)
{
	int ret;

	mutex_lock(&uuid_mutex);
	ret = btrfs_free_stale_devices(devt, NULL);
	mutex_unlock(&uuid_mutex);

	return ret;
}

/*
 * Look for a btrfs signature on a device. This may be called out of the mount
 * path and we are not allowed to call set_blocksize during the scan. The
 * superblock is read via the pagecache.
 */
struct btrfs_device *btrfs_scan_one_device(const char *path, fmode_t flags,
					   void *holder)
{
	struct btrfs_super_block *disk_super;
	bool new_device_added = false;
	struct btrfs_device *device = NULL;
	struct block_device *bdev;
	u64 bytenr, bytenr_orig;
	int ret;

	lockdep_assert_held(&uuid_mutex);

	/*
	 * We would like to check all the supers, but that would make
	 * a btrfs mount succeed after a mkfs from a different FS.
	 * So, we need to add a special mount option to scan for
	 * later supers, using BTRFS_SUPER_MIRROR_MAX instead.
	 */

	/*
	 * Avoid using flag |= FMODE_EXCL here, as the systemd-udev may
	 * initiate the device scan which may race with the user's mount
	 * or mkfs command, resulting in failure.
	 * Since the device scan is solely for reading purposes, there is
	 * no need for FMODE_EXCL. Additionally, the devices are read again
	 * during the mount process. It is ok to get some inconsistent
	 * values temporarily, as the device paths of the fsid are the only
	 * required information for assembling the volume.
	 */
	bdev = blkdev_get_by_path(path, flags, holder);
	if (IS_ERR(bdev))
		return ERR_CAST(bdev);

	bytenr_orig = btrfs_sb_offset(0);
	ret = btrfs_sb_log_location_bdev(bdev, 0, READ, &bytenr);
	if (ret) {
		device = ERR_PTR(ret);
		goto error_bdev_put;
	}

	disk_super = btrfs_read_disk_super(bdev, bytenr, bytenr_orig);
	if (IS_ERR(disk_super)) {
		device = ERR_CAST(disk_super);
		goto error_bdev_put;
	}

	device = device_list_add(path, disk_super, &new_device_added);
	if (!IS_ERR(device) && new_device_added)
		btrfs_free_stale_devices(device->devt, device);

	btrfs_release_disk_super(disk_super);

error_bdev_put:
	blkdev_put(bdev, flags);

	return device;
}

/*
 * Try to find a chunk that intersects the [start, start + len] range and when
 * one such is found, record the end of it in *start.
 */
static bool contains_pending_extent(struct btrfs_device *device, u64 *start,
				    u64 len)
{
	u64 physical_start, physical_end;

	lockdep_assert_held(&device->fs_info->chunk_mutex);

	if (!find_first_extent_bit(&device->alloc_state, *start,
				   &physical_start, &physical_end,
				   CHUNK_ALLOCATED, NULL)) {

		if (in_range(physical_start, *start, len) ||
		    in_range(*start, physical_start,
			     physical_end - physical_start)) {
			*start = physical_end + 1;
			return true;
		}
	}
	return false;
}

static u64 dev_extent_search_start(struct btrfs_device *device, u64 start)
{
	switch (device->fs_devices->chunk_alloc_policy) {
	case BTRFS_CHUNK_ALLOC_REGULAR:
		return max_t(u64, start, BTRFS_DEVICE_RANGE_RESERVED);
	case BTRFS_CHUNK_ALLOC_ZONED:
		/*
		 * We don't care about the starting region like the regular
		 * allocator, because we anyway use/reserve the first two zones
		 * for superblock logging.
		 */
		return ALIGN(start, device->zone_info->zone_size);
	default:
		BUG();
	}
}

static bool dev_extent_hole_check_zoned(struct btrfs_device *device,
					u64 *hole_start, u64 *hole_size,
					u64 num_bytes)
{
	u64 zone_size = device->zone_info->zone_size;
	u64 pos;
	int ret;
	bool changed = false;

	ASSERT(IS_ALIGNED(*hole_start, zone_size));

	while (*hole_size > 0) {
		pos = btrfs_find_allocatable_zones(device, *hole_start,
						   *hole_start + *hole_size,
						   num_bytes);
		if (pos != *hole_start) {
			*hole_size = *hole_start + *hole_size - pos;
			*hole_start = pos;
			changed = true;
			if (*hole_size < num_bytes)
				break;
		}

		ret = btrfs_ensure_empty_zones(device, pos, num_bytes);

		/* Range is ensured to be empty */
		if (!ret)
			return changed;

		/* Given hole range was invalid (outside of device) */
		if (ret == -ERANGE) {
			*hole_start += *hole_size;
			*hole_size = 0;
			return true;
		}

		*hole_start += zone_size;
		*hole_size -= zone_size;
		changed = true;
	}

	return changed;
}

/*
 * Check if specified hole is suitable for allocation.
 *
 * @device:	the device which we have the hole
 * @hole_start:	starting position of the hole
 * @hole_size:	the size of the hole
 * @num_bytes:	the size of the free space that we need
 *
 * This function may modify @hole_start and @hole_size to reflect the suitable
 * position for allocation. Returns true if the hole position was updated,
 * false otherwise.
 */
static bool dev_extent_hole_check(struct btrfs_device *device, u64 *hole_start,
				  u64 *hole_size, u64 num_bytes)
{
	bool changed = false;
	u64 hole_end = *hole_start + *hole_size;

	for (;;) {
		/*
		 * Check before we set max_hole_start, otherwise we could end up
		 * sending back this offset anyway.
		 */
		if (contains_pending_extent(device, hole_start, *hole_size)) {
			if (hole_end >= *hole_start)
				*hole_size = hole_end - *hole_start;
			else
				*hole_size = 0;
			changed = true;
		}

		switch (device->fs_devices->chunk_alloc_policy) {
		case BTRFS_CHUNK_ALLOC_REGULAR:
			/* No extra check */
			break;
		case BTRFS_CHUNK_ALLOC_ZONED:
			if (dev_extent_hole_check_zoned(device, hole_start,
							hole_size, num_bytes)) {
				changed = true;
				/*
				 * The changed hole can contain pending extent.
				 * Loop again to check that.
				 */
				continue;
			}
			break;
		default:
			BUG();
		}

		break;
	}

	return changed;
}
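
/*
 * Example, illustrative and not part of the original file: if a pending chunk
 * overlaps the front of the hole, contains_pending_extent() advances
 * *hole_start past the pending range and the hole shrinks by the same amount;
 * on zoned devices the hole may additionally be pushed forward to the next
 * allocatable zone.
 */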

/*
 * Find free space in the specified device.
 *
 * @device:	  the device which we search the free space in
 * @num_bytes:	  the size of the free space that we need
 * @search_start: the position from which to begin the search
 * @start:	  store the start of the free space.
 * @len:	  the size of the free space that we find, or the size of the
 *		  max free space if we don't find suitable free space
 *
 * This does a pretty simple search, the expectation is that it is called very
 * infrequently and that a given device has a small number of extents.
 *
 * @start is used to store the start of the free space if we find it. But if
 * we don't find suitable free space, it will be used to store the start
 * position of the max free space.
 *
 * @len is used to store the size of the free space that we find.
 * But if we don't find suitable free space, it is used to store the size of
 * the max free space.
 *
 * NOTE: This function will search the *commit* root of the device tree, and
 * does an extra check to ensure dev extents are not double allocated.
 * This makes the function safe to allocate dev extents but may not report
 * correct usable device space, as device extents freed in the current
 * transaction are not reported as available.
 */
static int find_free_dev_extent_start(struct btrfs_device *device,
				      u64 num_bytes, u64 search_start, u64 *start,
				      u64 *len)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_key key;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 hole_size;
	u64 max_hole_start;
	u64 max_hole_size;
	u64 extent_end;
	u64 search_end = device->total_bytes;
	int ret;
	int slot;
	struct extent_buffer *l;

	search_start = dev_extent_search_start(device, search_start);

	WARN_ON(device->zone_info &&
		!IS_ALIGNED(num_bytes, device->zone_info->zone_size));

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	max_hole_start = search_start;
	max_hole_size = 0;

again:
	if (search_start >= search_end ||
	    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
		ret = -ENOSPC;
		goto out;
	}

	path->reada = READA_FORWARD;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_backwards(root, &key, path);
	if (ret < 0)
		goto out;

	while (search_start < search_end) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (key.type != BTRFS_DEV_EXTENT_KEY)
			goto next;

		if (key.offset > search_end)
			break;

		if (key.offset > search_start) {
			hole_size = key.offset - search_start;
			dev_extent_hole_check(device, &search_start, &hole_size,
					      num_bytes);

			if (hole_size > max_hole_size) {
				max_hole_start = search_start;
				max_hole_size = hole_size;
			}

			/*
			 * If this free space is greater than what we need,
			 * it must be the max free space that we have found
			 * until now, so max_hole_start must point to the start
			 * of this free space and the length of this free space
			 * is stored in max_hole_size. Thus, we return
			 * max_hole_start and max_hole_size and go back to the
			 * caller.
			 */
			if (hole_size >= num_bytes) {
				ret = 0;
				goto out;
			}
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (extent_end > search_start)
			search_start = extent_end;
next:
		path->slots[0]++;
		cond_resched();
	}

	/*
	 * At this point, search_start should be the end of
	 * allocated dev extents, and when shrinking the device,
	 * search_end may be smaller than search_start.
	 */
	if (search_end > search_start) {
		hole_size = search_end - search_start;
		if (dev_extent_hole_check(device, &search_start, &hole_size,
					  num_bytes)) {
			btrfs_release_path(path);
			goto again;
		}

		if (hole_size > max_hole_size) {
			max_hole_start = search_start;
			max_hole_size = hole_size;
		}
	}

	/* See above. */
	if (max_hole_size < num_bytes)
		ret = -ENOSPC;
	else
		ret = 0;

	ASSERT(max_hole_start + max_hole_size <= search_end);
out:
	btrfs_free_path(path);
	*start = max_hole_start;
	if (len)
		*len = max_hole_size;
	return ret;
}

int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
			 u64 *start, u64 *len)
{
	/* FIXME use last free of some kind */
	return find_free_dev_extent_start(device, num_bytes, 0, start, len);
}
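
/*
 * Illustrative call pattern, not part of the original file: a caller such as
 * the chunk allocator asks for a hole of the stripe size and, on -ENOSPC,
 * *len still reports the largest hole found:
 *
 *	u64 start, len;
 *	int ret = find_free_dev_extent(device, SZ_1G, &start, &len);
 *	if (ret == -ENOSPC)
 *		;	// len holds the size of the biggest free hole
 */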

static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
				 struct btrfs_device *device,
				 u64 start, u64 *dev_extent_len)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf = NULL;
	struct btrfs_dev_extent *extent = NULL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
again:
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid,
					  BTRFS_DEV_EXTENT_KEY);
		if (ret)
			goto out;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
		BUG_ON(found_key.offset > start || found_key.offset +
		       btrfs_dev_extent_length(leaf, extent) < start);
		key = found_key;
		btrfs_release_path(path);
		goto again;
	} else if (ret == 0) {
		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
	} else {
		goto out;
	}

	*dev_extent_len = btrfs_dev_extent_length(leaf, extent);

	ret = btrfs_del_item(trans, root, path);
	if (ret == 0)
		set_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags);
out:
	btrfs_free_path(path);
	return ret;
}

static u64 find_next_chunk(struct btrfs_fs_info *fs_info)
{
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	struct rb_node *n;
	u64 ret = 0;

	em_tree = &fs_info->mapping_tree;
	read_lock(&em_tree->lock);
	n = rb_last(&em_tree->map.rb_root);
	if (n) {
		em = rb_entry(n, struct extent_map, rb_node);
		ret = em->start + em->len;
	}
	read_unlock(&em_tree->lock);

	return ret;
}

static noinline int find_next_devid(struct btrfs_fs_info *fs_info,
				    u64 *devid_ret)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	if (ret == 0) {
		/* Corruption */
		btrfs_err(fs_info, "corrupted chunk tree devid -1 matched");
		ret = -EUCLEAN;
		goto error;
	}

	ret = btrfs_previous_item(fs_info->chunk_root, path,
				  BTRFS_DEV_ITEMS_OBJECTID,
				  BTRFS_DEV_ITEM_KEY);
	if (ret) {
		*devid_ret = 1;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		*devid_ret = found_key.offset + 1;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}
1908 */
1909 static void update_dev_time(const char *device_path)
1910 {
1911 struct path path;
1912 struct timespec64 now;
1913 int ret;
1914
1915 ret = kern_path(device_path, LOOKUP_FOLLOW, &path);
1916 if (ret)
1917 return;
1918
1919 now = current_time(d_inode(path.dentry));
1920 inode_update_time(d_inode(path.dentry), &now, S_MTIME | S_CTIME);
1921 path_put(&path);
1922 }
1923
1924 static int btrfs_rm_dev_item(struct btrfs_trans_handle *trans,
1925 struct btrfs_device *device)
1926 {
1927 struct btrfs_root *root = device->fs_info->chunk_root;
1928 int ret;
1929 struct btrfs_path *path;
1930 struct btrfs_key key;
1931
1932 path = btrfs_alloc_path();
1933 if (!path)
1934 return -ENOMEM;
1935
1936 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1937 key.type = BTRFS_DEV_ITEM_KEY;
1938 key.offset = device->devid;
1939
1940 btrfs_reserve_chunk_metadata(trans, false);
1941 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1942 btrfs_trans_release_chunk_metadata(trans);
1943 if (ret) {
1944 if (ret > 0)
1945 ret = -ENOENT;
1946 goto out;
1947 }
1948
1949 ret = btrfs_del_item(trans, root, path);
1950 out:
1951 btrfs_free_path(path);
1952 return ret;
1953 }
1954
1955 /*
1956 * Verify that @num_devices satisfies the RAID profile constraints in the whole
1957 * filesystem. It's up to the caller to adjust that number regarding e.g. device
1958 * replace.
1959 */
1960 static int btrfs_check_raid_min_devices(struct btrfs_fs_info *fs_info,
1961 u64 num_devices)
1962 {
1963 u64 all_avail;
1964 unsigned seq;
1965 int i;
1966
1967 do {
1968 seq = read_seqbegin(&fs_info->profiles_lock);
1969
1970 all_avail = fs_info->avail_data_alloc_bits |
1971 fs_info->avail_system_alloc_bits |
1972 fs_info->avail_metadata_alloc_bits;
1973 } while (read_seqretry(&fs_info->profiles_lock, seq));
1974
1975 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
1976 if (!(all_avail & btrfs_raid_array[i].bg_flag))
1977 continue;
1978
1979 if (num_devices < btrfs_raid_array[i].devs_min)
1980 return btrfs_raid_array[i].mindev_error;
1981 }
1982
1983 return 0;
1984 }
1985
1986 static struct btrfs_device *btrfs_find_next_active_device(
1987 struct btrfs_fs_devices *fs_devs, struct btrfs_device *device)
1988 {
1989 struct btrfs_device *next_device;
1990
1991 list_for_each_entry(next_device, &fs_devs->devices, dev_list) {
1992 if (next_device != device &&
1993 !test_bit(BTRFS_DEV_STATE_MISSING, &next_device->dev_state)
1994 && next_device->bdev)
1995 return next_device;
1996 }
1997
1998 return NULL;
1999 }
2000
2001 /*
2002 * Helper function to check if the given device is part of s_bdev / latest_dev
2003 * and replace it with the provided or the next active device. In the context
2004 * where this function is called, there should always be another active device
2005 * available (the provided next_device or one found by
2006 * btrfs_find_next_active_device()).
2007 */ 2007 void __cold btrfs_assign_next_active_device(struct btrfs_device *device,
2008 struct btrfs_device *next_device)
2009 {
2010 struct btrfs_fs_info *fs_info = device->fs_info;
2011
2012 if (!next_device)
2013 next_device = btrfs_find_next_active_device(fs_info->fs_devices,
2014 device);
2015 ASSERT(next_device);
2016
2017 if (fs_info->sb->s_bdev &&
2018 (fs_info->sb->s_bdev == device->bdev))
2019 fs_info->sb->s_bdev = next_device->bdev;
2020
2021 if (fs_info->fs_devices->latest_dev->bdev == device->bdev)
2022 fs_info->fs_devices->latest_dev = next_device;
2023 }
2024
2025 /*
2026 * Return btrfs_fs_devices::num_devices excluding the device that's being
2027 * currently replaced.
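 *
 * Example (a sketch, assuming the usual accounting): the replace target
 * device is also linked on the device list, so a 3-device filesystem in
 * the middle of a replace has num_devices == 4 and this helper returns 3.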
2028 */ 2029 static u64 btrfs_num_devices(struct btrfs_fs_info *fs_info) 2030 { 2031 u64 num_devices = fs_info->fs_devices->num_devices; 2032 2033 down_read(&fs_info->dev_replace.rwsem); 2034 if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) { 2035 ASSERT(num_devices > 1); 2036 num_devices--; 2037 } 2038 up_read(&fs_info->dev_replace.rwsem); 2039 2040 return num_devices; 2041 } 2042 2043 static void btrfs_scratch_superblock(struct btrfs_fs_info *fs_info, 2044 struct block_device *bdev, int copy_num) 2045 { 2046 struct btrfs_super_block *disk_super; 2047 const size_t len = sizeof(disk_super->magic); 2048 const u64 bytenr = btrfs_sb_offset(copy_num); 2049 int ret; 2050 2051 disk_super = btrfs_read_disk_super(bdev, bytenr, bytenr); 2052 if (IS_ERR(disk_super)) 2053 return; 2054 2055 memset(&disk_super->magic, 0, len); 2056 folio_mark_dirty(virt_to_folio(disk_super)); 2057 btrfs_release_disk_super(disk_super); 2058 2059 ret = sync_blockdev_range(bdev, bytenr, bytenr + len - 1); 2060 if (ret) 2061 btrfs_warn(fs_info, "error clearing superblock number %d (%d)", 2062 copy_num, ret); 2063 } 2064 2065 void btrfs_scratch_superblocks(struct btrfs_fs_info *fs_info, 2066 struct block_device *bdev, 2067 const char *device_path) 2068 { 2069 int copy_num; 2070 2071 if (!bdev) 2072 return; 2073 2074 for (copy_num = 0; copy_num < BTRFS_SUPER_MIRROR_MAX; copy_num++) { 2075 if (bdev_is_zoned(bdev)) 2076 btrfs_reset_sb_log_zones(bdev, copy_num); 2077 else 2078 btrfs_scratch_superblock(fs_info, bdev, copy_num); 2079 } 2080 2081 /* Notify udev that device has changed */ 2082 btrfs_kobject_uevent(bdev, KOBJ_CHANGE); 2083 2084 /* Update ctime/mtime for device path for libblkid */ 2085 update_dev_time(device_path); 2086 } 2087 2088 int btrfs_rm_device(struct btrfs_fs_info *fs_info, 2089 struct btrfs_dev_lookup_args *args, 2090 struct block_device **bdev, fmode_t *mode) 2091 { 2092 struct btrfs_trans_handle *trans; 2093 struct btrfs_device *device; 2094 struct btrfs_fs_devices *cur_devices; 2095 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 2096 u64 num_devices; 2097 int ret = 0; 2098 2099 if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) { 2100 btrfs_err(fs_info, "device remove not supported on extent tree v2 yet"); 2101 return -EINVAL; 2102 } 2103 2104 /* 2105 * The device list in fs_devices is accessed without locks (neither 2106 * uuid_mutex nor device_list_mutex) as it won't change on a mounted 2107 * filesystem and another device rm cannot run. 
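 *
 * (Mutual exclusion against another concurrent device remove comes from
 * the exclusive operation protection, btrfs_exclop_start(), taken by the
 * ioctl callers.)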
2108 */ 2109 num_devices = btrfs_num_devices(fs_info); 2110 2111 ret = btrfs_check_raid_min_devices(fs_info, num_devices - 1); 2112 if (ret) 2113 return ret; 2114 2115 device = btrfs_find_device(fs_info->fs_devices, args); 2116 if (!device) { 2117 if (args->missing) 2118 ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND; 2119 else 2120 ret = -ENOENT; 2121 return ret; 2122 } 2123 2124 if (btrfs_pinned_by_swapfile(fs_info, device)) { 2125 btrfs_warn_in_rcu(fs_info, 2126 "cannot remove device %s (devid %llu) due to active swapfile", 2127 btrfs_dev_name(device), device->devid); 2128 return -ETXTBSY; 2129 } 2130 2131 if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) 2132 return BTRFS_ERROR_DEV_TGT_REPLACE; 2133 2134 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) && 2135 fs_info->fs_devices->rw_devices == 1) 2136 return BTRFS_ERROR_DEV_ONLY_WRITABLE; 2137 2138 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 2139 mutex_lock(&fs_info->chunk_mutex); 2140 list_del_init(&device->dev_alloc_list); 2141 device->fs_devices->rw_devices--; 2142 mutex_unlock(&fs_info->chunk_mutex); 2143 } 2144 2145 ret = btrfs_shrink_device(device, 0); 2146 if (ret) 2147 goto error_undo; 2148 2149 trans = btrfs_start_transaction(fs_info->chunk_root, 0); 2150 if (IS_ERR(trans)) { 2151 ret = PTR_ERR(trans); 2152 goto error_undo; 2153 } 2154 2155 ret = btrfs_rm_dev_item(trans, device); 2156 if (ret) { 2157 /* Any error in dev item removal is critical */ 2158 btrfs_crit(fs_info, 2159 "failed to remove device item for devid %llu: %d", 2160 device->devid, ret); 2161 btrfs_abort_transaction(trans, ret); 2162 btrfs_end_transaction(trans); 2163 return ret; 2164 } 2165 2166 clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state); 2167 btrfs_scrub_cancel_dev(device); 2168 2169 /* 2170 * the device list mutex makes sure that we don't change 2171 * the device list while someone else is writing out all 2172 * the device supers. Whoever is writing all supers, should 2173 * lock the device list mutex before getting the number of 2174 * devices in the super block (super_copy). Conversely, 2175 * whoever updates the number of devices in the super block 2176 * (super_copy) should hold the device list mutex. 2177 */ 2178 2179 /* 2180 * In normal cases the cur_devices == fs_devices. But in case 2181 * of deleting a seed device, the cur_devices should point to 2182 * its own fs_devices listed under the fs_devices->seed_list. 2183 */ 2184 cur_devices = device->fs_devices; 2185 mutex_lock(&fs_devices->device_list_mutex); 2186 list_del_rcu(&device->dev_list); 2187 2188 cur_devices->num_devices--; 2189 cur_devices->total_devices--; 2190 /* Update total_devices of the parent fs_devices if it's seed */ 2191 if (cur_devices != fs_devices) 2192 fs_devices->total_devices--; 2193 2194 if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) 2195 cur_devices->missing_devices--; 2196 2197 btrfs_assign_next_active_device(device, NULL); 2198 2199 if (device->bdev) { 2200 cur_devices->open_devices--; 2201 /* remove sysfs entry */ 2202 btrfs_sysfs_remove_device(device); 2203 } 2204 2205 num_devices = btrfs_super_num_devices(fs_info->super_copy) - 1; 2206 btrfs_set_super_num_devices(fs_info->super_copy, num_devices); 2207 mutex_unlock(&fs_devices->device_list_mutex); 2208 2209 /* 2210 * At this point, the device is zero sized and detached from the 2211 * devices list. All that's left is to zero out the old supers and 2212 * free the device. 
2213 * 2214 * We cannot call btrfs_close_bdev() here because we're holding the sb 2215 * write lock, and blkdev_put() will pull in the ->open_mutex on the 2216 * block device and it's dependencies. Instead just flush the device 2217 * and let the caller do the final blkdev_put. 2218 */ 2219 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 2220 btrfs_scratch_superblocks(fs_info, device->bdev, 2221 device->name->str); 2222 if (device->bdev) { 2223 sync_blockdev(device->bdev); 2224 invalidate_bdev(device->bdev); 2225 } 2226 } 2227 2228 *bdev = device->bdev; 2229 *mode = device->mode; 2230 synchronize_rcu(); 2231 btrfs_free_device(device); 2232 2233 /* 2234 * This can happen if cur_devices is the private seed devices list. We 2235 * cannot call close_fs_devices() here because it expects the uuid_mutex 2236 * to be held, but in fact we don't need that for the private 2237 * seed_devices, we can simply decrement cur_devices->opened and then 2238 * remove it from our list and free the fs_devices. 2239 */ 2240 if (cur_devices->num_devices == 0) { 2241 list_del_init(&cur_devices->seed_list); 2242 ASSERT(cur_devices->opened == 1); 2243 cur_devices->opened--; 2244 free_fs_devices(cur_devices); 2245 } 2246 2247 ret = btrfs_commit_transaction(trans); 2248 2249 return ret; 2250 2251 error_undo: 2252 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 2253 mutex_lock(&fs_info->chunk_mutex); 2254 list_add(&device->dev_alloc_list, 2255 &fs_devices->alloc_list); 2256 device->fs_devices->rw_devices++; 2257 mutex_unlock(&fs_info->chunk_mutex); 2258 } 2259 return ret; 2260 } 2261 2262 void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device *srcdev) 2263 { 2264 struct btrfs_fs_devices *fs_devices; 2265 2266 lockdep_assert_held(&srcdev->fs_info->fs_devices->device_list_mutex); 2267 2268 /* 2269 * in case of fs with no seed, srcdev->fs_devices will point 2270 * to fs_devices of fs_info. However when the dev being replaced is 2271 * a seed dev it will point to the seed's local fs_devices. In short 2272 * srcdev will have its correct fs_devices in both the cases. 2273 */ 2274 fs_devices = srcdev->fs_devices; 2275 2276 list_del_rcu(&srcdev->dev_list); 2277 list_del(&srcdev->dev_alloc_list); 2278 fs_devices->num_devices--; 2279 if (test_bit(BTRFS_DEV_STATE_MISSING, &srcdev->dev_state)) 2280 fs_devices->missing_devices--; 2281 2282 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &srcdev->dev_state)) 2283 fs_devices->rw_devices--; 2284 2285 if (srcdev->bdev) 2286 fs_devices->open_devices--; 2287 } 2288 2289 void btrfs_rm_dev_replace_free_srcdev(struct btrfs_device *srcdev) 2290 { 2291 struct btrfs_fs_devices *fs_devices = srcdev->fs_devices; 2292 2293 mutex_lock(&uuid_mutex); 2294 2295 btrfs_close_bdev(srcdev); 2296 synchronize_rcu(); 2297 btrfs_free_device(srcdev); 2298 2299 /* if this is no devs we rather delete the fs_devices */ 2300 if (!fs_devices->num_devices) { 2301 /* 2302 * On a mounted FS, num_devices can't be zero unless it's a 2303 * seed. In case of a seed device being replaced, the replace 2304 * target added to the sprout FS, so there will be no more 2305 * device left under the seed FS. 
2306 */ 2307 ASSERT(fs_devices->seeding); 2308 2309 list_del_init(&fs_devices->seed_list); 2310 close_fs_devices(fs_devices); 2311 free_fs_devices(fs_devices); 2312 } 2313 mutex_unlock(&uuid_mutex); 2314 } 2315 2316 void btrfs_destroy_dev_replace_tgtdev(struct btrfs_device *tgtdev) 2317 { 2318 struct btrfs_fs_devices *fs_devices = tgtdev->fs_info->fs_devices; 2319 2320 mutex_lock(&fs_devices->device_list_mutex); 2321 2322 btrfs_sysfs_remove_device(tgtdev); 2323 2324 if (tgtdev->bdev) 2325 fs_devices->open_devices--; 2326 2327 fs_devices->num_devices--; 2328 2329 btrfs_assign_next_active_device(tgtdev, NULL); 2330 2331 list_del_rcu(&tgtdev->dev_list); 2332 2333 mutex_unlock(&fs_devices->device_list_mutex); 2334 2335 btrfs_scratch_superblocks(tgtdev->fs_info, tgtdev->bdev, 2336 tgtdev->name->str); 2337 2338 btrfs_close_bdev(tgtdev); 2339 synchronize_rcu(); 2340 btrfs_free_device(tgtdev); 2341 } 2342 2343 /* 2344 * Populate args from device at path. 2345 * 2346 * @fs_info: the filesystem 2347 * @args: the args to populate 2348 * @path: the path to the device 2349 * 2350 * This will read the super block of the device at @path and populate @args with 2351 * the devid, fsid, and uuid. This is meant to be used for ioctls that need to 2352 * lookup a device to operate on, but need to do it before we take any locks. 2353 * This properly handles the special case of "missing" that a user may pass in, 2354 * and does some basic sanity checks. The caller must make sure that @path is 2355 * properly NUL terminated before calling in, and must call 2356 * btrfs_put_dev_args_from_path() in order to free up the temporary fsid and 2357 * uuid buffers. 2358 * 2359 * Return: 0 for success, -errno for failure 2360 */ 2361 int btrfs_get_dev_args_from_path(struct btrfs_fs_info *fs_info, 2362 struct btrfs_dev_lookup_args *args, 2363 const char *path) 2364 { 2365 struct btrfs_super_block *disk_super; 2366 struct block_device *bdev; 2367 int ret; 2368 2369 if (!path || !path[0]) 2370 return -EINVAL; 2371 if (!strcmp(path, "missing")) { 2372 args->missing = true; 2373 return 0; 2374 } 2375 2376 args->uuid = kzalloc(BTRFS_UUID_SIZE, GFP_KERNEL); 2377 args->fsid = kzalloc(BTRFS_FSID_SIZE, GFP_KERNEL); 2378 if (!args->uuid || !args->fsid) { 2379 btrfs_put_dev_args_from_path(args); 2380 return -ENOMEM; 2381 } 2382 2383 ret = btrfs_get_bdev_and_sb(path, FMODE_READ, fs_info->bdev_holder, 0, 2384 &bdev, &disk_super); 2385 if (ret) { 2386 btrfs_put_dev_args_from_path(args); 2387 return ret; 2388 } 2389 2390 args->devid = btrfs_stack_device_id(&disk_super->dev_item); 2391 memcpy(args->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE); 2392 if (btrfs_fs_incompat(fs_info, METADATA_UUID)) 2393 memcpy(args->fsid, disk_super->metadata_uuid, BTRFS_FSID_SIZE); 2394 else 2395 memcpy(args->fsid, disk_super->fsid, BTRFS_FSID_SIZE); 2396 btrfs_release_disk_super(disk_super); 2397 blkdev_put(bdev, FMODE_READ); 2398 return 0; 2399 } 2400 2401 /* 2402 * Only use this jointly with btrfs_get_dev_args_from_path() because we will 2403 * allocate our ->uuid and ->fsid pointers, everybody else uses local variables 2404 * that don't need to be freed. 
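 *
 * Typical usage (a sketch; it mirrors btrfs_find_device_by_devspec()
 * below):
 *
 *	BTRFS_DEV_LOOKUP_ARGS(args);
 *
 *	ret = btrfs_get_dev_args_from_path(fs_info, &args, path);
 *	if (ret)
 *		return ERR_PTR(ret);
 *	device = btrfs_find_device(fs_info->fs_devices, &args);
 *	btrfs_put_dev_args_from_path(&args);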
2405 */ 2406 void btrfs_put_dev_args_from_path(struct btrfs_dev_lookup_args *args) 2407 { 2408 kfree(args->uuid); 2409 kfree(args->fsid); 2410 args->uuid = NULL; 2411 args->fsid = NULL; 2412 } 2413 2414 struct btrfs_device *btrfs_find_device_by_devspec( 2415 struct btrfs_fs_info *fs_info, u64 devid, 2416 const char *device_path) 2417 { 2418 BTRFS_DEV_LOOKUP_ARGS(args); 2419 struct btrfs_device *device; 2420 int ret; 2421 2422 if (devid) { 2423 args.devid = devid; 2424 device = btrfs_find_device(fs_info->fs_devices, &args); 2425 if (!device) 2426 return ERR_PTR(-ENOENT); 2427 return device; 2428 } 2429 2430 ret = btrfs_get_dev_args_from_path(fs_info, &args, device_path); 2431 if (ret) 2432 return ERR_PTR(ret); 2433 device = btrfs_find_device(fs_info->fs_devices, &args); 2434 btrfs_put_dev_args_from_path(&args); 2435 if (!device) 2436 return ERR_PTR(-ENOENT); 2437 return device; 2438 } 2439 2440 static struct btrfs_fs_devices *btrfs_init_sprout(struct btrfs_fs_info *fs_info) 2441 { 2442 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 2443 struct btrfs_fs_devices *old_devices; 2444 struct btrfs_fs_devices *seed_devices; 2445 2446 lockdep_assert_held(&uuid_mutex); 2447 if (!fs_devices->seeding) 2448 return ERR_PTR(-EINVAL); 2449 2450 /* 2451 * Private copy of the seed devices, anchored at 2452 * fs_info->fs_devices->seed_list 2453 */ 2454 seed_devices = alloc_fs_devices(NULL, NULL); 2455 if (IS_ERR(seed_devices)) 2456 return seed_devices; 2457 2458 /* 2459 * It's necessary to retain a copy of the original seed fs_devices in 2460 * fs_uuids so that filesystems which have been seeded can successfully 2461 * reference the seed device from open_seed_devices. This also supports 2462 * multiple fs seed. 2463 */ 2464 old_devices = clone_fs_devices(fs_devices); 2465 if (IS_ERR(old_devices)) { 2466 kfree(seed_devices); 2467 return old_devices; 2468 } 2469 2470 list_add(&old_devices->fs_list, &fs_uuids); 2471 2472 memcpy(seed_devices, fs_devices, sizeof(*seed_devices)); 2473 seed_devices->opened = 1; 2474 INIT_LIST_HEAD(&seed_devices->devices); 2475 INIT_LIST_HEAD(&seed_devices->alloc_list); 2476 mutex_init(&seed_devices->device_list_mutex); 2477 2478 return seed_devices; 2479 } 2480 2481 /* 2482 * Splice seed devices into the sprout fs_devices. 2483 * Generate a new fsid for the sprouted read-write filesystem. 2484 */ 2485 static void btrfs_setup_sprout(struct btrfs_fs_info *fs_info, 2486 struct btrfs_fs_devices *seed_devices) 2487 { 2488 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 2489 struct btrfs_super_block *disk_super = fs_info->super_copy; 2490 struct btrfs_device *device; 2491 u64 super_flags; 2492 2493 /* 2494 * We are updating the fsid, the thread leading to device_list_add() 2495 * could race, so uuid_mutex is needed. 2496 */ 2497 lockdep_assert_held(&uuid_mutex); 2498 2499 /* 2500 * The threads listed below may traverse dev_list but can do that without 2501 * device_list_mutex: 2502 * - All device ops and balance - as we are in btrfs_exclop_start. 2503 * - Various dev_list readers - are using RCU. 2504 * - btrfs_ioctl_fitrim() - is using RCU. 
2505 * 2506 * For-read threads as below are using device_list_mutex: 2507 * - Readonly scrub btrfs_scrub_dev() 2508 * - Readonly scrub btrfs_scrub_progress() 2509 * - btrfs_get_dev_stats() 2510 */ 2511 lockdep_assert_held(&fs_devices->device_list_mutex); 2512 2513 list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices, 2514 synchronize_rcu); 2515 list_for_each_entry(device, &seed_devices->devices, dev_list) 2516 device->fs_devices = seed_devices; 2517 2518 fs_devices->seeding = false; 2519 fs_devices->num_devices = 0; 2520 fs_devices->open_devices = 0; 2521 fs_devices->missing_devices = 0; 2522 fs_devices->rotating = false; 2523 list_add(&seed_devices->seed_list, &fs_devices->seed_list); 2524 2525 generate_random_uuid(fs_devices->fsid); 2526 memcpy(fs_devices->metadata_uuid, fs_devices->fsid, BTRFS_FSID_SIZE); 2527 memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE); 2528 2529 super_flags = btrfs_super_flags(disk_super) & 2530 ~BTRFS_SUPER_FLAG_SEEDING; 2531 btrfs_set_super_flags(disk_super, super_flags); 2532 } 2533 2534 /* 2535 * Store the expected generation for seed devices in device items. 2536 */ 2537 static int btrfs_finish_sprout(struct btrfs_trans_handle *trans) 2538 { 2539 BTRFS_DEV_LOOKUP_ARGS(args); 2540 struct btrfs_fs_info *fs_info = trans->fs_info; 2541 struct btrfs_root *root = fs_info->chunk_root; 2542 struct btrfs_path *path; 2543 struct extent_buffer *leaf; 2544 struct btrfs_dev_item *dev_item; 2545 struct btrfs_device *device; 2546 struct btrfs_key key; 2547 u8 fs_uuid[BTRFS_FSID_SIZE]; 2548 u8 dev_uuid[BTRFS_UUID_SIZE]; 2549 int ret; 2550 2551 path = btrfs_alloc_path(); 2552 if (!path) 2553 return -ENOMEM; 2554 2555 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 2556 key.offset = 0; 2557 key.type = BTRFS_DEV_ITEM_KEY; 2558 2559 while (1) { 2560 btrfs_reserve_chunk_metadata(trans, false); 2561 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 2562 btrfs_trans_release_chunk_metadata(trans); 2563 if (ret < 0) 2564 goto error; 2565 2566 leaf = path->nodes[0]; 2567 next_slot: 2568 if (path->slots[0] >= btrfs_header_nritems(leaf)) { 2569 ret = btrfs_next_leaf(root, path); 2570 if (ret > 0) 2571 break; 2572 if (ret < 0) 2573 goto error; 2574 leaf = path->nodes[0]; 2575 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 2576 btrfs_release_path(path); 2577 continue; 2578 } 2579 2580 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 2581 if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID || 2582 key.type != BTRFS_DEV_ITEM_KEY) 2583 break; 2584 2585 dev_item = btrfs_item_ptr(leaf, path->slots[0], 2586 struct btrfs_dev_item); 2587 args.devid = btrfs_device_id(leaf, dev_item); 2588 read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item), 2589 BTRFS_UUID_SIZE); 2590 read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item), 2591 BTRFS_FSID_SIZE); 2592 args.uuid = dev_uuid; 2593 args.fsid = fs_uuid; 2594 device = btrfs_find_device(fs_info->fs_devices, &args); 2595 BUG_ON(!device); /* Logic error */ 2596 2597 if (device->fs_devices->seeding) { 2598 btrfs_set_device_generation(leaf, dev_item, 2599 device->generation); 2600 btrfs_mark_buffer_dirty(leaf); 2601 } 2602 2603 path->slots[0]++; 2604 goto next_slot; 2605 } 2606 ret = 0; 2607 error: 2608 btrfs_free_path(path); 2609 return ret; 2610 } 2611 2612 int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path) 2613 { 2614 struct btrfs_root *root = fs_info->dev_root; 2615 struct btrfs_trans_handle *trans; 2616 struct btrfs_device *device; 2617 struct block_device *bdev; 2618 struct 
super_block *sb = fs_info->sb; 2619 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 2620 struct btrfs_fs_devices *seed_devices = NULL; 2621 u64 orig_super_total_bytes; 2622 u64 orig_super_num_devices; 2623 int ret = 0; 2624 bool seeding_dev = false; 2625 bool locked = false; 2626 2627 if (sb_rdonly(sb) && !fs_devices->seeding) 2628 return -EROFS; 2629 2630 bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL, 2631 fs_info->bdev_holder); 2632 if (IS_ERR(bdev)) 2633 return PTR_ERR(bdev); 2634 2635 if (!btrfs_check_device_zone_type(fs_info, bdev)) { 2636 ret = -EINVAL; 2637 goto error; 2638 } 2639 2640 if (fs_devices->seeding) { 2641 seeding_dev = true; 2642 down_write(&sb->s_umount); 2643 mutex_lock(&uuid_mutex); 2644 locked = true; 2645 } 2646 2647 sync_blockdev(bdev); 2648 2649 rcu_read_lock(); 2650 list_for_each_entry_rcu(device, &fs_devices->devices, dev_list) { 2651 if (device->bdev == bdev) { 2652 ret = -EEXIST; 2653 rcu_read_unlock(); 2654 goto error; 2655 } 2656 } 2657 rcu_read_unlock(); 2658 2659 device = btrfs_alloc_device(fs_info, NULL, NULL, device_path); 2660 if (IS_ERR(device)) { 2661 /* we can safely leave the fs_devices entry around */ 2662 ret = PTR_ERR(device); 2663 goto error; 2664 } 2665 2666 device->fs_info = fs_info; 2667 device->bdev = bdev; 2668 ret = lookup_bdev(device_path, &device->devt); 2669 if (ret) 2670 goto error_free_device; 2671 2672 ret = btrfs_get_dev_zone_info(device, false); 2673 if (ret) 2674 goto error_free_device; 2675 2676 trans = btrfs_start_transaction(root, 0); 2677 if (IS_ERR(trans)) { 2678 ret = PTR_ERR(trans); 2679 goto error_free_zone; 2680 } 2681 2682 set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state); 2683 device->generation = trans->transid; 2684 device->io_width = fs_info->sectorsize; 2685 device->io_align = fs_info->sectorsize; 2686 device->sector_size = fs_info->sectorsize; 2687 device->total_bytes = 2688 round_down(bdev_nr_bytes(bdev), fs_info->sectorsize); 2689 device->disk_total_bytes = device->total_bytes; 2690 device->commit_total_bytes = device->total_bytes; 2691 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state); 2692 clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state); 2693 device->mode = FMODE_EXCL; 2694 device->dev_stats_valid = 1; 2695 set_blocksize(device->bdev, BTRFS_BDEV_BLOCKSIZE); 2696 2697 if (seeding_dev) { 2698 btrfs_clear_sb_rdonly(sb); 2699 2700 /* GFP_KERNEL allocation must not be under device_list_mutex */ 2701 seed_devices = btrfs_init_sprout(fs_info); 2702 if (IS_ERR(seed_devices)) { 2703 ret = PTR_ERR(seed_devices); 2704 btrfs_abort_transaction(trans, ret); 2705 goto error_trans; 2706 } 2707 } 2708 2709 mutex_lock(&fs_devices->device_list_mutex); 2710 if (seeding_dev) { 2711 btrfs_setup_sprout(fs_info, seed_devices); 2712 btrfs_assign_next_active_device(fs_info->fs_devices->latest_dev, 2713 device); 2714 } 2715 2716 device->fs_devices = fs_devices; 2717 2718 mutex_lock(&fs_info->chunk_mutex); 2719 list_add_rcu(&device->dev_list, &fs_devices->devices); 2720 list_add(&device->dev_alloc_list, &fs_devices->alloc_list); 2721 fs_devices->num_devices++; 2722 fs_devices->open_devices++; 2723 fs_devices->rw_devices++; 2724 fs_devices->total_devices++; 2725 fs_devices->total_rw_bytes += device->total_bytes; 2726 2727 atomic64_add(device->total_bytes, &fs_info->free_chunk_space); 2728 2729 if (!bdev_nonrot(bdev)) 2730 fs_devices->rotating = true; 2731 2732 orig_super_total_bytes = btrfs_super_total_bytes(fs_info->super_copy); 2733 btrfs_set_super_total_bytes(fs_info->super_copy, 
2734 round_down(orig_super_total_bytes + device->total_bytes,
2735 fs_info->sectorsize));
2736
2737 orig_super_num_devices = btrfs_super_num_devices(fs_info->super_copy);
2738 btrfs_set_super_num_devices(fs_info->super_copy,
2739 orig_super_num_devices + 1);
2740
2741 /*
2742 * We've got more storage; clear any full flags on the space
2743 * infos.
2744 */
2745 btrfs_clear_space_info_full(fs_info);
2746
2747 mutex_unlock(&fs_info->chunk_mutex);
2748
2749 /* Add sysfs device entry */
2750 btrfs_sysfs_add_device(device);
2751
2752 mutex_unlock(&fs_devices->device_list_mutex);
2753
2754 if (seeding_dev) {
2755 mutex_lock(&fs_info->chunk_mutex);
2756 ret = init_first_rw_device(trans);
2757 mutex_unlock(&fs_info->chunk_mutex);
2758 if (ret) {
2759 btrfs_abort_transaction(trans, ret);
2760 goto error_sysfs;
2761 }
2762 }
2763
2764 ret = btrfs_add_dev_item(trans, device);
2765 if (ret) {
2766 btrfs_abort_transaction(trans, ret);
2767 goto error_sysfs;
2768 }
2769
2770 if (seeding_dev) {
2771 ret = btrfs_finish_sprout(trans);
2772 if (ret) {
2773 btrfs_abort_transaction(trans, ret);
2774 goto error_sysfs;
2775 }
2776
2777 /*
2778 * fs_devices now represents the newly sprouted filesystem and
2779 * its fsid has been changed by btrfs_setup_sprout().
2780 */
2781 btrfs_sysfs_update_sprout_fsid(fs_devices);
2782 }
2783
2784 ret = btrfs_commit_transaction(trans);
2785
2786 if (seeding_dev) {
2787 mutex_unlock(&uuid_mutex);
2788 up_write(&sb->s_umount);
2789 locked = false;
2790
2791 if (ret) /* transaction commit */
2792 return ret;
2793
2794 ret = btrfs_relocate_sys_chunks(fs_info);
2795 if (ret < 0)
2796 btrfs_handle_fs_error(fs_info, ret,
2797 "Failed to relocate sys chunks after device initialization. This can be fixed using the \"btrfs balance\" command.");
2798 trans = btrfs_attach_transaction(root);
2799 if (IS_ERR(trans)) {
2800 if (PTR_ERR(trans) == -ENOENT)
2801 return 0;
2802 ret = PTR_ERR(trans);
2803 trans = NULL;
2804 goto error_sysfs;
2805 }
2806 ret = btrfs_commit_transaction(trans);
2807 }
2808
2809 /*
2810 * Now that we have written a new super block to this device, check all
2811 * the other fs_devices lists in case device_path is now recorded there
2812 * as a stale (alien) device.
2813 * We can ignore the return value, as it typically returns -EINVAL and
2814 * only succeeds if the device was an alien.
2815 */ 2816 btrfs_forget_devices(device->devt); 2817 2818 /* Update ctime/mtime for blkid or udev */ 2819 update_dev_time(device_path); 2820 2821 return ret; 2822 2823 error_sysfs: 2824 btrfs_sysfs_remove_device(device); 2825 mutex_lock(&fs_info->fs_devices->device_list_mutex); 2826 mutex_lock(&fs_info->chunk_mutex); 2827 list_del_rcu(&device->dev_list); 2828 list_del(&device->dev_alloc_list); 2829 fs_info->fs_devices->num_devices--; 2830 fs_info->fs_devices->open_devices--; 2831 fs_info->fs_devices->rw_devices--; 2832 fs_info->fs_devices->total_devices--; 2833 fs_info->fs_devices->total_rw_bytes -= device->total_bytes; 2834 atomic64_sub(device->total_bytes, &fs_info->free_chunk_space); 2835 btrfs_set_super_total_bytes(fs_info->super_copy, 2836 orig_super_total_bytes); 2837 btrfs_set_super_num_devices(fs_info->super_copy, 2838 orig_super_num_devices); 2839 mutex_unlock(&fs_info->chunk_mutex); 2840 mutex_unlock(&fs_info->fs_devices->device_list_mutex); 2841 error_trans: 2842 if (seeding_dev) 2843 btrfs_set_sb_rdonly(sb); 2844 if (trans) 2845 btrfs_end_transaction(trans); 2846 error_free_zone: 2847 btrfs_destroy_dev_zone_info(device); 2848 error_free_device: 2849 btrfs_free_device(device); 2850 error: 2851 blkdev_put(bdev, FMODE_EXCL); 2852 if (locked) { 2853 mutex_unlock(&uuid_mutex); 2854 up_write(&sb->s_umount); 2855 } 2856 return ret; 2857 } 2858 2859 static noinline int btrfs_update_device(struct btrfs_trans_handle *trans, 2860 struct btrfs_device *device) 2861 { 2862 int ret; 2863 struct btrfs_path *path; 2864 struct btrfs_root *root = device->fs_info->chunk_root; 2865 struct btrfs_dev_item *dev_item; 2866 struct extent_buffer *leaf; 2867 struct btrfs_key key; 2868 2869 path = btrfs_alloc_path(); 2870 if (!path) 2871 return -ENOMEM; 2872 2873 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 2874 key.type = BTRFS_DEV_ITEM_KEY; 2875 key.offset = device->devid; 2876 2877 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 2878 if (ret < 0) 2879 goto out; 2880 2881 if (ret > 0) { 2882 ret = -ENOENT; 2883 goto out; 2884 } 2885 2886 leaf = path->nodes[0]; 2887 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item); 2888 2889 btrfs_set_device_id(leaf, dev_item, device->devid); 2890 btrfs_set_device_type(leaf, dev_item, device->type); 2891 btrfs_set_device_io_align(leaf, dev_item, device->io_align); 2892 btrfs_set_device_io_width(leaf, dev_item, device->io_width); 2893 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size); 2894 btrfs_set_device_total_bytes(leaf, dev_item, 2895 btrfs_device_get_disk_total_bytes(device)); 2896 btrfs_set_device_bytes_used(leaf, dev_item, 2897 btrfs_device_get_bytes_used(device)); 2898 btrfs_mark_buffer_dirty(leaf); 2899 2900 out: 2901 btrfs_free_path(path); 2902 return ret; 2903 } 2904 2905 int btrfs_grow_device(struct btrfs_trans_handle *trans, 2906 struct btrfs_device *device, u64 new_size) 2907 { 2908 struct btrfs_fs_info *fs_info = device->fs_info; 2909 struct btrfs_super_block *super_copy = fs_info->super_copy; 2910 u64 old_total; 2911 u64 diff; 2912 int ret; 2913 2914 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) 2915 return -EACCES; 2916 2917 new_size = round_down(new_size, fs_info->sectorsize); 2918 2919 mutex_lock(&fs_info->chunk_mutex); 2920 old_total = btrfs_super_total_bytes(super_copy); 2921 diff = round_down(new_size - device->total_bytes, fs_info->sectorsize); 2922 2923 if (new_size <= device->total_bytes || 2924 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) { 2925 
mutex_unlock(&fs_info->chunk_mutex); 2926 return -EINVAL; 2927 } 2928 2929 btrfs_set_super_total_bytes(super_copy, 2930 round_down(old_total + diff, fs_info->sectorsize)); 2931 device->fs_devices->total_rw_bytes += diff; 2932 2933 btrfs_device_set_total_bytes(device, new_size); 2934 btrfs_device_set_disk_total_bytes(device, new_size); 2935 btrfs_clear_space_info_full(device->fs_info); 2936 if (list_empty(&device->post_commit_list)) 2937 list_add_tail(&device->post_commit_list, 2938 &trans->transaction->dev_update_list); 2939 mutex_unlock(&fs_info->chunk_mutex); 2940 2941 btrfs_reserve_chunk_metadata(trans, false); 2942 ret = btrfs_update_device(trans, device); 2943 btrfs_trans_release_chunk_metadata(trans); 2944 2945 return ret; 2946 } 2947 2948 static int btrfs_free_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset) 2949 { 2950 struct btrfs_fs_info *fs_info = trans->fs_info; 2951 struct btrfs_root *root = fs_info->chunk_root; 2952 int ret; 2953 struct btrfs_path *path; 2954 struct btrfs_key key; 2955 2956 path = btrfs_alloc_path(); 2957 if (!path) 2958 return -ENOMEM; 2959 2960 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 2961 key.offset = chunk_offset; 2962 key.type = BTRFS_CHUNK_ITEM_KEY; 2963 2964 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 2965 if (ret < 0) 2966 goto out; 2967 else if (ret > 0) { /* Logic error or corruption */ 2968 btrfs_handle_fs_error(fs_info, -ENOENT, 2969 "Failed lookup while freeing chunk."); 2970 ret = -ENOENT; 2971 goto out; 2972 } 2973 2974 ret = btrfs_del_item(trans, root, path); 2975 if (ret < 0) 2976 btrfs_handle_fs_error(fs_info, ret, 2977 "Failed to delete chunk item."); 2978 out: 2979 btrfs_free_path(path); 2980 return ret; 2981 } 2982 2983 static int btrfs_del_sys_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset) 2984 { 2985 struct btrfs_super_block *super_copy = fs_info->super_copy; 2986 struct btrfs_disk_key *disk_key; 2987 struct btrfs_chunk *chunk; 2988 u8 *ptr; 2989 int ret = 0; 2990 u32 num_stripes; 2991 u32 array_size; 2992 u32 len = 0; 2993 u32 cur; 2994 struct btrfs_key key; 2995 2996 lockdep_assert_held(&fs_info->chunk_mutex); 2997 array_size = btrfs_super_sys_array_size(super_copy); 2998 2999 ptr = super_copy->sys_chunk_array; 3000 cur = 0; 3001 3002 while (cur < array_size) { 3003 disk_key = (struct btrfs_disk_key *)ptr; 3004 btrfs_disk_key_to_cpu(&key, disk_key); 3005 3006 len = sizeof(*disk_key); 3007 3008 if (key.type == BTRFS_CHUNK_ITEM_KEY) { 3009 chunk = (struct btrfs_chunk *)(ptr + len); 3010 num_stripes = btrfs_stack_chunk_num_stripes(chunk); 3011 len += btrfs_chunk_item_size(num_stripes); 3012 } else { 3013 ret = -EIO; 3014 break; 3015 } 3016 if (key.objectid == BTRFS_FIRST_CHUNK_TREE_OBJECTID && 3017 key.offset == chunk_offset) { 3018 memmove(ptr, ptr + len, array_size - (cur + len)); 3019 array_size -= len; 3020 btrfs_set_super_sys_array_size(super_copy, array_size); 3021 } else { 3022 ptr += len; 3023 cur += len; 3024 } 3025 } 3026 return ret; 3027 } 3028 3029 /* 3030 * btrfs_get_chunk_map() - Find the mapping containing the given logical extent. 3031 * @logical: Logical block offset in bytes. 3032 * @length: Length of extent in bytes. 3033 * 3034 * Return: Chunk mapping or ERR_PTR. 
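 *
 * Usage sketch (illustration only; callers own the returned reference):
 *
 *	em = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
 *	if (IS_ERR(em))
 *		return PTR_ERR(em);
 *	map = em->map_lookup;
 *	...
 *	free_extent_map(em);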
3035 */ 3036 struct extent_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info, 3037 u64 logical, u64 length) 3038 { 3039 struct extent_map_tree *em_tree; 3040 struct extent_map *em; 3041 3042 em_tree = &fs_info->mapping_tree; 3043 read_lock(&em_tree->lock); 3044 em = lookup_extent_mapping(em_tree, logical, length); 3045 read_unlock(&em_tree->lock); 3046 3047 if (!em) { 3048 btrfs_crit(fs_info, "unable to find logical %llu length %llu", 3049 logical, length); 3050 return ERR_PTR(-EINVAL); 3051 } 3052 3053 if (em->start > logical || em->start + em->len < logical) { 3054 btrfs_crit(fs_info, 3055 "found a bad mapping, wanted %llu-%llu, found %llu-%llu", 3056 logical, length, em->start, em->start + em->len); 3057 free_extent_map(em); 3058 return ERR_PTR(-EINVAL); 3059 } 3060 3061 /* callers are responsible for dropping em's ref. */ 3062 return em; 3063 } 3064 3065 static int remove_chunk_item(struct btrfs_trans_handle *trans, 3066 struct map_lookup *map, u64 chunk_offset) 3067 { 3068 int i; 3069 3070 /* 3071 * Removing chunk items and updating the device items in the chunks btree 3072 * requires holding the chunk_mutex. 3073 * See the comment at btrfs_chunk_alloc() for the details. 3074 */ 3075 lockdep_assert_held(&trans->fs_info->chunk_mutex); 3076 3077 for (i = 0; i < map->num_stripes; i++) { 3078 int ret; 3079 3080 ret = btrfs_update_device(trans, map->stripes[i].dev); 3081 if (ret) 3082 return ret; 3083 } 3084 3085 return btrfs_free_chunk(trans, chunk_offset); 3086 } 3087 3088 int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset) 3089 { 3090 struct btrfs_fs_info *fs_info = trans->fs_info; 3091 struct extent_map *em; 3092 struct map_lookup *map; 3093 u64 dev_extent_len = 0; 3094 int i, ret = 0; 3095 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 3096 3097 em = btrfs_get_chunk_map(fs_info, chunk_offset, 1); 3098 if (IS_ERR(em)) { 3099 /* 3100 * This is a logic error, but we don't want to just rely on the 3101 * user having built with ASSERT enabled, so if ASSERT doesn't 3102 * do anything we still error out. 3103 */ 3104 ASSERT(0); 3105 return PTR_ERR(em); 3106 } 3107 map = em->map_lookup; 3108 3109 /* 3110 * First delete the device extent items from the devices btree. 3111 * We take the device_list_mutex to avoid racing with the finishing phase 3112 * of a device replace operation. See the comment below before acquiring 3113 * fs_info->chunk_mutex. Note that here we do not acquire the chunk_mutex 3114 * because that can result in a deadlock when deleting the device extent 3115 * items from the devices btree - COWing an extent buffer from the btree 3116 * may result in allocating a new metadata chunk, which would attempt to 3117 * lock again fs_info->chunk_mutex. 
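 *
 * The resulting lock order in this function is therefore:
 *
 *	device_list_mutex	(delete the device extent items)
 *	chunk_mutex		(delete the chunk item, update device items)
 *
 * with the first released before the second is taken, never nested.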
3118 */
3119 mutex_lock(&fs_devices->device_list_mutex);
3120 for (i = 0; i < map->num_stripes; i++) {
3121 struct btrfs_device *device = map->stripes[i].dev;
3122 ret = btrfs_free_dev_extent(trans, device,
3123 map->stripes[i].physical,
3124 &dev_extent_len);
3125 if (ret) {
3126 mutex_unlock(&fs_devices->device_list_mutex);
3127 btrfs_abort_transaction(trans, ret);
3128 goto out;
3129 }
3130
3131 if (device->bytes_used > 0) {
3132 mutex_lock(&fs_info->chunk_mutex);
3133 btrfs_device_set_bytes_used(device,
3134 device->bytes_used - dev_extent_len);
3135 atomic64_add(dev_extent_len, &fs_info->free_chunk_space);
3136 btrfs_clear_space_info_full(fs_info);
3137 mutex_unlock(&fs_info->chunk_mutex);
3138 }
3139 }
3140 mutex_unlock(&fs_devices->device_list_mutex);
3141
3142 /*
3143 * We acquire fs_info->chunk_mutex for 2 reasons:
3144 *
3145 * 1) Just like with the first phase of the chunk allocation, we must
3146 * reserve system space, do all chunk btree updates and deletions, and
3147 * update the system chunk array in the superblock while holding this
3148 * mutex. This is for similar reasons as explained in the comment at
3149 * the top of btrfs_chunk_alloc();
3150 *
3151 * 2) Prevent races with the final phase of a device replace operation
3152 * that replaces the device object associated with the map's stripes,
3153 * because the device object's id can change at any time during that
3154 * final phase of the device replace operation
3155 * (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the
3156 * replaced device and then see it with an ID of
3157 * BTRFS_DEV_REPLACE_DEVID, which would cause a failure when updating
3158 * the device item, which does not exist in the chunk btree.
3159 * The finishing phase of device replace acquires both the
3160 * device_list_mutex and the chunk_mutex, in that order, so we are
3161 * safe by just acquiring the chunk_mutex.
3162 */
3163 trans->removing_chunk = true;
3164 mutex_lock(&fs_info->chunk_mutex);
3165
3166 check_system_chunk(trans, map->type);
3167
3168 ret = remove_chunk_item(trans, map, chunk_offset);
3169 /*
3170 * Normally we should not get -ENOSPC since we reserved space before
3171 * through the call to check_system_chunk().
3172 *
3173 * Despite our system space_info having enough free space, we may not
3174 * be able to allocate extents from its block groups, because they all
3175 * have an incompatible profile, which forces us to allocate a new
3176 * system block group with the right profile. Or, right after we called
3177 * check_system_chunk() above, a scrub may have turned the only system
3178 * block group with enough free space into RO mode.
3179 * This is explained in more detail at do_chunk_alloc().
3180 *
3181 * So if we get -ENOSPC, allocate a new system chunk and retry once.
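 *
 * The retry below is attempted exactly once; a second failure of
 * remove_chunk_item() aborts the transaction.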
3182 */ 3183 if (ret == -ENOSPC) { 3184 const u64 sys_flags = btrfs_system_alloc_profile(fs_info); 3185 struct btrfs_block_group *sys_bg; 3186 3187 sys_bg = btrfs_create_chunk(trans, sys_flags); 3188 if (IS_ERR(sys_bg)) { 3189 ret = PTR_ERR(sys_bg); 3190 btrfs_abort_transaction(trans, ret); 3191 goto out; 3192 } 3193 3194 ret = btrfs_chunk_alloc_add_chunk_item(trans, sys_bg); 3195 if (ret) { 3196 btrfs_abort_transaction(trans, ret); 3197 goto out; 3198 } 3199 3200 ret = remove_chunk_item(trans, map, chunk_offset); 3201 if (ret) { 3202 btrfs_abort_transaction(trans, ret); 3203 goto out; 3204 } 3205 } else if (ret) { 3206 btrfs_abort_transaction(trans, ret); 3207 goto out; 3208 } 3209 3210 trace_btrfs_chunk_free(fs_info, map, chunk_offset, em->len); 3211 3212 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) { 3213 ret = btrfs_del_sys_chunk(fs_info, chunk_offset); 3214 if (ret) { 3215 btrfs_abort_transaction(trans, ret); 3216 goto out; 3217 } 3218 } 3219 3220 mutex_unlock(&fs_info->chunk_mutex); 3221 trans->removing_chunk = false; 3222 3223 /* 3224 * We are done with chunk btree updates and deletions, so release the 3225 * system space we previously reserved (with check_system_chunk()). 3226 */ 3227 btrfs_trans_release_chunk_metadata(trans); 3228 3229 ret = btrfs_remove_block_group(trans, chunk_offset, em); 3230 if (ret) { 3231 btrfs_abort_transaction(trans, ret); 3232 goto out; 3233 } 3234 3235 out: 3236 if (trans->removing_chunk) { 3237 mutex_unlock(&fs_info->chunk_mutex); 3238 trans->removing_chunk = false; 3239 } 3240 /* once for us */ 3241 free_extent_map(em); 3242 return ret; 3243 } 3244 3245 int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset) 3246 { 3247 struct btrfs_root *root = fs_info->chunk_root; 3248 struct btrfs_trans_handle *trans; 3249 struct btrfs_block_group *block_group; 3250 u64 length; 3251 int ret; 3252 3253 if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) { 3254 btrfs_err(fs_info, 3255 "relocate: not supported on extent tree v2 yet"); 3256 return -EINVAL; 3257 } 3258 3259 /* 3260 * Prevent races with automatic removal of unused block groups. 3261 * After we relocate and before we remove the chunk with offset 3262 * chunk_offset, automatic removal of the block group can kick in, 3263 * resulting in a failure when calling btrfs_remove_chunk() below. 3264 * 3265 * Make sure to acquire this mutex before doing a tree search (dev 3266 * or chunk trees) to find chunks. Otherwise the cleaner kthread might 3267 * call btrfs_remove_chunk() (through btrfs_delete_unused_bgs()) after 3268 * we release the path used to search the chunk/dev tree and before 3269 * the current task acquires this mutex and calls us. 3270 */ 3271 lockdep_assert_held(&fs_info->reclaim_bgs_lock); 3272 3273 /* step one, relocate all the extents inside this chunk */ 3274 btrfs_scrub_pause(fs_info); 3275 ret = btrfs_relocate_block_group(fs_info, chunk_offset); 3276 btrfs_scrub_continue(fs_info); 3277 if (ret) { 3278 /* 3279 * If we had a transaction abort, stop all running scrubs. 3280 * See transaction.c:cleanup_transaction() why we do it here. 
3281 */ 3282 if (BTRFS_FS_ERROR(fs_info)) 3283 btrfs_scrub_cancel(fs_info); 3284 return ret; 3285 } 3286 3287 block_group = btrfs_lookup_block_group(fs_info, chunk_offset); 3288 if (!block_group) 3289 return -ENOENT; 3290 btrfs_discard_cancel_work(&fs_info->discard_ctl, block_group); 3291 length = block_group->length; 3292 btrfs_put_block_group(block_group); 3293 3294 /* 3295 * On a zoned file system, discard the whole block group, this will 3296 * trigger a REQ_OP_ZONE_RESET operation on the device zone. If 3297 * resetting the zone fails, don't treat it as a fatal problem from the 3298 * filesystem's point of view. 3299 */ 3300 if (btrfs_is_zoned(fs_info)) { 3301 ret = btrfs_discard_extent(fs_info, chunk_offset, length, NULL); 3302 if (ret) 3303 btrfs_info(fs_info, 3304 "failed to reset zone %llu after relocation", 3305 chunk_offset); 3306 } 3307 3308 trans = btrfs_start_trans_remove_block_group(root->fs_info, 3309 chunk_offset); 3310 if (IS_ERR(trans)) { 3311 ret = PTR_ERR(trans); 3312 btrfs_handle_fs_error(root->fs_info, ret, NULL); 3313 return ret; 3314 } 3315 3316 /* 3317 * step two, delete the device extents and the 3318 * chunk tree entries 3319 */ 3320 ret = btrfs_remove_chunk(trans, chunk_offset); 3321 btrfs_end_transaction(trans); 3322 return ret; 3323 } 3324 3325 static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info) 3326 { 3327 struct btrfs_root *chunk_root = fs_info->chunk_root; 3328 struct btrfs_path *path; 3329 struct extent_buffer *leaf; 3330 struct btrfs_chunk *chunk; 3331 struct btrfs_key key; 3332 struct btrfs_key found_key; 3333 u64 chunk_type; 3334 bool retried = false; 3335 int failed = 0; 3336 int ret; 3337 3338 path = btrfs_alloc_path(); 3339 if (!path) 3340 return -ENOMEM; 3341 3342 again: 3343 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 3344 key.offset = (u64)-1; 3345 key.type = BTRFS_CHUNK_ITEM_KEY; 3346 3347 while (1) { 3348 mutex_lock(&fs_info->reclaim_bgs_lock); 3349 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0); 3350 if (ret < 0) { 3351 mutex_unlock(&fs_info->reclaim_bgs_lock); 3352 goto error; 3353 } 3354 BUG_ON(ret == 0); /* Corruption */ 3355 3356 ret = btrfs_previous_item(chunk_root, path, key.objectid, 3357 key.type); 3358 if (ret) 3359 mutex_unlock(&fs_info->reclaim_bgs_lock); 3360 if (ret < 0) 3361 goto error; 3362 if (ret > 0) 3363 break; 3364 3365 leaf = path->nodes[0]; 3366 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 3367 3368 chunk = btrfs_item_ptr(leaf, path->slots[0], 3369 struct btrfs_chunk); 3370 chunk_type = btrfs_chunk_type(leaf, chunk); 3371 btrfs_release_path(path); 3372 3373 if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) { 3374 ret = btrfs_relocate_chunk(fs_info, found_key.offset); 3375 if (ret == -ENOSPC) 3376 failed++; 3377 else 3378 BUG_ON(ret); 3379 } 3380 mutex_unlock(&fs_info->reclaim_bgs_lock); 3381 3382 if (found_key.offset == 0) 3383 break; 3384 key.offset = found_key.offset - 1; 3385 } 3386 ret = 0; 3387 if (failed && !retried) { 3388 failed = 0; 3389 retried = true; 3390 goto again; 3391 } else if (WARN_ON(failed && retried)) { 3392 ret = -ENOSPC; 3393 } 3394 error: 3395 btrfs_free_path(path); 3396 return ret; 3397 } 3398 3399 /* 3400 * return 1 : allocate a data chunk successfully, 3401 * return <0: errors during allocating a data chunk, 3402 * return 0 : no need to allocate a data chunk. 
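 *
 * Caller pattern (a sketch of how the balance loop consumes these return
 * values):
 *
 *	ret = btrfs_may_alloc_data_chunk(fs_info, found_key.offset);
 *	if (ret < 0)
 *		goto error;
 *	else if (ret == 1)
 *		chunk_reserved = 1;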
3403 */ 3404 static int btrfs_may_alloc_data_chunk(struct btrfs_fs_info *fs_info, 3405 u64 chunk_offset) 3406 { 3407 struct btrfs_block_group *cache; 3408 u64 bytes_used; 3409 u64 chunk_type; 3410 3411 cache = btrfs_lookup_block_group(fs_info, chunk_offset); 3412 ASSERT(cache); 3413 chunk_type = cache->flags; 3414 btrfs_put_block_group(cache); 3415 3416 if (!(chunk_type & BTRFS_BLOCK_GROUP_DATA)) 3417 return 0; 3418 3419 spin_lock(&fs_info->data_sinfo->lock); 3420 bytes_used = fs_info->data_sinfo->bytes_used; 3421 spin_unlock(&fs_info->data_sinfo->lock); 3422 3423 if (!bytes_used) { 3424 struct btrfs_trans_handle *trans; 3425 int ret; 3426 3427 trans = btrfs_join_transaction(fs_info->tree_root); 3428 if (IS_ERR(trans)) 3429 return PTR_ERR(trans); 3430 3431 ret = btrfs_force_chunk_alloc(trans, BTRFS_BLOCK_GROUP_DATA); 3432 btrfs_end_transaction(trans); 3433 if (ret < 0) 3434 return ret; 3435 return 1; 3436 } 3437 3438 return 0; 3439 } 3440 3441 static int insert_balance_item(struct btrfs_fs_info *fs_info, 3442 struct btrfs_balance_control *bctl) 3443 { 3444 struct btrfs_root *root = fs_info->tree_root; 3445 struct btrfs_trans_handle *trans; 3446 struct btrfs_balance_item *item; 3447 struct btrfs_disk_balance_args disk_bargs; 3448 struct btrfs_path *path; 3449 struct extent_buffer *leaf; 3450 struct btrfs_key key; 3451 int ret, err; 3452 3453 path = btrfs_alloc_path(); 3454 if (!path) 3455 return -ENOMEM; 3456 3457 trans = btrfs_start_transaction(root, 0); 3458 if (IS_ERR(trans)) { 3459 btrfs_free_path(path); 3460 return PTR_ERR(trans); 3461 } 3462 3463 key.objectid = BTRFS_BALANCE_OBJECTID; 3464 key.type = BTRFS_TEMPORARY_ITEM_KEY; 3465 key.offset = 0; 3466 3467 ret = btrfs_insert_empty_item(trans, root, path, &key, 3468 sizeof(*item)); 3469 if (ret) 3470 goto out; 3471 3472 leaf = path->nodes[0]; 3473 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item); 3474 3475 memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item)); 3476 3477 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data); 3478 btrfs_set_balance_data(leaf, item, &disk_bargs); 3479 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta); 3480 btrfs_set_balance_meta(leaf, item, &disk_bargs); 3481 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys); 3482 btrfs_set_balance_sys(leaf, item, &disk_bargs); 3483 3484 btrfs_set_balance_flags(leaf, item, bctl->flags); 3485 3486 btrfs_mark_buffer_dirty(leaf); 3487 out: 3488 btrfs_free_path(path); 3489 err = btrfs_commit_transaction(trans); 3490 if (err && !ret) 3491 ret = err; 3492 return ret; 3493 } 3494 3495 static int del_balance_item(struct btrfs_fs_info *fs_info) 3496 { 3497 struct btrfs_root *root = fs_info->tree_root; 3498 struct btrfs_trans_handle *trans; 3499 struct btrfs_path *path; 3500 struct btrfs_key key; 3501 int ret, err; 3502 3503 path = btrfs_alloc_path(); 3504 if (!path) 3505 return -ENOMEM; 3506 3507 trans = btrfs_start_transaction_fallback_global_rsv(root, 0); 3508 if (IS_ERR(trans)) { 3509 btrfs_free_path(path); 3510 return PTR_ERR(trans); 3511 } 3512 3513 key.objectid = BTRFS_BALANCE_OBJECTID; 3514 key.type = BTRFS_TEMPORARY_ITEM_KEY; 3515 key.offset = 0; 3516 3517 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 3518 if (ret < 0) 3519 goto out; 3520 if (ret > 0) { 3521 ret = -ENOENT; 3522 goto out; 3523 } 3524 3525 ret = btrfs_del_item(trans, root, path); 3526 out: 3527 btrfs_free_path(path); 3528 err = btrfs_commit_transaction(trans); 3529 if (err && !ret) 3530 ret = err; 3531 return ret; 3532 } 3533 3534 /* 3535 * This is a 
heuristic used to reduce the number of chunks balanced on
3536 resume after balance was interrupted.
3537 */
3538 static void update_balance_args(struct btrfs_balance_control *bctl)
3539 {
3540 /*
3541 * Turn on soft mode for chunk types that were being converted.
3542 */
3543 if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
3544 bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
3545 if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
3546 bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
3547 if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
3548 bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;
3549
3550 /*
3551 * Turn on the usage filter if it is not already used. The idea is
3552 * that chunks that we have already balanced should be
3553 * reasonably full. Don't do it for chunks that are being
3554 * converted - that will keep us from relocating unconverted
3555 * (albeit full) chunks.
3556 */
3557 if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3558 !(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3559 !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3560 bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
3561 bctl->data.usage = 90;
3562 }
3563 if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3564 !(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3565 !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3566 bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
3567 bctl->sys.usage = 90;
3568 }
3569 if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3570 !(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3571 !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3572 bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
3573 bctl->meta.usage = 90;
3574 }
3575 }
3576
3577 /*
3578 * Clear the balance status in fs_info and delete the balance item from disk.
3579 */
3580 static void reset_balance_state(struct btrfs_fs_info *fs_info)
3581 {
3582 struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3583 int ret;
3584
3585 BUG_ON(!fs_info->balance_ctl);
3586
3587 spin_lock(&fs_info->balance_lock);
3588 fs_info->balance_ctl = NULL;
3589 spin_unlock(&fs_info->balance_lock);
3590
3591 kfree(bctl);
3592 ret = del_balance_item(fs_info);
3593 if (ret)
3594 btrfs_handle_fs_error(fs_info, ret, NULL);
3595 }
3596
3597 /*
3598 * Balance filters. Return 1 if the chunk should be filtered out
3599 * (should not be balanced).
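 *
 * Example (sketch): "btrfs balance start -dusage=50" sets bargs->usage to
 * 50 for the data chunks; chunk_usage_filter() then returns 0 (balance)
 * for data chunks less than 50% full and 1 (skip) for the rest.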
3600 */ 3601 static int chunk_profiles_filter(u64 chunk_type, 3602 struct btrfs_balance_args *bargs) 3603 { 3604 chunk_type = chunk_to_extended(chunk_type) & 3605 BTRFS_EXTENDED_PROFILE_MASK; 3606 3607 if (bargs->profiles & chunk_type) 3608 return 0; 3609 3610 return 1; 3611 } 3612 3613 static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset, 3614 struct btrfs_balance_args *bargs) 3615 { 3616 struct btrfs_block_group *cache; 3617 u64 chunk_used; 3618 u64 user_thresh_min; 3619 u64 user_thresh_max; 3620 int ret = 1; 3621 3622 cache = btrfs_lookup_block_group(fs_info, chunk_offset); 3623 chunk_used = cache->used; 3624 3625 if (bargs->usage_min == 0) 3626 user_thresh_min = 0; 3627 else 3628 user_thresh_min = mult_perc(cache->length, bargs->usage_min); 3629 3630 if (bargs->usage_max == 0) 3631 user_thresh_max = 1; 3632 else if (bargs->usage_max > 100) 3633 user_thresh_max = cache->length; 3634 else 3635 user_thresh_max = mult_perc(cache->length, bargs->usage_max); 3636 3637 if (user_thresh_min <= chunk_used && chunk_used < user_thresh_max) 3638 ret = 0; 3639 3640 btrfs_put_block_group(cache); 3641 return ret; 3642 } 3643 3644 static int chunk_usage_filter(struct btrfs_fs_info *fs_info, 3645 u64 chunk_offset, struct btrfs_balance_args *bargs) 3646 { 3647 struct btrfs_block_group *cache; 3648 u64 chunk_used, user_thresh; 3649 int ret = 1; 3650 3651 cache = btrfs_lookup_block_group(fs_info, chunk_offset); 3652 chunk_used = cache->used; 3653 3654 if (bargs->usage_min == 0) 3655 user_thresh = 1; 3656 else if (bargs->usage > 100) 3657 user_thresh = cache->length; 3658 else 3659 user_thresh = mult_perc(cache->length, bargs->usage); 3660 3661 if (chunk_used < user_thresh) 3662 ret = 0; 3663 3664 btrfs_put_block_group(cache); 3665 return ret; 3666 } 3667 3668 static int chunk_devid_filter(struct extent_buffer *leaf, 3669 struct btrfs_chunk *chunk, 3670 struct btrfs_balance_args *bargs) 3671 { 3672 struct btrfs_stripe *stripe; 3673 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 3674 int i; 3675 3676 for (i = 0; i < num_stripes; i++) { 3677 stripe = btrfs_stripe_nr(chunk, i); 3678 if (btrfs_stripe_devid(leaf, stripe) == bargs->devid) 3679 return 0; 3680 } 3681 3682 return 1; 3683 } 3684 3685 static u64 calc_data_stripes(u64 type, int num_stripes) 3686 { 3687 const int index = btrfs_bg_flags_to_raid_index(type); 3688 const int ncopies = btrfs_raid_array[index].ncopies; 3689 const int nparity = btrfs_raid_array[index].nparity; 3690 3691 return (num_stripes - nparity) / ncopies; 3692 } 3693 3694 /* [pstart, pend) */ 3695 static int chunk_drange_filter(struct extent_buffer *leaf, 3696 struct btrfs_chunk *chunk, 3697 struct btrfs_balance_args *bargs) 3698 { 3699 struct btrfs_stripe *stripe; 3700 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 3701 u64 stripe_offset; 3702 u64 stripe_length; 3703 u64 type; 3704 int factor; 3705 int i; 3706 3707 if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID)) 3708 return 0; 3709 3710 type = btrfs_chunk_type(leaf, chunk); 3711 factor = calc_data_stripes(type, num_stripes); 3712 3713 for (i = 0; i < num_stripes; i++) { 3714 stripe = btrfs_stripe_nr(chunk, i); 3715 if (btrfs_stripe_devid(leaf, stripe) != bargs->devid) 3716 continue; 3717 3718 stripe_offset = btrfs_stripe_offset(leaf, stripe); 3719 stripe_length = btrfs_chunk_length(leaf, chunk); 3720 stripe_length = div_u64(stripe_length, factor); 3721 3722 if (stripe_offset < bargs->pend && 3723 stripe_offset + stripe_length > bargs->pstart) 3724 return 0; 3725 } 3726 3727 return 1; 3728 
} 3729 3730 /* [vstart, vend) */ 3731 static int chunk_vrange_filter(struct extent_buffer *leaf, 3732 struct btrfs_chunk *chunk, 3733 u64 chunk_offset, 3734 struct btrfs_balance_args *bargs) 3735 { 3736 if (chunk_offset < bargs->vend && 3737 chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart) 3738 /* at least part of the chunk is inside this vrange */ 3739 return 0; 3740 3741 return 1; 3742 } 3743 3744 static int chunk_stripes_range_filter(struct extent_buffer *leaf, 3745 struct btrfs_chunk *chunk, 3746 struct btrfs_balance_args *bargs) 3747 { 3748 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 3749 3750 if (bargs->stripes_min <= num_stripes 3751 && num_stripes <= bargs->stripes_max) 3752 return 0; 3753 3754 return 1; 3755 } 3756 3757 static int chunk_soft_convert_filter(u64 chunk_type, 3758 struct btrfs_balance_args *bargs) 3759 { 3760 if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT)) 3761 return 0; 3762 3763 chunk_type = chunk_to_extended(chunk_type) & 3764 BTRFS_EXTENDED_PROFILE_MASK; 3765 3766 if (bargs->target == chunk_type) 3767 return 1; 3768 3769 return 0; 3770 } 3771 3772 static int should_balance_chunk(struct extent_buffer *leaf, 3773 struct btrfs_chunk *chunk, u64 chunk_offset) 3774 { 3775 struct btrfs_fs_info *fs_info = leaf->fs_info; 3776 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 3777 struct btrfs_balance_args *bargs = NULL; 3778 u64 chunk_type = btrfs_chunk_type(leaf, chunk); 3779 3780 /* type filter */ 3781 if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) & 3782 (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) { 3783 return 0; 3784 } 3785 3786 if (chunk_type & BTRFS_BLOCK_GROUP_DATA) 3787 bargs = &bctl->data; 3788 else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) 3789 bargs = &bctl->sys; 3790 else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA) 3791 bargs = &bctl->meta; 3792 3793 /* profiles filter */ 3794 if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) && 3795 chunk_profiles_filter(chunk_type, bargs)) { 3796 return 0; 3797 } 3798 3799 /* usage filter */ 3800 if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) && 3801 chunk_usage_filter(fs_info, chunk_offset, bargs)) { 3802 return 0; 3803 } else if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) && 3804 chunk_usage_range_filter(fs_info, chunk_offset, bargs)) { 3805 return 0; 3806 } 3807 3808 /* devid filter */ 3809 if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) && 3810 chunk_devid_filter(leaf, chunk, bargs)) { 3811 return 0; 3812 } 3813 3814 /* drange filter, makes sense only with devid filter */ 3815 if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) && 3816 chunk_drange_filter(leaf, chunk, bargs)) { 3817 return 0; 3818 } 3819 3820 /* vrange filter */ 3821 if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) && 3822 chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) { 3823 return 0; 3824 } 3825 3826 /* stripes filter */ 3827 if ((bargs->flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) && 3828 chunk_stripes_range_filter(leaf, chunk, bargs)) { 3829 return 0; 3830 } 3831 3832 /* soft profile changing mode */ 3833 if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) && 3834 chunk_soft_convert_filter(chunk_type, bargs)) { 3835 return 0; 3836 } 3837 3838 /* 3839 * limited by count, must be the last filter 3840 */ 3841 if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT)) { 3842 if (bargs->limit == 0) 3843 return 0; 3844 else 3845 bargs->limit--; 3846 } else if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)) { 3847 /* 3848 * Same logic as the 'limit' filter; the minimum cannot be 3849 * determined here because we do not have the global 
information 3850 * about the count of all chunks that satisfy the filters. 3851 */ 3852 if (bargs->limit_max == 0) 3853 return 0; 3854 else 3855 bargs->limit_max--; 3856 } 3857 3858 return 1; 3859 } 3860 3861 static int __btrfs_balance(struct btrfs_fs_info *fs_info) { 3863 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 3864 struct btrfs_root *chunk_root = fs_info->chunk_root; 3865 u64 chunk_type; 3866 struct btrfs_chunk *chunk; 3867 struct btrfs_path *path = NULL; 3868 struct btrfs_key key; 3869 struct btrfs_key found_key; 3870 struct extent_buffer *leaf; 3871 int slot; 3872 int ret; 3873 int enospc_errors = 0; 3874 bool counting = true; 3875 /* The single value limit and min/max limits use the same bytes in the union inside btrfs_balance_args, save the single values here so they survive the counting pass. */ 3876 u64 limit_data = bctl->data.limit; 3877 u64 limit_meta = bctl->meta.limit; 3878 u64 limit_sys = bctl->sys.limit; 3879 u32 count_data = 0; 3880 u32 count_meta = 0; 3881 u32 count_sys = 0; 3882 int chunk_reserved = 0; 3883 3884 path = btrfs_alloc_path(); 3885 if (!path) { 3886 ret = -ENOMEM; 3887 goto error; 3888 } 3889 3890 /* zero out stat counters */ 3891 spin_lock(&fs_info->balance_lock); 3892 memset(&bctl->stat, 0, sizeof(bctl->stat)); 3893 spin_unlock(&fs_info->balance_lock); 3894 again: 3895 if (!counting) { 3896 /* 3897 * The single value limit and min/max limits use the same bytes 3898 * in the union; the counting pass has consumed the single values, 3899 * so restore the ones saved above before the relocation pass. */ 3900 bctl->data.limit = limit_data; 3901 bctl->meta.limit = limit_meta; 3902 bctl->sys.limit = limit_sys; 3903 } 3904 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 3905 key.offset = (u64)-1; 3906 key.type = BTRFS_CHUNK_ITEM_KEY; 3907 3908 while (1) { 3909 if ((!counting && atomic_read(&fs_info->balance_pause_req)) || 3910 atomic_read(&fs_info->balance_cancel_req)) { 3911 ret = -ECANCELED; 3912 goto error; 3913 } 3914 3915 mutex_lock(&fs_info->reclaim_bgs_lock); 3916 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0); 3917 if (ret < 0) { 3918 mutex_unlock(&fs_info->reclaim_bgs_lock); 3919 goto error; 3920 } 3921 3922 /* 3923 * This shouldn't happen: it means the last relocation 3924 * failed. 3925 */ 3926 if (ret == 0) 3927 BUG(); /* FIXME break ?
*/ 3928 3929 ret = btrfs_previous_item(chunk_root, path, 0, 3930 BTRFS_CHUNK_ITEM_KEY); 3931 if (ret) { 3932 mutex_unlock(&fs_info->reclaim_bgs_lock); 3933 ret = 0; 3934 break; 3935 } 3936 3937 leaf = path->nodes[0]; 3938 slot = path->slots[0]; 3939 btrfs_item_key_to_cpu(leaf, &found_key, slot); 3940 3941 if (found_key.objectid != key.objectid) { 3942 mutex_unlock(&fs_info->reclaim_bgs_lock); 3943 break; 3944 } 3945 3946 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk); 3947 chunk_type = btrfs_chunk_type(leaf, chunk); 3948 3949 if (!counting) { 3950 spin_lock(&fs_info->balance_lock); 3951 bctl->stat.considered++; 3952 spin_unlock(&fs_info->balance_lock); 3953 } 3954 3955 ret = should_balance_chunk(leaf, chunk, found_key.offset); 3956 3957 btrfs_release_path(path); 3958 if (!ret) { 3959 mutex_unlock(&fs_info->reclaim_bgs_lock); 3960 goto loop; 3961 } 3962 3963 if (counting) { 3964 mutex_unlock(&fs_info->reclaim_bgs_lock); 3965 spin_lock(&fs_info->balance_lock); 3966 bctl->stat.expected++; 3967 spin_unlock(&fs_info->balance_lock); 3968 3969 if (chunk_type & BTRFS_BLOCK_GROUP_DATA) 3970 count_data++; 3971 else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) 3972 count_sys++; 3973 else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA) 3974 count_meta++; 3975 3976 goto loop; 3977 } 3978 3979 /* 3980 * Apply the limit_min filter. No need to check whether the limit 3981 * filters are in use: limit_min is 0 by default. 3982 */ 3983 if (((chunk_type & BTRFS_BLOCK_GROUP_DATA) && 3984 count_data < bctl->data.limit_min) 3985 || ((chunk_type & BTRFS_BLOCK_GROUP_METADATA) && 3986 count_meta < bctl->meta.limit_min) 3987 || ((chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) && 3988 count_sys < bctl->sys.limit_min)) { 3989 mutex_unlock(&fs_info->reclaim_bgs_lock); 3990 goto loop; 3991 } 3992 3993 if (!chunk_reserved) { 3994 /* 3995 * We may be relocating the only data chunk we have, 3996 * which could end up losing the data raid 3997 * profile, so let's allocate an empty one in 3998 * advance. 3999 */ 4000 ret = btrfs_may_alloc_data_chunk(fs_info, 4001 found_key.offset); 4002 if (ret < 0) { 4003 mutex_unlock(&fs_info->reclaim_bgs_lock); 4004 goto error; 4005 } else if (ret == 1) { 4006 chunk_reserved = 1; 4007 } 4008 } 4009 4010 ret = btrfs_relocate_chunk(fs_info, found_key.offset); 4011 mutex_unlock(&fs_info->reclaim_bgs_lock); 4012 if (ret == -ENOSPC) { 4013 enospc_errors++; 4014 } else if (ret == -ETXTBSY) { 4015 btrfs_info(fs_info, 4016 "skipping relocation of block group %llu due to active swapfile", 4017 found_key.offset); 4018 ret = 0; 4019 } else if (ret) { 4020 goto error; 4021 } else { 4022 spin_lock(&fs_info->balance_lock); 4023 bctl->stat.completed++; 4024 spin_unlock(&fs_info->balance_lock); 4025 } 4026 loop: 4027 if (found_key.offset == 0) 4028 break; 4029 key.offset = found_key.offset - 1; 4030 } 4031 4032 if (counting) { 4033 btrfs_release_path(path); 4034 counting = false; 4035 goto again; 4036 } 4037 error: 4038 btrfs_free_path(path); 4039 if (enospc_errors) { 4040 btrfs_info(fs_info, "%d enospc errors during balance", 4041 enospc_errors); 4042 if (!ret) 4043 ret = -ENOSPC; 4044 } 4045 4046 return ret; 4047 } 4048 4049 /* 4050 * See if a given profile is valid and reduced. 4051 * 4052 * @flags: profile to validate 4053 * @extended: if true @flags is treated as an extended profile 4054 */ 4055 static int alloc_profile_is_valid(u64 flags, int extended) 4056 { 4057 u64 mask = (extended ?
BTRFS_EXTENDED_PROFILE_MASK : 4058 BTRFS_BLOCK_GROUP_PROFILE_MASK); 4059 4060 flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK; 4061 4062 /* 1) check that all other bits are zeroed */ 4063 if (flags & ~mask) 4064 return 0; 4065 4066 /* 2) see if profile is reduced */ 4067 if (flags == 0) 4068 return !extended; /* "0" is valid for usual profiles */ 4069 4070 return has_single_bit_set(flags); 4071 } 4072 4073 static inline int balance_need_close(struct btrfs_fs_info *fs_info) 4074 { 4075 /* cancel requested || normal exit path */ 4076 return atomic_read(&fs_info->balance_cancel_req) || 4077 (atomic_read(&fs_info->balance_pause_req) == 0 && 4078 atomic_read(&fs_info->balance_cancel_req) == 0); 4079 } 4080 4081 /* 4082 * Validate target profile against allowed profiles and return true if it's OK. 4083 * Otherwise print the error message and return false. 4084 */ 4085 static inline int validate_convert_profile(struct btrfs_fs_info *fs_info, 4086 const struct btrfs_balance_args *bargs, 4087 u64 allowed, const char *type) 4088 { 4089 if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT)) 4090 return true; 4091 4092 /* Profile is valid and does not have bits outside of the allowed set */ 4093 if (alloc_profile_is_valid(bargs->target, 1) && 4094 (bargs->target & ~allowed) == 0) 4095 return true; 4096 4097 btrfs_err(fs_info, "balance: invalid convert %s profile %s", 4098 type, btrfs_bg_type_to_raid_name(bargs->target)); 4099 return false; 4100 } 4101 4102 /* 4103 * Fill @buf with textual description of balance filter flags @bargs, up to 4104 * @size_buf including the terminating null. The output may be trimmed if it 4105 * does not fit into the provided buffer. 4106 */ 4107 static void describe_balance_args(struct btrfs_balance_args *bargs, char *buf, 4108 u32 size_buf) 4109 { 4110 int ret; 4111 u32 size_bp = size_buf; 4112 char *bp = buf; 4113 u64 flags = bargs->flags; 4114 char tmp_buf[128] = {'\0'}; 4115 4116 if (!flags) 4117 return; 4118 4119 #define CHECK_APPEND_NOARG(a) \ 4120 do { \ 4121 ret = snprintf(bp, size_bp, (a)); \ 4122 if (ret < 0 || ret >= size_bp) \ 4123 goto out_overflow; \ 4124 size_bp -= ret; \ 4125 bp += ret; \ 4126 } while (0) 4127 4128 #define CHECK_APPEND_1ARG(a, v1) \ 4129 do { \ 4130 ret = snprintf(bp, size_bp, (a), (v1)); \ 4131 if (ret < 0 || ret >= size_bp) \ 4132 goto out_overflow; \ 4133 size_bp -= ret; \ 4134 bp += ret; \ 4135 } while (0) 4136 4137 #define CHECK_APPEND_2ARG(a, v1, v2) \ 4138 do { \ 4139 ret = snprintf(bp, size_bp, (a), (v1), (v2)); \ 4140 if (ret < 0 || ret >= size_bp) \ 4141 goto out_overflow; \ 4142 size_bp -= ret; \ 4143 bp += ret; \ 4144 } while (0) 4145 4146 if (flags & BTRFS_BALANCE_ARGS_CONVERT) 4147 CHECK_APPEND_1ARG("convert=%s,", 4148 btrfs_bg_type_to_raid_name(bargs->target)); 4149 4150 if (flags & BTRFS_BALANCE_ARGS_SOFT) 4151 CHECK_APPEND_NOARG("soft,"); 4152 4153 if (flags & BTRFS_BALANCE_ARGS_PROFILES) { 4154 btrfs_describe_block_groups(bargs->profiles, tmp_buf, 4155 sizeof(tmp_buf)); 4156 CHECK_APPEND_1ARG("profiles=%s,", tmp_buf); 4157 } 4158 4159 if (flags & BTRFS_BALANCE_ARGS_USAGE) 4160 CHECK_APPEND_1ARG("usage=%llu,", bargs->usage); 4161 4162 if (flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) 4163 CHECK_APPEND_2ARG("usage=%u..%u,", 4164 bargs->usage_min, bargs->usage_max); 4165 4166 if (flags & BTRFS_BALANCE_ARGS_DEVID) 4167 CHECK_APPEND_1ARG("devid=%llu,", bargs->devid); 4168 4169 if (flags & BTRFS_BALANCE_ARGS_DRANGE) 4170 CHECK_APPEND_2ARG("drange=%llu..%llu,", 4171 bargs->pstart, bargs->pend); 4172 4173 if (flags & BTRFS_BALANCE_ARGS_VRANGE) 4174 
CHECK_APPEND_2ARG("vrange=%llu..%llu,", 4175 bargs->vstart, bargs->vend); 4176 4177 if (flags & BTRFS_BALANCE_ARGS_LIMIT) 4178 CHECK_APPEND_1ARG("limit=%llu,", bargs->limit); 4179 4180 if (flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE) 4181 CHECK_APPEND_2ARG("limit=%u..%u,", 4182 bargs->limit_min, bargs->limit_max); 4183 4184 if (flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) 4185 CHECK_APPEND_2ARG("stripes=%u..%u,", 4186 bargs->stripes_min, bargs->stripes_max); 4187 4188 #undef CHECK_APPEND_2ARG 4189 #undef CHECK_APPEND_1ARG 4190 #undef CHECK_APPEND_NOARG 4191 4192 out_overflow: 4193 4194 if (size_bp < size_buf) 4195 buf[size_buf - size_bp - 1] = '\0'; /* remove last , */ 4196 else 4197 buf[0] = '\0'; 4198 } 4199 4200 static void describe_balance_start_or_resume(struct btrfs_fs_info *fs_info) 4201 { 4202 u32 size_buf = 1024; 4203 char tmp_buf[192] = {'\0'}; 4204 char *buf; 4205 char *bp; 4206 u32 size_bp = size_buf; 4207 int ret; 4208 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 4209 4210 buf = kzalloc(size_buf, GFP_KERNEL); 4211 if (!buf) 4212 return; 4213 4214 bp = buf; 4215 4216 #define CHECK_APPEND_1ARG(a, v1) \ 4217 do { \ 4218 ret = snprintf(bp, size_bp, (a), (v1)); \ 4219 if (ret < 0 || ret >= size_bp) \ 4220 goto out_overflow; \ 4221 size_bp -= ret; \ 4222 bp += ret; \ 4223 } while (0) 4224 4225 if (bctl->flags & BTRFS_BALANCE_FORCE) 4226 CHECK_APPEND_1ARG("%s", "-f "); 4227 4228 if (bctl->flags & BTRFS_BALANCE_DATA) { 4229 describe_balance_args(&bctl->data, tmp_buf, sizeof(tmp_buf)); 4230 CHECK_APPEND_1ARG("-d%s ", tmp_buf); 4231 } 4232 4233 if (bctl->flags & BTRFS_BALANCE_METADATA) { 4234 describe_balance_args(&bctl->meta, tmp_buf, sizeof(tmp_buf)); 4235 CHECK_APPEND_1ARG("-m%s ", tmp_buf); 4236 } 4237 4238 if (bctl->flags & BTRFS_BALANCE_SYSTEM) { 4239 describe_balance_args(&bctl->sys, tmp_buf, sizeof(tmp_buf)); 4240 CHECK_APPEND_1ARG("-s%s ", tmp_buf); 4241 } 4242 4243 #undef CHECK_APPEND_1ARG 4244 4245 out_overflow: 4246 4247 if (size_bp < size_buf) 4248 buf[size_buf - size_bp - 1] = '\0'; /* remove last " " */ 4249 btrfs_info(fs_info, "balance: %s %s", 4250 (bctl->flags & BTRFS_BALANCE_RESUME) ? 4251 "resume" : "start", buf); 4252 4253 kfree(buf); 4254 } 4255 4256 /* 4257 * Should be called with balance mutexe held 4258 */ 4259 int btrfs_balance(struct btrfs_fs_info *fs_info, 4260 struct btrfs_balance_control *bctl, 4261 struct btrfs_ioctl_balance_args *bargs) 4262 { 4263 u64 meta_target, data_target; 4264 u64 allowed; 4265 int mixed = 0; 4266 int ret; 4267 u64 num_devices; 4268 unsigned seq; 4269 bool reducing_redundancy; 4270 int i; 4271 4272 if (btrfs_fs_closing(fs_info) || 4273 atomic_read(&fs_info->balance_pause_req) || 4274 btrfs_should_cancel_balance(fs_info)) { 4275 ret = -EINVAL; 4276 goto out; 4277 } 4278 4279 allowed = btrfs_super_incompat_flags(fs_info->super_copy); 4280 if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) 4281 mixed = 1; 4282 4283 /* 4284 * In case of mixed groups both data and meta should be picked, 4285 * and identical options should be given for both of them. 
4286 */ 4287 allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA; 4288 if (mixed && (bctl->flags & allowed)) { 4289 if (!(bctl->flags & BTRFS_BALANCE_DATA) || 4290 !(bctl->flags & BTRFS_BALANCE_METADATA) || 4291 memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) { 4292 btrfs_err(fs_info, 4293 "balance: mixed groups data and metadata options must be the same"); 4294 ret = -EINVAL; 4295 goto out; 4296 } 4297 } 4298 4299 /* 4300 * rw_devices will not change at the moment, device add/delete/replace 4301 * are exclusive 4302 */ 4303 num_devices = fs_info->fs_devices->rw_devices; 4304 4305 /* 4306 * SINGLE profile on-disk has no profile bit, but in-memory we have a 4307 * special bit for it, to make it easier to distinguish. Thus we need 4308 * to set it manually, or balance would refuse the profile. 4309 */ 4310 allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE; 4311 for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++) 4312 if (num_devices >= btrfs_raid_array[i].devs_min) 4313 allowed |= btrfs_raid_array[i].bg_flag; 4314 4315 if (!validate_convert_profile(fs_info, &bctl->data, allowed, "data") || 4316 !validate_convert_profile(fs_info, &bctl->meta, allowed, "metadata") || 4317 !validate_convert_profile(fs_info, &bctl->sys, allowed, "system")) { 4318 ret = -EINVAL; 4319 goto out; 4320 } 4321 4322 /* 4323 * Allow reducing metadata or system integrity only if force is set, and 4324 * only for profiles with redundancy (copies, parity). 4325 */ 4326 allowed = 0; 4327 for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++) { 4328 if (btrfs_raid_array[i].ncopies >= 2 || 4329 btrfs_raid_array[i].tolerated_failures >= 1) 4330 allowed |= btrfs_raid_array[i].bg_flag; 4331 } 4332 do { 4333 seq = read_seqbegin(&fs_info->profiles_lock); 4334 4335 if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) && 4336 (fs_info->avail_system_alloc_bits & allowed) && 4337 !(bctl->sys.target & allowed)) || 4338 ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) && 4339 (fs_info->avail_metadata_alloc_bits & allowed) && 4340 !(bctl->meta.target & allowed))) 4341 reducing_redundancy = true; 4342 else 4343 reducing_redundancy = false; 4344 4345 /* if we're not converting, the target field is uninitialized */ 4346 meta_target = (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) ? 4347 bctl->meta.target : fs_info->avail_metadata_alloc_bits; 4348 data_target = (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
4349 bctl->data.target : fs_info->avail_data_alloc_bits; 4350 } while (read_seqretry(&fs_info->profiles_lock, seq)); 4351 4352 if (reducing_redundancy) { 4353 if (bctl->flags & BTRFS_BALANCE_FORCE) { 4354 btrfs_info(fs_info, 4355 "balance: force reducing metadata redundancy"); 4356 } else { 4357 btrfs_err(fs_info, 4358 "balance: reduces metadata redundancy, use --force if you want this"); 4359 ret = -EINVAL; 4360 goto out; 4361 } 4362 } 4363 4364 if (btrfs_get_num_tolerated_disk_barrier_failures(meta_target) < 4365 btrfs_get_num_tolerated_disk_barrier_failures(data_target)) { 4366 btrfs_warn(fs_info, 4367 "balance: metadata profile %s has lower redundancy than data profile %s", 4368 btrfs_bg_type_to_raid_name(meta_target), 4369 btrfs_bg_type_to_raid_name(data_target)); 4370 } 4371 4372 ret = insert_balance_item(fs_info, bctl); 4373 if (ret && ret != -EEXIST) 4374 goto out; 4375 4376 if (!(bctl->flags & BTRFS_BALANCE_RESUME)) { 4377 BUG_ON(ret == -EEXIST); 4378 BUG_ON(fs_info->balance_ctl); 4379 spin_lock(&fs_info->balance_lock); 4380 fs_info->balance_ctl = bctl; 4381 spin_unlock(&fs_info->balance_lock); 4382 } else { 4383 BUG_ON(ret != -EEXIST); 4384 spin_lock(&fs_info->balance_lock); 4385 update_balance_args(bctl); 4386 spin_unlock(&fs_info->balance_lock); 4387 } 4388 4389 ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4390 set_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags); 4391 describe_balance_start_or_resume(fs_info); 4392 mutex_unlock(&fs_info->balance_mutex); 4393 4394 ret = __btrfs_balance(fs_info); 4395 4396 mutex_lock(&fs_info->balance_mutex); 4397 if (ret == -ECANCELED && atomic_read(&fs_info->balance_pause_req)) { 4398 btrfs_info(fs_info, "balance: paused"); 4399 btrfs_exclop_balance(fs_info, BTRFS_EXCLOP_BALANCE_PAUSED); 4400 } 4401 /* 4402 * Balance can be canceled by: 4403 * 4404 * - Regular cancel request 4405 * Then ret == -ECANCELED and balance_cancel_req > 0 4406 * 4407 * - Fatal signal to "btrfs" process 4408 * Either the signal caught by wait_reserve_ticket() and callers 4409 * got -EINTR, or caught by btrfs_should_cancel_balance() and 4410 * got -ECANCELED. 4411 * Either way, in this case balance_cancel_req = 0, and 4412 * ret == -EINTR or ret == -ECANCELED. 4413 * 4414 * So here we only check the return value to catch canceled balance. 
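 *
 * A compact summary of the cases handled below (restating the text above,
 * no new behaviour):
 *
 *   ret == -ECANCELED && balance_pause_req > 0  -> "balance: paused"
 *   ret == -ECANCELED || ret == -EINTR          -> "balance: canceled"
 *   anything else                               -> "balance: ended with status: %d"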
4415 */ 4416 else if (ret == -ECANCELED || ret == -EINTR) 4417 btrfs_info(fs_info, "balance: canceled"); 4418 else 4419 btrfs_info(fs_info, "balance: ended with status: %d", ret); 4420 4421 clear_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags); 4422 4423 if (bargs) { 4424 memset(bargs, 0, sizeof(*bargs)); 4425 btrfs_update_ioctl_balance_args(fs_info, bargs); 4426 } 4427 4428 if ((ret && ret != -ECANCELED && ret != -ENOSPC) || 4429 balance_need_close(fs_info)) { 4430 reset_balance_state(fs_info); 4431 btrfs_exclop_finish(fs_info); 4432 } 4433 4434 wake_up(&fs_info->balance_wait_q); 4435 4436 return ret; 4437 out: 4438 if (bctl->flags & BTRFS_BALANCE_RESUME) 4439 reset_balance_state(fs_info); 4440 else 4441 kfree(bctl); 4442 btrfs_exclop_finish(fs_info); 4443 4444 return ret; 4445 } 4446 4447 static int balance_kthread(void *data) 4448 { 4449 struct btrfs_fs_info *fs_info = data; 4450 int ret = 0; 4451 4452 sb_start_write(fs_info->sb); 4453 mutex_lock(&fs_info->balance_mutex); 4454 if (fs_info->balance_ctl) 4455 ret = btrfs_balance(fs_info, fs_info->balance_ctl, NULL); 4456 mutex_unlock(&fs_info->balance_mutex); 4457 sb_end_write(fs_info->sb); 4458 4459 return ret; 4460 } 4461 4462 int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info) 4463 { 4464 struct task_struct *tsk; 4465 4466 mutex_lock(&fs_info->balance_mutex); 4467 if (!fs_info->balance_ctl) { 4468 mutex_unlock(&fs_info->balance_mutex); 4469 return 0; 4470 } 4471 mutex_unlock(&fs_info->balance_mutex); 4472 4473 if (btrfs_test_opt(fs_info, SKIP_BALANCE)) { 4474 btrfs_info(fs_info, "balance: resume skipped"); 4475 return 0; 4476 } 4477 4478 spin_lock(&fs_info->super_lock); 4479 ASSERT(fs_info->exclusive_operation == BTRFS_EXCLOP_BALANCE_PAUSED); 4480 fs_info->exclusive_operation = BTRFS_EXCLOP_BALANCE; 4481 spin_unlock(&fs_info->super_lock); 4482 /* 4483 * A ro->rw remount sequence should continue with the paused balance 4484 * regardless of who paused it (the system or the user), so set 4485 * the resume flag.
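 *
 * The flag matters later in btrfs_balance(): with BTRFS_BALANCE_RESUME set,
 * insert_balance_item() is expected to return -EEXIST (the item is already
 * on disk) and the in-memory args are refreshed via update_balance_args()
 * instead of installing a new balance_ctl.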
4486 */ 4487 spin_lock(&fs_info->balance_lock); 4488 fs_info->balance_ctl->flags |= BTRFS_BALANCE_RESUME; 4489 spin_unlock(&fs_info->balance_lock); 4490 4491 tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance"); 4492 return PTR_ERR_OR_ZERO(tsk); 4493 } 4494 4495 int btrfs_recover_balance(struct btrfs_fs_info *fs_info) 4496 { 4497 struct btrfs_balance_control *bctl; 4498 struct btrfs_balance_item *item; 4499 struct btrfs_disk_balance_args disk_bargs; 4500 struct btrfs_path *path; 4501 struct extent_buffer *leaf; 4502 struct btrfs_key key; 4503 int ret; 4504 4505 path = btrfs_alloc_path(); 4506 if (!path) 4507 return -ENOMEM; 4508 4509 key.objectid = BTRFS_BALANCE_OBJECTID; 4510 key.type = BTRFS_TEMPORARY_ITEM_KEY; 4511 key.offset = 0; 4512 4513 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0); 4514 if (ret < 0) 4515 goto out; 4516 if (ret > 0) { /* ret = -ENOENT; */ 4517 ret = 0; 4518 goto out; 4519 } 4520 4521 bctl = kzalloc(sizeof(*bctl), GFP_NOFS); 4522 if (!bctl) { 4523 ret = -ENOMEM; 4524 goto out; 4525 } 4526 4527 leaf = path->nodes[0]; 4528 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item); 4529 4530 bctl->flags = btrfs_balance_flags(leaf, item); 4531 bctl->flags |= BTRFS_BALANCE_RESUME; 4532 4533 btrfs_balance_data(leaf, item, &disk_bargs); 4534 btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs); 4535 btrfs_balance_meta(leaf, item, &disk_bargs); 4536 btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs); 4537 btrfs_balance_sys(leaf, item, &disk_bargs); 4538 btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs); 4539 4540 /* 4541 * This should never happen, as the paused balance state is recovered 4542 * during mount without any chance of other exclusive ops to collide. 4543 * 4544 * This gives the exclusive op status to balance and keeps in paused 4545 * state until user intervention (cancel or umount). If the ownership 4546 * cannot be assigned, show a message but do not fail. The balance 4547 * is in a paused state and must have fs_info::balance_ctl properly 4548 * set up. 
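 *
 * (The item read above lives in the tree root under the key
 * {BTRFS_BALANCE_OBJECTID, BTRFS_TEMPORARY_ITEM_KEY, 0}, the same triple
 * that insert_balance_item() uses when a balance is first started.)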
4549 */ 4550 if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE_PAUSED)) 4551 btrfs_warn(fs_info, 4552 "balance: cannot set exclusive op status, resume manually"); 4553 4554 btrfs_release_path(path); 4555 4556 mutex_lock(&fs_info->balance_mutex); 4557 BUG_ON(fs_info->balance_ctl); 4558 spin_lock(&fs_info->balance_lock); 4559 fs_info->balance_ctl = bctl; 4560 spin_unlock(&fs_info->balance_lock); 4561 mutex_unlock(&fs_info->balance_mutex); 4562 out: 4563 btrfs_free_path(path); 4564 return ret; 4565 } 4566 4567 int btrfs_pause_balance(struct btrfs_fs_info *fs_info) 4568 { 4569 int ret = 0; 4570 4571 mutex_lock(&fs_info->balance_mutex); 4572 if (!fs_info->balance_ctl) { 4573 mutex_unlock(&fs_info->balance_mutex); 4574 return -ENOTCONN; 4575 } 4576 4577 if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) { 4578 atomic_inc(&fs_info->balance_pause_req); 4579 mutex_unlock(&fs_info->balance_mutex); 4580 4581 wait_event(fs_info->balance_wait_q, 4582 !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4583 4584 mutex_lock(&fs_info->balance_mutex); 4585 /* we are good with balance_ctl ripped off from under us */ 4586 BUG_ON(test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4587 atomic_dec(&fs_info->balance_pause_req); 4588 } else { 4589 ret = -ENOTCONN; 4590 } 4591 4592 mutex_unlock(&fs_info->balance_mutex); 4593 return ret; 4594 } 4595 4596 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info) 4597 { 4598 mutex_lock(&fs_info->balance_mutex); 4599 if (!fs_info->balance_ctl) { 4600 mutex_unlock(&fs_info->balance_mutex); 4601 return -ENOTCONN; 4602 } 4603 4604 /* 4605 * A paused balance with the item stored on disk can be resumed at 4606 * mount time if the mount is read-write. Otherwise it's still paused 4607 * and we must not allow cancelling as it deletes the item. 4608 */ 4609 if (sb_rdonly(fs_info->sb)) { 4610 mutex_unlock(&fs_info->balance_mutex); 4611 return -EROFS; 4612 } 4613 4614 atomic_inc(&fs_info->balance_cancel_req); 4615 /* 4616 * if we are running just wait and return, balance item is 4617 * deleted in btrfs_balance in this case 4618 */ 4619 if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) { 4620 mutex_unlock(&fs_info->balance_mutex); 4621 wait_event(fs_info->balance_wait_q, 4622 !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4623 mutex_lock(&fs_info->balance_mutex); 4624 } else { 4625 mutex_unlock(&fs_info->balance_mutex); 4626 /* 4627 * Lock released to allow other waiters to continue, we'll 4628 * reexamine the status again. 
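 *
 * This is the classic unlock/recheck pattern: while the mutex was dropped
 * another task may have finished or reset the balance, hence the
 * fs_info->balance_ctl test below before touching any state.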
4629 */ 4630 mutex_lock(&fs_info->balance_mutex); 4631 4632 if (fs_info->balance_ctl) { 4633 reset_balance_state(fs_info); 4634 btrfs_exclop_finish(fs_info); 4635 btrfs_info(fs_info, "balance: canceled"); 4636 } 4637 } 4638 4639 BUG_ON(fs_info->balance_ctl || 4640 test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4641 atomic_dec(&fs_info->balance_cancel_req); 4642 mutex_unlock(&fs_info->balance_mutex); 4643 return 0; 4644 } 4645 4646 int btrfs_uuid_scan_kthread(void *data) 4647 { 4648 struct btrfs_fs_info *fs_info = data; 4649 struct btrfs_root *root = fs_info->tree_root; 4650 struct btrfs_key key; 4651 struct btrfs_path *path = NULL; 4652 int ret = 0; 4653 struct extent_buffer *eb; 4654 int slot; 4655 struct btrfs_root_item root_item; 4656 u32 item_size; 4657 struct btrfs_trans_handle *trans = NULL; 4658 bool closing = false; 4659 4660 path = btrfs_alloc_path(); 4661 if (!path) { 4662 ret = -ENOMEM; 4663 goto out; 4664 } 4665 4666 key.objectid = 0; 4667 key.type = BTRFS_ROOT_ITEM_KEY; 4668 key.offset = 0; 4669 4670 while (1) { 4671 if (btrfs_fs_closing(fs_info)) { 4672 closing = true; 4673 break; 4674 } 4675 ret = btrfs_search_forward(root, &key, path, 4676 BTRFS_OLDEST_GENERATION); 4677 if (ret) { 4678 if (ret > 0) 4679 ret = 0; 4680 break; 4681 } 4682 4683 if (key.type != BTRFS_ROOT_ITEM_KEY || 4684 (key.objectid < BTRFS_FIRST_FREE_OBJECTID && 4685 key.objectid != BTRFS_FS_TREE_OBJECTID) || 4686 key.objectid > BTRFS_LAST_FREE_OBJECTID) 4687 goto skip; 4688 4689 eb = path->nodes[0]; 4690 slot = path->slots[0]; 4691 item_size = btrfs_item_size(eb, slot); 4692 if (item_size < sizeof(root_item)) 4693 goto skip; 4694 4695 read_extent_buffer(eb, &root_item, 4696 btrfs_item_ptr_offset(eb, slot), 4697 (int)sizeof(root_item)); 4698 if (btrfs_root_refs(&root_item) == 0) 4699 goto skip; 4700 4701 if (!btrfs_is_empty_uuid(root_item.uuid) || 4702 !btrfs_is_empty_uuid(root_item.received_uuid)) { 4703 if (trans) 4704 goto update_tree; 4705 4706 btrfs_release_path(path); 4707 /* 4708 * 1 - subvol uuid item 4709 * 1 - received_subvol uuid item 4710 */ 4711 trans = btrfs_start_transaction(fs_info->uuid_root, 2); 4712 if (IS_ERR(trans)) { 4713 ret = PTR_ERR(trans); 4714 break; 4715 } 4716 continue; 4717 } else { 4718 goto skip; 4719 } 4720 update_tree: 4721 btrfs_release_path(path); 4722 if (!btrfs_is_empty_uuid(root_item.uuid)) { 4723 ret = btrfs_uuid_tree_add(trans, root_item.uuid, 4724 BTRFS_UUID_KEY_SUBVOL, 4725 key.objectid); 4726 if (ret < 0) { 4727 btrfs_warn(fs_info, "uuid_tree_add failed %d", 4728 ret); 4729 break; 4730 } 4731 } 4732 4733 if (!btrfs_is_empty_uuid(root_item.received_uuid)) { 4734 ret = btrfs_uuid_tree_add(trans, 4735 root_item.received_uuid, 4736 BTRFS_UUID_KEY_RECEIVED_SUBVOL, 4737 key.objectid); 4738 if (ret < 0) { 4739 btrfs_warn(fs_info, "uuid_tree_add failed %d", 4740 ret); 4741 break; 4742 } 4743 } 4744 4745 skip: 4746 btrfs_release_path(path); 4747 if (trans) { 4748 ret = btrfs_end_transaction(trans); 4749 trans = NULL; 4750 if (ret) 4751 break; 4752 } 4753 4754 if (key.offset < (u64)-1) { 4755 key.offset++; 4756 } else if (key.type < BTRFS_ROOT_ITEM_KEY) { 4757 key.offset = 0; 4758 key.type = BTRFS_ROOT_ITEM_KEY; 4759 } else if (key.objectid < (u64)-1) { 4760 key.offset = 0; 4761 key.type = BTRFS_ROOT_ITEM_KEY; 4762 key.objectid++; 4763 } else { 4764 break; 4765 } 4766 cond_resched(); 4767 } 4768 4769 out: 4770 btrfs_free_path(path); 4771 if (trans && !IS_ERR(trans)) 4772 btrfs_end_transaction(trans); 4773 if (ret) 4774 btrfs_warn(fs_info, "btrfs_uuid_scan_kthread 
failed %d", ret); 4775 else if (!closing) 4776 set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags); 4777 up(&fs_info->uuid_tree_rescan_sem); 4778 return 0; 4779 } 4780 4781 int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info) 4782 { 4783 struct btrfs_trans_handle *trans; 4784 struct btrfs_root *tree_root = fs_info->tree_root; 4785 struct btrfs_root *uuid_root; 4786 struct task_struct *task; 4787 int ret; 4788 4789 /* 4790 * 1 - root node 4791 * 1 - root item 4792 */ 4793 trans = btrfs_start_transaction(tree_root, 2); 4794 if (IS_ERR(trans)) 4795 return PTR_ERR(trans); 4796 4797 uuid_root = btrfs_create_tree(trans, BTRFS_UUID_TREE_OBJECTID); 4798 if (IS_ERR(uuid_root)) { 4799 ret = PTR_ERR(uuid_root); 4800 btrfs_abort_transaction(trans, ret); 4801 btrfs_end_transaction(trans); 4802 return ret; 4803 } 4804 4805 fs_info->uuid_root = uuid_root; 4806 4807 ret = btrfs_commit_transaction(trans); 4808 if (ret) 4809 return ret; 4810 4811 down(&fs_info->uuid_tree_rescan_sem); 4812 task = kthread_run(btrfs_uuid_scan_kthread, fs_info, "btrfs-uuid"); 4813 if (IS_ERR(task)) { 4814 /* fs_info->update_uuid_tree_gen remains 0 in all error case */ 4815 btrfs_warn(fs_info, "failed to start uuid_scan task"); 4816 up(&fs_info->uuid_tree_rescan_sem); 4817 return PTR_ERR(task); 4818 } 4819 4820 return 0; 4821 } 4822 4823 /* 4824 * shrinking a device means finding all of the device extents past 4825 * the new size, and then following the back refs to the chunks. 4826 * The chunk relocation code actually frees the device extent 4827 */ 4828 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size) 4829 { 4830 struct btrfs_fs_info *fs_info = device->fs_info; 4831 struct btrfs_root *root = fs_info->dev_root; 4832 struct btrfs_trans_handle *trans; 4833 struct btrfs_dev_extent *dev_extent = NULL; 4834 struct btrfs_path *path; 4835 u64 length; 4836 u64 chunk_offset; 4837 int ret; 4838 int slot; 4839 int failed = 0; 4840 bool retried = false; 4841 struct extent_buffer *l; 4842 struct btrfs_key key; 4843 struct btrfs_super_block *super_copy = fs_info->super_copy; 4844 u64 old_total = btrfs_super_total_bytes(super_copy); 4845 u64 old_size = btrfs_device_get_total_bytes(device); 4846 u64 diff; 4847 u64 start; 4848 4849 new_size = round_down(new_size, fs_info->sectorsize); 4850 start = new_size; 4851 diff = round_down(old_size - new_size, fs_info->sectorsize); 4852 4853 if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) 4854 return -EINVAL; 4855 4856 path = btrfs_alloc_path(); 4857 if (!path) 4858 return -ENOMEM; 4859 4860 path->reada = READA_BACK; 4861 4862 trans = btrfs_start_transaction(root, 0); 4863 if (IS_ERR(trans)) { 4864 btrfs_free_path(path); 4865 return PTR_ERR(trans); 4866 } 4867 4868 mutex_lock(&fs_info->chunk_mutex); 4869 4870 btrfs_device_set_total_bytes(device, new_size); 4871 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 4872 device->fs_devices->total_rw_bytes -= diff; 4873 atomic64_sub(diff, &fs_info->free_chunk_space); 4874 } 4875 4876 /* 4877 * Once the device's size has been set to the new size, ensure all 4878 * in-memory chunks are synced to disk so that the loop below sees them 4879 * and relocates them accordingly. 
4880 */ 4881 if (contains_pending_extent(device, &start, diff)) { 4882 mutex_unlock(&fs_info->chunk_mutex); 4883 ret = btrfs_commit_transaction(trans); 4884 if (ret) 4885 goto done; 4886 } else { 4887 mutex_unlock(&fs_info->chunk_mutex); 4888 btrfs_end_transaction(trans); 4889 } 4890 4891 again: 4892 key.objectid = device->devid; 4893 key.offset = (u64)-1; 4894 key.type = BTRFS_DEV_EXTENT_KEY; 4895 4896 do { 4897 mutex_lock(&fs_info->reclaim_bgs_lock); 4898 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 4899 if (ret < 0) { 4900 mutex_unlock(&fs_info->reclaim_bgs_lock); 4901 goto done; 4902 } 4903 4904 ret = btrfs_previous_item(root, path, 0, key.type); 4905 if (ret) { 4906 mutex_unlock(&fs_info->reclaim_bgs_lock); 4907 if (ret < 0) 4908 goto done; 4909 ret = 0; 4910 btrfs_release_path(path); 4911 break; 4912 } 4913 4914 l = path->nodes[0]; 4915 slot = path->slots[0]; 4916 btrfs_item_key_to_cpu(l, &key, path->slots[0]); 4917 4918 if (key.objectid != device->devid) { 4919 mutex_unlock(&fs_info->reclaim_bgs_lock); 4920 btrfs_release_path(path); 4921 break; 4922 } 4923 4924 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent); 4925 length = btrfs_dev_extent_length(l, dev_extent); 4926 4927 if (key.offset + length <= new_size) { 4928 mutex_unlock(&fs_info->reclaim_bgs_lock); 4929 btrfs_release_path(path); 4930 break; 4931 } 4932 4933 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent); 4934 btrfs_release_path(path); 4935 4936 /* 4937 * We may be relocating the only data chunk we have, 4938 * which could end up losing the data raid 4939 * profile, so let's allocate an empty one in 4940 * advance. 4941 */ 4942 ret = btrfs_may_alloc_data_chunk(fs_info, chunk_offset); 4943 if (ret < 0) { 4944 mutex_unlock(&fs_info->reclaim_bgs_lock); 4945 goto done; 4946 } 4947 4948 ret = btrfs_relocate_chunk(fs_info, chunk_offset); 4949 mutex_unlock(&fs_info->reclaim_bgs_lock); 4950 if (ret == -ENOSPC) { 4951 failed++; 4952 } else if (ret) { 4953 if (ret == -ETXTBSY) { 4954 btrfs_warn(fs_info, 4955 "could not shrink block group %llu due to active swapfile", 4956 chunk_offset); 4957 } 4958 goto done; 4959 } 4960 } while (key.offset-- > 0); 4961 4962 if (failed && !retried) { 4963 failed = 0; 4964 retried = true; 4965 goto again; 4966 } else if (failed && retried) { 4967 ret = -ENOSPC; 4968 goto done; 4969 } 4970 4971 /* Shrinking succeeded, else we would be at "done". */ 4972 trans = btrfs_start_transaction(root, 0); 4973 if (IS_ERR(trans)) { 4974 ret = PTR_ERR(trans); 4975 goto done; 4976 } 4977 4978 mutex_lock(&fs_info->chunk_mutex); 4979 /* Clear all state bits beyond the shrunk device size */ 4980 clear_extent_bits(&device->alloc_state, new_size, (u64)-1, 4981 CHUNK_STATE_MASK); 4982 4983 btrfs_device_set_disk_total_bytes(device, new_size); 4984 if (list_empty(&device->post_commit_list)) 4985 list_add_tail(&device->post_commit_list, 4986 &trans->transaction->dev_update_list); 4987 4988 WARN_ON(diff > old_total); 4989 btrfs_set_super_total_bytes(super_copy, 4990 round_down(old_total - diff, fs_info->sectorsize)); 4991 mutex_unlock(&fs_info->chunk_mutex); 4992 4993 btrfs_reserve_chunk_metadata(trans, false); 4994 /* Now btrfs_update_device() will change the on-disk size.
*/ 4995 ret = btrfs_update_device(trans, device); 4996 btrfs_trans_release_chunk_metadata(trans); 4997 if (ret < 0) { 4998 btrfs_abort_transaction(trans, ret); 4999 btrfs_end_transaction(trans); 5000 } else { 5001 ret = btrfs_commit_transaction(trans); 5002 } 5003 done: 5004 btrfs_free_path(path); 5005 if (ret) { 5006 mutex_lock(&fs_info->chunk_mutex); 5007 btrfs_device_set_total_bytes(device, old_size); 5008 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) 5009 device->fs_devices->total_rw_bytes += diff; 5010 atomic64_add(diff, &fs_info->free_chunk_space); 5011 mutex_unlock(&fs_info->chunk_mutex); 5012 } 5013 return ret; 5014 } 5015 5016 static int btrfs_add_system_chunk(struct btrfs_fs_info *fs_info, 5017 struct btrfs_key *key, 5018 struct btrfs_chunk *chunk, int item_size) 5019 { 5020 struct btrfs_super_block *super_copy = fs_info->super_copy; 5021 struct btrfs_disk_key disk_key; 5022 u32 array_size; 5023 u8 *ptr; 5024 5025 lockdep_assert_held(&fs_info->chunk_mutex); 5026 5027 array_size = btrfs_super_sys_array_size(super_copy); 5028 if (array_size + item_size + sizeof(disk_key) 5029 > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) 5030 return -EFBIG; 5031 5032 ptr = super_copy->sys_chunk_array + array_size; 5033 btrfs_cpu_key_to_disk(&disk_key, key); 5034 memcpy(ptr, &disk_key, sizeof(disk_key)); 5035 ptr += sizeof(disk_key); 5036 memcpy(ptr, chunk, item_size); 5037 item_size += sizeof(disk_key); 5038 btrfs_set_super_sys_array_size(super_copy, array_size + item_size); 5039 5040 return 0; 5041 } 5042 5043 /* 5044 * sort the devices in descending order by max_avail, total_avail 5045 */ 5046 static int btrfs_cmp_device_info(const void *a, const void *b) 5047 { 5048 const struct btrfs_device_info *di_a = a; 5049 const struct btrfs_device_info *di_b = b; 5050 5051 if (di_a->max_avail > di_b->max_avail) 5052 return -1; 5053 if (di_a->max_avail < di_b->max_avail) 5054 return 1; 5055 if (di_a->total_avail > di_b->total_avail) 5056 return -1; 5057 if (di_a->total_avail < di_b->total_avail) 5058 return 1; 5059 return 0; 5060 } 5061 5062 static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type) 5063 { 5064 if (!(type & BTRFS_BLOCK_GROUP_RAID56_MASK)) 5065 return; 5066 5067 btrfs_set_fs_incompat(info, RAID56); 5068 } 5069 5070 static void check_raid1c34_incompat_flag(struct btrfs_fs_info *info, u64 type) 5071 { 5072 if (!(type & (BTRFS_BLOCK_GROUP_RAID1C3 | BTRFS_BLOCK_GROUP_RAID1C4))) 5073 return; 5074 5075 btrfs_set_fs_incompat(info, RAID1C34); 5076 } 5077 5078 /* 5079 * Structure used internally for btrfs_create_chunk() function. 5080 * Wraps needed parameters. 
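 *
 * The helpers below fill it in; the key relations (see
 * decide_stripe_size_regular() and decide_stripe_size_zoned()) are:
 *
 *   num_stripes  = ndevs * dev_stripes
 *   data_stripes = (num_stripes - nparity) / ncopies
 *   chunk_size   = stripe_size * data_stripes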
5081 */ 5082 struct alloc_chunk_ctl { 5083 u64 start; 5084 u64 type; 5085 /* Total number of stripes to allocate */ 5086 int num_stripes; 5087 /* sub_stripes info for map */ 5088 int sub_stripes; 5089 /* Stripes per device */ 5090 int dev_stripes; 5091 /* Maximum number of devices to use */ 5092 int devs_max; 5093 /* Minimum number of devices to use */ 5094 int devs_min; 5095 /* ndevs has to be a multiple of this */ 5096 int devs_increment; 5097 /* Number of copies */ 5098 int ncopies; 5099 /* Number of stripes worth of bytes to store parity information */ 5100 int nparity; 5101 u64 max_stripe_size; 5102 u64 max_chunk_size; 5103 u64 dev_extent_min; 5104 u64 stripe_size; 5105 u64 chunk_size; 5106 int ndevs; 5107 }; 5108 5109 static void init_alloc_chunk_ctl_policy_regular( 5110 struct btrfs_fs_devices *fs_devices, 5111 struct alloc_chunk_ctl *ctl) 5112 { 5113 struct btrfs_space_info *space_info; 5114 5115 space_info = btrfs_find_space_info(fs_devices->fs_info, ctl->type); 5116 ASSERT(space_info); 5117 5118 ctl->max_chunk_size = READ_ONCE(space_info->chunk_size); 5119 ctl->max_stripe_size = ctl->max_chunk_size; 5120 5121 if (ctl->type & BTRFS_BLOCK_GROUP_SYSTEM) 5122 ctl->devs_max = min_t(int, ctl->devs_max, BTRFS_MAX_DEVS_SYS_CHUNK); 5123 5124 /* We don't want a chunk larger than 10% of writable space */ 5125 ctl->max_chunk_size = min(mult_perc(fs_devices->total_rw_bytes, 10), 5126 ctl->max_chunk_size); 5127 ctl->dev_extent_min = ctl->dev_stripes << BTRFS_STRIPE_LEN_SHIFT; 5128 } 5129 5130 static void init_alloc_chunk_ctl_policy_zoned( 5131 struct btrfs_fs_devices *fs_devices, 5132 struct alloc_chunk_ctl *ctl) 5133 { 5134 u64 zone_size = fs_devices->fs_info->zone_size; 5135 u64 limit; 5136 int min_num_stripes = ctl->devs_min * ctl->dev_stripes; 5137 int min_data_stripes = (min_num_stripes - ctl->nparity) / ctl->ncopies; 5138 u64 min_chunk_size = min_data_stripes * zone_size; 5139 u64 type = ctl->type; 5140 5141 ctl->max_stripe_size = zone_size; 5142 if (type & BTRFS_BLOCK_GROUP_DATA) { 5143 ctl->max_chunk_size = round_down(BTRFS_MAX_DATA_CHUNK_SIZE, 5144 zone_size); 5145 } else if (type & BTRFS_BLOCK_GROUP_METADATA) { 5146 ctl->max_chunk_size = ctl->max_stripe_size; 5147 } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) { 5148 ctl->max_chunk_size = 2 * ctl->max_stripe_size; 5149 ctl->devs_max = min_t(int, ctl->devs_max, 5150 BTRFS_MAX_DEVS_SYS_CHUNK); 5151 } else { 5152 BUG(); 5153 } 5154 5155 /* We don't want a chunk larger than 10% of writable space */ 5156 limit = max(round_down(mult_perc(fs_devices->total_rw_bytes, 10), 5157 zone_size), 5158 min_chunk_size); 5159 ctl->max_chunk_size = min(limit, ctl->max_chunk_size); 5160 ctl->dev_extent_min = zone_size * ctl->dev_stripes; 5161 } 5162 5163 static void init_alloc_chunk_ctl(struct btrfs_fs_devices *fs_devices, 5164 struct alloc_chunk_ctl *ctl) 5165 { 5166 int index = btrfs_bg_flags_to_raid_index(ctl->type); 5167 5168 ctl->sub_stripes = btrfs_raid_array[index].sub_stripes; 5169 ctl->dev_stripes = btrfs_raid_array[index].dev_stripes; 5170 ctl->devs_max = btrfs_raid_array[index].devs_max; 5171 if (!ctl->devs_max) 5172 ctl->devs_max = BTRFS_MAX_DEVS(fs_devices->fs_info); 5173 ctl->devs_min = btrfs_raid_array[index].devs_min; 5174 ctl->devs_increment = btrfs_raid_array[index].devs_increment; 5175 ctl->ncopies = btrfs_raid_array[index].ncopies; 5176 ctl->nparity = btrfs_raid_array[index].nparity; 5177 ctl->ndevs = 0; 5178 5179 switch (fs_devices->chunk_alloc_policy) { 5180 case BTRFS_CHUNK_ALLOC_REGULAR: 5181 
init_alloc_chunk_ctl_policy_regular(fs_devices, ctl); 5182 break; 5183 case BTRFS_CHUNK_ALLOC_ZONED: 5184 init_alloc_chunk_ctl_policy_zoned(fs_devices, ctl); 5185 break; 5186 default: 5187 BUG(); 5188 } 5189 } 5190 5191 static int gather_device_info(struct btrfs_fs_devices *fs_devices, 5192 struct alloc_chunk_ctl *ctl, 5193 struct btrfs_device_info *devices_info) 5194 { 5195 struct btrfs_fs_info *info = fs_devices->fs_info; 5196 struct btrfs_device *device; 5197 u64 total_avail; 5198 u64 dev_extent_want = ctl->max_stripe_size * ctl->dev_stripes; 5199 int ret; 5200 int ndevs = 0; 5201 u64 max_avail; 5202 u64 dev_offset; 5203 5204 /* 5205 * in the first pass through the devices list, we gather information 5206 * about the available holes on each device. 5207 */ 5208 list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) { 5209 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 5210 WARN(1, KERN_ERR 5211 "BTRFS: read-only device in alloc_list\n"); 5212 continue; 5213 } 5214 5215 if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, 5216 &device->dev_state) || 5217 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) 5218 continue; 5219 5220 if (device->total_bytes > device->bytes_used) 5221 total_avail = device->total_bytes - device->bytes_used; 5222 else 5223 total_avail = 0; 5224 5225 /* If there is no space on this device, skip it. */ 5226 if (total_avail < ctl->dev_extent_min) 5227 continue; 5228 5229 ret = find_free_dev_extent(device, dev_extent_want, &dev_offset, 5230 &max_avail); 5231 if (ret && ret != -ENOSPC) 5232 return ret; 5233 5234 if (ret == 0) 5235 max_avail = dev_extent_want; 5236 5237 if (max_avail < ctl->dev_extent_min) { 5238 if (btrfs_test_opt(info, ENOSPC_DEBUG)) 5239 btrfs_debug(info, 5240 "%s: devid %llu has no free space, have=%llu want=%llu", 5241 __func__, device->devid, max_avail, 5242 ctl->dev_extent_min); 5243 continue; 5244 } 5245 5246 if (ndevs == fs_devices->rw_devices) { 5247 WARN(1, "%s: found more than %llu devices\n", 5248 __func__, fs_devices->rw_devices); 5249 break; 5250 } 5251 devices_info[ndevs].dev_offset = dev_offset; 5252 devices_info[ndevs].max_avail = max_avail; 5253 devices_info[ndevs].total_avail = total_avail; 5254 devices_info[ndevs].dev = device; 5255 ++ndevs; 5256 } 5257 ctl->ndevs = ndevs; 5258 5259 /* 5260 * now sort the devices by hole size / available space 5261 */ 5262 sort(devices_info, ndevs, sizeof(struct btrfs_device_info), 5263 btrfs_cmp_device_info, NULL); 5264 5265 return 0; 5266 } 5267 5268 static int decide_stripe_size_regular(struct alloc_chunk_ctl *ctl, 5269 struct btrfs_device_info *devices_info) 5270 { 5271 /* Number of stripes that count for block group size */ 5272 int data_stripes; 5273 5274 /* 5275 * The primary goal is to maximize the number of stripes, so use as 5276 * many devices as possible, even if the stripes are not maximum sized. 5277 * 5278 * The DUP profile stores more than one stripe per device, the 5279 * max_avail is the total size so we have to adjust. 5280 */ 5281 ctl->stripe_size = div_u64(devices_info[ctl->ndevs - 1].max_avail, 5282 ctl->dev_stripes); 5283 ctl->num_stripes = ctl->ndevs * ctl->dev_stripes; 5284 5285 /* This will have to be fixed for RAID1 and RAID10 over more drives */ 5286 data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies; 5287 5288 /* 5289 * Use the number of data stripes to figure out how big this chunk is 5290 * really going to be in terms of logical address space, and compare 5291 * that answer with the max chunk size. 
If it's higher, we try to 5292 * reduce stripe_size. 5293 */ 5294 if (ctl->stripe_size * data_stripes > ctl->max_chunk_size) { 5295 /* 5296 * Reduce stripe_size, round it up to a 16MB boundary again and 5297 * then use it, unless it ends up being even bigger than the 5298 * previous value we had already. 5299 */ 5300 ctl->stripe_size = min(round_up(div_u64(ctl->max_chunk_size, 5301 data_stripes), SZ_16M), 5302 ctl->stripe_size); 5303 } 5304 5305 /* Stripe size should not go beyond 1G. */ 5306 ctl->stripe_size = min_t(u64, ctl->stripe_size, SZ_1G); 5307 5308 /* Align to BTRFS_STRIPE_LEN */ 5309 ctl->stripe_size = round_down(ctl->stripe_size, BTRFS_STRIPE_LEN); 5310 ctl->chunk_size = ctl->stripe_size * data_stripes; 5311 5312 return 0; 5313 } 5314 5315 static int decide_stripe_size_zoned(struct alloc_chunk_ctl *ctl, 5316 struct btrfs_device_info *devices_info) 5317 { 5318 u64 zone_size = devices_info[0].dev->zone_info->zone_size; 5319 /* Number of stripes that count for block group size */ 5320 int data_stripes; 5321 5322 /* 5323 * It should hold because: 5324 * dev_extent_min == dev_extent_want == zone_size * dev_stripes 5325 */ 5326 ASSERT(devices_info[ctl->ndevs - 1].max_avail == ctl->dev_extent_min); 5327 5328 ctl->stripe_size = zone_size; 5329 ctl->num_stripes = ctl->ndevs * ctl->dev_stripes; 5330 data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies; 5331 5332 /* stripe_size is fixed in zoned filesysmte. Reduce ndevs instead. */ 5333 if (ctl->stripe_size * data_stripes > ctl->max_chunk_size) { 5334 ctl->ndevs = div_u64(div_u64(ctl->max_chunk_size * ctl->ncopies, 5335 ctl->stripe_size) + ctl->nparity, 5336 ctl->dev_stripes); 5337 ctl->num_stripes = ctl->ndevs * ctl->dev_stripes; 5338 data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies; 5339 ASSERT(ctl->stripe_size * data_stripes <= ctl->max_chunk_size); 5340 } 5341 5342 ctl->chunk_size = ctl->stripe_size * data_stripes; 5343 5344 return 0; 5345 } 5346 5347 static int decide_stripe_size(struct btrfs_fs_devices *fs_devices, 5348 struct alloc_chunk_ctl *ctl, 5349 struct btrfs_device_info *devices_info) 5350 { 5351 struct btrfs_fs_info *info = fs_devices->fs_info; 5352 5353 /* 5354 * Round down to number of usable stripes, devs_increment can be any 5355 * number so we can't use round_down() that requires power of 2, while 5356 * rounddown is safe. 
5357 */ 5358 ctl->ndevs = rounddown(ctl->ndevs, ctl->devs_increment); 5359 5360 if (ctl->ndevs < ctl->devs_min) { 5361 if (btrfs_test_opt(info, ENOSPC_DEBUG)) { 5362 btrfs_debug(info, 5363 "%s: not enough devices with free space: have=%d minimum required=%d", 5364 __func__, ctl->ndevs, ctl->devs_min); 5365 } 5366 return -ENOSPC; 5367 } 5368 5369 ctl->ndevs = min(ctl->ndevs, ctl->devs_max); 5370 5371 switch (fs_devices->chunk_alloc_policy) { 5372 case BTRFS_CHUNK_ALLOC_REGULAR: 5373 return decide_stripe_size_regular(ctl, devices_info); 5374 case BTRFS_CHUNK_ALLOC_ZONED: 5375 return decide_stripe_size_zoned(ctl, devices_info); 5376 default: 5377 BUG(); 5378 } 5379 } 5380 5381 static struct btrfs_block_group *create_chunk(struct btrfs_trans_handle *trans, 5382 struct alloc_chunk_ctl *ctl, 5383 struct btrfs_device_info *devices_info) 5384 { 5385 struct btrfs_fs_info *info = trans->fs_info; 5386 struct map_lookup *map = NULL; 5387 struct extent_map_tree *em_tree; 5388 struct btrfs_block_group *block_group; 5389 struct extent_map *em; 5390 u64 start = ctl->start; 5391 u64 type = ctl->type; 5392 int ret; 5393 int i; 5394 int j; 5395 5396 map = kmalloc(map_lookup_size(ctl->num_stripes), GFP_NOFS); 5397 if (!map) 5398 return ERR_PTR(-ENOMEM); 5399 map->num_stripes = ctl->num_stripes; 5400 5401 for (i = 0; i < ctl->ndevs; ++i) { 5402 for (j = 0; j < ctl->dev_stripes; ++j) { 5403 int s = i * ctl->dev_stripes + j; 5404 map->stripes[s].dev = devices_info[i].dev; 5405 map->stripes[s].physical = devices_info[i].dev_offset + 5406 j * ctl->stripe_size; 5407 } 5408 } 5409 map->io_align = BTRFS_STRIPE_LEN; 5410 map->io_width = BTRFS_STRIPE_LEN; 5411 map->type = type; 5412 map->sub_stripes = ctl->sub_stripes; 5413 5414 trace_btrfs_chunk_alloc(info, map, start, ctl->chunk_size); 5415 5416 em = alloc_extent_map(); 5417 if (!em) { 5418 kfree(map); 5419 return ERR_PTR(-ENOMEM); 5420 } 5421 set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags); 5422 em->map_lookup = map; 5423 em->start = start; 5424 em->len = ctl->chunk_size; 5425 em->block_start = 0; 5426 em->block_len = em->len; 5427 em->orig_block_len = ctl->stripe_size; 5428 5429 em_tree = &info->mapping_tree; 5430 write_lock(&em_tree->lock); 5431 ret = add_extent_mapping(em_tree, em, 0); 5432 if (ret) { 5433 write_unlock(&em_tree->lock); 5434 free_extent_map(em); 5435 return ERR_PTR(ret); 5436 } 5437 write_unlock(&em_tree->lock); 5438 5439 block_group = btrfs_make_block_group(trans, type, start, ctl->chunk_size); 5440 if (IS_ERR(block_group)) 5441 goto error_del_extent; 5442 5443 for (i = 0; i < map->num_stripes; i++) { 5444 struct btrfs_device *dev = map->stripes[i].dev; 5445 5446 btrfs_device_set_bytes_used(dev, 5447 dev->bytes_used + ctl->stripe_size); 5448 if (list_empty(&dev->post_commit_list)) 5449 list_add_tail(&dev->post_commit_list, 5450 &trans->transaction->dev_update_list); 5451 } 5452 5453 atomic64_sub(ctl->stripe_size * map->num_stripes, 5454 &info->free_chunk_space); 5455 5456 free_extent_map(em); 5457 check_raid56_incompat_flag(info, type); 5458 check_raid1c34_incompat_flag(info, type); 5459 5460 return block_group; 5461 5462 error_del_extent: 5463 write_lock(&em_tree->lock); 5464 remove_extent_mapping(em_tree, em); 5465 write_unlock(&em_tree->lock); 5466 5467 /* One for our allocation */ 5468 free_extent_map(em); 5469 /* One for the tree reference */ 5470 free_extent_map(em); 5471 5472 return block_group; 5473 } 5474 5475 struct btrfs_block_group *btrfs_create_chunk(struct btrfs_trans_handle *trans, 5476 u64 type) 5477 { 5478 struct btrfs_fs_info *info 
= trans->fs_info; 5479 struct btrfs_fs_devices *fs_devices = info->fs_devices; 5480 struct btrfs_device_info *devices_info = NULL; 5481 struct alloc_chunk_ctl ctl; 5482 struct btrfs_block_group *block_group; 5483 int ret; 5484 5485 lockdep_assert_held(&info->chunk_mutex); 5486 5487 if (!alloc_profile_is_valid(type, 0)) { 5488 ASSERT(0); 5489 return ERR_PTR(-EINVAL); 5490 } 5491 5492 if (list_empty(&fs_devices->alloc_list)) { 5493 if (btrfs_test_opt(info, ENOSPC_DEBUG)) 5494 btrfs_debug(info, "%s: no writable device", __func__); 5495 return ERR_PTR(-ENOSPC); 5496 } 5497 5498 if (!(type & BTRFS_BLOCK_GROUP_TYPE_MASK)) { 5499 btrfs_err(info, "invalid chunk type 0x%llx requested", type); 5500 ASSERT(0); 5501 return ERR_PTR(-EINVAL); 5502 } 5503 5504 ctl.start = find_next_chunk(info); 5505 ctl.type = type; 5506 init_alloc_chunk_ctl(fs_devices, &ctl); 5507 5508 devices_info = kcalloc(fs_devices->rw_devices, sizeof(*devices_info), 5509 GFP_NOFS); 5510 if (!devices_info) 5511 return ERR_PTR(-ENOMEM); 5512 5513 ret = gather_device_info(fs_devices, &ctl, devices_info); 5514 if (ret < 0) { 5515 block_group = ERR_PTR(ret); 5516 goto out; 5517 } 5518 5519 ret = decide_stripe_size(fs_devices, &ctl, devices_info); 5520 if (ret < 0) { 5521 block_group = ERR_PTR(ret); 5522 goto out; 5523 } 5524 5525 block_group = create_chunk(trans, &ctl, devices_info); 5526 5527 out: 5528 kfree(devices_info); 5529 return block_group; 5530 } 5531 5532 /* 5533 * This function, btrfs_chunk_alloc_add_chunk_item(), typically belongs to 5534 * phase 1 of chunk allocation. It belongs to phase 2 only when allocating system 5535 * chunks. 5536 * 5537 * See the comment at btrfs_chunk_alloc() for details about the chunk allocation 5538 * phases. 5539 */ 5540 int btrfs_chunk_alloc_add_chunk_item(struct btrfs_trans_handle *trans, 5541 struct btrfs_block_group *bg) 5542 { 5543 struct btrfs_fs_info *fs_info = trans->fs_info; 5544 struct btrfs_root *chunk_root = fs_info->chunk_root; 5545 struct btrfs_key key; 5546 struct btrfs_chunk *chunk; 5547 struct btrfs_stripe *stripe; 5548 struct extent_map *em; 5549 struct map_lookup *map; 5550 size_t item_size; 5551 int i; 5552 int ret; 5553 5554 /* 5555 * We take the chunk_mutex for 2 reasons: 5556 * 5557 * 1) Updates and insertions in the chunk btree must be done while holding 5558 * the chunk_mutex, as well as updating the system chunk array in the 5559 * superblock. See the comment on top of btrfs_chunk_alloc() for the 5560 * details; 5561 * 5562 * 2) To prevent races with the final phase of a device replace operation 5563 * that replaces the device object associated with the map's stripes, 5564 * because the device object's id can change at any time during that 5565 * final phase of the device replace operation 5566 * (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the 5567 * replaced device and then see it with an ID of BTRFS_DEV_REPLACE_DEVID, 5568 * which would cause a failure when updating the device item, which does 5569 * not exist, or when persisting a stripe of the chunk item with such an ID. 5570 * Here we can't use the device_list_mutex because our caller already 5571 * has locked the chunk_mutex, and the final phase of device replace 5572 * acquires both mutexes - first the device_list_mutex and then the 5573 * chunk_mutex. Using any of those two mutexes protects us from a 5574 * concurrent device replace.
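 *
 * Lock ordering sketch of the race described above:
 *
 *   device replace (final phase): device_list_mutex -> chunk_mutex
 *   this function:                chunk_mutex (held by the caller)
 *
 * Holding either mutex is therefore enough to exclude the final phase of
 * a concurrent device replace.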
5575 */ 5576 lockdep_assert_held(&fs_info->chunk_mutex); 5577 5578 em = btrfs_get_chunk_map(fs_info, bg->start, bg->length); 5579 if (IS_ERR(em)) { 5580 ret = PTR_ERR(em); 5581 btrfs_abort_transaction(trans, ret); 5582 return ret; 5583 } 5584 5585 map = em->map_lookup; 5586 item_size = btrfs_chunk_item_size(map->num_stripes); 5587 5588 chunk = kzalloc(item_size, GFP_NOFS); 5589 if (!chunk) { 5590 ret = -ENOMEM; 5591 btrfs_abort_transaction(trans, ret); 5592 goto out; 5593 } 5594 5595 for (i = 0; i < map->num_stripes; i++) { 5596 struct btrfs_device *device = map->stripes[i].dev; 5597 5598 ret = btrfs_update_device(trans, device); 5599 if (ret) 5600 goto out; 5601 } 5602 5603 stripe = &chunk->stripe; 5604 for (i = 0; i < map->num_stripes; i++) { 5605 struct btrfs_device *device = map->stripes[i].dev; 5606 const u64 dev_offset = map->stripes[i].physical; 5607 5608 btrfs_set_stack_stripe_devid(stripe, device->devid); 5609 btrfs_set_stack_stripe_offset(stripe, dev_offset); 5610 memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE); 5611 stripe++; 5612 } 5613 5614 btrfs_set_stack_chunk_length(chunk, bg->length); 5615 btrfs_set_stack_chunk_owner(chunk, BTRFS_EXTENT_TREE_OBJECTID); 5616 btrfs_set_stack_chunk_stripe_len(chunk, BTRFS_STRIPE_LEN); 5617 btrfs_set_stack_chunk_type(chunk, map->type); 5618 btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes); 5619 btrfs_set_stack_chunk_io_align(chunk, BTRFS_STRIPE_LEN); 5620 btrfs_set_stack_chunk_io_width(chunk, BTRFS_STRIPE_LEN); 5621 btrfs_set_stack_chunk_sector_size(chunk, fs_info->sectorsize); 5622 btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes); 5623 5624 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 5625 key.type = BTRFS_CHUNK_ITEM_KEY; 5626 key.offset = bg->start; 5627 5628 ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size); 5629 if (ret) 5630 goto out; 5631 5632 set_bit(BLOCK_GROUP_FLAG_CHUNK_ITEM_INSERTED, &bg->runtime_flags); 5633 5634 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) { 5635 ret = btrfs_add_system_chunk(fs_info, &key, chunk, item_size); 5636 if (ret) 5637 goto out; 5638 } 5639 5640 out: 5641 kfree(chunk); 5642 free_extent_map(em); 5643 return ret; 5644 } 5645 5646 static noinline int init_first_rw_device(struct btrfs_trans_handle *trans) 5647 { 5648 struct btrfs_fs_info *fs_info = trans->fs_info; 5649 u64 alloc_profile; 5650 struct btrfs_block_group *meta_bg; 5651 struct btrfs_block_group *sys_bg; 5652 5653 /* 5654 * When adding a new device for sprouting, the seed device is read-only 5655 * so we must first allocate a metadata and a system chunk. But before 5656 * adding the block group items to the extent, device and chunk btrees, 5657 * we must first: 5658 * 5659 * 1) Create both chunks without doing any changes to the btrees, as 5660 * otherwise we would get -ENOSPC since the block groups from the 5661 * seed device are read-only; 5662 * 5663 * 2) Add the device item for the new sprout device - finishing the setup 5664 * of a new block group requires updating the device item in the chunk 5665 * btree, so it must exist when we attempt to do it. The previous step 5666 * ensures this does not fail with -ENOSPC. 5667 * 5668 * After that we can add the block group items to their btrees: 5669 * update existing device item in the chunk btree, add a new block group 5670 * item to the extent btree, add a new chunk item to the chunk btree and 5671 * finally add the new device extent items to the devices btree. 
5672 */ 5673 5674 alloc_profile = btrfs_metadata_alloc_profile(fs_info); 5675 meta_bg = btrfs_create_chunk(trans, alloc_profile); 5676 if (IS_ERR(meta_bg)) 5677 return PTR_ERR(meta_bg); 5678 5679 alloc_profile = btrfs_system_alloc_profile(fs_info); 5680 sys_bg = btrfs_create_chunk(trans, alloc_profile); 5681 if (IS_ERR(sys_bg)) 5682 return PTR_ERR(sys_bg); 5683 5684 return 0; 5685 } 5686 5687 static inline int btrfs_chunk_max_errors(struct map_lookup *map) 5688 { 5689 const int index = btrfs_bg_flags_to_raid_index(map->type); 5690 5691 return btrfs_raid_array[index].tolerated_failures; 5692 } 5693 5694 bool btrfs_chunk_writeable(struct btrfs_fs_info *fs_info, u64 chunk_offset) 5695 { 5696 struct extent_map *em; 5697 struct map_lookup *map; 5698 int miss_ndevs = 0; 5699 int i; 5700 bool ret = true; 5701 5702 em = btrfs_get_chunk_map(fs_info, chunk_offset, 1); 5703 if (IS_ERR(em)) 5704 return false; 5705 5706 map = em->map_lookup; 5707 for (i = 0; i < map->num_stripes; i++) { 5708 if (test_bit(BTRFS_DEV_STATE_MISSING, 5709 &map->stripes[i].dev->dev_state)) { 5710 miss_ndevs++; 5711 continue; 5712 } 5713 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, 5714 &map->stripes[i].dev->dev_state)) { 5715 ret = false; 5716 goto end; 5717 } 5718 } 5719 5720 /* 5721 * If the number of missing devices is larger than max errors, we can 5722 * not write the data into that chunk successfully. 5723 */ 5724 if (miss_ndevs > btrfs_chunk_max_errors(map)) 5725 ret = false; 5726 end: 5727 free_extent_map(em); 5728 return ret; 5729 } 5730 5731 void btrfs_mapping_tree_free(struct extent_map_tree *tree) 5732 { 5733 struct extent_map *em; 5734 5735 while (1) { 5736 write_lock(&tree->lock); 5737 em = lookup_extent_mapping(tree, 0, (u64)-1); 5738 if (em) 5739 remove_extent_mapping(tree, em); 5740 write_unlock(&tree->lock); 5741 if (!em) 5742 break; 5743 /* once for us */ 5744 free_extent_map(em); 5745 /* once for the tree */ 5746 free_extent_map(em); 5747 } 5748 } 5749 5750 int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len) 5751 { 5752 struct extent_map *em; 5753 struct map_lookup *map; 5754 enum btrfs_raid_types index; 5755 int ret = 1; 5756 5757 em = btrfs_get_chunk_map(fs_info, logical, len); 5758 if (IS_ERR(em)) 5759 /* 5760 * We could return errors for these cases, but that could get 5761 * ugly and we'd probably do the same thing which is just not do 5762 * anything else and exit, so return 1 so the callers don't try 5763 * to use other copies. 5764 */ 5765 return 1; 5766 5767 map = em->map_lookup; 5768 index = btrfs_bg_flags_to_raid_index(map->type); 5769 5770 /* Non-RAID56, use their ncopies from btrfs_raid_array. */ 5771 if (!(map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)) 5772 ret = btrfs_raid_array[index].ncopies; 5773 else if (map->type & BTRFS_BLOCK_GROUP_RAID5) 5774 ret = 2; 5775 else if (map->type & BTRFS_BLOCK_GROUP_RAID6) 5776 /* 5777 * There could be two corrupted data stripes, we need 5778 * to loop retry in order to rebuild the correct data. 5779 * 5780 * Fail a stripe at a time on every retry except the 5781 * stripe under reconstruction. 
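 *
 * Example: for a RAID6 chunk striped over 6 devices this returns 6,
 * allowing up to 6 retry rounds that fail a different stripe each time
 * while the data is rebuilt from the remaining ones.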
		 */
		ret = map->num_stripes;
	free_extent_map(em);
	return ret;
}

unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info,
				    u64 logical)
{
	struct extent_map *em;
	struct map_lookup *map;
	unsigned long len = fs_info->sectorsize;

	if (!btrfs_fs_incompat(fs_info, RAID56))
		return len;

	em = btrfs_get_chunk_map(fs_info, logical, len);

	if (!WARN_ON(IS_ERR(em))) {
		map = em->map_lookup;
		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
			len = nr_data_stripes(map) << BTRFS_STRIPE_LEN_SHIFT;
		free_extent_map(em);
	}
	return len;
}

int btrfs_is_parity_mirror(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
{
	struct extent_map *em;
	struct map_lookup *map;
	int ret = 0;

	if (!btrfs_fs_incompat(fs_info, RAID56))
		return 0;

	em = btrfs_get_chunk_map(fs_info, logical, len);

	if (!WARN_ON(IS_ERR(em))) {
		map = em->map_lookup;
		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
			ret = 1;
		free_extent_map(em);
	}
	return ret;
}

static int find_live_mirror(struct btrfs_fs_info *fs_info,
			    struct map_lookup *map, int first,
			    int dev_replace_is_ongoing)
{
	int i;
	int num_stripes;
	int preferred_mirror;
	int tolerance;
	struct btrfs_device *srcdev;

	ASSERT((map->type &
		(BTRFS_BLOCK_GROUP_RAID1_MASK | BTRFS_BLOCK_GROUP_RAID10)));

	if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		num_stripes = map->sub_stripes;
	else
		num_stripes = map->num_stripes;

	switch (fs_info->fs_devices->read_policy) {
	default:
		/* Shouldn't happen, just warn and use pid instead of failing */
		btrfs_warn_rl(fs_info,
			      "unknown read_policy type %u, reset to pid",
			      fs_info->fs_devices->read_policy);
		fs_info->fs_devices->read_policy = BTRFS_READ_POLICY_PID;
		fallthrough;
	case BTRFS_READ_POLICY_PID:
		preferred_mirror = first + (current->pid % num_stripes);
		break;
	}

	if (dev_replace_is_ongoing &&
	    fs_info->dev_replace.cont_reading_from_srcdev_mode ==
	    BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID)
		srcdev = fs_info->dev_replace.srcdev;
	else
		srcdev = NULL;

	/*
	 * Try to avoid the drive that is the source drive for a
	 * dev-replace procedure, only choose it if no other non-missing
	 * mirror is available.
	 */
	for (tolerance = 0; tolerance < 2; tolerance++) {
		if (map->stripes[preferred_mirror].dev->bdev &&
		    (tolerance || map->stripes[preferred_mirror].dev != srcdev))
			return preferred_mirror;
		for (i = first; i < first + num_stripes; i++) {
			if (map->stripes[i].dev->bdev &&
			    (tolerance || map->stripes[i].dev != srcdev))
				return i;
		}
	}

	/* We couldn't find one that doesn't fail.
Just return something 5884 * and the io error handling code will clean up eventually 5885 */ 5886 return preferred_mirror; 5887 } 5888 5889 static struct btrfs_io_context *alloc_btrfs_io_context(struct btrfs_fs_info *fs_info, 5890 u16 total_stripes) 5891 { 5892 struct btrfs_io_context *bioc; 5893 5894 bioc = kzalloc( 5895 /* The size of btrfs_io_context */ 5896 sizeof(struct btrfs_io_context) + 5897 /* Plus the variable array for the stripes */ 5898 sizeof(struct btrfs_io_stripe) * (total_stripes), 5899 GFP_NOFS); 5900 5901 if (!bioc) 5902 return NULL; 5903 5904 refcount_set(&bioc->refs, 1); 5905 5906 bioc->fs_info = fs_info; 5907 bioc->replace_stripe_src = -1; 5908 bioc->full_stripe_logical = (u64)-1; 5909 5910 return bioc; 5911 } 5912 5913 void btrfs_get_bioc(struct btrfs_io_context *bioc) 5914 { 5915 WARN_ON(!refcount_read(&bioc->refs)); 5916 refcount_inc(&bioc->refs); 5917 } 5918 5919 void btrfs_put_bioc(struct btrfs_io_context *bioc) 5920 { 5921 if (!bioc) 5922 return; 5923 if (refcount_dec_and_test(&bioc->refs)) 5924 kfree(bioc); 5925 } 5926 5927 /* 5928 * Please note that, discard won't be sent to target device of device 5929 * replace. 5930 */ 5931 struct btrfs_discard_stripe *btrfs_map_discard(struct btrfs_fs_info *fs_info, 5932 u64 logical, u64 *length_ret, 5933 u32 *num_stripes) 5934 { 5935 struct extent_map *em; 5936 struct map_lookup *map; 5937 struct btrfs_discard_stripe *stripes; 5938 u64 length = *length_ret; 5939 u64 offset; 5940 u32 stripe_nr; 5941 u32 stripe_nr_end; 5942 u32 stripe_cnt; 5943 u64 stripe_end_offset; 5944 u64 stripe_offset; 5945 u32 stripe_index; 5946 u32 factor = 0; 5947 u32 sub_stripes = 0; 5948 u32 stripes_per_dev = 0; 5949 u32 remaining_stripes = 0; 5950 u32 last_stripe = 0; 5951 int ret; 5952 int i; 5953 5954 em = btrfs_get_chunk_map(fs_info, logical, length); 5955 if (IS_ERR(em)) 5956 return ERR_CAST(em); 5957 5958 map = em->map_lookup; 5959 5960 /* we don't discard raid56 yet */ 5961 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { 5962 ret = -EOPNOTSUPP; 5963 goto out_free_map; 5964 } 5965 5966 offset = logical - em->start; 5967 length = min_t(u64, em->start + em->len - logical, length); 5968 *length_ret = length; 5969 5970 /* 5971 * stripe_nr counts the total number of stripes we have to stride 5972 * to get to this block 5973 */ 5974 stripe_nr = offset >> BTRFS_STRIPE_LEN_SHIFT; 5975 5976 /* stripe_offset is the offset of this block in its stripe */ 5977 stripe_offset = offset - (stripe_nr << BTRFS_STRIPE_LEN_SHIFT); 5978 5979 stripe_nr_end = round_up(offset + length, BTRFS_STRIPE_LEN) >> 5980 BTRFS_STRIPE_LEN_SHIFT; 5981 stripe_cnt = stripe_nr_end - stripe_nr; 5982 stripe_end_offset = (stripe_nr_end << BTRFS_STRIPE_LEN_SHIFT) - 5983 (offset + length); 5984 /* 5985 * after this, stripe_nr is the number of stripes on this 5986 * device we have to walk to find the data, and stripe_index is 5987 * the number of our device in the stripe array 5988 */ 5989 *num_stripes = 1; 5990 stripe_index = 0; 5991 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | 5992 BTRFS_BLOCK_GROUP_RAID10)) { 5993 if (map->type & BTRFS_BLOCK_GROUP_RAID0) 5994 sub_stripes = 1; 5995 else 5996 sub_stripes = map->sub_stripes; 5997 5998 factor = map->num_stripes / sub_stripes; 5999 *num_stripes = min_t(u64, map->num_stripes, 6000 sub_stripes * stripe_cnt); 6001 stripe_index = stripe_nr % factor; 6002 stripe_nr /= factor; 6003 stripe_index *= sub_stripes; 6004 6005 remaining_stripes = stripe_cnt % factor; 6006 stripes_per_dev = stripe_cnt / factor; 6007 last_stripe = ((stripe_nr_end - 1) % 
factor) * sub_stripes; 6008 } else if (map->type & (BTRFS_BLOCK_GROUP_RAID1_MASK | 6009 BTRFS_BLOCK_GROUP_DUP)) { 6010 *num_stripes = map->num_stripes; 6011 } else { 6012 stripe_index = stripe_nr % map->num_stripes; 6013 stripe_nr /= map->num_stripes; 6014 } 6015 6016 stripes = kcalloc(*num_stripes, sizeof(*stripes), GFP_NOFS); 6017 if (!stripes) { 6018 ret = -ENOMEM; 6019 goto out_free_map; 6020 } 6021 6022 for (i = 0; i < *num_stripes; i++) { 6023 stripes[i].physical = 6024 map->stripes[stripe_index].physical + 6025 stripe_offset + (stripe_nr << BTRFS_STRIPE_LEN_SHIFT); 6026 stripes[i].dev = map->stripes[stripe_index].dev; 6027 6028 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | 6029 BTRFS_BLOCK_GROUP_RAID10)) { 6030 stripes[i].length = stripes_per_dev << BTRFS_STRIPE_LEN_SHIFT; 6031 6032 if (i / sub_stripes < remaining_stripes) 6033 stripes[i].length += BTRFS_STRIPE_LEN; 6034 6035 /* 6036 * Special for the first stripe and 6037 * the last stripe: 6038 * 6039 * |-------|...|-------| 6040 * |----------| 6041 * off end_off 6042 */ 6043 if (i < sub_stripes) 6044 stripes[i].length -= stripe_offset; 6045 6046 if (stripe_index >= last_stripe && 6047 stripe_index <= (last_stripe + 6048 sub_stripes - 1)) 6049 stripes[i].length -= stripe_end_offset; 6050 6051 if (i == sub_stripes - 1) 6052 stripe_offset = 0; 6053 } else { 6054 stripes[i].length = length; 6055 } 6056 6057 stripe_index++; 6058 if (stripe_index == map->num_stripes) { 6059 stripe_index = 0; 6060 stripe_nr++; 6061 } 6062 } 6063 6064 free_extent_map(em); 6065 return stripes; 6066 out_free_map: 6067 free_extent_map(em); 6068 return ERR_PTR(ret); 6069 } 6070 6071 static bool is_block_group_to_copy(struct btrfs_fs_info *fs_info, u64 logical) 6072 { 6073 struct btrfs_block_group *cache; 6074 bool ret; 6075 6076 /* Non zoned filesystem does not use "to_copy" flag */ 6077 if (!btrfs_is_zoned(fs_info)) 6078 return false; 6079 6080 cache = btrfs_lookup_block_group(fs_info, logical); 6081 6082 ret = test_bit(BLOCK_GROUP_FLAG_TO_COPY, &cache->runtime_flags); 6083 6084 btrfs_put_block_group(cache); 6085 return ret; 6086 } 6087 6088 static void handle_ops_on_dev_replace(enum btrfs_map_op op, 6089 struct btrfs_io_context *bioc, 6090 struct btrfs_dev_replace *dev_replace, 6091 u64 logical, 6092 int *num_stripes_ret, int *max_errors_ret) 6093 { 6094 u64 srcdev_devid = dev_replace->srcdev->devid; 6095 /* 6096 * At this stage, num_stripes is still the real number of stripes, 6097 * excluding the duplicated stripes. 6098 */ 6099 int num_stripes = *num_stripes_ret; 6100 int nr_extra_stripes = 0; 6101 int max_errors = *max_errors_ret; 6102 int i; 6103 6104 /* 6105 * A block group which has "to_copy" set will eventually be copied by 6106 * the dev-replace process. We can avoid cloning IO here. 6107 */ 6108 if (is_block_group_to_copy(dev_replace->srcdev->fs_info, logical)) 6109 return; 6110 6111 /* 6112 * Duplicate the write operations while the dev-replace procedure is 6113 * running. Since the copying of the old disk to the new disk takes 6114 * place at run time while the filesystem is mounted writable, the 6115 * regular write operations to the old disk have to be duplicated to go 6116 * to the new disk as well. 6117 * 6118 * Note that device->missing is handled by the caller, and that the 6119 * write to the old disk is already set up in the stripes array. 
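	 *
	 * For example, with DUP both stripes of a write live on the source
	 * device, so both are cloned to the target and up to two extra
	 * stripes get appended below.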
	 */
	for (i = 0; i < num_stripes; i++) {
		struct btrfs_io_stripe *old = &bioc->stripes[i];
		struct btrfs_io_stripe *new = &bioc->stripes[num_stripes + nr_extra_stripes];

		if (old->dev->devid != srcdev_devid)
			continue;

		new->physical = old->physical;
		new->dev = dev_replace->tgtdev;
		if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK)
			bioc->replace_stripe_src = i;
		nr_extra_stripes++;
	}

	/* We can have at most 2 extra stripes (for DUP). */
	ASSERT(nr_extra_stripes <= 2);
	/*
	 * For GET_READ_MIRRORS, we can only return at most 1 extra stripe for
	 * replace.
	 * If we have 2 extra stripes, only choose the one with smaller physical.
	 */
	if (op == BTRFS_MAP_GET_READ_MIRRORS && nr_extra_stripes == 2) {
		struct btrfs_io_stripe *first = &bioc->stripes[num_stripes];
		struct btrfs_io_stripe *second = &bioc->stripes[num_stripes + 1];

		/* Only DUP can have two extra stripes. */
		ASSERT(bioc->map_type & BTRFS_BLOCK_GROUP_DUP);

		/*
		 * Swap the two extra stripes and reduce @nr_extra_stripes.
		 * The extra stripe would still be there, but won't be accessed.
		 */
		if (first->physical > second->physical) {
			swap(second->physical, first->physical);
			swap(second->dev, first->dev);
			nr_extra_stripes--;
		}
	}

	*num_stripes_ret = num_stripes + nr_extra_stripes;
	*max_errors_ret = max_errors + nr_extra_stripes;
	bioc->replace_nr_stripes = nr_extra_stripes;
}

static bool need_full_stripe(enum btrfs_map_op op)
{
	return (op == BTRFS_MAP_WRITE || op == BTRFS_MAP_GET_READ_MIRRORS);
}

static u64 btrfs_max_io_len(struct map_lookup *map, enum btrfs_map_op op,
			    u64 offset, u32 *stripe_nr, u64 *stripe_offset,
			    u64 *full_stripe_start)
{
	ASSERT(op != BTRFS_MAP_DISCARD);

	/*
	 * stripe_nr is the stripe where this block falls; stripe_offset is
	 * the offset of this block in its stripe.
	 */
	*stripe_offset = offset & BTRFS_STRIPE_LEN_MASK;
	*stripe_nr = offset >> BTRFS_STRIPE_LEN_SHIFT;
	ASSERT(*stripe_offset < U32_MAX);

	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		unsigned long full_stripe_len = nr_data_stripes(map) <<
						BTRFS_STRIPE_LEN_SHIFT;

		/*
		 * For the full stripe start, we use the previously calculated
		 * @stripe_nr. Align it down to nr_data_stripes, then multiply
		 * by STRIPE_LEN.
		 *
		 * By this we can avoid u64 division completely. And we have
		 * to go rounddown(), not round_down(), as nr_data_stripes is
		 * not guaranteed to be a power of 2.
		 */
		*full_stripe_start =
			rounddown(*stripe_nr, nr_data_stripes(map)) <<
			BTRFS_STRIPE_LEN_SHIFT;

		/*
		 * For writes to RAID56, allow writing a full stripe set, but
		 * no straddling of stripe sets.
		 */
		if (op == BTRFS_MAP_WRITE)
			return full_stripe_len - (offset - *full_stripe_start);
	}

	/*
	 * For other RAID types and for RAID56 reads, allow a single stripe (on
	 * a single disk).
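	 *
	 * E.g. with 64K BTRFS_STRIPE_LEN and an offset of 80K into the chunk,
	 * stripe_nr is 1 and stripe_offset is 16K, so a striped I/O here is
	 * capped at 64K - 16K = 48K to keep it on a single disk.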
6212 */ 6213 if (map->type & BTRFS_BLOCK_GROUP_STRIPE_MASK) 6214 return BTRFS_STRIPE_LEN - *stripe_offset; 6215 return U64_MAX; 6216 } 6217 6218 static void set_io_stripe(struct btrfs_io_stripe *dst, const struct map_lookup *map, 6219 u32 stripe_index, u64 stripe_offset, u32 stripe_nr) 6220 { 6221 dst->dev = map->stripes[stripe_index].dev; 6222 dst->physical = map->stripes[stripe_index].physical + 6223 stripe_offset + (stripe_nr << BTRFS_STRIPE_LEN_SHIFT); 6224 } 6225 6226 int __btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op, 6227 u64 logical, u64 *length, 6228 struct btrfs_io_context **bioc_ret, 6229 struct btrfs_io_stripe *smap, int *mirror_num_ret, 6230 int need_raid_map) 6231 { 6232 struct extent_map *em; 6233 struct map_lookup *map; 6234 u64 map_offset; 6235 u64 stripe_offset; 6236 u32 stripe_nr; 6237 u32 stripe_index; 6238 int data_stripes; 6239 int i; 6240 int ret = 0; 6241 int mirror_num = (mirror_num_ret ? *mirror_num_ret : 0); 6242 int num_stripes; 6243 int num_copies; 6244 int max_errors = 0; 6245 struct btrfs_io_context *bioc = NULL; 6246 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace; 6247 int dev_replace_is_ongoing = 0; 6248 u16 num_alloc_stripes; 6249 u64 raid56_full_stripe_start = (u64)-1; 6250 u64 max_len; 6251 6252 ASSERT(bioc_ret); 6253 ASSERT(op != BTRFS_MAP_DISCARD); 6254 6255 num_copies = btrfs_num_copies(fs_info, logical, fs_info->sectorsize); 6256 if (mirror_num > num_copies) 6257 return -EINVAL; 6258 6259 em = btrfs_get_chunk_map(fs_info, logical, *length); 6260 if (IS_ERR(em)) 6261 return PTR_ERR(em); 6262 6263 map = em->map_lookup; 6264 data_stripes = nr_data_stripes(map); 6265 6266 map_offset = logical - em->start; 6267 max_len = btrfs_max_io_len(map, op, map_offset, &stripe_nr, 6268 &stripe_offset, &raid56_full_stripe_start); 6269 *length = min_t(u64, em->len - map_offset, max_len); 6270 6271 down_read(&dev_replace->rwsem); 6272 dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace); 6273 /* 6274 * Hold the semaphore for read during the whole operation, write is 6275 * requested at commit time but must wait. 
6276 */ 6277 if (!dev_replace_is_ongoing) 6278 up_read(&dev_replace->rwsem); 6279 6280 num_stripes = 1; 6281 stripe_index = 0; 6282 if (map->type & BTRFS_BLOCK_GROUP_RAID0) { 6283 stripe_index = stripe_nr % map->num_stripes; 6284 stripe_nr /= map->num_stripes; 6285 if (!need_full_stripe(op)) 6286 mirror_num = 1; 6287 } else if (map->type & BTRFS_BLOCK_GROUP_RAID1_MASK) { 6288 if (need_full_stripe(op)) 6289 num_stripes = map->num_stripes; 6290 else if (mirror_num) 6291 stripe_index = mirror_num - 1; 6292 else { 6293 stripe_index = find_live_mirror(fs_info, map, 0, 6294 dev_replace_is_ongoing); 6295 mirror_num = stripe_index + 1; 6296 } 6297 6298 } else if (map->type & BTRFS_BLOCK_GROUP_DUP) { 6299 if (need_full_stripe(op)) { 6300 num_stripes = map->num_stripes; 6301 } else if (mirror_num) { 6302 stripe_index = mirror_num - 1; 6303 } else { 6304 mirror_num = 1; 6305 } 6306 6307 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) { 6308 u32 factor = map->num_stripes / map->sub_stripes; 6309 6310 stripe_index = (stripe_nr % factor) * map->sub_stripes; 6311 stripe_nr /= factor; 6312 6313 if (need_full_stripe(op)) 6314 num_stripes = map->sub_stripes; 6315 else if (mirror_num) 6316 stripe_index += mirror_num - 1; 6317 else { 6318 int old_stripe_index = stripe_index; 6319 stripe_index = find_live_mirror(fs_info, map, 6320 stripe_index, 6321 dev_replace_is_ongoing); 6322 mirror_num = stripe_index - old_stripe_index + 1; 6323 } 6324 6325 } else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { 6326 if (need_raid_map && (need_full_stripe(op) || mirror_num > 1)) { 6327 /* 6328 * Push stripe_nr back to the start of the full stripe 6329 * For those cases needing a full stripe, @stripe_nr 6330 * is the full stripe number. 6331 * 6332 * Originally we go raid56_full_stripe_start / full_stripe_len, 6333 * but that can be expensive. Here we just divide 6334 * @stripe_nr with @data_stripes. 6335 */ 6336 stripe_nr /= data_stripes; 6337 6338 /* RAID[56] write or recovery. Return all stripes */ 6339 num_stripes = map->num_stripes; 6340 max_errors = btrfs_chunk_max_errors(map); 6341 6342 /* Return the length to the full stripe end */ 6343 *length = min(logical + *length, 6344 raid56_full_stripe_start + em->start + 6345 (data_stripes << BTRFS_STRIPE_LEN_SHIFT)) - logical; 6346 stripe_index = 0; 6347 stripe_offset = 0; 6348 } else { 6349 /* 6350 * Mirror #0 or #1 means the original data block. 6351 * Mirror #2 is RAID5 parity block. 6352 * Mirror #3 is RAID6 Q block. 
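			 *
			 * E.g. for RAID5 a read of mirror 2 maps to the
			 * parity stripe below (stripe_index == data_stripes),
			 * and the data is then rebuilt from parity plus the
			 * surviving data stripes.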
6353 */ 6354 stripe_index = stripe_nr % data_stripes; 6355 stripe_nr /= data_stripes; 6356 if (mirror_num > 1) 6357 stripe_index = data_stripes + mirror_num - 2; 6358 6359 /* We distribute the parity blocks across stripes */ 6360 stripe_index = (stripe_nr + stripe_index) % map->num_stripes; 6361 if (!need_full_stripe(op) && mirror_num <= 1) 6362 mirror_num = 1; 6363 } 6364 } else { 6365 /* 6366 * After this, stripe_nr is the number of stripes on this 6367 * device we have to walk to find the data, and stripe_index is 6368 * the number of our device in the stripe array 6369 */ 6370 stripe_index = stripe_nr % map->num_stripes; 6371 stripe_nr /= map->num_stripes; 6372 mirror_num = stripe_index + 1; 6373 } 6374 if (stripe_index >= map->num_stripes) { 6375 btrfs_crit(fs_info, 6376 "stripe index math went horribly wrong, got stripe_index=%u, num_stripes=%u", 6377 stripe_index, map->num_stripes); 6378 ret = -EINVAL; 6379 goto out; 6380 } 6381 6382 num_alloc_stripes = num_stripes; 6383 if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL && 6384 op != BTRFS_MAP_READ) 6385 /* 6386 * For replace case, we need to add extra stripes for extra 6387 * duplicated stripes. 6388 * 6389 * For both WRITE and GET_READ_MIRRORS, we may have at most 6390 * 2 more stripes (DUP types, otherwise 1). 6391 */ 6392 num_alloc_stripes += 2; 6393 6394 /* 6395 * If this I/O maps to a single device, try to return the device and 6396 * physical block information on the stack instead of allocating an 6397 * I/O context structure. 6398 */ 6399 if (smap && num_alloc_stripes == 1 && 6400 !((map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) && mirror_num > 1) && 6401 (!need_full_stripe(op) || !dev_replace_is_ongoing || 6402 !dev_replace->tgtdev)) { 6403 set_io_stripe(smap, map, stripe_index, stripe_offset, stripe_nr); 6404 *mirror_num_ret = mirror_num; 6405 *bioc_ret = NULL; 6406 ret = 0; 6407 goto out; 6408 } 6409 6410 bioc = alloc_btrfs_io_context(fs_info, num_alloc_stripes); 6411 if (!bioc) { 6412 ret = -ENOMEM; 6413 goto out; 6414 } 6415 bioc->map_type = map->type; 6416 6417 /* 6418 * For RAID56 full map, we need to make sure the stripes[] follows the 6419 * rule that data stripes are all ordered, then followed with P and Q 6420 * (if we have). 6421 * 6422 * It's still mostly the same as other profiles, just with extra rotation. 6423 */ 6424 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK && need_raid_map && 6425 (need_full_stripe(op) || mirror_num > 1)) { 6426 /* 6427 * For RAID56 @stripe_nr is already the number of full stripes 6428 * before us, which is also the rotation value (needs to modulo 6429 * with num_stripes). 6430 * 6431 * In this case, we just add @stripe_nr with @i, then do the 6432 * modulo, to reduce one modulo call. 6433 */ 6434 bioc->full_stripe_logical = em->start + 6435 ((stripe_nr * data_stripes) << BTRFS_STRIPE_LEN_SHIFT); 6436 for (i = 0; i < num_stripes; i++) 6437 set_io_stripe(&bioc->stripes[i], map, 6438 (i + stripe_nr) % num_stripes, 6439 stripe_offset, stripe_nr); 6440 } else { 6441 /* 6442 * For all other non-RAID56 profiles, just copy the target 6443 * stripe into the bioc. 
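		 *
		 * (E.g. a RAID1 write has num_stripes == 2 here, so
		 * stripes[0] and stripes[1] map the same logical range to two
		 * different devices.)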
6444 */ 6445 for (i = 0; i < num_stripes; i++) { 6446 set_io_stripe(&bioc->stripes[i], map, stripe_index, 6447 stripe_offset, stripe_nr); 6448 stripe_index++; 6449 } 6450 } 6451 6452 if (need_full_stripe(op)) 6453 max_errors = btrfs_chunk_max_errors(map); 6454 6455 if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL && 6456 need_full_stripe(op)) { 6457 handle_ops_on_dev_replace(op, bioc, dev_replace, logical, 6458 &num_stripes, &max_errors); 6459 } 6460 6461 *bioc_ret = bioc; 6462 bioc->num_stripes = num_stripes; 6463 bioc->max_errors = max_errors; 6464 bioc->mirror_num = mirror_num; 6465 6466 out: 6467 if (dev_replace_is_ongoing) { 6468 lockdep_assert_held(&dev_replace->rwsem); 6469 /* Unlock and let waiting writers proceed */ 6470 up_read(&dev_replace->rwsem); 6471 } 6472 free_extent_map(em); 6473 return ret; 6474 } 6475 6476 int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op, 6477 u64 logical, u64 *length, 6478 struct btrfs_io_context **bioc_ret, int mirror_num) 6479 { 6480 return __btrfs_map_block(fs_info, op, logical, length, bioc_ret, 6481 NULL, &mirror_num, 0); 6482 } 6483 6484 /* For Scrub/replace */ 6485 int btrfs_map_sblock(struct btrfs_fs_info *fs_info, enum btrfs_map_op op, 6486 u64 logical, u64 *length, 6487 struct btrfs_io_context **bioc_ret) 6488 { 6489 return __btrfs_map_block(fs_info, op, logical, length, bioc_ret, 6490 NULL, NULL, 1); 6491 } 6492 6493 static bool dev_args_match_fs_devices(const struct btrfs_dev_lookup_args *args, 6494 const struct btrfs_fs_devices *fs_devices) 6495 { 6496 if (args->fsid == NULL) 6497 return true; 6498 if (memcmp(fs_devices->metadata_uuid, args->fsid, BTRFS_FSID_SIZE) == 0) 6499 return true; 6500 return false; 6501 } 6502 6503 static bool dev_args_match_device(const struct btrfs_dev_lookup_args *args, 6504 const struct btrfs_device *device) 6505 { 6506 if (args->missing) { 6507 if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state) && 6508 !device->bdev) 6509 return true; 6510 return false; 6511 } 6512 6513 if (device->devid != args->devid) 6514 return false; 6515 if (args->uuid && memcmp(device->uuid, args->uuid, BTRFS_UUID_SIZE) != 0) 6516 return false; 6517 return true; 6518 } 6519 6520 /* 6521 * Find a device specified by @devid or @uuid in the list of @fs_devices, or 6522 * return NULL. 6523 * 6524 * If devid and uuid are both specified, the match must be exact, otherwise 6525 * only devid is used. 
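 *
 * A minimal usage sketch, mirroring what read_one_chunk() does below:
 *
 *	BTRFS_DEV_LOOKUP_ARGS(args);
 *
 *	args.devid = devid;
 *	args.uuid = uuid;
 *	device = btrfs_find_device(fs_info->fs_devices, &args);
 *
 * A NULL return means no device matched the lookup arguments.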
6526 */ 6527 struct btrfs_device *btrfs_find_device(const struct btrfs_fs_devices *fs_devices, 6528 const struct btrfs_dev_lookup_args *args) 6529 { 6530 struct btrfs_device *device; 6531 struct btrfs_fs_devices *seed_devs; 6532 6533 if (dev_args_match_fs_devices(args, fs_devices)) { 6534 list_for_each_entry(device, &fs_devices->devices, dev_list) { 6535 if (dev_args_match_device(args, device)) 6536 return device; 6537 } 6538 } 6539 6540 list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) { 6541 if (!dev_args_match_fs_devices(args, seed_devs)) 6542 continue; 6543 list_for_each_entry(device, &seed_devs->devices, dev_list) { 6544 if (dev_args_match_device(args, device)) 6545 return device; 6546 } 6547 } 6548 6549 return NULL; 6550 } 6551 6552 static struct btrfs_device *add_missing_dev(struct btrfs_fs_devices *fs_devices, 6553 u64 devid, u8 *dev_uuid) 6554 { 6555 struct btrfs_device *device; 6556 unsigned int nofs_flag; 6557 6558 /* 6559 * We call this under the chunk_mutex, so we want to use NOFS for this 6560 * allocation, however we don't want to change btrfs_alloc_device() to 6561 * always do NOFS because we use it in a lot of other GFP_KERNEL safe 6562 * places. 6563 */ 6564 6565 nofs_flag = memalloc_nofs_save(); 6566 device = btrfs_alloc_device(NULL, &devid, dev_uuid, NULL); 6567 memalloc_nofs_restore(nofs_flag); 6568 if (IS_ERR(device)) 6569 return device; 6570 6571 list_add(&device->dev_list, &fs_devices->devices); 6572 device->fs_devices = fs_devices; 6573 fs_devices->num_devices++; 6574 6575 set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state); 6576 fs_devices->missing_devices++; 6577 6578 return device; 6579 } 6580 6581 /* 6582 * Allocate new device struct, set up devid and UUID. 6583 * 6584 * @fs_info: used only for generating a new devid, can be NULL if 6585 * devid is provided (i.e. @devid != NULL). 6586 * @devid: a pointer to devid for this device. If NULL a new devid 6587 * is generated. 6588 * @uuid: a pointer to UUID for this device. If NULL a new UUID 6589 * is generated. 6590 * @path: a pointer to device path if available, NULL otherwise. 6591 * 6592 * Return: a pointer to a new &struct btrfs_device on success; ERR_PTR() 6593 * on error. Returned struct is not linked onto any lists and must be 6594 * destroyed with btrfs_free_device. 
 */
struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
					const u64 *devid, const u8 *uuid,
					const char *path)
{
	struct btrfs_device *dev;
	u64 tmp;

	if (WARN_ON(!devid && !fs_info))
		return ERR_PTR(-EINVAL);

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&dev->dev_list);
	INIT_LIST_HEAD(&dev->dev_alloc_list);
	INIT_LIST_HEAD(&dev->post_commit_list);

	atomic_set(&dev->dev_stats_ccnt, 0);
	btrfs_device_data_ordered_init(dev);
	extent_io_tree_init(fs_info, &dev->alloc_state, IO_TREE_DEVICE_ALLOC_STATE);

	if (devid)
		tmp = *devid;
	else {
		int ret;

		ret = find_next_devid(fs_info, &tmp);
		if (ret) {
			btrfs_free_device(dev);
			return ERR_PTR(ret);
		}
	}
	dev->devid = tmp;

	if (uuid)
		memcpy(dev->uuid, uuid, BTRFS_UUID_SIZE);
	else
		generate_random_uuid(dev->uuid);

	if (path) {
		struct rcu_string *name;

		name = rcu_string_strdup(path, GFP_KERNEL);
		if (!name) {
			btrfs_free_device(dev);
			return ERR_PTR(-ENOMEM);
		}
		rcu_assign_pointer(dev->name, name);
	}

	return dev;
}

static void btrfs_report_missing_device(struct btrfs_fs_info *fs_info,
					u64 devid, u8 *uuid, bool error)
{
	if (error)
		btrfs_err_rl(fs_info, "devid %llu uuid %pU is missing",
			     devid, uuid);
	else
		btrfs_warn_rl(fs_info, "devid %llu uuid %pU is missing",
			      devid, uuid);
}

u64 btrfs_calc_stripe_length(const struct extent_map *em)
{
	const struct map_lookup *map = em->map_lookup;
	const int data_stripes = calc_data_stripes(map->type, map->num_stripes);

	return div_u64(em->len, data_stripes);
}

#if BITS_PER_LONG == 32
/*
 * Due to page cache limits, metadata beyond BTRFS_32BIT_MAX_FILE_SIZE
 * can't be accessed on 32bit systems.
 *
 * This function does a mount-time check to reject the fs if it already has
 * a metadata chunk beyond that limit.
 */
static int check_32bit_meta_chunk(struct btrfs_fs_info *fs_info,
				  u64 logical, u64 length, u64 type)
{
	if (!(type & BTRFS_BLOCK_GROUP_METADATA))
		return 0;

	if (logical + length < MAX_LFS_FILESIZE)
		return 0;

	btrfs_err_32bit_limit(fs_info);
	return -EOVERFLOW;
}

/*
 * This is to give an early warning for any metadata chunk reaching
 * BTRFS_32BIT_EARLY_WARN_THRESHOLD.
 * Although we can still access the metadata, it's not going to be possible
 * once the limit is reached.
6695 */ 6696 static void warn_32bit_meta_chunk(struct btrfs_fs_info *fs_info, 6697 u64 logical, u64 length, u64 type) 6698 { 6699 if (!(type & BTRFS_BLOCK_GROUP_METADATA)) 6700 return; 6701 6702 if (logical + length < BTRFS_32BIT_EARLY_WARN_THRESHOLD) 6703 return; 6704 6705 btrfs_warn_32bit_limit(fs_info); 6706 } 6707 #endif 6708 6709 static struct btrfs_device *handle_missing_device(struct btrfs_fs_info *fs_info, 6710 u64 devid, u8 *uuid) 6711 { 6712 struct btrfs_device *dev; 6713 6714 if (!btrfs_test_opt(fs_info, DEGRADED)) { 6715 btrfs_report_missing_device(fs_info, devid, uuid, true); 6716 return ERR_PTR(-ENOENT); 6717 } 6718 6719 dev = add_missing_dev(fs_info->fs_devices, devid, uuid); 6720 if (IS_ERR(dev)) { 6721 btrfs_err(fs_info, "failed to init missing device %llu: %ld", 6722 devid, PTR_ERR(dev)); 6723 return dev; 6724 } 6725 btrfs_report_missing_device(fs_info, devid, uuid, false); 6726 6727 return dev; 6728 } 6729 6730 static int read_one_chunk(struct btrfs_key *key, struct extent_buffer *leaf, 6731 struct btrfs_chunk *chunk) 6732 { 6733 BTRFS_DEV_LOOKUP_ARGS(args); 6734 struct btrfs_fs_info *fs_info = leaf->fs_info; 6735 struct extent_map_tree *map_tree = &fs_info->mapping_tree; 6736 struct map_lookup *map; 6737 struct extent_map *em; 6738 u64 logical; 6739 u64 length; 6740 u64 devid; 6741 u64 type; 6742 u8 uuid[BTRFS_UUID_SIZE]; 6743 int index; 6744 int num_stripes; 6745 int ret; 6746 int i; 6747 6748 logical = key->offset; 6749 length = btrfs_chunk_length(leaf, chunk); 6750 type = btrfs_chunk_type(leaf, chunk); 6751 index = btrfs_bg_flags_to_raid_index(type); 6752 num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 6753 6754 #if BITS_PER_LONG == 32 6755 ret = check_32bit_meta_chunk(fs_info, logical, length, type); 6756 if (ret < 0) 6757 return ret; 6758 warn_32bit_meta_chunk(fs_info, logical, length, type); 6759 #endif 6760 6761 /* 6762 * Only need to verify chunk item if we're reading from sys chunk array, 6763 * as chunk item in tree block is already verified by tree-checker. 6764 */ 6765 if (leaf->start == BTRFS_SUPER_INFO_OFFSET) { 6766 ret = btrfs_check_chunk_valid(leaf, chunk, logical); 6767 if (ret) 6768 return ret; 6769 } 6770 6771 read_lock(&map_tree->lock); 6772 em = lookup_extent_mapping(map_tree, logical, 1); 6773 read_unlock(&map_tree->lock); 6774 6775 /* already mapped? */ 6776 if (em && em->start <= logical && em->start + em->len > logical) { 6777 free_extent_map(em); 6778 return 0; 6779 } else if (em) { 6780 free_extent_map(em); 6781 } 6782 6783 em = alloc_extent_map(); 6784 if (!em) 6785 return -ENOMEM; 6786 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS); 6787 if (!map) { 6788 free_extent_map(em); 6789 return -ENOMEM; 6790 } 6791 6792 set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags); 6793 em->map_lookup = map; 6794 em->start = logical; 6795 em->len = length; 6796 em->orig_start = 0; 6797 em->block_start = 0; 6798 em->block_len = em->len; 6799 6800 map->num_stripes = num_stripes; 6801 map->io_width = btrfs_chunk_io_width(leaf, chunk); 6802 map->io_align = btrfs_chunk_io_align(leaf, chunk); 6803 map->type = type; 6804 /* 6805 * We can't use the sub_stripes value, as for profiles other than 6806 * RAID10, they may have 0 as sub_stripes for filesystems created by 6807 * older mkfs (<v5.4). 6808 * In that case, it can cause divide-by-zero errors later. 6809 * Since currently sub_stripes is fixed for each profile, let's 6810 * use the trusted value instead. 
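	 * (E.g. for RAID10, btrfs_raid_array gives the fixed sub_stripes == 2
	 * even if an old mkfs wrote 0 into the on-disk chunk item.)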
6811 */ 6812 map->sub_stripes = btrfs_raid_array[index].sub_stripes; 6813 map->verified_stripes = 0; 6814 em->orig_block_len = btrfs_calc_stripe_length(em); 6815 for (i = 0; i < num_stripes; i++) { 6816 map->stripes[i].physical = 6817 btrfs_stripe_offset_nr(leaf, chunk, i); 6818 devid = btrfs_stripe_devid_nr(leaf, chunk, i); 6819 args.devid = devid; 6820 read_extent_buffer(leaf, uuid, (unsigned long) 6821 btrfs_stripe_dev_uuid_nr(chunk, i), 6822 BTRFS_UUID_SIZE); 6823 args.uuid = uuid; 6824 map->stripes[i].dev = btrfs_find_device(fs_info->fs_devices, &args); 6825 if (!map->stripes[i].dev) { 6826 map->stripes[i].dev = handle_missing_device(fs_info, 6827 devid, uuid); 6828 if (IS_ERR(map->stripes[i].dev)) { 6829 ret = PTR_ERR(map->stripes[i].dev); 6830 free_extent_map(em); 6831 return ret; 6832 } 6833 } 6834 6835 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, 6836 &(map->stripes[i].dev->dev_state)); 6837 } 6838 6839 write_lock(&map_tree->lock); 6840 ret = add_extent_mapping(map_tree, em, 0); 6841 write_unlock(&map_tree->lock); 6842 if (ret < 0) { 6843 btrfs_err(fs_info, 6844 "failed to add chunk map, start=%llu len=%llu: %d", 6845 em->start, em->len, ret); 6846 } 6847 free_extent_map(em); 6848 6849 return ret; 6850 } 6851 6852 static void fill_device_from_item(struct extent_buffer *leaf, 6853 struct btrfs_dev_item *dev_item, 6854 struct btrfs_device *device) 6855 { 6856 unsigned long ptr; 6857 6858 device->devid = btrfs_device_id(leaf, dev_item); 6859 device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item); 6860 device->total_bytes = device->disk_total_bytes; 6861 device->commit_total_bytes = device->disk_total_bytes; 6862 device->bytes_used = btrfs_device_bytes_used(leaf, dev_item); 6863 device->commit_bytes_used = device->bytes_used; 6864 device->type = btrfs_device_type(leaf, dev_item); 6865 device->io_align = btrfs_device_io_align(leaf, dev_item); 6866 device->io_width = btrfs_device_io_width(leaf, dev_item); 6867 device->sector_size = btrfs_device_sector_size(leaf, dev_item); 6868 WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID); 6869 clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state); 6870 6871 ptr = btrfs_device_uuid(dev_item); 6872 read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE); 6873 } 6874 6875 static struct btrfs_fs_devices *open_seed_devices(struct btrfs_fs_info *fs_info, 6876 u8 *fsid) 6877 { 6878 struct btrfs_fs_devices *fs_devices; 6879 int ret; 6880 6881 lockdep_assert_held(&uuid_mutex); 6882 ASSERT(fsid); 6883 6884 /* This will match only for multi-device seed fs */ 6885 list_for_each_entry(fs_devices, &fs_info->fs_devices->seed_list, seed_list) 6886 if (!memcmp(fs_devices->fsid, fsid, BTRFS_FSID_SIZE)) 6887 return fs_devices; 6888 6889 6890 fs_devices = find_fsid(fsid, NULL); 6891 if (!fs_devices) { 6892 if (!btrfs_test_opt(fs_info, DEGRADED)) 6893 return ERR_PTR(-ENOENT); 6894 6895 fs_devices = alloc_fs_devices(fsid, NULL); 6896 if (IS_ERR(fs_devices)) 6897 return fs_devices; 6898 6899 fs_devices->seeding = true; 6900 fs_devices->opened = 1; 6901 return fs_devices; 6902 } 6903 6904 /* 6905 * Upon first call for a seed fs fsid, just create a private copy of the 6906 * respective fs_devices and anchor it at fs_info->fs_devices->seed_list 6907 */ 6908 fs_devices = clone_fs_devices(fs_devices); 6909 if (IS_ERR(fs_devices)) 6910 return fs_devices; 6911 6912 ret = open_fs_devices(fs_devices, FMODE_READ, fs_info->bdev_holder); 6913 if (ret) { 6914 free_fs_devices(fs_devices); 6915 return ERR_PTR(ret); 6916 } 6917 6918 if (!fs_devices->seeding) { 6919 
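		/* Not an actual seed filesystem, reject it. */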
close_fs_devices(fs_devices); 6920 free_fs_devices(fs_devices); 6921 return ERR_PTR(-EINVAL); 6922 } 6923 6924 list_add(&fs_devices->seed_list, &fs_info->fs_devices->seed_list); 6925 6926 return fs_devices; 6927 } 6928 6929 static int read_one_dev(struct extent_buffer *leaf, 6930 struct btrfs_dev_item *dev_item) 6931 { 6932 BTRFS_DEV_LOOKUP_ARGS(args); 6933 struct btrfs_fs_info *fs_info = leaf->fs_info; 6934 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 6935 struct btrfs_device *device; 6936 u64 devid; 6937 int ret; 6938 u8 fs_uuid[BTRFS_FSID_SIZE]; 6939 u8 dev_uuid[BTRFS_UUID_SIZE]; 6940 6941 devid = btrfs_device_id(leaf, dev_item); 6942 args.devid = devid; 6943 read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item), 6944 BTRFS_UUID_SIZE); 6945 read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item), 6946 BTRFS_FSID_SIZE); 6947 args.uuid = dev_uuid; 6948 args.fsid = fs_uuid; 6949 6950 if (memcmp(fs_uuid, fs_devices->metadata_uuid, BTRFS_FSID_SIZE)) { 6951 fs_devices = open_seed_devices(fs_info, fs_uuid); 6952 if (IS_ERR(fs_devices)) 6953 return PTR_ERR(fs_devices); 6954 } 6955 6956 device = btrfs_find_device(fs_info->fs_devices, &args); 6957 if (!device) { 6958 if (!btrfs_test_opt(fs_info, DEGRADED)) { 6959 btrfs_report_missing_device(fs_info, devid, 6960 dev_uuid, true); 6961 return -ENOENT; 6962 } 6963 6964 device = add_missing_dev(fs_devices, devid, dev_uuid); 6965 if (IS_ERR(device)) { 6966 btrfs_err(fs_info, 6967 "failed to add missing dev %llu: %ld", 6968 devid, PTR_ERR(device)); 6969 return PTR_ERR(device); 6970 } 6971 btrfs_report_missing_device(fs_info, devid, dev_uuid, false); 6972 } else { 6973 if (!device->bdev) { 6974 if (!btrfs_test_opt(fs_info, DEGRADED)) { 6975 btrfs_report_missing_device(fs_info, 6976 devid, dev_uuid, true); 6977 return -ENOENT; 6978 } 6979 btrfs_report_missing_device(fs_info, devid, 6980 dev_uuid, false); 6981 } 6982 6983 if (!device->bdev && 6984 !test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) { 6985 /* 6986 * this happens when a device that was properly setup 6987 * in the device info lists suddenly goes bad. 
			 * device->bdev is NULL, so we have to set the
			 * BTRFS_DEV_STATE_MISSING bit here.
			 */
			device->fs_devices->missing_devices++;
			set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
		}

		/* Move the device to its own fs_devices */
		if (device->fs_devices != fs_devices) {
			ASSERT(test_bit(BTRFS_DEV_STATE_MISSING,
					&device->dev_state));

			list_move(&device->dev_list, &fs_devices->devices);
			device->fs_devices->num_devices--;
			fs_devices->num_devices++;

			device->fs_devices->missing_devices--;
			fs_devices->missing_devices++;

			device->fs_devices = fs_devices;
		}
	}

	if (device->fs_devices != fs_info->fs_devices) {
		BUG_ON(test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state));
		if (device->generation !=
		    btrfs_device_generation(leaf, dev_item))
			return -EINVAL;
	}

	fill_device_from_item(leaf, dev_item, device);
	if (device->bdev) {
		u64 max_total_bytes = bdev_nr_bytes(device->bdev);

		if (device->total_bytes > max_total_bytes) {
			btrfs_err(fs_info,
			"device total_bytes should be at most %llu but found %llu",
				  max_total_bytes, device->total_bytes);
			return -EINVAL;
		}
	}
	set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    !test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
		device->fs_devices->total_rw_bytes += device->total_bytes;
		atomic64_add(device->total_bytes - device->bytes_used,
			     &fs_info->free_chunk_space);
	}
	ret = 0;
	return ret;
}

int btrfs_read_sys_array(struct btrfs_fs_info *fs_info)
{
	struct btrfs_super_block *super_copy = fs_info->super_copy;
	struct extent_buffer *sb;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *array_ptr;
	unsigned long sb_array_offset;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur_offset;
	u64 type;
	struct btrfs_key key;

	ASSERT(BTRFS_SUPER_INFO_SIZE <= fs_info->nodesize);

	/*
	 * We allocated a dummy extent, just to use extent buffer accessors.
	 * There will be unused space after BTRFS_SUPER_INFO_SIZE, but
	 * that's fine, we will not go beyond the system chunk array anyway.
7062 */ 7063 sb = alloc_dummy_extent_buffer(fs_info, BTRFS_SUPER_INFO_OFFSET); 7064 if (!sb) 7065 return -ENOMEM; 7066 set_extent_buffer_uptodate(sb); 7067 7068 write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE); 7069 array_size = btrfs_super_sys_array_size(super_copy); 7070 7071 array_ptr = super_copy->sys_chunk_array; 7072 sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array); 7073 cur_offset = 0; 7074 7075 while (cur_offset < array_size) { 7076 disk_key = (struct btrfs_disk_key *)array_ptr; 7077 len = sizeof(*disk_key); 7078 if (cur_offset + len > array_size) 7079 goto out_short_read; 7080 7081 btrfs_disk_key_to_cpu(&key, disk_key); 7082 7083 array_ptr += len; 7084 sb_array_offset += len; 7085 cur_offset += len; 7086 7087 if (key.type != BTRFS_CHUNK_ITEM_KEY) { 7088 btrfs_err(fs_info, 7089 "unexpected item type %u in sys_array at offset %u", 7090 (u32)key.type, cur_offset); 7091 ret = -EIO; 7092 break; 7093 } 7094 7095 chunk = (struct btrfs_chunk *)sb_array_offset; 7096 /* 7097 * At least one btrfs_chunk with one stripe must be present, 7098 * exact stripe count check comes afterwards 7099 */ 7100 len = btrfs_chunk_item_size(1); 7101 if (cur_offset + len > array_size) 7102 goto out_short_read; 7103 7104 num_stripes = btrfs_chunk_num_stripes(sb, chunk); 7105 if (!num_stripes) { 7106 btrfs_err(fs_info, 7107 "invalid number of stripes %u in sys_array at offset %u", 7108 num_stripes, cur_offset); 7109 ret = -EIO; 7110 break; 7111 } 7112 7113 type = btrfs_chunk_type(sb, chunk); 7114 if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) { 7115 btrfs_err(fs_info, 7116 "invalid chunk type %llu in sys_array at offset %u", 7117 type, cur_offset); 7118 ret = -EIO; 7119 break; 7120 } 7121 7122 len = btrfs_chunk_item_size(num_stripes); 7123 if (cur_offset + len > array_size) 7124 goto out_short_read; 7125 7126 ret = read_one_chunk(&key, sb, chunk); 7127 if (ret) 7128 break; 7129 7130 array_ptr += len; 7131 sb_array_offset += len; 7132 cur_offset += len; 7133 } 7134 clear_extent_buffer_uptodate(sb); 7135 free_extent_buffer_stale(sb); 7136 return ret; 7137 7138 out_short_read: 7139 btrfs_err(fs_info, "sys_array too short to read %u bytes at offset %u", 7140 len, cur_offset); 7141 clear_extent_buffer_uptodate(sb); 7142 free_extent_buffer_stale(sb); 7143 return -EIO; 7144 } 7145 7146 /* 7147 * Check if all chunks in the fs are OK for read-write degraded mount 7148 * 7149 * If the @failing_dev is specified, it's accounted as missing. 7150 * 7151 * Return true if all chunks meet the minimal RW mount requirements. 7152 * Return false if any chunk doesn't meet the minimal RW mount requirements. 7153 */ 7154 bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info, 7155 struct btrfs_device *failing_dev) 7156 { 7157 struct extent_map_tree *map_tree = &fs_info->mapping_tree; 7158 struct extent_map *em; 7159 u64 next_start = 0; 7160 bool ret = true; 7161 7162 read_lock(&map_tree->lock); 7163 em = lookup_extent_mapping(map_tree, 0, (u64)-1); 7164 read_unlock(&map_tree->lock); 7165 /* No chunk at all? 
	   Return false anyway */
	if (!em) {
		ret = false;
		goto out;
	}
	while (em) {
		struct map_lookup *map;
		int missing = 0;
		int max_tolerated;
		int i;

		map = em->map_lookup;
		max_tolerated =
			btrfs_get_num_tolerated_disk_barrier_failures(
					map->type);
		for (i = 0; i < map->num_stripes; i++) {
			struct btrfs_device *dev = map->stripes[i].dev;

			if (!dev || !dev->bdev ||
			    test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) ||
			    dev->last_flush_error)
				missing++;
			else if (failing_dev && failing_dev == dev)
				missing++;
		}
		if (missing > max_tolerated) {
			if (!failing_dev)
				btrfs_warn(fs_info,
	"chunk %llu missing %d devices, max tolerance is %d for writable mount",
				   em->start, missing, max_tolerated);
			free_extent_map(em);
			ret = false;
			goto out;
		}
		next_start = extent_map_end(em);
		free_extent_map(em);

		read_lock(&map_tree->lock);
		em = lookup_extent_mapping(map_tree, next_start,
					   (u64)(-1) - next_start);
		read_unlock(&map_tree->lock);
	}
out:
	return ret;
}

static void readahead_tree_node_children(struct extent_buffer *node)
{
	int i;
	const int nr_items = btrfs_header_nritems(node);

	for (i = 0; i < nr_items; i++)
		btrfs_readahead_node_child(node, i);
}

int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root = fs_info->chunk_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	int ret;
	int slot;
	int iter_ret = 0;
	u64 total_dev = 0;
	u64 last_ra_node = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * uuid_mutex is needed only if we are mounting a sprout FS,
	 * otherwise we don't need it.
	 */
	mutex_lock(&uuid_mutex);

	/*
	 * It is possible for mount and umount to race in such a way that
	 * we execute this code path, but open_fs_devices failed to clear
	 * total_rw_bytes. We certainly want it cleared before reading the
	 * device items, so clear it here.
	 */
	fs_info->fs_devices->total_rw_bytes = 0;

	/*
	 * Lockdep complains about a possible circular locking dependency between
	 * a disk's open_mutex (struct gendisk.open_mutex), the rw semaphores
	 * used for freeze protection of a fs (struct super_block.s_writers),
	 * which we take when starting a transaction, and extent buffers of the
	 * chunk tree if we call read_one_dev() while holding a lock on an
	 * extent buffer of the chunk tree. Since we are mounting the filesystem
	 * and at this point there can't be any concurrent task modifying the
	 * chunk tree, to keep it simple, just skip locking on the chunk tree.
	 */
	ASSERT(!test_bit(BTRFS_FS_OPEN, &fs_info->flags));
	path->skip_locking = 1;

	/*
	 * Read all device items, and then all the chunk items. All
	 * device items are found before any chunk item (their object id
	 * is smaller than the lowest possible object id for a chunk
	 * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID).
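	 *
	 * E.g. device items live under objectid BTRFS_DEV_ITEMS_OBJECTID (1),
	 * while chunk items start at BTRFS_FIRST_CHUNK_TREE_OBJECTID (256),
	 * so a forward key walk visits every device before the first chunk.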
7269 */ 7270 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 7271 key.offset = 0; 7272 key.type = 0; 7273 btrfs_for_each_slot(root, &key, &found_key, path, iter_ret) { 7274 struct extent_buffer *node = path->nodes[1]; 7275 7276 leaf = path->nodes[0]; 7277 slot = path->slots[0]; 7278 7279 if (node) { 7280 if (last_ra_node != node->start) { 7281 readahead_tree_node_children(node); 7282 last_ra_node = node->start; 7283 } 7284 } 7285 if (found_key.type == BTRFS_DEV_ITEM_KEY) { 7286 struct btrfs_dev_item *dev_item; 7287 dev_item = btrfs_item_ptr(leaf, slot, 7288 struct btrfs_dev_item); 7289 ret = read_one_dev(leaf, dev_item); 7290 if (ret) 7291 goto error; 7292 total_dev++; 7293 } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) { 7294 struct btrfs_chunk *chunk; 7295 7296 /* 7297 * We are only called at mount time, so no need to take 7298 * fs_info->chunk_mutex. Plus, to avoid lockdep warnings, 7299 * we always lock first fs_info->chunk_mutex before 7300 * acquiring any locks on the chunk tree. This is a 7301 * requirement for chunk allocation, see the comment on 7302 * top of btrfs_chunk_alloc() for details. 7303 */ 7304 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk); 7305 ret = read_one_chunk(&found_key, leaf, chunk); 7306 if (ret) 7307 goto error; 7308 } 7309 } 7310 /* Catch error found during iteration */ 7311 if (iter_ret < 0) { 7312 ret = iter_ret; 7313 goto error; 7314 } 7315 7316 /* 7317 * After loading chunk tree, we've got all device information, 7318 * do another round of validation checks. 7319 */ 7320 if (total_dev != fs_info->fs_devices->total_devices) { 7321 btrfs_warn(fs_info, 7322 "super block num_devices %llu mismatch with DEV_ITEM count %llu, will be repaired on next transaction commit", 7323 btrfs_super_num_devices(fs_info->super_copy), 7324 total_dev); 7325 fs_info->fs_devices->total_devices = total_dev; 7326 btrfs_set_super_num_devices(fs_info->super_copy, total_dev); 7327 } 7328 if (btrfs_super_total_bytes(fs_info->super_copy) < 7329 fs_info->fs_devices->total_rw_bytes) { 7330 btrfs_err(fs_info, 7331 "super_total_bytes %llu mismatch with fs_devices total_rw_bytes %llu", 7332 btrfs_super_total_bytes(fs_info->super_copy), 7333 fs_info->fs_devices->total_rw_bytes); 7334 ret = -EINVAL; 7335 goto error; 7336 } 7337 ret = 0; 7338 error: 7339 mutex_unlock(&uuid_mutex); 7340 7341 btrfs_free_path(path); 7342 return ret; 7343 } 7344 7345 int btrfs_init_devices_late(struct btrfs_fs_info *fs_info) 7346 { 7347 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs; 7348 struct btrfs_device *device; 7349 int ret = 0; 7350 7351 fs_devices->fs_info = fs_info; 7352 7353 mutex_lock(&fs_devices->device_list_mutex); 7354 list_for_each_entry(device, &fs_devices->devices, dev_list) 7355 device->fs_info = fs_info; 7356 7357 list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) { 7358 list_for_each_entry(device, &seed_devs->devices, dev_list) { 7359 device->fs_info = fs_info; 7360 ret = btrfs_get_dev_zone_info(device, false); 7361 if (ret) 7362 break; 7363 } 7364 7365 seed_devs->fs_info = fs_info; 7366 } 7367 mutex_unlock(&fs_devices->device_list_mutex); 7368 7369 return ret; 7370 } 7371 7372 static u64 btrfs_dev_stats_value(const struct extent_buffer *eb, 7373 const struct btrfs_dev_stats_item *ptr, 7374 int index) 7375 { 7376 u64 val; 7377 7378 read_extent_buffer(eb, &val, 7379 offsetof(struct btrfs_dev_stats_item, values) + 7380 ((unsigned long)ptr) + (index * sizeof(u64)), 7381 sizeof(val)); 7382 return val; 7383 } 7384 7385 static void 
btrfs_set_dev_stats_value(struct extent_buffer *eb, 7386 struct btrfs_dev_stats_item *ptr, 7387 int index, u64 val) 7388 { 7389 write_extent_buffer(eb, &val, 7390 offsetof(struct btrfs_dev_stats_item, values) + 7391 ((unsigned long)ptr) + (index * sizeof(u64)), 7392 sizeof(val)); 7393 } 7394 7395 static int btrfs_device_init_dev_stats(struct btrfs_device *device, 7396 struct btrfs_path *path) 7397 { 7398 struct btrfs_dev_stats_item *ptr; 7399 struct extent_buffer *eb; 7400 struct btrfs_key key; 7401 int item_size; 7402 int i, ret, slot; 7403 7404 if (!device->fs_info->dev_root) 7405 return 0; 7406 7407 key.objectid = BTRFS_DEV_STATS_OBJECTID; 7408 key.type = BTRFS_PERSISTENT_ITEM_KEY; 7409 key.offset = device->devid; 7410 ret = btrfs_search_slot(NULL, device->fs_info->dev_root, &key, path, 0, 0); 7411 if (ret) { 7412 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) 7413 btrfs_dev_stat_set(device, i, 0); 7414 device->dev_stats_valid = 1; 7415 btrfs_release_path(path); 7416 return ret < 0 ? ret : 0; 7417 } 7418 slot = path->slots[0]; 7419 eb = path->nodes[0]; 7420 item_size = btrfs_item_size(eb, slot); 7421 7422 ptr = btrfs_item_ptr(eb, slot, struct btrfs_dev_stats_item); 7423 7424 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) { 7425 if (item_size >= (1 + i) * sizeof(__le64)) 7426 btrfs_dev_stat_set(device, i, 7427 btrfs_dev_stats_value(eb, ptr, i)); 7428 else 7429 btrfs_dev_stat_set(device, i, 0); 7430 } 7431 7432 device->dev_stats_valid = 1; 7433 btrfs_dev_stat_print_on_load(device); 7434 btrfs_release_path(path); 7435 7436 return 0; 7437 } 7438 7439 int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info) 7440 { 7441 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs; 7442 struct btrfs_device *device; 7443 struct btrfs_path *path = NULL; 7444 int ret = 0; 7445 7446 path = btrfs_alloc_path(); 7447 if (!path) 7448 return -ENOMEM; 7449 7450 mutex_lock(&fs_devices->device_list_mutex); 7451 list_for_each_entry(device, &fs_devices->devices, dev_list) { 7452 ret = btrfs_device_init_dev_stats(device, path); 7453 if (ret) 7454 goto out; 7455 } 7456 list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) { 7457 list_for_each_entry(device, &seed_devs->devices, dev_list) { 7458 ret = btrfs_device_init_dev_stats(device, path); 7459 if (ret) 7460 goto out; 7461 } 7462 } 7463 out: 7464 mutex_unlock(&fs_devices->device_list_mutex); 7465 7466 btrfs_free_path(path); 7467 return ret; 7468 } 7469 7470 static int update_dev_stat_item(struct btrfs_trans_handle *trans, 7471 struct btrfs_device *device) 7472 { 7473 struct btrfs_fs_info *fs_info = trans->fs_info; 7474 struct btrfs_root *dev_root = fs_info->dev_root; 7475 struct btrfs_path *path; 7476 struct btrfs_key key; 7477 struct extent_buffer *eb; 7478 struct btrfs_dev_stats_item *ptr; 7479 int ret; 7480 int i; 7481 7482 key.objectid = BTRFS_DEV_STATS_OBJECTID; 7483 key.type = BTRFS_PERSISTENT_ITEM_KEY; 7484 key.offset = device->devid; 7485 7486 path = btrfs_alloc_path(); 7487 if (!path) 7488 return -ENOMEM; 7489 ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1); 7490 if (ret < 0) { 7491 btrfs_warn_in_rcu(fs_info, 7492 "error %d while searching for dev_stats item for device %s", 7493 ret, btrfs_dev_name(device)); 7494 goto out; 7495 } 7496 7497 if (ret == 0 && 7498 btrfs_item_size(path->nodes[0], path->slots[0]) < sizeof(*ptr)) { 7499 /* need to delete old one and insert a new one */ 7500 ret = btrfs_del_item(trans, dev_root, path); 7501 if (ret != 0) { 7502 btrfs_warn_in_rcu(fs_info, 7503 "delete too small dev_stats 
item for device %s failed %d",
				      btrfs_dev_name(device), ret);
			goto out;
		}
		ret = 1;
	}

	if (ret == 1) {
		/* need to insert a new item */
		btrfs_release_path(path);
		ret = btrfs_insert_empty_item(trans, dev_root, path,
					      &key, sizeof(*ptr));
		if (ret < 0) {
			btrfs_warn_in_rcu(fs_info,
				"insert dev_stats item for device %s failed %d",
				btrfs_dev_name(device), ret);
			goto out;
		}
	}

	eb = path->nodes[0];
	ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		btrfs_set_dev_stats_value(eb, ptr, i,
					  btrfs_dev_stat_read(device, i));
	btrfs_mark_buffer_dirty(eb);

out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Called from commit_transaction. Writes all changed device stats to disk.
 */
int btrfs_run_dev_stats(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;
	int stats_cnt;
	int ret = 0;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		stats_cnt = atomic_read(&device->dev_stats_ccnt);
		if (!device->dev_stats_valid || stats_cnt == 0)
			continue;

		/*
		 * There is a LOAD-LOAD control dependency between the value of
		 * dev_stats_ccnt and updating the on-disk values, which
		 * requires reading the in-memory counters. Such control
		 * dependencies require explicit read memory barriers.
		 *
		 * This memory barrier pairs with smp_mb__before_atomic in
		 * btrfs_dev_stat_inc/btrfs_dev_stat_set and with the full
		 * barrier implied by atomic_xchg in
		 * btrfs_dev_stats_read_and_reset.
		 */
		smp_rmb();

		ret = update_dev_stat_item(trans, device);
		if (!ret)
			atomic_sub(stats_cnt, &device->dev_stats_ccnt);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	return ret;
}

void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
{
	btrfs_dev_stat_inc(dev, index);

	if (!dev->dev_stats_valid)
		return;
	btrfs_err_rl_in_rcu(dev->fs_info,
		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
			    btrfs_dev_name(dev),
			    btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
			    btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
			    btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
			    btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
			    btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
}

static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
{
	int i;

	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		if (btrfs_dev_stat_read(dev, i) != 0)
			break;
	if (i == BTRFS_DEV_STAT_VALUES_MAX)
		return; /* all values == 0, suppress message */

	btrfs_info_in_rcu(dev->fs_info,
		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
			  btrfs_dev_name(dev),
			  btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
			  btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
			  btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
			  btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
			  btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
}

int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info,
			struct
btrfs_ioctl_get_dev_stats *stats) 7613 { 7614 BTRFS_DEV_LOOKUP_ARGS(args); 7615 struct btrfs_device *dev; 7616 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 7617 int i; 7618 7619 mutex_lock(&fs_devices->device_list_mutex); 7620 args.devid = stats->devid; 7621 dev = btrfs_find_device(fs_info->fs_devices, &args); 7622 mutex_unlock(&fs_devices->device_list_mutex); 7623 7624 if (!dev) { 7625 btrfs_warn(fs_info, "get dev_stats failed, device not found"); 7626 return -ENODEV; 7627 } else if (!dev->dev_stats_valid) { 7628 btrfs_warn(fs_info, "get dev_stats failed, not yet valid"); 7629 return -ENODEV; 7630 } else if (stats->flags & BTRFS_DEV_STATS_RESET) { 7631 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) { 7632 if (stats->nr_items > i) 7633 stats->values[i] = 7634 btrfs_dev_stat_read_and_reset(dev, i); 7635 else 7636 btrfs_dev_stat_set(dev, i, 0); 7637 } 7638 btrfs_info(fs_info, "device stats zeroed by %s (%d)", 7639 current->comm, task_pid_nr(current)); 7640 } else { 7641 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) 7642 if (stats->nr_items > i) 7643 stats->values[i] = btrfs_dev_stat_read(dev, i); 7644 } 7645 if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX) 7646 stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX; 7647 return 0; 7648 } 7649 7650 /* 7651 * Update the size and bytes used for each device where it changed. This is 7652 * delayed since we would otherwise get errors while writing out the 7653 * superblocks. 7654 * 7655 * Must be invoked during transaction commit. 7656 */ 7657 void btrfs_commit_device_sizes(struct btrfs_transaction *trans) 7658 { 7659 struct btrfs_device *curr, *next; 7660 7661 ASSERT(trans->state == TRANS_STATE_COMMIT_DOING); 7662 7663 if (list_empty(&trans->dev_update_list)) 7664 return; 7665 7666 /* 7667 * We don't need the device_list_mutex here. This list is owned by the 7668 * transaction and the transaction must complete before the device is 7669 * released. 7670 */ 7671 mutex_lock(&trans->fs_info->chunk_mutex); 7672 list_for_each_entry_safe(curr, next, &trans->dev_update_list, 7673 post_commit_list) { 7674 list_del_init(&curr->post_commit_list); 7675 curr->commit_total_bytes = curr->disk_total_bytes; 7676 curr->commit_bytes_used = curr->bytes_used; 7677 } 7678 mutex_unlock(&trans->fs_info->chunk_mutex); 7679 } 7680 7681 /* 7682 * Multiplicity factor for simple profiles: DUP, RAID1-like and RAID10. 
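 *
 * E.g. RAID1 has ncopies == 2, so one logical gigabyte of a RAID1 block
 * group consumes two gigabytes of raw device space; SINGLE and RAID0 have
 * a factor of 1.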
static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
				 u64 chunk_offset, u64 devid,
				 u64 physical_offset, u64 physical_len)
{
	struct btrfs_dev_lookup_args args = { .devid = devid };
	struct extent_map_tree *em_tree = &fs_info->mapping_tree;
	struct extent_map *em;
	struct map_lookup *map;
	struct btrfs_device *dev;
	u64 stripe_len;
	bool found = false;
	int ret = 0;
	int i;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
	read_unlock(&em_tree->lock);

	if (!em) {
		btrfs_err(fs_info,
"dev extent physical offset %llu on devid %llu doesn't have corresponding chunk",
			  physical_offset, devid);
		ret = -EUCLEAN;
		goto out;
	}

	map = em->map_lookup;
	stripe_len = btrfs_calc_stripe_length(em);
	if (physical_len != stripe_len) {
		btrfs_err(fs_info,
"dev extent physical offset %llu on devid %llu length doesn't match chunk %llu, have %llu expect %llu",
			  physical_offset, devid, em->start, physical_len,
			  stripe_len);
		ret = -EUCLEAN;
		goto out;
	}

	/*
	 * Very old mkfs.btrfs (before v4.1) will not respect the reserved
	 * space. Although the kernel can handle it without problems, better
	 * to warn the users.
	 */
	if (physical_offset < BTRFS_DEVICE_RANGE_RESERVED)
		btrfs_warn(fs_info,
		"devid %llu physical %llu len %llu inside the reserved space",
			   devid, physical_offset, physical_len);

	for (i = 0; i < map->num_stripes; i++) {
		if (map->stripes[i].dev->devid == devid &&
		    map->stripes[i].physical == physical_offset) {
			found = true;
			if (map->verified_stripes >= map->num_stripes) {
				btrfs_err(fs_info,
					"too many dev extents for chunk %llu found",
					  em->start);
				ret = -EUCLEAN;
				goto out;
			}
			map->verified_stripes++;
			break;
		}
	}
	if (!found) {
		btrfs_err(fs_info,
	"dev extent physical offset %llu devid %llu has no corresponding chunk",
			physical_offset, devid);
		ret = -EUCLEAN;
	}

	/* Make sure no dev extent is beyond device boundary */
	dev = btrfs_find_device(fs_info->fs_devices, &args);
	if (!dev) {
		btrfs_err(fs_info, "failed to find devid %llu", devid);
		ret = -EUCLEAN;
		goto out;
	}

	if (physical_offset + physical_len > dev->disk_total_bytes) {
		btrfs_err(fs_info,
"dev extent devid %llu physical offset %llu len %llu is beyond device boundary %llu",
			  devid, physical_offset, physical_len,
			  dev->disk_total_bytes);
		ret = -EUCLEAN;
		goto out;
	}

	if (dev->zone_info) {
		u64 zone_size = dev->zone_info->zone_size;

		if (!IS_ALIGNED(physical_offset, zone_size) ||
		    !IS_ALIGNED(physical_len, zone_size)) {
			btrfs_err(fs_info,
"zoned: dev extent devid %llu physical offset %llu len %llu is not aligned to device zone",
				  devid, physical_offset, physical_len);
			ret = -EUCLEAN;
			goto out;
		}
	}

out:
	free_extent_map(em);
	return ret;
}
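/*
 * Verify that every cached chunk mapping had all of its stripes matched by a
 * dev extent, as counted by verify_one_dev_extent() above.
 */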
static int verify_chunk_dev_extent_mapping(struct btrfs_fs_info *fs_info)
{
	struct extent_map_tree *em_tree = &fs_info->mapping_tree;
	struct extent_map *em;
	struct rb_node *node;
	int ret = 0;

	read_lock(&em_tree->lock);
	for (node = rb_first_cached(&em_tree->map); node; node = rb_next(node)) {
		em = rb_entry(node, struct extent_map, rb_node);
		if (em->map_lookup->num_stripes !=
		    em->map_lookup->verified_stripes) {
			btrfs_err(fs_info,
			"chunk %llu has missing dev extent, have %d expect %d",
				  em->start, em->map_lookup->verified_stripes,
				  em->map_lookup->num_stripes);
			ret = -EUCLEAN;
			goto out;
		}
	}
out:
	read_unlock(&em_tree->lock);
	return ret;
}

/*
 * Ensure that all dev extents are mapped to the correct chunk, otherwise
 * later chunk allocation/free would cause unexpected behavior.
 *
 * NOTE: This will iterate through the whole device tree, which should be of
 * the same size level as the chunk tree. This slightly increases mount time.
 */
int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info)
{
	struct btrfs_path *path;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_key key;
	u64 prev_devid = 0;
	u64 prev_dev_ext_end = 0;
	int ret = 0;

	/*
	 * We don't have a dev_root because we mounted with ignorebadroots and
	 * failed to load the root, so we want to skip the verification in this
	 * case for sure.
	 *
	 * However if the dev root is fine, but the tree itself is corrupted
	 * we'd still fail to mount. This verification is only to make sure
	 * writes can happen safely, so instead just bypass this check
	 * completely in the case of IGNOREBADROOTS.
	 */
	if (btrfs_test_opt(fs_info, IGNOREBADROOTS))
		return 0;

	key.objectid = 1;
	key.type = BTRFS_DEV_EXTENT_KEY;
	key.offset = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = READA_FORWARD;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
		ret = btrfs_next_leaf(root, path);
		if (ret < 0)
			goto out;
		/* No dev extents at all? Not good */
		if (ret > 0) {
			ret = -EUCLEAN;
			goto out;
		}
	}
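	/*
	 * Iterate over all dev extent items in (devid, physical offset)
	 * order, verifying each extent against its owning chunk and checking
	 * for overlaps with the previous extent on the same device.
	 */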
	while (1) {
		struct extent_buffer *leaf = path->nodes[0];
		struct btrfs_dev_extent *dext;
		int slot = path->slots[0];
		u64 chunk_offset;
		u64 physical_offset;
		u64 physical_len;
		u64 devid;

		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.type != BTRFS_DEV_EXTENT_KEY)
			break;
		devid = key.objectid;
		physical_offset = key.offset;

		dext = btrfs_item_ptr(leaf, slot, struct btrfs_dev_extent);
		chunk_offset = btrfs_dev_extent_chunk_offset(leaf, dext);
		physical_len = btrfs_dev_extent_length(leaf, dext);

		/* Check if this dev extent overlaps with the previous one */
		if (devid == prev_devid && physical_offset < prev_dev_ext_end) {
			btrfs_err(fs_info,
"dev extent devid %llu physical offset %llu overlap with previous dev extent end %llu",
				  devid, physical_offset, prev_dev_ext_end);
			ret = -EUCLEAN;
			goto out;
		}

		ret = verify_one_dev_extent(fs_info, chunk_offset, devid,
					    physical_offset, physical_len);
		if (ret < 0)
			goto out;
		prev_devid = devid;
		prev_dev_ext_end = physical_offset + physical_len;

		ret = btrfs_next_item(root, path);
		if (ret < 0)
			goto out;
		if (ret > 0) {
			ret = 0;
			break;
		}
	}

	/* Ensure all chunks have corresponding dev extents */
	ret = verify_chunk_dev_extent_mapping(fs_info);
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Check whether the given block group or device is pinned by any inode being
 * used as a swapfile.
 */
bool btrfs_pinned_by_swapfile(struct btrfs_fs_info *fs_info, void *ptr)
{
	struct btrfs_swapfile_pin *sp;
	struct rb_node *node;

	spin_lock(&fs_info->swapfile_pins_lock);
	node = fs_info->swapfile_pins.rb_node;
	while (node) {
		sp = rb_entry(node, struct btrfs_swapfile_pin, node);
		if (ptr < sp->ptr)
			node = node->rb_left;
		else if (ptr > sp->ptr)
			node = node->rb_right;
		else
			break;
	}
	spin_unlock(&fs_info->swapfile_pins_lock);
	return node != NULL;
}

static int relocating_repair_kthread(void *data)
{
	struct btrfs_block_group *cache = data;
	struct btrfs_fs_info *fs_info = cache->fs_info;
	u64 target;
	int ret = 0;

	target = cache->start;
	btrfs_put_block_group(cache);

	sb_start_write(fs_info->sb);
	if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE)) {
		btrfs_info(fs_info,
			   "zoned: skip relocating block group %llu to repair: EBUSY",
			   target);
		sb_end_write(fs_info->sb);
		return -EBUSY;
	}

	mutex_lock(&fs_info->reclaim_bgs_lock);

	/* Ensure block group still exists */
	cache = btrfs_lookup_block_group(fs_info, target);
	if (!cache)
		goto out;

	if (!test_bit(BLOCK_GROUP_FLAG_RELOCATING_REPAIR, &cache->runtime_flags))
		goto out;

	ret = btrfs_may_alloc_data_chunk(fs_info, target);
	if (ret < 0)
		goto out;

	btrfs_info(fs_info,
		   "zoned: relocating block group %llu to repair IO failure",
		   target);
	ret = btrfs_relocate_chunk(fs_info, target);

out:
	if (cache)
		btrfs_put_block_group(cache);
	mutex_unlock(&fs_info->reclaim_bgs_lock);
	btrfs_exclop_finish(fs_info);
	sb_end_write(fs_info->sb);

	return ret;
}
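/*
 * Kick off background relocation of the block group containing @logical so
 * that a failed zoned write can be repaired by rewriting the data elsewhere.
 * Returns false when the filesystem is not zoned, true otherwise (including
 * when the repair is skipped or already in flight).
 */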
bool btrfs_repair_one_zone(struct btrfs_fs_info *fs_info, u64 logical)
{
	struct btrfs_block_group *cache;

	if (!btrfs_is_zoned(fs_info))
		return false;

	/* Do not attempt to repair in degraded state */
	if (btrfs_test_opt(fs_info, DEGRADED))
		return true;

	cache = btrfs_lookup_block_group(fs_info, logical);
	if (!cache)
		return true;

	if (test_and_set_bit(BLOCK_GROUP_FLAG_RELOCATING_REPAIR, &cache->runtime_flags)) {
		btrfs_put_block_group(cache);
		return true;
	}

	kthread_run(relocating_repair_kthread, cache,
		    "btrfs-relocating-repair");

	return true;
}

static void map_raid56_repair_block(struct btrfs_io_context *bioc,
				    struct btrfs_io_stripe *smap,
				    u64 logical)
{
	int data_stripes = nr_bioc_data_stripes(bioc);
	int i;

	for (i = 0; i < data_stripes; i++) {
		u64 stripe_start = bioc->full_stripe_logical +
				   (i << BTRFS_STRIPE_LEN_SHIFT);

		if (logical >= stripe_start &&
		    logical < stripe_start + BTRFS_STRIPE_LEN)
			break;
	}
	ASSERT(i < data_stripes);
	smap->dev = bioc->stripes[i].dev;
	smap->physical = bioc->stripes[i].physical +
			((logical - bioc->full_stripe_logical) &
			 BTRFS_STRIPE_LEN_MASK);
}

/*
 * Map a repair write into a single device.
 *
 * A repair write is triggered by read time repair or scrub, and only updates
 * the contents of a single device. It does not update any other mirrors nor
 * go through the RMW path.
 *
 * Callers should ensure:
 *
 * - Call btrfs_bio_counter_inc_blocked() first
 * - The range does not cross stripe boundary
 * - Has a valid @mirror_num passed in.
 */
int btrfs_map_repair_block(struct btrfs_fs_info *fs_info,
			   struct btrfs_io_stripe *smap, u64 logical,
			   u32 length, int mirror_num)
{
	struct btrfs_io_context *bioc = NULL;
	u64 map_length = length;
	int mirror_ret = mirror_num;
	int ret;

	ASSERT(mirror_num > 0);

	ret = __btrfs_map_block(fs_info, BTRFS_MAP_WRITE, logical, &map_length,
				&bioc, smap, &mirror_ret, true);
	if (ret < 0)
		return ret;

	/* The map range should not cross stripe boundary. */
	ASSERT(map_length >= length);

	/* Already mapped to single stripe. */
	if (!bioc)
		goto out;

	/* Map the RAID56 multi-stripe writes to a single one. */
	if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		map_raid56_repair_block(bioc, smap, logical);
		goto out;
	}

	ASSERT(mirror_num <= bioc->num_stripes);
	smap->dev = bioc->stripes[mirror_num - 1].dev;
	smap->physical = bioc->stripes[mirror_num - 1].physical;
out:
	btrfs_put_bioc(bioc);
	ASSERT(smap->dev);
	return 0;
}
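/*
 * Illustrative sketch of a caller (not part of this file): a read-repair
 * path maps the bad range to a single stripe and then writes the good copy
 * to smap.dev at smap.physical. Here @logical, @length and @mirror_num are
 * hypothetical values and submit_repair_write() is a hypothetical helper:
 *
 *	struct btrfs_io_stripe smap = { 0 };
 *	int ret;
 *
 *	btrfs_bio_counter_inc_blocked(fs_info);
 *	ret = btrfs_map_repair_block(fs_info, &smap, logical, length,
 *				     mirror_num);
 *	if (!ret)
 *		submit_repair_write(&smap);
 *	btrfs_bio_counter_dec(fs_info);
 */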