// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/iocontext.h>
#include <linux/capability.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/semaphore.h>
#include <linux/uuid.h>
#include <linux/list_sort.h>
#include <asm/div64.h>
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "async-thread.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "math.h"
#include "dev-replace.h"
#include "sysfs.h"

const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
	[BTRFS_RAID_RAID10] = {
		.sub_stripes	= 2,
		.dev_stripes	= 1,
		.devs_max	= 0,	/* 0 == as many as possible */
		.devs_min	= 4,
		.tolerated_failures = 1,
		.devs_increment	= 2,
		.ncopies	= 2,
		.raid_name	= "raid10",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID10,
		.mindev_error	= BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 2,
		.devs_min	= 2,
		.tolerated_failures = 1,
		.devs_increment	= 2,
		.ncopies	= 2,
		.raid_name	= "raid1",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID1,
		.mindev_error	= BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET,
	},
	[BTRFS_RAID_DUP] = {
		.sub_stripes	= 1,
		.dev_stripes	= 2,
		.devs_max	= 1,
		.devs_min	= 1,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 2,
		.raid_name	= "dup",
		.bg_flag	= BTRFS_BLOCK_GROUP_DUP,
		.mindev_error	= 0,
	},
	[BTRFS_RAID_RAID0] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 2,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 1,
		.raid_name	= "raid0",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID0,
		.mindev_error	= 0,
	},
	[BTRFS_RAID_SINGLE] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 1,
		.devs_min	= 1,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 1,
		.raid_name	= "single",
		.bg_flag	= 0,
		.mindev_error	= 0,
	},
	[BTRFS_RAID_RAID5] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 2,
		.tolerated_failures = 1,
		.devs_increment	= 1,
		.ncopies	= 2,
		.raid_name	= "raid5",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID5,
		.mindev_error	= BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID6] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 3,
		.tolerated_failures = 2,
		.devs_increment	= 1,
		.ncopies	= 3,
		.raid_name	= "raid6",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID6,
		.mindev_error	= BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET,
	},
};

const char *get_raid_name(enum btrfs_raid_types type)
{
	if (type >= BTRFS_NR_RAID_TYPES)
		return NULL;

	return btrfs_raid_array[type].raid_name;
}
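/*
 * Example (illustrative, not part of the original file): the array above is
 * indexed by enum btrfs_raid_types, so profile parameters can be looked up
 * directly instead of being open-coded, e.g.:
 *
 *	const struct btrfs_raid_attr *attr = &btrfs_raid_array[BTRFS_RAID_RAID1];
 *
 *	if (num_devices < attr->devs_min)	// 2 for raid1
 *		return attr->mindev_error;	// BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET
 *
 * btrfs_check_raid_min_devices() later in this file follows this pattern.
 */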
static int init_first_rw_device(struct btrfs_trans_handle *trans,
				struct btrfs_fs_info *fs_info);
static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info);
static void __btrfs_reset_dev_stats(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
			     enum btrfs_map_op op,
			     u64 logical, u64 *length,
			     struct btrfs_bio **bbio_ret,
			     int mirror_num, int need_raid_map);

/*
 * Device locking
 * ==============
 *
 * There are several mutexes that protect manipulation of devices and low-level
 * structures like chunks but not block groups, extents or files
 *
 * uuid_mutex (global lock)
 * ------------------------
 * protects the fs_uuids list that tracks all per-fs fs_devices, resulting from
 * the SCAN_DEV ioctl registration or from mount either implicitly (the first
 * device) or requested by the device= mount option
 *
 * the mutex can be very coarse and can cover long-running operations
 *
 * protects: updates to fs_devices counters like missing devices, rw devices,
 * seeding, structure cloning, opening/closing devices at mount/umount time
 *
 * global::fs_devs - add, remove, updates to the global list
 *
 * does not protect: manipulation of the fs_devices::devices list!
 *
 * btrfs_device::name - renames (write side), read is RCU
 *
 * fs_devices::device_list_mutex (per-fs, with RCU)
 * ------------------------------------------------
 * protects updates to fs_devices::devices, ie. adding and deleting
 *
 * simple list traversal with read-only actions can be done with RCU protection
 *
 * may be used to exclude some operations from running concurrently without any
 * modifications to the list (see write_all_supers)
 *
 * balance_mutex
 * -------------
 * protects balance structures (status, state) and context accessed from
 * several places (internally, ioctl)
 *
 * chunk_mutex
 * -----------
 * protects chunks, adding or removing during allocation, trim or when a new
 * device is added/removed
 *
 * cleaner_mutex
 * -------------
 * a big lock that is held by the cleaner thread and prevents running subvolume
 * cleaning together with relocation or delayed iputs
 *
 *
 * Lock nesting
 * ============
 *
 * uuid_mutex
 *   volume_mutex
 *     device_list_mutex
 *       chunk_mutex
 *     balance_mutex
 *
 *
 * Exclusive operations, BTRFS_FS_EXCL_OP
 * ======================================
 *
 * Maintains the exclusivity of the following operations that apply to the
 * whole filesystem and cannot run in parallel.
 *
 * - Balance (*)
 * - Device add
 * - Device remove
 * - Device replace (*)
 * - Resize
 *
 * The device operations (as above) can be in one of the following states:
 *
 * - Running state
 * - Paused state
 * - Completed state
 *
 * Only device operations marked with (*) can go into the Paused state for the
 * following reasons:
 *
 * - ioctl (only Balance can be Paused through ioctl)
 * - filesystem remounted as read-only
 * - filesystem unmounted and mounted as read-only
 * - system power-cycle and filesystem mounted as read-only
 * - filesystem or device errors leading to forced read-only
 *
 * BTRFS_FS_EXCL_OP flag is set and cleared using atomic operations.
 * During the course of Paused state, the BTRFS_FS_EXCL_OP remains set.
 * A device operation in Paused or Running state can be canceled or resumed
 * either by ioctl (Balance only) or when remounted as read-write.
 * BTRFS_FS_EXCL_OP flag is cleared when the device operation is canceled or
 * completed.
 */
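/*
 * Illustrative sketch (not from the original file): per the nesting rules
 * above, an operation that needs several of these locks acquires them
 * outermost-first, e.g.:
 *
 *	mutex_lock(&uuid_mutex);
 *	mutex_lock(&fs_devices->device_list_mutex);
 *	mutex_lock(&fs_info->chunk_mutex);
 *	...
 *	mutex_unlock(&fs_info->chunk_mutex);
 *	mutex_unlock(&fs_devices->device_list_mutex);
 *	mutex_unlock(&uuid_mutex);
 *
 * Taking uuid_mutex while already holding device_list_mutex would invert the
 * documented order and risk a deadlock.
 */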
DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);
struct list_head *btrfs_get_fs_uuids(void)
{
	return &fs_uuids;
}

/*
 * alloc_fs_devices - allocate struct btrfs_fs_devices
 * @fsid:	if not NULL, copy the uuid to fs_devices::fsid
 *
 * Return a pointer to a new struct btrfs_fs_devices on success, or ERR_PTR().
 * The returned struct is not linked onto any lists and can be destroyed with
 * kfree() right away.
 */
static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid)
{
	struct btrfs_fs_devices *fs_devs;

	fs_devs = kzalloc(sizeof(*fs_devs), GFP_KERNEL);
	if (!fs_devs)
		return ERR_PTR(-ENOMEM);

	mutex_init(&fs_devs->device_list_mutex);

	INIT_LIST_HEAD(&fs_devs->devices);
	INIT_LIST_HEAD(&fs_devs->resized_devices);
	INIT_LIST_HEAD(&fs_devs->alloc_list);
	INIT_LIST_HEAD(&fs_devs->fs_list);
	if (fsid)
		memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE);

	return fs_devs;
}

void btrfs_free_device(struct btrfs_device *device)
{
	rcu_string_free(device->name);
	bio_put(device->flush_bio);
	kfree(device);
}

static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;
	WARN_ON(fs_devices->opened);
	while (!list_empty(&fs_devices->devices)) {
		device = list_entry(fs_devices->devices.next,
				    struct btrfs_device, dev_list);
		list_del(&device->dev_list);
		btrfs_free_device(device);
	}
	kfree(fs_devices);
}

static void btrfs_kobject_uevent(struct block_device *bdev,
				 enum kobject_action action)
{
	int ret;

	ret = kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, action);
	if (ret)
		pr_warn("BTRFS: Sending event '%d' to kobject: '%s' (%p): failed\n",
			action,
			kobject_name(&disk_to_dev(bdev->bd_disk)->kobj),
			&disk_to_dev(bdev->bd_disk)->kobj);
}

void __exit btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;

	while (!list_empty(&fs_uuids)) {
		fs_devices = list_entry(fs_uuids.next,
					struct btrfs_fs_devices, fs_list);
		list_del(&fs_devices->fs_list);
		free_fs_devices(fs_devices);
	}
}

/*
 * Returns a pointer to a new btrfs_device on success; ERR_PTR() on error.
 * Returned struct is not linked onto any lists and must be destroyed using
 * btrfs_free_device.
 */
static struct btrfs_device *__alloc_device(void)
{
	struct btrfs_device *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	/*
	 * Preallocate a bio that's always going to be used for flushing device
	 * barriers and matches the device lifespan
	 */
	dev->flush_bio = bio_alloc_bioset(GFP_KERNEL, 0, NULL);
	if (!dev->flush_bio) {
		kfree(dev);
		return ERR_PTR(-ENOMEM);
	}

	INIT_LIST_HEAD(&dev->dev_list);
	INIT_LIST_HEAD(&dev->dev_alloc_list);
	INIT_LIST_HEAD(&dev->resized_list);

	spin_lock_init(&dev->io_lock);

	atomic_set(&dev->reada_in_flight, 0);
	atomic_set(&dev->dev_stats_ccnt, 0);
	btrfs_device_data_ordered_init(dev);
	INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
	INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);

	return dev;
}
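/*
 * Illustrative sketch (not from the original file): the allocation helpers
 * above pair with their matching free routines; nothing is linked onto any
 * global lists yet, so error paths can free the objects directly:
 *
 *	struct btrfs_fs_devices *fs_devs = alloc_fs_devices(fsid);
 *
 *	if (IS_ERR(fs_devs))
 *		return PTR_ERR(fs_devs);
 *	...
 *	free_fs_devices(fs_devs);	// also frees any attached devices
 *
 * A btrfs_device from __alloc_device() is likewise released with
 * btrfs_free_device() until it becomes visible to RCU readers, after which
 * it must go through call_rcu() (see free_device_rcu() below).
 */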
/*
 * Find a device specified by @devid or @uuid in the list of @fs_devices, or
 * return NULL.
 *
 * If devid and uuid are both specified, the match must be exact, otherwise
 * only devid is used.
 */
static struct btrfs_device *find_device(struct btrfs_fs_devices *fs_devices,
					u64 devid, const u8 *uuid)
{
	struct btrfs_device *dev;

	list_for_each_entry(dev, &fs_devices->devices, dev_list) {
		if (dev->devid == devid &&
		    (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
			return dev;
		}
	}
	return NULL;
}

static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
{
	struct btrfs_fs_devices *fs_devices;

	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
			return fs_devices;
	}
	return NULL;
}

static int
btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder,
		      int flush, struct block_device **bdev,
		      struct buffer_head **bh)
{
	int ret;

	*bdev = blkdev_get_by_path(device_path, flags, holder);

	if (IS_ERR(*bdev)) {
		ret = PTR_ERR(*bdev);
		goto error;
	}

	if (flush)
		filemap_write_and_wait((*bdev)->bd_inode->i_mapping);
	ret = set_blocksize(*bdev, BTRFS_BDEV_BLOCKSIZE);
	if (ret) {
		blkdev_put(*bdev, flags);
		goto error;
	}
	invalidate_bdev(*bdev);
	*bh = btrfs_read_dev_super(*bdev);
	if (IS_ERR(*bh)) {
		ret = PTR_ERR(*bh);
		blkdev_put(*bdev, flags);
		goto error;
	}

	return 0;

error:
	*bdev = NULL;
	*bh = NULL;
	return ret;
}
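/*
 * Illustrative sketch (not from the original file): callers such as
 * btrfs_open_one_device() below use the helper above to open a device and
 * read its superblock in one step, and own both references on success:
 *
 *	struct block_device *bdev;
 *	struct buffer_head *bh;
 *
 *	ret = btrfs_get_bdev_and_sb(path, FMODE_READ | FMODE_EXCL, holder,
 *				    1, &bdev, &bh);
 *	if (ret)
 *		return ret;	// on failure both *bdev and *bh are NULL
 *	...
 *	brelse(bh);
 *	blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
 */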
static void requeue_list(struct btrfs_pending_bios *pending_bios,
			 struct bio *head, struct bio *tail)
{
	struct bio *old_head;

	old_head = pending_bios->head;
	pending_bios->head = head;
	if (pending_bios->tail)
		tail->bi_next = old_head;
	else
		pending_bios->tail = tail;
}

/*
 * we try to collect pending bios for a device so we don't get a large
 * number of procs sending bios down to the same device. This greatly
 * improves the scheduler's ability to collect and merge the bios.
 *
 * But, it also turns into a long list of bios to process and that is sure
 * to eventually make the worker thread block. The solution here is to
 * make some progress and then put this work struct back at the end of
 * the list if the block device is congested. This way, multiple devices
 * can make progress from a single worker thread.
 */
static noinline void run_scheduled_bios(struct btrfs_device *device)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct bio *pending;
	struct backing_dev_info *bdi;
	struct btrfs_pending_bios *pending_bios;
	struct bio *tail;
	struct bio *cur;
	int again = 0;
	unsigned long num_run;
	unsigned long batch_run = 0;
	unsigned long last_waited = 0;
	int force_reg = 0;
	int sync_pending = 0;
	struct blk_plug plug;

	/*
	 * this function runs all the bios we've collected for
	 * a particular device. We don't want to wander off to
	 * another device without first sending all of these down.
	 * So, setup a plug here and finish it off before we return
	 */
	blk_start_plug(&plug);

	bdi = device->bdev->bd_bdi;

loop:
	spin_lock(&device->io_lock);

loop_lock:
	num_run = 0;

	/* take all the bios off the list at once and process them
	 * later on (without the lock held). But, remember the
	 * tail and other pointers so the bios can be properly reinserted
	 * into the list if we hit congestion
	 */
	if (!force_reg && device->pending_sync_bios.head) {
		pending_bios = &device->pending_sync_bios;
		force_reg = 1;
	} else {
		pending_bios = &device->pending_bios;
		force_reg = 0;
	}

	pending = pending_bios->head;
	tail = pending_bios->tail;
	WARN_ON(pending && !tail);

	/*
	 * if pending was null this time around, no bios need processing
	 * at all and we can stop. Otherwise it'll loop back up again
	 * and do an additional check so no bios are missed.
	 *
	 * device->running_pending is used to synchronize with the
	 * schedule_bio code.
	 */
	if (device->pending_sync_bios.head == NULL &&
	    device->pending_bios.head == NULL) {
		again = 0;
		device->running_pending = 0;
	} else {
		again = 1;
		device->running_pending = 1;
	}

	pending_bios->head = NULL;
	pending_bios->tail = NULL;

	spin_unlock(&device->io_lock);

	while (pending) {

		rmb();
		/* we want to work on both lists, but do more bios on the
		 * sync list than the regular list
		 */
		if ((num_run > 32 &&
		     pending_bios != &device->pending_sync_bios &&
		     device->pending_sync_bios.head) ||
		    (num_run > 64 && pending_bios == &device->pending_sync_bios &&
		     device->pending_bios.head)) {
			spin_lock(&device->io_lock);
			requeue_list(pending_bios, pending, tail);
			goto loop_lock;
		}

		cur = pending;
		pending = pending->bi_next;
		cur->bi_next = NULL;

		BUG_ON(atomic_read(&cur->__bi_cnt) == 0);

		/*
		 * if we're doing the sync list, record that our
		 * plug has some sync requests on it
		 *
		 * If we're doing the regular list and there are
		 * sync requests sitting around, unplug before
		 * we add more
		 */
		if (pending_bios == &device->pending_sync_bios) {
			sync_pending = 1;
		} else if (sync_pending) {
			blk_finish_plug(&plug);
			blk_start_plug(&plug);
			sync_pending = 0;
		}

		btrfsic_submit_bio(cur);
		num_run++;
		batch_run++;

		cond_resched();

		/*
		 * we made progress, there is more work to do and the bdi
		 * is now congested. Back off and let other work structs
		 * run instead
		 */
		if (pending && bdi_write_congested(bdi) && batch_run > 8 &&
		    fs_info->fs_devices->open_devices > 1) {
			struct io_context *ioc;

			ioc = current->io_context;

			/*
			 * the main goal here is that we don't want to
			 * block if we're going to be able to submit
			 * more requests without blocking.
			 *
			 * This code does two great things, it pokes into
			 * the elevator code from a filesystem _and_
			 * it makes assumptions about how batching works.
			 */
			if (ioc && ioc->nr_batch_requests > 0 &&
			    time_before(jiffies, ioc->last_waited + HZ/50UL) &&
			    (last_waited == 0 ||
			     ioc->last_waited == last_waited)) {
				/*
				 * we want to go through our batch of
				 * requests and stop. So, we copy out
				 * the ioc->last_waited time and test
				 * against it before looping
				 */
				last_waited = ioc->last_waited;
				cond_resched();
				continue;
			}
			spin_lock(&device->io_lock);
			requeue_list(pending_bios, pending, tail);
			device->running_pending = 1;

			spin_unlock(&device->io_lock);
			btrfs_queue_work(fs_info->submit_workers,
					 &device->work);
			goto done;
		}
	}

	cond_resched();
	if (again)
		goto loop;

	spin_lock(&device->io_lock);
	if (device->pending_bios.head || device->pending_sync_bios.head)
		goto loop_lock;
	spin_unlock(&device->io_lock);

done:
	blk_finish_plug(&plug);
}

static void pending_bios_fn(struct btrfs_work *work)
{
	struct btrfs_device *device;

	device = container_of(work, struct btrfs_device, work);
	run_scheduled_bios(device);
}
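/*
 * Illustrative sketch (not from the original excerpt): the producer side of
 * this machinery (btrfs_schedule_bio(), later in this file) appends a bio to
 * device->pending_bios or device->pending_sync_bios under io_lock and only
 * kicks the worker when it is not already running:
 *
 *	spin_lock(&device->io_lock);
 *	// ...link the bio onto pending(_sync)_bios via the tail pointer...
 *	if (device->running_pending)
 *		should_queue = 0;
 *	spin_unlock(&device->io_lock);
 *
 *	if (should_queue)
 *		btrfs_queue_work(fs_info->submit_workers, &device->work);
 *
 * device->work is wired up to pending_bios_fn(), so the worker thread ends
 * up in run_scheduled_bios() above.
 */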
/*
 * Search and remove all stale devices (devices which are not mounted).
 * When both inputs are NULL, it will search and release all stale devices.
 * path:	Optional. When provided, it will release all unmounted devices
 *		matching this path only.
 * skip_dev:	Optional. Will skip this device when searching for stale
 *		devices.
 */
static void btrfs_free_stale_devices(const char *path,
				     struct btrfs_device *skip_dev)
{
	struct btrfs_fs_devices *fs_devs, *tmp_fs_devs;
	struct btrfs_device *dev, *tmp_dev;

	list_for_each_entry_safe(fs_devs, tmp_fs_devs, &fs_uuids, fs_list) {

		if (fs_devs->opened)
			continue;

		list_for_each_entry_safe(dev, tmp_dev,
					 &fs_devs->devices, dev_list) {
			int not_found = 0;

			if (skip_dev && skip_dev == dev)
				continue;
			if (path && !dev->name)
				continue;

			rcu_read_lock();
			if (path)
				not_found = strcmp(rcu_str_deref(dev->name),
						   path);
			rcu_read_unlock();
			if (not_found)
				continue;

			/* delete the stale device */
			if (fs_devs->num_devices == 1) {
				btrfs_sysfs_remove_fsid(fs_devs);
				list_del(&fs_devs->fs_list);
				free_fs_devices(fs_devs);
				break;
			} else {
				fs_devs->num_devices--;
				list_del(&dev->dev_list);
				btrfs_free_device(dev);
			}
		}
	}
}
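/*
 * Illustrative sketch (not from the original file): btrfs_free_stale_devices()
 * supports a few calling patterns, depending on which arguments are NULL:
 *
 *	btrfs_free_stale_devices(NULL, NULL);	// drop every unmounted device
 *	btrfs_free_stale_devices(path, NULL);	// drop unmounted devices at path
 *	btrfs_free_stale_devices(path, device);	// same, but keep 'device'
 *
 * device_list_add() below uses the third form so a freshly registered device
 * is not itself treated as stale.
 */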
static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
				 struct btrfs_device *device, fmode_t flags,
				 void *holder)
{
	struct request_queue *q;
	struct block_device *bdev;
	struct buffer_head *bh;
	struct btrfs_super_block *disk_super;
	u64 devid;
	int ret;

	if (device->bdev)
		return -EINVAL;
	if (!device->name)
		return -EINVAL;

	ret = btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
				    &bdev, &bh);
	if (ret)
		return ret;

	disk_super = (struct btrfs_super_block *)bh->b_data;
	devid = btrfs_stack_device_id(&disk_super->dev_item);
	if (devid != device->devid)
		goto error_brelse;

	if (memcmp(device->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE))
		goto error_brelse;

	device->generation = btrfs_super_generation(disk_super);

	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
		clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
		fs_devices->seeding = 1;
	} else {
		if (bdev_read_only(bdev))
			clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
		else
			set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
	}

	q = bdev_get_queue(bdev);
	if (!blk_queue_nonrot(q))
		fs_devices->rotating = 1;

	device->bdev = bdev;
	clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
	device->mode = flags;

	fs_devices->open_devices++;
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
		fs_devices->rw_devices++;
		list_add_tail(&device->dev_alloc_list, &fs_devices->alloc_list);
	}
	brelse(bh);

	return 0;

error_brelse:
	brelse(bh);
	blkdev_put(bdev, flags);

	return -EINVAL;
}

/*
 * Add new device to list of registered devices
 *
 * Returns:
 * device pointer which was just added or updated when successful
 * error pointer when failed
 */
static noinline struct btrfs_device *device_list_add(const char *path,
			   struct btrfs_super_block *disk_super)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices;
	struct rcu_string *name;
	u64 found_transid = btrfs_super_generation(disk_super);
	u64 devid = btrfs_stack_device_id(&disk_super->dev_item);

	fs_devices = find_fsid(disk_super->fsid);
	if (!fs_devices) {
		fs_devices = alloc_fs_devices(disk_super->fsid);
		if (IS_ERR(fs_devices))
			return ERR_CAST(fs_devices);

		list_add(&fs_devices->fs_list, &fs_uuids);

		device = NULL;
	} else {
		device = find_device(fs_devices, devid,
				disk_super->dev_item.uuid);
	}

	if (!device) {
		if (fs_devices->opened)
			return ERR_PTR(-EBUSY);

		device = btrfs_alloc_device(NULL, &devid,
					    disk_super->dev_item.uuid);
		if (IS_ERR(device)) {
			/* we can safely leave the fs_devices entry around */
			return device;
		}

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name) {
			btrfs_free_device(device);
			return ERR_PTR(-ENOMEM);
		}
		rcu_assign_pointer(device->name, name);

		mutex_lock(&fs_devices->device_list_mutex);
		list_add_rcu(&device->dev_list, &fs_devices->devices);
		fs_devices->num_devices++;
		mutex_unlock(&fs_devices->device_list_mutex);

		device->fs_devices = fs_devices;
		btrfs_free_stale_devices(path, device);

		if (disk_super->label[0])
			pr_info("BTRFS: device label %s devid %llu transid %llu %s\n",
				disk_super->label, devid, found_transid, path);
		else
			pr_info("BTRFS: device fsid %pU devid %llu transid %llu %s\n",
				disk_super->fsid, devid, found_transid, path);

	} else if (!device->name || strcmp(device->name->str, path)) {
		/*
		 * When FS is already mounted.
		 * 1. If you are here and if the device->name is NULL that
		 *    means this device was missing at time of FS mount.
		 * 2. If you are here and if the device->name is different
		 *    from 'path' that means either
		 *	a. The same device disappeared and reappeared with
		 *	   different name. or
		 *	b. The missing-disk-which-was-replaced, has
		 *	   reappeared now.
		 *
		 * We must allow 1 and 2a above. But 2b would be spurious
		 * and unintentional.
		 *
		 * Further in case of 1 and 2a above, the disk at 'path'
		 * would have missed some transaction when it was away and
		 * in case of 2a the stale bdev has to be updated as well.
		 * 2b must not be allowed at any time.
		 */

		/*
		 * For now, we do allow update to btrfs_fs_device through the
		 * btrfs dev scan cli after FS has been mounted. We're still
		 * tracking a problem where systems fail mount by subvolume id
		 * when we reject replacement on a mounted FS.
		 */
		if (!fs_devices->opened && found_transid < device->generation) {
			/*
			 * That is if the FS is _not_ mounted and if you
			 * are here, that means there is more than one
			 * disk with the same uuid and devid. We keep the
			 * one with the larger generation number or the
			 * last-in if generations are equal.
			 */
			return ERR_PTR(-EEXIST);
		}

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name)
			return ERR_PTR(-ENOMEM);
		rcu_string_free(device->name);
		rcu_assign_pointer(device->name, name);
		if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
			fs_devices->missing_devices--;
			clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
		}
	}

	/*
	 * Unmount does not free the btrfs_device struct but would zero
	 * generation along with most of the other members. So just update
	 * it back. We need it to pick the disk with largest generation
	 * (as above).
	 */
	if (!fs_devices->opened)
		device->generation = found_transid;

	fs_devices->total_devices = btrfs_super_num_devices(disk_super);

	return device;
}
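/*
 * Illustrative sketch (not from the original file): a typical registration
 * path, as driven by btrfs_scan_one_device() below, reads the superblock of
 * a candidate device and hands it to device_list_add() under uuid_mutex:
 *
 *	mutex_lock(&uuid_mutex);
 *	device = device_list_add(path, disk_super);
 *	if (IS_ERR(device))
 *		ret = PTR_ERR(device);	// e.g. -EBUSY, -EEXIST, -ENOMEM
 *	else
 *		fs_devices = device->fs_devices;
 *	mutex_unlock(&uuid_mutex);
 */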
static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
{
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_device *device;
	struct btrfs_device *orig_dev;

	fs_devices = alloc_fs_devices(orig->fsid);
	if (IS_ERR(fs_devices))
		return fs_devices;

	mutex_lock(&orig->device_list_mutex);
	fs_devices->total_devices = orig->total_devices;

	/* We have held the volume lock, it is safe to get the devices. */
	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
		struct rcu_string *name;

		device = btrfs_alloc_device(NULL, &orig_dev->devid,
					    orig_dev->uuid);
		if (IS_ERR(device))
			goto error;

		/*
		 * This is ok to do without rcu read locked because we hold the
		 * uuid mutex so nothing we touch in here is going to disappear.
		 */
		if (orig_dev->name) {
			name = rcu_string_strdup(orig_dev->name->str,
						 GFP_KERNEL);
			if (!name) {
				btrfs_free_device(device);
				goto error;
			}
			rcu_assign_pointer(device->name, name);
		}

		list_add(&device->dev_list, &fs_devices->devices);
		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	}
	mutex_unlock(&orig->device_list_mutex);
	return fs_devices;
error:
	mutex_unlock(&orig->device_list_mutex);
	free_fs_devices(fs_devices);
	return ERR_PTR(-ENOMEM);
}

/*
 * After we have read the system tree and know devids belonging to this
 * filesystem, remove any device that does not belong there.
 */
void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices, int step)
{
	struct btrfs_device *device, *next;
	struct btrfs_device *latest_dev = NULL;

	mutex_lock(&uuid_mutex);
again:
	/* This is the initialized path, it is safe to release the devices. */
	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
		if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
			     &device->dev_state)) {
			if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
				      &device->dev_state) &&
			    (!latest_dev ||
			     device->generation > latest_dev->generation)) {
				latest_dev = device;
			}
			continue;
		}

		if (device->devid == BTRFS_DEV_REPLACE_DEVID) {
			/*
			 * In the first step, keep the device which has
			 * the correct fsid and the devid that is used
			 * for the dev_replace procedure.
			 * In the second step, the dev_replace state is
			 * read from the device tree and it is known
			 * whether the procedure is really active or
			 * not, which means whether this device is
			 * used or whether it should be removed.
			 */
			if (step == 0 || test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
						  &device->dev_state)) {
				continue;
			}
		}
		if (device->bdev) {
			blkdev_put(device->bdev, device->mode);
			device->bdev = NULL;
			fs_devices->open_devices--;
		}
		if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
			list_del_init(&device->dev_alloc_list);
			clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
			if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
				      &device->dev_state))
				fs_devices->rw_devices--;
		}
		list_del_init(&device->dev_list);
		fs_devices->num_devices--;
		btrfs_free_device(device);
	}

	if (fs_devices->seed) {
		fs_devices = fs_devices->seed;
		goto again;
	}

	fs_devices->latest_bdev = latest_dev->bdev;

	mutex_unlock(&uuid_mutex);
}
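/*
 * Illustrative sketch (not from the original file; the actual call sites live
 * in disk-io.c): the two-step behavior described above is driven by the mount
 * path calling this with step 0 before the dev-replace state has been read,
 * and with step 1 once it is known whether a replace is really running:
 *
 *	btrfs_free_extra_devids(fs_devices, 0);	// keep potential replace tgt
 *	// ...read dev-replace item, set BTRFS_DEV_STATE_REPLACE_TGT...
 *	btrfs_free_extra_devids(fs_devices, 1);	// now drop it if unused
 */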
static void free_device_rcu(struct rcu_head *head)
{
	struct btrfs_device *device;

	device = container_of(head, struct btrfs_device, rcu);
	btrfs_free_device(device);
}

static void btrfs_close_bdev(struct btrfs_device *device)
{
	if (!device->bdev)
		return;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
		sync_blockdev(device->bdev);
		invalidate_bdev(device->bdev);
	}

	blkdev_put(device->bdev, device->mode);
}

static void btrfs_prepare_close_one_device(struct btrfs_device *device)
{
	struct btrfs_fs_devices *fs_devices = device->fs_devices;
	struct btrfs_device *new_device;
	struct rcu_string *name;

	if (device->bdev)
		fs_devices->open_devices--;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
		list_del_init(&device->dev_alloc_list);
		fs_devices->rw_devices--;
	}

	if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
		fs_devices->missing_devices--;

	new_device = btrfs_alloc_device(NULL, &device->devid,
					device->uuid);
	BUG_ON(IS_ERR(new_device)); /* -ENOMEM */

	/* Safe because we are under uuid_mutex */
	if (device->name) {
		name = rcu_string_strdup(device->name->str, GFP_NOFS);
		BUG_ON(!name); /* -ENOMEM */
		rcu_assign_pointer(new_device->name, name);
	}

	list_replace_rcu(&device->dev_list, &new_device->dev_list);
	new_device->fs_devices = device->fs_devices;
}

static int close_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device, *tmp;
	struct list_head pending_put;

	INIT_LIST_HEAD(&pending_put);

	if (--fs_devices->opened > 0)
		return 0;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry_safe(device, tmp, &fs_devices->devices, dev_list) {
		btrfs_prepare_close_one_device(device);
		list_add(&device->dev_list, &pending_put);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	/*
	 * btrfs_show_devname() takes the device_list_mutex, and a call to
	 * blkdev_put() can lead the VFS back into that function. So do the
	 * put outside of device_list_mutex, as of now.
	 */
	while (!list_empty(&pending_put)) {
		device = list_first_entry(&pending_put,
					  struct btrfs_device, dev_list);
		list_del(&device->dev_list);
		btrfs_close_bdev(device);
		call_rcu(&device->rcu, free_device_rcu);
	}

	WARN_ON(fs_devices->open_devices);
	WARN_ON(fs_devices->rw_devices);
	fs_devices->opened = 0;
	fs_devices->seeding = 0;

	return 0;
}

int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_fs_devices *seed_devices = NULL;
	int ret;

	mutex_lock(&uuid_mutex);
	ret = close_fs_devices(fs_devices);
	if (!fs_devices->opened) {
		seed_devices = fs_devices->seed;
		fs_devices->seed = NULL;
	}
	mutex_unlock(&uuid_mutex);

	while (seed_devices) {
		fs_devices = seed_devices;
		seed_devices = fs_devices->seed;
		close_fs_devices(fs_devices);
		free_fs_devices(fs_devices);
	}
	return ret;
}
static int open_fs_devices(struct btrfs_fs_devices *fs_devices,
			   fmode_t flags, void *holder)
{
	struct btrfs_device *device;
	struct btrfs_device *latest_dev = NULL;
	int ret = 0;

	flags |= FMODE_EXCL;

	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		/* Just open everything we can; ignore failures here */
		if (btrfs_open_one_device(fs_devices, device, flags, holder))
			continue;

		if (!latest_dev ||
		    device->generation > latest_dev->generation)
			latest_dev = device;
	}
	if (fs_devices->open_devices == 0) {
		ret = -EINVAL;
		goto out;
	}
	fs_devices->opened = 1;
	fs_devices->latest_bdev = latest_dev->bdev;
	fs_devices->total_rw_bytes = 0;
out:
	return ret;
}

static int devid_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct btrfs_device *dev1, *dev2;

	dev1 = list_entry(a, struct btrfs_device, dev_list);
	dev2 = list_entry(b, struct btrfs_device, dev_list);

	if (dev1->devid < dev2->devid)
		return -1;
	else if (dev1->devid > dev2->devid)
		return 1;
	return 0;
}

int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       fmode_t flags, void *holder)
{
	int ret;

	mutex_lock(&uuid_mutex);
	mutex_lock(&fs_devices->device_list_mutex);
	if (fs_devices->opened) {
		fs_devices->opened++;
		ret = 0;
	} else {
		list_sort(NULL, &fs_devices->devices, devid_cmp);
		ret = open_fs_devices(fs_devices, flags, holder);
	}
	mutex_unlock(&fs_devices->device_list_mutex);
	mutex_unlock(&uuid_mutex);

	return ret;
}

static void btrfs_release_disk_super(struct page *page)
{
	kunmap(page);
	put_page(page);
}

static int btrfs_read_disk_super(struct block_device *bdev, u64 bytenr,
				 struct page **page,
				 struct btrfs_super_block **disk_super)
{
	void *p;
	pgoff_t index;

	/* make sure our super fits in the device */
	if (bytenr + PAGE_SIZE >= i_size_read(bdev->bd_inode))
		return 1;

	/* make sure our super fits in the page */
	if (sizeof(**disk_super) > PAGE_SIZE)
		return 1;

	/* make sure our super doesn't straddle pages on disk */
	index = bytenr >> PAGE_SHIFT;
	if ((bytenr + sizeof(**disk_super) - 1) >> PAGE_SHIFT != index)
		return 1;

	/* pull in the page with our super */
	*page = read_cache_page_gfp(bdev->bd_inode->i_mapping,
				    index, GFP_KERNEL);

	if (IS_ERR_OR_NULL(*page))
		return 1;

	p = kmap(*page);

	/* align our pointer to the offset of the super block */
	*disk_super = p + (bytenr & ~PAGE_MASK);

	if (btrfs_super_bytenr(*disk_super) != bytenr ||
	    btrfs_super_magic(*disk_super) != BTRFS_MAGIC) {
		btrfs_release_disk_super(*page);
		return 1;
	}

	if ((*disk_super)->label[0] &&
	    (*disk_super)->label[BTRFS_LABEL_SIZE - 1])
		(*disk_super)->label[BTRFS_LABEL_SIZE - 1] = '\0';

	return 0;
}
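/*
 * Worked example (illustrative, not from the original file): with 4K pages
 * (PAGE_SHIFT == 12) and the primary superblock at bytenr 65536 (64K),
 * btrfs_read_disk_super() computes
 *
 *	index = 65536 >> 12 = 16
 *	offset in page = 65536 & ~PAGE_MASK = 0
 *
 * and the straddle check passes because 65536 + sizeof(super) - 1 still
 * shifts down to page index 16. A bytenr that placed the superblock across
 * a page boundary would fail that check and the scan would treat the device
 * as having no usable super.
 */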
/*
 * Look for a btrfs signature on a device. This may be called outside of the
 * mount path and we are not allowed to call set_blocksize during the scan.
 * The superblock is read via pagecache.
 */
int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
			  struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_super_block *disk_super;
	struct btrfs_device *device;
	struct block_device *bdev;
	struct page *page;
	int ret = 0;
	u64 bytenr;

	/*
	 * we would like to check all the supers, but that would make
	 * a btrfs mount succeed after a mkfs from a different FS.
	 * So, we need to add a special mount option to scan for
	 * later supers, using BTRFS_SUPER_MIRROR_MAX instead
	 */
	bytenr = btrfs_sb_offset(0);
	flags |= FMODE_EXCL;

	bdev = blkdev_get_by_path(path, flags, holder);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	if (btrfs_read_disk_super(bdev, bytenr, &page, &disk_super)) {
		ret = -EINVAL;
		goto error_bdev_put;
	}

	mutex_lock(&uuid_mutex);
	device = device_list_add(path, disk_super);
	if (IS_ERR(device))
		ret = PTR_ERR(device);
	else
		*fs_devices_ret = device->fs_devices;
	mutex_unlock(&uuid_mutex);

	btrfs_release_disk_super(page);

error_bdev_put:
	blkdev_put(bdev, flags);

	return ret;
}
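/*
 * Illustrative sketch (not from the original file): this is the entry point
 * behind the BTRFS_IOC_SCAN_DEV ioctl and the mount-time device= scan in
 * super.c; a caller only needs a path and ends up with the registered
 * fs_devices on success:
 *
 *	struct btrfs_fs_devices *fs_devices;
 *
 *	ret = btrfs_scan_one_device("/dev/sdb", FMODE_READ, holder,
 *				    &fs_devices);
 *	if (!ret)
 *		// fs_devices now tracks the filesystem /dev/sdb belongs to
 */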
/* helper to account the used device space in the range */
int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
				   u64 end, u64 *length)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->fs_info->dev_root;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 extent_end;
	int ret;
	int slot;
	struct extent_buffer *l;

	*length = 0;

	if (start >= device->total_bytes ||
	    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = READA_FORWARD;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid, key.type);
		if (ret < 0)
			goto out;
	}

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (key.type != BTRFS_DEV_EXTENT_KEY)
			goto next;

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (key.offset <= start && extent_end > end) {
			*length = end - start + 1;
			break;
		} else if (key.offset <= start && extent_end > start)
			*length += extent_end - start;
		else if (key.offset > start && extent_end <= end)
			*length += extent_end - key.offset;
		else if (key.offset > start && key.offset <= end) {
			*length += end - key.offset + 1;
			break;
		} else if (key.offset > end)
			break;

next:
		path->slots[0]++;
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
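/*
 * Illustrative diagram (not from the original file) of the four overlap
 * cases handled in btrfs_account_dev_extents_size() above, for a query
 * range [start, end]:
 *
 *	extent:  |--------------------|       counted portion
 *	range:       [start....end]           end - start + 1 (covers range)
 *
 *	extent:  |--------|
 *	range:       [start........end]       extent_end - start
 *
 *	extent:          |----|
 *	range:       [start........end]       whole extent length
 *
 *	extent:                  |--------|
 *	range:       [start........end]       end - key.offset + 1
 *
 * An extent starting beyond 'end' terminates the walk.
 */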
static int contains_pending_extent(struct btrfs_transaction *transaction,
				   struct btrfs_device *device,
				   u64 *start, u64 len)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct extent_map *em;
	struct list_head *search_list = &fs_info->pinned_chunks;
	int ret = 0;
	u64 physical_start = *start;

	if (transaction)
		search_list = &transaction->pending_chunks;
again:
	list_for_each_entry(em, search_list, list) {
		struct map_lookup *map;
		int i;

		map = em->map_lookup;
		for (i = 0; i < map->num_stripes; i++) {
			u64 end;

			if (map->stripes[i].dev != device)
				continue;
			if (map->stripes[i].physical >= physical_start + len ||
			    map->stripes[i].physical + em->orig_block_len <=
			    physical_start)
				continue;
			/*
			 * Make sure that while processing the pinned list we do
			 * not override our *start with a lower value, because
			 * we can have pinned chunks that fall within this
			 * device hole and that have lower physical addresses
			 * than the pending chunks we processed before. If we
			 * do not take this special care we can end up getting
			 * 2 pending chunks that start at the same physical
			 * device offsets because the end offset of a pinned
			 * chunk can be equal to the start offset of some
			 * pending chunk.
			 */
			end = map->stripes[i].physical + em->orig_block_len;
			if (end > *start) {
				*start = end;
				ret = 1;
			}
		}
	}
	if (search_list != &fs_info->pinned_chunks) {
		search_list = &fs_info->pinned_chunks;
		goto again;
	}

	return ret;
}


/*
 * find_free_dev_extent_start - find free space in the specified device
 * @device:	  the device which we search the free space in
 * @num_bytes:	  the size of the free space that we need
 * @search_start: the position from which to begin the search
 * @start:	  store the start of the free space.
 * @len:	  the size of the free space that we find, or the size
 *		  of the max free space if we don't find suitable free space
 *
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
 *
 * @start is used to store the start of the free space if we find it. But if
 * we don't find suitable free space, it will be used to store the start
 * position of the max free space.
 *
 * @len is used to store the size of the free space that we find.
 * But if we don't find suitable free space, it is used to store the size of
 * the max free space.
 */
int find_free_dev_extent_start(struct btrfs_transaction *transaction,
			       struct btrfs_device *device, u64 num_bytes,
			       u64 search_start, u64 *start, u64 *len)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_key key;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 hole_size;
	u64 max_hole_start;
	u64 max_hole_size;
	u64 extent_end;
	u64 search_end = device->total_bytes;
	int ret;
	int slot;
	struct extent_buffer *l;

	/*
	 * We don't want to overwrite the superblock on the drive nor any area
	 * used by the boot loader (grub for example), so we make sure to start
	 * at an offset of at least 1MB.
	 */
	search_start = max_t(u64, search_start, SZ_1M);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	max_hole_start = search_start;
	max_hole_size = 0;

again:
	if (search_start >= search_end ||
	    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
		ret = -ENOSPC;
		goto out;
	}

	path->reada = READA_FORWARD;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid, key.type);
		if (ret < 0)
			goto out;
	}

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (key.type != BTRFS_DEV_EXTENT_KEY)
			goto next;

		if (key.offset > search_start) {
			hole_size = key.offset - search_start;

			/*
			 * Have to check before we set max_hole_start, otherwise
			 * we could end up sending back this offset anyway.
			 */
			if (contains_pending_extent(transaction, device,
						    &search_start,
						    hole_size)) {
				if (key.offset >= search_start) {
					hole_size = key.offset - search_start;
				} else {
					WARN_ON_ONCE(1);
					hole_size = 0;
				}
			}

			if (hole_size > max_hole_size) {
				max_hole_start = search_start;
				max_hole_size = hole_size;
			}

			/*
			 * If this free space is greater than what we need,
			 * it must be the max free space that we have found
			 * until now, so max_hole_start must point to the start
			 * of this free space and the length of this free space
			 * is stored in max_hole_size. Thus, we return
			 * max_hole_start and max_hole_size and go back to the
			 * caller.
			 */
			if (hole_size >= num_bytes) {
				ret = 0;
				goto out;
			}
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (extent_end > search_start)
			search_start = extent_end;
next:
		path->slots[0]++;
		cond_resched();
	}

	/*
	 * At this point, search_start should be the end of
	 * allocated dev extents, and when shrinking the device,
	 * search_end may be smaller than search_start.
	 */
	if (search_end > search_start) {
		hole_size = search_end - search_start;

		if (contains_pending_extent(transaction, device, &search_start,
					    hole_size)) {
			btrfs_release_path(path);
			goto again;
		}

		if (hole_size > max_hole_size) {
			max_hole_start = search_start;
			max_hole_size = hole_size;
		}
	}

	/* See above. */
	if (max_hole_size < num_bytes)
		ret = -ENOSPC;
	else
		ret = 0;

out:
	btrfs_free_path(path);
	*start = max_hole_start;
	if (len)
		*len = max_hole_size;
	return ret;
}

int find_free_dev_extent(struct btrfs_trans_handle *trans,
			 struct btrfs_device *device, u64 num_bytes,
			 u64 *start, u64 *len)
{
	/* FIXME use last free of some kind */
	return find_free_dev_extent_start(trans->transaction, device,
					  num_bytes, 0, start, len);
}
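/*
 * Worked example (illustrative, not from the original file): on a device
 * with dev extents at [1M, 5M) and [9M, 13M) and total_bytes = 20M, a call
 *
 *	find_free_dev_extent_start(NULL, device, SZ_4M, 0, &start, &len);
 *
 * starts the search at 1M (the superblock/bootloader reserve), walks past
 * the first extent to the [5M, 9M) hole (4M, big enough) and returns 0 with
 * start = 5M. Asking for 8M instead would exhaust the extents, record the
 * trailing [13M, 20M) hole as the maximum, and return -ENOSPC with
 * start = 13M and len = 7M.
 */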
static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
				 struct btrfs_device *device,
				 u64 start, u64 *dev_extent_len)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf = NULL;
	struct btrfs_dev_extent *extent = NULL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
again:
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid,
					  BTRFS_DEV_EXTENT_KEY);
		if (ret)
			goto out;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
		BUG_ON(found_key.offset > start || found_key.offset +
		       btrfs_dev_extent_length(leaf, extent) < start);
		key = found_key;
		btrfs_release_path(path);
		goto again;
	} else if (ret == 0) {
		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
	} else {
		btrfs_handle_fs_error(fs_info, ret, "Slot search failed");
		goto out;
	}

	*dev_extent_len = btrfs_dev_extent_length(leaf, extent);

	ret = btrfs_del_item(trans, root, path);
	if (ret) {
		btrfs_handle_fs_error(fs_info, ret,
				      "Failed to remove dev extent item");
	} else {
		set_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags);
	}
out:
	btrfs_free_path(path);
	return ret;
}

static int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
				  struct btrfs_device *device,
				  u64 chunk_offset, u64 start, u64 num_bytes)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_dev_extent *extent;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	WARN_ON(!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state));
	WARN_ON(test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state));
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*extent));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	extent = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_dev_extent);
	btrfs_set_dev_extent_chunk_tree(leaf, extent,
					BTRFS_CHUNK_TREE_OBJECTID);
	btrfs_set_dev_extent_chunk_objectid(leaf, extent,
					    BTRFS_FIRST_CHUNK_TREE_OBJECTID);
	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);

	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return ret;
}
static u64 find_next_chunk(struct btrfs_fs_info *fs_info)
{
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	struct rb_node *n;
	u64 ret = 0;

	em_tree = &fs_info->mapping_tree.map_tree;
	read_lock(&em_tree->lock);
	n = rb_last(&em_tree->map);
	if (n) {
		em = rb_entry(n, struct extent_map, rb_node);
		ret = em->start + em->len;
	}
	read_unlock(&em_tree->lock);

	return ret;
}

static noinline int find_next_devid(struct btrfs_fs_info *fs_info,
				    u64 *devid_ret)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0); /* Corruption */

	ret = btrfs_previous_item(fs_info->chunk_root, path,
				  BTRFS_DEV_ITEMS_OBJECTID,
				  BTRFS_DEV_ITEM_KEY);
	if (ret) {
		*devid_ret = 1;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		*devid_ret = found_key.offset + 1;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}
/*
 * the device information is stored in the chunk root
 * the btrfs_device struct should be fully filled in
 */
static int btrfs_add_dev_item(struct btrfs_trans_handle *trans,
			      struct btrfs_fs_info *fs_info,
			      struct btrfs_device *device)
{
	struct btrfs_root *root = fs_info->chunk_root;
	int ret;
	struct btrfs_path *path;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long ptr;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*dev_item));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_generation(leaf, dev_item, 0);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item,
				     btrfs_device_get_disk_total_bytes(device));
	btrfs_set_device_bytes_used(leaf, dev_item,
				    btrfs_device_get_bytes_used(device));
	btrfs_set_device_group(leaf, dev_item, 0);
	btrfs_set_device_seek_speed(leaf, dev_item, 0);
	btrfs_set_device_bandwidth(leaf, dev_item, 0);
	btrfs_set_device_start_offset(leaf, dev_item, 0);

	ptr = btrfs_device_uuid(dev_item);
	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
	ptr = btrfs_device_fsid(dev_item);
	write_extent_buffer(leaf, fs_info->fsid, ptr, BTRFS_FSID_SIZE);
	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Function to update ctime/mtime for a given device path.
 * Mainly used for ctime/mtime based probes like libblkid.
 */
static void update_dev_time(const char *path_name)
{
	struct file *filp;

	filp = filp_open(path_name, O_RDWR, 0);
	if (IS_ERR(filp))
		return;
	file_update_time(filp);
	filp_close(filp, NULL);
}

static int btrfs_rm_dev_item(struct btrfs_fs_info *fs_info,
			     struct btrfs_device *device)
{
	struct btrfs_root *root = fs_info->chunk_root;
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_trans_handle *trans;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		btrfs_abort_transaction(trans, ret);
		btrfs_end_transaction(trans);
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		btrfs_end_transaction(trans);
	}

out:
	btrfs_free_path(path);
	if (!ret)
		ret = btrfs_commit_transaction(trans);
	return ret;
}

/*
 * Verify that @num_devices satisfies the RAID profile constraints in the
 * whole filesystem. It's up to the caller to adjust that number regarding
 * eg. device replace.
 */
static int btrfs_check_raid_min_devices(struct btrfs_fs_info *fs_info,
					u64 num_devices)
{
	u64 all_avail;
	unsigned seq;
	int i;

	do {
		seq = read_seqbegin(&fs_info->profiles_lock);

		all_avail = fs_info->avail_data_alloc_bits |
			    fs_info->avail_system_alloc_bits |
			    fs_info->avail_metadata_alloc_bits;
	} while (read_seqretry(&fs_info->profiles_lock, seq));

	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
		if (!(all_avail & btrfs_raid_array[i].bg_flag))
			continue;

		if (num_devices < btrfs_raid_array[i].devs_min) {
			int ret = btrfs_raid_array[i].mindev_error;

			if (ret)
				return ret;
		}
	}

	return 0;
}
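/*
 * Worked example (illustrative, not from the original file): on a two-device
 * filesystem whose data/metadata/system profiles are all raid1, removing a
 * device means btrfs_rm_device() below calls
 *
 *	btrfs_check_raid_min_devices(fs_info, 2 - 1);
 *
 * raid1 has devs_min == 2 in btrfs_raid_array, so the check returns
 * BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET and the removal is refused. Profiles
 * with mindev_error == 0 (single, dup, raid0) never veto a removal here.
 */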
static struct btrfs_device *btrfs_find_next_active_device(
		struct btrfs_fs_devices *fs_devs, struct btrfs_device *device)
{
	struct btrfs_device *next_device;

	list_for_each_entry(next_device, &fs_devs->devices, dev_list) {
		if (next_device != device &&
		    !test_bit(BTRFS_DEV_STATE_MISSING, &next_device->dev_state)
		    && next_device->bdev)
			return next_device;
	}

	return NULL;
}

/*
 * Helper function to check if the given device is part of s_bdev / latest_bdev
 * and replace it with the provided or the next active device. In the context
 * where this function is called, there should always be another device (or
 * this_dev) which is active.
 */
void btrfs_assign_next_active_device(struct btrfs_fs_info *fs_info,
		struct btrfs_device *device, struct btrfs_device *this_dev)
{
	struct btrfs_device *next_device;

	if (this_dev)
		next_device = this_dev;
	else
		next_device = btrfs_find_next_active_device(fs_info->fs_devices,
							    device);
	ASSERT(next_device);

	if (fs_info->sb->s_bdev &&
	    (fs_info->sb->s_bdev == device->bdev))
		fs_info->sb->s_bdev = next_device->bdev;

	if (fs_info->fs_devices->latest_bdev == device->bdev)
		fs_info->fs_devices->latest_bdev = next_device->bdev;
}

int btrfs_rm_device(struct btrfs_fs_info *fs_info, const char *device_path,
		    u64 devid)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *cur_devices;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	u64 num_devices;
	int ret = 0;

	mutex_lock(&uuid_mutex);

	num_devices = fs_devices->num_devices;
	btrfs_dev_replace_read_lock(&fs_info->dev_replace);
	if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
		WARN_ON(num_devices < 1);
		num_devices--;
	}
	btrfs_dev_replace_read_unlock(&fs_info->dev_replace);

	ret = btrfs_check_raid_min_devices(fs_info, num_devices - 1);
	if (ret)
		goto out;

	ret = btrfs_find_device_by_devspec(fs_info, devid, device_path,
					   &device);
	if (ret)
		goto out;

	if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
		ret = BTRFS_ERROR_DEV_TGT_REPLACE;
		goto out;
	}

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    fs_info->fs_devices->rw_devices == 1) {
		ret = BTRFS_ERROR_DEV_ONLY_WRITABLE;
		goto out;
	}

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
		mutex_lock(&fs_info->chunk_mutex);
		list_del_init(&device->dev_alloc_list);
		device->fs_devices->rw_devices--;
		mutex_unlock(&fs_info->chunk_mutex);
	}

	mutex_unlock(&uuid_mutex);
btrfs_shrink_device(device, 0); 1995 mutex_lock(&uuid_mutex); 1996 if (ret) 1997 goto error_undo; 1998 1999 /* 2000 * TODO: the superblock still includes this device in its num_devices 2001 * counter although write_all_supers() is not locked out. This 2002 * could give a filesystem state which requires a degraded mount. 2003 */ 2004 ret = btrfs_rm_dev_item(fs_info, device); 2005 if (ret) 2006 goto error_undo; 2007 2008 clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state); 2009 btrfs_scrub_cancel_dev(fs_info, device); 2010 2011 /* 2012 * the device list mutex makes sure that we don't change 2013 * the device list while someone else is writing out all 2014 * the device supers. Whoever is writing all supers, should 2015 * lock the device list mutex before getting the number of 2016 * devices in the super block (super_copy). Conversely, 2017 * whoever updates the number of devices in the super block 2018 * (super_copy) should hold the device list mutex. 2019 */ 2020 2021 /* 2022 * In normal cases cur_devices == fs_devices. But when deleting 2023 * a seed device, cur_devices should point to the seed's own 2024 * fs_devices, listed under fs_devices->seed. 2025 */ 2026 cur_devices = device->fs_devices; 2027 mutex_lock(&fs_devices->device_list_mutex); 2028 list_del_rcu(&device->dev_list); 2029 2030 cur_devices->num_devices--; 2031 cur_devices->total_devices--; 2032 2033 if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) 2034 cur_devices->missing_devices--; 2035 2036 btrfs_assign_next_active_device(fs_info, device, NULL); 2037 2038 if (device->bdev) { 2039 cur_devices->open_devices--; 2040 /* remove sysfs entry */ 2041 btrfs_sysfs_rm_device_link(fs_devices, device); 2042 } 2043 2044 num_devices = btrfs_super_num_devices(fs_info->super_copy) - 1; 2045 btrfs_set_super_num_devices(fs_info->super_copy, num_devices); 2046 mutex_unlock(&fs_devices->device_list_mutex); 2047 2048 /* 2049 * at this point, the device is zero sized and detached from 2050 * the devices list. All that's left is to zero out the old 2051 * supers and free the device. 2052 */ 2053 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) 2054 btrfs_scratch_superblocks(device->bdev, device->name->str); 2055 2056 btrfs_close_bdev(device); 2057 call_rcu(&device->rcu, free_device_rcu); 2058 2059 if (cur_devices->open_devices == 0) { 2060 while (fs_devices) { 2061 if (fs_devices->seed == cur_devices) { 2062 fs_devices->seed = cur_devices->seed; 2063 break; 2064 } 2065 fs_devices = fs_devices->seed; 2066 } 2067 cur_devices->seed = NULL; 2068 close_fs_devices(cur_devices); 2069 free_fs_devices(cur_devices); 2070 } 2071 2072 out: 2073 mutex_unlock(&uuid_mutex); 2074 return ret; 2075 2076 error_undo: 2077 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 2078 mutex_lock(&fs_info->chunk_mutex); 2079 list_add(&device->dev_alloc_list, 2080 &fs_devices->alloc_list); 2081 device->fs_devices->rw_devices++; 2082 mutex_unlock(&fs_info->chunk_mutex); 2083 } 2084 goto out; 2085 } 2086 2087 void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_fs_info *fs_info, 2088 struct btrfs_device *srcdev) 2089 { 2090 struct btrfs_fs_devices *fs_devices; 2091 2092 lockdep_assert_held(&fs_info->fs_devices->device_list_mutex); 2093 2094 /* 2095 * in case of an fs with no seed, srcdev->fs_devices will point 2096 * to the fs_devices of fs_info. However, when the dev being replaced 2097 * is a seed dev, it will point to the seed's local fs_devices. In 2098 * short, srcdev will have its correct fs_devices in both cases.
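 *
 * An illustrative sketch of the pointer chain this relies on (assuming a
 * single seed level):
 *
 *	fs_info->fs_devices (sprout) --seed--> seed fs_devices
 *
 * A non-seed srcdev sits on the sprout's device list, a seed srcdev on the
 * seed's list; srcdev->fs_devices names whichever list srcdev is actually on.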
2099 */ 2100 fs_devices = srcdev->fs_devices; 2101 2102 list_del_rcu(&srcdev->dev_list); 2103 list_del(&srcdev->dev_alloc_list); 2104 fs_devices->num_devices--; 2105 if (test_bit(BTRFS_DEV_STATE_MISSING, &srcdev->dev_state)) 2106 fs_devices->missing_devices--; 2107 2108 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &srcdev->dev_state)) 2109 fs_devices->rw_devices--; 2110 2111 if (srcdev->bdev) 2112 fs_devices->open_devices--; 2113 } 2114 2115 void btrfs_rm_dev_replace_free_srcdev(struct btrfs_fs_info *fs_info, 2116 struct btrfs_device *srcdev) 2117 { 2118 struct btrfs_fs_devices *fs_devices = srcdev->fs_devices; 2119 2120 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &srcdev->dev_state)) { 2121 /* zero out the old super if it is writable */ 2122 btrfs_scratch_superblocks(srcdev->bdev, srcdev->name->str); 2123 } 2124 2125 btrfs_close_bdev(srcdev); 2126 call_rcu(&srcdev->rcu, free_device_rcu); 2127 2128 /* if there are no devs left we'd rather delete the fs_devices */ 2129 if (!fs_devices->num_devices) { 2130 struct btrfs_fs_devices *tmp_fs_devices; 2131 2132 /* 2133 * On a mounted FS, num_devices can't be zero unless it's a 2134 * seed. In case of a seed device being replaced, the replace 2135 * target is added to the sprout FS, so there will be no more 2136 * devices left under the seed FS. 2137 */ 2138 ASSERT(fs_devices->seeding); 2139 2140 tmp_fs_devices = fs_info->fs_devices; 2141 while (tmp_fs_devices) { 2142 if (tmp_fs_devices->seed == fs_devices) { 2143 tmp_fs_devices->seed = fs_devices->seed; 2144 break; 2145 } 2146 tmp_fs_devices = tmp_fs_devices->seed; 2147 } 2148 fs_devices->seed = NULL; 2149 close_fs_devices(fs_devices); 2150 free_fs_devices(fs_devices); 2151 } 2152 } 2153 2154 void btrfs_destroy_dev_replace_tgtdev(struct btrfs_fs_info *fs_info, 2155 struct btrfs_device *tgtdev) 2156 { 2157 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 2158 2159 WARN_ON(!tgtdev); 2160 mutex_lock(&fs_devices->device_list_mutex); 2161 2162 btrfs_sysfs_rm_device_link(fs_devices, tgtdev); 2163 2164 if (tgtdev->bdev) 2165 fs_devices->open_devices--; 2166 2167 fs_devices->num_devices--; 2168 2169 btrfs_assign_next_active_device(fs_info, tgtdev, NULL); 2170 2171 list_del_rcu(&tgtdev->dev_list); 2172 2173 mutex_unlock(&fs_devices->device_list_mutex); 2174 2175 /* 2176 * The update_dev_time() within btrfs_scratch_superblocks() 2177 * may lead to a call to btrfs_show_devname() which will try 2178 * to hold device_list_mutex. Here this device is already out 2179 * of the device list, so we don't have to hold the 2180 * device_list_mutex lock.
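 *
 * The chain to avoid, spelled out (derived from the description above):
 *
 *	btrfs_scratch_superblocks()
 *	  update_dev_time()
 *	    ... btrfs_show_devname()	(wants device_list_mutex)
 *
 * so holding device_list_mutex across the scratch call could deadlock.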
2181 */ 2182 btrfs_scratch_superblocks(tgtdev->bdev, tgtdev->name->str); 2183 2184 btrfs_close_bdev(tgtdev); 2185 call_rcu(&tgtdev->rcu, free_device_rcu); 2186 } 2187 2188 static int btrfs_find_device_by_path(struct btrfs_fs_info *fs_info, 2189 const char *device_path, 2190 struct btrfs_device **device) 2191 { 2192 int ret = 0; 2193 struct btrfs_super_block *disk_super; 2194 u64 devid; 2195 u8 *dev_uuid; 2196 struct block_device *bdev; 2197 struct buffer_head *bh; 2198 2199 *device = NULL; 2200 ret = btrfs_get_bdev_and_sb(device_path, FMODE_READ, 2201 fs_info->bdev_holder, 0, &bdev, &bh); 2202 if (ret) 2203 return ret; 2204 disk_super = (struct btrfs_super_block *)bh->b_data; 2205 devid = btrfs_stack_device_id(&disk_super->dev_item); 2206 dev_uuid = disk_super->dev_item.uuid; 2207 *device = btrfs_find_device(fs_info, devid, dev_uuid, disk_super->fsid); 2208 brelse(bh); 2209 if (!*device) 2210 ret = -ENOENT; 2211 blkdev_put(bdev, FMODE_READ); 2212 return ret; 2213 } 2214 2215 int btrfs_find_device_missing_or_by_path(struct btrfs_fs_info *fs_info, 2216 const char *device_path, 2217 struct btrfs_device **device) 2218 { 2219 *device = NULL; 2220 if (strcmp(device_path, "missing") == 0) { 2221 struct list_head *devices; 2222 struct btrfs_device *tmp; 2223 2224 devices = &fs_info->fs_devices->devices; 2225 list_for_each_entry(tmp, devices, dev_list) { 2226 if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, 2227 &tmp->dev_state) && !tmp->bdev) { 2228 *device = tmp; 2229 break; 2230 } 2231 } 2232 2233 if (!*device) 2234 return BTRFS_ERROR_DEV_MISSING_NOT_FOUND; 2235 2236 return 0; 2237 } else { 2238 return btrfs_find_device_by_path(fs_info, device_path, device); 2239 } 2240 } 2241 2242 /* 2243 * Lookup a device given by device id, or the path if the id is 0. 2244 */ 2245 int btrfs_find_device_by_devspec(struct btrfs_fs_info *fs_info, u64 devid, 2246 const char *devpath, 2247 struct btrfs_device **device) 2248 { 2249 int ret; 2250 2251 if (devid) { 2252 ret = 0; 2253 *device = btrfs_find_device(fs_info, devid, NULL, NULL); 2254 if (!*device) 2255 ret = -ENOENT; 2256 } else { 2257 if (!devpath || !devpath[0]) 2258 return -EINVAL; 2259 2260 ret = btrfs_find_device_missing_or_by_path(fs_info, devpath, 2261 device); 2262 } 2263 return ret; 2264 } 2265 2266 /* 2267 * does all the dirty work required for changing the file system's UUID.
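 *
 * In short (a descriptive summary of the steps below): the currently open
 * devices are moved onto a freshly allocated seed fs_devices, a new random
 * fsid is generated for the mounted filesystem, and BTRFS_SUPER_FLAG_SEEDING
 * is cleared from the superblock so the sprout becomes writable.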
2268 */ 2269 static int btrfs_prepare_sprout(struct btrfs_fs_info *fs_info) 2270 { 2271 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 2272 struct btrfs_fs_devices *old_devices; 2273 struct btrfs_fs_devices *seed_devices; 2274 struct btrfs_super_block *disk_super = fs_info->super_copy; 2275 struct btrfs_device *device; 2276 u64 super_flags; 2277 2278 lockdep_assert_held(&uuid_mutex); 2279 if (!fs_devices->seeding) 2280 return -EINVAL; 2281 2282 seed_devices = alloc_fs_devices(NULL); 2283 if (IS_ERR(seed_devices)) 2284 return PTR_ERR(seed_devices); 2285 2286 old_devices = clone_fs_devices(fs_devices); 2287 if (IS_ERR(old_devices)) { 2288 kfree(seed_devices); 2289 return PTR_ERR(old_devices); 2290 } 2291 2292 list_add(&old_devices->fs_list, &fs_uuids); 2293 2294 memcpy(seed_devices, fs_devices, sizeof(*seed_devices)); 2295 seed_devices->opened = 1; 2296 INIT_LIST_HEAD(&seed_devices->devices); 2297 INIT_LIST_HEAD(&seed_devices->alloc_list); 2298 mutex_init(&seed_devices->device_list_mutex); 2299 2300 mutex_lock(&fs_info->fs_devices->device_list_mutex); 2301 list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices, 2302 synchronize_rcu); 2303 list_for_each_entry(device, &seed_devices->devices, dev_list) 2304 device->fs_devices = seed_devices; 2305 2306 mutex_lock(&fs_info->chunk_mutex); 2307 list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list); 2308 mutex_unlock(&fs_info->chunk_mutex); 2309 2310 fs_devices->seeding = 0; 2311 fs_devices->num_devices = 0; 2312 fs_devices->open_devices = 0; 2313 fs_devices->missing_devices = 0; 2314 fs_devices->rotating = 0; 2315 fs_devices->seed = seed_devices; 2316 2317 generate_random_uuid(fs_devices->fsid); 2318 memcpy(fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE); 2319 memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE); 2320 mutex_unlock(&fs_info->fs_devices->device_list_mutex); 2321 2322 super_flags = btrfs_super_flags(disk_super) & 2323 ~BTRFS_SUPER_FLAG_SEEDING; 2324 btrfs_set_super_flags(disk_super, super_flags); 2325 2326 return 0; 2327 } 2328 2329 /* 2330 * Store the expected generation for seed devices in device items. 
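 *
 * Concretely, the loop below walks every DEV_ITEM in the chunk tree and, for
 * devices that still belong to a seeding fs_devices, stamps the in-memory
 * device->generation into the on-disk item.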
2331 */ 2332 static int btrfs_finish_sprout(struct btrfs_trans_handle *trans, 2333 struct btrfs_fs_info *fs_info) 2334 { 2335 struct btrfs_root *root = fs_info->chunk_root; 2336 struct btrfs_path *path; 2337 struct extent_buffer *leaf; 2338 struct btrfs_dev_item *dev_item; 2339 struct btrfs_device *device; 2340 struct btrfs_key key; 2341 u8 fs_uuid[BTRFS_FSID_SIZE]; 2342 u8 dev_uuid[BTRFS_UUID_SIZE]; 2343 u64 devid; 2344 int ret; 2345 2346 path = btrfs_alloc_path(); 2347 if (!path) 2348 return -ENOMEM; 2349 2350 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 2351 key.offset = 0; 2352 key.type = BTRFS_DEV_ITEM_KEY; 2353 2354 while (1) { 2355 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 2356 if (ret < 0) 2357 goto error; 2358 2359 leaf = path->nodes[0]; 2360 next_slot: 2361 if (path->slots[0] >= btrfs_header_nritems(leaf)) { 2362 ret = btrfs_next_leaf(root, path); 2363 if (ret > 0) 2364 break; 2365 if (ret < 0) 2366 goto error; 2367 leaf = path->nodes[0]; 2368 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 2369 btrfs_release_path(path); 2370 continue; 2371 } 2372 2373 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 2374 if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID || 2375 key.type != BTRFS_DEV_ITEM_KEY) 2376 break; 2377 2378 dev_item = btrfs_item_ptr(leaf, path->slots[0], 2379 struct btrfs_dev_item); 2380 devid = btrfs_device_id(leaf, dev_item); 2381 read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item), 2382 BTRFS_UUID_SIZE); 2383 read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item), 2384 BTRFS_FSID_SIZE); 2385 device = btrfs_find_device(fs_info, devid, dev_uuid, fs_uuid); 2386 BUG_ON(!device); /* Logic error */ 2387 2388 if (device->fs_devices->seeding) { 2389 btrfs_set_device_generation(leaf, dev_item, 2390 device->generation); 2391 btrfs_mark_buffer_dirty(leaf); 2392 } 2393 2394 path->slots[0]++; 2395 goto next_slot; 2396 } 2397 ret = 0; 2398 error: 2399 btrfs_free_path(path); 2400 return ret; 2401 } 2402 2403 int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path) 2404 { 2405 struct btrfs_root *root = fs_info->dev_root; 2406 struct request_queue *q; 2407 struct btrfs_trans_handle *trans; 2408 struct btrfs_device *device; 2409 struct block_device *bdev; 2410 struct list_head *devices; 2411 struct super_block *sb = fs_info->sb; 2412 struct rcu_string *name; 2413 u64 tmp; 2414 int seeding_dev = 0; 2415 int ret = 0; 2416 bool unlocked = false; 2417 2418 if (sb_rdonly(sb) && !fs_info->fs_devices->seeding) 2419 return -EROFS; 2420 2421 bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL, 2422 fs_info->bdev_holder); 2423 if (IS_ERR(bdev)) 2424 return PTR_ERR(bdev); 2425 2426 if (fs_info->fs_devices->seeding) { 2427 seeding_dev = 1; 2428 down_write(&sb->s_umount); 2429 mutex_lock(&uuid_mutex); 2430 } 2431 2432 filemap_write_and_wait(bdev->bd_inode->i_mapping); 2433 2434 devices = &fs_info->fs_devices->devices; 2435 2436 mutex_lock(&fs_info->fs_devices->device_list_mutex); 2437 list_for_each_entry(device, devices, dev_list) { 2438 if (device->bdev == bdev) { 2439 ret = -EEXIST; 2440 mutex_unlock( 2441 &fs_info->fs_devices->device_list_mutex); 2442 goto error; 2443 } 2444 } 2445 mutex_unlock(&fs_info->fs_devices->device_list_mutex); 2446 2447 device = btrfs_alloc_device(fs_info, NULL, NULL); 2448 if (IS_ERR(device)) { 2449 /* we can safely leave the fs_devices entry around */ 2450 ret = PTR_ERR(device); 2451 goto error; 2452 } 2453 2454 name = rcu_string_strdup(device_path, GFP_KERNEL); 2455 if (!name) { 2456 ret = -ENOMEM; 2457 
goto error_free_device; 2458 } 2459 rcu_assign_pointer(device->name, name); 2460 2461 trans = btrfs_start_transaction(root, 0); 2462 if (IS_ERR(trans)) { 2463 ret = PTR_ERR(trans); 2464 goto error_free_device; 2465 } 2466 2467 q = bdev_get_queue(bdev); 2468 set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state); 2469 device->generation = trans->transid; 2470 device->io_width = fs_info->sectorsize; 2471 device->io_align = fs_info->sectorsize; 2472 device->sector_size = fs_info->sectorsize; 2473 device->total_bytes = round_down(i_size_read(bdev->bd_inode), 2474 fs_info->sectorsize); 2475 device->disk_total_bytes = device->total_bytes; 2476 device->commit_total_bytes = device->total_bytes; 2477 device->fs_info = fs_info; 2478 device->bdev = bdev; 2479 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state); 2480 clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state); 2481 device->mode = FMODE_EXCL; 2482 device->dev_stats_valid = 1; 2483 set_blocksize(device->bdev, BTRFS_BDEV_BLOCKSIZE); 2484 2485 if (seeding_dev) { 2486 sb->s_flags &= ~SB_RDONLY; 2487 ret = btrfs_prepare_sprout(fs_info); 2488 if (ret) { 2489 btrfs_abort_transaction(trans, ret); 2490 goto error_trans; 2491 } 2492 } 2493 2494 device->fs_devices = fs_info->fs_devices; 2495 2496 mutex_lock(&fs_info->fs_devices->device_list_mutex); 2497 mutex_lock(&fs_info->chunk_mutex); 2498 list_add_rcu(&device->dev_list, &fs_info->fs_devices->devices); 2499 list_add(&device->dev_alloc_list, 2500 &fs_info->fs_devices->alloc_list); 2501 fs_info->fs_devices->num_devices++; 2502 fs_info->fs_devices->open_devices++; 2503 fs_info->fs_devices->rw_devices++; 2504 fs_info->fs_devices->total_devices++; 2505 fs_info->fs_devices->total_rw_bytes += device->total_bytes; 2506 2507 atomic64_add(device->total_bytes, &fs_info->free_chunk_space); 2508 2509 if (!blk_queue_nonrot(q)) 2510 fs_info->fs_devices->rotating = 1; 2511 2512 tmp = btrfs_super_total_bytes(fs_info->super_copy); 2513 btrfs_set_super_total_bytes(fs_info->super_copy, 2514 round_down(tmp + device->total_bytes, fs_info->sectorsize)); 2515 2516 tmp = btrfs_super_num_devices(fs_info->super_copy); 2517 btrfs_set_super_num_devices(fs_info->super_copy, tmp + 1); 2518 2519 /* add sysfs device entry */ 2520 btrfs_sysfs_add_device_link(fs_info->fs_devices, device); 2521 2522 /* 2523 * we've got more storage, clear any full flags on the space 2524 * infos 2525 */ 2526 btrfs_clear_space_info_full(fs_info); 2527 2528 mutex_unlock(&fs_info->chunk_mutex); 2529 mutex_unlock(&fs_info->fs_devices->device_list_mutex); 2530 2531 if (seeding_dev) { 2532 mutex_lock(&fs_info->chunk_mutex); 2533 ret = init_first_rw_device(trans, fs_info); 2534 mutex_unlock(&fs_info->chunk_mutex); 2535 if (ret) { 2536 btrfs_abort_transaction(trans, ret); 2537 goto error_sysfs; 2538 } 2539 } 2540 2541 ret = btrfs_add_dev_item(trans, fs_info, device); 2542 if (ret) { 2543 btrfs_abort_transaction(trans, ret); 2544 goto error_sysfs; 2545 } 2546 2547 if (seeding_dev) { 2548 char fsid_buf[BTRFS_UUID_UNPARSED_SIZE]; 2549 2550 ret = btrfs_finish_sprout(trans, fs_info); 2551 if (ret) { 2552 btrfs_abort_transaction(trans, ret); 2553 goto error_sysfs; 2554 } 2555 2556 /* Sprouting would change fsid of the mounted root, 2557 * so rename the fsid on the sysfs 2558 */ 2559 snprintf(fsid_buf, BTRFS_UUID_UNPARSED_SIZE, "%pU", 2560 fs_info->fsid); 2561 if (kobject_rename(&fs_info->fs_devices->fsid_kobj, fsid_buf)) 2562 btrfs_warn(fs_info, 2563 "sysfs: failed to create fsid for sprout"); 2564 } 2565 2566 ret = btrfs_commit_transaction(trans); 
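	/*
	 * Note: for a sprout, the uuid_mutex and sb->s_umount taken when
	 * seeding_dev was set are dropped just below, before the system
	 * chunks are relocated.
	 */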
2567 2568 if (seeding_dev) { 2569 mutex_unlock(&uuid_mutex); 2570 up_write(&sb->s_umount); 2571 unlocked = true; 2572 2573 if (ret) /* transaction commit */ 2574 return ret; 2575 2576 ret = btrfs_relocate_sys_chunks(fs_info); 2577 if (ret < 0) 2578 btrfs_handle_fs_error(fs_info, ret, 2579 "Failed to relocate sys chunks after device initialization. This can be fixed using the \"btrfs balance\" command."); 2580 trans = btrfs_attach_transaction(root); 2581 if (IS_ERR(trans)) { 2582 if (PTR_ERR(trans) == -ENOENT) 2583 return 0; 2584 ret = PTR_ERR(trans); 2585 trans = NULL; 2586 goto error_sysfs; 2587 } 2588 ret = btrfs_commit_transaction(trans); 2589 } 2590 2591 /* Update ctime/mtime for libblkid */ 2592 update_dev_time(device_path); 2593 return ret; 2594 2595 error_sysfs: 2596 btrfs_sysfs_rm_device_link(fs_info->fs_devices, device); 2597 error_trans: 2598 if (seeding_dev) 2599 sb->s_flags |= SB_RDONLY; 2600 if (trans) 2601 btrfs_end_transaction(trans); 2602 error_free_device: 2603 btrfs_free_device(device); 2604 error: 2605 blkdev_put(bdev, FMODE_EXCL); 2606 if (seeding_dev && !unlocked) { 2607 mutex_unlock(&uuid_mutex); 2608 up_write(&sb->s_umount); 2609 } 2610 return ret; 2611 } 2612 2613 static noinline int btrfs_update_device(struct btrfs_trans_handle *trans, 2614 struct btrfs_device *device) 2615 { 2616 int ret; 2617 struct btrfs_path *path; 2618 struct btrfs_root *root = device->fs_info->chunk_root; 2619 struct btrfs_dev_item *dev_item; 2620 struct extent_buffer *leaf; 2621 struct btrfs_key key; 2622 2623 path = btrfs_alloc_path(); 2624 if (!path) 2625 return -ENOMEM; 2626 2627 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 2628 key.type = BTRFS_DEV_ITEM_KEY; 2629 key.offset = device->devid; 2630 2631 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 2632 if (ret < 0) 2633 goto out; 2634 2635 if (ret > 0) { 2636 ret = -ENOENT; 2637 goto out; 2638 } 2639 2640 leaf = path->nodes[0]; 2641 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item); 2642 2643 btrfs_set_device_id(leaf, dev_item, device->devid); 2644 btrfs_set_device_type(leaf, dev_item, device->type); 2645 btrfs_set_device_io_align(leaf, dev_item, device->io_align); 2646 btrfs_set_device_io_width(leaf, dev_item, device->io_width); 2647 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size); 2648 btrfs_set_device_total_bytes(leaf, dev_item, 2649 btrfs_device_get_disk_total_bytes(device)); 2650 btrfs_set_device_bytes_used(leaf, dev_item, 2651 btrfs_device_get_bytes_used(device)); 2652 btrfs_mark_buffer_dirty(leaf); 2653 2654 out: 2655 btrfs_free_path(path); 2656 return ret; 2657 } 2658 2659 int btrfs_grow_device(struct btrfs_trans_handle *trans, 2660 struct btrfs_device *device, u64 new_size) 2661 { 2662 struct btrfs_fs_info *fs_info = device->fs_info; 2663 struct btrfs_super_block *super_copy = fs_info->super_copy; 2664 struct btrfs_fs_devices *fs_devices; 2665 u64 old_total; 2666 u64 diff; 2667 2668 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) 2669 return -EACCES; 2670 2671 new_size = round_down(new_size, fs_info->sectorsize); 2672 2673 mutex_lock(&fs_info->chunk_mutex); 2674 old_total = btrfs_super_total_bytes(super_copy); 2675 diff = round_down(new_size - device->total_bytes, fs_info->sectorsize); 2676 2677 if (new_size <= device->total_bytes || 2678 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) { 2679 mutex_unlock(&fs_info->chunk_mutex); 2680 return -EINVAL; 2681 } 2682 2683 fs_devices = fs_info->fs_devices; 2684 2685 btrfs_set_super_total_bytes(super_copy, 2686 
round_down(old_total + diff, fs_info->sectorsize)); 2687 device->fs_devices->total_rw_bytes += diff; 2688 2689 btrfs_device_set_total_bytes(device, new_size); 2690 btrfs_device_set_disk_total_bytes(device, new_size); 2691 btrfs_clear_space_info_full(device->fs_info); 2692 if (list_empty(&device->resized_list)) 2693 list_add_tail(&device->resized_list, 2694 &fs_devices->resized_devices); 2695 mutex_unlock(&fs_info->chunk_mutex); 2696 2697 return btrfs_update_device(trans, device); 2698 } 2699 2700 static int btrfs_free_chunk(struct btrfs_trans_handle *trans, 2701 struct btrfs_fs_info *fs_info, u64 chunk_offset) 2702 { 2703 struct btrfs_root *root = fs_info->chunk_root; 2704 int ret; 2705 struct btrfs_path *path; 2706 struct btrfs_key key; 2707 2708 path = btrfs_alloc_path(); 2709 if (!path) 2710 return -ENOMEM; 2711 2712 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 2713 key.offset = chunk_offset; 2714 key.type = BTRFS_CHUNK_ITEM_KEY; 2715 2716 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 2717 if (ret < 0) 2718 goto out; 2719 else if (ret > 0) { /* Logic error or corruption */ 2720 btrfs_handle_fs_error(fs_info, -ENOENT, 2721 "Failed lookup while freeing chunk."); 2722 ret = -ENOENT; 2723 goto out; 2724 } 2725 2726 ret = btrfs_del_item(trans, root, path); 2727 if (ret < 0) 2728 btrfs_handle_fs_error(fs_info, ret, 2729 "Failed to delete chunk item."); 2730 out: 2731 btrfs_free_path(path); 2732 return ret; 2733 } 2734 2735 static int btrfs_del_sys_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset) 2736 { 2737 struct btrfs_super_block *super_copy = fs_info->super_copy; 2738 struct btrfs_disk_key *disk_key; 2739 struct btrfs_chunk *chunk; 2740 u8 *ptr; 2741 int ret = 0; 2742 u32 num_stripes; 2743 u32 array_size; 2744 u32 len = 0; 2745 u32 cur; 2746 struct btrfs_key key; 2747 2748 mutex_lock(&fs_info->chunk_mutex); 2749 array_size = btrfs_super_sys_array_size(super_copy); 2750 2751 ptr = super_copy->sys_chunk_array; 2752 cur = 0; 2753 2754 while (cur < array_size) { 2755 disk_key = (struct btrfs_disk_key *)ptr; 2756 btrfs_disk_key_to_cpu(&key, disk_key); 2757 2758 len = sizeof(*disk_key); 2759 2760 if (key.type == BTRFS_CHUNK_ITEM_KEY) { 2761 chunk = (struct btrfs_chunk *)(ptr + len); 2762 num_stripes = btrfs_stack_chunk_num_stripes(chunk); 2763 len += btrfs_chunk_item_size(num_stripes); 2764 } else { 2765 ret = -EIO; 2766 break; 2767 } 2768 if (key.objectid == BTRFS_FIRST_CHUNK_TREE_OBJECTID && 2769 key.offset == chunk_offset) { 2770 memmove(ptr, ptr + len, array_size - (cur + len)); 2771 array_size -= len; 2772 btrfs_set_super_sys_array_size(super_copy, array_size); 2773 } else { 2774 ptr += len; 2775 cur += len; 2776 } 2777 } 2778 mutex_unlock(&fs_info->chunk_mutex); 2779 return ret; 2780 } 2781 2782 static struct extent_map *get_chunk_map(struct btrfs_fs_info *fs_info, 2783 u64 logical, u64 length) 2784 { 2785 struct extent_map_tree *em_tree; 2786 struct extent_map *em; 2787 2788 em_tree = &fs_info->mapping_tree.map_tree; 2789 read_lock(&em_tree->lock); 2790 em = lookup_extent_mapping(em_tree, logical, length); 2791 read_unlock(&em_tree->lock); 2792 2793 if (!em) { 2794 btrfs_crit(fs_info, "unable to find logical %llu length %llu", 2795 logical, length); 2796 return ERR_PTR(-EINVAL); 2797 } 2798 2799 if (em->start > logical || em->start + em->len < logical) { 2800 btrfs_crit(fs_info, 2801 "found a bad mapping, wanted %llu-%llu, found %llu-%llu", 2802 logical, length, em->start, em->start + em->len); 2803 free_extent_map(em); 2804 return ERR_PTR(-EINVAL); 2805 } 2806 2807 /* 
callers are responsible for dropping em's ref. */ 2808 return em; 2809 } 2810 2811 int btrfs_remove_chunk(struct btrfs_trans_handle *trans, 2812 struct btrfs_fs_info *fs_info, u64 chunk_offset) 2813 { 2814 struct extent_map *em; 2815 struct map_lookup *map; 2816 u64 dev_extent_len = 0; 2817 int i, ret = 0; 2818 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 2819 2820 em = get_chunk_map(fs_info, chunk_offset, 1); 2821 if (IS_ERR(em)) { 2822 /* 2823 * This is a logic error, but we don't want to just rely on the 2824 * user having built with ASSERT enabled, so if ASSERT doesn't 2825 * do anything we still error out. 2826 */ 2827 ASSERT(0); 2828 return PTR_ERR(em); 2829 } 2830 map = em->map_lookup; 2831 mutex_lock(&fs_info->chunk_mutex); 2832 check_system_chunk(trans, fs_info, map->type); 2833 mutex_unlock(&fs_info->chunk_mutex); 2834 2835 /* 2836 * Take the device list mutex to prevent races with the final phase of 2837 * a device replace operation that replaces the device object associated 2838 * with map stripes (dev-replace.c:btrfs_dev_replace_finishing()). 2839 */ 2840 mutex_lock(&fs_devices->device_list_mutex); 2841 for (i = 0; i < map->num_stripes; i++) { 2842 struct btrfs_device *device = map->stripes[i].dev; 2843 ret = btrfs_free_dev_extent(trans, device, 2844 map->stripes[i].physical, 2845 &dev_extent_len); 2846 if (ret) { 2847 mutex_unlock(&fs_devices->device_list_mutex); 2848 btrfs_abort_transaction(trans, ret); 2849 goto out; 2850 } 2851 2852 if (device->bytes_used > 0) { 2853 mutex_lock(&fs_info->chunk_mutex); 2854 btrfs_device_set_bytes_used(device, 2855 device->bytes_used - dev_extent_len); 2856 atomic64_add(dev_extent_len, &fs_info->free_chunk_space); 2857 btrfs_clear_space_info_full(fs_info); 2858 mutex_unlock(&fs_info->chunk_mutex); 2859 } 2860 2861 if (map->stripes[i].dev) { 2862 ret = btrfs_update_device(trans, map->stripes[i].dev); 2863 if (ret) { 2864 mutex_unlock(&fs_devices->device_list_mutex); 2865 btrfs_abort_transaction(trans, ret); 2866 goto out; 2867 } 2868 } 2869 } 2870 mutex_unlock(&fs_devices->device_list_mutex); 2871 2872 ret = btrfs_free_chunk(trans, fs_info, chunk_offset); 2873 if (ret) { 2874 btrfs_abort_transaction(trans, ret); 2875 goto out; 2876 } 2877 2878 trace_btrfs_chunk_free(fs_info, map, chunk_offset, em->len); 2879 2880 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) { 2881 ret = btrfs_del_sys_chunk(fs_info, chunk_offset); 2882 if (ret) { 2883 btrfs_abort_transaction(trans, ret); 2884 goto out; 2885 } 2886 } 2887 2888 ret = btrfs_remove_block_group(trans, fs_info, chunk_offset, em); 2889 if (ret) { 2890 btrfs_abort_transaction(trans, ret); 2891 goto out; 2892 } 2893 2894 out: 2895 /* once for us */ 2896 free_extent_map(em); 2897 return ret; 2898 } 2899 2900 static int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset) 2901 { 2902 struct btrfs_root *root = fs_info->chunk_root; 2903 struct btrfs_trans_handle *trans; 2904 int ret; 2905 2906 /* 2907 * Prevent races with automatic removal of unused block groups. 2908 * After we relocate and before we remove the chunk with offset 2909 * chunk_offset, automatic removal of the block group can kick in, 2910 * resulting in a failure when calling btrfs_remove_chunk() below. 2911 * 2912 * Make sure to acquire this mutex before doing a tree search (dev 2913 * or chunk trees) to find chunks. 
Otherwise the cleaner kthread might 2914 * call btrfs_remove_chunk() (through btrfs_delete_unused_bgs()) after 2915 * we release the path used to search the chunk/dev tree and before 2916 * the current task acquires this mutex and calls us. 2917 */ 2918 lockdep_assert_held(&fs_info->delete_unused_bgs_mutex); 2919 2920 ret = btrfs_can_relocate(fs_info, chunk_offset); 2921 if (ret) 2922 return -ENOSPC; 2923 2924 /* step one, relocate all the extents inside this chunk */ 2925 btrfs_scrub_pause(fs_info); 2926 ret = btrfs_relocate_block_group(fs_info, chunk_offset); 2927 btrfs_scrub_continue(fs_info); 2928 if (ret) 2929 return ret; 2930 2931 /* 2932 * We add the kobjects here (and after forcing data chunk creation) 2933 * since relocation is the only place we'll create chunks of a new 2934 * type at runtime. The only place where we'll remove the last 2935 * chunk of a type is the call immediately below this one. Even 2936 * so, we're protected against races with the cleaner thread since 2937 * we're covered by the delete_unused_bgs_mutex. 2938 */ 2939 btrfs_add_raid_kobjects(fs_info); 2940 2941 trans = btrfs_start_trans_remove_block_group(root->fs_info, 2942 chunk_offset); 2943 if (IS_ERR(trans)) { 2944 ret = PTR_ERR(trans); 2945 btrfs_handle_fs_error(root->fs_info, ret, NULL); 2946 return ret; 2947 } 2948 2949 /* 2950 * step two, delete the device extents and the 2951 * chunk tree entries 2952 */ 2953 ret = btrfs_remove_chunk(trans, fs_info, chunk_offset); 2954 btrfs_end_transaction(trans); 2955 return ret; 2956 } 2957 2958 static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info) 2959 { 2960 struct btrfs_root *chunk_root = fs_info->chunk_root; 2961 struct btrfs_path *path; 2962 struct extent_buffer *leaf; 2963 struct btrfs_chunk *chunk; 2964 struct btrfs_key key; 2965 struct btrfs_key found_key; 2966 u64 chunk_type; 2967 bool retried = false; 2968 int failed = 0; 2969 int ret; 2970 2971 path = btrfs_alloc_path(); 2972 if (!path) 2973 return -ENOMEM; 2974 2975 again: 2976 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 2977 key.offset = (u64)-1; 2978 key.type = BTRFS_CHUNK_ITEM_KEY; 2979 2980 while (1) { 2981 mutex_lock(&fs_info->delete_unused_bgs_mutex); 2982 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0); 2983 if (ret < 0) { 2984 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 2985 goto error; 2986 } 2987 BUG_ON(ret == 0); /* Corruption */ 2988 2989 ret = btrfs_previous_item(chunk_root, path, key.objectid, 2990 key.type); 2991 if (ret) 2992 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 2993 if (ret < 0) 2994 goto error; 2995 if (ret > 0) 2996 break; 2997 2998 leaf = path->nodes[0]; 2999 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 3000 3001 chunk = btrfs_item_ptr(leaf, path->slots[0], 3002 struct btrfs_chunk); 3003 chunk_type = btrfs_chunk_type(leaf, chunk); 3004 btrfs_release_path(path); 3005 3006 if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) { 3007 ret = btrfs_relocate_chunk(fs_info, found_key.offset); 3008 if (ret == -ENOSPC) 3009 failed++; 3010 else 3011 BUG_ON(ret); 3012 } 3013 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 3014 3015 if (found_key.offset == 0) 3016 break; 3017 key.offset = found_key.offset - 1; 3018 } 3019 ret = 0; 3020 if (failed && !retried) { 3021 failed = 0; 3022 retried = true; 3023 goto again; 3024 } else if (WARN_ON(failed && retried)) { 3025 ret = -ENOSPC; 3026 } 3027 error: 3028 btrfs_free_path(path); 3029 return ret; 3030 } 3031 3032 /* 3033 * return 1 : allocate a data chunk successfully, 3034 * return <0: errors during 
allocating a data chunk, 3035 * return 0 : no need to allocate a data chunk. 3036 */ 3037 static int btrfs_may_alloc_data_chunk(struct btrfs_fs_info *fs_info, 3038 u64 chunk_offset) 3039 { 3040 struct btrfs_block_group_cache *cache; 3041 u64 bytes_used; 3042 u64 chunk_type; 3043 3044 cache = btrfs_lookup_block_group(fs_info, chunk_offset); 3045 ASSERT(cache); 3046 chunk_type = cache->flags; 3047 btrfs_put_block_group(cache); 3048 3049 if (chunk_type & BTRFS_BLOCK_GROUP_DATA) { 3050 spin_lock(&fs_info->data_sinfo->lock); 3051 bytes_used = fs_info->data_sinfo->bytes_used; 3052 spin_unlock(&fs_info->data_sinfo->lock); 3053 3054 if (!bytes_used) { 3055 struct btrfs_trans_handle *trans; 3056 int ret; 3057 3058 trans = btrfs_join_transaction(fs_info->tree_root); 3059 if (IS_ERR(trans)) 3060 return PTR_ERR(trans); 3061 3062 ret = btrfs_force_chunk_alloc(trans, fs_info, 3063 BTRFS_BLOCK_GROUP_DATA); 3064 btrfs_end_transaction(trans); 3065 if (ret < 0) 3066 return ret; 3067 3068 btrfs_add_raid_kobjects(fs_info); 3069 3070 return 1; 3071 } 3072 } 3073 return 0; 3074 } 3075 3076 static int insert_balance_item(struct btrfs_fs_info *fs_info, 3077 struct btrfs_balance_control *bctl) 3078 { 3079 struct btrfs_root *root = fs_info->tree_root; 3080 struct btrfs_trans_handle *trans; 3081 struct btrfs_balance_item *item; 3082 struct btrfs_disk_balance_args disk_bargs; 3083 struct btrfs_path *path; 3084 struct extent_buffer *leaf; 3085 struct btrfs_key key; 3086 int ret, err; 3087 3088 path = btrfs_alloc_path(); 3089 if (!path) 3090 return -ENOMEM; 3091 3092 trans = btrfs_start_transaction(root, 0); 3093 if (IS_ERR(trans)) { 3094 btrfs_free_path(path); 3095 return PTR_ERR(trans); 3096 } 3097 3098 key.objectid = BTRFS_BALANCE_OBJECTID; 3099 key.type = BTRFS_TEMPORARY_ITEM_KEY; 3100 key.offset = 0; 3101 3102 ret = btrfs_insert_empty_item(trans, root, path, &key, 3103 sizeof(*item)); 3104 if (ret) 3105 goto out; 3106 3107 leaf = path->nodes[0]; 3108 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item); 3109 3110 memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item)); 3111 3112 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data); 3113 btrfs_set_balance_data(leaf, item, &disk_bargs); 3114 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta); 3115 btrfs_set_balance_meta(leaf, item, &disk_bargs); 3116 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys); 3117 btrfs_set_balance_sys(leaf, item, &disk_bargs); 3118 3119 btrfs_set_balance_flags(leaf, item, bctl->flags); 3120 3121 btrfs_mark_buffer_dirty(leaf); 3122 out: 3123 btrfs_free_path(path); 3124 err = btrfs_commit_transaction(trans); 3125 if (err && !ret) 3126 ret = err; 3127 return ret; 3128 } 3129 3130 static int del_balance_item(struct btrfs_fs_info *fs_info) 3131 { 3132 struct btrfs_root *root = fs_info->tree_root; 3133 struct btrfs_trans_handle *trans; 3134 struct btrfs_path *path; 3135 struct btrfs_key key; 3136 int ret, err; 3137 3138 path = btrfs_alloc_path(); 3139 if (!path) 3140 return -ENOMEM; 3141 3142 trans = btrfs_start_transaction(root, 0); 3143 if (IS_ERR(trans)) { 3144 btrfs_free_path(path); 3145 return PTR_ERR(trans); 3146 } 3147 3148 key.objectid = BTRFS_BALANCE_OBJECTID; 3149 key.type = BTRFS_TEMPORARY_ITEM_KEY; 3150 key.offset = 0; 3151 3152 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 3153 if (ret < 0) 3154 goto out; 3155 if (ret > 0) { 3156 ret = -ENOENT; 3157 goto out; 3158 } 3159 3160 ret = btrfs_del_item(trans, root, path); 3161 out: 3162 btrfs_free_path(path); 3163 err = 
btrfs_commit_transaction(trans); 3164 if (err && !ret) 3165 ret = err; 3166 return ret; 3167 } 3168 3169 /* 3170 * This is a heuristic used to reduce the number of chunks balanced on 3171 * resume after balance was interrupted. 3172 */ 3173 static void update_balance_args(struct btrfs_balance_control *bctl) 3174 { 3175 /* 3176 * Turn on soft mode for chunk types that were being converted. 3177 */ 3178 if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) 3179 bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT; 3180 if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) 3181 bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT; 3182 if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) 3183 bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT; 3184 3185 /* 3186 * Turn on usage filter if it is not already used. The idea is 3187 * that chunks that we have already balanced should be 3188 * reasonably full. Don't do it for chunks that are being 3189 * converted - that will keep us from relocating unconverted 3190 * (albeit full) chunks. 3191 */ 3192 if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) && 3193 !(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) && 3194 !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) { 3195 bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE; 3196 bctl->data.usage = 90; 3197 } 3198 if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) && 3199 !(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) && 3200 !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) { 3201 bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE; 3202 bctl->sys.usage = 90; 3203 } 3204 if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) && 3205 !(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) && 3206 !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) { 3207 bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE; 3208 bctl->meta.usage = 90; 3209 } 3210 } 3211 3212 /* 3213 * Clear the balance status in fs_info and delete the balance item from disk. 3214 */ 3215 static void reset_balance_state(struct btrfs_fs_info *fs_info) 3216 { 3217 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 3218 int ret; 3219 3220 BUG_ON(!fs_info->balance_ctl); 3221 3222 spin_lock(&fs_info->balance_lock); 3223 fs_info->balance_ctl = NULL; 3224 spin_unlock(&fs_info->balance_lock); 3225 3226 kfree(bctl); 3227 ret = del_balance_item(fs_info); 3228 if (ret) 3229 btrfs_handle_fs_error(fs_info, ret, NULL); 3230 } 3231 3232 /* 3233 * Balance filters. Return 1 if chunk should be filtered out 3234 * (should not be balanced).
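 *
 * For example (descriptive, see chunk_usage_filter() below): with usage=90 a
 * chunk whose used bytes are below 90% of its size is balanced, while a chunk
 * at 90% or more is filtered out and left in place.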
3235 */ 3236 static int chunk_profiles_filter(u64 chunk_type, 3237 struct btrfs_balance_args *bargs) 3238 { 3239 chunk_type = chunk_to_extended(chunk_type) & 3240 BTRFS_EXTENDED_PROFILE_MASK; 3241 3242 if (bargs->profiles & chunk_type) 3243 return 0; 3244 3245 return 1; 3246 } 3247 3248 static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset, 3249 struct btrfs_balance_args *bargs) 3250 { 3251 struct btrfs_block_group_cache *cache; 3252 u64 chunk_used; 3253 u64 user_thresh_min; 3254 u64 user_thresh_max; 3255 int ret = 1; 3256 3257 cache = btrfs_lookup_block_group(fs_info, chunk_offset); 3258 chunk_used = btrfs_block_group_used(&cache->item); 3259 3260 if (bargs->usage_min == 0) 3261 user_thresh_min = 0; 3262 else 3263 user_thresh_min = div_factor_fine(cache->key.offset, 3264 bargs->usage_min); 3265 3266 if (bargs->usage_max == 0) 3267 user_thresh_max = 1; 3268 else if (bargs->usage_max > 100) 3269 user_thresh_max = cache->key.offset; 3270 else 3271 user_thresh_max = div_factor_fine(cache->key.offset, 3272 bargs->usage_max); 3273 3274 if (user_thresh_min <= chunk_used && chunk_used < user_thresh_max) 3275 ret = 0; 3276 3277 btrfs_put_block_group(cache); 3278 return ret; 3279 } 3280 3281 static int chunk_usage_filter(struct btrfs_fs_info *fs_info, 3282 u64 chunk_offset, struct btrfs_balance_args *bargs) 3283 { 3284 struct btrfs_block_group_cache *cache; 3285 u64 chunk_used, user_thresh; 3286 int ret = 1; 3287 3288 cache = btrfs_lookup_block_group(fs_info, chunk_offset); 3289 chunk_used = btrfs_block_group_used(&cache->item); 3290 3291 if (bargs->usage_min == 0) 3292 user_thresh = 1; 3293 else if (bargs->usage > 100) 3294 user_thresh = cache->key.offset; 3295 else 3296 user_thresh = div_factor_fine(cache->key.offset, 3297 bargs->usage); 3298 3299 if (chunk_used < user_thresh) 3300 ret = 0; 3301 3302 btrfs_put_block_group(cache); 3303 return ret; 3304 } 3305 3306 static int chunk_devid_filter(struct extent_buffer *leaf, 3307 struct btrfs_chunk *chunk, 3308 struct btrfs_balance_args *bargs) 3309 { 3310 struct btrfs_stripe *stripe; 3311 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 3312 int i; 3313 3314 for (i = 0; i < num_stripes; i++) { 3315 stripe = btrfs_stripe_nr(chunk, i); 3316 if (btrfs_stripe_devid(leaf, stripe) == bargs->devid) 3317 return 0; 3318 } 3319 3320 return 1; 3321 } 3322 3323 /* [pstart, pend) */ 3324 static int chunk_drange_filter(struct extent_buffer *leaf, 3325 struct btrfs_chunk *chunk, 3326 struct btrfs_balance_args *bargs) 3327 { 3328 struct btrfs_stripe *stripe; 3329 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 3330 u64 stripe_offset; 3331 u64 stripe_length; 3332 int factor; 3333 int i; 3334 3335 if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID)) 3336 return 0; 3337 3338 if (btrfs_chunk_type(leaf, chunk) & (BTRFS_BLOCK_GROUP_DUP | 3339 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10)) { 3340 factor = num_stripes / 2; 3341 } else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID5) { 3342 factor = num_stripes - 1; 3343 } else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID6) { 3344 factor = num_stripes - 2; 3345 } else { 3346 factor = num_stripes; 3347 } 3348 3349 for (i = 0; i < num_stripes; i++) { 3350 stripe = btrfs_stripe_nr(chunk, i); 3351 if (btrfs_stripe_devid(leaf, stripe) != bargs->devid) 3352 continue; 3353 3354 stripe_offset = btrfs_stripe_offset(leaf, stripe); 3355 stripe_length = btrfs_chunk_length(leaf, chunk); 3356 stripe_length = div_u64(stripe_length, factor); 3357 3358 if (stripe_offset 
< bargs->pend && 3359 stripe_offset + stripe_length > bargs->pstart) 3360 return 0; 3361 } 3362 3363 return 1; 3364 } 3365 3366 /* [vstart, vend) */ 3367 static int chunk_vrange_filter(struct extent_buffer *leaf, 3368 struct btrfs_chunk *chunk, 3369 u64 chunk_offset, 3370 struct btrfs_balance_args *bargs) 3371 { 3372 if (chunk_offset < bargs->vend && 3373 chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart) 3374 /* at least part of the chunk is inside this vrange */ 3375 return 0; 3376 3377 return 1; 3378 } 3379 3380 static int chunk_stripes_range_filter(struct extent_buffer *leaf, 3381 struct btrfs_chunk *chunk, 3382 struct btrfs_balance_args *bargs) 3383 { 3384 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 3385 3386 if (bargs->stripes_min <= num_stripes 3387 && num_stripes <= bargs->stripes_max) 3388 return 0; 3389 3390 return 1; 3391 } 3392 3393 static int chunk_soft_convert_filter(u64 chunk_type, 3394 struct btrfs_balance_args *bargs) 3395 { 3396 if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT)) 3397 return 0; 3398 3399 chunk_type = chunk_to_extended(chunk_type) & 3400 BTRFS_EXTENDED_PROFILE_MASK; 3401 3402 if (bargs->target == chunk_type) 3403 return 1; 3404 3405 return 0; 3406 } 3407 3408 static int should_balance_chunk(struct btrfs_fs_info *fs_info, 3409 struct extent_buffer *leaf, 3410 struct btrfs_chunk *chunk, u64 chunk_offset) 3411 { 3412 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 3413 struct btrfs_balance_args *bargs = NULL; 3414 u64 chunk_type = btrfs_chunk_type(leaf, chunk); 3415 3416 /* type filter */ 3417 if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) & 3418 (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) { 3419 return 0; 3420 } 3421 3422 if (chunk_type & BTRFS_BLOCK_GROUP_DATA) 3423 bargs = &bctl->data; 3424 else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) 3425 bargs = &bctl->sys; 3426 else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA) 3427 bargs = &bctl->meta; 3428 3429 /* profiles filter */ 3430 if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) && 3431 chunk_profiles_filter(chunk_type, bargs)) { 3432 return 0; 3433 } 3434 3435 /* usage filter */ 3436 if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) && 3437 chunk_usage_filter(fs_info, chunk_offset, bargs)) { 3438 return 0; 3439 } else if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) && 3440 chunk_usage_range_filter(fs_info, chunk_offset, bargs)) { 3441 return 0; 3442 } 3443 3444 /* devid filter */ 3445 if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) && 3446 chunk_devid_filter(leaf, chunk, bargs)) { 3447 return 0; 3448 } 3449 3450 /* drange filter, makes sense only with devid filter */ 3451 if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) && 3452 chunk_drange_filter(leaf, chunk, bargs)) { 3453 return 0; 3454 } 3455 3456 /* vrange filter */ 3457 if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) && 3458 chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) { 3459 return 0; 3460 } 3461 3462 /* stripes filter */ 3463 if ((bargs->flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) && 3464 chunk_stripes_range_filter(leaf, chunk, bargs)) { 3465 return 0; 3466 } 3467 3468 /* soft profile changing mode */ 3469 if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) && 3470 chunk_soft_convert_filter(chunk_type, bargs)) { 3471 return 0; 3472 } 3473 3474 /* 3475 * limited by count, must be the last filter 3476 */ 3477 if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT)) { 3478 if (bargs->limit == 0) 3479 return 0; 3480 else 3481 bargs->limit--; 3482 } else if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)) { 3483 /* 3484 * Same logic 
as the 'limit' filter; the minimum cannot be 3485 * determined here because we do not have the global information 3486 * about the count of all chunks that satisfy the filters. 3487 */ 3488 if (bargs->limit_max == 0) 3489 return 0; 3490 else 3491 bargs->limit_max--; 3492 } 3493 3494 return 1; 3495 } 3496 3497 static int __btrfs_balance(struct btrfs_fs_info *fs_info) 3498 { 3499 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 3500 struct btrfs_root *chunk_root = fs_info->chunk_root; 3501 struct btrfs_root *dev_root = fs_info->dev_root; 3502 struct list_head *devices; 3503 struct btrfs_device *device; 3504 u64 old_size; 3505 u64 size_to_free; 3506 u64 chunk_type; 3507 struct btrfs_chunk *chunk; 3508 struct btrfs_path *path = NULL; 3509 struct btrfs_key key; 3510 struct btrfs_key found_key; 3511 struct btrfs_trans_handle *trans; 3512 struct extent_buffer *leaf; 3513 int slot; 3514 int ret; 3515 int enospc_errors = 0; 3516 bool counting = true; 3517 /* The single value limit and min/max limits use the same bytes in the btrfs_balance_args */ 3518 u64 limit_data = bctl->data.limit; 3519 u64 limit_meta = bctl->meta.limit; 3520 u64 limit_sys = bctl->sys.limit; 3521 u32 count_data = 0; 3522 u32 count_meta = 0; 3523 u32 count_sys = 0; 3524 int chunk_reserved = 0; 3525 3526 /* step one, make some room on all the devices */ 3527 devices = &fs_info->fs_devices->devices; 3528 list_for_each_entry(device, devices, dev_list) { 3529 old_size = btrfs_device_get_total_bytes(device); 3530 size_to_free = div_factor(old_size, 1); 3531 size_to_free = min_t(u64, size_to_free, SZ_1M); 3532 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) || 3533 btrfs_device_get_total_bytes(device) - 3534 btrfs_device_get_bytes_used(device) > size_to_free || 3535 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) 3536 continue; 3537 3538 ret = btrfs_shrink_device(device, old_size - size_to_free); 3539 if (ret == -ENOSPC) 3540 break; 3541 if (ret) { 3542 /* btrfs_shrink_device never returns ret > 0 */ 3543 WARN_ON(ret > 0); 3544 goto error; 3545 } 3546 3547 trans = btrfs_start_transaction(dev_root, 0); 3548 if (IS_ERR(trans)) { 3549 ret = PTR_ERR(trans); 3550 btrfs_info_in_rcu(fs_info, 3551 "resize: unable to start transaction after shrinking device %s (error %d), old size %llu, new size %llu", 3552 rcu_str_deref(device->name), ret, 3553 old_size, old_size - size_to_free); 3554 goto error; 3555 } 3556 3557 ret = btrfs_grow_device(trans, device, old_size); 3558 if (ret) { 3559 btrfs_end_transaction(trans); 3560 /* btrfs_grow_device never returns ret > 0 */ 3561 WARN_ON(ret > 0); 3562 btrfs_info_in_rcu(fs_info, 3563 "resize: unable to grow device after shrinking device %s (error %d), old size %llu, new size %llu", 3564 rcu_str_deref(device->name), ret, 3565 old_size, old_size - size_to_free); 3566 goto error; 3567 } 3568 3569 btrfs_end_transaction(trans); 3570 } 3571 3572 /* step two, relocate all the chunks */ 3573 path = btrfs_alloc_path(); 3574 if (!path) { 3575 ret = -ENOMEM; 3576 goto error; 3577 } 3578 3579 /* zero out stat counters */ 3580 spin_lock(&fs_info->balance_lock); 3581 memset(&bctl->stat, 0, sizeof(bctl->stat)); 3582 spin_unlock(&fs_info->balance_lock); 3583 again: 3584 if (!counting) { 3585 /* 3586 * The single value limit and min/max limits use the same bytes 3587 * in the btrfs_balance_args. 3588 */ 3589 bctl->data.limit = limit_data; 3590 bctl->meta.limit = limit_meta; 3591 bctl->sys.limit = limit_sys; 3592 } 3593 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 3594 key.offset = (u64)-1; 3595 key.type = 
BTRFS_CHUNK_ITEM_KEY; 3596 3597 while (1) { 3598 if ((!counting && atomic_read(&fs_info->balance_pause_req)) || 3599 atomic_read(&fs_info->balance_cancel_req)) { 3600 ret = -ECANCELED; 3601 goto error; 3602 } 3603 3604 mutex_lock(&fs_info->delete_unused_bgs_mutex); 3605 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0); 3606 if (ret < 0) { 3607 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 3608 goto error; 3609 } 3610 3611 /* 3612 * this shouldn't happen, it means the last relocate 3613 * failed 3614 */ 3615 if (ret == 0) 3616 BUG(); /* FIXME break ? */ 3617 3618 ret = btrfs_previous_item(chunk_root, path, 0, 3619 BTRFS_CHUNK_ITEM_KEY); 3620 if (ret) { 3621 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 3622 ret = 0; 3623 break; 3624 } 3625 3626 leaf = path->nodes[0]; 3627 slot = path->slots[0]; 3628 btrfs_item_key_to_cpu(leaf, &found_key, slot); 3629 3630 if (found_key.objectid != key.objectid) { 3631 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 3632 break; 3633 } 3634 3635 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk); 3636 chunk_type = btrfs_chunk_type(leaf, chunk); 3637 3638 if (!counting) { 3639 spin_lock(&fs_info->balance_lock); 3640 bctl->stat.considered++; 3641 spin_unlock(&fs_info->balance_lock); 3642 } 3643 3644 ret = should_balance_chunk(fs_info, leaf, chunk, 3645 found_key.offset); 3646 3647 btrfs_release_path(path); 3648 if (!ret) { 3649 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 3650 goto loop; 3651 } 3652 3653 if (counting) { 3654 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 3655 spin_lock(&fs_info->balance_lock); 3656 bctl->stat.expected++; 3657 spin_unlock(&fs_info->balance_lock); 3658 3659 if (chunk_type & BTRFS_BLOCK_GROUP_DATA) 3660 count_data++; 3661 else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) 3662 count_sys++; 3663 else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA) 3664 count_meta++; 3665 3666 goto loop; 3667 } 3668 3669 /* 3670 * Apply limit_min filter, no need to check if the LIMITS 3671 * filter is used, limit_min is 0 by default 3672 */ 3673 if (((chunk_type & BTRFS_BLOCK_GROUP_DATA) && 3674 count_data < bctl->data.limit_min) 3675 || ((chunk_type & BTRFS_BLOCK_GROUP_METADATA) && 3676 count_meta < bctl->meta.limit_min) 3677 || ((chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) && 3678 count_sys < bctl->sys.limit_min)) { 3679 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 3680 goto loop; 3681 } 3682 3683 if (!chunk_reserved) { 3684 /* 3685 * We may be relocating the only data chunk we have, 3686 * which could potentially end up losing the data raid 3687 * profile, so let's allocate an empty one in 3688 * advance.
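 *
 * (btrfs_may_alloc_data_chunk() above returns 1 exactly when such an
 * empty data chunk was allocated, 0 when none was needed.)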
3689 */ 3690 ret = btrfs_may_alloc_data_chunk(fs_info, 3691 found_key.offset); 3692 if (ret < 0) { 3693 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 3694 goto error; 3695 } else if (ret == 1) { 3696 chunk_reserved = 1; 3697 } 3698 } 3699 3700 ret = btrfs_relocate_chunk(fs_info, found_key.offset); 3701 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 3702 if (ret && ret != -ENOSPC) 3703 goto error; 3704 if (ret == -ENOSPC) { 3705 enospc_errors++; 3706 } else { 3707 spin_lock(&fs_info->balance_lock); 3708 bctl->stat.completed++; 3709 spin_unlock(&fs_info->balance_lock); 3710 } 3711 loop: 3712 if (found_key.offset == 0) 3713 break; 3714 key.offset = found_key.offset - 1; 3715 } 3716 3717 if (counting) { 3718 btrfs_release_path(path); 3719 counting = false; 3720 goto again; 3721 } 3722 error: 3723 btrfs_free_path(path); 3724 if (enospc_errors) { 3725 btrfs_info(fs_info, "%d enospc errors during balance", 3726 enospc_errors); 3727 if (!ret) 3728 ret = -ENOSPC; 3729 } 3730 3731 return ret; 3732 } 3733 3734 /** 3735 * alloc_profile_is_valid - see if a given profile is valid and reduced 3736 * @flags: profile to validate 3737 * @extended: if true @flags is treated as an extended profile 3738 */ 3739 static int alloc_profile_is_valid(u64 flags, int extended) 3740 { 3741 u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK : 3742 BTRFS_BLOCK_GROUP_PROFILE_MASK); 3743 3744 flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK; 3745 3746 /* 1) check that all other bits are zeroed */ 3747 if (flags & ~mask) 3748 return 0; 3749 3750 /* 2) see if profile is reduced */ 3751 if (flags == 0) 3752 return !extended; /* "0" is valid for usual profiles */ 3753 3754 /* true if exactly one bit set */ 3755 return (flags & (flags - 1)) == 0; 3756 } 3757 3758 static inline int balance_need_close(struct btrfs_fs_info *fs_info) 3759 { 3760 /* cancel requested || normal exit path */ 3761 return atomic_read(&fs_info->balance_cancel_req) || 3762 (atomic_read(&fs_info->balance_pause_req) == 0 && 3763 atomic_read(&fs_info->balance_cancel_req) == 0); 3764 } 3765 3766 /* Non-zero return value signifies invalidity */ 3767 static inline int validate_convert_profile(struct btrfs_balance_args *bctl_arg, 3768 u64 allowed) 3769 { 3770 return ((bctl_arg->flags & BTRFS_BALANCE_ARGS_CONVERT) && 3771 (!alloc_profile_is_valid(bctl_arg->target, 1) || 3772 (bctl_arg->target & ~allowed))); 3773 } 3774 3775 /* 3776 * Should be called with balance mutex held 3777 */ 3778 int btrfs_balance(struct btrfs_fs_info *fs_info, 3779 struct btrfs_balance_control *bctl, 3780 struct btrfs_ioctl_balance_args *bargs) 3781 { 3782 u64 meta_target, data_target; 3783 u64 allowed; 3784 int mixed = 0; 3785 int ret; 3786 u64 num_devices; 3787 unsigned seq; 3788 3789 if (btrfs_fs_closing(fs_info) || 3790 atomic_read(&fs_info->balance_pause_req) || 3791 atomic_read(&fs_info->balance_cancel_req)) { 3792 ret = -EINVAL; 3793 goto out; 3794 } 3795 3796 allowed = btrfs_super_incompat_flags(fs_info->super_copy); 3797 if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) 3798 mixed = 1; 3799 3800 /* 3801 * In case of mixed groups both data and meta should be picked, 3802 * and identical options should be given for both of them.
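 *
 * An illustrative userspace example (hypothetical invocation): on a mixed-bg
 * filesystem 'btrfs balance start -dusage=50 -musage=50 /mnt' passes this
 * check, while giving only -dusage=50 fails with -EINVAL below, because data
 * and metadata share the same block groups.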
3803 */ 3804 allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA; 3805 if (mixed && (bctl->flags & allowed)) { 3806 if (!(bctl->flags & BTRFS_BALANCE_DATA) || 3807 !(bctl->flags & BTRFS_BALANCE_METADATA) || 3808 memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) { 3809 btrfs_err(fs_info, 3810 "balance: mixed groups data and metadata options must be the same"); 3811 ret = -EINVAL; 3812 goto out; 3813 } 3814 } 3815 3816 num_devices = fs_info->fs_devices->num_devices; 3817 btrfs_dev_replace_read_lock(&fs_info->dev_replace); 3818 if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) { 3819 BUG_ON(num_devices < 1); 3820 num_devices--; 3821 } 3822 btrfs_dev_replace_read_unlock(&fs_info->dev_replace); 3823 allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE | BTRFS_BLOCK_GROUP_DUP; 3824 if (num_devices > 1) 3825 allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1); 3826 if (num_devices > 2) 3827 allowed |= BTRFS_BLOCK_GROUP_RAID5; 3828 if (num_devices > 3) 3829 allowed |= (BTRFS_BLOCK_GROUP_RAID10 | 3830 BTRFS_BLOCK_GROUP_RAID6); 3831 if (validate_convert_profile(&bctl->data, allowed)) { 3832 int index = btrfs_bg_flags_to_raid_index(bctl->data.target); 3833 3834 btrfs_err(fs_info, 3835 "balance: invalid convert data profile %s", 3836 get_raid_name(index)); 3837 ret = -EINVAL; 3838 goto out; 3839 } 3840 if (validate_convert_profile(&bctl->meta, allowed)) { 3841 int index = btrfs_bg_flags_to_raid_index(bctl->meta.target); 3842 3843 btrfs_err(fs_info, 3844 "balance: invalid convert metadata profile %s", 3845 get_raid_name(index)); 3846 ret = -EINVAL; 3847 goto out; 3848 } 3849 if (validate_convert_profile(&bctl->sys, allowed)) { 3850 int index = btrfs_bg_flags_to_raid_index(bctl->sys.target); 3851 3852 btrfs_err(fs_info, 3853 "balance: invalid convert system profile %s", 3854 get_raid_name(index)); 3855 ret = -EINVAL; 3856 goto out; 3857 } 3858 3859 /* allow to reduce meta or sys integrity only if force set */ 3860 allowed = BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 | 3861 BTRFS_BLOCK_GROUP_RAID10 | 3862 BTRFS_BLOCK_GROUP_RAID5 | 3863 BTRFS_BLOCK_GROUP_RAID6; 3864 do { 3865 seq = read_seqbegin(&fs_info->profiles_lock); 3866 3867 if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) && 3868 (fs_info->avail_system_alloc_bits & allowed) && 3869 !(bctl->sys.target & allowed)) || 3870 ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) && 3871 (fs_info->avail_metadata_alloc_bits & allowed) && 3872 !(bctl->meta.target & allowed))) { 3873 if (bctl->flags & BTRFS_BALANCE_FORCE) { 3874 btrfs_info(fs_info, 3875 "balance: force reducing metadata integrity"); 3876 } else { 3877 btrfs_err(fs_info, 3878 "balance: reduces metadata integrity, use --force if you want this"); 3879 ret = -EINVAL; 3880 goto out; 3881 } 3882 } 3883 } while (read_seqretry(&fs_info->profiles_lock, seq)); 3884 3885 /* if we're not converting, the target field is uninitialized */ 3886 meta_target = (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) ? 3887 bctl->meta.target : fs_info->avail_metadata_alloc_bits; 3888 data_target = (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) ? 
3889 bctl->data.target : fs_info->avail_data_alloc_bits; 3890 if (btrfs_get_num_tolerated_disk_barrier_failures(meta_target) < 3891 btrfs_get_num_tolerated_disk_barrier_failures(data_target)) { 3892 int meta_index = btrfs_bg_flags_to_raid_index(meta_target); 3893 int data_index = btrfs_bg_flags_to_raid_index(data_target); 3894 3895 btrfs_warn(fs_info, 3896 "balance: metadata profile %s has lower redundancy than data profile %s", 3897 get_raid_name(meta_index), get_raid_name(data_index)); 3898 } 3899 3900 ret = insert_balance_item(fs_info, bctl); 3901 if (ret && ret != -EEXIST) 3902 goto out; 3903 3904 if (!(bctl->flags & BTRFS_BALANCE_RESUME)) { 3905 BUG_ON(ret == -EEXIST); 3906 BUG_ON(fs_info->balance_ctl); 3907 spin_lock(&fs_info->balance_lock); 3908 fs_info->balance_ctl = bctl; 3909 spin_unlock(&fs_info->balance_lock); 3910 } else { 3911 BUG_ON(ret != -EEXIST); 3912 spin_lock(&fs_info->balance_lock); 3913 update_balance_args(bctl); 3914 spin_unlock(&fs_info->balance_lock); 3915 } 3916 3917 ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 3918 set_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags); 3919 mutex_unlock(&fs_info->balance_mutex); 3920 3921 ret = __btrfs_balance(fs_info); 3922 3923 mutex_lock(&fs_info->balance_mutex); 3924 clear_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags); 3925 3926 if (bargs) { 3927 memset(bargs, 0, sizeof(*bargs)); 3928 btrfs_update_ioctl_balance_args(fs_info, bargs); 3929 } 3930 3931 if ((ret && ret != -ECANCELED && ret != -ENOSPC) || 3932 balance_need_close(fs_info)) { 3933 reset_balance_state(fs_info); 3934 clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags); 3935 } 3936 3937 wake_up(&fs_info->balance_wait_q); 3938 3939 return ret; 3940 out: 3941 if (bctl->flags & BTRFS_BALANCE_RESUME) 3942 reset_balance_state(fs_info); 3943 else 3944 kfree(bctl); 3945 clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags); 3946 3947 return ret; 3948 } 3949 3950 static int balance_kthread(void *data) 3951 { 3952 struct btrfs_fs_info *fs_info = data; 3953 int ret = 0; 3954 3955 mutex_lock(&fs_info->balance_mutex); 3956 if (fs_info->balance_ctl) { 3957 btrfs_info(fs_info, "balance: resuming"); 3958 ret = btrfs_balance(fs_info, fs_info->balance_ctl, NULL); 3959 } 3960 mutex_unlock(&fs_info->balance_mutex); 3961 3962 return ret; 3963 } 3964 3965 int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info) 3966 { 3967 struct task_struct *tsk; 3968 3969 mutex_lock(&fs_info->balance_mutex); 3970 if (!fs_info->balance_ctl) { 3971 mutex_unlock(&fs_info->balance_mutex); 3972 return 0; 3973 } 3974 mutex_unlock(&fs_info->balance_mutex); 3975 3976 if (btrfs_test_opt(fs_info, SKIP_BALANCE)) { 3977 btrfs_info(fs_info, "balance: resume skipped"); 3978 return 0; 3979 } 3980 3981 /* 3982 * A ro->rw remount sequence should continue with the paused balance 3983 * regardless of who pauses it, system or the user as of now, so set 3984 * the resume flag. 
3985 */ 3986 spin_lock(&fs_info->balance_lock); 3987 fs_info->balance_ctl->flags |= BTRFS_BALANCE_RESUME; 3988 spin_unlock(&fs_info->balance_lock); 3989 3990 tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance"); 3991 return PTR_ERR_OR_ZERO(tsk); 3992 } 3993 3994 int btrfs_recover_balance(struct btrfs_fs_info *fs_info) 3995 { 3996 struct btrfs_balance_control *bctl; 3997 struct btrfs_balance_item *item; 3998 struct btrfs_disk_balance_args disk_bargs; 3999 struct btrfs_path *path; 4000 struct extent_buffer *leaf; 4001 struct btrfs_key key; 4002 int ret; 4003 4004 path = btrfs_alloc_path(); 4005 if (!path) 4006 return -ENOMEM; 4007 4008 key.objectid = BTRFS_BALANCE_OBJECTID; 4009 key.type = BTRFS_TEMPORARY_ITEM_KEY; 4010 key.offset = 0; 4011 4012 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0); 4013 if (ret < 0) 4014 goto out; 4015 if (ret > 0) { /* ret = -ENOENT; */ 4016 ret = 0; 4017 goto out; 4018 } 4019 4020 bctl = kzalloc(sizeof(*bctl), GFP_NOFS); 4021 if (!bctl) { 4022 ret = -ENOMEM; 4023 goto out; 4024 } 4025 4026 leaf = path->nodes[0]; 4027 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item); 4028 4029 bctl->flags = btrfs_balance_flags(leaf, item); 4030 bctl->flags |= BTRFS_BALANCE_RESUME; 4031 4032 btrfs_balance_data(leaf, item, &disk_bargs); 4033 btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs); 4034 btrfs_balance_meta(leaf, item, &disk_bargs); 4035 btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs); 4036 btrfs_balance_sys(leaf, item, &disk_bargs); 4037 btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs); 4038 4039 /* 4040 * This should never happen, as the paused balance state is recovered 4041 * during mount without any chance of other exclusive ops to collide. 4042 * 4043 * This gives the exclusive op status to balance and keeps in paused 4044 * state until user intervention (cancel or umount). If the ownership 4045 * cannot be assigned, show a message but do not fail. The balance 4046 * is in a paused state and must have fs_info::balance_ctl properly 4047 * set up. 
4048 */ 4049 if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags)) 4050 btrfs_warn(fs_info, 4051 "balance: cannot set exclusive op status, resume manually"); 4052 4053 mutex_lock(&fs_info->balance_mutex); 4054 BUG_ON(fs_info->balance_ctl); 4055 spin_lock(&fs_info->balance_lock); 4056 fs_info->balance_ctl = bctl; 4057 spin_unlock(&fs_info->balance_lock); 4058 mutex_unlock(&fs_info->balance_mutex); 4059 out: 4060 btrfs_free_path(path); 4061 return ret; 4062 } 4063 4064 int btrfs_pause_balance(struct btrfs_fs_info *fs_info) 4065 { 4066 int ret = 0; 4067 4068 mutex_lock(&fs_info->balance_mutex); 4069 if (!fs_info->balance_ctl) { 4070 mutex_unlock(&fs_info->balance_mutex); 4071 return -ENOTCONN; 4072 } 4073 4074 if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) { 4075 atomic_inc(&fs_info->balance_pause_req); 4076 mutex_unlock(&fs_info->balance_mutex); 4077 4078 wait_event(fs_info->balance_wait_q, 4079 !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4080 4081 mutex_lock(&fs_info->balance_mutex); 4082 /* we are good with balance_ctl ripped off from under us */ 4083 BUG_ON(test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4084 atomic_dec(&fs_info->balance_pause_req); 4085 } else { 4086 ret = -ENOTCONN; 4087 } 4088 4089 mutex_unlock(&fs_info->balance_mutex); 4090 return ret; 4091 } 4092 4093 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info) 4094 { 4095 mutex_lock(&fs_info->balance_mutex); 4096 if (!fs_info->balance_ctl) { 4097 mutex_unlock(&fs_info->balance_mutex); 4098 return -ENOTCONN; 4099 } 4100 4101 /* 4102 * A paused balance with the item stored on disk can be resumed at 4103 * mount time if the mount is read-write. Otherwise it's still paused 4104 * and we must not allow cancelling as it deletes the item. 4105 */ 4106 if (sb_rdonly(fs_info->sb)) { 4107 mutex_unlock(&fs_info->balance_mutex); 4108 return -EROFS; 4109 } 4110 4111 atomic_inc(&fs_info->balance_cancel_req); 4112 /* 4113 * if we are running just wait and return, balance item is 4114 * deleted in btrfs_balance in this case 4115 */ 4116 if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) { 4117 mutex_unlock(&fs_info->balance_mutex); 4118 wait_event(fs_info->balance_wait_q, 4119 !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4120 mutex_lock(&fs_info->balance_mutex); 4121 } else { 4122 mutex_unlock(&fs_info->balance_mutex); 4123 /* 4124 * Lock released to allow other waiters to continue, we'll 4125 * reexamine the status again. 
4126 */ 4127 mutex_lock(&fs_info->balance_mutex); 4128 4129 if (fs_info->balance_ctl) { 4130 reset_balance_state(fs_info); 4131 clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags); 4132 btrfs_info(fs_info, "balance: canceled"); 4133 } 4134 } 4135 4136 BUG_ON(fs_info->balance_ctl || 4137 test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4138 atomic_dec(&fs_info->balance_cancel_req); 4139 mutex_unlock(&fs_info->balance_mutex); 4140 return 0; 4141 } 4142 4143 static int btrfs_uuid_scan_kthread(void *data) 4144 { 4145 struct btrfs_fs_info *fs_info = data; 4146 struct btrfs_root *root = fs_info->tree_root; 4147 struct btrfs_key key; 4148 struct btrfs_path *path = NULL; 4149 int ret = 0; 4150 struct extent_buffer *eb; 4151 int slot; 4152 struct btrfs_root_item root_item; 4153 u32 item_size; 4154 struct btrfs_trans_handle *trans = NULL; 4155 4156 path = btrfs_alloc_path(); 4157 if (!path) { 4158 ret = -ENOMEM; 4159 goto out; 4160 } 4161 4162 key.objectid = 0; 4163 key.type = BTRFS_ROOT_ITEM_KEY; 4164 key.offset = 0; 4165 4166 while (1) { 4167 ret = btrfs_search_forward(root, &key, path, 4168 BTRFS_OLDEST_GENERATION); 4169 if (ret) { 4170 if (ret > 0) 4171 ret = 0; 4172 break; 4173 } 4174 4175 if (key.type != BTRFS_ROOT_ITEM_KEY || 4176 (key.objectid < BTRFS_FIRST_FREE_OBJECTID && 4177 key.objectid != BTRFS_FS_TREE_OBJECTID) || 4178 key.objectid > BTRFS_LAST_FREE_OBJECTID) 4179 goto skip; 4180 4181 eb = path->nodes[0]; 4182 slot = path->slots[0]; 4183 item_size = btrfs_item_size_nr(eb, slot); 4184 if (item_size < sizeof(root_item)) 4185 goto skip; 4186 4187 read_extent_buffer(eb, &root_item, 4188 btrfs_item_ptr_offset(eb, slot), 4189 (int)sizeof(root_item)); 4190 if (btrfs_root_refs(&root_item) == 0) 4191 goto skip; 4192 4193 if (!btrfs_is_empty_uuid(root_item.uuid) || 4194 !btrfs_is_empty_uuid(root_item.received_uuid)) { 4195 if (trans) 4196 goto update_tree; 4197 4198 btrfs_release_path(path); 4199 /* 4200 * 1 - subvol uuid item 4201 * 1 - received_subvol uuid item 4202 */ 4203 trans = btrfs_start_transaction(fs_info->uuid_root, 2); 4204 if (IS_ERR(trans)) { 4205 ret = PTR_ERR(trans); 4206 break; 4207 } 4208 continue; 4209 } else { 4210 goto skip; 4211 } 4212 update_tree: 4213 if (!btrfs_is_empty_uuid(root_item.uuid)) { 4214 ret = btrfs_uuid_tree_add(trans, root_item.uuid, 4215 BTRFS_UUID_KEY_SUBVOL, 4216 key.objectid); 4217 if (ret < 0) { 4218 btrfs_warn(fs_info, "uuid_tree_add failed %d", 4219 ret); 4220 break; 4221 } 4222 } 4223 4224 if (!btrfs_is_empty_uuid(root_item.received_uuid)) { 4225 ret = btrfs_uuid_tree_add(trans, 4226 root_item.received_uuid, 4227 BTRFS_UUID_KEY_RECEIVED_SUBVOL, 4228 key.objectid); 4229 if (ret < 0) { 4230 btrfs_warn(fs_info, "uuid_tree_add failed %d", 4231 ret); 4232 break; 4233 } 4234 } 4235 4236 skip: 4237 if (trans) { 4238 ret = btrfs_end_transaction(trans); 4239 trans = NULL; 4240 if (ret) 4241 break; 4242 } 4243 4244 btrfs_release_path(path); 4245 if (key.offset < (u64)-1) { 4246 key.offset++; 4247 } else if (key.type < BTRFS_ROOT_ITEM_KEY) { 4248 key.offset = 0; 4249 key.type = BTRFS_ROOT_ITEM_KEY; 4250 } else if (key.objectid < (u64)-1) { 4251 key.offset = 0; 4252 key.type = BTRFS_ROOT_ITEM_KEY; 4253 key.objectid++; 4254 } else { 4255 break; 4256 } 4257 cond_resched(); 4258 } 4259 4260 out: 4261 btrfs_free_path(path); 4262 if (trans && !IS_ERR(trans)) 4263 btrfs_end_transaction(trans); 4264 if (ret) 4265 btrfs_warn(fs_info, "btrfs_uuid_scan_kthread failed %d", ret); 4266 else 4267 set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags); 4268 
up(&fs_info->uuid_tree_rescan_sem); 4269 return 0; 4270 } 4271 4272 /* 4273 * Callback for btrfs_uuid_tree_iterate(). 4274 * returns: 4275 * 0 check succeeded, the entry is not outdated. 4276 * < 0 if an error occurred. 4277 * > 0 if the check failed, which means the caller shall remove the entry. 4278 */ 4279 static int btrfs_check_uuid_tree_entry(struct btrfs_fs_info *fs_info, 4280 u8 *uuid, u8 type, u64 subid) 4281 { 4282 struct btrfs_key key; 4283 int ret = 0; 4284 struct btrfs_root *subvol_root; 4285 4286 if (type != BTRFS_UUID_KEY_SUBVOL && 4287 type != BTRFS_UUID_KEY_RECEIVED_SUBVOL) 4288 goto out; 4289 4290 key.objectid = subid; 4291 key.type = BTRFS_ROOT_ITEM_KEY; 4292 key.offset = (u64)-1; 4293 subvol_root = btrfs_read_fs_root_no_name(fs_info, &key); 4294 if (IS_ERR(subvol_root)) { 4295 ret = PTR_ERR(subvol_root); 4296 if (ret == -ENOENT) 4297 ret = 1; 4298 goto out; 4299 } 4300 4301 switch (type) { 4302 case BTRFS_UUID_KEY_SUBVOL: 4303 if (memcmp(uuid, subvol_root->root_item.uuid, BTRFS_UUID_SIZE)) 4304 ret = 1; 4305 break; 4306 case BTRFS_UUID_KEY_RECEIVED_SUBVOL: 4307 if (memcmp(uuid, subvol_root->root_item.received_uuid, 4308 BTRFS_UUID_SIZE)) 4309 ret = 1; 4310 break; 4311 } 4312 4313 out: 4314 return ret; 4315 } 4316 4317 static int btrfs_uuid_rescan_kthread(void *data) 4318 { 4319 struct btrfs_fs_info *fs_info = (struct btrfs_fs_info *)data; 4320 int ret; 4321 4322 /* 4323 * 1st step is to iterate through the existing UUID tree and 4324 * to delete all entries that contain outdated data. 4325 * 2nd step is to add all missing entries to the UUID tree. 4326 */ 4327 ret = btrfs_uuid_tree_iterate(fs_info, btrfs_check_uuid_tree_entry); 4328 if (ret < 0) { 4329 btrfs_warn(fs_info, "iterating uuid_tree failed %d", ret); 4330 up(&fs_info->uuid_tree_rescan_sem); 4331 return ret; 4332 } 4333 return btrfs_uuid_scan_kthread(data); 4334 } 4335 4336 int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info) 4337 { 4338 struct btrfs_trans_handle *trans; 4339 struct btrfs_root *tree_root = fs_info->tree_root; 4340 struct btrfs_root *uuid_root; 4341 struct task_struct *task; 4342 int ret; 4343 4344 /* 4345 * 1 - root node 4346 * 1 - root item 4347 */ 4348 trans = btrfs_start_transaction(tree_root, 2); 4349 if (IS_ERR(trans)) 4350 return PTR_ERR(trans); 4351 4352 uuid_root = btrfs_create_tree(trans, fs_info, 4353 BTRFS_UUID_TREE_OBJECTID); 4354 if (IS_ERR(uuid_root)) { 4355 ret = PTR_ERR(uuid_root); 4356 btrfs_abort_transaction(trans, ret); 4357 btrfs_end_transaction(trans); 4358 return ret; 4359 } 4360 4361 fs_info->uuid_root = uuid_root; 4362 4363 ret = btrfs_commit_transaction(trans); 4364 if (ret) 4365 return ret; 4366 4367 down(&fs_info->uuid_tree_rescan_sem); 4368 task = kthread_run(btrfs_uuid_scan_kthread, fs_info, "btrfs-uuid"); 4369 if (IS_ERR(task)) { 4370 /* fs_info->update_uuid_tree_gen remains 0 in all error case */ 4371 btrfs_warn(fs_info, "failed to start uuid_scan task"); 4372 up(&fs_info->uuid_tree_rescan_sem); 4373 return PTR_ERR(task); 4374 } 4375 4376 return 0; 4377 } 4378 4379 int btrfs_check_uuid_tree(struct btrfs_fs_info *fs_info) 4380 { 4381 struct task_struct *task; 4382 4383 down(&fs_info->uuid_tree_rescan_sem); 4384 task = kthread_run(btrfs_uuid_rescan_kthread, fs_info, "btrfs-uuid"); 4385 if (IS_ERR(task)) { 4386 /* fs_info->update_uuid_tree_gen remains 0 in all error case */ 4387 btrfs_warn(fs_info, "failed to start uuid_rescan task"); 4388 up(&fs_info->uuid_tree_rescan_sem); 4389 return PTR_ERR(task); 4390 } 4391 4392 return 0; 4393 } 4394 4395 /* 4396 * 
shrinking a device means finding all of the device extents past
 * the new size, and then following the back refs to the chunks.
 * The chunk relocation code actually frees the device extent.
 */
int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_dev_extent *dev_extent = NULL;
	struct btrfs_path *path;
	u64 length;
	u64 chunk_offset;
	int ret;
	int slot;
	int failed = 0;
	bool retried = false;
	bool checked_pending_chunks = false;
	struct extent_buffer *l;
	struct btrfs_key key;
	struct btrfs_super_block *super_copy = fs_info->super_copy;
	u64 old_total = btrfs_super_total_bytes(super_copy);
	u64 old_size = btrfs_device_get_total_bytes(device);
	u64 diff;

	new_size = round_down(new_size, fs_info->sectorsize);
	diff = round_down(old_size - new_size, fs_info->sectorsize);

	if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
		return -EINVAL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = READA_BACK;

	mutex_lock(&fs_info->chunk_mutex);

	btrfs_device_set_total_bytes(device, new_size);
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
		device->fs_devices->total_rw_bytes -= diff;
		atomic64_sub(diff, &fs_info->free_chunk_space);
	}
	mutex_unlock(&fs_info->chunk_mutex);

again:
	key.objectid = device->devid;
	key.offset = (u64)-1;
	key.type = BTRFS_DEV_EXTENT_KEY;

	do {
		mutex_lock(&fs_info->delete_unused_bgs_mutex);
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			goto done;
		}

		ret = btrfs_previous_item(root, path, 0, key.type);
		if (ret)
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
		if (ret < 0)
			goto done;
		if (ret) {
			ret = 0;
			btrfs_release_path(path);
			break;
		}

		l = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(l, &key, path->slots[0]);

		if (key.objectid != device->devid) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			btrfs_release_path(path);
			break;
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		length = btrfs_dev_extent_length(l, dev_extent);

		if (key.offset + length <= new_size) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			btrfs_release_path(path);
			break;
		}

		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
		btrfs_release_path(path);

		/*
		 * We may be relocating the only data chunk we have,
		 * which could potentially end up with losing data's
		 * raid profile, so let's allocate an empty one in
		 * advance.
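		 * The balance loop above does the same pre-allocation before
		 * relocating a chunk.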
 */
		ret = btrfs_may_alloc_data_chunk(fs_info, chunk_offset);
		if (ret < 0) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			goto done;
		}

		ret = btrfs_relocate_chunk(fs_info, chunk_offset);
		mutex_unlock(&fs_info->delete_unused_bgs_mutex);
		if (ret && ret != -ENOSPC)
			goto done;
		if (ret == -ENOSPC)
			failed++;
	} while (key.offset-- > 0);

	if (failed && !retried) {
		failed = 0;
		retried = true;
		goto again;
	} else if (failed && retried) {
		ret = -ENOSPC;
		goto done;
	}

	/* Shrinking succeeded, else we would be at "done". */
	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto done;
	}

	mutex_lock(&fs_info->chunk_mutex);

	/*
	 * We checked in the above loop all device extents that were already in
	 * the device tree. However before we have updated the device's
	 * total_bytes to the new size, we might have had chunk allocations that
	 * have not completed yet (new block groups attached to transaction
	 * handles), and therefore their device extents were not yet in the
	 * device tree and we missed them in the loop above. So if we have any
	 * pending chunk using a device extent that overlaps the device range
	 * that we cannot use anymore, commit the current transaction and
	 * repeat the search on the device tree - this way we guarantee we will
	 * not have chunks using device extents that end beyond 'new_size'.
	 */
	if (!checked_pending_chunks) {
		u64 start = new_size;
		u64 len = old_size - new_size;

		if (contains_pending_extent(trans->transaction, device,
					    &start, len)) {
			mutex_unlock(&fs_info->chunk_mutex);
			checked_pending_chunks = true;
			failed = 0;
			retried = false;
			ret = btrfs_commit_transaction(trans);
			if (ret)
				goto done;
			goto again;
		}
	}

	btrfs_device_set_disk_total_bytes(device, new_size);
	if (list_empty(&device->resized_list))
		list_add_tail(&device->resized_list,
			      &fs_info->fs_devices->resized_devices);

	WARN_ON(diff > old_total);
	btrfs_set_super_total_bytes(super_copy,
			round_down(old_total - diff, fs_info->sectorsize));
	mutex_unlock(&fs_info->chunk_mutex);

	/* Now btrfs_update_device() will change the on-disk size.
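	 * The in-memory totals (device and super_copy) were already updated
	 * above.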
 */
	ret = btrfs_update_device(trans, device);
	btrfs_end_transaction(trans);
done:
	btrfs_free_path(path);
	if (ret) {
		mutex_lock(&fs_info->chunk_mutex);
		btrfs_device_set_total_bytes(device, old_size);
		if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
			device->fs_devices->total_rw_bytes += diff;
		atomic64_add(diff, &fs_info->free_chunk_space);
		mutex_unlock(&fs_info->chunk_mutex);
	}
	return ret;
}

static int btrfs_add_system_chunk(struct btrfs_fs_info *fs_info,
				  struct btrfs_key *key,
				  struct btrfs_chunk *chunk, int item_size)
{
	struct btrfs_super_block *super_copy = fs_info->super_copy;
	struct btrfs_disk_key disk_key;
	u32 array_size;
	u8 *ptr;

	mutex_lock(&fs_info->chunk_mutex);
	array_size = btrfs_super_sys_array_size(super_copy);
	if (array_size + item_size + sizeof(disk_key)
			> BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
		mutex_unlock(&fs_info->chunk_mutex);
		return -EFBIG;
	}

	ptr = super_copy->sys_chunk_array + array_size;
	btrfs_cpu_key_to_disk(&disk_key, key);
	memcpy(ptr, &disk_key, sizeof(disk_key));
	ptr += sizeof(disk_key);
	memcpy(ptr, chunk, item_size);
	item_size += sizeof(disk_key);
	btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
	mutex_unlock(&fs_info->chunk_mutex);

	return 0;
}

/*
 * Sort the devices in descending order by max_avail, then by total_avail.
 */
static int btrfs_cmp_device_info(const void *a, const void *b)
{
	const struct btrfs_device_info *di_a = a;
	const struct btrfs_device_info *di_b = b;

	if (di_a->max_avail > di_b->max_avail)
		return -1;
	if (di_a->max_avail < di_b->max_avail)
		return 1;
	if (di_a->total_avail > di_b->total_avail)
		return -1;
	if (di_a->total_avail < di_b->total_avail)
		return 1;
	return 0;
}

static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
{
	if (!(type & BTRFS_BLOCK_GROUP_RAID56_MASK))
		return;

	btrfs_set_fs_incompat(info, RAID56);
}

#define BTRFS_MAX_DEVS(info) ((BTRFS_MAX_ITEM_SIZE(info)	\
			- sizeof(struct btrfs_chunk))		\
			/ sizeof(struct btrfs_stripe) + 1)

#define BTRFS_MAX_DEVS_SYS_CHUNK ((BTRFS_SYSTEM_CHUNK_ARRAY_SIZE	\
				- 2 * sizeof(struct btrfs_disk_key)	\
				- 2 * sizeof(struct btrfs_chunk))	\
				/ sizeof(struct btrfs_stripe) + 1)

static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
			       u64 start, u64 type)
{
	struct btrfs_fs_info *info = trans->fs_info;
	struct btrfs_fs_devices *fs_devices = info->fs_devices;
	struct btrfs_device *device;
	struct map_lookup *map = NULL;
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	struct btrfs_device_info *devices_info = NULL;
	u64 total_avail;
	int num_stripes;	/* total number of stripes to allocate */
	int data_stripes;	/* number of stripes that count for
				   block group size */
	int sub_stripes;	/* sub_stripes info for map */
	int dev_stripes;	/* stripes per dev */
	int devs_max;		/* max devs to use */
	int devs_min;		/* min devs needed */
	int devs_increment;	/* ndevs has to be a multiple of this */
	int ncopies;		/* how many copies of the data there are */
	int ret;
	u64 max_stripe_size;
	u64 max_chunk_size;
	u64 stripe_size;
	u64 num_bytes;
	int ndevs;
	int i;
	int j;
	int index;
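	/*
	 * All of the per-profile parameters used below (sub_stripes,
	 * dev_stripes, devs_max, devs_min, devs_increment, ncopies) are
	 * taken from btrfs_raid_array, indexed by the profile in @type.
	 */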
4675 4676 BUG_ON(!alloc_profile_is_valid(type, 0)); 4677 4678 if (list_empty(&fs_devices->alloc_list)) { 4679 if (btrfs_test_opt(info, ENOSPC_DEBUG)) 4680 btrfs_debug(info, "%s: no writable device", __func__); 4681 return -ENOSPC; 4682 } 4683 4684 index = btrfs_bg_flags_to_raid_index(type); 4685 4686 sub_stripes = btrfs_raid_array[index].sub_stripes; 4687 dev_stripes = btrfs_raid_array[index].dev_stripes; 4688 devs_max = btrfs_raid_array[index].devs_max; 4689 devs_min = btrfs_raid_array[index].devs_min; 4690 devs_increment = btrfs_raid_array[index].devs_increment; 4691 ncopies = btrfs_raid_array[index].ncopies; 4692 4693 if (type & BTRFS_BLOCK_GROUP_DATA) { 4694 max_stripe_size = SZ_1G; 4695 max_chunk_size = 10 * max_stripe_size; 4696 if (!devs_max) 4697 devs_max = BTRFS_MAX_DEVS(info); 4698 } else if (type & BTRFS_BLOCK_GROUP_METADATA) { 4699 /* for larger filesystems, use larger metadata chunks */ 4700 if (fs_devices->total_rw_bytes > 50ULL * SZ_1G) 4701 max_stripe_size = SZ_1G; 4702 else 4703 max_stripe_size = SZ_256M; 4704 max_chunk_size = max_stripe_size; 4705 if (!devs_max) 4706 devs_max = BTRFS_MAX_DEVS(info); 4707 } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) { 4708 max_stripe_size = SZ_32M; 4709 max_chunk_size = 2 * max_stripe_size; 4710 if (!devs_max) 4711 devs_max = BTRFS_MAX_DEVS_SYS_CHUNK; 4712 } else { 4713 btrfs_err(info, "invalid chunk type 0x%llx requested", 4714 type); 4715 BUG_ON(1); 4716 } 4717 4718 /* we don't want a chunk larger than 10% of writeable space */ 4719 max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1), 4720 max_chunk_size); 4721 4722 devices_info = kcalloc(fs_devices->rw_devices, sizeof(*devices_info), 4723 GFP_NOFS); 4724 if (!devices_info) 4725 return -ENOMEM; 4726 4727 /* 4728 * in the first pass through the devices list, we gather information 4729 * about the available holes on each device. 4730 */ 4731 ndevs = 0; 4732 list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) { 4733 u64 max_avail; 4734 u64 dev_offset; 4735 4736 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 4737 WARN(1, KERN_ERR 4738 "BTRFS: read-only device in alloc_list\n"); 4739 continue; 4740 } 4741 4742 if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, 4743 &device->dev_state) || 4744 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) 4745 continue; 4746 4747 if (device->total_bytes > device->bytes_used) 4748 total_avail = device->total_bytes - device->bytes_used; 4749 else 4750 total_avail = 0; 4751 4752 /* If there is no space on this device, skip it. 
*/ 4753 if (total_avail == 0) 4754 continue; 4755 4756 ret = find_free_dev_extent(trans, device, 4757 max_stripe_size * dev_stripes, 4758 &dev_offset, &max_avail); 4759 if (ret && ret != -ENOSPC) 4760 goto error; 4761 4762 if (ret == 0) 4763 max_avail = max_stripe_size * dev_stripes; 4764 4765 if (max_avail < BTRFS_STRIPE_LEN * dev_stripes) { 4766 if (btrfs_test_opt(info, ENOSPC_DEBUG)) 4767 btrfs_debug(info, 4768 "%s: devid %llu has no free space, have=%llu want=%u", 4769 __func__, device->devid, max_avail, 4770 BTRFS_STRIPE_LEN * dev_stripes); 4771 continue; 4772 } 4773 4774 if (ndevs == fs_devices->rw_devices) { 4775 WARN(1, "%s: found more than %llu devices\n", 4776 __func__, fs_devices->rw_devices); 4777 break; 4778 } 4779 devices_info[ndevs].dev_offset = dev_offset; 4780 devices_info[ndevs].max_avail = max_avail; 4781 devices_info[ndevs].total_avail = total_avail; 4782 devices_info[ndevs].dev = device; 4783 ++ndevs; 4784 } 4785 4786 /* 4787 * now sort the devices by hole size / available space 4788 */ 4789 sort(devices_info, ndevs, sizeof(struct btrfs_device_info), 4790 btrfs_cmp_device_info, NULL); 4791 4792 /* round down to number of usable stripes */ 4793 ndevs = round_down(ndevs, devs_increment); 4794 4795 if (ndevs < devs_min) { 4796 ret = -ENOSPC; 4797 if (btrfs_test_opt(info, ENOSPC_DEBUG)) { 4798 btrfs_debug(info, 4799 "%s: not enough devices with free space: have=%d minimum required=%d", 4800 __func__, ndevs, devs_min); 4801 } 4802 goto error; 4803 } 4804 4805 ndevs = min(ndevs, devs_max); 4806 4807 /* 4808 * The primary goal is to maximize the number of stripes, so use as 4809 * many devices as possible, even if the stripes are not maximum sized. 4810 * 4811 * The DUP profile stores more than one stripe per device, the 4812 * max_avail is the total size so we have to adjust. 
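	 * For example, a profile that puts two stripes on each device can
	 * use at most half of max_avail per stripe, hence the division by
	 * dev_stripes below.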
4813 */ 4814 stripe_size = div_u64(devices_info[ndevs - 1].max_avail, dev_stripes); 4815 num_stripes = ndevs * dev_stripes; 4816 4817 /* 4818 * this will have to be fixed for RAID1 and RAID10 over 4819 * more drives 4820 */ 4821 data_stripes = num_stripes / ncopies; 4822 4823 if (type & BTRFS_BLOCK_GROUP_RAID5) 4824 data_stripes = num_stripes - 1; 4825 4826 if (type & BTRFS_BLOCK_GROUP_RAID6) 4827 data_stripes = num_stripes - 2; 4828 4829 /* 4830 * Use the number of data stripes to figure out how big this chunk 4831 * is really going to be in terms of logical address space, 4832 * and compare that answer with the max chunk size 4833 */ 4834 if (stripe_size * data_stripes > max_chunk_size) { 4835 stripe_size = div_u64(max_chunk_size, data_stripes); 4836 4837 /* bump the answer up to a 16MB boundary */ 4838 stripe_size = round_up(stripe_size, SZ_16M); 4839 4840 /* 4841 * But don't go higher than the limits we found while searching 4842 * for free extents 4843 */ 4844 stripe_size = min(devices_info[ndevs - 1].max_avail, 4845 stripe_size); 4846 } 4847 4848 /* align to BTRFS_STRIPE_LEN */ 4849 stripe_size = round_down(stripe_size, BTRFS_STRIPE_LEN); 4850 4851 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS); 4852 if (!map) { 4853 ret = -ENOMEM; 4854 goto error; 4855 } 4856 map->num_stripes = num_stripes; 4857 4858 for (i = 0; i < ndevs; ++i) { 4859 for (j = 0; j < dev_stripes; ++j) { 4860 int s = i * dev_stripes + j; 4861 map->stripes[s].dev = devices_info[i].dev; 4862 map->stripes[s].physical = devices_info[i].dev_offset + 4863 j * stripe_size; 4864 } 4865 } 4866 map->stripe_len = BTRFS_STRIPE_LEN; 4867 map->io_align = BTRFS_STRIPE_LEN; 4868 map->io_width = BTRFS_STRIPE_LEN; 4869 map->type = type; 4870 map->sub_stripes = sub_stripes; 4871 4872 num_bytes = stripe_size * data_stripes; 4873 4874 trace_btrfs_chunk_alloc(info, map, start, num_bytes); 4875 4876 em = alloc_extent_map(); 4877 if (!em) { 4878 kfree(map); 4879 ret = -ENOMEM; 4880 goto error; 4881 } 4882 set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags); 4883 em->map_lookup = map; 4884 em->start = start; 4885 em->len = num_bytes; 4886 em->block_start = 0; 4887 em->block_len = em->len; 4888 em->orig_block_len = stripe_size; 4889 4890 em_tree = &info->mapping_tree.map_tree; 4891 write_lock(&em_tree->lock); 4892 ret = add_extent_mapping(em_tree, em, 0); 4893 if (ret) { 4894 write_unlock(&em_tree->lock); 4895 free_extent_map(em); 4896 goto error; 4897 } 4898 4899 list_add_tail(&em->list, &trans->transaction->pending_chunks); 4900 refcount_inc(&em->refs); 4901 write_unlock(&em_tree->lock); 4902 4903 ret = btrfs_make_block_group(trans, info, 0, type, start, num_bytes); 4904 if (ret) 4905 goto error_del_extent; 4906 4907 for (i = 0; i < map->num_stripes; i++) { 4908 num_bytes = map->stripes[i].dev->bytes_used + stripe_size; 4909 btrfs_device_set_bytes_used(map->stripes[i].dev, num_bytes); 4910 } 4911 4912 atomic64_sub(stripe_size * map->num_stripes, &info->free_chunk_space); 4913 4914 free_extent_map(em); 4915 check_raid56_incompat_flag(info, type); 4916 4917 kfree(devices_info); 4918 return 0; 4919 4920 error_del_extent: 4921 write_lock(&em_tree->lock); 4922 remove_extent_mapping(em_tree, em); 4923 write_unlock(&em_tree->lock); 4924 4925 /* One for our allocation */ 4926 free_extent_map(em); 4927 /* One for the tree reference */ 4928 free_extent_map(em); 4929 /* One for the pending_chunks list reference */ 4930 free_extent_map(em); 4931 error: 4932 kfree(devices_info); 4933 return ret; 4934 } 4935 4936 int btrfs_finish_chunk_alloc(struct 
btrfs_trans_handle *trans,
				struct btrfs_fs_info *fs_info,
				u64 chunk_offset, u64 chunk_size)
{
	struct btrfs_root *extent_root = fs_info->extent_root;
	struct btrfs_root *chunk_root = fs_info->chunk_root;
	struct btrfs_key key;
	struct btrfs_device *device;
	struct btrfs_chunk *chunk;
	struct btrfs_stripe *stripe;
	struct extent_map *em;
	struct map_lookup *map;
	size_t item_size;
	u64 dev_offset;
	u64 stripe_size;
	int i = 0;
	int ret = 0;

	em = get_chunk_map(fs_info, chunk_offset, chunk_size);
	if (IS_ERR(em))
		return PTR_ERR(em);

	map = em->map_lookup;
	item_size = btrfs_chunk_item_size(map->num_stripes);
	stripe_size = em->orig_block_len;

	chunk = kzalloc(item_size, GFP_NOFS);
	if (!chunk) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * Take the device list mutex to prevent races with the final phase of
	 * a device replace operation that replaces the device object associated
	 * with the map's stripes, because the device object's id can change
	 * at any time during that final phase of the device replace operation
	 * (dev-replace.c:btrfs_dev_replace_finishing()).
	 */
	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	for (i = 0; i < map->num_stripes; i++) {
		device = map->stripes[i].dev;
		dev_offset = map->stripes[i].physical;

		ret = btrfs_update_device(trans, device);
		if (ret)
			break;
		ret = btrfs_alloc_dev_extent(trans, device, chunk_offset,
					     dev_offset, stripe_size);
		if (ret)
			break;
	}
	if (ret) {
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		goto out;
	}

	stripe = &chunk->stripe;
	for (i = 0; i < map->num_stripes; i++) {
		device = map->stripes[i].dev;
		dev_offset = map->stripes[i].physical;

		btrfs_set_stack_stripe_devid(stripe, device->devid);
		btrfs_set_stack_stripe_offset(stripe, dev_offset);
		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
		stripe++;
	}
	mutex_unlock(&fs_info->fs_devices->device_list_mutex);

	btrfs_set_stack_chunk_length(chunk, chunk_size);
	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
	btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
	btrfs_set_stack_chunk_type(chunk, map->type);
	btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
	btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
	btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
	btrfs_set_stack_chunk_sector_size(chunk, fs_info->sectorsize);
	btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);

	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.type = BTRFS_CHUNK_ITEM_KEY;
	key.offset = chunk_offset;

	ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
	if (ret == 0 && map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
		/*
		 * TODO: Cleanup of inserted chunk root in case of
		 * failure.
		 */
		ret = btrfs_add_system_chunk(fs_info, &key, chunk, item_size);
	}

out:
	kfree(chunk);
	free_extent_map(em);
	return ret;
}

/*
 * Chunk allocation falls into two parts. The first part does the work
 * that makes the newly allocated chunk usable, but does not do any
 * operation that modifies the chunk tree. The second part does the work
 * that requires modifying the chunk tree.
This division is important for the 5039 * bootstrap process of adding storage to a seed btrfs. 5040 */ 5041 int btrfs_alloc_chunk(struct btrfs_trans_handle *trans, 5042 struct btrfs_fs_info *fs_info, u64 type) 5043 { 5044 u64 chunk_offset; 5045 5046 lockdep_assert_held(&fs_info->chunk_mutex); 5047 chunk_offset = find_next_chunk(fs_info); 5048 return __btrfs_alloc_chunk(trans, chunk_offset, type); 5049 } 5050 5051 static noinline int init_first_rw_device(struct btrfs_trans_handle *trans, 5052 struct btrfs_fs_info *fs_info) 5053 { 5054 u64 chunk_offset; 5055 u64 sys_chunk_offset; 5056 u64 alloc_profile; 5057 int ret; 5058 5059 chunk_offset = find_next_chunk(fs_info); 5060 alloc_profile = btrfs_metadata_alloc_profile(fs_info); 5061 ret = __btrfs_alloc_chunk(trans, chunk_offset, alloc_profile); 5062 if (ret) 5063 return ret; 5064 5065 sys_chunk_offset = find_next_chunk(fs_info); 5066 alloc_profile = btrfs_system_alloc_profile(fs_info); 5067 ret = __btrfs_alloc_chunk(trans, sys_chunk_offset, alloc_profile); 5068 return ret; 5069 } 5070 5071 static inline int btrfs_chunk_max_errors(struct map_lookup *map) 5072 { 5073 int max_errors; 5074 5075 if (map->type & (BTRFS_BLOCK_GROUP_RAID1 | 5076 BTRFS_BLOCK_GROUP_RAID10 | 5077 BTRFS_BLOCK_GROUP_RAID5 | 5078 BTRFS_BLOCK_GROUP_DUP)) { 5079 max_errors = 1; 5080 } else if (map->type & BTRFS_BLOCK_GROUP_RAID6) { 5081 max_errors = 2; 5082 } else { 5083 max_errors = 0; 5084 } 5085 5086 return max_errors; 5087 } 5088 5089 int btrfs_chunk_readonly(struct btrfs_fs_info *fs_info, u64 chunk_offset) 5090 { 5091 struct extent_map *em; 5092 struct map_lookup *map; 5093 int readonly = 0; 5094 int miss_ndevs = 0; 5095 int i; 5096 5097 em = get_chunk_map(fs_info, chunk_offset, 1); 5098 if (IS_ERR(em)) 5099 return 1; 5100 5101 map = em->map_lookup; 5102 for (i = 0; i < map->num_stripes; i++) { 5103 if (test_bit(BTRFS_DEV_STATE_MISSING, 5104 &map->stripes[i].dev->dev_state)) { 5105 miss_ndevs++; 5106 continue; 5107 } 5108 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, 5109 &map->stripes[i].dev->dev_state)) { 5110 readonly = 1; 5111 goto end; 5112 } 5113 } 5114 5115 /* 5116 * If the number of missing devices is larger than max errors, 5117 * we can not write the data into that chunk successfully, so 5118 * set it readonly. 5119 */ 5120 if (miss_ndevs > btrfs_chunk_max_errors(map)) 5121 readonly = 1; 5122 end: 5123 free_extent_map(em); 5124 return readonly; 5125 } 5126 5127 void btrfs_mapping_init(struct btrfs_mapping_tree *tree) 5128 { 5129 extent_map_tree_init(&tree->map_tree); 5130 } 5131 5132 void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree) 5133 { 5134 struct extent_map *em; 5135 5136 while (1) { 5137 write_lock(&tree->map_tree.lock); 5138 em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1); 5139 if (em) 5140 remove_extent_mapping(&tree->map_tree, em); 5141 write_unlock(&tree->map_tree.lock); 5142 if (!em) 5143 break; 5144 /* once for us */ 5145 free_extent_map(em); 5146 /* once for the tree */ 5147 free_extent_map(em); 5148 } 5149 } 5150 5151 int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len) 5152 { 5153 struct extent_map *em; 5154 struct map_lookup *map; 5155 int ret; 5156 5157 em = get_chunk_map(fs_info, logical, len); 5158 if (IS_ERR(em)) 5159 /* 5160 * We could return errors for these cases, but that could get 5161 * ugly and we'd probably do the same thing which is just not do 5162 * anything else and exit, so return 1 so the callers don't try 5163 * to use other copies. 
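		 * Callers treat the returned count as the number of mirrors
		 * they may try, so 1 means there is no fallback.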
5164 */ 5165 return 1; 5166 5167 map = em->map_lookup; 5168 if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1)) 5169 ret = map->num_stripes; 5170 else if (map->type & BTRFS_BLOCK_GROUP_RAID10) 5171 ret = map->sub_stripes; 5172 else if (map->type & BTRFS_BLOCK_GROUP_RAID5) 5173 ret = 2; 5174 else if (map->type & BTRFS_BLOCK_GROUP_RAID6) 5175 /* 5176 * There could be two corrupted data stripes, we need 5177 * to loop retry in order to rebuild the correct data. 5178 * 5179 * Fail a stripe at a time on every retry except the 5180 * stripe under reconstruction. 5181 */ 5182 ret = map->num_stripes; 5183 else 5184 ret = 1; 5185 free_extent_map(em); 5186 5187 btrfs_dev_replace_read_lock(&fs_info->dev_replace); 5188 if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace) && 5189 fs_info->dev_replace.tgtdev) 5190 ret++; 5191 btrfs_dev_replace_read_unlock(&fs_info->dev_replace); 5192 5193 return ret; 5194 } 5195 5196 unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info, 5197 u64 logical) 5198 { 5199 struct extent_map *em; 5200 struct map_lookup *map; 5201 unsigned long len = fs_info->sectorsize; 5202 5203 em = get_chunk_map(fs_info, logical, len); 5204 5205 if (!WARN_ON(IS_ERR(em))) { 5206 map = em->map_lookup; 5207 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) 5208 len = map->stripe_len * nr_data_stripes(map); 5209 free_extent_map(em); 5210 } 5211 return len; 5212 } 5213 5214 int btrfs_is_parity_mirror(struct btrfs_fs_info *fs_info, u64 logical, u64 len) 5215 { 5216 struct extent_map *em; 5217 struct map_lookup *map; 5218 int ret = 0; 5219 5220 em = get_chunk_map(fs_info, logical, len); 5221 5222 if(!WARN_ON(IS_ERR(em))) { 5223 map = em->map_lookup; 5224 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) 5225 ret = 1; 5226 free_extent_map(em); 5227 } 5228 return ret; 5229 } 5230 5231 static int find_live_mirror(struct btrfs_fs_info *fs_info, 5232 struct map_lookup *map, int first, 5233 int dev_replace_is_ongoing) 5234 { 5235 int i; 5236 int num_stripes; 5237 int preferred_mirror; 5238 int tolerance; 5239 struct btrfs_device *srcdev; 5240 5241 ASSERT((map->type & 5242 (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10))); 5243 5244 if (map->type & BTRFS_BLOCK_GROUP_RAID10) 5245 num_stripes = map->sub_stripes; 5246 else 5247 num_stripes = map->num_stripes; 5248 5249 preferred_mirror = first + current->pid % num_stripes; 5250 5251 if (dev_replace_is_ongoing && 5252 fs_info->dev_replace.cont_reading_from_srcdev_mode == 5253 BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID) 5254 srcdev = fs_info->dev_replace.srcdev; 5255 else 5256 srcdev = NULL; 5257 5258 /* 5259 * try to avoid the drive that is the source drive for a 5260 * dev-replace procedure, only choose it if no other non-missing 5261 * mirror is available 5262 */ 5263 for (tolerance = 0; tolerance < 2; tolerance++) { 5264 if (map->stripes[preferred_mirror].dev->bdev && 5265 (tolerance || map->stripes[preferred_mirror].dev != srcdev)) 5266 return preferred_mirror; 5267 for (i = first; i < first + num_stripes; i++) { 5268 if (map->stripes[i].dev->bdev && 5269 (tolerance || map->stripes[i].dev != srcdev)) 5270 return i; 5271 } 5272 } 5273 5274 /* we couldn't find one that doesn't fail. 
Just return something
	 * and the io error handling code will clean up eventually
	 */
	return preferred_mirror;
}

static inline int parity_smaller(u64 a, u64 b)
{
	return a > b;
}

/* Bubble-sort the stripe set to put the parity/syndrome stripes last */
static void sort_parity_stripes(struct btrfs_bio *bbio, int num_stripes)
{
	struct btrfs_bio_stripe s;
	int i;
	u64 l;
	int again = 1;

	while (again) {
		again = 0;
		for (i = 0; i < num_stripes - 1; i++) {
			if (parity_smaller(bbio->raid_map[i],
					   bbio->raid_map[i+1])) {
				s = bbio->stripes[i];
				l = bbio->raid_map[i];
				bbio->stripes[i] = bbio->stripes[i+1];
				bbio->raid_map[i] = bbio->raid_map[i+1];
				bbio->stripes[i+1] = s;
				bbio->raid_map[i+1] = l;

				again = 1;
			}
		}
	}
}

static struct btrfs_bio *alloc_btrfs_bio(int total_stripes, int real_stripes)
{
	struct btrfs_bio *bbio = kzalloc(
		/* the size of the btrfs_bio */
		sizeof(struct btrfs_bio) +
		/* plus the variable array for the stripes */
		sizeof(struct btrfs_bio_stripe) * (total_stripes) +
		/* plus the variable array for the tgt dev */
		sizeof(int) * (real_stripes) +
		/*
		 * plus the raid_map, which includes both the tgt dev
		 * and the stripes
		 */
		sizeof(u64) * (total_stripes),
		GFP_NOFS|__GFP_NOFAIL);

	atomic_set(&bbio->error, 0);
	refcount_set(&bbio->refs, 1);

	return bbio;
}

void btrfs_get_bbio(struct btrfs_bio *bbio)
{
	WARN_ON(!refcount_read(&bbio->refs));
	refcount_inc(&bbio->refs);
}

void btrfs_put_bbio(struct btrfs_bio *bbio)
{
	if (!bbio)
		return;
	if (refcount_dec_and_test(&bbio->refs))
		kfree(bbio);
}

/* can REQ_OP_DISCARD be sent with other REQ like REQ_OP_WRITE? */
/*
 * Please note that discard won't be sent to the target device of a device
 * replace operation.
5351 */ 5352 static int __btrfs_map_block_for_discard(struct btrfs_fs_info *fs_info, 5353 u64 logical, u64 length, 5354 struct btrfs_bio **bbio_ret) 5355 { 5356 struct extent_map *em; 5357 struct map_lookup *map; 5358 struct btrfs_bio *bbio; 5359 u64 offset; 5360 u64 stripe_nr; 5361 u64 stripe_nr_end; 5362 u64 stripe_end_offset; 5363 u64 stripe_cnt; 5364 u64 stripe_len; 5365 u64 stripe_offset; 5366 u64 num_stripes; 5367 u32 stripe_index; 5368 u32 factor = 0; 5369 u32 sub_stripes = 0; 5370 u64 stripes_per_dev = 0; 5371 u32 remaining_stripes = 0; 5372 u32 last_stripe = 0; 5373 int ret = 0; 5374 int i; 5375 5376 /* discard always return a bbio */ 5377 ASSERT(bbio_ret); 5378 5379 em = get_chunk_map(fs_info, logical, length); 5380 if (IS_ERR(em)) 5381 return PTR_ERR(em); 5382 5383 map = em->map_lookup; 5384 /* we don't discard raid56 yet */ 5385 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { 5386 ret = -EOPNOTSUPP; 5387 goto out; 5388 } 5389 5390 offset = logical - em->start; 5391 length = min_t(u64, em->len - offset, length); 5392 5393 stripe_len = map->stripe_len; 5394 /* 5395 * stripe_nr counts the total number of stripes we have to stride 5396 * to get to this block 5397 */ 5398 stripe_nr = div64_u64(offset, stripe_len); 5399 5400 /* stripe_offset is the offset of this block in its stripe */ 5401 stripe_offset = offset - stripe_nr * stripe_len; 5402 5403 stripe_nr_end = round_up(offset + length, map->stripe_len); 5404 stripe_nr_end = div64_u64(stripe_nr_end, map->stripe_len); 5405 stripe_cnt = stripe_nr_end - stripe_nr; 5406 stripe_end_offset = stripe_nr_end * map->stripe_len - 5407 (offset + length); 5408 /* 5409 * after this, stripe_nr is the number of stripes on this 5410 * device we have to walk to find the data, and stripe_index is 5411 * the number of our device in the stripe array 5412 */ 5413 num_stripes = 1; 5414 stripe_index = 0; 5415 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | 5416 BTRFS_BLOCK_GROUP_RAID10)) { 5417 if (map->type & BTRFS_BLOCK_GROUP_RAID0) 5418 sub_stripes = 1; 5419 else 5420 sub_stripes = map->sub_stripes; 5421 5422 factor = map->num_stripes / sub_stripes; 5423 num_stripes = min_t(u64, map->num_stripes, 5424 sub_stripes * stripe_cnt); 5425 stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index); 5426 stripe_index *= sub_stripes; 5427 stripes_per_dev = div_u64_rem(stripe_cnt, factor, 5428 &remaining_stripes); 5429 div_u64_rem(stripe_nr_end - 1, factor, &last_stripe); 5430 last_stripe *= sub_stripes; 5431 } else if (map->type & (BTRFS_BLOCK_GROUP_RAID1 | 5432 BTRFS_BLOCK_GROUP_DUP)) { 5433 num_stripes = map->num_stripes; 5434 } else { 5435 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, 5436 &stripe_index); 5437 } 5438 5439 bbio = alloc_btrfs_bio(num_stripes, 0); 5440 if (!bbio) { 5441 ret = -ENOMEM; 5442 goto out; 5443 } 5444 5445 for (i = 0; i < num_stripes; i++) { 5446 bbio->stripes[i].physical = 5447 map->stripes[stripe_index].physical + 5448 stripe_offset + stripe_nr * map->stripe_len; 5449 bbio->stripes[i].dev = map->stripes[stripe_index].dev; 5450 5451 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | 5452 BTRFS_BLOCK_GROUP_RAID10)) { 5453 bbio->stripes[i].length = stripes_per_dev * 5454 map->stripe_len; 5455 5456 if (i / sub_stripes < remaining_stripes) 5457 bbio->stripes[i].length += 5458 map->stripe_len; 5459 5460 /* 5461 * Special for the first stripe and 5462 * the last stripe: 5463 * 5464 * |-------|...|-------| 5465 * |----------| 5466 * off end_off 5467 */ 5468 if (i < sub_stripes) 5469 bbio->stripes[i].length -= 5470 stripe_offset; 5471 5472 if 
(stripe_index >= last_stripe && 5473 stripe_index <= (last_stripe + 5474 sub_stripes - 1)) 5475 bbio->stripes[i].length -= 5476 stripe_end_offset; 5477 5478 if (i == sub_stripes - 1) 5479 stripe_offset = 0; 5480 } else { 5481 bbio->stripes[i].length = length; 5482 } 5483 5484 stripe_index++; 5485 if (stripe_index == map->num_stripes) { 5486 stripe_index = 0; 5487 stripe_nr++; 5488 } 5489 } 5490 5491 *bbio_ret = bbio; 5492 bbio->map_type = map->type; 5493 bbio->num_stripes = num_stripes; 5494 out: 5495 free_extent_map(em); 5496 return ret; 5497 } 5498 5499 /* 5500 * In dev-replace case, for repair case (that's the only case where the mirror 5501 * is selected explicitly when calling btrfs_map_block), blocks left of the 5502 * left cursor can also be read from the target drive. 5503 * 5504 * For REQ_GET_READ_MIRRORS, the target drive is added as the last one to the 5505 * array of stripes. 5506 * For READ, it also needs to be supported using the same mirror number. 5507 * 5508 * If the requested block is not left of the left cursor, EIO is returned. This 5509 * can happen because btrfs_num_copies() returns one more in the dev-replace 5510 * case. 5511 */ 5512 static int get_extra_mirror_from_replace(struct btrfs_fs_info *fs_info, 5513 u64 logical, u64 length, 5514 u64 srcdev_devid, int *mirror_num, 5515 u64 *physical) 5516 { 5517 struct btrfs_bio *bbio = NULL; 5518 int num_stripes; 5519 int index_srcdev = 0; 5520 int found = 0; 5521 u64 physical_of_found = 0; 5522 int i; 5523 int ret = 0; 5524 5525 ret = __btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS, 5526 logical, &length, &bbio, 0, 0); 5527 if (ret) { 5528 ASSERT(bbio == NULL); 5529 return ret; 5530 } 5531 5532 num_stripes = bbio->num_stripes; 5533 if (*mirror_num > num_stripes) { 5534 /* 5535 * BTRFS_MAP_GET_READ_MIRRORS does not contain this mirror, 5536 * that means that the requested area is not left of the left 5537 * cursor 5538 */ 5539 btrfs_put_bbio(bbio); 5540 return -EIO; 5541 } 5542 5543 /* 5544 * process the rest of the function using the mirror_num of the source 5545 * drive. Therefore look it up first. At the end, patch the device 5546 * pointer to the one of the target drive. 5547 */ 5548 for (i = 0; i < num_stripes; i++) { 5549 if (bbio->stripes[i].dev->devid != srcdev_devid) 5550 continue; 5551 5552 /* 5553 * In case of DUP, in order to keep it simple, only add the 5554 * mirror with the lowest physical address 5555 */ 5556 if (found && 5557 physical_of_found <= bbio->stripes[i].physical) 5558 continue; 5559 5560 index_srcdev = i; 5561 found = 1; 5562 physical_of_found = bbio->stripes[i].physical; 5563 } 5564 5565 btrfs_put_bbio(bbio); 5566 5567 ASSERT(found); 5568 if (!found) 5569 return -EIO; 5570 5571 *mirror_num = index_srcdev + 1; 5572 *physical = physical_of_found; 5573 return ret; 5574 } 5575 5576 static void handle_ops_on_dev_replace(enum btrfs_map_op op, 5577 struct btrfs_bio **bbio_ret, 5578 struct btrfs_dev_replace *dev_replace, 5579 int *num_stripes_ret, int *max_errors_ret) 5580 { 5581 struct btrfs_bio *bbio = *bbio_ret; 5582 u64 srcdev_devid = dev_replace->srcdev->devid; 5583 int tgtdev_indexes = 0; 5584 int num_stripes = *num_stripes_ret; 5585 int max_errors = *max_errors_ret; 5586 int i; 5587 5588 if (op == BTRFS_MAP_WRITE) { 5589 int index_where_to_add; 5590 5591 /* 5592 * duplicate the write operations while the dev replace 5593 * procedure is running. 
Since the copying of the old disk to 5594 * the new disk takes place at run time while the filesystem is 5595 * mounted writable, the regular write operations to the old 5596 * disk have to be duplicated to go to the new disk as well. 5597 * 5598 * Note that device->missing is handled by the caller, and that 5599 * the write to the old disk is already set up in the stripes 5600 * array. 5601 */ 5602 index_where_to_add = num_stripes; 5603 for (i = 0; i < num_stripes; i++) { 5604 if (bbio->stripes[i].dev->devid == srcdev_devid) { 5605 /* write to new disk, too */ 5606 struct btrfs_bio_stripe *new = 5607 bbio->stripes + index_where_to_add; 5608 struct btrfs_bio_stripe *old = 5609 bbio->stripes + i; 5610 5611 new->physical = old->physical; 5612 new->length = old->length; 5613 new->dev = dev_replace->tgtdev; 5614 bbio->tgtdev_map[i] = index_where_to_add; 5615 index_where_to_add++; 5616 max_errors++; 5617 tgtdev_indexes++; 5618 } 5619 } 5620 num_stripes = index_where_to_add; 5621 } else if (op == BTRFS_MAP_GET_READ_MIRRORS) { 5622 int index_srcdev = 0; 5623 int found = 0; 5624 u64 physical_of_found = 0; 5625 5626 /* 5627 * During the dev-replace procedure, the target drive can also 5628 * be used to read data in case it is needed to repair a corrupt 5629 * block elsewhere. This is possible if the requested area is 5630 * left of the left cursor. In this area, the target drive is a 5631 * full copy of the source drive. 5632 */ 5633 for (i = 0; i < num_stripes; i++) { 5634 if (bbio->stripes[i].dev->devid == srcdev_devid) { 5635 /* 5636 * In case of DUP, in order to keep it simple, 5637 * only add the mirror with the lowest physical 5638 * address 5639 */ 5640 if (found && 5641 physical_of_found <= 5642 bbio->stripes[i].physical) 5643 continue; 5644 index_srcdev = i; 5645 found = 1; 5646 physical_of_found = bbio->stripes[i].physical; 5647 } 5648 } 5649 if (found) { 5650 struct btrfs_bio_stripe *tgtdev_stripe = 5651 bbio->stripes + num_stripes; 5652 5653 tgtdev_stripe->physical = physical_of_found; 5654 tgtdev_stripe->length = 5655 bbio->stripes[index_srcdev].length; 5656 tgtdev_stripe->dev = dev_replace->tgtdev; 5657 bbio->tgtdev_map[index_srcdev] = num_stripes; 5658 5659 tgtdev_indexes++; 5660 num_stripes++; 5661 } 5662 } 5663 5664 *num_stripes_ret = num_stripes; 5665 *max_errors_ret = max_errors; 5666 bbio->num_tgtdevs = tgtdev_indexes; 5667 *bbio_ret = bbio; 5668 } 5669 5670 static bool need_full_stripe(enum btrfs_map_op op) 5671 { 5672 return (op == BTRFS_MAP_WRITE || op == BTRFS_MAP_GET_READ_MIRRORS); 5673 } 5674 5675 static int __btrfs_map_block(struct btrfs_fs_info *fs_info, 5676 enum btrfs_map_op op, 5677 u64 logical, u64 *length, 5678 struct btrfs_bio **bbio_ret, 5679 int mirror_num, int need_raid_map) 5680 { 5681 struct extent_map *em; 5682 struct map_lookup *map; 5683 u64 offset; 5684 u64 stripe_offset; 5685 u64 stripe_nr; 5686 u64 stripe_len; 5687 u32 stripe_index; 5688 int i; 5689 int ret = 0; 5690 int num_stripes; 5691 int max_errors = 0; 5692 int tgtdev_indexes = 0; 5693 struct btrfs_bio *bbio = NULL; 5694 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace; 5695 int dev_replace_is_ongoing = 0; 5696 int num_alloc_stripes; 5697 int patch_the_first_stripe_for_dev_replace = 0; 5698 u64 physical_to_patch_in_first_stripe = 0; 5699 u64 raid56_full_stripe_start = (u64)-1; 5700 5701 if (op == BTRFS_MAP_DISCARD) 5702 return __btrfs_map_block_for_discard(fs_info, logical, 5703 *length, bbio_ret); 5704 5705 em = get_chunk_map(fs_info, logical, *length); 5706 if (IS_ERR(em)) 5707 
return PTR_ERR(em); 5708 5709 map = em->map_lookup; 5710 offset = logical - em->start; 5711 5712 stripe_len = map->stripe_len; 5713 stripe_nr = offset; 5714 /* 5715 * stripe_nr counts the total number of stripes we have to stride 5716 * to get to this block 5717 */ 5718 stripe_nr = div64_u64(stripe_nr, stripe_len); 5719 5720 stripe_offset = stripe_nr * stripe_len; 5721 if (offset < stripe_offset) { 5722 btrfs_crit(fs_info, 5723 "stripe math has gone wrong, stripe_offset=%llu, offset=%llu, start=%llu, logical=%llu, stripe_len=%llu", 5724 stripe_offset, offset, em->start, logical, 5725 stripe_len); 5726 free_extent_map(em); 5727 return -EINVAL; 5728 } 5729 5730 /* stripe_offset is the offset of this block in its stripe*/ 5731 stripe_offset = offset - stripe_offset; 5732 5733 /* if we're here for raid56, we need to know the stripe aligned start */ 5734 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { 5735 unsigned long full_stripe_len = stripe_len * nr_data_stripes(map); 5736 raid56_full_stripe_start = offset; 5737 5738 /* allow a write of a full stripe, but make sure we don't 5739 * allow straddling of stripes 5740 */ 5741 raid56_full_stripe_start = div64_u64(raid56_full_stripe_start, 5742 full_stripe_len); 5743 raid56_full_stripe_start *= full_stripe_len; 5744 } 5745 5746 if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) { 5747 u64 max_len; 5748 /* For writes to RAID[56], allow a full stripeset across all disks. 5749 For other RAID types and for RAID[56] reads, just allow a single 5750 stripe (on a single disk). */ 5751 if ((map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) && 5752 (op == BTRFS_MAP_WRITE)) { 5753 max_len = stripe_len * nr_data_stripes(map) - 5754 (offset - raid56_full_stripe_start); 5755 } else { 5756 /* we limit the length of each bio to what fits in a stripe */ 5757 max_len = stripe_len - stripe_offset; 5758 } 5759 *length = min_t(u64, em->len - offset, max_len); 5760 } else { 5761 *length = em->len - offset; 5762 } 5763 5764 /* This is for when we're called from btrfs_merge_bio_hook() and all 5765 it cares about is the length */ 5766 if (!bbio_ret) 5767 goto out; 5768 5769 btrfs_dev_replace_read_lock(dev_replace); 5770 dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace); 5771 if (!dev_replace_is_ongoing) 5772 btrfs_dev_replace_read_unlock(dev_replace); 5773 else 5774 btrfs_dev_replace_set_lock_blocking(dev_replace); 5775 5776 if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 && 5777 !need_full_stripe(op) && dev_replace->tgtdev != NULL) { 5778 ret = get_extra_mirror_from_replace(fs_info, logical, *length, 5779 dev_replace->srcdev->devid, 5780 &mirror_num, 5781 &physical_to_patch_in_first_stripe); 5782 if (ret) 5783 goto out; 5784 else 5785 patch_the_first_stripe_for_dev_replace = 1; 5786 } else if (mirror_num > map->num_stripes) { 5787 mirror_num = 0; 5788 } 5789 5790 num_stripes = 1; 5791 stripe_index = 0; 5792 if (map->type & BTRFS_BLOCK_GROUP_RAID0) { 5793 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, 5794 &stripe_index); 5795 if (!need_full_stripe(op)) 5796 mirror_num = 1; 5797 } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) { 5798 if (need_full_stripe(op)) 5799 num_stripes = map->num_stripes; 5800 else if (mirror_num) 5801 stripe_index = mirror_num - 1; 5802 else { 5803 stripe_index = find_live_mirror(fs_info, map, 0, 5804 dev_replace_is_ongoing); 5805 mirror_num = stripe_index + 1; 5806 } 5807 5808 } else if (map->type & BTRFS_BLOCK_GROUP_DUP) { 5809 if (need_full_stripe(op)) { 5810 num_stripes = map->num_stripes; 5811 } else if 
(mirror_num) { 5812 stripe_index = mirror_num - 1; 5813 } else { 5814 mirror_num = 1; 5815 } 5816 5817 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) { 5818 u32 factor = map->num_stripes / map->sub_stripes; 5819 5820 stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index); 5821 stripe_index *= map->sub_stripes; 5822 5823 if (need_full_stripe(op)) 5824 num_stripes = map->sub_stripes; 5825 else if (mirror_num) 5826 stripe_index += mirror_num - 1; 5827 else { 5828 int old_stripe_index = stripe_index; 5829 stripe_index = find_live_mirror(fs_info, map, 5830 stripe_index, 5831 dev_replace_is_ongoing); 5832 mirror_num = stripe_index - old_stripe_index + 1; 5833 } 5834 5835 } else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { 5836 if (need_raid_map && (need_full_stripe(op) || mirror_num > 1)) { 5837 /* push stripe_nr back to the start of the full stripe */ 5838 stripe_nr = div64_u64(raid56_full_stripe_start, 5839 stripe_len * nr_data_stripes(map)); 5840 5841 /* RAID[56] write or recovery. Return all stripes */ 5842 num_stripes = map->num_stripes; 5843 max_errors = nr_parity_stripes(map); 5844 5845 *length = map->stripe_len; 5846 stripe_index = 0; 5847 stripe_offset = 0; 5848 } else { 5849 /* 5850 * Mirror #0 or #1 means the original data block. 5851 * Mirror #2 is RAID5 parity block. 5852 * Mirror #3 is RAID6 Q block. 5853 */ 5854 stripe_nr = div_u64_rem(stripe_nr, 5855 nr_data_stripes(map), &stripe_index); 5856 if (mirror_num > 1) 5857 stripe_index = nr_data_stripes(map) + 5858 mirror_num - 2; 5859 5860 /* We distribute the parity blocks across stripes */ 5861 div_u64_rem(stripe_nr + stripe_index, map->num_stripes, 5862 &stripe_index); 5863 if (!need_full_stripe(op) && mirror_num <= 1) 5864 mirror_num = 1; 5865 } 5866 } else { 5867 /* 5868 * after this, stripe_nr is the number of stripes on this 5869 * device we have to walk to find the data, and stripe_index is 5870 * the number of our device in the stripe array 5871 */ 5872 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, 5873 &stripe_index); 5874 mirror_num = stripe_index + 1; 5875 } 5876 if (stripe_index >= map->num_stripes) { 5877 btrfs_crit(fs_info, 5878 "stripe index math went horribly wrong, got stripe_index=%u, num_stripes=%u", 5879 stripe_index, map->num_stripes); 5880 ret = -EINVAL; 5881 goto out; 5882 } 5883 5884 num_alloc_stripes = num_stripes; 5885 if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL) { 5886 if (op == BTRFS_MAP_WRITE) 5887 num_alloc_stripes <<= 1; 5888 if (op == BTRFS_MAP_GET_READ_MIRRORS) 5889 num_alloc_stripes++; 5890 tgtdev_indexes = num_stripes; 5891 } 5892 5893 bbio = alloc_btrfs_bio(num_alloc_stripes, tgtdev_indexes); 5894 if (!bbio) { 5895 ret = -ENOMEM; 5896 goto out; 5897 } 5898 if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL) 5899 bbio->tgtdev_map = (int *)(bbio->stripes + num_alloc_stripes); 5900 5901 /* build raid_map */ 5902 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK && need_raid_map && 5903 (need_full_stripe(op) || mirror_num > 1)) { 5904 u64 tmp; 5905 unsigned rot; 5906 5907 bbio->raid_map = (u64 *)((void *)bbio->stripes + 5908 sizeof(struct btrfs_bio_stripe) * 5909 num_alloc_stripes + 5910 sizeof(int) * tgtdev_indexes); 5911 5912 /* Work out the disk rotation on this stripe-set */ 5913 div_u64_rem(stripe_nr, num_stripes, &rot); 5914 5915 /* Fill in the logical address of each stripe */ 5916 tmp = stripe_nr * nr_data_stripes(map); 5917 for (i = 0; i < nr_data_stripes(map); i++) 5918 bbio->raid_map[(i+rot) % num_stripes] = 5919 em->start + (tmp + i) * map->stripe_len; 5920 
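/*
 * Worked example: for RAID5 on three devices, num_stripes == 3 and
 * nr_data_stripes(map) == 2. With rot == 1, the loop above places the
 * logical addresses of the two data stripes at raid_map[1] and
 * raid_map[2], and the assignment below tags raid_map[0] as parity, so
 * parity rotates by one device on each consecutive full stripe.
 */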
5921 bbio->raid_map[(i+rot) % num_stripes] = RAID5_P_STRIPE; 5922 if (map->type & BTRFS_BLOCK_GROUP_RAID6) 5923 bbio->raid_map[(i+rot+1) % num_stripes] = 5924 RAID6_Q_STRIPE; 5925 } 5926 5927 5928 for (i = 0; i < num_stripes; i++) { 5929 bbio->stripes[i].physical = 5930 map->stripes[stripe_index].physical + 5931 stripe_offset + 5932 stripe_nr * map->stripe_len; 5933 bbio->stripes[i].dev = 5934 map->stripes[stripe_index].dev; 5935 stripe_index++; 5936 } 5937 5938 if (need_full_stripe(op)) 5939 max_errors = btrfs_chunk_max_errors(map); 5940 5941 if (bbio->raid_map) 5942 sort_parity_stripes(bbio, num_stripes); 5943 5944 if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL && 5945 need_full_stripe(op)) { 5946 handle_ops_on_dev_replace(op, &bbio, dev_replace, &num_stripes, 5947 &max_errors); 5948 } 5949 5950 *bbio_ret = bbio; 5951 bbio->map_type = map->type; 5952 bbio->num_stripes = num_stripes; 5953 bbio->max_errors = max_errors; 5954 bbio->mirror_num = mirror_num; 5955 5956 /* 5957 * this is the case that REQ_READ && dev_replace_is_ongoing && 5958 * mirror_num == num_stripes + 1 && dev_replace target drive is 5959 * available as a mirror 5960 */ 5961 if (patch_the_first_stripe_for_dev_replace && num_stripes > 0) { 5962 WARN_ON(num_stripes > 1); 5963 bbio->stripes[0].dev = dev_replace->tgtdev; 5964 bbio->stripes[0].physical = physical_to_patch_in_first_stripe; 5965 bbio->mirror_num = map->num_stripes + 1; 5966 } 5967 out: 5968 if (dev_replace_is_ongoing) { 5969 btrfs_dev_replace_clear_lock_blocking(dev_replace); 5970 btrfs_dev_replace_read_unlock(dev_replace); 5971 } 5972 free_extent_map(em); 5973 return ret; 5974 } 5975 5976 int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op, 5977 u64 logical, u64 *length, 5978 struct btrfs_bio **bbio_ret, int mirror_num) 5979 { 5980 return __btrfs_map_block(fs_info, op, logical, length, bbio_ret, 5981 mirror_num, 0); 5982 } 5983 5984 /* For Scrub/replace */ 5985 int btrfs_map_sblock(struct btrfs_fs_info *fs_info, enum btrfs_map_op op, 5986 u64 logical, u64 *length, 5987 struct btrfs_bio **bbio_ret) 5988 { 5989 return __btrfs_map_block(fs_info, op, logical, length, bbio_ret, 0, 1); 5990 } 5991 5992 int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start, 5993 u64 physical, u64 **logical, int *naddrs, int *stripe_len) 5994 { 5995 struct extent_map *em; 5996 struct map_lookup *map; 5997 u64 *buf; 5998 u64 bytenr; 5999 u64 length; 6000 u64 stripe_nr; 6001 u64 rmap_len; 6002 int i, j, nr = 0; 6003 6004 em = get_chunk_map(fs_info, chunk_start, 1); 6005 if (IS_ERR(em)) 6006 return -EIO; 6007 6008 map = em->map_lookup; 6009 length = em->len; 6010 rmap_len = map->stripe_len; 6011 6012 if (map->type & BTRFS_BLOCK_GROUP_RAID10) 6013 length = div_u64(length, map->num_stripes / map->sub_stripes); 6014 else if (map->type & BTRFS_BLOCK_GROUP_RAID0) 6015 length = div_u64(length, map->num_stripes); 6016 else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { 6017 length = div_u64(length, nr_data_stripes(map)); 6018 rmap_len = map->stripe_len * nr_data_stripes(map); 6019 } 6020 6021 buf = kcalloc(map->num_stripes, sizeof(u64), GFP_NOFS); 6022 BUG_ON(!buf); /* -ENOMEM */ 6023 6024 for (i = 0; i < map->num_stripes; i++) { 6025 if (map->stripes[i].physical > physical || 6026 map->stripes[i].physical + length <= physical) 6027 continue; 6028 6029 stripe_nr = physical - map->stripes[i].physical; 6030 stripe_nr = div64_u64(stripe_nr, map->stripe_len); 6031 6032 if (map->type & BTRFS_BLOCK_GROUP_RAID10) { 6033 stripe_nr = stripe_nr *
map->num_stripes + i; 6034 stripe_nr = div_u64(stripe_nr, map->sub_stripes); 6035 } else if (map->type & BTRFS_BLOCK_GROUP_RAID0) { 6036 stripe_nr = stripe_nr * map->num_stripes + i; 6037 } /* else if RAID[56], multiply by nr_data_stripes(). 6038 * Alternatively, just use rmap_len below instead of 6039 * map->stripe_len */ 6040 6041 bytenr = chunk_start + stripe_nr * rmap_len; 6042 WARN_ON(nr >= map->num_stripes); 6043 for (j = 0; j < nr; j++) { 6044 if (buf[j] == bytenr) 6045 break; 6046 } 6047 if (j == nr) { 6048 WARN_ON(nr >= map->num_stripes); 6049 buf[nr++] = bytenr; 6050 } 6051 } 6052 6053 *logical = buf; 6054 *naddrs = nr; 6055 *stripe_len = rmap_len; 6056 6057 free_extent_map(em); 6058 return 0; 6059 } 6060 6061 static inline void btrfs_end_bbio(struct btrfs_bio *bbio, struct bio *bio) 6062 { 6063 bio->bi_private = bbio->private; 6064 bio->bi_end_io = bbio->end_io; 6065 bio_endio(bio); 6066 6067 btrfs_put_bbio(bbio); 6068 } 6069 6070 static void btrfs_end_bio(struct bio *bio) 6071 { 6072 struct btrfs_bio *bbio = bio->bi_private; 6073 int is_orig_bio = 0; 6074 6075 if (bio->bi_status) { 6076 atomic_inc(&bbio->error); 6077 if (bio->bi_status == BLK_STS_IOERR || 6078 bio->bi_status == BLK_STS_TARGET) { 6079 unsigned int stripe_index = 6080 btrfs_io_bio(bio)->stripe_index; 6081 struct btrfs_device *dev; 6082 6083 BUG_ON(stripe_index >= bbio->num_stripes); 6084 dev = bbio->stripes[stripe_index].dev; 6085 if (dev->bdev) { 6086 if (bio_op(bio) == REQ_OP_WRITE) 6087 btrfs_dev_stat_inc_and_print(dev, 6088 BTRFS_DEV_STAT_WRITE_ERRS); 6089 else 6090 btrfs_dev_stat_inc_and_print(dev, 6091 BTRFS_DEV_STAT_READ_ERRS); 6092 if (bio->bi_opf & REQ_PREFLUSH) 6093 btrfs_dev_stat_inc_and_print(dev, 6094 BTRFS_DEV_STAT_FLUSH_ERRS); 6095 } 6096 } 6097 } 6098 6099 if (bio == bbio->orig_bio) 6100 is_orig_bio = 1; 6101 6102 btrfs_bio_counter_dec(bbio->fs_info); 6103 6104 if (atomic_dec_and_test(&bbio->stripes_pending)) { 6105 if (!is_orig_bio) { 6106 bio_put(bio); 6107 bio = bbio->orig_bio; 6108 } 6109 6110 btrfs_io_bio(bio)->mirror_num = bbio->mirror_num; 6111 /* only send an error to the higher layers if it is 6112 * beyond the tolerance of the btrfs bio 6113 */ 6114 if (atomic_read(&bbio->error) > bbio->max_errors) { 6115 bio->bi_status = BLK_STS_IOERR; 6116 } else { 6117 /* 6118 * this bio is actually up to date, we didn't 6119 * go over the max number of errors 6120 */ 6121 bio->bi_status = BLK_STS_OK; 6122 } 6123 6124 btrfs_end_bbio(bbio, bio); 6125 } else if (!is_orig_bio) { 6126 bio_put(bio); 6127 } 6128 } 6129 6130 /* 6131 * see run_scheduled_bios for a description of why bios are collected for 6132 * async submit. 6133 * 6134 * This will add one bio to the pending list for a device and make sure 6135 * the work struct is scheduled. 
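 *
 * In short: writes are appended to one of two per-device lists
 * (pending_sync_bios for synchronous bios, pending_bios for the rest)
 * and drained later by the device worker; reads skip the queues and
 * are submitted directly.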
6136 */ 6137 static noinline void btrfs_schedule_bio(struct btrfs_device *device, 6138 struct bio *bio) 6139 { 6140 struct btrfs_fs_info *fs_info = device->fs_info; 6141 int should_queue = 1; 6142 struct btrfs_pending_bios *pending_bios; 6143 6144 if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state) || 6145 !device->bdev) { 6146 bio_io_error(bio); 6147 return; 6148 } 6149 6150 /* don't bother with additional async steps for reads, right now */ 6151 if (bio_op(bio) == REQ_OP_READ) { 6152 btrfsic_submit_bio(bio); 6153 return; 6154 } 6155 6156 WARN_ON(bio->bi_next); 6157 bio->bi_next = NULL; 6158 6159 spin_lock(&device->io_lock); 6160 if (op_is_sync(bio->bi_opf)) 6161 pending_bios = &device->pending_sync_bios; 6162 else 6163 pending_bios = &device->pending_bios; 6164 6165 if (pending_bios->tail) 6166 pending_bios->tail->bi_next = bio; 6167 6168 pending_bios->tail = bio; 6169 if (!pending_bios->head) 6170 pending_bios->head = bio; 6171 if (device->running_pending) 6172 should_queue = 0; 6173 6174 spin_unlock(&device->io_lock); 6175 6176 if (should_queue) 6177 btrfs_queue_work(fs_info->submit_workers, &device->work); 6178 } 6179 6180 static void submit_stripe_bio(struct btrfs_bio *bbio, struct bio *bio, 6181 u64 physical, int dev_nr, int async) 6182 { 6183 struct btrfs_device *dev = bbio->stripes[dev_nr].dev; 6184 struct btrfs_fs_info *fs_info = bbio->fs_info; 6185 6186 bio->bi_private = bbio; 6187 btrfs_io_bio(bio)->stripe_index = dev_nr; 6188 bio->bi_end_io = btrfs_end_bio; 6189 bio->bi_iter.bi_sector = physical >> 9; 6190 #ifdef DEBUG 6191 { 6192 struct rcu_string *name; 6193 6194 rcu_read_lock(); 6195 name = rcu_dereference(dev->name); 6196 btrfs_debug(fs_info, 6197 "btrfs_map_bio: rw %d 0x%x, sector=%llu, dev=%lu (%s id %llu), size=%u", 6198 bio_op(bio), bio->bi_opf, 6199 (u64)bio->bi_iter.bi_sector, 6200 (u_long)dev->bdev->bd_dev, name->str, dev->devid, 6201 bio->bi_iter.bi_size); 6202 rcu_read_unlock(); 6203 } 6204 #endif 6205 bio_set_dev(bio, dev->bdev); 6206 6207 btrfs_bio_counter_inc_noblocked(fs_info); 6208 6209 if (async) 6210 btrfs_schedule_bio(dev, bio); 6211 else 6212 btrfsic_submit_bio(bio); 6213 } 6214 6215 static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical) 6216 { 6217 atomic_inc(&bbio->error); 6218 if (atomic_dec_and_test(&bbio->stripes_pending)) { 6219 /* Should be the original bio. 
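 * bbio_error() is only handed the original bio (first_bio in
 * btrfs_map_bio()), so once the last pending stripe has been accounted
 * for, the bio completed here must be the original one.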
*/ 6220 WARN_ON(bio != bbio->orig_bio); 6221 6222 btrfs_io_bio(bio)->mirror_num = bbio->mirror_num; 6223 bio->bi_iter.bi_sector = logical >> 9; 6224 if (atomic_read(&bbio->error) > bbio->max_errors) 6225 bio->bi_status = BLK_STS_IOERR; 6226 else 6227 bio->bi_status = BLK_STS_OK; 6228 btrfs_end_bbio(bbio, bio); 6229 } 6230 } 6231 6232 blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio, 6233 int mirror_num, int async_submit) 6234 { 6235 struct btrfs_device *dev; 6236 struct bio *first_bio = bio; 6237 u64 logical = (u64)bio->bi_iter.bi_sector << 9; 6238 u64 length = 0; 6239 u64 map_length; 6240 int ret; 6241 int dev_nr; 6242 int total_devs; 6243 struct btrfs_bio *bbio = NULL; 6244 6245 length = bio->bi_iter.bi_size; 6246 map_length = length; 6247 6248 btrfs_bio_counter_inc_blocked(fs_info); 6249 ret = __btrfs_map_block(fs_info, btrfs_op(bio), logical, 6250 &map_length, &bbio, mirror_num, 1); 6251 if (ret) { 6252 btrfs_bio_counter_dec(fs_info); 6253 return errno_to_blk_status(ret); 6254 } 6255 6256 total_devs = bbio->num_stripes; 6257 bbio->orig_bio = first_bio; 6258 bbio->private = first_bio->bi_private; 6259 bbio->end_io = first_bio->bi_end_io; 6260 bbio->fs_info = fs_info; 6261 atomic_set(&bbio->stripes_pending, bbio->num_stripes); 6262 6263 if ((bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) && 6264 ((bio_op(bio) == REQ_OP_WRITE) || (mirror_num > 1))) { 6265 /* In this case, map_length has been set to the length of 6266 a single stripe; not the whole write */ 6267 if (bio_op(bio) == REQ_OP_WRITE) { 6268 ret = raid56_parity_write(fs_info, bio, bbio, 6269 map_length); 6270 } else { 6271 ret = raid56_parity_recover(fs_info, bio, bbio, 6272 map_length, mirror_num, 1); 6273 } 6274 6275 btrfs_bio_counter_dec(fs_info); 6276 return errno_to_blk_status(ret); 6277 } 6278 6279 if (map_length < length) { 6280 btrfs_crit(fs_info, 6281 "mapping failed logical %llu bio len %llu len %llu", 6282 logical, length, map_length); 6283 BUG(); 6284 } 6285 6286 for (dev_nr = 0; dev_nr < total_devs; dev_nr++) { 6287 dev = bbio->stripes[dev_nr].dev; 6288 if (!dev || !dev->bdev || 6289 (bio_op(first_bio) == REQ_OP_WRITE && 6290 !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))) { 6291 bbio_error(bbio, first_bio, logical); 6292 continue; 6293 } 6294 6295 if (dev_nr < total_devs - 1) 6296 bio = btrfs_bio_clone(first_bio); 6297 else 6298 bio = first_bio; 6299 6300 submit_stripe_bio(bbio, bio, bbio->stripes[dev_nr].physical, 6301 dev_nr, async_submit); 6302 } 6303 btrfs_bio_counter_dec(fs_info); 6304 return BLK_STS_OK; 6305 } 6306 6307 struct btrfs_device *btrfs_find_device(struct btrfs_fs_info *fs_info, u64 devid, 6308 u8 *uuid, u8 *fsid) 6309 { 6310 struct btrfs_device *device; 6311 struct btrfs_fs_devices *cur_devices; 6312 6313 cur_devices = fs_info->fs_devices; 6314 while (cur_devices) { 6315 if (!fsid || 6316 !memcmp(cur_devices->fsid, fsid, BTRFS_FSID_SIZE)) { 6317 device = find_device(cur_devices, devid, uuid); 6318 if (device) 6319 return device; 6320 } 6321 cur_devices = cur_devices->seed; 6322 } 6323 return NULL; 6324 } 6325 6326 static struct btrfs_device *add_missing_dev(struct btrfs_fs_devices *fs_devices, 6327 u64 devid, u8 *dev_uuid) 6328 { 6329 struct btrfs_device *device; 6330 6331 device = btrfs_alloc_device(NULL, &devid, dev_uuid); 6332 if (IS_ERR(device)) 6333 return device; 6334 6335 list_add(&device->dev_list, &fs_devices->devices); 6336 device->fs_devices = fs_devices; 6337 fs_devices->num_devices++; 6338 6339 set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state); 6340 
fs_devices->missing_devices++; 6341 6342 return device; 6343 } 6344 6345 /** 6346 * btrfs_alloc_device - allocate struct btrfs_device 6347 * @fs_info: used only for generating a new devid, can be NULL if 6348 * devid is provided (i.e. @devid != NULL). 6349 * @devid: a pointer to devid for this device. If NULL a new devid 6350 * is generated. 6351 * @uuid: a pointer to UUID for this device. If NULL a new UUID 6352 * is generated. 6353 * 6354 * Return: a pointer to a new &struct btrfs_device on success; ERR_PTR() 6355 * on error. Returned struct is not linked onto any lists and must be 6356 * destroyed with btrfs_free_device. 6357 */ 6358 struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info, 6359 const u64 *devid, 6360 const u8 *uuid) 6361 { 6362 struct btrfs_device *dev; 6363 u64 tmp; 6364 6365 if (WARN_ON(!devid && !fs_info)) 6366 return ERR_PTR(-EINVAL); 6367 6368 dev = __alloc_device(); 6369 if (IS_ERR(dev)) 6370 return dev; 6371 6372 if (devid) 6373 tmp = *devid; 6374 else { 6375 int ret; 6376 6377 ret = find_next_devid(fs_info, &tmp); 6378 if (ret) { 6379 btrfs_free_device(dev); 6380 return ERR_PTR(ret); 6381 } 6382 } 6383 dev->devid = tmp; 6384 6385 if (uuid) 6386 memcpy(dev->uuid, uuid, BTRFS_UUID_SIZE); 6387 else 6388 generate_random_uuid(dev->uuid); 6389 6390 btrfs_init_work(&dev->work, btrfs_submit_helper, 6391 pending_bios_fn, NULL, NULL); 6392 6393 return dev; 6394 } 6395 6396 /* Return -EIO if any error, otherwise return 0. */ 6397 static int btrfs_check_chunk_valid(struct btrfs_fs_info *fs_info, 6398 struct extent_buffer *leaf, 6399 struct btrfs_chunk *chunk, u64 logical) 6400 { 6401 u64 length; 6402 u64 stripe_len; 6403 u16 num_stripes; 6404 u16 sub_stripes; 6405 u64 type; 6406 6407 length = btrfs_chunk_length(leaf, chunk); 6408 stripe_len = btrfs_chunk_stripe_len(leaf, chunk); 6409 num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 6410 sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk); 6411 type = btrfs_chunk_type(leaf, chunk); 6412 6413 if (!num_stripes) { 6414 btrfs_err(fs_info, "invalid chunk num_stripes: %u", 6415 num_stripes); 6416 return -EIO; 6417 } 6418 if (!IS_ALIGNED(logical, fs_info->sectorsize)) { 6419 btrfs_err(fs_info, "invalid chunk logical %llu", logical); 6420 return -EIO; 6421 } 6422 if (btrfs_chunk_sector_size(leaf, chunk) != fs_info->sectorsize) { 6423 btrfs_err(fs_info, "invalid chunk sectorsize %u", 6424 btrfs_chunk_sector_size(leaf, chunk)); 6425 return -EIO; 6426 } 6427 if (!length || !IS_ALIGNED(length, fs_info->sectorsize)) { 6428 btrfs_err(fs_info, "invalid chunk length %llu", length); 6429 return -EIO; 6430 } 6431 if (!is_power_of_2(stripe_len) || stripe_len != BTRFS_STRIPE_LEN) { 6432 btrfs_err(fs_info, "invalid chunk stripe length: %llu", 6433 stripe_len); 6434 return -EIO; 6435 } 6436 if (~(BTRFS_BLOCK_GROUP_TYPE_MASK | BTRFS_BLOCK_GROUP_PROFILE_MASK) & 6437 type) { 6438 btrfs_err(fs_info, "unrecognized chunk type: %llu", 6439 ~(BTRFS_BLOCK_GROUP_TYPE_MASK | 6440 BTRFS_BLOCK_GROUP_PROFILE_MASK) & 6441 btrfs_chunk_type(leaf, chunk)); 6442 return -EIO; 6443 } 6444 if ((type & BTRFS_BLOCK_GROUP_RAID10 && sub_stripes != 2) || 6445 (type & BTRFS_BLOCK_GROUP_RAID1 && num_stripes < 1) || 6446 (type & BTRFS_BLOCK_GROUP_RAID5 && num_stripes < 2) || 6447 (type & BTRFS_BLOCK_GROUP_RAID6 && num_stripes < 3) || 6448 (type & BTRFS_BLOCK_GROUP_DUP && num_stripes > 2) || 6449 ((type & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 && 6450 num_stripes != 1)) { 6451 btrfs_err(fs_info, 6452 "invalid num_stripes:sub_stripes %u:%u for profile %llu", 6453 
num_stripes, sub_stripes, 6454 type & BTRFS_BLOCK_GROUP_PROFILE_MASK); 6455 return -EIO; 6456 } 6457 6458 return 0; 6459 } 6460 6461 static void btrfs_report_missing_device(struct btrfs_fs_info *fs_info, 6462 u64 devid, u8 *uuid, bool error) 6463 { 6464 if (error) 6465 btrfs_err_rl(fs_info, "devid %llu uuid %pU is missing", 6466 devid, uuid); 6467 else 6468 btrfs_warn_rl(fs_info, "devid %llu uuid %pU is missing", 6469 devid, uuid); 6470 } 6471 6472 static int read_one_chunk(struct btrfs_fs_info *fs_info, struct btrfs_key *key, 6473 struct extent_buffer *leaf, 6474 struct btrfs_chunk *chunk) 6475 { 6476 struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree; 6477 struct map_lookup *map; 6478 struct extent_map *em; 6479 u64 logical; 6480 u64 length; 6481 u64 devid; 6482 u8 uuid[BTRFS_UUID_SIZE]; 6483 int num_stripes; 6484 int ret; 6485 int i; 6486 6487 logical = key->offset; 6488 length = btrfs_chunk_length(leaf, chunk); 6489 num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 6490 6491 ret = btrfs_check_chunk_valid(fs_info, leaf, chunk, logical); 6492 if (ret) 6493 return ret; 6494 6495 read_lock(&map_tree->map_tree.lock); 6496 em = lookup_extent_mapping(&map_tree->map_tree, logical, 1); 6497 read_unlock(&map_tree->map_tree.lock); 6498 6499 /* already mapped? */ 6500 if (em && em->start <= logical && em->start + em->len > logical) { 6501 free_extent_map(em); 6502 return 0; 6503 } else if (em) { 6504 free_extent_map(em); 6505 } 6506 6507 em = alloc_extent_map(); 6508 if (!em) 6509 return -ENOMEM; 6510 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS); 6511 if (!map) { 6512 free_extent_map(em); 6513 return -ENOMEM; 6514 } 6515 6516 set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags); 6517 em->map_lookup = map; 6518 em->start = logical; 6519 em->len = length; 6520 em->orig_start = 0; 6521 em->block_start = 0; 6522 em->block_len = em->len; 6523 6524 map->num_stripes = num_stripes; 6525 map->io_width = btrfs_chunk_io_width(leaf, chunk); 6526 map->io_align = btrfs_chunk_io_align(leaf, chunk); 6527 map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk); 6528 map->type = btrfs_chunk_type(leaf, chunk); 6529 map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk); 6530 for (i = 0; i < num_stripes; i++) { 6531 map->stripes[i].physical = 6532 btrfs_stripe_offset_nr(leaf, chunk, i); 6533 devid = btrfs_stripe_devid_nr(leaf, chunk, i); 6534 read_extent_buffer(leaf, uuid, (unsigned long) 6535 btrfs_stripe_dev_uuid_nr(chunk, i), 6536 BTRFS_UUID_SIZE); 6537 map->stripes[i].dev = btrfs_find_device(fs_info, devid, 6538 uuid, NULL); 6539 if (!map->stripes[i].dev && 6540 !btrfs_test_opt(fs_info, DEGRADED)) { 6541 free_extent_map(em); 6542 btrfs_report_missing_device(fs_info, devid, uuid, true); 6543 return -ENOENT; 6544 } 6545 if (!map->stripes[i].dev) { 6546 map->stripes[i].dev = 6547 add_missing_dev(fs_info->fs_devices, devid, 6548 uuid); 6549 if (IS_ERR(map->stripes[i].dev)) { 6550 free_extent_map(em); 6551 btrfs_err(fs_info, 6552 "failed to init missing dev %llu: %ld", 6553 devid, PTR_ERR(map->stripes[i].dev)); 6554 return PTR_ERR(map->stripes[i].dev); 6555 } 6556 btrfs_report_missing_device(fs_info, devid, uuid, false); 6557 } 6558 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, 6559 &(map->stripes[i].dev->dev_state)); 6560 6561 } 6562 6563 write_lock(&map_tree->map_tree.lock); 6564 ret = add_extent_mapping(&map_tree->map_tree, em, 0); 6565 write_unlock(&map_tree->map_tree.lock); 6566 BUG_ON(ret); /* Tree corruption */ 6567 free_extent_map(em); 6568 6569 return 0; 6570 } 6571 6572 static void 
fill_device_from_item(struct extent_buffer *leaf, 6573 struct btrfs_dev_item *dev_item, 6574 struct btrfs_device *device) 6575 { 6576 unsigned long ptr; 6577 6578 device->devid = btrfs_device_id(leaf, dev_item); 6579 device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item); 6580 device->total_bytes = device->disk_total_bytes; 6581 device->commit_total_bytes = device->disk_total_bytes; 6582 device->bytes_used = btrfs_device_bytes_used(leaf, dev_item); 6583 device->commit_bytes_used = device->bytes_used; 6584 device->type = btrfs_device_type(leaf, dev_item); 6585 device->io_align = btrfs_device_io_align(leaf, dev_item); 6586 device->io_width = btrfs_device_io_width(leaf, dev_item); 6587 device->sector_size = btrfs_device_sector_size(leaf, dev_item); 6588 WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID); 6589 clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state); 6590 6591 ptr = btrfs_device_uuid(dev_item); 6592 read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE); 6593 } 6594 6595 static struct btrfs_fs_devices *open_seed_devices(struct btrfs_fs_info *fs_info, 6596 u8 *fsid) 6597 { 6598 struct btrfs_fs_devices *fs_devices; 6599 int ret; 6600 6601 lockdep_assert_held(&uuid_mutex); 6602 ASSERT(fsid); 6603 6604 fs_devices = fs_info->fs_devices->seed; 6605 while (fs_devices) { 6606 if (!memcmp(fs_devices->fsid, fsid, BTRFS_FSID_SIZE)) 6607 return fs_devices; 6608 6609 fs_devices = fs_devices->seed; 6610 } 6611 6612 fs_devices = find_fsid(fsid); 6613 if (!fs_devices) { 6614 if (!btrfs_test_opt(fs_info, DEGRADED)) 6615 return ERR_PTR(-ENOENT); 6616 6617 fs_devices = alloc_fs_devices(fsid); 6618 if (IS_ERR(fs_devices)) 6619 return fs_devices; 6620 6621 fs_devices->seeding = 1; 6622 fs_devices->opened = 1; 6623 return fs_devices; 6624 } 6625 6626 fs_devices = clone_fs_devices(fs_devices); 6627 if (IS_ERR(fs_devices)) 6628 return fs_devices; 6629 6630 ret = open_fs_devices(fs_devices, FMODE_READ, fs_info->bdev_holder); 6631 if (ret) { 6632 free_fs_devices(fs_devices); 6633 fs_devices = ERR_PTR(ret); 6634 goto out; 6635 } 6636 6637 if (!fs_devices->seeding) { 6638 close_fs_devices(fs_devices); 6639 free_fs_devices(fs_devices); 6640 fs_devices = ERR_PTR(-EINVAL); 6641 goto out; 6642 } 6643 6644 fs_devices->seed = fs_info->fs_devices->seed; 6645 fs_info->fs_devices->seed = fs_devices; 6646 out: 6647 return fs_devices; 6648 } 6649 6650 static int read_one_dev(struct btrfs_fs_info *fs_info, 6651 struct extent_buffer *leaf, 6652 struct btrfs_dev_item *dev_item) 6653 { 6654 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 6655 struct btrfs_device *device; 6656 u64 devid; 6657 int ret; 6658 u8 fs_uuid[BTRFS_FSID_SIZE]; 6659 u8 dev_uuid[BTRFS_UUID_SIZE]; 6660 6661 devid = btrfs_device_id(leaf, dev_item); 6662 read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item), 6663 BTRFS_UUID_SIZE); 6664 read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item), 6665 BTRFS_FSID_SIZE); 6666 6667 if (memcmp(fs_uuid, fs_info->fsid, BTRFS_FSID_SIZE)) { 6668 fs_devices = open_seed_devices(fs_info, fs_uuid); 6669 if (IS_ERR(fs_devices)) 6670 return PTR_ERR(fs_devices); 6671 } 6672 6673 device = btrfs_find_device(fs_info, devid, dev_uuid, fs_uuid); 6674 if (!device) { 6675 if (!btrfs_test_opt(fs_info, DEGRADED)) { 6676 btrfs_report_missing_device(fs_info, devid, 6677 dev_uuid, true); 6678 return -ENOENT; 6679 } 6680 6681 device = add_missing_dev(fs_devices, devid, dev_uuid); 6682 if (IS_ERR(device)) { 6683 btrfs_err(fs_info, 6684 "failed to add missing dev %llu: %ld", 6685 devid, 
PTR_ERR(device)); 6686 return PTR_ERR(device); 6687 } 6688 btrfs_report_missing_device(fs_info, devid, dev_uuid, false); 6689 } else { 6690 if (!device->bdev) { 6691 if (!btrfs_test_opt(fs_info, DEGRADED)) { 6692 btrfs_report_missing_device(fs_info, 6693 devid, dev_uuid, true); 6694 return -ENOENT; 6695 } 6696 btrfs_report_missing_device(fs_info, devid, 6697 dev_uuid, false); 6698 } 6699 6700 if (!device->bdev && 6701 !test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) { 6702 /* 6703 * this happens when a device that was properly set up 6704 * in the device info lists suddenly goes bad. 6705 * device->bdev is NULL, and so we have to set 6706 * the BTRFS_DEV_STATE_MISSING bit here 6707 */ 6708 device->fs_devices->missing_devices++; 6709 set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state); 6710 } 6711 6712 /* Move the device to its own fs_devices */ 6713 if (device->fs_devices != fs_devices) { 6714 ASSERT(test_bit(BTRFS_DEV_STATE_MISSING, 6715 &device->dev_state)); 6716 6717 list_move(&device->dev_list, &fs_devices->devices); 6718 device->fs_devices->num_devices--; 6719 fs_devices->num_devices++; 6720 6721 device->fs_devices->missing_devices--; 6722 fs_devices->missing_devices++; 6723 6724 device->fs_devices = fs_devices; 6725 } 6726 } 6727 6728 if (device->fs_devices != fs_info->fs_devices) { 6729 BUG_ON(test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)); 6730 if (device->generation != 6731 btrfs_device_generation(leaf, dev_item)) 6732 return -EINVAL; 6733 } 6734 6735 fill_device_from_item(leaf, dev_item, device); 6736 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state); 6737 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) && 6738 !test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) { 6739 device->fs_devices->total_rw_bytes += device->total_bytes; 6740 atomic64_add(device->total_bytes - device->bytes_used, 6741 &fs_info->free_chunk_space); 6742 } 6743 ret = 0; 6744 return ret; 6745 } 6746 6747 int btrfs_read_sys_array(struct btrfs_fs_info *fs_info) 6748 { 6749 struct btrfs_root *root = fs_info->tree_root; 6750 struct btrfs_super_block *super_copy = fs_info->super_copy; 6751 struct extent_buffer *sb; 6752 struct btrfs_disk_key *disk_key; 6753 struct btrfs_chunk *chunk; 6754 u8 *array_ptr; 6755 unsigned long sb_array_offset; 6756 int ret = 0; 6757 u32 num_stripes; 6758 u32 array_size; 6759 u32 len = 0; 6760 u32 cur_offset; 6761 u64 type; 6762 struct btrfs_key key; 6763 6764 ASSERT(BTRFS_SUPER_INFO_SIZE <= fs_info->nodesize); 6765 /* 6766 * This will create an extent buffer of nodesize; the superblock size is 6767 * fixed to BTRFS_SUPER_INFO_SIZE. If nodesize > sb size, this will 6768 * overallocate but we can keep it as-is, only the first page is used. 6769 */ 6770 sb = btrfs_find_create_tree_block(fs_info, BTRFS_SUPER_INFO_OFFSET); 6771 if (IS_ERR(sb)) 6772 return PTR_ERR(sb); 6773 set_extent_buffer_uptodate(sb); 6774 btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0); 6775 /* 6776 * The sb extent buffer is artificial and just used to read the system array. 6777 * set_extent_buffer_uptodate() call does not properly mark all its 6778 * pages up-to-date when the page is larger: extent does not cover the 6779 * whole page and consequently check_page_uptodate does not find all 6780 * the page's extents up-to-date (the hole beyond sb), 6781 * and write_extent_buffer then triggers a WARN_ON. 6782 * 6783 * Regular short extents go through the mark_extent_buffer_dirty/writeback cycle, 6784 but sb spans only this function.
Add an explicit SetPageUptodate call 6785 * to silence the warning, e.g. on PowerPC 64. 6786 */ 6787 if (PAGE_SIZE > BTRFS_SUPER_INFO_SIZE) 6788 SetPageUptodate(sb->pages[0]); 6789 6790 write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE); 6791 array_size = btrfs_super_sys_array_size(super_copy); 6792 6793 array_ptr = super_copy->sys_chunk_array; 6794 sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array); 6795 cur_offset = 0; 6796 6797 while (cur_offset < array_size) { 6798 disk_key = (struct btrfs_disk_key *)array_ptr; 6799 len = sizeof(*disk_key); 6800 if (cur_offset + len > array_size) 6801 goto out_short_read; 6802 6803 btrfs_disk_key_to_cpu(&key, disk_key); 6804 6805 array_ptr += len; 6806 sb_array_offset += len; 6807 cur_offset += len; 6808 6809 if (key.type == BTRFS_CHUNK_ITEM_KEY) { 6810 chunk = (struct btrfs_chunk *)sb_array_offset; 6811 /* 6812 * At least one btrfs_chunk with one stripe must be 6813 * present; the exact stripe count check comes afterwards 6814 */ 6815 len = btrfs_chunk_item_size(1); 6816 if (cur_offset + len > array_size) 6817 goto out_short_read; 6818 6819 num_stripes = btrfs_chunk_num_stripes(sb, chunk); 6820 if (!num_stripes) { 6821 btrfs_err(fs_info, 6822 "invalid number of stripes %u in sys_array at offset %u", 6823 num_stripes, cur_offset); 6824 ret = -EIO; 6825 break; 6826 } 6827 6828 type = btrfs_chunk_type(sb, chunk); 6829 if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) { 6830 btrfs_err(fs_info, 6831 "invalid chunk type %llu in sys_array at offset %u", 6832 type, cur_offset); 6833 ret = -EIO; 6834 break; 6835 } 6836 6837 len = btrfs_chunk_item_size(num_stripes); 6838 if (cur_offset + len > array_size) 6839 goto out_short_read; 6840 6841 ret = read_one_chunk(fs_info, &key, sb, chunk); 6842 if (ret) 6843 break; 6844 } else { 6845 btrfs_err(fs_info, 6846 "unexpected item type %u in sys_array at offset %u", 6847 (u32)key.type, cur_offset); 6848 ret = -EIO; 6849 break; 6850 } 6851 array_ptr += len; 6852 sb_array_offset += len; 6853 cur_offset += len; 6854 } 6855 clear_extent_buffer_uptodate(sb); 6856 free_extent_buffer_stale(sb); 6857 return ret; 6858 6859 out_short_read: 6860 btrfs_err(fs_info, "sys_array too short to read %u bytes at offset %u", 6861 len, cur_offset); 6862 clear_extent_buffer_uptodate(sb); 6863 free_extent_buffer_stale(sb); 6864 return -EIO; 6865 } 6866 6867 /* 6868 * Check if all chunks in the fs are OK for read-write degraded mount 6869 * 6870 * If the @failing_dev is specified, it's accounted as missing. 6871 * 6872 * Return true if all chunks meet the minimal RW mount requirements. 6873 * Return false if any chunk doesn't meet the minimal RW mount requirements. 6874 */ 6875 bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info, 6876 struct btrfs_device *failing_dev) 6877 { 6878 struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree; 6879 struct extent_map *em; 6880 u64 next_start = 0; 6881 bool ret = true; 6882 6883 read_lock(&map_tree->map_tree.lock); 6884 em = lookup_extent_mapping(&map_tree->map_tree, 0, (u64)-1); 6885 read_unlock(&map_tree->map_tree.lock); 6886 /* No chunk at all?
Return false anyway */ 6887 if (!em) { 6888 ret = false; 6889 goto out; 6890 } 6891 while (em) { 6892 struct map_lookup *map; 6893 int missing = 0; 6894 int max_tolerated; 6895 int i; 6896 6897 map = em->map_lookup; 6898 max_tolerated = 6899 btrfs_get_num_tolerated_disk_barrier_failures( 6900 map->type); 6901 for (i = 0; i < map->num_stripes; i++) { 6902 struct btrfs_device *dev = map->stripes[i].dev; 6903 6904 if (!dev || !dev->bdev || 6905 test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) || 6906 dev->last_flush_error) 6907 missing++; 6908 else if (failing_dev && failing_dev == dev) 6909 missing++; 6910 } 6911 if (missing > max_tolerated) { 6912 if (!failing_dev) 6913 btrfs_warn(fs_info, 6914 "chunk %llu missing %d devices, max tolerance is %d for writeable mount", 6915 em->start, missing, max_tolerated); 6916 free_extent_map(em); 6917 ret = false; 6918 goto out; 6919 } 6920 next_start = extent_map_end(em); 6921 free_extent_map(em); 6922 6923 read_lock(&map_tree->map_tree.lock); 6924 em = lookup_extent_mapping(&map_tree->map_tree, next_start, 6925 (u64)(-1) - next_start); 6926 read_unlock(&map_tree->map_tree.lock); 6927 } 6928 out: 6929 return ret; 6930 } 6931 6932 int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info) 6933 { 6934 struct btrfs_root *root = fs_info->chunk_root; 6935 struct btrfs_path *path; 6936 struct extent_buffer *leaf; 6937 struct btrfs_key key; 6938 struct btrfs_key found_key; 6939 int ret; 6940 int slot; 6941 u64 total_dev = 0; 6942 6943 path = btrfs_alloc_path(); 6944 if (!path) 6945 return -ENOMEM; 6946 6947 /* 6948 * uuid_mutex is needed only if we are mounting a sprout FS 6949 * otherwise we don't need it. 6950 */ 6951 mutex_lock(&uuid_mutex); 6952 mutex_lock(&fs_info->chunk_mutex); 6953 6954 /* 6955 * Read all device items, and then all the chunk items. All 6956 * device items are found before any chunk item (their object id 6957 * is smaller than the lowest possible object id for a chunk 6958 * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID). 6959 */ 6960 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 6961 key.offset = 0; 6962 key.type = 0; 6963 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 6964 if (ret < 0) 6965 goto error; 6966 while (1) { 6967 leaf = path->nodes[0]; 6968 slot = path->slots[0]; 6969 if (slot >= btrfs_header_nritems(leaf)) { 6970 ret = btrfs_next_leaf(root, path); 6971 if (ret == 0) 6972 continue; 6973 if (ret < 0) 6974 goto error; 6975 break; 6976 } 6977 btrfs_item_key_to_cpu(leaf, &found_key, slot); 6978 if (found_key.type == BTRFS_DEV_ITEM_KEY) { 6979 struct btrfs_dev_item *dev_item; 6980 dev_item = btrfs_item_ptr(leaf, slot, 6981 struct btrfs_dev_item); 6982 ret = read_one_dev(fs_info, leaf, dev_item); 6983 if (ret) 6984 goto error; 6985 total_dev++; 6986 } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) { 6987 struct btrfs_chunk *chunk; 6988 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk); 6989 ret = read_one_chunk(fs_info, &found_key, leaf, chunk); 6990 if (ret) 6991 goto error; 6992 } 6993 path->slots[0]++; 6994 } 6995 6996 /* 6997 * After loading chunk tree, we've got all device information, 6998 * do another round of validation checks. 
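 * Specifically, the checks below verify that the number of device
 * items found matches the device count recorded in the superblock,
 * and that the writable bytes accumulated from the device items do
 * not exceed super_total_bytes.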
6999 */ 7000 if (total_dev != fs_info->fs_devices->total_devices) { 7001 btrfs_err(fs_info, 7002 "super_num_devices %llu mismatch with num_devices %llu found here", 7003 btrfs_super_num_devices(fs_info->super_copy), 7004 total_dev); 7005 ret = -EINVAL; 7006 goto error; 7007 } 7008 if (btrfs_super_total_bytes(fs_info->super_copy) < 7009 fs_info->fs_devices->total_rw_bytes) { 7010 btrfs_err(fs_info, 7011 "super_total_bytes %llu mismatch with fs_devices total_rw_bytes %llu", 7012 btrfs_super_total_bytes(fs_info->super_copy), 7013 fs_info->fs_devices->total_rw_bytes); 7014 ret = -EINVAL; 7015 goto error; 7016 } 7017 ret = 0; 7018 error: 7019 mutex_unlock(&fs_info->chunk_mutex); 7020 mutex_unlock(&uuid_mutex); 7021 7022 btrfs_free_path(path); 7023 return ret; 7024 } 7025 7026 void btrfs_init_devices_late(struct btrfs_fs_info *fs_info) 7027 { 7028 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 7029 struct btrfs_device *device; 7030 7031 while (fs_devices) { 7032 mutex_lock(&fs_devices->device_list_mutex); 7033 list_for_each_entry(device, &fs_devices->devices, dev_list) 7034 device->fs_info = fs_info; 7035 mutex_unlock(&fs_devices->device_list_mutex); 7036 7037 fs_devices = fs_devices->seed; 7038 } 7039 } 7040 7041 static void __btrfs_reset_dev_stats(struct btrfs_device *dev) 7042 { 7043 int i; 7044 7045 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) 7046 btrfs_dev_stat_reset(dev, i); 7047 } 7048 7049 int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info) 7050 { 7051 struct btrfs_key key; 7052 struct btrfs_key found_key; 7053 struct btrfs_root *dev_root = fs_info->dev_root; 7054 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 7055 struct extent_buffer *eb; 7056 int slot; 7057 int ret = 0; 7058 struct btrfs_device *device; 7059 struct btrfs_path *path = NULL; 7060 int i; 7061 7062 path = btrfs_alloc_path(); 7063 if (!path) { 7064 ret = -ENOMEM; 7065 goto out; 7066 } 7067 7068 mutex_lock(&fs_devices->device_list_mutex); 7069 list_for_each_entry(device, &fs_devices->devices, dev_list) { 7070 int item_size; 7071 struct btrfs_dev_stats_item *ptr; 7072 7073 key.objectid = BTRFS_DEV_STATS_OBJECTID; 7074 key.type = BTRFS_PERSISTENT_ITEM_KEY; 7075 key.offset = device->devid; 7076 ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0); 7077 if (ret) { 7078 __btrfs_reset_dev_stats(device); 7079 device->dev_stats_valid = 1; 7080 btrfs_release_path(path); 7081 continue; 7082 } 7083 slot = path->slots[0]; 7084 eb = path->nodes[0]; 7085 btrfs_item_key_to_cpu(eb, &found_key, slot); 7086 item_size = btrfs_item_size_nr(eb, slot); 7087 7088 ptr = btrfs_item_ptr(eb, slot, 7089 struct btrfs_dev_stats_item); 7090 7091 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) { 7092 if (item_size >= (1 + i) * sizeof(__le64)) 7093 btrfs_dev_stat_set(device, i, 7094 btrfs_dev_stats_value(eb, ptr, i)); 7095 else 7096 btrfs_dev_stat_reset(device, i); 7097 } 7098 7099 device->dev_stats_valid = 1; 7100 btrfs_dev_stat_print_on_load(device); 7101 btrfs_release_path(path); 7102 } 7103 mutex_unlock(&fs_devices->device_list_mutex); 7104 7105 out: 7106 btrfs_free_path(path); 7107 return ret < 0 ? 
ret : 0; 7108 } 7109 7110 static int update_dev_stat_item(struct btrfs_trans_handle *trans, 7111 struct btrfs_fs_info *fs_info, 7112 struct btrfs_device *device) 7113 { 7114 struct btrfs_root *dev_root = fs_info->dev_root; 7115 struct btrfs_path *path; 7116 struct btrfs_key key; 7117 struct extent_buffer *eb; 7118 struct btrfs_dev_stats_item *ptr; 7119 int ret; 7120 int i; 7121 7122 key.objectid = BTRFS_DEV_STATS_OBJECTID; 7123 key.type = BTRFS_PERSISTENT_ITEM_KEY; 7124 key.offset = device->devid; 7125 7126 path = btrfs_alloc_path(); 7127 if (!path) 7128 return -ENOMEM; 7129 ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1); 7130 if (ret < 0) { 7131 btrfs_warn_in_rcu(fs_info, 7132 "error %d while searching for dev_stats item for device %s", 7133 ret, rcu_str_deref(device->name)); 7134 goto out; 7135 } 7136 7137 if (ret == 0 && 7138 btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) { 7139 /* need to delete old one and insert a new one */ 7140 ret = btrfs_del_item(trans, dev_root, path); 7141 if (ret != 0) { 7142 btrfs_warn_in_rcu(fs_info, 7143 "delete too small dev_stats item for device %s failed %d", 7144 rcu_str_deref(device->name), ret); 7145 goto out; 7146 } 7147 ret = 1; 7148 } 7149 7150 if (ret == 1) { 7151 /* need to insert a new item */ 7152 btrfs_release_path(path); 7153 ret = btrfs_insert_empty_item(trans, dev_root, path, 7154 &key, sizeof(*ptr)); 7155 if (ret < 0) { 7156 btrfs_warn_in_rcu(fs_info, 7157 "insert dev_stats item for device %s failed %d", 7158 rcu_str_deref(device->name), ret); 7159 goto out; 7160 } 7161 } 7162 7163 eb = path->nodes[0]; 7164 ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item); 7165 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) 7166 btrfs_set_dev_stats_value(eb, ptr, i, 7167 btrfs_dev_stat_read(device, i)); 7168 btrfs_mark_buffer_dirty(eb); 7169 7170 out: 7171 btrfs_free_path(path); 7172 return ret; 7173 } 7174 7175 /* 7176 * called from commit_transaction. Writes all changed device stats to disk. 7177 */ 7178 int btrfs_run_dev_stats(struct btrfs_trans_handle *trans, 7179 struct btrfs_fs_info *fs_info) 7180 { 7181 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 7182 struct btrfs_device *device; 7183 int stats_cnt; 7184 int ret = 0; 7185 7186 mutex_lock(&fs_devices->device_list_mutex); 7187 list_for_each_entry(device, &fs_devices->devices, dev_list) { 7188 stats_cnt = atomic_read(&device->dev_stats_ccnt); 7189 if (!device->dev_stats_valid || stats_cnt == 0) 7190 continue; 7191 7192 7193 /* 7194 * There is a LOAD-LOAD control dependency between the value of 7195 * dev_stats_ccnt and updating the on-disk values which requires 7196 * reading the in-memory counters. Such control dependencies 7197 * require explicit read memory barriers. 
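 * Concretely: the atomic_read() of dev_stats_ccnt above must be
 * ordered before the btrfs_dev_stat_read() calls done from
 * update_dev_stat_item(), otherwise stale counter values could be
 * written out while the atomic_sub() below still subtracts stats_cnt.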
7198 * 7199 * This memory barrier pairs with smp_mb__before_atomic in 7200 * btrfs_dev_stat_inc/btrfs_dev_stat_set and with the full 7201 * barrier implied by atomic_xchg in 7202 * btrfs_dev_stats_read_and_reset. 7203 */ 7204 smp_rmb(); 7205 7206 ret = update_dev_stat_item(trans, fs_info, device); 7207 if (!ret) 7208 atomic_sub(stats_cnt, &device->dev_stats_ccnt); 7209 } 7210 mutex_unlock(&fs_devices->device_list_mutex); 7211 7212 return ret; 7213 } 7214 7215 void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index) 7216 { 7217 btrfs_dev_stat_inc(dev, index); 7218 btrfs_dev_stat_print_on_error(dev); 7219 } 7220 7221 static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev) 7222 { 7223 if (!dev->dev_stats_valid) 7224 return; 7225 btrfs_err_rl_in_rcu(dev->fs_info, 7226 "bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u", 7227 rcu_str_deref(dev->name), 7228 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS), 7229 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS), 7230 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS), 7231 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS), 7232 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS)); 7233 } 7234 7235 static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev) 7236 { 7237 int i; 7238 7239 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) 7240 if (btrfs_dev_stat_read(dev, i) != 0) 7241 break; 7242 if (i == BTRFS_DEV_STAT_VALUES_MAX) 7243 return; /* all values == 0, suppress message */ 7244 7245 btrfs_info_in_rcu(dev->fs_info, 7246 "bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u", 7247 rcu_str_deref(dev->name), 7248 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS), 7249 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS), 7250 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS), 7251 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS), 7252 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS)); 7253 } 7254 7255 int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info, 7256 struct btrfs_ioctl_get_dev_stats *stats) 7257 { 7258 struct btrfs_device *dev; 7259 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 7260 int i; 7261 7262 mutex_lock(&fs_devices->device_list_mutex); 7263 dev = btrfs_find_device(fs_info, stats->devid, NULL, NULL); 7264 mutex_unlock(&fs_devices->device_list_mutex); 7265 7266 if (!dev) { 7267 btrfs_warn(fs_info, "get dev_stats failed, device not found"); 7268 return -ENODEV; 7269 } else if (!dev->dev_stats_valid) { 7270 btrfs_warn(fs_info, "get dev_stats failed, not yet valid"); 7271 return -ENODEV; 7272 } else if (stats->flags & BTRFS_DEV_STATS_RESET) { 7273 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) { 7274 if (stats->nr_items > i) 7275 stats->values[i] = 7276 btrfs_dev_stat_read_and_reset(dev, i); 7277 else 7278 btrfs_dev_stat_reset(dev, i); 7279 } 7280 } else { 7281 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) 7282 if (stats->nr_items > i) 7283 stats->values[i] = btrfs_dev_stat_read(dev, i); 7284 } 7285 if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX) 7286 stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX; 7287 return 0; 7288 } 7289 7290 void btrfs_scratch_superblocks(struct block_device *bdev, const char *device_path) 7291 { 7292 struct buffer_head *bh; 7293 struct btrfs_super_block *disk_super; 7294 int copy_num; 7295 7296 if (!bdev) 7297 return; 7298 7299 for (copy_num = 0; copy_num < BTRFS_SUPER_MIRROR_MAX; 7300 copy_num++) { 7301 7302 if (btrfs_read_dev_one_super(bdev, copy_num, &bh)) 7303 continue; 7304 7305 disk_super = (struct
btrfs_super_block *)bh->b_data; 7306 7307 memset(&disk_super->magic, 0, sizeof(disk_super->magic)); 7308 set_buffer_dirty(bh); 7309 sync_dirty_buffer(bh); 7310 brelse(bh); 7311 } 7312 7313 /* Notify udev that device has changed */ 7314 btrfs_kobject_uevent(bdev, KOBJ_CHANGE); 7315 7316 /* Update ctime/mtime for device path for libblkid */ 7317 update_dev_time(device_path); 7318 } 7319 7320 /* 7321 * Update the size of all devices, which is used for writing out the 7322 * super blocks. 7323 */ 7324 void btrfs_update_commit_device_size(struct btrfs_fs_info *fs_info) 7325 { 7326 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 7327 struct btrfs_device *curr, *next; 7328 7329 if (list_empty(&fs_devices->resized_devices)) 7330 return; 7331 7332 mutex_lock(&fs_devices->device_list_mutex); 7333 mutex_lock(&fs_info->chunk_mutex); 7334 list_for_each_entry_safe(curr, next, &fs_devices->resized_devices, 7335 resized_list) { 7336 list_del_init(&curr->resized_list); 7337 curr->commit_total_bytes = curr->disk_total_bytes; 7338 } 7339 mutex_unlock(&fs_info->chunk_mutex); 7340 mutex_unlock(&fs_devices->device_list_mutex); 7341 } 7342 7343 /* Must be invoked during the transaction commit */ 7344 void btrfs_update_commit_device_bytes_used(struct btrfs_transaction *trans) 7345 { 7346 struct btrfs_fs_info *fs_info = trans->fs_info; 7347 struct extent_map *em; 7348 struct map_lookup *map; 7349 struct btrfs_device *dev; 7350 int i; 7351 7352 if (list_empty(&trans->pending_chunks)) 7353 return; 7354 7355 /* In order to kick the device replace finish process */ 7356 mutex_lock(&fs_info->chunk_mutex); 7357 list_for_each_entry(em, &trans->pending_chunks, list) { 7358 map = em->map_lookup; 7359 7360 for (i = 0; i < map->num_stripes; i++) { 7361 dev = map->stripes[i].dev; 7362 dev->commit_bytes_used = dev->bytes_used; 7363 } 7364 } 7365 mutex_unlock(&fs_info->chunk_mutex); 7366 } 7367 7368 void btrfs_set_fs_info_ptr(struct btrfs_fs_info *fs_info) 7369 { 7370 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 7371 while (fs_devices) { 7372 fs_devices->fs_info = fs_info; 7373 fs_devices = fs_devices->seed; 7374 } 7375 } 7376 7377 void btrfs_reset_fs_info_ptr(struct btrfs_fs_info *fs_info) 7378 { 7379 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 7380 while (fs_devices) { 7381 fs_devices->fs_info = NULL; 7382 fs_devices = fs_devices->seed; 7383 } 7384 } 7385
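/*
 * Usage sketch (illustrative only, hence compiled out): how a caller
 * could resolve a logical address to its physical stripes with
 * btrfs_map_block() and walk the returned btrfs_bio. The helper name
 * is made up for the example; a plain BTRFS_MAP_READ typically yields
 * a single stripe, while BTRFS_MAP_GET_READ_MIRRORS would return every
 * mirror of the block.
 */
#if 0
static int example_lookup_physical(struct btrfs_fs_info *fs_info, u64 logical)
{
	struct btrfs_bio *bbio = NULL;
	u64 length = fs_info->sectorsize;
	int i, ret;

	ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, logical, &length,
			      &bbio, 0);
	if (ret)
		return ret;

	/* One entry per stripe that backs this logical range */
	for (i = 0; i < bbio->num_stripes; i++)
		pr_info("stripe %d: devid %llu physical %llu\n", i,
			bbio->stripes[i].dev->devid,
			bbio->stripes[i].physical);

	btrfs_put_bbio(bbio);
	return 0;
}
#endif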