// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/iocontext.h>
#include <linux/capability.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/semaphore.h>
#include <linux/uuid.h>
#include <linux/list_sort.h>
#include <asm/div64.h>
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "async-thread.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "math.h"
#include "dev-replace.h"
#include "sysfs.h"

const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
	[BTRFS_RAID_RAID10] = {
		.sub_stripes	= 2,
		.dev_stripes	= 1,
		.devs_max	= 0,	/* 0 == as many as possible */
		.devs_min	= 4,
		.tolerated_failures = 1,
		.devs_increment	= 2,
		.ncopies	= 2,
		.raid_name	= "raid10",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID10,
		.mindev_error	= BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 2,
		.devs_min	= 2,
		.tolerated_failures = 1,
		.devs_increment	= 2,
		.ncopies	= 2,
		.raid_name	= "raid1",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID1,
		.mindev_error	= BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET,
	},
	[BTRFS_RAID_DUP] = {
		.sub_stripes	= 1,
		.dev_stripes	= 2,
		.devs_max	= 1,
		.devs_min	= 1,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 2,
		.raid_name	= "dup",
		.bg_flag	= BTRFS_BLOCK_GROUP_DUP,
		.mindev_error	= 0,
	},
	[BTRFS_RAID_RAID0] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 2,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 1,
		.raid_name	= "raid0",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID0,
		.mindev_error	= 0,
	},
	[BTRFS_RAID_SINGLE] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 1,
		.devs_min	= 1,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 1,
		.raid_name	= "single",
		.bg_flag	= 0,
		.mindev_error	= 0,
	},
	[BTRFS_RAID_RAID5] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 2,
		.tolerated_failures = 1,
		.devs_increment	= 1,
		.ncopies	= 2,
		.raid_name	= "raid5",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID5,
		.mindev_error	= BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID6] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 3,
		.tolerated_failures = 2,
		.devs_increment	= 1,
		.ncopies	= 3,
		.raid_name	= "raid6",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID6,
		.mindev_error	= BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET,
	},
};

const char *get_raid_name(enum btrfs_raid_types type)
{
	if (type >= BTRFS_NR_RAID_TYPES)
		return NULL;

	return btrfs_raid_array[type].raid_name;
}
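/*
 * Example (illustrative sketch, not part of the original file): callers can
 * index btrfs_raid_array directly to reason about a profile, for instance to
 * see how many device failures a block group type tolerates:
 *
 *	const struct btrfs_raid_attr *attr = &btrfs_raid_array[BTRFS_RAID_RAID10];
 *
 *	if (nr_missing > attr->tolerated_failures)
 *		handle_degraded();	<-- hypothetical reaction
 *
 * Here 'nr_missing' and 'handle_degraded' are hypothetical names; the real
 * degradation checks live elsewhere in btrfs.
 */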
static int init_first_rw_device(struct btrfs_trans_handle *trans,
				struct btrfs_fs_info *fs_info);
static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info);
static void __btrfs_reset_dev_stats(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
			     enum btrfs_map_op op,
			     u64 logical, u64 *length,
			     struct btrfs_bio **bbio_ret,
			     int mirror_num, int need_raid_map);

/*
 * Device locking
 * ==============
 *
 * There are several mutexes that protect manipulation of devices and low-level
 * structures like chunks but not block groups, extents or files
 *
 * uuid_mutex (global lock)
 * ------------------------
 * protects the fs_uuids list that tracks all per-fs fs_devices, resulting from
 * the SCAN_DEV ioctl registration or from mount either implicitly (the first
 * device) or requested by the device= mount option
 *
 * the mutex can be very coarse and can cover long-running operations
 *
 * protects: updates to fs_devices counters like missing devices, rw devices,
 * seeding, structure cloning, opening/closing devices at mount/umount time
 *
 * global::fs_devs - add, remove, updates to the global list
 *
 * does not protect: manipulation of the fs_devices::devices list!
 *
 * btrfs_device::name - renames (write side), read is RCU
 *
 * fs_devices::device_list_mutex (per-fs, with RCU)
 * ------------------------------------------------
 * protects updates to fs_devices::devices, i.e. adding and deleting
 *
 * simple list traversal with read-only actions can be done with RCU protection
 *
 * may be used to exclude some operations from running concurrently without any
 * modifications to the list (see write_all_supers)
 *
 * balance_mutex
 * -------------
 * protects balance structures (status, state) and context accessed from
 * several places (internally, ioctl)
 *
 * chunk_mutex
 * -----------
 * protects chunks, adding or removing during allocation, trim or when a new
 * device is added/removed
 *
 * cleaner_mutex
 * -------------
 * a big lock that is held by the cleaner thread and prevents running subvolume
 * cleaning together with relocation or delayed iputs
 *
 *
 * Lock nesting
 * ============
 *
 * uuid_mutex
 *   volume_mutex
 *     device_list_mutex
 *       chunk_mutex
 *     balance_mutex
 *
 *
 * Exclusive operations, BTRFS_FS_EXCL_OP
 * ======================================
 *
 * Maintains the exclusivity of the following operations that apply to the
 * whole filesystem and cannot run in parallel.
 *
 * - Balance (*)
 * - Device add
 * - Device remove
 * - Device replace (*)
 * - Resize
 *
 * The device operations (as above) can be in one of the following states:
 *
 * - Running state
 * - Paused state
 * - Completed state
 *
 * Only device operations marked with (*) can go into the Paused state for the
 * following reasons:
 *
 * - ioctl (only Balance can be Paused through ioctl)
 * - filesystem remounted as read-only
 * - filesystem unmounted and mounted as read-only
 * - system power-cycle and filesystem mounted as read-only
 * - filesystem or device errors leading to forced read-only
 *
 * BTRFS_FS_EXCL_OP flag is set and cleared using atomic operations.
 * During the course of Paused state, the BTRFS_FS_EXCL_OP remains set.
 * A device operation in Paused or Running state can be canceled or resumed
 * either by ioctl (Balance only) or when remounted as read-write.
 * BTRFS_FS_EXCL_OP flag is cleared when the device operation is canceled or
 * completed.
 */
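/*
 * Example (illustrative only, assuming the nesting documented above): code
 * that needs several of these locks must take the outermost one first, e.g.:
 *
 *	mutex_lock(&uuid_mutex);
 *	mutex_lock(&fs_devices->device_list_mutex);
 *	...modify fs_devices::devices...
 *	mutex_unlock(&fs_devices->device_list_mutex);
 *	mutex_unlock(&uuid_mutex);
 *
 * Taking them in the opposite order could deadlock against a task following
 * the documented order.
 */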
DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);
struct list_head *btrfs_get_fs_uuids(void)
{
	return &fs_uuids;
}

/*
 * alloc_fs_devices - allocate struct btrfs_fs_devices
 * @fsid:	if not NULL, copy the uuid to fs_devices::fsid
 *
 * Return a pointer to a new struct btrfs_fs_devices on success, or ERR_PTR().
 * The returned struct is not linked onto any lists and can be destroyed with
 * kfree() right away.
 */
static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid)
{
	struct btrfs_fs_devices *fs_devs;

	fs_devs = kzalloc(sizeof(*fs_devs), GFP_KERNEL);
	if (!fs_devs)
		return ERR_PTR(-ENOMEM);

	mutex_init(&fs_devs->device_list_mutex);

	INIT_LIST_HEAD(&fs_devs->devices);
	INIT_LIST_HEAD(&fs_devs->resized_devices);
	INIT_LIST_HEAD(&fs_devs->alloc_list);
	INIT_LIST_HEAD(&fs_devs->fs_list);
	if (fsid)
		memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE);

	return fs_devs;
}

void btrfs_free_device(struct btrfs_device *device)
{
	rcu_string_free(device->name);
	bio_put(device->flush_bio);
	kfree(device);
}

static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;
	WARN_ON(fs_devices->opened);
	while (!list_empty(&fs_devices->devices)) {
		device = list_entry(fs_devices->devices.next,
				    struct btrfs_device, dev_list);
		list_del(&device->dev_list);
		btrfs_free_device(device);
	}
	kfree(fs_devices);
}

static void btrfs_kobject_uevent(struct block_device *bdev,
				 enum kobject_action action)
{
	int ret;

	ret = kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, action);
	if (ret)
		pr_warn("BTRFS: Sending event '%d' to kobject: '%s' (%p): failed\n",
			action,
			kobject_name(&disk_to_dev(bdev->bd_disk)->kobj),
			&disk_to_dev(bdev->bd_disk)->kobj);
}

void __exit btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;

	while (!list_empty(&fs_uuids)) {
		fs_devices = list_entry(fs_uuids.next,
					struct btrfs_fs_devices, fs_list);
		list_del(&fs_devices->fs_list);
		free_fs_devices(fs_devices);
	}
}

/*
 * Returns a pointer to a new btrfs_device on success; ERR_PTR() on error.
 * Returned struct is not linked onto any lists and must be destroyed using
 * btrfs_free_device.
 */
static struct btrfs_device *__alloc_device(void)
{
	struct btrfs_device *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	/*
	 * Preallocate a bio that's always going to be used for flushing device
	 * barriers and matches the device lifespan
	 */
	dev->flush_bio = bio_alloc_bioset(GFP_KERNEL, 0, NULL);
	if (!dev->flush_bio) {
		kfree(dev);
		return ERR_PTR(-ENOMEM);
	}

	INIT_LIST_HEAD(&dev->dev_list);
	INIT_LIST_HEAD(&dev->dev_alloc_list);
	INIT_LIST_HEAD(&dev->resized_list);

	spin_lock_init(&dev->io_lock);

	atomic_set(&dev->reada_in_flight, 0);
	atomic_set(&dev->dev_stats_ccnt, 0);
	btrfs_device_data_ordered_init(dev);
	INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
	INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);

	return dev;
}
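/*
 * Example (illustrative only): until the device is linked into a list, the
 * caller owns it and must release it with btrfs_free_device() on error:
 *
 *	struct btrfs_device *dev = __alloc_device();
 *
 *	if (IS_ERR(dev))
 *		return PTR_ERR(dev);
 *	...
 *	if (failure)
 *		btrfs_free_device(dev);	<-- frees name, flush_bio and dev
 *
 * 'failure' is a placeholder for whatever error path the caller has.
 */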
/*
 * Find a device specified by @devid or @uuid in the list of @fs_devices, or
 * return NULL.
 *
 * If devid and uuid are both specified, the match must be exact, otherwise
 * only devid is used.
 */
static struct btrfs_device *find_device(struct btrfs_fs_devices *fs_devices,
					u64 devid, const u8 *uuid)
{
	struct btrfs_device *dev;

	list_for_each_entry(dev, &fs_devices->devices, dev_list) {
		if (dev->devid == devid &&
		    (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
			return dev;
		}
	}
	return NULL;
}

static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
{
	struct btrfs_fs_devices *fs_devices;

	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
			return fs_devices;
	}
	return NULL;
}

static int
btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder,
		      int flush, struct block_device **bdev,
		      struct buffer_head **bh)
{
	int ret;

	*bdev = blkdev_get_by_path(device_path, flags, holder);

	if (IS_ERR(*bdev)) {
		ret = PTR_ERR(*bdev);
		goto error;
	}

	if (flush)
		filemap_write_and_wait((*bdev)->bd_inode->i_mapping);
	ret = set_blocksize(*bdev, BTRFS_BDEV_BLOCKSIZE);
	if (ret) {
		blkdev_put(*bdev, flags);
		goto error;
	}
	invalidate_bdev(*bdev);
	*bh = btrfs_read_dev_super(*bdev);
	if (IS_ERR(*bh)) {
		ret = PTR_ERR(*bh);
		blkdev_put(*bdev, flags);
		goto error;
	}

	return 0;

error:
	*bdev = NULL;
	*bh = NULL;
	return ret;
}

static void requeue_list(struct btrfs_pending_bios *pending_bios,
			 struct bio *head, struct bio *tail)
{

	struct bio *old_head;

	old_head = pending_bios->head;
	pending_bios->head = head;
	if (pending_bios->tail)
		tail->bi_next = old_head;
	else
		pending_bios->tail = tail;
}
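/*
 * Worked example (illustrative only): with a pending list A->B (tail B) and a
 * partially processed chain C->D (tail D), requeue_list(pending_bios, C, D)
 * splices the chain back onto the front, giving C->D->A->B with the tail
 * still B. If the list was empty, C->D becomes the whole list and D becomes
 * the new tail.
 */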
/*
 * we try to collect pending bios for a device so we don't get a large
 * number of procs sending bios down to the same device. This greatly
 * improves the scheduler's ability to collect and merge the bios.
 *
 * But, it also turns into a long list of bios to process and that is sure
 * to eventually make the worker thread block. The solution here is to
 * make some progress and then put this work struct back at the end of
 * the list if the block device is congested. This way, multiple devices
 * can make progress from a single worker thread.
 */
static noinline void run_scheduled_bios(struct btrfs_device *device)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct bio *pending;
	struct backing_dev_info *bdi;
	struct btrfs_pending_bios *pending_bios;
	struct bio *tail;
	struct bio *cur;
	int again = 0;
	unsigned long num_run;
	unsigned long batch_run = 0;
	unsigned long last_waited = 0;
	int force_reg = 0;
	int sync_pending = 0;
	struct blk_plug plug;

	/*
	 * this function runs all the bios we've collected for
	 * a particular device. We don't want to wander off to
	 * another device without first sending all of these down.
	 * So, setup a plug here and finish it off before we return
	 */
	blk_start_plug(&plug);

	bdi = device->bdev->bd_bdi;

loop:
	spin_lock(&device->io_lock);

loop_lock:
	num_run = 0;

	/* take all the bios off the list at once and process them
	 * later on (without the lock held). But, remember the
	 * tail and other pointers so the bios can be properly reinserted
	 * into the list if we hit congestion
	 */
	if (!force_reg && device->pending_sync_bios.head) {
		pending_bios = &device->pending_sync_bios;
		force_reg = 1;
	} else {
		pending_bios = &device->pending_bios;
		force_reg = 0;
	}

	pending = pending_bios->head;
	tail = pending_bios->tail;
	WARN_ON(pending && !tail);

	/*
	 * if pending was null this time around, no bios need processing
	 * at all and we can stop. Otherwise it'll loop back up again
	 * and do an additional check so no bios are missed.
	 *
	 * device->running_pending is used to synchronize with the
	 * schedule_bio code.
	 */
	if (device->pending_sync_bios.head == NULL &&
	    device->pending_bios.head == NULL) {
		again = 0;
		device->running_pending = 0;
	} else {
		again = 1;
		device->running_pending = 1;
	}

	pending_bios->head = NULL;
	pending_bios->tail = NULL;

	spin_unlock(&device->io_lock);

	while (pending) {

		rmb();
		/* we want to work on both lists, but do more bios on the
		 * sync list than the regular list
		 */
		if ((num_run > 32 &&
		     pending_bios != &device->pending_sync_bios &&
		     device->pending_sync_bios.head) ||
		    (num_run > 64 && pending_bios == &device->pending_sync_bios &&
		     device->pending_bios.head)) {
			spin_lock(&device->io_lock);
			requeue_list(pending_bios, pending, tail);
			goto loop_lock;
		}

		cur = pending;
		pending = pending->bi_next;
		cur->bi_next = NULL;

		BUG_ON(atomic_read(&cur->__bi_cnt) == 0);

		/*
		 * if we're doing the sync list, record that our
		 * plug has some sync requests on it
		 *
		 * If we're doing the regular list and there are
		 * sync requests sitting around, unplug before
		 * we add more
		 */
		if (pending_bios == &device->pending_sync_bios) {
			sync_pending = 1;
		} else if (sync_pending) {
			blk_finish_plug(&plug);
			blk_start_plug(&plug);
			sync_pending = 0;
		}

		btrfsic_submit_bio(cur);
		num_run++;
		batch_run++;

		cond_resched();

		/*
		 * we made progress, there is more work to do and the bdi
		 * is now congested. Back off and let other work structs
		 * run instead
		 */
		if (pending && bdi_write_congested(bdi) && batch_run > 8 &&
		    fs_info->fs_devices->open_devices > 1) {
			struct io_context *ioc;

			ioc = current->io_context;

			/*
			 * the main goal here is that we don't want to
			 * block if we're going to be able to submit
			 * more requests without blocking.
			 *
			 * This code does two great things, it pokes into
			 * the elevator code from a filesystem _and_
			 * it makes assumptions about how batching works.
			 */
			if (ioc && ioc->nr_batch_requests > 0 &&
			    time_before(jiffies, ioc->last_waited + HZ/50UL) &&
			    (last_waited == 0 ||
			     ioc->last_waited == last_waited)) {
				/*
				 * we want to go through our batch of
				 * requests and stop. So, we copy out
So, we copy out 589 * the ioc->last_waited time and test 590 * against it before looping 591 */ 592 last_waited = ioc->last_waited; 593 cond_resched(); 594 continue; 595 } 596 spin_lock(&device->io_lock); 597 requeue_list(pending_bios, pending, tail); 598 device->running_pending = 1; 599 600 spin_unlock(&device->io_lock); 601 btrfs_queue_work(fs_info->submit_workers, 602 &device->work); 603 goto done; 604 } 605 } 606 607 cond_resched(); 608 if (again) 609 goto loop; 610 611 spin_lock(&device->io_lock); 612 if (device->pending_bios.head || device->pending_sync_bios.head) 613 goto loop_lock; 614 spin_unlock(&device->io_lock); 615 616 done: 617 blk_finish_plug(&plug); 618 } 619 620 static void pending_bios_fn(struct btrfs_work *work) 621 { 622 struct btrfs_device *device; 623 624 device = container_of(work, struct btrfs_device, work); 625 run_scheduled_bios(device); 626 } 627 628 /* 629 * Search and remove all stale (devices which are not mounted) devices. 630 * When both inputs are NULL, it will search and release all stale devices. 631 * path: Optional. When provided will it release all unmounted devices 632 * matching this path only. 633 * skip_dev: Optional. Will skip this device when searching for the stale 634 * devices. 635 */ 636 static void btrfs_free_stale_devices(const char *path, 637 struct btrfs_device *skip_dev) 638 { 639 struct btrfs_fs_devices *fs_devs, *tmp_fs_devs; 640 struct btrfs_device *dev, *tmp_dev; 641 642 list_for_each_entry_safe(fs_devs, tmp_fs_devs, &fs_uuids, fs_list) { 643 644 if (fs_devs->opened) 645 continue; 646 647 list_for_each_entry_safe(dev, tmp_dev, 648 &fs_devs->devices, dev_list) { 649 int not_found = 0; 650 651 if (skip_dev && skip_dev == dev) 652 continue; 653 if (path && !dev->name) 654 continue; 655 656 rcu_read_lock(); 657 if (path) 658 not_found = strcmp(rcu_str_deref(dev->name), 659 path); 660 rcu_read_unlock(); 661 if (not_found) 662 continue; 663 664 /* delete the stale device */ 665 if (fs_devs->num_devices == 1) { 666 btrfs_sysfs_remove_fsid(fs_devs); 667 list_del(&fs_devs->fs_list); 668 free_fs_devices(fs_devs); 669 break; 670 } else { 671 fs_devs->num_devices--; 672 list_del(&dev->dev_list); 673 btrfs_free_device(dev); 674 } 675 } 676 } 677 } 678 679 static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices, 680 struct btrfs_device *device, fmode_t flags, 681 void *holder) 682 { 683 struct request_queue *q; 684 struct block_device *bdev; 685 struct buffer_head *bh; 686 struct btrfs_super_block *disk_super; 687 u64 devid; 688 int ret; 689 690 if (device->bdev) 691 return -EINVAL; 692 if (!device->name) 693 return -EINVAL; 694 695 ret = btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1, 696 &bdev, &bh); 697 if (ret) 698 return ret; 699 700 disk_super = (struct btrfs_super_block *)bh->b_data; 701 devid = btrfs_stack_device_id(&disk_super->dev_item); 702 if (devid != device->devid) 703 goto error_brelse; 704 705 if (memcmp(device->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE)) 706 goto error_brelse; 707 708 device->generation = btrfs_super_generation(disk_super); 709 710 if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) { 711 clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state); 712 fs_devices->seeding = 1; 713 } else { 714 if (bdev_read_only(bdev)) 715 clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state); 716 else 717 set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state); 718 } 719 720 q = bdev_get_queue(bdev); 721 if (!blk_queue_nonrot(q)) 722 fs_devices->rotating = 1; 723 724 device->bdev = bdev; 
static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
				 struct btrfs_device *device, fmode_t flags,
				 void *holder)
{
	struct request_queue *q;
	struct block_device *bdev;
	struct buffer_head *bh;
	struct btrfs_super_block *disk_super;
	u64 devid;
	int ret;

	if (device->bdev)
		return -EINVAL;
	if (!device->name)
		return -EINVAL;

	ret = btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
				    &bdev, &bh);
	if (ret)
		return ret;

	disk_super = (struct btrfs_super_block *)bh->b_data;
	devid = btrfs_stack_device_id(&disk_super->dev_item);
	if (devid != device->devid)
		goto error_brelse;

	if (memcmp(device->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE))
		goto error_brelse;

	device->generation = btrfs_super_generation(disk_super);

	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
		clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
		fs_devices->seeding = 1;
	} else {
		if (bdev_read_only(bdev))
			clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
		else
			set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
	}

	q = bdev_get_queue(bdev);
	if (!blk_queue_nonrot(q))
		fs_devices->rotating = 1;

	device->bdev = bdev;
	clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
	device->mode = flags;

	fs_devices->open_devices++;
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
		fs_devices->rw_devices++;
		list_add_tail(&device->dev_alloc_list, &fs_devices->alloc_list);
	}
	brelse(bh);

	return 0;

error_brelse:
	brelse(bh);
	blkdev_put(bdev, flags);

	return -EINVAL;
}

/*
 * Add new device to list of registered devices
 *
 * Returns:
 * device pointer which was just added or updated when successful
 * error pointer when failed
 */
static noinline struct btrfs_device *device_list_add(const char *path,
			   struct btrfs_super_block *disk_super)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices;
	struct rcu_string *name;
	u64 found_transid = btrfs_super_generation(disk_super);
	u64 devid = btrfs_stack_device_id(&disk_super->dev_item);

	fs_devices = find_fsid(disk_super->fsid);
	if (!fs_devices) {
		fs_devices = alloc_fs_devices(disk_super->fsid);
		if (IS_ERR(fs_devices))
			return ERR_CAST(fs_devices);

		list_add(&fs_devices->fs_list, &fs_uuids);

		device = NULL;
	} else {
		device = find_device(fs_devices, devid,
				     disk_super->dev_item.uuid);
	}

	if (!device) {
		if (fs_devices->opened)
			return ERR_PTR(-EBUSY);

		device = btrfs_alloc_device(NULL, &devid,
					    disk_super->dev_item.uuid);
		if (IS_ERR(device)) {
			/* we can safely leave the fs_devices entry around */
			return device;
		}

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name) {
			btrfs_free_device(device);
			return ERR_PTR(-ENOMEM);
		}
		rcu_assign_pointer(device->name, name);

		mutex_lock(&fs_devices->device_list_mutex);
		list_add_rcu(&device->dev_list, &fs_devices->devices);
		fs_devices->num_devices++;
		mutex_unlock(&fs_devices->device_list_mutex);

		device->fs_devices = fs_devices;
		btrfs_free_stale_devices(path, device);

		if (disk_super->label[0])
			pr_info("BTRFS: device label %s devid %llu transid %llu %s\n",
				disk_super->label, devid, found_transid, path);
		else
			pr_info("BTRFS: device fsid %pU devid %llu transid %llu %s\n",
				disk_super->fsid, devid, found_transid, path);

	} else if (!device->name || strcmp(device->name->str, path)) {
		/*
		 * When FS is already mounted.
		 * 1. If you are here and if the device->name is NULL that
		 *    means this device was missing at the time of FS mount.
		 * 2. If you are here and if the device->name is different
		 *    from 'path' that means either
		 *	a. The same device disappeared and reappeared with a
		 *	   different name. or
		 *	b. The missing-disk-which-was-replaced has
		 *	   reappeared now.
		 *
		 * We must allow 1 and 2a above. But 2b would be spurious
		 * and unintentional.
		 *
		 * Further, in case of 1 and 2a above, the disk at 'path'
		 * would have missed some transactions when it was away and
		 * in case of 2a the stale bdev has to be updated as well.
		 * 2b must not be allowed at any time.
		 */

		/*
		 * For now, we do allow update to btrfs_fs_device through the
		 * btrfs dev scan cli after FS has been mounted. We're still
		 * tracking a problem where systems fail mount by subvolume id
		 * when we reject replacement on a mounted FS.
		 */
		if (!fs_devices->opened && found_transid < device->generation) {
			/*
			 * That is, if the FS is _not_ mounted and if you are
			 * here, it means there is more than one disk with the
			 * same uuid and devid. We keep the one with the larger
			 * generation number or the last-in if the generations
			 * are equal.
			 */
			return ERR_PTR(-EEXIST);
		}

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name)
			return ERR_PTR(-ENOMEM);
		rcu_string_free(device->name);
		rcu_assign_pointer(device->name, name);
		if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
			fs_devices->missing_devices--;
			clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
		}
	}

	/*
	 * Unmount does not free the btrfs_device struct but would zero
	 * generation along with most of the other members. So just update
	 * it back. We need it to pick the disk with largest generation
	 * (as above).
	 */
	if (!fs_devices->opened)
		device->generation = found_transid;

	fs_devices->total_devices = btrfs_super_num_devices(disk_super);

	return device;
}

static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
{
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_device *device;
	struct btrfs_device *orig_dev;

	fs_devices = alloc_fs_devices(orig->fsid);
	if (IS_ERR(fs_devices))
		return fs_devices;

	mutex_lock(&orig->device_list_mutex);
	fs_devices->total_devices = orig->total_devices;

	/* We hold orig->device_list_mutex, so it is safe to get the devices. */
	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
		struct rcu_string *name;

		device = btrfs_alloc_device(NULL, &orig_dev->devid,
					    orig_dev->uuid);
		if (IS_ERR(device))
			goto error;

		/*
		 * This is ok to do without rcu read locked because we hold the
		 * uuid mutex so nothing we touch in here is going to disappear.
		 */
		if (orig_dev->name) {
			name = rcu_string_strdup(orig_dev->name->str,
						 GFP_KERNEL);
			if (!name) {
				btrfs_free_device(device);
				goto error;
			}
			rcu_assign_pointer(device->name, name);
		}

		list_add(&device->dev_list, &fs_devices->devices);
		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	}
	mutex_unlock(&orig->device_list_mutex);
	return fs_devices;
error:
	mutex_unlock(&orig->device_list_mutex);
	free_fs_devices(fs_devices);
	return ERR_PTR(-ENOMEM);
}
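/*
 * Example (illustrative only): cloning is what lets a sprouted filesystem
 * keep a private copy of its seed's device list:
 *
 *	old_devices = clone_fs_devices(fs_devices);
 *	if (IS_ERR(old_devices))
 *		return PTR_ERR(old_devices);
 *
 * The clone shares no memory with @orig, so the two lists can be modified
 * independently afterwards.
 */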
/*
 * After we have read the system tree and know devids belonging to
 * this filesystem, remove the device which does not belong there.
 */
void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices, int step)
{
	struct btrfs_device *device, *next;
	struct btrfs_device *latest_dev = NULL;

	mutex_lock(&uuid_mutex);
again:
	/* This is the initialized path, it is safe to release the devices. */
	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
		if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
			     &device->dev_state)) {
			if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
				      &device->dev_state) &&
			    (!latest_dev ||
			     device->generation > latest_dev->generation)) {
				latest_dev = device;
			}
			continue;
		}

		if (device->devid == BTRFS_DEV_REPLACE_DEVID) {
			/*
			 * In the first step, keep the device which has
			 * the correct fsid and the devid that is used
			 * for the dev_replace procedure.
			 * In the second step, the dev_replace state is
			 * read from the device tree and it is known
			 * whether the procedure is really active or
			 * not, which means whether this device is
			 * used or whether it should be removed.
			 */
			if (step == 0 || test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
						  &device->dev_state)) {
				continue;
			}
		}
		if (device->bdev) {
			blkdev_put(device->bdev, device->mode);
			device->bdev = NULL;
			fs_devices->open_devices--;
		}
		if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
			list_del_init(&device->dev_alloc_list);
			clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
			if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
				      &device->dev_state))
				fs_devices->rw_devices--;
		}
		list_del_init(&device->dev_list);
		fs_devices->num_devices--;
		btrfs_free_device(device);
	}

	if (fs_devices->seed) {
		fs_devices = fs_devices->seed;
		goto again;
	}

	fs_devices->latest_bdev = latest_dev->bdev;

	mutex_unlock(&uuid_mutex);
}

static void free_device_rcu(struct rcu_head *head)
{
	struct btrfs_device *device;

	device = container_of(head, struct btrfs_device, rcu);
	btrfs_free_device(device);
}

static void btrfs_close_bdev(struct btrfs_device *device)
{
	if (!device->bdev)
		return;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
		sync_blockdev(device->bdev);
		invalidate_bdev(device->bdev);
	}

	blkdev_put(device->bdev, device->mode);
}

static void btrfs_prepare_close_one_device(struct btrfs_device *device)
{
	struct btrfs_fs_devices *fs_devices = device->fs_devices;
	struct btrfs_device *new_device;
	struct rcu_string *name;

	if (device->bdev)
		fs_devices->open_devices--;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
		list_del_init(&device->dev_alloc_list);
		fs_devices->rw_devices--;
	}

	if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
		fs_devices->missing_devices--;

	new_device = btrfs_alloc_device(NULL, &device->devid,
					device->uuid);
	BUG_ON(IS_ERR(new_device)); /* -ENOMEM */

	/* Safe because we are under uuid_mutex */
	if (device->name) {
		name = rcu_string_strdup(device->name->str, GFP_NOFS);
		BUG_ON(!name); /* -ENOMEM */
		rcu_assign_pointer(new_device->name, name);
	}

	list_replace_rcu(&device->dev_list, &new_device->dev_list);
	new_device->fs_devices = device->fs_devices;
}
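/*
 * Example (illustrative only): the close path (the function above together
 * with close_fs_devices() below) swaps the live device for a freshly
 * allocated placeholder under RCU, so lockless readers walking the list keep
 * seeing a valid entry while the old device is freed only after a grace
 * period:
 *
 *	list_replace_rcu(&device->dev_list, &new_device->dev_list);
 *	...
 *	call_rcu(&device->rcu, free_device_rcu);
 */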
static int close_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device, *tmp;
	struct list_head pending_put;

	INIT_LIST_HEAD(&pending_put);

	if (--fs_devices->opened > 0)
		return 0;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry_safe(device, tmp, &fs_devices->devices, dev_list) {
		btrfs_prepare_close_one_device(device);
		list_add(&device->dev_list, &pending_put);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	/*
	 * btrfs_show_devname() takes the device_list_mutex, and a call to
	 * blkdev_put() can lead the VFS back into this function. So do the
	 * put outside of the device_list_mutex, as of now.
	 */
	while (!list_empty(&pending_put)) {
		device = list_first_entry(&pending_put,
					  struct btrfs_device, dev_list);
		list_del(&device->dev_list);
		btrfs_close_bdev(device);
		call_rcu(&device->rcu, free_device_rcu);
	}

	WARN_ON(fs_devices->open_devices);
	WARN_ON(fs_devices->rw_devices);
	fs_devices->opened = 0;
	fs_devices->seeding = 0;

	return 0;
}

int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_fs_devices *seed_devices = NULL;
	int ret;

	mutex_lock(&uuid_mutex);
	ret = close_fs_devices(fs_devices);
	if (!fs_devices->opened) {
		seed_devices = fs_devices->seed;
		fs_devices->seed = NULL;
	}
	mutex_unlock(&uuid_mutex);

	while (seed_devices) {
		fs_devices = seed_devices;
		seed_devices = fs_devices->seed;
		close_fs_devices(fs_devices);
		free_fs_devices(fs_devices);
	}
	return ret;
}

static int open_fs_devices(struct btrfs_fs_devices *fs_devices,
			   fmode_t flags, void *holder)
{
	struct btrfs_device *device;
	struct btrfs_device *latest_dev = NULL;
	int ret = 0;

	flags |= FMODE_EXCL;

	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		/* Just open everything we can; ignore failures here */
		if (btrfs_open_one_device(fs_devices, device, flags, holder))
			continue;

		if (!latest_dev ||
		    device->generation > latest_dev->generation)
			latest_dev = device;
	}
	if (fs_devices->open_devices == 0) {
		ret = -EINVAL;
		goto out;
	}
	fs_devices->opened = 1;
	fs_devices->latest_bdev = latest_dev->bdev;
	fs_devices->total_rw_bytes = 0;
out:
	return ret;
}

static int devid_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct btrfs_device *dev1, *dev2;

	dev1 = list_entry(a, struct btrfs_device, dev_list);
	dev2 = list_entry(b, struct btrfs_device, dev_list);

	if (dev1->devid < dev2->devid)
		return -1;
	else if (dev1->devid > dev2->devid)
		return 1;
	return 0;
}

int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       fmode_t flags, void *holder)
{
	int ret;

	mutex_lock(&fs_devices->device_list_mutex);
	if (fs_devices->opened) {
		fs_devices->opened++;
		ret = 0;
	} else {
		list_sort(NULL, &fs_devices->devices, devid_cmp);
		ret = open_fs_devices(fs_devices, flags, holder);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	return ret;
}
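/*
 * Example (illustrative only): sorting with devid_cmp before the first open
 * gives a deterministic, ascending order by devid. Devices scanned in the
 * order 3, 1, 2 are opened as 1, 2, 3:
 *
 *	list_sort(NULL, &fs_devices->devices, devid_cmp);
 */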
static void btrfs_release_disk_super(struct page *page)
{
	kunmap(page);
	put_page(page);
}

static int btrfs_read_disk_super(struct block_device *bdev, u64 bytenr,
				 struct page **page,
				 struct btrfs_super_block **disk_super)
{
	void *p;
	pgoff_t index;

	/* make sure our super fits in the device */
	if (bytenr + PAGE_SIZE >= i_size_read(bdev->bd_inode))
		return 1;

	/* make sure our super fits in the page */
	if (sizeof(**disk_super) > PAGE_SIZE)
		return 1;

	/* make sure our super doesn't straddle pages on disk */
	index = bytenr >> PAGE_SHIFT;
	if ((bytenr + sizeof(**disk_super) - 1) >> PAGE_SHIFT != index)
		return 1;

	/* pull in the page with our super */
	*page = read_cache_page_gfp(bdev->bd_inode->i_mapping,
				    index, GFP_KERNEL);

	if (IS_ERR_OR_NULL(*page))
		return 1;

	p = kmap(*page);

	/* align our pointer to the offset of the super block */
	*disk_super = p + (bytenr & ~PAGE_MASK);

	if (btrfs_super_bytenr(*disk_super) != bytenr ||
	    btrfs_super_magic(*disk_super) != BTRFS_MAGIC) {
		btrfs_release_disk_super(*page);
		return 1;
	}

	if ((*disk_super)->label[0] &&
	    (*disk_super)->label[BTRFS_LABEL_SIZE - 1])
		(*disk_super)->label[BTRFS_LABEL_SIZE - 1] = '\0';

	return 0;
}

/*
 * Look for a btrfs signature on a device. This may be called out of the mount
 * path and we are not allowed to call set_blocksize during the scan. The
 * superblock is read via the pagecache.
 */
int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
			  struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_super_block *disk_super;
	struct btrfs_device *device;
	struct block_device *bdev;
	struct page *page;
	int ret = 0;
	u64 bytenr;

	/*
	 * we would like to check all the supers, but that would make
	 * a btrfs mount succeed after a mkfs from a different FS.
	 * So, we need to add a special mount option to scan for
	 * later supers, using BTRFS_SUPER_MIRROR_MAX instead
	 */
	bytenr = btrfs_sb_offset(0);
	flags |= FMODE_EXCL;

	bdev = blkdev_get_by_path(path, flags, holder);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	if (btrfs_read_disk_super(bdev, bytenr, &page, &disk_super)) {
		ret = -EINVAL;
		goto error_bdev_put;
	}

	mutex_lock(&uuid_mutex);
	device = device_list_add(path, disk_super);
	if (IS_ERR(device))
		ret = PTR_ERR(device);
	else
		*fs_devices_ret = device->fs_devices;
	mutex_unlock(&uuid_mutex);

	btrfs_release_disk_super(page);

error_bdev_put:
	blkdev_put(bdev, flags);

	return ret;
}

/* helper to account the used device space in the range */
int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
				   u64 end, u64 *length)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->fs_info->dev_root;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 extent_end;
	int ret;
	int slot;
	struct extent_buffer *l;

	*length = 0;

	if (start >= device->total_bytes ||
	    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = READA_FORWARD;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid, key.type);
		if (ret < 0)
			goto out;
	}

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (key.type != BTRFS_DEV_EXTENT_KEY)
			goto next;

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (key.offset <= start && extent_end > end) {
			*length = end - start + 1;
			break;
		} else if (key.offset <= start && extent_end > start)
			*length += extent_end - start;
		else if (key.offset > start && extent_end <= end)
			*length += extent_end - key.offset;
		else if (key.offset > start && key.offset <= end) {
			*length += end - key.offset + 1;
			break;
		} else if (key.offset > end)
			break;

next:
		path->slots[0]++;
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

static int contains_pending_extent(struct btrfs_transaction *transaction,
				   struct btrfs_device *device,
				   u64 *start, u64 len)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct extent_map *em;
	struct list_head *search_list = &fs_info->pinned_chunks;
	int ret = 0;
	u64 physical_start = *start;

	if (transaction)
		search_list = &transaction->pending_chunks;
again:
	list_for_each_entry(em, search_list, list) {
		struct map_lookup *map;
		int i;

		map = em->map_lookup;
		for (i = 0; i < map->num_stripes; i++) {
			u64 end;

			if (map->stripes[i].dev != device)
				continue;
			if (map->stripes[i].physical >= physical_start + len ||
			    map->stripes[i].physical + em->orig_block_len <=
			    physical_start)
				continue;
			/*
			 * Make sure that while processing the pinned list we do
			 * not override our *start with a lower value, because
			 * we can have pinned chunks that fall within this
			 * device hole and that have lower physical addresses
			 * than the pending chunks we processed before. If we
			 * do not take this special care we can end up getting
			 * 2 pending chunks that start at the same physical
			 * device offsets because the end offset of a pinned
			 * chunk can be equal to the start offset of some
			 * pending chunk.
			 */
			end = map->stripes[i].physical + em->orig_block_len;
			if (end > *start) {
				*start = end;
				ret = 1;
			}
		}
	}
	if (search_list != &fs_info->pinned_chunks) {
		search_list = &fs_info->pinned_chunks;
		goto again;
	}

	return ret;
}
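/*
 * Worked example (illustrative only): suppose *start == 0, len == SZ_1M and a
 * pending chunk has a stripe on this device at physical SZ_512K with
 * orig_block_len == SZ_1M. The stripe overlaps the searched range, so *start
 * is pushed to SZ_512K + SZ_1M and the function returns 1; the caller must
 * then retry its hole search from the new *start.
 */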
/*
 * find_free_dev_extent_start - find free space in the specified device
 * @device:	  the device which we search the free space in
 * @num_bytes:	  the size of the free space that we need
 * @search_start: the position from which to begin the search
 * @start:	  store the start of the free space.
 * @len:	  the size of the free space that we find, or the size
 *		  of the max free space if we don't find suitable free space
 *
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
 *
 * @start is used to store the start of the free space if we find one. But if
 * we don't find suitable free space, it will be used to store the start
 * position of the max free space.
 *
 * @len is used to store the size of the free space that we find.
 * But if we don't find suitable free space, it is used to store the size of
 * the max free space.
 */
int find_free_dev_extent_start(struct btrfs_transaction *transaction,
			       struct btrfs_device *device, u64 num_bytes,
			       u64 search_start, u64 *start, u64 *len)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_key key;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 hole_size;
	u64 max_hole_start;
	u64 max_hole_size;
	u64 extent_end;
	u64 search_end = device->total_bytes;
	int ret;
	int slot;
	struct extent_buffer *l;

	/*
	 * We don't want to overwrite the superblock on the drive nor any area
	 * used by the boot loader (grub for example), so we make sure to start
	 * at an offset of at least 1MB.
	 */
	search_start = max_t(u64, search_start, SZ_1M);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	max_hole_start = search_start;
	max_hole_size = 0;

again:
	if (search_start >= search_end ||
	    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
		ret = -ENOSPC;
		goto out;
	}

	path->reada = READA_FORWARD;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid, key.type);
		if (ret < 0)
			goto out;
	}

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (key.type != BTRFS_DEV_EXTENT_KEY)
			goto next;

		if (key.offset > search_start) {
			hole_size = key.offset - search_start;

			/*
			 * Have to check before we set max_hole_start, otherwise
			 * we could end up sending back this offset anyway.
			 */
			if (contains_pending_extent(transaction, device,
						    &search_start,
						    hole_size)) {
				if (key.offset >= search_start) {
					hole_size = key.offset - search_start;
				} else {
					WARN_ON_ONCE(1);
					hole_size = 0;
				}
			}

			if (hole_size > max_hole_size) {
				max_hole_start = search_start;
				max_hole_size = hole_size;
			}

			/*
			 * If this free space is greater than what we need,
			 * it must be the max free space that we have found
			 * until now, so max_hole_start must point to the start
			 * of this free space and the length of this free space
			 * is stored in max_hole_size. Thus, we return
			 * max_hole_start and max_hole_size and go back to the
			 * caller.
			 */
			if (hole_size >= num_bytes) {
				ret = 0;
				goto out;
			}
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (extent_end > search_start)
			search_start = extent_end;
next:
		path->slots[0]++;
		cond_resched();
	}

	/*
	 * At this point, search_start should be the end of
	 * allocated dev extents, and when shrinking the device,
	 * search_end may be smaller than search_start.
	 */
	if (search_end > search_start) {
		hole_size = search_end - search_start;

		if (contains_pending_extent(transaction, device, &search_start,
					    hole_size)) {
			btrfs_release_path(path);
			goto again;
		}

		if (hole_size > max_hole_size) {
			max_hole_start = search_start;
			max_hole_size = hole_size;
		}
	}

	/* See above. */
	if (max_hole_size < num_bytes)
		ret = -ENOSPC;
	else
		ret = 0;

out:
	btrfs_free_path(path);
	*start = max_hole_start;
	if (len)
		*len = max_hole_size;
	return ret;
}

int find_free_dev_extent(struct btrfs_trans_handle *trans,
			 struct btrfs_device *device, u64 num_bytes,
			 u64 *start, u64 *len)
{
	/* FIXME use last free of some kind */
	return find_free_dev_extent_start(trans->transaction, device,
					  num_bytes, 0, start, len);
}

static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
				 struct btrfs_device *device,
				 u64 start, u64 *dev_extent_len)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf = NULL;
	struct btrfs_dev_extent *extent = NULL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
again:
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid,
					  BTRFS_DEV_EXTENT_KEY);
		if (ret)
			goto out;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
		BUG_ON(found_key.offset > start || found_key.offset +
		       btrfs_dev_extent_length(leaf, extent) < start);
		key = found_key;
		btrfs_release_path(path);
		goto again;
	} else if (ret == 0) {
		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
	} else {
		btrfs_handle_fs_error(fs_info, ret, "Slot search failed");
		goto out;
	}

	*dev_extent_len = btrfs_dev_extent_length(leaf, extent);

	ret = btrfs_del_item(trans, root, path);
	if (ret) {
		btrfs_handle_fs_error(fs_info, ret,
				      "Failed to remove dev extent item");
	} else {
		set_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags);
	}
out:
	btrfs_free_path(path);
	return ret;
}

static int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
				  struct btrfs_device *device,
				  u64 chunk_offset, u64 start, u64 num_bytes)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_dev_extent *extent;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	WARN_ON(!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state));
	WARN_ON(test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state));
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*extent));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	extent = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_dev_extent);
	btrfs_set_dev_extent_chunk_tree(leaf, extent,
					BTRFS_CHUNK_TREE_OBJECTID);
	btrfs_set_dev_extent_chunk_objectid(leaf, extent,
					    BTRFS_FIRST_CHUNK_TREE_OBJECTID);
	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);

	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return ret;
}

static u64 find_next_chunk(struct btrfs_fs_info *fs_info)
{
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	struct rb_node *n;
	u64 ret = 0;

	em_tree = &fs_info->mapping_tree.map_tree;
	read_lock(&em_tree->lock);
	n = rb_last(&em_tree->map);
	if (n) {
		em = rb_entry(n, struct extent_map, rb_node);
		ret = em->start + em->len;
	}
	read_unlock(&em_tree->lock);

	return ret;
}

static noinline int find_next_devid(struct btrfs_fs_info *fs_info,
				    u64 *devid_ret)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0); /* Corruption */

	ret = btrfs_previous_item(fs_info->chunk_root, path,
				  BTRFS_DEV_ITEMS_OBJECTID,
				  BTRFS_DEV_ITEM_KEY);
	if (ret) {
		*devid_ret = 1;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		*devid_ret = found_key.offset + 1;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}
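/*
 * Example (illustrative only): find_next_devid() relies on DEV_ITEM keys
 * being sorted by devid, so searching for offset (u64)-1 and stepping back
 * one item lands on the highest existing devid:
 *
 *	existing devids 1, 2, 5  ->  *devid_ret == 6
 *
 * Note that gaps left by removed devices are not reused.
 */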
/*
 * the device information is stored in the chunk root
 * the btrfs_device struct should be fully filled in
 */
static int btrfs_add_dev_item(struct btrfs_trans_handle *trans,
			      struct btrfs_fs_info *fs_info,
			      struct btrfs_device *device)
{
	struct btrfs_root *root = fs_info->chunk_root;
	int ret;
	struct btrfs_path *path;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long ptr;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*dev_item));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_generation(leaf, dev_item, 0);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item,
				     btrfs_device_get_disk_total_bytes(device));
	btrfs_set_device_bytes_used(leaf, dev_item,
				    btrfs_device_get_bytes_used(device));
	btrfs_set_device_group(leaf, dev_item, 0);
	btrfs_set_device_seek_speed(leaf, dev_item, 0);
	btrfs_set_device_bandwidth(leaf, dev_item, 0);
	btrfs_set_device_start_offset(leaf, dev_item, 0);

	ptr = btrfs_device_uuid(dev_item);
	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
	ptr = btrfs_device_fsid(dev_item);
	write_extent_buffer(leaf, fs_info->fsid, ptr, BTRFS_FSID_SIZE);
	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Function to update ctime/mtime for a given device path.
 * Mainly used by ctime/mtime based probes like libblkid.
 */
static void update_dev_time(const char *path_name)
{
	struct file *filp;

	filp = filp_open(path_name, O_RDWR, 0);
	if (IS_ERR(filp))
		return;
	file_update_time(filp);
	filp_close(filp, NULL);
}

static int btrfs_rm_dev_item(struct btrfs_fs_info *fs_info,
			     struct btrfs_device *device)
{
	struct btrfs_root *root = fs_info->chunk_root;
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_trans_handle *trans;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		btrfs_abort_transaction(trans, ret);
		btrfs_end_transaction(trans);
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		btrfs_end_transaction(trans);
	}

out:
	btrfs_free_path(path);
	if (!ret)
		ret = btrfs_commit_transaction(trans);
	return ret;
}
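/*
 * Example (illustrative only): in the removal path below (btrfs_rm_device()),
 * the device is first shrunk to zero and only afterwards is its item deleted:
 *
 *	ret = btrfs_shrink_device(device, 0);
 *	...
 *	ret = btrfs_rm_dev_item(fs_info, device);
 *
 * so the chunk tree stops referencing the device before its DEV_ITEM
 * disappears.
 */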
/*
 * Verify that @num_devices satisfies the RAID profile constraints in the whole
 * filesystem. It's up to the caller to adjust that number regarding e.g.
 * device replace.
 */
static int btrfs_check_raid_min_devices(struct btrfs_fs_info *fs_info,
					u64 num_devices)
{
	u64 all_avail;
	unsigned seq;
	int i;

	do {
		seq = read_seqbegin(&fs_info->profiles_lock);

		all_avail = fs_info->avail_data_alloc_bits |
			    fs_info->avail_system_alloc_bits |
			    fs_info->avail_metadata_alloc_bits;
	} while (read_seqretry(&fs_info->profiles_lock, seq));

	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
		if (!(all_avail & btrfs_raid_array[i].bg_flag))
			continue;

		if (num_devices < btrfs_raid_array[i].devs_min) {
			int ret = btrfs_raid_array[i].mindev_error;

			if (ret)
				return ret;
		}
	}

	return 0;
}

static struct btrfs_device * btrfs_find_next_active_device(
		struct btrfs_fs_devices *fs_devs, struct btrfs_device *device)
{
	struct btrfs_device *next_device;

	list_for_each_entry(next_device, &fs_devs->devices, dev_list) {
		if (next_device != device &&
		    !test_bit(BTRFS_DEV_STATE_MISSING, &next_device->dev_state)
		    && next_device->bdev)
			return next_device;
	}

	return NULL;
}

/*
 * Helper function to check if the given device is part of s_bdev / latest_bdev
 * and replace it with the provided or the next active device. In the context
 * where this function is called, there should always be another device (or
 * this_dev) which is active.
 */
void btrfs_assign_next_active_device(struct btrfs_fs_info *fs_info,
		struct btrfs_device *device, struct btrfs_device *this_dev)
{
	struct btrfs_device *next_device;

	if (this_dev)
		next_device = this_dev;
	else
		next_device = btrfs_find_next_active_device(fs_info->fs_devices,
							    device);
	ASSERT(next_device);

	if (fs_info->sb->s_bdev &&
	    (fs_info->sb->s_bdev == device->bdev))
		fs_info->sb->s_bdev = next_device->bdev;

	if (fs_info->fs_devices->latest_bdev == device->bdev)
		fs_info->fs_devices->latest_bdev = next_device->bdev;
}

int btrfs_rm_device(struct btrfs_fs_info *fs_info, const char *device_path,
		    u64 devid)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *cur_devices;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	u64 num_devices;
	int ret = 0;

	mutex_lock(&uuid_mutex);

	num_devices = fs_devices->num_devices;
	btrfs_dev_replace_read_lock(&fs_info->dev_replace);
	if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
		WARN_ON(num_devices < 1);
		num_devices--;
	}
	btrfs_dev_replace_read_unlock(&fs_info->dev_replace);

	ret = btrfs_check_raid_min_devices(fs_info, num_devices - 1);
	if (ret)
		goto out;

	ret = btrfs_find_device_by_devspec(fs_info, devid, device_path,
					   &device);
	if (ret)
		goto out;

	if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
		ret = BTRFS_ERROR_DEV_TGT_REPLACE;
		goto out;
	}

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    fs_info->fs_devices->rw_devices == 1) {
		ret = BTRFS_ERROR_DEV_ONLY_WRITABLE;
		goto out;
	}

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
		mutex_lock(&fs_info->chunk_mutex);
		list_del_init(&device->dev_alloc_list);
		device->fs_devices->rw_devices--;
		mutex_unlock(&fs_info->chunk_mutex);
	}

	mutex_unlock(&uuid_mutex);
btrfs_shrink_device(device, 0); 1993 mutex_lock(&uuid_mutex); 1994 if (ret) 1995 goto error_undo; 1996 1997 /* 1998 * TODO: the superblock still includes this device in its num_devices 1999 * counter although write_all_supers() is not locked out. This 2000 * could give a filesystem state which requires a degraded mount. 2001 */ 2002 ret = btrfs_rm_dev_item(fs_info, device); 2003 if (ret) 2004 goto error_undo; 2005 2006 clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state); 2007 btrfs_scrub_cancel_dev(fs_info, device); 2008 2009 /* 2010 * the device list mutex makes sure that we don't change 2011 * the device list while someone else is writing out all 2012 * the device supers. Whoever is writing all supers, should 2013 * lock the device list mutex before getting the number of 2014 * devices in the super block (super_copy). Conversely, 2015 * whoever updates the number of devices in the super block 2016 * (super_copy) should hold the device list mutex. 2017 */ 2018 2019 /* 2020 * In normal cases the cur_devices == fs_devices. But in case 2021 * of deleting a seed device, the cur_devices should point to 2022 * its own fs_devices listed under the fs_devices->seed. 2023 */ 2024 cur_devices = device->fs_devices; 2025 mutex_lock(&fs_devices->device_list_mutex); 2026 list_del_rcu(&device->dev_list); 2027 2028 cur_devices->num_devices--; 2029 cur_devices->total_devices--; 2030 2031 if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) 2032 cur_devices->missing_devices--; 2033 2034 btrfs_assign_next_active_device(fs_info, device, NULL); 2035 2036 if (device->bdev) { 2037 cur_devices->open_devices--; 2038 /* remove sysfs entry */ 2039 btrfs_sysfs_rm_device_link(fs_devices, device); 2040 } 2041 2042 num_devices = btrfs_super_num_devices(fs_info->super_copy) - 1; 2043 btrfs_set_super_num_devices(fs_info->super_copy, num_devices); 2044 mutex_unlock(&fs_devices->device_list_mutex); 2045 2046 /* 2047 * at this point, the device is zero sized and detached from 2048 * the devices list. All that's left is to zero out the old 2049 * supers and free the device. 2050 */ 2051 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) 2052 btrfs_scratch_superblocks(device->bdev, device->name->str); 2053 2054 btrfs_close_bdev(device); 2055 call_rcu(&device->rcu, free_device_rcu); 2056 2057 if (cur_devices->open_devices == 0) { 2058 while (fs_devices) { 2059 if (fs_devices->seed == cur_devices) { 2060 fs_devices->seed = cur_devices->seed; 2061 break; 2062 } 2063 fs_devices = fs_devices->seed; 2064 } 2065 cur_devices->seed = NULL; 2066 close_fs_devices(cur_devices); 2067 free_fs_devices(cur_devices); 2068 } 2069 2070 out: 2071 mutex_unlock(&uuid_mutex); 2072 return ret; 2073 2074 error_undo: 2075 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 2076 mutex_lock(&fs_info->chunk_mutex); 2077 list_add(&device->dev_alloc_list, 2078 &fs_devices->alloc_list); 2079 device->fs_devices->rw_devices++; 2080 mutex_unlock(&fs_info->chunk_mutex); 2081 } 2082 goto out; 2083 } 2084 2085 void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_fs_info *fs_info, 2086 struct btrfs_device *srcdev) 2087 { 2088 struct btrfs_fs_devices *fs_devices; 2089 2090 lockdep_assert_held(&fs_info->fs_devices->device_list_mutex); 2091 2092 /* 2093 * in case of fs with no seed, srcdev->fs_devices will point 2094 * to fs_devices of fs_info. However when the dev being replaced is 2095 * a seed dev it will point to the seed's local fs_devices. In short 2096 * srcdev will have its correct fs_devices in both the cases. 
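	 *
	 * Sketch of the two cases (illustrative only):
	 *
	 *	regular dev:  srcdev->fs_devices == fs_info->fs_devices
	 *	seed dev:     srcdev->fs_devices == the seed's own fs_devices,
	 *	              reachable via fs_info->fs_devices->seed
	 *
	 * either way, the counter updates below hit the btrfs_fs_devices
	 * that actually owns srcdev.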
	 */
	fs_devices = srcdev->fs_devices;

	list_del_rcu(&srcdev->dev_list);
	list_del(&srcdev->dev_alloc_list);
	fs_devices->num_devices--;
	if (test_bit(BTRFS_DEV_STATE_MISSING, &srcdev->dev_state))
		fs_devices->missing_devices--;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &srcdev->dev_state))
		fs_devices->rw_devices--;

	if (srcdev->bdev)
		fs_devices->open_devices--;
}

void btrfs_rm_dev_replace_free_srcdev(struct btrfs_fs_info *fs_info,
				      struct btrfs_device *srcdev)
{
	struct btrfs_fs_devices *fs_devices = srcdev->fs_devices;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &srcdev->dev_state)) {
		/* zero out the old super if it is writable */
		btrfs_scratch_superblocks(srcdev->bdev, srcdev->name->str);
	}

	btrfs_close_bdev(srcdev);
	call_rcu(&srcdev->rcu, free_device_rcu);

	/* if there are no more devices, delete the fs_devices */
	if (!fs_devices->num_devices) {
		struct btrfs_fs_devices *tmp_fs_devices;

		/*
		 * On a mounted FS, num_devices can't be zero unless it's a
		 * seed. In case of a seed device being replaced, the replace
		 * target is added to the sprout FS, so there will be no
		 * devices left under the seed FS.
		 */
		ASSERT(fs_devices->seeding);

		tmp_fs_devices = fs_info->fs_devices;
		while (tmp_fs_devices) {
			if (tmp_fs_devices->seed == fs_devices) {
				tmp_fs_devices->seed = fs_devices->seed;
				break;
			}
			tmp_fs_devices = tmp_fs_devices->seed;
		}
		fs_devices->seed = NULL;
		close_fs_devices(fs_devices);
		free_fs_devices(fs_devices);
	}
}

void btrfs_destroy_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
				      struct btrfs_device *tgtdev)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;

	WARN_ON(!tgtdev);
	mutex_lock(&fs_devices->device_list_mutex);

	btrfs_sysfs_rm_device_link(fs_devices, tgtdev);

	if (tgtdev->bdev)
		fs_devices->open_devices--;

	fs_devices->num_devices--;

	btrfs_assign_next_active_device(fs_info, tgtdev, NULL);

	list_del_rcu(&tgtdev->dev_list);

	mutex_unlock(&fs_devices->device_list_mutex);

	/*
	 * The update_dev_time() within btrfs_scratch_superblocks() may lead
	 * to a call to btrfs_show_devname(), which will try to hold
	 * device_list_mutex. At this point the device is already off the
	 * device list, so we don't have to hold the device_list_mutex lock.
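	 *
	 * The call chain being avoided looks roughly like this (a sketch
	 * based on the description above, not a verbatim trace):
	 *
	 *	btrfs_scratch_superblocks()
	 *	  update_dev_time()
	 *	    ...
	 *	      btrfs_show_devname()   wants device_list_mutex
	 *
	 * which is why the mutex is dropped before scratching the supers.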
2179 */ 2180 btrfs_scratch_superblocks(tgtdev->bdev, tgtdev->name->str); 2181 2182 btrfs_close_bdev(tgtdev); 2183 call_rcu(&tgtdev->rcu, free_device_rcu); 2184 } 2185 2186 static int btrfs_find_device_by_path(struct btrfs_fs_info *fs_info, 2187 const char *device_path, 2188 struct btrfs_device **device) 2189 { 2190 int ret = 0; 2191 struct btrfs_super_block *disk_super; 2192 u64 devid; 2193 u8 *dev_uuid; 2194 struct block_device *bdev; 2195 struct buffer_head *bh; 2196 2197 *device = NULL; 2198 ret = btrfs_get_bdev_and_sb(device_path, FMODE_READ, 2199 fs_info->bdev_holder, 0, &bdev, &bh); 2200 if (ret) 2201 return ret; 2202 disk_super = (struct btrfs_super_block *)bh->b_data; 2203 devid = btrfs_stack_device_id(&disk_super->dev_item); 2204 dev_uuid = disk_super->dev_item.uuid; 2205 *device = btrfs_find_device(fs_info, devid, dev_uuid, disk_super->fsid); 2206 brelse(bh); 2207 if (!*device) 2208 ret = -ENOENT; 2209 blkdev_put(bdev, FMODE_READ); 2210 return ret; 2211 } 2212 2213 int btrfs_find_device_missing_or_by_path(struct btrfs_fs_info *fs_info, 2214 const char *device_path, 2215 struct btrfs_device **device) 2216 { 2217 *device = NULL; 2218 if (strcmp(device_path, "missing") == 0) { 2219 struct list_head *devices; 2220 struct btrfs_device *tmp; 2221 2222 devices = &fs_info->fs_devices->devices; 2223 list_for_each_entry(tmp, devices, dev_list) { 2224 if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, 2225 &tmp->dev_state) && !tmp->bdev) { 2226 *device = tmp; 2227 break; 2228 } 2229 } 2230 2231 if (!*device) 2232 return BTRFS_ERROR_DEV_MISSING_NOT_FOUND; 2233 2234 return 0; 2235 } else { 2236 return btrfs_find_device_by_path(fs_info, device_path, device); 2237 } 2238 } 2239 2240 /* 2241 * Lookup a device given by device id, or the path if the id is 0. 2242 */ 2243 int btrfs_find_device_by_devspec(struct btrfs_fs_info *fs_info, u64 devid, 2244 const char *devpath, 2245 struct btrfs_device **device) 2246 { 2247 int ret; 2248 2249 if (devid) { 2250 ret = 0; 2251 *device = btrfs_find_device(fs_info, devid, NULL, NULL); 2252 if (!*device) 2253 ret = -ENOENT; 2254 } else { 2255 if (!devpath || !devpath[0]) 2256 return -EINVAL; 2257 2258 ret = btrfs_find_device_missing_or_by_path(fs_info, devpath, 2259 device); 2260 } 2261 return ret; 2262 } 2263 2264 /* 2265 * does all the dirty work required for changing file system's UUID. 
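 *
 * In outline (condensed from the function body below): a new, empty
 * seed_devices is allocated, the current device list is cloned onto the
 * global fs_uuids list, the live devices are spliced under seed_devices,
 * the counters on fs_devices are reset, and a fresh fsid is generated:
 *
 *	seed_devices = alloc_fs_devices(NULL);
 *	old_devices = clone_fs_devices(fs_devices);
 *	list_add(&old_devices->fs_list, &fs_uuids);
 *	...
 *	generate_random_uuid(fs_devices->fsid);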
2266 */ 2267 static int btrfs_prepare_sprout(struct btrfs_fs_info *fs_info) 2268 { 2269 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 2270 struct btrfs_fs_devices *old_devices; 2271 struct btrfs_fs_devices *seed_devices; 2272 struct btrfs_super_block *disk_super = fs_info->super_copy; 2273 struct btrfs_device *device; 2274 u64 super_flags; 2275 2276 lockdep_assert_held(&uuid_mutex); 2277 if (!fs_devices->seeding) 2278 return -EINVAL; 2279 2280 seed_devices = alloc_fs_devices(NULL); 2281 if (IS_ERR(seed_devices)) 2282 return PTR_ERR(seed_devices); 2283 2284 old_devices = clone_fs_devices(fs_devices); 2285 if (IS_ERR(old_devices)) { 2286 kfree(seed_devices); 2287 return PTR_ERR(old_devices); 2288 } 2289 2290 list_add(&old_devices->fs_list, &fs_uuids); 2291 2292 memcpy(seed_devices, fs_devices, sizeof(*seed_devices)); 2293 seed_devices->opened = 1; 2294 INIT_LIST_HEAD(&seed_devices->devices); 2295 INIT_LIST_HEAD(&seed_devices->alloc_list); 2296 mutex_init(&seed_devices->device_list_mutex); 2297 2298 mutex_lock(&fs_info->fs_devices->device_list_mutex); 2299 list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices, 2300 synchronize_rcu); 2301 list_for_each_entry(device, &seed_devices->devices, dev_list) 2302 device->fs_devices = seed_devices; 2303 2304 mutex_lock(&fs_info->chunk_mutex); 2305 list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list); 2306 mutex_unlock(&fs_info->chunk_mutex); 2307 2308 fs_devices->seeding = 0; 2309 fs_devices->num_devices = 0; 2310 fs_devices->open_devices = 0; 2311 fs_devices->missing_devices = 0; 2312 fs_devices->rotating = 0; 2313 fs_devices->seed = seed_devices; 2314 2315 generate_random_uuid(fs_devices->fsid); 2316 memcpy(fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE); 2317 memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE); 2318 mutex_unlock(&fs_info->fs_devices->device_list_mutex); 2319 2320 super_flags = btrfs_super_flags(disk_super) & 2321 ~BTRFS_SUPER_FLAG_SEEDING; 2322 btrfs_set_super_flags(disk_super, super_flags); 2323 2324 return 0; 2325 } 2326 2327 /* 2328 * Store the expected generation for seed devices in device items. 
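 *
 * Concretely, for every DEV_ITEM found in the chunk tree whose device
 * belongs to a seeding fs_devices, the generation field is rewritten
 * (excerpted from the loop below):
 *
 *	if (device->fs_devices->seeding) {
 *		btrfs_set_device_generation(leaf, dev_item,
 *					    device->generation);
 *		btrfs_mark_buffer_dirty(leaf);
 *	}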
2329 */ 2330 static int btrfs_finish_sprout(struct btrfs_trans_handle *trans, 2331 struct btrfs_fs_info *fs_info) 2332 { 2333 struct btrfs_root *root = fs_info->chunk_root; 2334 struct btrfs_path *path; 2335 struct extent_buffer *leaf; 2336 struct btrfs_dev_item *dev_item; 2337 struct btrfs_device *device; 2338 struct btrfs_key key; 2339 u8 fs_uuid[BTRFS_FSID_SIZE]; 2340 u8 dev_uuid[BTRFS_UUID_SIZE]; 2341 u64 devid; 2342 int ret; 2343 2344 path = btrfs_alloc_path(); 2345 if (!path) 2346 return -ENOMEM; 2347 2348 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 2349 key.offset = 0; 2350 key.type = BTRFS_DEV_ITEM_KEY; 2351 2352 while (1) { 2353 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 2354 if (ret < 0) 2355 goto error; 2356 2357 leaf = path->nodes[0]; 2358 next_slot: 2359 if (path->slots[0] >= btrfs_header_nritems(leaf)) { 2360 ret = btrfs_next_leaf(root, path); 2361 if (ret > 0) 2362 break; 2363 if (ret < 0) 2364 goto error; 2365 leaf = path->nodes[0]; 2366 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 2367 btrfs_release_path(path); 2368 continue; 2369 } 2370 2371 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 2372 if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID || 2373 key.type != BTRFS_DEV_ITEM_KEY) 2374 break; 2375 2376 dev_item = btrfs_item_ptr(leaf, path->slots[0], 2377 struct btrfs_dev_item); 2378 devid = btrfs_device_id(leaf, dev_item); 2379 read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item), 2380 BTRFS_UUID_SIZE); 2381 read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item), 2382 BTRFS_FSID_SIZE); 2383 device = btrfs_find_device(fs_info, devid, dev_uuid, fs_uuid); 2384 BUG_ON(!device); /* Logic error */ 2385 2386 if (device->fs_devices->seeding) { 2387 btrfs_set_device_generation(leaf, dev_item, 2388 device->generation); 2389 btrfs_mark_buffer_dirty(leaf); 2390 } 2391 2392 path->slots[0]++; 2393 goto next_slot; 2394 } 2395 ret = 0; 2396 error: 2397 btrfs_free_path(path); 2398 return ret; 2399 } 2400 2401 int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path) 2402 { 2403 struct btrfs_root *root = fs_info->dev_root; 2404 struct request_queue *q; 2405 struct btrfs_trans_handle *trans; 2406 struct btrfs_device *device; 2407 struct block_device *bdev; 2408 struct list_head *devices; 2409 struct super_block *sb = fs_info->sb; 2410 struct rcu_string *name; 2411 u64 tmp; 2412 int seeding_dev = 0; 2413 int ret = 0; 2414 bool unlocked = false; 2415 2416 if (sb_rdonly(sb) && !fs_info->fs_devices->seeding) 2417 return -EROFS; 2418 2419 bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL, 2420 fs_info->bdev_holder); 2421 if (IS_ERR(bdev)) 2422 return PTR_ERR(bdev); 2423 2424 if (fs_info->fs_devices->seeding) { 2425 seeding_dev = 1; 2426 down_write(&sb->s_umount); 2427 mutex_lock(&uuid_mutex); 2428 } 2429 2430 filemap_write_and_wait(bdev->bd_inode->i_mapping); 2431 2432 devices = &fs_info->fs_devices->devices; 2433 2434 mutex_lock(&fs_info->fs_devices->device_list_mutex); 2435 list_for_each_entry(device, devices, dev_list) { 2436 if (device->bdev == bdev) { 2437 ret = -EEXIST; 2438 mutex_unlock( 2439 &fs_info->fs_devices->device_list_mutex); 2440 goto error; 2441 } 2442 } 2443 mutex_unlock(&fs_info->fs_devices->device_list_mutex); 2444 2445 device = btrfs_alloc_device(fs_info, NULL, NULL); 2446 if (IS_ERR(device)) { 2447 /* we can safely leave the fs_devices entry around */ 2448 ret = PTR_ERR(device); 2449 goto error; 2450 } 2451 2452 name = rcu_string_strdup(device_path, GFP_KERNEL); 2453 if (!name) { 2454 ret = -ENOMEM; 2455 
goto error_free_device; 2456 } 2457 rcu_assign_pointer(device->name, name); 2458 2459 trans = btrfs_start_transaction(root, 0); 2460 if (IS_ERR(trans)) { 2461 ret = PTR_ERR(trans); 2462 goto error_free_device; 2463 } 2464 2465 q = bdev_get_queue(bdev); 2466 set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state); 2467 device->generation = trans->transid; 2468 device->io_width = fs_info->sectorsize; 2469 device->io_align = fs_info->sectorsize; 2470 device->sector_size = fs_info->sectorsize; 2471 device->total_bytes = round_down(i_size_read(bdev->bd_inode), 2472 fs_info->sectorsize); 2473 device->disk_total_bytes = device->total_bytes; 2474 device->commit_total_bytes = device->total_bytes; 2475 device->fs_info = fs_info; 2476 device->bdev = bdev; 2477 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state); 2478 clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state); 2479 device->mode = FMODE_EXCL; 2480 device->dev_stats_valid = 1; 2481 set_blocksize(device->bdev, BTRFS_BDEV_BLOCKSIZE); 2482 2483 if (seeding_dev) { 2484 sb->s_flags &= ~SB_RDONLY; 2485 ret = btrfs_prepare_sprout(fs_info); 2486 if (ret) { 2487 btrfs_abort_transaction(trans, ret); 2488 goto error_trans; 2489 } 2490 } 2491 2492 device->fs_devices = fs_info->fs_devices; 2493 2494 mutex_lock(&fs_info->fs_devices->device_list_mutex); 2495 mutex_lock(&fs_info->chunk_mutex); 2496 list_add_rcu(&device->dev_list, &fs_info->fs_devices->devices); 2497 list_add(&device->dev_alloc_list, 2498 &fs_info->fs_devices->alloc_list); 2499 fs_info->fs_devices->num_devices++; 2500 fs_info->fs_devices->open_devices++; 2501 fs_info->fs_devices->rw_devices++; 2502 fs_info->fs_devices->total_devices++; 2503 fs_info->fs_devices->total_rw_bytes += device->total_bytes; 2504 2505 atomic64_add(device->total_bytes, &fs_info->free_chunk_space); 2506 2507 if (!blk_queue_nonrot(q)) 2508 fs_info->fs_devices->rotating = 1; 2509 2510 tmp = btrfs_super_total_bytes(fs_info->super_copy); 2511 btrfs_set_super_total_bytes(fs_info->super_copy, 2512 round_down(tmp + device->total_bytes, fs_info->sectorsize)); 2513 2514 tmp = btrfs_super_num_devices(fs_info->super_copy); 2515 btrfs_set_super_num_devices(fs_info->super_copy, tmp + 1); 2516 2517 /* add sysfs device entry */ 2518 btrfs_sysfs_add_device_link(fs_info->fs_devices, device); 2519 2520 /* 2521 * we've got more storage, clear any full flags on the space 2522 * infos 2523 */ 2524 btrfs_clear_space_info_full(fs_info); 2525 2526 mutex_unlock(&fs_info->chunk_mutex); 2527 mutex_unlock(&fs_info->fs_devices->device_list_mutex); 2528 2529 if (seeding_dev) { 2530 mutex_lock(&fs_info->chunk_mutex); 2531 ret = init_first_rw_device(trans, fs_info); 2532 mutex_unlock(&fs_info->chunk_mutex); 2533 if (ret) { 2534 btrfs_abort_transaction(trans, ret); 2535 goto error_sysfs; 2536 } 2537 } 2538 2539 ret = btrfs_add_dev_item(trans, fs_info, device); 2540 if (ret) { 2541 btrfs_abort_transaction(trans, ret); 2542 goto error_sysfs; 2543 } 2544 2545 if (seeding_dev) { 2546 char fsid_buf[BTRFS_UUID_UNPARSED_SIZE]; 2547 2548 ret = btrfs_finish_sprout(trans, fs_info); 2549 if (ret) { 2550 btrfs_abort_transaction(trans, ret); 2551 goto error_sysfs; 2552 } 2553 2554 /* Sprouting would change fsid of the mounted root, 2555 * so rename the fsid on the sysfs 2556 */ 2557 snprintf(fsid_buf, BTRFS_UUID_UNPARSED_SIZE, "%pU", 2558 fs_info->fsid); 2559 if (kobject_rename(&fs_info->fs_devices->fsid_kobj, fsid_buf)) 2560 btrfs_warn(fs_info, 2561 "sysfs: failed to create fsid for sprout"); 2562 } 2563 2564 ret = btrfs_commit_transaction(trans); 
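
	/*
	 * Post-commit tail for the seeding case (illustrative summary of the
	 * code below, not additional behavior): drop uuid_mutex and s_umount,
	 * relocate the system chunks, attach and commit one more transaction,
	 * then bump the device node's timestamps so ctime/mtime based probes
	 * like libblkid notice the new member device.
	 */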
2565 2566 if (seeding_dev) { 2567 mutex_unlock(&uuid_mutex); 2568 up_write(&sb->s_umount); 2569 unlocked = true; 2570 2571 if (ret) /* transaction commit */ 2572 return ret; 2573 2574 ret = btrfs_relocate_sys_chunks(fs_info); 2575 if (ret < 0) 2576 btrfs_handle_fs_error(fs_info, ret, 2577 "Failed to relocate sys chunks after device initialization. This can be fixed using the \"btrfs balance\" command."); 2578 trans = btrfs_attach_transaction(root); 2579 if (IS_ERR(trans)) { 2580 if (PTR_ERR(trans) == -ENOENT) 2581 return 0; 2582 ret = PTR_ERR(trans); 2583 trans = NULL; 2584 goto error_sysfs; 2585 } 2586 ret = btrfs_commit_transaction(trans); 2587 } 2588 2589 /* Update ctime/mtime for libblkid */ 2590 update_dev_time(device_path); 2591 return ret; 2592 2593 error_sysfs: 2594 btrfs_sysfs_rm_device_link(fs_info->fs_devices, device); 2595 error_trans: 2596 if (seeding_dev) 2597 sb->s_flags |= SB_RDONLY; 2598 if (trans) 2599 btrfs_end_transaction(trans); 2600 error_free_device: 2601 btrfs_free_device(device); 2602 error: 2603 blkdev_put(bdev, FMODE_EXCL); 2604 if (seeding_dev && !unlocked) { 2605 mutex_unlock(&uuid_mutex); 2606 up_write(&sb->s_umount); 2607 } 2608 return ret; 2609 } 2610 2611 static noinline int btrfs_update_device(struct btrfs_trans_handle *trans, 2612 struct btrfs_device *device) 2613 { 2614 int ret; 2615 struct btrfs_path *path; 2616 struct btrfs_root *root = device->fs_info->chunk_root; 2617 struct btrfs_dev_item *dev_item; 2618 struct extent_buffer *leaf; 2619 struct btrfs_key key; 2620 2621 path = btrfs_alloc_path(); 2622 if (!path) 2623 return -ENOMEM; 2624 2625 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 2626 key.type = BTRFS_DEV_ITEM_KEY; 2627 key.offset = device->devid; 2628 2629 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 2630 if (ret < 0) 2631 goto out; 2632 2633 if (ret > 0) { 2634 ret = -ENOENT; 2635 goto out; 2636 } 2637 2638 leaf = path->nodes[0]; 2639 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item); 2640 2641 btrfs_set_device_id(leaf, dev_item, device->devid); 2642 btrfs_set_device_type(leaf, dev_item, device->type); 2643 btrfs_set_device_io_align(leaf, dev_item, device->io_align); 2644 btrfs_set_device_io_width(leaf, dev_item, device->io_width); 2645 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size); 2646 btrfs_set_device_total_bytes(leaf, dev_item, 2647 btrfs_device_get_disk_total_bytes(device)); 2648 btrfs_set_device_bytes_used(leaf, dev_item, 2649 btrfs_device_get_bytes_used(device)); 2650 btrfs_mark_buffer_dirty(leaf); 2651 2652 out: 2653 btrfs_free_path(path); 2654 return ret; 2655 } 2656 2657 int btrfs_grow_device(struct btrfs_trans_handle *trans, 2658 struct btrfs_device *device, u64 new_size) 2659 { 2660 struct btrfs_fs_info *fs_info = device->fs_info; 2661 struct btrfs_super_block *super_copy = fs_info->super_copy; 2662 struct btrfs_fs_devices *fs_devices; 2663 u64 old_total; 2664 u64 diff; 2665 2666 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) 2667 return -EACCES; 2668 2669 new_size = round_down(new_size, fs_info->sectorsize); 2670 2671 mutex_lock(&fs_info->chunk_mutex); 2672 old_total = btrfs_super_total_bytes(super_copy); 2673 diff = round_down(new_size - device->total_bytes, fs_info->sectorsize); 2674 2675 if (new_size <= device->total_bytes || 2676 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) { 2677 mutex_unlock(&fs_info->chunk_mutex); 2678 return -EINVAL; 2679 } 2680 2681 fs_devices = fs_info->fs_devices; 2682 2683 btrfs_set_super_total_bytes(super_copy, 2684 
round_down(old_total + diff, fs_info->sectorsize)); 2685 device->fs_devices->total_rw_bytes += diff; 2686 2687 btrfs_device_set_total_bytes(device, new_size); 2688 btrfs_device_set_disk_total_bytes(device, new_size); 2689 btrfs_clear_space_info_full(device->fs_info); 2690 if (list_empty(&device->resized_list)) 2691 list_add_tail(&device->resized_list, 2692 &fs_devices->resized_devices); 2693 mutex_unlock(&fs_info->chunk_mutex); 2694 2695 return btrfs_update_device(trans, device); 2696 } 2697 2698 static int btrfs_free_chunk(struct btrfs_trans_handle *trans, 2699 struct btrfs_fs_info *fs_info, u64 chunk_offset) 2700 { 2701 struct btrfs_root *root = fs_info->chunk_root; 2702 int ret; 2703 struct btrfs_path *path; 2704 struct btrfs_key key; 2705 2706 path = btrfs_alloc_path(); 2707 if (!path) 2708 return -ENOMEM; 2709 2710 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 2711 key.offset = chunk_offset; 2712 key.type = BTRFS_CHUNK_ITEM_KEY; 2713 2714 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 2715 if (ret < 0) 2716 goto out; 2717 else if (ret > 0) { /* Logic error or corruption */ 2718 btrfs_handle_fs_error(fs_info, -ENOENT, 2719 "Failed lookup while freeing chunk."); 2720 ret = -ENOENT; 2721 goto out; 2722 } 2723 2724 ret = btrfs_del_item(trans, root, path); 2725 if (ret < 0) 2726 btrfs_handle_fs_error(fs_info, ret, 2727 "Failed to delete chunk item."); 2728 out: 2729 btrfs_free_path(path); 2730 return ret; 2731 } 2732 2733 static int btrfs_del_sys_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset) 2734 { 2735 struct btrfs_super_block *super_copy = fs_info->super_copy; 2736 struct btrfs_disk_key *disk_key; 2737 struct btrfs_chunk *chunk; 2738 u8 *ptr; 2739 int ret = 0; 2740 u32 num_stripes; 2741 u32 array_size; 2742 u32 len = 0; 2743 u32 cur; 2744 struct btrfs_key key; 2745 2746 mutex_lock(&fs_info->chunk_mutex); 2747 array_size = btrfs_super_sys_array_size(super_copy); 2748 2749 ptr = super_copy->sys_chunk_array; 2750 cur = 0; 2751 2752 while (cur < array_size) { 2753 disk_key = (struct btrfs_disk_key *)ptr; 2754 btrfs_disk_key_to_cpu(&key, disk_key); 2755 2756 len = sizeof(*disk_key); 2757 2758 if (key.type == BTRFS_CHUNK_ITEM_KEY) { 2759 chunk = (struct btrfs_chunk *)(ptr + len); 2760 num_stripes = btrfs_stack_chunk_num_stripes(chunk); 2761 len += btrfs_chunk_item_size(num_stripes); 2762 } else { 2763 ret = -EIO; 2764 break; 2765 } 2766 if (key.objectid == BTRFS_FIRST_CHUNK_TREE_OBJECTID && 2767 key.offset == chunk_offset) { 2768 memmove(ptr, ptr + len, array_size - (cur + len)); 2769 array_size -= len; 2770 btrfs_set_super_sys_array_size(super_copy, array_size); 2771 } else { 2772 ptr += len; 2773 cur += len; 2774 } 2775 } 2776 mutex_unlock(&fs_info->chunk_mutex); 2777 return ret; 2778 } 2779 2780 static struct extent_map *get_chunk_map(struct btrfs_fs_info *fs_info, 2781 u64 logical, u64 length) 2782 { 2783 struct extent_map_tree *em_tree; 2784 struct extent_map *em; 2785 2786 em_tree = &fs_info->mapping_tree.map_tree; 2787 read_lock(&em_tree->lock); 2788 em = lookup_extent_mapping(em_tree, logical, length); 2789 read_unlock(&em_tree->lock); 2790 2791 if (!em) { 2792 btrfs_crit(fs_info, "unable to find logical %llu length %llu", 2793 logical, length); 2794 return ERR_PTR(-EINVAL); 2795 } 2796 2797 if (em->start > logical || em->start + em->len < logical) { 2798 btrfs_crit(fs_info, 2799 "found a bad mapping, wanted %llu-%llu, found %llu-%llu", 2800 logical, length, em->start, em->start + em->len); 2801 free_extent_map(em); 2802 return ERR_PTR(-EINVAL); 2803 } 2804 2805 /* 
callers are responsible for dropping em's ref. */ 2806 return em; 2807 } 2808 2809 int btrfs_remove_chunk(struct btrfs_trans_handle *trans, 2810 struct btrfs_fs_info *fs_info, u64 chunk_offset) 2811 { 2812 struct extent_map *em; 2813 struct map_lookup *map; 2814 u64 dev_extent_len = 0; 2815 int i, ret = 0; 2816 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 2817 2818 em = get_chunk_map(fs_info, chunk_offset, 1); 2819 if (IS_ERR(em)) { 2820 /* 2821 * This is a logic error, but we don't want to just rely on the 2822 * user having built with ASSERT enabled, so if ASSERT doesn't 2823 * do anything we still error out. 2824 */ 2825 ASSERT(0); 2826 return PTR_ERR(em); 2827 } 2828 map = em->map_lookup; 2829 mutex_lock(&fs_info->chunk_mutex); 2830 check_system_chunk(trans, fs_info, map->type); 2831 mutex_unlock(&fs_info->chunk_mutex); 2832 2833 /* 2834 * Take the device list mutex to prevent races with the final phase of 2835 * a device replace operation that replaces the device object associated 2836 * with map stripes (dev-replace.c:btrfs_dev_replace_finishing()). 2837 */ 2838 mutex_lock(&fs_devices->device_list_mutex); 2839 for (i = 0; i < map->num_stripes; i++) { 2840 struct btrfs_device *device = map->stripes[i].dev; 2841 ret = btrfs_free_dev_extent(trans, device, 2842 map->stripes[i].physical, 2843 &dev_extent_len); 2844 if (ret) { 2845 mutex_unlock(&fs_devices->device_list_mutex); 2846 btrfs_abort_transaction(trans, ret); 2847 goto out; 2848 } 2849 2850 if (device->bytes_used > 0) { 2851 mutex_lock(&fs_info->chunk_mutex); 2852 btrfs_device_set_bytes_used(device, 2853 device->bytes_used - dev_extent_len); 2854 atomic64_add(dev_extent_len, &fs_info->free_chunk_space); 2855 btrfs_clear_space_info_full(fs_info); 2856 mutex_unlock(&fs_info->chunk_mutex); 2857 } 2858 2859 if (map->stripes[i].dev) { 2860 ret = btrfs_update_device(trans, map->stripes[i].dev); 2861 if (ret) { 2862 mutex_unlock(&fs_devices->device_list_mutex); 2863 btrfs_abort_transaction(trans, ret); 2864 goto out; 2865 } 2866 } 2867 } 2868 mutex_unlock(&fs_devices->device_list_mutex); 2869 2870 ret = btrfs_free_chunk(trans, fs_info, chunk_offset); 2871 if (ret) { 2872 btrfs_abort_transaction(trans, ret); 2873 goto out; 2874 } 2875 2876 trace_btrfs_chunk_free(fs_info, map, chunk_offset, em->len); 2877 2878 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) { 2879 ret = btrfs_del_sys_chunk(fs_info, chunk_offset); 2880 if (ret) { 2881 btrfs_abort_transaction(trans, ret); 2882 goto out; 2883 } 2884 } 2885 2886 ret = btrfs_remove_block_group(trans, fs_info, chunk_offset, em); 2887 if (ret) { 2888 btrfs_abort_transaction(trans, ret); 2889 goto out; 2890 } 2891 2892 out: 2893 /* once for us */ 2894 free_extent_map(em); 2895 return ret; 2896 } 2897 2898 static int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset) 2899 { 2900 struct btrfs_root *root = fs_info->chunk_root; 2901 struct btrfs_trans_handle *trans; 2902 int ret; 2903 2904 /* 2905 * Prevent races with automatic removal of unused block groups. 2906 * After we relocate and before we remove the chunk with offset 2907 * chunk_offset, automatic removal of the block group can kick in, 2908 * resulting in a failure when calling btrfs_remove_chunk() below. 2909 * 2910 * Make sure to acquire this mutex before doing a tree search (dev 2911 * or chunk trees) to find chunks. 
Otherwise the cleaner kthread might 2912 * call btrfs_remove_chunk() (through btrfs_delete_unused_bgs()) after 2913 * we release the path used to search the chunk/dev tree and before 2914 * the current task acquires this mutex and calls us. 2915 */ 2916 lockdep_assert_held(&fs_info->delete_unused_bgs_mutex); 2917 2918 ret = btrfs_can_relocate(fs_info, chunk_offset); 2919 if (ret) 2920 return -ENOSPC; 2921 2922 /* step one, relocate all the extents inside this chunk */ 2923 btrfs_scrub_pause(fs_info); 2924 ret = btrfs_relocate_block_group(fs_info, chunk_offset); 2925 btrfs_scrub_continue(fs_info); 2926 if (ret) 2927 return ret; 2928 2929 /* 2930 * We add the kobjects here (and after forcing data chunk creation) 2931 * since relocation is the only place we'll create chunks of a new 2932 * type at runtime. The only place where we'll remove the last 2933 * chunk of a type is the call immediately below this one. Even 2934 * so, we're protected against races with the cleaner thread since 2935 * we're covered by the delete_unused_bgs_mutex. 2936 */ 2937 btrfs_add_raid_kobjects(fs_info); 2938 2939 trans = btrfs_start_trans_remove_block_group(root->fs_info, 2940 chunk_offset); 2941 if (IS_ERR(trans)) { 2942 ret = PTR_ERR(trans); 2943 btrfs_handle_fs_error(root->fs_info, ret, NULL); 2944 return ret; 2945 } 2946 2947 /* 2948 * step two, delete the device extents and the 2949 * chunk tree entries 2950 */ 2951 ret = btrfs_remove_chunk(trans, fs_info, chunk_offset); 2952 btrfs_end_transaction(trans); 2953 return ret; 2954 } 2955 2956 static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info) 2957 { 2958 struct btrfs_root *chunk_root = fs_info->chunk_root; 2959 struct btrfs_path *path; 2960 struct extent_buffer *leaf; 2961 struct btrfs_chunk *chunk; 2962 struct btrfs_key key; 2963 struct btrfs_key found_key; 2964 u64 chunk_type; 2965 bool retried = false; 2966 int failed = 0; 2967 int ret; 2968 2969 path = btrfs_alloc_path(); 2970 if (!path) 2971 return -ENOMEM; 2972 2973 again: 2974 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 2975 key.offset = (u64)-1; 2976 key.type = BTRFS_CHUNK_ITEM_KEY; 2977 2978 while (1) { 2979 mutex_lock(&fs_info->delete_unused_bgs_mutex); 2980 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0); 2981 if (ret < 0) { 2982 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 2983 goto error; 2984 } 2985 BUG_ON(ret == 0); /* Corruption */ 2986 2987 ret = btrfs_previous_item(chunk_root, path, key.objectid, 2988 key.type); 2989 if (ret) 2990 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 2991 if (ret < 0) 2992 goto error; 2993 if (ret > 0) 2994 break; 2995 2996 leaf = path->nodes[0]; 2997 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 2998 2999 chunk = btrfs_item_ptr(leaf, path->slots[0], 3000 struct btrfs_chunk); 3001 chunk_type = btrfs_chunk_type(leaf, chunk); 3002 btrfs_release_path(path); 3003 3004 if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) { 3005 ret = btrfs_relocate_chunk(fs_info, found_key.offset); 3006 if (ret == -ENOSPC) 3007 failed++; 3008 else 3009 BUG_ON(ret); 3010 } 3011 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 3012 3013 if (found_key.offset == 0) 3014 break; 3015 key.offset = found_key.offset - 1; 3016 } 3017 ret = 0; 3018 if (failed && !retried) { 3019 failed = 0; 3020 retried = true; 3021 goto again; 3022 } else if (WARN_ON(failed && retried)) { 3023 ret = -ENOSPC; 3024 } 3025 error: 3026 btrfs_free_path(path); 3027 return ret; 3028 } 3029 3030 /* 3031 * return 1 : allocate a data chunk successfully, 3032 * return <0: errors during 
allocating a data chunk, 3033 * return 0 : no need to allocate a data chunk. 3034 */ 3035 static int btrfs_may_alloc_data_chunk(struct btrfs_fs_info *fs_info, 3036 u64 chunk_offset) 3037 { 3038 struct btrfs_block_group_cache *cache; 3039 u64 bytes_used; 3040 u64 chunk_type; 3041 3042 cache = btrfs_lookup_block_group(fs_info, chunk_offset); 3043 ASSERT(cache); 3044 chunk_type = cache->flags; 3045 btrfs_put_block_group(cache); 3046 3047 if (chunk_type & BTRFS_BLOCK_GROUP_DATA) { 3048 spin_lock(&fs_info->data_sinfo->lock); 3049 bytes_used = fs_info->data_sinfo->bytes_used; 3050 spin_unlock(&fs_info->data_sinfo->lock); 3051 3052 if (!bytes_used) { 3053 struct btrfs_trans_handle *trans; 3054 int ret; 3055 3056 trans = btrfs_join_transaction(fs_info->tree_root); 3057 if (IS_ERR(trans)) 3058 return PTR_ERR(trans); 3059 3060 ret = btrfs_force_chunk_alloc(trans, fs_info, 3061 BTRFS_BLOCK_GROUP_DATA); 3062 btrfs_end_transaction(trans); 3063 if (ret < 0) 3064 return ret; 3065 3066 btrfs_add_raid_kobjects(fs_info); 3067 3068 return 1; 3069 } 3070 } 3071 return 0; 3072 } 3073 3074 static int insert_balance_item(struct btrfs_fs_info *fs_info, 3075 struct btrfs_balance_control *bctl) 3076 { 3077 struct btrfs_root *root = fs_info->tree_root; 3078 struct btrfs_trans_handle *trans; 3079 struct btrfs_balance_item *item; 3080 struct btrfs_disk_balance_args disk_bargs; 3081 struct btrfs_path *path; 3082 struct extent_buffer *leaf; 3083 struct btrfs_key key; 3084 int ret, err; 3085 3086 path = btrfs_alloc_path(); 3087 if (!path) 3088 return -ENOMEM; 3089 3090 trans = btrfs_start_transaction(root, 0); 3091 if (IS_ERR(trans)) { 3092 btrfs_free_path(path); 3093 return PTR_ERR(trans); 3094 } 3095 3096 key.objectid = BTRFS_BALANCE_OBJECTID; 3097 key.type = BTRFS_TEMPORARY_ITEM_KEY; 3098 key.offset = 0; 3099 3100 ret = btrfs_insert_empty_item(trans, root, path, &key, 3101 sizeof(*item)); 3102 if (ret) 3103 goto out; 3104 3105 leaf = path->nodes[0]; 3106 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item); 3107 3108 memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item)); 3109 3110 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data); 3111 btrfs_set_balance_data(leaf, item, &disk_bargs); 3112 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta); 3113 btrfs_set_balance_meta(leaf, item, &disk_bargs); 3114 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys); 3115 btrfs_set_balance_sys(leaf, item, &disk_bargs); 3116 3117 btrfs_set_balance_flags(leaf, item, bctl->flags); 3118 3119 btrfs_mark_buffer_dirty(leaf); 3120 out: 3121 btrfs_free_path(path); 3122 err = btrfs_commit_transaction(trans); 3123 if (err && !ret) 3124 ret = err; 3125 return ret; 3126 } 3127 3128 static int del_balance_item(struct btrfs_fs_info *fs_info) 3129 { 3130 struct btrfs_root *root = fs_info->tree_root; 3131 struct btrfs_trans_handle *trans; 3132 struct btrfs_path *path; 3133 struct btrfs_key key; 3134 int ret, err; 3135 3136 path = btrfs_alloc_path(); 3137 if (!path) 3138 return -ENOMEM; 3139 3140 trans = btrfs_start_transaction(root, 0); 3141 if (IS_ERR(trans)) { 3142 btrfs_free_path(path); 3143 return PTR_ERR(trans); 3144 } 3145 3146 key.objectid = BTRFS_BALANCE_OBJECTID; 3147 key.type = BTRFS_TEMPORARY_ITEM_KEY; 3148 key.offset = 0; 3149 3150 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 3151 if (ret < 0) 3152 goto out; 3153 if (ret > 0) { 3154 ret = -ENOENT; 3155 goto out; 3156 } 3157 3158 ret = btrfs_del_item(trans, root, path); 3159 out: 3160 btrfs_free_path(path); 3161 err = 
btrfs_commit_transaction(trans); 3162 if (err && !ret) 3163 ret = err; 3164 return ret; 3165 } 3166 3167 /* 3168 * This is a heuristic used to reduce the number of chunks balanced on 3169 * resume after balance was interrupted. 3170 */ 3171 static void update_balance_args(struct btrfs_balance_control *bctl) 3172 { 3173 /* 3174 * Turn on soft mode for chunk types that were being converted. 3175 */ 3176 if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) 3177 bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT; 3178 if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) 3179 bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT; 3180 if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) 3181 bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT; 3182 3183 /* 3184 * Turn on usage filter if is not already used. The idea is 3185 * that chunks that we have already balanced should be 3186 * reasonably full. Don't do it for chunks that are being 3187 * converted - that will keep us from relocating unconverted 3188 * (albeit full) chunks. 3189 */ 3190 if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) && 3191 !(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) && 3192 !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) { 3193 bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE; 3194 bctl->data.usage = 90; 3195 } 3196 if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) && 3197 !(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) && 3198 !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) { 3199 bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE; 3200 bctl->sys.usage = 90; 3201 } 3202 if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) && 3203 !(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) && 3204 !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) { 3205 bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE; 3206 bctl->meta.usage = 90; 3207 } 3208 } 3209 3210 /* 3211 * Clear the balance status in fs_info and delete the balance item from disk. 3212 */ 3213 static void reset_balance_state(struct btrfs_fs_info *fs_info) 3214 { 3215 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 3216 int ret; 3217 3218 BUG_ON(!fs_info->balance_ctl); 3219 3220 spin_lock(&fs_info->balance_lock); 3221 fs_info->balance_ctl = NULL; 3222 spin_unlock(&fs_info->balance_lock); 3223 3224 kfree(bctl); 3225 ret = del_balance_item(fs_info); 3226 if (ret) 3227 btrfs_handle_fs_error(fs_info, ret, NULL); 3228 } 3229 3230 /* 3231 * Balance filters. Return 1 if chunk should be filtered out 3232 * (should not be balanced). 
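 *
 * For example, the profiles filter just below reduces the chunk type to
 * the extended profile bits and keeps the chunk only if it matches one
 * of the requested profiles (condensed):
 *
 *	chunk_type = chunk_to_extended(chunk_type) &
 *		     BTRFS_EXTENDED_PROFILE_MASK;
 *	return (bargs->profiles & chunk_type) ? 0 : 1;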
3233 */ 3234 static int chunk_profiles_filter(u64 chunk_type, 3235 struct btrfs_balance_args *bargs) 3236 { 3237 chunk_type = chunk_to_extended(chunk_type) & 3238 BTRFS_EXTENDED_PROFILE_MASK; 3239 3240 if (bargs->profiles & chunk_type) 3241 return 0; 3242 3243 return 1; 3244 } 3245 3246 static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset, 3247 struct btrfs_balance_args *bargs) 3248 { 3249 struct btrfs_block_group_cache *cache; 3250 u64 chunk_used; 3251 u64 user_thresh_min; 3252 u64 user_thresh_max; 3253 int ret = 1; 3254 3255 cache = btrfs_lookup_block_group(fs_info, chunk_offset); 3256 chunk_used = btrfs_block_group_used(&cache->item); 3257 3258 if (bargs->usage_min == 0) 3259 user_thresh_min = 0; 3260 else 3261 user_thresh_min = div_factor_fine(cache->key.offset, 3262 bargs->usage_min); 3263 3264 if (bargs->usage_max == 0) 3265 user_thresh_max = 1; 3266 else if (bargs->usage_max > 100) 3267 user_thresh_max = cache->key.offset; 3268 else 3269 user_thresh_max = div_factor_fine(cache->key.offset, 3270 bargs->usage_max); 3271 3272 if (user_thresh_min <= chunk_used && chunk_used < user_thresh_max) 3273 ret = 0; 3274 3275 btrfs_put_block_group(cache); 3276 return ret; 3277 } 3278 3279 static int chunk_usage_filter(struct btrfs_fs_info *fs_info, 3280 u64 chunk_offset, struct btrfs_balance_args *bargs) 3281 { 3282 struct btrfs_block_group_cache *cache; 3283 u64 chunk_used, user_thresh; 3284 int ret = 1; 3285 3286 cache = btrfs_lookup_block_group(fs_info, chunk_offset); 3287 chunk_used = btrfs_block_group_used(&cache->item); 3288 3289 if (bargs->usage_min == 0) 3290 user_thresh = 1; 3291 else if (bargs->usage > 100) 3292 user_thresh = cache->key.offset; 3293 else 3294 user_thresh = div_factor_fine(cache->key.offset, 3295 bargs->usage); 3296 3297 if (chunk_used < user_thresh) 3298 ret = 0; 3299 3300 btrfs_put_block_group(cache); 3301 return ret; 3302 } 3303 3304 static int chunk_devid_filter(struct extent_buffer *leaf, 3305 struct btrfs_chunk *chunk, 3306 struct btrfs_balance_args *bargs) 3307 { 3308 struct btrfs_stripe *stripe; 3309 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 3310 int i; 3311 3312 for (i = 0; i < num_stripes; i++) { 3313 stripe = btrfs_stripe_nr(chunk, i); 3314 if (btrfs_stripe_devid(leaf, stripe) == bargs->devid) 3315 return 0; 3316 } 3317 3318 return 1; 3319 } 3320 3321 /* [pstart, pend) */ 3322 static int chunk_drange_filter(struct extent_buffer *leaf, 3323 struct btrfs_chunk *chunk, 3324 struct btrfs_balance_args *bargs) 3325 { 3326 struct btrfs_stripe *stripe; 3327 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 3328 u64 stripe_offset; 3329 u64 stripe_length; 3330 int factor; 3331 int i; 3332 3333 if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID)) 3334 return 0; 3335 3336 if (btrfs_chunk_type(leaf, chunk) & (BTRFS_BLOCK_GROUP_DUP | 3337 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10)) { 3338 factor = num_stripes / 2; 3339 } else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID5) { 3340 factor = num_stripes - 1; 3341 } else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID6) { 3342 factor = num_stripes - 2; 3343 } else { 3344 factor = num_stripes; 3345 } 3346 3347 for (i = 0; i < num_stripes; i++) { 3348 stripe = btrfs_stripe_nr(chunk, i); 3349 if (btrfs_stripe_devid(leaf, stripe) != bargs->devid) 3350 continue; 3351 3352 stripe_offset = btrfs_stripe_offset(leaf, stripe); 3353 stripe_length = btrfs_chunk_length(leaf, chunk); 3354 stripe_length = div_u64(stripe_length, factor); 3355 3356 if (stripe_offset 
< bargs->pend && 3357 stripe_offset + stripe_length > bargs->pstart) 3358 return 0; 3359 } 3360 3361 return 1; 3362 } 3363 3364 /* [vstart, vend) */ 3365 static int chunk_vrange_filter(struct extent_buffer *leaf, 3366 struct btrfs_chunk *chunk, 3367 u64 chunk_offset, 3368 struct btrfs_balance_args *bargs) 3369 { 3370 if (chunk_offset < bargs->vend && 3371 chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart) 3372 /* at least part of the chunk is inside this vrange */ 3373 return 0; 3374 3375 return 1; 3376 } 3377 3378 static int chunk_stripes_range_filter(struct extent_buffer *leaf, 3379 struct btrfs_chunk *chunk, 3380 struct btrfs_balance_args *bargs) 3381 { 3382 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 3383 3384 if (bargs->stripes_min <= num_stripes 3385 && num_stripes <= bargs->stripes_max) 3386 return 0; 3387 3388 return 1; 3389 } 3390 3391 static int chunk_soft_convert_filter(u64 chunk_type, 3392 struct btrfs_balance_args *bargs) 3393 { 3394 if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT)) 3395 return 0; 3396 3397 chunk_type = chunk_to_extended(chunk_type) & 3398 BTRFS_EXTENDED_PROFILE_MASK; 3399 3400 if (bargs->target == chunk_type) 3401 return 1; 3402 3403 return 0; 3404 } 3405 3406 static int should_balance_chunk(struct btrfs_fs_info *fs_info, 3407 struct extent_buffer *leaf, 3408 struct btrfs_chunk *chunk, u64 chunk_offset) 3409 { 3410 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 3411 struct btrfs_balance_args *bargs = NULL; 3412 u64 chunk_type = btrfs_chunk_type(leaf, chunk); 3413 3414 /* type filter */ 3415 if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) & 3416 (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) { 3417 return 0; 3418 } 3419 3420 if (chunk_type & BTRFS_BLOCK_GROUP_DATA) 3421 bargs = &bctl->data; 3422 else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) 3423 bargs = &bctl->sys; 3424 else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA) 3425 bargs = &bctl->meta; 3426 3427 /* profiles filter */ 3428 if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) && 3429 chunk_profiles_filter(chunk_type, bargs)) { 3430 return 0; 3431 } 3432 3433 /* usage filter */ 3434 if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) && 3435 chunk_usage_filter(fs_info, chunk_offset, bargs)) { 3436 return 0; 3437 } else if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) && 3438 chunk_usage_range_filter(fs_info, chunk_offset, bargs)) { 3439 return 0; 3440 } 3441 3442 /* devid filter */ 3443 if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) && 3444 chunk_devid_filter(leaf, chunk, bargs)) { 3445 return 0; 3446 } 3447 3448 /* drange filter, makes sense only with devid filter */ 3449 if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) && 3450 chunk_drange_filter(leaf, chunk, bargs)) { 3451 return 0; 3452 } 3453 3454 /* vrange filter */ 3455 if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) && 3456 chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) { 3457 return 0; 3458 } 3459 3460 /* stripes filter */ 3461 if ((bargs->flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) && 3462 chunk_stripes_range_filter(leaf, chunk, bargs)) { 3463 return 0; 3464 } 3465 3466 /* soft profile changing mode */ 3467 if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) && 3468 chunk_soft_convert_filter(chunk_type, bargs)) { 3469 return 0; 3470 } 3471 3472 /* 3473 * limited by count, must be the last filter 3474 */ 3475 if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT)) { 3476 if (bargs->limit == 0) 3477 return 0; 3478 else 3479 bargs->limit--; 3480 } else if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)) { 3481 /* 3482 * Same logic 
as the 'limit' filter; the minimum cannot be
		 * determined here because we do not have the global
		 * information about the count of all chunks that satisfy
		 * the filters.
		 */
		if (bargs->limit_max == 0)
			return 0;
		else
			bargs->limit_max--;
	}

	return 1;
}

static int __btrfs_balance(struct btrfs_fs_info *fs_info)
{
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
	struct btrfs_root *chunk_root = fs_info->chunk_root;
	struct btrfs_root *dev_root = fs_info->dev_root;
	struct list_head *devices;
	struct btrfs_device *device;
	u64 old_size;
	u64 size_to_free;
	u64 chunk_type;
	struct btrfs_chunk *chunk;
	struct btrfs_path *path = NULL;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_trans_handle *trans;
	struct extent_buffer *leaf;
	int slot;
	int ret;
	int enospc_errors = 0;
	bool counting = true;
	/*
	 * The single value limit and the min/max limits share the same bytes
	 * in struct btrfs_balance_args (they live in a union), so save the
	 * values here before the counting pass clobbers them.
	 */
	u64 limit_data = bctl->data.limit;
	u64 limit_meta = bctl->meta.limit;
	u64 limit_sys = bctl->sys.limit;
	u32 count_data = 0;
	u32 count_meta = 0;
	u32 count_sys = 0;
	int chunk_reserved = 0;

	/* step one: make some room on all the devices */
	devices = &fs_info->fs_devices->devices;
	list_for_each_entry(device, devices, dev_list) {
		old_size = btrfs_device_get_total_bytes(device);
		size_to_free = div_factor(old_size, 1);
		size_to_free = min_t(u64, size_to_free, SZ_1M);
		if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) ||
		    btrfs_device_get_total_bytes(device) -
		    btrfs_device_get_bytes_used(device) > size_to_free ||
		    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
			continue;

		ret = btrfs_shrink_device(device, old_size - size_to_free);
		if (ret == -ENOSPC)
			break;
		if (ret) {
			/* btrfs_shrink_device never returns ret > 0 */
			WARN_ON(ret > 0);
			goto error;
		}

		trans = btrfs_start_transaction(dev_root, 0);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			btrfs_info_in_rcu(fs_info,
		 "resize: unable to start transaction after shrinking device %s (error %d), old size %llu, new size %llu",
					  rcu_str_deref(device->name), ret,
					  old_size, old_size - size_to_free);
			goto error;
		}

		ret = btrfs_grow_device(trans, device, old_size);
		if (ret) {
			btrfs_end_transaction(trans);
			/* btrfs_grow_device never returns ret > 0 */
			WARN_ON(ret > 0);
			btrfs_info_in_rcu(fs_info,
		 "resize: unable to grow device after shrinking device %s (error %d), old size %llu, new size %llu",
					  rcu_str_deref(device->name), ret,
					  old_size, old_size - size_to_free);
			goto error;
		}

		btrfs_end_transaction(trans);
	}

	/* step two, relocate all the chunks */
	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto error;
	}

	/* zero out stat counters */
	spin_lock(&fs_info->balance_lock);
	memset(&bctl->stat, 0, sizeof(bctl->stat));
	spin_unlock(&fs_info->balance_lock);
again:
	if (!counting) {
		/*
		 * The single value limit and the min/max limits share the
		 * same bytes in the struct, so restore the limits that were
		 * saved before the counting pass.
		 */
		bctl->data.limit = limit_data;
		bctl->meta.limit = limit_meta;
		bctl->sys.limit = limit_sys;
	}
	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = (u64)-1;
	key.type =
BTRFS_CHUNK_ITEM_KEY; 3594 3595 while (1) { 3596 if ((!counting && atomic_read(&fs_info->balance_pause_req)) || 3597 atomic_read(&fs_info->balance_cancel_req)) { 3598 ret = -ECANCELED; 3599 goto error; 3600 } 3601 3602 mutex_lock(&fs_info->delete_unused_bgs_mutex); 3603 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0); 3604 if (ret < 0) { 3605 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 3606 goto error; 3607 } 3608 3609 /* 3610 * this shouldn't happen, it means the last relocate 3611 * failed 3612 */ 3613 if (ret == 0) 3614 BUG(); /* FIXME break ? */ 3615 3616 ret = btrfs_previous_item(chunk_root, path, 0, 3617 BTRFS_CHUNK_ITEM_KEY); 3618 if (ret) { 3619 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 3620 ret = 0; 3621 break; 3622 } 3623 3624 leaf = path->nodes[0]; 3625 slot = path->slots[0]; 3626 btrfs_item_key_to_cpu(leaf, &found_key, slot); 3627 3628 if (found_key.objectid != key.objectid) { 3629 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 3630 break; 3631 } 3632 3633 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk); 3634 chunk_type = btrfs_chunk_type(leaf, chunk); 3635 3636 if (!counting) { 3637 spin_lock(&fs_info->balance_lock); 3638 bctl->stat.considered++; 3639 spin_unlock(&fs_info->balance_lock); 3640 } 3641 3642 ret = should_balance_chunk(fs_info, leaf, chunk, 3643 found_key.offset); 3644 3645 btrfs_release_path(path); 3646 if (!ret) { 3647 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 3648 goto loop; 3649 } 3650 3651 if (counting) { 3652 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 3653 spin_lock(&fs_info->balance_lock); 3654 bctl->stat.expected++; 3655 spin_unlock(&fs_info->balance_lock); 3656 3657 if (chunk_type & BTRFS_BLOCK_GROUP_DATA) 3658 count_data++; 3659 else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) 3660 count_sys++; 3661 else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA) 3662 count_meta++; 3663 3664 goto loop; 3665 } 3666 3667 /* 3668 * Apply limit_min filter, no need to check if the LIMITS 3669 * filter is used, limit_min is 0 by default 3670 */ 3671 if (((chunk_type & BTRFS_BLOCK_GROUP_DATA) && 3672 count_data < bctl->data.limit_min) 3673 || ((chunk_type & BTRFS_BLOCK_GROUP_METADATA) && 3674 count_meta < bctl->meta.limit_min) 3675 || ((chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) && 3676 count_sys < bctl->sys.limit_min)) { 3677 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 3678 goto loop; 3679 } 3680 3681 if (!chunk_reserved) { 3682 /* 3683 * We may be relocating the only data chunk we have, 3684 * which could potentially end up with losing data's 3685 * raid profile, so lets allocate an empty one in 3686 * advance. 
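			 *
			 * Condensed from btrfs_may_alloc_data_chunk() above:
			 * if the chunk is a data chunk and
			 * data_sinfo->bytes_used is zero, force-allocate one
			 * empty data chunk first,
			 *
			 *	ret = btrfs_force_chunk_alloc(trans, fs_info,
			 *			BTRFS_BLOCK_GROUP_DATA);
			 *
			 * so the data raid profile survives the relocation.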
			 */
			ret = btrfs_may_alloc_data_chunk(fs_info,
							 found_key.offset);
			if (ret < 0) {
				mutex_unlock(&fs_info->delete_unused_bgs_mutex);
				goto error;
			} else if (ret == 1) {
				chunk_reserved = 1;
			}
		}

		ret = btrfs_relocate_chunk(fs_info, found_key.offset);
		mutex_unlock(&fs_info->delete_unused_bgs_mutex);
		if (ret && ret != -ENOSPC)
			goto error;
		if (ret == -ENOSPC) {
			enospc_errors++;
		} else {
			spin_lock(&fs_info->balance_lock);
			bctl->stat.completed++;
			spin_unlock(&fs_info->balance_lock);
		}
loop:
		if (found_key.offset == 0)
			break;
		key.offset = found_key.offset - 1;
	}

	if (counting) {
		btrfs_release_path(path);
		counting = false;
		goto again;
	}
error:
	btrfs_free_path(path);
	if (enospc_errors) {
		btrfs_info(fs_info, "%d enospc errors during balance",
			   enospc_errors);
		if (!ret)
			ret = -ENOSPC;
	}

	return ret;
}

/**
 * alloc_profile_is_valid - see if a given profile is valid and reduced
 * @flags: profile to validate
 * @extended: if true @flags is treated as an extended profile
 */
static int alloc_profile_is_valid(u64 flags, int extended)
{
	u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK :
		    BTRFS_BLOCK_GROUP_PROFILE_MASK);

	flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;

	/* 1) check that all other bits are zeroed */
	if (flags & ~mask)
		return 0;

	/* 2) see if profile is reduced */
	if (flags == 0)
		return !extended; /* "0" is valid for usual profiles */

	/* true if exactly one bit set */
	return (flags & (flags - 1)) == 0;
}

static inline int balance_need_close(struct btrfs_fs_info *fs_info)
{
	/* cancel requested || normal exit path */
	return atomic_read(&fs_info->balance_cancel_req) ||
		(atomic_read(&fs_info->balance_pause_req) == 0 &&
		 atomic_read(&fs_info->balance_cancel_req) == 0);
}

/* Non-zero return value signifies invalidity */
static inline int validate_convert_profile(struct btrfs_balance_args *bctl_arg,
		u64 allowed)
{
	return ((bctl_arg->flags & BTRFS_BALANCE_ARGS_CONVERT) &&
		(!alloc_profile_is_valid(bctl_arg->target, 1) ||
		 (bctl_arg->target & ~allowed)));
}

/*
 * Should be called with the balance mutex held.
 */
int btrfs_balance(struct btrfs_fs_info *fs_info,
		  struct btrfs_balance_control *bctl,
		  struct btrfs_ioctl_balance_args *bargs)
{
	u64 meta_target, data_target;
	u64 allowed;
	int mixed = 0;
	int ret;
	u64 num_devices;
	unsigned seq;

	if (btrfs_fs_closing(fs_info) ||
	    atomic_read(&fs_info->balance_pause_req) ||
	    atomic_read(&fs_info->balance_cancel_req)) {
		ret = -EINVAL;
		goto out;
	}

	allowed = btrfs_super_incompat_flags(fs_info->super_copy);
	if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
		mixed = 1;

	/*
	 * In case of mixed groups both data and meta should be picked,
	 * and identical options should be given for both of them.
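	 *
	 * An acceptable setup therefore mirrors the args (illustrative
	 * sketch, assuming a caller that fills bctl by hand):
	 *
	 *	bctl->flags |= BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA;
	 *	memcpy(&bctl->meta, &bctl->data, sizeof(bctl->data));
	 *
	 * anything else makes the memcmp() check below fail with -EINVAL.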
3801 */ 3802 allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA; 3803 if (mixed && (bctl->flags & allowed)) { 3804 if (!(bctl->flags & BTRFS_BALANCE_DATA) || 3805 !(bctl->flags & BTRFS_BALANCE_METADATA) || 3806 memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) { 3807 btrfs_err(fs_info, 3808 "balance: mixed groups data and metadata options must be the same"); 3809 ret = -EINVAL; 3810 goto out; 3811 } 3812 } 3813 3814 num_devices = fs_info->fs_devices->num_devices; 3815 btrfs_dev_replace_read_lock(&fs_info->dev_replace); 3816 if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) { 3817 BUG_ON(num_devices < 1); 3818 num_devices--; 3819 } 3820 btrfs_dev_replace_read_unlock(&fs_info->dev_replace); 3821 allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE | BTRFS_BLOCK_GROUP_DUP; 3822 if (num_devices > 1) 3823 allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1); 3824 if (num_devices > 2) 3825 allowed |= BTRFS_BLOCK_GROUP_RAID5; 3826 if (num_devices > 3) 3827 allowed |= (BTRFS_BLOCK_GROUP_RAID10 | 3828 BTRFS_BLOCK_GROUP_RAID6); 3829 if (validate_convert_profile(&bctl->data, allowed)) { 3830 int index = btrfs_bg_flags_to_raid_index(bctl->data.target); 3831 3832 btrfs_err(fs_info, 3833 "balance: invalid convert data profile %s", 3834 get_raid_name(index)); 3835 ret = -EINVAL; 3836 goto out; 3837 } 3838 if (validate_convert_profile(&bctl->meta, allowed)) { 3839 int index = btrfs_bg_flags_to_raid_index(bctl->meta.target); 3840 3841 btrfs_err(fs_info, 3842 "balance: invalid convert metadata profile %s", 3843 get_raid_name(index)); 3844 ret = -EINVAL; 3845 goto out; 3846 } 3847 if (validate_convert_profile(&bctl->sys, allowed)) { 3848 int index = btrfs_bg_flags_to_raid_index(bctl->sys.target); 3849 3850 btrfs_err(fs_info, 3851 "balance: invalid convert system profile %s", 3852 get_raid_name(index)); 3853 ret = -EINVAL; 3854 goto out; 3855 } 3856 3857 /* allow to reduce meta or sys integrity only if force set */ 3858 allowed = BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 | 3859 BTRFS_BLOCK_GROUP_RAID10 | 3860 BTRFS_BLOCK_GROUP_RAID5 | 3861 BTRFS_BLOCK_GROUP_RAID6; 3862 do { 3863 seq = read_seqbegin(&fs_info->profiles_lock); 3864 3865 if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) && 3866 (fs_info->avail_system_alloc_bits & allowed) && 3867 !(bctl->sys.target & allowed)) || 3868 ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) && 3869 (fs_info->avail_metadata_alloc_bits & allowed) && 3870 !(bctl->meta.target & allowed))) { 3871 if (bctl->flags & BTRFS_BALANCE_FORCE) { 3872 btrfs_info(fs_info, 3873 "balance: force reducing metadata integrity"); 3874 } else { 3875 btrfs_err(fs_info, 3876 "balance: reduces metadata integrity, use --force if you want this"); 3877 ret = -EINVAL; 3878 goto out; 3879 } 3880 } 3881 } while (read_seqretry(&fs_info->profiles_lock, seq)); 3882 3883 /* if we're not converting, the target field is uninitialized */ 3884 meta_target = (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) ? 3885 bctl->meta.target : fs_info->avail_metadata_alloc_bits; 3886 data_target = (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) ? 
bctl->data.target : fs_info->avail_data_alloc_bits; 3888 if (btrfs_get_num_tolerated_disk_barrier_failures(meta_target) < 3889 btrfs_get_num_tolerated_disk_barrier_failures(data_target)) { 3890 int meta_index = btrfs_bg_flags_to_raid_index(meta_target); 3891 int data_index = btrfs_bg_flags_to_raid_index(data_target); 3892 3893 btrfs_warn(fs_info, 3894 "balance: metadata profile %s has lower redundancy than data profile %s", 3895 get_raid_name(meta_index), get_raid_name(data_index)); 3896 } 3897 3898 ret = insert_balance_item(fs_info, bctl); 3899 if (ret && ret != -EEXIST) 3900 goto out; 3901 3902 if (!(bctl->flags & BTRFS_BALANCE_RESUME)) { 3903 BUG_ON(ret == -EEXIST); 3904 BUG_ON(fs_info->balance_ctl); 3905 spin_lock(&fs_info->balance_lock); 3906 fs_info->balance_ctl = bctl; 3907 spin_unlock(&fs_info->balance_lock); 3908 } else { 3909 BUG_ON(ret != -EEXIST); 3910 spin_lock(&fs_info->balance_lock); 3911 update_balance_args(bctl); 3912 spin_unlock(&fs_info->balance_lock); 3913 } 3914 3915 ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 3916 set_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags); 3917 mutex_unlock(&fs_info->balance_mutex); 3918 3919 ret = __btrfs_balance(fs_info); 3920 3921 mutex_lock(&fs_info->balance_mutex); 3922 clear_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags); 3923 3924 if (bargs) { 3925 memset(bargs, 0, sizeof(*bargs)); 3926 btrfs_update_ioctl_balance_args(fs_info, bargs); 3927 } 3928 3929 if ((ret && ret != -ECANCELED && ret != -ENOSPC) || 3930 balance_need_close(fs_info)) { 3931 reset_balance_state(fs_info); 3932 clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags); 3933 } 3934 3935 wake_up(&fs_info->balance_wait_q); 3936 3937 return ret; 3938 out: 3939 if (bctl->flags & BTRFS_BALANCE_RESUME) 3940 reset_balance_state(fs_info); 3941 else 3942 kfree(bctl); 3943 clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags); 3944 3945 return ret; 3946 } 3947 3948 static int balance_kthread(void *data) 3949 { 3950 struct btrfs_fs_info *fs_info = data; 3951 int ret = 0; 3952 3953 mutex_lock(&fs_info->balance_mutex); 3954 if (fs_info->balance_ctl) { 3955 btrfs_info(fs_info, "balance: resuming"); 3956 ret = btrfs_balance(fs_info, fs_info->balance_ctl, NULL); 3957 } 3958 mutex_unlock(&fs_info->balance_mutex); 3959 3960 return ret; 3961 } 3962 3963 int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info) 3964 { 3965 struct task_struct *tsk; 3966 3967 mutex_lock(&fs_info->balance_mutex); 3968 if (!fs_info->balance_ctl) { 3969 mutex_unlock(&fs_info->balance_mutex); 3970 return 0; 3971 } 3972 mutex_unlock(&fs_info->balance_mutex); 3973 3974 if (btrfs_test_opt(fs_info, SKIP_BALANCE)) { 3975 btrfs_info(fs_info, "balance: resume skipped"); 3976 return 0; 3977 } 3978 3979 /* 3980 * A ro->rw remount sequence should continue with the paused balance 3981 * regardless of who paused it (the system or the user), so set 3982 * the resume flag.
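 * (Typical sequence: the balance is paused, either explicitly or by a
 * ro remount; on the next rw remount this function spawns
 * balance_kthread, which picks up fs_info->balance_ctl and calls
 * btrfs_balance() again.)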
3983 */ 3984 spin_lock(&fs_info->balance_lock); 3985 fs_info->balance_ctl->flags |= BTRFS_BALANCE_RESUME; 3986 spin_unlock(&fs_info->balance_lock); 3987 3988 tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance"); 3989 return PTR_ERR_OR_ZERO(tsk); 3990 } 3991 3992 int btrfs_recover_balance(struct btrfs_fs_info *fs_info) 3993 { 3994 struct btrfs_balance_control *bctl; 3995 struct btrfs_balance_item *item; 3996 struct btrfs_disk_balance_args disk_bargs; 3997 struct btrfs_path *path; 3998 struct extent_buffer *leaf; 3999 struct btrfs_key key; 4000 int ret; 4001 4002 path = btrfs_alloc_path(); 4003 if (!path) 4004 return -ENOMEM; 4005 4006 key.objectid = BTRFS_BALANCE_OBJECTID; 4007 key.type = BTRFS_TEMPORARY_ITEM_KEY; 4008 key.offset = 0; 4009 4010 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0); 4011 if (ret < 0) 4012 goto out; 4013 if (ret > 0) { /* ret = -ENOENT; */ 4014 ret = 0; 4015 goto out; 4016 } 4017 4018 bctl = kzalloc(sizeof(*bctl), GFP_NOFS); 4019 if (!bctl) { 4020 ret = -ENOMEM; 4021 goto out; 4022 } 4023 4024 leaf = path->nodes[0]; 4025 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item); 4026 4027 bctl->flags = btrfs_balance_flags(leaf, item); 4028 bctl->flags |= BTRFS_BALANCE_RESUME; 4029 4030 btrfs_balance_data(leaf, item, &disk_bargs); 4031 btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs); 4032 btrfs_balance_meta(leaf, item, &disk_bargs); 4033 btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs); 4034 btrfs_balance_sys(leaf, item, &disk_bargs); 4035 btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs); 4036 4037 /* 4038 * This should never happen, as the paused balance state is recovered 4039 * during mount without any chance of other exclusive ops to collide. 4040 * 4041 * This gives the exclusive op status to balance and keeps in paused 4042 * state until user intervention (cancel or umount). If the ownership 4043 * cannot be assigned, show a message but do not fail. The balance 4044 * is in a paused state and must have fs_info::balance_ctl properly 4045 * set up. 
4046 */ 4047 if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags)) 4048 btrfs_warn(fs_info, 4049 "balance: cannot set exclusive op status, resume manually"); 4050 4051 mutex_lock(&fs_info->balance_mutex); 4052 BUG_ON(fs_info->balance_ctl); 4053 spin_lock(&fs_info->balance_lock); 4054 fs_info->balance_ctl = bctl; 4055 spin_unlock(&fs_info->balance_lock); 4056 mutex_unlock(&fs_info->balance_mutex); 4057 out: 4058 btrfs_free_path(path); 4059 return ret; 4060 } 4061 4062 int btrfs_pause_balance(struct btrfs_fs_info *fs_info) 4063 { 4064 int ret = 0; 4065 4066 mutex_lock(&fs_info->balance_mutex); 4067 if (!fs_info->balance_ctl) { 4068 mutex_unlock(&fs_info->balance_mutex); 4069 return -ENOTCONN; 4070 } 4071 4072 if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) { 4073 atomic_inc(&fs_info->balance_pause_req); 4074 mutex_unlock(&fs_info->balance_mutex); 4075 4076 wait_event(fs_info->balance_wait_q, 4077 !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4078 4079 mutex_lock(&fs_info->balance_mutex); 4080 /* we are good with balance_ctl ripped off from under us */ 4081 BUG_ON(test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4082 atomic_dec(&fs_info->balance_pause_req); 4083 } else { 4084 ret = -ENOTCONN; 4085 } 4086 4087 mutex_unlock(&fs_info->balance_mutex); 4088 return ret; 4089 } 4090 4091 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info) 4092 { 4093 mutex_lock(&fs_info->balance_mutex); 4094 if (!fs_info->balance_ctl) { 4095 mutex_unlock(&fs_info->balance_mutex); 4096 return -ENOTCONN; 4097 } 4098 4099 /* 4100 * A paused balance with the item stored on disk can be resumed at 4101 * mount time if the mount is read-write. Otherwise it's still paused 4102 * and we must not allow cancelling as it deletes the item. 4103 */ 4104 if (sb_rdonly(fs_info->sb)) { 4105 mutex_unlock(&fs_info->balance_mutex); 4106 return -EROFS; 4107 } 4108 4109 atomic_inc(&fs_info->balance_cancel_req); 4110 /* 4111 * if we are running just wait and return, balance item is 4112 * deleted in btrfs_balance in this case 4113 */ 4114 if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) { 4115 mutex_unlock(&fs_info->balance_mutex); 4116 wait_event(fs_info->balance_wait_q, 4117 !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4118 mutex_lock(&fs_info->balance_mutex); 4119 } else { 4120 mutex_unlock(&fs_info->balance_mutex); 4121 /* 4122 * Lock released to allow other waiters to continue, we'll 4123 * reexamine the status again. 
4124 */ 4125 mutex_lock(&fs_info->balance_mutex); 4126 4127 if (fs_info->balance_ctl) { 4128 reset_balance_state(fs_info); 4129 clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags); 4130 btrfs_info(fs_info, "balance: canceled"); 4131 } 4132 } 4133 4134 BUG_ON(fs_info->balance_ctl || 4135 test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4136 atomic_dec(&fs_info->balance_cancel_req); 4137 mutex_unlock(&fs_info->balance_mutex); 4138 return 0; 4139 } 4140 4141 static int btrfs_uuid_scan_kthread(void *data) 4142 { 4143 struct btrfs_fs_info *fs_info = data; 4144 struct btrfs_root *root = fs_info->tree_root; 4145 struct btrfs_key key; 4146 struct btrfs_path *path = NULL; 4147 int ret = 0; 4148 struct extent_buffer *eb; 4149 int slot; 4150 struct btrfs_root_item root_item; 4151 u32 item_size; 4152 struct btrfs_trans_handle *trans = NULL; 4153 4154 path = btrfs_alloc_path(); 4155 if (!path) { 4156 ret = -ENOMEM; 4157 goto out; 4158 } 4159 4160 key.objectid = 0; 4161 key.type = BTRFS_ROOT_ITEM_KEY; 4162 key.offset = 0; 4163 4164 while (1) { 4165 ret = btrfs_search_forward(root, &key, path, 4166 BTRFS_OLDEST_GENERATION); 4167 if (ret) { 4168 if (ret > 0) 4169 ret = 0; 4170 break; 4171 } 4172 4173 if (key.type != BTRFS_ROOT_ITEM_KEY || 4174 (key.objectid < BTRFS_FIRST_FREE_OBJECTID && 4175 key.objectid != BTRFS_FS_TREE_OBJECTID) || 4176 key.objectid > BTRFS_LAST_FREE_OBJECTID) 4177 goto skip; 4178 4179 eb = path->nodes[0]; 4180 slot = path->slots[0]; 4181 item_size = btrfs_item_size_nr(eb, slot); 4182 if (item_size < sizeof(root_item)) 4183 goto skip; 4184 4185 read_extent_buffer(eb, &root_item, 4186 btrfs_item_ptr_offset(eb, slot), 4187 (int)sizeof(root_item)); 4188 if (btrfs_root_refs(&root_item) == 0) 4189 goto skip; 4190 4191 if (!btrfs_is_empty_uuid(root_item.uuid) || 4192 !btrfs_is_empty_uuid(root_item.received_uuid)) { 4193 if (trans) 4194 goto update_tree; 4195 4196 btrfs_release_path(path); 4197 /* 4198 * 1 - subvol uuid item 4199 * 1 - received_subvol uuid item 4200 */ 4201 trans = btrfs_start_transaction(fs_info->uuid_root, 2); 4202 if (IS_ERR(trans)) { 4203 ret = PTR_ERR(trans); 4204 break; 4205 } 4206 continue; 4207 } else { 4208 goto skip; 4209 } 4210 update_tree: 4211 if (!btrfs_is_empty_uuid(root_item.uuid)) { 4212 ret = btrfs_uuid_tree_add(trans, root_item.uuid, 4213 BTRFS_UUID_KEY_SUBVOL, 4214 key.objectid); 4215 if (ret < 0) { 4216 btrfs_warn(fs_info, "uuid_tree_add failed %d", 4217 ret); 4218 break; 4219 } 4220 } 4221 4222 if (!btrfs_is_empty_uuid(root_item.received_uuid)) { 4223 ret = btrfs_uuid_tree_add(trans, 4224 root_item.received_uuid, 4225 BTRFS_UUID_KEY_RECEIVED_SUBVOL, 4226 key.objectid); 4227 if (ret < 0) { 4228 btrfs_warn(fs_info, "uuid_tree_add failed %d", 4229 ret); 4230 break; 4231 } 4232 } 4233 4234 skip: 4235 if (trans) { 4236 ret = btrfs_end_transaction(trans); 4237 trans = NULL; 4238 if (ret) 4239 break; 4240 } 4241 4242 btrfs_release_path(path); 4243 if (key.offset < (u64)-1) { 4244 key.offset++; 4245 } else if (key.type < BTRFS_ROOT_ITEM_KEY) { 4246 key.offset = 0; 4247 key.type = BTRFS_ROOT_ITEM_KEY; 4248 } else if (key.objectid < (u64)-1) { 4249 key.offset = 0; 4250 key.type = BTRFS_ROOT_ITEM_KEY; 4251 key.objectid++; 4252 } else { 4253 break; 4254 } 4255 cond_resched(); 4256 } 4257 4258 out: 4259 btrfs_free_path(path); 4260 if (trans && !IS_ERR(trans)) 4261 btrfs_end_transaction(trans); 4262 if (ret) 4263 btrfs_warn(fs_info, "btrfs_uuid_scan_kthread failed %d", ret); 4264 else 4265 set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags); 4266 
up(&fs_info->uuid_tree_rescan_sem); 4267 return 0; 4268 } 4269 4270 /* 4271 * Callback for btrfs_uuid_tree_iterate(). 4272 * returns: 4273 * 0 check succeeded, the entry is not outdated. 4274 * < 0 if an error occurred. 4275 * > 0 if the check failed, which means the caller shall remove the entry. 4276 */ 4277 static int btrfs_check_uuid_tree_entry(struct btrfs_fs_info *fs_info, 4278 u8 *uuid, u8 type, u64 subid) 4279 { 4280 struct btrfs_key key; 4281 int ret = 0; 4282 struct btrfs_root *subvol_root; 4283 4284 if (type != BTRFS_UUID_KEY_SUBVOL && 4285 type != BTRFS_UUID_KEY_RECEIVED_SUBVOL) 4286 goto out; 4287 4288 key.objectid = subid; 4289 key.type = BTRFS_ROOT_ITEM_KEY; 4290 key.offset = (u64)-1; 4291 subvol_root = btrfs_read_fs_root_no_name(fs_info, &key); 4292 if (IS_ERR(subvol_root)) { 4293 ret = PTR_ERR(subvol_root); 4294 if (ret == -ENOENT) 4295 ret = 1; 4296 goto out; 4297 } 4298 4299 switch (type) { 4300 case BTRFS_UUID_KEY_SUBVOL: 4301 if (memcmp(uuid, subvol_root->root_item.uuid, BTRFS_UUID_SIZE)) 4302 ret = 1; 4303 break; 4304 case BTRFS_UUID_KEY_RECEIVED_SUBVOL: 4305 if (memcmp(uuid, subvol_root->root_item.received_uuid, 4306 BTRFS_UUID_SIZE)) 4307 ret = 1; 4308 break; 4309 } 4310 4311 out: 4312 return ret; 4313 } 4314 4315 static int btrfs_uuid_rescan_kthread(void *data) 4316 { 4317 struct btrfs_fs_info *fs_info = (struct btrfs_fs_info *)data; 4318 int ret; 4319 4320 /* 4321 * 1st step is to iterate through the existing UUID tree and 4322 * to delete all entries that contain outdated data. 4323 * 2nd step is to add all missing entries to the UUID tree. 4324 */ 4325 ret = btrfs_uuid_tree_iterate(fs_info, btrfs_check_uuid_tree_entry); 4326 if (ret < 0) { 4327 btrfs_warn(fs_info, "iterating uuid_tree failed %d", ret); 4328 up(&fs_info->uuid_tree_rescan_sem); 4329 return ret; 4330 } 4331 return btrfs_uuid_scan_kthread(data); 4332 } 4333 4334 int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info) 4335 { 4336 struct btrfs_trans_handle *trans; 4337 struct btrfs_root *tree_root = fs_info->tree_root; 4338 struct btrfs_root *uuid_root; 4339 struct task_struct *task; 4340 int ret; 4341 4342 /* 4343 * 1 - root node 4344 * 1 - root item 4345 */ 4346 trans = btrfs_start_transaction(tree_root, 2); 4347 if (IS_ERR(trans)) 4348 return PTR_ERR(trans); 4349 4350 uuid_root = btrfs_create_tree(trans, fs_info, 4351 BTRFS_UUID_TREE_OBJECTID); 4352 if (IS_ERR(uuid_root)) { 4353 ret = PTR_ERR(uuid_root); 4354 btrfs_abort_transaction(trans, ret); 4355 btrfs_end_transaction(trans); 4356 return ret; 4357 } 4358 4359 fs_info->uuid_root = uuid_root; 4360 4361 ret = btrfs_commit_transaction(trans); 4362 if (ret) 4363 return ret; 4364 4365 down(&fs_info->uuid_tree_rescan_sem); 4366 task = kthread_run(btrfs_uuid_scan_kthread, fs_info, "btrfs-uuid"); 4367 if (IS_ERR(task)) { 4368 /* fs_info->update_uuid_tree_gen remains 0 in all error cases */ 4369 btrfs_warn(fs_info, "failed to start uuid_scan task"); 4370 up(&fs_info->uuid_tree_rescan_sem); 4371 return PTR_ERR(task); 4372 } 4373 4374 return 0; 4375 } 4376 4377 int btrfs_check_uuid_tree(struct btrfs_fs_info *fs_info) 4378 { 4379 struct task_struct *task; 4380 4381 down(&fs_info->uuid_tree_rescan_sem); 4382 task = kthread_run(btrfs_uuid_rescan_kthread, fs_info, "btrfs-uuid"); 4383 if (IS_ERR(task)) { 4384 /* fs_info->update_uuid_tree_gen remains 0 in all error cases */ 4385 btrfs_warn(fs_info, "failed to start uuid_rescan task"); 4386 up(&fs_info->uuid_tree_rescan_sem); 4387 return PTR_ERR(task); 4388 } 4389 4390 return 0; 4391 } 4392 4393 /* 4394 *
Shrinking a device means finding all of the device extents past 4395 * the new size, and then following the back refs to the chunks. 4396 * The chunk relocation code actually frees the device extent. 4397 */ 4398 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size) 4399 { 4400 struct btrfs_fs_info *fs_info = device->fs_info; 4401 struct btrfs_root *root = fs_info->dev_root; 4402 struct btrfs_trans_handle *trans; 4403 struct btrfs_dev_extent *dev_extent = NULL; 4404 struct btrfs_path *path; 4405 u64 length; 4406 u64 chunk_offset; 4407 int ret; 4408 int slot; 4409 int failed = 0; 4410 bool retried = false; 4411 bool checked_pending_chunks = false; 4412 struct extent_buffer *l; 4413 struct btrfs_key key; 4414 struct btrfs_super_block *super_copy = fs_info->super_copy; 4415 u64 old_total = btrfs_super_total_bytes(super_copy); 4416 u64 old_size = btrfs_device_get_total_bytes(device); 4417 u64 diff; 4418 4419 new_size = round_down(new_size, fs_info->sectorsize); 4420 diff = round_down(old_size - new_size, fs_info->sectorsize); 4421 4422 if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) 4423 return -EINVAL; 4424 4425 path = btrfs_alloc_path(); 4426 if (!path) 4427 return -ENOMEM; 4428 4429 path->reada = READA_BACK; 4430 4431 mutex_lock(&fs_info->chunk_mutex); 4432 4433 btrfs_device_set_total_bytes(device, new_size); 4434 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 4435 device->fs_devices->total_rw_bytes -= diff; 4436 atomic64_sub(diff, &fs_info->free_chunk_space); 4437 } 4438 mutex_unlock(&fs_info->chunk_mutex); 4439 4440 again: 4441 key.objectid = device->devid; 4442 key.offset = (u64)-1; 4443 key.type = BTRFS_DEV_EXTENT_KEY; 4444 4445 do { 4446 mutex_lock(&fs_info->delete_unused_bgs_mutex); 4447 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 4448 if (ret < 0) { 4449 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 4450 goto done; 4451 } 4452 4453 ret = btrfs_previous_item(root, path, 0, key.type); 4454 if (ret) 4455 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 4456 if (ret < 0) 4457 goto done; 4458 if (ret) { 4459 ret = 0; 4460 btrfs_release_path(path); 4461 break; 4462 } 4463 4464 l = path->nodes[0]; 4465 slot = path->slots[0]; 4466 btrfs_item_key_to_cpu(l, &key, path->slots[0]); 4467 4468 if (key.objectid != device->devid) { 4469 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 4470 btrfs_release_path(path); 4471 break; 4472 } 4473 4474 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent); 4475 length = btrfs_dev_extent_length(l, dev_extent); 4476 4477 if (key.offset + length <= new_size) { 4478 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 4479 btrfs_release_path(path); 4480 break; 4481 } 4482 4483 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent); 4484 btrfs_release_path(path); 4485 4486 /* 4487 * We may be relocating the only data chunk we have, 4488 * which could potentially end up losing the data's 4489 * raid profile, so let's allocate an empty one in 4490 * advance.
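 * (E.g. if the only data block group uses raid1 and we relocate it
 * without reserving a new one first, the filesystem could be left with
 * no data block group carrying that profile, and a later allocation
 * might fall back to a different profile.)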
4491 */ 4492 ret = btrfs_may_alloc_data_chunk(fs_info, chunk_offset); 4493 if (ret < 0) { 4494 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 4495 goto done; 4496 } 4497 4498 ret = btrfs_relocate_chunk(fs_info, chunk_offset); 4499 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 4500 if (ret && ret != -ENOSPC) 4501 goto done; 4502 if (ret == -ENOSPC) 4503 failed++; 4504 } while (key.offset-- > 0); 4505 4506 if (failed && !retried) { 4507 failed = 0; 4508 retried = true; 4509 goto again; 4510 } else if (failed && retried) { 4511 ret = -ENOSPC; 4512 goto done; 4513 } 4514 4515 /* Shrinking succeeded, else we would be at "done". */ 4516 trans = btrfs_start_transaction(root, 0); 4517 if (IS_ERR(trans)) { 4518 ret = PTR_ERR(trans); 4519 goto done; 4520 } 4521 4522 mutex_lock(&fs_info->chunk_mutex); 4523 4524 /* 4525 * We checked in the above loop all device extents that were already in 4526 * the device tree. However before we have updated the device's 4527 * total_bytes to the new size, we might have had chunk allocations that 4528 * have not complete yet (new block groups attached to transaction 4529 * handles), and therefore their device extents were not yet in the 4530 * device tree and we missed them in the loop above. So if we have any 4531 * pending chunk using a device extent that overlaps the device range 4532 * that we can not use anymore, commit the current transaction and 4533 * repeat the search on the device tree - this way we guarantee we will 4534 * not have chunks using device extents that end beyond 'new_size'. 4535 */ 4536 if (!checked_pending_chunks) { 4537 u64 start = new_size; 4538 u64 len = old_size - new_size; 4539 4540 if (contains_pending_extent(trans->transaction, device, 4541 &start, len)) { 4542 mutex_unlock(&fs_info->chunk_mutex); 4543 checked_pending_chunks = true; 4544 failed = 0; 4545 retried = false; 4546 ret = btrfs_commit_transaction(trans); 4547 if (ret) 4548 goto done; 4549 goto again; 4550 } 4551 } 4552 4553 btrfs_device_set_disk_total_bytes(device, new_size); 4554 if (list_empty(&device->resized_list)) 4555 list_add_tail(&device->resized_list, 4556 &fs_info->fs_devices->resized_devices); 4557 4558 WARN_ON(diff > old_total); 4559 btrfs_set_super_total_bytes(super_copy, 4560 round_down(old_total - diff, fs_info->sectorsize)); 4561 mutex_unlock(&fs_info->chunk_mutex); 4562 4563 /* Now btrfs_update_device() will change the on-disk size. 
*/ 4564 ret = btrfs_update_device(trans, device); 4565 btrfs_end_transaction(trans); 4566 done: 4567 btrfs_free_path(path); 4568 if (ret) { 4569 mutex_lock(&fs_info->chunk_mutex); 4570 btrfs_device_set_total_bytes(device, old_size); 4571 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) 4572 device->fs_devices->total_rw_bytes += diff; 4573 atomic64_add(diff, &fs_info->free_chunk_space); 4574 mutex_unlock(&fs_info->chunk_mutex); 4575 } 4576 return ret; 4577 } 4578 4579 static int btrfs_add_system_chunk(struct btrfs_fs_info *fs_info, 4580 struct btrfs_key *key, 4581 struct btrfs_chunk *chunk, int item_size) 4582 { 4583 struct btrfs_super_block *super_copy = fs_info->super_copy; 4584 struct btrfs_disk_key disk_key; 4585 u32 array_size; 4586 u8 *ptr; 4587 4588 mutex_lock(&fs_info->chunk_mutex); 4589 array_size = btrfs_super_sys_array_size(super_copy); 4590 if (array_size + item_size + sizeof(disk_key) 4591 > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) { 4592 mutex_unlock(&fs_info->chunk_mutex); 4593 return -EFBIG; 4594 } 4595 4596 ptr = super_copy->sys_chunk_array + array_size; 4597 btrfs_cpu_key_to_disk(&disk_key, key); 4598 memcpy(ptr, &disk_key, sizeof(disk_key)); 4599 ptr += sizeof(disk_key); 4600 memcpy(ptr, chunk, item_size); 4601 item_size += sizeof(disk_key); 4602 btrfs_set_super_sys_array_size(super_copy, array_size + item_size); 4603 mutex_unlock(&fs_info->chunk_mutex); 4604 4605 return 0; 4606 } 4607 4608 /* 4609 * sort the devices in descending order by max_avail, total_avail 4610 */ 4611 static int btrfs_cmp_device_info(const void *a, const void *b) 4612 { 4613 const struct btrfs_device_info *di_a = a; 4614 const struct btrfs_device_info *di_b = b; 4615 4616 if (di_a->max_avail > di_b->max_avail) 4617 return -1; 4618 if (di_a->max_avail < di_b->max_avail) 4619 return 1; 4620 if (di_a->total_avail > di_b->total_avail) 4621 return -1; 4622 if (di_a->total_avail < di_b->total_avail) 4623 return 1; 4624 return 0; 4625 } 4626 4627 static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type) 4628 { 4629 if (!(type & BTRFS_BLOCK_GROUP_RAID56_MASK)) 4630 return; 4631 4632 btrfs_set_fs_incompat(info, RAID56); 4633 } 4634 4635 #define BTRFS_MAX_DEVS(info) ((BTRFS_MAX_ITEM_SIZE(info) \ 4636 - sizeof(struct btrfs_chunk)) \ 4637 / sizeof(struct btrfs_stripe) + 1) 4638 4639 #define BTRFS_MAX_DEVS_SYS_CHUNK ((BTRFS_SYSTEM_CHUNK_ARRAY_SIZE \ 4640 - 2 * sizeof(struct btrfs_disk_key) \ 4641 - 2 * sizeof(struct btrfs_chunk)) \ 4642 / sizeof(struct btrfs_stripe) + 1) 4643 4644 static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans, 4645 u64 start, u64 type) 4646 { 4647 struct btrfs_fs_info *info = trans->fs_info; 4648 struct btrfs_fs_devices *fs_devices = info->fs_devices; 4649 struct btrfs_device *device; 4650 struct map_lookup *map = NULL; 4651 struct extent_map_tree *em_tree; 4652 struct extent_map *em; 4653 struct btrfs_device_info *devices_info = NULL; 4654 u64 total_avail; 4655 int num_stripes; /* total number of stripes to allocate */ 4656 int data_stripes; /* number of stripes that count for 4657 block group size */ 4658 int sub_stripes; /* sub_stripes info for map */ 4659 int dev_stripes; /* stripes per dev */ 4660 int devs_max; /* max devs to use */ 4661 int devs_min; /* min devs needed */ 4662 int devs_increment; /* ndevs has to be a multiple of this */ 4663 int ncopies; /* how many copies the data has */ 4664 int ret; 4665 u64 max_stripe_size; 4666 u64 max_chunk_size; 4667 u64 stripe_size; 4668 u64 num_bytes; 4669 int ndevs; 4670 int i; 4671 int j; 4672 int index;
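	/*
	 * A short summary of the steps below, with an illustrative example
	 * (the numbers are made up):
	 *
	 * 1) look up the per-profile parameters in btrfs_raid_array
	 * 2) gather the largest free hole (max_avail) of each writeable device
	 * 3) sort the devices by hole size and round ndevs down to a multiple
	 *    of devs_increment
	 * 4) derive stripe_size from the smallest chosen hole and clamp the
	 *    chunk to max_chunk_size
	 *
	 * E.g. raid1 on three devices: devs_increment = 2 rounds ndevs down
	 * to 2, num_stripes = 2 * dev_stripes = 2, and with ncopies = 2 the
	 * chunk provides data_stripes = 2 / 2 = 1 stripe worth of usable
	 * space.
	 */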
4673 4674 BUG_ON(!alloc_profile_is_valid(type, 0)); 4675 4676 if (list_empty(&fs_devices->alloc_list)) { 4677 if (btrfs_test_opt(info, ENOSPC_DEBUG)) 4678 btrfs_debug(info, "%s: no writable device", __func__); 4679 return -ENOSPC; 4680 } 4681 4682 index = btrfs_bg_flags_to_raid_index(type); 4683 4684 sub_stripes = btrfs_raid_array[index].sub_stripes; 4685 dev_stripes = btrfs_raid_array[index].dev_stripes; 4686 devs_max = btrfs_raid_array[index].devs_max; 4687 devs_min = btrfs_raid_array[index].devs_min; 4688 devs_increment = btrfs_raid_array[index].devs_increment; 4689 ncopies = btrfs_raid_array[index].ncopies; 4690 4691 if (type & BTRFS_BLOCK_GROUP_DATA) { 4692 max_stripe_size = SZ_1G; 4693 max_chunk_size = 10 * max_stripe_size; 4694 if (!devs_max) 4695 devs_max = BTRFS_MAX_DEVS(info); 4696 } else if (type & BTRFS_BLOCK_GROUP_METADATA) { 4697 /* for larger filesystems, use larger metadata chunks */ 4698 if (fs_devices->total_rw_bytes > 50ULL * SZ_1G) 4699 max_stripe_size = SZ_1G; 4700 else 4701 max_stripe_size = SZ_256M; 4702 max_chunk_size = max_stripe_size; 4703 if (!devs_max) 4704 devs_max = BTRFS_MAX_DEVS(info); 4705 } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) { 4706 max_stripe_size = SZ_32M; 4707 max_chunk_size = 2 * max_stripe_size; 4708 if (!devs_max) 4709 devs_max = BTRFS_MAX_DEVS_SYS_CHUNK; 4710 } else { 4711 btrfs_err(info, "invalid chunk type 0x%llx requested", 4712 type); 4713 BUG_ON(1); 4714 } 4715 4716 /* we don't want a chunk larger than 10% of writeable space */ 4717 max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1), 4718 max_chunk_size); 4719 4720 devices_info = kcalloc(fs_devices->rw_devices, sizeof(*devices_info), 4721 GFP_NOFS); 4722 if (!devices_info) 4723 return -ENOMEM; 4724 4725 /* 4726 * in the first pass through the devices list, we gather information 4727 * about the available holes on each device. 4728 */ 4729 ndevs = 0; 4730 list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) { 4731 u64 max_avail; 4732 u64 dev_offset; 4733 4734 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { 4735 WARN(1, KERN_ERR 4736 "BTRFS: read-only device in alloc_list\n"); 4737 continue; 4738 } 4739 4740 if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, 4741 &device->dev_state) || 4742 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) 4743 continue; 4744 4745 if (device->total_bytes > device->bytes_used) 4746 total_avail = device->total_bytes - device->bytes_used; 4747 else 4748 total_avail = 0; 4749 4750 /* If there is no space on this device, skip it. 
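 * (total_avail was clamped to zero above: bytes_used can transiently
 * exceed total_bytes, e.g. while btrfs_shrink_device() has already
 * lowered total_bytes but not yet relocated the extents past the new
 * size.)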
*/ 4751 if (total_avail == 0) 4752 continue; 4753 4754 ret = find_free_dev_extent(trans, device, 4755 max_stripe_size * dev_stripes, 4756 &dev_offset, &max_avail); 4757 if (ret && ret != -ENOSPC) 4758 goto error; 4759 4760 if (ret == 0) 4761 max_avail = max_stripe_size * dev_stripes; 4762 4763 if (max_avail < BTRFS_STRIPE_LEN * dev_stripes) { 4764 if (btrfs_test_opt(info, ENOSPC_DEBUG)) 4765 btrfs_debug(info, 4766 "%s: devid %llu has no free space, have=%llu want=%u", 4767 __func__, device->devid, max_avail, 4768 BTRFS_STRIPE_LEN * dev_stripes); 4769 continue; 4770 } 4771 4772 if (ndevs == fs_devices->rw_devices) { 4773 WARN(1, "%s: found more than %llu devices\n", 4774 __func__, fs_devices->rw_devices); 4775 break; 4776 } 4777 devices_info[ndevs].dev_offset = dev_offset; 4778 devices_info[ndevs].max_avail = max_avail; 4779 devices_info[ndevs].total_avail = total_avail; 4780 devices_info[ndevs].dev = device; 4781 ++ndevs; 4782 } 4783 4784 /* 4785 * now sort the devices by hole size / available space 4786 */ 4787 sort(devices_info, ndevs, sizeof(struct btrfs_device_info), 4788 btrfs_cmp_device_info, NULL); 4789 4790 /* round down to number of usable stripes */ 4791 ndevs = round_down(ndevs, devs_increment); 4792 4793 if (ndevs < devs_min) { 4794 ret = -ENOSPC; 4795 if (btrfs_test_opt(info, ENOSPC_DEBUG)) { 4796 btrfs_debug(info, 4797 "%s: not enough devices with free space: have=%d minimum required=%d", 4798 __func__, ndevs, devs_min); 4799 } 4800 goto error; 4801 } 4802 4803 ndevs = min(ndevs, devs_max); 4804 4805 /* 4806 * The primary goal is to maximize the number of stripes, so use as 4807 * many devices as possible, even if the stripes are not maximum sized. 4808 * 4809 * The DUP profile stores more than one stripe per device, the 4810 * max_avail is the total size so we have to adjust. 
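 * For example, with DUP (dev_stripes == 2) a device whose largest hole
 * is 2GiB can hold two 1GiB stripes, hence the division by dev_stripes
 * right below.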
4811 */ 4812 stripe_size = div_u64(devices_info[ndevs - 1].max_avail, dev_stripes); 4813 num_stripes = ndevs * dev_stripes; 4814 4815 /* 4816 * this will have to be fixed for RAID1 and RAID10 over 4817 * more drives 4818 */ 4819 data_stripes = num_stripes / ncopies; 4820 4821 if (type & BTRFS_BLOCK_GROUP_RAID5) 4822 data_stripes = num_stripes - 1; 4823 4824 if (type & BTRFS_BLOCK_GROUP_RAID6) 4825 data_stripes = num_stripes - 2; 4826 4827 /* 4828 * Use the number of data stripes to figure out how big this chunk 4829 * is really going to be in terms of logical address space, 4830 * and compare that answer with the max chunk size 4831 */ 4832 if (stripe_size * data_stripes > max_chunk_size) { 4833 stripe_size = div_u64(max_chunk_size, data_stripes); 4834 4835 /* bump the answer up to a 16MB boundary */ 4836 stripe_size = round_up(stripe_size, SZ_16M); 4837 4838 /* 4839 * But don't go higher than the limits we found while searching 4840 * for free extents 4841 */ 4842 stripe_size = min(devices_info[ndevs - 1].max_avail, 4843 stripe_size); 4844 } 4845 4846 /* align to BTRFS_STRIPE_LEN */ 4847 stripe_size = round_down(stripe_size, BTRFS_STRIPE_LEN); 4848 4849 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS); 4850 if (!map) { 4851 ret = -ENOMEM; 4852 goto error; 4853 } 4854 map->num_stripes = num_stripes; 4855 4856 for (i = 0; i < ndevs; ++i) { 4857 for (j = 0; j < dev_stripes; ++j) { 4858 int s = i * dev_stripes + j; 4859 map->stripes[s].dev = devices_info[i].dev; 4860 map->stripes[s].physical = devices_info[i].dev_offset + 4861 j * stripe_size; 4862 } 4863 } 4864 map->stripe_len = BTRFS_STRIPE_LEN; 4865 map->io_align = BTRFS_STRIPE_LEN; 4866 map->io_width = BTRFS_STRIPE_LEN; 4867 map->type = type; 4868 map->sub_stripes = sub_stripes; 4869 4870 num_bytes = stripe_size * data_stripes; 4871 4872 trace_btrfs_chunk_alloc(info, map, start, num_bytes); 4873 4874 em = alloc_extent_map(); 4875 if (!em) { 4876 kfree(map); 4877 ret = -ENOMEM; 4878 goto error; 4879 } 4880 set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags); 4881 em->map_lookup = map; 4882 em->start = start; 4883 em->len = num_bytes; 4884 em->block_start = 0; 4885 em->block_len = em->len; 4886 em->orig_block_len = stripe_size; 4887 4888 em_tree = &info->mapping_tree.map_tree; 4889 write_lock(&em_tree->lock); 4890 ret = add_extent_mapping(em_tree, em, 0); 4891 if (ret) { 4892 write_unlock(&em_tree->lock); 4893 free_extent_map(em); 4894 goto error; 4895 } 4896 4897 list_add_tail(&em->list, &trans->transaction->pending_chunks); 4898 refcount_inc(&em->refs); 4899 write_unlock(&em_tree->lock); 4900 4901 ret = btrfs_make_block_group(trans, info, 0, type, start, num_bytes); 4902 if (ret) 4903 goto error_del_extent; 4904 4905 for (i = 0; i < map->num_stripes; i++) { 4906 num_bytes = map->stripes[i].dev->bytes_used + stripe_size; 4907 btrfs_device_set_bytes_used(map->stripes[i].dev, num_bytes); 4908 } 4909 4910 atomic64_sub(stripe_size * map->num_stripes, &info->free_chunk_space); 4911 4912 free_extent_map(em); 4913 check_raid56_incompat_flag(info, type); 4914 4915 kfree(devices_info); 4916 return 0; 4917 4918 error_del_extent: 4919 write_lock(&em_tree->lock); 4920 remove_extent_mapping(em_tree, em); 4921 write_unlock(&em_tree->lock); 4922 4923 /* One for our allocation */ 4924 free_extent_map(em); 4925 /* One for the tree reference */ 4926 free_extent_map(em); 4927 /* One for the pending_chunks list reference */ 4928 free_extent_map(em); 4929 error: 4930 kfree(devices_info); 4931 return ret; 4932 } 4933 4934 int btrfs_finish_chunk_alloc(struct 
btrfs_trans_handle *trans, 4935 struct btrfs_fs_info *fs_info, 4936 u64 chunk_offset, u64 chunk_size) 4937 { 4938 struct btrfs_root *extent_root = fs_info->extent_root; 4939 struct btrfs_root *chunk_root = fs_info->chunk_root; 4940 struct btrfs_key key; 4941 struct btrfs_device *device; 4942 struct btrfs_chunk *chunk; 4943 struct btrfs_stripe *stripe; 4944 struct extent_map *em; 4945 struct map_lookup *map; 4946 size_t item_size; 4947 u64 dev_offset; 4948 u64 stripe_size; 4949 int i = 0; 4950 int ret = 0; 4951 4952 em = get_chunk_map(fs_info, chunk_offset, chunk_size); 4953 if (IS_ERR(em)) 4954 return PTR_ERR(em); 4955 4956 map = em->map_lookup; 4957 item_size = btrfs_chunk_item_size(map->num_stripes); 4958 stripe_size = em->orig_block_len; 4959 4960 chunk = kzalloc(item_size, GFP_NOFS); 4961 if (!chunk) { 4962 ret = -ENOMEM; 4963 goto out; 4964 } 4965 4966 /* 4967 * Take the device list mutex to prevent races with the final phase of 4968 * a device replace operation that replaces the device object associated 4969 * with the map's stripes, because the device object's id can change 4970 * at any time during that final phase of the device replace operation 4971 * (dev-replace.c:btrfs_dev_replace_finishing()). 4972 */ 4973 mutex_lock(&fs_info->fs_devices->device_list_mutex); 4974 for (i = 0; i < map->num_stripes; i++) { 4975 device = map->stripes[i].dev; 4976 dev_offset = map->stripes[i].physical; 4977 4978 ret = btrfs_update_device(trans, device); 4979 if (ret) 4980 break; 4981 ret = btrfs_alloc_dev_extent(trans, device, chunk_offset, 4982 dev_offset, stripe_size); 4983 if (ret) 4984 break; 4985 } 4986 if (ret) { 4987 mutex_unlock(&fs_info->fs_devices->device_list_mutex); 4988 goto out; 4989 } 4990 4991 stripe = &chunk->stripe; 4992 for (i = 0; i < map->num_stripes; i++) { 4993 device = map->stripes[i].dev; 4994 dev_offset = map->stripes[i].physical; 4995 4996 btrfs_set_stack_stripe_devid(stripe, device->devid); 4997 btrfs_set_stack_stripe_offset(stripe, dev_offset); 4998 memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE); 4999 stripe++; 5000 } 5001 mutex_unlock(&fs_info->fs_devices->device_list_mutex); 5002 5003 btrfs_set_stack_chunk_length(chunk, chunk_size); 5004 btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid); 5005 btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len); 5006 btrfs_set_stack_chunk_type(chunk, map->type); 5007 btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes); 5008 btrfs_set_stack_chunk_io_align(chunk, map->stripe_len); 5009 btrfs_set_stack_chunk_io_width(chunk, map->stripe_len); 5010 btrfs_set_stack_chunk_sector_size(chunk, fs_info->sectorsize); 5011 btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes); 5012 5013 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 5014 key.type = BTRFS_CHUNK_ITEM_KEY; 5015 key.offset = chunk_offset; 5016 5017 ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size); 5018 if (ret == 0 && map->type & BTRFS_BLOCK_GROUP_SYSTEM) { 5019 /* 5020 * TODO: Cleanup of inserted chunk root in case of 5021 * failure. 5022 */ 5023 ret = btrfs_add_system_chunk(fs_info, &key, chunk, item_size); 5024 } 5025 5026 out: 5027 kfree(chunk); 5028 free_extent_map(em); 5029 return ret; 5030 } 5031 5032 /* 5033 * Chunk allocation falls into two parts. The first part does the work 5034 * that makes the newly allocated chunk usable, but does not do any 5035 * operation that modifies the chunk tree. The second part does the work 5036 * that requires modifying the chunk tree.
This division is important for the 5037 * bootstrap process of adding storage to a seed btrfs. 5038 */ 5039 int btrfs_alloc_chunk(struct btrfs_trans_handle *trans, 5040 struct btrfs_fs_info *fs_info, u64 type) 5041 { 5042 u64 chunk_offset; 5043 5044 lockdep_assert_held(&fs_info->chunk_mutex); 5045 chunk_offset = find_next_chunk(fs_info); 5046 return __btrfs_alloc_chunk(trans, chunk_offset, type); 5047 } 5048 5049 static noinline int init_first_rw_device(struct btrfs_trans_handle *trans, 5050 struct btrfs_fs_info *fs_info) 5051 { 5052 u64 chunk_offset; 5053 u64 sys_chunk_offset; 5054 u64 alloc_profile; 5055 int ret; 5056 5057 chunk_offset = find_next_chunk(fs_info); 5058 alloc_profile = btrfs_metadata_alloc_profile(fs_info); 5059 ret = __btrfs_alloc_chunk(trans, chunk_offset, alloc_profile); 5060 if (ret) 5061 return ret; 5062 5063 sys_chunk_offset = find_next_chunk(fs_info); 5064 alloc_profile = btrfs_system_alloc_profile(fs_info); 5065 ret = __btrfs_alloc_chunk(trans, sys_chunk_offset, alloc_profile); 5066 return ret; 5067 } 5068 5069 static inline int btrfs_chunk_max_errors(struct map_lookup *map) 5070 { 5071 int max_errors; 5072 5073 if (map->type & (BTRFS_BLOCK_GROUP_RAID1 | 5074 BTRFS_BLOCK_GROUP_RAID10 | 5075 BTRFS_BLOCK_GROUP_RAID5 | 5076 BTRFS_BLOCK_GROUP_DUP)) { 5077 max_errors = 1; 5078 } else if (map->type & BTRFS_BLOCK_GROUP_RAID6) { 5079 max_errors = 2; 5080 } else { 5081 max_errors = 0; 5082 } 5083 5084 return max_errors; 5085 } 5086 5087 int btrfs_chunk_readonly(struct btrfs_fs_info *fs_info, u64 chunk_offset) 5088 { 5089 struct extent_map *em; 5090 struct map_lookup *map; 5091 int readonly = 0; 5092 int miss_ndevs = 0; 5093 int i; 5094 5095 em = get_chunk_map(fs_info, chunk_offset, 1); 5096 if (IS_ERR(em)) 5097 return 1; 5098 5099 map = em->map_lookup; 5100 for (i = 0; i < map->num_stripes; i++) { 5101 if (test_bit(BTRFS_DEV_STATE_MISSING, 5102 &map->stripes[i].dev->dev_state)) { 5103 miss_ndevs++; 5104 continue; 5105 } 5106 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, 5107 &map->stripes[i].dev->dev_state)) { 5108 readonly = 1; 5109 goto end; 5110 } 5111 } 5112 5113 /* 5114 * If the number of missing devices is larger than max errors, 5115 * we cannot write the data into that chunk successfully, so 5116 * set it readonly. 5117 */ 5118 if (miss_ndevs > btrfs_chunk_max_errors(map)) 5119 readonly = 1; 5120 end: 5121 free_extent_map(em); 5122 return readonly; 5123 } 5124 5125 void btrfs_mapping_init(struct btrfs_mapping_tree *tree) 5126 { 5127 extent_map_tree_init(&tree->map_tree); 5128 } 5129 5130 void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree) 5131 { 5132 struct extent_map *em; 5133 5134 while (1) { 5135 write_lock(&tree->map_tree.lock); 5136 em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1); 5137 if (em) 5138 remove_extent_mapping(&tree->map_tree, em); 5139 write_unlock(&tree->map_tree.lock); 5140 if (!em) 5141 break; 5142 /* once for us */ 5143 free_extent_map(em); 5144 /* once for the tree */ 5145 free_extent_map(em); 5146 } 5147 } 5148 5149 int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len) 5150 { 5151 struct extent_map *em; 5152 struct map_lookup *map; 5153 int ret; 5154 5155 em = get_chunk_map(fs_info, logical, len); 5156 if (IS_ERR(em)) 5157 /* 5158 * We could return errors for these cases, but that could get 5159 * ugly and we'd probably end up doing the same thing anyway, 5160 * which is just exit without doing anything else, so return 1 5161 * so the callers don't try to use other copies.
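 * (A return value of 1 simply tells the callers there is a single copy,
 * so they stop after the first failed read instead of asking for more
 * mirrors.)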
5162 */ 5163 return 1; 5164 5165 map = em->map_lookup; 5166 if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1)) 5167 ret = map->num_stripes; 5168 else if (map->type & BTRFS_BLOCK_GROUP_RAID10) 5169 ret = map->sub_stripes; 5170 else if (map->type & BTRFS_BLOCK_GROUP_RAID5) 5171 ret = 2; 5172 else if (map->type & BTRFS_BLOCK_GROUP_RAID6) 5173 /* 5174 * There could be two corrupted data stripes, we need 5175 * to loop retry in order to rebuild the correct data. 5176 * 5177 * Fail a stripe at a time on every retry except the 5178 * stripe under reconstruction. 5179 */ 5180 ret = map->num_stripes; 5181 else 5182 ret = 1; 5183 free_extent_map(em); 5184 5185 btrfs_dev_replace_read_lock(&fs_info->dev_replace); 5186 if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace) && 5187 fs_info->dev_replace.tgtdev) 5188 ret++; 5189 btrfs_dev_replace_read_unlock(&fs_info->dev_replace); 5190 5191 return ret; 5192 } 5193 5194 unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info, 5195 u64 logical) 5196 { 5197 struct extent_map *em; 5198 struct map_lookup *map; 5199 unsigned long len = fs_info->sectorsize; 5200 5201 em = get_chunk_map(fs_info, logical, len); 5202 5203 if (!WARN_ON(IS_ERR(em))) { 5204 map = em->map_lookup; 5205 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) 5206 len = map->stripe_len * nr_data_stripes(map); 5207 free_extent_map(em); 5208 } 5209 return len; 5210 } 5211 5212 int btrfs_is_parity_mirror(struct btrfs_fs_info *fs_info, u64 logical, u64 len) 5213 { 5214 struct extent_map *em; 5215 struct map_lookup *map; 5216 int ret = 0; 5217 5218 em = get_chunk_map(fs_info, logical, len); 5219 5220 if(!WARN_ON(IS_ERR(em))) { 5221 map = em->map_lookup; 5222 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) 5223 ret = 1; 5224 free_extent_map(em); 5225 } 5226 return ret; 5227 } 5228 5229 static int find_live_mirror(struct btrfs_fs_info *fs_info, 5230 struct map_lookup *map, int first, 5231 int dev_replace_is_ongoing) 5232 { 5233 int i; 5234 int num_stripes; 5235 int preferred_mirror; 5236 int tolerance; 5237 struct btrfs_device *srcdev; 5238 5239 ASSERT((map->type & 5240 (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10))); 5241 5242 if (map->type & BTRFS_BLOCK_GROUP_RAID10) 5243 num_stripes = map->sub_stripes; 5244 else 5245 num_stripes = map->num_stripes; 5246 5247 preferred_mirror = first + current->pid % num_stripes; 5248 5249 if (dev_replace_is_ongoing && 5250 fs_info->dev_replace.cont_reading_from_srcdev_mode == 5251 BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID) 5252 srcdev = fs_info->dev_replace.srcdev; 5253 else 5254 srcdev = NULL; 5255 5256 /* 5257 * try to avoid the drive that is the source drive for a 5258 * dev-replace procedure, only choose it if no other non-missing 5259 * mirror is available 5260 */ 5261 for (tolerance = 0; tolerance < 2; tolerance++) { 5262 if (map->stripes[preferred_mirror].dev->bdev && 5263 (tolerance || map->stripes[preferred_mirror].dev != srcdev)) 5264 return preferred_mirror; 5265 for (i = first; i < first + num_stripes; i++) { 5266 if (map->stripes[i].dev->bdev && 5267 (tolerance || map->stripes[i].dev != srcdev)) 5268 return i; 5269 } 5270 } 5271 5272 /* we couldn't find one that doesn't fail. 
Just return something 5273 * and the io error handling code will clean up eventually 5274 */ 5275 return preferred_mirror; 5276 } 5277 5278 static inline int parity_smaller(u64 a, u64 b) 5279 { 5280 return a > b; 5281 } 5282 5283 /* Bubble-sort the stripe set to put the parity/syndrome stripes last */ 5284 static void sort_parity_stripes(struct btrfs_bio *bbio, int num_stripes) 5285 { 5286 struct btrfs_bio_stripe s; 5287 int i; 5288 u64 l; 5289 int again = 1; 5290 5291 while (again) { 5292 again = 0; 5293 for (i = 0; i < num_stripes - 1; i++) { 5294 if (parity_smaller(bbio->raid_map[i], 5295 bbio->raid_map[i+1])) { 5296 s = bbio->stripes[i]; 5297 l = bbio->raid_map[i]; 5298 bbio->stripes[i] = bbio->stripes[i+1]; 5299 bbio->raid_map[i] = bbio->raid_map[i+1]; 5300 bbio->stripes[i+1] = s; 5301 bbio->raid_map[i+1] = l; 5302 5303 again = 1; 5304 } 5305 } 5306 } 5307 } 5308 5309 static struct btrfs_bio *alloc_btrfs_bio(int total_stripes, int real_stripes) 5310 { 5311 struct btrfs_bio *bbio = kzalloc( 5312 /* the size of the btrfs_bio */ 5313 sizeof(struct btrfs_bio) + 5314 /* plus the variable array for the stripes */ 5315 sizeof(struct btrfs_bio_stripe) * (total_stripes) + 5316 /* plus the variable array for the tgt dev */ 5317 sizeof(int) * (real_stripes) + 5318 /* 5319 * plus the raid_map, which includes both the tgt dev 5320 * and the stripes 5321 */ 5322 sizeof(u64) * (total_stripes), 5323 GFP_NOFS|__GFP_NOFAIL); 5324 5325 atomic_set(&bbio->error, 0); 5326 refcount_set(&bbio->refs, 1); 5327 5328 return bbio; 5329 } 5330 5331 void btrfs_get_bbio(struct btrfs_bio *bbio) 5332 { 5333 WARN_ON(!refcount_read(&bbio->refs)); 5334 refcount_inc(&bbio->refs); 5335 } 5336 5337 void btrfs_put_bbio(struct btrfs_bio *bbio) 5338 { 5339 if (!bbio) 5340 return; 5341 if (refcount_dec_and_test(&bbio->refs)) 5342 kfree(bbio); 5343 } 5344 5345 /* can REQ_OP_DISCARD be sent with other REQ like REQ_OP_WRITE? */ 5346 /* 5347 * Note that discard won't be sent to the target device of a device 5348 * replace.
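 * (Consistently with that, __btrfs_map_block_for_discard() below
 * allocates its btrfs_bio with alloc_btrfs_bio(num_stripes, 0), i.e.
 * without any target-device stripes.)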
5349 */ 5350 static int __btrfs_map_block_for_discard(struct btrfs_fs_info *fs_info, 5351 u64 logical, u64 length, 5352 struct btrfs_bio **bbio_ret) 5353 { 5354 struct extent_map *em; 5355 struct map_lookup *map; 5356 struct btrfs_bio *bbio; 5357 u64 offset; 5358 u64 stripe_nr; 5359 u64 stripe_nr_end; 5360 u64 stripe_end_offset; 5361 u64 stripe_cnt; 5362 u64 stripe_len; 5363 u64 stripe_offset; 5364 u64 num_stripes; 5365 u32 stripe_index; 5366 u32 factor = 0; 5367 u32 sub_stripes = 0; 5368 u64 stripes_per_dev = 0; 5369 u32 remaining_stripes = 0; 5370 u32 last_stripe = 0; 5371 int ret = 0; 5372 int i; 5373 5374 /* discard always return a bbio */ 5375 ASSERT(bbio_ret); 5376 5377 em = get_chunk_map(fs_info, logical, length); 5378 if (IS_ERR(em)) 5379 return PTR_ERR(em); 5380 5381 map = em->map_lookup; 5382 /* we don't discard raid56 yet */ 5383 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { 5384 ret = -EOPNOTSUPP; 5385 goto out; 5386 } 5387 5388 offset = logical - em->start; 5389 length = min_t(u64, em->len - offset, length); 5390 5391 stripe_len = map->stripe_len; 5392 /* 5393 * stripe_nr counts the total number of stripes we have to stride 5394 * to get to this block 5395 */ 5396 stripe_nr = div64_u64(offset, stripe_len); 5397 5398 /* stripe_offset is the offset of this block in its stripe */ 5399 stripe_offset = offset - stripe_nr * stripe_len; 5400 5401 stripe_nr_end = round_up(offset + length, map->stripe_len); 5402 stripe_nr_end = div64_u64(stripe_nr_end, map->stripe_len); 5403 stripe_cnt = stripe_nr_end - stripe_nr; 5404 stripe_end_offset = stripe_nr_end * map->stripe_len - 5405 (offset + length); 5406 /* 5407 * after this, stripe_nr is the number of stripes on this 5408 * device we have to walk to find the data, and stripe_index is 5409 * the number of our device in the stripe array 5410 */ 5411 num_stripes = 1; 5412 stripe_index = 0; 5413 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | 5414 BTRFS_BLOCK_GROUP_RAID10)) { 5415 if (map->type & BTRFS_BLOCK_GROUP_RAID0) 5416 sub_stripes = 1; 5417 else 5418 sub_stripes = map->sub_stripes; 5419 5420 factor = map->num_stripes / sub_stripes; 5421 num_stripes = min_t(u64, map->num_stripes, 5422 sub_stripes * stripe_cnt); 5423 stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index); 5424 stripe_index *= sub_stripes; 5425 stripes_per_dev = div_u64_rem(stripe_cnt, factor, 5426 &remaining_stripes); 5427 div_u64_rem(stripe_nr_end - 1, factor, &last_stripe); 5428 last_stripe *= sub_stripes; 5429 } else if (map->type & (BTRFS_BLOCK_GROUP_RAID1 | 5430 BTRFS_BLOCK_GROUP_DUP)) { 5431 num_stripes = map->num_stripes; 5432 } else { 5433 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, 5434 &stripe_index); 5435 } 5436 5437 bbio = alloc_btrfs_bio(num_stripes, 0); 5438 if (!bbio) { 5439 ret = -ENOMEM; 5440 goto out; 5441 } 5442 5443 for (i = 0; i < num_stripes; i++) { 5444 bbio->stripes[i].physical = 5445 map->stripes[stripe_index].physical + 5446 stripe_offset + stripe_nr * map->stripe_len; 5447 bbio->stripes[i].dev = map->stripes[stripe_index].dev; 5448 5449 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | 5450 BTRFS_BLOCK_GROUP_RAID10)) { 5451 bbio->stripes[i].length = stripes_per_dev * 5452 map->stripe_len; 5453 5454 if (i / sub_stripes < remaining_stripes) 5455 bbio->stripes[i].length += 5456 map->stripe_len; 5457 5458 /* 5459 * Special for the first stripe and 5460 * the last stripe: 5461 * 5462 * |-------|...|-------| 5463 * |----------| 5464 * off end_off 5465 */ 5466 if (i < sub_stripes) 5467 bbio->stripes[i].length -= 5468 stripe_offset; 5469 5470 if 
(stripe_index >= last_stripe && 5471 stripe_index <= (last_stripe + 5472 sub_stripes - 1)) 5473 bbio->stripes[i].length -= 5474 stripe_end_offset; 5475 5476 if (i == sub_stripes - 1) 5477 stripe_offset = 0; 5478 } else { 5479 bbio->stripes[i].length = length; 5480 } 5481 5482 stripe_index++; 5483 if (stripe_index == map->num_stripes) { 5484 stripe_index = 0; 5485 stripe_nr++; 5486 } 5487 } 5488 5489 *bbio_ret = bbio; 5490 bbio->map_type = map->type; 5491 bbio->num_stripes = num_stripes; 5492 out: 5493 free_extent_map(em); 5494 return ret; 5495 } 5496 5497 /* 5498 * In the dev-replace case, for the repair case (that's the only case where 5499 * the mirror is selected explicitly when calling btrfs_map_block), blocks 5500 * left of the left cursor can also be read from the target drive. 5501 * 5502 * For REQ_GET_READ_MIRRORS, the target drive is added as the last one to the 5503 * array of stripes. 5504 * For READ, it also needs to be supported using the same mirror number. 5505 * 5506 * If the requested block is not left of the left cursor, EIO is returned. This 5507 * can happen because btrfs_num_copies() returns one more in the dev-replace 5508 * case. 5509 */ 5510 static int get_extra_mirror_from_replace(struct btrfs_fs_info *fs_info, 5511 u64 logical, u64 length, 5512 u64 srcdev_devid, int *mirror_num, 5513 u64 *physical) 5514 { 5515 struct btrfs_bio *bbio = NULL; 5516 int num_stripes; 5517 int index_srcdev = 0; 5518 int found = 0; 5519 u64 physical_of_found = 0; 5520 int i; 5521 int ret = 0; 5522 5523 ret = __btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS, 5524 logical, &length, &bbio, 0, 0); 5525 if (ret) { 5526 ASSERT(bbio == NULL); 5527 return ret; 5528 } 5529 5530 num_stripes = bbio->num_stripes; 5531 if (*mirror_num > num_stripes) { 5532 /* 5533 * BTRFS_MAP_GET_READ_MIRRORS does not contain this mirror, 5534 * that means that the requested area is not left of the left 5535 * cursor 5536 */ 5537 btrfs_put_bbio(bbio); 5538 return -EIO; 5539 } 5540 5541 /* 5542 * Process the rest of the function using the mirror_num of the source 5543 * drive; therefore look it up first. At the end, patch the device 5544 * pointer to that of the target drive. 5545 */ 5546 for (i = 0; i < num_stripes; i++) { 5547 if (bbio->stripes[i].dev->devid != srcdev_devid) 5548 continue; 5549 5550 /* 5551 * In case of DUP, in order to keep it simple, only add the 5552 * mirror with the lowest physical address 5553 */ 5554 if (found && 5555 physical_of_found <= bbio->stripes[i].physical) 5556 continue; 5557 5558 index_srcdev = i; 5559 found = 1; 5560 physical_of_found = bbio->stripes[i].physical; 5561 } 5562 5563 btrfs_put_bbio(bbio); 5564 5565 ASSERT(found); 5566 if (!found) 5567 return -EIO; 5568 5569 *mirror_num = index_srcdev + 1; 5570 *physical = physical_of_found; 5571 return ret; 5572 } 5573 5574 static void handle_ops_on_dev_replace(enum btrfs_map_op op, 5575 struct btrfs_bio **bbio_ret, 5576 struct btrfs_dev_replace *dev_replace, 5577 int *num_stripes_ret, int *max_errors_ret) 5578 { 5579 struct btrfs_bio *bbio = *bbio_ret; 5580 u64 srcdev_devid = dev_replace->srcdev->devid; 5581 int tgtdev_indexes = 0; 5582 int num_stripes = *num_stripes_ret; 5583 int max_errors = *max_errors_ret; 5584 int i; 5585 5586 if (op == BTRFS_MAP_WRITE) { 5587 int index_where_to_add; 5588 5589 /* 5590 * Duplicate the write operations while the dev replace 5591 * procedure is running.
Since the copying of the old disk to 5592 * the new disk takes place at run time while the filesystem is 5593 * mounted writable, the regular write operations to the old 5594 * disk have to be duplicated to go to the new disk as well. 5595 * 5596 * Note that device->missing is handled by the caller, and that 5597 * the write to the old disk is already set up in the stripes 5598 * array. 5599 */ 5600 index_where_to_add = num_stripes; 5601 for (i = 0; i < num_stripes; i++) { 5602 if (bbio->stripes[i].dev->devid == srcdev_devid) { 5603 /* write to new disk, too */ 5604 struct btrfs_bio_stripe *new = 5605 bbio->stripes + index_where_to_add; 5606 struct btrfs_bio_stripe *old = 5607 bbio->stripes + i; 5608 5609 new->physical = old->physical; 5610 new->length = old->length; 5611 new->dev = dev_replace->tgtdev; 5612 bbio->tgtdev_map[i] = index_where_to_add; 5613 index_where_to_add++; 5614 max_errors++; 5615 tgtdev_indexes++; 5616 } 5617 } 5618 num_stripes = index_where_to_add; 5619 } else if (op == BTRFS_MAP_GET_READ_MIRRORS) { 5620 int index_srcdev = 0; 5621 int found = 0; 5622 u64 physical_of_found = 0; 5623 5624 /* 5625 * During the dev-replace procedure, the target drive can also 5626 * be used to read data in case it is needed to repair a corrupt 5627 * block elsewhere. This is possible if the requested area is 5628 * left of the left cursor. In this area, the target drive is a 5629 * full copy of the source drive. 5630 */ 5631 for (i = 0; i < num_stripes; i++) { 5632 if (bbio->stripes[i].dev->devid == srcdev_devid) { 5633 /* 5634 * In case of DUP, in order to keep it simple, 5635 * only add the mirror with the lowest physical 5636 * address 5637 */ 5638 if (found && 5639 physical_of_found <= 5640 bbio->stripes[i].physical) 5641 continue; 5642 index_srcdev = i; 5643 found = 1; 5644 physical_of_found = bbio->stripes[i].physical; 5645 } 5646 } 5647 if (found) { 5648 struct btrfs_bio_stripe *tgtdev_stripe = 5649 bbio->stripes + num_stripes; 5650 5651 tgtdev_stripe->physical = physical_of_found; 5652 tgtdev_stripe->length = 5653 bbio->stripes[index_srcdev].length; 5654 tgtdev_stripe->dev = dev_replace->tgtdev; 5655 bbio->tgtdev_map[index_srcdev] = num_stripes; 5656 5657 tgtdev_indexes++; 5658 num_stripes++; 5659 } 5660 } 5661 5662 *num_stripes_ret = num_stripes; 5663 *max_errors_ret = max_errors; 5664 bbio->num_tgtdevs = tgtdev_indexes; 5665 *bbio_ret = bbio; 5666 } 5667 5668 static bool need_full_stripe(enum btrfs_map_op op) 5669 { 5670 return (op == BTRFS_MAP_WRITE || op == BTRFS_MAP_GET_READ_MIRRORS); 5671 } 5672 5673 static int __btrfs_map_block(struct btrfs_fs_info *fs_info, 5674 enum btrfs_map_op op, 5675 u64 logical, u64 *length, 5676 struct btrfs_bio **bbio_ret, 5677 int mirror_num, int need_raid_map) 5678 { 5679 struct extent_map *em; 5680 struct map_lookup *map; 5681 u64 offset; 5682 u64 stripe_offset; 5683 u64 stripe_nr; 5684 u64 stripe_len; 5685 u32 stripe_index; 5686 int i; 5687 int ret = 0; 5688 int num_stripes; 5689 int max_errors = 0; 5690 int tgtdev_indexes = 0; 5691 struct btrfs_bio *bbio = NULL; 5692 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace; 5693 int dev_replace_is_ongoing = 0; 5694 int num_alloc_stripes; 5695 int patch_the_first_stripe_for_dev_replace = 0; 5696 u64 physical_to_patch_in_first_stripe = 0; 5697 u64 raid56_full_stripe_start = (u64)-1; 5698 5699 if (op == BTRFS_MAP_DISCARD) 5700 return __btrfs_map_block_for_discard(fs_info, logical, 5701 *length, bbio_ret); 5702 5703 em = get_chunk_map(fs_info, logical, *length); 5704 if (IS_ERR(em)) 5705 
return PTR_ERR(em); 5706 5707 map = em->map_lookup; 5708 offset = logical - em->start; 5709 5710 stripe_len = map->stripe_len; 5711 stripe_nr = offset; 5712 /* 5713 * stripe_nr counts the total number of stripes we have to stride 5714 * to get to this block 5715 */ 5716 stripe_nr = div64_u64(stripe_nr, stripe_len); 5717 5718 stripe_offset = stripe_nr * stripe_len; 5719 if (offset < stripe_offset) { 5720 btrfs_crit(fs_info, 5721 "stripe math has gone wrong, stripe_offset=%llu, offset=%llu, start=%llu, logical=%llu, stripe_len=%llu", 5722 stripe_offset, offset, em->start, logical, 5723 stripe_len); 5724 free_extent_map(em); 5725 return -EINVAL; 5726 } 5727 5728 /* stripe_offset is the offset of this block in its stripe */ 5729 stripe_offset = offset - stripe_offset; 5730 5731 /* if we're here for raid56, we need to know the stripe aligned start */ 5732 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { 5733 unsigned long full_stripe_len = stripe_len * nr_data_stripes(map); 5734 raid56_full_stripe_start = offset; 5735 5736 /* allow a write of a full stripe, but make sure we don't 5737 * allow straddling of stripes 5738 */ 5739 raid56_full_stripe_start = div64_u64(raid56_full_stripe_start, 5740 full_stripe_len); 5741 raid56_full_stripe_start *= full_stripe_len; 5742 } 5743 5744 if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) { 5745 u64 max_len; 5746 /* For writes to RAID[56], allow a full stripeset across all disks. 5747 For other RAID types and for RAID[56] reads, just allow a single 5748 stripe (on a single disk). */ 5749 if ((map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) && 5750 (op == BTRFS_MAP_WRITE)) { 5751 max_len = stripe_len * nr_data_stripes(map) - 5752 (offset - raid56_full_stripe_start); 5753 } else { 5754 /* we limit the length of each bio to what fits in a stripe */ 5755 max_len = stripe_len - stripe_offset; 5756 } 5757 *length = min_t(u64, em->len - offset, max_len); 5758 } else { 5759 *length = em->len - offset; 5760 } 5761 5762 /* This is for when we're called from btrfs_merge_bio_hook() and all 5763 it cares about is the length */ 5764 if (!bbio_ret) 5765 goto out; 5766 5767 btrfs_dev_replace_read_lock(dev_replace); 5768 dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace); 5769 if (!dev_replace_is_ongoing) 5770 btrfs_dev_replace_read_unlock(dev_replace); 5771 else 5772 btrfs_dev_replace_set_lock_blocking(dev_replace); 5773 5774 if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 && 5775 !need_full_stripe(op) && dev_replace->tgtdev != NULL) { 5776 ret = get_extra_mirror_from_replace(fs_info, logical, *length, 5777 dev_replace->srcdev->devid, 5778 &mirror_num, 5779 &physical_to_patch_in_first_stripe); 5780 if (ret) 5781 goto out; 5782 else 5783 patch_the_first_stripe_for_dev_replace = 1; 5784 } else if (mirror_num > map->num_stripes) { 5785 mirror_num = 0; 5786 } 5787 5788 num_stripes = 1; 5789 stripe_index = 0; 5790 if (map->type & BTRFS_BLOCK_GROUP_RAID0) { 5791 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, 5792 &stripe_index); 5793 if (!need_full_stripe(op)) 5794 mirror_num = 1; 5795 } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) { 5796 if (need_full_stripe(op)) 5797 num_stripes = map->num_stripes; 5798 else if (mirror_num) 5799 stripe_index = mirror_num - 1; 5800 else { 5801 stripe_index = find_live_mirror(fs_info, map, 0, 5802 dev_replace_is_ongoing); 5803 mirror_num = stripe_index + 1; 5804 } 5805 5806 } else if (map->type & BTRFS_BLOCK_GROUP_DUP) { 5807 if (need_full_stripe(op)) { 5808 num_stripes = map->num_stripes; 5809 } else if
	if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
				&stripe_index);
		if (!need_full_stripe(op))
			mirror_num = 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
		if (need_full_stripe(op))
			num_stripes = map->num_stripes;
		else if (mirror_num)
			stripe_index = mirror_num - 1;
		else {
			stripe_index = find_live_mirror(fs_info, map, 0,
					    dev_replace_is_ongoing);
			mirror_num = stripe_index + 1;
		}

	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
		if (need_full_stripe(op)) {
			num_stripes = map->num_stripes;
		} else if (mirror_num) {
			stripe_index = mirror_num - 1;
		} else {
			mirror_num = 1;
		}

	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
		u32 factor = map->num_stripes / map->sub_stripes;

		stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
		stripe_index *= map->sub_stripes;

		if (need_full_stripe(op))
			num_stripes = map->sub_stripes;
		else if (mirror_num)
			stripe_index += mirror_num - 1;
		else {
			int old_stripe_index = stripe_index;
			stripe_index = find_live_mirror(fs_info, map,
					      stripe_index,
					      dev_replace_is_ongoing);
			mirror_num = stripe_index - old_stripe_index + 1;
		}

	} else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		if (need_raid_map && (need_full_stripe(op) || mirror_num > 1)) {
			/* push stripe_nr back to the start of the full stripe */
			stripe_nr = div64_u64(raid56_full_stripe_start,
					stripe_len * nr_data_stripes(map));

			/* RAID[56] write or recovery. Return all stripes */
			num_stripes = map->num_stripes;
			max_errors = nr_parity_stripes(map);

			*length = map->stripe_len;
			stripe_index = 0;
			stripe_offset = 0;
		} else {
			/*
			 * Mirror #0 or #1 means the original data block.
			 * Mirror #2 is RAID5 parity block.
			 * Mirror #3 is RAID6 Q block.
			 */
			stripe_nr = div_u64_rem(stripe_nr,
					nr_data_stripes(map), &stripe_index);
			if (mirror_num > 1)
				stripe_index = nr_data_stripes(map) +
						mirror_num - 2;

			/* We distribute the parity blocks across stripes */
			div_u64_rem(stripe_nr + stripe_index, map->num_stripes,
					&stripe_index);
			if (!need_full_stripe(op) && mirror_num <= 1)
				mirror_num = 1;
		}
	} else {
		/*
		 * after this, stripe_nr is the number of stripes on this
		 * device we have to walk to find the data, and stripe_index is
		 * the number of our device in the stripe array
		 */
		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
				&stripe_index);
		mirror_num = stripe_index + 1;
	}
	if (stripe_index >= map->num_stripes) {
		btrfs_crit(fs_info,
			   "stripe index math went horribly wrong, got stripe_index=%u, num_stripes=%u",
			   stripe_index, map->num_stripes);
		ret = -EINVAL;
		goto out;
	}

	num_alloc_stripes = num_stripes;
	if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL) {
		if (op == BTRFS_MAP_WRITE)
			num_alloc_stripes <<= 1;
		if (op == BTRFS_MAP_GET_READ_MIRRORS)
			num_alloc_stripes++;
		tgtdev_indexes = num_stripes;
	}

	bbio = alloc_btrfs_bio(num_alloc_stripes, tgtdev_indexes);
	if (!bbio) {
		ret = -ENOMEM;
		goto out;
	}
	if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL)
		bbio->tgtdev_map = (int *)(bbio->stripes + num_alloc_stripes);
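	/*
	 * Illustrative example for the raid_map construction below (numbers
	 * made up for the example): on a 3-device RAID5 chunk, num_stripes
	 * is 3 and nr_data_stripes() is 2. For stripe_nr == 1, rot == 1, so
	 * data stripe 0 lands in slot 1, data stripe 1 in slot 2, and the
	 * RAID5_P_STRIPE marker in slot 0: the parity rotates one device
	 * per full stripe, which sort_parity_stripes() later relies on.
	 */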
	/* build raid_map */
	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK && need_raid_map &&
	    (need_full_stripe(op) || mirror_num > 1)) {
		u64 tmp;
		unsigned rot;

		bbio->raid_map = (u64 *)((void *)bbio->stripes +
				 sizeof(struct btrfs_bio_stripe) *
				 num_alloc_stripes +
				 sizeof(int) * tgtdev_indexes);

		/* Work out the disk rotation on this stripe-set */
		div_u64_rem(stripe_nr, num_stripes, &rot);

		/* Fill in the logical address of each stripe */
		tmp = stripe_nr * nr_data_stripes(map);
		for (i = 0; i < nr_data_stripes(map); i++)
			bbio->raid_map[(i+rot) % num_stripes] =
				em->start + (tmp + i) * map->stripe_len;

		bbio->raid_map[(i+rot) % map->num_stripes] = RAID5_P_STRIPE;
		if (map->type & BTRFS_BLOCK_GROUP_RAID6)
			bbio->raid_map[(i+rot+1) % num_stripes] =
				RAID6_Q_STRIPE;
	}

	for (i = 0; i < num_stripes; i++) {
		bbio->stripes[i].physical =
			map->stripes[stripe_index].physical +
			stripe_offset +
			stripe_nr * map->stripe_len;
		bbio->stripes[i].dev =
			map->stripes[stripe_index].dev;
		stripe_index++;
	}

	if (need_full_stripe(op))
		max_errors = btrfs_chunk_max_errors(map);

	if (bbio->raid_map)
		sort_parity_stripes(bbio, num_stripes);

	if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL &&
	    need_full_stripe(op)) {
		handle_ops_on_dev_replace(op, &bbio, dev_replace, &num_stripes,
					  &max_errors);
	}

	*bbio_ret = bbio;
	bbio->map_type = map->type;
	bbio->num_stripes = num_stripes;
	bbio->max_errors = max_errors;
	bbio->mirror_num = mirror_num;

	/*
	 * this is the case that REQ_READ && dev_replace_is_ongoing &&
	 * mirror_num == num_stripes + 1 && dev_replace target drive is
	 * available as a mirror
	 */
	if (patch_the_first_stripe_for_dev_replace && num_stripes > 0) {
		WARN_ON(num_stripes > 1);
		bbio->stripes[0].dev = dev_replace->tgtdev;
		bbio->stripes[0].physical = physical_to_patch_in_first_stripe;
		bbio->mirror_num = map->num_stripes + 1;
	}
out:
	if (dev_replace_is_ongoing) {
		btrfs_dev_replace_clear_lock_blocking(dev_replace);
		btrfs_dev_replace_read_unlock(dev_replace);
	}
	free_extent_map(em);
	return ret;
}

int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
		    u64 logical, u64 *length,
		    struct btrfs_bio **bbio_ret, int mirror_num)
{
	return __btrfs_map_block(fs_info, op, logical, length, bbio_ret,
				 mirror_num, 0);
}

/* For Scrub/replace */
int btrfs_map_sblock(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
		     u64 logical, u64 *length,
		     struct btrfs_bio **bbio_ret)
{
	return __btrfs_map_block(fs_info, op, logical, length, bbio_ret, 0, 1);
}
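/*
 * Minimal usage sketch for btrfs_map_block() (illustrative only, error
 * handling trimmed; fs_info, logical and length are assumed to come from
 * the caller's context):
 *
 *	u64 map_length = length;
 *	struct btrfs_bio *bbio = NULL;
 *
 *	ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, logical,
 *			      &map_length, &bbio, 0);
 *	if (!ret) {
 *		// bbio->stripes[0] now names a (dev, physical) to read;
 *		// map_length may be smaller than the requested length.
 *		btrfs_put_bbio(bbio);
 *	}
 */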
int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start,
		     u64 physical, u64 **logical, int *naddrs, int *stripe_len)
{
	struct extent_map *em;
	struct map_lookup *map;
	u64 *buf;
	u64 bytenr;
	u64 length;
	u64 stripe_nr;
	u64 rmap_len;
	int i, j, nr = 0;

	em = get_chunk_map(fs_info, chunk_start, 1);
	if (IS_ERR(em))
		return -EIO;

	map = em->map_lookup;
	length = em->len;
	rmap_len = map->stripe_len;

	if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		length = div_u64(length, map->num_stripes / map->sub_stripes);
	else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
		length = div_u64(length, map->num_stripes);
	else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		length = div_u64(length, nr_data_stripes(map));
		rmap_len = map->stripe_len * nr_data_stripes(map);
	}

	buf = kcalloc(map->num_stripes, sizeof(u64), GFP_NOFS);
	BUG_ON(!buf); /* -ENOMEM */

	for (i = 0; i < map->num_stripes; i++) {
		if (map->stripes[i].physical > physical ||
		    map->stripes[i].physical + length <= physical)
			continue;

		stripe_nr = physical - map->stripes[i].physical;
		stripe_nr = div64_u64(stripe_nr, map->stripe_len);

		if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
			stripe_nr = stripe_nr * map->num_stripes + i;
			stripe_nr = div_u64(stripe_nr, map->sub_stripes);
		} else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
			stripe_nr = stripe_nr * map->num_stripes + i;
		}
		/*
		 * else if RAID[56], multiply by nr_data_stripes().
		 * Alternatively, just use rmap_len below instead of
		 * map->stripe_len
		 */

		bytenr = chunk_start + stripe_nr * rmap_len;
		WARN_ON(nr >= map->num_stripes);
		for (j = 0; j < nr; j++) {
			if (buf[j] == bytenr)
				break;
		}
		if (j == nr) {
			WARN_ON(nr >= map->num_stripes);
			buf[nr++] = bytenr;
		}
	}

	*logical = buf;
	*naddrs = nr;
	*stripe_len = rmap_len;

	free_extent_map(em);
	return 0;
}

static inline void btrfs_end_bbio(struct btrfs_bio *bbio, struct bio *bio)
{
	bio->bi_private = bbio->private;
	bio->bi_end_io = bbio->end_io;
	bio_endio(bio);

	btrfs_put_bbio(bbio);
}

static void btrfs_end_bio(struct bio *bio)
{
	struct btrfs_bio *bbio = bio->bi_private;
	int is_orig_bio = 0;

	if (bio->bi_status) {
		atomic_inc(&bbio->error);
		if (bio->bi_status == BLK_STS_IOERR ||
		    bio->bi_status == BLK_STS_TARGET) {
			unsigned int stripe_index =
				btrfs_io_bio(bio)->stripe_index;
			struct btrfs_device *dev;

			BUG_ON(stripe_index >= bbio->num_stripes);
			dev = bbio->stripes[stripe_index].dev;
			if (dev->bdev) {
				if (bio_op(bio) == REQ_OP_WRITE)
					btrfs_dev_stat_inc_and_print(dev,
						BTRFS_DEV_STAT_WRITE_ERRS);
				else
					btrfs_dev_stat_inc_and_print(dev,
						BTRFS_DEV_STAT_READ_ERRS);
				if (bio->bi_opf & REQ_PREFLUSH)
					btrfs_dev_stat_inc_and_print(dev,
						BTRFS_DEV_STAT_FLUSH_ERRS);
			}
		}
	}

	if (bio == bbio->orig_bio)
		is_orig_bio = 1;

	btrfs_bio_counter_dec(bbio->fs_info);

	if (atomic_dec_and_test(&bbio->stripes_pending)) {
		if (!is_orig_bio) {
			bio_put(bio);
			bio = bbio->orig_bio;
		}

		btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
		/*
		 * only send an error to the higher layers if it is
		 * beyond the tolerance of the btrfs bio
		 */
		if (atomic_read(&bbio->error) > bbio->max_errors) {
			bio->bi_status = BLK_STS_IOERR;
		} else {
			/*
			 * this bio is actually up to date, we didn't
			 * go over the max number of errors
			 */
			bio->bi_status = BLK_STS_OK;
		}

		btrfs_end_bbio(bbio, bio);
	} else if (!is_orig_bio) {
		bio_put(bio);
	}
}
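/*
 * Tolerance example for the completion path above (illustrative): a RAID1
 * write fans out to two stripes, and btrfs_chunk_max_errors() allows one
 * failure for that profile (tolerated_failures == 1 in btrfs_raid_array).
 * If exactly one mirror write fails, the error count (1) does not exceed
 * max_errors, so the original bio still completes with BLK_STS_OK; only a
 * second failure surfaces as BLK_STS_IOERR.
 */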
/*
 * see run_scheduled_bios for a description of why bios are collected for
 * async submit.
 *
 * This will add one bio to the pending list for a device and make sure
 * the work struct is scheduled.
 */
static noinline void btrfs_schedule_bio(struct btrfs_device *device,
					struct bio *bio)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	int should_queue = 1;
	struct btrfs_pending_bios *pending_bios;

	if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state) ||
	    !device->bdev) {
		bio_io_error(bio);
		return;
	}

	/* don't bother with additional async steps for reads, right now */
	if (bio_op(bio) == REQ_OP_READ) {
		btrfsic_submit_bio(bio);
		return;
	}

	WARN_ON(bio->bi_next);
	bio->bi_next = NULL;

	spin_lock(&device->io_lock);
	if (op_is_sync(bio->bi_opf))
		pending_bios = &device->pending_sync_bios;
	else
		pending_bios = &device->pending_bios;

	if (pending_bios->tail)
		pending_bios->tail->bi_next = bio;

	pending_bios->tail = bio;
	if (!pending_bios->head)
		pending_bios->head = bio;
	if (device->running_pending)
		should_queue = 0;

	spin_unlock(&device->io_lock);

	if (should_queue)
		btrfs_queue_work(fs_info->submit_workers, &device->work);
}

static void submit_stripe_bio(struct btrfs_bio *bbio, struct bio *bio,
			      u64 physical, int dev_nr, int async)
{
	struct btrfs_device *dev = bbio->stripes[dev_nr].dev;
	struct btrfs_fs_info *fs_info = bbio->fs_info;

	bio->bi_private = bbio;
	btrfs_io_bio(bio)->stripe_index = dev_nr;
	bio->bi_end_io = btrfs_end_bio;
	bio->bi_iter.bi_sector = physical >> 9;
#ifdef DEBUG
	{
		struct rcu_string *name;

		rcu_read_lock();
		name = rcu_dereference(dev->name);
		btrfs_debug(fs_info,
			"btrfs_map_bio: rw %d 0x%x, sector=%llu, dev=%lu (%s id %llu), size=%u",
			bio_op(bio), bio->bi_opf,
			(u64)bio->bi_iter.bi_sector,
			(u_long)dev->bdev->bd_dev, name->str, dev->devid,
			bio->bi_iter.bi_size);
		rcu_read_unlock();
	}
#endif
	bio_set_dev(bio, dev->bdev);

	btrfs_bio_counter_inc_noblocked(fs_info);

	if (async)
		btrfs_schedule_bio(dev, bio);
	else
		btrfsic_submit_bio(bio);
}
static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
{
	atomic_inc(&bbio->error);
	if (atomic_dec_and_test(&bbio->stripes_pending)) {
		/* Should be the original bio. */
		WARN_ON(bio != bbio->orig_bio);

		btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
		bio->bi_iter.bi_sector = logical >> 9;
		if (atomic_read(&bbio->error) > bbio->max_errors)
			bio->bi_status = BLK_STS_IOERR;
		else
			bio->bi_status = BLK_STS_OK;
		btrfs_end_bbio(bbio, bio);
	}
}

blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
			   int mirror_num, int async_submit)
{
	struct btrfs_device *dev;
	struct bio *first_bio = bio;
	u64 logical = (u64)bio->bi_iter.bi_sector << 9;
	u64 length = 0;
	u64 map_length;
	int ret;
	int dev_nr;
	int total_devs;
	struct btrfs_bio *bbio = NULL;

	length = bio->bi_iter.bi_size;
	map_length = length;

	btrfs_bio_counter_inc_blocked(fs_info);
	ret = __btrfs_map_block(fs_info, btrfs_op(bio), logical,
				&map_length, &bbio, mirror_num, 1);
	if (ret) {
		btrfs_bio_counter_dec(fs_info);
		return errno_to_blk_status(ret);
	}

	total_devs = bbio->num_stripes;
	bbio->orig_bio = first_bio;
	bbio->private = first_bio->bi_private;
	bbio->end_io = first_bio->bi_end_io;
	bbio->fs_info = fs_info;
	atomic_set(&bbio->stripes_pending, bbio->num_stripes);

	if ((bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
	    ((bio_op(bio) == REQ_OP_WRITE) || (mirror_num > 1))) {
		/*
		 * In this case, map_length has been set to the length of
		 * a single stripe, not the whole write.
		 */
		if (bio_op(bio) == REQ_OP_WRITE) {
			ret = raid56_parity_write(fs_info, bio, bbio,
						  map_length);
		} else {
			ret = raid56_parity_recover(fs_info, bio, bbio,
						    map_length, mirror_num, 1);
		}

		btrfs_bio_counter_dec(fs_info);
		return errno_to_blk_status(ret);
	}

	if (map_length < length) {
		btrfs_crit(fs_info,
			   "mapping failed logical %llu bio len %llu len %llu",
			   logical, length, map_length);
		BUG();
	}

	for (dev_nr = 0; dev_nr < total_devs; dev_nr++) {
		dev = bbio->stripes[dev_nr].dev;
		if (!dev || !dev->bdev ||
		    (bio_op(first_bio) == REQ_OP_WRITE &&
		     !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))) {
			bbio_error(bbio, first_bio, logical);
			continue;
		}

		if (dev_nr < total_devs - 1)
			bio = btrfs_bio_clone(first_bio);
		else
			bio = first_bio;

		submit_stripe_bio(bbio, bio, bbio->stripes[dev_nr].physical,
				  dev_nr, async_submit);
	}
	btrfs_bio_counter_dec(fs_info);
	return BLK_STS_OK;
}

struct btrfs_device *btrfs_find_device(struct btrfs_fs_info *fs_info, u64 devid,
				       u8 *uuid, u8 *fsid)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *cur_devices;

	cur_devices = fs_info->fs_devices;
	while (cur_devices) {
		if (!fsid ||
		    !memcmp(cur_devices->fsid, fsid, BTRFS_FSID_SIZE)) {
			device = find_device(cur_devices, devid, uuid);
			if (device)
				return device;
		}
		cur_devices = cur_devices->seed;
	}
	return NULL;
}
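/*
 * Note on the lookup above (informal): fs_devices for a sprouted
 * filesystem form a singly linked chain through ->seed, so
 * btrfs_find_device() walks the current filesystem's devices first and
 * then every seed filesystem behind it. Passing a NULL fsid matches any
 * filesystem in that chain.
 */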
static struct btrfs_device *add_missing_dev(struct btrfs_fs_devices *fs_devices,
					    u64 devid, u8 *dev_uuid)
{
	struct btrfs_device *device;

	device = btrfs_alloc_device(NULL, &devid, dev_uuid);
	if (IS_ERR(device))
		return device;

	list_add(&device->dev_list, &fs_devices->devices);
	device->fs_devices = fs_devices;
	fs_devices->num_devices++;

	set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
	fs_devices->missing_devices++;

	return device;
}

/**
 * btrfs_alloc_device - allocate struct btrfs_device
 * @fs_info:	used only for generating a new devid, can be NULL if
 *		devid is provided (i.e. @devid != NULL).
 * @devid:	a pointer to devid for this device. If NULL a new devid
 *		is generated.
 * @uuid:	a pointer to UUID for this device. If NULL a new UUID
 *		is generated.
 *
 * Return: a pointer to a new &struct btrfs_device on success; ERR_PTR()
 * on error. Returned struct is not linked onto any lists and must be
 * destroyed with btrfs_free_device.
 */
struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
					const u64 *devid,
					const u8 *uuid)
{
	struct btrfs_device *dev;
	u64 tmp;

	if (WARN_ON(!devid && !fs_info))
		return ERR_PTR(-EINVAL);

	dev = __alloc_device();
	if (IS_ERR(dev))
		return dev;

	if (devid)
		tmp = *devid;
	else {
		int ret;

		ret = find_next_devid(fs_info, &tmp);
		if (ret) {
			btrfs_free_device(dev);
			return ERR_PTR(ret);
		}
	}
	dev->devid = tmp;

	if (uuid)
		memcpy(dev->uuid, uuid, BTRFS_UUID_SIZE);
	else
		generate_random_uuid(dev->uuid);

	btrfs_init_work(&dev->work, btrfs_submit_helper,
			pending_bios_fn, NULL, NULL);

	return dev;
}
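/*
 * Usage sketch for btrfs_alloc_device() (illustrative, mirroring
 * add_missing_dev() above): callers that already know the devid pass it
 * and may leave fs_info NULL; callers adding a brand new device pass
 * fs_info and a NULL devid so find_next_devid() can pick one:
 *
 *	dev = btrfs_alloc_device(fs_info, NULL, NULL);
 *	if (IS_ERR(dev))
 *		return PTR_ERR(dev);
 *	// not linked on any list yet; release with btrfs_free_device()
 *	// on error paths
 */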
/* Return -EIO if any error, otherwise return 0. */
static int btrfs_check_chunk_valid(struct btrfs_fs_info *fs_info,
				   struct extent_buffer *leaf,
				   struct btrfs_chunk *chunk, u64 logical)
{
	u64 length;
	u64 stripe_len;
	u16 num_stripes;
	u16 sub_stripes;
	u64 type;

	length = btrfs_chunk_length(leaf, chunk);
	stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
	type = btrfs_chunk_type(leaf, chunk);

	if (!num_stripes) {
		btrfs_err(fs_info, "invalid chunk num_stripes: %u",
			  num_stripes);
		return -EIO;
	}
	if (!IS_ALIGNED(logical, fs_info->sectorsize)) {
		btrfs_err(fs_info, "invalid chunk logical %llu", logical);
		return -EIO;
	}
	if (btrfs_chunk_sector_size(leaf, chunk) != fs_info->sectorsize) {
		btrfs_err(fs_info, "invalid chunk sectorsize %u",
			  btrfs_chunk_sector_size(leaf, chunk));
		return -EIO;
	}
	if (!length || !IS_ALIGNED(length, fs_info->sectorsize)) {
		btrfs_err(fs_info, "invalid chunk length %llu", length);
		return -EIO;
	}
	if (!is_power_of_2(stripe_len) || stripe_len != BTRFS_STRIPE_LEN) {
		btrfs_err(fs_info, "invalid chunk stripe length: %llu",
			  stripe_len);
		return -EIO;
	}
	if (~(BTRFS_BLOCK_GROUP_TYPE_MASK | BTRFS_BLOCK_GROUP_PROFILE_MASK) &
	    type) {
		btrfs_err(fs_info, "unrecognized chunk type: %llu",
			  ~(BTRFS_BLOCK_GROUP_TYPE_MASK |
			    BTRFS_BLOCK_GROUP_PROFILE_MASK) &
			  btrfs_chunk_type(leaf, chunk));
		return -EIO;
	}
	if ((type & BTRFS_BLOCK_GROUP_RAID10 && sub_stripes != 2) ||
	    (type & BTRFS_BLOCK_GROUP_RAID1 && num_stripes < 1) ||
	    (type & BTRFS_BLOCK_GROUP_RAID5 && num_stripes < 2) ||
	    (type & BTRFS_BLOCK_GROUP_RAID6 && num_stripes < 3) ||
	    (type & BTRFS_BLOCK_GROUP_DUP && num_stripes > 2) ||
	    ((type & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 &&
	     num_stripes != 1)) {
		btrfs_err(fs_info,
			"invalid num_stripes:sub_stripes %u:%u for profile %llu",
			num_stripes, sub_stripes,
			type & BTRFS_BLOCK_GROUP_PROFILE_MASK);
		return -EIO;
	}

	return 0;
}

static void btrfs_report_missing_device(struct btrfs_fs_info *fs_info,
					u64 devid, u8 *uuid, bool error)
{
	if (error)
		btrfs_err_rl(fs_info, "devid %llu uuid %pU is missing",
			     devid, uuid);
	else
		btrfs_warn_rl(fs_info, "devid %llu uuid %pU is missing",
			      devid, uuid);
}
static int read_one_chunk(struct btrfs_fs_info *fs_info, struct btrfs_key *key,
			  struct extent_buffer *leaf,
			  struct btrfs_chunk *chunk)
{
	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
	struct map_lookup *map;
	struct extent_map *em;
	u64 logical;
	u64 length;
	u64 devid;
	u8 uuid[BTRFS_UUID_SIZE];
	int num_stripes;
	int ret;
	int i;

	logical = key->offset;
	length = btrfs_chunk_length(leaf, chunk);
	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);

	ret = btrfs_check_chunk_valid(fs_info, leaf, chunk, logical);
	if (ret)
		return ret;

	read_lock(&map_tree->map_tree.lock);
	em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
	read_unlock(&map_tree->map_tree.lock);

	/* already mapped? */
	if (em && em->start <= logical && em->start + em->len > logical) {
		free_extent_map(em);
		return 0;
	} else if (em) {
		free_extent_map(em);
	}

	em = alloc_extent_map();
	if (!em)
		return -ENOMEM;
	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
	if (!map) {
		free_extent_map(em);
		return -ENOMEM;
	}

	set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
	em->map_lookup = map;
	em->start = logical;
	em->len = length;
	em->orig_start = 0;
	em->block_start = 0;
	em->block_len = em->len;

	map->num_stripes = num_stripes;
	map->io_width = btrfs_chunk_io_width(leaf, chunk);
	map->io_align = btrfs_chunk_io_align(leaf, chunk);
	map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
	map->type = btrfs_chunk_type(leaf, chunk);
	map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
	for (i = 0; i < num_stripes; i++) {
		map->stripes[i].physical =
			btrfs_stripe_offset_nr(leaf, chunk, i);
		devid = btrfs_stripe_devid_nr(leaf, chunk, i);
		read_extent_buffer(leaf, uuid, (unsigned long)
				   btrfs_stripe_dev_uuid_nr(chunk, i),
				   BTRFS_UUID_SIZE);
		map->stripes[i].dev = btrfs_find_device(fs_info, devid,
							uuid, NULL);
		if (!map->stripes[i].dev &&
		    !btrfs_test_opt(fs_info, DEGRADED)) {
			free_extent_map(em);
			btrfs_report_missing_device(fs_info, devid, uuid, true);
			return -ENOENT;
		}
		if (!map->stripes[i].dev) {
			map->stripes[i].dev =
				add_missing_dev(fs_info->fs_devices, devid,
						uuid);
			if (IS_ERR(map->stripes[i].dev)) {
				free_extent_map(em);
				btrfs_err(fs_info,
					"failed to init missing dev %llu: %ld",
					devid, PTR_ERR(map->stripes[i].dev));
				return PTR_ERR(map->stripes[i].dev);
			}
			btrfs_report_missing_device(fs_info, devid, uuid, false);
		}
		set_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
			&(map->stripes[i].dev->dev_state));

	}

	write_lock(&map_tree->map_tree.lock);
	ret = add_extent_mapping(&map_tree->map_tree, em, 0);
	write_unlock(&map_tree->map_tree.lock);
	BUG_ON(ret); /* Tree corruption */
	free_extent_map(em);

	return 0;
}

static void fill_device_from_item(struct extent_buffer *leaf,
				  struct btrfs_dev_item *dev_item,
				  struct btrfs_device *device)
{
	unsigned long ptr;

	device->devid = btrfs_device_id(leaf, dev_item);
	device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
	device->total_bytes = device->disk_total_bytes;
	device->commit_total_bytes = device->disk_total_bytes;
	device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
	device->commit_bytes_used = device->bytes_used;
	device->type = btrfs_device_type(leaf, dev_item);
	device->io_align = btrfs_device_io_align(leaf, dev_item);
	device->io_width = btrfs_device_io_width(leaf, dev_item);
	device->sector_size = btrfs_device_sector_size(leaf, dev_item);
	WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID);
	clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);

	ptr = btrfs_device_uuid(dev_item);
	read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
}

static struct btrfs_fs_devices *open_seed_devices(struct btrfs_fs_info *fs_info,
						  u8 *fsid)
{
	struct btrfs_fs_devices *fs_devices;
	int ret;

	lockdep_assert_held(&uuid_mutex);
	ASSERT(fsid);

	fs_devices = fs_info->fs_devices->seed;
	while (fs_devices) {
		if (!memcmp(fs_devices->fsid, fsid, BTRFS_FSID_SIZE))
			return fs_devices;

		fs_devices = fs_devices->seed;
	}

	fs_devices = find_fsid(fsid);
	if (!fs_devices) {
		if (!btrfs_test_opt(fs_info, DEGRADED))
			return ERR_PTR(-ENOENT);

		fs_devices = alloc_fs_devices(fsid);
		if (IS_ERR(fs_devices))
			return fs_devices;

		fs_devices->seeding = 1;
		fs_devices->opened = 1;
		return fs_devices;
	}

	fs_devices = clone_fs_devices(fs_devices);
	if (IS_ERR(fs_devices))
		return fs_devices;

	ret = open_fs_devices(fs_devices, FMODE_READ, fs_info->bdev_holder);
	if (ret) {
		free_fs_devices(fs_devices);
		fs_devices = ERR_PTR(ret);
		goto out;
	}

	if (!fs_devices->seeding) {
		close_fs_devices(fs_devices);
		free_fs_devices(fs_devices);
		fs_devices = ERR_PTR(-EINVAL);
		goto out;
	}

	fs_devices->seed = fs_info->fs_devices->seed;
	fs_info->fs_devices->seed = fs_devices;
out:
	return fs_devices;
}
static int read_one_dev(struct btrfs_fs_info *fs_info,
			struct extent_buffer *leaf,
			struct btrfs_dev_item *dev_item)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;
	u64 devid;
	int ret;
	u8 fs_uuid[BTRFS_FSID_SIZE];
	u8 dev_uuid[BTRFS_UUID_SIZE];

	devid = btrfs_device_id(leaf, dev_item);
	read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
			   BTRFS_UUID_SIZE);
	read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
			   BTRFS_FSID_SIZE);

	if (memcmp(fs_uuid, fs_info->fsid, BTRFS_FSID_SIZE)) {
		fs_devices = open_seed_devices(fs_info, fs_uuid);
		if (IS_ERR(fs_devices))
			return PTR_ERR(fs_devices);
	}

	device = btrfs_find_device(fs_info, devid, dev_uuid, fs_uuid);
	if (!device) {
		if (!btrfs_test_opt(fs_info, DEGRADED)) {
			btrfs_report_missing_device(fs_info, devid,
						    dev_uuid, true);
			return -ENOENT;
		}

		device = add_missing_dev(fs_devices, devid, dev_uuid);
		if (IS_ERR(device)) {
			btrfs_err(fs_info,
				  "failed to add missing dev %llu: %ld",
				  devid, PTR_ERR(device));
			return PTR_ERR(device);
		}
		btrfs_report_missing_device(fs_info, devid, dev_uuid, false);
	} else {
		if (!device->bdev) {
			if (!btrfs_test_opt(fs_info, DEGRADED)) {
				btrfs_report_missing_device(fs_info,
						devid, dev_uuid, true);
				return -ENOENT;
			}
			btrfs_report_missing_device(fs_info, devid,
						    dev_uuid, false);
		}

		if (!device->bdev &&
		    !test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
			/*
			 * this happens when a device that was properly set up
			 * in the device info lists suddenly goes bad.
			 * device->bdev is NULL, and so we have to set the
			 * BTRFS_DEV_STATE_MISSING bit here
			 */
			device->fs_devices->missing_devices++;
			set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
		}

		/* Move the device to its own fs_devices */
		if (device->fs_devices != fs_devices) {
			ASSERT(test_bit(BTRFS_DEV_STATE_MISSING,
							&device->dev_state));

			list_move(&device->dev_list, &fs_devices->devices);
			device->fs_devices->num_devices--;
			fs_devices->num_devices++;

			device->fs_devices->missing_devices--;
			fs_devices->missing_devices++;

			device->fs_devices = fs_devices;
		}
	}

	if (device->fs_devices != fs_info->fs_devices) {
		BUG_ON(test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state));
		if (device->generation !=
		    btrfs_device_generation(leaf, dev_item))
			return -EINVAL;
	}

	fill_device_from_item(leaf, dev_item, device);
	set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    !test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
		device->fs_devices->total_rw_bytes += device->total_bytes;
		atomic64_add(device->total_bytes - device->bytes_used,
				&fs_info->free_chunk_space);
	}
	ret = 0;
	return ret;
}
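/*
 * Layout sketch for the sys_chunk_array parsed below (informal; sizes are
 * from the on-disk format): the array is a packed sequence of
 * (struct btrfs_disk_key, struct btrfs_chunk) pairs, where each chunk item
 * embeds one btrfs_stripe and btrfs_chunk_item_size(n) accounts for the
 * remaining n - 1 stripes. The parser therefore first reads the key, then
 * enough of the chunk to learn num_stripes, then the full item.
 */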
int btrfs_read_sys_array(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root = fs_info->tree_root;
	struct btrfs_super_block *super_copy = fs_info->super_copy;
	struct extent_buffer *sb;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *array_ptr;
	unsigned long sb_array_offset;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur_offset;
	u64 type;
	struct btrfs_key key;

	ASSERT(BTRFS_SUPER_INFO_SIZE <= fs_info->nodesize);
	/*
	 * This will create extent buffer of nodesize, superblock size is
	 * fixed to BTRFS_SUPER_INFO_SIZE. If nodesize > sb size, this will
	 * overallocate but we can keep it as-is, only the first page is used.
	 */
	sb = btrfs_find_create_tree_block(fs_info, BTRFS_SUPER_INFO_OFFSET);
	if (IS_ERR(sb))
		return PTR_ERR(sb);
	set_extent_buffer_uptodate(sb);
	btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
	/*
	 * The sb extent buffer is artificial and just used to read the system
	 * array. The set_extent_buffer_uptodate() call does not properly mark
	 * all of its pages up-to-date when the page is larger: the extent does
	 * not cover the whole page and consequently check_page_uptodate does
	 * not find all the page's extents up-to-date (the hole beyond sb),
	 * so write_extent_buffer then triggers a WARN_ON.
	 *
	 * Regular short extents go through the mark_extent_buffer_dirty/
	 * writeback cycle, but sb spans only this function. Add an explicit
	 * SetPageUptodate call to silence the warning, e.g. on PowerPC 64.
	 */
	if (PAGE_SIZE > BTRFS_SUPER_INFO_SIZE)
		SetPageUptodate(sb->pages[0]);

	write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
	array_size = btrfs_super_sys_array_size(super_copy);

	array_ptr = super_copy->sys_chunk_array;
	sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array);
	cur_offset = 0;

	while (cur_offset < array_size) {
		disk_key = (struct btrfs_disk_key *)array_ptr;
		len = sizeof(*disk_key);
		if (cur_offset + len > array_size)
			goto out_short_read;

		btrfs_disk_key_to_cpu(&key, disk_key);

		array_ptr += len;
		sb_array_offset += len;
		cur_offset += len;

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)sb_array_offset;
			/*
			 * At least one btrfs_chunk with one stripe must be
			 * present, exact stripe count check comes afterwards
			 */
			len = btrfs_chunk_item_size(1);
			if (cur_offset + len > array_size)
				goto out_short_read;

			num_stripes = btrfs_chunk_num_stripes(sb, chunk);
			if (!num_stripes) {
				btrfs_err(fs_info,
					"invalid number of stripes %u in sys_array at offset %u",
					num_stripes, cur_offset);
				ret = -EIO;
				break;
			}

			type = btrfs_chunk_type(sb, chunk);
			if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) {
				btrfs_err(fs_info,
					"invalid chunk type %llu in sys_array at offset %u",
					type, cur_offset);
				ret = -EIO;
				break;
			}

			len = btrfs_chunk_item_size(num_stripes);
			if (cur_offset + len > array_size)
				goto out_short_read;

			ret = read_one_chunk(fs_info, &key, sb, chunk);
			if (ret)
				break;
		} else {
			btrfs_err(fs_info,
				"unexpected item type %u in sys_array at offset %u",
				(u32)key.type, cur_offset);
			ret = -EIO;
			break;
		}
		array_ptr += len;
		sb_array_offset += len;
		cur_offset += len;
	}
	clear_extent_buffer_uptodate(sb);
	free_extent_buffer_stale(sb);
	return ret;

out_short_read:
	btrfs_err(fs_info, "sys_array too short to read %u bytes at offset %u",
			len, cur_offset);
	clear_extent_buffer_uptodate(sb);
	free_extent_buffer_stale(sb);
	return -EIO;
}
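/*
 * Worked example for the degraded-mount check below (illustrative): a
 * RAID1 chunk tolerates one failed device (tolerated_failures == 1 in
 * btrfs_raid_array), so with one of its two stripes on a missing device,
 * missing (1) does not exceed max_tolerated and the chunk is still
 * mountable read-write. A RAID0 chunk on the same disks would fail the
 * check, since it tolerates zero failures.
 */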
/*
 * Check if all chunks in the fs are OK for read-write degraded mount
 *
 * If the @failing_dev is specified, it's accounted as missing.
 *
 * Return true if all chunks meet the minimal RW mount requirements.
 * Return false if any chunk doesn't meet the minimal RW mount requirements.
 */
bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info,
					struct btrfs_device *failing_dev)
{
	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
	struct extent_map *em;
	u64 next_start = 0;
	bool ret = true;

	read_lock(&map_tree->map_tree.lock);
	em = lookup_extent_mapping(&map_tree->map_tree, 0, (u64)-1);
	read_unlock(&map_tree->map_tree.lock);
	/* No chunk at all? Return false anyway */
	if (!em) {
		ret = false;
		goto out;
	}
	while (em) {
		struct map_lookup *map;
		int missing = 0;
		int max_tolerated;
		int i;

		map = em->map_lookup;
		max_tolerated =
			btrfs_get_num_tolerated_disk_barrier_failures(
					map->type);
		for (i = 0; i < map->num_stripes; i++) {
			struct btrfs_device *dev = map->stripes[i].dev;

			if (!dev || !dev->bdev ||
			    test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) ||
			    dev->last_flush_error)
				missing++;
			else if (failing_dev && failing_dev == dev)
				missing++;
		}
		if (missing > max_tolerated) {
			if (!failing_dev)
				btrfs_warn(fs_info,
	"chunk %llu missing %d devices, max tolerance is %d for writeable mount",
				   em->start, missing, max_tolerated);
			free_extent_map(em);
			ret = false;
			goto out;
		}
		next_start = extent_map_end(em);
		free_extent_map(em);

		read_lock(&map_tree->map_tree.lock);
		em = lookup_extent_mapping(&map_tree->map_tree, next_start,
					   (u64)(-1) - next_start);
		read_unlock(&map_tree->map_tree.lock);
	}
out:
	return ret;
}
int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root = fs_info->chunk_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	int ret;
	int slot;
	u64 total_dev = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * uuid_mutex is needed only if we are mounting a sprout FS,
	 * otherwise we don't need it.
	 */
	mutex_lock(&uuid_mutex);
	mutex_lock(&fs_info->chunk_mutex);

	/*
	 * Read all device items, and then all the chunk items. All
	 * device items are found before any chunk item (their object id
	 * is smaller than the lowest possible object id for a chunk
	 * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID).
	 */
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;
	while (1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
			break;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, slot);
		if (found_key.type == BTRFS_DEV_ITEM_KEY) {
			struct btrfs_dev_item *dev_item;
			dev_item = btrfs_item_ptr(leaf, slot,
						  struct btrfs_dev_item);
			ret = read_one_dev(fs_info, leaf, dev_item);
			if (ret)
				goto error;
			total_dev++;
		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
			struct btrfs_chunk *chunk;
			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
			ret = read_one_chunk(fs_info, &found_key, leaf, chunk);
			if (ret)
				goto error;
		}
		path->slots[0]++;
	}

	/*
	 * After loading chunk tree, we've got all device information,
	 * do another round of validation checks.
	 */
	if (total_dev != fs_info->fs_devices->total_devices) {
		btrfs_err(fs_info,
	   "super_num_devices %llu mismatch with num_devices %llu found here",
			  btrfs_super_num_devices(fs_info->super_copy),
			  total_dev);
		ret = -EINVAL;
		goto error;
	}
	if (btrfs_super_total_bytes(fs_info->super_copy) <
	    fs_info->fs_devices->total_rw_bytes) {
		btrfs_err(fs_info,
	"super_total_bytes %llu mismatch with fs_devices total_rw_bytes %llu",
			  btrfs_super_total_bytes(fs_info->super_copy),
			  fs_info->fs_devices->total_rw_bytes);
		ret = -EINVAL;
		goto error;
	}
	ret = 0;
error:
	mutex_unlock(&fs_info->chunk_mutex);
	mutex_unlock(&uuid_mutex);

	btrfs_free_path(path);
	return ret;
}

void btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;

	while (fs_devices) {
		mutex_lock(&fs_devices->device_list_mutex);
		list_for_each_entry(device, &fs_devices->devices, dev_list)
			device->fs_info = fs_info;
		mutex_unlock(&fs_devices->device_list_mutex);

		fs_devices = fs_devices->seed;
	}
}

static void __btrfs_reset_dev_stats(struct btrfs_device *dev)
{
	int i;

	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		btrfs_dev_stat_reset(dev, i);
}
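/*
 * On-disk layout note for the dev_stats code below (informal): a
 * btrfs_dev_stats_item is just an array of __le64 counters indexed by the
 * BTRFS_DEV_STAT_* values. The reader tolerates items shorter than
 * BTRFS_DEV_STAT_VALUES_MAX entries; counters written by an older kernel
 * that knew fewer types are loaded, and the rest are reset to zero.
 */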
int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
{
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_root *dev_root = fs_info->dev_root;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct extent_buffer *eb;
	int slot;
	int ret = 0;
	struct btrfs_device *device;
	struct btrfs_path *path = NULL;
	int i;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		int item_size;
		struct btrfs_dev_stats_item *ptr;

		key.objectid = BTRFS_DEV_STATS_OBJECTID;
		key.type = BTRFS_PERSISTENT_ITEM_KEY;
		key.offset = device->devid;
		ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0);
		if (ret) {
			__btrfs_reset_dev_stats(device);
			device->dev_stats_valid = 1;
			btrfs_release_path(path);
			continue;
		}
		slot = path->slots[0];
		eb = path->nodes[0];
		btrfs_item_key_to_cpu(eb, &found_key, slot);
		item_size = btrfs_item_size_nr(eb, slot);

		ptr = btrfs_item_ptr(eb, slot,
				     struct btrfs_dev_stats_item);

		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
			if (item_size >= (1 + i) * sizeof(__le64))
				btrfs_dev_stat_set(device, i,
					btrfs_dev_stats_value(eb, ptr, i));
			else
				btrfs_dev_stat_reset(device, i);
		}

		device->dev_stats_valid = 1;
		btrfs_dev_stat_print_on_load(device);
		btrfs_release_path(path);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

out:
	btrfs_free_path(path);
	return ret < 0 ? ret : 0;
}

static int update_dev_stat_item(struct btrfs_trans_handle *trans,
				struct btrfs_fs_info *fs_info,
				struct btrfs_device *device)
{
	struct btrfs_root *dev_root = fs_info->dev_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *eb;
	struct btrfs_dev_stats_item *ptr;
	int ret;
	int i;

	key.objectid = BTRFS_DEV_STATS_OBJECTID;
	key.type = BTRFS_PERSISTENT_ITEM_KEY;
	key.offset = device->devid;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
	if (ret < 0) {
		btrfs_warn_in_rcu(fs_info,
			"error %d while searching for dev_stats item for device %s",
			ret, rcu_str_deref(device->name));
		goto out;
	}

	if (ret == 0 &&
	    btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
		/* need to delete old one and insert a new one */
		ret = btrfs_del_item(trans, dev_root, path);
		if (ret != 0) {
			btrfs_warn_in_rcu(fs_info,
				"delete too small dev_stats item for device %s failed %d",
				rcu_str_deref(device->name), ret);
			goto out;
		}
		ret = 1;
	}

	if (ret == 1) {
		/* need to insert a new item */
		btrfs_release_path(path);
		ret = btrfs_insert_empty_item(trans, dev_root, path,
					      &key, sizeof(*ptr));
		if (ret < 0) {
			btrfs_warn_in_rcu(fs_info,
				"insert dev_stats item for device %s failed %d",
				rcu_str_deref(device->name), ret);
			goto out;
		}
	}

	eb = path->nodes[0];
	ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		btrfs_set_dev_stats_value(eb, ptr, i,
					  btrfs_dev_stat_read(device, i));
	btrfs_mark_buffer_dirty(eb);

out:
	btrfs_free_path(path);
	return ret;
}
/*
 * called from commit_transaction. Writes all changed device stats to disk.
 */
int btrfs_run_dev_stats(struct btrfs_trans_handle *trans,
			struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;
	int stats_cnt;
	int ret = 0;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		stats_cnt = atomic_read(&device->dev_stats_ccnt);
		if (!device->dev_stats_valid || stats_cnt == 0)
			continue;

		/*
		 * There is a LOAD-LOAD control dependency between the value of
		 * dev_stats_ccnt and updating the on-disk values which requires
		 * reading the in-memory counters. Such control dependencies
		 * require explicit read memory barriers.
		 *
		 * This memory barrier pairs with smp_mb__before_atomic in
		 * btrfs_dev_stat_inc/btrfs_dev_stat_set and with the full
		 * barrier implied by atomic_xchg in
		 * btrfs_dev_stats_read_and_reset
		 */
		smp_rmb();

		ret = update_dev_stat_item(trans, fs_info, device);
		if (!ret)
			atomic_sub(stats_cnt, &device->dev_stats_ccnt);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	return ret;
}

void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
{
	btrfs_dev_stat_inc(dev, index);
	btrfs_dev_stat_print_on_error(dev);
}

static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
{
	if (!dev->dev_stats_valid)
		return;
	btrfs_err_rl_in_rcu(dev->fs_info,
		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
		rcu_str_deref(dev->name),
		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
}

static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
{
	int i;

	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		if (btrfs_dev_stat_read(dev, i) != 0)
			break;
	if (i == BTRFS_DEV_STAT_VALUES_MAX)
		return; /* all values == 0, suppress message */

	btrfs_info_in_rcu(dev->fs_info,
		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
		rcu_str_deref(dev->name),
		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
}

int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info,
			struct btrfs_ioctl_get_dev_stats *stats)
{
	struct btrfs_device *dev;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	int i;

	mutex_lock(&fs_devices->device_list_mutex);
	dev = btrfs_find_device(fs_info, stats->devid, NULL, NULL);
	mutex_unlock(&fs_devices->device_list_mutex);

	if (!dev) {
		btrfs_warn(fs_info, "get dev_stats failed, device not found");
		return -ENODEV;
	} else if (!dev->dev_stats_valid) {
		btrfs_warn(fs_info, "get dev_stats failed, not yet valid");
		return -ENODEV;
	} else if (stats->flags & BTRFS_DEV_STATS_RESET) {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
			if (stats->nr_items > i)
				stats->values[i] =
					btrfs_dev_stat_read_and_reset(dev, i);
			else
				btrfs_dev_stat_reset(dev, i);
		}
	} else {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
			if (stats->nr_items > i)
				stats->values[i] = btrfs_dev_stat_read(dev, i);
	}
	if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
		stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
	return 0;
}
void btrfs_scratch_superblocks(struct block_device *bdev, const char *device_path)
{
	struct buffer_head *bh;
	struct btrfs_super_block *disk_super;
	int copy_num;

	if (!bdev)
		return;

	for (copy_num = 0; copy_num < BTRFS_SUPER_MIRROR_MAX;
		copy_num++) {

		if (btrfs_read_dev_one_super(bdev, copy_num, &bh))
			continue;

		disk_super = (struct btrfs_super_block *)bh->b_data;

		memset(&disk_super->magic, 0, sizeof(disk_super->magic));
		set_buffer_dirty(bh);
		sync_dirty_buffer(bh);
		brelse(bh);
	}

	/* Notify udev that device has changed */
	btrfs_kobject_uevent(bdev, KOBJ_CHANGE);

	/* Update ctime/mtime for device path for libblkid */
	update_dev_time(device_path);
}

/*
 * Update the size of all devices, which is used for writing out the
 * super blocks.
 */
void btrfs_update_commit_device_size(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *curr, *next;

	if (list_empty(&fs_devices->resized_devices))
		return;

	mutex_lock(&fs_devices->device_list_mutex);
	mutex_lock(&fs_info->chunk_mutex);
	list_for_each_entry_safe(curr, next, &fs_devices->resized_devices,
				 resized_list) {
		list_del_init(&curr->resized_list);
		curr->commit_total_bytes = curr->disk_total_bytes;
	}
	mutex_unlock(&fs_info->chunk_mutex);
	mutex_unlock(&fs_devices->device_list_mutex);
}

/* Must be invoked during the transaction commit */
void btrfs_update_commit_device_bytes_used(struct btrfs_transaction *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct extent_map *em;
	struct map_lookup *map;
	struct btrfs_device *dev;
	int i;

	if (list_empty(&trans->pending_chunks))
		return;

	/* In order to kick the device replace finish process */
	mutex_lock(&fs_info->chunk_mutex);
	list_for_each_entry(em, &trans->pending_chunks, list) {
		map = em->map_lookup;

		for (i = 0; i < map->num_stripes; i++) {
			dev = map->stripes[i].dev;
			dev->commit_bytes_used = dev->bytes_used;
		}
	}
	mutex_unlock(&fs_info->chunk_mutex);
}

void btrfs_set_fs_info_ptr(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	while (fs_devices) {
		fs_devices->fs_info = fs_info;
		fs_devices = fs_devices->seed;
	}
}

void btrfs_reset_fs_info_ptr(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	while (fs_devices) {
		fs_devices->fs_info = NULL;
		fs_devices = fs_devices->seed;
	}
}