/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/random.h>
#include <linux/iocontext.h>
#include <linux/capability.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/semaphore.h>
#include <asm/div64.h>
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "async-thread.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "math.h"
#include "dev-replace.h"
#include "sysfs.h"

static int init_first_rw_device(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_device *device);
static int btrfs_relocate_sys_chunks(struct btrfs_root *root);
static void __btrfs_reset_dev_stats(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);

DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);
struct list_head *btrfs_get_fs_uuids(void)
{
	return &fs_uuids;
}

static struct btrfs_fs_devices *__alloc_fs_devices(void)
{
	struct btrfs_fs_devices *fs_devs;

	fs_devs = kzalloc(sizeof(*fs_devs), GFP_NOFS);
	if (!fs_devs)
		return ERR_PTR(-ENOMEM);

	mutex_init(&fs_devs->device_list_mutex);

	INIT_LIST_HEAD(&fs_devs->devices);
	INIT_LIST_HEAD(&fs_devs->resized_devices);
	INIT_LIST_HEAD(&fs_devs->alloc_list);
	INIT_LIST_HEAD(&fs_devs->list);

	return fs_devs;
}

/**
 * alloc_fs_devices - allocate struct btrfs_fs_devices
 * @fsid: a pointer to UUID for this FS.  If NULL a new UUID is
 *		generated.
 *
 * Return: a pointer to a new &struct btrfs_fs_devices on success;
 * ERR_PTR() on error.  Returned struct is not linked onto any lists and
 * can be destroyed with kfree() right away.
 */
static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid)
{
	struct btrfs_fs_devices *fs_devs;

	fs_devs = __alloc_fs_devices();
	if (IS_ERR(fs_devs))
		return fs_devs;

	if (fsid)
		memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE);
	else
		generate_random_uuid(fs_devs->fsid);

	return fs_devs;
}

static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;
	WARN_ON(fs_devices->opened);
	while (!list_empty(&fs_devices->devices)) {
		device = list_entry(fs_devices->devices.next,
				    struct btrfs_device, dev_list);
		list_del(&device->dev_list);
		rcu_string_free(device->name);
		kfree(device);
	}
	kfree(fs_devices);
}

static void btrfs_kobject_uevent(struct block_device *bdev,
				 enum kobject_action action)
{
	int ret;

	ret = kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, action);
	if (ret)
		pr_warn("BTRFS: Sending event '%d' to kobject: '%s' (%p): failed\n",
			action,
			kobject_name(&disk_to_dev(bdev->bd_disk)->kobj),
			&disk_to_dev(bdev->bd_disk)->kobj);
}

void btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;

	while (!list_empty(&fs_uuids)) {
		fs_devices = list_entry(fs_uuids.next,
					struct btrfs_fs_devices, list);
		list_del(&fs_devices->list);
		free_fs_devices(fs_devices);
	}
}

static struct btrfs_device *__alloc_device(void)
{
	struct btrfs_device *dev;

	dev = kzalloc(sizeof(*dev), GFP_NOFS);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&dev->dev_list);
	INIT_LIST_HEAD(&dev->dev_alloc_list);
	INIT_LIST_HEAD(&dev->resized_list);

	spin_lock_init(&dev->io_lock);

	spin_lock_init(&dev->reada_lock);
	atomic_set(&dev->reada_in_flight, 0);
	atomic_set(&dev->dev_stats_ccnt, 0);
	INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_WAIT);
	INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_WAIT);

	return dev;
}

static noinline struct btrfs_device *__find_device(struct list_head *head,
						   u64 devid, u8 *uuid)
{
	struct btrfs_device *dev;

	list_for_each_entry(dev, head, dev_list) {
		if (dev->devid == devid &&
		    (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
			return dev;
		}
	}
	return NULL;
}

static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
{
	struct btrfs_fs_devices *fs_devices;

	list_for_each_entry(fs_devices, &fs_uuids, list) {
		if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
			return fs_devices;
	}
	return NULL;
}

static int
btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder,
		      int flush, struct block_device **bdev,
		      struct buffer_head **bh)
{
	int ret;

	*bdev = blkdev_get_by_path(device_path, flags, holder);

	if (IS_ERR(*bdev)) {
		ret = PTR_ERR(*bdev);
		printk(KERN_INFO "BTRFS: open %s failed\n", device_path);
		goto error;
	}

	if (flush)
		filemap_write_and_wait((*bdev)->bd_inode->i_mapping);
	ret = set_blocksize(*bdev, 4096);
	if (ret) {
		blkdev_put(*bdev, flags);
		goto error;
	}
	invalidate_bdev(*bdev);
	*bh = btrfs_read_dev_super(*bdev);
	if (!*bh) {
		ret = -EINVAL;
		blkdev_put(*bdev, flags);
		goto error;
	}

	return 0;

error:
	*bdev = NULL;
	*bh = NULL;
	return ret;
}

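/*
 * Put a chain of pending bios back at the head of the device's pending
 * list, in front of anything that was queued in the meantime.
 */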
static void requeue_list(struct btrfs_pending_bios *pending_bios,
			 struct bio *head, struct bio *tail)
{
	struct bio *old_head;

	old_head = pending_bios->head;
	pending_bios->head = head;
	if (pending_bios->tail)
		tail->bi_next = old_head;
	else
		pending_bios->tail = tail;
}

/*
 * we try to collect pending bios for a device so we don't get a large
 * number of procs sending bios down to the same device.  This greatly
 * improves the scheduler's ability to collect and merge the bios.
 *
 * But, it also turns into a long list of bios to process and that is sure
 * to eventually make the worker thread block.  The solution here is to
 * make some progress and then put this work struct back at the end of
 * the list if the block device is congested.  This way, multiple devices
 * can make progress from a single worker thread.
 */
static noinline void run_scheduled_bios(struct btrfs_device *device)
{
	struct bio *pending;
	struct backing_dev_info *bdi;
	struct btrfs_fs_info *fs_info;
	struct btrfs_pending_bios *pending_bios;
	struct bio *tail;
	struct bio *cur;
	int again = 0;
	unsigned long num_run;
	unsigned long batch_run = 0;
	unsigned long limit;
	unsigned long last_waited = 0;
	int force_reg = 0;
	int sync_pending = 0;
	struct blk_plug plug;

	/*
	 * this function runs all the bios we've collected for
	 * a particular device.  We don't want to wander off to
	 * another device without first sending all of these down.
	 * So, setup a plug here and finish it off before we return
	 */
	blk_start_plug(&plug);

	bdi = blk_get_backing_dev_info(device->bdev);
	fs_info = device->dev_root->fs_info;
	limit = btrfs_async_submit_limit(fs_info);
	limit = limit * 2 / 3;

loop:
	spin_lock(&device->io_lock);

loop_lock:
	num_run = 0;

	/* take all the bios off the list at once and process them
	 * later on (without the lock held).  But, remember the
	 * tail and other pointers so the bios can be properly reinserted
	 * into the list if we hit congestion
	 */
	if (!force_reg && device->pending_sync_bios.head) {
		pending_bios = &device->pending_sync_bios;
		force_reg = 1;
	} else {
		pending_bios = &device->pending_bios;
		force_reg = 0;
	}

	pending = pending_bios->head;
	tail = pending_bios->tail;
	WARN_ON(pending && !tail);

	/*
	 * if pending was null this time around, no bios need processing
	 * at all and we can stop.  Otherwise it'll loop back up again
	 * and do an additional check so no bios are missed.
	 *
	 * device->running_pending is used to synchronize with the
	 * schedule_bio code.
	 */
	if (device->pending_sync_bios.head == NULL &&
	    device->pending_bios.head == NULL) {
		again = 0;
		device->running_pending = 0;
	} else {
		again = 1;
		device->running_pending = 1;
	}

	pending_bios->head = NULL;
	pending_bios->tail = NULL;

	spin_unlock(&device->io_lock);

	while (pending) {

		rmb();
		/* we want to work on both lists, but do more bios on the
		 * sync list than the regular list
		 */
		if ((num_run > 32 &&
		     pending_bios != &device->pending_sync_bios &&
		     device->pending_sync_bios.head) ||
		    (num_run > 64 && pending_bios == &device->pending_sync_bios &&
		     device->pending_bios.head)) {
			spin_lock(&device->io_lock);
			requeue_list(pending_bios, pending, tail);
			goto loop_lock;
		}

		cur = pending;
		pending = pending->bi_next;
		cur->bi_next = NULL;

		if (atomic_dec_return(&fs_info->nr_async_bios) < limit &&
		    waitqueue_active(&fs_info->async_submit_wait))
			wake_up(&fs_info->async_submit_wait);

		BUG_ON(atomic_read(&cur->__bi_cnt) == 0);

		/*
		 * if we're doing the sync list, record that our
		 * plug has some sync requests on it
		 *
		 * If we're doing the regular list and there are
		 * sync requests sitting around, unplug before
		 * we add more
		 */
		if (pending_bios == &device->pending_sync_bios) {
			sync_pending = 1;
		} else if (sync_pending) {
			blk_finish_plug(&plug);
			blk_start_plug(&plug);
			sync_pending = 0;
		}

		btrfsic_submit_bio(cur->bi_rw, cur);
		num_run++;
		batch_run++;

		cond_resched();

		/*
		 * we made progress, there is more work to do and the bdi
		 * is now congested.  Back off and let other work structs
		 * run instead
		 */
		if (pending && bdi_write_congested(bdi) && batch_run > 8 &&
		    fs_info->fs_devices->open_devices > 1) {
			struct io_context *ioc;

			ioc = current->io_context;

			/*
			 * the main goal here is that we don't want to
			 * block if we're going to be able to submit
			 * more requests without blocking.
			 *
			 * This code does two great things, it pokes into
			 * the elevator code from a filesystem _and_
			 * it makes assumptions about how batching works.
			 */
			if (ioc && ioc->nr_batch_requests > 0 &&
			    time_before(jiffies, ioc->last_waited + HZ/50UL) &&
			    (last_waited == 0 ||
			     ioc->last_waited == last_waited)) {
				/*
				 * we want to go through our batch of
				 * requests and stop.  So, we copy out
				 * the ioc->last_waited time and test
				 * against it before looping
				 */
				last_waited = ioc->last_waited;
				cond_resched();
				continue;
			}
			spin_lock(&device->io_lock);
			requeue_list(pending_bios, pending, tail);
			device->running_pending = 1;

			spin_unlock(&device->io_lock);
			btrfs_queue_work(fs_info->submit_workers,
					 &device->work);
			goto done;
		}
		/* unplug every 64 requests just for good measure */
		if (batch_run % 64 == 0) {
			blk_finish_plug(&plug);
			blk_start_plug(&plug);
			sync_pending = 0;
		}
	}

	cond_resched();
	if (again)
		goto loop;

	spin_lock(&device->io_lock);
	if (device->pending_bios.head || device->pending_sync_bios.head)
		goto loop_lock;
	spin_unlock(&device->io_lock);

done:
	blk_finish_plug(&plug);
}

static void pending_bios_fn(struct btrfs_work *work)
{
	struct btrfs_device *device;

	device = container_of(work, struct btrfs_device, work);
	run_scheduled_bios(device);
}

void btrfs_free_stale_device(struct btrfs_device *cur_dev)
{
	struct btrfs_fs_devices *fs_devs;
	struct btrfs_device *dev;

	if (!cur_dev->name)
		return;

	list_for_each_entry(fs_devs, &fs_uuids, list) {
		int del = 1;

		if (fs_devs->opened)
			continue;
		if (fs_devs->seeding)
			continue;

		list_for_each_entry(dev, &fs_devs->devices, dev_list) {

			if (dev == cur_dev)
				continue;
			if (!dev->name)
				continue;

			/*
			 * Todo: This won't be enough. What if the same device
			 * comes back (with new uuid and) with its mapper path?
			 * But for now, this does help as mostly an admin will
			 * either use mapper or non mapper path throughout.
			 */
			rcu_read_lock();
			del = strcmp(rcu_str_deref(dev->name),
				     rcu_str_deref(cur_dev->name));
			rcu_read_unlock();
			if (!del)
				break;
		}

		if (!del) {
			/* delete the stale device */
			if (fs_devs->num_devices == 1) {
				btrfs_sysfs_remove_fsid(fs_devs);
				list_del(&fs_devs->list);
				free_fs_devices(fs_devs);
			} else {
				fs_devs->num_devices--;
				list_del(&dev->dev_list);
				rcu_string_free(dev->name);
				kfree(dev);
			}
			break;
		}
	}
}

/*
 * Add new device to list of registered devices
 *
 * Returns:
 * 1   - first time device is seen
 * 0   - device already known
 * < 0 - error
 */
static noinline int device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   u64 devid, struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices;
	struct rcu_string *name;
	int ret = 0;
	u64 found_transid = btrfs_super_generation(disk_super);

	fs_devices = find_fsid(disk_super->fsid);
	if (!fs_devices) {
		fs_devices = alloc_fs_devices(disk_super->fsid);
		if (IS_ERR(fs_devices))
			return PTR_ERR(fs_devices);

		list_add(&fs_devices->list, &fs_uuids);

		device = NULL;
	} else {
		device = __find_device(&fs_devices->devices, devid,
				       disk_super->dev_item.uuid);
	}

	if (!device) {
		if (fs_devices->opened)
			return -EBUSY;

		device = btrfs_alloc_device(NULL, &devid,
					    disk_super->dev_item.uuid);
		if (IS_ERR(device)) {
			/* we can safely leave the fs_devices entry around */
			return PTR_ERR(device);
		}

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name) {
			kfree(device);
			return -ENOMEM;
		}
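		/*
		 * The name is published with rcu_assign_pointer() so that
		 * lockless readers can safely dereference it via
		 * rcu_str_deref().
		 */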
		rcu_assign_pointer(device->name, name);

		mutex_lock(&fs_devices->device_list_mutex);
		list_add_rcu(&device->dev_list, &fs_devices->devices);
		fs_devices->num_devices++;
		mutex_unlock(&fs_devices->device_list_mutex);

		ret = 1;
		device->fs_devices = fs_devices;
	} else if (!device->name || strcmp(device->name->str, path)) {
		/*
		 * When FS is already mounted.
		 * 1. If you are here and if the device->name is NULL that
		 *    means this device was missing at time of FS mount.
		 * 2. If you are here and if the device->name is different
		 *    from 'path' that means either
		 *      a. The same device disappeared and reappeared with
		 *         different name. or
		 *      b. The missing-disk-which-was-replaced, has
		 *         reappeared now.
		 *
		 * We must allow 1 and 2a above.  But 2b would be spurious
		 * and unintentional.
		 *
		 * Further, in case of 1 and 2a above, the disk at 'path'
		 * would have missed some transactions when it was away and
		 * in case of 2a the stale bdev has to be updated as well.
		 * 2b must not be allowed at any time.
		 */

		/*
		 * For now, we do allow update to btrfs_fs_device through the
		 * btrfs dev scan cli after FS has been mounted.  We're still
		 * tracking a problem where systems fail mount by subvolume id
		 * when we reject replacement on a mounted FS.
		 */
		if (!fs_devices->opened && found_transid < device->generation) {
			/*
			 * That is if the FS is _not_ mounted and if you
			 * are here, that means there is more than one
			 * disk with the same uuid and devid.  We keep the
			 * one with the larger generation number or the
			 * last-in if the generations are equal.
			 */
			return -EEXIST;
		}

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name)
			return -ENOMEM;
		rcu_string_free(device->name);
		rcu_assign_pointer(device->name, name);
		if (device->missing) {
			fs_devices->missing_devices--;
			device->missing = 0;
		}
	}

	/*
	 * Unmount does not free the btrfs_device struct but would zero
	 * generation along with most of the other members.  So just update
	 * it back.  We need it to pick the disk with the largest generation
	 * (as above).
	 */
	if (!fs_devices->opened)
		device->generation = found_transid;

	/*
	 * if there is new btrfs on an already registered device,
	 * then remove the stale device entry.
	 */
	btrfs_free_stale_device(device);

	*fs_devices_ret = fs_devices;

	return ret;
}

static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
{
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_device *device;
	struct btrfs_device *orig_dev;

	fs_devices = alloc_fs_devices(orig->fsid);
	if (IS_ERR(fs_devices))
		return fs_devices;

	mutex_lock(&orig->device_list_mutex);
	fs_devices->total_devices = orig->total_devices;

	/* We have held the volume lock, it is safe to get the devices. */
	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
		struct rcu_string *name;

		device = btrfs_alloc_device(NULL, &orig_dev->devid,
					    orig_dev->uuid);
		if (IS_ERR(device))
			goto error;

		/*
		 * This is ok to do without rcu read locked because we hold the
		 * uuid mutex so nothing we touch in here is going to disappear.
		 */
		if (orig_dev->name) {
			name = rcu_string_strdup(orig_dev->name->str, GFP_NOFS);
			if (!name) {
				kfree(device);
				goto error;
			}
			rcu_assign_pointer(device->name, name);
		}

		list_add(&device->dev_list, &fs_devices->devices);
		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	}
	mutex_unlock(&orig->device_list_mutex);
	return fs_devices;
error:
	mutex_unlock(&orig->device_list_mutex);
	free_fs_devices(fs_devices);
	return ERR_PTR(-ENOMEM);
}

void btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices, int step)
{
	struct btrfs_device *device, *next;
	struct btrfs_device *latest_dev = NULL;

	mutex_lock(&uuid_mutex);
again:
	/* This is the initialized path, it is safe to release the devices. */
	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
		if (device->in_fs_metadata) {
			if (!device->is_tgtdev_for_dev_replace &&
			    (!latest_dev ||
			     device->generation > latest_dev->generation)) {
				latest_dev = device;
			}
			continue;
		}

		if (device->devid == BTRFS_DEV_REPLACE_DEVID) {
			/*
			 * In the first step, keep the device which has
			 * the correct fsid and the devid that is used
			 * for the dev_replace procedure.
			 * In the second step, the dev_replace state is
			 * read from the device tree and it is known
			 * whether the procedure is really active or
			 * not, which means whether this device is
			 * used or whether it should be removed.
			 */
			if (step == 0 || device->is_tgtdev_for_dev_replace) {
				continue;
			}
		}
		if (device->bdev) {
			blkdev_put(device->bdev, device->mode);
			device->bdev = NULL;
			fs_devices->open_devices--;
		}
		if (device->writeable) {
			list_del_init(&device->dev_alloc_list);
			device->writeable = 0;
			if (!device->is_tgtdev_for_dev_replace)
				fs_devices->rw_devices--;
		}
		list_del_init(&device->dev_list);
		fs_devices->num_devices--;
		rcu_string_free(device->name);
		kfree(device);
	}

	if (fs_devices->seed) {
		fs_devices = fs_devices->seed;
		goto again;
	}

	fs_devices->latest_bdev = latest_dev->bdev;

	mutex_unlock(&uuid_mutex);
}

static void __free_device(struct work_struct *work)
{
	struct btrfs_device *device;

	device = container_of(work, struct btrfs_device, rcu_work);

	if (device->bdev)
		blkdev_put(device->bdev, device->mode);

	rcu_string_free(device->name);
	kfree(device);
}

static void free_device(struct rcu_head *head)
{
	struct btrfs_device *device;

	device = container_of(head, struct btrfs_device, rcu);

	INIT_WORK(&device->rcu_work, __free_device);
	schedule_work(&device->rcu_work);
}

static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device, *tmp;

	if (--fs_devices->opened > 0)
		return 0;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry_safe(device, tmp, &fs_devices->devices, dev_list) {
		struct btrfs_device *new_device;
		struct rcu_string *name;

		if (device->bdev)
			fs_devices->open_devices--;

		if (device->writeable &&
		    device->devid != BTRFS_DEV_REPLACE_DEVID) {
			list_del_init(&device->dev_alloc_list);
			fs_devices->rw_devices--;
		}

		if (device->missing)
			fs_devices->missing_devices--;

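		/*
		 * Replace the device in the list with a freshly allocated
		 * copy so that lockless RCU readers never see a half
		 * torn-down device; the old one is freed after a grace
		 * period below.
		 */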
		new_device = btrfs_alloc_device(NULL, &device->devid,
						device->uuid);
		BUG_ON(IS_ERR(new_device)); /* -ENOMEM */

		/* Safe because we are under uuid_mutex */
		if (device->name) {
			name = rcu_string_strdup(device->name->str, GFP_NOFS);
			BUG_ON(!name); /* -ENOMEM */
			rcu_assign_pointer(new_device->name, name);
		}

		list_replace_rcu(&device->dev_list, &new_device->dev_list);
		new_device->fs_devices = device->fs_devices;

		call_rcu(&device->rcu, free_device);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	WARN_ON(fs_devices->open_devices);
	WARN_ON(fs_devices->rw_devices);
	fs_devices->opened = 0;
	fs_devices->seeding = 0;

	return 0;
}

int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_fs_devices *seed_devices = NULL;
	int ret;

	mutex_lock(&uuid_mutex);
	ret = __btrfs_close_devices(fs_devices);
	if (!fs_devices->opened) {
		seed_devices = fs_devices->seed;
		fs_devices->seed = NULL;
	}
	mutex_unlock(&uuid_mutex);

	while (seed_devices) {
		fs_devices = seed_devices;
		seed_devices = fs_devices->seed;
		__btrfs_close_devices(fs_devices);
		free_fs_devices(fs_devices);
	}
	/*
	 * Wait for rcu kworkers under __btrfs_close_devices
	 * to finish all blkdev_puts so device is really
	 * free when umount is done.
	 */
	rcu_barrier();
	return ret;
}

static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
				fmode_t flags, void *holder)
{
	struct request_queue *q;
	struct block_device *bdev;
	struct list_head *head = &fs_devices->devices;
	struct btrfs_device *device;
	struct btrfs_device *latest_dev = NULL;
	struct buffer_head *bh;
	struct btrfs_super_block *disk_super;
	u64 devid;
	int seeding = 1;
	int ret = 0;

	flags |= FMODE_EXCL;

	list_for_each_entry(device, head, dev_list) {
		if (device->bdev)
			continue;
		if (!device->name)
			continue;

		/* Just open everything we can; ignore failures here */
		if (btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
					  &bdev, &bh))
			continue;

		disk_super = (struct btrfs_super_block *)bh->b_data;
		devid = btrfs_stack_device_id(&disk_super->dev_item);
		if (devid != device->devid)
			goto error_brelse;

		if (memcmp(device->uuid, disk_super->dev_item.uuid,
			   BTRFS_UUID_SIZE))
			goto error_brelse;

		device->generation = btrfs_super_generation(disk_super);
		if (!latest_dev ||
		    device->generation > latest_dev->generation)
			latest_dev = device;

		if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
			device->writeable = 0;
		} else {
			device->writeable = !bdev_read_only(bdev);
			seeding = 0;
		}

		q = bdev_get_queue(bdev);
		if (blk_queue_discard(q))
			device->can_discard = 1;

		device->bdev = bdev;
		device->in_fs_metadata = 0;
		device->mode = flags;

		if (!blk_queue_nonrot(bdev_get_queue(bdev)))
			fs_devices->rotating = 1;

		fs_devices->open_devices++;
		if (device->writeable &&
		    device->devid != BTRFS_DEV_REPLACE_DEVID) {
			fs_devices->rw_devices++;
			list_add(&device->dev_alloc_list,
				 &fs_devices->alloc_list);
		}
		brelse(bh);
		continue;

error_brelse:
		brelse(bh);
		blkdev_put(bdev, flags);
		continue;
	}
	if (fs_devices->open_devices == 0) {
		ret = -EINVAL;
		goto out;
	}
	fs_devices->seeding = seeding;
	fs_devices->opened = 1;
	fs_devices->latest_bdev = latest_dev->bdev;
	fs_devices->total_rw_bytes = 0;
out:
	return ret;
}

int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       fmode_t flags, void *holder)
{
	int ret;

	mutex_lock(&uuid_mutex);
	if (fs_devices->opened) {
		fs_devices->opened++;
		ret = 0;
	} else {
		ret = __btrfs_open_devices(fs_devices, flags, holder);
	}
	mutex_unlock(&uuid_mutex);
	return ret;
}

/*
 * Look for a btrfs signature on a device.  This may be called out of the
 * mount path and we are not allowed to call set_blocksize during the scan.
 * The superblock is read via pagecache.
 */
int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
			  struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_super_block *disk_super;
	struct block_device *bdev;
	struct page *page;
	void *p;
	int ret = -EINVAL;
	u64 devid;
	u64 transid;
	u64 total_devices;
	u64 bytenr;
	pgoff_t index;

	/*
	 * we would like to check all the supers, but that would make
	 * a btrfs mount succeed after a mkfs from a different FS.
	 * So, we need to add a special mount option to scan for
	 * later supers, using BTRFS_SUPER_MIRROR_MAX instead
	 */
	bytenr = btrfs_sb_offset(0);
	flags |= FMODE_EXCL;
	mutex_lock(&uuid_mutex);

	bdev = blkdev_get_by_path(path, flags, holder);

	if (IS_ERR(bdev)) {
		ret = PTR_ERR(bdev);
		goto error;
	}

	/* make sure our super fits in the device */
	if (bytenr + PAGE_CACHE_SIZE >= i_size_read(bdev->bd_inode))
		goto error_bdev_put;

	/* make sure our super fits in the page */
	if (sizeof(*disk_super) > PAGE_CACHE_SIZE)
		goto error_bdev_put;

	/* make sure our super doesn't straddle pages on disk */
	index = bytenr >> PAGE_CACHE_SHIFT;
	if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_CACHE_SHIFT != index)
		goto error_bdev_put;

	/* pull in the page with our super */
	page = read_cache_page_gfp(bdev->bd_inode->i_mapping,
				   index, GFP_NOFS);

	if (IS_ERR_OR_NULL(page))
		goto error_bdev_put;

	p = kmap(page);

	/* align our pointer to the offset of the super block */
	disk_super = p + (bytenr & ~PAGE_CACHE_MASK);

	if (btrfs_super_bytenr(disk_super) != bytenr ||
	    btrfs_super_magic(disk_super) != BTRFS_MAGIC)
		goto error_unmap;

	devid = btrfs_stack_device_id(&disk_super->dev_item);
	transid = btrfs_super_generation(disk_super);
	total_devices = btrfs_super_num_devices(disk_super);

	ret = device_list_add(path, disk_super, devid, fs_devices_ret);
	if (ret > 0) {
		if (disk_super->label[0]) {
			if (disk_super->label[BTRFS_LABEL_SIZE - 1])
				disk_super->label[BTRFS_LABEL_SIZE - 1] = '\0';
			printk(KERN_INFO "BTRFS: device label %s ", disk_super->label);
		} else {
			printk(KERN_INFO "BTRFS: device fsid %pU ", disk_super->fsid);
		}

		printk(KERN_CONT "devid %llu transid %llu %s\n", devid, transid, path);
		ret = 0;
	}
	if (!ret && fs_devices_ret)
		(*fs_devices_ret)->total_devices = total_devices;

error_unmap:
	kunmap(page);
	page_cache_release(page);

error_bdev_put:
	blkdev_put(bdev, flags);
error:
	mutex_unlock(&uuid_mutex);
	return ret;
}

/* helper to account the used device space in the range */
int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
				   u64 end, u64 *length)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 extent_end;
	int ret;
	int slot;
	struct extent_buffer *l;

	*length = 0;

	if (start >= device->total_bytes || device->is_tgtdev_for_dev_replace)
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = 2;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid, key.type);
		if (ret < 0)
			goto out;
	}

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (key.type != BTRFS_DEV_EXTENT_KEY)
			goto next;

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (key.offset <= start && extent_end > end) {
			*length = end - start + 1;
			break;
		} else if (key.offset <= start && extent_end > start)
			*length += extent_end - start;
		else if (key.offset > start && extent_end <= end)
			*length += extent_end - key.offset;
		else if (key.offset > start && key.offset <= end) {
			*length += end - key.offset + 1;
			break;
		} else if (key.offset > end)
			break;

next:
		path->slots[0]++;
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

static int contains_pending_extent(struct btrfs_trans_handle *trans,
				   struct btrfs_device *device,
				   u64 *start, u64 len)
{
	struct extent_map *em;
	struct list_head *search_list = &trans->transaction->pending_chunks;
	int ret = 0;
	u64 physical_start = *start;

again:
	list_for_each_entry(em, search_list, list) {
		struct map_lookup *map;
		int i;

		map = (struct map_lookup *)em->bdev;
		for (i = 0; i < map->num_stripes; i++) {
			u64 end;

			if (map->stripes[i].dev != device)
				continue;
			if (map->stripes[i].physical >= physical_start + len ||
			    map->stripes[i].physical + em->orig_block_len <=
			    physical_start)
				continue;
			/*
			 * Make sure that while processing the pinned list we do
			 * not override our *start with a lower value, because
			 * we can have pinned chunks that fall within this
			 * device hole and that have lower physical addresses
			 * than the pending chunks we processed before.  If we
			 * do not take this special care we can end up getting
			 * 2 pending chunks that start at the same physical
			 * device offsets because the end offset of a pinned
			 * chunk can be equal to the start offset of some
			 * pending chunk.
			 */
			end = map->stripes[i].physical + em->orig_block_len;
			if (end > *start) {
				*start = end;
				ret = 1;
			}
		}
	}
	if (search_list == &trans->transaction->pending_chunks) {
		search_list = &trans->root->fs_info->pinned_chunks;
		goto again;
	}

	return ret;
}

/*
 * find_free_dev_extent - find free space in the specified device
 * @device:	the device which we search the free space in
 * @num_bytes:	the size of the free space that we need
 * @start:	store the start of the free space.
 * @len:	the size of the free space that we find, or the size of the max
 *		free space if we don't find suitable free space
 *
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
 *
 * @start is used to store the start of the free space if we find it.  But if
 * we don't find suitable free space, it will be used to store the start
 * position of the max free space.
 *
 * @len is used to store the size of the free space that we find.
 * But if we don't find suitable free space, it is used to store the size of
 * the max free space.
 */
int find_free_dev_extent(struct btrfs_trans_handle *trans,
			 struct btrfs_device *device, u64 num_bytes,
			 u64 *start, u64 *len)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 hole_size;
	u64 max_hole_start;
	u64 max_hole_size;
	u64 extent_end;
	u64 search_start;
	u64 search_end = device->total_bytes;
	int ret;
	int slot;
	struct extent_buffer *l;

	/* FIXME use last free of some kind */

	/* we don't want to overwrite the superblock on the drive,
	 * so we make sure to start at an offset of at least 1MB
	 */
	search_start = max(root->fs_info->alloc_start, 1024ull * 1024);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	max_hole_start = search_start;
	max_hole_size = 0;

again:
	if (search_start >= search_end || device->is_tgtdev_for_dev_replace) {
		ret = -ENOSPC;
		goto out;
	}

	path->reada = 2;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid, key.type);
		if (ret < 0)
			goto out;
	}

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (key.type != BTRFS_DEV_EXTENT_KEY)
			goto next;

		if (key.offset > search_start) {
			hole_size = key.offset - search_start;

			/*
			 * Have to check before we set max_hole_start, otherwise
			 * we could end up sending back this offset anyway.
			 */
			if (contains_pending_extent(trans, device,
						    &search_start,
						    hole_size)) {
				if (key.offset >= search_start) {
					hole_size = key.offset - search_start;
				} else {
					WARN_ON_ONCE(1);
					hole_size = 0;
				}
			}

			if (hole_size > max_hole_size) {
				max_hole_start = search_start;
				max_hole_size = hole_size;
			}

			/*
			 * If this free space is greater than what we need,
			 * it must be the max free space that we have found
			 * until now, so max_hole_start must point to the start
			 * of this free space and the length of this free space
			 * is stored in max_hole_size.  Thus, we return
			 * max_hole_start and max_hole_size and go back to the
			 * caller.
			 */
			if (hole_size >= num_bytes) {
				ret = 0;
				goto out;
			}
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (extent_end > search_start)
			search_start = extent_end;
next:
		path->slots[0]++;
		cond_resched();
	}

	/*
	 * At this point, search_start should be the end of
	 * allocated dev extents, and when shrinking the device,
	 * search_end may be smaller than search_start.
	 */
	if (search_end > search_start) {
		hole_size = search_end - search_start;

		if (contains_pending_extent(trans, device, &search_start,
					    hole_size)) {
			btrfs_release_path(path);
			goto again;
		}

		if (hole_size > max_hole_size) {
			max_hole_start = search_start;
			max_hole_size = hole_size;
		}
	}

	/* See above. */
	if (max_hole_size < num_bytes)
		ret = -ENOSPC;
	else
		ret = 0;

out:
	btrfs_free_path(path);
	*start = max_hole_start;
	if (len)
		*len = max_hole_size;
	return ret;
}

static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
				 struct btrfs_device *device,
				 u64 start, u64 *dev_extent_len)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf = NULL;
	struct btrfs_dev_extent *extent = NULL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
again:
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid,
					  BTRFS_DEV_EXTENT_KEY);
		if (ret)
			goto out;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
		BUG_ON(found_key.offset > start || found_key.offset +
		       btrfs_dev_extent_length(leaf, extent) < start);
		key = found_key;
		btrfs_release_path(path);
		goto again;
	} else if (ret == 0) {
		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
	} else {
		btrfs_error(root->fs_info, ret, "Slot search failed");
		goto out;
	}

	*dev_extent_len = btrfs_dev_extent_length(leaf, extent);

	ret = btrfs_del_item(trans, root, path);
	if (ret) {
		btrfs_error(root->fs_info, ret,
			    "Failed to remove dev extent item");
	} else {
		trans->transaction->have_free_bgs = 1;
	}
out:
	btrfs_free_path(path);
	return ret;
}

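/*
 * Insert a dev extent item for the given device, recording which chunk
 * owns the byte range [start, start + num_bytes) on that device.
 */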
static int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
				  struct btrfs_device *device,
				  u64 chunk_tree, u64 chunk_objectid,
				  u64 chunk_offset, u64 start, u64 num_bytes)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *extent;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	WARN_ON(!device->in_fs_metadata);
	WARN_ON(device->is_tgtdev_for_dev_replace);
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*extent));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	extent = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_dev_extent);
	btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
	btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);

	write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
		    btrfs_dev_extent_chunk_tree_uuid(extent), BTRFS_UUID_SIZE);

	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return ret;
}

static u64 find_next_chunk(struct btrfs_fs_info *fs_info)
{
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	struct rb_node *n;
	u64 ret = 0;

	em_tree = &fs_info->mapping_tree.map_tree;
	read_lock(&em_tree->lock);
	n = rb_last(&em_tree->map);
	if (n) {
		em = rb_entry(n, struct extent_map, rb_node);
		ret = em->start + em->len;
	}
	read_unlock(&em_tree->lock);

	return ret;
}

static noinline int find_next_devid(struct btrfs_fs_info *fs_info,
				    u64 *devid_ret)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0); /* Corruption */

	ret = btrfs_previous_item(fs_info->chunk_root, path,
				  BTRFS_DEV_ITEMS_OBJECTID,
				  BTRFS_DEV_ITEM_KEY);
	if (ret) {
		*devid_ret = 1;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		*devid_ret = found_key.offset + 1;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

/*
 * the device information is stored in the chunk root
 * the btrfs_device struct should be fully filled in
 */
static int btrfs_add_device(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root,
			    struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long ptr;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

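	/* dev items live in the chunk tree, keyed by devid */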
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*dev_item));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_generation(leaf, dev_item, 0);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item,
				     btrfs_device_get_disk_total_bytes(device));
	btrfs_set_device_bytes_used(leaf, dev_item,
				    btrfs_device_get_bytes_used(device));
	btrfs_set_device_group(leaf, dev_item, 0);
	btrfs_set_device_seek_speed(leaf, dev_item, 0);
	btrfs_set_device_bandwidth(leaf, dev_item, 0);
	btrfs_set_device_start_offset(leaf, dev_item, 0);

	ptr = btrfs_device_uuid(dev_item);
	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
	ptr = btrfs_device_fsid(dev_item);
	write_extent_buffer(leaf, root->fs_info->fsid, ptr, BTRFS_UUID_SIZE);
	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Function to update ctime/mtime for a given device path.
 * Mainly used for ctime/mtime based probe like libblkid.
 */
static void update_dev_time(char *path_name)
{
	struct file *filp;

	filp = filp_open(path_name, O_RDWR, 0);
	if (IS_ERR(filp))
		return;
	file_update_time(filp);
	filp_close(filp, NULL);
	return;
}

static int btrfs_rm_dev_item(struct btrfs_root *root,
			     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_trans_handle *trans;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
	if (ret)
		goto out;
out:
	btrfs_free_path(path);
	btrfs_commit_transaction(trans, root);
	return ret;
}

int btrfs_rm_device(struct btrfs_root *root, char *device_path)
{
	struct btrfs_device *device;
	struct btrfs_device *next_device;
	struct block_device *bdev;
	struct buffer_head *bh = NULL;
	struct btrfs_super_block *disk_super;
	struct btrfs_fs_devices *cur_devices;
	u64 all_avail;
	u64 devid;
	u64 num_devices;
	u8 *dev_uuid;
	unsigned seq;
	int ret = 0;
	bool clear_super = false;

	mutex_lock(&uuid_mutex);

	do {
		seq = read_seqbegin(&root->fs_info->profiles_lock);

		all_avail = root->fs_info->avail_data_alloc_bits |
			    root->fs_info->avail_system_alloc_bits |
			    root->fs_info->avail_metadata_alloc_bits;
	} while (read_seqretry(&root->fs_info->profiles_lock, seq));

	num_devices = root->fs_info->fs_devices->num_devices;
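	/*
	 * An ongoing dev replace target is included in num_devices, but it
	 * must not count towards the redundancy minimums checked below.
	 */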
	btrfs_dev_replace_lock(&root->fs_info->dev_replace);
	if (btrfs_dev_replace_is_ongoing(&root->fs_info->dev_replace)) {
		WARN_ON(num_devices < 1);
		num_devices--;
	}
	btrfs_dev_replace_unlock(&root->fs_info->dev_replace);

	if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) && num_devices <= 4) {
		ret = BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET;
		goto out;
	}

	if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) && num_devices <= 2) {
		ret = BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET;
		goto out;
	}

	if ((all_avail & BTRFS_BLOCK_GROUP_RAID5) &&
	    root->fs_info->fs_devices->rw_devices <= 2) {
		ret = BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET;
		goto out;
	}
	if ((all_avail & BTRFS_BLOCK_GROUP_RAID6) &&
	    root->fs_info->fs_devices->rw_devices <= 3) {
		ret = BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET;
		goto out;
	}

	if (strcmp(device_path, "missing") == 0) {
		struct list_head *devices;
		struct btrfs_device *tmp;

		device = NULL;
		devices = &root->fs_info->fs_devices->devices;
		/*
		 * It is safe to read the devices since the volume_mutex
		 * is held.
		 */
		list_for_each_entry(tmp, devices, dev_list) {
			if (tmp->in_fs_metadata &&
			    !tmp->is_tgtdev_for_dev_replace &&
			    !tmp->bdev) {
				device = tmp;
				break;
			}
		}
		bdev = NULL;
		bh = NULL;
		disk_super = NULL;
		if (!device) {
			ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND;
			goto out;
		}
	} else {
		ret = btrfs_get_bdev_and_sb(device_path,
					    FMODE_WRITE | FMODE_EXCL,
					    root->fs_info->bdev_holder, 0,
					    &bdev, &bh);
		if (ret)
			goto out;
		disk_super = (struct btrfs_super_block *)bh->b_data;
		devid = btrfs_stack_device_id(&disk_super->dev_item);
		dev_uuid = disk_super->dev_item.uuid;
		device = btrfs_find_device(root->fs_info, devid, dev_uuid,
					   disk_super->fsid);
		if (!device) {
			ret = -ENOENT;
			goto error_brelse;
		}
	}

	if (device->is_tgtdev_for_dev_replace) {
		ret = BTRFS_ERROR_DEV_TGT_REPLACE;
		goto error_brelse;
	}

	if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) {
		ret = BTRFS_ERROR_DEV_ONLY_WRITABLE;
		goto error_brelse;
	}

	if (device->writeable) {
		lock_chunks(root);
		list_del_init(&device->dev_alloc_list);
		device->fs_devices->rw_devices--;
		unlock_chunks(root);
		clear_super = true;
	}

	mutex_unlock(&uuid_mutex);
	ret = btrfs_shrink_device(device, 0);
	mutex_lock(&uuid_mutex);
	if (ret)
		goto error_undo;

	/*
	 * TODO: the superblock still includes this device in its num_devices
	 * counter although write_all_supers() is not locked out.  This
	 * could give a filesystem state which requires a degraded mount.
	 */
	ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
	if (ret)
		goto error_undo;

	device->in_fs_metadata = 0;
	btrfs_scrub_cancel_dev(root->fs_info, device);

	/*
	 * the device list mutex makes sure that we don't change
	 * the device list while someone else is writing out all
	 * the device supers.  Whoever is writing all supers, should
	 * lock the device list mutex before getting the number of
	 * devices in the super block (super_copy).  Conversely,
	 * whoever updates the number of devices in the super block
	 * (super_copy) should hold the device list mutex.
1768 */ 1769 1770 cur_devices = device->fs_devices; 1771 mutex_lock(&root->fs_info->fs_devices->device_list_mutex); 1772 list_del_rcu(&device->dev_list); 1773 1774 device->fs_devices->num_devices--; 1775 device->fs_devices->total_devices--; 1776 1777 if (device->missing) 1778 device->fs_devices->missing_devices--; 1779 1780 next_device = list_entry(root->fs_info->fs_devices->devices.next, 1781 struct btrfs_device, dev_list); 1782 if (device->bdev == root->fs_info->sb->s_bdev) 1783 root->fs_info->sb->s_bdev = next_device->bdev; 1784 if (device->bdev == root->fs_info->fs_devices->latest_bdev) 1785 root->fs_info->fs_devices->latest_bdev = next_device->bdev; 1786 1787 if (device->bdev) { 1788 device->fs_devices->open_devices--; 1789 /* remove sysfs entry */ 1790 btrfs_kobj_rm_device(root->fs_info->fs_devices, device); 1791 } 1792 1793 call_rcu(&device->rcu, free_device); 1794 1795 num_devices = btrfs_super_num_devices(root->fs_info->super_copy) - 1; 1796 btrfs_set_super_num_devices(root->fs_info->super_copy, num_devices); 1797 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); 1798 1799 if (cur_devices->open_devices == 0) { 1800 struct btrfs_fs_devices *fs_devices; 1801 fs_devices = root->fs_info->fs_devices; 1802 while (fs_devices) { 1803 if (fs_devices->seed == cur_devices) { 1804 fs_devices->seed = cur_devices->seed; 1805 break; 1806 } 1807 fs_devices = fs_devices->seed; 1808 } 1809 cur_devices->seed = NULL; 1810 __btrfs_close_devices(cur_devices); 1811 free_fs_devices(cur_devices); 1812 } 1813 1814 root->fs_info->num_tolerated_disk_barrier_failures = 1815 btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info); 1816 1817 /* 1818 * at this point, the device is zero sized. We want to 1819 * remove it from the devices list and zero out the old super 1820 */ 1821 if (clear_super && disk_super) { 1822 u64 bytenr; 1823 int i; 1824 1825 /* make sure this device isn't detected as part of 1826 * the FS anymore 1827 */ 1828 memset(&disk_super->magic, 0, sizeof(disk_super->magic)); 1829 set_buffer_dirty(bh); 1830 sync_dirty_buffer(bh); 1831 1832 /* clear the mirror copies of super block on the disk 1833 * being removed, 0th copy is been taken care above and 1834 * the below would take of the rest 1835 */ 1836 for (i = 1; i < BTRFS_SUPER_MIRROR_MAX; i++) { 1837 bytenr = btrfs_sb_offset(i); 1838 if (bytenr + BTRFS_SUPER_INFO_SIZE >= 1839 i_size_read(bdev->bd_inode)) 1840 break; 1841 1842 brelse(bh); 1843 bh = __bread(bdev, bytenr / 4096, 1844 BTRFS_SUPER_INFO_SIZE); 1845 if (!bh) 1846 continue; 1847 1848 disk_super = (struct btrfs_super_block *)bh->b_data; 1849 1850 if (btrfs_super_bytenr(disk_super) != bytenr || 1851 btrfs_super_magic(disk_super) != BTRFS_MAGIC) { 1852 continue; 1853 } 1854 memset(&disk_super->magic, 0, 1855 sizeof(disk_super->magic)); 1856 set_buffer_dirty(bh); 1857 sync_dirty_buffer(bh); 1858 } 1859 } 1860 1861 ret = 0; 1862 1863 if (bdev) { 1864 /* Notify udev that device has changed */ 1865 btrfs_kobject_uevent(bdev, KOBJ_CHANGE); 1866 1867 /* Update ctime/mtime for device path for libblkid */ 1868 update_dev_time(device_path); 1869 } 1870 1871 error_brelse: 1872 brelse(bh); 1873 if (bdev) 1874 blkdev_put(bdev, FMODE_READ | FMODE_EXCL); 1875 out: 1876 mutex_unlock(&uuid_mutex); 1877 return ret; 1878 error_undo: 1879 if (device->writeable) { 1880 lock_chunks(root); 1881 list_add(&device->dev_alloc_list, 1882 &root->fs_info->fs_devices->alloc_list); 1883 device->fs_devices->rw_devices++; 1884 unlock_chunks(root); 1885 } 1886 goto error_brelse; 1887 } 1888 1889 void 
void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_fs_info *fs_info,
					struct btrfs_device *srcdev)
{
	struct btrfs_fs_devices *fs_devices;

	WARN_ON(!mutex_is_locked(&fs_info->fs_devices->device_list_mutex));

	/*
	 * in case of fs with no seed, srcdev->fs_devices will point
	 * to fs_devices of fs_info.  However when the dev being replaced is
	 * a seed dev it will point to the seed's local fs_devices.  In short
	 * srcdev will have its correct fs_devices in both the cases.
	 */
	fs_devices = srcdev->fs_devices;

	list_del_rcu(&srcdev->dev_list);
	list_del_rcu(&srcdev->dev_alloc_list);
	fs_devices->num_devices--;
	if (srcdev->missing)
		fs_devices->missing_devices--;

	if (srcdev->writeable) {
		fs_devices->rw_devices--;
		/* zero out the old super if it is writable */
		btrfs_scratch_superblock(srcdev);
	}

	if (srcdev->bdev)
		fs_devices->open_devices--;
}

void btrfs_rm_dev_replace_free_srcdev(struct btrfs_fs_info *fs_info,
				      struct btrfs_device *srcdev)
{
	struct btrfs_fs_devices *fs_devices = srcdev->fs_devices;

	call_rcu(&srcdev->rcu, free_device);

	/*
	 * unless fs_devices is a seed fs, num_devices shouldn't go
	 * to zero
	 */
	BUG_ON(!fs_devices->num_devices && !fs_devices->seeding);

	/* if there are no devs left we'd rather delete the fs_devices */
	if (!fs_devices->num_devices) {
		struct btrfs_fs_devices *tmp_fs_devices;

		tmp_fs_devices = fs_info->fs_devices;
		while (tmp_fs_devices) {
			if (tmp_fs_devices->seed == fs_devices) {
				tmp_fs_devices->seed = fs_devices->seed;
				break;
			}
			tmp_fs_devices = tmp_fs_devices->seed;
		}
		fs_devices->seed = NULL;
		__btrfs_close_devices(fs_devices);
		free_fs_devices(fs_devices);
	}
}

void btrfs_destroy_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
				      struct btrfs_device *tgtdev)
{
	struct btrfs_device *next_device;

	mutex_lock(&uuid_mutex);
	WARN_ON(!tgtdev);
	mutex_lock(&fs_info->fs_devices->device_list_mutex);

	btrfs_kobj_rm_device(fs_info->fs_devices, tgtdev);

	if (tgtdev->bdev) {
		btrfs_scratch_superblock(tgtdev);
		fs_info->fs_devices->open_devices--;
	}
	fs_info->fs_devices->num_devices--;

	next_device = list_entry(fs_info->fs_devices->devices.next,
				 struct btrfs_device, dev_list);
	if (tgtdev->bdev == fs_info->sb->s_bdev)
		fs_info->sb->s_bdev = next_device->bdev;
	if (tgtdev->bdev == fs_info->fs_devices->latest_bdev)
		fs_info->fs_devices->latest_bdev = next_device->bdev;
	list_del_rcu(&tgtdev->dev_list);

	call_rcu(&tgtdev->rcu, free_device);

	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
	mutex_unlock(&uuid_mutex);
}

static int btrfs_find_device_by_path(struct btrfs_root *root, char *device_path,
				     struct btrfs_device **device)
{
	int ret = 0;
	struct btrfs_super_block *disk_super;
	u64 devid;
	u8 *dev_uuid;
	struct block_device *bdev;
	struct buffer_head *bh;

	*device = NULL;
	ret = btrfs_get_bdev_and_sb(device_path, FMODE_READ,
				    root->fs_info->bdev_holder, 0, &bdev, &bh);
	if (ret)
		return ret;
	disk_super = (struct btrfs_super_block *)bh->b_data;
	devid = btrfs_stack_device_id(&disk_super->dev_item);
	dev_uuid = disk_super->dev_item.uuid;
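	/* match on devid, device uuid and the fsid read from the superblock */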
	*device = btrfs_find_device(root->fs_info, devid, dev_uuid,
				    disk_super->fsid);
	brelse(bh);
	if (!*device)
		ret = -ENOENT;
	blkdev_put(bdev, FMODE_READ);
	return ret;
}

int btrfs_find_device_missing_or_by_path(struct btrfs_root *root,
					 char *device_path,
					 struct btrfs_device **device)
{
	*device = NULL;
	if (strcmp(device_path, "missing") == 0) {
		struct list_head *devices;
		struct btrfs_device *tmp;

		devices = &root->fs_info->fs_devices->devices;
		/*
		 * It is safe to read the devices since the volume_mutex
		 * is held by the caller.
		 */
		list_for_each_entry(tmp, devices, dev_list) {
			if (tmp->in_fs_metadata && !tmp->bdev) {
				*device = tmp;
				break;
			}
		}

		if (!*device) {
			btrfs_err(root->fs_info, "no missing device found");
			return -ENOENT;
		}

		return 0;
	} else {
		return btrfs_find_device_by_path(root, device_path, device);
	}
}

/*
 * does all the dirty work required for changing file system's UUID.
 */
static int btrfs_prepare_sprout(struct btrfs_root *root)
{
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
	struct btrfs_fs_devices *old_devices;
	struct btrfs_fs_devices *seed_devices;
	struct btrfs_super_block *disk_super = root->fs_info->super_copy;
	struct btrfs_device *device;
	u64 super_flags;

	BUG_ON(!mutex_is_locked(&uuid_mutex));
	if (!fs_devices->seeding)
		return -EINVAL;

	seed_devices = __alloc_fs_devices();
	if (IS_ERR(seed_devices))
		return PTR_ERR(seed_devices);

	old_devices = clone_fs_devices(fs_devices);
	if (IS_ERR(old_devices)) {
		kfree(seed_devices);
		return PTR_ERR(old_devices);
	}

	list_add(&old_devices->list, &fs_uuids);

	memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
	seed_devices->opened = 1;
	INIT_LIST_HEAD(&seed_devices->devices);
	INIT_LIST_HEAD(&seed_devices->alloc_list);
	mutex_init(&seed_devices->device_list_mutex);

	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
			      synchronize_rcu);
	list_for_each_entry(device, &seed_devices->devices, dev_list)
		device->fs_devices = seed_devices;

	lock_chunks(root);
	list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
	unlock_chunks(root);

	fs_devices->seeding = 0;
	fs_devices->num_devices = 0;
	fs_devices->open_devices = 0;
	fs_devices->missing_devices = 0;
	fs_devices->rotating = 0;
	fs_devices->seed = seed_devices;

	generate_random_uuid(fs_devices->fsid);
	memcpy(root->fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
	memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	super_flags = btrfs_super_flags(disk_super) &
		      ~BTRFS_SUPER_FLAG_SEEDING;
	btrfs_set_super_flags(disk_super, super_flags);

	return 0;
}

/*
 * store the expected generation for seed devices in device items.
2106 */ 2107 static int btrfs_finish_sprout(struct btrfs_trans_handle *trans, 2108 struct btrfs_root *root) 2109 { 2110 struct btrfs_path *path; 2111 struct extent_buffer *leaf; 2112 struct btrfs_dev_item *dev_item; 2113 struct btrfs_device *device; 2114 struct btrfs_key key; 2115 u8 fs_uuid[BTRFS_UUID_SIZE]; 2116 u8 dev_uuid[BTRFS_UUID_SIZE]; 2117 u64 devid; 2118 int ret; 2119 2120 path = btrfs_alloc_path(); 2121 if (!path) 2122 return -ENOMEM; 2123 2124 root = root->fs_info->chunk_root; 2125 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 2126 key.offset = 0; 2127 key.type = BTRFS_DEV_ITEM_KEY; 2128 2129 while (1) { 2130 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 2131 if (ret < 0) 2132 goto error; 2133 2134 leaf = path->nodes[0]; 2135 next_slot: 2136 if (path->slots[0] >= btrfs_header_nritems(leaf)) { 2137 ret = btrfs_next_leaf(root, path); 2138 if (ret > 0) 2139 break; 2140 if (ret < 0) 2141 goto error; 2142 leaf = path->nodes[0]; 2143 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 2144 btrfs_release_path(path); 2145 continue; 2146 } 2147 2148 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 2149 if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID || 2150 key.type != BTRFS_DEV_ITEM_KEY) 2151 break; 2152 2153 dev_item = btrfs_item_ptr(leaf, path->slots[0], 2154 struct btrfs_dev_item); 2155 devid = btrfs_device_id(leaf, dev_item); 2156 read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item), 2157 BTRFS_UUID_SIZE); 2158 read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item), 2159 BTRFS_UUID_SIZE); 2160 device = btrfs_find_device(root->fs_info, devid, dev_uuid, 2161 fs_uuid); 2162 BUG_ON(!device); /* Logic error */ 2163 2164 if (device->fs_devices->seeding) { 2165 btrfs_set_device_generation(leaf, dev_item, 2166 device->generation); 2167 btrfs_mark_buffer_dirty(leaf); 2168 } 2169 2170 path->slots[0]++; 2171 goto next_slot; 2172 } 2173 ret = 0; 2174 error: 2175 btrfs_free_path(path); 2176 return ret; 2177 } 2178 2179 int btrfs_init_new_device(struct btrfs_root *root, char *device_path) 2180 { 2181 struct request_queue *q; 2182 struct btrfs_trans_handle *trans; 2183 struct btrfs_device *device; 2184 struct block_device *bdev; 2185 struct list_head *devices; 2186 struct super_block *sb = root->fs_info->sb; 2187 struct rcu_string *name; 2188 u64 tmp; 2189 int seeding_dev = 0; 2190 int ret = 0; 2191 2192 if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding) 2193 return -EROFS; 2194 2195 bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL, 2196 root->fs_info->bdev_holder); 2197 if (IS_ERR(bdev)) 2198 return PTR_ERR(bdev); 2199 2200 if (root->fs_info->fs_devices->seeding) { 2201 seeding_dev = 1; 2202 down_write(&sb->s_umount); 2203 mutex_lock(&uuid_mutex); 2204 } 2205 2206 filemap_write_and_wait(bdev->bd_inode->i_mapping); 2207 2208 devices = &root->fs_info->fs_devices->devices; 2209 2210 mutex_lock(&root->fs_info->fs_devices->device_list_mutex); 2211 list_for_each_entry(device, devices, dev_list) { 2212 if (device->bdev == bdev) { 2213 ret = -EEXIST; 2214 mutex_unlock( 2215 &root->fs_info->fs_devices->device_list_mutex); 2216 goto error; 2217 } 2218 } 2219 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); 2220 2221 device = btrfs_alloc_device(root->fs_info, NULL, NULL); 2222 if (IS_ERR(device)) { 2223 /* we can safely leave the fs_devices entry around */ 2224 ret = PTR_ERR(device); 2225 goto error; 2226 } 2227 2228 name = rcu_string_strdup(device_path, GFP_NOFS); 2229 if (!name) { 2230 kfree(device); 2231 ret = -ENOMEM; 2232 goto error; 
2233 } 2234 rcu_assign_pointer(device->name, name); 2235 2236 trans = btrfs_start_transaction(root, 0); 2237 if (IS_ERR(trans)) { 2238 rcu_string_free(device->name); 2239 kfree(device); 2240 ret = PTR_ERR(trans); 2241 goto error; 2242 } 2243 2244 q = bdev_get_queue(bdev); 2245 if (blk_queue_discard(q)) 2246 device->can_discard = 1; 2247 device->writeable = 1; 2248 device->generation = trans->transid; 2249 device->io_width = root->sectorsize; 2250 device->io_align = root->sectorsize; 2251 device->sector_size = root->sectorsize; 2252 device->total_bytes = i_size_read(bdev->bd_inode); 2253 device->disk_total_bytes = device->total_bytes; 2254 device->commit_total_bytes = device->total_bytes; 2255 device->dev_root = root->fs_info->dev_root; 2256 device->bdev = bdev; 2257 device->in_fs_metadata = 1; 2258 device->is_tgtdev_for_dev_replace = 0; 2259 device->mode = FMODE_EXCL; 2260 device->dev_stats_valid = 1; 2261 set_blocksize(device->bdev, 4096); 2262 2263 if (seeding_dev) { 2264 sb->s_flags &= ~MS_RDONLY; 2265 ret = btrfs_prepare_sprout(root); 2266 BUG_ON(ret); /* -ENOMEM */ 2267 } 2268 2269 device->fs_devices = root->fs_info->fs_devices; 2270 2271 mutex_lock(&root->fs_info->fs_devices->device_list_mutex); 2272 lock_chunks(root); 2273 list_add_rcu(&device->dev_list, &root->fs_info->fs_devices->devices); 2274 list_add(&device->dev_alloc_list, 2275 &root->fs_info->fs_devices->alloc_list); 2276 root->fs_info->fs_devices->num_devices++; 2277 root->fs_info->fs_devices->open_devices++; 2278 root->fs_info->fs_devices->rw_devices++; 2279 root->fs_info->fs_devices->total_devices++; 2280 root->fs_info->fs_devices->total_rw_bytes += device->total_bytes; 2281 2282 spin_lock(&root->fs_info->free_chunk_lock); 2283 root->fs_info->free_chunk_space += device->total_bytes; 2284 spin_unlock(&root->fs_info->free_chunk_lock); 2285 2286 if (!blk_queue_nonrot(bdev_get_queue(bdev))) 2287 root->fs_info->fs_devices->rotating = 1; 2288 2289 tmp = btrfs_super_total_bytes(root->fs_info->super_copy); 2290 btrfs_set_super_total_bytes(root->fs_info->super_copy, 2291 tmp + device->total_bytes); 2292 2293 tmp = btrfs_super_num_devices(root->fs_info->super_copy); 2294 btrfs_set_super_num_devices(root->fs_info->super_copy, 2295 tmp + 1); 2296 2297 /* add sysfs device entry */ 2298 btrfs_kobj_add_device(root->fs_info->fs_devices, device); 2299 2300 /* 2301 * we've got more storage, clear any full flags on the space 2302 * infos 2303 */ 2304 btrfs_clear_space_info_full(root->fs_info); 2305 2306 unlock_chunks(root); 2307 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); 2308 2309 if (seeding_dev) { 2310 lock_chunks(root); 2311 ret = init_first_rw_device(trans, root, device); 2312 unlock_chunks(root); 2313 if (ret) { 2314 btrfs_abort_transaction(trans, root, ret); 2315 goto error_trans; 2316 } 2317 } 2318 2319 ret = btrfs_add_device(trans, root, device); 2320 if (ret) { 2321 btrfs_abort_transaction(trans, root, ret); 2322 goto error_trans; 2323 } 2324 2325 if (seeding_dev) { 2326 char fsid_buf[BTRFS_UUID_UNPARSED_SIZE]; 2327 2328 ret = btrfs_finish_sprout(trans, root); 2329 if (ret) { 2330 btrfs_abort_transaction(trans, root, ret); 2331 goto error_trans; 2332 } 2333 2334 /* Sprouting would change fsid of the mounted root, 2335 * so rename the fsid on the sysfs 2336 */ 2337 snprintf(fsid_buf, BTRFS_UUID_UNPARSED_SIZE, "%pU", 2338 root->fs_info->fsid); 2339 if (kobject_rename(&root->fs_info->fs_devices->super_kobj, 2340 fsid_buf)) 2341 pr_warn("BTRFS: sysfs: failed to create fsid for sprout\n"); 2342 } 2343 2344 
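/*
 * A device was just added, so the number of disk barrier failures the
 * filesystem can tolerate may have changed; recompute it before the
 * transaction below is committed.
 */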
root->fs_info->num_tolerated_disk_barrier_failures = 2345 btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info); 2346 ret = btrfs_commit_transaction(trans, root); 2347 2348 if (seeding_dev) { 2349 mutex_unlock(&uuid_mutex); 2350 up_write(&sb->s_umount); 2351 2352 if (ret) /* transaction commit */ 2353 return ret; 2354 2355 ret = btrfs_relocate_sys_chunks(root); 2356 if (ret < 0) 2357 btrfs_error(root->fs_info, ret, 2358 "Failed to relocate sys chunks after " 2359 "device initialization. This can be fixed " 2360 "using the \"btrfs balance\" command."); 2361 trans = btrfs_attach_transaction(root); 2362 if (IS_ERR(trans)) { 2363 if (PTR_ERR(trans) == -ENOENT) 2364 return 0; 2365 return PTR_ERR(trans); 2366 } 2367 ret = btrfs_commit_transaction(trans, root); 2368 } 2369 2370 /* Update ctime/mtime for libblkid */ 2371 update_dev_time(device_path); 2372 return ret; 2373 2374 error_trans: 2375 btrfs_end_transaction(trans, root); 2376 rcu_string_free(device->name); 2377 btrfs_kobj_rm_device(root->fs_info->fs_devices, device); 2378 kfree(device); 2379 error: 2380 blkdev_put(bdev, FMODE_EXCL); 2381 if (seeding_dev) { 2382 mutex_unlock(&uuid_mutex); 2383 up_write(&sb->s_umount); 2384 } 2385 return ret; 2386 } 2387 2388 int btrfs_init_dev_replace_tgtdev(struct btrfs_root *root, char *device_path, 2389 struct btrfs_device *srcdev, 2390 struct btrfs_device **device_out) 2391 { 2392 struct request_queue *q; 2393 struct btrfs_device *device; 2394 struct block_device *bdev; 2395 struct btrfs_fs_info *fs_info = root->fs_info; 2396 struct list_head *devices; 2397 struct rcu_string *name; 2398 u64 devid = BTRFS_DEV_REPLACE_DEVID; 2399 int ret = 0; 2400 2401 *device_out = NULL; 2402 if (fs_info->fs_devices->seeding) { 2403 btrfs_err(fs_info, "the filesystem is a seed filesystem!"); 2404 return -EINVAL; 2405 } 2406 2407 bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL, 2408 fs_info->bdev_holder); 2409 if (IS_ERR(bdev)) { 2410 btrfs_err(fs_info, "target device %s is invalid!", device_path); 2411 return PTR_ERR(bdev); 2412 } 2413 2414 filemap_write_and_wait(bdev->bd_inode->i_mapping); 2415 2416 devices = &fs_info->fs_devices->devices; 2417 list_for_each_entry(device, devices, dev_list) { 2418 if (device->bdev == bdev) { 2419 btrfs_err(fs_info, "target device is in the filesystem!"); 2420 ret = -EEXIST; 2421 goto error; 2422 } 2423 } 2424 2425 2426 if (i_size_read(bdev->bd_inode) < 2427 btrfs_device_get_total_bytes(srcdev)) { 2428 btrfs_err(fs_info, "target device is smaller than source device!"); 2429 ret = -EINVAL; 2430 goto error; 2431 } 2432 2433 2434 device = btrfs_alloc_device(NULL, &devid, NULL); 2435 if (IS_ERR(device)) { 2436 ret = PTR_ERR(device); 2437 goto error; 2438 } 2439 2440 name = rcu_string_strdup(device_path, GFP_NOFS); 2441 if (!name) { 2442 kfree(device); 2443 ret = -ENOMEM; 2444 goto error; 2445 } 2446 rcu_assign_pointer(device->name, name); 2447 2448 q = bdev_get_queue(bdev); 2449 if (blk_queue_discard(q)) 2450 device->can_discard = 1; 2451 mutex_lock(&root->fs_info->fs_devices->device_list_mutex); 2452 device->writeable = 1; 2453 device->generation = 0; 2454 device->io_width = root->sectorsize; 2455 device->io_align = root->sectorsize; 2456 device->sector_size = root->sectorsize; 2457 device->total_bytes = btrfs_device_get_total_bytes(srcdev); 2458 device->disk_total_bytes = btrfs_device_get_disk_total_bytes(srcdev); 2459 device->bytes_used = btrfs_device_get_bytes_used(srcdev); 2460 ASSERT(list_empty(&srcdev->resized_list)); 2461 device->commit_total_bytes = 
srcdev->commit_total_bytes; 2462 device->commit_bytes_used = device->bytes_used; 2463 device->dev_root = fs_info->dev_root; 2464 device->bdev = bdev; 2465 device->in_fs_metadata = 1; 2466 device->is_tgtdev_for_dev_replace = 1; 2467 device->mode = FMODE_EXCL; 2468 device->dev_stats_valid = 1; 2469 set_blocksize(device->bdev, 4096); 2470 device->fs_devices = fs_info->fs_devices; 2471 list_add(&device->dev_list, &fs_info->fs_devices->devices); 2472 fs_info->fs_devices->num_devices++; 2473 fs_info->fs_devices->open_devices++; 2474 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); 2475 2476 *device_out = device; 2477 return ret; 2478 2479 error: 2480 blkdev_put(bdev, FMODE_EXCL); 2481 return ret; 2482 } 2483 2484 void btrfs_init_dev_replace_tgtdev_for_resume(struct btrfs_fs_info *fs_info, 2485 struct btrfs_device *tgtdev) 2486 { 2487 WARN_ON(fs_info->fs_devices->rw_devices == 0); 2488 tgtdev->io_width = fs_info->dev_root->sectorsize; 2489 tgtdev->io_align = fs_info->dev_root->sectorsize; 2490 tgtdev->sector_size = fs_info->dev_root->sectorsize; 2491 tgtdev->dev_root = fs_info->dev_root; 2492 tgtdev->in_fs_metadata = 1; 2493 } 2494 2495 static noinline int btrfs_update_device(struct btrfs_trans_handle *trans, 2496 struct btrfs_device *device) 2497 { 2498 int ret; 2499 struct btrfs_path *path; 2500 struct btrfs_root *root; 2501 struct btrfs_dev_item *dev_item; 2502 struct extent_buffer *leaf; 2503 struct btrfs_key key; 2504 2505 root = device->dev_root->fs_info->chunk_root; 2506 2507 path = btrfs_alloc_path(); 2508 if (!path) 2509 return -ENOMEM; 2510 2511 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 2512 key.type = BTRFS_DEV_ITEM_KEY; 2513 key.offset = device->devid; 2514 2515 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 2516 if (ret < 0) 2517 goto out; 2518 2519 if (ret > 0) { 2520 ret = -ENOENT; 2521 goto out; 2522 } 2523 2524 leaf = path->nodes[0]; 2525 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item); 2526 2527 btrfs_set_device_id(leaf, dev_item, device->devid); 2528 btrfs_set_device_type(leaf, dev_item, device->type); 2529 btrfs_set_device_io_align(leaf, dev_item, device->io_align); 2530 btrfs_set_device_io_width(leaf, dev_item, device->io_width); 2531 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size); 2532 btrfs_set_device_total_bytes(leaf, dev_item, 2533 btrfs_device_get_disk_total_bytes(device)); 2534 btrfs_set_device_bytes_used(leaf, dev_item, 2535 btrfs_device_get_bytes_used(device)); 2536 btrfs_mark_buffer_dirty(leaf); 2537 2538 out: 2539 btrfs_free_path(path); 2540 return ret; 2541 } 2542 2543 int btrfs_grow_device(struct btrfs_trans_handle *trans, 2544 struct btrfs_device *device, u64 new_size) 2545 { 2546 struct btrfs_super_block *super_copy = 2547 device->dev_root->fs_info->super_copy; 2548 struct btrfs_fs_devices *fs_devices; 2549 u64 old_total; 2550 u64 diff; 2551 2552 if (!device->writeable) 2553 return -EACCES; 2554 2555 lock_chunks(device->dev_root); 2556 old_total = btrfs_super_total_bytes(super_copy); 2557 diff = new_size - device->total_bytes; 2558 2559 if (new_size <= device->total_bytes || 2560 device->is_tgtdev_for_dev_replace) { 2561 unlock_chunks(device->dev_root); 2562 return -EINVAL; 2563 } 2564 2565 fs_devices = device->dev_root->fs_info->fs_devices; 2566 2567 btrfs_set_super_total_bytes(super_copy, old_total + diff); 2568 device->fs_devices->total_rw_bytes += diff; 2569 2570 btrfs_device_set_total_bytes(device, new_size); 2571 btrfs_device_set_disk_total_bytes(device, new_size); 2572 
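/*
 * New space is available: clear any stale "full" markers on the space
 * infos so the allocator will consider them again, and queue the device
 * on the per-fs resized list.
 */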
btrfs_clear_space_info_full(device->dev_root->fs_info); 2573 if (list_empty(&device->resized_list)) 2574 list_add_tail(&device->resized_list, 2575 &fs_devices->resized_devices); 2576 unlock_chunks(device->dev_root); 2577 2578 return btrfs_update_device(trans, device); 2579 } 2580 2581 static int btrfs_free_chunk(struct btrfs_trans_handle *trans, 2582 struct btrfs_root *root, u64 chunk_objectid, 2583 u64 chunk_offset) 2584 { 2585 int ret; 2586 struct btrfs_path *path; 2587 struct btrfs_key key; 2588 2589 root = root->fs_info->chunk_root; 2590 path = btrfs_alloc_path(); 2591 if (!path) 2592 return -ENOMEM; 2593 2594 key.objectid = chunk_objectid; 2595 key.offset = chunk_offset; 2596 key.type = BTRFS_CHUNK_ITEM_KEY; 2597 2598 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 2599 if (ret < 0) 2600 goto out; 2601 else if (ret > 0) { /* Logic error or corruption */ 2602 btrfs_error(root->fs_info, -ENOENT, 2603 "Failed lookup while freeing chunk."); 2604 ret = -ENOENT; 2605 goto out; 2606 } 2607 2608 ret = btrfs_del_item(trans, root, path); 2609 if (ret < 0) 2610 btrfs_error(root->fs_info, ret, 2611 "Failed to delete chunk item."); 2612 out: 2613 btrfs_free_path(path); 2614 return ret; 2615 } 2616 2617 static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64 2618 chunk_offset) 2619 { 2620 struct btrfs_super_block *super_copy = root->fs_info->super_copy; 2621 struct btrfs_disk_key *disk_key; 2622 struct btrfs_chunk *chunk; 2623 u8 *ptr; 2624 int ret = 0; 2625 u32 num_stripes; 2626 u32 array_size; 2627 u32 len = 0; 2628 u32 cur; 2629 struct btrfs_key key; 2630 2631 lock_chunks(root); 2632 array_size = btrfs_super_sys_array_size(super_copy); 2633 2634 ptr = super_copy->sys_chunk_array; 2635 cur = 0; 2636 2637 while (cur < array_size) { 2638 disk_key = (struct btrfs_disk_key *)ptr; 2639 btrfs_disk_key_to_cpu(&key, disk_key); 2640 2641 len = sizeof(*disk_key); 2642 2643 if (key.type == BTRFS_CHUNK_ITEM_KEY) { 2644 chunk = (struct btrfs_chunk *)(ptr + len); 2645 num_stripes = btrfs_stack_chunk_num_stripes(chunk); 2646 len += btrfs_chunk_item_size(num_stripes); 2647 } else { 2648 ret = -EIO; 2649 break; 2650 } 2651 if (key.objectid == chunk_objectid && 2652 key.offset == chunk_offset) { 2653 memmove(ptr, ptr + len, array_size - (cur + len)); 2654 array_size -= len; 2655 btrfs_set_super_sys_array_size(super_copy, array_size); 2656 } else { 2657 ptr += len; 2658 cur += len; 2659 } 2660 } 2661 unlock_chunks(root); 2662 return ret; 2663 } 2664 2665 int btrfs_remove_chunk(struct btrfs_trans_handle *trans, 2666 struct btrfs_root *root, u64 chunk_offset) 2667 { 2668 struct extent_map_tree *em_tree; 2669 struct extent_map *em; 2670 struct btrfs_root *extent_root = root->fs_info->extent_root; 2671 struct map_lookup *map; 2672 u64 dev_extent_len = 0; 2673 u64 chunk_objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 2674 int i, ret = 0; 2675 2676 /* Just in case */ 2677 root = root->fs_info->chunk_root; 2678 em_tree = &root->fs_info->mapping_tree.map_tree; 2679 2680 read_lock(&em_tree->lock); 2681 em = lookup_extent_mapping(em_tree, chunk_offset, 1); 2682 read_unlock(&em_tree->lock); 2683 2684 if (!em || em->start > chunk_offset || 2685 em->start + em->len < chunk_offset) { 2686 /* 2687 * This is a logic error, but we don't want to just rely on the 2688 * user having built with ASSERT enabled, so if ASSERT doesn't 2689 * do anything we still error out.
2690 */ 2691 ASSERT(0); 2692 if (em) 2693 free_extent_map(em); 2694 return -EINVAL; 2695 } 2696 map = (struct map_lookup *)em->bdev; 2697 lock_chunks(root->fs_info->chunk_root); 2698 check_system_chunk(trans, extent_root, map->type); 2699 unlock_chunks(root->fs_info->chunk_root); 2700 2701 for (i = 0; i < map->num_stripes; i++) { 2702 struct btrfs_device *device = map->stripes[i].dev; 2703 ret = btrfs_free_dev_extent(trans, device, 2704 map->stripes[i].physical, 2705 &dev_extent_len); 2706 if (ret) { 2707 btrfs_abort_transaction(trans, root, ret); 2708 goto out; 2709 } 2710 2711 if (device->bytes_used > 0) { 2712 lock_chunks(root); 2713 btrfs_device_set_bytes_used(device, 2714 device->bytes_used - dev_extent_len); 2715 spin_lock(&root->fs_info->free_chunk_lock); 2716 root->fs_info->free_chunk_space += dev_extent_len; 2717 spin_unlock(&root->fs_info->free_chunk_lock); 2718 btrfs_clear_space_info_full(root->fs_info); 2719 unlock_chunks(root); 2720 } 2721 2722 if (map->stripes[i].dev) { 2723 ret = btrfs_update_device(trans, map->stripes[i].dev); 2724 if (ret) { 2725 btrfs_abort_transaction(trans, root, ret); 2726 goto out; 2727 } 2728 } 2729 } 2730 ret = btrfs_free_chunk(trans, root, chunk_objectid, chunk_offset); 2731 if (ret) { 2732 btrfs_abort_transaction(trans, root, ret); 2733 goto out; 2734 } 2735 2736 trace_btrfs_chunk_free(root, map, chunk_offset, em->len); 2737 2738 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) { 2739 ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset); 2740 if (ret) { 2741 btrfs_abort_transaction(trans, root, ret); 2742 goto out; 2743 } 2744 } 2745 2746 ret = btrfs_remove_block_group(trans, extent_root, chunk_offset, em); 2747 if (ret) { 2748 btrfs_abort_transaction(trans, extent_root, ret); 2749 goto out; 2750 } 2751 2752 out: 2753 /* once for us */ 2754 free_extent_map(em); 2755 return ret; 2756 } 2757 2758 static int btrfs_relocate_chunk(struct btrfs_root *root, 2759 u64 chunk_objectid, 2760 u64 chunk_offset) 2761 { 2762 struct btrfs_root *extent_root; 2763 struct btrfs_trans_handle *trans; 2764 int ret; 2765 2766 root = root->fs_info->chunk_root; 2767 extent_root = root->fs_info->extent_root; 2768 2769 /* 2770 * Prevent races with automatic removal of unused block groups. 2771 * After we relocate and before we remove the chunk with offset 2772 * chunk_offset, automatic removal of the block group can kick in, 2773 * resulting in a failure when calling btrfs_remove_chunk() below. 2774 * 2775 * Make sure to acquire this mutex before doing a tree search (dev 2776 * or chunk trees) to find chunks. Otherwise the cleaner kthread might 2777 * call btrfs_remove_chunk() (through btrfs_delete_unused_bgs()) after 2778 * we release the path used to search the chunk/dev tree and before 2779 * the current task acquires this mutex and calls us. 
2780 */ 2781 ASSERT(mutex_is_locked(&root->fs_info->delete_unused_bgs_mutex)); 2782 2783 ret = btrfs_can_relocate(extent_root, chunk_offset); 2784 if (ret) 2785 return -ENOSPC; 2786 2787 /* step one, relocate all the extents inside this chunk */ 2788 ret = btrfs_relocate_block_group(extent_root, chunk_offset); 2789 if (ret) 2790 return ret; 2791 2792 trans = btrfs_start_transaction(root, 0); 2793 if (IS_ERR(trans)) { 2794 ret = PTR_ERR(trans); 2795 btrfs_std_error(root->fs_info, ret); 2796 return ret; 2797 } 2798 2799 /* 2800 * step two, delete the device extents and the 2801 * chunk tree entries 2802 */ 2803 ret = btrfs_remove_chunk(trans, root, chunk_offset); 2804 btrfs_end_transaction(trans, root); 2805 return ret; 2806 } 2807 2808 static int btrfs_relocate_sys_chunks(struct btrfs_root *root) 2809 { 2810 struct btrfs_root *chunk_root = root->fs_info->chunk_root; 2811 struct btrfs_path *path; 2812 struct extent_buffer *leaf; 2813 struct btrfs_chunk *chunk; 2814 struct btrfs_key key; 2815 struct btrfs_key found_key; 2816 u64 chunk_type; 2817 bool retried = false; 2818 int failed = 0; 2819 int ret; 2820 2821 path = btrfs_alloc_path(); 2822 if (!path) 2823 return -ENOMEM; 2824 2825 again: 2826 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 2827 key.offset = (u64)-1; 2828 key.type = BTRFS_CHUNK_ITEM_KEY; 2829 2830 while (1) { 2831 mutex_lock(&root->fs_info->delete_unused_bgs_mutex); 2832 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0); 2833 if (ret < 0) { 2834 mutex_unlock(&root->fs_info->delete_unused_bgs_mutex); 2835 goto error; 2836 } 2837 BUG_ON(ret == 0); /* Corruption */ 2838 2839 ret = btrfs_previous_item(chunk_root, path, key.objectid, 2840 key.type); 2841 if (ret) 2842 mutex_unlock(&root->fs_info->delete_unused_bgs_mutex); 2843 if (ret < 0) 2844 goto error; 2845 if (ret > 0) 2846 break; 2847 2848 leaf = path->nodes[0]; 2849 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 2850 2851 chunk = btrfs_item_ptr(leaf, path->slots[0], 2852 struct btrfs_chunk); 2853 chunk_type = btrfs_chunk_type(leaf, chunk); 2854 btrfs_release_path(path); 2855 2856 if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) { 2857 ret = btrfs_relocate_chunk(chunk_root, 2858 found_key.objectid, 2859 found_key.offset); 2860 if (ret == -ENOSPC) 2861 failed++; 2862 else 2863 BUG_ON(ret); 2864 } 2865 mutex_unlock(&root->fs_info->delete_unused_bgs_mutex); 2866 2867 if (found_key.offset == 0) 2868 break; 2869 key.offset = found_key.offset - 1; 2870 } 2871 ret = 0; 2872 if (failed && !retried) { 2873 failed = 0; 2874 retried = true; 2875 goto again; 2876 } else if (WARN_ON(failed && retried)) { 2877 ret = -ENOSPC; 2878 } 2879 error: 2880 btrfs_free_path(path); 2881 return ret; 2882 } 2883 2884 static int insert_balance_item(struct btrfs_root *root, 2885 struct btrfs_balance_control *bctl) 2886 { 2887 struct btrfs_trans_handle *trans; 2888 struct btrfs_balance_item *item; 2889 struct btrfs_disk_balance_args disk_bargs; 2890 struct btrfs_path *path; 2891 struct extent_buffer *leaf; 2892 struct btrfs_key key; 2893 int ret, err; 2894 2895 path = btrfs_alloc_path(); 2896 if (!path) 2897 return -ENOMEM; 2898 2899 trans = btrfs_start_transaction(root, 0); 2900 if (IS_ERR(trans)) { 2901 btrfs_free_path(path); 2902 return PTR_ERR(trans); 2903 } 2904 2905 key.objectid = BTRFS_BALANCE_OBJECTID; 2906 key.type = BTRFS_BALANCE_ITEM_KEY; 2907 key.offset = 0; 2908 2909 ret = btrfs_insert_empty_item(trans, root, path, &key, 2910 sizeof(*item)); 2911 if (ret) 2912 goto out; 2913 2914 leaf = path->nodes[0]; 2915 item = 
btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item); 2916 2917 memset_extent_buffer(leaf, 0, (unsigned long)item, sizeof(*item)); 2918 2919 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data); 2920 btrfs_set_balance_data(leaf, item, &disk_bargs); 2921 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta); 2922 btrfs_set_balance_meta(leaf, item, &disk_bargs); 2923 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys); 2924 btrfs_set_balance_sys(leaf, item, &disk_bargs); 2925 2926 btrfs_set_balance_flags(leaf, item, bctl->flags); 2927 2928 btrfs_mark_buffer_dirty(leaf); 2929 out: 2930 btrfs_free_path(path); 2931 err = btrfs_commit_transaction(trans, root); 2932 if (err && !ret) 2933 ret = err; 2934 return ret; 2935 } 2936 2937 static int del_balance_item(struct btrfs_root *root) 2938 { 2939 struct btrfs_trans_handle *trans; 2940 struct btrfs_path *path; 2941 struct btrfs_key key; 2942 int ret, err; 2943 2944 path = btrfs_alloc_path(); 2945 if (!path) 2946 return -ENOMEM; 2947 2948 trans = btrfs_start_transaction(root, 0); 2949 if (IS_ERR(trans)) { 2950 btrfs_free_path(path); 2951 return PTR_ERR(trans); 2952 } 2953 2954 key.objectid = BTRFS_BALANCE_OBJECTID; 2955 key.type = BTRFS_BALANCE_ITEM_KEY; 2956 key.offset = 0; 2957 2958 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 2959 if (ret < 0) 2960 goto out; 2961 if (ret > 0) { 2962 ret = -ENOENT; 2963 goto out; 2964 } 2965 2966 ret = btrfs_del_item(trans, root, path); 2967 out: 2968 btrfs_free_path(path); 2969 err = btrfs_commit_transaction(trans, root); 2970 if (err && !ret) 2971 ret = err; 2972 return ret; 2973 } 2974 2975 /* 2976 * This is a heuristic used to reduce the number of chunks balanced on 2977 * resume after balance was interrupted. 2978 */ 2979 static void update_balance_args(struct btrfs_balance_control *bctl) 2980 { 2981 /* 2982 * Turn on soft mode for chunk types that were being converted. 2983 */ 2984 if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) 2985 bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT; 2986 if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) 2987 bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT; 2988 if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) 2989 bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT; 2990 2991 /* 2992 * Turn on the usage filter if it is not already used. The idea is 2993 * that chunks that we have already balanced should be 2994 * reasonably full. Don't do it for chunks that are being 2995 * converted - that will keep us from relocating unconverted 2996 * (albeit full) chunks. 2997 */ 2998 if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) && 2999 !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) { 3000 bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE; 3001 bctl->data.usage = 90; 3002 } 3003 if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) && 3004 !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) { 3005 bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE; 3006 bctl->sys.usage = 90; 3007 } 3008 if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) && 3009 !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) { 3010 bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE; 3011 bctl->meta.usage = 90; 3012 } 3013 } 3014 3015 /* 3016 * Should be called with both balance and volume mutexes held to 3017 * serialize other volume operations (add_dev/rm_dev/resize) with 3018 * restriper. Same goes for unset_balance_control.
3019 */ 3020 static void set_balance_control(struct btrfs_balance_control *bctl) 3021 { 3022 struct btrfs_fs_info *fs_info = bctl->fs_info; 3023 3024 BUG_ON(fs_info->balance_ctl); 3025 3026 spin_lock(&fs_info->balance_lock); 3027 fs_info->balance_ctl = bctl; 3028 spin_unlock(&fs_info->balance_lock); 3029 } 3030 3031 static void unset_balance_control(struct btrfs_fs_info *fs_info) 3032 { 3033 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 3034 3035 BUG_ON(!fs_info->balance_ctl); 3036 3037 spin_lock(&fs_info->balance_lock); 3038 fs_info->balance_ctl = NULL; 3039 spin_unlock(&fs_info->balance_lock); 3040 3041 kfree(bctl); 3042 } 3043 3044 /* 3045 * Balance filters. Return 1 if chunk should be filtered out 3046 * (should not be balanced). 3047 */ 3048 static int chunk_profiles_filter(u64 chunk_type, 3049 struct btrfs_balance_args *bargs) 3050 { 3051 chunk_type = chunk_to_extended(chunk_type) & 3052 BTRFS_EXTENDED_PROFILE_MASK; 3053 3054 if (bargs->profiles & chunk_type) 3055 return 0; 3056 3057 return 1; 3058 } 3059 3060 static int chunk_usage_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset, 3061 struct btrfs_balance_args *bargs) 3062 { 3063 struct btrfs_block_group_cache *cache; 3064 u64 chunk_used, user_thresh; 3065 int ret = 1; 3066 3067 cache = btrfs_lookup_block_group(fs_info, chunk_offset); 3068 chunk_used = btrfs_block_group_used(&cache->item); 3069 3070 if (bargs->usage == 0) 3071 user_thresh = 1; 3072 else if (bargs->usage > 100) 3073 user_thresh = cache->key.offset; 3074 else 3075 user_thresh = div_factor_fine(cache->key.offset, 3076 bargs->usage); 3077 3078 if (chunk_used < user_thresh) 3079 ret = 0; 3080 3081 btrfs_put_block_group(cache); 3082 return ret; 3083 } 3084 3085 static int chunk_devid_filter(struct extent_buffer *leaf, 3086 struct btrfs_chunk *chunk, 3087 struct btrfs_balance_args *bargs) 3088 { 3089 struct btrfs_stripe *stripe; 3090 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 3091 int i; 3092 3093 for (i = 0; i < num_stripes; i++) { 3094 stripe = btrfs_stripe_nr(chunk, i); 3095 if (btrfs_stripe_devid(leaf, stripe) == bargs->devid) 3096 return 0; 3097 } 3098 3099 return 1; 3100 } 3101 3102 /* [pstart, pend) */ 3103 static int chunk_drange_filter(struct extent_buffer *leaf, 3104 struct btrfs_chunk *chunk, 3105 u64 chunk_offset, 3106 struct btrfs_balance_args *bargs) 3107 { 3108 struct btrfs_stripe *stripe; 3109 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 3110 u64 stripe_offset; 3111 u64 stripe_length; 3112 int factor; 3113 int i; 3114 3115 if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID)) 3116 return 0; 3117 3118 if (btrfs_chunk_type(leaf, chunk) & (BTRFS_BLOCK_GROUP_DUP | 3119 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10)) { 3120 factor = num_stripes / 2; 3121 } else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID5) { 3122 factor = num_stripes - 1; 3123 } else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID6) { 3124 factor = num_stripes - 2; 3125 } else { 3126 factor = num_stripes; 3127 } 3128 3129 for (i = 0; i < num_stripes; i++) { 3130 stripe = btrfs_stripe_nr(chunk, i); 3131 if (btrfs_stripe_devid(leaf, stripe) != bargs->devid) 3132 continue; 3133 3134 stripe_offset = btrfs_stripe_offset(leaf, stripe); 3135 stripe_length = btrfs_chunk_length(leaf, chunk); 3136 stripe_length = div_u64(stripe_length, factor); 3137 3138 if (stripe_offset < bargs->pend && 3139 stripe_offset + stripe_length > bargs->pstart) 3140 return 0; 3141 } 3142 3143 return 1; 3144 } 3145 3146 /* [vstart, vend) */ 3147 static int 
chunk_vrange_filter(struct extent_buffer *leaf, 3148 struct btrfs_chunk *chunk, 3149 u64 chunk_offset, 3150 struct btrfs_balance_args *bargs) 3151 { 3152 if (chunk_offset < bargs->vend && 3153 chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart) 3154 /* at least part of the chunk is inside this vrange */ 3155 return 0; 3156 3157 return 1; 3158 } 3159 3160 static int chunk_soft_convert_filter(u64 chunk_type, 3161 struct btrfs_balance_args *bargs) 3162 { 3163 if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT)) 3164 return 0; 3165 3166 chunk_type = chunk_to_extended(chunk_type) & 3167 BTRFS_EXTENDED_PROFILE_MASK; 3168 3169 if (bargs->target == chunk_type) 3170 return 1; 3171 3172 return 0; 3173 } 3174 3175 static int should_balance_chunk(struct btrfs_root *root, 3176 struct extent_buffer *leaf, 3177 struct btrfs_chunk *chunk, u64 chunk_offset) 3178 { 3179 struct btrfs_balance_control *bctl = root->fs_info->balance_ctl; 3180 struct btrfs_balance_args *bargs = NULL; 3181 u64 chunk_type = btrfs_chunk_type(leaf, chunk); 3182 3183 /* type filter */ 3184 if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) & 3185 (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) { 3186 return 0; 3187 } 3188 3189 if (chunk_type & BTRFS_BLOCK_GROUP_DATA) 3190 bargs = &bctl->data; 3191 else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) 3192 bargs = &bctl->sys; 3193 else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA) 3194 bargs = &bctl->meta; 3195 3196 /* profiles filter */ 3197 if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) && 3198 chunk_profiles_filter(chunk_type, bargs)) { 3199 return 0; 3200 } 3201 3202 /* usage filter */ 3203 if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) && 3204 chunk_usage_filter(bctl->fs_info, chunk_offset, bargs)) { 3205 return 0; 3206 } 3207 3208 /* devid filter */ 3209 if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) && 3210 chunk_devid_filter(leaf, chunk, bargs)) { 3211 return 0; 3212 } 3213 3214 /* drange filter, makes sense only with devid filter */ 3215 if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) && 3216 chunk_drange_filter(leaf, chunk, chunk_offset, bargs)) { 3217 return 0; 3218 } 3219 3220 /* vrange filter */ 3221 if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) && 3222 chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) { 3223 return 0; 3224 } 3225 3226 /* soft profile changing mode */ 3227 if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) && 3228 chunk_soft_convert_filter(chunk_type, bargs)) { 3229 return 0; 3230 } 3231 3232 /* 3233 * limited by count, must be the last filter 3234 */ 3235 if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT)) { 3236 if (bargs->limit == 0) 3237 return 0; 3238 else 3239 bargs->limit--; 3240 } 3241 3242 return 1; 3243 } 3244 3245 static int __btrfs_balance(struct btrfs_fs_info *fs_info) 3246 { 3247 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 3248 struct btrfs_root *chunk_root = fs_info->chunk_root; 3249 struct btrfs_root *dev_root = fs_info->dev_root; 3250 struct list_head *devices; 3251 struct btrfs_device *device; 3252 u64 old_size; 3253 u64 size_to_free; 3254 struct btrfs_chunk *chunk; 3255 struct btrfs_path *path; 3256 struct btrfs_key key; 3257 struct btrfs_key found_key; 3258 struct btrfs_trans_handle *trans; 3259 struct extent_buffer *leaf; 3260 int slot; 3261 int ret; 3262 int enospc_errors = 0; 3263 bool counting = true; 3264 u64 limit_data = bctl->data.limit; 3265 u64 limit_meta = bctl->meta.limit; 3266 u64 limit_sys = bctl->sys.limit; 3267 3268 /* step one make some room on all the devices */ 3269 devices = &fs_info->fs_devices->devices; 3270 
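/*
 * The shrink below relocates whatever lives in roughly the last 1MB of
 * each writable device; the immediate grow then returns that space,
 * leaving a free tail that the chunk relocation in step two can
 * allocate from. Devices that already have that much free are skipped.
 */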
list_for_each_entry(device, devices, dev_list) { 3271 old_size = btrfs_device_get_total_bytes(device); 3272 size_to_free = div_factor(old_size, 1); 3273 size_to_free = min(size_to_free, (u64)1 * 1024 * 1024); 3274 if (!device->writeable || 3275 btrfs_device_get_total_bytes(device) - 3276 btrfs_device_get_bytes_used(device) > size_to_free || 3277 device->is_tgtdev_for_dev_replace) 3278 continue; 3279 3280 ret = btrfs_shrink_device(device, old_size - size_to_free); 3281 if (ret == -ENOSPC) 3282 break; 3283 BUG_ON(ret); 3284 3285 trans = btrfs_start_transaction(dev_root, 0); 3286 BUG_ON(IS_ERR(trans)); 3287 3288 ret = btrfs_grow_device(trans, device, old_size); 3289 BUG_ON(ret); 3290 3291 btrfs_end_transaction(trans, dev_root); 3292 } 3293 3294 /* step two, relocate all the chunks */ 3295 path = btrfs_alloc_path(); 3296 if (!path) { 3297 ret = -ENOMEM; 3298 goto error; 3299 } 3300 3301 /* zero out stat counters */ 3302 spin_lock(&fs_info->balance_lock); 3303 memset(&bctl->stat, 0, sizeof(bctl->stat)); 3304 spin_unlock(&fs_info->balance_lock); 3305 again: 3306 if (!counting) { 3307 bctl->data.limit = limit_data; 3308 bctl->meta.limit = limit_meta; 3309 bctl->sys.limit = limit_sys; 3310 } 3311 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 3312 key.offset = (u64)-1; 3313 key.type = BTRFS_CHUNK_ITEM_KEY; 3314 3315 while (1) { 3316 if ((!counting && atomic_read(&fs_info->balance_pause_req)) || 3317 atomic_read(&fs_info->balance_cancel_req)) { 3318 ret = -ECANCELED; 3319 goto error; 3320 } 3321 3322 mutex_lock(&fs_info->delete_unused_bgs_mutex); 3323 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0); 3324 if (ret < 0) { 3325 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 3326 goto error; 3327 } 3328 3329 /* 3330 * this shouldn't happen, it means the last relocate 3331 * failed 3332 */ 3333 if (ret == 0) 3334 BUG(); /* FIXME break ? 
*/ 3335 3336 ret = btrfs_previous_item(chunk_root, path, 0, 3337 BTRFS_CHUNK_ITEM_KEY); 3338 if (ret) { 3339 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 3340 ret = 0; 3341 break; 3342 } 3343 3344 leaf = path->nodes[0]; 3345 slot = path->slots[0]; 3346 btrfs_item_key_to_cpu(leaf, &found_key, slot); 3347 3348 if (found_key.objectid != key.objectid) { 3349 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 3350 break; 3351 } 3352 3353 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk); 3354 3355 if (!counting) { 3356 spin_lock(&fs_info->balance_lock); 3357 bctl->stat.considered++; 3358 spin_unlock(&fs_info->balance_lock); 3359 } 3360 3361 ret = should_balance_chunk(chunk_root, leaf, chunk, 3362 found_key.offset); 3363 btrfs_release_path(path); 3364 if (!ret) { 3365 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 3366 goto loop; 3367 } 3368 3369 if (counting) { 3370 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 3371 spin_lock(&fs_info->balance_lock); 3372 bctl->stat.expected++; 3373 spin_unlock(&fs_info->balance_lock); 3374 goto loop; 3375 } 3376 3377 ret = btrfs_relocate_chunk(chunk_root, 3378 found_key.objectid, 3379 found_key.offset); 3380 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 3381 if (ret && ret != -ENOSPC) 3382 goto error; 3383 if (ret == -ENOSPC) { 3384 enospc_errors++; 3385 } else { 3386 spin_lock(&fs_info->balance_lock); 3387 bctl->stat.completed++; 3388 spin_unlock(&fs_info->balance_lock); 3389 } 3390 loop: 3391 if (found_key.offset == 0) 3392 break; 3393 key.offset = found_key.offset - 1; 3394 } 3395 3396 if (counting) { 3397 btrfs_release_path(path); 3398 counting = false; 3399 goto again; 3400 } 3401 error: 3402 btrfs_free_path(path); 3403 if (enospc_errors) { 3404 btrfs_info(fs_info, "%d enospc errors during balance", 3405 enospc_errors); 3406 if (!ret) 3407 ret = -ENOSPC; 3408 } 3409 3410 return ret; 3411 } 3412 3413 /** 3414 * alloc_profile_is_valid - see if a given profile is valid and reduced 3415 * @flags: profile to validate 3416 * @extended: if true @flags is treated as an extended profile 3417 */ 3418 static int alloc_profile_is_valid(u64 flags, int extended) 3419 { 3420 u64 mask = (extended ? 
BTRFS_EXTENDED_PROFILE_MASK : 3421 BTRFS_BLOCK_GROUP_PROFILE_MASK); 3422 3423 flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK; 3424 3425 /* 1) check that all other bits are zeroed */ 3426 if (flags & ~mask) 3427 return 0; 3428 3429 /* 2) see if profile is reduced */ 3430 if (flags == 0) 3431 return !extended; /* "0" is valid for usual profiles */ 3432 3433 /* true if exactly one bit set */ 3434 return (flags & (flags - 1)) == 0; 3435 } 3436 3437 static inline int balance_need_close(struct btrfs_fs_info *fs_info) 3438 { 3439 /* cancel requested || normal exit path */ 3440 return atomic_read(&fs_info->balance_cancel_req) || 3441 (atomic_read(&fs_info->balance_pause_req) == 0 && 3442 atomic_read(&fs_info->balance_cancel_req) == 0); 3443 } 3444 3445 static void __cancel_balance(struct btrfs_fs_info *fs_info) 3446 { 3447 int ret; 3448 3449 unset_balance_control(fs_info); 3450 ret = del_balance_item(fs_info->tree_root); 3451 if (ret) 3452 btrfs_std_error(fs_info, ret); 3453 3454 atomic_set(&fs_info->mutually_exclusive_operation_running, 0); 3455 } 3456 3457 /* 3458 * Should be called with both balance and volume mutexes held 3459 */ 3460 int btrfs_balance(struct btrfs_balance_control *bctl, 3461 struct btrfs_ioctl_balance_args *bargs) 3462 { 3463 struct btrfs_fs_info *fs_info = bctl->fs_info; 3464 u64 allowed; 3465 int mixed = 0; 3466 int ret; 3467 u64 num_devices; 3468 unsigned seq; 3469 3470 if (btrfs_fs_closing(fs_info) || 3471 atomic_read(&fs_info->balance_pause_req) || 3472 atomic_read(&fs_info->balance_cancel_req)) { 3473 ret = -EINVAL; 3474 goto out; 3475 } 3476 3477 allowed = btrfs_super_incompat_flags(fs_info->super_copy); 3478 if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) 3479 mixed = 1; 3480 3481 /* 3482 * In case of mixed groups both data and meta should be picked, 3483 * and identical options should be given for both of them. 
3484 */ 3485 allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA; 3486 if (mixed && (bctl->flags & allowed)) { 3487 if (!(bctl->flags & BTRFS_BALANCE_DATA) || 3488 !(bctl->flags & BTRFS_BALANCE_METADATA) || 3489 memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) { 3490 btrfs_err(fs_info, "with mixed groups data and " 3491 "metadata balance options must be the same"); 3492 ret = -EINVAL; 3493 goto out; 3494 } 3495 } 3496 3497 num_devices = fs_info->fs_devices->num_devices; 3498 btrfs_dev_replace_lock(&fs_info->dev_replace); 3499 if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) { 3500 BUG_ON(num_devices < 1); 3501 num_devices--; 3502 } 3503 btrfs_dev_replace_unlock(&fs_info->dev_replace); 3504 allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE; 3505 if (num_devices == 1) 3506 allowed |= BTRFS_BLOCK_GROUP_DUP; 3507 else if (num_devices > 1) 3508 allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1); 3509 if (num_devices > 2) 3510 allowed |= BTRFS_BLOCK_GROUP_RAID5; 3511 if (num_devices > 3) 3512 allowed |= (BTRFS_BLOCK_GROUP_RAID10 | 3513 BTRFS_BLOCK_GROUP_RAID6); 3514 if ((bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) && 3515 (!alloc_profile_is_valid(bctl->data.target, 1) || 3516 (bctl->data.target & ~allowed))) { 3517 btrfs_err(fs_info, "unable to start balance with target " 3518 "data profile %llu", 3519 bctl->data.target); 3520 ret = -EINVAL; 3521 goto out; 3522 } 3523 if ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) && 3524 (!alloc_profile_is_valid(bctl->meta.target, 1) || 3525 (bctl->meta.target & ~allowed))) { 3526 btrfs_err(fs_info, 3527 "unable to start balance with target metadata profile %llu", 3528 bctl->meta.target); 3529 ret = -EINVAL; 3530 goto out; 3531 } 3532 if ((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) && 3533 (!alloc_profile_is_valid(bctl->sys.target, 1) || 3534 (bctl->sys.target & ~allowed))) { 3535 btrfs_err(fs_info, 3536 "unable to start balance with target system profile %llu", 3537 bctl->sys.target); 3538 ret = -EINVAL; 3539 goto out; 3540 } 3541 3542 /* allow dup'ed data chunks only in mixed mode */ 3543 if (!mixed && (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) && 3544 (bctl->data.target & BTRFS_BLOCK_GROUP_DUP)) { 3545 btrfs_err(fs_info, "dup for data is not allowed"); 3546 ret = -EINVAL; 3547 goto out; 3548 } 3549 3550 /* allow to reduce meta or sys integrity only if force set */ 3551 allowed = BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 | 3552 BTRFS_BLOCK_GROUP_RAID10 | 3553 BTRFS_BLOCK_GROUP_RAID5 | 3554 BTRFS_BLOCK_GROUP_RAID6; 3555 do { 3556 seq = read_seqbegin(&fs_info->profiles_lock); 3557 3558 if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) && 3559 (fs_info->avail_system_alloc_bits & allowed) && 3560 !(bctl->sys.target & allowed)) || 3561 ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) && 3562 (fs_info->avail_metadata_alloc_bits & allowed) && 3563 !(bctl->meta.target & allowed))) { 3564 if (bctl->flags & BTRFS_BALANCE_FORCE) { 3565 btrfs_info(fs_info, "force reducing metadata integrity"); 3566 } else { 3567 btrfs_err(fs_info, "balance will reduce metadata " 3568 "integrity, use force if you want this"); 3569 ret = -EINVAL; 3570 goto out; 3571 } 3572 } 3573 } while (read_seqretry(&fs_info->profiles_lock, seq)); 3574 3575 if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) { 3576 int num_tolerated_disk_barrier_failures; 3577 u64 target = bctl->sys.target; 3578 3579 num_tolerated_disk_barrier_failures = 3580 btrfs_calc_num_tolerated_disk_barrier_failures(fs_info); 3581 if (num_tolerated_disk_barrier_failures > 0 && 3582 (target & 
3583 (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID0 | 3584 BTRFS_AVAIL_ALLOC_BIT_SINGLE))) 3585 num_tolerated_disk_barrier_failures = 0; 3586 else if (num_tolerated_disk_barrier_failures > 1 && 3587 (target & 3588 (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10))) 3589 num_tolerated_disk_barrier_failures = 1; 3590 3591 fs_info->num_tolerated_disk_barrier_failures = 3592 num_tolerated_disk_barrier_failures; 3593 } 3594 3595 ret = insert_balance_item(fs_info->tree_root, bctl); 3596 if (ret && ret != -EEXIST) 3597 goto out; 3598 3599 if (!(bctl->flags & BTRFS_BALANCE_RESUME)) { 3600 BUG_ON(ret == -EEXIST); 3601 set_balance_control(bctl); 3602 } else { 3603 BUG_ON(ret != -EEXIST); 3604 spin_lock(&fs_info->balance_lock); 3605 update_balance_args(bctl); 3606 spin_unlock(&fs_info->balance_lock); 3607 } 3608 3609 atomic_inc(&fs_info->balance_running); 3610 mutex_unlock(&fs_info->balance_mutex); 3611 3612 ret = __btrfs_balance(fs_info); 3613 3614 mutex_lock(&fs_info->balance_mutex); 3615 atomic_dec(&fs_info->balance_running); 3616 3617 if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) { 3618 fs_info->num_tolerated_disk_barrier_failures = 3619 btrfs_calc_num_tolerated_disk_barrier_failures(fs_info); 3620 } 3621 3622 if (bargs) { 3623 memset(bargs, 0, sizeof(*bargs)); 3624 update_ioctl_balance_args(fs_info, 0, bargs); 3625 } 3626 3627 if ((ret && ret != -ECANCELED && ret != -ENOSPC) || 3628 balance_need_close(fs_info)) { 3629 __cancel_balance(fs_info); 3630 } 3631 3632 wake_up(&fs_info->balance_wait_q); 3633 3634 return ret; 3635 out: 3636 if (bctl->flags & BTRFS_BALANCE_RESUME) 3637 __cancel_balance(fs_info); 3638 else { 3639 kfree(bctl); 3640 atomic_set(&fs_info->mutually_exclusive_operation_running, 0); 3641 } 3642 return ret; 3643 } 3644 3645 static int balance_kthread(void *data) 3646 { 3647 struct btrfs_fs_info *fs_info = data; 3648 int ret = 0; 3649 3650 mutex_lock(&fs_info->volume_mutex); 3651 mutex_lock(&fs_info->balance_mutex); 3652 3653 if (fs_info->balance_ctl) { 3654 btrfs_info(fs_info, "continuing balance"); 3655 ret = btrfs_balance(fs_info->balance_ctl, NULL); 3656 } 3657 3658 mutex_unlock(&fs_info->balance_mutex); 3659 mutex_unlock(&fs_info->volume_mutex); 3660 3661 return ret; 3662 } 3663 3664 int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info) 3665 { 3666 struct task_struct *tsk; 3667 3668 spin_lock(&fs_info->balance_lock); 3669 if (!fs_info->balance_ctl) { 3670 spin_unlock(&fs_info->balance_lock); 3671 return 0; 3672 } 3673 spin_unlock(&fs_info->balance_lock); 3674 3675 if (btrfs_test_opt(fs_info->tree_root, SKIP_BALANCE)) { 3676 btrfs_info(fs_info, "force skipping balance"); 3677 return 0; 3678 } 3679 3680 tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance"); 3681 return PTR_ERR_OR_ZERO(tsk); 3682 } 3683 3684 int btrfs_recover_balance(struct btrfs_fs_info *fs_info) 3685 { 3686 struct btrfs_balance_control *bctl; 3687 struct btrfs_balance_item *item; 3688 struct btrfs_disk_balance_args disk_bargs; 3689 struct btrfs_path *path; 3690 struct extent_buffer *leaf; 3691 struct btrfs_key key; 3692 int ret; 3693 3694 path = btrfs_alloc_path(); 3695 if (!path) 3696 return -ENOMEM; 3697 3698 key.objectid = BTRFS_BALANCE_OBJECTID; 3699 key.type = BTRFS_BALANCE_ITEM_KEY; 3700 key.offset = 0; 3701 3702 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0); 3703 if (ret < 0) 3704 goto out; 3705 if (ret > 0) { /* ret = -ENOENT; */ 3706 ret = 0; 3707 goto out; 3708 } 3709 3710 bctl = kzalloc(sizeof(*bctl), GFP_NOFS); 3711 if (!bctl) { 3712 ret = -ENOMEM; 3713 
goto out; 3714 } 3715 3716 leaf = path->nodes[0]; 3717 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item); 3718 3719 bctl->fs_info = fs_info; 3720 bctl->flags = btrfs_balance_flags(leaf, item); 3721 bctl->flags |= BTRFS_BALANCE_RESUME; 3722 3723 btrfs_balance_data(leaf, item, &disk_bargs); 3724 btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs); 3725 btrfs_balance_meta(leaf, item, &disk_bargs); 3726 btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs); 3727 btrfs_balance_sys(leaf, item, &disk_bargs); 3728 btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs); 3729 3730 WARN_ON(atomic_xchg(&fs_info->mutually_exclusive_operation_running, 1)); 3731 3732 mutex_lock(&fs_info->volume_mutex); 3733 mutex_lock(&fs_info->balance_mutex); 3734 3735 set_balance_control(bctl); 3736 3737 mutex_unlock(&fs_info->balance_mutex); 3738 mutex_unlock(&fs_info->volume_mutex); 3739 out: 3740 btrfs_free_path(path); 3741 return ret; 3742 } 3743 3744 int btrfs_pause_balance(struct btrfs_fs_info *fs_info) 3745 { 3746 int ret = 0; 3747 3748 mutex_lock(&fs_info->balance_mutex); 3749 if (!fs_info->balance_ctl) { 3750 mutex_unlock(&fs_info->balance_mutex); 3751 return -ENOTCONN; 3752 } 3753 3754 if (atomic_read(&fs_info->balance_running)) { 3755 atomic_inc(&fs_info->balance_pause_req); 3756 mutex_unlock(&fs_info->balance_mutex); 3757 3758 wait_event(fs_info->balance_wait_q, 3759 atomic_read(&fs_info->balance_running) == 0); 3760 3761 mutex_lock(&fs_info->balance_mutex); 3762 /* we are good with balance_ctl ripped off from under us */ 3763 BUG_ON(atomic_read(&fs_info->balance_running)); 3764 atomic_dec(&fs_info->balance_pause_req); 3765 } else { 3766 ret = -ENOTCONN; 3767 } 3768 3769 mutex_unlock(&fs_info->balance_mutex); 3770 return ret; 3771 } 3772 3773 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info) 3774 { 3775 if (fs_info->sb->s_flags & MS_RDONLY) 3776 return -EROFS; 3777 3778 mutex_lock(&fs_info->balance_mutex); 3779 if (!fs_info->balance_ctl) { 3780 mutex_unlock(&fs_info->balance_mutex); 3781 return -ENOTCONN; 3782 } 3783 3784 atomic_inc(&fs_info->balance_cancel_req); 3785 /* 3786 * if we are running just wait and return, balance item is 3787 * deleted in btrfs_balance in this case 3788 */ 3789 if (atomic_read(&fs_info->balance_running)) { 3790 mutex_unlock(&fs_info->balance_mutex); 3791 wait_event(fs_info->balance_wait_q, 3792 atomic_read(&fs_info->balance_running) == 0); 3793 mutex_lock(&fs_info->balance_mutex); 3794 } else { 3795 /* __cancel_balance needs volume_mutex */ 3796 mutex_unlock(&fs_info->balance_mutex); 3797 mutex_lock(&fs_info->volume_mutex); 3798 mutex_lock(&fs_info->balance_mutex); 3799 3800 if (fs_info->balance_ctl) 3801 __cancel_balance(fs_info); 3802 3803 mutex_unlock(&fs_info->volume_mutex); 3804 } 3805 3806 BUG_ON(fs_info->balance_ctl || atomic_read(&fs_info->balance_running)); 3807 atomic_dec(&fs_info->balance_cancel_req); 3808 mutex_unlock(&fs_info->balance_mutex); 3809 return 0; 3810 } 3811 3812 static int btrfs_uuid_scan_kthread(void *data) 3813 { 3814 struct btrfs_fs_info *fs_info = data; 3815 struct btrfs_root *root = fs_info->tree_root; 3816 struct btrfs_key key; 3817 struct btrfs_key max_key; 3818 struct btrfs_path *path = NULL; 3819 int ret = 0; 3820 struct extent_buffer *eb; 3821 int slot; 3822 struct btrfs_root_item root_item; 3823 u32 item_size; 3824 struct btrfs_trans_handle *trans = NULL; 3825 3826 path = btrfs_alloc_path(); 3827 if (!path) { 3828 ret = -ENOMEM; 3829 goto out; 3830 } 3831 3832 key.objectid = 0; 3833 key.type = 
BTRFS_ROOT_ITEM_KEY; 3834 key.offset = 0; 3835 3836 max_key.objectid = (u64)-1; 3837 max_key.type = BTRFS_ROOT_ITEM_KEY; 3838 max_key.offset = (u64)-1; 3839 3840 while (1) { 3841 ret = btrfs_search_forward(root, &key, path, 0); 3842 if (ret) { 3843 if (ret > 0) 3844 ret = 0; 3845 break; 3846 } 3847 3848 if (key.type != BTRFS_ROOT_ITEM_KEY || 3849 (key.objectid < BTRFS_FIRST_FREE_OBJECTID && 3850 key.objectid != BTRFS_FS_TREE_OBJECTID) || 3851 key.objectid > BTRFS_LAST_FREE_OBJECTID) 3852 goto skip; 3853 3854 eb = path->nodes[0]; 3855 slot = path->slots[0]; 3856 item_size = btrfs_item_size_nr(eb, slot); 3857 if (item_size < sizeof(root_item)) 3858 goto skip; 3859 3860 read_extent_buffer(eb, &root_item, 3861 btrfs_item_ptr_offset(eb, slot), 3862 (int)sizeof(root_item)); 3863 if (btrfs_root_refs(&root_item) == 0) 3864 goto skip; 3865 3866 if (!btrfs_is_empty_uuid(root_item.uuid) || 3867 !btrfs_is_empty_uuid(root_item.received_uuid)) { 3868 if (trans) 3869 goto update_tree; 3870 3871 btrfs_release_path(path); 3872 /* 3873 * 1 - subvol uuid item 3874 * 1 - received_subvol uuid item 3875 */ 3876 trans = btrfs_start_transaction(fs_info->uuid_root, 2); 3877 if (IS_ERR(trans)) { 3878 ret = PTR_ERR(trans); 3879 break; 3880 } 3881 continue; 3882 } else { 3883 goto skip; 3884 } 3885 update_tree: 3886 if (!btrfs_is_empty_uuid(root_item.uuid)) { 3887 ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root, 3888 root_item.uuid, 3889 BTRFS_UUID_KEY_SUBVOL, 3890 key.objectid); 3891 if (ret < 0) { 3892 btrfs_warn(fs_info, "uuid_tree_add failed %d", 3893 ret); 3894 break; 3895 } 3896 } 3897 3898 if (!btrfs_is_empty_uuid(root_item.received_uuid)) { 3899 ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root, 3900 root_item.received_uuid, 3901 BTRFS_UUID_KEY_RECEIVED_SUBVOL, 3902 key.objectid); 3903 if (ret < 0) { 3904 btrfs_warn(fs_info, "uuid_tree_add failed %d", 3905 ret); 3906 break; 3907 } 3908 } 3909 3910 skip: 3911 if (trans) { 3912 ret = btrfs_end_transaction(trans, fs_info->uuid_root); 3913 trans = NULL; 3914 if (ret) 3915 break; 3916 } 3917 3918 btrfs_release_path(path); 3919 if (key.offset < (u64)-1) { 3920 key.offset++; 3921 } else if (key.type < BTRFS_ROOT_ITEM_KEY) { 3922 key.offset = 0; 3923 key.type = BTRFS_ROOT_ITEM_KEY; 3924 } else if (key.objectid < (u64)-1) { 3925 key.offset = 0; 3926 key.type = BTRFS_ROOT_ITEM_KEY; 3927 key.objectid++; 3928 } else { 3929 break; 3930 } 3931 cond_resched(); 3932 } 3933 3934 out: 3935 btrfs_free_path(path); 3936 if (trans && !IS_ERR(trans)) 3937 btrfs_end_transaction(trans, fs_info->uuid_root); 3938 if (ret) 3939 btrfs_warn(fs_info, "btrfs_uuid_scan_kthread failed %d", ret); 3940 else 3941 fs_info->update_uuid_tree_gen = 1; 3942 up(&fs_info->uuid_tree_rescan_sem); 3943 return 0; 3944 } 3945 3946 /* 3947 * Callback for btrfs_uuid_tree_iterate(). 3948 * returns: 3949 * 0 check succeeded, the entry is not outdated. 3950 * < 0 if an error occurred. 3951 * > 0 if the check failed, which means the caller shall remove the entry.
3952 */ 3953 static int btrfs_check_uuid_tree_entry(struct btrfs_fs_info *fs_info, 3954 u8 *uuid, u8 type, u64 subid) 3955 { 3956 struct btrfs_key key; 3957 int ret = 0; 3958 struct btrfs_root *subvol_root; 3959 3960 if (type != BTRFS_UUID_KEY_SUBVOL && 3961 type != BTRFS_UUID_KEY_RECEIVED_SUBVOL) 3962 goto out; 3963 3964 key.objectid = subid; 3965 key.type = BTRFS_ROOT_ITEM_KEY; 3966 key.offset = (u64)-1; 3967 subvol_root = btrfs_read_fs_root_no_name(fs_info, &key); 3968 if (IS_ERR(subvol_root)) { 3969 ret = PTR_ERR(subvol_root); 3970 if (ret == -ENOENT) 3971 ret = 1; 3972 goto out; 3973 } 3974 3975 switch (type) { 3976 case BTRFS_UUID_KEY_SUBVOL: 3977 if (memcmp(uuid, subvol_root->root_item.uuid, BTRFS_UUID_SIZE)) 3978 ret = 1; 3979 break; 3980 case BTRFS_UUID_KEY_RECEIVED_SUBVOL: 3981 if (memcmp(uuid, subvol_root->root_item.received_uuid, 3982 BTRFS_UUID_SIZE)) 3983 ret = 1; 3984 break; 3985 } 3986 3987 out: 3988 return ret; 3989 } 3990 3991 static int btrfs_uuid_rescan_kthread(void *data) 3992 { 3993 struct btrfs_fs_info *fs_info = (struct btrfs_fs_info *)data; 3994 int ret; 3995 3996 /* 3997 * 1st step is to iterate through the existing UUID tree and 3998 * to delete all entries that contain outdated data. 3999 * 2nd step is to add all missing entries to the UUID tree. 4000 */ 4001 ret = btrfs_uuid_tree_iterate(fs_info, btrfs_check_uuid_tree_entry); 4002 if (ret < 0) { 4003 btrfs_warn(fs_info, "iterating uuid_tree failed %d", ret); 4004 up(&fs_info->uuid_tree_rescan_sem); 4005 return ret; 4006 } 4007 return btrfs_uuid_scan_kthread(data); 4008 } 4009 4010 int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info) 4011 { 4012 struct btrfs_trans_handle *trans; 4013 struct btrfs_root *tree_root = fs_info->tree_root; 4014 struct btrfs_root *uuid_root; 4015 struct task_struct *task; 4016 int ret; 4017 4018 /* 4019 * 1 - root node 4020 * 1 - root item 4021 */ 4022 trans = btrfs_start_transaction(tree_root, 2); 4023 if (IS_ERR(trans)) 4024 return PTR_ERR(trans); 4025 4026 uuid_root = btrfs_create_tree(trans, fs_info, 4027 BTRFS_UUID_TREE_OBJECTID); 4028 if (IS_ERR(uuid_root)) { 4029 ret = PTR_ERR(uuid_root); 4030 btrfs_abort_transaction(trans, tree_root, ret); 4031 return ret; 4032 } 4033 4034 fs_info->uuid_root = uuid_root; 4035 4036 ret = btrfs_commit_transaction(trans, tree_root); 4037 if (ret) 4038 return ret; 4039 4040 down(&fs_info->uuid_tree_rescan_sem); 4041 task = kthread_run(btrfs_uuid_scan_kthread, fs_info, "btrfs-uuid"); 4042 if (IS_ERR(task)) { 4043 /* fs_info->update_uuid_tree_gen remains 0 in all error case */ 4044 btrfs_warn(fs_info, "failed to start uuid_scan task"); 4045 up(&fs_info->uuid_tree_rescan_sem); 4046 return PTR_ERR(task); 4047 } 4048 4049 return 0; 4050 } 4051 4052 int btrfs_check_uuid_tree(struct btrfs_fs_info *fs_info) 4053 { 4054 struct task_struct *task; 4055 4056 down(&fs_info->uuid_tree_rescan_sem); 4057 task = kthread_run(btrfs_uuid_rescan_kthread, fs_info, "btrfs-uuid"); 4058 if (IS_ERR(task)) { 4059 /* fs_info->update_uuid_tree_gen remains 0 in all error case */ 4060 btrfs_warn(fs_info, "failed to start uuid_rescan task"); 4061 up(&fs_info->uuid_tree_rescan_sem); 4062 return PTR_ERR(task); 4063 } 4064 4065 return 0; 4066 } 4067 4068 /* 4069 * shrinking a device means finding all of the device extents past 4070 * the new size, and then following the back refs to the chunks. 
4071 * The chunk relocation code actually frees the device extent 4072 */ 4073 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size) 4074 { 4075 struct btrfs_trans_handle *trans; 4076 struct btrfs_root *root = device->dev_root; 4077 struct btrfs_dev_extent *dev_extent = NULL; 4078 struct btrfs_path *path; 4079 u64 length; 4080 u64 chunk_objectid; 4081 u64 chunk_offset; 4082 int ret; 4083 int slot; 4084 int failed = 0; 4085 bool retried = false; 4086 bool checked_pending_chunks = false; 4087 struct extent_buffer *l; 4088 struct btrfs_key key; 4089 struct btrfs_super_block *super_copy = root->fs_info->super_copy; 4090 u64 old_total = btrfs_super_total_bytes(super_copy); 4091 u64 old_size = btrfs_device_get_total_bytes(device); 4092 u64 diff = old_size - new_size; 4093 4094 if (device->is_tgtdev_for_dev_replace) 4095 return -EINVAL; 4096 4097 path = btrfs_alloc_path(); 4098 if (!path) 4099 return -ENOMEM; 4100 4101 path->reada = 2; 4102 4103 lock_chunks(root); 4104 4105 btrfs_device_set_total_bytes(device, new_size); 4106 if (device->writeable) { 4107 device->fs_devices->total_rw_bytes -= diff; 4108 spin_lock(&root->fs_info->free_chunk_lock); 4109 root->fs_info->free_chunk_space -= diff; 4110 spin_unlock(&root->fs_info->free_chunk_lock); 4111 } 4112 unlock_chunks(root); 4113 4114 again: 4115 key.objectid = device->devid; 4116 key.offset = (u64)-1; 4117 key.type = BTRFS_DEV_EXTENT_KEY; 4118 4119 do { 4120 mutex_lock(&root->fs_info->delete_unused_bgs_mutex); 4121 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 4122 if (ret < 0) { 4123 mutex_unlock(&root->fs_info->delete_unused_bgs_mutex); 4124 goto done; 4125 } 4126 4127 ret = btrfs_previous_item(root, path, 0, key.type); 4128 if (ret) 4129 mutex_unlock(&root->fs_info->delete_unused_bgs_mutex); 4130 if (ret < 0) 4131 goto done; 4132 if (ret) { 4133 ret = 0; 4134 btrfs_release_path(path); 4135 break; 4136 } 4137 4138 l = path->nodes[0]; 4139 slot = path->slots[0]; 4140 btrfs_item_key_to_cpu(l, &key, path->slots[0]); 4141 4142 if (key.objectid != device->devid) { 4143 mutex_unlock(&root->fs_info->delete_unused_bgs_mutex); 4144 btrfs_release_path(path); 4145 break; 4146 } 4147 4148 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent); 4149 length = btrfs_dev_extent_length(l, dev_extent); 4150 4151 if (key.offset + length <= new_size) { 4152 mutex_unlock(&root->fs_info->delete_unused_bgs_mutex); 4153 btrfs_release_path(path); 4154 break; 4155 } 4156 4157 chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent); 4158 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent); 4159 btrfs_release_path(path); 4160 4161 ret = btrfs_relocate_chunk(root, chunk_objectid, chunk_offset); 4162 mutex_unlock(&root->fs_info->delete_unused_bgs_mutex); 4163 if (ret && ret != -ENOSPC) 4164 goto done; 4165 if (ret == -ENOSPC) 4166 failed++; 4167 } while (key.offset-- > 0); 4168 4169 if (failed && !retried) { 4170 failed = 0; 4171 retried = true; 4172 goto again; 4173 } else if (failed && retried) { 4174 ret = -ENOSPC; 4175 goto done; 4176 } 4177 4178 /* Shrinking succeeded, else we would be at "done". */ 4179 trans = btrfs_start_transaction(root, 0); 4180 if (IS_ERR(trans)) { 4181 ret = PTR_ERR(trans); 4182 goto done; 4183 } 4184 4185 lock_chunks(root); 4186 4187 /* 4188 * We checked in the above loop all device extents that were already in 4189 * the device tree. 
However, before we have updated the device's
4190 * total_bytes to the new size, we might have had chunk allocations that
4191 * have not completed yet (new block groups attached to transaction
4192 * handles), and therefore their device extents were not yet in the
4193 * device tree and we missed them in the loop above. So if we have any
4194 * pending chunk using a device extent that overlaps the device range
4195 * that we cannot use anymore, commit the current transaction and
4196 * repeat the search on the device tree - this way we guarantee we will
4197 * not have chunks using device extents that end beyond 'new_size'.
4198 */
4199 if (!checked_pending_chunks) {
4200 u64 start = new_size;
4201 u64 len = old_size - new_size;
4202
4203 if (contains_pending_extent(trans, device, &start, len)) {
4204 unlock_chunks(root);
4205 checked_pending_chunks = true;
4206 failed = 0;
4207 retried = false;
4208 ret = btrfs_commit_transaction(trans, root);
4209 if (ret)
4210 goto done;
4211 goto again;
4212 }
4213 }
4214
4215 btrfs_device_set_disk_total_bytes(device, new_size);
4216 if (list_empty(&device->resized_list))
4217 list_add_tail(&device->resized_list,
4218 &root->fs_info->fs_devices->resized_devices);
4219
4220 WARN_ON(diff > old_total);
4221 btrfs_set_super_total_bytes(super_copy, old_total - diff);
4222 unlock_chunks(root);
4223
4224 /* Now btrfs_update_device() will change the on-disk size. */
4225 ret = btrfs_update_device(trans, device);
4226 btrfs_end_transaction(trans, root);
4227 done:
4228 btrfs_free_path(path);
4229 if (ret) {
4230 lock_chunks(root);
4231 btrfs_device_set_total_bytes(device, old_size);
4232 if (device->writeable)
4233 device->fs_devices->total_rw_bytes += diff;
4234 spin_lock(&root->fs_info->free_chunk_lock);
4235 root->fs_info->free_chunk_space += diff;
4236 spin_unlock(&root->fs_info->free_chunk_lock);
4237 unlock_chunks(root);
4238 }
4239 return ret;
4240 }
4241
4242 static int btrfs_add_system_chunk(struct btrfs_root *root,
4243 struct btrfs_key *key,
4244 struct btrfs_chunk *chunk, int item_size)
4245 {
4246 struct btrfs_super_block *super_copy = root->fs_info->super_copy;
4247 struct btrfs_disk_key disk_key;
4248 u32 array_size;
4249 u8 *ptr;
4250
4251 lock_chunks(root);
4252 array_size = btrfs_super_sys_array_size(super_copy);
4253 if (array_size + item_size + sizeof(disk_key)
4254 > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
4255 unlock_chunks(root);
4256 return -EFBIG;
4257 }
4258
4259 ptr = super_copy->sys_chunk_array + array_size;
4260 btrfs_cpu_key_to_disk(&disk_key, key);
4261 memcpy(ptr, &disk_key, sizeof(disk_key));
4262 ptr += sizeof(disk_key);
4263 memcpy(ptr, chunk, item_size);
4264 item_size += sizeof(disk_key);
4265 btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
4266 unlock_chunks(root);
4267
4268 return 0;
4269 }
4270
4271 /*
4272 * sort the devices in descending order by max_avail, total_avail
4273 */
4274 static int btrfs_cmp_device_info(const void *a, const void *b)
4275 {
4276 const struct btrfs_device_info *di_a = a;
4277 const struct btrfs_device_info *di_b = b;
4278
4279 if (di_a->max_avail > di_b->max_avail)
4280 return -1;
4281 if (di_a->max_avail < di_b->max_avail)
4282 return 1;
4283 if (di_a->total_avail > di_b->total_avail)
4284 return -1;
4285 if (di_a->total_avail < di_b->total_avail)
4286 return 1;
4287 return 0;
4288 }
4289
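/*
 * Editorial note: btrfs_cmp_device_info() above orders the candidate
 * devices descending, first by max_avail (largest contiguous hole),
 * then by total_avail, so the allocator can pick the biggest holes
 * first. A self-contained userspace sketch of the same ordering with
 * qsort(3); the struct and the sample values are illustrative:
 */
#if 0
#include <stdio.h>
#include <stdlib.h>

struct demo_dev_info {
	unsigned long long max_avail;
	unsigned long long total_avail;
};

static int demo_cmp(const void *a, const void *b)
{
	const struct demo_dev_info *da = a, *db = b;

	if (da->max_avail != db->max_avail)
		return da->max_avail > db->max_avail ? -1 : 1;
	if (da->total_avail != db->total_avail)
		return da->total_avail > db->total_avail ? -1 : 1;
	return 0;
}

int main(void)
{
	struct demo_dev_info d[3] = {
		{ 1 << 20, 8 << 20 },	/* 1M hole, 8M free */
		{ 4 << 20, 4 << 20 },	/* 4M hole, 4M free */
		{ 4 << 20, 6 << 20 },	/* 4M hole, 6M free */
	};

	qsort(d, 3, sizeof(d[0]), demo_cmp);
	/* Prints 4M/6M, then 4M/4M, then 1M/8M: biggest holes first. */
	for (int i = 0; i < 3; i++)
		printf("%llu %llu\n", d[i].max_avail, d[i].total_avail);
	return 0;
}
#endif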
4290 static const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
4291 [BTRFS_RAID_RAID10] = {
4292 .sub_stripes = 2,
4293 .dev_stripes = 1,
4294 .devs_max = 0, /* 0 == as many as possible */
4295 .devs_min = 4,
4296 .devs_increment = 2,
4297 .ncopies = 2,
4298 },
4299 [BTRFS_RAID_RAID1] = {
4300 .sub_stripes = 1,
4301 .dev_stripes = 1,
4302 .devs_max = 2,
4303 .devs_min = 2,
4304 .devs_increment = 2,
4305 .ncopies = 2,
4306 },
4307 [BTRFS_RAID_DUP] = {
4308 .sub_stripes = 1,
4309 .dev_stripes = 2,
4310 .devs_max = 1,
4311 .devs_min = 1,
4312 .devs_increment = 1,
4313 .ncopies = 2,
4314 },
4315 [BTRFS_RAID_RAID0] = {
4316 .sub_stripes = 1,
4317 .dev_stripes = 1,
4318 .devs_max = 0,
4319 .devs_min = 2,
4320 .devs_increment = 1,
4321 .ncopies = 1,
4322 },
4323 [BTRFS_RAID_SINGLE] = {
4324 .sub_stripes = 1,
4325 .dev_stripes = 1,
4326 .devs_max = 1,
4327 .devs_min = 1,
4328 .devs_increment = 1,
4329 .ncopies = 1,
4330 },
4331 [BTRFS_RAID_RAID5] = {
4332 .sub_stripes = 1,
4333 .dev_stripes = 1,
4334 .devs_max = 0,
4335 .devs_min = 2,
4336 .devs_increment = 1,
4337 .ncopies = 2,
4338 },
4339 [BTRFS_RAID_RAID6] = {
4340 .sub_stripes = 1,
4341 .dev_stripes = 1,
4342 .devs_max = 0,
4343 .devs_min = 3,
4344 .devs_increment = 1,
4345 .ncopies = 3,
4346 },
4347 };
4348
4349 static u32 find_raid56_stripe_len(u32 data_devices, u32 dev_stripe_target)
4350 {
4351 /* TODO allow them to set a preferred stripe size */
4352 return 64 * 1024;
4353 }
4354
4355 static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
4356 {
4357 if (!(type & BTRFS_BLOCK_GROUP_RAID56_MASK))
4358 return;
4359
4360 btrfs_set_fs_incompat(info, RAID56);
4361 }
4362
4363 #define BTRFS_MAX_DEVS(r) ((BTRFS_LEAF_DATA_SIZE(r) \
4364 - sizeof(struct btrfs_item) \
4365 - sizeof(struct btrfs_chunk)) \
4366 / sizeof(struct btrfs_stripe) + 1)
4367
4368 #define BTRFS_MAX_DEVS_SYS_CHUNK ((BTRFS_SYSTEM_CHUNK_ARRAY_SIZE \
4369 - 2 * sizeof(struct btrfs_disk_key) \
4370 - 2 * sizeof(struct btrfs_chunk)) \
4371 / sizeof(struct btrfs_stripe) + 1)
4372
4373 static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
4374 struct btrfs_root *extent_root, u64 start,
4375 u64 type)
4376 {
4377 struct btrfs_fs_info *info = extent_root->fs_info;
4378 struct btrfs_fs_devices *fs_devices = info->fs_devices;
4379 struct list_head *cur;
4380 struct map_lookup *map = NULL;
4381 struct extent_map_tree *em_tree;
4382 struct extent_map *em;
4383 struct btrfs_device_info *devices_info = NULL;
4384 u64 total_avail;
4385 int num_stripes; /* total number of stripes to allocate */
4386 int data_stripes; /* number of stripes that count for
4387 block group size */
4388 int sub_stripes; /* sub_stripes info for map */
4389 int dev_stripes; /* stripes per dev */
4390 int devs_max; /* max devs to use */
4391 int devs_min; /* min devs needed */
4392 int devs_increment; /* ndevs has to be a multiple of this */
4393 int ncopies; /* how many copies of the data there are */
4394 int ret;
4395 u64 max_stripe_size;
4396 u64 max_chunk_size;
4397 u64 stripe_size;
4398 u64 num_bytes;
4399 u64 raid_stripe_len = BTRFS_STRIPE_LEN;
4400 int ndevs;
4401 int i;
4402 int j;
4403 int index;
4404
4405 BUG_ON(!alloc_profile_is_valid(type, 0));
4406
4407 if (list_empty(&fs_devices->alloc_list))
4408 return -ENOSPC;
4409
4410 index = __get_raid_index(type);
4411
4412 sub_stripes = btrfs_raid_array[index].sub_stripes;
4413 dev_stripes = btrfs_raid_array[index].dev_stripes;
4414 devs_max = btrfs_raid_array[index].devs_max;
4415 devs_min = btrfs_raid_array[index].devs_min;
4416 devs_increment = btrfs_raid_array[index].devs_increment;
4417 ncopies = btrfs_raid_array[index].ncopies;
4418
4419 if (type & BTRFS_BLOCK_GROUP_DATA) {
4420 max_stripe_size = 1024 *
1024 * 1024; 4421 max_chunk_size = 10 * max_stripe_size; 4422 if (!devs_max) 4423 devs_max = BTRFS_MAX_DEVS(info->chunk_root); 4424 } else if (type & BTRFS_BLOCK_GROUP_METADATA) { 4425 /* for larger filesystems, use larger metadata chunks */ 4426 if (fs_devices->total_rw_bytes > 50ULL * 1024 * 1024 * 1024) 4427 max_stripe_size = 1024 * 1024 * 1024; 4428 else 4429 max_stripe_size = 256 * 1024 * 1024; 4430 max_chunk_size = max_stripe_size; 4431 if (!devs_max) 4432 devs_max = BTRFS_MAX_DEVS(info->chunk_root); 4433 } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) { 4434 max_stripe_size = 32 * 1024 * 1024; 4435 max_chunk_size = 2 * max_stripe_size; 4436 if (!devs_max) 4437 devs_max = BTRFS_MAX_DEVS_SYS_CHUNK; 4438 } else { 4439 btrfs_err(info, "invalid chunk type 0x%llx requested", 4440 type); 4441 BUG_ON(1); 4442 } 4443 4444 /* we don't want a chunk larger than 10% of writeable space */ 4445 max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1), 4446 max_chunk_size); 4447 4448 devices_info = kcalloc(fs_devices->rw_devices, sizeof(*devices_info), 4449 GFP_NOFS); 4450 if (!devices_info) 4451 return -ENOMEM; 4452 4453 cur = fs_devices->alloc_list.next; 4454 4455 /* 4456 * in the first pass through the devices list, we gather information 4457 * about the available holes on each device. 4458 */ 4459 ndevs = 0; 4460 while (cur != &fs_devices->alloc_list) { 4461 struct btrfs_device *device; 4462 u64 max_avail; 4463 u64 dev_offset; 4464 4465 device = list_entry(cur, struct btrfs_device, dev_alloc_list); 4466 4467 cur = cur->next; 4468 4469 if (!device->writeable) { 4470 WARN(1, KERN_ERR 4471 "BTRFS: read-only device in alloc_list\n"); 4472 continue; 4473 } 4474 4475 if (!device->in_fs_metadata || 4476 device->is_tgtdev_for_dev_replace) 4477 continue; 4478 4479 if (device->total_bytes > device->bytes_used) 4480 total_avail = device->total_bytes - device->bytes_used; 4481 else 4482 total_avail = 0; 4483 4484 /* If there is no space on this device, skip it. */ 4485 if (total_avail == 0) 4486 continue; 4487 4488 ret = find_free_dev_extent(trans, device, 4489 max_stripe_size * dev_stripes, 4490 &dev_offset, &max_avail); 4491 if (ret && ret != -ENOSPC) 4492 goto error; 4493 4494 if (ret == 0) 4495 max_avail = max_stripe_size * dev_stripes; 4496 4497 if (max_avail < BTRFS_STRIPE_LEN * dev_stripes) 4498 continue; 4499 4500 if (ndevs == fs_devices->rw_devices) { 4501 WARN(1, "%s: found more than %llu devices\n", 4502 __func__, fs_devices->rw_devices); 4503 break; 4504 } 4505 devices_info[ndevs].dev_offset = dev_offset; 4506 devices_info[ndevs].max_avail = max_avail; 4507 devices_info[ndevs].total_avail = total_avail; 4508 devices_info[ndevs].dev = device; 4509 ++ndevs; 4510 } 4511 4512 /* 4513 * now sort the devices by hole size / available space 4514 */ 4515 sort(devices_info, ndevs, sizeof(struct btrfs_device_info), 4516 btrfs_cmp_device_info, NULL); 4517 4518 /* round down to number of usable stripes */ 4519 ndevs -= ndevs % devs_increment; 4520 4521 if (ndevs < devs_increment * sub_stripes || ndevs < devs_min) { 4522 ret = -ENOSPC; 4523 goto error; 4524 } 4525 4526 if (devs_max && ndevs > devs_max) 4527 ndevs = devs_max; 4528 /* 4529 * the primary goal is to maximize the number of stripes, so use as many 4530 * devices as possible, even if the stripes are not maximum sized. 
4531 */ 4532 stripe_size = devices_info[ndevs-1].max_avail; 4533 num_stripes = ndevs * dev_stripes; 4534 4535 /* 4536 * this will have to be fixed for RAID1 and RAID10 over 4537 * more drives 4538 */ 4539 data_stripes = num_stripes / ncopies; 4540 4541 if (type & BTRFS_BLOCK_GROUP_RAID5) { 4542 raid_stripe_len = find_raid56_stripe_len(ndevs - 1, 4543 btrfs_super_stripesize(info->super_copy)); 4544 data_stripes = num_stripes - 1; 4545 } 4546 if (type & BTRFS_BLOCK_GROUP_RAID6) { 4547 raid_stripe_len = find_raid56_stripe_len(ndevs - 2, 4548 btrfs_super_stripesize(info->super_copy)); 4549 data_stripes = num_stripes - 2; 4550 } 4551 4552 /* 4553 * Use the number of data stripes to figure out how big this chunk 4554 * is really going to be in terms of logical address space, 4555 * and compare that answer with the max chunk size 4556 */ 4557 if (stripe_size * data_stripes > max_chunk_size) { 4558 u64 mask = (1ULL << 24) - 1; 4559 4560 stripe_size = div_u64(max_chunk_size, data_stripes); 4561 4562 /* bump the answer up to a 16MB boundary */ 4563 stripe_size = (stripe_size + mask) & ~mask; 4564 4565 /* but don't go higher than the limits we found 4566 * while searching for free extents 4567 */ 4568 if (stripe_size > devices_info[ndevs-1].max_avail) 4569 stripe_size = devices_info[ndevs-1].max_avail; 4570 } 4571 4572 stripe_size = div_u64(stripe_size, dev_stripes); 4573 4574 /* align to BTRFS_STRIPE_LEN */ 4575 stripe_size = div_u64(stripe_size, raid_stripe_len); 4576 stripe_size *= raid_stripe_len; 4577 4578 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS); 4579 if (!map) { 4580 ret = -ENOMEM; 4581 goto error; 4582 } 4583 map->num_stripes = num_stripes; 4584 4585 for (i = 0; i < ndevs; ++i) { 4586 for (j = 0; j < dev_stripes; ++j) { 4587 int s = i * dev_stripes + j; 4588 map->stripes[s].dev = devices_info[i].dev; 4589 map->stripes[s].physical = devices_info[i].dev_offset + 4590 j * stripe_size; 4591 } 4592 } 4593 map->sector_size = extent_root->sectorsize; 4594 map->stripe_len = raid_stripe_len; 4595 map->io_align = raid_stripe_len; 4596 map->io_width = raid_stripe_len; 4597 map->type = type; 4598 map->sub_stripes = sub_stripes; 4599 4600 num_bytes = stripe_size * data_stripes; 4601 4602 trace_btrfs_chunk_alloc(info->chunk_root, map, start, num_bytes); 4603 4604 em = alloc_extent_map(); 4605 if (!em) { 4606 kfree(map); 4607 ret = -ENOMEM; 4608 goto error; 4609 } 4610 set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags); 4611 em->bdev = (struct block_device *)map; 4612 em->start = start; 4613 em->len = num_bytes; 4614 em->block_start = 0; 4615 em->block_len = em->len; 4616 em->orig_block_len = stripe_size; 4617 4618 em_tree = &extent_root->fs_info->mapping_tree.map_tree; 4619 write_lock(&em_tree->lock); 4620 ret = add_extent_mapping(em_tree, em, 0); 4621 if (!ret) { 4622 list_add_tail(&em->list, &trans->transaction->pending_chunks); 4623 atomic_inc(&em->refs); 4624 } 4625 write_unlock(&em_tree->lock); 4626 if (ret) { 4627 free_extent_map(em); 4628 goto error; 4629 } 4630 4631 ret = btrfs_make_block_group(trans, extent_root, 0, type, 4632 BTRFS_FIRST_CHUNK_TREE_OBJECTID, 4633 start, num_bytes); 4634 if (ret) 4635 goto error_del_extent; 4636 4637 for (i = 0; i < map->num_stripes; i++) { 4638 num_bytes = map->stripes[i].dev->bytes_used + stripe_size; 4639 btrfs_device_set_bytes_used(map->stripes[i].dev, num_bytes); 4640 } 4641 4642 spin_lock(&extent_root->fs_info->free_chunk_lock); 4643 extent_root->fs_info->free_chunk_space -= (stripe_size * 4644 map->num_stripes); 4645 
spin_unlock(&extent_root->fs_info->free_chunk_lock); 4646 4647 free_extent_map(em); 4648 check_raid56_incompat_flag(extent_root->fs_info, type); 4649 4650 kfree(devices_info); 4651 return 0; 4652 4653 error_del_extent: 4654 write_lock(&em_tree->lock); 4655 remove_extent_mapping(em_tree, em); 4656 write_unlock(&em_tree->lock); 4657 4658 /* One for our allocation */ 4659 free_extent_map(em); 4660 /* One for the tree reference */ 4661 free_extent_map(em); 4662 /* One for the pending_chunks list reference */ 4663 free_extent_map(em); 4664 error: 4665 kfree(devices_info); 4666 return ret; 4667 } 4668 4669 int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans, 4670 struct btrfs_root *extent_root, 4671 u64 chunk_offset, u64 chunk_size) 4672 { 4673 struct btrfs_key key; 4674 struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root; 4675 struct btrfs_device *device; 4676 struct btrfs_chunk *chunk; 4677 struct btrfs_stripe *stripe; 4678 struct extent_map_tree *em_tree; 4679 struct extent_map *em; 4680 struct map_lookup *map; 4681 size_t item_size; 4682 u64 dev_offset; 4683 u64 stripe_size; 4684 int i = 0; 4685 int ret; 4686 4687 em_tree = &extent_root->fs_info->mapping_tree.map_tree; 4688 read_lock(&em_tree->lock); 4689 em = lookup_extent_mapping(em_tree, chunk_offset, chunk_size); 4690 read_unlock(&em_tree->lock); 4691 4692 if (!em) { 4693 btrfs_crit(extent_root->fs_info, "unable to find logical " 4694 "%Lu len %Lu", chunk_offset, chunk_size); 4695 return -EINVAL; 4696 } 4697 4698 if (em->start != chunk_offset || em->len != chunk_size) { 4699 btrfs_crit(extent_root->fs_info, "found a bad mapping, wanted" 4700 " %Lu-%Lu, found %Lu-%Lu", chunk_offset, 4701 chunk_size, em->start, em->len); 4702 free_extent_map(em); 4703 return -EINVAL; 4704 } 4705 4706 map = (struct map_lookup *)em->bdev; 4707 item_size = btrfs_chunk_item_size(map->num_stripes); 4708 stripe_size = em->orig_block_len; 4709 4710 chunk = kzalloc(item_size, GFP_NOFS); 4711 if (!chunk) { 4712 ret = -ENOMEM; 4713 goto out; 4714 } 4715 4716 for (i = 0; i < map->num_stripes; i++) { 4717 device = map->stripes[i].dev; 4718 dev_offset = map->stripes[i].physical; 4719 4720 ret = btrfs_update_device(trans, device); 4721 if (ret) 4722 goto out; 4723 ret = btrfs_alloc_dev_extent(trans, device, 4724 chunk_root->root_key.objectid, 4725 BTRFS_FIRST_CHUNK_TREE_OBJECTID, 4726 chunk_offset, dev_offset, 4727 stripe_size); 4728 if (ret) 4729 goto out; 4730 } 4731 4732 stripe = &chunk->stripe; 4733 for (i = 0; i < map->num_stripes; i++) { 4734 device = map->stripes[i].dev; 4735 dev_offset = map->stripes[i].physical; 4736 4737 btrfs_set_stack_stripe_devid(stripe, device->devid); 4738 btrfs_set_stack_stripe_offset(stripe, dev_offset); 4739 memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE); 4740 stripe++; 4741 } 4742 4743 btrfs_set_stack_chunk_length(chunk, chunk_size); 4744 btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid); 4745 btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len); 4746 btrfs_set_stack_chunk_type(chunk, map->type); 4747 btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes); 4748 btrfs_set_stack_chunk_io_align(chunk, map->stripe_len); 4749 btrfs_set_stack_chunk_io_width(chunk, map->stripe_len); 4750 btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize); 4751 btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes); 4752 4753 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 4754 key.type = BTRFS_CHUNK_ITEM_KEY; 4755 key.offset = chunk_offset; 4756 4757 ret = btrfs_insert_item(trans, 
chunk_root, &key, chunk, item_size);
4758 if (ret == 0 && map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
4759 /*
4760 * TODO: Cleanup of inserted chunk root in case of
4761 * failure.
4762 */
4763 ret = btrfs_add_system_chunk(chunk_root, &key, chunk,
4764 item_size);
4765 }
4766
4767 out:
4768 kfree(chunk);
4769 free_extent_map(em);
4770 return ret;
4771 }
4772
4773 /*
4774 * Chunk allocation falls into two parts. The first part does the work
4775 * that makes the newly allocated chunk usable, but does not do any
4776 * operation that modifies the chunk tree. The second part does the
4777 * work that requires modifying the chunk tree. This division is
4778 * important for the bootstrap process of adding storage to a seed btrfs.
4779 */
4780 int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
4781 struct btrfs_root *extent_root, u64 type)
4782 {
4783 u64 chunk_offset;
4784
4785 ASSERT(mutex_is_locked(&extent_root->fs_info->chunk_mutex));
4786 chunk_offset = find_next_chunk(extent_root->fs_info);
4787 return __btrfs_alloc_chunk(trans, extent_root, chunk_offset, type);
4788 }
4789
4790 static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
4791 struct btrfs_root *root,
4792 struct btrfs_device *device)
4793 {
4794 u64 chunk_offset;
4795 u64 sys_chunk_offset;
4796 u64 alloc_profile;
4797 struct btrfs_fs_info *fs_info = root->fs_info;
4798 struct btrfs_root *extent_root = fs_info->extent_root;
4799 int ret;
4800
4801 chunk_offset = find_next_chunk(fs_info);
4802 alloc_profile = btrfs_get_alloc_profile(extent_root, 0);
4803 ret = __btrfs_alloc_chunk(trans, extent_root, chunk_offset,
4804 alloc_profile);
4805 if (ret)
4806 return ret;
4807
4808 sys_chunk_offset = find_next_chunk(root->fs_info);
4809 alloc_profile = btrfs_get_alloc_profile(fs_info->chunk_root, 0);
4810 ret = __btrfs_alloc_chunk(trans, extent_root, sys_chunk_offset,
4811 alloc_profile);
4812 return ret;
4813 }
4814
4815 static inline int btrfs_chunk_max_errors(struct map_lookup *map)
4816 {
4817 int max_errors;
4818
4819 if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
4820 BTRFS_BLOCK_GROUP_RAID10 |
4821 BTRFS_BLOCK_GROUP_RAID5 |
4822 BTRFS_BLOCK_GROUP_DUP)) {
4823 max_errors = 1;
4824 } else if (map->type & BTRFS_BLOCK_GROUP_RAID6) {
4825 max_errors = 2;
4826 } else {
4827 max_errors = 0;
4828 }
4829
4830 return max_errors;
4831 }
4832
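/*
 * Editorial note: btrfs_chunk_max_errors() above encodes how many
 * device failures each profile tolerates: RAID1/RAID10/RAID5/DUP
 * survive one lost copy, RAID6 two, and everything else (single,
 * RAID0) none. A hedged standalone sketch of the same table, using
 * an illustrative enum rather than the kernel's block group flags:
 */
#if 0
enum demo_profile {
	DEMO_SINGLE, DEMO_RAID0, DEMO_RAID1, DEMO_DUP,
	DEMO_RAID10, DEMO_RAID5, DEMO_RAID6,
};

static int demo_max_errors(enum demo_profile p)
{
	switch (p) {
	case DEMO_RAID1:
	case DEMO_DUP:
	case DEMO_RAID10:
	case DEMO_RAID5:
		return 1;	/* one mirror/parity copy may be lost */
	case DEMO_RAID6:
		return 2;	/* P and Q allow two missing devices */
	default:
		return 0;	/* single and RAID0 have no redundancy */
	}
}
#endif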
4833 int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
4834 {
4835 struct extent_map *em;
4836 struct map_lookup *map;
4837 struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
4838 int readonly = 0;
4839 int miss_ndevs = 0;
4840 int i;
4841
4842 read_lock(&map_tree->map_tree.lock);
4843 em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
4844 read_unlock(&map_tree->map_tree.lock);
4845 if (!em)
4846 return 1;
4847
4848 map = (struct map_lookup *)em->bdev;
4849 for (i = 0; i < map->num_stripes; i++) {
4850 if (map->stripes[i].dev->missing) {
4851 miss_ndevs++;
4852 continue;
4853 }
4854
4855 if (!map->stripes[i].dev->writeable) {
4856 readonly = 1;
4857 goto end;
4858 }
4859 }
4860
4861 /*
4862 * If the number of missing devices is larger than max errors,
4863 * we cannot write the data into that chunk successfully, so
4864 * set it readonly.
4865 */
4866 if (miss_ndevs > btrfs_chunk_max_errors(map))
4867 readonly = 1;
4868 end:
4869 free_extent_map(em);
4870 return readonly;
4871 }
4872
4873 void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
4874 {
4875 extent_map_tree_init(&tree->map_tree);
4876 }
4877
4878 void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
4879 {
4880 struct extent_map *em;
4881
4882 while (1) {
4883 write_lock(&tree->map_tree.lock);
4884 em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
4885 if (em)
4886 remove_extent_mapping(&tree->map_tree, em);
4887 write_unlock(&tree->map_tree.lock);
4888 if (!em)
4889 break;
4890 /* once for us */
4891 free_extent_map(em);
4892 /* once for the tree */
4893 free_extent_map(em);
4894 }
4895 }
4896
4897 int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
4898 {
4899 struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
4900 struct extent_map *em;
4901 struct map_lookup *map;
4902 struct extent_map_tree *em_tree = &map_tree->map_tree;
4903 int ret;
4904
4905 read_lock(&em_tree->lock);
4906 em = lookup_extent_mapping(em_tree, logical, len);
4907 read_unlock(&em_tree->lock);
4908
4909 /*
4910 * We could return errors for these cases, but that could get ugly and
4911 * we'd probably end up doing the same thing anyway, which is to just exit
4912 * and do nothing else, so return 1 so the callers don't try to use other copies.
4913 */
4914 if (!em) {
4915 btrfs_crit(fs_info, "No mapping for %Lu-%Lu", logical,
4916 logical+len);
4917 return 1;
4918 }
4919
4920 if (em->start > logical || em->start + em->len < logical) {
4921 btrfs_crit(fs_info, "Invalid mapping for %Lu-%Lu, got "
4922 "%Lu-%Lu", logical, logical+len, em->start,
4923 em->start + em->len);
4924 free_extent_map(em);
4925 return 1;
4926 }
4927
4928 map = (struct map_lookup *)em->bdev;
4929 if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
4930 ret = map->num_stripes;
4931 else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
4932 ret = map->sub_stripes;
4933 else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
4934 ret = 2;
4935 else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
4936 ret = 3;
4937 else
4938 ret = 1;
4939 free_extent_map(em);
4940
4941 btrfs_dev_replace_lock(&fs_info->dev_replace);
4942 if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))
4943 ret++;
4944 btrfs_dev_replace_unlock(&fs_info->dev_replace);
4945
4946 return ret;
4947 }
4948
4949 unsigned long btrfs_full_stripe_len(struct btrfs_root *root,
4950 struct btrfs_mapping_tree *map_tree,
4951 u64 logical)
4952 {
4953 struct extent_map *em;
4954 struct map_lookup *map;
4955 struct extent_map_tree *em_tree = &map_tree->map_tree;
4956 unsigned long len = root->sectorsize;
4957
4958 read_lock(&em_tree->lock);
4959 em = lookup_extent_mapping(em_tree, logical, len);
4960 read_unlock(&em_tree->lock);
4961 BUG_ON(!em);
4962
4963 BUG_ON(em->start > logical || em->start + em->len < logical);
4964 map = (struct map_lookup *)em->bdev;
4965 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
4966 len = map->stripe_len * nr_data_stripes(map);
4967 free_extent_map(em);
4968 return len;
4969 }
4970
4971 int btrfs_is_parity_mirror(struct btrfs_mapping_tree *map_tree,
4972 u64 logical, u64 len, int mirror_num)
4973 {
4974 struct extent_map *em;
4975 struct map_lookup *map;
4976 struct extent_map_tree *em_tree = &map_tree->map_tree;
4977 int ret = 0;
4978
4979 read_lock(&em_tree->lock);
4980 em = lookup_extent_mapping(em_tree, logical, len);
4981 read_unlock(&em_tree->lock);
4982 BUG_ON(!em);
4983
4984 BUG_ON(em->start > logical ||
em->start + em->len < logical); 4985 map = (struct map_lookup *)em->bdev; 4986 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) 4987 ret = 1; 4988 free_extent_map(em); 4989 return ret; 4990 } 4991 4992 static int find_live_mirror(struct btrfs_fs_info *fs_info, 4993 struct map_lookup *map, int first, int num, 4994 int optimal, int dev_replace_is_ongoing) 4995 { 4996 int i; 4997 int tolerance; 4998 struct btrfs_device *srcdev; 4999 5000 if (dev_replace_is_ongoing && 5001 fs_info->dev_replace.cont_reading_from_srcdev_mode == 5002 BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID) 5003 srcdev = fs_info->dev_replace.srcdev; 5004 else 5005 srcdev = NULL; 5006 5007 /* 5008 * try to avoid the drive that is the source drive for a 5009 * dev-replace procedure, only choose it if no other non-missing 5010 * mirror is available 5011 */ 5012 for (tolerance = 0; tolerance < 2; tolerance++) { 5013 if (map->stripes[optimal].dev->bdev && 5014 (tolerance || map->stripes[optimal].dev != srcdev)) 5015 return optimal; 5016 for (i = first; i < first + num; i++) { 5017 if (map->stripes[i].dev->bdev && 5018 (tolerance || map->stripes[i].dev != srcdev)) 5019 return i; 5020 } 5021 } 5022 5023 /* we couldn't find one that doesn't fail. Just return something 5024 * and the io error handling code will clean up eventually 5025 */ 5026 return optimal; 5027 } 5028 5029 static inline int parity_smaller(u64 a, u64 b) 5030 { 5031 return a > b; 5032 } 5033 5034 /* Bubble-sort the stripe set to put the parity/syndrome stripes last */ 5035 static void sort_parity_stripes(struct btrfs_bio *bbio, int num_stripes) 5036 { 5037 struct btrfs_bio_stripe s; 5038 int i; 5039 u64 l; 5040 int again = 1; 5041 5042 while (again) { 5043 again = 0; 5044 for (i = 0; i < num_stripes - 1; i++) { 5045 if (parity_smaller(bbio->raid_map[i], 5046 bbio->raid_map[i+1])) { 5047 s = bbio->stripes[i]; 5048 l = bbio->raid_map[i]; 5049 bbio->stripes[i] = bbio->stripes[i+1]; 5050 bbio->raid_map[i] = bbio->raid_map[i+1]; 5051 bbio->stripes[i+1] = s; 5052 bbio->raid_map[i+1] = l; 5053 5054 again = 1; 5055 } 5056 } 5057 } 5058 } 5059 5060 static struct btrfs_bio *alloc_btrfs_bio(int total_stripes, int real_stripes) 5061 { 5062 struct btrfs_bio *bbio = kzalloc( 5063 /* the size of the btrfs_bio */ 5064 sizeof(struct btrfs_bio) + 5065 /* plus the variable array for the stripes */ 5066 sizeof(struct btrfs_bio_stripe) * (total_stripes) + 5067 /* plus the variable array for the tgt dev */ 5068 sizeof(int) * (real_stripes) + 5069 /* 5070 * plus the raid_map, which includes both the tgt dev 5071 * and the stripes 5072 */ 5073 sizeof(u64) * (total_stripes), 5074 GFP_NOFS); 5075 if (!bbio) 5076 return NULL; 5077 5078 atomic_set(&bbio->error, 0); 5079 atomic_set(&bbio->refs, 1); 5080 5081 return bbio; 5082 } 5083 5084 void btrfs_get_bbio(struct btrfs_bio *bbio) 5085 { 5086 WARN_ON(!atomic_read(&bbio->refs)); 5087 atomic_inc(&bbio->refs); 5088 } 5089 5090 void btrfs_put_bbio(struct btrfs_bio *bbio) 5091 { 5092 if (!bbio) 5093 return; 5094 if (atomic_dec_and_test(&bbio->refs)) 5095 kfree(bbio); 5096 } 5097 5098 static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, 5099 u64 logical, u64 *length, 5100 struct btrfs_bio **bbio_ret, 5101 int mirror_num, int need_raid_map) 5102 { 5103 struct extent_map *em; 5104 struct map_lookup *map; 5105 struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree; 5106 struct extent_map_tree *em_tree = &map_tree->map_tree; 5107 u64 offset; 5108 u64 stripe_offset; 5109 u64 stripe_end_offset; 5110 u64 stripe_nr; 5111 u64 
stripe_nr_orig; 5112 u64 stripe_nr_end; 5113 u64 stripe_len; 5114 u32 stripe_index; 5115 int i; 5116 int ret = 0; 5117 int num_stripes; 5118 int max_errors = 0; 5119 int tgtdev_indexes = 0; 5120 struct btrfs_bio *bbio = NULL; 5121 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace; 5122 int dev_replace_is_ongoing = 0; 5123 int num_alloc_stripes; 5124 int patch_the_first_stripe_for_dev_replace = 0; 5125 u64 physical_to_patch_in_first_stripe = 0; 5126 u64 raid56_full_stripe_start = (u64)-1; 5127 5128 read_lock(&em_tree->lock); 5129 em = lookup_extent_mapping(em_tree, logical, *length); 5130 read_unlock(&em_tree->lock); 5131 5132 if (!em) { 5133 btrfs_crit(fs_info, "unable to find logical %llu len %llu", 5134 logical, *length); 5135 return -EINVAL; 5136 } 5137 5138 if (em->start > logical || em->start + em->len < logical) { 5139 btrfs_crit(fs_info, "found a bad mapping, wanted %Lu, " 5140 "found %Lu-%Lu", logical, em->start, 5141 em->start + em->len); 5142 free_extent_map(em); 5143 return -EINVAL; 5144 } 5145 5146 map = (struct map_lookup *)em->bdev; 5147 offset = logical - em->start; 5148 5149 stripe_len = map->stripe_len; 5150 stripe_nr = offset; 5151 /* 5152 * stripe_nr counts the total number of stripes we have to stride 5153 * to get to this block 5154 */ 5155 stripe_nr = div64_u64(stripe_nr, stripe_len); 5156 5157 stripe_offset = stripe_nr * stripe_len; 5158 BUG_ON(offset < stripe_offset); 5159 5160 /* stripe_offset is the offset of this block in its stripe*/ 5161 stripe_offset = offset - stripe_offset; 5162 5163 /* if we're here for raid56, we need to know the stripe aligned start */ 5164 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { 5165 unsigned long full_stripe_len = stripe_len * nr_data_stripes(map); 5166 raid56_full_stripe_start = offset; 5167 5168 /* allow a write of a full stripe, but make sure we don't 5169 * allow straddling of stripes 5170 */ 5171 raid56_full_stripe_start = div64_u64(raid56_full_stripe_start, 5172 full_stripe_len); 5173 raid56_full_stripe_start *= full_stripe_len; 5174 } 5175 5176 if (rw & REQ_DISCARD) { 5177 /* we don't discard raid56 yet */ 5178 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { 5179 ret = -EOPNOTSUPP; 5180 goto out; 5181 } 5182 *length = min_t(u64, em->len - offset, *length); 5183 } else if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) { 5184 u64 max_len; 5185 /* For writes to RAID[56], allow a full stripeset across all disks. 5186 For other RAID types and for RAID[56] reads, just allow a single 5187 stripe (on a single disk). 
*/ 5188 if ((map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) && 5189 (rw & REQ_WRITE)) { 5190 max_len = stripe_len * nr_data_stripes(map) - 5191 (offset - raid56_full_stripe_start); 5192 } else { 5193 /* we limit the length of each bio to what fits in a stripe */ 5194 max_len = stripe_len - stripe_offset; 5195 } 5196 *length = min_t(u64, em->len - offset, max_len); 5197 } else { 5198 *length = em->len - offset; 5199 } 5200 5201 /* This is for when we're called from btrfs_merge_bio_hook() and all 5202 it cares about is the length */ 5203 if (!bbio_ret) 5204 goto out; 5205 5206 btrfs_dev_replace_lock(dev_replace); 5207 dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace); 5208 if (!dev_replace_is_ongoing) 5209 btrfs_dev_replace_unlock(dev_replace); 5210 5211 if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 && 5212 !(rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)) && 5213 dev_replace->tgtdev != NULL) { 5214 /* 5215 * in dev-replace case, for repair case (that's the only 5216 * case where the mirror is selected explicitly when 5217 * calling btrfs_map_block), blocks left of the left cursor 5218 * can also be read from the target drive. 5219 * For REQ_GET_READ_MIRRORS, the target drive is added as 5220 * the last one to the array of stripes. For READ, it also 5221 * needs to be supported using the same mirror number. 5222 * If the requested block is not left of the left cursor, 5223 * EIO is returned. This can happen because btrfs_num_copies() 5224 * returns one more in the dev-replace case. 5225 */ 5226 u64 tmp_length = *length; 5227 struct btrfs_bio *tmp_bbio = NULL; 5228 int tmp_num_stripes; 5229 u64 srcdev_devid = dev_replace->srcdev->devid; 5230 int index_srcdev = 0; 5231 int found = 0; 5232 u64 physical_of_found = 0; 5233 5234 ret = __btrfs_map_block(fs_info, REQ_GET_READ_MIRRORS, 5235 logical, &tmp_length, &tmp_bbio, 0, 0); 5236 if (ret) { 5237 WARN_ON(tmp_bbio != NULL); 5238 goto out; 5239 } 5240 5241 tmp_num_stripes = tmp_bbio->num_stripes; 5242 if (mirror_num > tmp_num_stripes) { 5243 /* 5244 * REQ_GET_READ_MIRRORS does not contain this 5245 * mirror, that means that the requested area 5246 * is not left of the left cursor 5247 */ 5248 ret = -EIO; 5249 btrfs_put_bbio(tmp_bbio); 5250 goto out; 5251 } 5252 5253 /* 5254 * process the rest of the function using the mirror_num 5255 * of the source drive. Therefore look it up first. 5256 * At the end, patch the device pointer to the one of the 5257 * target drive. 
5258 */ 5259 for (i = 0; i < tmp_num_stripes; i++) { 5260 if (tmp_bbio->stripes[i].dev->devid == srcdev_devid) { 5261 /* 5262 * In case of DUP, in order to keep it 5263 * simple, only add the mirror with the 5264 * lowest physical address 5265 */ 5266 if (found && 5267 physical_of_found <= 5268 tmp_bbio->stripes[i].physical) 5269 continue; 5270 index_srcdev = i; 5271 found = 1; 5272 physical_of_found = 5273 tmp_bbio->stripes[i].physical; 5274 } 5275 } 5276 5277 if (found) { 5278 mirror_num = index_srcdev + 1; 5279 patch_the_first_stripe_for_dev_replace = 1; 5280 physical_to_patch_in_first_stripe = physical_of_found; 5281 } else { 5282 WARN_ON(1); 5283 ret = -EIO; 5284 btrfs_put_bbio(tmp_bbio); 5285 goto out; 5286 } 5287 5288 btrfs_put_bbio(tmp_bbio); 5289 } else if (mirror_num > map->num_stripes) { 5290 mirror_num = 0; 5291 } 5292 5293 num_stripes = 1; 5294 stripe_index = 0; 5295 stripe_nr_orig = stripe_nr; 5296 stripe_nr_end = ALIGN(offset + *length, map->stripe_len); 5297 stripe_nr_end = div_u64(stripe_nr_end, map->stripe_len); 5298 stripe_end_offset = stripe_nr_end * map->stripe_len - 5299 (offset + *length); 5300 5301 if (map->type & BTRFS_BLOCK_GROUP_RAID0) { 5302 if (rw & REQ_DISCARD) 5303 num_stripes = min_t(u64, map->num_stripes, 5304 stripe_nr_end - stripe_nr_orig); 5305 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, 5306 &stripe_index); 5307 if (!(rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS))) 5308 mirror_num = 1; 5309 } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) { 5310 if (rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)) 5311 num_stripes = map->num_stripes; 5312 else if (mirror_num) 5313 stripe_index = mirror_num - 1; 5314 else { 5315 stripe_index = find_live_mirror(fs_info, map, 0, 5316 map->num_stripes, 5317 current->pid % map->num_stripes, 5318 dev_replace_is_ongoing); 5319 mirror_num = stripe_index + 1; 5320 } 5321 5322 } else if (map->type & BTRFS_BLOCK_GROUP_DUP) { 5323 if (rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)) { 5324 num_stripes = map->num_stripes; 5325 } else if (mirror_num) { 5326 stripe_index = mirror_num - 1; 5327 } else { 5328 mirror_num = 1; 5329 } 5330 5331 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) { 5332 u32 factor = map->num_stripes / map->sub_stripes; 5333 5334 stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index); 5335 stripe_index *= map->sub_stripes; 5336 5337 if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) 5338 num_stripes = map->sub_stripes; 5339 else if (rw & REQ_DISCARD) 5340 num_stripes = min_t(u64, map->sub_stripes * 5341 (stripe_nr_end - stripe_nr_orig), 5342 map->num_stripes); 5343 else if (mirror_num) 5344 stripe_index += mirror_num - 1; 5345 else { 5346 int old_stripe_index = stripe_index; 5347 stripe_index = find_live_mirror(fs_info, map, 5348 stripe_index, 5349 map->sub_stripes, stripe_index + 5350 current->pid % map->sub_stripes, 5351 dev_replace_is_ongoing); 5352 mirror_num = stripe_index - old_stripe_index + 1; 5353 } 5354 5355 } else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { 5356 if (need_raid_map && 5357 ((rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) || 5358 mirror_num > 1)) { 5359 /* push stripe_nr back to the start of the full stripe */ 5360 stripe_nr = div_u64(raid56_full_stripe_start, 5361 stripe_len * nr_data_stripes(map)); 5362 5363 /* RAID[56] write or recovery. 
Return all stripes */ 5364 num_stripes = map->num_stripes; 5365 max_errors = nr_parity_stripes(map); 5366 5367 *length = map->stripe_len; 5368 stripe_index = 0; 5369 stripe_offset = 0; 5370 } else { 5371 /* 5372 * Mirror #0 or #1 means the original data block. 5373 * Mirror #2 is RAID5 parity block. 5374 * Mirror #3 is RAID6 Q block. 5375 */ 5376 stripe_nr = div_u64_rem(stripe_nr, 5377 nr_data_stripes(map), &stripe_index); 5378 if (mirror_num > 1) 5379 stripe_index = nr_data_stripes(map) + 5380 mirror_num - 2; 5381 5382 /* We distribute the parity blocks across stripes */ 5383 div_u64_rem(stripe_nr + stripe_index, map->num_stripes, 5384 &stripe_index); 5385 if (!(rw & (REQ_WRITE | REQ_DISCARD | 5386 REQ_GET_READ_MIRRORS)) && mirror_num <= 1) 5387 mirror_num = 1; 5388 } 5389 } else { 5390 /* 5391 * after this, stripe_nr is the number of stripes on this 5392 * device we have to walk to find the data, and stripe_index is 5393 * the number of our device in the stripe array 5394 */ 5395 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, 5396 &stripe_index); 5397 mirror_num = stripe_index + 1; 5398 } 5399 BUG_ON(stripe_index >= map->num_stripes); 5400 5401 num_alloc_stripes = num_stripes; 5402 if (dev_replace_is_ongoing) { 5403 if (rw & (REQ_WRITE | REQ_DISCARD)) 5404 num_alloc_stripes <<= 1; 5405 if (rw & REQ_GET_READ_MIRRORS) 5406 num_alloc_stripes++; 5407 tgtdev_indexes = num_stripes; 5408 } 5409 5410 bbio = alloc_btrfs_bio(num_alloc_stripes, tgtdev_indexes); 5411 if (!bbio) { 5412 ret = -ENOMEM; 5413 goto out; 5414 } 5415 if (dev_replace_is_ongoing) 5416 bbio->tgtdev_map = (int *)(bbio->stripes + num_alloc_stripes); 5417 5418 /* build raid_map */ 5419 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK && 5420 need_raid_map && ((rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) || 5421 mirror_num > 1)) { 5422 u64 tmp; 5423 unsigned rot; 5424 5425 bbio->raid_map = (u64 *)((void *)bbio->stripes + 5426 sizeof(struct btrfs_bio_stripe) * 5427 num_alloc_stripes + 5428 sizeof(int) * tgtdev_indexes); 5429 5430 /* Work out the disk rotation on this stripe-set */ 5431 div_u64_rem(stripe_nr, num_stripes, &rot); 5432 5433 /* Fill in the logical address of each stripe */ 5434 tmp = stripe_nr * nr_data_stripes(map); 5435 for (i = 0; i < nr_data_stripes(map); i++) 5436 bbio->raid_map[(i+rot) % num_stripes] = 5437 em->start + (tmp + i) * map->stripe_len; 5438 5439 bbio->raid_map[(i+rot) % map->num_stripes] = RAID5_P_STRIPE; 5440 if (map->type & BTRFS_BLOCK_GROUP_RAID6) 5441 bbio->raid_map[(i+rot+1) % num_stripes] = 5442 RAID6_Q_STRIPE; 5443 } 5444 5445 if (rw & REQ_DISCARD) { 5446 u32 factor = 0; 5447 u32 sub_stripes = 0; 5448 u64 stripes_per_dev = 0; 5449 u32 remaining_stripes = 0; 5450 u32 last_stripe = 0; 5451 5452 if (map->type & 5453 (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) { 5454 if (map->type & BTRFS_BLOCK_GROUP_RAID0) 5455 sub_stripes = 1; 5456 else 5457 sub_stripes = map->sub_stripes; 5458 5459 factor = map->num_stripes / sub_stripes; 5460 stripes_per_dev = div_u64_rem(stripe_nr_end - 5461 stripe_nr_orig, 5462 factor, 5463 &remaining_stripes); 5464 div_u64_rem(stripe_nr_end - 1, factor, &last_stripe); 5465 last_stripe *= sub_stripes; 5466 } 5467 5468 for (i = 0; i < num_stripes; i++) { 5469 bbio->stripes[i].physical = 5470 map->stripes[stripe_index].physical + 5471 stripe_offset + stripe_nr * map->stripe_len; 5472 bbio->stripes[i].dev = map->stripes[stripe_index].dev; 5473 5474 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | 5475 BTRFS_BLOCK_GROUP_RAID10)) { 5476 bbio->stripes[i].length = 
stripes_per_dev * 5477 map->stripe_len; 5478 5479 if (i / sub_stripes < remaining_stripes) 5480 bbio->stripes[i].length += 5481 map->stripe_len; 5482 5483 /* 5484 * Special for the first stripe and 5485 * the last stripe: 5486 * 5487 * |-------|...|-------| 5488 * |----------| 5489 * off end_off 5490 */ 5491 if (i < sub_stripes) 5492 bbio->stripes[i].length -= 5493 stripe_offset; 5494 5495 if (stripe_index >= last_stripe && 5496 stripe_index <= (last_stripe + 5497 sub_stripes - 1)) 5498 bbio->stripes[i].length -= 5499 stripe_end_offset; 5500 5501 if (i == sub_stripes - 1) 5502 stripe_offset = 0; 5503 } else 5504 bbio->stripes[i].length = *length; 5505 5506 stripe_index++; 5507 if (stripe_index == map->num_stripes) { 5508 /* This could only happen for RAID0/10 */ 5509 stripe_index = 0; 5510 stripe_nr++; 5511 } 5512 } 5513 } else { 5514 for (i = 0; i < num_stripes; i++) { 5515 bbio->stripes[i].physical = 5516 map->stripes[stripe_index].physical + 5517 stripe_offset + 5518 stripe_nr * map->stripe_len; 5519 bbio->stripes[i].dev = 5520 map->stripes[stripe_index].dev; 5521 stripe_index++; 5522 } 5523 } 5524 5525 if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) 5526 max_errors = btrfs_chunk_max_errors(map); 5527 5528 if (bbio->raid_map) 5529 sort_parity_stripes(bbio, num_stripes); 5530 5531 tgtdev_indexes = 0; 5532 if (dev_replace_is_ongoing && (rw & (REQ_WRITE | REQ_DISCARD)) && 5533 dev_replace->tgtdev != NULL) { 5534 int index_where_to_add; 5535 u64 srcdev_devid = dev_replace->srcdev->devid; 5536 5537 /* 5538 * duplicate the write operations while the dev replace 5539 * procedure is running. Since the copying of the old disk 5540 * to the new disk takes place at run time while the 5541 * filesystem is mounted writable, the regular write 5542 * operations to the old disk have to be duplicated to go 5543 * to the new disk as well. 5544 * Note that device->missing is handled by the caller, and 5545 * that the write to the old disk is already set up in the 5546 * stripes array. 5547 */ 5548 index_where_to_add = num_stripes; 5549 for (i = 0; i < num_stripes; i++) { 5550 if (bbio->stripes[i].dev->devid == srcdev_devid) { 5551 /* write to new disk, too */ 5552 struct btrfs_bio_stripe *new = 5553 bbio->stripes + index_where_to_add; 5554 struct btrfs_bio_stripe *old = 5555 bbio->stripes + i; 5556 5557 new->physical = old->physical; 5558 new->length = old->length; 5559 new->dev = dev_replace->tgtdev; 5560 bbio->tgtdev_map[i] = index_where_to_add; 5561 index_where_to_add++; 5562 max_errors++; 5563 tgtdev_indexes++; 5564 } 5565 } 5566 num_stripes = index_where_to_add; 5567 } else if (dev_replace_is_ongoing && (rw & REQ_GET_READ_MIRRORS) && 5568 dev_replace->tgtdev != NULL) { 5569 u64 srcdev_devid = dev_replace->srcdev->devid; 5570 int index_srcdev = 0; 5571 int found = 0; 5572 u64 physical_of_found = 0; 5573 5574 /* 5575 * During the dev-replace procedure, the target drive can 5576 * also be used to read data in case it is needed to repair 5577 * a corrupt block elsewhere. This is possible if the 5578 * requested area is left of the left cursor. In this area, 5579 * the target drive is a full copy of the source drive. 
5580 */ 5581 for (i = 0; i < num_stripes; i++) { 5582 if (bbio->stripes[i].dev->devid == srcdev_devid) { 5583 /* 5584 * In case of DUP, in order to keep it 5585 * simple, only add the mirror with the 5586 * lowest physical address 5587 */ 5588 if (found && 5589 physical_of_found <= 5590 bbio->stripes[i].physical) 5591 continue; 5592 index_srcdev = i; 5593 found = 1; 5594 physical_of_found = bbio->stripes[i].physical; 5595 } 5596 } 5597 if (found) { 5598 if (physical_of_found + map->stripe_len <= 5599 dev_replace->cursor_left) { 5600 struct btrfs_bio_stripe *tgtdev_stripe = 5601 bbio->stripes + num_stripes; 5602 5603 tgtdev_stripe->physical = physical_of_found; 5604 tgtdev_stripe->length = 5605 bbio->stripes[index_srcdev].length; 5606 tgtdev_stripe->dev = dev_replace->tgtdev; 5607 bbio->tgtdev_map[index_srcdev] = num_stripes; 5608 5609 tgtdev_indexes++; 5610 num_stripes++; 5611 } 5612 } 5613 } 5614 5615 *bbio_ret = bbio; 5616 bbio->map_type = map->type; 5617 bbio->num_stripes = num_stripes; 5618 bbio->max_errors = max_errors; 5619 bbio->mirror_num = mirror_num; 5620 bbio->num_tgtdevs = tgtdev_indexes; 5621 5622 /* 5623 * this is the case that REQ_READ && dev_replace_is_ongoing && 5624 * mirror_num == num_stripes + 1 && dev_replace target drive is 5625 * available as a mirror 5626 */ 5627 if (patch_the_first_stripe_for_dev_replace && num_stripes > 0) { 5628 WARN_ON(num_stripes > 1); 5629 bbio->stripes[0].dev = dev_replace->tgtdev; 5630 bbio->stripes[0].physical = physical_to_patch_in_first_stripe; 5631 bbio->mirror_num = map->num_stripes + 1; 5632 } 5633 out: 5634 if (dev_replace_is_ongoing) 5635 btrfs_dev_replace_unlock(dev_replace); 5636 free_extent_map(em); 5637 return ret; 5638 } 5639 5640 int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, 5641 u64 logical, u64 *length, 5642 struct btrfs_bio **bbio_ret, int mirror_num) 5643 { 5644 return __btrfs_map_block(fs_info, rw, logical, length, bbio_ret, 5645 mirror_num, 0); 5646 } 5647 5648 /* For Scrub/replace */ 5649 int btrfs_map_sblock(struct btrfs_fs_info *fs_info, int rw, 5650 u64 logical, u64 *length, 5651 struct btrfs_bio **bbio_ret, int mirror_num, 5652 int need_raid_map) 5653 { 5654 return __btrfs_map_block(fs_info, rw, logical, length, bbio_ret, 5655 mirror_num, need_raid_map); 5656 } 5657 5658 int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree, 5659 u64 chunk_start, u64 physical, u64 devid, 5660 u64 **logical, int *naddrs, int *stripe_len) 5661 { 5662 struct extent_map_tree *em_tree = &map_tree->map_tree; 5663 struct extent_map *em; 5664 struct map_lookup *map; 5665 u64 *buf; 5666 u64 bytenr; 5667 u64 length; 5668 u64 stripe_nr; 5669 u64 rmap_len; 5670 int i, j, nr = 0; 5671 5672 read_lock(&em_tree->lock); 5673 em = lookup_extent_mapping(em_tree, chunk_start, 1); 5674 read_unlock(&em_tree->lock); 5675 5676 if (!em) { 5677 printk(KERN_ERR "BTRFS: couldn't find em for chunk %Lu\n", 5678 chunk_start); 5679 return -EIO; 5680 } 5681 5682 if (em->start != chunk_start) { 5683 printk(KERN_ERR "BTRFS: bad chunk start, em=%Lu, wanted=%Lu\n", 5684 em->start, chunk_start); 5685 free_extent_map(em); 5686 return -EIO; 5687 } 5688 map = (struct map_lookup *)em->bdev; 5689 5690 length = em->len; 5691 rmap_len = map->stripe_len; 5692 5693 if (map->type & BTRFS_BLOCK_GROUP_RAID10) 5694 length = div_u64(length, map->num_stripes / map->sub_stripes); 5695 else if (map->type & BTRFS_BLOCK_GROUP_RAID0) 5696 length = div_u64(length, map->num_stripes); 5697 else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { 5698 length = div_u64(length, 
nr_data_stripes(map)); 5699 rmap_len = map->stripe_len * nr_data_stripes(map); 5700 } 5701 5702 buf = kcalloc(map->num_stripes, sizeof(u64), GFP_NOFS); 5703 BUG_ON(!buf); /* -ENOMEM */ 5704 5705 for (i = 0; i < map->num_stripes; i++) { 5706 if (devid && map->stripes[i].dev->devid != devid) 5707 continue; 5708 if (map->stripes[i].physical > physical || 5709 map->stripes[i].physical + length <= physical) 5710 continue; 5711 5712 stripe_nr = physical - map->stripes[i].physical; 5713 stripe_nr = div_u64(stripe_nr, map->stripe_len); 5714 5715 if (map->type & BTRFS_BLOCK_GROUP_RAID10) { 5716 stripe_nr = stripe_nr * map->num_stripes + i; 5717 stripe_nr = div_u64(stripe_nr, map->sub_stripes); 5718 } else if (map->type & BTRFS_BLOCK_GROUP_RAID0) { 5719 stripe_nr = stripe_nr * map->num_stripes + i; 5720 } /* else if RAID[56], multiply by nr_data_stripes(). 5721 * Alternatively, just use rmap_len below instead of 5722 * map->stripe_len */ 5723 5724 bytenr = chunk_start + stripe_nr * rmap_len; 5725 WARN_ON(nr >= map->num_stripes); 5726 for (j = 0; j < nr; j++) { 5727 if (buf[j] == bytenr) 5728 break; 5729 } 5730 if (j == nr) { 5731 WARN_ON(nr >= map->num_stripes); 5732 buf[nr++] = bytenr; 5733 } 5734 } 5735 5736 *logical = buf; 5737 *naddrs = nr; 5738 *stripe_len = rmap_len; 5739 5740 free_extent_map(em); 5741 return 0; 5742 } 5743 5744 static inline void btrfs_end_bbio(struct btrfs_bio *bbio, struct bio *bio, int err) 5745 { 5746 bio->bi_private = bbio->private; 5747 bio->bi_end_io = bbio->end_io; 5748 bio_endio(bio, err); 5749 5750 btrfs_put_bbio(bbio); 5751 } 5752 5753 static void btrfs_end_bio(struct bio *bio, int err) 5754 { 5755 struct btrfs_bio *bbio = bio->bi_private; 5756 int is_orig_bio = 0; 5757 5758 if (err) { 5759 atomic_inc(&bbio->error); 5760 if (err == -EIO || err == -EREMOTEIO) { 5761 unsigned int stripe_index = 5762 btrfs_io_bio(bio)->stripe_index; 5763 struct btrfs_device *dev; 5764 5765 BUG_ON(stripe_index >= bbio->num_stripes); 5766 dev = bbio->stripes[stripe_index].dev; 5767 if (dev->bdev) { 5768 if (bio->bi_rw & WRITE) 5769 btrfs_dev_stat_inc(dev, 5770 BTRFS_DEV_STAT_WRITE_ERRS); 5771 else 5772 btrfs_dev_stat_inc(dev, 5773 BTRFS_DEV_STAT_READ_ERRS); 5774 if ((bio->bi_rw & WRITE_FLUSH) == WRITE_FLUSH) 5775 btrfs_dev_stat_inc(dev, 5776 BTRFS_DEV_STAT_FLUSH_ERRS); 5777 btrfs_dev_stat_print_on_error(dev); 5778 } 5779 } 5780 } 5781 5782 if (bio == bbio->orig_bio) 5783 is_orig_bio = 1; 5784 5785 btrfs_bio_counter_dec(bbio->fs_info); 5786 5787 if (atomic_dec_and_test(&bbio->stripes_pending)) { 5788 if (!is_orig_bio) { 5789 bio_put(bio); 5790 bio = bbio->orig_bio; 5791 } 5792 5793 btrfs_io_bio(bio)->mirror_num = bbio->mirror_num; 5794 /* only send an error to the higher layers if it is 5795 * beyond the tolerance of the btrfs bio 5796 */ 5797 if (atomic_read(&bbio->error) > bbio->max_errors) { 5798 err = -EIO; 5799 } else { 5800 /* 5801 * this bio is actually up to date, we didn't 5802 * go over the max number of errors 5803 */ 5804 set_bit(BIO_UPTODATE, &bio->bi_flags); 5805 err = 0; 5806 } 5807 5808 btrfs_end_bbio(bbio, bio, err); 5809 } else if (!is_orig_bio) { 5810 bio_put(bio); 5811 } 5812 } 5813 5814 /* 5815 * see run_scheduled_bios for a description of why bios are collected for 5816 * async submit. 5817 * 5818 * This will add one bio to the pending list for a device and make sure 5819 * the work struct is scheduled. 
5820 */ 5821 static noinline void btrfs_schedule_bio(struct btrfs_root *root, 5822 struct btrfs_device *device, 5823 int rw, struct bio *bio) 5824 { 5825 int should_queue = 1; 5826 struct btrfs_pending_bios *pending_bios; 5827 5828 if (device->missing || !device->bdev) { 5829 bio_endio(bio, -EIO); 5830 return; 5831 } 5832 5833 /* don't bother with additional async steps for reads, right now */ 5834 if (!(rw & REQ_WRITE)) { 5835 bio_get(bio); 5836 btrfsic_submit_bio(rw, bio); 5837 bio_put(bio); 5838 return; 5839 } 5840 5841 /* 5842 * nr_async_bios allows us to reliably return congestion to the 5843 * higher layers. Otherwise, the async bio makes it appear we have 5844 * made progress against dirty pages when we've really just put it 5845 * on a queue for later 5846 */ 5847 atomic_inc(&root->fs_info->nr_async_bios); 5848 WARN_ON(bio->bi_next); 5849 bio->bi_next = NULL; 5850 bio->bi_rw |= rw; 5851 5852 spin_lock(&device->io_lock); 5853 if (bio->bi_rw & REQ_SYNC) 5854 pending_bios = &device->pending_sync_bios; 5855 else 5856 pending_bios = &device->pending_bios; 5857 5858 if (pending_bios->tail) 5859 pending_bios->tail->bi_next = bio; 5860 5861 pending_bios->tail = bio; 5862 if (!pending_bios->head) 5863 pending_bios->head = bio; 5864 if (device->running_pending) 5865 should_queue = 0; 5866 5867 spin_unlock(&device->io_lock); 5868 5869 if (should_queue) 5870 btrfs_queue_work(root->fs_info->submit_workers, 5871 &device->work); 5872 } 5873 5874 static int bio_size_ok(struct block_device *bdev, struct bio *bio, 5875 sector_t sector) 5876 { 5877 struct bio_vec *prev; 5878 struct request_queue *q = bdev_get_queue(bdev); 5879 unsigned int max_sectors = queue_max_sectors(q); 5880 struct bvec_merge_data bvm = { 5881 .bi_bdev = bdev, 5882 .bi_sector = sector, 5883 .bi_rw = bio->bi_rw, 5884 }; 5885 5886 if (WARN_ON(bio->bi_vcnt == 0)) 5887 return 1; 5888 5889 prev = &bio->bi_io_vec[bio->bi_vcnt - 1]; 5890 if (bio_sectors(bio) > max_sectors) 5891 return 0; 5892 5893 if (!q->merge_bvec_fn) 5894 return 1; 5895 5896 bvm.bi_size = bio->bi_iter.bi_size - prev->bv_len; 5897 if (q->merge_bvec_fn(q, &bvm, prev) < prev->bv_len) 5898 return 0; 5899 return 1; 5900 } 5901 5902 static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio, 5903 struct bio *bio, u64 physical, int dev_nr, 5904 int rw, int async) 5905 { 5906 struct btrfs_device *dev = bbio->stripes[dev_nr].dev; 5907 5908 bio->bi_private = bbio; 5909 btrfs_io_bio(bio)->stripe_index = dev_nr; 5910 bio->bi_end_io = btrfs_end_bio; 5911 bio->bi_iter.bi_sector = physical >> 9; 5912 #ifdef DEBUG 5913 { 5914 struct rcu_string *name; 5915 5916 rcu_read_lock(); 5917 name = rcu_dereference(dev->name); 5918 pr_debug("btrfs_map_bio: rw %d, sector=%llu, dev=%lu " 5919 "(%s id %llu), size=%u\n", rw, 5920 (u64)bio->bi_iter.bi_sector, (u_long)dev->bdev->bd_dev, 5921 name->str, dev->devid, bio->bi_iter.bi_size); 5922 rcu_read_unlock(); 5923 } 5924 #endif 5925 bio->bi_bdev = dev->bdev; 5926 5927 btrfs_bio_counter_inc_noblocked(root->fs_info); 5928 5929 if (async) 5930 btrfs_schedule_bio(root, dev, rw, bio); 5931 else 5932 btrfsic_submit_bio(rw, bio); 5933 } 5934 5935 static int breakup_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio, 5936 struct bio *first_bio, struct btrfs_device *dev, 5937 int dev_nr, int rw, int async) 5938 { 5939 struct bio_vec *bvec = first_bio->bi_io_vec; 5940 struct bio *bio; 5941 int nr_vecs = bio_get_nr_vecs(dev->bdev); 5942 u64 physical = bbio->stripes[dev_nr].physical; 5943 5944 again: 5945 bio = 
btrfs_bio_alloc(dev->bdev, physical >> 9, nr_vecs, GFP_NOFS);
5946 if (!bio)
5947 return -ENOMEM;
5948
5949 while (bvec <= (first_bio->bi_io_vec + first_bio->bi_vcnt - 1)) {
5950 if (bio_add_page(bio, bvec->bv_page, bvec->bv_len,
5951 bvec->bv_offset) < bvec->bv_len) {
5952 u64 len = bio->bi_iter.bi_size;
5953
5954 atomic_inc(&bbio->stripes_pending);
5955 submit_stripe_bio(root, bbio, bio, physical, dev_nr,
5956 rw, async);
5957 physical += len;
5958 goto again;
5959 }
5960 bvec++;
5961 }
5962
5963 submit_stripe_bio(root, bbio, bio, physical, dev_nr, rw, async);
5964 return 0;
5965 }
5966
5967 static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
5968 {
5969 atomic_inc(&bbio->error);
5970 if (atomic_dec_and_test(&bbio->stripes_pending)) {
5971 /* Should be the original bio. */
5972 WARN_ON(bio != bbio->orig_bio);
5973
5974 btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
5975 bio->bi_iter.bi_sector = logical >> 9;
5976
5977 btrfs_end_bbio(bbio, bio, -EIO);
5978 }
5979 }
5980
5981 int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
5982 int mirror_num, int async_submit)
5983 {
5984 struct btrfs_device *dev;
5985 struct bio *first_bio = bio;
5986 u64 logical = (u64)bio->bi_iter.bi_sector << 9;
5987 u64 length = 0;
5988 u64 map_length;
5989 int ret;
5990 int dev_nr;
5991 int total_devs;
5992 struct btrfs_bio *bbio = NULL;
5993
5994 length = bio->bi_iter.bi_size;
5995 map_length = length;
5996
5997 btrfs_bio_counter_inc_blocked(root->fs_info);
5998 ret = __btrfs_map_block(root->fs_info, rw, logical, &map_length, &bbio,
5999 mirror_num, 1);
6000 if (ret) {
6001 btrfs_bio_counter_dec(root->fs_info);
6002 return ret;
6003 }
6004
6005 total_devs = bbio->num_stripes;
6006 bbio->orig_bio = first_bio;
6007 bbio->private = first_bio->bi_private;
6008 bbio->end_io = first_bio->bi_end_io;
6009 bbio->fs_info = root->fs_info;
6010 atomic_set(&bbio->stripes_pending, bbio->num_stripes);
6011
6012 if (bbio->raid_map) {
6013 /* In this case, map_length has been set to the length of
6014 a single stripe; not the whole write */
6015 if (rw & WRITE) {
6016 ret = raid56_parity_write(root, bio, bbio, map_length);
6017 } else {
6018 ret = raid56_parity_recover(root, bio, bbio, map_length,
6019 mirror_num, 1);
6020 }
6021
6022 btrfs_bio_counter_dec(root->fs_info);
6023 return ret;
6024 }
6025
6026 if (map_length < length) {
6027 btrfs_crit(root->fs_info, "mapping failed logical %llu bio len %llu len %llu",
6028 logical, length, map_length);
6029 BUG();
6030 }
6031
6032 for (dev_nr = 0; dev_nr < total_devs; dev_nr++) {
6033 dev = bbio->stripes[dev_nr].dev;
6034 if (!dev || !dev->bdev || (rw & WRITE && !dev->writeable)) {
6035 bbio_error(bbio, first_bio, logical);
6036 continue;
6037 }
6038
6039 /*
6040 * Check and see if we're ok with this bio based on its size
6041 * and offset with the given device.
struct btrfs_device *btrfs_find_device(struct btrfs_fs_info *fs_info, u64 devid,
				       u8 *uuid, u8 *fsid)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *cur_devices;

	cur_devices = fs_info->fs_devices;
	while (cur_devices) {
		if (!fsid ||
		    !memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
			device = __find_device(&cur_devices->devices,
					       devid, uuid);
			if (device)
				return device;
		}
		cur_devices = cur_devices->seed;
	}
	return NULL;
}

static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
					    struct btrfs_fs_devices *fs_devices,
					    u64 devid, u8 *dev_uuid)
{
	struct btrfs_device *device;

	device = btrfs_alloc_device(NULL, &devid, dev_uuid);
	if (IS_ERR(device))
		return NULL;

	list_add(&device->dev_list, &fs_devices->devices);
	device->fs_devices = fs_devices;
	fs_devices->num_devices++;

	device->missing = 1;
	fs_devices->missing_devices++;

	return device;
}
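/*
 * Lookup sketch: btrfs_find_device() above walks the main fs_devices
 * list and every chained seed fs_devices.  A caller that only knows the
 * devid can pass NULL for the uuid/fsid filters (illustrative only):
 *
 *	struct btrfs_device *dev;
 *
 *	dev = btrfs_find_device(fs_info, devid, NULL, NULL);
 *	if (!dev)
 *		return -ENODEV;
 *
 * which is exactly how the dev_stats ioctl handler below resolves a
 * device.
 */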
/**
 * btrfs_alloc_device - allocate struct btrfs_device
 * @fs_info:	used only for generating a new devid, can be NULL if
 *		devid is provided (i.e. @devid != NULL).
 * @devid:	a pointer to devid for this device.  If NULL a new devid
 *		is generated.
 * @uuid:	a pointer to UUID for this device.  If NULL a new UUID
 *		is generated.
 *
 * Return: a pointer to a new &struct btrfs_device on success; ERR_PTR()
 * on error.  Returned struct is not linked onto any lists and can be
 * destroyed with kfree() right away.
 */
struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
					const u64 *devid,
					const u8 *uuid)
{
	struct btrfs_device *dev;
	u64 tmp;

	if (WARN_ON(!devid && !fs_info))
		return ERR_PTR(-EINVAL);

	dev = __alloc_device();
	if (IS_ERR(dev))
		return dev;

	if (devid)
		tmp = *devid;
	else {
		int ret;

		ret = find_next_devid(fs_info, &tmp);
		if (ret) {
			kfree(dev);
			return ERR_PTR(ret);
		}
	}
	dev->devid = tmp;

	if (uuid)
		memcpy(dev->uuid, uuid, BTRFS_UUID_SIZE);
	else
		generate_random_uuid(dev->uuid);

	btrfs_init_work(&dev->work, btrfs_submit_helper,
			pending_bios_fn, NULL, NULL);

	return dev;
}

static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
			  struct extent_buffer *leaf,
			  struct btrfs_chunk *chunk)
{
	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
	struct map_lookup *map;
	struct extent_map *em;
	u64 logical;
	u64 length;
	u64 devid;
	u8 uuid[BTRFS_UUID_SIZE];
	int num_stripes;
	int ret;
	int i;

	logical = key->offset;
	length = btrfs_chunk_length(leaf, chunk);

	read_lock(&map_tree->map_tree.lock);
	em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
	read_unlock(&map_tree->map_tree.lock);

	/* already mapped? */
	if (em && em->start <= logical && em->start + em->len > logical) {
		free_extent_map(em);
		return 0;
	} else if (em) {
		free_extent_map(em);
	}

	em = alloc_extent_map();
	if (!em)
		return -ENOMEM;
	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
	if (!map) {
		free_extent_map(em);
		return -ENOMEM;
	}

	set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
	em->bdev = (struct block_device *)map;
	em->start = logical;
	em->len = length;
	em->orig_start = 0;
	em->block_start = 0;
	em->block_len = em->len;

	map->num_stripes = num_stripes;
	map->io_width = btrfs_chunk_io_width(leaf, chunk);
	map->io_align = btrfs_chunk_io_align(leaf, chunk);
	map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
	map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
	map->type = btrfs_chunk_type(leaf, chunk);
	map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
	for (i = 0; i < num_stripes; i++) {
		map->stripes[i].physical =
			btrfs_stripe_offset_nr(leaf, chunk, i);
		devid = btrfs_stripe_devid_nr(leaf, chunk, i);
		read_extent_buffer(leaf, uuid, (unsigned long)
				   btrfs_stripe_dev_uuid_nr(chunk, i),
				   BTRFS_UUID_SIZE);
		map->stripes[i].dev = btrfs_find_device(root->fs_info, devid,
							uuid, NULL);
		if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
			free_extent_map(em);
			return -EIO;
		}
		if (!map->stripes[i].dev) {
			map->stripes[i].dev =
				add_missing_dev(root, root->fs_info->fs_devices,
						devid, uuid);
			if (!map->stripes[i].dev) {
				free_extent_map(em);
				return -EIO;
			}
			btrfs_warn(root->fs_info, "devid %llu uuid %pU is missing",
				   devid, uuid);
		}
		map->stripes[i].dev->in_fs_metadata = 1;
	}

	write_lock(&map_tree->map_tree.lock);
	ret = add_extent_mapping(&map_tree->map_tree, em, 0);
	write_unlock(&map_tree->map_tree.lock);
	BUG_ON(ret); /* Tree corruption */
	free_extent_map(em);

	return 0;
}
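/*
 * Mapping sketch (illustrative, simplified to RAID0-style striping):
 * once read_one_chunk() above has inserted the extent_map, a logical
 * address inside [em->start, em->start + em->len) resolves to a device
 * offset roughly as
 *
 *	offset = logical - em->start;
 *	stripe_nr = offset / map->stripe_len;
 *	stripe_offset = offset - stripe_nr * map->stripe_len;
 *	stripe_index = stripe_nr % map->num_stripes;
 *	stripe_nr /= map->num_stripes;
 *	physical = map->stripes[stripe_index].physical +
 *		   stripe_nr * map->stripe_len + stripe_offset;
 *
 * The real arithmetic lives in __btrfs_map_block() and differs per
 * RAID profile (mirrors, sub_stripes, parity rotation).
 */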
static void fill_device_from_item(struct extent_buffer *leaf,
				  struct btrfs_dev_item *dev_item,
				  struct btrfs_device *device)
{
	unsigned long ptr;

	device->devid = btrfs_device_id(leaf, dev_item);
	device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
	device->total_bytes = device->disk_total_bytes;
	device->commit_total_bytes = device->disk_total_bytes;
	device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
	device->commit_bytes_used = device->bytes_used;
	device->type = btrfs_device_type(leaf, dev_item);
	device->io_align = btrfs_device_io_align(leaf, dev_item);
	device->io_width = btrfs_device_io_width(leaf, dev_item);
	device->sector_size = btrfs_device_sector_size(leaf, dev_item);
	WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID);
	device->is_tgtdev_for_dev_replace = 0;

	ptr = btrfs_device_uuid(dev_item);
	read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
}

static struct btrfs_fs_devices *open_seed_devices(struct btrfs_root *root,
						  u8 *fsid)
{
	struct btrfs_fs_devices *fs_devices;
	int ret;

	BUG_ON(!mutex_is_locked(&uuid_mutex));

	fs_devices = root->fs_info->fs_devices->seed;
	while (fs_devices) {
		if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE))
			return fs_devices;

		fs_devices = fs_devices->seed;
	}

	fs_devices = find_fsid(fsid);
	if (!fs_devices) {
		if (!btrfs_test_opt(root, DEGRADED))
			return ERR_PTR(-ENOENT);

		fs_devices = alloc_fs_devices(fsid);
		if (IS_ERR(fs_devices))
			return fs_devices;

		fs_devices->seeding = 1;
		fs_devices->opened = 1;
		return fs_devices;
	}

	fs_devices = clone_fs_devices(fs_devices);
	if (IS_ERR(fs_devices))
		return fs_devices;

	ret = __btrfs_open_devices(fs_devices, FMODE_READ,
				   root->fs_info->bdev_holder);
	if (ret) {
		free_fs_devices(fs_devices);
		fs_devices = ERR_PTR(ret);
		goto out;
	}

	if (!fs_devices->seeding) {
		__btrfs_close_devices(fs_devices);
		free_fs_devices(fs_devices);
		fs_devices = ERR_PTR(-EINVAL);
		goto out;
	}

	fs_devices->seed = root->fs_info->fs_devices->seed;
	root->fs_info->fs_devices->seed = fs_devices;
out:
	return fs_devices;
}
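/*
 * Seed chain sketch: a sprouted filesystem keeps its read-only seed
 * devices on a singly linked chain hanging off fs_devices->seed, which
 * open_seed_devices() above extends.  Walking every device therefore
 * looks like (illustrative only, handle() is a hypothetical helper):
 *
 *	struct btrfs_fs_devices *cur = fs_info->fs_devices;
 *
 *	while (cur) {
 *		list_for_each_entry(device, &cur->devices, dev_list)
 *			handle(device);
 *		cur = cur->seed;
 *	}
 *
 * btrfs_init_devices_late() and btrfs_set_fs_info_ptr() below follow
 * exactly this pattern.
 */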
static int read_one_dev(struct btrfs_root *root,
			struct extent_buffer *leaf,
			struct btrfs_dev_item *dev_item)
{
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
	struct btrfs_device *device;
	u64 devid;
	u8 fs_uuid[BTRFS_UUID_SIZE];
	u8 dev_uuid[BTRFS_UUID_SIZE];

	devid = btrfs_device_id(leaf, dev_item);
	read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
			   BTRFS_UUID_SIZE);
	read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
			   BTRFS_UUID_SIZE);

	if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_UUID_SIZE)) {
		fs_devices = open_seed_devices(root, fs_uuid);
		if (IS_ERR(fs_devices))
			return PTR_ERR(fs_devices);
	}

	device = btrfs_find_device(root->fs_info, devid, dev_uuid, fs_uuid);
	if (!device) {
		if (!btrfs_test_opt(root, DEGRADED))
			return -EIO;

		device = add_missing_dev(root, fs_devices, devid, dev_uuid);
		if (!device)
			return -ENOMEM;
		btrfs_warn(root->fs_info, "devid %llu uuid %pU missing",
			   devid, dev_uuid);
	} else {
		if (!device->bdev && !btrfs_test_opt(root, DEGRADED))
			return -EIO;

		if (!device->bdev && !device->missing) {
			/*
			 * This happens when a device that was properly set
			 * up in the device info lists suddenly goes bad.
			 * device->bdev is NULL, and so we have to set
			 * device->missing to one here.
			 */
			device->fs_devices->missing_devices++;
			device->missing = 1;
		}

		/* Move the device to its own fs_devices */
		if (device->fs_devices != fs_devices) {
			ASSERT(device->missing);

			list_move(&device->dev_list, &fs_devices->devices);
			device->fs_devices->num_devices--;
			fs_devices->num_devices++;

			device->fs_devices->missing_devices--;
			fs_devices->missing_devices++;

			device->fs_devices = fs_devices;
		}
	}

	if (device->fs_devices != root->fs_info->fs_devices) {
		BUG_ON(device->writeable);
		if (device->generation !=
		    btrfs_device_generation(leaf, dev_item))
			return -EINVAL;
	}

	fill_device_from_item(leaf, dev_item, device);
	device->in_fs_metadata = 1;
	if (device->writeable && !device->is_tgtdev_for_dev_replace) {
		device->fs_devices->total_rw_bytes += device->total_bytes;
		spin_lock(&root->fs_info->free_chunk_lock);
		root->fs_info->free_chunk_space += device->total_bytes -
			device->bytes_used;
		spin_unlock(&root->fs_info->free_chunk_lock);
	}
	return 0;
}
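/*
 * Note on the DEGRADED checks above: without the degraded mount option,
 * a missing device item or chunk stripe fails the mount with -EIO; with
 * it, a placeholder is fabricated via add_missing_dev() instead, e.g.
 * (illustrative shell usage)
 *
 *	# mount -o degraded /dev/sdb /mnt
 *
 * and the filesystem comes up with device->missing set until the device
 * reappears or is replaced.
 */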
int btrfs_read_sys_array(struct btrfs_root *root)
{
	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
	struct extent_buffer *sb;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *array_ptr;
	unsigned long sb_array_offset;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur_offset;
	struct btrfs_key key;

	ASSERT(BTRFS_SUPER_INFO_SIZE <= root->nodesize);
	/*
	 * This will create an extent buffer of nodesize, while the
	 * superblock size is fixed to BTRFS_SUPER_INFO_SIZE.  If
	 * nodesize > sb size, this will overallocate, but we can keep it
	 * as-is, since only the first page is used.
	 */
	sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET);
	if (!sb)
		return -ENOMEM;
	btrfs_set_buffer_uptodate(sb);
	btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
	/*
	 * The sb extent buffer is artificial and just used to read the
	 * system array.  The btrfs_set_buffer_uptodate() call does not
	 * properly mark all its pages up-to-date when the page is larger:
	 * the extent does not cover the whole page and consequently
	 * check_page_uptodate does not find all the page's extents
	 * up-to-date (the hole beyond sb), so write_extent_buffer then
	 * triggers a WARN_ON.
	 *
	 * Regular short extents go through the
	 * mark_extent_buffer_dirty/writeback cycle, but sb spans only this
	 * function.  Add an explicit SetPageUptodate call to silence the
	 * warning, e.g. on PowerPC 64.
	 */
	if (PAGE_CACHE_SIZE > BTRFS_SUPER_INFO_SIZE)
		SetPageUptodate(sb->pages[0]);

	write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
	array_size = btrfs_super_sys_array_size(super_copy);

	array_ptr = super_copy->sys_chunk_array;
	sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array);
	cur_offset = 0;

	while (cur_offset < array_size) {
		disk_key = (struct btrfs_disk_key *)array_ptr;
		len = sizeof(*disk_key);
		if (cur_offset + len > array_size)
			goto out_short_read;

		btrfs_disk_key_to_cpu(&key, disk_key);

		array_ptr += len;
		sb_array_offset += len;
		cur_offset += len;

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)sb_array_offset;
			/*
			 * At least one btrfs_chunk with one stripe must be
			 * present, the exact stripe count check comes
			 * afterwards.
			 */
			len = btrfs_chunk_item_size(1);
			if (cur_offset + len > array_size)
				goto out_short_read;

			num_stripes = btrfs_chunk_num_stripes(sb, chunk);
			len = btrfs_chunk_item_size(num_stripes);
			if (cur_offset + len > array_size)
				goto out_short_read;

			ret = read_one_chunk(root, &key, sb, chunk);
			if (ret)
				break;
		} else {
			ret = -EIO;
			break;
		}
		array_ptr += len;
		sb_array_offset += len;
		cur_offset += len;
	}
	free_extent_buffer(sb);
	return ret;

out_short_read:
	printk(KERN_ERR "BTRFS: sys_array too short to read %u bytes at offset %u\n",
			len, cur_offset);
	free_extent_buffer(sb);
	return -EIO;
}
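/*
 * Layout sketch of the array parsed above: sys_chunk_array packs
 * (disk key, chunk item) pairs back to back, and since struct
 * btrfs_chunk embeds its first stripe, the item size is
 *
 *	item_size = sizeof(struct btrfs_chunk) +
 *		    (num_stripes - 1) * sizeof(struct btrfs_stripe);
 *
 * which is why the loop first validates a one-stripe item, reads
 * num_stripes from it, and only then sizes and consumes the full item.
 */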
int btrfs_read_chunk_tree(struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	int ret;
	int slot;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	mutex_lock(&uuid_mutex);
	lock_chunks(root);

	/*
	 * Read all device items, and then all the chunk items.  All
	 * device items are found before any chunk item (their object id
	 * is smaller than the lowest possible object id for a chunk
	 * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID).
	 */
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;
	while (1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
			break;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, slot);
		if (found_key.type == BTRFS_DEV_ITEM_KEY) {
			struct btrfs_dev_item *dev_item;
			dev_item = btrfs_item_ptr(leaf, slot,
						  struct btrfs_dev_item);
			ret = read_one_dev(root, leaf, dev_item);
			if (ret)
				goto error;
		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
			struct btrfs_chunk *chunk;
			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
			ret = read_one_chunk(root, &found_key, leaf, chunk);
			if (ret)
				goto error;
		}
		path->slots[0]++;
	}
	ret = 0;
error:
	unlock_chunks(root);
	mutex_unlock(&uuid_mutex);

	btrfs_free_path(path);
	return ret;
}

void btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;

	while (fs_devices) {
		mutex_lock(&fs_devices->device_list_mutex);
		list_for_each_entry(device, &fs_devices->devices, dev_list)
			device->dev_root = fs_info->dev_root;
		mutex_unlock(&fs_devices->device_list_mutex);

		fs_devices = fs_devices->seed;
	}
}

static void __btrfs_reset_dev_stats(struct btrfs_device *dev)
{
	int i;

	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		btrfs_dev_stat_reset(dev, i);
}

int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
{
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_root *dev_root = fs_info->dev_root;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct extent_buffer *eb;
	int slot;
	int ret = 0;
	struct btrfs_device *device;
	struct btrfs_path *path = NULL;
	int i;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		int item_size;
		struct btrfs_dev_stats_item *ptr;

		key.objectid = 0;
		key.type = BTRFS_DEV_STATS_KEY;
		key.offset = device->devid;
		ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0);
		if (ret) {
			__btrfs_reset_dev_stats(device);
			device->dev_stats_valid = 1;
			btrfs_release_path(path);
			continue;
		}
		slot = path->slots[0];
		eb = path->nodes[0];
		btrfs_item_key_to_cpu(eb, &found_key, slot);
		item_size = btrfs_item_size_nr(eb, slot);

		ptr = btrfs_item_ptr(eb, slot,
				     struct btrfs_dev_stats_item);

		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
			if (item_size >= (1 + i) * sizeof(__le64))
				btrfs_dev_stat_set(device, i,
					btrfs_dev_stats_value(eb, ptr, i));
			else
				btrfs_dev_stat_reset(device, i);
		}

		device->dev_stats_valid = 1;
		btrfs_dev_stat_print_on_load(device);
		btrfs_release_path(path);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

out:
	btrfs_free_path(path);
	return ret < 0 ? ret : 0;
}
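/*
 * On-disk sketch: a dev_stats item is just an array of __le64 counters,
 * one per BTRFS_DEV_STAT_* index, which is why the loader above copes
 * with items shorter than the current BTRFS_DEV_STAT_VALUES_MAX (older
 * kernels wrote fewer counters).  Conceptually:
 *
 *	values_present = item_size / sizeof(__le64);
 *	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
 *		stat[i] = i < values_present ? le64_to_cpu(disk[i]) : 0;
 */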
static int update_dev_stat_item(struct btrfs_trans_handle *trans,
				struct btrfs_root *dev_root,
				struct btrfs_device *device)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *eb;
	struct btrfs_dev_stats_item *ptr;
	int ret;
	int i;

	key.objectid = 0;
	key.type = BTRFS_DEV_STATS_KEY;
	key.offset = device->devid;

	path = btrfs_alloc_path();
	BUG_ON(!path);
	ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
	if (ret < 0) {
		printk_in_rcu(KERN_WARNING "BTRFS: "
			"error %d while searching for dev_stats item for device %s!\n",
			      ret, rcu_str_deref(device->name));
		goto out;
	}

	if (ret == 0 &&
	    btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
		/* need to delete old one and insert a new one */
		ret = btrfs_del_item(trans, dev_root, path);
		if (ret != 0) {
			printk_in_rcu(KERN_WARNING "BTRFS: "
				"delete too small dev_stats item for device %s failed %d!\n",
				      rcu_str_deref(device->name), ret);
			goto out;
		}
		ret = 1;
	}

	if (ret == 1) {
		/* need to insert a new item */
		btrfs_release_path(path);
		ret = btrfs_insert_empty_item(trans, dev_root, path,
					      &key, sizeof(*ptr));
		if (ret < 0) {
			printk_in_rcu(KERN_WARNING "BTRFS: "
				"insert dev_stats item for device %s failed %d!\n",
				      rcu_str_deref(device->name), ret);
			goto out;
		}
	}

	eb = path->nodes[0];
	ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		btrfs_set_dev_stats_value(eb, ptr, i,
					  btrfs_dev_stat_read(device, i));
	btrfs_mark_buffer_dirty(eb);

out:
	btrfs_free_path(path);
	return ret;
}
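/*
 * Concurrency sketch for the ccnt handling in btrfs_run_dev_stats()
 * below: dev_stats_ccnt counts increments since the last successful
 * flush.  Subtracting the snapshot instead of resetting to zero keeps
 * increments that raced with the item update:
 *
 *	stats_cnt = atomic_read(&device->dev_stats_ccnt);  (snapshot)
 *	...update_dev_stat_item()...                       (racing inc)
 *	atomic_sub(stats_cnt, &device->dev_stats_ccnt);    (race survives)
 */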
/*
 * Called from commit_transaction.  Writes all changed device stats to disk.
 */
int btrfs_run_dev_stats(struct btrfs_trans_handle *trans,
			struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *dev_root = fs_info->dev_root;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;
	int stats_cnt;
	int ret = 0;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		if (!device->dev_stats_valid || !btrfs_dev_stats_dirty(device))
			continue;

		stats_cnt = atomic_read(&device->dev_stats_ccnt);
		ret = update_dev_stat_item(trans, dev_root, device);
		if (!ret)
			atomic_sub(stats_cnt, &device->dev_stats_ccnt);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	return ret;
}

void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
{
	btrfs_dev_stat_inc(dev, index);
	btrfs_dev_stat_print_on_error(dev);
}

static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
{
	if (!dev->dev_stats_valid)
		return;
	printk_ratelimited_in_rcu(KERN_ERR "BTRFS: "
		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n",
		rcu_str_deref(dev->name),
		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
}

static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
{
	int i;

	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		if (btrfs_dev_stat_read(dev, i) != 0)
			break;
	if (i == BTRFS_DEV_STAT_VALUES_MAX)
		return; /* all values == 0, suppress message */

	printk_in_rcu(KERN_INFO "BTRFS: "
		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n",
		rcu_str_deref(dev->name),
		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
}

int btrfs_get_dev_stats(struct btrfs_root *root,
			struct btrfs_ioctl_get_dev_stats *stats)
{
	struct btrfs_device *dev;
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
	int i;

	mutex_lock(&fs_devices->device_list_mutex);
	dev = btrfs_find_device(root->fs_info, stats->devid, NULL, NULL);
	mutex_unlock(&fs_devices->device_list_mutex);

	if (!dev) {
		btrfs_warn(root->fs_info, "get dev_stats failed, device not found");
		return -ENODEV;
	} else if (!dev->dev_stats_valid) {
		btrfs_warn(root->fs_info, "get dev_stats failed, not yet valid");
		return -ENODEV;
	} else if (stats->flags & BTRFS_DEV_STATS_RESET) {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
			if (stats->nr_items > i)
				stats->values[i] =
					btrfs_dev_stat_read_and_reset(dev, i);
			else
				btrfs_dev_stat_reset(dev, i);
		}
	} else {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
			if (stats->nr_items > i)
				stats->values[i] = btrfs_dev_stat_read(dev, i);
	}
	if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
		stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
	return 0;
}
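/*
 * Userspace view (sketch only): the ioctl served above fills
 * struct btrfs_ioctl_get_dev_stats, e.g.
 *
 *	struct btrfs_ioctl_get_dev_stats args = {
 *		.devid = devid,
 *		.nr_items = BTRFS_DEV_STAT_VALUES_MAX,
 *		.flags = 0,	(or BTRFS_DEV_STATS_RESET)
 *	};
 *
 *	ioctl(fd, BTRFS_IOC_GET_DEV_STATS, &args);
 *
 * nr_items is clamped on return, so callers can detect how many
 * counters the kernel actually knows about.
 */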
int btrfs_scratch_superblock(struct btrfs_device *device)
{
	struct buffer_head *bh;
	struct btrfs_super_block *disk_super;

	bh = btrfs_read_dev_super(device->bdev);
	if (!bh)
		return -EINVAL;
	disk_super = (struct btrfs_super_block *)bh->b_data;

	memset(&disk_super->magic, 0, sizeof(disk_super->magic));
	set_buffer_dirty(bh);
	sync_dirty_buffer(bh);
	brelse(bh);

	return 0;
}

/*
 * Update the size of all devices, which is used for writing out the
 * super blocks.
 */
void btrfs_update_commit_device_size(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *curr, *next;

	if (list_empty(&fs_devices->resized_devices))
		return;

	mutex_lock(&fs_devices->device_list_mutex);
	lock_chunks(fs_info->dev_root);
	list_for_each_entry_safe(curr, next, &fs_devices->resized_devices,
				 resized_list) {
		list_del_init(&curr->resized_list);
		curr->commit_total_bytes = curr->disk_total_bytes;
	}
	unlock_chunks(fs_info->dev_root);
	mutex_unlock(&fs_devices->device_list_mutex);
}

/* Must be invoked during the transaction commit */
void btrfs_update_commit_device_bytes_used(struct btrfs_root *root,
					   struct btrfs_transaction *transaction)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct btrfs_device *dev;
	int i;

	if (list_empty(&transaction->pending_chunks))
		return;

	/* In order to kick the device replace finish process */
	lock_chunks(root);
	list_for_each_entry(em, &transaction->pending_chunks, list) {
		map = (struct map_lookup *)em->bdev;

		for (i = 0; i < map->num_stripes; i++) {
			dev = map->stripes[i].dev;
			dev->commit_bytes_used = dev->bytes_used;
		}
	}
	unlock_chunks(root);
}

void btrfs_set_fs_info_ptr(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;

	while (fs_devices) {
		fs_devices->fs_info = fs_info;
		fs_devices = fs_devices->seed;
	}
}

void btrfs_reset_fs_info_ptr(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;

	while (fs_devices) {
		fs_devices->fs_info = NULL;
		fs_devices = fs_devices->seed;
	}
}