/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02110-1301, USA.
 */
#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/iocontext.h>
#include <linux/capability.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/semaphore.h>
#include <linux/uuid.h>
#include <asm/div64.h>
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "async-thread.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "math.h"
#include "dev-replace.h"
#include "sysfs.h"

const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
	[BTRFS_RAID_RAID10] = {
		.sub_stripes = 2,
		.dev_stripes = 1,
		.devs_max = 0,	/* 0 == as many as possible */
		.devs_min = 4,
		.tolerated_failures = 1,
		.devs_increment = 2,
		.ncopies = 2,
	},
	[BTRFS_RAID_RAID1] = {
		.sub_stripes = 1,
		.dev_stripes = 1,
		.devs_max = 2,
		.devs_min = 2,
		.tolerated_failures = 1,
		.devs_increment = 2,
		.ncopies = 2,
	},
	[BTRFS_RAID_DUP] = {
		.sub_stripes = 1,
		.dev_stripes = 2,
		.devs_max = 1,
		.devs_min = 1,
		.tolerated_failures = 0,
		.devs_increment = 1,
		.ncopies = 2,
	},
	[BTRFS_RAID_RAID0] = {
		.sub_stripes = 1,
		.dev_stripes = 1,
		.devs_max = 0,
		.devs_min = 2,
		.tolerated_failures = 0,
		.devs_increment = 1,
		.ncopies = 1,
	},
	[BTRFS_RAID_SINGLE] = {
		.sub_stripes = 1,
		.dev_stripes = 1,
		.devs_max = 1,
		.devs_min = 1,
		.tolerated_failures = 0,
		.devs_increment = 1,
		.ncopies = 1,
	},
	[BTRFS_RAID_RAID5] = {
		.sub_stripes = 1,
		.dev_stripes = 1,
		.devs_max = 0,
		.devs_min = 2,
		.tolerated_failures = 1,
		.devs_increment = 1,
		.ncopies = 2,
	},
	[BTRFS_RAID_RAID6] = {
		.sub_stripes = 1,
		.dev_stripes = 1,
		.devs_max = 0,
		.devs_min = 3,
		.tolerated_failures = 2,
		.devs_increment = 1,
		.ncopies = 3,
	},
};

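/*
 * Example: for BTRFS_RAID_RAID10 above, sub_stripes = 2 with ncopies = 2
 * means every striped element is mirrored on a second device, so a
 * minimal chunk needs devs_min = 4 devices, grows two devices at a time
 * (devs_increment = 2), and survives the loss of one device
 * (tolerated_failures = 1).
 */
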
const u64 btrfs_raid_group[BTRFS_NR_RAID_TYPES] = {
	[BTRFS_RAID_RAID10] = BTRFS_BLOCK_GROUP_RAID10,
	[BTRFS_RAID_RAID1]  = BTRFS_BLOCK_GROUP_RAID1,
	[BTRFS_RAID_DUP]    = BTRFS_BLOCK_GROUP_DUP,
	[BTRFS_RAID_RAID0]  = BTRFS_BLOCK_GROUP_RAID0,
	[BTRFS_RAID_SINGLE] = 0,
	[BTRFS_RAID_RAID5]  = BTRFS_BLOCK_GROUP_RAID5,
	[BTRFS_RAID_RAID6]  = BTRFS_BLOCK_GROUP_RAID6,
};

/*
 * Table to convert BTRFS_RAID_* to the error code if minimum number of devices
 * condition is not met. Zero means there's no corresponding
 * BTRFS_ERROR_DEV_*_NOT_MET value.
 */
const int btrfs_raid_mindev_error[BTRFS_NR_RAID_TYPES] = {
	[BTRFS_RAID_RAID10] = BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET,
	[BTRFS_RAID_RAID1]  = BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET,
	[BTRFS_RAID_DUP]    = 0,
	[BTRFS_RAID_RAID0]  = 0,
	[BTRFS_RAID_SINGLE] = 0,
	[BTRFS_RAID_RAID5]  = BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET,
	[BTRFS_RAID_RAID6]  = BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET,
};

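/*
 * Example: shrinking a RAID1 filesystem to a single device would violate
 * btrfs_raid_array[BTRFS_RAID_RAID1].devs_min (2), and the table above
 * maps that case to BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET; see
 * btrfs_check_raid_min_devices() below for the lookup.
 */
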
static int init_first_rw_device(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_device *device);
static int btrfs_relocate_sys_chunks(struct btrfs_root *root);
static void __btrfs_reset_dev_stats(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
static void btrfs_close_one_device(struct btrfs_device *device);

DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);
struct list_head *btrfs_get_fs_uuids(void)
{
	return &fs_uuids;
}

static struct btrfs_fs_devices *__alloc_fs_devices(void)
{
	struct btrfs_fs_devices *fs_devs;

	fs_devs = kzalloc(sizeof(*fs_devs), GFP_KERNEL);
	if (!fs_devs)
		return ERR_PTR(-ENOMEM);

	mutex_init(&fs_devs->device_list_mutex);

	INIT_LIST_HEAD(&fs_devs->devices);
	INIT_LIST_HEAD(&fs_devs->resized_devices);
	INIT_LIST_HEAD(&fs_devs->alloc_list);
	INIT_LIST_HEAD(&fs_devs->list);

	return fs_devs;
}

/**
 * alloc_fs_devices - allocate struct btrfs_fs_devices
 * @fsid:	a pointer to UUID for this FS. If NULL a new UUID is
 *		generated.
 *
 * Return: a pointer to a new &struct btrfs_fs_devices on success;
 * ERR_PTR() on error. Returned struct is not linked onto any lists and
 * can be destroyed with kfree() right away.
 */
static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid)
{
	struct btrfs_fs_devices *fs_devs;

	fs_devs = __alloc_fs_devices();
	if (IS_ERR(fs_devs))
		return fs_devs;

	if (fsid)
		memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE);
	else
		generate_random_uuid(fs_devs->fsid);

	return fs_devs;
}

static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;
	WARN_ON(fs_devices->opened);
	while (!list_empty(&fs_devices->devices)) {
		device = list_entry(fs_devices->devices.next,
				    struct btrfs_device, dev_list);
		list_del(&device->dev_list);
		rcu_string_free(device->name);
		kfree(device);
	}
	kfree(fs_devices);
}

static void btrfs_kobject_uevent(struct block_device *bdev,
				 enum kobject_action action)
{
	int ret;

	ret = kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, action);
	if (ret)
		pr_warn("BTRFS: Sending event '%d' to kobject: '%s' (%p): failed\n",
			action,
			kobject_name(&disk_to_dev(bdev->bd_disk)->kobj),
			&disk_to_dev(bdev->bd_disk)->kobj);
}

void btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;

	while (!list_empty(&fs_uuids)) {
		fs_devices = list_entry(fs_uuids.next,
					struct btrfs_fs_devices, list);
		list_del(&fs_devices->list);
		free_fs_devices(fs_devices);
	}
}

static struct btrfs_device *__alloc_device(void)
{
	struct btrfs_device *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&dev->dev_list);
	INIT_LIST_HEAD(&dev->dev_alloc_list);
	INIT_LIST_HEAD(&dev->resized_list);

	spin_lock_init(&dev->io_lock);

	spin_lock_init(&dev->reada_lock);
	atomic_set(&dev->reada_in_flight, 0);
	atomic_set(&dev->dev_stats_ccnt, 0);
	btrfs_device_data_ordered_init(dev);
	INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
	INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);

	return dev;
}

static noinline struct btrfs_device *__find_device(struct list_head *head,
						   u64 devid, u8 *uuid)
{
	struct btrfs_device *dev;

	list_for_each_entry(dev, head, dev_list) {
		if (dev->devid == devid &&
		    (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
			return dev;
		}
	}
	return NULL;
}

static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
{
	struct btrfs_fs_devices *fs_devices;

	list_for_each_entry(fs_devices, &fs_uuids, list) {
		if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
			return fs_devices;
	}
	return NULL;
}

static int
btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder,
		      int flush, struct block_device **bdev,
		      struct buffer_head **bh)
{
	int ret;

	*bdev = blkdev_get_by_path(device_path, flags, holder);

	if (IS_ERR(*bdev)) {
		ret = PTR_ERR(*bdev);
		goto error;
	}

	if (flush)
		filemap_write_and_wait((*bdev)->bd_inode->i_mapping);
	ret = set_blocksize(*bdev, 4096);
	if (ret) {
		blkdev_put(*bdev, flags);
		goto error;
	}
	invalidate_bdev(*bdev);
	*bh = btrfs_read_dev_super(*bdev);
	if (IS_ERR(*bh)) {
		ret = PTR_ERR(*bh);
		blkdev_put(*bdev, flags);
		goto error;
	}

	return 0;

error:
	*bdev = NULL;
	*bh = NULL;
	return ret;
}

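/*
 * Typical use, as in __btrfs_open_devices() below: open the device
 * exclusively, flush its dirty pages, and read the primary superblock
 * in one call:
 *
 *	flags |= FMODE_EXCL;
 *	ret = btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
 *				    &bdev, &bh);
 */
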
static void requeue_list(struct btrfs_pending_bios *pending_bios,
			 struct bio *head, struct bio *tail)
{
	struct bio *old_head;

	old_head = pending_bios->head;
	pending_bios->head = head;
	if (pending_bios->tail)
		tail->bi_next = old_head;
	else
		pending_bios->tail = tail;
}

/*
 * we try to collect pending bios for a device so we don't get a large
 * number of procs sending bios down to the same device. This greatly
 * improves the scheduler's ability to collect and merge the bios.
 *
 * But, it also turns into a long list of bios to process and that is sure
 * to eventually make the worker thread block. The solution here is to
 * make some progress and then put this work struct back at the end of
 * the list if the block device is congested. This way, multiple devices
 * can make progress from a single worker thread.
 */
static noinline void run_scheduled_bios(struct btrfs_device *device)
{
	struct bio *pending;
	struct backing_dev_info *bdi;
	struct btrfs_fs_info *fs_info;
	struct btrfs_pending_bios *pending_bios;
	struct bio *tail;
	struct bio *cur;
	int again = 0;
	unsigned long num_run;
	unsigned long batch_run = 0;
	unsigned long limit;
	unsigned long last_waited = 0;
	int force_reg = 0;
	int sync_pending = 0;
	struct blk_plug plug;

	/*
	 * this function runs all the bios we've collected for
	 * a particular device. We don't want to wander off to
	 * another device without first sending all of these down.
	 * So, setup a plug here and finish it off before we return
	 */
	blk_start_plug(&plug);

	bdi = blk_get_backing_dev_info(device->bdev);
	fs_info = device->dev_root->fs_info;
	limit = btrfs_async_submit_limit(fs_info);
	limit = limit * 2 / 3;

loop:
	spin_lock(&device->io_lock);

loop_lock:
	num_run = 0;

	/* take all the bios off the list at once and process them
	 * later on (without the lock held). But, remember the
	 * tail and other pointers so the bios can be properly reinserted
	 * into the list if we hit congestion
	 */
	if (!force_reg && device->pending_sync_bios.head) {
		pending_bios = &device->pending_sync_bios;
		force_reg = 1;
	} else {
		pending_bios = &device->pending_bios;
		force_reg = 0;
	}

	pending = pending_bios->head;
	tail = pending_bios->tail;
	WARN_ON(pending && !tail);

	/*
	 * if pending was null this time around, no bios need processing
	 * at all and we can stop. Otherwise it'll loop back up again
	 * and do an additional check so no bios are missed.
	 *
	 * device->running_pending is used to synchronize with the
	 * schedule_bio code.
	 */
	if (device->pending_sync_bios.head == NULL &&
	    device->pending_bios.head == NULL) {
		again = 0;
		device->running_pending = 0;
	} else {
		again = 1;
		device->running_pending = 1;
	}

	pending_bios->head = NULL;
	pending_bios->tail = NULL;

	spin_unlock(&device->io_lock);

	while (pending) {

		rmb();
		/* we want to work on both lists, but do more bios on the
		 * sync list than the regular list
		 */
		if ((num_run > 32 &&
		     pending_bios != &device->pending_sync_bios &&
		     device->pending_sync_bios.head) ||
		    (num_run > 64 && pending_bios == &device->pending_sync_bios &&
		     device->pending_bios.head)) {
			spin_lock(&device->io_lock);
			requeue_list(pending_bios, pending, tail);
			goto loop_lock;
		}

		cur = pending;
		pending = pending->bi_next;
		cur->bi_next = NULL;

		/*
		 * atomic_dec_return implies a barrier for waitqueue_active
		 */
		if (atomic_dec_return(&fs_info->nr_async_bios) < limit &&
		    waitqueue_active(&fs_info->async_submit_wait))
			wake_up(&fs_info->async_submit_wait);

		BUG_ON(atomic_read(&cur->__bi_cnt) == 0);

		/*
		 * if we're doing the sync list, record that our
		 * plug has some sync requests on it
		 *
		 * If we're doing the regular list and there are
		 * sync requests sitting around, unplug before
		 * we add more
		 */
		if (pending_bios == &device->pending_sync_bios) {
			sync_pending = 1;
		} else if (sync_pending) {
			blk_finish_plug(&plug);
			blk_start_plug(&plug);
			sync_pending = 0;
		}

		btrfsic_submit_bio(cur->bi_rw, cur);
		num_run++;
		batch_run++;

		cond_resched();

		/*
		 * we made progress, there is more work to do and the bdi
		 * is now congested. Back off and let other work structs
		 * run instead
		 */
		if (pending && bdi_write_congested(bdi) && batch_run > 8 &&
		    fs_info->fs_devices->open_devices > 1) {
			struct io_context *ioc;

			ioc = current->io_context;

			/*
			 * the main goal here is that we don't want to
			 * block if we're going to be able to submit
			 * more requests without blocking.
			 *
			 * This code does two great things, it pokes into
			 * the elevator code from a filesystem _and_
			 * it makes assumptions about how batching works.
			 */
			if (ioc && ioc->nr_batch_requests > 0 &&
			    time_before(jiffies, ioc->last_waited + HZ/50UL) &&
			    (last_waited == 0 ||
			     ioc->last_waited == last_waited)) {
				/*
				 * we want to go through our batch of
				 * requests and stop. So, we copy out
				 * the ioc->last_waited time and test
				 * against it before looping
				 */
				last_waited = ioc->last_waited;
				cond_resched();
				continue;
			}
			spin_lock(&device->io_lock);
			requeue_list(pending_bios, pending, tail);
			device->running_pending = 1;

			spin_unlock(&device->io_lock);
			btrfs_queue_work(fs_info->submit_workers,
					 &device->work);
			goto done;
		}
		/* unplug every 64 requests just for good measure */
		if (batch_run % 64 == 0) {
			blk_finish_plug(&plug);
			blk_start_plug(&plug);
			sync_pending = 0;
		}
	}

	cond_resched();
	if (again)
		goto loop;

	spin_lock(&device->io_lock);
	if (device->pending_bios.head || device->pending_sync_bios.head)
		goto loop_lock;
	spin_unlock(&device->io_lock);

done:
	blk_finish_plug(&plug);
}

static void pending_bios_fn(struct btrfs_work *work)
{
	struct btrfs_device *device;

	device = container_of(work, struct btrfs_device, work);
	run_scheduled_bios(device);
}

void btrfs_free_stale_device(struct btrfs_device *cur_dev)
{
	struct btrfs_fs_devices *fs_devs;
	struct btrfs_device *dev;

	if (!cur_dev->name)
		return;

	list_for_each_entry(fs_devs, &fs_uuids, list) {
		int del = 1;

		if (fs_devs->opened)
			continue;
		if (fs_devs->seeding)
			continue;

		list_for_each_entry(dev, &fs_devs->devices, dev_list) {

			if (dev == cur_dev)
				continue;
			if (!dev->name)
				continue;

			/*
			 * TODO: This won't be enough. What if the same device
			 * comes back (with a new uuid and) with its mapper
			 * path? But for now, this does help as mostly an
			 * admin will either use the mapper or the non-mapper
			 * path throughout.
			 */
			rcu_read_lock();
			del = strcmp(rcu_str_deref(dev->name),
				     rcu_str_deref(cur_dev->name));
			rcu_read_unlock();
			if (!del)
				break;
		}

		if (!del) {
			/* delete the stale device */
			if (fs_devs->num_devices == 1) {
				btrfs_sysfs_remove_fsid(fs_devs);
				list_del(&fs_devs->list);
				free_fs_devices(fs_devs);
			} else {
				fs_devs->num_devices--;
				list_del(&dev->dev_list);
				rcu_string_free(dev->name);
				kfree(dev);
			}
			break;
		}
	}
}

/*
 * Add new device to list of registered devices
 *
 * Returns:
 * 1   - first time device is seen
 * 0   - device already known
 * < 0 - error
 */
static noinline int device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   u64 devid, struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices;
	struct rcu_string *name;
	int ret = 0;
	u64 found_transid = btrfs_super_generation(disk_super);

	fs_devices = find_fsid(disk_super->fsid);
	if (!fs_devices) {
		fs_devices = alloc_fs_devices(disk_super->fsid);
		if (IS_ERR(fs_devices))
			return PTR_ERR(fs_devices);

		list_add(&fs_devices->list, &fs_uuids);

		device = NULL;
	} else {
		device = __find_device(&fs_devices->devices, devid,
				       disk_super->dev_item.uuid);
	}

	if (!device) {
		if (fs_devices->opened)
			return -EBUSY;

		device = btrfs_alloc_device(NULL, &devid,
					    disk_super->dev_item.uuid);
		if (IS_ERR(device)) {
			/* we can safely leave the fs_devices entry around */
			return PTR_ERR(device);
		}

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name) {
			kfree(device);
			return -ENOMEM;
		}
		rcu_assign_pointer(device->name, name);

		mutex_lock(&fs_devices->device_list_mutex);
		list_add_rcu(&device->dev_list, &fs_devices->devices);
		fs_devices->num_devices++;
		mutex_unlock(&fs_devices->device_list_mutex);

		ret = 1;
		device->fs_devices = fs_devices;
	} else if (!device->name || strcmp(device->name->str, path)) {
		/*
		 * When FS is already mounted.
		 * 1. If you are here and if the device->name is NULL that
		 *    means this device was missing at time of FS mount.
		 * 2. If you are here and if the device->name is different
		 *    from 'path' that means either
		 *	a. The same device disappeared and reappeared with
		 *	   a different name. or
		 *	b. The missing-disk-which-was-replaced has
		 *	   reappeared now.
		 *
		 * We must allow 1 and 2a above. But 2b would be spurious
		 * and unintentional.
		 *
		 * Further, in case of 1 and 2a above, the disk at 'path'
		 * would have missed some transactions when it was away and
		 * in case of 2a the stale bdev has to be updated as well.
		 * 2b must not be allowed at any time.
		 */

		/*
		 * For now, we do allow update to btrfs_fs_device through the
		 * btrfs dev scan cli after FS has been mounted. We're still
		 * tracking a problem where systems fail mount by subvolume id
		 * when we reject replacement on a mounted FS.
		 */
		if (!fs_devices->opened && found_transid < device->generation) {
			/*
			 * That is, if the FS is _not_ mounted and if you
			 * are here, that means there is more than one
			 * disk with the same uuid and devid. We keep the one
			 * with the larger generation number or the last-in if
			 * the generations are equal.
			 */
			return -EEXIST;
		}

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name)
			return -ENOMEM;
		rcu_string_free(device->name);
		rcu_assign_pointer(device->name, name);
		if (device->missing) {
			fs_devices->missing_devices--;
			device->missing = 0;
		}
	}

	/*
	 * Unmount does not free the btrfs_device struct but would zero
	 * generation along with most of the other members. So just update
	 * it back. We need it to pick the disk with largest generation
	 * (as above).
	 */
	if (!fs_devices->opened)
		device->generation = found_transid;

	/*
	 * if there is new btrfs on an already registered device,
	 * then remove the stale device entry.
	 */
	if (ret > 0)
		btrfs_free_stale_device(device);

	*fs_devices_ret = fs_devices;

	return ret;
}

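/*
 * Illustrative caller, simplified from btrfs_scan_one_device() below;
 * a return value of 1 means the device was seen for the first time:
 *
 *	ret = device_list_add(path, disk_super, devid, &fs_devices);
 *	if (ret > 0)
 *		pr_info("BTRFS: new device %s\n", path);
 */
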
static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
{
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_device *device;
	struct btrfs_device *orig_dev;

	fs_devices = alloc_fs_devices(orig->fsid);
	if (IS_ERR(fs_devices))
		return fs_devices;

	mutex_lock(&orig->device_list_mutex);
	fs_devices->total_devices = orig->total_devices;

	/* We have held the volume lock, it is safe to get the devices. */
	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
		struct rcu_string *name;

		device = btrfs_alloc_device(NULL, &orig_dev->devid,
					    orig_dev->uuid);
		if (IS_ERR(device))
			goto error;

		/*
		 * This is ok to do without rcu read locked because we hold the
		 * uuid mutex so nothing we touch in here is going to disappear.
		 */
		if (orig_dev->name) {
			name = rcu_string_strdup(orig_dev->name->str,
						 GFP_KERNEL);
			if (!name) {
				kfree(device);
				goto error;
			}
			rcu_assign_pointer(device->name, name);
		}

		list_add(&device->dev_list, &fs_devices->devices);
		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	}
	mutex_unlock(&orig->device_list_mutex);
	return fs_devices;
error:
	mutex_unlock(&orig->device_list_mutex);
	free_fs_devices(fs_devices);
	return ERR_PTR(-ENOMEM);
}

void btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices, int step)
{
	struct btrfs_device *device, *next;
	struct btrfs_device *latest_dev = NULL;

	mutex_lock(&uuid_mutex);
again:
	/* This is the initialized path, it is safe to release the devices. */
	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
		if (device->in_fs_metadata) {
			if (!device->is_tgtdev_for_dev_replace &&
			    (!latest_dev ||
			     device->generation > latest_dev->generation)) {
				latest_dev = device;
			}
			continue;
		}

		if (device->devid == BTRFS_DEV_REPLACE_DEVID) {
			/*
			 * In the first step, keep the device which has
			 * the correct fsid and the devid that is used
			 * for the dev_replace procedure.
			 * In the second step, the dev_replace state is
			 * read from the device tree and it is known
			 * whether the procedure is really active or
			 * not, which means whether this device is
			 * used or whether it should be removed.
			 */
			if (step == 0 || device->is_tgtdev_for_dev_replace) {
				continue;
			}
		}
		if (device->bdev) {
			blkdev_put(device->bdev, device->mode);
			device->bdev = NULL;
			fs_devices->open_devices--;
		}
		if (device->writeable) {
			list_del_init(&device->dev_alloc_list);
			device->writeable = 0;
			if (!device->is_tgtdev_for_dev_replace)
				fs_devices->rw_devices--;
		}
		list_del_init(&device->dev_list);
		fs_devices->num_devices--;
		rcu_string_free(device->name);
		kfree(device);
	}

	if (fs_devices->seed) {
		fs_devices = fs_devices->seed;
		goto again;
	}

	fs_devices->latest_bdev = latest_dev->bdev;

	mutex_unlock(&uuid_mutex);
}

static void __free_device(struct work_struct *work)
{
	struct btrfs_device *device;

	device = container_of(work, struct btrfs_device, rcu_work);

	if (device->bdev)
		blkdev_put(device->bdev, device->mode);

	rcu_string_free(device->name);
	kfree(device);
}

static void free_device(struct rcu_head *head)
{
	struct btrfs_device *device;

	device = container_of(head, struct btrfs_device, rcu);

	INIT_WORK(&device->rcu_work, __free_device);
	schedule_work(&device->rcu_work);
}

static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device, *tmp;

	if (--fs_devices->opened > 0)
		return 0;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry_safe(device, tmp, &fs_devices->devices, dev_list) {
		btrfs_close_one_device(device);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	WARN_ON(fs_devices->open_devices);
	WARN_ON(fs_devices->rw_devices);
	fs_devices->opened = 0;
	fs_devices->seeding = 0;

	return 0;
}

int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_fs_devices *seed_devices = NULL;
	int ret;

	mutex_lock(&uuid_mutex);
	ret = __btrfs_close_devices(fs_devices);
	if (!fs_devices->opened) {
		seed_devices = fs_devices->seed;
		fs_devices->seed = NULL;
	}
	mutex_unlock(&uuid_mutex);

	while (seed_devices) {
		fs_devices = seed_devices;
		seed_devices = fs_devices->seed;
		__btrfs_close_devices(fs_devices);
		free_fs_devices(fs_devices);
	}
	/*
	 * Wait for rcu kworkers under __btrfs_close_devices
	 * to finish all blkdev_puts so device is really
	 * free when umount is done.
	 */
	rcu_barrier();
	return ret;
}

static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
				fmode_t flags, void *holder)
{
	struct request_queue *q;
	struct block_device *bdev;
	struct list_head *head = &fs_devices->devices;
	struct btrfs_device *device;
	struct btrfs_device *latest_dev = NULL;
	struct buffer_head *bh;
	struct btrfs_super_block *disk_super;
	u64 devid;
	int seeding = 1;
	int ret = 0;

	flags |= FMODE_EXCL;

	list_for_each_entry(device, head, dev_list) {
		if (device->bdev)
			continue;
		if (!device->name)
			continue;

		/* Just open everything we can; ignore failures here */
		if (btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
					  &bdev, &bh))
			continue;

		disk_super = (struct btrfs_super_block *)bh->b_data;
		devid = btrfs_stack_device_id(&disk_super->dev_item);
		if (devid != device->devid)
			goto error_brelse;

		if (memcmp(device->uuid, disk_super->dev_item.uuid,
			   BTRFS_UUID_SIZE))
			goto error_brelse;

		device->generation = btrfs_super_generation(disk_super);
		if (!latest_dev ||
		    device->generation > latest_dev->generation)
			latest_dev = device;

		if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
			device->writeable = 0;
		} else {
			device->writeable = !bdev_read_only(bdev);
			seeding = 0;
		}

		q = bdev_get_queue(bdev);
		if (blk_queue_discard(q))
			device->can_discard = 1;

		device->bdev = bdev;
		device->in_fs_metadata = 0;
		device->mode = flags;

		if (!blk_queue_nonrot(bdev_get_queue(bdev)))
			fs_devices->rotating = 1;

		fs_devices->open_devices++;
		if (device->writeable &&
		    device->devid != BTRFS_DEV_REPLACE_DEVID) {
			fs_devices->rw_devices++;
			list_add(&device->dev_alloc_list,
				 &fs_devices->alloc_list);
		}
		brelse(bh);
		continue;

error_brelse:
		brelse(bh);
		blkdev_put(bdev, flags);
		continue;
	}
	if (fs_devices->open_devices == 0) {
		ret = -EINVAL;
		goto out;
	}
	fs_devices->seeding = seeding;
	fs_devices->opened = 1;
	fs_devices->latest_bdev = latest_dev->bdev;
	fs_devices->total_rw_bytes = 0;
out:
	return ret;
}

int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       fmode_t flags, void *holder)
{
	int ret;

	mutex_lock(&uuid_mutex);
	if (fs_devices->opened) {
		fs_devices->opened++;
		ret = 0;
	} else {
		ret = __btrfs_open_devices(fs_devices, flags, holder);
	}
	mutex_unlock(&uuid_mutex);
	return ret;
}

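/*
 * fs_devices->opened acts as a plain open count: btrfs_open_devices()
 * above either bumps it or performs the real open, and
 * __btrfs_close_devices() only tears the devices down once the count
 * drops back to zero.
 */
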
void btrfs_release_disk_super(struct page *page)
{
	kunmap(page);
	put_page(page);
}

int btrfs_read_disk_super(struct block_device *bdev, u64 bytenr,
		struct page **page, struct btrfs_super_block **disk_super)
{
	void *p;
	pgoff_t index;

	/* make sure our super fits in the device */
	if (bytenr + PAGE_SIZE >= i_size_read(bdev->bd_inode))
		return 1;

	/* make sure our super fits in the page */
	if (sizeof(**disk_super) > PAGE_SIZE)
		return 1;

	/* make sure our super doesn't straddle pages on disk */
	index = bytenr >> PAGE_SHIFT;
	if ((bytenr + sizeof(**disk_super) - 1) >> PAGE_SHIFT != index)
		return 1;

	/* pull in the page with our super */
	*page = read_cache_page_gfp(bdev->bd_inode->i_mapping,
				    index, GFP_KERNEL);

	if (IS_ERR_OR_NULL(*page))
		return 1;

	p = kmap(*page);

	/* align our pointer to the offset of the super block */
	*disk_super = p + (bytenr & ~PAGE_MASK);

	if (btrfs_super_bytenr(*disk_super) != bytenr ||
	    btrfs_super_magic(*disk_super) != BTRFS_MAGIC) {
		btrfs_release_disk_super(*page);
		return 1;
	}

	if ((*disk_super)->label[0] &&
	    (*disk_super)->label[BTRFS_LABEL_SIZE - 1])
		(*disk_super)->label[BTRFS_LABEL_SIZE - 1] = '\0';

	return 0;
}

/*
 * Look for a btrfs signature on a device. This may be called out of the mount
 * path and we are not allowed to call set_blocksize during the scan. The
 * superblock is read via pagecache.
 */
int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
			  struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_super_block *disk_super;
	struct block_device *bdev;
	struct page *page;
	int ret = -EINVAL;
	u64 devid;
	u64 transid;
	u64 total_devices;
	u64 bytenr;

	/*
	 * we would like to check all the supers, but that would make
	 * a btrfs mount succeed after a mkfs from a different FS.
	 * So, we need to add a special mount option to scan for
	 * later supers, using BTRFS_SUPER_MIRROR_MAX instead
	 */
	bytenr = btrfs_sb_offset(0);
	flags |= FMODE_EXCL;
	mutex_lock(&uuid_mutex);

	bdev = blkdev_get_by_path(path, flags, holder);
	if (IS_ERR(bdev)) {
		ret = PTR_ERR(bdev);
		goto error;
	}

	if (btrfs_read_disk_super(bdev, bytenr, &page, &disk_super))
		goto error_bdev_put;

	devid = btrfs_stack_device_id(&disk_super->dev_item);
	transid = btrfs_super_generation(disk_super);
	total_devices = btrfs_super_num_devices(disk_super);

	ret = device_list_add(path, disk_super, devid, fs_devices_ret);
	if (ret > 0) {
		if (disk_super->label[0]) {
			printk(KERN_INFO "BTRFS: device label %s ", disk_super->label);
		} else {
			printk(KERN_INFO "BTRFS: device fsid %pU ", disk_super->fsid);
		}

		printk(KERN_CONT "devid %llu transid %llu %s\n", devid, transid, path);
		ret = 0;
	}
	if (!ret && fs_devices_ret)
		(*fs_devices_ret)->total_devices = total_devices;

	btrfs_release_disk_super(page);

error_bdev_put:
	blkdev_put(bdev, flags);
error:
	mutex_unlock(&uuid_mutex);
	return ret;
}

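/*
 * Worked example for the checks in btrfs_read_disk_super() above,
 * assuming 4K pages: the primary super is at bytenr 65536, so
 * index = 65536 >> PAGE_SHIFT = 16 and (bytenr & ~PAGE_MASK) = 0; the
 * straddle test passes because the whole struct fits inside that page.
 */
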
/* helper to account the used device space in the range */
int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
				   u64 end, u64 *length)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 extent_end;
	int ret;
	int slot;
	struct extent_buffer *l;

	*length = 0;

	if (start >= device->total_bytes || device->is_tgtdev_for_dev_replace)
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = READA_FORWARD;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid, key.type);
		if (ret < 0)
			goto out;
	}

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (key.type != BTRFS_DEV_EXTENT_KEY)
			goto next;

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (key.offset <= start && extent_end > end) {
			*length = end - start + 1;
			break;
		} else if (key.offset <= start && extent_end > start)
			*length += extent_end - start;
		else if (key.offset > start && extent_end <= end)
			*length += extent_end - key.offset;
		else if (key.offset > start && key.offset <= end) {
			*length += end - key.offset + 1;
			break;
		} else if (key.offset > end)
			break;

next:
		path->slots[0]++;
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

static int contains_pending_extent(struct btrfs_transaction *transaction,
				   struct btrfs_device *device,
				   u64 *start, u64 len)
{
	struct btrfs_fs_info *fs_info = device->dev_root->fs_info;
	struct extent_map *em;
	struct list_head *search_list = &fs_info->pinned_chunks;
	int ret = 0;
	u64 physical_start = *start;

	if (transaction)
		search_list = &transaction->pending_chunks;
again:
	list_for_each_entry(em, search_list, list) {
		struct map_lookup *map;
		int i;

		map = em->map_lookup;
		for (i = 0; i < map->num_stripes; i++) {
			u64 end;

			if (map->stripes[i].dev != device)
				continue;
			if (map->stripes[i].physical >= physical_start + len ||
			    map->stripes[i].physical + em->orig_block_len <=
			    physical_start)
				continue;
			/*
			 * Make sure that while processing the pinned list we do
			 * not override our *start with a lower value, because
			 * we can have pinned chunks that fall within this
			 * device hole and that have lower physical addresses
			 * than the pending chunks we processed before. If we
			 * do not take this special care we can end up getting
			 * 2 pending chunks that start at the same physical
			 * device offsets because the end offset of a pinned
			 * chunk can be equal to the start offset of some
			 * pending chunk.
			 */
			end = map->stripes[i].physical + em->orig_block_len;
			if (end > *start) {
				*start = end;
				ret = 1;
			}
		}
	}
	if (search_list != &fs_info->pinned_chunks) {
		search_list = &fs_info->pinned_chunks;
		goto again;
	}

	return ret;
}

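/*
 * Example of the monotonic update above: with *start = 1M, a pending
 * chunk at [2M, 3M) first bumps *start to 3M; a pinned chunk at
 * [1M, 2M) still overlaps the original range, but its end (2M) is not
 * greater than *start (3M), so it cannot pull *start back down.
 */
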
/*
 * find_free_dev_extent_start - find free space in the specified device
 * @device:	  the device which we search the free space in
 * @num_bytes:	  the size of the free space that we need
 * @search_start: the position from which to begin the search
 * @start:	  store the start of the free space.
 * @len:	  the size of the free space that we find, or the size
 *		  of the max free space if we don't find suitable free space
 *
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
 *
 * @start is used to store the start of the free space if we find it. But if
 * we don't find suitable free space, it will be used to store the start
 * position of the max free space.
 *
 * @len is used to store the size of the free space that we find.
 * But if we don't find suitable free space, it is used to store the size of
 * the max free space.
 */
int find_free_dev_extent_start(struct btrfs_transaction *transaction,
			       struct btrfs_device *device, u64 num_bytes,
			       u64 search_start, u64 *start, u64 *len)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 hole_size;
	u64 max_hole_start;
	u64 max_hole_size;
	u64 extent_end;
	u64 search_end = device->total_bytes;
	int ret;
	int slot;
	struct extent_buffer *l;
	u64 min_search_start;

	/*
	 * We don't want to overwrite the superblock on the drive nor any area
	 * used by the boot loader (grub for example), so we make sure to start
	 * at an offset of at least 1MB.
	 */
	min_search_start = max(root->fs_info->alloc_start, 1024ull * 1024);
	search_start = max(search_start, min_search_start);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	max_hole_start = search_start;
	max_hole_size = 0;

again:
	if (search_start >= search_end || device->is_tgtdev_for_dev_replace) {
		ret = -ENOSPC;
		goto out;
	}

	path->reada = READA_FORWARD;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid, key.type);
		if (ret < 0)
			goto out;
	}

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (key.type != BTRFS_DEV_EXTENT_KEY)
			goto next;

		if (key.offset > search_start) {
			hole_size = key.offset - search_start;

			/*
			 * Have to check before we set max_hole_start, otherwise
			 * we could end up sending back this offset anyway.
			 */
			if (contains_pending_extent(transaction, device,
						    &search_start,
						    hole_size)) {
				if (key.offset >= search_start) {
					hole_size = key.offset - search_start;
				} else {
					WARN_ON_ONCE(1);
					hole_size = 0;
				}
			}

			if (hole_size > max_hole_size) {
				max_hole_start = search_start;
				max_hole_size = hole_size;
			}

			/*
			 * If this free space is greater than what we need,
			 * it must be the max free space that we have found
			 * until now, so max_hole_start must point to the start
			 * of this free space and the length of this free space
			 * is stored in max_hole_size. Thus, we return
			 * max_hole_start and max_hole_size and go back to the
			 * caller.
			 */
			if (hole_size >= num_bytes) {
				ret = 0;
				goto out;
			}
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (extent_end > search_start)
			search_start = extent_end;
next:
		path->slots[0]++;
		cond_resched();
	}

	/*
	 * At this point, search_start should be the end of
	 * allocated dev extents, and when shrinking the device,
	 * search_end may be smaller than search_start.
	 */
	if (search_end > search_start) {
		hole_size = search_end - search_start;

		if (contains_pending_extent(transaction, device, &search_start,
					    hole_size)) {
			btrfs_release_path(path);
			goto again;
		}

		if (hole_size > max_hole_size) {
			max_hole_start = search_start;
			max_hole_size = hole_size;
		}
	}

	/* See above. */
	if (max_hole_size < num_bytes)
		ret = -ENOSPC;
	else
		ret = 0;

out:
	btrfs_free_path(path);
	*start = max_hole_start;
	if (len)
		*len = max_hole_size;
	return ret;
}

int find_free_dev_extent(struct btrfs_trans_handle *trans,
			 struct btrfs_device *device, u64 num_bytes,
			 u64 *start, u64 *len)
{
	/* FIXME use last free of some kind */
	return find_free_dev_extent_start(trans->transaction, device,
					  num_bytes, 0, start, len);
}

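/*
 * Illustrative outcome: on a device whose extents occupy [1M, 5M) and
 * [9M, 13M), find_free_dev_extent_start() with num_bytes = 4M and
 * search_start = 1M returns 0 with *start = 5M and *len = 4M, the hole
 * between the two extents.
 */
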
static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
				 struct btrfs_device *device,
				 u64 start, u64 *dev_extent_len)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf = NULL;
	struct btrfs_dev_extent *extent = NULL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
again:
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid,
					  BTRFS_DEV_EXTENT_KEY);
		if (ret)
			goto out;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
		BUG_ON(found_key.offset > start || found_key.offset +
		       btrfs_dev_extent_length(leaf, extent) < start);
		key = found_key;
		btrfs_release_path(path);
		goto again;
	} else if (ret == 0) {
		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
	} else {
		btrfs_handle_fs_error(root->fs_info, ret, "Slot search failed");
		goto out;
	}

	*dev_extent_len = btrfs_dev_extent_length(leaf, extent);

	ret = btrfs_del_item(trans, root, path);
	if (ret) {
		btrfs_handle_fs_error(root->fs_info, ret,
				      "Failed to remove dev extent item");
	} else {
		set_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags);
	}
out:
	btrfs_free_path(path);
	return ret;
}

static int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
				  struct btrfs_device *device,
				  u64 chunk_tree, u64 chunk_objectid,
				  u64 chunk_offset, u64 start, u64 num_bytes)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *extent;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	WARN_ON(!device->in_fs_metadata);
	WARN_ON(device->is_tgtdev_for_dev_replace);
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*extent));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	extent = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_dev_extent);
	btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
	btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);

	write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
		    btrfs_dev_extent_chunk_tree_uuid(extent), BTRFS_UUID_SIZE);

	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return ret;
}

static u64 find_next_chunk(struct btrfs_fs_info *fs_info)
{
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	struct rb_node *n;
	u64 ret = 0;

	em_tree = &fs_info->mapping_tree.map_tree;
	read_lock(&em_tree->lock);
	n = rb_last(&em_tree->map);
	if (n) {
		em = rb_entry(n, struct extent_map, rb_node);
		ret = em->start + em->len;
	}
	read_unlock(&em_tree->lock);

	return ret;
}

static noinline int find_next_devid(struct btrfs_fs_info *fs_info,
				    u64 *devid_ret)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0); /* Corruption */

	ret = btrfs_previous_item(fs_info->chunk_root, path,
				  BTRFS_DEV_ITEMS_OBJECTID,
				  BTRFS_DEV_ITEM_KEY);
	if (ret) {
		*devid_ret = 1;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		*devid_ret = found_key.offset + 1;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

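/*
 * find_next_devid() walks back from (BTRFS_DEV_ITEMS_OBJECTID,
 * BTRFS_DEV_ITEM_KEY, (u64)-1): if the last existing dev item has
 * offset N the next devid is N + 1, and with no dev items at all it
 * falls back to 1.
 */
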
/*
 * the device information is stored in the chunk root
 * the btrfs_device struct should be fully filled in
 */
static int btrfs_add_device(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root,
			    struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long ptr;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*dev_item));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_generation(leaf, dev_item, 0);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item,
				     btrfs_device_get_disk_total_bytes(device));
	btrfs_set_device_bytes_used(leaf, dev_item,
				    btrfs_device_get_bytes_used(device));
	btrfs_set_device_group(leaf, dev_item, 0);
	btrfs_set_device_seek_speed(leaf, dev_item, 0);
	btrfs_set_device_bandwidth(leaf, dev_item, 0);
	btrfs_set_device_start_offset(leaf, dev_item, 0);

	ptr = btrfs_device_uuid(dev_item);
	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
	ptr = btrfs_device_fsid(dev_item);
	write_extent_buffer(leaf, root->fs_info->fsid, ptr, BTRFS_UUID_SIZE);
	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Function to update ctime/mtime for a given device path.
 * Mainly used for ctime/mtime based probe like libblkid.
 */
static void update_dev_time(char *path_name)
{
	struct file *filp;

	filp = filp_open(path_name, O_RDWR, 0);
	if (IS_ERR(filp))
		return;
	file_update_time(filp);
	filp_close(filp, NULL);
}

static int btrfs_rm_dev_item(struct btrfs_root *root,
			     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_trans_handle *trans;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
	if (ret)
		goto out;
out:
	btrfs_free_path(path);
	btrfs_commit_transaction(trans, root);
	return ret;
}

/*
 * Verify that @num_devices satisfies the RAID profile constraints in the whole
 * filesystem. It's up to the caller to adjust that number regarding e.g.
 * device replace.
 */
static int btrfs_check_raid_min_devices(struct btrfs_fs_info *fs_info,
					u64 num_devices)
{
	u64 all_avail;
	unsigned seq;
	int i;

	do {
		seq = read_seqbegin(&fs_info->profiles_lock);

		all_avail = fs_info->avail_data_alloc_bits |
			    fs_info->avail_system_alloc_bits |
			    fs_info->avail_metadata_alloc_bits;
	} while (read_seqretry(&fs_info->profiles_lock, seq));

	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
		if (!(all_avail & btrfs_raid_group[i]))
			continue;

		if (num_devices < btrfs_raid_array[i].devs_min) {
			int ret = btrfs_raid_mindev_error[i];

			if (ret)
				return ret;
		}
	}

	return 0;
}

struct btrfs_device *btrfs_find_next_active_device(struct btrfs_fs_devices *fs_devs,
						   struct btrfs_device *device)
{
	struct btrfs_device *next_device;

	list_for_each_entry(next_device, &fs_devs->devices, dev_list) {
		if (next_device != device &&
		    !next_device->missing && next_device->bdev)
			return next_device;
	}

	return NULL;
}

/*
 * Helper function to check if the given device is part of s_bdev / latest_bdev
 * and replace it with the provided or the next active device. In the context
 * where this function is called, there should always be another device (or
 * this_dev) which is active.
 */
void btrfs_assign_next_active_device(struct btrfs_fs_info *fs_info,
		struct btrfs_device *device, struct btrfs_device *this_dev)
{
	struct btrfs_device *next_device;

	if (this_dev)
		next_device = this_dev;
	else
		next_device = btrfs_find_next_active_device(fs_info->fs_devices,
							    device);
	ASSERT(next_device);

	if (fs_info->sb->s_bdev &&
	    (fs_info->sb->s_bdev == device->bdev))
		fs_info->sb->s_bdev = next_device->bdev;

	if (fs_info->fs_devices->latest_bdev == device->bdev)
		fs_info->fs_devices->latest_bdev = next_device->bdev;
}

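/*
 * Example: if the device being removed currently backs both sb->s_bdev
 * and latest_bdev, btrfs_assign_next_active_device() above repoints
 * both at the first remaining device that has a bdev and is not
 * missing, so later superblock writes avoid the departing disk.
 */
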
int btrfs_rm_device(struct btrfs_root *root, char *device_path, u64 devid)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *cur_devices;
	u64 num_devices;
	int ret = 0;
	bool clear_super = false;
	char *dev_name = NULL;

	mutex_lock(&uuid_mutex);

	num_devices = root->fs_info->fs_devices->num_devices;
	btrfs_dev_replace_lock(&root->fs_info->dev_replace, 0);
	if (btrfs_dev_replace_is_ongoing(&root->fs_info->dev_replace)) {
		WARN_ON(num_devices < 1);
		num_devices--;
	}
	btrfs_dev_replace_unlock(&root->fs_info->dev_replace, 0);

	ret = btrfs_check_raid_min_devices(root->fs_info, num_devices - 1);
	if (ret)
		goto out;

	ret = btrfs_find_device_by_devspec(root, devid, device_path,
					   &device);
	if (ret)
		goto out;

	if (device->is_tgtdev_for_dev_replace) {
		ret = BTRFS_ERROR_DEV_TGT_REPLACE;
		goto out;
	}

	if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) {
		ret = BTRFS_ERROR_DEV_ONLY_WRITABLE;
		goto out;
	}

	if (device->writeable) {
		lock_chunks(root);
		list_del_init(&device->dev_alloc_list);
		device->fs_devices->rw_devices--;
		unlock_chunks(root);
		dev_name = kstrdup(device->name->str, GFP_KERNEL);
		if (!dev_name) {
			ret = -ENOMEM;
			goto error_undo;
		}
		clear_super = true;
	}

	mutex_unlock(&uuid_mutex);
	ret = btrfs_shrink_device(device, 0);
	mutex_lock(&uuid_mutex);
	if (ret)
		goto error_undo;

	/*
	 * TODO: the superblock still includes this device in its num_devices
	 * counter although write_all_supers() is not locked out. This
	 * could give a filesystem state which requires a degraded mount.
	 */
	ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
	if (ret)
		goto error_undo;

	device->in_fs_metadata = 0;
	btrfs_scrub_cancel_dev(root->fs_info, device);

	/*
	 * the device list mutex makes sure that we don't change
	 * the device list while someone else is writing out all
	 * the device supers. Whoever is writing all supers should
	 * lock the device list mutex before getting the number of
	 * devices in the super block (super_copy). Conversely,
	 * whoever updates the number of devices in the super block
	 * (super_copy) should hold the device list mutex.
	 */

	cur_devices = device->fs_devices;
	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	list_del_rcu(&device->dev_list);

	device->fs_devices->num_devices--;
	device->fs_devices->total_devices--;

	if (device->missing)
		device->fs_devices->missing_devices--;

	btrfs_assign_next_active_device(root->fs_info, device, NULL);

	if (device->bdev) {
		device->fs_devices->open_devices--;
		/* remove sysfs entry */
		btrfs_sysfs_rm_device_link(root->fs_info->fs_devices, device);
	}

	call_rcu(&device->rcu, free_device);

	num_devices = btrfs_super_num_devices(root->fs_info->super_copy) - 1;
	btrfs_set_super_num_devices(root->fs_info->super_copy, num_devices);
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	if (cur_devices->open_devices == 0) {
		struct btrfs_fs_devices *fs_devices;
		fs_devices = root->fs_info->fs_devices;
		while (fs_devices) {
			if (fs_devices->seed == cur_devices) {
				fs_devices->seed = cur_devices->seed;
				break;
			}
			fs_devices = fs_devices->seed;
		}
		cur_devices->seed = NULL;
		__btrfs_close_devices(cur_devices);
		free_fs_devices(cur_devices);
	}

	root->fs_info->num_tolerated_disk_barrier_failures =
		btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info);

	/*
	 * at this point, the device is zero sized. We want to
	 * remove it from the devices list and zero out the old super
	 */
	if (clear_super) {
		struct block_device *bdev;

		bdev = blkdev_get_by_path(dev_name, FMODE_READ | FMODE_EXCL,
					  root->fs_info->bdev_holder);
		if (!IS_ERR(bdev)) {
			btrfs_scratch_superblocks(bdev, dev_name);
			blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
		}
	}

out:
	kfree(dev_name);

	mutex_unlock(&uuid_mutex);
	return ret;

error_undo:
	if (device->writeable) {
		lock_chunks(root);
		list_add(&device->dev_alloc_list,
			 &root->fs_info->fs_devices->alloc_list);
		device->fs_devices->rw_devices++;
		unlock_chunks(root);
	}
	goto out;
}

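/*
 * Note on btrfs_rm_device() above: the device name is duplicated into
 * dev_name before the btrfs_device is freed via call_rcu(), so the
 * final superblock wipe can still reopen the block device by path after
 * the struct is gone.
 */
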
void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_fs_info *fs_info,
					struct btrfs_device *srcdev)
{
	struct btrfs_fs_devices *fs_devices;

	WARN_ON(!mutex_is_locked(&fs_info->fs_devices->device_list_mutex));

	/*
	 * in case of fs with no seed, srcdev->fs_devices will point
	 * to the fs_devices of fs_info. However, when the dev being
	 * replaced is a seed dev it will point to the seed's local
	 * fs_devices. In short, srcdev will have its correct fs_devices
	 * in both cases.
	 */
	fs_devices = srcdev->fs_devices;

	list_del_rcu(&srcdev->dev_list);
	list_del_rcu(&srcdev->dev_alloc_list);
	fs_devices->num_devices--;
	if (srcdev->missing)
		fs_devices->missing_devices--;

	if (srcdev->writeable)
		fs_devices->rw_devices--;

	if (srcdev->bdev)
		fs_devices->open_devices--;
}

void btrfs_rm_dev_replace_free_srcdev(struct btrfs_fs_info *fs_info,
				      struct btrfs_device *srcdev)
{
	struct btrfs_fs_devices *fs_devices = srcdev->fs_devices;

	if (srcdev->writeable) {
		/* zero out the old super if it is writable */
		btrfs_scratch_superblocks(srcdev->bdev, srcdev->name->str);
	}
	call_rcu(&srcdev->rcu, free_device);

	/*
	 * unless fs_devices is a seed fs, num_devices shouldn't go
	 * to zero
	 */
	BUG_ON(!fs_devices->num_devices && !fs_devices->seeding);

	/* if there are no devs we rather delete the fs_devices */
	if (!fs_devices->num_devices) {
		struct btrfs_fs_devices *tmp_fs_devices;

		tmp_fs_devices = fs_info->fs_devices;
		while (tmp_fs_devices) {
			if (tmp_fs_devices->seed == fs_devices) {
				tmp_fs_devices->seed = fs_devices->seed;
				break;
			}
			tmp_fs_devices = tmp_fs_devices->seed;
		}
		fs_devices->seed = NULL;
		__btrfs_close_devices(fs_devices);
		free_fs_devices(fs_devices);
	}
}

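/*
 * The seed-chain walk above mirrors the one in btrfs_rm_device():
 * fs_devices structs form a singly linked list via ->seed, so removing
 * one means finding its predecessor and splicing it out before closing
 * and freeing it.
 */
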
2042 */ 2043 btrfs_scratch_superblocks(tgtdev->bdev, tgtdev->name->str); 2044 call_rcu(&tgtdev->rcu, free_device); 2045 } 2046 2047 static int btrfs_find_device_by_path(struct btrfs_root *root, char *device_path, 2048 struct btrfs_device **device) 2049 { 2050 int ret = 0; 2051 struct btrfs_super_block *disk_super; 2052 u64 devid; 2053 u8 *dev_uuid; 2054 struct block_device *bdev; 2055 struct buffer_head *bh; 2056 2057 *device = NULL; 2058 ret = btrfs_get_bdev_and_sb(device_path, FMODE_READ, 2059 root->fs_info->bdev_holder, 0, &bdev, &bh); 2060 if (ret) 2061 return ret; 2062 disk_super = (struct btrfs_super_block *)bh->b_data; 2063 devid = btrfs_stack_device_id(&disk_super->dev_item); 2064 dev_uuid = disk_super->dev_item.uuid; 2065 *device = btrfs_find_device(root->fs_info, devid, dev_uuid, 2066 disk_super->fsid); 2067 brelse(bh); 2068 if (!*device) 2069 ret = -ENOENT; 2070 blkdev_put(bdev, FMODE_READ); 2071 return ret; 2072 } 2073 2074 int btrfs_find_device_missing_or_by_path(struct btrfs_root *root, 2075 char *device_path, 2076 struct btrfs_device **device) 2077 { 2078 *device = NULL; 2079 if (strcmp(device_path, "missing") == 0) { 2080 struct list_head *devices; 2081 struct btrfs_device *tmp; 2082 2083 devices = &root->fs_info->fs_devices->devices; 2084 /* 2085 * It is safe to read the devices since the volume_mutex 2086 * is held by the caller. 2087 */ 2088 list_for_each_entry(tmp, devices, dev_list) { 2089 if (tmp->in_fs_metadata && !tmp->bdev) { 2090 *device = tmp; 2091 break; 2092 } 2093 } 2094 2095 if (!*device) 2096 return BTRFS_ERROR_DEV_MISSING_NOT_FOUND; 2097 2098 return 0; 2099 } else { 2100 return btrfs_find_device_by_path(root, device_path, device); 2101 } 2102 } 2103 2104 /* 2105 * Look up a device given by device id, or the path if the id is 0. 2106 */ 2107 int btrfs_find_device_by_devspec(struct btrfs_root *root, u64 devid, 2108 char *devpath, 2109 struct btrfs_device **device) 2110 { 2111 int ret; 2112 2113 if (devid) { 2114 ret = 0; 2115 *device = btrfs_find_device(root->fs_info, devid, NULL, 2116 NULL); 2117 if (!*device) 2118 ret = -ENOENT; 2119 } else { 2120 if (!devpath || !devpath[0]) 2121 return -EINVAL; 2122 2123 ret = btrfs_find_device_missing_or_by_path(root, devpath, 2124 device); 2125 } 2126 return ret; 2127 } 2128 2129 /* 2130 * Does all the dirty work required for changing the file system's UUID.
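 * In outline (a summary of the function body below): the in-memory
 * fs_devices is cloned and the clone is kept on the global fs_uuids
 * list to stand for the seed filesystem, the live devices are spliced
 * onto a new seed_devices that becomes fs_devices->seed, the counters
 * on fs_devices are reset, a fresh fsid is generated for the mounted
 * filesystem, and BTRFS_SUPER_FLAG_SEEDING is cleared from the
 * superblock flags.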
2131 */ 2132 static int btrfs_prepare_sprout(struct btrfs_root *root) 2133 { 2134 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices; 2135 struct btrfs_fs_devices *old_devices; 2136 struct btrfs_fs_devices *seed_devices; 2137 struct btrfs_super_block *disk_super = root->fs_info->super_copy; 2138 struct btrfs_device *device; 2139 u64 super_flags; 2140 2141 BUG_ON(!mutex_is_locked(&uuid_mutex)); 2142 if (!fs_devices->seeding) 2143 return -EINVAL; 2144 2145 seed_devices = __alloc_fs_devices(); 2146 if (IS_ERR(seed_devices)) 2147 return PTR_ERR(seed_devices); 2148 2149 old_devices = clone_fs_devices(fs_devices); 2150 if (IS_ERR(old_devices)) { 2151 kfree(seed_devices); 2152 return PTR_ERR(old_devices); 2153 } 2154 2155 list_add(&old_devices->list, &fs_uuids); 2156 2157 memcpy(seed_devices, fs_devices, sizeof(*seed_devices)); 2158 seed_devices->opened = 1; 2159 INIT_LIST_HEAD(&seed_devices->devices); 2160 INIT_LIST_HEAD(&seed_devices->alloc_list); 2161 mutex_init(&seed_devices->device_list_mutex); 2162 2163 mutex_lock(&root->fs_info->fs_devices->device_list_mutex); 2164 list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices, 2165 synchronize_rcu); 2166 list_for_each_entry(device, &seed_devices->devices, dev_list) 2167 device->fs_devices = seed_devices; 2168 2169 lock_chunks(root); 2170 list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list); 2171 unlock_chunks(root); 2172 2173 fs_devices->seeding = 0; 2174 fs_devices->num_devices = 0; 2175 fs_devices->open_devices = 0; 2176 fs_devices->missing_devices = 0; 2177 fs_devices->rotating = 0; 2178 fs_devices->seed = seed_devices; 2179 2180 generate_random_uuid(fs_devices->fsid); 2181 memcpy(root->fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE); 2182 memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE); 2183 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); 2184 2185 super_flags = btrfs_super_flags(disk_super) & 2186 ~BTRFS_SUPER_FLAG_SEEDING; 2187 btrfs_set_super_flags(disk_super, super_flags); 2188 2189 return 0; 2190 } 2191 2192 /* 2193 * Store the expected generation for seed devices in device items. 
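 * The walk below visits every DEV_ITEM in the chunk tree and, for each
 * device that still belongs to a seeding fs_devices, rewrites the
 * item's generation field from the device's in-memory generation.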
2194 */ 2195 static int btrfs_finish_sprout(struct btrfs_trans_handle *trans, 2196 struct btrfs_root *root) 2197 { 2198 struct btrfs_path *path; 2199 struct extent_buffer *leaf; 2200 struct btrfs_dev_item *dev_item; 2201 struct btrfs_device *device; 2202 struct btrfs_key key; 2203 u8 fs_uuid[BTRFS_UUID_SIZE]; 2204 u8 dev_uuid[BTRFS_UUID_SIZE]; 2205 u64 devid; 2206 int ret; 2207 2208 path = btrfs_alloc_path(); 2209 if (!path) 2210 return -ENOMEM; 2211 2212 root = root->fs_info->chunk_root; 2213 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 2214 key.offset = 0; 2215 key.type = BTRFS_DEV_ITEM_KEY; 2216 2217 while (1) { 2218 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 2219 if (ret < 0) 2220 goto error; 2221 2222 leaf = path->nodes[0]; 2223 next_slot: 2224 if (path->slots[0] >= btrfs_header_nritems(leaf)) { 2225 ret = btrfs_next_leaf(root, path); 2226 if (ret > 0) 2227 break; 2228 if (ret < 0) 2229 goto error; 2230 leaf = path->nodes[0]; 2231 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 2232 btrfs_release_path(path); 2233 continue; 2234 } 2235 2236 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 2237 if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID || 2238 key.type != BTRFS_DEV_ITEM_KEY) 2239 break; 2240 2241 dev_item = btrfs_item_ptr(leaf, path->slots[0], 2242 struct btrfs_dev_item); 2243 devid = btrfs_device_id(leaf, dev_item); 2244 read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item), 2245 BTRFS_UUID_SIZE); 2246 read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item), 2247 BTRFS_UUID_SIZE); 2248 device = btrfs_find_device(root->fs_info, devid, dev_uuid, 2249 fs_uuid); 2250 BUG_ON(!device); /* Logic error */ 2251 2252 if (device->fs_devices->seeding) { 2253 btrfs_set_device_generation(leaf, dev_item, 2254 device->generation); 2255 btrfs_mark_buffer_dirty(leaf); 2256 } 2257 2258 path->slots[0]++; 2259 goto next_slot; 2260 } 2261 ret = 0; 2262 error: 2263 btrfs_free_path(path); 2264 return ret; 2265 } 2266 2267 int btrfs_init_new_device(struct btrfs_root *root, char *device_path) 2268 { 2269 struct request_queue *q; 2270 struct btrfs_trans_handle *trans; 2271 struct btrfs_device *device; 2272 struct block_device *bdev; 2273 struct list_head *devices; 2274 struct super_block *sb = root->fs_info->sb; 2275 struct rcu_string *name; 2276 u64 tmp; 2277 int seeding_dev = 0; 2278 int ret = 0; 2279 2280 if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding) 2281 return -EROFS; 2282 2283 bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL, 2284 root->fs_info->bdev_holder); 2285 if (IS_ERR(bdev)) 2286 return PTR_ERR(bdev); 2287 2288 if (root->fs_info->fs_devices->seeding) { 2289 seeding_dev = 1; 2290 down_write(&sb->s_umount); 2291 mutex_lock(&uuid_mutex); 2292 } 2293 2294 filemap_write_and_wait(bdev->bd_inode->i_mapping); 2295 2296 devices = &root->fs_info->fs_devices->devices; 2297 2298 mutex_lock(&root->fs_info->fs_devices->device_list_mutex); 2299 list_for_each_entry(device, devices, dev_list) { 2300 if (device->bdev == bdev) { 2301 ret = -EEXIST; 2302 mutex_unlock( 2303 &root->fs_info->fs_devices->device_list_mutex); 2304 goto error; 2305 } 2306 } 2307 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); 2308 2309 device = btrfs_alloc_device(root->fs_info, NULL, NULL); 2310 if (IS_ERR(device)) { 2311 /* we can safely leave the fs_devices entry around */ 2312 ret = PTR_ERR(device); 2313 goto error; 2314 } 2315 2316 name = rcu_string_strdup(device_path, GFP_KERNEL); 2317 if (!name) { 2318 kfree(device); 2319 ret = -ENOMEM; 2320 goto error; 
2321 } 2322 rcu_assign_pointer(device->name, name); 2323 2324 trans = btrfs_start_transaction(root, 0); 2325 if (IS_ERR(trans)) { 2326 rcu_string_free(device->name); 2327 kfree(device); 2328 ret = PTR_ERR(trans); 2329 goto error; 2330 } 2331 2332 q = bdev_get_queue(bdev); 2333 if (blk_queue_discard(q)) 2334 device->can_discard = 1; 2335 device->writeable = 1; 2336 device->generation = trans->transid; 2337 device->io_width = root->sectorsize; 2338 device->io_align = root->sectorsize; 2339 device->sector_size = root->sectorsize; 2340 device->total_bytes = i_size_read(bdev->bd_inode); 2341 device->disk_total_bytes = device->total_bytes; 2342 device->commit_total_bytes = device->total_bytes; 2343 device->dev_root = root->fs_info->dev_root; 2344 device->bdev = bdev; 2345 device->in_fs_metadata = 1; 2346 device->is_tgtdev_for_dev_replace = 0; 2347 device->mode = FMODE_EXCL; 2348 device->dev_stats_valid = 1; 2349 set_blocksize(device->bdev, 4096); 2350 2351 if (seeding_dev) { 2352 sb->s_flags &= ~MS_RDONLY; 2353 ret = btrfs_prepare_sprout(root); 2354 BUG_ON(ret); /* -ENOMEM */ 2355 } 2356 2357 device->fs_devices = root->fs_info->fs_devices; 2358 2359 mutex_lock(&root->fs_info->fs_devices->device_list_mutex); 2360 lock_chunks(root); 2361 list_add_rcu(&device->dev_list, &root->fs_info->fs_devices->devices); 2362 list_add(&device->dev_alloc_list, 2363 &root->fs_info->fs_devices->alloc_list); 2364 root->fs_info->fs_devices->num_devices++; 2365 root->fs_info->fs_devices->open_devices++; 2366 root->fs_info->fs_devices->rw_devices++; 2367 root->fs_info->fs_devices->total_devices++; 2368 root->fs_info->fs_devices->total_rw_bytes += device->total_bytes; 2369 2370 spin_lock(&root->fs_info->free_chunk_lock); 2371 root->fs_info->free_chunk_space += device->total_bytes; 2372 spin_unlock(&root->fs_info->free_chunk_lock); 2373 2374 if (!blk_queue_nonrot(bdev_get_queue(bdev))) 2375 root->fs_info->fs_devices->rotating = 1; 2376 2377 tmp = btrfs_super_total_bytes(root->fs_info->super_copy); 2378 btrfs_set_super_total_bytes(root->fs_info->super_copy, 2379 tmp + device->total_bytes); 2380 2381 tmp = btrfs_super_num_devices(root->fs_info->super_copy); 2382 btrfs_set_super_num_devices(root->fs_info->super_copy, 2383 tmp + 1); 2384 2385 /* add sysfs device entry */ 2386 btrfs_sysfs_add_device_link(root->fs_info->fs_devices, device); 2387 2388 /* 2389 * we've got more storage, clear any full flags on the space 2390 * infos 2391 */ 2392 btrfs_clear_space_info_full(root->fs_info); 2393 2394 unlock_chunks(root); 2395 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); 2396 2397 if (seeding_dev) { 2398 lock_chunks(root); 2399 ret = init_first_rw_device(trans, root, device); 2400 unlock_chunks(root); 2401 if (ret) { 2402 btrfs_abort_transaction(trans, root, ret); 2403 goto error_trans; 2404 } 2405 } 2406 2407 ret = btrfs_add_device(trans, root, device); 2408 if (ret) { 2409 btrfs_abort_transaction(trans, root, ret); 2410 goto error_trans; 2411 } 2412 2413 if (seeding_dev) { 2414 char fsid_buf[BTRFS_UUID_UNPARSED_SIZE]; 2415 2416 ret = btrfs_finish_sprout(trans, root); 2417 if (ret) { 2418 btrfs_abort_transaction(trans, root, ret); 2419 goto error_trans; 2420 } 2421 2422 /* Sprouting would change the fsid of the mounted root, 2423 * so rename the fsid directory in sysfs 2424 */ 2425 snprintf(fsid_buf, BTRFS_UUID_UNPARSED_SIZE, "%pU", 2426 root->fs_info->fsid); 2427 if (kobject_rename(&root->fs_info->fs_devices->fsid_kobj, 2428 fsid_buf)) 2429 btrfs_warn(root->fs_info, 2430 "sysfs: failed to create fsid for sprout"); 2431 }
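/*
 * For illustration: fsid_kobj backs the /sys/fs/btrfs/<fsid> directory,
 * so after a sprout it has to move to the newly generated uuid. The
 * "%pU" format prints a 16-byte uuid in its canonical 36-character
 * form, which is why the buffer is sized BTRFS_UUID_UNPARSED_SIZE
 * (36 characters plus the terminating NUL).
 */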
2432 2433 root->fs_info->num_tolerated_disk_barrier_failures = 2434 btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info); 2435 ret = btrfs_commit_transaction(trans, root); 2436 2437 if (seeding_dev) { 2438 mutex_unlock(&uuid_mutex); 2439 up_write(&sb->s_umount); 2440 2441 if (ret) /* transaction commit */ 2442 return ret; 2443 2444 ret = btrfs_relocate_sys_chunks(root); 2445 if (ret < 0) 2446 btrfs_handle_fs_error(root->fs_info, ret, 2447 "Failed to relocate sys chunks after " 2448 "device initialization. This can be fixed " 2449 "using the \"btrfs balance\" command."); 2450 trans = btrfs_attach_transaction(root); 2451 if (IS_ERR(trans)) { 2452 if (PTR_ERR(trans) == -ENOENT) 2453 return 0; 2454 return PTR_ERR(trans); 2455 } 2456 ret = btrfs_commit_transaction(trans, root); 2457 } 2458 2459 /* Update ctime/mtime for libblkid */ 2460 update_dev_time(device_path); 2461 return ret; 2462 2463 error_trans: 2464 btrfs_end_transaction(trans, root); 2465 rcu_string_free(device->name); 2466 btrfs_sysfs_rm_device_link(root->fs_info->fs_devices, device); 2467 kfree(device); 2468 error: 2469 blkdev_put(bdev, FMODE_EXCL); 2470 if (seeding_dev) { 2471 mutex_unlock(&uuid_mutex); 2472 up_write(&sb->s_umount); 2473 } 2474 return ret; 2475 } 2476 2477 int btrfs_init_dev_replace_tgtdev(struct btrfs_root *root, char *device_path, 2478 struct btrfs_device *srcdev, 2479 struct btrfs_device **device_out) 2480 { 2481 struct request_queue *q; 2482 struct btrfs_device *device; 2483 struct block_device *bdev; 2484 struct btrfs_fs_info *fs_info = root->fs_info; 2485 struct list_head *devices; 2486 struct rcu_string *name; 2487 u64 devid = BTRFS_DEV_REPLACE_DEVID; 2488 int ret = 0; 2489 2490 *device_out = NULL; 2491 if (fs_info->fs_devices->seeding) { 2492 btrfs_err(fs_info, "the filesystem is a seed filesystem!"); 2493 return -EINVAL; 2494 } 2495 2496 bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL, 2497 fs_info->bdev_holder); 2498 if (IS_ERR(bdev)) { 2499 btrfs_err(fs_info, "target device %s is invalid!", device_path); 2500 return PTR_ERR(bdev); 2501 } 2502 2503 filemap_write_and_wait(bdev->bd_inode->i_mapping); 2504 2505 devices = &fs_info->fs_devices->devices; 2506 list_for_each_entry(device, devices, dev_list) { 2507 if (device->bdev == bdev) { 2508 btrfs_err(fs_info, "target device is in the filesystem!"); 2509 ret = -EEXIST; 2510 goto error; 2511 } 2512 } 2513 2514 2515 if (i_size_read(bdev->bd_inode) < 2516 btrfs_device_get_total_bytes(srcdev)) { 2517 btrfs_err(fs_info, "target device is smaller than source device!"); 2518 ret = -EINVAL; 2519 goto error; 2520 } 2521 2522 2523 device = btrfs_alloc_device(NULL, &devid, NULL); 2524 if (IS_ERR(device)) { 2525 ret = PTR_ERR(device); 2526 goto error; 2527 } 2528 2529 name = rcu_string_strdup(device_path, GFP_NOFS); 2530 if (!name) { 2531 kfree(device); 2532 ret = -ENOMEM; 2533 goto error; 2534 } 2535 rcu_assign_pointer(device->name, name); 2536 2537 q = bdev_get_queue(bdev); 2538 if (blk_queue_discard(q)) 2539 device->can_discard = 1; 2540 mutex_lock(&root->fs_info->fs_devices->device_list_mutex); 2541 device->writeable = 1; 2542 device->generation = 0; 2543 device->io_width = root->sectorsize; 2544 device->io_align = root->sectorsize; 2545 device->sector_size = root->sectorsize; 2546 device->total_bytes = btrfs_device_get_total_bytes(srcdev); 2547 device->disk_total_bytes = btrfs_device_get_disk_total_bytes(srcdev); 2548 device->bytes_used = btrfs_device_get_bytes_used(srcdev); 2549 ASSERT(list_empty(&srcdev->resized_list)); 2550 
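/*
 * Note (inferred rationale, not from the original source): the replace
 * target mirrors the source device's size accounting (total_bytes,
 * disk_total_bytes and bytes_used above, the committed values below),
 * so that when the copy finishes the target can take over the source's
 * role without any size bookkeeping changing.
 */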
device->commit_total_bytes = srcdev->commit_total_bytes; 2551 device->commit_bytes_used = device->bytes_used; 2552 device->dev_root = fs_info->dev_root; 2553 device->bdev = bdev; 2554 device->in_fs_metadata = 1; 2555 device->is_tgtdev_for_dev_replace = 1; 2556 device->mode = FMODE_EXCL; 2557 device->dev_stats_valid = 1; 2558 set_blocksize(device->bdev, 4096); 2559 device->fs_devices = fs_info->fs_devices; 2560 list_add(&device->dev_list, &fs_info->fs_devices->devices); 2561 fs_info->fs_devices->num_devices++; 2562 fs_info->fs_devices->open_devices++; 2563 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); 2564 2565 *device_out = device; 2566 return ret; 2567 2568 error: 2569 blkdev_put(bdev, FMODE_EXCL); 2570 return ret; 2571 } 2572 2573 void btrfs_init_dev_replace_tgtdev_for_resume(struct btrfs_fs_info *fs_info, 2574 struct btrfs_device *tgtdev) 2575 { 2576 WARN_ON(fs_info->fs_devices->rw_devices == 0); 2577 tgtdev->io_width = fs_info->dev_root->sectorsize; 2578 tgtdev->io_align = fs_info->dev_root->sectorsize; 2579 tgtdev->sector_size = fs_info->dev_root->sectorsize; 2580 tgtdev->dev_root = fs_info->dev_root; 2581 tgtdev->in_fs_metadata = 1; 2582 } 2583 2584 static noinline int btrfs_update_device(struct btrfs_trans_handle *trans, 2585 struct btrfs_device *device) 2586 { 2587 int ret; 2588 struct btrfs_path *path; 2589 struct btrfs_root *root; 2590 struct btrfs_dev_item *dev_item; 2591 struct extent_buffer *leaf; 2592 struct btrfs_key key; 2593 2594 root = device->dev_root->fs_info->chunk_root; 2595 2596 path = btrfs_alloc_path(); 2597 if (!path) 2598 return -ENOMEM; 2599 2600 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 2601 key.type = BTRFS_DEV_ITEM_KEY; 2602 key.offset = device->devid; 2603 2604 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 2605 if (ret < 0) 2606 goto out; 2607 2608 if (ret > 0) { 2609 ret = -ENOENT; 2610 goto out; 2611 } 2612 2613 leaf = path->nodes[0]; 2614 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item); 2615 2616 btrfs_set_device_id(leaf, dev_item, device->devid); 2617 btrfs_set_device_type(leaf, dev_item, device->type); 2618 btrfs_set_device_io_align(leaf, dev_item, device->io_align); 2619 btrfs_set_device_io_width(leaf, dev_item, device->io_width); 2620 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size); 2621 btrfs_set_device_total_bytes(leaf, dev_item, 2622 btrfs_device_get_disk_total_bytes(device)); 2623 btrfs_set_device_bytes_used(leaf, dev_item, 2624 btrfs_device_get_bytes_used(device)); 2625 btrfs_mark_buffer_dirty(leaf); 2626 2627 out: 2628 btrfs_free_path(path); 2629 return ret; 2630 } 2631 2632 int btrfs_grow_device(struct btrfs_trans_handle *trans, 2633 struct btrfs_device *device, u64 new_size) 2634 { 2635 struct btrfs_super_block *super_copy = 2636 device->dev_root->fs_info->super_copy; 2637 struct btrfs_fs_devices *fs_devices; 2638 u64 old_total; 2639 u64 diff; 2640 2641 if (!device->writeable) 2642 return -EACCES; 2643 2644 lock_chunks(device->dev_root); 2645 old_total = btrfs_super_total_bytes(super_copy); 2646 diff = new_size - device->total_bytes; 2647 2648 if (new_size <= device->total_bytes || 2649 device->is_tgtdev_for_dev_replace) { 2650 unlock_chunks(device->dev_root); 2651 return -EINVAL; 2652 } 2653 2654 fs_devices = device->dev_root->fs_info->fs_devices; 2655 2656 btrfs_set_super_total_bytes(super_copy, old_total + diff); 2657 device->fs_devices->total_rw_bytes += diff; 2658 2659 btrfs_device_set_total_bytes(device, new_size); 2660 btrfs_device_set_disk_total_bytes(device, new_size); 
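/*
 * Worked example (illustrative): growing a 100GiB device to
 * new_size == 120GiB gives diff == 20GiB; the superblock's total_bytes
 * and fs_devices->total_rw_bytes each grow by diff, while the device's
 * own total_bytes and disk_total_bytes are simply set to 120GiB.
 */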
2661 btrfs_clear_space_info_full(device->dev_root->fs_info); 2662 if (list_empty(&device->resized_list)) 2663 list_add_tail(&device->resized_list, 2664 &fs_devices->resized_devices); 2665 unlock_chunks(device->dev_root); 2666 2667 return btrfs_update_device(trans, device); 2668 } 2669 2670 static int btrfs_free_chunk(struct btrfs_trans_handle *trans, 2671 struct btrfs_root *root, u64 chunk_objectid, 2672 u64 chunk_offset) 2673 { 2674 int ret; 2675 struct btrfs_path *path; 2676 struct btrfs_key key; 2677 2678 root = root->fs_info->chunk_root; 2679 path = btrfs_alloc_path(); 2680 if (!path) 2681 return -ENOMEM; 2682 2683 key.objectid = chunk_objectid; 2684 key.offset = chunk_offset; 2685 key.type = BTRFS_CHUNK_ITEM_KEY; 2686 2687 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 2688 if (ret < 0) 2689 goto out; 2690 else if (ret > 0) { /* Logic error or corruption */ 2691 btrfs_handle_fs_error(root->fs_info, -ENOENT, 2692 "Failed lookup while freeing chunk."); 2693 ret = -ENOENT; 2694 goto out; 2695 } 2696 2697 ret = btrfs_del_item(trans, root, path); 2698 if (ret < 0) 2699 btrfs_handle_fs_error(root->fs_info, ret, 2700 "Failed to delete chunk item."); 2701 out: 2702 btrfs_free_path(path); 2703 return ret; 2704 } 2705 2706 static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64 2707 chunk_offset) 2708 { 2709 struct btrfs_super_block *super_copy = root->fs_info->super_copy; 2710 struct btrfs_disk_key *disk_key; 2711 struct btrfs_chunk *chunk; 2712 u8 *ptr; 2713 int ret = 0; 2714 u32 num_stripes; 2715 u32 array_size; 2716 u32 len = 0; 2717 u32 cur; 2718 struct btrfs_key key; 2719 2720 lock_chunks(root); 2721 array_size = btrfs_super_sys_array_size(super_copy); 2722 2723 ptr = super_copy->sys_chunk_array; 2724 cur = 0; 2725 2726 while (cur < array_size) { 2727 disk_key = (struct btrfs_disk_key *)ptr; 2728 btrfs_disk_key_to_cpu(&key, disk_key); 2729 2730 len = sizeof(*disk_key); 2731 2732 if (key.type == BTRFS_CHUNK_ITEM_KEY) { 2733 chunk = (struct btrfs_chunk *)(ptr + len); 2734 num_stripes = btrfs_stack_chunk_num_stripes(chunk); 2735 len += btrfs_chunk_item_size(num_stripes); 2736 } else { 2737 ret = -EIO; 2738 break; 2739 } 2740 if (key.objectid == chunk_objectid && 2741 key.offset == chunk_offset) { 2742 memmove(ptr, ptr + len, array_size - (cur + len)); 2743 array_size -= len; 2744 btrfs_set_super_sys_array_size(super_copy, array_size); 2745 } else { 2746 ptr += len; 2747 cur += len; 2748 } 2749 } 2750 unlock_chunks(root); 2751 return ret; 2752 } 2753 2754 int btrfs_remove_chunk(struct btrfs_trans_handle *trans, 2755 struct btrfs_root *root, u64 chunk_offset) 2756 { 2757 struct extent_map_tree *em_tree; 2758 struct extent_map *em; 2759 struct btrfs_root *extent_root = root->fs_info->extent_root; 2760 struct map_lookup *map; 2761 u64 dev_extent_len = 0; 2762 u64 chunk_objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 2763 int i, ret = 0; 2764 2765 /* Just in case */ 2766 root = root->fs_info->chunk_root; 2767 em_tree = &root->fs_info->mapping_tree.map_tree; 2768 2769 read_lock(&em_tree->lock); 2770 em = lookup_extent_mapping(em_tree, chunk_offset, 1); 2771 read_unlock(&em_tree->lock); 2772 2773 if (!em || em->start > chunk_offset || 2774 em->start + em->len < chunk_offset) { 2775 /* 2776 * This is a logic error, but we don't want to just rely on the 2777 * user having built with ASSERT enabled, so if ASSERT doesn't 2778 * do anything we still error out. 
2779 */ 2780 ASSERT(0); 2781 if (em) 2782 free_extent_map(em); 2783 return -EINVAL; 2784 } 2785 map = em->map_lookup; 2786 lock_chunks(root->fs_info->chunk_root); 2787 check_system_chunk(trans, extent_root, map->type); 2788 unlock_chunks(root->fs_info->chunk_root); 2789 2790 for (i = 0; i < map->num_stripes; i++) { 2791 struct btrfs_device *device = map->stripes[i].dev; 2792 ret = btrfs_free_dev_extent(trans, device, 2793 map->stripes[i].physical, 2794 &dev_extent_len); 2795 if (ret) { 2796 btrfs_abort_transaction(trans, root, ret); 2797 goto out; 2798 } 2799 2800 if (device->bytes_used > 0) { 2801 lock_chunks(root); 2802 btrfs_device_set_bytes_used(device, 2803 device->bytes_used - dev_extent_len); 2804 spin_lock(&root->fs_info->free_chunk_lock); 2805 root->fs_info->free_chunk_space += dev_extent_len; 2806 spin_unlock(&root->fs_info->free_chunk_lock); 2807 btrfs_clear_space_info_full(root->fs_info); 2808 unlock_chunks(root); 2809 } 2810 2811 if (map->stripes[i].dev) { 2812 ret = btrfs_update_device(trans, map->stripes[i].dev); 2813 if (ret) { 2814 btrfs_abort_transaction(trans, root, ret); 2815 goto out; 2816 } 2817 } 2818 } 2819 ret = btrfs_free_chunk(trans, root, chunk_objectid, chunk_offset); 2820 if (ret) { 2821 btrfs_abort_transaction(trans, root, ret); 2822 goto out; 2823 } 2824 2825 trace_btrfs_chunk_free(root, map, chunk_offset, em->len); 2826 2827 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) { 2828 ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset); 2829 if (ret) { 2830 btrfs_abort_transaction(trans, root, ret); 2831 goto out; 2832 } 2833 } 2834 2835 ret = btrfs_remove_block_group(trans, extent_root, chunk_offset, em); 2836 if (ret) { 2837 btrfs_abort_transaction(trans, extent_root, ret); 2838 goto out; 2839 } 2840 2841 out: 2842 /* once for us */ 2843 free_extent_map(em); 2844 return ret; 2845 } 2846 2847 static int btrfs_relocate_chunk(struct btrfs_root *root, u64 chunk_offset) 2848 { 2849 struct btrfs_root *extent_root; 2850 struct btrfs_trans_handle *trans; 2851 int ret; 2852 2853 root = root->fs_info->chunk_root; 2854 extent_root = root->fs_info->extent_root; 2855 2856 /* 2857 * Prevent races with automatic removal of unused block groups. 2858 * After we relocate and before we remove the chunk with offset 2859 * chunk_offset, automatic removal of the block group can kick in, 2860 * resulting in a failure when calling btrfs_remove_chunk() below. 2861 * 2862 * Make sure to acquire this mutex before doing a tree search (dev 2863 * or chunk trees) to find chunks. Otherwise the cleaner kthread might 2864 * call btrfs_remove_chunk() (through btrfs_delete_unused_bgs()) after 2865 * we release the path used to search the chunk/dev tree and before 2866 * the current task acquires this mutex and calls us. 
2867 */ 2868 ASSERT(mutex_is_locked(&root->fs_info->delete_unused_bgs_mutex)); 2869 2870 ret = btrfs_can_relocate(extent_root, chunk_offset); 2871 if (ret) 2872 return -ENOSPC; 2873 2874 /* step one, relocate all the extents inside this chunk */ 2875 btrfs_scrub_pause(root); 2876 ret = btrfs_relocate_block_group(extent_root, chunk_offset); 2877 btrfs_scrub_continue(root); 2878 if (ret) 2879 return ret; 2880 2881 trans = btrfs_start_trans_remove_block_group(root->fs_info, 2882 chunk_offset); 2883 if (IS_ERR(trans)) { 2884 ret = PTR_ERR(trans); 2885 btrfs_handle_fs_error(root->fs_info, ret, NULL); 2886 return ret; 2887 } 2888 2889 /* 2890 * step two, delete the device extents and the 2891 * chunk tree entries 2892 */ 2893 ret = btrfs_remove_chunk(trans, root, chunk_offset); 2894 btrfs_end_transaction(trans, root); 2895 return ret; 2896 } 2897 2898 static int btrfs_relocate_sys_chunks(struct btrfs_root *root) 2899 { 2900 struct btrfs_root *chunk_root = root->fs_info->chunk_root; 2901 struct btrfs_path *path; 2902 struct extent_buffer *leaf; 2903 struct btrfs_chunk *chunk; 2904 struct btrfs_key key; 2905 struct btrfs_key found_key; 2906 u64 chunk_type; 2907 bool retried = false; 2908 int failed = 0; 2909 int ret; 2910 2911 path = btrfs_alloc_path(); 2912 if (!path) 2913 return -ENOMEM; 2914 2915 again: 2916 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 2917 key.offset = (u64)-1; 2918 key.type = BTRFS_CHUNK_ITEM_KEY; 2919 2920 while (1) { 2921 mutex_lock(&root->fs_info->delete_unused_bgs_mutex); 2922 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0); 2923 if (ret < 0) { 2924 mutex_unlock(&root->fs_info->delete_unused_bgs_mutex); 2925 goto error; 2926 } 2927 BUG_ON(ret == 0); /* Corruption */ 2928 2929 ret = btrfs_previous_item(chunk_root, path, key.objectid, 2930 key.type); 2931 if (ret) 2932 mutex_unlock(&root->fs_info->delete_unused_bgs_mutex); 2933 if (ret < 0) 2934 goto error; 2935 if (ret > 0) 2936 break; 2937 2938 leaf = path->nodes[0]; 2939 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 2940 2941 chunk = btrfs_item_ptr(leaf, path->slots[0], 2942 struct btrfs_chunk); 2943 chunk_type = btrfs_chunk_type(leaf, chunk); 2944 btrfs_release_path(path); 2945 2946 if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) { 2947 ret = btrfs_relocate_chunk(chunk_root, 2948 found_key.offset); 2949 if (ret == -ENOSPC) 2950 failed++; 2951 else 2952 BUG_ON(ret); 2953 } 2954 mutex_unlock(&root->fs_info->delete_unused_bgs_mutex); 2955 2956 if (found_key.offset == 0) 2957 break; 2958 key.offset = found_key.offset - 1; 2959 } 2960 ret = 0; 2961 if (failed && !retried) { 2962 failed = 0; 2963 retried = true; 2964 goto again; 2965 } else if (WARN_ON(failed && retried)) { 2966 ret = -ENOSPC; 2967 } 2968 error: 2969 btrfs_free_path(path); 2970 return ret; 2971 } 2972 2973 static int insert_balance_item(struct btrfs_root *root, 2974 struct btrfs_balance_control *bctl) 2975 { 2976 struct btrfs_trans_handle *trans; 2977 struct btrfs_balance_item *item; 2978 struct btrfs_disk_balance_args disk_bargs; 2979 struct btrfs_path *path; 2980 struct extent_buffer *leaf; 2981 struct btrfs_key key; 2982 int ret, err; 2983 2984 path = btrfs_alloc_path(); 2985 if (!path) 2986 return -ENOMEM; 2987 2988 trans = btrfs_start_transaction(root, 0); 2989 if (IS_ERR(trans)) { 2990 btrfs_free_path(path); 2991 return PTR_ERR(trans); 2992 } 2993 2994 key.objectid = BTRFS_BALANCE_OBJECTID; 2995 key.type = BTRFS_TEMPORARY_ITEM_KEY; 2996 key.offset = 0; 2997 2998 ret = btrfs_insert_empty_item(trans, root, path, &key, 2999 
sizeof(*item)); 3000 if (ret) 3001 goto out; 3002 3003 leaf = path->nodes[0]; 3004 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item); 3005 3006 memset_extent_buffer(leaf, 0, (unsigned long)item, sizeof(*item)); 3007 3008 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data); 3009 btrfs_set_balance_data(leaf, item, &disk_bargs); 3010 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta); 3011 btrfs_set_balance_meta(leaf, item, &disk_bargs); 3012 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys); 3013 btrfs_set_balance_sys(leaf, item, &disk_bargs); 3014 3015 btrfs_set_balance_flags(leaf, item, bctl->flags); 3016 3017 btrfs_mark_buffer_dirty(leaf); 3018 out: 3019 btrfs_free_path(path); 3020 err = btrfs_commit_transaction(trans, root); 3021 if (err && !ret) 3022 ret = err; 3023 return ret; 3024 } 3025 3026 static int del_balance_item(struct btrfs_root *root) 3027 { 3028 struct btrfs_trans_handle *trans; 3029 struct btrfs_path *path; 3030 struct btrfs_key key; 3031 int ret, err; 3032 3033 path = btrfs_alloc_path(); 3034 if (!path) 3035 return -ENOMEM; 3036 3037 trans = btrfs_start_transaction(root, 0); 3038 if (IS_ERR(trans)) { 3039 btrfs_free_path(path); 3040 return PTR_ERR(trans); 3041 } 3042 3043 key.objectid = BTRFS_BALANCE_OBJECTID; 3044 key.type = BTRFS_TEMPORARY_ITEM_KEY; 3045 key.offset = 0; 3046 3047 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 3048 if (ret < 0) 3049 goto out; 3050 if (ret > 0) { 3051 ret = -ENOENT; 3052 goto out; 3053 } 3054 3055 ret = btrfs_del_item(trans, root, path); 3056 out: 3057 btrfs_free_path(path); 3058 err = btrfs_commit_transaction(trans, root); 3059 if (err && !ret) 3060 ret = err; 3061 return ret; 3062 } 3063 3064 /* 3065 * This is a heuristic used to reduce the number of chunks balanced on 3066 * resume after balance was interrupted. 3067 */ 3068 static void update_balance_args(struct btrfs_balance_control *bctl) 3069 { 3070 /* 3071 * Turn on soft mode for chunk types that were being converted. 3072 */ 3073 if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) 3074 bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT; 3075 if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) 3076 bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT; 3077 if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) 3078 bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT; 3079 3080 /* 3081 * Turn on the usage filter if it is not already used. The idea is 3082 * that chunks that we have already balanced should be 3083 * reasonably full. Don't do it for chunks that are being 3084 * converted - that will keep us from relocating unconverted 3085 * (albeit full) chunks.
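 * For example, a resumed balance picks up usage=90 below, so chunks
 * that are at least 90% full are treated as already balanced and are
 * skipped on the second pass.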
3086 */ 3087 if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) && 3088 !(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) && 3089 !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) { 3090 bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE; 3091 bctl->data.usage = 90; 3092 } 3093 if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) && 3094 !(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) && 3095 !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) { 3096 bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE; 3097 bctl->sys.usage = 90; 3098 } 3099 if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) && 3100 !(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) && 3101 !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) { 3102 bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE; 3103 bctl->meta.usage = 90; 3104 } 3105 } 3106 3107 /* 3108 * Should be called with both balance and volume mutexes held to 3109 * serialize other volume operations (add_dev/rm_dev/resize) with 3110 * restriper. Same goes for unset_balance_control. 3111 */ 3112 static void set_balance_control(struct btrfs_balance_control *bctl) 3113 { 3114 struct btrfs_fs_info *fs_info = bctl->fs_info; 3115 3116 BUG_ON(fs_info->balance_ctl); 3117 3118 spin_lock(&fs_info->balance_lock); 3119 fs_info->balance_ctl = bctl; 3120 spin_unlock(&fs_info->balance_lock); 3121 } 3122 3123 static void unset_balance_control(struct btrfs_fs_info *fs_info) 3124 { 3125 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 3126 3127 BUG_ON(!fs_info->balance_ctl); 3128 3129 spin_lock(&fs_info->balance_lock); 3130 fs_info->balance_ctl = NULL; 3131 spin_unlock(&fs_info->balance_lock); 3132 3133 kfree(bctl); 3134 } 3135 3136 /* 3137 * Balance filters. Return 1 if chunk should be filtered out 3138 * (should not be balanced). 3139 */ 3140 static int chunk_profiles_filter(u64 chunk_type, 3141 struct btrfs_balance_args *bargs) 3142 { 3143 chunk_type = chunk_to_extended(chunk_type) & 3144 BTRFS_EXTENDED_PROFILE_MASK; 3145 3146 if (bargs->profiles & chunk_type) 3147 return 0; 3148 3149 return 1; 3150 } 3151 3152 static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset, 3153 struct btrfs_balance_args *bargs) 3154 { 3155 struct btrfs_block_group_cache *cache; 3156 u64 chunk_used; 3157 u64 user_thresh_min; 3158 u64 user_thresh_max; 3159 int ret = 1; 3160 3161 cache = btrfs_lookup_block_group(fs_info, chunk_offset); 3162 chunk_used = btrfs_block_group_used(&cache->item); 3163 3164 if (bargs->usage_min == 0) 3165 user_thresh_min = 0; 3166 else 3167 user_thresh_min = div_factor_fine(cache->key.offset, 3168 bargs->usage_min); 3169 3170 if (bargs->usage_max == 0) 3171 user_thresh_max = 1; 3172 else if (bargs->usage_max > 100) 3173 user_thresh_max = cache->key.offset; 3174 else 3175 user_thresh_max = div_factor_fine(cache->key.offset, 3176 bargs->usage_max); 3177 3178 if (user_thresh_min <= chunk_used && chunk_used < user_thresh_max) 3179 ret = 0; 3180 3181 btrfs_put_block_group(cache); 3182 return ret; 3183 } 3184 3185 static int chunk_usage_filter(struct btrfs_fs_info *fs_info, 3186 u64 chunk_offset, struct btrfs_balance_args *bargs) 3187 { 3188 struct btrfs_block_group_cache *cache; 3189 u64 chunk_used, user_thresh; 3190 int ret = 1; 3191 3192 cache = btrfs_lookup_block_group(fs_info, chunk_offset); 3193 chunk_used = btrfs_block_group_used(&cache->item); 3194 3195 if (bargs->usage_min == 0) 3196 user_thresh = 1; 3197 else if (bargs->usage > 100) 3198 user_thresh = cache->key.offset; 3199 else 3200 user_thresh = div_factor_fine(cache->key.offset, 3201 bargs->usage); 
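/*
 * Worked example (illustrative): for a 1GiB chunk
 * (cache->key.offset == 1GiB) and usage == 50, div_factor_fine()
 * yields user_thresh == 512MiB, so the chunk passes the filter
 * (ret == 0) only while it is less than half full. Note that usage
 * and usage_min/usage_max overlay the same bytes of a union in
 * struct btrfs_balance_args, which is why the zero check above tests
 * bargs->usage_min.
 */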
3202 3203 if (chunk_used < user_thresh) 3204 ret = 0; 3205 3206 btrfs_put_block_group(cache); 3207 return ret; 3208 } 3209 3210 static int chunk_devid_filter(struct extent_buffer *leaf, 3211 struct btrfs_chunk *chunk, 3212 struct btrfs_balance_args *bargs) 3213 { 3214 struct btrfs_stripe *stripe; 3215 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 3216 int i; 3217 3218 for (i = 0; i < num_stripes; i++) { 3219 stripe = btrfs_stripe_nr(chunk, i); 3220 if (btrfs_stripe_devid(leaf, stripe) == bargs->devid) 3221 return 0; 3222 } 3223 3224 return 1; 3225 } 3226 3227 /* [pstart, pend) */ 3228 static int chunk_drange_filter(struct extent_buffer *leaf, 3229 struct btrfs_chunk *chunk, 3230 u64 chunk_offset, 3231 struct btrfs_balance_args *bargs) 3232 { 3233 struct btrfs_stripe *stripe; 3234 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 3235 u64 stripe_offset; 3236 u64 stripe_length; 3237 int factor; 3238 int i; 3239 3240 if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID)) 3241 return 0; 3242 3243 if (btrfs_chunk_type(leaf, chunk) & (BTRFS_BLOCK_GROUP_DUP | 3244 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10)) { 3245 factor = num_stripes / 2; 3246 } else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID5) { 3247 factor = num_stripes - 1; 3248 } else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID6) { 3249 factor = num_stripes - 2; 3250 } else { 3251 factor = num_stripes; 3252 } 3253 3254 for (i = 0; i < num_stripes; i++) { 3255 stripe = btrfs_stripe_nr(chunk, i); 3256 if (btrfs_stripe_devid(leaf, stripe) != bargs->devid) 3257 continue; 3258 3259 stripe_offset = btrfs_stripe_offset(leaf, stripe); 3260 stripe_length = btrfs_chunk_length(leaf, chunk); 3261 stripe_length = div_u64(stripe_length, factor); 3262 3263 if (stripe_offset < bargs->pend && 3264 stripe_offset + stripe_length > bargs->pstart) 3265 return 0; 3266 } 3267 3268 return 1; 3269 } 3270 3271 /* [vstart, vend) */ 3272 static int chunk_vrange_filter(struct extent_buffer *leaf, 3273 struct btrfs_chunk *chunk, 3274 u64 chunk_offset, 3275 struct btrfs_balance_args *bargs) 3276 { 3277 if (chunk_offset < bargs->vend && 3278 chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart) 3279 /* at least part of the chunk is inside this vrange */ 3280 return 0; 3281 3282 return 1; 3283 } 3284 3285 static int chunk_stripes_range_filter(struct extent_buffer *leaf, 3286 struct btrfs_chunk *chunk, 3287 struct btrfs_balance_args *bargs) 3288 { 3289 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 3290 3291 if (bargs->stripes_min <= num_stripes 3292 && num_stripes <= bargs->stripes_max) 3293 return 0; 3294 3295 return 1; 3296 } 3297 3298 static int chunk_soft_convert_filter(u64 chunk_type, 3299 struct btrfs_balance_args *bargs) 3300 { 3301 if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT)) 3302 return 0; 3303 3304 chunk_type = chunk_to_extended(chunk_type) & 3305 BTRFS_EXTENDED_PROFILE_MASK; 3306 3307 if (bargs->target == chunk_type) 3308 return 1; 3309 3310 return 0; 3311 } 3312 3313 static int should_balance_chunk(struct btrfs_root *root, 3314 struct extent_buffer *leaf, 3315 struct btrfs_chunk *chunk, u64 chunk_offset) 3316 { 3317 struct btrfs_balance_control *bctl = root->fs_info->balance_ctl; 3318 struct btrfs_balance_args *bargs = NULL; 3319 u64 chunk_type = btrfs_chunk_type(leaf, chunk); 3320 3321 /* type filter */ 3322 if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) & 3323 (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) { 3324 return 0; 3325 } 3326 3327 if (chunk_type & BTRFS_BLOCK_GROUP_DATA) 3328 bargs 
= &bctl->data; 3329 else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) 3330 bargs = &bctl->sys; 3331 else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA) 3332 bargs = &bctl->meta; 3333 3334 /* profiles filter */ 3335 if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) && 3336 chunk_profiles_filter(chunk_type, bargs)) { 3337 return 0; 3338 } 3339 3340 /* usage filter */ 3341 if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) && 3342 chunk_usage_filter(bctl->fs_info, chunk_offset, bargs)) { 3343 return 0; 3344 } else if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) && 3345 chunk_usage_range_filter(bctl->fs_info, chunk_offset, bargs)) { 3346 return 0; 3347 } 3348 3349 /* devid filter */ 3350 if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) && 3351 chunk_devid_filter(leaf, chunk, bargs)) { 3352 return 0; 3353 } 3354 3355 /* drange filter, makes sense only with devid filter */ 3356 if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) && 3357 chunk_drange_filter(leaf, chunk, chunk_offset, bargs)) { 3358 return 0; 3359 } 3360 3361 /* vrange filter */ 3362 if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) && 3363 chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) { 3364 return 0; 3365 } 3366 3367 /* stripes filter */ 3368 if ((bargs->flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) && 3369 chunk_stripes_range_filter(leaf, chunk, bargs)) { 3370 return 0; 3371 } 3372 3373 /* soft profile changing mode */ 3374 if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) && 3375 chunk_soft_convert_filter(chunk_type, bargs)) { 3376 return 0; 3377 } 3378 3379 /* 3380 * limited by count, must be the last filter 3381 */ 3382 if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT)) { 3383 if (bargs->limit == 0) 3384 return 0; 3385 else 3386 bargs->limit--; 3387 } else if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)) { 3388 /* 3389 * Same logic as the 'limit' filter; the minimum cannot be 3390 * determined here because we do not have the global information 3391 * about the count of all chunks that satisfy the filters. 
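 * For example, -dlimit=3 relocates at most three matching data chunks
 * in one run: each accepted chunk decrements the counter and, once it
 * reaches zero, every further chunk is filtered out.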
3392 */ 3393 if (bargs->limit_max == 0) 3394 return 0; 3395 else 3396 bargs->limit_max--; 3397 } 3398 3399 return 1; 3400 } 3401 3402 static int __btrfs_balance(struct btrfs_fs_info *fs_info) 3403 { 3404 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 3405 struct btrfs_root *chunk_root = fs_info->chunk_root; 3406 struct btrfs_root *dev_root = fs_info->dev_root; 3407 struct list_head *devices; 3408 struct btrfs_device *device; 3409 u64 old_size; 3410 u64 size_to_free; 3411 u64 chunk_type; 3412 struct btrfs_chunk *chunk; 3413 struct btrfs_path *path; 3414 struct btrfs_key key; 3415 struct btrfs_key found_key; 3416 struct btrfs_trans_handle *trans; 3417 struct extent_buffer *leaf; 3418 int slot; 3419 int ret; 3420 int enospc_errors = 0; 3421 bool counting = true; 3422 /* The single value limit and min/max limits use the same bytes in the */ 3423 u64 limit_data = bctl->data.limit; 3424 u64 limit_meta = bctl->meta.limit; 3425 u64 limit_sys = bctl->sys.limit; 3426 u32 count_data = 0; 3427 u32 count_meta = 0; 3428 u32 count_sys = 0; 3429 int chunk_reserved = 0; 3430 u64 bytes_used = 0; 3431 3432 /* step one make some room on all the devices */ 3433 devices = &fs_info->fs_devices->devices; 3434 list_for_each_entry(device, devices, dev_list) { 3435 old_size = btrfs_device_get_total_bytes(device); 3436 size_to_free = div_factor(old_size, 1); 3437 size_to_free = min_t(u64, size_to_free, SZ_1M); 3438 if (!device->writeable || 3439 btrfs_device_get_total_bytes(device) - 3440 btrfs_device_get_bytes_used(device) > size_to_free || 3441 device->is_tgtdev_for_dev_replace) 3442 continue; 3443 3444 ret = btrfs_shrink_device(device, old_size - size_to_free); 3445 if (ret == -ENOSPC) 3446 break; 3447 BUG_ON(ret); 3448 3449 trans = btrfs_start_transaction(dev_root, 0); 3450 BUG_ON(IS_ERR(trans)); 3451 3452 ret = btrfs_grow_device(trans, device, old_size); 3453 BUG_ON(ret); 3454 3455 btrfs_end_transaction(trans, dev_root); 3456 } 3457 3458 /* step two, relocate all the chunks */ 3459 path = btrfs_alloc_path(); 3460 if (!path) { 3461 ret = -ENOMEM; 3462 goto error; 3463 } 3464 3465 /* zero out stat counters */ 3466 spin_lock(&fs_info->balance_lock); 3467 memset(&bctl->stat, 0, sizeof(bctl->stat)); 3468 spin_unlock(&fs_info->balance_lock); 3469 again: 3470 if (!counting) { 3471 /* 3472 * The single value limit and min/max limits use the same bytes 3473 * in the 3474 */ 3475 bctl->data.limit = limit_data; 3476 bctl->meta.limit = limit_meta; 3477 bctl->sys.limit = limit_sys; 3478 } 3479 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 3480 key.offset = (u64)-1; 3481 key.type = BTRFS_CHUNK_ITEM_KEY; 3482 3483 while (1) { 3484 if ((!counting && atomic_read(&fs_info->balance_pause_req)) || 3485 atomic_read(&fs_info->balance_cancel_req)) { 3486 ret = -ECANCELED; 3487 goto error; 3488 } 3489 3490 mutex_lock(&fs_info->delete_unused_bgs_mutex); 3491 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0); 3492 if (ret < 0) { 3493 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 3494 goto error; 3495 } 3496 3497 /* 3498 * this shouldn't happen, it means the last relocate 3499 * failed 3500 */ 3501 if (ret == 0) 3502 BUG(); /* FIXME break ? 
*/ 3503 3504 ret = btrfs_previous_item(chunk_root, path, 0, 3505 BTRFS_CHUNK_ITEM_KEY); 3506 if (ret) { 3507 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 3508 ret = 0; 3509 break; 3510 } 3511 3512 leaf = path->nodes[0]; 3513 slot = path->slots[0]; 3514 btrfs_item_key_to_cpu(leaf, &found_key, slot); 3515 3516 if (found_key.objectid != key.objectid) { 3517 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 3518 break; 3519 } 3520 3521 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk); 3522 chunk_type = btrfs_chunk_type(leaf, chunk); 3523 3524 if (!counting) { 3525 spin_lock(&fs_info->balance_lock); 3526 bctl->stat.considered++; 3527 spin_unlock(&fs_info->balance_lock); 3528 } 3529 3530 ret = should_balance_chunk(chunk_root, leaf, chunk, 3531 found_key.offset); 3532 3533 btrfs_release_path(path); 3534 if (!ret) { 3535 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 3536 goto loop; 3537 } 3538 3539 if (counting) { 3540 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 3541 spin_lock(&fs_info->balance_lock); 3542 bctl->stat.expected++; 3543 spin_unlock(&fs_info->balance_lock); 3544 3545 if (chunk_type & BTRFS_BLOCK_GROUP_DATA) 3546 count_data++; 3547 else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) 3548 count_sys++; 3549 else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA) 3550 count_meta++; 3551 3552 goto loop; 3553 } 3554 3555 /* 3556 * Apply limit_min filter, no need to check if the LIMITS 3557 * filter is used, limit_min is 0 by default 3558 */ 3559 if (((chunk_type & BTRFS_BLOCK_GROUP_DATA) && 3560 count_data < bctl->data.limit_min) 3561 || ((chunk_type & BTRFS_BLOCK_GROUP_METADATA) && 3562 count_meta < bctl->meta.limit_min) 3563 || ((chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) && 3564 count_sys < bctl->sys.limit_min)) { 3565 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 3566 goto loop; 3567 } 3568 3569 ASSERT(fs_info->data_sinfo); 3570 spin_lock(&fs_info->data_sinfo->lock); 3571 bytes_used = fs_info->data_sinfo->bytes_used; 3572 spin_unlock(&fs_info->data_sinfo->lock); 3573 3574 if ((chunk_type & BTRFS_BLOCK_GROUP_DATA) && 3575 !chunk_reserved && !bytes_used) { 3576 trans = btrfs_start_transaction(chunk_root, 0); 3577 if (IS_ERR(trans)) { 3578 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 3579 ret = PTR_ERR(trans); 3580 goto error; 3581 } 3582 3583 ret = btrfs_force_chunk_alloc(trans, chunk_root, 3584 BTRFS_BLOCK_GROUP_DATA); 3585 btrfs_end_transaction(trans, chunk_root); 3586 if (ret < 0) { 3587 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 3588 goto error; 3589 } 3590 chunk_reserved = 1; 3591 } 3592 3593 ret = btrfs_relocate_chunk(chunk_root, 3594 found_key.offset); 3595 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 3596 if (ret && ret != -ENOSPC) 3597 goto error; 3598 if (ret == -ENOSPC) { 3599 enospc_errors++; 3600 } else { 3601 spin_lock(&fs_info->balance_lock); 3602 bctl->stat.completed++; 3603 spin_unlock(&fs_info->balance_lock); 3604 } 3605 loop: 3606 if (found_key.offset == 0) 3607 break; 3608 key.offset = found_key.offset - 1; 3609 } 3610 3611 if (counting) { 3612 btrfs_release_path(path); 3613 counting = false; 3614 goto again; 3615 } 3616 error: 3617 btrfs_free_path(path); 3618 if (enospc_errors) { 3619 btrfs_info(fs_info, "%d enospc errors during balance", 3620 enospc_errors); 3621 if (!ret) 3622 ret = -ENOSPC; 3623 } 3624 3625 return ret; 3626 } 3627 3628 /** 3629 * alloc_profile_is_valid - see if a given profile is valid and reduced 3630 * @flags: profile to validate 3631 * @extended: if true @flags is treated as an extended profile 3632 */ 3633 static int 
alloc_profile_is_valid(u64 flags, int extended) 3634 { 3635 u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK : 3636 BTRFS_BLOCK_GROUP_PROFILE_MASK); 3637 3638 flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK; 3639 3640 /* 1) check that all other bits are zeroed */ 3641 if (flags & ~mask) 3642 return 0; 3643 3644 /* 2) see if profile is reduced */ 3645 if (flags == 0) 3646 return !extended; /* "0" is valid for usual profiles */ 3647 3648 /* true if exactly one bit set */ 3649 return (flags & (flags - 1)) == 0; 3650 } 3651 3652 static inline int balance_need_close(struct btrfs_fs_info *fs_info) 3653 { 3654 /* cancel requested || normal exit path */ 3655 return atomic_read(&fs_info->balance_cancel_req) || 3656 (atomic_read(&fs_info->balance_pause_req) == 0 && 3657 atomic_read(&fs_info->balance_cancel_req) == 0); 3658 } 3659 3660 static void __cancel_balance(struct btrfs_fs_info *fs_info) 3661 { 3662 int ret; 3663 3664 unset_balance_control(fs_info); 3665 ret = del_balance_item(fs_info->tree_root); 3666 if (ret) 3667 btrfs_handle_fs_error(fs_info, ret, NULL); 3668 3669 atomic_set(&fs_info->mutually_exclusive_operation_running, 0); 3670 } 3671 3672 /* Non-zero return value signifies invalidity */ 3673 static inline int validate_convert_profile(struct btrfs_balance_args *bctl_arg, 3674 u64 allowed) 3675 { 3676 return ((bctl_arg->flags & BTRFS_BALANCE_ARGS_CONVERT) && 3677 (!alloc_profile_is_valid(bctl_arg->target, 1) || 3678 (bctl_arg->target & ~allowed))); 3679 } 3680 3681 /* 3682 * Should be called with both balance and volume mutexes held 3683 */ 3684 int btrfs_balance(struct btrfs_balance_control *bctl, 3685 struct btrfs_ioctl_balance_args *bargs) 3686 { 3687 struct btrfs_fs_info *fs_info = bctl->fs_info; 3688 u64 allowed; 3689 int mixed = 0; 3690 int ret; 3691 u64 num_devices; 3692 unsigned seq; 3693 3694 if (btrfs_fs_closing(fs_info) || 3695 atomic_read(&fs_info->balance_pause_req) || 3696 atomic_read(&fs_info->balance_cancel_req)) { 3697 ret = -EINVAL; 3698 goto out; 3699 } 3700 3701 allowed = btrfs_super_incompat_flags(fs_info->super_copy); 3702 if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) 3703 mixed = 1; 3704 3705 /* 3706 * In case of mixed groups both data and meta should be picked, 3707 * and identical options should be given for both of them. 
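 * For example, on a filesystem created with mixed block groups,
 * -dconvert=raid1 alone is rejected; it has to be paired with
 * -mconvert=raid1 so that the data and metadata args match.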
3708 */ 3709 allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA; 3710 if (mixed && (bctl->flags & allowed)) { 3711 if (!(bctl->flags & BTRFS_BALANCE_DATA) || 3712 !(bctl->flags & BTRFS_BALANCE_METADATA) || 3713 memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) { 3714 btrfs_err(fs_info, "with mixed groups data and " 3715 "metadata balance options must be the same"); 3716 ret = -EINVAL; 3717 goto out; 3718 } 3719 } 3720 3721 num_devices = fs_info->fs_devices->num_devices; 3722 btrfs_dev_replace_lock(&fs_info->dev_replace, 0); 3723 if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) { 3724 BUG_ON(num_devices < 1); 3725 num_devices--; 3726 } 3727 btrfs_dev_replace_unlock(&fs_info->dev_replace, 0); 3728 allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE | BTRFS_BLOCK_GROUP_DUP; 3729 if (num_devices > 1) 3730 allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1); 3731 if (num_devices > 2) 3732 allowed |= BTRFS_BLOCK_GROUP_RAID5; 3733 if (num_devices > 3) 3734 allowed |= (BTRFS_BLOCK_GROUP_RAID10 | 3735 BTRFS_BLOCK_GROUP_RAID6); 3736 if (validate_convert_profile(&bctl->data, allowed)) { 3737 btrfs_err(fs_info, "unable to start balance with target " 3738 "data profile %llu", 3739 bctl->data.target); 3740 ret = -EINVAL; 3741 goto out; 3742 } 3743 if (validate_convert_profile(&bctl->meta, allowed)) { 3744 btrfs_err(fs_info, 3745 "unable to start balance with target metadata profile %llu", 3746 bctl->meta.target); 3747 ret = -EINVAL; 3748 goto out; 3749 } 3750 if (validate_convert_profile(&bctl->sys, allowed)) { 3751 btrfs_err(fs_info, 3752 "unable to start balance with target system profile %llu", 3753 bctl->sys.target); 3754 ret = -EINVAL; 3755 goto out; 3756 } 3757 3758 /* allow reducing meta or sys integrity only if force is set */ 3759 allowed = BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 | 3760 BTRFS_BLOCK_GROUP_RAID10 | 3761 BTRFS_BLOCK_GROUP_RAID5 | 3762 BTRFS_BLOCK_GROUP_RAID6; 3763 do { 3764 seq = read_seqbegin(&fs_info->profiles_lock); 3765 3766 if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) && 3767 (fs_info->avail_system_alloc_bits & allowed) && 3768 !(bctl->sys.target & allowed)) || 3769 ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) && 3770 (fs_info->avail_metadata_alloc_bits & allowed) && 3771 !(bctl->meta.target & allowed))) { 3772 if (bctl->flags & BTRFS_BALANCE_FORCE) { 3773 btrfs_info(fs_info, "force reducing metadata integrity"); 3774 } else { 3775 btrfs_err(fs_info, "balance will reduce metadata " 3776 "integrity, use force if you want this"); 3777 ret = -EINVAL; 3778 goto out; 3779 } 3780 } 3781 } while (read_seqretry(&fs_info->profiles_lock, seq)); 3782 3783 if (btrfs_get_num_tolerated_disk_barrier_failures(bctl->meta.target) < 3784 btrfs_get_num_tolerated_disk_barrier_failures(bctl->data.target)) { 3785 btrfs_warn(fs_info, 3786 "metadata profile 0x%llx has lower redundancy than data profile 0x%llx", 3787 bctl->meta.target, bctl->data.target); 3788 } 3789 3790 if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) { 3791 fs_info->num_tolerated_disk_barrier_failures = min( 3792 btrfs_calc_num_tolerated_disk_barrier_failures(fs_info), 3793 btrfs_get_num_tolerated_disk_barrier_failures( 3794 bctl->sys.target)); 3795 } 3796 3797 ret = insert_balance_item(fs_info->tree_root, bctl); 3798 if (ret && ret != -EEXIST) 3799 goto out; 3800 3801 if (!(bctl->flags & BTRFS_BALANCE_RESUME)) { 3802 BUG_ON(ret == -EEXIST); 3803 set_balance_control(bctl); 3804 } else { 3805 BUG_ON(ret != -EEXIST); 3806 spin_lock(&fs_info->balance_lock); 3807 update_balance_args(bctl); 3808
spin_unlock(&fs_info->balance_lock); 3809 } 3810 3811 atomic_inc(&fs_info->balance_running); 3812 mutex_unlock(&fs_info->balance_mutex); 3813 3814 ret = __btrfs_balance(fs_info); 3815 3816 mutex_lock(&fs_info->balance_mutex); 3817 atomic_dec(&fs_info->balance_running); 3818 3819 if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) { 3820 fs_info->num_tolerated_disk_barrier_failures = 3821 btrfs_calc_num_tolerated_disk_barrier_failures(fs_info); 3822 } 3823 3824 if (bargs) { 3825 memset(bargs, 0, sizeof(*bargs)); 3826 update_ioctl_balance_args(fs_info, 0, bargs); 3827 } 3828 3829 if ((ret && ret != -ECANCELED && ret != -ENOSPC) || 3830 balance_need_close(fs_info)) { 3831 __cancel_balance(fs_info); 3832 } 3833 3834 wake_up(&fs_info->balance_wait_q); 3835 3836 return ret; 3837 out: 3838 if (bctl->flags & BTRFS_BALANCE_RESUME) 3839 __cancel_balance(fs_info); 3840 else { 3841 kfree(bctl); 3842 atomic_set(&fs_info->mutually_exclusive_operation_running, 0); 3843 } 3844 return ret; 3845 } 3846 3847 static int balance_kthread(void *data) 3848 { 3849 struct btrfs_fs_info *fs_info = data; 3850 int ret = 0; 3851 3852 mutex_lock(&fs_info->volume_mutex); 3853 mutex_lock(&fs_info->balance_mutex); 3854 3855 if (fs_info->balance_ctl) { 3856 btrfs_info(fs_info, "continuing balance"); 3857 ret = btrfs_balance(fs_info->balance_ctl, NULL); 3858 } 3859 3860 mutex_unlock(&fs_info->balance_mutex); 3861 mutex_unlock(&fs_info->volume_mutex); 3862 3863 return ret; 3864 } 3865 3866 int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info) 3867 { 3868 struct task_struct *tsk; 3869 3870 spin_lock(&fs_info->balance_lock); 3871 if (!fs_info->balance_ctl) { 3872 spin_unlock(&fs_info->balance_lock); 3873 return 0; 3874 } 3875 spin_unlock(&fs_info->balance_lock); 3876 3877 if (btrfs_test_opt(fs_info->tree_root, SKIP_BALANCE)) { 3878 btrfs_info(fs_info, "force skipping balance"); 3879 return 0; 3880 } 3881 3882 tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance"); 3883 return PTR_ERR_OR_ZERO(tsk); 3884 } 3885 3886 int btrfs_recover_balance(struct btrfs_fs_info *fs_info) 3887 { 3888 struct btrfs_balance_control *bctl; 3889 struct btrfs_balance_item *item; 3890 struct btrfs_disk_balance_args disk_bargs; 3891 struct btrfs_path *path; 3892 struct extent_buffer *leaf; 3893 struct btrfs_key key; 3894 int ret; 3895 3896 path = btrfs_alloc_path(); 3897 if (!path) 3898 return -ENOMEM; 3899 3900 key.objectid = BTRFS_BALANCE_OBJECTID; 3901 key.type = BTRFS_TEMPORARY_ITEM_KEY; 3902 key.offset = 0; 3903 3904 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0); 3905 if (ret < 0) 3906 goto out; 3907 if (ret > 0) { /* ret = -ENOENT; */ 3908 ret = 0; 3909 goto out; 3910 } 3911 3912 bctl = kzalloc(sizeof(*bctl), GFP_NOFS); 3913 if (!bctl) { 3914 ret = -ENOMEM; 3915 goto out; 3916 } 3917 3918 leaf = path->nodes[0]; 3919 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item); 3920 3921 bctl->fs_info = fs_info; 3922 bctl->flags = btrfs_balance_flags(leaf, item); 3923 bctl->flags |= BTRFS_BALANCE_RESUME; 3924 3925 btrfs_balance_data(leaf, item, &disk_bargs); 3926 btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs); 3927 btrfs_balance_meta(leaf, item, &disk_bargs); 3928 btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs); 3929 btrfs_balance_sys(leaf, item, &disk_bargs); 3930 btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs); 3931 3932 WARN_ON(atomic_xchg(&fs_info->mutually_exclusive_operation_running, 1)); 3933 3934 mutex_lock(&fs_info->volume_mutex); 3935 
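/*
 * For illustration: volume_mutex is taken before balance_mutex here,
 * the same order used by balance_kthread() and btrfs_cancel_balance(),
 * so the resumed balance cannot deadlock against those paths.
 */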
mutex_lock(&fs_info->balance_mutex); 3936 3937 set_balance_control(bctl); 3938 3939 mutex_unlock(&fs_info->balance_mutex); 3940 mutex_unlock(&fs_info->volume_mutex); 3941 out: 3942 btrfs_free_path(path); 3943 return ret; 3944 } 3945 3946 int btrfs_pause_balance(struct btrfs_fs_info *fs_info) 3947 { 3948 int ret = 0; 3949 3950 mutex_lock(&fs_info->balance_mutex); 3951 if (!fs_info->balance_ctl) { 3952 mutex_unlock(&fs_info->balance_mutex); 3953 return -ENOTCONN; 3954 } 3955 3956 if (atomic_read(&fs_info->balance_running)) { 3957 atomic_inc(&fs_info->balance_pause_req); 3958 mutex_unlock(&fs_info->balance_mutex); 3959 3960 wait_event(fs_info->balance_wait_q, 3961 atomic_read(&fs_info->balance_running) == 0); 3962 3963 mutex_lock(&fs_info->balance_mutex); 3964 /* we are good with balance_ctl ripped off from under us */ 3965 BUG_ON(atomic_read(&fs_info->balance_running)); 3966 atomic_dec(&fs_info->balance_pause_req); 3967 } else { 3968 ret = -ENOTCONN; 3969 } 3970 3971 mutex_unlock(&fs_info->balance_mutex); 3972 return ret; 3973 } 3974 3975 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info) 3976 { 3977 if (fs_info->sb->s_flags & MS_RDONLY) 3978 return -EROFS; 3979 3980 mutex_lock(&fs_info->balance_mutex); 3981 if (!fs_info->balance_ctl) { 3982 mutex_unlock(&fs_info->balance_mutex); 3983 return -ENOTCONN; 3984 } 3985 3986 atomic_inc(&fs_info->balance_cancel_req); 3987 /* 3988 * if we are running just wait and return, balance item is 3989 * deleted in btrfs_balance in this case 3990 */ 3991 if (atomic_read(&fs_info->balance_running)) { 3992 mutex_unlock(&fs_info->balance_mutex); 3993 wait_event(fs_info->balance_wait_q, 3994 atomic_read(&fs_info->balance_running) == 0); 3995 mutex_lock(&fs_info->balance_mutex); 3996 } else { 3997 /* __cancel_balance needs volume_mutex */ 3998 mutex_unlock(&fs_info->balance_mutex); 3999 mutex_lock(&fs_info->volume_mutex); 4000 mutex_lock(&fs_info->balance_mutex); 4001 4002 if (fs_info->balance_ctl) 4003 __cancel_balance(fs_info); 4004 4005 mutex_unlock(&fs_info->volume_mutex); 4006 } 4007 4008 BUG_ON(fs_info->balance_ctl || atomic_read(&fs_info->balance_running)); 4009 atomic_dec(&fs_info->balance_cancel_req); 4010 mutex_unlock(&fs_info->balance_mutex); 4011 return 0; 4012 } 4013 4014 static int btrfs_uuid_scan_kthread(void *data) 4015 { 4016 struct btrfs_fs_info *fs_info = data; 4017 struct btrfs_root *root = fs_info->tree_root; 4018 struct btrfs_key key; 4019 struct btrfs_key max_key; 4020 struct btrfs_path *path = NULL; 4021 int ret = 0; 4022 struct extent_buffer *eb; 4023 int slot; 4024 struct btrfs_root_item root_item; 4025 u32 item_size; 4026 struct btrfs_trans_handle *trans = NULL; 4027 4028 path = btrfs_alloc_path(); 4029 if (!path) { 4030 ret = -ENOMEM; 4031 goto out; 4032 } 4033 4034 key.objectid = 0; 4035 key.type = BTRFS_ROOT_ITEM_KEY; 4036 key.offset = 0; 4037 4038 max_key.objectid = (u64)-1; 4039 max_key.type = BTRFS_ROOT_ITEM_KEY; 4040 max_key.offset = (u64)-1; 4041 4042 while (1) { 4043 ret = btrfs_search_forward(root, &key, path, 0); 4044 if (ret) { 4045 if (ret > 0) 4046 ret = 0; 4047 break; 4048 } 4049 4050 if (key.type != BTRFS_ROOT_ITEM_KEY || 4051 (key.objectid < BTRFS_FIRST_FREE_OBJECTID && 4052 key.objectid != BTRFS_FS_TREE_OBJECTID) || 4053 key.objectid > BTRFS_LAST_FREE_OBJECTID) 4054 goto skip; 4055 4056 eb = path->nodes[0]; 4057 slot = path->slots[0]; 4058 item_size = btrfs_item_size_nr(eb, slot); 4059 if (item_size < sizeof(root_item)) 4060 goto skip; 4061 4062 read_extent_buffer(eb, &root_item, 4063 btrfs_item_ptr_offset(eb, 
		if (btrfs_root_refs(&root_item) == 0)
			goto skip;

		if (!btrfs_is_empty_uuid(root_item.uuid) ||
		    !btrfs_is_empty_uuid(root_item.received_uuid)) {
			if (trans)
				goto update_tree;

			btrfs_release_path(path);
			/*
			 * 1 - subvol uuid item
			 * 1 - received_subvol uuid item
			 */
			trans = btrfs_start_transaction(fs_info->uuid_root, 2);
			if (IS_ERR(trans)) {
				ret = PTR_ERR(trans);
				break;
			}
			continue;
		} else {
			goto skip;
		}
update_tree:
		if (!btrfs_is_empty_uuid(root_item.uuid)) {
			ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root,
						  root_item.uuid,
						  BTRFS_UUID_KEY_SUBVOL,
						  key.objectid);
			if (ret < 0) {
				btrfs_warn(fs_info, "uuid_tree_add failed %d",
					   ret);
				break;
			}
		}

		if (!btrfs_is_empty_uuid(root_item.received_uuid)) {
			ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root,
						  root_item.received_uuid,
						  BTRFS_UUID_KEY_RECEIVED_SUBVOL,
						  key.objectid);
			if (ret < 0) {
				btrfs_warn(fs_info, "uuid_tree_add failed %d",
					   ret);
				break;
			}
		}

skip:
		if (trans) {
			ret = btrfs_end_transaction(trans, fs_info->uuid_root);
			trans = NULL;
			if (ret)
				break;
		}

		btrfs_release_path(path);
		if (key.offset < (u64)-1) {
			key.offset++;
		} else if (key.type < BTRFS_ROOT_ITEM_KEY) {
			key.offset = 0;
			key.type = BTRFS_ROOT_ITEM_KEY;
		} else if (key.objectid < (u64)-1) {
			key.offset = 0;
			key.type = BTRFS_ROOT_ITEM_KEY;
			key.objectid++;
		} else {
			break;
		}
		cond_resched();
	}

out:
	btrfs_free_path(path);
	if (trans && !IS_ERR(trans))
		btrfs_end_transaction(trans, fs_info->uuid_root);
	if (ret)
		btrfs_warn(fs_info, "btrfs_uuid_scan_kthread failed %d", ret);
	else
		fs_info->update_uuid_tree_gen = 1;
	up(&fs_info->uuid_tree_rescan_sem);
	return 0;
}

/*
 * Callback for btrfs_uuid_tree_iterate().
 * returns:
 * 0	check succeeded, the entry is not outdated.
 * < 0	if an error occurred.
 * > 0	if the check failed, which means the caller shall remove the entry.
 */
static int btrfs_check_uuid_tree_entry(struct btrfs_fs_info *fs_info,
				       u8 *uuid, u8 type, u64 subid)
{
	struct btrfs_key key;
	int ret = 0;
	struct btrfs_root *subvol_root;

	if (type != BTRFS_UUID_KEY_SUBVOL &&
	    type != BTRFS_UUID_KEY_RECEIVED_SUBVOL)
		goto out;

	key.objectid = subid;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;
	subvol_root = btrfs_read_fs_root_no_name(fs_info, &key);
	if (IS_ERR(subvol_root)) {
		ret = PTR_ERR(subvol_root);
		if (ret == -ENOENT)
			ret = 1;
		goto out;
	}

	switch (type) {
	case BTRFS_UUID_KEY_SUBVOL:
		if (memcmp(uuid, subvol_root->root_item.uuid, BTRFS_UUID_SIZE))
			ret = 1;
		break;
	case BTRFS_UUID_KEY_RECEIVED_SUBVOL:
		if (memcmp(uuid, subvol_root->root_item.received_uuid,
			   BTRFS_UUID_SIZE))
			ret = 1;
		break;
	}

out:
	return ret;
}

static int btrfs_uuid_rescan_kthread(void *data)
{
	struct btrfs_fs_info *fs_info = (struct btrfs_fs_info *)data;
	int ret;

	/*
	 * 1st step is to iterate through the existing UUID tree and
	 * to delete all entries that contain outdated data.
	 * 2nd step is to add all missing entries to the UUID tree.
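	 * (The second step is carried out by handing off to
	 * btrfs_uuid_scan_kthread() below.)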
	 */
	ret = btrfs_uuid_tree_iterate(fs_info, btrfs_check_uuid_tree_entry);
	if (ret < 0) {
		btrfs_warn(fs_info, "iterating uuid_tree failed %d", ret);
		up(&fs_info->uuid_tree_rescan_sem);
		return ret;
	}
	return btrfs_uuid_scan_kthread(data);
}

int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *uuid_root;
	struct task_struct *task;
	int ret;

	/*
	 * 1 - root node
	 * 1 - root item
	 */
	trans = btrfs_start_transaction(tree_root, 2);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	uuid_root = btrfs_create_tree(trans, fs_info,
				      BTRFS_UUID_TREE_OBJECTID);
	if (IS_ERR(uuid_root)) {
		ret = PTR_ERR(uuid_root);
		btrfs_abort_transaction(trans, tree_root, ret);
		return ret;
	}

	fs_info->uuid_root = uuid_root;

	ret = btrfs_commit_transaction(trans, tree_root);
	if (ret)
		return ret;

	down(&fs_info->uuid_tree_rescan_sem);
	task = kthread_run(btrfs_uuid_scan_kthread, fs_info, "btrfs-uuid");
	if (IS_ERR(task)) {
		/* fs_info->update_uuid_tree_gen remains 0 in all error cases */
		btrfs_warn(fs_info, "failed to start uuid_scan task");
		up(&fs_info->uuid_tree_rescan_sem);
		return PTR_ERR(task);
	}

	return 0;
}

int btrfs_check_uuid_tree(struct btrfs_fs_info *fs_info)
{
	struct task_struct *task;

	down(&fs_info->uuid_tree_rescan_sem);
	task = kthread_run(btrfs_uuid_rescan_kthread, fs_info, "btrfs-uuid");
	if (IS_ERR(task)) {
		/* fs_info->update_uuid_tree_gen remains 0 in all error cases */
		btrfs_warn(fs_info, "failed to start uuid_rescan task");
		up(&fs_info->uuid_tree_rescan_sem);
		return PTR_ERR(task);
	}

	return 0;
}

/*
 * shrinking a device means finding all of the device extents past
 * the new size, and then following the back refs to the chunks.
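 * Each chunk found this way is relocated via btrfs_relocate_chunk().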
 * The chunk relocation code actually frees the device extent.
 */
int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent = NULL;
	struct btrfs_path *path;
	u64 length;
	u64 chunk_offset;
	int ret;
	int slot;
	int failed = 0;
	bool retried = false;
	bool checked_pending_chunks = false;
	struct extent_buffer *l;
	struct btrfs_key key;
	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
	u64 old_total = btrfs_super_total_bytes(super_copy);
	u64 old_size = btrfs_device_get_total_bytes(device);
	u64 diff = old_size - new_size;

	if (device->is_tgtdev_for_dev_replace)
		return -EINVAL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = READA_FORWARD;

	lock_chunks(root);

	btrfs_device_set_total_bytes(device, new_size);
	if (device->writeable) {
		device->fs_devices->total_rw_bytes -= diff;
		spin_lock(&root->fs_info->free_chunk_lock);
		root->fs_info->free_chunk_space -= diff;
		spin_unlock(&root->fs_info->free_chunk_lock);
	}
	unlock_chunks(root);

again:
	key.objectid = device->devid;
	key.offset = (u64)-1;
	key.type = BTRFS_DEV_EXTENT_KEY;

	do {
		mutex_lock(&root->fs_info->delete_unused_bgs_mutex);
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0) {
			mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
			goto done;
		}

		ret = btrfs_previous_item(root, path, 0, key.type);
		if (ret)
			mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
		if (ret < 0)
			goto done;
		if (ret) {
			ret = 0;
			btrfs_release_path(path);
			break;
		}

		l = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(l, &key, path->slots[0]);

		if (key.objectid != device->devid) {
			mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
			btrfs_release_path(path);
			break;
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		length = btrfs_dev_extent_length(l, dev_extent);

		if (key.offset + length <= new_size) {
			mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
			btrfs_release_path(path);
			break;
		}

		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
		btrfs_release_path(path);

		ret = btrfs_relocate_chunk(root, chunk_offset);
		mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
		if (ret && ret != -ENOSPC)
			goto done;
		if (ret == -ENOSPC)
			failed++;
	} while (key.offset-- > 0);

	if (failed && !retried) {
		failed = 0;
		retried = true;
		goto again;
	} else if (failed && retried) {
		ret = -ENOSPC;
		goto done;
	}

	/* Shrinking succeeded, else we would be at "done". */
	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto done;
	}

	lock_chunks(root);

	/*
	 * We checked in the above loop all device extents that were already in
	 * the device tree.
	 * However, before we have updated the device's
	 * total_bytes to the new size, we might have had chunk allocations that
	 * have not completed yet (new block groups attached to transaction
	 * handles), and therefore their device extents were not yet in the
	 * device tree and we missed them in the loop above. So if we have any
	 * pending chunk using a device extent that overlaps the device range
	 * that we cannot use anymore, commit the current transaction and
	 * repeat the search on the device tree - this way we guarantee we will
	 * not have chunks using device extents that end beyond 'new_size'.
	 */
	if (!checked_pending_chunks) {
		u64 start = new_size;
		u64 len = old_size - new_size;

		if (contains_pending_extent(trans->transaction, device,
					    &start, len)) {
			unlock_chunks(root);
			checked_pending_chunks = true;
			failed = 0;
			retried = false;
			ret = btrfs_commit_transaction(trans, root);
			if (ret)
				goto done;
			goto again;
		}
	}

	btrfs_device_set_disk_total_bytes(device, new_size);
	if (list_empty(&device->resized_list))
		list_add_tail(&device->resized_list,
			      &root->fs_info->fs_devices->resized_devices);

	WARN_ON(diff > old_total);
	btrfs_set_super_total_bytes(super_copy, old_total - diff);
	unlock_chunks(root);

	/* Now btrfs_update_device() will change the on-disk size. */
	ret = btrfs_update_device(trans, device);
	btrfs_end_transaction(trans, root);
done:
	btrfs_free_path(path);
	if (ret) {
		lock_chunks(root);
		btrfs_device_set_total_bytes(device, old_size);
		if (device->writeable)
			device->fs_devices->total_rw_bytes += diff;
		spin_lock(&root->fs_info->free_chunk_lock);
		root->fs_info->free_chunk_space += diff;
		spin_unlock(&root->fs_info->free_chunk_lock);
		unlock_chunks(root);
	}
	return ret;
}

static int btrfs_add_system_chunk(struct btrfs_root *root,
				  struct btrfs_key *key,
				  struct btrfs_chunk *chunk, int item_size)
{
	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
	struct btrfs_disk_key disk_key;
	u32 array_size;
	u8 *ptr;

	lock_chunks(root);
	array_size = btrfs_super_sys_array_size(super_copy);
	if (array_size + item_size + sizeof(disk_key)
			> BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
		unlock_chunks(root);
		return -EFBIG;
	}

	ptr = super_copy->sys_chunk_array + array_size;
	btrfs_cpu_key_to_disk(&disk_key, key);
	memcpy(ptr, &disk_key, sizeof(disk_key));
	ptr += sizeof(disk_key);
	memcpy(ptr, chunk, item_size);
	item_size += sizeof(disk_key);
	btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
	unlock_chunks(root);

	return 0;
}

/*
 * sort the devices in descending order by max_avail, total_avail
 */
static int btrfs_cmp_device_info(const void *a, const void *b)
{
	const struct btrfs_device_info *di_a = a;
	const struct btrfs_device_info *di_b = b;

	if (di_a->max_avail > di_b->max_avail)
		return -1;
	if (di_a->max_avail < di_b->max_avail)
		return 1;
	if (di_a->total_avail > di_b->total_avail)
		return -1;
	if (di_a->total_avail < di_b->total_avail)
		return 1;
	return 0;
}

static u32 find_raid56_stripe_len(u32 data_devices, u32 dev_stripe_target)
{
	/* TODO allow them to set a preferred stripe size */
	return SZ_64K;
}

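/*
 * Once the first RAID5/6 chunk has been allocated, mark the filesystem
 * with the RAID56 incompat bit. Note that this helper only ever sets
 * the flag; nothing in this file clears it again.
 */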
static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
{
	if (!(type & BTRFS_BLOCK_GROUP_RAID56_MASK))
		return;

	btrfs_set_fs_incompat(info, RAID56);
}

#define BTRFS_MAX_DEVS(r) ((BTRFS_LEAF_DATA_SIZE(r)		\
			- sizeof(struct btrfs_item)		\
			- sizeof(struct btrfs_chunk))		\
			/ sizeof(struct btrfs_stripe) + 1)

#define BTRFS_MAX_DEVS_SYS_CHUNK ((BTRFS_SYSTEM_CHUNK_ARRAY_SIZE	\
			- 2 * sizeof(struct btrfs_disk_key)		\
			- 2 * sizeof(struct btrfs_chunk))		\
			/ sizeof(struct btrfs_stripe) + 1)

static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
			       struct btrfs_root *extent_root, u64 start,
			       u64 type)
{
	struct btrfs_fs_info *info = extent_root->fs_info;
	struct btrfs_fs_devices *fs_devices = info->fs_devices;
	struct list_head *cur;
	struct map_lookup *map = NULL;
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	struct btrfs_device_info *devices_info = NULL;
	u64 total_avail;
	int num_stripes;	/* total number of stripes to allocate */
	int data_stripes;	/* number of stripes that count for
				   block group size */
	int sub_stripes;	/* sub_stripes info for map */
	int dev_stripes;	/* stripes per dev */
	int devs_max;		/* max devs to use */
	int devs_min;		/* min devs needed */
	int devs_increment;	/* ndevs has to be a multiple of this */
	int ncopies;		/* how many copies of the data there are */
	int ret;
	u64 max_stripe_size;
	u64 max_chunk_size;
	u64 stripe_size;
	u64 num_bytes;
	u64 raid_stripe_len = BTRFS_STRIPE_LEN;
	int ndevs;
	int i;
	int j;
	int index;

	BUG_ON(!alloc_profile_is_valid(type, 0));

	if (list_empty(&fs_devices->alloc_list))
		return -ENOSPC;

	index = __get_raid_index(type);

	sub_stripes = btrfs_raid_array[index].sub_stripes;
	dev_stripes = btrfs_raid_array[index].dev_stripes;
	devs_max = btrfs_raid_array[index].devs_max;
	devs_min = btrfs_raid_array[index].devs_min;
	devs_increment = btrfs_raid_array[index].devs_increment;
	ncopies = btrfs_raid_array[index].ncopies;

	if (type & BTRFS_BLOCK_GROUP_DATA) {
		max_stripe_size = SZ_1G;
		max_chunk_size = 10 * max_stripe_size;
		if (!devs_max)
			devs_max = BTRFS_MAX_DEVS(info->chunk_root);
	} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
		/* for larger filesystems, use larger metadata chunks */
		if (fs_devices->total_rw_bytes > 50ULL * SZ_1G)
			max_stripe_size = SZ_1G;
		else
			max_stripe_size = SZ_256M;
		max_chunk_size = max_stripe_size;
		if (!devs_max)
			devs_max = BTRFS_MAX_DEVS(info->chunk_root);
	} else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
		max_stripe_size = SZ_32M;
		max_chunk_size = 2 * max_stripe_size;
		if (!devs_max)
			devs_max = BTRFS_MAX_DEVS_SYS_CHUNK;
	} else {
		btrfs_err(info, "invalid chunk type 0x%llx requested",
			  type);
		BUG_ON(1);
	}

	/* we don't want a chunk larger than 10% of writeable space */
	max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
			     max_chunk_size);

	devices_info = kcalloc(fs_devices->rw_devices, sizeof(*devices_info),
			       GFP_NOFS);
	if (!devices_info)
		return -ENOMEM;

	cur = fs_devices->alloc_list.next;

	/*
	 * in the first pass through the devices list, we gather information
	 * about the available holes on each device.
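	 * Devices that are read-only, not in the fs metadata, dev-replace
	 * targets or completely full are skipped.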
	 */
	ndevs = 0;
	while (cur != &fs_devices->alloc_list) {
		struct btrfs_device *device;
		u64 max_avail;
		u64 dev_offset;

		device = list_entry(cur, struct btrfs_device, dev_alloc_list);

		cur = cur->next;

		if (!device->writeable) {
			WARN(1, KERN_ERR
			     "BTRFS: read-only device in alloc_list\n");
			continue;
		}

		if (!device->in_fs_metadata ||
		    device->is_tgtdev_for_dev_replace)
			continue;

		if (device->total_bytes > device->bytes_used)
			total_avail = device->total_bytes - device->bytes_used;
		else
			total_avail = 0;

		/* If there is no space on this device, skip it. */
		if (total_avail == 0)
			continue;

		ret = find_free_dev_extent(trans, device,
					   max_stripe_size * dev_stripes,
					   &dev_offset, &max_avail);
		if (ret && ret != -ENOSPC)
			goto error;

		if (ret == 0)
			max_avail = max_stripe_size * dev_stripes;

		if (max_avail < BTRFS_STRIPE_LEN * dev_stripes)
			continue;

		if (ndevs == fs_devices->rw_devices) {
			WARN(1, "%s: found more than %llu devices\n",
			     __func__, fs_devices->rw_devices);
			break;
		}
		devices_info[ndevs].dev_offset = dev_offset;
		devices_info[ndevs].max_avail = max_avail;
		devices_info[ndevs].total_avail = total_avail;
		devices_info[ndevs].dev = device;
		++ndevs;
	}

	/*
	 * now sort the devices by hole size / available space
	 */
	sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
	     btrfs_cmp_device_info, NULL);

	/* round down to number of usable stripes */
	ndevs -= ndevs % devs_increment;

	if (ndevs < devs_increment * sub_stripes || ndevs < devs_min) {
		ret = -ENOSPC;
		goto error;
	}

	if (devs_max && ndevs > devs_max)
		ndevs = devs_max;
	/*
	 * the primary goal is to maximize the number of stripes, so use as
	 * many devices as possible, even if the stripes are not maximum sized.
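	 * (For example, RAID10 has devs_increment == 2, so 5 usable devices
	 * get rounded down to 4 above.)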
	 */
	stripe_size = devices_info[ndevs-1].max_avail;
	num_stripes = ndevs * dev_stripes;

	/*
	 * this will have to be fixed for RAID1 and RAID10 over
	 * more drives
	 */
	data_stripes = num_stripes / ncopies;

	if (type & BTRFS_BLOCK_GROUP_RAID5) {
		raid_stripe_len = find_raid56_stripe_len(ndevs - 1,
				 btrfs_super_stripesize(info->super_copy));
		data_stripes = num_stripes - 1;
	}
	if (type & BTRFS_BLOCK_GROUP_RAID6) {
		raid_stripe_len = find_raid56_stripe_len(ndevs - 2,
				 btrfs_super_stripesize(info->super_copy));
		data_stripes = num_stripes - 2;
	}

	/*
	 * Use the number of data stripes to figure out how big this chunk
	 * is really going to be in terms of logical address space,
	 * and compare that answer with the max chunk size
	 */
	if (stripe_size * data_stripes > max_chunk_size) {
		u64 mask = (1ULL << 24) - 1;

		stripe_size = div_u64(max_chunk_size, data_stripes);

		/* bump the answer up to a 16MB boundary */
		stripe_size = (stripe_size + mask) & ~mask;

		/*
		 * but don't go higher than the limits we found
		 * while searching for free extents
		 */
		if (stripe_size > devices_info[ndevs-1].max_avail)
			stripe_size = devices_info[ndevs-1].max_avail;
	}

	stripe_size = div_u64(stripe_size, dev_stripes);

	/* align to BTRFS_STRIPE_LEN */
	stripe_size = div_u64(stripe_size, raid_stripe_len);
	stripe_size *= raid_stripe_len;

	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
	if (!map) {
		ret = -ENOMEM;
		goto error;
	}
	map->num_stripes = num_stripes;

	for (i = 0; i < ndevs; ++i) {
		for (j = 0; j < dev_stripes; ++j) {
			int s = i * dev_stripes + j;
			map->stripes[s].dev = devices_info[i].dev;
			map->stripes[s].physical = devices_info[i].dev_offset +
						   j * stripe_size;
		}
	}
	map->sector_size = extent_root->sectorsize;
	map->stripe_len = raid_stripe_len;
	map->io_align = raid_stripe_len;
	map->io_width = raid_stripe_len;
	map->type = type;
	map->sub_stripes = sub_stripes;

	num_bytes = stripe_size * data_stripes;

	trace_btrfs_chunk_alloc(info->chunk_root, map, start, num_bytes);

	em = alloc_extent_map();
	if (!em) {
		kfree(map);
		ret = -ENOMEM;
		goto error;
	}
	set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
	em->map_lookup = map;
	em->start = start;
	em->len = num_bytes;
	em->block_start = 0;
	em->block_len = em->len;
	em->orig_block_len = stripe_size;

	em_tree = &extent_root->fs_info->mapping_tree.map_tree;
	write_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em, 0);
	if (!ret) {
		list_add_tail(&em->list, &trans->transaction->pending_chunks);
		atomic_inc(&em->refs);
	}
	write_unlock(&em_tree->lock);
	if (ret) {
		free_extent_map(em);
		goto error;
	}

	ret = btrfs_make_block_group(trans, extent_root, 0, type,
				     BTRFS_FIRST_CHUNK_TREE_OBJECTID,
				     start, num_bytes);
	if (ret)
		goto error_del_extent;
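
	/*
	 * Account for the new stripes: bump bytes_used on each device and
	 * take the same amount out of the global free_chunk_space pool.
	 */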
	for (i = 0; i < map->num_stripes; i++) {
		num_bytes = map->stripes[i].dev->bytes_used + stripe_size;
		btrfs_device_set_bytes_used(map->stripes[i].dev, num_bytes);
	}

	spin_lock(&extent_root->fs_info->free_chunk_lock);
	extent_root->fs_info->free_chunk_space -= (stripe_size *
						   map->num_stripes);
	spin_unlock(&extent_root->fs_info->free_chunk_lock);

	free_extent_map(em);
	check_raid56_incompat_flag(extent_root->fs_info, type);

	kfree(devices_info);
	return 0;

error_del_extent:
	write_lock(&em_tree->lock);
	remove_extent_mapping(em_tree, em);
	write_unlock(&em_tree->lock);

	/* One for our allocation */
	free_extent_map(em);
	/* One for the tree reference */
	free_extent_map(em);
	/* One for the pending_chunks list reference */
	free_extent_map(em);
error:
	kfree(devices_info);
	return ret;
}

int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
			     struct btrfs_root *extent_root,
			     u64 chunk_offset, u64 chunk_size)
{
	struct btrfs_key key;
	struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
	struct btrfs_device *device;
	struct btrfs_chunk *chunk;
	struct btrfs_stripe *stripe;
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	struct map_lookup *map;
	size_t item_size;
	u64 dev_offset;
	u64 stripe_size;
	int i = 0;
	int ret = 0;

	em_tree = &extent_root->fs_info->mapping_tree.map_tree;
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_offset, chunk_size);
	read_unlock(&em_tree->lock);

	if (!em) {
		btrfs_crit(extent_root->fs_info,
			   "unable to find logical %Lu len %Lu",
			   chunk_offset, chunk_size);
		return -EINVAL;
	}

	if (em->start != chunk_offset || em->len != chunk_size) {
		btrfs_crit(extent_root->fs_info,
			   "found a bad mapping, wanted %Lu-%Lu, found %Lu-%Lu",
			   chunk_offset, chunk_size, em->start, em->len);
		free_extent_map(em);
		return -EINVAL;
	}

	map = em->map_lookup;
	item_size = btrfs_chunk_item_size(map->num_stripes);
	stripe_size = em->orig_block_len;

	chunk = kzalloc(item_size, GFP_NOFS);
	if (!chunk) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * Take the device list mutex to prevent races with the final phase of
	 * a device replace operation that replaces the device object associated
	 * with the map's stripes, because the device object's id can change
	 * at any time during that final phase of the device replace operation
	 * (dev-replace.c:btrfs_dev_replace_finishing()).
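	 * Holding the mutex across both loops below also keeps each stripe's
	 * devid and uuid stable while the chunk item is filled in.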
	 */
	mutex_lock(&chunk_root->fs_info->fs_devices->device_list_mutex);
	for (i = 0; i < map->num_stripes; i++) {
		device = map->stripes[i].dev;
		dev_offset = map->stripes[i].physical;

		ret = btrfs_update_device(trans, device);
		if (ret)
			break;
		ret = btrfs_alloc_dev_extent(trans, device,
					     chunk_root->root_key.objectid,
					     BTRFS_FIRST_CHUNK_TREE_OBJECTID,
					     chunk_offset, dev_offset,
					     stripe_size);
		if (ret)
			break;
	}
	if (ret) {
		mutex_unlock(&chunk_root->fs_info->fs_devices->device_list_mutex);
		goto out;
	}

	stripe = &chunk->stripe;
	for (i = 0; i < map->num_stripes; i++) {
		device = map->stripes[i].dev;
		dev_offset = map->stripes[i].physical;

		btrfs_set_stack_stripe_devid(stripe, device->devid);
		btrfs_set_stack_stripe_offset(stripe, dev_offset);
		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
		stripe++;
	}
	mutex_unlock(&chunk_root->fs_info->fs_devices->device_list_mutex);

	btrfs_set_stack_chunk_length(chunk, chunk_size);
	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
	btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
	btrfs_set_stack_chunk_type(chunk, map->type);
	btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
	btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
	btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
	btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
	btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);

	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.type = BTRFS_CHUNK_ITEM_KEY;
	key.offset = chunk_offset;

	ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
	if (ret == 0 && map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
		/*
		 * TODO: Cleanup of inserted chunk root in case of
		 * failure.
		 */
		ret = btrfs_add_system_chunk(chunk_root, &key, chunk,
					     item_size);
	}

out:
	kfree(chunk);
	free_extent_map(em);
	return ret;
}

/*
 * Chunk allocation falls into two parts. The first part does the work
 * that makes the newly allocated chunk usable, but does not do any operation
 * that modifies the chunk tree. The second part does the work that
 * requires modifying the chunk tree. This division is important for the
 * bootstrap process of adding storage to a seed btrfs.
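 * (__btrfs_alloc_chunk() above implements the first part and
 * btrfs_finish_chunk_alloc() the second.)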
 */
int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
		      struct btrfs_root *extent_root, u64 type)
{
	u64 chunk_offset;

	ASSERT(mutex_is_locked(&extent_root->fs_info->chunk_mutex));
	chunk_offset = find_next_chunk(extent_root->fs_info);
	return __btrfs_alloc_chunk(trans, extent_root, chunk_offset, type);
}

static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root,
					 struct btrfs_device *device)
{
	u64 chunk_offset;
	u64 sys_chunk_offset;
	u64 alloc_profile;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *extent_root = fs_info->extent_root;
	int ret;

	chunk_offset = find_next_chunk(fs_info);
	alloc_profile = btrfs_get_alloc_profile(extent_root, 0);
	ret = __btrfs_alloc_chunk(trans, extent_root, chunk_offset,
				  alloc_profile);
	if (ret)
		return ret;

	sys_chunk_offset = find_next_chunk(root->fs_info);
	alloc_profile = btrfs_get_alloc_profile(fs_info->chunk_root, 0);
	ret = __btrfs_alloc_chunk(trans, extent_root, sys_chunk_offset,
				  alloc_profile);
	return ret;
}

static inline int btrfs_chunk_max_errors(struct map_lookup *map)
{
	int max_errors;

	if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
			 BTRFS_BLOCK_GROUP_RAID10 |
			 BTRFS_BLOCK_GROUP_RAID5 |
			 BTRFS_BLOCK_GROUP_DUP)) {
		max_errors = 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID6) {
		max_errors = 2;
	} else {
		max_errors = 0;
	}

	return max_errors;
}

int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
	int readonly = 0;
	int miss_ndevs = 0;
	int i;

	read_lock(&map_tree->map_tree.lock);
	em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
	read_unlock(&map_tree->map_tree.lock);
	if (!em)
		return 1;

	map = em->map_lookup;
	for (i = 0; i < map->num_stripes; i++) {
		if (map->stripes[i].dev->missing) {
			miss_ndevs++;
			continue;
		}

		if (!map->stripes[i].dev->writeable) {
			readonly = 1;
			goto end;
		}
	}

	/*
	 * If the number of missing devices is larger than max errors,
	 * we can not write the data into that chunk successfully, so
	 * set it readonly.
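	 * (For example, a RAID1 chunk with both stripes on missing devices:
	 * RAID1 tolerates only a single error.)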
	 */
	if (miss_ndevs > btrfs_chunk_max_errors(map))
		readonly = 1;
end:
	free_extent_map(em);
	return readonly;
}

void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
{
	extent_map_tree_init(&tree->map_tree);
}

void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
{
	struct extent_map *em;

	while (1) {
		write_lock(&tree->map_tree.lock);
		em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
		if (em)
			remove_extent_mapping(&tree->map_tree, em);
		write_unlock(&tree->map_tree.lock);
		if (!em)
			break;
		/* once for us */
		free_extent_map(em);
		/* once for the tree */
		free_extent_map(em);
	}
}

int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
{
	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
	struct extent_map *em;
	struct map_lookup *map;
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	int ret;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, logical, len);
	read_unlock(&em_tree->lock);

	/*
	 * We could return errors for these cases, but that could get ugly and
	 * we'd probably just exit without doing anything else anyway, so
	 * return 1 so the callers don't try to use other copies.
	 */
	if (!em) {
		btrfs_crit(fs_info, "No mapping for %Lu-%Lu", logical,
			   logical+len);
		return 1;
	}

	if (em->start > logical || em->start + em->len < logical) {
		btrfs_crit(fs_info, "Invalid mapping for %Lu-%Lu, got %Lu-%Lu",
			   logical, logical+len, em->start,
			   em->start + em->len);
		free_extent_map(em);
		return 1;
	}

	map = em->map_lookup;
	if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
		ret = map->num_stripes;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		ret = map->sub_stripes;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
		ret = 2;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
		ret = 3;
	else
		ret = 1;
	free_extent_map(em);

	btrfs_dev_replace_lock(&fs_info->dev_replace, 0);
	if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))
		ret++;
	btrfs_dev_replace_unlock(&fs_info->dev_replace, 0);

	return ret;
}

unsigned long btrfs_full_stripe_len(struct btrfs_root *root,
				    struct btrfs_mapping_tree *map_tree,
				    u64 logical)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	unsigned long len = root->sectorsize;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, logical, len);
	read_unlock(&em_tree->lock);
	BUG_ON(!em);

	BUG_ON(em->start > logical || em->start + em->len < logical);
	map = em->map_lookup;
	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
		len = map->stripe_len * nr_data_stripes(map);
	free_extent_map(em);
	return len;
}

int btrfs_is_parity_mirror(struct btrfs_mapping_tree *map_tree,
			   u64 logical, u64 len, int mirror_num)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	int ret = 0;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, logical, len);
	read_unlock(&em_tree->lock);
	BUG_ON(!em);

	BUG_ON(em->start > logical || em->start + em->len < logical);
	map = em->map_lookup;
	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
		ret = 1;
	free_extent_map(em);
	return ret;
}

static int find_live_mirror(struct btrfs_fs_info *fs_info,
			    struct map_lookup *map, int first, int num,
			    int optimal, int dev_replace_is_ongoing)
{
	int i;
	int tolerance;
	struct btrfs_device *srcdev;

	if (dev_replace_is_ongoing &&
	    fs_info->dev_replace.cont_reading_from_srcdev_mode ==
	     BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID)
		srcdev = fs_info->dev_replace.srcdev;
	else
		srcdev = NULL;

	/*
	 * try to avoid the drive that is the source drive for a
	 * dev-replace procedure, only choose it if no other non-missing
	 * mirror is available
	 */
	for (tolerance = 0; tolerance < 2; tolerance++) {
		if (map->stripes[optimal].dev->bdev &&
		    (tolerance || map->stripes[optimal].dev != srcdev))
			return optimal;
		for (i = first; i < first + num; i++) {
			if (map->stripes[i].dev->bdev &&
			    (tolerance || map->stripes[i].dev != srcdev))
				return i;
		}
	}

	/*
	 * we couldn't find one that doesn't fail. Just return something
	 * and the io error handling code will clean up eventually
	 */
	return optimal;
}

static inline int parity_smaller(u64 a, u64 b)
{
	return a > b;
}

/* Bubble-sort the stripe set to put the parity/syndrome stripes last */
static void sort_parity_stripes(struct btrfs_bio *bbio, int num_stripes)
{
	struct btrfs_bio_stripe s;
	int i;
	u64 l;
	int again = 1;

	while (again) {
		again = 0;
		for (i = 0; i < num_stripes - 1; i++) {
			if (parity_smaller(bbio->raid_map[i],
					   bbio->raid_map[i+1])) {
				s = bbio->stripes[i];
				l = bbio->raid_map[i];
				bbio->stripes[i] = bbio->stripes[i+1];
				bbio->raid_map[i] = bbio->raid_map[i+1];
				bbio->stripes[i+1] = s;
				bbio->raid_map[i+1] = l;

				again = 1;
			}
		}
	}
}

static struct btrfs_bio *alloc_btrfs_bio(int total_stripes, int real_stripes)
{
	struct btrfs_bio *bbio = kzalloc(
		 /* the size of the btrfs_bio */
		sizeof(struct btrfs_bio) +
		/* plus the variable array for the stripes */
		sizeof(struct btrfs_bio_stripe) * (total_stripes) +
		/* plus the variable array for the tgt dev */
		sizeof(int) * (real_stripes) +
		/*
		 * plus the raid_map, which includes both the tgt dev
		 * and the stripes
		 */
		sizeof(u64) * (total_stripes),
		GFP_NOFS|__GFP_NOFAIL);

	atomic_set(&bbio->error, 0);
	atomic_set(&bbio->refs, 1);

	return bbio;
}

void btrfs_get_bbio(struct btrfs_bio *bbio)
{
	WARN_ON(!atomic_read(&bbio->refs));
	atomic_inc(&bbio->refs);
}

void btrfs_put_bbio(struct btrfs_bio *bbio)
{
	if (!bbio)
		return;
	if (atomic_dec_and_test(&bbio->refs))
		kfree(bbio);
}

static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
			     u64 logical, u64 *length,
			     struct btrfs_bio **bbio_ret,
			     int mirror_num, int need_raid_map)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	u64 offset;
	u64 stripe_offset;
	u64 stripe_end_offset;
	u64 stripe_nr;
	u64 stripe_nr_orig;
	u64 stripe_nr_end;
	u64 stripe_len;
	u32 stripe_index;
	int i;
	int ret = 0;
	int num_stripes;
	int max_errors = 0;
	int tgtdev_indexes = 0;
	struct btrfs_bio *bbio = NULL;
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
	int dev_replace_is_ongoing = 0;
	int num_alloc_stripes;
	int patch_the_first_stripe_for_dev_replace = 0;
	u64 physical_to_patch_in_first_stripe = 0;
	u64 raid56_full_stripe_start = (u64)-1;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, logical, *length);
	read_unlock(&em_tree->lock);

	if (!em) {
		btrfs_crit(fs_info, "unable to find logical %llu len %llu",
			   logical, *length);
		return -EINVAL;
	}

	if (em->start > logical || em->start + em->len < logical) {
		btrfs_crit(fs_info, "found a bad mapping, wanted %Lu, found %Lu-%Lu",
			   logical, em->start, em->start + em->len);
		free_extent_map(em);
		return -EINVAL;
	}

	map = em->map_lookup;
	offset = logical - em->start;

	stripe_len = map->stripe_len;
	stripe_nr = offset;
	/*
	 * stripe_nr counts the total number of stripes we have to stride
	 * to get to this block
	 */
	stripe_nr = div64_u64(stripe_nr, stripe_len);

	stripe_offset = stripe_nr * stripe_len;
	if (offset < stripe_offset) {
		btrfs_crit(fs_info, "stripe math has gone wrong, stripe_offset=%llu, offset=%llu, start=%llu, logical=%llu, stripe_len=%llu",
			   stripe_offset, offset, em->start, logical,
			   stripe_len);
		free_extent_map(em);
		return -EINVAL;
	}

	/* stripe_offset is the offset of this block in its stripe */
	stripe_offset = offset - stripe_offset;

	/* if we're here for raid56, we need to know the stripe aligned start */
	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		unsigned long full_stripe_len = stripe_len * nr_data_stripes(map);
		raid56_full_stripe_start = offset;

		/*
		 * allow a write of a full stripe, but make sure we don't
		 * allow straddling of stripes
		 */
		raid56_full_stripe_start = div64_u64(raid56_full_stripe_start,
						     full_stripe_len);
		raid56_full_stripe_start *= full_stripe_len;
	}

	if (rw & REQ_DISCARD) {
		/* we don't discard raid56 yet */
		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
			ret = -EOPNOTSUPP;
			goto out;
		}
		*length = min_t(u64, em->len - offset, *length);
	} else if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
		u64 max_len;
		/*
		 * For writes to RAID[56], allow a full stripeset across all
		 * disks. For other RAID types and for RAID[56] reads, just
		 * allow a single stripe (on a single disk).
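		 * (A full stripeset is stripe_len * nr_data_stripes(map)
		 * bytes of logical address space.)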
		 */
		if ((map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
		    (rw & REQ_WRITE)) {
			max_len = stripe_len * nr_data_stripes(map) -
				(offset - raid56_full_stripe_start);
		} else {
			/* we limit the length of each bio to what fits in a stripe */
			max_len = stripe_len - stripe_offset;
		}
		*length = min_t(u64, em->len - offset, max_len);
	} else {
		*length = em->len - offset;
	}

	/*
	 * This is for when we're called from btrfs_merge_bio_hook() and all
	 * it cares about is the length
	 */
	if (!bbio_ret)
		goto out;

	btrfs_dev_replace_lock(dev_replace, 0);
	dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
	if (!dev_replace_is_ongoing)
		btrfs_dev_replace_unlock(dev_replace, 0);
	else
		btrfs_dev_replace_set_lock_blocking(dev_replace);

	if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 &&
	    !(rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)) &&
	    dev_replace->tgtdev != NULL) {
		/*
		 * in dev-replace case, for repair case (that's the only
		 * case where the mirror is selected explicitly when
		 * calling btrfs_map_block), blocks left of the left cursor
		 * can also be read from the target drive.
		 * For REQ_GET_READ_MIRRORS, the target drive is added as
		 * the last one to the array of stripes. For READ, it also
		 * needs to be supported using the same mirror number.
		 * If the requested block is not left of the left cursor,
		 * EIO is returned. This can happen because btrfs_num_copies()
		 * returns one more in the dev-replace case.
		 */
		u64 tmp_length = *length;
		struct btrfs_bio *tmp_bbio = NULL;
		int tmp_num_stripes;
		u64 srcdev_devid = dev_replace->srcdev->devid;
		int index_srcdev = 0;
		int found = 0;
		u64 physical_of_found = 0;

		ret = __btrfs_map_block(fs_info, REQ_GET_READ_MIRRORS,
					logical, &tmp_length, &tmp_bbio, 0, 0);
		if (ret) {
			WARN_ON(tmp_bbio != NULL);
			goto out;
		}

		tmp_num_stripes = tmp_bbio->num_stripes;
		if (mirror_num > tmp_num_stripes) {
			/*
			 * REQ_GET_READ_MIRRORS does not contain this
			 * mirror, that means that the requested area
			 * is not left of the left cursor
			 */
			ret = -EIO;
			btrfs_put_bbio(tmp_bbio);
			goto out;
		}

		/*
		 * process the rest of the function using the mirror_num
		 * of the source drive. Therefore look it up first.
		 * At the end, patch the device pointer to the one of the
		 * target drive.
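		 * (The loop below scans the stripes of the temporary bbio
		 * for the source device.)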
		 */
		for (i = 0; i < tmp_num_stripes; i++) {
			if (tmp_bbio->stripes[i].dev->devid != srcdev_devid)
				continue;

			/*
			 * In case of DUP, in order to keep it simple, only add
			 * the mirror with the lowest physical address
			 */
			if (found &&
			    physical_of_found <= tmp_bbio->stripes[i].physical)
				continue;

			index_srcdev = i;
			found = 1;
			physical_of_found = tmp_bbio->stripes[i].physical;
		}

		btrfs_put_bbio(tmp_bbio);

		if (!found) {
			WARN_ON(1);
			ret = -EIO;
			goto out;
		}

		mirror_num = index_srcdev + 1;
		patch_the_first_stripe_for_dev_replace = 1;
		physical_to_patch_in_first_stripe = physical_of_found;
	} else if (mirror_num > map->num_stripes) {
		mirror_num = 0;
	}

	num_stripes = 1;
	stripe_index = 0;
	stripe_nr_orig = stripe_nr;
	stripe_nr_end = ALIGN(offset + *length, map->stripe_len);
	stripe_nr_end = div_u64(stripe_nr_end, map->stripe_len);
	stripe_end_offset = stripe_nr_end * map->stripe_len -
			    (offset + *length);

	if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
		if (rw & REQ_DISCARD)
			num_stripes = min_t(u64, map->num_stripes,
					    stripe_nr_end - stripe_nr_orig);
		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
					&stripe_index);
		if (!(rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)))
			mirror_num = 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
		if (rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS))
			num_stripes = map->num_stripes;
		else if (mirror_num)
			stripe_index = mirror_num - 1;
		else {
			stripe_index = find_live_mirror(fs_info, map, 0,
					    map->num_stripes,
					    current->pid % map->num_stripes,
					    dev_replace_is_ongoing);
			mirror_num = stripe_index + 1;
		}

	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
		if (rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)) {
			num_stripes = map->num_stripes;
		} else if (mirror_num) {
			stripe_index = mirror_num - 1;
		} else {
			mirror_num = 1;
		}

	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
		u32 factor = map->num_stripes / map->sub_stripes;

		stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
		stripe_index *= map->sub_stripes;

		if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS))
			num_stripes = map->sub_stripes;
		else if (rw & REQ_DISCARD)
			num_stripes = min_t(u64, map->sub_stripes *
					    (stripe_nr_end - stripe_nr_orig),
					    map->num_stripes);
		else if (mirror_num)
			stripe_index += mirror_num - 1;
		else {
			int old_stripe_index = stripe_index;
			stripe_index = find_live_mirror(fs_info, map,
					      stripe_index,
					      map->sub_stripes, stripe_index +
					      current->pid % map->sub_stripes,
					      dev_replace_is_ongoing);
			mirror_num = stripe_index - old_stripe_index + 1;
		}

	} else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		if (need_raid_map &&
		    ((rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) ||
		     mirror_num > 1)) {
			/* push stripe_nr back to the start of the full stripe */
			stripe_nr = div_u64(raid56_full_stripe_start,
					stripe_len * nr_data_stripes(map));

			/* RAID[56] write or recovery. Return all stripes */
			num_stripes = map->num_stripes;
			max_errors = nr_parity_stripes(map);

			*length = map->stripe_len;
			stripe_index = 0;
			stripe_offset = 0;
		} else {
			/*
			 * Mirror #0 or #1 means the original data block.
			 * Mirror #2 is RAID5 parity block.
			 * Mirror #3 is RAID6 Q block.
			 */
			stripe_nr = div_u64_rem(stripe_nr,
					nr_data_stripes(map), &stripe_index);
			if (mirror_num > 1)
				stripe_index = nr_data_stripes(map) +
						mirror_num - 2;

			/* We distribute the parity blocks across stripes */
			div_u64_rem(stripe_nr + stripe_index, map->num_stripes,
					&stripe_index);
			if (!(rw & (REQ_WRITE | REQ_DISCARD |
				    REQ_GET_READ_MIRRORS)) && mirror_num <= 1)
				mirror_num = 1;
		}
	} else {
		/*
		 * after this, stripe_nr is the number of stripes on this
		 * device we have to walk to find the data, and stripe_index is
		 * the number of our device in the stripe array
		 */
		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
					&stripe_index);
		mirror_num = stripe_index + 1;
	}
	if (stripe_index >= map->num_stripes) {
		btrfs_crit(fs_info, "stripe index math went horribly wrong, got stripe_index=%u, num_stripes=%u",
			   stripe_index, map->num_stripes);
		ret = -EINVAL;
		goto out;
	}

	num_alloc_stripes = num_stripes;
	if (dev_replace_is_ongoing) {
		if (rw & (REQ_WRITE | REQ_DISCARD))
			num_alloc_stripes <<= 1;
		if (rw & REQ_GET_READ_MIRRORS)
			num_alloc_stripes++;
		tgtdev_indexes = num_stripes;
	}

	bbio = alloc_btrfs_bio(num_alloc_stripes, tgtdev_indexes);
	if (!bbio) {
		ret = -ENOMEM;
		goto out;
	}
	if (dev_replace_is_ongoing)
		bbio->tgtdev_map = (int *)(bbio->stripes + num_alloc_stripes);

	/* build raid_map */
	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK &&
	    need_raid_map && ((rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) ||
	    mirror_num > 1)) {
		u64 tmp;
		unsigned rot;

		bbio->raid_map = (u64 *)((void *)bbio->stripes +
				 sizeof(struct btrfs_bio_stripe) *
				 num_alloc_stripes +
				 sizeof(int) * tgtdev_indexes);

		/* Work out the disk rotation on this stripe-set */
		div_u64_rem(stripe_nr, num_stripes, &rot);

		/* Fill in the logical address of each stripe */
		tmp = stripe_nr * nr_data_stripes(map);
		for (i = 0; i < nr_data_stripes(map); i++)
			bbio->raid_map[(i+rot) % num_stripes] =
				em->start + (tmp + i) * map->stripe_len;

		bbio->raid_map[(i+rot) % map->num_stripes] = RAID5_P_STRIPE;
		if (map->type & BTRFS_BLOCK_GROUP_RAID6)
			bbio->raid_map[(i+rot+1) % num_stripes] =
				RAID6_Q_STRIPE;
	}

	if (rw & REQ_DISCARD) {
		u32 factor = 0;
		u32 sub_stripes = 0;
		u64 stripes_per_dev = 0;
		u32 remaining_stripes = 0;
		u32 last_stripe = 0;

		if (map->type &
		    (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) {
			if (map->type & BTRFS_BLOCK_GROUP_RAID0)
				sub_stripes = 1;
			else
				sub_stripes = map->sub_stripes;

			factor = map->num_stripes / sub_stripes;
			stripes_per_dev = div_u64_rem(stripe_nr_end -
						      stripe_nr_orig,
						      factor,
						      &remaining_stripes);
			div_u64_rem(stripe_nr_end - 1, factor, &last_stripe);
			last_stripe *= sub_stripes;
		}

		for (i = 0; i < num_stripes; i++) {
			bbio->stripes[i].physical =
				map->stripes[stripe_index].physical +
				stripe_offset + stripe_nr * map->stripe_len;
			bbio->stripes[i].dev = map->stripes[stripe_index].dev;

			if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
					 BTRFS_BLOCK_GROUP_RAID10)) {
				bbio->stripes[i].length = stripes_per_dev *
							  map->stripe_len;

				if (i / sub_stripes < remaining_stripes)
					bbio->stripes[i].length +=
						map->stripe_len;

				/*
				 * Special for the first stripe and
				 * the last stripe:
				 *
				 * |-------|...|-------|
				 *     |----------|
				 *    off     end_off
				 */
				if (i < sub_stripes)
					bbio->stripes[i].length -=
						stripe_offset;

				if (stripe_index >= last_stripe &&
				    stripe_index <= (last_stripe +
						     sub_stripes - 1))
					bbio->stripes[i].length -=
						stripe_end_offset;

				if (i == sub_stripes - 1)
					stripe_offset = 0;
			} else
				bbio->stripes[i].length = *length;

			stripe_index++;
			if (stripe_index == map->num_stripes) {
				/* This could only happen for RAID0/10 */
				stripe_index = 0;
				stripe_nr++;
			}
		}
	} else {
		for (i = 0; i < num_stripes; i++) {
			bbio->stripes[i].physical =
				map->stripes[stripe_index].physical +
				stripe_offset +
				stripe_nr * map->stripe_len;
			bbio->stripes[i].dev =
				map->stripes[stripe_index].dev;
			stripe_index++;
		}
	}

	if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS))
		max_errors = btrfs_chunk_max_errors(map);

	if (bbio->raid_map)
		sort_parity_stripes(bbio, num_stripes);

	tgtdev_indexes = 0;
	if (dev_replace_is_ongoing && (rw & (REQ_WRITE | REQ_DISCARD)) &&
	    dev_replace->tgtdev != NULL) {
		int index_where_to_add;
		u64 srcdev_devid = dev_replace->srcdev->devid;

		/*
		 * duplicate the write operations while the dev replace
		 * procedure is running. Since the copying of the old disk
		 * to the new disk takes place at run time while the
		 * filesystem is mounted writable, the regular write
		 * operations to the old disk have to be duplicated to go
		 * to the new disk as well.
		 * Note that device->missing is handled by the caller, and
		 * that the write to the old disk is already set up in the
		 * stripes array.
		 */
		index_where_to_add = num_stripes;
		for (i = 0; i < num_stripes; i++) {
			if (bbio->stripes[i].dev->devid == srcdev_devid) {
				/* write to new disk, too */
				struct btrfs_bio_stripe *new =
					bbio->stripes + index_where_to_add;
				struct btrfs_bio_stripe *old =
					bbio->stripes + i;

				new->physical = old->physical;
				new->length = old->length;
				new->dev = dev_replace->tgtdev;
				bbio->tgtdev_map[i] = index_where_to_add;
				index_where_to_add++;
				max_errors++;
				tgtdev_indexes++;
			}
		}
		num_stripes = index_where_to_add;
	} else if (dev_replace_is_ongoing && (rw & REQ_GET_READ_MIRRORS) &&
		   dev_replace->tgtdev != NULL) {
		u64 srcdev_devid = dev_replace->srcdev->devid;
		int index_srcdev = 0;
		int found = 0;
		u64 physical_of_found = 0;

		/*
		 * During the dev-replace procedure, the target drive can
		 * also be used to read data in case it is needed to repair
		 * a corrupt block elsewhere. This is possible if the
		 * requested area is left of the left cursor. In this area,
		 * the target drive is a full copy of the source drive.
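		 * (cursor_left marks how far the copy has progressed; the
		 * check below only adds the target stripe once the whole
		 * source stripe has been copied.)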
5747 */ 5748 for (i = 0; i < num_stripes; i++) { 5749 if (bbio->stripes[i].dev->devid == srcdev_devid) { 5750 /* 5751 * In case of DUP, in order to keep it 5752 * simple, only add the mirror with the 5753 * lowest physical address 5754 */ 5755 if (found && 5756 physical_of_found <= 5757 bbio->stripes[i].physical) 5758 continue; 5759 index_srcdev = i; 5760 found = 1; 5761 physical_of_found = bbio->stripes[i].physical; 5762 } 5763 } 5764 if (found) { 5765 if (physical_of_found + map->stripe_len <= 5766 dev_replace->cursor_left) { 5767 struct btrfs_bio_stripe *tgtdev_stripe = 5768 bbio->stripes + num_stripes; 5769 5770 tgtdev_stripe->physical = physical_of_found; 5771 tgtdev_stripe->length = 5772 bbio->stripes[index_srcdev].length; 5773 tgtdev_stripe->dev = dev_replace->tgtdev; 5774 bbio->tgtdev_map[index_srcdev] = num_stripes; 5775 5776 tgtdev_indexes++; 5777 num_stripes++; 5778 } 5779 } 5780 } 5781 5782 *bbio_ret = bbio; 5783 bbio->map_type = map->type; 5784 bbio->num_stripes = num_stripes; 5785 bbio->max_errors = max_errors; 5786 bbio->mirror_num = mirror_num; 5787 bbio->num_tgtdevs = tgtdev_indexes; 5788 5789 /* 5790 * this is the case that REQ_READ && dev_replace_is_ongoing && 5791 * mirror_num == num_stripes + 1 && dev_replace target drive is 5792 * available as a mirror 5793 */ 5794 if (patch_the_first_stripe_for_dev_replace && num_stripes > 0) { 5795 WARN_ON(num_stripes > 1); 5796 bbio->stripes[0].dev = dev_replace->tgtdev; 5797 bbio->stripes[0].physical = physical_to_patch_in_first_stripe; 5798 bbio->mirror_num = map->num_stripes + 1; 5799 } 5800 out: 5801 if (dev_replace_is_ongoing) { 5802 btrfs_dev_replace_clear_lock_blocking(dev_replace); 5803 btrfs_dev_replace_unlock(dev_replace, 0); 5804 } 5805 free_extent_map(em); 5806 return ret; 5807 } 5808 5809 int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, 5810 u64 logical, u64 *length, 5811 struct btrfs_bio **bbio_ret, int mirror_num) 5812 { 5813 return __btrfs_map_block(fs_info, rw, logical, length, bbio_ret, 5814 mirror_num, 0); 5815 } 5816 5817 /* For Scrub/replace */ 5818 int btrfs_map_sblock(struct btrfs_fs_info *fs_info, int rw, 5819 u64 logical, u64 *length, 5820 struct btrfs_bio **bbio_ret, int mirror_num, 5821 int need_raid_map) 5822 { 5823 return __btrfs_map_block(fs_info, rw, logical, length, bbio_ret, 5824 mirror_num, need_raid_map); 5825 } 5826 5827 int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree, 5828 u64 chunk_start, u64 physical, u64 devid, 5829 u64 **logical, int *naddrs, int *stripe_len) 5830 { 5831 struct extent_map_tree *em_tree = &map_tree->map_tree; 5832 struct extent_map *em; 5833 struct map_lookup *map; 5834 u64 *buf; 5835 u64 bytenr; 5836 u64 length; 5837 u64 stripe_nr; 5838 u64 rmap_len; 5839 int i, j, nr = 0; 5840 5841 read_lock(&em_tree->lock); 5842 em = lookup_extent_mapping(em_tree, chunk_start, 1); 5843 read_unlock(&em_tree->lock); 5844 5845 if (!em) { 5846 printk(KERN_ERR "BTRFS: couldn't find em for chunk %Lu\n", 5847 chunk_start); 5848 return -EIO; 5849 } 5850 5851 if (em->start != chunk_start) { 5852 printk(KERN_ERR "BTRFS: bad chunk start, em=%Lu, wanted=%Lu\n", 5853 em->start, chunk_start); 5854 free_extent_map(em); 5855 return -EIO; 5856 } 5857 map = em->map_lookup; 5858 5859 length = em->len; 5860 rmap_len = map->stripe_len; 5861 5862 if (map->type & BTRFS_BLOCK_GROUP_RAID10) 5863 length = div_u64(length, map->num_stripes / map->sub_stripes); 5864 else if (map->type & BTRFS_BLOCK_GROUP_RAID0) 5865 length = div_u64(length, map->num_stripes); 5866 else if (map->type & 
		length = div_u64(length, nr_data_stripes(map));
		rmap_len = map->stripe_len * nr_data_stripes(map);
	}

	buf = kcalloc(map->num_stripes, sizeof(u64), GFP_NOFS);
	BUG_ON(!buf); /* -ENOMEM */

	for (i = 0; i < map->num_stripes; i++) {
		if (devid && map->stripes[i].dev->devid != devid)
			continue;
		if (map->stripes[i].physical > physical ||
		    map->stripes[i].physical + length <= physical)
			continue;

		stripe_nr = physical - map->stripes[i].physical;
		stripe_nr = div_u64(stripe_nr, map->stripe_len);

		if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
			stripe_nr = stripe_nr * map->num_stripes + i;
			stripe_nr = div_u64(stripe_nr, map->sub_stripes);
		} else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
			stripe_nr = stripe_nr * map->num_stripes + i;
		}
		/*
		 * else if RAID[56], multiply by nr_data_stripes().
		 * Alternatively, just use rmap_len below instead of
		 * map->stripe_len
		 */

		bytenr = chunk_start + stripe_nr * rmap_len;
		WARN_ON(nr >= map->num_stripes);
		for (j = 0; j < nr; j++) {
			if (buf[j] == bytenr)
				break;
		}
		if (j == nr) {
			WARN_ON(nr >= map->num_stripes);
			buf[nr++] = bytenr;
		}
	}

	*logical = buf;
	*naddrs = nr;
	*stripe_len = rmap_len;

	free_extent_map(em);
	return 0;
}

static inline void btrfs_end_bbio(struct btrfs_bio *bbio, struct bio *bio)
{
	bio->bi_private = bbio->private;
	bio->bi_end_io = bbio->end_io;
	bio_endio(bio);

	btrfs_put_bbio(bbio);
}

static void btrfs_end_bio(struct bio *bio)
{
	struct btrfs_bio *bbio = bio->bi_private;
	int is_orig_bio = 0;

	if (bio->bi_error) {
		atomic_inc(&bbio->error);
		if (bio->bi_error == -EIO || bio->bi_error == -EREMOTEIO) {
			unsigned int stripe_index =
				btrfs_io_bio(bio)->stripe_index;
			struct btrfs_device *dev;

			BUG_ON(stripe_index >= bbio->num_stripes);
			dev = bbio->stripes[stripe_index].dev;
			if (dev->bdev) {
				if (bio->bi_rw & WRITE)
					btrfs_dev_stat_inc(dev,
						BTRFS_DEV_STAT_WRITE_ERRS);
				else
					btrfs_dev_stat_inc(dev,
						BTRFS_DEV_STAT_READ_ERRS);
				if ((bio->bi_rw & WRITE_FLUSH) == WRITE_FLUSH)
					btrfs_dev_stat_inc(dev,
						BTRFS_DEV_STAT_FLUSH_ERRS);
				btrfs_dev_stat_print_on_error(dev);
			}
		}
	}

	if (bio == bbio->orig_bio)
		is_orig_bio = 1;

	btrfs_bio_counter_dec(bbio->fs_info);

	if (atomic_dec_and_test(&bbio->stripes_pending)) {
		if (!is_orig_bio) {
			bio_put(bio);
			bio = bbio->orig_bio;
		}

		btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
		/*
		 * only send an error to the higher layers if it is
		 * beyond the tolerance of the btrfs bio
		 */
		if (atomic_read(&bbio->error) > bbio->max_errors) {
			bio->bi_error = -EIO;
		} else {
			/*
			 * this bio is actually up to date, we didn't
			 * go over the max number of errors
			 */
			bio->bi_error = 0;
		}

		btrfs_end_bbio(bbio, bio);
	} else if (!is_orig_bio) {
		bio_put(bio);
	}
}

/*
 * see run_scheduled_bios for a description of why bios are collected for
 * async submit.
 *
 * This will add one bio to the pending list for a device and make sure
 * the work struct is scheduled.
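 * Note that reads are never queued here; they are submitted synchronously
 * straight away instead.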
/*
 * See run_scheduled_bios for a description of why bios are collected for
 * async submit.
 *
 * This will add one bio to the pending list for a device and make sure
 * the work struct is scheduled.
 */
static noinline void btrfs_schedule_bio(struct btrfs_root *root,
					struct btrfs_device *device,
					int rw, struct bio *bio)
{
	int should_queue = 1;
	struct btrfs_pending_bios *pending_bios;

	if (device->missing || !device->bdev) {
		bio_io_error(bio);
		return;
	}

	/* don't bother with additional async steps for reads, right now */
	if (!(rw & REQ_WRITE)) {
		bio_get(bio);
		btrfsic_submit_bio(rw, bio);
		bio_put(bio);
		return;
	}

	/*
	 * nr_async_bios allows us to reliably return congestion to the
	 * higher layers.  Otherwise, the async bio makes it appear we have
	 * made progress against dirty pages when we've really just put it
	 * on a queue for later.
	 */
	atomic_inc(&root->fs_info->nr_async_bios);
	WARN_ON(bio->bi_next);
	bio->bi_next = NULL;
	bio->bi_rw |= rw;

	spin_lock(&device->io_lock);
	if (bio->bi_rw & REQ_SYNC)
		pending_bios = &device->pending_sync_bios;
	else
		pending_bios = &device->pending_bios;

	if (pending_bios->tail)
		pending_bios->tail->bi_next = bio;

	pending_bios->tail = bio;
	if (!pending_bios->head)
		pending_bios->head = bio;
	if (device->running_pending)
		should_queue = 0;

	spin_unlock(&device->io_lock);

	if (should_queue)
		btrfs_queue_work(root->fs_info->submit_workers,
				 &device->work);
}

static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
			      struct bio *bio, u64 physical, int dev_nr,
			      int rw, int async)
{
	struct btrfs_device *dev = bbio->stripes[dev_nr].dev;

	bio->bi_private = bbio;
	btrfs_io_bio(bio)->stripe_index = dev_nr;
	bio->bi_end_io = btrfs_end_bio;
	bio->bi_iter.bi_sector = physical >> 9;
#ifdef DEBUG
	{
		struct rcu_string *name;

		rcu_read_lock();
		name = rcu_dereference(dev->name);
		pr_debug("btrfs_map_bio: rw %d, sector=%llu, dev=%lu "
			 "(%s id %llu), size=%u\n", rw,
			 (u64)bio->bi_iter.bi_sector, (u_long)dev->bdev->bd_dev,
			 name->str, dev->devid, bio->bi_iter.bi_size);
		rcu_read_unlock();
	}
#endif
	bio->bi_bdev = dev->bdev;

	btrfs_bio_counter_inc_noblocked(root->fs_info);

	if (async)
		btrfs_schedule_bio(root, dev, rw, bio);
	else
		btrfsic_submit_bio(rw, bio);
}
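/*
 * Note on units in submit_stripe_bio() above (illustrative): stripe
 * 'physical' offsets are byte addresses while bio->bi_iter.bi_sector
 * counts 512-byte sectors, hence the >> 9.  E.g. a stripe starting at
 * physical 1 MiB (1048576 bytes) is submitted at bi_sector 2048.
 */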
static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
{
	atomic_inc(&bbio->error);
	if (atomic_dec_and_test(&bbio->stripes_pending)) {
		/* Should be the original bio. */
		WARN_ON(bio != bbio->orig_bio);

		btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
		bio->bi_iter.bi_sector = logical >> 9;
		bio->bi_error = -EIO;
		btrfs_end_bbio(bbio, bio);
	}
}

int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
		  int mirror_num, int async_submit)
{
	struct btrfs_device *dev;
	struct bio *first_bio = bio;
	u64 logical = (u64)bio->bi_iter.bi_sector << 9;
	u64 length = 0;
	u64 map_length;
	int ret;
	int dev_nr;
	int total_devs;
	struct btrfs_bio *bbio = NULL;

	length = bio->bi_iter.bi_size;
	map_length = length;

	btrfs_bio_counter_inc_blocked(root->fs_info);
	ret = __btrfs_map_block(root->fs_info, rw, logical, &map_length, &bbio,
				mirror_num, 1);
	if (ret) {
		btrfs_bio_counter_dec(root->fs_info);
		return ret;
	}

	total_devs = bbio->num_stripes;
	bbio->orig_bio = first_bio;
	bbio->private = first_bio->bi_private;
	bbio->end_io = first_bio->bi_end_io;
	bbio->fs_info = root->fs_info;
	atomic_set(&bbio->stripes_pending, bbio->num_stripes);

	if ((bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
	    ((rw & WRITE) || (mirror_num > 1))) {
		/*
		 * In this case, map_length has been set to the length of
		 * a single stripe, not the whole write.
		 */
		if (rw & WRITE) {
			ret = raid56_parity_write(root, bio, bbio, map_length);
		} else {
			ret = raid56_parity_recover(root, bio, bbio, map_length,
						    mirror_num, 1);
		}

		btrfs_bio_counter_dec(root->fs_info);
		return ret;
	}

	if (map_length < length) {
		btrfs_crit(root->fs_info, "mapping failed logical %llu bio len %llu len %llu",
			   logical, length, map_length);
		BUG();
	}

	for (dev_nr = 0; dev_nr < total_devs; dev_nr++) {
		dev = bbio->stripes[dev_nr].dev;
		if (!dev || !dev->bdev || (rw & WRITE && !dev->writeable)) {
			bbio_error(bbio, first_bio, logical);
			continue;
		}

		if (dev_nr < total_devs - 1) {
			bio = btrfs_bio_clone(first_bio, GFP_NOFS);
			BUG_ON(!bio); /* -ENOMEM */
		} else
			bio = first_bio;

		submit_stripe_bio(root, bbio, bio,
				  bbio->stripes[dev_nr].physical, dev_nr, rw,
				  async_submit);
	}
	btrfs_bio_counter_dec(root->fs_info);
	return 0;
}
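/*
 * Illustrative call sequence (hypothetical caller, not from the original
 * source): submitting a write through the chunk layer.
 *
 *	bio->bi_iter.bi_sector = logical >> 9;
 *	ret = btrfs_map_bio(root, WRITE, bio, 0, 1);
 *
 * The caller supplies only the logical bio; btrfs_map_bio() clones it
 * once per extra stripe, and the original bio's bi_end_io runs once all
 * stripes have completed (see btrfs_end_bio() above).
 */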
struct btrfs_device *btrfs_find_device(struct btrfs_fs_info *fs_info, u64 devid,
				       u8 *uuid, u8 *fsid)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *cur_devices;

	cur_devices = fs_info->fs_devices;
	while (cur_devices) {
		if (!fsid ||
		    !memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
			device = __find_device(&cur_devices->devices,
					       devid, uuid);
			if (device)
				return device;
		}
		cur_devices = cur_devices->seed;
	}
	return NULL;
}

static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
					    struct btrfs_fs_devices *fs_devices,
					    u64 devid, u8 *dev_uuid)
{
	struct btrfs_device *device;

	device = btrfs_alloc_device(NULL, &devid, dev_uuid);
	if (IS_ERR(device))
		return NULL;

	list_add(&device->dev_list, &fs_devices->devices);
	device->fs_devices = fs_devices;
	fs_devices->num_devices++;

	device->missing = 1;
	fs_devices->missing_devices++;

	return device;
}

/**
 * btrfs_alloc_device - allocate struct btrfs_device
 * @fs_info:	used only for generating a new devid, can be NULL if
 *		devid is provided (i.e. @devid != NULL).
 * @devid:	a pointer to devid for this device.  If NULL a new devid
 *		is generated.
 * @uuid:	a pointer to UUID for this device.  If NULL a new UUID
 *		is generated.
 *
 * Return: a pointer to a new &struct btrfs_device on success; ERR_PTR()
 * on error.  Returned struct is not linked onto any lists and can be
 * destroyed with kfree() right away.
 */
struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
					const u64 *devid,
					const u8 *uuid)
{
	struct btrfs_device *dev;
	u64 tmp;

	if (WARN_ON(!devid && !fs_info))
		return ERR_PTR(-EINVAL);

	dev = __alloc_device();
	if (IS_ERR(dev))
		return dev;

	if (devid)
		tmp = *devid;
	else {
		int ret;

		ret = find_next_devid(fs_info, &tmp);
		if (ret) {
			kfree(dev);
			return ERR_PTR(ret);
		}
	}
	dev->devid = tmp;

	if (uuid)
		memcpy(dev->uuid, uuid, BTRFS_UUID_SIZE);
	else
		generate_random_uuid(dev->uuid);

	btrfs_init_work(&dev->work, btrfs_submit_helper,
			pending_bios_fn, NULL, NULL);

	return dev;
}
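/*
 * Illustrative sketch of the two allocation modes documented above (the
 * 'fs_info', 'devid' and 'uuid' variables are assumed caller context,
 * not from the original source):
 *
 *	// devid and uuid already known, e.g. when reading items from disk:
 *	dev = btrfs_alloc_device(NULL, &devid, uuid);
 *
 *	// let find_next_devid() pick a devid and generate a fresh UUID:
 *	dev = btrfs_alloc_device(fs_info, NULL, NULL);
 *
 *	if (IS_ERR(dev))
 *		return PTR_ERR(dev);
 */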
static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
			  struct extent_buffer *leaf,
			  struct btrfs_chunk *chunk)
{
	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
	struct map_lookup *map;
	struct extent_map *em;
	u64 logical;
	u64 length;
	u64 stripe_len;
	u64 devid;
	u8 uuid[BTRFS_UUID_SIZE];
	int num_stripes;
	int ret;
	int i;

	logical = key->offset;
	length = btrfs_chunk_length(leaf, chunk);
	stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	/* Validation check */
	if (!num_stripes) {
		btrfs_err(root->fs_info, "invalid chunk num_stripes: %u",
			  num_stripes);
		return -EIO;
	}
	if (!IS_ALIGNED(logical, root->sectorsize)) {
		btrfs_err(root->fs_info,
			  "invalid chunk logical %llu", logical);
		return -EIO;
	}
	if (!length || !IS_ALIGNED(length, root->sectorsize)) {
		btrfs_err(root->fs_info,
			  "invalid chunk length %llu", length);
		return -EIO;
	}
	if (!is_power_of_2(stripe_len) || stripe_len != BTRFS_STRIPE_LEN) {
		btrfs_err(root->fs_info, "invalid chunk stripe length: %llu",
			  stripe_len);
		return -EIO;
	}
	if (~(BTRFS_BLOCK_GROUP_TYPE_MASK | BTRFS_BLOCK_GROUP_PROFILE_MASK) &
	    btrfs_chunk_type(leaf, chunk)) {
		btrfs_err(root->fs_info, "unrecognized chunk type: %llu",
			  ~(BTRFS_BLOCK_GROUP_TYPE_MASK |
			    BTRFS_BLOCK_GROUP_PROFILE_MASK) &
			  btrfs_chunk_type(leaf, chunk));
		return -EIO;
	}

	read_lock(&map_tree->map_tree.lock);
	em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
	read_unlock(&map_tree->map_tree.lock);

	/* already mapped? */
	if (em && em->start <= logical && em->start + em->len > logical) {
		free_extent_map(em);
		return 0;
	} else if (em) {
		free_extent_map(em);
	}

	em = alloc_extent_map();
	if (!em)
		return -ENOMEM;
	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
	if (!map) {
		free_extent_map(em);
		return -ENOMEM;
	}

	set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
	em->map_lookup = map;
	em->start = logical;
	em->len = length;
	em->orig_start = 0;
	em->block_start = 0;
	em->block_len = em->len;

	map->num_stripes = num_stripes;
	map->io_width = btrfs_chunk_io_width(leaf, chunk);
	map->io_align = btrfs_chunk_io_align(leaf, chunk);
	map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
	map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
	map->type = btrfs_chunk_type(leaf, chunk);
	map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
	for (i = 0; i < num_stripes; i++) {
		map->stripes[i].physical =
			btrfs_stripe_offset_nr(leaf, chunk, i);
		devid = btrfs_stripe_devid_nr(leaf, chunk, i);
		read_extent_buffer(leaf, uuid, (unsigned long)
				   btrfs_stripe_dev_uuid_nr(chunk, i),
				   BTRFS_UUID_SIZE);
		map->stripes[i].dev = btrfs_find_device(root->fs_info, devid,
							uuid, NULL);
		if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
			free_extent_map(em);
			return -EIO;
		}
		if (!map->stripes[i].dev) {
			map->stripes[i].dev =
				add_missing_dev(root, root->fs_info->fs_devices,
						devid, uuid);
			if (!map->stripes[i].dev) {
				free_extent_map(em);
				return -EIO;
			}
			btrfs_warn(root->fs_info, "devid %llu uuid %pU is missing",
				   devid, uuid);
		}
		map->stripes[i].dev->in_fs_metadata = 1;
	}

	write_lock(&map_tree->map_tree.lock);
	ret = add_extent_mapping(&map_tree->map_tree, em, 0);
	write_unlock(&map_tree->map_tree.lock);
	BUG_ON(ret); /* Tree corruption */
	free_extent_map(em);

	return 0;
}
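/*
 * Worked example of the validation in read_one_chunk() above
 * (illustrative, assuming a 4K sectorsize): a chunk item claiming
 * logical 0x10001000 passes the alignment check, but a stripe_len of
 * 128K is rejected even though it is a power of two, because only
 * BTRFS_STRIPE_LEN (64K) is accepted.  Likewise a chunk type with any
 * bit outside the TYPE and PROFILE masks fails the last check.
 */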
static void fill_device_from_item(struct extent_buffer *leaf,
				  struct btrfs_dev_item *dev_item,
				  struct btrfs_device *device)
{
	unsigned long ptr;

	device->devid = btrfs_device_id(leaf, dev_item);
	device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
	device->total_bytes = device->disk_total_bytes;
	device->commit_total_bytes = device->disk_total_bytes;
	device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
	device->commit_bytes_used = device->bytes_used;
	device->type = btrfs_device_type(leaf, dev_item);
	device->io_align = btrfs_device_io_align(leaf, dev_item);
	device->io_width = btrfs_device_io_width(leaf, dev_item);
	device->sector_size = btrfs_device_sector_size(leaf, dev_item);
	WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID);
	device->is_tgtdev_for_dev_replace = 0;

	ptr = btrfs_device_uuid(dev_item);
	read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
}

static struct btrfs_fs_devices *open_seed_devices(struct btrfs_root *root,
						  u8 *fsid)
{
	struct btrfs_fs_devices *fs_devices;
	int ret;

	BUG_ON(!mutex_is_locked(&uuid_mutex));

	fs_devices = root->fs_info->fs_devices->seed;
	while (fs_devices) {
		if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE))
			return fs_devices;

		fs_devices = fs_devices->seed;
	}

	fs_devices = find_fsid(fsid);
	if (!fs_devices) {
		if (!btrfs_test_opt(root, DEGRADED))
			return ERR_PTR(-ENOENT);

		fs_devices = alloc_fs_devices(fsid);
		if (IS_ERR(fs_devices))
			return fs_devices;

		fs_devices->seeding = 1;
		fs_devices->opened = 1;
		return fs_devices;
	}

	fs_devices = clone_fs_devices(fs_devices);
	if (IS_ERR(fs_devices))
		return fs_devices;

	ret = __btrfs_open_devices(fs_devices, FMODE_READ,
				   root->fs_info->bdev_holder);
	if (ret) {
		free_fs_devices(fs_devices);
		fs_devices = ERR_PTR(ret);
		goto out;
	}

	if (!fs_devices->seeding) {
		__btrfs_close_devices(fs_devices);
		free_fs_devices(fs_devices);
		fs_devices = ERR_PTR(-EINVAL);
		goto out;
	}

	fs_devices->seed = root->fs_info->fs_devices->seed;
	root->fs_info->fs_devices->seed = fs_devices;
out:
	return fs_devices;
}
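/*
 * Seed-device note (illustrative): when a filesystem was created as a
 * sprout of a seed device, read_one_dev() below sees dev items whose
 * fsid differs from the mounted filesystem and resolves them through
 * open_seed_devices(), which chains the seed's btrfs_fs_devices onto
 * fs_info->fs_devices->seed.  That chain is why lookups such as
 * btrfs_find_device() walk cur_devices->seed.
 */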
static int read_one_dev(struct btrfs_root *root,
			struct extent_buffer *leaf,
			struct btrfs_dev_item *dev_item)
{
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
	struct btrfs_device *device;
	u64 devid;
	int ret;
	u8 fs_uuid[BTRFS_UUID_SIZE];
	u8 dev_uuid[BTRFS_UUID_SIZE];

	devid = btrfs_device_id(leaf, dev_item);
	read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
			   BTRFS_UUID_SIZE);
	read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
			   BTRFS_UUID_SIZE);

	if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_UUID_SIZE)) {
		fs_devices = open_seed_devices(root, fs_uuid);
		if (IS_ERR(fs_devices))
			return PTR_ERR(fs_devices);
	}

	device = btrfs_find_device(root->fs_info, devid, dev_uuid, fs_uuid);
	if (!device) {
		if (!btrfs_test_opt(root, DEGRADED))
			return -EIO;

		device = add_missing_dev(root, fs_devices, devid, dev_uuid);
		if (!device)
			return -ENOMEM;
		btrfs_warn(root->fs_info, "devid %llu uuid %pU missing",
			   devid, dev_uuid);
	} else {
		if (!device->bdev && !btrfs_test_opt(root, DEGRADED))
			return -EIO;

		if (!device->bdev && !device->missing) {
			/*
			 * This happens when a device that was properly set
			 * up in the device info lists suddenly goes bad.
			 * device->bdev is NULL, and so we have to set
			 * device->missing to one here.
			 */
			device->fs_devices->missing_devices++;
			device->missing = 1;
		}

		/* Move the device to its own fs_devices */
		if (device->fs_devices != fs_devices) {
			ASSERT(device->missing);

			list_move(&device->dev_list, &fs_devices->devices);
			device->fs_devices->num_devices--;
			fs_devices->num_devices++;

			device->fs_devices->missing_devices--;
			fs_devices->missing_devices++;

			device->fs_devices = fs_devices;
		}
	}

	if (device->fs_devices != root->fs_info->fs_devices) {
		BUG_ON(device->writeable);
		if (device->generation !=
		    btrfs_device_generation(leaf, dev_item))
			return -EINVAL;
	}

	fill_device_from_item(leaf, dev_item, device);
	device->in_fs_metadata = 1;
	if (device->writeable && !device->is_tgtdev_for_dev_replace) {
		device->fs_devices->total_rw_bytes += device->total_bytes;
		spin_lock(&root->fs_info->free_chunk_lock);
		root->fs_info->free_chunk_space += device->total_bytes -
			device->bytes_used;
		spin_unlock(&root->fs_info->free_chunk_lock);
	}
	ret = 0;
	return ret;
}
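/*
 * Degraded-mount example for the lookup above (illustrative): if a dev
 * item names a devid with no attached block device, a plain mount fails
 * with -EIO, while mounting with -o degraded lets add_missing_dev()
 * create an in-memory placeholder so the remaining copies can still be
 * used; the "devid %llu uuid %pU missing" warning marks that path.
 */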
int btrfs_read_sys_array(struct btrfs_root *root)
{
	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
	struct extent_buffer *sb;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *array_ptr;
	unsigned long sb_array_offset;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur_offset;
	struct btrfs_key key;

	ASSERT(BTRFS_SUPER_INFO_SIZE <= root->nodesize);
	/*
	 * This will create an extent buffer of nodesize; the superblock
	 * size is fixed to BTRFS_SUPER_INFO_SIZE.  If nodesize > sb size,
	 * this will overallocate, but we can keep it as-is, only the first
	 * page is used.
	 */
	sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET);
	if (!sb)
		return -ENOMEM;
	set_extent_buffer_uptodate(sb);
	btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
	/*
	 * The sb extent buffer is artificial and just used to read the
	 * system array.  The set_extent_buffer_uptodate() call does not
	 * properly mark all of its pages up-to-date when the page is
	 * larger: the extent does not cover the whole page and consequently
	 * check_page_uptodate does not find all the page's extents
	 * up-to-date (the hole beyond sb), and write_extent_buffer then
	 * triggers a WARN_ON.
	 *
	 * Regular short extents go through the
	 * mark_extent_buffer_dirty/writeback cycle, but sb spans only this
	 * function.  Add an explicit SetPageUptodate call to silence the
	 * warning, e.g. on PowerPC 64.
	 */
	if (PAGE_SIZE > BTRFS_SUPER_INFO_SIZE)
		SetPageUptodate(sb->pages[0]);

	write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
	array_size = btrfs_super_sys_array_size(super_copy);

	array_ptr = super_copy->sys_chunk_array;
	sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array);
	cur_offset = 0;

	while (cur_offset < array_size) {
		disk_key = (struct btrfs_disk_key *)array_ptr;
		len = sizeof(*disk_key);
		if (cur_offset + len > array_size)
			goto out_short_read;

		btrfs_disk_key_to_cpu(&key, disk_key);

		array_ptr += len;
		sb_array_offset += len;
		cur_offset += len;

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)sb_array_offset;
			/*
			 * At least one btrfs_chunk with one stripe must be
			 * present, the exact stripe count check comes
			 * afterwards.
			 */
			len = btrfs_chunk_item_size(1);
			if (cur_offset + len > array_size)
				goto out_short_read;

			num_stripes = btrfs_chunk_num_stripes(sb, chunk);
			if (!num_stripes) {
				printk(KERN_ERR
				       "BTRFS: invalid number of stripes %u in sys_array at offset %u\n",
				       num_stripes, cur_offset);
				ret = -EIO;
				break;
			}

			len = btrfs_chunk_item_size(num_stripes);
			if (cur_offset + len > array_size)
				goto out_short_read;

			ret = read_one_chunk(root, &key, sb, chunk);
			if (ret)
				break;
		} else {
			printk(KERN_ERR
			       "BTRFS: unexpected item type %u in sys_array at offset %u\n",
			       (u32)key.type, cur_offset);
			ret = -EIO;
			break;
		}
		array_ptr += len;
		sb_array_offset += len;
		cur_offset += len;
	}
	free_extent_buffer_stale(sb);
	return ret;

out_short_read:
	printk(KERN_ERR "BTRFS: sys_array too short to read %u bytes at offset %u\n",
	       len, cur_offset);
	free_extent_buffer_stale(sb);
	return -EIO;
}
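/*
 * Worked example of the sys_array walk above (illustrative; the struct
 * sizes quoted here are assumptions about the on-disk format): each
 * iteration consumes a 17-byte btrfs_disk_key followed by
 * btrfs_chunk_item_size(num_stripes) bytes of chunk item, i.e. 80 bytes
 * for one stripe plus 32 per additional stripe.  A single-stripe SYSTEM
 * chunk therefore advances cur_offset by 17 + 80 = 97 bytes.
 */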
int btrfs_read_chunk_tree(struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	int ret;
	int slot;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	mutex_lock(&uuid_mutex);
	lock_chunks(root);

	/*
	 * Read all device items, and then all the chunk items.  All
	 * device items are found before any chunk item (their object id
	 * is smaller than the lowest possible object id for a chunk
	 * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID).
	 */
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;
	while (1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
			break;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, slot);
		if (found_key.type == BTRFS_DEV_ITEM_KEY) {
			struct btrfs_dev_item *dev_item;
			dev_item = btrfs_item_ptr(leaf, slot,
						  struct btrfs_dev_item);
			ret = read_one_dev(root, leaf, dev_item);
			if (ret)
				goto error;
		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
			struct btrfs_chunk *chunk;
			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
			ret = read_one_chunk(root, &found_key, leaf, chunk);
			if (ret)
				goto error;
		}
		path->slots[0]++;
	}
	ret = 0;
error:
	unlock_chunks(root);
	mutex_unlock(&uuid_mutex);

	btrfs_free_path(path);
	return ret;
}
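/*
 * Key-ordering example for the walk above (illustrative; the constant
 * values are assumptions stated for the example): device items live at
 * (BTRFS_DEV_ITEMS_OBJECTID = 1, BTRFS_DEV_ITEM_KEY, devid) while chunk
 * items live at (BTRFS_FIRST_CHUNK_TREE_OBJECTID = 256,
 * BTRFS_CHUNK_ITEM_KEY, logical), so a single forward scan starting at
 * objectid 1 visits every device before the first chunk, and
 * read_one_chunk() can always resolve stripes to already-known devices.
 */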
void btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;

	while (fs_devices) {
		mutex_lock(&fs_devices->device_list_mutex);
		list_for_each_entry(device, &fs_devices->devices, dev_list)
			device->dev_root = fs_info->dev_root;
		mutex_unlock(&fs_devices->device_list_mutex);

		fs_devices = fs_devices->seed;
	}
}

static void __btrfs_reset_dev_stats(struct btrfs_device *dev)
{
	int i;

	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		btrfs_dev_stat_reset(dev, i);
}

int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
{
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_root *dev_root = fs_info->dev_root;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct extent_buffer *eb;
	int slot;
	int ret = 0;
	struct btrfs_device *device;
	struct btrfs_path *path = NULL;
	int i;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		int item_size;
		struct btrfs_dev_stats_item *ptr;

		key.objectid = BTRFS_DEV_STATS_OBJECTID;
		key.type = BTRFS_PERSISTENT_ITEM_KEY;
		key.offset = device->devid;
		ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0);
		if (ret) {
			__btrfs_reset_dev_stats(device);
			device->dev_stats_valid = 1;
			btrfs_release_path(path);
			continue;
		}
		slot = path->slots[0];
		eb = path->nodes[0];
		btrfs_item_key_to_cpu(eb, &found_key, slot);
		item_size = btrfs_item_size_nr(eb, slot);

		ptr = btrfs_item_ptr(eb, slot,
				     struct btrfs_dev_stats_item);

		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
			if (item_size >= (1 + i) * sizeof(__le64))
				btrfs_dev_stat_set(device, i,
					btrfs_dev_stats_value(eb, ptr, i));
			else
				btrfs_dev_stat_reset(device, i);
		}

		device->dev_stats_valid = 1;
		btrfs_dev_stat_print_on_load(device);
		btrfs_release_path(path);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

out:
	btrfs_free_path(path);
	return ret < 0 ? ret : 0;
}

static int update_dev_stat_item(struct btrfs_trans_handle *trans,
				struct btrfs_root *dev_root,
				struct btrfs_device *device)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *eb;
	struct btrfs_dev_stats_item *ptr;
	int ret;
	int i;

	key.objectid = BTRFS_DEV_STATS_OBJECTID;
	key.type = BTRFS_PERSISTENT_ITEM_KEY;
	key.offset = device->devid;

	path = btrfs_alloc_path();
	BUG_ON(!path);
	ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
	if (ret < 0) {
		btrfs_warn_in_rcu(dev_root->fs_info,
			"error %d while searching for dev_stats item for device %s",
			ret, rcu_str_deref(device->name));
		goto out;
	}

	if (ret == 0 &&
	    btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
		/* need to delete old one and insert a new one */
		ret = btrfs_del_item(trans, dev_root, path);
		if (ret != 0) {
			btrfs_warn_in_rcu(dev_root->fs_info,
				"delete too small dev_stats item for device %s failed %d",
				rcu_str_deref(device->name), ret);
			goto out;
		}
		ret = 1;
	}

	if (ret == 1) {
		/* need to insert a new item */
		btrfs_release_path(path);
		ret = btrfs_insert_empty_item(trans, dev_root, path,
					      &key, sizeof(*ptr));
		if (ret < 0) {
			btrfs_warn_in_rcu(dev_root->fs_info,
				"insert dev_stats item for device %s failed %d",
				rcu_str_deref(device->name), ret);
			goto out;
		}
	}

	eb = path->nodes[0];
	ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		btrfs_set_dev_stats_value(eb, ptr, i,
					  btrfs_dev_stat_read(device, i));
	btrfs_mark_buffer_dirty(eb);

out:
	btrfs_free_path(path);
	return ret;
}
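/*
 * Compatibility example for the two routines above (illustrative): if
 * an older kernel persisted fewer than BTRFS_DEV_STAT_VALUES_MAX
 * counters, btrfs_init_dev_stats() loads only the counters that fit in
 * item_size and resets the rest, while update_dev_stat_item() deletes
 * the undersized item and reinserts it at the full
 * sizeof(struct btrfs_dev_stats_item) so all counters fit.
 */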
/*
 * Called from commit_transaction.  Writes all changed device stats to
 * disk.
 */
int btrfs_run_dev_stats(struct btrfs_trans_handle *trans,
			struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *dev_root = fs_info->dev_root;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;
	int stats_cnt;
	int ret = 0;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		if (!device->dev_stats_valid || !btrfs_dev_stats_dirty(device))
			continue;

		stats_cnt = atomic_read(&device->dev_stats_ccnt);
		ret = update_dev_stat_item(trans, dev_root, device);
		if (!ret)
			atomic_sub(stats_cnt, &device->dev_stats_ccnt);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	return ret;
}

void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
{
	btrfs_dev_stat_inc(dev, index);
	btrfs_dev_stat_print_on_error(dev);
}

static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
{
	if (!dev->dev_stats_valid)
		return;
	btrfs_err_rl_in_rcu(dev->dev_root->fs_info,
		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
		rcu_str_deref(dev->name),
		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
}

static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
{
	int i;

	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		if (btrfs_dev_stat_read(dev, i) != 0)
			break;
	if (i == BTRFS_DEV_STAT_VALUES_MAX)
		return; /* all values == 0, suppress message */

	btrfs_info_in_rcu(dev->dev_root->fs_info,
		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
		rcu_str_deref(dev->name),
		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
}

int btrfs_get_dev_stats(struct btrfs_root *root,
			struct btrfs_ioctl_get_dev_stats *stats)
{
	struct btrfs_device *dev;
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
	int i;

	mutex_lock(&fs_devices->device_list_mutex);
	dev = btrfs_find_device(root->fs_info, stats->devid, NULL, NULL);
	mutex_unlock(&fs_devices->device_list_mutex);

	if (!dev) {
		btrfs_warn(root->fs_info, "get dev_stats failed, device not found");
		return -ENODEV;
	} else if (!dev->dev_stats_valid) {
		btrfs_warn(root->fs_info, "get dev_stats failed, not yet valid");
		return -ENODEV;
	} else if (stats->flags & BTRFS_DEV_STATS_RESET) {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
			if (stats->nr_items > i)
				stats->values[i] =
					btrfs_dev_stat_read_and_reset(dev, i);
			else
				btrfs_dev_stat_reset(dev, i);
		}
	} else {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
			if (stats->nr_items > i)
				stats->values[i] = btrfs_dev_stat_read(dev, i);
	}
	if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
		stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
	return 0;
}
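/*
 * Illustrative ioctl view of the function above (hypothetical userspace
 * snippet; the ioctl name and field use are assumptions stated for the
 * example):
 *
 *	struct btrfs_ioctl_get_dev_stats st = {
 *		.devid = 1,
 *		.nr_items = BTRFS_DEV_STAT_VALUES_MAX,
 *	};
 *	ioctl(fd, BTRFS_IOC_GET_DEV_STATS, &st);
 *
 * Setting BTRFS_DEV_STATS_RESET in st.flags reads and zeroes the
 * counters in one step; nr_items is clamped on return as shown above.
 */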
void btrfs_scratch_superblocks(struct block_device *bdev, char *device_path)
{
	struct buffer_head *bh;
	struct btrfs_super_block *disk_super;
	int copy_num;

	if (!bdev)
		return;

	for (copy_num = 0; copy_num < BTRFS_SUPER_MIRROR_MAX;
	     copy_num++) {

		if (btrfs_read_dev_one_super(bdev, copy_num, &bh))
			continue;

		disk_super = (struct btrfs_super_block *)bh->b_data;

		memset(&disk_super->magic, 0, sizeof(disk_super->magic));
		set_buffer_dirty(bh);
		sync_dirty_buffer(bh);
		brelse(bh);
	}

	/* Notify udev that device has changed */
	btrfs_kobject_uevent(bdev, KOBJ_CHANGE);

	/* Update ctime/mtime for device path for libblkid */
	update_dev_time(device_path);
}

/*
 * Update the size of all devices, which is used for writing out the
 * super blocks.
 */
void btrfs_update_commit_device_size(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *curr, *next;

	if (list_empty(&fs_devices->resized_devices))
		return;

	mutex_lock(&fs_devices->device_list_mutex);
	lock_chunks(fs_info->dev_root);
	list_for_each_entry_safe(curr, next, &fs_devices->resized_devices,
				 resized_list) {
		list_del_init(&curr->resized_list);
		curr->commit_total_bytes = curr->disk_total_bytes;
	}
	unlock_chunks(fs_info->dev_root);
	mutex_unlock(&fs_devices->device_list_mutex);
}

/* Must be invoked during the transaction commit */
void btrfs_update_commit_device_bytes_used(struct btrfs_root *root,
					   struct btrfs_transaction *transaction)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct btrfs_device *dev;
	int i;

	if (list_empty(&transaction->pending_chunks))
		return;

	/* In order to kick the device replace finish process */
	lock_chunks(root);
	list_for_each_entry(em, &transaction->pending_chunks, list) {
		map = em->map_lookup;

		for (i = 0; i < map->num_stripes; i++) {
			dev = map->stripes[i].dev;
			dev->commit_bytes_used = dev->bytes_used;
		}
	}
	unlock_chunks(root);
}

void btrfs_set_fs_info_ptr(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;

	while (fs_devices) {
		fs_devices->fs_info = fs_info;
		fs_devices = fs_devices->seed;
	}
}

void btrfs_reset_fs_info_ptr(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;

	while (fs_devices) {
		fs_devices->fs_info = NULL;
		fs_devices = fs_devices->seed;
	}
}
static void btrfs_close_one_device(struct btrfs_device *device)
{
	struct btrfs_fs_devices *fs_devices = device->fs_devices;
	struct btrfs_device *new_device;
	struct rcu_string *name;

	if (device->bdev)
		fs_devices->open_devices--;

	if (device->writeable &&
	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
		list_del_init(&device->dev_alloc_list);
		fs_devices->rw_devices--;
	}

	if (device->missing)
		fs_devices->missing_devices--;

	new_device = btrfs_alloc_device(NULL, &device->devid,
					device->uuid);
	BUG_ON(IS_ERR(new_device)); /* -ENOMEM */

	/* Safe because we are under uuid_mutex */
	if (device->name) {
		name = rcu_string_strdup(device->name->str, GFP_NOFS);
		BUG_ON(!name); /* -ENOMEM */
		rcu_assign_pointer(new_device->name, name);
	}

	list_replace_rcu(&device->dev_list, &new_device->dev_list);
	new_device->fs_devices = device->fs_devices;

	call_rcu(&device->rcu, free_device);
}