/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/random.h>
#include <linux/iocontext.h>
#include <asm/div64.h>
#include "compat.h"
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "async-thread.h"

struct map_lookup {
	u64 type;
	int io_align;
	int io_width;
	int stripe_len;
	int sector_size;
	int num_stripes;
	int sub_stripes;
	struct btrfs_bio_stripe stripes[];
};

static int init_first_rw_device(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_device *device);
static int btrfs_relocate_sys_chunks(struct btrfs_root *root);

#define map_lookup_size(n) (sizeof(struct map_lookup) + \
			    (sizeof(struct btrfs_bio_stripe) * (n)))

static DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);

void btrfs_lock_volumes(void)
{
	mutex_lock(&uuid_mutex);
}

void btrfs_unlock_volumes(void)
{
	mutex_unlock(&uuid_mutex);
}

static void lock_chunks(struct btrfs_root *root)
{
	mutex_lock(&root->fs_info->chunk_mutex);
}

static void unlock_chunks(struct btrfs_root *root)
{
	mutex_unlock(&root->fs_info->chunk_mutex);
}

static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;
	WARN_ON(fs_devices->opened);
	while (!list_empty(&fs_devices->devices)) {
		device = list_entry(fs_devices->devices.next,
				    struct btrfs_device, dev_list);
		list_del(&device->dev_list);
		kfree(device->name);
		kfree(device);
	}
	kfree(fs_devices);
}

int btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;

	while (!list_empty(&fs_uuids)) {
		fs_devices = list_entry(fs_uuids.next,
					struct btrfs_fs_devices, list);
		list_del(&fs_devices->list);
		free_fs_devices(fs_devices);
	}
	return 0;
}

static noinline struct btrfs_device *__find_device(struct list_head *head,
						   u64 devid, u8 *uuid)
{
	struct btrfs_device *dev;

	list_for_each_entry(dev, head, dev_list) {
		if (dev->devid == devid &&
		    (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
			return dev;
		}
	}
	return NULL;
}

static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
{
	struct btrfs_fs_devices *fs_devices;

	list_for_each_entry(fs_devices, &fs_uuids, list) {
		if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
			return fs_devices;
	}
	return NULL;
}
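
/*
 * put a chain of pending bios back onto the head of the device's list,
 * preserving their order; used when run_scheduled_bios backs off part
 * way through a batch
 */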
static void requeue_list(struct btrfs_pending_bios *pending_bios,
			 struct bio *head, struct bio *tail)
{

	struct bio *old_head;

	old_head = pending_bios->head;
	pending_bios->head = head;
	if (pending_bios->tail)
		tail->bi_next = old_head;
	else
		pending_bios->tail = tail;
}

/*
 * we try to collect pending bios for a device so we don't get a large
 * number of procs sending bios down to the same device.  This greatly
 * improves the scheduler's ability to collect and merge the bios.
 *
 * But, it also turns into a long list of bios to process and that is sure
 * to eventually make the worker thread block.  The solution here is to
 * make some progress and then put this work struct back at the end of
 * the list if the block device is congested.  This way, multiple devices
 * can make progress from a single worker thread.
 */
static noinline int run_scheduled_bios(struct btrfs_device *device)
{
	struct bio *pending;
	struct backing_dev_info *bdi;
	struct btrfs_fs_info *fs_info;
	struct btrfs_pending_bios *pending_bios;
	struct bio *tail;
	struct bio *cur;
	int again = 0;
	unsigned long num_run;
	unsigned long num_sync_run;
	unsigned long batch_run = 0;
	unsigned long limit;
	unsigned long last_waited = 0;
	int force_reg = 0;

	bdi = blk_get_backing_dev_info(device->bdev);
	fs_info = device->dev_root->fs_info;
	limit = btrfs_async_submit_limit(fs_info);
	limit = limit * 2 / 3;

	/* we want to make sure that every time we switch from the sync
	 * list to the normal list, we unplug
	 */
	num_sync_run = 0;

loop:
	spin_lock(&device->io_lock);

loop_lock:
	num_run = 0;

	/* take all the bios off the list at once and process them
	 * later on (without the lock held).  But, remember the
	 * tail and other pointers so the bios can be properly reinserted
	 * into the list if we hit congestion
	 */
	if (!force_reg && device->pending_sync_bios.head) {
		pending_bios = &device->pending_sync_bios;
		force_reg = 1;
	} else {
		pending_bios = &device->pending_bios;
		force_reg = 0;
	}

	pending = pending_bios->head;
	tail = pending_bios->tail;
	WARN_ON(pending && !tail);

	/*
	 * if pending was null this time around, no bios need processing
	 * at all and we can stop.  Otherwise it'll loop back up again
	 * and do an additional check so no bios are missed.
	 *
	 * device->running_pending is used to synchronize with the
	 * schedule_bio code.
	 */
	if (device->pending_sync_bios.head == NULL &&
	    device->pending_bios.head == NULL) {
		again = 0;
		device->running_pending = 0;
	} else {
		again = 1;
		device->running_pending = 1;
	}

	pending_bios->head = NULL;
	pending_bios->tail = NULL;

	spin_unlock(&device->io_lock);

	/*
	 * if we're doing the regular priority list, make sure we unplug
	 * for any high prio bios we've sent down
	 */
	if (pending_bios == &device->pending_bios && num_sync_run > 0) {
		num_sync_run = 0;
		blk_run_backing_dev(bdi, NULL);
	}

	while (pending) {

		rmb();
		/* we want to work on both lists, but do more bios on the
		 * sync list than the regular list
		 */
		if ((num_run > 32 &&
		    pending_bios != &device->pending_sync_bios &&
		    device->pending_sync_bios.head) ||
		   (num_run > 64 && pending_bios == &device->pending_sync_bios &&
		    device->pending_bios.head)) {
			spin_lock(&device->io_lock);
			requeue_list(pending_bios, pending, tail);
			goto loop_lock;
		}

		cur = pending;
		pending = pending->bi_next;
		cur->bi_next = NULL;
		atomic_dec(&fs_info->nr_async_bios);

		if (atomic_read(&fs_info->nr_async_bios) < limit &&
		    waitqueue_active(&fs_info->async_submit_wait))
			wake_up(&fs_info->async_submit_wait);

		BUG_ON(atomic_read(&cur->bi_cnt) == 0);

		if (bio_rw_flagged(cur, BIO_RW_SYNCIO))
			num_sync_run++;

		submit_bio(cur->bi_rw, cur);
		num_run++;
		batch_run++;
		if (need_resched()) {
			if (num_sync_run) {
				blk_run_backing_dev(bdi, NULL);
				num_sync_run = 0;
			}
			cond_resched();
		}

		/*
		 * we made progress, there is more work to do and the bdi
		 * is now congested.  Back off and let other work structs
		 * run instead
		 */
		if (pending && bdi_write_congested(bdi) && batch_run > 8 &&
		    fs_info->fs_devices->open_devices > 1) {
			struct io_context *ioc;

			ioc = current->io_context;

			/*
			 * the main goal here is that we don't want to
			 * block if we're going to be able to submit
			 * more requests without blocking.
			 *
			 * This code does two great things, it pokes into
			 * the elevator code from a filesystem _and_
			 * it makes assumptions about how batching works.
			 */
			if (ioc && ioc->nr_batch_requests > 0 &&
			    time_before(jiffies, ioc->last_waited + HZ/50UL) &&
			    (last_waited == 0 ||
			     ioc->last_waited == last_waited)) {
				/*
				 * we want to go through our batch of
				 * requests and stop.  So, we copy out
				 * the ioc->last_waited time and test
				 * against it before looping
				 */
				last_waited = ioc->last_waited;
				if (need_resched()) {
					if (num_sync_run) {
						blk_run_backing_dev(bdi, NULL);
						num_sync_run = 0;
					}
					cond_resched();
				}
				continue;
			}
			spin_lock(&device->io_lock);
			requeue_list(pending_bios, pending, tail);
			device->running_pending = 1;

			spin_unlock(&device->io_lock);
			btrfs_requeue_work(&device->work);
			goto done;
		}
	}

	if (num_sync_run) {
		num_sync_run = 0;
		blk_run_backing_dev(bdi, NULL);
	}
	/*
	 * IO has already been through a long path to get here.  Checksumming,
	 * async helper threads, perhaps compression.  We've done a pretty
	 * good job of collecting a batch of IO and should just unplug
	 * the device right away.
	 *
	 * This will help anyone who is waiting on the IO, they might have
	 * already unplugged, but managed to do so before the bio they
	 * cared about found its way down here.
	 */
	blk_run_backing_dev(bdi, NULL);

	cond_resched();
	if (again)
		goto loop;

	spin_lock(&device->io_lock);
	if (device->pending_bios.head || device->pending_sync_bios.head)
		goto loop_lock;
	spin_unlock(&device->io_lock);

done:
	return 0;
}

static void pending_bios_fn(struct btrfs_work *work)
{
	struct btrfs_device *device;

	device = container_of(work, struct btrfs_device, work);
	run_scheduled_bios(device);
}

static noinline int device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   u64 devid, struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices;
	u64 found_transid = btrfs_super_generation(disk_super);
	char *name;

	fs_devices = find_fsid(disk_super->fsid);
	if (!fs_devices) {
		fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
		if (!fs_devices)
			return -ENOMEM;
		INIT_LIST_HEAD(&fs_devices->devices);
		INIT_LIST_HEAD(&fs_devices->alloc_list);
		list_add(&fs_devices->list, &fs_uuids);
		memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
		mutex_init(&fs_devices->device_list_mutex);
		device = NULL;
	} else {
		device = __find_device(&fs_devices->devices, devid,
				       disk_super->dev_item.uuid);
	}
	if (!device) {
		if (fs_devices->opened)
			return -EBUSY;

		device = kzalloc(sizeof(*device), GFP_NOFS);
		if (!device) {
			/* we can safely leave the fs_devices entry around */
			return -ENOMEM;
		}
		device->devid = devid;
		device->work.func = pending_bios_fn;
		memcpy(device->uuid, disk_super->dev_item.uuid,
		       BTRFS_UUID_SIZE);
		device->barriers = 1;
		spin_lock_init(&device->io_lock);
		device->name = kstrdup(path, GFP_NOFS);
		if (!device->name) {
			kfree(device);
			return -ENOMEM;
		}
		INIT_LIST_HEAD(&device->dev_alloc_list);

		mutex_lock(&fs_devices->device_list_mutex);
		list_add(&device->dev_list, &fs_devices->devices);
		mutex_unlock(&fs_devices->device_list_mutex);

		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	} else if (strcmp(device->name, path)) {
		name = kstrdup(path, GFP_NOFS);
		if (!name)
			return -ENOMEM;
		kfree(device->name);
		device->name = name;
	}

	if (found_transid > fs_devices->latest_trans) {
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
	}
	*fs_devices_ret = fs_devices;
	return 0;
}
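
/*
 * allocate a new fs_devices structure and copy in the devices from
 * @orig; the copies share no memory with the originals, so the clone
 * can outlive them
 */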
static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
{
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_device *device;
	struct btrfs_device *orig_dev;

	fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
	if (!fs_devices)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&fs_devices->devices);
	INIT_LIST_HEAD(&fs_devices->alloc_list);
	INIT_LIST_HEAD(&fs_devices->list);
	mutex_init(&fs_devices->device_list_mutex);
	fs_devices->latest_devid = orig->latest_devid;
	fs_devices->latest_trans = orig->latest_trans;
	memcpy(fs_devices->fsid, orig->fsid, sizeof(fs_devices->fsid));

	mutex_lock(&orig->device_list_mutex);
	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
		device = kzalloc(sizeof(*device), GFP_NOFS);
		if (!device)
			goto error;

		device->name = kstrdup(orig_dev->name, GFP_NOFS);
		if (!device->name) {
			kfree(device);
			goto error;
		}

		device->devid = orig_dev->devid;
		device->work.func = pending_bios_fn;
		memcpy(device->uuid, orig_dev->uuid, sizeof(device->uuid));
		device->barriers = 1;
		spin_lock_init(&device->io_lock);
		INIT_LIST_HEAD(&device->dev_list);
		INIT_LIST_HEAD(&device->dev_alloc_list);

		list_add(&device->dev_list, &fs_devices->devices);
		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	}
	mutex_unlock(&orig->device_list_mutex);
	return fs_devices;
error:
	mutex_unlock(&orig->device_list_mutex);
	free_fs_devices(fs_devices);
	return ERR_PTR(-ENOMEM);
}

int btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device, *next;

	mutex_lock(&uuid_mutex);
again:
	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
		if (device->in_fs_metadata)
			continue;

		if (device->bdev) {
			close_bdev_exclusive(device->bdev, device->mode);
			device->bdev = NULL;
			fs_devices->open_devices--;
		}
		if (device->writeable) {
			list_del_init(&device->dev_alloc_list);
			device->writeable = 0;
			fs_devices->rw_devices--;
		}
		list_del_init(&device->dev_list);
		fs_devices->num_devices--;
		kfree(device->name);
		kfree(device);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	if (fs_devices->seed) {
		fs_devices = fs_devices->seed;
		goto again;
	}

	mutex_unlock(&uuid_mutex);
	return 0;
}

static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;

	if (--fs_devices->opened > 0)
		return 0;

	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		if (device->bdev) {
			close_bdev_exclusive(device->bdev, device->mode);
			fs_devices->open_devices--;
		}
		if (device->writeable) {
			list_del_init(&device->dev_alloc_list);
			fs_devices->rw_devices--;
		}

		device->bdev = NULL;
		device->writeable = 0;
		device->in_fs_metadata = 0;
	}
	WARN_ON(fs_devices->open_devices);
	WARN_ON(fs_devices->rw_devices);
	fs_devices->opened = 0;
	fs_devices->seeding = 0;

	return 0;
}

int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_fs_devices *seed_devices = NULL;
	int ret;

	mutex_lock(&uuid_mutex);
	ret = __btrfs_close_devices(fs_devices);
	if (!fs_devices->opened) {
		seed_devices = fs_devices->seed;
		fs_devices->seed = NULL;
	}
	mutex_unlock(&uuid_mutex);

	while (seed_devices) {
		fs_devices = seed_devices;
		seed_devices = fs_devices->seed;
		__btrfs_close_devices(fs_devices);
		free_fs_devices(fs_devices);
	}
	return ret;
}
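
/*
 * open every device in the list, read each super block and remember the
 * device with the highest generation as latest_bdev; the filesystem is
 * considered seeding only if all of its devices carry the seeding flag
 */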
static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
				fmode_t flags, void *holder)
{
	struct block_device *bdev;
	struct list_head *head = &fs_devices->devices;
	struct btrfs_device *device;
	struct block_device *latest_bdev = NULL;
	struct buffer_head *bh;
	struct btrfs_super_block *disk_super;
	u64 latest_devid = 0;
	u64 latest_transid = 0;
	u64 devid;
	int seeding = 1;
	int ret = 0;

	list_for_each_entry(device, head, dev_list) {
		if (device->bdev)
			continue;
		if (!device->name)
			continue;

		bdev = open_bdev_exclusive(device->name, flags, holder);
		if (IS_ERR(bdev)) {
			printk(KERN_INFO "open %s failed\n", device->name);
			goto error;
		}
		set_blocksize(bdev, 4096);

		bh = btrfs_read_dev_super(bdev);
		if (!bh)
			goto error_close;

		disk_super = (struct btrfs_super_block *)bh->b_data;
		devid = btrfs_stack_device_id(&disk_super->dev_item);
		if (devid != device->devid)
			goto error_brelse;

		if (memcmp(device->uuid, disk_super->dev_item.uuid,
			   BTRFS_UUID_SIZE))
			goto error_brelse;

		device->generation = btrfs_super_generation(disk_super);
		if (!latest_transid || device->generation > latest_transid) {
			latest_devid = devid;
			latest_transid = device->generation;
			latest_bdev = bdev;
		}

		if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
			device->writeable = 0;
		} else {
			device->writeable = !bdev_read_only(bdev);
			seeding = 0;
		}

		device->bdev = bdev;
		device->in_fs_metadata = 0;
		device->mode = flags;

		if (!blk_queue_nonrot(bdev_get_queue(bdev)))
			fs_devices->rotating = 1;

		fs_devices->open_devices++;
		if (device->writeable) {
			fs_devices->rw_devices++;
			list_add(&device->dev_alloc_list,
				 &fs_devices->alloc_list);
		}
		continue;

error_brelse:
		brelse(bh);
error_close:
		close_bdev_exclusive(bdev, FMODE_READ);
error:
		continue;
	}
	if (fs_devices->open_devices == 0) {
		ret = -EIO;
		goto out;
	}
	fs_devices->seeding = seeding;
	fs_devices->opened = 1;
	fs_devices->latest_bdev = latest_bdev;
	fs_devices->latest_devid = latest_devid;
	fs_devices->latest_trans = latest_transid;
	fs_devices->total_rw_bytes = 0;
out:
	return ret;
}

int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       fmode_t flags, void *holder)
{
	int ret;

	mutex_lock(&uuid_mutex);
	if (fs_devices->opened) {
		fs_devices->opened++;
		ret = 0;
	} else {
		ret = __btrfs_open_devices(fs_devices, flags, holder);
	}
	mutex_unlock(&uuid_mutex);
	return ret;
}
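
/*
 * read the super block off one device, print the label (or fsid) and
 * generation that were found, and register the device in the global
 * list of known btrfs devices
 */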
int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
			  struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_super_block *disk_super;
	struct block_device *bdev;
	struct buffer_head *bh;
	int ret;
	u64 devid;
	u64 transid;

	mutex_lock(&uuid_mutex);

	bdev = open_bdev_exclusive(path, flags, holder);

	if (IS_ERR(bdev)) {
		ret = PTR_ERR(bdev);
		goto error;
	}

	ret = set_blocksize(bdev, 4096);
	if (ret)
		goto error_close;
	bh = btrfs_read_dev_super(bdev);
	if (!bh) {
		ret = -EIO;
		goto error_close;
	}
	disk_super = (struct btrfs_super_block *)bh->b_data;
	devid = btrfs_stack_device_id(&disk_super->dev_item);
	transid = btrfs_super_generation(disk_super);
	if (disk_super->label[0])
		printk(KERN_INFO "device label %s ", disk_super->label);
	else {
		/* FIXME, make a real uuid parser */
		printk(KERN_INFO "device fsid %llx-%llx ",
		       *(unsigned long long *)disk_super->fsid,
		       *(unsigned long long *)(disk_super->fsid + 8));
	}
	printk(KERN_CONT "devid %llu transid %llu %s\n",
	       (unsigned long long)devid, (unsigned long long)transid, path);
	ret = device_list_add(path, disk_super, devid, fs_devices_ret);

	brelse(bh);
error_close:
	close_bdev_exclusive(bdev, flags);
error:
	mutex_unlock(&uuid_mutex);
	return ret;
}

/*
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
 */
int find_free_dev_extent(struct btrfs_trans_handle *trans,
			 struct btrfs_device *device, u64 num_bytes,
			 u64 *start, u64 *max_avail)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent = NULL;
	struct btrfs_path *path;
	u64 hole_size = 0;
	u64 last_byte = 0;
	u64 search_start = 0;
	u64 search_end = device->total_bytes;
	int ret;
	int slot = 0;
	int start_found;
	struct extent_buffer *l;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = 2;
	start_found = 0;

	/* FIXME use last free of some kind */

	/* we don't want to overwrite the superblock on the drive,
	 * so we make sure to start at an offset of at least 1MB
	 */
	search_start = max((u64)1024 * 1024, search_start);

	if (root->fs_info->alloc_start + num_bytes <= device->total_bytes)
		search_start = max(root->fs_info->alloc_start, search_start);

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid, key.type);
		if (ret < 0)
			goto error;
		if (ret > 0)
			start_found = 1;
	}
	l = path->nodes[0];
	btrfs_item_key_to_cpu(l, &key, path->slots[0]);
	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
no_more_items:
			if (!start_found) {
				if (search_start >= search_end) {
					ret = -ENOSPC;
					goto error;
				}
				*start = search_start;
				start_found = 1;
				goto check_pending;
			}
			*start = last_byte > search_start ?
				last_byte : search_start;
			if (search_end <= *start) {
				ret = -ENOSPC;
				goto error;
			}
			goto check_pending;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			goto no_more_items;

		if (key.offset >= search_start && key.offset > last_byte &&
		    start_found) {
			if (last_byte < search_start)
				last_byte = search_start;
			hole_size = key.offset - last_byte;

			if (hole_size > *max_avail)
				*max_avail = hole_size;

			if (key.offset > last_byte &&
			    hole_size >= num_bytes) {
				*start = last_byte;
				goto check_pending;
			}
		}
		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
			goto next;

		start_found = 1;
		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		last_byte = key.offset + btrfs_dev_extent_length(l, dev_extent);
next:
		path->slots[0]++;
		cond_resched();
	}
check_pending:
	/* we have to make sure we didn't find an extent that has already
	 * been allocated by the map tree or the original allocation
	 */
	BUG_ON(*start < search_start);

	if (*start + num_bytes > search_end) {
		ret = -ENOSPC;
		goto error;
	}
	/* check for pending inserts here */
	ret = 0;

error:
	btrfs_free_path(path);
	return ret;
}
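
/*
 * remove the dev extent item that covers @start from the device tree
 * and credit the freed bytes back to the device's bytes_used
 */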
static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
				 struct btrfs_device *device,
				 u64 start)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf = NULL;
	struct btrfs_dev_extent *extent = NULL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid,
					  BTRFS_DEV_EXTENT_KEY);
		BUG_ON(ret);
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
		BUG_ON(found_key.offset > start || found_key.offset +
		       btrfs_dev_extent_length(leaf, extent) < start);
		ret = 0;
	} else if (ret == 0) {
		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
	}
	BUG_ON(ret);

	if (device->bytes_used > 0)
		device->bytes_used -= btrfs_dev_extent_length(leaf, extent);
	ret = btrfs_del_item(trans, root, path);
	BUG_ON(ret);

	btrfs_free_path(path);
	return ret;
}
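
/*
 * insert a dev extent item mapping @num_bytes of the device, starting
 * at @start, back to the chunk identified by the chunk_* arguments
 */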
int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
			   struct btrfs_device *device,
			   u64 chunk_tree, u64 chunk_objectid,
			   u64 chunk_offset, u64 start, u64 num_bytes)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *extent;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	WARN_ON(!device->in_fs_metadata);
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*extent));
	BUG_ON(ret);

	leaf = path->nodes[0];
	extent = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_dev_extent);
	btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
	btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);

	write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
		    (unsigned long)btrfs_dev_extent_chunk_tree_uuid(extent),
		    BTRFS_UUID_SIZE);

	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_free_path(path);
	return ret;
}

static noinline int find_next_chunk(struct btrfs_root *root,
				    u64 objectid, u64 *offset)
{
	struct btrfs_path *path;
	int ret;
	struct btrfs_key key;
	struct btrfs_chunk *chunk;
	struct btrfs_key found_key;

	path = btrfs_alloc_path();
	BUG_ON(!path);

	key.objectid = objectid;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0);

	ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
	if (ret) {
		*offset = 0;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		if (found_key.objectid != objectid)
			*offset = 0;
		else {
			chunk = btrfs_item_ptr(path->nodes[0], path->slots[0],
					       struct btrfs_chunk);
			*offset = found_key.offset +
				btrfs_chunk_length(path->nodes[0], chunk);
		}
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

static noinline int find_next_devid(struct btrfs_root *root, u64 *objectid)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0);

	ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
				  BTRFS_DEV_ITEM_KEY);
	if (ret) {
		*objectid = 1;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		*objectid = found_key.offset + 1;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

/*
 * the device information is stored in the chunk root
 * the btrfs_device struct should be fully filled in
 */
int btrfs_add_device(struct btrfs_trans_handle *trans,
		     struct btrfs_root *root,
		     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long ptr;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*dev_item));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_generation(leaf, dev_item, 0);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_set_device_group(leaf, dev_item, 0);
	btrfs_set_device_seek_speed(leaf, dev_item, 0);
	btrfs_set_device_bandwidth(leaf, dev_item, 0);
	btrfs_set_device_start_offset(leaf, dev_item, 0);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
	ptr = (unsigned long)btrfs_device_fsid(dev_item);
	write_extent_buffer(leaf, root->fs_info->fsid, ptr, BTRFS_UUID_SIZE);
	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

static int btrfs_rm_dev_item(struct btrfs_root *root,
			     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_trans_handle *trans;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 0);
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;
	lock_chunks(root);

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
	if (ret)
		goto out;
out:
	btrfs_free_path(path);
	unlock_chunks(root);
	btrfs_commit_transaction(trans, root);
	return ret;
}
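
/*
 * remove one device from a mounted filesystem: migrate all of its data
 * off with btrfs_shrink_device(device, 0), delete its dev item from the
 * chunk tree, and wipe the super block magic so the device is no longer
 * detected as part of the filesystem
 */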
int btrfs_rm_device(struct btrfs_root *root, char *device_path)
{
	struct btrfs_device *device;
	struct btrfs_device *next_device;
	struct block_device *bdev;
	struct buffer_head *bh = NULL;
	struct btrfs_super_block *disk_super;
	u64 all_avail;
	u64 devid;
	u64 num_devices;
	u8 *dev_uuid;
	int ret = 0;

	mutex_lock(&uuid_mutex);
	mutex_lock(&root->fs_info->volume_mutex);

	all_avail = root->fs_info->avail_data_alloc_bits |
		root->fs_info->avail_system_alloc_bits |
		root->fs_info->avail_metadata_alloc_bits;

	if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) &&
	    root->fs_info->fs_devices->num_devices <= 4) {
		printk(KERN_ERR "btrfs: unable to go below four devices "
		       "on raid10\n");
		ret = -EINVAL;
		goto out;
	}

	if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) &&
	    root->fs_info->fs_devices->num_devices <= 2) {
		printk(KERN_ERR "btrfs: unable to go below two "
		       "devices on raid1\n");
		ret = -EINVAL;
		goto out;
	}

	if (strcmp(device_path, "missing") == 0) {
		struct list_head *devices;
		struct btrfs_device *tmp;

		device = NULL;
		devices = &root->fs_info->fs_devices->devices;
		mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
		list_for_each_entry(tmp, devices, dev_list) {
			if (tmp->in_fs_metadata && !tmp->bdev) {
				device = tmp;
				break;
			}
		}
		mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
		bdev = NULL;
		bh = NULL;
		disk_super = NULL;
		if (!device) {
			printk(KERN_ERR "btrfs: no missing devices found to "
			       "remove\n");
			goto out;
		}
	} else {
		bdev = open_bdev_exclusive(device_path, FMODE_READ,
					   root->fs_info->bdev_holder);
		if (IS_ERR(bdev)) {
			ret = PTR_ERR(bdev);
			goto out;
		}

		set_blocksize(bdev, 4096);
		bh = btrfs_read_dev_super(bdev);
		if (!bh) {
			ret = -EIO;
			goto error_close;
		}
		disk_super = (struct btrfs_super_block *)bh->b_data;
		devid = btrfs_stack_device_id(&disk_super->dev_item);
		dev_uuid = disk_super->dev_item.uuid;
		device = btrfs_find_device(root, devid, dev_uuid,
					   disk_super->fsid);
		if (!device) {
			ret = -ENOENT;
			goto error_brelse;
		}
	}

	if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) {
		printk(KERN_ERR "btrfs: unable to remove the only writeable "
		       "device\n");
		ret = -EINVAL;
		goto error_brelse;
	}

	if (device->writeable) {
		list_del_init(&device->dev_alloc_list);
		root->fs_info->fs_devices->rw_devices--;
	}

	ret = btrfs_shrink_device(device, 0);
	if (ret)
		goto error_brelse;

	ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
	if (ret)
		goto error_brelse;

	device->in_fs_metadata = 0;

	/*
	 * the device list mutex makes sure that we don't change
	 * the device list while someone else is writing out all
	 * the device supers.
	 */
	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	list_del_init(&device->dev_list);
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	device->fs_devices->num_devices--;

	next_device = list_entry(root->fs_info->fs_devices->devices.next,
				 struct btrfs_device, dev_list);
	if (device->bdev == root->fs_info->sb->s_bdev)
		root->fs_info->sb->s_bdev = next_device->bdev;
	if (device->bdev == root->fs_info->fs_devices->latest_bdev)
		root->fs_info->fs_devices->latest_bdev = next_device->bdev;

	if (device->bdev) {
		close_bdev_exclusive(device->bdev, device->mode);
		device->bdev = NULL;
		device->fs_devices->open_devices--;
	}

	num_devices = btrfs_super_num_devices(&root->fs_info->super_copy) - 1;
	btrfs_set_super_num_devices(&root->fs_info->super_copy, num_devices);

	if (device->fs_devices->open_devices == 0) {
		struct btrfs_fs_devices *fs_devices;
		fs_devices = root->fs_info->fs_devices;
		while (fs_devices) {
			if (fs_devices->seed == device->fs_devices)
				break;
			fs_devices = fs_devices->seed;
		}
		fs_devices->seed = device->fs_devices->seed;
		device->fs_devices->seed = NULL;
		__btrfs_close_devices(device->fs_devices);
		free_fs_devices(device->fs_devices);
	}

	/*
	 * at this point, the device is zero sized.  We want to
	 * remove it from the devices list and zero out the old super
	 */
	if (device->writeable) {
		/* make sure this device isn't detected as part of
		 * the FS anymore
		 */
		memset(&disk_super->magic, 0, sizeof(disk_super->magic));
		set_buffer_dirty(bh);
		sync_dirty_buffer(bh);
	}

	kfree(device->name);
	kfree(device);
	ret = 0;

error_brelse:
	brelse(bh);
error_close:
	if (bdev)
		close_bdev_exclusive(bdev, FMODE_READ);
out:
	mutex_unlock(&root->fs_info->volume_mutex);
	mutex_unlock(&uuid_mutex);
	return ret;
}

/*
 * does all the dirty work required for changing file system's UUID.
 */
static int btrfs_prepare_sprout(struct btrfs_trans_handle *trans,
				struct btrfs_root *root)
{
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
	struct btrfs_fs_devices *old_devices;
	struct btrfs_fs_devices *seed_devices;
	struct btrfs_super_block *disk_super = &root->fs_info->super_copy;
	struct btrfs_device *device;
	u64 super_flags;

	BUG_ON(!mutex_is_locked(&uuid_mutex));
	if (!fs_devices->seeding)
		return -EINVAL;

	seed_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
	if (!seed_devices)
		return -ENOMEM;

	old_devices = clone_fs_devices(fs_devices);
	if (IS_ERR(old_devices)) {
		kfree(seed_devices);
		return PTR_ERR(old_devices);
	}

	list_add(&old_devices->list, &fs_uuids);

	memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
	seed_devices->opened = 1;
	INIT_LIST_HEAD(&seed_devices->devices);
	INIT_LIST_HEAD(&seed_devices->alloc_list);
	mutex_init(&seed_devices->device_list_mutex);
	list_splice_init(&fs_devices->devices, &seed_devices->devices);
	list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
	list_for_each_entry(device, &seed_devices->devices, dev_list) {
		device->fs_devices = seed_devices;
	}

	fs_devices->seeding = 0;
	fs_devices->num_devices = 0;
	fs_devices->open_devices = 0;
	fs_devices->seed = seed_devices;

	generate_random_uuid(fs_devices->fsid);
	memcpy(root->fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
	memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
	super_flags = btrfs_super_flags(disk_super) &
		      ~BTRFS_SUPER_FLAG_SEEDING;
	btrfs_set_super_flags(disk_super, super_flags);

	return 0;
}

/*
 * store the expected generation for seed devices in device items.
 */
static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_dev_item *dev_item;
	struct btrfs_device *device;
	struct btrfs_key key;
	u8 fs_uuid[BTRFS_UUID_SIZE];
	u8 dev_uuid[BTRFS_UUID_SIZE];
	u64 devid;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	root = root->fs_info->chunk_root;
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = BTRFS_DEV_ITEM_KEY;

	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
		if (ret < 0)
			goto error;

		leaf = path->nodes[0];
next_slot:
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret > 0)
				break;
			if (ret < 0)
				goto error;
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
			btrfs_release_path(root, path);
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
		    key.type != BTRFS_DEV_ITEM_KEY)
			break;

		dev_item = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_dev_item);
		devid = btrfs_device_id(leaf, dev_item);
		read_extent_buffer(leaf, dev_uuid,
				   (unsigned long)btrfs_device_uuid(dev_item),
				   BTRFS_UUID_SIZE);
		read_extent_buffer(leaf, fs_uuid,
				   (unsigned long)btrfs_device_fsid(dev_item),
				   BTRFS_UUID_SIZE);
		device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
		BUG_ON(!device);

		if (device->fs_devices->seeding) {
			btrfs_set_device_generation(leaf, dev_item,
						    device->generation);
			btrfs_mark_buffer_dirty(leaf);
		}

		path->slots[0]++;
		goto next_slot;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}
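
/*
 * add a new device to a mounted filesystem; when the mounted fs is a
 * seed filesystem this also sprouts it into a new, writeable filesystem
 * with a fresh fsid
 */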
int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_device *device;
	struct block_device *bdev;
	struct list_head *devices;
	struct super_block *sb = root->fs_info->sb;
	u64 total_bytes;
	int seeding_dev = 0;
	int ret = 0;

	if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
		return -EINVAL;

	bdev = open_bdev_exclusive(device_path, 0, root->fs_info->bdev_holder);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	if (root->fs_info->fs_devices->seeding) {
		seeding_dev = 1;
		down_write(&sb->s_umount);
		mutex_lock(&uuid_mutex);
	}

	filemap_write_and_wait(bdev->bd_inode->i_mapping);
	mutex_lock(&root->fs_info->volume_mutex);

	devices = &root->fs_info->fs_devices->devices;
	/*
	 * we have the volume lock, so we don't need the extra
	 * device list mutex while reading the list here.
	 */
	list_for_each_entry(device, devices, dev_list) {
		if (device->bdev == bdev) {
			ret = -EEXIST;
			goto error;
		}
	}

	device = kzalloc(sizeof(*device), GFP_NOFS);
	if (!device) {
		/* we can safely leave the fs_devices entry around */
		ret = -ENOMEM;
		goto error;
	}

	device->name = kstrdup(device_path, GFP_NOFS);
	if (!device->name) {
		kfree(device);
		ret = -ENOMEM;
		goto error;
	}

	ret = find_next_devid(root, &device->devid);
	if (ret) {
		kfree(device);
		goto error;
	}

	trans = btrfs_start_transaction(root, 0);
	lock_chunks(root);

	device->barriers = 1;
	device->writeable = 1;
	device->work.func = pending_bios_fn;
	generate_random_uuid(device->uuid);
	spin_lock_init(&device->io_lock);
	device->generation = trans->transid;
	device->io_width = root->sectorsize;
	device->io_align = root->sectorsize;
	device->sector_size = root->sectorsize;
	device->total_bytes = i_size_read(bdev->bd_inode);
	device->disk_total_bytes = device->total_bytes;
	device->dev_root = root->fs_info->dev_root;
	device->bdev = bdev;
	device->in_fs_metadata = 1;
	device->mode = 0;
	set_blocksize(device->bdev, 4096);

	if (seeding_dev) {
		sb->s_flags &= ~MS_RDONLY;
		ret = btrfs_prepare_sprout(trans, root);
		BUG_ON(ret);
	}

	device->fs_devices = root->fs_info->fs_devices;

	/*
	 * we don't want write_supers to jump in here with our device
	 * half setup
	 */
	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	list_add(&device->dev_list, &root->fs_info->fs_devices->devices);
	list_add(&device->dev_alloc_list,
		 &root->fs_info->fs_devices->alloc_list);
	root->fs_info->fs_devices->num_devices++;
	root->fs_info->fs_devices->open_devices++;
	root->fs_info->fs_devices->rw_devices++;
	root->fs_info->fs_devices->total_rw_bytes += device->total_bytes;

	if (!blk_queue_nonrot(bdev_get_queue(bdev)))
		root->fs_info->fs_devices->rotating = 1;

	total_bytes = btrfs_super_total_bytes(&root->fs_info->super_copy);
	btrfs_set_super_total_bytes(&root->fs_info->super_copy,
				    total_bytes + device->total_bytes);

	total_bytes = btrfs_super_num_devices(&root->fs_info->super_copy);
	btrfs_set_super_num_devices(&root->fs_info->super_copy,
				    total_bytes + 1);
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	if (seeding_dev) {
		ret = init_first_rw_device(trans, root, device);
		BUG_ON(ret);
		ret = btrfs_finish_sprout(trans, root);
		BUG_ON(ret);
	} else {
		ret = btrfs_add_device(trans, root, device);
	}

	/*
	 * we've got more storage, clear any full flags on the space
	 * infos
	 */
	btrfs_clear_space_info_full(root->fs_info);

	unlock_chunks(root);
	btrfs_commit_transaction(trans, root);

	if (seeding_dev) {
		mutex_unlock(&uuid_mutex);
		up_write(&sb->s_umount);

		ret = btrfs_relocate_sys_chunks(root);
		BUG_ON(ret);
	}
out:
	mutex_unlock(&root->fs_info->volume_mutex);
	return ret;
error:
	close_bdev_exclusive(bdev, 0);
	if (seeding_dev) {
		mutex_unlock(&uuid_mutex);
		up_write(&sb->s_umount);
	}
	goto out;
}
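
/*
 * write the in-memory device state (sizes, alignment, type) back into
 * the matching dev item in the chunk tree
 */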
static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
					struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	root = device->dev_root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->disk_total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_mark_buffer_dirty(leaf);

out:
	btrfs_free_path(path);
	return ret;
}

static int __btrfs_grow_device(struct btrfs_trans_handle *trans,
			       struct btrfs_device *device, u64 new_size)
{
	struct btrfs_super_block *super_copy =
		&device->dev_root->fs_info->super_copy;
	u64 old_total = btrfs_super_total_bytes(super_copy);
	u64 diff = new_size - device->total_bytes;

	if (!device->writeable)
		return -EACCES;
	if (new_size <= device->total_bytes)
		return -EINVAL;

	btrfs_set_super_total_bytes(super_copy, old_total + diff);
	device->fs_devices->total_rw_bytes += diff;

	device->total_bytes = new_size;
	device->disk_total_bytes = new_size;
	btrfs_clear_space_info_full(device->dev_root->fs_info);

	return btrfs_update_device(trans, device);
}

int btrfs_grow_device(struct btrfs_trans_handle *trans,
		      struct btrfs_device *device, u64 new_size)
{
	int ret;
	lock_chunks(device->dev_root);
	ret = __btrfs_grow_device(trans, device, new_size);
	unlock_chunks(device->dev_root);
	return ret;
}

static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root,
			    u64 chunk_tree, u64 chunk_objectid,
			    u64 chunk_offset)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;

	root = root->fs_info->chunk_root;
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = chunk_objectid;
	key.offset = chunk_offset;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	BUG_ON(ret);

	ret = btrfs_del_item(trans, root, path);
	BUG_ON(ret);

	btrfs_free_path(path);
	return 0;
}
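
/*
 * remove the chunk entry matching @chunk_objectid/@chunk_offset from
 * the sys_chunk_array held in the in-memory super block copy
 */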
static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid,
			       u64 chunk_offset)
{
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *ptr;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur;
	struct btrfs_key key;

	array_size = btrfs_super_sys_array_size(super_copy);

	ptr = super_copy->sys_chunk_array;
	cur = 0;

	while (cur < array_size) {
		disk_key = (struct btrfs_disk_key *)ptr;
		btrfs_disk_key_to_cpu(&key, disk_key);

		len = sizeof(*disk_key);

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)(ptr + len);
			num_stripes = btrfs_stack_chunk_num_stripes(chunk);
			len += btrfs_chunk_item_size(num_stripes);
		} else {
			ret = -EIO;
			break;
		}
		if (key.objectid == chunk_objectid &&
		    key.offset == chunk_offset) {
			memmove(ptr, ptr + len, array_size - (cur + len));
			array_size -= len;
			btrfs_set_super_sys_array_size(super_copy, array_size);
		} else {
			ptr += len;
			cur += len;
		}
	}
	return ret;
}

static int btrfs_relocate_chunk(struct btrfs_root *root,
				u64 chunk_tree, u64 chunk_objectid,
				u64 chunk_offset)
{
	struct extent_map_tree *em_tree;
	struct btrfs_root *extent_root;
	struct btrfs_trans_handle *trans;
	struct extent_map *em;
	struct map_lookup *map;
	int ret;
	int i;

	root = root->fs_info->chunk_root;
	extent_root = root->fs_info->extent_root;
	em_tree = &root->fs_info->mapping_tree.map_tree;

	ret = btrfs_can_relocate(extent_root, chunk_offset);
	if (ret)
		return -ENOSPC;

	/* step one, relocate all the extents inside this chunk */
	ret = btrfs_relocate_block_group(extent_root, chunk_offset);
	if (ret)
		return ret;

	trans = btrfs_start_transaction(root, 0);
	BUG_ON(!trans);

	lock_chunks(root);

	/*
	 * step two, delete the device extents and the
	 * chunk tree entries
	 */
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
	read_unlock(&em_tree->lock);

	BUG_ON(em->start > chunk_offset ||
	       em->start + em->len < chunk_offset);
	map = (struct map_lookup *)em->bdev;

	for (i = 0; i < map->num_stripes; i++) {
		ret = btrfs_free_dev_extent(trans, map->stripes[i].dev,
					    map->stripes[i].physical);
		BUG_ON(ret);

		if (map->stripes[i].dev) {
			ret = btrfs_update_device(trans, map->stripes[i].dev);
			BUG_ON(ret);
		}
	}
	ret = btrfs_free_chunk(trans, root, chunk_tree, chunk_objectid,
			       chunk_offset);

	BUG_ON(ret);

	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
		ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
		BUG_ON(ret);
	}

	ret = btrfs_remove_block_group(trans, extent_root, chunk_offset);
	BUG_ON(ret);

	write_lock(&em_tree->lock);
	remove_extent_mapping(em_tree, em);
	write_unlock(&em_tree->lock);

	kfree(map);
	em->bdev = NULL;

	/* once for the tree */
	free_extent_map(em);
	/* once for us */
	free_extent_map(em);

	unlock_chunks(root);
	btrfs_end_transaction(trans, root);
	return 0;
}
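
/*
 * walk the chunk tree backwards and relocate every SYSTEM chunk; chunks
 * that fail with -ENOSPC are retried once on a second pass
 */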
static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
{
	struct btrfs_root *chunk_root = root->fs_info->chunk_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_chunk *chunk;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u64 chunk_tree = chunk_root->root_key.objectid;
	u64 chunk_type;
	bool retried = false;
	int failed = 0;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

again:
	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
		if (ret < 0)
			goto error;
		BUG_ON(ret == 0);

		ret = btrfs_previous_item(chunk_root, path, key.objectid,
					  key.type);
		if (ret < 0)
			goto error;
		if (ret > 0)
			break;

		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		chunk = btrfs_item_ptr(leaf, path->slots[0],
				       struct btrfs_chunk);
		chunk_type = btrfs_chunk_type(leaf, chunk);
		btrfs_release_path(chunk_root, path);

		if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
			ret = btrfs_relocate_chunk(chunk_root, chunk_tree,
						   found_key.objectid,
						   found_key.offset);
			if (ret == -ENOSPC)
				failed++;
			else if (ret)
				BUG();
		}

		if (found_key.offset == 0)
			break;
		key.offset = found_key.offset - 1;
	}
	ret = 0;
	if (failed && !retried) {
		failed = 0;
		retried = true;
		goto again;
	} else if (failed && retried) {
		WARN_ON(1);
		ret = -ENOSPC;
	}
error:
	btrfs_free_path(path);
	return ret;
}

static u64 div_factor(u64 num, int factor)
{
	if (factor == 10)
		return num;
	num *= factor;
	do_div(num, 10);
	return num;
}
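
/*
 * rebalance the filesystem: shrink and regrow each device to free up a
 * little room, then walk the chunk tree backwards and relocate every
 * chunk so its data is spread across the current set of devices
 */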
int btrfs_balance(struct btrfs_root *dev_root)
{
	int ret;
	struct list_head *devices = &dev_root->fs_info->fs_devices->devices;
	struct btrfs_device *device;
	u64 old_size;
	u64 size_to_free;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_chunk *chunk;
	struct btrfs_root *chunk_root = dev_root->fs_info->chunk_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_key found_key;

	if (dev_root->fs_info->sb->s_flags & MS_RDONLY)
		return -EROFS;

	mutex_lock(&dev_root->fs_info->volume_mutex);
	dev_root = dev_root->fs_info->dev_root;

	/* step one make some room on all the devices */
	list_for_each_entry(device, devices, dev_list) {
		old_size = device->total_bytes;
		size_to_free = div_factor(old_size, 1);
		size_to_free = min(size_to_free, (u64)1 * 1024 * 1024);
		if (!device->writeable ||
		    device->total_bytes - device->bytes_used > size_to_free)
			continue;

		ret = btrfs_shrink_device(device, old_size - size_to_free);
		if (ret == -ENOSPC)
			break;
		BUG_ON(ret);

		trans = btrfs_start_transaction(dev_root, 0);
		BUG_ON(!trans);

		ret = btrfs_grow_device(trans, device, old_size);
		BUG_ON(ret);

		btrfs_end_transaction(trans, dev_root);
	}

	/* step two, relocate all the chunks */
	path = btrfs_alloc_path();
	BUG_ON(!path);

	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
		if (ret < 0)
			goto error;

		/*
		 * this shouldn't happen, it means the last relocate
		 * failed
		 */
		if (ret == 0)
			break;

		ret = btrfs_previous_item(chunk_root, path, 0,
					  BTRFS_CHUNK_ITEM_KEY);
		if (ret)
			break;

		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		if (found_key.objectid != key.objectid)
			break;

		chunk = btrfs_item_ptr(path->nodes[0],
				       path->slots[0],
				       struct btrfs_chunk);
		/* chunk zero is special */
		if (found_key.offset == 0)
			break;

		btrfs_release_path(chunk_root, path);
		ret = btrfs_relocate_chunk(chunk_root,
					   chunk_root->root_key.objectid,
					   found_key.objectid,
					   found_key.offset);
		BUG_ON(ret && ret != -ENOSPC);
		key.offset = found_key.offset - 1;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	mutex_unlock(&dev_root->fs_info->volume_mutex);
	return ret;
}

/*
 * shrinking a device means finding all of the device extents past
 * the new size, and then following the back refs to the chunks.
 * The chunk relocation code actually frees the device extent
 */
int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent = NULL;
	struct btrfs_path *path;
	u64 length;
	u64 chunk_tree;
	u64 chunk_objectid;
	u64 chunk_offset;
	int ret;
	int slot;
	int failed = 0;
	bool retried = false;
	struct extent_buffer *l;
	struct btrfs_key key;
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	u64 old_total = btrfs_super_total_bytes(super_copy);
	u64 old_size = device->total_bytes;
	u64 diff = device->total_bytes - new_size;

	if (new_size >= device->total_bytes)
		return -EINVAL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = 2;

	lock_chunks(root);

	device->total_bytes = new_size;
	if (device->writeable)
		device->fs_devices->total_rw_bytes -= diff;
	unlock_chunks(root);

again:
	key.objectid = device->devid;
	key.offset = (u64)-1;
	key.type = BTRFS_DEV_EXTENT_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto done;

		ret = btrfs_previous_item(root, path, 0, key.type);
		if (ret < 0)
			goto done;
		if (ret) {
			ret = 0;
			btrfs_release_path(root, path);
			break;
		}

		l = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(l, &key, path->slots[0]);

		if (key.objectid != device->devid) {
			btrfs_release_path(root, path);
			break;
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		length = btrfs_dev_extent_length(l, dev_extent);

		if (key.offset + length <= new_size) {
			btrfs_release_path(root, path);
			break;
		}

		chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
		chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
		btrfs_release_path(root, path);

		ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid,
					   chunk_offset);
		if (ret && ret != -ENOSPC)
			goto done;
		if (ret == -ENOSPC)
			failed++;
		key.offset -= 1;
	}

	if (failed && !retried) {
		failed = 0;
		retried = true;
		goto again;
	} else if (failed && retried) {
		ret = -ENOSPC;
		lock_chunks(root);

		device->total_bytes = old_size;
		if (device->writeable)
			device->fs_devices->total_rw_bytes += diff;
		unlock_chunks(root);
		goto done;
	}

	/* Shrinking succeeded, else we would be at "done". */
	trans = btrfs_start_transaction(root, 0);
	lock_chunks(root);

	device->disk_total_bytes = new_size;
	/* Now btrfs_update_device() will change the on-disk size. */
	ret = btrfs_update_device(trans, device);
	if (ret) {
		unlock_chunks(root);
		btrfs_end_transaction(trans, root);
		goto done;
	}
	WARN_ON(diff > old_total);
	btrfs_set_super_total_bytes(super_copy, old_total - diff);
	unlock_chunks(root);
	btrfs_end_transaction(trans, root);
done:
	btrfs_free_path(path);
	return ret;
}
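
/*
 * append a key/chunk pair to the sys_chunk_array in the super block
 * copy; fails with -EFBIG if the array has no room left
 */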

static int btrfs_add_system_chunk(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_key *key,
				  struct btrfs_chunk *chunk, int item_size)
{
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	struct btrfs_disk_key disk_key;
	u32 array_size;
	u8 *ptr;

	array_size = btrfs_super_sys_array_size(super_copy);
	/* reserve room for the disk key that is written before the chunk */
	if (array_size + item_size + sizeof(disk_key) >
	    BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
		return -EFBIG;

	ptr = super_copy->sys_chunk_array + array_size;
	btrfs_cpu_key_to_disk(&disk_key, key);
	memcpy(ptr, &disk_key, sizeof(disk_key));
	ptr += sizeof(disk_key);
	memcpy(ptr, chunk, item_size);
	item_size += sizeof(disk_key);
	btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
	return 0;
}
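
/*
 * Layout sketch of the superblock system chunk array that the helper
 * above appends to (illustrative):
 *
 *   sys_chunk_array: [disk_key 0][chunk item 0][disk_key 1][chunk item 1]...
 *
 * Each entry is a packed btrfs_disk_key immediately followed by its
 * variable-sized chunk item, so btrfs_read_sys_array() further down can
 * walk the array with nothing but the recorded total size.
 */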

static noinline u64 chunk_bytes_by_type(u64 type, u64 calc_size,
					int num_stripes, int sub_stripes)
{
	if (type & (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_DUP))
		return calc_size;
	else if (type & BTRFS_BLOCK_GROUP_RAID10)
		return calc_size * (num_stripes / sub_stripes);
	else
		return calc_size * num_stripes;
}
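
/*
 * Worked example (illustrative): with calc_size = 1GB per stripe, a RAID1
 * or DUP chunk of 2 stripes still holds 1GB of logical bytes (the copies
 * are redundant), a RAID10 chunk of 4 stripes with sub_stripes = 2 holds
 * 1GB * (4 / 2) = 2GB, and a RAID0 chunk of 4 stripes holds 4GB.
 */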

static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
			       struct btrfs_root *extent_root,
			       struct map_lookup **map_ret,
			       u64 *num_bytes, u64 *stripe_size,
			       u64 start, u64 type)
{
	struct btrfs_fs_info *info = extent_root->fs_info;
	struct btrfs_device *device = NULL;
	struct btrfs_fs_devices *fs_devices = info->fs_devices;
	struct list_head *cur;
	struct map_lookup *map = NULL;
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	struct list_head private_devs;
	int min_stripe_size = 1 * 1024 * 1024;
	u64 calc_size = 1024 * 1024 * 1024;
	u64 max_chunk_size = calc_size;
	u64 min_free;
	u64 avail;
	u64 max_avail = 0;
	u64 dev_offset;
	int num_stripes = 1;
	int min_stripes = 1;
	int sub_stripes = 0;
	int looped = 0;
	int ret;
	int index;
	int stripe_len = 64 * 1024;

	if ((type & BTRFS_BLOCK_GROUP_RAID1) &&
	    (type & BTRFS_BLOCK_GROUP_DUP)) {
		WARN_ON(1);
		type &= ~BTRFS_BLOCK_GROUP_DUP;
	}
	if (list_empty(&fs_devices->alloc_list))
		return -ENOSPC;

	if (type & (BTRFS_BLOCK_GROUP_RAID0)) {
		num_stripes = fs_devices->rw_devices;
		min_stripes = 2;
	}
	if (type & (BTRFS_BLOCK_GROUP_DUP)) {
		num_stripes = 2;
		min_stripes = 2;
	}
	if (type & (BTRFS_BLOCK_GROUP_RAID1)) {
		if (fs_devices->rw_devices < 2)
			return -ENOSPC;
		num_stripes = 2;
		min_stripes = 2;
	}
	if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
		num_stripes = fs_devices->rw_devices;
		if (num_stripes < 4)
			return -ENOSPC;
		num_stripes &= ~(u32)1;
		sub_stripes = 2;
		min_stripes = 4;
	}

	if (type & BTRFS_BLOCK_GROUP_DATA) {
		max_chunk_size = 10 * calc_size;
		min_stripe_size = 64 * 1024 * 1024;
	} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
		max_chunk_size = 256 * 1024 * 1024;
		min_stripe_size = 32 * 1024 * 1024;
	} else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
		calc_size = 8 * 1024 * 1024;
		max_chunk_size = calc_size * 2;
		min_stripe_size = 1 * 1024 * 1024;
	}

	/* we don't want a chunk larger than 10% of writeable space */
	max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
			     max_chunk_size);

again:
	max_avail = 0;
	if (!map || map->num_stripes != num_stripes) {
		kfree(map);
		map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
		if (!map)
			return -ENOMEM;
		map->num_stripes = num_stripes;
	}

	if (calc_size * num_stripes > max_chunk_size) {
		calc_size = max_chunk_size;
		do_div(calc_size, num_stripes);
		do_div(calc_size, stripe_len);
		calc_size *= stripe_len;
	}

	/* we don't want tiny stripes */
	if (!looped)
		calc_size = max_t(u64, min_stripe_size, calc_size);

	/*
	 * we're about to do_div by the stripe_len so let's make sure
	 * we end up with something bigger than a stripe
	 */
	calc_size = max_t(u64, calc_size, stripe_len * 4);

	do_div(calc_size, stripe_len);
	calc_size *= stripe_len;

	cur = fs_devices->alloc_list.next;
	index = 0;

	if (type & BTRFS_BLOCK_GROUP_DUP)
		min_free = calc_size * 2;
	else
		min_free = calc_size;

	/*
	 * we add 1MB because we never use the first 1MB of the device, unless
	 * we've looped, then we are likely allocating the maximum amount of
	 * space left already
	 */
	if (!looped)
		min_free += 1024 * 1024;

	INIT_LIST_HEAD(&private_devs);
	while (index < num_stripes) {
		device = list_entry(cur, struct btrfs_device, dev_alloc_list);
		BUG_ON(!device->writeable);
		if (device->total_bytes > device->bytes_used)
			avail = device->total_bytes - device->bytes_used;
		else
			avail = 0;
		cur = cur->next;

		if (device->in_fs_metadata && avail >= min_free) {
			ret = find_free_dev_extent(trans, device,
						   min_free, &dev_offset,
						   &max_avail);
			if (ret == 0) {
				list_move_tail(&device->dev_alloc_list,
					       &private_devs);
				map->stripes[index].dev = device;
				map->stripes[index].physical = dev_offset;
				index++;
				if (type & BTRFS_BLOCK_GROUP_DUP) {
					map->stripes[index].dev = device;
					map->stripes[index].physical =
						dev_offset + calc_size;
					index++;
				}
			}
		} else if (device->in_fs_metadata && avail > max_avail)
			max_avail = avail;
		if (cur == &fs_devices->alloc_list)
			break;
	}
	list_splice(&private_devs, &fs_devices->alloc_list);
	if (index < num_stripes) {
		if (index >= min_stripes) {
			num_stripes = index;
			if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
				num_stripes /= sub_stripes;
				num_stripes *= sub_stripes;
			}
			looped = 1;
			goto again;
		}
		if (!looped && max_avail > 0) {
			looped = 1;
			calc_size = max_avail;
			goto again;
		}
		kfree(map);
		return -ENOSPC;
	}
	map->sector_size = extent_root->sectorsize;
	map->stripe_len = stripe_len;
	map->io_align = stripe_len;
	map->io_width = stripe_len;
	map->type = type;
	map->num_stripes = num_stripes;
	map->sub_stripes = sub_stripes;

	*map_ret = map;
	*stripe_size = calc_size;
	*num_bytes = chunk_bytes_by_type(type, calc_size,
					 num_stripes, sub_stripes);

	em = alloc_extent_map(GFP_NOFS);
	if (!em) {
		kfree(map);
		return -ENOMEM;
	}
	em->bdev = (struct block_device *)map;
	em->start = start;
	em->len = *num_bytes;
	em->block_start = 0;
	em->block_len = em->len;

	em_tree = &extent_root->fs_info->mapping_tree.map_tree;
	write_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em);
	write_unlock(&em_tree->lock);
	BUG_ON(ret);
	free_extent_map(em);

	ret = btrfs_make_block_group(trans, extent_root, 0, type,
				     BTRFS_FIRST_CHUNK_TREE_OBJECTID,
				     start, *num_bytes);
	BUG_ON(ret);

	index = 0;
	while (index < map->num_stripes) {
		device = map->stripes[index].dev;
		dev_offset = map->stripes[index].physical;

		ret = btrfs_alloc_dev_extent(trans, device,
					info->chunk_root->root_key.objectid,
					BTRFS_FIRST_CHUNK_TREE_OBJECTID,
					start, dev_offset, calc_size);
		BUG_ON(ret);
		index++;
	}

	return 0;
}
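
/*
 * Illustrative example of the stripe-count selection above: a RAID10
 * request on a filesystem with 5 writeable devices starts from
 * num_stripes = 5 and clears the low bit to get 4, since stripes must
 * come in sub_stripe pairs. If fewer than num_stripes devices have
 * min_free bytes available, the loop retries with fewer stripes (never
 * below min_stripes) or with calc_size reduced to the largest free span
 * seen, before finally returning -ENOSPC.
 */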

static int __finish_chunk_alloc(struct btrfs_trans_handle *trans,
				struct btrfs_root *extent_root,
				struct map_lookup *map, u64 chunk_offset,
				u64 chunk_size, u64 stripe_size)
{
	u64 dev_offset;
	struct btrfs_key key;
	struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
	struct btrfs_device *device;
	struct btrfs_chunk *chunk;
	struct btrfs_stripe *stripe;
	size_t item_size = btrfs_chunk_item_size(map->num_stripes);
	int index = 0;
	int ret;

	chunk = kzalloc(item_size, GFP_NOFS);
	if (!chunk)
		return -ENOMEM;

	index = 0;
	while (index < map->num_stripes) {
		device = map->stripes[index].dev;
		device->bytes_used += stripe_size;
		ret = btrfs_update_device(trans, device);
		BUG_ON(ret);
		index++;
	}

	index = 0;
	stripe = &chunk->stripe;
	while (index < map->num_stripes) {
		device = map->stripes[index].dev;
		dev_offset = map->stripes[index].physical;

		btrfs_set_stack_stripe_devid(stripe, device->devid);
		btrfs_set_stack_stripe_offset(stripe, dev_offset);
		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
		stripe++;
		index++;
	}

	btrfs_set_stack_chunk_length(chunk, chunk_size);
	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
	btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
	btrfs_set_stack_chunk_type(chunk, map->type);
	btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
	btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
	btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
	btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
	btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);

	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.type = BTRFS_CHUNK_ITEM_KEY;
	key.offset = chunk_offset;

	ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
	BUG_ON(ret);

	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
		ret = btrfs_add_system_chunk(trans, chunk_root, &key, chunk,
					     item_size);
		BUG_ON(ret);
	}
	kfree(chunk);
	return 0;
}
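
/*
 * On-disk layout of the chunk item built above (sketch, assuming the
 * usual item definitions): struct btrfs_chunk embeds the first
 * btrfs_stripe and the remaining stripes follow it back to back,
 *
 *   [btrfs_chunk header + stripe 0][stripe 1]...[stripe n-1]
 *
 * which is why btrfs_chunk_item_size(num_stripes) sizes both the
 * kzalloc() and the tree item insert.
 */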

/*
 * Chunk allocation falls into two parts. The first part does the work
 * that makes the newly allocated chunk usable, but does not do any
 * operation that modifies the chunk tree. The second part does the work
 * that requires modifying the chunk tree. This division is important for
 * the bootstrap process of adding storage to a seed btrfs.
 */
int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
		      struct btrfs_root *extent_root, u64 type)
{
	u64 chunk_offset;
	u64 chunk_size;
	u64 stripe_size;
	struct map_lookup *map;
	struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
	int ret;

	ret = find_next_chunk(chunk_root, BTRFS_FIRST_CHUNK_TREE_OBJECTID,
			      &chunk_offset);
	if (ret)
		return ret;

	ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
				  &stripe_size, chunk_offset, type);
	if (ret)
		return ret;

	ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
				   chunk_size, stripe_size);
	BUG_ON(ret);
	return 0;
}

static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root,
					 struct btrfs_device *device)
{
	u64 chunk_offset;
	u64 sys_chunk_offset;
	u64 chunk_size;
	u64 sys_chunk_size;
	u64 stripe_size;
	u64 sys_stripe_size;
	u64 alloc_profile;
	struct map_lookup *map;
	struct map_lookup *sys_map;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *extent_root = fs_info->extent_root;
	int ret;

	ret = find_next_chunk(fs_info->chunk_root,
			      BTRFS_FIRST_CHUNK_TREE_OBJECTID, &chunk_offset);
	BUG_ON(ret);

	alloc_profile = BTRFS_BLOCK_GROUP_METADATA |
			(fs_info->metadata_alloc_profile &
			 fs_info->avail_metadata_alloc_bits);
	alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);

	ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
				  &stripe_size, chunk_offset, alloc_profile);
	BUG_ON(ret);

	sys_chunk_offset = chunk_offset + chunk_size;

	alloc_profile = BTRFS_BLOCK_GROUP_SYSTEM |
			(fs_info->system_alloc_profile &
			 fs_info->avail_system_alloc_bits);
	alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);

	ret = __btrfs_alloc_chunk(trans, extent_root, &sys_map,
				  &sys_chunk_size, &sys_stripe_size,
				  sys_chunk_offset, alloc_profile);
	BUG_ON(ret);

	ret = btrfs_add_device(trans, fs_info->chunk_root, device);
	BUG_ON(ret);

	/*
	 * Modifying the chunk tree needs to allocate new blocks from both
	 * the system block group and the metadata block group. So we can
	 * only do operations that require modifying the chunk tree after
	 * both block groups have been created.
	 */
	ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
				   chunk_size, stripe_size);
	BUG_ON(ret);

	ret = __finish_chunk_alloc(trans, extent_root, sys_map,
				   sys_chunk_offset, sys_chunk_size,
				   sys_stripe_size);
	BUG_ON(ret);
	return 0;
}

int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
	int readonly = 0;
	int i;

	read_lock(&map_tree->map_tree.lock);
	em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
	read_unlock(&map_tree->map_tree.lock);
	if (!em)
		return 1;

	if (btrfs_test_opt(root, DEGRADED)) {
		free_extent_map(em);
		return 0;
	}

	map = (struct map_lookup *)em->bdev;
	for (i = 0; i < map->num_stripes; i++) {
		if (!map->stripes[i].dev->writeable) {
			readonly = 1;
			break;
		}
	}
	free_extent_map(em);
	return readonly;
}

void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
{
	extent_map_tree_init(&tree->map_tree, GFP_NOFS);
}

void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
{
	struct extent_map *em;

	while (1) {
		write_lock(&tree->map_tree.lock);
		em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
		if (em)
			remove_extent_mapping(&tree->map_tree, em);
		write_unlock(&tree->map_tree.lock);
		if (!em)
			break;
		kfree(em->bdev);
		/* once for us */
		free_extent_map(em);
		/* once for the tree */
		free_extent_map(em);
	}
}

int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	int ret;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, logical, len);
	read_unlock(&em_tree->lock);
	BUG_ON(!em);

	BUG_ON(em->start > logical || em->start + em->len < logical);
	map = (struct map_lookup *)em->bdev;
	if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
		ret = map->num_stripes;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		ret = map->sub_stripes;
	else
		ret = 1;
	free_extent_map(em);
	return ret;
}
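
/*
 * Example return values for btrfs_num_copies() (illustrative): RAID1 and
 * DUP chunks report num_stripes copies (2 with the allocation above),
 * RAID10 reports sub_stripes (2), and RAID0 or single chunks report 1.
 * Callers use this count to iterate mirror_num when retrying failed reads.
 */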

static int find_live_mirror(struct map_lookup *map, int first, int num,
			    int optimal)
{
	int i;
	if (map->stripes[optimal].dev->bdev)
		return optimal;
	for (i = first; i < first + num; i++) {
		if (map->stripes[i].dev->bdev)
			return i;
	}
	/* we couldn't find one that doesn't fail.  Just return something
	 * and the io error handling code will clean up eventually
	 */
	return optimal;
}

static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
			     u64 logical, u64 *length,
			     struct btrfs_multi_bio **multi_ret,
			     int mirror_num, struct page *unplug_page)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	u64 offset;
	u64 stripe_offset;
	u64 stripe_nr;
	int stripes_allocated = 8;
	int stripes_required = 1;
	int stripe_index;
	int i;
	int num_stripes;
	int max_errors = 0;
	struct btrfs_multi_bio *multi = NULL;

	if (multi_ret && !(rw & (1 << BIO_RW)))
		stripes_allocated = 1;
again:
	if (multi_ret) {
		multi = kzalloc(btrfs_multi_bio_size(stripes_allocated),
				GFP_NOFS);
		if (!multi)
			return -ENOMEM;

		atomic_set(&multi->error, 0);
	}

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, logical, *length);
	read_unlock(&em_tree->lock);

	if (!em && unplug_page) {
		kfree(multi);
		return 0;
	}

	if (!em) {
		printk(KERN_CRIT "unable to find logical %llu len %llu\n",
		       (unsigned long long)logical,
		       (unsigned long long)*length);
		BUG();
	}

	BUG_ON(em->start > logical || em->start + em->len < logical);
	map = (struct map_lookup *)em->bdev;
	offset = logical - em->start;

	if (mirror_num > map->num_stripes)
		mirror_num = 0;

	/* if our multi bio struct is too small, back off and try again */
	if (rw & (1 << BIO_RW)) {
		if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
				 BTRFS_BLOCK_GROUP_DUP)) {
			stripes_required = map->num_stripes;
			max_errors = 1;
		} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
			stripes_required = map->sub_stripes;
			max_errors = 1;
		}
	}
	if (multi_ret && (rw & (1 << BIO_RW)) &&
	    stripes_allocated < stripes_required) {
		stripes_allocated = map->num_stripes;
		free_extent_map(em);
		kfree(multi);
		goto again;
	}
	stripe_nr = offset;
	/*
	 * stripe_nr counts the total number of stripes we have to stride
	 * to get to this block
	 */
	do_div(stripe_nr, map->stripe_len);

	stripe_offset = stripe_nr * map->stripe_len;
	BUG_ON(offset < stripe_offset);

	/* stripe_offset is the offset of this block in its stripe */
	stripe_offset = offset - stripe_offset;

	if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
			 BTRFS_BLOCK_GROUP_RAID10 |
			 BTRFS_BLOCK_GROUP_DUP)) {
		/* we limit the length of each bio to what fits in a stripe */
		*length = min_t(u64, em->len - offset,
				map->stripe_len - stripe_offset);
	} else {
		*length = em->len - offset;
	}

	if (!multi_ret && !unplug_page)
		goto out;

	num_stripes = 1;
	stripe_index = 0;
	if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
		if (unplug_page || (rw & (1 << BIO_RW)))
			num_stripes = map->num_stripes;
		else if (mirror_num)
			stripe_index = mirror_num - 1;
		else {
			stripe_index = find_live_mirror(map, 0,
					    map->num_stripes,
					    current->pid % map->num_stripes);
		}

	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
		if (rw & (1 << BIO_RW))
			num_stripes = map->num_stripes;
		else if (mirror_num)
			stripe_index = mirror_num - 1;

	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
		int factor = map->num_stripes / map->sub_stripes;

		stripe_index = do_div(stripe_nr, factor);
		stripe_index *= map->sub_stripes;

		if (unplug_page || (rw & (1 << BIO_RW)))
			num_stripes = map->sub_stripes;
		else if (mirror_num)
			stripe_index += mirror_num - 1;
		else {
			stripe_index = find_live_mirror(map, stripe_index,
					      map->sub_stripes, stripe_index +
					      current->pid % map->sub_stripes);
		}
	} else {
		/*
		 * after this do_div call, stripe_nr is the number of stripes
		 * on this device we have to walk to find the data, and
		 * stripe_index is the number of our device in the stripe array
		 */
		stripe_index = do_div(stripe_nr, map->num_stripes);
	}
	BUG_ON(stripe_index >= map->num_stripes);

	for (i = 0; i < num_stripes; i++) {
		if (unplug_page) {
			struct btrfs_device *device;
			struct backing_dev_info *bdi;

			device = map->stripes[stripe_index].dev;
			if (device->bdev) {
				bdi = blk_get_backing_dev_info(device->bdev);
				if (bdi->unplug_io_fn)
					bdi->unplug_io_fn(bdi, unplug_page);
			}
		} else {
			multi->stripes[i].physical =
				map->stripes[stripe_index].physical +
				stripe_offset + stripe_nr * map->stripe_len;
			multi->stripes[i].dev = map->stripes[stripe_index].dev;
		}
		stripe_index++;
	}
	if (multi_ret) {
		*multi_ret = multi;
		multi->num_stripes = num_stripes;
		multi->max_errors = max_errors;
	}
out:
	free_extent_map(em);
	return 0;
}
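
/*
 * Worked example of the mapping arithmetic above (illustrative): on a
 * 2-device RAID0 chunk with a 64KB stripe_len, an offset of 200KB into
 * the chunk gives stripe_nr = 200KB / 64KB = 3 and stripe_offset =
 * 200KB - 3 * 64KB = 8KB; do_div(stripe_nr, num_stripes) then leaves
 * stripe_nr = 1 with remainder stripe_index = 1, so the io goes to the
 * second device, at its stripe 1 plus 8KB.
 */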

int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
		    u64 logical, u64 *length,
		    struct btrfs_multi_bio **multi_ret, int mirror_num)
{
	return __btrfs_map_block(map_tree, rw, logical, length, multi_ret,
				 mirror_num, NULL);
}

int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
		     u64 chunk_start, u64 physical, u64 devid,
		     u64 **logical, int *naddrs, int *stripe_len)
{
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	struct extent_map *em;
	struct map_lookup *map;
	u64 *buf;
	u64 bytenr;
	u64 length;
	u64 stripe_nr;
	int i, j, nr = 0;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_start, 1);
	read_unlock(&em_tree->lock);

	BUG_ON(!em || em->start != chunk_start);
	map = (struct map_lookup *)em->bdev;

	length = em->len;
	if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		do_div(length, map->num_stripes / map->sub_stripes);
	else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
		do_div(length, map->num_stripes);

	buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS);
	BUG_ON(!buf);

	for (i = 0; i < map->num_stripes; i++) {
		if (devid && map->stripes[i].dev->devid != devid)
			continue;
		if (map->stripes[i].physical > physical ||
		    map->stripes[i].physical + length <= physical)
			continue;

		stripe_nr = physical - map->stripes[i].physical;
		do_div(stripe_nr, map->stripe_len);

		if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
			stripe_nr = stripe_nr * map->num_stripes + i;
			do_div(stripe_nr, map->sub_stripes);
		} else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
			stripe_nr = stripe_nr * map->num_stripes + i;
		}
		bytenr = chunk_start + stripe_nr * map->stripe_len;
		WARN_ON(nr >= map->num_stripes);
		for (j = 0; j < nr; j++) {
			if (buf[j] == bytenr)
				break;
		}
		if (j == nr) {
			WARN_ON(nr >= map->num_stripes);
			buf[nr++] = bytenr;
		}
	}

	*logical = buf;
	*naddrs = nr;
	*stripe_len = map->stripe_len;

	free_extent_map(em);
	return 0;
}
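
/*
 * Illustrative reverse-mapping example: for the 2-device RAID0 chunk used
 * above, calling btrfs_rmap_block() with physical =
 * stripes[1].physical + 64KB computes stripe_nr = 1, then bytenr =
 * chunk_start + (1 * 2 + 1) * 64KB = chunk_start + 192KB, the logical
 * address that maps back to that physical block.
 */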

int btrfs_unplug_page(struct btrfs_mapping_tree *map_tree,
		      u64 logical, struct page *page)
{
	u64 length = PAGE_CACHE_SIZE;
	return __btrfs_map_block(map_tree, READ, logical, &length,
				 NULL, 0, page);
}

static void end_bio_multi_stripe(struct bio *bio, int err)
{
	struct btrfs_multi_bio *multi = bio->bi_private;
	int is_orig_bio = 0;

	if (err)
		atomic_inc(&multi->error);

	if (bio == multi->orig_bio)
		is_orig_bio = 1;

	if (atomic_dec_and_test(&multi->stripes_pending)) {
		if (!is_orig_bio) {
			bio_put(bio);
			bio = multi->orig_bio;
		}
		bio->bi_private = multi->private;
		bio->bi_end_io = multi->end_io;
		/* only send an error to the higher layers if it is
		 * beyond the tolerance of the multi-bio
		 */
		if (atomic_read(&multi->error) > multi->max_errors) {
			err = -EIO;
		} else if (err) {
			/*
			 * this bio is actually up to date, we didn't
			 * go over the max number of errors
			 */
			set_bit(BIO_UPTODATE, &bio->bi_flags);
			err = 0;
		}
		kfree(multi);

		bio_endio(bio, err);
	} else if (!is_orig_bio) {
		bio_put(bio);
	}
}

struct async_sched {
	struct bio *bio;
	int rw;
	struct btrfs_fs_info *info;
	struct btrfs_work work;
};

/*
 * see run_scheduled_bios for a description of why bios are collected for
 * async submit.
 *
 * This will add one bio to the pending list for a device and make sure
 * the work struct is scheduled.
 */
static noinline int schedule_bio(struct btrfs_root *root,
				 struct btrfs_device *device,
				 int rw, struct bio *bio)
{
	int should_queue = 1;
	struct btrfs_pending_bios *pending_bios;

	/* don't bother with additional async steps for reads, right now */
	if (!(rw & (1 << BIO_RW))) {
		bio_get(bio);
		submit_bio(rw, bio);
		bio_put(bio);
		return 0;
	}

	/*
	 * nr_async_bios allows us to reliably return congestion to the
	 * higher layers.  Otherwise, the async bio makes it appear we have
	 * made progress against dirty pages when we've really just put it
	 * on a queue for later
	 */
	atomic_inc(&root->fs_info->nr_async_bios);
	WARN_ON(bio->bi_next);
	bio->bi_next = NULL;
	bio->bi_rw |= rw;

	spin_lock(&device->io_lock);
	if (bio_rw_flagged(bio, BIO_RW_SYNCIO))
		pending_bios = &device->pending_sync_bios;
	else
		pending_bios = &device->pending_bios;

	if (pending_bios->tail)
		pending_bios->tail->bi_next = bio;

	pending_bios->tail = bio;
	if (!pending_bios->head)
		pending_bios->head = bio;
	if (device->running_pending)
		should_queue = 0;

	spin_unlock(&device->io_lock);

	if (should_queue)
		btrfs_queue_worker(&root->fs_info->submit_workers,
				   &device->work);
	return 0;
}

int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
		  int mirror_num, int async_submit)
{
	struct btrfs_mapping_tree *map_tree;
	struct btrfs_device *dev;
	struct bio *first_bio = bio;
	u64 logical = (u64)bio->bi_sector << 9;
	u64 length = 0;
	u64 map_length;
	struct btrfs_multi_bio *multi = NULL;
	int ret;
	int dev_nr = 0;
	int total_devs = 1;

	length = bio->bi_size;
	map_tree = &root->fs_info->mapping_tree;
	map_length = length;

	ret = btrfs_map_block(map_tree, rw, logical, &map_length, &multi,
			      mirror_num);
	BUG_ON(ret);

	total_devs = multi->num_stripes;
	if (map_length < length) {
		printk(KERN_CRIT "mapping failed logical %llu bio len %llu "
		       "len %llu\n", (unsigned long long)logical,
		       (unsigned long long)length,
		       (unsigned long long)map_length);
		BUG();
	}
	multi->end_io = first_bio->bi_end_io;
	multi->private = first_bio->bi_private;
	multi->orig_bio = first_bio;
	atomic_set(&multi->stripes_pending, multi->num_stripes);

	while (dev_nr < total_devs) {
		if (total_devs > 1) {
			if (dev_nr < total_devs - 1) {
				bio = bio_clone(first_bio, GFP_NOFS);
				BUG_ON(!bio);
			} else {
				bio = first_bio;
			}
			bio->bi_private = multi;
			bio->bi_end_io = end_bio_multi_stripe;
		}
		bio->bi_sector = multi->stripes[dev_nr].physical >> 9;
		dev = multi->stripes[dev_nr].dev;
		BUG_ON(rw == WRITE && !dev->writeable);
		if (dev && dev->bdev) {
			bio->bi_bdev = dev->bdev;
			if (async_submit)
				schedule_bio(root, dev, rw, bio);
			else
				submit_bio(rw, bio);
		} else {
			bio->bi_bdev = root->fs_info->fs_devices->latest_bdev;
			bio->bi_sector = logical >> 9;
			bio_endio(bio, -EIO);
		}
		dev_nr++;
	}
	if (total_devs == 1)
		kfree(multi);
	return 0;
}

struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid,
				       u8 *uuid, u8 *fsid)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *cur_devices;

	cur_devices = root->fs_info->fs_devices;
	while (cur_devices) {
		if (!fsid ||
		    !memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
			device = __find_device(&cur_devices->devices,
					       devid, uuid);
			if (device)
				return device;
		}
		cur_devices = cur_devices->seed;
	}
	return NULL;
}

static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
					    u64 devid, u8 *dev_uuid)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;

	device = kzalloc(sizeof(*device), GFP_NOFS);
	if (!device)
		return NULL;
	list_add(&device->dev_list,
		 &fs_devices->devices);
	device->barriers = 1;
	device->dev_root = root->fs_info->dev_root;
	device->devid = devid;
	device->work.func = pending_bios_fn;
	device->fs_devices = fs_devices;
	fs_devices->num_devices++;
	spin_lock_init(&device->io_lock);
	INIT_LIST_HEAD(&device->dev_alloc_list);
	memcpy(device->uuid, dev_uuid, BTRFS_UUID_SIZE);
	return device;
}
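
/*
 * Note (illustrative): a device created by add_missing_dev() is a stub
 * with no backing bdev. Reads are steered away from it by
 * find_live_mirror(), and any bio that still lands on it takes the
 * error branch in btrfs_map_bio() above, which is what lets a degraded
 * mount keep going with a disk missing.
 */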

static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
			  struct extent_buffer *leaf,
			  struct btrfs_chunk *chunk)
{
	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
	struct map_lookup *map;
	struct extent_map *em;
	u64 logical;
	u64 length;
	u64 devid;
	u8 uuid[BTRFS_UUID_SIZE];
	int num_stripes;
	int ret;
	int i;

	logical = key->offset;
	length = btrfs_chunk_length(leaf, chunk);

	read_lock(&map_tree->map_tree.lock);
	em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
	read_unlock(&map_tree->map_tree.lock);

	/* already mapped? */
	if (em && em->start <= logical && em->start + em->len > logical) {
		free_extent_map(em);
		return 0;
	} else if (em) {
		free_extent_map(em);
	}

	em = alloc_extent_map(GFP_NOFS);
	if (!em)
		return -ENOMEM;
	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
	if (!map) {
		free_extent_map(em);
		return -ENOMEM;
	}

	em->bdev = (struct block_device *)map;
	em->start = logical;
	em->len = length;
	em->block_start = 0;
	em->block_len = em->len;

	map->num_stripes = num_stripes;
	map->io_width = btrfs_chunk_io_width(leaf, chunk);
	map->io_align = btrfs_chunk_io_align(leaf, chunk);
	map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
	map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
	map->type = btrfs_chunk_type(leaf, chunk);
	map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
	for (i = 0; i < num_stripes; i++) {
		map->stripes[i].physical =
			btrfs_stripe_offset_nr(leaf, chunk, i);
		devid = btrfs_stripe_devid_nr(leaf, chunk, i);
		read_extent_buffer(leaf, uuid, (unsigned long)
				   btrfs_stripe_dev_uuid_nr(chunk, i),
				   BTRFS_UUID_SIZE);
		map->stripes[i].dev = btrfs_find_device(root, devid, uuid,
							NULL);
		if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
			kfree(map);
			free_extent_map(em);
			return -EIO;
		}
		if (!map->stripes[i].dev) {
			map->stripes[i].dev =
				add_missing_dev(root, devid, uuid);
			if (!map->stripes[i].dev) {
				kfree(map);
				free_extent_map(em);
				return -EIO;
			}
		}
		map->stripes[i].dev->in_fs_metadata = 1;
	}

	write_lock(&map_tree->map_tree.lock);
	ret = add_extent_mapping(&map_tree->map_tree, em);
	write_unlock(&map_tree->map_tree.lock);
	BUG_ON(ret);
	free_extent_map(em);

	return 0;
}

static int fill_device_from_item(struct extent_buffer *leaf,
				 struct btrfs_dev_item *dev_item,
				 struct btrfs_device *device)
{
	unsigned long ptr;

	device->devid = btrfs_device_id(leaf, dev_item);
	device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
	device->total_bytes = device->disk_total_bytes;
	device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
	device->type = btrfs_device_type(leaf, dev_item);
	device->io_align = btrfs_device_io_align(leaf, dev_item);
	device->io_width = btrfs_device_io_width(leaf, dev_item);
	device->sector_size = btrfs_device_sector_size(leaf, dev_item);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);

	return 0;
}

static int open_seed_devices(struct btrfs_root *root, u8 *fsid)
{
	struct btrfs_fs_devices *fs_devices;
	int ret;

	mutex_lock(&uuid_mutex);

	fs_devices = root->fs_info->fs_devices->seed;
	while (fs_devices) {
		if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
			ret = 0;
			goto out;
		}
		fs_devices = fs_devices->seed;
	}

	fs_devices = find_fsid(fsid);
	if (!fs_devices) {
		ret = -ENOENT;
		goto out;
	}

	fs_devices = clone_fs_devices(fs_devices);
	if (IS_ERR(fs_devices)) {
		ret = PTR_ERR(fs_devices);
		goto out;
	}

	ret = __btrfs_open_devices(fs_devices, FMODE_READ,
				   root->fs_info->bdev_holder);
	if (ret)
		goto out;

	if (!fs_devices->seeding) {
		__btrfs_close_devices(fs_devices);
		free_fs_devices(fs_devices);
		ret = -EINVAL;
		goto out;
	}

	fs_devices->seed = root->fs_info->fs_devices->seed;
	root->fs_info->fs_devices->seed = fs_devices;
out:
	mutex_unlock(&uuid_mutex);
	return ret;
}
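
/*
 * Sketch of the resulting seed chain (illustrative): a filesystem grown
 * from seeds ends up with
 *
 *   fs_info->fs_devices -> seed A -> seed B -> NULL
 *
 * where open_seed_devices() above clones each seed's device list, opens
 * it read-only, and links it onto the front of the chain, so that
 * btrfs_find_device() can walk sprouted filesystems transparently.
 */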

static int read_one_dev(struct btrfs_root *root,
			struct extent_buffer *leaf,
			struct btrfs_dev_item *dev_item)
{
	struct btrfs_device *device;
	u64 devid;
	int ret;
	u8 fs_uuid[BTRFS_UUID_SIZE];
	u8 dev_uuid[BTRFS_UUID_SIZE];

	devid = btrfs_device_id(leaf, dev_item);
	read_extent_buffer(leaf, dev_uuid,
			   (unsigned long)btrfs_device_uuid(dev_item),
			   BTRFS_UUID_SIZE);
	read_extent_buffer(leaf, fs_uuid,
			   (unsigned long)btrfs_device_fsid(dev_item),
			   BTRFS_UUID_SIZE);

	if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_UUID_SIZE)) {
		ret = open_seed_devices(root, fs_uuid);
		if (ret && !btrfs_test_opt(root, DEGRADED))
			return ret;
	}

	device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
	if (!device || !device->bdev) {
		if (!btrfs_test_opt(root, DEGRADED))
			return -EIO;

		if (!device) {
			printk(KERN_WARNING "warning devid %llu missing\n",
			       (unsigned long long)devid);
			device = add_missing_dev(root, devid, dev_uuid);
			if (!device)
				return -ENOMEM;
		}
	}

	if (device->fs_devices != root->fs_info->fs_devices) {
		BUG_ON(device->writeable);
		if (device->generation !=
		    btrfs_device_generation(leaf, dev_item))
			return -EINVAL;
	}

	fill_device_from_item(leaf, dev_item, device);
	device->dev_root = root->fs_info->dev_root;
	device->in_fs_metadata = 1;
	if (device->writeable)
		device->fs_devices->total_rw_bytes += device->total_bytes;
	ret = 0;
	return ret;
}

int btrfs_read_super_device(struct btrfs_root *root, struct extent_buffer *buf)
{
	struct btrfs_dev_item *dev_item;

	dev_item = (struct btrfs_dev_item *)offsetof(struct btrfs_super_block,
						     dev_item);
	return read_one_dev(root, buf, dev_item);
}

int btrfs_read_sys_array(struct btrfs_root *root)
{
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	struct extent_buffer *sb;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *ptr;
	unsigned long sb_ptr;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur;
	struct btrfs_key key;

	sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET,
					  BTRFS_SUPER_INFO_SIZE);
	if (!sb)
		return -ENOMEM;
	btrfs_set_buffer_uptodate(sb);
	btrfs_set_buffer_lockdep_class(sb, 0);

	write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
	array_size = btrfs_super_sys_array_size(super_copy);

	ptr = super_copy->sys_chunk_array;
	sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array);
	cur = 0;

	while (cur < array_size) {
		disk_key = (struct btrfs_disk_key *)ptr;
		btrfs_disk_key_to_cpu(&key, disk_key);

		len = sizeof(*disk_key);
		ptr += len;
		sb_ptr += len;
		cur += len;

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)sb_ptr;
			ret = read_one_chunk(root, &key, sb, chunk);
			if (ret)
				break;
			num_stripes = btrfs_chunk_num_stripes(sb, chunk);
			len = btrfs_chunk_item_size(num_stripes);
		} else {
			ret = -EIO;
			break;
		}
		ptr += len;
		sb_ptr += len;
		cur += len;
	}
	free_extent_buffer(sb);
	return ret;
}

int btrfs_read_chunk_tree(struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	int ret;
	int slot;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* first we search for all of the device items, and then we
	 * read in all of the chunk items. This way we can create chunk
	 * mappings that reference all of the devices that are found
	 */
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = 0;
again:
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;
	while (1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
			break;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, slot);
		if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
			if (found_key.objectid != BTRFS_DEV_ITEMS_OBJECTID)
				break;
			if (found_key.type == BTRFS_DEV_ITEM_KEY) {
				struct btrfs_dev_item *dev_item;
				dev_item = btrfs_item_ptr(leaf, slot,
						  struct btrfs_dev_item);
				ret = read_one_dev(root, leaf, dev_item);
				if (ret)
					goto error;
			}
		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
			struct btrfs_chunk *chunk;
			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
			ret = read_one_chunk(root, &found_key, leaf, chunk);
			if (ret)
				goto error;
		}
		path->slots[0]++;
	}
	if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
		key.objectid = 0;
		btrfs_release_path(root, path);
		goto again;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}