// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) STRATO AG 2012.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/math64.h>
#include "misc.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "volumes.h"
#include "async-thread.h"
#include "dev-replace.h"
#include "sysfs.h"
#include "zoned.h"
#include "block-group.h"
#include "fs.h"
#include "accessors.h"
#include "scrub.h"

/*
 * Device replace overview
 *
 * [Objective]
 * To copy all extents (both new and on-disk) from the source device to the
 * target device, while still keeping the filesystem read-write.
 *
 * [Method]
 * There are two main methods involved:
 *
 * - Write duplication
 *
 *   All new writes will be written to both the target and the source devices,
 *   so even if replace gets canceled, the source device still contains
 *   up-to-date data.
 *
 *   Location:	handle_ops_on_dev_replace() from btrfs_map_block()
 *   Start:	btrfs_dev_replace_start()
 *   End:	btrfs_dev_replace_finishing()
 *   Content:	Latest data/metadata
 *
 * - Copy existing extents
 *
 *   This happens by re-using the scrub facility, as scrub also iterates
 *   through existing extents from the commit root.
 *
 *   Location:	scrub_write_block_to_dev_replace() from
 *		scrub_block_complete()
 *   Content:	Data/metadata from the commit root.
 *
 * Due to the content difference, we need to avoid nocow writes while
 * dev-replace is happening.  This is done by marking the block group
 * read-only and waiting for NOCOW writes.
 *
 * After replace is done, the finishing part is done by swapping the target
 * and source devices.
 *
 *   Location:	btrfs_dev_replace_update_device_in_mapping_tree() from
 *		btrfs_dev_replace_finishing()
 */

static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
				       int scrub_ret);
static int btrfs_dev_replace_kthread(void *data);

/*
 * Load the dev-replace state from the dev tree at mount time and initialize
 * fs_info->dev_replace from it.
 */
int btrfs_init_dev_replace(struct btrfs_fs_info *fs_info)
{
	struct btrfs_dev_lookup_args args = { .devid = BTRFS_DEV_REPLACE_DEVID };
	struct btrfs_key key;
	struct btrfs_root *dev_root = fs_info->dev_root;
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
	struct extent_buffer *eb;
	int slot;
	int ret = 0;
	struct btrfs_path *path = NULL;
	int item_size;
	struct btrfs_dev_replace_item *ptr;
	u64 src_devid;

	if (!dev_root)
		return 0;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	key.objectid = 0;
	key.type = BTRFS_DEV_REPLACE_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0);
	if (ret) {
no_valid_dev_replace_entry_found:
		/*
		 * We don't have a replace item or it's corrupted.  If there is
		 * a replace target, fail the mount.
		 */
		if (btrfs_find_device(fs_info->fs_devices, &args)) {
			btrfs_err(fs_info,
			"found replace target device without a valid replace item");
			ret = -EUCLEAN;
			goto out;
		}
		ret = 0;
		dev_replace->replace_state =
			BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED;
		dev_replace->cont_reading_from_srcdev_mode =
			BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_ALWAYS;
		dev_replace->time_started = 0;
		dev_replace->time_stopped = 0;
		atomic64_set(&dev_replace->num_write_errors, 0);
		atomic64_set(&dev_replace->num_uncorrectable_read_errors, 0);
		dev_replace->cursor_left = 0;
		dev_replace->committed_cursor_left = 0;
		dev_replace->cursor_left_last_write_of_item = 0;
		dev_replace->cursor_right = 0;
		dev_replace->srcdev = NULL;
		dev_replace->tgtdev = NULL;
		dev_replace->is_valid = 0;
		dev_replace->item_needs_writeback = 0;
		goto out;
	}
	slot = path->slots[0];
	eb = path->nodes[0];
	item_size = btrfs_item_size(eb, slot);
	ptr = btrfs_item_ptr(eb, slot, struct btrfs_dev_replace_item);

	if (item_size != sizeof(struct btrfs_dev_replace_item)) {
		btrfs_warn(fs_info,
			"dev_replace entry found has unexpected size, ignore entry");
		goto no_valid_dev_replace_entry_found;
	}

	src_devid = btrfs_dev_replace_src_devid(eb, ptr);
	dev_replace->cont_reading_from_srcdev_mode =
		btrfs_dev_replace_cont_reading_from_srcdev_mode(eb, ptr);
	dev_replace->replace_state = btrfs_dev_replace_replace_state(eb, ptr);
	dev_replace->time_started = btrfs_dev_replace_time_started(eb, ptr);
	dev_replace->time_stopped =
		btrfs_dev_replace_time_stopped(eb, ptr);
	atomic64_set(&dev_replace->num_write_errors,
		     btrfs_dev_replace_num_write_errors(eb, ptr));
	atomic64_set(&dev_replace->num_uncorrectable_read_errors,
		     btrfs_dev_replace_num_uncorrectable_read_errors(eb, ptr));
	dev_replace->cursor_left = btrfs_dev_replace_cursor_left(eb, ptr);
	dev_replace->committed_cursor_left = dev_replace->cursor_left;
	dev_replace->cursor_left_last_write_of_item = dev_replace->cursor_left;
	dev_replace->cursor_right = btrfs_dev_replace_cursor_right(eb, ptr);
	dev_replace->is_valid = 1;

	dev_replace->item_needs_writeback = 0;
	switch (dev_replace->replace_state) {
	case BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED:
	case BTRFS_IOCTL_DEV_REPLACE_STATE_FINISHED:
	case BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED:
		/*
		 * We don't have an active replace item but if there is a
		 * replace target, fail the mount.
		 */
		if (btrfs_find_device(fs_info->fs_devices, &args)) {
			btrfs_err(fs_info,
"replace without active item, run 'device scan --forget' on the target device");
			ret = -EUCLEAN;
		} else {
			dev_replace->srcdev = NULL;
			dev_replace->tgtdev = NULL;
		}
		break;
	case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED:
	case BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED:
		dev_replace->tgtdev = btrfs_find_device(fs_info->fs_devices, &args);
		args.devid = src_devid;
		dev_replace->srcdev = btrfs_find_device(fs_info->fs_devices, &args);

		/*
		 * Allow 'btrfs dev replace_cancel' if the src/tgt device is
		 * missing.
		 */
		if (!dev_replace->srcdev &&
		    !btrfs_test_opt(fs_info, DEGRADED)) {
			ret = -EIO;
			btrfs_warn(fs_info,
			"cannot mount because device replace operation is ongoing and");
			btrfs_warn(fs_info,
			"srcdev (devid %llu) is missing, need to run 'btrfs dev scan'?",
				   src_devid);
		}
		if (!dev_replace->tgtdev &&
		    !btrfs_test_opt(fs_info, DEGRADED)) {
			ret = -EIO;
			btrfs_warn(fs_info,
			"cannot mount because device replace operation is ongoing and");
			btrfs_warn(fs_info,
			"tgtdev (devid %llu) is missing, need to run 'btrfs dev scan'?",
				   BTRFS_DEV_REPLACE_DEVID);
		}
		if (dev_replace->tgtdev) {
			if (dev_replace->srcdev) {
				dev_replace->tgtdev->total_bytes =
					dev_replace->srcdev->total_bytes;
				dev_replace->tgtdev->disk_total_bytes =
					dev_replace->srcdev->disk_total_bytes;
				dev_replace->tgtdev->commit_total_bytes =
					dev_replace->srcdev->commit_total_bytes;
				dev_replace->tgtdev->bytes_used =
					dev_replace->srcdev->bytes_used;
				dev_replace->tgtdev->commit_bytes_used =
					dev_replace->srcdev->commit_bytes_used;
			}
			set_bit(BTRFS_DEV_STATE_REPLACE_TGT,
				&dev_replace->tgtdev->dev_state);

			WARN_ON(fs_info->fs_devices->rw_devices == 0);
			dev_replace->tgtdev->io_width = fs_info->sectorsize;
			dev_replace->tgtdev->io_align = fs_info->sectorsize;
			dev_replace->tgtdev->sector_size = fs_info->sectorsize;
			dev_replace->tgtdev->fs_info = fs_info;
			set_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
				&dev_replace->tgtdev->dev_state);
		}
		break;
	}

out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Initialize a new device for device replace target from a given source dev
 * and path.
 *
 * Return 0 and the new device in @device_out, otherwise return < 0.
 */
static int btrfs_init_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
					 const char *device_path,
					 struct btrfs_device *srcdev,
					 struct btrfs_device **device_out)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;
	struct file *bdev_file;
	struct block_device *bdev;
	u64 devid = BTRFS_DEV_REPLACE_DEVID;
	int ret = 0;

	*device_out = NULL;
	if (srcdev->fs_devices->seeding) {
		btrfs_err(fs_info, "the filesystem is a seed filesystem!");
		return -EINVAL;
	}

	bdev_file = bdev_file_open_by_path(device_path, BLK_OPEN_WRITE,
					   fs_info->bdev_holder, NULL);
	if (IS_ERR(bdev_file)) {
		btrfs_err(fs_info, "target device %s is invalid!", device_path);
		return PTR_ERR(bdev_file);
	}
	bdev = file_bdev(bdev_file);

	if (!btrfs_check_device_zone_type(fs_info, bdev)) {
		btrfs_err(fs_info,
		"dev-replace: zoned type of target device mismatch with filesystem");
		ret = -EINVAL;
		goto error;
	}

	sync_blockdev(bdev);

	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		if (device->bdev == bdev) {
			btrfs_err(fs_info,
				  "target device is in the filesystem!");
			ret = -EEXIST;
			goto error;
		}
	}

	if (bdev_nr_bytes(bdev) < btrfs_device_get_total_bytes(srcdev)) {
		btrfs_err(fs_info,
			  "target device is smaller than source device!");
		ret = -EINVAL;
		goto error;
	}

	device = btrfs_alloc_device(NULL, &devid, NULL, device_path);
	if (IS_ERR(device)) {
		ret = PTR_ERR(device);
		goto error;
	}

	ret = lookup_bdev(device_path, &device->devt);
	if (ret)
		goto error;

	set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
	device->generation = 0;
	device->io_width = fs_info->sectorsize;
	device->io_align = fs_info->sectorsize;
	device->sector_size = fs_info->sectorsize;
	device->total_bytes = btrfs_device_get_total_bytes(srcdev);
	device->disk_total_bytes = btrfs_device_get_disk_total_bytes(srcdev);
	device->bytes_used = btrfs_device_get_bytes_used(srcdev);
	device->commit_total_bytes = srcdev->commit_total_bytes;
	device->commit_bytes_used = device->bytes_used;
	device->fs_info = fs_info;
	device->bdev = bdev;
	device->bdev_file = bdev_file;
	set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
	set_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
	device->dev_stats_valid = 1;
	set_blocksize(bdev_file, BTRFS_BDEV_BLOCKSIZE);
	device->fs_devices = fs_devices;

	ret = btrfs_get_dev_zone_info(device, false);
	if (ret)
		goto error;

	mutex_lock(&fs_devices->device_list_mutex);
	list_add(&device->dev_list, &fs_devices->devices);
	fs_devices->num_devices++;
	fs_devices->open_devices++;
	mutex_unlock(&fs_devices->device_list_mutex);

	*device_out = device;
	return 0;

error:
	fput(bdev_file);
	return ret;
}

/*
 * Called from commit_transaction().  Writes the changed device replace state
 * to disk.
 */
int btrfs_run_dev_replace(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	int ret;
	struct btrfs_root *dev_root = fs_info->dev_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *eb;
	struct btrfs_dev_replace_item *ptr;
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;

	down_read(&dev_replace->rwsem);
	if (!dev_replace->is_valid ||
	    !dev_replace->item_needs_writeback) {
		up_read(&dev_replace->rwsem);
		return 0;
	}
	up_read(&dev_replace->rwsem);

	key.objectid = 0;
	key.type = BTRFS_DEV_REPLACE_KEY;
	key.offset = 0;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}
	ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
	if (ret < 0) {
		btrfs_warn(fs_info,
			   "error %d while searching for dev_replace item!",
			   ret);
		goto out;
	}

	if (ret == 0 &&
	    btrfs_item_size(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
		/*
		 * We need to delete the old item and insert a new one.
		 * Since no attempt is made to recover any old state, if the
		 * dev_replace state is 'running', the data on the target
		 * drive is lost.
		 * It would be possible to recover the state: just make sure
		 * that the beginning of the item is never changed and always
		 * contains all the essential information.  Then read this
		 * minimal set of information and use it as a base for the
		 * new state.
		 */
		ret = btrfs_del_item(trans, dev_root, path);
		if (ret != 0) {
			btrfs_warn(fs_info,
				   "delete too small dev_replace item failed %d!",
				   ret);
			goto out;
		}
		ret = 1;
	}

	if (ret == 1) {
		/* Need to insert a new item. */
		btrfs_release_path(path);
		ret = btrfs_insert_empty_item(trans, dev_root, path,
					      &key, sizeof(*ptr));
		if (ret < 0) {
			btrfs_warn(fs_info,
				   "insert dev_replace item failed %d!", ret);
			goto out;
		}
	}

	eb = path->nodes[0];
	ptr = btrfs_item_ptr(eb, path->slots[0],
			     struct btrfs_dev_replace_item);

	down_write(&dev_replace->rwsem);
	if (dev_replace->srcdev)
		btrfs_set_dev_replace_src_devid(eb, ptr,
						dev_replace->srcdev->devid);
	else
		btrfs_set_dev_replace_src_devid(eb, ptr, (u64)-1);
	btrfs_set_dev_replace_cont_reading_from_srcdev_mode(eb, ptr,
		dev_replace->cont_reading_from_srcdev_mode);
	btrfs_set_dev_replace_replace_state(eb, ptr,
					    dev_replace->replace_state);
	btrfs_set_dev_replace_time_started(eb, ptr, dev_replace->time_started);
	btrfs_set_dev_replace_time_stopped(eb, ptr, dev_replace->time_stopped);
	btrfs_set_dev_replace_num_write_errors(eb, ptr,
		atomic64_read(&dev_replace->num_write_errors));
	btrfs_set_dev_replace_num_uncorrectable_read_errors(eb, ptr,
		atomic64_read(&dev_replace->num_uncorrectable_read_errors));
	dev_replace->cursor_left_last_write_of_item =
		dev_replace->cursor_left;
	btrfs_set_dev_replace_cursor_left(eb, ptr,
		dev_replace->cursor_left_last_write_of_item);
	btrfs_set_dev_replace_cursor_right(eb, ptr,
		dev_replace->cursor_right);
	dev_replace->item_needs_writeback = 0;
	up_write(&dev_replace->rwsem);

	btrfs_mark_buffer_dirty(trans, eb);

out:
	btrfs_free_path(path);

	return ret;
}

/*
 * Mark all block groups that have a dev extent on the source device with the
 * TO_COPY flag, so that the copy progress can be tracked per block group.
 */
static int mark_block_group_to_copy(struct btrfs_fs_info *fs_info,
				    struct btrfs_device *src_dev)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_dev_extent *dev_extent = NULL;
	struct btrfs_block_group *cache;
	struct btrfs_trans_handle *trans;
	int iter_ret = 0;
	int ret = 0;
	u64 chunk_offset;

	/* Do not use "to_copy" on non-zoned filesystems for now. */
	if (!btrfs_is_zoned(fs_info))
		return 0;

	mutex_lock(&fs_info->chunk_mutex);

	/* Ensure we don't have pending new block groups. */
	spin_lock(&fs_info->trans_lock);
	while (fs_info->running_transaction &&
	       !list_empty(&fs_info->running_transaction->dev_update_list)) {
		spin_unlock(&fs_info->trans_lock);
		mutex_unlock(&fs_info->chunk_mutex);
		trans = btrfs_attach_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			mutex_lock(&fs_info->chunk_mutex);
			if (ret == -ENOENT) {
				spin_lock(&fs_info->trans_lock);
				continue;
			} else {
				goto unlock;
			}
		}

		ret = btrfs_commit_transaction(trans);
		mutex_lock(&fs_info->chunk_mutex);
		if (ret)
			goto unlock;

		spin_lock(&fs_info->trans_lock);
	}
	spin_unlock(&fs_info->trans_lock);

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto unlock;
	}

	path->reada = READA_FORWARD;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	key.objectid = src_dev->devid;
	key.type = BTRFS_DEV_EXTENT_KEY;
	key.offset = 0;

	btrfs_for_each_slot(root, &key, &found_key, path, iter_ret) {
		struct extent_buffer *leaf = path->nodes[0];

		if (found_key.objectid != src_dev->devid)
			break;

		if (found_key.type != BTRFS_DEV_EXTENT_KEY)
			break;

		if (found_key.offset < key.offset)
			break;

		dev_extent = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_extent);

		chunk_offset = btrfs_dev_extent_chunk_offset(leaf, dev_extent);

		cache = btrfs_lookup_block_group(fs_info, chunk_offset);
		if (!cache)
			continue;

		set_bit(BLOCK_GROUP_FLAG_TO_COPY, &cache->runtime_flags);
		btrfs_put_block_group(cache);
	}
	if (iter_ret < 0)
		ret = iter_ret;

	btrfs_free_path(path);
unlock:
	mutex_unlock(&fs_info->chunk_mutex);

	return ret;
}

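/*
 * Check whether the device extent at @physical was the last extent of @cache
 * left to copy on @srcdev.  If so, clear the block group's TO_COPY flag and
 * return true; otherwise return false so the block group stays read-only
 * until all its stripes on this device are finished.
 */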
bool btrfs_finish_block_group_to_copy(struct btrfs_device *srcdev,
				      struct btrfs_block_group *cache,
				      u64 physical)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_chunk_map *map;
	u64 chunk_offset = cache->start;
	int num_extents, cur_extent;
	int i;

	/* Do not use "to_copy" on non-zoned filesystems for now. */
	if (!btrfs_is_zoned(fs_info))
		return true;

	spin_lock(&cache->lock);
	if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &cache->runtime_flags)) {
		spin_unlock(&cache->lock);
		return true;
	}
	spin_unlock(&cache->lock);

	map = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
	ASSERT(!IS_ERR(map));

	num_extents = 0;
	cur_extent = 0;
	for (i = 0; i < map->num_stripes; i++) {
		/* We have more device extents to copy. */
		if (srcdev != map->stripes[i].dev)
			continue;

		num_extents++;
		if (physical == map->stripes[i].physical)
			cur_extent = i;
	}

	btrfs_free_chunk_map(map);

	if (num_extents > 1 && cur_extent < num_extents - 1) {
		/*
		 * There are more stripes on this device.  Keep this block
		 * group read-only until we finish all the stripes.
		 */
		return false;
	}

	/* Last stripe on this device. */
	clear_bit(BLOCK_GROUP_FLAG_TO_COPY, &cache->runtime_flags);

	return true;
}

/*
 * Start a device replace: register the target device, record the replace
 * state (from now on new writes are duplicated to the target) and copy all
 * existing extents by reusing the scrub code.
 */
static int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info,
		const char *tgtdev_name, u64 srcdevid, const char *srcdev_name,
		int read_src)
{
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
	int ret;
	struct btrfs_device *tgt_device = NULL;
	struct btrfs_device *src_device = NULL;

	src_device = btrfs_find_device_by_devspec(fs_info, srcdevid,
						  srcdev_name);
	if (IS_ERR(src_device))
		return PTR_ERR(src_device);

	if (btrfs_pinned_by_swapfile(fs_info, src_device)) {
		btrfs_warn_in_rcu(fs_info,
	  "cannot replace device %s (devid %llu) due to active swapfile",
			btrfs_dev_name(src_device), src_device->devid);
		return -ETXTBSY;
	}

	/*
	 * Here we commit the transaction to make sure commit_total_bytes
	 * of all the devices are updated.
	 */
	trans = btrfs_attach_transaction(root);
	if (!IS_ERR(trans)) {
		ret = btrfs_commit_transaction(trans);
		if (ret)
			return ret;
	} else if (PTR_ERR(trans) != -ENOENT) {
		return PTR_ERR(trans);
	}

	ret = btrfs_init_dev_replace_tgtdev(fs_info, tgtdev_name,
					    src_device, &tgt_device);
	if (ret)
		return ret;

	ret = mark_block_group_to_copy(fs_info, src_device);
	if (ret)
		return ret;

	down_write(&dev_replace->rwsem);
	switch (dev_replace->replace_state) {
	case BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED:
	case BTRFS_IOCTL_DEV_REPLACE_STATE_FINISHED:
	case BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED:
		break;
	case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED:
	case BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED:
		ASSERT(0);
		ret = BTRFS_IOCTL_DEV_REPLACE_RESULT_ALREADY_STARTED;
		up_write(&dev_replace->rwsem);
		goto leave;
	}

	dev_replace->cont_reading_from_srcdev_mode = read_src;
	dev_replace->srcdev = src_device;
	dev_replace->tgtdev = tgt_device;

	btrfs_info_in_rcu(fs_info,
		      "dev_replace from %s (devid %llu) to %s started",
		      btrfs_dev_name(src_device),
		      src_device->devid,
		      btrfs_dev_name(tgt_device));

	/*
	 * From now on, the writes to the srcdev are all duplicated to
	 * go to the tgtdev as well (refer to btrfs_map_block()).
	 */
	dev_replace->replace_state = BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED;
	dev_replace->time_started = ktime_get_real_seconds();
	dev_replace->cursor_left = 0;
	dev_replace->committed_cursor_left = 0;
	dev_replace->cursor_left_last_write_of_item = 0;
	dev_replace->cursor_right = 0;
	dev_replace->is_valid = 1;
	dev_replace->item_needs_writeback = 1;
	atomic64_set(&dev_replace->num_write_errors, 0);
	atomic64_set(&dev_replace->num_uncorrectable_read_errors, 0);
	up_write(&dev_replace->rwsem);

	ret = btrfs_sysfs_add_device(tgt_device);
	if (ret)
		btrfs_err(fs_info, "kobj add dev failed %d", ret);

	btrfs_wait_ordered_roots(fs_info, U64_MAX, NULL);

	/*
	 * Commit dev_replace state and reserve 1 item for it.
	 * This is crucial to ensure we won't miss copying extents for new
	 * block groups that are allocated after we started the device
	 * replace, and must be done after setting up the device replace
	 * state.
	 */
	trans = btrfs_start_transaction(root, 1);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		down_write(&dev_replace->rwsem);
		dev_replace->replace_state =
			BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED;
		dev_replace->srcdev = NULL;
		dev_replace->tgtdev = NULL;
		up_write(&dev_replace->rwsem);
		goto leave;
	}

	ret = btrfs_commit_transaction(trans);
	WARN_ON(ret);

	/* The disk copy procedure reuses the scrub code. */
	ret = btrfs_scrub_dev(fs_info, src_device->devid, 0,
			      btrfs_device_get_total_bytes(src_device),
			      &dev_replace->scrub_progress, 0, 1);

	ret = btrfs_dev_replace_finishing(fs_info, ret);
	if (ret == -EINPROGRESS)
		ret = BTRFS_IOCTL_DEV_REPLACE_RESULT_SCRUB_INPROGRESS;

	return ret;

leave:
	btrfs_destroy_dev_replace_tgtdev(tgt_device);
	return ret;
}

/*
 * Verify that the device name strings passed in by the ioctl are properly
 * NUL terminated.
 */
static int btrfs_check_replace_dev_names(struct btrfs_ioctl_dev_replace_args *args)
{
	if (args->start.srcdevid == 0) {
		if (memchr(args->start.srcdev_name, 0,
			   sizeof(args->start.srcdev_name)) == NULL)
			return -ENAMETOOLONG;
	} else {
		args->start.srcdev_name[0] = 0;
	}

	if (memchr(args->start.tgtdev_name, 0,
		   sizeof(args->start.tgtdev_name)) == NULL)
		return -ENAMETOOLONG;

	return 0;
}

int btrfs_dev_replace_by_ioctl(struct btrfs_fs_info *fs_info,
			       struct btrfs_ioctl_dev_replace_args *args)
{
	int ret;

	switch (args->start.cont_reading_from_srcdev_mode) {
	case BTRFS_IOCTL_DEV_REPLACE_CONT_READING_FROM_SRCDEV_MODE_ALWAYS:
	case BTRFS_IOCTL_DEV_REPLACE_CONT_READING_FROM_SRCDEV_MODE_AVOID:
		break;
	default:
		return -EINVAL;
	}
	ret = btrfs_check_replace_dev_names(args);
	if (ret < 0)
		return ret;

	ret = btrfs_dev_replace_start(fs_info, args->start.tgtdev_name,
				      args->start.srcdevid,
				      args->start.srcdev_name,
				      args->start.cont_reading_from_srcdev_mode);
	args->result = ret;
	/* Don't warn if EINPROGRESS, someone else might be running scrub. */
	if (ret == BTRFS_IOCTL_DEV_REPLACE_RESULT_SCRUB_INPROGRESS ||
	    ret == BTRFS_IOCTL_DEV_REPLACE_RESULT_NO_ERROR)
		return 0;

	return ret;
}

/*
 * Block new bio requests and wait until all in-flight bio operations are
 * finished.
 */
static void btrfs_rm_dev_replace_blocked(struct btrfs_fs_info *fs_info)
{
	set_bit(BTRFS_FS_STATE_DEV_REPLACING, &fs_info->fs_state);
	wait_event(fs_info->dev_replace.replace_wait, !percpu_counter_sum(
		   &fs_info->dev_replace.bio_counter));
}

/*
 * We have removed the target device, it is safe to allow new bio requests
 * again.
 */
static void btrfs_rm_dev_replace_unblocked(struct btrfs_fs_info *fs_info)
{
	clear_bit(BTRFS_FS_STATE_DEV_REPLACING, &fs_info->fs_state);
	wake_up(&fs_info->dev_replace.replace_wait);
}

/*
 * When finishing the device replace, before swapping the source device with
 * the target device we must update the chunk allocation state in the target
 * device, as it is empty because replace works by directly copying the
 * chunks and not through the normal chunk allocation path.
 */
static int btrfs_set_target_alloc_state(struct btrfs_device *srcdev,
					struct btrfs_device *tgtdev)
{
	struct extent_state *cached_state = NULL;
	u64 start = 0;
	u64 found_start;
	u64 found_end;
	int ret = 0;

	lockdep_assert_held(&srcdev->fs_info->chunk_mutex);

	while (find_first_extent_bit(&srcdev->alloc_state, start,
				     &found_start, &found_end,
				     CHUNK_ALLOCATED, &cached_state)) {
		ret = set_extent_bit(&tgtdev->alloc_state, found_start,
				     found_end, CHUNK_ALLOCATED, NULL);
		if (ret)
			break;
		start = found_end + 1;
	}

	free_extent_state(cached_state);
	return ret;
}

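/*
 * Walk all chunk maps in the mapping tree and replace every stripe that
 * points to the source device with the target device.  Called with the chunk
 * mutex held; the mapping tree lock is taken in write mode while walking.
 */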
static void btrfs_dev_replace_update_device_in_mapping_tree(
						struct btrfs_fs_info *fs_info,
						struct btrfs_device *srcdev,
						struct btrfs_device *tgtdev)
{
	struct rb_node *node;

	/*
	 * The chunk mutex must be held so that no new chunks can be created
	 * while we are updating existing chunks.  This guarantees we don't
	 * miss any new chunk that gets created for a range that falls before
	 * the range of the last chunk we processed.
	 */
	lockdep_assert_held(&fs_info->chunk_mutex);

	write_lock(&fs_info->mapping_tree_lock);
	node = rb_first_cached(&fs_info->mapping_tree);
	while (node) {
		struct rb_node *next = rb_next(node);
		struct btrfs_chunk_map *map;
		u64 next_start;

		map = rb_entry(node, struct btrfs_chunk_map, rb_node);
		next_start = map->start + map->chunk_len;

		for (int i = 0; i < map->num_stripes; i++)
			if (srcdev == map->stripes[i].dev)
				map->stripes[i].dev = tgtdev;

		if (cond_resched_rwlock_write(&fs_info->mapping_tree_lock)) {
			map = btrfs_find_chunk_map_nolock(fs_info, next_start, U64_MAX);
			if (!map)
				break;
			node = &map->rb_node;
			/*
			 * Drop the lookup reference since we are holding the
			 * lock in write mode and no one can remove the chunk
			 * map from the tree and drop its tree reference.
			 */
			btrfs_free_chunk_map(map);
		} else {
			node = next;
		}
	}
	write_unlock(&fs_info->mapping_tree_lock);
}

/*
 * Finish (or, if scrub failed, abort) a running device replace: flush and
 * commit outstanding work, swap the source and target devices in the chunk
 * mapping tree and in the device list, and remove the source device from the
 * filesystem.
 */
static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
				       int scrub_ret)
{
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *tgt_device;
	struct btrfs_device *src_device;
	struct btrfs_root *root = fs_info->tree_root;
	u8 uuid_tmp[BTRFS_UUID_SIZE];
	struct btrfs_trans_handle *trans;
	int ret = 0;

	/* Don't allow cancel or unmount to disturb the finishing procedure. */
	mutex_lock(&dev_replace->lock_finishing_cancel_unmount);

	down_read(&dev_replace->rwsem);
	/* Was the operation canceled, or is it finished? */
	if (dev_replace->replace_state !=
	    BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED) {
		up_read(&dev_replace->rwsem);
		mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
		return 0;
	}

	tgt_device = dev_replace->tgtdev;
	src_device = dev_replace->srcdev;
	up_read(&dev_replace->rwsem);

	/*
	 * Flush all outstanding I/O and inode extent mappings before the
	 * copy operation is declared as being finished.
	 */
	ret = btrfs_start_delalloc_roots(fs_info, LONG_MAX, false);
	if (ret) {
		mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
		return ret;
	}
	btrfs_wait_ordered_roots(fs_info, U64_MAX, NULL);

	/*
	 * We have to use this loop approach because at this point src_device
	 * has to be available for transaction commit to complete, yet new
	 * chunks shouldn't be allocated on the device.
	 */
	while (1) {
		trans = btrfs_start_transaction(root, 0);
		if (IS_ERR(trans)) {
			mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
			return PTR_ERR(trans);
		}
		ret = btrfs_commit_transaction(trans);
		WARN_ON(ret);

		/* Prevent write_all_supers() during the finishing procedure. */
		mutex_lock(&fs_devices->device_list_mutex);
		/* Prevent new chunks being allocated on the source device. */
		mutex_lock(&fs_info->chunk_mutex);

		if (!list_empty(&src_device->post_commit_list)) {
			mutex_unlock(&fs_devices->device_list_mutex);
			mutex_unlock(&fs_info->chunk_mutex);
		} else {
			break;
		}
	}

	down_write(&dev_replace->rwsem);
	dev_replace->replace_state =
		scrub_ret ? BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED
			  : BTRFS_IOCTL_DEV_REPLACE_STATE_FINISHED;
	dev_replace->tgtdev = NULL;
	dev_replace->srcdev = NULL;
	dev_replace->time_stopped = ktime_get_real_seconds();
	dev_replace->item_needs_writeback = 1;

	/*
	 * Update allocation state in the new device and replace the old
	 * device with the new one in the mapping tree.
	 */
	if (!scrub_ret) {
		scrub_ret = btrfs_set_target_alloc_state(src_device, tgt_device);
		if (scrub_ret)
			goto error;
		btrfs_dev_replace_update_device_in_mapping_tree(fs_info,
								src_device,
								tgt_device);
	} else {
		if (scrub_ret != -ECANCELED)
			btrfs_err_in_rcu(fs_info,
				 "btrfs_scrub_dev(%s, %llu, %s) failed %d",
				 btrfs_dev_name(src_device),
				 src_device->devid,
				 btrfs_dev_name(tgt_device), scrub_ret);
error:
		up_write(&dev_replace->rwsem);
		mutex_unlock(&fs_info->chunk_mutex);
		mutex_unlock(&fs_devices->device_list_mutex);
		btrfs_rm_dev_replace_blocked(fs_info);
		if (tgt_device)
			btrfs_destroy_dev_replace_tgtdev(tgt_device);
		btrfs_rm_dev_replace_unblocked(fs_info);
		mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);

		return scrub_ret;
	}

	btrfs_info_in_rcu(fs_info,
			  "dev_replace from %s (devid %llu) to %s finished",
			  btrfs_dev_name(src_device),
			  src_device->devid,
			  btrfs_dev_name(tgt_device));
	clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &tgt_device->dev_state);
	tgt_device->devid = src_device->devid;
	src_device->devid = BTRFS_DEV_REPLACE_DEVID;
	memcpy(uuid_tmp, tgt_device->uuid, sizeof(uuid_tmp));
	memcpy(tgt_device->uuid, src_device->uuid, sizeof(tgt_device->uuid));
	memcpy(src_device->uuid, uuid_tmp, sizeof(src_device->uuid));
	btrfs_device_set_total_bytes(tgt_device, src_device->total_bytes);
	btrfs_device_set_disk_total_bytes(tgt_device,
					  src_device->disk_total_bytes);
	btrfs_device_set_bytes_used(tgt_device, src_device->bytes_used);
	tgt_device->commit_bytes_used = src_device->bytes_used;

	btrfs_assign_next_active_device(src_device, tgt_device);

	list_add(&tgt_device->dev_alloc_list, &fs_devices->alloc_list);
	fs_devices->rw_devices++;

	up_write(&dev_replace->rwsem);
	btrfs_rm_dev_replace_blocked(fs_info);

	btrfs_rm_dev_replace_remove_srcdev(src_device);

	btrfs_rm_dev_replace_unblocked(fs_info);

	/*
	 * Increment dev_stats_ccnt so that btrfs_run_dev_stats() will
	 * update the on-disk dev stats value during transaction commit.
	 */
	atomic_inc(&tgt_device->dev_stats_ccnt);

	/*
	 * This is again a consistent state where no dev_replace procedure
	 * is running, the target device is part of the filesystem, the
	 * source device is not part of the filesystem anymore and its 1st
	 * superblock is scratched out so that it is no longer marked to
	 * belong to this filesystem.
	 */
	mutex_unlock(&fs_info->chunk_mutex);
	mutex_unlock(&fs_devices->device_list_mutex);

	/* Replace the sysfs entry. */
	btrfs_sysfs_remove_device(src_device);
	btrfs_sysfs_update_devid(tgt_device);
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &src_device->dev_state))
		btrfs_scratch_superblocks(fs_info, src_device);

	/* Write back the superblocks. */
	trans = btrfs_start_transaction(root, 0);
	if (!IS_ERR(trans))
		btrfs_commit_transaction(trans);

	mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);

	btrfs_rm_dev_replace_free_srcdev(src_device);

	return 0;
}

/*
 * Read progress of the device replace status according to the state and the
 * last stored position.  The value format is the same as for
 * btrfs_dev_replace::progress_1000.
 */
static u64 btrfs_dev_replace_progress(struct btrfs_fs_info *fs_info)
{
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
	u64 ret = 0;

	switch (dev_replace->replace_state) {
	case BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED:
	case BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED:
		ret = 0;
		break;
	case BTRFS_IOCTL_DEV_REPLACE_STATE_FINISHED:
		ret = 1000;
		break;
	case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED:
	case BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED:
		ret = div64_u64(dev_replace->cursor_left,
				div_u64(btrfs_device_get_total_bytes(
						dev_replace->srcdev), 1000));
		break;
	}

	return ret;
}

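/* Fill in the status part of the dev-replace ioctl arguments. */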
void btrfs_dev_replace_status(struct btrfs_fs_info *fs_info,
			      struct btrfs_ioctl_dev_replace_args *args)
{
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;

	down_read(&dev_replace->rwsem);
	/*
	 * Even if !dev_replace->is_valid, the values are good enough for
	 * the replace_status ioctl.
	 */
	args->result = BTRFS_IOCTL_DEV_REPLACE_RESULT_NO_ERROR;
	args->status.replace_state = dev_replace->replace_state;
	args->status.time_started = dev_replace->time_started;
	args->status.time_stopped = dev_replace->time_stopped;
	args->status.num_write_errors =
		atomic64_read(&dev_replace->num_write_errors);
	args->status.num_uncorrectable_read_errors =
		atomic64_read(&dev_replace->num_uncorrectable_read_errors);
	args->status.progress_1000 = btrfs_dev_replace_progress(fs_info);
	up_read(&dev_replace->rwsem);
}

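/*
 * Cancel a running or suspended device replace.  For a running replace the
 * scrub is canceled and btrfs_dev_replace_finishing() does the cleanup; for
 * a suspended replace the state is set to canceled here and the target
 * device is destroyed.
 */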
int btrfs_dev_replace_cancel(struct btrfs_fs_info *fs_info)
{
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
	struct btrfs_device *tgt_device = NULL;
	struct btrfs_device *src_device = NULL;
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = fs_info->tree_root;
	int result;
	int ret;

	if (sb_rdonly(fs_info->sb))
		return -EROFS;

	mutex_lock(&dev_replace->lock_finishing_cancel_unmount);
	down_write(&dev_replace->rwsem);
	switch (dev_replace->replace_state) {
	case BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED:
	case BTRFS_IOCTL_DEV_REPLACE_STATE_FINISHED:
	case BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED:
		result = BTRFS_IOCTL_DEV_REPLACE_RESULT_NOT_STARTED;
		up_write(&dev_replace->rwsem);
		break;
	case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED:
		tgt_device = dev_replace->tgtdev;
		src_device = dev_replace->srcdev;
		up_write(&dev_replace->rwsem);
		ret = btrfs_scrub_cancel(fs_info);
		if (ret < 0) {
			result = BTRFS_IOCTL_DEV_REPLACE_RESULT_NOT_STARTED;
		} else {
			result = BTRFS_IOCTL_DEV_REPLACE_RESULT_NO_ERROR;
			/*
			 * btrfs_dev_replace_finishing() will handle the
			 * cleanup part.
			 */
			btrfs_info_in_rcu(fs_info,
				"dev_replace from %s (devid %llu) to %s canceled",
				btrfs_dev_name(src_device), src_device->devid,
				btrfs_dev_name(tgt_device));
		}
		break;
	case BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED:
		/*
		 * Scrub doing the replace isn't running so we need to do the
		 * cleanup step of btrfs_dev_replace_finishing() here.
		 */
		result = BTRFS_IOCTL_DEV_REPLACE_RESULT_NO_ERROR;
		tgt_device = dev_replace->tgtdev;
		src_device = dev_replace->srcdev;
		dev_replace->tgtdev = NULL;
		dev_replace->srcdev = NULL;
		dev_replace->replace_state =
			BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED;
		dev_replace->time_stopped = ktime_get_real_seconds();
		dev_replace->item_needs_writeback = 1;

		up_write(&dev_replace->rwsem);

		/* Scrub for replace must not be running in the suspended state. */
		btrfs_scrub_cancel(fs_info);

		trans = btrfs_start_transaction(root, 0);
		if (IS_ERR(trans)) {
			mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
			return PTR_ERR(trans);
		}
		ret = btrfs_commit_transaction(trans);
		WARN_ON(ret);

		btrfs_info_in_rcu(fs_info,
			"suspended dev_replace from %s (devid %llu) to %s canceled",
			btrfs_dev_name(src_device), src_device->devid,
			btrfs_dev_name(tgt_device));

		if (tgt_device)
			btrfs_destroy_dev_replace_tgtdev(tgt_device);
		break;
	default:
		up_write(&dev_replace->rwsem);
		result = -EINVAL;
	}

	mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
	return result;
}

/*
 * Suspend a running device replace during unmount so that it can be resumed
 * on the next mount.
 */
void btrfs_dev_replace_suspend_for_unmount(struct btrfs_fs_info *fs_info)
{
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;

	mutex_lock(&dev_replace->lock_finishing_cancel_unmount);
	down_write(&dev_replace->rwsem);

	switch (dev_replace->replace_state) {
	case BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED:
	case BTRFS_IOCTL_DEV_REPLACE_STATE_FINISHED:
	case BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED:
	case BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED:
		break;
	case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED:
		dev_replace->replace_state =
			BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED;
		dev_replace->time_stopped = ktime_get_real_seconds();
		dev_replace->item_needs_writeback = 1;
		btrfs_info(fs_info, "suspending dev_replace for unmount");
		break;
	}

	up_write(&dev_replace->rwsem);
	mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
}

/* Resume the dev_replace procedure that was interrupted by unmount. */
int btrfs_resume_dev_replace_async(struct btrfs_fs_info *fs_info)
{
	struct task_struct *task;
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;

	down_write(&dev_replace->rwsem);

	switch (dev_replace->replace_state) {
	case BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED:
	case BTRFS_IOCTL_DEV_REPLACE_STATE_FINISHED:
	case BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED:
		up_write(&dev_replace->rwsem);
		return 0;
	case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED:
		break;
	case BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED:
		dev_replace->replace_state =
			BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED;
		break;
	}
	if (!dev_replace->tgtdev || !dev_replace->tgtdev->bdev) {
		btrfs_info(fs_info,
			   "cannot continue dev_replace, tgtdev is missing");
		btrfs_info(fs_info,
			   "you may cancel the operation after 'mount -o degraded'");
		dev_replace->replace_state =
			BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED;
		up_write(&dev_replace->rwsem);
		return 0;
	}
	up_write(&dev_replace->rwsem);

	/*
	 * This could collide with a paused balance, but the exclusive op
	 * logic should never allow both to start and pause.  We don't want
	 * to allow dev-replace to start anyway.
	 */
	if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_DEV_REPLACE)) {
		down_write(&dev_replace->rwsem);
		dev_replace->replace_state =
			BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED;
		up_write(&dev_replace->rwsem);
		btrfs_info(fs_info,
			   "cannot resume dev-replace, other exclusive operation running");
		return 0;
	}

	task = kthread_run(btrfs_dev_replace_kthread, fs_info, "btrfs-devrepl");
	return PTR_ERR_OR_ZERO(task);
}

/*
 * Kthread that resumes a suspended device replace by re-running scrub from
 * the last committed cursor position.
 */
static int btrfs_dev_replace_kthread(void *data)
{
	struct btrfs_fs_info *fs_info = data;
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
	u64 progress;
	int ret;

	progress = btrfs_dev_replace_progress(fs_info);
	progress = div_u64(progress, 10);
	btrfs_info_in_rcu(fs_info,
		"continuing dev_replace from %s (devid %llu) to target %s @%u%%",
		btrfs_dev_name(dev_replace->srcdev),
		dev_replace->srcdev->devid,
		btrfs_dev_name(dev_replace->tgtdev),
		(unsigned int)progress);

	ret = btrfs_scrub_dev(fs_info, dev_replace->srcdev->devid,
			      dev_replace->committed_cursor_left,
			      btrfs_device_get_total_bytes(dev_replace->srcdev),
			      &dev_replace->scrub_progress, 0, 1);
	ret = btrfs_dev_replace_finishing(fs_info, ret);
	WARN_ON(ret && ret != -ECANCELED);

	btrfs_exclop_finish(fs_info);
	return 0;
}

int __pure btrfs_dev_replace_is_ongoing(struct btrfs_dev_replace *dev_replace)
{
	if (!dev_replace->is_valid)
		return 0;

	switch (dev_replace->replace_state) {
	case BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED:
	case BTRFS_IOCTL_DEV_REPLACE_STATE_FINISHED:
	case BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED:
		return 0;
	case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED:
	case BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED:
		/*
		 * Return true even if the tgtdev is missing.  This can happen
		 * if the dev_replace procedure is suspended by an unmount and
		 * then the tgtdev is missing (or "btrfs dev scan" was not
		 * called) and the filesystem is remounted in degraded state.
		 * This does not stop the dev_replace procedure.  It needs to
		 * be canceled manually if the cancellation is wanted.
		 */
		break;
	}
	return 1;
}

void btrfs_bio_counter_sub(struct btrfs_fs_info *fs_info, s64 amount)
{
	percpu_counter_sub(&fs_info->dev_replace.bio_counter, amount);
	cond_wake_up_nomb(&fs_info->dev_replace.replace_wait);
}

void btrfs_bio_counter_inc_blocked(struct btrfs_fs_info *fs_info)
{
	while (1) {
		percpu_counter_inc(&fs_info->dev_replace.bio_counter);
		if (likely(!test_bit(BTRFS_FS_STATE_DEV_REPLACING,
				     &fs_info->fs_state)))
			break;

		btrfs_bio_counter_dec(fs_info);
		wait_event(fs_info->dev_replace.replace_wait,
			   !test_bit(BTRFS_FS_STATE_DEV_REPLACING,
				     &fs_info->fs_state));
	}
}