// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) STRATO AG 2012. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/math64.h>
#include "misc.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "volumes.h"
#include "async-thread.h"
#include "dev-replace.h"
#include "sysfs.h"
#include "zoned.h"
#include "block-group.h"
#include "fs.h"
#include "accessors.h"
#include "scrub.h"

/*
 * Device replace overview
 *
 * [Objective]
 * To copy all extents (both new and on-disk) from the source device to the
 * target device, while still keeping the filesystem read-write.
 *
 * [Method]
 * There are two main methods involved:
 *
 * - Write duplication
 *
 *   All new writes will be written to both the target and the source devices,
 *   so even if replace gets canceled, the source device still contains
 *   up-to-date data.
 *
 *   Location: handle_ops_on_dev_replace() from btrfs_map_block()
 *   Start:    btrfs_dev_replace_start()
 *   End:      btrfs_dev_replace_finishing()
 *   Content:  Latest data/metadata
 *
 * - Copy existing extents
 *
 *   This happens by reusing the scrub facility, as scrub also iterates through
 *   existing extents from the commit root.
 *
 *   Location: scrub_write_block_to_dev_replace() from
 *             scrub_block_complete()
 *   Content:  Data/meta from the commit root.
 *
 * Due to the content difference, we need to avoid nocow writes while
 * dev-replace is happening. This is done by marking the block group read-only
 * and waiting for NOCOW writes.
 *
 * After replace is done, the finishing part is done by swapping the target and
 * source devices.
 *
 *   Location: btrfs_dev_replace_update_device_in_mapping_tree() from
 *             btrfs_dev_replace_finishing()
 */

static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
				       int scrub_ret);
static int btrfs_dev_replace_kthread(void *data);

int btrfs_init_dev_replace(struct btrfs_fs_info *fs_info)
{
	struct btrfs_dev_lookup_args args = { .devid = BTRFS_DEV_REPLACE_DEVID };
	struct btrfs_key key;
	struct btrfs_root *dev_root = fs_info->dev_root;
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
	struct extent_buffer *eb;
	int slot;
	int ret = 0;
	struct btrfs_path *path = NULL;
	int item_size;
	struct btrfs_dev_replace_item *ptr;
	u64 src_devid;

	if (!dev_root)
		return 0;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	key.objectid = 0;
	key.type = BTRFS_DEV_REPLACE_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0);
	if (ret) {
no_valid_dev_replace_entry_found:
		/*
		 * We don't have a replace item or it's corrupted. If there is
		 * a replace target, fail the mount.
		 */
		if (btrfs_find_device(fs_info->fs_devices, &args)) {
			btrfs_err(fs_info,
				  "found replace target device without a valid replace item");
			ret = -EUCLEAN;
			goto out;
		}
		ret = 0;
		dev_replace->replace_state =
			BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED;
		dev_replace->cont_reading_from_srcdev_mode =
			BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_ALWAYS;
		dev_replace->time_started = 0;
		dev_replace->time_stopped = 0;
		atomic64_set(&dev_replace->num_write_errors, 0);
		atomic64_set(&dev_replace->num_uncorrectable_read_errors, 0);
		dev_replace->cursor_left = 0;
		dev_replace->committed_cursor_left = 0;
		dev_replace->cursor_left_last_write_of_item = 0;
		dev_replace->cursor_right = 0;
		dev_replace->srcdev = NULL;
		dev_replace->tgtdev = NULL;
		dev_replace->is_valid = 0;
		dev_replace->item_needs_writeback = 0;
		goto out;
	}
	slot = path->slots[0];
	eb = path->nodes[0];
	item_size = btrfs_item_size(eb, slot);
	ptr = btrfs_item_ptr(eb, slot, struct btrfs_dev_replace_item);

	if (item_size != sizeof(struct btrfs_dev_replace_item)) {
		btrfs_warn(fs_info,
			   "dev_replace entry found has unexpected size, ignore entry");
		goto no_valid_dev_replace_entry_found;
	}

	src_devid = btrfs_dev_replace_src_devid(eb, ptr);
	dev_replace->cont_reading_from_srcdev_mode =
		btrfs_dev_replace_cont_reading_from_srcdev_mode(eb, ptr);
	dev_replace->replace_state = btrfs_dev_replace_replace_state(eb, ptr);
	dev_replace->time_started = btrfs_dev_replace_time_started(eb, ptr);
	dev_replace->time_stopped =
		btrfs_dev_replace_time_stopped(eb, ptr);
	atomic64_set(&dev_replace->num_write_errors,
		     btrfs_dev_replace_num_write_errors(eb, ptr));
	atomic64_set(&dev_replace->num_uncorrectable_read_errors,
		     btrfs_dev_replace_num_uncorrectable_read_errors(eb, ptr));
	dev_replace->cursor_left = btrfs_dev_replace_cursor_left(eb, ptr);
	dev_replace->committed_cursor_left = dev_replace->cursor_left;
	dev_replace->cursor_left_last_write_of_item = dev_replace->cursor_left;
	dev_replace->cursor_right = btrfs_dev_replace_cursor_right(eb, ptr);
	dev_replace->is_valid = 1;

	dev_replace->item_needs_writeback = 0;
	switch (dev_replace->replace_state) {
	case BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED:
	case BTRFS_IOCTL_DEV_REPLACE_STATE_FINISHED:
	case BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED:
		/*
		 * We don't have an active replace item but if there is a
		 * replace target, fail the mount.
		 */
		if (btrfs_find_device(fs_info->fs_devices, &args)) {
			btrfs_err(fs_info,
				  "replace without active item, run 'device scan --forget' on the target device");
			ret = -EUCLEAN;
		} else {
			dev_replace->srcdev = NULL;
			dev_replace->tgtdev = NULL;
		}
		break;
	case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED:
	case BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED:
		dev_replace->tgtdev = btrfs_find_device(fs_info->fs_devices, &args);
		args.devid = src_devid;
		dev_replace->srcdev = btrfs_find_device(fs_info->fs_devices, &args);

		/*
		 * allow 'btrfs dev replace_cancel' if src/tgt device is
		 * missing
		 */
		if (!dev_replace->srcdev &&
		    !btrfs_test_opt(fs_info, DEGRADED)) {
			ret = -EIO;
			btrfs_warn(fs_info,
				   "cannot mount because device replace operation is ongoing and");
			btrfs_warn(fs_info,
				   "srcdev (devid %llu) is missing, need to run 'btrfs dev scan'?",
				   src_devid);
		}
		if (!dev_replace->tgtdev &&
		    !btrfs_test_opt(fs_info, DEGRADED)) {
			ret = -EIO;
			btrfs_warn(fs_info,
				   "cannot mount because device replace operation is ongoing and");
			btrfs_warn(fs_info,
				   "tgtdev (devid %llu) is missing, need to run 'btrfs dev scan'?",
				   BTRFS_DEV_REPLACE_DEVID);
		}
		if (dev_replace->tgtdev) {
			if (dev_replace->srcdev) {
				dev_replace->tgtdev->total_bytes =
					dev_replace->srcdev->total_bytes;
				dev_replace->tgtdev->disk_total_bytes =
					dev_replace->srcdev->disk_total_bytes;
				dev_replace->tgtdev->commit_total_bytes =
					dev_replace->srcdev->commit_total_bytes;
				dev_replace->tgtdev->bytes_used =
					dev_replace->srcdev->bytes_used;
				dev_replace->tgtdev->commit_bytes_used =
					dev_replace->srcdev->commit_bytes_used;
			}
			set_bit(BTRFS_DEV_STATE_REPLACE_TGT,
				&dev_replace->tgtdev->dev_state);

			WARN_ON(fs_info->fs_devices->rw_devices == 0);
			dev_replace->tgtdev->io_width = fs_info->sectorsize;
			dev_replace->tgtdev->io_align = fs_info->sectorsize;
			dev_replace->tgtdev->sector_size = fs_info->sectorsize;
			dev_replace->tgtdev->fs_info = fs_info;
			set_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
				&dev_replace->tgtdev->dev_state);
		}
		break;
	}

out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Initialize a new device for device replace target from a given source dev
 * and path.
 *
 * Return 0 and new device in @device_out, otherwise return < 0
 */
static int btrfs_init_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
					 const char *device_path,
					 struct btrfs_device *srcdev,
					 struct btrfs_device **device_out)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;
	struct file *bdev_file;
	struct block_device *bdev;
	u64 devid = BTRFS_DEV_REPLACE_DEVID;
	int ret = 0;

	*device_out = NULL;
	if (srcdev->fs_devices->seeding) {
		btrfs_err(fs_info, "the filesystem is a seed filesystem!");
		return -EINVAL;
	}

	bdev_file = bdev_file_open_by_path(device_path, BLK_OPEN_WRITE,
					   fs_info->bdev_holder, NULL);
	if (IS_ERR(bdev_file)) {
		btrfs_err(fs_info, "target device %s is invalid!", device_path);
		return PTR_ERR(bdev_file);
	}
	bdev = file_bdev(bdev_file);

	if (!btrfs_check_device_zone_type(fs_info, bdev)) {
		btrfs_err(fs_info,
			  "dev-replace: zoned type of target device mismatch with filesystem");
		ret = -EINVAL;
		goto error;
	}

	sync_blockdev(bdev);

	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		if (device->bdev == bdev) {
			btrfs_err(fs_info,
				  "target device is in the filesystem!");
			ret = -EEXIST;
			goto error;
		}
	}

	if (bdev_nr_bytes(bdev) < btrfs_device_get_total_bytes(srcdev)) {
		btrfs_err(fs_info,
			  "target device is smaller than source device!");
		ret = -EINVAL;
		goto error;
	}

	device = btrfs_alloc_device(NULL, &devid, NULL, device_path);
	if (IS_ERR(device)) {
		ret = PTR_ERR(device);
		goto error;
	}

	ret = lookup_bdev(device_path, &device->devt);
	if (ret)
		goto error;

	set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
	device->generation = 0;
	device->io_width = fs_info->sectorsize;
	device->io_align = fs_info->sectorsize;
	device->sector_size = fs_info->sectorsize;
	device->total_bytes = btrfs_device_get_total_bytes(srcdev);
	device->disk_total_bytes = btrfs_device_get_disk_total_bytes(srcdev);
	device->bytes_used = btrfs_device_get_bytes_used(srcdev);
	device->commit_total_bytes = srcdev->commit_total_bytes;
	device->commit_bytes_used = device->bytes_used;
	device->fs_info = fs_info;
	device->bdev = bdev;
	device->bdev_file = bdev_file;
	set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
	set_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
	device->dev_stats_valid = 1;
	set_blocksize(bdev_file, BTRFS_BDEV_BLOCKSIZE);
	device->fs_devices = fs_devices;

	ret = btrfs_get_dev_zone_info(device, false);
	if (ret)
		goto error;

	mutex_lock(&fs_devices->device_list_mutex);
	list_add(&device->dev_list, &fs_devices->devices);
	fs_devices->num_devices++;
	fs_devices->open_devices++;
	mutex_unlock(&fs_devices->device_list_mutex);

	*device_out = device;
	return 0;

error:
	fput(bdev_file);
	return ret;
}

/*
 * called from commit_transaction. Writes changed device replace state to
 * disk.
 */
int btrfs_run_dev_replace(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	int ret;
	struct btrfs_root *dev_root = fs_info->dev_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *eb;
	struct btrfs_dev_replace_item *ptr;
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;

	down_read(&dev_replace->rwsem);
	if (!dev_replace->is_valid ||
	    !dev_replace->item_needs_writeback) {
		up_read(&dev_replace->rwsem);
		return 0;
	}
	up_read(&dev_replace->rwsem);

	key.objectid = 0;
	key.type = BTRFS_DEV_REPLACE_KEY;
	key.offset = 0;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}
	ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
	if (ret < 0) {
		btrfs_warn(fs_info,
			   "error %d while searching for dev_replace item!",
			   ret);
		goto out;
	}

	if (ret == 0 &&
	    btrfs_item_size(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
		/*
		 * need to delete old one and insert a new one.
		 * Since no attempt is made to recover any old state, if the
		 * dev_replace state is 'running', the data on the target
		 * drive is lost.
		 * It would be possible to recover the state: just make sure
		 * that the beginning of the item is never changed and always
		 * contains all the essential information. Then read this
		 * minimal set of information and use it as a base for the
		 * new state.
		 */
		ret = btrfs_del_item(trans, dev_root, path);
		if (ret != 0) {
			btrfs_warn(fs_info,
				   "delete too small dev_replace item failed %d!",
				   ret);
			goto out;
		}
		ret = 1;
	}

	if (ret == 1) {
		/* need to insert a new item */
		btrfs_release_path(path);
		ret = btrfs_insert_empty_item(trans, dev_root, path,
					      &key, sizeof(*ptr));
		if (ret < 0) {
			btrfs_warn(fs_info,
				   "insert dev_replace item failed %d!", ret);
			goto out;
		}
	}

	eb = path->nodes[0];
	ptr = btrfs_item_ptr(eb, path->slots[0],
			     struct btrfs_dev_replace_item);

	down_write(&dev_replace->rwsem);
	if (dev_replace->srcdev)
		btrfs_set_dev_replace_src_devid(eb, ptr,
						dev_replace->srcdev->devid);
	else
		btrfs_set_dev_replace_src_devid(eb, ptr, (u64)-1);
	btrfs_set_dev_replace_cont_reading_from_srcdev_mode(eb, ptr,
			dev_replace->cont_reading_from_srcdev_mode);
	btrfs_set_dev_replace_replace_state(eb, ptr,
			dev_replace->replace_state);
	btrfs_set_dev_replace_time_started(eb, ptr, dev_replace->time_started);
	btrfs_set_dev_replace_time_stopped(eb, ptr, dev_replace->time_stopped);
	btrfs_set_dev_replace_num_write_errors(eb, ptr,
			atomic64_read(&dev_replace->num_write_errors));
	btrfs_set_dev_replace_num_uncorrectable_read_errors(eb, ptr,
			atomic64_read(&dev_replace->num_uncorrectable_read_errors));
	dev_replace->cursor_left_last_write_of_item =
		dev_replace->cursor_left;
	btrfs_set_dev_replace_cursor_left(eb, ptr,
			dev_replace->cursor_left_last_write_of_item);
	btrfs_set_dev_replace_cursor_right(eb, ptr,
			dev_replace->cursor_right);
	dev_replace->item_needs_writeback = 0;
	up_write(&dev_replace->rwsem);
out:
	btrfs_free_path(path);

	return ret;
}

/*
 * On zoned filesystems, mark all block groups that have a device extent on the
 * source device with the TO_COPY flag, so the replace operation knows which
 * block groups still need to be copied to the target device.
 */
static int mark_block_group_to_copy(struct btrfs_fs_info *fs_info,
				    struct btrfs_device *src_dev)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_dev_extent *dev_extent = NULL;
	struct btrfs_block_group *cache;
	struct btrfs_trans_handle *trans;
	int iter_ret = 0;
	int ret = 0;
	u64 chunk_offset;

	/* Do not use "to_copy" on non zoned filesystem for now */
	if (!btrfs_is_zoned(fs_info))
		return 0;

	mutex_lock(&fs_info->chunk_mutex);

	/* Ensure we don't have pending new block group */
	spin_lock(&fs_info->trans_lock);
	while (fs_info->running_transaction &&
	       !list_empty(&fs_info->running_transaction->dev_update_list)) {
		spin_unlock(&fs_info->trans_lock);
		mutex_unlock(&fs_info->chunk_mutex);
		trans = btrfs_attach_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			mutex_lock(&fs_info->chunk_mutex);
			if (ret == -ENOENT) {
				spin_lock(&fs_info->trans_lock);
				continue;
			} else {
				goto unlock;
			}
		}

		ret = btrfs_commit_transaction(trans);
		mutex_lock(&fs_info->chunk_mutex);
		if (ret)
			goto unlock;

		spin_lock(&fs_info->trans_lock);
	}
	spin_unlock(&fs_info->trans_lock);

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto unlock;
	}

	path->reada = READA_FORWARD;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	key.objectid = src_dev->devid;
	key.type = BTRFS_DEV_EXTENT_KEY;
	key.offset = 0;

	btrfs_for_each_slot(root, &key, &found_key, path, iter_ret) {
		struct extent_buffer *leaf = path->nodes[0];

		if (found_key.objectid != src_dev->devid)
			break;

		if (found_key.type != BTRFS_DEV_EXTENT_KEY)
			break;

		if (found_key.offset < key.offset)
			break;

		dev_extent = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_extent);

		chunk_offset = btrfs_dev_extent_chunk_offset(leaf, dev_extent);

		cache = btrfs_lookup_block_group(fs_info, chunk_offset);
		if (!cache)
			continue;

		set_bit(BLOCK_GROUP_FLAG_TO_COPY, &cache->runtime_flags);
		btrfs_put_block_group(cache);
	}
	if (iter_ret < 0)
		ret = iter_ret;

	btrfs_free_path(path);
unlock:
	mutex_unlock(&fs_info->chunk_mutex);

	return ret;
}

bool btrfs_finish_block_group_to_copy(struct btrfs_device *srcdev,
				      struct btrfs_block_group *cache,
				      u64 physical)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_chunk_map *map;
	u64 chunk_offset = cache->start;
	int num_extents, cur_extent;
	int i;

	/* Do not use "to_copy" on non zoned filesystem for now */
	if (!btrfs_is_zoned(fs_info))
		return true;

	spin_lock(&cache->lock);
	if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &cache->runtime_flags)) {
		spin_unlock(&cache->lock);
		return true;
	}
	spin_unlock(&cache->lock);

	map = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
	ASSERT(!IS_ERR(map));

	num_extents = 0;
	cur_extent = 0;
	for (i = 0; i < map->num_stripes; i++) {
		/* We have more device extent to copy */
		if (srcdev != map->stripes[i].dev)
			continue;

		num_extents++;
		if (physical == map->stripes[i].physical)
			cur_extent = i;
	}

	btrfs_free_chunk_map(map);

	if (num_extents > 1 && cur_extent < num_extents - 1) {
		/*
		 * Has more stripes on this device. Keep this block group
		 * readonly until we finish all the stripes.
		 */
		return false;
	}

	/* Last stripe on this device */
	clear_bit(BLOCK_GROUP_FLAG_TO_COPY, &cache->runtime_flags);

	return true;
}

/*
 * Start a device replace from the source device (given by devid or path) to
 * the target device at @tgtdev_name, duplicating new writes and copying the
 * existing extents via scrub.
 */
static int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info,
		const char *tgtdev_name, u64 srcdevid, const char *srcdev_name,
		int read_src)
{
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
	int ret;
	struct btrfs_device *tgt_device = NULL;
	struct btrfs_device *src_device = NULL;

	src_device = btrfs_find_device_by_devspec(fs_info, srcdevid,
						  srcdev_name);
	if (IS_ERR(src_device))
		return PTR_ERR(src_device);

	if (btrfs_pinned_by_swapfile(fs_info, src_device)) {
		btrfs_warn_in_rcu(fs_info,
			"cannot replace device %s (devid %llu) due to active swapfile",
			btrfs_dev_name(src_device), src_device->devid);
		return -ETXTBSY;
	}

	/*
	 * Here we commit the transaction to make sure commit_total_bytes
	 * of all the devices are updated.
	 */
	trans = btrfs_attach_transaction(root);
	if (!IS_ERR(trans)) {
		ret = btrfs_commit_transaction(trans);
		if (ret)
			return ret;
	} else if (PTR_ERR(trans) != -ENOENT) {
		return PTR_ERR(trans);
	}

	ret = btrfs_init_dev_replace_tgtdev(fs_info, tgtdev_name,
					    src_device, &tgt_device);
	if (ret)
		return ret;

	ret = mark_block_group_to_copy(fs_info, src_device);
	if (ret)
		return ret;

	down_write(&dev_replace->rwsem);
	dev_replace->replace_task = current;
	switch (dev_replace->replace_state) {
	case BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED:
	case BTRFS_IOCTL_DEV_REPLACE_STATE_FINISHED:
	case BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED:
		break;
	case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED:
	case BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED:
		ASSERT(0);
		ret = BTRFS_IOCTL_DEV_REPLACE_RESULT_ALREADY_STARTED;
		up_write(&dev_replace->rwsem);
		goto leave;
	}

	dev_replace->cont_reading_from_srcdev_mode = read_src;
	dev_replace->srcdev = src_device;
	dev_replace->tgtdev = tgt_device;

	btrfs_info_in_rcu(fs_info,
			  "dev_replace from %s (devid %llu) to %s started",
			  btrfs_dev_name(src_device),
			  src_device->devid,
			  btrfs_dev_name(tgt_device));

	/*
	 * from now on, the writes to the srcdev are all duplicated to
	 * go to the tgtdev as well (refer to btrfs_map_block()).
	 */
	dev_replace->replace_state = BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED;
	dev_replace->time_started = ktime_get_real_seconds();
	dev_replace->cursor_left = 0;
	dev_replace->committed_cursor_left = 0;
	dev_replace->cursor_left_last_write_of_item = 0;
	dev_replace->cursor_right = 0;
	dev_replace->is_valid = 1;
	dev_replace->item_needs_writeback = 1;
	atomic64_set(&dev_replace->num_write_errors, 0);
	atomic64_set(&dev_replace->num_uncorrectable_read_errors, 0);
	up_write(&dev_replace->rwsem);

	ret = btrfs_sysfs_add_device(tgt_device);
	if (ret)
		btrfs_err(fs_info, "kobj add dev failed %d", ret);

	btrfs_wait_ordered_roots(fs_info, U64_MAX, NULL);

	/*
	 * Commit dev_replace state and reserve 1 item for it.
	 * This is crucial to ensure we won't miss copying extents for new block
	 * groups that are allocated after we started the device replace, and
	 * must be done after setting up the device replace state.
	 */
	trans = btrfs_start_transaction(root, 1);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		down_write(&dev_replace->rwsem);
		dev_replace->replace_state =
			BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED;
		dev_replace->srcdev = NULL;
		dev_replace->tgtdev = NULL;
		up_write(&dev_replace->rwsem);
		goto leave;
	}

	ret = btrfs_commit_transaction(trans);
	WARN_ON(ret);

	/* the disk copy procedure reuses the scrub code */
	ret = btrfs_scrub_dev(fs_info, src_device->devid, 0,
			      btrfs_device_get_total_bytes(src_device),
			      &dev_replace->scrub_progress, 0, 1);

	ret = btrfs_dev_replace_finishing(fs_info, ret);
	if (ret == -EINPROGRESS)
		ret = BTRFS_IOCTL_DEV_REPLACE_RESULT_SCRUB_INPROGRESS;

	return ret;

leave:
	btrfs_destroy_dev_replace_tgtdev(tgt_device);
	return ret;
}

static int btrfs_check_replace_dev_names(struct btrfs_ioctl_dev_replace_args *args)
{
	if (args->start.srcdevid == 0) {
		if (memchr(args->start.srcdev_name, 0,
			   sizeof(args->start.srcdev_name)) == NULL)
			return -ENAMETOOLONG;
	} else {
		args->start.srcdev_name[0] = 0;
	}

	if (memchr(args->start.tgtdev_name, 0,
		   sizeof(args->start.tgtdev_name)) == NULL)
		return -ENAMETOOLONG;

	return 0;
}

int btrfs_dev_replace_by_ioctl(struct btrfs_fs_info *fs_info,
			       struct btrfs_ioctl_dev_replace_args *args)
{
	int ret;

	switch (args->start.cont_reading_from_srcdev_mode) {
	case BTRFS_IOCTL_DEV_REPLACE_CONT_READING_FROM_SRCDEV_MODE_ALWAYS:
	case BTRFS_IOCTL_DEV_REPLACE_CONT_READING_FROM_SRCDEV_MODE_AVOID:
		break;
	default:
		return -EINVAL;
	}
	ret = btrfs_check_replace_dev_names(args);
	if (ret < 0)
		return ret;

	ret = btrfs_dev_replace_start(fs_info, args->start.tgtdev_name,
				      args->start.srcdevid,
				      args->start.srcdev_name,
				      args->start.cont_reading_from_srcdev_mode);
	args->result = ret;
	/* don't warn if EINPROGRESS, someone else might be running scrub */
	if (ret == BTRFS_IOCTL_DEV_REPLACE_RESULT_SCRUB_INPROGRESS ||
	    ret == BTRFS_IOCTL_DEV_REPLACE_RESULT_NO_ERROR)
		return 0;

	return ret;
}

/*
 * Block until all in-flight bio operations are finished.
 */
static void btrfs_rm_dev_replace_blocked(struct btrfs_fs_info *fs_info)
{
	set_bit(BTRFS_FS_STATE_DEV_REPLACING, &fs_info->fs_state);
	wait_event(fs_info->dev_replace.replace_wait, !percpu_counter_sum(
		   &fs_info->dev_replace.bio_counter));
}

/*
 * We have removed the target device, it is safe to allow new bio requests.
 */
static void btrfs_rm_dev_replace_unblocked(struct btrfs_fs_info *fs_info)
{
	clear_bit(BTRFS_FS_STATE_DEV_REPLACING, &fs_info->fs_state);
	wake_up(&fs_info->dev_replace.replace_wait);
}

/*
 * When finishing the device replace, before swapping the source device with the
 * target device we must update the chunk allocation state in the target device,
 * as it is empty because replace works by directly copying the chunks and not
 * through the normal chunk allocation path.
 */
static int btrfs_set_target_alloc_state(struct btrfs_device *srcdev,
					struct btrfs_device *tgtdev)
{
	struct extent_state *cached_state = NULL;
	u64 start = 0;
	u64 found_start;
	u64 found_end;
	int ret = 0;

	lockdep_assert_held(&srcdev->fs_info->chunk_mutex);

	while (find_first_extent_bit(&srcdev->alloc_state, start,
				     &found_start, &found_end,
				     CHUNK_ALLOCATED, &cached_state)) {
		ret = set_extent_bit(&tgtdev->alloc_state, found_start,
				     found_end, CHUNK_ALLOCATED, NULL);
		if (ret)
			break;
		start = found_end + 1;
	}

	free_extent_state(cached_state);
	return ret;
}

static void btrfs_dev_replace_update_device_in_mapping_tree(
						struct btrfs_fs_info *fs_info,
						struct btrfs_device *srcdev,
						struct btrfs_device *tgtdev)
{
	struct rb_node *node;

	/*
	 * The chunk mutex must be held so that no new chunks can be created
	 * while we are updating existing chunks. This guarantees we don't miss
	 * any new chunk that gets created for a range that falls before the
	 * range of the last chunk we processed.
	 */
	lockdep_assert_held(&fs_info->chunk_mutex);

	write_lock(&fs_info->mapping_tree_lock);
	node = rb_first_cached(&fs_info->mapping_tree);
	while (node) {
		struct rb_node *next = rb_next(node);
		struct btrfs_chunk_map *map;
		u64 next_start;

		map = rb_entry(node, struct btrfs_chunk_map, rb_node);
		next_start = map->start + map->chunk_len;

		for (int i = 0; i < map->num_stripes; i++)
			if (srcdev == map->stripes[i].dev)
				map->stripes[i].dev = tgtdev;

		if (cond_resched_rwlock_write(&fs_info->mapping_tree_lock)) {
			map = btrfs_find_chunk_map_nolock(fs_info, next_start, U64_MAX);
			if (!map)
				break;
			node = &map->rb_node;
			/*
			 * Drop the lookup reference since we are holding the
			 * lock in write mode and no one can remove the chunk
			 * map from the tree and drop its tree reference.
			 */
			btrfs_free_chunk_map(map);
		} else {
			node = next;
		}
	}
	write_unlock(&fs_info->mapping_tree_lock);
}

static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
				       int scrub_ret)
{
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *tgt_device;
	struct btrfs_device *src_device;
	struct btrfs_root *root = fs_info->tree_root;
	u8 uuid_tmp[BTRFS_UUID_SIZE];
	struct btrfs_trans_handle *trans;
	int ret = 0;

	/* don't allow cancel or unmount to disturb the finishing procedure */
	mutex_lock(&dev_replace->lock_finishing_cancel_unmount);

	down_read(&dev_replace->rwsem);
	/* was the operation canceled, or is it finished? */
	if (dev_replace->replace_state !=
	    BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED) {
		up_read(&dev_replace->rwsem);
		mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
		return 0;
	}

	tgt_device = dev_replace->tgtdev;
	src_device = dev_replace->srcdev;
	up_read(&dev_replace->rwsem);

	/*
	 * flush all outstanding I/O and inode extent mappings before the
	 * copy operation is declared as being finished
	 */
	ret = btrfs_start_delalloc_roots(fs_info, LONG_MAX, false);
	if (ret) {
		mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
		return ret;
	}
	btrfs_wait_ordered_roots(fs_info, U64_MAX, NULL);

	/*
	 * We have to use this loop approach because at this point src_device
	 * has to be available for transaction commit to complete, yet new
	 * chunks shouldn't be allocated on the device.
	 */
	while (1) {
		trans = btrfs_start_transaction(root, 0);
		if (IS_ERR(trans)) {
			mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
			return PTR_ERR(trans);
		}
		ret = btrfs_commit_transaction(trans);
		WARN_ON(ret);

		/* Prevent write_all_supers() during the finishing procedure */
		mutex_lock(&fs_devices->device_list_mutex);
		/* Prevent new chunks being allocated on the source device */
		mutex_lock(&fs_info->chunk_mutex);

		if (!list_empty(&src_device->post_commit_list)) {
			mutex_unlock(&fs_devices->device_list_mutex);
			mutex_unlock(&fs_info->chunk_mutex);
		} else {
			break;
		}
	}

	down_write(&dev_replace->rwsem);
	dev_replace->replace_state =
		scrub_ret ? BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED
			  : BTRFS_IOCTL_DEV_REPLACE_STATE_FINISHED;
	dev_replace->tgtdev = NULL;
	dev_replace->srcdev = NULL;
	dev_replace->time_stopped = ktime_get_real_seconds();
	dev_replace->item_needs_writeback = 1;

	/*
	 * Update allocation state in the new device and replace the old device
	 * with the new one in the mapping tree.
	 */
	if (!scrub_ret) {
		scrub_ret = btrfs_set_target_alloc_state(src_device, tgt_device);
		if (scrub_ret)
			goto error;
		btrfs_dev_replace_update_device_in_mapping_tree(fs_info,
								src_device,
								tgt_device);
	} else {
		if (scrub_ret != -ECANCELED)
			btrfs_err_in_rcu(fs_info,
				 "btrfs_scrub_dev(%s, %llu, %s) failed %d",
				 btrfs_dev_name(src_device),
				 src_device->devid,
				 btrfs_dev_name(tgt_device), scrub_ret);
error:
		up_write(&dev_replace->rwsem);
		mutex_unlock(&fs_info->chunk_mutex);
		mutex_unlock(&fs_devices->device_list_mutex);
		btrfs_rm_dev_replace_blocked(fs_info);
		if (tgt_device)
			btrfs_destroy_dev_replace_tgtdev(tgt_device);
		btrfs_rm_dev_replace_unblocked(fs_info);
		mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);

		return scrub_ret;
	}

	btrfs_info_in_rcu(fs_info,
			  "dev_replace from %s (devid %llu) to %s finished",
			  btrfs_dev_name(src_device),
			  src_device->devid,
			  btrfs_dev_name(tgt_device));
	clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &tgt_device->dev_state);
	tgt_device->devid = src_device->devid;
	src_device->devid = BTRFS_DEV_REPLACE_DEVID;
	memcpy(uuid_tmp, tgt_device->uuid, sizeof(uuid_tmp));
	memcpy(tgt_device->uuid, src_device->uuid, sizeof(tgt_device->uuid));
	memcpy(src_device->uuid, uuid_tmp, sizeof(src_device->uuid));
	btrfs_device_set_total_bytes(tgt_device, src_device->total_bytes);
	btrfs_device_set_disk_total_bytes(tgt_device,
					  src_device->disk_total_bytes);
	btrfs_device_set_bytes_used(tgt_device, src_device->bytes_used);
	tgt_device->commit_bytes_used = src_device->bytes_used;

	btrfs_assign_next_active_device(src_device, tgt_device);

	list_add(&tgt_device->dev_alloc_list, &fs_devices->alloc_list);
	fs_devices->rw_devices++;

	dev_replace->replace_task = NULL;
	up_write(&dev_replace->rwsem);
	btrfs_rm_dev_replace_blocked(fs_info);

	btrfs_rm_dev_replace_remove_srcdev(src_device);

	btrfs_rm_dev_replace_unblocked(fs_info);

	/*
	 * Increment dev_stats_ccnt so that btrfs_run_dev_stats() will
	 * update on-disk dev stats value during commit transaction
	 */
	atomic_inc(&tgt_device->dev_stats_ccnt);

	/*
	 * this is again a consistent state where no dev_replace procedure
	 * is running, the target device is part of the filesystem, the
	 * source device is not part of the filesystem anymore and its 1st
	 * superblock is scratched out so that it is no longer marked to
	 * belong to this filesystem.
	 */
	mutex_unlock(&fs_info->chunk_mutex);
	mutex_unlock(&fs_devices->device_list_mutex);

	/* replace the sysfs entry */
	btrfs_sysfs_remove_device(src_device);
	btrfs_sysfs_update_devid(tgt_device);
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &src_device->dev_state))
		btrfs_scratch_superblocks(fs_info, src_device);

	/* write back the superblocks */
	trans = btrfs_start_transaction(root, 0);
	if (!IS_ERR(trans))
		btrfs_commit_transaction(trans);

	mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);

	btrfs_rm_dev_replace_free_srcdev(src_device);

	return 0;
}

/*
 * Read progress of device replace status according to the state and last
 * stored position.
 * The value format is the same as for
 * btrfs_dev_replace::progress_1000
 */
static u64 btrfs_dev_replace_progress(struct btrfs_fs_info *fs_info)
{
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
	u64 ret = 0;

	switch (dev_replace->replace_state) {
	case BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED:
	case BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED:
		ret = 0;
		break;
	case BTRFS_IOCTL_DEV_REPLACE_STATE_FINISHED:
		ret = 1000;
		break;
	case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED:
	case BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED:
		ret = div64_u64(dev_replace->cursor_left,
				div_u64(btrfs_device_get_total_bytes(
						dev_replace->srcdev), 1000));
		break;
	}

	return ret;
}

void btrfs_dev_replace_status(struct btrfs_fs_info *fs_info,
			      struct btrfs_ioctl_dev_replace_args *args)
{
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;

	down_read(&dev_replace->rwsem);
	/* even if !dev_replace_is_valid, the values are good enough for
	 * the replace_status ioctl */
	args->result = BTRFS_IOCTL_DEV_REPLACE_RESULT_NO_ERROR;
	args->status.replace_state = dev_replace->replace_state;
	args->status.time_started = dev_replace->time_started;
	args->status.time_stopped = dev_replace->time_stopped;
	args->status.num_write_errors =
		atomic64_read(&dev_replace->num_write_errors);
	args->status.num_uncorrectable_read_errors =
		atomic64_read(&dev_replace->num_uncorrectable_read_errors);
	args->status.progress_1000 = btrfs_dev_replace_progress(fs_info);
	up_read(&dev_replace->rwsem);
}

int btrfs_dev_replace_cancel(struct btrfs_fs_info *fs_info)
{
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
	struct btrfs_device *tgt_device = NULL;
	struct btrfs_device *src_device = NULL;
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = fs_info->tree_root;
	int result;
	int ret;

	if (sb_rdonly(fs_info->sb))
		return -EROFS;

	mutex_lock(&dev_replace->lock_finishing_cancel_unmount);
	down_write(&dev_replace->rwsem);
	switch (dev_replace->replace_state) {
	case BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED:
	case BTRFS_IOCTL_DEV_REPLACE_STATE_FINISHED:
	case BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED:
		result = BTRFS_IOCTL_DEV_REPLACE_RESULT_NOT_STARTED;
		up_write(&dev_replace->rwsem);
		break;
	case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED:
		tgt_device = dev_replace->tgtdev;
		src_device = dev_replace->srcdev;
		up_write(&dev_replace->rwsem);
		ret = btrfs_scrub_cancel(fs_info);
		if (ret < 0) {
			result = BTRFS_IOCTL_DEV_REPLACE_RESULT_NOT_STARTED;
		} else {
			result = BTRFS_IOCTL_DEV_REPLACE_RESULT_NO_ERROR;
			/*
			 * btrfs_dev_replace_finishing() will handle the
			 * cleanup part
			 */
			btrfs_info_in_rcu(fs_info,
				"dev_replace from %s (devid %llu) to %s canceled",
				btrfs_dev_name(src_device), src_device->devid,
				btrfs_dev_name(tgt_device));
		}
		break;
	case BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED:
		/*
		 * Scrub doing the replace isn't running so we need to do the
		 * cleanup step of btrfs_dev_replace_finishing() here
		 */
		result = BTRFS_IOCTL_DEV_REPLACE_RESULT_NO_ERROR;
		tgt_device = dev_replace->tgtdev;
		src_device = dev_replace->srcdev;
		dev_replace->tgtdev = NULL;
		dev_replace->srcdev = NULL;
		dev_replace->replace_state =
			BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED;
		dev_replace->time_stopped = ktime_get_real_seconds();
		dev_replace->item_needs_writeback = 1;

		up_write(&dev_replace->rwsem);

		/* Scrub for replace must not be running in suspended state */
		btrfs_scrub_cancel(fs_info);

		trans = btrfs_start_transaction(root, 0);
		if (IS_ERR(trans)) {
			mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
			return PTR_ERR(trans);
		}
		ret = btrfs_commit_transaction(trans);
		WARN_ON(ret);

		btrfs_info_in_rcu(fs_info,
			"suspended dev_replace from %s (devid %llu) to %s canceled",
			btrfs_dev_name(src_device), src_device->devid,
			btrfs_dev_name(tgt_device));

		if (tgt_device)
			btrfs_destroy_dev_replace_tgtdev(tgt_device);
		break;
	default:
		up_write(&dev_replace->rwsem);
		result = -EINVAL;
	}

	mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
	return result;
}

void btrfs_dev_replace_suspend_for_unmount(struct btrfs_fs_info *fs_info)
{
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;

	mutex_lock(&dev_replace->lock_finishing_cancel_unmount);
	down_write(&dev_replace->rwsem);

	switch (dev_replace->replace_state) {
	case BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED:
	case BTRFS_IOCTL_DEV_REPLACE_STATE_FINISHED:
	case BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED:
	case BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED:
		break;
	case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED:
		dev_replace->replace_state =
			BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED;
		dev_replace->time_stopped = ktime_get_real_seconds();
		dev_replace->item_needs_writeback = 1;
		btrfs_info(fs_info, "suspending dev_replace for unmount");
		break;
	}

	up_write(&dev_replace->rwsem);
	mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
}

/* resume dev_replace procedure that was interrupted by unmount */
int btrfs_resume_dev_replace_async(struct btrfs_fs_info *fs_info)
{
	struct task_struct *task;
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;

	down_write(&dev_replace->rwsem);

	switch (dev_replace->replace_state) {
	case BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED:
	case BTRFS_IOCTL_DEV_REPLACE_STATE_FINISHED:
	case BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED:
		up_write(&dev_replace->rwsem);
		return 0;
	case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED:
		break;
	case BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED:
		dev_replace->replace_state =
			BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED;
		break;
	}
	if (!dev_replace->tgtdev || !dev_replace->tgtdev->bdev) {
		btrfs_info(fs_info,
			   "cannot continue dev_replace, tgtdev is missing");
		btrfs_info(fs_info,
			   "you may cancel the operation after 'mount -o degraded'");
		dev_replace->replace_state =
			BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED;
		up_write(&dev_replace->rwsem);
		return 0;
	}
	up_write(&dev_replace->rwsem);

	/*
	 * This could collide with a paused balance, but the exclusive op logic
	 * should never allow both to start and pause. We don't want to allow
	 * dev-replace to start anyway.
	 */
	if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_DEV_REPLACE)) {
		down_write(&dev_replace->rwsem);
		dev_replace->replace_state =
			BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED;
		up_write(&dev_replace->rwsem);
		btrfs_info(fs_info,
			   "cannot resume dev-replace, other exclusive operation running");
		return 0;
	}

	task = kthread_run(btrfs_dev_replace_kthread, fs_info, "btrfs-devrepl");
	return PTR_ERR_OR_ZERO(task);
}

/*
 * Resume an interrupted device replace: redo the copy via scrub starting at
 * the last committed cursor position, then run the finishing step.
 */
static int btrfs_dev_replace_kthread(void *data)
{
	struct btrfs_fs_info *fs_info = data;
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
	u64 progress;
	int ret;

	progress = btrfs_dev_replace_progress(fs_info);
	progress = div_u64(progress, 10);
	btrfs_info_in_rcu(fs_info,
		"continuing dev_replace from %s (devid %llu) to target %s @%u%%",
		btrfs_dev_name(dev_replace->srcdev),
		dev_replace->srcdev->devid,
		btrfs_dev_name(dev_replace->tgtdev),
		(unsigned int)progress);

	ret = btrfs_scrub_dev(fs_info, dev_replace->srcdev->devid,
			      dev_replace->committed_cursor_left,
			      btrfs_device_get_total_bytes(dev_replace->srcdev),
			      &dev_replace->scrub_progress, 0, 1);
	ret = btrfs_dev_replace_finishing(fs_info, ret);
	WARN_ON(ret && ret != -ECANCELED);

	btrfs_exclop_finish(fs_info);
	return 0;
}

int __pure btrfs_dev_replace_is_ongoing(struct btrfs_dev_replace *dev_replace)
{
	if (!dev_replace->is_valid)
		return 0;

	switch (dev_replace->replace_state) {
	case BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED:
	case BTRFS_IOCTL_DEV_REPLACE_STATE_FINISHED:
	case BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED:
		return 0;
	case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED:
	case BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED:
		/*
		 * Return true even if the tgtdev is missing. This can happen
		 * if the dev_replace procedure is suspended by an umount and
		 * the tgtdev is then absent (or "btrfs dev scan" was not
		 * called) and the filesystem is remounted in degraded state.
		 * This does not stop the dev_replace procedure. It needs to
		 * be canceled manually if the cancellation is wanted.
		 */
		break;
	}
	return 1;
}

void btrfs_bio_counter_sub(struct btrfs_fs_info *fs_info, s64 amount)
{
	percpu_counter_sub(&fs_info->dev_replace.bio_counter, amount);
	cond_wake_up_nomb(&fs_info->dev_replace.replace_wait);
}

void btrfs_bio_counter_inc_blocked(struct btrfs_fs_info *fs_info)
{
	while (1) {
		percpu_counter_inc(&fs_info->dev_replace.bio_counter);
		if (likely(!test_bit(BTRFS_FS_STATE_DEV_REPLACING,
				     &fs_info->fs_state)))
			break;

		btrfs_bio_counter_dec(fs_info);
		wait_event(fs_info->dev_replace.replace_wait,
			   !test_bit(BTRFS_FS_STATE_DEV_REPLACING,
				     &fs_info->fs_state));
	}
}