// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) STRATO AG 2012. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/math64.h>
#include "misc.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "volumes.h"
#include "async-thread.h"
#include "dev-replace.h"
#include "sysfs.h"
#include "zoned.h"
#include "block-group.h"
#include "fs.h"
#include "accessors.h"
#include "scrub.h"

/*
 * Device replace overview
 *
 * [Objective]
 * To copy all extents (both new and on-disk) from the source device to the
 * target device, while still keeping the filesystem read-write.
 *
 * [Method]
 * There are two main methods involved:
 *
 * - Write duplication
 *
 *   All new writes will be written to both the target and the source devices,
 *   so even if the replace gets canceled, the source device still contains
 *   up-to-date data.
 *
 *   Location:	handle_ops_on_dev_replace() from btrfs_map_block()
 *   Start:	btrfs_dev_replace_start()
 *   End:	btrfs_dev_replace_finishing()
 *   Content:	Latest data/metadata
 *
 * - Copy existing extents
 *
 *   This happens by reusing the scrub facility, as scrub also iterates through
 *   existing extents from the commit root.
 *
 *   Location:	scrub_write_block_to_dev_replace() from
 *		scrub_block_complete()
 *   Content:	Data/meta from the commit root.
 *
 * Due to the content difference, we need to avoid nocow writes while
 * dev-replace is happening. This is done by marking the block group read-only
 * and waiting for NOCOW writes.
 *
 * After the replace is done, the finishing part is done by swapping the target
 * and source devices.
 *
 *   Location:	btrfs_dev_replace_update_device_in_mapping_tree() from
 *		btrfs_dev_replace_finishing()
 */

static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
				       int scrub_ret);
static int btrfs_dev_replace_kthread(void *data);

int btrfs_init_dev_replace(struct btrfs_fs_info *fs_info)
{
	struct btrfs_dev_lookup_args args = { .devid = BTRFS_DEV_REPLACE_DEVID };
	struct btrfs_key key;
	struct btrfs_root *dev_root = fs_info->dev_root;
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
	struct extent_buffer *eb;
	int slot;
	int ret = 0;
	struct btrfs_path *path = NULL;
	int item_size;
	struct btrfs_dev_replace_item *ptr;
	u64 src_devid;

	if (!dev_root)
		return 0;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	key.objectid = 0;
	key.type = BTRFS_DEV_REPLACE_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0);
	if (ret) {
no_valid_dev_replace_entry_found:
		/*
		 * We don't have a replace item or it's corrupted. If there is
		 * a replace target, fail the mount.
		 */
		if (btrfs_find_device(fs_info->fs_devices, &args)) {
			btrfs_err(fs_info,
			"found replace target device without a valid replace item");
			ret = -EUCLEAN;
			goto out;
		}
		ret = 0;
		dev_replace->replace_state =
			BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED;
		dev_replace->cont_reading_from_srcdev_mode =
		    BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_ALWAYS;
		dev_replace->time_started = 0;
		dev_replace->time_stopped = 0;
		atomic64_set(&dev_replace->num_write_errors, 0);
		atomic64_set(&dev_replace->num_uncorrectable_read_errors, 0);
		dev_replace->cursor_left = 0;
		dev_replace->committed_cursor_left = 0;
		dev_replace->cursor_left_last_write_of_item = 0;
		dev_replace->cursor_right = 0;
		dev_replace->srcdev = NULL;
		dev_replace->tgtdev = NULL;
		dev_replace->is_valid = 0;
		dev_replace->item_needs_writeback = 0;
		goto out;
	}
	slot = path->slots[0];
	eb = path->nodes[0];
	item_size = btrfs_item_size(eb, slot);
	ptr = btrfs_item_ptr(eb, slot, struct btrfs_dev_replace_item);

	if (item_size != sizeof(struct btrfs_dev_replace_item)) {
		btrfs_warn(fs_info,
			"dev_replace entry found has unexpected size, ignore entry");
		goto no_valid_dev_replace_entry_found;
	}

	src_devid = btrfs_dev_replace_src_devid(eb, ptr);
	dev_replace->cont_reading_from_srcdev_mode =
		btrfs_dev_replace_cont_reading_from_srcdev_mode(eb, ptr);
	dev_replace->replace_state = btrfs_dev_replace_replace_state(eb, ptr);
	dev_replace->time_started = btrfs_dev_replace_time_started(eb, ptr);
	dev_replace->time_stopped =
		btrfs_dev_replace_time_stopped(eb, ptr);
	atomic64_set(&dev_replace->num_write_errors,
		     btrfs_dev_replace_num_write_errors(eb, ptr));
	atomic64_set(&dev_replace->num_uncorrectable_read_errors,
		     btrfs_dev_replace_num_uncorrectable_read_errors(eb, ptr));
	dev_replace->cursor_left = btrfs_dev_replace_cursor_left(eb, ptr);
	dev_replace->committed_cursor_left = dev_replace->cursor_left;
	dev_replace->cursor_left_last_write_of_item = dev_replace->cursor_left;
	dev_replace->cursor_right = btrfs_dev_replace_cursor_right(eb, ptr);
	dev_replace->is_valid = 1;

	dev_replace->item_needs_writeback = 0;
	switch (dev_replace->replace_state) {
	case BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED:
	case BTRFS_IOCTL_DEV_REPLACE_STATE_FINISHED:
	case BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED:
		/*
		 * We don't have an active replace item but if there is a
		 * replace target, fail the mount.
		 */
		if (btrfs_find_device(fs_info->fs_devices, &args)) {
			btrfs_err(fs_info,
"replace without active item, run 'device scan --forget' on the target device");
			ret = -EUCLEAN;
		} else {
			dev_replace->srcdev = NULL;
			dev_replace->tgtdev = NULL;
		}
		break;
	case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED:
	case BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED:
		dev_replace->tgtdev = btrfs_find_device(fs_info->fs_devices, &args);
		args.devid = src_devid;
		dev_replace->srcdev = btrfs_find_device(fs_info->fs_devices, &args);

		/*
		 * Allow 'btrfs dev replace_cancel' if src/tgt device is
		 * missing.
		 */
		if (!dev_replace->srcdev &&
		    !btrfs_test_opt(fs_info, DEGRADED)) {
			ret = -EIO;
			btrfs_warn(fs_info,
			   "cannot mount because device replace operation is ongoing and");
			btrfs_warn(fs_info,
			   "srcdev (devid %llu) is missing, need to run 'btrfs dev scan'?",
			   src_devid);
		}
		if (!dev_replace->tgtdev &&
		    !btrfs_test_opt(fs_info, DEGRADED)) {
			ret = -EIO;
			btrfs_warn(fs_info,
			   "cannot mount because device replace operation is ongoing and");
			btrfs_warn(fs_info,
			   "tgtdev (devid %llu) is missing, need to run 'btrfs dev scan'?",
			   BTRFS_DEV_REPLACE_DEVID);
		}
		if (dev_replace->tgtdev) {
			if (dev_replace->srcdev) {
				dev_replace->tgtdev->total_bytes =
					dev_replace->srcdev->total_bytes;
				dev_replace->tgtdev->disk_total_bytes =
					dev_replace->srcdev->disk_total_bytes;
				dev_replace->tgtdev->commit_total_bytes =
					dev_replace->srcdev->commit_total_bytes;
				dev_replace->tgtdev->bytes_used =
					dev_replace->srcdev->bytes_used;
				dev_replace->tgtdev->commit_bytes_used =
					dev_replace->srcdev->commit_bytes_used;
			}
			set_bit(BTRFS_DEV_STATE_REPLACE_TGT,
				&dev_replace->tgtdev->dev_state);

			WARN_ON(fs_info->fs_devices->rw_devices == 0);
			dev_replace->tgtdev->io_width = fs_info->sectorsize;
			dev_replace->tgtdev->io_align = fs_info->sectorsize;
			dev_replace->tgtdev->sector_size = fs_info->sectorsize;
			dev_replace->tgtdev->fs_info = fs_info;
			set_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
				&dev_replace->tgtdev->dev_state);
		}
		break;
	}

out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Initialize a new device for device replace target from a given source dev
 * and path.
 *
 * Return 0 and the new device in @device_out, otherwise return < 0.
 */
static int btrfs_init_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
					 const char *device_path,
					 struct btrfs_device *srcdev,
					 struct btrfs_device **device_out)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;
	struct file *bdev_file;
	struct block_device *bdev;
	u64 devid = BTRFS_DEV_REPLACE_DEVID;
	int ret = 0;

	*device_out = NULL;
	if (srcdev->fs_devices->seeding) {
		btrfs_err(fs_info, "the filesystem is a seed filesystem!");
		return -EINVAL;
	}

	bdev_file = bdev_file_open_by_path(device_path, BLK_OPEN_WRITE,
					   fs_info->bdev_holder, NULL);
	if (IS_ERR(bdev_file)) {
		btrfs_err(fs_info, "target device %s is invalid!", device_path);
		return PTR_ERR(bdev_file);
	}
	bdev = file_bdev(bdev_file);

	if (!btrfs_check_device_zone_type(fs_info, bdev)) {
		btrfs_err(fs_info,
		"dev-replace: zoned type of target device mismatch with filesystem");
		ret = -EINVAL;
		goto error;
	}

	sync_blockdev(bdev);

	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		if (device->bdev == bdev) {
			btrfs_err(fs_info,
				  "target device is in the filesystem!");
			ret = -EEXIST;
			goto error;
		}
	}

	if (bdev_nr_bytes(bdev) < btrfs_device_get_total_bytes(srcdev)) {
		btrfs_err(fs_info,
			  "target device is smaller than source device!");
		ret = -EINVAL;
		goto error;
	}

	device = btrfs_alloc_device(NULL, &devid, NULL, device_path);
	if (IS_ERR(device)) {
		ret = PTR_ERR(device);
		goto error;
	}

	ret = lookup_bdev(device_path, &device->devt);
	if (ret)
		goto error;

	set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
	device->generation = 0;
	device->io_width = fs_info->sectorsize;
	device->io_align = fs_info->sectorsize;
	device->sector_size = fs_info->sectorsize;
	device->total_bytes = btrfs_device_get_total_bytes(srcdev);
	device->disk_total_bytes = btrfs_device_get_disk_total_bytes(srcdev);
	device->bytes_used = btrfs_device_get_bytes_used(srcdev);
	device->commit_total_bytes = srcdev->commit_total_bytes;
	device->commit_bytes_used = device->bytes_used;
	device->fs_info = fs_info;
	device->bdev = bdev;
	device->bdev_file = bdev_file;
	set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
	set_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
	device->dev_stats_valid = 1;
	set_blocksize(bdev_file, BTRFS_BDEV_BLOCKSIZE);
	device->fs_devices = fs_devices;

	ret = btrfs_get_dev_zone_info(device, false);
	if (ret)
		goto error;

	mutex_lock(&fs_devices->device_list_mutex);
	list_add(&device->dev_list, &fs_devices->devices);
	fs_devices->num_devices++;
	fs_devices->open_devices++;
	mutex_unlock(&fs_devices->device_list_mutex);

	*device_out = device;
	return 0;

error:
	fput(bdev_file);
	return ret;
}

/*
 * Called from commit_transaction. Writes the changed device replace state to
 * disk.
 */
int btrfs_run_dev_replace(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	int ret;
	struct btrfs_root *dev_root = fs_info->dev_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *eb;
	struct btrfs_dev_replace_item *ptr;
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;

	down_read(&dev_replace->rwsem);
	if (!dev_replace->is_valid ||
	    !dev_replace->item_needs_writeback) {
		up_read(&dev_replace->rwsem);
		return 0;
	}
	up_read(&dev_replace->rwsem);

	key.objectid = 0;
	key.type = BTRFS_DEV_REPLACE_KEY;
	key.offset = 0;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}
	ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
	if (ret < 0) {
		btrfs_warn(fs_info,
			   "error %d while searching for dev_replace item!",
			   ret);
		goto out;
	}

	if (ret == 0 &&
	    btrfs_item_size(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
		/*
		 * We need to delete the old item and insert a new one.
		 * Since no attempt is made to recover any old state, if the
		 * dev_replace state is 'running', the data on the target
		 * drive is lost.
		 * It would be possible to recover the state: just make sure
		 * that the beginning of the item is never changed and always
		 * contains all the essential information. Then read this
		 * minimal set of information and use it as a base for the
		 * new state.
		 */
		ret = btrfs_del_item(trans, dev_root, path);
		if (ret != 0) {
			btrfs_warn(fs_info,
				   "delete too small dev_replace item failed %d!",
				   ret);
			goto out;
		}
		ret = 1;
	}

	if (ret == 1) {
		/* Need to insert a new item. */
		btrfs_release_path(path);
		ret = btrfs_insert_empty_item(trans, dev_root, path,
					      &key, sizeof(*ptr));
		if (ret < 0) {
			btrfs_warn(fs_info,
				   "insert dev_replace item failed %d!", ret);
			goto out;
		}
	}

	eb = path->nodes[0];
	ptr = btrfs_item_ptr(eb, path->slots[0],
			     struct btrfs_dev_replace_item);

	down_write(&dev_replace->rwsem);
	if (dev_replace->srcdev)
		btrfs_set_dev_replace_src_devid(eb, ptr,
			dev_replace->srcdev->devid);
	else
		btrfs_set_dev_replace_src_devid(eb, ptr, (u64)-1);
	btrfs_set_dev_replace_cont_reading_from_srcdev_mode(eb, ptr,
		dev_replace->cont_reading_from_srcdev_mode);
	btrfs_set_dev_replace_replace_state(eb, ptr,
		dev_replace->replace_state);
	btrfs_set_dev_replace_time_started(eb, ptr, dev_replace->time_started);
	btrfs_set_dev_replace_time_stopped(eb, ptr, dev_replace->time_stopped);
	btrfs_set_dev_replace_num_write_errors(eb, ptr,
		atomic64_read(&dev_replace->num_write_errors));
	btrfs_set_dev_replace_num_uncorrectable_read_errors(eb, ptr,
		atomic64_read(&dev_replace->num_uncorrectable_read_errors));
	dev_replace->cursor_left_last_write_of_item =
		dev_replace->cursor_left;
	btrfs_set_dev_replace_cursor_left(eb, ptr,
		dev_replace->cursor_left_last_write_of_item);
	btrfs_set_dev_replace_cursor_right(eb, ptr,
		dev_replace->cursor_right);
	dev_replace->item_needs_writeback = 0;
	up_write(&dev_replace->rwsem);

	btrfs_mark_buffer_dirty(trans, eb);

out:
	btrfs_free_path(path);

	return ret;
}

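/*
 * On zoned filesystems, mark every block group that has a dev extent on the
 * source device with the TO_COPY flag, so the copy (scrub) stage knows which
 * block groups still need to be copied to the target device.
 */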
static int mark_block_group_to_copy(struct btrfs_fs_info *fs_info,
				    struct btrfs_device *src_dev)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_dev_extent *dev_extent = NULL;
	struct btrfs_block_group *cache;
	struct btrfs_trans_handle *trans;
	int iter_ret = 0;
	int ret = 0;
	u64 chunk_offset;

	/* Do not use "to_copy" on non-zoned filesystems for now. */
	if (!btrfs_is_zoned(fs_info))
		return 0;

	mutex_lock(&fs_info->chunk_mutex);

	/* Ensure we don't have pending new block groups. */
	spin_lock(&fs_info->trans_lock);
	while (fs_info->running_transaction &&
	       !list_empty(&fs_info->running_transaction->dev_update_list)) {
		spin_unlock(&fs_info->trans_lock);
		mutex_unlock(&fs_info->chunk_mutex);
		trans = btrfs_attach_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			mutex_lock(&fs_info->chunk_mutex);
			if (ret == -ENOENT) {
				spin_lock(&fs_info->trans_lock);
				continue;
			} else {
				goto unlock;
			}
		}

		ret = btrfs_commit_transaction(trans);
		mutex_lock(&fs_info->chunk_mutex);
		if (ret)
			goto unlock;

		spin_lock(&fs_info->trans_lock);
	}
	spin_unlock(&fs_info->trans_lock);

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto unlock;
	}

	path->reada = READA_FORWARD;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	key.objectid = src_dev->devid;
	key.type = BTRFS_DEV_EXTENT_KEY;
	key.offset = 0;

	btrfs_for_each_slot(root, &key, &found_key, path, iter_ret) {
		struct extent_buffer *leaf = path->nodes[0];

		if (found_key.objectid != src_dev->devid)
			break;

		if (found_key.type != BTRFS_DEV_EXTENT_KEY)
			break;

		if (found_key.offset < key.offset)
			break;

		dev_extent = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_extent);

		chunk_offset = btrfs_dev_extent_chunk_offset(leaf, dev_extent);

		cache = btrfs_lookup_block_group(fs_info, chunk_offset);
		if (!cache)
			continue;

		set_bit(BLOCK_GROUP_FLAG_TO_COPY, &cache->runtime_flags);
		btrfs_put_block_group(cache);
	}
	if (iter_ret < 0)
		ret = iter_ret;

	btrfs_free_path(path);
unlock:
	mutex_unlock(&fs_info->chunk_mutex);

	return ret;
}

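/*
 * Check whether the device extent at @physical was the last one of the block
 * group left to copy on @srcdev. If so, clear the TO_COPY flag and return
 * true; otherwise return false so the block group stays read-only until the
 * remaining stripes are copied.
 */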
bool btrfs_finish_block_group_to_copy(struct btrfs_device *srcdev,
				      struct btrfs_block_group *cache,
				      u64 physical)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_chunk_map *map;
	u64 chunk_offset = cache->start;
	int num_extents, cur_extent;
	int i;

	/* Do not use "to_copy" on non-zoned filesystems for now. */
	if (!btrfs_is_zoned(fs_info))
		return true;

	spin_lock(&cache->lock);
	if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &cache->runtime_flags)) {
		spin_unlock(&cache->lock);
		return true;
	}
	spin_unlock(&cache->lock);

	map = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
	ASSERT(!IS_ERR(map));

	num_extents = 0;
	cur_extent = 0;
	for (i = 0; i < map->num_stripes; i++) {
		/* We have more device extents to copy. */
		if (srcdev != map->stripes[i].dev)
			continue;

		num_extents++;
		if (physical == map->stripes[i].physical)
			cur_extent = i;
	}

	btrfs_free_chunk_map(map);

	if (num_extents > 1 && cur_extent < num_extents - 1) {
		/*
		 * There are more stripes on this device. Keep this block
		 * group read-only until we finish all the stripes.
		 */
		return false;
	}

	/* Last stripe on this device. */
	clear_bit(BLOCK_GROUP_FLAG_TO_COPY, &cache->runtime_flags);

	return true;
}

static int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info,
		const char *tgtdev_name, u64 srcdevid, const char *srcdev_name,
		int read_src)
{
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
	int ret;
	struct btrfs_device *tgt_device = NULL;
	struct btrfs_device *src_device = NULL;

	src_device = btrfs_find_device_by_devspec(fs_info, srcdevid,
						  srcdev_name);
	if (IS_ERR(src_device))
		return PTR_ERR(src_device);

	if (btrfs_pinned_by_swapfile(fs_info, src_device)) {
		btrfs_warn_in_rcu(fs_info,
	  "cannot replace device %s (devid %llu) due to active swapfile",
			btrfs_dev_name(src_device), src_device->devid);
		return -ETXTBSY;
	}

	/*
	 * Here we commit the transaction to make sure commit_total_bytes
	 * of all the devices are updated.
	 */
	trans = btrfs_attach_transaction(root);
	if (!IS_ERR(trans)) {
		ret = btrfs_commit_transaction(trans);
		if (ret)
			return ret;
	} else if (PTR_ERR(trans) != -ENOENT) {
		return PTR_ERR(trans);
	}

	ret = btrfs_init_dev_replace_tgtdev(fs_info, tgtdev_name,
					    src_device, &tgt_device);
	if (ret)
		return ret;

	ret = mark_block_group_to_copy(fs_info, src_device);
	if (ret)
		return ret;

	down_write(&dev_replace->rwsem);
	dev_replace->replace_task = current;
	switch (dev_replace->replace_state) {
	case BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED:
	case BTRFS_IOCTL_DEV_REPLACE_STATE_FINISHED:
	case BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED:
		break;
	case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED:
	case BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED:
		ASSERT(0);
		ret = BTRFS_IOCTL_DEV_REPLACE_RESULT_ALREADY_STARTED;
		up_write(&dev_replace->rwsem);
		goto leave;
	}

	dev_replace->cont_reading_from_srcdev_mode = read_src;
	dev_replace->srcdev = src_device;
	dev_replace->tgtdev = tgt_device;

	btrfs_info_in_rcu(fs_info,
		      "dev_replace from %s (devid %llu) to %s started",
		      btrfs_dev_name(src_device),
		      src_device->devid,
		      btrfs_dev_name(tgt_device));

	/*
	 * From now on, the writes to the srcdev are all duplicated to
	 * go to the tgtdev as well (refer to btrfs_map_block()).
	 */
	dev_replace->replace_state = BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED;
	dev_replace->time_started = ktime_get_real_seconds();
	dev_replace->cursor_left = 0;
	dev_replace->committed_cursor_left = 0;
	dev_replace->cursor_left_last_write_of_item = 0;
	dev_replace->cursor_right = 0;
	dev_replace->is_valid = 1;
	dev_replace->item_needs_writeback = 1;
	atomic64_set(&dev_replace->num_write_errors, 0);
	atomic64_set(&dev_replace->num_uncorrectable_read_errors, 0);
	up_write(&dev_replace->rwsem);

	ret = btrfs_sysfs_add_device(tgt_device);
	if (ret)
		btrfs_err(fs_info, "kobj add dev failed %d", ret);

	btrfs_wait_ordered_roots(fs_info, U64_MAX, NULL);

	/*
	 * Commit dev_replace state and reserve 1 item for it.
	 * This is crucial to ensure we won't miss copying extents for new block
	 * groups that are allocated after we started the device replace, and
	 * must be done after setting up the device replace state.
	 */
	trans = btrfs_start_transaction(root, 1);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		down_write(&dev_replace->rwsem);
		dev_replace->replace_state =
			BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED;
		dev_replace->srcdev = NULL;
		dev_replace->tgtdev = NULL;
		up_write(&dev_replace->rwsem);
		goto leave;
	}

	ret = btrfs_commit_transaction(trans);
	WARN_ON(ret);

	/* The disk copy procedure reuses the scrub code. */
	ret = btrfs_scrub_dev(fs_info, src_device->devid, 0,
			      btrfs_device_get_total_bytes(src_device),
			      &dev_replace->scrub_progress, 0, 1);

	ret = btrfs_dev_replace_finishing(fs_info, ret);
	if (ret == -EINPROGRESS)
		ret = BTRFS_IOCTL_DEV_REPLACE_RESULT_SCRUB_INPROGRESS;

	return ret;

leave:
	btrfs_destroy_dev_replace_tgtdev(tgt_device);
	return ret;
}

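/*
 * Sanity check the device names passed in by the ioctl: they must be NUL
 * terminated within their buffers. The source name is only required when no
 * source devid was given.
 */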
static int btrfs_check_replace_dev_names(struct btrfs_ioctl_dev_replace_args *args)
{
	if (args->start.srcdevid == 0) {
		if (memchr(args->start.srcdev_name, 0,
			   sizeof(args->start.srcdev_name)) == NULL)
			return -ENAMETOOLONG;
	} else {
		args->start.srcdev_name[0] = 0;
	}

	if (memchr(args->start.tgtdev_name, 0,
		   sizeof(args->start.tgtdev_name)) == NULL)
		return -ENAMETOOLONG;

	return 0;
}

int btrfs_dev_replace_by_ioctl(struct btrfs_fs_info *fs_info,
			       struct btrfs_ioctl_dev_replace_args *args)
{
	int ret;

	switch (args->start.cont_reading_from_srcdev_mode) {
	case BTRFS_IOCTL_DEV_REPLACE_CONT_READING_FROM_SRCDEV_MODE_ALWAYS:
	case BTRFS_IOCTL_DEV_REPLACE_CONT_READING_FROM_SRCDEV_MODE_AVOID:
		break;
	default:
		return -EINVAL;
	}
	ret = btrfs_check_replace_dev_names(args);
	if (ret < 0)
		return ret;

	ret = btrfs_dev_replace_start(fs_info, args->start.tgtdev_name,
					args->start.srcdevid,
					args->start.srcdev_name,
					args->start.cont_reading_from_srcdev_mode);
	args->result = ret;
	/* Don't warn if EINPROGRESS, someone else might be running scrub. */
	if (ret == BTRFS_IOCTL_DEV_REPLACE_RESULT_SCRUB_INPROGRESS ||
	    ret == BTRFS_IOCTL_DEV_REPLACE_RESULT_NO_ERROR)
		return 0;

	return ret;
}

/*
 * Block until all in-flight bio operations are finished.
 */
static void btrfs_rm_dev_replace_blocked(struct btrfs_fs_info *fs_info)
{
	set_bit(BTRFS_FS_STATE_DEV_REPLACING, &fs_info->fs_state);
	wait_event(fs_info->dev_replace.replace_wait, !percpu_counter_sum(
		   &fs_info->dev_replace.bio_counter));
}

/*
 * We have removed the target device, it is safe to allow new bio requests.
 */
static void btrfs_rm_dev_replace_unblocked(struct btrfs_fs_info *fs_info)
{
	clear_bit(BTRFS_FS_STATE_DEV_REPLACING, &fs_info->fs_state);
	wake_up(&fs_info->dev_replace.replace_wait);
}

/*
 * When finishing the device replace, before swapping the source device with the
 * target device we must update the chunk allocation state in the target device,
 * as it is empty because replace works by directly copying the chunks and not
 * through the normal chunk allocation path.
 */
static int btrfs_set_target_alloc_state(struct btrfs_device *srcdev,
					struct btrfs_device *tgtdev)
{
	struct extent_state *cached_state = NULL;
	u64 start = 0;
	u64 found_start;
	u64 found_end;
	int ret = 0;

	lockdep_assert_held(&srcdev->fs_info->chunk_mutex);

	while (find_first_extent_bit(&srcdev->alloc_state, start,
				     &found_start, &found_end,
				     CHUNK_ALLOCATED, &cached_state)) {
		ret = set_extent_bit(&tgtdev->alloc_state, found_start,
				     found_end, CHUNK_ALLOCATED, NULL);
		if (ret)
			break;
		start = found_end + 1;
	}

	free_extent_state(cached_state);
	return ret;
}

static void btrfs_dev_replace_update_device_in_mapping_tree(
						struct btrfs_fs_info *fs_info,
						struct btrfs_device *srcdev,
						struct btrfs_device *tgtdev)
{
	struct rb_node *node;

	/*
	 * The chunk mutex must be held so that no new chunks can be created
	 * while we are updating existing chunks. This guarantees we don't miss
	 * any new chunk that gets created for a range that falls before the
	 * range of the last chunk we processed.
	 */
	lockdep_assert_held(&fs_info->chunk_mutex);

	write_lock(&fs_info->mapping_tree_lock);
	node = rb_first_cached(&fs_info->mapping_tree);
	while (node) {
		struct rb_node *next = rb_next(node);
		struct btrfs_chunk_map *map;
		u64 next_start;

		map = rb_entry(node, struct btrfs_chunk_map, rb_node);
		next_start = map->start + map->chunk_len;

		for (int i = 0; i < map->num_stripes; i++)
			if (srcdev == map->stripes[i].dev)
				map->stripes[i].dev = tgtdev;

		if (cond_resched_rwlock_write(&fs_info->mapping_tree_lock)) {
			map = btrfs_find_chunk_map_nolock(fs_info, next_start, U64_MAX);
			if (!map)
				break;
			node = &map->rb_node;
			/*
			 * Drop the lookup reference since we are holding the
			 * lock in write mode and no one can remove the chunk
			 * map from the tree and drop its tree reference.
			 */
			btrfs_free_chunk_map(map);
		} else {
			node = next;
		}
	}
	write_unlock(&fs_info->mapping_tree_lock);
}

static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
				       int scrub_ret)
{
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *tgt_device;
	struct btrfs_device *src_device;
	struct btrfs_root *root = fs_info->tree_root;
	u8 uuid_tmp[BTRFS_UUID_SIZE];
	struct btrfs_trans_handle *trans;
	int ret = 0;

	/* Don't allow cancel or unmount to disturb the finishing procedure. */
	mutex_lock(&dev_replace->lock_finishing_cancel_unmount);

	down_read(&dev_replace->rwsem);
	/* Was the operation canceled, or is it finished? */
	if (dev_replace->replace_state !=
	    BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED) {
		up_read(&dev_replace->rwsem);
		mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
		return 0;
	}

	tgt_device = dev_replace->tgtdev;
	src_device = dev_replace->srcdev;
	up_read(&dev_replace->rwsem);

	/*
	 * Flush all outstanding I/O and inode extent mappings before the
	 * copy operation is declared as being finished.
	 */
	ret = btrfs_start_delalloc_roots(fs_info, LONG_MAX, false);
	if (ret) {
		mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
		return ret;
	}
	btrfs_wait_ordered_roots(fs_info, U64_MAX, NULL);

	/*
	 * We have to use this loop approach because at this point src_device
	 * has to be available for transaction commit to complete, yet new
	 * chunks shouldn't be allocated on the device.
	 */
	while (1) {
		trans = btrfs_start_transaction(root, 0);
		if (IS_ERR(trans)) {
			mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
			return PTR_ERR(trans);
		}
		ret = btrfs_commit_transaction(trans);
		WARN_ON(ret);

		/* Prevent write_all_supers() during the finishing procedure */
		mutex_lock(&fs_devices->device_list_mutex);
		/* Prevent new chunks being allocated on the source device */
		mutex_lock(&fs_info->chunk_mutex);

		if (!list_empty(&src_device->post_commit_list)) {
			mutex_unlock(&fs_devices->device_list_mutex);
			mutex_unlock(&fs_info->chunk_mutex);
		} else {
			break;
		}
	}

	down_write(&dev_replace->rwsem);
	dev_replace->replace_state =
		scrub_ret ? BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED
			  : BTRFS_IOCTL_DEV_REPLACE_STATE_FINISHED;
	dev_replace->tgtdev = NULL;
	dev_replace->srcdev = NULL;
	dev_replace->time_stopped = ktime_get_real_seconds();
	dev_replace->item_needs_writeback = 1;

	/*
	 * Update allocation state in the new device and replace the old device
	 * with the new one in the mapping tree.
	 */
	if (!scrub_ret) {
		scrub_ret = btrfs_set_target_alloc_state(src_device, tgt_device);
		if (scrub_ret)
			goto error;
		btrfs_dev_replace_update_device_in_mapping_tree(fs_info,
								src_device,
								tgt_device);
	} else {
		if (scrub_ret != -ECANCELED)
			btrfs_err_in_rcu(fs_info,
				 "btrfs_scrub_dev(%s, %llu, %s) failed %d",
				 btrfs_dev_name(src_device),
				 src_device->devid,
				 btrfs_dev_name(tgt_device), scrub_ret);
error:
		up_write(&dev_replace->rwsem);
		mutex_unlock(&fs_info->chunk_mutex);
		mutex_unlock(&fs_devices->device_list_mutex);
		btrfs_rm_dev_replace_blocked(fs_info);
		if (tgt_device)
			btrfs_destroy_dev_replace_tgtdev(tgt_device);
		btrfs_rm_dev_replace_unblocked(fs_info);
		mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);

		return scrub_ret;
	}

	btrfs_info_in_rcu(fs_info,
			  "dev_replace from %s (devid %llu) to %s finished",
			  btrfs_dev_name(src_device),
			  src_device->devid,
			  btrfs_dev_name(tgt_device));
	clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &tgt_device->dev_state);
	tgt_device->devid = src_device->devid;
	src_device->devid = BTRFS_DEV_REPLACE_DEVID;
	memcpy(uuid_tmp, tgt_device->uuid, sizeof(uuid_tmp));
	memcpy(tgt_device->uuid, src_device->uuid, sizeof(tgt_device->uuid));
	memcpy(src_device->uuid, uuid_tmp, sizeof(src_device->uuid));
	btrfs_device_set_total_bytes(tgt_device, src_device->total_bytes);
	btrfs_device_set_disk_total_bytes(tgt_device,
					  src_device->disk_total_bytes);
	btrfs_device_set_bytes_used(tgt_device, src_device->bytes_used);
	tgt_device->commit_bytes_used = src_device->bytes_used;

	btrfs_assign_next_active_device(src_device, tgt_device);

	list_add(&tgt_device->dev_alloc_list, &fs_devices->alloc_list);
	fs_devices->rw_devices++;

	dev_replace->replace_task = NULL;
	up_write(&dev_replace->rwsem);
	btrfs_rm_dev_replace_blocked(fs_info);

	btrfs_rm_dev_replace_remove_srcdev(src_device);

	btrfs_rm_dev_replace_unblocked(fs_info);

	/*
	 * Increment dev_stats_ccnt so that btrfs_run_dev_stats() will
	 * update the on-disk dev stats value during transaction commit.
	 */
	atomic_inc(&tgt_device->dev_stats_ccnt);

	/*
	 * This is again a consistent state where no dev_replace procedure
	 * is running, the target device is part of the filesystem, the
	 * source device is not part of the filesystem anymore and its 1st
	 * superblock is scratched out so that it is no longer marked to
	 * belong to this filesystem.
	 */
	mutex_unlock(&fs_info->chunk_mutex);
	mutex_unlock(&fs_devices->device_list_mutex);

	/* Replace the sysfs entry. */
	btrfs_sysfs_remove_device(src_device);
	btrfs_sysfs_update_devid(tgt_device);
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &src_device->dev_state))
		btrfs_scratch_superblocks(fs_info, src_device);

	/* Write back the superblocks. */
	trans = btrfs_start_transaction(root, 0);
	if (!IS_ERR(trans))
		btrfs_commit_transaction(trans);

	mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);

	btrfs_rm_dev_replace_free_srcdev(src_device);

	return 0;
}

/*
 * Read the progress of the device replace status according to the state and
 * last stored position. The value format is the same as for
 * btrfs_dev_replace::progress_1000
 */
static u64 btrfs_dev_replace_progress(struct btrfs_fs_info *fs_info)
{
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
	u64 ret = 0;

	switch (dev_replace->replace_state) {
	case BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED:
	case BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED:
		ret = 0;
		break;
	case BTRFS_IOCTL_DEV_REPLACE_STATE_FINISHED:
		ret = 1000;
		break;
	case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED:
	case BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED:
		ret = div64_u64(dev_replace->cursor_left,
				div_u64(btrfs_device_get_total_bytes(
						dev_replace->srcdev), 1000));
		break;
	}

	return ret;
}

void btrfs_dev_replace_status(struct btrfs_fs_info *fs_info,
			      struct btrfs_ioctl_dev_replace_args *args)
{
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;

	down_read(&dev_replace->rwsem);
	/*
	 * Even if !dev_replace_is_valid, the values are good enough for
	 * the replace_status ioctl.
	 */
	args->result = BTRFS_IOCTL_DEV_REPLACE_RESULT_NO_ERROR;
	args->status.replace_state = dev_replace->replace_state;
	args->status.time_started = dev_replace->time_started;
	args->status.time_stopped = dev_replace->time_stopped;
	args->status.num_write_errors =
		atomic64_read(&dev_replace->num_write_errors);
	args->status.num_uncorrectable_read_errors =
		atomic64_read(&dev_replace->num_uncorrectable_read_errors);
	args->status.progress_1000 = btrfs_dev_replace_progress(fs_info);
	up_read(&dev_replace->rwsem);
}

int btrfs_dev_replace_cancel(struct btrfs_fs_info *fs_info)
{
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
	struct btrfs_device *tgt_device = NULL;
	struct btrfs_device *src_device = NULL;
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = fs_info->tree_root;
	int result;
	int ret;

	if (sb_rdonly(fs_info->sb))
		return -EROFS;

	mutex_lock(&dev_replace->lock_finishing_cancel_unmount);
	down_write(&dev_replace->rwsem);
	switch (dev_replace->replace_state) {
	case BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED:
	case BTRFS_IOCTL_DEV_REPLACE_STATE_FINISHED:
	case BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED:
		result = BTRFS_IOCTL_DEV_REPLACE_RESULT_NOT_STARTED;
		up_write(&dev_replace->rwsem);
		break;
	case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED:
		tgt_device = dev_replace->tgtdev;
		src_device = dev_replace->srcdev;
		up_write(&dev_replace->rwsem);
		ret = btrfs_scrub_cancel(fs_info);
		if (ret < 0) {
			result = BTRFS_IOCTL_DEV_REPLACE_RESULT_NOT_STARTED;
		} else {
			result = BTRFS_IOCTL_DEV_REPLACE_RESULT_NO_ERROR;
			/*
			 * btrfs_dev_replace_finishing() will handle the
			 * cleanup part.
			 */
			btrfs_info_in_rcu(fs_info,
				"dev_replace from %s (devid %llu) to %s canceled",
				btrfs_dev_name(src_device), src_device->devid,
				btrfs_dev_name(tgt_device));
		}
		break;
	case BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED:
		/*
		 * Scrub doing the replace isn't running so we need to do the
		 * cleanup step of btrfs_dev_replace_finishing() here.
		 */
		result = BTRFS_IOCTL_DEV_REPLACE_RESULT_NO_ERROR;
		tgt_device = dev_replace->tgtdev;
		src_device = dev_replace->srcdev;
		dev_replace->tgtdev = NULL;
		dev_replace->srcdev = NULL;
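		/*
		 * Record the cancellation in the in-memory state; it is
		 * persisted via the transaction commit below.
		 */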
		dev_replace->replace_state =
				BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED;
		dev_replace->time_stopped = ktime_get_real_seconds();
		dev_replace->item_needs_writeback = 1;

		up_write(&dev_replace->rwsem);

		/* Scrub for replace must not be running in suspended state */
		btrfs_scrub_cancel(fs_info);

		trans = btrfs_start_transaction(root, 0);
		if (IS_ERR(trans)) {
			mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
			return PTR_ERR(trans);
		}
		ret = btrfs_commit_transaction(trans);
		WARN_ON(ret);

		btrfs_info_in_rcu(fs_info,
		"suspended dev_replace from %s (devid %llu) to %s canceled",
			btrfs_dev_name(src_device), src_device->devid,
			btrfs_dev_name(tgt_device));

		if (tgt_device)
			btrfs_destroy_dev_replace_tgtdev(tgt_device);
		break;
	default:
		up_write(&dev_replace->rwsem);
		result = -EINVAL;
	}

	mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
	return result;
}

void btrfs_dev_replace_suspend_for_unmount(struct btrfs_fs_info *fs_info)
{
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;

	mutex_lock(&dev_replace->lock_finishing_cancel_unmount);
	down_write(&dev_replace->rwsem);

	switch (dev_replace->replace_state) {
	case BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED:
	case BTRFS_IOCTL_DEV_REPLACE_STATE_FINISHED:
	case BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED:
	case BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED:
		break;
	case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED:
		dev_replace->replace_state =
			BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED;
		dev_replace->time_stopped = ktime_get_real_seconds();
		dev_replace->item_needs_writeback = 1;
		btrfs_info(fs_info, "suspending dev_replace for unmount");
		break;
	}

	up_write(&dev_replace->rwsem);
	mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
}

/* Resume a dev_replace procedure that was interrupted by unmount. */
int btrfs_resume_dev_replace_async(struct btrfs_fs_info *fs_info)
{
	struct task_struct *task;
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;

	down_write(&dev_replace->rwsem);

	switch (dev_replace->replace_state) {
	case BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED:
	case BTRFS_IOCTL_DEV_REPLACE_STATE_FINISHED:
	case BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED:
		up_write(&dev_replace->rwsem);
		return 0;
	case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED:
		break;
	case BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED:
		dev_replace->replace_state =
			BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED;
		break;
	}
	if (!dev_replace->tgtdev || !dev_replace->tgtdev->bdev) {
		btrfs_info(fs_info,
			   "cannot continue dev_replace, tgtdev is missing");
		btrfs_info(fs_info,
			   "you may cancel the operation after 'mount -o degraded'");
		dev_replace->replace_state =
					BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED;
		up_write(&dev_replace->rwsem);
		return 0;
	}
	up_write(&dev_replace->rwsem);

	/*
	 * This could collide with a paused balance, but the exclusive op logic
	 * should never allow both to start and pause. We don't want to allow
	 * dev-replace to start anyway.
	 */
	if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_DEV_REPLACE)) {
		down_write(&dev_replace->rwsem);
		dev_replace->replace_state =
					BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED;
		up_write(&dev_replace->rwsem);
		btrfs_info(fs_info,
		"cannot resume dev-replace, other exclusive operation running");
		return 0;
	}

	task = kthread_run(btrfs_dev_replace_kthread, fs_info, "btrfs-devrepl");
	return PTR_ERR_OR_ZERO(task);
}

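/*
 * Kthread that resumes a previously suspended device replace: restart the
 * scrub based copy from the last committed cursor position and then run the
 * finishing (or cancellation) steps.
 */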
static int btrfs_dev_replace_kthread(void *data)
{
	struct btrfs_fs_info *fs_info = data;
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
	u64 progress;
	int ret;

	progress = btrfs_dev_replace_progress(fs_info);
	progress = div_u64(progress, 10);
	btrfs_info_in_rcu(fs_info,
		"continuing dev_replace from %s (devid %llu) to target %s @%u%%",
		btrfs_dev_name(dev_replace->srcdev),
		dev_replace->srcdev->devid,
		btrfs_dev_name(dev_replace->tgtdev),
		(unsigned int)progress);

	ret = btrfs_scrub_dev(fs_info, dev_replace->srcdev->devid,
			      dev_replace->committed_cursor_left,
			      btrfs_device_get_total_bytes(dev_replace->srcdev),
			      &dev_replace->scrub_progress, 0, 1);
	ret = btrfs_dev_replace_finishing(fs_info, ret);
	WARN_ON(ret && ret != -ECANCELED);

	btrfs_exclop_finish(fs_info);
	return 0;
}

int __pure btrfs_dev_replace_is_ongoing(struct btrfs_dev_replace *dev_replace)
{
	if (!dev_replace->is_valid)
		return 0;

	switch (dev_replace->replace_state) {
	case BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED:
	case BTRFS_IOCTL_DEV_REPLACE_STATE_FINISHED:
	case BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED:
		return 0;
	case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED:
	case BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED:
		/*
		 * Return true even if the tgtdev is missing. This can happen
		 * if the dev_replace procedure is suspended by an umount, the
		 * tgtdev then goes missing (or "btrfs dev scan" is not run)
		 * and the filesystem is mounted again in degraded state. This
		 * does not stop the dev_replace procedure. It needs to be
		 * canceled manually if that is wanted.
		 */
		break;
	}
	return 1;
}

void btrfs_bio_counter_sub(struct btrfs_fs_info *fs_info, s64 amount)
{
	percpu_counter_sub(&fs_info->dev_replace.bio_counter, amount);
	cond_wake_up_nomb(&fs_info->dev_replace.replace_wait);
}

void btrfs_bio_counter_inc_blocked(struct btrfs_fs_info *fs_info)
{
	while (1) {
		percpu_counter_inc(&fs_info->dev_replace.bio_counter);
		if (likely(!test_bit(BTRFS_FS_STATE_DEV_REPLACING,
				     &fs_info->fs_state)))
			break;

		btrfs_bio_counter_dec(fs_info);
		wait_event(fs_info->dev_replace.replace_wait,
			   !test_bit(BTRFS_FS_STATE_DEV_REPLACING,
				     &fs_info->fs_state));
	}
}