// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) STRATO AG 2012. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/math64.h>
#include "misc.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "volumes.h"
#include "async-thread.h"
#include "dev-replace.h"
#include "sysfs.h"
#include "zoned.h"
#include "block-group.h"
#include "fs.h"
#include "accessors.h"
#include "scrub.h"

/*
 * Device replace overview
 *
 * [Objective]
 * To copy all extents (both new and on-disk) from the source device to the
 * target device, while still keeping the filesystem read-write.
 *
 * [Method]
 * There are two main methods involved:
 *
 * - Write duplication
 *
 *   All new writes will be written to both the target and the source devices,
 *   so even if the replace gets canceled, the source device still contains
 *   up-to-date data.
 *
 *   Location:	handle_ops_on_dev_replace() from btrfs_map_block()
 *   Start:	btrfs_dev_replace_start()
 *   End:	btrfs_dev_replace_finishing()
 *   Content:	Latest data/metadata
 *
 * - Copy existing extents
 *
 *   This happens by reusing the scrub facility, as scrub also iterates through
 *   existing extents from the commit root.
 *
 *   Location:	scrub_write_block_to_dev_replace() from
 *		scrub_block_complete()
 *   Content:	Data/metadata from the commit root
 *
 * Due to the content difference, we need to avoid nocow writes while a
 * dev-replace is running. This is done by marking the block group read-only
 * and waiting for in-flight NOCOW writes to finish.
 *
 * After the copy is done, the replace is finished by swapping the target and
 * source devices.
 *
 *   Location:	btrfs_dev_replace_update_device_in_mapping_tree() from
 *		btrfs_dev_replace_finishing()
 */

static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
				       int scrub_ret);
static int btrfs_dev_replace_kthread(void *data);

int btrfs_init_dev_replace(struct btrfs_fs_info *fs_info)
{
72 struct btrfs_dev_lookup_args args = { .devid = BTRFS_DEV_REPLACE_DEVID };
73 struct btrfs_key key;
74 struct btrfs_root *dev_root = fs_info->dev_root;
75 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
76 struct extent_buffer *eb;
77 int slot;
78 int ret = 0;
79 BTRFS_PATH_AUTO_FREE(path);
80 int item_size;
81 struct btrfs_dev_replace_item *ptr;
82 u64 src_devid;
83
84 if (!dev_root)
85 return 0;
86
87 path = btrfs_alloc_path();
88 if (!path)
89 return -ENOMEM;
90
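	/* The single replace item is stored at key (0, BTRFS_DEV_REPLACE_KEY, 0). */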
91 key.objectid = 0;
92 key.type = BTRFS_DEV_REPLACE_KEY;
93 key.offset = 0;
94 ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0);
95 if (ret) {
96 no_valid_dev_replace_entry_found:
97 /*
98 * We don't have a replace item or it's corrupted. If there is
99 * a replace target, fail the mount.
100 */
101 if (btrfs_find_device(fs_info->fs_devices, &args)) {
102 btrfs_err(fs_info,
103 "found replace target device without a valid replace item");
104 return -EUCLEAN;
105 }
106 dev_replace->replace_state =
107 BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED;
108 dev_replace->cont_reading_from_srcdev_mode =
109 BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_ALWAYS;
110 dev_replace->time_started = 0;
111 dev_replace->time_stopped = 0;
112 atomic64_set(&dev_replace->num_write_errors, 0);
113 atomic64_set(&dev_replace->num_uncorrectable_read_errors, 0);
114 dev_replace->cursor_left = 0;
115 dev_replace->committed_cursor_left = 0;
116 dev_replace->cursor_left_last_write_of_item = 0;
117 dev_replace->cursor_right = 0;
118 dev_replace->srcdev = NULL;
119 dev_replace->tgtdev = NULL;
120 dev_replace->is_valid = 0;
121 dev_replace->item_needs_writeback = 0;
122 return 0;
123 }
124 slot = path->slots[0];
125 eb = path->nodes[0];
126 item_size = btrfs_item_size(eb, slot);
127 ptr = btrfs_item_ptr(eb, slot, struct btrfs_dev_replace_item);
128
129 if (item_size != sizeof(struct btrfs_dev_replace_item)) {
130 btrfs_warn(fs_info,
131 "dev_replace entry found has unexpected size, ignore entry");
132 goto no_valid_dev_replace_entry_found;
133 }
134
135 src_devid = btrfs_dev_replace_src_devid(eb, ptr);
136 dev_replace->cont_reading_from_srcdev_mode =
137 btrfs_dev_replace_cont_reading_from_srcdev_mode(eb, ptr);
138 dev_replace->replace_state = btrfs_dev_replace_replace_state(eb, ptr);
139 dev_replace->time_started = btrfs_dev_replace_time_started(eb, ptr);
140 dev_replace->time_stopped =
141 btrfs_dev_replace_time_stopped(eb, ptr);
142 atomic64_set(&dev_replace->num_write_errors,
143 btrfs_dev_replace_num_write_errors(eb, ptr));
144 atomic64_set(&dev_replace->num_uncorrectable_read_errors,
145 btrfs_dev_replace_num_uncorrectable_read_errors(eb, ptr));
146 dev_replace->cursor_left = btrfs_dev_replace_cursor_left(eb, ptr);
147 dev_replace->committed_cursor_left = dev_replace->cursor_left;
148 dev_replace->cursor_left_last_write_of_item = dev_replace->cursor_left;
149 dev_replace->cursor_right = btrfs_dev_replace_cursor_right(eb, ptr);
150 dev_replace->is_valid = 1;
151
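	/* The state was just read from disk, nothing needs to be written back yet. */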
152 dev_replace->item_needs_writeback = 0;
153 switch (dev_replace->replace_state) {
154 case BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED:
155 case BTRFS_IOCTL_DEV_REPLACE_STATE_FINISHED:
156 case BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED:
157 /*
158 * We don't have an active replace item but if there is a
159 * replace target, fail the mount.
160 */
161 if (btrfs_find_device(fs_info->fs_devices, &args)) {
162 btrfs_err(fs_info,
163 "replace without active item, run 'device scan --forget' on the target device");
164 ret = -EUCLEAN;
165 } else {
166 dev_replace->srcdev = NULL;
167 dev_replace->tgtdev = NULL;
168 }
169 break;
170 case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED:
171 case BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED:
172 dev_replace->tgtdev = btrfs_find_device(fs_info->fs_devices, &args);
173 args.devid = src_devid;
174 dev_replace->srcdev = btrfs_find_device(fs_info->fs_devices, &args);
175
		/*
		 * Allow 'btrfs dev replace_cancel' if the src/tgt device is
		 * missing.
		 */
180 if (!dev_replace->srcdev &&
181 !btrfs_test_opt(fs_info, DEGRADED)) {
182 ret = -EIO;
183 btrfs_warn(fs_info,
184 "cannot mount because device replace operation is ongoing and");
185 btrfs_warn(fs_info,
186 "srcdev (devid %llu) is missing, need to run 'btrfs dev scan'?",
187 src_devid);
188 }
189 if (!dev_replace->tgtdev &&
190 !btrfs_test_opt(fs_info, DEGRADED)) {
191 ret = -EIO;
192 btrfs_warn(fs_info,
193 "cannot mount because device replace operation is ongoing and");
194 btrfs_warn(fs_info,
195 "tgtdev (devid %llu) is missing, need to run 'btrfs dev scan'?",
196 BTRFS_DEV_REPLACE_DEVID);
197 }
198 if (dev_replace->tgtdev) {
199 if (dev_replace->srcdev) {
200 dev_replace->tgtdev->total_bytes =
201 dev_replace->srcdev->total_bytes;
202 dev_replace->tgtdev->disk_total_bytes =
203 dev_replace->srcdev->disk_total_bytes;
204 dev_replace->tgtdev->commit_total_bytes =
205 dev_replace->srcdev->commit_total_bytes;
206 dev_replace->tgtdev->bytes_used =
207 dev_replace->srcdev->bytes_used;
208 dev_replace->tgtdev->commit_bytes_used =
209 dev_replace->srcdev->commit_bytes_used;
210 }
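			/* Mark the device as the replace target for the I/O paths. */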
211 set_bit(BTRFS_DEV_STATE_REPLACE_TGT,
212 &dev_replace->tgtdev->dev_state);
213
214 WARN_ON(fs_info->fs_devices->rw_devices == 0);
215 dev_replace->tgtdev->io_width = fs_info->sectorsize;
216 dev_replace->tgtdev->io_align = fs_info->sectorsize;
217 dev_replace->tgtdev->sector_size = fs_info->sectorsize;
218 dev_replace->tgtdev->fs_info = fs_info;
219 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
220 &dev_replace->tgtdev->dev_state);
221 }
222 break;
223 }
224
225 return ret;
226 }
227
/*
 * Initialize a new device for device replace target from a given source dev
 * and path.
 *
 * Return 0 and the new device stored in @device_out, otherwise return < 0.
 */
static int btrfs_init_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
					 const char *device_path,
					 struct btrfs_device *srcdev,
					 struct btrfs_device **device_out)
{
239 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
240 struct btrfs_device *device;
241 struct file *bdev_file;
242 struct block_device *bdev;
243 u64 devid = BTRFS_DEV_REPLACE_DEVID;
244 int ret = 0;
245
246 *device_out = NULL;
247 if (srcdev->fs_devices->seeding) {
248 btrfs_err(fs_info, "the filesystem is a seed filesystem!");
249 return -EINVAL;
250 }
251
252 bdev_file = bdev_file_open_by_path(device_path, BLK_OPEN_WRITE,
253 fs_info->bdev_holder, NULL);
254 if (IS_ERR(bdev_file)) {
255 btrfs_err(fs_info, "target device %s is invalid!", device_path);
256 return PTR_ERR(bdev_file);
257 }
258 bdev = file_bdev(bdev_file);
259
260 if (!btrfs_check_device_zone_type(fs_info, bdev)) {
261 btrfs_err(fs_info,
262 "dev-replace: zoned type of target device mismatch with filesystem");
263 ret = -EINVAL;
264 goto error;
265 }
266
267 sync_blockdev(bdev);
268
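	/* Reject a target that is already a device of this filesystem. */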
269 list_for_each_entry(device, &fs_devices->devices, dev_list) {
270 if (device->bdev == bdev) {
271 btrfs_err(fs_info,
272 "target device is in the filesystem!");
273 ret = -EEXIST;
274 goto error;
275 }
	}

279 if (bdev_nr_bytes(bdev) < btrfs_device_get_total_bytes(srcdev)) {
280 btrfs_err(fs_info,
281 "target device is smaller than source device!");
282 ret = -EINVAL;
283 goto error;
	}

287 device = btrfs_alloc_device(NULL, &devid, NULL, device_path);
288 if (IS_ERR(device)) {
289 ret = PTR_ERR(device);
290 goto error;
291 }
292
293 ret = lookup_bdev(device_path, &device->devt);
294 if (ret)
295 goto error;
296
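	/* Initialize the target device with the source device's sizes and usage. */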
297 set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
298 device->generation = 0;
299 device->io_width = fs_info->sectorsize;
300 device->io_align = fs_info->sectorsize;
301 device->sector_size = fs_info->sectorsize;
302 device->total_bytes = btrfs_device_get_total_bytes(srcdev);
303 device->disk_total_bytes = btrfs_device_get_disk_total_bytes(srcdev);
304 device->bytes_used = btrfs_device_get_bytes_used(srcdev);
305 device->commit_total_bytes = srcdev->commit_total_bytes;
306 device->commit_bytes_used = device->bytes_used;
307 device->fs_info = fs_info;
308 device->bdev = bdev;
309 device->bdev_file = bdev_file;
310 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
311 set_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
312 device->dev_stats_valid = 1;
313 set_blocksize(bdev_file, BTRFS_BDEV_BLOCKSIZE);
314 device->fs_devices = fs_devices;
315
316 ret = btrfs_get_dev_zone_info(device, false);
317 if (ret)
318 goto error;
319
320 mutex_lock(&fs_devices->device_list_mutex);
321 list_add(&device->dev_list, &fs_devices->devices);
322 fs_devices->num_devices++;
323 fs_devices->open_devices++;
324 mutex_unlock(&fs_devices->device_list_mutex);
325
326 *device_out = device;
327 return 0;
328
329 error:
330 fput(bdev_file);
331 return ret;
332 }
333
/*
 * Called from commit_transaction(). Writes the changed device replace state
 * to disk.
 */
int btrfs_run_dev_replace(struct btrfs_trans_handle *trans)
{
340 struct btrfs_fs_info *fs_info = trans->fs_info;
341 int ret;
342 struct btrfs_root *dev_root = fs_info->dev_root;
343 BTRFS_PATH_AUTO_FREE(path);
344 struct btrfs_key key;
345 struct extent_buffer *eb;
346 struct btrfs_dev_replace_item *ptr;
347 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
348
349 down_read(&dev_replace->rwsem);
350 if (!dev_replace->is_valid ||
351 !dev_replace->item_needs_writeback) {
352 up_read(&dev_replace->rwsem);
353 return 0;
354 }
355 up_read(&dev_replace->rwsem);
356
357 key.objectid = 0;
358 key.type = BTRFS_DEV_REPLACE_KEY;
359 key.offset = 0;
360
361 path = btrfs_alloc_path();
362 if (!path)
363 return -ENOMEM;
364
365 ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
366 if (ret < 0) {
367 btrfs_warn(fs_info,
368 "error %d while searching for dev_replace item!",
369 ret);
370 return ret;
371 }
372
373 if (ret == 0 &&
374 btrfs_item_size(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
375 /*
376 * need to delete old one and insert a new one.
377 * Since no attempt is made to recover any old state, if the
378 * dev_replace state is 'running', the data on the target
379 * drive is lost.
380 * It would be possible to recover the state: just make sure
381 * that the beginning of the item is never changed and always
382 * contains all the essential information. Then read this
383 * minimal set of information and use it as a base for the
384 * new state.
385 */
386 ret = btrfs_del_item(trans, dev_root, path);
387 if (ret != 0) {
388 btrfs_warn(fs_info,
389 "delete too small dev_replace item failed %d!",
390 ret);
391 return ret;
392 }
393 ret = 1;
394 }
395
396 if (ret == 1) {
397 /* need to insert a new item */
398 btrfs_release_path(path);
399 ret = btrfs_insert_empty_item(trans, dev_root, path,
400 &key, sizeof(*ptr));
401 if (ret < 0) {
402 btrfs_warn(fs_info,
403 "insert dev_replace item failed %d!", ret);
404 return ret;
405 }
406 }
407
408 eb = path->nodes[0];
409 ptr = btrfs_item_ptr(eb, path->slots[0],
410 struct btrfs_dev_replace_item);
411
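	/* Copy the in-memory replace state into the item while holding the rwsem. */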
412 down_write(&dev_replace->rwsem);
413 if (dev_replace->srcdev)
414 btrfs_set_dev_replace_src_devid(eb, ptr,
415 dev_replace->srcdev->devid);
416 else
417 btrfs_set_dev_replace_src_devid(eb, ptr, (u64)-1);
418 btrfs_set_dev_replace_cont_reading_from_srcdev_mode(eb, ptr,
419 dev_replace->cont_reading_from_srcdev_mode);
420 btrfs_set_dev_replace_replace_state(eb, ptr,
421 dev_replace->replace_state);
422 btrfs_set_dev_replace_time_started(eb, ptr, dev_replace->time_started);
423 btrfs_set_dev_replace_time_stopped(eb, ptr, dev_replace->time_stopped);
424 btrfs_set_dev_replace_num_write_errors(eb, ptr,
425 atomic64_read(&dev_replace->num_write_errors));
426 btrfs_set_dev_replace_num_uncorrectable_read_errors(eb, ptr,
427 atomic64_read(&dev_replace->num_uncorrectable_read_errors));
428 dev_replace->cursor_left_last_write_of_item =
429 dev_replace->cursor_left;
430 btrfs_set_dev_replace_cursor_left(eb, ptr,
431 dev_replace->cursor_left_last_write_of_item);
432 btrfs_set_dev_replace_cursor_right(eb, ptr,
433 dev_replace->cursor_right);
434 dev_replace->item_needs_writeback = 0;
435 up_write(&dev_replace->rwsem);
436
437 return ret;
438 }
439
static int mark_block_group_to_copy(struct btrfs_fs_info *fs_info,
				    struct btrfs_device *src_dev)
{
443 struct btrfs_path *path;
444 struct btrfs_key key;
445 struct btrfs_key found_key;
446 struct btrfs_root *root = fs_info->dev_root;
447 struct btrfs_dev_extent *dev_extent = NULL;
448 struct btrfs_block_group *cache;
449 struct btrfs_trans_handle *trans;
450 int iter_ret = 0;
451 int ret = 0;
452 u64 chunk_offset;
453
	/* Do not use "to_copy" on a non-zoned filesystem for now. */
455 if (!btrfs_is_zoned(fs_info))
456 return 0;
457
458 mutex_lock(&fs_info->chunk_mutex);
459
	/* Ensure we don't have pending new block groups. */
461 spin_lock(&fs_info->trans_lock);
462 while (fs_info->running_transaction &&
463 !list_empty(&fs_info->running_transaction->dev_update_list)) {
464 spin_unlock(&fs_info->trans_lock);
465 mutex_unlock(&fs_info->chunk_mutex);
466 trans = btrfs_attach_transaction(root);
467 if (IS_ERR(trans)) {
468 ret = PTR_ERR(trans);
469 mutex_lock(&fs_info->chunk_mutex);
470 if (ret == -ENOENT) {
471 spin_lock(&fs_info->trans_lock);
472 continue;
473 } else {
474 goto unlock;
475 }
476 }
477
478 ret = btrfs_commit_transaction(trans);
479 mutex_lock(&fs_info->chunk_mutex);
480 if (ret)
481 goto unlock;
482
483 spin_lock(&fs_info->trans_lock);
484 }
485 spin_unlock(&fs_info->trans_lock);
486
487 path = btrfs_alloc_path();
488 if (!path) {
489 ret = -ENOMEM;
490 goto unlock;
491 }
492
493 path->reada = READA_FORWARD;
494 path->search_commit_root = 1;
495 path->skip_locking = 1;
496
497 key.objectid = src_dev->devid;
498 key.type = BTRFS_DEV_EXTENT_KEY;
499 key.offset = 0;
500
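	/* Walk all dev extents of the source device and tag their block groups. */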
501 btrfs_for_each_slot(root, &key, &found_key, path, iter_ret) {
502 struct extent_buffer *leaf = path->nodes[0];
503
504 if (found_key.objectid != src_dev->devid)
505 break;
506
507 if (found_key.type != BTRFS_DEV_EXTENT_KEY)
508 break;
509
510 if (found_key.offset < key.offset)
511 break;
512
513 dev_extent = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_extent);
514
515 chunk_offset = btrfs_dev_extent_chunk_offset(leaf, dev_extent);
516
517 cache = btrfs_lookup_block_group(fs_info, chunk_offset);
518 if (!cache)
519 continue;
520
521 set_bit(BLOCK_GROUP_FLAG_TO_COPY, &cache->runtime_flags);
522 btrfs_put_block_group(cache);
523 }
524 if (iter_ret < 0)
525 ret = iter_ret;
526
527 btrfs_free_path(path);
528 unlock:
529 mutex_unlock(&fs_info->chunk_mutex);
530
531 return ret;
532 }
533
bool btrfs_finish_block_group_to_copy(struct btrfs_device *srcdev,
				      struct btrfs_block_group *cache,
				      u64 physical)
{
538 struct btrfs_fs_info *fs_info = cache->fs_info;
539 struct btrfs_chunk_map *map;
540 u64 chunk_offset = cache->start;
541 int num_extents, cur_extent;
542 int i;
543
	/* Do not use "to_copy" on a non-zoned filesystem for now. */
545 if (!btrfs_is_zoned(fs_info))
546 return true;
547
548 spin_lock(&cache->lock);
549 if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &cache->runtime_flags)) {
550 spin_unlock(&cache->lock);
551 return true;
552 }
553 spin_unlock(&cache->lock);
554
555 map = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
556 ASSERT(!IS_ERR(map));
557
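	/* Count the stripes of this chunk that live on the source device. */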
558 num_extents = 0;
559 cur_extent = 0;
560 for (i = 0; i < map->num_stripes; i++) {
		/* We have more device extents to copy. */
562 if (srcdev != map->stripes[i].dev)
563 continue;
564
565 num_extents++;
566 if (physical == map->stripes[i].physical)
567 cur_extent = i;
568 }
569
570 btrfs_free_chunk_map(map);
571
572 if (num_extents > 1 && cur_extent < num_extents - 1) {
573 /*
574 * Has more stripes on this device. Keep this block group
575 * readonly until we finish all the stripes.
576 */
577 return false;
578 }
579
580 /* Last stripe on this device */
581 clear_bit(BLOCK_GROUP_FLAG_TO_COPY, &cache->runtime_flags);
582
583 return true;
584 }
585
static int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info,
		const char *tgtdev_name, u64 srcdevid, const char *srcdev_name,
		int read_src)
{
590 struct btrfs_root *root = fs_info->dev_root;
591 struct btrfs_trans_handle *trans;
592 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
593 int ret;
594 struct btrfs_device *tgt_device = NULL;
595 struct btrfs_device *src_device = NULL;
596
597 src_device = btrfs_find_device_by_devspec(fs_info, srcdevid,
598 srcdev_name);
599 if (IS_ERR(src_device))
600 return PTR_ERR(src_device);
601
602 if (btrfs_pinned_by_swapfile(fs_info, src_device)) {
603 btrfs_warn_in_rcu(fs_info,
604 "cannot replace device %s (devid %llu) due to active swapfile",
605 btrfs_dev_name(src_device), src_device->devid);
606 return -ETXTBSY;
607 }
608
609 /*
610 * Here we commit the transaction to make sure commit_total_bytes
611 * of all the devices are updated.
612 */
613 trans = btrfs_attach_transaction(root);
614 if (!IS_ERR(trans)) {
615 ret = btrfs_commit_transaction(trans);
616 if (ret)
617 return ret;
618 } else if (PTR_ERR(trans) != -ENOENT) {
619 return PTR_ERR(trans);
620 }
621
622 ret = btrfs_init_dev_replace_tgtdev(fs_info, tgtdev_name,
623 src_device, &tgt_device);
624 if (ret)
625 return ret;
626
627 ret = mark_block_group_to_copy(fs_info, src_device);
628 if (ret)
629 return ret;
630
631 down_write(&dev_replace->rwsem);
632 dev_replace->replace_task = current;
633 switch (dev_replace->replace_state) {
634 case BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED:
635 case BTRFS_IOCTL_DEV_REPLACE_STATE_FINISHED:
636 case BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED:
637 break;
638 case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED:
639 case BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED:
640 ASSERT(0);
641 ret = BTRFS_IOCTL_DEV_REPLACE_RESULT_ALREADY_STARTED;
642 up_write(&dev_replace->rwsem);
643 goto leave;
644 }
645
646 dev_replace->cont_reading_from_srcdev_mode = read_src;
647 dev_replace->srcdev = src_device;
648 dev_replace->tgtdev = tgt_device;
649
650 btrfs_info_in_rcu(fs_info,
651 "dev_replace from %s (devid %llu) to %s started",
652 btrfs_dev_name(src_device),
653 src_device->devid,
654 btrfs_dev_name(tgt_device));
655
656 /*
657 * from now on, the writes to the srcdev are all duplicated to
658 * go to the tgtdev as well (refer to btrfs_map_block()).
659 */
660 dev_replace->replace_state = BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED;
661 dev_replace->time_started = ktime_get_real_seconds();
662 dev_replace->cursor_left = 0;
663 dev_replace->committed_cursor_left = 0;
664 dev_replace->cursor_left_last_write_of_item = 0;
665 dev_replace->cursor_right = 0;
666 dev_replace->is_valid = 1;
667 dev_replace->item_needs_writeback = 1;
668 atomic64_set(&dev_replace->num_write_errors, 0);
669 atomic64_set(&dev_replace->num_uncorrectable_read_errors, 0);
670 up_write(&dev_replace->rwsem);
671
672 ret = btrfs_sysfs_add_device(tgt_device);
673 if (ret)
674 btrfs_err(fs_info, "kobj add dev failed %d", ret);
675
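	/* Wait for all in-flight ordered extents to complete before the copy starts. */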
676 btrfs_wait_ordered_roots(fs_info, U64_MAX, NULL);
677
678 /*
679 * Commit dev_replace state and reserve 1 item for it.
680 * This is crucial to ensure we won't miss copying extents for new block
681 * groups that are allocated after we started the device replace, and
682 * must be done after setting up the device replace state.
683 */
684 trans = btrfs_start_transaction(root, 1);
685 if (IS_ERR(trans)) {
686 ret = PTR_ERR(trans);
687 down_write(&dev_replace->rwsem);
688 dev_replace->replace_state =
689 BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED;
690 dev_replace->srcdev = NULL;
691 dev_replace->tgtdev = NULL;
692 up_write(&dev_replace->rwsem);
693 goto leave;
694 }
695
696 ret = btrfs_commit_transaction(trans);
697 WARN_ON(ret);
698
699 /* the disk copy procedure reuses the scrub code */
700 ret = btrfs_scrub_dev(fs_info, src_device->devid, 0,
701 btrfs_device_get_total_bytes(src_device),
702 &dev_replace->scrub_progress, 0, 1);
703
704 ret = btrfs_dev_replace_finishing(fs_info, ret);
705 if (ret == -EINPROGRESS)
706 ret = BTRFS_IOCTL_DEV_REPLACE_RESULT_SCRUB_INPROGRESS;
707
708 return ret;
709
710 leave:
711 btrfs_destroy_dev_replace_tgtdev(tgt_device);
712 return ret;
713 }
714
static int btrfs_check_replace_dev_names(struct btrfs_ioctl_dev_replace_args *args)
{
717 if (args->start.srcdevid == 0) {
718 if (memchr(args->start.srcdev_name, 0,
719 sizeof(args->start.srcdev_name)) == NULL)
720 return -ENAMETOOLONG;
721 } else {
722 args->start.srcdev_name[0] = 0;
723 }
724
725 if (memchr(args->start.tgtdev_name, 0,
726 sizeof(args->start.tgtdev_name)) == NULL)
727 return -ENAMETOOLONG;
728
729 return 0;
730 }
731
int btrfs_dev_replace_by_ioctl(struct btrfs_fs_info *fs_info,
			       struct btrfs_ioctl_dev_replace_args *args)
{
735 int ret;
736
737 switch (args->start.cont_reading_from_srcdev_mode) {
738 case BTRFS_IOCTL_DEV_REPLACE_CONT_READING_FROM_SRCDEV_MODE_ALWAYS:
739 case BTRFS_IOCTL_DEV_REPLACE_CONT_READING_FROM_SRCDEV_MODE_AVOID:
740 break;
741 default:
742 return -EINVAL;
743 }
744 ret = btrfs_check_replace_dev_names(args);
745 if (ret < 0)
746 return ret;
747
748 ret = btrfs_dev_replace_start(fs_info, args->start.tgtdev_name,
749 args->start.srcdevid,
750 args->start.srcdev_name,
751 args->start.cont_reading_from_srcdev_mode);
752 args->result = ret;
753 /* don't warn if EINPROGRESS, someone else might be running scrub */
754 if (ret == BTRFS_IOCTL_DEV_REPLACE_RESULT_SCRUB_INPROGRESS ||
755 ret == BTRFS_IOCTL_DEV_REPLACE_RESULT_NO_ERROR)
756 return 0;
757
758 return ret;
759 }
760
/*
 * Block until all in-flight bio operations are finished.
 */
static void btrfs_rm_dev_replace_blocked(struct btrfs_fs_info *fs_info)
{
766 set_bit(BTRFS_FS_STATE_DEV_REPLACING, &fs_info->fs_state);
767 wait_event(fs_info->dev_replace.replace_wait, !percpu_counter_sum(
768 &fs_info->dev_replace.bio_counter));
769 }
770
/*
 * We have removed the target device, it is safe to allow new bio requests.
 */
static void btrfs_rm_dev_replace_unblocked(struct btrfs_fs_info *fs_info)
{
776 clear_bit(BTRFS_FS_STATE_DEV_REPLACING, &fs_info->fs_state);
777 wake_up(&fs_info->dev_replace.replace_wait);
778 }
779
780 /*
781 * When finishing the device replace, before swapping the source device with the
782 * target device we must update the chunk allocation state in the target device,
783 * as it is empty because replace works by directly copying the chunks and not
784 * through the normal chunk allocation path.
785 */
static int btrfs_set_target_alloc_state(struct btrfs_device *srcdev,
					struct btrfs_device *tgtdev)
{
789 struct extent_state *cached_state = NULL;
790 u64 start = 0;
791 u64 found_start;
792 u64 found_end;
793 int ret = 0;
794
795 lockdep_assert_held(&srcdev->fs_info->chunk_mutex);
796
797 while (find_first_extent_bit(&srcdev->alloc_state, start,
798 &found_start, &found_end,
799 CHUNK_ALLOCATED, &cached_state)) {
800 ret = set_extent_bit(&tgtdev->alloc_state, found_start,
801 found_end, CHUNK_ALLOCATED, NULL);
802 if (ret)
803 break;
804 start = found_end + 1;
805 }
806
807 free_extent_state(cached_state);
808 return ret;
809 }
810
static void btrfs_dev_replace_update_device_in_mapping_tree(
						struct btrfs_fs_info *fs_info,
						struct btrfs_device *srcdev,
						struct btrfs_device *tgtdev)
{
816 struct rb_node *node;
817
818 /*
819 * The chunk mutex must be held so that no new chunks can be created
820 * while we are updating existing chunks. This guarantees we don't miss
821 * any new chunk that gets created for a range that falls before the
822 * range of the last chunk we processed.
823 */
824 lockdep_assert_held(&fs_info->chunk_mutex);
825
826 write_lock(&fs_info->mapping_tree_lock);
827 node = rb_first_cached(&fs_info->mapping_tree);
828 while (node) {
829 struct rb_node *next = rb_next(node);
830 struct btrfs_chunk_map *map;
831 u64 next_start;
832
833 map = rb_entry(node, struct btrfs_chunk_map, rb_node);
834 next_start = map->start + map->chunk_len;
835
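		/* Point every stripe that references the source device at the target. */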
836 for (int i = 0; i < map->num_stripes; i++)
837 if (srcdev == map->stripes[i].dev)
838 map->stripes[i].dev = tgtdev;
839
840 if (cond_resched_rwlock_write(&fs_info->mapping_tree_lock)) {
841 map = btrfs_find_chunk_map_nolock(fs_info, next_start, U64_MAX);
842 if (!map)
843 break;
844 node = &map->rb_node;
845 /*
846 * Drop the lookup reference since we are holding the
847 * lock in write mode and no one can remove the chunk
848 * map from the tree and drop its tree reference.
849 */
850 btrfs_free_chunk_map(map);
851 } else {
852 node = next;
853 }
854 }
855 write_unlock(&fs_info->mapping_tree_lock);
856 }
857
static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
				       int scrub_ret)
{
861 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
862 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
863 struct btrfs_device *tgt_device;
864 struct btrfs_device *src_device;
865 struct btrfs_root *root = fs_info->tree_root;
866 u8 uuid_tmp[BTRFS_UUID_SIZE];
867 struct btrfs_trans_handle *trans;
868 int ret = 0;
869
870 /* don't allow cancel or unmount to disturb the finishing procedure */
871 mutex_lock(&dev_replace->lock_finishing_cancel_unmount);
872
873 down_read(&dev_replace->rwsem);
874 /* was the operation canceled, or is it finished? */
875 if (dev_replace->replace_state !=
876 BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED) {
877 up_read(&dev_replace->rwsem);
878 mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
879 return 0;
880 }
881
882 tgt_device = dev_replace->tgtdev;
883 src_device = dev_replace->srcdev;
884 up_read(&dev_replace->rwsem);
885
886 /*
887 * flush all outstanding I/O and inode extent mappings before the
888 * copy operation is declared as being finished
889 */
890 ret = btrfs_start_delalloc_roots(fs_info, LONG_MAX, false);
891 if (ret) {
892 mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
893 return ret;
894 }
895 btrfs_wait_ordered_roots(fs_info, U64_MAX, NULL);
896
897 /*
898 * We have to use this loop approach because at this point src_device
899 * has to be available for transaction commit to complete, yet new
900 * chunks shouldn't be allocated on the device.
901 */
902 while (1) {
903 trans = btrfs_start_transaction(root, 0);
904 if (IS_ERR(trans)) {
905 mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
906 return PTR_ERR(trans);
907 }
908 ret = btrfs_commit_transaction(trans);
909 WARN_ON(ret);
910
911 /* Prevent write_all_supers() during the finishing procedure */
912 mutex_lock(&fs_devices->device_list_mutex);
913 /* Prevent new chunks being allocated on the source device */
914 mutex_lock(&fs_info->chunk_mutex);
915
916 if (!list_empty(&src_device->post_commit_list)) {
917 mutex_unlock(&fs_devices->device_list_mutex);
918 mutex_unlock(&fs_info->chunk_mutex);
919 } else {
920 break;
921 }
922 }
923
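	/* No new chunks can be allocated on the source device now, finish the swap. */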
924 down_write(&dev_replace->rwsem);
925 dev_replace->replace_state =
926 scrub_ret ? BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED
927 : BTRFS_IOCTL_DEV_REPLACE_STATE_FINISHED;
928 dev_replace->tgtdev = NULL;
929 dev_replace->srcdev = NULL;
930 dev_replace->time_stopped = ktime_get_real_seconds();
931 dev_replace->item_needs_writeback = 1;
932
933 /*
934 * Update allocation state in the new device and replace the old device
935 * with the new one in the mapping tree.
936 */
937 if (!scrub_ret) {
938 scrub_ret = btrfs_set_target_alloc_state(src_device, tgt_device);
939 if (scrub_ret)
940 goto error;
941 btrfs_dev_replace_update_device_in_mapping_tree(fs_info,
942 src_device,
943 tgt_device);
944 } else {
945 if (scrub_ret != -ECANCELED)
946 btrfs_err_in_rcu(fs_info,
947 "btrfs_scrub_dev(%s, %llu, %s) failed %d",
948 btrfs_dev_name(src_device),
949 src_device->devid,
950 btrfs_dev_name(tgt_device), scrub_ret);
951 error:
952 up_write(&dev_replace->rwsem);
953 mutex_unlock(&fs_info->chunk_mutex);
954 mutex_unlock(&fs_devices->device_list_mutex);
955 btrfs_rm_dev_replace_blocked(fs_info);
956 if (tgt_device)
957 btrfs_destroy_dev_replace_tgtdev(tgt_device);
958 btrfs_rm_dev_replace_unblocked(fs_info);
959 mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
960
961 return scrub_ret;
962 }
963
964 btrfs_info_in_rcu(fs_info,
965 "dev_replace from %s (devid %llu) to %s finished",
966 btrfs_dev_name(src_device),
967 src_device->devid,
968 btrfs_dev_name(tgt_device));
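	/* The target device takes over the source device's devid, uuid and sizes. */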
969 clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &tgt_device->dev_state);
970 tgt_device->devid = src_device->devid;
971 src_device->devid = BTRFS_DEV_REPLACE_DEVID;
972 memcpy(uuid_tmp, tgt_device->uuid, sizeof(uuid_tmp));
973 memcpy(tgt_device->uuid, src_device->uuid, sizeof(tgt_device->uuid));
974 memcpy(src_device->uuid, uuid_tmp, sizeof(src_device->uuid));
975 btrfs_device_set_total_bytes(tgt_device, src_device->total_bytes);
976 btrfs_device_set_disk_total_bytes(tgt_device,
977 src_device->disk_total_bytes);
978 btrfs_device_set_bytes_used(tgt_device, src_device->bytes_used);
979 tgt_device->commit_bytes_used = src_device->bytes_used;
980
981 btrfs_assign_next_active_device(src_device, tgt_device);
982
983 list_add(&tgt_device->dev_alloc_list, &fs_devices->alloc_list);
984 fs_devices->rw_devices++;
985
986 dev_replace->replace_task = NULL;
987 up_write(&dev_replace->rwsem);
988 btrfs_rm_dev_replace_blocked(fs_info);
989
990 btrfs_rm_dev_replace_remove_srcdev(src_device);
991
992 btrfs_rm_dev_replace_unblocked(fs_info);
993
994 /*
995 * Increment dev_stats_ccnt so that btrfs_run_dev_stats() will
996 * update on-disk dev stats value during commit transaction
997 */
998 atomic_inc(&tgt_device->dev_stats_ccnt);
999
1000 /*
1001 * this is again a consistent state where no dev_replace procedure
1002 * is running, the target device is part of the filesystem, the
1003 * source device is not part of the filesystem anymore and its 1st
1004 * superblock is scratched out so that it is no longer marked to
1005 * belong to this filesystem.
1006 */
1007 mutex_unlock(&fs_info->chunk_mutex);
1008 mutex_unlock(&fs_devices->device_list_mutex);
1009
1010 /* replace the sysfs entry */
1011 btrfs_sysfs_remove_device(src_device);
1012 btrfs_sysfs_update_devid(tgt_device);
1013 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &src_device->dev_state))
1014 btrfs_scratch_superblocks(fs_info, src_device);
1015
1016 /* write back the superblocks */
1017 trans = btrfs_start_transaction(root, 0);
1018 if (!IS_ERR(trans))
1019 btrfs_commit_transaction(trans);
1020
1021 mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
1022
1023 btrfs_rm_dev_replace_free_srcdev(src_device);
1024
1025 return 0;
1026 }
1027
1028 /*
1029 * Read progress of device replace status according to the state and last
1030 * stored position. The value format is the same as for
1031 * btrfs_dev_replace::progress_1000
1032 */
static u64 btrfs_dev_replace_progress(struct btrfs_fs_info *fs_info)
{
1035 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
1036 u64 ret = 0;
1037
1038 switch (dev_replace->replace_state) {
1039 case BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED:
1040 case BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED:
1041 ret = 0;
1042 break;
1043 case BTRFS_IOCTL_DEV_REPLACE_STATE_FINISHED:
1044 ret = 1000;
1045 break;
1046 case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED:
1047 case BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED:
1048 ret = div64_u64(dev_replace->cursor_left,
1049 div_u64(btrfs_device_get_total_bytes(
1050 dev_replace->srcdev), 1000));
1051 break;
1052 }
1053
1054 return ret;
1055 }
1056
void btrfs_dev_replace_status(struct btrfs_fs_info *fs_info,
			      struct btrfs_ioctl_dev_replace_args *args)
{
1060 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
1061
1062 down_read(&dev_replace->rwsem);
	/*
	 * Even if !dev_replace_is_valid, the values are good enough for
	 * the replace_status ioctl.
	 */
1065 args->result = BTRFS_IOCTL_DEV_REPLACE_RESULT_NO_ERROR;
1066 args->status.replace_state = dev_replace->replace_state;
1067 args->status.time_started = dev_replace->time_started;
1068 args->status.time_stopped = dev_replace->time_stopped;
1069 args->status.num_write_errors =
1070 atomic64_read(&dev_replace->num_write_errors);
1071 args->status.num_uncorrectable_read_errors =
1072 atomic64_read(&dev_replace->num_uncorrectable_read_errors);
1073 args->status.progress_1000 = btrfs_dev_replace_progress(fs_info);
1074 up_read(&dev_replace->rwsem);
1075 }
1076
int btrfs_dev_replace_cancel(struct btrfs_fs_info *fs_info)
{
1079 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
1080 struct btrfs_device *tgt_device = NULL;
1081 struct btrfs_device *src_device = NULL;
1082 struct btrfs_trans_handle *trans;
1083 struct btrfs_root *root = fs_info->tree_root;
1084 int result;
1085 int ret;
1086
1087 if (sb_rdonly(fs_info->sb))
1088 return -EROFS;
1089
1090 mutex_lock(&dev_replace->lock_finishing_cancel_unmount);
1091 down_write(&dev_replace->rwsem);
1092 switch (dev_replace->replace_state) {
1093 case BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED:
1094 case BTRFS_IOCTL_DEV_REPLACE_STATE_FINISHED:
1095 case BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED:
1096 result = BTRFS_IOCTL_DEV_REPLACE_RESULT_NOT_STARTED;
1097 up_write(&dev_replace->rwsem);
1098 break;
1099 case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED:
1100 tgt_device = dev_replace->tgtdev;
1101 src_device = dev_replace->srcdev;
1102 up_write(&dev_replace->rwsem);
1103 ret = btrfs_scrub_cancel(fs_info);
1104 if (ret < 0) {
1105 result = BTRFS_IOCTL_DEV_REPLACE_RESULT_NOT_STARTED;
1106 } else {
1107 result = BTRFS_IOCTL_DEV_REPLACE_RESULT_NO_ERROR;
1108 /*
1109 * btrfs_dev_replace_finishing() will handle the
1110 * cleanup part
1111 */
1112 btrfs_info_in_rcu(fs_info,
1113 "dev_replace from %s (devid %llu) to %s canceled",
1114 btrfs_dev_name(src_device), src_device->devid,
1115 btrfs_dev_name(tgt_device));
1116 }
1117 break;
1118 case BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED:
1119 /*
1120 * Scrub doing the replace isn't running so we need to do the
1121 * cleanup step of btrfs_dev_replace_finishing() here
1122 */
1123 result = BTRFS_IOCTL_DEV_REPLACE_RESULT_NO_ERROR;
1124 tgt_device = dev_replace->tgtdev;
1125 src_device = dev_replace->srcdev;
1126 dev_replace->tgtdev = NULL;
1127 dev_replace->srcdev = NULL;
1128 dev_replace->replace_state =
1129 BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED;
1130 dev_replace->time_stopped = ktime_get_real_seconds();
1131 dev_replace->item_needs_writeback = 1;
1132
1133 up_write(&dev_replace->rwsem);
1134
1135 /* Scrub for replace must not be running in suspended state */
1136 btrfs_scrub_cancel(fs_info);
1137
1138 trans = btrfs_start_transaction(root, 0);
1139 if (IS_ERR(trans)) {
1140 mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
1141 return PTR_ERR(trans);
1142 }
1143 ret = btrfs_commit_transaction(trans);
1144 WARN_ON(ret);
1145
1146 btrfs_info_in_rcu(fs_info,
1147 "suspended dev_replace from %s (devid %llu) to %s canceled",
1148 btrfs_dev_name(src_device), src_device->devid,
1149 btrfs_dev_name(tgt_device));
1150
1151 if (tgt_device)
1152 btrfs_destroy_dev_replace_tgtdev(tgt_device);
1153 break;
1154 default:
1155 up_write(&dev_replace->rwsem);
1156 result = -EINVAL;
1157 }
1158
1159 mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
1160 return result;
1161 }
1162
void btrfs_dev_replace_suspend_for_unmount(struct btrfs_fs_info *fs_info)
{
1165 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
1166
1167 mutex_lock(&dev_replace->lock_finishing_cancel_unmount);
1168 down_write(&dev_replace->rwsem);
1169
1170 switch (dev_replace->replace_state) {
1171 case BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED:
1172 case BTRFS_IOCTL_DEV_REPLACE_STATE_FINISHED:
1173 case BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED:
1174 case BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED:
1175 break;
1176 case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED:
1177 dev_replace->replace_state =
1178 BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED;
1179 dev_replace->time_stopped = ktime_get_real_seconds();
1180 dev_replace->item_needs_writeback = 1;
1181 btrfs_info(fs_info, "suspending dev_replace for unmount");
1182 break;
1183 }
1184
1185 up_write(&dev_replace->rwsem);
1186 mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
1187 }
1188
1189 /* resume dev_replace procedure that was interrupted by unmount */
int btrfs_resume_dev_replace_async(struct btrfs_fs_info *fs_info)
{
1192 struct task_struct *task;
1193 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
1194
1195 down_write(&dev_replace->rwsem);
1196
1197 switch (dev_replace->replace_state) {
1198 case BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED:
1199 case BTRFS_IOCTL_DEV_REPLACE_STATE_FINISHED:
1200 case BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED:
1201 up_write(&dev_replace->rwsem);
1202 return 0;
1203 case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED:
1204 break;
1205 case BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED:
1206 dev_replace->replace_state =
1207 BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED;
1208 break;
1209 }
1210 if (!dev_replace->tgtdev || !dev_replace->tgtdev->bdev) {
1211 btrfs_info(fs_info,
1212 "cannot continue dev_replace, tgtdev is missing");
1213 btrfs_info(fs_info,
1214 "you may cancel the operation after 'mount -o degraded'");
1215 dev_replace->replace_state =
1216 BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED;
1217 up_write(&dev_replace->rwsem);
1218 return 0;
1219 }
1220 up_write(&dev_replace->rwsem);
1221
1222 /*
1223 * This could collide with a paused balance, but the exclusive op logic
1224 * should never allow both to start and pause. We don't want to allow
1225 * dev-replace to start anyway.
1226 */
1227 if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_DEV_REPLACE)) {
1228 down_write(&dev_replace->rwsem);
1229 dev_replace->replace_state =
1230 BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED;
1231 up_write(&dev_replace->rwsem);
1232 btrfs_info(fs_info,
1233 "cannot resume dev-replace, other exclusive operation running");
1234 return 0;
1235 }
1236
1237 task = kthread_run(btrfs_dev_replace_kthread, fs_info, "btrfs-devrepl");
1238 return PTR_ERR_OR_ZERO(task);
1239 }
1240
static int btrfs_dev_replace_kthread(void *data)
{
1243 struct btrfs_fs_info *fs_info = data;
1244 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
1245 u64 progress;
1246 int ret;
1247
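	/* progress_1000 is in 0.1% units, convert it to a percentage for the log. */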
1248 progress = btrfs_dev_replace_progress(fs_info);
1249 progress = div_u64(progress, 10);
1250 btrfs_info_in_rcu(fs_info,
1251 "continuing dev_replace from %s (devid %llu) to target %s @%u%%",
1252 btrfs_dev_name(dev_replace->srcdev),
1253 dev_replace->srcdev->devid,
1254 btrfs_dev_name(dev_replace->tgtdev),
1255 (unsigned int)progress);
1256
1257 ret = btrfs_scrub_dev(fs_info, dev_replace->srcdev->devid,
1258 dev_replace->committed_cursor_left,
1259 btrfs_device_get_total_bytes(dev_replace->srcdev),
1260 &dev_replace->scrub_progress, 0, 1);
1261 ret = btrfs_dev_replace_finishing(fs_info, ret);
1262 WARN_ON(ret && ret != -ECANCELED);
1263
1264 btrfs_exclop_finish(fs_info);
1265 return 0;
1266 }
1267
int __pure btrfs_dev_replace_is_ongoing(struct btrfs_dev_replace *dev_replace)
{
1270 if (!dev_replace->is_valid)
1271 return 0;
1272
1273 switch (dev_replace->replace_state) {
1274 case BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED:
1275 case BTRFS_IOCTL_DEV_REPLACE_STATE_FINISHED:
1276 case BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED:
1277 return 0;
1278 case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED:
1279 case BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED:
		/*
		 * Return true even if the tgtdev is missing. This can happen
		 * if the dev_replace procedure was suspended by an umount and
		 * the tgtdev went missing (or "btrfs dev scan" was not run)
		 * before the filesystem was mounted again in degraded state.
		 * This does not stop the dev_replace procedure, it needs to
		 * be canceled manually if the cancellation is wanted.
		 */
1290 break;
1291 }
1292 return 1;
1293 }
1294
void btrfs_bio_counter_sub(struct btrfs_fs_info *fs_info, s64 amount)
{
1297 percpu_counter_sub(&fs_info->dev_replace.bio_counter, amount);
1298 cond_wake_up_nomb(&fs_info->dev_replace.replace_wait);
1299 }
1300
void btrfs_bio_counter_inc_blocked(struct btrfs_fs_info *fs_info)
{
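	/* Back off and wait while dev-replace is blocking new bios. */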
1303 while (1) {
1304 percpu_counter_inc(&fs_info->dev_replace.bio_counter);
1305 if (likely(!test_bit(BTRFS_FS_STATE_DEV_REPLACING,
1306 &fs_info->fs_state)))
1307 break;
1308
1309 btrfs_bio_counter_dec(fs_info);
1310 wait_event(fs_info->dev_replace.replace_wait,
1311 !test_bit(BTRFS_FS_STATE_DEV_REPLACING,
1312 &fs_info->fs_state));
1313 }
1314 }
1315