/* xref: /linux/fs/btrfs/volumes.c (revision 6ceb6346b0436ea6591c33ab6ab22e5077ed17e7) */
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/semaphore.h>
#include <linux/uuid.h>
#include <linux/list_sort.h>
#include <linux/namei.h>
#include "misc.h"
#include "disk-io.h"
#include "extent-tree.h"
#include "transaction.h"
#include "volumes.h"
#include "raid56.h"
#include "rcu-string.h"
#include "dev-replace.h"
#include "sysfs.h"
#include "tree-checker.h"
#include "space-info.h"
#include "block-group.h"
#include "discard.h"
#include "zoned.h"
#include "fs.h"
#include "accessors.h"
#include "uuid-tree.h"
#include "ioctl.h"
#include "relocation.h"
#include "scrub.h"
#include "super.h"
#include "raid-stripe-tree.h"

#define BTRFS_BLOCK_GROUP_STRIPE_MASK	(BTRFS_BLOCK_GROUP_RAID0 | \
					 BTRFS_BLOCK_GROUP_RAID10 | \
					 BTRFS_BLOCK_GROUP_RAID56_MASK)

struct btrfs_io_geometry {
	u32 stripe_index;
	u32 stripe_nr;
	int mirror_num;
	int num_stripes;
	u64 stripe_offset;
	u64 raid56_full_stripe_start;
	int max_errors;
	enum btrfs_map_op op;
	bool use_rst;
};

const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
	[BTRFS_RAID_RAID10] = {
		.sub_stripes	= 2,
		.dev_stripes	= 1,
		.devs_max	= 0,	/* 0 == as many as possible */
		.devs_min	= 2,
		.tolerated_failures = 1,
		.devs_increment	= 2,
		.ncopies	= 2,
		.nparity        = 0,
		.raid_name	= "raid10",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID10,
		.mindev_error	= BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 2,
		.devs_min	= 2,
		.tolerated_failures = 1,
		.devs_increment	= 2,
		.ncopies	= 2,
		.nparity        = 0,
		.raid_name	= "raid1",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID1,
		.mindev_error	= BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1C3] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 3,
		.devs_min	= 3,
		.tolerated_failures = 2,
		.devs_increment	= 3,
		.ncopies	= 3,
		.nparity        = 0,
		.raid_name	= "raid1c3",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID1C3,
		.mindev_error	= BTRFS_ERROR_DEV_RAID1C3_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1C4] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 4,
		.devs_min	= 4,
		.tolerated_failures = 3,
		.devs_increment	= 4,
		.ncopies	= 4,
		.nparity        = 0,
		.raid_name	= "raid1c4",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID1C4,
		.mindev_error	= BTRFS_ERROR_DEV_RAID1C4_MIN_NOT_MET,
	},
	[BTRFS_RAID_DUP] = {
		.sub_stripes	= 1,
		.dev_stripes	= 2,
		.devs_max	= 1,
		.devs_min	= 1,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 2,
		.nparity        = 0,
		.raid_name	= "dup",
		.bg_flag	= BTRFS_BLOCK_GROUP_DUP,
		.mindev_error	= 0,
	},
	[BTRFS_RAID_RAID0] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 1,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity        = 0,
		.raid_name	= "raid0",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID0,
		.mindev_error	= 0,
	},
	[BTRFS_RAID_SINGLE] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 1,
		.devs_min	= 1,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity        = 0,
		.raid_name	= "single",
		.bg_flag	= 0,
		.mindev_error	= 0,
	},
	[BTRFS_RAID_RAID5] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 2,
		.tolerated_failures = 1,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity        = 1,
		.raid_name	= "raid5",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID5,
		.mindev_error	= BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID6] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 3,
		.tolerated_failures = 2,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity        = 2,
		.raid_name	= "raid6",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID6,
		.mindev_error	= BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET,
	},
};

/*
 * Convert block group flags (BTRFS_BLOCK_GROUP_*) to btrfs_raid_types, which
 * can be used as index to access btrfs_raid_array[].
 */
enum btrfs_raid_types __attribute_const__ btrfs_bg_flags_to_raid_index(u64 flags)
{
	const u64 profile = (flags & BTRFS_BLOCK_GROUP_PROFILE_MASK);

	if (!profile)
		return BTRFS_RAID_SINGLE;

	return BTRFS_BG_FLAG_TO_INDEX(profile);
}
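
/*
 * Illustrative example (editor's sketch, not part of the original source):
 * how the table and the conversion helper chain together. Per the array
 * above, a RAID5 block group flag resolves to index BTRFS_RAID_RAID5, whose
 * entry carries one parity stripe:
 *
 *	enum btrfs_raid_types idx = btrfs_bg_flags_to_raid_index(BTRFS_BLOCK_GROUP_RAID5);
 *	int nparity = btrfs_raid_array[idx].nparity;
 *	(nparity evaluates to 1)
 */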

const char *btrfs_bg_type_to_raid_name(u64 flags)
{
	const int index = btrfs_bg_flags_to_raid_index(flags);

	if (index >= BTRFS_NR_RAID_TYPES)
		return NULL;

	return btrfs_raid_array[index].raid_name;
}

int btrfs_nr_parity_stripes(u64 type)
{
	enum btrfs_raid_types index = btrfs_bg_flags_to_raid_index(type);

	return btrfs_raid_array[index].nparity;
}

/*
 * Fill @buf with textual description of @bg_flags, no more than @size_buf
 * bytes including terminating null byte.
 */
void btrfs_describe_block_groups(u64 bg_flags, char *buf, u32 size_buf)
{
	int i;
	int ret;
	char *bp = buf;
	u64 flags = bg_flags;
	u32 size_bp = size_buf;

	if (!flags) {
		strcpy(bp, "NONE");
		return;
	}

#define DESCRIBE_FLAG(flag, desc)					\
	do {								\
		if (flags & (flag)) {					\
			ret = snprintf(bp, size_bp, "%s|", (desc));	\
			if (ret < 0 || ret >= size_bp)			\
				goto out_overflow;			\
			size_bp -= ret;					\
			bp += ret;					\
			flags &= ~(flag);				\
		}							\
	} while (0)

	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_DATA, "data");
	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_SYSTEM, "system");
	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_METADATA, "metadata");

	DESCRIBE_FLAG(BTRFS_AVAIL_ALLOC_BIT_SINGLE, "single");
	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
		DESCRIBE_FLAG(btrfs_raid_array[i].bg_flag,
			      btrfs_raid_array[i].raid_name);
#undef DESCRIBE_FLAG

	if (flags) {
		ret = snprintf(bp, size_bp, "0x%llx|", flags);
		size_bp -= ret;
	}

	if (size_bp < size_buf)
		buf[size_buf - size_bp - 1] = '\0'; /* remove last | */

	/*
	 * The text is trimmed, it's up to the caller to provide a
	 * sufficiently large buffer.
	 */
out_overflow:;
}
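
/*
 * Illustrative example (editor's addition, derived from the code above):
 * for bg_flags == (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_RAID1) the
 * macro appends "data|" and then "raid1|", and the trailing '|' is replaced
 * by the terminating NUL, leaving "data|raid1" in @buf.
 */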

static int init_first_rw_device(struct btrfs_trans_handle *trans);
static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info);
static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);

/*
 * Device locking
 * ==============
 *
 * There are several mutexes that protect manipulation of devices and low-level
 * structures like chunks but not block groups, extents or files
 *
 * uuid_mutex (global lock)
 * ------------------------
 * protects the fs_uuids list that tracks all per-fs fs_devices, resulting from
 * the SCAN_DEV ioctl registration or from mount either implicitly (the first
 * device) or requested by the device= mount option
 *
 * the mutex can be very coarse and can cover long-running operations
 *
 * protects: updates to fs_devices counters like missing devices, rw devices,
 * seeding, structure cloning, opening/closing devices at mount/umount time
 *
 * global::fs_devs - add, remove, updates to the global list
 *
 * does not protect: manipulation of the fs_devices::devices list in general
 * but in mount context it could be used to exclude list modifications by eg.
 * scan ioctl
 *
 * btrfs_device::name - renames (write side), read is RCU
 *
 * fs_devices::device_list_mutex (per-fs, with RCU)
 * ------------------------------------------------
 * protects updates to fs_devices::devices, ie. adding and deleting
 *
 * simple list traversal with read-only actions can be done with RCU protection
 *
 * may be used to exclude some operations from running concurrently without any
 * modifications to the list (see write_all_supers)
 *
 * Is not required at mount and close times, because our device list is
 * protected by the uuid_mutex at that point.
 *
 * balance_mutex
 * -------------
 * protects balance structures (status, state) and context accessed from
 * several places (internally, ioctl)
 *
 * chunk_mutex
 * -----------
 * protects chunks, adding or removing during allocation, trim or when a new
 * device is added/removed. Additionally it also protects post_commit_list of
 * individual devices, since they can be added to the transaction's
 * post_commit_list only with chunk_mutex held.
 *
 * cleaner_mutex
 * -------------
 * a big lock that is held by the cleaner thread and prevents running subvolume
 * cleaning together with relocation or delayed iputs
 *
 *
 * Lock nesting
 * ============
 *
 * uuid_mutex
 *   device_list_mutex
 *     chunk_mutex
 *   balance_mutex
 *
 *
 * Exclusive operations
 * ====================
 *
 * Maintains the exclusivity of the following operations that apply to the
 * whole filesystem and cannot run in parallel.
 *
 * - Balance (*)
 * - Device add
 * - Device remove
 * - Device replace (*)
 * - Resize
 *
 * The device operations (as above) can be in one of the following states:
 *
 * - Running state
 * - Paused state
 * - Completed state
 *
 * Only device operations marked with (*) can go into the Paused state for the
 * following reasons:
 *
 * - ioctl (only Balance can be Paused through ioctl)
 * - filesystem remounted as read-only
 * - filesystem unmounted and mounted as read-only
 * - system power-cycle and filesystem mounted as read-only
 * - filesystem or device errors leading to forced read-only
 *
 * The status of exclusive operation is set and cleared atomically.
 * During the course of Paused state, fs_info::exclusive_operation remains set.
 * A device operation in Paused or Running state can be canceled or resumed
 * either by ioctl (Balance only) or when remounted as read-write.
 * The exclusive status is cleared when the device operation is canceled or
 * completed.
 */
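
/*
 * Illustrative example (editor's sketch, not part of the original source):
 * the nesting order documented above means a path needing all three locks
 * must take them outermost-first:
 *
 *	mutex_lock(&uuid_mutex);
 *	mutex_lock(&fs_devices->device_list_mutex);
 *	mutex_lock(&fs_info->chunk_mutex);
 *	(manipulate devices and chunks)
 *	mutex_unlock(&fs_info->chunk_mutex);
 *	mutex_unlock(&fs_devices->device_list_mutex);
 *	mutex_unlock(&uuid_mutex);
 */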

DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);
struct list_head * __attribute_const__ btrfs_get_fs_uuids(void)
{
	return &fs_uuids;
}

/*
 * Allocate new btrfs_fs_devices structure identified by a fsid.
 *
 * @fsid:    if not NULL, copy the UUID to fs_devices::fsid and to
 *           fs_devices::metadata_fsid
 *
 * Return a pointer to a new struct btrfs_fs_devices on success, or ERR_PTR().
 * The returned struct is not linked onto any lists and can be destroyed with
 * kfree() right away.
 */
static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid)
{
	struct btrfs_fs_devices *fs_devs;

	fs_devs = kzalloc(sizeof(*fs_devs), GFP_KERNEL);
	if (!fs_devs)
		return ERR_PTR(-ENOMEM);

	mutex_init(&fs_devs->device_list_mutex);

	INIT_LIST_HEAD(&fs_devs->devices);
	INIT_LIST_HEAD(&fs_devs->alloc_list);
	INIT_LIST_HEAD(&fs_devs->fs_list);
	INIT_LIST_HEAD(&fs_devs->seed_list);

	if (fsid) {
		memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE);
		memcpy(fs_devs->metadata_uuid, fsid, BTRFS_FSID_SIZE);
	}

	return fs_devs;
}

static void btrfs_free_device(struct btrfs_device *device)
{
	WARN_ON(!list_empty(&device->post_commit_list));
	rcu_string_free(device->name);
	extent_io_tree_release(&device->alloc_state);
	btrfs_destroy_dev_zone_info(device);
	kfree(device);
}

static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;

	WARN_ON(fs_devices->opened);
	while (!list_empty(&fs_devices->devices)) {
		device = list_entry(fs_devices->devices.next,
				    struct btrfs_device, dev_list);
		list_del(&device->dev_list);
		btrfs_free_device(device);
	}
	kfree(fs_devices);
}

void __exit btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;

	while (!list_empty(&fs_uuids)) {
		fs_devices = list_entry(fs_uuids.next,
					struct btrfs_fs_devices, fs_list);
		list_del(&fs_devices->fs_list);
		free_fs_devices(fs_devices);
	}
}

static bool match_fsid_fs_devices(const struct btrfs_fs_devices *fs_devices,
				  const u8 *fsid, const u8 *metadata_fsid)
{
	if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) != 0)
		return false;

	if (!metadata_fsid)
		return true;

	if (memcmp(metadata_fsid, fs_devices->metadata_uuid, BTRFS_FSID_SIZE) != 0)
		return false;

	return true;
}

static noinline struct btrfs_fs_devices *find_fsid(
		const u8 *fsid, const u8 *metadata_fsid)
{
	struct btrfs_fs_devices *fs_devices;

	ASSERT(fsid);

	/* Handle non-split brain cases */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (match_fsid_fs_devices(fs_devices, fsid, metadata_fsid))
			return fs_devices;
	}
	return NULL;
}

static int
btrfs_get_bdev_and_sb(const char *device_path, blk_mode_t flags, void *holder,
		      int flush, struct file **bdev_file,
		      struct btrfs_super_block **disk_super)
{
	struct block_device *bdev;
	int ret;

	*bdev_file = bdev_file_open_by_path(device_path, flags, holder, NULL);

	if (IS_ERR(*bdev_file)) {
		ret = PTR_ERR(*bdev_file);
		btrfs_err(NULL, "failed to open device for path %s with flags 0x%x: %d",
			  device_path, flags, ret);
		goto error;
	}
	bdev = file_bdev(*bdev_file);

	if (flush)
		sync_blockdev(bdev);
	if (holder) {
		ret = set_blocksize(*bdev_file, BTRFS_BDEV_BLOCKSIZE);
		if (ret) {
			fput(*bdev_file);
			goto error;
		}
	}
	invalidate_bdev(bdev);
	*disk_super = btrfs_read_dev_super(bdev);
	if (IS_ERR(*disk_super)) {
		ret = PTR_ERR(*disk_super);
		fput(*bdev_file);
		goto error;
	}

	return 0;

error:
	*disk_super = NULL;
	*bdev_file = NULL;
	return ret;
}

/*
 *  Search and remove all stale devices (which are not mounted).  When both
 *  inputs are NULL, it will search and release all stale devices.
 *
 *  @devt:         Optional. When provided, it will release only unmounted
 *                 devices matching this devt.
 *  @skip_device:  Optional. Will skip this device when searching for the stale
 *                 devices.
 *
 *  Return:	0 for success or if @devt is 0.
 *		-EBUSY if @devt is a mounted device.
 *		-ENOENT if @devt does not match any device in the list.
 */
static int btrfs_free_stale_devices(dev_t devt, struct btrfs_device *skip_device)
{
	struct btrfs_fs_devices *fs_devices, *tmp_fs_devices;
	struct btrfs_device *device, *tmp_device;
	int ret;
	bool freed = false;

	lockdep_assert_held(&uuid_mutex);

	/* Return good status if there is no instance of devt. */
	ret = 0;
	list_for_each_entry_safe(fs_devices, tmp_fs_devices, &fs_uuids, fs_list) {

		mutex_lock(&fs_devices->device_list_mutex);
		list_for_each_entry_safe(device, tmp_device,
					 &fs_devices->devices, dev_list) {
			if (skip_device && skip_device == device)
				continue;
			if (devt && devt != device->devt)
				continue;
			if (fs_devices->opened) {
				if (devt)
					ret = -EBUSY;
				break;
			}

			/* delete the stale device */
			fs_devices->num_devices--;
			list_del(&device->dev_list);
			btrfs_free_device(device);

			freed = true;
		}
		mutex_unlock(&fs_devices->device_list_mutex);

		if (fs_devices->num_devices == 0) {
			btrfs_sysfs_remove_fsid(fs_devices);
			list_del(&fs_devices->fs_list);
			free_fs_devices(fs_devices);
		}
	}

	/* If there is at least one freed device return 0. */
	if (freed)
		return 0;

	return ret;
}
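
/*
 * Usage sketch (editor's addition, not part of the original source): callers
 * must hold uuid_mutex, as asserted above. btrfs_forget_devices() below is
 * the canonical wrapper:
 *
 *	mutex_lock(&uuid_mutex);
 *	ret = btrfs_free_stale_devices(devt, NULL);
 *	mutex_unlock(&uuid_mutex);
 */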

static struct btrfs_fs_devices *find_fsid_by_device(
					struct btrfs_super_block *disk_super,
					dev_t devt, bool *same_fsid_diff_dev)
{
	struct btrfs_fs_devices *fsid_fs_devices;
	struct btrfs_fs_devices *devt_fs_devices;
	const bool has_metadata_uuid = (btrfs_super_incompat_flags(disk_super) &
					BTRFS_FEATURE_INCOMPAT_METADATA_UUID);
	bool found_by_devt = false;

	/* Find the fs_device by the usual method, if found use it. */
	fsid_fs_devices = find_fsid(disk_super->fsid,
		    has_metadata_uuid ? disk_super->metadata_uuid : NULL);

	/* The temp_fsid feature is supported only with a single-device filesystem. */
	if (btrfs_super_num_devices(disk_super) != 1)
		return fsid_fs_devices;

	/*
	 * A seed device is an integral component of the sprout device, which
	 * functions as a multi-device filesystem. So the temp-fsid feature is
	 * not supported.
	 */
	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING)
		return fsid_fs_devices;

	/* Try to find a fs_devices by matching devt. */
	list_for_each_entry(devt_fs_devices, &fs_uuids, fs_list) {
		struct btrfs_device *device;

		list_for_each_entry(device, &devt_fs_devices->devices, dev_list) {
			if (device->devt == devt) {
				found_by_devt = true;
				break;
			}
		}
		if (found_by_devt)
			break;
	}

	if (found_by_devt) {
		/* Existing device. */
		if (fsid_fs_devices == NULL) {
			if (devt_fs_devices->opened == 0) {
				/* Stale device. */
				return NULL;
			} else {
				/* temp_fsid is mounting a subvol. */
				return devt_fs_devices;
			}
		} else {
			/* Regular or temp_fsid device mounting a subvol. */
			return devt_fs_devices;
		}
	} else {
		/* New device. */
		if (fsid_fs_devices == NULL) {
			return NULL;
		} else {
			/* sb::fsid is already used, create a new temp_fsid. */
			*same_fsid_diff_dev = true;
			return NULL;
		}
	}

	/* Not reached. */
}
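
/*
 * Summary of the cases above (editor's addition, derived from the code):
 *
 *	found by fsid	found by devt	devt fs opened	result
 *	no		yes		0		NULL (stale device)
 *	no		yes		>0		devt match (temp_fsid subvol mount)
 *	yes		yes		any		devt match
 *	no		no		-		NULL (new device)
 *	yes		no		-		NULL, *same_fsid_diff_dev = true
 */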

/*
 * This is only used on mount, and we are protected from competing things
 * messing with our fs_devices by the uuid_mutex, thus we do not need the
 * fs_devices->device_list_mutex here.
 */
static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
			struct btrfs_device *device, blk_mode_t flags,
			void *holder)
{
	struct file *bdev_file;
	struct btrfs_super_block *disk_super;
	u64 devid;
	int ret;

	if (device->bdev)
		return -EINVAL;
	if (!device->name)
		return -EINVAL;

	ret = btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
				    &bdev_file, &disk_super);
	if (ret)
		return ret;

	devid = btrfs_stack_device_id(&disk_super->dev_item);
	if (devid != device->devid)
		goto error_free_page;

	if (memcmp(device->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE))
		goto error_free_page;

	device->generation = btrfs_super_generation(disk_super);

	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
		if (btrfs_super_incompat_flags(disk_super) &
		    BTRFS_FEATURE_INCOMPAT_METADATA_UUID) {
			pr_err(
		"BTRFS: Invalid seeding and uuid-changed device detected\n");
			goto error_free_page;
		}

		clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
		fs_devices->seeding = true;
	} else {
		if (bdev_read_only(file_bdev(bdev_file)))
			clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
		else
			set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
	}

	if (!bdev_nonrot(file_bdev(bdev_file)))
		fs_devices->rotating = true;

	if (bdev_max_discard_sectors(file_bdev(bdev_file)))
		fs_devices->discardable = true;

	device->bdev_file = bdev_file;
	device->bdev = file_bdev(bdev_file);
	clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);

	if (device->devt != device->bdev->bd_dev) {
		btrfs_warn(NULL,
			   "device %s maj:min changed from %d:%d to %d:%d",
			   device->name->str, MAJOR(device->devt),
			   MINOR(device->devt), MAJOR(device->bdev->bd_dev),
			   MINOR(device->bdev->bd_dev));

		device->devt = device->bdev->bd_dev;
	}

	fs_devices->open_devices++;
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
		fs_devices->rw_devices++;
		list_add_tail(&device->dev_alloc_list, &fs_devices->alloc_list);
	}
	btrfs_release_disk_super(disk_super);

	return 0;

error_free_page:
	btrfs_release_disk_super(disk_super);
	fput(bdev_file);

	return -EINVAL;
}

const u8 *btrfs_sb_fsid_ptr(const struct btrfs_super_block *sb)
{
	bool has_metadata_uuid = (btrfs_super_incompat_flags(sb) &
				  BTRFS_FEATURE_INCOMPAT_METADATA_UUID);

	return has_metadata_uuid ? sb->metadata_uuid : sb->fsid;
}

/*
 * We can have very weird soft links passed in.
 * One example is "/proc/self/fd/<fd>", which can be a soft link to
 * a block device.
 *
 * But it's never a good idea to use those weird names.
 * Here we check if the path (not following symlinks) is a good one inside
 * "/dev/".
 */
static bool is_good_dev_path(const char *dev_path)
{
	struct path path = { .mnt = NULL, .dentry = NULL };
	char *path_buf = NULL;
	char *resolved_path;
	bool is_good = false;
	int ret;

	if (!dev_path)
		goto out;

	path_buf = kmalloc(PATH_MAX, GFP_KERNEL);
	if (!path_buf)
		goto out;

	/*
	 * Do not follow soft link, just check if the original path is inside
	 * "/dev/".
	 */
	ret = kern_path(dev_path, 0, &path);
	if (ret)
		goto out;
	resolved_path = d_path(&path, path_buf, PATH_MAX);
	if (IS_ERR(resolved_path))
		goto out;
	if (strncmp(resolved_path, "/dev/", strlen("/dev/")))
		goto out;
	is_good = true;
out:
	kfree(path_buf);
	path_put(&path);
	return is_good;
}
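
/*
 * Illustrative examples (editor's addition): "/dev/sda1" resolves under
 * "/dev/" and passes the check, while "/proc/self/fd/3" does not and is
 * rejected even if it ultimately points at a block device.
 */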

static int get_canonical_dev_path(const char *dev_path, char *canonical)
{
	struct path path = { .mnt = NULL, .dentry = NULL };
	char *path_buf = NULL;
	char *resolved_path;
	int ret;

	if (!dev_path) {
		ret = -EINVAL;
		goto out;
	}

	path_buf = kmalloc(PATH_MAX, GFP_KERNEL);
	if (!path_buf) {
		ret = -ENOMEM;
		goto out;
	}

	ret = kern_path(dev_path, LOOKUP_FOLLOW, &path);
	if (ret)
		goto out;
	resolved_path = d_path(&path, path_buf, PATH_MAX);
	if (IS_ERR(resolved_path)) {
		ret = PTR_ERR(resolved_path);
		goto out;
	}
	ret = strscpy(canonical, resolved_path, PATH_MAX);
out:
	kfree(path_buf);
	path_put(&path);
	return ret;
}

static bool is_same_device(struct btrfs_device *device, const char *new_path)
{
	struct path old = { .mnt = NULL, .dentry = NULL };
	struct path new = { .mnt = NULL, .dentry = NULL };
	char *old_path = NULL;
	bool is_same = false;
	int ret;

	if (!device->name)
		goto out;

	old_path = kzalloc(PATH_MAX, GFP_NOFS);
	if (!old_path)
		goto out;

	rcu_read_lock();
	ret = strscpy(old_path, rcu_str_deref(device->name), PATH_MAX);
	rcu_read_unlock();
	if (ret < 0)
		goto out;

	ret = kern_path(old_path, LOOKUP_FOLLOW, &old);
	if (ret)
		goto out;
	ret = kern_path(new_path, LOOKUP_FOLLOW, &new);
	if (ret)
		goto out;
	if (path_equal(&old, &new))
		is_same = true;
out:
	kfree(old_path);
	path_put(&old);
	path_put(&new);
	return is_same;
}

/*
 * Add new device to list of registered devices
 *
 * Returns:
 * device pointer which was just added or updated when successful
 * error pointer when failed
 */
static noinline struct btrfs_device *device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   bool *new_device_added)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices = NULL;
	struct rcu_string *name;
	u64 found_transid = btrfs_super_generation(disk_super);
	u64 devid = btrfs_stack_device_id(&disk_super->dev_item);
	dev_t path_devt;
	int error;
	bool same_fsid_diff_dev = false;
	bool has_metadata_uuid = (btrfs_super_incompat_flags(disk_super) &
		BTRFS_FEATURE_INCOMPAT_METADATA_UUID);

	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_CHANGING_FSID_V2) {
		btrfs_err(NULL,
"device %s has incomplete metadata_uuid change, please use btrfstune to complete",
			  path);
		return ERR_PTR(-EAGAIN);
	}

	error = lookup_bdev(path, &path_devt);
	if (error) {
		btrfs_err(NULL, "failed to lookup block device for path %s: %d",
			  path, error);
		return ERR_PTR(error);
	}

	fs_devices = find_fsid_by_device(disk_super, path_devt, &same_fsid_diff_dev);

	if (!fs_devices) {
		fs_devices = alloc_fs_devices(disk_super->fsid);
		if (IS_ERR(fs_devices))
			return ERR_CAST(fs_devices);

		if (has_metadata_uuid)
			memcpy(fs_devices->metadata_uuid,
			       disk_super->metadata_uuid, BTRFS_FSID_SIZE);

		if (same_fsid_diff_dev) {
			generate_random_uuid(fs_devices->fsid);
			fs_devices->temp_fsid = true;
		pr_info("BTRFS: device %s (%d:%d) using temp-fsid %pU\n",
				path, MAJOR(path_devt), MINOR(path_devt),
				fs_devices->fsid);
		}

		mutex_lock(&fs_devices->device_list_mutex);
		list_add(&fs_devices->fs_list, &fs_uuids);

		device = NULL;
	} else {
		struct btrfs_dev_lookup_args args = {
			.devid = devid,
			.uuid = disk_super->dev_item.uuid,
		};

		mutex_lock(&fs_devices->device_list_mutex);
		device = btrfs_find_device(fs_devices, &args);

		if (found_transid > fs_devices->latest_generation) {
			memcpy(fs_devices->fsid, disk_super->fsid,
					BTRFS_FSID_SIZE);
			memcpy(fs_devices->metadata_uuid,
			       btrfs_sb_fsid_ptr(disk_super), BTRFS_FSID_SIZE);
		}
	}

	if (!device) {
		unsigned int nofs_flag;

		if (fs_devices->opened) {
			btrfs_err(NULL,
"device %s (%d:%d) belongs to fsid %pU, and the fs is already mounted, scanned by %s (%d)",
				  path, MAJOR(path_devt), MINOR(path_devt),
				  fs_devices->fsid, current->comm,
				  task_pid_nr(current));
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-EBUSY);
		}

		nofs_flag = memalloc_nofs_save();
		device = btrfs_alloc_device(NULL, &devid,
					    disk_super->dev_item.uuid, path);
		memalloc_nofs_restore(nofs_flag);
		if (IS_ERR(device)) {
			mutex_unlock(&fs_devices->device_list_mutex);
			/* we can safely leave the fs_devices entry around */
			return device;
		}

		device->devt = path_devt;

		list_add_rcu(&device->dev_list, &fs_devices->devices);
		fs_devices->num_devices++;

		device->fs_devices = fs_devices;
		*new_device_added = true;

		if (disk_super->label[0])
			pr_info(
"BTRFS: device label %s devid %llu transid %llu %s (%d:%d) scanned by %s (%d)\n",
				disk_super->label, devid, found_transid, path,
				MAJOR(path_devt), MINOR(path_devt),
				current->comm, task_pid_nr(current));
		else
			pr_info(
"BTRFS: device fsid %pU devid %llu transid %llu %s (%d:%d) scanned by %s (%d)\n",
				disk_super->fsid, devid, found_transid, path,
				MAJOR(path_devt), MINOR(path_devt),
				current->comm, task_pid_nr(current));

	} else if (!device->name || !is_same_device(device, path)) {
		/*
		 * When FS is already mounted.
		 * 1. If you are here and if the device->name is NULL that
		 *    means this device was missing at time of FS mount.
		 * 2. If you are here and if the device->name is different
		 *    from 'path' that means either
		 *      a. The same device disappeared and reappeared with a
		 *         different name, or
		 *      b. The missing-disk-which-was-replaced has
		 *         reappeared now.
		 *
		 * We must allow 1 and 2a above. But 2b would be spurious
		 * and unintentional.
		 *
		 * Further in case of 1 and 2a above, the disk at 'path'
		 * would have missed some transaction when it was away and
		 * in case of 2a the stale bdev has to be updated as well.
		 * 2b must not be allowed at any time.
		 */

		/*
		 * For now, we do allow update to btrfs_fs_device through the
		 * btrfs dev scan cli after FS has been mounted.  We're still
		 * tracking a problem where systems fail mount by subvolume id
		 * when we reject replacement on a mounted FS.
		 */
		if (!fs_devices->opened && found_transid < device->generation) {
			/*
			 * That is, if the FS is _not_ mounted and you are
			 * here, there is more than one disk with the same
			 * uuid and devid. We keep the one with the larger
			 * generation number, or the last-in if the
			 * generations are equal.
			 */
			mutex_unlock(&fs_devices->device_list_mutex);
			btrfs_err(NULL,
"device %s already registered with a higher generation, found %llu expect %llu",
				  path, found_transid, device->generation);
			return ERR_PTR(-EEXIST);
		}

		/*
		 * We are going to replace the device path for a given devid,
		 * make sure it's the same device if the device is mounted.
		 *
		 * NOTE: the device->fs_info may not be reliable here so pass
		 * in a NULL to message helpers instead. This avoids a possible
		 * use-after-free when the fs_info and fs_info->sb are already
		 * torn down.
		 */
		if (device->bdev) {
			if (device->devt != path_devt) {
				mutex_unlock(&fs_devices->device_list_mutex);
				btrfs_warn_in_rcu(NULL,
	"duplicate device %s devid %llu generation %llu scanned by %s (%d)",
						  path, devid, found_transid,
						  current->comm,
						  task_pid_nr(current));
				return ERR_PTR(-EEXIST);
			}
			btrfs_info_in_rcu(NULL,
	"devid %llu device path %s changed to %s scanned by %s (%d)",
					  devid, btrfs_dev_name(device),
					  path, current->comm,
					  task_pid_nr(current));
		}

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name) {
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-ENOMEM);
		}
		rcu_string_free(device->name);
		rcu_assign_pointer(device->name, name);
		if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
			fs_devices->missing_devices--;
			clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
		}
		device->devt = path_devt;
	}

	/*
	 * Unmount does not free the btrfs_device struct but would zero
	 * generation along with most of the other members. So just update
	 * it back. We need it to pick the disk with largest generation
	 * (as above).
	 */
	if (!fs_devices->opened) {
		device->generation = found_transid;
		fs_devices->latest_generation = max_t(u64, found_transid,
						fs_devices->latest_generation);
	}

	fs_devices->total_devices = btrfs_super_num_devices(disk_super);

	mutex_unlock(&fs_devices->device_list_mutex);
	return device;
}

static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
{
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_device *device;
	struct btrfs_device *orig_dev;
	int ret = 0;

	lockdep_assert_held(&uuid_mutex);

	fs_devices = alloc_fs_devices(orig->fsid);
	if (IS_ERR(fs_devices))
		return fs_devices;

	fs_devices->total_devices = orig->total_devices;

	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
		const char *dev_path = NULL;

		/*
		 * This is ok to do without RCU read locked because we hold the
		 * uuid mutex so nothing we touch in here is going to disappear.
		 */
		if (orig_dev->name)
			dev_path = orig_dev->name->str;

		device = btrfs_alloc_device(NULL, &orig_dev->devid,
					    orig_dev->uuid, dev_path);
		if (IS_ERR(device)) {
			ret = PTR_ERR(device);
			goto error;
		}

		if (orig_dev->zone_info) {
			struct btrfs_zoned_device_info *zone_info;

			zone_info = btrfs_clone_dev_zone_info(orig_dev);
			if (!zone_info) {
				btrfs_free_device(device);
				ret = -ENOMEM;
				goto error;
			}
			device->zone_info = zone_info;
		}

		list_add(&device->dev_list, &fs_devices->devices);
		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	}
	return fs_devices;
error:
	free_fs_devices(fs_devices);
	return ERR_PTR(ret);
}

static void __btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices,
				      struct btrfs_device **latest_dev)
{
	struct btrfs_device *device, *next;

	/* This is the initialized path, it is safe to release the devices. */
	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
		if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state)) {
			if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
				      &device->dev_state) &&
			    !test_bit(BTRFS_DEV_STATE_MISSING,
				      &device->dev_state) &&
			    (!*latest_dev ||
			     device->generation > (*latest_dev)->generation)) {
				*latest_dev = device;
			}
			continue;
		}

		/*
		 * We have already validated the presence of BTRFS_DEV_REPLACE_DEVID
		 * in btrfs_init_dev_replace(), so just continue.
		 */
		if (device->devid == BTRFS_DEV_REPLACE_DEVID)
			continue;

		if (device->bdev_file) {
			fput(device->bdev_file);
			device->bdev = NULL;
			device->bdev_file = NULL;
			fs_devices->open_devices--;
		}
		if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
			list_del_init(&device->dev_alloc_list);
			clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
			fs_devices->rw_devices--;
		}
		list_del_init(&device->dev_list);
		fs_devices->num_devices--;
		btrfs_free_device(device);
	}
}

/*
 * After we have read the system tree and know devids belonging to this
 * filesystem, remove the device which does not belong there.
 */
void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *latest_dev = NULL;
	struct btrfs_fs_devices *seed_dev;

	mutex_lock(&uuid_mutex);
	__btrfs_free_extra_devids(fs_devices, &latest_dev);

	list_for_each_entry(seed_dev, &fs_devices->seed_list, seed_list)
		__btrfs_free_extra_devids(seed_dev, &latest_dev);

	fs_devices->latest_dev = latest_dev;

	mutex_unlock(&uuid_mutex);
}

static void btrfs_close_bdev(struct btrfs_device *device)
{
	if (!device->bdev)
		return;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
		sync_blockdev(device->bdev);
		invalidate_bdev(device->bdev);
	}

	fput(device->bdev_file);
}

static void btrfs_close_one_device(struct btrfs_device *device)
{
	struct btrfs_fs_devices *fs_devices = device->fs_devices;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
		list_del_init(&device->dev_alloc_list);
		fs_devices->rw_devices--;
	}

	if (device->devid == BTRFS_DEV_REPLACE_DEVID)
		clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);

	if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
		clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
		fs_devices->missing_devices--;
	}

	btrfs_close_bdev(device);
	if (device->bdev) {
		fs_devices->open_devices--;
		device->bdev = NULL;
		device->bdev_file = NULL;
	}
	clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
	btrfs_destroy_dev_zone_info(device);

	device->fs_info = NULL;
	atomic_set(&device->dev_stats_ccnt, 0);
	extent_io_tree_release(&device->alloc_state);

	/*
	 * Reset the flush error record. We might have a transient flush error
	 * in this mount, and if so we aborted the current transaction and set
	 * the fs to an error state, guaranteeing no super blocks can be further
	 * committed. However that error might be transient and if we unmount the
	 * filesystem and mount it again, we should allow the mount to succeed
	 * (btrfs_check_rw_degradable() should not fail) - if after mounting the
	 * filesystem again we still get flush errors, then we will again abort
	 * any transaction and set the error state, guaranteeing no commits of
	 * unsafe super blocks.
	 */
	device->last_flush_error = 0;

	/* Verify the device is back in a pristine state */
	WARN_ON(test_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state));
	WARN_ON(test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state));
	WARN_ON(!list_empty(&device->dev_alloc_list));
	WARN_ON(!list_empty(&device->post_commit_list));
}

static void close_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device, *tmp;

	lockdep_assert_held(&uuid_mutex);

	if (--fs_devices->opened > 0)
		return;

	list_for_each_entry_safe(device, tmp, &fs_devices->devices, dev_list)
		btrfs_close_one_device(device);

	WARN_ON(fs_devices->open_devices);
	WARN_ON(fs_devices->rw_devices);
	fs_devices->opened = 0;
	fs_devices->seeding = false;
	fs_devices->fs_info = NULL;
}

void btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	LIST_HEAD(list);
	struct btrfs_fs_devices *tmp;

	mutex_lock(&uuid_mutex);
	close_fs_devices(fs_devices);
	if (!fs_devices->opened) {
		list_splice_init(&fs_devices->seed_list, &list);

		/*
		 * If the struct btrfs_fs_devices is not assembled with any
		 * other device, it can be re-initialized during the next mount
		 * without needing the device-scan step. Therefore, it can be
		 * fully freed.
		 */
		if (fs_devices->num_devices == 1) {
			list_del(&fs_devices->fs_list);
			free_fs_devices(fs_devices);
		}
	}

	list_for_each_entry_safe(fs_devices, tmp, &list, seed_list) {
		close_fs_devices(fs_devices);
		list_del(&fs_devices->seed_list);
		free_fs_devices(fs_devices);
	}
	mutex_unlock(&uuid_mutex);
}

static int open_fs_devices(struct btrfs_fs_devices *fs_devices,
				blk_mode_t flags, void *holder)
{
	struct btrfs_device *device;
	struct btrfs_device *latest_dev = NULL;
	struct btrfs_device *tmp_device;
	s64 __maybe_unused value = 0;
	int ret = 0;

	list_for_each_entry_safe(device, tmp_device, &fs_devices->devices,
				 dev_list) {
		int ret2;

		ret2 = btrfs_open_one_device(fs_devices, device, flags, holder);
		if (ret2 == 0 &&
		    (!latest_dev || device->generation > latest_dev->generation)) {
			latest_dev = device;
		} else if (ret2 == -ENODATA) {
			fs_devices->num_devices--;
			list_del(&device->dev_list);
			btrfs_free_device(device);
		}
		if (ret == 0 && ret2 != 0)
			ret = ret2;
	}

	if (fs_devices->open_devices == 0) {
		if (ret)
			return ret;
		return -EINVAL;
	}

	fs_devices->opened = 1;
	fs_devices->latest_dev = latest_dev;
	fs_devices->total_rw_bytes = 0;
	fs_devices->chunk_alloc_policy = BTRFS_CHUNK_ALLOC_REGULAR;
#ifdef CONFIG_BTRFS_EXPERIMENTAL
	fs_devices->rr_min_contig_read = BTRFS_DEFAULT_RR_MIN_CONTIG_READ;
	fs_devices->read_devid = latest_dev->devid;
	fs_devices->read_policy = btrfs_read_policy_to_enum(btrfs_get_mod_read_policy(),
							    &value);
	if (fs_devices->read_policy == BTRFS_READ_POLICY_RR)
		fs_devices->collect_fs_stats = true;

	if (value) {
		if (fs_devices->read_policy == BTRFS_READ_POLICY_RR)
			fs_devices->rr_min_contig_read = value;
		if (fs_devices->read_policy == BTRFS_READ_POLICY_DEVID)
			fs_devices->read_devid = value;
	}
#else
	fs_devices->read_policy = BTRFS_READ_POLICY_PID;
#endif

	return 0;
}

static int devid_cmp(void *priv, const struct list_head *a,
		     const struct list_head *b)
{
	const struct btrfs_device *dev1, *dev2;

	dev1 = list_entry(a, struct btrfs_device, dev_list);
	dev2 = list_entry(b, struct btrfs_device, dev_list);

	if (dev1->devid < dev2->devid)
		return -1;
	else if (dev1->devid > dev2->devid)
		return 1;
	return 0;
}

int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       blk_mode_t flags, void *holder)
{
	int ret;

	lockdep_assert_held(&uuid_mutex);
	/*
	 * The device_list_mutex cannot be taken here in case opening the
	 * underlying device takes further locks like open_mutex.
	 *
	 * We also don't need the lock here as this is called during mount and
	 * exclusion is provided by uuid_mutex
	 */

	if (fs_devices->opened) {
		fs_devices->opened++;
		ret = 0;
	} else {
		list_sort(NULL, &fs_devices->devices, devid_cmp);
		ret = open_fs_devices(fs_devices, flags, holder);
	}

	return ret;
}

void btrfs_release_disk_super(struct btrfs_super_block *super)
{
	struct page *page = virt_to_page(super);

	put_page(page);
}

static struct btrfs_super_block *btrfs_read_disk_super(struct block_device *bdev,
						       u64 bytenr, u64 bytenr_orig)
{
	struct btrfs_super_block *disk_super;
	struct page *page;
	void *p;
	pgoff_t index;

	/* make sure our super fits in the device */
	if (bytenr + PAGE_SIZE >= bdev_nr_bytes(bdev))
		return ERR_PTR(-EINVAL);

	/* make sure our super fits in the page */
	if (sizeof(*disk_super) > PAGE_SIZE)
		return ERR_PTR(-EINVAL);

	/* make sure our super doesn't straddle pages on disk */
	index = bytenr >> PAGE_SHIFT;
	if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_SHIFT != index)
		return ERR_PTR(-EINVAL);

	/* pull in the page with our super */
	page = read_cache_page_gfp(bdev->bd_mapping, index, GFP_KERNEL);

	if (IS_ERR(page))
		return ERR_CAST(page);

	p = page_address(page);

	/* align our pointer to the offset of the super block */
	disk_super = p + offset_in_page(bytenr);

	if (btrfs_super_bytenr(disk_super) != bytenr_orig ||
	    btrfs_super_magic(disk_super) != BTRFS_MAGIC) {
		btrfs_release_disk_super(p);
		return ERR_PTR(-EINVAL);
	}

	if (disk_super->label[0] && disk_super->label[BTRFS_LABEL_SIZE - 1])
		disk_super->label[BTRFS_LABEL_SIZE - 1] = 0;

	return disk_super;
}
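
/*
 * Illustrative example (editor's addition): with 4K pages, the primary
 * superblock at byte offset 65536 maps to page index 65536 >> 12 == 16 and
 * offset_in_page(65536) == 0, so the page-straddle check above passes.
 */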

int btrfs_forget_devices(dev_t devt)
{
	int ret;

	mutex_lock(&uuid_mutex);
	ret = btrfs_free_stale_devices(devt, NULL);
	mutex_unlock(&uuid_mutex);

	return ret;
}

static bool btrfs_skip_registration(struct btrfs_super_block *disk_super,
				    const char *path, dev_t devt,
				    bool mount_arg_dev)
{
	struct btrfs_fs_devices *fs_devices;

	/*
	 * Do not skip device registration for mounted devices with matching
	 * maj:min but different paths. Booting without initrd relies on
	 * /dev/root initially, later replaced with the actual root device.
	 * A successful scan ensures grub2-probe selects the correct device.
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		struct btrfs_device *device;

		mutex_lock(&fs_devices->device_list_mutex);

		if (!fs_devices->opened) {
			mutex_unlock(&fs_devices->device_list_mutex);
			continue;
		}

		list_for_each_entry(device, &fs_devices->devices, dev_list) {
			if (device->bdev && (device->bdev->bd_dev == devt) &&
			    strcmp(device->name->str, path) != 0) {
				mutex_unlock(&fs_devices->device_list_mutex);

				/* Do not skip registration. */
				return false;
			}
		}
		mutex_unlock(&fs_devices->device_list_mutex);
	}

	if (!mount_arg_dev && btrfs_super_num_devices(disk_super) == 1 &&
	    !(btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING))
		return true;

	return false;
}

/*
 * Look for a btrfs signature on a device. This may be called out of the mount path
 * and we are not allowed to call set_blocksize during the scan. The superblock
 * is read via pagecache.
 *
 * With @mount_arg_dev it's a scan during mount time that will always register
 * the device or return an error. Multi-device and seeding devices are registered
 * in both cases.
 */
struct btrfs_device *btrfs_scan_one_device(const char *path, blk_mode_t flags,
					   bool mount_arg_dev)
{
	struct btrfs_super_block *disk_super;
	bool new_device_added = false;
	struct btrfs_device *device = NULL;
	struct file *bdev_file;
	char *canonical_path = NULL;
	u64 bytenr;
	dev_t devt;
	int ret;

	lockdep_assert_held(&uuid_mutex);

	if (!is_good_dev_path(path)) {
		canonical_path = kmalloc(PATH_MAX, GFP_KERNEL);
		if (canonical_path) {
			ret = get_canonical_dev_path(path, canonical_path);
			if (ret < 0) {
				kfree(canonical_path);
				canonical_path = NULL;
			}
		}
	}
	/*
	 * Avoid an exclusive open here, as systemd-udev may initiate the
	 * device scan which may race with the user's mount or mkfs command,
	 * resulting in failure.
	 * Since the device scan is solely for reading purposes, there is no
	 * need for an exclusive open. Additionally, the devices are read again
	 * during the mount process. It is ok to get some inconsistent
	 * values temporarily, as the device paths of the fsid are the only
	 * required information for assembling the volume.
	 */
	bdev_file = bdev_file_open_by_path(path, flags, NULL, NULL);
	if (IS_ERR(bdev_file))
		return ERR_CAST(bdev_file);

	/*
	 * We would like to check all the super blocks, but doing so would
	 * allow a mount to succeed after a mkfs from a different filesystem.
	 * Currently, recovery from a bad primary btrfs superblock is done
	 * using the userspace command 'btrfs check --super'.
	 */
	ret = btrfs_sb_log_location_bdev(file_bdev(bdev_file), 0, READ, &bytenr);
	if (ret) {
		device = ERR_PTR(ret);
		goto error_bdev_put;
	}

	disk_super = btrfs_read_disk_super(file_bdev(bdev_file), bytenr,
					   btrfs_sb_offset(0));
	if (IS_ERR(disk_super)) {
		device = ERR_CAST(disk_super);
		goto error_bdev_put;
	}

	devt = file_bdev(bdev_file)->bd_dev;
	if (btrfs_skip_registration(disk_super, path, devt, mount_arg_dev)) {
		pr_debug("BTRFS: skip registering single non-seed device %s (%d:%d)\n",
			  path, MAJOR(devt), MINOR(devt));

		btrfs_free_stale_devices(devt, NULL);

		device = NULL;
		goto free_disk_super;
	}

	device = device_list_add(canonical_path ? : path, disk_super,
				 &new_device_added);
	if (!IS_ERR(device) && new_device_added)
		btrfs_free_stale_devices(device->devt, device);

free_disk_super:
	btrfs_release_disk_super(disk_super);

error_bdev_put:
	fput(bdev_file);
	kfree(canonical_path);

	return device;
}

/*
 * Try to find a chunk that intersects [start, start + len] range and when one
 * such is found, record the end of it in *start
 */
static bool contains_pending_extent(struct btrfs_device *device, u64 *start,
				    u64 len)
{
	u64 physical_start, physical_end;

	lockdep_assert_held(&device->fs_info->chunk_mutex);

	if (find_first_extent_bit(&device->alloc_state, *start,
				  &physical_start, &physical_end,
				  CHUNK_ALLOCATED, NULL)) {

		if (in_range(physical_start, *start, len) ||
		    in_range(*start, physical_start,
			     physical_end + 1 - physical_start)) {
			*start = physical_end + 1;
			return true;
		}
	}
	return false;
}
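
/*
 * Illustrative example (editor's addition): if a CHUNK_ALLOCATED range covers
 * physical bytes [16M, 32M - 1] and the caller probes *start = 24M with
 * len = 16M, the ranges intersect, *start is bumped to 32M and the function
 * returns true.
 */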
1616 
dev_extent_search_start(struct btrfs_device * device)1617 static u64 dev_extent_search_start(struct btrfs_device *device)
1618 {
1619 	switch (device->fs_devices->chunk_alloc_policy) {
1620 	case BTRFS_CHUNK_ALLOC_REGULAR:
1621 		return BTRFS_DEVICE_RANGE_RESERVED;
1622 	case BTRFS_CHUNK_ALLOC_ZONED:
1623 		/*
1624 		 * We don't care about the starting region like regular
1625 		 * allocator, because we anyway use/reserve the first two zones
1626 		 * for superblock logging.
1627 		 */
1628 		return 0;
1629 	default:
1630 		BUG();
1631 	}
1632 }
1633 
dev_extent_hole_check_zoned(struct btrfs_device * device,u64 * hole_start,u64 * hole_size,u64 num_bytes)1634 static bool dev_extent_hole_check_zoned(struct btrfs_device *device,
1635 					u64 *hole_start, u64 *hole_size,
1636 					u64 num_bytes)
1637 {
1638 	u64 zone_size = device->zone_info->zone_size;
1639 	u64 pos;
1640 	int ret;
1641 	bool changed = false;
1642 
1643 	ASSERT(IS_ALIGNED(*hole_start, zone_size));
1644 
1645 	while (*hole_size > 0) {
1646 		pos = btrfs_find_allocatable_zones(device, *hole_start,
1647 						   *hole_start + *hole_size,
1648 						   num_bytes);
1649 		if (pos != *hole_start) {
1650 			*hole_size = *hole_start + *hole_size - pos;
1651 			*hole_start = pos;
1652 			changed = true;
1653 			if (*hole_size < num_bytes)
1654 				break;
1655 		}
1656 
1657 		ret = btrfs_ensure_empty_zones(device, pos, num_bytes);
1658 
1659 		/* Range is ensured to be empty */
1660 		if (!ret)
1661 			return changed;
1662 
1663 		/* Given hole range was invalid (outside of device) */
1664 		if (ret == -ERANGE) {
1665 			*hole_start += *hole_size;
1666 			*hole_size = 0;
1667 			return true;
1668 		}
1669 
1670 		*hole_start += zone_size;
1671 		*hole_size -= zone_size;
1672 		changed = true;
1673 	}
1674 
1675 	return changed;
1676 }
1677 
1678 /*
1679  * Check if specified hole is suitable for allocation.
1680  *
1681  * @device:	the device which we have the hole
1682  * @hole_start: starting position of the hole
1683  * @hole_size:	the size of the hole
1684  * @num_bytes:	the size of the free space that we need
1685  *
1686  * This function may modify @hole_start and @hole_size to reflect the suitable
1687  * position for allocation. Returns 1 if hole position is updated, 0 otherwise.
1688  */
dev_extent_hole_check(struct btrfs_device * device,u64 * hole_start,u64 * hole_size,u64 num_bytes)1689 static bool dev_extent_hole_check(struct btrfs_device *device, u64 *hole_start,
1690 				  u64 *hole_size, u64 num_bytes)
1691 {
1692 	bool changed = false;
1693 	u64 hole_end = *hole_start + *hole_size;
1694 
1695 	for (;;) {
1696 		/*
1697 		 * Check before we set max_hole_start, otherwise we could end up
1698 		 * sending back this offset anyway.
1699 		 */
1700 		if (contains_pending_extent(device, hole_start, *hole_size)) {
1701 			if (hole_end >= *hole_start)
1702 				*hole_size = hole_end - *hole_start;
1703 			else
1704 				*hole_size = 0;
1705 			changed = true;
1706 		}
1707 
1708 		switch (device->fs_devices->chunk_alloc_policy) {
1709 		case BTRFS_CHUNK_ALLOC_REGULAR:
1710 			/* No extra check */
1711 			break;
1712 		case BTRFS_CHUNK_ALLOC_ZONED:
1713 			if (dev_extent_hole_check_zoned(device, hole_start,
1714 							hole_size, num_bytes)) {
1715 				changed = true;
1716 				/*
1717 				 * The changed hole can contain pending extent.
1718 				 * Loop again to check that.
1719 				 */
1720 				continue;
1721 			}
1722 			break;
1723 		default:
1724 			BUG();
1725 		}
1726 
1727 		break;
1728 	}
1729 
1730 	return changed;
1731 }
1732 
1733 /*
1734  * Find free space in the specified device.
1735  *
1736  * @device:	  the device in which we search for the free space
1737  * @num_bytes:	  the size of the free space that we need
1738  * @start:	  store the start of the free space that we find; if we don't
1739  *		  find suitable free space, it stores the start position of
1740  *		  the max free space
1741  * @len:	  store the size of the free space that we find; if we don't
1742  *		  find suitable free space, it stores the size of the max
1743  *		  free space
1744  *
1745  * This does a pretty simple search; the expectation is that it is called
1746  * very infrequently and that a given device has a small number of extents.
1747  *
1748  * The search begins at the offset returned by dev_extent_search_start(),
1749  * so the area reserved at the beginning of each device is never considered.
1750  *
1751  * NOTE: This function searches the *commit* root of the device tree, and
1752  * does an extra check to ensure dev extents are not double allocated.
1753  * This makes the function safe for allocating dev extents, but it may not
1754  * report the correct usable device space, as device extents freed in the
1755  * current transaction are not reported as available.
1759  */
1760 static int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
1761 				u64 *start, u64 *len)
1762 {
1763 	struct btrfs_fs_info *fs_info = device->fs_info;
1764 	struct btrfs_root *root = fs_info->dev_root;
1765 	struct btrfs_key key;
1766 	struct btrfs_dev_extent *dev_extent;
1767 	struct btrfs_path *path;
1768 	u64 search_start;
1769 	u64 hole_size;
1770 	u64 max_hole_start;
1771 	u64 max_hole_size = 0;
1772 	u64 extent_end;
1773 	u64 search_end = device->total_bytes;
1774 	int ret;
1775 	int slot;
1776 	struct extent_buffer *l;
1777 
1778 	search_start = dev_extent_search_start(device);
1779 	max_hole_start = search_start;
1780 
1781 	WARN_ON(device->zone_info &&
1782 		!IS_ALIGNED(num_bytes, device->zone_info->zone_size));
1783 
1784 	path = btrfs_alloc_path();
1785 	if (!path) {
1786 		ret = -ENOMEM;
1787 		goto out;
1788 	}
1789 again:
1790 	if (search_start >= search_end ||
1791 	    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
1792 		ret = -ENOSPC;
1793 		goto out;
1794 	}
1795 
1796 	path->reada = READA_FORWARD;
1797 	path->search_commit_root = 1;
1798 	path->skip_locking = 1;
1799 
1800 	key.objectid = device->devid;
1801 	key.offset = search_start;
1802 	key.type = BTRFS_DEV_EXTENT_KEY;
1803 
1804 	ret = btrfs_search_backwards(root, &key, path);
1805 	if (ret < 0)
1806 		goto out;
1807 
1808 	while (search_start < search_end) {
1809 		l = path->nodes[0];
1810 		slot = path->slots[0];
1811 		if (slot >= btrfs_header_nritems(l)) {
1812 			ret = btrfs_next_leaf(root, path);
1813 			if (ret == 0)
1814 				continue;
1815 			if (ret < 0)
1816 				goto out;
1817 
1818 			break;
1819 		}
1820 		btrfs_item_key_to_cpu(l, &key, slot);
1821 
1822 		if (key.objectid < device->devid)
1823 			goto next;
1824 
1825 		if (key.objectid > device->devid)
1826 			break;
1827 
1828 		if (key.type != BTRFS_DEV_EXTENT_KEY)
1829 			goto next;
1830 
1831 		if (key.offset > search_end)
1832 			break;
1833 
1834 		if (key.offset > search_start) {
1835 			hole_size = key.offset - search_start;
1836 			dev_extent_hole_check(device, &search_start, &hole_size,
1837 					      num_bytes);
1838 
1839 			if (hole_size > max_hole_size) {
1840 				max_hole_start = search_start;
1841 				max_hole_size = hole_size;
1842 			}
1843 
1844 			/*
1845 			 * If this free space is greater than what we need,
1846 			 * it must be the max free space that we have found
1847 			 * so far, so max_hole_start must point to the start
1848 			 * of this free space and the length of this free space
1849 			 * is stored in max_hole_size. Thus, we return
1850 			 * max_hole_start and max_hole_size and go back to the
1851 			 * caller.
1852 			 */
1853 			if (hole_size >= num_bytes) {
1854 				ret = 0;
1855 				goto out;
1856 			}
1857 		}
1858 
1859 		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
1860 		extent_end = key.offset + btrfs_dev_extent_length(l,
1861 								  dev_extent);
1862 		if (extent_end > search_start)
1863 			search_start = extent_end;
1864 next:
1865 		path->slots[0]++;
1866 		cond_resched();
1867 	}
1868 
1869 	/*
1870 	 * At this point, search_start should be the end of
1871 	 * allocated dev extents, and when shrinking the device,
1872 	 * search_end may be smaller than search_start.
1873 	 */
1874 	if (search_end > search_start) {
1875 		hole_size = search_end - search_start;
1876 		if (dev_extent_hole_check(device, &search_start, &hole_size,
1877 					  num_bytes)) {
1878 			btrfs_release_path(path);
1879 			goto again;
1880 		}
1881 
1882 		if (hole_size > max_hole_size) {
1883 			max_hole_start = search_start;
1884 			max_hole_size = hole_size;
1885 		}
1886 	}
1887 
1888 	/* See above. */
1889 	if (max_hole_size < num_bytes)
1890 		ret = -ENOSPC;
1891 	else
1892 		ret = 0;
1893 
1894 	ASSERT(max_hole_start + max_hole_size <= search_end);
1895 out:
1896 	btrfs_free_path(path);
1897 	*start = max_hole_start;
1898 	if (len)
1899 		*len = max_hole_size;
1900 	return ret;
1901 }
1902 
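/*
 * A minimal usage sketch for find_free_dev_extent() (the requested size is
 * hypothetical; only the calling convention is taken from this file):
 *
 *	u64 start;
 *	u64 len;
 *	int ret;
 *
 *	ret = find_free_dev_extent(device, SZ_1G, &start, &len);
 *	if (ret == 0) {
 *		// start is the beginning of a hole of at least SZ_1G bytes
 *	} else if (ret == -ENOSPC) {
 *		// start/len describe the largest hole found, smaller than SZ_1G
 *	}
 */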
1903 static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
1904 			  struct btrfs_device *device,
1905 			  u64 start, u64 *dev_extent_len)
1906 {
1907 	struct btrfs_fs_info *fs_info = device->fs_info;
1908 	struct btrfs_root *root = fs_info->dev_root;
1909 	int ret;
1910 	struct btrfs_path *path;
1911 	struct btrfs_key key;
1912 	struct btrfs_key found_key;
1913 	struct extent_buffer *leaf = NULL;
1914 	struct btrfs_dev_extent *extent = NULL;
1915 
1916 	path = btrfs_alloc_path();
1917 	if (!path)
1918 		return -ENOMEM;
1919 
1920 	key.objectid = device->devid;
1921 	key.offset = start;
1922 	key.type = BTRFS_DEV_EXTENT_KEY;
1923 again:
1924 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1925 	if (ret > 0) {
1926 		ret = btrfs_previous_item(root, path, key.objectid,
1927 					  BTRFS_DEV_EXTENT_KEY);
1928 		if (ret)
1929 			goto out;
1930 		leaf = path->nodes[0];
1931 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1932 		extent = btrfs_item_ptr(leaf, path->slots[0],
1933 					struct btrfs_dev_extent);
1934 		BUG_ON(found_key.offset > start || found_key.offset +
1935 		       btrfs_dev_extent_length(leaf, extent) < start);
1936 		key = found_key;
1937 		btrfs_release_path(path);
1938 		goto again;
1939 	} else if (ret == 0) {
1940 		leaf = path->nodes[0];
1941 		extent = btrfs_item_ptr(leaf, path->slots[0],
1942 					struct btrfs_dev_extent);
1943 	} else {
1944 		goto out;
1945 	}
1946 
1947 	*dev_extent_len = btrfs_dev_extent_length(leaf, extent);
1948 
1949 	ret = btrfs_del_item(trans, root, path);
1950 	if (ret == 0)
1951 		set_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags);
1952 out:
1953 	btrfs_free_path(path);
1954 	return ret;
1955 }
1956 
1957 static u64 find_next_chunk(struct btrfs_fs_info *fs_info)
1958 {
1959 	struct rb_node *n;
1960 	u64 ret = 0;
1961 
1962 	read_lock(&fs_info->mapping_tree_lock);
1963 	n = rb_last(&fs_info->mapping_tree.rb_root);
1964 	if (n) {
1965 		struct btrfs_chunk_map *map;
1966 
1967 		map = rb_entry(n, struct btrfs_chunk_map, rb_node);
1968 		ret = map->start + map->chunk_len;
1969 	}
1970 	read_unlock(&fs_info->mapping_tree_lock);
1971 
1972 	return ret;
1973 }
1974 
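/*
 * Worked example for find_next_chunk() (hypothetical numbers): if the
 * right-most chunk mapping starts at logical 1 GiB and spans 256 MiB, the
 * returned offset is 1 GiB + 256 MiB, the first logical address past every
 * existing chunk; on an empty mapping tree it returns 0.
 */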
1975 static noinline int find_next_devid(struct btrfs_fs_info *fs_info,
1976 				    u64 *devid_ret)
1977 {
1978 	int ret;
1979 	struct btrfs_key key;
1980 	struct btrfs_key found_key;
1981 	struct btrfs_path *path;
1982 
1983 	path = btrfs_alloc_path();
1984 	if (!path)
1985 		return -ENOMEM;
1986 
1987 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1988 	key.type = BTRFS_DEV_ITEM_KEY;
1989 	key.offset = (u64)-1;
1990 
1991 	ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0);
1992 	if (ret < 0)
1993 		goto error;
1994 
1995 	if (ret == 0) {
1996 		/* Corruption */
1997 		btrfs_err(fs_info, "corrupted chunk tree devid -1 matched");
1998 		ret = -EUCLEAN;
1999 		goto error;
2000 	}
2001 
2002 	ret = btrfs_previous_item(fs_info->chunk_root, path,
2003 				  BTRFS_DEV_ITEMS_OBJECTID,
2004 				  BTRFS_DEV_ITEM_KEY);
2005 	if (ret) {
2006 		*devid_ret = 1;
2007 	} else {
2008 		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
2009 				      path->slots[0]);
2010 		*devid_ret = found_key.offset + 1;
2011 	}
2012 	ret = 0;
2013 error:
2014 	btrfs_free_path(path);
2015 	return ret;
2016 }
2017 
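/*
 * Worked example for find_next_devid() (hypothetical devids): searching for
 * (BTRFS_DEV_ITEMS_OBJECTID, BTRFS_DEV_ITEM_KEY, (u64)-1) and stepping back
 * one item lands on the dev item with the highest devid. With devids
 * {1, 2, 5} present, found_key.offset is 5 and *devid_ret becomes 6; with no
 * dev items at all, *devid_ret falls back to 1.
 */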
2018 /*
2019  * The device information is stored in the chunk root.
2020  * The btrfs_device struct should be fully filled in before calling this.
2021  */
2022 static int btrfs_add_dev_item(struct btrfs_trans_handle *trans,
2023 			    struct btrfs_device *device)
2024 {
2025 	int ret;
2026 	struct btrfs_path *path;
2027 	struct btrfs_dev_item *dev_item;
2028 	struct extent_buffer *leaf;
2029 	struct btrfs_key key;
2030 	unsigned long ptr;
2031 
2032 	path = btrfs_alloc_path();
2033 	if (!path)
2034 		return -ENOMEM;
2035 
2036 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2037 	key.type = BTRFS_DEV_ITEM_KEY;
2038 	key.offset = device->devid;
2039 
2040 	btrfs_reserve_chunk_metadata(trans, true);
2041 	ret = btrfs_insert_empty_item(trans, trans->fs_info->chunk_root, path,
2042 				      &key, sizeof(*dev_item));
2043 	btrfs_trans_release_chunk_metadata(trans);
2044 	if (ret)
2045 		goto out;
2046 
2047 	leaf = path->nodes[0];
2048 	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
2049 
2050 	btrfs_set_device_id(leaf, dev_item, device->devid);
2051 	btrfs_set_device_generation(leaf, dev_item, 0);
2052 	btrfs_set_device_type(leaf, dev_item, device->type);
2053 	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
2054 	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
2055 	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
2056 	btrfs_set_device_total_bytes(leaf, dev_item,
2057 				     btrfs_device_get_disk_total_bytes(device));
2058 	btrfs_set_device_bytes_used(leaf, dev_item,
2059 				    btrfs_device_get_bytes_used(device));
2060 	btrfs_set_device_group(leaf, dev_item, 0);
2061 	btrfs_set_device_seek_speed(leaf, dev_item, 0);
2062 	btrfs_set_device_bandwidth(leaf, dev_item, 0);
2063 	btrfs_set_device_start_offset(leaf, dev_item, 0);
2064 
2065 	ptr = btrfs_device_uuid(dev_item);
2066 	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
2067 	ptr = btrfs_device_fsid(dev_item);
2068 	write_extent_buffer(leaf, trans->fs_info->fs_devices->metadata_uuid,
2069 			    ptr, BTRFS_FSID_SIZE);
2070 
2071 	ret = 0;
2072 out:
2073 	btrfs_free_path(path);
2074 	return ret;
2075 }
2076 
2077 /*
2078  * Function to update ctime/mtime for a given device path.
2079  * Mainly used for ctime/mtime-based probes like libblkid.
2080  *
2081  * We don't care about errors here, this is just to be kind to userspace.
2082  */
2083 static void update_dev_time(const char *device_path)
2084 {
2085 	struct path path;
2086 	int ret;
2087 
2088 	ret = kern_path(device_path, LOOKUP_FOLLOW, &path);
2089 	if (ret)
2090 		return;
2091 
2092 	inode_update_time(d_inode(path.dentry), S_MTIME | S_CTIME | S_VERSION);
2093 	path_put(&path);
2094 }
2095 
2096 static int btrfs_rm_dev_item(struct btrfs_trans_handle *trans,
2097 			     struct btrfs_device *device)
2098 {
2099 	struct btrfs_root *root = device->fs_info->chunk_root;
2100 	int ret;
2101 	struct btrfs_path *path;
2102 	struct btrfs_key key;
2103 
2104 	path = btrfs_alloc_path();
2105 	if (!path)
2106 		return -ENOMEM;
2107 
2108 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2109 	key.type = BTRFS_DEV_ITEM_KEY;
2110 	key.offset = device->devid;
2111 
2112 	btrfs_reserve_chunk_metadata(trans, false);
2113 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2114 	btrfs_trans_release_chunk_metadata(trans);
2115 	if (ret) {
2116 		if (ret > 0)
2117 			ret = -ENOENT;
2118 		goto out;
2119 	}
2120 
2121 	ret = btrfs_del_item(trans, root, path);
2122 out:
2123 	btrfs_free_path(path);
2124 	return ret;
2125 }
2126 
2127 /*
2128  * Verify that @num_devices satisfies the RAID profile constraints in the whole
2129  * filesystem. It's up to the caller to adjust that number, e.g. for device
2130  * replace.
2131  */
2132 static int btrfs_check_raid_min_devices(struct btrfs_fs_info *fs_info,
2133 		u64 num_devices)
2134 {
2135 	u64 all_avail;
2136 	unsigned seq;
2137 	int i;
2138 
2139 	do {
2140 		seq = read_seqbegin(&fs_info->profiles_lock);
2141 
2142 		all_avail = fs_info->avail_data_alloc_bits |
2143 			    fs_info->avail_system_alloc_bits |
2144 			    fs_info->avail_metadata_alloc_bits;
2145 	} while (read_seqretry(&fs_info->profiles_lock, seq));
2146 
2147 	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
2148 		if (!(all_avail & btrfs_raid_array[i].bg_flag))
2149 			continue;
2150 
2151 		if (num_devices < btrfs_raid_array[i].devs_min)
2152 			return btrfs_raid_array[i].mindev_error;
2153 	}
2154 
2155 	return 0;
2156 }
2157 
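/*
 * Worked example for btrfs_check_raid_min_devices(): on a two-device
 * filesystem with a RAID1 profile in use (whose minimum is two devices), a
 * device removal passes num_devices - 1 == 1, which is below the profile's
 * devs_min, so the profile's mindev_error is returned and the removal is
 * refused before any state changes.
 */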
2158 static struct btrfs_device *btrfs_find_next_active_device(
2159 		struct btrfs_fs_devices *fs_devs, struct btrfs_device *device)
2160 {
2161 	struct btrfs_device *next_device;
2162 
2163 	list_for_each_entry(next_device, &fs_devs->devices, dev_list) {
2164 		if (next_device != device &&
2165 		    !test_bit(BTRFS_DEV_STATE_MISSING, &next_device->dev_state)
2166 		    && next_device->bdev)
2167 			return next_device;
2168 	}
2169 
2170 	return NULL;
2171 }
2172 
2173 /*
2174  * Helper function to check if the given device is part of s_bdev / latest_dev
2175  * and replace it with the provided or the next active device. In the context
2176  * where this function is called, there should always be another device (or
2177  * this_dev) which is active.
2178  */
2179 void __cold btrfs_assign_next_active_device(struct btrfs_device *device,
2180 					    struct btrfs_device *next_device)
2181 {
2182 	struct btrfs_fs_info *fs_info = device->fs_info;
2183 
2184 	if (!next_device)
2185 		next_device = btrfs_find_next_active_device(fs_info->fs_devices,
2186 							    device);
2187 	ASSERT(next_device);
2188 
2189 	if (fs_info->sb->s_bdev &&
2190 	    (fs_info->sb->s_bdev == device->bdev))
2191 		fs_info->sb->s_bdev = next_device->bdev;
2192 
2193 	if (fs_info->fs_devices->latest_dev->bdev == device->bdev)
2194 		fs_info->fs_devices->latest_dev = next_device;
2195 }
2196 
2197 /*
2198  * Return btrfs_fs_devices::num_devices excluding the device that's currently
2199  * being replaced.
2200  */
2201 static u64 btrfs_num_devices(struct btrfs_fs_info *fs_info)
2202 {
2203 	u64 num_devices = fs_info->fs_devices->num_devices;
2204 
2205 	down_read(&fs_info->dev_replace.rwsem);
2206 	if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
2207 		ASSERT(num_devices > 1);
2208 		num_devices--;
2209 	}
2210 	up_read(&fs_info->dev_replace.rwsem);
2211 
2212 	return num_devices;
2213 }
2214 
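/*
 * Worked example for btrfs_num_devices(): with fs_devices->num_devices == 4
 * and a device replace in flight, this returns 3, so the replace target does
 * not count toward checks such as btrfs_check_raid_min_devices() above.
 */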
2215 static void btrfs_scratch_superblock(struct btrfs_fs_info *fs_info,
2216 				     struct block_device *bdev, int copy_num)
2217 {
2218 	struct btrfs_super_block *disk_super;
2219 	const size_t len = sizeof(disk_super->magic);
2220 	const u64 bytenr = btrfs_sb_offset(copy_num);
2221 	int ret;
2222 
2223 	disk_super = btrfs_read_disk_super(bdev, bytenr, bytenr);
2224 	if (IS_ERR(disk_super))
2225 		return;
2226 
2227 	memset(&disk_super->magic, 0, len);
2228 	folio_mark_dirty(virt_to_folio(disk_super));
2229 	btrfs_release_disk_super(disk_super);
2230 
2231 	ret = sync_blockdev_range(bdev, bytenr, bytenr + len - 1);
2232 	if (ret)
2233 		btrfs_warn(fs_info, "error clearing superblock number %d (%d)",
2234 			copy_num, ret);
2235 }
2236 
2237 void btrfs_scratch_superblocks(struct btrfs_fs_info *fs_info, struct btrfs_device *device)
2238 {
2239 	int copy_num;
2240 	struct block_device *bdev = device->bdev;
2241 
2242 	if (!bdev)
2243 		return;
2244 
2245 	for (copy_num = 0; copy_num < BTRFS_SUPER_MIRROR_MAX; copy_num++) {
2246 		if (bdev_is_zoned(bdev))
2247 			btrfs_reset_sb_log_zones(bdev, copy_num);
2248 		else
2249 			btrfs_scratch_superblock(fs_info, bdev, copy_num);
2250 	}
2251 
2252 	/* Notify udev that device has changed */
2253 	btrfs_kobject_uevent(bdev, KOBJ_CHANGE);
2254 
2255 	/* Update ctime/mtime for device path for libblkid */
2256 	update_dev_time(device->name->str);
2257 }
2258 
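/*
 * Note on the loop above: btrfs_sb_offset(copy_num) yields the fixed
 * superblock mirror offsets (64 KiB, 64 MiB and 256 GiB for copies 0..2),
 * so every mirror that fits on the device gets its magic wiped; mirrors past
 * the end of the device are skipped because btrfs_read_disk_super() fails
 * for them.
 */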
2259 int btrfs_rm_device(struct btrfs_fs_info *fs_info,
2260 		    struct btrfs_dev_lookup_args *args,
2261 		    struct file **bdev_file)
2262 {
2263 	struct btrfs_trans_handle *trans;
2264 	struct btrfs_device *device;
2265 	struct btrfs_fs_devices *cur_devices;
2266 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2267 	u64 num_devices;
2268 	int ret = 0;
2269 
2270 	if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) {
2271 		btrfs_err(fs_info, "device remove not supported on extent tree v2 yet");
2272 		return -EINVAL;
2273 	}
2274 
2275 	/*
2276 	 * The device list in fs_devices is accessed without locks (neither
2277 	 * uuid_mutex nor device_list_mutex) as it won't change on a mounted
2278 	 * filesystem and another device rm cannot run.
2279 	 */
2280 	num_devices = btrfs_num_devices(fs_info);
2281 
2282 	ret = btrfs_check_raid_min_devices(fs_info, num_devices - 1);
2283 	if (ret)
2284 		return ret;
2285 
2286 	device = btrfs_find_device(fs_info->fs_devices, args);
2287 	if (!device) {
2288 		if (args->missing)
2289 			ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND;
2290 		else
2291 			ret = -ENOENT;
2292 		return ret;
2293 	}
2294 
2295 	if (btrfs_pinned_by_swapfile(fs_info, device)) {
2296 		btrfs_warn_in_rcu(fs_info,
2297 		  "cannot remove device %s (devid %llu) due to active swapfile",
2298 				  btrfs_dev_name(device), device->devid);
2299 		return -ETXTBSY;
2300 	}
2301 
2302 	if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
2303 		return BTRFS_ERROR_DEV_TGT_REPLACE;
2304 
2305 	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
2306 	    fs_info->fs_devices->rw_devices == 1)
2307 		return BTRFS_ERROR_DEV_ONLY_WRITABLE;
2308 
2309 	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
2310 		mutex_lock(&fs_info->chunk_mutex);
2311 		list_del_init(&device->dev_alloc_list);
2312 		device->fs_devices->rw_devices--;
2313 		mutex_unlock(&fs_info->chunk_mutex);
2314 	}
2315 
2316 	ret = btrfs_shrink_device(device, 0);
2317 	if (ret)
2318 		goto error_undo;
2319 
2320 	trans = btrfs_start_transaction(fs_info->chunk_root, 0);
2321 	if (IS_ERR(trans)) {
2322 		ret = PTR_ERR(trans);
2323 		goto error_undo;
2324 	}
2325 
2326 	ret = btrfs_rm_dev_item(trans, device);
2327 	if (ret) {
2328 		/* Any error in dev item removal is critical */
2329 		btrfs_crit(fs_info,
2330 			   "failed to remove device item for devid %llu: %d",
2331 			   device->devid, ret);
2332 		btrfs_abort_transaction(trans, ret);
2333 		btrfs_end_transaction(trans);
2334 		return ret;
2335 	}
2336 
2337 	clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
2338 	btrfs_scrub_cancel_dev(device);
2339 
2340 	/*
2341 	 * the device list mutex makes sure that we don't change
2342 	 * the device list while someone else is writing out all
2343 	 * the device supers. Whoever is writing all supers, should
2344 	 * lock the device list mutex before getting the number of
2345 	 * devices in the super block (super_copy). Conversely,
2346 	 * whoever updates the number of devices in the super block
2347 	 * (super_copy) should hold the device list mutex.
2348 	 */
2349 
2350 	/*
2351 	 * In normal cases cur_devices == fs_devices. But in case
2352 	 * of deleting a seed device, cur_devices should point to
2353 	 * its own fs_devices listed under the fs_devices->seed_list.
2354 	 */
2355 	cur_devices = device->fs_devices;
2356 	mutex_lock(&fs_devices->device_list_mutex);
2357 	list_del_rcu(&device->dev_list);
2358 
2359 	cur_devices->num_devices--;
2360 	cur_devices->total_devices--;
2361 	/* Update total_devices of the parent fs_devices if it's seed */
2362 	if (cur_devices != fs_devices)
2363 		fs_devices->total_devices--;
2364 
2365 	if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
2366 		cur_devices->missing_devices--;
2367 
2368 	btrfs_assign_next_active_device(device, NULL);
2369 
2370 	if (device->bdev_file) {
2371 		cur_devices->open_devices--;
2372 		/* remove sysfs entry */
2373 		btrfs_sysfs_remove_device(device);
2374 	}
2375 
2376 	num_devices = btrfs_super_num_devices(fs_info->super_copy) - 1;
2377 	btrfs_set_super_num_devices(fs_info->super_copy, num_devices);
2378 	mutex_unlock(&fs_devices->device_list_mutex);
2379 
2380 	/*
2381 	 * At this point, the device is zero sized and detached from the
2382 	 * devices list.  All that's left is to zero out the old supers and
2383 	 * free the device.
2384 	 *
2385 	 * We cannot call btrfs_close_bdev() here because we're holding the sb
2386 	 * write lock, and fput() on the block device will pull in the
2387 	 * ->open_mutex on the block device and its dependencies. Instead,
2388 	 * just flush the device and let the caller do the final bdev_release.
2389 	 */
2390 	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
2391 		btrfs_scratch_superblocks(fs_info, device);
2392 		if (device->bdev) {
2393 			sync_blockdev(device->bdev);
2394 			invalidate_bdev(device->bdev);
2395 		}
2396 	}
2397 
2398 	*bdev_file = device->bdev_file;
2399 	synchronize_rcu();
2400 	btrfs_free_device(device);
2401 
2402 	/*
2403 	 * This can happen if cur_devices is the private seed devices list.  We
2404 	 * cannot call close_fs_devices() here because it expects the uuid_mutex
2405 	 * to be held, but in fact we don't need that for the private
2406 	 * seed_devices, we can simply decrement cur_devices->opened and then
2407 	 * remove it from our list and free the fs_devices.
2408 	 */
2409 	if (cur_devices->num_devices == 0) {
2410 		list_del_init(&cur_devices->seed_list);
2411 		ASSERT(cur_devices->opened == 1);
2412 		cur_devices->opened--;
2413 		free_fs_devices(cur_devices);
2414 	}
2415 
2416 	ret = btrfs_commit_transaction(trans);
2417 
2418 	return ret;
2419 
2420 error_undo:
2421 	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
2422 		mutex_lock(&fs_info->chunk_mutex);
2423 		list_add(&device->dev_alloc_list,
2424 			 &fs_devices->alloc_list);
2425 		device->fs_devices->rw_devices++;
2426 		mutex_unlock(&fs_info->chunk_mutex);
2427 	}
2428 	return ret;
2429 }
2430 
2431 void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device *srcdev)
2432 {
2433 	struct btrfs_fs_devices *fs_devices;
2434 
2435 	lockdep_assert_held(&srcdev->fs_info->fs_devices->device_list_mutex);
2436 
2437 	/*
2438 	 * In case of a filesystem with no seed, srcdev->fs_devices will point
2439 	 * to the fs_devices of fs_info. However, when the device being replaced
2440 	 * is a seed device, it will point to the seed's local fs_devices. In
2441 	 * short, srcdev will have its correct fs_devices in both cases.
2442 	 */
2443 	fs_devices = srcdev->fs_devices;
2444 
2445 	list_del_rcu(&srcdev->dev_list);
2446 	list_del(&srcdev->dev_alloc_list);
2447 	fs_devices->num_devices--;
2448 	if (test_bit(BTRFS_DEV_STATE_MISSING, &srcdev->dev_state))
2449 		fs_devices->missing_devices--;
2450 
2451 	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &srcdev->dev_state))
2452 		fs_devices->rw_devices--;
2453 
2454 	if (srcdev->bdev)
2455 		fs_devices->open_devices--;
2456 }
2457 
2458 void btrfs_rm_dev_replace_free_srcdev(struct btrfs_device *srcdev)
2459 {
2460 	struct btrfs_fs_devices *fs_devices = srcdev->fs_devices;
2461 
2462 	mutex_lock(&uuid_mutex);
2463 
2464 	btrfs_close_bdev(srcdev);
2465 	synchronize_rcu();
2466 	btrfs_free_device(srcdev);
2467 
2468 	/* If there are no devices left, we'd rather delete the fs_devices. */
2469 	if (!fs_devices->num_devices) {
2470 		/*
2471 		 * On a mounted FS, num_devices can't be zero unless it's a
2472 		 * seed. In case of a seed device being replaced, the replace
2473 		 * target is added to the sprout FS, so there will be no more
2474 		 * devices left under the seed FS.
2475 		 */
2476 		ASSERT(fs_devices->seeding);
2477 
2478 		list_del_init(&fs_devices->seed_list);
2479 		close_fs_devices(fs_devices);
2480 		free_fs_devices(fs_devices);
2481 	}
2482 	mutex_unlock(&uuid_mutex);
2483 }
2484 
2485 void btrfs_destroy_dev_replace_tgtdev(struct btrfs_device *tgtdev)
2486 {
2487 	struct btrfs_fs_devices *fs_devices = tgtdev->fs_info->fs_devices;
2488 
2489 	mutex_lock(&fs_devices->device_list_mutex);
2490 
2491 	btrfs_sysfs_remove_device(tgtdev);
2492 
2493 	if (tgtdev->bdev)
2494 		fs_devices->open_devices--;
2495 
2496 	fs_devices->num_devices--;
2497 
2498 	btrfs_assign_next_active_device(tgtdev, NULL);
2499 
2500 	list_del_rcu(&tgtdev->dev_list);
2501 
2502 	mutex_unlock(&fs_devices->device_list_mutex);
2503 
2504 	btrfs_scratch_superblocks(tgtdev->fs_info, tgtdev);
2505 
2506 	btrfs_close_bdev(tgtdev);
2507 	synchronize_rcu();
2508 	btrfs_free_device(tgtdev);
2509 }
2510 
2511 /*
2512  * Populate args from device at path.
2513  *
2514  * @fs_info:	the filesystem
2515  * @args:	the args to populate
2516  * @path:	the path to the device
2517  *
2518  * This will read the super block of the device at @path and populate @args with
2519  * the devid, fsid, and uuid.  This is meant to be used for ioctls that need to
2520  * lookup a device to operate on, but need to do it before we take any locks.
2521  * This properly handles the special case of "missing" that a user may pass in,
2522  * and does some basic sanity checks.  The caller must make sure that @path is
2523  * properly NUL terminated before calling in, and must call
2524  * btrfs_put_dev_args_from_path() in order to free up the temporary fsid and
2525  * uuid buffers.
2526  *
2527  * Return: 0 for success, -errno for failure
2528  */
2529 int btrfs_get_dev_args_from_path(struct btrfs_fs_info *fs_info,
2530 				 struct btrfs_dev_lookup_args *args,
2531 				 const char *path)
2532 {
2533 	struct btrfs_super_block *disk_super;
2534 	struct file *bdev_file;
2535 	int ret;
2536 
2537 	if (!path || !path[0])
2538 		return -EINVAL;
2539 	if (!strcmp(path, "missing")) {
2540 		args->missing = true;
2541 		return 0;
2542 	}
2543 
2544 	args->uuid = kzalloc(BTRFS_UUID_SIZE, GFP_KERNEL);
2545 	args->fsid = kzalloc(BTRFS_FSID_SIZE, GFP_KERNEL);
2546 	if (!args->uuid || !args->fsid) {
2547 		btrfs_put_dev_args_from_path(args);
2548 		return -ENOMEM;
2549 	}
2550 
2551 	ret = btrfs_get_bdev_and_sb(path, BLK_OPEN_READ, NULL, 0,
2552 				    &bdev_file, &disk_super);
2553 	if (ret) {
2554 		btrfs_put_dev_args_from_path(args);
2555 		return ret;
2556 	}
2557 
2558 	args->devid = btrfs_stack_device_id(&disk_super->dev_item);
2559 	memcpy(args->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE);
2560 	if (btrfs_fs_incompat(fs_info, METADATA_UUID))
2561 		memcpy(args->fsid, disk_super->metadata_uuid, BTRFS_FSID_SIZE);
2562 	else
2563 		memcpy(args->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
2564 	btrfs_release_disk_super(disk_super);
2565 	fput(bdev_file);
2566 	return 0;
2567 }
2568 
2569 /*
2570  * Only use this jointly with btrfs_get_dev_args_from_path() because we will
2571  * allocate our ->uuid and ->fsid pointers, everybody else uses local variables
2572  * that don't need to be freed.
2573  */
2574 void btrfs_put_dev_args_from_path(struct btrfs_dev_lookup_args *args)
2575 {
2576 	kfree(args->uuid);
2577 	kfree(args->fsid);
2578 	args->uuid = NULL;
2579 	args->fsid = NULL;
2580 }
2581 
2582 struct btrfs_device *btrfs_find_device_by_devspec(
2583 		struct btrfs_fs_info *fs_info, u64 devid,
2584 		const char *device_path)
2585 {
2586 	BTRFS_DEV_LOOKUP_ARGS(args);
2587 	struct btrfs_device *device;
2588 	int ret;
2589 
2590 	if (devid) {
2591 		args.devid = devid;
2592 		device = btrfs_find_device(fs_info->fs_devices, &args);
2593 		if (!device)
2594 			return ERR_PTR(-ENOENT);
2595 		return device;
2596 	}
2597 
2598 	ret = btrfs_get_dev_args_from_path(fs_info, &args, device_path);
2599 	if (ret)
2600 		return ERR_PTR(ret);
2601 	device = btrfs_find_device(fs_info->fs_devices, &args);
2602 	btrfs_put_dev_args_from_path(&args);
2603 	if (!device)
2604 		return ERR_PTR(-ENOENT);
2605 	return device;
2606 }
2607 
2608 static struct btrfs_fs_devices *btrfs_init_sprout(struct btrfs_fs_info *fs_info)
2609 {
2610 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2611 	struct btrfs_fs_devices *old_devices;
2612 	struct btrfs_fs_devices *seed_devices;
2613 
2614 	lockdep_assert_held(&uuid_mutex);
2615 	if (!fs_devices->seeding)
2616 		return ERR_PTR(-EINVAL);
2617 
2618 	/*
2619 	 * Private copy of the seed devices, anchored at
2620 	 * fs_info->fs_devices->seed_list
2621 	 */
2622 	seed_devices = alloc_fs_devices(NULL);
2623 	if (IS_ERR(seed_devices))
2624 		return seed_devices;
2625 
2626 	/*
2627 	 * It's necessary to retain a copy of the original seed fs_devices in
2628 	 * fs_uuids so that filesystems which have been seeded can successfully
2629 	 * reference the seed device from open_seed_devices. This also supports
2630 	 * multiple seed filesystems.
2631 	 */
2632 	old_devices = clone_fs_devices(fs_devices);
2633 	if (IS_ERR(old_devices)) {
2634 		kfree(seed_devices);
2635 		return old_devices;
2636 	}
2637 
2638 	list_add(&old_devices->fs_list, &fs_uuids);
2639 
2640 	memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
2641 	seed_devices->opened = 1;
2642 	INIT_LIST_HEAD(&seed_devices->devices);
2643 	INIT_LIST_HEAD(&seed_devices->alloc_list);
2644 	mutex_init(&seed_devices->device_list_mutex);
2645 
2646 	return seed_devices;
2647 }
2648 
2649 /*
2650  * Splice seed devices into the sprout fs_devices.
2651  * Generate a new fsid for the sprouted read-write filesystem.
2652  */
2653 static void btrfs_setup_sprout(struct btrfs_fs_info *fs_info,
2654 			       struct btrfs_fs_devices *seed_devices)
2655 {
2656 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2657 	struct btrfs_super_block *disk_super = fs_info->super_copy;
2658 	struct btrfs_device *device;
2659 	u64 super_flags;
2660 
2661 	/*
2662 	 * We are updating the fsid; a thread running device_list_add()
2663 	 * concurrently could race with us, so uuid_mutex is needed.
2664 	 */
2665 	lockdep_assert_held(&uuid_mutex);
2666 
2667 	/*
2668 	 * The threads listed below may traverse dev_list but can do that without
2669 	 * device_list_mutex:
2670 	 * - All device ops and balance - as we are in btrfs_exclop_start.
2671 	 * - Various dev_list readers - are using RCU.
2672 	 * - btrfs_ioctl_fitrim() - is using RCU.
2673 	 *
2674 	 * For-read threads as below are using device_list_mutex:
2675 	 * - Readonly scrub btrfs_scrub_dev()
2676 	 * - Readonly scrub btrfs_scrub_progress()
2677 	 * - btrfs_get_dev_stats()
2678 	 */
2679 	lockdep_assert_held(&fs_devices->device_list_mutex);
2680 
2681 	list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
2682 			      synchronize_rcu);
2683 	list_for_each_entry(device, &seed_devices->devices, dev_list)
2684 		device->fs_devices = seed_devices;
2685 
2686 	fs_devices->seeding = false;
2687 	fs_devices->num_devices = 0;
2688 	fs_devices->open_devices = 0;
2689 	fs_devices->missing_devices = 0;
2690 	fs_devices->rotating = false;
2691 	list_add(&seed_devices->seed_list, &fs_devices->seed_list);
2692 
2693 	generate_random_uuid(fs_devices->fsid);
2694 	memcpy(fs_devices->metadata_uuid, fs_devices->fsid, BTRFS_FSID_SIZE);
2695 	memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
2696 
2697 	super_flags = btrfs_super_flags(disk_super) &
2698 		      ~BTRFS_SUPER_FLAG_SEEDING;
2699 	btrfs_set_super_flags(disk_super, super_flags);
2700 }
2701 
2702 /*
2703  * Store the expected generation for seed devices in device items.
2704  */
2705 static int btrfs_finish_sprout(struct btrfs_trans_handle *trans)
2706 {
2707 	BTRFS_DEV_LOOKUP_ARGS(args);
2708 	struct btrfs_fs_info *fs_info = trans->fs_info;
2709 	struct btrfs_root *root = fs_info->chunk_root;
2710 	struct btrfs_path *path;
2711 	struct extent_buffer *leaf;
2712 	struct btrfs_dev_item *dev_item;
2713 	struct btrfs_device *device;
2714 	struct btrfs_key key;
2715 	u8 fs_uuid[BTRFS_FSID_SIZE];
2716 	u8 dev_uuid[BTRFS_UUID_SIZE];
2717 	int ret;
2718 
2719 	path = btrfs_alloc_path();
2720 	if (!path)
2721 		return -ENOMEM;
2722 
2723 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2724 	key.offset = 0;
2725 	key.type = BTRFS_DEV_ITEM_KEY;
2726 
2727 	while (1) {
2728 		btrfs_reserve_chunk_metadata(trans, false);
2729 		ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2730 		btrfs_trans_release_chunk_metadata(trans);
2731 		if (ret < 0)
2732 			goto error;
2733 
2734 		leaf = path->nodes[0];
2735 next_slot:
2736 		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
2737 			ret = btrfs_next_leaf(root, path);
2738 			if (ret > 0)
2739 				break;
2740 			if (ret < 0)
2741 				goto error;
2742 			leaf = path->nodes[0];
2743 			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2744 			btrfs_release_path(path);
2745 			continue;
2746 		}
2747 
2748 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2749 		if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
2750 		    key.type != BTRFS_DEV_ITEM_KEY)
2751 			break;
2752 
2753 		dev_item = btrfs_item_ptr(leaf, path->slots[0],
2754 					  struct btrfs_dev_item);
2755 		args.devid = btrfs_device_id(leaf, dev_item);
2756 		read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
2757 				   BTRFS_UUID_SIZE);
2758 		read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
2759 				   BTRFS_FSID_SIZE);
2760 		args.uuid = dev_uuid;
2761 		args.fsid = fs_uuid;
2762 		device = btrfs_find_device(fs_info->fs_devices, &args);
2763 		BUG_ON(!device); /* Logic error */
2764 
2765 		if (device->fs_devices->seeding)
2766 			btrfs_set_device_generation(leaf, dev_item,
2767 						    device->generation);
2768 
2769 		path->slots[0]++;
2770 		goto next_slot;
2771 	}
2772 	ret = 0;
2773 error:
2774 	btrfs_free_path(path);
2775 	return ret;
2776 }
2777 
2778 int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path)
2779 {
2780 	struct btrfs_root *root = fs_info->dev_root;
2781 	struct btrfs_trans_handle *trans;
2782 	struct btrfs_device *device;
2783 	struct file *bdev_file;
2784 	struct super_block *sb = fs_info->sb;
2785 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2786 	struct btrfs_fs_devices *seed_devices = NULL;
2787 	u64 orig_super_total_bytes;
2788 	u64 orig_super_num_devices;
2789 	int ret = 0;
2790 	bool seeding_dev = false;
2791 	bool locked = false;
2792 
2793 	if (sb_rdonly(sb) && !fs_devices->seeding)
2794 		return -EROFS;
2795 
2796 	bdev_file = bdev_file_open_by_path(device_path, BLK_OPEN_WRITE,
2797 					fs_info->bdev_holder, NULL);
2798 	if (IS_ERR(bdev_file))
2799 		return PTR_ERR(bdev_file);
2800 
2801 	if (!btrfs_check_device_zone_type(fs_info, file_bdev(bdev_file))) {
2802 		ret = -EINVAL;
2803 		goto error;
2804 	}
2805 
2806 	if (fs_devices->seeding) {
2807 		seeding_dev = true;
2808 		down_write(&sb->s_umount);
2809 		mutex_lock(&uuid_mutex);
2810 		locked = true;
2811 	}
2812 
2813 	sync_blockdev(file_bdev(bdev_file));
2814 
2815 	rcu_read_lock();
2816 	list_for_each_entry_rcu(device, &fs_devices->devices, dev_list) {
2817 		if (device->bdev == file_bdev(bdev_file)) {
2818 			ret = -EEXIST;
2819 			rcu_read_unlock();
2820 			goto error;
2821 		}
2822 	}
2823 	rcu_read_unlock();
2824 
2825 	device = btrfs_alloc_device(fs_info, NULL, NULL, device_path);
2826 	if (IS_ERR(device)) {
2827 		/* we can safely leave the fs_devices entry around */
2828 		ret = PTR_ERR(device);
2829 		goto error;
2830 	}
2831 
2832 	device->fs_info = fs_info;
2833 	device->bdev_file = bdev_file;
2834 	device->bdev = file_bdev(bdev_file);
2835 	ret = lookup_bdev(device_path, &device->devt);
2836 	if (ret)
2837 		goto error_free_device;
2838 
2839 	ret = btrfs_get_dev_zone_info(device, false);
2840 	if (ret)
2841 		goto error_free_device;
2842 
2843 	trans = btrfs_start_transaction(root, 0);
2844 	if (IS_ERR(trans)) {
2845 		ret = PTR_ERR(trans);
2846 		goto error_free_zone;
2847 	}
2848 
2849 	set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
2850 	device->generation = trans->transid;
2851 	device->io_width = fs_info->sectorsize;
2852 	device->io_align = fs_info->sectorsize;
2853 	device->sector_size = fs_info->sectorsize;
2854 	device->total_bytes =
2855 		round_down(bdev_nr_bytes(device->bdev), fs_info->sectorsize);
2856 	device->disk_total_bytes = device->total_bytes;
2857 	device->commit_total_bytes = device->total_bytes;
2858 	set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
2859 	clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
2860 	device->dev_stats_valid = 1;
2861 	set_blocksize(device->bdev_file, BTRFS_BDEV_BLOCKSIZE);
2862 
2863 	if (seeding_dev) {
2864 		/* GFP_KERNEL allocation must not be under device_list_mutex */
2865 		seed_devices = btrfs_init_sprout(fs_info);
2866 		if (IS_ERR(seed_devices)) {
2867 			ret = PTR_ERR(seed_devices);
2868 			btrfs_abort_transaction(trans, ret);
2869 			goto error_trans;
2870 		}
2871 	}
2872 
2873 	mutex_lock(&fs_devices->device_list_mutex);
2874 	if (seeding_dev) {
2875 		btrfs_setup_sprout(fs_info, seed_devices);
2876 		btrfs_assign_next_active_device(fs_info->fs_devices->latest_dev,
2877 						device);
2878 	}
2879 
2880 	device->fs_devices = fs_devices;
2881 
2882 	mutex_lock(&fs_info->chunk_mutex);
2883 	list_add_rcu(&device->dev_list, &fs_devices->devices);
2884 	list_add(&device->dev_alloc_list, &fs_devices->alloc_list);
2885 	fs_devices->num_devices++;
2886 	fs_devices->open_devices++;
2887 	fs_devices->rw_devices++;
2888 	fs_devices->total_devices++;
2889 	fs_devices->total_rw_bytes += device->total_bytes;
2890 
2891 	atomic64_add(device->total_bytes, &fs_info->free_chunk_space);
2892 
2893 	if (!bdev_nonrot(device->bdev))
2894 		fs_devices->rotating = true;
2895 
2896 	orig_super_total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
2897 	btrfs_set_super_total_bytes(fs_info->super_copy,
2898 		round_down(orig_super_total_bytes + device->total_bytes,
2899 			   fs_info->sectorsize));
2900 
2901 	orig_super_num_devices = btrfs_super_num_devices(fs_info->super_copy);
2902 	btrfs_set_super_num_devices(fs_info->super_copy,
2903 				    orig_super_num_devices + 1);
2904 
2905 	/*
2906 	 * We've got more storage, so clear any full flags on the space
2907 	 * infos.
2908 	 */
2909 	btrfs_clear_space_info_full(fs_info);
2910 
2911 	mutex_unlock(&fs_info->chunk_mutex);
2912 
2913 	/* Add sysfs device entry */
2914 	btrfs_sysfs_add_device(device);
2915 
2916 	mutex_unlock(&fs_devices->device_list_mutex);
2917 
2918 	if (seeding_dev) {
2919 		mutex_lock(&fs_info->chunk_mutex);
2920 		ret = init_first_rw_device(trans);
2921 		mutex_unlock(&fs_info->chunk_mutex);
2922 		if (ret) {
2923 			btrfs_abort_transaction(trans, ret);
2924 			goto error_sysfs;
2925 		}
2926 	}
2927 
2928 	ret = btrfs_add_dev_item(trans, device);
2929 	if (ret) {
2930 		btrfs_abort_transaction(trans, ret);
2931 		goto error_sysfs;
2932 	}
2933 
2934 	if (seeding_dev) {
2935 		ret = btrfs_finish_sprout(trans);
2936 		if (ret) {
2937 			btrfs_abort_transaction(trans, ret);
2938 			goto error_sysfs;
2939 		}
2940 
2941 		/*
2942 		 * fs_devices now represents the newly sprouted filesystem and
2943 		 * its fsid has been changed by btrfs_setup_sprout().
2944 		 */
2945 		btrfs_sysfs_update_sprout_fsid(fs_devices);
2946 	}
2947 
2948 	ret = btrfs_commit_transaction(trans);
2949 
2950 	if (seeding_dev) {
2951 		mutex_unlock(&uuid_mutex);
2952 		up_write(&sb->s_umount);
2953 		locked = false;
2954 
2955 		if (ret) /* transaction commit */
2956 			return ret;
2957 
2958 		ret = btrfs_relocate_sys_chunks(fs_info);
2959 		if (ret < 0)
2960 			btrfs_handle_fs_error(fs_info, ret,
2961 				    "Failed to relocate sys chunks after device initialization. This can be fixed using the \"btrfs balance\" command.");
2962 		trans = btrfs_attach_transaction(root);
2963 		if (IS_ERR(trans)) {
2964 			if (PTR_ERR(trans) == -ENOENT)
2965 				return 0;
2966 			ret = PTR_ERR(trans);
2967 			trans = NULL;
2968 			goto error_sysfs;
2969 		}
2970 		ret = btrfs_commit_transaction(trans);
2971 	}
2972 
2973 	/*
2974 	 * Now that we have written a new super block to this device, check all
2975 	 * other fs_devices lists to see if device_path alienates any other scanned
2976 	 * device.
2977 	 * We can ignore the return value as it typically returns -EINVAL and
2978 	 * only succeeds if the device was an alien.
2979 	 */
2980 	btrfs_forget_devices(device->devt);
2981 
2982 	/* Update ctime/mtime for blkid or udev */
2983 	update_dev_time(device_path);
2984 
2985 	return ret;
2986 
2987 error_sysfs:
2988 	btrfs_sysfs_remove_device(device);
2989 	mutex_lock(&fs_info->fs_devices->device_list_mutex);
2990 	mutex_lock(&fs_info->chunk_mutex);
2991 	list_del_rcu(&device->dev_list);
2992 	list_del(&device->dev_alloc_list);
2993 	fs_info->fs_devices->num_devices--;
2994 	fs_info->fs_devices->open_devices--;
2995 	fs_info->fs_devices->rw_devices--;
2996 	fs_info->fs_devices->total_devices--;
2997 	fs_info->fs_devices->total_rw_bytes -= device->total_bytes;
2998 	atomic64_sub(device->total_bytes, &fs_info->free_chunk_space);
2999 	btrfs_set_super_total_bytes(fs_info->super_copy,
3000 				    orig_super_total_bytes);
3001 	btrfs_set_super_num_devices(fs_info->super_copy,
3002 				    orig_super_num_devices);
3003 	mutex_unlock(&fs_info->chunk_mutex);
3004 	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3005 error_trans:
3006 	if (trans)
3007 		btrfs_end_transaction(trans);
3008 error_free_zone:
3009 	btrfs_destroy_dev_zone_info(device);
3010 error_free_device:
3011 	btrfs_free_device(device);
3012 error:
3013 	fput(bdev_file);
3014 	if (locked) {
3015 		mutex_unlock(&uuid_mutex);
3016 		up_write(&sb->s_umount);
3017 	}
3018 	return ret;
3019 }
3020 
3021 static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
3022 					struct btrfs_device *device)
3023 {
3024 	int ret;
3025 	struct btrfs_path *path;
3026 	struct btrfs_root *root = device->fs_info->chunk_root;
3027 	struct btrfs_dev_item *dev_item;
3028 	struct extent_buffer *leaf;
3029 	struct btrfs_key key;
3030 
3031 	path = btrfs_alloc_path();
3032 	if (!path)
3033 		return -ENOMEM;
3034 
3035 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
3036 	key.type = BTRFS_DEV_ITEM_KEY;
3037 	key.offset = device->devid;
3038 
3039 	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
3040 	if (ret < 0)
3041 		goto out;
3042 
3043 	if (ret > 0) {
3044 		ret = -ENOENT;
3045 		goto out;
3046 	}
3047 
3048 	leaf = path->nodes[0];
3049 	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
3050 
3051 	btrfs_set_device_id(leaf, dev_item, device->devid);
3052 	btrfs_set_device_type(leaf, dev_item, device->type);
3053 	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
3054 	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
3055 	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
3056 	btrfs_set_device_total_bytes(leaf, dev_item,
3057 				     btrfs_device_get_disk_total_bytes(device));
3058 	btrfs_set_device_bytes_used(leaf, dev_item,
3059 				    btrfs_device_get_bytes_used(device));
3060 out:
3061 	btrfs_free_path(path);
3062 	return ret;
3063 }
3064 
3065 int btrfs_grow_device(struct btrfs_trans_handle *trans,
3066 		      struct btrfs_device *device, u64 new_size)
3067 {
3068 	struct btrfs_fs_info *fs_info = device->fs_info;
3069 	struct btrfs_super_block *super_copy = fs_info->super_copy;
3070 	u64 old_total;
3071 	u64 diff;
3072 	int ret;
3073 
3074 	if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
3075 		return -EACCES;
3076 
3077 	new_size = round_down(new_size, fs_info->sectorsize);
3078 
3079 	mutex_lock(&fs_info->chunk_mutex);
3080 	old_total = btrfs_super_total_bytes(super_copy);
3081 	diff = round_down(new_size - device->total_bytes, fs_info->sectorsize);
3082 
3083 	if (new_size <= device->total_bytes ||
3084 	    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
3085 		mutex_unlock(&fs_info->chunk_mutex);
3086 		return -EINVAL;
3087 	}
3088 
3089 	btrfs_set_super_total_bytes(super_copy,
3090 			round_down(old_total + diff, fs_info->sectorsize));
3091 	device->fs_devices->total_rw_bytes += diff;
3092 	atomic64_add(diff, &fs_info->free_chunk_space);
3093 
3094 	btrfs_device_set_total_bytes(device, new_size);
3095 	btrfs_device_set_disk_total_bytes(device, new_size);
3096 	btrfs_clear_space_info_full(device->fs_info);
3097 	if (list_empty(&device->post_commit_list))
3098 		list_add_tail(&device->post_commit_list,
3099 			      &trans->transaction->dev_update_list);
3100 	mutex_unlock(&fs_info->chunk_mutex);
3101 
3102 	btrfs_reserve_chunk_metadata(trans, false);
3103 	ret = btrfs_update_device(trans, device);
3104 	btrfs_trans_release_chunk_metadata(trans);
3105 
3106 	return ret;
3107 }
3108 
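/*
 * Worked arithmetic for btrfs_grow_device() (hypothetical sizes): with a
 * 4 KiB sectorsize, growing a device from 10 GiB to 20 GiB yields
 * diff = round_down(20 GiB - 10 GiB, 4096) = 10 GiB, which is added to both
 * total_rw_bytes and free_chunk_space before the device item is updated.
 */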
3109 static int btrfs_free_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
3110 {
3111 	struct btrfs_fs_info *fs_info = trans->fs_info;
3112 	struct btrfs_root *root = fs_info->chunk_root;
3113 	int ret;
3114 	struct btrfs_path *path;
3115 	struct btrfs_key key;
3116 
3117 	path = btrfs_alloc_path();
3118 	if (!path)
3119 		return -ENOMEM;
3120 
3121 	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
3122 	key.offset = chunk_offset;
3123 	key.type = BTRFS_CHUNK_ITEM_KEY;
3124 
3125 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
3126 	if (ret < 0)
3127 		goto out;
3128 	else if (ret > 0) { /* Logic error or corruption */
3129 		btrfs_err(fs_info, "failed to lookup chunk %llu when freeing",
3130 			  chunk_offset);
3131 		btrfs_abort_transaction(trans, -ENOENT);
3132 		ret = -EUCLEAN;
3133 		goto out;
3134 	}
3135 
3136 	ret = btrfs_del_item(trans, root, path);
3137 	if (ret < 0) {
3138 		btrfs_err(fs_info, "failed to delete chunk %llu item", chunk_offset);
3139 		btrfs_abort_transaction(trans, ret);
3140 		goto out;
3141 	}
3142 out:
3143 	btrfs_free_path(path);
3144 	return ret;
3145 }
3146 
3147 static int btrfs_del_sys_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
3148 {
3149 	struct btrfs_super_block *super_copy = fs_info->super_copy;
3150 	struct btrfs_disk_key *disk_key;
3151 	struct btrfs_chunk *chunk;
3152 	u8 *ptr;
3153 	int ret = 0;
3154 	u32 num_stripes;
3155 	u32 array_size;
3156 	u32 len = 0;
3157 	u32 cur;
3158 	struct btrfs_key key;
3159 
3160 	lockdep_assert_held(&fs_info->chunk_mutex);
3161 	array_size = btrfs_super_sys_array_size(super_copy);
3162 
3163 	ptr = super_copy->sys_chunk_array;
3164 	cur = 0;
3165 
3166 	while (cur < array_size) {
3167 		disk_key = (struct btrfs_disk_key *)ptr;
3168 		btrfs_disk_key_to_cpu(&key, disk_key);
3169 
3170 		len = sizeof(*disk_key);
3171 
3172 		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
3173 			chunk = (struct btrfs_chunk *)(ptr + len);
3174 			num_stripes = btrfs_stack_chunk_num_stripes(chunk);
3175 			len += btrfs_chunk_item_size(num_stripes);
3176 		} else {
3177 			ret = -EIO;
3178 			break;
3179 		}
3180 		if (key.objectid == BTRFS_FIRST_CHUNK_TREE_OBJECTID &&
3181 		    key.offset == chunk_offset) {
3182 			memmove(ptr, ptr + len, array_size - (cur + len));
3183 			array_size -= len;
3184 			btrfs_set_super_sys_array_size(super_copy, array_size);
3185 		} else {
3186 			ptr += len;
3187 			cur += len;
3188 		}
3189 	}
3190 	return ret;
3191 }
3192 
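/*
 * Layout sketch of super_copy->sys_chunk_array as walked above (entry sizes
 * vary with the stripe count):
 *
 *	[disk_key][chunk + stripes][disk_key][chunk + stripes] ...
 *
 * Each entry occupies sizeof(struct btrfs_disk_key) +
 * btrfs_chunk_item_size(num_stripes) bytes; deleting one memmove()s the tail
 * of the array down by that length and shrinks the recorded sys_array_size
 * to match.
 */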
3193 struct btrfs_chunk_map *btrfs_find_chunk_map_nolock(struct btrfs_fs_info *fs_info,
3194 						    u64 logical, u64 length)
3195 {
3196 	struct rb_node *node = fs_info->mapping_tree.rb_root.rb_node;
3197 	struct rb_node *prev = NULL;
3198 	struct rb_node *orig_prev;
3199 	struct btrfs_chunk_map *map;
3200 	struct btrfs_chunk_map *prev_map = NULL;
3201 
3202 	while (node) {
3203 		map = rb_entry(node, struct btrfs_chunk_map, rb_node);
3204 		prev = node;
3205 		prev_map = map;
3206 
3207 		if (logical < map->start) {
3208 			node = node->rb_left;
3209 		} else if (logical >= map->start + map->chunk_len) {
3210 			node = node->rb_right;
3211 		} else {
3212 			refcount_inc(&map->refs);
3213 			return map;
3214 		}
3215 	}
3216 
3217 	if (!prev)
3218 		return NULL;
3219 
3220 	orig_prev = prev;
3221 	while (prev && logical >= prev_map->start + prev_map->chunk_len) {
3222 		prev = rb_next(prev);
3223 		prev_map = rb_entry(prev, struct btrfs_chunk_map, rb_node);
3224 	}
3225 
3226 	if (!prev) {
3227 		prev = orig_prev;
3228 		prev_map = rb_entry(prev, struct btrfs_chunk_map, rb_node);
3229 		while (prev && logical < prev_map->start) {
3230 			prev = rb_prev(prev);
3231 			prev_map = rb_entry(prev, struct btrfs_chunk_map, rb_node);
3232 		}
3233 	}
3234 
3235 	if (prev) {
3236 		u64 end = logical + length;
3237 
3238 		/*
3239 		 * Caller can pass a U64_MAX length when it wants to get any
3240 		 * chunk starting at an offset of 'logical' or higher, so deal
3241 		 * with underflow by resetting the end offset to U64_MAX.
3242 		 */
3243 		if (end < logical)
3244 			end = U64_MAX;
3245 
3246 		if (end > prev_map->start &&
3247 		    logical < prev_map->start + prev_map->chunk_len) {
3248 			refcount_inc(&prev_map->refs);
3249 			return prev_map;
3250 		}
3251 	}
3252 
3253 	return NULL;
3254 }
3255 
3256 struct btrfs_chunk_map *btrfs_find_chunk_map(struct btrfs_fs_info *fs_info,
3257 					     u64 logical, u64 length)
3258 {
3259 	struct btrfs_chunk_map *map;
3260 
3261 	read_lock(&fs_info->mapping_tree_lock);
3262 	map = btrfs_find_chunk_map_nolock(fs_info, logical, length);
3263 	read_unlock(&fs_info->mapping_tree_lock);
3264 
3265 	return map;
3266 }
3267 
3268 /*
3269  * Find the mapping containing the given logical extent.
3270  *
3271  * @logical: Logical block offset in bytes.
3272  * @length: Length of extent in bytes.
3273  *
3274  * Return: Chunk mapping or ERR_PTR.
3275  */
3276 struct btrfs_chunk_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info,
3277 					    u64 logical, u64 length)
3278 {
3279 	struct btrfs_chunk_map *map;
3280 
3281 	map = btrfs_find_chunk_map(fs_info, logical, length);
3282 
3283 	if (unlikely(!map)) {
3284 		btrfs_crit(fs_info,
3285 			   "unable to find chunk map for logical %llu length %llu",
3286 			   logical, length);
3287 		return ERR_PTR(-EINVAL);
3288 	}
3289 
3290 	if (unlikely(map->start > logical || map->start + map->chunk_len <= logical)) {
3291 		btrfs_crit(fs_info,
3292 			   "found a bad chunk map, wanted %llu-%llu, found %llu-%llu",
3293 			   logical, logical + length, map->start,
3294 			   map->start + map->chunk_len);
3295 		btrfs_free_chunk_map(map);
3296 		return ERR_PTR(-EINVAL);
3297 	}
3298 
3299 	/* Callers are responsible for dropping the reference. */
3300 	return map;
3301 }
3302 
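/*
 * A minimal usage sketch for btrfs_get_chunk_map(), mirroring how
 * btrfs_remove_chunk() below consumes it:
 *
 *	struct btrfs_chunk_map *map;
 *
 *	map = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
 *	if (IS_ERR(map))
 *		return PTR_ERR(map);
 *	// ... use map->num_stripes, map->stripes[i], map->type ...
 *	btrfs_free_chunk_map(map);
 */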
3303 static int remove_chunk_item(struct btrfs_trans_handle *trans,
3304 			     struct btrfs_chunk_map *map, u64 chunk_offset)
3305 {
3306 	int i;
3307 
3308 	/*
3309 	 * Removing chunk items and updating the device items in the chunks btree
3310 	 * requires holding the chunk_mutex.
3311 	 * See the comment at btrfs_chunk_alloc() for the details.
3312 	 */
3313 	lockdep_assert_held(&trans->fs_info->chunk_mutex);
3314 
3315 	for (i = 0; i < map->num_stripes; i++) {
3316 		int ret;
3317 
3318 		ret = btrfs_update_device(trans, map->stripes[i].dev);
3319 		if (ret)
3320 			return ret;
3321 	}
3322 
3323 	return btrfs_free_chunk(trans, chunk_offset);
3324 }
3325 
3326 int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
3327 {
3328 	struct btrfs_fs_info *fs_info = trans->fs_info;
3329 	struct btrfs_chunk_map *map;
3330 	u64 dev_extent_len = 0;
3331 	int i, ret = 0;
3332 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
3333 
3334 	map = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
3335 	if (IS_ERR(map)) {
3336 		/*
3337 		 * This is a logic error, but we don't want to just rely on the
3338 		 * user having built with ASSERT enabled, so if ASSERT doesn't
3339 		 * do anything we still error out.
3340 		 */
3341 		ASSERT(0);
3342 		return PTR_ERR(map);
3343 	}
3344 
3345 	/*
3346 	 * First delete the device extent items from the devices btree.
3347 	 * We take the device_list_mutex to avoid racing with the finishing phase
3348 	 * of a device replace operation. See the comment below before acquiring
3349 	 * fs_info->chunk_mutex. Note that here we do not acquire the chunk_mutex
3350 	 * because that can result in a deadlock when deleting the device extent
3351 	 * items from the devices btree - COWing an extent buffer from the btree
3352 	 * may result in allocating a new metadata chunk, which would attempt to
3353 	 * lock fs_info->chunk_mutex again.
3354 	 */
3355 	mutex_lock(&fs_devices->device_list_mutex);
3356 	for (i = 0; i < map->num_stripes; i++) {
3357 		struct btrfs_device *device = map->stripes[i].dev;
3358 		ret = btrfs_free_dev_extent(trans, device,
3359 					    map->stripes[i].physical,
3360 					    &dev_extent_len);
3361 		if (ret) {
3362 			mutex_unlock(&fs_devices->device_list_mutex);
3363 			btrfs_abort_transaction(trans, ret);
3364 			goto out;
3365 		}
3366 
3367 		if (device->bytes_used > 0) {
3368 			mutex_lock(&fs_info->chunk_mutex);
3369 			btrfs_device_set_bytes_used(device,
3370 					device->bytes_used - dev_extent_len);
3371 			atomic64_add(dev_extent_len, &fs_info->free_chunk_space);
3372 			btrfs_clear_space_info_full(fs_info);
3373 			mutex_unlock(&fs_info->chunk_mutex);
3374 		}
3375 	}
3376 	mutex_unlock(&fs_devices->device_list_mutex);
3377 
3378 	/*
3379 	 * We acquire fs_info->chunk_mutex for 2 reasons:
3380 	 *
3381 	 * 1) Just like with the first phase of the chunk allocation, we must
3382 	 *    reserve system space, do all chunk btree updates and deletions, and
3383 	 *    update the system chunk array in the superblock while holding this
3384 	 *    mutex. This is for similar reasons as explained on the comment at
3385 	 *    the top of btrfs_chunk_alloc();
3386 	 *
3387 	 * 2) Prevent races with the final phase of a device replace operation
3388 	 *    that replaces the device object associated with the map's stripes,
3389 	 *    because the device object's id can change at any time during that
3390 	 *    final phase of the device replace operation
3391 	 *    (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the
3392 	 *    replaced device and then see it with an ID of
3393 	 *    BTRFS_DEV_REPLACE_DEVID, which would cause a failure when updating
3394 	 *    the device item, which does not exist on the chunk btree.
3395 	 *    The finishing phase of device replace acquires both the
3396 	 *    device_list_mutex and the chunk_mutex, in that order, so we are
3397 	 *    safe by just acquiring the chunk_mutex.
3398 	 */
3399 	trans->removing_chunk = true;
3400 	mutex_lock(&fs_info->chunk_mutex);
3401 
3402 	check_system_chunk(trans, map->type);
3403 
3404 	ret = remove_chunk_item(trans, map, chunk_offset);
3405 	/*
3406 	 * Normally we should not get -ENOSPC since we reserved space before
3407 	 * through the call to check_system_chunk().
3408 	 *
3409 	 * Despite our system space_info having enough free space, we may not
3410 	 * be able to allocate extents from its block groups, because all have
3411 	 * an incompatible profile, which will force us to allocate a new system
3412 	 * block group with the right profile, or right after we called
3413 	 * check_system_chunk() above, a scrub turned the only system block group
3414 	 * with enough free space into RO mode.
3415 	 * This is explained in more detail at do_chunk_alloc().
3416 	 *
3417 	 * So if we get -ENOSPC, allocate a new system chunk and retry once.
3418 	 */
3419 	if (ret == -ENOSPC) {
3420 		const u64 sys_flags = btrfs_system_alloc_profile(fs_info);
3421 		struct btrfs_block_group *sys_bg;
3422 
3423 		sys_bg = btrfs_create_chunk(trans, sys_flags);
3424 		if (IS_ERR(sys_bg)) {
3425 			ret = PTR_ERR(sys_bg);
3426 			btrfs_abort_transaction(trans, ret);
3427 			goto out;
3428 		}
3429 
3430 		ret = btrfs_chunk_alloc_add_chunk_item(trans, sys_bg);
3431 		if (ret) {
3432 			btrfs_abort_transaction(trans, ret);
3433 			goto out;
3434 		}
3435 
3436 		ret = remove_chunk_item(trans, map, chunk_offset);
3437 		if (ret) {
3438 			btrfs_abort_transaction(trans, ret);
3439 			goto out;
3440 		}
3441 	} else if (ret) {
3442 		btrfs_abort_transaction(trans, ret);
3443 		goto out;
3444 	}
3445 
3446 	trace_btrfs_chunk_free(fs_info, map, chunk_offset, map->chunk_len);
3447 
3448 	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
3449 		ret = btrfs_del_sys_chunk(fs_info, chunk_offset);
3450 		if (ret) {
3451 			btrfs_abort_transaction(trans, ret);
3452 			goto out;
3453 		}
3454 	}
3455 
3456 	mutex_unlock(&fs_info->chunk_mutex);
3457 	trans->removing_chunk = false;
3458 
3459 	/*
3460 	 * We are done with chunk btree updates and deletions, so release the
3461 	 * system space we previously reserved (with check_system_chunk()).
3462 	 */
3463 	btrfs_trans_release_chunk_metadata(trans);
3464 
3465 	ret = btrfs_remove_block_group(trans, map);
3466 	if (ret) {
3467 		btrfs_abort_transaction(trans, ret);
3468 		goto out;
3469 	}
3470 
3471 out:
3472 	if (trans->removing_chunk) {
3473 		mutex_unlock(&fs_info->chunk_mutex);
3474 		trans->removing_chunk = false;
3475 	}
3476 	/* once for us */
3477 	btrfs_free_chunk_map(map);
3478 	return ret;
3479 }
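
/*
 * Lock ordering recap for btrfs_remove_chunk() above, an informal sketch
 * derived from the comments in the function:
 *
 *	fs_devices->device_list_mutex	(delete device extent items)
 *	fs_info->chunk_mutex		(chunk btree + sys chunk array updates)
 *
 * This matches the device_list_mutex -> chunk_mutex order taken by the
 * finishing phase of device replace, which is what makes the race
 * analysis above hold.
 */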
3480 
3481 int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
3482 {
3483 	struct btrfs_root *root = fs_info->chunk_root;
3484 	struct btrfs_trans_handle *trans;
3485 	struct btrfs_block_group *block_group;
3486 	u64 length;
3487 	int ret;
3488 
3489 	if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) {
3490 		btrfs_err(fs_info,
3491 			  "relocate: not supported on extent tree v2 yet");
3492 		return -EINVAL;
3493 	}
3494 
3495 	/*
3496 	 * Prevent races with automatic removal of unused block groups.
3497 	 * After we relocate and before we remove the chunk with offset
3498 	 * chunk_offset, automatic removal of the block group can kick in,
3499 	 * resulting in a failure when calling btrfs_remove_chunk() below.
3500 	 *
3501 	 * Make sure to acquire this mutex before doing a tree search (dev
3502 	 * or chunk trees) to find chunks. Otherwise the cleaner kthread might
3503 	 * call btrfs_remove_chunk() (through btrfs_delete_unused_bgs()) after
3504 	 * we release the path used to search the chunk/dev tree and before
3505 	 * the current task acquires this mutex and calls us.
3506 	 */
3507 	lockdep_assert_held(&fs_info->reclaim_bgs_lock);
3508 
3509 	/* step one, relocate all the extents inside this chunk */
3510 	btrfs_scrub_pause(fs_info);
3511 	ret = btrfs_relocate_block_group(fs_info, chunk_offset);
3512 	btrfs_scrub_continue(fs_info);
3513 	if (ret) {
3514 		/*
3515 		 * If we had a transaction abort, stop all running scrubs.
3516 		 * See transaction.c:cleanup_transaction() for why we do it here.
3517 		 */
3518 		if (BTRFS_FS_ERROR(fs_info))
3519 			btrfs_scrub_cancel(fs_info);
3520 		return ret;
3521 	}
3522 
3523 	block_group = btrfs_lookup_block_group(fs_info, chunk_offset);
3524 	if (!block_group)
3525 		return -ENOENT;
3526 	btrfs_discard_cancel_work(&fs_info->discard_ctl, block_group);
3527 	length = block_group->length;
3528 	btrfs_put_block_group(block_group);
3529 
3530 	/*
3531 	 * On a zoned file system, discard the whole block group; this will
3532 	 * trigger a REQ_OP_ZONE_RESET operation on the device zone. If
3533 	 * resetting the zone fails, don't treat it as a fatal problem from the
3534 	 * filesystem's point of view.
3535 	 */
3536 	if (btrfs_is_zoned(fs_info)) {
3537 		ret = btrfs_discard_extent(fs_info, chunk_offset, length, NULL);
3538 		if (ret)
3539 			btrfs_info(fs_info,
3540 				"failed to reset zone %llu after relocation",
3541 				chunk_offset);
3542 	}
3543 
3544 	trans = btrfs_start_trans_remove_block_group(root->fs_info,
3545 						     chunk_offset);
3546 	if (IS_ERR(trans)) {
3547 		ret = PTR_ERR(trans);
3548 		btrfs_handle_fs_error(root->fs_info, ret, NULL);
3549 		return ret;
3550 	}
3551 
3552 	/*
3553 	 * step two, delete the device extents and the
3554 	 * chunk tree entries
3555 	 */
3556 	ret = btrfs_remove_chunk(trans, chunk_offset);
3557 	btrfs_end_transaction(trans);
3558 	return ret;
3559 }
3560 
3561 static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info)
3562 {
3563 	struct btrfs_root *chunk_root = fs_info->chunk_root;
3564 	struct btrfs_path *path;
3565 	struct extent_buffer *leaf;
3566 	struct btrfs_chunk *chunk;
3567 	struct btrfs_key key;
3568 	struct btrfs_key found_key;
3569 	u64 chunk_type;
3570 	bool retried = false;
3571 	int failed = 0;
3572 	int ret;
3573 
3574 	path = btrfs_alloc_path();
3575 	if (!path)
3576 		return -ENOMEM;
3577 
3578 again:
3579 	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
3580 	key.offset = (u64)-1;
3581 	key.type = BTRFS_CHUNK_ITEM_KEY;
3582 
3583 	while (1) {
3584 		mutex_lock(&fs_info->reclaim_bgs_lock);
3585 		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
3586 		if (ret < 0) {
3587 			mutex_unlock(&fs_info->reclaim_bgs_lock);
3588 			goto error;
3589 		}
3590 		if (ret == 0) {
3591 			/*
3592 			 * On the first search we would find a chunk item with
3593 			 * offset -1, which is not possible. On subsequent
3594 			 * loops this would find an existing item on an invalid
3595 			 * offset (one less than the previous one, wrong
3596 			 * alignment and size).
3597 			 */
3598 			ret = -EUCLEAN;
3599 			mutex_unlock(&fs_info->reclaim_bgs_lock);
3600 			goto error;
3601 		}
3602 
3603 		ret = btrfs_previous_item(chunk_root, path, key.objectid,
3604 					  key.type);
3605 		if (ret)
3606 			mutex_unlock(&fs_info->reclaim_bgs_lock);
3607 		if (ret < 0)
3608 			goto error;
3609 		if (ret > 0)
3610 			break;
3611 
3612 		leaf = path->nodes[0];
3613 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3614 
3615 		chunk = btrfs_item_ptr(leaf, path->slots[0],
3616 				       struct btrfs_chunk);
3617 		chunk_type = btrfs_chunk_type(leaf, chunk);
3618 		btrfs_release_path(path);
3619 
3620 		if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
3621 			ret = btrfs_relocate_chunk(fs_info, found_key.offset);
3622 			if (ret == -ENOSPC)
3623 				failed++;
3624 			else
3625 				BUG_ON(ret);
3626 		}
3627 		mutex_unlock(&fs_info->reclaim_bgs_lock);
3628 
3629 		if (found_key.offset == 0)
3630 			break;
3631 		key.offset = found_key.offset - 1;
3632 	}
3633 	ret = 0;
3634 	if (failed && !retried) {
3635 		failed = 0;
3636 		retried = true;
3637 		goto again;
3638 	} else if (WARN_ON(failed && retried)) {
3639 		ret = -ENOSPC;
3640 	}
3641 error:
3642 	btrfs_free_path(path);
3643 	return ret;
3644 }
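
/*
 * Note on the failed/retried pattern above: every chunk that hit -ENOSPC
 * gets a second chance, since relocating the other chunks in the first
 * pass may have freed enough space; only a failure during the retry pass
 * is reported back as -ENOSPC.
 */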
3645 
3646 /*
3647  * return 1 : allocated a data chunk successfully,
3648  * return <0: errors occurred while allocating a data chunk,
3649  * return 0 : no need to allocate a data chunk.
3650  */
3651 static int btrfs_may_alloc_data_chunk(struct btrfs_fs_info *fs_info,
3652 				      u64 chunk_offset)
3653 {
3654 	struct btrfs_block_group *cache;
3655 	u64 bytes_used;
3656 	u64 chunk_type;
3657 
3658 	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3659 	ASSERT(cache);
3660 	chunk_type = cache->flags;
3661 	btrfs_put_block_group(cache);
3662 
3663 	if (!(chunk_type & BTRFS_BLOCK_GROUP_DATA))
3664 		return 0;
3665 
3666 	spin_lock(&fs_info->data_sinfo->lock);
3667 	bytes_used = fs_info->data_sinfo->bytes_used;
3668 	spin_unlock(&fs_info->data_sinfo->lock);
3669 
3670 	if (!bytes_used) {
3671 		struct btrfs_trans_handle *trans;
3672 		int ret;
3673 
3674 		trans =	btrfs_join_transaction(fs_info->tree_root);
3675 		if (IS_ERR(trans))
3676 			return PTR_ERR(trans);
3677 
3678 		ret = btrfs_force_chunk_alloc(trans, BTRFS_BLOCK_GROUP_DATA);
3679 		btrfs_end_transaction(trans);
3680 		if (ret < 0)
3681 			return ret;
3682 		return 1;
3683 	}
3684 
3685 	return 0;
3686 }
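
/*
 * Typical call pattern (as used by __btrfs_balance() and
 * btrfs_shrink_device() below): allocate a spare data chunk before
 * relocating what may be the only data chunk, so the data raid profile
 * is not lost:
 *
 *	ret = btrfs_may_alloc_data_chunk(fs_info, chunk_offset);
 *	if (ret < 0)
 *		goto error;
 *	ret = btrfs_relocate_chunk(fs_info, chunk_offset);
 */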
3687 
3688 static void btrfs_disk_balance_args_to_cpu(struct btrfs_balance_args *cpu,
3689 					   const struct btrfs_disk_balance_args *disk)
3690 {
3691 	memset(cpu, 0, sizeof(*cpu));
3692 
3693 	cpu->profiles = le64_to_cpu(disk->profiles);
3694 	cpu->usage = le64_to_cpu(disk->usage);
3695 	cpu->devid = le64_to_cpu(disk->devid);
3696 	cpu->pstart = le64_to_cpu(disk->pstart);
3697 	cpu->pend = le64_to_cpu(disk->pend);
3698 	cpu->vstart = le64_to_cpu(disk->vstart);
3699 	cpu->vend = le64_to_cpu(disk->vend);
3700 	cpu->target = le64_to_cpu(disk->target);
3701 	cpu->flags = le64_to_cpu(disk->flags);
3702 	cpu->limit = le64_to_cpu(disk->limit);
3703 	cpu->stripes_min = le32_to_cpu(disk->stripes_min);
3704 	cpu->stripes_max = le32_to_cpu(disk->stripes_max);
3705 }
3706 
3707 static void btrfs_cpu_balance_args_to_disk(struct btrfs_disk_balance_args *disk,
3708 					   const struct btrfs_balance_args *cpu)
3709 {
3710 	memset(disk, 0, sizeof(*disk));
3711 
3712 	disk->profiles = cpu_to_le64(cpu->profiles);
3713 	disk->usage = cpu_to_le64(cpu->usage);
3714 	disk->devid = cpu_to_le64(cpu->devid);
3715 	disk->pstart = cpu_to_le64(cpu->pstart);
3716 	disk->pend = cpu_to_le64(cpu->pend);
3717 	disk->vstart = cpu_to_le64(cpu->vstart);
3718 	disk->vend = cpu_to_le64(cpu->vend);
3719 	disk->target = cpu_to_le64(cpu->target);
3720 	disk->flags = cpu_to_le64(cpu->flags);
3721 	disk->limit = cpu_to_le64(cpu->limit);
3722 	disk->stripes_min = cpu_to_le32(cpu->stripes_min);
3723 	disk->stripes_max = cpu_to_le32(cpu->stripes_max);
3724 }
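
/*
 * The two converters above are exact inverses for the fields they copy.
 * The leading memset() also zeroes the rest of the destination, so any
 * padding or reserved bytes are written as zeros rather than stale
 * memory, both on disk and in the in-memory balance args.
 */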
3725 
3726 static int insert_balance_item(struct btrfs_fs_info *fs_info,
3727 			       struct btrfs_balance_control *bctl)
3728 {
3729 	struct btrfs_root *root = fs_info->tree_root;
3730 	struct btrfs_trans_handle *trans;
3731 	struct btrfs_balance_item *item;
3732 	struct btrfs_disk_balance_args disk_bargs;
3733 	struct btrfs_path *path;
3734 	struct extent_buffer *leaf;
3735 	struct btrfs_key key;
3736 	int ret, err;
3737 
3738 	path = btrfs_alloc_path();
3739 	if (!path)
3740 		return -ENOMEM;
3741 
3742 	trans = btrfs_start_transaction(root, 0);
3743 	if (IS_ERR(trans)) {
3744 		btrfs_free_path(path);
3745 		return PTR_ERR(trans);
3746 	}
3747 
3748 	key.objectid = BTRFS_BALANCE_OBJECTID;
3749 	key.type = BTRFS_TEMPORARY_ITEM_KEY;
3750 	key.offset = 0;
3751 
3752 	ret = btrfs_insert_empty_item(trans, root, path, &key,
3753 				      sizeof(*item));
3754 	if (ret)
3755 		goto out;
3756 
3757 	leaf = path->nodes[0];
3758 	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
3759 
3760 	memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item));
3761 
3762 	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data);
3763 	btrfs_set_balance_data(leaf, item, &disk_bargs);
3764 	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta);
3765 	btrfs_set_balance_meta(leaf, item, &disk_bargs);
3766 	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys);
3767 	btrfs_set_balance_sys(leaf, item, &disk_bargs);
3768 	btrfs_set_balance_flags(leaf, item, bctl->flags);
3769 out:
3770 	btrfs_free_path(path);
3771 	err = btrfs_commit_transaction(trans);
3772 	if (err && !ret)
3773 		ret = err;
3774 	return ret;
3775 }
3776 
3777 static int del_balance_item(struct btrfs_fs_info *fs_info)
3778 {
3779 	struct btrfs_root *root = fs_info->tree_root;
3780 	struct btrfs_trans_handle *trans;
3781 	struct btrfs_path *path;
3782 	struct btrfs_key key;
3783 	int ret, err;
3784 
3785 	path = btrfs_alloc_path();
3786 	if (!path)
3787 		return -ENOMEM;
3788 
3789 	trans = btrfs_start_transaction_fallback_global_rsv(root, 0);
3790 	if (IS_ERR(trans)) {
3791 		btrfs_free_path(path);
3792 		return PTR_ERR(trans);
3793 	}
3794 
3795 	key.objectid = BTRFS_BALANCE_OBJECTID;
3796 	key.type = BTRFS_TEMPORARY_ITEM_KEY;
3797 	key.offset = 0;
3798 
3799 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
3800 	if (ret < 0)
3801 		goto out;
3802 	if (ret > 0) {
3803 		ret = -ENOENT;
3804 		goto out;
3805 	}
3806 
3807 	ret = btrfs_del_item(trans, root, path);
3808 out:
3809 	btrfs_free_path(path);
3810 	err = btrfs_commit_transaction(trans);
3811 	if (err && !ret)
3812 		ret = err;
3813 	return ret;
3814 }
3815 
3816 /*
3817  * This is a heuristic used to reduce the number of chunks balanced on
3818  * resume after balance was interrupted.
3819  */
3820 static void update_balance_args(struct btrfs_balance_control *bctl)
3821 {
3822 	/*
3823 	 * Turn on soft mode for chunk types that were being converted.
3824 	 */
3825 	if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
3826 		bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
3827 	if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
3828 		bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
3829 	if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
3830 		bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;
3831 
3832 	/*
3833 	 * Turn on the usage filter if it is not already used.  The idea is
3834 	 * that chunks that we have already balanced should be
3835 	 * reasonably full.  Don't do it for chunks that are being
3836 	 * converted - that will keep us from relocating unconverted
3837 	 * (albeit full) chunks.
3838 	 */
3839 	if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3840 	    !(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3841 	    !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3842 		bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
3843 		bctl->data.usage = 90;
3844 	}
3845 	if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3846 	    !(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3847 	    !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3848 		bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
3849 		bctl->sys.usage = 90;
3850 	}
3851 	if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3852 	    !(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3853 	    !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3854 		bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
3855 		bctl->meta.usage = 90;
3856 	}
3857 }
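
/*
 * Worked example: with BTRFS_BALANCE_ARGS_USAGE and usage = 90 set here,
 * chunk_usage_filter() below skips any chunk whose used bytes have
 * reached 90% of its length. Chunks that were already balanced before
 * the interruption are expected to be packed that full, so a resumed
 * balance does not touch them again.
 */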
3858 
3859 /*
3860  * Clear the balance status in fs_info and delete the balance item from disk.
3861  */
3862 static void reset_balance_state(struct btrfs_fs_info *fs_info)
3863 {
3864 	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3865 	int ret;
3866 
3867 	ASSERT(fs_info->balance_ctl);
3868 
3869 	spin_lock(&fs_info->balance_lock);
3870 	fs_info->balance_ctl = NULL;
3871 	spin_unlock(&fs_info->balance_lock);
3872 
3873 	kfree(bctl);
3874 	ret = del_balance_item(fs_info);
3875 	if (ret)
3876 		btrfs_handle_fs_error(fs_info, ret, NULL);
3877 }
3878 
3879 /*
3880  * Balance filters.  Return 1 if chunk should be filtered out
3881  * (should not be balanced).
3882  */
3883 static int chunk_profiles_filter(u64 chunk_type,
3884 				 struct btrfs_balance_args *bargs)
3885 {
3886 	chunk_type = chunk_to_extended(chunk_type) &
3887 				BTRFS_EXTENDED_PROFILE_MASK;
3888 
3889 	if (bargs->profiles & chunk_type)
3890 		return 0;
3891 
3892 	return 1;
3893 }
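
/*
 * Example: a SINGLE chunk has no profile bit set on disk, so
 * chunk_to_extended() maps it to BTRFS_AVAIL_ALLOC_BIT_SINGLE first;
 * a filter like "profiles=single" therefore matches it as expected.
 */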
3894 
3895 static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
3896 			      struct btrfs_balance_args *bargs)
3897 {
3898 	struct btrfs_block_group *cache;
3899 	u64 chunk_used;
3900 	u64 user_thresh_min;
3901 	u64 user_thresh_max;
3902 	int ret = 1;
3903 
3904 	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3905 	chunk_used = cache->used;
3906 
3907 	if (bargs->usage_min == 0)
3908 		user_thresh_min = 0;
3909 	else
3910 		user_thresh_min = mult_perc(cache->length, bargs->usage_min);
3911 
3912 	if (bargs->usage_max == 0)
3913 		user_thresh_max = 1;
3914 	else if (bargs->usage_max > 100)
3915 		user_thresh_max = cache->length;
3916 	else
3917 		user_thresh_max = mult_perc(cache->length, bargs->usage_max);
3918 
3919 	if (user_thresh_min <= chunk_used && chunk_used < user_thresh_max)
3920 		ret = 0;
3921 
3922 	btrfs_put_block_group(cache);
3923 	return ret;
3924 }
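
/*
 * Worked example (illustrative numbers): for a 1 GiB chunk with
 * usage_min=10 and usage_max=50, the matching window of used bytes is
 * [107374182, 536870912). Note that usage_max == 0 makes the upper
 * bound 1, so only completely empty chunks pass the filter then.
 */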
3925 
3926 static int chunk_usage_filter(struct btrfs_fs_info *fs_info,
3927 		u64 chunk_offset, struct btrfs_balance_args *bargs)
3928 {
3929 	struct btrfs_block_group *cache;
3930 	u64 chunk_used, user_thresh;
3931 	int ret = 1;
3932 
3933 	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3934 	chunk_used = cache->used;
3935 
3936 	if (bargs->usage_min == 0)
3937 		user_thresh = 1;
3938 	else if (bargs->usage > 100)
3939 		user_thresh = cache->length;
3940 	else
3941 		user_thresh = mult_perc(cache->length, bargs->usage);
3942 
3943 	if (chunk_used < user_thresh)
3944 		ret = 0;
3945 
3946 	btrfs_put_block_group(cache);
3947 	return ret;
3948 }
3949 
3950 static int chunk_devid_filter(struct extent_buffer *leaf,
3951 			      struct btrfs_chunk *chunk,
3952 			      struct btrfs_balance_args *bargs)
3953 {
3954 	struct btrfs_stripe *stripe;
3955 	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3956 	int i;
3957 
3958 	for (i = 0; i < num_stripes; i++) {
3959 		stripe = btrfs_stripe_nr(chunk, i);
3960 		if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
3961 			return 0;
3962 	}
3963 
3964 	return 1;
3965 }
3966 
3967 static u64 calc_data_stripes(u64 type, int num_stripes)
3968 {
3969 	const int index = btrfs_bg_flags_to_raid_index(type);
3970 	const int ncopies = btrfs_raid_array[index].ncopies;
3971 	const int nparity = btrfs_raid_array[index].nparity;
3972 
3973 	return (num_stripes - nparity) / ncopies;
3974 }
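
/*
 * Examples: a RAID6 chunk with 8 total stripes has nparity = 2 and
 * ncopies = 1, giving (8 - 2) / 1 = 6 data stripes; a RAID10 chunk
 * with 8 stripes has nparity = 0 and ncopies = 2, giving 8 / 2 = 4.
 */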
3975 
3976 /* [pstart, pend) */
3977 static int chunk_drange_filter(struct extent_buffer *leaf,
3978 			       struct btrfs_chunk *chunk,
3979 			       struct btrfs_balance_args *bargs)
3980 {
3981 	struct btrfs_stripe *stripe;
3982 	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3983 	u64 stripe_offset;
3984 	u64 stripe_length;
3985 	u64 type;
3986 	int factor;
3987 	int i;
3988 
3989 	if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))
3990 		return 0;
3991 
3992 	type = btrfs_chunk_type(leaf, chunk);
3993 	factor = calc_data_stripes(type, num_stripes);
3994 
3995 	for (i = 0; i < num_stripes; i++) {
3996 		stripe = btrfs_stripe_nr(chunk, i);
3997 		if (btrfs_stripe_devid(leaf, stripe) != bargs->devid)
3998 			continue;
3999 
4000 		stripe_offset = btrfs_stripe_offset(leaf, stripe);
4001 		stripe_length = btrfs_chunk_length(leaf, chunk);
4002 		stripe_length = div_u64(stripe_length, factor);
4003 
4004 		if (stripe_offset < bargs->pend &&
4005 		    stripe_offset + stripe_length > bargs->pstart)
4006 			return 0;
4007 	}
4008 
4009 	return 1;
4010 }
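
/*
 * Worked example: a 2 GiB RAID0 chunk striped over 4 devices has
 * factor = 4 data stripes, so each device extent covers
 * 2 GiB / 4 = 512 MiB starting at its stripe_offset. The chunk matches
 * when any such [offset, offset + 512 MiB) range on the requested
 * devid overlaps [pstart, pend).
 */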
4011 
4012 /* [vstart, vend) */
4013 static int chunk_vrange_filter(struct extent_buffer *leaf,
4014 			       struct btrfs_chunk *chunk,
4015 			       u64 chunk_offset,
4016 			       struct btrfs_balance_args *bargs)
4017 {
4018 	if (chunk_offset < bargs->vend &&
4019 	    chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
4020 		/* at least part of the chunk is inside this vrange */
4021 		return 0;
4022 
4023 	return 1;
4024 }
4025 
4026 static int chunk_stripes_range_filter(struct extent_buffer *leaf,
4027 			       struct btrfs_chunk *chunk,
4028 			       struct btrfs_balance_args *bargs)
4029 {
4030 	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
4031 
4032 	if (bargs->stripes_min <= num_stripes
4033 			&& num_stripes <= bargs->stripes_max)
4034 		return 0;
4035 
4036 	return 1;
4037 }
4038 
4039 static int chunk_soft_convert_filter(u64 chunk_type,
4040 				     struct btrfs_balance_args *bargs)
4041 {
4042 	if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
4043 		return 0;
4044 
4045 	chunk_type = chunk_to_extended(chunk_type) &
4046 				BTRFS_EXTENDED_PROFILE_MASK;
4047 
4048 	if (bargs->target == chunk_type)
4049 		return 1;
4050 
4051 	return 0;
4052 }
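
/*
 * Example: during a soft convert to raid1, a chunk that is already
 * raid1 hits the bargs->target == chunk_type case above and is
 * filtered out, so only not-yet-converted chunks get relocated.
 */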
4053 
4054 static int should_balance_chunk(struct extent_buffer *leaf,
4055 				struct btrfs_chunk *chunk, u64 chunk_offset)
4056 {
4057 	struct btrfs_fs_info *fs_info = leaf->fs_info;
4058 	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
4059 	struct btrfs_balance_args *bargs = NULL;
4060 	u64 chunk_type = btrfs_chunk_type(leaf, chunk);
4061 
4062 	/* type filter */
4063 	if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
4064 	      (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {
4065 		return 0;
4066 	}
4067 
4068 	if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
4069 		bargs = &bctl->data;
4070 	else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
4071 		bargs = &bctl->sys;
4072 	else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
4073 		bargs = &bctl->meta;
4074 
4075 	/* profiles filter */
4076 	if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
4077 	    chunk_profiles_filter(chunk_type, bargs)) {
4078 		return 0;
4079 	}
4080 
4081 	/* usage filter */
4082 	if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
4083 	    chunk_usage_filter(fs_info, chunk_offset, bargs)) {
4084 		return 0;
4085 	} else if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
4086 	    chunk_usage_range_filter(fs_info, chunk_offset, bargs)) {
4087 		return 0;
4088 	}
4089 
4090 	/* devid filter */
4091 	if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
4092 	    chunk_devid_filter(leaf, chunk, bargs)) {
4093 		return 0;
4094 	}
4095 
4096 	/* drange filter, makes sense only with devid filter */
4097 	if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
4098 	    chunk_drange_filter(leaf, chunk, bargs)) {
4099 		return 0;
4100 	}
4101 
4102 	/* vrange filter */
4103 	if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
4104 	    chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
4105 		return 0;
4106 	}
4107 
4108 	/* stripes filter */
4109 	if ((bargs->flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) &&
4110 	    chunk_stripes_range_filter(leaf, chunk, bargs)) {
4111 		return 0;
4112 	}
4113 
4114 	/* soft profile changing mode */
4115 	if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) &&
4116 	    chunk_soft_convert_filter(chunk_type, bargs)) {
4117 		return 0;
4118 	}
4119 
4120 	/*
4121 	 * limited by count, must be the last filter
4122 	 */
4123 	if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT)) {
4124 		if (bargs->limit == 0)
4125 			return 0;
4126 		else
4127 			bargs->limit--;
4128 	} else if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)) {
4129 		/*
4130 		 * Same logic as the 'limit' filter; the minimum cannot be
4131 		 * determined here because we do not have the global information
4132 		 * about the count of all chunks that satisfy the filters.
4133 		 */
4134 		if (bargs->limit_max == 0)
4135 			return 0;
4136 		else
4137 			bargs->limit_max--;
4138 	}
4139 
4140 	return 1;
4141 }
4142 
4143 static int __btrfs_balance(struct btrfs_fs_info *fs_info)
4144 {
4145 	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
4146 	struct btrfs_root *chunk_root = fs_info->chunk_root;
4147 	u64 chunk_type;
4148 	struct btrfs_chunk *chunk;
4149 	struct btrfs_path *path = NULL;
4150 	struct btrfs_key key;
4151 	struct btrfs_key found_key;
4152 	struct extent_buffer *leaf;
4153 	int slot;
4154 	int ret;
4155 	int enospc_errors = 0;
4156 	bool counting = true;
4157 	/* The single value limit and min/max limits use the same bytes in the union. */
4158 	u64 limit_data = bctl->data.limit;
4159 	u64 limit_meta = bctl->meta.limit;
4160 	u64 limit_sys = bctl->sys.limit;
4161 	u32 count_data = 0;
4162 	u32 count_meta = 0;
4163 	u32 count_sys = 0;
4164 	int chunk_reserved = 0;
4165 
4166 	path = btrfs_alloc_path();
4167 	if (!path) {
4168 		ret = -ENOMEM;
4169 		goto error;
4170 	}
4171 
4172 	/* zero out stat counters */
4173 	spin_lock(&fs_info->balance_lock);
4174 	memset(&bctl->stat, 0, sizeof(bctl->stat));
4175 	spin_unlock(&fs_info->balance_lock);
4176 again:
4177 	if (!counting) {
4178 		/*
4179 		 * The single value limit and min/max limits use the same bytes
4180 		 * in the union; restore the values the counting pass consumed.
4181 		 */
4182 		bctl->data.limit = limit_data;
4183 		bctl->meta.limit = limit_meta;
4184 		bctl->sys.limit = limit_sys;
4185 	}
4186 	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
4187 	key.offset = (u64)-1;
4188 	key.type = BTRFS_CHUNK_ITEM_KEY;
4189 
4190 	while (1) {
4191 		if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
4192 		    atomic_read(&fs_info->balance_cancel_req)) {
4193 			ret = -ECANCELED;
4194 			goto error;
4195 		}
4196 
4197 		mutex_lock(&fs_info->reclaim_bgs_lock);
4198 		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
4199 		if (ret < 0) {
4200 			mutex_unlock(&fs_info->reclaim_bgs_lock);
4201 			goto error;
4202 		}
4203 
4204 		/*
4205 		 * This shouldn't happen; it means the last relocation
4206 		 * failed.
4207 		 */
4208 		if (ret == 0)
4209 			BUG(); /* FIXME break ? */
4210 
4211 		ret = btrfs_previous_item(chunk_root, path, 0,
4212 					  BTRFS_CHUNK_ITEM_KEY);
4213 		if (ret) {
4214 			mutex_unlock(&fs_info->reclaim_bgs_lock);
4215 			ret = 0;
4216 			break;
4217 		}
4218 
4219 		leaf = path->nodes[0];
4220 		slot = path->slots[0];
4221 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
4222 
4223 		if (found_key.objectid != key.objectid) {
4224 			mutex_unlock(&fs_info->reclaim_bgs_lock);
4225 			break;
4226 		}
4227 
4228 		chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
4229 		chunk_type = btrfs_chunk_type(leaf, chunk);
4230 
4231 		if (!counting) {
4232 			spin_lock(&fs_info->balance_lock);
4233 			bctl->stat.considered++;
4234 			spin_unlock(&fs_info->balance_lock);
4235 		}
4236 
4237 		ret = should_balance_chunk(leaf, chunk, found_key.offset);
4238 
4239 		btrfs_release_path(path);
4240 		if (!ret) {
4241 			mutex_unlock(&fs_info->reclaim_bgs_lock);
4242 			goto loop;
4243 		}
4244 
4245 		if (counting) {
4246 			mutex_unlock(&fs_info->reclaim_bgs_lock);
4247 			spin_lock(&fs_info->balance_lock);
4248 			bctl->stat.expected++;
4249 			spin_unlock(&fs_info->balance_lock);
4250 
4251 			if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
4252 				count_data++;
4253 			else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
4254 				count_sys++;
4255 			else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
4256 				count_meta++;
4257 
4258 			goto loop;
4259 		}
4260 
4261 		/*
4262 		 * Apply the limit_min filter; no need to check whether the
4263 		 * LIMITS filter is used, as limit_min is 0 by default.
4264 		 */
4265 		if (((chunk_type & BTRFS_BLOCK_GROUP_DATA) &&
4266 					count_data < bctl->data.limit_min)
4267 				|| ((chunk_type & BTRFS_BLOCK_GROUP_METADATA) &&
4268 					count_meta < bctl->meta.limit_min)
4269 				|| ((chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) &&
4270 					count_sys < bctl->sys.limit_min)) {
4271 			mutex_unlock(&fs_info->reclaim_bgs_lock);
4272 			goto loop;
4273 		}
4274 
4275 		if (!chunk_reserved) {
4276 			/*
4277 			 * We may be relocating the only data chunk we have,
4278 			 * which could potentially end up with losing data's
4279 			 * raid profile, so lets allocate an empty one in
4280 			 * raid profile, so let's allocate an empty one in
4281 			 */
4282 			ret = btrfs_may_alloc_data_chunk(fs_info,
4283 							 found_key.offset);
4284 			if (ret < 0) {
4285 				mutex_unlock(&fs_info->reclaim_bgs_lock);
4286 				goto error;
4287 			} else if (ret == 1) {
4288 				chunk_reserved = 1;
4289 			}
4290 		}
4291 
4292 		ret = btrfs_relocate_chunk(fs_info, found_key.offset);
4293 		mutex_unlock(&fs_info->reclaim_bgs_lock);
4294 		if (ret == -ENOSPC) {
4295 			enospc_errors++;
4296 		} else if (ret == -ETXTBSY) {
4297 			btrfs_info(fs_info,
4298 	   "skipping relocation of block group %llu due to active swapfile",
4299 				   found_key.offset);
4300 			ret = 0;
4301 		} else if (ret) {
4302 			goto error;
4303 		} else {
4304 			spin_lock(&fs_info->balance_lock);
4305 			bctl->stat.completed++;
4306 			spin_unlock(&fs_info->balance_lock);
4307 		}
4308 loop:
4309 		if (found_key.offset == 0)
4310 			break;
4311 		key.offset = found_key.offset - 1;
4312 	}
4313 
4314 	if (counting) {
4315 		btrfs_release_path(path);
4316 		counting = false;
4317 		goto again;
4318 	}
4319 error:
4320 	btrfs_free_path(path);
4321 	if (enospc_errors) {
4322 		btrfs_info(fs_info, "%d enospc errors during balance",
4323 			   enospc_errors);
4324 		if (!ret)
4325 			ret = -ENOSPC;
4326 	}
4327 
4328 	return ret;
4329 }
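
/*
 * Note the two-pass structure of __btrfs_balance(): the first walk over
 * the chunk tree (counting == true) only counts chunks that pass the
 * filters, feeding bctl->stat.expected and the per-type counters used
 * by the limit_min check, then the walk restarts with counting == false
 * to do the actual relocations.
 */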
4330 
4331 /*
4332  * See if a given profile is valid and reduced.
4333  *
4334  * @flags:     profile to validate
4335  * @extended:  if true @flags is treated as an extended profile
4336  */
4337 static int alloc_profile_is_valid(u64 flags, int extended)
4338 {
4339 	u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK :
4340 			       BTRFS_BLOCK_GROUP_PROFILE_MASK);
4341 
4342 	flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;
4343 
4344 	/* 1) check that all other bits are zeroed */
4345 	if (flags & ~mask)
4346 		return 0;
4347 
4348 	/* 2) see if profile is reduced */
4349 	if (flags == 0)
4350 		return !extended; /* "0" is valid for usual profiles */
4351 
4352 	return has_single_bit_set(flags);
4353 }
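
/*
 * Examples: BTRFS_BLOCK_GROUP_RAID1 alone is valid and reduced;
 * RAID1 | RAID10 has two profile bits set and is rejected by
 * has_single_bit_set(); 0 is valid only for non-extended profiles,
 * where it denotes SINGLE.
 */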
4354 
4355 /*
4356  * Validate target profile against allowed profiles and return true if it's OK.
4357  * Otherwise print the error message and return false.
4358  */
4359 static inline int validate_convert_profile(struct btrfs_fs_info *fs_info,
4360 		const struct btrfs_balance_args *bargs,
4361 		u64 allowed, const char *type)
4362 {
4363 	if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
4364 		return true;
4365 
4366 	/* Profile is valid and does not have bits outside of the allowed set */
4367 	if (alloc_profile_is_valid(bargs->target, 1) &&
4368 	    (bargs->target & ~allowed) == 0)
4369 		return true;
4370 
4371 	btrfs_err(fs_info, "balance: invalid convert %s profile %s",
4372 			type, btrfs_bg_type_to_raid_name(bargs->target));
4373 	return false;
4374 }
4375 
4376 /*
4377  * Fill @buf with textual description of balance filter flags @bargs, up to
4378  * @size_buf including the terminating null. The output may be trimmed if it
4379  * does not fit into the provided buffer.
4380  */
4381 static void describe_balance_args(struct btrfs_balance_args *bargs, char *buf,
4382 				 u32 size_buf)
4383 {
4384 	int ret;
4385 	u32 size_bp = size_buf;
4386 	char *bp = buf;
4387 	u64 flags = bargs->flags;
4388 	char tmp_buf[128] = {'\0'};
4389 
4390 	if (!flags)
4391 		return;
4392 
4393 #define CHECK_APPEND_NOARG(a)						\
4394 	do {								\
4395 		ret = snprintf(bp, size_bp, (a));			\
4396 		if (ret < 0 || ret >= size_bp)				\
4397 			goto out_overflow;				\
4398 		size_bp -= ret;						\
4399 		bp += ret;						\
4400 	} while (0)
4401 
4402 #define CHECK_APPEND_1ARG(a, v1)					\
4403 	do {								\
4404 		ret = snprintf(bp, size_bp, (a), (v1));			\
4405 		if (ret < 0 || ret >= size_bp)				\
4406 			goto out_overflow;				\
4407 		size_bp -= ret;						\
4408 		bp += ret;						\
4409 	} while (0)
4410 
4411 #define CHECK_APPEND_2ARG(a, v1, v2)					\
4412 	do {								\
4413 		ret = snprintf(bp, size_bp, (a), (v1), (v2));		\
4414 		if (ret < 0 || ret >= size_bp)				\
4415 			goto out_overflow;				\
4416 		size_bp -= ret;						\
4417 		bp += ret;						\
4418 	} while (0)
4419 
4420 	if (flags & BTRFS_BALANCE_ARGS_CONVERT)
4421 		CHECK_APPEND_1ARG("convert=%s,",
4422 				  btrfs_bg_type_to_raid_name(bargs->target));
4423 
4424 	if (flags & BTRFS_BALANCE_ARGS_SOFT)
4425 		CHECK_APPEND_NOARG("soft,");
4426 
4427 	if (flags & BTRFS_BALANCE_ARGS_PROFILES) {
4428 		btrfs_describe_block_groups(bargs->profiles, tmp_buf,
4429 					    sizeof(tmp_buf));
4430 		CHECK_APPEND_1ARG("profiles=%s,", tmp_buf);
4431 	}
4432 
4433 	if (flags & BTRFS_BALANCE_ARGS_USAGE)
4434 		CHECK_APPEND_1ARG("usage=%llu,", bargs->usage);
4435 
4436 	if (flags & BTRFS_BALANCE_ARGS_USAGE_RANGE)
4437 		CHECK_APPEND_2ARG("usage=%u..%u,",
4438 				  bargs->usage_min, bargs->usage_max);
4439 
4440 	if (flags & BTRFS_BALANCE_ARGS_DEVID)
4441 		CHECK_APPEND_1ARG("devid=%llu,", bargs->devid);
4442 
4443 	if (flags & BTRFS_BALANCE_ARGS_DRANGE)
4444 		CHECK_APPEND_2ARG("drange=%llu..%llu,",
4445 				  bargs->pstart, bargs->pend);
4446 
4447 	if (flags & BTRFS_BALANCE_ARGS_VRANGE)
4448 		CHECK_APPEND_2ARG("vrange=%llu..%llu,",
4449 				  bargs->vstart, bargs->vend);
4450 
4451 	if (flags & BTRFS_BALANCE_ARGS_LIMIT)
4452 		CHECK_APPEND_1ARG("limit=%llu,", bargs->limit);
4453 
4454 	if (flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)
4455 		CHECK_APPEND_2ARG("limit=%u..%u,",
4456 				bargs->limit_min, bargs->limit_max);
4457 
4458 	if (flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE)
4459 		CHECK_APPEND_2ARG("stripes=%u..%u,",
4460 				  bargs->stripes_min, bargs->stripes_max);
4461 
4462 #undef CHECK_APPEND_2ARG
4463 #undef CHECK_APPEND_1ARG
4464 #undef CHECK_APPEND_NOARG
4465 
4466 out_overflow:
4467 
4468 	if (size_bp < size_buf)
4469 		buf[size_buf - size_bp - 1] = '\0'; /* remove last , */
4470 	else
4471 		buf[0] = '\0';
4472 }
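
/*
 * Example output (illustrative): a data convert to raid1 combined with
 * a usage filter is rendered as "convert=raid1,usage=90"; each
 * CHECK_APPEND_*() adds a trailing comma and the final one is stripped
 * at out_overflow.
 */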
4473 
4474 static void describe_balance_start_or_resume(struct btrfs_fs_info *fs_info)
4475 {
4476 	u32 size_buf = 1024;
4477 	char tmp_buf[192] = {'\0'};
4478 	char *buf;
4479 	char *bp;
4480 	u32 size_bp = size_buf;
4481 	int ret;
4482 	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
4483 
4484 	buf = kzalloc(size_buf, GFP_KERNEL);
4485 	if (!buf)
4486 		return;
4487 
4488 	bp = buf;
4489 
4490 #define CHECK_APPEND_1ARG(a, v1)					\
4491 	do {								\
4492 		ret = snprintf(bp, size_bp, (a), (v1));			\
4493 		if (ret < 0 || ret >= size_bp)				\
4494 			goto out_overflow;				\
4495 		size_bp -= ret;						\
4496 		bp += ret;						\
4497 	} while (0)
4498 
4499 	if (bctl->flags & BTRFS_BALANCE_FORCE)
4500 		CHECK_APPEND_1ARG("%s", "-f ");
4501 
4502 	if (bctl->flags & BTRFS_BALANCE_DATA) {
4503 		describe_balance_args(&bctl->data, tmp_buf, sizeof(tmp_buf));
4504 		CHECK_APPEND_1ARG("-d%s ", tmp_buf);
4505 	}
4506 
4507 	if (bctl->flags & BTRFS_BALANCE_METADATA) {
4508 		describe_balance_args(&bctl->meta, tmp_buf, sizeof(tmp_buf));
4509 		CHECK_APPEND_1ARG("-m%s ", tmp_buf);
4510 	}
4511 
4512 	if (bctl->flags & BTRFS_BALANCE_SYSTEM) {
4513 		describe_balance_args(&bctl->sys, tmp_buf, sizeof(tmp_buf));
4514 		CHECK_APPEND_1ARG("-s%s ", tmp_buf);
4515 	}
4516 
4517 #undef CHECK_APPEND_1ARG
4518 
4519 out_overflow:
4520 
4521 	if (size_bp < size_buf)
4522 		buf[size_buf - size_bp - 1] = '\0'; /* remove last " " */
4523 	btrfs_info(fs_info, "balance: %s %s",
4524 		   (bctl->flags & BTRFS_BALANCE_RESUME) ?
4525 		   "resume" : "start", buf);
4526 
4527 	kfree(buf);
4528 }
4529 
4530 /*
4531  * Should be called with the balance mutex held.
4532  */
4533 int btrfs_balance(struct btrfs_fs_info *fs_info,
4534 		  struct btrfs_balance_control *bctl,
4535 		  struct btrfs_ioctl_balance_args *bargs)
4536 {
4537 	u64 meta_target, data_target;
4538 	u64 allowed;
4539 	int mixed = 0;
4540 	int ret;
4541 	u64 num_devices;
4542 	unsigned seq;
4543 	bool reducing_redundancy;
4544 	bool paused = false;
4545 	int i;
4546 
4547 	if (btrfs_fs_closing(fs_info) ||
4548 	    atomic_read(&fs_info->balance_pause_req) ||
4549 	    btrfs_should_cancel_balance(fs_info)) {
4550 		ret = -EINVAL;
4551 		goto out;
4552 	}
4553 
4554 	allowed = btrfs_super_incompat_flags(fs_info->super_copy);
4555 	if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
4556 		mixed = 1;
4557 
4558 	/*
4559 	 * In case of mixed groups both data and meta should be picked,
4560 	 * and identical options should be given for both of them.
4561 	 */
4562 	allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA;
4563 	if (mixed && (bctl->flags & allowed)) {
4564 		if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
4565 		    !(bctl->flags & BTRFS_BALANCE_METADATA) ||
4566 		    memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
4567 			btrfs_err(fs_info,
4568 	  "balance: mixed groups data and metadata options must be the same");
4569 			ret = -EINVAL;
4570 			goto out;
4571 		}
4572 	}
4573 
4574 	/*
4575 	 * rw_devices will not change at the moment, device add/delete/replace
4576 	 * rw_devices will not change at the moment, since device
4577 	 * add/delete/replace are exclusive operations.
4578 	num_devices = fs_info->fs_devices->rw_devices;
4579 
4580 	/*
4581 	 * SINGLE profile on-disk has no profile bit, but in-memory we have a
4582 	 * special bit for it, to make it easier to distinguish.  Thus we need
4583 	 * to set it manually, or balance would refuse the profile.
4584 	 */
4585 	allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
4586 	for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++)
4587 		if (num_devices >= btrfs_raid_array[i].devs_min)
4588 			allowed |= btrfs_raid_array[i].bg_flag;
4589 
4590 	if (!validate_convert_profile(fs_info, &bctl->data, allowed, "data") ||
4591 	    !validate_convert_profile(fs_info, &bctl->meta, allowed, "metadata") ||
4592 	    !validate_convert_profile(fs_info, &bctl->sys,  allowed, "system")) {
4593 		ret = -EINVAL;
4594 		goto out;
4595 	}
4596 
4597 	/*
4598 	 * Allow reducing metadata or system integrity only if force is set,
4599 	 * for profiles with redundancy (copies, parity).
4600 	 */
4601 	allowed = 0;
4602 	for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++) {
4603 		if (btrfs_raid_array[i].ncopies >= 2 ||
4604 		    btrfs_raid_array[i].tolerated_failures >= 1)
4605 			allowed |= btrfs_raid_array[i].bg_flag;
4606 	}
4607 	do {
4608 		seq = read_seqbegin(&fs_info->profiles_lock);
4609 
4610 		if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
4611 		     (fs_info->avail_system_alloc_bits & allowed) &&
4612 		     !(bctl->sys.target & allowed)) ||
4613 		    ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
4614 		     (fs_info->avail_metadata_alloc_bits & allowed) &&
4615 		     !(bctl->meta.target & allowed)))
4616 			reducing_redundancy = true;
4617 		else
4618 			reducing_redundancy = false;
4619 
4620 		/* if we're not converting, the target field is uninitialized */
4621 		meta_target = (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
4622 			bctl->meta.target : fs_info->avail_metadata_alloc_bits;
4623 		data_target = (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
4624 			bctl->data.target : fs_info->avail_data_alloc_bits;
4625 	} while (read_seqretry(&fs_info->profiles_lock, seq));
4626 
4627 	if (reducing_redundancy) {
4628 		if (bctl->flags & BTRFS_BALANCE_FORCE) {
4629 			btrfs_info(fs_info,
4630 			   "balance: force reducing metadata redundancy");
4631 		} else {
4632 			btrfs_err(fs_info,
4633 	"balance: reduces metadata redundancy, use --force if you want this");
4634 			ret = -EINVAL;
4635 			goto out;
4636 		}
4637 	}
4638 
4639 	if (btrfs_get_num_tolerated_disk_barrier_failures(meta_target) <
4640 		btrfs_get_num_tolerated_disk_barrier_failures(data_target)) {
4641 		btrfs_warn(fs_info,
4642 	"balance: metadata profile %s has lower redundancy than data profile %s",
4643 				btrfs_bg_type_to_raid_name(meta_target),
4644 				btrfs_bg_type_to_raid_name(data_target));
4645 	}
4646 
4647 	ret = insert_balance_item(fs_info, bctl);
4648 	if (ret && ret != -EEXIST)
4649 		goto out;
4650 
4651 	if (!(bctl->flags & BTRFS_BALANCE_RESUME)) {
4652 		BUG_ON(ret == -EEXIST);
4653 		BUG_ON(fs_info->balance_ctl);
4654 		spin_lock(&fs_info->balance_lock);
4655 		fs_info->balance_ctl = bctl;
4656 		spin_unlock(&fs_info->balance_lock);
4657 	} else {
4658 		BUG_ON(ret != -EEXIST);
4659 		spin_lock(&fs_info->balance_lock);
4660 		update_balance_args(bctl);
4661 		spin_unlock(&fs_info->balance_lock);
4662 	}
4663 
4664 	ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4665 	set_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags);
4666 	describe_balance_start_or_resume(fs_info);
4667 	mutex_unlock(&fs_info->balance_mutex);
4668 
4669 	ret = __btrfs_balance(fs_info);
4670 
4671 	mutex_lock(&fs_info->balance_mutex);
4672 	if (ret == -ECANCELED && atomic_read(&fs_info->balance_pause_req)) {
4673 		btrfs_info(fs_info, "balance: paused");
4674 		btrfs_exclop_balance(fs_info, BTRFS_EXCLOP_BALANCE_PAUSED);
4675 		paused = true;
4676 	}
4677 	/*
4678 	 * Balance can be canceled by:
4679 	 *
4680 	 * - Regular cancel request
4681 	 *   Then ret == -ECANCELED and balance_cancel_req > 0
4682 	 *
4683 	 * - Fatal signal to "btrfs" process
4684 	 *   Either the signal caught by wait_reserve_ticket() and callers
4685 	 *   got -EINTR, or caught by btrfs_should_cancel_balance() and
4686 	 *   got -ECANCELED.
4687 	 *   Either way, in this case balance_cancel_req = 0, and
4688 	 *   ret == -EINTR or ret == -ECANCELED.
4689 	 *
4690 	 * So here we only check the return value to catch canceled balance.
4691 	 */
4692 	else if (ret == -ECANCELED || ret == -EINTR)
4693 		btrfs_info(fs_info, "balance: canceled");
4694 	else
4695 		btrfs_info(fs_info, "balance: ended with status: %d", ret);
4696 
4697 	clear_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags);
4698 
4699 	if (bargs) {
4700 		memset(bargs, 0, sizeof(*bargs));
4701 		btrfs_update_ioctl_balance_args(fs_info, bargs);
4702 	}
4703 
4704 	/* We didn't pause, we can clean everything up. */
4705 	if (!paused) {
4706 		reset_balance_state(fs_info);
4707 		btrfs_exclop_finish(fs_info);
4708 	}
4709 
4710 	wake_up(&fs_info->balance_wait_q);
4711 
4712 	return ret;
4713 out:
4714 	if (bctl->flags & BTRFS_BALANCE_RESUME)
4715 		reset_balance_state(fs_info);
4716 	else
4717 		kfree(bctl);
4718 	btrfs_exclop_finish(fs_info);
4719 
4720 	return ret;
4721 }
4722 
4723 static int balance_kthread(void *data)
4724 {
4725 	struct btrfs_fs_info *fs_info = data;
4726 	int ret = 0;
4727 
4728 	sb_start_write(fs_info->sb);
4729 	mutex_lock(&fs_info->balance_mutex);
4730 	if (fs_info->balance_ctl)
4731 		ret = btrfs_balance(fs_info, fs_info->balance_ctl, NULL);
4732 	mutex_unlock(&fs_info->balance_mutex);
4733 	sb_end_write(fs_info->sb);
4734 
4735 	return ret;
4736 }
4737 
4738 int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
4739 {
4740 	struct task_struct *tsk;
4741 
4742 	mutex_lock(&fs_info->balance_mutex);
4743 	if (!fs_info->balance_ctl) {
4744 		mutex_unlock(&fs_info->balance_mutex);
4745 		return 0;
4746 	}
4747 	mutex_unlock(&fs_info->balance_mutex);
4748 
4749 	if (btrfs_test_opt(fs_info, SKIP_BALANCE)) {
4750 		btrfs_info(fs_info, "balance: resume skipped");
4751 		return 0;
4752 	}
4753 
4754 	spin_lock(&fs_info->super_lock);
4755 	ASSERT(fs_info->exclusive_operation == BTRFS_EXCLOP_BALANCE_PAUSED);
4756 	fs_info->exclusive_operation = BTRFS_EXCLOP_BALANCE;
4757 	spin_unlock(&fs_info->super_lock);
4758 	/*
4759 	 * A ro->rw remount sequence should continue with the paused balance
4760 	 * regardless of who paused it, the system or the user, so set the
4761 	 * resume flag.
4762 	 */
4763 	spin_lock(&fs_info->balance_lock);
4764 	fs_info->balance_ctl->flags |= BTRFS_BALANCE_RESUME;
4765 	spin_unlock(&fs_info->balance_lock);
4766 
4767 	tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
4768 	return PTR_ERR_OR_ZERO(tsk);
4769 }
4770 
4771 int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
4772 {
4773 	struct btrfs_balance_control *bctl;
4774 	struct btrfs_balance_item *item;
4775 	struct btrfs_disk_balance_args disk_bargs;
4776 	struct btrfs_path *path;
4777 	struct extent_buffer *leaf;
4778 	struct btrfs_key key;
4779 	int ret;
4780 
4781 	path = btrfs_alloc_path();
4782 	if (!path)
4783 		return -ENOMEM;
4784 
4785 	key.objectid = BTRFS_BALANCE_OBJECTID;
4786 	key.type = BTRFS_TEMPORARY_ITEM_KEY;
4787 	key.offset = 0;
4788 
4789 	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
4790 	if (ret < 0)
4791 		goto out;
4792 	if (ret > 0) { /* ret = -ENOENT; */
4793 		ret = 0;
4794 		goto out;
4795 	}
4796 
4797 	bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
4798 	if (!bctl) {
4799 		ret = -ENOMEM;
4800 		goto out;
4801 	}
4802 
4803 	leaf = path->nodes[0];
4804 	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
4805 
4806 	bctl->flags = btrfs_balance_flags(leaf, item);
4807 	bctl->flags |= BTRFS_BALANCE_RESUME;
4808 
4809 	btrfs_balance_data(leaf, item, &disk_bargs);
4810 	btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
4811 	btrfs_balance_meta(leaf, item, &disk_bargs);
4812 	btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs);
4813 	btrfs_balance_sys(leaf, item, &disk_bargs);
4814 	btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);
4815 
4816 	/*
4817 	 * This should never happen, as the paused balance state is recovered
4818 	 * during mount without any chance for other exclusive ops to collide.
4819 	 *
4820 	 * This gives the exclusive op status to balance and keeps it in the
4821 	 * paused state until user intervention (cancel or umount). If the ownership
4822 	 * cannot be assigned, show a message but do not fail. The balance
4823 	 * is in a paused state and must have fs_info::balance_ctl properly
4824 	 * set up.
4825 	 */
4826 	if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE_PAUSED))
4827 		btrfs_warn(fs_info,
4828 	"balance: cannot set exclusive op status, resume manually");
4829 
4830 	btrfs_release_path(path);
4831 
4832 	mutex_lock(&fs_info->balance_mutex);
4833 	BUG_ON(fs_info->balance_ctl);
4834 	spin_lock(&fs_info->balance_lock);
4835 	fs_info->balance_ctl = bctl;
4836 	spin_unlock(&fs_info->balance_lock);
4837 	mutex_unlock(&fs_info->balance_mutex);
4838 out:
4839 	btrfs_free_path(path);
4840 	return ret;
4841 }
4842 
4843 int btrfs_pause_balance(struct btrfs_fs_info *fs_info)
4844 {
4845 	int ret = 0;
4846 
4847 	mutex_lock(&fs_info->balance_mutex);
4848 	if (!fs_info->balance_ctl) {
4849 		mutex_unlock(&fs_info->balance_mutex);
4850 		return -ENOTCONN;
4851 	}
4852 
4853 	if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
4854 		atomic_inc(&fs_info->balance_pause_req);
4855 		mutex_unlock(&fs_info->balance_mutex);
4856 
4857 		wait_event(fs_info->balance_wait_q,
4858 			   !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4859 
4860 		mutex_lock(&fs_info->balance_mutex);
4861 		/* we are good with balance_ctl ripped off from under us */
4862 		BUG_ON(test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4863 		atomic_dec(&fs_info->balance_pause_req);
4864 	} else {
4865 		ret = -ENOTCONN;
4866 	}
4867 
4868 	mutex_unlock(&fs_info->balance_mutex);
4869 	return ret;
4870 }
4871 
4872 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
4873 {
4874 	mutex_lock(&fs_info->balance_mutex);
4875 	if (!fs_info->balance_ctl) {
4876 		mutex_unlock(&fs_info->balance_mutex);
4877 		return -ENOTCONN;
4878 	}
4879 
4880 	/*
4881 	 * A paused balance with the item stored on disk can be resumed at
4882 	 * mount time if the mount is read-write. Otherwise it's still paused
4883 	 * and we must not allow cancelling as it deletes the item.
4884 	 */
4885 	if (sb_rdonly(fs_info->sb)) {
4886 		mutex_unlock(&fs_info->balance_mutex);
4887 		return -EROFS;
4888 	}
4889 
4890 	atomic_inc(&fs_info->balance_cancel_req);
4891 	/*
4892 	 * If balance is running, just wait and return; the balance item
4893 	 * is deleted in btrfs_balance() in that case.
4894 	 */
4895 	if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
4896 		mutex_unlock(&fs_info->balance_mutex);
4897 		wait_event(fs_info->balance_wait_q,
4898 			   !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4899 		mutex_lock(&fs_info->balance_mutex);
4900 	} else {
4901 		mutex_unlock(&fs_info->balance_mutex);
4902 		/*
4903 		 * Lock released to allow other waiters to continue; we'll
4904 		 * now reexamine the status.
4905 		 */
4906 		mutex_lock(&fs_info->balance_mutex);
4907 
4908 		if (fs_info->balance_ctl) {
4909 			reset_balance_state(fs_info);
4910 			btrfs_exclop_finish(fs_info);
4911 			btrfs_info(fs_info, "balance: canceled");
4912 		}
4913 	}
4914 
4915 	ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4916 	atomic_dec(&fs_info->balance_cancel_req);
4917 	mutex_unlock(&fs_info->balance_mutex);
4918 	return 0;
4919 }
4920 
4921 /*
4922  * Shrinking a device means finding all of the device extents past
4923  * the new size, and then following the back refs to the chunks.
4924  * The chunk relocation code actually frees the device extents.
4925  */
4926 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
4927 {
4928 	struct btrfs_fs_info *fs_info = device->fs_info;
4929 	struct btrfs_root *root = fs_info->dev_root;
4930 	struct btrfs_trans_handle *trans;
4931 	struct btrfs_dev_extent *dev_extent = NULL;
4932 	struct btrfs_path *path;
4933 	u64 length;
4934 	u64 chunk_offset;
4935 	int ret;
4936 	int slot;
4937 	int failed = 0;
4938 	bool retried = false;
4939 	struct extent_buffer *l;
4940 	struct btrfs_key key;
4941 	struct btrfs_super_block *super_copy = fs_info->super_copy;
4942 	u64 old_total = btrfs_super_total_bytes(super_copy);
4943 	u64 old_size = btrfs_device_get_total_bytes(device);
4944 	u64 diff;
4945 	u64 start;
4946 	u64 free_diff = 0;
4947 
4948 	new_size = round_down(new_size, fs_info->sectorsize);
4949 	start = new_size;
4950 	diff = round_down(old_size - new_size, fs_info->sectorsize);
4951 
4952 	if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
4953 		return -EINVAL;
4954 
4955 	path = btrfs_alloc_path();
4956 	if (!path)
4957 		return -ENOMEM;
4958 
4959 	path->reada = READA_BACK;
4960 
4961 	trans = btrfs_start_transaction(root, 0);
4962 	if (IS_ERR(trans)) {
4963 		btrfs_free_path(path);
4964 		return PTR_ERR(trans);
4965 	}
4966 
4967 	mutex_lock(&fs_info->chunk_mutex);
4968 
4969 	btrfs_device_set_total_bytes(device, new_size);
4970 	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
4971 		device->fs_devices->total_rw_bytes -= diff;
4972 
4973 		/*
4974 		 * The new free_chunk_space is new_size - used, so we have to
4975 		 * subtract the delta of the old free_chunk_space which included
4976 		 * old_size - used.  If used > new_size then just subtract this
4977 		 * entire device's free space.
4978 		 */
4979 		if (device->bytes_used < new_size)
4980 			free_diff = (old_size - device->bytes_used) -
4981 				    (new_size - device->bytes_used);
4982 		else
4983 			free_diff = old_size - device->bytes_used;
4984 		atomic64_sub(free_diff, &fs_info->free_chunk_space);
4985 	}
4986 
4987 	/*
4988 	 * Once the device's size has been set to the new size, ensure all
4989 	 * in-memory chunks are synced to disk so that the loop below sees them
4990 	 * and relocates them accordingly.
4991 	 */
4992 	if (contains_pending_extent(device, &start, diff)) {
4993 		mutex_unlock(&fs_info->chunk_mutex);
4994 		ret = btrfs_commit_transaction(trans);
4995 		if (ret)
4996 			goto done;
4997 	} else {
4998 		mutex_unlock(&fs_info->chunk_mutex);
4999 		btrfs_end_transaction(trans);
5000 	}
5001 
5002 again:
5003 	key.objectid = device->devid;
5004 	key.offset = (u64)-1;
5005 	key.type = BTRFS_DEV_EXTENT_KEY;
5006 
5007 	do {
5008 		mutex_lock(&fs_info->reclaim_bgs_lock);
5009 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5010 		if (ret < 0) {
5011 			mutex_unlock(&fs_info->reclaim_bgs_lock);
5012 			goto done;
5013 		}
5014 
5015 		ret = btrfs_previous_item(root, path, 0, key.type);
5016 		if (ret) {
5017 			mutex_unlock(&fs_info->reclaim_bgs_lock);
5018 			if (ret < 0)
5019 				goto done;
5020 			ret = 0;
5021 			btrfs_release_path(path);
5022 			break;
5023 		}
5024 
5025 		l = path->nodes[0];
5026 		slot = path->slots[0];
5027 		btrfs_item_key_to_cpu(l, &key, path->slots[0]);
5028 
5029 		if (key.objectid != device->devid) {
5030 			mutex_unlock(&fs_info->reclaim_bgs_lock);
5031 			btrfs_release_path(path);
5032 			break;
5033 		}
5034 
5035 		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
5036 		length = btrfs_dev_extent_length(l, dev_extent);
5037 
5038 		if (key.offset + length <= new_size) {
5039 			mutex_unlock(&fs_info->reclaim_bgs_lock);
5040 			btrfs_release_path(path);
5041 			break;
5042 		}
5043 
5044 		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
5045 		btrfs_release_path(path);
5046 
5047 		/*
5048 		 * We may be relocating the only data chunk we have,
5049 		 * which could potentially end up with losing data's
5050 		 * raid profile, so let's allocate an empty one in
5051 		 * advance.
5052 		 */
5053 		ret = btrfs_may_alloc_data_chunk(fs_info, chunk_offset);
5054 		if (ret < 0) {
5055 			mutex_unlock(&fs_info->reclaim_bgs_lock);
5056 			goto done;
5057 		}
5058 
5059 		ret = btrfs_relocate_chunk(fs_info, chunk_offset);
5060 		mutex_unlock(&fs_info->reclaim_bgs_lock);
5061 		if (ret == -ENOSPC) {
5062 			failed++;
5063 		} else if (ret) {
5064 			if (ret == -ETXTBSY) {
5065 				btrfs_warn(fs_info,
5066 		   "could not shrink block group %llu due to active swapfile",
5067 					   chunk_offset);
5068 			}
5069 			goto done;
5070 		}
5071 	} while (key.offset-- > 0);
5072 
5073 	if (failed && !retried) {
5074 		failed = 0;
5075 		retried = true;
5076 		goto again;
5077 	} else if (failed && retried) {
5078 		ret = -ENOSPC;
5079 		goto done;
5080 	}
5081 
5082 	/* Shrinking succeeded, else we would be at "done". */
5083 	trans = btrfs_start_transaction(root, 0);
5084 	if (IS_ERR(trans)) {
5085 		ret = PTR_ERR(trans);
5086 		goto done;
5087 	}
5088 
5089 	mutex_lock(&fs_info->chunk_mutex);
5090 	/* Clear all state bits beyond the shrunk device size */
5091 	clear_extent_bits(&device->alloc_state, new_size, (u64)-1,
5092 			  CHUNK_STATE_MASK);
5093 
5094 	btrfs_device_set_disk_total_bytes(device, new_size);
5095 	if (list_empty(&device->post_commit_list))
5096 		list_add_tail(&device->post_commit_list,
5097 			      &trans->transaction->dev_update_list);
5098 
5099 	WARN_ON(diff > old_total);
5100 	btrfs_set_super_total_bytes(super_copy,
5101 			round_down(old_total - diff, fs_info->sectorsize));
5102 	mutex_unlock(&fs_info->chunk_mutex);
5103 
5104 	btrfs_reserve_chunk_metadata(trans, false);
5105 	/* Now btrfs_update_device() will change the on-disk size. */
5106 	ret = btrfs_update_device(trans, device);
5107 	btrfs_trans_release_chunk_metadata(trans);
5108 	if (ret < 0) {
5109 		btrfs_abort_transaction(trans, ret);
5110 		btrfs_end_transaction(trans);
5111 	} else {
5112 		ret = btrfs_commit_transaction(trans);
5113 	}
5114 done:
5115 	btrfs_free_path(path);
5116 	if (ret) {
5117 		mutex_lock(&fs_info->chunk_mutex);
5118 		btrfs_device_set_total_bytes(device, old_size);
5119 		if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
5120 			device->fs_devices->total_rw_bytes += diff;
5121 			atomic64_add(free_diff, &fs_info->free_chunk_space);
5122 		}
5123 		mutex_unlock(&fs_info->chunk_mutex);
5124 	}
5125 	return ret;
5126 }
5127 
5128 static int btrfs_add_system_chunk(struct btrfs_fs_info *fs_info,
5129 			   struct btrfs_key *key,
5130 			   struct btrfs_chunk *chunk, int item_size)
5131 {
5132 	struct btrfs_super_block *super_copy = fs_info->super_copy;
5133 	struct btrfs_disk_key disk_key;
5134 	u32 array_size;
5135 	u8 *ptr;
5136 
5137 	lockdep_assert_held(&fs_info->chunk_mutex);
5138 
5139 	array_size = btrfs_super_sys_array_size(super_copy);
5140 	if (array_size + item_size + sizeof(disk_key)
5141 			> BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
5142 		return -EFBIG;
5143 
5144 	ptr = super_copy->sys_chunk_array + array_size;
5145 	btrfs_cpu_key_to_disk(&disk_key, key);
5146 	memcpy(ptr, &disk_key, sizeof(disk_key));
5147 	ptr += sizeof(disk_key);
5148 	memcpy(ptr, chunk, item_size);
5149 	item_size += sizeof(disk_key);
5150 	btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
5151 
5152 	return 0;
5153 }
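
/*
 * Illustrative sketch (not from the original source): the superblock's
 * sys_chunk_array is a packed sequence of (struct btrfs_disk_key,
 * struct btrfs_chunk) pairs, with each chunk item carrying its stripes
 * inline:
 *
 *   [disk_key][chunk + stripes][disk_key][chunk + stripes]...
 *
 * For a hypothetical two-stripe system chunk, item_size would be
 * btrfs_chunk_item_size(2) == sizeof(struct btrfs_chunk) +
 * sizeof(struct btrfs_stripe), and the pair consumes
 * sizeof(struct btrfs_disk_key) + item_size bytes of the
 * BTRFS_SYSTEM_CHUNK_ARRAY_SIZE budget, which is why the -EFBIG check
 * above accounts for both the key and the item.
 */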
5154 
5155 /*
5156  * Sort the devices in descending order by max_avail, then by total_avail.
5157  */
5158 static int btrfs_cmp_device_info(const void *a, const void *b)
5159 {
5160 	const struct btrfs_device_info *di_a = a;
5161 	const struct btrfs_device_info *di_b = b;
5162 
5163 	if (di_a->max_avail > di_b->max_avail)
5164 		return -1;
5165 	if (di_a->max_avail < di_b->max_avail)
5166 		return 1;
5167 	if (di_a->total_avail > di_b->total_avail)
5168 		return -1;
5169 	if (di_a->total_avail < di_b->total_avail)
5170 		return 1;
5171 	return 0;
5172 }
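
/*
 * Illustrative example (not from the original source): with this
 * comparator, the sort() in gather_device_info() puts the device with
 * the largest hole first.  Devices with (max_avail, total_avail) of
 * (2G, 5G), (2G, 3G) and (1G, 8G) end up in exactly that order:
 * max_avail is the primary key and total_avail only breaks ties.
 */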
5173 
5174 static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
5175 {
5176 	if (!(type & BTRFS_BLOCK_GROUP_RAID56_MASK))
5177 		return;
5178 
5179 	btrfs_set_fs_incompat(info, RAID56);
5180 }
5181 
5182 static void check_raid1c34_incompat_flag(struct btrfs_fs_info *info, u64 type)
5183 {
5184 	if (!(type & (BTRFS_BLOCK_GROUP_RAID1C3 | BTRFS_BLOCK_GROUP_RAID1C4)))
5185 		return;
5186 
5187 	btrfs_set_fs_incompat(info, RAID1C34);
5188 }
5189 
5190 /*
5191  * Structure used internally by the btrfs_create_chunk() function.
5192  * Wraps needed parameters.
5193  */
5194 struct alloc_chunk_ctl {
5195 	u64 start;
5196 	u64 type;
5197 	/* Total number of stripes to allocate */
5198 	int num_stripes;
5199 	/* sub_stripes info for map */
5200 	int sub_stripes;
5201 	/* Stripes per device */
5202 	int dev_stripes;
5203 	/* Maximum number of devices to use */
5204 	int devs_max;
5205 	/* Minimum number of devices to use */
5206 	int devs_min;
5207 	/* ndevs has to be a multiple of this */
5208 	int devs_increment;
5209 	/* Number of copies */
5210 	int ncopies;
5211 	/* Number of stripes worth of bytes to store parity information */
5212 	int nparity;
5213 	u64 max_stripe_size;
5214 	u64 max_chunk_size;
5215 	u64 dev_extent_min;
5216 	u64 stripe_size;
5217 	u64 chunk_size;
5218 	int ndevs;
5219 };
5220 
5221 static void init_alloc_chunk_ctl_policy_regular(
5222 				struct btrfs_fs_devices *fs_devices,
5223 				struct alloc_chunk_ctl *ctl)
5224 {
5225 	struct btrfs_space_info *space_info;
5226 
5227 	space_info = btrfs_find_space_info(fs_devices->fs_info, ctl->type);
5228 	ASSERT(space_info);
5229 
5230 	ctl->max_chunk_size = READ_ONCE(space_info->chunk_size);
5231 	ctl->max_stripe_size = min_t(u64, ctl->max_chunk_size, SZ_1G);
5232 
5233 	if (ctl->type & BTRFS_BLOCK_GROUP_SYSTEM)
5234 		ctl->devs_max = min_t(int, ctl->devs_max, BTRFS_MAX_DEVS_SYS_CHUNK);
5235 
5236 	/* We don't want a chunk larger than 10% of writable space */
5237 	ctl->max_chunk_size = min(mult_perc(fs_devices->total_rw_bytes, 10),
5238 				  ctl->max_chunk_size);
5239 	ctl->dev_extent_min = btrfs_stripe_nr_to_offset(ctl->dev_stripes);
5240 }
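
/*
 * Worked example (illustrative; the numbers are assumptions): for a
 * DATA chunk where space_info->chunk_size is 10GiB and total_rw_bytes
 * is 100GiB, max_stripe_size = min(10GiB, 1GiB) = 1GiB, and the 10%
 * cap keeps max_chunk_size at min(10GiB, 10GiB) = 10GiB.  With
 * dev_stripes == 1, dev_extent_min is a single BTRFS_STRIPE_LEN.
 */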
5241 
5242 static void init_alloc_chunk_ctl_policy_zoned(
5243 				      struct btrfs_fs_devices *fs_devices,
5244 				      struct alloc_chunk_ctl *ctl)
5245 {
5246 	u64 zone_size = fs_devices->fs_info->zone_size;
5247 	u64 limit;
5248 	int min_num_stripes = ctl->devs_min * ctl->dev_stripes;
5249 	int min_data_stripes = (min_num_stripes - ctl->nparity) / ctl->ncopies;
5250 	u64 min_chunk_size = min_data_stripes * zone_size;
5251 	u64 type = ctl->type;
5252 
5253 	ctl->max_stripe_size = zone_size;
5254 	if (type & BTRFS_BLOCK_GROUP_DATA) {
5255 		ctl->max_chunk_size = round_down(BTRFS_MAX_DATA_CHUNK_SIZE,
5256 						 zone_size);
5257 	} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
5258 		ctl->max_chunk_size = ctl->max_stripe_size;
5259 	} else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
5260 		ctl->max_chunk_size = 2 * ctl->max_stripe_size;
5261 		ctl->devs_max = min_t(int, ctl->devs_max,
5262 				      BTRFS_MAX_DEVS_SYS_CHUNK);
5263 	} else {
5264 		BUG();
5265 	}
5266 
5267 	/* We don't want a chunk larger than 10% of writable space */
5268 	limit = max(round_down(mult_perc(fs_devices->total_rw_bytes, 10),
5269 			       zone_size),
5270 		    min_chunk_size);
5271 	ctl->max_chunk_size = min(limit, ctl->max_chunk_size);
5272 	ctl->dev_extent_min = zone_size * ctl->dev_stripes;
5273 }
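
/*
 * Worked example (illustrative; the numbers are assumptions): for
 * RAID1 metadata on zoned devices with a 256MiB zone_size and 10GiB
 * of writable space, min_num_stripes = 2 * 1 = 2, min_data_stripes =
 * (2 - 0) / 2 = 1, so min_chunk_size = 256MiB.  The limit is
 * max(round_down(1GiB, 256MiB), 256MiB) = 1GiB and max_chunk_size =
 * min(1GiB, 256MiB) = 256MiB: exactly one zone per device stripe.
 */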
5274 
5275 static void init_alloc_chunk_ctl(struct btrfs_fs_devices *fs_devices,
5276 				 struct alloc_chunk_ctl *ctl)
5277 {
5278 	int index = btrfs_bg_flags_to_raid_index(ctl->type);
5279 
5280 	ctl->sub_stripes = btrfs_raid_array[index].sub_stripes;
5281 	ctl->dev_stripes = btrfs_raid_array[index].dev_stripes;
5282 	ctl->devs_max = btrfs_raid_array[index].devs_max;
5283 	if (!ctl->devs_max)
5284 		ctl->devs_max = BTRFS_MAX_DEVS(fs_devices->fs_info);
5285 	ctl->devs_min = btrfs_raid_array[index].devs_min;
5286 	ctl->devs_increment = btrfs_raid_array[index].devs_increment;
5287 	ctl->ncopies = btrfs_raid_array[index].ncopies;
5288 	ctl->nparity = btrfs_raid_array[index].nparity;
5289 	ctl->ndevs = 0;
5290 
5291 	switch (fs_devices->chunk_alloc_policy) {
5292 	case BTRFS_CHUNK_ALLOC_REGULAR:
5293 		init_alloc_chunk_ctl_policy_regular(fs_devices, ctl);
5294 		break;
5295 	case BTRFS_CHUNK_ALLOC_ZONED:
5296 		init_alloc_chunk_ctl_policy_zoned(fs_devices, ctl);
5297 		break;
5298 	default:
5299 		BUG();
5300 	}
5301 }
5302 
5303 static int gather_device_info(struct btrfs_fs_devices *fs_devices,
5304 			      struct alloc_chunk_ctl *ctl,
5305 			      struct btrfs_device_info *devices_info)
5306 {
5307 	struct btrfs_fs_info *info = fs_devices->fs_info;
5308 	struct btrfs_device *device;
5309 	u64 total_avail;
5310 	u64 dev_extent_want = ctl->max_stripe_size * ctl->dev_stripes;
5311 	int ret;
5312 	int ndevs = 0;
5313 	u64 max_avail;
5314 	u64 dev_offset;
5315 
5316 	/*
5317 	 * in the first pass through the devices list, we gather information
5318 	 * about the available holes on each device.
5319 	 */
5320 	list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
5321 		if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
5322 			WARN(1, KERN_ERR
5323 			       "BTRFS: read-only device in alloc_list\n");
5324 			continue;
5325 		}
5326 
5327 		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
5328 					&device->dev_state) ||
5329 		    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
5330 			continue;
5331 
5332 		if (device->total_bytes > device->bytes_used)
5333 			total_avail = device->total_bytes - device->bytes_used;
5334 		else
5335 			total_avail = 0;
5336 
5337 		/* If there is no space on this device, skip it. */
5338 		if (total_avail < ctl->dev_extent_min)
5339 			continue;
5340 
5341 		ret = find_free_dev_extent(device, dev_extent_want, &dev_offset,
5342 					   &max_avail);
5343 		if (ret && ret != -ENOSPC)
5344 			return ret;
5345 
5346 		if (ret == 0)
5347 			max_avail = dev_extent_want;
5348 
5349 		if (max_avail < ctl->dev_extent_min) {
5350 			if (btrfs_test_opt(info, ENOSPC_DEBUG))
5351 				btrfs_debug(info,
5352 			"%s: devid %llu has no free space, have=%llu want=%llu",
5353 					    __func__, device->devid, max_avail,
5354 					    ctl->dev_extent_min);
5355 			continue;
5356 		}
5357 
5358 		if (ndevs == fs_devices->rw_devices) {
5359 			WARN(1, "%s: found more than %llu devices\n",
5360 			     __func__, fs_devices->rw_devices);
5361 			break;
5362 		}
5363 		devices_info[ndevs].dev_offset = dev_offset;
5364 		devices_info[ndevs].max_avail = max_avail;
5365 		devices_info[ndevs].total_avail = total_avail;
5366 		devices_info[ndevs].dev = device;
5367 		++ndevs;
5368 	}
5369 	ctl->ndevs = ndevs;
5370 
5371 	/*
5372 	 * now sort the devices by hole size / available space
5373 	 */
5374 	sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
5375 	     btrfs_cmp_device_info, NULL);
5376 
5377 	return 0;
5378 }
5379 
5380 static int decide_stripe_size_regular(struct alloc_chunk_ctl *ctl,
5381 				      struct btrfs_device_info *devices_info)
5382 {
5383 	/* Number of stripes that count for block group size */
5384 	int data_stripes;
5385 
5386 	/*
5387 	 * The primary goal is to maximize the number of stripes, so use as
5388 	 * many devices as possible, even if the stripes are not maximum sized.
5389 	 *
5390 	 * The DUP profile stores more than one stripe per device; the
5391 	 * max_avail is the total size, so we have to adjust.
5392 	 */
5393 	ctl->stripe_size = div_u64(devices_info[ctl->ndevs - 1].max_avail,
5394 				   ctl->dev_stripes);
5395 	ctl->num_stripes = ctl->ndevs * ctl->dev_stripes;
5396 
5397 	/* This will have to be fixed for RAID1 and RAID10 over more drives */
5398 	data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies;
5399 
5400 	/*
5401 	 * Use the number of data stripes to figure out how big this chunk is
5402 	 * really going to be in terms of logical address space, and compare
5403 	 * that answer with the max chunk size. If it's higher, we try to
5404 	 * reduce stripe_size.
5405 	 */
5406 	if (ctl->stripe_size * data_stripes > ctl->max_chunk_size) {
5407 		/*
5408 		 * Reduce stripe_size, round it up to a 16MB boundary again and
5409 		 * then use it, unless it ends up being even bigger than the
5410 		 * previous value we had already.
5411 		 */
5412 		ctl->stripe_size = min(round_up(div_u64(ctl->max_chunk_size,
5413 							data_stripes), SZ_16M),
5414 				       ctl->stripe_size);
5415 	}
5416 
5417 	/* Stripe size should not go beyond 1G. */
5418 	ctl->stripe_size = min_t(u64, ctl->stripe_size, SZ_1G);
5419 
5420 	/* Align to BTRFS_STRIPE_LEN */
5421 	ctl->stripe_size = round_down(ctl->stripe_size, BTRFS_STRIPE_LEN);
5422 	ctl->chunk_size = ctl->stripe_size * data_stripes;
5423 
5424 	return 0;
5425 }
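
/*
 * Worked example (illustrative; the numbers are assumptions): RAID0
 * over 4 writable devices whose smallest usable hole is 3GiB starts
 * with stripe_size = 3GiB and data_stripes = 4.  3GiB * 4 exceeds a
 * 10GiB max_chunk_size, so stripe_size becomes min(round_up(10GiB / 4,
 * 16MiB), 3GiB) = 2560MiB, the 1GiB cap then reduces it to 1GiB, and
 * the final chunk_size is 4GiB.
 */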
5426 
5427 static int decide_stripe_size_zoned(struct alloc_chunk_ctl *ctl,
5428 				    struct btrfs_device_info *devices_info)
5429 {
5430 	u64 zone_size = devices_info[0].dev->zone_info->zone_size;
5431 	/* Number of stripes that count for block group size */
5432 	int data_stripes;
5433 
5434 	/*
5435 	 * It should hold because:
5436 	 *    dev_extent_min == dev_extent_want == zone_size * dev_stripes
5437 	 */
5438 	ASSERT(devices_info[ctl->ndevs - 1].max_avail == ctl->dev_extent_min);
5439 
5440 	ctl->stripe_size = zone_size;
5441 	ctl->num_stripes = ctl->ndevs * ctl->dev_stripes;
5442 	data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies;
5443 
5444 	/* stripe_size is fixed in a zoned filesystem. Reduce ndevs instead. */
5445 	if (ctl->stripe_size * data_stripes > ctl->max_chunk_size) {
5446 		ctl->ndevs = div_u64(div_u64(ctl->max_chunk_size * ctl->ncopies,
5447 					     ctl->stripe_size) + ctl->nparity,
5448 				     ctl->dev_stripes);
5449 		ctl->num_stripes = ctl->ndevs * ctl->dev_stripes;
5450 		data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies;
5451 		ASSERT(ctl->stripe_size * data_stripes <= ctl->max_chunk_size);
5452 	}
5453 
5454 	ctl->chunk_size = ctl->stripe_size * data_stripes;
5455 
5456 	return 0;
5457 }
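
/*
 * Worked example (illustrative; the numbers are assumptions): zoned
 * RAID0 metadata with a 256MiB zone_size has max_chunk_size ==
 * zone_size.  With 8 candidate devices, 8 data stripes would make a
 * 2GiB chunk, so ndevs is recomputed as ((256MiB * 1 / 256MiB) + 0)
 * / 1 = 1 and the chunk shrinks to a single 256MiB zone.
 */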
5458 
5459 static int decide_stripe_size(struct btrfs_fs_devices *fs_devices,
5460 			      struct alloc_chunk_ctl *ctl,
5461 			      struct btrfs_device_info *devices_info)
5462 {
5463 	struct btrfs_fs_info *info = fs_devices->fs_info;
5464 
5465 	/*
5466 	 * Round down to the number of usable stripes.  devs_increment can be
5467 	 * any number, so we can't use round_down(), which requires a power of
5468 	 * 2, while rounddown() is safe.
5469 	 */
5470 	ctl->ndevs = rounddown(ctl->ndevs, ctl->devs_increment);
5471 
5472 	if (ctl->ndevs < ctl->devs_min) {
5473 		if (btrfs_test_opt(info, ENOSPC_DEBUG)) {
5474 			btrfs_debug(info,
5475 	"%s: not enough devices with free space: have=%d minimum required=%d",
5476 				    __func__, ctl->ndevs, ctl->devs_min);
5477 		}
5478 		return -ENOSPC;
5479 	}
5480 
5481 	ctl->ndevs = min(ctl->ndevs, ctl->devs_max);
5482 
5483 	switch (fs_devices->chunk_alloc_policy) {
5484 	case BTRFS_CHUNK_ALLOC_REGULAR:
5485 		return decide_stripe_size_regular(ctl, devices_info);
5486 	case BTRFS_CHUNK_ALLOC_ZONED:
5487 		return decide_stripe_size_zoned(ctl, devices_info);
5488 	default:
5489 		BUG();
5490 	}
5491 }
5492 
5493 static void chunk_map_device_set_bits(struct btrfs_chunk_map *map, unsigned int bits)
5494 {
5495 	for (int i = 0; i < map->num_stripes; i++) {
5496 		struct btrfs_io_stripe *stripe = &map->stripes[i];
5497 		struct btrfs_device *device = stripe->dev;
5498 
5499 		set_extent_bit(&device->alloc_state, stripe->physical,
5500 			       stripe->physical + map->stripe_size - 1,
5501 			       bits | EXTENT_NOWAIT, NULL);
5502 	}
5503 }
5504 
5505 static void chunk_map_device_clear_bits(struct btrfs_chunk_map *map, unsigned int bits)
5506 {
5507 	for (int i = 0; i < map->num_stripes; i++) {
5508 		struct btrfs_io_stripe *stripe = &map->stripes[i];
5509 		struct btrfs_device *device = stripe->dev;
5510 
5511 		__clear_extent_bit(&device->alloc_state, stripe->physical,
5512 				   stripe->physical + map->stripe_size - 1,
5513 				   bits | EXTENT_NOWAIT,
5514 				   NULL, NULL);
5515 	}
5516 }
5517 
5518 void btrfs_remove_chunk_map(struct btrfs_fs_info *fs_info, struct btrfs_chunk_map *map)
5519 {
5520 	write_lock(&fs_info->mapping_tree_lock);
5521 	rb_erase_cached(&map->rb_node, &fs_info->mapping_tree);
5522 	RB_CLEAR_NODE(&map->rb_node);
5523 	chunk_map_device_clear_bits(map, CHUNK_ALLOCATED);
5524 	write_unlock(&fs_info->mapping_tree_lock);
5525 
5526 	/* Once for the tree reference. */
5527 	btrfs_free_chunk_map(map);
5528 }
5529 
5530 static int btrfs_chunk_map_cmp(const struct rb_node *new,
5531 			       const struct rb_node *exist)
5532 {
5533 	const struct btrfs_chunk_map *new_map =
5534 		rb_entry(new, struct btrfs_chunk_map, rb_node);
5535 	const struct btrfs_chunk_map *exist_map =
5536 		rb_entry(exist, struct btrfs_chunk_map, rb_node);
5537 
5538 	if (new_map->start == exist_map->start)
5539 		return 0;
5540 	if (new_map->start < exist_map->start)
5541 		return -1;
5542 	return 1;
5543 }
5544 
5545 EXPORT_FOR_TESTS
5546 int btrfs_add_chunk_map(struct btrfs_fs_info *fs_info, struct btrfs_chunk_map *map)
5547 {
5548 	struct rb_node *exist;
5549 
5550 	write_lock(&fs_info->mapping_tree_lock);
5551 	exist = rb_find_add_cached(&map->rb_node, &fs_info->mapping_tree,
5552 				   btrfs_chunk_map_cmp);
5553 
5554 	if (exist) {
5555 		write_unlock(&fs_info->mapping_tree_lock);
5556 		return -EEXIST;
5557 	}
5558 	chunk_map_device_set_bits(map, CHUNK_ALLOCATED);
5559 	chunk_map_device_clear_bits(map, CHUNK_TRIMMED);
5560 	write_unlock(&fs_info->mapping_tree_lock);
5561 
5562 	return 0;
5563 }
5564 
5565 EXPORT_FOR_TESTS
5566 struct btrfs_chunk_map *btrfs_alloc_chunk_map(int num_stripes, gfp_t gfp)
5567 {
5568 	struct btrfs_chunk_map *map;
5569 
5570 	map = kmalloc(btrfs_chunk_map_size(num_stripes), gfp);
5571 	if (!map)
5572 		return NULL;
5573 
5574 	refcount_set(&map->refs, 1);
5575 	RB_CLEAR_NODE(&map->rb_node);
5576 
5577 	return map;
5578 }
5579 
5580 static struct btrfs_block_group *create_chunk(struct btrfs_trans_handle *trans,
5581 			struct alloc_chunk_ctl *ctl,
5582 			struct btrfs_device_info *devices_info)
5583 {
5584 	struct btrfs_fs_info *info = trans->fs_info;
5585 	struct btrfs_chunk_map *map;
5586 	struct btrfs_block_group *block_group;
5587 	u64 start = ctl->start;
5588 	u64 type = ctl->type;
5589 	int ret;
5590 
5591 	map = btrfs_alloc_chunk_map(ctl->num_stripes, GFP_NOFS);
5592 	if (!map)
5593 		return ERR_PTR(-ENOMEM);
5594 
5595 	map->start = start;
5596 	map->chunk_len = ctl->chunk_size;
5597 	map->stripe_size = ctl->stripe_size;
5598 	map->type = type;
5599 	map->io_align = BTRFS_STRIPE_LEN;
5600 	map->io_width = BTRFS_STRIPE_LEN;
5601 	map->sub_stripes = ctl->sub_stripes;
5602 	map->num_stripes = ctl->num_stripes;
5603 
5604 	for (int i = 0; i < ctl->ndevs; i++) {
5605 		for (int j = 0; j < ctl->dev_stripes; j++) {
5606 			int s = i * ctl->dev_stripes + j;
5607 			map->stripes[s].dev = devices_info[i].dev;
5608 			map->stripes[s].physical = devices_info[i].dev_offset +
5609 						   j * ctl->stripe_size;
5610 		}
5611 	}
5612 
5613 	trace_btrfs_chunk_alloc(info, map, start, ctl->chunk_size);
5614 
5615 	ret = btrfs_add_chunk_map(info, map);
5616 	if (ret) {
5617 		btrfs_free_chunk_map(map);
5618 		return ERR_PTR(ret);
5619 	}
5620 
5621 	block_group = btrfs_make_block_group(trans, type, start, ctl->chunk_size);
5622 	if (IS_ERR(block_group)) {
5623 		btrfs_remove_chunk_map(info, map);
5624 		return block_group;
5625 	}
5626 
5627 	for (int i = 0; i < map->num_stripes; i++) {
5628 		struct btrfs_device *dev = map->stripes[i].dev;
5629 
5630 		btrfs_device_set_bytes_used(dev,
5631 					    dev->bytes_used + ctl->stripe_size);
5632 		if (list_empty(&dev->post_commit_list))
5633 			list_add_tail(&dev->post_commit_list,
5634 				      &trans->transaction->dev_update_list);
5635 	}
5636 
5637 	atomic64_sub(ctl->stripe_size * map->num_stripes,
5638 		     &info->free_chunk_space);
5639 
5640 	check_raid56_incompat_flag(info, type);
5641 	check_raid1c34_incompat_flag(info, type);
5642 
5643 	return block_group;
5644 }
5645 
5646 struct btrfs_block_group *btrfs_create_chunk(struct btrfs_trans_handle *trans,
5647 					    u64 type)
5648 {
5649 	struct btrfs_fs_info *info = trans->fs_info;
5650 	struct btrfs_fs_devices *fs_devices = info->fs_devices;
5651 	struct btrfs_device_info *devices_info = NULL;
5652 	struct alloc_chunk_ctl ctl;
5653 	struct btrfs_block_group *block_group;
5654 	int ret;
5655 
5656 	lockdep_assert_held(&info->chunk_mutex);
5657 
5658 	if (!alloc_profile_is_valid(type, 0)) {
5659 		ASSERT(0);
5660 		return ERR_PTR(-EINVAL);
5661 	}
5662 
5663 	if (list_empty(&fs_devices->alloc_list)) {
5664 		if (btrfs_test_opt(info, ENOSPC_DEBUG))
5665 			btrfs_debug(info, "%s: no writable device", __func__);
5666 		return ERR_PTR(-ENOSPC);
5667 	}
5668 
5669 	if (!(type & BTRFS_BLOCK_GROUP_TYPE_MASK)) {
5670 		btrfs_err(info, "invalid chunk type 0x%llx requested", type);
5671 		ASSERT(0);
5672 		return ERR_PTR(-EINVAL);
5673 	}
5674 
5675 	ctl.start = find_next_chunk(info);
5676 	ctl.type = type;
5677 	init_alloc_chunk_ctl(fs_devices, &ctl);
5678 
5679 	devices_info = kcalloc(fs_devices->rw_devices, sizeof(*devices_info),
5680 			       GFP_NOFS);
5681 	if (!devices_info)
5682 		return ERR_PTR(-ENOMEM);
5683 
5684 	ret = gather_device_info(fs_devices, &ctl, devices_info);
5685 	if (ret < 0) {
5686 		block_group = ERR_PTR(ret);
5687 		goto out;
5688 	}
5689 
5690 	ret = decide_stripe_size(fs_devices, &ctl, devices_info);
5691 	if (ret < 0) {
5692 		block_group = ERR_PTR(ret);
5693 		goto out;
5694 	}
5695 
5696 	block_group = create_chunk(trans, &ctl, devices_info);
5697 
5698 out:
5699 	kfree(devices_info);
5700 	return block_group;
5701 }
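
/*
 * Illustrative summary (not from the original source): chunk creation
 * is a pipeline driven from btrfs_create_chunk():
 *
 *   init_alloc_chunk_ctl(fs_devices, &ctl);              // per-profile limits
 *   gather_device_info(fs_devices, &ctl, devices_info);  // find and sort holes
 *   decide_stripe_size(fs_devices, &ctl, devices_info);  // fit stripe/chunk size
 *   create_chunk(trans, &ctl, devices_info);             // build map + block group
 *
 * Only the last stage mutates global state (the chunk map tree, the
 * devices' bytes_used and free_chunk_space); the earlier stages only
 * fill in the alloc_chunk_ctl.
 */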
5702 
5703 /*
5704  * This function, btrfs_chunk_alloc_add_chunk_item(), typically belongs to
5705  * phase 1 of chunk allocation. It belongs to phase 2 only when allocating
5706  * system chunks.
5707  *
5708  * See the comment at btrfs_chunk_alloc() for details about the chunk allocation
5709  * phases.
5710  */
5711 int btrfs_chunk_alloc_add_chunk_item(struct btrfs_trans_handle *trans,
5712 				     struct btrfs_block_group *bg)
5713 {
5714 	struct btrfs_fs_info *fs_info = trans->fs_info;
5715 	struct btrfs_root *chunk_root = fs_info->chunk_root;
5716 	struct btrfs_key key;
5717 	struct btrfs_chunk *chunk;
5718 	struct btrfs_stripe *stripe;
5719 	struct btrfs_chunk_map *map;
5720 	size_t item_size;
5721 	int i;
5722 	int ret;
5723 
5724 	/*
5725 	 * We take the chunk_mutex for 2 reasons:
5726 	 *
5727 	 * 1) Updates and insertions in the chunk btree must be done while holding
5728 	 *    the chunk_mutex, as well as updating the system chunk array in the
5729 	 *    superblock. See the comment on top of btrfs_chunk_alloc() for the
5730 	 *    details;
5731 	 *
5732 	 * 2) To prevent races with the final phase of a device replace operation
5733 	 *    that replaces the device object associated with the map's stripes,
5734 	 *    because the device object's id can change at any time during that
5735 	 *    final phase of the device replace operation
5736 	 *    (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the
5737 	 *    replaced device and then see it with an ID of BTRFS_DEV_REPLACE_DEVID,
5738 	 *    which would cause a failure when updating the device item, which does
5739 	 *    not exist, or when persisting a stripe of the chunk item with such an ID.
5740 	 *    Here we can't use the device_list_mutex because our caller already
5741 	 *    has locked the chunk_mutex, and the final phase of device replace
5742 	 *    acquires both mutexes - first the device_list_mutex and then the
5743 	 *    chunk_mutex. Using any of those two mutexes protects us from a
5744 	 *    concurrent device replace.
5745 	 */
5746 	lockdep_assert_held(&fs_info->chunk_mutex);
5747 
5748 	map = btrfs_get_chunk_map(fs_info, bg->start, bg->length);
5749 	if (IS_ERR(map)) {
5750 		ret = PTR_ERR(map);
5751 		btrfs_abort_transaction(trans, ret);
5752 		return ret;
5753 	}
5754 
5755 	item_size = btrfs_chunk_item_size(map->num_stripes);
5756 
5757 	chunk = kzalloc(item_size, GFP_NOFS);
5758 	if (!chunk) {
5759 		ret = -ENOMEM;
5760 		btrfs_abort_transaction(trans, ret);
5761 		goto out;
5762 	}
5763 
5764 	for (i = 0; i < map->num_stripes; i++) {
5765 		struct btrfs_device *device = map->stripes[i].dev;
5766 
5767 		ret = btrfs_update_device(trans, device);
5768 		if (ret)
5769 			goto out;
5770 	}
5771 
5772 	stripe = &chunk->stripe;
5773 	for (i = 0; i < map->num_stripes; i++) {
5774 		struct btrfs_device *device = map->stripes[i].dev;
5775 		const u64 dev_offset = map->stripes[i].physical;
5776 
5777 		btrfs_set_stack_stripe_devid(stripe, device->devid);
5778 		btrfs_set_stack_stripe_offset(stripe, dev_offset);
5779 		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
5780 		stripe++;
5781 	}
5782 
5783 	btrfs_set_stack_chunk_length(chunk, bg->length);
5784 	btrfs_set_stack_chunk_owner(chunk, BTRFS_EXTENT_TREE_OBJECTID);
5785 	btrfs_set_stack_chunk_stripe_len(chunk, BTRFS_STRIPE_LEN);
5786 	btrfs_set_stack_chunk_type(chunk, map->type);
5787 	btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
5788 	btrfs_set_stack_chunk_io_align(chunk, BTRFS_STRIPE_LEN);
5789 	btrfs_set_stack_chunk_io_width(chunk, BTRFS_STRIPE_LEN);
5790 	btrfs_set_stack_chunk_sector_size(chunk, fs_info->sectorsize);
5791 	btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);
5792 
5793 	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
5794 	key.type = BTRFS_CHUNK_ITEM_KEY;
5795 	key.offset = bg->start;
5796 
5797 	ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
5798 	if (ret)
5799 		goto out;
5800 
5801 	set_bit(BLOCK_GROUP_FLAG_CHUNK_ITEM_INSERTED, &bg->runtime_flags);
5802 
5803 	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
5804 		ret = btrfs_add_system_chunk(fs_info, &key, chunk, item_size);
5805 		if (ret)
5806 			goto out;
5807 	}
5808 
5809 out:
5810 	kfree(chunk);
5811 	btrfs_free_chunk_map(map);
5812 	return ret;
5813 }
5814 
5815 static noinline int init_first_rw_device(struct btrfs_trans_handle *trans)
5816 {
5817 	struct btrfs_fs_info *fs_info = trans->fs_info;
5818 	u64 alloc_profile;
5819 	struct btrfs_block_group *meta_bg;
5820 	struct btrfs_block_group *sys_bg;
5821 
5822 	/*
5823 	 * When adding a new device for sprouting, the seed device is read-only
5824 	 * so we must first allocate a metadata and a system chunk. But before
5825 	 * adding the block group items to the extent, device and chunk btrees,
5826 	 * we must first:
5827 	 *
5828 	 * 1) Create both chunks without doing any changes to the btrees, as
5829 	 *    otherwise we would get -ENOSPC since the block groups from the
5830 	 *    seed device are read-only;
5831 	 *
5832 	 * 2) Add the device item for the new sprout device - finishing the setup
5833 	 *    of a new block group requires updating the device item in the chunk
5834 	 *    btree, so it must exist when we attempt to do it. The previous step
5835 	 *    ensures this does not fail with -ENOSPC.
5836 	 *
5837 	 * After that we can add the block group items to their btrees:
5838 	 * update existing device item in the chunk btree, add a new block group
5839 	 * item to the extent btree, add a new chunk item to the chunk btree and
5840 	 * finally add the new device extent items to the devices btree.
5841 	 */
5842 
5843 	alloc_profile = btrfs_metadata_alloc_profile(fs_info);
5844 	meta_bg = btrfs_create_chunk(trans, alloc_profile);
5845 	if (IS_ERR(meta_bg))
5846 		return PTR_ERR(meta_bg);
5847 
5848 	alloc_profile = btrfs_system_alloc_profile(fs_info);
5849 	sys_bg = btrfs_create_chunk(trans, alloc_profile);
5850 	if (IS_ERR(sys_bg))
5851 		return PTR_ERR(sys_bg);
5852 
5853 	return 0;
5854 }
5855 
5856 static inline int btrfs_chunk_max_errors(struct btrfs_chunk_map *map)
5857 {
5858 	const int index = btrfs_bg_flags_to_raid_index(map->type);
5859 
5860 	return btrfs_raid_array[index].tolerated_failures;
5861 }
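
/*
 * Illustrative note (not from the original source): this returns the
 * per-profile tolerated_failures from btrfs_raid_array, e.g. a RAID5
 * chunk tolerates one missing device and a RAID6 chunk tolerates two.
 */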
5862 
5863 bool btrfs_chunk_writeable(struct btrfs_fs_info *fs_info, u64 chunk_offset)
5864 {
5865 	struct btrfs_chunk_map *map;
5866 	int miss_ndevs = 0;
5867 	int i;
5868 	bool ret = true;
5869 
5870 	map = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
5871 	if (IS_ERR(map))
5872 		return false;
5873 
5874 	for (i = 0; i < map->num_stripes; i++) {
5875 		if (test_bit(BTRFS_DEV_STATE_MISSING,
5876 					&map->stripes[i].dev->dev_state)) {
5877 			miss_ndevs++;
5878 			continue;
5879 		}
5880 		if (!test_bit(BTRFS_DEV_STATE_WRITEABLE,
5881 					&map->stripes[i].dev->dev_state)) {
5882 			ret = false;
5883 			goto end;
5884 		}
5885 	}
5886 
5887 	/*
5888 	 * If the number of missing devices is larger than max errors, we
5889 	 * cannot write the data into that chunk successfully.
5890 	 */
5891 	if (miss_ndevs > btrfs_chunk_max_errors(map))
5892 		ret = false;
5893 end:
5894 	btrfs_free_chunk_map(map);
5895 	return ret;
5896 }
5897 
5898 void btrfs_mapping_tree_free(struct btrfs_fs_info *fs_info)
5899 {
5900 	write_lock(&fs_info->mapping_tree_lock);
5901 	while (!RB_EMPTY_ROOT(&fs_info->mapping_tree.rb_root)) {
5902 		struct btrfs_chunk_map *map;
5903 		struct rb_node *node;
5904 
5905 		node = rb_first_cached(&fs_info->mapping_tree);
5906 		map = rb_entry(node, struct btrfs_chunk_map, rb_node);
5907 		rb_erase_cached(&map->rb_node, &fs_info->mapping_tree);
5908 		RB_CLEAR_NODE(&map->rb_node);
5909 		chunk_map_device_clear_bits(map, CHUNK_ALLOCATED);
5910 		/* Once for the tree ref. */
5911 		btrfs_free_chunk_map(map);
5912 		cond_resched_rwlock_write(&fs_info->mapping_tree_lock);
5913 	}
5914 	write_unlock(&fs_info->mapping_tree_lock);
5915 }
5916 
5917 static int btrfs_chunk_map_num_copies(const struct btrfs_chunk_map *map)
5918 {
5919 	enum btrfs_raid_types index = btrfs_bg_flags_to_raid_index(map->type);
5920 
5921 	if (map->type & BTRFS_BLOCK_GROUP_RAID5)
5922 		return 2;
5923 
5924 	/*
5925 	 * There could be two corrupted data stripes; we need to retry in a
5926 	 * loop in order to rebuild the correct data.
5927 	 *
5928 	 * Fail a stripe at a time on every retry except the stripe under
5929 	 * reconstruction.
5930 	 */
5931 	if (map->type & BTRFS_BLOCK_GROUP_RAID6)
5932 		return map->num_stripes;
5933 
5934 	/* Non-RAID56, use their ncopies from btrfs_raid_array. */
5935 	return btrfs_raid_array[index].ncopies;
5936 }
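
/*
 * Illustrative note (not from the original source): for RAID56 the
 * returned value is a retry budget rather than a count of literal
 * copies.  A 4-device RAID6 map reports num_stripes (4), so
 * read-repair can retry with a different stripe assumed bad each
 * time, while RAID5 always reports 2: the data stripe itself plus one
 * reconstruction from parity.
 */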
5937 
5938 int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
5939 {
5940 	struct btrfs_chunk_map *map;
5941 	int ret;
5942 
5943 	map = btrfs_get_chunk_map(fs_info, logical, len);
5944 	if (IS_ERR(map))
5945 		/*
5946 		 * We could return errors for these cases, but that could get
5947 		 * ugly, and we'd probably end up doing the same thing anyway:
5948 		 * nothing more than exiting.  So return 1 so the callers don't
5949 		 * try to use other copies.
5950 		 */
5951 		return 1;
5952 
5953 	ret = btrfs_chunk_map_num_copies(map);
5954 	btrfs_free_chunk_map(map);
5955 	return ret;
5956 }
5957 
5958 unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info,
5959 				    u64 logical)
5960 {
5961 	struct btrfs_chunk_map *map;
5962 	unsigned long len = fs_info->sectorsize;
5963 
5964 	if (!btrfs_fs_incompat(fs_info, RAID56))
5965 		return len;
5966 
5967 	map = btrfs_get_chunk_map(fs_info, logical, len);
5968 
5969 	if (!WARN_ON(IS_ERR(map))) {
5970 		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
5971 			len = btrfs_stripe_nr_to_offset(nr_data_stripes(map));
5972 		btrfs_free_chunk_map(map);
5973 	}
5974 	return len;
5975 }
5976 
5977 #ifdef CONFIG_BTRFS_EXPERIMENTAL
5978 static int btrfs_read_preferred(struct btrfs_chunk_map *map, int first, int num_stripes)
5979 {
5980 	for (int index = first; index < first + num_stripes; index++) {
5981 		const struct btrfs_device *device = map->stripes[index].dev;
5982 
5983 		if (device->devid == READ_ONCE(device->fs_devices->read_devid))
5984 			return index;
5985 	}
5986 
5987 	/* If no read-preferred device is set use the first stripe. */
5988 	return first;
5989 }
5990 
5991 struct stripe_mirror {
5992 	u64 devid;
5993 	int num;
5994 };
5995 
5996 static int btrfs_cmp_devid(const void *a, const void *b)
5997 {
5998 	const struct stripe_mirror *s1 = (const struct stripe_mirror *)a;
5999 	const struct stripe_mirror *s2 = (const struct stripe_mirror *)b;
6000 
6001 	if (s1->devid < s2->devid)
6002 		return -1;
6003 	if (s1->devid > s2->devid)
6004 		return 1;
6005 	return 0;
6006 }
6007 
6008 /*
6009  * Select a stripe for reading using the round-robin algorithm.
6010  *
6011  *  1. Compute the read cycle as the total sectors read divided by the minimum
6012  *     sectors per device.
6013  *  2. Determine the stripe number for the current read by taking the modulus
6014  *     of the read cycle with the total number of stripes:
6015  *
6016  *      stripe index = (total sectors / min sectors per dev) % num stripes
6017  *
6018  * The calculated stripe index is then used to select the corresponding device
6019  * from the list of devices, which is ordered by devid.
6020  */
6021 static int btrfs_read_rr(const struct btrfs_chunk_map *map, int first, int num_stripes)
6022 {
6023 	struct stripe_mirror stripes[BTRFS_RAID1_MAX_MIRRORS] = { 0 };
6024 	struct btrfs_device *device  = map->stripes[first].dev;
6025 	struct btrfs_fs_info *fs_info = device->fs_devices->fs_info;
6026 	unsigned int read_cycle;
6027 	unsigned int total_reads;
6028 	unsigned int min_reads_per_dev;
6029 
6030 	total_reads = percpu_counter_sum(&fs_info->stats_read_blocks);
6031 	min_reads_per_dev = READ_ONCE(fs_info->fs_devices->rr_min_contig_read) >>
6032 						       fs_info->sectorsize_bits;
6033 
6034 	for (int index = 0, i = first; i < first + num_stripes; i++) {
6035 		stripes[index].devid = map->stripes[i].dev->devid;
6036 		stripes[index].num = i;
6037 		index++;
6038 	}
6039 	sort(stripes, num_stripes, sizeof(struct stripe_mirror),
6040 	     btrfs_cmp_devid, NULL);
6041 
6042 	read_cycle = total_reads / min_reads_per_dev;
6043 	return stripes[read_cycle % num_stripes].num;
6044 }
6045 #endif
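
/*
 * Worked example (illustrative; the numbers are assumptions): with
 * 4KiB sectors and rr_min_contig_read set to 256KiB, min_reads_per_dev
 * is 64.  If stats_read_blocks totals 6400 sectors, read_cycle is 100,
 * and for a two-way RAID1 chunk 100 % 2 == 0 selects the stripe whose
 * device has the lowest devid (stripes[] is sorted by devid above).
 */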
6046 
6047 static int find_live_mirror(struct btrfs_fs_info *fs_info,
6048 			    struct btrfs_chunk_map *map, int first,
6049 			    int dev_replace_is_ongoing)
6050 {
6051 	const enum btrfs_read_policy policy = READ_ONCE(fs_info->fs_devices->read_policy);
6052 	int i;
6053 	int num_stripes;
6054 	int preferred_mirror;
6055 	int tolerance;
6056 	struct btrfs_device *srcdev;
6057 
6058 	ASSERT((map->type &
6059 		 (BTRFS_BLOCK_GROUP_RAID1_MASK | BTRFS_BLOCK_GROUP_RAID10)));
6060 
6061 	if (map->type & BTRFS_BLOCK_GROUP_RAID10)
6062 		num_stripes = map->sub_stripes;
6063 	else
6064 		num_stripes = map->num_stripes;
6065 
6066 	switch (policy) {
6067 	default:
6068 		/* Shouldn't happen, just warn and use pid instead of failing */
6069 		btrfs_warn_rl(fs_info, "unknown read_policy type %u, reset to pid",
6070 			      policy);
6071 		WRITE_ONCE(fs_info->fs_devices->read_policy, BTRFS_READ_POLICY_PID);
6072 		fallthrough;
6073 	case BTRFS_READ_POLICY_PID:
6074 		preferred_mirror = first + (current->pid % num_stripes);
6075 		break;
6076 #ifdef CONFIG_BTRFS_EXPERIMENTAL
6077 	case BTRFS_READ_POLICY_RR:
6078 		preferred_mirror = btrfs_read_rr(map, first, num_stripes);
6079 		break;
6080 	case BTRFS_READ_POLICY_DEVID:
6081 		preferred_mirror = btrfs_read_preferred(map, first, num_stripes);
6082 		break;
6083 #endif
6084 	}
6085 
6086 	if (dev_replace_is_ongoing &&
6087 	    fs_info->dev_replace.cont_reading_from_srcdev_mode ==
6088 	     BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID)
6089 		srcdev = fs_info->dev_replace.srcdev;
6090 	else
6091 		srcdev = NULL;
6092 
6093 	/*
6094 	 * try to avoid the drive that is the source drive for a
6095 	 * dev-replace procedure; only choose it if no other non-missing
6096 	 * mirror is available.
6097 	 */
6098 	for (tolerance = 0; tolerance < 2; tolerance++) {
6099 		if (map->stripes[preferred_mirror].dev->bdev &&
6100 		    (tolerance || map->stripes[preferred_mirror].dev != srcdev))
6101 			return preferred_mirror;
6102 		for (i = first; i < first + num_stripes; i++) {
6103 			if (map->stripes[i].dev->bdev &&
6104 			    (tolerance || map->stripes[i].dev != srcdev))
6105 				return i;
6106 		}
6107 	}
6108 
6109 	/* We couldn't find one that doesn't fail.  Just return something
6110 	 * and the I/O error handling code will clean up eventually.
6111 	 */
6112 	return preferred_mirror;
6113 }
6114 
6115 EXPORT_FOR_TESTS
6116 struct btrfs_io_context *alloc_btrfs_io_context(struct btrfs_fs_info *fs_info,
6117 						u64 logical, u16 total_stripes)
6118 {
6119 	struct btrfs_io_context *bioc;
6120 
6121 	bioc = kzalloc(
6122 		 /* The size of btrfs_io_context */
6123 		sizeof(struct btrfs_io_context) +
6124 		/* Plus the variable array for the stripes */
6125 		sizeof(struct btrfs_io_stripe) * (total_stripes),
6126 		GFP_NOFS);
6127 
6128 	if (!bioc)
6129 		return NULL;
6130 
6131 	refcount_set(&bioc->refs, 1);
6132 
6133 	bioc->fs_info = fs_info;
6134 	bioc->replace_stripe_src = -1;
6135 	bioc->full_stripe_logical = (u64)-1;
6136 	bioc->logical = logical;
6137 
6138 	return bioc;
6139 }
6140 
6141 void btrfs_get_bioc(struct btrfs_io_context *bioc)
6142 {
6143 	WARN_ON(!refcount_read(&bioc->refs));
6144 	refcount_inc(&bioc->refs);
6145 }
6146 
6147 void btrfs_put_bioc(struct btrfs_io_context *bioc)
6148 {
6149 	if (!bioc)
6150 		return;
6151 	if (refcount_dec_and_test(&bioc->refs))
6152 		kfree(bioc);
6153 }
6154 
6155 /*
6156  * Please note that discard won't be sent to the target device of a
6157  * device replace.
6158  */
6159 struct btrfs_discard_stripe *btrfs_map_discard(struct btrfs_fs_info *fs_info,
6160 					       u64 logical, u64 *length_ret,
6161 					       u32 *num_stripes)
6162 {
6163 	struct btrfs_chunk_map *map;
6164 	struct btrfs_discard_stripe *stripes;
6165 	u64 length = *length_ret;
6166 	u64 offset;
6167 	u32 stripe_nr;
6168 	u32 stripe_nr_end;
6169 	u32 stripe_cnt;
6170 	u64 stripe_end_offset;
6171 	u64 stripe_offset;
6172 	u32 stripe_index;
6173 	u32 factor = 0;
6174 	u32 sub_stripes = 0;
6175 	u32 stripes_per_dev = 0;
6176 	u32 remaining_stripes = 0;
6177 	u32 last_stripe = 0;
6178 	int ret;
6179 	int i;
6180 
6181 	map = btrfs_get_chunk_map(fs_info, logical, length);
6182 	if (IS_ERR(map))
6183 		return ERR_CAST(map);
6184 
6185 	/* we don't discard raid56 yet */
6186 	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
6187 		ret = -EOPNOTSUPP;
6188 		goto out_free_map;
6189 	}
6190 
6191 	offset = logical - map->start;
6192 	length = min_t(u64, map->start + map->chunk_len - logical, length);
6193 	*length_ret = length;
6194 
6195 	/*
6196 	 * stripe_nr counts the total number of stripes we have to stride
6197 	 * to get to this block
6198 	 */
6199 	stripe_nr = offset >> BTRFS_STRIPE_LEN_SHIFT;
6200 
6201 	/* stripe_offset is the offset of this block in its stripe */
6202 	stripe_offset = offset - btrfs_stripe_nr_to_offset(stripe_nr);
6203 
6204 	stripe_nr_end = round_up(offset + length, BTRFS_STRIPE_LEN) >>
6205 			BTRFS_STRIPE_LEN_SHIFT;
6206 	stripe_cnt = stripe_nr_end - stripe_nr;
6207 	stripe_end_offset = btrfs_stripe_nr_to_offset(stripe_nr_end) -
6208 			    (offset + length);
6209 	/*
6210 	 * after this, stripe_nr is the number of stripes on this
6211 	 * device we have to walk to find the data, and stripe_index is
6212 	 * the number of our device in the stripe array
6213 	 */
6214 	*num_stripes = 1;
6215 	stripe_index = 0;
6216 	if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
6217 			 BTRFS_BLOCK_GROUP_RAID10)) {
6218 		if (map->type & BTRFS_BLOCK_GROUP_RAID0)
6219 			sub_stripes = 1;
6220 		else
6221 			sub_stripes = map->sub_stripes;
6222 
6223 		factor = map->num_stripes / sub_stripes;
6224 		*num_stripes = min_t(u64, map->num_stripes,
6225 				    sub_stripes * stripe_cnt);
6226 		stripe_index = stripe_nr % factor;
6227 		stripe_nr /= factor;
6228 		stripe_index *= sub_stripes;
6229 
6230 		remaining_stripes = stripe_cnt % factor;
6231 		stripes_per_dev = stripe_cnt / factor;
6232 		last_stripe = ((stripe_nr_end - 1) % factor) * sub_stripes;
6233 	} else if (map->type & (BTRFS_BLOCK_GROUP_RAID1_MASK |
6234 				BTRFS_BLOCK_GROUP_DUP)) {
6235 		*num_stripes = map->num_stripes;
6236 	} else {
6237 		stripe_index = stripe_nr % map->num_stripes;
6238 		stripe_nr /= map->num_stripes;
6239 	}
6240 
6241 	stripes = kcalloc(*num_stripes, sizeof(*stripes), GFP_NOFS);
6242 	if (!stripes) {
6243 		ret = -ENOMEM;
6244 		goto out_free_map;
6245 	}
6246 
6247 	for (i = 0; i < *num_stripes; i++) {
6248 		stripes[i].physical =
6249 			map->stripes[stripe_index].physical +
6250 			stripe_offset + btrfs_stripe_nr_to_offset(stripe_nr);
6251 		stripes[i].dev = map->stripes[stripe_index].dev;
6252 
6253 		if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
6254 				 BTRFS_BLOCK_GROUP_RAID10)) {
6255 			stripes[i].length = btrfs_stripe_nr_to_offset(stripes_per_dev);
6256 
6257 			if (i / sub_stripes < remaining_stripes)
6258 				stripes[i].length += BTRFS_STRIPE_LEN;
6259 
6260 			/*
6261 			 * Special for the first stripe and
6262 			 * the last stripe:
6263 			 *
6264 			 * |-------|...|-------|
6265 			 *     |----------|
6266 			 *    off     end_off
6267 			 */
6268 			if (i < sub_stripes)
6269 				stripes[i].length -= stripe_offset;
6270 
6271 			if (stripe_index >= last_stripe &&
6272 			    stripe_index <= (last_stripe +
6273 					     sub_stripes - 1))
6274 				stripes[i].length -= stripe_end_offset;
6275 
6276 			if (i == sub_stripes - 1)
6277 				stripe_offset = 0;
6278 		} else {
6279 			stripes[i].length = length;
6280 		}
6281 
6282 		stripe_index++;
6283 		if (stripe_index == map->num_stripes) {
6284 			stripe_index = 0;
6285 			stripe_nr++;
6286 		}
6287 	}
6288 
6289 	btrfs_free_chunk_map(map);
6290 	return stripes;
6291 out_free_map:
6292 	btrfs_free_chunk_map(map);
6293 	return ERR_PTR(ret);
6294 }
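
/*
 * Worked example (illustrative; the numbers are assumptions, with a
 * 64KiB BTRFS_STRIPE_LEN): discarding 200KiB at offset 72KiB of a
 * 2-device RAID0 chunk gives stripe_nr = 1, stripe_offset = 8KiB,
 * stripe_nr_end = 5, stripe_cnt = 4 and stripe_end_offset = 48KiB.
 * Both devices get one discard stripe: 120KiB on the second device
 * (two 64KiB stripes minus the 8KiB head) and 80KiB on the first
 * (two stripes minus the 48KiB tail), adding back up to 200KiB.
 */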
6295 
6296 static bool is_block_group_to_copy(struct btrfs_fs_info *fs_info, u64 logical)
6297 {
6298 	struct btrfs_block_group *cache;
6299 	bool ret;
6300 
6301 	/* A non-zoned filesystem does not use the "to_copy" flag. */
6302 	if (!btrfs_is_zoned(fs_info))
6303 		return false;
6304 
6305 	cache = btrfs_lookup_block_group(fs_info, logical);
6306 
6307 	ret = test_bit(BLOCK_GROUP_FLAG_TO_COPY, &cache->runtime_flags);
6308 
6309 	btrfs_put_block_group(cache);
6310 	return ret;
6311 }
6312 
6313 static void handle_ops_on_dev_replace(struct btrfs_io_context *bioc,
6314 				      struct btrfs_dev_replace *dev_replace,
6315 				      u64 logical,
6316 				      struct btrfs_io_geometry *io_geom)
6317 {
6318 	u64 srcdev_devid = dev_replace->srcdev->devid;
6319 	/*
6320 	 * At this stage, num_stripes is still the real number of stripes,
6321 	 * excluding the duplicated stripes.
6322 	 */
6323 	int num_stripes = io_geom->num_stripes;
6324 	int max_errors = io_geom->max_errors;
6325 	int nr_extra_stripes = 0;
6326 	int i;
6327 
6328 	/*
6329 	 * A block group which has "to_copy" set will eventually be copied by
6330 	 * the dev-replace process. We can avoid cloning IO here.
6331 	 */
6332 	if (is_block_group_to_copy(dev_replace->srcdev->fs_info, logical))
6333 		return;
6334 
6335 	/*
6336 	 * Duplicate the write operations while the dev-replace procedure is
6337 	 * running. Since the copying of the old disk to the new disk takes
6338 	 * place at run time while the filesystem is mounted writable, the
6339 	 * regular write operations to the old disk have to be duplicated to go
6340 	 * to the new disk as well.
6341 	 *
6342 	 * Note that device->missing is handled by the caller, and that the
6343 	 * write to the old disk is already set up in the stripes array.
6344 	 */
6345 	for (i = 0; i < num_stripes; i++) {
6346 		struct btrfs_io_stripe *old = &bioc->stripes[i];
6347 		struct btrfs_io_stripe *new = &bioc->stripes[num_stripes + nr_extra_stripes];
6348 
6349 		if (old->dev->devid != srcdev_devid)
6350 			continue;
6351 
6352 		new->physical = old->physical;
6353 		new->dev = dev_replace->tgtdev;
6354 		if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK)
6355 			bioc->replace_stripe_src = i;
6356 		nr_extra_stripes++;
6357 	}
6358 
6359 	/* We can only have at most 2 extra nr_stripes (for DUP). */
6360 	ASSERT(nr_extra_stripes <= 2);
6361 	/*
6362 	 * For GET_READ_MIRRORS, we can only return at most 1 extra stripe for
6363 	 * replace.
6364 	 * If we have 2 extra stripes, only choose the one with the smaller physical address.
6365 	 */
6366 	if (io_geom->op == BTRFS_MAP_GET_READ_MIRRORS && nr_extra_stripes == 2) {
6367 		struct btrfs_io_stripe *first = &bioc->stripes[num_stripes];
6368 		struct btrfs_io_stripe *second = &bioc->stripes[num_stripes + 1];
6369 
6370 		/* Only DUP can have two extra stripes. */
6371 		ASSERT(bioc->map_type & BTRFS_BLOCK_GROUP_DUP);
6372 
6373 		/*
6374 		 * Swap the two extra stripes and reduce @nr_extra_stripes.
6375 		 * The extra stripe would still be there, but won't be accessed.
6376 		 */
6377 		if (first->physical > second->physical) {
6378 			swap(second->physical, first->physical);
6379 			swap(second->dev, first->dev);
6380 			nr_extra_stripes--;
6381 		}
6382 	}
6383 
6384 	io_geom->num_stripes = num_stripes + nr_extra_stripes;
6385 	io_geom->max_errors = max_errors + nr_extra_stripes;
6386 	bioc->replace_nr_stripes = nr_extra_stripes;
6387 }
6388 
6389 static u64 btrfs_max_io_len(struct btrfs_chunk_map *map, u64 offset,
6390 			    struct btrfs_io_geometry *io_geom)
6391 {
6392 	/*
6393 	 * Stripe_nr is the stripe where this block falls.  stripe_offset is
6394 	 * the offset of this block in its stripe.
6395 	 */
6396 	io_geom->stripe_offset = offset & BTRFS_STRIPE_LEN_MASK;
6397 	io_geom->stripe_nr = offset >> BTRFS_STRIPE_LEN_SHIFT;
6398 	ASSERT(io_geom->stripe_offset < U32_MAX);
6399 
6400 	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
6401 		unsigned long full_stripe_len =
6402 			btrfs_stripe_nr_to_offset(nr_data_stripes(map));
6403 
6404 		/*
6405 		 * For the full stripe start, we use the previously calculated
6406 		 * @stripe_nr. Align it to nr_data_stripes, then multiply by
6407 		 * STRIPE_LEN.
6408 		 *
6409 		 * This avoids u64 division completely.  And we have to use
6410 		 * rounddown(), not round_down(), as nr_data_stripes is not
6411 		 * guaranteed to be a power of 2.
6412 		 */
6413 		io_geom->raid56_full_stripe_start = btrfs_stripe_nr_to_offset(
6414 			rounddown(io_geom->stripe_nr, nr_data_stripes(map)));
6415 
6416 		ASSERT(io_geom->raid56_full_stripe_start + full_stripe_len > offset);
6417 		ASSERT(io_geom->raid56_full_stripe_start <= offset);
6418 		/*
6419 		 * For writes to RAID56, allow writing a full stripe set, but
6420 		 * no straddling of stripe sets.
6421 		 */
6422 		if (io_geom->op == BTRFS_MAP_WRITE)
6423 			return full_stripe_len - (offset - io_geom->raid56_full_stripe_start);
6424 	}
6425 
6426 	/*
6427 	 * For other RAID types and for RAID56 reads, allow a single stripe (on
6428 	 * a single disk).
6429 	 */
6430 	if (map->type & BTRFS_BLOCK_GROUP_STRIPE_MASK)
6431 		return BTRFS_STRIPE_LEN - io_geom->stripe_offset;
6432 	return U64_MAX;
6433 }
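
/*
 * Worked example (illustrative; the numbers are assumptions): on a
 * 3-device RAID5 chunk (2 data stripes, 128KiB full stripe), offset
 * 150KiB gives stripe_nr = 2, stripe_offset = 22KiB and
 * raid56_full_stripe_start = 128KiB.  A write may then span up to
 * 128KiB - 22KiB = 106KiB (to the end of the full stripe), while a
 * read is limited to one device stripe: 64KiB - 22KiB = 42KiB.
 */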
6434 
6435 static int set_io_stripe(struct btrfs_fs_info *fs_info, u64 logical,
6436 			 u64 *length, struct btrfs_io_stripe *dst,
6437 			 struct btrfs_chunk_map *map,
6438 			 struct btrfs_io_geometry *io_geom)
6439 {
6440 	dst->dev = map->stripes[io_geom->stripe_index].dev;
6441 
6442 	if (io_geom->op == BTRFS_MAP_READ && io_geom->use_rst)
6443 		return btrfs_get_raid_extent_offset(fs_info, logical, length,
6444 						    map->type,
6445 						    io_geom->stripe_index, dst);
6446 
6447 	dst->physical = map->stripes[io_geom->stripe_index].physical +
6448 			io_geom->stripe_offset +
6449 			btrfs_stripe_nr_to_offset(io_geom->stripe_nr);
6450 	return 0;
6451 }
6452 
6453 static bool is_single_device_io(struct btrfs_fs_info *fs_info,
6454 				const struct btrfs_io_stripe *smap,
6455 				const struct btrfs_chunk_map *map,
6456 				int num_alloc_stripes,
6457 				struct btrfs_io_geometry *io_geom)
6458 {
6459 	if (!smap)
6460 		return false;
6461 
6462 	if (num_alloc_stripes != 1)
6463 		return false;
6464 
6465 	if (io_geom->use_rst && io_geom->op != BTRFS_MAP_READ)
6466 		return false;
6467 
6468 	if ((map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) && io_geom->mirror_num > 1)
6469 		return false;
6470 
6471 	return true;
6472 }
6473 
6474 static void map_blocks_raid0(const struct btrfs_chunk_map *map,
6475 			     struct btrfs_io_geometry *io_geom)
6476 {
6477 	io_geom->stripe_index = io_geom->stripe_nr % map->num_stripes;
6478 	io_geom->stripe_nr /= map->num_stripes;
6479 	if (io_geom->op == BTRFS_MAP_READ)
6480 		io_geom->mirror_num = 1;
6481 }
6482 
6483 static void map_blocks_raid1(struct btrfs_fs_info *fs_info,
6484 			     struct btrfs_chunk_map *map,
6485 			     struct btrfs_io_geometry *io_geom,
6486 			     bool dev_replace_is_ongoing)
6487 {
6488 	if (io_geom->op != BTRFS_MAP_READ) {
6489 		io_geom->num_stripes = map->num_stripes;
6490 		return;
6491 	}
6492 
6493 	if (io_geom->mirror_num) {
6494 		io_geom->stripe_index = io_geom->mirror_num - 1;
6495 		return;
6496 	}
6497 
6498 	io_geom->stripe_index = find_live_mirror(fs_info, map, 0,
6499 						 dev_replace_is_ongoing);
6500 	io_geom->mirror_num = io_geom->stripe_index + 1;
6501 }
6502 
6503 static void map_blocks_dup(const struct btrfs_chunk_map *map,
6504 			   struct btrfs_io_geometry *io_geom)
6505 {
6506 	if (io_geom->op != BTRFS_MAP_READ) {
6507 		io_geom->num_stripes = map->num_stripes;
6508 		return;
6509 	}
6510 
6511 	if (io_geom->mirror_num) {
6512 		io_geom->stripe_index = io_geom->mirror_num - 1;
6513 		return;
6514 	}
6515 
6516 	io_geom->mirror_num = 1;
6517 }
6518 
6519 static void map_blocks_raid10(struct btrfs_fs_info *fs_info,
6520 			      struct btrfs_chunk_map *map,
6521 			      struct btrfs_io_geometry *io_geom,
6522 			      bool dev_replace_is_ongoing)
6523 {
6524 	u32 factor = map->num_stripes / map->sub_stripes;
6525 	int old_stripe_index;
6526 
6527 	io_geom->stripe_index = (io_geom->stripe_nr % factor) * map->sub_stripes;
6528 	io_geom->stripe_nr /= factor;
6529 
6530 	if (io_geom->op != BTRFS_MAP_READ) {
6531 		io_geom->num_stripes = map->sub_stripes;
6532 		return;
6533 	}
6534 
6535 	if (io_geom->mirror_num) {
6536 		io_geom->stripe_index += io_geom->mirror_num - 1;
6537 		return;
6538 	}
6539 
6540 	old_stripe_index = io_geom->stripe_index;
6541 	io_geom->stripe_index = find_live_mirror(fs_info, map,
6542 						 io_geom->stripe_index,
6543 						 dev_replace_is_ongoing);
6544 	io_geom->mirror_num = io_geom->stripe_index - old_stripe_index + 1;
6545 }
6546 
6547 static void map_blocks_raid56_write(struct btrfs_chunk_map *map,
6548 				    struct btrfs_io_geometry *io_geom,
6549 				    u64 logical, u64 *length)
6550 {
6551 	int data_stripes = nr_data_stripes(map);
6552 
6553 	/*
6554 	 * Needs full stripe mapping.
6555 	 *
6556 	 * Push stripe_nr back to the start of the full stripe.  For those
6557 	 * cases needing a full stripe, @stripe_nr is the full stripe number.
6558 	 *
6559 	 * Originally we would compute raid56_full_stripe_start /
6560 	 * full_stripe_len, but that can be expensive.  Here we just divide
6561 	 * @stripe_nr by @data_stripes.
6562 	 */
6563 	io_geom->stripe_nr /= data_stripes;
6564 
6565 	/* RAID[56] write or recovery. Return all stripes */
6566 	io_geom->num_stripes = map->num_stripes;
6567 	io_geom->max_errors = btrfs_chunk_max_errors(map);
6568 
6569 	/* Return the length to the full stripe end. */
6570 	*length = min(logical + *length,
6571 		      io_geom->raid56_full_stripe_start + map->start +
6572 		      btrfs_stripe_nr_to_offset(data_stripes)) -
6573 		logical;
6574 	io_geom->stripe_index = 0;
6575 	io_geom->stripe_offset = 0;
6576 }
6577 
6578 static void map_blocks_raid56_read(struct btrfs_chunk_map *map,
6579 				   struct btrfs_io_geometry *io_geom)
6580 {
6581 	int data_stripes = nr_data_stripes(map);
6582 
6583 	ASSERT(io_geom->mirror_num <= 1);
6584 	/* Just grab the data stripe directly. */
6585 	io_geom->stripe_index = io_geom->stripe_nr % data_stripes;
6586 	io_geom->stripe_nr /= data_stripes;
6587 
6588 	/* We distribute the parity blocks across stripes. */
6589 	io_geom->stripe_index =
6590 		(io_geom->stripe_nr + io_geom->stripe_index) % map->num_stripes;
6591 
6592 	if (io_geom->op == BTRFS_MAP_READ && io_geom->mirror_num < 1)
6593 		io_geom->mirror_num = 1;
6594 }
6595 
6596 static void map_blocks_single(const struct btrfs_chunk_map *map,
6597 			      struct btrfs_io_geometry *io_geom)
6598 {
6599 	io_geom->stripe_index = io_geom->stripe_nr % map->num_stripes;
6600 	io_geom->stripe_nr /= map->num_stripes;
6601 	io_geom->mirror_num = io_geom->stripe_index + 1;
6602 }
6603 
6604 /*
6605  * Map one logical range to one or more physical ranges.
6606  *
6607  * @length:		(Mandatory) mapped length of this run.
6608  *			One logical range can be split into different segments
6609  *			due to factors like zones and RAID0/5/6/10 stripe
6610  *			boundaries.
6611  *
6612  * @bioc_ret:		(Mandatory) returned btrfs_io_context structure,
6613  *			which has one or more physical ranges (btrfs_io_stripe)
6614  *			recorded inside.
6615  *			Caller should call btrfs_put_bioc() to free it after use.
6616  *
6617  * @smap:		(Optional) single physical range optimization.
6618  *			If the map request can be fulfilled by one single
6619  *			physical range, and this is parameter is not NULL,
6620  *			then @bioc_ret would be NULL, and @smap would be
6621  *			updated.
6622  *
6623  * @mirror_num_ret:	(Mandatory) returned mirror number if the original
6624  *			value is 0.
6625  *
6626  *			Mirror number 0 means to choose any live mirrors.
6627  *
6628  *			For non-RAID56 profiles, non-zero mirror_num means
6629  *			the Nth mirror. (e.g. mirror_num 1 means the first
6630  *			copy).
6631  *
6632  *			For RAID56 profiles, mirror 2 means rebuild from P
6633  *			and the remaining data stripes.
6634  *
6635  *			For RAID6 profile, mirror > 2 means mark another
6636  *			data/P stripe error and rebuild from the remaining
6637  *			stripes.
6638  */
6639 int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
6640 		    u64 logical, u64 *length,
6641 		    struct btrfs_io_context **bioc_ret,
6642 		    struct btrfs_io_stripe *smap, int *mirror_num_ret)
6643 {
6644 	struct btrfs_chunk_map *map;
6645 	struct btrfs_io_geometry io_geom = { 0 };
6646 	u64 map_offset;
6647 	int ret = 0;
6648 	int num_copies;
6649 	struct btrfs_io_context *bioc = NULL;
6650 	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
6651 	int dev_replace_is_ongoing = 0;
6652 	u16 num_alloc_stripes;
6653 	u64 max_len;
6654 
6655 	ASSERT(bioc_ret);
6656 
6657 	io_geom.mirror_num = (mirror_num_ret ? *mirror_num_ret : 0);
6658 	io_geom.num_stripes = 1;
6659 	io_geom.stripe_index = 0;
6660 	io_geom.op = op;
6661 
6662 	map = btrfs_get_chunk_map(fs_info, logical, *length);
6663 	if (IS_ERR(map))
6664 		return PTR_ERR(map);
6665 
6666 	num_copies = btrfs_chunk_map_num_copies(map);
6667 	if (io_geom.mirror_num > num_copies) {
6668 		/* Drop the chunk map reference taken above before bailing out. */
		btrfs_free_chunk_map(map);
		return -EINVAL;
	}
6669 
6670 	map_offset = logical - map->start;
6671 	io_geom.raid56_full_stripe_start = (u64)-1;
6672 	max_len = btrfs_max_io_len(map, map_offset, &io_geom);
6673 	*length = min_t(u64, map->chunk_len - map_offset, max_len);
6674 	io_geom.use_rst = btrfs_need_stripe_tree_update(fs_info, map->type);
6675 
6676 	if (dev_replace->replace_task != current)
6677 		down_read(&dev_replace->rwsem);
6678 
6679 	dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
6680 	/*
6681 	 * Hold the semaphore for read during the whole operation, write is
6682 	 * requested at commit time but must wait.
6683 	 */
6684 	if (!dev_replace_is_ongoing && dev_replace->replace_task != current)
6685 		up_read(&dev_replace->rwsem);
6686 
6687 	switch (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
6688 	case BTRFS_BLOCK_GROUP_RAID0:
6689 		map_blocks_raid0(map, &io_geom);
6690 		break;
6691 	case BTRFS_BLOCK_GROUP_RAID1:
6692 	case BTRFS_BLOCK_GROUP_RAID1C3:
6693 	case BTRFS_BLOCK_GROUP_RAID1C4:
6694 		map_blocks_raid1(fs_info, map, &io_geom, dev_replace_is_ongoing);
6695 		break;
6696 	case BTRFS_BLOCK_GROUP_DUP:
6697 		map_blocks_dup(map, &io_geom);
6698 		break;
6699 	case BTRFS_BLOCK_GROUP_RAID10:
6700 		map_blocks_raid10(fs_info, map, &io_geom, dev_replace_is_ongoing);
6701 		break;
6702 	case BTRFS_BLOCK_GROUP_RAID5:
6703 	case BTRFS_BLOCK_GROUP_RAID6:
6704 		if (op != BTRFS_MAP_READ || io_geom.mirror_num > 1)
6705 			map_blocks_raid56_write(map, &io_geom, logical, length);
6706 		else
6707 			map_blocks_raid56_read(map, &io_geom);
6708 		break;
6709 	default:
6710 		/*
6711 		 * After this, stripe_nr is the number of stripes on this
6712 		 * device we have to walk to find the data, and stripe_index is
6713 		 * the number of our device in the stripe array.
6714 		 */
6715 		map_blocks_single(map, &io_geom);
6716 		break;
6717 	}
6718 	if (io_geom.stripe_index >= map->num_stripes) {
6719 		btrfs_crit(fs_info,
6720 			   "stripe index math went horribly wrong, got stripe_index=%u, num_stripes=%u",
6721 			   io_geom.stripe_index, map->num_stripes);
6722 		ret = -EINVAL;
6723 		goto out;
6724 	}
6725 
6726 	num_alloc_stripes = io_geom.num_stripes;
6727 	if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL &&
6728 	    op != BTRFS_MAP_READ)
6729 		/*
6730 		 * For the replace case, we need to add extra stripes for the
6731 		 * duplicated stripes.
6732 		 *
6733 		 * For both WRITE and GET_READ_MIRRORS, we may have at most
6734 		 * 2 more stripes (DUP types, otherwise 1).
6735 		 */
6736 		num_alloc_stripes += 2;
6737 
6738 	/*
6739 	 * If this I/O maps to a single device, try to return the device and
6740 	 * physical block information on the stack instead of allocating an
6741 	 * I/O context structure.
6742 	 */
6743 	if (is_single_device_io(fs_info, smap, map, num_alloc_stripes, &io_geom)) {
6744 		ret = set_io_stripe(fs_info, logical, length, smap, map, &io_geom);
6745 		if (mirror_num_ret)
6746 			*mirror_num_ret = io_geom.mirror_num;
6747 		*bioc_ret = NULL;
6748 		goto out;
6749 	}
6750 
6751 	bioc = alloc_btrfs_io_context(fs_info, logical, num_alloc_stripes);
6752 	if (!bioc) {
6753 		ret = -ENOMEM;
6754 		goto out;
6755 	}
6756 	bioc->map_type = map->type;
6757 	bioc->use_rst = io_geom.use_rst;
6758 
6759 	/*
6760 	 * For RAID56 full map, we need to make sure the stripes[] follows the
6761 	 * rule that data stripes are all ordered, then followed by P and Q
6762 	 * (if we have them).
6763 	 *
6764 	 * It's still mostly the same as other profiles, just with extra rotation.
6765 	 */
6766 	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK &&
6767 	    (op != BTRFS_MAP_READ || io_geom.mirror_num > 1)) {
6768 		/*
6769 		 * For RAID56 @stripe_nr is already the number of full stripes
6770 		 * before us, which is also the rotation value (it needs to be
6771 		 * taken modulo num_stripes).
6772 		 *
6773 		 * In this case, we just add @stripe_nr to @i, then do the
6774 		 * modulo, to save one modulo call.
6775 		 */
6776 		bioc->full_stripe_logical = map->start +
6777 			btrfs_stripe_nr_to_offset(io_geom.stripe_nr *
6778 						  nr_data_stripes(map));
6779 		for (int i = 0; i < io_geom.num_stripes; i++) {
6780 			struct btrfs_io_stripe *dst = &bioc->stripes[i];
6781 			u32 stripe_index;
6782 
6783 			stripe_index = (i + io_geom.stripe_nr) % io_geom.num_stripes;
6784 			dst->dev = map->stripes[stripe_index].dev;
6785 			dst->physical =
6786 				map->stripes[stripe_index].physical +
6787 				io_geom.stripe_offset +
6788 				btrfs_stripe_nr_to_offset(io_geom.stripe_nr);
6789 		}
6790 	} else {
6791 		/*
6792 		 * For all other non-RAID56 profiles, just copy the target
6793 		 * stripe into the bioc.
6794 		 */
6795 		for (int i = 0; i < io_geom.num_stripes; i++) {
6796 			ret = set_io_stripe(fs_info, logical, length,
6797 					    &bioc->stripes[i], map, &io_geom);
6798 			if (ret < 0)
6799 				break;
6800 			io_geom.stripe_index++;
6801 		}
6802 	}
6803 
6804 	if (ret) {
6805 		*bioc_ret = NULL;
6806 		btrfs_put_bioc(bioc);
6807 		goto out;
6808 	}
6809 
6810 	if (op != BTRFS_MAP_READ)
6811 		io_geom.max_errors = btrfs_chunk_max_errors(map);
6812 
6813 	if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL &&
6814 	    op != BTRFS_MAP_READ) {
6815 		handle_ops_on_dev_replace(bioc, dev_replace, logical, &io_geom);
6816 	}
6817 
6818 	*bioc_ret = bioc;
6819 	bioc->num_stripes = io_geom.num_stripes;
6820 	bioc->max_errors = io_geom.max_errors;
6821 	bioc->mirror_num = io_geom.mirror_num;
6822 
6823 out:
6824 	if (dev_replace_is_ongoing && dev_replace->replace_task != current) {
6825 		lockdep_assert_held(&dev_replace->rwsem);
6826 		/* Unlock and let waiting writers proceed */
6827 		up_read(&dev_replace->rwsem);
6828 	}
6829 	btrfs_free_chunk_map(map);
6830 	return ret;
6831 }
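/*
 * A minimal caller sketch (illustrative only, not part of the original
 * file), showing the single-stripe fast path vs. the full io_context:
 *
 *	u64 map_length = length;
 *	struct btrfs_io_context *bioc = NULL;
 *	struct btrfs_io_stripe smap = { 0 };
 *	int mirror_num = 0;
 *	int ret;
 *
 *	ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, logical, &map_length,
 *			      &bioc, &smap, &mirror_num);
 *	if (ret < 0)
 *		return ret;
 *	if (bioc) {
 *		... use bioc->stripes[0 .. bioc->num_stripes - 1] ...
 *		btrfs_put_bioc(bioc);
 *	} else {
 *		... fast path: smap.dev and smap.physical are filled in ...
 *	}
 */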
6832 
6833 static bool dev_args_match_fs_devices(const struct btrfs_dev_lookup_args *args,
6834 				      const struct btrfs_fs_devices *fs_devices)
6835 {
6836 	if (args->fsid == NULL)
6837 		return true;
6838 	if (memcmp(fs_devices->metadata_uuid, args->fsid, BTRFS_FSID_SIZE) == 0)
6839 		return true;
6840 	return false;
6841 }
6842 
6843 static bool dev_args_match_device(const struct btrfs_dev_lookup_args *args,
6844 				  const struct btrfs_device *device)
6845 {
6846 	if (args->missing) {
6847 		if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state) &&
6848 		    !device->bdev)
6849 			return true;
6850 		return false;
6851 	}
6852 
6853 	if (device->devid != args->devid)
6854 		return false;
6855 	if (args->uuid && memcmp(device->uuid, args->uuid, BTRFS_UUID_SIZE) != 0)
6856 		return false;
6857 	return true;
6858 }
6859 
6860 /*
6861  * Find a device specified by @devid or @uuid in the list of @fs_devices, or
6862  * return NULL.
6863  *
6864  * If devid and uuid are both specified, the match must be exact, otherwise
6865  * only devid is used.
6866  */
6867 struct btrfs_device *btrfs_find_device(const struct btrfs_fs_devices *fs_devices,
6868 				       const struct btrfs_dev_lookup_args *args)
6869 {
6870 	struct btrfs_device *device;
6871 	struct btrfs_fs_devices *seed_devs;
6872 
6873 	if (dev_args_match_fs_devices(args, fs_devices)) {
6874 		list_for_each_entry(device, &fs_devices->devices, dev_list) {
6875 			if (dev_args_match_device(args, device))
6876 				return device;
6877 		}
6878 	}
6879 
6880 	list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
6881 		if (!dev_args_match_fs_devices(args, seed_devs))
6882 			continue;
6883 		list_for_each_entry(device, &seed_devs->devices, dev_list) {
6884 			if (dev_args_match_device(args, device))
6885 				return device;
6886 		}
6887 	}
6888 
6889 	return NULL;
6890 }
6891 
6892 static struct btrfs_device *add_missing_dev(struct btrfs_fs_devices *fs_devices,
6893 					    u64 devid, u8 *dev_uuid)
6894 {
6895 	struct btrfs_device *device;
6896 	unsigned int nofs_flag;
6897 
6898 	/*
6899 	 * We call this under the chunk_mutex, so we want to use NOFS for this
6900 	 * allocation.  However, we don't want to change btrfs_alloc_device() to
6901 	 * always do NOFS because we use it in a lot of other GFP_KERNEL safe
6902 	 * places.
6903 	 */
6904 
6905 	nofs_flag = memalloc_nofs_save();
6906 	device = btrfs_alloc_device(NULL, &devid, dev_uuid, NULL);
6907 	memalloc_nofs_restore(nofs_flag);
6908 	if (IS_ERR(device))
6909 		return device;
6910 
6911 	list_add(&device->dev_list, &fs_devices->devices);
6912 	device->fs_devices = fs_devices;
6913 	fs_devices->num_devices++;
6914 
6915 	set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
6916 	fs_devices->missing_devices++;
6917 
6918 	return device;
6919 }
6920 
6921 /*
6922  * Allocate new device struct, set up devid and UUID.
6923  *
6924  * @fs_info:	used only for generating a new devid, can be NULL if
6925  *		devid is provided (i.e. @devid != NULL).
6926  * @devid:	a pointer to devid for this device.  If NULL a new devid
6927  *		is generated.
6928  * @uuid:	a pointer to UUID for this device.  If NULL a new UUID
6929  *		is generated.
6930  * @path:	a pointer to device path if available, NULL otherwise.
6931  *
6932  * Return: a pointer to a new &struct btrfs_device on success; ERR_PTR()
6933  * on error.  Returned struct is not linked onto any lists and must be
6934  * destroyed with btrfs_free_device.
6935  */
6936 struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
6937 					const u64 *devid, const u8 *uuid,
6938 					const char *path)
6939 {
6940 	struct btrfs_device *dev;
6941 	u64 tmp;
6942 
6943 	if (WARN_ON(!devid && !fs_info))
6944 		return ERR_PTR(-EINVAL);
6945 
6946 	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
6947 	if (!dev)
6948 		return ERR_PTR(-ENOMEM);
6949 
6950 	INIT_LIST_HEAD(&dev->dev_list);
6951 	INIT_LIST_HEAD(&dev->dev_alloc_list);
6952 	INIT_LIST_HEAD(&dev->post_commit_list);
6953 
6954 	atomic_set(&dev->dev_stats_ccnt, 0);
6955 	btrfs_device_data_ordered_init(dev);
6956 	extent_io_tree_init(fs_info, &dev->alloc_state, IO_TREE_DEVICE_ALLOC_STATE);
6957 
6958 	if (devid)
6959 		tmp = *devid;
6960 	else {
6961 		int ret;
6962 
6963 		ret = find_next_devid(fs_info, &tmp);
6964 		if (ret) {
6965 			btrfs_free_device(dev);
6966 			return ERR_PTR(ret);
6967 		}
6968 	}
6969 	dev->devid = tmp;
6970 
6971 	if (uuid)
6972 		memcpy(dev->uuid, uuid, BTRFS_UUID_SIZE);
6973 	else
6974 		generate_random_uuid(dev->uuid);
6975 
6976 	if (path) {
6977 		struct rcu_string *name;
6978 
6979 		name = rcu_string_strdup(path, GFP_KERNEL);
6980 		if (!name) {
6981 			btrfs_free_device(dev);
6982 			return ERR_PTR(-ENOMEM);
6983 		}
6984 		rcu_assign_pointer(dev->name, name);
6985 	}
6986 
6987 	return dev;
6988 }
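/*
 * A usage sketch (illustrative): allocating a device struct for a known
 * devid/uuid pair, e.g. when reconstructing state from on-disk items:
 *
 *	struct btrfs_device *dev;
 *
 *	dev = btrfs_alloc_device(NULL, &devid, uuid, NULL);
 *	if (IS_ERR(dev))
 *		return PTR_ERR(dev);
 *	... link dev into an fs_devices list, or on failure ...
 *	btrfs_free_device(dev);
 */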
6989 
6990 static void btrfs_report_missing_device(struct btrfs_fs_info *fs_info,
6991 					u64 devid, u8 *uuid, bool error)
6992 {
6993 	if (error)
6994 		btrfs_err_rl(fs_info, "devid %llu uuid %pU is missing",
6995 			      devid, uuid);
6996 	else
6997 		btrfs_warn_rl(fs_info, "devid %llu uuid %pU is missing",
6998 			      devid, uuid);
6999 }
7000 
7001 u64 btrfs_calc_stripe_length(const struct btrfs_chunk_map *map)
7002 {
7003 	const int data_stripes = calc_data_stripes(map->type, map->num_stripes);
7004 
7005 	return div_u64(map->chunk_len, data_stripes);
7006 }
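/*
 * Worked examples (illustrative): a RAID6 chunk with num_stripes == 6 has
 * 6 - 2 == 4 data stripes, so a 4G chunk_len maps to a 1G dev extent on
 * each device; a RAID10 chunk with 4 stripes and 2 copies has 2 data
 * stripes, so a 2G chunk also uses a 1G extent per device.
 */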
7007 
7008 #if BITS_PER_LONG == 32
7009 /*
7010  * Due to page cache limit, metadata beyond BTRFS_32BIT_MAX_FILE_SIZE
7011  * can't be accessed on 32bit systems.
7012  *
7013  * This function does a mount time check to reject the fs if it already has
7014  * a metadata chunk beyond that limit.
7015  */
7016 static int check_32bit_meta_chunk(struct btrfs_fs_info *fs_info,
7017 				  u64 logical, u64 length, u64 type)
7018 {
7019 	if (!(type & BTRFS_BLOCK_GROUP_METADATA))
7020 		return 0;
7021 
7022 	if (logical + length < MAX_LFS_FILESIZE)
7023 		return 0;
7024 
7025 	btrfs_err_32bit_limit(fs_info);
7026 	return -EOVERFLOW;
7027 }
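/*
 * For scale (illustrative numbers): on a 32bit system with 4K pages
 * MAX_LFS_FILESIZE works out to about 16T (ULONG_MAX page indexes times
 * the page size), which is the hard cap the check above enforces for
 * metadata chunk placement.
 */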
7028 
7029 /*
7030  * This is to give an early warning for any metadata chunk reaching
7031  * BTRFS_32BIT_EARLY_WARN_THRESHOLD.
7032  * Although we can still access the metadata, it's not going to be possible
7033  * once the limit is reached.
7034  */
7035 static void warn_32bit_meta_chunk(struct btrfs_fs_info *fs_info,
7036 				  u64 logical, u64 length, u64 type)
7037 {
7038 	if (!(type & BTRFS_BLOCK_GROUP_METADATA))
7039 		return;
7040 
7041 	if (logical + length < BTRFS_32BIT_EARLY_WARN_THRESHOLD)
7042 		return;
7043 
7044 	btrfs_warn_32bit_limit(fs_info);
7045 }
7046 #endif
7047 
7048 static struct btrfs_device *handle_missing_device(struct btrfs_fs_info *fs_info,
7049 						  u64 devid, u8 *uuid)
7050 {
7051 	struct btrfs_device *dev;
7052 
7053 	if (!btrfs_test_opt(fs_info, DEGRADED)) {
7054 		btrfs_report_missing_device(fs_info, devid, uuid, true);
7055 		return ERR_PTR(-ENOENT);
7056 	}
7057 
7058 	dev = add_missing_dev(fs_info->fs_devices, devid, uuid);
7059 	if (IS_ERR(dev)) {
7060 		btrfs_err(fs_info, "failed to init missing device %llu: %ld",
7061 			  devid, PTR_ERR(dev));
7062 		return dev;
7063 	}
7064 	btrfs_report_missing_device(fs_info, devid, uuid, false);
7065 
7066 	return dev;
7067 }
7068 
7069 static int read_one_chunk(struct btrfs_key *key, struct extent_buffer *leaf,
7070 			  struct btrfs_chunk *chunk)
7071 {
7072 	BTRFS_DEV_LOOKUP_ARGS(args);
7073 	struct btrfs_fs_info *fs_info = leaf->fs_info;
7074 	struct btrfs_chunk_map *map;
7075 	u64 logical;
7076 	u64 length;
7077 	u64 devid;
7078 	u64 type;
7079 	u8 uuid[BTRFS_UUID_SIZE];
7080 	int index;
7081 	int num_stripes;
7082 	int ret;
7083 	int i;
7084 
7085 	logical = key->offset;
7086 	length = btrfs_chunk_length(leaf, chunk);
7087 	type = btrfs_chunk_type(leaf, chunk);
7088 	index = btrfs_bg_flags_to_raid_index(type);
7089 	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
7090 
7091 #if BITS_PER_LONG == 32
7092 	ret = check_32bit_meta_chunk(fs_info, logical, length, type);
7093 	if (ret < 0)
7094 		return ret;
7095 	warn_32bit_meta_chunk(fs_info, logical, length, type);
7096 #endif
7097 
7098 	map = btrfs_find_chunk_map(fs_info, logical, 1);
7099 
7100 	/* already mapped? */
7101 	if (map && map->start <= logical && map->start + map->chunk_len > logical) {
7102 		btrfs_free_chunk_map(map);
7103 		return 0;
7104 	} else if (map) {
7105 		btrfs_free_chunk_map(map);
7106 	}
7107 
7108 	map = btrfs_alloc_chunk_map(num_stripes, GFP_NOFS);
7109 	if (!map)
7110 		return -ENOMEM;
7111 
7112 	map->start = logical;
7113 	map->chunk_len = length;
7114 	map->num_stripes = num_stripes;
7115 	map->io_width = btrfs_chunk_io_width(leaf, chunk);
7116 	map->io_align = btrfs_chunk_io_align(leaf, chunk);
7117 	map->type = type;
7118 	/*
7119 	 * We can't use the sub_stripes value, as for profiles other than
7120 	 * RAID10, they may have 0 as sub_stripes for filesystems created by
7121 	 * older mkfs (<v5.4).
7122 	 * In that case, it can cause divide-by-zero errors later.
7123 	 * Since currently sub_stripes is fixed for each profile, let's
7124 	 * use the trusted value instead.
7125 	 */
7126 	map->sub_stripes = btrfs_raid_array[index].sub_stripes;
7127 	map->verified_stripes = 0;
7128 	map->stripe_size = btrfs_calc_stripe_length(map);
7129 	for (i = 0; i < num_stripes; i++) {
7130 		map->stripes[i].physical =
7131 			btrfs_stripe_offset_nr(leaf, chunk, i);
7132 		devid = btrfs_stripe_devid_nr(leaf, chunk, i);
7133 		args.devid = devid;
7134 		read_extent_buffer(leaf, uuid, (unsigned long)
7135 				   btrfs_stripe_dev_uuid_nr(chunk, i),
7136 				   BTRFS_UUID_SIZE);
7137 		args.uuid = uuid;
7138 		map->stripes[i].dev = btrfs_find_device(fs_info->fs_devices, &args);
7139 		if (!map->stripes[i].dev) {
7140 			map->stripes[i].dev = handle_missing_device(fs_info,
7141 								    devid, uuid);
7142 			if (IS_ERR(map->stripes[i].dev)) {
7143 				ret = PTR_ERR(map->stripes[i].dev);
7144 				btrfs_free_chunk_map(map);
7145 				return ret;
7146 			}
7147 		}
7148 
7149 		set_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
7150 				&(map->stripes[i].dev->dev_state));
7151 	}
7152 
7153 	ret = btrfs_add_chunk_map(fs_info, map);
7154 	if (ret < 0) {
7155 		btrfs_err(fs_info,
7156 			  "failed to add chunk map, start=%llu len=%llu: %d",
7157 			  map->start, map->chunk_len, ret);
7158 		btrfs_free_chunk_map(map);
7159 	}
7160 
7161 	return ret;
7162 }
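/*
 * The on-disk layout parsed above, for reference (struct btrfs_chunk embeds
 * the first struct btrfs_stripe, so the item size is sizeof(struct
 * btrfs_chunk) + (num_stripes - 1) * sizeof(struct btrfs_stripe)):
 *
 *	[ btrfs_chunk header | stripe 0 ][ stripe 1 ] ... [ stripe N-1 ]
 *
 * Each stripe records the devid, the physical offset on that device and the
 * device UUID used for the btrfs_find_device() lookup.
 */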
7163 
7164 static void fill_device_from_item(struct extent_buffer *leaf,
7165 				 struct btrfs_dev_item *dev_item,
7166 				 struct btrfs_device *device)
7167 {
7168 	unsigned long ptr;
7169 
7170 	device->devid = btrfs_device_id(leaf, dev_item);
7171 	device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
7172 	device->total_bytes = device->disk_total_bytes;
7173 	device->commit_total_bytes = device->disk_total_bytes;
7174 	device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
7175 	device->commit_bytes_used = device->bytes_used;
7176 	device->type = btrfs_device_type(leaf, dev_item);
7177 	device->io_align = btrfs_device_io_align(leaf, dev_item);
7178 	device->io_width = btrfs_device_io_width(leaf, dev_item);
7179 	device->sector_size = btrfs_device_sector_size(leaf, dev_item);
7180 	WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID);
7181 	clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
7182 
7183 	ptr = btrfs_device_uuid(dev_item);
7184 	read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
7185 }
7186 
7187 static struct btrfs_fs_devices *open_seed_devices(struct btrfs_fs_info *fs_info,
7188 						  u8 *fsid)
7189 {
7190 	struct btrfs_fs_devices *fs_devices;
7191 	int ret;
7192 
7193 	lockdep_assert_held(&uuid_mutex);
7194 	ASSERT(fsid);
7195 
7196 	/* This will match only for multi-device seed fs */
7197 	list_for_each_entry(fs_devices, &fs_info->fs_devices->seed_list, seed_list)
7198 		if (!memcmp(fs_devices->fsid, fsid, BTRFS_FSID_SIZE))
7199 			return fs_devices;
7200 
7201 
7202 	fs_devices = find_fsid(fsid, NULL);
7203 	if (!fs_devices) {
7204 		if (!btrfs_test_opt(fs_info, DEGRADED)) {
7205 			btrfs_err(fs_info,
7206 		"failed to find fsid %pU when attempting to open seed devices",
7207 				  fsid);
7208 			return ERR_PTR(-ENOENT);
7209 		}
7210 
7211 		fs_devices = alloc_fs_devices(fsid);
7212 		if (IS_ERR(fs_devices))
7213 			return fs_devices;
7214 
7215 		fs_devices->seeding = true;
7216 		fs_devices->opened = 1;
7217 		return fs_devices;
7218 	}
7219 
7220 	/*
7221 	 * Upon first call for a seed fs fsid, just create a private copy of the
7222 	 * respective fs_devices and anchor it at fs_info->fs_devices->seed_list.
7223 	 */
7224 	fs_devices = clone_fs_devices(fs_devices);
7225 	if (IS_ERR(fs_devices))
7226 		return fs_devices;
7227 
7228 	ret = open_fs_devices(fs_devices, BLK_OPEN_READ, fs_info->bdev_holder);
7229 	if (ret) {
7230 		free_fs_devices(fs_devices);
7231 		return ERR_PTR(ret);
7232 	}
7233 
7234 	if (!fs_devices->seeding) {
7235 		close_fs_devices(fs_devices);
7236 		free_fs_devices(fs_devices);
7237 		return ERR_PTR(-EINVAL);
7238 	}
7239 
7240 	list_add(&fs_devices->seed_list, &fs_info->fs_devices->seed_list);
7241 
7242 	return fs_devices;
7243 }
7244 
7245 static int read_one_dev(struct extent_buffer *leaf,
7246 			struct btrfs_dev_item *dev_item)
7247 {
7248 	BTRFS_DEV_LOOKUP_ARGS(args);
7249 	struct btrfs_fs_info *fs_info = leaf->fs_info;
7250 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7251 	struct btrfs_device *device;
7252 	u64 devid;
7253 	int ret;
7254 	u8 fs_uuid[BTRFS_FSID_SIZE];
7255 	u8 dev_uuid[BTRFS_UUID_SIZE];
7256 
7257 	devid = btrfs_device_id(leaf, dev_item);
7258 	args.devid = devid;
7259 	read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
7260 			   BTRFS_UUID_SIZE);
7261 	read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
7262 			   BTRFS_FSID_SIZE);
7263 	args.uuid = dev_uuid;
7264 	args.fsid = fs_uuid;
7265 
7266 	if (memcmp(fs_uuid, fs_devices->metadata_uuid, BTRFS_FSID_SIZE)) {
7267 		fs_devices = open_seed_devices(fs_info, fs_uuid);
7268 		if (IS_ERR(fs_devices))
7269 			return PTR_ERR(fs_devices);
7270 	}
7271 
7272 	device = btrfs_find_device(fs_info->fs_devices, &args);
7273 	if (!device) {
7274 		if (!btrfs_test_opt(fs_info, DEGRADED)) {
7275 			btrfs_report_missing_device(fs_info, devid,
7276 							dev_uuid, true);
7277 			return -ENOENT;
7278 		}
7279 
7280 		device = add_missing_dev(fs_devices, devid, dev_uuid);
7281 		if (IS_ERR(device)) {
7282 			btrfs_err(fs_info,
7283 				"failed to add missing dev %llu: %ld",
7284 				devid, PTR_ERR(device));
7285 			return PTR_ERR(device);
7286 		}
7287 		btrfs_report_missing_device(fs_info, devid, dev_uuid, false);
7288 	} else {
7289 		if (!device->bdev) {
7290 			if (!btrfs_test_opt(fs_info, DEGRADED)) {
7291 				btrfs_report_missing_device(fs_info,
7292 						devid, dev_uuid, true);
7293 				return -ENOENT;
7294 			}
7295 			btrfs_report_missing_device(fs_info, devid,
7296 							dev_uuid, false);
7297 		}
7298 
7299 		if (!device->bdev &&
7300 		    !test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
7301 			/*
7302 			 * This happens when a device that was properly set up
7303 			 * in the device info lists suddenly goes bad.
7304 			 * device->bdev is NULL, and so we have to set the
7305 			 * device's MISSING state bit here.
7306 			 */
7307 			device->fs_devices->missing_devices++;
7308 			set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
7309 		}
7310 
7311 		/* Move the device to its own fs_devices */
7312 		if (device->fs_devices != fs_devices) {
7313 			ASSERT(test_bit(BTRFS_DEV_STATE_MISSING,
7314 							&device->dev_state));
7315 
7316 			list_move(&device->dev_list, &fs_devices->devices);
7317 			device->fs_devices->num_devices--;
7318 			fs_devices->num_devices++;
7319 
7320 			device->fs_devices->missing_devices--;
7321 			fs_devices->missing_devices++;
7322 
7323 			device->fs_devices = fs_devices;
7324 		}
7325 	}
7326 
7327 	if (device->fs_devices != fs_info->fs_devices) {
7328 		BUG_ON(test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state));
7329 		if (device->generation !=
7330 		    btrfs_device_generation(leaf, dev_item))
7331 			return -EINVAL;
7332 	}
7333 
7334 	fill_device_from_item(leaf, dev_item, device);
7335 	if (device->bdev) {
7336 		u64 max_total_bytes = bdev_nr_bytes(device->bdev);
7337 
7338 		if (device->total_bytes > max_total_bytes) {
7339 			btrfs_err(fs_info,
7340 			"device total_bytes should be at most %llu but found %llu",
7341 				  max_total_bytes, device->total_bytes);
7342 			return -EINVAL;
7343 		}
7344 	}
7345 	set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
7346 	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
7347 	   !test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
7348 		device->fs_devices->total_rw_bytes += device->total_bytes;
7349 		atomic64_add(device->total_bytes - device->bytes_used,
7350 				&fs_info->free_chunk_space);
7351 	}
7352 	ret = 0;
7353 	return ret;
7354 }
7355 
7356 int btrfs_read_sys_array(struct btrfs_fs_info *fs_info)
7357 {
7358 	struct btrfs_super_block *super_copy = fs_info->super_copy;
7359 	struct extent_buffer *sb;
7360 	u8 *array_ptr;
7361 	unsigned long sb_array_offset;
7362 	int ret = 0;
7363 	u32 array_size;
7364 	u32 cur_offset;
7365 	struct btrfs_key key;
7366 
7367 	ASSERT(BTRFS_SUPER_INFO_SIZE <= fs_info->nodesize);
7368 
7369 	/*
7370 	 * We allocated a dummy extent buffer, just to use the extent buffer
7371 	 * accessors.  There will be unused space after BTRFS_SUPER_INFO_SIZE,
7372 	 * but that's fine, we will not go beyond the system chunk array anyway.
7373 	 */
7374 	sb = alloc_dummy_extent_buffer(fs_info, BTRFS_SUPER_INFO_OFFSET);
7375 	if (!sb)
7376 		return -ENOMEM;
7377 	set_extent_buffer_uptodate(sb);
7378 
7379 	write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
7380 	array_size = btrfs_super_sys_array_size(super_copy);
7381 
7382 	array_ptr = super_copy->sys_chunk_array;
7383 	sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array);
7384 	cur_offset = 0;
7385 
7386 	while (cur_offset < array_size) {
7387 		struct btrfs_chunk *chunk;
7388 		struct btrfs_disk_key *disk_key = (struct btrfs_disk_key *)array_ptr;
7389 		u32 len = sizeof(*disk_key);
7390 
7391 		/*
7392 		 * The sys_chunk_array has already been verified at super block
7393 		 * read time.  Only do ASSERT()s for basic checks.
7394 		 */
7395 		ASSERT(cur_offset + len <= array_size);
7396 
7397 		btrfs_disk_key_to_cpu(&key, disk_key);
7398 
7399 		array_ptr += len;
7400 		sb_array_offset += len;
7401 		cur_offset += len;
7402 
7403 		ASSERT(key.type == BTRFS_CHUNK_ITEM_KEY);
7404 
7405 		chunk = (struct btrfs_chunk *)sb_array_offset;
7406 		ASSERT(btrfs_chunk_type(sb, chunk) & BTRFS_BLOCK_GROUP_SYSTEM);
7407 
7408 		len = btrfs_chunk_item_size(btrfs_chunk_num_stripes(sb, chunk));
7409 
7410 		ASSERT(cur_offset + len <= array_size);
7411 
7412 		ret = read_one_chunk(&key, sb, chunk);
7413 		if (ret)
7414 			break;
7415 
7416 		array_ptr += len;
7417 		sb_array_offset += len;
7418 		cur_offset += len;
7419 	}
7420 	clear_extent_buffer_uptodate(sb);
7421 	free_extent_buffer_stale(sb);
7422 	return ret;
7423 }
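/*
 * Layout of the array walked above (for reference): sys_chunk_array packs
 * (key, chunk) pairs back to back for the SYSTEM chunks:
 *
 *	[ disk_key 0 ][ chunk 0 + stripes ][ disk_key 1 ][ chunk 1 + stripes ] ...
 *
 * which is why each iteration first advances by sizeof(*disk_key) and then
 * by btrfs_chunk_item_size(num_stripes).
 */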
7424 
7425 /*
7426  * Check if all chunks in the fs are OK for read-write degraded mount
7427  *
7428  * If the @failing_dev is specified, it's accounted as missing.
7429  *
7430  * Return true if all chunks meet the minimal RW mount requirements.
7431  * Return false if any chunk doesn't meet the minimal RW mount requirements.
7432  */
7433 bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info,
7434 					struct btrfs_device *failing_dev)
7435 {
7436 	struct btrfs_chunk_map *map;
7437 	u64 next_start;
7438 	bool ret = true;
7439 
7440 	map = btrfs_find_chunk_map(fs_info, 0, U64_MAX);
7441 	/* No chunk at all? Return false anyway */
7442 	if (!map) {
7443 		ret = false;
7444 		goto out;
7445 	}
7446 	while (map) {
7447 		int missing = 0;
7448 		int max_tolerated;
7449 		int i;
7450 
7451 		max_tolerated =
7452 			btrfs_get_num_tolerated_disk_barrier_failures(
7453 					map->type);
7454 		for (i = 0; i < map->num_stripes; i++) {
7455 			struct btrfs_device *dev = map->stripes[i].dev;
7456 
7457 			if (!dev || !dev->bdev ||
7458 			    test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) ||
7459 			    dev->last_flush_error)
7460 				missing++;
7461 			else if (failing_dev && failing_dev == dev)
7462 				missing++;
7463 		}
7464 		if (missing > max_tolerated) {
7465 			if (!failing_dev)
7466 				btrfs_warn(fs_info,
7467 	"chunk %llu missing %d devices, max tolerance is %d for writable mount",
7468 				   map->start, missing, max_tolerated);
7469 			btrfs_free_chunk_map(map);
7470 			ret = false;
7471 			goto out;
7472 		}
7473 		next_start = map->start + map->chunk_len;
7474 		btrfs_free_chunk_map(map);
7475 
7476 		map = btrfs_find_chunk_map(fs_info, next_start, U64_MAX - next_start);
7477 	}
7478 out:
7479 	return ret;
7480 }
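/*
 * Example of the tolerance check above (values from btrfs_raid_array): a
 * RAID1 or RAID10 chunk tolerates 1 missing device, RAID1C3 tolerates 2 and
 * RAID1C4 tolerates 3, while RAID0/DUP/SINGLE tolerate none, so for those a
 * single missing device already makes the chunk (and a writable mount)
 * unusable.
 */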
7481 
7482 static void readahead_tree_node_children(struct extent_buffer *node)
7483 {
7484 	int i;
7485 	const int nr_items = btrfs_header_nritems(node);
7486 
7487 	for (i = 0; i < nr_items; i++)
7488 		btrfs_readahead_node_child(node, i);
7489 }
7490 
7491 int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
7492 {
7493 	struct btrfs_root *root = fs_info->chunk_root;
7494 	struct btrfs_path *path;
7495 	struct extent_buffer *leaf;
7496 	struct btrfs_key key;
7497 	struct btrfs_key found_key;
7498 	int ret;
7499 	int slot;
7500 	int iter_ret = 0;
7501 	u64 total_dev = 0;
7502 	u64 last_ra_node = 0;
7503 
7504 	path = btrfs_alloc_path();
7505 	if (!path)
7506 		return -ENOMEM;
7507 
7508 	/*
7509 	 * The uuid_mutex is needed only if we are mounting a sprout FS;
7510 	 * otherwise we don't need it.
7511 	 */
7512 	mutex_lock(&uuid_mutex);
7513 
7514 	/*
7515 	 * It is possible for mount and umount to race in such a way that
7516 	 * we execute this code path, but open_fs_devices failed to clear
7517 	 * total_rw_bytes. We certainly want it cleared before reading the
7518 	 * device items, so clear it here.
7519 	 */
7520 	fs_info->fs_devices->total_rw_bytes = 0;
7521 
7522 	/*
7523 	 * Lockdep complains about possible circular locking dependency between
7524 	 * a disk's open_mutex (struct gendisk.open_mutex), the rw semaphores
7525 	 * used for freeze protection of a fs (struct super_block.s_writers),
7526 	 * which we take when starting a transaction, and extent buffers of the
7527 	 * chunk tree if we call read_one_dev() while holding a lock on an
7528 	 * extent buffer of the chunk tree. Since we are mounting the filesystem
7529 	 * and at this point there can't be any concurrent task modifying the
7530 	 * chunk tree, to keep it simple, just skip locking on the chunk tree.
7531 	 */
7532 	ASSERT(!test_bit(BTRFS_FS_OPEN, &fs_info->flags));
7533 	path->skip_locking = 1;
7534 
7535 	/*
7536 	 * Read all device items, and then all the chunk items. All
7537 	 * device items are found before any chunk item (their object id
7538 	 * is smaller than the lowest possible object id for a chunk
7539 	 * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID).
7540 	 */
7541 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
7542 	key.offset = 0;
7543 	key.type = 0;
7544 	btrfs_for_each_slot(root, &key, &found_key, path, iter_ret) {
7545 		struct extent_buffer *node = path->nodes[1];
7546 
7547 		leaf = path->nodes[0];
7548 		slot = path->slots[0];
7549 
7550 		if (node) {
7551 			if (last_ra_node != node->start) {
7552 				readahead_tree_node_children(node);
7553 				last_ra_node = node->start;
7554 			}
7555 		}
7556 		if (found_key.type == BTRFS_DEV_ITEM_KEY) {
7557 			struct btrfs_dev_item *dev_item;
7558 			dev_item = btrfs_item_ptr(leaf, slot,
7559 						  struct btrfs_dev_item);
7560 			ret = read_one_dev(leaf, dev_item);
7561 			if (ret)
7562 				goto error;
7563 			total_dev++;
7564 		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
7565 			struct btrfs_chunk *chunk;
7566 
7567 			/*
7568 			 * We are only called at mount time, so no need to take
7569 			 * fs_info->chunk_mutex. Plus, to avoid lockdep warnings,
7570 			 * we always lock first fs_info->chunk_mutex before
7571 			 * acquiring any locks on the chunk tree. This is a
7572 			 * requirement for chunk allocation, see the comment on
7573 			 * top of btrfs_chunk_alloc() for details.
7574 			 */
7575 			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
7576 			ret = read_one_chunk(&found_key, leaf, chunk);
7577 			if (ret)
7578 				goto error;
7579 		}
7580 	}
7581 	/* Catch error found during iteration */
7582 	if (iter_ret < 0) {
7583 		ret = iter_ret;
7584 		goto error;
7585 	}
7586 
7587 	/*
7588 	 * After loading chunk tree, we've got all device information,
7589 	 * do another round of validation checks.
7590 	 */
7591 	if (total_dev != fs_info->fs_devices->total_devices) {
7592 		btrfs_warn(fs_info,
7593 "super block num_devices %llu mismatch with DEV_ITEM count %llu, will be repaired on next transaction commit",
7594 			  btrfs_super_num_devices(fs_info->super_copy),
7595 			  total_dev);
7596 		fs_info->fs_devices->total_devices = total_dev;
7597 		btrfs_set_super_num_devices(fs_info->super_copy, total_dev);
7598 	}
7599 	if (btrfs_super_total_bytes(fs_info->super_copy) <
7600 	    fs_info->fs_devices->total_rw_bytes) {
7601 		btrfs_err(fs_info,
7602 	"super_total_bytes %llu mismatch with fs_devices total_rw_bytes %llu",
7603 			  btrfs_super_total_bytes(fs_info->super_copy),
7604 			  fs_info->fs_devices->total_rw_bytes);
7605 		ret = -EINVAL;
7606 		goto error;
7607 	}
7608 	ret = 0;
7609 error:
7610 	mutex_unlock(&uuid_mutex);
7611 
7612 	btrfs_free_path(path);
7613 	return ret;
7614 }
7615 
7616 int btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
7617 {
7618 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs;
7619 	struct btrfs_device *device;
7620 	int ret = 0;
7621 
7622 	mutex_lock(&fs_devices->device_list_mutex);
7623 	list_for_each_entry(device, &fs_devices->devices, dev_list)
7624 		device->fs_info = fs_info;
7625 
7626 	list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
7627 		list_for_each_entry(device, &seed_devs->devices, dev_list) {
7628 			device->fs_info = fs_info;
7629 			ret = btrfs_get_dev_zone_info(device, false);
7630 			if (ret)
7631 				break;
7632 		}
7633 
7634 		seed_devs->fs_info = fs_info;
7635 	}
7636 	mutex_unlock(&fs_devices->device_list_mutex);
7637 
7638 	return ret;
7639 }
7640 
7641 static u64 btrfs_dev_stats_value(const struct extent_buffer *eb,
7642 				 const struct btrfs_dev_stats_item *ptr,
7643 				 int index)
7644 {
7645 	u64 val;
7646 
7647 	read_extent_buffer(eb, &val,
7648 			   offsetof(struct btrfs_dev_stats_item, values) +
7649 			    ((unsigned long)ptr) + (index * sizeof(u64)),
7650 			   sizeof(val));
7651 	return val;
7652 }
7653 
7654 static void btrfs_set_dev_stats_value(struct extent_buffer *eb,
7655 				      struct btrfs_dev_stats_item *ptr,
7656 				      int index, u64 val)
7657 {
7658 	write_extent_buffer(eb, &val,
7659 			    offsetof(struct btrfs_dev_stats_item, values) +
7660 			     ((unsigned long)ptr) + (index * sizeof(u64)),
7661 			    sizeof(val));
7662 }
7663 
7664 static int btrfs_device_init_dev_stats(struct btrfs_device *device,
7665 				       struct btrfs_path *path)
7666 {
7667 	struct btrfs_dev_stats_item *ptr;
7668 	struct extent_buffer *eb;
7669 	struct btrfs_key key;
7670 	int item_size;
7671 	int i, ret, slot;
7672 
7673 	if (!device->fs_info->dev_root)
7674 		return 0;
7675 
7676 	key.objectid = BTRFS_DEV_STATS_OBJECTID;
7677 	key.type = BTRFS_PERSISTENT_ITEM_KEY;
7678 	key.offset = device->devid;
7679 	ret = btrfs_search_slot(NULL, device->fs_info->dev_root, &key, path, 0, 0);
7680 	if (ret) {
7681 		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7682 			btrfs_dev_stat_set(device, i, 0);
7683 		device->dev_stats_valid = 1;
7684 		btrfs_release_path(path);
7685 		return ret < 0 ? ret : 0;
7686 	}
7687 	slot = path->slots[0];
7688 	eb = path->nodes[0];
7689 	item_size = btrfs_item_size(eb, slot);
7690 
7691 	ptr = btrfs_item_ptr(eb, slot, struct btrfs_dev_stats_item);
7692 
7693 	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
7694 		if (item_size >= (1 + i) * sizeof(__le64))
7695 			btrfs_dev_stat_set(device, i,
7696 					   btrfs_dev_stats_value(eb, ptr, i));
7697 		else
7698 			btrfs_dev_stat_set(device, i, 0);
7699 	}
7700 
7701 	device->dev_stats_valid = 1;
7702 	btrfs_dev_stat_print_on_load(device);
7703 	btrfs_release_path(path);
7704 
7705 	return 0;
7706 }
7707 
7708 int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
7709 {
7710 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs;
7711 	struct btrfs_device *device;
7712 	struct btrfs_path *path = NULL;
7713 	int ret = 0;
7714 
7715 	path = btrfs_alloc_path();
7716 	if (!path)
7717 		return -ENOMEM;
7718 
7719 	mutex_lock(&fs_devices->device_list_mutex);
7720 	list_for_each_entry(device, &fs_devices->devices, dev_list) {
7721 		ret = btrfs_device_init_dev_stats(device, path);
7722 		if (ret)
7723 			goto out;
7724 	}
7725 	list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
7726 		list_for_each_entry(device, &seed_devs->devices, dev_list) {
7727 			ret = btrfs_device_init_dev_stats(device, path);
7728 			if (ret)
7729 				goto out;
7730 		}
7731 	}
7732 out:
7733 	mutex_unlock(&fs_devices->device_list_mutex);
7734 
7735 	btrfs_free_path(path);
7736 	return ret;
7737 }
7738 
7739 static int update_dev_stat_item(struct btrfs_trans_handle *trans,
7740 				struct btrfs_device *device)
7741 {
7742 	struct btrfs_fs_info *fs_info = trans->fs_info;
7743 	struct btrfs_root *dev_root = fs_info->dev_root;
7744 	struct btrfs_path *path;
7745 	struct btrfs_key key;
7746 	struct extent_buffer *eb;
7747 	struct btrfs_dev_stats_item *ptr;
7748 	int ret;
7749 	int i;
7750 
7751 	key.objectid = BTRFS_DEV_STATS_OBJECTID;
7752 	key.type = BTRFS_PERSISTENT_ITEM_KEY;
7753 	key.offset = device->devid;
7754 
7755 	path = btrfs_alloc_path();
7756 	if (!path)
7757 		return -ENOMEM;
7758 	ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
7759 	if (ret < 0) {
7760 		btrfs_warn_in_rcu(fs_info,
7761 			"error %d while searching for dev_stats item for device %s",
7762 				  ret, btrfs_dev_name(device));
7763 		goto out;
7764 	}
7765 
7766 	if (ret == 0 &&
7767 	    btrfs_item_size(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
7768 		/* need to delete old one and insert a new one */
7769 		ret = btrfs_del_item(trans, dev_root, path);
7770 		if (ret != 0) {
7771 			btrfs_warn_in_rcu(fs_info,
7772 				"delete too small dev_stats item for device %s failed %d",
7773 					  btrfs_dev_name(device), ret);
7774 			goto out;
7775 		}
7776 		ret = 1;
7777 	}
7778 
7779 	if (ret == 1) {
7780 		/* need to insert a new item */
7781 		btrfs_release_path(path);
7782 		ret = btrfs_insert_empty_item(trans, dev_root, path,
7783 					      &key, sizeof(*ptr));
7784 		if (ret < 0) {
7785 			btrfs_warn_in_rcu(fs_info,
7786 				"insert dev_stats item for device %s failed %d",
7787 				btrfs_dev_name(device), ret);
7788 			goto out;
7789 		}
7790 	}
7791 
7792 	eb = path->nodes[0];
7793 	ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
7794 	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7795 		btrfs_set_dev_stats_value(eb, ptr, i,
7796 					  btrfs_dev_stat_read(device, i));
7797 out:
7798 	btrfs_free_path(path);
7799 	return ret;
7800 }
7801 
7802 /*
7803  * Called from commit_transaction.  Writes all changed device stats to disk.
7804  */
7805 int btrfs_run_dev_stats(struct btrfs_trans_handle *trans)
7806 {
7807 	struct btrfs_fs_info *fs_info = trans->fs_info;
7808 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7809 	struct btrfs_device *device;
7810 	int stats_cnt;
7811 	int ret = 0;
7812 
7813 	mutex_lock(&fs_devices->device_list_mutex);
7814 	list_for_each_entry(device, &fs_devices->devices, dev_list) {
7815 		stats_cnt = atomic_read(&device->dev_stats_ccnt);
7816 		if (!device->dev_stats_valid || stats_cnt == 0)
7817 			continue;
7818 
7819 
7820 		/*
7821 		 * There is a LOAD-LOAD control dependency between the value of
7822 		 * dev_stats_ccnt and updating the on-disk values which requires
7823 		 * reading the in-memory counters. Such control dependencies
7824 		 * require explicit read memory barriers.
7825 		 *
7826 		 * This memory barrier pairs with smp_mb__before_atomic in
7827 		 * btrfs_dev_stat_inc/btrfs_dev_stat_set and with the full
7828 		 * barrier implied by atomic_xchg in
7829 		 * btrfs_dev_stat_read_and_reset.
7830 		 */
7831 		smp_rmb();
7832 
7833 		ret = update_dev_stat_item(trans, device);
7834 		if (!ret)
7835 			atomic_sub(stats_cnt, &device->dev_stats_ccnt);
7836 	}
7837 	mutex_unlock(&fs_devices->device_list_mutex);
7838 
7839 	return ret;
7840 }
7841 
7842 void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
7843 {
7844 	btrfs_dev_stat_inc(dev, index);
7845 
7846 	if (!dev->dev_stats_valid)
7847 		return;
7848 	btrfs_err_rl_in_rcu(dev->fs_info,
7849 		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
7850 			   btrfs_dev_name(dev),
7851 			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
7852 			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
7853 			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
7854 			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
7855 			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
7856 }
7857 
7858 static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
7859 {
7860 	int i;
7861 
7862 	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7863 		if (btrfs_dev_stat_read(dev, i) != 0)
7864 			break;
7865 	if (i == BTRFS_DEV_STAT_VALUES_MAX)
7866 		return; /* all values == 0, suppress message */
7867 
7868 	btrfs_info_in_rcu(dev->fs_info,
7869 		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
7870 	       btrfs_dev_name(dev),
7871 	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
7872 	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
7873 	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
7874 	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
7875 	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
7876 }
7877 
7878 int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info,
7879 			struct btrfs_ioctl_get_dev_stats *stats)
7880 {
7881 	BTRFS_DEV_LOOKUP_ARGS(args);
7882 	struct btrfs_device *dev;
7883 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7884 	int i;
7885 
7886 	mutex_lock(&fs_devices->device_list_mutex);
7887 	args.devid = stats->devid;
7888 	dev = btrfs_find_device(fs_info->fs_devices, &args);
7889 	mutex_unlock(&fs_devices->device_list_mutex);
7890 
7891 	if (!dev) {
7892 		btrfs_warn(fs_info, "get dev_stats failed, device not found");
7893 		return -ENODEV;
7894 	} else if (!dev->dev_stats_valid) {
7895 		btrfs_warn(fs_info, "get dev_stats failed, not yet valid");
7896 		return -ENODEV;
7897 	} else if (stats->flags & BTRFS_DEV_STATS_RESET) {
7898 		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
7899 			if (stats->nr_items > i)
7900 				stats->values[i] =
7901 					btrfs_dev_stat_read_and_reset(dev, i);
7902 			else
7903 				btrfs_dev_stat_set(dev, i, 0);
7904 		}
7905 		btrfs_info(fs_info, "device stats zeroed by %s (%d)",
7906 			   current->comm, task_pid_nr(current));
7907 	} else {
7908 		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7909 			if (stats->nr_items > i)
7910 				stats->values[i] = btrfs_dev_stat_read(dev, i);
7911 	}
7912 	if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
7913 		stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
7914 	return 0;
7915 }
7916 
7917 /*
7918  * Update the size and bytes used for each device where it changed.  This is
7919  * delayed since we would otherwise get errors while writing out the
7920  * superblocks.
7921  *
7922  * Must be invoked during transaction commit.
7923  */
7924 void btrfs_commit_device_sizes(struct btrfs_transaction *trans)
7925 {
7926 	struct btrfs_device *curr, *next;
7927 
7928 	ASSERT(trans->state == TRANS_STATE_COMMIT_DOING);
7929 
7930 	if (list_empty(&trans->dev_update_list))
7931 		return;
7932 
7933 	/*
7934 	 * We don't need the device_list_mutex here.  This list is owned by the
7935 	 * transaction and the transaction must complete before the device is
7936 	 * released.
7937 	 */
7938 	mutex_lock(&trans->fs_info->chunk_mutex);
7939 	list_for_each_entry_safe(curr, next, &trans->dev_update_list,
7940 				 post_commit_list) {
7941 		list_del_init(&curr->post_commit_list);
7942 		curr->commit_total_bytes = curr->disk_total_bytes;
7943 		curr->commit_bytes_used = curr->bytes_used;
7944 	}
7945 	mutex_unlock(&trans->fs_info->chunk_mutex);
7946 }
7947 
7948 /*
7949  * Multiplicity factor for simple profiles: DUP, RAID1-like and RAID10.
7950  */
7951 int btrfs_bg_type_to_factor(u64 flags)
7952 {
7953 	const int index = btrfs_bg_flags_to_raid_index(flags);
7954 
7955 	return btrfs_raid_array[index].ncopies;
7956 }
7957 
7958 
7959 
7960 static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
7961 				 u64 chunk_offset, u64 devid,
7962 				 u64 physical_offset, u64 physical_len)
7963 {
7964 	struct btrfs_dev_lookup_args args = { .devid = devid };
7965 	struct btrfs_chunk_map *map;
7966 	struct btrfs_device *dev;
7967 	u64 stripe_len;
7968 	bool found = false;
7969 	int ret = 0;
7970 	int i;
7971 
7972 	map = btrfs_find_chunk_map(fs_info, chunk_offset, 1);
7973 	if (!map) {
7974 		btrfs_err(fs_info,
7975 "dev extent physical offset %llu on devid %llu doesn't have corresponding chunk",
7976 			  physical_offset, devid);
7977 		ret = -EUCLEAN;
7978 		goto out;
7979 	}
7980 
7981 	stripe_len = btrfs_calc_stripe_length(map);
7982 	if (physical_len != stripe_len) {
7983 		btrfs_err(fs_info,
7984 "dev extent physical offset %llu on devid %llu length doesn't match chunk %llu, have %llu expect %llu",
7985 			  physical_offset, devid, map->start, physical_len,
7986 			  stripe_len);
7987 		ret = -EUCLEAN;
7988 		goto out;
7989 	}
7990 
7991 	/*
7992 	 * Very old mkfs.btrfs (before v4.1) will not respect the reserved
7993 	 * space. Although the kernel can handle it without problems, it's
7994 	 * better to warn the users.
7995 	 */
7996 	if (physical_offset < BTRFS_DEVICE_RANGE_RESERVED)
7997 		btrfs_warn(fs_info,
7998 		"devid %llu physical %llu len %llu inside the reserved space",
7999 			   devid, physical_offset, physical_len);
8000 
8001 	for (i = 0; i < map->num_stripes; i++) {
8002 		if (map->stripes[i].dev->devid == devid &&
8003 		    map->stripes[i].physical == physical_offset) {
8004 			found = true;
8005 			if (map->verified_stripes >= map->num_stripes) {
8006 				btrfs_err(fs_info,
8007 				"too many dev extents for chunk %llu found",
8008 					  map->start);
8009 				ret = -EUCLEAN;
8010 				goto out;
8011 			}
8012 			map->verified_stripes++;
8013 			break;
8014 		}
8015 	}
8016 	if (!found) {
8017 		btrfs_err(fs_info,
8018 	"dev extent physical offset %llu devid %llu has no corresponding chunk",
8019 			physical_offset, devid);
8020 		ret = -EUCLEAN;
8021 	}
8022 
8023 	/* Make sure no dev extent is beyond device boundary */
8024 	dev = btrfs_find_device(fs_info->fs_devices, &args);
8025 	if (!dev) {
8026 		btrfs_err(fs_info, "failed to find devid %llu", devid);
8027 		ret = -EUCLEAN;
8028 		goto out;
8029 	}
8030 
8031 	if (physical_offset + physical_len > dev->disk_total_bytes) {
8032 		btrfs_err(fs_info,
8033 "dev extent devid %llu physical offset %llu len %llu is beyond device boundary %llu",
8034 			  devid, physical_offset, physical_len,
8035 			  dev->disk_total_bytes);
8036 		ret = -EUCLEAN;
8037 		goto out;
8038 	}
8039 
8040 	if (dev->zone_info) {
8041 		u64 zone_size = dev->zone_info->zone_size;
8042 
8043 		if (!IS_ALIGNED(physical_offset, zone_size) ||
8044 		    !IS_ALIGNED(physical_len, zone_size)) {
8045 			btrfs_err(fs_info,
8046 "zoned: dev extent devid %llu physical offset %llu len %llu is not aligned to device zone",
8047 				  devid, physical_offset, physical_len);
8048 			ret = -EUCLEAN;
8049 			goto out;
8050 		}
8051 	}
8052 
8053 out:
8054 	btrfs_free_chunk_map(map);
8055 	return ret;
8056 }
8057 
8058 static int verify_chunk_dev_extent_mapping(struct btrfs_fs_info *fs_info)
8059 {
8060 	struct rb_node *node;
8061 	int ret = 0;
8062 
8063 	read_lock(&fs_info->mapping_tree_lock);
8064 	for (node = rb_first_cached(&fs_info->mapping_tree); node; node = rb_next(node)) {
8065 		struct btrfs_chunk_map *map;
8066 
8067 		map = rb_entry(node, struct btrfs_chunk_map, rb_node);
8068 		if (map->num_stripes != map->verified_stripes) {
8069 			btrfs_err(fs_info,
8070 			"chunk %llu has missing dev extent, have %d expect %d",
8071 				  map->start, map->verified_stripes, map->num_stripes);
8072 			ret = -EUCLEAN;
8073 			goto out;
8074 		}
8075 	}
8076 out:
8077 	read_unlock(&fs_info->mapping_tree_lock);
8078 	return ret;
8079 }
8080 
8081 /*
8082  * Ensure that all dev extents are mapped to the correct chunk, otherwise
8083  * later chunk allocation/free would cause unexpected behavior.
8084  *
8085  * NOTE: This will iterate through the whole device tree, which should be
8086  * roughly the same size as the chunk tree.  This slightly increases mount time.
8087  */
8088 int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info)
8089 {
8090 	struct btrfs_path *path;
8091 	struct btrfs_root *root = fs_info->dev_root;
8092 	struct btrfs_key key;
8093 	u64 prev_devid = 0;
8094 	u64 prev_dev_ext_end = 0;
8095 	int ret = 0;
8096 
8097 	/*
8098 	 * We don't have a dev_root because we mounted with ignorebadroots and
8099 	 * failed to load the root, so we want to skip the verification in this
8100 	 * case for sure.
8101 	 *
8102 	 * However if the dev root is fine, but the tree itself is corrupted
8103 	 * we'd still fail to mount.  This verification is only to make sure
8104 	 * writes can happen safely, so instead just bypass this check
8105 	 * completely in the case of IGNOREBADROOTS.
8106 	 */
8107 	if (btrfs_test_opt(fs_info, IGNOREBADROOTS))
8108 		return 0;
8109 
8110 	key.objectid = 1;
8111 	key.type = BTRFS_DEV_EXTENT_KEY;
8112 	key.offset = 0;
8113 
8114 	path = btrfs_alloc_path();
8115 	if (!path)
8116 		return -ENOMEM;
8117 
8118 	path->reada = READA_FORWARD;
8119 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
8120 	if (ret < 0)
8121 		goto out;
8122 
8123 	if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
8124 		ret = btrfs_next_leaf(root, path);
8125 		if (ret < 0)
8126 			goto out;
8127 		/* No dev extents at all? Not good */
8128 		if (ret > 0) {
8129 			ret = -EUCLEAN;
8130 			goto out;
8131 		}
8132 	}
8133 	while (1) {
8134 		struct extent_buffer *leaf = path->nodes[0];
8135 		struct btrfs_dev_extent *dext;
8136 		int slot = path->slots[0];
8137 		u64 chunk_offset;
8138 		u64 physical_offset;
8139 		u64 physical_len;
8140 		u64 devid;
8141 
8142 		btrfs_item_key_to_cpu(leaf, &key, slot);
8143 		if (key.type != BTRFS_DEV_EXTENT_KEY)
8144 			break;
8145 		devid = key.objectid;
8146 		physical_offset = key.offset;
8147 
8148 		dext = btrfs_item_ptr(leaf, slot, struct btrfs_dev_extent);
8149 		chunk_offset = btrfs_dev_extent_chunk_offset(leaf, dext);
8150 		physical_len = btrfs_dev_extent_length(leaf, dext);
8151 
8152 		/* Check if this dev extent overlaps with the previous one */
8153 		if (devid == prev_devid && physical_offset < prev_dev_ext_end) {
8154 			btrfs_err(fs_info,
8155 "dev extent devid %llu physical offset %llu overlap with previous dev extent end %llu",
8156 				  devid, physical_offset, prev_dev_ext_end);
8157 			ret = -EUCLEAN;
8158 			goto out;
8159 		}
8160 
8161 		ret = verify_one_dev_extent(fs_info, chunk_offset, devid,
8162 					    physical_offset, physical_len);
8163 		if (ret < 0)
8164 			goto out;
8165 		prev_devid = devid;
8166 		prev_dev_ext_end = physical_offset + physical_len;
8167 
8168 		ret = btrfs_next_item(root, path);
8169 		if (ret < 0)
8170 			goto out;
8171 		if (ret > 0) {
8172 			ret = 0;
8173 			break;
8174 		}
8175 	}
8176 
8177 	/* Ensure all chunks have corresponding dev extents */
8178 	ret = verify_chunk_dev_extent_mapping(fs_info);
8179 out:
8180 	btrfs_free_path(path);
8181 	return ret;
8182 }
8183 
8184 /*
8185  * Check whether the given block group or device is pinned by any inode being
8186  * used as a swapfile.
8187  */
8188 bool btrfs_pinned_by_swapfile(struct btrfs_fs_info *fs_info, void *ptr)
8189 {
8190 	struct btrfs_swapfile_pin *sp;
8191 	struct rb_node *node;
8192 
8193 	spin_lock(&fs_info->swapfile_pins_lock);
8194 	node = fs_info->swapfile_pins.rb_node;
8195 	while (node) {
8196 		sp = rb_entry(node, struct btrfs_swapfile_pin, node);
8197 		if (ptr < sp->ptr)
8198 			node = node->rb_left;
8199 		else if (ptr > sp->ptr)
8200 			node = node->rb_right;
8201 		else
8202 			break;
8203 	}
8204 	spin_unlock(&fs_info->swapfile_pins_lock);
8205 	return node != NULL;
8206 }
8207 
8208 static int relocating_repair_kthread(void *data)
8209 {
8210 	struct btrfs_block_group *cache = data;
8211 	struct btrfs_fs_info *fs_info = cache->fs_info;
8212 	u64 target;
8213 	int ret = 0;
8214 
8215 	target = cache->start;
8216 	btrfs_put_block_group(cache);
8217 
8218 	sb_start_write(fs_info->sb);
8219 	if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE)) {
8220 		btrfs_info(fs_info,
8221 			   "zoned: skip relocating block group %llu to repair: EBUSY",
8222 			   target);
8223 		sb_end_write(fs_info->sb);
8224 		return -EBUSY;
8225 	}
8226 
8227 	mutex_lock(&fs_info->reclaim_bgs_lock);
8228 
8229 	/* Ensure block group still exists */
8230 	cache = btrfs_lookup_block_group(fs_info, target);
8231 	if (!cache)
8232 		goto out;
8233 
8234 	if (!test_bit(BLOCK_GROUP_FLAG_RELOCATING_REPAIR, &cache->runtime_flags))
8235 		goto out;
8236 
8237 	ret = btrfs_may_alloc_data_chunk(fs_info, target);
8238 	if (ret < 0)
8239 		goto out;
8240 
8241 	btrfs_info(fs_info,
8242 		   "zoned: relocating block group %llu to repair IO failure",
8243 		   target);
8244 	ret = btrfs_relocate_chunk(fs_info, target);
8245 
8246 out:
8247 	if (cache)
8248 		btrfs_put_block_group(cache);
8249 	mutex_unlock(&fs_info->reclaim_bgs_lock);
8250 	btrfs_exclop_finish(fs_info);
8251 	sb_end_write(fs_info->sb);
8252 
8253 	return ret;
8254 }
8255 
8256 bool btrfs_repair_one_zone(struct btrfs_fs_info *fs_info, u64 logical)
8257 {
8258 	struct btrfs_block_group *cache;
8259 
8260 	if (!btrfs_is_zoned(fs_info))
8261 		return false;
8262 
8263 	/* Do not attempt to repair in degraded state */
8264 	if (btrfs_test_opt(fs_info, DEGRADED))
8265 		return true;
8266 
8267 	cache = btrfs_lookup_block_group(fs_info, logical);
8268 	if (!cache)
8269 		return true;
8270 
8271 	if (test_and_set_bit(BLOCK_GROUP_FLAG_RELOCATING_REPAIR, &cache->runtime_flags)) {
8272 		btrfs_put_block_group(cache);
8273 		return true;
8274 	}
8275 
8276 	kthread_run(relocating_repair_kthread, cache,
8277 		    "btrfs-relocating-repair");
8278 
8279 	return true;
8280 }
8281 
8282 static void map_raid56_repair_block(struct btrfs_io_context *bioc,
8283 				    struct btrfs_io_stripe *smap,
8284 				    u64 logical)
8285 {
8286 	int data_stripes = nr_bioc_data_stripes(bioc);
8287 	int i;
8288 
8289 	for (i = 0; i < data_stripes; i++) {
8290 		u64 stripe_start = bioc->full_stripe_logical +
8291 				   btrfs_stripe_nr_to_offset(i);
8292 
8293 		if (logical >= stripe_start &&
8294 		    logical < stripe_start + BTRFS_STRIPE_LEN)
8295 			break;
8296 	}
8297 	ASSERT(i < data_stripes);
8298 	smap->dev = bioc->stripes[i].dev;
8299 	smap->physical = bioc->stripes[i].physical +
8300 			((logical - bioc->full_stripe_logical) &
8301 			 BTRFS_STRIPE_LEN_MASK);
8302 }
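/*
 * Worked example (assuming BTRFS_STRIPE_LEN is 64K): if full_stripe_logical
 * is F and @logical == F + 70000, the loop picks data stripe i == 1 (since
 * 70000 >= 65536) and the physical target becomes
 * stripes[1].physical + (70000 & BTRFS_STRIPE_LEN_MASK), i.e. + 4464.
 */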
8303 
8304 /*
8305  * Map a repair write into a single device.
8306  *
8307  * A repair write is triggered by read time repair or scrub, which would only
8308  * update the contents of a single device.
8309  * It does not update any other mirrors nor go through the RMW path.
8310  *
8311  * Callers should ensure:
8312  *
8313  * - Call btrfs_bio_counter_inc_blocked() first
8314  * - The range does not cross stripe boundary
8315  * - Has a valid @mirror_num passed in.
8316  */
8317 int btrfs_map_repair_block(struct btrfs_fs_info *fs_info,
8318 			   struct btrfs_io_stripe *smap, u64 logical,
8319 			   u32 length, int mirror_num)
8320 {
8321 	struct btrfs_io_context *bioc = NULL;
8322 	u64 map_length = length;
8323 	int mirror_ret = mirror_num;
8324 	int ret;
8325 
8326 	ASSERT(mirror_num > 0);
8327 
8328 	ret = btrfs_map_block(fs_info, BTRFS_MAP_WRITE, logical, &map_length,
8329 			      &bioc, smap, &mirror_ret);
8330 	if (ret < 0)
8331 		return ret;
8332 
8333 	/* The map range should not cross stripe boundary. */
8334 	ASSERT(map_length >= length);
8335 
8336 	/* Already mapped to single stripe. */
8337 	if (!bioc)
8338 		goto out;
8339 
8340 	/* Map the RAID56 multi-stripe writes to a single one. */
8341 	if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
8342 		map_raid56_repair_block(bioc, smap, logical);
8343 		goto out;
8344 	}
8345 
8346 	ASSERT(mirror_num <= bioc->num_stripes);
8347 	smap->dev = bioc->stripes[mirror_num - 1].dev;
8348 	smap->physical = bioc->stripes[mirror_num - 1].physical;
8349 out:
8350 	btrfs_put_bioc(bioc);
8351 	ASSERT(smap->dev);
8352 	return 0;
8353 }
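/*
 * A caller sketch for the helper above (illustrative; see the comment before
 * btrfs_map_repair_block() for the rules the caller must follow):
 *
 *	struct btrfs_io_stripe smap = { 0 };
 *	int ret;
 *
 *	btrfs_bio_counter_inc_blocked(fs_info);
 *	ret = btrfs_map_repair_block(fs_info, &smap, logical, length,
 *				     mirror_num);
 *	if (!ret)
 *		... submit the repair write to smap.dev at smap.physical ...
 *	btrfs_bio_counter_dec(fs_info);
 */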
8354