// SPDX-License-Identifier: GPL-2.0

#include "messages.h"
#include "fs.h"
#include "accessors.h"
#include "volumes.h"

static const struct btrfs_csums {
	u16		size;
	const char	name[10];
	const char	driver[12];
} btrfs_csums[] = {
	[BTRFS_CSUM_TYPE_CRC32] = { .size = 4, .name = "crc32c" },
	[BTRFS_CSUM_TYPE_XXHASH] = { .size = 8, .name = "xxhash64" },
	[BTRFS_CSUM_TYPE_SHA256] = { .size = 32, .name = "sha256" },
	[BTRFS_CSUM_TYPE_BLAKE2] = { .size = 32, .name = "blake2b",
				     .driver = "blake2b-256" },
};

/* This exists for btrfs-progs usages. */
u16 btrfs_csum_type_size(u16 type)
{
	return btrfs_csums[type].size;
}

int btrfs_super_csum_size(const struct btrfs_super_block *s)
{
	u16 t = btrfs_super_csum_type(s);

	/* csum type is validated at mount time. */
	return btrfs_csum_type_size(t);
}

const char *btrfs_super_csum_name(u16 csum_type)
{
	/* csum type is validated at mount time. */
	return btrfs_csums[csum_type].name;
}

/*
 * Return driver name if defined, otherwise the name that's also a valid driver
 * name.
 */
const char *btrfs_super_csum_driver(u16 csum_type)
{
	/* csum type is validated at mount time */
	return btrfs_csums[csum_type].driver[0] ?
		btrfs_csums[csum_type].driver :
		btrfs_csums[csum_type].name;
}
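
/*
 * Illustrative sketch (not part of the original file): the string returned by
 * btrfs_super_csum_driver() is suitable for the crypto API when the checksum
 * hash is set up, roughly along these lines:
 *
 *	struct crypto_shash *csum_shash;
 *
 *	csum_shash = crypto_alloc_shash(btrfs_super_csum_driver(csum_type), 0, 0);
 *	if (IS_ERR(csum_shash))
 *		return PTR_ERR(csum_shash);
 *
 * crypto_alloc_shash() resolves "crc32c", "xxhash64", "sha256" or
 * "blake2b-256" to whatever implementation the running kernel provides.
 */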

size_t __attribute_const__ btrfs_get_num_csums(void)
{
	return ARRAY_SIZE(btrfs_csums);
}

/*
 * The following block sizes are supported on all systems:
 *
 * - 4K
 *   This is the most common block size.  For PAGE_SIZE > 4K cases the subpage
 *   mode is used.
 *
 * - PAGE_SIZE
 *   The straightforward block size to support.
 *
 * Extra block sizes are supported depending on the kernel config:
 *
 * - BTRFS_MIN_BLOCKSIZE
 *   This is either 4K (regular builds) or 2K (debug builds) and allows
 *   testing the subpage routines on x86_64.
 */
bool __attribute_const__ btrfs_supported_blocksize(u32 blocksize)
{
	/* @blocksize should be validated first. */
	ASSERT(is_power_of_2(blocksize) && blocksize >= BTRFS_MIN_BLOCKSIZE &&
	       blocksize <= BTRFS_MAX_BLOCKSIZE);

	if (blocksize == PAGE_SIZE || blocksize == SZ_4K || blocksize == BTRFS_MIN_BLOCKSIZE)
		return true;
#ifdef CONFIG_BTRFS_EXPERIMENTAL
	/*
	 * Support for bs > ps is implemented by specifying a minimal folio
	 * order for the filemap, thus implying large data folios.
	 * On HIGHMEM systems we cannot always access the content of a (large)
	 * folio in one go and instead have to walk it page by page.
	 *
	 * Many features do not implement a proper PAGE-sized loop for large
	 * folios, including:
	 *
	 * - compression
	 * - verity
	 * - encoded write
	 *
	 * Considering HIGHMEM is such a pain to deal with and is going to be
	 * deprecated eventually, just reject the HIGHMEM && bs > ps case.
	 */
	if (IS_ENABLED(CONFIG_HIGHMEM) && blocksize > PAGE_SIZE)
		return false;
	return true;
#endif
	return false;
}
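
/*
 * Illustrative sketch (an assumed caller, not code from this file): the
 * ASSERT() above relies on superblock validation having already rejected
 * out-of-range or non-power-of-two sector sizes, e.g.:
 *
 *	u32 sectorsize = btrfs_super_sectorsize(sb);
 *
 *	if (!is_power_of_2(sectorsize) || sectorsize < BTRFS_MIN_BLOCKSIZE ||
 *	    sectorsize > BTRFS_MAX_BLOCKSIZE)
 *		return -EINVAL;
 *	if (!btrfs_supported_blocksize(sectorsize))
 *		return -EINVAL;
 */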

/*
 * Start exclusive operation @type, return true on success.
 */
bool btrfs_exclop_start(struct btrfs_fs_info *fs_info,
			enum btrfs_exclusive_operation type)
{
	bool ret = false;

	spin_lock(&fs_info->super_lock);
	if (fs_info->exclusive_operation == BTRFS_EXCLOP_NONE) {
		fs_info->exclusive_operation = type;
		ret = true;
	}
	spin_unlock(&fs_info->super_lock);

	return ret;
}
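
/*
 * Illustrative sketch (hypothetical caller, not code from this file): an
 * exclusive operation is typically claimed for the whole user-visible
 * operation and released with btrfs_exclop_finish(), e.g.:
 *
 *	if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_RESIZE))
 *		return BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
 *	ret = do_resize(fs_info, ...);	// hypothetical helper
 *	btrfs_exclop_finish(fs_info);
 */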

/*
 * Conditionally allow entering an exclusive operation if it's compatible with
 * the one currently running.  On success the super_lock is left held and must
 * be released with btrfs_exclop_start_unlock(); the operation is later ended
 * with btrfs_exclop_finish().
 *
 * Compatibility:
 * - the same type is already running
 * - a device add is requested while balance is paused
 * - BTRFS_EXCLOP_NONE is intentionally incompatible: the caller must first
 *   check the condition that would allow the none -> @type transition
 */
bool btrfs_exclop_start_try_lock(struct btrfs_fs_info *fs_info,
				 enum btrfs_exclusive_operation type)
{
	spin_lock(&fs_info->super_lock);
	if (fs_info->exclusive_operation == type ||
	    (fs_info->exclusive_operation == BTRFS_EXCLOP_BALANCE_PAUSED &&
	     type == BTRFS_EXCLOP_DEV_ADD))
		return true;

	spin_unlock(&fs_info->super_lock);
	return false;
}
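
/*
 * Illustrative sketch (hypothetical caller): a successful
 * btrfs_exclop_start_try_lock() returns with super_lock still held, so the
 * caller inspects the state, drops the lock and eventually finishes the
 * operation:
 *
 *	if (!btrfs_exclop_start_try_lock(fs_info, BTRFS_EXCLOP_DEV_ADD))
 *		return BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
 *	// e.g. check whether a paused balance is what made this compatible
 *	btrfs_exclop_start_unlock(fs_info);
 *	...
 *	btrfs_exclop_finish(fs_info);
 */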

void btrfs_exclop_start_unlock(struct btrfs_fs_info *fs_info)
{
	spin_unlock(&fs_info->super_lock);
}

void btrfs_exclop_finish(struct btrfs_fs_info *fs_info)
{
	spin_lock(&fs_info->super_lock);
	WRITE_ONCE(fs_info->exclusive_operation, BTRFS_EXCLOP_NONE);
	spin_unlock(&fs_info->super_lock);
	sysfs_notify(&fs_info->fs_devices->fsid_kobj, NULL, "exclusive_operation");
}

void btrfs_exclop_balance(struct btrfs_fs_info *fs_info,
			  enum btrfs_exclusive_operation op)
{
	switch (op) {
	case BTRFS_EXCLOP_BALANCE_PAUSED:
		spin_lock(&fs_info->super_lock);
		ASSERT(fs_info->exclusive_operation == BTRFS_EXCLOP_BALANCE ||
		       fs_info->exclusive_operation == BTRFS_EXCLOP_DEV_ADD ||
		       fs_info->exclusive_operation == BTRFS_EXCLOP_NONE ||
		       fs_info->exclusive_operation == BTRFS_EXCLOP_BALANCE_PAUSED);
		fs_info->exclusive_operation = BTRFS_EXCLOP_BALANCE_PAUSED;
		spin_unlock(&fs_info->super_lock);
		break;
	case BTRFS_EXCLOP_BALANCE:
		spin_lock(&fs_info->super_lock);
		ASSERT(fs_info->exclusive_operation == BTRFS_EXCLOP_BALANCE_PAUSED);
		fs_info->exclusive_operation = BTRFS_EXCLOP_BALANCE;
		spin_unlock(&fs_info->super_lock);
		break;
	default:
		btrfs_warn(fs_info,
			"invalid exclop balance operation %d requested", op);
	}
}
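
/*
 * Illustrative sketch (hypothetical caller): per the ASSERT()s above, a
 * running balance can be flipped to the paused state, and only a paused
 * balance can be flipped back:
 *
 *	btrfs_exclop_balance(fs_info, BTRFS_EXCLOP_BALANCE_PAUSED);
 *	...
 *	btrfs_exclop_balance(fs_info, BTRFS_EXCLOP_BALANCE);
 */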

void __btrfs_set_fs_incompat(struct btrfs_fs_info *fs_info, u64 flag,
			     const char *name)
{
	struct btrfs_super_block *disk_super;
	u64 features;

	disk_super = fs_info->super_copy;
	features = btrfs_super_incompat_flags(disk_super);
	if (!(features & flag)) {
		spin_lock(&fs_info->super_lock);
		features = btrfs_super_incompat_flags(disk_super);
		if (!(features & flag)) {
			features |= flag;
			btrfs_set_super_incompat_flags(disk_super, features);
			btrfs_info(fs_info,
				"setting incompat feature flag for %s (0x%llx)",
				name, flag);
		}
		spin_unlock(&fs_info->super_lock);
		set_bit(BTRFS_FS_FEATURE_CHANGED, &fs_info->flags);
	}
}
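
/*
 * Illustrative sketch (an assumption about the wrapper, which is not defined
 * in this file): callers normally go through a btrfs_set_fs_incompat()-style
 * macro that pastes the flag name, so a call site reads roughly as:
 *
 *	btrfs_set_fs_incompat(fs_info, RAID56);
 *	// which expands to approximately:
 *	__btrfs_set_fs_incompat(fs_info, BTRFS_FEATURE_INCOMPAT_RAID56, "RAID56");
 */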

void __btrfs_clear_fs_incompat(struct btrfs_fs_info *fs_info, u64 flag,
			       const char *name)
{
	struct btrfs_super_block *disk_super;
	u64 features;

	disk_super = fs_info->super_copy;
	features = btrfs_super_incompat_flags(disk_super);
	if (features & flag) {
		spin_lock(&fs_info->super_lock);
		features = btrfs_super_incompat_flags(disk_super);
		if (features & flag) {
			features &= ~flag;
			btrfs_set_super_incompat_flags(disk_super, features);
			btrfs_info(fs_info,
				"clearing incompat feature flag for %s (0x%llx)",
				name, flag);
		}
		spin_unlock(&fs_info->super_lock);
		set_bit(BTRFS_FS_FEATURE_CHANGED, &fs_info->flags);
	}
}

void __btrfs_set_fs_compat_ro(struct btrfs_fs_info *fs_info, u64 flag,
			      const char *name)
{
	struct btrfs_super_block *disk_super;
	u64 features;

	disk_super = fs_info->super_copy;
	features = btrfs_super_compat_ro_flags(disk_super);
	if (!(features & flag)) {
		spin_lock(&fs_info->super_lock);
		features = btrfs_super_compat_ro_flags(disk_super);
		if (!(features & flag)) {
			features |= flag;
			btrfs_set_super_compat_ro_flags(disk_super, features);
			btrfs_info(fs_info,
				"setting compat-ro feature flag for %s (0x%llx)",
				name, flag);
		}
		spin_unlock(&fs_info->super_lock);
		set_bit(BTRFS_FS_FEATURE_CHANGED, &fs_info->flags);
	}
}

void __btrfs_clear_fs_compat_ro(struct btrfs_fs_info *fs_info, u64 flag,
				const char *name)
{
	struct btrfs_super_block *disk_super;
	u64 features;

	disk_super = fs_info->super_copy;
	features = btrfs_super_compat_ro_flags(disk_super);
	if (features & flag) {
		spin_lock(&fs_info->super_lock);
		features = btrfs_super_compat_ro_flags(disk_super);
		if (features & flag) {
			features &= ~flag;
			btrfs_set_super_compat_ro_flags(disk_super, features);
			btrfs_info(fs_info,
				"clearing compat-ro feature flag for %s (0x%llx)",
				name, flag);
		}
		spin_unlock(&fs_info->super_lock);
		set_bit(BTRFS_FS_FEATURE_CHANGED, &fs_info->flags);
	}
}
276