xref: /linux/fs/btrfs/super.c (revision d7f39aee79f04eeaa42085728423501b33ac5be5)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2007 Oracle.  All rights reserved.
4  */
5 
6 #include <linux/blkdev.h>
7 #include <linux/module.h>
8 #include <linux/fs.h>
9 #include <linux/pagemap.h>
10 #include <linux/highmem.h>
11 #include <linux/time.h>
12 #include <linux/init.h>
13 #include <linux/seq_file.h>
14 #include <linux/string.h>
15 #include <linux/backing-dev.h>
16 #include <linux/mount.h>
17 #include <linux/writeback.h>
18 #include <linux/statfs.h>
19 #include <linux/compat.h>
20 #include <linux/parser.h>
21 #include <linux/ctype.h>
22 #include <linux/namei.h>
23 #include <linux/miscdevice.h>
24 #include <linux/magic.h>
25 #include <linux/slab.h>
26 #include <linux/ratelimit.h>
27 #include <linux/crc32c.h>
28 #include <linux/btrfs.h>
29 #include <linux/security.h>
30 #include <linux/fs_parser.h>
31 #include "messages.h"
32 #include "delayed-inode.h"
33 #include "ctree.h"
34 #include "disk-io.h"
35 #include "transaction.h"
36 #include "btrfs_inode.h"
37 #include "props.h"
38 #include "xattr.h"
39 #include "bio.h"
40 #include "export.h"
41 #include "compression.h"
42 #include "dev-replace.h"
43 #include "free-space-cache.h"
44 #include "backref.h"
45 #include "space-info.h"
46 #include "sysfs.h"
47 #include "zoned.h"
48 #include "tests/btrfs-tests.h"
49 #include "block-group.h"
50 #include "discard.h"
51 #include "qgroup.h"
52 #include "raid56.h"
53 #include "fs.h"
54 #include "accessors.h"
55 #include "defrag.h"
56 #include "dir-item.h"
57 #include "ioctl.h"
58 #include "scrub.h"
59 #include "verity.h"
60 #include "super.h"
61 #include "extent-tree.h"
62 #define CREATE_TRACE_POINTS
63 #include <trace/events/btrfs.h>
64 
65 static const struct super_operations btrfs_super_ops;
66 static struct file_system_type btrfs_fs_type;
67 
68 static void btrfs_put_super(struct super_block *sb)
69 {
70 	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
71 
72 	btrfs_info(fs_info, "last unmount of filesystem %pU", fs_info->fs_devices->fsid);
73 	close_ctree(fs_info);
74 }
75 
76 /* Store the mount options related information. */
77 struct btrfs_fs_context {
78 	char *subvol_name;
79 	u64 subvol_objectid;
80 	u64 max_inline;
81 	u32 commit_interval;
82 	u32 metadata_ratio;
83 	u32 thread_pool_size;
84 	unsigned long mount_opt;
85 	unsigned long compress_type:4;
86 	unsigned int compress_level;
87 	refcount_t refs;
88 };
89 
90 enum {
91 	Opt_acl,
92 	Opt_clear_cache,
93 	Opt_commit_interval,
94 	Opt_compress,
95 	Opt_compress_force,
96 	Opt_compress_force_type,
97 	Opt_compress_type,
98 	Opt_degraded,
99 	Opt_device,
100 	Opt_fatal_errors,
101 	Opt_flushoncommit,
102 	Opt_max_inline,
103 	Opt_barrier,
104 	Opt_datacow,
105 	Opt_datasum,
106 	Opt_defrag,
107 	Opt_discard,
108 	Opt_discard_mode,
109 	Opt_ratio,
110 	Opt_rescan_uuid_tree,
111 	Opt_skip_balance,
112 	Opt_space_cache,
113 	Opt_space_cache_version,
114 	Opt_ssd,
115 	Opt_ssd_spread,
116 	Opt_subvol,
117 	Opt_subvol_empty,
118 	Opt_subvolid,
119 	Opt_thread_pool,
120 	Opt_treelog,
121 	Opt_user_subvol_rm_allowed,
122 	Opt_norecovery,
123 
124 	/* Rescue options */
125 	Opt_rescue,
126 	Opt_usebackuproot,
127 	Opt_nologreplay,
128 	Opt_ignorebadroots,
129 	Opt_ignoredatacsums,
130 	Opt_rescue_all,
131 
132 	/* Debugging options */
133 	Opt_enospc_debug,
134 #ifdef CONFIG_BTRFS_DEBUG
135 	Opt_fragment, Opt_fragment_data, Opt_fragment_metadata, Opt_fragment_all,
136 #endif
137 #ifdef CONFIG_BTRFS_FS_REF_VERIFY
138 	Opt_ref_verify,
139 #endif
140 	Opt_err,
141 };
142 
143 enum {
144 	Opt_fatal_errors_panic,
145 	Opt_fatal_errors_bug,
146 };
147 
148 static const struct constant_table btrfs_parameter_fatal_errors[] = {
149 	{ "panic", Opt_fatal_errors_panic },
150 	{ "bug", Opt_fatal_errors_bug },
151 	{}
152 };
153 
154 enum {
155 	Opt_discard_sync,
156 	Opt_discard_async,
157 };
158 
159 static const struct constant_table btrfs_parameter_discard[] = {
160 	{ "sync", Opt_discard_sync },
161 	{ "async", Opt_discard_async },
162 	{}
163 };
164 
165 enum {
166 	Opt_space_cache_v1,
167 	Opt_space_cache_v2,
168 };
169 
170 static const struct constant_table btrfs_parameter_space_cache[] = {
171 	{ "v1", Opt_space_cache_v1 },
172 	{ "v2", Opt_space_cache_v2 },
173 	{}
174 };
175 
176 enum {
177 	Opt_rescue_usebackuproot,
178 	Opt_rescue_nologreplay,
179 	Opt_rescue_ignorebadroots,
180 	Opt_rescue_ignoredatacsums,
181 	Opt_rescue_parameter_all,
182 };
183 
184 static const struct constant_table btrfs_parameter_rescue[] = {
185 	{ "usebackuproot", Opt_rescue_usebackuproot },
186 	{ "nologreplay", Opt_rescue_nologreplay },
187 	{ "ignorebadroots", Opt_rescue_ignorebadroots },
188 	{ "ibadroots", Opt_rescue_ignorebadroots },
189 	{ "ignoredatacsums", Opt_rescue_ignoredatacsums },
190 	{ "idatacsums", Opt_rescue_ignoredatacsums },
191 	{ "all", Opt_rescue_parameter_all },
192 	{}
193 };
194 
195 #ifdef CONFIG_BTRFS_DEBUG
196 enum {
197 	Opt_fragment_parameter_data,
198 	Opt_fragment_parameter_metadata,
199 	Opt_fragment_parameter_all,
200 };
201 
202 static const struct constant_table btrfs_parameter_fragment[] = {
203 	{ "data", Opt_fragment_parameter_data },
204 	{ "metadata", Opt_fragment_parameter_metadata },
205 	{ "all", Opt_fragment_parameter_all },
206 	{}
207 };
208 #endif
209 
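/*
 * Table of mount options recognized by the new mount API.  Entries declared
 * with fsparam_flag_no() accept both "foo" and "nofoo"; string and enum
 * entries take a value, e.g. "compress=zstd:3", "discard=async",
 * "rescue=usebackuproot" or "subvol=path/inside/the/filesystem".
 */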
210 static const struct fs_parameter_spec btrfs_fs_parameters[] = {
211 	fsparam_flag_no("acl", Opt_acl),
212 	fsparam_flag_no("autodefrag", Opt_defrag),
213 	fsparam_flag_no("barrier", Opt_barrier),
214 	fsparam_flag("clear_cache", Opt_clear_cache),
215 	fsparam_u32("commit", Opt_commit_interval),
216 	fsparam_flag("compress", Opt_compress),
217 	fsparam_string("compress", Opt_compress_type),
218 	fsparam_flag("compress-force", Opt_compress_force),
219 	fsparam_string("compress-force", Opt_compress_force_type),
220 	fsparam_flag_no("datacow", Opt_datacow),
221 	fsparam_flag_no("datasum", Opt_datasum),
222 	fsparam_flag("degraded", Opt_degraded),
223 	fsparam_string("device", Opt_device),
224 	fsparam_flag_no("discard", Opt_discard),
225 	fsparam_enum("discard", Opt_discard_mode, btrfs_parameter_discard),
226 	fsparam_enum("fatal_errors", Opt_fatal_errors, btrfs_parameter_fatal_errors),
227 	fsparam_flag_no("flushoncommit", Opt_flushoncommit),
228 	fsparam_string("max_inline", Opt_max_inline),
229 	fsparam_u32("metadata_ratio", Opt_ratio),
230 	fsparam_flag("rescan_uuid_tree", Opt_rescan_uuid_tree),
231 	fsparam_flag("skip_balance", Opt_skip_balance),
232 	fsparam_flag_no("space_cache", Opt_space_cache),
233 	fsparam_enum("space_cache", Opt_space_cache_version, btrfs_parameter_space_cache),
234 	fsparam_flag_no("ssd", Opt_ssd),
235 	fsparam_flag_no("ssd_spread", Opt_ssd_spread),
236 	fsparam_string("subvol", Opt_subvol),
237 	fsparam_flag("subvol=", Opt_subvol_empty),
238 	fsparam_u64("subvolid", Opt_subvolid),
239 	fsparam_u32("thread_pool", Opt_thread_pool),
240 	fsparam_flag_no("treelog", Opt_treelog),
241 	fsparam_flag("user_subvol_rm_allowed", Opt_user_subvol_rm_allowed),
242 
243 	/* Rescue options. */
244 	fsparam_enum("rescue", Opt_rescue, btrfs_parameter_rescue),
245 	/* Deprecated, with alias rescue=nologreplay */
246 	__fsparam(NULL, "nologreplay", Opt_nologreplay, fs_param_deprecated, NULL),
247 	/* Deprecated, with alias rescue=usebackuproot */
248 	__fsparam(NULL, "usebackuproot", Opt_usebackuproot, fs_param_deprecated, NULL),
249 	/* For compatibility only, alias for "rescue=nologreplay". */
250 	fsparam_flag("norecovery", Opt_norecovery),
251 
252 	/* Debugging options. */
253 	fsparam_flag_no("enospc_debug", Opt_enospc_debug),
254 #ifdef CONFIG_BTRFS_DEBUG
255 	fsparam_enum("fragment", Opt_fragment, btrfs_parameter_fragment),
256 #endif
257 #ifdef CONFIG_BTRFS_FS_REF_VERIFY
258 	fsparam_flag("ref_verify", Opt_ref_verify),
259 #endif
260 	{}
261 };
262 
263 /* No support for restricting writes to btrfs devices yet... */
264 static inline blk_mode_t btrfs_open_mode(struct fs_context *fc)
265 {
266 	return sb_open_mode(fc->sb_flags) & ~BLK_OPEN_RESTRICT_WRITES;
267 }
268 
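/*
 * Parse one mount parameter for the new mount API.  Options are only recorded
 * in the btrfs_fs_context here and applied to the fs_info later, at mount or
 * reconfigure time.  The exceptions are Opt_device, which scans the device
 * right away, and Opt_acl, which updates fc->sb_flags directly.
 */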
269 static int btrfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
270 {
271 	struct btrfs_fs_context *ctx = fc->fs_private;
272 	struct fs_parse_result result;
273 	int opt;
274 
275 	opt = fs_parse(fc, btrfs_fs_parameters, param, &result);
276 	if (opt < 0)
277 		return opt;
278 
279 	switch (opt) {
280 	case Opt_degraded:
281 		btrfs_set_opt(ctx->mount_opt, DEGRADED);
282 		break;
283 	case Opt_subvol_empty:
284 		/*
285 		 * This exists because we used to allow it by accident, so we're
286 		 * keeping it to maintain ABI.  See 37becec95ac3 ("Btrfs: allow
287 		 * empty subvol= again").
288 		 */
289 		break;
290 	case Opt_subvol:
291 		kfree(ctx->subvol_name);
292 		ctx->subvol_name = kstrdup(param->string, GFP_KERNEL);
293 		if (!ctx->subvol_name)
294 			return -ENOMEM;
295 		break;
296 	case Opt_subvolid:
297 		ctx->subvol_objectid = result.uint_64;
298 
299 		/* subvolid=0 means give me the original fs_tree. */
300 		if (!ctx->subvol_objectid)
301 			ctx->subvol_objectid = BTRFS_FS_TREE_OBJECTID;
302 		break;
303 	case Opt_device: {
304 		struct btrfs_device *device;
305 		blk_mode_t mode = btrfs_open_mode(fc);
306 
307 		mutex_lock(&uuid_mutex);
308 		device = btrfs_scan_one_device(param->string, mode, false);
309 		mutex_unlock(&uuid_mutex);
310 		if (IS_ERR(device))
311 			return PTR_ERR(device);
312 		break;
313 	}
314 	case Opt_datasum:
315 		if (result.negated) {
316 			btrfs_set_opt(ctx->mount_opt, NODATASUM);
317 		} else {
318 			btrfs_clear_opt(ctx->mount_opt, NODATACOW);
319 			btrfs_clear_opt(ctx->mount_opt, NODATASUM);
320 		}
321 		break;
322 	case Opt_datacow:
323 		if (result.negated) {
324 			btrfs_clear_opt(ctx->mount_opt, COMPRESS);
325 			btrfs_clear_opt(ctx->mount_opt, FORCE_COMPRESS);
326 			btrfs_set_opt(ctx->mount_opt, NODATACOW);
327 			btrfs_set_opt(ctx->mount_opt, NODATASUM);
328 		} else {
329 			btrfs_clear_opt(ctx->mount_opt, NODATACOW);
330 		}
331 		break;
332 	case Opt_compress_force:
333 	case Opt_compress_force_type:
334 		btrfs_set_opt(ctx->mount_opt, FORCE_COMPRESS);
335 		fallthrough;
336 	case Opt_compress:
337 	case Opt_compress_type:
338 		if (opt == Opt_compress || opt == Opt_compress_force) {
339 			ctx->compress_type = BTRFS_COMPRESS_ZLIB;
340 			ctx->compress_level = BTRFS_ZLIB_DEFAULT_LEVEL;
341 			btrfs_set_opt(ctx->mount_opt, COMPRESS);
342 			btrfs_clear_opt(ctx->mount_opt, NODATACOW);
343 			btrfs_clear_opt(ctx->mount_opt, NODATASUM);
344 		} else if (strncmp(param->string, "zlib", 4) == 0) {
345 			ctx->compress_type = BTRFS_COMPRESS_ZLIB;
346 			ctx->compress_level =
347 				btrfs_compress_str2level(BTRFS_COMPRESS_ZLIB,
348 							 param->string + 4);
349 			btrfs_set_opt(ctx->mount_opt, COMPRESS);
350 			btrfs_clear_opt(ctx->mount_opt, NODATACOW);
351 			btrfs_clear_opt(ctx->mount_opt, NODATASUM);
352 		} else if (strncmp(param->string, "lzo", 3) == 0) {
353 			ctx->compress_type = BTRFS_COMPRESS_LZO;
354 			ctx->compress_level = 0;
355 			btrfs_set_opt(ctx->mount_opt, COMPRESS);
356 			btrfs_clear_opt(ctx->mount_opt, NODATACOW);
357 			btrfs_clear_opt(ctx->mount_opt, NODATASUM);
358 		} else if (strncmp(param->string, "zstd", 4) == 0) {
359 			ctx->compress_type = BTRFS_COMPRESS_ZSTD;
360 			ctx->compress_level =
361 				btrfs_compress_str2level(BTRFS_COMPRESS_ZSTD,
362 							 param->string + 4);
363 			btrfs_set_opt(ctx->mount_opt, COMPRESS);
364 			btrfs_clear_opt(ctx->mount_opt, NODATACOW);
365 			btrfs_clear_opt(ctx->mount_opt, NODATASUM);
366 		} else if (strncmp(param->string, "no", 2) == 0) {
367 			ctx->compress_level = 0;
368 			ctx->compress_type = 0;
369 			btrfs_clear_opt(ctx->mount_opt, COMPRESS);
370 			btrfs_clear_opt(ctx->mount_opt, FORCE_COMPRESS);
371 		} else {
372 			btrfs_err(NULL, "unrecognized compression value %s",
373 				  param->string);
374 			return -EINVAL;
375 		}
376 		break;
377 	case Opt_ssd:
378 		if (result.negated) {
379 			btrfs_set_opt(ctx->mount_opt, NOSSD);
380 			btrfs_clear_opt(ctx->mount_opt, SSD);
381 			btrfs_clear_opt(ctx->mount_opt, SSD_SPREAD);
382 		} else {
383 			btrfs_set_opt(ctx->mount_opt, SSD);
384 			btrfs_clear_opt(ctx->mount_opt, NOSSD);
385 		}
386 		break;
387 	case Opt_ssd_spread:
388 		if (result.negated) {
389 			btrfs_clear_opt(ctx->mount_opt, SSD_SPREAD);
390 		} else {
391 			btrfs_set_opt(ctx->mount_opt, SSD);
392 			btrfs_set_opt(ctx->mount_opt, SSD_SPREAD);
393 			btrfs_clear_opt(ctx->mount_opt, NOSSD);
394 		}
395 		break;
396 	case Opt_barrier:
397 		if (result.negated)
398 			btrfs_set_opt(ctx->mount_opt, NOBARRIER);
399 		else
400 			btrfs_clear_opt(ctx->mount_opt, NOBARRIER);
401 		break;
402 	case Opt_thread_pool:
403 		if (result.uint_32 == 0) {
404 			btrfs_err(NULL, "invalid value 0 for thread_pool");
405 			return -EINVAL;
406 		}
407 		ctx->thread_pool_size = result.uint_32;
408 		break;
409 	case Opt_max_inline:
410 		ctx->max_inline = memparse(param->string, NULL);
411 		break;
412 	case Opt_acl:
413 		if (result.negated) {
414 			fc->sb_flags &= ~SB_POSIXACL;
415 		} else {
416 #ifdef CONFIG_BTRFS_FS_POSIX_ACL
417 			fc->sb_flags |= SB_POSIXACL;
418 #else
419 			btrfs_err(NULL, "support for ACL not compiled in");
420 			return -EINVAL;
421 #endif
422 		}
423 		/*
424 		 * The VFS limits the ability to toggle ACL on and off via remount,
425 		 * even though every file system allows it.  This seems to be an
426 		 * oversight, and setting the mask would make a remount fail.  So
427 		 * don't set the mask here; we'll check it in btrfs_reconfigure()
428 		 * and do the toggling ourselves.
429 		 */
430 		if (fc->purpose != FS_CONTEXT_FOR_RECONFIGURE)
431 			fc->sb_flags_mask |= SB_POSIXACL;
432 		break;
433 	case Opt_treelog:
434 		if (result.negated)
435 			btrfs_set_opt(ctx->mount_opt, NOTREELOG);
436 		else
437 			btrfs_clear_opt(ctx->mount_opt, NOTREELOG);
438 		break;
439 	case Opt_nologreplay:
440 		btrfs_warn(NULL,
441 		"'nologreplay' is deprecated, use 'rescue=nologreplay' instead");
442 		btrfs_set_opt(ctx->mount_opt, NOLOGREPLAY);
443 		break;
444 	case Opt_norecovery:
445 		btrfs_info(NULL,
446 "'norecovery' is for compatibility only, recommended to use 'rescue=nologreplay'");
447 		btrfs_set_opt(ctx->mount_opt, NOLOGREPLAY);
448 		break;
449 	case Opt_flushoncommit:
450 		if (result.negated)
451 			btrfs_clear_opt(ctx->mount_opt, FLUSHONCOMMIT);
452 		else
453 			btrfs_set_opt(ctx->mount_opt, FLUSHONCOMMIT);
454 		break;
455 	case Opt_ratio:
456 		ctx->metadata_ratio = result.uint_32;
457 		break;
458 	case Opt_discard:
459 		if (result.negated) {
460 			btrfs_clear_opt(ctx->mount_opt, DISCARD_SYNC);
461 			btrfs_clear_opt(ctx->mount_opt, DISCARD_ASYNC);
462 			btrfs_set_opt(ctx->mount_opt, NODISCARD);
463 		} else {
464 			btrfs_set_opt(ctx->mount_opt, DISCARD_SYNC);
465 			btrfs_clear_opt(ctx->mount_opt, DISCARD_ASYNC);
466 		}
467 		break;
468 	case Opt_discard_mode:
469 		switch (result.uint_32) {
470 		case Opt_discard_sync:
471 			btrfs_clear_opt(ctx->mount_opt, DISCARD_ASYNC);
472 			btrfs_set_opt(ctx->mount_opt, DISCARD_SYNC);
473 			break;
474 		case Opt_discard_async:
475 			btrfs_clear_opt(ctx->mount_opt, DISCARD_SYNC);
476 			btrfs_set_opt(ctx->mount_opt, DISCARD_ASYNC);
477 			break;
478 		default:
479 			btrfs_err(NULL, "unrecognized discard mode value %s",
480 				  param->key);
481 			return -EINVAL;
482 		}
483 		btrfs_clear_opt(ctx->mount_opt, NODISCARD);
484 		break;
485 	case Opt_space_cache:
486 		if (result.negated) {
487 			btrfs_set_opt(ctx->mount_opt, NOSPACECACHE);
488 			btrfs_clear_opt(ctx->mount_opt, SPACE_CACHE);
489 			btrfs_clear_opt(ctx->mount_opt, FREE_SPACE_TREE);
490 		} else {
491 			btrfs_clear_opt(ctx->mount_opt, FREE_SPACE_TREE);
492 			btrfs_set_opt(ctx->mount_opt, SPACE_CACHE);
493 		}
494 		break;
495 	case Opt_space_cache_version:
496 		switch (result.uint_32) {
497 		case Opt_space_cache_v1:
498 			btrfs_set_opt(ctx->mount_opt, SPACE_CACHE);
499 			btrfs_clear_opt(ctx->mount_opt, FREE_SPACE_TREE);
500 			break;
501 		case Opt_space_cache_v2:
502 			btrfs_clear_opt(ctx->mount_opt, SPACE_CACHE);
503 			btrfs_set_opt(ctx->mount_opt, FREE_SPACE_TREE);
504 			break;
505 		default:
506 			btrfs_err(NULL, "unrecognized space_cache value %s",
507 				  param->key);
508 			return -EINVAL;
509 		}
510 		break;
511 	case Opt_rescan_uuid_tree:
512 		btrfs_set_opt(ctx->mount_opt, RESCAN_UUID_TREE);
513 		break;
514 	case Opt_clear_cache:
515 		btrfs_set_opt(ctx->mount_opt, CLEAR_CACHE);
516 		break;
517 	case Opt_user_subvol_rm_allowed:
518 		btrfs_set_opt(ctx->mount_opt, USER_SUBVOL_RM_ALLOWED);
519 		break;
520 	case Opt_enospc_debug:
521 		if (result.negated)
522 			btrfs_clear_opt(ctx->mount_opt, ENOSPC_DEBUG);
523 		else
524 			btrfs_set_opt(ctx->mount_opt, ENOSPC_DEBUG);
525 		break;
526 	case Opt_defrag:
527 		if (result.negated)
528 			btrfs_clear_opt(ctx->mount_opt, AUTO_DEFRAG);
529 		else
530 			btrfs_set_opt(ctx->mount_opt, AUTO_DEFRAG);
531 		break;
532 	case Opt_usebackuproot:
533 		btrfs_warn(NULL,
534 			   "'usebackuproot' is deprecated, use 'rescue=usebackuproot' instead");
535 		btrfs_set_opt(ctx->mount_opt, USEBACKUPROOT);
536 
537 		/* If we're loading the backup roots we can't trust the space cache. */
538 		btrfs_set_opt(ctx->mount_opt, CLEAR_CACHE);
539 		break;
540 	case Opt_skip_balance:
541 		btrfs_set_opt(ctx->mount_opt, SKIP_BALANCE);
542 		break;
543 	case Opt_fatal_errors:
544 		switch (result.uint_32) {
545 		case Opt_fatal_errors_panic:
546 			btrfs_set_opt(ctx->mount_opt, PANIC_ON_FATAL_ERROR);
547 			break;
548 		case Opt_fatal_errors_bug:
549 			btrfs_clear_opt(ctx->mount_opt, PANIC_ON_FATAL_ERROR);
550 			break;
551 		default:
552 			btrfs_err(NULL, "unrecognized fatal_errors value %s",
553 				  param->key);
554 			return -EINVAL;
555 		}
556 		break;
557 	case Opt_commit_interval:
558 		ctx->commit_interval = result.uint_32;
559 		if (ctx->commit_interval == 0)
560 			ctx->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL;
561 		break;
562 	case Opt_rescue:
563 		switch (result.uint_32) {
564 		case Opt_rescue_usebackuproot:
565 			btrfs_set_opt(ctx->mount_opt, USEBACKUPROOT);
566 			break;
567 		case Opt_rescue_nologreplay:
568 			btrfs_set_opt(ctx->mount_opt, NOLOGREPLAY);
569 			break;
570 		case Opt_rescue_ignorebadroots:
571 			btrfs_set_opt(ctx->mount_opt, IGNOREBADROOTS);
572 			break;
573 		case Opt_rescue_ignoredatacsums:
574 			btrfs_set_opt(ctx->mount_opt, IGNOREDATACSUMS);
575 			break;
576 		case Opt_rescue_parameter_all:
577 			btrfs_set_opt(ctx->mount_opt, IGNOREDATACSUMS);
578 			btrfs_set_opt(ctx->mount_opt, IGNOREBADROOTS);
579 			btrfs_set_opt(ctx->mount_opt, NOLOGREPLAY);
580 			break;
581 		default:
582 			btrfs_info(NULL, "unrecognized rescue option '%s'",
583 				   param->key);
584 			return -EINVAL;
585 		}
586 		break;
587 #ifdef CONFIG_BTRFS_DEBUG
588 	case Opt_fragment:
589 		switch (result.uint_32) {
590 		case Opt_fragment_parameter_all:
591 			btrfs_set_opt(ctx->mount_opt, FRAGMENT_DATA);
592 			btrfs_set_opt(ctx->mount_opt, FRAGMENT_METADATA);
593 			break;
594 		case Opt_fragment_parameter_metadata:
595 			btrfs_set_opt(ctx->mount_opt, FRAGMENT_METADATA);
596 			break;
597 		case Opt_fragment_parameter_data:
598 			btrfs_set_opt(ctx->mount_opt, FRAGMENT_DATA);
599 			break;
600 		default:
601 			btrfs_info(NULL, "unrecognized fragment option '%s'",
602 				   param->key);
603 			return -EINVAL;
604 		}
605 		break;
606 #endif
607 #ifdef CONFIG_BTRFS_FS_REF_VERIFY
608 	case Opt_ref_verify:
609 		btrfs_set_opt(ctx->mount_opt, REF_VERIFY);
610 		break;
611 #endif
612 	default:
613 		btrfs_err(NULL, "unrecognized mount option '%s'", param->key);
614 		return -EINVAL;
615 	}
616 
617 	return 0;
618 }
619 
620 /*
621  * Some options only have meaning at mount time and shouldn't persist across
622  * remounts, or be displayed. Clear these at the end of mount and remount code
623  * paths.
624  */
625 static void btrfs_clear_oneshot_options(struct btrfs_fs_info *fs_info)
626 {
627 	btrfs_clear_opt(fs_info->mount_opt, USEBACKUPROOT);
628 	btrfs_clear_opt(fs_info->mount_opt, CLEAR_CACHE);
629 	btrfs_clear_opt(fs_info->mount_opt, NOSPACECACHE);
630 }
631 
632 static bool check_ro_option(struct btrfs_fs_info *fs_info,
633 			    unsigned long mount_opt, unsigned long opt,
634 			    const char *opt_name)
635 {
636 	if (mount_opt & opt) {
637 		btrfs_err(fs_info, "%s must be used with ro mount option",
638 			  opt_name);
639 		return true;
640 	}
641 	return false;
642 }
643 
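/*
 * Check that the combination of mount options is valid for the current state
 * of the filesystem.  Returns true if the options are consistent and false
 * otherwise.  Used both for the initial mount and for remounts.
 */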
644 bool btrfs_check_options(struct btrfs_fs_info *info, unsigned long *mount_opt,
645 			 unsigned long flags)
646 {
647 	bool ret = true;
648 
649 	if (!(flags & SB_RDONLY) &&
650 	    (check_ro_option(info, *mount_opt, BTRFS_MOUNT_NOLOGREPLAY, "nologreplay") ||
651 	     check_ro_option(info, *mount_opt, BTRFS_MOUNT_IGNOREBADROOTS, "ignorebadroots") ||
652 	     check_ro_option(info, *mount_opt, BTRFS_MOUNT_IGNOREDATACSUMS, "ignoredatacsums")))
653 		ret = false;
654 
655 	if (btrfs_fs_compat_ro(info, FREE_SPACE_TREE) &&
656 	    !btrfs_raw_test_opt(*mount_opt, FREE_SPACE_TREE) &&
657 	    !btrfs_raw_test_opt(*mount_opt, CLEAR_CACHE)) {
658 		btrfs_err(info, "cannot disable free-space-tree");
659 		ret = false;
660 	}
661 	if (btrfs_fs_compat_ro(info, BLOCK_GROUP_TREE) &&
662 	     !btrfs_raw_test_opt(*mount_opt, FREE_SPACE_TREE)) {
663 		btrfs_err(info, "cannot disable free-space-tree with block-group-tree feature");
664 		ret = false;
665 	}
666 
667 	if (btrfs_check_mountopts_zoned(info, mount_opt))
668 		ret = false;
669 
670 	if (!test_bit(BTRFS_FS_STATE_REMOUNTING, &info->fs_state)) {
671 		if (btrfs_raw_test_opt(*mount_opt, SPACE_CACHE))
672 			btrfs_info(info, "disk space caching is enabled");
673 		if (btrfs_raw_test_opt(*mount_opt, FREE_SPACE_TREE))
674 			btrfs_info(info, "using free-space-tree");
675 	}
676 
677 	return ret;
678 }
679 
680 /*
681  * This is subtle, we only call this during open_ctree().  We need to pre-load
682  * the mount options with the on-disk settings.  Before the new mount API took
683  * effect we would do this on mount and remount.  With the new mount API we'll
684  * only do this on the initial mount.
685  *
686  * This isn't a change in behavior, because we're using the current state of
687  * the file system to set the current mount options.  If you mounted with
688  * special options to disable these features and then remounted, we wouldn't
689  * revert the settings: mounting without these features already cleared the
690  * on-disk settings, so calling this again on remount is not needed.
691  */
692 void btrfs_set_free_space_cache_settings(struct btrfs_fs_info *fs_info)
693 {
694 	if (fs_info->sectorsize < PAGE_SIZE) {
695 		btrfs_clear_opt(fs_info->mount_opt, SPACE_CACHE);
696 		if (!btrfs_test_opt(fs_info, FREE_SPACE_TREE)) {
697 			btrfs_info(fs_info,
698 				   "forcing free space tree for sector size %u with page size %lu",
699 				   fs_info->sectorsize, PAGE_SIZE);
700 			btrfs_set_opt(fs_info->mount_opt, FREE_SPACE_TREE);
701 		}
702 	}
703 
704 	/*
705 	 * At this point our mount options are populated, so we only mess with
706 	 * these settings if we don't have any settings already.
707 	 */
708 	if (btrfs_test_opt(fs_info, FREE_SPACE_TREE))
709 		return;
710 
711 	if (btrfs_is_zoned(fs_info) &&
712 	    btrfs_free_space_cache_v1_active(fs_info)) {
713 		btrfs_info(fs_info, "zoned: clearing existing space cache");
714 		btrfs_set_super_cache_generation(fs_info->super_copy, 0);
715 		return;
716 	}
717 
718 	if (btrfs_test_opt(fs_info, SPACE_CACHE))
719 		return;
720 
721 	if (btrfs_test_opt(fs_info, NOSPACECACHE))
722 		return;
723 
724 	/*
725 	 * At this point we don't have explicit options set by the user, set
726 	 * them ourselves based on the state of the file system.
727 	 */
728 	if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE))
729 		btrfs_set_opt(fs_info->mount_opt, FREE_SPACE_TREE);
730 	else if (btrfs_free_space_cache_v1_active(fs_info))
731 		btrfs_set_opt(fs_info->mount_opt, SPACE_CACHE);
732 }
733 
734 static void set_device_specific_options(struct btrfs_fs_info *fs_info)
735 {
736 	if (!btrfs_test_opt(fs_info, NOSSD) &&
737 	    !fs_info->fs_devices->rotating)
738 		btrfs_set_opt(fs_info->mount_opt, SSD);
739 
740 	/*
741 	 * For devices supporting discard turn on discard=async automatically,
742 	 * unless it's already set or disabled. This could be turned off by
743 	 * nodiscard for the same mount.
744 	 *
745 	 * The zoned mode piggybacks on the discard functionality for
746 	 * resetting a zone. There is no reason to delay the zone reset as it is
747 	 * fast enough. So, do not enable async discard for zoned mode.
748 	 */
749 	if (!(btrfs_test_opt(fs_info, DISCARD_SYNC) ||
750 	      btrfs_test_opt(fs_info, DISCARD_ASYNC) ||
751 	      btrfs_test_opt(fs_info, NODISCARD)) &&
752 	    fs_info->fs_devices->discardable &&
753 	    !btrfs_is_zoned(fs_info))
754 		btrfs_set_opt(fs_info->mount_opt, DISCARD_ASYNC);
755 }
756 
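/*
 * Resolve a subvolume objectid to its path relative to the top-level
 * subvolume.  The path is built backwards: walk the ROOT_BACKREF items in the
 * tree of tree roots up to the top level and, for each step, the INODE_REF
 * items inside the subvolume up to its root directory.  Returns a kmalloc'ed
 * string that the caller must kfree(), or an ERR_PTR on failure.
 */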
757 char *btrfs_get_subvol_name_from_objectid(struct btrfs_fs_info *fs_info,
758 					  u64 subvol_objectid)
759 {
760 	struct btrfs_root *root = fs_info->tree_root;
761 	struct btrfs_root *fs_root = NULL;
762 	struct btrfs_root_ref *root_ref;
763 	struct btrfs_inode_ref *inode_ref;
764 	struct btrfs_key key;
765 	struct btrfs_path *path = NULL;
766 	char *name = NULL, *ptr;
767 	u64 dirid;
768 	int len;
769 	int ret;
770 
771 	path = btrfs_alloc_path();
772 	if (!path) {
773 		ret = -ENOMEM;
774 		goto err;
775 	}
776 
777 	name = kmalloc(PATH_MAX, GFP_KERNEL);
778 	if (!name) {
779 		ret = -ENOMEM;
780 		goto err;
781 	}
782 	ptr = name + PATH_MAX - 1;
783 	ptr[0] = '\0';
784 
785 	/*
786 	 * Walk up the subvolume trees in the tree of tree roots by root
787 	 * backrefs until we hit the top-level subvolume.
788 	 */
789 	while (subvol_objectid != BTRFS_FS_TREE_OBJECTID) {
790 		key.objectid = subvol_objectid;
791 		key.type = BTRFS_ROOT_BACKREF_KEY;
792 		key.offset = (u64)-1;
793 
794 		ret = btrfs_search_backwards(root, &key, path);
795 		if (ret < 0) {
796 			goto err;
797 		} else if (ret > 0) {
798 			ret = -ENOENT;
799 			goto err;
800 		}
801 
802 		subvol_objectid = key.offset;
803 
804 		root_ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
805 					  struct btrfs_root_ref);
806 		len = btrfs_root_ref_name_len(path->nodes[0], root_ref);
807 		ptr -= len + 1;
808 		if (ptr < name) {
809 			ret = -ENAMETOOLONG;
810 			goto err;
811 		}
812 		read_extent_buffer(path->nodes[0], ptr + 1,
813 				   (unsigned long)(root_ref + 1), len);
814 		ptr[0] = '/';
815 		dirid = btrfs_root_ref_dirid(path->nodes[0], root_ref);
816 		btrfs_release_path(path);
817 
818 		fs_root = btrfs_get_fs_root(fs_info, subvol_objectid, true);
819 		if (IS_ERR(fs_root)) {
820 			ret = PTR_ERR(fs_root);
821 			fs_root = NULL;
822 			goto err;
823 		}
824 
825 		/*
826 		 * Walk up the filesystem tree by inode refs until we hit the
827 		 * root directory.
828 		 */
829 		while (dirid != BTRFS_FIRST_FREE_OBJECTID) {
830 			key.objectid = dirid;
831 			key.type = BTRFS_INODE_REF_KEY;
832 			key.offset = (u64)-1;
833 
834 			ret = btrfs_search_backwards(fs_root, &key, path);
835 			if (ret < 0) {
836 				goto err;
837 			} else if (ret > 0) {
838 				ret = -ENOENT;
839 				goto err;
840 			}
841 
842 			dirid = key.offset;
843 
844 			inode_ref = btrfs_item_ptr(path->nodes[0],
845 						   path->slots[0],
846 						   struct btrfs_inode_ref);
847 			len = btrfs_inode_ref_name_len(path->nodes[0],
848 						       inode_ref);
849 			ptr -= len + 1;
850 			if (ptr < name) {
851 				ret = -ENAMETOOLONG;
852 				goto err;
853 			}
854 			read_extent_buffer(path->nodes[0], ptr + 1,
855 					   (unsigned long)(inode_ref + 1), len);
856 			ptr[0] = '/';
857 			btrfs_release_path(path);
858 		}
859 		btrfs_put_root(fs_root);
860 		fs_root = NULL;
861 	}
862 
863 	btrfs_free_path(path);
864 	if (ptr == name + PATH_MAX - 1) {
865 		name[0] = '/';
866 		name[1] = '\0';
867 	} else {
868 		memmove(name, ptr, name + PATH_MAX - ptr);
869 	}
870 	return name;
871 
872 err:
873 	btrfs_put_root(fs_root);
874 	btrfs_free_path(path);
875 	kfree(name);
876 	return ERR_PTR(ret);
877 }
878 
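/*
 * Find the objectid of the subvolume that should be mounted by default, i.e.
 * the one the "default" dir item in the tree root points to.  If the item is
 * missing, fall back to the top-level subvolume.
 */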
879 static int get_default_subvol_objectid(struct btrfs_fs_info *fs_info, u64 *objectid)
880 {
881 	struct btrfs_root *root = fs_info->tree_root;
882 	struct btrfs_dir_item *di;
883 	struct btrfs_path *path;
884 	struct btrfs_key location;
885 	struct fscrypt_str name = FSTR_INIT("default", 7);
886 	u64 dir_id;
887 
888 	path = btrfs_alloc_path();
889 	if (!path)
890 		return -ENOMEM;
891 
892 	/*
893 	 * Find the "default" dir item which points to the root item that we
894 	 * will mount by default if we haven't been given a specific subvolume
895 	 * to mount.
896 	 */
897 	dir_id = btrfs_super_root_dir(fs_info->super_copy);
898 	di = btrfs_lookup_dir_item(NULL, root, path, dir_id, &name, 0);
899 	if (IS_ERR(di)) {
900 		btrfs_free_path(path);
901 		return PTR_ERR(di);
902 	}
903 	if (!di) {
904 		/*
905 		 * Ok the default dir item isn't there.  This is weird since
906 		 * it's always been there, but don't freak out, just try and
907 		 * mount the top-level subvolume.
908 		 */
909 		btrfs_free_path(path);
910 		*objectid = BTRFS_FS_TREE_OBJECTID;
911 		return 0;
912 	}
913 
914 	btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
915 	btrfs_free_path(path);
916 	*objectid = location.objectid;
917 	return 0;
918 }
919 
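/*
 * Fill in the superblock: set up the btrfs-specific operations and limits,
 * open the trees via open_ctree() and instantiate the root dentry.
 */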
920 static int btrfs_fill_super(struct super_block *sb,
921 			    struct btrfs_fs_devices *fs_devices,
922 			    void *data)
923 {
924 	struct inode *inode;
925 	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
926 	int err;
927 
928 	sb->s_maxbytes = MAX_LFS_FILESIZE;
929 	sb->s_magic = BTRFS_SUPER_MAGIC;
930 	sb->s_op = &btrfs_super_ops;
931 	sb->s_d_op = &btrfs_dentry_operations;
932 	sb->s_export_op = &btrfs_export_ops;
933 #ifdef CONFIG_FS_VERITY
934 	sb->s_vop = &btrfs_verityops;
935 #endif
936 	sb->s_xattr = btrfs_xattr_handlers;
937 	sb->s_time_gran = 1;
938 	sb->s_iflags |= SB_I_CGROUPWB;
939 
940 	err = super_setup_bdi(sb);
941 	if (err) {
942 		btrfs_err(fs_info, "super_setup_bdi failed");
943 		return err;
944 	}
945 
946 	err = open_ctree(sb, fs_devices, (char *)data);
947 	if (err) {
948 		btrfs_err(fs_info, "open_ctree failed");
949 		return err;
950 	}
951 
952 	inode = btrfs_iget(sb, BTRFS_FIRST_FREE_OBJECTID, fs_info->fs_root);
953 	if (IS_ERR(inode)) {
954 		err = PTR_ERR(inode);
955 		btrfs_handle_fs_error(fs_info, err, NULL);
956 		goto fail_close;
957 	}
958 
959 	sb->s_root = d_make_root(inode);
960 	if (!sb->s_root) {
961 		err = -ENOMEM;
962 		goto fail_close;
963 	}
964 
965 	sb->s_flags |= SB_ACTIVE;
966 	return 0;
967 
968 fail_close:
969 	close_ctree(fs_info);
970 	return err;
971 }
972 
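/*
 * sync_fs callback.  For a non-blocking sync only the btree inode is flushed;
 * otherwise wait for ordered extents and commit the running transaction,
 * starting one only if there are pending changes that require a commit.
 */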
973 int btrfs_sync_fs(struct super_block *sb, int wait)
974 {
975 	struct btrfs_trans_handle *trans;
976 	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
977 	struct btrfs_root *root = fs_info->tree_root;
978 
979 	trace_btrfs_sync_fs(fs_info, wait);
980 
981 	if (!wait) {
982 		filemap_flush(fs_info->btree_inode->i_mapping);
983 		return 0;
984 	}
985 
986 	btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
987 
988 	trans = btrfs_attach_transaction_barrier(root);
989 	if (IS_ERR(trans)) {
990 		/* no transaction, don't bother */
991 		if (PTR_ERR(trans) == -ENOENT) {
992 			/*
993 			 * Exit unless we have some pending changes
994 			 * that need to go through commit
995 			 */
996 			if (!test_bit(BTRFS_FS_NEED_TRANS_COMMIT,
997 				      &fs_info->flags))
998 				return 0;
999 			/*
1000 			 * A non-blocking test if the fs is frozen. We must not
1001 			 * start a new transaction here otherwise a deadlock
1002 			 * happens. The pending operations are delayed to the
1003 			 * next commit after thawing.
1004 			 */
1005 			if (sb_start_write_trylock(sb))
1006 				sb_end_write(sb);
1007 			else
1008 				return 0;
1009 			trans = btrfs_start_transaction(root, 0);
1010 		}
1011 		if (IS_ERR(trans))
1012 			return PTR_ERR(trans);
1013 	}
1014 	return btrfs_commit_transaction(trans);
1015 }
1016 
1017 static void print_rescue_option(struct seq_file *seq, const char *s, bool *printed)
1018 {
1019 	seq_printf(seq, "%s%s", (*printed) ? ":" : ",rescue=", s);
1020 	*printed = true;
1021 }
1022 
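/*
 * Show the effective mount options in /proc/self/mounts.  Most options are
 * printed only when they differ from the defaults; the space cache state and
 * the subvolume id/path of the mount are always printed.
 */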
1023 static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
1024 {
1025 	struct btrfs_fs_info *info = btrfs_sb(dentry->d_sb);
1026 	const char *compress_type;
1027 	const char *subvol_name;
1028 	bool printed = false;
1029 
1030 	if (btrfs_test_opt(info, DEGRADED))
1031 		seq_puts(seq, ",degraded");
1032 	if (btrfs_test_opt(info, NODATASUM))
1033 		seq_puts(seq, ",nodatasum");
1034 	if (btrfs_test_opt(info, NODATACOW))
1035 		seq_puts(seq, ",nodatacow");
1036 	if (btrfs_test_opt(info, NOBARRIER))
1037 		seq_puts(seq, ",nobarrier");
1038 	if (info->max_inline != BTRFS_DEFAULT_MAX_INLINE)
1039 		seq_printf(seq, ",max_inline=%llu", info->max_inline);
1040 	if (info->thread_pool_size !=  min_t(unsigned long,
1041 					     num_online_cpus() + 2, 8))
1042 		seq_printf(seq, ",thread_pool=%u", info->thread_pool_size);
1043 	if (btrfs_test_opt(info, COMPRESS)) {
1044 		compress_type = btrfs_compress_type2str(info->compress_type);
1045 		if (btrfs_test_opt(info, FORCE_COMPRESS))
1046 			seq_printf(seq, ",compress-force=%s", compress_type);
1047 		else
1048 			seq_printf(seq, ",compress=%s", compress_type);
1049 		if (info->compress_level)
1050 			seq_printf(seq, ":%d", info->compress_level);
1051 	}
1052 	if (btrfs_test_opt(info, NOSSD))
1053 		seq_puts(seq, ",nossd");
1054 	if (btrfs_test_opt(info, SSD_SPREAD))
1055 		seq_puts(seq, ",ssd_spread");
1056 	else if (btrfs_test_opt(info, SSD))
1057 		seq_puts(seq, ",ssd");
1058 	if (btrfs_test_opt(info, NOTREELOG))
1059 		seq_puts(seq, ",notreelog");
1060 	if (btrfs_test_opt(info, NOLOGREPLAY))
1061 		print_rescue_option(seq, "nologreplay", &printed);
1062 	if (btrfs_test_opt(info, USEBACKUPROOT))
1063 		print_rescue_option(seq, "usebackuproot", &printed);
1064 	if (btrfs_test_opt(info, IGNOREBADROOTS))
1065 		print_rescue_option(seq, "ignorebadroots", &printed);
1066 	if (btrfs_test_opt(info, IGNOREDATACSUMS))
1067 		print_rescue_option(seq, "ignoredatacsums", &printed);
1068 	if (btrfs_test_opt(info, FLUSHONCOMMIT))
1069 		seq_puts(seq, ",flushoncommit");
1070 	if (btrfs_test_opt(info, DISCARD_SYNC))
1071 		seq_puts(seq, ",discard");
1072 	if (btrfs_test_opt(info, DISCARD_ASYNC))
1073 		seq_puts(seq, ",discard=async");
1074 	if (!(info->sb->s_flags & SB_POSIXACL))
1075 		seq_puts(seq, ",noacl");
1076 	if (btrfs_free_space_cache_v1_active(info))
1077 		seq_puts(seq, ",space_cache");
1078 	else if (btrfs_fs_compat_ro(info, FREE_SPACE_TREE))
1079 		seq_puts(seq, ",space_cache=v2");
1080 	else
1081 		seq_puts(seq, ",nospace_cache");
1082 	if (btrfs_test_opt(info, RESCAN_UUID_TREE))
1083 		seq_puts(seq, ",rescan_uuid_tree");
1084 	if (btrfs_test_opt(info, CLEAR_CACHE))
1085 		seq_puts(seq, ",clear_cache");
1086 	if (btrfs_test_opt(info, USER_SUBVOL_RM_ALLOWED))
1087 		seq_puts(seq, ",user_subvol_rm_allowed");
1088 	if (btrfs_test_opt(info, ENOSPC_DEBUG))
1089 		seq_puts(seq, ",enospc_debug");
1090 	if (btrfs_test_opt(info, AUTO_DEFRAG))
1091 		seq_puts(seq, ",autodefrag");
1092 	if (btrfs_test_opt(info, SKIP_BALANCE))
1093 		seq_puts(seq, ",skip_balance");
1094 	if (info->metadata_ratio)
1095 		seq_printf(seq, ",metadata_ratio=%u", info->metadata_ratio);
1096 	if (btrfs_test_opt(info, PANIC_ON_FATAL_ERROR))
1097 		seq_puts(seq, ",fatal_errors=panic");
1098 	if (info->commit_interval != BTRFS_DEFAULT_COMMIT_INTERVAL)
1099 		seq_printf(seq, ",commit=%u", info->commit_interval);
1100 #ifdef CONFIG_BTRFS_DEBUG
1101 	if (btrfs_test_opt(info, FRAGMENT_DATA))
1102 		seq_puts(seq, ",fragment=data");
1103 	if (btrfs_test_opt(info, FRAGMENT_METADATA))
1104 		seq_puts(seq, ",fragment=metadata");
1105 #endif
1106 	if (btrfs_test_opt(info, REF_VERIFY))
1107 		seq_puts(seq, ",ref_verify");
1108 	seq_printf(seq, ",subvolid=%llu", btrfs_root_id(BTRFS_I(d_inode(dentry))->root));
1109 	subvol_name = btrfs_get_subvol_name_from_objectid(info,
1110 			btrfs_root_id(BTRFS_I(d_inode(dentry))->root));
1111 	if (!IS_ERR(subvol_name)) {
1112 		seq_puts(seq, ",subvol=");
1113 		seq_escape(seq, subvol_name, " \t\n\\");
1114 		kfree(subvol_name);
1115 	}
1116 	return 0;
1117 }
1118 
1119 /*
1120  * Subvolumes are identified by inode number 256 (BTRFS_FIRST_FREE_OBJECTID).
1121  */
1122 static inline int is_subvolume_inode(struct inode *inode)
1123 {
1124 	if (inode && inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
1125 		return 1;
1126 	return 0;
1127 }
1128 
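/*
 * Mount a specific subvolume of an already set up btrfs filesystem.  The
 * subvolume is chosen by name or by objectid; if neither is given, the
 * default subvolume is used.  Always consumes the reference on @mnt.
 */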
1129 static struct dentry *mount_subvol(const char *subvol_name, u64 subvol_objectid,
1130 				   struct vfsmount *mnt)
1131 {
1132 	struct dentry *root;
1133 	int ret;
1134 
1135 	if (!subvol_name) {
1136 		if (!subvol_objectid) {
1137 			ret = get_default_subvol_objectid(btrfs_sb(mnt->mnt_sb),
1138 							  &subvol_objectid);
1139 			if (ret) {
1140 				root = ERR_PTR(ret);
1141 				goto out;
1142 			}
1143 		}
1144 		subvol_name = btrfs_get_subvol_name_from_objectid(
1145 					btrfs_sb(mnt->mnt_sb), subvol_objectid);
1146 		if (IS_ERR(subvol_name)) {
1147 			root = ERR_CAST(subvol_name);
1148 			subvol_name = NULL;
1149 			goto out;
1150 		}
1151 
1152 	}
1153 
1154 	root = mount_subtree(mnt, subvol_name);
1155 	/* mount_subtree() drops our reference on the vfsmount. */
1156 	mnt = NULL;
1157 
1158 	if (!IS_ERR(root)) {
1159 		struct super_block *s = root->d_sb;
1160 		struct btrfs_fs_info *fs_info = btrfs_sb(s);
1161 		struct inode *root_inode = d_inode(root);
1162 		u64 root_objectid = btrfs_root_id(BTRFS_I(root_inode)->root);
1163 
1164 		ret = 0;
1165 		if (!is_subvolume_inode(root_inode)) {
1166 			btrfs_err(fs_info, "'%s' is not a valid subvolume",
1167 			       subvol_name);
1168 			ret = -EINVAL;
1169 		}
1170 		if (subvol_objectid && root_objectid != subvol_objectid) {
1171 			/*
1172 			 * This will also catch a race condition where a
1173 			 * subvolume which was passed by ID is renamed and
1174 			 * another subvolume is renamed over the old location.
1175 			 */
1176 			btrfs_err(fs_info,
1177 				  "subvol '%s' does not match subvolid %llu",
1178 				  subvol_name, subvol_objectid);
1179 			ret = -EINVAL;
1180 		}
1181 		if (ret) {
1182 			dput(root);
1183 			root = ERR_PTR(ret);
1184 			deactivate_locked_super(s);
1185 		}
1186 	}
1187 
1188 out:
1189 	mntput(mnt);
1190 	kfree(subvol_name);
1191 	return root;
1192 }
1193 
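/* Apply a new thread_pool size to all workqueues that scale with it. */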
1194 static void btrfs_resize_thread_pool(struct btrfs_fs_info *fs_info,
1195 				     u32 new_pool_size, u32 old_pool_size)
1196 {
1197 	if (new_pool_size == old_pool_size)
1198 		return;
1199 
1200 	fs_info->thread_pool_size = new_pool_size;
1201 
1202 	btrfs_info(fs_info, "resize thread pool %d -> %d",
1203 	       old_pool_size, new_pool_size);
1204 
1205 	btrfs_workqueue_set_max(fs_info->workers, new_pool_size);
1206 	btrfs_workqueue_set_max(fs_info->delalloc_workers, new_pool_size);
1207 	btrfs_workqueue_set_max(fs_info->caching_workers, new_pool_size);
1208 	workqueue_set_max_active(fs_info->endio_workers, new_pool_size);
1209 	workqueue_set_max_active(fs_info->endio_meta_workers, new_pool_size);
1210 	btrfs_workqueue_set_max(fs_info->endio_write_workers, new_pool_size);
1211 	btrfs_workqueue_set_max(fs_info->endio_freespace_worker, new_pool_size);
1212 	btrfs_workqueue_set_max(fs_info->delayed_workers, new_pool_size);
1213 }
1214 
1215 static inline void btrfs_remount_begin(struct btrfs_fs_info *fs_info,
1216 				       unsigned long old_opts, int flags)
1217 {
1218 	if (btrfs_raw_test_opt(old_opts, AUTO_DEFRAG) &&
1219 	    (!btrfs_raw_test_opt(fs_info->mount_opt, AUTO_DEFRAG) ||
1220 	     (flags & SB_RDONLY))) {
1221 		/* wait for any defraggers to finish */
1222 		wait_event(fs_info->transaction_wait,
1223 			   (atomic_read(&fs_info->defrag_running) == 0));
1224 		if (flags & SB_RDONLY)
1225 			sync_filesystem(fs_info->sb);
1226 	}
1227 }
1228 
1229 static inline void btrfs_remount_cleanup(struct btrfs_fs_info *fs_info,
1230 					 unsigned long old_opts)
1231 {
1232 	const bool cache_opt = btrfs_test_opt(fs_info, SPACE_CACHE);
1233 
1234 	/*
1235 	 * We need to clean up all defraggable inodes if the autodefrag option
1236 	 * has been disabled or the filesystem is now read-only.
1237 	 */
1238 	if (btrfs_raw_test_opt(old_opts, AUTO_DEFRAG) &&
1239 	    (!btrfs_raw_test_opt(fs_info->mount_opt, AUTO_DEFRAG) || sb_rdonly(fs_info->sb))) {
1240 		btrfs_cleanup_defrag_inodes(fs_info);
1241 	}
1242 
1243 	/* If we toggled discard async */
1244 	if (!btrfs_raw_test_opt(old_opts, DISCARD_ASYNC) &&
1245 	    btrfs_test_opt(fs_info, DISCARD_ASYNC))
1246 		btrfs_discard_resume(fs_info);
1247 	else if (btrfs_raw_test_opt(old_opts, DISCARD_ASYNC) &&
1248 		 !btrfs_test_opt(fs_info, DISCARD_ASYNC))
1249 		btrfs_discard_cleanup(fs_info);
1250 
1251 	/* If we toggled space cache */
1252 	if (cache_opt != btrfs_free_space_cache_v1_active(fs_info))
1253 		btrfs_set_free_space_cache_v1_active(fs_info, cache_opt);
1254 }
1255 
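/*
 * Remount from read-only to read-write.  Refuse if the filesystem has hit an
 * error, has no writable devices, misses too many devices, or still has a
 * tree-log that would need to be replayed.
 */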
1256 static int btrfs_remount_rw(struct btrfs_fs_info *fs_info)
1257 {
1258 	int ret;
1259 
1260 	if (BTRFS_FS_ERROR(fs_info)) {
1261 		btrfs_err(fs_info,
1262 			  "remounting read-write after error is not allowed");
1263 		return -EINVAL;
1264 	}
1265 
1266 	if (fs_info->fs_devices->rw_devices == 0)
1267 		return -EACCES;
1268 
1269 	if (!btrfs_check_rw_degradable(fs_info, NULL)) {
1270 		btrfs_warn(fs_info,
1271 			   "too many missing devices, writable remount is not allowed");
1272 		return -EACCES;
1273 	}
1274 
1275 	if (btrfs_super_log_root(fs_info->super_copy) != 0) {
1276 		btrfs_warn(fs_info,
1277 			   "mount required to replay tree-log, cannot remount read-write");
1278 		return -EINVAL;
1279 	}
1280 
1281 	/*
1282 	 * NOTE: when remounting with a change that does writes, don't put it
1283 	 * anywhere above this point, as we are not sure it is safe to write
1284 	 * until we pass the above checks.
1285 	 */
1286 	ret = btrfs_start_pre_rw_mount(fs_info);
1287 	if (ret)
1288 		return ret;
1289 
1290 	btrfs_clear_sb_rdonly(fs_info->sb);
1291 
1292 	set_bit(BTRFS_FS_OPEN, &fs_info->flags);
1293 
1294 	/*
1295 	 * If we've gone from readonly -> read-write, we need to get our
1296 	 * sync/async discard lists in the right state.
1297 	 */
1298 	btrfs_discard_resume(fs_info);
1299 
1300 	return 0;
1301 }
1302 
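/*
 * Remount from read-write to read-only: stop background work (reclaim,
 * discard, scrub, balance, qgroup rescan), delete unused block groups, run
 * pending delayed iputs and commit the superblock.
 */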
1303 static int btrfs_remount_ro(struct btrfs_fs_info *fs_info)
1304 {
1305 	/*
1306 	 * This also happens on 'umount -rf' or on shutdown, when the
1307 	 * filesystem is busy.
1308 	 */
1309 	cancel_work_sync(&fs_info->async_reclaim_work);
1310 	cancel_work_sync(&fs_info->async_data_reclaim_work);
1311 
1312 	btrfs_discard_cleanup(fs_info);
1313 
1314 	/* Wait for the uuid_scan task to finish */
1315 	down(&fs_info->uuid_tree_rescan_sem);
1316 	/* Avoid complaints from lockdep et al. */
1317 	up(&fs_info->uuid_tree_rescan_sem);
1318 
1319 	btrfs_set_sb_rdonly(fs_info->sb);
1320 
1321 	/*
1322 	 * Setting SB_RDONLY will put the cleaner thread to sleep at the next
1323 	 * loop if it's already active.  If it's already asleep, we'll leave
1324 	 * unused block groups on disk until we're mounted read-write again
1325 	 * unless we clean them up here.
1326 	 */
1327 	btrfs_delete_unused_bgs(fs_info);
1328 
1329 	/*
1330 	 * The cleaner task could be already running before we set the flag
1331 	 * BTRFS_FS_STATE_RO (and SB_RDONLY in the superblock).  We must make
1332 	 * sure that after we finish the remount, i.e. after we call
1333 	 * btrfs_commit_super(), the cleaner can no longer start a transaction
1334 	 * - either because it was dropping a dead root, running delayed iputs
1335 	 *   or deleting an unused block group (the cleaner picked a block
1336 	 *   group from the list of unused block groups before we were able to
1337 	 *   in the previous call to btrfs_delete_unused_bgs()).
1338 	 */
1339 	wait_on_bit(&fs_info->flags, BTRFS_FS_CLEANER_RUNNING, TASK_UNINTERRUPTIBLE);
1340 
1341 	/*
1342 	 * We've set the superblock to RO mode, so we might have made the
1343 	 * cleaner task sleep without running all pending delayed iputs. Go
1344 	 * through all the delayed iputs here, so that if an unmount happens
1345 	 * without remounting RW we don't end up at finishing close_ctree()
1346 	 * with a non-empty list of delayed iputs.
1347 	 */
1348 	btrfs_run_delayed_iputs(fs_info);
1349 
1350 	btrfs_dev_replace_suspend_for_unmount(fs_info);
1351 	btrfs_scrub_cancel(fs_info);
1352 	btrfs_pause_balance(fs_info);
1353 
1354 	/*
1355 	 * Pause the qgroup rescan worker if it is running. We don't want it to
1356 	 * be still running after we are in RO mode, as after that, by the time
1357 	 * we unmount, it might have left a transaction open, so we would leak
1358 	 * the transaction and/or crash.
1359 	 */
1360 	btrfs_qgroup_wait_for_completion(fs_info, false);
1361 
1362 	return btrfs_commit_super(fs_info);
1363 }
1364 
1365 static void btrfs_ctx_to_info(struct btrfs_fs_info *fs_info, struct btrfs_fs_context *ctx)
1366 {
1367 	fs_info->max_inline = ctx->max_inline;
1368 	fs_info->commit_interval = ctx->commit_interval;
1369 	fs_info->metadata_ratio = ctx->metadata_ratio;
1370 	fs_info->thread_pool_size = ctx->thread_pool_size;
1371 	fs_info->mount_opt = ctx->mount_opt;
1372 	fs_info->compress_type = ctx->compress_type;
1373 	fs_info->compress_level = ctx->compress_level;
1374 }
1375 
1376 static void btrfs_info_to_ctx(struct btrfs_fs_info *fs_info, struct btrfs_fs_context *ctx)
1377 {
1378 	ctx->max_inline = fs_info->max_inline;
1379 	ctx->commit_interval = fs_info->commit_interval;
1380 	ctx->metadata_ratio = fs_info->metadata_ratio;
1381 	ctx->thread_pool_size = fs_info->thread_pool_size;
1382 	ctx->mount_opt = fs_info->mount_opt;
1383 	ctx->compress_type = fs_info->compress_type;
1384 	ctx->compress_level = fs_info->compress_level;
1385 }
1386 
1387 #define btrfs_info_if_set(fs_info, old_ctx, opt, fmt, args...)			\
1388 do {										\
1389 	if ((!old_ctx || !btrfs_raw_test_opt(old_ctx->mount_opt, opt)) &&	\
1390 	    btrfs_raw_test_opt(fs_info->mount_opt, opt))			\
1391 		btrfs_info(fs_info, fmt, ##args);				\
1392 } while (0)
1393 
1394 #define btrfs_info_if_unset(fs_info, old_ctx, opt, fmt, args...)	\
1395 do {									\
1396 	if ((old_ctx && btrfs_raw_test_opt(old_ctx->mount_opt, opt)) &&	\
1397 	    !btrfs_raw_test_opt(fs_info->mount_opt, opt))		\
1398 		btrfs_info(fs_info, fmt, ##args);			\
1399 } while (0)
1400 
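/*
 * Log the options that changed compared to @old, or all options that are set
 * when @old is NULL (i.e. on the initial mount).
 */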
1401 static void btrfs_emit_options(struct btrfs_fs_info *info,
1402 			       struct btrfs_fs_context *old)
1403 {
1404 	btrfs_info_if_set(info, old, NODATASUM, "setting nodatasum");
1405 	btrfs_info_if_set(info, old, DEGRADED, "allowing degraded mounts");
1406 	btrfs_info_if_set(info, old, NODATACOW, "setting nodatacow");
1407 	btrfs_info_if_set(info, old, SSD, "enabling ssd optimizations");
1408 	btrfs_info_if_set(info, old, SSD_SPREAD, "using spread ssd allocation scheme");
1409 	btrfs_info_if_set(info, old, NOBARRIER, "turning off barriers");
1410 	btrfs_info_if_set(info, old, NOTREELOG, "disabling tree log");
1411 	btrfs_info_if_set(info, old, NOLOGREPLAY, "disabling log replay at mount time");
1412 	btrfs_info_if_set(info, old, FLUSHONCOMMIT, "turning on flush-on-commit");
1413 	btrfs_info_if_set(info, old, DISCARD_SYNC, "turning on sync discard");
1414 	btrfs_info_if_set(info, old, DISCARD_ASYNC, "turning on async discard");
1415 	btrfs_info_if_set(info, old, FREE_SPACE_TREE, "enabling free space tree");
1416 	btrfs_info_if_set(info, old, SPACE_CACHE, "enabling disk space caching");
1417 	btrfs_info_if_set(info, old, CLEAR_CACHE, "force clearing of disk cache");
1418 	btrfs_info_if_set(info, old, AUTO_DEFRAG, "enabling auto defrag");
1419 	btrfs_info_if_set(info, old, FRAGMENT_DATA, "fragmenting data");
1420 	btrfs_info_if_set(info, old, FRAGMENT_METADATA, "fragmenting metadata");
1421 	btrfs_info_if_set(info, old, REF_VERIFY, "doing ref verification");
1422 	btrfs_info_if_set(info, old, USEBACKUPROOT, "trying to use backup root at mount time");
1423 	btrfs_info_if_set(info, old, IGNOREBADROOTS, "ignoring bad roots");
1424 	btrfs_info_if_set(info, old, IGNOREDATACSUMS, "ignoring data csums");
1425 
1426 	btrfs_info_if_unset(info, old, NODATACOW, "setting datacow");
1427 	btrfs_info_if_unset(info, old, SSD, "not using ssd optimizations");
1428 	btrfs_info_if_unset(info, old, SSD_SPREAD, "not using spread ssd allocation scheme");
1429 	btrfs_info_if_unset(info, old, NOBARRIER, "turning on barriers");
1430 	btrfs_info_if_unset(info, old, NOTREELOG, "enabling tree log");
1431 	btrfs_info_if_unset(info, old, SPACE_CACHE, "disabling disk space caching");
1432 	btrfs_info_if_unset(info, old, FREE_SPACE_TREE, "disabling free space tree");
1433 	btrfs_info_if_unset(info, old, AUTO_DEFRAG, "disabling auto defrag");
1434 	btrfs_info_if_unset(info, old, COMPRESS, "use no compression");
1435 
1436 	/* Did the compression settings change? */
1437 	if (btrfs_test_opt(info, COMPRESS) &&
1438 	    (!old ||
1439 	     old->compress_type != info->compress_type ||
1440 	     old->compress_level != info->compress_level ||
1441 	     (!btrfs_raw_test_opt(old->mount_opt, FORCE_COMPRESS) &&
1442 	      btrfs_raw_test_opt(info->mount_opt, FORCE_COMPRESS)))) {
1443 		const char *compress_type = btrfs_compress_type2str(info->compress_type);
1444 
1445 		btrfs_info(info, "%s %s compression, level %d",
1446 			   btrfs_test_opt(info, FORCE_COMPRESS) ? "force" : "use",
1447 			   compress_type, info->compress_level);
1448 	}
1449 
1450 	if (info->max_inline != BTRFS_DEFAULT_MAX_INLINE)
1451 		btrfs_info(info, "max_inline set to %llu", info->max_inline);
1452 }
1453 
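/*
 * Reconfigure (remount) entry point of the new mount API.  A non-NULL
 * fc->s_fs_info means this is the internal reconfigure done while mounting a
 * subvolume ("bind mount" style), in which case the existing mount options
 * are kept and only ro/rw and the subvolume may change.
 */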
1454 static int btrfs_reconfigure(struct fs_context *fc)
1455 {
1456 	struct super_block *sb = fc->root->d_sb;
1457 	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
1458 	struct btrfs_fs_context *ctx = fc->fs_private;
1459 	struct btrfs_fs_context old_ctx;
1460 	int ret = 0;
1461 	bool mount_reconfigure = (fc->s_fs_info != NULL);
1462 
1463 	btrfs_info_to_ctx(fs_info, &old_ctx);
1464 
1465 	/*
1466 	 * This is our "bind mount" trick: we don't want to allow the user to do
1467 	 * anything other than mount a different ro/rw and a different subvol;
1468 	 * all of the mount options should be preserved.
1469 	 */
1470 	if (mount_reconfigure)
1471 		ctx->mount_opt = old_ctx.mount_opt;
1472 
1473 	sync_filesystem(sb);
1474 	set_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state);
1475 
1476 	if (!mount_reconfigure &&
1477 	    !btrfs_check_options(fs_info, &ctx->mount_opt, fc->sb_flags))
1478 		return -EINVAL;
1479 
1480 	ret = btrfs_check_features(fs_info, !(fc->sb_flags & SB_RDONLY));
1481 	if (ret < 0)
1482 		return ret;
1483 
1484 	btrfs_ctx_to_info(fs_info, ctx);
1485 	btrfs_remount_begin(fs_info, old_ctx.mount_opt, fc->sb_flags);
1486 	btrfs_resize_thread_pool(fs_info, fs_info->thread_pool_size,
1487 				 old_ctx.thread_pool_size);
1488 
1489 	if ((bool)btrfs_test_opt(fs_info, FREE_SPACE_TREE) !=
1490 	    (bool)btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) &&
1491 	    (!sb_rdonly(sb) || (fc->sb_flags & SB_RDONLY))) {
1492 		btrfs_warn(fs_info,
1493 		"remount supports changing free space tree only from RO to RW");
1494 		/* Make sure free space cache options match the state on disk. */
1495 		if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
1496 			btrfs_set_opt(fs_info->mount_opt, FREE_SPACE_TREE);
1497 			btrfs_clear_opt(fs_info->mount_opt, SPACE_CACHE);
1498 		}
1499 		if (btrfs_free_space_cache_v1_active(fs_info)) {
1500 			btrfs_clear_opt(fs_info->mount_opt, FREE_SPACE_TREE);
1501 			btrfs_set_opt(fs_info->mount_opt, SPACE_CACHE);
1502 		}
1503 	}
1504 
1505 	ret = 0;
1506 	if (!sb_rdonly(sb) && (fc->sb_flags & SB_RDONLY))
1507 		ret = btrfs_remount_ro(fs_info);
1508 	else if (sb_rdonly(sb) && !(fc->sb_flags & SB_RDONLY))
1509 		ret = btrfs_remount_rw(fs_info);
1510 	if (ret)
1511 		goto restore;
1512 
1513 	/*
1514 	 * If we set the mask during the parameter parsing VFS would reject the
1515 	 * remount.  Here we can set the mask and the value will be updated
1516 	 * appropriately.
1517 	 */
1518 	if ((fc->sb_flags & SB_POSIXACL) != (sb->s_flags & SB_POSIXACL))
1519 		fc->sb_flags_mask |= SB_POSIXACL;
1520 
1521 	btrfs_emit_options(fs_info, &old_ctx);
1522 	wake_up_process(fs_info->transaction_kthread);
1523 	btrfs_remount_cleanup(fs_info, old_ctx.mount_opt);
1524 	btrfs_clear_oneshot_options(fs_info);
1525 	clear_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state);
1526 
1527 	return 0;
1528 restore:
1529 	btrfs_ctx_to_info(fs_info, &old_ctx);
1530 	btrfs_remount_cleanup(fs_info, old_ctx.mount_opt);
1531 	clear_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state);
1532 	return ret;
1533 }
1534 
1535 /* Used to sort the devices by max_avail (descending sort) */
1536 static int btrfs_cmp_device_free_bytes(const void *a, const void *b)
1537 {
1538 	const struct btrfs_device_info *dev_info1 = a;
1539 	const struct btrfs_device_info *dev_info2 = b;
1540 
1541 	if (dev_info1->max_avail > dev_info2->max_avail)
1542 		return -1;
1543 	else if (dev_info1->max_avail < dev_info2->max_avail)
1544 		return 1;
1545 	return 0;
1546 }
1547 
1548 /*
1549  * Sort the devices by max_avail, which stores the maximum free extent size
1550  * of each device (descending sort).
1551  */
1552 static inline void btrfs_descending_sort_devices(
1553 					struct btrfs_device_info *devices,
1554 					size_t nr_devices)
1555 {
1556 	sort(devices, nr_devices, sizeof(struct btrfs_device_info),
1557 	     btrfs_cmp_device_free_bytes, NULL);
1558 }
1559 
1560 /*
1561  * Helper to calculate the free space on the devices that can be used to store
1562  * file data.
1563  */
1564 static inline int btrfs_calc_avail_data_space(struct btrfs_fs_info *fs_info,
1565 					      u64 *free_bytes)
1566 {
1567 	struct btrfs_device_info *devices_info;
1568 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
1569 	struct btrfs_device *device;
1570 	u64 type;
1571 	u64 avail_space;
1572 	u64 min_stripe_size;
1573 	int num_stripes = 1;
1574 	int i = 0, nr_devices;
1575 	const struct btrfs_raid_attr *rattr;
1576 
1577 	/*
1578 	 * We aren't under the device list lock, so this is racy-ish, but good
1579 	 * enough for our purposes.
1580 	 */
1581 	nr_devices = fs_info->fs_devices->open_devices;
1582 	if (!nr_devices) {
1583 		smp_mb();
1584 		nr_devices = fs_info->fs_devices->open_devices;
1585 		ASSERT(nr_devices);
1586 		if (!nr_devices) {
1587 			*free_bytes = 0;
1588 			return 0;
1589 		}
1590 	}
1591 
1592 	devices_info = kmalloc_array(nr_devices, sizeof(*devices_info),
1593 			       GFP_KERNEL);
1594 	if (!devices_info)
1595 		return -ENOMEM;
1596 
1597 	/* calc min stripe number for data space allocation */
1598 	type = btrfs_data_alloc_profile(fs_info);
1599 	rattr = &btrfs_raid_array[btrfs_bg_flags_to_raid_index(type)];
1600 
1601 	if (type & BTRFS_BLOCK_GROUP_RAID0)
1602 		num_stripes = nr_devices;
1603 	else if (type & BTRFS_BLOCK_GROUP_RAID1_MASK)
1604 		num_stripes = rattr->ncopies;
1605 	else if (type & BTRFS_BLOCK_GROUP_RAID10)
1606 		num_stripes = 4;
1607 
1608 	/* Adjust for more than 1 stripe per device */
1609 	min_stripe_size = rattr->dev_stripes * BTRFS_STRIPE_LEN;
1610 
1611 	rcu_read_lock();
1612 	list_for_each_entry_rcu(device, &fs_devices->devices, dev_list) {
1613 		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
1614 						&device->dev_state) ||
1615 		    !device->bdev ||
1616 		    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
1617 			continue;
1618 
1619 		if (i >= nr_devices)
1620 			break;
1621 
1622 		avail_space = device->total_bytes - device->bytes_used;
1623 
1624 		/* align with stripe_len */
1625 		avail_space = rounddown(avail_space, BTRFS_STRIPE_LEN);
1626 
1627 		/*
1628 		 * Ensure we have at least min_stripe_size on top of the
1629 		 * reserved space on the device.
1630 		 */
1631 		if (avail_space <= BTRFS_DEVICE_RANGE_RESERVED + min_stripe_size)
1632 			continue;
1633 
1634 		avail_space -= BTRFS_DEVICE_RANGE_RESERVED;
1635 
1636 		devices_info[i].dev = device;
1637 		devices_info[i].max_avail = avail_space;
1638 
1639 		i++;
1640 	}
1641 	rcu_read_unlock();
1642 
1643 	nr_devices = i;
1644 
1645 	btrfs_descending_sort_devices(devices_info, nr_devices);
1646 
1647 	i = nr_devices - 1;
1648 	avail_space = 0;
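	/*
	 * Simulate the chunk allocator.  devices_info is sorted by free space
	 * in descending order; starting from the device with the least space,
	 * account a hypothetical chunk of num_stripes stripes sized by that
	 * device's remaining space, subtract it from all participating
	 * devices, then drop the device and repeat.
	 */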
1649 	while (nr_devices >= rattr->devs_min) {
1650 		num_stripes = min(num_stripes, nr_devices);
1651 
1652 		if (devices_info[i].max_avail >= min_stripe_size) {
1653 			int j;
1654 			u64 alloc_size;
1655 
1656 			avail_space += devices_info[i].max_avail * num_stripes;
1657 			alloc_size = devices_info[i].max_avail;
1658 			for (j = i + 1 - num_stripes; j <= i; j++)
1659 				devices_info[j].max_avail -= alloc_size;
1660 		}
1661 		i--;
1662 		nr_devices--;
1663 	}
1664 
1665 	kfree(devices_info);
1666 	*free_bytes = avail_space;
1667 	return 0;
1668 }
1669 
1670 /*
1671  * Calculate numbers for 'df', pessimistic in case of mixed raid profiles.
1672  *
1673  * If there's a redundant raid level for the DATA block groups, use the
1674  * respective multiplier to scale the sizes.
1675  *
1676  * The estimate of unused device space is based on simulating the chunk
1677  * allocator algorithm, respecting the device sizes and order of allocations.
1678  * This is a close approximation of the actual use, but there are other factors
1679  * that may change the result (like a new metadata chunk).
1680  *
1681  * If metadata is exhausted, f_bavail will be 0.
1682  */
1683 static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
1684 {
1685 	struct btrfs_fs_info *fs_info = btrfs_sb(dentry->d_sb);
1686 	struct btrfs_super_block *disk_super = fs_info->super_copy;
1687 	struct btrfs_space_info *found;
1688 	u64 total_used = 0;
1689 	u64 total_free_data = 0;
1690 	u64 total_free_meta = 0;
1691 	u32 bits = fs_info->sectorsize_bits;
1692 	__be32 *fsid = (__be32 *)fs_info->fs_devices->fsid;
1693 	unsigned factor = 1;
1694 	struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
1695 	int ret;
1696 	u64 thresh = 0;
1697 	int mixed = 0;
1698 
1699 	list_for_each_entry(found, &fs_info->space_info, list) {
1700 		if (found->flags & BTRFS_BLOCK_GROUP_DATA) {
1701 			int i;
1702 
1703 			total_free_data += found->disk_total - found->disk_used;
1704 			total_free_data -=
1705 				btrfs_account_ro_block_groups_free_space(found);
1706 
1707 			for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
1708 				if (!list_empty(&found->block_groups[i]))
1709 					factor = btrfs_bg_type_to_factor(
1710 						btrfs_raid_array[i].bg_flag);
1711 			}
1712 		}
1713 
1714 		/*
1715 	 * Metadata in mixed block group profiles is accounted in data.
1716 		 */
1717 		if (!mixed && found->flags & BTRFS_BLOCK_GROUP_METADATA) {
1718 			if (found->flags & BTRFS_BLOCK_GROUP_DATA)
1719 				mixed = 1;
1720 			else
1721 				total_free_meta += found->disk_total -
1722 					found->disk_used;
1723 		}
1724 
1725 		total_used += found->disk_used;
1726 	}
1727 
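	/*
	 * Report sizes in units of f_bsize (the sectorsize), with raw byte
	 * counts scaled down by the raid factor.
	 */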
1728 	buf->f_blocks = div_u64(btrfs_super_total_bytes(disk_super), factor);
1729 	buf->f_blocks >>= bits;
1730 	buf->f_bfree = buf->f_blocks - (div_u64(total_used, factor) >> bits);
1731 
1732 	/* Account the global block reserve as used; it's already in logical size. */
1733 	spin_lock(&block_rsv->lock);
1734 	/* Mixed block group accounting is not byte-accurate; avoid overflow. */
1735 	if (buf->f_bfree >= block_rsv->size >> bits)
1736 		buf->f_bfree -= block_rsv->size >> bits;
1737 	else
1738 		buf->f_bfree = 0;
1739 	spin_unlock(&block_rsv->lock);
1740 
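	/*
	 * f_bavail combines the unused space inside allocated data chunks with
	 * an estimate of what the chunk allocator could still place on the
	 * unallocated device space, both scaled down by the raid factor.
	 */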
1741 	buf->f_bavail = div_u64(total_free_data, factor);
1742 	ret = btrfs_calc_avail_data_space(fs_info, &total_free_data);
1743 	if (ret)
1744 		return ret;
1745 	buf->f_bavail += div_u64(total_free_data, factor);
1746 	buf->f_bavail = buf->f_bavail >> bits;
1747 
1748 	/*
1749 	 * We calculate the remaining metadata space minus the global reserve. If
1750 	 * this is (supposedly) smaller than zero, there's no space. But this
1751 	 * does not hold in practice; the exhausted state happens while there is
1752 	 * still some positive delta. So we apply some guesswork and compare the
1753 	 * delta to a 4M threshold.  (Practically observed delta was ~2M.)
1754 	 *
1755 	 * We probably cannot calculate the exact threshold value because this
1756 	 * depends on the internal reservations requested by various
1757 	 * operations, so some operations that consume a small amount of metadata
1758 	 * will succeed even if the reported Avail is zero. But this is better
1759 	 * than the other way around.
1760 	 */
1761 	thresh = SZ_4M;
1762 
1763 	/*
1764 	 * We only want to claim there's no available space if we can no longer
1765 	 * allocate chunks for our metadata profile and our global reserve will
1766 	 * not fit in the free metadata space.  If we aren't ->full then we
1767 	 * still can allocate chunks and thus are fine using the currently
1768 	 * calculated f_bavail.
1769 	 */
1770 	if (!mixed && block_rsv->space_info->full &&
1771 	    (total_free_meta < thresh || total_free_meta - thresh < block_rsv->size))
1772 		buf->f_bavail = 0;
1773 
1774 	buf->f_type = BTRFS_SUPER_MAGIC;
1775 	buf->f_bsize = fs_info->sectorsize;
1776 	buf->f_namelen = BTRFS_NAME_LEN;
1777 
1778 	/* We treat it as constant endianness (it doesn't matter _which_)
1779 	 * because we want the fsid to come out the same whether mounted
1780 	 * on a big-endian or little-endian host. */
1781 	buf->f_fsid.val[0] = be32_to_cpu(fsid[0]) ^ be32_to_cpu(fsid[2]);
1782 	buf->f_fsid.val[1] = be32_to_cpu(fsid[1]) ^ be32_to_cpu(fsid[3]);
1783 	/* Mask in the root object ID too, to disambiguate subvols */
1784 	buf->f_fsid.val[0] ^= btrfs_root_id(BTRFS_I(d_inode(dentry))->root) >> 32;
1785 	buf->f_fsid.val[1] ^= btrfs_root_id(BTRFS_I(d_inode(dentry))->root);
1786 
1787 	return 0;
1788 }
1789 
1790 static int btrfs_fc_test_super(struct super_block *sb, struct fs_context *fc)
1791 {
1792 	struct btrfs_fs_info *p = fc->s_fs_info;
1793 	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
1794 
1795 	return fs_info->fs_devices == p->fs_devices;
1796 }
1797 
1798 static int btrfs_get_tree_super(struct fs_context *fc)
1799 {
1800 	struct btrfs_fs_info *fs_info = fc->s_fs_info;
1801 	struct btrfs_fs_context *ctx = fc->fs_private;
1802 	struct btrfs_fs_devices *fs_devices = NULL;
1803 	struct block_device *bdev;
1804 	struct btrfs_device *device;
1805 	struct super_block *sb;
1806 	blk_mode_t mode = btrfs_open_mode(fc);
1807 	int ret;
1808 
1809 	btrfs_ctx_to_info(fs_info, ctx);
1810 	mutex_lock(&uuid_mutex);
1811 
1812 	/*
1813 	 * With 'true' passed to btrfs_scan_one_device() (mount time) we expect
1814 	 * either a valid device or an error.
1815 	 */
1816 	device = btrfs_scan_one_device(fc->source, mode, true);
1817 	ASSERT(device != NULL);
1818 	if (IS_ERR(device)) {
1819 		mutex_unlock(&uuid_mutex);
1820 		return PTR_ERR(device);
1821 	}
1822 
1823 	fs_devices = device->fs_devices;
1824 	fs_info->fs_devices = fs_devices;
1825 
1826 	ret = btrfs_open_devices(fs_devices, mode, &btrfs_fs_type);
1827 	mutex_unlock(&uuid_mutex);
1828 	if (ret)
1829 		return ret;
1830 
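	/* A read-write mount requires at least one writeable device. */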
1831 	if (!(fc->sb_flags & SB_RDONLY) && fs_devices->rw_devices == 0) {
1832 		ret = -EACCES;
1833 		goto error;
1834 	}
1835 
1836 	bdev = fs_devices->latest_dev->bdev;
1837 
1838 	/*
1839 	 * From now on the error handling is not straightforward.
1840 	 *
1841 	 * If successful, this will transfer the fs_info into the super block,
1842 	 * and fc->s_fs_info will be NULL.  However, if there's an existing
1843 	 * super, we'll still have fc->s_fs_info populated.  If we error out
1844 	 * completely, it'll be cleaned up when we drop the fs_context;
1845 	 * otherwise it's tied to the lifetime of the super_block.
1846 	 */
1847 	sb = sget_fc(fc, btrfs_fc_test_super, set_anon_super_fc);
1848 	if (IS_ERR(sb)) {
1849 		ret = PTR_ERR(sb);
1850 		goto error;
1851 	}
1852 
1853 	set_device_specific_options(fs_info);
1854 
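	/*
	 * An existing root means the fs is already mounted: drop our extra
	 * device handles, and report an ro/rw mismatch as -EBUSY so the caller
	 * can retry via btrfs_reconfigure_for_mount().  Otherwise fill the
	 * freshly created super block.
	 */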
1855 	if (sb->s_root) {
1856 		btrfs_close_devices(fs_devices);
1857 		if ((fc->sb_flags ^ sb->s_flags) & SB_RDONLY)
1858 			ret = -EBUSY;
1859 	} else {
1860 		snprintf(sb->s_id, sizeof(sb->s_id), "%pg", bdev);
1861 		shrinker_debugfs_rename(sb->s_shrink, "sb-btrfs:%s", sb->s_id);
1862 		btrfs_sb(sb)->bdev_holder = &btrfs_fs_type;
1863 		ret = btrfs_fill_super(sb, fs_devices, NULL);
1864 	}
1865 
1866 	if (ret) {
1867 		deactivate_locked_super(sb);
1868 		return ret;
1869 	}
1870 
1871 	btrfs_clear_oneshot_options(fs_info);
1872 
1873 	fc->root = dget(sb->s_root);
1874 	return 0;
1875 
1876 error:
1877 	btrfs_close_devices(fs_devices);
1878 	return ret;
1879 }
1880 
1881 /*
1882  * Ever since commit 0723a0473fb4 ("btrfs: allow mounting btrfs subvolumes
1883  * with different ro/rw options") the following works:
1884  *
1885  *        (i) mount /dev/sda3 -o subvol=foo,ro /mnt/foo
1886  *       (ii) mount /dev/sda3 -o subvol=bar,rw /mnt/bar
1887  *
1888  * which looks nice and innocent but is actually pretty intricate and deserves
1889  * a long comment.
1890  *
1891  * On another filesystem a subvolume mount is close to something like:
1892  *
1893  *	(iii) # create rw superblock + initial mount
1894  *	      mount -t xfs /dev/sdb /opt/
1895  *
1896  *	      # create ro bind mount
1897  *	      mount --bind -o ro /opt/foo /mnt/foo
1898  *
1899  *	      # unmount initial mount
1900  *	      umount /opt
1901  *
1902  * Of course, there's some special subvolume sauce and there's the fact that the
1903  * sb->s_root dentry is really swapped after mount_subtree(). But conceptually
1904  * it's very close and will help us understand the issue.
1905  *
1906  * The old mount API didn't cleanly distinguish between a mount being made ro
1907  * and a superblock being made ro.  The only way to change the ro state of
1908  * either object was by passing MS_RDONLY. If a new mount was created via
1909  * mount(2) such as:
1910  *
1911  *      mount("/dev/sdb", "/mnt", "xfs", MS_RDONLY, NULL);
1912  *
1913  * the MS_RDONLY flag being specified had two effects:
1914  *
1915  * (1) MNT_READONLY was raised -> the resulting mount got
1916  *     @mnt->mnt_flags |= MNT_READONLY raised.
1917  *
1918  * (2) MS_RDONLY was passed to the filesystem's mount method and the filesystem
1919  *     made the superblock ro. Note how SB_RDONLY has the same value as
1920  *     MS_RDONLY and is raised whenever MS_RDONLY is passed through mount(2).
1921  *
1922  * Creating a subtree mount via (iii) ends up leaving a rw superblock with a
1923  * subtree mounted ro.
1924  *
1925  * But consider the effect of the old mount API on btrfs subvolume mounting,
1926  * which combines the distinct steps in (iii) into a single step.
1927  *
1928  * By issuing (i) both the mount and the superblock are turned ro. Now when (ii)
1929  * is issued the superblock is ro and thus even if the mount created for (ii) is
1930  * rw it wouldn't help. Hence, btrfs needed to transition the superblock from ro
1931  * to rw for (ii) which it did using an internal remount call.
1932  *
1933  * IOW, subvolume mounting was inherently complicated due to the ambiguity of
1934  * MS_RDONLY in mount(2). Note that this ambiguity means mount(8) always
1935  * translates "ro" to MS_RDONLY. IOW, in both (i) and (ii) "ro" becomes
1936  * MS_RDONLY when passed by mount(8) to mount(2).
1937  *
1938  * Enter the new mount API. The new mount API disambiguates making a mount ro
1939  * and making a superblock ro.
1940  *
1941  * (3) To turn a mount ro the MOUNT_ATTR_RDONLY flag can be used with either
1942  *     fsmount() or mount_setattr(); this is a pure VFS level change for a
1943  *     specific mount or mount tree that is never seen by the filesystem itself.
1944  *
1945  * (4) To turn a superblock ro the "ro" flag must be used with
1946  *     fsconfig(FSCONFIG_SET_FLAG, "ro"). This option is seen by the filesystem
1947  *     in fc->sb_flags.
1948  *
1949  * This disambiguation has rather positive consequences.  Mounting a subvolume
1950  * ro will not also turn the superblock ro. Only the mount for the subvolume
1951  * will become ro.
1952  *
1953  * So, if the superblock creation request comes from the new mount API the
1954  * caller must have explicitly done:
1955  *
1956  *      fsconfig(FSCONFIG_SET_FLAG, "ro")
1957  *      fsmount/mount_setattr(MOUNT_ATTR_RDONLY)
1958  *
1959  * IOW, at some point the caller must have explicitly turned the whole
1960  * superblock ro and we shouldn't just undo it like we did for the old mount
1961  * API. In any case, it lets us avoid the hack in the new mount API.
1962  *
1963  * Consequently, the remounting hack must only be used for requests originating
1964  * from the old mount API and should be marked for full deprecation so it can be
1965  * turned off in a couple of years.
1966  *
1967  * The new mount API has no reason to support this hack.
1968  */
1969 static struct vfsmount *btrfs_reconfigure_for_mount(struct fs_context *fc)
1970 {
1971 	struct vfsmount *mnt;
1972 	int ret;
1973 	const bool ro2rw = !(fc->sb_flags & SB_RDONLY);
1974 
1975 	/*
1976 	 * We got an EBUSY because our SB_RDONLY flag didn't match the existing
1977 	 * super block, so invert our setting here and retry the mount so we
1978 	 * can get our vfsmount.
1979 	 */
1980 	if (ro2rw)
1981 		fc->sb_flags |= SB_RDONLY;
1982 	else
1983 		fc->sb_flags &= ~SB_RDONLY;
1984 
1985 	mnt = fc_mount(fc);
1986 	if (IS_ERR(mnt))
1987 		return mnt;
1988 
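	/*
	 * Only requests coming through the old mount API get the ro->rw
	 * conversion below; new mount API callers asked for a ro super block
	 * explicitly (see the comment above this function).
	 */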
1989 	if (!fc->oldapi || !ro2rw)
1990 		return mnt;
1991 
1992 	/* We need to convert to rw, call reconfigure. */
1993 	fc->sb_flags &= ~SB_RDONLY;
1994 	down_write(&mnt->mnt_sb->s_umount);
1995 	ret = btrfs_reconfigure(fc);
1996 	up_write(&mnt->mnt_sb->s_umount);
1997 	if (ret) {
1998 		mntput(mnt);
1999 		return ERR_PTR(ret);
2000 	}
2001 	return mnt;
2002 }
2003 
2004 static int btrfs_get_tree_subvol(struct fs_context *fc)
2005 {
2006 	struct btrfs_fs_info *fs_info = NULL;
2007 	struct btrfs_fs_context *ctx = fc->fs_private;
2008 	struct fs_context *dup_fc;
2009 	struct dentry *dentry;
2010 	struct vfsmount *mnt;
2011 
2012 	/*
2013 	 * Set up a dummy root and fs_info for test/set super.  We don't actually
2014 	 * fill this stuff out until open_ctree(), which will properly initialize
2015 	 * the filesystem-specific settings later.  btrfs_init_fs_info()
2016 	 * initializes the static elements of the fs_info (locks and such) to
2017 	 * make cleanup easier if we find a superblock with our given fs_devices
2018 	 * later on at sget() time.
2019 	 */
2020 	fs_info = kvzalloc(sizeof(struct btrfs_fs_info), GFP_KERNEL);
2021 	if (!fs_info)
2022 		return -ENOMEM;
2023 
2024 	fs_info->super_copy = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_KERNEL);
2025 	fs_info->super_for_commit = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_KERNEL);
2026 	if (!fs_info->super_copy || !fs_info->super_for_commit) {
2027 		btrfs_free_fs_info(fs_info);
2028 		return -ENOMEM;
2029 	}
2030 	btrfs_init_fs_info(fs_info);
2031 
2032 	dup_fc = vfs_dup_fs_context(fc);
2033 	if (IS_ERR(dup_fc)) {
2034 		btrfs_free_fs_info(fs_info);
2035 		return PTR_ERR(dup_fc);
2036 	}
2037 
2038 	/*
2039 	 * When we do the sget_fc this gets transferred to the sb, so we only
2040 	 * need to set it on the dup_fc as this is what creates the super block.
2041 	 */
2042 	dup_fc->s_fs_info = fs_info;
2043 
2044 	/*
2045 	 * We'll do the security settings in our btrfs_get_tree_super() mount
2046 	 * loop; they were duplicated into dup_fc, so we can drop the originals
2047 	 * here.
2048 	 */
2049 	security_free_mnt_opts(&fc->security);
2050 	fc->security = NULL;
2051 
2052 	mnt = fc_mount(dup_fc);
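	/*
	 * Mount the block device super block through the duplicated context.
	 * -EBUSY means an existing super block with a mismatched ro/rw state,
	 * which btrfs_reconfigure_for_mount() resolves for old mount API
	 * requests.
	 */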
2053 	if (PTR_ERR_OR_ZERO(mnt) == -EBUSY)
2054 		mnt = btrfs_reconfigure_for_mount(dup_fc);
2055 	put_fs_context(dup_fc);
2056 	if (IS_ERR(mnt))
2057 		return PTR_ERR(mnt);
2058 
2059 	/*
2060 	 * This frees ->subvol_name, because if it isn't set we have to
2061 	 * allocate a buffer to hold the subvol_name, so we just drop our
2062 	 * reference to it here.
2063 	 */
2064 	dentry = mount_subvol(ctx->subvol_name, ctx->subvol_objectid, mnt);
2065 	ctx->subvol_name = NULL;
2066 	if (IS_ERR(dentry))
2067 		return PTR_ERR(dentry);
2068 
2069 	fc->root = dentry;
2070 	return 0;
2071 }
2072 
2073 static int btrfs_get_tree(struct fs_context *fc)
2074 {
2075 	/*
2076 	 * Since we use mount_subtree to mount the default/specified subvol, we
2077 	 * have to do mounts in two steps.
2078 	 *
2079 	 * On the first pass through we call btrfs_get_tree_subvol(), which is
2080 	 * just a wrapper around fc_mount() to call back into here again, and this
2081 	 * time we'll call btrfs_get_tree_super().  This will do the open_ctree()
2082 	 * and everything to open the devices and file system.  Then we return
2083 	 * with a fully constructed vfsmount in btrfs_get_tree_subvol(), and
2084 	 * from there we can do our mount_subvol() call, which will look up
2085 	 * whichever subvol we're mounting and set up this fc with the
2086 	 * appropriate dentry for the subvol.
2087 	 */
2088 	if (fc->s_fs_info)
2089 		return btrfs_get_tree_super(fc);
2090 	return btrfs_get_tree_subvol(fc);
2091 }
2092 
2093 static void btrfs_kill_super(struct super_block *sb)
2094 {
2095 	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
2096 	kill_anon_super(sb);
2097 	btrfs_free_fs_info(fs_info);
2098 }
2099 
2100 static void btrfs_free_fs_context(struct fs_context *fc)
2101 {
2102 	struct btrfs_fs_context *ctx = fc->fs_private;
2103 	struct btrfs_fs_info *fs_info = fc->s_fs_info;
2104 
2105 	if (fs_info)
2106 		btrfs_free_fs_info(fs_info);
2107 
2108 	if (ctx && refcount_dec_and_test(&ctx->refs)) {
2109 		kfree(ctx->subvol_name);
2110 		kfree(ctx);
2111 	}
2112 }
2113 
2114 static int btrfs_dup_fs_context(struct fs_context *fc, struct fs_context *src_fc)
2115 {
2116 	struct btrfs_fs_context *ctx = src_fc->fs_private;
2117 
2118 	/*
2119 	 * Give a ref to our ctx to this dup, as we want to keep it around for
2120 	 * our original fc so we can have the subvolume name or objectid.
2121 	 *
2122 	 * We unset ->source in the original fc because the dup needs it for
2123 	 * mounting, and then once we free the dup it'll free ->source, so we
2124 	 * need to make sure we're only pointing to it in one fc.
2125 	 */
2126 	refcount_inc(&ctx->refs);
2127 	fc->fs_private = ctx;
2128 	fc->source = src_fc->source;
2129 	src_fc->source = NULL;
2130 	return 0;
2131 }
2132 
2133 static const struct fs_context_operations btrfs_fs_context_ops = {
2134 	.parse_param	= btrfs_parse_param,
2135 	.reconfigure	= btrfs_reconfigure,
2136 	.get_tree	= btrfs_get_tree,
2137 	.dup		= btrfs_dup_fs_context,
2138 	.free		= btrfs_free_fs_context,
2139 };
2140 
2141 static int btrfs_init_fs_context(struct fs_context *fc)
2142 {
2143 	struct btrfs_fs_context *ctx;
2144 
2145 	ctx = kzalloc(sizeof(struct btrfs_fs_context), GFP_KERNEL);
2146 	if (!ctx)
2147 		return -ENOMEM;
2148 
2149 	refcount_set(&ctx->refs, 1);
2150 	fc->fs_private = ctx;
2151 	fc->ops = &btrfs_fs_context_ops;
2152 
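	/*
	 * A reconfigure (remount) starts from the options currently applied to
	 * the super block; a fresh mount starts from the built-in defaults.
	 */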
2153 	if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE) {
2154 		btrfs_info_to_ctx(btrfs_sb(fc->root->d_sb), ctx);
2155 	} else {
2156 		ctx->thread_pool_size =
2157 			min_t(unsigned long, num_online_cpus() + 2, 8);
2158 		ctx->max_inline = BTRFS_DEFAULT_MAX_INLINE;
2159 		ctx->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL;
2160 	}
2161 
2162 #ifdef CONFIG_BTRFS_FS_POSIX_ACL
2163 	fc->sb_flags |= SB_POSIXACL;
2164 #endif
2165 	fc->sb_flags |= SB_I_VERSION;
2166 
2167 	return 0;
2168 }
2169 
2170 static struct file_system_type btrfs_fs_type = {
2171 	.owner			= THIS_MODULE,
2172 	.name			= "btrfs",
2173 	.init_fs_context	= btrfs_init_fs_context,
2174 	.parameters		= btrfs_fs_parameters,
2175 	.kill_sb		= btrfs_kill_super,
2176 	.fs_flags		= FS_REQUIRES_DEV | FS_BINARY_MOUNTDATA | FS_ALLOW_IDMAP,
2177 };
2178 
2179 MODULE_ALIAS_FS("btrfs");
2180 
2181 static int btrfs_control_open(struct inode *inode, struct file *file)
2182 {
2183 	/*
2184 	 * The control device has no per-open state; just make sure
2185 	 * private_data starts out NULL.  None of the ioctls handled
2186 	 * below use it.
2187 	 */
2188 	file->private_data = NULL;
2189 	return 0;
2190 }
2191 
2192 /*
2193  * Used by /dev/btrfs-control for devices ioctls.
2194  */
2195 static long btrfs_control_ioctl(struct file *file, unsigned int cmd,
2196 				unsigned long arg)
2197 {
2198 	struct btrfs_ioctl_vol_args *vol;
2199 	struct btrfs_device *device = NULL;
2200 	dev_t devt = 0;
2201 	int ret = -ENOTTY;
2202 
2203 	if (!capable(CAP_SYS_ADMIN))
2204 		return -EPERM;
2205 
2206 	vol = memdup_user((void __user *)arg, sizeof(*vol));
2207 	if (IS_ERR(vol))
2208 		return PTR_ERR(vol);
2209 	ret = btrfs_check_ioctl_vol_args_path(vol);
2210 	if (ret < 0)
2211 		goto out;
2212 
2213 	switch (cmd) {
2214 	case BTRFS_IOC_SCAN_DEV:
2215 		mutex_lock(&uuid_mutex);
2216 		/*
2217 		 * Scanning outside of a mount can return NULL, which would
2218 		 * turn into a 0 error code.
2219 		 */
2220 		device = btrfs_scan_one_device(vol->name, BLK_OPEN_READ, false);
2221 		ret = PTR_ERR_OR_ZERO(device);
2222 		mutex_unlock(&uuid_mutex);
2223 		break;
2224 	case BTRFS_IOC_FORGET_DEV:
2225 		if (vol->name[0] != 0) {
2226 			ret = lookup_bdev(vol->name, &devt);
2227 			if (ret)
2228 				break;
2229 		}
2230 		ret = btrfs_forget_devices(devt);
2231 		break;
2232 	case BTRFS_IOC_DEVICES_READY:
2233 		mutex_lock(&uuid_mutex);
2234 		/*
2235 		 * Scanning outside of a mount can return NULL, which would
2236 		 * turn into a 0 error code.
2237 		 */
2238 		device = btrfs_scan_one_device(vol->name, BLK_OPEN_READ, false);
2239 		if (IS_ERR_OR_NULL(device)) {
2240 			mutex_unlock(&uuid_mutex);
2241 			ret = PTR_ERR(device);
2242 			break;
2243 		}
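		/*
		 * Ready means all devices of the filesystem have been scanned:
		 * return 0 in that case, 1 if some are still missing.
		 */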
2244 		ret = !(device->fs_devices->num_devices ==
2245 			device->fs_devices->total_devices);
2246 		mutex_unlock(&uuid_mutex);
2247 		break;
2248 	case BTRFS_IOC_GET_SUPPORTED_FEATURES:
2249 		ret = btrfs_ioctl_get_supported_features((void __user*)arg);
2250 		break;
2251 	}
2252 
2253 out:
2254 	kfree(vol);
2255 	return ret;
2256 }
2257 
2258 static int btrfs_freeze(struct super_block *sb)
2259 {
2260 	struct btrfs_trans_handle *trans;
2261 	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
2262 	struct btrfs_root *root = fs_info->tree_root;
2263 
2264 	set_bit(BTRFS_FS_FROZEN, &fs_info->flags);
2265 	/*
2266 	 * We don't need a barrier here; we'll wait for any transaction that
2267 	 * could be in progress on other threads (and do delayed iputs that
2268 	 * we want to avoid on a frozen filesystem), or do the commit
2269 	 * ourselves.
2270 	 */
2271 	trans = btrfs_attach_transaction_barrier(root);
2272 	if (IS_ERR(trans)) {
2273 		/* no transaction, don't bother */
2274 		if (PTR_ERR(trans) == -ENOENT)
2275 			return 0;
2276 		return PTR_ERR(trans);
2277 	}
2278 	return btrfs_commit_transaction(trans);
2279 }
2280 
2281 static int check_dev_super(struct btrfs_device *dev)
2282 {
2283 	struct btrfs_fs_info *fs_info = dev->fs_info;
2284 	struct btrfs_super_block *sb;
2285 	u64 last_trans;
2286 	u16 csum_type;
2287 	int ret = 0;
2288 
2289 	/* This should be called with fs still frozen. */
2290 	ASSERT(test_bit(BTRFS_FS_FROZEN, &fs_info->flags));
2291 
2292 	/* Missing dev, no need to check. */
2293 	if (!dev->bdev)
2294 		return 0;
2295 
2296 	/* Only need to check the primary super block. */
2297 	sb = btrfs_read_dev_one_super(dev->bdev, 0, true);
2298 	if (IS_ERR(sb))
2299 		return PTR_ERR(sb);
2300 
2301 	/* Verify the checksum. */
2302 	csum_type = btrfs_super_csum_type(sb);
2303 	if (csum_type != btrfs_super_csum_type(fs_info->super_copy)) {
2304 		btrfs_err(fs_info, "csum type changed, has %u expect %u",
2305 			  csum_type, btrfs_super_csum_type(fs_info->super_copy));
2306 		ret = -EUCLEAN;
2307 		goto out;
2308 	}
2309 
2310 	if (btrfs_check_super_csum(fs_info, sb)) {
2311 		btrfs_err(fs_info, "csum for on-disk super block no longer matches");
2312 		ret = -EUCLEAN;
2313 		goto out;
2314 	}
2315 
2316 	/* btrfs_validate_super() includes the fsid check against super->fsid. */
2317 	ret = btrfs_validate_super(fs_info, sb, 0);
2318 	if (ret < 0)
2319 		goto out;
2320 
2321 	last_trans = btrfs_get_last_trans_committed(fs_info);
2322 	if (btrfs_super_generation(sb) != last_trans) {
2323 		btrfs_err(fs_info, "transid mismatch, has %llu expect %llu",
2324 			  btrfs_super_generation(sb), last_trans);
2325 		ret = -EUCLEAN;
2326 		goto out;
2327 	}
2328 out:
2329 	btrfs_release_disk_super(sb);
2330 	return ret;
2331 }
2332 
2333 static int btrfs_unfreeze(struct super_block *sb)
2334 {
2335 	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
2336 	struct btrfs_device *device;
2337 	int ret = 0;
2338 
2339 	/*
2340 	 * Make sure the fs was not changed by accident (like hibernation
2341 	 * followed by modification from another OS).
2342 	 * If we find anything wrong, we mark the fs in an error state immediately.
2343 	 *
2344 	 * And since the fs is frozen, no one can modify the fs yet, thus
2345 	 * we don't need to hold device_list_mutex.
2346 	 */
2347 	list_for_each_entry(device, &fs_info->fs_devices->devices, dev_list) {
2348 		ret = check_dev_super(device);
2349 		if (ret < 0) {
2350 			btrfs_handle_fs_error(fs_info, ret,
2351 				"super block on devid %llu got modified unexpectedly",
2352 				device->devid);
2353 			break;
2354 		}
2355 	}
2356 	clear_bit(BTRFS_FS_FROZEN, &fs_info->flags);
2357 
2358 	/*
2359 	 * We still return 0 to allow the VFS layer to unfreeze the fs even if
2360 	 * the above checks failed. Since the fs is either fine or read-only,
2361 	 * we're safe to continue without causing further damage.
2362 	 */
2363 	return 0;
2364 }
2365 
2366 static int btrfs_show_devname(struct seq_file *m, struct dentry *root)
2367 {
2368 	struct btrfs_fs_info *fs_info = btrfs_sb(root->d_sb);
2369 
2370 	/*
2371 	 * There should always be a valid pointer in latest_dev; it may be stale
2372 	 * for a short moment in case it's being deleted, but it is still valid
2373 	 * until the end of the RCU grace period.
2374 	 */
2375 	rcu_read_lock();
2376 	seq_escape(m, btrfs_dev_name(fs_info->fs_devices->latest_dev), " \t\n\\");
2377 	rcu_read_unlock();
2378 
2379 	return 0;
2380 }
2381 
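/*
 * Super block shrinker hooks: report how many extent maps could be evicted and
 * free up to the requested number of them.
 */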
2382 static long btrfs_nr_cached_objects(struct super_block *sb, struct shrink_control *sc)
2383 {
2384 	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
2385 	const s64 nr = percpu_counter_sum_positive(&fs_info->evictable_extent_maps);
2386 
2387 	trace_btrfs_extent_map_shrinker_count(fs_info, nr);
2388 
2389 	return nr;
2390 }
2391 
2392 static long btrfs_free_cached_objects(struct super_block *sb, struct shrink_control *sc)
2393 {
2394 	const long nr_to_scan = min_t(unsigned long, LONG_MAX, sc->nr_to_scan);
2395 	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
2396 
2397 	return btrfs_free_extent_maps(fs_info, nr_to_scan);
2398 }
2399 
2400 static const struct super_operations btrfs_super_ops = {
2401 	.drop_inode	= btrfs_drop_inode,
2402 	.evict_inode	= btrfs_evict_inode,
2403 	.put_super	= btrfs_put_super,
2404 	.sync_fs	= btrfs_sync_fs,
2405 	.show_options	= btrfs_show_options,
2406 	.show_devname	= btrfs_show_devname,
2407 	.alloc_inode	= btrfs_alloc_inode,
2408 	.destroy_inode	= btrfs_destroy_inode,
2409 	.free_inode	= btrfs_free_inode,
2410 	.statfs		= btrfs_statfs,
2411 	.freeze_fs	= btrfs_freeze,
2412 	.unfreeze_fs	= btrfs_unfreeze,
2413 	.nr_cached_objects = btrfs_nr_cached_objects,
2414 	.free_cached_objects = btrfs_free_cached_objects,
2415 };
2416 
2417 static const struct file_operations btrfs_ctl_fops = {
2418 	.open = btrfs_control_open,
2419 	.unlocked_ioctl	 = btrfs_control_ioctl,
2420 	.compat_ioctl = compat_ptr_ioctl,
2421 	.owner	 = THIS_MODULE,
2422 	.llseek = noop_llseek,
2423 };
2424 
2425 static struct miscdevice btrfs_misc = {
2426 	.minor		= BTRFS_MINOR,
2427 	.name		= "btrfs-control",
2428 	.fops		= &btrfs_ctl_fops
2429 };
2430 
2431 MODULE_ALIAS_MISCDEV(BTRFS_MINOR);
2432 MODULE_ALIAS("devname:btrfs-control");
2433 
2434 static int __init btrfs_interface_init(void)
2435 {
2436 	return misc_register(&btrfs_misc);
2437 }
2438 
2439 static __cold void btrfs_interface_exit(void)
2440 {
2441 	misc_deregister(&btrfs_misc);
2442 }
2443 
2444 static int __init btrfs_print_mod_info(void)
2445 {
2446 	static const char options[] = ""
2447 #ifdef CONFIG_BTRFS_DEBUG
2448 			", debug=on"
2449 #endif
2450 #ifdef CONFIG_BTRFS_ASSERT
2451 			", assert=on"
2452 #endif
2453 #ifdef CONFIG_BTRFS_FS_REF_VERIFY
2454 			", ref-verify=on"
2455 #endif
2456 #ifdef CONFIG_BLK_DEV_ZONED
2457 			", zoned=yes"
2458 #else
2459 			", zoned=no"
2460 #endif
2461 #ifdef CONFIG_FS_VERITY
2462 			", fsverity=yes"
2463 #else
2464 			", fsverity=no"
2465 #endif
2466 			;
2467 	pr_info("Btrfs loaded%s\n", options);
2468 	return 0;
2469 }
2470 
2471 static int register_btrfs(void)
2472 {
2473 	return register_filesystem(&btrfs_fs_type);
2474 }
2475 
2476 static void unregister_btrfs(void)
2477 {
2478 	unregister_filesystem(&btrfs_fs_type);
2479 }
2480 
2481 /* Helper structure for long init/exit functions. */
2482 struct init_sequence {
2483 	int (*init_func)(void);
2484 	/* Can be NULL if the init_func doesn't need cleanup. */
2485 	void (*exit_func)(void);
2486 };
2487 
2488 static const struct init_sequence mod_init_seq[] = {
2489 	{
2490 		.init_func = btrfs_props_init,
2491 		.exit_func = NULL,
2492 	}, {
2493 		.init_func = btrfs_init_sysfs,
2494 		.exit_func = btrfs_exit_sysfs,
2495 	}, {
2496 		.init_func = btrfs_init_compress,
2497 		.exit_func = btrfs_exit_compress,
2498 	}, {
2499 		.init_func = btrfs_init_cachep,
2500 		.exit_func = btrfs_destroy_cachep,
2501 	}, {
2502 		.init_func = btrfs_transaction_init,
2503 		.exit_func = btrfs_transaction_exit,
2504 	}, {
2505 		.init_func = btrfs_ctree_init,
2506 		.exit_func = btrfs_ctree_exit,
2507 	}, {
2508 		.init_func = btrfs_free_space_init,
2509 		.exit_func = btrfs_free_space_exit,
2510 	}, {
2511 		.init_func = extent_state_init_cachep,
2512 		.exit_func = extent_state_free_cachep,
2513 	}, {
2514 		.init_func = extent_buffer_init_cachep,
2515 		.exit_func = extent_buffer_free_cachep,
2516 	}, {
2517 		.init_func = btrfs_bioset_init,
2518 		.exit_func = btrfs_bioset_exit,
2519 	}, {
2520 		.init_func = extent_map_init,
2521 		.exit_func = extent_map_exit,
2522 	}, {
2523 		.init_func = ordered_data_init,
2524 		.exit_func = ordered_data_exit,
2525 	}, {
2526 		.init_func = btrfs_delayed_inode_init,
2527 		.exit_func = btrfs_delayed_inode_exit,
2528 	}, {
2529 		.init_func = btrfs_auto_defrag_init,
2530 		.exit_func = btrfs_auto_defrag_exit,
2531 	}, {
2532 		.init_func = btrfs_delayed_ref_init,
2533 		.exit_func = btrfs_delayed_ref_exit,
2534 	}, {
2535 		.init_func = btrfs_prelim_ref_init,
2536 		.exit_func = btrfs_prelim_ref_exit,
2537 	}, {
2538 		.init_func = btrfs_interface_init,
2539 		.exit_func = btrfs_interface_exit,
2540 	}, {
2541 		.init_func = btrfs_print_mod_info,
2542 		.exit_func = NULL,
2543 	}, {
2544 		.init_func = btrfs_run_sanity_tests,
2545 		.exit_func = NULL,
2546 	}, {
2547 		.init_func = register_btrfs,
2548 		.exit_func = unregister_btrfs,
2549 	}
2550 };
2551 
2552 static bool mod_init_result[ARRAY_SIZE(mod_init_seq)];
2553 
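/*
 * Undo the init sequence in reverse order, skipping steps that did not run or
 * that have no cleanup function.
 */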
2554 static __always_inline void btrfs_exit_btrfs_fs(void)
2555 {
2556 	int i;
2557 
2558 	for (i = ARRAY_SIZE(mod_init_seq) - 1; i >= 0; i--) {
2559 		if (!mod_init_result[i])
2560 			continue;
2561 		if (mod_init_seq[i].exit_func)
2562 			mod_init_seq[i].exit_func();
2563 		mod_init_result[i] = false;
2564 	}
2565 }
2566 
2567 static void __exit exit_btrfs_fs(void)
2568 {
2569 	btrfs_exit_btrfs_fs();
2570 	btrfs_cleanup_fs_uuids();
2571 }
2572 
2573 static int __init init_btrfs_fs(void)
2574 {
2575 	int ret;
2576 	int i;
2577 
2578 	for (i = 0; i < ARRAY_SIZE(mod_init_seq); i++) {
2579 		ASSERT(!mod_init_result[i]);
2580 		ret = mod_init_seq[i].init_func();
2581 		if (ret < 0) {
2582 			btrfs_exit_btrfs_fs();
2583 			return ret;
2584 		}
2585 		mod_init_result[i] = true;
2586 	}
2587 	return 0;
2588 }
2589 
2590 late_initcall(init_btrfs_fs);
2591 module_exit(exit_btrfs_fs);
2592 
2593 MODULE_LICENSE("GPL");
2594 MODULE_SOFTDEP("pre: crc32c");
2595 MODULE_SOFTDEP("pre: xxhash64");
2596 MODULE_SOFTDEP("pre: sha256");
2597 MODULE_SOFTDEP("pre: blake2b-256");
2598